From ef2721f4e15f1ea142cc7504eabe30720594fab0 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 30 Dec 2025 10:29:45 +0100 Subject: [PATCH 001/374] Filter out own peer from remote peers list during peer updates. (#4986) --- client/internal/engine.go | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/client/internal/engine.go b/client/internal/engine.go index 55645b494..4f18c3bc8 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -1121,6 +1121,15 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error { e.updateOfflinePeers(networkMap.GetOfflinePeers()) + // Filter out own peer from the remote peers list + localPubKey := e.config.WgPrivateKey.PublicKey().String() + remotePeers := make([]*mgmProto.RemotePeerConfig, 0, len(networkMap.GetRemotePeers())) + for _, p := range networkMap.GetRemotePeers() { + if p.GetWgPubKey() != localPubKey { + remotePeers = append(remotePeers, p) + } + } + // cleanup request, most likely our peer has been deleted if networkMap.GetRemotePeersIsEmpty() { err := e.removeAllPeers() @@ -1129,26 +1138,26 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error { return err } } else { - err := e.removePeers(networkMap.GetRemotePeers()) + err := e.removePeers(remotePeers) if err != nil { return err } - err = e.modifyPeers(networkMap.GetRemotePeers()) + err = e.modifyPeers(remotePeers) if err != nil { return err } - err = e.addNewPeers(networkMap.GetRemotePeers()) + err = e.addNewPeers(remotePeers) if err != nil { return err } e.statusRecorder.FinishPeerListModifications() - e.updatePeerSSHHostKeys(networkMap.GetRemotePeers()) + e.updatePeerSSHHostKeys(remotePeers) - if err := e.updateSSHClientConfig(networkMap.GetRemotePeers()); err != nil { + if err := e.updateSSHClientConfig(remotePeers); err != nil { log.Warnf("failed to update SSH client config: %v", err) } @@ -1156,7 +1165,7 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error { } // must set the exclude list after the peers are added. Without it the manager can not figure out the peers parameters from the store - excludedLazyPeers := e.toExcludedLazyPeers(forwardingRules, networkMap.GetRemotePeers()) + excludedLazyPeers := e.toExcludedLazyPeers(forwardingRules, remotePeers) e.connMgr.SetExcludeList(e.ctx, excludedLazyPeers) e.networkSerial = serial From 4035f07248001ee51969b4209b74721f3ba4194f Mon Sep 17 00:00:00 2001 From: Haruki Hasegawa Date: Tue, 30 Dec 2025 18:36:12 +0900 Subject: [PATCH 002/374] [client] Fix Advanced Settings not opening on Windows with Japanese locale (#4455) (#4637) The Fyne framework does not support TTC font files. Use the default system font (Segoe UI) instead, so Windows can automatically fall back to a Japanese font when needed. --- client/ui/font_windows.go | 1 - 1 file changed, 1 deletion(-) diff --git a/client/ui/font_windows.go b/client/ui/font_windows.go index 93b23a21b..6346a9fb9 100644 --- a/client/ui/font_windows.go +++ b/client/ui/font_windows.go @@ -31,7 +31,6 @@ func (s *serviceClient) getWindowsFontFilePath() string { "chr-CHER-US": "Gadugi.ttf", "zh-HK": "Segoeui.ttf", "zh-TW": "Segoeui.ttf", - "ja-JP": "Yugothm.ttc", "km-KH": "Leelawui.ttf", "ko-KR": "Malgun.ttf", "th-TH": "Leelawui.ttf", From 1d2c7776fde174649e827c37b7dce4f81efb204a Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Tue, 30 Dec 2025 10:46:00 +0100 Subject: [PATCH 003/374] [management] apply login filter only for setup key peers (#4943) --- management/internals/shared/grpc/server.go | 8 ++- management/server/account.go | 4 ++ management/server/account/manager.go | 1 + management/server/mock_server/account_mock.go | 7 ++- management/server/store/sql_store.go | 18 ++++++ management/server/store/sql_store_test.go | 63 +++++++++++++++++++ management/server/store/store.go | 1 + 7 files changed, 100 insertions(+), 2 deletions(-) diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index ad6b34c5f..0b9326fbc 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -184,8 +184,14 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S realIP := getRealIP(ctx) sRealIP := realIP.String() peerMeta := extractPeerMeta(ctx, syncReq.GetMeta()) + userID, err := s.accountManager.GetUserIDByPeerKey(ctx, peerKey.String()) + if err != nil { + s.syncSem.Add(-1) + return mapError(ctx, err) + } + metahashed := metaHash(peerMeta, sRealIP) - if !s.loginFilter.allowLogin(peerKey.String(), metahashed) { + if userID == "" && !s.loginFilter.allowLogin(peerKey.String(), metahashed) { if s.appMetrics != nil { s.appMetrics.GRPCMetrics().CountSyncRequestBlocked() } diff --git a/management/server/account.go b/management/server/account.go index 405a3c0f6..52dcc567e 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -2156,3 +2156,7 @@ func (am *DefaultAccountManager) savePeerIPUpdate(ctx context.Context, transacti return nil } + +func (am *DefaultAccountManager) GetUserIDByPeerKey(ctx context.Context, peerKey string) (string, error) { + return am.Store.GetUserIDByPeerKey(ctx, store.LockingStrengthNone, peerKey) +} diff --git a/management/server/account/manager.go b/management/server/account/manager.go index b5921ec7a..f0b7c3857 100644 --- a/management/server/account/manager.go +++ b/management/server/account/manager.go @@ -123,4 +123,5 @@ type Manager interface { UpdateToPrimaryAccount(ctx context.Context, accountId string) error GetOwnerInfo(ctx context.Context, accountId string) (*types.UserInfo, error) GetCurrentUserInfo(ctx context.Context, userAuth auth.UserAuth) (*users.UserInfoWithPermissions, error) + GetUserIDByPeerKey(ctx context.Context, peerKey string) (string, error) } diff --git a/management/server/mock_server/account_mock.go b/management/server/mock_server/account_mock.go index 928098dbe..0d7d2bc3d 100644 --- a/management/server/mock_server/account_mock.go +++ b/management/server/mock_server/account_mock.go @@ -2,11 +2,12 @@ package mock_server import ( "context" - "github.com/netbirdio/netbird/shared/auth" "net" "net/netip" "time" + "github.com/netbirdio/netbird/shared/auth" + "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -988,3 +989,7 @@ func (am *MockAccountManager) RecalculateNetworkMapCache(ctx context.Context, ac } return nil } + +func (am *MockAccountManager) GetUserIDByPeerKey(ctx context.Context, peerKey string) (string, error) { + return "something", nil +} diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index d2220d4b4..73565a462 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -4082,3 +4082,21 @@ func (s *SqlStore) GetPeersByGroupIDs(ctx context.Context, accountID string, gro return peers, nil } + +func (s *SqlStore) GetUserIDByPeerKey(ctx context.Context, lockStrength LockingStrength, peerKey string) (string, error) { + tx := s.db + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var userID string + result := tx.Model(&nbpeer.Peer{}). + Select("user_id"). + Take(&userID, GetKeyQueryCondition(s), peerKey) + + if result.Error != nil { + return "", status.Errorf(status.Internal, "failed to get user ID by peer key") + } + + return userID, nil +} diff --git a/management/server/store/sql_store_test.go b/management/server/store/sql_store_test.go index 2e2623910..d63d624de 100644 --- a/management/server/store/sql_store_test.go +++ b/management/server/store/sql_store_test.go @@ -3718,6 +3718,69 @@ func TestSqlStore_GetPeersByGroupIDs(t *testing.T) { } } +func TestSqlStore_GetUserIDByPeerKey(t *testing.T) { + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) + t.Cleanup(cleanup) + require.NoError(t, err) + + existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + userID := "test-user-123" + peerKey := "peer-key-abc" + + peer := &nbpeer.Peer{ + ID: "test-peer-1", + Key: peerKey, + AccountID: existingAccountID, + UserID: userID, + IP: net.IP{10, 0, 0, 1}, + DNSLabel: "test-peer-1", + } + + err = store.AddPeerToAccount(context.Background(), peer) + require.NoError(t, err) + + retrievedUserID, err := store.GetUserIDByPeerKey(context.Background(), LockingStrengthNone, peerKey) + require.NoError(t, err) + assert.Equal(t, userID, retrievedUserID) +} + +func TestSqlStore_GetUserIDByPeerKey_NotFound(t *testing.T) { + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) + t.Cleanup(cleanup) + require.NoError(t, err) + + nonExistentPeerKey := "non-existent-peer-key" + + userID, err := store.GetUserIDByPeerKey(context.Background(), LockingStrengthNone, nonExistentPeerKey) + require.Error(t, err) + assert.Equal(t, "", userID) +} + +func TestSqlStore_GetUserIDByPeerKey_NoUserID(t *testing.T) { + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) + t.Cleanup(cleanup) + require.NoError(t, err) + + existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + peerKey := "peer-key-abc" + + peer := &nbpeer.Peer{ + ID: "test-peer-1", + Key: peerKey, + AccountID: existingAccountID, + UserID: "", + IP: net.IP{10, 0, 0, 1}, + DNSLabel: "test-peer-1", + } + + err = store.AddPeerToAccount(context.Background(), peer) + require.NoError(t, err) + + retrievedUserID, err := store.GetUserIDByPeerKey(context.Background(), LockingStrengthNone, peerKey) + require.NoError(t, err) + assert.Equal(t, "", retrievedUserID) +} + func TestSqlStore_ApproveAccountPeers(t *testing.T) { runTestForAllEngines(t, "", func(t *testing.T, store Store) { accountID := "test-account" diff --git a/management/server/store/store.go b/management/server/store/store.go index 0ec7949f9..dbe135406 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -204,6 +204,7 @@ type Store interface { MarkAccountPrimary(ctx context.Context, accountID string) error UpdateAccountNetwork(ctx context.Context, accountID string, ipNet net.IPNet) error GetPolicyRulesByResourceID(ctx context.Context, lockStrength LockingStrength, accountID string, peerID string) ([]*types.PolicyRule, error) + GetUserIDByPeerKey(ctx context.Context, lockStrength LockingStrength, peerKey string) (string, error) } const ( From d88e046d00a47159e43c55e57f22a08b9944e88f Mon Sep 17 00:00:00 2001 From: Nicolas Henneaux Date: Tue, 30 Dec 2025 10:48:17 +0100 Subject: [PATCH 004/374] fix(router): nft tables limit number of peers source (#4852) * fix(router): nft tables limit number of peers source batching them, failing at 3277 prefixes on nftables v1.0.9 with Ubuntu 24.04.3 LTS, 6.14.0-35-generic #35~24.04.1-Ubuntu * fix(router): nft tables limit number of prefixes on ipSet creation --- .../firewall/nftables/manager_linux_test.go | 91 +++++++++++++++++++ client/firewall/nftables/router_linux.go | 33 +++++-- 2 files changed, 118 insertions(+), 6 deletions(-) diff --git a/client/firewall/nftables/manager_linux_test.go b/client/firewall/nftables/manager_linux_test.go index adec802c8..6b29c5606 100644 --- a/client/firewall/nftables/manager_linux_test.go +++ b/client/firewall/nftables/manager_linux_test.go @@ -386,6 +386,97 @@ func TestNftablesManagerCompatibilityWithIptables(t *testing.T) { verifyIptablesOutput(t, stdout, stderr) } +func TestNftablesManagerCompatibilityWithIptablesFor6kPrefixes(t *testing.T) { + if check() != NFTABLES { + t.Skip("nftables not supported on this system") + } + + if _, err := exec.LookPath("iptables-save"); err != nil { + t.Skipf("iptables-save not available on this system: %v", err) + } + + // First ensure iptables-nft tables exist by running iptables-save + stdout, stderr := runIptablesSave(t) + verifyIptablesOutput(t, stdout, stderr) + + manager, err := Create(ifaceMock, iface.DefaultMTU) + require.NoError(t, err, "failed to create manager") + require.NoError(t, manager.Init(nil)) + + t.Cleanup(func() { + err := manager.Close(nil) + require.NoError(t, err, "failed to reset manager state") + + // Verify iptables output after reset + stdout, stderr := runIptablesSave(t) + verifyIptablesOutput(t, stdout, stderr) + }) + + const octet2Count = 25 + const octet3Count = 255 + prefixes := make([]netip.Prefix, 0, (octet2Count-1)*(octet3Count-1)) + for i := 1; i < octet2Count; i++ { + for j := 1; j < octet3Count; j++ { + addr := netip.AddrFrom4([4]byte{192, byte(j), byte(i), 0}) + prefixes = append(prefixes, netip.PrefixFrom(addr, 24)) + } + } + _, err = manager.AddRouteFiltering( + nil, + prefixes, + fw.Network{Prefix: netip.MustParsePrefix("10.2.0.0/24")}, + fw.ProtocolTCP, + nil, + &fw.Port{Values: []uint16{443}}, + fw.ActionAccept, + ) + require.NoError(t, err, "failed to add route filtering rule") + + stdout, stderr = runIptablesSave(t) + verifyIptablesOutput(t, stdout, stderr) +} + +func TestNftablesManagerCompatibilityWithIptablesForEmptyPrefixes(t *testing.T) { + if check() != NFTABLES { + t.Skip("nftables not supported on this system") + } + + if _, err := exec.LookPath("iptables-save"); err != nil { + t.Skipf("iptables-save not available on this system: %v", err) + } + + // First ensure iptables-nft tables exist by running iptables-save + stdout, stderr := runIptablesSave(t) + verifyIptablesOutput(t, stdout, stderr) + + manager, err := Create(ifaceMock, iface.DefaultMTU) + require.NoError(t, err, "failed to create manager") + require.NoError(t, manager.Init(nil)) + + t.Cleanup(func() { + err := manager.Close(nil) + require.NoError(t, err, "failed to reset manager state") + + // Verify iptables output after reset + stdout, stderr := runIptablesSave(t) + verifyIptablesOutput(t, stdout, stderr) + }) + + _, err = manager.AddRouteFiltering( + nil, + []netip.Prefix{}, + fw.Network{Prefix: netip.MustParsePrefix("10.2.0.0/24")}, + fw.ProtocolTCP, + nil, + &fw.Port{Values: []uint16{443}}, + fw.ActionAccept, + ) + require.NoError(t, err, "failed to add route filtering rule") + + stdout, stderr = runIptablesSave(t) + verifyIptablesOutput(t, stdout, stderr) +} + func compareExprsIgnoringCounters(t *testing.T, got, want []expr.Any) { t.Helper() require.Equal(t, len(got), len(want), "expression count mismatch") diff --git a/client/firewall/nftables/router_linux.go b/client/firewall/nftables/router_linux.go index 7f95992da..b6e0cf5b2 100644 --- a/client/firewall/nftables/router_linux.go +++ b/client/firewall/nftables/router_linux.go @@ -48,9 +48,11 @@ const ( // ipTCPHeaderMinSize represents minimum IP (20) + TCP (20) header size for MSS calculation ipTCPHeaderMinSize = 40 -) -const refreshRulesMapError = "refresh rules map: %w" + // maxPrefixesSet 1638 prefixes start to fail, taking some margin + maxPrefixesSet = 1500 + refreshRulesMapError = "refresh rules map: %w" +) var ( errFilterTableNotFound = fmt.Errorf("'filter' table not found") @@ -513,16 +515,35 @@ func (r *router) createIpSet(setName string, input setInput) (*nftables.Set, err } elements := convertPrefixesToSet(prefixes) - if err := r.conn.AddSet(nfset, elements); err != nil { - return nil, fmt.Errorf("error adding elements to set %s: %w", setName, err) - } + nElements := len(elements) + maxElements := maxPrefixesSet * 2 + initialElements := elements[:min(maxElements, nElements)] + + if err := r.conn.AddSet(nfset, initialElements); err != nil { + return nil, fmt.Errorf("error adding set %s: %w", setName, err) + } if err := r.conn.Flush(); err != nil { return nil, fmt.Errorf("flush error: %w", err) } + log.Debugf("Created new ipset: %s with %d initial prefixes (total prefixes %d)", setName, len(initialElements)/2, len(prefixes)) - log.Printf("Created new ipset: %s with %d elements", setName, len(elements)/2) + var subEnd int + for subStart := maxElements; subStart < nElements; subStart += maxElements { + subEnd = min(subStart+maxElements, nElements) + subElement := elements[subStart:subEnd] + nSubPrefixes := len(subElement) / 2 + log.Tracef("Adding new prefixes (%d) in ipset: %s", nSubPrefixes, setName) + if err := r.conn.SetAddElements(nfset, subElement); err != nil { + return nil, fmt.Errorf("error adding prefixes (%d) to set %s: %w", nSubPrefixes, setName, err) + } + if err := r.conn.Flush(); err != nil { + return nil, fmt.Errorf("flush error: %w", err) + } + log.Debugf("Added new prefixes (%d) in ipset: %s", nSubPrefixes, setName) + } + log.Infof("Created new ipset: %s with %d prefixes", setName, len(prefixes)) return nfset, nil } From a8604ef51cb7a9682f693fa4f0dc15fe14513094 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Tue, 30 Dec 2025 10:49:43 +0100 Subject: [PATCH 005/374] [management] filter own peer when having a group to peer policy to themself (#4956) --- management/server/types/account.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/management/server/types/account.go b/management/server/types/account.go index c43e0bb57..06170a132 100644 --- a/management/server/types/account.go +++ b/management/server/types/account.go @@ -1235,7 +1235,11 @@ func (a *Account) getPeerFromResource(resource Resource, peerID string) ([]*nbpe return []*nbpeer.Peer{}, false } - return []*nbpeer.Peer{peer}, resource.ID == peerID + if peer.ID == peerID { + return []*nbpeer.Peer{}, true + } + + return []*nbpeer.Peer{peer}, false } // validatePostureChecksOnPeer validates the posture checks on a peer From 9ed143744228fa5dd0d7adbefe678e0dd1ac4324 Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Tue, 30 Dec 2025 07:42:34 -0500 Subject: [PATCH 006/374] Add DEX IdP Support (#4949) --- go.mod | 17 +- go.sum | 40 +- .../getting-started-with-dex.sh | 554 ++++++++++++++++++ management/server/idp/dex.go | 445 ++++++++++++++ management/server/idp/dex_test.go | 137 +++++ management/server/idp/idp.go | 30 +- 6 files changed, 1182 insertions(+), 41 deletions(-) create mode 100755 infrastructure_files/getting-started-with-dex.sh create mode 100644 management/server/idp/dex.go create mode 100644 management/server/idp/dex_test.go diff --git a/go.mod b/go.mod index 8f4ec530b..e52f21e55 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( golang.zx2c4.com/wireguard v0.0.0-20230704135630-469159ecf7d1 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 golang.zx2c4.com/wireguard/windows v0.5.3 - google.golang.org/grpc v1.73.0 + google.golang.org/grpc v1.75.0 google.golang.org/protobuf v1.36.8 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -41,6 +41,7 @@ require ( github.com/coder/websocket v1.8.13 github.com/coreos/go-iptables v0.7.0 github.com/creack/pty v1.1.18 + github.com/dexidp/dex/api/v2 v2.4.0 github.com/eko/gocache/lib/v4 v4.2.0 github.com/eko/gocache/store/go_cache/v4 v4.2.2 github.com/eko/gocache/store/redis/v4 v4.2.2 @@ -97,10 +98,10 @@ require ( github.com/yusufpapurcu/wmi v1.2.4 github.com/zcalusic/sysinfo v1.1.3 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 - go.opentelemetry.io/otel v1.35.0 + go.opentelemetry.io/otel v1.37.0 go.opentelemetry.io/otel/exporters/prometheus v0.48.0 - go.opentelemetry.io/otel/metric v1.35.0 - go.opentelemetry.io/otel/sdk/metric v1.35.0 + go.opentelemetry.io/otel/metric v1.37.0 + go.opentelemetry.io/otel/sdk/metric v1.37.0 go.uber.org/mock v0.5.0 go.uber.org/zap v1.27.0 goauthentik.io/api/v3 v3.2023051.3 @@ -124,7 +125,7 @@ require ( require ( cloud.google.com/go/auth v0.3.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect - cloud.google.com/go/compute/metadata v0.6.0 // indirect + cloud.google.com/go/compute/metadata v0.7.0 // indirect dario.cat/mergo v1.0.0 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect @@ -170,7 +171,7 @@ require ( github.com/fyne-io/oksvg v0.2.0 // indirect github.com/go-gl/gl v0.0.0-20231021071112-07e5d0ea2e71 // indirect github.com/go-gl/glfw/v3.3/glfw v0.0.0-20240506104042-037f3cc74f2a // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-sql-driver/mysql v1.8.1 // indirect @@ -248,8 +249,8 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect - go.opentelemetry.io/otel/sdk v1.35.0 // indirect - go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.opentelemetry.io/otel/sdk v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/image v0.33.0 // indirect golang.org/x/text v0.31.0 // indirect diff --git a/go.sum b/go.sum index f10e1e6da..b362d75ee 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ cloud.google.com/go/auth v0.3.0/go.mod h1:lBv6NKTWp8E3LPzmO1TbiiRKc4drLOfHsgmlH9 cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= cunicu.li/go-rosenpass v0.4.0 h1:LtPtBgFWY/9emfgC4glKLEqS0MJTylzV6+ChRhiZERw= cunicu.li/go-rosenpass v0.4.0/go.mod h1:MPbjH9nxV4l3vEagKVdFNwHOketqgS5/To1VYJplf/M= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= @@ -117,6 +117,8 @@ github.com/cunicu/circl v0.0.0-20230801113412-fec58fc7b5f6/go.mod h1:+CauBF6R70J github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dexidp/dex/api/v2 v2.4.0 h1:gNba7n6BKVp8X4Jp24cxYn5rIIGhM6kDOXcZoL6tr9A= +github.com/dexidp/dex/api/v2 v2.4.0/go.mod h1:/p550ADvFFh7K95VmhUD+jgm15VdaNnab9td8DHOpyI= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= @@ -164,8 +166,8 @@ github.com/go-gl/gl v0.0.0-20231021071112-07e5d0ea2e71/go.mod h1:9YTyiznxEY1fVin github.com/go-gl/glfw/v3.3/glfw v0.0.0-20240506104042-037f3cc74f2a h1:vxnBhFDDT+xzxf1jTJKMKZw3H0swfWk9RpWbBbDK5+0= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20240506104042-037f3cc74f2a/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= @@ -561,22 +563,22 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.4 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= go.opentelemetry.io/otel/exporters/prometheus v0.48.0 h1:sBQe3VNGUjY9IKWQC6z2lNqa5iGbDSxhs60ABwK4y0s= go.opentelemetry.io/otel/exporters/prometheus v0.48.0/go.mod h1:DtrbMzoZWwQHyrQmCfLam5DZbnmorsGbOtTbYHycU5o= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= -go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= -go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= -go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= -go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -761,6 +763,8 @@ golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 h1:CawjfCvY golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6/go.mod h1:3rxYc4HtVcSG9gVaTs2GEBdehh+sYPOwKtyUWEOTb80= golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE= golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.177.0 h1:8a0p/BbPa65GlqGWtUKxot4p0TV8OGOfyTjtmkXNXmk= google.golang.org/api v0.177.0/go.mod h1:srbhue4MLjkjbkux5p3dw/ocYOSZTaIEvf7bCOnFQDw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -770,8 +774,8 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM= -google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA= google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -779,8 +783,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= -google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/infrastructure_files/getting-started-with-dex.sh b/infrastructure_files/getting-started-with-dex.sh new file mode 100755 index 000000000..a14c6134e --- /dev/null +++ b/infrastructure_files/getting-started-with-dex.sh @@ -0,0 +1,554 @@ +#!/bin/bash + +set -e + +# NetBird Getting Started with Dex IDP +# This script sets up NetBird with Dex as the identity provider + +# Sed pattern to strip base64 padding characters +SED_STRIP_PADDING='s/=//g' + +check_docker_compose() { + if command -v docker-compose &> /dev/null + then + echo "docker-compose" + return + fi + if docker compose --help &> /dev/null + then + echo "docker compose" + return + fi + + echo "docker-compose is not installed or not in PATH. Please follow the steps from the official guide: https://docs.docker.com/engine/install/" > /dev/stderr + exit 1 +} + +check_jq() { + if ! command -v jq &> /dev/null + then + echo "jq is not installed or not in PATH, please install with your package manager. e.g. sudo apt install jq" > /dev/stderr + exit 1 + fi + return 0 +} + +get_main_ip_address() { + if [[ "$OSTYPE" == "darwin"* ]]; then + interface=$(route -n get default | grep 'interface:' | awk '{print $2}') + ip_address=$(ifconfig "$interface" | grep 'inet ' | awk '{print $2}') + else + interface=$(ip route | grep default | awk '{print $5}' | head -n 1) + ip_address=$(ip addr show "$interface" | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1) + fi + + echo "$ip_address" + return 0 +} + +check_nb_domain() { + DOMAIN=$1 + if [[ "$DOMAIN-x" == "-x" ]]; then + echo "The NETBIRD_DOMAIN variable cannot be empty." > /dev/stderr + return 1 + fi + + if [[ "$DOMAIN" == "netbird.example.com" ]]; then + echo "The NETBIRD_DOMAIN cannot be netbird.example.com" > /dev/stderr + return 1 + fi + return 0 +} + +read_nb_domain() { + READ_NETBIRD_DOMAIN="" + echo -n "Enter the domain you want to use for NetBird (e.g. netbird.my-domain.com): " > /dev/stderr + read -r READ_NETBIRD_DOMAIN < /dev/tty + if ! check_nb_domain "$READ_NETBIRD_DOMAIN"; then + read_nb_domain + fi + echo "$READ_NETBIRD_DOMAIN" + return 0 +} + +get_turn_external_ip() { + TURN_EXTERNAL_IP_CONFIG="#external-ip=" + IP=$(curl -s -4 https://jsonip.com | jq -r '.ip') + if [[ "x-$IP" != "x-" ]]; then + TURN_EXTERNAL_IP_CONFIG="external-ip=$IP" + fi + echo "$TURN_EXTERNAL_IP_CONFIG" + return 0 +} + +wait_dex() { + set +e + echo -n "Waiting for Dex to become ready (via $NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN)" + counter=1 + while true; do + # Check Dex through Caddy proxy (also validates TLS is working) + if curl -sk -f -o /dev/null "$NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN/dex/.well-known/openid-configuration" 2>/dev/null; then + break + fi + if [[ $counter -eq 60 ]]; then + echo "" + echo "Taking too long. Checking logs..." + $DOCKER_COMPOSE_COMMAND logs --tail=20 caddy + $DOCKER_COMPOSE_COMMAND logs --tail=20 dex + fi + echo -n " ." + sleep 2 + counter=$((counter + 1)) + done + echo " done" + set -e + return 0 +} + +init_environment() { + CADDY_SECURE_DOMAIN="" + NETBIRD_PORT=80 + NETBIRD_HTTP_PROTOCOL="http" + NETBIRD_RELAY_PROTO="rel" + TURN_USER="self" + TURN_PASSWORD=$(openssl rand -base64 32 | sed "$SED_STRIP_PADDING") + NETBIRD_RELAY_AUTH_SECRET=$(openssl rand -base64 32 | sed "$SED_STRIP_PADDING") + TURN_MIN_PORT=49152 + TURN_MAX_PORT=65535 + TURN_EXTERNAL_IP_CONFIG=$(get_turn_external_ip) + + # Generate secrets for Dex + DEX_DASHBOARD_CLIENT_SECRET=$(openssl rand -base64 32 | sed "$SED_STRIP_PADDING") + + # Generate admin password + NETBIRD_ADMIN_PASSWORD=$(openssl rand -base64 16 | sed "$SED_STRIP_PADDING") + + if ! check_nb_domain "$NETBIRD_DOMAIN"; then + NETBIRD_DOMAIN=$(read_nb_domain) + fi + + if [[ "$NETBIRD_DOMAIN" == "use-ip" ]]; then + NETBIRD_DOMAIN=$(get_main_ip_address) + else + NETBIRD_PORT=443 + CADDY_SECURE_DOMAIN=", $NETBIRD_DOMAIN:$NETBIRD_PORT" + NETBIRD_HTTP_PROTOCOL="https" + NETBIRD_RELAY_PROTO="rels" + fi + + check_jq + + DOCKER_COMPOSE_COMMAND=$(check_docker_compose) + + if [[ -f dex.yaml ]]; then + echo "Generated files already exist, if you want to reinitialize the environment, please remove them first." + echo "You can use the following commands:" + echo " $DOCKER_COMPOSE_COMMAND down --volumes # to remove all containers and volumes" + echo " rm -f docker-compose.yml Caddyfile dex.yaml dashboard.env turnserver.conf management.json relay.env" + echo "Be aware that this will remove all data from the database, and you will have to reconfigure the dashboard." + exit 1 + fi + + echo Rendering initial files... + render_docker_compose > docker-compose.yml + render_caddyfile > Caddyfile + render_dex_config > dex.yaml + render_dashboard_env > dashboard.env + render_management_json > management.json + render_turn_server_conf > turnserver.conf + render_relay_env > relay.env + + echo -e "\nStarting Dex IDP\n" + $DOCKER_COMPOSE_COMMAND up -d caddy dex + + # Wait for Dex to be ready (through caddy proxy) + sleep 3 + wait_dex + + echo -e "\nStarting NetBird services\n" + $DOCKER_COMPOSE_COMMAND up -d + + echo -e "\nDone!\n" + echo "You can access the NetBird dashboard at $NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN" + echo "" + echo "Login with the following credentials:" + echo "Email: admin@$NETBIRD_DOMAIN" | tee .env + echo "Password: $NETBIRD_ADMIN_PASSWORD" | tee -a .env + echo "" + echo "Dex admin UI is not available (Dex has no built-in UI)." + echo "To add more users, edit dex.yaml and restart: $DOCKER_COMPOSE_COMMAND restart dex" + return 0 +} + +render_caddyfile() { + cat < /dev/null; then + ADMIN_PASSWORD_HASH=$(htpasswd -bnBC 10 "" "$NETBIRD_ADMIN_PASSWORD" | tr -d ':\n') + elif command -v python3 &> /dev/null; then + ADMIN_PASSWORD_HASH=$(python3 -c "import bcrypt; print(bcrypt.hashpw('$NETBIRD_ADMIN_PASSWORD'.encode(), bcrypt.gensalt(rounds=10)).decode())" 2>/dev/null || echo "") + fi + + # Fallback to a known hash if we can't generate one + if [[ -z "$ADMIN_PASSWORD_HASH" ]]; then + # This is hash of "password" - user should change it + ADMIN_PASSWORD_HASH='$2a$10$2b2cU8CPhOTaGrs1HRQuAueS7JTT5ZHsHSzYiFPm1leZck7Mc8T4W' + NETBIRD_ADMIN_PASSWORD="password" + echo "Warning: Could not generate password hash. Using default password: password. Please change it in dex.yaml" > /dev/stderr + fi + + cat </dev/null || cat /proc/sys/kernel/random/uuid 2>/dev/null || echo "admin-user-id-001")" + +# Optional: Add external identity provider connectors +# connectors: +# - type: github +# id: github +# name: GitHub +# config: +# clientID: \$GITHUB_CLIENT_ID +# clientSecret: \$GITHUB_CLIENT_SECRET +# redirectURI: $NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN/dex/callback +# +# - type: ldap +# id: ldap +# name: LDAP +# config: +# host: ldap.example.com:636 +# insecureNoSSL: false +# bindDN: cn=admin,dc=example,dc=com +# bindPW: admin +# userSearch: +# baseDN: ou=users,dc=example,dc=com +# filter: "(objectClass=person)" +# username: uid +# idAttr: uid +# emailAttr: mail +# nameAttr: cn +EOF + return 0 +} + +render_turn_server_conf() { + cat < Date: Tue, 30 Dec 2025 16:41:36 +0000 Subject: [PATCH 007/374] Feat/add support for forcing device auth flow on ios (#4944) * updates to client file writing * numerous * minor * - Align OnLoginSuccess behavior with Android (only call on nil error) - Remove verbose debug logging from WaitToken in device_flow.go - Improve TUN FD=0 fallback comments and warning messages - Document why config save after login differs from Android * Add nolint directive for staticcheck SA1029 in login.go * Fix CodeRabbit review issues for iOS/tvOS SDK - Remove goroutine from OnLoginSuccess callback, invoke synchronously - Stop treating PermissionDenied as success, propagate as permanent error - Replace context.TODO() with bounded timeout context (30s) in RequestAuthInfo - Handle DirectUpdateOrCreateConfig errors in IsLoginRequired and LoginForMobile - Add permission enforcement to DirectUpdateOrCreateConfig for existing configs - Fix variable shadowing in device_ios.go where err was masked by := in else block * Address additional CodeRabbit review issues for iOS/tvOS SDK - Make tunFd == 0 a hard error with exported ErrInvalidTunnelFD (remove dead fallback code) - Apply defaults in ConfigFromJSON to prevent partially-initialized configs - Add nil guards for listener/urlOpener interfaces in public SDK entry points - Reorder config save before OnLoginSuccess to prevent teardown race - Add explanatory comment for urlOpener.Open goroutine * Make urlOpener.Open() synchronous in device auth flow --- client/iface/device/device_ios.go | 26 ++- client/internal/profilemanager/config.go | 83 +++++++++ client/ios/NetBirdSDK/client.go | 102 +++++++++-- client/ios/NetBirdSDK/login.go | 206 +++++++++++++++++++++-- client/ios/NetBirdSDK/preferences.go | 4 +- 5 files changed, 392 insertions(+), 29 deletions(-) diff --git a/client/iface/device/device_ios.go b/client/iface/device/device_ios.go index f96edf992..d841ac2fe 100644 --- a/client/iface/device/device_ios.go +++ b/client/iface/device/device_ios.go @@ -4,6 +4,7 @@ package device import ( + "fmt" "os" log "github.com/sirupsen/logrus" @@ -45,10 +46,31 @@ func NewTunDevice(name string, address wgaddr.Address, port int, key string, mtu } } +// ErrInvalidTunnelFD is returned when the tunnel file descriptor is invalid (0). +// This typically means the Swift code couldn't find the utun control socket. +var ErrInvalidTunnelFD = fmt.Errorf("invalid tunnel file descriptor: fd is 0 (Swift failed to locate utun socket)") + func (t *TunDevice) Create() (WGConfigurer, error) { log.Infof("create tun interface") - dupTunFd, err := unix.Dup(t.tunFd) + var tunDevice tun.Device + var err error + + // Validate the tunnel file descriptor. + // On iOS/tvOS, the FD must be provided by the NEPacketTunnelProvider. + // A value of 0 means the Swift code couldn't find the utun control socket + // (the low-level APIs like ctl_info, sockaddr_ctl may not be exposed in + // tvOS SDK headers). This is a hard error - there's no viable fallback + // since tun.CreateTUN() cannot work within the iOS/tvOS sandbox. + if t.tunFd == 0 { + log.Errorf("Tunnel file descriptor is 0 - Swift code failed to locate the utun control socket. " + + "On tvOS, ensure the NEPacketTunnelProvider is properly configured and the tunnel is started.") + return nil, ErrInvalidTunnelFD + } + + // Normal iOS/tvOS path: use the provided file descriptor from NEPacketTunnelProvider + var dupTunFd int + dupTunFd, err = unix.Dup(t.tunFd) if err != nil { log.Errorf("Unable to dup tun fd: %v", err) return nil, err @@ -60,7 +82,7 @@ func (t *TunDevice) Create() (WGConfigurer, error) { _ = unix.Close(dupTunFd) return nil, err } - tunDevice, err := tun.CreateTUNFromFile(os.NewFile(uintptr(dupTunFd), "/dev/tun"), 0) + tunDevice, err = tun.CreateTUNFromFile(os.NewFile(uintptr(dupTunFd), "/dev/tun"), 0) if err != nil { log.Errorf("Unable to create new tun device from fd: %v", err) _ = unix.Close(dupTunFd) diff --git a/client/internal/profilemanager/config.go b/client/internal/profilemanager/config.go index 84ee73902..de4436f19 100644 --- a/client/internal/profilemanager/config.go +++ b/client/internal/profilemanager/config.go @@ -3,6 +3,7 @@ package profilemanager import ( "context" "crypto/tls" + "encoding/json" "fmt" "net/url" "os" @@ -820,3 +821,85 @@ func readConfig(configPath string, createIfMissing bool) (*Config, error) { func WriteOutConfig(path string, config *Config) error { return util.WriteJson(context.Background(), path, config) } + +// DirectWriteOutConfig writes config directly without atomic temp file operations. +// Use this on platforms where atomic writes are blocked (e.g., tvOS sandbox). +func DirectWriteOutConfig(path string, config *Config) error { + return util.DirectWriteJson(context.Background(), path, config) +} + +// DirectUpdateOrCreateConfig is like UpdateOrCreateConfig but uses direct (non-atomic) writes. +// Use this on platforms where atomic writes are blocked (e.g., tvOS sandbox). +func DirectUpdateOrCreateConfig(input ConfigInput) (*Config, error) { + if !fileExists(input.ConfigPath) { + log.Infof("generating new config %s", input.ConfigPath) + cfg, err := createNewConfig(input) + if err != nil { + return nil, err + } + err = util.DirectWriteJson(context.Background(), input.ConfigPath, cfg) + return cfg, err + } + + if isPreSharedKeyHidden(input.PreSharedKey) { + input.PreSharedKey = nil + } + + // Enforce permissions on existing config files (same as UpdateOrCreateConfig) + if err := util.EnforcePermission(input.ConfigPath); err != nil { + log.Errorf("failed to enforce permission on config file: %v", err) + } + + return directUpdate(input) +} + +func directUpdate(input ConfigInput) (*Config, error) { + config := &Config{} + + if _, err := util.ReadJson(input.ConfigPath, config); err != nil { + return nil, err + } + + updated, err := config.apply(input) + if err != nil { + return nil, err + } + + if updated { + if err := util.DirectWriteJson(context.Background(), input.ConfigPath, config); err != nil { + return nil, err + } + } + + return config, nil +} + +// ConfigToJSON serializes a Config struct to a JSON string. +// This is useful for exporting config to alternative storage mechanisms +// (e.g., UserDefaults on tvOS where file writes are blocked). +func ConfigToJSON(config *Config) (string, error) { + bs, err := json.MarshalIndent(config, "", " ") + if err != nil { + return "", err + } + return string(bs), nil +} + +// ConfigFromJSON deserializes a JSON string to a Config struct. +// This is useful for restoring config from alternative storage mechanisms. +// After unmarshaling, defaults are applied to ensure the config is fully initialized. +func ConfigFromJSON(jsonStr string) (*Config, error) { + config := &Config{} + err := json.Unmarshal([]byte(jsonStr), config) + if err != nil { + return nil, err + } + + // Apply defaults to ensure required fields are initialized. + // This mirrors what readConfig does after loading from file. + if _, err := config.apply(ConfigInput{}); err != nil { + return nil, fmt.Errorf("failed to apply defaults to config: %w", err) + } + + return config, nil +} diff --git a/client/ios/NetBirdSDK/client.go b/client/ios/NetBirdSDK/client.go index f3458ccea..e901386d9 100644 --- a/client/ios/NetBirdSDK/client.go +++ b/client/ios/NetBirdSDK/client.go @@ -75,6 +75,8 @@ type Client struct { dnsManager dns.IosDnsManager loginComplete bool connectClient *internal.ConnectClient + // preloadedConfig holds config loaded from JSON (used on tvOS where file writes are blocked) + preloadedConfig *profilemanager.Config } // NewClient instantiate a new Client @@ -92,17 +94,44 @@ func NewClient(cfgFile, stateFile, deviceName string, osVersion string, osName s } } +// SetConfigFromJSON loads config from a JSON string into memory. +// This is used on tvOS where file writes to App Group containers are blocked. +// When set, IsLoginRequired() and Run() will use this preloaded config instead of reading from file. +func (c *Client) SetConfigFromJSON(jsonStr string) error { + cfg, err := profilemanager.ConfigFromJSON(jsonStr) + if err != nil { + log.Errorf("SetConfigFromJSON: failed to parse config JSON: %v", err) + return err + } + c.preloadedConfig = cfg + log.Infof("SetConfigFromJSON: config loaded successfully from JSON") + return nil +} + // Run start the internal client. It is a blocker function func (c *Client) Run(fd int32, interfaceName string, envList *EnvList) error { exportEnvList(envList) log.Infof("Starting NetBird client") log.Debugf("Tunnel uses interface: %s", interfaceName) - cfg, err := profilemanager.UpdateOrCreateConfig(profilemanager.ConfigInput{ - ConfigPath: c.cfgFile, - StateFilePath: c.stateFile, - }) - if err != nil { - return err + + var cfg *profilemanager.Config + var err error + + // Use preloaded config if available (tvOS where file writes are blocked) + if c.preloadedConfig != nil { + log.Infof("Run: using preloaded config from memory") + cfg = c.preloadedConfig + } else { + log.Infof("Run: loading config from file") + // Use DirectUpdateOrCreateConfig to avoid atomic file operations (temp file + rename) + // which are blocked by the tvOS sandbox in App Group containers + cfg, err = profilemanager.DirectUpdateOrCreateConfig(profilemanager.ConfigInput{ + ConfigPath: c.cfgFile, + StateFilePath: c.stateFile, + }) + if err != nil { + return err + } } c.recorder.UpdateManagementAddress(cfg.ManagementURL.String()) c.recorder.UpdateRosenpass(cfg.RosenpassEnabled, cfg.RosenpassPermissive) @@ -120,7 +149,7 @@ func (c *Client) Run(fd int32, interfaceName string, envList *EnvList) error { c.ctxCancelLock.Unlock() auth := NewAuthWithConfig(ctx, cfg) - err = auth.Login() + err = auth.LoginSync() if err != nil { return err } @@ -208,14 +237,45 @@ func (c *Client) IsLoginRequired() bool { defer c.ctxCancelLock.Unlock() ctx, c.ctxCancel = context.WithCancel(ctxWithValues) - cfg, _ := profilemanager.UpdateOrCreateConfig(profilemanager.ConfigInput{ - ConfigPath: c.cfgFile, - }) + var cfg *profilemanager.Config + var err error - needsLogin, _ := internal.IsLoginRequired(ctx, cfg) + // Use preloaded config if available (tvOS where file writes are blocked) + if c.preloadedConfig != nil { + log.Infof("IsLoginRequired: using preloaded config from memory") + cfg = c.preloadedConfig + } else { + log.Infof("IsLoginRequired: loading config from file") + // Use DirectUpdateOrCreateConfig to avoid atomic file operations (temp file + rename) + // which are blocked by the tvOS sandbox in App Group containers + cfg, err = profilemanager.DirectUpdateOrCreateConfig(profilemanager.ConfigInput{ + ConfigPath: c.cfgFile, + }) + if err != nil { + log.Errorf("IsLoginRequired: failed to load config: %v", err) + // If we can't load config, assume login is required + return true + } + } + + if cfg == nil { + log.Errorf("IsLoginRequired: config is nil") + return true + } + + needsLogin, err := internal.IsLoginRequired(ctx, cfg) + if err != nil { + log.Errorf("IsLoginRequired: check failed: %v", err) + // If the check fails, assume login is required to be safe + return true + } + log.Infof("IsLoginRequired: needsLogin=%v", needsLogin) return needsLogin } +// loginForMobileAuthTimeout is the timeout for requesting auth info from the server +const loginForMobileAuthTimeout = 30 * time.Second + func (c *Client) LoginForMobile() string { var ctx context.Context //nolint @@ -228,16 +288,26 @@ func (c *Client) LoginForMobile() string { defer c.ctxCancelLock.Unlock() ctx, c.ctxCancel = context.WithCancel(ctxWithValues) - cfg, _ := profilemanager.UpdateOrCreateConfig(profilemanager.ConfigInput{ + // Use DirectUpdateOrCreateConfig to avoid atomic file operations (temp file + rename) + // which are blocked by the tvOS sandbox in App Group containers + cfg, err := profilemanager.DirectUpdateOrCreateConfig(profilemanager.ConfigInput{ ConfigPath: c.cfgFile, }) + if err != nil { + log.Errorf("LoginForMobile: failed to load config: %v", err) + return fmt.Sprintf("failed to load config: %v", err) + } oAuthFlow, err := auth.NewOAuthFlow(ctx, cfg, false, false, "") if err != nil { return err.Error() } - flowInfo, err := oAuthFlow.RequestAuthInfo(context.TODO()) + // Use a bounded timeout for the auth info request to prevent indefinite hangs + authInfoCtx, authInfoCancel := context.WithTimeout(ctx, loginForMobileAuthTimeout) + defer authInfoCancel() + + flowInfo, err := oAuthFlow.RequestAuthInfo(authInfoCtx) if err != nil { return err.Error() } @@ -249,10 +319,14 @@ func (c *Client) LoginForMobile() string { defer cancel() tokenInfo, err := oAuthFlow.WaitToken(waitCTX, flowInfo) if err != nil { + log.Errorf("LoginForMobile: WaitToken failed: %v", err) return } jwtToken := tokenInfo.GetTokenToUse() - _ = internal.Login(ctx, cfg, "", jwtToken) + if err := internal.Login(ctx, cfg, "", jwtToken); err != nil { + log.Errorf("LoginForMobile: Login failed: %v", err) + return + } c.loginComplete = true }() diff --git a/client/ios/NetBirdSDK/login.go b/client/ios/NetBirdSDK/login.go index 1c2b38a61..27fdcf5ef 100644 --- a/client/ios/NetBirdSDK/login.go +++ b/client/ios/NetBirdSDK/login.go @@ -14,6 +14,7 @@ import ( "github.com/netbirdio/netbird/client/cmd" "github.com/netbirdio/netbird/client/internal" + "github.com/netbirdio/netbird/client/internal/auth" "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/system" ) @@ -33,7 +34,8 @@ type ErrListener interface { // URLOpener it is a callback interface. The Open function will be triggered if // the backend want to show an url for the user type URLOpener interface { - Open(string) + Open(url string, userCode string) + OnLoginSuccess() } // Auth can register or login new client @@ -72,13 +74,32 @@ func NewAuthWithConfig(ctx context.Context, config *profilemanager.Config) *Auth // SaveConfigIfSSOSupported test the connectivity with the management server by retrieving the server device flow info. // If it returns a flow info than save the configuration and return true. If it gets a codes.NotFound, it means that SSO // is not supported and returns false without saving the configuration. For other errors return false. -func (a *Auth) SaveConfigIfSSOSupported() (bool, error) { +func (a *Auth) SaveConfigIfSSOSupported(listener SSOListener) { + if listener == nil { + log.Errorf("SaveConfigIfSSOSupported: listener is nil") + return + } + go func() { + sso, err := a.saveConfigIfSSOSupported() + if err != nil { + listener.OnError(err) + } else { + listener.OnSuccess(sso) + } + }() +} + +func (a *Auth) saveConfigIfSSOSupported() (bool, error) { supportsSSO := true err := a.withBackOff(a.ctx, func() (err error) { - _, err = internal.GetDeviceAuthorizationFlowInfo(a.ctx, a.config.PrivateKey, a.config.ManagementURL) + _, err = internal.GetPKCEAuthorizationFlowInfo(a.ctx, a.config.PrivateKey, a.config.ManagementURL, nil) if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.NotFound || s.Code() == codes.Unimplemented) { - _, err = internal.GetPKCEAuthorizationFlowInfo(a.ctx, a.config.PrivateKey, a.config.ManagementURL, nil) - if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.NotFound || s.Code() == codes.Unimplemented) { + _, err = internal.GetDeviceAuthorizationFlowInfo(a.ctx, a.config.PrivateKey, a.config.ManagementURL) + s, ok := gstatus.FromError(err) + if !ok { + return err + } + if s.Code() == codes.NotFound || s.Code() == codes.Unimplemented { supportsSSO = false err = nil } @@ -97,12 +118,29 @@ func (a *Auth) SaveConfigIfSSOSupported() (bool, error) { return false, fmt.Errorf("backoff cycle failed: %v", err) } - err = profilemanager.WriteOutConfig(a.cfgPath, a.config) + // Use DirectWriteOutConfig to avoid atomic file operations (temp file + rename) + // which are blocked by the tvOS sandbox in App Group containers + err = profilemanager.DirectWriteOutConfig(a.cfgPath, a.config) return true, err } // LoginWithSetupKeyAndSaveConfig test the connectivity with the management server with the setup key. -func (a *Auth) LoginWithSetupKeyAndSaveConfig(setupKey string, deviceName string) error { +func (a *Auth) LoginWithSetupKeyAndSaveConfig(resultListener ErrListener, setupKey string, deviceName string) { + if resultListener == nil { + log.Errorf("LoginWithSetupKeyAndSaveConfig: resultListener is nil") + return + } + go func() { + err := a.loginWithSetupKeyAndSaveConfig(setupKey, deviceName) + if err != nil { + resultListener.OnError(err) + } else { + resultListener.OnSuccess() + } + }() +} + +func (a *Auth) loginWithSetupKeyAndSaveConfig(setupKey string, deviceName string) error { //nolint ctxWithValues := context.WithValue(a.ctx, system.DeviceNameCtxKey, deviceName) @@ -118,10 +156,14 @@ func (a *Auth) LoginWithSetupKeyAndSaveConfig(setupKey string, deviceName string return fmt.Errorf("backoff cycle failed: %v", err) } - return profilemanager.WriteOutConfig(a.cfgPath, a.config) + // Use DirectWriteOutConfig to avoid atomic file operations (temp file + rename) + // which are blocked by the tvOS sandbox in App Group containers + return profilemanager.DirectWriteOutConfig(a.cfgPath, a.config) } -func (a *Auth) Login() error { +// LoginSync performs a synchronous login check without UI interaction +// Used for background VPN connection where user should already be authenticated +func (a *Auth) LoginSync() error { var needsLogin bool // check if we need to generate JWT token @@ -135,23 +177,142 @@ func (a *Auth) Login() error { jwtToken := "" if needsLogin { - return fmt.Errorf("Not authenticated") + return fmt.Errorf("not authenticated") } err = a.withBackOff(a.ctx, func() error { err := internal.Login(a.ctx, a.config, "", jwtToken) - if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied) { - return nil + if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.PermissionDenied) { + // PermissionDenied means registration is required or peer is blocked + return backoff.Permanent(err) } return err }) + if err != nil { + return fmt.Errorf("login failed: %v", err) + } + + return nil +} + +// Login performs interactive login with device authentication support +// Deprecated: Use LoginWithDeviceName instead to ensure proper device naming on tvOS +func (a *Auth) Login(resultListener ErrListener, urlOpener URLOpener, forceDeviceAuth bool) { + // Use empty device name - system will use hostname as fallback + a.LoginWithDeviceName(resultListener, urlOpener, forceDeviceAuth, "") +} + +// LoginWithDeviceName performs interactive login with device authentication support +// The deviceName parameter allows specifying a custom device name (required for tvOS) +func (a *Auth) LoginWithDeviceName(resultListener ErrListener, urlOpener URLOpener, forceDeviceAuth bool, deviceName string) { + if resultListener == nil { + log.Errorf("LoginWithDeviceName: resultListener is nil") + return + } + if urlOpener == nil { + log.Errorf("LoginWithDeviceName: urlOpener is nil") + resultListener.OnError(fmt.Errorf("urlOpener is nil")) + return + } + go func() { + err := a.login(urlOpener, forceDeviceAuth, deviceName) + if err != nil { + resultListener.OnError(err) + } else { + resultListener.OnSuccess() + } + }() +} + +func (a *Auth) login(urlOpener URLOpener, forceDeviceAuth bool, deviceName string) error { + var needsLogin bool + + // Create context with device name if provided + ctx := a.ctx + if deviceName != "" { + //nolint:staticcheck + ctx = context.WithValue(a.ctx, system.DeviceNameCtxKey, deviceName) + } + + // check if we need to generate JWT token + err := a.withBackOff(ctx, func() (err error) { + needsLogin, err = internal.IsLoginRequired(ctx, a.config) + return + }) if err != nil { return fmt.Errorf("backoff cycle failed: %v", err) } + jwtToken := "" + if needsLogin { + tokenInfo, err := a.foregroundGetTokenInfo(urlOpener, forceDeviceAuth) + if err != nil { + return fmt.Errorf("interactive sso login failed: %v", err) + } + jwtToken = tokenInfo.GetTokenToUse() + } + + err = a.withBackOff(ctx, func() error { + err := internal.Login(ctx, a.config, "", jwtToken) + if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.PermissionDenied) { + // PermissionDenied means registration is required or peer is blocked + return backoff.Permanent(err) + } + return err + }) + if err != nil { + return fmt.Errorf("login failed: %v", err) + } + + // Save the config before notifying success to ensure persistence completes + // before the callback potentially triggers teardown on the Swift side. + // Note: This differs from Android which doesn't save config after login. + // On iOS/tvOS, we save here because: + // 1. The config may have been modified during login (e.g., new tokens) + // 2. On tvOS, the Network Extension context may be the only place with + // write permissions to the App Group container + if a.cfgPath != "" { + if err := profilemanager.DirectWriteOutConfig(a.cfgPath, a.config); err != nil { + log.Warnf("failed to save config after login: %v", err) + } + } + + // Notify caller of successful login synchronously before returning + urlOpener.OnLoginSuccess() + return nil } +const authInfoRequestTimeout = 30 * time.Second + +func (a *Auth) foregroundGetTokenInfo(urlOpener URLOpener, forceDeviceAuth bool) (*auth.TokenInfo, error) { + oAuthFlow, err := auth.NewOAuthFlow(a.ctx, a.config, false, forceDeviceAuth, "") + if err != nil { + return nil, err + } + + // Use a bounded timeout for the auth info request to prevent indefinite hangs + authInfoCtx, authInfoCancel := context.WithTimeout(a.ctx, authInfoRequestTimeout) + defer authInfoCancel() + + flowInfo, err := oAuthFlow.RequestAuthInfo(authInfoCtx) + if err != nil { + return nil, fmt.Errorf("getting a request OAuth flow info failed: %v", err) + } + + urlOpener.Open(flowInfo.VerificationURIComplete, flowInfo.UserCode) + + waitTimeout := time.Duration(flowInfo.ExpiresIn) * time.Second + waitCTX, cancel := context.WithTimeout(a.ctx, waitTimeout) + defer cancel() + tokenInfo, err := oAuthFlow.WaitToken(waitCTX, flowInfo) + if err != nil { + return nil, fmt.Errorf("waiting for browser login failed: %v", err) + } + + return &tokenInfo, nil +} + func (a *Auth) withBackOff(ctx context.Context, bf func() error) error { return backoff.RetryNotify( bf, @@ -160,3 +321,24 @@ func (a *Auth) withBackOff(ctx context.Context, bf func() error) error { log.Warnf("retrying Login to the Management service in %v due to error %v", duration, err) }) } + +// GetConfigJSON returns the current config as a JSON string. +// This can be used by the caller to persist the config via alternative storage +// mechanisms (e.g., UserDefaults on tvOS where file writes are blocked). +func (a *Auth) GetConfigJSON() (string, error) { + if a.config == nil { + return "", fmt.Errorf("no config available") + } + return profilemanager.ConfigToJSON(a.config) +} + +// SetConfigFromJSON loads config from a JSON string. +// This can be used to restore config from alternative storage mechanisms. +func (a *Auth) SetConfigFromJSON(jsonStr string) error { + cfg, err := profilemanager.ConfigFromJSON(jsonStr) + if err != nil { + return err + } + a.config = cfg + return nil +} diff --git a/client/ios/NetBirdSDK/preferences.go b/client/ios/NetBirdSDK/preferences.go index 39ae06538..c26a6decd 100644 --- a/client/ios/NetBirdSDK/preferences.go +++ b/client/ios/NetBirdSDK/preferences.go @@ -112,6 +112,8 @@ func (p *Preferences) GetRosenpassPermissive() (bool, error) { // Commit write out the changes into config file func (p *Preferences) Commit() error { - _, err := profilemanager.UpdateOrCreateConfig(p.configInput) + // Use DirectUpdateOrCreateConfig to avoid atomic file operations (temp file + rename) + // which are blocked by the tvOS sandbox in App Group containers + _, err := profilemanager.DirectUpdateOrCreateConfig(p.configInput) return err } From 2e9c31685289742afd26ec5d9f1ea2ed0b8e5306 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Wed, 31 Dec 2025 11:50:43 +0100 Subject: [PATCH 008/374] Fix UI stuck in "Connecting" state when daemon reports "Connected" status. (#5014) The UI can get stuck showing "Connecting" status even after the daemon successfully connects and reports "Connected" status. This occurs because the condition to update the UI to "Connected" state checks the wrong flag. --- client/ui/client_ui.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/ui/client_ui.go b/client/ui/client_ui.go index 87bac8c31..78934ea95 100644 --- a/client/ui/client_ui.go +++ b/client/ui/client_ui.go @@ -909,7 +909,7 @@ func (s *serviceClient) updateStatus() error { var systrayIconState bool switch { - case status.Status == string(internal.StatusConnected) && !s.mUp.Disabled(): + case status.Status == string(internal.StatusConnected) && !s.connected: s.connected = true s.sendNotification = true if s.isUpdateIconActive { From 7ac65bf1adceb7f7f1e2834be3fa1ccc2b2e6399 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Wed, 31 Dec 2025 11:53:20 +0100 Subject: [PATCH 009/374] [management] Fix/delete groups without lock (#5012) --- management/internals/shared/grpc/server.go | 3 +++ management/server/group.go | 6 ++++- management/server/store/sql_store.go | 30 +++++++++++++++++++--- management/server/store/sql_store_test.go | 27 +++++++++++++++++++ 4 files changed, 61 insertions(+), 5 deletions(-) diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index 0b9326fbc..063dda7e4 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -276,6 +276,8 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S unlock() unlock = nil + log.WithContext(ctx).Debugf("Sync took %s", time.Since(reqStart)) + s.syncSem.Add(-1) return s.handleUpdates(ctx, accountID, peerKey, peer, updates, srv) @@ -565,6 +567,7 @@ func (s *Server) Login(ctx context.Context, req *proto.EncryptedMessage) (*proto if s.appMetrics != nil { s.appMetrics.GRPCMetrics().CountLoginRequestDuration(time.Since(reqStart), accountID) } + log.WithContext(ctx).Debugf("Login took %s", time.Since(reqStart)) }() if loginReq.GetMeta() == nil { diff --git a/management/server/group.go b/management/server/group.go index 84e641f26..9fc8db120 100644 --- a/management/server/group.go +++ b/management/server/group.go @@ -427,7 +427,7 @@ func (am *DefaultAccountManager) DeleteGroups(ctx context.Context, accountID, us err = am.Store.ExecuteInTransaction(ctx, func(transaction store.Store) error { for _, groupID := range groupIDs { - group, err := transaction.GetGroupByID(ctx, store.LockingStrengthUpdate, accountID, groupID) + group, err := transaction.GetGroupByID(ctx, store.LockingStrengthNone, accountID, groupID) if err != nil { allErrors = errors.Join(allErrors, err) continue @@ -442,6 +442,10 @@ func (am *DefaultAccountManager) DeleteGroups(ctx context.Context, accountID, us deletedGroups = append(deletedGroups, group) } + if len(groupIDsToDelete) == 0 { + return allErrors + } + if err = transaction.DeleteGroups(ctx, accountID, groupIDsToDelete); err != nil { return err } diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 73565a462..08d19f0d3 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -63,6 +63,8 @@ type SqlStore struct { installationPK int storeEngine types.Engine pool *pgxpool.Pool + + transactionTimeout time.Duration } type installation struct { @@ -84,6 +86,14 @@ func NewSqlStore(ctx context.Context, db *gorm.DB, storeEngine types.Engine, met conns = runtime.NumCPU() } + transactionTimeout := 5 * time.Minute + if v := os.Getenv("NB_STORE_TRANSACTION_TIMEOUT"); v != "" { + if parsed, err := time.ParseDuration(v); err == nil { + transactionTimeout = parsed + } + } + log.WithContext(ctx).Infof("Setting transaction timeout to %v", transactionTimeout) + if storeEngine == types.SqliteStoreEngine { if err == nil { log.WithContext(ctx).Warnf("setting NB_SQL_MAX_OPEN_CONNS is not supported for sqlite, using default value 1") @@ -101,7 +111,7 @@ func NewSqlStore(ctx context.Context, db *gorm.DB, storeEngine types.Engine, met if skipMigration { log.WithContext(ctx).Infof("skipping migration") - return &SqlStore{db: db, storeEngine: storeEngine, metrics: metrics, installationPK: 1}, nil + return &SqlStore{db: db, storeEngine: storeEngine, metrics: metrics, installationPK: 1, transactionTimeout: transactionTimeout}, nil } if err := migratePreAuto(ctx, db); err != nil { @@ -120,7 +130,7 @@ func NewSqlStore(ctx context.Context, db *gorm.DB, storeEngine types.Engine, met return nil, fmt.Errorf("migratePostAuto: %w", err) } - return &SqlStore{db: db, storeEngine: storeEngine, metrics: metrics, installationPK: 1}, nil + return &SqlStore{db: db, storeEngine: storeEngine, metrics: metrics, installationPK: 1, transactionTimeout: transactionTimeout}, nil } func GetKeyQueryCondition(s *SqlStore) string { @@ -2897,8 +2907,11 @@ func (s *SqlStore) IncrementNetworkSerial(ctx context.Context, accountId string) } func (s *SqlStore) ExecuteInTransaction(ctx context.Context, operation func(store Store) error) error { + timeoutCtx, cancel := context.WithTimeout(context.Background(), s.transactionTimeout) + defer cancel() + startTime := time.Now() - tx := s.db.Begin() + tx := s.db.WithContext(timeoutCtx).Begin() if tx.Error != nil { return tx.Error } @@ -2933,6 +2946,9 @@ func (s *SqlStore) ExecuteInTransaction(ctx context.Context, operation func(stor err := operation(repo) if err != nil { tx.Rollback() + if errors.Is(err, context.DeadlineExceeded) || errors.Is(timeoutCtx.Err(), context.DeadlineExceeded) { + log.WithContext(ctx).Warnf("transaction exceeded %s timeout after %v, stack: %s", s.transactionTimeout, time.Since(startTime), debug.Stack()) + } return err } @@ -2945,13 +2961,19 @@ func (s *SqlStore) ExecuteInTransaction(ctx context.Context, operation func(stor } err = tx.Commit().Error + if err != nil { + if errors.Is(err, context.DeadlineExceeded) || errors.Is(timeoutCtx.Err(), context.DeadlineExceeded) { + log.WithContext(ctx).Warnf("transaction commit exceeded %s timeout after %v, stack: %s", s.transactionTimeout, time.Since(startTime), debug.Stack()) + } + return err + } log.WithContext(ctx).Tracef("transaction took %v", time.Since(startTime)) if s.metrics != nil { s.metrics.StoreMetrics().CountTransactionDuration(time.Since(startTime)) } - return err + return nil } func (s *SqlStore) withTx(tx *gorm.DB) Store { diff --git a/management/server/store/sql_store_test.go b/management/server/store/sql_store_test.go index d63d624de..714927a5a 100644 --- a/management/server/store/sql_store_test.go +++ b/management/server/store/sql_store_test.go @@ -3857,3 +3857,30 @@ func TestSqlStore_ApproveAccountPeers(t *testing.T) { }) }) } + +func TestSqlStore_ExecuteInTransaction_Timeout(t *testing.T) { + if os.Getenv("NETBIRD_STORE_ENGINE") == "mysql" { + t.Skip("Skipping timeout test for MySQL") + } + + t.Setenv("NB_STORE_TRANSACTION_TIMEOUT", "1s") + + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "", t.TempDir()) + require.NoError(t, err) + t.Cleanup(cleanup) + + sqlStore, ok := store.(*SqlStore) + require.True(t, ok) + assert.Equal(t, 1*time.Second, sqlStore.transactionTimeout) + + ctx := context.Background() + err = sqlStore.ExecuteInTransaction(ctx, func(transaction Store) error { + // Sleep for 2 seconds to exceed the 1 second timeout + time.Sleep(2 * time.Second) + return nil + }) + + // The transaction should fail with an error (either timeout or already rolled back) + require.Error(t, err) + assert.Contains(t, err.Error(), "transaction has already been committed or rolled back", "expected transaction rolled back error, got: %v", err) +} From 9ba067391fc8e5a35ad20c9a5f6b03178ab60eb2 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Sat, 3 Jan 2026 09:10:02 +0100 Subject: [PATCH 010/374] [client] Fix semaphore slot leaks (#5018) - Remove WaitGroup, make SemaphoreGroup a pure semaphore - Make Add() return error instead of silently failing on context cancel - Remove context parameter from Done() to prevent slot leaks - Fix missing Done() call in conn.go error path --- client/internal/peer/conn.go | 9 +- util/semaphore-group/semaphore_group.go | 29 +---- util/semaphore-group/semaphore_group_test.go | 128 +++++++++++-------- 3 files changed, 89 insertions(+), 77 deletions(-) diff --git a/client/internal/peer/conn.go b/client/internal/peer/conn.go index 426c31e1a..20a2eb342 100644 --- a/client/internal/peer/conn.go +++ b/client/internal/peer/conn.go @@ -148,13 +148,15 @@ func NewConn(config ConnConfig, services ServiceDependencies) (*Conn, error) { // It will try to establish a connection using ICE and in parallel with relay. The higher priority connection type will // be used. func (conn *Conn) Open(engineCtx context.Context) error { - conn.semaphore.Add(engineCtx) + if err := conn.semaphore.Add(engineCtx); err != nil { + return err + } conn.mu.Lock() defer conn.mu.Unlock() if conn.opened { - conn.semaphore.Done(engineCtx) + conn.semaphore.Done() return nil } @@ -165,6 +167,7 @@ func (conn *Conn) Open(engineCtx context.Context) error { relayIsSupportedLocally := conn.workerRelay.RelayIsSupportedLocally() workerICE, err := NewWorkerICE(conn.ctx, conn.Log, conn.config, conn, conn.signaler, conn.iFaceDiscover, conn.statusRecorder, relayIsSupportedLocally) if err != nil { + conn.semaphore.Done() return err } conn.workerICE = workerICE @@ -200,7 +203,7 @@ func (conn *Conn) Open(engineCtx context.Context) error { defer conn.wg.Done() conn.waitInitialRandomSleepTime(conn.ctx) - conn.semaphore.Done(conn.ctx) + conn.semaphore.Done() conn.guard.Start(conn.ctx, conn.onGuardEvent) }() diff --git a/util/semaphore-group/semaphore_group.go b/util/semaphore-group/semaphore_group.go index ad74e1bfc..462300672 100644 --- a/util/semaphore-group/semaphore_group.go +++ b/util/semaphore-group/semaphore_group.go @@ -2,12 +2,10 @@ package semaphoregroup import ( "context" - "sync" ) // SemaphoreGroup is a custom type that combines sync.WaitGroup and a semaphore. type SemaphoreGroup struct { - waitGroup sync.WaitGroup semaphore chan struct{} } @@ -18,31 +16,18 @@ func NewSemaphoreGroup(limit int) *SemaphoreGroup { } } -// Add increments the internal WaitGroup counter and acquires a semaphore slot. -func (sg *SemaphoreGroup) Add(ctx context.Context) { - sg.waitGroup.Add(1) - +// Add acquire a slot +func (sg *SemaphoreGroup) Add(ctx context.Context) error { // Acquire semaphore slot select { case <-ctx.Done(): - return + return ctx.Err() case sg.semaphore <- struct{}{}: + return nil } } -// Done decrements the internal WaitGroup counter and releases a semaphore slot. -func (sg *SemaphoreGroup) Done(ctx context.Context) { - sg.waitGroup.Done() - - // Release semaphore slot - select { - case <-ctx.Done(): - return - case <-sg.semaphore: - } -} - -// Wait waits until the internal WaitGroup counter is zero. -func (sg *SemaphoreGroup) Wait() { - sg.waitGroup.Wait() +// Done releases a slot. Must be called after a successful Add. +func (sg *SemaphoreGroup) Done() { + <-sg.semaphore } diff --git a/util/semaphore-group/semaphore_group_test.go b/util/semaphore-group/semaphore_group_test.go index d4491cf77..9406da4a0 100644 --- a/util/semaphore-group/semaphore_group_test.go +++ b/util/semaphore-group/semaphore_group_test.go @@ -2,65 +2,89 @@ package semaphoregroup import ( "context" + "sync" "testing" "time" ) func TestSemaphoreGroup(t *testing.T) { - semGroup := NewSemaphoreGroup(2) - - for i := 0; i < 5; i++ { - semGroup.Add(context.Background()) - go func(id int) { - defer semGroup.Done(context.Background()) - - got := len(semGroup.semaphore) - if got == 0 { - t.Errorf("Expected semaphore length > 0 , got 0") - } - - time.Sleep(time.Millisecond) - t.Logf("Goroutine %d is running\n", id) - }(i) - } - - semGroup.Wait() - - want := 0 - got := len(semGroup.semaphore) - if got != want { - t.Errorf("Expected semaphore length %d, got %d", want, got) - } -} - -func TestSemaphoreGroupContext(t *testing.T) { semGroup := NewSemaphoreGroup(1) - semGroup.Add(context.Background()) - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + _ = semGroup.Add(context.Background()) + + ctxTimeout, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) t.Cleanup(cancel) - rChan := make(chan struct{}) - go func() { - semGroup.Add(ctx) - rChan <- struct{}{} - }() - select { - case <-rChan: - case <-time.NewTimer(2 * time.Second).C: - t.Error("Adding to semaphore group should not block when context is not done") - } - - semGroup.Done(context.Background()) - - ctxDone, cancelDone := context.WithTimeout(context.Background(), 1*time.Second) - t.Cleanup(cancelDone) - go func() { - semGroup.Done(ctxDone) - rChan <- struct{}{} - }() - select { - case <-rChan: - case <-time.NewTimer(2 * time.Second).C: - t.Error("Releasing from semaphore group should not block when context is not done") + if err := semGroup.Add(ctxTimeout); err == nil { + t.Error("Adding to semaphore group should not block") + } +} + +func TestSemaphoreGroupFreeUp(t *testing.T) { + semGroup := NewSemaphoreGroup(1) + _ = semGroup.Add(context.Background()) + semGroup.Done() + + ctxTimeout, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + t.Cleanup(cancel) + if err := semGroup.Add(ctxTimeout); err != nil { + t.Error(err) + } +} + +func TestSemaphoreGroupCanceledContext(t *testing.T) { + semGroup := NewSemaphoreGroup(1) + _ = semGroup.Add(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately + + if err := semGroup.Add(ctx); err == nil { + t.Error("Add should return error when context is already canceled") + } +} + +func TestSemaphoreGroupCancelWhileWaiting(t *testing.T) { + semGroup := NewSemaphoreGroup(1) + _ = semGroup.Add(context.Background()) + + ctx, cancel := context.WithCancel(context.Background()) + errChan := make(chan error, 1) + + go func() { + errChan <- semGroup.Add(ctx) + }() + + time.Sleep(10 * time.Millisecond) + cancel() + + if err := <-errChan; err == nil { + t.Error("Add should return error when context is canceled while waiting") + } +} + +func TestSemaphoreGroupHighConcurrency(t *testing.T) { + const limit = 10 + const numGoroutines = 100 + + semGroup := NewSemaphoreGroup(limit) + var wg sync.WaitGroup + + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func() { + defer wg.Done() + if err := semGroup.Add(context.Background()); err != nil { + t.Errorf("Unexpected error: %v", err) + return + } + time.Sleep(time.Millisecond) + semGroup.Done() + }() + } + + wg.Wait() + + // Verify all slots were released + if got := len(semGroup.semaphore); got != 0 { + t.Errorf("Expected semaphore to be empty, got %d slots occupied", got) } } From 80a312cc9cf98f4d98cd25c8f9cb584ce5dc4377 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Sat, 3 Jan 2026 13:32:41 +0300 Subject: [PATCH 011/374] [client] add verbose flag for free ad tests (#5021) add verbose flag for free ad tests --- .github/workflows/golang-test-freebsd.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/golang-test-freebsd.yml b/.github/workflows/golang-test-freebsd.yml index b03313bbd..0d19e8a19 100644 --- a/.github/workflows/golang-test-freebsd.yml +++ b/.github/workflows/golang-test-freebsd.yml @@ -39,7 +39,7 @@ jobs: # check all component except management, since we do not support management server on freebsd time go test -timeout 1m -failfast ./base62/... # NOTE: without -p1 `client/internal/dns` will fail because of `listen udp4 :33100: bind: address already in use` - time go test -timeout 8m -failfast -p 1 ./client/... + time go test -timeout 8m -failfast -v -p 1 ./client/... time go test -timeout 1m -failfast ./dns/... time go test -timeout 1m -failfast ./encryption/... time go test -timeout 1m -failfast ./formatter/... From 08b782d6ba6b05da04923e95f6f0931fda51bb12 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Sat, 3 Jan 2026 18:05:38 +0100 Subject: [PATCH 012/374] [client] Fix update download url (#5023) --- .../internal/updatemanager/installer/installer_run_darwin.go | 2 +- .../internal/updatemanager/installer/installer_run_windows.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/client/internal/updatemanager/installer/installer_run_darwin.go b/client/internal/updatemanager/installer/installer_run_darwin.go index 462e2c227..248a404aa 100644 --- a/client/internal/updatemanager/installer/installer_run_darwin.go +++ b/client/internal/updatemanager/installer/installer_run_darwin.go @@ -22,7 +22,7 @@ const ( defaultTempDir = "/var/lib/netbird/tmp-install" - pkgDownloadURL = "https://github.com/mlsmaycon/netbird/releases/download/v%version/netbird_%version_darwin_%arch.pkg" + pkgDownloadURL = "https://github.com/netbirdio/netbird/releases/download/v%version/netbird_%version_darwin_%arch.pkg" ) var ( diff --git a/client/internal/updatemanager/installer/installer_run_windows.go b/client/internal/updatemanager/installer/installer_run_windows.go index 353cd885d..70c7e32cf 100644 --- a/client/internal/updatemanager/installer/installer_run_windows.go +++ b/client/internal/updatemanager/installer/installer_run_windows.go @@ -22,8 +22,8 @@ const ( msiLogFile = "msi.log" - msiDownloadURL = "https://github.com/mlsmaycon/netbird/releases/download/v%version/netbird_installer_%version_windows_%arch.msi" - exeDownloadURL = "https://github.com/mlsmaycon/netbird/releases/download/v%version/netbird_installer_%version_windows_%arch.exe" + msiDownloadURL = "https://github.com/netbirdio/netbird/releases/download/v%version/netbird_installer_%version_windows_%arch.msi" + exeDownloadURL = "https://github.com/netbirdio/netbird/releases/download/v%version/netbird_installer_%version_windows_%arch.exe" ) var ( From 07856f516cbc6498edeb90acd6202ec2776fb3db Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Mon, 5 Jan 2026 15:53:17 +0300 Subject: [PATCH 013/374] [client] Fix/stuck connecting when can't access api.netbird.io (#5033) - Connect on daemon start only if the file existed before - fixed a bug that happened when the default profile config was removed, which would recreate it and reset the active profile to the default. --- .../networkmonitor/check_change_darwin.go | 1 - client/internal/profilemanager/config.go | 2 +- client/internal/profilemanager/service.go | 8 -- client/server/server.go | 80 +++++++------------ 4 files changed, 29 insertions(+), 62 deletions(-) diff --git a/client/internal/networkmonitor/check_change_darwin.go b/client/internal/networkmonitor/check_change_darwin.go index ddc6e1736..cb5236070 100644 --- a/client/internal/networkmonitor/check_change_darwin.go +++ b/client/internal/networkmonitor/check_change_darwin.go @@ -110,7 +110,6 @@ func wakeUpListen(ctx context.Context) { } if newHash == initialHash { - log.Tracef("no wakeup detected") continue } diff --git a/client/internal/profilemanager/config.go b/client/internal/profilemanager/config.go index de4436f19..f2fda84e0 100644 --- a/client/internal/profilemanager/config.go +++ b/client/internal/profilemanager/config.go @@ -685,7 +685,7 @@ func update(input ConfigInput) (*Config, error) { return config, nil } -// GetConfig read config file and return with Config. Errors out if it does not exist +// GetConfig read config file and return with Config and if it was created. Errors out if it does not exist func GetConfig(configPath string) (*Config, error) { return readConfig(configPath, false) } diff --git a/client/internal/profilemanager/service.go b/client/internal/profilemanager/service.go index 5a0c14000..bdb722c67 100644 --- a/client/internal/profilemanager/service.go +++ b/client/internal/profilemanager/service.go @@ -126,14 +126,6 @@ func (s *ServiceManager) CopyDefaultProfileIfNotExists() (bool, error) { log.Warnf("failed to set permissions for default profile: %v", err) } - if err := s.SetActiveProfileState(&ActiveProfileState{ - Name: "default", - Username: "", - }); err != nil { - log.Errorf("failed to set active profile state: %v", err) - return false, fmt.Errorf("failed to set active profile state: %w", err) - } - return true, nil } diff --git a/client/server/server.go b/client/server/server.go index 99da4e36f..35ac04381 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -145,10 +145,10 @@ func (s *Server) Start() error { ctx, cancel := context.WithCancel(s.rootCtx) s.actCancel = cancel - // set the default config if not exists - if err := s.setDefaultConfigIfNotExists(ctx); err != nil { - log.Errorf("failed to set default config: %v", err) - return fmt.Errorf("failed to set default config: %w", err) + // copy old default config + _, err = s.profileManager.CopyDefaultProfileIfNotExists() + if err != nil && !errors.Is(err, profilemanager.ErrorOldDefaultConfigNotFound) { + return err } activeProf, err := s.profileManager.GetActiveProfileState() @@ -156,23 +156,11 @@ func (s *Server) Start() error { return fmt.Errorf("failed to get active profile state: %w", err) } - config, err := s.getConfig(activeProf) + config, existingConfig, err := s.getConfig(activeProf) if err != nil { log.Errorf("failed to get active profile config: %v", err) - if err := s.profileManager.SetActiveProfileState(&profilemanager.ActiveProfileState{ - Name: "default", - Username: "", - }); err != nil { - log.Errorf("failed to set active profile state: %v", err) - return fmt.Errorf("failed to set active profile state: %w", err) - } - - config, err = profilemanager.GetConfig(s.profileManager.DefaultProfilePath()) - if err != nil { - log.Errorf("failed to get default profile config: %v", err) - return fmt.Errorf("failed to get default profile config: %w", err) - } + return err } s.config = config @@ -186,6 +174,13 @@ func (s *Server) Start() error { } if config.DisableAutoConnect { + state.Set(internal.StatusIdle) + return nil + } + + if !existingConfig { + log.Warnf("not trying to connect when configuration was just created") + state.Set(internal.StatusNeedsLogin) return nil } @@ -196,30 +191,6 @@ func (s *Server) Start() error { return nil } -func (s *Server) setDefaultConfigIfNotExists(ctx context.Context) error { - ok, err := s.profileManager.CopyDefaultProfileIfNotExists() - if err != nil { - if err := s.profileManager.CreateDefaultProfile(); err != nil { - log.Errorf("failed to create default profile: %v", err) - return fmt.Errorf("failed to create default profile: %w", err) - } - - if err := s.profileManager.SetActiveProfileState(&profilemanager.ActiveProfileState{ - Name: "default", - Username: "", - }); err != nil { - log.Errorf("failed to set active profile state: %v", err) - return fmt.Errorf("failed to set active profile state: %w", err) - } - } - if ok { - state := internal.CtxGetState(ctx) - state.Set(internal.StatusNeedsLogin) - } - - return nil -} - // connectWithRetryRuns runs the client connection with a backoff strategy where we retry the operation as additional // mechanism to keep the client connected even when the connection is lost. // we cancel retry if the client receive a stop or down command, or if disable auto connect is configured. @@ -487,7 +458,7 @@ func (s *Server) Login(callerCtx context.Context, msg *proto.LoginRequest) (*pro s.mutex.Unlock() - config, err := s.getConfig(activeProf) + config, _, err := s.getConfig(activeProf) if err != nil { log.Errorf("failed to get active profile config: %v", err) return nil, fmt.Errorf("failed to get active profile config: %w", err) @@ -716,7 +687,7 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR log.Infof("active profile: %s for %s", activeProf.Name, activeProf.Username) - config, err := s.getConfig(activeProf) + config, _, err := s.getConfig(activeProf) if err != nil { log.Errorf("failed to get active profile config: %v", err) return nil, fmt.Errorf("failed to get active profile config: %w", err) @@ -811,7 +782,7 @@ func (s *Server) SwitchProfile(callerCtx context.Context, msg *proto.SwitchProfi log.Errorf("failed to get active profile state: %v", err) return nil, fmt.Errorf("failed to get active profile state: %w", err) } - config, err := s.getConfig(activeProf) + config, _, err := s.getConfig(activeProf) if err != nil { log.Errorf("failed to get default profile config: %v", err) return nil, fmt.Errorf("failed to get default profile config: %w", err) @@ -908,7 +879,7 @@ func (s *Server) handleActiveProfileLogout(ctx context.Context) (*proto.LogoutRe return nil, gstatus.Errorf(codes.FailedPrecondition, "failed to get active profile state: %v", err) } - config, err := s.getConfig(activeProf) + config, _, err := s.getConfig(activeProf) if err != nil { return nil, gstatus.Errorf(codes.FailedPrecondition, "not logged in") } @@ -932,19 +903,24 @@ func (s *Server) handleActiveProfileLogout(ctx context.Context) (*proto.LogoutRe return &proto.LogoutResponse{}, nil } -// getConfig loads the config from the active profile -func (s *Server) getConfig(activeProf *profilemanager.ActiveProfileState) (*profilemanager.Config, error) { +// GetConfig reads config file and returns Config and whether the config file already existed. Errors out if it does not exist +func (s *Server) getConfig(activeProf *profilemanager.ActiveProfileState) (*profilemanager.Config, bool, error) { cfgPath, err := activeProf.FilePath() if err != nil { - return nil, fmt.Errorf("failed to get active profile file path: %w", err) + return nil, false, fmt.Errorf("failed to get active profile file path: %w", err) } - config, err := profilemanager.GetConfig(cfgPath) + _, err = os.Stat(cfgPath) + configExisted := !os.IsNotExist(err) + + log.Infof("active profile config existed: %t, err %v", configExisted, err) + + config, err := profilemanager.ReadConfig(cfgPath) if err != nil { - return nil, fmt.Errorf("failed to get config: %w", err) + return nil, false, fmt.Errorf("failed to get config: %w", err) } - return config, nil + return config, configExisted, nil } func (s *Server) canRemoveProfile(profileName string) error { From 7bb4fc3450f9ce36aad58205592790a85e28024b Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Mon, 5 Jan 2026 20:55:22 +0300 Subject: [PATCH 014/374] [management] Refactor integrated peer validator (#5035) --- management/server/peer.go | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/management/server/peer.go b/management/server/peer.go index 7c48a8052..8d0e18171 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -663,11 +663,10 @@ func getPeerIPDNSLabel(ip net.IP, peerHostName string) (string, error) { // SyncPeer checks whether peer is eligible for receiving NetworkMap (authenticated) and returns its NetworkMap if eligible func (am *DefaultAccountManager) SyncPeer(ctx context.Context, sync types.PeerSync, accountID string) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, int64, error) { var peer *nbpeer.Peer - var peerNotValid bool - var isStatusChanged bool var updated, versionChanged bool var err error var postureChecks []*posture.Checks + var peerGroupIDs []string settings, err := am.Store.GetAccountSettings(ctx, store.LockingStrengthNone, accountID) if err != nil { @@ -695,12 +694,7 @@ func (am *DefaultAccountManager) SyncPeer(ctx context.Context, sync types.PeerSy return status.NewPeerLoginExpiredError() } - peerGroupIDs, err := getPeerGroupIDs(ctx, transaction, accountID, peer.ID) - if err != nil { - return err - } - - peerNotValid, isStatusChanged, err = am.integratedPeerValidator.IsNotValidPeer(ctx, accountID, peer, peerGroupIDs, settings.Extra) + peerGroupIDs, err = getPeerGroupIDs(ctx, transaction, accountID, peer.ID) if err != nil { return err } @@ -724,6 +718,11 @@ func (am *DefaultAccountManager) SyncPeer(ctx context.Context, sync types.PeerSy return nil, nil, nil, 0, err } + peerNotValid, isStatusChanged, err := am.integratedPeerValidator.IsNotValidPeer(ctx, accountID, peer, peerGroupIDs, settings.Extra) + if err != nil { + return nil, nil, nil, 0, err + } + if isStatusChanged || sync.UpdateAccountPeers || (updated && (len(postureChecks) > 0 || versionChanged)) { err = am.networkMapController.OnPeersUpdated(ctx, accountID, []string{peer.ID}) if err != nil { @@ -773,10 +772,9 @@ func (am *DefaultAccountManager) LoginPeer(ctx context.Context, login types.Peer var peer *nbpeer.Peer var updateRemotePeers bool - var isRequiresApproval bool - var isStatusChanged bool var isPeerUpdated bool var postureChecks []*posture.Checks + var peerGroupIDs []string settings, err := am.Store.GetAccountSettings(ctx, store.LockingStrengthNone, accountID) if err != nil { @@ -809,12 +807,7 @@ func (am *DefaultAccountManager) LoginPeer(ctx context.Context, login types.Peer } } - peerGroupIDs, err := getPeerGroupIDs(ctx, transaction, accountID, peer.ID) - if err != nil { - return err - } - - isRequiresApproval, isStatusChanged, err = am.integratedPeerValidator.IsNotValidPeer(ctx, accountID, peer, peerGroupIDs, settings.Extra) + peerGroupIDs, err = getPeerGroupIDs(ctx, transaction, accountID, peer.ID) if err != nil { return err } @@ -852,6 +845,11 @@ func (am *DefaultAccountManager) LoginPeer(ctx context.Context, login types.Peer return nil, nil, nil, err } + isRequiresApproval, isStatusChanged, err := am.integratedPeerValidator.IsNotValidPeer(ctx, accountID, peer, peerGroupIDs, settings.Extra) + if err != nil { + return nil, nil, nil, err + } + if updateRemotePeers || isStatusChanged || (isPeerUpdated && len(postureChecks) > 0) { err = am.networkMapController.OnPeersUpdated(ctx, accountID, []string{peer.ID}) if err != nil { From f022e34287a65eb94771609faaf1da865a4d9eba Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Tue, 6 Jan 2026 10:52:36 +0100 Subject: [PATCH 015/374] [shared] allow setting a user agent for the rest client (#5037) --- shared/management/client/rest/client.go | 4 ++ shared/management/client/rest/client_test.go | 51 ++++++++++++++++++++ shared/management/client/rest/options.go | 7 +++ 3 files changed, 62 insertions(+) diff --git a/shared/management/client/rest/client.go b/shared/management/client/rest/client.go index 2a5de5bbc..4d1de2631 100644 --- a/shared/management/client/rest/client.go +++ b/shared/management/client/rest/client.go @@ -16,6 +16,7 @@ type Client struct { managementURL string authHeader string httpClient HttpClient + userAgent string // Accounts NetBird account APIs // see more: https://docs.netbird.io/api/resources/accounts @@ -128,6 +129,9 @@ func (c *Client) NewRequest(ctx context.Context, method, path string, body io.Re if body != nil { req.Header.Add("Content-Type", "application/json") } + if c.userAgent != "" { + req.Header.Set("User-Agent", c.userAgent) + } if len(query) != 0 { q := req.URL.Query() diff --git a/shared/management/client/rest/client_test.go b/shared/management/client/rest/client_test.go index 54a0290d0..17df8dd8b 100644 --- a/shared/management/client/rest/client_test.go +++ b/shared/management/client/rest/client_test.go @@ -4,10 +4,14 @@ package rest_test import ( + "context" "net/http" "net/http/httptest" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" "github.com/netbirdio/netbird/shared/management/client/rest" ) @@ -32,3 +36,50 @@ func withBlackBoxServer(t *testing.T, callback func(*rest.Client)) { c := rest.New(server.URL, "nbp_apTmlmUXHSC4PKmHwtIZNaGr8eqcVI2gMURp") callback(c) } + +func TestClient_UserAgent_Set(t *testing.T) { + expectedUserAgent := "TestApp/1.2.3" + mux := &http.ServeMux{} + server := httptest.NewServer(mux) + defer server.Close() + + mux.HandleFunc("/api/accounts", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, expectedUserAgent, r.Header.Get("User-Agent")) + w.WriteHeader(200) + _, err := w.Write([]byte("[]")) + require.NoError(t, err) + }) + + c := rest.NewWithOptions( + rest.WithManagementURL(server.URL), + rest.WithPAT("test-token"), + rest.WithUserAgent(expectedUserAgent), + ) + + _, err := c.Accounts.List(context.Background()) + require.NoError(t, err) +} + +func TestClient_UserAgent_NotSet(t *testing.T) { + mux := &http.ServeMux{} + server := httptest.NewServer(mux) + defer server.Close() + + mux.HandleFunc("/api/accounts", func(w http.ResponseWriter, r *http.Request) { + // When no custom user agent is set, Go's default HTTP client will set one + // We just verify that the header exists (it will be Go's default) + userAgent := r.Header.Get("User-Agent") + assert.NotEmpty(t, userAgent) + w.WriteHeader(200) + _, err := w.Write([]byte("[]")) + require.NoError(t, err) + }) + + c := rest.NewWithOptions( + rest.WithManagementURL(server.URL), + rest.WithPAT("test-token"), + ) + + _, err := c.Accounts.List(context.Background()) + require.NoError(t, err) +} diff --git a/shared/management/client/rest/options.go b/shared/management/client/rest/options.go index 21f2394e9..17c7e15cd 100644 --- a/shared/management/client/rest/options.go +++ b/shared/management/client/rest/options.go @@ -42,3 +42,10 @@ func WithAuthHeader(value string) option { c.authHeader = value } } + +// WithUserAgent sets a custom User-Agent header for HTTP requests +func WithUserAgent(userAgent string) option { + return func(c *Client) { + c.userAgent = userAgent + } +} From 9bd578d4ea98e93ee409c7f093934898d42208d7 Mon Sep 17 00:00:00 2001 From: Dennis Schridde <63082+devurandom@users.noreply.github.com> Date: Tue, 6 Jan 2026 11:36:19 +0100 Subject: [PATCH 016/374] Fix ui-post-install.sh to use the full username (#4809) Fixes #4808 by extracting the full username by: - Get PID using pgrep - Get UID from PID using /proc/${PID}/loginuid - Get user name from UID using id Also replaces "complex" pipe from ps to sed with a (hopefully) "simpler" (as in requiring less knowledge about the arguments of ps and regexps) invocation of cat and id. --- release_files/ui-post-install.sh | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/release_files/ui-post-install.sh b/release_files/ui-post-install.sh index f6e8ddf92..ff6c4ee9b 100644 --- a/release_files/ui-post-install.sh +++ b/release_files/ui-post-install.sh @@ -1,10 +1,15 @@ #!/bin/sh +set -e +set -u + # Check if netbird-ui is running -if pgrep -x -f /usr/bin/netbird-ui >/dev/null 2>&1; +pid="$(pgrep -x -f /usr/bin/netbird-ui || true)" +if [ -n "${pid}" ] then - runner=$(ps --no-headers -o '%U' -p $(pgrep -x -f /usr/bin/netbird-ui) | sed 's/^[ \t]*//;s/[ \t]*$//') + uid="$(cat /proc/"${pid}"/loginuid)" + username="$(id -nu "${uid}")" # Only re-run if it was already running pkill -x -f /usr/bin/netbird-ui >/dev/null 2>&1 - su -l - "$runner" -c 'nohup /usr/bin/netbird-ui > /dev/null 2>&1 &' + su - "${username}" -c 'nohup /usr/bin/netbird-ui > /dev/null 2>&1 &' fi From 7142d45ef3035f4e4953e68cb19ba6f8e5206252 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Tue, 6 Jan 2026 19:25:55 +0100 Subject: [PATCH 017/374] [management] network map builder concurrent batch processing for peer updates (#5040) --- .../network_map/controller/controller.go | 32 +- management/server/types/holder.go | 4 + management/server/types/networkmap.go | 11 +- .../server/types/networkmap_golden_test.go | 96 ++++- management/server/types/networkmapbuilder.go | 335 ++++++++++++++---- management/server/user.go | 6 + 6 files changed, 390 insertions(+), 94 deletions(-) diff --git a/management/internals/controllers/network_map/controller/controller.go b/management/internals/controllers/network_map/controller/controller.go index df16e1922..7f0f9bd4b 100644 --- a/management/internals/controllers/network_map/controller/controller.go +++ b/management/internals/controllers/network_map/controller/controller.go @@ -447,7 +447,9 @@ func (c *Controller) GetValidatedPeerWithMap(ctx context.Context, isRequiresAppr if c.experimentalNetworkMap(accountID) { networkMap = c.getPeerNetworkMapExp(ctx, peer.AccountID, peer.ID, approvedPeersMap, customZone, c.accountManagerMetrics) } else { - networkMap = account.GetPeerNetworkMap(ctx, peer.ID, customZone, approvedPeersMap, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap(), c.accountManagerMetrics, account.GetActiveGroupUsers()) + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + networkMap = account.GetPeerNetworkMap(ctx, peer.ID, customZone, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, account.GetActiveGroupUsers()) } proxyNetworkMap, ok := proxyNetworkMaps[peer.ID] @@ -480,12 +482,13 @@ func (c *Controller) getPeerNetworkMapExp( Network: &types.Network{}, } } + return account.GetPeerNetworkMapExp(ctx, peerId, customZone, validatedPeers, metrics) } -func (c *Controller) onPeerAddedUpdNetworkMapCache(account *types.Account, peerId string) error { +func (c *Controller) onPeersAddedUpdNetworkMapCache(account *types.Account, peerIds ...string) { c.enrichAccountFromHolder(account) - return account.OnPeerAddedUpdNetworkMapCache(peerId) + account.OnPeersAddedUpdNetworkMapCache(peerIds...) } func (c *Controller) onPeerDeletedUpdNetworkMapCache(account *types.Account, peerId string) error { @@ -537,7 +540,6 @@ func (c *Controller) enrichAccountFromHolder(account *types.Account) { if account.NetworkMapCache == nil { return } - account.NetworkMapCache.UpdateAccountPointer(account) c.holder.AddAccount(account) } @@ -715,18 +717,14 @@ func (c *Controller) OnPeersUpdated(ctx context.Context, accountID string, peerI } func (c *Controller) OnPeersAdded(ctx context.Context, accountID string, peerIDs []string) error { - for _, peerID := range peerIDs { - if c.experimentalNetworkMap(accountID) { - account, err := c.requestBuffer.GetAccountWithBackpressure(ctx, accountID) - if err != nil { - return err - } - - err = c.onPeerAddedUpdNetworkMapCache(account, peerID) - if err != nil { - return err - } + log.WithContext(ctx).Debugf("OnPeersAdded call to add peers: %v", peerIDs) + if c.experimentalNetworkMap(accountID) { + account, err := c.requestBuffer.GetAccountWithBackpressure(ctx, accountID) + if err != nil { + return err } + log.WithContext(ctx).Debugf("peers are ready to be added to networkmap cache: %v", peerIDs) + c.onPeersAddedUpdNetworkMapCache(account, peerIDs...) } return c.bufferSendUpdateAccountPeers(ctx, accountID) } @@ -813,7 +811,9 @@ func (c *Controller) GetNetworkMap(ctx context.Context, peerID string) (*types.N if c.experimentalNetworkMap(peer.AccountID) { networkMap = c.getPeerNetworkMapExp(ctx, peer.AccountID, peerID, validatedPeers, customZone, nil) } else { - networkMap = account.GetPeerNetworkMap(ctx, peer.ID, customZone, validatedPeers, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap(), nil, account.GetActiveGroupUsers()) + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + networkMap = account.GetPeerNetworkMap(ctx, peer.ID, customZone, validatedPeers, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) } proxyNetworkMap, ok := proxyNetworkMaps[peer.ID] diff --git a/management/server/types/holder.go b/management/server/types/holder.go index 3996db2b6..ad7d07522 100644 --- a/management/server/types/holder.go +++ b/management/server/types/holder.go @@ -25,6 +25,10 @@ func (h *Holder) GetAccount(id string) *Account { func (h *Holder) AddAccount(account *Account) { h.mu.Lock() defer h.mu.Unlock() + a := h.accounts[account.Id] + if a != nil && a.Network.CurrentSerial() >= account.Network.CurrentSerial() { + return + } h.accounts[account.Id] = account } diff --git a/management/server/types/networkmap.go b/management/server/types/networkmap.go index c1099726f..ff81e5dc1 100644 --- a/management/server/types/networkmap.go +++ b/management/server/types/networkmap.go @@ -36,14 +36,21 @@ func (a *Account) OnPeerAddedUpdNetworkMapCache(peerId string) error { if a.NetworkMapCache == nil { return nil } - return a.NetworkMapCache.OnPeerAddedIncremental(peerId) + return a.NetworkMapCache.OnPeerAddedIncremental(a, peerId) +} + +func (a *Account) OnPeersAddedUpdNetworkMapCache(peerIds ...string) { + if a.NetworkMapCache == nil { + return + } + a.NetworkMapCache.EnqueuePeersForIncrementalAdd(a, peerIds...) } func (a *Account) OnPeerDeletedUpdNetworkMapCache(peerId string) error { if a.NetworkMapCache == nil { return nil } - return a.NetworkMapCache.OnPeerDeleted(peerId) + return a.NetworkMapCache.OnPeerDeleted(a, peerId) } func (a *Account) UpdatePeerInNetworkMapCache(peer *nbpeer.Peer) { diff --git a/management/server/types/networkmap_golden_test.go b/management/server/types/networkmap_golden_test.go index 913094e4c..9135024d2 100644 --- a/management/server/types/networkmap_golden_test.go +++ b/management/server/types/networkmap_golden_test.go @@ -266,7 +266,7 @@ func TestGetPeerNetworkMap_Golden_New_WithOnPeerAdded(t *testing.T) { account.Network.Serial++ } - err := builder.OnPeerAddedIncremental(newPeerID) + err := builder.OnPeerAddedIncremental(account, newPeerID) require.NoError(t, err, "error adding peer to cache") networkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) @@ -328,7 +328,7 @@ func BenchmarkGetPeerNetworkMap_AfterPeerAdded(b *testing.B) { b.ResetTimer() b.Run("new builder after add", func(b *testing.B) { for i := 0; i < b.N; i++ { - _ = builder.OnPeerAddedIncremental(newPeerID) + _ = builder.OnPeerAddedIncremental(account, newPeerID) for _, testingPeerID := range peerIDs { _ = builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) } @@ -473,7 +473,7 @@ func TestGetPeerNetworkMap_Golden_New_WithOnPeerAddedRouter(t *testing.T) { account.Network.Serial++ } - err := builder.OnPeerAddedIncremental(newRouterID) + err := builder.OnPeerAddedIncremental(account, newRouterID) require.NoError(t, err, "error adding router to cache") networkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) @@ -558,7 +558,7 @@ func BenchmarkGetPeerNetworkMap_AfterRouterPeerAdded(b *testing.B) { b.ResetTimer() b.Run("new builder after add", func(b *testing.B) { for i := 0; i < b.N; i++ { - _ = builder.OnPeerAddedIncremental(newRouterID) + _ = builder.OnPeerAddedIncremental(account, newRouterID) for _, testingPeerID := range peerIDs { _ = builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) } @@ -662,7 +662,7 @@ func TestGetPeerNetworkMap_Golden_New_WithOnPeerDeleted(t *testing.T) { account.Network.Serial++ } - err := builder.OnPeerDeleted(deletedPeerID) + err := builder.OnPeerDeleted(account, deletedPeerID) require.NoError(t, err, "error deleting peer from cache") networkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) @@ -794,7 +794,7 @@ func TestGetPeerNetworkMap_Golden_New_WithDeletedRouterPeer(t *testing.T) { account.Network.Serial++ } - err := builder.OnPeerDeleted(deletedRouterID) + err := builder.OnPeerDeleted(account, deletedRouterID) require.NoError(t, err, "error deleting routing peer from cache") networkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) @@ -855,7 +855,7 @@ func BenchmarkGetPeerNetworkMap_AfterPeerDeleted(b *testing.B) { b.ResetTimer() b.Run("new builder after delete", func(b *testing.B) { for i := 0; i < b.N; i++ { - _ = builder.OnPeerDeleted(deletedPeerID) + _ = builder.OnPeerDeleted(account, deletedPeerID) for _, testingPeerID := range peerIDs { _ = builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) } @@ -1067,3 +1067,85 @@ func createTestAccountWithEntities() *types.Account { return account } + +func TestGetPeerNetworkMap_Golden_New_WithOnPeerAddedRouter_Batched(t *testing.T) { + account := createTestAccountWithEntities() + + ctx := context.Background() + validatedPeersMap := make(map[string]struct{}) + for i := range numPeers { + peerID := fmt.Sprintf("peer-%d", i) + if peerID == offlinePeerID { + continue + } + validatedPeersMap[peerID] = struct{}{} + } + + builder := types.NewNetworkMapBuilder(account, validatedPeersMap) + + newRouterID := "peer-new-router-102" + newRouterIP := net.IP{100, 64, 1, 2} + newRouter := &nbpeer.Peer{ + ID: newRouterID, + IP: newRouterIP, + Key: fmt.Sprintf("key-%s", newRouterID), + DNSLabel: "newrouter102", + Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, + UserID: "user-admin", + Meta: nbpeer.PeerSystemMeta{WtVersion: "0.26.0", GoOS: "linux"}, + LastLogin: func() *time.Time { t := time.Now(); return &t }(), + } + + account.Peers[newRouterID] = newRouter + + if opsGroup, exists := account.Groups[opsGroupID]; exists { + opsGroup.Peers = append(opsGroup.Peers, newRouterID) + } + if allGroup, exists := account.Groups[allGroupID]; exists { + allGroup.Peers = append(allGroup.Peers, newRouterID) + } + + newRoute := &route.Route{ + ID: route.ID("route-new-router"), + Network: netip.MustParsePrefix("172.16.0.0/24"), + Peer: newRouter.Key, + PeerID: newRouterID, + Description: "Route from new router", + Enabled: true, + PeerGroups: []string{opsGroupID}, + Groups: []string{devGroupID, opsGroupID}, + AccessControlGroups: []string{devGroupID}, + AccountID: account.Id, + } + account.Routes[newRoute.ID] = newRoute + + validatedPeersMap[newRouterID] = struct{}{} + + if account.Network != nil { + account.Network.Serial++ + } + + builder.EnqueuePeersForIncrementalAdd(account, newRouterID) + + time.Sleep(100 * time.Millisecond) + + networkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + + normalizeAndSortNetworkMap(networkMap) + + jsonData, err := json.MarshalIndent(networkMap, "", " ") + require.NoError(t, err, "error marshaling network map to JSON") + + goldenFilePath := filepath.Join("testdata", "networkmap_golden_new_with_onpeeradded_router.json") + + t.Log("Update golden file with OnPeerAdded router...") + err = os.MkdirAll(filepath.Dir(goldenFilePath), 0755) + require.NoError(t, err) + err = os.WriteFile(goldenFilePath, jsonData, 0644) + require.NoError(t, err) + + expectedJSON, err := os.ReadFile(goldenFilePath) + require.NoError(t, err, "error reading golden file") + + require.JSONEq(t, string(expectedJSON), string(jsonData), "network map from NEW builder with OnPeerAdded router does not match golden file") +} diff --git a/management/server/types/networkmapbuilder.go b/management/server/types/networkmapbuilder.go index 5790f1646..a508cf725 100644 --- a/management/server/types/networkmapbuilder.go +++ b/management/server/types/networkmapbuilder.go @@ -7,7 +7,6 @@ import ( "strconv" "strings" "sync" - "sync/atomic" "time" log "github.com/sirupsen/logrus" @@ -27,6 +26,9 @@ const ( v6AllWildcard = "::/0" fw = "fw:" rfw = "route-fw:" + + szAddPeerBatch = 10 + maxPeerAddRetries = 20 ) type NetworkMapCache struct { @@ -75,9 +77,19 @@ type PeerRoutesView struct { } type NetworkMapBuilder struct { - account atomic.Pointer[Account] + account *Account cache *NetworkMapCache validatedPeers map[string]struct{} + + apb addPeerBatch +} + +type addPeerBatch struct { + mu sync.Mutex + sg *sync.Cond + ids []string + la *Account + retryCount map[string]int } func NewNetworkMapBuilder(account *Account, validatedPeers map[string]struct{}) *NetworkMapBuilder { @@ -102,11 +114,16 @@ func NewNetworkMapBuilder(account *Account, validatedPeers map[string]struct{}) }, validatedPeers: make(map[string]struct{}), } - builder.account.Store(account) + builder.apb.sg = sync.NewCond(&builder.apb.mu) + builder.apb.ids = make([]string, 0, szAddPeerBatch) + builder.apb.la = account + builder.apb.retryCount = make(map[string]int) + maps.Copy(builder.validatedPeers, validatedPeers) builder.initialBuild(account) + go builder.incAddPeerLoop() return builder } @@ -114,6 +131,8 @@ func (b *NetworkMapBuilder) initialBuild(account *Account) { b.cache.mu.Lock() defer b.cache.mu.Unlock() + b.account = account + start := time.Now() b.buildGlobalIndexes(account) @@ -259,6 +278,7 @@ func (b *NetworkMapBuilder) getPeerConnectionResources(account *Account, peer *n validatedPeersMap map[string]struct{}, ) ([]*nbpeer.Peer, []*FirewallRule) { peerID := peer.ID + ctx := context.Background() peerGroups := b.cache.peerToGroups[peerID] peerGroupsMap := make(map[string]struct{}, len(peerGroups)) @@ -274,6 +294,9 @@ func (b *NetworkMapBuilder) getPeerConnectionResources(account *Account, peer *n for _, group := range peerGroups { policies := b.cache.groupToPolicies[group] for _, policy := range policies { + if isValid := account.validatePostureChecksOnPeer(ctx, policy.SourcePostureChecks, peerID); !isValid { + continue + } rules := b.cache.policyToRules[policy.ID] for _, rule := range rules { var sourcePeers, destinationPeers []*nbpeer.Peer @@ -316,13 +339,13 @@ func (b *NetworkMapBuilder) getPeerConnectionResources(account *Account, peer *n if rule.Bidirectional { if peerInSources { b.generateResourcescached( - account, rule, destinationPeers, FirewallRuleDirectionIN, + rule, destinationPeers, FirewallRuleDirectionIN, peer, &peers, &fwRules, peersExists, rulesExists, ) } if peerInDestinations { b.generateResourcescached( - account, rule, sourcePeers, FirewallRuleDirectionOUT, + rule, sourcePeers, FirewallRuleDirectionOUT, peer, &peers, &fwRules, peersExists, rulesExists, ) } @@ -330,14 +353,14 @@ func (b *NetworkMapBuilder) getPeerConnectionResources(account *Account, peer *n if peerInSources { b.generateResourcescached( - account, rule, destinationPeers, FirewallRuleDirectionOUT, + rule, destinationPeers, FirewallRuleDirectionOUT, peer, &peers, &fwRules, peersExists, rulesExists, ) } if peerInDestinations { b.generateResourcescached( - account, rule, sourcePeers, FirewallRuleDirectionIN, + rule, sourcePeers, FirewallRuleDirectionIN, peer, &peers, &fwRules, peersExists, rulesExists, ) } @@ -398,14 +421,9 @@ func (b *NetworkMapBuilder) getPeersFromGroupscached(account *Account, groupIDs } func (b *NetworkMapBuilder) generateResourcescached( - account *Account, rule *PolicyRule, groupPeers []*nbpeer.Peer, direction int, targetPeer *nbpeer.Peer, + rule *PolicyRule, groupPeers []*nbpeer.Peer, direction int, targetPeer *nbpeer.Peer, peers *[]*nbpeer.Peer, rules *[]*FirewallRule, peersExists map[string]struct{}, rulesExists map[string]struct{}, ) { - isAll := false - if allGroup, err := account.GetGroupAll(); err == nil { - isAll = (len(allGroup.Peers) - 1) == len(groupPeers) - } - for _, peer := range groupPeers { if peer == nil { continue @@ -423,10 +441,6 @@ func (b *NetworkMapBuilder) generateResourcescached( Protocol: string(rule.Protocol), } - if isAll { - fr.PeerIP = allPeers - } - var s strings.Builder s.WriteString(rule.ID) s.WriteString(fr.PeerIP) @@ -931,8 +945,12 @@ func (b *NetworkMapBuilder) getPeerNSGroups(account *Account, peerID string, che return peerNSGroups } -func (b *NetworkMapBuilder) UpdateAccountPointer(account *Account) { - b.account.Store(account) +// lock should be held +func (b *NetworkMapBuilder) updateAccountLocked(account *Account) *Account { + if account.Network.CurrentSerial() > b.account.Network.CurrentSerial() { + b.account = account + } + return b.account } func (b *NetworkMapBuilder) GetPeerNetworkMap( @@ -940,16 +958,17 @@ func (b *NetworkMapBuilder) GetPeerNetworkMap( validatedPeers map[string]struct{}, metrics *telemetry.AccountManagerMetrics, ) *NetworkMap { start := time.Now() - account := b.account.Load() + + b.cache.mu.RLock() + defer b.cache.mu.RUnlock() + + account := b.account peer := account.GetPeer(peerID) if peer == nil { return &NetworkMap{Network: account.Network.Copy()} } - b.cache.mu.RLock() - defer b.cache.mu.RUnlock() - aclView := b.cache.peerACLs[peerID] routesView := b.cache.peerRoutes[peerID] dnsConfig := b.cache.peerDNS[peerID] @@ -1013,6 +1032,8 @@ func (b *NetworkMapBuilder) assembleNetworkMap( for _, ruleID := range aclView.FirewallRuleIDs { if rule := b.cache.globalRules[ruleID]; rule != nil { firewallRules = append(firewallRules, rule) + } else { + log.Debugf("NetworkMapBuilder: peer %s assembling network map has no fwrule %s in globalRules", peer.ID, ruleID) } } @@ -1119,6 +1140,106 @@ func (b *NetworkMapBuilder) isPeerRouter(account *Account, peerID string) bool { return false } +func (b *NetworkMapBuilder) incAddPeerLoop() { + for { + b.apb.mu.Lock() + if len(b.apb.ids) == 0 { + b.apb.sg.Wait() + } + b.addPeersIncrementally() + b.apb.mu.Unlock() + } +} + +// lock on b.apb level should be held +func (b *NetworkMapBuilder) addPeersIncrementally() { + peers := slices.Clone(b.apb.ids) + clear(b.apb.ids) + b.apb.ids = b.apb.ids[:0] + latestAcc := b.apb.la + b.apb.mu.Unlock() + + tt := time.Now() + b.cache.mu.Lock() + defer b.cache.mu.Unlock() + + account := b.updateAccountLocked(latestAcc) + + log.Debugf("NetworkMapBuilder: Starting incremental add of %d peers", len(peers)) + + allUpdates := make(map[string]*PeerUpdateDelta) + + for _, peerID := range peers { + peer := account.GetPeer(peerID) + if peer == nil { + b.apb.mu.Lock() + retries := b.apb.retryCount[peerID] + b.apb.mu.Unlock() + + if retries >= maxPeerAddRetries { + log.Errorf("NetworkMapBuilder: peer %s not found in account %s after %d retries, giving up", peerID, account.Id, retries) + b.apb.mu.Lock() + delete(b.apb.retryCount, peerID) + b.apb.mu.Unlock() + continue + } + + log.Warnf("NetworkMapBuilder: peer %s not found in account %s, retry %d/%d", peerID, account.Id, retries+1, maxPeerAddRetries) + b.apb.mu.Lock() + b.apb.retryCount[peerID] = retries + 1 + b.apb.mu.Unlock() + b.enqueuePeersForIncrementalAdd(latestAcc, peerID) + continue + } + + b.apb.mu.Lock() + delete(b.apb.retryCount, peerID) + b.apb.mu.Unlock() + + b.validatedPeers[peerID] = struct{}{} + b.cache.globalPeers[peerID] = peer + + peerGroups := b.updateIndexesForNewPeer(account, peerID) + b.buildPeerACLView(account, peerID) + b.buildPeerRoutesView(account, peerID) + b.buildPeerDNSView(account, peerID) + + peerDeltas := b.collectDeltasForNewPeer(account, peerID, peerGroups) + for affectedPeerID, delta := range peerDeltas { + if existing, ok := allUpdates[affectedPeerID]; ok { + existing.mergeFrom(delta) + continue + } + allUpdates[affectedPeerID] = delta + } + } + + for affectedPeerID, delta := range allUpdates { + b.applyDeltaToPeer(account, affectedPeerID, delta) + } + + log.Debugf("NetworkMapBuilder: Added %d peers to cache, affected %d peers, took %s", len(peers), len(allUpdates), time.Since(tt)) + + b.apb.mu.Lock() + if len(b.apb.ids) > 0 { + b.apb.sg.Signal() + } +} + +func (b *NetworkMapBuilder) enqueuePeersForIncrementalAdd(acc *Account, peerIDs ...string) { + b.apb.mu.Lock() + b.apb.ids = append(b.apb.ids, peerIDs...) + if b.apb.la != nil && acc.Network.CurrentSerial() > b.apb.la.Network.CurrentSerial() { + b.apb.la = acc + } + b.apb.sg.Signal() + b.apb.mu.Unlock() +} + +func (b *NetworkMapBuilder) EnqueuePeersForIncrementalAdd(acc *Account, peerIDs ...string) { + b.enqueuePeersForIncrementalAdd(acc, peerIDs...) +} + type ViewDelta struct { AddedPeerIDs []string RemovedPeerIDs []string @@ -1126,17 +1247,18 @@ type ViewDelta struct { RemovedRuleIDs []string } -func (b *NetworkMapBuilder) OnPeerAddedIncremental(peerID string) error { +func (b *NetworkMapBuilder) OnPeerAddedIncremental(acc *Account, peerID string) error { tt := time.Now() - account := b.account.Load() - peer := account.GetPeer(peerID) + peer := acc.GetPeer(peerID) if peer == nil { - return fmt.Errorf("peer %s not found in account", peerID) + return fmt.Errorf("NetworkMapBuilder: peer %s not found in account", peerID) } b.cache.mu.Lock() defer b.cache.mu.Unlock() + account := b.updateAccountLocked(acc) + log.Debugf("NetworkMapBuilder: Adding peer %s (IP: %s) to cache", peerID, peer.IP.String()) b.validatedPeers[peerID] = struct{}{} @@ -1195,6 +1317,13 @@ func (b *NetworkMapBuilder) updateIndexesForNewPeer(account *Account, peerID str } func (b *NetworkMapBuilder) incrementalUpdateAffectedPeers(account *Account, newPeerID string, peerGroups []string) { + updates := b.collectDeltasForNewPeer(account, newPeerID, peerGroups) + for affectedPeerID, delta := range updates { + b.applyDeltaToPeer(account, affectedPeerID, delta) + } +} + +func (b *NetworkMapBuilder) collectDeltasForNewPeer(account *Account, newPeerID string, peerGroups []string) map[string]*PeerUpdateDelta { updates := b.calculateIncrementalUpdates(account, newPeerID, peerGroups) if b.isPeerRouter(account, newPeerID) { @@ -1214,9 +1343,7 @@ func (b *NetworkMapBuilder) incrementalUpdateAffectedPeers(account *Account, new } } - for affectedPeerID, delta := range updates { - b.applyDeltaToPeer(account, affectedPeerID, delta) - } + return updates } func (b *NetworkMapBuilder) findPeersAffectedByNewRouter(account *Account, newRouterID string, routerGroups []string) map[string]struct{} { @@ -1410,8 +1537,8 @@ func (b *NetworkMapBuilder) calculateNewRouterNetworkResourceUpdates( updates[peerID] = delta } - if delta.AddConnectedPeer == "" { - delta.AddConnectedPeer = newPeerID + if !slices.Contains(delta.AddConnectedPeers, newPeerID) { + delta.AddConnectedPeers = append(delta.AddConnectedPeers, newPeerID) } delta.RebuildRoutesView = true @@ -1540,8 +1667,8 @@ func (b *NetworkMapBuilder) calculateNetworkResourceFirewallUpdates( updates[routerPeerID] = delta } - if delta.AddConnectedPeer == "" { - delta.AddConnectedPeer = newPeerID + if !slices.Contains(delta.AddConnectedPeers, newPeerID) { + delta.AddConnectedPeers = append(delta.AddConnectedPeers, newPeerID) } delta.RebuildRoutesView = true @@ -1551,13 +1678,63 @@ func (b *NetworkMapBuilder) calculateNetworkResourceFirewallUpdates( type PeerUpdateDelta struct { PeerID string - AddConnectedPeer string + AddConnectedPeers []string AddFirewallRules []*FirewallRuleDelta AddRoutes []route.ID UpdateRouteFirewallRules []*RouteFirewallRuleUpdate UpdateDNS bool RebuildRoutesView bool } + +func (d *PeerUpdateDelta) mergeFrom(other *PeerUpdateDelta) { + for _, peerID := range other.AddConnectedPeers { + if !slices.Contains(d.AddConnectedPeers, peerID) { + d.AddConnectedPeers = append(d.AddConnectedPeers, peerID) + } + } + + existingRuleIDs := make(map[string]struct{}, len(d.AddFirewallRules)) + for _, rule := range d.AddFirewallRules { + existingRuleIDs[rule.RuleID] = struct{}{} + } + for _, rule := range other.AddFirewallRules { + if _, exists := existingRuleIDs[rule.RuleID]; !exists { + d.AddFirewallRules = append(d.AddFirewallRules, rule) + existingRuleIDs[rule.RuleID] = struct{}{} + } + } + + for _, routeID := range other.AddRoutes { + if !slices.Contains(d.AddRoutes, routeID) { + d.AddRoutes = append(d.AddRoutes, routeID) + } + } + + existingRouteUpdates := make(map[string]map[string]struct{}) + for _, update := range d.UpdateRouteFirewallRules { + if existingRouteUpdates[update.RuleID] == nil { + existingRouteUpdates[update.RuleID] = make(map[string]struct{}) + } + existingRouteUpdates[update.RuleID][update.AddSourceIP] = struct{}{} + } + for _, update := range other.UpdateRouteFirewallRules { + if existingRouteUpdates[update.RuleID] == nil { + existingRouteUpdates[update.RuleID] = make(map[string]struct{}) + } + if _, exists := existingRouteUpdates[update.RuleID][update.AddSourceIP]; !exists { + d.UpdateRouteFirewallRules = append(d.UpdateRouteFirewallRules, update) + existingRouteUpdates[update.RuleID][update.AddSourceIP] = struct{}{} + } + } + + if other.UpdateDNS { + d.UpdateDNS = true + } + if other.RebuildRoutesView { + d.RebuildRoutesView = true + } +} + type FirewallRuleDelta struct { Rule *FirewallRule RuleID string @@ -1659,11 +1836,13 @@ func (b *NetworkMapBuilder) addOrUpdateFirewallRuleInDelta( delta := updates[targetPeerID] if delta == nil { delta = &PeerUpdateDelta{ - PeerID: targetPeerID, - AddConnectedPeer: newPeerID, - AddFirewallRules: make([]*FirewallRuleDelta, 0), + PeerID: targetPeerID, + AddConnectedPeers: []string{newPeerID}, + AddFirewallRules: make([]*FirewallRuleDelta, 0), } updates[targetPeerID] = delta + } else if !slices.Contains(delta.AddConnectedPeers, newPeerID) { + delta.AddConnectedPeers = append(delta.AddConnectedPeers, newPeerID) } baseRule.PeerIP = peerIP @@ -1689,10 +1868,12 @@ func (b *NetworkMapBuilder) addOrUpdateFirewallRuleInDelta( } func (b *NetworkMapBuilder) applyDeltaToPeer(account *Account, peerID string, delta *PeerUpdateDelta) { - if delta.AddConnectedPeer != "" || len(delta.AddFirewallRules) > 0 { + if len(delta.AddConnectedPeers) > 0 || len(delta.AddFirewallRules) > 0 { if aclView := b.cache.peerACLs[peerID]; aclView != nil { - if delta.AddConnectedPeer != "" && !slices.Contains(aclView.ConnectedPeerIDs, delta.AddConnectedPeer) { - aclView.ConnectedPeerIDs = append(aclView.ConnectedPeerIDs, delta.AddConnectedPeer) + for _, connectedPeerID := range delta.AddConnectedPeers { + if !slices.Contains(aclView.ConnectedPeerIDs, connectedPeerID) { + aclView.ConnectedPeerIDs = append(aclView.ConnectedPeerIDs, connectedPeerID) + } } for _, ruleDelta := range delta.AddFirewallRules { @@ -1748,11 +1929,11 @@ func (b *NetworkMapBuilder) updateRouteFirewallRules(routesView *PeerRoutesView, } } -func (b *NetworkMapBuilder) OnPeerDeleted(peerID string) error { +func (b *NetworkMapBuilder) OnPeerDeleted(acc *Account, peerID string) error { b.cache.mu.Lock() defer b.cache.mu.Unlock() - account := b.account.Load() + account := b.updateAccountLocked(acc) deletedPeer := b.cache.globalPeers[peerID] if deletedPeer == nil { @@ -1858,11 +2039,16 @@ func (b *NetworkMapBuilder) OnPeerDeleted(peerID string) error { b.buildPeerRoutesView(account, affectedPeerID) } - peerDeletionUpdates := b.findPeersAffectedByDeletedPeerACL(peerID, peerIP) + peersToRebuildACL := make(map[string]struct{}) + peerDeletionUpdates := b.findPeersAffectedByDeletedPeerACL(peerID, peerIP, peerGroups, peersToRebuildACL) for affectedPeerID, updates := range peerDeletionUpdates { b.applyDeletionUpdates(affectedPeerID, updates) } + for affectedPeerID := range peersToRebuildACL { + b.buildPeerACLView(account, affectedPeerID) + } + b.cleanupUnusedRules() log.Debugf("NetworkMapBuilder: Deleted peer %s, affected %d other peers", peerID, len(affectedPeers)) @@ -1873,6 +2059,8 @@ func (b *NetworkMapBuilder) OnPeerDeleted(peerID string) error { func (b *NetworkMapBuilder) findPeersAffectedByDeletedPeerACL( deletedPeerID string, peerIP string, + peerGroups []string, + peersToRebuildACL map[string]struct{}, ) map[string]*PeerDeletionUpdate { affected := make(map[string]*PeerDeletionUpdate) @@ -1882,26 +2070,47 @@ func (b *NetworkMapBuilder) findPeersAffectedByDeletedPeerACL( continue } - if !slices.Contains(aclView.ConnectedPeerIDs, deletedPeerID) { - continue - } - if affected[peerID] == nil { - affected[peerID] = &PeerDeletionUpdate{ - RemovePeerID: deletedPeerID, - PeerIP: peerIP, + if slices.Contains(aclView.ConnectedPeerIDs, deletedPeerID) { + peersToRebuildACL[peerID] = struct{}{} + if affected[peerID] == nil { + affected[peerID] = &PeerDeletionUpdate{ + RemovePeerID: deletedPeerID, + PeerIP: peerIP, + } } } + } - for _, ruleID := range aclView.FirewallRuleIDs { - if rule := b.cache.globalRules[ruleID]; rule != nil && rule.PeerIP == peerIP { - affected[peerID].RemoveFirewallRuleIDs = append( - affected[peerID].RemoveFirewallRuleIDs, - ruleID, - ) + affectedRouteOwners := make(map[string]struct{}) + + for _, groupID := range peerGroups { + if routeMap, ok := b.cache.acgToRoutes[groupID]; ok { + for _, info := range routeMap { + if info.PeerID != deletedPeerID { + affectedRouteOwners[info.PeerID] = struct{}{} + } } } } + for _, info := range b.cache.noACGRoutes { + if info.PeerID != deletedPeerID { + affectedRouteOwners[info.PeerID] = struct{}{} + } + } + + for ownerPeerID := range affectedRouteOwners { + if affected[ownerPeerID] == nil { + affected[ownerPeerID] = &PeerDeletionUpdate{ + RemovePeerID: deletedPeerID, + PeerIP: peerIP, + RemoveFromSourceRanges: true, + } + } else { + affected[ownerPeerID].RemoveFromSourceRanges = true + } + } + return affected } @@ -1914,18 +2123,6 @@ type PeerDeletionUpdate struct { } func (b *NetworkMapBuilder) applyDeletionUpdates(peerID string, updates *PeerDeletionUpdate) { - if aclView := b.cache.peerACLs[peerID]; aclView != nil { - aclView.ConnectedPeerIDs = slices.DeleteFunc(aclView.ConnectedPeerIDs, func(id string) bool { - return id == updates.RemovePeerID - }) - - if len(updates.RemoveFirewallRuleIDs) > 0 { - aclView.FirewallRuleIDs = slices.DeleteFunc(aclView.FirewallRuleIDs, func(ruleID string) bool { - return slices.Contains(updates.RemoveFirewallRuleIDs, ruleID) - }) - } - } - if routesView := b.cache.peerRoutes[peerID]; routesView != nil { if len(updates.RemoveRouteIDs) > 0 { routesView.NetworkResourceIDs = slices.DeleteFunc(routesView.NetworkResourceIDs, func(routeID route.ID) bool { diff --git a/management/server/user.go b/management/server/user.go index 9d4620462..e393b2c04 100644 --- a/management/server/user.go +++ b/management/server/user.go @@ -994,6 +994,12 @@ func (am *DefaultAccountManager) expireAndUpdatePeers(ctx context.Context, accou ) } + if len(peerIDs) != 0 { + if err := am.Store.IncrementNetworkSerial(ctx, accountID); err != nil { + return err + } + } + err = am.networkMapController.OnPeersUpdated(ctx, accountID, peerIDs) if err != nil { return fmt.Errorf("notify network map controller of peer update: %w", err) From f012fb85924adf2e1845160c78e409e2e576f7dc Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 7 Jan 2026 12:18:04 +0800 Subject: [PATCH 018/374] [client] Add port forwarding to ssh proxy (#5031) * Implement port forwarding for the ssh proxy * Allow user switching for port forwarding --- client/cmd/ssh.go | 41 +++- client/proto/daemon.pb.go | 13 +- client/proto/daemon.proto | 1 + client/server/server.go | 1 + client/ssh/auth/auth.go | 29 +-- client/ssh/auth/auth_test.go | 110 +++++----- client/ssh/client/client.go | 36 +--- client/ssh/common.go | 61 ++++++ client/ssh/proxy/proxy.go | 254 ++++++++++++++++++++-- client/ssh/server/port_forwarding.go | 216 ++++++++++--------- client/ssh/server/server.go | 274 +++++++++++++++++------- client/ssh/server/server_config_test.go | 182 ++++++++++++++++ client/ssh/server/session_handlers.go | 117 +++++----- client/ssh/server/sftp.go | 28 ++- client/status/status.go | 13 +- 15 files changed, 1006 insertions(+), 370 deletions(-) diff --git a/client/cmd/ssh.go b/client/cmd/ssh.go index 525bcdef1..0acf0b133 100644 --- a/client/cmd/ssh.go +++ b/client/cmd/ssh.go @@ -634,7 +634,11 @@ func parseAndStartLocalForward(ctx context.Context, c *sshclient.Client, forward return err } - cmd.Printf("Local port forwarding: %s -> %s\n", localAddr, remoteAddr) + if err := validateDestinationPort(remoteAddr); err != nil { + return fmt.Errorf("invalid remote address: %w", err) + } + + log.Debugf("Local port forwarding: %s -> %s", localAddr, remoteAddr) go func() { if err := c.LocalPortForward(ctx, localAddr, remoteAddr); err != nil && !errors.Is(err, context.Canceled) { @@ -652,7 +656,11 @@ func parseAndStartRemoteForward(ctx context.Context, c *sshclient.Client, forwar return err } - cmd.Printf("Remote port forwarding: %s -> %s\n", remoteAddr, localAddr) + if err := validateDestinationPort(localAddr); err != nil { + return fmt.Errorf("invalid local address: %w", err) + } + + log.Debugf("Remote port forwarding: %s -> %s", remoteAddr, localAddr) go func() { if err := c.RemotePortForward(ctx, remoteAddr, localAddr); err != nil && !errors.Is(err, context.Canceled) { @@ -663,6 +671,35 @@ func parseAndStartRemoteForward(ctx context.Context, c *sshclient.Client, forwar return nil } +// validateDestinationPort checks that the destination address has a valid port. +// Port 0 is only valid for bind addresses (where the OS picks an available port), +// not for destination addresses where we need to connect. +func validateDestinationPort(addr string) error { + if strings.HasPrefix(addr, "/") || strings.HasPrefix(addr, "./") { + return nil + } + + _, portStr, err := net.SplitHostPort(addr) + if err != nil { + return fmt.Errorf("parse address %s: %w", addr, err) + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return fmt.Errorf("invalid port %s: %w", portStr, err) + } + + if port == 0 { + return fmt.Errorf("port 0 is not valid for destination address") + } + + if port < 0 || port > 65535 { + return fmt.Errorf("port %d out of range (1-65535)", port) + } + + return nil +} + // parsePortForwardSpec parses port forward specifications like "8080:localhost:80" or "[::1]:8080:localhost:80". // Also supports Unix sockets like "8080:/tmp/socket" or "127.0.0.1:8080:/tmp/socket". func parsePortForwardSpec(spec string) (string, string, error) { diff --git a/client/proto/daemon.pb.go b/client/proto/daemon.pb.go index 5ae0c1ad1..5d56befc7 100644 --- a/client/proto/daemon.pb.go +++ b/client/proto/daemon.pb.go @@ -2013,6 +2013,7 @@ type SSHSessionInfo struct { RemoteAddress string `protobuf:"bytes,2,opt,name=remoteAddress,proto3" json:"remoteAddress,omitempty"` Command string `protobuf:"bytes,3,opt,name=command,proto3" json:"command,omitempty"` JwtUsername string `protobuf:"bytes,4,opt,name=jwtUsername,proto3" json:"jwtUsername,omitempty"` + PortForwards []string `protobuf:"bytes,5,rep,name=portForwards,proto3" json:"portForwards,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -2075,6 +2076,13 @@ func (x *SSHSessionInfo) GetJwtUsername() string { return "" } +func (x *SSHSessionInfo) GetPortForwards() []string { + if x != nil { + return x.PortForwards + } + return nil +} + // SSHServerState contains the latest state of the SSH server type SSHServerState struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -5706,12 +5714,13 @@ const file_daemon_proto_rawDesc = "" + "\aservers\x18\x01 \x03(\tR\aservers\x12\x18\n" + "\adomains\x18\x02 \x03(\tR\adomains\x12\x18\n" + "\aenabled\x18\x03 \x01(\bR\aenabled\x12\x14\n" + - "\x05error\x18\x04 \x01(\tR\x05error\"\x8e\x01\n" + + "\x05error\x18\x04 \x01(\tR\x05error\"\xb2\x01\n" + "\x0eSSHSessionInfo\x12\x1a\n" + "\busername\x18\x01 \x01(\tR\busername\x12$\n" + "\rremoteAddress\x18\x02 \x01(\tR\rremoteAddress\x12\x18\n" + "\acommand\x18\x03 \x01(\tR\acommand\x12 \n" + - "\vjwtUsername\x18\x04 \x01(\tR\vjwtUsername\"^\n" + + "\vjwtUsername\x18\x04 \x01(\tR\vjwtUsername\x12\"\n" + + "\fportForwards\x18\x05 \x03(\tR\fportForwards\"^\n" + "\x0eSSHServerState\x12\x18\n" + "\aenabled\x18\x01 \x01(\bR\aenabled\x122\n" + "\bsessions\x18\x02 \x03(\v2\x16.daemon.SSHSessionInfoR\bsessions\"\xaf\x04\n" + diff --git a/client/proto/daemon.proto b/client/proto/daemon.proto index 5f30bfe4b..b75ca821a 100644 --- a/client/proto/daemon.proto +++ b/client/proto/daemon.proto @@ -372,6 +372,7 @@ message SSHSessionInfo { string remoteAddress = 2; string command = 3; string jwtUsername = 4; + repeated string portForwards = 5; } // SSHServerState contains the latest state of the SSH server diff --git a/client/server/server.go b/client/server/server.go index 35ac04381..7b6c4e98c 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -1104,6 +1104,7 @@ func (s *Server) getSSHServerState() *proto.SSHServerState { RemoteAddress: session.RemoteAddress, Command: session.Command, JwtUsername: session.JWTUsername, + PortForwards: session.PortForwards, }) } diff --git a/client/ssh/auth/auth.go b/client/ssh/auth/auth.go index 488b6e12e..079282fdc 100644 --- a/client/ssh/auth/auth.go +++ b/client/ssh/auth/auth.go @@ -98,19 +98,17 @@ func (a *Authorizer) Update(config *Config) { len(config.AuthorizedUsers), len(machineUsers)) } -// Authorize validates if a user is authorized to login as the specified OS user -// Returns nil if authorized, or an error describing why authorization failed -func (a *Authorizer) Authorize(jwtUserID, osUsername string) error { +// Authorize validates if a user is authorized to login as the specified OS user. +// Returns a success message describing how authorization was granted, or an error. +func (a *Authorizer) Authorize(jwtUserID, osUsername string) (string, error) { if jwtUserID == "" { - log.Warnf("SSH auth denied: JWT user ID is empty for OS user '%s'", osUsername) - return ErrEmptyUserID + return "", fmt.Errorf("JWT user ID is empty for OS user %q: %w", osUsername, ErrEmptyUserID) } // Hash the JWT user ID for comparison hashedUserID, err := sshuserhash.HashUserID(jwtUserID) if err != nil { - log.Errorf("SSH auth denied: failed to hash user ID '%s' for OS user '%s': %v", jwtUserID, osUsername, err) - return fmt.Errorf("failed to hash user ID: %w", err) + return "", fmt.Errorf("hash user ID %q for OS user %q: %w", jwtUserID, osUsername, err) } a.mu.RLock() @@ -119,8 +117,7 @@ func (a *Authorizer) Authorize(jwtUserID, osUsername string) error { // Find the index of this user in the authorized list userIndex, found := a.findUserIndex(hashedUserID) if !found { - log.Warnf("SSH auth denied: user '%s' (hash: %s) not in authorized list for OS user '%s'", jwtUserID, hashedUserID, osUsername) - return ErrUserNotAuthorized + return "", fmt.Errorf("user %q (hash: %s) not in authorized list for OS user %q: %w", jwtUserID, hashedUserID, osUsername, ErrUserNotAuthorized) } return a.checkMachineUserMapping(jwtUserID, osUsername, userIndex) @@ -128,12 +125,11 @@ func (a *Authorizer) Authorize(jwtUserID, osUsername string) error { // checkMachineUserMapping validates if a user's index is authorized for the specified OS user // Checks wildcard mapping first, then specific OS user mappings -func (a *Authorizer) checkMachineUserMapping(jwtUserID, osUsername string, userIndex int) error { +func (a *Authorizer) checkMachineUserMapping(jwtUserID, osUsername string, userIndex int) (string, error) { // If wildcard exists and user's index is in the wildcard list, allow access to any OS user if wildcardIndexes, hasWildcard := a.machineUsers[Wildcard]; hasWildcard { if a.isIndexInList(uint32(userIndex), wildcardIndexes) { - log.Infof("SSH auth granted: user '%s' authorized for OS user '%s' via wildcard (index: %d)", jwtUserID, osUsername, userIndex) - return nil + return fmt.Sprintf("granted via wildcard (index: %d)", userIndex), nil } } @@ -141,18 +137,15 @@ func (a *Authorizer) checkMachineUserMapping(jwtUserID, osUsername string, userI allowedIndexes, hasMachineUserMapping := a.machineUsers[osUsername] if !hasMachineUserMapping { // No mapping for this OS user - deny by default (fail closed) - log.Warnf("SSH auth denied: no machine user mapping for OS user '%s' (JWT user: %s)", osUsername, jwtUserID) - return ErrNoMachineUserMapping + return "", fmt.Errorf("no machine user mapping for OS user %q (JWT user: %s): %w", osUsername, jwtUserID, ErrNoMachineUserMapping) } // Check if user's index is in the allowed indexes for this specific OS user if !a.isIndexInList(uint32(userIndex), allowedIndexes) { - log.Warnf("SSH auth denied: user '%s' not mapped to OS user '%s' (user index: %d)", jwtUserID, osUsername, userIndex) - return ErrUserNotMappedToOSUser + return "", fmt.Errorf("user %q not mapped to OS user %q (index: %d): %w", jwtUserID, osUsername, userIndex, ErrUserNotMappedToOSUser) } - log.Infof("SSH auth granted: user '%s' authorized for OS user '%s' (index: %d)", jwtUserID, osUsername, userIndex) - return nil + return fmt.Sprintf("granted (index: %d)", userIndex), nil } // GetUserIDClaim returns the JWT claim name used to extract user IDs diff --git a/client/ssh/auth/auth_test.go b/client/ssh/auth/auth_test.go index 2b3b5a414..fa27b72e8 100644 --- a/client/ssh/auth/auth_test.go +++ b/client/ssh/auth/auth_test.go @@ -24,7 +24,7 @@ func TestAuthorizer_Authorize_UserNotInList(t *testing.T) { authorizer.Update(config) // Try to authorize a different user - err = authorizer.Authorize("unauthorized-user", "root") + _, err = authorizer.Authorize("unauthorized-user", "root") assert.Error(t, err) assert.ErrorIs(t, err, ErrUserNotAuthorized) } @@ -45,15 +45,15 @@ func TestAuthorizer_Authorize_UserInList_NoMachineUserRestrictions(t *testing.T) authorizer.Update(config) // All attempts should fail when no machine user mappings exist (fail closed) - err = authorizer.Authorize("user1", "root") + _, err = authorizer.Authorize("user1", "root") assert.Error(t, err) assert.ErrorIs(t, err, ErrNoMachineUserMapping) - err = authorizer.Authorize("user2", "admin") + _, err = authorizer.Authorize("user2", "admin") assert.Error(t, err) assert.ErrorIs(t, err, ErrNoMachineUserMapping) - err = authorizer.Authorize("user1", "postgres") + _, err = authorizer.Authorize("user1", "postgres") assert.Error(t, err) assert.ErrorIs(t, err, ErrNoMachineUserMapping) } @@ -80,21 +80,21 @@ func TestAuthorizer_Authorize_UserInList_WithMachineUserMapping_Allowed(t *testi authorizer.Update(config) // user1 (index 0) should access root and admin - err = authorizer.Authorize("user1", "root") + _, err = authorizer.Authorize("user1", "root") assert.NoError(t, err) - err = authorizer.Authorize("user1", "admin") + _, err = authorizer.Authorize("user1", "admin") assert.NoError(t, err) // user2 (index 1) should access root and postgres - err = authorizer.Authorize("user2", "root") + _, err = authorizer.Authorize("user2", "root") assert.NoError(t, err) - err = authorizer.Authorize("user2", "postgres") + _, err = authorizer.Authorize("user2", "postgres") assert.NoError(t, err) // user3 (index 2) should access postgres - err = authorizer.Authorize("user3", "postgres") + _, err = authorizer.Authorize("user3", "postgres") assert.NoError(t, err) } @@ -121,22 +121,22 @@ func TestAuthorizer_Authorize_UserInList_WithMachineUserMapping_Denied(t *testin authorizer.Update(config) // user1 (index 0) should NOT access postgres - err = authorizer.Authorize("user1", "postgres") + _, err = authorizer.Authorize("user1", "postgres") assert.Error(t, err) assert.ErrorIs(t, err, ErrUserNotMappedToOSUser) // user2 (index 1) should NOT access admin - err = authorizer.Authorize("user2", "admin") + _, err = authorizer.Authorize("user2", "admin") assert.Error(t, err) assert.ErrorIs(t, err, ErrUserNotMappedToOSUser) // user3 (index 2) should NOT access root - err = authorizer.Authorize("user3", "root") + _, err = authorizer.Authorize("user3", "root") assert.Error(t, err) assert.ErrorIs(t, err, ErrUserNotMappedToOSUser) // user3 (index 2) should NOT access admin - err = authorizer.Authorize("user3", "admin") + _, err = authorizer.Authorize("user3", "admin") assert.Error(t, err) assert.ErrorIs(t, err, ErrUserNotMappedToOSUser) } @@ -158,7 +158,7 @@ func TestAuthorizer_Authorize_UserInList_OSUserNotInMapping(t *testing.T) { authorizer.Update(config) // user1 should NOT access an unmapped OS user (fail closed) - err = authorizer.Authorize("user1", "postgres") + _, err = authorizer.Authorize("user1", "postgres") assert.Error(t, err) assert.ErrorIs(t, err, ErrNoMachineUserMapping) } @@ -178,7 +178,7 @@ func TestAuthorizer_Authorize_EmptyJWTUserID(t *testing.T) { authorizer.Update(config) // Empty user ID should fail - err = authorizer.Authorize("", "root") + _, err = authorizer.Authorize("", "root") assert.Error(t, err) assert.ErrorIs(t, err, ErrEmptyUserID) } @@ -211,12 +211,12 @@ func TestAuthorizer_Authorize_MultipleUsersInList(t *testing.T) { // All users should be authorized for root for i := 0; i < 10; i++ { - err := authorizer.Authorize("user"+string(rune('0'+i)), "root") + _, err := authorizer.Authorize("user"+string(rune('0'+i)), "root") assert.NoError(t, err, "user%d should be authorized", i) } // User not in list should fail - err := authorizer.Authorize("unknown-user", "root") + _, err := authorizer.Authorize("unknown-user", "root") assert.Error(t, err) assert.ErrorIs(t, err, ErrUserNotAuthorized) } @@ -236,14 +236,14 @@ func TestAuthorizer_Update_ClearsConfiguration(t *testing.T) { authorizer.Update(config) // user1 should be authorized - err = authorizer.Authorize("user1", "root") + _, err = authorizer.Authorize("user1", "root") assert.NoError(t, err) // Clear configuration authorizer.Update(nil) // user1 should no longer be authorized - err = authorizer.Authorize("user1", "root") + _, err = authorizer.Authorize("user1", "root") assert.Error(t, err) assert.ErrorIs(t, err, ErrUserNotAuthorized) } @@ -267,16 +267,16 @@ func TestAuthorizer_Update_EmptyMachineUsersListEntries(t *testing.T) { authorizer.Update(config) // root should work - err = authorizer.Authorize("user1", "root") + _, err = authorizer.Authorize("user1", "root") assert.NoError(t, err) // postgres should fail (no mapping) - err = authorizer.Authorize("user1", "postgres") + _, err = authorizer.Authorize("user1", "postgres") assert.Error(t, err) assert.ErrorIs(t, err, ErrNoMachineUserMapping) // admin should fail (no mapping) - err = authorizer.Authorize("user1", "admin") + _, err = authorizer.Authorize("user1", "admin") assert.Error(t, err) assert.ErrorIs(t, err, ErrNoMachineUserMapping) } @@ -301,7 +301,7 @@ func TestAuthorizer_CustomUserIDClaim(t *testing.T) { assert.Equal(t, "email", authorizer.GetUserIDClaim()) // Authorize with email as user ID - err = authorizer.Authorize("user@example.com", "root") + _, err = authorizer.Authorize("user@example.com", "root") assert.NoError(t, err) } @@ -349,19 +349,19 @@ func TestAuthorizer_MachineUserMapping_LargeIndexes(t *testing.T) { authorizer.Update(config) // First user should have access - err := authorizer.Authorize("user"+string(rune(0)), "root") + _, err := authorizer.Authorize("user"+string(rune(0)), "root") assert.NoError(t, err) // Middle user should have access - err = authorizer.Authorize("user"+string(rune(500)), "root") + _, err = authorizer.Authorize("user"+string(rune(500)), "root") assert.NoError(t, err) // Last user should have access - err = authorizer.Authorize("user"+string(rune(999)), "root") + _, err = authorizer.Authorize("user"+string(rune(999)), "root") assert.NoError(t, err) // User not in mapping should NOT have access - err = authorizer.Authorize("user"+string(rune(100)), "root") + _, err = authorizer.Authorize("user"+string(rune(100)), "root") assert.Error(t, err) } @@ -393,7 +393,7 @@ func TestAuthorizer_ConcurrentAuthorization(t *testing.T) { if idx%2 == 0 { user = "user2" } - err := authorizer.Authorize(user, "root") + _, err := authorizer.Authorize(user, "root") errChan <- err }(i) } @@ -426,22 +426,22 @@ func TestAuthorizer_Wildcard_AllowsAllAuthorizedUsers(t *testing.T) { authorizer.Update(config) // All authorized users should be able to access any OS user - err = authorizer.Authorize("user1", "root") + _, err = authorizer.Authorize("user1", "root") assert.NoError(t, err) - err = authorizer.Authorize("user2", "postgres") + _, err = authorizer.Authorize("user2", "postgres") assert.NoError(t, err) - err = authorizer.Authorize("user3", "admin") + _, err = authorizer.Authorize("user3", "admin") assert.NoError(t, err) - err = authorizer.Authorize("user1", "ubuntu") + _, err = authorizer.Authorize("user1", "ubuntu") assert.NoError(t, err) - err = authorizer.Authorize("user2", "nginx") + _, err = authorizer.Authorize("user2", "nginx") assert.NoError(t, err) - err = authorizer.Authorize("user3", "docker") + _, err = authorizer.Authorize("user3", "docker") assert.NoError(t, err) } @@ -462,11 +462,11 @@ func TestAuthorizer_Wildcard_UnauthorizedUserStillDenied(t *testing.T) { authorizer.Update(config) // user1 should have access - err = authorizer.Authorize("user1", "root") + _, err = authorizer.Authorize("user1", "root") assert.NoError(t, err) // Unauthorized user should still be denied even with wildcard - err = authorizer.Authorize("unauthorized-user", "root") + _, err = authorizer.Authorize("unauthorized-user", "root") assert.Error(t, err) assert.ErrorIs(t, err, ErrUserNotAuthorized) } @@ -492,17 +492,17 @@ func TestAuthorizer_Wildcard_TakesPrecedenceOverSpecificMappings(t *testing.T) { authorizer.Update(config) // Both users should be able to access root via wildcard (takes precedence over specific mapping) - err = authorizer.Authorize("user1", "root") + _, err = authorizer.Authorize("user1", "root") assert.NoError(t, err) - err = authorizer.Authorize("user2", "root") + _, err = authorizer.Authorize("user2", "root") assert.NoError(t, err) // Both users should be able to access any other OS user via wildcard - err = authorizer.Authorize("user1", "postgres") + _, err = authorizer.Authorize("user1", "postgres") assert.NoError(t, err) - err = authorizer.Authorize("user2", "admin") + _, err = authorizer.Authorize("user2", "admin") assert.NoError(t, err) } @@ -526,29 +526,29 @@ func TestAuthorizer_NoWildcard_SpecificMappingsOnly(t *testing.T) { authorizer.Update(config) // user1 can access root - err = authorizer.Authorize("user1", "root") + _, err = authorizer.Authorize("user1", "root") assert.NoError(t, err) // user2 can access postgres - err = authorizer.Authorize("user2", "postgres") + _, err = authorizer.Authorize("user2", "postgres") assert.NoError(t, err) // user1 cannot access postgres - err = authorizer.Authorize("user1", "postgres") + _, err = authorizer.Authorize("user1", "postgres") assert.Error(t, err) assert.ErrorIs(t, err, ErrUserNotMappedToOSUser) // user2 cannot access root - err = authorizer.Authorize("user2", "root") + _, err = authorizer.Authorize("user2", "root") assert.Error(t, err) assert.ErrorIs(t, err, ErrUserNotMappedToOSUser) // Neither can access unmapped OS users - err = authorizer.Authorize("user1", "admin") + _, err = authorizer.Authorize("user1", "admin") assert.Error(t, err) assert.ErrorIs(t, err, ErrNoMachineUserMapping) - err = authorizer.Authorize("user2", "admin") + _, err = authorizer.Authorize("user2", "admin") assert.Error(t, err) assert.ErrorIs(t, err, ErrNoMachineUserMapping) } @@ -578,35 +578,35 @@ func TestAuthorizer_Wildcard_WithPartialIndexes_AllowsAllUsers(t *testing.T) { authorizer.Update(config) // wasm (index 0) should access any OS user via wildcard - err = authorizer.Authorize("wasm", "root") + _, err = authorizer.Authorize("wasm", "root") assert.NoError(t, err, "wasm should access root via wildcard") - err = authorizer.Authorize("wasm", "alice") + _, err = authorizer.Authorize("wasm", "alice") assert.NoError(t, err, "wasm should access alice via wildcard") - err = authorizer.Authorize("wasm", "bob") + _, err = authorizer.Authorize("wasm", "bob") assert.NoError(t, err, "wasm should access bob via wildcard") - err = authorizer.Authorize("wasm", "postgres") + _, err = authorizer.Authorize("wasm", "postgres") assert.NoError(t, err, "wasm should access postgres via wildcard") // user2 (index 1) should only access alice and bob (explicitly mapped), NOT root or postgres - err = authorizer.Authorize("user2", "alice") + _, err = authorizer.Authorize("user2", "alice") assert.NoError(t, err, "user2 should access alice via explicit mapping") - err = authorizer.Authorize("user2", "bob") + _, err = authorizer.Authorize("user2", "bob") assert.NoError(t, err, "user2 should access bob via explicit mapping") - err = authorizer.Authorize("user2", "root") + _, err = authorizer.Authorize("user2", "root") assert.Error(t, err, "user2 should NOT access root (not in wildcard indexes)") assert.ErrorIs(t, err, ErrNoMachineUserMapping) - err = authorizer.Authorize("user2", "postgres") + _, err = authorizer.Authorize("user2", "postgres") assert.Error(t, err, "user2 should NOT access postgres (not explicitly mapped)") assert.ErrorIs(t, err, ErrNoMachineUserMapping) // Unauthorized user should still be denied - err = authorizer.Authorize("user3", "root") + _, err = authorizer.Authorize("user3", "root") assert.Error(t, err) assert.ErrorIs(t, err, ErrUserNotAuthorized, "unauthorized user should be denied") } diff --git a/client/ssh/client/client.go b/client/ssh/client/client.go index aab222093..342da7303 100644 --- a/client/ssh/client/client.go +++ b/client/ssh/client/client.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "io" "net" "os" "path/filepath" @@ -551,14 +550,15 @@ func (c *Client) LocalPortForward(ctx context.Context, localAddr, remoteAddr str func (c *Client) handleLocalForward(localConn net.Conn, remoteAddr string) { defer func() { if err := localConn.Close(); err != nil { - log.Debugf("local connection close error: %v", err) + log.Debugf("local port forwarding: close local connection: %v", err) } }() channel, err := c.client.Dial("tcp", remoteAddr) if err != nil { - if strings.Contains(err.Error(), "administratively prohibited") { - _, _ = fmt.Fprintf(os.Stderr, "channel open failed: administratively prohibited: port forwarding is disabled\n") + var openErr *ssh.OpenChannelError + if errors.As(err, &openErr) && openErr.Reason == ssh.Prohibited { + _, _ = fmt.Fprintf(os.Stderr, "channel open failed: port forwarding is disabled\n") } else { log.Debugf("local port forwarding to %s failed: %v", remoteAddr, err) } @@ -566,19 +566,11 @@ func (c *Client) handleLocalForward(localConn net.Conn, remoteAddr string) { } defer func() { if err := channel.Close(); err != nil { - log.Debugf("remote channel close error: %v", err) + log.Debugf("local port forwarding: close remote channel: %v", err) } }() - go func() { - if _, err := io.Copy(channel, localConn); err != nil { - log.Debugf("local forward copy error (local->remote): %v", err) - } - }() - - if _, err := io.Copy(localConn, channel); err != nil { - log.Debugf("local forward copy error (remote->local): %v", err) - } + nbssh.BidirectionalCopy(log.NewEntry(log.StandardLogger()), localConn, channel) } // RemotePortForward sets up remote port forwarding, binding on remote and forwarding to localAddr @@ -633,7 +625,7 @@ func (c *Client) sendTCPIPForwardRequest(req tcpipForwardMsg) error { return fmt.Errorf("send tcpip-forward request: %w", err) } if !ok { - return fmt.Errorf("remote port forwarding denied by server (check if --allow-ssh-remote-port-forwarding is enabled)") + return fmt.Errorf("remote port forwarding denied by server") } return nil } @@ -676,7 +668,7 @@ func (c *Client) handleRemoteForwardChannel(newChan ssh.NewChannel, localAddr st } defer func() { if err := channel.Close(); err != nil { - log.Debugf("remote channel close error: %v", err) + log.Debugf("remote port forwarding: close remote channel: %v", err) } }() @@ -688,19 +680,11 @@ func (c *Client) handleRemoteForwardChannel(newChan ssh.NewChannel, localAddr st } defer func() { if err := localConn.Close(); err != nil { - log.Debugf("local connection close error: %v", err) + log.Debugf("remote port forwarding: close local connection: %v", err) } }() - go func() { - if _, err := io.Copy(localConn, channel); err != nil { - log.Debugf("remote forward copy error (remote->local): %v", err) - } - }() - - if _, err := io.Copy(channel, localConn); err != nil { - log.Debugf("remote forward copy error (local->remote): %v", err) - } + nbssh.BidirectionalCopy(log.NewEntry(log.StandardLogger()), localConn, channel) } // tcpipForwardMsg represents the structure for tcpip-forward requests diff --git a/client/ssh/common.go b/client/ssh/common.go index 6574437b5..f6aec5f9c 100644 --- a/client/ssh/common.go +++ b/client/ssh/common.go @@ -193,3 +193,64 @@ func buildAddressList(hostname string, remote net.Addr) []string { } return addresses } + +// BidirectionalCopy copies data bidirectionally between two io.ReadWriter connections. +// It waits for both directions to complete before returning. +// The caller is responsible for closing the connections. +func BidirectionalCopy(logger *log.Entry, rw1, rw2 io.ReadWriter) { + done := make(chan struct{}, 2) + + go func() { + if _, err := io.Copy(rw2, rw1); err != nil && !isExpectedCopyError(err) { + logger.Debugf("copy error (1->2): %v", err) + } + done <- struct{}{} + }() + + go func() { + if _, err := io.Copy(rw1, rw2); err != nil && !isExpectedCopyError(err) { + logger.Debugf("copy error (2->1): %v", err) + } + done <- struct{}{} + }() + + <-done + <-done +} + +func isExpectedCopyError(err error) bool { + return errors.Is(err, io.EOF) || errors.Is(err, context.Canceled) +} + +// BidirectionalCopyWithContext copies data bidirectionally between two io.ReadWriteCloser connections. +// It waits for both directions to complete or for context cancellation before returning. +// Both connections are closed when the function returns. +func BidirectionalCopyWithContext(logger *log.Entry, ctx context.Context, conn1, conn2 io.ReadWriteCloser) { + done := make(chan struct{}, 2) + + go func() { + if _, err := io.Copy(conn2, conn1); err != nil && !isExpectedCopyError(err) { + logger.Debugf("copy error (1->2): %v", err) + } + done <- struct{}{} + }() + + go func() { + if _, err := io.Copy(conn1, conn2); err != nil && !isExpectedCopyError(err) { + logger.Debugf("copy error (2->1): %v", err) + } + done <- struct{}{} + }() + + select { + case <-ctx.Done(): + case <-done: + select { + case <-ctx.Done(): + case <-done: + } + } + + _ = conn1.Close() + _ = conn2.Close() +} diff --git a/client/ssh/proxy/proxy.go b/client/ssh/proxy/proxy.go index 4e807e33c..cb1c36e13 100644 --- a/client/ssh/proxy/proxy.go +++ b/client/ssh/proxy/proxy.go @@ -2,6 +2,7 @@ package proxy import ( "context" + "encoding/binary" "errors" "fmt" "io" @@ -42,6 +43,14 @@ type SSHProxy struct { conn *grpc.ClientConn daemonClient proto.DaemonServiceClient browserOpener func(string) error + + mu sync.RWMutex + backendClient *cryptossh.Client + // jwtToken is set once in runProxySSHServer before any handlers are called, + // so concurrent access is safe without additional synchronization. + jwtToken string + + forwardedChannelsOnce sync.Once } func New(daemonAddr, targetHost string, targetPort int, stderr io.Writer, browserOpener func(string) error) (*SSHProxy, error) { @@ -63,6 +72,17 @@ func New(daemonAddr, targetHost string, targetPort int, stderr io.Writer, browse } func (p *SSHProxy) Close() error { + p.mu.Lock() + backendClient := p.backendClient + p.backendClient = nil + p.mu.Unlock() + + if backendClient != nil { + if err := backendClient.Close(); err != nil { + log.Debugf("close backend client: %v", err) + } + } + if p.conn != nil { return p.conn.Close() } @@ -77,16 +97,16 @@ func (p *SSHProxy) Connect(ctx context.Context) error { return fmt.Errorf(jwtAuthErrorMsg, err) } - return p.runProxySSHServer(ctx, jwtToken) + log.Debugf("JWT authentication successful, starting proxy to %s:%d", p.targetHost, p.targetPort) + return p.runProxySSHServer(jwtToken) } -func (p *SSHProxy) runProxySSHServer(ctx context.Context, jwtToken string) error { +func (p *SSHProxy) runProxySSHServer(jwtToken string) error { + p.jwtToken = jwtToken serverVersion := fmt.Sprintf("%s-%s", detection.ProxyIdentifier, version.NetbirdVersion()) sshServer := &ssh.Server{ - Handler: func(s ssh.Session) { - p.handleSSHSession(ctx, s, jwtToken) - }, + Handler: p.handleSSHSession, ChannelHandlers: map[string]ssh.ChannelHandler{ "session": ssh.DefaultSessionHandler, "direct-tcpip": p.directTCPIPHandler, @@ -119,15 +139,20 @@ func (p *SSHProxy) runProxySSHServer(ctx context.Context, jwtToken string) error return nil } -func (p *SSHProxy) handleSSHSession(ctx context.Context, session ssh.Session, jwtToken string) { - targetAddr := net.JoinHostPort(p.targetHost, strconv.Itoa(p.targetPort)) +func (p *SSHProxy) handleSSHSession(session ssh.Session) { + ptyReq, winCh, isPty := session.Pty() + hasCommand := len(session.Command()) > 0 - sshClient, err := p.dialBackend(ctx, targetAddr, session.User(), jwtToken) + sshClient, err := p.getOrCreateBackendClient(session.Context(), session.User()) if err != nil { _, _ = fmt.Fprintf(p.stderr, "SSH connection to NetBird server failed: %v\n", err) return } - defer func() { _ = sshClient.Close() }() + + if !isPty && !hasCommand { + p.handleNonInteractiveSession(session, sshClient) + return + } serverSession, err := sshClient.NewSession() if err != nil { @@ -140,7 +165,6 @@ func (p *SSHProxy) handleSSHSession(ctx context.Context, session ssh.Session, jw serverSession.Stdout = session serverSession.Stderr = session.Stderr() - ptyReq, winCh, isPty := session.Pty() if isPty { if err := serverSession.RequestPty(ptyReq.Term, ptyReq.Window.Width, ptyReq.Window.Height, nil); err != nil { log.Debugf("PTY request to backend: %v", err) @@ -155,7 +179,7 @@ func (p *SSHProxy) handleSSHSession(ctx context.Context, session ssh.Session, jw }() } - if len(session.Command()) > 0 { + if hasCommand { if err := serverSession.Run(strings.Join(session.Command(), " ")); err != nil { log.Debugf("run command: %v", err) p.handleProxyExitCode(session, err) @@ -176,12 +200,29 @@ func (p *SSHProxy) handleSSHSession(ctx context.Context, session ssh.Session, jw func (p *SSHProxy) handleProxyExitCode(session ssh.Session, err error) { var exitErr *cryptossh.ExitError if errors.As(err, &exitErr) { - if exitErr := session.Exit(exitErr.ExitStatus()); exitErr != nil { - log.Debugf("set exit status: %v", exitErr) + if err := session.Exit(exitErr.ExitStatus()); err != nil { + log.Debugf("set exit status: %v", err) } } } +func (p *SSHProxy) handleNonInteractiveSession(session ssh.Session, sshClient *cryptossh.Client) { + // Create a backend session to mirror the client's session request. + // This keeps the connection alive on the server side while port forwarding channels operate. + serverSession, err := sshClient.NewSession() + if err != nil { + _, _ = fmt.Fprintf(p.stderr, "create server session: %v\n", err) + return + } + defer func() { _ = serverSession.Close() }() + + <-session.Context().Done() + + if err := session.Exit(0); err != nil { + log.Debugf("session exit: %v", err) + } +} + func generateHostKey() (ssh.Signer, error) { keyPEM, err := nbssh.GeneratePrivateKey(nbssh.ED25519) if err != nil { @@ -250,8 +291,52 @@ func (c *stdioConn) SetWriteDeadline(_ time.Time) error { return nil } -func (p *SSHProxy) directTCPIPHandler(_ *ssh.Server, _ *cryptossh.ServerConn, newChan cryptossh.NewChannel, _ ssh.Context) { - _ = newChan.Reject(cryptossh.Prohibited, "port forwarding not supported in proxy") +// directTCPIPHandler handles local port forwarding (direct-tcpip channel). +func (p *SSHProxy) directTCPIPHandler(_ *ssh.Server, _ *cryptossh.ServerConn, newChan cryptossh.NewChannel, sshCtx ssh.Context) { + var payload struct { + DestAddr string + DestPort uint32 + OriginAddr string + OriginPort uint32 + } + if err := cryptossh.Unmarshal(newChan.ExtraData(), &payload); err != nil { + _, _ = fmt.Fprintf(p.stderr, "parse direct-tcpip payload: %v\n", err) + _ = newChan.Reject(cryptossh.ConnectionFailed, "invalid payload") + return + } + + dest := fmt.Sprintf("%s:%d", payload.DestAddr, payload.DestPort) + log.Debugf("local port forwarding: %s", dest) + + backendClient, err := p.getOrCreateBackendClient(sshCtx, sshCtx.User()) + if err != nil { + _, _ = fmt.Fprintf(p.stderr, "backend connection for port forwarding: %v\n", err) + _ = newChan.Reject(cryptossh.ConnectionFailed, "backend connection failed") + return + } + + backendChan, backendReqs, err := backendClient.OpenChannel("direct-tcpip", newChan.ExtraData()) + if err != nil { + _, _ = fmt.Fprintf(p.stderr, "open backend channel for %s: %v\n", dest, err) + var openErr *cryptossh.OpenChannelError + if errors.As(err, &openErr) { + _ = newChan.Reject(openErr.Reason, openErr.Message) + } else { + _ = newChan.Reject(cryptossh.ConnectionFailed, err.Error()) + } + return + } + go cryptossh.DiscardRequests(backendReqs) + + clientChan, clientReqs, err := newChan.Accept() + if err != nil { + log.Debugf("local port forwarding: accept channel: %v", err) + _ = backendChan.Close() + return + } + go cryptossh.DiscardRequests(clientReqs) + + nbssh.BidirectionalCopyWithContext(log.NewEntry(log.StandardLogger()), sshCtx, clientChan, backendChan) } func (p *SSHProxy) sftpSubsystemHandler(s ssh.Session, jwtToken string) { @@ -354,12 +439,143 @@ func (p *SSHProxy) runSFTPBridge(ctx context.Context, s ssh.Session, stdin io.Wr } } -func (p *SSHProxy) tcpipForwardHandler(_ ssh.Context, _ *ssh.Server, _ *cryptossh.Request) (bool, []byte) { - return false, []byte("port forwarding not supported in proxy") +// tcpipForwardHandler handles remote port forwarding (tcpip-forward request). +func (p *SSHProxy) tcpipForwardHandler(sshCtx ssh.Context, _ *ssh.Server, req *cryptossh.Request) (bool, []byte) { + var reqPayload struct { + Host string + Port uint32 + } + if err := cryptossh.Unmarshal(req.Payload, &reqPayload); err != nil { + _, _ = fmt.Fprintf(p.stderr, "parse tcpip-forward payload: %v\n", err) + return false, nil + } + + log.Debugf("tcpip-forward request for %s:%d", reqPayload.Host, reqPayload.Port) + + backendClient, err := p.getOrCreateBackendClient(sshCtx, sshCtx.User()) + if err != nil { + _, _ = fmt.Fprintf(p.stderr, "backend connection for remote port forwarding: %v\n", err) + return false, nil + } + + ok, payload, err := backendClient.SendRequest(req.Type, req.WantReply, req.Payload) + if err != nil { + _, _ = fmt.Fprintf(p.stderr, "forward tcpip-forward request for %s:%d: %v\n", reqPayload.Host, reqPayload.Port, err) + return false, nil + } + + if ok { + actualPort := reqPayload.Port + if reqPayload.Port == 0 && len(payload) >= 4 { + actualPort = binary.BigEndian.Uint32(payload) + } + log.Debugf("remote port forwarding established for %s:%d", reqPayload.Host, actualPort) + p.forwardedChannelsOnce.Do(func() { + go p.handleForwardedChannels(sshCtx, backendClient) + }) + } + + return ok, payload } -func (p *SSHProxy) cancelTcpipForwardHandler(_ ssh.Context, _ *ssh.Server, _ *cryptossh.Request) (bool, []byte) { - return true, nil +// cancelTcpipForwardHandler handles cancel-tcpip-forward request. +func (p *SSHProxy) cancelTcpipForwardHandler(_ ssh.Context, _ *ssh.Server, req *cryptossh.Request) (bool, []byte) { + var reqPayload struct { + Host string + Port uint32 + } + if err := cryptossh.Unmarshal(req.Payload, &reqPayload); err != nil { + _, _ = fmt.Fprintf(p.stderr, "parse cancel-tcpip-forward payload: %v\n", err) + return false, nil + } + + log.Debugf("cancel-tcpip-forward request for %s:%d", reqPayload.Host, reqPayload.Port) + + backendClient := p.getBackendClient() + if backendClient == nil { + return false, nil + } + + ok, payload, err := backendClient.SendRequest(req.Type, req.WantReply, req.Payload) + if err != nil { + _, _ = fmt.Fprintf(p.stderr, "cancel-tcpip-forward for %s:%d: %v\n", reqPayload.Host, reqPayload.Port, err) + return false, nil + } + + return ok, payload +} + +// getOrCreateBackendClient returns the existing backend client or creates a new one. +func (p *SSHProxy) getOrCreateBackendClient(ctx context.Context, user string) (*cryptossh.Client, error) { + p.mu.Lock() + defer p.mu.Unlock() + + if p.backendClient != nil { + return p.backendClient, nil + } + + targetAddr := net.JoinHostPort(p.targetHost, strconv.Itoa(p.targetPort)) + log.Debugf("connecting to backend %s", targetAddr) + + client, err := p.dialBackend(ctx, targetAddr, user, p.jwtToken) + if err != nil { + return nil, err + } + + log.Debugf("backend connection established to %s", targetAddr) + p.backendClient = client + return client, nil +} + +// getBackendClient returns the existing backend client or nil. +func (p *SSHProxy) getBackendClient() *cryptossh.Client { + p.mu.RLock() + defer p.mu.RUnlock() + return p.backendClient +} + +// handleForwardedChannels handles forwarded-tcpip channels from the backend for remote port forwarding. +// When the backend receives incoming connections on the forwarded port, it sends them as +// "forwarded-tcpip" channels which we need to proxy to the client. +func (p *SSHProxy) handleForwardedChannels(sshCtx ssh.Context, backendClient *cryptossh.Client) { + sshConn, ok := sshCtx.Value(ssh.ContextKeyConn).(*cryptossh.ServerConn) + if !ok || sshConn == nil { + log.Debugf("no SSH connection in context for forwarded channels") + return + } + + channelChan := backendClient.HandleChannelOpen("forwarded-tcpip") + for { + select { + case <-sshCtx.Done(): + return + case newChannel, ok := <-channelChan: + if !ok { + return + } + go p.handleForwardedChannel(sshCtx, sshConn, newChannel) + } + } +} + +// handleForwardedChannel handles a single forwarded-tcpip channel from the backend. +func (p *SSHProxy) handleForwardedChannel(sshCtx ssh.Context, sshConn *cryptossh.ServerConn, newChannel cryptossh.NewChannel) { + backendChan, backendReqs, err := newChannel.Accept() + if err != nil { + log.Debugf("remote port forwarding: accept from backend: %v", err) + return + } + go cryptossh.DiscardRequests(backendReqs) + + clientChan, clientReqs, err := sshConn.OpenChannel("forwarded-tcpip", newChannel.ExtraData()) + if err != nil { + log.Debugf("remote port forwarding: open to client: %v", err) + _ = backendChan.Close() + return + } + go cryptossh.DiscardRequests(clientReqs) + + nbssh.BidirectionalCopyWithContext(log.NewEntry(log.StandardLogger()), sshCtx, clientChan, backendChan) } func (p *SSHProxy) dialBackend(ctx context.Context, addr, user, jwtToken string) (*cryptossh.Client, error) { diff --git a/client/ssh/server/port_forwarding.go b/client/ssh/server/port_forwarding.go index 6138f9296..c60cf4f58 100644 --- a/client/ssh/server/port_forwarding.go +++ b/client/ssh/server/port_forwarding.go @@ -1,25 +1,32 @@ +// Package server implements port forwarding for the SSH server. +// +// Security note: Port forwarding runs in the main server process without privilege separation. +// The attack surface is primarily io.Copy through well-tested standard library code, making it +// lower risk than shell execution which uses privilege-separated child processes. We enforce +// user-level port restrictions: non-privileged users cannot bind to ports < 1024. package server import ( "encoding/binary" "fmt" - "io" "net" + "runtime" "strconv" "github.com/gliderlabs/ssh" log "github.com/sirupsen/logrus" cryptossh "golang.org/x/crypto/ssh" + + nbssh "github.com/netbirdio/netbird/client/ssh" ) -// SessionKey uniquely identifies an SSH session -type SessionKey string +const privilegedPortThreshold = 1024 -// ConnectionKey uniquely identifies a port forwarding connection within a session -type ConnectionKey string +// sessionKey uniquely identifies an SSH session +type sessionKey string -// ForwardKey uniquely identifies a port forwarding listener -type ForwardKey string +// forwardKey uniquely identifies a port forwarding listener +type forwardKey string // tcpipForwardMsg represents the structure for tcpip-forward SSH requests type tcpipForwardMsg struct { @@ -47,34 +54,32 @@ func (s *Server) configurePortForwarding(server *ssh.Server) { allowRemote := s.allowRemotePortForwarding server.LocalPortForwardingCallback = func(ctx ssh.Context, dstHost string, dstPort uint32) bool { + logger := s.getRequestLogger(ctx) if !allowLocal { - log.Warnf("local port forwarding denied for %s from %s: disabled by configuration", - net.JoinHostPort(dstHost, fmt.Sprintf("%d", dstPort)), ctx.RemoteAddr()) + logger.Warnf("local port forwarding denied for %s:%d: disabled", dstHost, dstPort) return false } if err := s.checkPortForwardingPrivileges(ctx, "local", dstPort); err != nil { - log.Warnf("local port forwarding denied for %s:%d from %s: %v", dstHost, dstPort, ctx.RemoteAddr(), err) + logger.Warnf("local port forwarding denied for %s:%d: %v", dstHost, dstPort, err) return false } - log.Debugf("local port forwarding allowed: %s:%d", dstHost, dstPort) return true } server.ReversePortForwardingCallback = func(ctx ssh.Context, bindHost string, bindPort uint32) bool { + logger := s.getRequestLogger(ctx) if !allowRemote { - log.Warnf("remote port forwarding denied for %s from %s: disabled by configuration", - net.JoinHostPort(bindHost, fmt.Sprintf("%d", bindPort)), ctx.RemoteAddr()) + logger.Warnf("remote port forwarding denied for %s:%d: disabled", bindHost, bindPort) return false } if err := s.checkPortForwardingPrivileges(ctx, "remote", bindPort); err != nil { - log.Warnf("remote port forwarding denied for %s:%d from %s: %v", bindHost, bindPort, ctx.RemoteAddr(), err) + logger.Warnf("remote port forwarding denied for %s:%d: %v", bindHost, bindPort, err) return false } - log.Debugf("remote port forwarding allowed: %s:%d", bindHost, bindPort) return true } @@ -82,23 +87,20 @@ func (s *Server) configurePortForwarding(server *ssh.Server) { } // checkPortForwardingPrivileges validates privilege requirements for port forwarding operations. -// Returns nil if allowed, error if denied. +// For remote port forwarding (binding), it enforces that non-privileged users cannot bind to +// ports below 1024, mirroring the restriction they would face if binding directly. +// +// Note: FeatureSupportsUserSwitch is true because we accept requests from any authenticated user, +// though we don't actually switch users - port forwarding runs in the server process. The resolved +// user is used for privileged port access checks. func (s *Server) checkPortForwardingPrivileges(ctx ssh.Context, forwardType string, port uint32) error { if ctx == nil { return fmt.Errorf("%s port forwarding denied: no context", forwardType) } - username := ctx.User() - remoteAddr := "unknown" - if ctx.RemoteAddr() != nil { - remoteAddr = ctx.RemoteAddr().String() - } - - logger := log.WithFields(log.Fields{"user": username, "remote": remoteAddr, "port": port}) - result := s.CheckPrivileges(PrivilegeCheckRequest{ - RequestedUsername: username, - FeatureSupportsUserSwitch: false, + RequestedUsername: ctx.User(), + FeatureSupportsUserSwitch: true, FeatureName: forwardType + " port forwarding", }) @@ -106,12 +108,42 @@ func (s *Server) checkPortForwardingPrivileges(ctx ssh.Context, forwardType stri return result.Error } - logger.Debugf("%s port forwarding allowed: user %s validated (port %d)", - forwardType, result.User.Username, port) + if err := s.checkPrivilegedPortAccess(forwardType, port, result); err != nil { + return err + } return nil } +// checkPrivilegedPortAccess enforces that non-privileged users cannot bind to privileged ports. +// This applies to remote port forwarding where the server binds a port on behalf of the user. +// On Windows, there is no privileged port restriction, so this check is skipped. +func (s *Server) checkPrivilegedPortAccess(forwardType string, port uint32, result PrivilegeCheckResult) error { + if runtime.GOOS == "windows" { + return nil + } + + isBindOperation := forwardType == "remote" || forwardType == "tcpip-forward" + if !isBindOperation { + return nil + } + + // Port 0 means "pick any available port", which will be >= 1024 + if port == 0 || port >= privilegedPortThreshold { + return nil + } + + if result.User != nil && isPrivilegedUsername(result.User.Username) { + return nil + } + + username := "unknown" + if result.User != nil { + username = result.User.Username + } + return fmt.Errorf("user %s cannot bind to privileged port %d (requires root)", username, port) +} + // tcpipForwardHandler handles tcpip-forward requests for remote port forwarding. func (s *Server) tcpipForwardHandler(ctx ssh.Context, _ *ssh.Server, req *cryptossh.Request) (bool, []byte) { logger := s.getRequestLogger(ctx) @@ -132,8 +164,6 @@ func (s *Server) tcpipForwardHandler(ctx ssh.Context, _ *ssh.Server, req *crypto return false, nil } - logger.Debugf("tcpip-forward request: %s:%d", payload.Host, payload.Port) - sshConn, err := s.getSSHConnection(ctx) if err != nil { logger.Warnf("tcpip-forward request denied: %v", err) @@ -153,8 +183,10 @@ func (s *Server) cancelTcpipForwardHandler(ctx ssh.Context, _ *ssh.Server, req * return false, nil } - key := ForwardKey(fmt.Sprintf("%s:%d", payload.Host, payload.Port)) + key := forwardKey(fmt.Sprintf("%s:%d", payload.Host, payload.Port)) if s.removeRemoteForwardListener(key) { + forwardAddr := fmt.Sprintf("-R %s:%d", payload.Host, payload.Port) + s.removeConnectionPortForward(ctx.RemoteAddr(), forwardAddr) logger.Infof("remote port forwarding cancelled: %s:%d", payload.Host, payload.Port) return true, nil } @@ -165,14 +197,11 @@ func (s *Server) cancelTcpipForwardHandler(ctx ssh.Context, _ *ssh.Server, req * // handleRemoteForwardListener handles incoming connections for remote port forwarding. func (s *Server) handleRemoteForwardListener(ctx ssh.Context, ln net.Listener, host string, port uint32) { - log.Debugf("starting remote forward listener handler for %s:%d", host, port) + logger := s.getRequestLogger(ctx) defer func() { - log.Debugf("cleaning up remote forward listener for %s:%d", host, port) if err := ln.Close(); err != nil { - log.Debugf("remote forward listener close error: %v", err) - } else { - log.Debugf("remote forward listener closed successfully for %s:%d", host, port) + logger.Debugf("remote forward listener close error for %s:%d: %v", host, port, err) } }() @@ -196,28 +225,43 @@ func (s *Server) handleRemoteForwardListener(ctx ssh.Context, ln net.Listener, h select { case result := <-acceptChan: if result.err != nil { - log.Debugf("remote forward accept error: %v", result.err) + logger.Debugf("remote forward accept error: %v", result.err) return } go s.handleRemoteForwardConnection(ctx, result.conn, host, port) case <-ctx.Done(): - log.Debugf("remote forward listener shutting down due to context cancellation for %s:%d", host, port) + logger.Debugf("remote forward listener shutting down for %s:%d", host, port) return } } } -// getRequestLogger creates a logger with user and remote address context +// getRequestLogger creates a logger with session/conn and jwt_user context func (s *Server) getRequestLogger(ctx ssh.Context) *log.Entry { - remoteAddr := "unknown" - username := "unknown" - if ctx != nil { - if ctx.RemoteAddr() != nil { - remoteAddr = ctx.RemoteAddr().String() + sessionKey := s.findSessionKeyByContext(ctx) + + s.mu.RLock() + defer s.mu.RUnlock() + + if state, exists := s.sessions[sessionKey]; exists { + logger := log.WithField("session", sessionKey) + if state.jwtUsername != "" { + logger = logger.WithField("jwt_user", state.jwtUsername) } - username = ctx.User() + return logger } - return log.WithFields(log.Fields{"user": username, "remote": remoteAddr}) + + if ctx.RemoteAddr() != nil { + if connState, exists := s.connections[connKey(ctx.RemoteAddr().String())]; exists { + return s.connLogger(connState) + } + } + + remoteAddr := "unknown" + if ctx.RemoteAddr() != nil { + remoteAddr = ctx.RemoteAddr().String() + } + return log.WithField("session", fmt.Sprintf("%s@%s", ctx.User(), remoteAddr)) } // isRemotePortForwardingAllowed checks if remote port forwarding is enabled @@ -227,6 +271,13 @@ func (s *Server) isRemotePortForwardingAllowed() bool { return s.allowRemotePortForwarding } +// isPortForwardingEnabled checks if any port forwarding (local or remote) is enabled +func (s *Server) isPortForwardingEnabled() bool { + s.mu.RLock() + defer s.mu.RUnlock() + return s.allowLocalPortForwarding || s.allowRemotePortForwarding +} + // parseTcpipForwardRequest parses the SSH request payload func (s *Server) parseTcpipForwardRequest(req *cryptossh.Request) (*tcpipForwardMsg, error) { var payload tcpipForwardMsg @@ -267,10 +318,11 @@ func (s *Server) setupDirectForward(ctx ssh.Context, logger *log.Entry, sshConn logger.Debugf("tcpip-forward allocated port %d for %s", actualPort, payload.Host) } - key := ForwardKey(fmt.Sprintf("%s:%d", payload.Host, payload.Port)) + key := forwardKey(fmt.Sprintf("%s:%d", payload.Host, payload.Port)) s.storeRemoteForwardListener(key, ln) - s.markConnectionActivePortForward(sshConn, ctx.User(), ctx.RemoteAddr().String()) + forwardAddr := fmt.Sprintf("-R %s:%d", payload.Host, actualPort) + s.addConnectionPortForward(ctx.User(), ctx.RemoteAddr(), forwardAddr) go s.handleRemoteForwardListener(ctx, ln, payload.Host, actualPort) response := make([]byte, 4) @@ -288,44 +340,34 @@ type acceptResult struct { // handleRemoteForwardConnection handles a single remote port forwarding connection func (s *Server) handleRemoteForwardConnection(ctx ssh.Context, conn net.Conn, host string, port uint32) { - sessionKey := s.findSessionKeyByContext(ctx) - connID := fmt.Sprintf("pf-%s->%s:%d", conn.RemoteAddr(), host, port) - logger := log.WithFields(log.Fields{ - "session": sessionKey, - "conn": connID, - }) + logger := s.getRequestLogger(ctx) - defer func() { - if err := conn.Close(); err != nil { - logger.Debugf("connection close error: %v", err) - } - }() - - sshConn := ctx.Value(ssh.ContextKeyConn).(*cryptossh.ServerConn) - if sshConn == nil { + sshConn, ok := ctx.Value(ssh.ContextKeyConn).(*cryptossh.ServerConn) + if !ok || sshConn == nil { logger.Debugf("remote forward: no SSH connection in context") + _ = conn.Close() return } remoteAddr, ok := conn.RemoteAddr().(*net.TCPAddr) if !ok { logger.Warnf("remote forward: non-TCP connection type: %T", conn.RemoteAddr()) + _ = conn.Close() return } - channel, err := s.openForwardChannel(sshConn, host, port, remoteAddr, logger) + channel, err := s.openForwardChannel(sshConn, host, port, remoteAddr) if err != nil { - logger.Debugf("open forward channel: %v", err) + logger.Debugf("open forward channel for %s:%d: %v", host, port, err) + _ = conn.Close() return } - s.proxyForwardConnection(ctx, logger, conn, channel) + nbssh.BidirectionalCopyWithContext(logger, ctx, conn, channel) } // openForwardChannel creates an SSH forwarded-tcpip channel -func (s *Server) openForwardChannel(sshConn *cryptossh.ServerConn, host string, port uint32, remoteAddr *net.TCPAddr, logger *log.Entry) (cryptossh.Channel, error) { - logger.Tracef("opening forwarded-tcpip channel for %s:%d", host, port) - +func (s *Server) openForwardChannel(sshConn *cryptossh.ServerConn, host string, port uint32, remoteAddr *net.TCPAddr) (cryptossh.Channel, error) { payload := struct { ConnectedAddress string ConnectedPort uint32 @@ -346,41 +388,3 @@ func (s *Server) openForwardChannel(sshConn *cryptossh.ServerConn, host string, go cryptossh.DiscardRequests(reqs) return channel, nil } - -// proxyForwardConnection handles bidirectional data transfer between connection and SSH channel -func (s *Server) proxyForwardConnection(ctx ssh.Context, logger *log.Entry, conn net.Conn, channel cryptossh.Channel) { - done := make(chan struct{}, 2) - - go func() { - if _, err := io.Copy(channel, conn); err != nil { - logger.Debugf("copy error (conn->channel): %v", err) - } - done <- struct{}{} - }() - - go func() { - if _, err := io.Copy(conn, channel); err != nil { - logger.Debugf("copy error (channel->conn): %v", err) - } - done <- struct{}{} - }() - - select { - case <-ctx.Done(): - logger.Debugf("session ended, closing connections") - case <-done: - // First copy finished, wait for second copy or context cancellation - select { - case <-ctx.Done(): - logger.Debugf("session ended, closing connections") - case <-done: - } - } - - if err := channel.Close(); err != nil { - logger.Debugf("channel close error: %v", err) - } - if err := conn.Close(); err != nil { - logger.Debugf("connection close error: %v", err) - } -} diff --git a/client/ssh/server/server.go b/client/ssh/server/server.go index 82718d002..f957e66a5 100644 --- a/client/ssh/server/server.go +++ b/client/ssh/server/server.go @@ -9,6 +9,7 @@ import ( "io" "net" "net/netip" + "slices" "strings" "sync" "time" @@ -40,6 +41,11 @@ const ( msgPrivilegedUserDisabled = "privileged user login is disabled" + cmdInteractiveShell = "" + cmdPortForwarding = "" + cmdSFTP = "" + cmdNonInteractive = "" + // DefaultJWTMaxTokenAge is the default maximum age for JWT tokens accepted by the SSH server DefaultJWTMaxTokenAge = 5 * 60 ) @@ -90,10 +96,10 @@ func logSessionExitError(logger *log.Entry, err error) { } } -// safeLogCommand returns a safe representation of the command for logging +// safeLogCommand returns a safe representation of the command for logging. func safeLogCommand(cmd []string) string { if len(cmd) == 0 { - return "" + return cmdInteractiveShell } if len(cmd) == 1 { return cmd[0] @@ -101,26 +107,50 @@ func safeLogCommand(cmd []string) string { return fmt.Sprintf("%s [%d args]", cmd[0], len(cmd)-1) } -type sshConnectionState struct { - hasActivePortForward bool - username string - remoteAddr string +// connState tracks the state of an SSH connection for port forwarding and status display. +type connState struct { + username string + remoteAddr net.Addr + portForwards []string + jwtUsername string } +// authKey uniquely identifies an authentication attempt by username and remote address. +// Used to temporarily store JWT username between passwordHandler and sessionHandler. type authKey string +// connKey uniquely identifies an SSH connection by its remote address. +// Used to track authenticated connections for status display and port forwarding. +type connKey string + func newAuthKey(username string, remoteAddr net.Addr) authKey { return authKey(fmt.Sprintf("%s@%s", username, remoteAddr.String())) } +// sessionState tracks an active SSH session (shell, command, or subsystem like SFTP). +type sessionState struct { + session ssh.Session + sessionType string + jwtUsername string +} + type Server struct { - sshServer *ssh.Server - mu sync.RWMutex - hostKeyPEM []byte - sessions map[SessionKey]ssh.Session - sessionCancels map[ConnectionKey]context.CancelFunc - sessionJWTUsers map[SessionKey]string - pendingAuthJWT map[authKey]string + sshServer *ssh.Server + mu sync.RWMutex + hostKeyPEM []byte + + // sessions tracks active SSH sessions (shell, command, SFTP). + // These are created when a client opens a session channel and requests shell/exec/subsystem. + sessions map[sessionKey]*sessionState + + // pendingAuthJWT temporarily stores JWT username during the auth→session handoff. + // Populated in passwordHandler, consumed in sessionHandler/sftpSubsystemHandler. + pendingAuthJWT map[authKey]string + + // connections tracks all SSH connections by their remote address. + // Populated at authentication time, stores JWT username and port forwards for status display. + connections map[connKey]*connState + allowLocalPortForwarding bool allowRemotePortForwarding bool @@ -132,8 +162,7 @@ type Server struct { wgAddress wgaddr.Address - remoteForwardListeners map[ForwardKey]net.Listener - sshConnections map[*cryptossh.ServerConn]*sshConnectionState + remoteForwardListeners map[forwardKey]net.Listener jwtValidator *jwt.Validator jwtExtractor *jwt.ClaimsExtractor @@ -167,6 +196,7 @@ type SessionInfo struct { RemoteAddress string Command string JWTUsername string + PortForwards []string } // New creates an SSH server instance with the provided host key and optional JWT configuration @@ -175,11 +205,10 @@ func New(config *Config) *Server { s := &Server{ mu: sync.RWMutex{}, hostKeyPEM: config.HostKeyPEM, - sessions: make(map[SessionKey]ssh.Session), - sessionJWTUsers: make(map[SessionKey]string), + sessions: make(map[sessionKey]*sessionState), pendingAuthJWT: make(map[authKey]string), - remoteForwardListeners: make(map[ForwardKey]net.Listener), - sshConnections: make(map[*cryptossh.ServerConn]*sshConnectionState), + remoteForwardListeners: make(map[forwardKey]net.Listener), + connections: make(map[connKey]*connState), jwtEnabled: config.JWT != nil, jwtConfig: config.JWT, authorizer: sshauth.NewAuthorizer(), // Initialize with empty config @@ -265,14 +294,8 @@ func (s *Server) Stop() error { s.sshServer = nil maps.Clear(s.sessions) - maps.Clear(s.sessionJWTUsers) maps.Clear(s.pendingAuthJWT) - maps.Clear(s.sshConnections) - - for _, cancelFunc := range s.sessionCancels { - cancelFunc() - } - maps.Clear(s.sessionCancels) + maps.Clear(s.connections) for _, listener := range s.remoteForwardListeners { if err := listener.Close(); err != nil { @@ -284,32 +307,70 @@ func (s *Server) Stop() error { return nil } -// GetStatus returns the current status of the SSH server and active sessions +// GetStatus returns the current status of the SSH server and active sessions. func (s *Server) GetStatus() (enabled bool, sessions []SessionInfo) { s.mu.RLock() defer s.mu.RUnlock() enabled = s.sshServer != nil + reportedAddrs := make(map[string]bool) - for sessionKey, session := range s.sessions { - cmd := "" - if len(session.Command()) > 0 { - cmd = safeLogCommand(session.Command()) + for _, state := range s.sessions { + info := s.buildSessionInfo(state) + reportedAddrs[info.RemoteAddress] = true + sessions = append(sessions, info) + } + + // Add authenticated connections without sessions (e.g., -N/-T or port-forwarding only) + for key, connState := range s.connections { + remoteAddr := string(key) + if reportedAddrs[remoteAddr] { + continue + } + cmd := cmdNonInteractive + if len(connState.portForwards) > 0 { + cmd = cmdPortForwarding } - - jwtUsername := s.sessionJWTUsers[sessionKey] - sessions = append(sessions, SessionInfo{ - Username: session.User(), - RemoteAddress: session.RemoteAddr().String(), + Username: connState.username, + RemoteAddress: remoteAddr, Command: cmd, - JWTUsername: jwtUsername, + JWTUsername: connState.jwtUsername, + PortForwards: connState.portForwards, }) } return enabled, sessions } +func (s *Server) buildSessionInfo(state *sessionState) SessionInfo { + session := state.session + cmd := state.sessionType + if cmd == "" { + cmd = safeLogCommand(session.Command()) + } + + remoteAddr := session.RemoteAddr().String() + info := SessionInfo{ + Username: session.User(), + RemoteAddress: remoteAddr, + Command: cmd, + JWTUsername: state.jwtUsername, + } + + connState, exists := s.connections[connKey(remoteAddr)] + if !exists { + return info + } + + info.PortForwards = connState.portForwards + if len(connState.portForwards) > 0 && (cmd == cmdInteractiveShell || cmd == cmdNonInteractive) { + info.Command = cmdPortForwarding + } + + return info +} + // SetNetstackNet sets the netstack network for userspace networking func (s *Server) SetNetstackNet(net *netstack.Net) { s.mu.Lock() @@ -520,69 +581,129 @@ func (s *Server) parseTokenWithoutValidation(tokenString string) (map[string]int func (s *Server) passwordHandler(ctx ssh.Context, password string) bool { osUsername := ctx.User() remoteAddr := ctx.RemoteAddr() + logger := s.getRequestLogger(ctx) if err := s.ensureJWTValidator(); err != nil { - log.Errorf("JWT validator initialization failed for user %s from %s: %v", osUsername, remoteAddr, err) + logger.Errorf("JWT validator initialization failed: %v", err) return false } token, err := s.validateJWTToken(password) if err != nil { - log.Warnf("JWT authentication failed for user %s from %s: %v", osUsername, remoteAddr, err) + logger.Warnf("JWT authentication failed: %v", err) return false } userAuth, err := s.extractAndValidateUser(token) if err != nil { - log.Warnf("User validation failed for user %s from %s: %v", osUsername, remoteAddr, err) + logger.Warnf("user validation failed: %v", err) return false } + logger = logger.WithField("jwt_user", userAuth.UserId) + s.mu.RLock() authorizer := s.authorizer s.mu.RUnlock() - if err := authorizer.Authorize(userAuth.UserId, osUsername); err != nil { - log.Warnf("SSH authorization denied for user %s (JWT user ID: %s) from %s: %v", osUsername, userAuth.UserId, remoteAddr, err) + msg, err := authorizer.Authorize(userAuth.UserId, osUsername) + if err != nil { + logger.Warnf("SSH auth denied: %v", err) return false } + logger.Infof("SSH auth %s", msg) + key := newAuthKey(osUsername, remoteAddr) + remoteAddrStr := ctx.RemoteAddr().String() s.mu.Lock() s.pendingAuthJWT[key] = userAuth.UserId + s.connections[connKey(remoteAddrStr)] = &connState{ + username: ctx.User(), + remoteAddr: ctx.RemoteAddr(), + jwtUsername: userAuth.UserId, + } s.mu.Unlock() - log.Infof("JWT authentication successful for user %s (JWT user ID: %s) from %s", osUsername, userAuth.UserId, remoteAddr) return true } -func (s *Server) markConnectionActivePortForward(sshConn *cryptossh.ServerConn, username, remoteAddr string) { +func (s *Server) addConnectionPortForward(username string, remoteAddr net.Addr, forwardAddr string) { s.mu.Lock() defer s.mu.Unlock() - if state, exists := s.sshConnections[sshConn]; exists { - state.hasActivePortForward = true - } else { - s.sshConnections[sshConn] = &sshConnectionState{ - hasActivePortForward: true, - username: username, - remoteAddr: remoteAddr, + key := connKey(remoteAddr.String()) + if state, exists := s.connections[key]; exists { + if !slices.Contains(state.portForwards, forwardAddr) { + state.portForwards = append(state.portForwards, forwardAddr) } + return + } + + // Connection not in connections (non-JWT auth path) + s.connections[key] = &connState{ + username: username, + remoteAddr: remoteAddr, + portForwards: []string{forwardAddr}, + jwtUsername: s.pendingAuthJWT[newAuthKey(username, remoteAddr)], } } -func (s *Server) connectionCloseHandler(conn net.Conn, err error) { - // We can't extract the SSH connection from net.Conn directly - // Connection cleanup will happen during session cleanup or via timeout - log.Debugf("SSH connection failed for %s: %v", conn.RemoteAddr(), err) +func (s *Server) removeConnectionPortForward(remoteAddr net.Addr, forwardAddr string) { + s.mu.Lock() + defer s.mu.Unlock() + + state, exists := s.connections[connKey(remoteAddr.String())] + if !exists { + return + } + + state.portForwards = slices.DeleteFunc(state.portForwards, func(addr string) bool { + return addr == forwardAddr + }) } -func (s *Server) findSessionKeyByContext(ctx ssh.Context) SessionKey { +// trackedConn wraps a net.Conn to detect when it closes +type trackedConn struct { + net.Conn + server *Server + remoteAddr string + onceClose sync.Once +} + +func (c *trackedConn) Close() error { + err := c.Conn.Close() + c.onceClose.Do(func() { + c.server.handleConnectionClose(c.remoteAddr) + }) + return err +} + +func (s *Server) handleConnectionClose(remoteAddr string) { + s.mu.Lock() + defer s.mu.Unlock() + + key := connKey(remoteAddr) + state, exists := s.connections[key] + if exists && len(state.portForwards) > 0 { + s.connLogger(state).Info("port forwarding connection closed") + } + delete(s.connections, key) +} + +func (s *Server) connLogger(state *connState) *log.Entry { + logger := log.WithField("session", fmt.Sprintf("%s@%s", state.username, state.remoteAddr)) + if state.jwtUsername != "" { + logger = logger.WithField("jwt_user", state.jwtUsername) + } + return logger +} + +func (s *Server) findSessionKeyByContext(ctx ssh.Context) sessionKey { if ctx == nil { return "unknown" } - // Try to match by SSH connection sshConn := ctx.Value(ssh.ContextKeyConn) if sshConn == nil { return "unknown" @@ -591,19 +712,14 @@ func (s *Server) findSessionKeyByContext(ctx ssh.Context) SessionKey { s.mu.RLock() defer s.mu.RUnlock() - // Look through sessions to find one with matching connection - for sessionKey, session := range s.sessions { - if session.Context().Value(ssh.ContextKeyConn) == sshConn { + for sessionKey, state := range s.sessions { + if state.session.Context().Value(ssh.ContextKeyConn) == sshConn { return sessionKey } } - // If no session found, this might be during early connection setup - // Return a temporary key that we'll fix up later if ctx.User() != "" && ctx.RemoteAddr() != nil { - tempKey := SessionKey(fmt.Sprintf("%s@%s", ctx.User(), ctx.RemoteAddr().String())) - log.Debugf("Using temporary session key for early port forward tracking: %s (will be updated when session established)", tempKey) - return tempKey + return sessionKey(fmt.Sprintf("%s@%s", ctx.User(), ctx.RemoteAddr().String())) } return "unknown" @@ -644,7 +760,11 @@ func (s *Server) connectionValidator(_ ssh.Context, conn net.Conn) net.Conn { } log.Infof("SSH connection from NetBird peer %s allowed", tcpAddr) - return conn + return &trackedConn{ + Conn: conn, + server: s, + remoteAddr: conn.RemoteAddr().String(), + } } func (s *Server) createSSHServer(addr net.Addr) (*ssh.Server, error) { @@ -672,9 +792,8 @@ func (s *Server) createSSHServer(addr net.Addr) (*ssh.Server, error) { "tcpip-forward": s.tcpipForwardHandler, "cancel-tcpip-forward": s.cancelTcpipForwardHandler, }, - ConnCallback: s.connectionValidator, - ConnectionFailedCallback: s.connectionCloseHandler, - Version: serverVersion, + ConnCallback: s.connectionValidator, + Version: serverVersion, } if s.jwtEnabled { @@ -690,13 +809,13 @@ func (s *Server) createSSHServer(addr net.Addr) (*ssh.Server, error) { return server, nil } -func (s *Server) storeRemoteForwardListener(key ForwardKey, ln net.Listener) { +func (s *Server) storeRemoteForwardListener(key forwardKey, ln net.Listener) { s.mu.Lock() defer s.mu.Unlock() s.remoteForwardListeners[key] = ln } -func (s *Server) removeRemoteForwardListener(key ForwardKey) bool { +func (s *Server) removeRemoteForwardListener(key forwardKey) bool { s.mu.Lock() defer s.mu.Unlock() @@ -714,6 +833,8 @@ func (s *Server) removeRemoteForwardListener(key ForwardKey) bool { } func (s *Server) directTCPIPHandler(srv *ssh.Server, conn *cryptossh.ServerConn, newChan cryptossh.NewChannel, ctx ssh.Context) { + logger := s.getRequestLogger(ctx) + var payload struct { Host string Port uint32 @@ -723,7 +844,7 @@ func (s *Server) directTCPIPHandler(srv *ssh.Server, conn *cryptossh.ServerConn, if err := cryptossh.Unmarshal(newChan.ExtraData(), &payload); err != nil { if err := newChan.Reject(cryptossh.ConnectionFailed, "parse payload"); err != nil { - log.Debugf("channel reject error: %v", err) + logger.Debugf("channel reject error: %v", err) } return } @@ -733,19 +854,20 @@ func (s *Server) directTCPIPHandler(srv *ssh.Server, conn *cryptossh.ServerConn, s.mu.RUnlock() if !allowLocal { - log.Warnf("local port forwarding denied for %s:%d: disabled by configuration", payload.Host, payload.Port) + logger.Warnf("local port forwarding denied for %s:%d: disabled", payload.Host, payload.Port) _ = newChan.Reject(cryptossh.Prohibited, "local port forwarding disabled") return } - // Check privilege requirements for the destination port if err := s.checkPortForwardingPrivileges(ctx, "local", payload.Port); err != nil { - log.Warnf("local port forwarding denied for %s:%d: %v", payload.Host, payload.Port, err) + logger.Warnf("local port forwarding denied for %s:%d: %v", payload.Host, payload.Port, err) _ = newChan.Reject(cryptossh.Prohibited, "insufficient privileges") return } - log.Infof("local port forwarding: %s:%d", payload.Host, payload.Port) + forwardAddr := fmt.Sprintf("-L %s:%d", payload.Host, payload.Port) + s.addConnectionPortForward(ctx.User(), ctx.RemoteAddr(), forwardAddr) + logger.Infof("local port forwarding: %s:%d", payload.Host, payload.Port) ssh.DirectTCPIPHandler(srv, conn, newChan, ctx) } diff --git a/client/ssh/server/server_config_test.go b/client/ssh/server/server_config_test.go index 24e455025..d85d85a51 100644 --- a/client/ssh/server/server_config_test.go +++ b/client/ssh/server/server_config_test.go @@ -224,6 +224,96 @@ func TestServer_PortForwardingRestriction(t *testing.T) { } } +func TestServer_PrivilegedPortAccess(t *testing.T) { + hostKey, err := ssh.GeneratePrivateKey(ssh.ED25519) + require.NoError(t, err) + + serverConfig := &Config{ + HostKeyPEM: hostKey, + } + server := New(serverConfig) + server.SetAllowRemotePortForwarding(true) + + tests := []struct { + name string + forwardType string + port uint32 + username string + expectError bool + errorMsg string + skipOnWindows bool + }{ + { + name: "non-root user remote forward privileged port", + forwardType: "remote", + port: 80, + username: "testuser", + expectError: true, + errorMsg: "cannot bind to privileged port", + skipOnWindows: true, + }, + { + name: "non-root user tcpip-forward privileged port", + forwardType: "tcpip-forward", + port: 443, + username: "testuser", + expectError: true, + errorMsg: "cannot bind to privileged port", + skipOnWindows: true, + }, + { + name: "non-root user remote forward unprivileged port", + forwardType: "remote", + port: 8080, + username: "testuser", + expectError: false, + }, + { + name: "non-root user remote forward port 0", + forwardType: "remote", + port: 0, + username: "testuser", + expectError: false, + }, + { + name: "root user remote forward privileged port", + forwardType: "remote", + port: 22, + username: "root", + expectError: false, + }, + { + name: "local forward privileged port allowed for non-root", + forwardType: "local", + port: 80, + username: "testuser", + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.skipOnWindows && runtime.GOOS == "windows" { + t.Skip("Windows does not have privileged port restrictions") + } + + result := PrivilegeCheckResult{ + Allowed: true, + User: &user.User{Username: tt.username}, + } + + err := server.checkPrivilegedPortAccess(tt.forwardType, tt.port, result) + + if tt.expectError { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.errorMsg) + } else { + require.NoError(t, err) + } + }) + } +} + func TestServer_PortConflictHandling(t *testing.T) { // Test that multiple sessions requesting the same local port are handled naturally by the OS // Get current user for SSH connection @@ -392,3 +482,95 @@ func TestServer_IsPrivilegedUser(t *testing.T) { }) } } + +func TestServer_PortForwardingOnlySession(t *testing.T) { + // Test that sessions without PTY and command are allowed when port forwarding is enabled + currentUser, err := user.Current() + require.NoError(t, err, "Should be able to get current user") + + // Generate host key for server + hostKey, err := ssh.GeneratePrivateKey(ssh.ED25519) + require.NoError(t, err) + + tests := []struct { + name string + allowLocalForwarding bool + allowRemoteForwarding bool + expectAllowed bool + description string + }{ + { + name: "session_allowed_with_local_forwarding", + allowLocalForwarding: true, + allowRemoteForwarding: false, + expectAllowed: true, + description: "Port-forwarding-only session should be allowed when local forwarding is enabled", + }, + { + name: "session_allowed_with_remote_forwarding", + allowLocalForwarding: false, + allowRemoteForwarding: true, + expectAllowed: true, + description: "Port-forwarding-only session should be allowed when remote forwarding is enabled", + }, + { + name: "session_allowed_with_both", + allowLocalForwarding: true, + allowRemoteForwarding: true, + expectAllowed: true, + description: "Port-forwarding-only session should be allowed when both forwarding types enabled", + }, + { + name: "session_denied_without_forwarding", + allowLocalForwarding: false, + allowRemoteForwarding: false, + expectAllowed: false, + description: "Port-forwarding-only session should be denied when all forwarding is disabled", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + serverConfig := &Config{ + HostKeyPEM: hostKey, + JWT: nil, + } + server := New(serverConfig) + server.SetAllowRootLogin(true) + server.SetAllowLocalPortForwarding(tt.allowLocalForwarding) + server.SetAllowRemotePortForwarding(tt.allowRemoteForwarding) + + serverAddr := StartTestServer(t, server) + defer func() { + _ = server.Stop() + }() + + // Connect to the server without requesting PTY or command + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, err := sshclient.Dial(ctx, serverAddr, currentUser.Username, sshclient.DialOptions{ + InsecureSkipVerify: true, + }) + require.NoError(t, err) + defer func() { + _ = client.Close() + }() + + // Execute a command without PTY - this simulates ssh -T with no command + // The server should either allow it (port forwarding enabled) or reject it + output, err := client.ExecuteCommand(ctx, "") + if tt.expectAllowed { + // When allowed, the session stays open until cancelled + // ExecuteCommand with empty command should return without error + assert.NoError(t, err, "Session should be allowed when port forwarding is enabled") + assert.NotContains(t, output, "port forwarding is disabled", + "Output should not contain port forwarding disabled message") + } else if err != nil { + // When denied, we expect an error message about port forwarding being disabled + assert.Contains(t, err.Error(), "port forwarding is disabled", + "Should get port forwarding disabled message") + } + }) + } +} diff --git a/client/ssh/server/session_handlers.go b/client/ssh/server/session_handlers.go index 4e6d72098..3fd578064 100644 --- a/client/ssh/server/session_handlers.go +++ b/client/ssh/server/session_handlers.go @@ -6,37 +6,45 @@ import ( "errors" "fmt" "io" - "strings" "time" "github.com/gliderlabs/ssh" log "github.com/sirupsen/logrus" - cryptossh "golang.org/x/crypto/ssh" ) +// associateJWTUsername extracts pending JWT username for the session and associates it with the session state. +// Returns the JWT username (empty if none) for logging purposes. +func (s *Server) associateJWTUsername(sess ssh.Session, sessionKey sessionKey) string { + key := newAuthKey(sess.User(), sess.RemoteAddr()) + + s.mu.Lock() + defer s.mu.Unlock() + + jwtUsername := s.pendingAuthJWT[key] + if jwtUsername == "" { + return "" + } + + if state, exists := s.sessions[sessionKey]; exists { + state.jwtUsername = jwtUsername + } + delete(s.pendingAuthJWT, key) + return jwtUsername +} + // sessionHandler handles SSH sessions func (s *Server) sessionHandler(session ssh.Session) { - sessionKey := s.registerSession(session) - - key := newAuthKey(session.User(), session.RemoteAddr()) - s.mu.Lock() - jwtUsername := s.pendingAuthJWT[key] - if jwtUsername != "" { - s.sessionJWTUsers[sessionKey] = jwtUsername - delete(s.pendingAuthJWT, key) - } - s.mu.Unlock() + sessionKey := s.registerSession(session, "") + jwtUsername := s.associateJWTUsername(session, sessionKey) logger := log.WithField("session", sessionKey) if jwtUsername != "" { logger = logger.WithField("jwt_user", jwtUsername) - logger.Infof("SSH session started (JWT user: %s)", jwtUsername) - } else { - logger.Infof("SSH session started") } + logger.Info("SSH session started") sessionStart := time.Now() - defer s.unregisterSession(sessionKey, session) + defer s.unregisterSession(sessionKey) defer func() { duration := time.Since(sessionStart).Round(time.Millisecond) if err := session.Close(); err != nil && !errors.Is(err, io.EOF) { @@ -65,27 +73,52 @@ func (s *Server) sessionHandler(session ssh.Session) { // ssh - non-Pty command execution s.handleCommand(logger, session, privilegeResult, nil) default: - s.rejectInvalidSession(logger, session) + // ssh -T (or ssh -N) - no PTY, no command + s.handleNonInteractiveSession(logger, session) } } -func (s *Server) rejectInvalidSession(logger *log.Entry, session ssh.Session) { - if _, err := io.WriteString(session, "no command specified and Pty not requested\n"); err != nil { - logger.Debugf(errWriteSession, err) +// handleNonInteractiveSession handles sessions that have no PTY and no command. +// These are typically used for port forwarding (ssh -L/-R) or tunneling (ssh -N). +func (s *Server) handleNonInteractiveSession(logger *log.Entry, session ssh.Session) { + s.updateSessionType(session, cmdNonInteractive) + + if !s.isPortForwardingEnabled() { + if _, err := io.WriteString(session, "port forwarding is disabled on this server\n"); err != nil { + logger.Debugf(errWriteSession, err) + } + if err := session.Exit(1); err != nil { + logSessionExitError(logger, err) + } + logger.Infof("rejected non-interactive session: port forwarding disabled") + return } - if err := session.Exit(1); err != nil { + + <-session.Context().Done() + + if err := session.Exit(0); err != nil { logSessionExitError(logger, err) } - logger.Infof("rejected non-Pty session without command from %s", session.RemoteAddr()) } -func (s *Server) registerSession(session ssh.Session) SessionKey { +func (s *Server) updateSessionType(session ssh.Session, sessionType string) { + s.mu.Lock() + defer s.mu.Unlock() + + for _, state := range s.sessions { + if state.session == session { + state.sessionType = sessionType + return + } + } +} + +func (s *Server) registerSession(session ssh.Session, sessionType string) sessionKey { sessionID := session.Context().Value(ssh.ContextKeySessionID) if sessionID == nil { sessionID = fmt.Sprintf("%p", session) } - // Create a short 4-byte identifier from the full session ID hasher := sha256.New() hasher.Write([]byte(fmt.Sprintf("%v", sessionID))) hash := hasher.Sum(nil) @@ -93,43 +126,23 @@ func (s *Server) registerSession(session ssh.Session) SessionKey { remoteAddr := session.RemoteAddr().String() username := session.User() - sessionKey := SessionKey(fmt.Sprintf("%s@%s-%s", username, remoteAddr, shortID)) + sessionKey := sessionKey(fmt.Sprintf("%s@%s-%s", username, remoteAddr, shortID)) s.mu.Lock() - s.sessions[sessionKey] = session + s.sessions[sessionKey] = &sessionState{ + session: session, + sessionType: sessionType, + } s.mu.Unlock() return sessionKey } -func (s *Server) unregisterSession(sessionKey SessionKey, session ssh.Session) { +func (s *Server) unregisterSession(sessionKey sessionKey) { s.mu.Lock() + defer s.mu.Unlock() + delete(s.sessions, sessionKey) - delete(s.sessionJWTUsers, sessionKey) - - // Cancel all port forwarding connections for this session - var connectionsToCancel []ConnectionKey - for key := range s.sessionCancels { - if strings.HasPrefix(string(key), string(sessionKey)+"-") { - connectionsToCancel = append(connectionsToCancel, key) - } - } - - for _, key := range connectionsToCancel { - if cancelFunc, exists := s.sessionCancels[key]; exists { - log.WithField("session", sessionKey).Debugf("cancelling port forwarding context: %s", key) - cancelFunc() - delete(s.sessionCancels, key) - } - } - - if sshConnValue := session.Context().Value(ssh.ContextKeyConn); sshConnValue != nil { - if sshConn, ok := sshConnValue.(*cryptossh.ServerConn); ok { - delete(s.sshConnections, sshConn) - } - } - - s.mu.Unlock() } func (s *Server) handlePrivError(logger *log.Entry, session ssh.Session, err error) { diff --git a/client/ssh/server/sftp.go b/client/ssh/server/sftp.go index c2b9f552b..199444abb 100644 --- a/client/ssh/server/sftp.go +++ b/client/ssh/server/sftp.go @@ -18,14 +18,26 @@ func (s *Server) SetAllowSFTP(allow bool) { // sftpSubsystemHandler handles SFTP subsystem requests func (s *Server) sftpSubsystemHandler(sess ssh.Session) { + sessionKey := s.registerSession(sess, cmdSFTP) + defer s.unregisterSession(sessionKey) + + jwtUsername := s.associateJWTUsername(sess, sessionKey) + + logger := log.WithField("session", sessionKey) + if jwtUsername != "" { + logger = logger.WithField("jwt_user", jwtUsername) + } + logger.Info("SFTP session started") + defer logger.Info("SFTP session closed") + s.mu.RLock() allowSFTP := s.allowSFTP s.mu.RUnlock() if !allowSFTP { - log.Debugf("SFTP subsystem request denied: SFTP disabled") + logger.Debug("SFTP subsystem request denied: SFTP disabled") if err := sess.Exit(1); err != nil { - log.Debugf("SFTP session exit failed: %v", err) + logger.Debugf("SFTP session exit: %v", err) } return } @@ -37,31 +49,27 @@ func (s *Server) sftpSubsystemHandler(sess ssh.Session) { }) if !result.Allowed { - log.Warnf("SFTP access denied for user %s from %s: %v", sess.User(), sess.RemoteAddr(), result.Error) + logger.Warnf("SFTP access denied: %v", result.Error) if err := sess.Exit(1); err != nil { - log.Debugf("exit SFTP session: %v", err) + logger.Debugf("exit SFTP session: %v", err) } return } - log.Debugf("SFTP subsystem request from user %s (effective user %s)", sess.User(), result.User.Username) - if !result.RequiresUserSwitching { if err := s.executeSftpDirect(sess); err != nil { - log.Errorf("SFTP direct execution: %v", err) + logger.Errorf("SFTP direct execution: %v", err) } return } if err := s.executeSftpWithPrivilegeDrop(sess, result.User); err != nil { - log.Errorf("SFTP privilege drop execution: %v", err) + logger.Errorf("SFTP privilege drop execution: %v", err) } } // executeSftpDirect executes SFTP directly without privilege dropping func (s *Server) executeSftpDirect(sess ssh.Session) error { - log.Debugf("starting SFTP session for user %s (no privilege dropping)", sess.User()) - sftpServer, err := sftp.NewServer(sess) if err != nil { return fmt.Errorf("SFTP server creation: %w", err) diff --git a/client/status/status.go b/client/status/status.go index d975f0e29..4f31f3637 100644 --- a/client/status/status.go +++ b/client/status/status.go @@ -82,10 +82,11 @@ type NsServerGroupStateOutput struct { } type SSHSessionOutput struct { - Username string `json:"username" yaml:"username"` - RemoteAddress string `json:"remoteAddress" yaml:"remoteAddress"` - Command string `json:"command" yaml:"command"` - JWTUsername string `json:"jwtUsername,omitempty" yaml:"jwtUsername,omitempty"` + Username string `json:"username" yaml:"username"` + RemoteAddress string `json:"remoteAddress" yaml:"remoteAddress"` + Command string `json:"command" yaml:"command"` + JWTUsername string `json:"jwtUsername,omitempty" yaml:"jwtUsername,omitempty"` + PortForwards []string `json:"portForwards,omitempty" yaml:"portForwards,omitempty"` } type SSHServerStateOutput struct { @@ -220,6 +221,7 @@ func mapSSHServer(sshServerState *proto.SSHServerState) SSHServerStateOutput { RemoteAddress: session.GetRemoteAddress(), Command: session.GetCommand(), JWTUsername: session.GetJwtUsername(), + PortForwards: session.GetPortForwards(), }) } @@ -475,6 +477,9 @@ func ParseGeneralSummary(overview OutputOverview, showURL bool, showRelays bool, ) } sshServerStatus += "\n " + sessionDisplay + for _, pf := range session.PortForwards { + sshServerStatus += "\n " + pf + } } } } From d35b7d675c7666ab5b8f438642f8b4574bd36859 Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Wed, 7 Jan 2026 14:00:39 +0300 Subject: [PATCH 019/374] [management] Refactor integrated peer deletion (#5042) --- management/internals/modules/peers/manager.go | 15 ++++++++---- management/server/peer.go | 24 ++++++++++--------- management/server/user.go | 12 +++++++++- 3 files changed, 34 insertions(+), 17 deletions(-) diff --git a/management/internals/modules/peers/manager.go b/management/internals/modules/peers/manager.go index b200b9663..4935c608e 100644 --- a/management/internals/modules/peers/manager.go +++ b/management/internals/modules/peers/manager.go @@ -7,6 +7,8 @@ import ( "fmt" "time" + log "github.com/sirupsen/logrus" + "github.com/netbirdio/netbird/management/internals/controllers/network_map" "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral" "github.com/netbirdio/netbird/management/server/account" @@ -102,7 +104,7 @@ func (m *managerImpl) DeletePeers(ctx context.Context, accountID string, peerIDs for _, peerID := range peerIDs { var eventsToStore []func() - err := m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + err = m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { peer, err := transaction.GetPeerByID(ctx, store.LockingStrengthNone, accountID, peerID) if err != nil { return err @@ -116,10 +118,6 @@ func (m *managerImpl) DeletePeers(ctx context.Context, accountID string, peerIDs return fmt.Errorf("failed to remove peer %s from groups", peerID) } - if err := m.integratedPeerValidator.PeerDeleted(ctx, accountID, peerID, settings.Extra); err != nil { - return err - } - peerPolicyRules, err := transaction.GetPolicyRulesByResourceID(ctx, store.LockingStrengthNone, accountID, peerID) if err != nil { return err @@ -153,6 +151,13 @@ func (m *managerImpl) DeletePeers(ctx context.Context, accountID string, peerIDs if err != nil { return err } + + if m.integratedPeerValidator != nil { + if err = m.integratedPeerValidator.PeerDeleted(ctx, accountID, peerID, settings.Extra); err != nil { + log.WithContext(ctx).Errorf("failed to delete peer %s from integrated validator: %v", peerID, err) + } + } + for _, event := range eventsToStore { event() } diff --git a/management/server/peer.go b/management/server/peer.go index 8d0e18171..0b837f04c 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -340,6 +340,7 @@ func (am *DefaultAccountManager) DeletePeer(ctx context.Context, accountID, peer } var peer *nbpeer.Peer + var settings *types.Settings var eventsToStore []func() err = am.Store.ExecuteInTransaction(ctx, func(transaction store.Store) error { @@ -348,11 +349,16 @@ func (am *DefaultAccountManager) DeletePeer(ctx context.Context, accountID, peer return err } + settings, err = transaction.GetAccountSettings(ctx, store.LockingStrengthNone, accountID) + if err != nil { + return err + } + if err = am.validatePeerDelete(ctx, transaction, accountID, peerID); err != nil { return err } - eventsToStore, err = deletePeers(ctx, am, transaction, accountID, userID, []*nbpeer.Peer{peer}) + eventsToStore, err = deletePeers(ctx, am, transaction, accountID, userID, []*nbpeer.Peer{peer}, settings) if err != nil { return fmt.Errorf("failed to delete peer: %w", err) } @@ -371,7 +377,11 @@ func (am *DefaultAccountManager) DeletePeer(ctx context.Context, accountID, peer storeEvent() } - if err := am.networkMapController.OnPeersDeleted(ctx, accountID, []string{peerID}); err != nil { + if err = am.integratedPeerValidator.PeerDeleted(ctx, accountID, peerID, settings.Extra); err != nil { + log.WithContext(ctx).Errorf("failed to delete peer %s from integrated validator: %v", peerID, err) + } + + if err = am.networkMapController.OnPeersDeleted(ctx, accountID, []string{peerID}); err != nil { log.WithContext(ctx).Errorf("failed to delete peer %s from network map: %v", peerID, err) } @@ -1227,13 +1237,9 @@ func getPeerGroupIDs(ctx context.Context, transaction store.Store, accountID str // deletePeers deletes all specified peers and sends updates to the remote peers. // Returns a slice of functions to save events after successful peer deletion. -func deletePeers(ctx context.Context, am *DefaultAccountManager, transaction store.Store, accountID, userID string, peers []*nbpeer.Peer) ([]func(), error) { +func deletePeers(ctx context.Context, am *DefaultAccountManager, transaction store.Store, accountID, userID string, peers []*nbpeer.Peer, settings *types.Settings) ([]func(), error) { var peerDeletedEvents []func() - settings, err := transaction.GetAccountSettings(ctx, store.LockingStrengthNone, accountID) - if err != nil { - return nil, err - } dnsDomain := am.networkMapController.GetDNSDomain(settings) for _, peer := range peers { @@ -1241,10 +1247,6 @@ func deletePeers(ctx context.Context, am *DefaultAccountManager, transaction sto return nil, fmt.Errorf("failed to remove peer %s from groups", peer.ID) } - if err := am.integratedPeerValidator.PeerDeleted(ctx, accountID, peer.ID, settings.Extra); err != nil { - return nil, err - } - peerPolicyRules, err := transaction.GetPolicyRulesByResourceID(ctx, store.LockingStrengthNone, accountID, peer.ID) if err != nil { return nil, err diff --git a/management/server/user.go b/management/server/user.go index e393b2c04..85e84051c 100644 --- a/management/server/user.go +++ b/management/server/user.go @@ -1115,6 +1115,7 @@ func (am *DefaultAccountManager) deleteRegularUser(ctx context.Context, accountI var updateAccountPeers bool var userPeers []*nbpeer.Peer var targetUser *types.User + var settings *types.Settings var err error err = am.Store.ExecuteInTransaction(ctx, func(transaction store.Store) error { @@ -1123,6 +1124,11 @@ func (am *DefaultAccountManager) deleteRegularUser(ctx context.Context, accountI return fmt.Errorf("failed to get user to delete: %w", err) } + settings, err = transaction.GetAccountSettings(ctx, store.LockingStrengthNone, accountID) + if err != nil { + return fmt.Errorf("failed to get account settings: %w", err) + } + userPeers, err = transaction.GetUserPeers(ctx, store.LockingStrengthNone, accountID, targetUserInfo.ID) if err != nil { return fmt.Errorf("failed to get user peers: %w", err) @@ -1130,7 +1136,7 @@ func (am *DefaultAccountManager) deleteRegularUser(ctx context.Context, accountI if len(userPeers) > 0 { updateAccountPeers = true - addPeerRemovedEvents, err = deletePeers(ctx, am, transaction, accountID, targetUserInfo.ID, userPeers) + addPeerRemovedEvents, err = deletePeers(ctx, am, transaction, accountID, targetUserInfo.ID, userPeers, settings) if err != nil { return fmt.Errorf("failed to delete user peers: %w", err) } @@ -1149,6 +1155,9 @@ func (am *DefaultAccountManager) deleteRegularUser(ctx context.Context, accountI var peerIDs []string for _, peer := range userPeers { peerIDs = append(peerIDs, peer.ID) + if err = am.integratedPeerValidator.PeerDeleted(ctx, accountID, peer.ID, settings.Extra); err != nil { + log.WithContext(ctx).Errorf("failed to delete peer %s from integrated validator: %v", peer.ID, err) + } } if err := am.networkMapController.OnPeersDeleted(ctx, accountID, peerIDs); err != nil { log.WithContext(ctx).Errorf("failed to delete peers %s from network map: %v", peerIDs, err) @@ -1157,6 +1166,7 @@ func (am *DefaultAccountManager) deleteRegularUser(ctx context.Context, accountI for _, addPeerRemovedEvent := range addPeerRemovedEvents { addPeerRemovedEvent() } + meta := map[string]any{"name": targetUserInfo.Name, "email": targetUserInfo.Email, "created_at": targetUser.CreatedAt} am.StoreEvent(ctx, initiatorUserID, targetUser.Id, accountID, activity.UserDeleted, meta) From 20d6beff1bc57c27f31668175d8077359f7b6b65 Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Wed, 7 Jan 2026 14:59:49 +0300 Subject: [PATCH 020/374] [management] Increment network serial on peer update (#5051) Increment the serial on peer update and prevent double serial increments and account updates when updating a user while there are peers set to expire --- management/server/peer.go | 4 ++++ management/server/user.go | 4 +--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/management/server/peer.go b/management/server/peer.go index 0b837f04c..977bd52af 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -269,6 +269,10 @@ func (am *DefaultAccountManager) UpdatePeer(ctx context.Context, accountID, user inactivityExpirationChanged = true } + if err = transaction.IncrementNetworkSerial(ctx, accountID); err != nil { + return fmt.Errorf("failed to increment network serial: %w", err) + } + return transaction.SavePeer(ctx, accountID, peer) }) if err != nil { diff --git a/management/server/user.go b/management/server/user.go index 85e84051c..656ebca67 100644 --- a/management/server/user.go +++ b/management/server/user.go @@ -577,9 +577,7 @@ func (am *DefaultAccountManager) SaveOrAddUsers(ctx context.Context, accountID, log.WithContext(ctx).Errorf("failed update expired peers: %s", err) return nil, err } - } - - if updateAccountPeers { + } else if updateAccountPeers { if err = am.Store.IncrementNetworkSerial(ctx, accountID); err != nil { return nil, fmt.Errorf("failed to increment network serial: %w", err) } From 5393ad948f91c3d4b9d6c011b2712517733d17ca Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Wed, 7 Jan 2026 13:05:39 +0100 Subject: [PATCH 021/374] [management] fix nil handling for extra settings (#5049) --- management/server/account.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/management/server/account.go b/management/server/account.go index 52dcc567e..a1046432a 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -333,8 +333,9 @@ func (am *DefaultAccountManager) UpdateAccountSettings(ctx context.Context, acco } } - newSettings.Extra.IntegratedValidatorGroups = oldSettings.Extra.IntegratedValidatorGroups - newSettings.Extra.IntegratedValidator = oldSettings.Extra.IntegratedValidator + if newSettings.Extra == nil { + newSettings.Extra = oldSettings.Extra + } if err = transaction.SaveAccountSettings(ctx, accountID, newSettings); err != nil { return err From e586c20e36de0b263caa27015c273e54e9e75e5a Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Wed, 7 Jan 2026 08:52:32 -0500 Subject: [PATCH 022/374] [management, infrastructure, idp] Simplified IdP Management - Embedded IdP (#5008) Embed Dex as a built-in IdP to simplify self-hosting setup. Adds an embedded OIDC Identity Provider (Dex) with local user management and optional external IdP connectors (Google/GitHub/OIDC/SAML), plus device-auth flow for CLI login. Introduces instance onboarding/setup endpoints (including owner creation), field-level encryption for sensitive user data, a streamlined self-hosting provisioning script, and expanded APIs + test coverage for IdP management. more at https://github.com/netbirdio/netbird/pull/5008#issuecomment-3718987393 --- .../workflows/test-infrastructure-files.yml | 1 + .gitignore | 1 + client/cmd/testutil_test.go | 2 +- client/internal/engine_test.go | 2 +- client/server/server_test.go | 2 +- go.mod | 96 +- go.sum | 278 +++--- idp/dex/config.go | 301 ++++++ idp/dex/provider.go | 934 ++++++++++++++++++ idp/dex/provider_test.go | 197 ++++ idp/dex/web/robots.txt | 2 + idp/dex/web/static/main.css | 1 + idp/dex/web/templates/approval.html | 26 + idp/dex/web/templates/device.html | 34 + idp/dex/web/templates/device_success.html | 16 + idp/dex/web/templates/error.html | 16 + idp/dex/web/templates/footer.html | 3 + idp/dex/web/templates/header.html | 70 ++ idp/dex/web/templates/login.html | 56 ++ idp/dex/web/templates/oob.html | 19 + idp/dex/web/templates/password.html | 58 ++ idp/dex/web/themes/light/favicon.ico | Bin 0 -> 106176 bytes idp/dex/web/themes/light/favicon.png | Bin 0 -> 300 bytes idp/dex/web/themes/light/logo.png | Bin 0 -> 300 bytes idp/dex/web/themes/light/styles.css | 1 + idp/dex/web/web.go | 14 + idp/sdk/sdk.go | 135 +++ infrastructure_files/getting-started.sh | 407 ++++++++ management/cmd/management.go | 243 +++-- .../peers/ephemeral/manager/ephemeral_test.go | 2 +- management/internals/server/boot.go | 25 +- management/internals/server/config/config.go | 4 + management/internals/server/container.go | 3 + management/internals/server/controllers.go | 31 +- management/internals/server/modules.go | 30 + management/internals/server/server.go | 16 +- management/internals/shared/grpc/server.go | 113 ++- management/server/account.go | 46 +- management/server/account/manager.go | 9 +- management/server/account_test.go | 54 +- management/server/activity/codes.go | 8 + management/server/auth/manager.go | 3 +- management/server/dns_test.go | 2 +- management/server/group_test.go | 2 +- management/server/http/handler.go | 47 +- .../handlers/accounts/accounts_handler.go | 23 +- .../accounts/accounts_handler_test.go | 7 + .../server/http/handlers/idp/idp_handler.go | 196 ++++ .../http/handlers/idp/idp_handler_test.go | 438 ++++++++ .../handlers/instance/instance_handler.go | 67 ++ .../instance/instance_handler_test.go | 281 ++++++ .../http/handlers/peers/peers_handler_test.go | 4 +- .../http/handlers/users/users_handler.go | 12 + .../server/http/middleware/auth_middleware.go | 3 + .../testing/testing_tools/channel/channel.go | 2 +- management/server/identity_provider.go | 234 +++++ management/server/identity_provider_test.go | 202 ++++ management/server/idp/embedded.go | 511 ++++++++++ management/server/idp/embedded_test.go | 249 +++++ management/server/idp/idp.go | 1 + management/server/instance/manager.go | 136 +++ management/server/instance/manager_test.go | 268 +++++ management/server/management_proto_test.go | 2 +- management/server/management_test.go | 1 + management/server/mock_server/account_mock.go | 58 +- management/server/nameserver_test.go | 2 +- management/server/peer_test.go | 24 +- .../server/permissions/modules/module.go | 54 +- .../server/permissions/roles/network_admin.go | 6 + management/server/posture_checks_test.go | 2 +- management/server/route_test.go | 2 +- management/server/setupkey_test.go | 9 +- management/server/store/file_store.go | 6 + management/server/store/sql_store.go | 78 +- management/server/store/sql_store_test.go | 135 ++- management/server/store/store.go | 10 + management/server/types/identity_provider.go | 122 +++ .../server/types/identity_provider_test.go | 137 +++ management/server/types/user.go | 83 +- management/server/types/user_test.go | 298 ++++++ management/server/user.go | 76 +- management/server/user_test.go | 226 ++++- shared/auth/jwt/extractor.go | 25 +- shared/auth/jwt/extractor_test.go | 322 ++++++ shared/auth/jwt/validator.go | 76 +- shared/auth/user.go | 9 + shared/management/client/client_test.go | 2 +- shared/management/http/api/openapi.yml | 315 ++++++ shared/management/http/api/types.gen.go | 99 +- util/crypt/crypt.go | 96 ++ 90 files changed, 7702 insertions(+), 517 deletions(-) create mode 100644 idp/dex/config.go create mode 100644 idp/dex/provider.go create mode 100644 idp/dex/provider_test.go create mode 100755 idp/dex/web/robots.txt create mode 100755 idp/dex/web/static/main.css create mode 100755 idp/dex/web/templates/approval.html create mode 100755 idp/dex/web/templates/device.html create mode 100755 idp/dex/web/templates/device_success.html create mode 100755 idp/dex/web/templates/error.html create mode 100755 idp/dex/web/templates/footer.html create mode 100755 idp/dex/web/templates/header.html create mode 100755 idp/dex/web/templates/login.html create mode 100755 idp/dex/web/templates/oob.html create mode 100755 idp/dex/web/templates/password.html create mode 100644 idp/dex/web/themes/light/favicon.ico create mode 100755 idp/dex/web/themes/light/favicon.png create mode 100755 idp/dex/web/themes/light/logo.png create mode 100755 idp/dex/web/themes/light/styles.css create mode 100644 idp/dex/web/web.go create mode 100644 idp/sdk/sdk.go create mode 100755 infrastructure_files/getting-started.sh create mode 100644 management/server/http/handlers/idp/idp_handler.go create mode 100644 management/server/http/handlers/idp/idp_handler_test.go create mode 100644 management/server/http/handlers/instance/instance_handler.go create mode 100644 management/server/http/handlers/instance/instance_handler_test.go create mode 100644 management/server/identity_provider.go create mode 100644 management/server/identity_provider_test.go create mode 100644 management/server/idp/embedded.go create mode 100644 management/server/idp/embedded_test.go create mode 100644 management/server/instance/manager.go create mode 100644 management/server/instance/manager_test.go create mode 100644 management/server/types/identity_provider.go create mode 100644 management/server/types/identity_provider_test.go create mode 100644 management/server/types/user_test.go create mode 100644 shared/auth/jwt/extractor_test.go create mode 100644 util/crypt/crypt.go diff --git a/.github/workflows/test-infrastructure-files.yml b/.github/workflows/test-infrastructure-files.yml index f4513e0e1..e2f950731 100644 --- a/.github/workflows/test-infrastructure-files.yml +++ b/.github/workflows/test-infrastructure-files.yml @@ -243,6 +243,7 @@ jobs: working-directory: infrastructure_files/artifacts run: | sleep 30 + docker compose logs docker compose exec management ls -l /var/lib/netbird/ | grep -i GeoLite2-City_[0-9]*.mmdb docker compose exec management ls -l /var/lib/netbird/ | grep -i geonames_[0-9]*.db diff --git a/.gitignore b/.gitignore index e6c0c0aca..89024d190 100644 --- a/.gitignore +++ b/.gitignore @@ -31,3 +31,4 @@ infrastructure_files/setup-*.env .DS_Store vendor/ /netbird +client/netbird-electron/ diff --git a/client/cmd/testutil_test.go b/client/cmd/testutil_test.go index b9ff35945..888a9a3f7 100644 --- a/client/cmd/testutil_test.go +++ b/client/cmd/testutil_test.go @@ -127,7 +127,7 @@ func startManagement(t *testing.T, config *config.Config, testFile string) (*grp if err != nil { t.Fatal(err) } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, &mgmt.MockIntegratedValidator{}, networkMapController) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, &mgmt.MockIntegratedValidator{}, networkMapController, nil) if err != nil { t.Fatal(err) } diff --git a/client/internal/engine_test.go b/client/internal/engine_test.go index 26ea6f8c2..a15ee0581 100644 --- a/client/internal/engine_test.go +++ b/client/internal/engine_test.go @@ -1631,7 +1631,7 @@ func startManagement(t *testing.T, dataDir, testFile string) (*grpc.Server, stri if err != nil { return nil, "", err } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil) if err != nil { return nil, "", err } diff --git a/client/server/server_test.go b/client/server/server_test.go index 69b4453ea..1ed115769 100644 --- a/client/server/server_test.go +++ b/client/server/server_test.go @@ -326,7 +326,7 @@ func startManagement(t *testing.T, signalAddr string, counter *int) (*grpc.Serve if err != nil { return nil, "", err } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil) if err != nil { return nil, "", err } diff --git a/go.mod b/go.mod index e52f21e55..23cf0f37d 100644 --- a/go.mod +++ b/go.mod @@ -8,22 +8,22 @@ require ( github.com/cloudflare/circl v1.3.3 // indirect github.com/golang/protobuf v1.5.4 github.com/google/uuid v1.6.0 - github.com/gorilla/mux v1.8.0 + github.com/gorilla/mux v1.8.1 github.com/kardianos/service v1.2.3-0.20240613133416-becf2eb62b83 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.27.6 github.com/rs/cors v1.8.0 github.com/sirupsen/logrus v1.9.3 - github.com/spf13/cobra v1.7.0 - github.com/spf13/pflag v1.0.5 + github.com/spf13/cobra v1.10.1 + github.com/spf13/pflag v1.0.9 github.com/vishvananda/netlink v1.3.1 - golang.org/x/crypto v0.45.0 - golang.org/x/sys v0.38.0 + golang.org/x/crypto v0.46.0 + golang.org/x/sys v0.39.0 golang.zx2c4.com/wireguard v0.0.0-20230704135630-469159ecf7d1 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 golang.zx2c4.com/wireguard/windows v0.5.3 - google.golang.org/grpc v1.75.0 - google.golang.org/protobuf v1.36.8 + google.golang.org/grpc v1.77.0 + google.golang.org/protobuf v1.36.10 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -41,6 +41,7 @@ require ( github.com/coder/websocket v1.8.13 github.com/coreos/go-iptables v0.7.0 github.com/creack/pty v1.1.18 + github.com/dexidp/dex v0.0.0-00010101000000-000000000000 github.com/dexidp/dex/api/v2 v2.4.0 github.com/eko/gocache/lib/v4 v4.2.0 github.com/eko/gocache/store/go_cache/v4 v4.2.2 @@ -79,7 +80,7 @@ require ( github.com/pion/transport/v3 v3.0.7 github.com/pion/turn/v3 v3.0.1 github.com/pkg/sftp v1.13.9 - github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/client_golang v1.23.2 github.com/quic-go/quic-go v0.49.1 github.com/redis/go-redis/v9 v9.7.3 github.com/rs/xid v1.3.0 @@ -97,11 +98,11 @@ require ( github.com/vmihailenco/msgpack/v5 v5.4.1 github.com/yusufpapurcu/wmi v1.2.4 github.com/zcalusic/sysinfo v1.1.3 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 - go.opentelemetry.io/otel v1.37.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 + go.opentelemetry.io/otel v1.38.0 go.opentelemetry.io/otel/exporters/prometheus v0.48.0 - go.opentelemetry.io/otel/metric v1.37.0 - go.opentelemetry.io/otel/sdk/metric v1.37.0 + go.opentelemetry.io/otel/metric v1.38.0 + go.opentelemetry.io/otel/sdk/metric v1.38.0 go.uber.org/mock v0.5.0 go.uber.org/zap v1.27.0 goauthentik.io/api/v3 v3.2023051.3 @@ -109,11 +110,11 @@ require ( golang.org/x/mobile v0.0.0-20251113184115-a159579294ab golang.org/x/mod v0.30.0 golang.org/x/net v0.47.0 - golang.org/x/oauth2 v0.30.0 - golang.org/x/sync v0.18.0 - golang.org/x/term v0.37.0 - golang.org/x/time v0.12.0 - google.golang.org/api v0.177.0 + golang.org/x/oauth2 v0.34.0 + golang.org/x/sync v0.19.0 + golang.org/x/term v0.38.0 + golang.org/x/time v0.14.0 + google.golang.org/api v0.257.0 gopkg.in/yaml.v3 v3.0.1 gorm.io/driver/mysql v1.5.7 gorm.io/driver/postgres v1.5.7 @@ -123,13 +124,18 @@ require ( ) require ( - cloud.google.com/go/auth v0.3.0 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect - cloud.google.com/go/compute/metadata v0.7.0 // indirect - dario.cat/mergo v1.0.0 // indirect + cloud.google.com/go/auth v0.17.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/compute/metadata v0.9.0 // indirect + dario.cat/mergo v1.0.1 // indirect filippo.io/edwards25519 v1.1.0 // indirect + github.com/AppsFlyer/go-sundheit v0.6.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect github.com/BurntSushi/toml v1.5.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.3.0 // indirect + github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Microsoft/hcsshim v0.12.3 // indirect github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect @@ -150,12 +156,14 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect github.com/aws/smithy-go v1.22.2 // indirect + github.com/beevik/etree v1.6.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/caddyserver/zerossl v0.1.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/containerd v1.7.29 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect + github.com/coreos/go-oidc/v3 v3.14.1 // indirect github.com/cpuguy83/dockercfg v0.3.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect @@ -169,26 +177,30 @@ require ( github.com/fyne-io/glfw-js v0.3.0 // indirect github.com/fyne-io/image v0.1.1 // indirect github.com/fyne-io/oksvg v0.2.0 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect github.com/go-gl/gl v0.0.0-20231021071112-07e5d0ea2e71 // indirect github.com/go-gl/glfw/v3.3/glfw v0.0.0-20240506104042-037f3cc74f2a // indirect + github.com/go-jose/go-jose/v4 v4.1.3 // indirect + github.com/go-ldap/ldap/v3 v3.4.12 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect - github.com/go-sql-driver/mysql v1.8.1 // indirect + github.com/go-sql-driver/mysql v1.9.3 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/go-text/render v0.2.0 // indirect github.com/go-text/typesetting v0.2.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/btree v1.1.2 // indirect github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect - github.com/google/s2a-go v0.1.7 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.3 // indirect + github.com/google/s2a-go v0.1.9 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect + github.com/googleapis/gax-go/v2 v2.15.0 // indirect + github.com/gorilla/handlers v1.5.2 // indirect github.com/hack-pad/go-indexeddb v0.3.2 // indirect github.com/hack-pad/safejs v0.1.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/huandu/xstrings v1.5.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect @@ -197,18 +209,23 @@ require ( github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/jonboulle/clockwork v0.5.0 // indirect github.com/jsummers/gobmp v0.0.0-20230614200233-a9de23ed2e25 // indirect github.com/kelseyhightower/envconfig v1.4.0 // indirect github.com/klauspost/compress v1.18.0 // indirect github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/kr/fs v0.1.0 // indirect + github.com/lib/pq v1.10.9 // indirect github.com/libdns/libdns v0.2.2 // indirect github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae // indirect github.com/magiconair/properties v1.8.7 // indirect - github.com/mattn/go-sqlite3 v1.14.22 // indirect + github.com/mattermost/xml-roundtrip-validator v0.1.0 // indirect + github.com/mattn/go-sqlite3 v1.14.32 // indirect github.com/mdlayher/genetlink v1.3.2 // indirect github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 // indirect github.com/mholt/acmez/v2 v2.0.1 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect @@ -231,11 +248,14 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.62.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.66.1 // indirect + github.com/prometheus/procfs v0.16.1 // indirect + github.com/russellhaering/goxmldsig v1.5.0 // indirect github.com/rymdport/portal v0.4.2 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/spf13/cast v1.7.0 // indirect github.com/srwiley/oksvg v0.0.0-20221011165216-be6e8873101c // indirect github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef // indirect github.com/stretchr/objx v0.5.2 // indirect @@ -246,17 +266,17 @@ require ( github.com/wlynxg/anet v0.0.3 // indirect github.com/yuin/goldmark v1.7.8 // indirect github.com/zeebo/blake3 v0.2.3 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect - go.opentelemetry.io/otel/sdk v1.37.0 // indirect - go.opentelemetry.io/otel/trace v1.37.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/otel/sdk v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect go.uber.org/multierr v1.11.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect golang.org/x/image v0.33.0 // indirect - golang.org/x/text v0.31.0 // indirect + golang.org/x/text v0.32.0 // indirect golang.org/x/tools v0.39.0 // indirect golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect ) @@ -272,3 +292,5 @@ replace github.com/cloudflare/circl => github.com/cunicu/circl v0.0.0-2023080111 replace github.com/pion/ice/v4 => github.com/netbirdio/ice/v4 v4.0.0-20250908184934-6202be846b51 replace github.com/libp2p/go-netroute => github.com/netbirdio/go-netroute v0.0.0-20240611143515-f59b0e1d3944 + +replace github.com/dexidp/dex => github.com/netbirdio/dex v0.244.0 diff --git a/go.sum b/go.sum index b362d75ee..354c7732e 100644 --- a/go.sum +++ b/go.sum @@ -1,15 +1,14 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/auth v0.3.0 h1:PRyzEpGfx/Z9e8+lHsbkoUVXD0gnu4MNmm7Gp8TQNIs= -cloud.google.com/go/auth v0.3.0/go.mod h1:lBv6NKTWp8E3LPzmO1TbiiRKc4drLOfHsgmlH9ogv5w= -cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= -cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= +cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= -cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= cunicu.li/go-rosenpass v0.4.0 h1:LtPtBgFWY/9emfgC4glKLEqS0MJTylzV6+ChRhiZERw= cunicu.li/go-rosenpass v0.4.0/go.mod h1:MPbjH9nxV4l3vEagKVdFNwHOketqgS5/To1VYJplf/M= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= fyne.io/fyne/v2 v2.7.0 h1:GvZSpE3X0liU/fqstInVvRsaboIVpIWQ4/sfjDGIGGQ= @@ -18,17 +17,28 @@ fyne.io/systray v1.11.1-0.20250603113521-ca66a66d8b58 h1:eA5/u2XRd8OUkoMqEv3IBlF fyne.io/systray v1.11.1-0.20250603113521-ca66a66d8b58/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/AppsFlyer/go-sundheit v0.6.0 h1:d2hBvCjBSb2lUsEWGfPigr4MCOt04sxB+Rppl0yUMSk= +github.com/AppsFlyer/go-sundheit v0.6.0/go.mod h1:LDdBHD6tQBtmHsdW+i1GwdTt6Wqc0qazf5ZEJVTbTME= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/hcsshim v0.12.3 h1:LS9NXqXhMoqNCplK1ApmVSfB4UnVLRDWRapB6EIlxE0= github.com/Microsoft/hcsshim v0.12.3/go.mod h1:Iyl1WVpZzr+UkzjekHZbV8o5Z9ZkxNGx6CtY2Qg/JVQ= github.com/TheJumpCloud/jcapi-go v3.0.0+incompatible h1:hqcTK6ZISdip65SR792lwYJTa/axESA0889D3UlZbLo= github.com/TheJumpCloud/jcapi-go v3.0.0+incompatible/go.mod h1:6B1nuc1MUs6c62ODZDl7hVE5Pv7O2XGSkgg2olnq34I= +github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktpoUAgOJK3OTFc/xug0PCXYCqU0FgDKI= +github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/awnumar/memcall v0.4.0 h1:B7hgZYdfH6Ot1Goaz8jGne/7i8xD4taZie/PNSFZ29g= @@ -73,6 +83,8 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/Xv github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/beevik/etree v1.6.0 h1:u8Kwy8pp9D9XeITj2Z0XtA5qqZEmtJtuXZRQi+j03eE= +github.com/beevik/etree v1.6.0/go.mod h1:bh4zJxiIr62SOf9pRzN7UUYaEDa9HEKafK25+sLc0Gc= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= @@ -87,7 +99,6 @@ github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+Y github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -95,8 +106,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk= github.com/cilium/ebpf v0.15.0/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE= github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= github.com/containerd/containerd v1.7.29 h1:90fWABQsaN9mJhGkoVnuzEY+o1XDPbg9BTC9QTAHnuE= @@ -107,9 +116,11 @@ github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpS github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= github.com/coreos/go-iptables v0.7.0 h1:XWM3V+MPRr5/q51NuWSgU0fqMad64Zyxs8ZUoMsamr8= github.com/coreos/go-iptables v0.7.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= +github.com/coreos/go-oidc/v3 v3.14.1 h1:9ePWwfdwC4QKRlCXsJGou56adA/owXczOzwKdOumLqk= +github.com/coreos/go-oidc/v3 v3.14.1/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU= github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cunicu/circl v0.0.0-20230801113412-fec58fc7b5f6 h1:/DS5cDX3FJdl+XaN2D7XAwFpuanTxnp52DBLZAaJKx0= @@ -135,14 +146,14 @@ github.com/eko/gocache/store/go_cache/v4 v4.2.2 h1:tAI9nl6TLoJyKG1ujF0CS0n/IgTEM github.com/eko/gocache/store/go_cache/v4 v4.2.2/go.mod h1:T9zkHokzr8K9EiC7RfMbDg6HSwaV6rv3UdcNu13SGcA= github.com/eko/gocache/store/redis/v4 v4.2.2 h1:Thw31fzGuH3WzJywsdbMivOmP550D6JS7GDHhvCJPA0= github.com/eko/gocache/store/redis/v4 v4.2.2/go.mod h1:LaTxLKx9TG/YUEybQvPMij++D7PBTIJ4+pzvk0ykz0w= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fredbi/uri v1.1.1 h1:xZHJC08GZNIUhbP5ImTHnt5Ya0T8FI2VAwI/37kh2Ko= github.com/fredbi/uri v1.1.1/go.mod h1:4+DZQ5zBjEwQCDmXW5JdIjz0PUA+yJbvtBv+u+adr5o= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -161,10 +172,16 @@ github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= +github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo= +github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-gl/gl v0.0.0-20231021071112-07e5d0ea2e71 h1:5BVwOaUSBTlVZowGO6VZGw2H/zl9nrd3eCZfYV+NfQA= github.com/go-gl/gl v0.0.0-20231021071112-07e5d0ea2e71/go.mod h1:9YTyiznxEY1fVinfM7RvRcjRHbw2xLBJ3AAGIT0I4Nw= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20240506104042-037f3cc74f2a h1:vxnBhFDDT+xzxf1jTJKMKZw3H0swfWk9RpWbBbDK5+0= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20240506104042-037f3cc74f2a/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= +github.com/go-ldap/ldap/v3 v3.4.12 h1:1b81mv7MagXZ7+1r7cLTWmyuTqVqdwbtJSjC0DAp9s4= +github.com/go-ldap/ldap/v3 v3.4.12/go.mod h1:+SPAGcTtOfmGsCb3h1RFiq4xpp4N636G75OEace8lNo= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -180,8 +197,8 @@ github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZs github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= -github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= -github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= +github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= @@ -197,11 +214,6 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -212,9 +224,7 @@ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -222,12 +232,9 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -242,23 +249,24 @@ github.com/google/nftables v0.3.0 h1:bkyZ0cbpVeMHXOrtlFc8ISmfVqq5gPJukoYieyVmITg github.com/google/nftables v0.3.0/go.mod h1:BCp9FsrbF1Fn/Yu6CLUc9GGZFw/+hsxfluNXXmxBfRM= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd h1:1FjCyPC+syAzJ5/2S8fqdZK1R22vvA0J7JZKcuOIQ7Y= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= -github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= +github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= +github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= +github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= github.com/gopacket/gopacket v1.1.1 h1:zbx9F9d6A7sWNkFKrvMBZTfGgxFoY4NgUudFVVHMfcw= github.com/gopacket/gopacket v1.1.1/go.mod h1:HavMeONEl7W9036of9LbSWoonqhH7HA1+ZRO+rMIvFs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.2-0.20240212192251-757544f21357 h1:Fkzd8ktnpOR9h47SXHe2AYPwelXLH2GjGsjlAloiWfo= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.2-0.20240212192251-757544f21357/go.mod h1:w9Y7gY31krpLmrVU5ZPG9H7l9fZuRu5/3R3S3FMtVQ4= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= github.com/hack-pad/go-indexeddb v0.3.2 h1:DTqeJJYc1usa45Q5r52t01KhvlSN02+Oq+tQbSBI91A= github.com/hack-pad/go-indexeddb v0.3.2/go.mod h1:QvfTevpDVlkfomY498LhstjwbPW6QC4VC/lxYb0Kom0= github.com/hack-pad/safejs v0.1.0 h1:qPS6vjreAqh2amUqj4WNG1zIw7qlRQJ9K10eDKMCnE8= @@ -276,6 +284,8 @@ github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -287,6 +297,18 @@ github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw= github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= +github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= +github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jeandeaual/go-locale v0.0.0-20250612000132-0ef82f21eade h1:FmusiCI1wHw+XQbvL9M+1r/C3SPqKrmBaIOYwVfQoDE= github.com/jeandeaual/go-locale v0.0.0-20250612000132-0ef82f21eade/go.mod h1:ZDXo8KHryOWSIqnsb/CiDq7hQUYryCgdVnxbj8tDG7o= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= @@ -297,6 +319,8 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I= +github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jsummers/gobmp v0.0.0-20230614200233-a9de23ed2e25 h1:YLvr1eE6cdCqjOe972w/cYF+FjW34v27+9Vo5106B4M= github.com/jsummers/gobmp v0.0.0-20230614200233-a9de23ed2e25/go.mod h1:kLgvv7o6UM+0QSf0QjAse3wReFDsb9qbZJdfexWlrQw= @@ -311,8 +335,11 @@ github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuV github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= @@ -331,9 +358,11 @@ github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae h1:dIZY4ULFcto4tA github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattermost/xml-roundtrip-validator v0.1.0 h1:RXbVD2UAl7A7nOTR4u7E3ILa4IbtvKBHw64LDsmu9hU= +github.com/mattermost/xml-roundtrip-validator v0.1.0/go.mod h1:qccnGMcpgwcNaBnxqpJpWWUiPNr5H3O8eDgGV9gT5To= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= +github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= @@ -346,8 +375,12 @@ github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= @@ -366,6 +399,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/netbirdio/dex v0.244.0 h1:1GOvi8wnXYassnKGildzNqRHq0RbcfEUw7LKYpKIN7U= +github.com/netbirdio/dex v0.244.0/go.mod h1:STGInJhPcAflrHmDO7vyit2kSq03PdL+8zQPoGALtcU= github.com/netbirdio/go-netroute v0.0.0-20240611143515-f59b0e1d3944 h1:TDtJKmM6Sf8uYFx/dMeqNOL90KUoRscdfpFZ3Im89uk= github.com/netbirdio/go-netroute v0.0.0-20240611143515-f59b0e1d3944/go.mod h1:sHA6TRxjQ6RLbnI+3R4DZo2Eseg/iKiPRfNmcuNySVQ= github.com/netbirdio/ice/v4 v4.0.0-20250908184934-6202be846b51 h1:Ov4qdafATOgGMB1wbSuh+0aAHcwz9hdvB6VZjh1mVMI= @@ -436,6 +471,7 @@ github.com/pion/turn/v3 v3.0.1 h1:wLi7BTQr6/Q20R0vt/lHbjv6y4GChFtC33nkYbasoT8= github.com/pion/turn/v3 v3.0.1/go.mod h1:MrJDKgqryDyWy1/4NT9TWfXWGMC7UHT6pJIv1+gMeNE= github.com/pion/turn/v4 v4.1.1 h1:9UnY2HB99tpDyz3cVVZguSxcqkJ1DsTSZ+8TGruh4fc= github.com/pion/turn/v4 v4.1.1/go.mod h1:2123tHk1O++vmjI5VSD0awT50NywDAq5A2NNNU4Jjs8= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= @@ -447,25 +483,26 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/quic-go/quic-go v0.49.1 h1:e5JXpUyF0f2uFjckQzD8jTghZrOUK1xxDqqZhlwixo0= github.com/quic-go/quic-go v0.49.1/go.mod h1:s2wDnmCdooUQBmQfpUSTCYBl1/D4FcqbULMMkASvR6s= github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so= github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= github.com/rs/xid v1.3.0 h1:6NjYksEUlhurdVehpc7S7dk6DAmcKv8V9gG0FsVN2U4= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/russellhaering/goxmldsig v1.5.0 h1:AU2UkkYIUOTyZRbe08XMThaOCelArgvNfYapcmSjBNw= +github.com/russellhaering/goxmldsig v1.5.0/go.mod h1:x98CjQNFJcWfMxeOrMnMKg70lvDP6tE0nTaeUnjXDmk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/rymdport/portal v0.4.2 h1:7jKRSemwlTyVHHrTGgQg7gmNPJs88xkbKcIL3NlcmSU= github.com/rymdport/portal v0.4.2/go.mod h1:kFF4jslnJ8pD5uCi17brj/ODlfIidOxlgUDTO5ncnC4= @@ -475,21 +512,26 @@ github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFt github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8= github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/srwiley/oksvg v0.0.0-20221011165216-be6e8873101c h1:km8GpoQut05eY3GiYWEedbTT0qnSxrCjsVbb7yKY1KE= github.com/srwiley/oksvg v0.0.0-20221011165216-be6e8873101c/go.mod h1:cNQ3dwVJtS5Hmnjxy6AgTPd0Inb3pW05ftPSX7NZO7Q= github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef h1:Ch6Q+AZUxDBCVqdkI8FSpFyZDtCVBc2VmejdNrm5rRQ= github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef/go.mod h1:nXTWP6+gD5+LUJ8krVhhoeHjvHTutPxMYl5SvkcnJNE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= @@ -501,7 +543,6 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= @@ -555,30 +596,28 @@ github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= -go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= -go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= go.opentelemetry.io/otel/exporters/prometheus v0.48.0 h1:sBQe3VNGUjY9IKWQC6z2lNqa5iGbDSxhs60ABwK4y0s= go.opentelemetry.io/otel/exporters/prometheus v0.48.0/go.mod h1:DtrbMzoZWwQHyrQmCfLam5DZbnmorsGbOtTbYHycU5o= -go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= -go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= -go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= -go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= -go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= -go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= -go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= -go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -589,6 +628,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= goauthentik.io/api/v3 v3.2023051.3 h1:NebAhD/TeTWNo/9X3/Uj+rM5fG1HaiLOlKTNLQv9Qq4= goauthentik.io/api/v3 v3.2023051.3/go.mod h1:nYECml4jGbp/541hj8GcylKQG1gVBsKppHy4+7G8u4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -602,16 +643,12 @@ golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1m golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= -golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/image v0.33.0 h1:LXRZRnv1+zGd5XBUVRFmYEphyyKJjQjCRiOuAP3sZfQ= golang.org/x/image v0.33.0/go.mod h1:DD3OsTYT9chzuzTQt+zMcOlBHgfoKQb1gry8p76Y1sc= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20251113184115-a159579294ab h1:Iqyc+2zr7aGyLuEadIm0KRJP0Wwt+fhlXLa51Fxf1+Q= golang.org/x/mobile v0.0.0-20251113184115-a159579294ab/go.mod h1:Eq3Nh/5pFSWug2ohiudJ1iyU59SO78QFuh4qTTN++I0= @@ -626,18 +663,13 @@ golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -651,12 +683,10 @@ golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= -golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= -golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= +golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -667,9 +697,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -705,8 +734,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= -golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -719,8 +748,8 @@ golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= -golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -732,15 +761,11 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= -golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= -golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -765,42 +790,31 @@ golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/api v0.177.0 h1:8a0p/BbPa65GlqGWtUKxot4p0TV8OGOfyTjtmkXNXmk= -google.golang.org/api v0.177.0/go.mod h1:srbhue4MLjkjbkux5p3dw/ocYOSZTaIEvf7bCOnFQDw= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/api v0.257.0 h1:8Y0lzvHlZps53PEaw+G29SsQIkuKrumGWs9puiexNAA= +google.golang.org/api v0.257.0/go.mod h1:4eJrr+vbVaZSqs7vovFd1Jb/A6ml6iw2e6FBYf3GAO4= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU= -google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= -google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846 h1:Wgl1rcDNThT+Zn47YyCXOXyX/COgMTIdhJ717F0l4xk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= +google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -836,5 +850,3 @@ gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= gvisor.dev/gvisor v0.0.0-20231020174304-b8a429915ff1 h1:qDCwdCWECGnwQSQC01Dpnp09fRHxJs9PbktotUqG+hs= gvisor.dev/gvisor v0.0.0-20231020174304-b8a429915ff1/go.mod h1:8hmigyCdYtw5xJGfQDJzSH5Ju8XEIDBnpyi8+O6GRt8= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/idp/dex/config.go b/idp/dex/config.go new file mode 100644 index 000000000..57f832406 --- /dev/null +++ b/idp/dex/config.go @@ -0,0 +1,301 @@ +package dex + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "log/slog" + "os" + "time" + + "golang.org/x/crypto/bcrypt" + "gopkg.in/yaml.v3" + + "github.com/dexidp/dex/server" + "github.com/dexidp/dex/storage" + "github.com/dexidp/dex/storage/sql" + + "github.com/netbirdio/netbird/idp/dex/web" +) + +// parseDuration parses a duration string (e.g., "6h", "24h", "168h"). +func parseDuration(s string) (time.Duration, error) { + return time.ParseDuration(s) +} + +// YAMLConfig represents the YAML configuration file format (mirrors dex's config format) +type YAMLConfig struct { + Issuer string `yaml:"issuer" json:"issuer"` + Storage Storage `yaml:"storage" json:"storage"` + Web Web `yaml:"web" json:"web"` + GRPC GRPC `yaml:"grpc" json:"grpc"` + OAuth2 OAuth2 `yaml:"oauth2" json:"oauth2"` + Expiry Expiry `yaml:"expiry" json:"expiry"` + Logger Logger `yaml:"logger" json:"logger"` + Frontend Frontend `yaml:"frontend" json:"frontend"` + + // StaticConnectors are user defined connectors specified in the config file + StaticConnectors []Connector `yaml:"connectors" json:"connectors"` + + // StaticClients cause the server to use this list of clients rather than + // querying the storage. Write operations, like creating a client, will fail. + StaticClients []storage.Client `yaml:"staticClients" json:"staticClients"` + + // If enabled, the server will maintain a list of passwords which can be used + // to identify a user. + EnablePasswordDB bool `yaml:"enablePasswordDB" json:"enablePasswordDB"` + + // StaticPasswords cause the server use this list of passwords rather than + // querying the storage. + StaticPasswords []Password `yaml:"staticPasswords" json:"staticPasswords"` +} + +// Web is the config format for the HTTP server. +type Web struct { + HTTP string `yaml:"http" json:"http"` + HTTPS string `yaml:"https" json:"https"` + AllowedOrigins []string `yaml:"allowedOrigins" json:"allowedOrigins"` + AllowedHeaders []string `yaml:"allowedHeaders" json:"allowedHeaders"` +} + +// GRPC is the config for the gRPC API. +type GRPC struct { + Addr string `yaml:"addr" json:"addr"` + TLSCert string `yaml:"tlsCert" json:"tlsCert"` + TLSKey string `yaml:"tlsKey" json:"tlsKey"` + TLSClientCA string `yaml:"tlsClientCA" json:"tlsClientCA"` +} + +// OAuth2 describes enabled OAuth2 extensions. +type OAuth2 struct { + SkipApprovalScreen bool `yaml:"skipApprovalScreen" json:"skipApprovalScreen"` + AlwaysShowLoginScreen bool `yaml:"alwaysShowLoginScreen" json:"alwaysShowLoginScreen"` + PasswordConnector string `yaml:"passwordConnector" json:"passwordConnector"` + ResponseTypes []string `yaml:"responseTypes" json:"responseTypes"` + GrantTypes []string `yaml:"grantTypes" json:"grantTypes"` +} + +// Expiry holds configuration for the validity period of components. +type Expiry struct { + SigningKeys string `yaml:"signingKeys" json:"signingKeys"` + IDTokens string `yaml:"idTokens" json:"idTokens"` + AuthRequests string `yaml:"authRequests" json:"authRequests"` + DeviceRequests string `yaml:"deviceRequests" json:"deviceRequests"` + RefreshTokens RefreshTokensExpiry `yaml:"refreshTokens" json:"refreshTokens"` +} + +// RefreshTokensExpiry holds configuration for refresh token expiry. +type RefreshTokensExpiry struct { + ReuseInterval string `yaml:"reuseInterval" json:"reuseInterval"` + ValidIfNotUsedFor string `yaml:"validIfNotUsedFor" json:"validIfNotUsedFor"` + AbsoluteLifetime string `yaml:"absoluteLifetime" json:"absoluteLifetime"` + DisableRotation bool `yaml:"disableRotation" json:"disableRotation"` +} + +// Logger holds configuration required to customize logging. +type Logger struct { + Level string `yaml:"level" json:"level"` + Format string `yaml:"format" json:"format"` +} + +// Frontend holds the server's frontend templates and assets config. +type Frontend struct { + Dir string `yaml:"dir" json:"dir"` + Theme string `yaml:"theme" json:"theme"` + Issuer string `yaml:"issuer" json:"issuer"` + LogoURL string `yaml:"logoURL" json:"logoURL"` + Extra map[string]string `yaml:"extra" json:"extra"` +} + +// Storage holds app's storage configuration. +type Storage struct { + Type string `yaml:"type" json:"type"` + Config map[string]interface{} `yaml:"config" json:"config"` +} + +// Password represents a static user configuration +type Password storage.Password + +func (p *Password) UnmarshalYAML(node *yaml.Node) error { + var data struct { + Email string `yaml:"email"` + Username string `yaml:"username"` + UserID string `yaml:"userID"` + Hash string `yaml:"hash"` + HashFromEnv string `yaml:"hashFromEnv"` + } + if err := node.Decode(&data); err != nil { + return err + } + *p = Password(storage.Password{ + Email: data.Email, + Username: data.Username, + UserID: data.UserID, + }) + if len(data.Hash) == 0 && len(data.HashFromEnv) > 0 { + data.Hash = os.Getenv(data.HashFromEnv) + } + if len(data.Hash) == 0 { + return fmt.Errorf("no password hash provided for user %s", data.Email) + } + + // If this value is a valid bcrypt, use it. + _, bcryptErr := bcrypt.Cost([]byte(data.Hash)) + if bcryptErr == nil { + p.Hash = []byte(data.Hash) + return nil + } + + // For backwards compatibility try to base64 decode this value. + hashBytes, err := base64.StdEncoding.DecodeString(data.Hash) + if err != nil { + return fmt.Errorf("malformed bcrypt hash: %v", bcryptErr) + } + if _, err := bcrypt.Cost(hashBytes); err != nil { + return fmt.Errorf("malformed bcrypt hash: %v", err) + } + p.Hash = hashBytes + return nil +} + +// Connector is a connector configuration that can unmarshal YAML dynamically. +type Connector struct { + Type string `yaml:"type" json:"type"` + Name string `yaml:"name" json:"name"` + ID string `yaml:"id" json:"id"` + Config map[string]interface{} `yaml:"config" json:"config"` +} + +// ToStorageConnector converts a Connector to storage.Connector type. +func (c *Connector) ToStorageConnector() (storage.Connector, error) { + data, err := json.Marshal(c.Config) + if err != nil { + return storage.Connector{}, fmt.Errorf("failed to marshal connector config: %v", err) + } + + return storage.Connector{ + ID: c.ID, + Type: c.Type, + Name: c.Name, + Config: data, + }, nil +} + +// StorageConfig is a configuration that can create a storage. +type StorageConfig interface { + Open(logger *slog.Logger) (storage.Storage, error) +} + +// OpenStorage opens a storage based on the config +func (s *Storage) OpenStorage(logger *slog.Logger) (storage.Storage, error) { + switch s.Type { + case "sqlite3": + file, _ := s.Config["file"].(string) + if file == "" { + return nil, fmt.Errorf("sqlite3 storage requires 'file' config") + } + return (&sql.SQLite3{File: file}).Open(logger) + default: + return nil, fmt.Errorf("unsupported storage type: %s", s.Type) + } +} + +// Validate validates the configuration +func (c *YAMLConfig) Validate() error { + if c.Issuer == "" { + return fmt.Errorf("no issuer specified in config file") + } + if c.Storage.Type == "" { + return fmt.Errorf("no storage type specified in config file") + } + if c.Web.HTTP == "" && c.Web.HTTPS == "" { + return fmt.Errorf("must supply a HTTP/HTTPS address to listen on") + } + if !c.EnablePasswordDB && len(c.StaticPasswords) != 0 { + return fmt.Errorf("cannot specify static passwords without enabling password db") + } + return nil +} + +// ToServerConfig converts YAMLConfig to dex server.Config +func (c *YAMLConfig) ToServerConfig(stor storage.Storage, logger *slog.Logger) server.Config { + cfg := server.Config{ + Issuer: c.Issuer, + Storage: stor, + Logger: logger, + SkipApprovalScreen: c.OAuth2.SkipApprovalScreen, + AllowedOrigins: c.Web.AllowedOrigins, + AllowedHeaders: c.Web.AllowedHeaders, + Web: server.WebConfig{ + Issuer: c.Frontend.Issuer, + LogoURL: c.Frontend.LogoURL, + Theme: c.Frontend.Theme, + Dir: c.Frontend.Dir, + Extra: c.Frontend.Extra, + }, + } + + // Use embedded NetBird-styled templates if no custom dir specified + if c.Frontend.Dir == "" { + cfg.Web.WebFS = web.FS() + } + + if len(c.OAuth2.ResponseTypes) > 0 { + cfg.SupportedResponseTypes = c.OAuth2.ResponseTypes + } + + // Apply expiry settings + if c.Expiry.SigningKeys != "" { + if d, err := parseDuration(c.Expiry.SigningKeys); err == nil { + cfg.RotateKeysAfter = d + } + } + if c.Expiry.IDTokens != "" { + if d, err := parseDuration(c.Expiry.IDTokens); err == nil { + cfg.IDTokensValidFor = d + } + } + if c.Expiry.AuthRequests != "" { + if d, err := parseDuration(c.Expiry.AuthRequests); err == nil { + cfg.AuthRequestsValidFor = d + } + } + if c.Expiry.DeviceRequests != "" { + if d, err := parseDuration(c.Expiry.DeviceRequests); err == nil { + cfg.DeviceRequestsValidFor = d + } + } + + return cfg +} + +// GetRefreshTokenPolicy creates a RefreshTokenPolicy from the expiry config. +// This should be called after ToServerConfig and the policy set on the config. +func (c *YAMLConfig) GetRefreshTokenPolicy(logger *slog.Logger) (*server.RefreshTokenPolicy, error) { + return server.NewRefreshTokenPolicy( + logger, + c.Expiry.RefreshTokens.DisableRotation, + c.Expiry.RefreshTokens.ValidIfNotUsedFor, + c.Expiry.RefreshTokens.AbsoluteLifetime, + c.Expiry.RefreshTokens.ReuseInterval, + ) +} + +// LoadConfig loads configuration from a YAML file +func LoadConfig(path string) (*YAMLConfig, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read config file: %w", err) + } + + var cfg YAMLConfig + if err := yaml.Unmarshal(data, &cfg); err != nil { + return nil, fmt.Errorf("failed to parse config file: %w", err) + } + + if err := cfg.Validate(); err != nil { + return nil, err + } + + return &cfg, nil +} diff --git a/idp/dex/provider.go b/idp/dex/provider.go new file mode 100644 index 000000000..09713a226 --- /dev/null +++ b/idp/dex/provider.go @@ -0,0 +1,934 @@ +// Package dex provides an embedded Dex OIDC identity provider. +package dex + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "log/slog" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "sync" + "time" + + dexapi "github.com/dexidp/dex/api/v2" + "github.com/dexidp/dex/server" + "github.com/dexidp/dex/storage" + "github.com/dexidp/dex/storage/sql" + "github.com/google/uuid" + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/crypto/bcrypt" + "google.golang.org/grpc" +) + +// Config matches what management/internals/server/server.go expects +type Config struct { + Issuer string + Port int + DataDir string + DevMode bool + + // GRPCAddr is the address for the gRPC API (e.g., ":5557"). Empty disables gRPC. + GRPCAddr string +} + +// Provider wraps a Dex server +type Provider struct { + config *Config + yamlConfig *YAMLConfig + dexServer *server.Server + httpServer *http.Server + listener net.Listener + grpcServer *grpc.Server + grpcListener net.Listener + storage storage.Storage + logger *slog.Logger + mu sync.Mutex + running bool +} + +// NewProvider creates and initializes the Dex server +func NewProvider(ctx context.Context, config *Config) (*Provider, error) { + if config.Issuer == "" { + return nil, fmt.Errorf("issuer is required") + } + if config.Port <= 0 { + return nil, fmt.Errorf("invalid port") + } + if config.DataDir == "" { + return nil, fmt.Errorf("data directory is required") + } + + logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) + + // Ensure data directory exists + if err := os.MkdirAll(config.DataDir, 0700); err != nil { + return nil, fmt.Errorf("failed to create data directory: %w", err) + } + + // Initialize SQLite storage + dbPath := filepath.Join(config.DataDir, "oidc.db") + sqliteConfig := &sql.SQLite3{File: dbPath} + stor, err := sqliteConfig.Open(logger) + if err != nil { + return nil, fmt.Errorf("failed to open storage: %w", err) + } + + // Ensure a local connector exists (for password authentication) + if err := ensureLocalConnector(ctx, stor); err != nil { + stor.Close() + return nil, fmt.Errorf("failed to ensure local connector: %w", err) + } + + // Ensure issuer ends with /oauth2 for proper path mounting + issuer := strings.TrimSuffix(config.Issuer, "/") + if !strings.HasSuffix(issuer, "/oauth2") { + issuer += "/oauth2" + } + + // Build refresh token policy (required to avoid nil pointer panics) + refreshPolicy, err := server.NewRefreshTokenPolicy(logger, false, "", "", "") + if err != nil { + stor.Close() + return nil, fmt.Errorf("failed to create refresh token policy: %w", err) + } + + // Build Dex server config - use Dex's types directly + dexConfig := server.Config{ + Issuer: issuer, + Storage: stor, + SkipApprovalScreen: true, + SupportedResponseTypes: []string{"code"}, + Logger: logger, + PrometheusRegistry: prometheus.NewRegistry(), + RotateKeysAfter: 6 * time.Hour, + IDTokensValidFor: 24 * time.Hour, + RefreshTokenPolicy: refreshPolicy, + Web: server.WebConfig{ + Issuer: "NetBird", + }, + } + + dexSrv, err := server.NewServer(ctx, dexConfig) + if err != nil { + stor.Close() + return nil, fmt.Errorf("failed to create dex server: %w", err) + } + + return &Provider{ + config: config, + dexServer: dexSrv, + storage: stor, + logger: logger, + }, nil +} + +// NewProviderFromYAML creates and initializes the Dex server from a YAMLConfig +func NewProviderFromYAML(ctx context.Context, yamlConfig *YAMLConfig) (*Provider, error) { + logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) + + stor, err := yamlConfig.Storage.OpenStorage(logger) + if err != nil { + return nil, fmt.Errorf("failed to open storage: %w", err) + } + + if err := initializeStorage(ctx, stor, yamlConfig); err != nil { + stor.Close() + return nil, err + } + + dexConfig := buildDexConfig(yamlConfig, stor, logger) + dexConfig.RefreshTokenPolicy, err = yamlConfig.GetRefreshTokenPolicy(logger) + if err != nil { + stor.Close() + return nil, fmt.Errorf("failed to create refresh token policy: %w", err) + } + + dexSrv, err := server.NewServer(ctx, dexConfig) + if err != nil { + stor.Close() + return nil, fmt.Errorf("failed to create dex server: %w", err) + } + + return &Provider{ + config: &Config{Issuer: yamlConfig.Issuer, GRPCAddr: yamlConfig.GRPC.Addr}, + yamlConfig: yamlConfig, + dexServer: dexSrv, + storage: stor, + logger: logger, + }, nil +} + +// initializeStorage sets up connectors, passwords, and clients in storage +func initializeStorage(ctx context.Context, stor storage.Storage, cfg *YAMLConfig) error { + if cfg.EnablePasswordDB { + if err := ensureLocalConnector(ctx, stor); err != nil { + return fmt.Errorf("failed to ensure local connector: %w", err) + } + } + if err := ensureStaticPasswords(ctx, stor, cfg.StaticPasswords); err != nil { + return err + } + if err := ensureStaticClients(ctx, stor, cfg.StaticClients); err != nil { + return err + } + return ensureStaticConnectors(ctx, stor, cfg.StaticConnectors) +} + +// ensureStaticPasswords creates or updates static passwords in storage +func ensureStaticPasswords(ctx context.Context, stor storage.Storage, passwords []Password) error { + for _, pw := range passwords { + existing, err := stor.GetPassword(ctx, pw.Email) + if errors.Is(err, storage.ErrNotFound) { + if err := stor.CreatePassword(ctx, storage.Password(pw)); err != nil { + return fmt.Errorf("failed to create password for %s: %w", pw.Email, err) + } + continue + } + if err != nil { + return fmt.Errorf("failed to get password for %s: %w", pw.Email, err) + } + if string(existing.Hash) != string(pw.Hash) { + if err := stor.UpdatePassword(ctx, pw.Email, func(old storage.Password) (storage.Password, error) { + old.Hash = pw.Hash + old.Username = pw.Username + return old, nil + }); err != nil { + return fmt.Errorf("failed to update password for %s: %w", pw.Email, err) + } + } + } + return nil +} + +// ensureStaticClients creates or updates static clients in storage +func ensureStaticClients(ctx context.Context, stor storage.Storage, clients []storage.Client) error { + for _, client := range clients { + _, err := stor.GetClient(ctx, client.ID) + if errors.Is(err, storage.ErrNotFound) { + if err := stor.CreateClient(ctx, client); err != nil { + return fmt.Errorf("failed to create client %s: %w", client.ID, err) + } + continue + } + if err != nil { + return fmt.Errorf("failed to get client %s: %w", client.ID, err) + } + if err := stor.UpdateClient(ctx, client.ID, func(old storage.Client) (storage.Client, error) { + old.RedirectURIs = client.RedirectURIs + old.Name = client.Name + old.Public = client.Public + return old, nil + }); err != nil { + return fmt.Errorf("failed to update client %s: %w", client.ID, err) + } + } + return nil +} + +// ensureStaticConnectors creates or updates static connectors in storage +func ensureStaticConnectors(ctx context.Context, stor storage.Storage, connectors []Connector) error { + for _, conn := range connectors { + storConn, err := conn.ToStorageConnector() + if err != nil { + return fmt.Errorf("failed to convert connector %s: %w", conn.ID, err) + } + _, err = stor.GetConnector(ctx, conn.ID) + if errors.Is(err, storage.ErrNotFound) { + if err := stor.CreateConnector(ctx, storConn); err != nil { + return fmt.Errorf("failed to create connector %s: %w", conn.ID, err) + } + continue + } + if err != nil { + return fmt.Errorf("failed to get connector %s: %w", conn.ID, err) + } + if err := stor.UpdateConnector(ctx, conn.ID, func(old storage.Connector) (storage.Connector, error) { + old.Name = storConn.Name + old.Config = storConn.Config + return old, nil + }); err != nil { + return fmt.Errorf("failed to update connector %s: %w", conn.ID, err) + } + } + return nil +} + +// buildDexConfig creates a server.Config with defaults applied +func buildDexConfig(yamlConfig *YAMLConfig, stor storage.Storage, logger *slog.Logger) server.Config { + cfg := yamlConfig.ToServerConfig(stor, logger) + cfg.PrometheusRegistry = prometheus.NewRegistry() + if cfg.RotateKeysAfter == 0 { + cfg.RotateKeysAfter = 24 * 30 * time.Hour + } + if cfg.IDTokensValidFor == 0 { + cfg.IDTokensValidFor = 24 * time.Hour + } + if cfg.Web.Issuer == "" { + cfg.Web.Issuer = "NetBird" + } + if len(cfg.SupportedResponseTypes) == 0 { + cfg.SupportedResponseTypes = []string{"code"} + } + return cfg +} + +// Start starts the HTTP server and optionally the gRPC API server +func (p *Provider) Start(_ context.Context) error { + p.mu.Lock() + defer p.mu.Unlock() + + if p.running { + return fmt.Errorf("already running") + } + + // Determine listen address from config + var addr string + if p.yamlConfig != nil { + addr = p.yamlConfig.Web.HTTP + if addr == "" { + addr = p.yamlConfig.Web.HTTPS + } + } else if p.config != nil && p.config.Port > 0 { + addr = fmt.Sprintf(":%d", p.config.Port) + } + if addr == "" { + return fmt.Errorf("no listen address configured") + } + + listener, err := net.Listen("tcp", addr) + if err != nil { + return fmt.Errorf("failed to listen on %s: %w", addr, err) + } + p.listener = listener + + // Mount Dex at /oauth2/ path for reverse proxy compatibility + // Don't strip the prefix - Dex's issuer includes /oauth2 so it expects the full path + mux := http.NewServeMux() + mux.Handle("/oauth2/", p.dexServer) + + p.httpServer = &http.Server{Handler: mux} + p.running = true + + go func() { + if err := p.httpServer.Serve(listener); err != nil && err != http.ErrServerClosed { + p.logger.Error("http server error", "error", err) + } + }() + + // Start gRPC API server if configured + if p.config.GRPCAddr != "" { + if err := p.startGRPCServer(); err != nil { + // Clean up HTTP server on failure + _ = p.httpServer.Close() + _ = p.listener.Close() + return fmt.Errorf("failed to start gRPC server: %w", err) + } + } + + p.logger.Info("HTTP server started", "addr", addr) + return nil +} + +// startGRPCServer starts the gRPC API server using Dex's built-in API +func (p *Provider) startGRPCServer() error { + grpcListener, err := net.Listen("tcp", p.config.GRPCAddr) + if err != nil { + return fmt.Errorf("failed to listen on %s: %w", p.config.GRPCAddr, err) + } + p.grpcListener = grpcListener + + p.grpcServer = grpc.NewServer() + // Use Dex's built-in API server implementation + // server.NewAPI(storage, logger, version, dexServer) + dexapi.RegisterDexServer(p.grpcServer, server.NewAPI(p.storage, p.logger, "netbird-dex", p.dexServer)) + + go func() { + if err := p.grpcServer.Serve(grpcListener); err != nil { + p.logger.Error("grpc server error", "error", err) + } + }() + + p.logger.Info("gRPC API server started", "addr", p.config.GRPCAddr) + return nil +} + +// Stop gracefully shuts down +func (p *Provider) Stop(ctx context.Context) error { + p.mu.Lock() + defer p.mu.Unlock() + + if !p.running { + return nil + } + + var errs []error + + // Stop gRPC server first + if p.grpcServer != nil { + p.grpcServer.GracefulStop() + p.grpcServer = nil + } + if p.grpcListener != nil { + p.grpcListener.Close() + p.grpcListener = nil + } + + if p.httpServer != nil { + if err := p.httpServer.Shutdown(ctx); err != nil { + errs = append(errs, err) + } + } + + // Explicitly close listener as fallback (Shutdown should do this, but be safe) + if p.listener != nil { + if err := p.listener.Close(); err != nil { + // Ignore "use of closed network connection" - expected after Shutdown + if !strings.Contains(err.Error(), "use of closed") { + errs = append(errs, err) + } + } + p.listener = nil + } + + if p.storage != nil { + if err := p.storage.Close(); err != nil { + errs = append(errs, err) + } + } + + p.httpServer = nil + p.running = false + + if len(errs) > 0 { + return fmt.Errorf("shutdown errors: %v", errs) + } + return nil +} + +// EnsureDefaultClients creates dashboard and CLI OAuth clients +// Uses Dex's storage.Client directly - no custom wrappers +func (p *Provider) EnsureDefaultClients(ctx context.Context, dashboardURIs, cliURIs []string) error { + clients := []storage.Client{ + { + ID: "netbird-dashboard", + Name: "NetBird Dashboard", + RedirectURIs: dashboardURIs, + Public: true, + }, + { + ID: "netbird-cli", + Name: "NetBird CLI", + RedirectURIs: cliURIs, + Public: true, + }, + } + + for _, client := range clients { + _, err := p.storage.GetClient(ctx, client.ID) + if err == storage.ErrNotFound { + if err := p.storage.CreateClient(ctx, client); err != nil { + return fmt.Errorf("failed to create client %s: %w", client.ID, err) + } + continue + } + if err != nil { + return fmt.Errorf("failed to get client %s: %w", client.ID, err) + } + // Update if exists + if err := p.storage.UpdateClient(ctx, client.ID, func(old storage.Client) (storage.Client, error) { + old.RedirectURIs = client.RedirectURIs + return old, nil + }); err != nil { + return fmt.Errorf("failed to update client %s: %w", client.ID, err) + } + } + + p.logger.Info("default OIDC clients ensured") + return nil +} + +// Storage returns the underlying Dex storage for direct access +// Users can use storage.Client, storage.Password, storage.Connector directly +func (p *Provider) Storage() storage.Storage { + return p.storage +} + +// Handler returns the Dex server as an http.Handler for embedding in another server. +// The handler expects requests with path prefix "/oauth2/". +func (p *Provider) Handler() http.Handler { + return p.dexServer +} + +// CreateUser creates a new user with the given email, username, and password. +// Returns the encoded user ID in Dex's format (base64-encoded protobuf with connector ID). +func (p *Provider) CreateUser(ctx context.Context, email, username, password string) (string, error) { + hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost) + if err != nil { + return "", fmt.Errorf("failed to hash password: %w", err) + } + + userID := uuid.New().String() + err = p.storage.CreatePassword(ctx, storage.Password{ + Email: email, + Username: username, + UserID: userID, + Hash: hash, + }) + if err != nil { + return "", err + } + + // Encode the user ID in Dex's format: base64(protobuf{user_id, connector_id}) + // This matches the format Dex uses in JWT tokens + encodedID := EncodeDexUserID(userID, "local") + return encodedID, nil +} + +// EncodeDexUserID encodes user ID and connector ID into Dex's base64-encoded protobuf format. +// Dex uses this format for the 'sub' claim in JWT tokens. +// Format: base64(protobuf message with field 1 = user_id, field 2 = connector_id) +func EncodeDexUserID(userID, connectorID string) string { + // Manually encode protobuf: field 1 (user_id) and field 2 (connector_id) + // Wire type 2 (length-delimited) for strings + var buf []byte + + // Field 1: user_id (tag = 0x0a = field 1, wire type 2) + buf = append(buf, 0x0a) + buf = append(buf, byte(len(userID))) + buf = append(buf, []byte(userID)...) + + // Field 2: connector_id (tag = 0x12 = field 2, wire type 2) + buf = append(buf, 0x12) + buf = append(buf, byte(len(connectorID))) + buf = append(buf, []byte(connectorID)...) + + return base64.RawStdEncoding.EncodeToString(buf) +} + +// DecodeDexUserID decodes Dex's base64-encoded user ID back to the raw user ID and connector ID. +func DecodeDexUserID(encodedID string) (userID, connectorID string, err error) { + // Try RawStdEncoding first, then StdEncoding (with padding) + buf, err := base64.RawStdEncoding.DecodeString(encodedID) + if err != nil { + buf, err = base64.StdEncoding.DecodeString(encodedID) + if err != nil { + return "", "", fmt.Errorf("failed to decode base64: %w", err) + } + } + + // Parse protobuf manually + i := 0 + for i < len(buf) { + if i >= len(buf) { + break + } + tag := buf[i] + i++ + + fieldNum := tag >> 3 + wireType := tag & 0x07 + + if wireType != 2 { // We only expect length-delimited strings + return "", "", fmt.Errorf("unexpected wire type %d", wireType) + } + + if i >= len(buf) { + return "", "", fmt.Errorf("truncated message") + } + length := int(buf[i]) + i++ + + if i+length > len(buf) { + return "", "", fmt.Errorf("truncated string field") + } + value := string(buf[i : i+length]) + i += length + + switch fieldNum { + case 1: + userID = value + case 2: + connectorID = value + } + } + + return userID, connectorID, nil +} + +// GetUser returns a user by email +func (p *Provider) GetUser(ctx context.Context, email string) (storage.Password, error) { + return p.storage.GetPassword(ctx, email) +} + +// GetUserByID returns a user by user ID. +// The userID can be either an encoded Dex ID (base64 protobuf) or a raw UUID. +// Note: This requires iterating through all users since dex storage doesn't index by userID. +func (p *Provider) GetUserByID(ctx context.Context, userID string) (storage.Password, error) { + // Try to decode the user ID in case it's encoded + rawUserID, _, err := DecodeDexUserID(userID) + if err != nil { + // If decoding fails, assume it's already a raw UUID + rawUserID = userID + } + + users, err := p.storage.ListPasswords(ctx) + if err != nil { + return storage.Password{}, fmt.Errorf("failed to list users: %w", err) + } + for _, user := range users { + if user.UserID == rawUserID { + return user, nil + } + } + return storage.Password{}, storage.ErrNotFound +} + +// DeleteUser removes a user by email +func (p *Provider) DeleteUser(ctx context.Context, email string) error { + return p.storage.DeletePassword(ctx, email) +} + +// ListUsers returns all users +func (p *Provider) ListUsers(ctx context.Context) ([]storage.Password, error) { + return p.storage.ListPasswords(ctx) +} + +// ensureLocalConnector creates a local (password) connector if none exists +func ensureLocalConnector(ctx context.Context, stor storage.Storage) error { + connectors, err := stor.ListConnectors(ctx) + if err != nil { + return fmt.Errorf("failed to list connectors: %w", err) + } + + // If any connector exists, we're good + if len(connectors) > 0 { + return nil + } + + // Create a local connector for password authentication + localConnector := storage.Connector{ + ID: "local", + Type: "local", + Name: "Email", + } + + if err := stor.CreateConnector(ctx, localConnector); err != nil { + return fmt.Errorf("failed to create local connector: %w", err) + } + + return nil +} + +// ConnectorConfig represents the configuration for an identity provider connector +type ConnectorConfig struct { + // ID is the unique identifier for the connector + ID string + // Name is a human-readable name for the connector + Name string + // Type is the connector type (oidc, google, microsoft) + Type string + // Issuer is the OIDC issuer URL (for OIDC-based connectors) + Issuer string + // ClientID is the OAuth2 client ID + ClientID string + // ClientSecret is the OAuth2 client secret + ClientSecret string + // RedirectURI is the OAuth2 redirect URI + RedirectURI string +} + +// CreateConnector creates a new connector in Dex storage. +// It maps the connector config to the appropriate Dex connector type and configuration. +func (p *Provider) CreateConnector(ctx context.Context, cfg *ConnectorConfig) (*ConnectorConfig, error) { + // Fill in the redirect URI if not provided + if cfg.RedirectURI == "" { + cfg.RedirectURI = p.GetRedirectURI() + } + + storageConn, err := p.buildStorageConnector(cfg) + if err != nil { + return nil, fmt.Errorf("failed to build connector: %w", err) + } + + if err := p.storage.CreateConnector(ctx, storageConn); err != nil { + return nil, fmt.Errorf("failed to create connector: %w", err) + } + + p.logger.Info("connector created", "id", cfg.ID, "type", cfg.Type) + return cfg, nil +} + +// GetConnector retrieves a connector by ID from Dex storage. +func (p *Provider) GetConnector(ctx context.Context, id string) (*ConnectorConfig, error) { + conn, err := p.storage.GetConnector(ctx, id) + if err != nil { + if err == storage.ErrNotFound { + return nil, err + } + return nil, fmt.Errorf("failed to get connector: %w", err) + } + + return p.parseStorageConnector(conn) +} + +// ListConnectors returns all connectors from Dex storage (excluding the local connector). +func (p *Provider) ListConnectors(ctx context.Context) ([]*ConnectorConfig, error) { + connectors, err := p.storage.ListConnectors(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list connectors: %w", err) + } + + result := make([]*ConnectorConfig, 0, len(connectors)) + for _, conn := range connectors { + // Skip the local password connector + if conn.ID == "local" && conn.Type == "local" { + continue + } + + cfg, err := p.parseStorageConnector(conn) + if err != nil { + p.logger.Warn("failed to parse connector", "id", conn.ID, "error", err) + continue + } + result = append(result, cfg) + } + + return result, nil +} + +// UpdateConnector updates an existing connector in Dex storage. +func (p *Provider) UpdateConnector(ctx context.Context, cfg *ConnectorConfig) error { + storageConn, err := p.buildStorageConnector(cfg) + if err != nil { + return fmt.Errorf("failed to build connector: %w", err) + } + + if err := p.storage.UpdateConnector(ctx, cfg.ID, func(old storage.Connector) (storage.Connector, error) { + return storageConn, nil + }); err != nil { + return fmt.Errorf("failed to update connector: %w", err) + } + + p.logger.Info("connector updated", "id", cfg.ID, "type", cfg.Type) + return nil +} + +// DeleteConnector removes a connector from Dex storage. +func (p *Provider) DeleteConnector(ctx context.Context, id string) error { + // Prevent deletion of the local connector + if id == "local" { + return fmt.Errorf("cannot delete the local password connector") + } + + if err := p.storage.DeleteConnector(ctx, id); err != nil { + return fmt.Errorf("failed to delete connector: %w", err) + } + + p.logger.Info("connector deleted", "id", id) + return nil +} + +// buildStorageConnector creates a storage.Connector from ConnectorConfig. +// It handles the type-specific configuration for each connector type. +func (p *Provider) buildStorageConnector(cfg *ConnectorConfig) (storage.Connector, error) { + redirectURI := p.resolveRedirectURI(cfg.RedirectURI) + + var dexType string + var configData []byte + var err error + + switch cfg.Type { + case "oidc", "zitadel", "entra", "okta", "pocketid", "authentik", "keycloak": + dexType = "oidc" + configData, err = buildOIDCConnectorConfig(cfg, redirectURI) + case "google": + dexType = "google" + configData, err = buildOAuth2ConnectorConfig(cfg, redirectURI) + case "microsoft": + dexType = "microsoft" + configData, err = buildOAuth2ConnectorConfig(cfg, redirectURI) + default: + return storage.Connector{}, fmt.Errorf("unsupported connector type: %s", cfg.Type) + } + if err != nil { + return storage.Connector{}, err + } + + return storage.Connector{ID: cfg.ID, Type: dexType, Name: cfg.Name, Config: configData}, nil +} + +// resolveRedirectURI returns the redirect URI, using a default if not provided +func (p *Provider) resolveRedirectURI(redirectURI string) string { + if redirectURI != "" || p.config == nil { + return redirectURI + } + issuer := strings.TrimSuffix(p.config.Issuer, "/") + if !strings.HasSuffix(issuer, "/oauth2") { + issuer += "/oauth2" + } + return issuer + "/callback" +} + +// buildOIDCConnectorConfig creates config for OIDC-based connectors +func buildOIDCConnectorConfig(cfg *ConnectorConfig, redirectURI string) ([]byte, error) { + oidcConfig := map[string]interface{}{ + "issuer": cfg.Issuer, + "clientID": cfg.ClientID, + "clientSecret": cfg.ClientSecret, + "redirectURI": redirectURI, + "scopes": []string{"openid", "profile", "email"}, + } + switch cfg.Type { + case "zitadel": + oidcConfig["getUserInfo"] = true + case "entra": + oidcConfig["insecureSkipEmailVerified"] = true + oidcConfig["claimMapping"] = map[string]string{"email": "preferred_username"} + case "okta": + oidcConfig["insecureSkipEmailVerified"] = true + } + return encodeConnectorConfig(oidcConfig) +} + +// buildOAuth2ConnectorConfig creates config for OAuth2 connectors (google, microsoft) +func buildOAuth2ConnectorConfig(cfg *ConnectorConfig, redirectURI string) ([]byte, error) { + return encodeConnectorConfig(map[string]interface{}{ + "clientID": cfg.ClientID, + "clientSecret": cfg.ClientSecret, + "redirectURI": redirectURI, + }) +} + +// parseStorageConnector converts a storage.Connector back to ConnectorConfig. +// It infers the original identity provider type from the Dex connector type and ID. +func (p *Provider) parseStorageConnector(conn storage.Connector) (*ConnectorConfig, error) { + cfg := &ConnectorConfig{ + ID: conn.ID, + Name: conn.Name, + } + + if len(conn.Config) == 0 { + cfg.Type = conn.Type + return cfg, nil + } + + var configMap map[string]interface{} + if err := decodeConnectorConfig(conn.Config, &configMap); err != nil { + return nil, fmt.Errorf("failed to parse connector config: %w", err) + } + + // Extract common fields + if v, ok := configMap["clientID"].(string); ok { + cfg.ClientID = v + } + if v, ok := configMap["clientSecret"].(string); ok { + cfg.ClientSecret = v + } + if v, ok := configMap["redirectURI"].(string); ok { + cfg.RedirectURI = v + } + if v, ok := configMap["issuer"].(string); ok { + cfg.Issuer = v + } + + // Infer the original identity provider type from Dex connector type and ID + cfg.Type = inferIdentityProviderType(conn.Type, conn.ID, configMap) + + return cfg, nil +} + +// inferIdentityProviderType determines the original identity provider type +// based on the Dex connector type, connector ID, and configuration. +func inferIdentityProviderType(dexType, connectorID string, _ map[string]interface{}) string { + if dexType != "oidc" { + return dexType + } + return inferOIDCProviderType(connectorID) +} + +// inferOIDCProviderType infers the specific OIDC provider from connector ID +func inferOIDCProviderType(connectorID string) string { + connectorIDLower := strings.ToLower(connectorID) + for _, provider := range []string{"pocketid", "zitadel", "entra", "okta", "authentik", "keycloak"} { + if strings.Contains(connectorIDLower, provider) { + return provider + } + } + return "oidc" +} + +// encodeConnectorConfig serializes connector config to JSON bytes. +func encodeConnectorConfig(config map[string]interface{}) ([]byte, error) { + return json.Marshal(config) +} + +// decodeConnectorConfig deserializes connector config from JSON bytes. +func decodeConnectorConfig(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// GetRedirectURI returns the default redirect URI for connectors. +func (p *Provider) GetRedirectURI() string { + if p.config == nil { + return "" + } + issuer := strings.TrimSuffix(p.config.Issuer, "/") + if !strings.HasSuffix(issuer, "/oauth2") { + issuer += "/oauth2" + } + return issuer + "/callback" +} + +// GetIssuer returns the OIDC issuer URL. +func (p *Provider) GetIssuer() string { + if p.config == nil { + return "" + } + issuer := strings.TrimSuffix(p.config.Issuer, "/") + if !strings.HasSuffix(issuer, "/oauth2") { + issuer += "/oauth2" + } + return issuer +} + +// GetKeysLocation returns the JWKS endpoint URL for token validation. +func (p *Provider) GetKeysLocation() string { + issuer := p.GetIssuer() + if issuer == "" { + return "" + } + return issuer + "/keys" +} + +// GetTokenEndpoint returns the OAuth2 token endpoint URL. +func (p *Provider) GetTokenEndpoint() string { + issuer := p.GetIssuer() + if issuer == "" { + return "" + } + return issuer + "/token" +} + +// GetDeviceAuthEndpoint returns the OAuth2 device authorization endpoint URL. +func (p *Provider) GetDeviceAuthEndpoint() string { + issuer := p.GetIssuer() + if issuer == "" { + return "" + } + return issuer + "/device/code" +} + +// GetAuthorizationEndpoint returns the OAuth2 authorization endpoint URL. +func (p *Provider) GetAuthorizationEndpoint() string { + issuer := p.GetIssuer() + if issuer == "" { + return "" + } + return issuer + "/auth" +} diff --git a/idp/dex/provider_test.go b/idp/dex/provider_test.go new file mode 100644 index 000000000..bc34e592f --- /dev/null +++ b/idp/dex/provider_test.go @@ -0,0 +1,197 @@ +package dex + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestUserCreationFlow(t *testing.T) { + ctx := context.Background() + + // Create a temporary directory for the test + tmpDir, err := os.MkdirTemp("", "dex-test-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + // Create provider with minimal config + config := &Config{ + Issuer: "http://localhost:5556/dex", + Port: 5556, + DataDir: tmpDir, + } + + provider, err := NewProvider(ctx, config) + require.NoError(t, err) + defer func() { _ = provider.Stop(ctx) }() + + // Test user data + email := "test@example.com" + username := "testuser" + password := "testpassword123" + + // Create the user + encodedID, err := provider.CreateUser(ctx, email, username, password) + require.NoError(t, err) + require.NotEmpty(t, encodedID) + + t.Logf("Created user with encoded ID: %s", encodedID) + + // Verify the encoded ID can be decoded + rawUserID, connectorID, err := DecodeDexUserID(encodedID) + require.NoError(t, err) + assert.NotEmpty(t, rawUserID) + assert.Equal(t, "local", connectorID) + + t.Logf("Decoded: rawUserID=%s, connectorID=%s", rawUserID, connectorID) + + // Verify we can look up the user by encoded ID + user, err := provider.GetUserByID(ctx, encodedID) + require.NoError(t, err) + assert.Equal(t, email, user.Email) + assert.Equal(t, username, user.Username) + assert.Equal(t, rawUserID, user.UserID) + + // Verify we can also look up by raw UUID (backwards compatibility) + user2, err := provider.GetUserByID(ctx, rawUserID) + require.NoError(t, err) + assert.Equal(t, email, user2.Email) + + // Verify we can look up by email + user3, err := provider.GetUser(ctx, email) + require.NoError(t, err) + assert.Equal(t, rawUserID, user3.UserID) + + // Verify encoding produces consistent format + reEncodedID := EncodeDexUserID(rawUserID, "local") + assert.Equal(t, encodedID, reEncodedID) +} + +func TestDecodeDexUserID(t *testing.T) { + tests := []struct { + name string + encodedID string + wantUserID string + wantConnID string + wantErr bool + }{ + { + name: "valid encoded ID", + encodedID: "CiQ3YWFkOGMwNS0zMjg3LTQ3M2YtYjQyYS0zNjU1MDRiZjI1ZTcSBWxvY2Fs", + wantUserID: "7aad8c05-3287-473f-b42a-365504bf25e7", + wantConnID: "local", + wantErr: false, + }, + { + name: "invalid base64", + encodedID: "not-valid-base64!!!", + wantUserID: "", + wantConnID: "", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + userID, connID, err := DecodeDexUserID(tt.encodedID) + if tt.wantErr { + assert.Error(t, err) + return + } + require.NoError(t, err) + assert.Equal(t, tt.wantUserID, userID) + assert.Equal(t, tt.wantConnID, connID) + }) + } +} + +func TestEncodeDexUserID(t *testing.T) { + userID := "7aad8c05-3287-473f-b42a-365504bf25e7" + connectorID := "local" + + encoded := EncodeDexUserID(userID, connectorID) + assert.NotEmpty(t, encoded) + + // Verify round-trip + decodedUserID, decodedConnID, err := DecodeDexUserID(encoded) + require.NoError(t, err) + assert.Equal(t, userID, decodedUserID) + assert.Equal(t, connectorID, decodedConnID) +} + +func TestEncodeDexUserID_MatchesDexFormat(t *testing.T) { + // This is an actual ID from Dex - verify our encoding matches + knownEncodedID := "CiQ3YWFkOGMwNS0zMjg3LTQ3M2YtYjQyYS0zNjU1MDRiZjI1ZTcSBWxvY2Fs" + knownUserID := "7aad8c05-3287-473f-b42a-365504bf25e7" + knownConnectorID := "local" + + // Decode the known ID + userID, connID, err := DecodeDexUserID(knownEncodedID) + require.NoError(t, err) + assert.Equal(t, knownUserID, userID) + assert.Equal(t, knownConnectorID, connID) + + // Re-encode and verify it matches + reEncoded := EncodeDexUserID(knownUserID, knownConnectorID) + assert.Equal(t, knownEncodedID, reEncoded) +} + +func TestCreateUserInTempDB(t *testing.T) { + ctx := context.Background() + + // Create temp directory + tmpDir, err := os.MkdirTemp("", "dex-create-user-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + // Create YAML config for the test + yamlContent := ` +issuer: http://localhost:5556/dex +storage: + type: sqlite3 + config: + file: ` + filepath.Join(tmpDir, "dex.db") + ` +web: + http: 127.0.0.1:5556 +enablePasswordDB: true +` + configPath := filepath.Join(tmpDir, "config.yaml") + err = os.WriteFile(configPath, []byte(yamlContent), 0644) + require.NoError(t, err) + + // Load config and create provider + yamlConfig, err := LoadConfig(configPath) + require.NoError(t, err) + + provider, err := NewProviderFromYAML(ctx, yamlConfig) + require.NoError(t, err) + defer func() { _ = provider.Stop(ctx) }() + + // Create user + email := "newuser@example.com" + username := "newuser" + password := "securepassword123" + + encodedID, err := provider.CreateUser(ctx, email, username, password) + require.NoError(t, err) + + t.Logf("Created user: email=%s, encodedID=%s", email, encodedID) + + // Verify lookup works with encoded ID + user, err := provider.GetUserByID(ctx, encodedID) + require.NoError(t, err) + assert.Equal(t, email, user.Email) + assert.Equal(t, username, user.Username) + + // Decode and verify format + rawID, connID, err := DecodeDexUserID(encodedID) + require.NoError(t, err) + assert.Equal(t, "local", connID) + assert.Equal(t, rawID, user.UserID) + + t.Logf("User lookup successful: rawID=%s, connectorID=%s", rawID, connID) +} diff --git a/idp/dex/web/robots.txt b/idp/dex/web/robots.txt new file mode 100755 index 000000000..77470cb39 --- /dev/null +++ b/idp/dex/web/robots.txt @@ -0,0 +1,2 @@ +User-agent: * +Disallow: / \ No newline at end of file diff --git a/idp/dex/web/static/main.css b/idp/dex/web/static/main.css new file mode 100755 index 000000000..39302c4c1 --- /dev/null +++ b/idp/dex/web/static/main.css @@ -0,0 +1 @@ +/* NetBird DEX Static CSS - main styles are inline in header.html */ \ No newline at end of file diff --git a/idp/dex/web/templates/approval.html b/idp/dex/web/templates/approval.html new file mode 100755 index 000000000..c84c3b3a0 --- /dev/null +++ b/idp/dex/web/templates/approval.html @@ -0,0 +1,26 @@ +{{ template "header.html" . }} + +
+

Grant Access

+

{{ .Client }} wants to access your account

+ +
+ + + +
+ +
+ +
+ + + +
+
+ +{{ template "footer.html" . }} \ No newline at end of file diff --git a/idp/dex/web/templates/device.html b/idp/dex/web/templates/device.html new file mode 100755 index 000000000..61faa6d53 --- /dev/null +++ b/idp/dex/web/templates/device.html @@ -0,0 +1,34 @@ +{{ template "header.html" . }} + +
+

Device Login

+

Enter the code shown on your device

+ +
+ {{ if .Invalid }} +
+ Invalid user code. +
+ {{ end }} + +
+ + +
+ + +
+
+ +{{ template "footer.html" . }} \ No newline at end of file diff --git a/idp/dex/web/templates/device_success.html b/idp/dex/web/templates/device_success.html new file mode 100755 index 000000000..af1d02031 --- /dev/null +++ b/idp/dex/web/templates/device_success.html @@ -0,0 +1,16 @@ +{{ template "header.html" . }} + +
+
+ + + + +
+

Device Authorized

+

+ Your device has been successfully authorized. You can close this window. +

+
+ +{{ template "footer.html" . }} \ No newline at end of file diff --git a/idp/dex/web/templates/error.html b/idp/dex/web/templates/error.html new file mode 100755 index 000000000..5dc2d190f --- /dev/null +++ b/idp/dex/web/templates/error.html @@ -0,0 +1,16 @@ +{{ template "header.html" . }} + +
+
+ + + + +
+

{{ .ErrType }}

+
+ {{ .ErrMsg }} +
+
+ +{{ template "footer.html" . }} \ No newline at end of file diff --git a/idp/dex/web/templates/footer.html b/idp/dex/web/templates/footer.html new file mode 100755 index 000000000..17c7245b6 --- /dev/null +++ b/idp/dex/web/templates/footer.html @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/idp/dex/web/templates/header.html b/idp/dex/web/templates/header.html new file mode 100755 index 000000000..5759ee321 --- /dev/null +++ b/idp/dex/web/templates/header.html @@ -0,0 +1,70 @@ + + + + + + {{ issuer }} + + + + + +
+ \ No newline at end of file diff --git a/idp/dex/web/templates/login.html b/idp/dex/web/templates/login.html new file mode 100755 index 000000000..681532d86 --- /dev/null +++ b/idp/dex/web/templates/login.html @@ -0,0 +1,56 @@ +{{ template "header.html" . }} + +
+

Sign in

+

Choose your login method

+ + {{/* First pass: render Email/Local connectors at the top */}} + {{ range $c := .Connectors }} + {{- $nameLower := lower $c.Name -}} + {{- $idLower := lower $c.ID -}} + {{- if or (contains "email" $nameLower) (contains "email" $idLower) (contains "local" $nameLower) (contains "local" $idLower) -}} + + + Continue with {{ $c.Name }} + + {{- end -}} + {{ end }} + + {{/* Second pass: render all other connectors */}} + {{ range $c := .Connectors }} + {{- $nameLower := lower $c.Name -}} + {{- $idLower := lower $c.ID -}} + {{- if not (or (contains "email" $nameLower) (contains "email" $idLower) (contains "local" $nameLower) (contains "local" $idLower)) -}} + + {{- $iconClass := "nb-icon-default" -}} + {{- if or (contains "google" $nameLower) (contains "google" $idLower) -}} + {{- $iconClass = "nb-icon-google" -}} + {{- else if or (contains "github" $nameLower) (contains "github" $idLower) -}} + {{- $iconClass = "nb-icon-github" -}} + {{- else if or (contains "entra" $nameLower) (contains "entra" $idLower) -}} + {{- $iconClass = "nb-icon-entra" -}} + {{- else if or (contains "azure" $nameLower) (contains "azure" $idLower) -}} + {{- $iconClass = "nb-icon-azure" -}} + {{- else if or (contains "microsoft" $nameLower) (contains "microsoft" $idLower) -}} + {{- $iconClass = "nb-icon-microsoft" -}} + {{- else if or (contains "okta" $nameLower) (contains "okta" $idLower) -}} + {{- $iconClass = "nb-icon-okta" -}} + {{- else if or (contains "jumpcloud" $nameLower) (contains "jumpcloud" $idLower) -}} + {{- $iconClass = "nb-icon-jumpcloud" -}} + {{- else if or (contains "pocket" $nameLower) (contains "pocket" $idLower) -}} + {{- $iconClass = "nb-icon-pocketid" -}} + {{- else if or (contains "zitadel" $nameLower) (contains "zitadel" $idLower) -}} + {{- $iconClass = "nb-icon-zitadel" -}} + {{- else if or (contains "authentik" $nameLower) (contains "authentik" $idLower) -}} + {{- $iconClass = "nb-icon-authentik" -}} + {{- else if or (contains "keycloak" $nameLower) (contains "keycloak" $idLower) -}} + {{- $iconClass = "nb-icon-keycloak" -}} + {{- end -}} + + Continue with {{ $c.Name }} + + {{- end -}} + {{ end }} +
+ +{{ template "footer.html" . }} \ No newline at end of file diff --git a/idp/dex/web/templates/oob.html b/idp/dex/web/templates/oob.html new file mode 100755 index 000000000..b887dab61 --- /dev/null +++ b/idp/dex/web/templates/oob.html @@ -0,0 +1,19 @@ +{{ template "header.html" . }} + +
+
+ + + + +
+

Login Successful

+

+ Copy this code back to your application: +

+
+ {{ .Code }} +
+
+ +{{ template "footer.html" . }} \ No newline at end of file diff --git a/idp/dex/web/templates/password.html b/idp/dex/web/templates/password.html new file mode 100755 index 000000000..1d1b8282e --- /dev/null +++ b/idp/dex/web/templates/password.html @@ -0,0 +1,58 @@ +{{ template "header.html" . }} + +
+

Sign in

+

Enter your credentials

+ +
+ {{ if .Invalid }} +
+ Invalid {{ .UsernamePrompt }} or password. +
+ {{ end }} + +
+ + +
+ +
+ + +
+ + +
+ + {{ if .BackLink }} + + {{ end }} +
+ + + +{{ template "footer.html" . }} \ No newline at end of file diff --git a/idp/dex/web/themes/light/favicon.ico b/idp/dex/web/themes/light/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..2bab8a503d92aad4dd9bb5eccc02f044762a7f6b GIT binary patch literal 106176 zcmeGl30zFi|80k;2q8xYa!0l0mm@0oEr0jXr6MG9tkSs&xz8U-BKMKD9Fa=4$W@fW zCQ2cG6gJzf&71k2Z{ED`d}jv3NLULD{gJTl*vu{%wiJF- zshoVjmLx0`t{E9|^P@2=y$1=?*5>3d^v1B3)=;xMH{TY+g13{fwveLsmPujQ{G%jn z`;6%mTFZ8og{E6inmBeQDn-x|p*C520dRKdGHL9n*^Vh+Q#T${wsHNG6F+OD`m}jB zt&8?$dbXY4Ub)R8JF|3ppMlobjt$$seG=7L@qm%uD~ng@vt}jwt6BwX-ylauOSD-d z5heAm^VK&;)T3GqHdVJ$*6#e)+d6fY`toyGkJMgR^>W@5dE(NS??3atcnA7?`(mEA z-u>srtODvbQT!=KhiV#~QJH0R-szmh3DPl%lAcUCx~_*J^*m`pD2*&xHjUKXr|ldK zW0^7;X3zA&>DK-q!f5ior0J?_O}9LBIkh6QUwTKnE!CS8Z*My5sY`-FwAuiy*FUFn z9w!Ayv>B>WILALd?A)=Gt&0_@5=O^%+&3Py_-hxs@0AE;rU_#(Hoo+ky;48y-u?>< zH@0C8ng4VA^}Dna(aA3jM-(VXQ*(E#czzz8nMs%K^|~OXRjPZ|<8G^h;!DSnx{tIO zGez^u5jK(L{jf8(z}-fA9$n6q z(bH>Kyh8NFBDa&+zCB;V|0}w`)9Y!A`yTBWc@Gyyj+}L(=)o4Zi*~x7AFdC%dO!N@ zLFJbR*KhYw`Z=mFb;~#ZcGT;GF3oj&5#5)n2#v9dlP%TU5|(=>o&&DCF+1jF7zf<8 z&@}lny(?+rl>EK7={x0emZq;{ggiWz_Ejdt%RB6#gwfGXb2MVD-gTog!fE3= zw?NlnHa2nAsV^z*?G>3>G~4~XZzq|X>nFsibi}TnDSZ)=*_pMPJ_T@2DV}^qwQR~Y zhnLfh7Ce7G&bG8OYgleZ{H6sKeqC(KNIu<2*@1Iq#{@_BIq!40>kzem60y`wy8Zj| z)|2LSkxafHIDM_l`WDFw$}3BTNaT}d(G9v?3%^c2w&r%E=D7E^UfZ9XpX8J?so3N zts-*h8^sx3nyv@OzX;KYDrs-uo@sKx;A{MfH|xiB&c@uZmC}(4&#AAK z!i?^QfPN&IYi4SX{`cIQjF{Qu^|oFMGzdOx$Xp+yW0^6m*T`3&R>k)y+WOr0>_3!r zLyyV$83;%+aglh<+eqrUD2}g4P(c@KcsVb5dE6zowkYBeM`ERq;60fA!R(+ z)};^gt%o^j`rVX+lk(3@DOV;%gsnNf%4f%=LqQVeEm$${mVGsJ4%3QS`%f{`<`F5~ z&u<`o?2E}A3l$F(J@1w3CjUm>rH!KdUA5GdkY}T22bLHxCoweo4Z3wtA(}OO0d@O* zjriyv(+-TWKcJ#djh8q<-+3oHXtkmIxqw42I(_RtEO)>wX3HZ7OZ;Cx%dnAz?jc@g z6=~0g+jA2StI%zz-7Ht9mhdz0dSso#V+o?Wg%+`=G}l)EG}8&zv0?AzxVD z>->w!H)7~YR9iX66N^-*j>u%5QgK#I+LEzoMts@8EA%O)-Af8q4b1NU^WW1eGDCXZ zouoo~xYwDgurPbVf!c@i@z)o2gRYK}Dfzn^$MQjQ%+C!t-SHk~M<|#`)f+ zdqqXQbzJJZ`E6>Rk!oKNo5lL8s6 zwK$#ikN3A7H+`*@PD+PAm~n@pp0@NrfMTz}r+Gu3p9{a8&|6=k!(leX_5cCtmL%z@W%rr3;7ONvGF8>55;r#J$+@jv*gix z>-Es+-ET>>6aQ`_U)Hr+>FF3nHh|l zV(Kwv)KE{d4Q9Ts`0B*icYb|7cX>U)cV(x01e`RnW0x1#)&T@u$X7|TFiy%`EGXxB!)RUdv9f~ zJMAG8kw0EBamTgNyo@L_!$}j?7?_$mAB%DEJk#pJnSHzEb38{DzSN{TT#orSWSzf^ z!FQPq+v1$DDo^BR%lAe&9h9@J69KDH`FW2pC%XnpsnGlyPKyZD;#_O<&kqI40CI1 zCbNFdGfqoFNbGg(_Xl)T?o0{RPh@>{?-y?DD=Fug z5FMi__uobg>#%pwuF>DMR(7^FzFsuBfB3u}+Iv}G51Ch~ky6x+uRDFoE1a7C-GsK% z?3LuB{0%9uM)sNWv!8M}<%U_I3}akwUQj!`;b*BiWvzy$=I!`2sf6nF)hYa8TF#y; zN7QAU&Qqn$vSl(HOq3G6+tHQX@4gI+irjDgc)(|gaEA4ly9$p!KAzR}V4K5tjPH7> z6)R&{jKvTCyzK9!x9e~J@L_}+818&Gq4frLJ=^aI2A;R*ly>WiRWa;r-ogDbC7rZ0 zilrZ3b)4X97nw-!bH26Bjk5gP;hmqunC6~xUlKM$c11WjLblK&OR2}frTVK`nI~LsNV;jcPGN!dR*o`+cvdW$9?~NKlZLT@X6q% z-@BbBVd*j%p7e5wnYsff>+4UWZ8!y@Q1l|}=f1FXeR1AYDaZP$BnCZ4_@ppdI|tJ# zodymMHy$7JT0S>I`kXEWUe4h$6cywBM;#8RZ8pTrT4%2@opSWm$lE~GSos|FTd8XH zerlU9X8ZdcWou53>1!mbxoq0Wq2XSoM;9yP2JK#;_Vr$H6s4Wqq&XN?II3{k0bOa9 ze)pBL-oADjX5r8^dp&KmMVwk|H3!psqh~51`*2frRJ`RLe$!&B$MTSlhDZ0RE7KJ$ zBWz8St{j=&0-4weiLzr65gmtjSRMPs%W}Eytydj}1}INGb9v0&jG5ioHl}dwb66Y3 zO6wN1*R<3nSJM{eFEwm==fALRXU`1BFnIjhw=`j7Onsc|K~sqL#mCX8ve<*ZwVa*Xb-; zDuG$_YH6kL^Nxo*#dJ#0E6F`e3nujl*UAU^zrcU#R=;-ag*!pmsG-kB??guvoH<{2EltH&G3c9TPw|Iz)`Kyr^ z-*zcB24Tih+66wg#;{#d83w^Y(tS)BiF3NFV!g2KuPQy3ev7tq*@UIY3i>!m1WCAk z2s^y$ihk0NaN~u^t`}9LP2S05l%jjiu`Ol7B&SwyK7}3L^JMg~JU#P~hojXc3SP;d zIDMdq-9H!{ppD)eN;|8V^WI1QW2(c3)nCR>{B(Ge_BHRIPd(XkkL}|tsSiZ#kw205 z(xHFX-mU>d+q)-H+W9@4hGGA1ve#gCwFt^uxbV;eJ@ea^1!vw#UFDaY@k{HWA zenUe%yIx)`XT5NxkT(QHEj^dgA^2kFL9Q^Uj*GY5AI_%3mfq!zo$VTfVXIj$G_Jh< z*w2}oM>F?a^>7I?LPuTZgEKBS7e?snp1*Z2cIEqE|86htKRX{6Ysa?HR_R)kY_hhferj4ZXEGPEHfntk>~7uO8B)fc?n* zocQXW6g?eHui#tHKKXf=_uI2|NVwUjRb97XSij@BJG)U~s860S?*MM(o>yb zKd-f(iXG8iWqreJs9fQ;tcBU%jy?VMK3cwX_-3aNwapG;W~r;X9f2>zVK-v#zl`V@ z`fNeFHJ5@`#GWyj`fr#@T)0V)jh6gukl&E!U+y0rB6-3gr(^}|g{y0__thwc6+R4AG(-qS?#Pj}6W0}Y_Mqh&qph2?lRG&2UMdbX|ra_iL@?a<(7^`KAO!(_Wir;8JG`?{tmM4J>GSN@OP3FuE6 zv;RbH_N3<@0}7V@m%sl|DSa4iUI3$o0n)!q=WGqCjPn(jaE*|_%RkPBpQgsb)A%}X zK;BUa|0sn%?kf)Lr{ADy4s}*sWIb1D$Q4zIw69LC#iK~}+bq|fdJsURnJzsLbumyU z_S^YIL3z?{ZNII%ZBXiuIkq=!X-i)%G0RUgR(0aI_dB8%hwI3O4x%Z%O?R48JdUKj z_r8?E`J@c_JP?h$%5en?58YU+w<<@-zG)}r;Poa)S4;dTeSX7<&zX<&bn$?|S}ODDd4C3)TXQl3HZr*PVxw;ix4P}_?BdeO#Rb9}YV zuXH|5-C1h!qX#3Tz*oY*6}L-aHi$}ZYO6ok`WOG_Ia1m-(CexF$| z%QvV)^4qo830fWbAm*sq7Wy39TQt z(Bf&?jhMJHbBnb1_l9SCwqFyn2fNLerRsGU@^UFTf>hYv|CQ^^3){8Rq@`$C>!*&)DiN)HbiVyNVB>W}+7xH6r`pteu8TUGGHJ#VX>#EEr6(84^3Zwg~ zoJ^WXN;p^gVwM**_IP2`40S14n#RGtBl7gix+g=ItTC+vX1C*tyYz^HV6`@$1%tO3 zElfUtT#iXHJW{ehD=cv=DPijcr!Qps{USG;!yXQ!FVE9!-R7;7V~4oVtOJSTS0~*_ zo<{$lT<=g>MPEQ8jR63;T=lGnWviv+Ye>C_ZT9o{6i1+8f zD@U%5_P5zuIBD_jTW=^`7WiZH?|r-SsY|-7ll|bg`cEw<%X*k;s~?;5X7I_PBfhBE zesSS6`cCJo_L6Bioo21SeYnVSYthW2FWY@h8T&iRQl;Ueu8X6w@rirA7_E=AW7zsC zZVk)%>O+ft*j}0*t%*&~F1qHpyY2jMPLy>PUDO7eE(lKo)Tgi`6J`gFP#hY*+Iapl zlTjD8_WY{B40Sl;>Yb%?bv#M?&J!bdnUCvQkK1Yz61Vh z3~G|!h@#*y*^njMUdugrWozoaf1Fc*isg!=luhK1y7aC#<2uk;b~-Uf_O#c0Ld_}i zO#GN)Z0dbrTvw8EbZ3?ly+19>Vwhv!g{73EtNle{t9#$qwYx=!k6!E@;gdF*9zU$f8zTeiEPB`2jqq5yc z3|iKaz5N3M5+YLNfvL*+mF!+H_m6c(gG4DmW`qBO1?HPqVI_Gussov%Dnu?CCy5* z!L%r*6Bv$04;A_E8;lQ|2r?;yc%5*3TVTIxR-gBrU?6zD!Q<$)JQ8zFzOVK5y!hR^sYUAr{b5^Ry%g+os!7yw`p0Od)<;p1}4pXP0O07zV6=` zWuNG_o!{x7eLt_v52%^;GG9S!d6?7VeKEdL$Fsr?58kP!o1EIeh}BIy(sfeN(Ud^i zlrDpBbtBD>DEBldnet;#$dQhpJGVO0V^xCJ+ubJ8pnv7oyTC9tBL3+#Ypt?`>a26t z@s!eq@|R4cvEWuiA5+@-kQp=1{HHCo^;T%EN6KBA`HoSCd@*o$9rU;vMpIumb?xKe zE^C-?q`&3u`7o2A71cd<9F|PRG}R52qVv7IO-4NbP8+@S$h9fnQ?ngz7kds=B59jm zDA5GZVb;Uzx3=~y9lp+TThO8M4eyd@^PCfwz%#P*rKU{Ur?5RH{cfsbd zK{u12Q@wOlFbk~~b3A%U0tlowPAHCcku4(Y9gXQ<}KT_W5kBiejl4(&uIWDpEUgXBiC5zm$o%}x7 z=nRvXueZoik=dR>k(ZtIWi(J_uqcw2KaP>r(<{~ln>vSam|74@vr0J9vD5cty)krc ztMQjpT+Yt;C_%S)Dl>h(>4WY%6Xr))rL^?up{ZeRnv`I;MW-XOWBGJHFa+z56L$`~ zw52W6F>cXfnJ}{L(Io>g>J#eD*ts9trakL)?b0o z=NI&m@VAINA!!S>jWbnl8qGT?aiX93qMr_oyhrb5s$tI@mvo##G4&cqA3WOMF*&}c zZhKoByB22*u;7(abHg>qbjta#wda_Kn=X_>|E#Ol%P+P;Pjc_{AZT$K^T5`g=N^`B zQO{n=$P~qN!nSSKGwxu!75Zg<){QWRxe{#Bxaniz+j3Js_)SF@U?K(0v~d1N*q(tH z8@LyUgpdFs0YU zBtS@jkN_b8LIQ*Y2ni4pAS6IYfRI3i1hhT#WdYm(sDBh}%BPZ-BQ}j=^1KpdkIIx)INT|7P34rdM{sjG# zS69(JvH^3}51pmCu-^so$4>&F`<4J{zq1XH*W^_`_xSUVQVW(pKi-Kljs!sWvwm0o zlh^UkJ;x5pbgWshB#`*yCxJm;ObqlN`McQwnZj51oIb&V^+3;=iMA*r{`g1$bgv0e z^1Ijo*+Br^=kb z0Pw9j+W^@~NZsRhv6!`)Klel_k^ty_b+gsKGw9wzi02;HJxa}41v*PvJqRuHmjLKq z8sKHKvjJDo{o;D+9=8Jv)@Ab%=dZx2JSF&yi{M~ne?)v}~HK`3yLHE`o)IHt@ShAM=-P#q5d(i#P zCeyzM=zd8JpFXa?hNkAMEb?+zJHe#z_ud7%R{;1=>^6XOZ_`M+$8BdZYv13iS;096 z-CK)Q|C{p5A%3Iw`5at#qDhOPf9Wr{B>v7jwKf+>fc~F|#Rib>SBORT_}sv)zf-Hi zaSgg32~gJ9Ho%VE5zVtLc>iIY3!AzUe|1R!bbp~S^&FdlWPNPEs4k&r{u0pI zlt%*n-)o2suw!?GiQmAV(Z)_KSogH4a7F_0$5#TNdtHFi`q%(@BRu!bQTMoQS?4a~ z%REuYmH_D9PiXx&Bz^&TF=B z$@1XCI#I-x0O+0~kp5j^KBu9vJ2iFGn0a$nK70?V5sKz70nmL5fE2zqAj;UC#?*Pu zwimN5^5>o?MG^qrPv@zBQOEAo)KMen!8bL8&@z7s%(+yKY~a=}Ho%VEX=1-_1MNfG zBR^VeYa#JH$lnBz{s#d3tgrzTzp?c9);$`paQT^62m3@ZNC0GYfGsbW!*BH1od!PF z0?ju-{1I3G8CstCtq3LamjKABz4oU3Z1VEo{JH1a7MhRd>K~=<{JAGewKwNW!gwiC zpl3@Q&u;^9pmpU{^pEDfRR}HDE&;Mf0h$vy2G4)v#q2gvpTgQDT4z{A|LFI*+PVL` zuMXS89{=ybWySXIi&Q<2ZVeDU1|0w>A*jKEExr0ooxbE>(tTCL8VQbda-u+eW zAWmm|83x{`02J3DAGZ7i+Mh|xy5*(witDiEPypW0y4YTSn+*WZMgaK&$frVvzndQz zy|)D9@5#{&wQyr*#D-#-SFI_w?5BlBh}FczePQGMg5}UpJoHVQzwArddQ=88KSuXQSRd-Y8$AW@2#DF z{{7#fKS}?f^R@u-BGWzI2MDugtElx`M|*YneCo0Z6#FvhPp|=P&pbKc<-VwOkGIz> zV?)4Ch2}1C8ta)xXzncktxW*l>s&8i`uWJ{4V3+M=MKS|PPE6*89sc~S01(3L$Sfl$|qp|92P%fF7Fk>@W5?4z<0L#c&PnG1!Q9kJiRF>vM;IzLky0 zzreC>l6Y~5O{%A_Phh@4w)b^BbDQPbA)w6+fM@KESnUF>4Hk2(SdG|jVRS0&z0L02 zA<*qifXcO6{Qa83@>{R_e>uCc5&Qo}?)Pi*{{dv(Y|b45S|$J#0@S3lY4a#nOdR_I zpz(g)v0b8;g=cdZ3xdu^0OU41-Q(YbjlCBfdguSTrbE=auC2WSka_o}v;n}c1Hf#q zy2t5iwEh0!eS%^UaXd|s1~iraf!F>3Uz&ht;pL6ac58%pe?8+mgr}eXd(}wXHUK*B z0f5FJqUNR1+7orG72kM{xf*&iC_l+m0*Gv^Q^pDYR+ zfNu&Ezj6GJ(>?wkSji_w0OHAO*{4O+dTOlplC?MIHR#--Ayjt!#{NH5_iVmuTRQ~b zhisqD@Au5tw57ET=>tGMoBn9s<9$GtGlGzR=r?-L*VK2-Jo3qRs*epoz4d>C?%6zU z$}c0Y%>5zc+T5D5Zm#*G!s;JnvJ{|s#BVg-_z39>&7GlhCNuze?=#)FG&Hu|pnX)x zg4zJwi_Tv{@nwxoS5vl+)*Yg=1v=nz(cp}A8h33<)5$c7ytx2pm1hd_mdmXgSUf_ME;of!tqXAGqc^Uxb7uoO2=E^4v=tI6~wB7*qWA&N` zLHi6t-8%rymCuII0f=nw8`?s96NqGA3TQ`AzQl1~F3_VawBiM<0ByGh=mRhk0L1}Y1EBt6GXU}< z`2w5)xC9Uca0MV3fCg|8;1s|SfL#D?0CoUqJURwo0D#!`Mdaf93aL2sB2FA<1ytu> ze;ept4?tY~nz(ELjq8U0^~x1c03a)L7AcCW5SM<$fhU%{C7;-5T5uca1^Q?FVK%T> zK=z5-Kmwq91%SXm$Oh1!d#wrO3N8WUHv*Y00YGc;#nDeUHjZtAb5GofO8|7Q0&w{c zumQAI@2}fu8rNe(Q;?kjz}?^64uTshhrhZ$XWj_e0rF=wt98#p*yY&2T8AQH9^~B( z0QtL6d_4NjZtVBBsM=2gzxN-#&$NhCEO0ZR`>p^M0LKAd0HFCOQR%BO+F2BtS@jkN_b8LIQ*Y2ni4pAS6IYfRF$o0YUos(C?%WLNA$zV;5Rpe_j#3Nt9 z5Le?Bd9HYk8v2PK+nxrpvO22RiOVh&t@pSvVAr~ z^cs0;WuDDoWtPog6>``NRw0L-uR;!n7_34Lh8Rk((jLmwtL6>MapUD&g?!awV6ZBB z#9&qQ7-*~-deB%k^uOdKtCIr^RwD-(qH^%#dqU#msBy`{^Z0%4>Fff9iah6r3N|?m z0o$DWDohloQ>~o)$K?YzNu1A$JXa2E4YI3n2&&A}*%vCFIWB_AJO@E#ugT>E$B$E3 znde}v%oD$m1kk3@MgUX*64o95hd(0>+cS^@>^$)wApt@Hgail)5E39HKuCax1Ym6o z+AGWuz^Kt-Q@#?O&Xdq54@8A9*9B)==`NR&+oHfIX5N0wc!Td z(Oz;5S`$v*C;;!A^G#VgmaJ~I(b1IGfPepb+Wg1mayumJwUc&-W379v1NdGU*1T(bL9Z%IvI$IH$m|L@2e31qCS`W zm%anW4}j+h?BU+5_I(AKq``mjq?6WLu|K^;?k5*Ba0pAk=knL5~L$%*z3ViGG>Bs6*rtMno*{_Q4CD0-*YCdag z5AdKfG4lBFj+cpa>pY>e}cPHTwNuadF0<73aW- zl5f-&@Qlta$r6}%{Cx=Lkk-f6tJSe)kNIljx#qP-nL7fWdjez%%RBpiZK+t0TEyrB)mjnhy)MgD(2JmY0({44_e zXnvxrFfxGqr2rcBYz#P`nXe4UYZ|QgsYO{$_GJ6955m2|wqLLNe~EWMV_W!cZY2B z?L49PA(|6`Z$uRTAO_rKyq*KVy+I84Yx0;$cFH@d4d2OP!i~nR@-<}u*OmeN;=gDl zn$Q^?R{&-KNORArZk+c_?a%R+TKG2rTATn-pZ^A+6hKruL1XbmfWrXj49Q>S?%-N? zfbRgJ!pW~46u;K9rVOg)p&X5Sv;YvjXm4+He&uO^8vro?@c?K%oD6{Wf`1AS4S@9O z3*Zi53ZMrdSG8?kzu`XQOZj#FtZ`%#$cv7qybk=&1t5LTA)ZnpZwS{RK`Ck<_UYhkU;zjbJqR&-$rT_> zM|nu;C0A!extu{6n?r;P7+}-DPT>Z2+JOJiJ`vgg7629dMD!x|i2!jBe}n`G3H$~UfbU=( z0DDmZ#G0&txf!vysziLd!e(qcmUc_ z3{^r+9;)o|QN6F^O?Tye<}5#P(a+VvIH12`KKz%M6+vjOe$P+nPyZ1Y4{#1VdX{NO z2R!%Ks7_y{ta)6AXZ{YfC$@EGfJ+_L5fnBwei(nA(V7HkbGF!Y0Pkdo<@<@_Z)mCw zdnW+>X#n+(?G%)Ez=7;PAI0d@D|WZ04WU?GcyBXYVx{Y;>=EwJ@e}#N2qe3xj(?CYP93yAGGgB zO`8xj59Y*QGC&Gp$O!2Gqi=~R?}(B$Y->GZ|*C{78<0IBOi-2ad#^9H+0xw=re6TQfgmlKME$(%+rxos# zt&`rWH=ud|_qpxVg@;p)`qRqqT>iYEwcgd&d-B4uwJKeL{05p^0^AmgcCOyhdK^J? zAjG;Iw4M$;&w~BIz36-WYJF(O>yY_Ybvs1U%MN z@mVLz%YM*x7e`CwMa%|R69?-r7)z)D`9{FcQNjr*{ zy#+8IpfvykprLeEtzXS|2W<}lSPgIjAQk|P&jgKEMc=P5{VXH5y6W zXp8T<4PLa==Q`}Cg1(pM0HAeK4WS#ai|k#jzB|M>;Oql{o()kvZ?B*ovH9+By@Brn z_0fANxOc9G5$WvE#TPg=PUEhCy{egkkC2l#WvH5Ky@( zN-%sx#7^NqSW&~o+<`M_8byq8(sXnyv8|<4!WC3oYy71RJM`6ygMjN4#)lva)cL&I?Nkg6c zz?-vn)Y4qNE&*=OYWUIqR6KlWRqUV$eCECDg|J2?z%2)W{LSUar!KHB-kf!{MpO09 z0&Wwuk6MM_st(^jTMlUcWmEpxdLyk;5%BkdJ&lU1$hYbmmX0_6127hz5IQD}cW*fE|E0 zz==A*v$7`f>u;0*$FETNKNGwqh2SA61b0jcKpA*N5R|jsFR-kP3a$w%+vVa8jtN8X zgBXGjL=Bu33h4lJ@N!T&0NxGoa7dzxs1hB3iToPX{*a#n+!AbGMTMUNMr_UX++ld0 z&|V7hzkq9gJjA@&0V{m}pdR|Zz6(&2`RI1St}3VKK4?N_Au zSg4lx5A^K{UO~3J-D|7MzUGqm5Y7QWW7LW^tNgymt=|i-83Uj=ZwdhFK2TqZO(G@K18G3?@TC}Z3ukte=DqTvlwiQyeR2SbKff%(%pj2J)~kL=)D zEk5z-hn0%A8FHpA>K4pp*u!?*pb&U@ V&GG(=-_bx8gQu&X%Q~loCIDuGbl?C0 literal 0 HcmV?d00001 diff --git a/idp/dex/web/themes/light/logo.png b/idp/dex/web/themes/light/logo.png new file mode 100755 index 0000000000000000000000000000000000000000..d534ca53d32f2b3cf8088fbe3e4c37cd5e374b94 GIT binary patch literal 300 zcmeAS@N?(olHy`uVBq!ia0vp^0wB!61|;P_|4#%`oCO|{#S9GG!XV7ZFl&wkQ1G#* zi(^Q|oa6+FEf4O(G@K18G3?@TC}Z3ukte=DqTvlwiQyeR2SbKff%(%pj2J)~kL=)D zEk5z-hn0%A8FHpA>K4pp*u!?*pb&U@ V&GG(=-_bx8gQu&X%Q~loCIDuGbl?C0 literal 0 HcmV?d00001 diff --git a/idp/dex/web/themes/light/styles.css b/idp/dex/web/themes/light/styles.css new file mode 100755 index 000000000..3033ebd76 --- /dev/null +++ b/idp/dex/web/themes/light/styles.css @@ -0,0 +1 @@ +/* NetBird DEX Theme - styles loaded but CSS is inline in header.html */ \ No newline at end of file diff --git a/idp/dex/web/web.go b/idp/dex/web/web.go new file mode 100644 index 000000000..8cf81392a --- /dev/null +++ b/idp/dex/web/web.go @@ -0,0 +1,14 @@ +package web + +import ( + "embed" + "io/fs" +) + +//go:embed static/* templates/* themes/* robots.txt +var files embed.FS + +// FS returns the embedded web assets filesystem. +func FS() fs.FS { + return files +} diff --git a/idp/sdk/sdk.go b/idp/sdk/sdk.go new file mode 100644 index 000000000..d2189135b --- /dev/null +++ b/idp/sdk/sdk.go @@ -0,0 +1,135 @@ +// Package sdk provides an embeddable SDK for the Dex OIDC identity provider. +package sdk + +import ( + "context" + + "github.com/dexidp/dex/storage" + + "github.com/netbirdio/netbird/idp/dex" +) + +// DexIdP wraps the Dex provider with a builder pattern +type DexIdP struct { + provider *dex.Provider + config *dex.Config + yamlConfig *dex.YAMLConfig +} + +// Option configures a DexIdP instance +type Option func(*dex.Config) + +// WithIssuer sets the OIDC issuer URL +func WithIssuer(issuer string) Option { + return func(c *dex.Config) { c.Issuer = issuer } +} + +// WithPort sets the HTTP port +func WithPort(port int) Option { + return func(c *dex.Config) { c.Port = port } +} + +// WithDataDir sets the data directory for storage +func WithDataDir(dir string) Option { + return func(c *dex.Config) { c.DataDir = dir } +} + +// WithDevMode enables development mode (allows HTTP) +func WithDevMode(dev bool) Option { + return func(c *dex.Config) { c.DevMode = dev } +} + +// WithGRPCAddr sets the gRPC API address +func WithGRPCAddr(addr string) Option { + return func(c *dex.Config) { c.GRPCAddr = addr } +} + +// New creates a new DexIdP instance with the given options +func New(opts ...Option) (*DexIdP, error) { + config := &dex.Config{ + Port: 33081, + DevMode: true, + } + + for _, opt := range opts { + opt(config) + } + + return &DexIdP{config: config}, nil +} + +// NewFromConfigFile creates a new DexIdP instance from a YAML config file +func NewFromConfigFile(path string) (*DexIdP, error) { + yamlConfig, err := dex.LoadConfig(path) + if err != nil { + return nil, err + } + return &DexIdP{yamlConfig: yamlConfig}, nil +} + +// NewFromYAMLConfig creates a new DexIdP instance from a YAMLConfig +func NewFromYAMLConfig(yamlConfig *dex.YAMLConfig) (*DexIdP, error) { + return &DexIdP{yamlConfig: yamlConfig}, nil +} + +// Start initializes and starts the embedded OIDC provider +func (d *DexIdP) Start(ctx context.Context) error { + var err error + if d.yamlConfig != nil { + d.provider, err = dex.NewProviderFromYAML(ctx, d.yamlConfig) + } else { + d.provider, err = dex.NewProvider(ctx, d.config) + } + if err != nil { + return err + } + return d.provider.Start(ctx) +} + +// Stop gracefully shuts down the provider +func (d *DexIdP) Stop(ctx context.Context) error { + if d.provider != nil { + return d.provider.Stop(ctx) + } + return nil +} + +// EnsureDefaultClients creates the default NetBird OAuth clients +func (d *DexIdP) EnsureDefaultClients(ctx context.Context, dashboardURIs, cliURIs []string) error { + return d.provider.EnsureDefaultClients(ctx, dashboardURIs, cliURIs) +} + +// Storage exposes Dex storage for direct user/client/connector management +// Use storage.Client, storage.Password, storage.Connector directly +func (d *DexIdP) Storage() storage.Storage { + return d.provider.Storage() +} + +// CreateUser creates a new user with the given email, username, and password. +// Returns the encoded user ID in Dex's format. +func (d *DexIdP) CreateUser(ctx context.Context, email, username, password string) (string, error) { + return d.provider.CreateUser(ctx, email, username, password) +} + +// DeleteUser removes a user by email +func (d *DexIdP) DeleteUser(ctx context.Context, email string) error { + return d.provider.DeleteUser(ctx, email) +} + +// ListUsers returns all users +func (d *DexIdP) ListUsers(ctx context.Context) ([]storage.Password, error) { + return d.provider.ListUsers(ctx) +} + +// IssuerURL returns the OIDC issuer URL +func (d *DexIdP) IssuerURL() string { + if d.yamlConfig != nil { + return d.yamlConfig.Issuer + } + return d.config.Issuer +} + +// DiscoveryEndpoint returns the OIDC discovery endpoint URL +func (d *DexIdP) DiscoveryEndpoint() string { + return d.IssuerURL() + "/.well-known/openid-configuration" +} diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh new file mode 100755 index 000000000..e25b943a0 --- /dev/null +++ b/infrastructure_files/getting-started.sh @@ -0,0 +1,407 @@ +#!/bin/bash + +set -e + +# NetBird Getting Started with Embedded IdP (Dex) +# This script sets up NetBird with the embedded Dex identity provider +# No separate Dex container or reverse proxy needed - IdP is built into management server + +# Sed pattern to strip base64 padding characters +SED_STRIP_PADDING='s/=//g' + +check_docker_compose() { + if command -v docker-compose &> /dev/null + then + echo "docker-compose" + return + fi + if docker compose --help &> /dev/null + then + echo "docker compose" + return + fi + + echo "docker-compose is not installed or not in PATH. Please follow the steps from the official guide: https://docs.docker.com/engine/install/" > /dev/stderr + exit 1 +} + +check_jq() { + if ! command -v jq &> /dev/null + then + echo "jq is not installed or not in PATH, please install with your package manager. e.g. sudo apt install jq" > /dev/stderr + exit 1 + fi + return 0 +} + +get_main_ip_address() { + if [[ "$OSTYPE" == "darwin"* ]]; then + interface=$(route -n get default | grep 'interface:' | awk '{print $2}') + ip_address=$(ifconfig "$interface" | grep 'inet ' | awk '{print $2}') + else + interface=$(ip route | grep default | awk '{print $5}' | head -n 1) + ip_address=$(ip addr show "$interface" | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1) + fi + + echo "$ip_address" + return 0 +} + +check_nb_domain() { + DOMAIN=$1 + if [[ "$DOMAIN-x" == "-x" ]]; then + echo "The NETBIRD_DOMAIN variable cannot be empty." > /dev/stderr + return 1 + fi + + if [[ "$DOMAIN" == "netbird.example.com" ]]; then + echo "The NETBIRD_DOMAIN cannot be netbird.example.com" > /dev/stderr + return 1 + fi + return 0 +} + +read_nb_domain() { + READ_NETBIRD_DOMAIN="" + echo -n "Enter the domain you want to use for NetBird (e.g. netbird.my-domain.com): " > /dev/stderr + read -r READ_NETBIRD_DOMAIN < /dev/tty + if ! check_nb_domain "$READ_NETBIRD_DOMAIN"; then + read_nb_domain + fi + echo "$READ_NETBIRD_DOMAIN" + return 0 +} + +get_turn_external_ip() { + TURN_EXTERNAL_IP_CONFIG="#external-ip=" + IP=$(curl -s -4 https://jsonip.com | jq -r '.ip') + if [[ "x-$IP" != "x-" ]]; then + TURN_EXTERNAL_IP_CONFIG="external-ip=$IP" + fi + echo "$TURN_EXTERNAL_IP_CONFIG" + return 0 +} + +wait_management() { + set +e + echo -n "Waiting for Management server to become ready" + counter=1 + while true; do + # Check the embedded IdP endpoint + if curl -sk -f -o /dev/null "$NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN/oauth2/.well-known/openid-configuration" 2>/dev/null; then + break + fi + if [[ $counter -eq 60 ]]; then + echo "" + echo "Taking too long. Checking logs..." + $DOCKER_COMPOSE_COMMAND logs --tail=20 caddy + $DOCKER_COMPOSE_COMMAND logs --tail=20 management + fi + echo -n " ." + sleep 2 + counter=$((counter + 1)) + done + echo " done" + set -e + return 0 +} + +init_environment() { + CADDY_SECURE_DOMAIN="" + NETBIRD_PORT=80 + NETBIRD_HTTP_PROTOCOL="http" + NETBIRD_RELAY_PROTO="rel" + TURN_USER="self" + TURN_PASSWORD=$(openssl rand -base64 32 | sed "$SED_STRIP_PADDING") + NETBIRD_RELAY_AUTH_SECRET=$(openssl rand -base64 32 | sed "$SED_STRIP_PADDING") + # Note: DataStoreEncryptionKey must keep base64 padding (=) for Go's base64.StdEncoding + DATASTORE_ENCRYPTION_KEY=$(openssl rand -base64 32) + TURN_MIN_PORT=49152 + TURN_MAX_PORT=65535 + TURN_EXTERNAL_IP_CONFIG=$(get_turn_external_ip) + + if ! check_nb_domain "$NETBIRD_DOMAIN"; then + NETBIRD_DOMAIN=$(read_nb_domain) + fi + + if [[ "$NETBIRD_DOMAIN" == "use-ip" ]]; then + NETBIRD_DOMAIN=$(get_main_ip_address) + else + NETBIRD_PORT=443 + CADDY_SECURE_DOMAIN=", $NETBIRD_DOMAIN:$NETBIRD_PORT" + NETBIRD_HTTP_PROTOCOL="https" + NETBIRD_RELAY_PROTO="rels" + fi + + check_jq + + DOCKER_COMPOSE_COMMAND=$(check_docker_compose) + + if [[ -f management.json ]]; then + echo "Generated files already exist, if you want to reinitialize the environment, please remove them first." + echo "You can use the following commands:" + echo " $DOCKER_COMPOSE_COMMAND down --volumes # to remove all containers and volumes" + echo " rm -f docker-compose.yml Caddyfile dashboard.env turnserver.conf management.json relay.env" + echo "Be aware that this will remove all data from the database, and you will have to reconfigure the dashboard." + exit 1 + fi + + echo Rendering initial files... + render_docker_compose > docker-compose.yml + render_caddyfile > Caddyfile + render_dashboard_env > dashboard.env + render_management_json > management.json + render_turn_server_conf > turnserver.conf + render_relay_env > relay.env + + echo -e "\nStarting NetBird services\n" + $DOCKER_COMPOSE_COMMAND up -d + + # Wait for management (and embedded IdP) to be ready + sleep 3 + wait_management + + echo -e "\nDone!\n" + echo "You can access the NetBird dashboard at $NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN" + echo "Follow the onboarding steps to set up your NetBird instance." + return 0 +} + +render_caddyfile() { + cat < 0 { + audience = audiences[0] // Use the first client ID as the primary audience + } + keysLocation = oauthProvider.GetKeysLocation() + signingKeyRefreshEnabled = true + issuer = oauthProvider.GetIssuer() + userIDClaim = oauthProvider.GetUserIDClaim() + } + return Create(s, func() auth.Manager { return auth.NewManager(s.Store(), - s.Config.HttpConfig.AuthIssuer, - s.Config.HttpConfig.AuthAudience, - s.Config.HttpConfig.AuthKeysLocation, - s.Config.HttpConfig.AuthUserIDClaim, - s.Config.GetAuthAudiences(), - s.Config.HttpConfig.IdpSignKeyRefreshEnabled) + issuer, + audience, + keysLocation, + userIDClaim, + audiences, + signingKeyRefreshEnabled) }) } diff --git a/management/internals/server/modules.go b/management/internals/server/modules.go index af9ca5f2d..d179f2b68 100644 --- a/management/internals/server/modules.go +++ b/management/internals/server/modules.go @@ -95,6 +95,17 @@ func (s *BaseServer) IdpManager() idp.Manager { return Create(s, func() idp.Manager { var idpManager idp.Manager var err error + // Use embedded IdP manager if embedded Dex is configured and enabled. + // Legacy IdpManager won't be used anymore even if configured. + if s.Config.EmbeddedIdP != nil && s.Config.EmbeddedIdP.Enabled { + idpManager, err = idp.NewEmbeddedIdPManager(context.Background(), s.Config.EmbeddedIdP, s.Metrics()) + if err != nil { + log.Fatalf("failed to create embedded IDP manager: %v", err) + } + return idpManager + } + + // Fall back to external IdP manager if s.Config.IdpManagerConfig != nil { idpManager, err = idp.NewManager(context.Background(), *s.Config.IdpManagerConfig, s.Metrics()) if err != nil { @@ -105,6 +116,25 @@ func (s *BaseServer) IdpManager() idp.Manager { }) } +// OAuthConfigProvider is only relevant when we have an embedded IdP manager. Otherwise must be nil +func (s *BaseServer) OAuthConfigProvider() idp.OAuthConfigProvider { + if s.Config.EmbeddedIdP == nil || !s.Config.EmbeddedIdP.Enabled { + return nil + } + + idpManager := s.IdpManager() + if idpManager == nil { + return nil + } + + // Reuse the EmbeddedIdPManager instance from IdpManager + // EmbeddedIdPManager implements both idp.Manager and idp.OAuthConfigProvider + if provider, ok := idpManager.(idp.OAuthConfigProvider); ok { + return provider + } + return nil +} + func (s *BaseServer) GroupsManager() groups.Manager { return Create(s, func() groups.Manager { return groups.NewManager(s.Store(), s.PermissionsManager(), s.AccountManager()) diff --git a/management/internals/server/server.go b/management/internals/server/server.go index d9c715225..d5840ab41 100644 --- a/management/internals/server/server.go +++ b/management/internals/server/server.go @@ -11,6 +11,7 @@ import ( "time" "github.com/google/uuid" + "github.com/netbirdio/netbird/management/server/idp" log "github.com/sirupsen/logrus" "go.opentelemetry.io/otel/metric" "golang.org/x/crypto/acme/autocert" @@ -22,7 +23,6 @@ import ( nbconfig "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/metrics" "github.com/netbirdio/netbird/management/server/store" - "github.com/netbirdio/netbird/util" "github.com/netbirdio/netbird/util/wsproxy" wsproxyserver "github.com/netbirdio/netbird/util/wsproxy/server" "github.com/netbirdio/netbird/version" @@ -40,7 +40,7 @@ type Server interface { SetContainer(key string, container any) } -// Server holds the HTTP BaseServer instance. +// BaseServer holds the HTTP server instance. // Add any additional fields you need, such as database connections, Config, etc. type BaseServer struct { // Config holds the server configuration @@ -144,7 +144,7 @@ func (s *BaseServer) Start(ctx context.Context) error { log.WithContext(srvCtx).Infof("running gRPC backward compatibility server: %s", compatListener.Addr().String()) } - rootHandler := s.handlerFunc(s.GRPCServer(), s.APIHandler(), s.Metrics().GetMeter()) + rootHandler := s.handlerFunc(srvCtx, s.GRPCServer(), s.APIHandler(), s.Metrics().GetMeter()) switch { case s.certManager != nil: // a call to certManager.Listener() always creates a new listener so we do it once @@ -215,6 +215,10 @@ func (s *BaseServer) Stop() error { if s.update != nil { s.update.StopWatch() } + // Stop embedded IdP if configured + if embeddedIdP, ok := s.IdpManager().(*idp.EmbeddedIdPManager); ok { + _ = embeddedIdP.Stop(ctx) + } select { case <-s.Errors(): @@ -246,11 +250,7 @@ func (s *BaseServer) SetContainer(key string, container any) { log.Tracef("container with key %s set successfully", key) } -func updateMgmtConfig(ctx context.Context, path string, config *nbconfig.Config) error { - return util.DirectWriteJson(ctx, path, config) -} - -func (s *BaseServer) handlerFunc(gRPCHandler *grpc.Server, httpHandler http.Handler, meter metric.Meter) http.Handler { +func (s *BaseServer) handlerFunc(_ context.Context, gRPCHandler *grpc.Server, httpHandler http.Handler, meter metric.Meter) http.Handler { wsProxy := wsproxyserver.New(gRPCHandler, wsproxyserver.WithOTelMeter(meter)) return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index 063dda7e4..801c15158 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -16,6 +16,7 @@ import ( pb "github.com/golang/protobuf/proto" // nolint "github.com/golang/protobuf/ptypes/timestamp" "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/realip" + "github.com/netbirdio/netbird/shared/management/client/common" log "github.com/sirupsen/logrus" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" "google.golang.org/grpc/codes" @@ -24,6 +25,7 @@ import ( "github.com/netbirdio/netbird/management/internals/controllers/network_map" nbconfig "github.com/netbirdio/netbird/management/internals/server/config" + "github.com/netbirdio/netbird/management/server/idp" "github.com/netbirdio/netbird/management/server/integrations/integrated_validator" "github.com/netbirdio/netbird/management/server/store" @@ -69,6 +71,8 @@ type Server struct { networkMapController network_map.Controller + oAuthConfigProvider idp.OAuthConfigProvider + syncSem atomic.Int32 syncLim int32 } @@ -83,6 +87,7 @@ func NewServer( authManager auth.Manager, integratedPeerValidator integrated_validator.IntegratedValidator, networkMapController network_map.Controller, + oAuthConfigProvider idp.OAuthConfigProvider, ) (*Server, error) { if appMetrics != nil { // update gauge based on number of connected peers which is equal to open gRPC streams @@ -119,6 +124,7 @@ func NewServer( blockPeersWithSameConfig: blockPeersWithSameConfig, integratedPeerValidator: integratedPeerValidator, networkMapController: networkMapController, + oAuthConfigProvider: oAuthConfigProvider, loginFilter: newLoginFilter(), @@ -761,32 +767,48 @@ func (s *Server) GetDeviceAuthorizationFlow(ctx context.Context, req *proto.Encr return nil, status.Error(codes.InvalidArgument, errMSG) } - if s.config.DeviceAuthorizationFlow == nil || s.config.DeviceAuthorizationFlow.Provider == string(nbconfig.NONE) { - return nil, status.Error(codes.NotFound, "no device authorization flow information available") - } + var flowInfoResp *proto.DeviceAuthorizationFlow - provider, ok := proto.DeviceAuthorizationFlowProvider_value[strings.ToUpper(s.config.DeviceAuthorizationFlow.Provider)] - if !ok { - return nil, status.Errorf(codes.InvalidArgument, "no provider found in the protocol for %s", s.config.DeviceAuthorizationFlow.Provider) - } + // Use embedded IdP configuration if available + if s.oAuthConfigProvider != nil { + flowInfoResp = &proto.DeviceAuthorizationFlow{ + Provider: proto.DeviceAuthorizationFlow_HOSTED, + ProviderConfig: &proto.ProviderConfig{ + ClientID: s.oAuthConfigProvider.GetCLIClientID(), + Audience: s.oAuthConfigProvider.GetCLIClientID(), + DeviceAuthEndpoint: s.oAuthConfigProvider.GetDeviceAuthEndpoint(), + TokenEndpoint: s.oAuthConfigProvider.GetTokenEndpoint(), + Scope: s.oAuthConfigProvider.GetDefaultScopes(), + }, + } + } else { + if s.config.DeviceAuthorizationFlow == nil || s.config.DeviceAuthorizationFlow.Provider == string(nbconfig.NONE) { + return nil, status.Error(codes.NotFound, "no device authorization flow information available") + } - flowInfoResp := &proto.DeviceAuthorizationFlow{ - Provider: proto.DeviceAuthorizationFlowProvider(provider), - ProviderConfig: &proto.ProviderConfig{ - ClientID: s.config.DeviceAuthorizationFlow.ProviderConfig.ClientID, - ClientSecret: s.config.DeviceAuthorizationFlow.ProviderConfig.ClientSecret, - Domain: s.config.DeviceAuthorizationFlow.ProviderConfig.Domain, - Audience: s.config.DeviceAuthorizationFlow.ProviderConfig.Audience, - DeviceAuthEndpoint: s.config.DeviceAuthorizationFlow.ProviderConfig.DeviceAuthEndpoint, - TokenEndpoint: s.config.DeviceAuthorizationFlow.ProviderConfig.TokenEndpoint, - Scope: s.config.DeviceAuthorizationFlow.ProviderConfig.Scope, - UseIDToken: s.config.DeviceAuthorizationFlow.ProviderConfig.UseIDToken, - }, + provider, ok := proto.DeviceAuthorizationFlowProvider_value[strings.ToUpper(s.config.DeviceAuthorizationFlow.Provider)] + if !ok { + return nil, status.Errorf(codes.InvalidArgument, "no provider found in the protocol for %s", s.config.DeviceAuthorizationFlow.Provider) + } + + flowInfoResp = &proto.DeviceAuthorizationFlow{ + Provider: proto.DeviceAuthorizationFlowProvider(provider), + ProviderConfig: &proto.ProviderConfig{ + ClientID: s.config.DeviceAuthorizationFlow.ProviderConfig.ClientID, + ClientSecret: s.config.DeviceAuthorizationFlow.ProviderConfig.ClientSecret, + Domain: s.config.DeviceAuthorizationFlow.ProviderConfig.Domain, + Audience: s.config.DeviceAuthorizationFlow.ProviderConfig.Audience, + DeviceAuthEndpoint: s.config.DeviceAuthorizationFlow.ProviderConfig.DeviceAuthEndpoint, + TokenEndpoint: s.config.DeviceAuthorizationFlow.ProviderConfig.TokenEndpoint, + Scope: s.config.DeviceAuthorizationFlow.ProviderConfig.Scope, + UseIDToken: s.config.DeviceAuthorizationFlow.ProviderConfig.UseIDToken, + }, + } } encryptedResp, err := encryption.EncryptMessage(peerKey, key, flowInfoResp) if err != nil { - return nil, status.Error(codes.Internal, "failed to encrypt no device authorization flow information") + return nil, status.Error(codes.Internal, "failed to encrypt device authorization flow information") } return &proto.EncryptedMessage{ @@ -820,30 +842,47 @@ func (s *Server) GetPKCEAuthorizationFlow(ctx context.Context, req *proto.Encryp return nil, status.Error(codes.InvalidArgument, errMSG) } - if s.config.PKCEAuthorizationFlow == nil { - return nil, status.Error(codes.NotFound, "no pkce authorization flow information available") - } + var initInfoFlow *proto.PKCEAuthorizationFlow - initInfoFlow := &proto.PKCEAuthorizationFlow{ - ProviderConfig: &proto.ProviderConfig{ - Audience: s.config.PKCEAuthorizationFlow.ProviderConfig.Audience, - ClientID: s.config.PKCEAuthorizationFlow.ProviderConfig.ClientID, - ClientSecret: s.config.PKCEAuthorizationFlow.ProviderConfig.ClientSecret, - TokenEndpoint: s.config.PKCEAuthorizationFlow.ProviderConfig.TokenEndpoint, - AuthorizationEndpoint: s.config.PKCEAuthorizationFlow.ProviderConfig.AuthorizationEndpoint, - Scope: s.config.PKCEAuthorizationFlow.ProviderConfig.Scope, - RedirectURLs: s.config.PKCEAuthorizationFlow.ProviderConfig.RedirectURLs, - UseIDToken: s.config.PKCEAuthorizationFlow.ProviderConfig.UseIDToken, - DisablePromptLogin: s.config.PKCEAuthorizationFlow.ProviderConfig.DisablePromptLogin, - LoginFlag: uint32(s.config.PKCEAuthorizationFlow.ProviderConfig.LoginFlag), - }, + // Use embedded IdP configuration if available + if s.oAuthConfigProvider != nil { + initInfoFlow = &proto.PKCEAuthorizationFlow{ + ProviderConfig: &proto.ProviderConfig{ + Audience: s.oAuthConfigProvider.GetCLIClientID(), + ClientID: s.oAuthConfigProvider.GetCLIClientID(), + TokenEndpoint: s.oAuthConfigProvider.GetTokenEndpoint(), + AuthorizationEndpoint: s.oAuthConfigProvider.GetAuthorizationEndpoint(), + Scope: s.oAuthConfigProvider.GetDefaultScopes(), + RedirectURLs: s.oAuthConfigProvider.GetCLIRedirectURLs(), + LoginFlag: uint32(common.LoginFlagPromptLogin), + }, + } + } else { + if s.config.PKCEAuthorizationFlow == nil { + return nil, status.Error(codes.NotFound, "no pkce authorization flow information available") + } + + initInfoFlow = &proto.PKCEAuthorizationFlow{ + ProviderConfig: &proto.ProviderConfig{ + Audience: s.config.PKCEAuthorizationFlow.ProviderConfig.Audience, + ClientID: s.config.PKCEAuthorizationFlow.ProviderConfig.ClientID, + ClientSecret: s.config.PKCEAuthorizationFlow.ProviderConfig.ClientSecret, + TokenEndpoint: s.config.PKCEAuthorizationFlow.ProviderConfig.TokenEndpoint, + AuthorizationEndpoint: s.config.PKCEAuthorizationFlow.ProviderConfig.AuthorizationEndpoint, + Scope: s.config.PKCEAuthorizationFlow.ProviderConfig.Scope, + RedirectURLs: s.config.PKCEAuthorizationFlow.ProviderConfig.RedirectURLs, + UseIDToken: s.config.PKCEAuthorizationFlow.ProviderConfig.UseIDToken, + DisablePromptLogin: s.config.PKCEAuthorizationFlow.ProviderConfig.DisablePromptLogin, + LoginFlag: uint32(s.config.PKCEAuthorizationFlow.ProviderConfig.LoginFlag), + }, + } } flowInfoResp := s.integratedPeerValidator.ValidateFlowResponse(ctx, peerKey.String(), initInfoFlow) encryptedResp, err := encryption.EncryptMessage(peerKey, key, flowInfoResp) if err != nil { - return nil, status.Error(codes.Internal, "failed to encrypt no pkce authorization flow information") + return nil, status.Error(codes.Internal, "failed to encrypt pkce authorization flow information") } return &proto.EncryptedMessage{ diff --git a/management/server/account.go b/management/server/account.go index a1046432a..29415b038 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -243,7 +243,7 @@ func BuildManager( am.externalCacheManager = nbcache.NewUserDataCache(cacheStore) am.cacheManager = nbcache.NewAccountUserDataCache(am.loadAccount, cacheStore) - if !isNil(am.idpManager) { + if !isNil(am.idpManager) && !IsEmbeddedIdp(am.idpManager) { go func() { err := am.warmupIDPCache(ctx, cacheStore) if err != nil { @@ -557,7 +557,7 @@ func (am *DefaultAccountManager) checkAndSchedulePeerInactivityExpiration(ctx co // newAccount creates a new Account with a generated ID and generated default setup keys. // If ID is already in use (due to collision) we try one more time before returning error -func (am *DefaultAccountManager) newAccount(ctx context.Context, userID, domain string) (*types.Account, error) { +func (am *DefaultAccountManager) newAccount(ctx context.Context, userID, domain, email, name string) (*types.Account, error) { for i := 0; i < 2; i++ { accountId := xid.New().String() @@ -568,7 +568,7 @@ func (am *DefaultAccountManager) newAccount(ctx context.Context, userID, domain log.WithContext(ctx).Warnf("an account with ID already exists, retrying...") continue case statusErr.Type() == status.NotFound: - newAccount := newAccountWithId(ctx, accountId, userID, domain, am.disableDefaultPolicy) + newAccount := newAccountWithId(ctx, accountId, userID, domain, email, name, am.disableDefaultPolicy) am.StoreEvent(ctx, userID, newAccount.Id, accountId, activity.AccountCreated, nil) return newAccount, nil default: @@ -741,23 +741,23 @@ func (am *DefaultAccountManager) AccountExists(ctx context.Context, accountID st // If user does have an account, it returns the user's account ID. // If the user doesn't have an account, it creates one using the provided domain. // Returns the account ID or an error if none is found or created. -func (am *DefaultAccountManager) GetAccountIDByUserID(ctx context.Context, userID, domain string) (string, error) { - if userID == "" { +func (am *DefaultAccountManager) GetAccountIDByUserID(ctx context.Context, userAuth auth.UserAuth) (string, error) { + if userAuth.UserId == "" { return "", status.Errorf(status.NotFound, "no valid userID provided") } - accountID, err := am.Store.GetAccountIDByUserID(ctx, store.LockingStrengthNone, userID) + accountID, err := am.Store.GetAccountIDByUserID(ctx, store.LockingStrengthNone, userAuth.UserId) if err != nil { if s, ok := status.FromError(err); ok && s.Type() == status.NotFound { - account, err := am.GetOrCreateAccountByUser(ctx, userID, domain) + acc, err := am.GetOrCreateAccountByUser(ctx, userAuth) if err != nil { - return "", status.Errorf(status.NotFound, "account not found or created for user id: %s", userID) + return "", status.Errorf(status.NotFound, "account not found or created for user id: %s", userAuth.UserId) } - if err = am.addAccountIDToIDPAppMeta(ctx, userID, account.Id); err != nil { + if err = am.addAccountIDToIDPAppMeta(ctx, userAuth.UserId, acc.Id); err != nil { return "", err } - return account.Id, nil + return acc.Id, nil } return "", err } @@ -768,9 +768,19 @@ func isNil(i idp.Manager) bool { return i == nil || reflect.ValueOf(i).IsNil() } +// IsEmbeddedIdp checks if the IDP manager is an embedded IDP (data stored locally in DB). +// When true, user cache should be skipped and data fetched directly from the IDP manager. +func IsEmbeddedIdp(i idp.Manager) bool { + if isNil(i) { + return false + } + _, ok := i.(*idp.EmbeddedIdPManager) + return ok +} + // addAccountIDToIDPAppMeta update user's app metadata in idp manager func (am *DefaultAccountManager) addAccountIDToIDPAppMeta(ctx context.Context, userID string, accountID string) error { - if !isNil(am.idpManager) { + if !isNil(am.idpManager) && !IsEmbeddedIdp(am.idpManager) { // user can be nil if it wasn't found (e.g., just created) user, err := am.lookupUserInCache(ctx, userID, accountID) if err != nil { @@ -1016,6 +1026,9 @@ func (am *DefaultAccountManager) isCacheFresh(ctx context.Context, accountUsers } func (am *DefaultAccountManager) removeUserFromCache(ctx context.Context, accountID, userID string) error { + if IsEmbeddedIdp(am.idpManager) { + return nil + } data, err := am.getAccountFromCache(ctx, accountID, false) if err != nil { return err @@ -1107,7 +1120,7 @@ func (am *DefaultAccountManager) addNewPrivateAccount(ctx context.Context, domai lowerDomain := strings.ToLower(userAuth.Domain) - newAccount, err := am.newAccount(ctx, userAuth.UserId, lowerDomain) + newAccount, err := am.newAccount(ctx, userAuth.UserId, lowerDomain, userAuth.Email, userAuth.Name) if err != nil { return "", err } @@ -1132,7 +1145,7 @@ func (am *DefaultAccountManager) addNewPrivateAccount(ctx context.Context, domai } func (am *DefaultAccountManager) addNewUserToDomainAccount(ctx context.Context, domainAccountID string, userAuth auth.UserAuth) (string, error) { - newUser := types.NewRegularUser(userAuth.UserId) + newUser := types.NewRegularUser(userAuth.UserId, userAuth.Email, userAuth.Name) newUser.AccountID = domainAccountID settings, err := am.Store.GetAccountSettings(ctx, store.LockingStrengthNone, domainAccountID) @@ -1315,6 +1328,7 @@ func (am *DefaultAccountManager) GetAccountIDFromUserAuth(ctx context.Context, u user, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthNone, userAuth.UserId) if err != nil { // this is not really possible because we got an account by user ID + log.Errorf("failed to get user by ID %s: %v", userAuth.UserId, err) return "", "", status.Errorf(status.NotFound, "user %s not found", userAuth.UserId) } @@ -1512,7 +1526,7 @@ func (am *DefaultAccountManager) getAccountIDWithAuthorizationClaims(ctx context } if userAuth.DomainCategory != types.PrivateCategory || !isDomainValid(userAuth.Domain) { - return am.GetAccountIDByUserID(ctx, userAuth.UserId, userAuth.Domain) + return am.GetAccountIDByUserID(ctx, userAuth) } if userAuth.AccountId != "" { @@ -1734,7 +1748,7 @@ func (am *DefaultAccountManager) GetAccountSettings(ctx context.Context, account } // newAccountWithId creates a new Account with a default SetupKey (doesn't store in a Store) and provided id -func newAccountWithId(ctx context.Context, accountID, userID, domain string, disableDefaultPolicy bool) *types.Account { +func newAccountWithId(ctx context.Context, accountID, userID, domain, email, name string, disableDefaultPolicy bool) *types.Account { log.WithContext(ctx).Debugf("creating new account") network := types.NewNetwork() @@ -1744,7 +1758,7 @@ func newAccountWithId(ctx context.Context, accountID, userID, domain string, dis setupKeys := map[string]*types.SetupKey{} nameServersGroups := make(map[string]*nbdns.NameServerGroup) - owner := types.NewOwnerUser(userID) + owner := types.NewOwnerUser(userID, email, name) owner.AccountID = accountID users[userID] = owner diff --git a/management/server/account/manager.go b/management/server/account/manager.go index f0b7c3857..7680a8464 100644 --- a/management/server/account/manager.go +++ b/management/server/account/manager.go @@ -24,7 +24,7 @@ import ( type ExternalCacheManager nbcache.UserDataCache type Manager interface { - GetOrCreateAccountByUser(ctx context.Context, userId, domain string) (*types.Account, error) + GetOrCreateAccountByUser(ctx context.Context, userAuth auth.UserAuth) (*types.Account, error) GetAccount(ctx context.Context, accountID string) (*types.Account, error) CreateSetupKey(ctx context.Context, accountID string, keyName string, keyType types.SetupKeyType, expiresIn time.Duration, autoGroups []string, usageLimit int, userID string, ephemeral bool, allowExtraDNSLabels bool) (*types.SetupKey, error) @@ -44,7 +44,7 @@ type Manager interface { GetAccountMeta(ctx context.Context, accountID string, userID string) (*types.AccountMeta, error) GetAccountOnboarding(ctx context.Context, accountID string, userID string) (*types.AccountOnboarding, error) AccountExists(ctx context.Context, accountID string) (bool, error) - GetAccountIDByUserID(ctx context.Context, userID, domain string) (string, error) + GetAccountIDByUserID(ctx context.Context, userAuth auth.UserAuth) (string, error) GetAccountIDFromUserAuth(ctx context.Context, userAuth auth.UserAuth) (string, string, error) DeleteAccount(ctx context.Context, accountID, userID string) error GetUserByID(ctx context.Context, id string) (*types.User, error) @@ -124,4 +124,9 @@ type Manager interface { GetOwnerInfo(ctx context.Context, accountId string) (*types.UserInfo, error) GetCurrentUserInfo(ctx context.Context, userAuth auth.UserAuth) (*users.UserInfoWithPermissions, error) GetUserIDByPeerKey(ctx context.Context, peerKey string) (string, error) + GetIdentityProvider(ctx context.Context, accountID, idpID, userID string) (*types.IdentityProvider, error) + GetIdentityProviders(ctx context.Context, accountID, userID string) ([]*types.IdentityProvider, error) + CreateIdentityProvider(ctx context.Context, accountID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) + UpdateIdentityProvider(ctx context.Context, accountID, idpID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) + DeleteIdentityProvider(ctx context.Context, accountID, idpID, userID string) error } diff --git a/management/server/account_test.go b/management/server/account_test.go index 25818ada2..59d6e4928 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -382,7 +382,7 @@ func TestAccount_GetPeerNetworkMap(t *testing.T) { } for _, testCase := range tt { - account := newAccountWithId(context.Background(), "account-1", userID, "netbird.io", false) + account := newAccountWithId(context.Background(), "account-1", userID, "netbird.io", "", "", false) account.UpdateSettings(&testCase.accountSettings) account.Network = network account.Peers = testCase.peers @@ -407,7 +407,7 @@ func TestNewAccount(t *testing.T) { domain := "netbird.io" userId := "account_creator" accountID := "account_id" - account := newAccountWithId(context.Background(), accountID, userId, domain, false) + account := newAccountWithId(context.Background(), accountID, userId, domain, "", "", false) verifyNewAccountHasDefaultFields(t, account, userId, domain, []string{userId}) } @@ -418,7 +418,7 @@ func TestAccountManager_GetOrCreateAccountByUser(t *testing.T) { return } - account, err := manager.GetOrCreateAccountByUser(context.Background(), userID, "") + account, err := manager.GetOrCreateAccountByUser(context.Background(), auth.UserAuth{UserId: userID, Domain: ""}) if err != nil { t.Fatal(err) } @@ -612,7 +612,7 @@ func TestDefaultAccountManager_GetAccountIDFromToken(t *testing.T) { manager, _, err := createManager(t) require.NoError(t, err, "unable to create account manager") - accountID, err := manager.GetAccountIDByUserID(context.Background(), testCase.inputInitUserParams.UserId, testCase.inputInitUserParams.Domain) + accountID, err := manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: testCase.inputInitUserParams.UserId, Domain: testCase.inputInitUserParams.Domain}) require.NoError(t, err, "create init user failed") initAccount, err := manager.Store.GetAccount(context.Background(), accountID) @@ -649,10 +649,10 @@ func TestDefaultAccountManager_GetAccountIDFromToken(t *testing.T) { func TestDefaultAccountManager_SyncUserJWTGroups(t *testing.T) { userId := "user-id" domain := "test.domain" - _ = newAccountWithId(context.Background(), "", userId, domain, false) + _ = newAccountWithId(context.Background(), "", userId, domain, "", "", false) manager, _, err := createManager(t) require.NoError(t, err, "unable to create account manager") - accountID, err := manager.GetAccountIDByUserID(context.Background(), userId, domain) + accountID, err := manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userId, Domain: domain}) require.NoError(t, err, "create init user failed") // as initAccount was created without account id we have to take the id after account initialization // that happens inside the GetAccountIDByUserID where the id is getting generated @@ -718,7 +718,7 @@ func TestAccountManager_PrivateAccount(t *testing.T) { } userId := "test_user" - account, err := manager.GetOrCreateAccountByUser(context.Background(), userId, "") + account, err := manager.GetOrCreateAccountByUser(context.Background(), auth.UserAuth{UserId: userId, Domain: ""}) if err != nil { t.Fatal(err) } @@ -745,7 +745,7 @@ func TestAccountManager_SetOrUpdateDomain(t *testing.T) { userId := "test_user" domain := "hotmail.com" - account, err := manager.GetOrCreateAccountByUser(context.Background(), userId, domain) + account, err := manager.GetOrCreateAccountByUser(context.Background(), auth.UserAuth{UserId: userId, Domain: domain}) if err != nil { t.Fatal(err) } @@ -759,7 +759,7 @@ func TestAccountManager_SetOrUpdateDomain(t *testing.T) { domain = "gmail.com" - account, err = manager.GetOrCreateAccountByUser(context.Background(), userId, domain) + account, err = manager.GetOrCreateAccountByUser(context.Background(), auth.UserAuth{UserId: userId, Domain: domain}) if err != nil { t.Fatalf("got the following error while retrieving existing acc: %v", err) } @@ -782,7 +782,7 @@ func TestAccountManager_GetAccountByUserID(t *testing.T) { userId := "test_user" - accountID, err := manager.GetAccountIDByUserID(context.Background(), userId, "") + accountID, err := manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userId, Domain: ""}) if err != nil { t.Fatal(err) } @@ -795,14 +795,14 @@ func TestAccountManager_GetAccountByUserID(t *testing.T) { assert.NoError(t, err) assert.True(t, exists, "expected to get existing account after creation using userid") - _, err = manager.GetAccountIDByUserID(context.Background(), "", "") + _, err = manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: "", Domain: ""}) if err == nil { t.Errorf("expected an error when user ID is empty") } } func createAccount(am *DefaultAccountManager, accountID, userID, domain string) (*types.Account, error) { - account := newAccountWithId(context.Background(), accountID, userID, domain, false) + account := newAccountWithId(context.Background(), accountID, userID, domain, "", "", false) err := am.Store.SaveAccount(context.Background(), account) if err != nil { return nil, err @@ -1098,7 +1098,7 @@ func TestAccountManager_AddPeerWithUserID(t *testing.T) { return } - account, err := manager.GetOrCreateAccountByUser(context.Background(), userID, "netbird.cloud") + account, err := manager.GetOrCreateAccountByUser(context.Background(), auth.UserAuth{UserId: userID, Domain: "netbird.cloud"}) if err != nil { t.Fatal(err) } @@ -1849,7 +1849,7 @@ func TestDefaultAccountManager_DefaultAccountSettings(t *testing.T) { manager, _, err := createManager(t) require.NoError(t, err, "unable to create account manager") - accountID, err := manager.GetAccountIDByUserID(context.Background(), userID, "") + accountID, err := manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userID}) require.NoError(t, err, "unable to create an account") settings, err := manager.Store.GetAccountSettings(context.Background(), store.LockingStrengthNone, accountID) @@ -1864,7 +1864,7 @@ func TestDefaultAccountManager_UpdatePeer_PeerLoginExpiration(t *testing.T) { manager, _, err := createManager(t) require.NoError(t, err, "unable to create account manager") - _, err = manager.GetAccountIDByUserID(context.Background(), userID, "") + _, err = manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userID}) require.NoError(t, err, "unable to create an account") key, err := wgtypes.GenerateKey() @@ -1876,7 +1876,7 @@ func TestDefaultAccountManager_UpdatePeer_PeerLoginExpiration(t *testing.T) { }, false) require.NoError(t, err, "unable to add peer") - accountID, err := manager.GetAccountIDByUserID(context.Background(), userID, "") + accountID, err := manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userID}) require.NoError(t, err, "unable to get the account") err = manager.MarkPeerConnected(context.Background(), key.PublicKey().String(), true, nil, accountID) @@ -1920,7 +1920,7 @@ func TestDefaultAccountManager_MarkPeerConnected_PeerLoginExpiration(t *testing. manager, _, err := createManager(t) require.NoError(t, err, "unable to create account manager") - accountID, err := manager.GetAccountIDByUserID(context.Background(), userID, "") + accountID, err := manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userID}) require.NoError(t, err, "unable to create an account") key, err := wgtypes.GenerateKey() @@ -1946,7 +1946,7 @@ func TestDefaultAccountManager_MarkPeerConnected_PeerLoginExpiration(t *testing. }, } - accountID, err = manager.GetAccountIDByUserID(context.Background(), userID, "") + accountID, err = manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userID}) require.NoError(t, err, "unable to get the account") // when we mark peer as connected, the peer login expiration routine should trigger @@ -1963,7 +1963,7 @@ func TestDefaultAccountManager_UpdateAccountSettings_PeerLoginExpiration(t *test manager, _, err := createManager(t) require.NoError(t, err, "unable to create account manager") - _, err = manager.GetAccountIDByUserID(context.Background(), userID, "") + _, err = manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userID}) require.NoError(t, err, "unable to create an account") key, err := wgtypes.GenerateKey() @@ -1975,7 +1975,7 @@ func TestDefaultAccountManager_UpdateAccountSettings_PeerLoginExpiration(t *test }, false) require.NoError(t, err, "unable to add peer") - accountID, err := manager.GetAccountIDByUserID(context.Background(), userID, "") + accountID, err := manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userID}) require.NoError(t, err, "unable to get the account") account, err := manager.Store.GetAccount(context.Background(), accountID) @@ -2025,7 +2025,7 @@ func TestDefaultAccountManager_UpdateAccountSettings(t *testing.T) { manager, _, err := createManager(t) require.NoError(t, err, "unable to create account manager") - accountID, err := manager.GetAccountIDByUserID(context.Background(), userID, "") + accountID, err := manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userID}) require.NoError(t, err, "unable to create an account") updatedSettings, err := manager.UpdateAccountSettings(context.Background(), accountID, userID, &types.Settings{ @@ -3434,7 +3434,7 @@ func TestDefaultAccountManager_IsCacheCold(t *testing.T) { assert.True(t, cold) }) - account, err := manager.GetOrCreateAccountByUser(context.Background(), userID, "") + account, err := manager.GetOrCreateAccountByUser(context.Background(), auth.UserAuth{UserId: userID}) require.NoError(t, err) t.Run("should return true when account is not found in cache", func(t *testing.T) { @@ -3462,7 +3462,7 @@ func TestPropagateUserGroupMemberships(t *testing.T) { initiatorId := "test-user" domain := "example.com" - account, err := manager.GetOrCreateAccountByUser(ctx, initiatorId, domain) + account, err := manager.GetOrCreateAccountByUser(ctx, auth.UserAuth{UserId: initiatorId, Domain: domain}) require.NoError(t, err) peer1 := &nbpeer.Peer{ID: "peer1", AccountID: account.Id, UserID: initiatorId, IP: net.IP{1, 1, 1, 1}, DNSLabel: "peer1.domain.test"} @@ -3575,7 +3575,7 @@ func TestDefaultAccountManager_GetAccountOnboarding(t *testing.T) { manager, _, err := createManager(t) require.NoError(t, err) - account, err := manager.GetOrCreateAccountByUser(context.Background(), userID, "") + account, err := manager.GetOrCreateAccountByUser(context.Background(), auth.UserAuth{UserId: userID}) require.NoError(t, err) t.Run("should return account onboarding when onboarding exist", func(t *testing.T) { @@ -3607,7 +3607,7 @@ func TestDefaultAccountManager_UpdateAccountOnboarding(t *testing.T) { manager, _, err := createManager(t) require.NoError(t, err) - account, err := manager.GetOrCreateAccountByUser(context.Background(), userID, "") + account, err := manager.GetOrCreateAccountByUser(context.Background(), auth.UserAuth{UserId: userID}) require.NoError(t, err) onboarding := &types.AccountOnboarding{ @@ -3646,7 +3646,7 @@ func TestDefaultAccountManager_UpdatePeerIP(t *testing.T) { manager, _, err := createManager(t) require.NoError(t, err, "unable to create account manager") - accountID, err := manager.GetAccountIDByUserID(context.Background(), userID, "") + accountID, err := manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userID}) require.NoError(t, err, "unable to create an account") key1, err := wgtypes.GenerateKey() @@ -3717,7 +3717,7 @@ func TestAddNewUserToDomainAccountWithApproval(t *testing.T) { // Create a domain-based account with user approval enabled existingAccountID := "existing-account" - account := newAccountWithId(context.Background(), existingAccountID, "owner-user", "example.com", false) + account := newAccountWithId(context.Background(), existingAccountID, "owner-user", "example.com", "", "", false) account.Settings.Extra = &types.ExtraSettings{ UserApprovalRequired: true, } diff --git a/management/server/activity/codes.go b/management/server/activity/codes.go index 6344b2904..7b939ddff 100644 --- a/management/server/activity/codes.go +++ b/management/server/activity/codes.go @@ -183,6 +183,10 @@ const ( AccountAutoUpdateVersionUpdated Activity = 92 + IdentityProviderCreated Activity = 93 + IdentityProviderUpdated Activity = 94 + IdentityProviderDeleted Activity = 95 + AccountDeleted Activity = 99999 ) @@ -295,6 +299,10 @@ var activityMap = map[Activity]Code{ UserCreated: {"User created", "user.create"}, AccountAutoUpdateVersionUpdated: {"Account AutoUpdate Version updated", "account.settings.auto.version.update"}, + + IdentityProviderCreated: {"Identity provider created", "identityprovider.create"}, + IdentityProviderUpdated: {"Identity provider updated", "identityprovider.update"}, + IdentityProviderDeleted: {"Identity provider deleted", "identityprovider.delete"}, } // StringCode returns a string code of the activity diff --git a/management/server/auth/manager.go b/management/server/auth/manager.go index 0c62357dc..76cc750b6 100644 --- a/management/server/auth/manager.go +++ b/management/server/auth/manager.go @@ -49,8 +49,7 @@ func NewManager(store store.Store, issuer, audience, keysLocation, userIdClaim s ) return &manager{ - store: store, - + store: store, validator: jwtValidator, extractor: claimsExtractor, } diff --git a/management/server/dns_test.go b/management/server/dns_test.go index b5e3f2b99..d1da79380 100644 --- a/management/server/dns_test.go +++ b/management/server/dns_test.go @@ -277,7 +277,7 @@ func initTestDNSAccount(t *testing.T, am *DefaultAccountManager) (*types.Account domain := "example.com" - account := newAccountWithId(context.Background(), dnsAccountID, dnsAdminUserID, domain, false) + account := newAccountWithId(context.Background(), dnsAccountID, dnsAdminUserID, domain, "", "", false) account.Users[dnsRegularUserID] = &types.User{ Id: dnsRegularUserID, diff --git a/management/server/group_test.go b/management/server/group_test.go index 4935dac5d..95f37a3ff 100644 --- a/management/server/group_test.go +++ b/management/server/group_test.go @@ -379,7 +379,7 @@ func initTestGroupAccount(am *DefaultAccountManager) (*DefaultAccountManager, *t Id: "example user", AutoGroups: []string{groupForUsers.ID}, } - account := newAccountWithId(context.Background(), accountID, groupAdminUserID, domain, false) + account := newAccountWithId(context.Background(), accountID, groupAdminUserID, domain, "", "", false) account.Routes[routeResource.ID] = routeResource account.Routes[routePeerGroupResource.ID] = routePeerGroupResource account.NameServerGroups[nameServerGroup.ID] = nameServerGroup diff --git a/management/server/http/handler.go b/management/server/http/handler.go index b7c6c113c..bbd6b4750 100644 --- a/management/server/http/handler.go +++ b/management/server/http/handler.go @@ -9,6 +9,7 @@ import ( "time" "github.com/gorilla/mux" + idpmanager "github.com/netbirdio/netbird/management/server/idp" "github.com/rs/cors" log "github.com/sirupsen/logrus" @@ -29,6 +30,8 @@ import ( "github.com/netbirdio/netbird/management/server/http/handlers/dns" "github.com/netbirdio/netbird/management/server/http/handlers/events" "github.com/netbirdio/netbird/management/server/http/handlers/groups" + "github.com/netbirdio/netbird/management/server/http/handlers/idp" + "github.com/netbirdio/netbird/management/server/http/handlers/instance" "github.com/netbirdio/netbird/management/server/http/handlers/networks" "github.com/netbirdio/netbird/management/server/http/handlers/peers" "github.com/netbirdio/netbird/management/server/http/handlers/policies" @@ -36,6 +39,8 @@ import ( "github.com/netbirdio/netbird/management/server/http/handlers/setup_keys" "github.com/netbirdio/netbird/management/server/http/handlers/users" "github.com/netbirdio/netbird/management/server/http/middleware" + "github.com/netbirdio/netbird/management/server/http/middleware/bypass" + nbinstance "github.com/netbirdio/netbird/management/server/instance" "github.com/netbirdio/netbird/management/server/integrations/integrated_validator" nbnetworks "github.com/netbirdio/netbird/management/server/networks" "github.com/netbirdio/netbird/management/server/networks/resources" @@ -51,23 +56,15 @@ const ( ) // NewAPIHandler creates the Management service HTTP API handler registering all the available endpoints. -func NewAPIHandler( - ctx context.Context, - accountManager account.Manager, - networksManager nbnetworks.Manager, - resourceManager resources.Manager, - routerManager routers.Manager, - groupsManager nbgroups.Manager, - LocationManager geolocation.Geolocation, - authManager auth.Manager, - appMetrics telemetry.AppMetrics, - integratedValidator integrated_validator.IntegratedValidator, - proxyController port_forwarding.Controller, - permissionsManager permissions.Manager, - peersManager nbpeers.Manager, - settingsManager settings.Manager, - networkMapController network_map.Controller, -) (http.Handler, error) { +func NewAPIHandler(ctx context.Context, accountManager account.Manager, networksManager nbnetworks.Manager, resourceManager resources.Manager, routerManager routers.Manager, groupsManager nbgroups.Manager, LocationManager geolocation.Geolocation, authManager auth.Manager, appMetrics telemetry.AppMetrics, integratedValidator integrated_validator.IntegratedValidator, proxyController port_forwarding.Controller, permissionsManager permissions.Manager, peersManager nbpeers.Manager, settingsManager settings.Manager, networkMapController network_map.Controller, idpManager idpmanager.Manager) (http.Handler, error) { + + // Register bypass paths for unauthenticated endpoints + if err := bypass.AddBypassPath("/api/instance"); err != nil { + return nil, fmt.Errorf("failed to add bypass path: %w", err) + } + if err := bypass.AddBypassPath("/api/setup"); err != nil { + return nil, fmt.Errorf("failed to add bypass path: %w", err) + } var rateLimitingConfig *middleware.RateLimiterConfig if os.Getenv(rateLimitingEnabledKey) == "true" { @@ -122,7 +119,14 @@ func NewAPIHandler( return nil, fmt.Errorf("register integrations endpoints: %w", err) } - accounts.AddEndpoints(accountManager, settingsManager, router) + // Check if embedded IdP is enabled + embeddedIdP, embeddedIdpEnabled := idpManager.(*idpmanager.EmbeddedIdPManager) + instanceManager, err := nbinstance.NewManager(ctx, accountManager.GetStore(), embeddedIdP) + if err != nil { + return nil, fmt.Errorf("failed to create instance manager: %w", err) + } + + accounts.AddEndpoints(accountManager, settingsManager, embeddedIdpEnabled, router) peers.AddEndpoints(accountManager, router, networkMapController) users.AddEndpoints(accountManager, router) setup_keys.AddEndpoints(accountManager, router) @@ -134,6 +138,13 @@ func NewAPIHandler( dns.AddEndpoints(accountManager, router) events.AddEndpoints(accountManager, router) networks.AddEndpoints(networksManager, resourceManager, routerManager, groupsManager, accountManager, router) + idp.AddEndpoints(accountManager, router) + instance.AddEndpoints(instanceManager, router) + + // Mount embedded IdP handler at /oauth2 path if configured + if embeddedIdpEnabled { + rootRouter.PathPrefix("/oauth2").Handler(corsMiddleware.Handler(embeddedIdP.Handler())) + } return rootRouter, nil } diff --git a/management/server/http/handlers/accounts/accounts_handler.go b/management/server/http/handlers/accounts/accounts_handler.go index 3797b0512..de778d59a 100644 --- a/management/server/http/handlers/accounts/accounts_handler.go +++ b/management/server/http/handlers/accounts/accounts_handler.go @@ -36,22 +36,24 @@ const ( // handler is a handler that handles the server.Account HTTP endpoints type handler struct { - accountManager account.Manager - settingsManager settings.Manager + accountManager account.Manager + settingsManager settings.Manager + embeddedIdpEnabled bool } -func AddEndpoints(accountManager account.Manager, settingsManager settings.Manager, router *mux.Router) { - accountsHandler := newHandler(accountManager, settingsManager) +func AddEndpoints(accountManager account.Manager, settingsManager settings.Manager, embeddedIdpEnabled bool, router *mux.Router) { + accountsHandler := newHandler(accountManager, settingsManager, embeddedIdpEnabled) router.HandleFunc("/accounts/{accountId}", accountsHandler.updateAccount).Methods("PUT", "OPTIONS") router.HandleFunc("/accounts/{accountId}", accountsHandler.deleteAccount).Methods("DELETE", "OPTIONS") router.HandleFunc("/accounts", accountsHandler.getAllAccounts).Methods("GET", "OPTIONS") } // newHandler creates a new handler HTTP handler -func newHandler(accountManager account.Manager, settingsManager settings.Manager) *handler { +func newHandler(accountManager account.Manager, settingsManager settings.Manager, embeddedIdpEnabled bool) *handler { return &handler{ - accountManager: accountManager, - settingsManager: settingsManager, + accountManager: accountManager, + settingsManager: settingsManager, + embeddedIdpEnabled: embeddedIdpEnabled, } } @@ -163,7 +165,7 @@ func (h *handler) getAllAccounts(w http.ResponseWriter, r *http.Request) { return } - resp := toAccountResponse(accountID, settings, meta, onboarding) + resp := toAccountResponse(accountID, settings, meta, onboarding, h.embeddedIdpEnabled) util.WriteJSONObject(r.Context(), w, []*api.Account{resp}) } @@ -290,7 +292,7 @@ func (h *handler) updateAccount(w http.ResponseWriter, r *http.Request) { return } - resp := toAccountResponse(accountID, updatedSettings, meta, updatedOnboarding) + resp := toAccountResponse(accountID, updatedSettings, meta, updatedOnboarding, h.embeddedIdpEnabled) util.WriteJSONObject(r.Context(), w, &resp) } @@ -319,7 +321,7 @@ func (h *handler) deleteAccount(w http.ResponseWriter, r *http.Request) { util.WriteJSONObject(r.Context(), w, util.EmptyObject{}) } -func toAccountResponse(accountID string, settings *types.Settings, meta *types.AccountMeta, onboarding *types.AccountOnboarding) *api.Account { +func toAccountResponse(accountID string, settings *types.Settings, meta *types.AccountMeta, onboarding *types.AccountOnboarding, embeddedIdpEnabled bool) *api.Account { jwtAllowGroups := settings.JWTAllowGroups if jwtAllowGroups == nil { jwtAllowGroups = []string{} @@ -339,6 +341,7 @@ func toAccountResponse(accountID string, settings *types.Settings, meta *types.A LazyConnectionEnabled: &settings.LazyConnectionEnabled, DnsDomain: &settings.DNSDomain, AutoUpdateVersion: &settings.AutoUpdateVersion, + EmbeddedIdpEnabled: &embeddedIdpEnabled, } if settings.NetworkRange.IsValid() { diff --git a/management/server/http/handlers/accounts/accounts_handler_test.go b/management/server/http/handlers/accounts/accounts_handler_test.go index 2e48ac83e..e455372c8 100644 --- a/management/server/http/handlers/accounts/accounts_handler_test.go +++ b/management/server/http/handlers/accounts/accounts_handler_test.go @@ -33,6 +33,7 @@ func initAccountsTestData(t *testing.T, account *types.Account) *handler { AnyTimes() return &handler{ + embeddedIdpEnabled: false, accountManager: &mock_server.MockAccountManager{ GetAccountSettingsFunc: func(ctx context.Context, accountID string, userID string) (*types.Settings, error) { return account.Settings, nil @@ -122,6 +123,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { LazyConnectionEnabled: br(false), DnsDomain: sr(""), AutoUpdateVersion: sr(""), + EmbeddedIdpEnabled: br(false), }, expectedArray: true, expectedID: accountID, @@ -145,6 +147,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { LazyConnectionEnabled: br(false), DnsDomain: sr(""), AutoUpdateVersion: sr(""), + EmbeddedIdpEnabled: br(false), }, expectedArray: false, expectedID: accountID, @@ -168,6 +171,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { LazyConnectionEnabled: br(false), DnsDomain: sr(""), AutoUpdateVersion: sr("latest"), + EmbeddedIdpEnabled: br(false), }, expectedArray: false, expectedID: accountID, @@ -191,6 +195,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { LazyConnectionEnabled: br(false), DnsDomain: sr(""), AutoUpdateVersion: sr(""), + EmbeddedIdpEnabled: br(false), }, expectedArray: false, expectedID: accountID, @@ -214,6 +219,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { LazyConnectionEnabled: br(false), DnsDomain: sr(""), AutoUpdateVersion: sr(""), + EmbeddedIdpEnabled: br(false), }, expectedArray: false, expectedID: accountID, @@ -237,6 +243,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { LazyConnectionEnabled: br(false), DnsDomain: sr(""), AutoUpdateVersion: sr(""), + EmbeddedIdpEnabled: br(false), }, expectedArray: false, expectedID: accountID, diff --git a/management/server/http/handlers/idp/idp_handler.go b/management/server/http/handlers/idp/idp_handler.go new file mode 100644 index 000000000..077507b89 --- /dev/null +++ b/management/server/http/handlers/idp/idp_handler.go @@ -0,0 +1,196 @@ +package idp + +import ( + "encoding/json" + "net/http" + + "github.com/gorilla/mux" + + "github.com/netbirdio/netbird/management/server/account" + nbcontext "github.com/netbirdio/netbird/management/server/context" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" + "github.com/netbirdio/netbird/shared/management/status" +) + +// handler handles identity provider HTTP endpoints +type handler struct { + accountManager account.Manager +} + +// AddEndpoints registers identity provider endpoints +func AddEndpoints(accountManager account.Manager, router *mux.Router) { + h := newHandler(accountManager) + router.HandleFunc("/identity-providers", h.getAllIdentityProviders).Methods("GET", "OPTIONS") + router.HandleFunc("/identity-providers", h.createIdentityProvider).Methods("POST", "OPTIONS") + router.HandleFunc("/identity-providers/{idpId}", h.getIdentityProvider).Methods("GET", "OPTIONS") + router.HandleFunc("/identity-providers/{idpId}", h.updateIdentityProvider).Methods("PUT", "OPTIONS") + router.HandleFunc("/identity-providers/{idpId}", h.deleteIdentityProvider).Methods("DELETE", "OPTIONS") +} + +func newHandler(accountManager account.Manager) *handler { + return &handler{ + accountManager: accountManager, + } +} + +// getAllIdentityProviders returns all identity providers for the account +func (h *handler) getAllIdentityProviders(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + accountID, userID := userAuth.AccountId, userAuth.UserId + + providers, err := h.accountManager.GetIdentityProviders(r.Context(), accountID, userID) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + response := make([]api.IdentityProvider, 0, len(providers)) + for _, p := range providers { + response = append(response, toAPIResponse(p)) + } + + util.WriteJSONObject(r.Context(), w, response) +} + +// getIdentityProvider returns a specific identity provider +func (h *handler) getIdentityProvider(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + accountID, userID := userAuth.AccountId, userAuth.UserId + + vars := mux.Vars(r) + idpID := vars["idpId"] + if idpID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "identity provider ID is required"), w) + return + } + + provider, err := h.accountManager.GetIdentityProvider(r.Context(), accountID, idpID, userID) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, toAPIResponse(provider)) +} + +// createIdentityProvider creates a new identity provider +func (h *handler) createIdentityProvider(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + accountID, userID := userAuth.AccountId, userAuth.UserId + + var req api.IdentityProviderRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + + idp := fromAPIRequest(&req) + + created, err := h.accountManager.CreateIdentityProvider(r.Context(), accountID, userID, idp) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, toAPIResponse(created)) +} + +// updateIdentityProvider updates an existing identity provider +func (h *handler) updateIdentityProvider(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + accountID, userID := userAuth.AccountId, userAuth.UserId + + vars := mux.Vars(r) + idpID := vars["idpId"] + if idpID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "identity provider ID is required"), w) + return + } + + var req api.IdentityProviderRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + + idp := fromAPIRequest(&req) + + updated, err := h.accountManager.UpdateIdentityProvider(r.Context(), accountID, idpID, userID, idp) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, toAPIResponse(updated)) +} + +// deleteIdentityProvider deletes an identity provider +func (h *handler) deleteIdentityProvider(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + accountID, userID := userAuth.AccountId, userAuth.UserId + + vars := mux.Vars(r) + idpID := vars["idpId"] + if idpID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "identity provider ID is required"), w) + return + } + + if err := h.accountManager.DeleteIdentityProvider(r.Context(), accountID, idpID, userID); err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, util.EmptyObject{}) +} + +func toAPIResponse(idp *types.IdentityProvider) api.IdentityProvider { + resp := api.IdentityProvider{ + Type: api.IdentityProviderType(idp.Type), + Name: idp.Name, + Issuer: idp.Issuer, + ClientId: idp.ClientID, + } + if idp.ID != "" { + resp.Id = &idp.ID + } + // Note: ClientSecret is never returned in responses for security + return resp +} + +func fromAPIRequest(req *api.IdentityProviderRequest) *types.IdentityProvider { + return &types.IdentityProvider{ + Type: types.IdentityProviderType(req.Type), + Name: req.Name, + Issuer: req.Issuer, + ClientID: req.ClientId, + ClientSecret: req.ClientSecret, + } +} diff --git a/management/server/http/handlers/idp/idp_handler_test.go b/management/server/http/handlers/idp/idp_handler_test.go new file mode 100644 index 000000000..74b204048 --- /dev/null +++ b/management/server/http/handlers/idp/idp_handler_test.go @@ -0,0 +1,438 @@ +package idp + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/gorilla/mux" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + nbcontext "github.com/netbirdio/netbird/management/server/context" + "github.com/netbirdio/netbird/management/server/mock_server" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/auth" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/status" +) + +const ( + testAccountID = "test-account-id" + testUserID = "test-user-id" + existingIDPID = "existing-idp-id" + newIDPID = "new-idp-id" +) + +func initIDPTestData(existingIDP *types.IdentityProvider) *handler { + return &handler{ + accountManager: &mock_server.MockAccountManager{ + GetIdentityProvidersFunc: func(_ context.Context, accountID, userID string) ([]*types.IdentityProvider, error) { + if accountID != testAccountID { + return nil, status.Errorf(status.NotFound, "account not found") + } + if existingIDP != nil { + return []*types.IdentityProvider{existingIDP}, nil + } + return []*types.IdentityProvider{}, nil + }, + GetIdentityProviderFunc: func(_ context.Context, accountID, idpID, userID string) (*types.IdentityProvider, error) { + if accountID != testAccountID { + return nil, status.Errorf(status.NotFound, "account not found") + } + if existingIDP != nil && idpID == existingIDP.ID { + return existingIDP, nil + } + return nil, status.Errorf(status.NotFound, "identity provider not found") + }, + CreateIdentityProviderFunc: func(_ context.Context, accountID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) { + if accountID != testAccountID { + return nil, status.Errorf(status.NotFound, "account not found") + } + if idp.Name == "" { + return nil, status.Errorf(status.InvalidArgument, "name is required") + } + created := idp.Copy() + created.ID = newIDPID + created.AccountID = accountID + return created, nil + }, + UpdateIdentityProviderFunc: func(_ context.Context, accountID, idpID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) { + if accountID != testAccountID { + return nil, status.Errorf(status.NotFound, "account not found") + } + if existingIDP == nil || idpID != existingIDP.ID { + return nil, status.Errorf(status.NotFound, "identity provider not found") + } + updated := idp.Copy() + updated.ID = idpID + updated.AccountID = accountID + return updated, nil + }, + DeleteIdentityProviderFunc: func(_ context.Context, accountID, idpID, userID string) error { + if accountID != testAccountID { + return status.Errorf(status.NotFound, "account not found") + } + if existingIDP == nil || idpID != existingIDP.ID { + return status.Errorf(status.NotFound, "identity provider not found") + } + return nil + }, + }, + } +} + +func TestGetAllIdentityProviders(t *testing.T) { + existingIDP := &types.IdentityProvider{ + ID: existingIDPID, + Name: "Test IDP", + Type: types.IdentityProviderTypeOIDC, + Issuer: "https://issuer.example.com", + ClientID: "client-id", + } + + tt := []struct { + name string + expectedStatus int + expectedCount int + }{ + { + name: "Get All Identity Providers", + expectedStatus: http.StatusOK, + expectedCount: 1, + }, + } + + h := initIDPTestData(existingIDP) + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + recorder := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/api/identity-providers", nil) + req = nbcontext.SetUserAuthInRequest(req, auth.UserAuth{ + UserId: testUserID, + AccountId: testAccountID, + }) + + router := mux.NewRouter() + router.HandleFunc("/api/identity-providers", h.getAllIdentityProviders).Methods("GET") + router.ServeHTTP(recorder, req) + + res := recorder.Result() + defer res.Body.Close() + + assert.Equal(t, tc.expectedStatus, recorder.Code) + + content, err := io.ReadAll(res.Body) + require.NoError(t, err) + + var idps []api.IdentityProvider + err = json.Unmarshal(content, &idps) + require.NoError(t, err) + assert.Len(t, idps, tc.expectedCount) + }) + } +} + +func TestGetIdentityProvider(t *testing.T) { + existingIDP := &types.IdentityProvider{ + ID: existingIDPID, + Name: "Test IDP", + Type: types.IdentityProviderTypeOIDC, + Issuer: "https://issuer.example.com", + ClientID: "client-id", + } + + tt := []struct { + name string + idpID string + expectedStatus int + expectedBody bool + }{ + { + name: "Get Existing Identity Provider", + idpID: existingIDPID, + expectedStatus: http.StatusOK, + expectedBody: true, + }, + { + name: "Get Non-Existing Identity Provider", + idpID: "non-existing-id", + expectedStatus: http.StatusNotFound, + expectedBody: false, + }, + } + + h := initIDPTestData(existingIDP) + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + recorder := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, fmt.Sprintf("/api/identity-providers/%s", tc.idpID), nil) + req = nbcontext.SetUserAuthInRequest(req, auth.UserAuth{ + UserId: testUserID, + AccountId: testAccountID, + }) + + router := mux.NewRouter() + router.HandleFunc("/api/identity-providers/{idpId}", h.getIdentityProvider).Methods("GET") + router.ServeHTTP(recorder, req) + + res := recorder.Result() + defer res.Body.Close() + + assert.Equal(t, tc.expectedStatus, recorder.Code) + + if tc.expectedBody { + content, err := io.ReadAll(res.Body) + require.NoError(t, err) + + var idp api.IdentityProvider + err = json.Unmarshal(content, &idp) + require.NoError(t, err) + assert.Equal(t, existingIDPID, *idp.Id) + assert.Equal(t, existingIDP.Name, idp.Name) + } + }) + } +} + +func TestCreateIdentityProvider(t *testing.T) { + tt := []struct { + name string + requestBody string + expectedStatus int + expectedBody bool + }{ + { + name: "Create Identity Provider", + requestBody: `{ + "name": "New IDP", + "type": "oidc", + "issuer": "https://new-issuer.example.com", + "client_id": "new-client-id", + "client_secret": "new-client-secret" + }`, + expectedStatus: http.StatusOK, + expectedBody: true, + }, + { + name: "Create Identity Provider with Invalid JSON", + requestBody: `{invalid json`, + expectedStatus: http.StatusBadRequest, + expectedBody: false, + }, + } + + h := initIDPTestData(nil) + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + recorder := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "/api/identity-providers", bytes.NewBufferString(tc.requestBody)) + req.Header.Set("Content-Type", "application/json") + req = nbcontext.SetUserAuthInRequest(req, auth.UserAuth{ + UserId: testUserID, + AccountId: testAccountID, + }) + + router := mux.NewRouter() + router.HandleFunc("/api/identity-providers", h.createIdentityProvider).Methods("POST") + router.ServeHTTP(recorder, req) + + res := recorder.Result() + defer res.Body.Close() + + assert.Equal(t, tc.expectedStatus, recorder.Code) + + if tc.expectedBody { + content, err := io.ReadAll(res.Body) + require.NoError(t, err) + + var idp api.IdentityProvider + err = json.Unmarshal(content, &idp) + require.NoError(t, err) + assert.Equal(t, newIDPID, *idp.Id) + assert.Equal(t, "New IDP", idp.Name) + assert.Equal(t, api.IdentityProviderTypeOidc, idp.Type) + } + }) + } +} + +func TestUpdateIdentityProvider(t *testing.T) { + existingIDP := &types.IdentityProvider{ + ID: existingIDPID, + Name: "Test IDP", + Type: types.IdentityProviderTypeOIDC, + Issuer: "https://issuer.example.com", + ClientID: "client-id", + ClientSecret: "client-secret", + } + + tt := []struct { + name string + idpID string + requestBody string + expectedStatus int + expectedBody bool + }{ + { + name: "Update Existing Identity Provider", + idpID: existingIDPID, + requestBody: `{ + "name": "Updated IDP", + "type": "oidc", + "issuer": "https://updated-issuer.example.com", + "client_id": "updated-client-id" + }`, + expectedStatus: http.StatusOK, + expectedBody: true, + }, + { + name: "Update Non-Existing Identity Provider", + idpID: "non-existing-id", + requestBody: `{ + "name": "Updated IDP", + "type": "oidc", + "issuer": "https://updated-issuer.example.com", + "client_id": "updated-client-id" + }`, + expectedStatus: http.StatusNotFound, + expectedBody: false, + }, + { + name: "Update Identity Provider with Invalid JSON", + idpID: existingIDPID, + requestBody: `{invalid json`, + expectedStatus: http.StatusBadRequest, + expectedBody: false, + }, + } + + h := initIDPTestData(existingIDP) + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + recorder := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("/api/identity-providers/%s", tc.idpID), bytes.NewBufferString(tc.requestBody)) + req.Header.Set("Content-Type", "application/json") + req = nbcontext.SetUserAuthInRequest(req, auth.UserAuth{ + UserId: testUserID, + AccountId: testAccountID, + }) + + router := mux.NewRouter() + router.HandleFunc("/api/identity-providers/{idpId}", h.updateIdentityProvider).Methods("PUT") + router.ServeHTTP(recorder, req) + + res := recorder.Result() + defer res.Body.Close() + + assert.Equal(t, tc.expectedStatus, recorder.Code) + + if tc.expectedBody { + content, err := io.ReadAll(res.Body) + require.NoError(t, err) + + var idp api.IdentityProvider + err = json.Unmarshal(content, &idp) + require.NoError(t, err) + assert.Equal(t, existingIDPID, *idp.Id) + assert.Equal(t, "Updated IDP", idp.Name) + } + }) + } +} + +func TestDeleteIdentityProvider(t *testing.T) { + existingIDP := &types.IdentityProvider{ + ID: existingIDPID, + Name: "Test IDP", + Type: types.IdentityProviderTypeOIDC, + Issuer: "https://issuer.example.com", + ClientID: "client-id", + } + + tt := []struct { + name string + idpID string + expectedStatus int + }{ + { + name: "Delete Existing Identity Provider", + idpID: existingIDPID, + expectedStatus: http.StatusOK, + }, + { + name: "Delete Non-Existing Identity Provider", + idpID: "non-existing-id", + expectedStatus: http.StatusNotFound, + }, + } + + h := initIDPTestData(existingIDP) + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + recorder := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodDelete, fmt.Sprintf("/api/identity-providers/%s", tc.idpID), nil) + req = nbcontext.SetUserAuthInRequest(req, auth.UserAuth{ + UserId: testUserID, + AccountId: testAccountID, + }) + + router := mux.NewRouter() + router.HandleFunc("/api/identity-providers/{idpId}", h.deleteIdentityProvider).Methods("DELETE") + router.ServeHTTP(recorder, req) + + res := recorder.Result() + defer res.Body.Close() + + assert.Equal(t, tc.expectedStatus, recorder.Code) + }) + } +} + +func TestToAPIResponse(t *testing.T) { + idp := &types.IdentityProvider{ + ID: "test-id", + Name: "Test IDP", + Type: types.IdentityProviderTypeGoogle, + Issuer: "https://accounts.google.com", + ClientID: "client-id", + ClientSecret: "should-not-be-returned", + } + + response := toAPIResponse(idp) + + assert.Equal(t, "test-id", *response.Id) + assert.Equal(t, "Test IDP", response.Name) + assert.Equal(t, api.IdentityProviderTypeGoogle, response.Type) + assert.Equal(t, "https://accounts.google.com", response.Issuer) + assert.Equal(t, "client-id", response.ClientId) + // Note: ClientSecret is not included in response type by design +} + +func TestFromAPIRequest(t *testing.T) { + req := &api.IdentityProviderRequest{ + Name: "New IDP", + Type: api.IdentityProviderTypeOkta, + Issuer: "https://dev-123456.okta.com", + ClientId: "okta-client-id", + ClientSecret: "okta-client-secret", + } + + idp := fromAPIRequest(req) + + assert.Equal(t, "New IDP", idp.Name) + assert.Equal(t, types.IdentityProviderTypeOkta, idp.Type) + assert.Equal(t, "https://dev-123456.okta.com", idp.Issuer) + assert.Equal(t, "okta-client-id", idp.ClientID) + assert.Equal(t, "okta-client-secret", idp.ClientSecret) +} diff --git a/management/server/http/handlers/instance/instance_handler.go b/management/server/http/handlers/instance/instance_handler.go new file mode 100644 index 000000000..889c3133e --- /dev/null +++ b/management/server/http/handlers/instance/instance_handler.go @@ -0,0 +1,67 @@ +package instance + +import ( + "encoding/json" + "net/http" + + "github.com/gorilla/mux" + log "github.com/sirupsen/logrus" + + nbinstance "github.com/netbirdio/netbird/management/server/instance" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" +) + +// handler handles the instance setup HTTP endpoints +type handler struct { + instanceManager nbinstance.Manager +} + +// AddEndpoints registers the instance setup endpoints. +// These endpoints bypass authentication for initial setup. +func AddEndpoints(instanceManager nbinstance.Manager, router *mux.Router) { + h := &handler{ + instanceManager: instanceManager, + } + + router.HandleFunc("/instance", h.getInstanceStatus).Methods("GET", "OPTIONS") + router.HandleFunc("/setup", h.setup).Methods("POST", "OPTIONS") +} + +// getInstanceStatus returns the instance status including whether setup is required. +// This endpoint is unauthenticated. +func (h *handler) getInstanceStatus(w http.ResponseWriter, r *http.Request) { + setupRequired, err := h.instanceManager.IsSetupRequired(r.Context()) + if err != nil { + log.WithContext(r.Context()).Errorf("failed to check setup status: %v", err) + util.WriteErrorResponse("failed to check instance status", http.StatusInternalServerError, w) + return + } + + util.WriteJSONObject(r.Context(), w, api.InstanceStatus{ + SetupRequired: setupRequired, + }) +} + +// setup creates the initial admin user for the instance. +// This endpoint is unauthenticated but only works when setup is required. +func (h *handler) setup(w http.ResponseWriter, r *http.Request) { + var req api.SetupRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + util.WriteErrorResponse("invalid request body", http.StatusBadRequest, w) + return + } + + userData, err := h.instanceManager.CreateOwnerUser(r.Context(), req.Email, req.Password, req.Name) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + log.WithContext(r.Context()).Infof("instance setup completed: created user %s", req.Email) + + util.WriteJSONObject(r.Context(), w, api.SetupResponse{ + UserId: userData.ID, + Email: userData.Email, + }) +} diff --git a/management/server/http/handlers/instance/instance_handler_test.go b/management/server/http/handlers/instance/instance_handler_test.go new file mode 100644 index 000000000..7a3a2bc88 --- /dev/null +++ b/management/server/http/handlers/instance/instance_handler_test.go @@ -0,0 +1,281 @@ +package instance + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "net/mail" + "testing" + + "github.com/gorilla/mux" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/server/idp" + nbinstance "github.com/netbirdio/netbird/management/server/instance" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/status" +) + +// mockInstanceManager implements instance.Manager for testing +type mockInstanceManager struct { + isSetupRequired bool + isSetupRequiredFn func(ctx context.Context) (bool, error) + createOwnerUserFn func(ctx context.Context, email, password, name string) (*idp.UserData, error) +} + +func (m *mockInstanceManager) IsSetupRequired(ctx context.Context) (bool, error) { + if m.isSetupRequiredFn != nil { + return m.isSetupRequiredFn(ctx) + } + return m.isSetupRequired, nil +} + +func (m *mockInstanceManager) CreateOwnerUser(ctx context.Context, email, password, name string) (*idp.UserData, error) { + if m.createOwnerUserFn != nil { + return m.createOwnerUserFn(ctx, email, password, name) + } + + // Default mock includes validation like the real manager + if !m.isSetupRequired { + return nil, status.Errorf(status.PreconditionFailed, "setup already completed") + } + if email == "" { + return nil, status.Errorf(status.InvalidArgument, "email is required") + } + if _, err := mail.ParseAddress(email); err != nil { + return nil, status.Errorf(status.InvalidArgument, "invalid email format") + } + if name == "" { + return nil, status.Errorf(status.InvalidArgument, "name is required") + } + if password == "" { + return nil, status.Errorf(status.InvalidArgument, "password is required") + } + if len(password) < 8 { + return nil, status.Errorf(status.InvalidArgument, "password must be at least 8 characters") + } + + return &idp.UserData{ + ID: "test-user-id", + Email: email, + Name: name, + }, nil +} + +var _ nbinstance.Manager = (*mockInstanceManager)(nil) + +func setupTestRouter(manager nbinstance.Manager) *mux.Router { + router := mux.NewRouter() + AddEndpoints(manager, router) + return router +} + +func TestGetInstanceStatus_SetupRequired(t *testing.T) { + manager := &mockInstanceManager{isSetupRequired: true} + router := setupTestRouter(manager) + + req := httptest.NewRequest(http.MethodGet, "/instance", nil) + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + + var response api.InstanceStatus + err := json.NewDecoder(rec.Body).Decode(&response) + require.NoError(t, err) + assert.True(t, response.SetupRequired) +} + +func TestGetInstanceStatus_SetupNotRequired(t *testing.T) { + manager := &mockInstanceManager{isSetupRequired: false} + router := setupTestRouter(manager) + + req := httptest.NewRequest(http.MethodGet, "/instance", nil) + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + + var response api.InstanceStatus + err := json.NewDecoder(rec.Body).Decode(&response) + require.NoError(t, err) + assert.False(t, response.SetupRequired) +} + +func TestGetInstanceStatus_Error(t *testing.T) { + manager := &mockInstanceManager{ + isSetupRequiredFn: func(ctx context.Context) (bool, error) { + return false, errors.New("database error") + }, + } + router := setupTestRouter(manager) + + req := httptest.NewRequest(http.MethodGet, "/instance", nil) + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusInternalServerError, rec.Code) +} + +func TestSetup_Success(t *testing.T) { + manager := &mockInstanceManager{ + isSetupRequired: true, + createOwnerUserFn: func(ctx context.Context, email, password, name string) (*idp.UserData, error) { + assert.Equal(t, "admin@example.com", email) + assert.Equal(t, "securepassword123", password) + assert.Equal(t, "Admin User", name) + return &idp.UserData{ + ID: "created-user-id", + Email: email, + Name: name, + }, nil + }, + } + router := setupTestRouter(manager) + + body := `{"email": "admin@example.com", "password": "securepassword123", "name": "Admin User"}` + req := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBufferString(body)) + req.Header.Set("Content-Type", "application/json") + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + + var response api.SetupResponse + err := json.NewDecoder(rec.Body).Decode(&response) + require.NoError(t, err) + assert.Equal(t, "created-user-id", response.UserId) + assert.Equal(t, "admin@example.com", response.Email) +} + +func TestSetup_AlreadyCompleted(t *testing.T) { + manager := &mockInstanceManager{isSetupRequired: false} + router := setupTestRouter(manager) + + body := `{"email": "admin@example.com", "password": "securepassword123"}` + req := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBufferString(body)) + req.Header.Set("Content-Type", "application/json") + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusPreconditionFailed, rec.Code) +} + +func TestSetup_MissingEmail(t *testing.T) { + manager := &mockInstanceManager{isSetupRequired: true} + router := setupTestRouter(manager) + + body := `{"password": "securepassword123"}` + req := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBufferString(body)) + req.Header.Set("Content-Type", "application/json") + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusUnprocessableEntity, rec.Code) +} + +func TestSetup_InvalidEmail(t *testing.T) { + manager := &mockInstanceManager{isSetupRequired: true} + router := setupTestRouter(manager) + + body := `{"email": "not-an-email", "password": "securepassword123", "name": "User"}` + req := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBufferString(body)) + req.Header.Set("Content-Type", "application/json") + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + // Note: Invalid email format uses mail.ParseAddress which is treated differently + // and returns 400 Bad Request instead of 422 Unprocessable Entity + assert.Equal(t, http.StatusUnprocessableEntity, rec.Code) +} + +func TestSetup_MissingPassword(t *testing.T) { + manager := &mockInstanceManager{isSetupRequired: true} + router := setupTestRouter(manager) + + body := `{"email": "admin@example.com", "name": "User"}` + req := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBufferString(body)) + req.Header.Set("Content-Type", "application/json") + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusUnprocessableEntity, rec.Code) +} + +func TestSetup_PasswordTooShort(t *testing.T) { + manager := &mockInstanceManager{isSetupRequired: true} + router := setupTestRouter(manager) + + body := `{"email": "admin@example.com", "password": "short", "name": "User"}` + req := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBufferString(body)) + req.Header.Set("Content-Type", "application/json") + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusUnprocessableEntity, rec.Code) +} + +func TestSetup_InvalidJSON(t *testing.T) { + manager := &mockInstanceManager{isSetupRequired: true} + router := setupTestRouter(manager) + + body := `{invalid json}` + req := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBufferString(body)) + req.Header.Set("Content-Type", "application/json") + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusBadRequest, rec.Code) +} + +func TestSetup_CreateUserError(t *testing.T) { + manager := &mockInstanceManager{ + isSetupRequired: true, + createOwnerUserFn: func(ctx context.Context, email, password, name string) (*idp.UserData, error) { + return nil, errors.New("user creation failed") + }, + } + router := setupTestRouter(manager) + + body := `{"email": "admin@example.com", "password": "securepassword123", "name": "User"}` + req := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBufferString(body)) + req.Header.Set("Content-Type", "application/json") + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusInternalServerError, rec.Code) +} + +func TestSetup_ManagerError(t *testing.T) { + manager := &mockInstanceManager{ + isSetupRequired: true, + createOwnerUserFn: func(ctx context.Context, email, password, name string) (*idp.UserData, error) { + return nil, status.Errorf(status.Internal, "database error") + }, + } + router := setupTestRouter(manager) + + body := `{"email": "admin@example.com", "password": "securepassword123", "name": "User"}` + req := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBufferString(body)) + req.Header.Set("Content-Type", "application/json") + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusInternalServerError, rec.Code) +} diff --git a/management/server/http/handlers/peers/peers_handler_test.go b/management/server/http/handlers/peers/peers_handler_test.go index 55e779ff0..869a39b5e 100644 --- a/management/server/http/handlers/peers/peers_handler_test.go +++ b/management/server/http/handlers/peers/peers_handler_test.go @@ -66,7 +66,7 @@ func initTestMetaData(t *testing.T, peers ...*nbpeer.Peer) *Handler { }, } - srvUser := types.NewRegularUser(serviceUser) + srvUser := types.NewRegularUser(serviceUser, "", "") srvUser.IsServiceUser = true account := &types.Account{ @@ -75,7 +75,7 @@ func initTestMetaData(t *testing.T, peers ...*nbpeer.Peer) *Handler { Peers: peersMap, Users: map[string]*types.User{ adminUser: types.NewAdminUser(adminUser), - regularUser: types.NewRegularUser(regularUser), + regularUser: types.NewRegularUser(regularUser, "", ""), serviceUser: srvUser, }, Groups: map[string]*types.Group{ diff --git a/management/server/http/handlers/users/users_handler.go b/management/server/http/handlers/users/users_handler.go index 4e03e5e9b..7669d7404 100644 --- a/management/server/http/handlers/users/users_handler.go +++ b/management/server/http/handlers/users/users_handler.go @@ -326,6 +326,16 @@ func toUserResponse(user *types.UserInfo, currenUserID string) *api.User { isCurrent := user.ID == currenUserID + var password *string + if user.Password != "" { + password = &user.Password + } + + var idpID *string + if user.IdPID != "" { + idpID = &user.IdPID + } + return &api.User{ Id: user.ID, Name: user.Name, @@ -339,6 +349,8 @@ func toUserResponse(user *types.UserInfo, currenUserID string) *api.User { LastLogin: &user.LastLogin, Issued: &user.Issued, PendingApproval: user.PendingApproval, + Password: password, + IdpId: idpID, } } diff --git a/management/server/http/middleware/auth_middleware.go b/management/server/http/middleware/auth_middleware.go index 38cf0c290..966a6802a 100644 --- a/management/server/http/middleware/auth_middleware.go +++ b/management/server/http/middleware/auth_middleware.go @@ -134,6 +134,9 @@ func (m *AuthMiddleware) checkJWTFromRequest(r *http.Request, authHeaderParts [] userAuth.IsChild = ok } + // Email is now extracted in ToUserAuth (from claims or userinfo endpoint) + // Available as userAuth.Email + // we need to call this method because if user is new, we will automatically add it to existing or create a new account accountId, _, err := m.ensureAccount(ctx, userAuth) if err != nil { diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index e8513feb5..656f72997 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -94,7 +94,7 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee groupsManagerMock := groups.NewManagerMock() peersManager := peers.NewManager(store, permissionsManager) - apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManagerMock, resourcesManagerMock, routersManagerMock, groupsManagerMock, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, networkMapController) + apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManagerMock, resourcesManagerMock, routersManagerMock, groupsManagerMock, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, networkMapController, nil) if err != nil { t.Fatalf("Failed to create API handler: %v", err) } diff --git a/management/server/identity_provider.go b/management/server/identity_provider.go new file mode 100644 index 000000000..6649c3953 --- /dev/null +++ b/management/server/identity_provider.go @@ -0,0 +1,234 @@ +package server + +import ( + "context" + "errors" + + "github.com/dexidp/dex/storage" + "github.com/rs/xid" + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/idp/dex" + "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/management/server/permissions/modules" + "github.com/netbirdio/netbird/management/server/permissions/operations" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/management/status" +) + +// GetIdentityProviders returns all identity providers for an account +func (am *DefaultAccountManager) GetIdentityProviders(ctx context.Context, accountID, userID string) ([]*types.IdentityProvider, error) { + ok, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.IdentityProviders, operations.Read) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + embeddedManager, ok := am.idpManager.(*idp.EmbeddedIdPManager) + if !ok { + log.Warn("identity provider management requires embedded IdP") + return []*types.IdentityProvider{}, nil + } + + connectors, err := embeddedManager.ListConnectors(ctx) + if err != nil { + return nil, status.Errorf(status.Internal, "failed to list identity providers: %v", err) + } + + result := make([]*types.IdentityProvider, 0, len(connectors)) + for _, conn := range connectors { + result = append(result, connectorConfigToIdentityProvider(conn, accountID)) + } + + return result, nil +} + +// GetIdentityProvider returns a specific identity provider by ID +func (am *DefaultAccountManager) GetIdentityProvider(ctx context.Context, accountID, idpID, userID string) (*types.IdentityProvider, error) { + ok, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.IdentityProviders, operations.Read) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + embeddedManager, ok := am.idpManager.(*idp.EmbeddedIdPManager) + if !ok { + return nil, status.Errorf(status.Internal, "identity provider management requires embedded IdP") + } + + conn, err := embeddedManager.GetConnector(ctx, idpID) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + return nil, status.Errorf(status.NotFound, "identity provider not found") + } + return nil, status.Errorf(status.Internal, "failed to get identity provider: %v", err) + } + + return connectorConfigToIdentityProvider(conn, accountID), nil +} + +// CreateIdentityProvider creates a new identity provider +func (am *DefaultAccountManager) CreateIdentityProvider(ctx context.Context, accountID, userID string, idpConfig *types.IdentityProvider) (*types.IdentityProvider, error) { + ok, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.IdentityProviders, operations.Create) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + if err := idpConfig.Validate(); err != nil { + return nil, status.Errorf(status.InvalidArgument, "%s", err.Error()) + } + + embeddedManager, ok := am.idpManager.(*idp.EmbeddedIdPManager) + if !ok { + return nil, status.Errorf(status.Internal, "identity provider management requires embedded IdP") + } + + // Generate ID if not provided + if idpConfig.ID == "" { + idpConfig.ID = generateIdentityProviderID(idpConfig.Type) + } + idpConfig.AccountID = accountID + + connCfg := identityProviderToConnectorConfig(idpConfig) + + _, err = embeddedManager.CreateConnector(ctx, connCfg) + if err != nil { + return nil, status.Errorf(status.Internal, "failed to create identity provider: %v", err) + } + + am.StoreEvent(ctx, userID, idpConfig.ID, accountID, activity.IdentityProviderCreated, idpConfig.EventMeta()) + + return idpConfig, nil +} + +// UpdateIdentityProvider updates an existing identity provider +func (am *DefaultAccountManager) UpdateIdentityProvider(ctx context.Context, accountID, idpID, userID string, idpConfig *types.IdentityProvider) (*types.IdentityProvider, error) { + ok, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.IdentityProviders, operations.Update) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + if err := idpConfig.Validate(); err != nil { + return nil, status.Errorf(status.InvalidArgument, "%s", err.Error()) + } + + embeddedManager, ok := am.idpManager.(*idp.EmbeddedIdPManager) + if !ok { + return nil, status.Errorf(status.Internal, "identity provider management requires embedded IdP") + } + + idpConfig.ID = idpID + idpConfig.AccountID = accountID + + connCfg := identityProviderToConnectorConfig(idpConfig) + + if err := embeddedManager.UpdateConnector(ctx, connCfg); err != nil { + return nil, status.Errorf(status.Internal, "failed to update identity provider: %v", err) + } + + am.StoreEvent(ctx, userID, idpConfig.ID, accountID, activity.IdentityProviderUpdated, idpConfig.EventMeta()) + + return idpConfig, nil +} + +// DeleteIdentityProvider deletes an identity provider +func (am *DefaultAccountManager) DeleteIdentityProvider(ctx context.Context, accountID, idpID, userID string) error { + ok, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.IdentityProviders, operations.Delete) + if err != nil { + return status.NewPermissionValidationError(err) + } + if !ok { + return status.NewPermissionDeniedError() + } + + embeddedManager, ok := am.idpManager.(*idp.EmbeddedIdPManager) + if !ok { + return status.Errorf(status.Internal, "identity provider management requires embedded IdP") + } + + // Get the IDP info before deleting for the activity event + conn, err := embeddedManager.GetConnector(ctx, idpID) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + return status.Errorf(status.NotFound, "identity provider not found") + } + return status.Errorf(status.Internal, "failed to get identity provider: %v", err) + } + idpConfig := connectorConfigToIdentityProvider(conn, accountID) + + if err := embeddedManager.DeleteConnector(ctx, idpID); err != nil { + if errors.Is(err, storage.ErrNotFound) { + return status.Errorf(status.NotFound, "identity provider not found") + } + return status.Errorf(status.Internal, "failed to delete identity provider: %v", err) + } + + am.StoreEvent(ctx, userID, idpID, accountID, activity.IdentityProviderDeleted, idpConfig.EventMeta()) + + return nil +} + +// connectorConfigToIdentityProvider converts a dex.ConnectorConfig to types.IdentityProvider +func connectorConfigToIdentityProvider(conn *dex.ConnectorConfig, accountID string) *types.IdentityProvider { + return &types.IdentityProvider{ + ID: conn.ID, + AccountID: accountID, + Type: types.IdentityProviderType(conn.Type), + Name: conn.Name, + Issuer: conn.Issuer, + ClientID: conn.ClientID, + ClientSecret: conn.ClientSecret, + } +} + +// identityProviderToConnectorConfig converts a types.IdentityProvider to dex.ConnectorConfig +func identityProviderToConnectorConfig(idpConfig *types.IdentityProvider) *dex.ConnectorConfig { + return &dex.ConnectorConfig{ + ID: idpConfig.ID, + Name: idpConfig.Name, + Type: string(idpConfig.Type), + Issuer: idpConfig.Issuer, + ClientID: idpConfig.ClientID, + ClientSecret: idpConfig.ClientSecret, + } +} + +// generateIdentityProviderID generates a unique ID for an identity provider. +// For specific provider types (okta, zitadel, entra, google, pocketid, microsoft), +// the ID is prefixed with the type name. Generic OIDC providers get no prefix. +func generateIdentityProviderID(idpType types.IdentityProviderType) string { + id := xid.New().String() + + switch idpType { + case types.IdentityProviderTypeOkta: + return "okta-" + id + case types.IdentityProviderTypeZitadel: + return "zitadel-" + id + case types.IdentityProviderTypeEntra: + return "entra-" + id + case types.IdentityProviderTypeGoogle: + return "google-" + id + case types.IdentityProviderTypePocketID: + return "pocketid-" + id + case types.IdentityProviderTypeMicrosoft: + return "microsoft-" + id + case types.IdentityProviderTypeAuthentik: + return "authentik-" + id + case types.IdentityProviderTypeKeycloak: + return "keycloak-" + id + default: + // Generic OIDC - no prefix + return id + } +} diff --git a/management/server/identity_provider_test.go b/management/server/identity_provider_test.go new file mode 100644 index 000000000..d637c4a8f --- /dev/null +++ b/management/server/identity_provider_test.go @@ -0,0 +1,202 @@ +package server + +import ( + "context" + "path/filepath" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/internals/controllers/network_map/controller" + "github.com/netbirdio/netbird/management/internals/controllers/network_map/update_channel" + "github.com/netbirdio/netbird/management/internals/modules/peers" + ephemeral_manager "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" + "github.com/netbirdio/netbird/management/internals/server/config" + "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/permissions" + "github.com/netbirdio/netbird/management/server/settings" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/telemetry" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/auth" +) + +func createManagerWithEmbeddedIdP(t testing.TB) (*DefaultAccountManager, *update_channel.PeersUpdateManager, error) { + t.Helper() + + ctx := context.Background() + + dataDir := t.TempDir() + testStore, cleanUp, err := store.NewTestStoreFromSQL(ctx, "", dataDir) + if err != nil { + return nil, nil, err + } + t.Cleanup(cleanUp) + + // Create embedded IdP manager + embeddedConfig := &idp.EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + Storage: idp.EmbeddedStorageConfig{ + Type: "sqlite3", + Config: idp.EmbeddedStorageTypeConfig{ + File: filepath.Join(dataDir, "dex.db"), + }, + }, + } + + idpManager, err := idp.NewEmbeddedIdPManager(ctx, embeddedConfig, nil) + if err != nil { + return nil, nil, err + } + t.Cleanup(func() { _ = idpManager.Stop(ctx) }) + + eventStore := &activity.InMemoryEventStore{} + + metrics, err := telemetry.NewDefaultAppMetrics(ctx) + if err != nil { + return nil, nil, err + } + + ctrl := gomock.NewController(t) + t.Cleanup(ctrl.Finish) + + settingsMockManager := settings.NewMockManager(ctrl) + settingsMockManager.EXPECT(). + GetExtraSettings(gomock.Any(), gomock.Any()). + Return(&types.ExtraSettings{}, nil). + AnyTimes() + settingsMockManager.EXPECT(). + UpdateExtraSettings(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + Return(false, nil). + AnyTimes() + + permissionsManager := permissions.NewManager(testStore) + + updateManager := update_channel.NewPeersUpdateManager(metrics) + requestBuffer := NewAccountRequestBuffer(ctx, testStore) + networkMapController := controller.NewController(ctx, testStore, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(testStore, peers.NewManager(testStore, permissionsManager)), &config.Config{}) + manager, err := BuildManager(ctx, &config.Config{}, testStore, networkMapController, idpManager, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + if err != nil { + return nil, nil, err + } + + return manager, updateManager, nil +} + +func TestDefaultAccountManager_CreateIdentityProvider_Validation(t *testing.T) { + manager, _, err := createManager(t) + require.NoError(t, err) + + userID := "testingUser" + account, err := manager.GetOrCreateAccountByUser(context.Background(), auth.UserAuth{UserId: userID}) + require.NoError(t, err) + + testCases := []struct { + name string + idp *types.IdentityProvider + expectError bool + errorMsg string + }{ + { + name: "Missing Name", + idp: &types.IdentityProvider{ + Type: types.IdentityProviderTypeOIDC, + Issuer: "https://issuer.example.com", + ClientID: "client-id", + }, + expectError: true, + errorMsg: "name is required", + }, + { + name: "Missing Type", + idp: &types.IdentityProvider{ + Name: "Test IDP", + Issuer: "https://issuer.example.com", + ClientID: "client-id", + }, + expectError: true, + errorMsg: "type is required", + }, + { + name: "Missing Issuer", + idp: &types.IdentityProvider{ + Name: "Test IDP", + Type: types.IdentityProviderTypeOIDC, + ClientID: "client-id", + }, + expectError: true, + errorMsg: "issuer is required", + }, + { + name: "Missing ClientID", + idp: &types.IdentityProvider{ + Name: "Test IDP", + Type: types.IdentityProviderTypeOIDC, + Issuer: "https://issuer.example.com", + }, + expectError: true, + errorMsg: "client ID is required", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := manager.CreateIdentityProvider(context.Background(), account.Id, userID, tc.idp) + if tc.expectError { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.errorMsg) + } + }) + } +} + +func TestDefaultAccountManager_GetIdentityProviders(t *testing.T) { + manager, _, err := createManager(t) + require.NoError(t, err) + + userID := "testingUser" + account, err := manager.GetOrCreateAccountByUser(context.Background(), auth.UserAuth{UserId: userID}) + require.NoError(t, err) + + // Should return empty list (stub implementation) + providers, err := manager.GetIdentityProviders(context.Background(), account.Id, userID) + require.NoError(t, err) + assert.Empty(t, providers) +} + +func TestDefaultAccountManager_GetIdentityProvider_NotFound(t *testing.T) { + manager, _, err := createManagerWithEmbeddedIdP(t) + require.NoError(t, err) + + userID := "testingUser" + account, err := manager.GetOrCreateAccountByUser(context.Background(), auth.UserAuth{UserId: userID}) + require.NoError(t, err) + + // Should return not found error when identity provider doesn't exist + _, err = manager.GetIdentityProvider(context.Background(), account.Id, "any-id", userID) + require.Error(t, err) + assert.Contains(t, err.Error(), "not found") +} + +func TestDefaultAccountManager_UpdateIdentityProvider_Validation(t *testing.T) { + manager, _, err := createManager(t) + require.NoError(t, err) + + userID := "testingUser" + account, err := manager.GetOrCreateAccountByUser(context.Background(), auth.UserAuth{UserId: userID}) + require.NoError(t, err) + + // Should fail validation before reaching "not implemented" error + invalidIDP := &types.IdentityProvider{ + Name: "", // Empty name should fail validation + } + + _, err = manager.UpdateIdentityProvider(context.Background(), account.Id, "some-id", userID, invalidIDP) + require.Error(t, err) + assert.Contains(t, err.Error(), "name is required") +} diff --git a/management/server/idp/embedded.go b/management/server/idp/embedded.go new file mode 100644 index 000000000..963b5ae3d --- /dev/null +++ b/management/server/idp/embedded.go @@ -0,0 +1,511 @@ +package idp + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/dexidp/dex/storage" + "github.com/google/uuid" + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/idp/dex" + "github.com/netbirdio/netbird/management/server/telemetry" +) + +const ( + staticClientDashboard = "netbird-dashboard" + staticClientCLI = "netbird-cli" + defaultCLIRedirectURL1 = "http://localhost:53000/" + defaultCLIRedirectURL2 = "http://localhost:54000/" + defaultScopes = "openid profile email offline_access" + defaultUserIDClaim = "sub" +) + +// EmbeddedIdPConfig contains configuration for the embedded Dex OIDC identity provider +type EmbeddedIdPConfig struct { + // Enabled indicates whether the embedded IDP is enabled + Enabled bool + // Issuer is the OIDC issuer URL (e.g., "http://localhost:3002/oauth2") + Issuer string + // Storage configuration for the IdP database + Storage EmbeddedStorageConfig + // DashboardRedirectURIs are the OAuth2 redirect URIs for the dashboard client + DashboardRedirectURIs []string + // DashboardRedirectURIs are the OAuth2 redirect URIs for the dashboard client + CLIRedirectURIs []string + // Owner is the initial owner/admin user (optional, can be nil) + Owner *OwnerConfig + // SignKeyRefreshEnabled enables automatic key rotation for signing keys + SignKeyRefreshEnabled bool +} + +// EmbeddedStorageConfig holds storage configuration for the embedded IdP. +type EmbeddedStorageConfig struct { + // Type is the storage type (currently only "sqlite3" is supported) + Type string + // Config contains type-specific configuration + Config EmbeddedStorageTypeConfig +} + +// EmbeddedStorageTypeConfig contains type-specific storage configuration. +type EmbeddedStorageTypeConfig struct { + // File is the path to the SQLite database file (for sqlite3 type) + File string +} + +// OwnerConfig represents the initial owner/admin user for the embedded IdP. +type OwnerConfig struct { + // Email is the user's email address (required) + Email string + // Hash is the bcrypt hash of the user's password (required) + Hash string + // Username is the display name for the user (optional, defaults to email) + Username string +} + +// ToYAMLConfig converts EmbeddedIdPConfig to dex.YAMLConfig. +func (c *EmbeddedIdPConfig) ToYAMLConfig() (*dex.YAMLConfig, error) { + if c.Issuer == "" { + return nil, fmt.Errorf("issuer is required") + } + if c.Storage.Type == "" { + c.Storage.Type = "sqlite3" + } + if c.Storage.Type == "sqlite3" && c.Storage.Config.File == "" { + return nil, fmt.Errorf("storage file is required for sqlite3") + } + + // Build CLI redirect URIs including the device callback (both relative and absolute) + cliRedirectURIs := c.CLIRedirectURIs + cliRedirectURIs = append(cliRedirectURIs, "/device/callback") + cliRedirectURIs = append(cliRedirectURIs, c.Issuer+"/device/callback") + + cfg := &dex.YAMLConfig{ + Issuer: c.Issuer, + Storage: dex.Storage{ + Type: c.Storage.Type, + Config: map[string]interface{}{ + "file": c.Storage.Config.File, + }, + }, + Web: dex.Web{ + AllowedOrigins: []string{"*"}, + AllowedHeaders: []string{"Authorization", "Content-Type"}, + }, + OAuth2: dex.OAuth2{ + SkipApprovalScreen: true, + }, + Frontend: dex.Frontend{ + Issuer: "NetBird", + Theme: "light", + }, + EnablePasswordDB: true, + StaticClients: []storage.Client{ + { + ID: staticClientDashboard, + Name: "NetBird Dashboard", + Public: true, + RedirectURIs: c.DashboardRedirectURIs, + }, + { + ID: staticClientCLI, + Name: "NetBird CLI", + Public: true, + RedirectURIs: cliRedirectURIs, + }, + }, + } + + // Add owner user if provided + if c.Owner != nil && c.Owner.Email != "" && c.Owner.Hash != "" { + username := c.Owner.Username + if username == "" { + username = c.Owner.Email + } + cfg.StaticPasswords = []dex.Password{ + { + Email: c.Owner.Email, + Hash: []byte(c.Owner.Hash), + Username: username, + UserID: uuid.New().String(), + }, + } + } + + return cfg, nil +} + +// Compile-time check that EmbeddedIdPManager implements Manager interface +var _ Manager = (*EmbeddedIdPManager)(nil) + +// Compile-time check that EmbeddedIdPManager implements OAuthConfigProvider interface +var _ OAuthConfigProvider = (*EmbeddedIdPManager)(nil) + +// OAuthConfigProvider defines the interface for OAuth configuration needed by auth flows. +type OAuthConfigProvider interface { + GetIssuer() string + GetKeysLocation() string + GetClientIDs() []string + GetUserIDClaim() string + GetTokenEndpoint() string + GetDeviceAuthEndpoint() string + GetAuthorizationEndpoint() string + GetDefaultScopes() string + GetCLIClientID() string + GetCLIRedirectURLs() []string +} + +// EmbeddedIdPManager implements the Manager interface using the embedded Dex IdP. +type EmbeddedIdPManager struct { + provider *dex.Provider + appMetrics telemetry.AppMetrics + config EmbeddedIdPConfig +} + +// NewEmbeddedIdPManager creates a new instance of EmbeddedIdPManager from a configuration. +// It instantiates the underlying Dex provider internally. +// Note: Storage defaults are applied in config loading (applyEmbeddedIdPConfig) based on Datadir. +func NewEmbeddedIdPManager(ctx context.Context, config *EmbeddedIdPConfig, appMetrics telemetry.AppMetrics) (*EmbeddedIdPManager, error) { + if config == nil { + return nil, fmt.Errorf("embedded IdP config is required") + } + + // Apply defaults for CLI redirect URIs + if len(config.CLIRedirectURIs) == 0 { + config.CLIRedirectURIs = []string{defaultCLIRedirectURL1, defaultCLIRedirectURL2} + } + + // there are some properties create when creating YAML config (e.g., auth clients) + yamlConfig, err := config.ToYAMLConfig() + if err != nil { + return nil, err + } + + provider, err := dex.NewProviderFromYAML(ctx, yamlConfig) + if err != nil { + return nil, fmt.Errorf("failed to create embedded IdP provider: %w", err) + } + + log.WithContext(ctx).Infof("embedded Dex IDP initialized with issuer: %s", yamlConfig.Issuer) + + return &EmbeddedIdPManager{ + provider: provider, + appMetrics: appMetrics, + config: *config, + }, nil +} + +// Handler returns the HTTP handler for serving OIDC requests. +func (m *EmbeddedIdPManager) Handler() http.Handler { + return m.provider.Handler() +} + +// Stop gracefully shuts down the embedded IdP provider. +func (m *EmbeddedIdPManager) Stop(ctx context.Context) error { + return m.provider.Stop(ctx) +} + +// UpdateUserAppMetadata updates user app metadata based on userID and metadata map. +func (m *EmbeddedIdPManager) UpdateUserAppMetadata(ctx context.Context, userID string, appMetadata AppMetadata) error { + // TODO: implement + return nil +} + +// GetUserDataByID requests user data from the embedded IdP via user ID. +func (m *EmbeddedIdPManager) GetUserDataByID(ctx context.Context, userID string, appMetadata AppMetadata) (*UserData, error) { + user, err := m.provider.GetUserByID(ctx, userID) + if err != nil { + if m.appMetrics != nil { + m.appMetrics.IDPMetrics().CountRequestError() + } + return nil, fmt.Errorf("failed to get user by ID: %w", err) + } + + return &UserData{ + Email: user.Email, + Name: user.Username, + ID: user.UserID, + AppMetadata: appMetadata, + }, nil +} + +// GetAccount returns all the users for a given account. +// Note: Embedded dex doesn't store account metadata, so this returns all users. +func (m *EmbeddedIdPManager) GetAccount(ctx context.Context, accountID string) ([]*UserData, error) { + users, err := m.provider.ListUsers(ctx) + if err != nil { + if m.appMetrics != nil { + m.appMetrics.IDPMetrics().CountRequestError() + } + return nil, fmt.Errorf("failed to list users: %w", err) + } + + result := make([]*UserData, 0, len(users)) + for _, user := range users { + result = append(result, &UserData{ + Email: user.Email, + Name: user.Username, + ID: user.UserID, + AppMetadata: AppMetadata{ + WTAccountID: accountID, + }, + }) + } + + return result, nil +} + +// GetAllAccounts gets all registered accounts with corresponding user data. +// Note: Embedded dex doesn't store account metadata, so all users are indexed under UnsetAccountID. +func (m *EmbeddedIdPManager) GetAllAccounts(ctx context.Context) (map[string][]*UserData, error) { + if m.appMetrics != nil { + m.appMetrics.IDPMetrics().CountGetAllAccounts() + } + + users, err := m.provider.ListUsers(ctx) + if err != nil { + if m.appMetrics != nil { + m.appMetrics.IDPMetrics().CountRequestError() + } + return nil, fmt.Errorf("failed to list users: %w", err) + } + + indexedUsers := make(map[string][]*UserData) + for _, user := range users { + indexedUsers[UnsetAccountID] = append(indexedUsers[UnsetAccountID], &UserData{ + Email: user.Email, + Name: user.Username, + ID: user.UserID, + }) + } + + return indexedUsers, nil +} + +// CreateUser creates a new user in the embedded IdP. +func (m *EmbeddedIdPManager) CreateUser(ctx context.Context, email, name, accountID, invitedByEmail string) (*UserData, error) { + if m.appMetrics != nil { + m.appMetrics.IDPMetrics().CountCreateUser() + } + + // Check if user already exists + _, err := m.provider.GetUser(ctx, email) + if err == nil { + return nil, fmt.Errorf("user with email %s already exists", email) + } + if !errors.Is(err, storage.ErrNotFound) { + if m.appMetrics != nil { + m.appMetrics.IDPMetrics().CountRequestError() + } + return nil, fmt.Errorf("failed to check existing user: %w", err) + } + + // Generate a random password for the new user + password := GeneratePassword(16, 2, 2, 2) + + // Create the user via provider (handles hashing and ID generation) + // The provider returns an encoded user ID in Dex's format (base64 protobuf with connector ID) + userID, err := m.provider.CreateUser(ctx, email, name, password) + if err != nil { + if m.appMetrics != nil { + m.appMetrics.IDPMetrics().CountRequestError() + } + return nil, fmt.Errorf("failed to create user in embedded IdP: %w", err) + } + + log.WithContext(ctx).Debugf("created user %s in embedded IdP", email) + + return &UserData{ + Email: email, + Name: name, + ID: userID, + Password: password, + AppMetadata: AppMetadata{ + WTAccountID: accountID, + WTInvitedBy: invitedByEmail, + }, + }, nil +} + +// GetUserByEmail searches users with a given email. +func (m *EmbeddedIdPManager) GetUserByEmail(ctx context.Context, email string) ([]*UserData, error) { + user, err := m.provider.GetUser(ctx, email) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + return nil, nil // Return empty slice for not found + } + if m.appMetrics != nil { + m.appMetrics.IDPMetrics().CountRequestError() + } + return nil, fmt.Errorf("failed to get user by email: %w", err) + } + + return []*UserData{ + { + Email: user.Email, + Name: user.Username, + ID: user.UserID, + }, + }, nil +} + +// CreateUserWithPassword creates a new user in the embedded IdP with a provided password. +// Unlike CreateUser which auto-generates a password, this method uses the provided password. +// This is useful for instance setup where the user provides their own password. +func (m *EmbeddedIdPManager) CreateUserWithPassword(ctx context.Context, email, password, name string) (*UserData, error) { + if m.appMetrics != nil { + m.appMetrics.IDPMetrics().CountCreateUser() + } + + // Check if user already exists + _, err := m.provider.GetUser(ctx, email) + if err == nil { + return nil, fmt.Errorf("user with email %s already exists", email) + } + if !errors.Is(err, storage.ErrNotFound) { + if m.appMetrics != nil { + m.appMetrics.IDPMetrics().CountRequestError() + } + return nil, fmt.Errorf("failed to check existing user: %w", err) + } + + // Create the user via provider with the provided password + userID, err := m.provider.CreateUser(ctx, email, name, password) + if err != nil { + if m.appMetrics != nil { + m.appMetrics.IDPMetrics().CountRequestError() + } + return nil, fmt.Errorf("failed to create user in embedded IdP: %w", err) + } + + log.WithContext(ctx).Debugf("created user %s in embedded IdP with provided password", email) + + return &UserData{ + Email: email, + Name: name, + ID: userID, + }, nil +} + +// InviteUserByID resends an invitation to a user. +func (m *EmbeddedIdPManager) InviteUserByID(ctx context.Context, userID string) error { + // TODO: implement + return fmt.Errorf("not implemented") +} + +// DeleteUser deletes a user from the embedded IdP by user ID. +func (m *EmbeddedIdPManager) DeleteUser(ctx context.Context, userID string) error { + if m.appMetrics != nil { + m.appMetrics.IDPMetrics().CountDeleteUser() + } + + // Get user by ID to retrieve email (provider.DeleteUser requires email) + user, err := m.provider.GetUserByID(ctx, userID) + if err != nil { + if m.appMetrics != nil { + m.appMetrics.IDPMetrics().CountRequestError() + } + return fmt.Errorf("failed to get user for deletion: %w", err) + } + + err = m.provider.DeleteUser(ctx, user.Email) + if err != nil { + if m.appMetrics != nil { + m.appMetrics.IDPMetrics().CountRequestError() + } + return fmt.Errorf("failed to delete user from embedded IdP: %w", err) + } + + log.WithContext(ctx).Debugf("deleted user %s from embedded IdP", user.Email) + + return nil +} + +// CreateConnector creates a new identity provider connector in Dex. +// Returns the created connector config with the redirect URL populated. +func (m *EmbeddedIdPManager) CreateConnector(ctx context.Context, cfg *dex.ConnectorConfig) (*dex.ConnectorConfig, error) { + return m.provider.CreateConnector(ctx, cfg) +} + +// GetConnector retrieves an identity provider connector by ID. +func (m *EmbeddedIdPManager) GetConnector(ctx context.Context, id string) (*dex.ConnectorConfig, error) { + return m.provider.GetConnector(ctx, id) +} + +// ListConnectors returns all identity provider connectors. +func (m *EmbeddedIdPManager) ListConnectors(ctx context.Context) ([]*dex.ConnectorConfig, error) { + return m.provider.ListConnectors(ctx) +} + +// UpdateConnector updates an existing identity provider connector. +func (m *EmbeddedIdPManager) UpdateConnector(ctx context.Context, cfg *dex.ConnectorConfig) error { + // Preserve existing secret if not provided in update + if cfg.ClientSecret == "" { + existing, err := m.provider.GetConnector(ctx, cfg.ID) + if err != nil { + return fmt.Errorf("failed to get existing connector: %w", err) + } + cfg.ClientSecret = existing.ClientSecret + } + return m.provider.UpdateConnector(ctx, cfg) +} + +// DeleteConnector removes an identity provider connector. +func (m *EmbeddedIdPManager) DeleteConnector(ctx context.Context, id string) error { + return m.provider.DeleteConnector(ctx, id) +} + +// GetIssuer returns the OIDC issuer URL. +func (m *EmbeddedIdPManager) GetIssuer() string { + return m.provider.GetIssuer() +} + +// GetTokenEndpoint returns the OAuth2 token endpoint URL. +func (m *EmbeddedIdPManager) GetTokenEndpoint() string { + return m.provider.GetTokenEndpoint() +} + +// GetDeviceAuthEndpoint returns the OAuth2 device authorization endpoint URL. +func (m *EmbeddedIdPManager) GetDeviceAuthEndpoint() string { + return m.provider.GetDeviceAuthEndpoint() +} + +// GetAuthorizationEndpoint returns the OAuth2 authorization endpoint URL. +func (m *EmbeddedIdPManager) GetAuthorizationEndpoint() string { + return m.provider.GetAuthorizationEndpoint() +} + +// GetDefaultScopes returns the default OAuth2 scopes for authentication. +func (m *EmbeddedIdPManager) GetDefaultScopes() string { + return defaultScopes +} + +// GetCLIClientID returns the client ID for CLI authentication. +func (m *EmbeddedIdPManager) GetCLIClientID() string { + return staticClientCLI +} + +// GetCLIRedirectURLs returns the redirect URLs configured for the CLI client. +func (m *EmbeddedIdPManager) GetCLIRedirectURLs() []string { + if len(m.config.CLIRedirectURIs) == 0 { + return []string{defaultCLIRedirectURL1, defaultCLIRedirectURL2} + } + return m.config.CLIRedirectURIs +} + +// GetKeysLocation returns the JWKS endpoint URL for token validation. +func (m *EmbeddedIdPManager) GetKeysLocation() string { + return m.provider.GetKeysLocation() +} + +// GetClientIDs returns the OAuth2 client IDs configured for this provider. +func (m *EmbeddedIdPManager) GetClientIDs() []string { + return []string{staticClientDashboard, staticClientCLI} +} + +// GetUserIDClaim returns the JWT claim name used for user identification. +func (m *EmbeddedIdPManager) GetUserIDClaim() string { + return defaultUserIDClaim +} diff --git a/management/server/idp/embedded_test.go b/management/server/idp/embedded_test.go new file mode 100644 index 000000000..cfd9c2b54 --- /dev/null +++ b/management/server/idp/embedded_test.go @@ -0,0 +1,249 @@ +package idp + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/idp/dex" +) + +func TestEmbeddedIdPManager_CreateUser_EndToEnd(t *testing.T) { + ctx := context.Background() + + // Create a temporary directory for the test + tmpDir, err := os.MkdirTemp("", "embedded-idp-test-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + // Create the embedded IDP config + config := &EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + Storage: EmbeddedStorageConfig{ + Type: "sqlite3", + Config: EmbeddedStorageTypeConfig{ + File: filepath.Join(tmpDir, "dex.db"), + }, + }, + } + + // Create the embedded IDP manager + manager, err := NewEmbeddedIdPManager(ctx, config, nil) + require.NoError(t, err) + defer func() { _ = manager.Stop(ctx) }() + + // Test data + email := "newuser@example.com" + name := "New User" + accountID := "test-account-id" + invitedByEmail := "admin@example.com" + + // Create the user + userData, err := manager.CreateUser(ctx, email, name, accountID, invitedByEmail) + require.NoError(t, err) + require.NotNil(t, userData) + + t.Logf("Created user: ID=%s, Email=%s, Name=%s, Password=%s", + userData.ID, userData.Email, userData.Name, userData.Password) + + // Verify user data + assert.Equal(t, email, userData.Email) + assert.Equal(t, name, userData.Name) + assert.NotEmpty(t, userData.ID) + assert.NotEmpty(t, userData.Password) + assert.Equal(t, accountID, userData.AppMetadata.WTAccountID) + assert.Equal(t, invitedByEmail, userData.AppMetadata.WTInvitedBy) + + // Verify the user ID is in Dex's encoded format (base64 protobuf) + rawUserID, connectorID, err := dex.DecodeDexUserID(userData.ID) + require.NoError(t, err) + assert.NotEmpty(t, rawUserID) + assert.Equal(t, "local", connectorID) + + t.Logf("Decoded user ID: rawUserID=%s, connectorID=%s", rawUserID, connectorID) + + // Verify we can look up the user by the encoded ID + lookedUpUser, err := manager.GetUserDataByID(ctx, userData.ID, AppMetadata{WTAccountID: accountID}) + require.NoError(t, err) + assert.Equal(t, email, lookedUpUser.Email) + + // Verify we can look up by email + users, err := manager.GetUserByEmail(ctx, email) + require.NoError(t, err) + require.Len(t, users, 1) + assert.Equal(t, email, users[0].Email) + + // Verify creating duplicate user fails + _, err = manager.CreateUser(ctx, email, name, accountID, invitedByEmail) + assert.Error(t, err) + assert.Contains(t, err.Error(), "already exists") +} + +func TestEmbeddedIdPManager_GetUserDataByID_WithEncodedID(t *testing.T) { + ctx := context.Background() + + tmpDir, err := os.MkdirTemp("", "embedded-idp-test-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + config := &EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + Storage: EmbeddedStorageConfig{ + Type: "sqlite3", + Config: EmbeddedStorageTypeConfig{ + File: filepath.Join(tmpDir, "dex.db"), + }, + }, + } + + manager, err := NewEmbeddedIdPManager(ctx, config, nil) + require.NoError(t, err) + defer func() { _ = manager.Stop(ctx) }() + + // Create a user first + userData, err := manager.CreateUser(ctx, "test@example.com", "Test User", "account1", "admin@example.com") + require.NoError(t, err) + + // The returned ID should be encoded + encodedID := userData.ID + + // Lookup should work with the encoded ID + lookedUp, err := manager.GetUserDataByID(ctx, encodedID, AppMetadata{WTAccountID: "account1"}) + require.NoError(t, err) + assert.Equal(t, "test@example.com", lookedUp.Email) + assert.Equal(t, "Test User", lookedUp.Name) +} + +func TestEmbeddedIdPManager_DeleteUser(t *testing.T) { + ctx := context.Background() + + tmpDir, err := os.MkdirTemp("", "embedded-idp-test-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + config := &EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + Storage: EmbeddedStorageConfig{ + Type: "sqlite3", + Config: EmbeddedStorageTypeConfig{ + File: filepath.Join(tmpDir, "dex.db"), + }, + }, + } + + manager, err := NewEmbeddedIdPManager(ctx, config, nil) + require.NoError(t, err) + defer func() { _ = manager.Stop(ctx) }() + + // Create a user + userData, err := manager.CreateUser(ctx, "delete-me@example.com", "Delete Me", "account1", "admin@example.com") + require.NoError(t, err) + + // Delete the user using the encoded ID + err = manager.DeleteUser(ctx, userData.ID) + require.NoError(t, err) + + // Verify user no longer exists + _, err = manager.GetUserDataByID(ctx, userData.ID, AppMetadata{}) + assert.Error(t, err) +} + +func TestEmbeddedIdPManager_GetAccount(t *testing.T) { + ctx := context.Background() + + tmpDir, err := os.MkdirTemp("", "embedded-idp-test-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + config := &EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + Storage: EmbeddedStorageConfig{ + Type: "sqlite3", + Config: EmbeddedStorageTypeConfig{ + File: filepath.Join(tmpDir, "dex.db"), + }, + }, + } + + manager, err := NewEmbeddedIdPManager(ctx, config, nil) + require.NoError(t, err) + defer func() { _ = manager.Stop(ctx) }() + + // Create multiple users + _, err = manager.CreateUser(ctx, "user1@example.com", "User 1", "account1", "admin@example.com") + require.NoError(t, err) + + _, err = manager.CreateUser(ctx, "user2@example.com", "User 2", "account1", "admin@example.com") + require.NoError(t, err) + + // Get all users for the account + users, err := manager.GetAccount(ctx, "account1") + require.NoError(t, err) + assert.Len(t, users, 2) + + emails := make([]string, len(users)) + for i, u := range users { + emails[i] = u.Email + } + assert.Contains(t, emails, "user1@example.com") + assert.Contains(t, emails, "user2@example.com") +} + +func TestEmbeddedIdPManager_UserIDFormat_MatchesJWT(t *testing.T) { + // This test verifies that the user ID returned by CreateUser + // matches the format that Dex uses in JWT tokens (the 'sub' claim) + ctx := context.Background() + + tmpDir, err := os.MkdirTemp("", "embedded-idp-test-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + config := &EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + Storage: EmbeddedStorageConfig{ + Type: "sqlite3", + Config: EmbeddedStorageTypeConfig{ + File: filepath.Join(tmpDir, "dex.db"), + }, + }, + } + + manager, err := NewEmbeddedIdPManager(ctx, config, nil) + require.NoError(t, err) + defer func() { _ = manager.Stop(ctx) }() + + // Create a user + userData, err := manager.CreateUser(ctx, "jwt-test@example.com", "JWT Test", "account1", "admin@example.com") + require.NoError(t, err) + + // The ID should be in the format: base64(protobuf{user_id, connector_id}) + // Example: CiQ3YWFkOGMwNS0zMjg3LTQ3M2YtYjQyYS0zNjU1MDRiZjI1ZTcSBWxvY2Fs + + // Verify it can be decoded + rawUserID, connectorID, err := dex.DecodeDexUserID(userData.ID) + require.NoError(t, err) + + // Raw user ID should be a UUID + assert.Regexp(t, `^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$`, rawUserID) + + // Connector ID should be "local" for password-based auth + assert.Equal(t, "local", connectorID) + + // Re-encoding should produce the same result + reEncoded := dex.EncodeDexUserID(rawUserID, connectorID) + assert.Equal(t, userData.ID, reEncoded) + + t.Logf("User ID format verified:") + t.Logf(" Encoded ID: %s", userData.ID) + t.Logf(" Raw UUID: %s", rawUserID) + t.Logf(" Connector: %s", connectorID) +} diff --git a/management/server/idp/idp.go b/management/server/idp/idp.go index 4aad674d3..28e3d81f9 100644 --- a/management/server/idp/idp.go +++ b/management/server/idp/idp.go @@ -72,6 +72,7 @@ type UserData struct { Name string `json:"name"` ID string `json:"user_id"` AppMetadata AppMetadata `json:"app_metadata"` + Password string `json:"-"` // Plain password, only set on user creation, excluded from JSON } func (u *UserData) MarshalBinary() (data []byte, err error) { diff --git a/management/server/instance/manager.go b/management/server/instance/manager.go new file mode 100644 index 000000000..6f50e3ff7 --- /dev/null +++ b/management/server/instance/manager.go @@ -0,0 +1,136 @@ +package instance + +import ( + "context" + "errors" + "fmt" + "net/mail" + "sync" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/shared/management/status" +) + +// Manager handles instance-level operations like initial setup. +type Manager interface { + // IsSetupRequired checks if instance setup is required. + // Returns true if embedded IDP is enabled and no accounts exist. + IsSetupRequired(ctx context.Context) (bool, error) + + // CreateOwnerUser creates the initial owner user in the embedded IDP. + // This should only be called when IsSetupRequired returns true. + CreateOwnerUser(ctx context.Context, email, password, name string) (*idp.UserData, error) +} + +// DefaultManager is the default implementation of Manager. +type DefaultManager struct { + store store.Store + embeddedIdpManager *idp.EmbeddedIdPManager + + setupRequired bool + setupMu sync.RWMutex +} + +// NewManager creates a new instance manager. +// If idpManager is not an EmbeddedIdPManager, setup-related operations will return appropriate defaults. +func NewManager(ctx context.Context, store store.Store, idpManager idp.Manager) (Manager, error) { + embeddedIdp, _ := idpManager.(*idp.EmbeddedIdPManager) + + m := &DefaultManager{ + store: store, + embeddedIdpManager: embeddedIdp, + setupRequired: false, + } + + if embeddedIdp != nil { + err := m.loadSetupRequired(ctx) + if err != nil { + return nil, err + } + } + + return m, nil +} + +func (m *DefaultManager) loadSetupRequired(ctx context.Context) error { + users, err := m.embeddedIdpManager.GetAllAccounts(ctx) + if err != nil { + return err + } + + m.setupMu.Lock() + m.setupRequired = len(users) == 0 + m.setupMu.Unlock() + + return nil +} + +// IsSetupRequired checks if instance setup is required. +// Setup is required when: +// 1. Embedded IDP is enabled +// 2. No accounts exist in the store +func (m *DefaultManager) IsSetupRequired(_ context.Context) (bool, error) { + if m.embeddedIdpManager == nil { + return false, nil + } + + m.setupMu.RLock() + defer m.setupMu.RUnlock() + + return m.setupRequired, nil +} + +// CreateOwnerUser creates the initial owner user in the embedded IDP. +func (m *DefaultManager) CreateOwnerUser(ctx context.Context, email, password, name string) (*idp.UserData, error) { + + if err := m.validateSetupInfo(email, password, name); err != nil { + return nil, err + } + + if m.embeddedIdpManager == nil { + return nil, errors.New("embedded IDP is not enabled") + } + + m.setupMu.RLock() + setupRequired := m.setupRequired + m.setupMu.RUnlock() + + if !setupRequired { + return nil, status.Errorf(status.PreconditionFailed, "setup already completed") + } + + userData, err := m.embeddedIdpManager.CreateUserWithPassword(ctx, email, password, name) + if err != nil { + return nil, fmt.Errorf("failed to create user in embedded IdP: %w", err) + } + + m.setupMu.Lock() + m.setupRequired = false + m.setupMu.Unlock() + + log.WithContext(ctx).Infof("created owner user %s in embedded IdP", email) + + return userData, nil +} + +func (m *DefaultManager) validateSetupInfo(email, password, name string) error { + if email == "" { + return status.Errorf(status.InvalidArgument, "email is required") + } + if _, err := mail.ParseAddress(email); err != nil { + return status.Errorf(status.InvalidArgument, "invalid email format") + } + if name == "" { + return status.Errorf(status.InvalidArgument, "name is required") + } + if password == "" { + return status.Errorf(status.InvalidArgument, "password is required") + } + if len(password) < 8 { + return status.Errorf(status.InvalidArgument, "password must be at least 8 characters") + } + return nil +} diff --git a/management/server/instance/manager_test.go b/management/server/instance/manager_test.go new file mode 100644 index 000000000..35d0ff53c --- /dev/null +++ b/management/server/instance/manager_test.go @@ -0,0 +1,268 @@ +package instance + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/server/idp" +) + +// mockStore implements a minimal store.Store for testing +type mockStore struct { + accountsCount int64 + err error +} + +func (m *mockStore) GetAccountsCounter(ctx context.Context) (int64, error) { + if m.err != nil { + return 0, m.err + } + return m.accountsCount, nil +} + +// mockEmbeddedIdPManager wraps the real EmbeddedIdPManager for testing +type mockEmbeddedIdPManager struct { + createUserFunc func(ctx context.Context, email, password, name string) (*idp.UserData, error) +} + +func (m *mockEmbeddedIdPManager) CreateUserWithPassword(ctx context.Context, email, password, name string) (*idp.UserData, error) { + if m.createUserFunc != nil { + return m.createUserFunc(ctx, email, password, name) + } + return &idp.UserData{ + ID: "test-user-id", + Email: email, + Name: name, + }, nil +} + +// testManager is a test implementation that accepts our mock types +type testManager struct { + store *mockStore + embeddedIdpManager *mockEmbeddedIdPManager +} + +func (m *testManager) IsSetupRequired(ctx context.Context) (bool, error) { + if m.embeddedIdpManager == nil { + return false, nil + } + + count, err := m.store.GetAccountsCounter(ctx) + if err != nil { + return false, err + } + + return count == 0, nil +} + +func (m *testManager) CreateOwnerUser(ctx context.Context, email, password, name string) (*idp.UserData, error) { + if m.embeddedIdpManager == nil { + return nil, errors.New("embedded IDP is not enabled") + } + + return m.embeddedIdpManager.CreateUserWithPassword(ctx, email, password, name) +} + +func TestIsSetupRequired_EmbeddedIdPDisabled(t *testing.T) { + manager := &testManager{ + store: &mockStore{accountsCount: 0}, + embeddedIdpManager: nil, // No embedded IDP + } + + required, err := manager.IsSetupRequired(context.Background()) + require.NoError(t, err) + assert.False(t, required, "setup should not be required when embedded IDP is disabled") +} + +func TestIsSetupRequired_NoAccounts(t *testing.T) { + manager := &testManager{ + store: &mockStore{accountsCount: 0}, + embeddedIdpManager: &mockEmbeddedIdPManager{}, + } + + required, err := manager.IsSetupRequired(context.Background()) + require.NoError(t, err) + assert.True(t, required, "setup should be required when no accounts exist") +} + +func TestIsSetupRequired_AccountsExist(t *testing.T) { + manager := &testManager{ + store: &mockStore{accountsCount: 1}, + embeddedIdpManager: &mockEmbeddedIdPManager{}, + } + + required, err := manager.IsSetupRequired(context.Background()) + require.NoError(t, err) + assert.False(t, required, "setup should not be required when accounts exist") +} + +func TestIsSetupRequired_MultipleAccounts(t *testing.T) { + manager := &testManager{ + store: &mockStore{accountsCount: 5}, + embeddedIdpManager: &mockEmbeddedIdPManager{}, + } + + required, err := manager.IsSetupRequired(context.Background()) + require.NoError(t, err) + assert.False(t, required, "setup should not be required when multiple accounts exist") +} + +func TestIsSetupRequired_StoreError(t *testing.T) { + manager := &testManager{ + store: &mockStore{err: errors.New("database error")}, + embeddedIdpManager: &mockEmbeddedIdPManager{}, + } + + _, err := manager.IsSetupRequired(context.Background()) + assert.Error(t, err, "should return error when store fails") +} + +func TestCreateOwnerUser_Success(t *testing.T) { + expectedEmail := "admin@example.com" + expectedName := "Admin User" + expectedPassword := "securepassword123" + + manager := &testManager{ + store: &mockStore{accountsCount: 0}, + embeddedIdpManager: &mockEmbeddedIdPManager{ + createUserFunc: func(ctx context.Context, email, password, name string) (*idp.UserData, error) { + assert.Equal(t, expectedEmail, email) + assert.Equal(t, expectedPassword, password) + assert.Equal(t, expectedName, name) + return &idp.UserData{ + ID: "created-user-id", + Email: email, + Name: name, + }, nil + }, + }, + } + + userData, err := manager.CreateOwnerUser(context.Background(), expectedEmail, expectedPassword, expectedName) + require.NoError(t, err) + assert.Equal(t, "created-user-id", userData.ID) + assert.Equal(t, expectedEmail, userData.Email) + assert.Equal(t, expectedName, userData.Name) +} + +func TestCreateOwnerUser_EmbeddedIdPDisabled(t *testing.T) { + manager := &testManager{ + store: &mockStore{accountsCount: 0}, + embeddedIdpManager: nil, + } + + _, err := manager.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + assert.Error(t, err, "should return error when embedded IDP is disabled") + assert.Contains(t, err.Error(), "embedded IDP is not enabled") +} + +func TestCreateOwnerUser_IdPError(t *testing.T) { + manager := &testManager{ + store: &mockStore{accountsCount: 0}, + embeddedIdpManager: &mockEmbeddedIdPManager{ + createUserFunc: func(ctx context.Context, email, password, name string) (*idp.UserData, error) { + return nil, errors.New("user already exists") + }, + }, + } + + _, err := manager.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + assert.Error(t, err, "should return error when IDP fails") +} + +func TestDefaultManager_ValidateSetupRequest(t *testing.T) { + manager := &DefaultManager{ + setupRequired: true, + } + + tests := []struct { + name string + email string + password string + userName string + expectError bool + errorMsg string + }{ + { + name: "valid request", + email: "admin@example.com", + password: "password123", + userName: "Admin User", + expectError: false, + }, + { + name: "empty email", + email: "", + password: "password123", + userName: "Admin User", + expectError: true, + errorMsg: "email is required", + }, + { + name: "invalid email format", + email: "not-an-email", + password: "password123", + userName: "Admin User", + expectError: true, + errorMsg: "invalid email format", + }, + { + name: "empty name", + email: "admin@example.com", + password: "password123", + userName: "", + expectError: true, + errorMsg: "name is required", + }, + { + name: "empty password", + email: "admin@example.com", + password: "", + userName: "Admin User", + expectError: true, + errorMsg: "password is required", + }, + { + name: "password too short", + email: "admin@example.com", + password: "short", + userName: "Admin User", + expectError: true, + errorMsg: "password must be at least 8 characters", + }, + { + name: "password exactly 8 characters", + email: "admin@example.com", + password: "12345678", + userName: "Admin User", + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := manager.validateSetupInfo(tt.email, tt.password, tt.userName) + if tt.expectError { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.errorMsg) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestDefaultManager_CreateOwnerUser_SetupAlreadyCompleted(t *testing.T) { + manager := &DefaultManager{ + setupRequired: false, + embeddedIdpManager: &idp.EmbeddedIdPManager{}, + } + + _, err := manager.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.Error(t, err) + assert.Contains(t, err.Error(), "setup already completed") +} diff --git a/management/server/management_proto_test.go b/management/server/management_proto_test.go index 42f192c0a..cc302400f 100644 --- a/management/server/management_proto_test.go +++ b/management/server/management_proto_test.go @@ -381,7 +381,7 @@ func startManagementForTest(t *testing.T, testFile string, config *config.Config return nil, nil, "", cleanup, err } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, MockIntegratedValidator{}, networkMapController) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, MockIntegratedValidator{}, networkMapController, nil) if err != nil { return nil, nil, "", cleanup, err } diff --git a/management/server/management_test.go b/management/server/management_test.go index 648201d4e..ace372509 100644 --- a/management/server/management_test.go +++ b/management/server/management_test.go @@ -242,6 +242,7 @@ func startServer( nil, server.MockIntegratedValidator{}, networkMapController, + nil, ) if err != nil { t.Fatalf("failed creating management server: %v", err) diff --git a/management/server/mock_server/account_mock.go b/management/server/mock_server/account_mock.go index 0d7d2bc3d..422829eba 100644 --- a/management/server/mock_server/account_mock.go +++ b/management/server/mock_server/account_mock.go @@ -27,13 +27,13 @@ import ( var _ account.Manager = (*MockAccountManager)(nil) type MockAccountManager struct { - GetOrCreateAccountByUserFunc func(ctx context.Context, userId, domain string) (*types.Account, error) + GetOrCreateAccountByUserFunc func(ctx context.Context, userAuth auth.UserAuth) (*types.Account, error) GetAccountFunc func(ctx context.Context, accountID string) (*types.Account, error) CreateSetupKeyFunc func(ctx context.Context, accountId string, keyName string, keyType types.SetupKeyType, expiresIn time.Duration, autoGroups []string, usageLimit int, userID string, ephemeral bool, allowExtraDNSLabels bool) (*types.SetupKey, error) GetSetupKeyFunc func(ctx context.Context, accountID, userID, keyID string) (*types.SetupKey, error) AccountExistsFunc func(ctx context.Context, accountID string) (bool, error) - GetAccountIDByUserIdFunc func(ctx context.Context, userId, domain string) (string, error) + GetAccountIDByUserIdFunc func(ctx context.Context, userAuth auth.UserAuth) (string, error) GetUserFromUserAuthFunc func(ctx context.Context, userAuth auth.UserAuth) (*types.User, error) ListUsersFunc func(ctx context.Context, accountID string) ([]*types.User, error) GetPeersFunc func(ctx context.Context, accountID, userID, nameFilter, ipFilter string) ([]*nbpeer.Peer, error) @@ -129,6 +129,12 @@ type MockAccountManager struct { UpdateAccountPeersFunc func(ctx context.Context, accountID string) BufferUpdateAccountPeersFunc func(ctx context.Context, accountID string) RecalculateNetworkMapCacheFunc func(ctx context.Context, accountId string) error + + GetIdentityProviderFunc func(ctx context.Context, accountID, idpID, userID string) (*types.IdentityProvider, error) + GetIdentityProvidersFunc func(ctx context.Context, accountID, userID string) ([]*types.IdentityProvider, error) + CreateIdentityProviderFunc func(ctx context.Context, accountID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) + UpdateIdentityProviderFunc func(ctx context.Context, accountID, idpID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) + DeleteIdentityProviderFunc func(ctx context.Context, accountID, idpID, userID string) error } func (am *MockAccountManager) CreateGroup(ctx context.Context, accountID, userID string, group *types.Group) error { @@ -237,10 +243,10 @@ func (am *MockAccountManager) DeletePeer(ctx context.Context, accountID, peerID, // GetOrCreateAccountByUser mock implementation of GetOrCreateAccountByUser from server.AccountManager interface func (am *MockAccountManager) GetOrCreateAccountByUser( - ctx context.Context, userId, domain string, + ctx context.Context, userAuth auth.UserAuth, ) (*types.Account, error) { if am.GetOrCreateAccountByUserFunc != nil { - return am.GetOrCreateAccountByUserFunc(ctx, userId, domain) + return am.GetOrCreateAccountByUserFunc(ctx, userAuth) } return nil, status.Errorf( codes.Unimplemented, @@ -276,9 +282,9 @@ func (am *MockAccountManager) AccountExists(ctx context.Context, accountID strin } // GetAccountIDByUserID mock implementation of GetAccountIDByUserID from server.AccountManager interface -func (am *MockAccountManager) GetAccountIDByUserID(ctx context.Context, userId, domain string) (string, error) { +func (am *MockAccountManager) GetAccountIDByUserID(ctx context.Context, userAuth auth.UserAuth) (string, error) { if am.GetAccountIDByUserIdFunc != nil { - return am.GetAccountIDByUserIdFunc(ctx, userId, domain) + return am.GetAccountIDByUserIdFunc(ctx, userAuth) } return "", status.Errorf( codes.Unimplemented, @@ -993,3 +999,43 @@ func (am *MockAccountManager) RecalculateNetworkMapCache(ctx context.Context, ac func (am *MockAccountManager) GetUserIDByPeerKey(ctx context.Context, peerKey string) (string, error) { return "something", nil } + +// GetIdentityProvider mocks GetIdentityProvider of the AccountManager interface +func (am *MockAccountManager) GetIdentityProvider(ctx context.Context, accountID, idpID, userID string) (*types.IdentityProvider, error) { + if am.GetIdentityProviderFunc != nil { + return am.GetIdentityProviderFunc(ctx, accountID, idpID, userID) + } + return nil, status.Errorf(codes.Unimplemented, "method GetIdentityProvider is not implemented") +} + +// GetIdentityProviders mocks GetIdentityProviders of the AccountManager interface +func (am *MockAccountManager) GetIdentityProviders(ctx context.Context, accountID, userID string) ([]*types.IdentityProvider, error) { + if am.GetIdentityProvidersFunc != nil { + return am.GetIdentityProvidersFunc(ctx, accountID, userID) + } + return nil, status.Errorf(codes.Unimplemented, "method GetIdentityProviders is not implemented") +} + +// CreateIdentityProvider mocks CreateIdentityProvider of the AccountManager interface +func (am *MockAccountManager) CreateIdentityProvider(ctx context.Context, accountID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) { + if am.CreateIdentityProviderFunc != nil { + return am.CreateIdentityProviderFunc(ctx, accountID, userID, idp) + } + return nil, status.Errorf(codes.Unimplemented, "method CreateIdentityProvider is not implemented") +} + +// UpdateIdentityProvider mocks UpdateIdentityProvider of the AccountManager interface +func (am *MockAccountManager) UpdateIdentityProvider(ctx context.Context, accountID, idpID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) { + if am.UpdateIdentityProviderFunc != nil { + return am.UpdateIdentityProviderFunc(ctx, accountID, idpID, userID, idp) + } + return nil, status.Errorf(codes.Unimplemented, "method UpdateIdentityProvider is not implemented") +} + +// DeleteIdentityProvider mocks DeleteIdentityProvider of the AccountManager interface +func (am *MockAccountManager) DeleteIdentityProvider(ctx context.Context, accountID, idpID, userID string) error { + if am.DeleteIdentityProviderFunc != nil { + return am.DeleteIdentityProviderFunc(ctx, accountID, idpID, userID) + } + return status.Errorf(codes.Unimplemented, "method DeleteIdentityProvider is not implemented") +} diff --git a/management/server/nameserver_test.go b/management/server/nameserver_test.go index e3dd8b0b8..955c6b0ef 100644 --- a/management/server/nameserver_test.go +++ b/management/server/nameserver_test.go @@ -865,7 +865,7 @@ func initTestNSAccount(t *testing.T, am *DefaultAccountManager) (*types.Account, userID := testUserID domain := "example.com" - account := newAccountWithId(context.Background(), accountID, userID, domain, false) + account := newAccountWithId(context.Background(), accountID, userID, domain, "", "", false) account.NameServerGroups[existingNSGroup.ID] = &existingNSGroup diff --git a/management/server/peer_test.go b/management/server/peer_test.go index 752563299..ce04adf9e 100644 --- a/management/server/peer_test.go +++ b/management/server/peer_test.go @@ -502,7 +502,7 @@ func TestDefaultAccountManager_GetPeer(t *testing.T) { accountID := "test_account" adminUser := "account_creator" someUser := "some_user" - account := newAccountWithId(context.Background(), accountID, adminUser, "", false) + account := newAccountWithId(context.Background(), accountID, adminUser, "", "", "", false) account.Users[someUser] = &types.User{ Id: someUser, Role: types.UserRoleUser, @@ -689,7 +689,7 @@ func TestDefaultAccountManager_GetPeers(t *testing.T) { accountID := "test_account" adminUser := "account_creator" someUser := "some_user" - account := newAccountWithId(context.Background(), accountID, adminUser, "", false) + account := newAccountWithId(context.Background(), accountID, adminUser, "", "", "", false) account.Users[someUser] = &types.User{ Id: someUser, Role: testCase.role, @@ -759,7 +759,7 @@ func setupTestAccountManager(b testing.TB, peers int, groups int) (*DefaultAccou adminUser := "account_creator" regularUser := "regular_user" - account := newAccountWithId(context.Background(), accountID, adminUser, "", false) + account := newAccountWithId(context.Background(), accountID, adminUser, "", "", "", false) account.Users[regularUser] = &types.User{ Id: regularUser, Role: types.UserRoleUser, @@ -2124,7 +2124,7 @@ func Test_DeletePeer(t *testing.T) { // account with an admin and a regular user accountID := "test_account" adminUser := "account_creator" - account := newAccountWithId(context.Background(), accountID, adminUser, "", false) + account := newAccountWithId(context.Background(), accountID, adminUser, "", "", "", false) account.Peers = map[string]*nbpeer.Peer{ "peer1": { ID: "peer1", @@ -2307,12 +2307,12 @@ func TestAddPeer_UserPendingApprovalBlocked(t *testing.T) { } // Create account - account := newAccountWithId(context.Background(), "test-account", "owner", "", false) + account := newAccountWithId(context.Background(), "test-account", "owner", "", "", "", false) err = manager.Store.SaveAccount(context.Background(), account) require.NoError(t, err) // Create user pending approval - pendingUser := types.NewRegularUser("pending-user") + pendingUser := types.NewRegularUser("pending-user", "", "") pendingUser.AccountID = account.Id pendingUser.Blocked = true pendingUser.PendingApproval = true @@ -2344,12 +2344,12 @@ func TestAddPeer_ApprovedUserCanAddPeers(t *testing.T) { } // Create account - account := newAccountWithId(context.Background(), "test-account", "owner", "", false) + account := newAccountWithId(context.Background(), "test-account", "owner", "", "", "", false) err = manager.Store.SaveAccount(context.Background(), account) require.NoError(t, err) // Create regular user (not pending approval) - regularUser := types.NewRegularUser("regular-user") + regularUser := types.NewRegularUser("regular-user", "", "") regularUser.AccountID = account.Id err = manager.Store.SaveUser(context.Background(), regularUser) require.NoError(t, err) @@ -2378,12 +2378,12 @@ func TestLoginPeer_UserPendingApprovalBlocked(t *testing.T) { } // Create account - account := newAccountWithId(context.Background(), "test-account", "owner", "", false) + account := newAccountWithId(context.Background(), "test-account", "owner", "", "", "", false) err = manager.Store.SaveAccount(context.Background(), account) require.NoError(t, err) // Create user pending approval - pendingUser := types.NewRegularUser("pending-user") + pendingUser := types.NewRegularUser("pending-user", "", "") pendingUser.AccountID = account.Id pendingUser.Blocked = true pendingUser.PendingApproval = true @@ -2443,12 +2443,12 @@ func TestLoginPeer_ApprovedUserCanLogin(t *testing.T) { } // Create account - account := newAccountWithId(context.Background(), "test-account", "owner", "", false) + account := newAccountWithId(context.Background(), "test-account", "owner", "", "", "", false) err = manager.Store.SaveAccount(context.Background(), account) require.NoError(t, err) // Create regular user (not pending approval) - regularUser := types.NewRegularUser("regular-user") + regularUser := types.NewRegularUser("regular-user", "", "") regularUser.AccountID = account.Id err = manager.Store.SaveUser(context.Background(), regularUser) require.NoError(t, err) diff --git a/management/server/permissions/modules/module.go b/management/server/permissions/modules/module.go index 3d021a235..0ae10d521 100644 --- a/management/server/permissions/modules/module.go +++ b/management/server/permissions/modules/module.go @@ -3,33 +3,35 @@ package modules type Module string const ( - Networks Module = "networks" - Peers Module = "peers" - Groups Module = "groups" - Settings Module = "settings" - Accounts Module = "accounts" - Dns Module = "dns" - Nameservers Module = "nameservers" - Events Module = "events" - Policies Module = "policies" - Routes Module = "routes" - Users Module = "users" - SetupKeys Module = "setup_keys" - Pats Module = "pats" + Networks Module = "networks" + Peers Module = "peers" + Groups Module = "groups" + Settings Module = "settings" + Accounts Module = "accounts" + Dns Module = "dns" + Nameservers Module = "nameservers" + Events Module = "events" + Policies Module = "policies" + Routes Module = "routes" + Users Module = "users" + SetupKeys Module = "setup_keys" + Pats Module = "pats" + IdentityProviders Module = "identity_providers" ) var All = map[Module]struct{}{ - Networks: {}, - Peers: {}, - Groups: {}, - Settings: {}, - Accounts: {}, - Dns: {}, - Nameservers: {}, - Events: {}, - Policies: {}, - Routes: {}, - Users: {}, - SetupKeys: {}, - Pats: {}, + Networks: {}, + Peers: {}, + Groups: {}, + Settings: {}, + Accounts: {}, + Dns: {}, + Nameservers: {}, + Events: {}, + Policies: {}, + Routes: {}, + Users: {}, + SetupKeys: {}, + Pats: {}, + IdentityProviders: {}, } diff --git a/management/server/permissions/roles/network_admin.go b/management/server/permissions/roles/network_admin.go index e95d58381..8f69d46ad 100644 --- a/management/server/permissions/roles/network_admin.go +++ b/management/server/permissions/roles/network_admin.go @@ -93,5 +93,11 @@ var NetworkAdmin = RolePermissions{ operations.Update: false, operations.Delete: false, }, + modules.IdentityProviders: { + operations.Read: true, + operations.Create: false, + operations.Update: false, + operations.Delete: false, + }, }, } diff --git a/management/server/posture_checks_test.go b/management/server/posture_checks_test.go index 13152ed12..7f0a48dc7 100644 --- a/management/server/posture_checks_test.go +++ b/management/server/posture_checks_test.go @@ -109,7 +109,7 @@ func initTestPostureChecksAccount(am *DefaultAccountManager) (*types.Account, er ID: "peer1", } - account := newAccountWithId(context.Background(), accountID, groupAdminUserID, domain, false) + account := newAccountWithId(context.Background(), accountID, groupAdminUserID, domain, "", "", false) account.Users[admin.Id] = admin account.Users[user.Id] = user account.Peers["peer1"] = peer1 diff --git a/management/server/route_test.go b/management/server/route_test.go index a413d545b..6dc8c4cf4 100644 --- a/management/server/route_test.go +++ b/management/server/route_test.go @@ -1320,7 +1320,7 @@ func initTestRouteAccount(t *testing.T, am *DefaultAccountManager) (*types.Accou accountID := "testingAcc" domain := "example.com" - account := newAccountWithId(context.Background(), accountID, userID, domain, false) + account := newAccountWithId(context.Background(), accountID, userID, domain, "", "", false) err := am.Store.SaveAccount(context.Background(), account) if err != nil { return nil, err diff --git a/management/server/setupkey_test.go b/management/server/setupkey_test.go index bc361bbd7..6eca27efd 100644 --- a/management/server/setupkey_test.go +++ b/management/server/setupkey_test.go @@ -15,6 +15,7 @@ import ( "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/auth" ) func TestDefaultAccountManager_SaveSetupKey(t *testing.T) { @@ -24,7 +25,7 @@ func TestDefaultAccountManager_SaveSetupKey(t *testing.T) { } userID := "testingUser" - account, err := manager.GetOrCreateAccountByUser(context.Background(), userID, "") + account, err := manager.GetOrCreateAccountByUser(context.Background(), auth.UserAuth{UserId: userID}) if err != nil { t.Fatal(err) } @@ -99,7 +100,7 @@ func TestDefaultAccountManager_CreateSetupKey(t *testing.T) { } userID := "testingUser" - account, err := manager.GetOrCreateAccountByUser(context.Background(), userID, "") + account, err := manager.GetOrCreateAccountByUser(context.Background(), auth.UserAuth{UserId: userID}) if err != nil { t.Fatal(err) } @@ -204,7 +205,7 @@ func TestGetSetupKeys(t *testing.T) { } userID := "testingUser" - account, err := manager.GetOrCreateAccountByUser(context.Background(), userID, "") + account, err := manager.GetOrCreateAccountByUser(context.Background(), auth.UserAuth{UserId: userID}) if err != nil { t.Fatal(err) } @@ -471,7 +472,7 @@ func TestDefaultAccountManager_CreateSetupKey_ShouldNotAllowToUpdateRevokedKey(t } userID := "testingUser" - account, err := manager.GetOrCreateAccountByUser(context.Background(), userID, "") + account, err := manager.GetOrCreateAccountByUser(context.Background(), auth.UserAuth{UserId: userID}) if err != nil { t.Fatal(err) } diff --git a/management/server/store/file_store.go b/management/server/store/file_store.go index d5d9337ca..8db37ec30 100644 --- a/management/server/store/file_store.go +++ b/management/server/store/file_store.go @@ -16,6 +16,7 @@ import ( "github.com/netbirdio/netbird/management/server/types" nbutil "github.com/netbirdio/netbird/management/server/util" "github.com/netbirdio/netbird/util" + "github.com/netbirdio/netbird/util/crypt" ) // storeFileName Store file name. Stored in the datadir @@ -263,3 +264,8 @@ func (s *FileStore) Close(ctx context.Context) error { func (s *FileStore) GetStoreEngine() types.Engine { return types.FileStoreEngine } + +// SetFieldEncrypt is a no-op for FileStore as it doesn't support field encryption. +func (s *FileStore) SetFieldEncrypt(_ *crypt.FieldEncrypt) { + // no-op: FileStore stores data in plaintext JSON; encryption is not supported +} diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 08d19f0d3..3a9f8d188 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -37,6 +37,7 @@ import ( "github.com/netbirdio/netbird/management/server/util" "github.com/netbirdio/netbird/route" "github.com/netbirdio/netbird/shared/management/status" + "github.com/netbirdio/netbird/util/crypt" ) const ( @@ -57,13 +58,13 @@ const ( // SqlStore represents an account storage backed by a Sql DB persisted to disk type SqlStore struct { - db *gorm.DB - globalAccountLock sync.Mutex - metrics telemetry.AppMetrics - installationPK int - storeEngine types.Engine - pool *pgxpool.Pool - + db *gorm.DB + globalAccountLock sync.Mutex + metrics telemetry.AppMetrics + installationPK int + storeEngine types.Engine + pool *pgxpool.Pool + fieldEncrypt *crypt.FieldEncrypt transactionTimeout time.Duration } @@ -175,6 +176,13 @@ func (s *SqlStore) SaveAccount(ctx context.Context, account *types.Account) erro generateAccountSQLTypes(account) + // Encrypt sensitive user data before saving + for i := range account.UsersG { + if err := account.UsersG[i].EncryptSensitiveData(s.fieldEncrypt); err != nil { + return fmt.Errorf("encrypt user: %w", err) + } + } + for _, group := range account.GroupsG { group.StoreGroupPeers() } @@ -440,7 +448,18 @@ func (s *SqlStore) SaveUsers(ctx context.Context, users []*types.User) error { return nil } - result := s.db.Clauses(clause.OnConflict{UpdateAll: true}).Create(&users) + usersCopy := make([]*types.User, len(users)) + for i, user := range users { + userCopy := user.Copy() + userCopy.Email = user.Email + userCopy.Name = user.Name + if err := userCopy.EncryptSensitiveData(s.fieldEncrypt); err != nil { + return fmt.Errorf("encrypt user: %w", err) + } + usersCopy[i] = userCopy + } + + result := s.db.Clauses(clause.OnConflict{UpdateAll: true}).Create(&usersCopy) if result.Error != nil { log.WithContext(ctx).Errorf("failed to save users to store: %s", result.Error) return status.Errorf(status.Internal, "failed to save users to store") @@ -450,7 +469,15 @@ func (s *SqlStore) SaveUsers(ctx context.Context, users []*types.User) error { // SaveUser saves the given user to the database. func (s *SqlStore) SaveUser(ctx context.Context, user *types.User) error { - result := s.db.Save(user) + userCopy := user.Copy() + userCopy.Email = user.Email + userCopy.Name = user.Name + + if err := userCopy.EncryptSensitiveData(s.fieldEncrypt); err != nil { + return fmt.Errorf("encrypt user: %w", err) + } + + result := s.db.Save(userCopy) if result.Error != nil { log.WithContext(ctx).Errorf("failed to save user to store: %s", result.Error) return status.Errorf(status.Internal, "failed to save user to store") @@ -600,6 +627,10 @@ func (s *SqlStore) GetUserByPATID(ctx context.Context, lockStrength LockingStren return nil, status.NewGetUserFromStoreError() } + if err := user.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt user: %w", err) + } + return &user, nil } @@ -618,6 +649,10 @@ func (s *SqlStore) GetUserByUserID(ctx context.Context, lockStrength LockingStre return nil, status.NewGetUserFromStoreError() } + if err := user.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt user: %w", err) + } + return &user, nil } @@ -654,6 +689,12 @@ func (s *SqlStore) GetAccountUsers(ctx context.Context, lockStrength LockingStre return nil, status.Errorf(status.Internal, "issue getting users from store") } + for _, user := range users { + if err := user.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt user: %w", err) + } + } + return users, nil } @@ -672,6 +713,10 @@ func (s *SqlStore) GetAccountOwner(ctx context.Context, lockStrength LockingStre return nil, status.Errorf(status.Internal, "failed to get account owner from the store") } + if err := user.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt user: %w", err) + } + return &user, nil } @@ -866,6 +911,9 @@ func (s *SqlStore) getAccountGorm(ctx context.Context, accountID string) (*types if user.AutoGroups == nil { user.AutoGroups = []string{} } + if err := user.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt user: %w", err) + } account.Users[user.Id] = &user user.PATsG = nil } @@ -1141,6 +1189,9 @@ func (s *SqlStore) getAccountPgx(ctx context.Context, accountID string) (*types. account.Users = make(map[string]*types.User, len(account.UsersG)) for i := range account.UsersG { user := &account.UsersG[i] + if err := user.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt user: %w", err) + } user.PATs = make(map[string]*types.PersonalAccessToken) if userPats, ok := patsByUserID[user.Id]; ok { for j := range userPats { @@ -1545,7 +1596,7 @@ func (s *SqlStore) getPeers(ctx context.Context, accountID string) ([]nbpeer.Pee } func (s *SqlStore) getUsers(ctx context.Context, accountID string) ([]types.User, error) { - const query = `SELECT id, account_id, role, is_service_user, non_deletable, service_user_name, auto_groups, blocked, pending_approval, last_login, created_at, issued, integration_ref_id, integration_ref_integration_type FROM users WHERE account_id = $1` + const query = `SELECT id, account_id, role, is_service_user, non_deletable, service_user_name, auto_groups, blocked, pending_approval, last_login, created_at, issued, integration_ref_id, integration_ref_integration_type, email, name FROM users WHERE account_id = $1` rows, err := s.pool.Query(ctx, query, accountID) if err != nil { return nil, err @@ -1555,7 +1606,7 @@ func (s *SqlStore) getUsers(ctx context.Context, accountID string) ([]types.User var autoGroups []byte var lastLogin, createdAt sql.NullTime var isServiceUser, nonDeletable, blocked, pendingApproval sql.NullBool - err := row.Scan(&u.Id, &u.AccountID, &u.Role, &isServiceUser, &nonDeletable, &u.ServiceUserName, &autoGroups, &blocked, &pendingApproval, &lastLogin, &createdAt, &u.Issued, &u.IntegrationReference.ID, &u.IntegrationReference.IntegrationType) + err := row.Scan(&u.Id, &u.AccountID, &u.Role, &isServiceUser, &nonDeletable, &u.ServiceUserName, &autoGroups, &blocked, &pendingApproval, &lastLogin, &createdAt, &u.Issued, &u.IntegrationReference.ID, &u.IntegrationReference.IntegrationType, &u.Email, &u.Name) if err == nil { if lastLogin.Valid { u.LastLogin = &lastLogin.Time @@ -3012,6 +3063,11 @@ func (s *SqlStore) GetDB() *gorm.DB { return s.db } +// SetFieldEncrypt sets the field encryptor for encrypting sensitive user data. +func (s *SqlStore) SetFieldEncrypt(enc *crypt.FieldEncrypt) { + s.fieldEncrypt = enc +} + func (s *SqlStore) GetAccountDNSSettings(ctx context.Context, lockStrength LockingStrength, accountID string) (*types.DNSSettings, error) { tx := s.db if lockStrength != LockingStrengthNone { diff --git a/management/server/store/sql_store_test.go b/management/server/store/sql_store_test.go index 714927a5a..97aa81b12 100644 --- a/management/server/store/sql_store_test.go +++ b/management/server/store/sql_store_test.go @@ -32,6 +32,7 @@ import ( nbroute "github.com/netbirdio/netbird/route" route2 "github.com/netbirdio/netbird/route" "github.com/netbirdio/netbird/shared/management/status" + "github.com/netbirdio/netbird/util/crypt" ) func runTestForAllEngines(t *testing.T, testDataFile string, f func(t *testing.T, store Store)) { @@ -2090,7 +2091,7 @@ func newAccountWithId(ctx context.Context, accountID, userID, domain string) *ty setupKeys := map[string]*types.SetupKey{} nameServersGroups := make(map[string]*nbdns.NameServerGroup) - owner := types.NewOwnerUser(userID) + owner := types.NewOwnerUser(userID, "", "") owner.AccountID = accountID users[userID] = owner @@ -3114,6 +3115,138 @@ func TestSqlStore_SaveUsers(t *testing.T) { require.Equal(t, users[1].AutoGroups, user.AutoGroups) } +func TestSqlStore_SaveUserWithEncryption(t *testing.T) { + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) + t.Cleanup(cleanup) + require.NoError(t, err) + + // Enable encryption + key, err := crypt.GenerateKey() + require.NoError(t, err) + fieldEncrypt, err := crypt.NewFieldEncrypt(key) + require.NoError(t, err) + store.SetFieldEncrypt(fieldEncrypt) + + accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + + // rawUser is used to read raw (potentially encrypted) data from the database + // without any gorm hooks or automatic decryption + type rawUser struct { + Id string + Email string + Name string + } + + t.Run("save user with empty email and name", func(t *testing.T) { + user := &types.User{ + Id: "user-empty-fields", + AccountID: accountID, + Role: types.UserRoleUser, + Email: "", + Name: "", + AutoGroups: []string{"groupA"}, + } + err = store.SaveUser(context.Background(), user) + require.NoError(t, err) + + // Verify using direct database query that empty strings remain empty (not encrypted) + var raw rawUser + err = store.(*SqlStore).db.Table("users").Select("id, email, name").Where("id = ?", user.Id).First(&raw).Error + require.NoError(t, err) + require.Equal(t, "", raw.Email, "empty email should remain empty in database") + require.Equal(t, "", raw.Name, "empty name should remain empty in database") + + // Verify manual decryption returns empty strings + decryptedEmail, err := fieldEncrypt.Decrypt(raw.Email) + require.NoError(t, err) + require.Equal(t, "", decryptedEmail) + + decryptedName, err := fieldEncrypt.Decrypt(raw.Name) + require.NoError(t, err) + require.Equal(t, "", decryptedName) + }) + + t.Run("save user with email and name", func(t *testing.T) { + user := &types.User{ + Id: "user-with-fields", + AccountID: accountID, + Role: types.UserRoleAdmin, + Email: "test@example.com", + Name: "Test User", + AutoGroups: []string{"groupB"}, + } + err = store.SaveUser(context.Background(), user) + require.NoError(t, err) + + // Verify using direct database query that the data is encrypted (not plaintext) + var raw rawUser + err = store.(*SqlStore).db.Table("users").Select("id, email, name").Where("id = ?", user.Id).First(&raw).Error + require.NoError(t, err) + require.NotEqual(t, "test@example.com", raw.Email, "email should be encrypted in database") + require.NotEqual(t, "Test User", raw.Name, "name should be encrypted in database") + + // Verify manual decryption returns correct values + decryptedEmail, err := fieldEncrypt.Decrypt(raw.Email) + require.NoError(t, err) + require.Equal(t, "test@example.com", decryptedEmail) + + decryptedName, err := fieldEncrypt.Decrypt(raw.Name) + require.NoError(t, err) + require.Equal(t, "Test User", decryptedName) + }) + + t.Run("save multiple users with mixed fields", func(t *testing.T) { + users := []*types.User{ + { + Id: "batch-user-1", + AccountID: accountID, + Email: "", + Name: "", + }, + { + Id: "batch-user-2", + AccountID: accountID, + Email: "batch@example.com", + Name: "Batch User", + }, + } + err = store.SaveUsers(context.Background(), users) + require.NoError(t, err) + + // Verify first user (empty fields) using direct database query + var raw1 rawUser + err = store.(*SqlStore).db.Table("users").Select("id, email, name").Where("id = ?", "batch-user-1").First(&raw1).Error + require.NoError(t, err) + require.Equal(t, "", raw1.Email, "empty email should remain empty in database") + require.Equal(t, "", raw1.Name, "empty name should remain empty in database") + + // Verify second user (with fields) using direct database query + var raw2 rawUser + err = store.(*SqlStore).db.Table("users").Select("id, email, name").Where("id = ?", "batch-user-2").First(&raw2).Error + require.NoError(t, err) + require.NotEqual(t, "batch@example.com", raw2.Email, "email should be encrypted in database") + require.NotEqual(t, "Batch User", raw2.Name, "name should be encrypted in database") + + // Verify manual decryption returns empty strings for first user + decryptedEmail1, err := fieldEncrypt.Decrypt(raw1.Email) + require.NoError(t, err) + require.Equal(t, "", decryptedEmail1) + + decryptedName1, err := fieldEncrypt.Decrypt(raw1.Name) + require.NoError(t, err) + require.Equal(t, "", decryptedName1) + + // Verify manual decryption returns correct values for second user + decryptedEmail2, err := fieldEncrypt.Decrypt(raw2.Email) + require.NoError(t, err) + require.Equal(t, "batch@example.com", decryptedEmail2) + + decryptedName2, err := fieldEncrypt.Decrypt(raw2.Name) + require.NoError(t, err) + require.Equal(t, "Batch User", decryptedName2) + }) +} + func TestSqlStore_DeleteUser(t *testing.T) { store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) t.Cleanup(cleanup) diff --git a/management/server/store/store.go b/management/server/store/store.go index dbe135406..013a66d73 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -27,6 +27,7 @@ import ( "github.com/netbirdio/netbird/management/server/testutil" "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/util" + "github.com/netbirdio/netbird/util/crypt" "github.com/netbirdio/netbird/management/server/migration" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" @@ -204,6 +205,9 @@ type Store interface { MarkAccountPrimary(ctx context.Context, accountID string) error UpdateAccountNetwork(ctx context.Context, accountID string, ipNet net.IPNet) error GetPolicyRulesByResourceID(ctx context.Context, lockStrength LockingStrength, accountID string, peerID string) ([]*types.PolicyRule, error) + + // SetFieldEncrypt sets the field encryptor for encrypting sensitive user data. + SetFieldEncrypt(enc *crypt.FieldEncrypt) GetUserIDByPeerKey(ctx context.Context, lockStrength LockingStrength, peerKey string) (string, error) } @@ -340,6 +344,12 @@ func getMigrationsPreAuto(ctx context.Context) []migrationFunc { func(db *gorm.DB) error { return migration.DropIndex[routerTypes.NetworkRouter](ctx, db, "idx_network_routers_id") }, + func(db *gorm.DB) error { + return migration.MigrateNewField[types.User](ctx, db, "name", "") + }, + func(db *gorm.DB) error { + return migration.MigrateNewField[types.User](ctx, db, "email", "") + }, } } // migratePostAuto migrates the SQLite database to the latest schema func migratePostAuto(ctx context.Context, db *gorm.DB) error { diff --git a/management/server/types/identity_provider.go b/management/server/types/identity_provider.go new file mode 100644 index 000000000..e809590de --- /dev/null +++ b/management/server/types/identity_provider.go @@ -0,0 +1,122 @@ +package types + +import ( + "errors" + "net/url" +) + +// Identity provider validation errors +var ( + ErrIdentityProviderNameRequired = errors.New("identity provider name is required") + ErrIdentityProviderTypeRequired = errors.New("identity provider type is required") + ErrIdentityProviderTypeUnsupported = errors.New("unsupported identity provider type") + ErrIdentityProviderIssuerRequired = errors.New("identity provider issuer is required") + ErrIdentityProviderIssuerInvalid = errors.New("identity provider issuer must be a valid URL") + ErrIdentityProviderClientIDRequired = errors.New("identity provider client ID is required") +) + +// IdentityProviderType is the type of identity provider +type IdentityProviderType string + +const ( + // IdentityProviderTypeOIDC is a generic OIDC identity provider + IdentityProviderTypeOIDC IdentityProviderType = "oidc" + // IdentityProviderTypeZitadel is the Zitadel identity provider + IdentityProviderTypeZitadel IdentityProviderType = "zitadel" + // IdentityProviderTypeEntra is the Microsoft Entra (Azure AD) identity provider + IdentityProviderTypeEntra IdentityProviderType = "entra" + // IdentityProviderTypeGoogle is the Google identity provider + IdentityProviderTypeGoogle IdentityProviderType = "google" + // IdentityProviderTypeOkta is the Okta identity provider + IdentityProviderTypeOkta IdentityProviderType = "okta" + // IdentityProviderTypePocketID is the PocketID identity provider + IdentityProviderTypePocketID IdentityProviderType = "pocketid" + // IdentityProviderTypeMicrosoft is the Microsoft identity provider + IdentityProviderTypeMicrosoft IdentityProviderType = "microsoft" + // IdentityProviderTypeAuthentik is the Authentik identity provider + IdentityProviderTypeAuthentik IdentityProviderType = "authentik" + // IdentityProviderTypeKeycloak is the Keycloak identity provider + IdentityProviderTypeKeycloak IdentityProviderType = "keycloak" +) + +// IdentityProvider represents an identity provider configuration +type IdentityProvider struct { + // ID is the unique identifier of the identity provider + ID string `gorm:"primaryKey"` + // AccountID is a reference to Account that this object belongs + AccountID string `json:"-" gorm:"index"` + // Type is the type of identity provider + Type IdentityProviderType + // Name is a human-readable name for the identity provider + Name string + // Issuer is the OIDC issuer URL + Issuer string + // ClientID is the OAuth2 client ID + ClientID string + // ClientSecret is the OAuth2 client secret + ClientSecret string +} + +// Copy returns a copy of the IdentityProvider +func (idp *IdentityProvider) Copy() *IdentityProvider { + return &IdentityProvider{ + ID: idp.ID, + AccountID: idp.AccountID, + Type: idp.Type, + Name: idp.Name, + Issuer: idp.Issuer, + ClientID: idp.ClientID, + ClientSecret: idp.ClientSecret, + } +} + +// EventMeta returns a map of metadata for activity events +func (idp *IdentityProvider) EventMeta() map[string]any { + return map[string]any{ + "name": idp.Name, + "type": string(idp.Type), + "issuer": idp.Issuer, + } +} + +// Validate validates the identity provider configuration +func (idp *IdentityProvider) Validate() error { + if idp.Name == "" { + return ErrIdentityProviderNameRequired + } + if idp.Type == "" { + return ErrIdentityProviderTypeRequired + } + if !idp.Type.IsValid() { + return ErrIdentityProviderTypeUnsupported + } + if !idp.Type.HasBuiltInIssuer() && idp.Issuer == "" { + return ErrIdentityProviderIssuerRequired + } + if idp.Issuer != "" { + parsedURL, err := url.Parse(idp.Issuer) + if err != nil || parsedURL.Scheme == "" || parsedURL.Host == "" { + return ErrIdentityProviderIssuerInvalid + } + } + if idp.ClientID == "" { + return ErrIdentityProviderClientIDRequired + } + return nil +} + +// IsValid checks if the given type is a supported identity provider type +func (t IdentityProviderType) IsValid() bool { + switch t { + case IdentityProviderTypeOIDC, IdentityProviderTypeZitadel, IdentityProviderTypeEntra, + IdentityProviderTypeGoogle, IdentityProviderTypeOkta, IdentityProviderTypePocketID, + IdentityProviderTypeMicrosoft, IdentityProviderTypeAuthentik, IdentityProviderTypeKeycloak: + return true + } + return false +} + +// HasBuiltInIssuer returns true for types that don't require an issuer URL +func (t IdentityProviderType) HasBuiltInIssuer() bool { + return t == IdentityProviderTypeGoogle || t == IdentityProviderTypeMicrosoft +} diff --git a/management/server/types/identity_provider_test.go b/management/server/types/identity_provider_test.go new file mode 100644 index 000000000..6ddc563f2 --- /dev/null +++ b/management/server/types/identity_provider_test.go @@ -0,0 +1,137 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIdentityProvider_Validate(t *testing.T) { + tests := []struct { + name string + idp *IdentityProvider + expectedErr error + }{ + { + name: "valid OIDC provider", + idp: &IdentityProvider{ + Name: "Test Provider", + Type: IdentityProviderTypeOIDC, + Issuer: "https://example.com", + ClientID: "client-id", + }, + expectedErr: nil, + }, + { + name: "valid OIDC provider with path", + idp: &IdentityProvider{ + Name: "Test Provider", + Type: IdentityProviderTypeOIDC, + Issuer: "https://example.com/oauth2/issuer", + ClientID: "client-id", + }, + expectedErr: nil, + }, + { + name: "missing name", + idp: &IdentityProvider{ + Type: IdentityProviderTypeOIDC, + Issuer: "https://example.com", + ClientID: "client-id", + }, + expectedErr: ErrIdentityProviderNameRequired, + }, + { + name: "missing type", + idp: &IdentityProvider{ + Name: "Test Provider", + Issuer: "https://example.com", + ClientID: "client-id", + }, + expectedErr: ErrIdentityProviderTypeRequired, + }, + { + name: "invalid type", + idp: &IdentityProvider{ + Name: "Test Provider", + Type: "invalid", + Issuer: "https://example.com", + ClientID: "client-id", + }, + expectedErr: ErrIdentityProviderTypeUnsupported, + }, + { + name: "missing issuer for OIDC", + idp: &IdentityProvider{ + Name: "Test Provider", + Type: IdentityProviderTypeOIDC, + ClientID: "client-id", + }, + expectedErr: ErrIdentityProviderIssuerRequired, + }, + { + name: "invalid issuer URL - no scheme", + idp: &IdentityProvider{ + Name: "Test Provider", + Type: IdentityProviderTypeOIDC, + Issuer: "example.com", + ClientID: "client-id", + }, + expectedErr: ErrIdentityProviderIssuerInvalid, + }, + { + name: "invalid issuer URL - no host", + idp: &IdentityProvider{ + Name: "Test Provider", + Type: IdentityProviderTypeOIDC, + Issuer: "https://", + ClientID: "client-id", + }, + expectedErr: ErrIdentityProviderIssuerInvalid, + }, + { + name: "invalid issuer URL - just path", + idp: &IdentityProvider{ + Name: "Test Provider", + Type: IdentityProviderTypeOIDC, + Issuer: "/oauth2/issuer", + ClientID: "client-id", + }, + expectedErr: ErrIdentityProviderIssuerInvalid, + }, + { + name: "missing client ID", + idp: &IdentityProvider{ + Name: "Test Provider", + Type: IdentityProviderTypeOIDC, + Issuer: "https://example.com", + }, + expectedErr: ErrIdentityProviderClientIDRequired, + }, + { + name: "Google provider without issuer is valid", + idp: &IdentityProvider{ + Name: "Google SSO", + Type: IdentityProviderTypeGoogle, + ClientID: "client-id", + }, + expectedErr: nil, + }, + { + name: "Microsoft provider without issuer is valid", + idp: &IdentityProvider{ + Name: "Microsoft SSO", + Type: IdentityProviderTypeMicrosoft, + ClientID: "client-id", + }, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.idp.Validate() + assert.Equal(t, tt.expectedErr, err) + }) + } +} diff --git a/management/server/types/user.go b/management/server/types/user.go index beb3586df..dc601e15b 100644 --- a/management/server/types/user.go +++ b/management/server/types/user.go @@ -7,6 +7,7 @@ import ( "github.com/netbirdio/netbird/management/server/idp" "github.com/netbirdio/netbird/management/server/integration_reference" + "github.com/netbirdio/netbird/util/crypt" ) const ( @@ -65,7 +66,11 @@ type UserInfo struct { LastLogin time.Time `json:"last_login"` Issued string `json:"issued"` PendingApproval bool `json:"pending_approval"` + Password string `json:"password"` IntegrationReference integration_reference.IntegrationReference `json:"-"` + // IdPID is the identity provider ID (connector ID) extracted from the Dex-encoded user ID. + // This field is only populated when the user ID can be decoded from Dex's format. + IdPID string `json:"idp_id,omitempty"` } // User represents a user of the system @@ -96,6 +101,9 @@ type User struct { Issued string `gorm:"default:api"` IntegrationReference integration_reference.IntegrationReference `gorm:"embedded;embeddedPrefix:integration_ref_"` + + Name string `gorm:"default:''"` + Email string `gorm:"default:''"` } // IsBlocked returns true if the user is blocked, false otherwise @@ -143,10 +151,16 @@ func (u *User) ToUserInfo(userData *idp.UserData) (*UserInfo, error) { } if userData == nil { + + name := u.Name + if u.IsServiceUser { + name = u.ServiceUserName + } + return &UserInfo{ ID: u.Id, - Email: "", - Name: u.ServiceUserName, + Email: u.Email, + Name: name, Role: string(u.Role), AutoGroups: u.AutoGroups, Status: string(UserStatusActive), @@ -178,6 +192,7 @@ func (u *User) ToUserInfo(userData *idp.UserData) (*UserInfo, error) { LastLogin: u.GetLastLogin(), Issued: u.Issued, PendingApproval: u.PendingApproval, + Password: userData.Password, }, nil } @@ -204,11 +219,13 @@ func (u *User) Copy() *User { CreatedAt: u.CreatedAt, Issued: u.Issued, IntegrationReference: u.IntegrationReference, + Email: u.Email, + Name: u.Name, } } // NewUser creates a new user -func NewUser(id string, role UserRole, isServiceUser bool, nonDeletable bool, serviceUserName string, autoGroups []string, issued string) *User { +func NewUser(id string, role UserRole, isServiceUser bool, nonDeletable bool, serviceUserName string, autoGroups []string, issued string, email string, name string) *User { return &User{ Id: id, Role: role, @@ -218,20 +235,70 @@ func NewUser(id string, role UserRole, isServiceUser bool, nonDeletable bool, se AutoGroups: autoGroups, Issued: issued, CreatedAt: time.Now().UTC(), + Name: name, + Email: email, } } // NewRegularUser creates a new user with role UserRoleUser -func NewRegularUser(id string) *User { - return NewUser(id, UserRoleUser, false, false, "", []string{}, UserIssuedAPI) +func NewRegularUser(id, email, name string) *User { + return NewUser(id, UserRoleUser, false, false, "", []string{}, UserIssuedAPI, email, name) } // NewAdminUser creates a new user with role UserRoleAdmin func NewAdminUser(id string) *User { - return NewUser(id, UserRoleAdmin, false, false, "", []string{}, UserIssuedAPI) + return NewUser(id, UserRoleAdmin, false, false, "", []string{}, UserIssuedAPI, "", "") } // NewOwnerUser creates a new user with role UserRoleOwner -func NewOwnerUser(id string) *User { - return NewUser(id, UserRoleOwner, false, false, "", []string{}, UserIssuedAPI) +func NewOwnerUser(id string, email string, name string) *User { + return NewUser(id, UserRoleOwner, false, false, "", []string{}, UserIssuedAPI, email, name) +} + +// EncryptSensitiveData encrypts the user's sensitive fields (Email and Name) in place. +func (u *User) EncryptSensitiveData(enc *crypt.FieldEncrypt) error { + if enc == nil { + return nil + } + + var err error + if u.Email != "" { + u.Email, err = enc.Encrypt(u.Email) + if err != nil { + return fmt.Errorf("encrypt email: %w", err) + } + } + + if u.Name != "" { + u.Name, err = enc.Encrypt(u.Name) + if err != nil { + return fmt.Errorf("encrypt name: %w", err) + } + } + + return nil +} + +// DecryptSensitiveData decrypts the user's sensitive fields (Email and Name) in place. +func (u *User) DecryptSensitiveData(enc *crypt.FieldEncrypt) error { + if enc == nil { + return nil + } + + var err error + if u.Email != "" { + u.Email, err = enc.Decrypt(u.Email) + if err != nil { + return fmt.Errorf("decrypt email: %w", err) + } + } + + if u.Name != "" { + u.Name, err = enc.Decrypt(u.Name) + if err != nil { + return fmt.Errorf("decrypt name: %w", err) + } + } + + return nil } diff --git a/management/server/types/user_test.go b/management/server/types/user_test.go new file mode 100644 index 000000000..e11df96aa --- /dev/null +++ b/management/server/types/user_test.go @@ -0,0 +1,298 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/util/crypt" +) + +func TestUser_EncryptSensitiveData(t *testing.T) { + key, err := crypt.GenerateKey() + require.NoError(t, err) + + fieldEncrypt, err := crypt.NewFieldEncrypt(key) + require.NoError(t, err) + + t.Run("encrypt email and name", func(t *testing.T) { + user := &User{ + Id: "user-1", + Email: "test@example.com", + Name: "Test User", + } + + err := user.EncryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + assert.NotEqual(t, "test@example.com", user.Email, "email should be encrypted") + assert.NotEqual(t, "Test User", user.Name, "name should be encrypted") + assert.NotEmpty(t, user.Email, "encrypted email should not be empty") + assert.NotEmpty(t, user.Name, "encrypted name should not be empty") + }) + + t.Run("encrypt empty email and name", func(t *testing.T) { + user := &User{ + Id: "user-2", + Email: "", + Name: "", + } + + err := user.EncryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + assert.Equal(t, "", user.Email, "empty email should remain empty") + assert.Equal(t, "", user.Name, "empty name should remain empty") + }) + + t.Run("encrypt only email", func(t *testing.T) { + user := &User{ + Id: "user-3", + Email: "test@example.com", + Name: "", + } + + err := user.EncryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + assert.NotEqual(t, "test@example.com", user.Email, "email should be encrypted") + assert.NotEmpty(t, user.Email, "encrypted email should not be empty") + assert.Equal(t, "", user.Name, "empty name should remain empty") + }) + + t.Run("encrypt only name", func(t *testing.T) { + user := &User{ + Id: "user-4", + Email: "", + Name: "Test User", + } + + err := user.EncryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + assert.Equal(t, "", user.Email, "empty email should remain empty") + assert.NotEqual(t, "Test User", user.Name, "name should be encrypted") + assert.NotEmpty(t, user.Name, "encrypted name should not be empty") + }) + + t.Run("nil encryptor returns no error", func(t *testing.T) { + user := &User{ + Id: "user-5", + Email: "test@example.com", + Name: "Test User", + } + + err := user.EncryptSensitiveData(nil) + require.NoError(t, err) + + assert.Equal(t, "test@example.com", user.Email, "email should remain unchanged with nil encryptor") + assert.Equal(t, "Test User", user.Name, "name should remain unchanged with nil encryptor") + }) +} + +func TestUser_DecryptSensitiveData(t *testing.T) { + key, err := crypt.GenerateKey() + require.NoError(t, err) + + fieldEncrypt, err := crypt.NewFieldEncrypt(key) + require.NoError(t, err) + + t.Run("decrypt email and name", func(t *testing.T) { + originalEmail := "test@example.com" + originalName := "Test User" + + user := &User{ + Id: "user-1", + Email: originalEmail, + Name: originalName, + } + + err := user.EncryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + err = user.DecryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + assert.Equal(t, originalEmail, user.Email, "decrypted email should match original") + assert.Equal(t, originalName, user.Name, "decrypted name should match original") + }) + + t.Run("decrypt empty email and name", func(t *testing.T) { + user := &User{ + Id: "user-2", + Email: "", + Name: "", + } + + err := user.DecryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + assert.Equal(t, "", user.Email, "empty email should remain empty") + assert.Equal(t, "", user.Name, "empty name should remain empty") + }) + + t.Run("decrypt only email", func(t *testing.T) { + originalEmail := "test@example.com" + + user := &User{ + Id: "user-3", + Email: originalEmail, + Name: "", + } + + err := user.EncryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + err = user.DecryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + assert.Equal(t, originalEmail, user.Email, "decrypted email should match original") + assert.Equal(t, "", user.Name, "empty name should remain empty") + }) + + t.Run("decrypt only name", func(t *testing.T) { + originalName := "Test User" + + user := &User{ + Id: "user-4", + Email: "", + Name: originalName, + } + + err := user.EncryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + err = user.DecryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + assert.Equal(t, "", user.Email, "empty email should remain empty") + assert.Equal(t, originalName, user.Name, "decrypted name should match original") + }) + + t.Run("nil encryptor returns no error", func(t *testing.T) { + user := &User{ + Id: "user-5", + Email: "test@example.com", + Name: "Test User", + } + + err := user.DecryptSensitiveData(nil) + require.NoError(t, err) + + assert.Equal(t, "test@example.com", user.Email, "email should remain unchanged with nil encryptor") + assert.Equal(t, "Test User", user.Name, "name should remain unchanged with nil encryptor") + }) + + t.Run("decrypt with invalid ciphertext returns error", func(t *testing.T) { + user := &User{ + Id: "user-6", + Email: "not-valid-base64-ciphertext!!!", + Name: "Test User", + } + + err := user.DecryptSensitiveData(fieldEncrypt) + require.Error(t, err) + assert.Contains(t, err.Error(), "decrypt email") + }) + + t.Run("decrypt with wrong key returns error", func(t *testing.T) { + originalEmail := "test@example.com" + originalName := "Test User" + + user := &User{ + Id: "user-7", + Email: originalEmail, + Name: originalName, + } + + err := user.EncryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + differentKey, err := crypt.GenerateKey() + require.NoError(t, err) + + differentEncrypt, err := crypt.NewFieldEncrypt(differentKey) + require.NoError(t, err) + + err = user.DecryptSensitiveData(differentEncrypt) + require.Error(t, err) + assert.Contains(t, err.Error(), "decrypt email") + }) +} + +func TestUser_EncryptDecryptRoundTrip(t *testing.T) { + key, err := crypt.GenerateKey() + require.NoError(t, err) + + fieldEncrypt, err := crypt.NewFieldEncrypt(key) + require.NoError(t, err) + + testCases := []struct { + name string + email string + uname string + }{ + { + name: "standard email and name", + email: "user@example.com", + uname: "John Doe", + }, + { + name: "email with special characters", + email: "user+tag@sub.example.com", + uname: "O'Brien, Mary-Jane", + }, + { + name: "unicode characters", + email: "user@example.com", + uname: "Jean-Pierre Müller 日本語", + }, + { + name: "long values", + email: "very.long.email.address.that.is.quite.extended@subdomain.example.organization.com", + uname: "A Very Long Name That Contains Many Words And Is Quite Extended For Testing Purposes", + }, + { + name: "empty email only", + email: "", + uname: "Name Only", + }, + { + name: "empty name only", + email: "email@only.com", + uname: "", + }, + { + name: "both empty", + email: "", + uname: "", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + user := &User{ + Id: "test-user", + Email: tc.email, + Name: tc.uname, + } + + err := user.EncryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + if tc.email != "" { + assert.NotEqual(t, tc.email, user.Email, "email should be encrypted") + } + if tc.uname != "" { + assert.NotEqual(t, tc.uname, user.Name, "name should be encrypted") + } + + err = user.DecryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + assert.Equal(t, tc.email, user.Email, "decrypted email should match original") + assert.Equal(t, tc.uname, user.Name, "decrypted name should match original") + }) + } +} diff --git a/management/server/user.go b/management/server/user.go index 656ebca67..4f9007b61 100644 --- a/management/server/user.go +++ b/management/server/user.go @@ -13,6 +13,7 @@ import ( "github.com/google/uuid" log "github.com/sirupsen/logrus" + "github.com/netbirdio/netbird/idp/dex" "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/idp" nbpeer "github.com/netbirdio/netbird/management/server/peer" @@ -40,7 +41,7 @@ func (am *DefaultAccountManager) createServiceUser(ctx context.Context, accountI } newUserID := uuid.New().String() - newUser := types.NewUser(newUserID, role, true, nonDeletable, serviceUserName, autoGroups, types.UserIssuedAPI) + newUser := types.NewUser(newUserID, role, true, nonDeletable, serviceUserName, autoGroups, types.UserIssuedAPI, "", "") newUser.AccountID = accountID log.WithContext(ctx).Debugf("New User: %v", newUser) @@ -104,7 +105,12 @@ func (am *DefaultAccountManager) inviteNewUser(ctx context.Context, accountID, u inviterID = createdBy } - idpUser, err := am.createNewIdpUser(ctx, accountID, inviterID, invite) + var idpUser *idp.UserData + if IsEmbeddedIdp(am.idpManager) { + idpUser, err = am.createEmbeddedIdpUser(ctx, accountID, inviterID, invite) + } else { + idpUser, err = am.createNewIdpUser(ctx, accountID, inviterID, invite) + } if err != nil { return nil, err } @@ -117,18 +123,26 @@ func (am *DefaultAccountManager) inviteNewUser(ctx context.Context, accountID, u Issued: invite.Issued, IntegrationReference: invite.IntegrationReference, CreatedAt: time.Now().UTC(), + Email: invite.Email, + Name: invite.Name, } if err = am.Store.SaveUser(ctx, newUser); err != nil { return nil, err } - _, err = am.refreshCache(ctx, accountID) - if err != nil { - return nil, err + if !IsEmbeddedIdp(am.idpManager) { + _, err = am.refreshCache(ctx, accountID) + if err != nil { + return nil, err + } } - am.StoreEvent(ctx, userID, newUser.Id, accountID, activity.UserInvited, nil) + eventType := activity.UserInvited + if IsEmbeddedIdp(am.idpManager) { + eventType = activity.UserCreated + } + am.StoreEvent(ctx, userID, newUser.Id, accountID, eventType, nil) return newUser.ToUserInfo(idpUser) } @@ -172,6 +186,34 @@ func (am *DefaultAccountManager) createNewIdpUser(ctx context.Context, accountID return am.idpManager.CreateUser(ctx, invite.Email, invite.Name, accountID, inviterUser.Email) } +// createEmbeddedIdpUser validates the invite and creates a new user in the embedded IdP. +// Unlike createNewIdpUser, this method fetches user data directly from the database +// since the embedded IdP usage ensures the username and email are stored locally in the User table. +func (am *DefaultAccountManager) createEmbeddedIdpUser(ctx context.Context, accountID string, inviterID string, invite *types.UserInfo) (*idp.UserData, error) { + inviter, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthNone, inviterID) + if err != nil { + return nil, fmt.Errorf("failed to get inviter user: %w", err) + } + + if inviter == nil { + return nil, status.Errorf(status.NotFound, "inviter user with ID %s doesn't exist", inviterID) + } + + // check if the user is already registered with this email => reject + existingUsers, err := am.Store.GetAccountUsers(ctx, store.LockingStrengthNone, accountID) + if err != nil { + return nil, err + } + + for _, user := range existingUsers { + if strings.EqualFold(user.Email, invite.Email) { + return nil, status.Errorf(status.UserAlreadyExists, "can't invite a user with an existing NetBird account") + } + } + + return am.idpManager.CreateUser(ctx, invite.Email, invite.Name, accountID, inviter.Email) +} + func (am *DefaultAccountManager) GetUserByID(ctx context.Context, id string) (*types.User, error) { return am.Store.GetUserByUserID(ctx, store.LockingStrengthNone, id) } @@ -757,7 +799,7 @@ func handleOwnerRoleTransfer(ctx context.Context, transaction store.Store, initi // If the AccountManager has a non-nil idpManager and the User is not a service user, // it will attempt to look up the UserData from the cache. func (am *DefaultAccountManager) getUserInfo(ctx context.Context, user *types.User, accountID string) (*types.UserInfo, error) { - if !isNil(am.idpManager) && !user.IsServiceUser { + if !isNil(am.idpManager) && !user.IsServiceUser && !IsEmbeddedIdp(am.idpManager) { userData, err := am.lookupUserInCache(ctx, user.Id, accountID) if err != nil { return nil, err @@ -808,7 +850,10 @@ func validateUserUpdate(groupsMap map[string]*types.Group, initiatorUser, oldUse } // GetOrCreateAccountByUser returns an existing account for a given user id or creates a new one if doesn't exist -func (am *DefaultAccountManager) GetOrCreateAccountByUser(ctx context.Context, userID, domain string) (*types.Account, error) { +func (am *DefaultAccountManager) GetOrCreateAccountByUser(ctx context.Context, userAuth auth.UserAuth) (*types.Account, error) { + userID := userAuth.UserId + domain := userAuth.Domain + start := time.Now() unlock := am.Store.AcquireGlobalLock(ctx) defer unlock() @@ -819,7 +864,7 @@ func (am *DefaultAccountManager) GetOrCreateAccountByUser(ctx context.Context, u account, err := am.Store.GetAccountByUser(ctx, userID) if err != nil { if s, ok := status.FromError(err); ok && s.Type() == status.NotFound { - account, err = am.newAccount(ctx, userID, lowerDomain) + account, err = am.newAccount(ctx, userID, lowerDomain, userAuth.Email, userAuth.Name) if err != nil { return nil, err } @@ -884,7 +929,8 @@ func (am *DefaultAccountManager) BuildUserInfosForAccount(ctx context.Context, a var queriedUsers []*idp.UserData var err error - if !isNil(am.idpManager) { + // embedded IdP ensures that we have user data (email and name) stored in the database. + if !isNil(am.idpManager) && !IsEmbeddedIdp(am.idpManager) { users := make(map[string]userLoggedInOnce, len(accountUsers)) usersFromIntegration := make([]*idp.UserData, 0) for _, user := range accountUsers { @@ -921,6 +967,10 @@ func (am *DefaultAccountManager) BuildUserInfosForAccount(ctx context.Context, a if err != nil { return nil, err } + // Try to decode Dex user ID to extract the IdP ID (connector ID) + if _, connectorID, decodeErr := dex.DecodeDexUserID(accountUser.Id); decodeErr == nil && connectorID != "" { + info.IdPID = connectorID + } userInfosMap[accountUser.Id] = info } @@ -942,7 +992,7 @@ func (am *DefaultAccountManager) BuildUserInfosForAccount(ctx context.Context, a info = &types.UserInfo{ ID: localUser.Id, - Email: "", + Email: localUser.Email, Name: name, Role: string(localUser.Role), AutoGroups: localUser.AutoGroups, @@ -951,6 +1001,10 @@ func (am *DefaultAccountManager) BuildUserInfosForAccount(ctx context.Context, a NonDeletable: localUser.NonDeletable, } } + // Try to decode Dex user ID to extract the IdP ID (connector ID) + if _, connectorID, decodeErr := dex.DecodeDexUserID(localUser.Id); decodeErr == nil && connectorID != "" { + info.IdPID = connectorID + } userInfosMap[info.ID] = info } diff --git a/management/server/user_test.go b/management/server/user_test.go index 3032ee3e8..6d356a8b1 100644 --- a/management/server/user_test.go +++ b/management/server/user_test.go @@ -3,6 +3,7 @@ package server import ( "context" "fmt" + "os" "reflect" "testing" "time" @@ -29,6 +30,7 @@ import ( "github.com/stretchr/testify/require" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" + "github.com/netbirdio/netbird/idp/dex" "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/idp" "github.com/netbirdio/netbird/management/server/integration_reference" @@ -58,7 +60,7 @@ func TestUser_CreatePAT_ForSameUser(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) err = s.SaveAccount(context.Background(), account) if err != nil { @@ -105,7 +107,7 @@ func TestUser_CreatePAT_ForDifferentUser(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) account.Users[mockTargetUserId] = &types.User{ Id: mockTargetUserId, IsServiceUser: false, @@ -133,7 +135,7 @@ func TestUser_CreatePAT_ForServiceUser(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) account.Users[mockTargetUserId] = &types.User{ Id: mockTargetUserId, IsServiceUser: true, @@ -165,7 +167,7 @@ func TestUser_CreatePAT_WithWrongExpiration(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) err = store.SaveAccount(context.Background(), account) if err != nil { @@ -190,7 +192,7 @@ func TestUser_CreatePAT_WithEmptyName(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) err = store.SaveAccount(context.Background(), account) if err != nil { @@ -215,7 +217,7 @@ func TestUser_DeletePAT(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) account.Users[mockUserID] = &types.User{ Id: mockUserID, PATs: map[string]*types.PersonalAccessToken{ @@ -258,7 +260,7 @@ func TestUser_GetPAT(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) account.Users[mockUserID] = &types.User{ Id: mockUserID, AccountID: mockAccountID, @@ -298,7 +300,7 @@ func TestUser_GetAllPATs(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) account.Users[mockUserID] = &types.User{ Id: mockUserID, AccountID: mockAccountID, @@ -362,6 +364,8 @@ func TestUser_Copy(t *testing.T) { ID: 0, IntegrationType: "test", }, + Email: "whatever@gmail.com", + Name: "John Doe", } err := validateStruct(user) @@ -408,7 +412,7 @@ func TestUser_CreateServiceUser(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) err = store.SaveAccount(context.Background(), account) if err != nil { @@ -455,7 +459,7 @@ func TestUser_CreateUser_ServiceUser(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) err = store.SaveAccount(context.Background(), account) if err != nil { @@ -503,7 +507,7 @@ func TestUser_CreateUser_RegularUser(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) err = store.SaveAccount(context.Background(), account) if err != nil { @@ -534,7 +538,7 @@ func TestUser_InviteNewUser(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) err = store.SaveAccount(context.Background(), account) if err != nil { @@ -641,7 +645,7 @@ func TestUser_DeleteUser_ServiceUser(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) account.Users[mockServiceUserID] = tt.serviceUser err = store.SaveAccount(context.Background(), account) @@ -680,7 +684,7 @@ func TestUser_DeleteUser_SelfDelete(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) err = store.SaveAccount(context.Background(), account) if err != nil { @@ -707,7 +711,7 @@ func TestUser_DeleteUser_regularUser(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) targetId := "user2" account.Users[targetId] = &types.User{ @@ -801,7 +805,7 @@ func TestUser_DeleteUser_RegularUsers(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) targetId := "user2" account.Users[targetId] = &types.User{ @@ -969,7 +973,7 @@ func TestDefaultAccountManager_GetUser(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) err = store.SaveAccount(context.Background(), account) if err != nil { @@ -1005,9 +1009,9 @@ func TestDefaultAccountManager_ListUsers(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) - account.Users["normal_user1"] = types.NewRegularUser("normal_user1") - account.Users["normal_user2"] = types.NewRegularUser("normal_user2") + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) + account.Users["normal_user1"] = types.NewRegularUser("normal_user1", "", "") + account.Users["normal_user2"] = types.NewRegularUser("normal_user2", "", "") err = store.SaveAccount(context.Background(), account) if err != nil { @@ -1047,7 +1051,7 @@ func TestDefaultAccountManager_ExternalCache(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) externalUser := &types.User{ Id: "externalUser", Role: types.UserRoleUser, @@ -1104,7 +1108,7 @@ func TestUser_IsAdmin(t *testing.T) { user := types.NewAdminUser(mockUserID) assert.True(t, user.HasAdminPower()) - user = types.NewRegularUser(mockUserID) + user = types.NewRegularUser(mockUserID, "", "") assert.False(t, user.HasAdminPower()) } @@ -1115,7 +1119,7 @@ func TestUser_GetUsersFromAccount_ForAdmin(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) account.Users[mockServiceUserID] = &types.User{ Id: mockServiceUserID, Role: "user", @@ -1149,7 +1153,7 @@ func TestUser_GetUsersFromAccount_ForUser(t *testing.T) { } t.Cleanup(cleanup) - account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", false) + account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "", "", "", false) account.Users[mockServiceUserID] = &types.User{ Id: mockServiceUserID, Role: "user", @@ -1320,13 +1324,13 @@ func TestDefaultAccountManager_SaveUser(t *testing.T) { t.Run(tc.name, func(t *testing.T) { // create an account and an admin user - account, err := manager.GetOrCreateAccountByUser(context.Background(), ownerUserID, "netbird.io") + account, err := manager.GetOrCreateAccountByUser(context.Background(), auth.UserAuth{UserId: ownerUserID, Domain: "netbird.io"}) if err != nil { t.Fatal(err) } // create other users - account.Users[regularUserID] = types.NewRegularUser(regularUserID) + account.Users[regularUserID] = types.NewRegularUser(regularUserID, "", "") account.Users[adminUserID] = types.NewAdminUser(adminUserID) account.Users[serviceUserID] = &types.User{IsServiceUser: true, Id: serviceUserID, Role: types.UserRoleAdmin, ServiceUserName: "service"} err = manager.Store.SaveAccount(context.Background(), account) @@ -1516,7 +1520,7 @@ func TestSaveOrAddUser_PreventAccountSwitch(t *testing.T) { } t.Cleanup(cleanup) - account1 := newAccountWithId(context.Background(), "account1", "ownerAccount1", "", false) + account1 := newAccountWithId(context.Background(), "account1", "ownerAccount1", "", "", "", false) targetId := "user2" account1.Users[targetId] = &types.User{ Id: targetId, @@ -1525,7 +1529,7 @@ func TestSaveOrAddUser_PreventAccountSwitch(t *testing.T) { } require.NoError(t, s.SaveAccount(context.Background(), account1)) - account2 := newAccountWithId(context.Background(), "account2", "ownerAccount2", "", false) + account2 := newAccountWithId(context.Background(), "account2", "ownerAccount2", "", "", "", false) require.NoError(t, s.SaveAccount(context.Background(), account2)) permissionsManager := permissions.NewManager(s) @@ -1552,7 +1556,7 @@ func TestDefaultAccountManager_GetCurrentUserInfo(t *testing.T) { } t.Cleanup(cleanup) - account1 := newAccountWithId(context.Background(), "account1", "account1Owner", "", false) + account1 := newAccountWithId(context.Background(), "account1", "account1Owner", "", "", "", false) account1.Settings.RegularUsersViewBlocked = false account1.Users["blocked-user"] = &types.User{ Id: "blocked-user", @@ -1574,7 +1578,7 @@ func TestDefaultAccountManager_GetCurrentUserInfo(t *testing.T) { } require.NoError(t, store.SaveAccount(context.Background(), account1)) - account2 := newAccountWithId(context.Background(), "account2", "account2Owner", "", false) + account2 := newAccountWithId(context.Background(), "account2", "account2Owner", "", "", "", false) account2.Users["settings-blocked-user"] = &types.User{ Id: "settings-blocked-user", Role: types.UserRoleUser, @@ -1771,7 +1775,7 @@ func TestApproveUser(t *testing.T) { } // Create account with admin and pending approval user - account := newAccountWithId(context.Background(), "account-1", "admin-user", "example.com", false) + account := newAccountWithId(context.Background(), "account-1", "admin-user", "example.com", "", "", false) err = manager.Store.SaveAccount(context.Background(), account) require.NoError(t, err) @@ -1782,7 +1786,7 @@ func TestApproveUser(t *testing.T) { require.NoError(t, err) // Create user pending approval - pendingUser := types.NewRegularUser("pending-user") + pendingUser := types.NewRegularUser("pending-user", "", "") pendingUser.AccountID = account.Id pendingUser.Blocked = true pendingUser.PendingApproval = true @@ -1807,12 +1811,12 @@ func TestApproveUser(t *testing.T) { assert.Contains(t, err.Error(), "not pending approval") // Test approval by non-admin should fail - regularUser := types.NewRegularUser("regular-user") + regularUser := types.NewRegularUser("regular-user", "", "") regularUser.AccountID = account.Id err = manager.Store.SaveUser(context.Background(), regularUser) require.NoError(t, err) - pendingUser2 := types.NewRegularUser("pending-user-2") + pendingUser2 := types.NewRegularUser("pending-user-2", "", "") pendingUser2.AccountID = account.Id pendingUser2.Blocked = true pendingUser2.PendingApproval = true @@ -1830,7 +1834,7 @@ func TestRejectUser(t *testing.T) { } // Create account with admin and pending approval user - account := newAccountWithId(context.Background(), "account-1", "admin-user", "example.com", false) + account := newAccountWithId(context.Background(), "account-1", "admin-user", "example.com", "", "", false) err = manager.Store.SaveAccount(context.Background(), account) require.NoError(t, err) @@ -1841,7 +1845,7 @@ func TestRejectUser(t *testing.T) { require.NoError(t, err) // Create user pending approval - pendingUser := types.NewRegularUser("pending-user") + pendingUser := types.NewRegularUser("pending-user", "", "") pendingUser.AccountID = account.Id pendingUser.Blocked = true pendingUser.PendingApproval = true @@ -1857,7 +1861,7 @@ func TestRejectUser(t *testing.T) { require.Error(t, err) // Test rejection of non-pending user should fail - regularUser := types.NewRegularUser("regular-user") + regularUser := types.NewRegularUser("regular-user", "", "") regularUser.AccountID = account.Id err = manager.Store.SaveUser(context.Background(), regularUser) require.NoError(t, err) @@ -1867,7 +1871,7 @@ func TestRejectUser(t *testing.T) { assert.Contains(t, err.Error(), "not pending approval") // Test rejection by non-admin should fail - pendingUser2 := types.NewRegularUser("pending-user-2") + pendingUser2 := types.NewRegularUser("pending-user-2", "", "") pendingUser2.AccountID = account.Id pendingUser2.Blocked = true pendingUser2.PendingApproval = true @@ -1877,3 +1881,149 @@ func TestRejectUser(t *testing.T) { err = manager.RejectUser(context.Background(), account.Id, regularUser.Id, pendingUser2.Id) require.Error(t, err) } + +func TestUser_Operations_WithEmbeddedIDP(t *testing.T) { + ctx := context.Background() + + // Create temporary directory for Dex + tmpDir := t.TempDir() + dexDataDir := tmpDir + "/dex" + require.NoError(t, os.MkdirAll(dexDataDir, 0700)) + + // Create embedded IDP config + embeddedIdPConfig := &idp.EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + Storage: idp.EmbeddedStorageConfig{ + Type: "sqlite3", + Config: idp.EmbeddedStorageTypeConfig{ + File: dexDataDir + "/dex.db", + }, + }, + } + + // Create embedded IDP manager + embeddedIdp, err := idp.NewEmbeddedIdPManager(ctx, embeddedIdPConfig, nil) + require.NoError(t, err) + defer func() { _ = embeddedIdp.Stop(ctx) }() + + // Create test store + testStore, cleanup, err := store.NewTestStoreFromSQL(ctx, "", tmpDir) + require.NoError(t, err) + defer cleanup() + + // Create account with owner user + account := newAccountWithId(ctx, mockAccountID, mockUserID, "", "owner@test.com", "Owner User", false) + require.NoError(t, testStore.SaveAccount(ctx, account)) + + // Create mock network map controller + ctrl := gomock.NewController(t) + networkMapControllerMock := network_map.NewMockController(ctrl) + networkMapControllerMock.EXPECT(). + OnPeersDeleted(gomock.Any(), gomock.Any(), gomock.Any()). + Return(nil). + AnyTimes() + + // Create account manager with embedded IDP + permissionsManager := permissions.NewManager(testStore) + am := DefaultAccountManager{ + Store: testStore, + eventStore: &activity.InMemoryEventStore{}, + permissionsManager: permissionsManager, + idpManager: embeddedIdp, + cacheLoading: map[string]chan struct{}{}, + networkMapController: networkMapControllerMock, + } + + // Initialize cache manager + cacheStore, err := nbcache.NewStore(ctx, nbcache.DefaultIDPCacheExpirationMax, nbcache.DefaultIDPCacheCleanupInterval, nbcache.DefaultIDPCacheOpenConn) + require.NoError(t, err) + am.cacheManager = nbcache.NewAccountUserDataCache(am.loadAccount, cacheStore) + am.externalCacheManager = nbcache.NewUserDataCache(cacheStore) + + t.Run("create regular user returns password", func(t *testing.T) { + userInfo, err := am.CreateUser(ctx, mockAccountID, mockUserID, &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + IsServiceUser: false, + }) + require.NoError(t, err) + require.NotNil(t, userInfo) + + // Verify user data + assert.Equal(t, "newuser@test.com", userInfo.Email) + assert.Equal(t, "New User", userInfo.Name) + assert.Equal(t, "user", userInfo.Role) + assert.NotEmpty(t, userInfo.ID) + + // IMPORTANT: Password should be returned for embedded IDP + assert.NotEmpty(t, userInfo.Password, "Password should be returned for embedded IDP user") + t.Logf("Created user: ID=%s, Email=%s, Password=%s", userInfo.ID, userInfo.Email, userInfo.Password) + + // Verify user ID is in Dex encoded format + rawUserID, connectorID, err := dex.DecodeDexUserID(userInfo.ID) + require.NoError(t, err) + assert.NotEmpty(t, rawUserID) + assert.Equal(t, "local", connectorID) + t.Logf("Decoded user ID: rawUserID=%s, connectorID=%s", rawUserID, connectorID) + + // Verify user exists in database with correct data + dbUser, err := testStore.GetUserByUserID(ctx, store.LockingStrengthNone, userInfo.ID) + require.NoError(t, err) + assert.Equal(t, "newuser@test.com", dbUser.Email) + assert.Equal(t, "New User", dbUser.Name) + + // Store user ID for delete test + createdUserID := userInfo.ID + + t.Run("delete user works", func(t *testing.T) { + err := am.DeleteUser(ctx, mockAccountID, mockUserID, createdUserID) + require.NoError(t, err) + + // Verify user is deleted from database + _, err = testStore.GetUserByUserID(ctx, store.LockingStrengthNone, createdUserID) + assert.Error(t, err, "User should be deleted from database") + }) + }) + + t.Run("create service user does not return password", func(t *testing.T) { + userInfo, err := am.CreateUser(ctx, mockAccountID, mockUserID, &types.UserInfo{ + Name: "Service User", + Role: "user", + AutoGroups: []string{}, + IsServiceUser: true, + }) + require.NoError(t, err) + require.NotNil(t, userInfo) + + assert.True(t, userInfo.IsServiceUser) + assert.Equal(t, "Service User", userInfo.Name) + // Service users don't have passwords + assert.Empty(t, userInfo.Password, "Service users should not have passwords") + }) + + t.Run("duplicate email fails", func(t *testing.T) { + // Create first user + _, err := am.CreateUser(ctx, mockAccountID, mockUserID, &types.UserInfo{ + Email: "duplicate@test.com", + Name: "First User", + Role: "user", + AutoGroups: []string{}, + IsServiceUser: false, + }) + require.NoError(t, err) + + // Try to create second user with same email + _, err = am.CreateUser(ctx, mockAccountID, mockUserID, &types.UserInfo{ + Email: "duplicate@test.com", + Name: "Second User", + Role: "user", + AutoGroups: []string{}, + IsServiceUser: false, + }) + assert.Error(t, err, "Creating user with duplicate email should fail") + t.Logf("Duplicate email error: %v", err) + }) +} diff --git a/shared/auth/jwt/extractor.go b/shared/auth/jwt/extractor.go index a41d5f07a..5806d1f4d 100644 --- a/shared/auth/jwt/extractor.go +++ b/shared/auth/jwt/extractor.go @@ -78,16 +78,18 @@ func parseTime(timeString string) time.Time { return parsedTime } -func (c ClaimsExtractor) audienceClaim(claimName string) string { - url, err := url.JoinPath(c.authAudience, claimName) +func (c *ClaimsExtractor) audienceClaim(claimName string) string { + audienceURL, err := url.JoinPath(c.authAudience, claimName) if err != nil { return c.authAudience + claimName // as it was previously } - return url + return audienceURL } -// ToUserAuth extracts user authentication information from a JWT token +// ToUserAuth extracts user authentication information from a JWT token. +// The token should contain standard claims like email, name, preferred_username. +// When using Dex, make sure to set getUserInfo: true to have these claims populated. func (c *ClaimsExtractor) ToUserAuth(token *jwt.Token) (auth.UserAuth, error) { claims := token.Claims.(jwt.MapClaims) userAuth := auth.UserAuth{} @@ -120,6 +122,21 @@ func (c *ClaimsExtractor) ToUserAuth(token *jwt.Token) (auth.UserAuth, error) { } } + // Extract email from standard "email" claim + if email, ok := claims["email"].(string); ok { + userAuth.Email = email + } + + // Extract name from standard "name" claim + if name, ok := claims["name"].(string); ok { + userAuth.Name = name + } + + // Extract name from standard "preferred_username" claim + if preferredName, ok := claims["preferred_username"].(string); ok { + userAuth.PreferredName = preferredName + } + return userAuth, nil } diff --git a/shared/auth/jwt/extractor_test.go b/shared/auth/jwt/extractor_test.go new file mode 100644 index 000000000..45529770d --- /dev/null +++ b/shared/auth/jwt/extractor_test.go @@ -0,0 +1,322 @@ +package jwt + +import ( + "testing" + "time" + + "github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestClaimsExtractor_ToUserAuth_ExtractsEmailAndName(t *testing.T) { + tests := []struct { + name string + claims jwt.MapClaims + userIDClaim string + audience string + expectedUserID string + expectedEmail string + expectedName string + expectError bool + }{ + { + name: "extracts email and name from standard claims", + claims: jwt.MapClaims{ + "sub": "user-123", + "email": "test@example.com", + "name": "Test User", + }, + userIDClaim: "sub", + expectedUserID: "user-123", + expectedEmail: "test@example.com", + expectedName: "Test User", + }, + { + name: "extracts Dex encoded user ID", + claims: jwt.MapClaims{ + "sub": "CiQ3YWFkOGMwNS0zMjg3LTQ3M2YtYjQyYS0zNjU1MDRiZjI1ZTcSBWxvY2Fs", + "email": "dex-user@example.com", + "name": "Dex User", + }, + userIDClaim: "sub", + expectedUserID: "CiQ3YWFkOGMwNS0zMjg3LTQ3M2YtYjQyYS0zNjU1MDRiZjI1ZTcSBWxvY2Fs", + expectedEmail: "dex-user@example.com", + expectedName: "Dex User", + }, + { + name: "handles missing email claim", + claims: jwt.MapClaims{ + "sub": "user-456", + "name": "User Without Email", + }, + userIDClaim: "sub", + expectedUserID: "user-456", + expectedEmail: "", + expectedName: "User Without Email", + }, + { + name: "handles missing name claim", + claims: jwt.MapClaims{ + "sub": "user-789", + "email": "noname@example.com", + }, + userIDClaim: "sub", + expectedUserID: "user-789", + expectedEmail: "noname@example.com", + expectedName: "", + }, + { + name: "handles missing both email and name", + claims: jwt.MapClaims{ + "sub": "user-minimal", + }, + userIDClaim: "sub", + expectedUserID: "user-minimal", + expectedEmail: "", + expectedName: "", + }, + { + name: "extracts preferred_username", + claims: jwt.MapClaims{ + "sub": "user-pref", + "email": "pref@example.com", + "name": "Preferred User", + "preferred_username": "prefuser", + }, + userIDClaim: "sub", + expectedUserID: "user-pref", + expectedEmail: "pref@example.com", + expectedName: "Preferred User", + }, + { + name: "fails when user ID claim is empty", + claims: jwt.MapClaims{ + "email": "test@example.com", + "name": "Test User", + }, + userIDClaim: "sub", + expectError: true, + }, + { + name: "uses custom user ID claim", + claims: jwt.MapClaims{ + "user_id": "custom-user-id", + "email": "custom@example.com", + "name": "Custom User", + }, + userIDClaim: "user_id", + expectedUserID: "custom-user-id", + expectedEmail: "custom@example.com", + expectedName: "Custom User", + }, + { + name: "extracts account ID with audience prefix", + claims: jwt.MapClaims{ + "sub": "user-with-account", + "email": "account@example.com", + "name": "Account User", + "https://api.netbird.io/wt_account_id": "account-123", + "https://api.netbird.io/wt_account_domain": "example.com", + }, + userIDClaim: "sub", + audience: "https://api.netbird.io", + expectedUserID: "user-with-account", + expectedEmail: "account@example.com", + expectedName: "Account User", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create extractor with options + opts := []ClaimsExtractorOption{} + if tt.userIDClaim != "" { + opts = append(opts, WithUserIDClaim(tt.userIDClaim)) + } + if tt.audience != "" { + opts = append(opts, WithAudience(tt.audience)) + } + extractor := NewClaimsExtractor(opts...) + + // Create a mock token with the claims + token := &jwt.Token{ + Claims: tt.claims, + } + + // Extract user auth + userAuth, err := extractor.ToUserAuth(token) + + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + assert.Equal(t, tt.expectedUserID, userAuth.UserId) + assert.Equal(t, tt.expectedEmail, userAuth.Email) + assert.Equal(t, tt.expectedName, userAuth.Name) + }) + } +} + +func TestClaimsExtractor_ToUserAuth_PreferredUsername(t *testing.T) { + extractor := NewClaimsExtractor(WithUserIDClaim("sub")) + + claims := jwt.MapClaims{ + "sub": "user-123", + "email": "test@example.com", + "name": "Test User", + "preferred_username": "testuser", + } + + token := &jwt.Token{Claims: claims} + + userAuth, err := extractor.ToUserAuth(token) + require.NoError(t, err) + + assert.Equal(t, "user-123", userAuth.UserId) + assert.Equal(t, "test@example.com", userAuth.Email) + assert.Equal(t, "Test User", userAuth.Name) + assert.Equal(t, "testuser", userAuth.PreferredName) +} + +func TestClaimsExtractor_ToUserAuth_LastLogin(t *testing.T) { + extractor := NewClaimsExtractor( + WithUserIDClaim("sub"), + WithAudience("https://api.netbird.io"), + ) + + expectedTime := time.Date(2025, 1, 15, 10, 30, 0, 0, time.UTC) + + claims := jwt.MapClaims{ + "sub": "user-123", + "email": "test@example.com", + "https://api.netbird.io/nb_last_login": expectedTime.Format(time.RFC3339), + } + + token := &jwt.Token{Claims: claims} + + userAuth, err := extractor.ToUserAuth(token) + require.NoError(t, err) + + assert.Equal(t, expectedTime, userAuth.LastLogin) +} + +func TestClaimsExtractor_ToUserAuth_Invited(t *testing.T) { + extractor := NewClaimsExtractor( + WithUserIDClaim("sub"), + WithAudience("https://api.netbird.io"), + ) + + claims := jwt.MapClaims{ + "sub": "user-123", + "email": "invited@example.com", + "https://api.netbird.io/nb_invited": true, + } + + token := &jwt.Token{Claims: claims} + + userAuth, err := extractor.ToUserAuth(token) + require.NoError(t, err) + + assert.True(t, userAuth.Invited) +} + +func TestClaimsExtractor_ToGroups(t *testing.T) { + extractor := NewClaimsExtractor(WithUserIDClaim("sub")) + + tests := []struct { + name string + claims jwt.MapClaims + groupClaimName string + expectedGroups []string + }{ + { + name: "extracts groups from claim", + claims: jwt.MapClaims{ + "sub": "user-123", + "groups": []interface{}{"admin", "users", "developers"}, + }, + groupClaimName: "groups", + expectedGroups: []string{"admin", "users", "developers"}, + }, + { + name: "returns empty slice when claim missing", + claims: jwt.MapClaims{ + "sub": "user-123", + }, + groupClaimName: "groups", + expectedGroups: []string{}, + }, + { + name: "handles custom claim name", + claims: jwt.MapClaims{ + "sub": "user-123", + "user_roles": []interface{}{"role1", "role2"}, + }, + groupClaimName: "user_roles", + expectedGroups: []string{"role1", "role2"}, + }, + { + name: "filters non-string values", + claims: jwt.MapClaims{ + "sub": "user-123", + "groups": []interface{}{"admin", 123, "users", true}, + }, + groupClaimName: "groups", + expectedGroups: []string{"admin", "users"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + token := &jwt.Token{Claims: tt.claims} + groups := extractor.ToGroups(token, tt.groupClaimName) + assert.Equal(t, tt.expectedGroups, groups) + }) + } +} + +func TestClaimsExtractor_DefaultUserIDClaim(t *testing.T) { + // When no user ID claim is specified, it should default to "sub" + extractor := NewClaimsExtractor() + + claims := jwt.MapClaims{ + "sub": "default-user-id", + "email": "default@example.com", + } + + token := &jwt.Token{Claims: claims} + + userAuth, err := extractor.ToUserAuth(token) + require.NoError(t, err) + + assert.Equal(t, "default-user-id", userAuth.UserId) +} + +func TestClaimsExtractor_DexUserIDFormat(t *testing.T) { + // Test that the extractor correctly handles Dex's encoded user ID format + // Dex encodes user IDs as base64(protobuf{user_id, connector_id}) + extractor := NewClaimsExtractor(WithUserIDClaim("sub")) + + // This is an actual Dex-encoded user ID + dexEncodedID := "CiQ3YWFkOGMwNS0zMjg3LTQ3M2YtYjQyYS0zNjU1MDRiZjI1ZTcSBWxvY2Fs" + + claims := jwt.MapClaims{ + "sub": dexEncodedID, + "email": "dex@example.com", + "name": "Dex User", + } + + token := &jwt.Token{Claims: claims} + + userAuth, err := extractor.ToUserAuth(token) + require.NoError(t, err) + + // The extractor should pass through the encoded ID as-is + // Decoding is done elsewhere (e.g., in the Dex provider) + assert.Equal(t, dexEncodedID, userAuth.UserId) + assert.Equal(t, "dex@example.com", userAuth.Email) + assert.Equal(t, "Dex User", userAuth.Name) +} diff --git a/shared/auth/jwt/validator.go b/shared/auth/jwt/validator.go index 239447b96..ede7acea5 100644 --- a/shared/auth/jwt/validator.go +++ b/shared/auth/jwt/validator.go @@ -60,6 +60,7 @@ type Validator struct { keysLocation string idpSignkeyRefreshEnabled bool keys *Jwks + lastForcedRefresh time.Time } var ( @@ -84,26 +85,17 @@ func NewValidator(issuer string, audienceList []string, keysLocation string, idp } } +// forcedRefreshCooldown is the minimum time between forced key refreshes +// to prevent abuse from invalid tokens with fake kid values +const forcedRefreshCooldown = 30 * time.Second + func (v *Validator) getKeyFunc(ctx context.Context) jwt.Keyfunc { return func(token *jwt.Token) (interface{}, error) { // If keys are rotated, verify the keys prior to token validation if v.idpSignkeyRefreshEnabled { // If the keys are invalid, retrieve new ones - // @todo propose a separate go routine to regularly check these to prevent blocking when actually - // validating the token if !v.keys.stillValid() { - v.lock.Lock() - defer v.lock.Unlock() - - refreshedKeys, err := getPemKeys(v.keysLocation) - if err != nil { - log.WithContext(ctx).Debugf("cannot get JSONWebKey: %v, falling back to old keys", err) - refreshedKeys = v.keys - } - - log.WithContext(ctx).Debugf("keys refreshed, new UTC expiration time: %s", refreshedKeys.expiresInTime.UTC()) - - v.keys = refreshedKeys + v.refreshKeys(ctx) } } @@ -112,6 +104,18 @@ func (v *Validator) getKeyFunc(ctx context.Context) jwt.Keyfunc { return publicKey, nil } + // If key not found and refresh is enabled, try refreshing keys and retry once. + // This handles the case where keys were rotated but cache hasn't expired yet. + // Use a cooldown to prevent abuse from tokens with fake kid values. + if errors.Is(err, errKeyNotFound) && v.idpSignkeyRefreshEnabled { + if v.forceRefreshKeys(ctx) { + publicKey, err = getPublicKey(token, v.keys) + if err == nil { + return publicKey, nil + } + } + } + msg := fmt.Sprintf("getPublicKey error: %s", err) if errors.Is(err, errKeyNotFound) && !v.idpSignkeyRefreshEnabled { msg = fmt.Sprintf("getPublicKey error: %s. You can enable key refresh by setting HttpServerConfig.IdpSignKeyRefreshEnabled to true in your management.json file and restart the service", err) @@ -123,6 +127,46 @@ func (v *Validator) getKeyFunc(ctx context.Context) jwt.Keyfunc { } } +func (v *Validator) refreshKeys(ctx context.Context) { + v.lock.Lock() + defer v.lock.Unlock() + + refreshedKeys, err := getPemKeys(v.keysLocation) + if err != nil { + log.WithContext(ctx).Debugf("cannot get JSONWebKey: %v, falling back to old keys", err) + return + } + + log.WithContext(ctx).Debugf("keys refreshed, new UTC expiration time: %s", refreshedKeys.expiresInTime.UTC()) + v.keys = refreshedKeys +} + +// forceRefreshKeys refreshes keys if the cooldown period has passed. +// Returns true if keys were refreshed, false if cooldown prevented refresh. +// The cooldown check is done inside the lock to prevent race conditions. +func (v *Validator) forceRefreshKeys(ctx context.Context) bool { + v.lock.Lock() + defer v.lock.Unlock() + + // Check cooldown inside lock to prevent multiple goroutines from refreshing + if time.Since(v.lastForcedRefresh) <= forcedRefreshCooldown { + return false + } + + log.WithContext(ctx).Debugf("key not found in cache, forcing JWKS refresh") + + refreshedKeys, err := getPemKeys(v.keysLocation) + if err != nil { + log.WithContext(ctx).Debugf("cannot get JSONWebKey: %v, falling back to old keys", err) + return false + } + + log.WithContext(ctx).Debugf("keys refreshed, new UTC expiration time: %s", refreshedKeys.expiresInTime.UTC()) + v.keys = refreshedKeys + v.lastForcedRefresh = time.Now() + return true +} + // ValidateAndParse validates the token and returns the parsed token func (v *Validator) ValidateAndParse(ctx context.Context, token string) (*jwt.Token, error) { // If the token is empty... @@ -165,12 +209,12 @@ func (jwks *Jwks) stillValid() bool { func getPemKeys(keysLocation string) (*Jwks, error) { jwks := &Jwks{} - url, err := url.ParseRequestURI(keysLocation) + requestURI, err := url.ParseRequestURI(keysLocation) if err != nil { return jwks, err } - resp, err := http.Get(url.String()) + resp, err := http.Get(requestURI.String()) if err != nil { return jwks, err } diff --git a/shared/auth/user.go b/shared/auth/user.go index c1bae808e..00a3d2b64 100644 --- a/shared/auth/user.go +++ b/shared/auth/user.go @@ -18,6 +18,15 @@ type UserAuth struct { // The user id UserId string + // The user's email address + // (optional, may be empty if not in token, make sure to set getUserInfo: true in Dex to have this field) + Email string + // The user's name + // (optional, may be empty if not in token, make sure to set getUserInfo: true in Dex to have this field) + Name string + // The user's preferred name + // (optional, may be empty if not in token, make sure to set getUserInfo: true in Dex to have this field) + PreferredName string // Last login time for this user LastLogin time.Time // The Groups the user belongs to on this account diff --git a/shared/management/client/client_test.go b/shared/management/client/client_test.go index 9fbe70948..64f6831f2 100644 --- a/shared/management/client/client_test.go +++ b/shared/management/client/client_test.go @@ -129,7 +129,7 @@ func startManagement(t *testing.T) (*grpc.Server, net.Listener) { if err != nil { t.Fatal(err) } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, mgmt.MockIntegratedValidator{}, networkMapController) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, mgmt.MockIntegratedValidator{}, networkMapController, nil) if err != nil { t.Fatal(err) } diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index c9edcdda6..64086e7ec 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -32,6 +32,10 @@ tags: - name: Ingress Ports description: Interact with and view information about the ingress peers and ports. x-cloud-only: true + - name: Identity Providers + description: Interact with and view information about identity providers. + - name: Instance + description: Instance setup and status endpoints for initial configuration. components: schemas: Account: @@ -149,6 +153,11 @@ components: description: Set Clients auto-update version. "latest", "disabled", or a specific version (e.g "0.50.1") type: string example: "0.51.2" + embedded_idp_enabled: + description: Indicates whether the embedded identity provider (Dex) is enabled for this account. This is a read-only field. + type: boolean + readOnly: true + example: false required: - peer_login_expiration_enabled - peer_login_expiration @@ -206,6 +215,10 @@ components: description: User's email address type: string example: demo@netbird.io + password: + description: User's password. Only present when user is created (create user endpoint is called) and only when IdP supports user creation with password. + type: string + example: super_secure_password name: description: User's name from idp provider type: string @@ -252,6 +265,10 @@ components: description: How user was issued by API or Integration type: string example: api + idp_id: + description: Identity provider ID (connector ID) that the user authenticated with. Only populated for users with Dex-encoded user IDs. + type: string + example: okta-abc123 permissions: $ref: '#/components/schemas/UserPermissions' required: @@ -2250,6 +2267,118 @@ components: - page_size - total_records - total_pages + IdentityProviderType: + type: string + description: Type of identity provider + enum: + - oidc + - zitadel + - entra + - google + - okta + - pocketid + - microsoft + example: oidc + IdentityProvider: + type: object + properties: + id: + description: Identity provider ID + type: string + example: ch8i4ug6lnn4g9hqv7l0 + type: + $ref: '#/components/schemas/IdentityProviderType' + name: + description: Human-readable name for the identity provider + type: string + example: My OIDC Provider + issuer: + description: OIDC issuer URL + type: string + example: https://accounts.google.com + client_id: + description: OAuth2 client ID + type: string + example: 123456789.apps.googleusercontent.com + required: + - type + - name + - issuer + - client_id + IdentityProviderRequest: + type: object + properties: + type: + $ref: '#/components/schemas/IdentityProviderType' + name: + description: Human-readable name for the identity provider + type: string + example: My OIDC Provider + issuer: + description: OIDC issuer URL + type: string + example: https://accounts.google.com + client_id: + description: OAuth2 client ID + type: string + example: 123456789.apps.googleusercontent.com + client_secret: + description: OAuth2 client secret + type: string + example: secret123 + required: + - type + - name + - issuer + - client_id + - client_secret + InstanceStatus: + type: object + description: Instance status information + properties: + setup_required: + description: Indicates whether the instance requires initial setup + type: boolean + example: true + required: + - setup_required + SetupRequest: + type: object + description: Request to set up the initial admin user + properties: + email: + description: Email address for the admin user + type: string + example: admin@example.com + password: + description: Password for the admin user (minimum 8 characters) + type: string + format: password + minLength: 8 + example: securepassword123 + name: + description: Display name for the admin user (defaults to email if not provided) + type: string + example: Admin User + required: + - email + - password + - name + SetupResponse: + type: object + description: Response after successful instance setup + properties: + user_id: + description: The ID of the created user + type: string + example: abc123def456 + email: + description: Email address of the created user + type: string + example: admin@example.com + required: + - user_id + - email responses: not_found: description: Resource not found @@ -2287,6 +2416,48 @@ security: - BearerAuth: [ ] - TokenAuth: [ ] paths: + /api/instance: + get: + summary: Get Instance Status + description: Returns the instance status including whether initial setup is required. This endpoint does not require authentication. + tags: [ Instance ] + security: [ ] + responses: + '200': + description: Instance status information + content: + application/json: + schema: + $ref: '#/components/schemas/InstanceStatus' + '500': + "$ref": "#/components/responses/internal_error" + /api/setup: + post: + summary: Setup Instance + description: Creates the initial admin user for the instance. This endpoint does not require authentication but only works when setup is required (no accounts exist and embedded IDP is enabled). + tags: [ Instance ] + security: [ ] + requestBody: + description: Initial admin user details + required: true + content: + 'application/json': + schema: + $ref: '#/components/schemas/SetupRequest' + responses: + '200': + description: Setup completed successfully + content: + application/json: + schema: + $ref: '#/components/schemas/SetupResponse' + '400': + "$ref": "#/components/responses/bad_request" + '412': + description: Setup already completed + content: { } + '500': + "$ref": "#/components/responses/internal_error" /api/accounts: get: summary: List all Accounts @@ -4877,3 +5048,147 @@ paths: "$ref": "#/components/responses/forbidden" '500': "$ref": "#/components/responses/internal_error" + /api/identity-providers: + get: + summary: List all Identity Providers + description: Returns a list of all identity providers configured for the account + tags: [ Identity Providers ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + responses: + '200': + description: A JSON array of identity providers + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/IdentityProvider' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '500': + "$ref": "#/components/responses/internal_error" + post: + summary: Create an Identity Provider + description: Creates a new identity provider configuration + tags: [ Identity Providers ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + requestBody: + description: Identity provider configuration + content: + 'application/json': + schema: + $ref: '#/components/schemas/IdentityProviderRequest' + responses: + '200': + description: An Identity Provider object + content: + application/json: + schema: + $ref: '#/components/schemas/IdentityProvider' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '500': + "$ref": "#/components/responses/internal_error" + /api/identity-providers/{idpId}: + get: + summary: Retrieve an Identity Provider + description: Get information about a specific identity provider + tags: [ Identity Providers ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: idpId + required: true + schema: + type: string + description: The unique identifier of an identity provider + responses: + '200': + description: An Identity Provider object + content: + application/json: + schema: + $ref: '#/components/schemas/IdentityProvider' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '500': + "$ref": "#/components/responses/internal_error" + put: + summary: Update an Identity Provider + description: Update an existing identity provider configuration + tags: [ Identity Providers ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: idpId + required: true + schema: + type: string + description: The unique identifier of an identity provider + requestBody: + description: Identity provider update + content: + 'application/json': + schema: + $ref: '#/components/schemas/IdentityProviderRequest' + responses: + '200': + description: An Identity Provider object + content: + application/json: + schema: + $ref: '#/components/schemas/IdentityProvider' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '500': + "$ref": "#/components/responses/internal_error" + delete: + summary: Delete an Identity Provider + description: Delete an identity provider configuration + tags: [ Identity Providers ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: idpId + required: true + schema: + type: string + description: The unique identifier of an identity provider + responses: + '200': + description: Delete status code + content: { } + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '500': + "$ref": "#/components/responses/internal_error" diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index f242f5a18..ab5a65cb0 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -83,6 +83,17 @@ const ( GroupMinimumIssuedJwt GroupMinimumIssued = "jwt" ) +// Defines values for IdentityProviderType. +const ( + IdentityProviderTypeEntra IdentityProviderType = "entra" + IdentityProviderTypeGoogle IdentityProviderType = "google" + IdentityProviderTypeMicrosoft IdentityProviderType = "microsoft" + IdentityProviderTypeOidc IdentityProviderType = "oidc" + IdentityProviderTypeOkta IdentityProviderType = "okta" + IdentityProviderTypePocketid IdentityProviderType = "pocketid" + IdentityProviderTypeZitadel IdentityProviderType = "zitadel" +) + // Defines values for IngressPortAllocationPortMappingProtocol. const ( IngressPortAllocationPortMappingProtocolTcp IngressPortAllocationPortMappingProtocol = "tcp" @@ -298,8 +309,11 @@ type AccountSettings struct { AutoUpdateVersion *string `json:"auto_update_version,omitempty"` // DnsDomain Allows to define a custom dns domain for the account - DnsDomain *string `json:"dns_domain,omitempty"` - Extra *AccountExtraSettings `json:"extra,omitempty"` + DnsDomain *string `json:"dns_domain,omitempty"` + + // EmbeddedIdpEnabled Indicates whether the embedded identity provider (Dex) is enabled for this account. This is a read-only field. + EmbeddedIdpEnabled *bool `json:"embedded_idp_enabled,omitempty"` + Extra *AccountExtraSettings `json:"extra,omitempty"` // GroupsPropagationEnabled Allows propagate the new user auto groups to peers that belongs to the user GroupsPropagationEnabled *bool `json:"groups_propagation_enabled,omitempty"` @@ -520,6 +534,45 @@ type GroupRequest struct { Resources *[]Resource `json:"resources,omitempty"` } +// IdentityProvider defines model for IdentityProvider. +type IdentityProvider struct { + // ClientId OAuth2 client ID + ClientId string `json:"client_id"` + + // Id Identity provider ID + Id *string `json:"id,omitempty"` + + // Issuer OIDC issuer URL + Issuer string `json:"issuer"` + + // Name Human-readable name for the identity provider + Name string `json:"name"` + + // Type Type of identity provider + Type IdentityProviderType `json:"type"` +} + +// IdentityProviderRequest defines model for IdentityProviderRequest. +type IdentityProviderRequest struct { + // ClientId OAuth2 client ID + ClientId string `json:"client_id"` + + // ClientSecret OAuth2 client secret + ClientSecret string `json:"client_secret"` + + // Issuer OIDC issuer URL + Issuer string `json:"issuer"` + + // Name Human-readable name for the identity provider + Name string `json:"name"` + + // Type Type of identity provider + Type IdentityProviderType `json:"type"` +} + +// IdentityProviderType Type of identity provider +type IdentityProviderType string + // IngressPeer defines model for IngressPeer. type IngressPeer struct { AvailablePorts AvailablePorts `json:"available_ports"` @@ -653,6 +706,12 @@ type IngressPortAllocationRequestPortRange struct { // IngressPortAllocationRequestPortRangeProtocol The protocol accepted by the port range type IngressPortAllocationRequestPortRangeProtocol string +// InstanceStatus Instance status information +type InstanceStatus struct { + // SetupRequired Indicates whether the instance requires initial setup + SetupRequired bool `json:"setup_required"` +} + // Location Describe geographical location information type Location struct { // CityName Commonly used English name of the city @@ -1833,6 +1892,27 @@ type SetupKeyRequest struct { Revoked bool `json:"revoked"` } +// SetupRequest Request to set up the initial admin user +type SetupRequest struct { + // Email Email address for the admin user + Email string `json:"email"` + + // Name Display name for the admin user (defaults to email if not provided) + Name string `json:"name"` + + // Password Password for the admin user (minimum 8 characters) + Password string `json:"password"` +} + +// SetupResponse Response after successful instance setup +type SetupResponse struct { + // Email Email address of the created user + Email string `json:"email"` + + // UserId The ID of the created user + UserId string `json:"user_id"` +} + // User defines model for User. type User struct { // AutoGroups Group IDs to auto-assign to peers registered by this user @@ -1844,6 +1924,9 @@ type User struct { // Id User ID Id string `json:"id"` + // IdpId Identity provider ID (connector ID) that the user authenticated with. Only populated for users with Dex-encoded user IDs. + IdpId *string `json:"idp_id,omitempty"` + // IsBlocked Is true if this user is blocked. Blocked users can't use the system IsBlocked bool `json:"is_blocked"` @@ -1862,6 +1945,9 @@ type User struct { // Name User's name from idp provider Name string `json:"name"` + // Password User's password. Only present when user is created (create user endpoint is called) and only when IdP supports user creation with password. + Password *string `json:"password,omitempty"` + // PendingApproval Is true if this user requires approval before being activated. Only applicable for users joining via domain matching when user_approval_required is enabled. PendingApproval bool `json:"pending_approval"` Permissions *UserPermissions `json:"permissions,omitempty"` @@ -2003,6 +2089,12 @@ type PostApiGroupsJSONRequestBody = GroupRequest // PutApiGroupsGroupIdJSONRequestBody defines body for PutApiGroupsGroupId for application/json ContentType. type PutApiGroupsGroupIdJSONRequestBody = GroupRequest +// PostApiIdentityProvidersJSONRequestBody defines body for PostApiIdentityProviders for application/json ContentType. +type PostApiIdentityProvidersJSONRequestBody = IdentityProviderRequest + +// PutApiIdentityProvidersIdpIdJSONRequestBody defines body for PutApiIdentityProvidersIdpId for application/json ContentType. +type PutApiIdentityProvidersIdpIdJSONRequestBody = IdentityProviderRequest + // PostApiIngressPeersJSONRequestBody defines body for PostApiIngressPeers for application/json ContentType. type PostApiIngressPeersJSONRequestBody = IngressPeerCreateRequest @@ -2057,6 +2149,9 @@ type PostApiRoutesJSONRequestBody = RouteRequest // PutApiRoutesRouteIdJSONRequestBody defines body for PutApiRoutesRouteId for application/json ContentType. type PutApiRoutesRouteIdJSONRequestBody = RouteRequest +// PostApiSetupJSONRequestBody defines body for PostApiSetup for application/json ContentType. +type PostApiSetupJSONRequestBody = SetupRequest + // PostApiSetupKeysJSONRequestBody defines body for PostApiSetupKeys for application/json ContentType. type PostApiSetupKeysJSONRequestBody = CreateSetupKeyRequest diff --git a/util/crypt/crypt.go b/util/crypt/crypt.go new file mode 100644 index 000000000..0e5589895 --- /dev/null +++ b/util/crypt/crypt.go @@ -0,0 +1,96 @@ +package crypt + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "encoding/base64" + "fmt" + "io" +) + +// FieldEncrypt provides AES-GCM encryption for sensitive fields. +type FieldEncrypt struct { + block cipher.Block +} + +// NewFieldEncrypt creates a new FieldEncrypt with the given base64-encoded key. +// The key must be 32 bytes when decoded (for AES-256). +func NewFieldEncrypt(base64Key string) (*FieldEncrypt, error) { + key, err := base64.StdEncoding.DecodeString(base64Key) + if err != nil { + return nil, fmt.Errorf("decode encryption key: %w", err) + } + + if len(key) != 32 { + return nil, fmt.Errorf("encryption key must be 32 bytes, got %d", len(key)) + } + + block, err := aes.NewCipher(key) + if err != nil { + return nil, fmt.Errorf("create cipher: %w", err) + } + + return &FieldEncrypt{block: block}, nil +} + +// Encrypt encrypts the given plaintext and returns base64-encoded ciphertext. +// Returns empty string for empty input. +func (f *FieldEncrypt) Encrypt(plaintext string) (string, error) { + if plaintext == "" { + return "", nil + } + + gcm, err := cipher.NewGCM(f.block) + if err != nil { + return "", fmt.Errorf("create GCM: %w", err) + } + + nonce := make([]byte, gcm.NonceSize()) + if _, err := io.ReadFull(rand.Reader, nonce); err != nil { + return "", fmt.Errorf("generate nonce: %w", err) + } + + ciphertext := gcm.Seal(nonce, nonce, []byte(plaintext), nil) + return base64.StdEncoding.EncodeToString(ciphertext), nil +} + +// Decrypt decrypts the given base64-encoded ciphertext and returns the plaintext. +// Returns empty string for empty input. +func (f *FieldEncrypt) Decrypt(ciphertext string) (string, error) { + if ciphertext == "" { + return "", nil + } + + data, err := base64.StdEncoding.DecodeString(ciphertext) + if err != nil { + return "", fmt.Errorf("decode ciphertext: %w", err) + } + + gcm, err := cipher.NewGCM(f.block) + if err != nil { + return "", fmt.Errorf("create GCM: %w", err) + } + + nonceSize := gcm.NonceSize() + if len(data) < nonceSize { + return "", fmt.Errorf("ciphertext too short") + } + + nonce, ciphertextBytes := data[:nonceSize], data[nonceSize:] + plaintext, err := gcm.Open(nil, nonce, ciphertextBytes, nil) + if err != nil { + return "", fmt.Errorf("decrypt: %w", err) + } + + return string(plaintext), nil +} + +// GenerateKey generates a new random 32-byte encryption key and returns it as base64. +func GenerateKey() (string, error) { + key := make([]byte, 32) + if _, err := io.ReadFull(rand.Reader, key); err != nil { + return "", fmt.Errorf("generate key: %w", err) + } + return base64.StdEncoding.EncodeToString(key), nil +} From 6ff9aa036686f7601cd4b3af66dfd7416bc3f298 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Wed, 7 Jan 2026 15:34:26 +0100 Subject: [PATCH 023/374] Refactor SSH server to manage listener lifecycle and expose active address via `Addr` method. (#5036) --- client/ssh/server/server.go | 16 +++++++++++++++- client/ssh/server/test.go | 25 +++++++++++-------------- 2 files changed, 26 insertions(+), 15 deletions(-) diff --git a/client/ssh/server/server.go b/client/ssh/server/server.go index f957e66a5..3a8568979 100644 --- a/client/ssh/server/server.go +++ b/client/ssh/server/server.go @@ -136,6 +136,7 @@ type sessionState struct { type Server struct { sshServer *ssh.Server + listener net.Listener mu sync.RWMutex hostKeyPEM []byte @@ -151,7 +152,6 @@ type Server struct { // Populated at authentication time, stores JWT username and port forwards for status display. connections map[connKey]*connState - allowLocalPortForwarding bool allowRemotePortForwarding bool allowRootLogin bool @@ -240,6 +240,7 @@ func (s *Server) Start(ctx context.Context, addr netip.AddrPort) error { return fmt.Errorf("create SSH server: %w", err) } + s.listener = ln s.sshServer = sshServer log.Infof("SSH server started on %s", addrDesc) @@ -292,6 +293,7 @@ func (s *Server) Stop() error { } s.sshServer = nil + s.listener = nil maps.Clear(s.sessions) maps.Clear(s.pendingAuthJWT) @@ -307,6 +309,18 @@ func (s *Server) Stop() error { return nil } +// Addr returns the address the SSH server is listening on, or nil if the server is not running +func (s *Server) Addr() net.Addr { + s.mu.RLock() + defer s.mu.RUnlock() + + if s.listener == nil { + return nil + } + + return s.listener.Addr() +} + // GetStatus returns the current status of the SSH server and active sessions. func (s *Server) GetStatus() (enabled bool, sessions []SessionInfo) { s.mu.RLock() diff --git a/client/ssh/server/test.go b/client/ssh/server/test.go index 20930c721..f8abd1752 100644 --- a/client/ssh/server/test.go +++ b/client/ssh/server/test.go @@ -3,7 +3,6 @@ package server import ( "context" "fmt" - "net" "net/netip" "testing" "time" @@ -14,23 +13,21 @@ func StartTestServer(t *testing.T, server *Server) string { errChan := make(chan error, 1) go func() { - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - errChan <- err - return - } - actualAddr := ln.Addr().String() - if err := ln.Close(); err != nil { - errChan <- fmt.Errorf("close temp listener: %w", err) - return - } - - addrPort := netip.MustParseAddrPort(actualAddr) + // Use port 0 to let the OS assign a free port + addrPort := netip.MustParseAddrPort("127.0.0.1:0") if err := server.Start(context.Background(), addrPort); err != nil { errChan <- err return } - started <- actualAddr + + // Get the actual listening address from the server + actualAddr := server.Addr() + if actualAddr == nil { + errChan <- fmt.Errorf("server started but no listener address available") + return + } + + started <- actualAddr.String() }() select { From 12a7fa24d7a0ab6c9605254bff5b0e39498f1ca2 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Wed, 7 Jan 2026 15:34:52 +0100 Subject: [PATCH 024/374] Add support for disabling eBPF WireGuard proxy via environment variable (#5047) --- client/iface/wgproxy/factory_kernel.go | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/client/iface/wgproxy/factory_kernel.go b/client/iface/wgproxy/factory_kernel.go index ad2807546..2714c5774 100644 --- a/client/iface/wgproxy/factory_kernel.go +++ b/client/iface/wgproxy/factory_kernel.go @@ -3,12 +3,19 @@ package wgproxy import ( + "os" + "strconv" + log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/client/iface/wgproxy/ebpf" udpProxy "github.com/netbirdio/netbird/client/iface/wgproxy/udp" ) +const ( + envDisableEBPFWGProxy = "NB_DISABLE_EBPF_WG_PROXY" +) + type KernelFactory struct { wgPort int mtu uint16 @@ -22,6 +29,12 @@ func NewKernelFactory(wgPort int, mtu uint16) *KernelFactory { mtu: mtu, } + if isEBPFDisabled() { + log.Infof("WireGuard Proxy Factory will produce UDP proxy") + log.Infof("eBPF WireGuard proxy is disabled via %s environment variable", envDisableEBPFWGProxy) + return f + } + ebpfProxy := ebpf.NewWGEBPFProxy(wgPort, mtu) if err := ebpfProxy.Listen(); err != nil { log.Infof("WireGuard Proxy Factory will produce UDP proxy") @@ -47,3 +60,16 @@ func (w *KernelFactory) Free() error { } return w.ebpfProxy.Free() } + +func isEBPFDisabled() bool { + val := os.Getenv(envDisableEBPFWGProxy) + if val == "" { + return false + } + disabled, err := strconv.ParseBool(val) + if err != nil { + log.Warnf("failed to parse %s: %v", envDisableEBPFWGProxy, err) + return false + } + return disabled +} From afcdef6121f03ff37dd04dc45c6938bfc059acd0 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Wed, 7 Jan 2026 15:53:18 +0100 Subject: [PATCH 025/374] [management] add ssh authorized users to network map cache (#5048) --- .../server/types/networkmap_golden_test.go | 569 ++++++------------ management/server/types/networkmapbuilder.go | 106 +++- 2 files changed, 289 insertions(+), 386 deletions(-) diff --git a/management/server/types/networkmap_golden_test.go b/management/server/types/networkmap_golden_test.go index 9135024d2..19ccbd688 100644 --- a/management/server/types/networkmap_golden_test.go +++ b/management/server/types/networkmap_golden_test.go @@ -25,15 +25,12 @@ import ( "github.com/netbirdio/netbird/route" ) -// update flag is used to update the golden file. -// example: go test ./... -v -update -// var update = flag.Bool("update", false, "update golden files") - const ( numPeers = 100 devGroupID = "group-dev" opsGroupID = "group-ops" allGroupID = "group-all" + sshUsersGroupID = "group-ssh-users" routeID = route.ID("route-main") routeHA1ID = route.ID("route-ha-1") routeHA2ID = route.ID("route-ha-2") @@ -41,6 +38,7 @@ const ( policyIDAll = "policy-all" policyIDPosture = "policy-posture" policyIDDrop = "policy-drop" + policyIDSSH = "policy-ssh" postureCheckID = "posture-check-ver" networkResourceID = "res-database" networkID = "net-database" @@ -51,6 +49,9 @@ const ( offlinePeerID = "peer-99" // This peer will be completely offline. routingPeerID = "peer-95" // This peer is used for routing, it has a route to the network. testAccountID = "account-golden-test" + userAdminID = "user-admin" + userDevID = "user-dev" + userOpsID = "user-ops" ) func TestGetPeerNetworkMap_Golden(t *testing.T) { @@ -69,61 +70,34 @@ func TestGetPeerNetworkMap_Golden(t *testing.T) { resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - networkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) - - normalizeAndSortNetworkMap(networkMap) - - jsonData, err := json.MarshalIndent(networkMap, "", " ") - require.NoError(t, err, "error marshaling network map to JSON") - - goldenFilePath := filepath.Join("testdata", "networkmap_golden.json") - - t.Log("Update golden file...") - err = os.MkdirAll(filepath.Dir(goldenFilePath), 0755) - require.NoError(t, err) - err = os.WriteFile(goldenFilePath, jsonData, 0644) - require.NoError(t, err) - - expectedJSON, err := os.ReadFile(goldenFilePath) - require.NoError(t, err, "error reading golden file") - - require.JSONEq(t, string(expectedJSON), string(jsonData), "resulted network map from OLD method does not match golden file") -} - -func TestGetPeerNetworkMap_Golden_New(t *testing.T) { - account := createTestAccountWithEntities() - - ctx := context.Background() - validatedPeersMap := make(map[string]struct{}) - for i := range numPeers { - peerID := fmt.Sprintf("peer-%d", i) - - if peerID == offlinePeerID { - continue - } - validatedPeersMap[peerID] = struct{}{} - } + legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + normalizeAndSortNetworkMap(legacyNetworkMap) + legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") + require.NoError(t, err, "error marshaling legacy network map to JSON") builder := types.NewNetworkMapBuilder(account, validatedPeersMap) - networkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + normalizeAndSortNetworkMap(newNetworkMap) + newJSON, err := json.MarshalIndent(toNetworkMapJSON(newNetworkMap), "", " ") + require.NoError(t, err, "error marshaling new network map to JSON") - normalizeAndSortNetworkMap(networkMap) + if string(legacyJSON) != string(newJSON) { + legacyFilePath := filepath.Join("testdata", "networkmap_golden.json") + newFilePath := filepath.Join("testdata", "networkmap_golden_new.json") - jsonData, err := json.MarshalIndent(networkMap, "", " ") - require.NoError(t, err, "error marshaling network map to JSON") + err = os.MkdirAll(filepath.Dir(legacyFilePath), 0755) + require.NoError(t, err) - goldenFilePath := filepath.Join("testdata", "networkmap_golden_new.json") + err = os.WriteFile(legacyFilePath, legacyJSON, 0644) + require.NoError(t, err) + t.Logf("Saved legacy network map to %s", legacyFilePath) - t.Log("Update golden file...") - err = os.MkdirAll(filepath.Dir(goldenFilePath), 0755) - require.NoError(t, err) - err = os.WriteFile(goldenFilePath, jsonData, 0644) - require.NoError(t, err) + err = os.WriteFile(newFilePath, newJSON, 0644) + require.NoError(t, err) + t.Logf("Saved new network map to %s", newFilePath) - expectedJSON, err := os.ReadFile(goldenFilePath) - require.NoError(t, err, "error reading golden file") - - require.JSONEq(t, string(expectedJSON), string(jsonData), "resulted network map from NEW builder does not match golden file") + require.JSONEq(t, string(legacyJSON), string(newJSON), "network maps from legacy and new builder do not match") + } } func BenchmarkGetPeerNetworkMap(b *testing.B) { @@ -169,6 +143,8 @@ func TestGetPeerNetworkMap_Golden_WithNewPeer(t *testing.T) { validatedPeersMap[peerID] = struct{}{} } + builder := types.NewNetworkMapBuilder(account, validatedPeersMap) + newPeerID := "peer-new-101" newPeerIP := net.IP{100, 64, 1, 1} newPeer := &nbpeer.Peer{ @@ -201,92 +177,36 @@ func TestGetPeerNetworkMap_Golden_WithNewPeer(t *testing.T) { resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - networkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + normalizeAndSortNetworkMap(legacyNetworkMap) + legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") + require.NoError(t, err, "error marshaling legacy network map to JSON") - normalizeAndSortNetworkMap(networkMap) - - jsonData, err := json.MarshalIndent(networkMap, "", " ") - require.NoError(t, err, "error marshaling network map to JSON") - - goldenFilePath := filepath.Join("testdata", "networkmap_golden_with_new_peer.json") - - t.Log("Update golden file with new peer...") - err = os.MkdirAll(filepath.Dir(goldenFilePath), 0755) - require.NoError(t, err) - err = os.WriteFile(goldenFilePath, jsonData, 0644) - require.NoError(t, err) - - expectedJSON, err := os.ReadFile(goldenFilePath) - require.NoError(t, err, "error reading golden file") - - require.JSONEq(t, string(expectedJSON), string(jsonData), "network map from OLD method with new peer does not match golden file") -} - -func TestGetPeerNetworkMap_Golden_New_WithOnPeerAdded(t *testing.T) { - account := createTestAccountWithEntities() - - ctx := context.Background() - validatedPeersMap := make(map[string]struct{}) - for i := range numPeers { - peerID := fmt.Sprintf("peer-%d", i) - if peerID == offlinePeerID { - continue - } - validatedPeersMap[peerID] = struct{}{} - } - - builder := types.NewNetworkMapBuilder(account, validatedPeersMap) - - newPeerID := "peer-new-101" - newPeerIP := net.IP{100, 64, 1, 1} - newPeer := &nbpeer.Peer{ - ID: newPeerID, - IP: newPeerIP, - Key: fmt.Sprintf("key-%s", newPeerID), - DNSLabel: "peernew101", - Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, - UserID: "user-admin", - Meta: nbpeer.PeerSystemMeta{WtVersion: "0.26.0", GoOS: "linux"}, - LastLogin: func() *time.Time { t := time.Now(); return &t }(), - } - - account.Peers[newPeerID] = newPeer - - if devGroup, exists := account.Groups[devGroupID]; exists { - devGroup.Peers = append(devGroup.Peers, newPeerID) - } - - if allGroup, exists := account.Groups[allGroupID]; exists { - allGroup.Peers = append(allGroup.Peers, newPeerID) - } - - validatedPeersMap[newPeerID] = struct{}{} - - if account.Network != nil { - account.Network.Serial++ - } - - err := builder.OnPeerAddedIncremental(account, newPeerID) + err = builder.OnPeerAddedIncremental(account, newPeerID) require.NoError(t, err, "error adding peer to cache") - networkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + normalizeAndSortNetworkMap(newNetworkMap) + newJSON, err := json.MarshalIndent(toNetworkMapJSON(newNetworkMap), "", " ") + require.NoError(t, err, "error marshaling new network map to JSON") - normalizeAndSortNetworkMap(networkMap) + if string(legacyJSON) != string(newJSON) { + legacyFilePath := filepath.Join("testdata", "networkmap_golden_with_new_peer.json") + newFilePath := filepath.Join("testdata", "networkmap_golden_new_with_onpeeradded.json") - jsonData, err := json.MarshalIndent(networkMap, "", " ") - require.NoError(t, err, "error marshaling network map to JSON") + err = os.MkdirAll(filepath.Dir(legacyFilePath), 0755) + require.NoError(t, err) - goldenFilePath := filepath.Join("testdata", "networkmap_golden_new_with_onpeeradded.json") - t.Log("Update golden file with OnPeerAdded...") - err = os.MkdirAll(filepath.Dir(goldenFilePath), 0755) - require.NoError(t, err) - err = os.WriteFile(goldenFilePath, jsonData, 0644) - require.NoError(t, err) + err = os.WriteFile(legacyFilePath, legacyJSON, 0644) + require.NoError(t, err) + t.Logf("Saved legacy network map to %s", legacyFilePath) - expectedJSON, err := os.ReadFile(goldenFilePath) - require.NoError(t, err, "error reading golden file") + err = os.WriteFile(newFilePath, newJSON, 0644) + require.NoError(t, err) + t.Logf("Saved new network map to %s", newFilePath) - require.JSONEq(t, string(expectedJSON), string(jsonData), "network map from NEW builder with OnPeerAdded does not match golden file") + require.JSONEq(t, string(legacyJSON), string(newJSON), "network maps with new peer from legacy and new builder do not match") + } } func BenchmarkGetPeerNetworkMap_AfterPeerAdded(b *testing.B) { @@ -349,6 +269,8 @@ func TestGetPeerNetworkMap_Golden_WithNewRoutingPeer(t *testing.T) { validatedPeersMap[peerID] = struct{}{} } + builder := types.NewNetworkMapBuilder(account, validatedPeersMap) + newRouterID := "peer-new-router-102" newRouterIP := net.IP{100, 64, 1, 2} newRouter := &nbpeer.Peer{ @@ -395,106 +317,36 @@ func TestGetPeerNetworkMap_Golden_WithNewRoutingPeer(t *testing.T) { resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - networkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + normalizeAndSortNetworkMap(legacyNetworkMap) + legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") + require.NoError(t, err, "error marshaling legacy network map to JSON") - normalizeAndSortNetworkMap(networkMap) - - jsonData, err := json.MarshalIndent(networkMap, "", " ") - require.NoError(t, err, "error marshaling network map to JSON") - - goldenFilePath := filepath.Join("testdata", "networkmap_golden_with_new_router.json") - - t.Log("Update golden file with new router...") - err = os.MkdirAll(filepath.Dir(goldenFilePath), 0755) - require.NoError(t, err) - err = os.WriteFile(goldenFilePath, jsonData, 0644) - require.NoError(t, err) - - expectedJSON, err := os.ReadFile(goldenFilePath) - require.NoError(t, err, "error reading golden file") - - require.JSONEq(t, string(expectedJSON), string(jsonData), "network map from OLD method with new router does not match golden file") -} - -func TestGetPeerNetworkMap_Golden_New_WithOnPeerAddedRouter(t *testing.T) { - account := createTestAccountWithEntities() - - ctx := context.Background() - validatedPeersMap := make(map[string]struct{}) - for i := range numPeers { - peerID := fmt.Sprintf("peer-%d", i) - if peerID == offlinePeerID { - continue - } - validatedPeersMap[peerID] = struct{}{} - } - - builder := types.NewNetworkMapBuilder(account, validatedPeersMap) - - newRouterID := "peer-new-router-102" - newRouterIP := net.IP{100, 64, 1, 2} - newRouter := &nbpeer.Peer{ - ID: newRouterID, - IP: newRouterIP, - Key: fmt.Sprintf("key-%s", newRouterID), - DNSLabel: "newrouter102", - Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, - UserID: "user-admin", - Meta: nbpeer.PeerSystemMeta{WtVersion: "0.26.0", GoOS: "linux"}, - LastLogin: func() *time.Time { t := time.Now(); return &t }(), - } - - account.Peers[newRouterID] = newRouter - - if opsGroup, exists := account.Groups[opsGroupID]; exists { - opsGroup.Peers = append(opsGroup.Peers, newRouterID) - } - if allGroup, exists := account.Groups[allGroupID]; exists { - allGroup.Peers = append(allGroup.Peers, newRouterID) - } - - newRoute := &route.Route{ - ID: route.ID("route-new-router"), - Network: netip.MustParsePrefix("172.16.0.0/24"), - Peer: newRouter.Key, - PeerID: newRouterID, - Description: "Route from new router", - Enabled: true, - PeerGroups: []string{opsGroupID}, - Groups: []string{devGroupID, opsGroupID}, - AccessControlGroups: []string{devGroupID}, - AccountID: account.Id, - } - account.Routes[newRoute.ID] = newRoute - - validatedPeersMap[newRouterID] = struct{}{} - - if account.Network != nil { - account.Network.Serial++ - } - - err := builder.OnPeerAddedIncremental(account, newRouterID) + err = builder.OnPeerAddedIncremental(account, newRouterID) require.NoError(t, err, "error adding router to cache") - networkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + normalizeAndSortNetworkMap(newNetworkMap) + newJSON, err := json.MarshalIndent(toNetworkMapJSON(newNetworkMap), "", " ") + require.NoError(t, err, "error marshaling new network map to JSON") - normalizeAndSortNetworkMap(networkMap) + if string(legacyJSON) != string(newJSON) { + legacyFilePath := filepath.Join("testdata", "networkmap_golden_with_new_router.json") + newFilePath := filepath.Join("testdata", "networkmap_golden_new_with_onpeeradded_router.json") - jsonData, err := json.MarshalIndent(networkMap, "", " ") - require.NoError(t, err, "error marshaling network map to JSON") + err = os.MkdirAll(filepath.Dir(legacyFilePath), 0755) + require.NoError(t, err) - goldenFilePath := filepath.Join("testdata", "networkmap_golden_new_with_onpeeradded_router.json") + err = os.WriteFile(legacyFilePath, legacyJSON, 0644) + require.NoError(t, err) + t.Logf("Saved legacy network map to %s", legacyFilePath) - t.Log("Update golden file with OnPeerAdded router...") - err = os.MkdirAll(filepath.Dir(goldenFilePath), 0755) - require.NoError(t, err) - err = os.WriteFile(goldenFilePath, jsonData, 0644) - require.NoError(t, err) + err = os.WriteFile(newFilePath, newJSON, 0644) + require.NoError(t, err) + t.Logf("Saved new network map to %s", newFilePath) - expectedJSON, err := os.ReadFile(goldenFilePath) - require.NoError(t, err, "error reading golden file") - - require.JSONEq(t, string(expectedJSON), string(jsonData), "network map from NEW builder with OnPeerAdded router does not match golden file") + require.JSONEq(t, string(legacyJSON), string(newJSON), "network maps with new router from legacy and new builder do not match") + } } func BenchmarkGetPeerNetworkMap_AfterRouterPeerAdded(b *testing.B) { @@ -579,7 +431,9 @@ func TestGetPeerNetworkMap_Golden_WithDeletedPeer(t *testing.T) { validatedPeersMap[peerID] = struct{}{} } - deletedPeerID := "peer-25" // peer from devs group + builder := types.NewNetworkMapBuilder(account, validatedPeersMap) + + deletedPeerID := "peer-25" delete(account.Peers, deletedPeerID) @@ -604,85 +458,36 @@ func TestGetPeerNetworkMap_Golden_WithDeletedPeer(t *testing.T) { resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - networkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + normalizeAndSortNetworkMap(legacyNetworkMap) + legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") + require.NoError(t, err, "error marshaling legacy network map to JSON") - normalizeAndSortNetworkMap(networkMap) - - jsonData, err := json.MarshalIndent(networkMap, "", " ") - require.NoError(t, err, "error marshaling network map to JSON") - - goldenFilePath := filepath.Join("testdata", "networkmap_golden_with_deleted_peer.json") - - t.Log("Update golden file with deleted peer...") - err = os.MkdirAll(filepath.Dir(goldenFilePath), 0755) - require.NoError(t, err) - err = os.WriteFile(goldenFilePath, jsonData, 0644) - require.NoError(t, err) - - expectedJSON, err := os.ReadFile(goldenFilePath) - require.NoError(t, err, "error reading golden file") - - require.JSONEq(t, string(expectedJSON), string(jsonData), "network map from OLD method with deleted peer does not match golden file") -} - -func TestGetPeerNetworkMap_Golden_New_WithOnPeerDeleted(t *testing.T) { - account := createTestAccountWithEntities() - - ctx := context.Background() - validatedPeersMap := make(map[string]struct{}) - for i := range numPeers { - peerID := fmt.Sprintf("peer-%d", i) - if peerID == offlinePeerID { - continue - } - validatedPeersMap[peerID] = struct{}{} - } - - builder := types.NewNetworkMapBuilder(account, validatedPeersMap) - - deletedPeerID := "peer-25" // devs group peer - - delete(account.Peers, deletedPeerID) - - if devGroup, exists := account.Groups[devGroupID]; exists { - devGroup.Peers = slices.DeleteFunc(devGroup.Peers, func(id string) bool { - return id == deletedPeerID - }) - } - - if allGroup, exists := account.Groups[allGroupID]; exists { - allGroup.Peers = slices.DeleteFunc(allGroup.Peers, func(id string) bool { - return id == deletedPeerID - }) - } - - delete(validatedPeersMap, deletedPeerID) - - if account.Network != nil { - account.Network.Serial++ - } - - err := builder.OnPeerDeleted(account, deletedPeerID) + err = builder.OnPeerDeleted(account, deletedPeerID) require.NoError(t, err, "error deleting peer from cache") - networkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + normalizeAndSortNetworkMap(newNetworkMap) + newJSON, err := json.MarshalIndent(toNetworkMapJSON(newNetworkMap), "", " ") + require.NoError(t, err, "error marshaling new network map to JSON") - normalizeAndSortNetworkMap(networkMap) + if string(legacyJSON) != string(newJSON) { + legacyFilePath := filepath.Join("testdata", "networkmap_golden_with_deleted_peer.json") + newFilePath := filepath.Join("testdata", "networkmap_golden_new_with_onpeerdeleted.json") - jsonData, err := json.MarshalIndent(networkMap, "", " ") - require.NoError(t, err, "error marshaling network map to JSON") + err = os.MkdirAll(filepath.Dir(legacyFilePath), 0755) + require.NoError(t, err) - goldenFilePath := filepath.Join("testdata", "networkmap_golden_new_with_onpeerdeleted.json") - t.Log("Update golden file with OnPeerDeleted...") - err = os.MkdirAll(filepath.Dir(goldenFilePath), 0755) - require.NoError(t, err) - err = os.WriteFile(goldenFilePath, jsonData, 0644) - require.NoError(t, err) + err = os.WriteFile(legacyFilePath, legacyJSON, 0644) + require.NoError(t, err) + t.Logf("Saved legacy network map to %s", legacyFilePath) - expectedJSON, err := os.ReadFile(goldenFilePath) - require.NoError(t, err, "error reading golden file") + err = os.WriteFile(newFilePath, newJSON, 0644) + require.NoError(t, err) + t.Logf("Saved new network map to %s", newFilePath) - require.JSONEq(t, string(expectedJSON), string(jsonData), "network map from NEW builder with OnPeerDeleted does not match golden file") + require.JSONEq(t, string(legacyJSON), string(newJSON), "network maps with deleted peer from legacy and new builder do not match") + } } func TestGetPeerNetworkMap_Golden_WithDeletedRouterPeer(t *testing.T) { @@ -698,7 +503,9 @@ func TestGetPeerNetworkMap_Golden_WithDeletedRouterPeer(t *testing.T) { validatedPeersMap[peerID] = struct{}{} } - deletedRouterID := "peer-75" // router peer + builder := types.NewNetworkMapBuilder(account, validatedPeersMap) + + deletedRouterID := "peer-75" var affectedRoute *route.Route for _, r := range account.Routes { @@ -730,93 +537,36 @@ func TestGetPeerNetworkMap_Golden_WithDeletedRouterPeer(t *testing.T) { resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - networkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + normalizeAndSortNetworkMap(legacyNetworkMap) + legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") + require.NoError(t, err, "error marshaling legacy network map to JSON") - normalizeAndSortNetworkMap(networkMap) - - jsonData, err := json.MarshalIndent(networkMap, "", " ") - require.NoError(t, err, "error marshaling network map to JSON") - - goldenFilePath := filepath.Join("testdata", "networkmap_golden_with_deleted_router_peer.json") - - t.Log("Update golden file with deleted peer...") - err = os.MkdirAll(filepath.Dir(goldenFilePath), 0755) - require.NoError(t, err) - err = os.WriteFile(goldenFilePath, jsonData, 0644) - require.NoError(t, err) - - expectedJSON, err := os.ReadFile(goldenFilePath) - require.NoError(t, err, "error reading golden file") - - require.JSONEq(t, string(expectedJSON), string(jsonData), "network map from OLD method with deleted peer does not match golden file") -} - -func TestGetPeerNetworkMap_Golden_New_WithDeletedRouterPeer(t *testing.T) { - account := createTestAccountWithEntities() - - ctx := context.Background() - validatedPeersMap := make(map[string]struct{}) - for i := range numPeers { - peerID := fmt.Sprintf("peer-%d", i) - if peerID == offlinePeerID { - continue - } - validatedPeersMap[peerID] = struct{}{} - } - - builder := types.NewNetworkMapBuilder(account, validatedPeersMap) - - deletedRouterID := "peer-75" // router peer - - var affectedRoute *route.Route - for _, r := range account.Routes { - if r.PeerID == deletedRouterID { - affectedRoute = r - break - } - } - require.NotNil(t, affectedRoute, "Router peer should have a route") - - for _, group := range account.Groups { - group.Peers = slices.DeleteFunc(group.Peers, func(id string) bool { - return id == deletedRouterID - }) - } - for routeID, r := range account.Routes { - if r.Peer == account.Peers[deletedRouterID].Key || r.PeerID == deletedRouterID { - delete(account.Routes, routeID) - } - } - delete(account.Peers, deletedRouterID) - delete(validatedPeersMap, deletedRouterID) - - if account.Network != nil { - account.Network.Serial++ - } - - err := builder.OnPeerDeleted(account, deletedRouterID) + err = builder.OnPeerDeleted(account, deletedRouterID) require.NoError(t, err, "error deleting routing peer from cache") - networkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + normalizeAndSortNetworkMap(newNetworkMap) + newJSON, err := json.MarshalIndent(toNetworkMapJSON(newNetworkMap), "", " ") + require.NoError(t, err, "error marshaling new network map to JSON") - normalizeAndSortNetworkMap(networkMap) + if string(legacyJSON) != string(newJSON) { + legacyFilePath := filepath.Join("testdata", "networkmap_golden_with_deleted_router_peer.json") + newFilePath := filepath.Join("testdata", "networkmap_golden_new_with_deleted_router.json") - jsonData, err := json.MarshalIndent(networkMap, "", " ") - require.NoError(t, err) + err = os.MkdirAll(filepath.Dir(legacyFilePath), 0755) + require.NoError(t, err) - goldenFilePath := filepath.Join("testdata", "networkmap_golden_new_with_deleted_router.json") + err = os.WriteFile(legacyFilePath, legacyJSON, 0644) + require.NoError(t, err) + t.Logf("Saved legacy network map to %s", legacyFilePath) - t.Log("Update golden file with deleted router...") - err = os.MkdirAll(filepath.Dir(goldenFilePath), 0755) - require.NoError(t, err) - err = os.WriteFile(goldenFilePath, jsonData, 0644) - require.NoError(t, err) + err = os.WriteFile(newFilePath, newJSON, 0644) + require.NoError(t, err) + t.Logf("Saved new network map to %s", newFilePath) - expectedJSON, err := os.ReadFile(goldenFilePath) - require.NoError(t, err) - - require.JSONEq(t, string(expectedJSON), string(jsonData), - "network map after deleting router does not match golden file") + require.JSONEq(t, string(legacyJSON), string(newJSON), "network maps with deleted router from legacy and new builder do not match") + } } func BenchmarkGetPeerNetworkMap_AfterPeerDeleted(b *testing.B) { @@ -924,6 +674,54 @@ func normalizeAndSortNetworkMap(networkMap *types.NetworkMap) { } } +type networkMapJSON struct { + Peers []*nbpeer.Peer `json:"Peers"` + Network *types.Network `json:"Network"` + Routes []*route.Route `json:"Routes"` + DNSConfig dns.Config `json:"DNSConfig"` + OfflinePeers []*nbpeer.Peer `json:"OfflinePeers"` + FirewallRules []*types.FirewallRule `json:"FirewallRules"` + RoutesFirewallRules []*types.RouteFirewallRule `json:"RoutesFirewallRules"` + ForwardingRules []*types.ForwardingRule `json:"ForwardingRules"` + AuthorizedUsers map[string][]string `json:"AuthorizedUsers,omitempty"` + EnableSSH bool `json:"EnableSSH"` +} + +func toNetworkMapJSON(nm *types.NetworkMap) *networkMapJSON { + result := &networkMapJSON{ + Peers: nm.Peers, + Network: nm.Network, + Routes: nm.Routes, + DNSConfig: nm.DNSConfig, + OfflinePeers: nm.OfflinePeers, + FirewallRules: nm.FirewallRules, + RoutesFirewallRules: nm.RoutesFirewallRules, + ForwardingRules: nm.ForwardingRules, + EnableSSH: nm.EnableSSH, + } + + if len(nm.AuthorizedUsers) > 0 { + result.AuthorizedUsers = make(map[string][]string) + localUsers := make([]string, 0, len(nm.AuthorizedUsers)) + for localUser := range nm.AuthorizedUsers { + localUsers = append(localUsers, localUser) + } + sort.Strings(localUsers) + + for _, localUser := range localUsers { + userIDs := nm.AuthorizedUsers[localUser] + sortedUserIDs := make([]string, 0, len(userIDs)) + for userID := range userIDs { + sortedUserIDs = append(sortedUserIDs, userID) + } + sort.Strings(sortedUserIDs) + result.AuthorizedUsers[localUser] = sortedUserIDs + } + } + + return result +} + func createTestAccountWithEntities() *types.Account { peers := make(map[string]*nbpeer.Peer) devGroupPeers, opsGroupPeers, allGroupPeers := []string{}, []string{}, []string{} @@ -959,9 +757,10 @@ func createTestAccountWithEntities() *types.Account { } groups := map[string]*types.Group{ - allGroupID: {ID: allGroupID, Name: "All", Peers: allGroupPeers}, - devGroupID: {ID: devGroupID, Name: "Developers", Peers: devGroupPeers}, - opsGroupID: {ID: opsGroupID, Name: "Operations", Peers: opsGroupPeers}, + allGroupID: {ID: allGroupID, Name: "All", Peers: allGroupPeers}, + devGroupID: {ID: devGroupID, Name: "Developers", Peers: devGroupPeers}, + opsGroupID: {ID: opsGroupID, Name: "Operations", Peers: opsGroupPeers}, + sshUsersGroupID: {ID: sshUsersGroupID, Name: "SSH Users", Peers: []string{}}, } policies := []*types.Policy{ @@ -999,6 +798,15 @@ func createTestAccountWithEntities() *types.Account { Sources: []string{opsGroupID}, DestinationResource: types.Resource{ID: networkResourceID}, }}, }, + { + ID: policyIDSSH, Name: "SSH Access Policy", Enabled: true, + Rules: []*types.PolicyRule{{ + ID: policyIDSSH, Name: "Allow SSH to Ops", Enabled: true, Action: types.PolicyTrafficActionAccept, + Protocol: types.PolicyRuleProtocolNetbirdSSH, Bidirectional: false, + Sources: []string{devGroupID}, Destinations: []string{opsGroupID}, + AuthorizedGroups: map[string][]string{sshUsersGroupID: {"root", "admin"}}, + }}, + }, } routes := map[route.ID]*route.Route{ @@ -1031,8 +839,15 @@ func createTestAccountWithEntities() *types.Account { }, } + users := map[string]*types.User{ + userAdminID: {Id: userAdminID, Role: types.UserRoleAdmin, IsServiceUser: false, AccountID: testAccountID, AutoGroups: []string{allGroupID}}, + userDevID: {Id: userDevID, Role: types.UserRoleUser, IsServiceUser: false, AccountID: testAccountID, AutoGroups: []string{sshUsersGroupID, devGroupID}}, + userOpsID: {Id: userOpsID, Role: types.UserRoleUser, IsServiceUser: false, AccountID: testAccountID, AutoGroups: []string{sshUsersGroupID, opsGroupID}}, + } + account := &types.Account{ Id: testAccountID, Peers: peers, Groups: groups, Policies: policies, Routes: routes, + Users: users, Network: &types.Network{ Identifier: "net-golden-test", Net: net.IPNet{IP: net.IP{100, 64, 0, 0}, Mask: net.CIDRMask(16, 32)}, Serial: 1, }, diff --git a/management/server/types/networkmapbuilder.go b/management/server/types/networkmapbuilder.go index a508cf725..0acd3a026 100644 --- a/management/server/types/networkmapbuilder.go +++ b/management/server/types/networkmapbuilder.go @@ -12,6 +12,7 @@ import ( log "github.com/sirupsen/logrus" "golang.org/x/exp/maps" + "github.com/netbirdio/netbird/client/ssh/auth" nbdns "github.com/netbirdio/netbird/dns" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" @@ -47,6 +48,10 @@ type NetworkMapCache struct { peerACLs map[string]*PeerACLView peerRoutes map[string]*PeerRoutesView peerDNS map[string]*nbdns.Config + peerSSH map[string]*PeerSSHView + + groupIDToUserIDs map[string][]string + allowedUserIDs map[string]struct{} resourceRouters map[string]map[string]*routerTypes.NetworkRouter resourcePolicies map[string][]*Policy @@ -76,6 +81,11 @@ type PeerRoutesView struct { RouteFirewallRuleIDs []string } +type PeerSSHView struct { + EnableSSH bool + AuthorizedUsers map[string]map[string]struct{} +} + type NetworkMapBuilder struct { account *Account cache *NetworkMapCache @@ -108,6 +118,9 @@ func NewNetworkMapBuilder(account *Account, validatedPeers map[string]struct{}) peerACLs: make(map[string]*PeerACLView), peerRoutes: make(map[string]*PeerRoutesView), peerDNS: make(map[string]*nbdns.Config), + peerSSH: make(map[string]*PeerSSHView), + groupIDToUserIDs: make(map[string][]string), + allowedUserIDs: make(map[string]struct{}), globalResources: make(map[string]*resourceTypes.NetworkResource), acgToRoutes: make(map[string]map[route.ID]*RouteOwnerInfo), noACGRoutes: make(map[route.ID]*RouteOwnerInfo), @@ -165,9 +178,15 @@ func (b *NetworkMapBuilder) buildGlobalIndexes(account *Account) { clear(b.cache.peerToRoutes) clear(b.cache.acgToRoutes) clear(b.cache.noACGRoutes) + clear(b.cache.groupIDToUserIDs) + clear(b.cache.allowedUserIDs) + clear(b.cache.peerSSH) maps.Copy(b.cache.globalPeers, account.Peers) + b.cache.groupIDToUserIDs = account.GetActiveGroupUsers() + b.cache.allowedUserIDs = b.buildAllowedUserIDs(account) + for groupID, group := range account.Groups { peersCopy := make([]string, len(group.Peers)) copy(peersCopy, group.Peers) @@ -242,7 +261,7 @@ func (b *NetworkMapBuilder) buildPeerACLView(account *Account, peerID string) { return } - allPotentialPeers, firewallRules := b.getPeerConnectionResources(account, peer, b.validatedPeers) + allPotentialPeers, firewallRules, authorizedUsers, sshEnabled := b.getPeerConnectionResources(account, peer, b.validatedPeers) isRouter, networkResourcesRoutes, sourcePeers := b.getNetworkResourcesForPeer(account, peer) @@ -272,11 +291,15 @@ func (b *NetworkMapBuilder) buildPeerACLView(account *Account, peerID string) { } b.cache.peerACLs[peerID] = view + b.cache.peerSSH[peerID] = &PeerSSHView{ + EnableSSH: sshEnabled, + AuthorizedUsers: authorizedUsers, + } } func (b *NetworkMapBuilder) getPeerConnectionResources(account *Account, peer *nbpeer.Peer, validatedPeersMap map[string]struct{}, -) ([]*nbpeer.Peer, []*FirewallRule) { +) ([]*nbpeer.Peer, []*FirewallRule, map[string]map[string]struct{}, bool) { peerID := peer.ID ctx := context.Background() @@ -291,6 +314,9 @@ func (b *NetworkMapBuilder) getPeerConnectionResources(account *Account, peer *n fwRules := make([]*FirewallRule, 0) peers := make([]*nbpeer.Peer, 0) + authorizedUsers := make(map[string]map[string]struct{}) + sshEnabled := false + for _, group := range peerGroups { policies := b.cache.groupToPolicies[group] for _, policy := range policies { @@ -363,12 +389,48 @@ func (b *NetworkMapBuilder) getPeerConnectionResources(account *Account, peer *n rule, sourcePeers, FirewallRuleDirectionIN, peer, &peers, &fwRules, peersExists, rulesExists, ) + + if rule.Protocol == PolicyRuleProtocolNetbirdSSH { + sshEnabled = true + switch { + case len(rule.AuthorizedGroups) > 0: + for groupID, localUsers := range rule.AuthorizedGroups { + userIDs, ok := b.cache.groupIDToUserIDs[groupID] + if !ok { + continue + } + + if len(localUsers) == 0 { + localUsers = []string{auth.Wildcard} + } + + for _, localUser := range localUsers { + if authorizedUsers[localUser] == nil { + authorizedUsers[localUser] = make(map[string]struct{}) + } + for _, userID := range userIDs { + authorizedUsers[localUser][userID] = struct{}{} + } + } + } + case rule.AuthorizedUser != "": + if authorizedUsers[auth.Wildcard] == nil { + authorizedUsers[auth.Wildcard] = make(map[string]struct{}) + } + authorizedUsers[auth.Wildcard][rule.AuthorizedUser] = struct{}{} + default: + authorizedUsers[auth.Wildcard] = maps.Clone(b.cache.allowedUserIDs) + } + } else if policyRuleImpliesLegacySSH(rule) && peer.SSHEnabled { + sshEnabled = true + authorizedUsers[auth.Wildcard] = maps.Clone(b.cache.allowedUserIDs) + } } } } } - return peers, fwRules + return peers, fwRules, authorizedUsers, sshEnabled } func (b *NetworkMapBuilder) isPeerInGroupscached(groupIDs []string, peerGroupsMap map[string]struct{}) bool { @@ -438,7 +500,7 @@ func (b *NetworkMapBuilder) generateResourcescached( PeerIP: peer.IP.String(), Direction: direction, Action: string(rule.Action), - Protocol: string(rule.Protocol), + Protocol: firewallRuleProtocol(rule.Protocol), } var s strings.Builder @@ -945,6 +1007,23 @@ func (b *NetworkMapBuilder) getPeerNSGroups(account *Account, peerID string, che return peerNSGroups } +func (b *NetworkMapBuilder) buildAllowedUserIDs(account *Account) map[string]struct{} { + users := make(map[string]struct{}) + for _, nbUser := range account.Users { + if !nbUser.IsBlocked() && !nbUser.IsServiceUser { + users[nbUser.Id] = struct{}{} + } + } + return users +} + +func firewallRuleProtocol(protocol PolicyRuleProtocolType) string { + if protocol == PolicyRuleProtocolNetbirdSSH { + return string(PolicyRuleProtocolTCP) + } + return string(protocol) +} + // lock should be held func (b *NetworkMapBuilder) updateAccountLocked(account *Account) *Account { if account.Network.CurrentSerial() > b.account.Network.CurrentSerial() { @@ -972,12 +1051,13 @@ func (b *NetworkMapBuilder) GetPeerNetworkMap( aclView := b.cache.peerACLs[peerID] routesView := b.cache.peerRoutes[peerID] dnsConfig := b.cache.peerDNS[peerID] + sshView := b.cache.peerSSH[peerID] if aclView == nil || routesView == nil || dnsConfig == nil { return &NetworkMap{Network: account.Network.Copy()} } - nm := b.assembleNetworkMap(account, peer, aclView, routesView, dnsConfig, peersCustomZone, validatedPeers) + nm := b.assembleNetworkMap(account, peer, aclView, routesView, dnsConfig, sshView, peersCustomZone, validatedPeers) if metrics != nil { objectCount := int64(len(nm.Peers) + len(nm.OfflinePeers) + len(nm.Routes) + len(nm.FirewallRules) + len(nm.RoutesFirewallRules)) @@ -995,7 +1075,7 @@ func (b *NetworkMapBuilder) GetPeerNetworkMap( func (b *NetworkMapBuilder) assembleNetworkMap( account *Account, peer *nbpeer.Peer, aclView *PeerACLView, routesView *PeerRoutesView, - dnsConfig *nbdns.Config, customZone nbdns.CustomZone, validatedPeers map[string]struct{}, + dnsConfig *nbdns.Config, sshView *PeerSSHView, customZone nbdns.CustomZone, validatedPeers map[string]struct{}, ) *NetworkMap { var peersToConnect []*nbpeer.Peer @@ -1055,7 +1135,7 @@ func (b *NetworkMapBuilder) assembleNetworkMap( finalDNSConfig.CustomZones = zones } - return &NetworkMap{ + nm := &NetworkMap{ Peers: peersToConnect, Network: account.Network.Copy(), Routes: routes, @@ -1064,6 +1144,13 @@ func (b *NetworkMapBuilder) assembleNetworkMap( FirewallRules: firewallRules, RoutesFirewallRules: routesFirewallRules, } + + if sshView != nil { + nm.EnableSSH = sshView.EnableSSH + nm.AuthorizedUsers = sshView.AuthorizedUsers + } + + return nm } func (b *NetworkMapBuilder) generateFirewallRuleID(rule *FirewallRule) string { @@ -1772,7 +1859,7 @@ func (b *NetworkMapBuilder) addUpdateForPeersInGroups( PeerIP: newPeer.IP.String(), Direction: direction, Action: string(rule.Action), - Protocol: string(rule.Protocol), + Protocol: firewallRuleProtocol(rule.Protocol), } for _, peerID := range peers { if peerID == newPeerID { @@ -1823,7 +1910,7 @@ func (b *NetworkMapBuilder) addUpdateForDirectPeerResource( PeerIP: newPeer.IP.String(), Direction: direction, Action: string(rule.Action), - Protocol: string(rule.Protocol), + Protocol: firewallRuleProtocol(rule.Protocol), } b.addOrUpdateFirewallRuleInDelta(updates, targetPeerID, newPeerID, rule, direction, fr, fr.PeerIP, targetPeer) @@ -1989,6 +2076,7 @@ func (b *NetworkMapBuilder) OnPeerDeleted(acc *Account, peerID string) error { delete(b.cache.peerACLs, peerID) delete(b.cache.peerRoutes, peerID) delete(b.cache.peerDNS, peerID) + delete(b.cache.peerSSH, peerID) delete(b.cache.globalPeers, peerID) From 8722b79799ae5db6e58961aebccbcb19e7cab7aa Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Wed, 7 Jan 2026 16:30:29 +0100 Subject: [PATCH 026/374] [relay] Update GO version and QUIC version (#4736) - Go 1.25.5 - QUIC 0.55.0 --- .devcontainer/Dockerfile | 14 +- .github/workflows/golang-test-freebsd.yml | 2 +- .github/workflows/golang-test-linux.yml | 4 +- .github/workflows/golangci-lint.yml | 7 +- .github/workflows/release.yml | 2 +- .github/workflows/wasm-build-validation.yml | 13 +- .golangci.yaml | 255 +++++++++--------- client/cmd/debug.go | 1 + client/cmd/login.go | 2 + client/cmd/pprof.go | 1 - client/cmd/root.go | 1 + client/cmd/status.go | 1 + client/cmd/testutil_test.go | 3 - client/cmd/up.go | 1 + client/firewall/iptables/acl_linux.go | 5 +- .../firewall/iptables/manager_linux_test.go | 2 +- .../firewall/nftables/manager_linux_test.go | 13 +- client/firewall/uspfilter/filter.go | 2 +- client/firewall/uspfilter/localip.go | 1 + client/firewall/uspfilter/localip_test.go | 4 +- client/firewall/uspfilter/nat_test.go | 5 +- client/iface/device/device_ios.go | 3 - client/internal/debug/debug_linux.go | 10 +- client/internal/iface.go | 1 - client/internal/routemanager/iface/iface.go | 1 - .../systemops/systemops_generic.go | 14 +- client/ios/NetBirdSDK/client.go | 2 +- client/server/panic_windows.go | 1 - client/ssh/server/jwt_test.go | 5 +- client/system/info_android.go | 3 - client/system/info_darwin.go | 1 - client/system/info_ios.go | 3 - client/ui/client_ui.go | 12 +- client/ui/signal_windows.go | 2 +- go.mod | 10 +- go.sum | 13 +- management/cmd/management.go | 2 +- .../internals/shared/grpc/loginfilter_test.go | 1 + management/server/account.go | 2 +- management/server/account_test.go | 4 +- .../policies/posture_checks_handler_test.go | 2 +- .../peers_handler_benchmark_test.go | 1 - .../setupkeys_handler_benchmark_test.go | 1 - .../users_handler_benchmark_test.go | 1 - .../setupkeys_handler_integration_test.go | 1 - management/server/idp/pocketid.go | 4 +- management/server/idp/zitadel.go | 2 +- management/server/migration/migration.go | 2 +- management/server/nameserver.go | 4 +- management/server/posture_checks.go | 2 +- .../store/sql_store_get_account_test.go | 5 +- management/server/store/sql_store_test.go | 19 +- management/server/testutil/store.go | 1 - management/server/testutil/store_ios.go | 1 - relay/cmd/pprof.go | 1 - relay/server/listener/quic/conn.go | 4 +- relay/server/listener/ws/conn.go | 2 +- .../management/client/rest/accounts_test.go | 1 - shared/management/client/rest/client.go | 4 +- shared/management/client/rest/client_test.go | 1 - shared/management/client/rest/dns_test.go | 1 - shared/management/client/rest/events_test.go | 1 - shared/management/client/rest/geo_test.go | 1 - shared/management/client/rest/groups_test.go | 1 - .../client/rest/impersonation_test.go | 1 - .../management/client/rest/networks_test.go | 1 - shared/management/client/rest/peers_test.go | 1 - .../management/client/rest/policies_test.go | 1 - .../client/rest/posturechecks_test.go | 1 - shared/management/client/rest/routes_test.go | 1 - .../management/client/rest/setupkeys_test.go | 1 - shared/management/client/rest/tokens_test.go | 1 - shared/management/client/rest/users_test.go | 1 - shared/relay/client/client_test.go | 82 +++--- shared/relay/client/dialer/quic/conn.go | 4 +- shared/relay/client/manager_test.go | 57 ++-- signal/cmd/run.go | 6 +- util/syslog_nonwindows.go | 1 - 78 files changed, 311 insertions(+), 340 deletions(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 9e5e97a31..80809e667 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,15 +1,15 @@ -FROM golang:1.23-bullseye +FROM golang:1.25-bookworm RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ && apt-get -y install --no-install-recommends\ - gettext-base=0.21-4 \ - iptables=1.8.7-1 \ - libgl1-mesa-dev=20.3.5-1 \ - xorg-dev=1:7.7+22 \ - libayatana-appindicator3-dev=0.5.5-2+deb11u2 \ + gettext-base=0.21-12 \ + iptables=1.8.9-2 \ + libgl1-mesa-dev=22.3.6-1+deb12u1 \ + xorg-dev=1:7.7+23 \ + libayatana-appindicator3-dev=0.5.92-1 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* \ - && go install -v golang.org/x/tools/gopls@v0.18.1 + && go install -v golang.org/x/tools/gopls@latest WORKDIR /app diff --git a/.github/workflows/golang-test-freebsd.yml b/.github/workflows/golang-test-freebsd.yml index 0d19e8a19..df64e86bb 100644 --- a/.github/workflows/golang-test-freebsd.yml +++ b/.github/workflows/golang-test-freebsd.yml @@ -25,7 +25,7 @@ jobs: release: "14.2" prepare: | pkg install -y curl pkgconf xorg - GO_TARBALL="go1.24.10.freebsd-amd64.tar.gz" + GO_TARBALL="go1.25.3.freebsd-amd64.tar.gz" GO_URL="https://go.dev/dl/$GO_TARBALL" curl -vLO "$GO_URL" tar -C /usr/local -vxzf "$GO_TARBALL" diff --git a/.github/workflows/golang-test-linux.yml b/.github/workflows/golang-test-linux.yml index c09bfab39..195a37a1f 100644 --- a/.github/workflows/golang-test-linux.yml +++ b/.github/workflows/golang-test-linux.yml @@ -200,7 +200,7 @@ jobs: -e GOCACHE=${CONTAINER_GOCACHE} \ -e GOMODCACHE=${CONTAINER_GOMODCACHE} \ -e CONTAINER=${CONTAINER} \ - golang:1.24-alpine \ + golang:1.25-alpine \ sh -c ' \ apk update; apk add --no-cache \ ca-certificates iptables ip6tables dbus dbus-dev libpcap-dev build-base; \ @@ -259,7 +259,7 @@ jobs: CGO_ENABLED=1 GOARCH=${{ matrix.arch }} \ go test ${{ matrix.raceFlag }} \ -exec 'sudo' \ - -timeout 10m ./relay/... ./shared/relay/... + -timeout 10m -p 1 ./relay/... ./shared/relay/... test_signal: name: "Signal / Unit" diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index c524f6f6b..9ce779dbb 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -52,7 +52,10 @@ jobs: if: matrix.os == 'ubuntu-latest' run: sudo apt update && sudo apt install -y -q libgtk-3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev libpcap-dev - name: golangci-lint - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 with: version: latest - args: --timeout=12m --out-format colored-line-number + skip-cache: true + skip-save-cache: true + cache-invalidation-interval: 0 + args: --timeout=12m diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2fa847dce..84f6f64ed 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -63,7 +63,7 @@ jobs: pkg install -y git curl portlint go # Install Go for building - GO_TARBALL="go1.24.10.freebsd-amd64.tar.gz" + GO_TARBALL="go1.25.5.freebsd-amd64.tar.gz" GO_URL="https://go.dev/dl/$GO_TARBALL" curl -LO "$GO_URL" tar -C /usr/local -xzf "$GO_TARBALL" diff --git a/.github/workflows/wasm-build-validation.yml b/.github/workflows/wasm-build-validation.yml index 4100e16dd..47e45165b 100644 --- a/.github/workflows/wasm-build-validation.yml +++ b/.github/workflows/wasm-build-validation.yml @@ -14,6 +14,9 @@ jobs: js_lint: name: "JS / Lint" runs-on: ubuntu-latest + env: + GOOS: js + GOARCH: wasm steps: - name: Checkout repository uses: actions/checkout@v4 @@ -24,16 +27,14 @@ jobs: - name: Install dependencies run: sudo apt update && sudo apt install -y -q libgtk-3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev libpcap-dev - name: Install golangci-lint - uses: golangci/golangci-lint-action@d6238b002a20823d52840fda27e2d4891c5952dc + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 with: version: latest install-mode: binary skip-cache: true - skip-pkg-cache: true - skip-build-cache: true - - name: Run golangci-lint for WASM - run: | - GOOS=js GOARCH=wasm golangci-lint run --timeout=12m --out-format colored-line-number ./client/... + skip-save-cache: true + cache-invalidation-interval: 0 + working-directory: ./client continue-on-error: true js_build: diff --git a/.golangci.yaml b/.golangci.yaml index 461677c2e..d81ad1377 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -1,139 +1,124 @@ -run: - # Timeout for analysis, e.g. 30s, 5m. - # Default: 1m - timeout: 6m - -# This file contains only configs which differ from defaults. -# All possible options can be found here https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml -linters-settings: - errcheck: - # Report about not checking of errors in type assertions: `a := b.(MyStruct)`. - # Such cases aren't reported by default. - # Default: false - check-type-assertions: false - - gosec: - includes: - - G101 # Look for hard coded credentials - #- G102 # Bind to all interfaces - - G103 # Audit the use of unsafe block - - G104 # Audit errors not checked - - G106 # Audit the use of ssh.InsecureIgnoreHostKey - #- G107 # Url provided to HTTP request as taint input - - G108 # Profiling endpoint automatically exposed on /debug/pprof - - G109 # Potential Integer overflow made by strconv.Atoi result conversion to int16/32 - - G110 # Potential DoS vulnerability via decompression bomb - - G111 # Potential directory traversal - #- G112 # Potential slowloris attack - - G113 # Usage of Rat.SetString in math/big with an overflow (CVE-2022-23772) - #- G114 # Use of net/http serve function that has no support for setting timeouts - - G201 # SQL query construction using format string - - G202 # SQL query construction using string concatenation - - G203 # Use of unescaped data in HTML templates - #- G204 # Audit use of command execution - - G301 # Poor file permissions used when creating a directory - - G302 # Poor file permissions used with chmod - - G303 # Creating tempfile using a predictable path - - G304 # File path provided as taint input - - G305 # File traversal when extracting zip/tar archive - - G306 # Poor file permissions used when writing to a new file - - G307 # Poor file permissions used when creating a file with os.Create - #- G401 # Detect the usage of DES, RC4, MD5 or SHA1 - #- G402 # Look for bad TLS connection settings - - G403 # Ensure minimum RSA key length of 2048 bits - #- G404 # Insecure random number source (rand) - #- G501 # Import blocklist: crypto/md5 - - G502 # Import blocklist: crypto/des - - G503 # Import blocklist: crypto/rc4 - - G504 # Import blocklist: net/http/cgi - #- G505 # Import blocklist: crypto/sha1 - - G601 # Implicit memory aliasing of items from a range statement - - G602 # Slice access out of bounds - - gocritic: - disabled-checks: - - commentFormatting - - captLocal - - deprecatedComment - - govet: - # Enable all analyzers. - # Default: false - enable-all: false - enable: - - nilness - - revive: - rules: - - name: exported - severity: warning - disabled: false - arguments: - - "checkPrivateReceivers" - - "sayRepetitiveInsteadOfStutters" - tenv: - # The option `all` will run against whole test files (`_test.go`) regardless of method/function signatures. - # Otherwise, only methods that take `*testing.T`, `*testing.B`, and `testing.TB` as arguments are checked. - # Default: false - all: true - +version: "2" linters: - disable-all: true + default: none enable: - ## enabled by default - - errcheck # checking for unchecked errors, these unchecked errors can be critical bugs in some cases - - gosimple # specializes in simplifying a code - - govet # reports suspicious constructs, such as Printf calls whose arguments do not align with the format string - - ineffassign # detects when assignments to existing variables are not used - - staticcheck # is a go vet on steroids, applying a ton of static analysis checks - - tenv # Tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17. - - typecheck # like the front-end of a Go compiler, parses and type-checks Go code - - unused # checks for unused constants, variables, functions and types - ## disable by default but the have interesting results so lets add them - - bodyclose # checks whether HTTP response body is closed successfully - - dupword # dupword checks for duplicate words in the source code - - durationcheck # durationcheck checks for two durations multiplied together - - forbidigo # forbidigo forbids identifiers - - gocritic # provides diagnostics that check for bugs, performance and style issues - - gosec # inspects source code for security problems - - mirror # mirror reports wrong mirror patterns of bytes/strings usage - - misspell # misspess finds commonly misspelled English words in comments - - nilerr # finds the code that returns nil even if it checks that the error is not nil - - nilnil # checks that there is no simultaneous return of nil error and an invalid value - - predeclared # predeclared finds code that shadows one of Go's predeclared identifiers - - revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint. - - sqlclosecheck # checks that sql.Rows and sql.Stmt are closed - # - thelper # thelper detects Go test helpers without t.Helper() call and checks the consistency of test helpers. - - wastedassign # wastedassign finds wasted assignment statements + - bodyclose + - dupword + - durationcheck + - errcheck + - forbidigo + - gocritic + - gosec + - govet + - ineffassign + - mirror + - misspell + - nilerr + - nilnil + - predeclared + - revive + - sqlclosecheck + - staticcheck + - unused + - wastedassign + settings: + errcheck: + check-type-assertions: false + gocritic: + disabled-checks: + - commentFormatting + - captLocal + - deprecatedComment + gosec: + includes: + - G101 + - G103 + - G104 + - G106 + - G108 + - G109 + - G110 + - G111 + - G201 + - G202 + - G203 + - G301 + - G302 + - G303 + - G304 + - G305 + - G306 + - G307 + - G403 + - G502 + - G503 + - G504 + - G601 + - G602 + govet: + enable: + - nilness + enable-all: false + revive: + rules: + - name: exported + arguments: + - checkPrivateReceivers + - sayRepetitiveInsteadOfStutters + severity: warning + disabled: false + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - linters: + - forbidigo + path: management/cmd/root\.go + - linters: + - forbidigo + path: signal/cmd/root\.go + - linters: + - unused + path: sharedsock/filter\.go + - linters: + - unused + path: client/firewall/iptables/rule\.go + - linters: + - gosec + - mirror + path: test\.go + - linters: + - nilnil + path: mock\.go + - linters: + - staticcheck + text: grpc.DialContext is deprecated + - linters: + - staticcheck + text: grpc.WithBlock is deprecated + - linters: + - staticcheck + text: "QF1001" + - linters: + - staticcheck + text: "QF1008" + - linters: + - staticcheck + text: "QF1012" + paths: + - third_party$ + - builtin$ + - examples$ issues: - # Maximum count of issues with the same text. - # Set to 0 to disable. - # Default: 3 max-same-issues: 5 - - exclude-rules: - # allow fmt - - path: management/cmd/root\.go - linters: forbidigo - - path: signal/cmd/root\.go - linters: forbidigo - - path: sharedsock/filter\.go - linters: - - unused - - path: client/firewall/iptables/rule\.go - linters: - - unused - - path: test\.go - linters: - - mirror - - gosec - - path: mock\.go - linters: - - nilnil - # Exclude specific deprecation warnings for grpc methods - - linters: - - staticcheck - text: "grpc.DialContext is deprecated" - - linters: - - staticcheck - text: "grpc.WithBlock is deprecated" +formatters: + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/client/cmd/debug.go b/client/cmd/debug.go index 430012a17..7ca56857b 100644 --- a/client/cmd/debug.go +++ b/client/cmd/debug.go @@ -136,6 +136,7 @@ func setLogLevel(cmd *cobra.Command, args []string) error { client := proto.NewDaemonServiceClient(conn) level := server.ParseLogLevel(args[0]) if level == proto.LogLevel_UNKNOWN { + //nolint return fmt.Errorf("unknown log level: %s. Available levels are: panic, fatal, error, warn, info, debug, trace\n", args[0]) } diff --git a/client/cmd/login.go b/client/cmd/login.go index a34bb7c70..57c010571 100644 --- a/client/cmd/login.go +++ b/client/cmd/login.go @@ -81,6 +81,7 @@ var loginCmd = &cobra.Command{ func doDaemonLogin(ctx context.Context, cmd *cobra.Command, providedSetupKey string, activeProf *profilemanager.Profile, username string, pm *profilemanager.ProfileManager) error { conn, err := DialClientGRPCServer(ctx, daemonAddr) if err != nil { + //nolint return fmt.Errorf("failed to connect to daemon error: %v\n"+ "If the daemon is not running please run: "+ "\nnetbird service install \nnetbird service start\n", err) @@ -206,6 +207,7 @@ func switchProfileOnDaemon(ctx context.Context, pm *profilemanager.ProfileManage func switchProfile(ctx context.Context, profileName string, username string) error { conn, err := DialClientGRPCServer(ctx, daemonAddr) if err != nil { + //nolint return fmt.Errorf("failed to connect to daemon error: %v\n"+ "If the daemon is not running please run: "+ "\nnetbird service install \nnetbird service start\n", err) diff --git a/client/cmd/pprof.go b/client/cmd/pprof.go index 37efd35f0..c041c6ea9 100644 --- a/client/cmd/pprof.go +++ b/client/cmd/pprof.go @@ -1,5 +1,4 @@ //go:build pprof -// +build pprof package cmd diff --git a/client/cmd/root.go b/client/cmd/root.go index 30120c196..f4f4f6052 100644 --- a/client/cmd/root.go +++ b/client/cmd/root.go @@ -390,6 +390,7 @@ func getClient(cmd *cobra.Command) (*grpc.ClientConn, error) { conn, err := DialClientGRPCServer(cmd.Context(), daemonAddr) if err != nil { + //nolint return nil, fmt.Errorf("failed to connect to daemon error: %v\n"+ "If the daemon is not running please run: "+ "\nnetbird service install \nnetbird service start\n", err) diff --git a/client/cmd/status.go b/client/cmd/status.go index 06460a6a7..99d47cd1a 100644 --- a/client/cmd/status.go +++ b/client/cmd/status.go @@ -124,6 +124,7 @@ func statusFunc(cmd *cobra.Command, args []string) error { func getStatus(ctx context.Context, shouldRunProbes bool) (*proto.StatusResponse, error) { conn, err := DialClientGRPCServer(ctx, daemonAddr) if err != nil { + //nolint return nil, fmt.Errorf("failed to connect to daemon error: %v\n"+ "If the daemon is not running please run: "+ "\nnetbird service install \nnetbird service start\n", err) diff --git a/client/cmd/testutil_test.go b/client/cmd/testutil_test.go index 888a9a3f7..2650d6225 100644 --- a/client/cmd/testutil_test.go +++ b/client/cmd/testutil_test.go @@ -89,9 +89,6 @@ func startManagement(t *testing.T, config *config.Config, testFile string) (*grp t.Cleanup(cleanUp) eventStore := &activity.InMemoryEventStore{} - if err != nil { - return nil, nil - } ctrl := gomock.NewController(t) t.Cleanup(ctrl.Finish) diff --git a/client/cmd/up.go b/client/cmd/up.go index 9efc2e60d..057d35268 100644 --- a/client/cmd/up.go +++ b/client/cmd/up.go @@ -216,6 +216,7 @@ func runInDaemonMode(ctx context.Context, cmd *cobra.Command, pm *profilemanager conn, err := DialClientGRPCServer(ctx, daemonAddr) if err != nil { + //nolint return fmt.Errorf("failed to connect to daemon error: %v\n"+ "If the daemon is not running please run: "+ "\nnetbird service install \nnetbird service start\n", err) diff --git a/client/firewall/iptables/acl_linux.go b/client/firewall/iptables/acl_linux.go index 5ccaf17ba..d83798f09 100644 --- a/client/firewall/iptables/acl_linux.go +++ b/client/firewall/iptables/acl_linux.go @@ -386,11 +386,8 @@ func (m *aclManager) updateState() { // filterRuleSpecs returns the specs of a filtering rule func filterRuleSpecs(ip net.IP, protocol string, sPort, dPort *firewall.Port, action firewall.Action, ipsetName string) (specs []string) { - matchByIP := true // don't use IP matching if IP is 0.0.0.0 - if ip.IsUnspecified() { - matchByIP = false - } + matchByIP := !ip.IsUnspecified() if matchByIP { if ipsetName != "" { diff --git a/client/firewall/iptables/manager_linux_test.go b/client/firewall/iptables/manager_linux_test.go index 6b5401e2b..ee47a27c0 100644 --- a/client/firewall/iptables/manager_linux_test.go +++ b/client/firewall/iptables/manager_linux_test.go @@ -161,7 +161,7 @@ func TestIptablesManagerDenyRules(t *testing.T) { t.Logf(" [%d] %s", i, rule) } - var denyRuleIndex, acceptRuleIndex int = -1, -1 + var denyRuleIndex, acceptRuleIndex = -1, -1 for i, rule := range rules { if strings.Contains(rule, "DROP") { t.Logf("Found DROP rule at index %d: %s", i, rule) diff --git a/client/firewall/nftables/manager_linux_test.go b/client/firewall/nftables/manager_linux_test.go index 6b29c5606..75b1e2b6c 100644 --- a/client/firewall/nftables/manager_linux_test.go +++ b/client/firewall/nftables/manager_linux_test.go @@ -198,7 +198,7 @@ func TestNftablesManagerRuleOrder(t *testing.T) { t.Logf("Found %d rules in nftables chain", len(rules)) // Find the accept and deny rules and verify deny comes before accept - var acceptRuleIndex, denyRuleIndex int = -1, -1 + var acceptRuleIndex, denyRuleIndex = -1, -1 for i, rule := range rules { hasAcceptHTTPSet := false hasDenyHTTPSet := false @@ -208,11 +208,13 @@ func TestNftablesManagerRuleOrder(t *testing.T) { for _, e := range rule.Exprs { // Check for set lookup if lookup, ok := e.(*expr.Lookup); ok { - if lookup.SetName == "accept-http" { + switch lookup.SetName { + case "accept-http": hasAcceptHTTPSet = true - } else if lookup.SetName == "deny-http" { + case "deny-http": hasDenyHTTPSet = true } + } // Check for port 80 if cmp, ok := e.(*expr.Cmp); ok { @@ -222,9 +224,10 @@ func TestNftablesManagerRuleOrder(t *testing.T) { } // Check for verdict if verdict, ok := e.(*expr.Verdict); ok { - if verdict.Kind == expr.VerdictAccept { + switch verdict.Kind { + case expr.VerdictAccept: action = "ACCEPT" - } else if verdict.Kind == expr.VerdictDrop { + case expr.VerdictDrop: action = "DROP" } } diff --git a/client/firewall/uspfilter/filter.go b/client/firewall/uspfilter/filter.go index 4e22bde3f..3d3d79631 100644 --- a/client/firewall/uspfilter/filter.go +++ b/client/firewall/uspfilter/filter.go @@ -795,7 +795,7 @@ func (m *Manager) recalculateTCPChecksum(packetData []byte, d *decoder, tcpHeade pseudoSum += uint32(d.ip4.Protocol) pseudoSum += uint32(tcpLength) - var sum uint32 = pseudoSum + var sum = pseudoSum for i := 0; i < tcpLength-1; i += 2 { sum += uint32(tcpLayer[i])<<8 | uint32(tcpLayer[i+1]) } diff --git a/client/firewall/uspfilter/localip.go b/client/firewall/uspfilter/localip.go index 7f6b52c71..ffc807f46 100644 --- a/client/firewall/uspfilter/localip.go +++ b/client/firewall/uspfilter/localip.go @@ -130,6 +130,7 @@ func (m *localIPManager) UpdateLocalIPs(iface common.IFaceMapper) (err error) { // 127.0.0.0/8 newIPv4Bitmap[127] = &ipv4LowBitmap{} for i := 0; i < 8192; i++ { + // #nosec G602 -- bitmap is defined as [8192]uint32, loop range is correct newIPv4Bitmap[127].bitmap[i] = 0xFFFFFFFF } diff --git a/client/firewall/uspfilter/localip_test.go b/client/firewall/uspfilter/localip_test.go index 45ac912cd..6653947fa 100644 --- a/client/firewall/uspfilter/localip_test.go +++ b/client/firewall/uspfilter/localip_test.go @@ -218,7 +218,7 @@ func BenchmarkIPChecks(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { // nolint:gosimple - _, _ = mapManager.localIPs[ip.String()] + _ = mapManager.localIPs[ip.String()] } }) @@ -227,7 +227,7 @@ func BenchmarkIPChecks(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { // nolint:gosimple - _, _ = mapManager.localIPs[ip.String()] + _ = mapManager.localIPs[ip.String()] } }) } diff --git a/client/firewall/uspfilter/nat_test.go b/client/firewall/uspfilter/nat_test.go index 400d61020..50743d006 100644 --- a/client/firewall/uspfilter/nat_test.go +++ b/client/firewall/uspfilter/nat_test.go @@ -234,9 +234,10 @@ func TestInboundPortDNATNegative(t *testing.T) { require.False(t, translated, "Packet should NOT be translated for %s", tc.name) d = parsePacket(t, packet) - if tc.protocol == layers.IPProtocolTCP { + switch tc.protocol { + case layers.IPProtocolTCP: require.Equal(t, tc.dstPort, uint16(d.tcp.DstPort), "Port should remain unchanged") - } else if tc.protocol == layers.IPProtocolUDP { + case layers.IPProtocolUDP: require.Equal(t, tc.dstPort, uint16(d.udp.DstPort), "Port should remain unchanged") } }) diff --git a/client/iface/device/device_ios.go b/client/iface/device/device_ios.go index d841ac2fe..aa77cee45 100644 --- a/client/iface/device/device_ios.go +++ b/client/iface/device/device_ios.go @@ -1,6 +1,3 @@ -//go:build ios -// +build ios - package device import ( diff --git a/client/internal/debug/debug_linux.go b/client/internal/debug/debug_linux.go index 39d796fda..aedf88b79 100644 --- a/client/internal/debug/debug_linux.go +++ b/client/internal/debug/debug_linux.go @@ -507,15 +507,13 @@ func formatPayloadWithCmp(p *expr.Payload, cmp *expr.Cmp) string { if p.Base == expr.PayloadBaseNetworkHeader { switch p.Offset { case 12: - if p.Len == 4 { - return fmt.Sprintf("ip saddr %s %s", formatCmpOp(cmp.Op), formatIPBytes(cmp.Data)) - } else if p.Len == 2 { + switch p.Len { + case 4, 2: return fmt.Sprintf("ip saddr %s %s", formatCmpOp(cmp.Op), formatIPBytes(cmp.Data)) } case 16: - if p.Len == 4 { - return fmt.Sprintf("ip daddr %s %s", formatCmpOp(cmp.Op), formatIPBytes(cmp.Data)) - } else if p.Len == 2 { + switch p.Len { + case 4, 2: return fmt.Sprintf("ip daddr %s %s", formatCmpOp(cmp.Op), formatIPBytes(cmp.Data)) } } diff --git a/client/internal/iface.go b/client/internal/iface.go index bd0069c19..a82d87aab 100644 --- a/client/internal/iface.go +++ b/client/internal/iface.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package internal diff --git a/client/internal/routemanager/iface/iface.go b/client/internal/routemanager/iface/iface.go index 57dbec03d..b44d9fa65 100644 --- a/client/internal/routemanager/iface/iface.go +++ b/client/internal/routemanager/iface/iface.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package iface diff --git a/client/internal/routemanager/systemops/systemops_generic.go b/client/internal/routemanager/systemops/systemops_generic.go index 26a548634..ec219c7fe 100644 --- a/client/internal/routemanager/systemops/systemops_generic.go +++ b/client/internal/routemanager/systemops/systemops_generic.go @@ -210,7 +210,8 @@ func (r *SysOps) refreshLocalSubnetsCache() { func (r *SysOps) genericAddVPNRoute(prefix netip.Prefix, intf *net.Interface) error { nextHop := Nexthop{netip.Addr{}, intf} - if prefix == vars.Defaultv4 { + switch prefix { + case vars.Defaultv4: if err := r.addToRouteTable(splitDefaultv4_1, nextHop); err != nil { return err } @@ -233,7 +234,7 @@ func (r *SysOps) genericAddVPNRoute(prefix netip.Prefix, intf *net.Interface) er } return nil - } else if prefix == vars.Defaultv6 { + case vars.Defaultv6: if err := r.addToRouteTable(splitDefaultv6_1, nextHop); err != nil { return fmt.Errorf("add unreachable route split 1: %w", err) } @@ -255,7 +256,8 @@ func (r *SysOps) genericAddVPNRoute(prefix netip.Prefix, intf *net.Interface) er func (r *SysOps) genericRemoveVPNRoute(prefix netip.Prefix, intf *net.Interface) error { nextHop := Nexthop{netip.Addr{}, intf} - if prefix == vars.Defaultv4 { + switch prefix { + case vars.Defaultv4: var result *multierror.Error if err := r.removeFromRouteTable(splitDefaultv4_1, nextHop); err != nil { result = multierror.Append(result, err) @@ -273,7 +275,7 @@ func (r *SysOps) genericRemoveVPNRoute(prefix netip.Prefix, intf *net.Interface) } return nberrors.FormatErrorOrNil(result) - } else if prefix == vars.Defaultv6 { + case vars.Defaultv6: var result *multierror.Error if err := r.removeFromRouteTable(splitDefaultv6_1, nextHop); err != nil { result = multierror.Append(result, err) @@ -283,9 +285,9 @@ func (r *SysOps) genericRemoveVPNRoute(prefix netip.Prefix, intf *net.Interface) } return nberrors.FormatErrorOrNil(result) + default: + return r.removeFromRouteTable(prefix, nextHop) } - - return r.removeFromRouteTable(prefix, nextHop) } func (r *SysOps) setupHooks(initAddresses []net.IP, stateManager *statemanager.Manager) error { diff --git a/client/ios/NetBirdSDK/client.go b/client/ios/NetBirdSDK/client.go index e901386d9..935910fc9 100644 --- a/client/ios/NetBirdSDK/client.go +++ b/client/ios/NetBirdSDK/client.go @@ -76,7 +76,7 @@ type Client struct { loginComplete bool connectClient *internal.ConnectClient // preloadedConfig holds config loaded from JSON (used on tvOS where file writes are blocked) - preloadedConfig *profilemanager.Config + preloadedConfig *profilemanager.Config } // NewClient instantiate a new Client diff --git a/client/server/panic_windows.go b/client/server/panic_windows.go index f441ec9ea..8592f12ad 100644 --- a/client/server/panic_windows.go +++ b/client/server/panic_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package server diff --git a/client/ssh/server/jwt_test.go b/client/ssh/server/jwt_test.go index d36d7cbbf..6eb88accc 100644 --- a/client/ssh/server/jwt_test.go +++ b/client/ssh/server/jwt_test.go @@ -602,12 +602,13 @@ func TestJWTAuthentication(t *testing.T) { require.NoError(t, err) var authMethods []cryptossh.AuthMethod - if tc.token == "valid" { + switch tc.token { + case "valid": token := generateValidJWT(t, privateKey, issuer, audience) authMethods = []cryptossh.AuthMethod{ cryptossh.Password(token), } - } else if tc.token == "invalid" { + case "invalid": invalidToken := "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.invalid" authMethods = []cryptossh.AuthMethod{ cryptossh.Password(invalidToken), diff --git a/client/system/info_android.go b/client/system/info_android.go index 78895bfa8..794ff15ed 100644 --- a/client/system/info_android.go +++ b/client/system/info_android.go @@ -1,6 +1,3 @@ -//go:build android -// +build android - package system import ( diff --git a/client/system/info_darwin.go b/client/system/info_darwin.go index caa344737..4a31920ec 100644 --- a/client/system/info_darwin.go +++ b/client/system/info_darwin.go @@ -1,5 +1,4 @@ //go:build !ios -// +build !ios package system diff --git a/client/system/info_ios.go b/client/system/info_ios.go index 705c37920..322609db4 100644 --- a/client/system/info_ios.go +++ b/client/system/info_ios.go @@ -1,6 +1,3 @@ -//go:build ios -// +build ios - package system import ( diff --git a/client/ui/client_ui.go b/client/ui/client_ui.go index 78934ea95..5d955ed25 100644 --- a/client/ui/client_ui.go +++ b/client/ui/client_ui.go @@ -510,7 +510,7 @@ func (s *serviceClient) saveSettings() { // Continue with default behavior if features can't be retrieved } else if features != nil && features.DisableUpdateSettings { log.Warn("Configuration updates are disabled by daemon") - dialog.ShowError(fmt.Errorf("Configuration updates are disabled by daemon"), s.wSettings) + dialog.ShowError(fmt.Errorf("configuration updates are disabled by daemon"), s.wSettings) return } @@ -540,7 +540,7 @@ func (s *serviceClient) saveSettings() { func (s *serviceClient) validateSettings() error { if s.iPreSharedKey.Text != "" && s.iPreSharedKey.Text != censoredPreSharedKey { if _, err := wgtypes.ParseKey(s.iPreSharedKey.Text); err != nil { - return fmt.Errorf("Invalid Pre-shared Key Value") + return fmt.Errorf("invalid pre-shared key value") } } return nil @@ -549,10 +549,10 @@ func (s *serviceClient) validateSettings() error { func (s *serviceClient) parseNumericSettings() (int64, int64, error) { port, err := strconv.ParseInt(s.iInterfacePort.Text, 10, 64) if err != nil { - return 0, 0, errors.New("Invalid interface port") + return 0, 0, errors.New("invalid interface port") } if port < 1 || port > 65535 { - return 0, 0, errors.New("Invalid interface port: out of range 1-65535") + return 0, 0, errors.New("invalid interface port: out of range 1-65535") } var mtu int64 @@ -560,7 +560,7 @@ func (s *serviceClient) parseNumericSettings() (int64, int64, error) { if mtuText != "" { mtu, err = strconv.ParseInt(mtuText, 10, 64) if err != nil { - return 0, 0, errors.New("Invalid MTU value") + return 0, 0, errors.New("invalid MTU value") } if mtu < iface.MinMTU || mtu > iface.MaxMTU { return 0, 0, fmt.Errorf("MTU must be between %d and %d bytes", iface.MinMTU, iface.MaxMTU) @@ -645,7 +645,7 @@ func (s *serviceClient) buildSetConfigRequest(iMngURL string, port, mtu int64) ( if sshJWTCacheTTLText != "" { sshJWTCacheTTL, err := strconv.ParseInt(sshJWTCacheTTLText, 10, 32) if err != nil { - return nil, errors.New("Invalid SSH JWT Cache TTL value") + return nil, errors.New("invalid SSH JWT Cache TTL value") } if sshJWTCacheTTL < 0 || sshJWTCacheTTL > maxSSHJWTCacheTTL { return nil, fmt.Errorf("SSH JWT Cache TTL must be between 0 and %d seconds", maxSSHJWTCacheTTL) diff --git a/client/ui/signal_windows.go b/client/ui/signal_windows.go index ca98be526..58f46374f 100644 --- a/client/ui/signal_windows.go +++ b/client/ui/signal_windows.go @@ -164,7 +164,7 @@ func sendShowWindowSignal(pid int32) error { err = windows.SetEvent(eventHandle) if err != nil { - return fmt.Errorf("Error setting event: %w", err) + return fmt.Errorf("error setting event: %w", err) } return nil diff --git a/go.mod b/go.mod index 23cf0f37d..1b4612da3 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/netbirdio/netbird -go 1.24.10 +go 1.25 + +toolchain go1.25.5 require ( cunicu.li/go-rosenpass v0.4.0 @@ -81,7 +83,7 @@ require ( github.com/pion/turn/v3 v3.0.1 github.com/pkg/sftp v1.13.9 github.com/prometheus/client_golang v1.23.2 - github.com/quic-go/quic-go v0.49.1 + github.com/quic-go/quic-go v0.55.0 github.com/redis/go-redis/v9 v9.7.3 github.com/rs/xid v1.3.0 github.com/shirou/gopsutil/v3 v3.24.4 @@ -103,7 +105,7 @@ require ( go.opentelemetry.io/otel/exporters/prometheus v0.48.0 go.opentelemetry.io/otel/metric v1.38.0 go.opentelemetry.io/otel/sdk/metric v1.38.0 - go.uber.org/mock v0.5.0 + go.uber.org/mock v0.5.2 go.uber.org/zap v1.27.0 goauthentik.io/api/v3 v3.2023051.3 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 @@ -186,12 +188,10 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-sql-driver/mysql v1.9.3 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/go-text/render v0.2.0 // indirect github.com/go-text/typesetting v0.2.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/btree v1.1.2 // indirect - github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect github.com/googleapis/gax-go/v2 v2.15.0 // indirect diff --git a/go.sum b/go.sum index 354c7732e..60b6304c3 100644 --- a/go.sum +++ b/go.sum @@ -101,9 +101,6 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk= github.com/cilium/ebpf v0.15.0/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso= github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE= @@ -286,7 +283,6 @@ github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= @@ -491,8 +487,8 @@ github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9Z github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= -github.com/quic-go/quic-go v0.49.1 h1:e5JXpUyF0f2uFjckQzD8jTghZrOUK1xxDqqZhlwixo0= -github.com/quic-go/quic-go v0.49.1/go.mod h1:s2wDnmCdooUQBmQfpUSTCYBl1/D4FcqbULMMkASvR6s= +github.com/quic-go/quic-go v0.55.0 h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9Mk= +github.com/quic-go/quic-go v0.55.0/go.mod h1:DR51ilwU1uE164KuWXhinFcKWGlEjzys2l8zUl5Ss1U= github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= @@ -622,8 +618,8 @@ go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lI go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= -go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= +go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= +go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= @@ -717,7 +713,6 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/management/cmd/management.go b/management/cmd/management.go index 81a154510..376adda20 100644 --- a/management/cmd/management.go +++ b/management/cmd/management.go @@ -64,7 +64,7 @@ var ( config.HttpConfig.IdpSignKeyRefreshEnabled = idpSignKeyRefreshEnabled } - tlsEnabled := false + var tlsEnabled bool if mgmtLetsencryptDomain != "" || (config.HttpConfig.CertFile != "" && config.HttpConfig.CertKey != "") { tlsEnabled = true } diff --git a/management/internals/shared/grpc/loginfilter_test.go b/management/internals/shared/grpc/loginfilter_test.go index 8b26e14ab..797879ae7 100644 --- a/management/internals/shared/grpc/loginfilter_test.go +++ b/management/internals/shared/grpc/loginfilter_test.go @@ -85,6 +85,7 @@ func (s *LoginFilterTestSuite) TestBanDurationIncreasesExponentially() { s.True(s.filter.logged[pubKey].isBanned) s.Equal(2, s.filter.logged[pubKey].banLevel) secondBanDuration := s.filter.logged[pubKey].banExpiresAt.Sub(s.filter.logged[pubKey].lastSeen) + // nolint expectedSecondDuration := time.Duration(float64(baseBan) * math.Pow(2, 1)) s.InDelta(expectedSecondDuration, secondBanDuration, float64(time.Millisecond)) } diff --git a/management/server/account.go b/management/server/account.go index 29415b038..9785f446c 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -1006,7 +1006,7 @@ func (am *DefaultAccountManager) isCacheFresh(ctx context.Context, accountUsers for user, loggedInOnce := range accountUsers { if datum, ok := userDataMap[user]; ok { // check if the matching user data has a pending invite and if the user has logged in once, forcing the cache to be refreshed - if datum.AppMetadata.WTPendingInvite != nil && *datum.AppMetadata.WTPendingInvite && loggedInOnce == true { //nolint:gosimple + if datum.AppMetadata.WTPendingInvite != nil && *datum.AppMetadata.WTPendingInvite && loggedInOnce == true { //nolint log.WithContext(ctx).Infof("user %s has a pending invite and has logged in once, cache invalid", user) return false } diff --git a/management/server/account_test.go b/management/server/account_test.go index 59d6e4928..32d2b4ea3 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -753,7 +753,7 @@ func TestAccountManager_SetOrUpdateDomain(t *testing.T) { t.Fatalf("expected to create an account for a user %s", userId) } - if account != nil && account.Domain != domain { + if account.Domain != domain { t.Errorf("setting account domain failed, expected %s, got %s", domain, account.Domain) } @@ -768,7 +768,7 @@ func TestAccountManager_SetOrUpdateDomain(t *testing.T) { t.Fatalf("expected to get an account for a user %s", userId) } - if account != nil && account.Domain != domain { + if account.Domain != domain { t.Errorf("updating domain. expected %s got %s", domain, account.Domain) } } diff --git a/management/server/http/handlers/policies/posture_checks_handler_test.go b/management/server/http/handlers/policies/posture_checks_handler_test.go index 35198da32..a5999f6c7 100644 --- a/management/server/http/handlers/policies/posture_checks_handler_test.go +++ b/management/server/http/handlers/policies/posture_checks_handler_test.go @@ -46,7 +46,7 @@ func initPostureChecksTestData(postureChecks ...*posture.Checks) *postureChecksH testPostureChecks[postureChecks.ID] = postureChecks if err := postureChecks.Validate(); err != nil { - return nil, status.Errorf(status.InvalidArgument, "%s", err.Error()) //nolint + return nil, status.Errorf(status.InvalidArgument, "%v", err) //nolint } return postureChecks, nil diff --git a/management/server/http/testing/benchmarks/peers_handler_benchmark_test.go b/management/server/http/testing/benchmarks/peers_handler_benchmark_test.go index 3fe3fe809..3345a034b 100644 --- a/management/server/http/testing/benchmarks/peers_handler_benchmark_test.go +++ b/management/server/http/testing/benchmarks/peers_handler_benchmark_test.go @@ -1,5 +1,4 @@ //go:build benchmark -// +build benchmark package benchmarks diff --git a/management/server/http/testing/benchmarks/setupkeys_handler_benchmark_test.go b/management/server/http/testing/benchmarks/setupkeys_handler_benchmark_test.go index 36b226db0..ca25861dd 100644 --- a/management/server/http/testing/benchmarks/setupkeys_handler_benchmark_test.go +++ b/management/server/http/testing/benchmarks/setupkeys_handler_benchmark_test.go @@ -1,5 +1,4 @@ //go:build benchmark -// +build benchmark package benchmarks diff --git a/management/server/http/testing/benchmarks/users_handler_benchmark_test.go b/management/server/http/testing/benchmarks/users_handler_benchmark_test.go index 2868a20bd..b13773268 100644 --- a/management/server/http/testing/benchmarks/users_handler_benchmark_test.go +++ b/management/server/http/testing/benchmarks/users_handler_benchmark_test.go @@ -1,5 +1,4 @@ //go:build benchmark -// +build benchmark package benchmarks diff --git a/management/server/http/testing/integration/setupkeys_handler_integration_test.go b/management/server/http/testing/integration/setupkeys_handler_integration_test.go index 1079de4aa..c1a9829da 100644 --- a/management/server/http/testing/integration/setupkeys_handler_integration_test.go +++ b/management/server/http/testing/integration/setupkeys_handler_integration_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package integration diff --git a/management/server/idp/pocketid.go b/management/server/idp/pocketid.go index 38a5cc67f..d8d764830 100644 --- a/management/server/idp/pocketid.go +++ b/management/server/idp/pocketid.go @@ -121,7 +121,7 @@ func NewPocketIdManager(config PocketIdClientConfig, appMetrics telemetry.AppMet func (p *PocketIdManager) request(ctx context.Context, method, resource string, query *url.Values, body string) ([]byte, error) { var MethodsWithBody = []string{http.MethodPost, http.MethodPut} if !slices.Contains(MethodsWithBody, method) && body != "" { - return nil, fmt.Errorf("Body provided to unsupported method: %s", method) + return nil, fmt.Errorf("body provided to unsupported method: %s", method) } reqURL := fmt.Sprintf("%s/api/%s", p.managementEndpoint, resource) @@ -301,7 +301,7 @@ func (p *PocketIdManager) CreateUser(ctx context.Context, email, name, accountID if p.appMetrics != nil { p.appMetrics.IDPMetrics().CountCreateUser() } - var pending bool = true + pending := true ret := &UserData{ Email: email, Name: name, diff --git a/management/server/idp/zitadel.go b/management/server/idp/zitadel.go index 24228346a..8db3c4796 100644 --- a/management/server/idp/zitadel.go +++ b/management/server/idp/zitadel.go @@ -357,7 +357,7 @@ func (zm *ZitadelManager) CreateUser(ctx context.Context, email, name, accountID return nil, err } - var pending bool = true + pending := true ret := &UserData{ Email: email, Name: name, diff --git a/management/server/migration/migration.go b/management/server/migration/migration.go index 78f4afbd5..7fcb98ccb 100644 --- a/management/server/migration/migration.go +++ b/management/server/migration/migration.go @@ -393,7 +393,7 @@ func CreateIndexIfNotExists[T any](ctx context.Context, db *gorm.DB, indexName s return fmt.Errorf("failed to parse model schema: %w", err) } tableName := stmt.Schema.Table - dialect := db.Dialector.Name() + dialect := db.Name() if db.Migrator().HasIndex(&model, indexName) { log.WithContext(ctx).Infof("index %s already exists on table %s", indexName, tableName) diff --git a/management/server/nameserver.go b/management/server/nameserver.go index f278e1761..a3eb4ae2e 100644 --- a/management/server/nameserver.go +++ b/management/server/nameserver.go @@ -20,7 +20,7 @@ import ( const domainPattern = `^(?i)[a-z0-9]+([\-\.]{1}[a-z0-9]+)*[*.a-z]{1,}$` -var invalidDomainName = errors.New("invalid domain name") +var errInvalidDomainName = errors.New("invalid domain name") // GetNameServerGroup gets a nameserver group object from account and nameserver group IDs func (am *DefaultAccountManager) GetNameServerGroup(ctx context.Context, accountID, userID, nsGroupID string) (*nbdns.NameServerGroup, error) { @@ -314,7 +314,7 @@ func validateDomain(domain string) error { _, valid := dns.IsDomainName(domain) if !valid { - return invalidDomainName + return errInvalidDomainName } return nil diff --git a/management/server/posture_checks.go b/management/server/posture_checks.go index 9a743eb8c..ba901c771 100644 --- a/management/server/posture_checks.go +++ b/management/server/posture_checks.go @@ -158,7 +158,7 @@ func arePostureCheckChangesAffectPeers(ctx context.Context, transaction store.St // validatePostureChecks validates the posture checks. func validatePostureChecks(ctx context.Context, transaction store.Store, accountID string, postureChecks *posture.Checks) error { if err := postureChecks.Validate(); err != nil { - return status.Errorf(status.InvalidArgument, "%s", err.Error()) //nolint + return status.Errorf(status.InvalidArgument, "%v", err.Error()) //nolint } // If the posture check already has an ID, verify its existence in the store. diff --git a/management/server/store/sql_store_get_account_test.go b/management/server/store/sql_store_get_account_test.go index 8ff04d68a..69e346ae7 100644 --- a/management/server/store/sql_store_get_account_test.go +++ b/management/server/store/sql_store_get_account_test.go @@ -997,9 +997,10 @@ func TestGetAccount_ComprehensiveFieldValidation(t *testing.T) { // Find posture checks by ID var pc1, pc2 *posture.Checks for _, pc := range retrievedAccount.PostureChecks { - if pc.ID == postureCheckID1 { + switch pc.ID { + case postureCheckID1: pc1 = pc - } else if pc.ID == postureCheckID2 { + case postureCheckID2: pc2 = pc } } diff --git a/management/server/store/sql_store_test.go b/management/server/store/sql_store_test.go index 97aa81b12..728d67273 100644 --- a/management/server/store/sql_store_test.go +++ b/management/server/store/sql_store_test.go @@ -30,7 +30,6 @@ import ( "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/management/server/util" nbroute "github.com/netbirdio/netbird/route" - route2 "github.com/netbirdio/netbird/route" "github.com/netbirdio/netbird/shared/management/status" "github.com/netbirdio/netbird/util/crypt" ) @@ -110,12 +109,12 @@ func runLargeTest(t *testing.T, store Store) { AccountID: account.Id, } account.Users[user.Id] = user - route := &route2.Route{ - ID: route2.ID(fmt.Sprintf("network-id-%d", n)), + route := &nbroute.Route{ + ID: nbroute.ID(fmt.Sprintf("network-id-%d", n)), Description: "base route", - NetID: route2.NetID(fmt.Sprintf("network-id-%d", n)), + NetID: nbroute.NetID(fmt.Sprintf("network-id-%d", n)), Network: netip.MustParsePrefix(netIP.String() + "/24"), - NetworkType: route2.IPv4Network, + NetworkType: nbroute.IPv4Network, Metric: 9999, Masquerade: false, Enabled: true, @@ -689,7 +688,7 @@ func TestMigrate(t *testing.T) { require.NoError(t, err, "Failed to insert Gob data") type route struct { - route2.Route + nbroute.Route Network netip.Prefix `gorm:"serializer:gob"` PeerGroups []string `gorm:"serializer:gob"` } @@ -698,7 +697,7 @@ func TestMigrate(t *testing.T) { rt := &route{ Network: prefix, PeerGroups: []string{"group1", "group2"}, - Route: route2.Route{ID: "route1"}, + Route: nbroute.Route{ID: "route1"}, } err = store.(*SqlStore).db.Save(rt).Error @@ -714,7 +713,7 @@ func TestMigrate(t *testing.T) { require.NoError(t, err, "Failed to delete Gob data") prefix = netip.MustParsePrefix("12.0.0.0/24") - nRT := &route2.Route{ + nRT := &nbroute.Route{ Network: prefix, ID: "route2", Peer: "peer-id", @@ -3544,13 +3543,13 @@ func TestSqlStore_SaveRoute(t *testing.T) { accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" - route := &route2.Route{ + route := &nbroute.Route{ ID: "route-id", AccountID: accountID, Network: netip.MustParsePrefix("10.10.0.0/16"), NetID: "netID", PeerGroups: []string{"routeA"}, - NetworkType: route2.IPv4Network, + NetworkType: nbroute.IPv4Network, Masquerade: true, Metric: 9999, Enabled: true, diff --git a/management/server/testutil/store.go b/management/server/testutil/store.go index db418c45b..f92153399 100644 --- a/management/server/testutil/store.go +++ b/management/server/testutil/store.go @@ -1,5 +1,4 @@ //go:build !ios -// +build !ios package testutil diff --git a/management/server/testutil/store_ios.go b/management/server/testutil/store_ios.go index c3dd839d3..9e3b5ce4a 100644 --- a/management/server/testutil/store_ios.go +++ b/management/server/testutil/store_ios.go @@ -1,5 +1,4 @@ //go:build ios -// +build ios package testutil diff --git a/relay/cmd/pprof.go b/relay/cmd/pprof.go index 37efd35f0..c041c6ea9 100644 --- a/relay/cmd/pprof.go +++ b/relay/cmd/pprof.go @@ -1,5 +1,4 @@ //go:build pprof -// +build pprof package cmd diff --git a/relay/server/listener/quic/conn.go b/relay/server/listener/quic/conn.go index 909ec1cc6..6e2201bf7 100644 --- a/relay/server/listener/quic/conn.go +++ b/relay/server/listener/quic/conn.go @@ -12,14 +12,14 @@ import ( ) type Conn struct { - session quic.Connection + session *quic.Conn closed bool closedMu sync.Mutex ctx context.Context ctxCancel context.CancelFunc } -func NewConn(session quic.Connection) *Conn { +func NewConn(session *quic.Conn) *Conn { ctx, cancel := context.WithCancel(context.Background()) return &Conn{ session: session, diff --git a/relay/server/listener/ws/conn.go b/relay/server/listener/ws/conn.go index 3ec08945b..d5bce56f7 100644 --- a/relay/server/listener/ws/conn.go +++ b/relay/server/listener/ws/conn.go @@ -88,7 +88,7 @@ func (c *Conn) Close() error { c.closedMu.Lock() c.closed = true c.closedMu.Unlock() - return c.Conn.CloseNow() + return c.CloseNow() } func (c *Conn) isClosed() bool { diff --git a/shared/management/client/rest/accounts_test.go b/shared/management/client/rest/accounts_test.go index be0066488..e44ada298 100644 --- a/shared/management/client/rest/accounts_test.go +++ b/shared/management/client/rest/accounts_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/client.go b/shared/management/client/rest/client.go index 4d1de2631..77c960435 100644 --- a/shared/management/client/rest/client.go +++ b/shared/management/client/rest/client.go @@ -161,7 +161,7 @@ func (c *Client) NewRequest(ctx context.Context, method, path string, body io.Re func parseResponse[T any](resp *http.Response) (T, error) { var ret T if resp.Body == nil { - return ret, fmt.Errorf("Body missing, HTTP Error code %d", resp.StatusCode) + return ret, fmt.Errorf("body missing, HTTP Error code %d", resp.StatusCode) } bs, err := io.ReadAll(resp.Body) if err != nil { @@ -169,7 +169,7 @@ func parseResponse[T any](resp *http.Response) (T, error) { } err = json.Unmarshal(bs, &ret) if err != nil { - return ret, fmt.Errorf("Error code %d, error unmarshalling body: %w", resp.StatusCode, err) + return ret, fmt.Errorf("error code %d, error unmarshalling body: %w", resp.StatusCode, err) } return ret, nil diff --git a/shared/management/client/rest/client_test.go b/shared/management/client/rest/client_test.go index 17df8dd8b..2b3e6cabe 100644 --- a/shared/management/client/rest/client_test.go +++ b/shared/management/client/rest/client_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/dns_test.go b/shared/management/client/rest/dns_test.go index 58082abe8..8e8633f8d 100644 --- a/shared/management/client/rest/dns_test.go +++ b/shared/management/client/rest/dns_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/events_test.go b/shared/management/client/rest/events_test.go index b28390001..1ee10eb6e 100644 --- a/shared/management/client/rest/events_test.go +++ b/shared/management/client/rest/events_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/geo_test.go b/shared/management/client/rest/geo_test.go index fcb4808a1..2410f2641 100644 --- a/shared/management/client/rest/geo_test.go +++ b/shared/management/client/rest/geo_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/groups_test.go b/shared/management/client/rest/groups_test.go index fcd759e9a..51fd0c0ee 100644 --- a/shared/management/client/rest/groups_test.go +++ b/shared/management/client/rest/groups_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/impersonation_test.go b/shared/management/client/rest/impersonation_test.go index 4fb8f24eb..d257d0987 100644 --- a/shared/management/client/rest/impersonation_test.go +++ b/shared/management/client/rest/impersonation_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/networks_test.go b/shared/management/client/rest/networks_test.go index ca2a294ae..2bf1a0d3b 100644 --- a/shared/management/client/rest/networks_test.go +++ b/shared/management/client/rest/networks_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/peers_test.go b/shared/management/client/rest/peers_test.go index a45f9d6ec..c464de7ed 100644 --- a/shared/management/client/rest/peers_test.go +++ b/shared/management/client/rest/peers_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/policies_test.go b/shared/management/client/rest/policies_test.go index a19d0a728..e948e2949 100644 --- a/shared/management/client/rest/policies_test.go +++ b/shared/management/client/rest/policies_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/posturechecks_test.go b/shared/management/client/rest/posturechecks_test.go index 9b1b618df..d74d455a5 100644 --- a/shared/management/client/rest/posturechecks_test.go +++ b/shared/management/client/rest/posturechecks_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/routes_test.go b/shared/management/client/rest/routes_test.go index 9452a07fc..5ee2def24 100644 --- a/shared/management/client/rest/routes_test.go +++ b/shared/management/client/rest/routes_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/setupkeys_test.go b/shared/management/client/rest/setupkeys_test.go index 0fa782da5..bd8d3f835 100644 --- a/shared/management/client/rest/setupkeys_test.go +++ b/shared/management/client/rest/setupkeys_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/tokens_test.go b/shared/management/client/rest/tokens_test.go index ce3748751..5af41eb73 100644 --- a/shared/management/client/rest/tokens_test.go +++ b/shared/management/client/rest/tokens_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/users_test.go b/shared/management/client/rest/users_test.go index d53c4eb6a..68815d4f9 100644 --- a/shared/management/client/rest/users_test.go +++ b/shared/management/client/rest/users_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/relay/client/client_test.go b/shared/relay/client/client_test.go index 8fe5f04f4..9820d642f 100644 --- a/shared/relay/client/client_test.go +++ b/shared/relay/client/client_test.go @@ -19,15 +19,7 @@ import ( ) var ( - hmacTokenStore = &hmac.TokenStore{} - serverListenAddr = "127.0.0.1:1234" - serverURL = "rel://127.0.0.1:1234" - serverCfg = server.Config{ - Meter: otel.Meter(""), - ExposedAddress: serverURL, - TLSSupport: false, - AuthValidator: &allow.Auth{}, - } + hmacTokenStore = &hmac.TokenStore{} ) func TestMain(m *testing.M) { @@ -36,8 +28,20 @@ func TestMain(m *testing.M) { os.Exit(code) } +// newClientTestServerConfig creates a new server config for client testing with the given address +func newClientTestServerConfig(address string) server.Config { + return server.Config{ + Meter: otel.Meter(""), + ExposedAddress: "rel://" + address, + TLSSupport: false, + AuthValidator: &allow.Auth{}, + } +} + func TestClient(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:50001" + serverCfg := newClientTestServerConfig(serverListenAddr) srv, err := server.NewServer(serverCfg) if err != nil { @@ -64,7 +68,7 @@ func TestClient(t *testing.T) { t.Fatalf("failed to start server: %s", err) } t.Log("alice connecting to server") - clientAlice := NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -72,7 +76,7 @@ func TestClient(t *testing.T) { defer clientAlice.Close() t.Log("placeholder connecting to server") - clientPlaceHolder := NewClient(serverURL, hmacTokenStore, "clientPlaceHolder", iface.DefaultMTU) + clientPlaceHolder := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "clientPlaceHolder", iface.DefaultMTU) err = clientPlaceHolder.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -80,7 +84,7 @@ func TestClient(t *testing.T) { defer clientPlaceHolder.Close() t.Log("Bob connecting to server") - clientBob := NewClient(serverURL, hmacTokenStore, "bob", iface.DefaultMTU) + clientBob := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "bob", iface.DefaultMTU) err = clientBob.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -120,6 +124,8 @@ func TestClient(t *testing.T) { func TestRegistration(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:50101" + serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv, err := server.NewServer(serverCfg) if err != nil { @@ -138,7 +144,7 @@ func TestRegistration(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - clientAlice := NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { _ = srv.Shutdown(ctx) @@ -157,7 +163,7 @@ func TestRegistration(t *testing.T) { func TestRegistrationTimeout(t *testing.T) { ctx := context.Background() fakeUDPListener, err := net.ListenUDP("udp", &net.UDPAddr{ - Port: 1234, + Port: 50201, IP: net.ParseIP("0.0.0.0"), }) if err != nil { @@ -168,7 +174,7 @@ func TestRegistrationTimeout(t *testing.T) { }(fakeUDPListener) fakeTCPListener, err := net.ListenTCP("tcp", &net.TCPAddr{ - Port: 1234, + Port: 50201, IP: net.ParseIP("0.0.0.0"), }) if err != nil { @@ -178,7 +184,7 @@ func TestRegistrationTimeout(t *testing.T) { _ = fakeTCPListener.Close() }(fakeTCPListener) - clientAlice := NewClient("127.0.0.1:1234", hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient("127.0.0.1:50201", hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err == nil { t.Errorf("failed to connect to server: %s", err) @@ -192,6 +198,8 @@ func TestRegistrationTimeout(t *testing.T) { func TestEcho(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:50301" + serverCfg := newClientTestServerConfig(serverListenAddr) idAlice := "alice" idBob := "bob" srvCfg := server.ListenerConfig{Address: serverListenAddr} @@ -219,7 +227,7 @@ func TestEcho(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - clientAlice := NewClient(serverURL, hmacTokenStore, idAlice, iface.DefaultMTU) + clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, idAlice, iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -231,7 +239,7 @@ func TestEcho(t *testing.T) { } }() - clientBob := NewClient(serverURL, hmacTokenStore, idBob, iface.DefaultMTU) + clientBob := NewClient(serverCfg.ExposedAddress, hmacTokenStore, idBob, iface.DefaultMTU) err = clientBob.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -282,6 +290,8 @@ func TestEcho(t *testing.T) { func TestBindToUnavailabePeer(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:50401" + serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv, err := server.NewServer(serverCfg) @@ -309,7 +319,7 @@ func TestBindToUnavailabePeer(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - clientAlice := NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Errorf("failed to connect to server: %s", err) @@ -328,6 +338,8 @@ func TestBindToUnavailabePeer(t *testing.T) { func TestBindReconnect(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:50501" + serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv, err := server.NewServer(serverCfg) @@ -355,13 +367,13 @@ func TestBindReconnect(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - clientAlice := NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) } - clientBob := NewClient(serverURL, hmacTokenStore, "bob", iface.DefaultMTU) + clientBob := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "bob", iface.DefaultMTU) err = clientBob.Connect(ctx) if err != nil { t.Errorf("failed to connect to server: %s", err) @@ -383,7 +395,7 @@ func TestBindReconnect(t *testing.T) { t.Errorf("failed to close client: %s", err) } - clientAlice = NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice = NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Errorf("failed to connect to server: %s", err) @@ -429,6 +441,8 @@ func TestBindReconnect(t *testing.T) { func TestCloseConn(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:50601" + serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv, err := server.NewServer(serverCfg) @@ -456,13 +470,13 @@ func TestCloseConn(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - bob := NewClient(serverURL, hmacTokenStore, "bob", iface.DefaultMTU) + bob := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "bob", iface.DefaultMTU) err = bob.Connect(ctx) if err != nil { t.Errorf("failed to connect to server: %s", err) } - clientAlice := NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Errorf("failed to connect to server: %s", err) @@ -492,6 +506,8 @@ func TestCloseConn(t *testing.T) { func TestCloseRelayConn(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:50701" + serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv, err := server.NewServer(serverCfg) @@ -518,13 +534,13 @@ func TestCloseRelayConn(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - bob := NewClient(serverURL, hmacTokenStore, "bob", iface.DefaultMTU) + bob := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "bob", iface.DefaultMTU) err = bob.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) } - clientAlice := NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -550,6 +566,8 @@ func TestCloseRelayConn(t *testing.T) { func TestCloseByServer(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:50801" + serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv1, err := server.NewServer(serverCfg) @@ -572,7 +590,7 @@ func TestCloseByServer(t *testing.T) { idAlice := "alice" log.Debugf("connect by alice") - relayClient := NewClient(serverURL, hmacTokenStore, idAlice, iface.DefaultMTU) + relayClient := NewClient(serverCfg.ExposedAddress, hmacTokenStore, idAlice, iface.DefaultMTU) if err = relayClient.Connect(ctx); err != nil { log.Fatalf("failed to connect to server: %s", err) } @@ -607,6 +625,8 @@ func TestCloseByServer(t *testing.T) { func TestCloseByClient(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:50901" + serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv, err := server.NewServer(serverCfg) @@ -628,7 +648,7 @@ func TestCloseByClient(t *testing.T) { idAlice := "alice" log.Debugf("connect by alice") - relayClient := NewClient(serverURL, hmacTokenStore, idAlice, iface.DefaultMTU) + relayClient := NewClient(serverCfg.ExposedAddress, hmacTokenStore, idAlice, iface.DefaultMTU) err = relayClient.Connect(ctx) if err != nil { log.Fatalf("failed to connect to server: %s", err) @@ -652,6 +672,8 @@ func TestCloseByClient(t *testing.T) { func TestCloseNotDrainedChannel(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:51001" + serverCfg := newClientTestServerConfig(serverListenAddr) idAlice := "alice" idBob := "bob" srvCfg := server.ListenerConfig{Address: serverListenAddr} @@ -679,7 +701,7 @@ func TestCloseNotDrainedChannel(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - clientAlice := NewClient(serverURL, hmacTokenStore, idAlice, iface.DefaultMTU) + clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, idAlice, iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -691,7 +713,7 @@ func TestCloseNotDrainedChannel(t *testing.T) { } }() - clientBob := NewClient(serverURL, hmacTokenStore, idBob, iface.DefaultMTU) + clientBob := NewClient(serverCfg.ExposedAddress, hmacTokenStore, idBob, iface.DefaultMTU) err = clientBob.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) diff --git a/shared/relay/client/dialer/quic/conn.go b/shared/relay/client/dialer/quic/conn.go index 9243605b5..1d90d7139 100644 --- a/shared/relay/client/dialer/quic/conn.go +++ b/shared/relay/client/dialer/quic/conn.go @@ -30,11 +30,11 @@ func (a Addr) String() string { } type Conn struct { - session quic.Connection + session *quic.Conn ctx context.Context } -func NewConn(session quic.Connection) net.Conn { +func NewConn(session *quic.Conn) net.Conn { return &Conn{ session: session, ctx: context.Background(), diff --git a/shared/relay/client/manager_test.go b/shared/relay/client/manager_test.go index f00b35707..fb91f7682 100644 --- a/shared/relay/client/manager_test.go +++ b/shared/relay/client/manager_test.go @@ -13,6 +13,16 @@ import ( "github.com/netbirdio/netbird/shared/relay/auth/allow" ) +// newManagerTestServerConfig creates a new server config for manager testing with the given address +func newManagerTestServerConfig(address string) server.Config { + return server.Config{ + Meter: otel.Meter(""), + ExposedAddress: address, + TLSSupport: false, + AuthValidator: &allow.Auth{}, + } +} + func TestEmptyURL(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -27,15 +37,10 @@ func TestForeignConn(t *testing.T) { ctx := context.Background() lstCfg1 := server.ListenerConfig{ - Address: "localhost:1234", + Address: "localhost:52101", } - srv1, err := server.NewServer(server.Config{ - Meter: otel.Meter(""), - ExposedAddress: lstCfg1.Address, - TLSSupport: false, - AuthValidator: &allow.Auth{}, - }) + srv1, err := server.NewServer(newManagerTestServerConfig(lstCfg1.Address)) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -59,14 +64,9 @@ func TestForeignConn(t *testing.T) { } srvCfg2 := server.ListenerConfig{ - Address: "localhost:2234", + Address: "localhost:52102", } - srv2, err := server.NewServer(server.Config{ - Meter: otel.Meter(""), - ExposedAddress: srvCfg2.Address, - TLSSupport: false, - AuthValidator: &allow.Auth{}, - }) + srv2, err := server.NewServer(newManagerTestServerConfig(srvCfg2.Address)) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -144,9 +144,9 @@ func TestForeginConnClose(t *testing.T) { ctx := context.Background() srvCfg1 := server.ListenerConfig{ - Address: "localhost:1234", + Address: "localhost:52201", } - srv1, err := server.NewServer(serverCfg) + srv1, err := server.NewServer(newManagerTestServerConfig(srvCfg1.Address)) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -170,9 +170,9 @@ func TestForeginConnClose(t *testing.T) { } srvCfg2 := server.ListenerConfig{ - Address: "localhost:2234", + Address: "localhost:52202", } - srv2, err := server.NewServer(serverCfg) + srv2, err := server.NewServer(newManagerTestServerConfig(srvCfg2.Address)) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -225,9 +225,9 @@ func TestForeignAutoClose(t *testing.T) { keepUnusedServerTime = 2 * time.Second srvCfg1 := server.ListenerConfig{ - Address: "localhost:1234", + Address: "localhost:52301", } - srv1, err := server.NewServer(serverCfg) + srv1, err := server.NewServer(newManagerTestServerConfig(srvCfg1.Address)) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -252,9 +252,9 @@ func TestForeignAutoClose(t *testing.T) { } srvCfg2 := server.ListenerConfig{ - Address: "localhost:2234", + Address: "localhost:52302", } - srv2, err := server.NewServer(serverCfg) + srv2, err := server.NewServer(newManagerTestServerConfig(srvCfg2.Address)) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -327,9 +327,9 @@ func TestAutoReconnect(t *testing.T) { ctx := context.Background() srvCfg := server.ListenerConfig{ - Address: "localhost:1234", + Address: "localhost:52401", } - srv, err := server.NewServer(serverCfg) + srv, err := server.NewServer(newManagerTestServerConfig(srvCfg.Address)) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -397,14 +397,9 @@ func TestNotifierDoubleAdd(t *testing.T) { ctx := context.Background() listenerCfg1 := server.ListenerConfig{ - Address: "localhost:1234", + Address: "localhost:52501", } - srv, err := server.NewServer(server.Config{ - Meter: otel.Meter(""), - ExposedAddress: listenerCfg1.Address, - TLSSupport: false, - AuthValidator: &allow.Auth{}, - }) + srv, err := server.NewServer(newManagerTestServerConfig(listenerCfg1.Address)) if err != nil { t.Fatalf("failed to create server: %s", err) } diff --git a/signal/cmd/run.go b/signal/cmd/run.go index bf8f8e327..d7662a886 100644 --- a/signal/cmd/run.go +++ b/signal/cmd/run.go @@ -73,7 +73,7 @@ var ( // detect whether user specified a port userPort := cmd.Flag("port").Changed - tlsEnabled := false + var tlsEnabled bool if signalLetsencryptDomain != "" || (signalCertFile != "" && signalCertKey != "") { tlsEnabled = true } @@ -259,8 +259,8 @@ func grpcHandlerFunc(grpcServer *grpc.Server, meter metric.Meter) http.Handler { wsProxy := wsproxyserver.New(grpcServer, wsproxyserver.WithOTelMeter(meter)) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch { - case r.URL.Path == wsproxy.ProxyPath+wsproxy.SignalComponent: + switch r.URL.Path { + case wsproxy.ProxyPath + wsproxy.SignalComponent: wsProxy.Handler().ServeHTTP(w, r) default: grpcServer.ServeHTTP(w, r) diff --git a/util/syslog_nonwindows.go b/util/syslog_nonwindows.go index 6ffbcb8be..328bb8b1c 100644 --- a/util/syslog_nonwindows.go +++ b/util/syslog_nonwindows.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package util From 24df442198f179c8b24ce8eedefa828dec0f566f Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Wed, 7 Jan 2026 21:02:20 +0300 Subject: [PATCH 027/374] Revert "[relay] Update GO version and QUIC version (#4736)" (#5055) This reverts commit 8722b79799ae5db6e58961aebccbcb19e7cab7aa. --- .devcontainer/Dockerfile | 14 +- .github/workflows/golang-test-freebsd.yml | 2 +- .github/workflows/golang-test-linux.yml | 4 +- .github/workflows/golangci-lint.yml | 7 +- .github/workflows/release.yml | 2 +- .github/workflows/wasm-build-validation.yml | 13 +- .golangci.yaml | 257 +++++++++--------- client/cmd/debug.go | 1 - client/cmd/login.go | 2 - client/cmd/pprof.go | 1 + client/cmd/root.go | 1 - client/cmd/status.go | 1 - client/cmd/testutil_test.go | 3 + client/cmd/up.go | 1 - client/firewall/iptables/acl_linux.go | 5 +- .../firewall/iptables/manager_linux_test.go | 2 +- .../firewall/nftables/manager_linux_test.go | 13 +- client/firewall/uspfilter/filter.go | 2 +- client/firewall/uspfilter/localip.go | 1 - client/firewall/uspfilter/localip_test.go | 4 +- client/firewall/uspfilter/nat_test.go | 5 +- client/iface/device/device_ios.go | 3 + client/internal/debug/debug_linux.go | 10 +- client/internal/iface.go | 1 + client/internal/routemanager/iface/iface.go | 1 + .../systemops/systemops_generic.go | 14 +- client/ios/NetBirdSDK/client.go | 2 +- client/server/panic_windows.go | 1 + client/ssh/server/jwt_test.go | 5 +- client/system/info_android.go | 3 + client/system/info_darwin.go | 1 + client/system/info_ios.go | 3 + client/ui/client_ui.go | 12 +- client/ui/signal_windows.go | 2 +- go.mod | 10 +- go.sum | 13 +- management/cmd/management.go | 2 +- .../internals/shared/grpc/loginfilter_test.go | 1 - management/server/account.go | 2 +- management/server/account_test.go | 4 +- .../policies/posture_checks_handler_test.go | 2 +- .../peers_handler_benchmark_test.go | 1 + .../setupkeys_handler_benchmark_test.go | 1 + .../users_handler_benchmark_test.go | 1 + .../setupkeys_handler_integration_test.go | 1 + management/server/idp/pocketid.go | 4 +- management/server/idp/zitadel.go | 2 +- management/server/migration/migration.go | 2 +- management/server/nameserver.go | 4 +- management/server/posture_checks.go | 2 +- .../store/sql_store_get_account_test.go | 5 +- management/server/store/sql_store_test.go | 19 +- management/server/testutil/store.go | 1 + management/server/testutil/store_ios.go | 1 + relay/cmd/pprof.go | 1 + relay/server/listener/quic/conn.go | 4 +- relay/server/listener/ws/conn.go | 2 +- .../management/client/rest/accounts_test.go | 1 + shared/management/client/rest/client.go | 4 +- shared/management/client/rest/client_test.go | 1 + shared/management/client/rest/dns_test.go | 1 + shared/management/client/rest/events_test.go | 1 + shared/management/client/rest/geo_test.go | 1 + shared/management/client/rest/groups_test.go | 1 + .../client/rest/impersonation_test.go | 1 + .../management/client/rest/networks_test.go | 1 + shared/management/client/rest/peers_test.go | 1 + .../management/client/rest/policies_test.go | 1 + .../client/rest/posturechecks_test.go | 1 + shared/management/client/rest/routes_test.go | 1 + .../management/client/rest/setupkeys_test.go | 1 + shared/management/client/rest/tokens_test.go | 1 + shared/management/client/rest/users_test.go | 1 + shared/relay/client/client_test.go | 82 ++---- shared/relay/client/dialer/quic/conn.go | 4 +- shared/relay/client/manager_test.go | 57 ++-- signal/cmd/run.go | 6 +- util/syslog_nonwindows.go | 1 + 78 files changed, 341 insertions(+), 312 deletions(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 80809e667..9e5e97a31 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,15 +1,15 @@ -FROM golang:1.25-bookworm +FROM golang:1.23-bullseye RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ && apt-get -y install --no-install-recommends\ - gettext-base=0.21-12 \ - iptables=1.8.9-2 \ - libgl1-mesa-dev=22.3.6-1+deb12u1 \ - xorg-dev=1:7.7+23 \ - libayatana-appindicator3-dev=0.5.92-1 \ + gettext-base=0.21-4 \ + iptables=1.8.7-1 \ + libgl1-mesa-dev=20.3.5-1 \ + xorg-dev=1:7.7+22 \ + libayatana-appindicator3-dev=0.5.5-2+deb11u2 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* \ - && go install -v golang.org/x/tools/gopls@latest + && go install -v golang.org/x/tools/gopls@v0.18.1 WORKDIR /app diff --git a/.github/workflows/golang-test-freebsd.yml b/.github/workflows/golang-test-freebsd.yml index df64e86bb..0d19e8a19 100644 --- a/.github/workflows/golang-test-freebsd.yml +++ b/.github/workflows/golang-test-freebsd.yml @@ -25,7 +25,7 @@ jobs: release: "14.2" prepare: | pkg install -y curl pkgconf xorg - GO_TARBALL="go1.25.3.freebsd-amd64.tar.gz" + GO_TARBALL="go1.24.10.freebsd-amd64.tar.gz" GO_URL="https://go.dev/dl/$GO_TARBALL" curl -vLO "$GO_URL" tar -C /usr/local -vxzf "$GO_TARBALL" diff --git a/.github/workflows/golang-test-linux.yml b/.github/workflows/golang-test-linux.yml index 195a37a1f..c09bfab39 100644 --- a/.github/workflows/golang-test-linux.yml +++ b/.github/workflows/golang-test-linux.yml @@ -200,7 +200,7 @@ jobs: -e GOCACHE=${CONTAINER_GOCACHE} \ -e GOMODCACHE=${CONTAINER_GOMODCACHE} \ -e CONTAINER=${CONTAINER} \ - golang:1.25-alpine \ + golang:1.24-alpine \ sh -c ' \ apk update; apk add --no-cache \ ca-certificates iptables ip6tables dbus dbus-dev libpcap-dev build-base; \ @@ -259,7 +259,7 @@ jobs: CGO_ENABLED=1 GOARCH=${{ matrix.arch }} \ go test ${{ matrix.raceFlag }} \ -exec 'sudo' \ - -timeout 10m -p 1 ./relay/... ./shared/relay/... + -timeout 10m ./relay/... ./shared/relay/... test_signal: name: "Signal / Unit" diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 9ce779dbb..c524f6f6b 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -52,10 +52,7 @@ jobs: if: matrix.os == 'ubuntu-latest' run: sudo apt update && sudo apt install -y -q libgtk-3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev libpcap-dev - name: golangci-lint - uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 + uses: golangci/golangci-lint-action@v4 with: version: latest - skip-cache: true - skip-save-cache: true - cache-invalidation-interval: 0 - args: --timeout=12m + args: --timeout=12m --out-format colored-line-number diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 84f6f64ed..2fa847dce 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -63,7 +63,7 @@ jobs: pkg install -y git curl portlint go # Install Go for building - GO_TARBALL="go1.25.5.freebsd-amd64.tar.gz" + GO_TARBALL="go1.24.10.freebsd-amd64.tar.gz" GO_URL="https://go.dev/dl/$GO_TARBALL" curl -LO "$GO_URL" tar -C /usr/local -xzf "$GO_TARBALL" diff --git a/.github/workflows/wasm-build-validation.yml b/.github/workflows/wasm-build-validation.yml index 47e45165b..4100e16dd 100644 --- a/.github/workflows/wasm-build-validation.yml +++ b/.github/workflows/wasm-build-validation.yml @@ -14,9 +14,6 @@ jobs: js_lint: name: "JS / Lint" runs-on: ubuntu-latest - env: - GOOS: js - GOARCH: wasm steps: - name: Checkout repository uses: actions/checkout@v4 @@ -27,14 +24,16 @@ jobs: - name: Install dependencies run: sudo apt update && sudo apt install -y -q libgtk-3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev libpcap-dev - name: Install golangci-lint - uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 + uses: golangci/golangci-lint-action@d6238b002a20823d52840fda27e2d4891c5952dc with: version: latest install-mode: binary skip-cache: true - skip-save-cache: true - cache-invalidation-interval: 0 - working-directory: ./client + skip-pkg-cache: true + skip-build-cache: true + - name: Run golangci-lint for WASM + run: | + GOOS=js GOARCH=wasm golangci-lint run --timeout=12m --out-format colored-line-number ./client/... continue-on-error: true js_build: diff --git a/.golangci.yaml b/.golangci.yaml index d81ad1377..461677c2e 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -1,124 +1,139 @@ -version: "2" -linters: - default: none - enable: - - bodyclose - - dupword - - durationcheck - - errcheck - - forbidigo - - gocritic - - gosec - - govet - - ineffassign - - mirror - - misspell - - nilerr - - nilnil - - predeclared - - revive - - sqlclosecheck - - staticcheck - - unused - - wastedassign - settings: - errcheck: - check-type-assertions: false - gocritic: - disabled-checks: - - commentFormatting - - captLocal - - deprecatedComment - gosec: - includes: - - G101 - - G103 - - G104 - - G106 - - G108 - - G109 - - G110 - - G111 - - G201 - - G202 - - G203 - - G301 - - G302 - - G303 - - G304 - - G305 - - G306 - - G307 - - G403 - - G502 - - G503 - - G504 - - G601 - - G602 - govet: - enable: - - nilness - enable-all: false - revive: - rules: - - name: exported - arguments: - - checkPrivateReceivers - - sayRepetitiveInsteadOfStutters - severity: warning - disabled: false - exclusions: - generated: lax - presets: - - comments - - common-false-positives - - legacy - - std-error-handling +run: + # Timeout for analysis, e.g. 30s, 5m. + # Default: 1m + timeout: 6m + +# This file contains only configs which differ from defaults. +# All possible options can be found here https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml +linters-settings: + errcheck: + # Report about not checking of errors in type assertions: `a := b.(MyStruct)`. + # Such cases aren't reported by default. + # Default: false + check-type-assertions: false + + gosec: + includes: + - G101 # Look for hard coded credentials + #- G102 # Bind to all interfaces + - G103 # Audit the use of unsafe block + - G104 # Audit errors not checked + - G106 # Audit the use of ssh.InsecureIgnoreHostKey + #- G107 # Url provided to HTTP request as taint input + - G108 # Profiling endpoint automatically exposed on /debug/pprof + - G109 # Potential Integer overflow made by strconv.Atoi result conversion to int16/32 + - G110 # Potential DoS vulnerability via decompression bomb + - G111 # Potential directory traversal + #- G112 # Potential slowloris attack + - G113 # Usage of Rat.SetString in math/big with an overflow (CVE-2022-23772) + #- G114 # Use of net/http serve function that has no support for setting timeouts + - G201 # SQL query construction using format string + - G202 # SQL query construction using string concatenation + - G203 # Use of unescaped data in HTML templates + #- G204 # Audit use of command execution + - G301 # Poor file permissions used when creating a directory + - G302 # Poor file permissions used with chmod + - G303 # Creating tempfile using a predictable path + - G304 # File path provided as taint input + - G305 # File traversal when extracting zip/tar archive + - G306 # Poor file permissions used when writing to a new file + - G307 # Poor file permissions used when creating a file with os.Create + #- G401 # Detect the usage of DES, RC4, MD5 or SHA1 + #- G402 # Look for bad TLS connection settings + - G403 # Ensure minimum RSA key length of 2048 bits + #- G404 # Insecure random number source (rand) + #- G501 # Import blocklist: crypto/md5 + - G502 # Import blocklist: crypto/des + - G503 # Import blocklist: crypto/rc4 + - G504 # Import blocklist: net/http/cgi + #- G505 # Import blocklist: crypto/sha1 + - G601 # Implicit memory aliasing of items from a range statement + - G602 # Slice access out of bounds + + gocritic: + disabled-checks: + - commentFormatting + - captLocal + - deprecatedComment + + govet: + # Enable all analyzers. + # Default: false + enable-all: false + enable: + - nilness + + revive: rules: - - linters: - - forbidigo - path: management/cmd/root\.go - - linters: - - forbidigo - path: signal/cmd/root\.go - - linters: - - unused - path: sharedsock/filter\.go - - linters: - - unused - path: client/firewall/iptables/rule\.go - - linters: - - gosec - - mirror - path: test\.go - - linters: - - nilnil - path: mock\.go - - linters: - - staticcheck - text: grpc.DialContext is deprecated - - linters: - - staticcheck - text: grpc.WithBlock is deprecated - - linters: - - staticcheck - text: "QF1001" - - linters: - - staticcheck - text: "QF1008" - - linters: - - staticcheck - text: "QF1012" - paths: - - third_party$ - - builtin$ - - examples$ + - name: exported + severity: warning + disabled: false + arguments: + - "checkPrivateReceivers" + - "sayRepetitiveInsteadOfStutters" + tenv: + # The option `all` will run against whole test files (`_test.go`) regardless of method/function signatures. + # Otherwise, only methods that take `*testing.T`, `*testing.B`, and `testing.TB` as arguments are checked. + # Default: false + all: true + +linters: + disable-all: true + enable: + ## enabled by default + - errcheck # checking for unchecked errors, these unchecked errors can be critical bugs in some cases + - gosimple # specializes in simplifying a code + - govet # reports suspicious constructs, such as Printf calls whose arguments do not align with the format string + - ineffassign # detects when assignments to existing variables are not used + - staticcheck # is a go vet on steroids, applying a ton of static analysis checks + - tenv # Tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17. + - typecheck # like the front-end of a Go compiler, parses and type-checks Go code + - unused # checks for unused constants, variables, functions and types + ## disable by default but the have interesting results so lets add them + - bodyclose # checks whether HTTP response body is closed successfully + - dupword # dupword checks for duplicate words in the source code + - durationcheck # durationcheck checks for two durations multiplied together + - forbidigo # forbidigo forbids identifiers + - gocritic # provides diagnostics that check for bugs, performance and style issues + - gosec # inspects source code for security problems + - mirror # mirror reports wrong mirror patterns of bytes/strings usage + - misspell # misspess finds commonly misspelled English words in comments + - nilerr # finds the code that returns nil even if it checks that the error is not nil + - nilnil # checks that there is no simultaneous return of nil error and an invalid value + - predeclared # predeclared finds code that shadows one of Go's predeclared identifiers + - revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint. + - sqlclosecheck # checks that sql.Rows and sql.Stmt are closed + # - thelper # thelper detects Go test helpers without t.Helper() call and checks the consistency of test helpers. + - wastedassign # wastedassign finds wasted assignment statements issues: + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 max-same-issues: 5 -formatters: - exclusions: - generated: lax - paths: - - third_party$ - - builtin$ - - examples$ + + exclude-rules: + # allow fmt + - path: management/cmd/root\.go + linters: forbidigo + - path: signal/cmd/root\.go + linters: forbidigo + - path: sharedsock/filter\.go + linters: + - unused + - path: client/firewall/iptables/rule\.go + linters: + - unused + - path: test\.go + linters: + - mirror + - gosec + - path: mock\.go + linters: + - nilnil + # Exclude specific deprecation warnings for grpc methods + - linters: + - staticcheck + text: "grpc.DialContext is deprecated" + - linters: + - staticcheck + text: "grpc.WithBlock is deprecated" diff --git a/client/cmd/debug.go b/client/cmd/debug.go index 7ca56857b..430012a17 100644 --- a/client/cmd/debug.go +++ b/client/cmd/debug.go @@ -136,7 +136,6 @@ func setLogLevel(cmd *cobra.Command, args []string) error { client := proto.NewDaemonServiceClient(conn) level := server.ParseLogLevel(args[0]) if level == proto.LogLevel_UNKNOWN { - //nolint return fmt.Errorf("unknown log level: %s. Available levels are: panic, fatal, error, warn, info, debug, trace\n", args[0]) } diff --git a/client/cmd/login.go b/client/cmd/login.go index 57c010571..a34bb7c70 100644 --- a/client/cmd/login.go +++ b/client/cmd/login.go @@ -81,7 +81,6 @@ var loginCmd = &cobra.Command{ func doDaemonLogin(ctx context.Context, cmd *cobra.Command, providedSetupKey string, activeProf *profilemanager.Profile, username string, pm *profilemanager.ProfileManager) error { conn, err := DialClientGRPCServer(ctx, daemonAddr) if err != nil { - //nolint return fmt.Errorf("failed to connect to daemon error: %v\n"+ "If the daemon is not running please run: "+ "\nnetbird service install \nnetbird service start\n", err) @@ -207,7 +206,6 @@ func switchProfileOnDaemon(ctx context.Context, pm *profilemanager.ProfileManage func switchProfile(ctx context.Context, profileName string, username string) error { conn, err := DialClientGRPCServer(ctx, daemonAddr) if err != nil { - //nolint return fmt.Errorf("failed to connect to daemon error: %v\n"+ "If the daemon is not running please run: "+ "\nnetbird service install \nnetbird service start\n", err) diff --git a/client/cmd/pprof.go b/client/cmd/pprof.go index c041c6ea9..37efd35f0 100644 --- a/client/cmd/pprof.go +++ b/client/cmd/pprof.go @@ -1,4 +1,5 @@ //go:build pprof +// +build pprof package cmd diff --git a/client/cmd/root.go b/client/cmd/root.go index f4f4f6052..30120c196 100644 --- a/client/cmd/root.go +++ b/client/cmd/root.go @@ -390,7 +390,6 @@ func getClient(cmd *cobra.Command) (*grpc.ClientConn, error) { conn, err := DialClientGRPCServer(cmd.Context(), daemonAddr) if err != nil { - //nolint return nil, fmt.Errorf("failed to connect to daemon error: %v\n"+ "If the daemon is not running please run: "+ "\nnetbird service install \nnetbird service start\n", err) diff --git a/client/cmd/status.go b/client/cmd/status.go index 99d47cd1a..06460a6a7 100644 --- a/client/cmd/status.go +++ b/client/cmd/status.go @@ -124,7 +124,6 @@ func statusFunc(cmd *cobra.Command, args []string) error { func getStatus(ctx context.Context, shouldRunProbes bool) (*proto.StatusResponse, error) { conn, err := DialClientGRPCServer(ctx, daemonAddr) if err != nil { - //nolint return nil, fmt.Errorf("failed to connect to daemon error: %v\n"+ "If the daemon is not running please run: "+ "\nnetbird service install \nnetbird service start\n", err) diff --git a/client/cmd/testutil_test.go b/client/cmd/testutil_test.go index 2650d6225..888a9a3f7 100644 --- a/client/cmd/testutil_test.go +++ b/client/cmd/testutil_test.go @@ -89,6 +89,9 @@ func startManagement(t *testing.T, config *config.Config, testFile string) (*grp t.Cleanup(cleanUp) eventStore := &activity.InMemoryEventStore{} + if err != nil { + return nil, nil + } ctrl := gomock.NewController(t) t.Cleanup(ctrl.Finish) diff --git a/client/cmd/up.go b/client/cmd/up.go index 057d35268..9efc2e60d 100644 --- a/client/cmd/up.go +++ b/client/cmd/up.go @@ -216,7 +216,6 @@ func runInDaemonMode(ctx context.Context, cmd *cobra.Command, pm *profilemanager conn, err := DialClientGRPCServer(ctx, daemonAddr) if err != nil { - //nolint return fmt.Errorf("failed to connect to daemon error: %v\n"+ "If the daemon is not running please run: "+ "\nnetbird service install \nnetbird service start\n", err) diff --git a/client/firewall/iptables/acl_linux.go b/client/firewall/iptables/acl_linux.go index d83798f09..5ccaf17ba 100644 --- a/client/firewall/iptables/acl_linux.go +++ b/client/firewall/iptables/acl_linux.go @@ -386,8 +386,11 @@ func (m *aclManager) updateState() { // filterRuleSpecs returns the specs of a filtering rule func filterRuleSpecs(ip net.IP, protocol string, sPort, dPort *firewall.Port, action firewall.Action, ipsetName string) (specs []string) { + matchByIP := true // don't use IP matching if IP is 0.0.0.0 - matchByIP := !ip.IsUnspecified() + if ip.IsUnspecified() { + matchByIP = false + } if matchByIP { if ipsetName != "" { diff --git a/client/firewall/iptables/manager_linux_test.go b/client/firewall/iptables/manager_linux_test.go index ee47a27c0..6b5401e2b 100644 --- a/client/firewall/iptables/manager_linux_test.go +++ b/client/firewall/iptables/manager_linux_test.go @@ -161,7 +161,7 @@ func TestIptablesManagerDenyRules(t *testing.T) { t.Logf(" [%d] %s", i, rule) } - var denyRuleIndex, acceptRuleIndex = -1, -1 + var denyRuleIndex, acceptRuleIndex int = -1, -1 for i, rule := range rules { if strings.Contains(rule, "DROP") { t.Logf("Found DROP rule at index %d: %s", i, rule) diff --git a/client/firewall/nftables/manager_linux_test.go b/client/firewall/nftables/manager_linux_test.go index 75b1e2b6c..6b29c5606 100644 --- a/client/firewall/nftables/manager_linux_test.go +++ b/client/firewall/nftables/manager_linux_test.go @@ -198,7 +198,7 @@ func TestNftablesManagerRuleOrder(t *testing.T) { t.Logf("Found %d rules in nftables chain", len(rules)) // Find the accept and deny rules and verify deny comes before accept - var acceptRuleIndex, denyRuleIndex = -1, -1 + var acceptRuleIndex, denyRuleIndex int = -1, -1 for i, rule := range rules { hasAcceptHTTPSet := false hasDenyHTTPSet := false @@ -208,13 +208,11 @@ func TestNftablesManagerRuleOrder(t *testing.T) { for _, e := range rule.Exprs { // Check for set lookup if lookup, ok := e.(*expr.Lookup); ok { - switch lookup.SetName { - case "accept-http": + if lookup.SetName == "accept-http" { hasAcceptHTTPSet = true - case "deny-http": + } else if lookup.SetName == "deny-http" { hasDenyHTTPSet = true } - } // Check for port 80 if cmp, ok := e.(*expr.Cmp); ok { @@ -224,10 +222,9 @@ func TestNftablesManagerRuleOrder(t *testing.T) { } // Check for verdict if verdict, ok := e.(*expr.Verdict); ok { - switch verdict.Kind { - case expr.VerdictAccept: + if verdict.Kind == expr.VerdictAccept { action = "ACCEPT" - case expr.VerdictDrop: + } else if verdict.Kind == expr.VerdictDrop { action = "DROP" } } diff --git a/client/firewall/uspfilter/filter.go b/client/firewall/uspfilter/filter.go index 3d3d79631..4e22bde3f 100644 --- a/client/firewall/uspfilter/filter.go +++ b/client/firewall/uspfilter/filter.go @@ -795,7 +795,7 @@ func (m *Manager) recalculateTCPChecksum(packetData []byte, d *decoder, tcpHeade pseudoSum += uint32(d.ip4.Protocol) pseudoSum += uint32(tcpLength) - var sum = pseudoSum + var sum uint32 = pseudoSum for i := 0; i < tcpLength-1; i += 2 { sum += uint32(tcpLayer[i])<<8 | uint32(tcpLayer[i+1]) } diff --git a/client/firewall/uspfilter/localip.go b/client/firewall/uspfilter/localip.go index ffc807f46..7f6b52c71 100644 --- a/client/firewall/uspfilter/localip.go +++ b/client/firewall/uspfilter/localip.go @@ -130,7 +130,6 @@ func (m *localIPManager) UpdateLocalIPs(iface common.IFaceMapper) (err error) { // 127.0.0.0/8 newIPv4Bitmap[127] = &ipv4LowBitmap{} for i := 0; i < 8192; i++ { - // #nosec G602 -- bitmap is defined as [8192]uint32, loop range is correct newIPv4Bitmap[127].bitmap[i] = 0xFFFFFFFF } diff --git a/client/firewall/uspfilter/localip_test.go b/client/firewall/uspfilter/localip_test.go index 6653947fa..45ac912cd 100644 --- a/client/firewall/uspfilter/localip_test.go +++ b/client/firewall/uspfilter/localip_test.go @@ -218,7 +218,7 @@ func BenchmarkIPChecks(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { // nolint:gosimple - _ = mapManager.localIPs[ip.String()] + _, _ = mapManager.localIPs[ip.String()] } }) @@ -227,7 +227,7 @@ func BenchmarkIPChecks(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { // nolint:gosimple - _ = mapManager.localIPs[ip.String()] + _, _ = mapManager.localIPs[ip.String()] } }) } diff --git a/client/firewall/uspfilter/nat_test.go b/client/firewall/uspfilter/nat_test.go index 50743d006..400d61020 100644 --- a/client/firewall/uspfilter/nat_test.go +++ b/client/firewall/uspfilter/nat_test.go @@ -234,10 +234,9 @@ func TestInboundPortDNATNegative(t *testing.T) { require.False(t, translated, "Packet should NOT be translated for %s", tc.name) d = parsePacket(t, packet) - switch tc.protocol { - case layers.IPProtocolTCP: + if tc.protocol == layers.IPProtocolTCP { require.Equal(t, tc.dstPort, uint16(d.tcp.DstPort), "Port should remain unchanged") - case layers.IPProtocolUDP: + } else if tc.protocol == layers.IPProtocolUDP { require.Equal(t, tc.dstPort, uint16(d.udp.DstPort), "Port should remain unchanged") } }) diff --git a/client/iface/device/device_ios.go b/client/iface/device/device_ios.go index aa77cee45..d841ac2fe 100644 --- a/client/iface/device/device_ios.go +++ b/client/iface/device/device_ios.go @@ -1,3 +1,6 @@ +//go:build ios +// +build ios + package device import ( diff --git a/client/internal/debug/debug_linux.go b/client/internal/debug/debug_linux.go index aedf88b79..39d796fda 100644 --- a/client/internal/debug/debug_linux.go +++ b/client/internal/debug/debug_linux.go @@ -507,13 +507,15 @@ func formatPayloadWithCmp(p *expr.Payload, cmp *expr.Cmp) string { if p.Base == expr.PayloadBaseNetworkHeader { switch p.Offset { case 12: - switch p.Len { - case 4, 2: + if p.Len == 4 { + return fmt.Sprintf("ip saddr %s %s", formatCmpOp(cmp.Op), formatIPBytes(cmp.Data)) + } else if p.Len == 2 { return fmt.Sprintf("ip saddr %s %s", formatCmpOp(cmp.Op), formatIPBytes(cmp.Data)) } case 16: - switch p.Len { - case 4, 2: + if p.Len == 4 { + return fmt.Sprintf("ip daddr %s %s", formatCmpOp(cmp.Op), formatIPBytes(cmp.Data)) + } else if p.Len == 2 { return fmt.Sprintf("ip daddr %s %s", formatCmpOp(cmp.Op), formatIPBytes(cmp.Data)) } } diff --git a/client/internal/iface.go b/client/internal/iface.go index a82d87aab..bd0069c19 100644 --- a/client/internal/iface.go +++ b/client/internal/iface.go @@ -1,4 +1,5 @@ //go:build !windows +// +build !windows package internal diff --git a/client/internal/routemanager/iface/iface.go b/client/internal/routemanager/iface/iface.go index b44d9fa65..57dbec03d 100644 --- a/client/internal/routemanager/iface/iface.go +++ b/client/internal/routemanager/iface/iface.go @@ -1,4 +1,5 @@ //go:build !windows +// +build !windows package iface diff --git a/client/internal/routemanager/systemops/systemops_generic.go b/client/internal/routemanager/systemops/systemops_generic.go index ec219c7fe..26a548634 100644 --- a/client/internal/routemanager/systemops/systemops_generic.go +++ b/client/internal/routemanager/systemops/systemops_generic.go @@ -210,8 +210,7 @@ func (r *SysOps) refreshLocalSubnetsCache() { func (r *SysOps) genericAddVPNRoute(prefix netip.Prefix, intf *net.Interface) error { nextHop := Nexthop{netip.Addr{}, intf} - switch prefix { - case vars.Defaultv4: + if prefix == vars.Defaultv4 { if err := r.addToRouteTable(splitDefaultv4_1, nextHop); err != nil { return err } @@ -234,7 +233,7 @@ func (r *SysOps) genericAddVPNRoute(prefix netip.Prefix, intf *net.Interface) er } return nil - case vars.Defaultv6: + } else if prefix == vars.Defaultv6 { if err := r.addToRouteTable(splitDefaultv6_1, nextHop); err != nil { return fmt.Errorf("add unreachable route split 1: %w", err) } @@ -256,8 +255,7 @@ func (r *SysOps) genericAddVPNRoute(prefix netip.Prefix, intf *net.Interface) er func (r *SysOps) genericRemoveVPNRoute(prefix netip.Prefix, intf *net.Interface) error { nextHop := Nexthop{netip.Addr{}, intf} - switch prefix { - case vars.Defaultv4: + if prefix == vars.Defaultv4 { var result *multierror.Error if err := r.removeFromRouteTable(splitDefaultv4_1, nextHop); err != nil { result = multierror.Append(result, err) @@ -275,7 +273,7 @@ func (r *SysOps) genericRemoveVPNRoute(prefix netip.Prefix, intf *net.Interface) } return nberrors.FormatErrorOrNil(result) - case vars.Defaultv6: + } else if prefix == vars.Defaultv6 { var result *multierror.Error if err := r.removeFromRouteTable(splitDefaultv6_1, nextHop); err != nil { result = multierror.Append(result, err) @@ -285,9 +283,9 @@ func (r *SysOps) genericRemoveVPNRoute(prefix netip.Prefix, intf *net.Interface) } return nberrors.FormatErrorOrNil(result) - default: - return r.removeFromRouteTable(prefix, nextHop) } + + return r.removeFromRouteTable(prefix, nextHop) } func (r *SysOps) setupHooks(initAddresses []net.IP, stateManager *statemanager.Manager) error { diff --git a/client/ios/NetBirdSDK/client.go b/client/ios/NetBirdSDK/client.go index 935910fc9..e901386d9 100644 --- a/client/ios/NetBirdSDK/client.go +++ b/client/ios/NetBirdSDK/client.go @@ -76,7 +76,7 @@ type Client struct { loginComplete bool connectClient *internal.ConnectClient // preloadedConfig holds config loaded from JSON (used on tvOS where file writes are blocked) - preloadedConfig *profilemanager.Config + preloadedConfig *profilemanager.Config } // NewClient instantiate a new Client diff --git a/client/server/panic_windows.go b/client/server/panic_windows.go index 8592f12ad..f441ec9ea 100644 --- a/client/server/panic_windows.go +++ b/client/server/panic_windows.go @@ -1,4 +1,5 @@ //go:build windows +// +build windows package server diff --git a/client/ssh/server/jwt_test.go b/client/ssh/server/jwt_test.go index 6eb88accc..d36d7cbbf 100644 --- a/client/ssh/server/jwt_test.go +++ b/client/ssh/server/jwt_test.go @@ -602,13 +602,12 @@ func TestJWTAuthentication(t *testing.T) { require.NoError(t, err) var authMethods []cryptossh.AuthMethod - switch tc.token { - case "valid": + if tc.token == "valid" { token := generateValidJWT(t, privateKey, issuer, audience) authMethods = []cryptossh.AuthMethod{ cryptossh.Password(token), } - case "invalid": + } else if tc.token == "invalid" { invalidToken := "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.invalid" authMethods = []cryptossh.AuthMethod{ cryptossh.Password(invalidToken), diff --git a/client/system/info_android.go b/client/system/info_android.go index 794ff15ed..78895bfa8 100644 --- a/client/system/info_android.go +++ b/client/system/info_android.go @@ -1,3 +1,6 @@ +//go:build android +// +build android + package system import ( diff --git a/client/system/info_darwin.go b/client/system/info_darwin.go index 4a31920ec..caa344737 100644 --- a/client/system/info_darwin.go +++ b/client/system/info_darwin.go @@ -1,4 +1,5 @@ //go:build !ios +// +build !ios package system diff --git a/client/system/info_ios.go b/client/system/info_ios.go index 322609db4..705c37920 100644 --- a/client/system/info_ios.go +++ b/client/system/info_ios.go @@ -1,3 +1,6 @@ +//go:build ios +// +build ios + package system import ( diff --git a/client/ui/client_ui.go b/client/ui/client_ui.go index 5d955ed25..78934ea95 100644 --- a/client/ui/client_ui.go +++ b/client/ui/client_ui.go @@ -510,7 +510,7 @@ func (s *serviceClient) saveSettings() { // Continue with default behavior if features can't be retrieved } else if features != nil && features.DisableUpdateSettings { log.Warn("Configuration updates are disabled by daemon") - dialog.ShowError(fmt.Errorf("configuration updates are disabled by daemon"), s.wSettings) + dialog.ShowError(fmt.Errorf("Configuration updates are disabled by daemon"), s.wSettings) return } @@ -540,7 +540,7 @@ func (s *serviceClient) saveSettings() { func (s *serviceClient) validateSettings() error { if s.iPreSharedKey.Text != "" && s.iPreSharedKey.Text != censoredPreSharedKey { if _, err := wgtypes.ParseKey(s.iPreSharedKey.Text); err != nil { - return fmt.Errorf("invalid pre-shared key value") + return fmt.Errorf("Invalid Pre-shared Key Value") } } return nil @@ -549,10 +549,10 @@ func (s *serviceClient) validateSettings() error { func (s *serviceClient) parseNumericSettings() (int64, int64, error) { port, err := strconv.ParseInt(s.iInterfacePort.Text, 10, 64) if err != nil { - return 0, 0, errors.New("invalid interface port") + return 0, 0, errors.New("Invalid interface port") } if port < 1 || port > 65535 { - return 0, 0, errors.New("invalid interface port: out of range 1-65535") + return 0, 0, errors.New("Invalid interface port: out of range 1-65535") } var mtu int64 @@ -560,7 +560,7 @@ func (s *serviceClient) parseNumericSettings() (int64, int64, error) { if mtuText != "" { mtu, err = strconv.ParseInt(mtuText, 10, 64) if err != nil { - return 0, 0, errors.New("invalid MTU value") + return 0, 0, errors.New("Invalid MTU value") } if mtu < iface.MinMTU || mtu > iface.MaxMTU { return 0, 0, fmt.Errorf("MTU must be between %d and %d bytes", iface.MinMTU, iface.MaxMTU) @@ -645,7 +645,7 @@ func (s *serviceClient) buildSetConfigRequest(iMngURL string, port, mtu int64) ( if sshJWTCacheTTLText != "" { sshJWTCacheTTL, err := strconv.ParseInt(sshJWTCacheTTLText, 10, 32) if err != nil { - return nil, errors.New("invalid SSH JWT Cache TTL value") + return nil, errors.New("Invalid SSH JWT Cache TTL value") } if sshJWTCacheTTL < 0 || sshJWTCacheTTL > maxSSHJWTCacheTTL { return nil, fmt.Errorf("SSH JWT Cache TTL must be between 0 and %d seconds", maxSSHJWTCacheTTL) diff --git a/client/ui/signal_windows.go b/client/ui/signal_windows.go index 58f46374f..ca98be526 100644 --- a/client/ui/signal_windows.go +++ b/client/ui/signal_windows.go @@ -164,7 +164,7 @@ func sendShowWindowSignal(pid int32) error { err = windows.SetEvent(eventHandle) if err != nil { - return fmt.Errorf("error setting event: %w", err) + return fmt.Errorf("Error setting event: %w", err) } return nil diff --git a/go.mod b/go.mod index 1b4612da3..23cf0f37d 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,6 @@ module github.com/netbirdio/netbird -go 1.25 - -toolchain go1.25.5 +go 1.24.10 require ( cunicu.li/go-rosenpass v0.4.0 @@ -83,7 +81,7 @@ require ( github.com/pion/turn/v3 v3.0.1 github.com/pkg/sftp v1.13.9 github.com/prometheus/client_golang v1.23.2 - github.com/quic-go/quic-go v0.55.0 + github.com/quic-go/quic-go v0.49.1 github.com/redis/go-redis/v9 v9.7.3 github.com/rs/xid v1.3.0 github.com/shirou/gopsutil/v3 v3.24.4 @@ -105,7 +103,7 @@ require ( go.opentelemetry.io/otel/exporters/prometheus v0.48.0 go.opentelemetry.io/otel/metric v1.38.0 go.opentelemetry.io/otel/sdk/metric v1.38.0 - go.uber.org/mock v0.5.2 + go.uber.org/mock v0.5.0 go.uber.org/zap v1.27.0 goauthentik.io/api/v3 v3.2023051.3 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 @@ -188,10 +186,12 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-sql-driver/mysql v1.9.3 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/go-text/render v0.2.0 // indirect github.com/go-text/typesetting v0.2.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/btree v1.1.2 // indirect + github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect github.com/googleapis/gax-go/v2 v2.15.0 // indirect diff --git a/go.sum b/go.sum index 60b6304c3..354c7732e 100644 --- a/go.sum +++ b/go.sum @@ -101,6 +101,9 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk= github.com/cilium/ebpf v0.15.0/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso= github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE= @@ -283,6 +286,7 @@ github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= @@ -487,8 +491,8 @@ github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9Z github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= -github.com/quic-go/quic-go v0.55.0 h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9Mk= -github.com/quic-go/quic-go v0.55.0/go.mod h1:DR51ilwU1uE164KuWXhinFcKWGlEjzys2l8zUl5Ss1U= +github.com/quic-go/quic-go v0.49.1 h1:e5JXpUyF0f2uFjckQzD8jTghZrOUK1xxDqqZhlwixo0= +github.com/quic-go/quic-go v0.49.1/go.mod h1:s2wDnmCdooUQBmQfpUSTCYBl1/D4FcqbULMMkASvR6s= github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= @@ -618,8 +622,8 @@ go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lI go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= -go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= +go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= +go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= @@ -713,6 +717,7 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/management/cmd/management.go b/management/cmd/management.go index 376adda20..81a154510 100644 --- a/management/cmd/management.go +++ b/management/cmd/management.go @@ -64,7 +64,7 @@ var ( config.HttpConfig.IdpSignKeyRefreshEnabled = idpSignKeyRefreshEnabled } - var tlsEnabled bool + tlsEnabled := false if mgmtLetsencryptDomain != "" || (config.HttpConfig.CertFile != "" && config.HttpConfig.CertKey != "") { tlsEnabled = true } diff --git a/management/internals/shared/grpc/loginfilter_test.go b/management/internals/shared/grpc/loginfilter_test.go index 797879ae7..8b26e14ab 100644 --- a/management/internals/shared/grpc/loginfilter_test.go +++ b/management/internals/shared/grpc/loginfilter_test.go @@ -85,7 +85,6 @@ func (s *LoginFilterTestSuite) TestBanDurationIncreasesExponentially() { s.True(s.filter.logged[pubKey].isBanned) s.Equal(2, s.filter.logged[pubKey].banLevel) secondBanDuration := s.filter.logged[pubKey].banExpiresAt.Sub(s.filter.logged[pubKey].lastSeen) - // nolint expectedSecondDuration := time.Duration(float64(baseBan) * math.Pow(2, 1)) s.InDelta(expectedSecondDuration, secondBanDuration, float64(time.Millisecond)) } diff --git a/management/server/account.go b/management/server/account.go index 9785f446c..29415b038 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -1006,7 +1006,7 @@ func (am *DefaultAccountManager) isCacheFresh(ctx context.Context, accountUsers for user, loggedInOnce := range accountUsers { if datum, ok := userDataMap[user]; ok { // check if the matching user data has a pending invite and if the user has logged in once, forcing the cache to be refreshed - if datum.AppMetadata.WTPendingInvite != nil && *datum.AppMetadata.WTPendingInvite && loggedInOnce == true { //nolint + if datum.AppMetadata.WTPendingInvite != nil && *datum.AppMetadata.WTPendingInvite && loggedInOnce == true { //nolint:gosimple log.WithContext(ctx).Infof("user %s has a pending invite and has logged in once, cache invalid", user) return false } diff --git a/management/server/account_test.go b/management/server/account_test.go index 32d2b4ea3..59d6e4928 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -753,7 +753,7 @@ func TestAccountManager_SetOrUpdateDomain(t *testing.T) { t.Fatalf("expected to create an account for a user %s", userId) } - if account.Domain != domain { + if account != nil && account.Domain != domain { t.Errorf("setting account domain failed, expected %s, got %s", domain, account.Domain) } @@ -768,7 +768,7 @@ func TestAccountManager_SetOrUpdateDomain(t *testing.T) { t.Fatalf("expected to get an account for a user %s", userId) } - if account.Domain != domain { + if account != nil && account.Domain != domain { t.Errorf("updating domain. expected %s got %s", domain, account.Domain) } } diff --git a/management/server/http/handlers/policies/posture_checks_handler_test.go b/management/server/http/handlers/policies/posture_checks_handler_test.go index a5999f6c7..35198da32 100644 --- a/management/server/http/handlers/policies/posture_checks_handler_test.go +++ b/management/server/http/handlers/policies/posture_checks_handler_test.go @@ -46,7 +46,7 @@ func initPostureChecksTestData(postureChecks ...*posture.Checks) *postureChecksH testPostureChecks[postureChecks.ID] = postureChecks if err := postureChecks.Validate(); err != nil { - return nil, status.Errorf(status.InvalidArgument, "%v", err) //nolint + return nil, status.Errorf(status.InvalidArgument, "%s", err.Error()) //nolint } return postureChecks, nil diff --git a/management/server/http/testing/benchmarks/peers_handler_benchmark_test.go b/management/server/http/testing/benchmarks/peers_handler_benchmark_test.go index 3345a034b..3fe3fe809 100644 --- a/management/server/http/testing/benchmarks/peers_handler_benchmark_test.go +++ b/management/server/http/testing/benchmarks/peers_handler_benchmark_test.go @@ -1,4 +1,5 @@ //go:build benchmark +// +build benchmark package benchmarks diff --git a/management/server/http/testing/benchmarks/setupkeys_handler_benchmark_test.go b/management/server/http/testing/benchmarks/setupkeys_handler_benchmark_test.go index ca25861dd..36b226db0 100644 --- a/management/server/http/testing/benchmarks/setupkeys_handler_benchmark_test.go +++ b/management/server/http/testing/benchmarks/setupkeys_handler_benchmark_test.go @@ -1,4 +1,5 @@ //go:build benchmark +// +build benchmark package benchmarks diff --git a/management/server/http/testing/benchmarks/users_handler_benchmark_test.go b/management/server/http/testing/benchmarks/users_handler_benchmark_test.go index b13773268..2868a20bd 100644 --- a/management/server/http/testing/benchmarks/users_handler_benchmark_test.go +++ b/management/server/http/testing/benchmarks/users_handler_benchmark_test.go @@ -1,4 +1,5 @@ //go:build benchmark +// +build benchmark package benchmarks diff --git a/management/server/http/testing/integration/setupkeys_handler_integration_test.go b/management/server/http/testing/integration/setupkeys_handler_integration_test.go index c1a9829da..1079de4aa 100644 --- a/management/server/http/testing/integration/setupkeys_handler_integration_test.go +++ b/management/server/http/testing/integration/setupkeys_handler_integration_test.go @@ -1,4 +1,5 @@ //go:build integration +// +build integration package integration diff --git a/management/server/idp/pocketid.go b/management/server/idp/pocketid.go index d8d764830..38a5cc67f 100644 --- a/management/server/idp/pocketid.go +++ b/management/server/idp/pocketid.go @@ -121,7 +121,7 @@ func NewPocketIdManager(config PocketIdClientConfig, appMetrics telemetry.AppMet func (p *PocketIdManager) request(ctx context.Context, method, resource string, query *url.Values, body string) ([]byte, error) { var MethodsWithBody = []string{http.MethodPost, http.MethodPut} if !slices.Contains(MethodsWithBody, method) && body != "" { - return nil, fmt.Errorf("body provided to unsupported method: %s", method) + return nil, fmt.Errorf("Body provided to unsupported method: %s", method) } reqURL := fmt.Sprintf("%s/api/%s", p.managementEndpoint, resource) @@ -301,7 +301,7 @@ func (p *PocketIdManager) CreateUser(ctx context.Context, email, name, accountID if p.appMetrics != nil { p.appMetrics.IDPMetrics().CountCreateUser() } - pending := true + var pending bool = true ret := &UserData{ Email: email, Name: name, diff --git a/management/server/idp/zitadel.go b/management/server/idp/zitadel.go index 8db3c4796..24228346a 100644 --- a/management/server/idp/zitadel.go +++ b/management/server/idp/zitadel.go @@ -357,7 +357,7 @@ func (zm *ZitadelManager) CreateUser(ctx context.Context, email, name, accountID return nil, err } - pending := true + var pending bool = true ret := &UserData{ Email: email, Name: name, diff --git a/management/server/migration/migration.go b/management/server/migration/migration.go index 7fcb98ccb..78f4afbd5 100644 --- a/management/server/migration/migration.go +++ b/management/server/migration/migration.go @@ -393,7 +393,7 @@ func CreateIndexIfNotExists[T any](ctx context.Context, db *gorm.DB, indexName s return fmt.Errorf("failed to parse model schema: %w", err) } tableName := stmt.Schema.Table - dialect := db.Name() + dialect := db.Dialector.Name() if db.Migrator().HasIndex(&model, indexName) { log.WithContext(ctx).Infof("index %s already exists on table %s", indexName, tableName) diff --git a/management/server/nameserver.go b/management/server/nameserver.go index a3eb4ae2e..f278e1761 100644 --- a/management/server/nameserver.go +++ b/management/server/nameserver.go @@ -20,7 +20,7 @@ import ( const domainPattern = `^(?i)[a-z0-9]+([\-\.]{1}[a-z0-9]+)*[*.a-z]{1,}$` -var errInvalidDomainName = errors.New("invalid domain name") +var invalidDomainName = errors.New("invalid domain name") // GetNameServerGroup gets a nameserver group object from account and nameserver group IDs func (am *DefaultAccountManager) GetNameServerGroup(ctx context.Context, accountID, userID, nsGroupID string) (*nbdns.NameServerGroup, error) { @@ -314,7 +314,7 @@ func validateDomain(domain string) error { _, valid := dns.IsDomainName(domain) if !valid { - return errInvalidDomainName + return invalidDomainName } return nil diff --git a/management/server/posture_checks.go b/management/server/posture_checks.go index ba901c771..9a743eb8c 100644 --- a/management/server/posture_checks.go +++ b/management/server/posture_checks.go @@ -158,7 +158,7 @@ func arePostureCheckChangesAffectPeers(ctx context.Context, transaction store.St // validatePostureChecks validates the posture checks. func validatePostureChecks(ctx context.Context, transaction store.Store, accountID string, postureChecks *posture.Checks) error { if err := postureChecks.Validate(); err != nil { - return status.Errorf(status.InvalidArgument, "%v", err.Error()) //nolint + return status.Errorf(status.InvalidArgument, "%s", err.Error()) //nolint } // If the posture check already has an ID, verify its existence in the store. diff --git a/management/server/store/sql_store_get_account_test.go b/management/server/store/sql_store_get_account_test.go index 69e346ae7..8ff04d68a 100644 --- a/management/server/store/sql_store_get_account_test.go +++ b/management/server/store/sql_store_get_account_test.go @@ -997,10 +997,9 @@ func TestGetAccount_ComprehensiveFieldValidation(t *testing.T) { // Find posture checks by ID var pc1, pc2 *posture.Checks for _, pc := range retrievedAccount.PostureChecks { - switch pc.ID { - case postureCheckID1: + if pc.ID == postureCheckID1 { pc1 = pc - case postureCheckID2: + } else if pc.ID == postureCheckID2 { pc2 = pc } } diff --git a/management/server/store/sql_store_test.go b/management/server/store/sql_store_test.go index 728d67273..97aa81b12 100644 --- a/management/server/store/sql_store_test.go +++ b/management/server/store/sql_store_test.go @@ -30,6 +30,7 @@ import ( "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/management/server/util" nbroute "github.com/netbirdio/netbird/route" + route2 "github.com/netbirdio/netbird/route" "github.com/netbirdio/netbird/shared/management/status" "github.com/netbirdio/netbird/util/crypt" ) @@ -109,12 +110,12 @@ func runLargeTest(t *testing.T, store Store) { AccountID: account.Id, } account.Users[user.Id] = user - route := &nbroute.Route{ - ID: nbroute.ID(fmt.Sprintf("network-id-%d", n)), + route := &route2.Route{ + ID: route2.ID(fmt.Sprintf("network-id-%d", n)), Description: "base route", - NetID: nbroute.NetID(fmt.Sprintf("network-id-%d", n)), + NetID: route2.NetID(fmt.Sprintf("network-id-%d", n)), Network: netip.MustParsePrefix(netIP.String() + "/24"), - NetworkType: nbroute.IPv4Network, + NetworkType: route2.IPv4Network, Metric: 9999, Masquerade: false, Enabled: true, @@ -688,7 +689,7 @@ func TestMigrate(t *testing.T) { require.NoError(t, err, "Failed to insert Gob data") type route struct { - nbroute.Route + route2.Route Network netip.Prefix `gorm:"serializer:gob"` PeerGroups []string `gorm:"serializer:gob"` } @@ -697,7 +698,7 @@ func TestMigrate(t *testing.T) { rt := &route{ Network: prefix, PeerGroups: []string{"group1", "group2"}, - Route: nbroute.Route{ID: "route1"}, + Route: route2.Route{ID: "route1"}, } err = store.(*SqlStore).db.Save(rt).Error @@ -713,7 +714,7 @@ func TestMigrate(t *testing.T) { require.NoError(t, err, "Failed to delete Gob data") prefix = netip.MustParsePrefix("12.0.0.0/24") - nRT := &nbroute.Route{ + nRT := &route2.Route{ Network: prefix, ID: "route2", Peer: "peer-id", @@ -3543,13 +3544,13 @@ func TestSqlStore_SaveRoute(t *testing.T) { accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" - route := &nbroute.Route{ + route := &route2.Route{ ID: "route-id", AccountID: accountID, Network: netip.MustParsePrefix("10.10.0.0/16"), NetID: "netID", PeerGroups: []string{"routeA"}, - NetworkType: nbroute.IPv4Network, + NetworkType: route2.IPv4Network, Masquerade: true, Metric: 9999, Enabled: true, diff --git a/management/server/testutil/store.go b/management/server/testutil/store.go index f92153399..db418c45b 100644 --- a/management/server/testutil/store.go +++ b/management/server/testutil/store.go @@ -1,4 +1,5 @@ //go:build !ios +// +build !ios package testutil diff --git a/management/server/testutil/store_ios.go b/management/server/testutil/store_ios.go index 9e3b5ce4a..c3dd839d3 100644 --- a/management/server/testutil/store_ios.go +++ b/management/server/testutil/store_ios.go @@ -1,4 +1,5 @@ //go:build ios +// +build ios package testutil diff --git a/relay/cmd/pprof.go b/relay/cmd/pprof.go index c041c6ea9..37efd35f0 100644 --- a/relay/cmd/pprof.go +++ b/relay/cmd/pprof.go @@ -1,4 +1,5 @@ //go:build pprof +// +build pprof package cmd diff --git a/relay/server/listener/quic/conn.go b/relay/server/listener/quic/conn.go index 6e2201bf7..909ec1cc6 100644 --- a/relay/server/listener/quic/conn.go +++ b/relay/server/listener/quic/conn.go @@ -12,14 +12,14 @@ import ( ) type Conn struct { - session *quic.Conn + session quic.Connection closed bool closedMu sync.Mutex ctx context.Context ctxCancel context.CancelFunc } -func NewConn(session *quic.Conn) *Conn { +func NewConn(session quic.Connection) *Conn { ctx, cancel := context.WithCancel(context.Background()) return &Conn{ session: session, diff --git a/relay/server/listener/ws/conn.go b/relay/server/listener/ws/conn.go index d5bce56f7..3ec08945b 100644 --- a/relay/server/listener/ws/conn.go +++ b/relay/server/listener/ws/conn.go @@ -88,7 +88,7 @@ func (c *Conn) Close() error { c.closedMu.Lock() c.closed = true c.closedMu.Unlock() - return c.CloseNow() + return c.Conn.CloseNow() } func (c *Conn) isClosed() bool { diff --git a/shared/management/client/rest/accounts_test.go b/shared/management/client/rest/accounts_test.go index e44ada298..be0066488 100644 --- a/shared/management/client/rest/accounts_test.go +++ b/shared/management/client/rest/accounts_test.go @@ -1,4 +1,5 @@ //go:build integration +// +build integration package rest_test diff --git a/shared/management/client/rest/client.go b/shared/management/client/rest/client.go index 77c960435..4d1de2631 100644 --- a/shared/management/client/rest/client.go +++ b/shared/management/client/rest/client.go @@ -161,7 +161,7 @@ func (c *Client) NewRequest(ctx context.Context, method, path string, body io.Re func parseResponse[T any](resp *http.Response) (T, error) { var ret T if resp.Body == nil { - return ret, fmt.Errorf("body missing, HTTP Error code %d", resp.StatusCode) + return ret, fmt.Errorf("Body missing, HTTP Error code %d", resp.StatusCode) } bs, err := io.ReadAll(resp.Body) if err != nil { @@ -169,7 +169,7 @@ func parseResponse[T any](resp *http.Response) (T, error) { } err = json.Unmarshal(bs, &ret) if err != nil { - return ret, fmt.Errorf("error code %d, error unmarshalling body: %w", resp.StatusCode, err) + return ret, fmt.Errorf("Error code %d, error unmarshalling body: %w", resp.StatusCode, err) } return ret, nil diff --git a/shared/management/client/rest/client_test.go b/shared/management/client/rest/client_test.go index 2b3e6cabe..17df8dd8b 100644 --- a/shared/management/client/rest/client_test.go +++ b/shared/management/client/rest/client_test.go @@ -1,4 +1,5 @@ //go:build integration +// +build integration package rest_test diff --git a/shared/management/client/rest/dns_test.go b/shared/management/client/rest/dns_test.go index 8e8633f8d..58082abe8 100644 --- a/shared/management/client/rest/dns_test.go +++ b/shared/management/client/rest/dns_test.go @@ -1,4 +1,5 @@ //go:build integration +// +build integration package rest_test diff --git a/shared/management/client/rest/events_test.go b/shared/management/client/rest/events_test.go index 1ee10eb6e..b28390001 100644 --- a/shared/management/client/rest/events_test.go +++ b/shared/management/client/rest/events_test.go @@ -1,4 +1,5 @@ //go:build integration +// +build integration package rest_test diff --git a/shared/management/client/rest/geo_test.go b/shared/management/client/rest/geo_test.go index 2410f2641..fcb4808a1 100644 --- a/shared/management/client/rest/geo_test.go +++ b/shared/management/client/rest/geo_test.go @@ -1,4 +1,5 @@ //go:build integration +// +build integration package rest_test diff --git a/shared/management/client/rest/groups_test.go b/shared/management/client/rest/groups_test.go index 51fd0c0ee..fcd759e9a 100644 --- a/shared/management/client/rest/groups_test.go +++ b/shared/management/client/rest/groups_test.go @@ -1,4 +1,5 @@ //go:build integration +// +build integration package rest_test diff --git a/shared/management/client/rest/impersonation_test.go b/shared/management/client/rest/impersonation_test.go index d257d0987..4fb8f24eb 100644 --- a/shared/management/client/rest/impersonation_test.go +++ b/shared/management/client/rest/impersonation_test.go @@ -1,4 +1,5 @@ //go:build integration +// +build integration package rest_test diff --git a/shared/management/client/rest/networks_test.go b/shared/management/client/rest/networks_test.go index 2bf1a0d3b..ca2a294ae 100644 --- a/shared/management/client/rest/networks_test.go +++ b/shared/management/client/rest/networks_test.go @@ -1,4 +1,5 @@ //go:build integration +// +build integration package rest_test diff --git a/shared/management/client/rest/peers_test.go b/shared/management/client/rest/peers_test.go index c464de7ed..a45f9d6ec 100644 --- a/shared/management/client/rest/peers_test.go +++ b/shared/management/client/rest/peers_test.go @@ -1,4 +1,5 @@ //go:build integration +// +build integration package rest_test diff --git a/shared/management/client/rest/policies_test.go b/shared/management/client/rest/policies_test.go index e948e2949..a19d0a728 100644 --- a/shared/management/client/rest/policies_test.go +++ b/shared/management/client/rest/policies_test.go @@ -1,4 +1,5 @@ //go:build integration +// +build integration package rest_test diff --git a/shared/management/client/rest/posturechecks_test.go b/shared/management/client/rest/posturechecks_test.go index d74d455a5..9b1b618df 100644 --- a/shared/management/client/rest/posturechecks_test.go +++ b/shared/management/client/rest/posturechecks_test.go @@ -1,4 +1,5 @@ //go:build integration +// +build integration package rest_test diff --git a/shared/management/client/rest/routes_test.go b/shared/management/client/rest/routes_test.go index 5ee2def24..9452a07fc 100644 --- a/shared/management/client/rest/routes_test.go +++ b/shared/management/client/rest/routes_test.go @@ -1,4 +1,5 @@ //go:build integration +// +build integration package rest_test diff --git a/shared/management/client/rest/setupkeys_test.go b/shared/management/client/rest/setupkeys_test.go index bd8d3f835..0fa782da5 100644 --- a/shared/management/client/rest/setupkeys_test.go +++ b/shared/management/client/rest/setupkeys_test.go @@ -1,4 +1,5 @@ //go:build integration +// +build integration package rest_test diff --git a/shared/management/client/rest/tokens_test.go b/shared/management/client/rest/tokens_test.go index 5af41eb73..ce3748751 100644 --- a/shared/management/client/rest/tokens_test.go +++ b/shared/management/client/rest/tokens_test.go @@ -1,4 +1,5 @@ //go:build integration +// +build integration package rest_test diff --git a/shared/management/client/rest/users_test.go b/shared/management/client/rest/users_test.go index 68815d4f9..d53c4eb6a 100644 --- a/shared/management/client/rest/users_test.go +++ b/shared/management/client/rest/users_test.go @@ -1,4 +1,5 @@ //go:build integration +// +build integration package rest_test diff --git a/shared/relay/client/client_test.go b/shared/relay/client/client_test.go index 9820d642f..8fe5f04f4 100644 --- a/shared/relay/client/client_test.go +++ b/shared/relay/client/client_test.go @@ -19,7 +19,15 @@ import ( ) var ( - hmacTokenStore = &hmac.TokenStore{} + hmacTokenStore = &hmac.TokenStore{} + serverListenAddr = "127.0.0.1:1234" + serverURL = "rel://127.0.0.1:1234" + serverCfg = server.Config{ + Meter: otel.Meter(""), + ExposedAddress: serverURL, + TLSSupport: false, + AuthValidator: &allow.Auth{}, + } ) func TestMain(m *testing.M) { @@ -28,20 +36,8 @@ func TestMain(m *testing.M) { os.Exit(code) } -// newClientTestServerConfig creates a new server config for client testing with the given address -func newClientTestServerConfig(address string) server.Config { - return server.Config{ - Meter: otel.Meter(""), - ExposedAddress: "rel://" + address, - TLSSupport: false, - AuthValidator: &allow.Auth{}, - } -} - func TestClient(t *testing.T) { ctx := context.Background() - serverListenAddr := "127.0.0.1:50001" - serverCfg := newClientTestServerConfig(serverListenAddr) srv, err := server.NewServer(serverCfg) if err != nil { @@ -68,7 +64,7 @@ func TestClient(t *testing.T) { t.Fatalf("failed to start server: %s", err) } t.Log("alice connecting to server") - clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -76,7 +72,7 @@ func TestClient(t *testing.T) { defer clientAlice.Close() t.Log("placeholder connecting to server") - clientPlaceHolder := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "clientPlaceHolder", iface.DefaultMTU) + clientPlaceHolder := NewClient(serverURL, hmacTokenStore, "clientPlaceHolder", iface.DefaultMTU) err = clientPlaceHolder.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -84,7 +80,7 @@ func TestClient(t *testing.T) { defer clientPlaceHolder.Close() t.Log("Bob connecting to server") - clientBob := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "bob", iface.DefaultMTU) + clientBob := NewClient(serverURL, hmacTokenStore, "bob", iface.DefaultMTU) err = clientBob.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -124,8 +120,6 @@ func TestClient(t *testing.T) { func TestRegistration(t *testing.T) { ctx := context.Background() - serverListenAddr := "127.0.0.1:50101" - serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv, err := server.NewServer(serverCfg) if err != nil { @@ -144,7 +138,7 @@ func TestRegistration(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { _ = srv.Shutdown(ctx) @@ -163,7 +157,7 @@ func TestRegistration(t *testing.T) { func TestRegistrationTimeout(t *testing.T) { ctx := context.Background() fakeUDPListener, err := net.ListenUDP("udp", &net.UDPAddr{ - Port: 50201, + Port: 1234, IP: net.ParseIP("0.0.0.0"), }) if err != nil { @@ -174,7 +168,7 @@ func TestRegistrationTimeout(t *testing.T) { }(fakeUDPListener) fakeTCPListener, err := net.ListenTCP("tcp", &net.TCPAddr{ - Port: 50201, + Port: 1234, IP: net.ParseIP("0.0.0.0"), }) if err != nil { @@ -184,7 +178,7 @@ func TestRegistrationTimeout(t *testing.T) { _ = fakeTCPListener.Close() }(fakeTCPListener) - clientAlice := NewClient("127.0.0.1:50201", hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient("127.0.0.1:1234", hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err == nil { t.Errorf("failed to connect to server: %s", err) @@ -198,8 +192,6 @@ func TestRegistrationTimeout(t *testing.T) { func TestEcho(t *testing.T) { ctx := context.Background() - serverListenAddr := "127.0.0.1:50301" - serverCfg := newClientTestServerConfig(serverListenAddr) idAlice := "alice" idBob := "bob" srvCfg := server.ListenerConfig{Address: serverListenAddr} @@ -227,7 +219,7 @@ func TestEcho(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, idAlice, iface.DefaultMTU) + clientAlice := NewClient(serverURL, hmacTokenStore, idAlice, iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -239,7 +231,7 @@ func TestEcho(t *testing.T) { } }() - clientBob := NewClient(serverCfg.ExposedAddress, hmacTokenStore, idBob, iface.DefaultMTU) + clientBob := NewClient(serverURL, hmacTokenStore, idBob, iface.DefaultMTU) err = clientBob.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -290,8 +282,6 @@ func TestEcho(t *testing.T) { func TestBindToUnavailabePeer(t *testing.T) { ctx := context.Background() - serverListenAddr := "127.0.0.1:50401" - serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv, err := server.NewServer(serverCfg) @@ -319,7 +309,7 @@ func TestBindToUnavailabePeer(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Errorf("failed to connect to server: %s", err) @@ -338,8 +328,6 @@ func TestBindToUnavailabePeer(t *testing.T) { func TestBindReconnect(t *testing.T) { ctx := context.Background() - serverListenAddr := "127.0.0.1:50501" - serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv, err := server.NewServer(serverCfg) @@ -367,13 +355,13 @@ func TestBindReconnect(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) } - clientBob := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "bob", iface.DefaultMTU) + clientBob := NewClient(serverURL, hmacTokenStore, "bob", iface.DefaultMTU) err = clientBob.Connect(ctx) if err != nil { t.Errorf("failed to connect to server: %s", err) @@ -395,7 +383,7 @@ func TestBindReconnect(t *testing.T) { t.Errorf("failed to close client: %s", err) } - clientAlice = NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice = NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Errorf("failed to connect to server: %s", err) @@ -441,8 +429,6 @@ func TestBindReconnect(t *testing.T) { func TestCloseConn(t *testing.T) { ctx := context.Background() - serverListenAddr := "127.0.0.1:50601" - serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv, err := server.NewServer(serverCfg) @@ -470,13 +456,13 @@ func TestCloseConn(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - bob := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "bob", iface.DefaultMTU) + bob := NewClient(serverURL, hmacTokenStore, "bob", iface.DefaultMTU) err = bob.Connect(ctx) if err != nil { t.Errorf("failed to connect to server: %s", err) } - clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Errorf("failed to connect to server: %s", err) @@ -506,8 +492,6 @@ func TestCloseConn(t *testing.T) { func TestCloseRelayConn(t *testing.T) { ctx := context.Background() - serverListenAddr := "127.0.0.1:50701" - serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv, err := server.NewServer(serverCfg) @@ -534,13 +518,13 @@ func TestCloseRelayConn(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - bob := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "bob", iface.DefaultMTU) + bob := NewClient(serverURL, hmacTokenStore, "bob", iface.DefaultMTU) err = bob.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) } - clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -566,8 +550,6 @@ func TestCloseRelayConn(t *testing.T) { func TestCloseByServer(t *testing.T) { ctx := context.Background() - serverListenAddr := "127.0.0.1:50801" - serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv1, err := server.NewServer(serverCfg) @@ -590,7 +572,7 @@ func TestCloseByServer(t *testing.T) { idAlice := "alice" log.Debugf("connect by alice") - relayClient := NewClient(serverCfg.ExposedAddress, hmacTokenStore, idAlice, iface.DefaultMTU) + relayClient := NewClient(serverURL, hmacTokenStore, idAlice, iface.DefaultMTU) if err = relayClient.Connect(ctx); err != nil { log.Fatalf("failed to connect to server: %s", err) } @@ -625,8 +607,6 @@ func TestCloseByServer(t *testing.T) { func TestCloseByClient(t *testing.T) { ctx := context.Background() - serverListenAddr := "127.0.0.1:50901" - serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv, err := server.NewServer(serverCfg) @@ -648,7 +628,7 @@ func TestCloseByClient(t *testing.T) { idAlice := "alice" log.Debugf("connect by alice") - relayClient := NewClient(serverCfg.ExposedAddress, hmacTokenStore, idAlice, iface.DefaultMTU) + relayClient := NewClient(serverURL, hmacTokenStore, idAlice, iface.DefaultMTU) err = relayClient.Connect(ctx) if err != nil { log.Fatalf("failed to connect to server: %s", err) @@ -672,8 +652,6 @@ func TestCloseByClient(t *testing.T) { func TestCloseNotDrainedChannel(t *testing.T) { ctx := context.Background() - serverListenAddr := "127.0.0.1:51001" - serverCfg := newClientTestServerConfig(serverListenAddr) idAlice := "alice" idBob := "bob" srvCfg := server.ListenerConfig{Address: serverListenAddr} @@ -701,7 +679,7 @@ func TestCloseNotDrainedChannel(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, idAlice, iface.DefaultMTU) + clientAlice := NewClient(serverURL, hmacTokenStore, idAlice, iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -713,7 +691,7 @@ func TestCloseNotDrainedChannel(t *testing.T) { } }() - clientBob := NewClient(serverCfg.ExposedAddress, hmacTokenStore, idBob, iface.DefaultMTU) + clientBob := NewClient(serverURL, hmacTokenStore, idBob, iface.DefaultMTU) err = clientBob.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) diff --git a/shared/relay/client/dialer/quic/conn.go b/shared/relay/client/dialer/quic/conn.go index 1d90d7139..9243605b5 100644 --- a/shared/relay/client/dialer/quic/conn.go +++ b/shared/relay/client/dialer/quic/conn.go @@ -30,11 +30,11 @@ func (a Addr) String() string { } type Conn struct { - session *quic.Conn + session quic.Connection ctx context.Context } -func NewConn(session *quic.Conn) net.Conn { +func NewConn(session quic.Connection) net.Conn { return &Conn{ session: session, ctx: context.Background(), diff --git a/shared/relay/client/manager_test.go b/shared/relay/client/manager_test.go index fb91f7682..f00b35707 100644 --- a/shared/relay/client/manager_test.go +++ b/shared/relay/client/manager_test.go @@ -13,16 +13,6 @@ import ( "github.com/netbirdio/netbird/shared/relay/auth/allow" ) -// newManagerTestServerConfig creates a new server config for manager testing with the given address -func newManagerTestServerConfig(address string) server.Config { - return server.Config{ - Meter: otel.Meter(""), - ExposedAddress: address, - TLSSupport: false, - AuthValidator: &allow.Auth{}, - } -} - func TestEmptyURL(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -37,10 +27,15 @@ func TestForeignConn(t *testing.T) { ctx := context.Background() lstCfg1 := server.ListenerConfig{ - Address: "localhost:52101", + Address: "localhost:1234", } - srv1, err := server.NewServer(newManagerTestServerConfig(lstCfg1.Address)) + srv1, err := server.NewServer(server.Config{ + Meter: otel.Meter(""), + ExposedAddress: lstCfg1.Address, + TLSSupport: false, + AuthValidator: &allow.Auth{}, + }) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -64,9 +59,14 @@ func TestForeignConn(t *testing.T) { } srvCfg2 := server.ListenerConfig{ - Address: "localhost:52102", + Address: "localhost:2234", } - srv2, err := server.NewServer(newManagerTestServerConfig(srvCfg2.Address)) + srv2, err := server.NewServer(server.Config{ + Meter: otel.Meter(""), + ExposedAddress: srvCfg2.Address, + TLSSupport: false, + AuthValidator: &allow.Auth{}, + }) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -144,9 +144,9 @@ func TestForeginConnClose(t *testing.T) { ctx := context.Background() srvCfg1 := server.ListenerConfig{ - Address: "localhost:52201", + Address: "localhost:1234", } - srv1, err := server.NewServer(newManagerTestServerConfig(srvCfg1.Address)) + srv1, err := server.NewServer(serverCfg) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -170,9 +170,9 @@ func TestForeginConnClose(t *testing.T) { } srvCfg2 := server.ListenerConfig{ - Address: "localhost:52202", + Address: "localhost:2234", } - srv2, err := server.NewServer(newManagerTestServerConfig(srvCfg2.Address)) + srv2, err := server.NewServer(serverCfg) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -225,9 +225,9 @@ func TestForeignAutoClose(t *testing.T) { keepUnusedServerTime = 2 * time.Second srvCfg1 := server.ListenerConfig{ - Address: "localhost:52301", + Address: "localhost:1234", } - srv1, err := server.NewServer(newManagerTestServerConfig(srvCfg1.Address)) + srv1, err := server.NewServer(serverCfg) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -252,9 +252,9 @@ func TestForeignAutoClose(t *testing.T) { } srvCfg2 := server.ListenerConfig{ - Address: "localhost:52302", + Address: "localhost:2234", } - srv2, err := server.NewServer(newManagerTestServerConfig(srvCfg2.Address)) + srv2, err := server.NewServer(serverCfg) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -327,9 +327,9 @@ func TestAutoReconnect(t *testing.T) { ctx := context.Background() srvCfg := server.ListenerConfig{ - Address: "localhost:52401", + Address: "localhost:1234", } - srv, err := server.NewServer(newManagerTestServerConfig(srvCfg.Address)) + srv, err := server.NewServer(serverCfg) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -397,9 +397,14 @@ func TestNotifierDoubleAdd(t *testing.T) { ctx := context.Background() listenerCfg1 := server.ListenerConfig{ - Address: "localhost:52501", + Address: "localhost:1234", } - srv, err := server.NewServer(newManagerTestServerConfig(listenerCfg1.Address)) + srv, err := server.NewServer(server.Config{ + Meter: otel.Meter(""), + ExposedAddress: listenerCfg1.Address, + TLSSupport: false, + AuthValidator: &allow.Auth{}, + }) if err != nil { t.Fatalf("failed to create server: %s", err) } diff --git a/signal/cmd/run.go b/signal/cmd/run.go index d7662a886..bf8f8e327 100644 --- a/signal/cmd/run.go +++ b/signal/cmd/run.go @@ -73,7 +73,7 @@ var ( // detect whether user specified a port userPort := cmd.Flag("port").Changed - var tlsEnabled bool + tlsEnabled := false if signalLetsencryptDomain != "" || (signalCertFile != "" && signalCertKey != "") { tlsEnabled = true } @@ -259,8 +259,8 @@ func grpcHandlerFunc(grpcServer *grpc.Server, meter metric.Meter) http.Handler { wsProxy := wsproxyserver.New(grpcServer, wsproxyserver.WithOTelMeter(meter)) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case wsproxy.ProxyPath + wsproxy.SignalComponent: + switch { + case r.URL.Path == wsproxy.ProxyPath+wsproxy.SignalComponent: wsProxy.Handler().ServeHTTP(w, r) default: grpcServer.ServeHTTP(w, r) diff --git a/util/syslog_nonwindows.go b/util/syslog_nonwindows.go index 328bb8b1c..6ffbcb8be 100644 --- a/util/syslog_nonwindows.go +++ b/util/syslog_nonwindows.go @@ -1,4 +1,5 @@ //go:build !windows +// +build !windows package util From cf535f8c61e94f41cd3b26859a59e5c39681d0c4 Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Thu, 8 Jan 2026 06:07:59 -0500 Subject: [PATCH 028/374] [management] Fix role change in transaction and update readme (#5060) --- README.md | 4 ++-- management/cmd/management.go | 5 +++++ management/internals/server/config/config.go | 3 +++ management/internals/shared/grpc/conversion.go | 6 +++++- management/server/store/sql_store.go | 5 +++-- 5 files changed, 18 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index ebf108cdb..28b53d5b6 100644 --- a/README.md +++ b/README.md @@ -85,7 +85,7 @@ Follow the [Advanced guide with a custom identity provider](https://docs.netbird **Infrastructure requirements:** - A Linux VM with at least **1CPU** and **2GB** of memory. -- The VM should be publicly accessible on TCP ports **80** and **443** and UDP ports: **3478**, **49152-65535**. +- The VM should be publicly accessible on TCP ports **80** and **443** and UDP port: **3478**. - **Public domain** name pointing to the VM. **Software requirements:** @@ -98,7 +98,7 @@ Follow the [Advanced guide with a custom identity provider](https://docs.netbird **Steps** - Download and run the installation script: ```bash -export NETBIRD_DOMAIN=netbird.example.com; curl -fsSL https://github.com/netbirdio/netbird/releases/latest/download/getting-started-with-zitadel.sh | bash +export NETBIRD_DOMAIN=netbird.example.com; curl -fsSL https://github.com/netbirdio/netbird/releases/latest/download/getting-started.sh | bash ``` - Once finished, you can manage the resources via `docker-compose` diff --git a/management/cmd/management.go b/management/cmd/management.go index 81a154510..557cf45f8 100644 --- a/management/cmd/management.go +++ b/management/cmd/management.go @@ -215,6 +215,11 @@ func applyEmbeddedIdPConfig(cfg *nbconfig.Config) error { cfg.HttpConfig.AuthAudience = "netbird-dashboard" } + // Set CLIAuthAudience to the client app client ID + if cfg.HttpConfig.CLIAuthAudience == "" { + cfg.HttpConfig.CLIAuthAudience = "netbird-cli" + } + // Set AuthUserIDClaim to "sub" (standard OIDC claim) if cfg.HttpConfig.AuthUserIDClaim == "" { cfg.HttpConfig.AuthUserIDClaim = "sub" diff --git a/management/internals/server/config/config.go b/management/internals/server/config/config.go index 0ffc43044..7b8783943 100644 --- a/management/internals/server/config/config.go +++ b/management/internals/server/config/config.go @@ -102,6 +102,9 @@ type HttpServerConfig struct { CertKey string // AuthAudience identifies the recipients that the JWT is intended for (aud in JWT) AuthAudience string + // CLIAuthAudience identifies the client app recipients that the JWT is intended for (aud in JWT) + // Used only in conjunction with EmbeddedIdP + CLIAuthAudience string // AuthIssuer identifies principal that issued the JWT AuthIssuer string // AuthUserIDClaim is the name of the claim that used as user ID diff --git a/management/internals/shared/grpc/conversion.go b/management/internals/shared/grpc/conversion.go index f984c73df..455e6bd58 100644 --- a/management/internals/shared/grpc/conversion.go +++ b/management/internals/shared/grpc/conversion.go @@ -428,9 +428,13 @@ func buildJWTConfig(config *nbconfig.HttpServerConfig, deviceFlowConfig *nbconfi keysLocation = strings.TrimSuffix(issuer, "/") + "/.well-known/jwks.json" } + audience := config.AuthAudience + if config.CLIAuthAudience != "" { + audience = config.CLIAuthAudience + } return &proto.JWTConfig{ Issuer: issuer, - Audience: config.AuthAudience, + Audience: audience, KeysLocation: keysLocation, } } diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 3a9f8d188..f407a35e6 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -3029,8 +3029,9 @@ func (s *SqlStore) ExecuteInTransaction(ctx context.Context, operation func(stor func (s *SqlStore) withTx(tx *gorm.DB) Store { return &SqlStore{ - db: tx, - storeEngine: s.storeEngine, + db: tx, + storeEngine: s.storeEngine, + fieldEncrypt: s.fieldEncrypt, } } From 00e2689ffb02d5583bf1331f67ffa12154a3f889 Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Thu, 8 Jan 2026 14:10:09 +0300 Subject: [PATCH 029/374] [management] Fix race condition in experimental network map when deleting account (#5064) --- .../controllers/network_map/controller/controller.go | 10 +++++----- management/server/types/holder.go | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/management/internals/controllers/network_map/controller/controller.go b/management/internals/controllers/network_map/controller/controller.go index 7f0f9bd4b..f051e5331 100644 --- a/management/internals/controllers/network_map/controller/controller.go +++ b/management/internals/controllers/network_map/controller/controller.go @@ -142,7 +142,7 @@ func (c *Controller) sendUpdateAccountPeers(ctx context.Context, accountID strin err error ) if c.experimentalNetworkMap(accountID) { - account = c.getAccountFromHolderOrInit(accountID) + account = c.getAccountFromHolderOrInit(ctx, accountID) } else { account, err = c.requestBuffer.GetAccountWithBackpressure(ctx, accountID) if err != nil { @@ -414,7 +414,7 @@ func (c *Controller) GetValidatedPeerWithMap(ctx context.Context, isRequiresAppr err error ) if c.experimentalNetworkMap(accountID) { - account = c.getAccountFromHolderOrInit(accountID) + account = c.getAccountFromHolderOrInit(ctx, accountID) } else { account, err = c.requestBuffer.GetAccountWithBackpressure(ctx, accountID) if err != nil { @@ -475,7 +475,7 @@ func (c *Controller) getPeerNetworkMapExp( customZone nbdns.CustomZone, metrics *telemetry.AccountManagerMetrics, ) *types.NetworkMap { - account := c.getAccountFromHolderOrInit(accountId) + account := c.getAccountFromHolderOrInit(ctx, accountId) if account == nil { log.WithContext(ctx).Warnf("account %s not found in holder when getting peer network map", accountId) return &types.NetworkMap{ @@ -547,12 +547,12 @@ func (c *Controller) getAccountFromHolder(accountID string) *types.Account { return c.holder.GetAccount(accountID) } -func (c *Controller) getAccountFromHolderOrInit(accountID string) *types.Account { +func (c *Controller) getAccountFromHolderOrInit(ctx context.Context, accountID string) *types.Account { a := c.holder.GetAccount(accountID) if a != nil { return a } - account, err := c.holder.LoadOrStoreFunc(accountID, c.requestBuffer.GetAccountWithBackpressure) + account, err := c.holder.LoadOrStoreFunc(ctx, accountID, c.requestBuffer.GetAccountWithBackpressure) if err != nil { return nil } diff --git a/management/server/types/holder.go b/management/server/types/holder.go index ad7d07522..de8ac8110 100644 --- a/management/server/types/holder.go +++ b/management/server/types/holder.go @@ -32,13 +32,13 @@ func (h *Holder) AddAccount(account *Account) { h.accounts[account.Id] = account } -func (h *Holder) LoadOrStoreFunc(id string, accGetter func(context.Context, string) (*Account, error)) (*Account, error) { +func (h *Holder) LoadOrStoreFunc(ctx context.Context, id string, accGetter func(context.Context, string) (*Account, error)) (*Account, error) { h.mu.Lock() defer h.mu.Unlock() if acc, ok := h.accounts[id]; ok { return acc, nil } - account, err := accGetter(context.Background(), id) + account, err := accGetter(ctx, id) if err != nil { return nil, err } From 9c5b2575e3e8b5a1b95f3d923e2a5acddb71e835 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Thu, 8 Jan 2026 12:12:19 +0100 Subject: [PATCH 030/374] [misc] add embedded provider support metrics count local vs idp users if embedded --- management/internals/server/server.go | 5 +++++ management/server/metrics/selfhosted.go | 16 ++++++++++++++++ management/server/metrics/selfhosted_test.go | 18 ++++++++++++++++-- 3 files changed, 37 insertions(+), 2 deletions(-) diff --git a/management/internals/server/server.go b/management/internals/server/server.go index d5840ab41..cd8d8e8fb 100644 --- a/management/internals/server/server.go +++ b/management/internals/server/server.go @@ -129,6 +129,11 @@ func (s *BaseServer) Start(ctx context.Context) error { if s.Config.IdpManagerConfig != nil && s.Config.IdpManagerConfig.ManagerType != "" { idpManager = s.Config.IdpManagerConfig.ManagerType } + + if s.Config.EmbeddedIdP != nil && s.Config.EmbeddedIdP.Enabled { + idpManager = metrics.EmbeddedType + } + metricsWorker := metrics.NewWorker(srvCtx, installationID, s.Store(), s.PeersUpdateManager(), idpManager) go metricsWorker.Run(srvCtx) } diff --git a/management/server/metrics/selfhosted.go b/management/server/metrics/selfhosted.go index 4ce57b1da..f7a344fcd 100644 --- a/management/server/metrics/selfhosted.go +++ b/management/server/metrics/selfhosted.go @@ -13,6 +13,7 @@ import ( "time" "github.com/hashicorp/go-version" + "github.com/netbirdio/netbird/idp/dex" log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/management/server/types" @@ -28,6 +29,7 @@ const ( defaultPushInterval = 12 * time.Hour // requestTimeout http request timeout requestTimeout = 45 * time.Second + EmbeddedType = "embedded" ) type getTokenResponse struct { @@ -206,6 +208,8 @@ func (w *Worker) generateProperties(ctx context.Context) properties { peerActiveVersions []string osUIClients map[string]int rosenpassEnabled int + localUsers int + idpUsers int ) start := time.Now() metricsProperties := make(properties) @@ -266,6 +270,16 @@ func (w *Worker) generateProperties(ctx context.Context) properties { serviceUsers++ } else { users++ + if w.idpManager == EmbeddedType { + _, idpID, err := dex.DecodeDexUserID(user.Id) + if err == nil { + if idpID == "local" { + localUsers++ + } else { + idpUsers++ + } + } + } } pats += len(user.PATs) } @@ -353,6 +367,8 @@ func (w *Worker) generateProperties(ctx context.Context) properties { metricsProperties["idp_manager"] = w.idpManager metricsProperties["store_engine"] = w.dataSource.GetStoreEngine() metricsProperties["rosenpass_enabled"] = rosenpassEnabled + metricsProperties["local_users_count"] = localUsers + metricsProperties["idp_users_count"] = idpUsers for protocol, count := range rulesProtocol { metricsProperties["rules_protocol_"+protocol] = count diff --git a/management/server/metrics/selfhosted_test.go b/management/server/metrics/selfhosted_test.go index db0d90e64..d0ab45cd7 100644 --- a/management/server/metrics/selfhosted_test.go +++ b/management/server/metrics/selfhosted_test.go @@ -5,6 +5,7 @@ import ( "testing" nbdns "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/idp/dex" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" networkTypes "github.com/netbirdio/netbird/management/server/networks/types" @@ -25,6 +26,8 @@ func (mockDatasource) GetAllConnectedPeers() map[string]struct{} { // GetAllAccounts returns a list of *server.Account for use in tests with predefined information func (mockDatasource) GetAllAccounts(_ context.Context) []*types.Account { + localUserID := dex.EncodeDexUserID("10", "local") + idpUserID := dex.EncodeDexUserID("20", "zitadel") return []*types.Account{ { Id: "1", @@ -98,12 +101,14 @@ func (mockDatasource) GetAllAccounts(_ context.Context) []*types.Account { }, Users: map[string]*types.User{ "1": { + Id: "1", IsServiceUser: true, PATs: map[string]*types.PersonalAccessToken{ "1": {}, }, }, - "2": { + localUserID: { + Id: localUserID, IsServiceUser: false, PATs: map[string]*types.PersonalAccessToken{ "1": {}, @@ -162,12 +167,14 @@ func (mockDatasource) GetAllAccounts(_ context.Context) []*types.Account { }, Users: map[string]*types.User{ "1": { + Id: "1", IsServiceUser: true, PATs: map[string]*types.PersonalAccessToken{ "1": {}, }, }, - "2": { + idpUserID: { + Id: idpUserID, IsServiceUser: false, PATs: map[string]*types.PersonalAccessToken{ "1": {}, @@ -214,6 +221,7 @@ func TestGenerateProperties(t *testing.T) { worker := Worker{ dataSource: ds, connManager: ds, + idpManager: EmbeddedType, } properties := worker.generateProperties(context.Background()) @@ -327,4 +335,10 @@ func TestGenerateProperties(t *testing.T) { t.Errorf("expected 1 active_users_last_day, got %d", properties["active_users_last_day"]) } + if properties["local_users_count"] != 1 { + t.Errorf("expected 1 local_users_count, got %d", properties["local_users_count"]) + } + if properties["idp_users_count"] != 1 { + t.Errorf("expected 1 idp_users_count, got %d", properties["idp_users_count"]) + } } From ab7d6b2196aff750be56300ed832c1dd6b6914f2 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Thu, 8 Jan 2026 12:12:50 +0100 Subject: [PATCH 031/374] [misc] add new getting started to release (#5057) --- .goreleaser.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 952e946dc..7c6651f83 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -713,8 +713,10 @@ checksum: extra_files: - glob: ./infrastructure_files/getting-started-with-zitadel.sh - glob: ./release_files/install.sh + - glob: ./infrastructure_files/getting-started.sh release: extra_files: - glob: ./infrastructure_files/getting-started-with-zitadel.sh - glob: ./release_files/install.sh + - glob: ./infrastructure_files/getting-started.sh From fb71b0d04be59dd3dd0aadd4fb00fe59fa6c94ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Diego=20Nogu=C3=AAs?= <49420+diegocn@users.noreply.github.com> Date: Thu, 8 Jan 2026 12:49:45 +0100 Subject: [PATCH 032/374] [infrastructure] fix: disable Caddy debug (#5067) --- infrastructure_files/getting-started.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh index e25b943a0..b693f807e 100755 --- a/infrastructure_files/getting-started.sh +++ b/infrastructure_files/getting-started.sh @@ -169,8 +169,7 @@ init_environment() { render_caddyfile() { cat < Date: Thu, 8 Jan 2026 18:58:22 +0100 Subject: [PATCH 033/374] Revert "Revert "[relay] Update GO version and QUIC version (#4736)" (#5055)" (#5071) This reverts commit 24df442198f179c8b24ce8eedefa828dec0f566f. --- .devcontainer/Dockerfile | 14 +- .github/workflows/golang-test-freebsd.yml | 2 +- .github/workflows/golang-test-linux.yml | 4 +- .github/workflows/golangci-lint.yml | 7 +- .github/workflows/release.yml | 2 +- .github/workflows/wasm-build-validation.yml | 13 +- .golangci.yaml | 255 +++++++++--------- client/cmd/debug.go | 1 + client/cmd/login.go | 2 + client/cmd/pprof.go | 1 - client/cmd/root.go | 1 + client/cmd/status.go | 1 + client/cmd/testutil_test.go | 3 - client/cmd/up.go | 1 + client/firewall/iptables/acl_linux.go | 5 +- .../firewall/iptables/manager_linux_test.go | 2 +- .../firewall/nftables/manager_linux_test.go | 13 +- client/firewall/uspfilter/filter.go | 2 +- client/firewall/uspfilter/localip.go | 1 + client/firewall/uspfilter/localip_test.go | 4 +- client/firewall/uspfilter/nat_test.go | 5 +- client/iface/device/device_ios.go | 3 - client/internal/debug/debug_linux.go | 10 +- client/internal/iface.go | 1 - client/internal/routemanager/iface/iface.go | 1 - .../systemops/systemops_generic.go | 14 +- client/ios/NetBirdSDK/client.go | 2 +- client/server/panic_windows.go | 1 - client/ssh/server/jwt_test.go | 5 +- client/system/info_android.go | 3 - client/system/info_darwin.go | 1 - client/system/info_ios.go | 3 - client/ui/client_ui.go | 12 +- client/ui/signal_windows.go | 2 +- go.mod | 10 +- go.sum | 13 +- management/cmd/management.go | 2 +- .../internals/shared/grpc/loginfilter_test.go | 1 + management/server/account.go | 2 +- management/server/account_test.go | 4 +- .../policies/posture_checks_handler_test.go | 2 +- .../peers_handler_benchmark_test.go | 1 - .../setupkeys_handler_benchmark_test.go | 1 - .../users_handler_benchmark_test.go | 1 - .../setupkeys_handler_integration_test.go | 1 - management/server/idp/pocketid.go | 4 +- management/server/idp/zitadel.go | 2 +- management/server/migration/migration.go | 2 +- management/server/nameserver.go | 4 +- management/server/posture_checks.go | 2 +- .../store/sql_store_get_account_test.go | 5 +- management/server/store/sql_store_test.go | 19 +- management/server/testutil/store.go | 1 - management/server/testutil/store_ios.go | 1 - relay/cmd/pprof.go | 1 - relay/server/listener/quic/conn.go | 4 +- relay/server/listener/ws/conn.go | 2 +- .../management/client/rest/accounts_test.go | 1 - shared/management/client/rest/client.go | 4 +- shared/management/client/rest/client_test.go | 1 - shared/management/client/rest/dns_test.go | 1 - shared/management/client/rest/events_test.go | 1 - shared/management/client/rest/geo_test.go | 1 - shared/management/client/rest/groups_test.go | 1 - .../client/rest/impersonation_test.go | 1 - .../management/client/rest/networks_test.go | 1 - shared/management/client/rest/peers_test.go | 1 - .../management/client/rest/policies_test.go | 1 - .../client/rest/posturechecks_test.go | 1 - shared/management/client/rest/routes_test.go | 1 - .../management/client/rest/setupkeys_test.go | 1 - shared/management/client/rest/tokens_test.go | 1 - shared/management/client/rest/users_test.go | 1 - shared/relay/client/client_test.go | 82 +++--- shared/relay/client/dialer/quic/conn.go | 4 +- shared/relay/client/manager_test.go | 57 ++-- signal/cmd/run.go | 6 +- util/syslog_nonwindows.go | 1 - 78 files changed, 311 insertions(+), 340 deletions(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 9e5e97a31..80809e667 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,15 +1,15 @@ -FROM golang:1.23-bullseye +FROM golang:1.25-bookworm RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ && apt-get -y install --no-install-recommends\ - gettext-base=0.21-4 \ - iptables=1.8.7-1 \ - libgl1-mesa-dev=20.3.5-1 \ - xorg-dev=1:7.7+22 \ - libayatana-appindicator3-dev=0.5.5-2+deb11u2 \ + gettext-base=0.21-12 \ + iptables=1.8.9-2 \ + libgl1-mesa-dev=22.3.6-1+deb12u1 \ + xorg-dev=1:7.7+23 \ + libayatana-appindicator3-dev=0.5.92-1 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* \ - && go install -v golang.org/x/tools/gopls@v0.18.1 + && go install -v golang.org/x/tools/gopls@latest WORKDIR /app diff --git a/.github/workflows/golang-test-freebsd.yml b/.github/workflows/golang-test-freebsd.yml index 0d19e8a19..df64e86bb 100644 --- a/.github/workflows/golang-test-freebsd.yml +++ b/.github/workflows/golang-test-freebsd.yml @@ -25,7 +25,7 @@ jobs: release: "14.2" prepare: | pkg install -y curl pkgconf xorg - GO_TARBALL="go1.24.10.freebsd-amd64.tar.gz" + GO_TARBALL="go1.25.3.freebsd-amd64.tar.gz" GO_URL="https://go.dev/dl/$GO_TARBALL" curl -vLO "$GO_URL" tar -C /usr/local -vxzf "$GO_TARBALL" diff --git a/.github/workflows/golang-test-linux.yml b/.github/workflows/golang-test-linux.yml index c09bfab39..195a37a1f 100644 --- a/.github/workflows/golang-test-linux.yml +++ b/.github/workflows/golang-test-linux.yml @@ -200,7 +200,7 @@ jobs: -e GOCACHE=${CONTAINER_GOCACHE} \ -e GOMODCACHE=${CONTAINER_GOMODCACHE} \ -e CONTAINER=${CONTAINER} \ - golang:1.24-alpine \ + golang:1.25-alpine \ sh -c ' \ apk update; apk add --no-cache \ ca-certificates iptables ip6tables dbus dbus-dev libpcap-dev build-base; \ @@ -259,7 +259,7 @@ jobs: CGO_ENABLED=1 GOARCH=${{ matrix.arch }} \ go test ${{ matrix.raceFlag }} \ -exec 'sudo' \ - -timeout 10m ./relay/... ./shared/relay/... + -timeout 10m -p 1 ./relay/... ./shared/relay/... test_signal: name: "Signal / Unit" diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index c524f6f6b..9ce779dbb 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -52,7 +52,10 @@ jobs: if: matrix.os == 'ubuntu-latest' run: sudo apt update && sudo apt install -y -q libgtk-3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev libpcap-dev - name: golangci-lint - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 with: version: latest - args: --timeout=12m --out-format colored-line-number + skip-cache: true + skip-save-cache: true + cache-invalidation-interval: 0 + args: --timeout=12m diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2fa847dce..84f6f64ed 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -63,7 +63,7 @@ jobs: pkg install -y git curl portlint go # Install Go for building - GO_TARBALL="go1.24.10.freebsd-amd64.tar.gz" + GO_TARBALL="go1.25.5.freebsd-amd64.tar.gz" GO_URL="https://go.dev/dl/$GO_TARBALL" curl -LO "$GO_URL" tar -C /usr/local -xzf "$GO_TARBALL" diff --git a/.github/workflows/wasm-build-validation.yml b/.github/workflows/wasm-build-validation.yml index 4100e16dd..47e45165b 100644 --- a/.github/workflows/wasm-build-validation.yml +++ b/.github/workflows/wasm-build-validation.yml @@ -14,6 +14,9 @@ jobs: js_lint: name: "JS / Lint" runs-on: ubuntu-latest + env: + GOOS: js + GOARCH: wasm steps: - name: Checkout repository uses: actions/checkout@v4 @@ -24,16 +27,14 @@ jobs: - name: Install dependencies run: sudo apt update && sudo apt install -y -q libgtk-3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev libpcap-dev - name: Install golangci-lint - uses: golangci/golangci-lint-action@d6238b002a20823d52840fda27e2d4891c5952dc + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 with: version: latest install-mode: binary skip-cache: true - skip-pkg-cache: true - skip-build-cache: true - - name: Run golangci-lint for WASM - run: | - GOOS=js GOARCH=wasm golangci-lint run --timeout=12m --out-format colored-line-number ./client/... + skip-save-cache: true + cache-invalidation-interval: 0 + working-directory: ./client continue-on-error: true js_build: diff --git a/.golangci.yaml b/.golangci.yaml index 461677c2e..d81ad1377 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -1,139 +1,124 @@ -run: - # Timeout for analysis, e.g. 30s, 5m. - # Default: 1m - timeout: 6m - -# This file contains only configs which differ from defaults. -# All possible options can be found here https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml -linters-settings: - errcheck: - # Report about not checking of errors in type assertions: `a := b.(MyStruct)`. - # Such cases aren't reported by default. - # Default: false - check-type-assertions: false - - gosec: - includes: - - G101 # Look for hard coded credentials - #- G102 # Bind to all interfaces - - G103 # Audit the use of unsafe block - - G104 # Audit errors not checked - - G106 # Audit the use of ssh.InsecureIgnoreHostKey - #- G107 # Url provided to HTTP request as taint input - - G108 # Profiling endpoint automatically exposed on /debug/pprof - - G109 # Potential Integer overflow made by strconv.Atoi result conversion to int16/32 - - G110 # Potential DoS vulnerability via decompression bomb - - G111 # Potential directory traversal - #- G112 # Potential slowloris attack - - G113 # Usage of Rat.SetString in math/big with an overflow (CVE-2022-23772) - #- G114 # Use of net/http serve function that has no support for setting timeouts - - G201 # SQL query construction using format string - - G202 # SQL query construction using string concatenation - - G203 # Use of unescaped data in HTML templates - #- G204 # Audit use of command execution - - G301 # Poor file permissions used when creating a directory - - G302 # Poor file permissions used with chmod - - G303 # Creating tempfile using a predictable path - - G304 # File path provided as taint input - - G305 # File traversal when extracting zip/tar archive - - G306 # Poor file permissions used when writing to a new file - - G307 # Poor file permissions used when creating a file with os.Create - #- G401 # Detect the usage of DES, RC4, MD5 or SHA1 - #- G402 # Look for bad TLS connection settings - - G403 # Ensure minimum RSA key length of 2048 bits - #- G404 # Insecure random number source (rand) - #- G501 # Import blocklist: crypto/md5 - - G502 # Import blocklist: crypto/des - - G503 # Import blocklist: crypto/rc4 - - G504 # Import blocklist: net/http/cgi - #- G505 # Import blocklist: crypto/sha1 - - G601 # Implicit memory aliasing of items from a range statement - - G602 # Slice access out of bounds - - gocritic: - disabled-checks: - - commentFormatting - - captLocal - - deprecatedComment - - govet: - # Enable all analyzers. - # Default: false - enable-all: false - enable: - - nilness - - revive: - rules: - - name: exported - severity: warning - disabled: false - arguments: - - "checkPrivateReceivers" - - "sayRepetitiveInsteadOfStutters" - tenv: - # The option `all` will run against whole test files (`_test.go`) regardless of method/function signatures. - # Otherwise, only methods that take `*testing.T`, `*testing.B`, and `testing.TB` as arguments are checked. - # Default: false - all: true - +version: "2" linters: - disable-all: true + default: none enable: - ## enabled by default - - errcheck # checking for unchecked errors, these unchecked errors can be critical bugs in some cases - - gosimple # specializes in simplifying a code - - govet # reports suspicious constructs, such as Printf calls whose arguments do not align with the format string - - ineffassign # detects when assignments to existing variables are not used - - staticcheck # is a go vet on steroids, applying a ton of static analysis checks - - tenv # Tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17. - - typecheck # like the front-end of a Go compiler, parses and type-checks Go code - - unused # checks for unused constants, variables, functions and types - ## disable by default but the have interesting results so lets add them - - bodyclose # checks whether HTTP response body is closed successfully - - dupword # dupword checks for duplicate words in the source code - - durationcheck # durationcheck checks for two durations multiplied together - - forbidigo # forbidigo forbids identifiers - - gocritic # provides diagnostics that check for bugs, performance and style issues - - gosec # inspects source code for security problems - - mirror # mirror reports wrong mirror patterns of bytes/strings usage - - misspell # misspess finds commonly misspelled English words in comments - - nilerr # finds the code that returns nil even if it checks that the error is not nil - - nilnil # checks that there is no simultaneous return of nil error and an invalid value - - predeclared # predeclared finds code that shadows one of Go's predeclared identifiers - - revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint. - - sqlclosecheck # checks that sql.Rows and sql.Stmt are closed - # - thelper # thelper detects Go test helpers without t.Helper() call and checks the consistency of test helpers. - - wastedassign # wastedassign finds wasted assignment statements + - bodyclose + - dupword + - durationcheck + - errcheck + - forbidigo + - gocritic + - gosec + - govet + - ineffassign + - mirror + - misspell + - nilerr + - nilnil + - predeclared + - revive + - sqlclosecheck + - staticcheck + - unused + - wastedassign + settings: + errcheck: + check-type-assertions: false + gocritic: + disabled-checks: + - commentFormatting + - captLocal + - deprecatedComment + gosec: + includes: + - G101 + - G103 + - G104 + - G106 + - G108 + - G109 + - G110 + - G111 + - G201 + - G202 + - G203 + - G301 + - G302 + - G303 + - G304 + - G305 + - G306 + - G307 + - G403 + - G502 + - G503 + - G504 + - G601 + - G602 + govet: + enable: + - nilness + enable-all: false + revive: + rules: + - name: exported + arguments: + - checkPrivateReceivers + - sayRepetitiveInsteadOfStutters + severity: warning + disabled: false + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - linters: + - forbidigo + path: management/cmd/root\.go + - linters: + - forbidigo + path: signal/cmd/root\.go + - linters: + - unused + path: sharedsock/filter\.go + - linters: + - unused + path: client/firewall/iptables/rule\.go + - linters: + - gosec + - mirror + path: test\.go + - linters: + - nilnil + path: mock\.go + - linters: + - staticcheck + text: grpc.DialContext is deprecated + - linters: + - staticcheck + text: grpc.WithBlock is deprecated + - linters: + - staticcheck + text: "QF1001" + - linters: + - staticcheck + text: "QF1008" + - linters: + - staticcheck + text: "QF1012" + paths: + - third_party$ + - builtin$ + - examples$ issues: - # Maximum count of issues with the same text. - # Set to 0 to disable. - # Default: 3 max-same-issues: 5 - - exclude-rules: - # allow fmt - - path: management/cmd/root\.go - linters: forbidigo - - path: signal/cmd/root\.go - linters: forbidigo - - path: sharedsock/filter\.go - linters: - - unused - - path: client/firewall/iptables/rule\.go - linters: - - unused - - path: test\.go - linters: - - mirror - - gosec - - path: mock\.go - linters: - - nilnil - # Exclude specific deprecation warnings for grpc methods - - linters: - - staticcheck - text: "grpc.DialContext is deprecated" - - linters: - - staticcheck - text: "grpc.WithBlock is deprecated" +formatters: + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/client/cmd/debug.go b/client/cmd/debug.go index 430012a17..7ca56857b 100644 --- a/client/cmd/debug.go +++ b/client/cmd/debug.go @@ -136,6 +136,7 @@ func setLogLevel(cmd *cobra.Command, args []string) error { client := proto.NewDaemonServiceClient(conn) level := server.ParseLogLevel(args[0]) if level == proto.LogLevel_UNKNOWN { + //nolint return fmt.Errorf("unknown log level: %s. Available levels are: panic, fatal, error, warn, info, debug, trace\n", args[0]) } diff --git a/client/cmd/login.go b/client/cmd/login.go index a34bb7c70..57c010571 100644 --- a/client/cmd/login.go +++ b/client/cmd/login.go @@ -81,6 +81,7 @@ var loginCmd = &cobra.Command{ func doDaemonLogin(ctx context.Context, cmd *cobra.Command, providedSetupKey string, activeProf *profilemanager.Profile, username string, pm *profilemanager.ProfileManager) error { conn, err := DialClientGRPCServer(ctx, daemonAddr) if err != nil { + //nolint return fmt.Errorf("failed to connect to daemon error: %v\n"+ "If the daemon is not running please run: "+ "\nnetbird service install \nnetbird service start\n", err) @@ -206,6 +207,7 @@ func switchProfileOnDaemon(ctx context.Context, pm *profilemanager.ProfileManage func switchProfile(ctx context.Context, profileName string, username string) error { conn, err := DialClientGRPCServer(ctx, daemonAddr) if err != nil { + //nolint return fmt.Errorf("failed to connect to daemon error: %v\n"+ "If the daemon is not running please run: "+ "\nnetbird service install \nnetbird service start\n", err) diff --git a/client/cmd/pprof.go b/client/cmd/pprof.go index 37efd35f0..c041c6ea9 100644 --- a/client/cmd/pprof.go +++ b/client/cmd/pprof.go @@ -1,5 +1,4 @@ //go:build pprof -// +build pprof package cmd diff --git a/client/cmd/root.go b/client/cmd/root.go index 30120c196..f4f4f6052 100644 --- a/client/cmd/root.go +++ b/client/cmd/root.go @@ -390,6 +390,7 @@ func getClient(cmd *cobra.Command) (*grpc.ClientConn, error) { conn, err := DialClientGRPCServer(cmd.Context(), daemonAddr) if err != nil { + //nolint return nil, fmt.Errorf("failed to connect to daemon error: %v\n"+ "If the daemon is not running please run: "+ "\nnetbird service install \nnetbird service start\n", err) diff --git a/client/cmd/status.go b/client/cmd/status.go index 06460a6a7..99d47cd1a 100644 --- a/client/cmd/status.go +++ b/client/cmd/status.go @@ -124,6 +124,7 @@ func statusFunc(cmd *cobra.Command, args []string) error { func getStatus(ctx context.Context, shouldRunProbes bool) (*proto.StatusResponse, error) { conn, err := DialClientGRPCServer(ctx, daemonAddr) if err != nil { + //nolint return nil, fmt.Errorf("failed to connect to daemon error: %v\n"+ "If the daemon is not running please run: "+ "\nnetbird service install \nnetbird service start\n", err) diff --git a/client/cmd/testutil_test.go b/client/cmd/testutil_test.go index 888a9a3f7..2650d6225 100644 --- a/client/cmd/testutil_test.go +++ b/client/cmd/testutil_test.go @@ -89,9 +89,6 @@ func startManagement(t *testing.T, config *config.Config, testFile string) (*grp t.Cleanup(cleanUp) eventStore := &activity.InMemoryEventStore{} - if err != nil { - return nil, nil - } ctrl := gomock.NewController(t) t.Cleanup(ctrl.Finish) diff --git a/client/cmd/up.go b/client/cmd/up.go index 9efc2e60d..057d35268 100644 --- a/client/cmd/up.go +++ b/client/cmd/up.go @@ -216,6 +216,7 @@ func runInDaemonMode(ctx context.Context, cmd *cobra.Command, pm *profilemanager conn, err := DialClientGRPCServer(ctx, daemonAddr) if err != nil { + //nolint return fmt.Errorf("failed to connect to daemon error: %v\n"+ "If the daemon is not running please run: "+ "\nnetbird service install \nnetbird service start\n", err) diff --git a/client/firewall/iptables/acl_linux.go b/client/firewall/iptables/acl_linux.go index 5ccaf17ba..d83798f09 100644 --- a/client/firewall/iptables/acl_linux.go +++ b/client/firewall/iptables/acl_linux.go @@ -386,11 +386,8 @@ func (m *aclManager) updateState() { // filterRuleSpecs returns the specs of a filtering rule func filterRuleSpecs(ip net.IP, protocol string, sPort, dPort *firewall.Port, action firewall.Action, ipsetName string) (specs []string) { - matchByIP := true // don't use IP matching if IP is 0.0.0.0 - if ip.IsUnspecified() { - matchByIP = false - } + matchByIP := !ip.IsUnspecified() if matchByIP { if ipsetName != "" { diff --git a/client/firewall/iptables/manager_linux_test.go b/client/firewall/iptables/manager_linux_test.go index 6b5401e2b..ee47a27c0 100644 --- a/client/firewall/iptables/manager_linux_test.go +++ b/client/firewall/iptables/manager_linux_test.go @@ -161,7 +161,7 @@ func TestIptablesManagerDenyRules(t *testing.T) { t.Logf(" [%d] %s", i, rule) } - var denyRuleIndex, acceptRuleIndex int = -1, -1 + var denyRuleIndex, acceptRuleIndex = -1, -1 for i, rule := range rules { if strings.Contains(rule, "DROP") { t.Logf("Found DROP rule at index %d: %s", i, rule) diff --git a/client/firewall/nftables/manager_linux_test.go b/client/firewall/nftables/manager_linux_test.go index 6b29c5606..75b1e2b6c 100644 --- a/client/firewall/nftables/manager_linux_test.go +++ b/client/firewall/nftables/manager_linux_test.go @@ -198,7 +198,7 @@ func TestNftablesManagerRuleOrder(t *testing.T) { t.Logf("Found %d rules in nftables chain", len(rules)) // Find the accept and deny rules and verify deny comes before accept - var acceptRuleIndex, denyRuleIndex int = -1, -1 + var acceptRuleIndex, denyRuleIndex = -1, -1 for i, rule := range rules { hasAcceptHTTPSet := false hasDenyHTTPSet := false @@ -208,11 +208,13 @@ func TestNftablesManagerRuleOrder(t *testing.T) { for _, e := range rule.Exprs { // Check for set lookup if lookup, ok := e.(*expr.Lookup); ok { - if lookup.SetName == "accept-http" { + switch lookup.SetName { + case "accept-http": hasAcceptHTTPSet = true - } else if lookup.SetName == "deny-http" { + case "deny-http": hasDenyHTTPSet = true } + } // Check for port 80 if cmp, ok := e.(*expr.Cmp); ok { @@ -222,9 +224,10 @@ func TestNftablesManagerRuleOrder(t *testing.T) { } // Check for verdict if verdict, ok := e.(*expr.Verdict); ok { - if verdict.Kind == expr.VerdictAccept { + switch verdict.Kind { + case expr.VerdictAccept: action = "ACCEPT" - } else if verdict.Kind == expr.VerdictDrop { + case expr.VerdictDrop: action = "DROP" } } diff --git a/client/firewall/uspfilter/filter.go b/client/firewall/uspfilter/filter.go index 4e22bde3f..3d3d79631 100644 --- a/client/firewall/uspfilter/filter.go +++ b/client/firewall/uspfilter/filter.go @@ -795,7 +795,7 @@ func (m *Manager) recalculateTCPChecksum(packetData []byte, d *decoder, tcpHeade pseudoSum += uint32(d.ip4.Protocol) pseudoSum += uint32(tcpLength) - var sum uint32 = pseudoSum + var sum = pseudoSum for i := 0; i < tcpLength-1; i += 2 { sum += uint32(tcpLayer[i])<<8 | uint32(tcpLayer[i+1]) } diff --git a/client/firewall/uspfilter/localip.go b/client/firewall/uspfilter/localip.go index 7f6b52c71..ffc807f46 100644 --- a/client/firewall/uspfilter/localip.go +++ b/client/firewall/uspfilter/localip.go @@ -130,6 +130,7 @@ func (m *localIPManager) UpdateLocalIPs(iface common.IFaceMapper) (err error) { // 127.0.0.0/8 newIPv4Bitmap[127] = &ipv4LowBitmap{} for i := 0; i < 8192; i++ { + // #nosec G602 -- bitmap is defined as [8192]uint32, loop range is correct newIPv4Bitmap[127].bitmap[i] = 0xFFFFFFFF } diff --git a/client/firewall/uspfilter/localip_test.go b/client/firewall/uspfilter/localip_test.go index 45ac912cd..6653947fa 100644 --- a/client/firewall/uspfilter/localip_test.go +++ b/client/firewall/uspfilter/localip_test.go @@ -218,7 +218,7 @@ func BenchmarkIPChecks(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { // nolint:gosimple - _, _ = mapManager.localIPs[ip.String()] + _ = mapManager.localIPs[ip.String()] } }) @@ -227,7 +227,7 @@ func BenchmarkIPChecks(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { // nolint:gosimple - _, _ = mapManager.localIPs[ip.String()] + _ = mapManager.localIPs[ip.String()] } }) } diff --git a/client/firewall/uspfilter/nat_test.go b/client/firewall/uspfilter/nat_test.go index 400d61020..50743d006 100644 --- a/client/firewall/uspfilter/nat_test.go +++ b/client/firewall/uspfilter/nat_test.go @@ -234,9 +234,10 @@ func TestInboundPortDNATNegative(t *testing.T) { require.False(t, translated, "Packet should NOT be translated for %s", tc.name) d = parsePacket(t, packet) - if tc.protocol == layers.IPProtocolTCP { + switch tc.protocol { + case layers.IPProtocolTCP: require.Equal(t, tc.dstPort, uint16(d.tcp.DstPort), "Port should remain unchanged") - } else if tc.protocol == layers.IPProtocolUDP { + case layers.IPProtocolUDP: require.Equal(t, tc.dstPort, uint16(d.udp.DstPort), "Port should remain unchanged") } }) diff --git a/client/iface/device/device_ios.go b/client/iface/device/device_ios.go index d841ac2fe..aa77cee45 100644 --- a/client/iface/device/device_ios.go +++ b/client/iface/device/device_ios.go @@ -1,6 +1,3 @@ -//go:build ios -// +build ios - package device import ( diff --git a/client/internal/debug/debug_linux.go b/client/internal/debug/debug_linux.go index 39d796fda..aedf88b79 100644 --- a/client/internal/debug/debug_linux.go +++ b/client/internal/debug/debug_linux.go @@ -507,15 +507,13 @@ func formatPayloadWithCmp(p *expr.Payload, cmp *expr.Cmp) string { if p.Base == expr.PayloadBaseNetworkHeader { switch p.Offset { case 12: - if p.Len == 4 { - return fmt.Sprintf("ip saddr %s %s", formatCmpOp(cmp.Op), formatIPBytes(cmp.Data)) - } else if p.Len == 2 { + switch p.Len { + case 4, 2: return fmt.Sprintf("ip saddr %s %s", formatCmpOp(cmp.Op), formatIPBytes(cmp.Data)) } case 16: - if p.Len == 4 { - return fmt.Sprintf("ip daddr %s %s", formatCmpOp(cmp.Op), formatIPBytes(cmp.Data)) - } else if p.Len == 2 { + switch p.Len { + case 4, 2: return fmt.Sprintf("ip daddr %s %s", formatCmpOp(cmp.Op), formatIPBytes(cmp.Data)) } } diff --git a/client/internal/iface.go b/client/internal/iface.go index bd0069c19..a82d87aab 100644 --- a/client/internal/iface.go +++ b/client/internal/iface.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package internal diff --git a/client/internal/routemanager/iface/iface.go b/client/internal/routemanager/iface/iface.go index 57dbec03d..b44d9fa65 100644 --- a/client/internal/routemanager/iface/iface.go +++ b/client/internal/routemanager/iface/iface.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package iface diff --git a/client/internal/routemanager/systemops/systemops_generic.go b/client/internal/routemanager/systemops/systemops_generic.go index 26a548634..ec219c7fe 100644 --- a/client/internal/routemanager/systemops/systemops_generic.go +++ b/client/internal/routemanager/systemops/systemops_generic.go @@ -210,7 +210,8 @@ func (r *SysOps) refreshLocalSubnetsCache() { func (r *SysOps) genericAddVPNRoute(prefix netip.Prefix, intf *net.Interface) error { nextHop := Nexthop{netip.Addr{}, intf} - if prefix == vars.Defaultv4 { + switch prefix { + case vars.Defaultv4: if err := r.addToRouteTable(splitDefaultv4_1, nextHop); err != nil { return err } @@ -233,7 +234,7 @@ func (r *SysOps) genericAddVPNRoute(prefix netip.Prefix, intf *net.Interface) er } return nil - } else if prefix == vars.Defaultv6 { + case vars.Defaultv6: if err := r.addToRouteTable(splitDefaultv6_1, nextHop); err != nil { return fmt.Errorf("add unreachable route split 1: %w", err) } @@ -255,7 +256,8 @@ func (r *SysOps) genericAddVPNRoute(prefix netip.Prefix, intf *net.Interface) er func (r *SysOps) genericRemoveVPNRoute(prefix netip.Prefix, intf *net.Interface) error { nextHop := Nexthop{netip.Addr{}, intf} - if prefix == vars.Defaultv4 { + switch prefix { + case vars.Defaultv4: var result *multierror.Error if err := r.removeFromRouteTable(splitDefaultv4_1, nextHop); err != nil { result = multierror.Append(result, err) @@ -273,7 +275,7 @@ func (r *SysOps) genericRemoveVPNRoute(prefix netip.Prefix, intf *net.Interface) } return nberrors.FormatErrorOrNil(result) - } else if prefix == vars.Defaultv6 { + case vars.Defaultv6: var result *multierror.Error if err := r.removeFromRouteTable(splitDefaultv6_1, nextHop); err != nil { result = multierror.Append(result, err) @@ -283,9 +285,9 @@ func (r *SysOps) genericRemoveVPNRoute(prefix netip.Prefix, intf *net.Interface) } return nberrors.FormatErrorOrNil(result) + default: + return r.removeFromRouteTable(prefix, nextHop) } - - return r.removeFromRouteTable(prefix, nextHop) } func (r *SysOps) setupHooks(initAddresses []net.IP, stateManager *statemanager.Manager) error { diff --git a/client/ios/NetBirdSDK/client.go b/client/ios/NetBirdSDK/client.go index e901386d9..935910fc9 100644 --- a/client/ios/NetBirdSDK/client.go +++ b/client/ios/NetBirdSDK/client.go @@ -76,7 +76,7 @@ type Client struct { loginComplete bool connectClient *internal.ConnectClient // preloadedConfig holds config loaded from JSON (used on tvOS where file writes are blocked) - preloadedConfig *profilemanager.Config + preloadedConfig *profilemanager.Config } // NewClient instantiate a new Client diff --git a/client/server/panic_windows.go b/client/server/panic_windows.go index f441ec9ea..8592f12ad 100644 --- a/client/server/panic_windows.go +++ b/client/server/panic_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package server diff --git a/client/ssh/server/jwt_test.go b/client/ssh/server/jwt_test.go index d36d7cbbf..6eb88accc 100644 --- a/client/ssh/server/jwt_test.go +++ b/client/ssh/server/jwt_test.go @@ -602,12 +602,13 @@ func TestJWTAuthentication(t *testing.T) { require.NoError(t, err) var authMethods []cryptossh.AuthMethod - if tc.token == "valid" { + switch tc.token { + case "valid": token := generateValidJWT(t, privateKey, issuer, audience) authMethods = []cryptossh.AuthMethod{ cryptossh.Password(token), } - } else if tc.token == "invalid" { + case "invalid": invalidToken := "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.invalid" authMethods = []cryptossh.AuthMethod{ cryptossh.Password(invalidToken), diff --git a/client/system/info_android.go b/client/system/info_android.go index 78895bfa8..794ff15ed 100644 --- a/client/system/info_android.go +++ b/client/system/info_android.go @@ -1,6 +1,3 @@ -//go:build android -// +build android - package system import ( diff --git a/client/system/info_darwin.go b/client/system/info_darwin.go index caa344737..4a31920ec 100644 --- a/client/system/info_darwin.go +++ b/client/system/info_darwin.go @@ -1,5 +1,4 @@ //go:build !ios -// +build !ios package system diff --git a/client/system/info_ios.go b/client/system/info_ios.go index 705c37920..322609db4 100644 --- a/client/system/info_ios.go +++ b/client/system/info_ios.go @@ -1,6 +1,3 @@ -//go:build ios -// +build ios - package system import ( diff --git a/client/ui/client_ui.go b/client/ui/client_ui.go index 78934ea95..5d955ed25 100644 --- a/client/ui/client_ui.go +++ b/client/ui/client_ui.go @@ -510,7 +510,7 @@ func (s *serviceClient) saveSettings() { // Continue with default behavior if features can't be retrieved } else if features != nil && features.DisableUpdateSettings { log.Warn("Configuration updates are disabled by daemon") - dialog.ShowError(fmt.Errorf("Configuration updates are disabled by daemon"), s.wSettings) + dialog.ShowError(fmt.Errorf("configuration updates are disabled by daemon"), s.wSettings) return } @@ -540,7 +540,7 @@ func (s *serviceClient) saveSettings() { func (s *serviceClient) validateSettings() error { if s.iPreSharedKey.Text != "" && s.iPreSharedKey.Text != censoredPreSharedKey { if _, err := wgtypes.ParseKey(s.iPreSharedKey.Text); err != nil { - return fmt.Errorf("Invalid Pre-shared Key Value") + return fmt.Errorf("invalid pre-shared key value") } } return nil @@ -549,10 +549,10 @@ func (s *serviceClient) validateSettings() error { func (s *serviceClient) parseNumericSettings() (int64, int64, error) { port, err := strconv.ParseInt(s.iInterfacePort.Text, 10, 64) if err != nil { - return 0, 0, errors.New("Invalid interface port") + return 0, 0, errors.New("invalid interface port") } if port < 1 || port > 65535 { - return 0, 0, errors.New("Invalid interface port: out of range 1-65535") + return 0, 0, errors.New("invalid interface port: out of range 1-65535") } var mtu int64 @@ -560,7 +560,7 @@ func (s *serviceClient) parseNumericSettings() (int64, int64, error) { if mtuText != "" { mtu, err = strconv.ParseInt(mtuText, 10, 64) if err != nil { - return 0, 0, errors.New("Invalid MTU value") + return 0, 0, errors.New("invalid MTU value") } if mtu < iface.MinMTU || mtu > iface.MaxMTU { return 0, 0, fmt.Errorf("MTU must be between %d and %d bytes", iface.MinMTU, iface.MaxMTU) @@ -645,7 +645,7 @@ func (s *serviceClient) buildSetConfigRequest(iMngURL string, port, mtu int64) ( if sshJWTCacheTTLText != "" { sshJWTCacheTTL, err := strconv.ParseInt(sshJWTCacheTTLText, 10, 32) if err != nil { - return nil, errors.New("Invalid SSH JWT Cache TTL value") + return nil, errors.New("invalid SSH JWT Cache TTL value") } if sshJWTCacheTTL < 0 || sshJWTCacheTTL > maxSSHJWTCacheTTL { return nil, fmt.Errorf("SSH JWT Cache TTL must be between 0 and %d seconds", maxSSHJWTCacheTTL) diff --git a/client/ui/signal_windows.go b/client/ui/signal_windows.go index ca98be526..58f46374f 100644 --- a/client/ui/signal_windows.go +++ b/client/ui/signal_windows.go @@ -164,7 +164,7 @@ func sendShowWindowSignal(pid int32) error { err = windows.SetEvent(eventHandle) if err != nil { - return fmt.Errorf("Error setting event: %w", err) + return fmt.Errorf("error setting event: %w", err) } return nil diff --git a/go.mod b/go.mod index 23cf0f37d..1b4612da3 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/netbirdio/netbird -go 1.24.10 +go 1.25 + +toolchain go1.25.5 require ( cunicu.li/go-rosenpass v0.4.0 @@ -81,7 +83,7 @@ require ( github.com/pion/turn/v3 v3.0.1 github.com/pkg/sftp v1.13.9 github.com/prometheus/client_golang v1.23.2 - github.com/quic-go/quic-go v0.49.1 + github.com/quic-go/quic-go v0.55.0 github.com/redis/go-redis/v9 v9.7.3 github.com/rs/xid v1.3.0 github.com/shirou/gopsutil/v3 v3.24.4 @@ -103,7 +105,7 @@ require ( go.opentelemetry.io/otel/exporters/prometheus v0.48.0 go.opentelemetry.io/otel/metric v1.38.0 go.opentelemetry.io/otel/sdk/metric v1.38.0 - go.uber.org/mock v0.5.0 + go.uber.org/mock v0.5.2 go.uber.org/zap v1.27.0 goauthentik.io/api/v3 v3.2023051.3 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 @@ -186,12 +188,10 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-sql-driver/mysql v1.9.3 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/go-text/render v0.2.0 // indirect github.com/go-text/typesetting v0.2.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/btree v1.1.2 // indirect - github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect github.com/googleapis/gax-go/v2 v2.15.0 // indirect diff --git a/go.sum b/go.sum index 354c7732e..60b6304c3 100644 --- a/go.sum +++ b/go.sum @@ -101,9 +101,6 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk= github.com/cilium/ebpf v0.15.0/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso= github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE= @@ -286,7 +283,6 @@ github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= @@ -491,8 +487,8 @@ github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9Z github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= -github.com/quic-go/quic-go v0.49.1 h1:e5JXpUyF0f2uFjckQzD8jTghZrOUK1xxDqqZhlwixo0= -github.com/quic-go/quic-go v0.49.1/go.mod h1:s2wDnmCdooUQBmQfpUSTCYBl1/D4FcqbULMMkASvR6s= +github.com/quic-go/quic-go v0.55.0 h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9Mk= +github.com/quic-go/quic-go v0.55.0/go.mod h1:DR51ilwU1uE164KuWXhinFcKWGlEjzys2l8zUl5Ss1U= github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= @@ -622,8 +618,8 @@ go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lI go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= -go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= +go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= +go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= @@ -717,7 +713,6 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/management/cmd/management.go b/management/cmd/management.go index 557cf45f8..5391b0866 100644 --- a/management/cmd/management.go +++ b/management/cmd/management.go @@ -64,7 +64,7 @@ var ( config.HttpConfig.IdpSignKeyRefreshEnabled = idpSignKeyRefreshEnabled } - tlsEnabled := false + var tlsEnabled bool if mgmtLetsencryptDomain != "" || (config.HttpConfig.CertFile != "" && config.HttpConfig.CertKey != "") { tlsEnabled = true } diff --git a/management/internals/shared/grpc/loginfilter_test.go b/management/internals/shared/grpc/loginfilter_test.go index 8b26e14ab..797879ae7 100644 --- a/management/internals/shared/grpc/loginfilter_test.go +++ b/management/internals/shared/grpc/loginfilter_test.go @@ -85,6 +85,7 @@ func (s *LoginFilterTestSuite) TestBanDurationIncreasesExponentially() { s.True(s.filter.logged[pubKey].isBanned) s.Equal(2, s.filter.logged[pubKey].banLevel) secondBanDuration := s.filter.logged[pubKey].banExpiresAt.Sub(s.filter.logged[pubKey].lastSeen) + // nolint expectedSecondDuration := time.Duration(float64(baseBan) * math.Pow(2, 1)) s.InDelta(expectedSecondDuration, secondBanDuration, float64(time.Millisecond)) } diff --git a/management/server/account.go b/management/server/account.go index 29415b038..9785f446c 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -1006,7 +1006,7 @@ func (am *DefaultAccountManager) isCacheFresh(ctx context.Context, accountUsers for user, loggedInOnce := range accountUsers { if datum, ok := userDataMap[user]; ok { // check if the matching user data has a pending invite and if the user has logged in once, forcing the cache to be refreshed - if datum.AppMetadata.WTPendingInvite != nil && *datum.AppMetadata.WTPendingInvite && loggedInOnce == true { //nolint:gosimple + if datum.AppMetadata.WTPendingInvite != nil && *datum.AppMetadata.WTPendingInvite && loggedInOnce == true { //nolint log.WithContext(ctx).Infof("user %s has a pending invite and has logged in once, cache invalid", user) return false } diff --git a/management/server/account_test.go b/management/server/account_test.go index 59d6e4928..32d2b4ea3 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -753,7 +753,7 @@ func TestAccountManager_SetOrUpdateDomain(t *testing.T) { t.Fatalf("expected to create an account for a user %s", userId) } - if account != nil && account.Domain != domain { + if account.Domain != domain { t.Errorf("setting account domain failed, expected %s, got %s", domain, account.Domain) } @@ -768,7 +768,7 @@ func TestAccountManager_SetOrUpdateDomain(t *testing.T) { t.Fatalf("expected to get an account for a user %s", userId) } - if account != nil && account.Domain != domain { + if account.Domain != domain { t.Errorf("updating domain. expected %s got %s", domain, account.Domain) } } diff --git a/management/server/http/handlers/policies/posture_checks_handler_test.go b/management/server/http/handlers/policies/posture_checks_handler_test.go index 35198da32..a5999f6c7 100644 --- a/management/server/http/handlers/policies/posture_checks_handler_test.go +++ b/management/server/http/handlers/policies/posture_checks_handler_test.go @@ -46,7 +46,7 @@ func initPostureChecksTestData(postureChecks ...*posture.Checks) *postureChecksH testPostureChecks[postureChecks.ID] = postureChecks if err := postureChecks.Validate(); err != nil { - return nil, status.Errorf(status.InvalidArgument, "%s", err.Error()) //nolint + return nil, status.Errorf(status.InvalidArgument, "%v", err) //nolint } return postureChecks, nil diff --git a/management/server/http/testing/benchmarks/peers_handler_benchmark_test.go b/management/server/http/testing/benchmarks/peers_handler_benchmark_test.go index 3fe3fe809..3345a034b 100644 --- a/management/server/http/testing/benchmarks/peers_handler_benchmark_test.go +++ b/management/server/http/testing/benchmarks/peers_handler_benchmark_test.go @@ -1,5 +1,4 @@ //go:build benchmark -// +build benchmark package benchmarks diff --git a/management/server/http/testing/benchmarks/setupkeys_handler_benchmark_test.go b/management/server/http/testing/benchmarks/setupkeys_handler_benchmark_test.go index 36b226db0..ca25861dd 100644 --- a/management/server/http/testing/benchmarks/setupkeys_handler_benchmark_test.go +++ b/management/server/http/testing/benchmarks/setupkeys_handler_benchmark_test.go @@ -1,5 +1,4 @@ //go:build benchmark -// +build benchmark package benchmarks diff --git a/management/server/http/testing/benchmarks/users_handler_benchmark_test.go b/management/server/http/testing/benchmarks/users_handler_benchmark_test.go index 2868a20bd..b13773268 100644 --- a/management/server/http/testing/benchmarks/users_handler_benchmark_test.go +++ b/management/server/http/testing/benchmarks/users_handler_benchmark_test.go @@ -1,5 +1,4 @@ //go:build benchmark -// +build benchmark package benchmarks diff --git a/management/server/http/testing/integration/setupkeys_handler_integration_test.go b/management/server/http/testing/integration/setupkeys_handler_integration_test.go index 1079de4aa..c1a9829da 100644 --- a/management/server/http/testing/integration/setupkeys_handler_integration_test.go +++ b/management/server/http/testing/integration/setupkeys_handler_integration_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package integration diff --git a/management/server/idp/pocketid.go b/management/server/idp/pocketid.go index 38a5cc67f..d8d764830 100644 --- a/management/server/idp/pocketid.go +++ b/management/server/idp/pocketid.go @@ -121,7 +121,7 @@ func NewPocketIdManager(config PocketIdClientConfig, appMetrics telemetry.AppMet func (p *PocketIdManager) request(ctx context.Context, method, resource string, query *url.Values, body string) ([]byte, error) { var MethodsWithBody = []string{http.MethodPost, http.MethodPut} if !slices.Contains(MethodsWithBody, method) && body != "" { - return nil, fmt.Errorf("Body provided to unsupported method: %s", method) + return nil, fmt.Errorf("body provided to unsupported method: %s", method) } reqURL := fmt.Sprintf("%s/api/%s", p.managementEndpoint, resource) @@ -301,7 +301,7 @@ func (p *PocketIdManager) CreateUser(ctx context.Context, email, name, accountID if p.appMetrics != nil { p.appMetrics.IDPMetrics().CountCreateUser() } - var pending bool = true + pending := true ret := &UserData{ Email: email, Name: name, diff --git a/management/server/idp/zitadel.go b/management/server/idp/zitadel.go index 24228346a..8db3c4796 100644 --- a/management/server/idp/zitadel.go +++ b/management/server/idp/zitadel.go @@ -357,7 +357,7 @@ func (zm *ZitadelManager) CreateUser(ctx context.Context, email, name, accountID return nil, err } - var pending bool = true + pending := true ret := &UserData{ Email: email, Name: name, diff --git a/management/server/migration/migration.go b/management/server/migration/migration.go index 78f4afbd5..7fcb98ccb 100644 --- a/management/server/migration/migration.go +++ b/management/server/migration/migration.go @@ -393,7 +393,7 @@ func CreateIndexIfNotExists[T any](ctx context.Context, db *gorm.DB, indexName s return fmt.Errorf("failed to parse model schema: %w", err) } tableName := stmt.Schema.Table - dialect := db.Dialector.Name() + dialect := db.Name() if db.Migrator().HasIndex(&model, indexName) { log.WithContext(ctx).Infof("index %s already exists on table %s", indexName, tableName) diff --git a/management/server/nameserver.go b/management/server/nameserver.go index f278e1761..a3eb4ae2e 100644 --- a/management/server/nameserver.go +++ b/management/server/nameserver.go @@ -20,7 +20,7 @@ import ( const domainPattern = `^(?i)[a-z0-9]+([\-\.]{1}[a-z0-9]+)*[*.a-z]{1,}$` -var invalidDomainName = errors.New("invalid domain name") +var errInvalidDomainName = errors.New("invalid domain name") // GetNameServerGroup gets a nameserver group object from account and nameserver group IDs func (am *DefaultAccountManager) GetNameServerGroup(ctx context.Context, accountID, userID, nsGroupID string) (*nbdns.NameServerGroup, error) { @@ -314,7 +314,7 @@ func validateDomain(domain string) error { _, valid := dns.IsDomainName(domain) if !valid { - return invalidDomainName + return errInvalidDomainName } return nil diff --git a/management/server/posture_checks.go b/management/server/posture_checks.go index 9a743eb8c..ba901c771 100644 --- a/management/server/posture_checks.go +++ b/management/server/posture_checks.go @@ -158,7 +158,7 @@ func arePostureCheckChangesAffectPeers(ctx context.Context, transaction store.St // validatePostureChecks validates the posture checks. func validatePostureChecks(ctx context.Context, transaction store.Store, accountID string, postureChecks *posture.Checks) error { if err := postureChecks.Validate(); err != nil { - return status.Errorf(status.InvalidArgument, "%s", err.Error()) //nolint + return status.Errorf(status.InvalidArgument, "%v", err.Error()) //nolint } // If the posture check already has an ID, verify its existence in the store. diff --git a/management/server/store/sql_store_get_account_test.go b/management/server/store/sql_store_get_account_test.go index 8ff04d68a..69e346ae7 100644 --- a/management/server/store/sql_store_get_account_test.go +++ b/management/server/store/sql_store_get_account_test.go @@ -997,9 +997,10 @@ func TestGetAccount_ComprehensiveFieldValidation(t *testing.T) { // Find posture checks by ID var pc1, pc2 *posture.Checks for _, pc := range retrievedAccount.PostureChecks { - if pc.ID == postureCheckID1 { + switch pc.ID { + case postureCheckID1: pc1 = pc - } else if pc.ID == postureCheckID2 { + case postureCheckID2: pc2 = pc } } diff --git a/management/server/store/sql_store_test.go b/management/server/store/sql_store_test.go index 97aa81b12..728d67273 100644 --- a/management/server/store/sql_store_test.go +++ b/management/server/store/sql_store_test.go @@ -30,7 +30,6 @@ import ( "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/management/server/util" nbroute "github.com/netbirdio/netbird/route" - route2 "github.com/netbirdio/netbird/route" "github.com/netbirdio/netbird/shared/management/status" "github.com/netbirdio/netbird/util/crypt" ) @@ -110,12 +109,12 @@ func runLargeTest(t *testing.T, store Store) { AccountID: account.Id, } account.Users[user.Id] = user - route := &route2.Route{ - ID: route2.ID(fmt.Sprintf("network-id-%d", n)), + route := &nbroute.Route{ + ID: nbroute.ID(fmt.Sprintf("network-id-%d", n)), Description: "base route", - NetID: route2.NetID(fmt.Sprintf("network-id-%d", n)), + NetID: nbroute.NetID(fmt.Sprintf("network-id-%d", n)), Network: netip.MustParsePrefix(netIP.String() + "/24"), - NetworkType: route2.IPv4Network, + NetworkType: nbroute.IPv4Network, Metric: 9999, Masquerade: false, Enabled: true, @@ -689,7 +688,7 @@ func TestMigrate(t *testing.T) { require.NoError(t, err, "Failed to insert Gob data") type route struct { - route2.Route + nbroute.Route Network netip.Prefix `gorm:"serializer:gob"` PeerGroups []string `gorm:"serializer:gob"` } @@ -698,7 +697,7 @@ func TestMigrate(t *testing.T) { rt := &route{ Network: prefix, PeerGroups: []string{"group1", "group2"}, - Route: route2.Route{ID: "route1"}, + Route: nbroute.Route{ID: "route1"}, } err = store.(*SqlStore).db.Save(rt).Error @@ -714,7 +713,7 @@ func TestMigrate(t *testing.T) { require.NoError(t, err, "Failed to delete Gob data") prefix = netip.MustParsePrefix("12.0.0.0/24") - nRT := &route2.Route{ + nRT := &nbroute.Route{ Network: prefix, ID: "route2", Peer: "peer-id", @@ -3544,13 +3543,13 @@ func TestSqlStore_SaveRoute(t *testing.T) { accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" - route := &route2.Route{ + route := &nbroute.Route{ ID: "route-id", AccountID: accountID, Network: netip.MustParsePrefix("10.10.0.0/16"), NetID: "netID", PeerGroups: []string{"routeA"}, - NetworkType: route2.IPv4Network, + NetworkType: nbroute.IPv4Network, Masquerade: true, Metric: 9999, Enabled: true, diff --git a/management/server/testutil/store.go b/management/server/testutil/store.go index db418c45b..f92153399 100644 --- a/management/server/testutil/store.go +++ b/management/server/testutil/store.go @@ -1,5 +1,4 @@ //go:build !ios -// +build !ios package testutil diff --git a/management/server/testutil/store_ios.go b/management/server/testutil/store_ios.go index c3dd839d3..9e3b5ce4a 100644 --- a/management/server/testutil/store_ios.go +++ b/management/server/testutil/store_ios.go @@ -1,5 +1,4 @@ //go:build ios -// +build ios package testutil diff --git a/relay/cmd/pprof.go b/relay/cmd/pprof.go index 37efd35f0..c041c6ea9 100644 --- a/relay/cmd/pprof.go +++ b/relay/cmd/pprof.go @@ -1,5 +1,4 @@ //go:build pprof -// +build pprof package cmd diff --git a/relay/server/listener/quic/conn.go b/relay/server/listener/quic/conn.go index 909ec1cc6..6e2201bf7 100644 --- a/relay/server/listener/quic/conn.go +++ b/relay/server/listener/quic/conn.go @@ -12,14 +12,14 @@ import ( ) type Conn struct { - session quic.Connection + session *quic.Conn closed bool closedMu sync.Mutex ctx context.Context ctxCancel context.CancelFunc } -func NewConn(session quic.Connection) *Conn { +func NewConn(session *quic.Conn) *Conn { ctx, cancel := context.WithCancel(context.Background()) return &Conn{ session: session, diff --git a/relay/server/listener/ws/conn.go b/relay/server/listener/ws/conn.go index 3ec08945b..d5bce56f7 100644 --- a/relay/server/listener/ws/conn.go +++ b/relay/server/listener/ws/conn.go @@ -88,7 +88,7 @@ func (c *Conn) Close() error { c.closedMu.Lock() c.closed = true c.closedMu.Unlock() - return c.Conn.CloseNow() + return c.CloseNow() } func (c *Conn) isClosed() bool { diff --git a/shared/management/client/rest/accounts_test.go b/shared/management/client/rest/accounts_test.go index be0066488..e44ada298 100644 --- a/shared/management/client/rest/accounts_test.go +++ b/shared/management/client/rest/accounts_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/client.go b/shared/management/client/rest/client.go index 4d1de2631..77c960435 100644 --- a/shared/management/client/rest/client.go +++ b/shared/management/client/rest/client.go @@ -161,7 +161,7 @@ func (c *Client) NewRequest(ctx context.Context, method, path string, body io.Re func parseResponse[T any](resp *http.Response) (T, error) { var ret T if resp.Body == nil { - return ret, fmt.Errorf("Body missing, HTTP Error code %d", resp.StatusCode) + return ret, fmt.Errorf("body missing, HTTP Error code %d", resp.StatusCode) } bs, err := io.ReadAll(resp.Body) if err != nil { @@ -169,7 +169,7 @@ func parseResponse[T any](resp *http.Response) (T, error) { } err = json.Unmarshal(bs, &ret) if err != nil { - return ret, fmt.Errorf("Error code %d, error unmarshalling body: %w", resp.StatusCode, err) + return ret, fmt.Errorf("error code %d, error unmarshalling body: %w", resp.StatusCode, err) } return ret, nil diff --git a/shared/management/client/rest/client_test.go b/shared/management/client/rest/client_test.go index 17df8dd8b..2b3e6cabe 100644 --- a/shared/management/client/rest/client_test.go +++ b/shared/management/client/rest/client_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/dns_test.go b/shared/management/client/rest/dns_test.go index 58082abe8..8e8633f8d 100644 --- a/shared/management/client/rest/dns_test.go +++ b/shared/management/client/rest/dns_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/events_test.go b/shared/management/client/rest/events_test.go index b28390001..1ee10eb6e 100644 --- a/shared/management/client/rest/events_test.go +++ b/shared/management/client/rest/events_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/geo_test.go b/shared/management/client/rest/geo_test.go index fcb4808a1..2410f2641 100644 --- a/shared/management/client/rest/geo_test.go +++ b/shared/management/client/rest/geo_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/groups_test.go b/shared/management/client/rest/groups_test.go index fcd759e9a..51fd0c0ee 100644 --- a/shared/management/client/rest/groups_test.go +++ b/shared/management/client/rest/groups_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/impersonation_test.go b/shared/management/client/rest/impersonation_test.go index 4fb8f24eb..d257d0987 100644 --- a/shared/management/client/rest/impersonation_test.go +++ b/shared/management/client/rest/impersonation_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/networks_test.go b/shared/management/client/rest/networks_test.go index ca2a294ae..2bf1a0d3b 100644 --- a/shared/management/client/rest/networks_test.go +++ b/shared/management/client/rest/networks_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/peers_test.go b/shared/management/client/rest/peers_test.go index a45f9d6ec..c464de7ed 100644 --- a/shared/management/client/rest/peers_test.go +++ b/shared/management/client/rest/peers_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/policies_test.go b/shared/management/client/rest/policies_test.go index a19d0a728..e948e2949 100644 --- a/shared/management/client/rest/policies_test.go +++ b/shared/management/client/rest/policies_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/posturechecks_test.go b/shared/management/client/rest/posturechecks_test.go index 9b1b618df..d74d455a5 100644 --- a/shared/management/client/rest/posturechecks_test.go +++ b/shared/management/client/rest/posturechecks_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/routes_test.go b/shared/management/client/rest/routes_test.go index 9452a07fc..5ee2def24 100644 --- a/shared/management/client/rest/routes_test.go +++ b/shared/management/client/rest/routes_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/setupkeys_test.go b/shared/management/client/rest/setupkeys_test.go index 0fa782da5..bd8d3f835 100644 --- a/shared/management/client/rest/setupkeys_test.go +++ b/shared/management/client/rest/setupkeys_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/tokens_test.go b/shared/management/client/rest/tokens_test.go index ce3748751..5af41eb73 100644 --- a/shared/management/client/rest/tokens_test.go +++ b/shared/management/client/rest/tokens_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/management/client/rest/users_test.go b/shared/management/client/rest/users_test.go index d53c4eb6a..68815d4f9 100644 --- a/shared/management/client/rest/users_test.go +++ b/shared/management/client/rest/users_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package rest_test diff --git a/shared/relay/client/client_test.go b/shared/relay/client/client_test.go index 8fe5f04f4..9820d642f 100644 --- a/shared/relay/client/client_test.go +++ b/shared/relay/client/client_test.go @@ -19,15 +19,7 @@ import ( ) var ( - hmacTokenStore = &hmac.TokenStore{} - serverListenAddr = "127.0.0.1:1234" - serverURL = "rel://127.0.0.1:1234" - serverCfg = server.Config{ - Meter: otel.Meter(""), - ExposedAddress: serverURL, - TLSSupport: false, - AuthValidator: &allow.Auth{}, - } + hmacTokenStore = &hmac.TokenStore{} ) func TestMain(m *testing.M) { @@ -36,8 +28,20 @@ func TestMain(m *testing.M) { os.Exit(code) } +// newClientTestServerConfig creates a new server config for client testing with the given address +func newClientTestServerConfig(address string) server.Config { + return server.Config{ + Meter: otel.Meter(""), + ExposedAddress: "rel://" + address, + TLSSupport: false, + AuthValidator: &allow.Auth{}, + } +} + func TestClient(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:50001" + serverCfg := newClientTestServerConfig(serverListenAddr) srv, err := server.NewServer(serverCfg) if err != nil { @@ -64,7 +68,7 @@ func TestClient(t *testing.T) { t.Fatalf("failed to start server: %s", err) } t.Log("alice connecting to server") - clientAlice := NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -72,7 +76,7 @@ func TestClient(t *testing.T) { defer clientAlice.Close() t.Log("placeholder connecting to server") - clientPlaceHolder := NewClient(serverURL, hmacTokenStore, "clientPlaceHolder", iface.DefaultMTU) + clientPlaceHolder := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "clientPlaceHolder", iface.DefaultMTU) err = clientPlaceHolder.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -80,7 +84,7 @@ func TestClient(t *testing.T) { defer clientPlaceHolder.Close() t.Log("Bob connecting to server") - clientBob := NewClient(serverURL, hmacTokenStore, "bob", iface.DefaultMTU) + clientBob := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "bob", iface.DefaultMTU) err = clientBob.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -120,6 +124,8 @@ func TestClient(t *testing.T) { func TestRegistration(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:50101" + serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv, err := server.NewServer(serverCfg) if err != nil { @@ -138,7 +144,7 @@ func TestRegistration(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - clientAlice := NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { _ = srv.Shutdown(ctx) @@ -157,7 +163,7 @@ func TestRegistration(t *testing.T) { func TestRegistrationTimeout(t *testing.T) { ctx := context.Background() fakeUDPListener, err := net.ListenUDP("udp", &net.UDPAddr{ - Port: 1234, + Port: 50201, IP: net.ParseIP("0.0.0.0"), }) if err != nil { @@ -168,7 +174,7 @@ func TestRegistrationTimeout(t *testing.T) { }(fakeUDPListener) fakeTCPListener, err := net.ListenTCP("tcp", &net.TCPAddr{ - Port: 1234, + Port: 50201, IP: net.ParseIP("0.0.0.0"), }) if err != nil { @@ -178,7 +184,7 @@ func TestRegistrationTimeout(t *testing.T) { _ = fakeTCPListener.Close() }(fakeTCPListener) - clientAlice := NewClient("127.0.0.1:1234", hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient("127.0.0.1:50201", hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err == nil { t.Errorf("failed to connect to server: %s", err) @@ -192,6 +198,8 @@ func TestRegistrationTimeout(t *testing.T) { func TestEcho(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:50301" + serverCfg := newClientTestServerConfig(serverListenAddr) idAlice := "alice" idBob := "bob" srvCfg := server.ListenerConfig{Address: serverListenAddr} @@ -219,7 +227,7 @@ func TestEcho(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - clientAlice := NewClient(serverURL, hmacTokenStore, idAlice, iface.DefaultMTU) + clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, idAlice, iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -231,7 +239,7 @@ func TestEcho(t *testing.T) { } }() - clientBob := NewClient(serverURL, hmacTokenStore, idBob, iface.DefaultMTU) + clientBob := NewClient(serverCfg.ExposedAddress, hmacTokenStore, idBob, iface.DefaultMTU) err = clientBob.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -282,6 +290,8 @@ func TestEcho(t *testing.T) { func TestBindToUnavailabePeer(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:50401" + serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv, err := server.NewServer(serverCfg) @@ -309,7 +319,7 @@ func TestBindToUnavailabePeer(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - clientAlice := NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Errorf("failed to connect to server: %s", err) @@ -328,6 +338,8 @@ func TestBindToUnavailabePeer(t *testing.T) { func TestBindReconnect(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:50501" + serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv, err := server.NewServer(serverCfg) @@ -355,13 +367,13 @@ func TestBindReconnect(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - clientAlice := NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) } - clientBob := NewClient(serverURL, hmacTokenStore, "bob", iface.DefaultMTU) + clientBob := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "bob", iface.DefaultMTU) err = clientBob.Connect(ctx) if err != nil { t.Errorf("failed to connect to server: %s", err) @@ -383,7 +395,7 @@ func TestBindReconnect(t *testing.T) { t.Errorf("failed to close client: %s", err) } - clientAlice = NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice = NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Errorf("failed to connect to server: %s", err) @@ -429,6 +441,8 @@ func TestBindReconnect(t *testing.T) { func TestCloseConn(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:50601" + serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv, err := server.NewServer(serverCfg) @@ -456,13 +470,13 @@ func TestCloseConn(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - bob := NewClient(serverURL, hmacTokenStore, "bob", iface.DefaultMTU) + bob := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "bob", iface.DefaultMTU) err = bob.Connect(ctx) if err != nil { t.Errorf("failed to connect to server: %s", err) } - clientAlice := NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Errorf("failed to connect to server: %s", err) @@ -492,6 +506,8 @@ func TestCloseConn(t *testing.T) { func TestCloseRelayConn(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:50701" + serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv, err := server.NewServer(serverCfg) @@ -518,13 +534,13 @@ func TestCloseRelayConn(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - bob := NewClient(serverURL, hmacTokenStore, "bob", iface.DefaultMTU) + bob := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "bob", iface.DefaultMTU) err = bob.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) } - clientAlice := NewClient(serverURL, hmacTokenStore, "alice", iface.DefaultMTU) + clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, "alice", iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -550,6 +566,8 @@ func TestCloseRelayConn(t *testing.T) { func TestCloseByServer(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:50801" + serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv1, err := server.NewServer(serverCfg) @@ -572,7 +590,7 @@ func TestCloseByServer(t *testing.T) { idAlice := "alice" log.Debugf("connect by alice") - relayClient := NewClient(serverURL, hmacTokenStore, idAlice, iface.DefaultMTU) + relayClient := NewClient(serverCfg.ExposedAddress, hmacTokenStore, idAlice, iface.DefaultMTU) if err = relayClient.Connect(ctx); err != nil { log.Fatalf("failed to connect to server: %s", err) } @@ -607,6 +625,8 @@ func TestCloseByServer(t *testing.T) { func TestCloseByClient(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:50901" + serverCfg := newClientTestServerConfig(serverListenAddr) srvCfg := server.ListenerConfig{Address: serverListenAddr} srv, err := server.NewServer(serverCfg) @@ -628,7 +648,7 @@ func TestCloseByClient(t *testing.T) { idAlice := "alice" log.Debugf("connect by alice") - relayClient := NewClient(serverURL, hmacTokenStore, idAlice, iface.DefaultMTU) + relayClient := NewClient(serverCfg.ExposedAddress, hmacTokenStore, idAlice, iface.DefaultMTU) err = relayClient.Connect(ctx) if err != nil { log.Fatalf("failed to connect to server: %s", err) @@ -652,6 +672,8 @@ func TestCloseByClient(t *testing.T) { func TestCloseNotDrainedChannel(t *testing.T) { ctx := context.Background() + serverListenAddr := "127.0.0.1:51001" + serverCfg := newClientTestServerConfig(serverListenAddr) idAlice := "alice" idBob := "bob" srvCfg := server.ListenerConfig{Address: serverListenAddr} @@ -679,7 +701,7 @@ func TestCloseNotDrainedChannel(t *testing.T) { t.Fatalf("failed to start server: %s", err) } - clientAlice := NewClient(serverURL, hmacTokenStore, idAlice, iface.DefaultMTU) + clientAlice := NewClient(serverCfg.ExposedAddress, hmacTokenStore, idAlice, iface.DefaultMTU) err = clientAlice.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) @@ -691,7 +713,7 @@ func TestCloseNotDrainedChannel(t *testing.T) { } }() - clientBob := NewClient(serverURL, hmacTokenStore, idBob, iface.DefaultMTU) + clientBob := NewClient(serverCfg.ExposedAddress, hmacTokenStore, idBob, iface.DefaultMTU) err = clientBob.Connect(ctx) if err != nil { t.Fatalf("failed to connect to server: %s", err) diff --git a/shared/relay/client/dialer/quic/conn.go b/shared/relay/client/dialer/quic/conn.go index 9243605b5..1d90d7139 100644 --- a/shared/relay/client/dialer/quic/conn.go +++ b/shared/relay/client/dialer/quic/conn.go @@ -30,11 +30,11 @@ func (a Addr) String() string { } type Conn struct { - session quic.Connection + session *quic.Conn ctx context.Context } -func NewConn(session quic.Connection) net.Conn { +func NewConn(session *quic.Conn) net.Conn { return &Conn{ session: session, ctx: context.Background(), diff --git a/shared/relay/client/manager_test.go b/shared/relay/client/manager_test.go index f00b35707..fb91f7682 100644 --- a/shared/relay/client/manager_test.go +++ b/shared/relay/client/manager_test.go @@ -13,6 +13,16 @@ import ( "github.com/netbirdio/netbird/shared/relay/auth/allow" ) +// newManagerTestServerConfig creates a new server config for manager testing with the given address +func newManagerTestServerConfig(address string) server.Config { + return server.Config{ + Meter: otel.Meter(""), + ExposedAddress: address, + TLSSupport: false, + AuthValidator: &allow.Auth{}, + } +} + func TestEmptyURL(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -27,15 +37,10 @@ func TestForeignConn(t *testing.T) { ctx := context.Background() lstCfg1 := server.ListenerConfig{ - Address: "localhost:1234", + Address: "localhost:52101", } - srv1, err := server.NewServer(server.Config{ - Meter: otel.Meter(""), - ExposedAddress: lstCfg1.Address, - TLSSupport: false, - AuthValidator: &allow.Auth{}, - }) + srv1, err := server.NewServer(newManagerTestServerConfig(lstCfg1.Address)) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -59,14 +64,9 @@ func TestForeignConn(t *testing.T) { } srvCfg2 := server.ListenerConfig{ - Address: "localhost:2234", + Address: "localhost:52102", } - srv2, err := server.NewServer(server.Config{ - Meter: otel.Meter(""), - ExposedAddress: srvCfg2.Address, - TLSSupport: false, - AuthValidator: &allow.Auth{}, - }) + srv2, err := server.NewServer(newManagerTestServerConfig(srvCfg2.Address)) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -144,9 +144,9 @@ func TestForeginConnClose(t *testing.T) { ctx := context.Background() srvCfg1 := server.ListenerConfig{ - Address: "localhost:1234", + Address: "localhost:52201", } - srv1, err := server.NewServer(serverCfg) + srv1, err := server.NewServer(newManagerTestServerConfig(srvCfg1.Address)) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -170,9 +170,9 @@ func TestForeginConnClose(t *testing.T) { } srvCfg2 := server.ListenerConfig{ - Address: "localhost:2234", + Address: "localhost:52202", } - srv2, err := server.NewServer(serverCfg) + srv2, err := server.NewServer(newManagerTestServerConfig(srvCfg2.Address)) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -225,9 +225,9 @@ func TestForeignAutoClose(t *testing.T) { keepUnusedServerTime = 2 * time.Second srvCfg1 := server.ListenerConfig{ - Address: "localhost:1234", + Address: "localhost:52301", } - srv1, err := server.NewServer(serverCfg) + srv1, err := server.NewServer(newManagerTestServerConfig(srvCfg1.Address)) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -252,9 +252,9 @@ func TestForeignAutoClose(t *testing.T) { } srvCfg2 := server.ListenerConfig{ - Address: "localhost:2234", + Address: "localhost:52302", } - srv2, err := server.NewServer(serverCfg) + srv2, err := server.NewServer(newManagerTestServerConfig(srvCfg2.Address)) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -327,9 +327,9 @@ func TestAutoReconnect(t *testing.T) { ctx := context.Background() srvCfg := server.ListenerConfig{ - Address: "localhost:1234", + Address: "localhost:52401", } - srv, err := server.NewServer(serverCfg) + srv, err := server.NewServer(newManagerTestServerConfig(srvCfg.Address)) if err != nil { t.Fatalf("failed to create server: %s", err) } @@ -397,14 +397,9 @@ func TestNotifierDoubleAdd(t *testing.T) { ctx := context.Background() listenerCfg1 := server.ListenerConfig{ - Address: "localhost:1234", + Address: "localhost:52501", } - srv, err := server.NewServer(server.Config{ - Meter: otel.Meter(""), - ExposedAddress: listenerCfg1.Address, - TLSSupport: false, - AuthValidator: &allow.Auth{}, - }) + srv, err := server.NewServer(newManagerTestServerConfig(listenerCfg1.Address)) if err != nil { t.Fatalf("failed to create server: %s", err) } diff --git a/signal/cmd/run.go b/signal/cmd/run.go index bf8f8e327..d7662a886 100644 --- a/signal/cmd/run.go +++ b/signal/cmd/run.go @@ -73,7 +73,7 @@ var ( // detect whether user specified a port userPort := cmd.Flag("port").Changed - tlsEnabled := false + var tlsEnabled bool if signalLetsencryptDomain != "" || (signalCertFile != "" && signalCertKey != "") { tlsEnabled = true } @@ -259,8 +259,8 @@ func grpcHandlerFunc(grpcServer *grpc.Server, meter metric.Meter) http.Handler { wsProxy := wsproxyserver.New(grpcServer, wsproxyserver.WithOTelMeter(meter)) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch { - case r.URL.Path == wsproxy.ProxyPath+wsproxy.SignalComponent: + switch r.URL.Path { + case wsproxy.ProxyPath + wsproxy.SignalComponent: wsProxy.Handler().ServeHTTP(w, r) default: grpcServer.ServeHTTP(w, r) diff --git a/util/syslog_nonwindows.go b/util/syslog_nonwindows.go index 6ffbcb8be..328bb8b1c 100644 --- a/util/syslog_nonwindows.go +++ b/util/syslog_nonwindows.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package util From e8863fbb554e32ebff0bbc669433992c202a5a8e Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Fri, 9 Jan 2026 02:53:37 +0800 Subject: [PATCH 034/374] [client] Add non-root ICMP support to userspace firewall forwarder (#4792) --- .../firewall/uspfilter/forwarder/endpoint.go | 21 +- .../firewall/uspfilter/forwarder/forwarder.go | 63 +++-- client/firewall/uspfilter/forwarder/icmp.go | 244 +++++++++++++----- client/firewall/uspfilter/forwarder/udp.go | 18 +- client/firewall/uspfilter/log/log.go | 9 + client/iface/bind/ice_bind.go | 19 +- go.mod | 6 +- go.sum | 12 +- 8 files changed, 291 insertions(+), 101 deletions(-) diff --git a/client/firewall/uspfilter/forwarder/endpoint.go b/client/firewall/uspfilter/forwarder/endpoint.go index f91291ea8..692a24140 100644 --- a/client/firewall/uspfilter/forwarder/endpoint.go +++ b/client/firewall/uspfilter/forwarder/endpoint.go @@ -2,6 +2,7 @@ package forwarder import ( "fmt" + "sync/atomic" wgdevice "golang.zx2c4.com/wireguard/device" "gvisor.dev/gvisor/pkg/tcpip" @@ -16,7 +17,7 @@ type endpoint struct { logger *nblog.Logger dispatcher stack.NetworkDispatcher device *wgdevice.Device - mtu uint32 + mtu atomic.Uint32 } func (e *endpoint) Attach(dispatcher stack.NetworkDispatcher) { @@ -28,7 +29,7 @@ func (e *endpoint) IsAttached() bool { } func (e *endpoint) MTU() uint32 { - return e.mtu + return e.mtu.Load() } func (e *endpoint) Capabilities() stack.LinkEndpointCapabilities { @@ -82,6 +83,22 @@ func (e *endpoint) ParseHeader(*stack.PacketBuffer) bool { return true } +func (e *endpoint) Close() { + // Endpoint cleanup - nothing to do as device is managed externally +} + +func (e *endpoint) SetLinkAddress(tcpip.LinkAddress) { + // Link address is not used for this endpoint type +} + +func (e *endpoint) SetMTU(mtu uint32) { + e.mtu.Store(mtu) +} + +func (e *endpoint) SetOnCloseAction(func()) { + // No action needed on close +} + type epID stack.TransportEndpointID func (i epID) String() string { diff --git a/client/firewall/uspfilter/forwarder/forwarder.go b/client/firewall/uspfilter/forwarder/forwarder.go index 00cb3f1df..d17c3cd5c 100644 --- a/client/firewall/uspfilter/forwarder/forwarder.go +++ b/client/firewall/uspfilter/forwarder/forwarder.go @@ -7,6 +7,7 @@ import ( "net/netip" "runtime" "sync" + "time" log "github.com/sirupsen/logrus" "gvisor.dev/gvisor/pkg/buffer" @@ -35,14 +36,16 @@ type Forwarder struct { logger *nblog.Logger flowLogger nftypes.FlowLogger // ruleIdMap is used to store the rule ID for a given connection - ruleIdMap sync.Map - stack *stack.Stack - endpoint *endpoint - udpForwarder *udpForwarder - ctx context.Context - cancel context.CancelFunc - ip tcpip.Address - netstack bool + ruleIdMap sync.Map + stack *stack.Stack + endpoint *endpoint + udpForwarder *udpForwarder + ctx context.Context + cancel context.CancelFunc + ip tcpip.Address + netstack bool + hasRawICMPAccess bool + pingSemaphore chan struct{} } func New(iface common.IFaceMapper, logger *nblog.Logger, flowLogger nftypes.FlowLogger, netstack bool, mtu uint16) (*Forwarder, error) { @@ -60,8 +63,8 @@ func New(iface common.IFaceMapper, logger *nblog.Logger, flowLogger nftypes.Flow endpoint := &endpoint{ logger: logger, device: iface.GetWGDevice(), - mtu: uint32(mtu), } + endpoint.mtu.Store(uint32(mtu)) if err := s.CreateNIC(nicID, endpoint); err != nil { return nil, fmt.Errorf("create NIC: %v", err) @@ -103,15 +106,16 @@ func New(iface common.IFaceMapper, logger *nblog.Logger, flowLogger nftypes.Flow ctx, cancel := context.WithCancel(context.Background()) f := &Forwarder{ - logger: logger, - flowLogger: flowLogger, - stack: s, - endpoint: endpoint, - udpForwarder: newUDPForwarder(mtu, logger, flowLogger), - ctx: ctx, - cancel: cancel, - netstack: netstack, - ip: tcpip.AddrFromSlice(iface.Address().IP.AsSlice()), + logger: logger, + flowLogger: flowLogger, + stack: s, + endpoint: endpoint, + udpForwarder: newUDPForwarder(mtu, logger, flowLogger), + ctx: ctx, + cancel: cancel, + netstack: netstack, + ip: tcpip.AddrFromSlice(iface.Address().IP.AsSlice()), + pingSemaphore: make(chan struct{}, 3), } receiveWindow := defaultReceiveWindow @@ -129,6 +133,8 @@ func New(iface common.IFaceMapper, logger *nblog.Logger, flowLogger nftypes.Flow s.SetTransportProtocolHandler(icmp.ProtocolNumber4, f.handleICMP) + f.checkICMPCapability() + log.Debugf("forwarder: Initialization complete with NIC %d", nicID) return f, nil } @@ -198,3 +204,24 @@ func buildKey(srcIP, dstIP netip.Addr, srcPort, dstPort uint16) conntrack.ConnKe DstPort: dstPort, } } + +// checkICMPCapability tests whether we have raw ICMP socket access at startup. +func (f *Forwarder) checkICMPCapability() { + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + lc := net.ListenConfig{} + conn, err := lc.ListenPacket(ctx, "ip4:icmp", "0.0.0.0") + if err != nil { + f.hasRawICMPAccess = false + f.logger.Debug("forwarder: No raw ICMP socket access, will use ping binary fallback") + return + } + + if err := conn.Close(); err != nil { + f.logger.Debug1("forwarder: Failed to close ICMP capability test socket: %v", err) + } + + f.hasRawICMPAccess = true + f.logger.Debug("forwarder: Raw ICMP socket access available") +} diff --git a/client/firewall/uspfilter/forwarder/icmp.go b/client/firewall/uspfilter/forwarder/icmp.go index 939c04789..cb3db325d 100644 --- a/client/firewall/uspfilter/forwarder/icmp.go +++ b/client/firewall/uspfilter/forwarder/icmp.go @@ -2,8 +2,11 @@ package forwarder import ( "context" + "fmt" "net" "net/netip" + "os/exec" + "runtime" "time" "github.com/google/uuid" @@ -14,30 +17,95 @@ import ( ) // handleICMP handles ICMP packets from the network stack -func (f *Forwarder) handleICMP(id stack.TransportEndpointID, pkt stack.PacketBufferPtr) bool { +func (f *Forwarder) handleICMP(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool { icmpHdr := header.ICMPv4(pkt.TransportHeader().View().AsSlice()) - icmpType := uint8(icmpHdr.Type()) - icmpCode := uint8(icmpHdr.Code()) - - if header.ICMPv4Type(icmpType) == header.ICMPv4EchoReply { - // dont process our own replies - return true - } flowID := uuid.New() - f.sendICMPEvent(nftypes.TypeStart, flowID, id, icmpType, icmpCode, 0, 0) + f.sendICMPEvent(nftypes.TypeStart, flowID, id, uint8(icmpHdr.Type()), uint8(icmpHdr.Code()), 0, 0) - ctx, cancel := context.WithTimeout(f.ctx, 5*time.Second) + // For Echo Requests, send and wait for response + if icmpHdr.Type() == header.ICMPv4Echo { + return f.handleICMPEcho(flowID, id, pkt, uint8(icmpHdr.Type()), uint8(icmpHdr.Code())) + } + + // For other ICMP types (Time Exceeded, Destination Unreachable, etc), forward without waiting + if !f.hasRawICMPAccess { + f.logger.Debug2("forwarder: Cannot handle ICMP type %v without raw socket access for %v", icmpHdr.Type(), epID(id)) + return false + } + + icmpData := stack.PayloadSince(pkt.TransportHeader()).AsSlice() + conn, err := f.forwardICMPPacket(id, icmpData, uint8(icmpHdr.Type()), uint8(icmpHdr.Code()), 100*time.Millisecond) + if err != nil { + f.logger.Error2("forwarder: Failed to forward ICMP packet for %v: %v", epID(id), err) + return true + } + if err := conn.Close(); err != nil { + f.logger.Debug1("forwarder: Failed to close ICMP socket: %v", err) + } + + return true +} + +// handleICMPEcho handles ICMP echo requests asynchronously with rate limiting. +func (f *Forwarder) handleICMPEcho(flowID uuid.UUID, id stack.TransportEndpointID, pkt *stack.PacketBuffer, icmpType, icmpCode uint8) bool { + select { + case f.pingSemaphore <- struct{}{}: + icmpData := stack.PayloadSince(pkt.TransportHeader()).ToSlice() + rxBytes := pkt.Size() + + go func() { + defer func() { <-f.pingSemaphore }() + + if f.hasRawICMPAccess { + f.handleICMPViaSocket(flowID, id, icmpType, icmpCode, icmpData, rxBytes) + } else { + f.handleICMPViaPing(flowID, id, icmpType, icmpCode, icmpData, rxBytes) + } + }() + default: + f.logger.Debug3("forwarder: ICMP rate limit exceeded for %v type %v code %v", + epID(id), icmpType, icmpCode) + } + return true +} + +// forwardICMPPacket creates a raw ICMP socket and sends the packet, returning the connection. +// The caller is responsible for closing the returned connection. +func (f *Forwarder) forwardICMPPacket(id stack.TransportEndpointID, payload []byte, icmpType, icmpCode uint8, timeout time.Duration) (net.PacketConn, error) { + ctx, cancel := context.WithTimeout(f.ctx, timeout) defer cancel() lc := net.ListenConfig{} - // TODO: support non-root conn, err := lc.ListenPacket(ctx, "ip4:icmp", "0.0.0.0") if err != nil { - f.logger.Error2("forwarder: Failed to create ICMP socket for %v: %v", epID(id), err) + return nil, fmt.Errorf("create ICMP socket: %w", err) + } - // This will make netstack reply on behalf of the original destination, that's ok for now - return false + dstIP := f.determineDialAddr(id.LocalAddress) + dst := &net.IPAddr{IP: dstIP} + + if _, err = conn.WriteTo(payload, dst); err != nil { + if closeErr := conn.Close(); closeErr != nil { + f.logger.Debug1("forwarder: Failed to close ICMP socket: %v", closeErr) + } + return nil, fmt.Errorf("write ICMP packet: %w", err) + } + + f.logger.Trace3("forwarder: Forwarded ICMP packet %v type %v code %v", + epID(id), icmpType, icmpCode) + + return conn, nil +} + +// handleICMPViaSocket handles ICMP echo requests using raw sockets. +func (f *Forwarder) handleICMPViaSocket(flowID uuid.UUID, id stack.TransportEndpointID, icmpType, icmpCode uint8, icmpData []byte, rxBytes int) { + sendTime := time.Now() + + conn, err := f.forwardICMPPacket(id, icmpData, icmpType, icmpCode, 5*time.Second) + if err != nil { + f.logger.Error2("forwarder: Failed to send ICMP packet for %v: %v", epID(id), err) + return } defer func() { if err := conn.Close(); err != nil { @@ -45,38 +113,22 @@ func (f *Forwarder) handleICMP(id stack.TransportEndpointID, pkt stack.PacketBuf } }() - dstIP := f.determineDialAddr(id.LocalAddress) - dst := &net.IPAddr{IP: dstIP} + txBytes := f.handleEchoResponse(conn, id) + rtt := time.Since(sendTime).Round(10 * time.Microsecond) - fullPacket := stack.PayloadSince(pkt.TransportHeader()) - payload := fullPacket.AsSlice() + f.logger.Trace4("forwarder: Forwarded ICMP echo reply %v type %v code %v (rtt=%v, raw socket)", + epID(id), icmpType, icmpCode, rtt) - if _, err = conn.WriteTo(payload, dst); err != nil { - f.logger.Error2("forwarder: Failed to write ICMP packet for %v: %v", epID(id), err) - return true - } - - f.logger.Trace3("forwarder: Forwarded ICMP packet %v type %v code %v", - epID(id), icmpHdr.Type(), icmpHdr.Code()) - - // For Echo Requests, send and handle response - if header.ICMPv4Type(icmpType) == header.ICMPv4Echo { - rxBytes := pkt.Size() - txBytes := f.handleEchoResponse(icmpHdr, conn, id) - f.sendICMPEvent(nftypes.TypeEnd, flowID, id, icmpType, icmpCode, uint64(rxBytes), uint64(txBytes)) - } - - // For other ICMP types (Time Exceeded, Destination Unreachable, etc) do nothing - return true + f.sendICMPEvent(nftypes.TypeEnd, flowID, id, icmpType, icmpCode, uint64(rxBytes), uint64(txBytes)) } -func (f *Forwarder) handleEchoResponse(icmpHdr header.ICMPv4, conn net.PacketConn, id stack.TransportEndpointID) int { +func (f *Forwarder) handleEchoResponse(conn net.PacketConn, id stack.TransportEndpointID) int { if err := conn.SetReadDeadline(time.Now().Add(5 * time.Second)); err != nil { f.logger.Error1("forwarder: Failed to set read deadline for ICMP response: %v", err) return 0 } - response := make([]byte, f.endpoint.mtu) + response := make([]byte, f.endpoint.mtu.Load()) n, _, err := conn.ReadFrom(response) if err != nil { if !isTimeout(err) { @@ -85,31 +137,7 @@ func (f *Forwarder) handleEchoResponse(icmpHdr header.ICMPv4, conn net.PacketCon return 0 } - ipHdr := make([]byte, header.IPv4MinimumSize) - ip := header.IPv4(ipHdr) - ip.Encode(&header.IPv4Fields{ - TotalLength: uint16(header.IPv4MinimumSize + n), - TTL: 64, - Protocol: uint8(header.ICMPv4ProtocolNumber), - SrcAddr: id.LocalAddress, - DstAddr: id.RemoteAddress, - }) - ip.SetChecksum(^ip.CalculateChecksum()) - - fullPacket := make([]byte, 0, len(ipHdr)+n) - fullPacket = append(fullPacket, ipHdr...) - fullPacket = append(fullPacket, response[:n]...) - - if err := f.InjectIncomingPacket(fullPacket); err != nil { - f.logger.Error1("forwarder: Failed to inject ICMP response: %v", err) - - return 0 - } - - f.logger.Trace3("forwarder: Forwarded ICMP echo reply for %v type %v code %v", - epID(id), icmpHdr.Type(), icmpHdr.Code()) - - return len(fullPacket) + return f.injectICMPReply(id, response[:n]) } // sendICMPEvent stores flow events for ICMP packets @@ -152,3 +180,95 @@ func (f *Forwarder) sendICMPEvent(typ nftypes.Type, flowID uuid.UUID, id stack.T f.flowLogger.StoreEvent(fields) } + +// handleICMPViaPing handles ICMP echo requests by executing the system ping binary. +// This is used as a fallback when raw socket access is not available. +func (f *Forwarder) handleICMPViaPing(flowID uuid.UUID, id stack.TransportEndpointID, icmpType, icmpCode uint8, icmpData []byte, rxBytes int) { + ctx, cancel := context.WithTimeout(f.ctx, 5*time.Second) + defer cancel() + + dstIP := f.determineDialAddr(id.LocalAddress) + cmd := buildPingCommand(ctx, dstIP, 5*time.Second) + + pingStart := time.Now() + if err := cmd.Run(); err != nil { + f.logger.Warn4("forwarder: Ping binary failed for %v type %v code %v: %v", epID(id), + icmpType, icmpCode, err) + return + } + rtt := time.Since(pingStart).Round(10 * time.Microsecond) + + f.logger.Trace3("forwarder: Forwarded ICMP echo request %v type %v code %v", + epID(id), icmpType, icmpCode) + + txBytes := f.synthesizeEchoReply(id, icmpData) + + f.logger.Trace4("forwarder: Forwarded ICMP echo reply %v type %v code %v (rtt=%v, ping binary)", + epID(id), icmpType, icmpCode, rtt) + + f.sendICMPEvent(nftypes.TypeEnd, flowID, id, icmpType, icmpCode, uint64(rxBytes), uint64(txBytes)) +} + +// buildPingCommand creates a platform-specific ping command. +func buildPingCommand(ctx context.Context, target net.IP, timeout time.Duration) *exec.Cmd { + timeoutSec := int(timeout.Seconds()) + if timeoutSec < 1 { + timeoutSec = 1 + } + + switch runtime.GOOS { + case "linux", "android": + return exec.CommandContext(ctx, "ping", "-c", "1", "-W", fmt.Sprintf("%d", timeoutSec), "-q", target.String()) + case "darwin", "ios": + return exec.CommandContext(ctx, "ping", "-c", "1", "-t", fmt.Sprintf("%d", timeoutSec), "-q", target.String()) + case "freebsd": + return exec.CommandContext(ctx, "ping", "-c", "1", "-t", fmt.Sprintf("%d", timeoutSec), target.String()) + case "openbsd", "netbsd": + return exec.CommandContext(ctx, "ping", "-c", "1", "-w", fmt.Sprintf("%d", timeoutSec), target.String()) + case "windows": + return exec.CommandContext(ctx, "ping", "-n", "1", "-w", fmt.Sprintf("%d", timeoutSec*1000), target.String()) + default: + return exec.CommandContext(ctx, "ping", "-c", "1", target.String()) + } +} + +// synthesizeEchoReply creates an ICMP echo reply from raw ICMP data and injects it back into the network stack. +// Returns the size of the injected packet. +func (f *Forwarder) synthesizeEchoReply(id stack.TransportEndpointID, icmpData []byte) int { + replyICMP := make([]byte, len(icmpData)) + copy(replyICMP, icmpData) + + replyICMPHdr := header.ICMPv4(replyICMP) + replyICMPHdr.SetType(header.ICMPv4EchoReply) + replyICMPHdr.SetChecksum(0) + replyICMPHdr.SetChecksum(header.ICMPv4Checksum(replyICMPHdr, 0)) + + return f.injectICMPReply(id, replyICMP) +} + +// injectICMPReply wraps an ICMP payload in an IP header and injects it into the network stack. +// Returns the total size of the injected packet, or 0 if injection failed. +func (f *Forwarder) injectICMPReply(id stack.TransportEndpointID, icmpPayload []byte) int { + ipHdr := make([]byte, header.IPv4MinimumSize) + ip := header.IPv4(ipHdr) + ip.Encode(&header.IPv4Fields{ + TotalLength: uint16(header.IPv4MinimumSize + len(icmpPayload)), + TTL: 64, + Protocol: uint8(header.ICMPv4ProtocolNumber), + SrcAddr: id.LocalAddress, + DstAddr: id.RemoteAddress, + }) + ip.SetChecksum(^ip.CalculateChecksum()) + + fullPacket := make([]byte, 0, len(ipHdr)+len(icmpPayload)) + fullPacket = append(fullPacket, ipHdr...) + fullPacket = append(fullPacket, icmpPayload...) + + // Bypass netstack and send directly to peer to avoid looping through our ICMP handler + if err := f.endpoint.device.CreateOutboundPacket(fullPacket, id.RemoteAddress.AsSlice()); err != nil { + f.logger.Error1("forwarder: Failed to send ICMP reply to peer: %v", err) + return 0 + } + + return len(fullPacket) +} diff --git a/client/firewall/uspfilter/forwarder/udp.go b/client/firewall/uspfilter/forwarder/udp.go index 55743d975..f175e275b 100644 --- a/client/firewall/uspfilter/forwarder/udp.go +++ b/client/firewall/uspfilter/forwarder/udp.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "io" "net" "net/netip" "sync" @@ -131,10 +132,10 @@ func (f *udpForwarder) cleanup() { } // handleUDP is called by the UDP forwarder for new packets -func (f *Forwarder) handleUDP(r *udp.ForwarderRequest) { +func (f *Forwarder) handleUDP(r *udp.ForwarderRequest) bool { if f.ctx.Err() != nil { f.logger.Trace("forwarder: context done, dropping UDP packet") - return + return false } id := r.ID() @@ -144,7 +145,7 @@ func (f *Forwarder) handleUDP(r *udp.ForwarderRequest) { f.udpForwarder.RUnlock() if exists { f.logger.Trace1("forwarder: existing UDP connection for %v", epID(id)) - return + return true } flowID := uuid.New() @@ -162,7 +163,7 @@ func (f *Forwarder) handleUDP(r *udp.ForwarderRequest) { if err != nil { f.logger.Debug2("forwarder: UDP dial error for %v: %v", epID(id), err) // TODO: Send ICMP error message - return + return false } // Create wait queue for blocking syscalls @@ -173,10 +174,10 @@ func (f *Forwarder) handleUDP(r *udp.ForwarderRequest) { if err := outConn.Close(); err != nil { f.logger.Debug2("forwarder: UDP outConn close error for %v: %v", epID(id), err) } - return + return false } - inConn := gonet.NewUDPConn(f.stack, &wq, ep) + inConn := gonet.NewUDPConn(&wq, ep) connCtx, connCancel := context.WithCancel(f.ctx) pConn := &udpPacketConn{ @@ -199,7 +200,7 @@ func (f *Forwarder) handleUDP(r *udp.ForwarderRequest) { if err := outConn.Close(); err != nil { f.logger.Debug2("forwarder: UDP outConn close error for %v: %v", epID(id), err) } - return + return true } f.udpForwarder.conns[id] = pConn f.udpForwarder.Unlock() @@ -208,6 +209,7 @@ func (f *Forwarder) handleUDP(r *udp.ForwarderRequest) { f.logger.Trace1("forwarder: established UDP connection %v", epID(id)) go f.proxyUDP(connCtx, pConn, id, ep) + return true } func (f *Forwarder) proxyUDP(ctx context.Context, pConn *udpPacketConn, id stack.TransportEndpointID, ep tcpip.Endpoint) { @@ -348,7 +350,7 @@ func (c *udpPacketConn) copy(ctx context.Context, dst net.Conn, src net.Conn, bu } func isClosedError(err error) bool { - return errors.Is(err, net.ErrClosed) || errors.Is(err, context.Canceled) + return errors.Is(err, net.ErrClosed) || errors.Is(err, context.Canceled) || errors.Is(err, io.EOF) } func isTimeout(err error) bool { diff --git a/client/firewall/uspfilter/log/log.go b/client/firewall/uspfilter/log/log.go index 139f702f2..66308defc 100644 --- a/client/firewall/uspfilter/log/log.go +++ b/client/firewall/uspfilter/log/log.go @@ -168,6 +168,15 @@ func (l *Logger) Warn3(format string, arg1, arg2, arg3 any) { } } +func (l *Logger) Warn4(format string, arg1, arg2, arg3, arg4 any) { + if l.level.Load() >= uint32(LevelWarn) { + select { + case l.msgChannel <- logMessage{level: LevelWarn, format: format, arg1: arg1, arg2: arg2, arg3: arg3, arg4: arg4}: + default: + } + } +} + func (l *Logger) Debug1(format string, arg1 any) { if l.level.Load() >= uint32(LevelDebug) { select { diff --git a/client/iface/bind/ice_bind.go b/client/iface/bind/ice_bind.go index dfb22ecde..0957d2dd5 100644 --- a/client/iface/bind/ice_bind.go +++ b/client/iface/bind/ice_bind.go @@ -27,8 +27,23 @@ type receiverCreator struct { iceBind *ICEBind } -func (rc receiverCreator) CreateIPv4ReceiverFn(pc *ipv4.PacketConn, conn *net.UDPConn, rxOffload bool, msgPool *sync.Pool) wgConn.ReceiveFunc { - return rc.iceBind.createIPv4ReceiverFn(pc, conn, rxOffload, msgPool) +func (rc receiverCreator) CreateReceiverFn(pc wgConn.BatchReader, conn *net.UDPConn, rxOffload bool, msgPool *sync.Pool) wgConn.ReceiveFunc { + if ipv4PC, ok := pc.(*ipv4.PacketConn); ok { + return rc.iceBind.createIPv4ReceiverFn(ipv4PC, conn, rxOffload, msgPool) + } + // IPv6 is currently not supported in the udpmux, this is a stub for compatibility with the + // wireguard-go ReceiverCreator interface which is called for both IPv4 and IPv6. + return func(bufs [][]byte, sizes []int, eps []wgConn.Endpoint) (n int, err error) { + buf := bufs[0] + size, ep, err := conn.ReadFromUDPAddrPort(buf) + if err != nil { + return 0, err + } + sizes[0] = size + stdEp := &wgConn.StdNetEndpoint{AddrPort: ep} + eps[0] = stdEp + return 1, nil + } } // ICEBind is a bind implementation with two main features: diff --git a/go.mod b/go.mod index 1b4612da3..cf55b9260 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( github.com/cilium/ebpf v0.15.0 github.com/coder/websocket v1.8.13 github.com/coreos/go-iptables v0.7.0 - github.com/creack/pty v1.1.18 + github.com/creack/pty v1.1.24 github.com/dexidp/dex v0.0.0-00010101000000-000000000000 github.com/dexidp/dex/api/v2 v2.4.0 github.com/eko/gocache/lib/v4 v4.2.0 @@ -122,7 +122,7 @@ require ( gorm.io/driver/postgres v1.5.7 gorm.io/driver/sqlite v1.5.7 gorm.io/gorm v1.25.12 - gvisor.dev/gvisor v0.0.0-20231020174304-b8a429915ff1 + gvisor.dev/gvisor v0.0.0-20251031020517-ecfcdd2f171c ) require ( @@ -285,7 +285,7 @@ replace github.com/kardianos/service => github.com/netbirdio/service v0.0.0-2024 replace github.com/getlantern/systray => github.com/netbirdio/systray v0.0.0-20231030152038-ef1ed2a27949 -replace golang.zx2c4.com/wireguard => github.com/netbirdio/wireguard-go v0.0.0-20241230120307-6a676aebaaf6 +replace golang.zx2c4.com/wireguard => github.com/netbirdio/wireguard-go v0.0.0-20260107100953-33b7c9d03db0 replace github.com/cloudflare/circl => github.com/cunicu/circl v0.0.0-20230801113412-fec58fc7b5f6 diff --git a/go.sum b/go.sum index 60b6304c3..e89e0ef12 100644 --- a/go.sum +++ b/go.sum @@ -118,8 +118,8 @@ github.com/coreos/go-oidc/v3 v3.14.1/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmr github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= +github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/cunicu/circl v0.0.0-20230801113412-fec58fc7b5f6 h1:/DS5cDX3FJdl+XaN2D7XAwFpuanTxnp52DBLZAaJKx0= github.com/cunicu/circl v0.0.0-20230801113412-fec58fc7b5f6/go.mod h1:+CauBF6R70Jqcyl8N2hC8pAXYbWkGIezuSbuGLtRhnw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -407,8 +407,8 @@ github.com/netbirdio/service v0.0.0-20240911161631-f62744f42502 h1:3tHlFmhTdX9ax github.com/netbirdio/service v0.0.0-20240911161631-f62744f42502/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= github.com/netbirdio/signal-dispatcher/dispatcher v0.0.0-20250805121659-6b4ac470ca45 h1:ujgviVYmx243Ksy7NdSwrdGPSRNE3pb8kEDSpH0QuAQ= github.com/netbirdio/signal-dispatcher/dispatcher v0.0.0-20250805121659-6b4ac470ca45/go.mod h1:5/sjFmLb8O96B5737VCqhHyGRzNFIaN/Bu7ZodXc3qQ= -github.com/netbirdio/wireguard-go v0.0.0-20241230120307-6a676aebaaf6 h1:X5h5QgP7uHAv78FWgHV8+WYLjHxK9v3ilkVXT1cpCrQ= -github.com/netbirdio/wireguard-go v0.0.0-20241230120307-6a676aebaaf6/go.mod h1:tkCQ4FQXmpAgYVh++1cq16/dH4QJtmvpRv19DWGAHSA= +github.com/netbirdio/wireguard-go v0.0.0-20260107100953-33b7c9d03db0 h1:h/QnNzm7xzHPm+gajcblYUOclrW2FeNeDlUNj6tTWKQ= +github.com/netbirdio/wireguard-go v0.0.0-20260107100953-33b7c9d03db0/go.mod h1:rpwXGsirqLqN2L0JDJQlwOboGHmptD5ZD6T2VmcqhTw= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= github.com/nicksnyder/go-i18n/v2 v2.5.1 h1:IxtPxYsR9Gp60cGXjfuR/llTqV8aYMsC472zD0D1vHk= @@ -843,5 +843,5 @@ gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8= gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ= gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= -gvisor.dev/gvisor v0.0.0-20231020174304-b8a429915ff1 h1:qDCwdCWECGnwQSQC01Dpnp09fRHxJs9PbktotUqG+hs= -gvisor.dev/gvisor v0.0.0-20231020174304-b8a429915ff1/go.mod h1:8hmigyCdYtw5xJGfQDJzSH5Ju8XEIDBnpyi8+O6GRt8= +gvisor.dev/gvisor v0.0.0-20251031020517-ecfcdd2f171c h1:pfzmXIkkDgydR4ZRP+e1hXywZfYR21FA0Fbk6ptMkiA= +gvisor.dev/gvisor v0.0.0-20251031020517-ecfcdd2f171c/go.mod h1:/mc6CfwbOm5KKmqoV7Qx20Q+Ja8+vO4g7FuCdlVoAfQ= From 0ad0c818996b7989d81ab8d02c24fdc3e14954c1 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Fri, 9 Jan 2026 16:13:04 +0800 Subject: [PATCH 035/374] [client] Reorder userspace ACL checks to fail faster for better performance (#4226) --- client/firewall/uspfilter/filter.go | 109 +++++++++--------- .../firewall/uspfilter/filter_bench_test.go | 2 +- .../firewall/uspfilter/filter_filter_test.go | 8 +- client/firewall/uspfilter/filter_test.go | 16 +-- client/firewall/uspfilter/rule.go | 2 +- client/firewall/uspfilter/tracer.go | 4 +- 6 files changed, 72 insertions(+), 69 deletions(-) diff --git a/client/firewall/uspfilter/filter.go b/client/firewall/uspfilter/filter.go index 3d3d79631..8caa1a0ad 100644 --- a/client/firewall/uspfilter/filter.go +++ b/client/firewall/uspfilter/filter.go @@ -29,7 +29,7 @@ import ( ) const ( - layerTypeAll = 0 + layerTypeAll = 255 // ipTCPHeaderMinSize represents minimum IP (20) + TCP (20) header size for MSS calculation ipTCPHeaderMinSize = 40 @@ -262,10 +262,7 @@ func create(iface common.IFaceMapper, nativeFirewall firewall.Manager, disableSe } func (m *Manager) blockInvalidRouted(iface common.IFaceMapper) (firewall.Rule, error) { - wgPrefix, err := netip.ParsePrefix(iface.Address().Network.String()) - if err != nil { - return nil, fmt.Errorf("parse wireguard network: %w", err) - } + wgPrefix := iface.Address().Network log.Debugf("blocking invalid routed traffic for %s", wgPrefix) rule, err := m.addRouteFiltering( @@ -439,19 +436,7 @@ func (m *Manager) AddPeerFiltering( r.sPort = sPort r.dPort = dPort - switch proto { - case firewall.ProtocolTCP: - r.protoLayer = layers.LayerTypeTCP - case firewall.ProtocolUDP: - r.protoLayer = layers.LayerTypeUDP - case firewall.ProtocolICMP: - r.protoLayer = layers.LayerTypeICMPv4 - if r.ipLayer == layers.LayerTypeIPv6 { - r.protoLayer = layers.LayerTypeICMPv6 - } - case firewall.ProtocolALL: - r.protoLayer = layerTypeAll - } + r.protoLayer = protoToLayer(proto, r.ipLayer) m.mutex.Lock() var targetMap map[netip.Addr]RuleSet @@ -496,16 +481,17 @@ func (m *Manager) addRouteFiltering( } ruleID := uuid.New().String() + rule := RouteRule{ // TODO: consolidate these IDs - id: ruleID, - mgmtId: id, - sources: sources, - dstSet: destination.Set, - proto: proto, - srcPort: sPort, - dstPort: dPort, - action: action, + id: ruleID, + mgmtId: id, + sources: sources, + dstSet: destination.Set, + protoLayer: protoToLayer(proto, layers.LayerTypeIPv4), + srcPort: sPort, + dstPort: dPort, + action: action, } if destination.IsPrefix() { rule.destinations = []netip.Prefix{destination.Prefix} @@ -945,7 +931,7 @@ func (m *Manager) filterInbound(packetData []byte, size int) bool { func (m *Manager) handleLocalTraffic(d *decoder, srcIP, dstIP netip.Addr, packetData []byte, size int) bool { ruleID, blocked := m.peerACLsBlock(srcIP, d, packetData) if blocked { - _, pnum := getProtocolFromPacket(d) + pnum := getProtocolFromPacket(d) srcPort, dstPort := getPortsFromPacket(d) m.logger.Trace6("Dropping local packet (ACL denied): rule_id=%s proto=%v src=%s:%d dst=%s:%d", @@ -1010,20 +996,22 @@ func (m *Manager) handleRoutedTraffic(d *decoder, srcIP, dstIP netip.Addr, packe return false } - proto, pnum := getProtocolFromPacket(d) + protoLayer := d.decoded[1] srcPort, dstPort := getPortsFromPacket(d) - ruleID, pass := m.routeACLsPass(srcIP, dstIP, proto, srcPort, dstPort) + ruleID, pass := m.routeACLsPass(srcIP, dstIP, protoLayer, srcPort, dstPort) if !pass { + proto := getProtocolFromPacket(d) + m.logger.Trace6("Dropping routed packet (ACL denied): rule_id=%s proto=%v src=%s:%d dst=%s:%d", - ruleID, pnum, srcIP, srcPort, dstIP, dstPort) + ruleID, proto, srcIP, srcPort, dstIP, dstPort) m.flowLogger.StoreEvent(nftypes.EventFields{ FlowID: uuid.New(), Type: nftypes.TypeDrop, RuleID: ruleID, Direction: nftypes.Ingress, - Protocol: pnum, + Protocol: proto, SourceIP: srcIP, DestIP: dstIP, SourcePort: srcPort, @@ -1052,16 +1040,33 @@ func (m *Manager) handleRoutedTraffic(d *decoder, srcIP, dstIP netip.Addr, packe return true } -func getProtocolFromPacket(d *decoder) (firewall.Protocol, nftypes.Protocol) { +func protoToLayer(proto firewall.Protocol, ipLayer gopacket.LayerType) gopacket.LayerType { + switch proto { + case firewall.ProtocolTCP: + return layers.LayerTypeTCP + case firewall.ProtocolUDP: + return layers.LayerTypeUDP + case firewall.ProtocolICMP: + if ipLayer == layers.LayerTypeIPv6 { + return layers.LayerTypeICMPv6 + } + return layers.LayerTypeICMPv4 + case firewall.ProtocolALL: + return layerTypeAll + } + return 0 +} + +func getProtocolFromPacket(d *decoder) nftypes.Protocol { switch d.decoded[1] { case layers.LayerTypeTCP: - return firewall.ProtocolTCP, nftypes.TCP + return nftypes.TCP case layers.LayerTypeUDP: - return firewall.ProtocolUDP, nftypes.UDP + return nftypes.UDP case layers.LayerTypeICMPv4, layers.LayerTypeICMPv6: - return firewall.ProtocolICMP, nftypes.ICMP + return nftypes.ICMP default: - return firewall.ProtocolALL, nftypes.ProtocolUnknown + return nftypes.ProtocolUnknown } } @@ -1233,19 +1238,30 @@ func validateRule(ip netip.Addr, packetData []byte, rules map[string]PeerRule, d } // routeACLsPass returns true if the packet is allowed by the route ACLs -func (m *Manager) routeACLsPass(srcIP, dstIP netip.Addr, proto firewall.Protocol, srcPort, dstPort uint16) ([]byte, bool) { +func (m *Manager) routeACLsPass(srcIP, dstIP netip.Addr, protoLayer gopacket.LayerType, srcPort, dstPort uint16) ([]byte, bool) { m.mutex.RLock() defer m.mutex.RUnlock() for _, rule := range m.routeRules { - if matches := m.ruleMatches(rule, srcIP, dstIP, proto, srcPort, dstPort); matches { + if matches := m.ruleMatches(rule, srcIP, dstIP, protoLayer, srcPort, dstPort); matches { return rule.mgmtId, rule.action == firewall.ActionAccept } } return nil, false } -func (m *Manager) ruleMatches(rule *RouteRule, srcAddr, dstAddr netip.Addr, proto firewall.Protocol, srcPort, dstPort uint16) bool { +func (m *Manager) ruleMatches(rule *RouteRule, srcAddr, dstAddr netip.Addr, protoLayer gopacket.LayerType, srcPort, dstPort uint16) bool { + // TODO: handle ipv6 vs ipv4 icmp rules + if rule.protoLayer != layerTypeAll && rule.protoLayer != protoLayer { + return false + } + + if protoLayer == layers.LayerTypeTCP || protoLayer == layers.LayerTypeUDP { + if !portsMatch(rule.srcPort, srcPort) || !portsMatch(rule.dstPort, dstPort) { + return false + } + } + destMatched := false for _, dst := range rule.destinations { if dst.Contains(dstAddr) { @@ -1264,21 +1280,8 @@ func (m *Manager) ruleMatches(rule *RouteRule, srcAddr, dstAddr netip.Addr, prot break } } - if !sourceMatched { - return false - } - if rule.proto != firewall.ProtocolALL && rule.proto != proto { - return false - } - - if proto == firewall.ProtocolTCP || proto == firewall.ProtocolUDP { - if !portsMatch(rule.srcPort, srcPort) || !portsMatch(rule.dstPort, dstPort) { - return false - } - } - - return true + return sourceMatched } // AddUDPPacketHook calls hook when UDP packet from given direction matched diff --git a/client/firewall/uspfilter/filter_bench_test.go b/client/firewall/uspfilter/filter_bench_test.go index 5a2d0410f..10ff62ed3 100644 --- a/client/firewall/uspfilter/filter_bench_test.go +++ b/client/firewall/uspfilter/filter_bench_test.go @@ -955,7 +955,7 @@ func BenchmarkRouteACLs(b *testing.B) { for _, tc := range cases { srcIP := netip.MustParseAddr(tc.srcIP) dstIP := netip.MustParseAddr(tc.dstIP) - manager.routeACLsPass(srcIP, dstIP, tc.proto, 0, tc.dstPort) + manager.routeACLsPass(srcIP, dstIP, protoToLayer(tc.proto, layers.LayerTypeIPv4), 0, tc.dstPort) } } } diff --git a/client/firewall/uspfilter/filter_filter_test.go b/client/firewall/uspfilter/filter_filter_test.go index eb5aa3343..a8efbac1c 100644 --- a/client/firewall/uspfilter/filter_filter_test.go +++ b/client/firewall/uspfilter/filter_filter_test.go @@ -1259,7 +1259,7 @@ func TestRouteACLFiltering(t *testing.T) { // testing routeACLsPass only and not FilterInbound, as routed packets are dropped after being passed // to the forwarder - _, isAllowed := manager.routeACLsPass(srcIP, dstIP, tc.proto, tc.srcPort, tc.dstPort) + _, isAllowed := manager.routeACLsPass(srcIP, dstIP, protoToLayer(tc.proto, layers.LayerTypeIPv4), tc.srcPort, tc.dstPort) require.Equal(t, tc.shouldPass, isAllowed) }) } @@ -1445,7 +1445,7 @@ func TestRouteACLOrder(t *testing.T) { srcIP := netip.MustParseAddr(p.srcIP) dstIP := netip.MustParseAddr(p.dstIP) - _, isAllowed := manager.routeACLsPass(srcIP, dstIP, p.proto, p.srcPort, p.dstPort) + _, isAllowed := manager.routeACLsPass(srcIP, dstIP, protoToLayer(p.proto, layers.LayerTypeIPv4), p.srcPort, p.dstPort) require.Equal(t, p.shouldPass, isAllowed, "packet %d failed", i) } }) @@ -1488,13 +1488,13 @@ func TestRouteACLSet(t *testing.T) { dstIP := netip.MustParseAddr("192.168.1.100") // Check that traffic is dropped (empty set shouldn't match anything) - _, isAllowed := manager.routeACLsPass(srcIP, dstIP, fw.ProtocolTCP, 12345, 80) + _, isAllowed := manager.routeACLsPass(srcIP, dstIP, protoToLayer(fw.ProtocolTCP, layers.LayerTypeIPv4), 12345, 80) require.False(t, isAllowed, "Empty set should not allow any traffic") err = manager.UpdateSet(set, []netip.Prefix{netip.MustParsePrefix("192.168.1.0/24")}) require.NoError(t, err) // Now the packet should be allowed - _, isAllowed = manager.routeACLsPass(srcIP, dstIP, fw.ProtocolTCP, 12345, 80) + _, isAllowed = manager.routeACLsPass(srcIP, dstIP, protoToLayer(fw.ProtocolTCP, layers.LayerTypeIPv4), 12345, 80) require.True(t, isAllowed, "After set update, traffic to the added network should be allowed") } diff --git a/client/firewall/uspfilter/filter_test.go b/client/firewall/uspfilter/filter_test.go index 120a9f418..c6a4ebeb8 100644 --- a/client/firewall/uspfilter/filter_test.go +++ b/client/firewall/uspfilter/filter_test.go @@ -767,9 +767,9 @@ func TestUpdateSetMerge(t *testing.T) { dstIP2 := netip.MustParseAddr("192.168.1.100") dstIP3 := netip.MustParseAddr("172.16.0.100") - _, isAllowed1 := manager.routeACLsPass(srcIP, dstIP1, fw.ProtocolTCP, 12345, 80) - _, isAllowed2 := manager.routeACLsPass(srcIP, dstIP2, fw.ProtocolTCP, 12345, 80) - _, isAllowed3 := manager.routeACLsPass(srcIP, dstIP3, fw.ProtocolTCP, 12345, 80) + _, isAllowed1 := manager.routeACLsPass(srcIP, dstIP1, protoToLayer(fw.ProtocolTCP, layers.LayerTypeIPv4), 12345, 80) + _, isAllowed2 := manager.routeACLsPass(srcIP, dstIP2, protoToLayer(fw.ProtocolTCP, layers.LayerTypeIPv4), 12345, 80) + _, isAllowed3 := manager.routeACLsPass(srcIP, dstIP3, protoToLayer(fw.ProtocolTCP, layers.LayerTypeIPv4), 12345, 80) require.True(t, isAllowed1, "Traffic to 10.0.0.100 should be allowed") require.True(t, isAllowed2, "Traffic to 192.168.1.100 should be allowed") @@ -784,8 +784,8 @@ func TestUpdateSetMerge(t *testing.T) { require.NoError(t, err) // Check that all original prefixes are still included - _, isAllowed1 = manager.routeACLsPass(srcIP, dstIP1, fw.ProtocolTCP, 12345, 80) - _, isAllowed2 = manager.routeACLsPass(srcIP, dstIP2, fw.ProtocolTCP, 12345, 80) + _, isAllowed1 = manager.routeACLsPass(srcIP, dstIP1, protoToLayer(fw.ProtocolTCP, layers.LayerTypeIPv4), 12345, 80) + _, isAllowed2 = manager.routeACLsPass(srcIP, dstIP2, protoToLayer(fw.ProtocolTCP, layers.LayerTypeIPv4), 12345, 80) require.True(t, isAllowed1, "Traffic to 10.0.0.100 should still be allowed after update") require.True(t, isAllowed2, "Traffic to 192.168.1.100 should still be allowed after update") @@ -793,8 +793,8 @@ func TestUpdateSetMerge(t *testing.T) { dstIP4 := netip.MustParseAddr("172.16.1.100") dstIP5 := netip.MustParseAddr("10.1.0.50") - _, isAllowed4 := manager.routeACLsPass(srcIP, dstIP4, fw.ProtocolTCP, 12345, 80) - _, isAllowed5 := manager.routeACLsPass(srcIP, dstIP5, fw.ProtocolTCP, 12345, 80) + _, isAllowed4 := manager.routeACLsPass(srcIP, dstIP4, protoToLayer(fw.ProtocolTCP, layers.LayerTypeIPv4), 12345, 80) + _, isAllowed5 := manager.routeACLsPass(srcIP, dstIP5, protoToLayer(fw.ProtocolTCP, layers.LayerTypeIPv4), 12345, 80) require.True(t, isAllowed4, "Traffic to new prefix 172.16.0.0/16 should be allowed") require.True(t, isAllowed5, "Traffic to new prefix 10.1.0.0/24 should be allowed") @@ -922,7 +922,7 @@ func TestUpdateSetDeduplication(t *testing.T) { srcIP := netip.MustParseAddr("100.10.0.1") for _, tc := range testCases { - _, isAllowed := manager.routeACLsPass(srcIP, tc.dstIP, fw.ProtocolTCP, 12345, 80) + _, isAllowed := manager.routeACLsPass(srcIP, tc.dstIP, protoToLayer(fw.ProtocolTCP, layers.LayerTypeIPv4), 12345, 80) require.Equal(t, tc.expected, isAllowed, tc.desc) } } diff --git a/client/firewall/uspfilter/rule.go b/client/firewall/uspfilter/rule.go index b765c72e9..dbe3a7858 100644 --- a/client/firewall/uspfilter/rule.go +++ b/client/firewall/uspfilter/rule.go @@ -34,7 +34,7 @@ type RouteRule struct { sources []netip.Prefix dstSet firewall.Set destinations []netip.Prefix - proto firewall.Protocol + protoLayer gopacket.LayerType srcPort *firewall.Port dstPort *firewall.Port action firewall.Action diff --git a/client/firewall/uspfilter/tracer.go b/client/firewall/uspfilter/tracer.go index c46a6581d..69c2519bf 100644 --- a/client/firewall/uspfilter/tracer.go +++ b/client/firewall/uspfilter/tracer.go @@ -379,9 +379,9 @@ func (m *Manager) handleNativeRouter(trace *PacketTrace) *PacketTrace { } func (m *Manager) handleRouteACLs(trace *PacketTrace, d *decoder, srcIP, dstIP netip.Addr) *PacketTrace { - proto, _ := getProtocolFromPacket(d) + protoLayer := d.decoded[1] srcPort, dstPort := getPortsFromPacket(d) - id, allowed := m.routeACLsPass(srcIP, dstIP, proto, srcPort, dstPort) + id, allowed := m.routeACLsPass(srcIP, dstIP, protoLayer, srcPort, dstPort) strId := string(id) if id == nil { From 684fc0d2a25e3a1814cca9defad42db875a3841f Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Fri, 9 Jan 2026 11:49:26 +0100 Subject: [PATCH 036/374] [management] fix the issue with duplicated peers with the same key (#5053) --- management/server/account_test.go | 4 +- management/server/group_test.go | 1 + management/server/migration/migration.go | 58 +++++++++- management/server/migration/migration_test.go | 101 ++++++++++++++++++ management/server/peer/peer.go | 2 +- management/server/peer_test.go | 2 + management/server/store/sql_store_test.go | 9 ++ management/server/store/store.go | 13 ++- management/server/testdata/extended-store.sql | 2 +- management/server/testdata/store.sql | 4 +- .../server/testdata/store_policy_migrate.sql | 2 +- .../testdata/store_with_expired_peers.sql | 4 +- management/server/testdata/storev1.sql | 2 +- 13 files changed, 190 insertions(+), 14 deletions(-) diff --git a/management/server/account_test.go b/management/server/account_test.go index 32d2b4ea3..b5f15ed98 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -3465,11 +3465,11 @@ func TestPropagateUserGroupMemberships(t *testing.T) { account, err := manager.GetOrCreateAccountByUser(ctx, auth.UserAuth{UserId: initiatorId, Domain: domain}) require.NoError(t, err) - peer1 := &nbpeer.Peer{ID: "peer1", AccountID: account.Id, UserID: initiatorId, IP: net.IP{1, 1, 1, 1}, DNSLabel: "peer1.domain.test"} + peer1 := &nbpeer.Peer{ID: "peer1", AccountID: account.Id, Key: "key1", UserID: initiatorId, IP: net.IP{1, 1, 1, 1}, DNSLabel: "peer1.domain.test"} err = manager.Store.AddPeerToAccount(ctx, peer1) require.NoError(t, err) - peer2 := &nbpeer.Peer{ID: "peer2", AccountID: account.Id, UserID: initiatorId, IP: net.IP{2, 2, 2, 2}, DNSLabel: "peer2.domain.test"} + peer2 := &nbpeer.Peer{ID: "peer2", AccountID: account.Id, Key: "key2", UserID: initiatorId, IP: net.IP{2, 2, 2, 2}, DNSLabel: "peer2.domain.test"} err = manager.Store.AddPeerToAccount(ctx, peer2) require.NoError(t, err) diff --git a/management/server/group_test.go b/management/server/group_test.go index 95f37a3ff..f7cc8d60c 100644 --- a/management/server/group_test.go +++ b/management/server/group_test.go @@ -893,6 +893,7 @@ func Test_AddPeerAndAddToAll(t *testing.T) { peer := &peer2.Peer{ ID: strconv.Itoa(i), AccountID: accountID, + Key: "key" + strconv.Itoa(i), DNSLabel: "peer" + strconv.Itoa(i), IP: uint32ToIP(uint32(i)), } diff --git a/management/server/migration/migration.go b/management/server/migration/migration.go index 7fcb98ccb..29555ed0c 100644 --- a/management/server/migration/migration.go +++ b/management/server/migration/migration.go @@ -404,10 +404,11 @@ func CreateIndexIfNotExists[T any](ctx context.Context, db *gorm.DB, indexName s if dialect == "mysql" { var withLength []string for _, col := range columns { - if col == "ip" || col == "dns_label" { - withLength = append(withLength, fmt.Sprintf("%s(64)", col)) + quotedCol := fmt.Sprintf("`%s`", col) + if col == "ip" || col == "dns_label" || col == "key" { + withLength = append(withLength, fmt.Sprintf("%s(64)", quotedCol)) } else { - withLength = append(withLength, col) + withLength = append(withLength, quotedCol) } } columnClause = strings.Join(withLength, ", ") @@ -487,3 +488,54 @@ func MigrateJsonToTable[T any](ctx context.Context, db *gorm.DB, columnName stri log.WithContext(ctx).Infof("Migration of JSON field %s from table %s into separate table completed", columnName, tableName) return nil } + +func RemoveDuplicatePeerKeys(ctx context.Context, db *gorm.DB) error { + if !db.Migrator().HasTable("peers") { + log.WithContext(ctx).Debug("peers table does not exist, skipping duplicate key cleanup") + return nil + } + + keyColumn := GetColumnName(db, "key") + + var duplicates []struct { + Key string + Count int64 + } + + if err := db.Table("peers"). + Select(keyColumn + ", COUNT(*) as count"). + Group(keyColumn). + Having("COUNT(*) > 1"). + Find(&duplicates).Error; err != nil { + return fmt.Errorf("find duplicate keys: %w", err) + } + + if len(duplicates) == 0 { + return nil + } + + log.WithContext(ctx).Warnf("Found %d duplicate peer keys, cleaning up", len(duplicates)) + + for _, dup := range duplicates { + var peerIDs []string + if err := db.Table("peers"). + Select("id"). + Where(keyColumn+" = ?", dup.Key). + Order("peer_status_last_seen DESC"). + Pluck("id", &peerIDs).Error; err != nil { + return fmt.Errorf("get peers for key: %w", err) + } + + if len(peerIDs) <= 1 { + continue + } + + idsToDelete := peerIDs[1:] + + if err := db.Table("peers").Where("id IN ?", idsToDelete).Delete(nil).Error; err != nil { + return fmt.Errorf("delete duplicate peers: %w", err) + } + } + + return nil +} diff --git a/management/server/migration/migration_test.go b/management/server/migration/migration_test.go index ce76bd668..c1be8a3a3 100644 --- a/management/server/migration/migration_test.go +++ b/management/server/migration/migration_test.go @@ -340,3 +340,104 @@ func TestCreateIndexIfExists(t *testing.T) { exist = db.Migrator().HasIndex(&nbpeer.Peer{}, indexName) assert.True(t, exist, "Should have the index") } + +type testPeer struct { + ID string `gorm:"primaryKey"` + Key string `gorm:"index"` + PeerStatusLastSeen time.Time + PeerStatusConnected bool +} + +func (testPeer) TableName() string { + return "peers" +} + +func setupPeerTestDB(t *testing.T) *gorm.DB { + t.Helper() + db := setupDatabase(t) + _ = db.Migrator().DropTable(&testPeer{}) + err := db.AutoMigrate(&testPeer{}) + require.NoError(t, err, "Failed to auto-migrate tables") + return db +} + +func TestRemoveDuplicatePeerKeys_NoDuplicates(t *testing.T) { + db := setupPeerTestDB(t) + + now := time.Now() + peers := []testPeer{ + {ID: "peer1", Key: "key1", PeerStatusLastSeen: now}, + {ID: "peer2", Key: "key2", PeerStatusLastSeen: now}, + {ID: "peer3", Key: "key3", PeerStatusLastSeen: now}, + } + + for _, p := range peers { + err := db.Create(&p).Error + require.NoError(t, err) + } + + err := migration.RemoveDuplicatePeerKeys(context.Background(), db) + require.NoError(t, err) + + var count int64 + db.Model(&testPeer{}).Count(&count) + assert.Equal(t, int64(len(peers)), count, "All peers should remain when no duplicates") +} + +func TestRemoveDuplicatePeerKeys_WithDuplicates(t *testing.T) { + db := setupPeerTestDB(t) + + now := time.Now() + peers := []testPeer{ + {ID: "peer1", Key: "key1", PeerStatusLastSeen: now.Add(-2 * time.Hour)}, + {ID: "peer2", Key: "key1", PeerStatusLastSeen: now.Add(-1 * time.Hour)}, + {ID: "peer3", Key: "key1", PeerStatusLastSeen: now}, + {ID: "peer4", Key: "key2", PeerStatusLastSeen: now}, + {ID: "peer5", Key: "key3", PeerStatusLastSeen: now.Add(-1 * time.Hour)}, + {ID: "peer6", Key: "key3", PeerStatusLastSeen: now}, + } + + for _, p := range peers { + err := db.Create(&p).Error + require.NoError(t, err) + } + + err := migration.RemoveDuplicatePeerKeys(context.Background(), db) + require.NoError(t, err) + + var count int64 + db.Model(&testPeer{}).Count(&count) + assert.Equal(t, int64(3), count, "Should have 3 peers after removing duplicates") + + var remainingPeers []testPeer + err = db.Find(&remainingPeers).Error + require.NoError(t, err) + + remainingIDs := make(map[string]bool) + for _, p := range remainingPeers { + remainingIDs[p.ID] = true + } + + assert.True(t, remainingIDs["peer3"], "peer3 should remain (most recent for key1)") + assert.True(t, remainingIDs["peer4"], "peer4 should remain (only peer for key2)") + assert.True(t, remainingIDs["peer6"], "peer6 should remain (most recent for key3)") + + assert.False(t, remainingIDs["peer1"], "peer1 should be deleted (older duplicate)") + assert.False(t, remainingIDs["peer2"], "peer2 should be deleted (older duplicate)") + assert.False(t, remainingIDs["peer5"], "peer5 should be deleted (older duplicate)") +} + +func TestRemoveDuplicatePeerKeys_EmptyTable(t *testing.T) { + db := setupPeerTestDB(t) + + err := migration.RemoveDuplicatePeerKeys(context.Background(), db) + require.NoError(t, err, "Should not fail on empty table") +} + +func TestRemoveDuplicatePeerKeys_NoTable(t *testing.T) { + db := setupDatabase(t) + _ = db.Migrator().DropTable(&testPeer{}) + + err := migration.RemoveDuplicatePeerKeys(context.Background(), db) + require.NoError(t, err, "Should not fail when table does not exist") +} diff --git a/management/server/peer/peer.go b/management/server/peer/peer.go index a898fd782..2439e8a22 100644 --- a/management/server/peer/peer.go +++ b/management/server/peer/peer.go @@ -19,7 +19,7 @@ type Peer struct { // AccountID is a reference to Account that this object belongs AccountID string `json:"-" gorm:"index"` // WireGuard public key - Key string `gorm:"index"` + Key string // uniqueness index (check migrations) // IP address of the Peer IP net.IP `gorm:"serializer:json"` // uniqueness index per accountID (check migrations) // Meta is a Peer system meta data diff --git a/management/server/peer_test.go b/management/server/peer_test.go index ce04adf9e..0160ff586 100644 --- a/management/server/peer_test.go +++ b/management/server/peer_test.go @@ -2129,12 +2129,14 @@ func Test_DeletePeer(t *testing.T) { "peer1": { ID: "peer1", AccountID: accountID, + Key: "key1", IP: net.IP{1, 1, 1, 1}, DNSLabel: "peer1.test", }, "peer2": { ID: "peer2", AccountID: accountID, + Key: "key2", IP: net.IP{2, 2, 2, 2}, DNSLabel: "peer2.test", }, diff --git a/management/server/store/sql_store_test.go b/management/server/store/sql_store_test.go index 728d67273..952432252 100644 --- a/management/server/store/sql_store_test.go +++ b/management/server/store/sql_store_test.go @@ -968,6 +968,7 @@ func TestSqlite_GetTakenIPs(t *testing.T) { peer1 := &nbpeer.Peer{ ID: "peer1", AccountID: existingAccountID, + Key: "key1", DNSLabel: "peer1", IP: net.IP{1, 1, 1, 1}, } @@ -982,6 +983,7 @@ func TestSqlite_GetTakenIPs(t *testing.T) { peer2 := &nbpeer.Peer{ ID: "peer1second", AccountID: existingAccountID, + Key: "key2", DNSLabel: "peer1-1", IP: net.IP{2, 2, 2, 2}, } @@ -1009,6 +1011,7 @@ func TestSqlite_GetPeerLabelsInAccount(t *testing.T) { peer1 := &nbpeer.Peer{ ID: "peer1", AccountID: existingAccountID, + Key: "key1", DNSLabel: "peer1", IP: net.IP{1, 1, 1, 1}, } @@ -1022,6 +1025,7 @@ func TestSqlite_GetPeerLabelsInAccount(t *testing.T) { peer2 := &nbpeer.Peer{ ID: "peer1second", AccountID: existingAccountID, + Key: "key2", DNSLabel: "peer1-1", IP: net.IP{2, 2, 2, 2}, } @@ -1048,6 +1052,7 @@ func Test_AddPeerWithSameDnsLabel(t *testing.T) { peer1 := &nbpeer.Peer{ ID: "peer1", AccountID: existingAccountID, + Key: "key1", DNSLabel: "peer1.domain.test", } err = store.AddPeerToAccount(context.Background(), peer1) @@ -1056,6 +1061,7 @@ func Test_AddPeerWithSameDnsLabel(t *testing.T) { peer2 := &nbpeer.Peer{ ID: "peer1second", AccountID: existingAccountID, + Key: "key2", DNSLabel: "peer1.domain.test", } err = store.AddPeerToAccount(context.Background(), peer2) @@ -1073,6 +1079,7 @@ func Test_AddPeerWithSameIP(t *testing.T) { peer1 := &nbpeer.Peer{ ID: "peer1", AccountID: existingAccountID, + Key: "key1", IP: net.IP{1, 1, 1, 1}, } err = store.AddPeerToAccount(context.Background(), peer1) @@ -1081,6 +1088,7 @@ func Test_AddPeerWithSameIP(t *testing.T) { peer2 := &nbpeer.Peer{ ID: "peer1second", AccountID: existingAccountID, + Key: "key2", IP: net.IP{1, 1, 1, 1}, } err = store.AddPeerToAccount(context.Background(), peer2) @@ -3696,6 +3704,7 @@ func BenchmarkGetAccountPeers(b *testing.B) { peer := &nbpeer.Peer{ ID: fmt.Sprintf("peer-%d", i), AccountID: accountID, + Key: fmt.Sprintf("key-%d", i), DNSLabel: fmt.Sprintf("peer%d.example.com", i), IP: intToIPv4(uint32(i)), } diff --git a/management/server/store/store.go b/management/server/store/store.go index 013a66d73..55d11c36a 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -350,8 +350,13 @@ func getMigrationsPreAuto(ctx context.Context) []migrationFunc { func(db *gorm.DB) error { return migration.MigrateNewField[types.User](ctx, db, "email", "") }, + func(db *gorm.DB) error { + return migration.RemoveDuplicatePeerKeys(ctx, db) + }, } -} // migratePostAuto migrates the SQLite database to the latest schema +} + +// migratePostAuto migrates the SQLite database to the latest schema func migratePostAuto(ctx context.Context, db *gorm.DB) error { migrations := getMigrationsPostAuto(ctx) @@ -381,6 +386,12 @@ func getMigrationsPostAuto(ctx context.Context) []migrationFunc { } }) }, + func(db *gorm.DB) error { + return migration.DropIndex[nbpeer.Peer](ctx, db, "idx_peers_key") + }, + func(db *gorm.DB) error { + return migration.CreateIndexIfNotExists[nbpeer.Peer](ctx, db, "idx_peers_key_unique", "key") + }, } } diff --git a/management/server/testdata/extended-store.sql b/management/server/testdata/extended-store.sql index 0393d1ade..9bb5dbace 100644 --- a/management/server/testdata/extended-store.sql +++ b/management/server/testdata/extended-store.sql @@ -14,7 +14,7 @@ CREATE TABLE `posture_checks` (`id` text,`name` text,`description` text,`account CREATE TABLE `network_addresses` (`net_ip` text,`mac` text); CREATE INDEX `idx_accounts_domain` ON `accounts`(`domain`); CREATE INDEX `idx_setup_keys_account_id` ON `setup_keys`(`account_id`); -CREATE INDEX `idx_peers_key` ON `peers`(`key`); +CREATE UNIQUE INDEX `idx_peers_key_unique` ON `peers`(`key`); CREATE INDEX `idx_peers_account_id` ON `peers`(`account_id`); CREATE INDEX `idx_users_account_id` ON `users`(`account_id`); CREATE INDEX `idx_personal_access_tokens_user_id` ON `personal_access_tokens`(`user_id`); diff --git a/management/server/testdata/store.sql b/management/server/testdata/store.sql index a21783857..022508323 100644 --- a/management/server/testdata/store.sql +++ b/management/server/testdata/store.sql @@ -18,7 +18,7 @@ CREATE TABLE `network_resources` (`id` text,`network_id` text,`account_id` text, CREATE TABLE `networks` (`id` text,`account_id` text,`name` text,`description` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_networks` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); CREATE INDEX `idx_accounts_domain` ON `accounts`(`domain`); CREATE INDEX `idx_setup_keys_account_id` ON `setup_keys`(`account_id`); -CREATE INDEX `idx_peers_key` ON `peers`(`key`); +CREATE UNIQUE INDEX `idx_peers_key_unique` ON `peers`(`key`); CREATE INDEX `idx_peers_account_id` ON `peers`(`account_id`); CREATE INDEX `idx_peers_account_id_ip` ON `peers`(`account_id`,`ip`); CREATE INDEX `idx_users_account_id` ON `users`(`account_id`); @@ -54,4 +54,4 @@ INSERT INTO policy_rules VALUES('cs387mkv2d4bgq41b6n0','cs1tnh0hhcjnqoiuebf0','D INSERT INTO network_routers VALUES('ctc20ji7qv9ck2sebc80','ct286bi7qv930dsrrug0','bf1c8084-ba50-4ce7-9439-34653001fc3b','cs1tnh0hhcjnqoiuebeg',NULL,0,0); INSERT INTO network_resources VALUES ('ctc4nci7qv9061u6ilfg','ct286bi7qv930dsrrug0','bf1c8084-ba50-4ce7-9439-34653001fc3b','Host','192.168.1.1'); INSERT INTO networks VALUES('ct286bi7qv930dsrrug0','bf1c8084-ba50-4ce7-9439-34653001fc3b','Test Network','Test Network'); -INSERT INTO peers VALUES('ct286bi7qv930dsrrug0','bf1c8084-ba50-4ce7-9439-34653001fc3b','','','"192.168.0.0"','','','','','','','','','','','','','','','','','test','test','2023-01-01 00:00:00+00:00',0,0,0,'a23efe53-63fb-11ec-90d6-0242ac120003','',0,0,'2023-01-01 00:00:00+00:00','2023-01-01 00:00:00+00:00',0,'','','',0); +INSERT INTO peers VALUES('ct286bi7qv930dsrrug0','bf1c8084-ba50-4ce7-9439-34653001fc3b','6kjbmVq1hmucVzvBXo5OucY5OYv+jSsB1jUTLq291Do=','','"192.168.0.0"','','','','','','','','','','','','','','','','','test','test','2023-01-01 00:00:00+00:00',0,0,0,'a23efe53-63fb-11ec-90d6-0242ac120003','',0,0,'2023-01-01 00:00:00+00:00','2023-01-01 00:00:00+00:00',0,'','','',0); diff --git a/management/server/testdata/store_policy_migrate.sql b/management/server/testdata/store_policy_migrate.sql index a88411795..395276cb1 100644 --- a/management/server/testdata/store_policy_migrate.sql +++ b/management/server/testdata/store_policy_migrate.sql @@ -14,7 +14,7 @@ CREATE TABLE `posture_checks` (`id` text,`name` text,`description` text,`account CREATE TABLE `network_addresses` (`net_ip` text,`mac` text); CREATE INDEX `idx_accounts_domain` ON `accounts`(`domain`); CREATE INDEX `idx_setup_keys_account_id` ON `setup_keys`(`account_id`); -CREATE INDEX `idx_peers_key` ON `peers`(`key`); +CREATE UNIQUE INDEX `idx_peers_key_unique` ON `peers`(`key`); CREATE INDEX `idx_peers_account_id` ON `peers`(`account_id`); CREATE INDEX `idx_users_account_id` ON `users`(`account_id`); CREATE INDEX `idx_personal_access_tokens_user_id` ON `personal_access_tokens`(`user_id`); diff --git a/management/server/testdata/store_with_expired_peers.sql b/management/server/testdata/store_with_expired_peers.sql index f2ef56a23..dfcaeee6f 100644 --- a/management/server/testdata/store_with_expired_peers.sql +++ b/management/server/testdata/store_with_expired_peers.sql @@ -14,7 +14,7 @@ CREATE TABLE `posture_checks` (`id` text,`name` text,`description` text,`account CREATE TABLE `network_addresses` (`net_ip` text,`mac` text); CREATE INDEX `idx_accounts_domain` ON `accounts`(`domain`); CREATE INDEX `idx_setup_keys_account_id` ON `setup_keys`(`account_id`); -CREATE INDEX `idx_peers_key` ON `peers`(`key`); +CREATE UNIQUE INDEX `idx_peers_key_unique` ON `peers`(`key`); CREATE INDEX `idx_peers_account_id` ON `peers`(`account_id`); CREATE INDEX `idx_users_account_id` ON `users`(`account_id`); CREATE INDEX `idx_personal_access_tokens_user_id` ON `personal_access_tokens`(`user_id`); @@ -30,7 +30,7 @@ INSERT INTO setup_keys VALUES('','bf1c8084-ba50-4ce7-9439-34653001fc3b','A2C8E62 INSERT INTO peers VALUES('cfvprsrlo1hqoo49ohog','bf1c8084-ba50-4ce7-9439-34653001fc3b','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','f2a34f6a4731','linux','Linux','11','unknown','Debian GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'f2a34f6a4731','f2a34f6a4731','2023-03-02 09:21:02.189035775+01:00',0,0,0,'','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,0,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); INSERT INTO peers VALUES('cg05lnblo1hkg2j514p0','bf1c8084-ba50-4ce7-9439-34653001fc3b','RlSy2vzoG2HyMBTUImXOiVhCBiiBa5qD5xzMxkiFDW4=','','"100.64.39.54"','expiredhost','linux','Linux','22.04','x86_64','Ubuntu','','development','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'expiredhost','expiredhost','2023-03-02 09:19:57.276717255+01:00',0,1,0,'edafee4e-63fb-11ec-90d6-0242ac120003','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMbK5ZXJsGOOWoBT4OmkPtgdPZe2Q7bDuS/zjn2CZxhK',0,1,0,'2023-03-02 09:14:21.791679181+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); INSERT INTO peers VALUES('cg3161rlo1hs9cq94gdg','bf1c8084-ba50-4ce7-9439-34653001fc3b','mVABSKj28gv+JRsf7e0NEGKgSOGTfU/nPB2cpuG56HU=','','"100.64.117.96"','testhost','linux','Linux','22.04','x86_64','Ubuntu','','development','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'testhost','testhost','2023-03-06 18:21:27.252010027+01:00',0,0,0,'edafee4e-63fb-11ec-90d6-0242ac120003','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINWvvUkFFcrj48CWTkNUb/do/n52i1L5dH4DhGu+4ZuM',0,0,0,'2023-03-07 09:02:47.442857106+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); -INSERT INTO peers VALUES('csrnkiq7qv9d8aitqd50','bf1c8084-ba50-4ce7-9439-34653001fc3b','mVABSKj28gv+JRsf7e0NEGKgSOGTfU/nPB2cpuG56HU=','','"100.64.117.97"','testhost','linux','Linux','22.04','x86_64','Ubuntu','','development','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'testhost','testhost-1','2023-03-06 18:21:27.252010027+01:00',0,0,0,'f4f6d672-63fb-11ec-90d6-0242ac120003','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINWvvUkFFcrj48CWTkNUb/do/n52i1L5dH4DhGu+4ZuM',0,0,1,'2023-03-07 09:02:47.442857106+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); +INSERT INTO peers VALUES('csrnkiq7qv9d8aitqd50','bf1c8084-ba50-4ce7-9439-34653001fc3b','nVABSKj28gv+JRsf7e0NEGKgSOGTfU/nPB2cpuG56HX=','','"100.64.117.97"','testhost','linux','Linux','22.04','x86_64','Ubuntu','','development','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'testhost','testhost-1','2023-03-06 18:21:27.252010027+01:00',0,0,0,'f4f6d672-63fb-11ec-90d6-0242ac120003','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINWvvUkFFcrj48CWTkNUb/do/n52i1L5dH4DhGu+4ZuM',0,0,1,'2023-03-07 09:02:47.442857106+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); INSERT INTO users VALUES('f4f6d672-63fb-11ec-90d6-0242ac120003','bf1c8084-ba50-4ce7-9439-34653001fc3b','user',0,0,'','[]',0,NULL,'2024-10-02 17:00:32.528196+02:00','api',0,''); INSERT INTO users VALUES('edafee4e-63fb-11ec-90d6-0242ac120003','bf1c8084-ba50-4ce7-9439-34653001fc3b','admin',0,0,'','[]',0,NULL,'2024-10-02 17:00:32.528196+02:00','api',0,''); INSERT INTO installations VALUES(1,''); diff --git a/management/server/testdata/storev1.sql b/management/server/testdata/storev1.sql index 8b09ec2be..eb5be31b7 100644 --- a/management/server/testdata/storev1.sql +++ b/management/server/testdata/storev1.sql @@ -14,7 +14,7 @@ CREATE TABLE `posture_checks` (`id` text,`name` text,`description` text,`account CREATE TABLE `network_addresses` (`net_ip` text,`mac` text); CREATE INDEX `idx_accounts_domain` ON `accounts`(`domain`); CREATE INDEX `idx_setup_keys_account_id` ON `setup_keys`(`account_id`); -CREATE INDEX `idx_peers_key` ON `peers`(`key`); +CREATE UNIQUE INDEX `idx_peers_key_unique` ON `peers`(`key`); CREATE INDEX `idx_peers_account_id` ON `peers`(`account_id`); CREATE INDEX `idx_users_account_id` ON `users`(`account_id`); CREATE INDEX `idx_personal_access_tokens_user_id` ON `personal_access_tokens`(`user_id`); From f7967f9ae3724fcb658f8883632bca64eeb94639 Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Fri, 9 Jan 2026 09:41:27 -0500 Subject: [PATCH 037/374] Feature/resolve local jwks keys (#5073) --- idp/dex/logrus_handler.go | 113 +++++++++++++++++++++ idp/dex/provider.go | 16 ++- management/cmd/management.go | 3 + management/internals/server/controllers.go | 3 +- management/server/idp/embedded.go | 27 ++++- management/server/idp/embedded_test.go | 58 +++++++++++ 6 files changed, 217 insertions(+), 3 deletions(-) create mode 100644 idp/dex/logrus_handler.go diff --git a/idp/dex/logrus_handler.go b/idp/dex/logrus_handler.go new file mode 100644 index 000000000..d911cb417 --- /dev/null +++ b/idp/dex/logrus_handler.go @@ -0,0 +1,113 @@ +package dex + +import ( + "context" + "log/slog" + + "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/formatter" +) + +// LogrusHandler is an slog.Handler that delegates to logrus. +// This allows Dex to use the same log format as the rest of NetBird. +type LogrusHandler struct { + logger *logrus.Logger + attrs []slog.Attr + groups []string +} + +// NewLogrusHandler creates a new slog handler that wraps logrus with NetBird's text formatter. +func NewLogrusHandler(level slog.Level) *LogrusHandler { + logger := logrus.New() + formatter.SetTextFormatter(logger) + + // Map slog level to logrus level + switch level { + case slog.LevelDebug: + logger.SetLevel(logrus.DebugLevel) + case slog.LevelInfo: + logger.SetLevel(logrus.InfoLevel) + case slog.LevelWarn: + logger.SetLevel(logrus.WarnLevel) + case slog.LevelError: + logger.SetLevel(logrus.ErrorLevel) + default: + logger.SetLevel(logrus.WarnLevel) + } + + return &LogrusHandler{logger: logger} +} + +// Enabled reports whether the handler handles records at the given level. +func (h *LogrusHandler) Enabled(_ context.Context, level slog.Level) bool { + switch level { + case slog.LevelDebug: + return h.logger.IsLevelEnabled(logrus.DebugLevel) + case slog.LevelInfo: + return h.logger.IsLevelEnabled(logrus.InfoLevel) + case slog.LevelWarn: + return h.logger.IsLevelEnabled(logrus.WarnLevel) + case slog.LevelError: + return h.logger.IsLevelEnabled(logrus.ErrorLevel) + default: + return true + } +} + +// Handle handles the Record. +func (h *LogrusHandler) Handle(_ context.Context, r slog.Record) error { + fields := make(logrus.Fields) + + // Add pre-set attributes + for _, attr := range h.attrs { + fields[attr.Key] = attr.Value.Any() + } + + // Add record attributes + r.Attrs(func(attr slog.Attr) bool { + fields[attr.Key] = attr.Value.Any() + return true + }) + + entry := h.logger.WithFields(fields) + + switch r.Level { + case slog.LevelDebug: + entry.Debug(r.Message) + case slog.LevelInfo: + entry.Info(r.Message) + case slog.LevelWarn: + entry.Warn(r.Message) + case slog.LevelError: + entry.Error(r.Message) + default: + entry.Info(r.Message) + } + + return nil +} + +// WithAttrs returns a new Handler with the given attributes added. +func (h *LogrusHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + newAttrs := make([]slog.Attr, len(h.attrs)+len(attrs)) + copy(newAttrs, h.attrs) + copy(newAttrs[len(h.attrs):], attrs) + return &LogrusHandler{ + logger: h.logger, + attrs: newAttrs, + groups: h.groups, + } +} + +// WithGroup returns a new Handler with the given group appended to the receiver's groups. +func (h *LogrusHandler) WithGroup(name string) slog.Handler { + newGroups := make([]string, len(h.groups)+1) + copy(newGroups, h.groups) + newGroups[len(h.groups)] = name + return &LogrusHandler{ + logger: h.logger, + attrs: h.attrs, + groups: newGroups, + } +} diff --git a/idp/dex/provider.go b/idp/dex/provider.go index 09713a226..fae682959 100644 --- a/idp/dex/provider.go +++ b/idp/dex/provider.go @@ -130,7 +130,21 @@ func NewProvider(ctx context.Context, config *Config) (*Provider, error) { // NewProviderFromYAML creates and initializes the Dex server from a YAMLConfig func NewProviderFromYAML(ctx context.Context, yamlConfig *YAMLConfig) (*Provider, error) { - logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) + // Configure log level from config, default to WARN to avoid logging sensitive data (emails) + logLevel := slog.LevelWarn + if yamlConfig.Logger.Level != "" { + switch strings.ToLower(yamlConfig.Logger.Level) { + case "debug": + logLevel = slog.LevelDebug + case "info": + logLevel = slog.LevelInfo + case "warn", "warning": + logLevel = slog.LevelWarn + case "error": + logLevel = slog.LevelError + } + } + logger := slog.New(NewLogrusHandler(logLevel)) stor, err := yamlConfig.Storage.OpenStorage(logger) if err != nil { diff --git a/management/cmd/management.go b/management/cmd/management.go index 5391b0866..9dbd4a6d4 100644 --- a/management/cmd/management.go +++ b/management/cmd/management.go @@ -190,6 +190,9 @@ func applyEmbeddedIdPConfig(cfg *nbconfig.Config) error { // Enable user deletion from IDP by default if EmbeddedIdP is enabled userDeleteFromIDPEnabled = true + // Set LocalAddress for embedded IdP if enabled, used for internal JWT validation + cfg.EmbeddedIdP.LocalAddress = fmt.Sprintf("localhost:%d", mgmtPort) + // Ensure HttpConfig exists if cfg.HttpConfig == nil { cfg.HttpConfig = &nbconfig.HttpServerConfig{} diff --git a/management/internals/server/controllers.go b/management/internals/server/controllers.go index 688ae5241..9f35d436f 100644 --- a/management/internals/server/controllers.go +++ b/management/internals/server/controllers.go @@ -68,7 +68,8 @@ func (s *BaseServer) AuthManager() auth.Manager { if len(audiences) > 0 { audience = audiences[0] // Use the first client ID as the primary audience } - keysLocation = oauthProvider.GetKeysLocation() + // Use localhost keys location for internal validation (management has embedded Dex) + keysLocation = oauthProvider.GetLocalKeysLocation() signingKeyRefreshEnabled = true issuer = oauthProvider.GetIssuer() userIDClaim = oauthProvider.GetUserIDClaim() diff --git a/management/server/idp/embedded.go b/management/server/idp/embedded.go index 963b5ae3d..7b8e5033c 100644 --- a/management/server/idp/embedded.go +++ b/management/server/idp/embedded.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "net/http" + "strings" "github.com/dexidp/dex/storage" "github.com/google/uuid" @@ -27,8 +28,11 @@ const ( type EmbeddedIdPConfig struct { // Enabled indicates whether the embedded IDP is enabled Enabled bool - // Issuer is the OIDC issuer URL (e.g., "http://localhost:3002/oauth2") + // Issuer is the OIDC issuer URL (e.g., "https://management.netbird.io/oauth2") Issuer string + // LocalAddress is the management server's local listen address (e.g., ":8080" or "localhost:8080") + // Used for internal JWT validation to avoid external network calls + LocalAddress string // Storage configuration for the IdP database Storage EmbeddedStorageConfig // DashboardRedirectURIs are the OAuth2 redirect URIs for the dashboard client @@ -146,7 +150,12 @@ var _ OAuthConfigProvider = (*EmbeddedIdPManager)(nil) // OAuthConfigProvider defines the interface for OAuth configuration needed by auth flows. type OAuthConfigProvider interface { GetIssuer() string + // GetKeysLocation returns the public JWKS endpoint URL (uses external issuer URL) GetKeysLocation() string + // GetLocalKeysLocation returns the localhost JWKS endpoint URL for internal use. + // Management server has embedded Dex and can validate tokens via localhost, + // avoiding external network calls and DNS resolution issues during startup. + GetLocalKeysLocation() string GetClientIDs() []string GetUserIDClaim() string GetTokenEndpoint() string @@ -500,6 +509,22 @@ func (m *EmbeddedIdPManager) GetKeysLocation() string { return m.provider.GetKeysLocation() } +// GetLocalKeysLocation returns the localhost JWKS endpoint URL for internal token validation. +// Uses the LocalAddress from config (management server's listen address) since embedded Dex +// is served by the management HTTP server, not a standalone Dex server. +func (m *EmbeddedIdPManager) GetLocalKeysLocation() string { + addr := m.config.LocalAddress + if addr == "" { + return "" + } + // Construct localhost URL from listen address + // addr is in format ":port" or "host:port" or "localhost:port" + if strings.HasPrefix(addr, ":") { + return fmt.Sprintf("http://localhost%s/oauth2/keys", addr) + } + return fmt.Sprintf("http://%s/oauth2/keys", addr) +} + // GetClientIDs returns the OAuth2 client IDs configured for this provider. func (m *EmbeddedIdPManager) GetClientIDs() []string { return []string{staticClientDashboard, staticClientCLI} diff --git a/management/server/idp/embedded_test.go b/management/server/idp/embedded_test.go index cfd9c2b54..04e3f0699 100644 --- a/management/server/idp/embedded_test.go +++ b/management/server/idp/embedded_test.go @@ -247,3 +247,61 @@ func TestEmbeddedIdPManager_UserIDFormat_MatchesJWT(t *testing.T) { t.Logf(" Raw UUID: %s", rawUserID) t.Logf(" Connector: %s", connectorID) } + +func TestEmbeddedIdPManager_GetLocalKeysLocation(t *testing.T) { + ctx := context.Background() + + tmpDir, err := os.MkdirTemp("", "embedded-idp-test-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + tests := []struct { + name string + localAddress string + expected string + }{ + { + name: "localhost with port", + localAddress: "localhost:8080", + expected: "http://localhost:8080/oauth2/keys", + }, + { + name: "localhost with https port", + localAddress: "localhost:443", + expected: "http://localhost:443/oauth2/keys", + }, + { + name: "port only format", + localAddress: ":8080", + expected: "http://localhost:8080/oauth2/keys", + }, + { + name: "empty address", + localAddress: "", + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config := &EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + LocalAddress: tt.localAddress, + Storage: EmbeddedStorageConfig{ + Type: "sqlite3", + Config: EmbeddedStorageTypeConfig{ + File: filepath.Join(tmpDir, "dex-"+tt.name+".db"), + }, + }, + } + + manager, err := NewEmbeddedIdPManager(ctx, config, nil) + require.NoError(t, err) + defer func() { _ = manager.Stop(ctx) }() + + result := manager.GetLocalKeysLocation() + assert.Equal(t, tt.expected, result) + }) + } +} From 614e7d5b90667b807e788dd3f0d7421dac4a8cac Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Fri, 9 Jan 2026 09:45:43 -0500 Subject: [PATCH 038/374] Validate OIDC issuer when creating or updating (#5074) --- management/server/identity_provider.go | 77 ++++++++++++- management/server/identity_provider_test.go | 110 +++++++++++++++++++ management/server/types/identity_provider.go | 14 ++- 3 files changed, 191 insertions(+), 10 deletions(-) diff --git a/management/server/identity_provider.go b/management/server/identity_provider.go index 6649c3953..8fd96c238 100644 --- a/management/server/identity_provider.go +++ b/management/server/identity_provider.go @@ -2,7 +2,13 @@ package server import ( "context" + "encoding/json" "errors" + "fmt" + "io" + "net/http" + "strings" + "time" "github.com/dexidp/dex/storage" "github.com/rs/xid" @@ -17,6 +23,69 @@ import ( "github.com/netbirdio/netbird/shared/management/status" ) +// oidcProviderJSON represents the OpenID Connect discovery document +type oidcProviderJSON struct { + Issuer string `json:"issuer"` +} + +// validateOIDCIssuer validates the OIDC issuer by fetching the OpenID configuration +// and verifying that the returned issuer matches the configured one. +func validateOIDCIssuer(ctx context.Context, issuer string) error { + wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration" + + httpClient := &http.Client{ + Timeout: 10 * time.Second, + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, wellKnown, nil) + if err != nil { + return fmt.Errorf("%w: %v", types.ErrIdentityProviderIssuerUnreachable, err) + } + + resp, err := httpClient.Do(req) + if err != nil { + return fmt.Errorf("%w: %v", types.ErrIdentityProviderIssuerUnreachable, err) + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("%w: unable to read response body: %v", types.ErrIdentityProviderIssuerUnreachable, err) + } + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%w: %s: %s", types.ErrIdentityProviderIssuerUnreachable, resp.Status, body) + } + + var p oidcProviderJSON + if err := json.Unmarshal(body, &p); err != nil { + return fmt.Errorf("%w: failed to decode provider discovery object: %v", types.ErrIdentityProviderIssuerUnreachable, err) + } + + if p.Issuer != issuer { + return fmt.Errorf("%w: expected %q got %q", types.ErrIdentityProviderIssuerMismatch, issuer, p.Issuer) + } + + return nil +} + +// validateIdentityProviderConfig validates the identity provider configuration including +// basic validation and OIDC issuer verification. +func validateIdentityProviderConfig(ctx context.Context, idpConfig *types.IdentityProvider) error { + if err := idpConfig.Validate(); err != nil { + return status.Errorf(status.InvalidArgument, "%s", err.Error()) + } + + // Validate the issuer by calling the OIDC discovery endpoint + if idpConfig.Issuer != "" { + if err := validateOIDCIssuer(ctx, idpConfig.Issuer); err != nil { + return status.Errorf(status.InvalidArgument, "%s", err.Error()) + } + } + + return nil +} + // GetIdentityProviders returns all identity providers for an account func (am *DefaultAccountManager) GetIdentityProviders(ctx context.Context, accountID, userID string) ([]*types.IdentityProvider, error) { ok, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.IdentityProviders, operations.Read) @@ -82,8 +151,8 @@ func (am *DefaultAccountManager) CreateIdentityProvider(ctx context.Context, acc return nil, status.NewPermissionDeniedError() } - if err := idpConfig.Validate(); err != nil { - return nil, status.Errorf(status.InvalidArgument, "%s", err.Error()) + if err := validateIdentityProviderConfig(ctx, idpConfig); err != nil { + return nil, err } embeddedManager, ok := am.idpManager.(*idp.EmbeddedIdPManager) @@ -119,8 +188,8 @@ func (am *DefaultAccountManager) UpdateIdentityProvider(ctx context.Context, acc return nil, status.NewPermissionDeniedError() } - if err := idpConfig.Validate(); err != nil { - return nil, status.Errorf(status.InvalidArgument, "%s", err.Error()) + if err := validateIdentityProviderConfig(ctx, idpConfig); err != nil { + return nil, err } embeddedManager, ok := am.idpManager.(*idp.EmbeddedIdPManager) diff --git a/management/server/identity_provider_test.go b/management/server/identity_provider_test.go index d637c4a8f..78dcbeb74 100644 --- a/management/server/identity_provider_test.go +++ b/management/server/identity_provider_test.go @@ -2,6 +2,10 @@ package server import ( "context" + "encoding/json" + "errors" + "net/http" + "net/http/httptest" "path/filepath" "testing" @@ -200,3 +204,109 @@ func TestDefaultAccountManager_UpdateIdentityProvider_Validation(t *testing.T) { require.Error(t, err) assert.Contains(t, err.Error(), "name is required") } + +func TestValidateOIDCIssuer(t *testing.T) { + tests := []struct { + name string + setupServer func() *httptest.Server + expectedErr error + expectedErrMsg string + }{ + { + name: "issuer mismatch", + setupServer: func() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + resp := oidcProviderJSON{Issuer: "https://different-issuer.com"} + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(resp) + })) + }, + expectedErr: types.ErrIdentityProviderIssuerMismatch, + expectedErrMsg: "does not match", + }, + { + name: "server returns non-200 status", + setupServer: func() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write([]byte("not found")) + })) + }, + expectedErr: types.ErrIdentityProviderIssuerUnreachable, + expectedErrMsg: "404", + }, + { + name: "server returns invalid JSON", + setupServer: func() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte("invalid json")) + })) + }, + expectedErr: types.ErrIdentityProviderIssuerUnreachable, + expectedErrMsg: "failed to decode", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + server := tt.setupServer() + defer server.Close() + + err := validateOIDCIssuer(context.Background(), server.URL) + + require.Error(t, err) + assert.True(t, errors.Is(err, tt.expectedErr), "expected error %v, got %v", tt.expectedErr, err) + if tt.expectedErrMsg != "" { + assert.Contains(t, err.Error(), tt.expectedErrMsg) + } + }) + } +} + +func TestValidateOIDCIssuer_Success(t *testing.T) { + // Create a server that returns its own URL as the issuer + var server *httptest.Server + server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/.well-known/openid-configuration" { + http.NotFound(w, r) + return + } + resp := oidcProviderJSON{Issuer: server.URL} + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(resp) + })) + defer server.Close() + + err := validateOIDCIssuer(context.Background(), server.URL) + require.NoError(t, err) +} + +func TestValidateOIDCIssuer_UnreachableServer(t *testing.T) { + // Use a URL that will definitely fail to connect + err := validateOIDCIssuer(context.Background(), "http://localhost:59999") + require.Error(t, err) + assert.True(t, errors.Is(err, types.ErrIdentityProviderIssuerUnreachable)) +} + +func TestValidateOIDCIssuer_TrailingSlash(t *testing.T) { + // Test that trailing slashes are handled correctly + var server *httptest.Server + server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/.well-known/openid-configuration" { + http.NotFound(w, r) + return + } + // Return issuer without trailing slash + resp := oidcProviderJSON{Issuer: server.URL} + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(resp) + })) + defer server.Close() + + // Pass issuer with trailing slash + err := validateOIDCIssuer(context.Background(), server.URL+"/") + // This should fail because the issuer returned doesn't have trailing slash + require.Error(t, err) + assert.True(t, errors.Is(err, types.ErrIdentityProviderIssuerMismatch)) +} diff --git a/management/server/types/identity_provider.go b/management/server/types/identity_provider.go index e809590de..c4498e4d4 100644 --- a/management/server/types/identity_provider.go +++ b/management/server/types/identity_provider.go @@ -7,12 +7,14 @@ import ( // Identity provider validation errors var ( - ErrIdentityProviderNameRequired = errors.New("identity provider name is required") - ErrIdentityProviderTypeRequired = errors.New("identity provider type is required") - ErrIdentityProviderTypeUnsupported = errors.New("unsupported identity provider type") - ErrIdentityProviderIssuerRequired = errors.New("identity provider issuer is required") - ErrIdentityProviderIssuerInvalid = errors.New("identity provider issuer must be a valid URL") - ErrIdentityProviderClientIDRequired = errors.New("identity provider client ID is required") + ErrIdentityProviderNameRequired = errors.New("identity provider name is required") + ErrIdentityProviderTypeRequired = errors.New("identity provider type is required") + ErrIdentityProviderTypeUnsupported = errors.New("unsupported identity provider type") + ErrIdentityProviderIssuerRequired = errors.New("identity provider issuer is required") + ErrIdentityProviderIssuerInvalid = errors.New("identity provider issuer must be a valid URL") + ErrIdentityProviderIssuerUnreachable = errors.New("identity provider issuer is unreachable") + ErrIdentityProviderIssuerMismatch = errors.New("identity provider issuer does not match the issuer returned by the provider") + ErrIdentityProviderClientIDRequired = errors.New("identity provider client ID is required") ) // IdentityProviderType is the type of identity provider From 394ad195074d8456a3df0aaac4ccb458bd013944 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Mon, 12 Jan 2026 19:35:38 +0800 Subject: [PATCH 039/374] [client] Chase CNAMEs in local resolver to ensure musl compatibility (#5046) --- client/internal/dns/handler_chain.go | 132 +++- client/internal/dns/local/local.go | 279 +++++++- client/internal/dns/local/local_test.go | 599 +++++++++++++++++- client/internal/dns/resutil/resolve.go | 197 ++++++ client/internal/dns/server.go | 11 +- client/internal/dns/server_test.go | 8 +- client/internal/dns/upstream.go | 22 +- client/internal/dnsfwd/forwarder.go | 200 ++---- client/internal/dnsfwd/forwarder_test.go | 23 +- .../routemanager/dnsinterceptor/handler.go | 68 +- 10 files changed, 1267 insertions(+), 272 deletions(-) create mode 100644 client/internal/dns/resutil/resolve.go diff --git a/client/internal/dns/handler_chain.go b/client/internal/dns/handler_chain.go index 2e54bffd9..7e3eb6d1f 100644 --- a/client/internal/dns/handler_chain.go +++ b/client/internal/dns/handler_chain.go @@ -3,11 +3,15 @@ package dns import ( "fmt" "slices" + "strconv" "strings" "sync" + "time" "github.com/miekg/dns" log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/internal/dns/resutil" ) const ( @@ -43,7 +47,23 @@ type HandlerChain struct { type ResponseWriterChain struct { dns.ResponseWriter origPattern string + requestID string shouldContinue bool + response *dns.Msg + meta map[string]string +} + +// RequestID returns the request ID for tracing +func (w *ResponseWriterChain) RequestID() string { + return w.requestID +} + +// SetMeta sets a metadata key-value pair for logging +func (w *ResponseWriterChain) SetMeta(key, value string) { + if w.meta == nil { + w.meta = make(map[string]string) + } + w.meta[key] = value } func (w *ResponseWriterChain) WriteMsg(m *dns.Msg) error { @@ -52,6 +72,7 @@ func (w *ResponseWriterChain) WriteMsg(m *dns.Msg) error { w.shouldContinue = true return nil } + w.response = m return w.ResponseWriter.WriteMsg(m) } @@ -101,6 +122,8 @@ func (c *HandlerChain) AddHandler(pattern string, handler dns.Handler, priority pos := c.findHandlerPosition(entry) c.handlers = append(c.handlers[:pos], append([]HandlerEntry{entry}, c.handlers[pos:]...)...) + + c.logHandlers() } // findHandlerPosition determines where to insert a new handler based on priority and specificity @@ -140,68 +163,109 @@ func (c *HandlerChain) removeEntry(pattern string, priority int) { for i := len(c.handlers) - 1; i >= 0; i-- { entry := c.handlers[i] if strings.EqualFold(entry.OrigPattern, pattern) && entry.Priority == priority { + log.Debugf("removing handler pattern: domain=%s priority=%d", entry.OrigPattern, priority) c.handlers = append(c.handlers[:i], c.handlers[i+1:]...) + c.logHandlers() break } } } +// logHandlers logs the current handler chain state. Caller must hold the lock. +func (c *HandlerChain) logHandlers() { + if !log.IsLevelEnabled(log.TraceLevel) { + return + } + + var b strings.Builder + b.WriteString("handler chain (" + strconv.Itoa(len(c.handlers)) + "):\n") + for _, h := range c.handlers { + b.WriteString(" - pattern: domain=" + h.Pattern + " original: domain=" + h.OrigPattern + + " wildcard=" + strconv.FormatBool(h.IsWildcard) + + " match_subdomain=" + strconv.FormatBool(h.MatchSubdomains) + + " priority=" + strconv.Itoa(h.Priority) + "\n") + } + log.Trace(strings.TrimSuffix(b.String(), "\n")) +} + func (c *HandlerChain) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { if len(r.Question) == 0 { return } - qname := strings.ToLower(r.Question[0].Name) + startTime := time.Now() + requestID := resutil.GenerateRequestID() + logger := log.WithFields(log.Fields{ + "request_id": requestID, + "dns_id": fmt.Sprintf("%04x", r.Id), + }) + + question := r.Question[0] + qname := strings.ToLower(question.Name) c.mu.RLock() handlers := slices.Clone(c.handlers) c.mu.RUnlock() - if log.IsLevelEnabled(log.TraceLevel) { - var b strings.Builder - b.WriteString(fmt.Sprintf("DNS request domain=%s, handlers (%d):\n", qname, len(handlers))) - for _, h := range handlers { - b.WriteString(fmt.Sprintf(" - pattern: domain=%s original: domain=%s wildcard=%v match_subdomain=%v priority=%d\n", - h.Pattern, h.OrigPattern, h.IsWildcard, h.MatchSubdomains, h.Priority)) - } - log.Trace(strings.TrimSuffix(b.String(), "\n")) - } - // Try handlers in priority order for _, entry := range handlers { - matched := c.isHandlerMatch(qname, entry) - - if matched { - log.Tracef("handler matched: domain=%s -> pattern=%s wildcard=%v match_subdomain=%v priority=%d", - qname, entry.OrigPattern, entry.IsWildcard, entry.MatchSubdomains, entry.Priority) - - chainWriter := &ResponseWriterChain{ - ResponseWriter: w, - origPattern: entry.OrigPattern, - } - entry.Handler.ServeDNS(chainWriter, r) - - // If handler wants to continue, try next handler - if chainWriter.shouldContinue { - // Only log continue for non-management cache handlers to reduce noise - if entry.Priority != PriorityMgmtCache { - log.Tracef("handler requested continue to next handler for domain=%s", qname) - } - continue - } - return + if !c.isHandlerMatch(qname, entry) { + continue } + + handlerName := entry.OrigPattern + if s, ok := entry.Handler.(interface{ String() string }); ok { + handlerName = s.String() + } + + logger.Tracef("question: domain=%s type=%s class=%s -> handler=%s pattern=%s wildcard=%v match_subdomain=%v priority=%d", + qname, dns.TypeToString[question.Qtype], dns.ClassToString[question.Qclass], + handlerName, entry.OrigPattern, entry.IsWildcard, entry.MatchSubdomains, entry.Priority) + + chainWriter := &ResponseWriterChain{ + ResponseWriter: w, + origPattern: entry.OrigPattern, + requestID: requestID, + } + entry.Handler.ServeDNS(chainWriter, r) + + // If handler wants to continue, try next handler + if chainWriter.shouldContinue { + if entry.Priority != PriorityMgmtCache { + logger.Tracef("handler requested continue for domain=%s", qname) + } + continue + } + + c.logResponse(logger, chainWriter, qname, startTime) + return } // No handler matched or all handlers passed - log.Tracef("no handler found for domain=%s", qname) + logger.Tracef("no handler found for domain=%s type=%s class=%s", + qname, dns.TypeToString[question.Qtype], dns.ClassToString[question.Qclass]) resp := &dns.Msg{} resp.SetRcode(r, dns.RcodeRefused) if err := w.WriteMsg(resp); err != nil { - log.Errorf("failed to write DNS response: %v", err) + logger.Errorf("failed to write DNS response: %v", err) } } +func (c *HandlerChain) logResponse(logger *log.Entry, cw *ResponseWriterChain, qname string, startTime time.Time) { + if cw.response == nil { + return + } + + var meta string + for k, v := range cw.meta { + meta += " " + k + "=" + v + } + + logger.Tracef("response: domain=%s rcode=%s answers=%s%s took=%s", + qname, dns.RcodeToString[cw.response.Rcode], resutil.FormatAnswers(cw.response.Answer), + meta, time.Since(startTime)) +} + func (c *HandlerChain) isHandlerMatch(qname string, entry HandlerEntry) bool { switch { case entry.Pattern == ".": diff --git a/client/internal/dns/local/local.go b/client/internal/dns/local/local.go index bac7875ec..cb1fa5293 100644 --- a/client/internal/dns/local/local.go +++ b/client/internal/dns/local/local.go @@ -1,30 +1,50 @@ package local import ( + "context" + "errors" "fmt" + "net" + "net/netip" "slices" "strings" "sync" + "time" "github.com/miekg/dns" log "github.com/sirupsen/logrus" "golang.org/x/exp/maps" + "github.com/netbirdio/netbird/client/internal/dns/resutil" "github.com/netbirdio/netbird/client/internal/dns/types" nbdns "github.com/netbirdio/netbird/dns" "github.com/netbirdio/netbird/shared/management/domain" ) +const externalResolutionTimeout = 4 * time.Second + +type resolver interface { + LookupNetIP(ctx context.Context, network, host string) ([]netip.Addr, error) +} + type Resolver struct { - mu sync.RWMutex - records map[dns.Question][]dns.RR - domains map[domain.Domain]struct{} + mu sync.RWMutex + records map[dns.Question][]dns.RR + domains map[domain.Domain]struct{} + zones []domain.Domain + resolver resolver + + ctx context.Context + cancel context.CancelFunc } func NewResolver() *Resolver { + ctx, cancel := context.WithCancel(context.Background()) return &Resolver{ records: make(map[dns.Question][]dns.RR), domains: make(map[domain.Domain]struct{}), + ctx: ctx, + cancel: cancel, } } @@ -37,7 +57,18 @@ func (d *Resolver) String() string { return fmt.Sprintf("LocalResolver [%d records]", len(d.records)) } -func (d *Resolver) Stop() {} +func (d *Resolver) Stop() { + if d.cancel != nil { + d.cancel() + } + + d.mu.Lock() + defer d.mu.Unlock() + + maps.Clear(d.records) + maps.Clear(d.domains) + d.zones = nil +} // ID returns the unique handler ID func (d *Resolver) ID() types.HandlerID { @@ -48,38 +79,47 @@ func (d *Resolver) ProbeAvailability() {} // ServeDNS handles a DNS request func (d *Resolver) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { + logger := log.WithField("request_id", resutil.GetRequestID(w)) + if len(r.Question) == 0 { - log.Debugf("received local resolver request with no question") + logger.Debug("received local resolver request with no question") return } question := r.Question[0] question.Name = strings.ToLower(dns.Fqdn(question.Name)) - log.Tracef("received local question: domain=%s type=%v class=%v", r.Question[0].Name, question.Qtype, question.Qclass) - replyMessage := &dns.Msg{} replyMessage.SetReply(r) replyMessage.RecursionAvailable = true - // lookup all records matching the question - records := d.lookupRecords(question) - if len(records) > 0 { - replyMessage.Rcode = dns.RcodeSuccess - replyMessage.Answer = append(replyMessage.Answer, records...) - } else { - // Check if we have any records for this domain name with different types - if d.hasRecordsForDomain(domain.Domain(question.Name)) { - replyMessage.Rcode = dns.RcodeSuccess // NOERROR with 0 records - } else { - replyMessage.Rcode = dns.RcodeNameError // NXDOMAIN - } - } + result := d.lookupRecords(logger, question) + replyMessage.Authoritative = !result.hasExternalData + replyMessage.Answer = result.records + replyMessage.Rcode = d.determineRcode(question, result) if err := w.WriteMsg(replyMessage); err != nil { - log.Warnf("failed to write the local resolver response: %v", err) + logger.Warnf("failed to write the local resolver response: %v", err) } } +// determineRcode returns the appropriate DNS response code. +// Per RFC 6604, CNAME chains should return the rcode of the final target resolution, +// even if CNAME records are included in the answer. +func (d *Resolver) determineRcode(question dns.Question, result lookupResult) int { + // Use the rcode from lookup - this properly handles CNAME chains where + // the target may be NXDOMAIN or SERVFAIL even though we have CNAME records + if result.rcode != 0 { + return result.rcode + } + + // No records found, but domain exists with different record types (NODATA) + if d.hasRecordsForDomain(domain.Domain(question.Name)) { + return dns.RcodeSuccess + } + + return dns.RcodeNameError +} + // hasRecordsForDomain checks if any records exist for the given domain name regardless of type func (d *Resolver) hasRecordsForDomain(domainName domain.Domain) bool { d.mu.RLock() @@ -89,8 +129,33 @@ func (d *Resolver) hasRecordsForDomain(domainName domain.Domain) bool { return exists } +// isInManagedZone checks if the given name falls within any of our managed zones. +// This is used to avoid unnecessary external resolution for CNAME targets that +// are within zones we manage - if we don't have a record for it, it doesn't exist. +// Caller must NOT hold the lock. +func (d *Resolver) isInManagedZone(name string) bool { + d.mu.RLock() + defer d.mu.RUnlock() + + name = dns.Fqdn(name) + for _, zone := range d.zones { + zoneStr := dns.Fqdn(zone.PunycodeString()) + if strings.EqualFold(name, zoneStr) || strings.HasSuffix(strings.ToLower(name), strings.ToLower("."+zoneStr)) { + return true + } + } + return false +} + +// lookupResult contains the result of a DNS lookup operation. +type lookupResult struct { + records []dns.RR + rcode int + hasExternalData bool +} + // lookupRecords fetches *all* DNS records matching the first question in r. -func (d *Resolver) lookupRecords(question dns.Question) []dns.RR { +func (d *Resolver) lookupRecords(logger *log.Entry, question dns.Question) lookupResult { d.mu.RLock() records, found := d.records[question] @@ -98,10 +163,14 @@ func (d *Resolver) lookupRecords(question dns.Question) []dns.RR { d.mu.RUnlock() // alternatively check if we have a cname if question.Qtype != dns.TypeCNAME { - question.Qtype = dns.TypeCNAME - return d.lookupRecords(question) + cnameQuestion := dns.Question{ + Name: question.Name, + Qtype: dns.TypeCNAME, + Qclass: question.Qclass, + } + return d.lookupCNAMEChain(logger, cnameQuestion, question.Qtype) } - return nil + return lookupResult{rcode: dns.RcodeNameError} } recordsCopy := slices.Clone(records) @@ -119,16 +188,172 @@ func (d *Resolver) lookupRecords(question dns.Question) []dns.RR { d.mu.Unlock() } - return recordsCopy + return lookupResult{records: recordsCopy, rcode: dns.RcodeSuccess} } -func (d *Resolver) Update(update []nbdns.SimpleRecord) { +// lookupCNAMEChain follows a CNAME chain and returns the CNAME records along with +// the final resolved record of the requested type. This is required for musl libc +// compatibility, which expects the full answer chain rather than just the CNAME. +func (d *Resolver) lookupCNAMEChain(logger *log.Entry, cnameQuestion dns.Question, targetType uint16) lookupResult { + const maxDepth = 8 + var chain []dns.RR + + for range maxDepth { + cnameRecords := d.getRecords(cnameQuestion) + if len(cnameRecords) == 0 { + break + } + + chain = append(chain, cnameRecords...) + + cname, ok := cnameRecords[0].(*dns.CNAME) + if !ok { + break + } + + targetName := strings.ToLower(cname.Target) + targetResult := d.resolveCNAMETarget(logger, targetName, targetType, cnameQuestion.Qclass) + + // keep following chain + if targetResult.rcode == -1 { + cnameQuestion = dns.Question{Name: targetName, Qtype: dns.TypeCNAME, Qclass: cnameQuestion.Qclass} + continue + } + + return d.buildChainResult(chain, targetResult) + } + + if len(chain) > 0 { + return lookupResult{records: chain, rcode: dns.RcodeSuccess} + } + return lookupResult{rcode: dns.RcodeSuccess} +} + +// buildChainResult combines CNAME chain records with the target resolution result. +// Per RFC 6604, the final rcode is propagated through the chain. +func (d *Resolver) buildChainResult(chain []dns.RR, target lookupResult) lookupResult { + records := chain + if len(target.records) > 0 { + records = append(records, target.records...) + } + + // preserve hasExternalData for SERVFAIL so caller knows the error came from upstream + if target.hasExternalData && target.rcode == dns.RcodeServerFailure { + return lookupResult{ + records: records, + rcode: dns.RcodeServerFailure, + hasExternalData: true, + } + } + + return lookupResult{ + records: records, + rcode: target.rcode, + hasExternalData: target.hasExternalData, + } +} + +// resolveCNAMETarget attempts to resolve a CNAME target name. +// Returns rcode=-1 to signal "keep following the chain". +func (d *Resolver) resolveCNAMETarget(logger *log.Entry, targetName string, targetType uint16, qclass uint16) lookupResult { + if records := d.getRecords(dns.Question{Name: targetName, Qtype: targetType, Qclass: qclass}); len(records) > 0 { + return lookupResult{records: records, rcode: dns.RcodeSuccess} + } + + // another CNAME, keep following + if d.hasRecord(dns.Question{Name: targetName, Qtype: dns.TypeCNAME, Qclass: qclass}) { + return lookupResult{rcode: -1} + } + + // domain exists locally but not this record type (NODATA) + if d.hasRecordsForDomain(domain.Domain(targetName)) { + return lookupResult{rcode: dns.RcodeSuccess} + } + + // in our zone but doesn't exist (NXDOMAIN) + if d.isInManagedZone(targetName) { + return lookupResult{rcode: dns.RcodeNameError} + } + + return d.resolveExternal(logger, targetName, targetType) +} + +func (d *Resolver) getRecords(q dns.Question) []dns.RR { + d.mu.RLock() + defer d.mu.RUnlock() + return d.records[q] +} + +func (d *Resolver) hasRecord(q dns.Question) bool { + d.mu.RLock() + defer d.mu.RUnlock() + _, ok := d.records[q] + return ok +} + +// resolveExternal resolves a domain name using the system resolver. +// This is used to resolve CNAME targets that point outside our local zone, +// which is required for musl libc compatibility (musl expects complete answers). +func (d *Resolver) resolveExternal(logger *log.Entry, name string, qtype uint16) lookupResult { + network := resutil.NetworkForQtype(qtype) + if network == "" { + return lookupResult{rcode: dns.RcodeNotImplemented} + } + + resolver := d.resolver + if resolver == nil { + resolver = net.DefaultResolver + } + + ctx, cancel := context.WithTimeout(d.ctx, externalResolutionTimeout) + defer cancel() + + result := resutil.LookupIP(ctx, resolver, network, name, qtype) + if result.Err != nil { + d.logDNSError(logger, name, qtype, result.Err) + return lookupResult{rcode: result.Rcode, hasExternalData: true} + } + + return lookupResult{ + records: resutil.IPsToRRs(name, result.IPs, 60), + rcode: dns.RcodeSuccess, + hasExternalData: true, + } +} + +// logDNSError logs DNS resolution errors for debugging. +func (d *Resolver) logDNSError(logger *log.Entry, hostname string, qtype uint16, err error) { + qtypeName := dns.TypeToString[qtype] + + var dnsErr *net.DNSError + if !errors.As(err, &dnsErr) { + logger.Debugf("DNS resolution failed for %s type %s: %v", hostname, qtypeName, err) + return + } + + if dnsErr.IsNotFound { + logger.Tracef("DNS target not found: %s type %s", hostname, qtypeName) + return + } + + if dnsErr.Server != "" { + logger.Debugf("DNS resolution failed for %s type %s server=%s: %v", hostname, qtypeName, dnsErr.Server, err) + } else { + logger.Debugf("DNS resolution failed for %s type %s: %v", hostname, qtypeName, err) + } +} + +// Update updates the resolver with new records and zone information. +// The zones parameter specifies which DNS zones this resolver manages. +func (d *Resolver) Update(update []nbdns.SimpleRecord, zones []domain.Domain) { d.mu.Lock() defer d.mu.Unlock() maps.Clear(d.records) maps.Clear(d.domains) + d.zones = zones + for _, rec := range update { if err := d.registerRecord(rec); err != nil { log.Warnf("failed to register the record (%s): %v", rec, err) diff --git a/client/internal/dns/local/local_test.go b/client/internal/dns/local/local_test.go index 8b13b69ff..2f8e08b1a 100644 --- a/client/internal/dns/local/local_test.go +++ b/client/internal/dns/local/local_test.go @@ -1,8 +1,14 @@ package local import ( + "context" + "fmt" + "net" + "net/netip" "strings" + "sync" "testing" + "time" "github.com/miekg/dns" "github.com/stretchr/testify/assert" @@ -10,8 +16,21 @@ import ( "github.com/netbirdio/netbird/client/internal/dns/test" nbdns "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/shared/management/domain" ) +// mockResolver implements resolver for testing +type mockResolver struct { + lookupFunc func(ctx context.Context, network, host string) ([]netip.Addr, error) +} + +func (m *mockResolver) LookupNetIP(ctx context.Context, network, host string) ([]netip.Addr, error) { + if m.lookupFunc != nil { + return m.lookupFunc(ctx, network, host) + } + return nil, nil +} + func TestLocalResolver_ServeDNS(t *testing.T) { recordA := nbdns.SimpleRecord{ Name: "peera.netbird.cloud.", @@ -110,7 +129,7 @@ func TestLocalResolver_Update_StaleRecord(t *testing.T) { update2 := []nbdns.SimpleRecord{record2} // Apply first update - resolver.Update(update1) + resolver.Update(update1, nil) // Verify first update resolver.mu.RLock() @@ -122,7 +141,7 @@ func TestLocalResolver_Update_StaleRecord(t *testing.T) { assert.Contains(t, rrSlice1[0].String(), record1.RData, "Record after first update should be %s", record1.RData) // Apply second update - resolver.Update(update2) + resolver.Update(update2, nil) // Verify second update resolver.mu.RLock() @@ -154,7 +173,7 @@ func TestLocalResolver_MultipleRecords_SameQuestion(t *testing.T) { update := []nbdns.SimpleRecord{record1, record2} // Apply update with both records - resolver.Update(update) + resolver.Update(update, nil) // Create question that matches both records question := dns.Question{ @@ -198,7 +217,7 @@ func TestLocalResolver_RecordRotation(t *testing.T) { update := []nbdns.SimpleRecord{record1, record2, record3} // Apply update with all three records - resolver.Update(update) + resolver.Update(update, nil) msg := new(dns.Msg).SetQuestion(recordName, recordType) @@ -264,7 +283,7 @@ func TestLocalResolver_CaseInsensitiveMatching(t *testing.T) { } // Update resolver with the records - resolver.Update([]nbdns.SimpleRecord{lowerCaseRecord, mixedCaseRecord}) + resolver.Update([]nbdns.SimpleRecord{lowerCaseRecord, mixedCaseRecord}, nil) testCases := []struct { name string @@ -379,7 +398,7 @@ func TestLocalResolver_CNAMEFallback(t *testing.T) { } // Update resolver with both records - resolver.Update([]nbdns.SimpleRecord{cnameRecord, targetRecord}) + resolver.Update([]nbdns.SimpleRecord{cnameRecord, targetRecord}, nil) testCases := []struct { name string @@ -476,6 +495,20 @@ func TestLocalResolver_CNAMEFallback(t *testing.T) { // with 0 records instead of NXDOMAIN func TestLocalResolver_NoErrorWithDifferentRecordType(t *testing.T) { resolver := NewResolver() + // Mock external resolver for CNAME target resolution + resolver.resolver = &mockResolver{ + lookupFunc: func(_ context.Context, network, host string) ([]netip.Addr, error) { + if host == "target.example.com." { + if network == "ip4" { + return []netip.Addr{netip.MustParseAddr("93.184.216.34")}, nil + } + if network == "ip6" { + return []netip.Addr{netip.MustParseAddr("2606:2800:220:1:248:1893:25c8:1946")}, nil + } + } + return nil, &net.DNSError{IsNotFound: true, Name: host} + }, + } recordA := nbdns.SimpleRecord{ Name: "example.netbird.cloud.", @@ -493,7 +526,7 @@ func TestLocalResolver_NoErrorWithDifferentRecordType(t *testing.T) { RData: "target.example.com.", } - resolver.Update([]nbdns.SimpleRecord{recordA, recordCNAME}) + resolver.Update([]nbdns.SimpleRecord{recordA, recordCNAME}, nil) testCases := []struct { name string @@ -582,3 +615,555 @@ func TestLocalResolver_NoErrorWithDifferentRecordType(t *testing.T) { }) } } + +// TestLocalResolver_CNAMEChainResolution tests comprehensive CNAME chain following +func TestLocalResolver_CNAMEChainResolution(t *testing.T) { + t.Run("simple internal CNAME chain", func(t *testing.T) { + resolver := NewResolver() + resolver.Update([]nbdns.SimpleRecord{ + {Name: "alias.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "192.168.1.1"}, + }, nil) + + msg := new(dns.Msg).SetQuestion("alias.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 2) + + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "target.example.com.", cname.Target) + + a, ok := resp.Answer[1].(*dns.A) + require.True(t, ok) + assert.Equal(t, "192.168.1.1", a.A.String()) + }) + + t.Run("multi-hop CNAME chain", func(t *testing.T) { + resolver := NewResolver() + resolver.Update([]nbdns.SimpleRecord{ + {Name: "hop1.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "hop2.test."}, + {Name: "hop2.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "hop3.test."}, + {Name: "hop3.test.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, nil) + + msg := new(dns.Msg).SetQuestion("hop1.test.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 3) + }) + + t.Run("CNAME to non-existent internal target returns only CNAME", func(t *testing.T) { + resolver := NewResolver() + resolver.Update([]nbdns.SimpleRecord{ + {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "nonexistent.test."}, + }, nil) + + msg := new(dns.Msg).SetQuestion("alias.test.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Len(t, resp.Answer, 1) + _, ok := resp.Answer[0].(*dns.CNAME) + assert.True(t, ok) + }) +} + +// TestLocalResolver_CNAMEMaxDepth tests the maximum depth limit for CNAME chains +func TestLocalResolver_CNAMEMaxDepth(t *testing.T) { + t.Run("chain at max depth resolves", func(t *testing.T) { + resolver := NewResolver() + var records []nbdns.SimpleRecord + // Create chain of 7 CNAMEs (under max of 8) + for i := 1; i <= 7; i++ { + records = append(records, nbdns.SimpleRecord{ + Name: fmt.Sprintf("hop%d.test.", i), + Type: int(dns.TypeCNAME), + Class: nbdns.DefaultClass, + TTL: 300, + RData: fmt.Sprintf("hop%d.test.", i+1), + }) + } + records = append(records, nbdns.SimpleRecord{ + Name: "hop8.test.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.10.10.10", + }) + + resolver.Update(records, nil) + + msg := new(dns.Msg).SetQuestion("hop1.test.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 8) + }) + + t.Run("chain exceeding max depth stops", func(t *testing.T) { + resolver := NewResolver() + var records []nbdns.SimpleRecord + // Create chain of 10 CNAMEs (exceeds max of 8) + for i := 1; i <= 10; i++ { + records = append(records, nbdns.SimpleRecord{ + Name: fmt.Sprintf("deep%d.test.", i), + Type: int(dns.TypeCNAME), + Class: nbdns.DefaultClass, + TTL: 300, + RData: fmt.Sprintf("deep%d.test.", i+1), + }) + } + records = append(records, nbdns.SimpleRecord{ + Name: "deep11.test.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.10.10.10", + }) + + resolver.Update(records, nil) + + msg := new(dns.Msg).SetQuestion("deep1.test.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + // Should NOT have the final A record (chain too deep) + assert.LessOrEqual(t, len(resp.Answer), 8) + }) + + t.Run("circular CNAME is protected by max depth", func(t *testing.T) { + resolver := NewResolver() + resolver.Update([]nbdns.SimpleRecord{ + {Name: "loop1.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "loop2.test."}, + {Name: "loop2.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "loop1.test."}, + }, nil) + + msg := new(dns.Msg).SetQuestion("loop1.test.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.LessOrEqual(t, len(resp.Answer), 8) + }) +} + +// TestLocalResolver_ExternalCNAMEResolution tests CNAME resolution to external domains +func TestLocalResolver_ExternalCNAMEResolution(t *testing.T) { + t.Run("CNAME to external domain resolves via external resolver", func(t *testing.T) { + resolver := NewResolver() + resolver.resolver = &mockResolver{ + lookupFunc: func(_ context.Context, network, host string) ([]netip.Addr, error) { + if host == "external.example.com." && network == "ip4" { + return []netip.Addr{netip.MustParseAddr("93.184.216.34")}, nil + } + return nil, nil + }, + } + + resolver.Update([]nbdns.SimpleRecord{ + {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, + }, nil) + + msg := new(dns.Msg).SetQuestion("alias.test.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Len(t, resp.Answer, 2, "Should have CNAME + A record") + + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "external.example.com.", cname.Target) + + a, ok := resp.Answer[1].(*dns.A) + require.True(t, ok) + assert.Equal(t, "93.184.216.34", a.A.String()) + }) + + t.Run("CNAME to external domain resolves IPv6", func(t *testing.T) { + resolver := NewResolver() + resolver.resolver = &mockResolver{ + lookupFunc: func(_ context.Context, network, host string) ([]netip.Addr, error) { + if host == "external.example.com." && network == "ip6" { + return []netip.Addr{netip.MustParseAddr("2606:2800:220:1:248:1893:25c8:1946")}, nil + } + return nil, nil + }, + } + + resolver.Update([]nbdns.SimpleRecord{ + {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, + }, nil) + + msg := new(dns.Msg).SetQuestion("alias.test.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Len(t, resp.Answer, 2, "Should have CNAME + AAAA record") + + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "external.example.com.", cname.Target) + + aaaa, ok := resp.Answer[1].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2606:2800:220:1:248:1893:25c8:1946", aaaa.AAAA.String()) + }) + + t.Run("concurrent external resolution", func(t *testing.T) { + resolver := NewResolver() + resolver.resolver = &mockResolver{ + lookupFunc: func(_ context.Context, network, host string) ([]netip.Addr, error) { + if host == "external.example.com." && network == "ip4" { + return []netip.Addr{netip.MustParseAddr("93.184.216.34")}, nil + } + return nil, nil + }, + } + + resolver.Update([]nbdns.SimpleRecord{ + {Name: "concurrent.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, + }, nil) + + var wg sync.WaitGroup + results := make([]*dns.Msg, 10) + + for i := 0; i < 10; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + msg := new(dns.Msg).SetQuestion("concurrent.test.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + results[idx] = resp + }(i) + } + wg.Wait() + + for i, resp := range results { + require.NotNil(t, resp, "Response %d should not be nil", i) + require.Len(t, resp.Answer, 2, "Response %d should have CNAME + A", i) + } + }) +} + +// TestLocalResolver_ZoneManagement tests zone-aware CNAME resolution +func TestLocalResolver_ZoneManagement(t *testing.T) { + t.Run("Update sets zones correctly", func(t *testing.T) { + resolver := NewResolver() + + zones := []domain.Domain{"example.com", "test.local"} + resolver.Update([]nbdns.SimpleRecord{ + {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, zones) + + assert.True(t, resolver.isInManagedZone("host.example.com.")) + assert.True(t, resolver.isInManagedZone("other.example.com.")) + assert.True(t, resolver.isInManagedZone("sub.test.local.")) + assert.False(t, resolver.isInManagedZone("external.com.")) + }) + + t.Run("isInManagedZone case insensitive", func(t *testing.T) { + resolver := NewResolver() + resolver.Update(nil, []domain.Domain{"Example.COM"}) + + assert.True(t, resolver.isInManagedZone("host.example.com.")) + assert.True(t, resolver.isInManagedZone("HOST.EXAMPLE.COM.")) + }) + + t.Run("Update clears zones", func(t *testing.T) { + resolver := NewResolver() + resolver.Update(nil, []domain.Domain{"example.com"}) + assert.True(t, resolver.isInManagedZone("host.example.com.")) + + resolver.Update(nil, nil) + assert.False(t, resolver.isInManagedZone("host.example.com.")) + }) +} + +// TestLocalResolver_CNAMEZoneAwareResolution tests CNAME resolution with zone awareness +func TestLocalResolver_CNAMEZoneAwareResolution(t *testing.T) { + t.Run("CNAME target in managed zone returns NXDOMAIN per RFC 6604", func(t *testing.T) { + resolver := NewResolver() + resolver.Update([]nbdns.SimpleRecord{ + {Name: "alias.myzone.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "nonexistent.myzone.test."}, + }, []domain.Domain{"myzone.test"}) + + msg := new(dns.Msg).SetQuestion("alias.myzone.test.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeNameError, resp.Rcode, "Should return NXDOMAIN") + require.Len(t, resp.Answer, 1, "Should include CNAME in answer") + }) + + t.Run("CNAME to external domain skips zone check", func(t *testing.T) { + resolver := NewResolver() + resolver.resolver = &mockResolver{ + lookupFunc: func(_ context.Context, network, host string) ([]netip.Addr, error) { + if host == "external.other.com." && network == "ip4" { + return []netip.Addr{netip.MustParseAddr("203.0.113.1")}, nil + } + return nil, nil + }, + } + + resolver.Update([]nbdns.SimpleRecord{ + {Name: "alias.myzone.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.other.com."}, + }, []domain.Domain{"myzone.test"}) + + msg := new(dns.Msg).SetQuestion("alias.myzone.test.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 2, "Should have CNAME + A from external resolution") + }) + + t.Run("CNAME target exists with different type returns NODATA not NXDOMAIN", func(t *testing.T) { + resolver := NewResolver() + // CNAME points to target that has A but no AAAA - query for AAAA should be NODATA + resolver.Update([]nbdns.SimpleRecord{ + {Name: "alias.myzone.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.myzone.test."}, + {Name: "target.myzone.test.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "1.1.1.1"}, + }, []domain.Domain{"myzone.test"}) + + msg := new(dns.Msg).SetQuestion("alias.myzone.test.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should return NODATA (success), not NXDOMAIN") + require.Len(t, resp.Answer, 1, "Should have only CNAME, no AAAA") + _, ok := resp.Answer[0].(*dns.CNAME) + assert.True(t, ok, "Answer should be CNAME record") + }) + + t.Run("external CNAME target exists but no AAAA records (NODATA)", func(t *testing.T) { + resolver := NewResolver() + resolver.resolver = &mockResolver{ + lookupFunc: func(_ context.Context, network, host string) ([]netip.Addr, error) { + if host == "external.example.com." { + if network == "ip6" { + // No AAAA records + return nil, &net.DNSError{IsNotFound: true, Name: host} + } + if network == "ip4" { + // But A records exist - domain exists + return []netip.Addr{netip.MustParseAddr("93.184.216.34")}, nil + } + } + return nil, &net.DNSError{IsNotFound: true, Name: host} + }, + } + + resolver.Update([]nbdns.SimpleRecord{ + {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, + }, nil) + + msg := new(dns.Msg).SetQuestion("alias.test.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should return NODATA (success), not NXDOMAIN") + require.Len(t, resp.Answer, 1, "Should have only CNAME") + _, ok := resp.Answer[0].(*dns.CNAME) + assert.True(t, ok, "Answer should be CNAME record") + }) + + // Table-driven test for all external resolution outcomes + externalCases := []struct { + name string + lookupFunc func(context.Context, string, string) ([]netip.Addr, error) + expectedRcode int + expectedAnswer int + }{ + { + name: "external NXDOMAIN (both A and AAAA not found)", + lookupFunc: func(_ context.Context, network, host string) ([]netip.Addr, error) { + return nil, &net.DNSError{IsNotFound: true, Name: host} + }, + expectedRcode: dns.RcodeNameError, + expectedAnswer: 1, // CNAME only + }, + { + name: "external SERVFAIL (temporary error)", + lookupFunc: func(_ context.Context, network, host string) ([]netip.Addr, error) { + return nil, &net.DNSError{IsTemporary: true, Name: host} + }, + expectedRcode: dns.RcodeServerFailure, + expectedAnswer: 1, // CNAME only + }, + { + name: "external SERVFAIL (timeout)", + lookupFunc: func(_ context.Context, network, host string) ([]netip.Addr, error) { + return nil, &net.DNSError{IsTimeout: true, Name: host} + }, + expectedRcode: dns.RcodeServerFailure, + expectedAnswer: 1, // CNAME only + }, + { + name: "external SERVFAIL (generic error)", + lookupFunc: func(_ context.Context, network, host string) ([]netip.Addr, error) { + return nil, fmt.Errorf("connection refused") + }, + expectedRcode: dns.RcodeServerFailure, + expectedAnswer: 1, // CNAME only + }, + { + name: "external success with IPs", + lookupFunc: func(_ context.Context, network, host string) ([]netip.Addr, error) { + if network == "ip4" { + return []netip.Addr{netip.MustParseAddr("93.184.216.34")}, nil + } + return nil, &net.DNSError{IsNotFound: true, Name: host} + }, + expectedRcode: dns.RcodeSuccess, + expectedAnswer: 2, // CNAME + A + }, + } + + for _, tc := range externalCases { + t.Run(tc.name, func(t *testing.T) { + resolver := NewResolver() + resolver.resolver = &mockResolver{lookupFunc: tc.lookupFunc} + + resolver.Update([]nbdns.SimpleRecord{ + {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, + }, nil) + + msg := new(dns.Msg).SetQuestion("alias.test.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, tc.expectedRcode, resp.Rcode, "rcode mismatch") + assert.Len(t, resp.Answer, tc.expectedAnswer, "answer count mismatch") + if tc.expectedAnswer > 0 { + _, ok := resp.Answer[0].(*dns.CNAME) + assert.True(t, ok, "first answer should be CNAME") + } + }) + } +} + +// TestLocalResolver_AuthoritativeFlag tests the AA flag behavior +func TestLocalResolver_AuthoritativeFlag(t *testing.T) { + t.Run("direct record lookup is authoritative", func(t *testing.T) { + resolver := NewResolver() + resolver.Update([]nbdns.SimpleRecord{ + {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, []domain.Domain{"example.com"}) + + msg := new(dns.Msg).SetQuestion("host.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.True(t, resp.Authoritative) + }) + + t.Run("external resolution is not authoritative", func(t *testing.T) { + resolver := NewResolver() + resolver.resolver = &mockResolver{ + lookupFunc: func(_ context.Context, network, host string) ([]netip.Addr, error) { + if host == "external.example.com." && network == "ip4" { + return []netip.Addr{netip.MustParseAddr("93.184.216.34")}, nil + } + return nil, nil + }, + } + + resolver.Update([]nbdns.SimpleRecord{ + {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, + }, nil) + + msg := new(dns.Msg).SetQuestion("alias.test.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Len(t, resp.Answer, 2) + assert.False(t, resp.Authoritative) + }) +} + +// TestLocalResolver_Stop tests cleanup on Stop +func TestLocalResolver_Stop(t *testing.T) { + t.Run("Stop clears all state", func(t *testing.T) { + resolver := NewResolver() + resolver.Update([]nbdns.SimpleRecord{ + {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, []domain.Domain{"example.com"}) + + resolver.Stop() + + msg := new(dns.Msg).SetQuestion("host.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Len(t, resp.Answer, 0) + assert.False(t, resolver.isInManagedZone("host.example.com.")) + }) + + t.Run("Stop is safe to call multiple times", func(t *testing.T) { + resolver := NewResolver() + resolver.Update([]nbdns.SimpleRecord{ + {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, []domain.Domain{"example.com"}) + + resolver.Stop() + resolver.Stop() + resolver.Stop() + }) + + t.Run("Stop cancels in-flight external resolution", func(t *testing.T) { + resolver := NewResolver() + + lookupStarted := make(chan struct{}) + lookupCtxCanceled := make(chan struct{}) + + resolver.resolver = &mockResolver{ + lookupFunc: func(ctx context.Context, network, host string) ([]netip.Addr, error) { + close(lookupStarted) + <-ctx.Done() + close(lookupCtxCanceled) + return nil, ctx.Err() + }, + } + + resolver.Update([]nbdns.SimpleRecord{ + {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, + }, nil) + + done := make(chan struct{}) + go func() { + msg := new(dns.Msg).SetQuestion("alias.test.", dns.TypeA) + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { return nil }}, msg) + close(done) + }() + + <-lookupStarted + resolver.Stop() + + select { + case <-lookupCtxCanceled: + case <-time.After(time.Second): + t.Fatal("external lookup context was not canceled") + } + + select { + case <-done: + case <-time.After(time.Second): + t.Fatal("ServeDNS did not return after Stop") + } + }) +} diff --git a/client/internal/dns/resutil/resolve.go b/client/internal/dns/resutil/resolve.go new file mode 100644 index 000000000..5a3744719 --- /dev/null +++ b/client/internal/dns/resutil/resolve.go @@ -0,0 +1,197 @@ +// Package resutil provides shared DNS resolution utilities +package resutil + +import ( + "context" + "crypto/rand" + "encoding/hex" + "errors" + "net" + "net/netip" + "strings" + + "github.com/miekg/dns" + log "github.com/sirupsen/logrus" +) + +// GenerateRequestID creates a random 8-character hex string for request tracing. +func GenerateRequestID() string { + bytes := make([]byte, 4) + if _, err := rand.Read(bytes); err != nil { + log.Errorf("generate request ID: %v", err) + return "" + } + return hex.EncodeToString(bytes) +} + +// IPsToRRs converts a slice of IP addresses to DNS resource records. +// IPv4 addresses become A records, IPv6 addresses become AAAA records. +func IPsToRRs(name string, ips []netip.Addr, ttl uint32) []dns.RR { + var result []dns.RR + + for _, ip := range ips { + if ip.Is6() { + result = append(result, &dns.AAAA{ + Hdr: dns.RR_Header{ + Name: name, + Rrtype: dns.TypeAAAA, + Class: dns.ClassINET, + Ttl: ttl, + }, + AAAA: ip.AsSlice(), + }) + } else { + result = append(result, &dns.A{ + Hdr: dns.RR_Header{ + Name: name, + Rrtype: dns.TypeA, + Class: dns.ClassINET, + Ttl: ttl, + }, + A: ip.AsSlice(), + }) + } + } + + return result +} + +// NetworkForQtype returns the network string ("ip4" or "ip6") for a DNS query type. +// Returns empty string for unsupported types. +func NetworkForQtype(qtype uint16) string { + switch qtype { + case dns.TypeA: + return "ip4" + case dns.TypeAAAA: + return "ip6" + default: + return "" + } +} + +type resolver interface { + LookupNetIP(ctx context.Context, network, host string) ([]netip.Addr, error) +} + +// chainedWriter is implemented by ResponseWriters that carry request metadata +type chainedWriter interface { + RequestID() string + SetMeta(key, value string) +} + +// GetRequestID extracts a request ID from the ResponseWriter if available, +// otherwise generates a new one. +func GetRequestID(w dns.ResponseWriter) string { + if cw, ok := w.(chainedWriter); ok { + if id := cw.RequestID(); id != "" { + return id + } + } + return GenerateRequestID() +} + +// SetMeta sets metadata on the ResponseWriter if it supports it. +func SetMeta(w dns.ResponseWriter, key, value string) { + if cw, ok := w.(chainedWriter); ok { + cw.SetMeta(key, value) + } +} + +// LookupResult contains the result of an external DNS lookup +type LookupResult struct { + IPs []netip.Addr + Rcode int + Err error // Original error for caller's logging needs +} + +// LookupIP performs a DNS lookup and determines the appropriate rcode. +func LookupIP(ctx context.Context, r resolver, network, host string, qtype uint16) LookupResult { + ips, err := r.LookupNetIP(ctx, network, host) + if err != nil { + return LookupResult{ + Rcode: getRcodeForError(ctx, r, host, qtype, err), + Err: err, + } + } + + // Unmap IPv4-mapped IPv6 addresses that some resolvers may return + for i, ip := range ips { + ips[i] = ip.Unmap() + } + + return LookupResult{ + IPs: ips, + Rcode: dns.RcodeSuccess, + } +} + +func getRcodeForError(ctx context.Context, r resolver, host string, qtype uint16, err error) int { + var dnsErr *net.DNSError + if !errors.As(err, &dnsErr) { + return dns.RcodeServerFailure + } + + if dnsErr.IsNotFound { + return getRcodeForNotFound(ctx, r, host, qtype) + } + + return dns.RcodeServerFailure +} + +// getRcodeForNotFound distinguishes between NXDOMAIN (domain doesn't exist) and NODATA +// (domain exists but no records of requested type) by checking the opposite record type. +// +// musl libc (the reason we need this distinction) only queries A/AAAA pairs in getaddrinfo, +// so checking the opposite A/AAAA type is sufficient. Other record types (MX, TXT, etc.) +// are not queried by musl and don't need this handling. +func getRcodeForNotFound(ctx context.Context, r resolver, domain string, originalQtype uint16) int { + // Try querying for a different record type to see if the domain exists + // If the original query was for AAAA, try A. If it was for A, try AAAA. + // This helps distinguish between NXDOMAIN and NODATA. + var alternativeNetwork string + switch originalQtype { + case dns.TypeAAAA: + alternativeNetwork = "ip4" + case dns.TypeA: + alternativeNetwork = "ip6" + default: + return dns.RcodeNameError + } + + if _, err := r.LookupNetIP(ctx, alternativeNetwork, domain); err != nil { + var dnsErr *net.DNSError + if errors.As(err, &dnsErr) && dnsErr.IsNotFound { + // Alternative query also returned not found - domain truly doesn't exist + return dns.RcodeNameError + } + // Some other error (timeout, server failure, etc.) - can't determine, assume domain exists + return dns.RcodeSuccess + } + + // Alternative query succeeded - domain exists but has no records of this type + return dns.RcodeSuccess +} + +// FormatAnswers formats DNS resource records for logging. +func FormatAnswers(answers []dns.RR) string { + if len(answers) == 0 { + return "[]" + } + + parts := make([]string, 0, len(answers)) + for _, rr := range answers { + switch r := rr.(type) { + case *dns.A: + parts = append(parts, r.A.String()) + case *dns.AAAA: + parts = append(parts, r.AAAA.String()) + case *dns.CNAME: + parts = append(parts, "CNAME:"+r.Target) + case *dns.PTR: + parts = append(parts, "PTR:"+r.Ptr) + default: + parts = append(parts, dns.TypeToString[rr.Header().Rrtype]) + } + } + return "[" + strings.Join(parts, ", ") + "]" +} diff --git a/client/internal/dns/server.go b/client/internal/dns/server.go index 94945b55a..0a56b92a1 100644 --- a/client/internal/dns/server.go +++ b/client/internal/dns/server.go @@ -485,7 +485,7 @@ func (s *DefaultServer) applyConfiguration(update nbdns.Config) error { } } - localMuxUpdates, localRecords, err := s.buildLocalHandlerUpdate(update.CustomZones) + localMuxUpdates, localRecords, localZones, err := s.buildLocalHandlerUpdate(update.CustomZones) if err != nil { return fmt.Errorf("local handler updater: %w", err) } @@ -499,7 +499,7 @@ func (s *DefaultServer) applyConfiguration(update nbdns.Config) error { s.updateMux(muxUpdates) // register local records - s.localResolver.Update(localRecords) + s.localResolver.Update(localRecords, localZones) s.currentConfig = dnsConfigToHostDNSConfig(update, s.service.RuntimeIP(), s.service.RuntimePort()) @@ -659,9 +659,10 @@ func (s *DefaultServer) registerFallback(config HostDNSConfig) { s.registerHandler([]string{nbdns.RootZone}, handler, PriorityFallback) } -func (s *DefaultServer) buildLocalHandlerUpdate(customZones []nbdns.CustomZone) ([]handlerWrapper, []nbdns.SimpleRecord, error) { +func (s *DefaultServer) buildLocalHandlerUpdate(customZones []nbdns.CustomZone) ([]handlerWrapper, []nbdns.SimpleRecord, []domain.Domain, error) { var muxUpdates []handlerWrapper var localRecords []nbdns.SimpleRecord + var zones []domain.Domain for _, customZone := range customZones { if len(customZone.Records) == 0 { @@ -675,6 +676,8 @@ func (s *DefaultServer) buildLocalHandlerUpdate(customZones []nbdns.CustomZone) priority: PriorityLocal, }) + zones = append(zones, domain.Domain(customZone.Domain)) + for _, record := range customZone.Records { if record.Class != nbdns.DefaultClass { log.Warnf("received an invalid class type: %s", record.Class) @@ -685,7 +688,7 @@ func (s *DefaultServer) buildLocalHandlerUpdate(customZones []nbdns.CustomZone) } } - return muxUpdates, localRecords, nil + return muxUpdates, localRecords, zones, nil } func (s *DefaultServer) buildUpstreamHandlerUpdate(nameServerGroups []*nbdns.NameServerGroup) ([]handlerWrapper, error) { diff --git a/client/internal/dns/server_test.go b/client/internal/dns/server_test.go index fe1f67f66..2b5b460b4 100644 --- a/client/internal/dns/server_test.go +++ b/client/internal/dns/server_test.go @@ -385,7 +385,7 @@ func TestUpdateDNSServer(t *testing.T) { }() dnsServer.dnsMuxMap = testCase.initUpstreamMap - dnsServer.localResolver.Update(testCase.initLocalRecords) + dnsServer.localResolver.Update(testCase.initLocalRecords, nil) dnsServer.updateSerial = testCase.initSerial err = dnsServer.UpdateDNSServer(testCase.inputSerial, testCase.inputUpdate) @@ -511,7 +511,7 @@ func TestDNSFakeResolverHandleUpdates(t *testing.T) { }, } //dnsServer.localResolver.RegisteredMap = local.RegistrationMap{local.BuildRecordKey("netbird.cloud", dns.ClassINET, dns.TypeA): struct{}{}} - dnsServer.localResolver.Update([]nbdns.SimpleRecord{{Name: "netbird.cloud", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}}) + dnsServer.localResolver.Update([]nbdns.SimpleRecord{{Name: "netbird.cloud", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}}, nil) dnsServer.updateSerial = 0 nameServers := []nbdns.NameServer{ @@ -2013,7 +2013,7 @@ func TestLocalResolverPriorityInServer(t *testing.T) { }, } - localMuxUpdates, _, err := server.buildLocalHandlerUpdate(config.CustomZones) + localMuxUpdates, _, _, err := server.buildLocalHandlerUpdate(config.CustomZones) assert.NoError(t, err) upstreamMuxUpdates, err := server.buildUpstreamHandlerUpdate(config.NameServerGroups) @@ -2074,7 +2074,7 @@ func TestLocalResolverPriorityConstants(t *testing.T) { }, } - localMuxUpdates, _, err := server.buildLocalHandlerUpdate(config.CustomZones) + localMuxUpdates, _, _, err := server.buildLocalHandlerUpdate(config.CustomZones) assert.NoError(t, err) assert.Len(t, localMuxUpdates, 1) assert.Equal(t, PriorityLocal, localMuxUpdates[0].priority, "Local handler should use PriorityLocal") diff --git a/client/internal/dns/upstream.go b/client/internal/dns/upstream.go index 2a92fd6d8..6b52010fb 100644 --- a/client/internal/dns/upstream.go +++ b/client/internal/dns/upstream.go @@ -2,7 +2,6 @@ package dns import ( "context" - "crypto/rand" "crypto/sha256" "encoding/hex" "errors" @@ -21,6 +20,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/client/iface" + "github.com/netbirdio/netbird/client/internal/dns/resutil" "github.com/netbirdio/netbird/client/internal/dns/types" "github.com/netbirdio/netbird/client/internal/peer" "github.com/netbirdio/netbird/client/proto" @@ -113,10 +113,7 @@ func (u *upstreamResolverBase) Stop() { // ServeDNS handles a DNS request func (u *upstreamResolverBase) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { - requestID := GenerateRequestID() - logger := log.WithField("request_id", requestID) - - logger.Tracef("received upstream question: domain=%s type=%v class=%v", r.Question[0].Name, r.Question[0].Qtype, r.Question[0].Qclass) + logger := log.WithField("request_id", resutil.GetRequestID(w)) u.prepareRequest(r) @@ -202,11 +199,14 @@ func (u *upstreamResolverBase) handleUpstreamError(err error, upstream netip.Add func (u *upstreamResolverBase) writeSuccessResponse(w dns.ResponseWriter, rm *dns.Msg, upstream netip.AddrPort, domain string, t time.Duration, logger *log.Entry) bool { u.successCount.Add(1) - logger.Tracef("took %s to query the upstream %s for question domain=%s", t, upstream, domain) + + resutil.SetMeta(w, "upstream", upstream.String()) if err := w.WriteMsg(rm); err != nil { logger.Errorf("failed to write DNS response for question domain=%s: %s", domain, err) + return true } + return true } @@ -414,16 +414,6 @@ func ExchangeWithFallback(ctx context.Context, client *dns.Client, r *dns.Msg, u return rm, t, nil } -func GenerateRequestID() string { - bytes := make([]byte, 4) - _, err := rand.Read(bytes) - if err != nil { - log.Errorf("failed to generate request ID: %v", err) - return "" - } - return hex.EncodeToString(bytes) -} - // FormatPeerStatus formats peer connection status information for debugging DNS timeouts func FormatPeerStatus(peerState *peer.State) string { isConnected := peerState.ConnStatus == peer.StatusConnected diff --git a/client/internal/dnsfwd/forwarder.go b/client/internal/dnsfwd/forwarder.go index 6b8042ccb..1230a4e46 100644 --- a/client/internal/dnsfwd/forwarder.go +++ b/client/internal/dnsfwd/forwarder.go @@ -18,6 +18,7 @@ import ( nberrors "github.com/netbirdio/netbird/client/errors" firewall "github.com/netbirdio/netbird/client/firewall/manager" + "github.com/netbirdio/netbird/client/internal/dns/resutil" "github.com/netbirdio/netbird/client/internal/peer" "github.com/netbirdio/netbird/route" ) @@ -189,29 +190,22 @@ func (f *DNSForwarder) Close(ctx context.Context) error { return nberrors.FormatErrorOrNil(result) } -func (f *DNSForwarder) handleDNSQuery(w dns.ResponseWriter, query *dns.Msg) *dns.Msg { +func (f *DNSForwarder) handleDNSQuery(logger *log.Entry, w dns.ResponseWriter, query *dns.Msg) *dns.Msg { if len(query.Question) == 0 { return nil } question := query.Question[0] - log.Tracef("received DNS request for DNS forwarder: domain=%v type=%v class=%v", - question.Name, question.Qtype, question.Qclass) + logger.Tracef("received DNS request for DNS forwarder: domain=%s type=%s class=%s", + question.Name, dns.TypeToString[question.Qtype], dns.ClassToString[question.Qclass]) domain := strings.ToLower(question.Name) resp := query.SetReply(query) - var network string - switch question.Qtype { - case dns.TypeA: - network = "ip4" - case dns.TypeAAAA: - network = "ip6" - default: - // TODO: Handle other types - + network := resutil.NetworkForQtype(question.Qtype) + if network == "" { resp.Rcode = dns.RcodeNotImplemented if err := w.WriteMsg(resp); err != nil { - log.Errorf("failed to write DNS response: %v", err) + logger.Errorf("failed to write DNS response: %v", err) } return nil } @@ -221,33 +215,35 @@ func (f *DNSForwarder) handleDNSQuery(w dns.ResponseWriter, query *dns.Msg) *dns if mostSpecificResId == "" { resp.Rcode = dns.RcodeRefused if err := w.WriteMsg(resp); err != nil { - log.Errorf("failed to write DNS response: %v", err) + logger.Errorf("failed to write DNS response: %v", err) } return nil } ctx, cancel := context.WithTimeout(context.Background(), upstreamTimeout) defer cancel() - ips, err := f.resolver.LookupNetIP(ctx, network, domain) - if err != nil { - f.handleDNSError(ctx, w, question, resp, domain, err) + + result := resutil.LookupIP(ctx, f.resolver, network, domain, question.Qtype) + if result.Err != nil { + f.handleDNSError(ctx, logger, w, question, resp, domain, result) return nil } - // Unmap IPv4-mapped IPv6 addresses that some resolvers may return - for i, ip := range ips { - ips[i] = ip.Unmap() - } - - f.updateInternalState(ips, mostSpecificResId, matchingEntries) - f.addIPsToResponse(resp, domain, ips) - f.cache.set(domain, question.Qtype, ips) + f.updateInternalState(result.IPs, mostSpecificResId, matchingEntries) + resp.Answer = append(resp.Answer, resutil.IPsToRRs(domain, result.IPs, f.ttl)...) + f.cache.set(domain, question.Qtype, result.IPs) return resp } func (f *DNSForwarder) handleDNSQueryUDP(w dns.ResponseWriter, query *dns.Msg) { - resp := f.handleDNSQuery(w, query) + startTime := time.Now() + logger := log.WithFields(log.Fields{ + "request_id": resutil.GenerateRequestID(), + "dns_id": fmt.Sprintf("%04x", query.Id), + }) + + resp := f.handleDNSQuery(logger, w, query) if resp == nil { return } @@ -265,19 +261,33 @@ func (f *DNSForwarder) handleDNSQueryUDP(w dns.ResponseWriter, query *dns.Msg) { } if err := w.WriteMsg(resp); err != nil { - log.Errorf("failed to write DNS response: %v", err) + logger.Errorf("failed to write DNS response: %v", err) + return } + + logger.Tracef("response: domain=%s rcode=%s answers=%s took=%s", + query.Question[0].Name, dns.RcodeToString[resp.Rcode], resutil.FormatAnswers(resp.Answer), time.Since(startTime)) } func (f *DNSForwarder) handleDNSQueryTCP(w dns.ResponseWriter, query *dns.Msg) { - resp := f.handleDNSQuery(w, query) + startTime := time.Now() + logger := log.WithFields(log.Fields{ + "request_id": resutil.GenerateRequestID(), + "dns_id": fmt.Sprintf("%04x", query.Id), + }) + + resp := f.handleDNSQuery(logger, w, query) if resp == nil { return } if err := w.WriteMsg(resp); err != nil { - log.Errorf("failed to write DNS response: %v", err) + logger.Errorf("failed to write DNS response: %v", err) + return } + + logger.Tracef("response: domain=%s rcode=%s answers=%s took=%s", + query.Question[0].Name, dns.RcodeToString[resp.Rcode], resutil.FormatAnswers(resp.Answer), time.Since(startTime)) } func (f *DNSForwarder) updateInternalState(ips []netip.Addr, mostSpecificResId route.ResID, matchingEntries []*ForwarderEntry) { @@ -315,140 +325,64 @@ func (f *DNSForwarder) updateFirewall(matchingEntries []*ForwarderEntry, prefixe } } -// setResponseCodeForNotFound determines and sets the appropriate response code when IsNotFound is true -// It distinguishes between NXDOMAIN (domain doesn't exist) and NODATA (domain exists but no records of requested type) -// -// LIMITATION: This function only checks A and AAAA record types to determine domain existence. -// If a domain has only other record types (MX, TXT, CNAME, etc.) but no A/AAAA records, -// it may incorrectly return NXDOMAIN instead of NODATA. This is acceptable since the forwarder -// only handles A/AAAA queries and returns NOTIMP for other types. -func (f *DNSForwarder) setResponseCodeForNotFound(ctx context.Context, resp *dns.Msg, domain string, originalQtype uint16) { - // Try querying for a different record type to see if the domain exists - // If the original query was for AAAA, try A. If it was for A, try AAAA. - // This helps distinguish between NXDOMAIN and NODATA. - var alternativeNetwork string - switch originalQtype { - case dns.TypeAAAA: - alternativeNetwork = "ip4" - case dns.TypeA: - alternativeNetwork = "ip6" - default: - resp.Rcode = dns.RcodeNameError - return - } - - if _, err := f.resolver.LookupNetIP(ctx, alternativeNetwork, domain); err != nil { - var dnsErr *net.DNSError - if errors.As(err, &dnsErr) && dnsErr.IsNotFound { - // Alternative query also returned not found - domain truly doesn't exist - resp.Rcode = dns.RcodeNameError - return - } - // Some other error (timeout, server failure, etc.) - can't determine, assume domain exists - resp.Rcode = dns.RcodeSuccess - return - } - - // Alternative query succeeded - domain exists but has no records of this type - resp.Rcode = dns.RcodeSuccess -} - // handleDNSError processes DNS lookup errors and sends an appropriate error response. func (f *DNSForwarder) handleDNSError( ctx context.Context, + logger *log.Entry, w dns.ResponseWriter, question dns.Question, resp *dns.Msg, domain string, - err error, + result resutil.LookupResult, ) { - // Default to SERVFAIL; override below when appropriate. - resp.Rcode = dns.RcodeServerFailure - qType := question.Qtype qTypeName := dns.TypeToString[qType] - // Prefer typed DNS errors; fall back to generic logging otherwise. - var dnsErr *net.DNSError - if !errors.As(err, &dnsErr) { - log.Warnf(errResolveFailed, domain, err) - if writeErr := w.WriteMsg(resp); writeErr != nil { - log.Errorf("failed to write failure DNS response: %v", writeErr) - } - return - } + resp.Rcode = result.Rcode - // NotFound: set NXDOMAIN / appropriate code via helper. - if dnsErr.IsNotFound { - f.setResponseCodeForNotFound(ctx, resp, domain, qType) - if writeErr := w.WriteMsg(resp); writeErr != nil { - log.Errorf("failed to write failure DNS response: %v", writeErr) - } + // NotFound: cache negative result and respond + if result.Rcode == dns.RcodeNameError || result.Rcode == dns.RcodeSuccess { f.cache.set(domain, question.Qtype, nil) + if writeErr := w.WriteMsg(resp); writeErr != nil { + logger.Errorf("failed to write failure DNS response: %v", writeErr) + } return } // Upstream failed but we might have a cached answer—serve it if present. if ips, ok := f.cache.get(domain, qType); ok { if len(ips) > 0 { - log.Debugf("serving cached DNS response after upstream failure: domain=%s type=%s", domain, qTypeName) - f.addIPsToResponse(resp, domain, ips) + logger.Debugf("serving cached DNS response after upstream failure: domain=%s type=%s", domain, qTypeName) + resp.Answer = append(resp.Answer, resutil.IPsToRRs(domain, ips, f.ttl)...) resp.Rcode = dns.RcodeSuccess if writeErr := w.WriteMsg(resp); writeErr != nil { - log.Errorf("failed to write cached DNS response: %v", writeErr) - } - } else { // send NXDOMAIN / appropriate code if cache is empty - f.setResponseCodeForNotFound(ctx, resp, domain, qType) - if writeErr := w.WriteMsg(resp); writeErr != nil { - log.Errorf("failed to write failure DNS response: %v", writeErr) + logger.Errorf("failed to write cached DNS response: %v", writeErr) } + return + } + + // Cached negative result - re-verify NXDOMAIN vs NODATA + verifyResult := resutil.LookupIP(ctx, f.resolver, resutil.NetworkForQtype(qType), domain, qType) + if verifyResult.Rcode == dns.RcodeNameError || verifyResult.Rcode == dns.RcodeSuccess { + resp.Rcode = verifyResult.Rcode + if writeErr := w.WriteMsg(resp); writeErr != nil { + logger.Errorf("failed to write failure DNS response: %v", writeErr) + } + return } - return } - // No cache. Log with or without the server field for more context. - if dnsErr.Server != "" { - log.Warnf("failed to resolve: type=%s domain=%s server=%s: %v", qTypeName, domain, dnsErr.Server, err) + // No cache or verification failed. Log with or without the server field for more context. + var dnsErr *net.DNSError + if errors.As(result.Err, &dnsErr) && dnsErr.Server != "" { + logger.Warnf("failed to resolve: type=%s domain=%s server=%s: %v", qTypeName, domain, dnsErr.Server, result.Err) } else { - log.Warnf(errResolveFailed, domain, err) + logger.Warnf(errResolveFailed, domain, result.Err) } // Write final failure response. if writeErr := w.WriteMsg(resp); writeErr != nil { - log.Errorf("failed to write failure DNS response: %v", writeErr) - } -} - -// addIPsToResponse adds IP addresses to the DNS response as appropriate A or AAAA records -func (f *DNSForwarder) addIPsToResponse(resp *dns.Msg, domain string, ips []netip.Addr) { - for _, ip := range ips { - var respRecord dns.RR - if ip.Is6() { - log.Tracef("resolved domain=%s to IPv6=%s", domain, ip) - rr := dns.AAAA{ - AAAA: ip.AsSlice(), - Hdr: dns.RR_Header{ - Name: domain, - Rrtype: dns.TypeAAAA, - Class: dns.ClassINET, - Ttl: f.ttl, - }, - } - respRecord = &rr - } else { - log.Tracef("resolved domain=%s to IPv4=%s", domain, ip) - rr := dns.A{ - A: ip.AsSlice(), - Hdr: dns.RR_Header{ - Name: domain, - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: f.ttl, - }, - } - respRecord = &rr - } - resp.Answer = append(resp.Answer, respRecord) + logger.Errorf("failed to write failure DNS response: %v", writeErr) } } diff --git a/client/internal/dnsfwd/forwarder_test.go b/client/internal/dnsfwd/forwarder_test.go index 4d0b96a75..6416c2f21 100644 --- a/client/internal/dnsfwd/forwarder_test.go +++ b/client/internal/dnsfwd/forwarder_test.go @@ -10,6 +10,7 @@ import ( "time" "github.com/miekg/dns" + log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -317,7 +318,7 @@ func TestDNSForwarder_UnauthorizedDomainAccess(t *testing.T) { query.SetQuestion(dns.Fqdn(tt.queryDomain), dns.TypeA) mockWriter := &test.MockResponseWriter{} - resp := forwarder.handleDNSQuery(mockWriter, query) + resp := forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, query) if tt.shouldResolve { require.NotNil(t, resp, "Expected response for authorized domain") @@ -465,7 +466,7 @@ func TestDNSForwarder_FirewallSetUpdates(t *testing.T) { dnsQuery.SetQuestion(dns.Fqdn(tt.query), dns.TypeA) mockWriter := &test.MockResponseWriter{} - resp := forwarder.handleDNSQuery(mockWriter, dnsQuery) + resp := forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, dnsQuery) // Verify response if tt.shouldResolve { @@ -527,7 +528,7 @@ func TestDNSForwarder_MultipleIPsInSingleUpdate(t *testing.T) { query.SetQuestion("example.com.", dns.TypeA) mockWriter := &test.MockResponseWriter{} - resp := forwarder.handleDNSQuery(mockWriter, query) + resp := forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, query) // Verify response contains all IPs require.NotNil(t, resp) @@ -604,7 +605,7 @@ func TestDNSForwarder_ResponseCodes(t *testing.T) { }, } - _ = forwarder.handleDNSQuery(mockWriter, query) + _ = forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, query) // Check the response written to the writer require.NotNil(t, writtenResp, "Expected response to be written") @@ -674,7 +675,7 @@ func TestDNSForwarder_ServeFromCacheOnUpstreamFailure(t *testing.T) { q1 := &dns.Msg{} q1.SetQuestion(dns.Fqdn("example.com"), dns.TypeA) w1 := &test.MockResponseWriter{} - resp1 := forwarder.handleDNSQuery(w1, q1) + resp1 := forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), w1, q1) require.NotNil(t, resp1) require.Equal(t, dns.RcodeSuccess, resp1.Rcode) require.Len(t, resp1.Answer, 1) @@ -684,7 +685,7 @@ func TestDNSForwarder_ServeFromCacheOnUpstreamFailure(t *testing.T) { q2.SetQuestion(dns.Fqdn("example.com"), dns.TypeA) var writtenResp *dns.Msg w2 := &test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { writtenResp = m; return nil }} - _ = forwarder.handleDNSQuery(w2, q2) + _ = forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), w2, q2) require.NotNil(t, writtenResp, "expected response to be written") require.Equal(t, dns.RcodeSuccess, writtenResp.Rcode) @@ -714,7 +715,7 @@ func TestDNSForwarder_CacheNormalizationCasingAndDot(t *testing.T) { q1 := &dns.Msg{} q1.SetQuestion(mixedQuery+".", dns.TypeA) w1 := &test.MockResponseWriter{} - resp1 := forwarder.handleDNSQuery(w1, q1) + resp1 := forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), w1, q1) require.NotNil(t, resp1) require.Equal(t, dns.RcodeSuccess, resp1.Rcode) require.Len(t, resp1.Answer, 1) @@ -728,7 +729,7 @@ func TestDNSForwarder_CacheNormalizationCasingAndDot(t *testing.T) { q2.SetQuestion("EXAMPLE.COM", dns.TypeA) var writtenResp *dns.Msg w2 := &test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { writtenResp = m; return nil }} - _ = forwarder.handleDNSQuery(w2, q2) + _ = forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), w2, q2) require.NotNil(t, writtenResp) require.Equal(t, dns.RcodeSuccess, writtenResp.Rcode) @@ -783,7 +784,7 @@ func TestDNSForwarder_MultipleOverlappingPatterns(t *testing.T) { query.SetQuestion("smtp.mail.example.com.", dns.TypeA) mockWriter := &test.MockResponseWriter{} - resp := forwarder.handleDNSQuery(mockWriter, query) + resp := forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, query) require.NotNil(t, resp) assert.Equal(t, dns.RcodeSuccess, resp.Rcode) @@ -904,7 +905,7 @@ func TestDNSForwarder_NodataVsNxdomain(t *testing.T) { }, } - resp := forwarder.handleDNSQuery(mockWriter, query) + resp := forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, query) // If a response was returned, it means it should be written (happens in wrapper functions) if resp != nil && writtenResp == nil { @@ -937,7 +938,7 @@ func TestDNSForwarder_EmptyQuery(t *testing.T) { return nil }, } - resp := forwarder.handleDNSQuery(mockWriter, query) + resp := forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, query) assert.Nil(t, resp, "Should return nil for empty query") assert.False(t, writeCalled, "Should not write response for empty query") diff --git a/client/internal/routemanager/dnsinterceptor/handler.go b/client/internal/routemanager/dnsinterceptor/handler.go index 348338dac..928b85acb 100644 --- a/client/internal/routemanager/dnsinterceptor/handler.go +++ b/client/internal/routemanager/dnsinterceptor/handler.go @@ -19,6 +19,7 @@ import ( firewall "github.com/netbirdio/netbird/client/firewall/manager" "github.com/netbirdio/netbird/client/iface/wgaddr" nbdns "github.com/netbirdio/netbird/client/internal/dns" + "github.com/netbirdio/netbird/client/internal/dns/resutil" "github.com/netbirdio/netbird/client/internal/peer" "github.com/netbirdio/netbird/client/internal/peerstore" "github.com/netbirdio/netbird/client/internal/routemanager/common" @@ -219,14 +220,14 @@ func (d *DnsInterceptor) RemoveAllowedIPs() error { // ServeDNS implements the dns.Handler interface func (d *DnsInterceptor) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { - requestID := nbdns.GenerateRequestID() - logger := log.WithField("request_id", requestID) + logger := log.WithFields(log.Fields{ + "request_id": resutil.GetRequestID(w), + "dns_id": fmt.Sprintf("%04x", r.Id), + }) if len(r.Question) == 0 { return } - logger.Tracef("received DNS request for domain=%s type=%v class=%v", - r.Question[0].Name, r.Question[0].Qtype, r.Question[0].Qclass) // pass if non A/AAAA query if r.Question[0].Qtype != dns.TypeA && r.Question[0].Qtype != dns.TypeAAAA { @@ -280,15 +281,10 @@ func (d *DnsInterceptor) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { return } - var answer []dns.RR - if reply != nil { - answer = reply.Answer - } - - logger.Tracef("upstream %s (%s) DNS response for domain=%s answers=%v", upstreamIP.String(), peerKey, r.Question[0].Name, answer) + resutil.SetMeta(w, "peer", peerKey) reply.Id = r.Id - if err := d.writeMsg(w, reply); err != nil { + if err := d.writeMsg(w, reply, logger); err != nil { logger.Errorf("failed writing DNS response: %v", err) } } @@ -324,7 +320,7 @@ func (d *DnsInterceptor) getUpstreamIP(peerKey string) (netip.Addr, error) { return peerAllowedIP, nil } -func (d *DnsInterceptor) writeMsg(w dns.ResponseWriter, r *dns.Msg) error { +func (d *DnsInterceptor) writeMsg(w dns.ResponseWriter, r *dns.Msg, logger *log.Entry) error { if r == nil { return fmt.Errorf("received nil DNS message") } @@ -350,14 +346,14 @@ func (d *DnsInterceptor) writeMsg(w dns.ResponseWriter, r *dns.Msg) error { case *dns.A: addr, ok := netip.AddrFromSlice(rr.A) if !ok { - log.Tracef("failed to convert A record for domain=%s ip=%v", resolvedDomain, rr.A) + logger.Tracef("failed to convert A record for domain=%s ip=%v", resolvedDomain, rr.A) continue } ip = addr case *dns.AAAA: addr, ok := netip.AddrFromSlice(rr.AAAA) if !ok { - log.Tracef("failed to convert AAAA record for domain=%s ip=%v", resolvedDomain, rr.AAAA) + logger.Tracef("failed to convert AAAA record for domain=%s ip=%v", resolvedDomain, rr.AAAA) continue } ip = addr @@ -370,11 +366,11 @@ func (d *DnsInterceptor) writeMsg(w dns.ResponseWriter, r *dns.Msg) error { } if len(newPrefixes) > 0 { - if err := d.updateDomainPrefixes(resolvedDomain, originalDomain, newPrefixes); err != nil { - log.Errorf("failed to update domain prefixes: %v", err) + if err := d.updateDomainPrefixes(resolvedDomain, originalDomain, newPrefixes, logger); err != nil { + logger.Errorf("failed to update domain prefixes: %v", err) } - d.replaceIPsInDNSResponse(r, newPrefixes) + d.replaceIPsInDNSResponse(r, newPrefixes, logger) } } @@ -386,22 +382,22 @@ func (d *DnsInterceptor) writeMsg(w dns.ResponseWriter, r *dns.Msg) error { } // logPrefixChanges handles the logging for prefix changes -func (d *DnsInterceptor) logPrefixChanges(resolvedDomain, originalDomain domain.Domain, toAdd, toRemove []netip.Prefix) { +func (d *DnsInterceptor) logPrefixChanges(resolvedDomain, originalDomain domain.Domain, toAdd, toRemove []netip.Prefix, logger *log.Entry) { if len(toAdd) > 0 { - log.Debugf("added dynamic route(s) for domain=%s (pattern: domain=%s): %s", + logger.Debugf("added dynamic route(s) for domain=%s (pattern: domain=%s): %s", resolvedDomain.SafeString(), originalDomain.SafeString(), toAdd) } if len(toRemove) > 0 && !d.route.KeepRoute { - log.Debugf("removed dynamic route(s) for domain=%s (pattern: domain=%s): %s", + logger.Debugf("removed dynamic route(s) for domain=%s (pattern: domain=%s): %s", resolvedDomain.SafeString(), originalDomain.SafeString(), toRemove) } } -func (d *DnsInterceptor) updateDomainPrefixes(resolvedDomain, originalDomain domain.Domain, newPrefixes []netip.Prefix) error { +func (d *DnsInterceptor) updateDomainPrefixes(resolvedDomain, originalDomain domain.Domain, newPrefixes []netip.Prefix, logger *log.Entry) error { d.mu.Lock() defer d.mu.Unlock() @@ -418,9 +414,9 @@ func (d *DnsInterceptor) updateDomainPrefixes(resolvedDomain, originalDomain dom realIP := prefix.Addr() if fakeIP, err := d.fakeIPManager.AllocateFakeIP(realIP); err == nil { dnatMappings[fakeIP] = realIP - log.Tracef("allocated fake IP %s for real IP %s", fakeIP, realIP) + logger.Tracef("allocated fake IP %s for real IP %s", fakeIP, realIP) } else { - log.Errorf("Failed to allocate fake IP for %s: %v", realIP, err) + logger.Errorf("failed to allocate fake IP for %s: %v", realIP, err) } } } @@ -432,7 +428,7 @@ func (d *DnsInterceptor) updateDomainPrefixes(resolvedDomain, originalDomain dom } } - d.addDNATMappings(dnatMappings) + d.addDNATMappings(dnatMappings, logger) if !d.route.KeepRoute { // Remove old prefixes @@ -448,7 +444,7 @@ func (d *DnsInterceptor) updateDomainPrefixes(resolvedDomain, originalDomain dom } } - d.removeDNATMappings(toRemove) + d.removeDNATMappings(toRemove, logger) } // Update domain prefixes using resolved domain as key - store real IPs @@ -463,14 +459,14 @@ func (d *DnsInterceptor) updateDomainPrefixes(resolvedDomain, originalDomain dom // Store real IPs for status (user-facing), not fake IPs d.statusRecorder.UpdateResolvedDomainsStates(originalDomain, resolvedDomain, newPrefixes, d.route.GetResourceID()) - d.logPrefixChanges(resolvedDomain, originalDomain, toAdd, toRemove) + d.logPrefixChanges(resolvedDomain, originalDomain, toAdd, toRemove, logger) } return nberrors.FormatErrorOrNil(merr) } // removeDNATMappings removes DNAT mappings from the firewall for real IP prefixes -func (d *DnsInterceptor) removeDNATMappings(realPrefixes []netip.Prefix) { +func (d *DnsInterceptor) removeDNATMappings(realPrefixes []netip.Prefix, logger *log.Entry) { if len(realPrefixes) == 0 { return } @@ -484,9 +480,9 @@ func (d *DnsInterceptor) removeDNATMappings(realPrefixes []netip.Prefix) { realIP := prefix.Addr() if fakeIP, exists := d.fakeIPManager.GetFakeIP(realIP); exists { if err := dnatFirewall.RemoveInternalDNATMapping(fakeIP); err != nil { - log.Errorf("Failed to remove DNAT mapping for %s: %v", fakeIP, err) + logger.Errorf("failed to remove DNAT mapping for %s: %v", fakeIP, err) } else { - log.Debugf("Removed DNAT mapping for: %s -> %s", fakeIP, realIP) + logger.Debugf("removed DNAT mapping: %s -> %s", fakeIP, realIP) } } } @@ -502,7 +498,7 @@ func (d *DnsInterceptor) internalDnatFw() (internalDNATer, bool) { } // addDNATMappings adds DNAT mappings to the firewall -func (d *DnsInterceptor) addDNATMappings(mappings map[netip.Addr]netip.Addr) { +func (d *DnsInterceptor) addDNATMappings(mappings map[netip.Addr]netip.Addr, logger *log.Entry) { if len(mappings) == 0 { return } @@ -514,9 +510,9 @@ func (d *DnsInterceptor) addDNATMappings(mappings map[netip.Addr]netip.Addr) { for fakeIP, realIP := range mappings { if err := dnatFirewall.AddInternalDNATMapping(fakeIP, realIP); err != nil { - log.Errorf("Failed to add DNAT mapping %s -> %s: %v", fakeIP, realIP, err) + logger.Errorf("failed to add DNAT mapping %s -> %s: %v", fakeIP, realIP, err) } else { - log.Debugf("Added DNAT mapping: %s -> %s", fakeIP, realIP) + logger.Debugf("added DNAT mapping: %s -> %s", fakeIP, realIP) } } } @@ -528,12 +524,12 @@ func (d *DnsInterceptor) cleanupDNATMappings() { } for _, prefixes := range d.interceptedDomains { - d.removeDNATMappings(prefixes) + d.removeDNATMappings(prefixes, log.NewEntry(log.StandardLogger())) } } // replaceIPsInDNSResponse replaces real IPs with fake IPs in the DNS response -func (d *DnsInterceptor) replaceIPsInDNSResponse(reply *dns.Msg, realPrefixes []netip.Prefix) { +func (d *DnsInterceptor) replaceIPsInDNSResponse(reply *dns.Msg, realPrefixes []netip.Prefix, logger *log.Entry) { if _, ok := d.internalDnatFw(); !ok { return } @@ -549,7 +545,7 @@ func (d *DnsInterceptor) replaceIPsInDNSResponse(reply *dns.Msg, realPrefixes [] if fakeIP, exists := d.fakeIPManager.GetFakeIP(realIP); exists { rr.A = fakeIP.AsSlice() - log.Tracef("Replaced real IP %s with fake IP %s in DNS response", realIP, fakeIP) + logger.Tracef("replaced real IP %s with fake IP %s in DNS response", realIP, fakeIP) } case *dns.AAAA: @@ -560,7 +556,7 @@ func (d *DnsInterceptor) replaceIPsInDNSResponse(reply *dns.Msg, realPrefixes [] if fakeIP, exists := d.fakeIPManager.GetFakeIP(realIP); exists { rr.AAAA = fakeIP.AsSlice() - log.Tracef("Replaced real IP %s with fake IP %s in DNS response", realIP, fakeIP) + logger.Tracef("replaced real IP %s with fake IP %s in DNS response", realIP, fakeIP) } } } From b12c084a50cf274aadc00a9b41b2133a1053bdf0 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Mon, 12 Jan 2026 20:56:39 +0800 Subject: [PATCH 040/374] [client] Fall through dns chain for custom dns zones (#5081) --- client/internal/dns.go | 2 +- client/internal/dns/local/local.go | 81 +++- client/internal/dns/local/local_test.go | 406 ++++++++++++++---- client/internal/dns/server.go | 19 +- client/internal/dns/server_test.go | 25 +- client/internal/dns/upstream.go | 4 + client/internal/engine.go | 9 +- .../routemanager/dnsinterceptor/handler.go | 4 + dns/dns.go | 4 +- .../internals/shared/grpc/conversion.go | 5 +- shared/management/proto/management.pb.go | 6 +- shared/management/proto/management.proto | 4 +- 12 files changed, 437 insertions(+), 132 deletions(-) diff --git a/client/internal/dns.go b/client/internal/dns.go index 3c68e4d00..f5040ee49 100644 --- a/client/internal/dns.go +++ b/client/internal/dns.go @@ -76,7 +76,7 @@ func collectPTRRecords(config *nbdns.Config, prefix netip.Prefix) []nbdns.Simple var records []nbdns.SimpleRecord for _, zone := range config.CustomZones { - if zone.SkipPTRProcess { + if zone.NonAuthoritative { continue } for _, record := range zone.Records { diff --git a/client/internal/dns/local/local.go b/client/internal/dns/local/local.go index cb1fa5293..63c2428ce 100644 --- a/client/internal/dns/local/local.go +++ b/client/internal/dns/local/local.go @@ -28,10 +28,11 @@ type resolver interface { } type Resolver struct { - mu sync.RWMutex - records map[dns.Question][]dns.RR - domains map[domain.Domain]struct{} - zones []domain.Domain + mu sync.RWMutex + records map[dns.Question][]dns.RR + domains map[domain.Domain]struct{} + // zones maps zone domain -> NonAuthoritative (true = non-authoritative, user-created zone) + zones map[domain.Domain]bool resolver resolver ctx context.Context @@ -43,6 +44,7 @@ func NewResolver() *Resolver { return &Resolver{ records: make(map[dns.Question][]dns.RR), domains: make(map[domain.Domain]struct{}), + zones: make(map[domain.Domain]bool), ctx: ctx, cancel: cancel, } @@ -67,7 +69,7 @@ func (d *Resolver) Stop() { maps.Clear(d.records) maps.Clear(d.domains) - d.zones = nil + maps.Clear(d.zones) } // ID returns the unique handler ID @@ -97,6 +99,11 @@ func (d *Resolver) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { replyMessage.Answer = result.records replyMessage.Rcode = d.determineRcode(question, result) + if replyMessage.Rcode == dns.RcodeNameError && d.shouldFallthrough(question.Name) { + d.continueToNext(logger, w, r) + return + } + if err := w.WriteMsg(replyMessage); err != nil { logger.Warnf("failed to write the local resolver response: %v", err) } @@ -120,6 +127,42 @@ func (d *Resolver) determineRcode(question dns.Question, result lookupResult) in return dns.RcodeNameError } +// findZone finds the matching zone for a query name using reverse suffix lookup. +// Returns (nonAuthoritative, found). This is O(k) where k = number of labels in qname. +func (d *Resolver) findZone(qname string) (nonAuthoritative bool, found bool) { + qname = strings.ToLower(dns.Fqdn(qname)) + for { + if nonAuth, ok := d.zones[domain.Domain(qname)]; ok { + return nonAuth, true + } + // Move to parent domain + idx := strings.Index(qname, ".") + if idx == -1 || idx == len(qname)-1 { + return false, false + } + qname = qname[idx+1:] + } +} + +// shouldFallthrough checks if the query should fallthrough to the next handler. +// Returns true if the queried name belongs to a non-authoritative zone. +func (d *Resolver) shouldFallthrough(qname string) bool { + d.mu.RLock() + defer d.mu.RUnlock() + + nonAuth, found := d.findZone(qname) + return found && nonAuth +} + +func (d *Resolver) continueToNext(logger *log.Entry, w dns.ResponseWriter, r *dns.Msg) { + resp := &dns.Msg{} + resp.SetRcode(r, dns.RcodeNameError) + resp.MsgHdr.Zero = true + if err := w.WriteMsg(resp); err != nil { + logger.Warnf("failed to write continue signal: %v", err) + } +} + // hasRecordsForDomain checks if any records exist for the given domain name regardless of type func (d *Resolver) hasRecordsForDomain(domainName domain.Domain) bool { d.mu.RLock() @@ -137,14 +180,8 @@ func (d *Resolver) isInManagedZone(name string) bool { d.mu.RLock() defer d.mu.RUnlock() - name = dns.Fqdn(name) - for _, zone := range d.zones { - zoneStr := dns.Fqdn(zone.PunycodeString()) - if strings.EqualFold(name, zoneStr) || strings.HasSuffix(strings.ToLower(name), strings.ToLower("."+zoneStr)) { - return true - } - } - return false + _, found := d.findZone(name) + return found } // lookupResult contains the result of a DNS lookup operation. @@ -343,21 +380,23 @@ func (d *Resolver) logDNSError(logger *log.Entry, hostname string, qtype uint16, } } -// Update updates the resolver with new records and zone information. -// The zones parameter specifies which DNS zones this resolver manages. -func (d *Resolver) Update(update []nbdns.SimpleRecord, zones []domain.Domain) { +// Update replaces all zones and their records +func (d *Resolver) Update(customZones []nbdns.CustomZone) { d.mu.Lock() defer d.mu.Unlock() maps.Clear(d.records) maps.Clear(d.domains) + maps.Clear(d.zones) - d.zones = zones + for _, zone := range customZones { + zoneDomain := domain.Domain(strings.ToLower(dns.Fqdn(zone.Domain))) + d.zones[zoneDomain] = zone.NonAuthoritative - for _, rec := range update { - if err := d.registerRecord(rec); err != nil { - log.Warnf("failed to register the record (%s): %v", rec, err) - continue + for _, rec := range zone.Records { + if err := d.registerRecord(rec); err != nil { + log.Warnf("failed to register the record (%s): %v", rec, err) + } } } } diff --git a/client/internal/dns/local/local_test.go b/client/internal/dns/local/local_test.go index 2f8e08b1a..1c7cad5d1 100644 --- a/client/internal/dns/local/local_test.go +++ b/client/internal/dns/local/local_test.go @@ -16,7 +16,6 @@ import ( "github.com/netbirdio/netbird/client/internal/dns/test" nbdns "github.com/netbirdio/netbird/dns" - "github.com/netbirdio/netbird/shared/management/domain" ) // mockResolver implements resolver for testing @@ -125,11 +124,11 @@ func TestLocalResolver_Update_StaleRecord(t *testing.T) { resolver := NewResolver() - update1 := []nbdns.SimpleRecord{record1} - update2 := []nbdns.SimpleRecord{record2} + zone1 := []nbdns.CustomZone{{Domain: "example.com.", Records: []nbdns.SimpleRecord{record1}}} + zone2 := []nbdns.CustomZone{{Domain: "example.com.", Records: []nbdns.SimpleRecord{record2}}} // Apply first update - resolver.Update(update1, nil) + resolver.Update(zone1) // Verify first update resolver.mu.RLock() @@ -141,7 +140,7 @@ func TestLocalResolver_Update_StaleRecord(t *testing.T) { assert.Contains(t, rrSlice1[0].String(), record1.RData, "Record after first update should be %s", record1.RData) // Apply second update - resolver.Update(update2, nil) + resolver.Update(zone2) // Verify second update resolver.mu.RLock() @@ -170,10 +169,10 @@ func TestLocalResolver_MultipleRecords_SameQuestion(t *testing.T) { Name: recordName, Type: int(recordType), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.2", } - update := []nbdns.SimpleRecord{record1, record2} + zones := []nbdns.CustomZone{{Domain: "example.com.", Records: []nbdns.SimpleRecord{record1, record2}}} // Apply update with both records - resolver.Update(update, nil) + resolver.Update(zones) // Create question that matches both records question := dns.Question{ @@ -214,10 +213,10 @@ func TestLocalResolver_RecordRotation(t *testing.T) { Name: recordName, Type: int(recordType), Class: nbdns.DefaultClass, TTL: 300, RData: "192.168.1.3", } - update := []nbdns.SimpleRecord{record1, record2, record3} + zones := []nbdns.CustomZone{{Domain: "example.com.", Records: []nbdns.SimpleRecord{record1, record2, record3}}} // Apply update with all three records - resolver.Update(update, nil) + resolver.Update(zones) msg := new(dns.Msg).SetQuestion(recordName, recordType) @@ -283,7 +282,7 @@ func TestLocalResolver_CaseInsensitiveMatching(t *testing.T) { } // Update resolver with the records - resolver.Update([]nbdns.SimpleRecord{lowerCaseRecord, mixedCaseRecord}, nil) + resolver.Update([]nbdns.CustomZone{{Domain: "example.com.", Records: []nbdns.SimpleRecord{lowerCaseRecord, mixedCaseRecord}}}) testCases := []struct { name string @@ -398,7 +397,7 @@ func TestLocalResolver_CNAMEFallback(t *testing.T) { } // Update resolver with both records - resolver.Update([]nbdns.SimpleRecord{cnameRecord, targetRecord}, nil) + resolver.Update([]nbdns.CustomZone{{Domain: "example.com.", Records: []nbdns.SimpleRecord{cnameRecord, targetRecord}}}) testCases := []struct { name string @@ -526,7 +525,7 @@ func TestLocalResolver_NoErrorWithDifferentRecordType(t *testing.T) { RData: "target.example.com.", } - resolver.Update([]nbdns.SimpleRecord{recordA, recordCNAME}, nil) + resolver.Update([]nbdns.CustomZone{{Domain: "netbird.cloud.", Records: []nbdns.SimpleRecord{recordA, recordCNAME}}}) testCases := []struct { name string @@ -620,10 +619,13 @@ func TestLocalResolver_NoErrorWithDifferentRecordType(t *testing.T) { func TestLocalResolver_CNAMEChainResolution(t *testing.T) { t.Run("simple internal CNAME chain", func(t *testing.T) { resolver := NewResolver() - resolver.Update([]nbdns.SimpleRecord{ - {Name: "alias.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, - {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "192.168.1.1"}, - }, nil) + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "192.168.1.1"}, + }, + }}) msg := new(dns.Msg).SetQuestion("alias.example.com.", dns.TypeA) var resp *dns.Msg @@ -644,11 +646,14 @@ func TestLocalResolver_CNAMEChainResolution(t *testing.T) { t.Run("multi-hop CNAME chain", func(t *testing.T) { resolver := NewResolver() - resolver.Update([]nbdns.SimpleRecord{ - {Name: "hop1.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "hop2.test."}, - {Name: "hop2.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "hop3.test."}, - {Name: "hop3.test.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, - }, nil) + resolver.Update([]nbdns.CustomZone{{ + Domain: "test.", + Records: []nbdns.SimpleRecord{ + {Name: "hop1.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "hop2.test."}, + {Name: "hop2.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "hop3.test."}, + {Name: "hop3.test.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) msg := new(dns.Msg).SetQuestion("hop1.test.", dns.TypeA) var resp *dns.Msg @@ -661,9 +666,12 @@ func TestLocalResolver_CNAMEChainResolution(t *testing.T) { t.Run("CNAME to non-existent internal target returns only CNAME", func(t *testing.T) { resolver := NewResolver() - resolver.Update([]nbdns.SimpleRecord{ - {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "nonexistent.test."}, - }, nil) + resolver.Update([]nbdns.CustomZone{{ + Domain: "test.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "nonexistent.test."}, + }, + }}) msg := new(dns.Msg).SetQuestion("alias.test.", dns.TypeA) var resp *dns.Msg @@ -695,7 +703,7 @@ func TestLocalResolver_CNAMEMaxDepth(t *testing.T) { Name: "hop8.test.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.10.10.10", }) - resolver.Update(records, nil) + resolver.Update([]nbdns.CustomZone{{Domain: "test.", Records: records}}) msg := new(dns.Msg).SetQuestion("hop1.test.", dns.TypeA) var resp *dns.Msg @@ -723,7 +731,7 @@ func TestLocalResolver_CNAMEMaxDepth(t *testing.T) { Name: "deep11.test.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.10.10.10", }) - resolver.Update(records, nil) + resolver.Update([]nbdns.CustomZone{{Domain: "test.", Records: records}}) msg := new(dns.Msg).SetQuestion("deep1.test.", dns.TypeA) var resp *dns.Msg @@ -736,10 +744,13 @@ func TestLocalResolver_CNAMEMaxDepth(t *testing.T) { t.Run("circular CNAME is protected by max depth", func(t *testing.T) { resolver := NewResolver() - resolver.Update([]nbdns.SimpleRecord{ - {Name: "loop1.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "loop2.test."}, - {Name: "loop2.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "loop1.test."}, - }, nil) + resolver.Update([]nbdns.CustomZone{{ + Domain: "test.", + Records: []nbdns.SimpleRecord{ + {Name: "loop1.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "loop2.test."}, + {Name: "loop2.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "loop1.test."}, + }, + }}) msg := new(dns.Msg).SetQuestion("loop1.test.", dns.TypeA) var resp *dns.Msg @@ -763,9 +774,12 @@ func TestLocalResolver_ExternalCNAMEResolution(t *testing.T) { }, } - resolver.Update([]nbdns.SimpleRecord{ - {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, - }, nil) + resolver.Update([]nbdns.CustomZone{{ + Domain: "test.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, + }, + }}) msg := new(dns.Msg).SetQuestion("alias.test.", dns.TypeA) var resp *dns.Msg @@ -794,9 +808,12 @@ func TestLocalResolver_ExternalCNAMEResolution(t *testing.T) { }, } - resolver.Update([]nbdns.SimpleRecord{ - {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, - }, nil) + resolver.Update([]nbdns.CustomZone{{ + Domain: "test.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, + }, + }}) msg := new(dns.Msg).SetQuestion("alias.test.", dns.TypeAAAA) var resp *dns.Msg @@ -825,9 +842,12 @@ func TestLocalResolver_ExternalCNAMEResolution(t *testing.T) { }, } - resolver.Update([]nbdns.SimpleRecord{ - {Name: "concurrent.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, - }, nil) + resolver.Update([]nbdns.CustomZone{{ + Domain: "test.", + Records: []nbdns.SimpleRecord{ + {Name: "concurrent.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, + }, + }}) var wg sync.WaitGroup results := make([]*dns.Msg, 10) @@ -856,10 +876,12 @@ func TestLocalResolver_ZoneManagement(t *testing.T) { t.Run("Update sets zones correctly", func(t *testing.T) { resolver := NewResolver() - zones := []domain.Domain{"example.com", "test.local"} - resolver.Update([]nbdns.SimpleRecord{ - {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, - }, zones) + resolver.Update([]nbdns.CustomZone{ + {Domain: "example.com.", Records: []nbdns.SimpleRecord{ + {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }}, + {Domain: "test.local."}, + }) assert.True(t, resolver.isInManagedZone("host.example.com.")) assert.True(t, resolver.isInManagedZone("other.example.com.")) @@ -869,7 +891,7 @@ func TestLocalResolver_ZoneManagement(t *testing.T) { t.Run("isInManagedZone case insensitive", func(t *testing.T) { resolver := NewResolver() - resolver.Update(nil, []domain.Domain{"Example.COM"}) + resolver.Update([]nbdns.CustomZone{{Domain: "Example.COM."}}) assert.True(t, resolver.isInManagedZone("host.example.com.")) assert.True(t, resolver.isInManagedZone("HOST.EXAMPLE.COM.")) @@ -877,10 +899,10 @@ func TestLocalResolver_ZoneManagement(t *testing.T) { t.Run("Update clears zones", func(t *testing.T) { resolver := NewResolver() - resolver.Update(nil, []domain.Domain{"example.com"}) + resolver.Update([]nbdns.CustomZone{{Domain: "example.com."}}) assert.True(t, resolver.isInManagedZone("host.example.com.")) - resolver.Update(nil, nil) + resolver.Update(nil) assert.False(t, resolver.isInManagedZone("host.example.com.")) }) } @@ -889,9 +911,12 @@ func TestLocalResolver_ZoneManagement(t *testing.T) { func TestLocalResolver_CNAMEZoneAwareResolution(t *testing.T) { t.Run("CNAME target in managed zone returns NXDOMAIN per RFC 6604", func(t *testing.T) { resolver := NewResolver() - resolver.Update([]nbdns.SimpleRecord{ - {Name: "alias.myzone.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "nonexistent.myzone.test."}, - }, []domain.Domain{"myzone.test"}) + resolver.Update([]nbdns.CustomZone{{ + Domain: "myzone.test.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.myzone.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "nonexistent.myzone.test."}, + }, + }}) msg := new(dns.Msg).SetQuestion("alias.myzone.test.", dns.TypeA) var resp *dns.Msg @@ -913,9 +938,12 @@ func TestLocalResolver_CNAMEZoneAwareResolution(t *testing.T) { }, } - resolver.Update([]nbdns.SimpleRecord{ - {Name: "alias.myzone.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.other.com."}, - }, []domain.Domain{"myzone.test"}) + resolver.Update([]nbdns.CustomZone{{ + Domain: "myzone.test.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.myzone.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.other.com."}, + }, + }}) msg := new(dns.Msg).SetQuestion("alias.myzone.test.", dns.TypeA) var resp *dns.Msg @@ -929,10 +957,13 @@ func TestLocalResolver_CNAMEZoneAwareResolution(t *testing.T) { t.Run("CNAME target exists with different type returns NODATA not NXDOMAIN", func(t *testing.T) { resolver := NewResolver() // CNAME points to target that has A but no AAAA - query for AAAA should be NODATA - resolver.Update([]nbdns.SimpleRecord{ - {Name: "alias.myzone.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.myzone.test."}, - {Name: "target.myzone.test.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "1.1.1.1"}, - }, []domain.Domain{"myzone.test"}) + resolver.Update([]nbdns.CustomZone{{ + Domain: "myzone.test.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.myzone.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.myzone.test."}, + {Name: "target.myzone.test.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "1.1.1.1"}, + }, + }}) msg := new(dns.Msg).SetQuestion("alias.myzone.test.", dns.TypeAAAA) var resp *dns.Msg @@ -963,9 +994,12 @@ func TestLocalResolver_CNAMEZoneAwareResolution(t *testing.T) { }, } - resolver.Update([]nbdns.SimpleRecord{ - {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, - }, nil) + resolver.Update([]nbdns.CustomZone{{ + Domain: "test.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, + }, + }}) msg := new(dns.Msg).SetQuestion("alias.test.", dns.TypeAAAA) var resp *dns.Msg @@ -1035,9 +1069,12 @@ func TestLocalResolver_CNAMEZoneAwareResolution(t *testing.T) { resolver := NewResolver() resolver.resolver = &mockResolver{lookupFunc: tc.lookupFunc} - resolver.Update([]nbdns.SimpleRecord{ - {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, - }, nil) + resolver.Update([]nbdns.CustomZone{{ + Domain: "test.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, + }, + }}) msg := new(dns.Msg).SetQuestion("alias.test.", dns.TypeA) var resp *dns.Msg @@ -1054,13 +1091,112 @@ func TestLocalResolver_CNAMEZoneAwareResolution(t *testing.T) { } } +// TestLocalResolver_Fallthrough verifies that non-authoritative zones +// trigger fallthrough (Zero bit set) when no records match +func TestLocalResolver_Fallthrough(t *testing.T) { + resolver := NewResolver() + + record := nbdns.SimpleRecord{ + Name: "existing.custom.zone.", + Type: int(dns.TypeA), + Class: nbdns.DefaultClass, + TTL: 300, + RData: "10.0.0.1", + } + + testCases := []struct { + name string + zones []nbdns.CustomZone + queryName string + expectFallthrough bool + expectRecord bool + }{ + { + name: "Authoritative zone returns NXDOMAIN without fallthrough", + zones: []nbdns.CustomZone{{ + Domain: "custom.zone.", + Records: []nbdns.SimpleRecord{record}, + }}, + queryName: "nonexistent.custom.zone.", + expectFallthrough: false, + expectRecord: false, + }, + { + name: "Non-authoritative zone triggers fallthrough", + zones: []nbdns.CustomZone{{ + Domain: "custom.zone.", + Records: []nbdns.SimpleRecord{record}, + NonAuthoritative: true, + }}, + queryName: "nonexistent.custom.zone.", + expectFallthrough: true, + expectRecord: false, + }, + { + name: "Record found in non-authoritative zone returns normally", + zones: []nbdns.CustomZone{{ + Domain: "custom.zone.", + Records: []nbdns.SimpleRecord{record}, + NonAuthoritative: true, + }}, + queryName: "existing.custom.zone.", + expectFallthrough: false, + expectRecord: true, + }, + { + name: "Record found in authoritative zone returns normally", + zones: []nbdns.CustomZone{{ + Domain: "custom.zone.", + Records: []nbdns.SimpleRecord{record}, + }}, + queryName: "existing.custom.zone.", + expectFallthrough: false, + expectRecord: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + resolver.Update(tc.zones) + + var responseMSG *dns.Msg + responseWriter := &test.MockResponseWriter{ + WriteMsgFunc: func(m *dns.Msg) error { + responseMSG = m + return nil + }, + } + + msg := new(dns.Msg).SetQuestion(tc.queryName, dns.TypeA) + resolver.ServeDNS(responseWriter, msg) + + require.NotNil(t, responseMSG, "Should have received a response") + + if tc.expectFallthrough { + assert.True(t, responseMSG.MsgHdr.Zero, "Zero bit should be set for fallthrough") + assert.Equal(t, dns.RcodeNameError, responseMSG.Rcode, "Should return NXDOMAIN") + } else { + assert.False(t, responseMSG.MsgHdr.Zero, "Zero bit should not be set") + } + + if tc.expectRecord { + assert.Greater(t, len(responseMSG.Answer), 0, "Should have answer records") + assert.Equal(t, dns.RcodeSuccess, responseMSG.Rcode) + } + }) + } +} + // TestLocalResolver_AuthoritativeFlag tests the AA flag behavior func TestLocalResolver_AuthoritativeFlag(t *testing.T) { t.Run("direct record lookup is authoritative", func(t *testing.T) { resolver := NewResolver() - resolver.Update([]nbdns.SimpleRecord{ - {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, - }, []domain.Domain{"example.com"}) + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) msg := new(dns.Msg).SetQuestion("host.example.com.", dns.TypeA) var resp *dns.Msg @@ -1081,9 +1217,12 @@ func TestLocalResolver_AuthoritativeFlag(t *testing.T) { }, } - resolver.Update([]nbdns.SimpleRecord{ - {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, - }, nil) + resolver.Update([]nbdns.CustomZone{{ + Domain: "test.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, + }, + }}) msg := new(dns.Msg).SetQuestion("alias.test.", dns.TypeA) var resp *dns.Msg @@ -1099,9 +1238,12 @@ func TestLocalResolver_AuthoritativeFlag(t *testing.T) { func TestLocalResolver_Stop(t *testing.T) { t.Run("Stop clears all state", func(t *testing.T) { resolver := NewResolver() - resolver.Update([]nbdns.SimpleRecord{ - {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, - }, []domain.Domain{"example.com"}) + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) resolver.Stop() @@ -1116,9 +1258,12 @@ func TestLocalResolver_Stop(t *testing.T) { t.Run("Stop is safe to call multiple times", func(t *testing.T) { resolver := NewResolver() - resolver.Update([]nbdns.SimpleRecord{ - {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, - }, []domain.Domain{"example.com"}) + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) resolver.Stop() resolver.Stop() @@ -1140,9 +1285,12 @@ func TestLocalResolver_Stop(t *testing.T) { }, } - resolver.Update([]nbdns.SimpleRecord{ - {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, - }, nil) + resolver.Update([]nbdns.CustomZone{{ + Domain: "test.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.test.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "external.example.com."}, + }, + }}) done := make(chan struct{}) go func() { @@ -1167,3 +1315,107 @@ func TestLocalResolver_Stop(t *testing.T) { } }) } + +// TestLocalResolver_FallthroughCaseInsensitive verifies case-insensitive domain matching for fallthrough +func TestLocalResolver_FallthroughCaseInsensitive(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "EXAMPLE.COM.", + Records: []nbdns.SimpleRecord{{Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "1.2.3.4"}}, + NonAuthoritative: true, + }}) + + var responseMSG *dns.Msg + responseWriter := &test.MockResponseWriter{ + WriteMsgFunc: func(m *dns.Msg) error { + responseMSG = m + return nil + }, + } + + msg := new(dns.Msg).SetQuestion("nonexistent.example.com.", dns.TypeA) + resolver.ServeDNS(responseWriter, msg) + + require.NotNil(t, responseMSG) + assert.True(t, responseMSG.MsgHdr.Zero, "Should fallthrough for non-authoritative zone with case-insensitive match") +} + +// BenchmarkFindZone_BestCase benchmarks zone lookup with immediate match (first label) +func BenchmarkFindZone_BestCase(b *testing.B) { + resolver := NewResolver() + + // Single zone that matches immediately + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + NonAuthoritative: true, + }}) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + resolver.shouldFallthrough("example.com.") + } +} + +// BenchmarkFindZone_WorstCase benchmarks zone lookup with many zones, no match, many labels +func BenchmarkFindZone_WorstCase(b *testing.B) { + resolver := NewResolver() + + // 100 zones that won't match + var zones []nbdns.CustomZone + for i := 0; i < 100; i++ { + zones = append(zones, nbdns.CustomZone{ + Domain: fmt.Sprintf("zone%d.internal.", i), + NonAuthoritative: true, + }) + } + resolver.Update(zones) + + // Query with many labels that won't match any zone + qname := "a.b.c.d.e.f.g.h.external.com." + + b.ResetTimer() + for i := 0; i < b.N; i++ { + resolver.shouldFallthrough(qname) + } +} + +// BenchmarkFindZone_TypicalCase benchmarks typical usage: few zones, subdomain match +func BenchmarkFindZone_TypicalCase(b *testing.B) { + resolver := NewResolver() + + // Typical setup: peer zone (authoritative) + one user zone (non-authoritative) + resolver.Update([]nbdns.CustomZone{ + {Domain: "netbird.cloud.", NonAuthoritative: false}, + {Domain: "custom.local.", NonAuthoritative: true}, + }) + + // Query for subdomain of user zone + qname := "myhost.custom.local." + + b.ResetTimer() + for i := 0; i < b.N; i++ { + resolver.shouldFallthrough(qname) + } +} + +// BenchmarkIsInManagedZone_ManyZones benchmarks isInManagedZone with 100 zones +func BenchmarkIsInManagedZone_ManyZones(b *testing.B) { + resolver := NewResolver() + + var zones []nbdns.CustomZone + for i := 0; i < 100; i++ { + zones = append(zones, nbdns.CustomZone{ + Domain: fmt.Sprintf("zone%d.internal.", i), + }) + } + resolver.Update(zones) + + // Query that matches zone50 + qname := "host.zone50.internal." + + b.ResetTimer() + for i := 0; i < b.N; i++ { + resolver.isInManagedZone(qname) + } +} diff --git a/client/internal/dns/server.go b/client/internal/dns/server.go index 0a56b92a1..29bb7f3dc 100644 --- a/client/internal/dns/server.go +++ b/client/internal/dns/server.go @@ -485,7 +485,7 @@ func (s *DefaultServer) applyConfiguration(update nbdns.Config) error { } } - localMuxUpdates, localRecords, localZones, err := s.buildLocalHandlerUpdate(update.CustomZones) + localMuxUpdates, localZones, err := s.buildLocalHandlerUpdate(update.CustomZones) if err != nil { return fmt.Errorf("local handler updater: %w", err) } @@ -498,8 +498,7 @@ func (s *DefaultServer) applyConfiguration(update nbdns.Config) error { s.updateMux(muxUpdates) - // register local records - s.localResolver.Update(localRecords, localZones) + s.localResolver.Update(localZones) s.currentConfig = dnsConfigToHostDNSConfig(update, s.service.RuntimeIP(), s.service.RuntimePort()) @@ -659,10 +658,9 @@ func (s *DefaultServer) registerFallback(config HostDNSConfig) { s.registerHandler([]string{nbdns.RootZone}, handler, PriorityFallback) } -func (s *DefaultServer) buildLocalHandlerUpdate(customZones []nbdns.CustomZone) ([]handlerWrapper, []nbdns.SimpleRecord, []domain.Domain, error) { +func (s *DefaultServer) buildLocalHandlerUpdate(customZones []nbdns.CustomZone) ([]handlerWrapper, []nbdns.CustomZone, error) { var muxUpdates []handlerWrapper - var localRecords []nbdns.SimpleRecord - var zones []domain.Domain + var zones []nbdns.CustomZone for _, customZone := range customZones { if len(customZone.Records) == 0 { @@ -676,19 +674,20 @@ func (s *DefaultServer) buildLocalHandlerUpdate(customZones []nbdns.CustomZone) priority: PriorityLocal, }) - zones = append(zones, domain.Domain(customZone.Domain)) - + // zone records contain the fqdn, so we can just flatten them + var localRecords []nbdns.SimpleRecord for _, record := range customZone.Records { if record.Class != nbdns.DefaultClass { log.Warnf("received an invalid class type: %s", record.Class) continue } - // zone records contain the fqdn, so we can just flatten them localRecords = append(localRecords, record) } + customZone.Records = localRecords + zones = append(zones, customZone) } - return muxUpdates, localRecords, zones, nil + return muxUpdates, zones, nil } func (s *DefaultServer) buildUpstreamHandlerUpdate(nameServerGroups []*nbdns.NameServerGroup) ([]handlerWrapper, error) { diff --git a/client/internal/dns/server_test.go b/client/internal/dns/server_test.go index 2b5b460b4..200a5f496 100644 --- a/client/internal/dns/server_test.go +++ b/client/internal/dns/server_test.go @@ -128,7 +128,7 @@ func TestUpdateDNSServer(t *testing.T) { testCases := []struct { name string initUpstreamMap registeredHandlerMap - initLocalRecords []nbdns.SimpleRecord + initLocalZones []nbdns.CustomZone initSerial uint64 inputSerial uint64 inputUpdate nbdns.Config @@ -181,7 +181,7 @@ func TestUpdateDNSServer(t *testing.T) { }, { name: "New Config Should Succeed", - initLocalRecords: []nbdns.SimpleRecord{{Name: "netbird.cloud", Type: 1, Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}}, + initLocalZones: []nbdns.CustomZone{{Domain: "netbird.cloud", Records: []nbdns.SimpleRecord{{Name: "netbird.cloud", Type: 1, Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}}}}, initUpstreamMap: registeredHandlerMap{ generateDummyHandler(zoneRecords[0].Name, nameServers).ID(): handlerWrapper{ domain: "netbird.cloud", @@ -222,7 +222,7 @@ func TestUpdateDNSServer(t *testing.T) { }, { name: "Smaller Config Serial Should Be Skipped", - initLocalRecords: []nbdns.SimpleRecord{}, + initLocalZones: []nbdns.CustomZone{}, initUpstreamMap: make(registeredHandlerMap), initSerial: 2, inputSerial: 1, @@ -230,7 +230,7 @@ func TestUpdateDNSServer(t *testing.T) { }, { name: "Empty NS Group Domain Or Not Primary Element Should Fail", - initLocalRecords: []nbdns.SimpleRecord{}, + initLocalZones: []nbdns.CustomZone{}, initUpstreamMap: make(registeredHandlerMap), initSerial: 0, inputSerial: 1, @@ -252,7 +252,7 @@ func TestUpdateDNSServer(t *testing.T) { }, { name: "Invalid NS Group Nameservers list Should Fail", - initLocalRecords: []nbdns.SimpleRecord{}, + initLocalZones: []nbdns.CustomZone{}, initUpstreamMap: make(registeredHandlerMap), initSerial: 0, inputSerial: 1, @@ -274,7 +274,7 @@ func TestUpdateDNSServer(t *testing.T) { }, { name: "Invalid Custom Zone Records list Should Skip", - initLocalRecords: []nbdns.SimpleRecord{}, + initLocalZones: []nbdns.CustomZone{}, initUpstreamMap: make(registeredHandlerMap), initSerial: 0, inputSerial: 1, @@ -300,7 +300,7 @@ func TestUpdateDNSServer(t *testing.T) { }, { name: "Empty Config Should Succeed and Clean Maps", - initLocalRecords: []nbdns.SimpleRecord{{Name: "netbird.cloud", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}}, + initLocalZones: []nbdns.CustomZone{{Domain: "netbird.cloud", Records: []nbdns.SimpleRecord{{Name: "netbird.cloud", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}}}}, initUpstreamMap: registeredHandlerMap{ generateDummyHandler(zoneRecords[0].Name, nameServers).ID(): handlerWrapper{ domain: zoneRecords[0].Name, @@ -316,7 +316,7 @@ func TestUpdateDNSServer(t *testing.T) { }, { name: "Disabled Service Should clean map", - initLocalRecords: []nbdns.SimpleRecord{{Name: "netbird.cloud", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}}, + initLocalZones: []nbdns.CustomZone{{Domain: "netbird.cloud", Records: []nbdns.SimpleRecord{{Name: "netbird.cloud", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}}}}, initUpstreamMap: registeredHandlerMap{ generateDummyHandler(zoneRecords[0].Name, nameServers).ID(): handlerWrapper{ domain: zoneRecords[0].Name, @@ -385,7 +385,7 @@ func TestUpdateDNSServer(t *testing.T) { }() dnsServer.dnsMuxMap = testCase.initUpstreamMap - dnsServer.localResolver.Update(testCase.initLocalRecords, nil) + dnsServer.localResolver.Update(testCase.initLocalZones) dnsServer.updateSerial = testCase.initSerial err = dnsServer.UpdateDNSServer(testCase.inputSerial, testCase.inputUpdate) @@ -510,8 +510,7 @@ func TestDNSFakeResolverHandleUpdates(t *testing.T) { priority: PriorityUpstream, }, } - //dnsServer.localResolver.RegisteredMap = local.RegistrationMap{local.BuildRecordKey("netbird.cloud", dns.ClassINET, dns.TypeA): struct{}{}} - dnsServer.localResolver.Update([]nbdns.SimpleRecord{{Name: "netbird.cloud", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}}, nil) + dnsServer.localResolver.Update([]nbdns.CustomZone{{Domain: "netbird.cloud", Records: []nbdns.SimpleRecord{{Name: "netbird.cloud", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}}}}) dnsServer.updateSerial = 0 nameServers := []nbdns.NameServer{ @@ -2013,7 +2012,7 @@ func TestLocalResolverPriorityInServer(t *testing.T) { }, } - localMuxUpdates, _, _, err := server.buildLocalHandlerUpdate(config.CustomZones) + localMuxUpdates, _, err := server.buildLocalHandlerUpdate(config.CustomZones) assert.NoError(t, err) upstreamMuxUpdates, err := server.buildUpstreamHandlerUpdate(config.NameServerGroups) @@ -2074,7 +2073,7 @@ func TestLocalResolverPriorityConstants(t *testing.T) { }, } - localMuxUpdates, _, _, err := server.buildLocalHandlerUpdate(config.CustomZones) + localMuxUpdates, _, err := server.buildLocalHandlerUpdate(config.CustomZones) assert.NoError(t, err) assert.Len(t, localMuxUpdates, 1) assert.Equal(t, PriorityLocal, localMuxUpdates[0].priority, "Local handler should use PriorityLocal") diff --git a/client/internal/dns/upstream.go b/client/internal/dns/upstream.go index 6b52010fb..c997acc75 100644 --- a/client/internal/dns/upstream.go +++ b/client/internal/dns/upstream.go @@ -202,6 +202,10 @@ func (u *upstreamResolverBase) writeSuccessResponse(w dns.ResponseWriter, rm *dn resutil.SetMeta(w, "upstream", upstream.String()) + // Clear Zero bit from external responses to prevent upstream servers from + // manipulating our internal fallthrough signaling mechanism + rm.MsgHdr.Zero = false + if err := w.WriteMsg(rm); err != nil { logger.Errorf("failed to write DNS response for question domain=%s: %s", domain, err) return true diff --git a/client/internal/engine.go b/client/internal/engine.go index 4f18c3bc8..2acd86a16 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -1251,11 +1251,16 @@ func toDNSConfig(protoDNSConfig *mgmProto.DNSConfig, network netip.Prefix) nbdns ForwarderPort: forwarderPort, } - for _, zone := range protoDNSConfig.GetCustomZones() { + protoZones := protoDNSConfig.GetCustomZones() + // Treat single zone as authoritative for backward compatibility with old servers + // that only send the peer FQDN zone without setting field 4. + singleZoneCompat := len(protoZones) == 1 + + for _, zone := range protoZones { dnsZone := nbdns.CustomZone{ Domain: zone.GetDomain(), SearchDomainDisabled: zone.GetSearchDomainDisabled(), - SkipPTRProcess: zone.GetSkipPTRProcess(), + NonAuthoritative: zone.GetNonAuthoritative() && !singleZoneCompat, } for _, record := range zone.Records { dnsRecord := nbdns.SimpleRecord{ diff --git a/client/internal/routemanager/dnsinterceptor/handler.go b/client/internal/routemanager/dnsinterceptor/handler.go index 928b85acb..c7ec47da4 100644 --- a/client/internal/routemanager/dnsinterceptor/handler.go +++ b/client/internal/routemanager/dnsinterceptor/handler.go @@ -325,6 +325,10 @@ func (d *DnsInterceptor) writeMsg(w dns.ResponseWriter, r *dns.Msg, logger *log. return fmt.Errorf("received nil DNS message") } + // Clear Zero bit from peer responses to prevent external sources from + // manipulating our internal fallthrough signaling mechanism + r.MsgHdr.Zero = false + if len(r.Answer) > 0 && len(r.Question) > 0 { origPattern := "" if writer, ok := w.(*nbdns.ResponseWriterChain); ok { diff --git a/dns/dns.go b/dns/dns.go index aa0e16eb1..c43e5de00 100644 --- a/dns/dns.go +++ b/dns/dns.go @@ -47,8 +47,8 @@ type CustomZone struct { Records []SimpleRecord // SearchDomainDisabled indicates whether to add match domains to a search domains list or not SearchDomainDisabled bool - // SkipPTRProcess indicates whether a client should process PTR records from custom zones - SkipPTRProcess bool + // NonAuthoritative marks user-created zones + NonAuthoritative bool } // SimpleRecord provides a simple DNS record specification for CNAME, A and AAAA records diff --git a/management/internals/shared/grpc/conversion.go b/management/internals/shared/grpc/conversion.go index 455e6bd58..c4d2e92f9 100644 --- a/management/internals/shared/grpc/conversion.go +++ b/management/internals/shared/grpc/conversion.go @@ -374,8 +374,9 @@ func shouldUsePortRange(rule *proto.FirewallRule) bool { // Helper function to convert nbdns.CustomZone to proto.CustomZone func convertToProtoCustomZone(zone nbdns.CustomZone) *proto.CustomZone { protoZone := &proto.CustomZone{ - Domain: zone.Domain, - Records: make([]*proto.SimpleRecord, 0, len(zone.Records)), + Domain: zone.Domain, + Records: make([]*proto.SimpleRecord, 0, len(zone.Records)), + NonAuthoritative: zone.NonAuthoritative, } for _, record := range zone.Records { protoZone.Records = append(protoZone.Records, &proto.SimpleRecord{ diff --git a/shared/management/proto/management.pb.go b/shared/management/proto/management.pb.go index 2047c51ea..077f84ed3 100644 --- a/shared/management/proto/management.pb.go +++ b/shared/management/proto/management.pb.go @@ -2873,7 +2873,7 @@ type CustomZone struct { Domain string `protobuf:"bytes,1,opt,name=Domain,proto3" json:"Domain,omitempty"` Records []*SimpleRecord `protobuf:"bytes,2,rep,name=Records,proto3" json:"Records,omitempty"` SearchDomainDisabled bool `protobuf:"varint,3,opt,name=SearchDomainDisabled,proto3" json:"SearchDomainDisabled,omitempty"` - SkipPTRProcess bool `protobuf:"varint,4,opt,name=SkipPTRProcess,proto3" json:"SkipPTRProcess,omitempty"` + NonAuthoritative bool `protobuf:"varint,4,opt,name=NonAuthoritative,proto3" json:"NonAuthoritative,omitempty"` } func (x *CustomZone) Reset() { @@ -2929,9 +2929,9 @@ func (x *CustomZone) GetSearchDomainDisabled() bool { return false } -func (x *CustomZone) GetSkipPTRProcess() bool { +func (x *CustomZone) GetNonAuthoritative() bool { if x != nil { - return x.SkipPTRProcess + return x.NonAuthoritative } return false } diff --git a/shared/management/proto/management.proto b/shared/management/proto/management.proto index f2e591e88..c4cc43295 100644 --- a/shared/management/proto/management.proto +++ b/shared/management/proto/management.proto @@ -464,7 +464,9 @@ message CustomZone { string Domain = 1; repeated SimpleRecord Records = 2; bool SearchDomainDisabled = 3; - bool SkipPTRProcess = 4; + // NonAuthoritative indicates this is a user-created zone (not the built-in peer DNS zone). + // Non-authoritative zones will fallthrough to lower-priority handlers on NXDOMAIN and skip PTR processing. + bool NonAuthoritative = 4; } // SimpleRecord represents a dns.SimpleRecord From 37abab8b69ca1e5072953328397622d03fc4a55e Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Mon, 12 Jan 2026 17:09:03 +0100 Subject: [PATCH 041/374] [management] Check config compatibility (#5087) * Enforce HttpConfig overwrite when embeddedIdp is enabled * Disable offline_access scope in dashboard by default * Add group propagation foundation to embedded idp * Require groups scope in dex config for okt and pocket * remove offline_access from device default scopes --- idp/dex/provider.go | 14 ++++-- infrastructure_files/getting-started.sh | 2 +- management/cmd/management.go | 62 +++++++++---------------- management/server/idp/embedded.go | 2 +- 4 files changed, 33 insertions(+), 47 deletions(-) diff --git a/idp/dex/provider.go b/idp/dex/provider.go index fae682959..6a4fe7873 100644 --- a/idp/dex/provider.go +++ b/idp/dex/provider.go @@ -792,11 +792,12 @@ func (p *Provider) resolveRedirectURI(redirectURI string) string { // buildOIDCConnectorConfig creates config for OIDC-based connectors func buildOIDCConnectorConfig(cfg *ConnectorConfig, redirectURI string) ([]byte, error) { oidcConfig := map[string]interface{}{ - "issuer": cfg.Issuer, - "clientID": cfg.ClientID, - "clientSecret": cfg.ClientSecret, - "redirectURI": redirectURI, - "scopes": []string{"openid", "profile", "email"}, + "issuer": cfg.Issuer, + "clientID": cfg.ClientID, + "clientSecret": cfg.ClientSecret, + "redirectURI": redirectURI, + "scopes": []string{"openid", "profile", "email"}, + "insecureEnableGroups": true, } switch cfg.Type { case "zitadel": @@ -806,6 +807,9 @@ func buildOIDCConnectorConfig(cfg *ConnectorConfig, redirectURI string) ([]byte, oidcConfig["claimMapping"] = map[string]string{"email": "preferred_username"} case "okta": oidcConfig["insecureSkipEmailVerified"] = true + oidcConfig["scopes"] = []string{"openid", "profile", "email", "groups"} + case "pocketid": + oidcConfig["scopes"] = []string{"openid", "profile", "email", "groups"} } return encodeConnectorConfig(oidcConfig) } diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh index b693f807e..5a9488fad 100755 --- a/infrastructure_files/getting-started.sh +++ b/infrastructure_files/getting-started.sh @@ -270,7 +270,7 @@ AUTH_CLIENT_ID=netbird-dashboard AUTH_CLIENT_SECRET= AUTH_AUTHORITY=$NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN/oauth2 USE_AUTH0=false -AUTH_SUPPORTED_SCOPES=openid profile email offline_access +AUTH_SUPPORTED_SCOPES=openid profile email groups AUTH_REDIRECT_URI=/nb-auth AUTH_SILENT_REDIRECT_URI=/nb-silent-auth # SSL diff --git a/management/cmd/management.go b/management/cmd/management.go index 9dbd4a6d4..7da04074b 100644 --- a/management/cmd/management.go +++ b/management/cmd/management.go @@ -143,7 +143,7 @@ func loadMgmtConfig(ctx context.Context, mgmtConfigPath string) (*nbconfig.Confi applyCommandLineOverrides(loadedConfig) // Apply EmbeddedIdP config to HttpConfig if embedded IdP is enabled - err := applyEmbeddedIdPConfig(loadedConfig) + err := applyEmbeddedIdPConfig(ctx, loadedConfig) if err != nil { return nil, err } @@ -177,7 +177,7 @@ func applyCommandLineOverrides(cfg *nbconfig.Config) { // applyEmbeddedIdPConfig populates HttpConfig and EmbeddedIdP storage from config when embedded IdP is enabled. // This allows users to only specify EmbeddedIdP config without duplicating values in HttpConfig. -func applyEmbeddedIdPConfig(cfg *nbconfig.Config) error { +func applyEmbeddedIdPConfig(ctx context.Context, cfg *nbconfig.Config) error { if cfg.EmbeddedIdP == nil || !cfg.EmbeddedIdP.Enabled { return nil } @@ -193,11 +193,6 @@ func applyEmbeddedIdPConfig(cfg *nbconfig.Config) error { // Set LocalAddress for embedded IdP if enabled, used for internal JWT validation cfg.EmbeddedIdP.LocalAddress = fmt.Sprintf("localhost:%d", mgmtPort) - // Ensure HttpConfig exists - if cfg.HttpConfig == nil { - cfg.HttpConfig = &nbconfig.HttpServerConfig{} - } - // Set storage defaults based on Datadir if cfg.EmbeddedIdP.Storage.Type == "" { cfg.EmbeddedIdP.Storage.Type = "sqlite3" @@ -208,40 +203,22 @@ func applyEmbeddedIdPConfig(cfg *nbconfig.Config) error { issuer := cfg.EmbeddedIdP.Issuer - // Set AuthIssuer from EmbeddedIdP issuer - if cfg.HttpConfig.AuthIssuer == "" { - cfg.HttpConfig.AuthIssuer = issuer + if cfg.HttpConfig != nil { + log.WithContext(ctx).Warnf("overriding HttpConfig with EmbeddedIdP config. " + + "HttpConfig is ignored when EmbeddedIdP is enabled. Please remove HttpConfig section from the config file") + } else { + // Ensure HttpConfig exists. We need it for backwards compatibility with the old config format. + cfg.HttpConfig = &nbconfig.HttpServerConfig{} } - // Set AuthAudience to the dashboard client ID - if cfg.HttpConfig.AuthAudience == "" { - cfg.HttpConfig.AuthAudience = "netbird-dashboard" - } - - // Set CLIAuthAudience to the client app client ID - if cfg.HttpConfig.CLIAuthAudience == "" { - cfg.HttpConfig.CLIAuthAudience = "netbird-cli" - } - - // Set AuthUserIDClaim to "sub" (standard OIDC claim) - if cfg.HttpConfig.AuthUserIDClaim == "" { - cfg.HttpConfig.AuthUserIDClaim = "sub" - } - - // Set AuthKeysLocation to the JWKS endpoint - if cfg.HttpConfig.AuthKeysLocation == "" { - cfg.HttpConfig.AuthKeysLocation = issuer + "/keys" - } - - // Set OIDCConfigEndpoint to the discovery endpoint - if cfg.HttpConfig.OIDCConfigEndpoint == "" { - cfg.HttpConfig.OIDCConfigEndpoint = issuer + "/.well-known/openid-configuration" - } - - // Copy SignKeyRefreshEnabled from EmbeddedIdP config - if cfg.EmbeddedIdP.SignKeyRefreshEnabled { - cfg.HttpConfig.IdpSignKeyRefreshEnabled = true - } + // Set HttpConfig values from EmbeddedIdP + cfg.HttpConfig.AuthIssuer = issuer + cfg.HttpConfig.AuthAudience = "netbird-dashboard" + cfg.HttpConfig.CLIAuthAudience = "netbird-cli" + cfg.HttpConfig.AuthUserIDClaim = "sub" + cfg.HttpConfig.AuthKeysLocation = issuer + "/keys" + cfg.HttpConfig.OIDCConfigEndpoint = issuer + "/.well-known/openid-configuration" + cfg.HttpConfig.IdpSignKeyRefreshEnabled = true return nil } @@ -249,7 +226,12 @@ func applyEmbeddedIdPConfig(cfg *nbconfig.Config) error { // applyOIDCConfig fetches and applies OIDC configuration if endpoint is specified func applyOIDCConfig(ctx context.Context, cfg *nbconfig.Config) error { oidcEndpoint := cfg.HttpConfig.OIDCConfigEndpoint - if oidcEndpoint == "" || cfg.EmbeddedIdP != nil { + if oidcEndpoint == "" { + return nil + } + + if cfg.EmbeddedIdP != nil && cfg.EmbeddedIdP.Enabled { + // skip OIDC config fetching if EmbeddedIdP is enabled as it is unnecessary given it is embedded return nil } diff --git a/management/server/idp/embedded.go b/management/server/idp/embedded.go index 7b8e5033c..0e46b506e 100644 --- a/management/server/idp/embedded.go +++ b/management/server/idp/embedded.go @@ -20,7 +20,7 @@ const ( staticClientCLI = "netbird-cli" defaultCLIRedirectURL1 = "http://localhost:53000/" defaultCLIRedirectURL2 = "http://localhost:54000/" - defaultScopes = "openid profile email offline_access" + defaultScopes = "openid profile email" defaultUserIDClaim = "sub" ) From 94de656fae1c49c0cb358cf11bc1be3afcb688dd Mon Sep 17 00:00:00 2001 From: Nima Sadeghifard Date: Mon, 12 Jan 2026 19:06:28 +0100 Subject: [PATCH 042/374] [misc] Add hiring announcement with link to careers.netbird.io (#5095) --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 28b53d5b6..8f4c04641 100644 --- a/README.md +++ b/README.md @@ -38,6 +38,11 @@
+ + 🚀 We are hiring! Join us at careers.netbird.io + +
+
New: NetBird terraform provider From d9118eb239a98a158eeaebce1907cb0560f3a1e6 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 13 Jan 2026 13:33:15 +0100 Subject: [PATCH 043/374] [client] Fix WASM peer connection to lazy peers (#5097) WASM peers now properly initiate relay connections instead of waiting for offers that lazy peers won't send. --- client/internal/peer/conn.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/client/internal/peer/conn.go b/client/internal/peer/conn.go index 20a2eb342..80ca36789 100644 --- a/client/internal/peer/conn.go +++ b/client/internal/peer/conn.go @@ -669,10 +669,17 @@ func (conn *Conn) isConnectedOnAllWay() (connected bool) { } }() - if runtime.GOOS != "js" && conn.statusICE.Get() == worker.StatusDisconnected && !conn.workerICE.InProgress() { + // For JS platform: only relay connection is supported + if runtime.GOOS == "js" { + return conn.statusRelay.Get() == worker.StatusConnected + } + + // For non-JS platforms: check ICE connection status + if conn.statusICE.Get() == worker.StatusDisconnected && !conn.workerICE.InProgress() { return false } + // If relay is supported with peer, it must also be connected if conn.workerRelay.IsRelayConnectionSupportedWithPeer() { if conn.statusRelay.Get() == worker.StatusDisconnected { return false From 00b747ad5d27e545a6f4d1afb8c23783cf6df2d8 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Wed, 14 Jan 2026 09:53:14 +0100 Subject: [PATCH 044/374] Handle fallback for invalid `loginuid` in `ui-post-install.sh`. (#5099) --- release_files/ui-post-install.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/release_files/ui-post-install.sh b/release_files/ui-post-install.sh index ff6c4ee9b..e2eb32cdc 100644 --- a/release_files/ui-post-install.sh +++ b/release_files/ui-post-install.sh @@ -8,6 +8,10 @@ pid="$(pgrep -x -f /usr/bin/netbird-ui || true)" if [ -n "${pid}" ] then uid="$(cat /proc/"${pid}"/loginuid)" + # loginuid can be 4294967295 (-1) if not set, fall back to process uid + if [ "${uid}" = "4294967295" ] || [ "${uid}" = "-1" ]; then + uid="$(stat -c '%u' /proc/"${pid}")" + fi username="$(id -nu "${uid}")" # Only re-run if it was already running pkill -x -f /usr/bin/netbird-ui >/dev/null 2>&1 From ff10498a8bea49ad5117677e23d9d04fe73ccd2b Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Wed, 14 Jan 2026 13:13:30 +0100 Subject: [PATCH 045/374] Feature/embedded STUN (#5062) --- go.mod | 8 +- go.sum | 16 +- relay/cmd/root.go | 178 +++++++++++++--- stun/server.go | 170 ++++++++++++++++ stun/server_test.go | 479 ++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 806 insertions(+), 45 deletions(-) create mode 100644 stun/server.go create mode 100644 stun/server_test.go diff --git a/go.mod b/go.mod index cf55b9260..773869cb5 100644 --- a/go.mod +++ b/go.mod @@ -78,8 +78,8 @@ require ( github.com/pion/logging v0.2.4 github.com/pion/randutil v0.1.0 github.com/pion/stun/v2 v2.0.0 - github.com/pion/stun/v3 v3.0.0 - github.com/pion/transport/v3 v3.0.7 + github.com/pion/stun/v3 v3.1.0 + github.com/pion/transport/v3 v3.1.1 github.com/pion/turn/v3 v3.0.1 github.com/pkg/sftp v1.13.9 github.com/prometheus/client_golang v1.23.2 @@ -241,7 +241,7 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect github.com/pion/dtls/v2 v2.2.10 // indirect - github.com/pion/dtls/v3 v3.0.7 // indirect + github.com/pion/dtls/v3 v3.0.9 // indirect github.com/pion/mdns/v2 v2.0.7 // indirect github.com/pion/transport/v2 v2.2.4 // indirect github.com/pion/turn/v4 v4.1.1 // indirect @@ -263,7 +263,7 @@ require ( github.com/tklauser/numcpus v0.8.0 // indirect github.com/vishvananda/netns v0.0.5 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - github.com/wlynxg/anet v0.0.3 // indirect + github.com/wlynxg/anet v0.0.5 // indirect github.com/yuin/goldmark v1.7.8 // indirect github.com/zeebo/blake3 v0.2.3 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect diff --git a/go.sum b/go.sum index e89e0ef12..4ea00b399 100644 --- a/go.sum +++ b/go.sum @@ -444,8 +444,8 @@ github.com/petermattis/goid v0.0.0-20250303134427-723919f7f203/go.mod h1:pxMtw7c github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= github.com/pion/dtls/v2 v2.2.10 h1:u2Axk+FyIR1VFTPurktB+1zoEPGIW3bmyj3LEFrXjAA= github.com/pion/dtls/v2 v2.2.10/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/dtls/v3 v3.0.7 h1:bItXtTYYhZwkPFk4t1n3Kkf5TDrfj6+4wG+CZR8uI9Q= -github.com/pion/dtls/v3 v3.0.7/go.mod h1:uDlH5VPrgOQIw59irKYkMudSFprY9IEFCqz/eTz16f8= +github.com/pion/dtls/v3 v3.0.9 h1:4AijfFRm8mAjd1gfdlB1wzJF3fjjR/VPIpJgkEtvYmM= +github.com/pion/dtls/v3 v3.0.9/go.mod h1:abApPjgadS/ra1wvUzHLc3o2HvoxppAh+NZkyApL4Os= github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= github.com/pion/logging v0.2.4 h1:tTew+7cmQ+Mc1pTBLKH2puKsOvhm32dROumOZ655zB8= github.com/pion/logging v0.2.4/go.mod h1:DffhXTKYdNZU+KtJ5pyQDjvOAh/GsNSyv1lbkFbe3so= @@ -455,14 +455,14 @@ github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= github.com/pion/stun/v2 v2.0.0 h1:A5+wXKLAypxQri59+tmQKVs7+l6mMM+3d+eER9ifRU0= github.com/pion/stun/v2 v2.0.0/go.mod h1:22qRSh08fSEttYUmJZGlriq9+03jtVmXNODgLccj8GQ= -github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw= -github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU= +github.com/pion/stun/v3 v3.1.0 h1:bS1jjT3tGWZ4UPmIUeyalOylamTMTFg1OvXtY/r6seM= +github.com/pion/stun/v3 v3.1.0/go.mod h1:egmx1CUcfSSGJxQCOjtVlomfPqmQ58BibPyuOWNGQEU= github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= github.com/pion/transport/v2 v2.2.4 h1:41JJK6DZQYSeVLxILA2+F4ZkKb4Xd/tFJZRFZQ9QAlo= github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= -github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= -github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= +github.com/pion/transport/v3 v3.1.1 h1:Tr684+fnnKlhPceU+ICdrw6KKkTms+5qHMgw6bIkYOM= +github.com/pion/transport/v3 v3.1.1/go.mod h1:+c2eewC5WJQHiAA46fkMMzoYZSuGzA/7E2FPrOYHctQ= github.com/pion/turn/v3 v3.0.1 h1:wLi7BTQr6/Q20R0vt/lHbjv6y4GChFtC33nkYbasoT8= github.com/pion/turn/v3 v3.0.1/go.mod h1:MrJDKgqryDyWy1/4NT9TWfXWGMC7UHT6pJIv1+gMeNE= github.com/pion/turn/v4 v4.1.1 h1:9UnY2HB99tpDyz3cVVZguSxcqkJ1DsTSZ+8TGruh4fc= @@ -574,8 +574,8 @@ github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IU github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= -github.com/wlynxg/anet v0.0.3 h1:PvR53psxFXstc12jelG6f1Lv4MWqE0tI76/hHGjh9rg= -github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= +github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= diff --git a/relay/cmd/root.go b/relay/cmd/root.go index e7dadcfdf..20c565c3d 100644 --- a/relay/cmd/root.go +++ b/relay/cmd/root.go @@ -6,6 +6,7 @@ import ( "crypto/tls" "errors" "fmt" + "net" "net/http" "os" "os/signal" @@ -22,6 +23,7 @@ import ( "github.com/netbirdio/netbird/relay/server" "github.com/netbirdio/netbird/shared/relay/auth" "github.com/netbirdio/netbird/signal/metrics" + "github.com/netbirdio/netbird/stun" "github.com/netbirdio/netbird/util" ) @@ -43,6 +45,10 @@ type Config struct { LogLevel string LogFile string HealthcheckListenAddress string + // STUN server configuration + EnableSTUN bool + STUNPorts []int + STUNLogLevel string } func (c Config) Validate() error { @@ -52,6 +58,25 @@ func (c Config) Validate() error { if c.AuthSecret == "" { return fmt.Errorf("auth secret is required") } + + // Validate STUN configuration + if c.EnableSTUN { + if len(c.STUNPorts) == 0 { + return fmt.Errorf("--stun-ports is required when --enable-stun is set") + } + + seen := make(map[int]bool) + for _, port := range c.STUNPorts { + if port <= 0 || port > 65535 { + return fmt.Errorf("invalid STUN port %d: must be between 1 and 65535", port) + } + if seen[port] { + return fmt.Errorf("duplicate STUN port %d", port) + } + seen[port] = true + } + } + return nil } @@ -91,6 +116,9 @@ func init() { rootCmd.PersistentFlags().StringVar(&cobraConfig.LogLevel, "log-level", "info", "log level") rootCmd.PersistentFlags().StringVar(&cobraConfig.LogFile, "log-file", "console", "log file") rootCmd.PersistentFlags().StringVarP(&cobraConfig.HealthcheckListenAddress, "health-listen-address", "H", ":9000", "listen address of healthcheck server") + rootCmd.PersistentFlags().BoolVar(&cobraConfig.EnableSTUN, "enable-stun", false, "enable embedded STUN server") + rootCmd.PersistentFlags().IntSliceVar(&cobraConfig.STUNPorts, "stun-ports", []int{3478}, "ports for the embedded STUN server (can be specified multiple times or comma-separated)") + rootCmd.PersistentFlags().StringVar(&cobraConfig.STUNLogLevel, "stun-log-level", "info", "log level for STUN server (panic, fatal, error, warn, info, debug, trace)") setFlagsFromEnvVars(rootCmd) } @@ -119,21 +147,14 @@ func execute(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to initialize log: %s", err) } + // Resource creation phase (fail fast before starting any goroutines) + metricsServer, err := metrics.NewServer(cobraConfig.MetricsPort, "") if err != nil { log.Debugf("setup metrics: %v", err) return fmt.Errorf("setup metrics: %v", err) } - wg.Add(1) - go func() { - defer wg.Done() - log.Infof("running metrics server: %s%s", metricsServer.Addr, metricsServer.Endpoint) - if err := metricsServer.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) { - log.Fatalf("Failed to start metrics server: %v", err) - } - }() - srvListenerCfg := server.ListenerConfig{ Address: cobraConfig.ListenAddress, } @@ -145,6 +166,12 @@ func execute(cmd *cobra.Command, args []string) error { } srvListenerCfg.TLSConfig = tlsConfig + // Create STUN listeners early to fail fast + stunListeners, err := createSTUNListeners() + if err != nil { + return err + } + hashedSecret := sha256.Sum256([]byte(cobraConfig.AuthSecret)) authenticator := auth.NewTimedHMACValidator(hashedSecret[:], 24*time.Hour) @@ -155,60 +182,145 @@ func execute(cmd *cobra.Command, args []string) error { TLSSupport: tlsSupport, } - srv, err := server.NewServer(cfg) + srv, err := createRelayServer(cfg) if err != nil { - log.Debugf("failed to create relay server: %v", err) - return fmt.Errorf("failed to create relay server: %v", err) + cleanupSTUNListeners(stunListeners) + return err } + + hCfg := healthcheck.Config{ + ListenAddress: cobraConfig.HealthcheckListenAddress, + ServiceChecker: srv, + } + httpHealthcheck, err := createHealthCheck(hCfg) + if err != nil { + cleanupSTUNListeners(stunListeners) + return err + } + + var stunServer *stun.Server + if len(stunListeners) > 0 { + stunServer = stun.NewServer(stunListeners, cobraConfig.STUNLogLevel) + } + + // Start all servers (only after all resources are successfully created) + startServers(&wg, metricsServer, srv, srvListenerCfg, httpHealthcheck, stunServer) + + waitForExitSignal() + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + err = shutdownServers(ctx, metricsServer, srv, httpHealthcheck, stunServer) + wg.Wait() + return err +} + +func startServers(wg *sync.WaitGroup, metricsServer *metrics.Metrics, srv *server.Server, srvListenerCfg server.ListenerConfig, httpHealthcheck *healthcheck.Server, stunServer *stun.Server) { + wg.Add(1) + go func() { + defer wg.Done() + log.Infof("running metrics server: %s%s", metricsServer.Addr, metricsServer.Endpoint) + if err := metricsServer.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) { + log.Fatalf("failed to start metrics server: %v", err) + } + }() + instanceURL := srv.InstanceURL() log.Infof("server will be available on: %s", instanceURL.String()) wg.Add(1) go func() { defer wg.Done() if err := srv.Listen(srvListenerCfg); err != nil { - log.Fatalf("failed to bind server: %s", err) + log.Fatalf("failed to bind relay server: %s", err) } }() - hCfg := healthcheck.Config{ - ListenAddress: cobraConfig.HealthcheckListenAddress, - ServiceChecker: srv, - } - httpHealthcheck, err := healthcheck.NewServer(hCfg) - if err != nil { - log.Debugf("failed to create healthcheck server: %v", err) - return fmt.Errorf("failed to create healthcheck server: %v", err) - } wg.Add(1) go func() { defer wg.Done() if err := httpHealthcheck.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) { - log.Fatalf("Failed to start healthcheck server: %v", err) + log.Fatalf("failed to start healthcheck server: %v", err) } }() - // it will block until exit signal - waitForExitSignal() + if stunServer != nil { + wg.Add(1) + go func() { + defer wg.Done() + if err := stunServer.Listen(); err != nil { + if errors.Is(err, stun.ErrServerClosed) { + return + } + log.Errorf("STUN server error: %v", err) + } + }() + } +} - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() +func shutdownServers(ctx context.Context, metricsServer *metrics.Metrics, srv *server.Server, httpHealthcheck *healthcheck.Server, stunServer *stun.Server) error { + var errs error - var shutDownErrors error if err := httpHealthcheck.Shutdown(ctx); err != nil { - shutDownErrors = multierror.Append(shutDownErrors, fmt.Errorf("failed to close healthcheck server: %v", err)) + errs = multierror.Append(errs, fmt.Errorf("failed to close healthcheck server: %w", err)) + } + + if stunServer != nil { + if err := stunServer.Shutdown(); err != nil { + errs = multierror.Append(errs, fmt.Errorf("failed to close STUN server: %w", err)) + } } if err := srv.Shutdown(ctx); err != nil { - shutDownErrors = multierror.Append(shutDownErrors, fmt.Errorf("failed to close server: %s", err)) + errs = multierror.Append(errs, fmt.Errorf("failed to close relay server: %w", err)) } log.Infof("shutting down metrics server") if err := metricsServer.Shutdown(ctx); err != nil { - shutDownErrors = multierror.Append(shutDownErrors, fmt.Errorf("failed to close metrics server: %v", err)) + errs = multierror.Append(errs, fmt.Errorf("failed to close metrics server: %w", err)) } - wg.Wait() - return shutDownErrors + return errs +} + +func createHealthCheck(hCfg healthcheck.Config) (*healthcheck.Server, error) { + httpHealthcheck, err := healthcheck.NewServer(hCfg) + if err != nil { + log.Debugf("failed to create healthcheck server: %v", err) + return nil, fmt.Errorf("failed to create healthcheck server: %v", err) + } + return httpHealthcheck, nil +} + +func createRelayServer(cfg server.Config) (*server.Server, error) { + srv, err := server.NewServer(cfg) + if err != nil { + return nil, fmt.Errorf("failed to create relay server: %v", err) + } + return srv, nil +} + +func cleanupSTUNListeners(stunListeners []*net.UDPConn) { + for _, l := range stunListeners { + _ = l.Close() + } +} + +func createSTUNListeners() ([]*net.UDPConn, error) { + var stunListeners []*net.UDPConn + if cobraConfig.EnableSTUN { + for _, port := range cobraConfig.STUNPorts { + listener, err := net.ListenUDP("udp", &net.UDPAddr{Port: port}) + if err != nil { + // Close already opened listeners on failure + cleanupSTUNListeners(stunListeners) + log.Debugf("failed to create STUN listener on port %d: %v", port, err) + return nil, fmt.Errorf("failed to create STUN listener on port %d: %v", port, err) + } + stunListeners = append(stunListeners, listener) + } + } + return stunListeners, nil } func handleTLSConfig(cfg *Config) (*tls.Config, bool, error) { diff --git a/stun/server.go b/stun/server.go new file mode 100644 index 000000000..be5717d48 --- /dev/null +++ b/stun/server.go @@ -0,0 +1,170 @@ +// Package stun provides an embedded STUN server for NAT traversal discovery. +package stun + +import ( + "errors" + "fmt" + "net" + "sync" + + "github.com/hashicorp/go-multierror" + nberrors "github.com/netbirdio/netbird/client/errors" + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/formatter" + "github.com/pion/stun/v3" +) + +// ErrServerClosed is returned by Listen when the server is shut down gracefully. +var ErrServerClosed = errors.New("stun: server closed") + +// ErrNoListeners is returned by Listen when no UDP connections were provided. +var ErrNoListeners = errors.New("stun: no listeners configured") + +// Server implements a STUN server that responds to binding requests +// with the client's reflexive transport address. +type Server struct { + conns []*net.UDPConn + logger *log.Entry + logLevel log.Level + + wg sync.WaitGroup +} + +// NewServer creates a new STUN server with the given UDP listeners. +// The caller is responsible for creating and providing the listeners. +// logLevel can be: panic, fatal, error, warn, info, debug, trace +func NewServer(conns []*net.UDPConn, logLevel string) *Server { + level, err := log.ParseLevel(logLevel) + if err != nil { + level = log.InfoLevel + } + + // Create a separate logger with its own level setting + // This allows --stun-log-level to work independently of --log-level + stunLogger := log.New() + stunLogger.SetOutput(log.StandardLogger().Out) + stunLogger.SetLevel(level) + // Use the formatter package to set up formatter, ReportCaller, and context hook + formatter.SetTextFormatter(stunLogger) + + logger := stunLogger.WithField("component", "stun-server") + logger.Infof("STUN server log level set to: %s", level.String()) + + return &Server{ + conns: conns, + logger: logger, + logLevel: level, + } +} + +// Listen starts the STUN server and blocks until the server is shut down. +// Returns ErrServerClosed when shut down gracefully via Shutdown. +// Returns ErrNoListeners if no UDP connections were provided. +func (s *Server) Listen() error { + if len(s.conns) == 0 { + return ErrNoListeners + } + + // Start a read loop for each listener + for _, conn := range s.conns { + s.logger.Infof("STUN server listening on %s", conn.LocalAddr()) + s.wg.Add(1) + go s.readLoop(conn) + } + + s.wg.Wait() + return ErrServerClosed +} + +// readLoop continuously reads UDP packets and handles STUN requests. +func (s *Server) readLoop(conn *net.UDPConn) { + defer s.wg.Done() + buf := make([]byte, 1500) // Standard MTU size + for { + n, remoteAddr, err := conn.ReadFromUDP(buf) + + if err != nil { + // Check if the connection was closed externally + if errors.Is(err, net.ErrClosed) { + s.logger.Info("UDP connection closed, stopping read loop") + return + } + s.logger.Warnf("failed to read UDP packet: %v", err) + continue + } + + // Handle packet in the same goroutine to avoid complexity + // STUN responses are small and fast + s.handlePacket(conn, buf[:n], remoteAddr) + } +} + +// handlePacket processes a STUN request and sends a response. +func (s *Server) handlePacket(conn *net.UDPConn, data []byte, addr *net.UDPAddr) { + localPort := conn.LocalAddr().(*net.UDPAddr).Port + + s.logger.Debugf("[port:%d] received %d bytes from %s", localPort, len(data), addr) + + // Check if it's a STUN message + if !stun.IsMessage(data) { + s.logger.Debugf("[port:%d] not a STUN message (first bytes: %x)", localPort, data[:min(len(data), 8)]) + return + } + + // Parse the STUN message + msg := &stun.Message{Raw: data} + if err := msg.Decode(); err != nil { + s.logger.Warnf("[port:%d] failed to decode STUN message from %s: %v", localPort, addr, err) + return + } + + s.logger.Debugf("[port:%d] received STUN %s from %s (tx=%x)", localPort, msg.Type, addr, msg.TransactionID[:8]) + + // Only handle binding requests + if msg.Type != stun.BindingRequest { + s.logger.Debugf("[port:%d] ignoring non-binding request: %s", localPort, msg.Type) + return + } + + // Build the response + response, err := stun.Build( + stun.NewTransactionIDSetter(msg.TransactionID), + stun.BindingSuccess, + &stun.XORMappedAddress{ + IP: addr.IP, + Port: addr.Port, + }, + stun.Fingerprint, + ) + if err != nil { + s.logger.Errorf("[port:%d] failed to build STUN response: %v", localPort, err) + return + } + + // Send the response on the same connection it was received on + n, err := conn.WriteToUDP(response.Raw, addr) + if err != nil { + s.logger.Errorf("[port:%d] failed to send STUN response to %s: %v", localPort, addr, err) + return + } + + s.logger.Debugf("[port:%d] sent STUN BindingSuccess to %s (%d bytes) with XORMappedAddress %s:%d", localPort, addr, n, addr.IP, addr.Port) +} + +// Shutdown gracefully stops the STUN server. +func (s *Server) Shutdown() error { + s.logger.Info("shutting down STUN server") + + var merr *multierror.Error + + for _, conn := range s.conns { + if err := conn.Close(); err != nil && !errors.Is(err, net.ErrClosed) { + merr = multierror.Append(merr, fmt.Errorf("close STUN UDP connection: %w", err)) + } + } + + // Wait for all readLoops to finish + s.wg.Wait() + return nberrors.FormatErrorOrNil(merr) +} diff --git a/stun/server_test.go b/stun/server_test.go new file mode 100644 index 000000000..4fd949863 --- /dev/null +++ b/stun/server_test.go @@ -0,0 +1,479 @@ +package stun + +import ( + "errors" + "fmt" + "math/rand" + "net" + "os" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/pion/stun/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// createTestServer creates a STUN server listening on a random port for testing. +// Returns the server, the listener connection (caller must close), and the server address. +func createTestServer(t testing.TB) (*Server, *net.UDPConn, *net.UDPAddr) { + t.Helper() + conn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 0}) + require.NoError(t, err) + server := NewServer([]*net.UDPConn{conn}, "debug") + return server, conn, conn.LocalAddr().(*net.UDPAddr) +} + +// waitForServerReady polls the server with STUN binding requests until it responds. +// This avoids flaky tests on slow CI machines that relied on time.Sleep. +func waitForServerReady(t testing.TB, serverAddr *net.UDPAddr, timeout time.Duration) { + t.Helper() + deadline := time.Now().Add(timeout) + retryInterval := 10 * time.Millisecond + + clientConn, err := net.DialUDP("udp", nil, serverAddr) + require.NoError(t, err) + defer clientConn.Close() + + buf := make([]byte, 1500) + for time.Now().Before(deadline) { + msg, err := stun.Build(stun.TransactionID, stun.BindingRequest) + require.NoError(t, err) + + _, err = clientConn.Write(msg.Raw) + require.NoError(t, err) + + _ = clientConn.SetReadDeadline(time.Now().Add(retryInterval)) + n, err := clientConn.Read(buf) + if err != nil { + // Timeout or other error, retry + continue + } + + response := &stun.Message{Raw: buf[:n]} + if err := response.Decode(); err != nil { + continue + } + + if response.Type == stun.BindingSuccess { + return // Server is ready + } + } + + t.Fatalf("server did not become ready within %v", timeout) +} + +func TestServer_BindingRequest(t *testing.T) { + // Start the STUN server on a random port + server, listener, serverAddr := createTestServer(t) + + // Start server in background + serverErrCh := make(chan error, 1) + go func() { + serverErrCh <- server.Listen() + }() + + // Wait for server to be ready + waitForServerReady(t, serverAddr, 2*time.Second) + + // Create a UDP client + clientConn, err := net.DialUDP("udp", nil, serverAddr) + require.NoError(t, err) + defer clientConn.Close() + + // Build a STUN binding request + msg, err := stun.Build(stun.TransactionID, stun.BindingRequest) + require.NoError(t, err) + + // Send the request + _, err = clientConn.Write(msg.Raw) + require.NoError(t, err) + + // Read the response + buf := make([]byte, 1500) + _ = clientConn.SetReadDeadline(time.Now().Add(2 * time.Second)) + n, err := clientConn.Read(buf) + require.NoError(t, err) + + // Parse the response + response := &stun.Message{Raw: buf[:n]} + err = response.Decode() + require.NoError(t, err) + + // Verify it's a binding success + assert.Equal(t, stun.BindingSuccess, response.Type) + + // Extract the XOR-MAPPED-ADDRESS + var xorAddr stun.XORMappedAddress + err = xorAddr.GetFrom(response) + require.NoError(t, err) + + // Verify the address matches our client's local address + clientAddr := clientConn.LocalAddr().(*net.UDPAddr) + assert.Equal(t, clientAddr.IP.String(), xorAddr.IP.String()) + assert.Equal(t, clientAddr.Port, xorAddr.Port) + + // Close listener first to unblock readLoop, then shutdown + _ = listener.Close() + err = server.Shutdown() + require.NoError(t, err) +} + +func TestServer_IgnoresNonSTUNPackets(t *testing.T) { + server, listener, serverAddr := createTestServer(t) + + go func() { + _ = server.Listen() + }() + + waitForServerReady(t, serverAddr, 2*time.Second) + + clientConn, err := net.DialUDP("udp", nil, serverAddr) + require.NoError(t, err) + defer clientConn.Close() + + // Send non-STUN data + _, err = clientConn.Write([]byte("hello world")) + require.NoError(t, err) + + // Try to read response (should timeout since server ignores non-STUN) + buf := make([]byte, 1500) + _ = clientConn.SetReadDeadline(time.Now().Add(200 * time.Millisecond)) + _, err = clientConn.Read(buf) + assert.Error(t, err) // Should be a timeout error + + // Close listener first to unblock readLoop, then shutdown + _ = listener.Close() + _ = server.Shutdown() +} + +func TestServer_Shutdown(t *testing.T) { + server, listener, serverAddr := createTestServer(t) + + serverDone := make(chan struct{}) + go func() { + err := server.Listen() + assert.True(t, errors.Is(err, ErrServerClosed)) + close(serverDone) + }() + + waitForServerReady(t, serverAddr, 2*time.Second) + + // Close listener first to unblock readLoop, then shutdown + _ = listener.Close() + + err := server.Shutdown() + require.NoError(t, err) + + // Wait for Listen to return + select { + case <-serverDone: + // Success + case <-time.After(3 * time.Second): + t.Fatal("server did not shutdown in time") + } +} + +func TestServer_MultipleRequests(t *testing.T) { + server, listener, serverAddr := createTestServer(t) + + go func() { + _ = server.Listen() + }() + + waitForServerReady(t, serverAddr, 2*time.Second) + + // Create multiple clients and send requests + for i := 0; i < 5; i++ { + func() { + clientConn, err := net.DialUDP("udp", nil, serverAddr) + require.NoError(t, err) + defer clientConn.Close() + + msg, err := stun.Build(stun.TransactionID, stun.BindingRequest) + require.NoError(t, err) + + _, err = clientConn.Write(msg.Raw) + require.NoError(t, err) + + buf := make([]byte, 1500) + _ = clientConn.SetReadDeadline(time.Now().Add(2 * time.Second)) + n, err := clientConn.Read(buf) + require.NoError(t, err) + + response := &stun.Message{Raw: buf[:n]} + err = response.Decode() + require.NoError(t, err) + + assert.Equal(t, stun.BindingSuccess, response.Type) + }() + } + + // Close listener first to unblock readLoop, then shutdown + _ = listener.Close() + _ = server.Shutdown() +} + +func TestServer_ConcurrentClients(t *testing.T) { + numClients := 100 + requestsPerClient := 5 + maxStartDelay := 100 * time.Millisecond // Random delay before client starts + maxRequestDelay := 500 * time.Millisecond // Random delay between requests + + // Remote server to test against via env var STUN_TEST_SERVER + // Example: STUN_TEST_SERVER=example.netbird.io:3478 go test -v ./stun/... -run ConcurrentClients + remoteServer := os.Getenv("STUN_TEST_SERVER") + + var serverAddr *net.UDPAddr + var server *Server + var listener *net.UDPConn + + if remoteServer != "" { + // Use remote server + var err error + serverAddr, err = net.ResolveUDPAddr("udp", remoteServer) + require.NoError(t, err) + t.Logf("Testing against remote server: %s", remoteServer) + } else { + // Start local server + server, listener, serverAddr = createTestServer(t) + go func() { + _ = server.Listen() + }() + waitForServerReady(t, serverAddr, 2*time.Second) + t.Logf("Testing against local server: %s", serverAddr) + } + + var wg sync.WaitGroup + errorz := make(chan error, numClients*requestsPerClient) + successCount := make(chan int, numClients) + + startTime := time.Now() + + for i := 0; i < numClients; i++ { + wg.Add(1) + go func(clientID int) { + defer wg.Done() + + // Random delay before starting + time.Sleep(time.Duration(rand.Int63n(int64(maxStartDelay)))) + + clientConn, err := net.DialUDP("udp", nil, serverAddr) + if err != nil { + errorz <- fmt.Errorf("client %d: failed to dial: %w", clientID, err) + return + } + defer clientConn.Close() + + success := 0 + for j := 0; j < requestsPerClient; j++ { + // Random delay between requests + if j > 0 { + time.Sleep(time.Duration(rand.Int63n(int64(maxRequestDelay)))) + } + + msg, err := stun.Build(stun.TransactionID, stun.BindingRequest) + if err != nil { + errorz <- fmt.Errorf("client %d: failed to build request: %w", clientID, err) + continue + } + + _, err = clientConn.Write(msg.Raw) + if err != nil { + errorz <- fmt.Errorf("client %d: failed to write: %w", clientID, err) + continue + } + + buf := make([]byte, 1500) + _ = clientConn.SetReadDeadline(time.Now().Add(5 * time.Second)) + n, err := clientConn.Read(buf) + if err != nil { + errorz <- fmt.Errorf("client %d: failed to read: %w", clientID, err) + continue + } + + response := &stun.Message{Raw: buf[:n]} + if err := response.Decode(); err != nil { + errorz <- fmt.Errorf("client %d: failed to decode: %w", clientID, err) + continue + } + + if response.Type != stun.BindingSuccess { + errorz <- fmt.Errorf("client %d: unexpected response type: %s", clientID, response.Type) + continue + } + + success++ + } + successCount <- success + }(i) + } + + wg.Wait() + close(errorz) + close(successCount) + + elapsed := time.Since(startTime) + + totalSuccess := 0 + for count := range successCount { + totalSuccess += count + } + + var errs []error + for err := range errorz { + errs = append(errs, err) + } + + totalRequests := numClients * requestsPerClient + t.Logf("Completed %d/%d requests in %v (%.2f req/s)", + totalSuccess, totalRequests, elapsed, + float64(totalSuccess)/elapsed.Seconds()) + + if len(errs) > 0 { + t.Logf("Errors (%d):", len(errs)) + for i, err := range errs { + if i < 10 { // Only show first 10 errors + t.Logf(" - %v", err) + } + } + } + + // Require at least 95% success rate + successRate := float64(totalSuccess) / float64(totalRequests) + require.GreaterOrEqual(t, successRate, 0.95, "success rate too low: %.2f%%", successRate*100) + + // Cleanup local server if used + if server != nil { + // Close listener first to unblock readLoop, then shutdown + _ = listener.Close() + _ = server.Shutdown() + } +} + +func TestServer_MultiplePorts(t *testing.T) { + // Create listeners on two random ports + conn1, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 0}) + require.NoError(t, err) + conn2, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 0}) + require.NoError(t, err) + + addr1 := conn1.LocalAddr().(*net.UDPAddr) + addr2 := conn2.LocalAddr().(*net.UDPAddr) + + server := NewServer([]*net.UDPConn{conn1, conn2}, "debug") + + go func() { + _ = server.Listen() + }() + + // Wait for server to be ready (checking first port is sufficient) + waitForServerReady(t, addr1, 2*time.Second) + + // Test requests on both ports + for _, serverAddr := range []*net.UDPAddr{addr1, addr2} { + func() { + clientConn, err := net.DialUDP("udp", nil, serverAddr) + require.NoError(t, err) + defer clientConn.Close() + + msg, err := stun.Build(stun.TransactionID, stun.BindingRequest) + require.NoError(t, err) + + _, err = clientConn.Write(msg.Raw) + require.NoError(t, err) + + buf := make([]byte, 1500) + _ = clientConn.SetReadDeadline(time.Now().Add(2 * time.Second)) + n, err := clientConn.Read(buf) + require.NoError(t, err) + + response := &stun.Message{Raw: buf[:n]} + err = response.Decode() + require.NoError(t, err) + + assert.Equal(t, stun.BindingSuccess, response.Type) + + var xorAddr stun.XORMappedAddress + err = xorAddr.GetFrom(response) + require.NoError(t, err) + + clientAddr := clientConn.LocalAddr().(*net.UDPAddr) + assert.Equal(t, clientAddr.Port, xorAddr.Port) + }() + } + + // Close listeners first to unblock readLoops, then shutdown + _ = conn1.Close() + _ = conn2.Close() + _ = server.Shutdown() +} + +// BenchmarkSTUNServer benchmarks the STUN server with concurrent clients +func BenchmarkSTUNServer(b *testing.B) { + server, listener, serverAddr := createTestServer(b) + + go func() { + _ = server.Listen() + }() + + waitForServerReady(b, serverAddr, 2*time.Second) + + // Capture first error atomically - b.Fatal cannot be called from worker goroutines + var firstErr atomic.Pointer[error] + setErr := func(err error) { + firstErr.CompareAndSwap(nil, &err) + } + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + // Stop work if an error has occurred + if firstErr.Load() != nil { + return + } + + clientConn, err := net.DialUDP("udp", nil, serverAddr) + if err != nil { + setErr(err) + return + } + defer clientConn.Close() + + buf := make([]byte, 1500) + + for pb.Next() { + if firstErr.Load() != nil { + return + } + + msg, _ := stun.Build(stun.TransactionID, stun.BindingRequest) + _, _ = clientConn.Write(msg.Raw) + + _ = clientConn.SetReadDeadline(time.Now().Add(2 * time.Second)) + n, err := clientConn.Read(buf) + if err != nil { + setErr(err) + return + } + + response := &stun.Message{Raw: buf[:n]} + if err := response.Decode(); err != nil { + setErr(err) + return + } + } + }) + + b.StopTimer() + + // Fail after RunParallel completes + if errPtr := firstErr.Load(); errPtr != nil { + b.Fatal(*errPtr) + } + + // Close listener first to unblock readLoop, then shutdown + _ = listener.Close() + _ = server.Shutdown() +} From 520d9c66cf33722d7f0f580ac3f9fa63ee001c81 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 14 Jan 2026 20:56:16 +0800 Subject: [PATCH 046/374] [client] Fix netstack upstream dns and add wasm debug methods (#4648) --- client/cmd/debug.go | 5 +- client/cmd/status.go | 8 +- client/embed/embed.go | 83 ++++- client/internal/connect.go | 13 + client/internal/dns/server.go | 12 +- client/internal/dns/server_test.go | 5 + client/internal/dns/upstream.go | 51 +++ client/internal/dns/upstream_android.go | 4 +- client/internal/dns/upstream_general.go | 18 +- client/internal/dns/upstream_ios.go | 10 +- client/internal/dns/upstream_test.go | 19 +- client/internal/dns/wgiface.go | 3 + client/internal/dns/wgiface_windows.go | 3 + client/internal/engine.go | 28 +- client/internal/peer/status.go | 97 ++++++ .../routemanager/dnsinterceptor/handler.go | 69 ++-- .../routemanager/iface/iface_common.go | 3 + client/server/debug.go | 15 +- client/server/event.go | 7 - client/server/server.go | 95 +----- client/status/status.go | 80 ++--- client/status/status_test.go | 8 +- client/ui/debug.go | 6 +- client/wasm/cmd/main.go | 308 +++++++++++++++++- 24 files changed, 707 insertions(+), 243 deletions(-) diff --git a/client/cmd/debug.go b/client/cmd/debug.go index 7ca56857b..e56f66103 100644 --- a/client/cmd/debug.go +++ b/client/cmd/debug.go @@ -314,9 +314,8 @@ func getStatusOutput(cmd *cobra.Command, anon bool) string { profName = activeProf.Name } - statusOutputString = nbstatus.ParseToFullDetailSummary( - nbstatus.ConvertToStatusOutputOverview(statusResp, anon, "", nil, nil, nil, "", profName), - ) + overview := nbstatus.ConvertToStatusOutputOverview(statusResp, anon, "", nil, nil, nil, "", profName) + statusOutputString = overview.FullDetailSummary() } return statusOutputString } diff --git a/client/cmd/status.go b/client/cmd/status.go index 99d47cd1a..05175663c 100644 --- a/client/cmd/status.go +++ b/client/cmd/status.go @@ -103,13 +103,13 @@ func statusFunc(cmd *cobra.Command, args []string) error { var statusOutputString string switch { case detailFlag: - statusOutputString = nbstatus.ParseToFullDetailSummary(outputInformationHolder) + statusOutputString = outputInformationHolder.FullDetailSummary() case jsonFlag: - statusOutputString, err = nbstatus.ParseToJSON(outputInformationHolder) + statusOutputString, err = outputInformationHolder.JSON() case yamlFlag: - statusOutputString, err = nbstatus.ParseToYAML(outputInformationHolder) + statusOutputString, err = outputInformationHolder.YAML() default: - statusOutputString = nbstatus.ParseGeneralSummary(outputInformationHolder, false, false, false, false) + statusOutputString = outputInformationHolder.GeneralSummary(false, false, false, false) } if err != nil { diff --git a/client/embed/embed.go b/client/embed/embed.go index 353c5438f..43089fc9d 100644 --- a/client/embed/embed.go +++ b/client/embed/embed.go @@ -20,6 +20,7 @@ import ( "github.com/netbirdio/netbird/client/internal/profilemanager" sshcommon "github.com/netbirdio/netbird/client/ssh" "github.com/netbirdio/netbird/client/system" + mgmProto "github.com/netbirdio/netbird/shared/management/proto" ) var ( @@ -38,6 +39,7 @@ type Client struct { setupKey string jwtToken string connect *internal.ConnectClient + recorder *peer.Status } // Options configures a new Client. @@ -161,11 +163,17 @@ func New(opts Options) (*Client, error) { func (c *Client) Start(startCtx context.Context) error { c.mu.Lock() defer c.mu.Unlock() - if c.cancel != nil { + if c.connect != nil { return ErrClientAlreadyStarted } - ctx := internal.CtxInitState(context.Background()) + ctx, cancel := context.WithCancel(internal.CtxInitState(context.Background())) + defer func() { + if c.connect == nil { + cancel() + } + }() + // nolint:staticcheck ctx = context.WithValue(ctx, system.DeviceNameCtxKey, c.deviceName) if err := internal.Login(ctx, c.config, c.setupKey, c.jwtToken); err != nil { @@ -173,7 +181,9 @@ func (c *Client) Start(startCtx context.Context) error { } recorder := peer.NewRecorder(c.config.ManagementURL.String()) + c.recorder = recorder client := internal.NewConnectClient(ctx, c.config, recorder, false) + client.SetSyncResponsePersistence(true) // either startup error (permanent backoff err) or nil err (successful engine up) // TODO: make after-startup backoff err available @@ -197,6 +207,7 @@ func (c *Client) Start(startCtx context.Context) error { } c.connect = client + c.cancel = cancel return nil } @@ -211,17 +222,23 @@ func (c *Client) Stop(ctx context.Context) error { return ErrClientNotStarted } + if c.cancel != nil { + c.cancel() + c.cancel = nil + } + done := make(chan error, 1) + connect := c.connect go func() { - done <- c.connect.Stop() + done <- connect.Stop() }() select { case <-ctx.Done(): - c.cancel = nil + c.connect = nil return ctx.Err() case err := <-done: - c.cancel = nil + c.connect = nil if err != nil { return fmt.Errorf("stop: %w", err) } @@ -315,6 +332,62 @@ func (c *Client) NewHTTPClient() *http.Client { } } +// Status returns the current status of the client. +func (c *Client) Status() (peer.FullStatus, error) { + c.mu.Lock() + recorder := c.recorder + connect := c.connect + c.mu.Unlock() + + if recorder == nil { + return peer.FullStatus{}, errors.New("client not started") + } + + if connect != nil { + engine := connect.Engine() + if engine != nil { + _ = engine.RunHealthProbes(false) + } + } + + return recorder.GetFullStatus(), nil +} + +// GetLatestSyncResponse returns the latest sync response from the management server. +func (c *Client) GetLatestSyncResponse() (*mgmProto.SyncResponse, error) { + engine, err := c.getEngine() + if err != nil { + return nil, err + } + + syncResp, err := engine.GetLatestSyncResponse() + if err != nil { + return nil, fmt.Errorf("get sync response: %w", err) + } + + return syncResp, nil +} + +// SetLogLevel sets the logging level for the client and its components. +func (c *Client) SetLogLevel(levelStr string) error { + level, err := logrus.ParseLevel(levelStr) + if err != nil { + return fmt.Errorf("parse log level: %w", err) + } + + logrus.SetLevel(level) + + c.mu.Lock() + connect := c.connect + c.mu.Unlock() + + if connect != nil { + connect.SetLogLevel(level) + } + + return nil +} + // VerifySSHHostKey verifies an SSH host key against stored peer keys. // Returns nil if the key matches, ErrPeerNotFound if peer is not in network, // ErrNoStoredKey if peer has no stored key, or an error for verification failures. diff --git a/client/internal/connect.go b/client/internal/connect.go index 017c8bf10..65637c073 100644 --- a/client/internal/connect.go +++ b/client/internal/connect.go @@ -420,6 +420,19 @@ func (c *ConnectClient) GetLatestSyncResponse() (*mgmProto.SyncResponse, error) return syncResponse, nil } +// SetLogLevel sets the log level for the firewall manager if the engine is running. +func (c *ConnectClient) SetLogLevel(level log.Level) { + engine := c.Engine() + if engine == nil { + return + } + + fwManager := engine.GetFirewallManager() + if fwManager != nil { + fwManager.SetLogLevel(level) + } +} + // Status returns the current client status func (c *ConnectClient) Status() StatusType { if c == nil { diff --git a/client/internal/dns/server.go b/client/internal/dns/server.go index 29bb7f3dc..1ce7bf1c6 100644 --- a/client/internal/dns/server.go +++ b/client/internal/dns/server.go @@ -631,9 +631,7 @@ func (s *DefaultServer) registerFallback(config HostDNSConfig) { handler, err := newUpstreamResolver( s.ctx, - s.wgInterface.Name(), - s.wgInterface.Address().IP, - s.wgInterface.Address().Network, + s.wgInterface, s.statusRecorder, s.hostsDNSHolder, nbdns.RootZone, @@ -743,9 +741,7 @@ func (s *DefaultServer) createHandlersForDomainGroup(domainGroup nsGroupsByDomai log.Debugf("creating handler for domain=%s with priority=%d", domainGroup.domain, priority) handler, err := newUpstreamResolver( s.ctx, - s.wgInterface.Name(), - s.wgInterface.Address().IP, - s.wgInterface.Address().Network, + s.wgInterface, s.statusRecorder, s.hostsDNSHolder, domainGroup.domain, @@ -926,9 +922,7 @@ func (s *DefaultServer) addHostRootZone() { handler, err := newUpstreamResolver( s.ctx, - s.wgInterface.Name(), - s.wgInterface.Address().IP, - s.wgInterface.Address().Network, + s.wgInterface, s.statusRecorder, s.hostsDNSHolder, nbdns.RootZone, diff --git a/client/internal/dns/server_test.go b/client/internal/dns/server_test.go index 200a5f496..31e58b9f5 100644 --- a/client/internal/dns/server_test.go +++ b/client/internal/dns/server_test.go @@ -15,6 +15,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "golang.zx2c4.com/wireguard/tun/netstack" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" "github.com/netbirdio/netbird/client/firewall/uspfilter" @@ -81,6 +82,10 @@ func (w *mocWGIface) GetStats(_ string) (configurer.WGStats, error) { return configurer.WGStats{}, nil } +func (w *mocWGIface) GetNet() *netstack.Net { + return nil +} + var zoneRecords = []nbdns.SimpleRecord{ { Name: "peera.netbird.cloud", diff --git a/client/internal/dns/upstream.go b/client/internal/dns/upstream.go index c997acc75..654d280ef 100644 --- a/client/internal/dns/upstream.go +++ b/client/internal/dns/upstream.go @@ -18,6 +18,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/miekg/dns" log "github.com/sirupsen/logrus" + "golang.zx2c4.com/wireguard/tun/netstack" "github.com/netbirdio/netbird/client/iface" "github.com/netbirdio/netbird/client/internal/dns/resutil" @@ -418,6 +419,56 @@ func ExchangeWithFallback(ctx context.Context, client *dns.Client, r *dns.Msg, u return rm, t, nil } +// ExchangeWithNetstack performs a DNS exchange using netstack for dialing. +// This is needed when netstack is enabled to reach peer IPs through the tunnel. +func ExchangeWithNetstack(ctx context.Context, nsNet *netstack.Net, r *dns.Msg, upstream string) (*dns.Msg, error) { + reply, err := netstackExchange(ctx, nsNet, r, upstream, "udp") + if err != nil { + return nil, err + } + + // If response is truncated, retry with TCP + if reply != nil && reply.MsgHdr.Truncated { + log.Tracef("udp response for domain=%s type=%v class=%v is truncated, trying TCP", + r.Question[0].Name, r.Question[0].Qtype, r.Question[0].Qclass) + return netstackExchange(ctx, nsNet, r, upstream, "tcp") + } + + return reply, nil +} + +func netstackExchange(ctx context.Context, nsNet *netstack.Net, r *dns.Msg, upstream, network string) (*dns.Msg, error) { + conn, err := nsNet.DialContext(ctx, network, upstream) + if err != nil { + return nil, fmt.Errorf("with %s: %w", network, err) + } + defer func() { + if err := conn.Close(); err != nil { + log.Debugf("failed to close DNS connection: %v", err) + } + }() + + if deadline, ok := ctx.Deadline(); ok { + if err := conn.SetDeadline(deadline); err != nil { + return nil, fmt.Errorf("set deadline: %w", err) + } + } + + dnsConn := &dns.Conn{Conn: conn} + + if err := dnsConn.WriteMsg(r); err != nil { + return nil, fmt.Errorf("write %s message: %w", network, err) + } + + reply, err := dnsConn.ReadMsg() + if err != nil { + return nil, fmt.Errorf("read %s message: %w", network, err) + } + + return reply, nil +} + + // FormatPeerStatus formats peer connection status information for debugging DNS timeouts func FormatPeerStatus(peerState *peer.State) string { isConnected := peerState.ConnStatus == peer.StatusConnected diff --git a/client/internal/dns/upstream_android.go b/client/internal/dns/upstream_android.go index def281f28..d7cff377b 100644 --- a/client/internal/dns/upstream_android.go +++ b/client/internal/dns/upstream_android.go @@ -23,9 +23,7 @@ type upstreamResolver struct { // first time, and we need to wait for a while to start to use again the proper DNS resolver. func newUpstreamResolver( ctx context.Context, - _ string, - _ netip.Addr, - _ netip.Prefix, + _ WGIface, statusRecorder *peer.Status, hostsDNSHolder *hostsDNSHolder, domain string, diff --git a/client/internal/dns/upstream_general.go b/client/internal/dns/upstream_general.go index 434e5880b..1143b6c51 100644 --- a/client/internal/dns/upstream_general.go +++ b/client/internal/dns/upstream_general.go @@ -5,22 +5,23 @@ package dns import ( "context" "net/netip" + "runtime" "time" "github.com/miekg/dns" + "golang.zx2c4.com/wireguard/tun/netstack" "github.com/netbirdio/netbird/client/internal/peer" ) type upstreamResolver struct { *upstreamResolverBase + nsNet *netstack.Net } func newUpstreamResolver( ctx context.Context, - _ string, - _ netip.Addr, - _ netip.Prefix, + wgIface WGIface, statusRecorder *peer.Status, _ *hostsDNSHolder, domain string, @@ -28,12 +29,23 @@ func newUpstreamResolver( upstreamResolverBase := newUpstreamResolverBase(ctx, statusRecorder, domain) nonIOS := &upstreamResolver{ upstreamResolverBase: upstreamResolverBase, + nsNet: wgIface.GetNet(), } upstreamResolverBase.upstreamClient = nonIOS return nonIOS, nil } func (u *upstreamResolver) exchange(ctx context.Context, upstream string, r *dns.Msg) (rm *dns.Msg, t time.Duration, err error) { + // TODO: Check if upstream DNS server is routed through a peer before using netstack. + // Similar to iOS logic, we should determine if the DNS server is reachable directly + // or needs to go through the tunnel, and only use netstack when necessary. + // For now, only use netstack on JS platform where direct access is not possible. + if u.nsNet != nil && runtime.GOOS == "js" { + start := time.Now() + reply, err := ExchangeWithNetstack(ctx, u.nsNet, r, upstream) + return reply, time.Since(start), err + } + client := &dns.Client{ Timeout: ClientTimeout, } diff --git a/client/internal/dns/upstream_ios.go b/client/internal/dns/upstream_ios.go index eadcdd117..4d053a5a1 100644 --- a/client/internal/dns/upstream_ios.go +++ b/client/internal/dns/upstream_ios.go @@ -26,9 +26,7 @@ type upstreamResolverIOS struct { func newUpstreamResolver( ctx context.Context, - interfaceName string, - ip netip.Addr, - net netip.Prefix, + wgIface WGIface, statusRecorder *peer.Status, _ *hostsDNSHolder, domain string, @@ -37,9 +35,9 @@ func newUpstreamResolver( ios := &upstreamResolverIOS{ upstreamResolverBase: upstreamResolverBase, - lIP: ip, - lNet: net, - interfaceName: interfaceName, + lIP: wgIface.Address().IP, + lNet: wgIface.Address().Network, + interfaceName: wgIface.Name(), } ios.upstreamClient = ios diff --git a/client/internal/dns/upstream_test.go b/client/internal/dns/upstream_test.go index e1573e75e..2852f4775 100644 --- a/client/internal/dns/upstream_test.go +++ b/client/internal/dns/upstream_test.go @@ -2,13 +2,17 @@ package dns import ( "context" + "net" "net/netip" "strings" "testing" "time" "github.com/miekg/dns" + "golang.zx2c4.com/wireguard/tun/netstack" + "github.com/netbirdio/netbird/client/iface/device" + "github.com/netbirdio/netbird/client/iface/wgaddr" "github.com/netbirdio/netbird/client/internal/dns/test" ) @@ -58,7 +62,7 @@ func TestUpstreamResolver_ServeDNS(t *testing.T) { for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) - resolver, _ := newUpstreamResolver(ctx, "", netip.Addr{}, netip.Prefix{}, nil, nil, ".") + resolver, _ := newUpstreamResolver(ctx, &mockNetstackProvider{}, nil, nil, ".") // Convert test servers to netip.AddrPort var servers []netip.AddrPort for _, server := range testCase.InputServers { @@ -112,6 +116,19 @@ func TestUpstreamResolver_ServeDNS(t *testing.T) { } } +type mockNetstackProvider struct{} + +func (m *mockNetstackProvider) Name() string { return "mock" } +func (m *mockNetstackProvider) Address() wgaddr.Address { return wgaddr.Address{} } +func (m *mockNetstackProvider) ToInterface() *net.Interface { return nil } +func (m *mockNetstackProvider) IsUserspaceBind() bool { return false } +func (m *mockNetstackProvider) GetFilter() device.PacketFilter { return nil } +func (m *mockNetstackProvider) GetDevice() *device.FilteredDevice { return nil } +func (m *mockNetstackProvider) GetNet() *netstack.Net { return nil } +func (m *mockNetstackProvider) GetInterfaceGUIDString() (string, error) { + return "", nil +} + type mockUpstreamResolver struct { r *dns.Msg rtt time.Duration diff --git a/client/internal/dns/wgiface.go b/client/internal/dns/wgiface.go index 28e9cebf1..717e16325 100644 --- a/client/internal/dns/wgiface.go +++ b/client/internal/dns/wgiface.go @@ -5,6 +5,8 @@ package dns import ( "net" + "golang.zx2c4.com/wireguard/tun/netstack" + "github.com/netbirdio/netbird/client/iface/device" "github.com/netbirdio/netbird/client/iface/wgaddr" ) @@ -17,4 +19,5 @@ type WGIface interface { IsUserspaceBind() bool GetFilter() device.PacketFilter GetDevice() *device.FilteredDevice + GetNet() *netstack.Net } diff --git a/client/internal/dns/wgiface_windows.go b/client/internal/dns/wgiface_windows.go index d1374fd54..347e0233a 100644 --- a/client/internal/dns/wgiface_windows.go +++ b/client/internal/dns/wgiface_windows.go @@ -1,6 +1,8 @@ package dns import ( + "golang.zx2c4.com/wireguard/tun/netstack" + "github.com/netbirdio/netbird/client/iface/device" "github.com/netbirdio/netbird/client/iface/wgaddr" ) @@ -12,5 +14,6 @@ type WGIface interface { IsUserspaceBind() bool GetFilter() device.PacketFilter GetDevice() *device.FilteredDevice + GetNet() *netstack.Net GetInterfaceGUIDString() (string, error) } diff --git a/client/internal/engine.go b/client/internal/engine.go index 2acd86a16..0182b2530 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -1748,22 +1748,26 @@ func (e *Engine) RunHealthProbes(waitForResult bool) bool { } e.syncMsgMux.Unlock() - var results []relay.ProbeResult - if waitForResult { - results = e.probeStunTurn.ProbeAllWaitResult(e.ctx, stuns, turns) - } else { - results = e.probeStunTurn.ProbeAll(e.ctx, stuns, turns) - } - e.statusRecorder.UpdateRelayStates(results) + // Skip STUN/TURN probing for JS/WASM as it's not available relayHealthy := true - for _, res := range results { - if res.Err != nil { - relayHealthy = false - break + if runtime.GOOS != "js" { + var results []relay.ProbeResult + if waitForResult { + results = e.probeStunTurn.ProbeAllWaitResult(e.ctx, stuns, turns) + } else { + results = e.probeStunTurn.ProbeAll(e.ctx, stuns, turns) } + e.statusRecorder.UpdateRelayStates(results) + + for _, res := range results { + if res.Err != nil { + relayHealthy = false + break + } + } + log.Debugf("relay health check: healthy=%t", relayHealthy) } - log.Debugf("relay health check: healthy=%t", relayHealthy) allHealthy := signalHealthy && managementHealthy && relayHealthy log.Debugf("all health checks completed: healthy=%t", allHealthy) diff --git a/client/internal/peer/status.go b/client/internal/peer/status.go index 76f4f523c..697bda2ff 100644 --- a/client/internal/peer/status.go +++ b/client/internal/peer/status.go @@ -14,6 +14,7 @@ import ( "golang.org/x/exp/maps" "google.golang.org/grpc/codes" gstatus "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" "google.golang.org/protobuf/types/known/timestamppb" firewall "github.com/netbirdio/netbird/client/firewall/manager" @@ -158,6 +159,7 @@ type FullStatus struct { NSGroupStates []NSGroupState NumOfForwardingRules int LazyConnectionEnabled bool + Events []*proto.SystemEvent } type StatusChangeSubscription struct { @@ -981,6 +983,7 @@ func (d *Status) GetFullStatus() FullStatus { } fullStatus.Peers = append(fullStatus.Peers, d.offlinePeers...) + fullStatus.Events = d.GetEventHistory() return fullStatus } @@ -1181,3 +1184,97 @@ type EventSubscription struct { func (s *EventSubscription) Events() <-chan *proto.SystemEvent { return s.events } + +// ToProto converts FullStatus to proto.FullStatus. +func (fs FullStatus) ToProto() *proto.FullStatus { + pbFullStatus := proto.FullStatus{ + ManagementState: &proto.ManagementState{}, + SignalState: &proto.SignalState{}, + LocalPeerState: &proto.LocalPeerState{}, + Peers: []*proto.PeerState{}, + } + + pbFullStatus.ManagementState.URL = fs.ManagementState.URL + pbFullStatus.ManagementState.Connected = fs.ManagementState.Connected + if err := fs.ManagementState.Error; err != nil { + pbFullStatus.ManagementState.Error = err.Error() + } + + pbFullStatus.SignalState.URL = fs.SignalState.URL + pbFullStatus.SignalState.Connected = fs.SignalState.Connected + if err := fs.SignalState.Error; err != nil { + pbFullStatus.SignalState.Error = err.Error() + } + + pbFullStatus.LocalPeerState.IP = fs.LocalPeerState.IP + pbFullStatus.LocalPeerState.PubKey = fs.LocalPeerState.PubKey + pbFullStatus.LocalPeerState.KernelInterface = fs.LocalPeerState.KernelInterface + pbFullStatus.LocalPeerState.Fqdn = fs.LocalPeerState.FQDN + pbFullStatus.LocalPeerState.RosenpassPermissive = fs.RosenpassState.Permissive + pbFullStatus.LocalPeerState.RosenpassEnabled = fs.RosenpassState.Enabled + pbFullStatus.NumberOfForwardingRules = int32(fs.NumOfForwardingRules) + pbFullStatus.LazyConnectionEnabled = fs.LazyConnectionEnabled + + pbFullStatus.LocalPeerState.Networks = maps.Keys(fs.LocalPeerState.Routes) + + for _, peerState := range fs.Peers { + networks := maps.Keys(peerState.GetRoutes()) + + pbPeerState := &proto.PeerState{ + IP: peerState.IP, + PubKey: peerState.PubKey, + ConnStatus: peerState.ConnStatus.String(), + ConnStatusUpdate: timestamppb.New(peerState.ConnStatusUpdate), + Relayed: peerState.Relayed, + LocalIceCandidateType: peerState.LocalIceCandidateType, + RemoteIceCandidateType: peerState.RemoteIceCandidateType, + LocalIceCandidateEndpoint: peerState.LocalIceCandidateEndpoint, + RemoteIceCandidateEndpoint: peerState.RemoteIceCandidateEndpoint, + RelayAddress: peerState.RelayServerAddress, + Fqdn: peerState.FQDN, + LastWireguardHandshake: timestamppb.New(peerState.LastWireguardHandshake), + BytesRx: peerState.BytesRx, + BytesTx: peerState.BytesTx, + RosenpassEnabled: peerState.RosenpassEnabled, + Networks: networks, + Latency: durationpb.New(peerState.Latency), + SshHostKey: peerState.SSHHostKey, + } + pbFullStatus.Peers = append(pbFullStatus.Peers, pbPeerState) + } + + for _, relayState := range fs.Relays { + pbRelayState := &proto.RelayState{ + URI: relayState.URI, + Available: relayState.Err == nil, + } + if err := relayState.Err; err != nil { + pbRelayState.Error = err.Error() + } + pbFullStatus.Relays = append(pbFullStatus.Relays, pbRelayState) + } + + for _, dnsState := range fs.NSGroupStates { + var err string + if dnsState.Error != nil { + err = dnsState.Error.Error() + } + + var servers []string + for _, server := range dnsState.Servers { + servers = append(servers, server.String()) + } + + pbDnsState := &proto.NSGroupState{ + Servers: servers, + Domains: dnsState.Domains, + Enabled: dnsState.Enabled, + Error: err, + } + pbFullStatus.DnsServers = append(pbFullStatus.DnsServers, pbDnsState) + } + + pbFullStatus.Events = fs.Events + + return &pbFullStatus +} diff --git a/client/internal/routemanager/dnsinterceptor/handler.go b/client/internal/routemanager/dnsinterceptor/handler.go index c7ec47da4..12c9ff4af 100644 --- a/client/internal/routemanager/dnsinterceptor/handler.go +++ b/client/internal/routemanager/dnsinterceptor/handler.go @@ -17,13 +17,13 @@ import ( nberrors "github.com/netbirdio/netbird/client/errors" firewall "github.com/netbirdio/netbird/client/firewall/manager" - "github.com/netbirdio/netbird/client/iface/wgaddr" nbdns "github.com/netbirdio/netbird/client/internal/dns" "github.com/netbirdio/netbird/client/internal/dns/resutil" "github.com/netbirdio/netbird/client/internal/peer" "github.com/netbirdio/netbird/client/internal/peerstore" "github.com/netbirdio/netbird/client/internal/routemanager/common" "github.com/netbirdio/netbird/client/internal/routemanager/fakeip" + iface "github.com/netbirdio/netbird/client/internal/routemanager/iface" "github.com/netbirdio/netbird/client/internal/routemanager/refcounter" "github.com/netbirdio/netbird/route" "github.com/netbirdio/netbird/shared/management/domain" @@ -38,11 +38,6 @@ type internalDNATer interface { AddInternalDNATMapping(netip.Addr, netip.Addr) error } -type wgInterface interface { - Name() string - Address() wgaddr.Address -} - type DnsInterceptor struct { mu sync.RWMutex route *route.Route @@ -52,7 +47,7 @@ type DnsInterceptor struct { dnsServer nbdns.Server currentPeerKey string interceptedDomains domainMap - wgInterface wgInterface + wgInterface iface.WGIface peerStore *peerstore.Store firewall firewall.Manager fakeIPManager *fakeip.Manager @@ -250,12 +245,6 @@ func (d *DnsInterceptor) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { return } - client, err := nbdns.GetClientPrivate(d.wgInterface.Address().IP, d.wgInterface.Name(), dnsTimeout) - if err != nil { - d.writeDNSError(w, r, logger, fmt.Sprintf("create DNS client: %v", err)) - return - } - if r.Extra == nil { r.MsgHdr.AuthenticatedData = true } @@ -264,20 +253,8 @@ func (d *DnsInterceptor) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { ctx, cancel := context.WithTimeout(context.Background(), dnsTimeout) defer cancel() - startTime := time.Now() - reply, _, err := nbdns.ExchangeWithFallback(ctx, client, r, upstream) - if err != nil { - if errors.Is(err, context.DeadlineExceeded) { - elapsed := time.Since(startTime) - peerInfo := d.debugPeerTimeout(upstreamIP, peerKey) - logger.Errorf("peer DNS timeout after %v (timeout=%v) for domain=%s to peer %s (%s)%s - error: %v", - elapsed.Truncate(time.Millisecond), dnsTimeout, r.Question[0].Name, upstreamIP.String(), peerKey, peerInfo, err) - } else { - logger.Errorf("failed to exchange DNS request with %s (%s) for domain=%s: %v", upstreamIP.String(), peerKey, r.Question[0].Name, err) - } - if err := w.WriteMsg(&dns.Msg{MsgHdr: dns.MsgHdr{Rcode: dns.RcodeServerFailure, Id: r.Id}}); err != nil { - logger.Errorf("failed writing DNS response: %v", err) - } + reply := d.queryUpstreamDNS(ctx, w, r, upstream, upstreamIP, peerKey, logger) + if reply == nil { return } @@ -586,6 +563,44 @@ func determinePrefixChanges(oldPrefixes, newPrefixes []netip.Prefix) (toAdd, toR return } +// queryUpstreamDNS queries the upstream DNS server using netstack if available, otherwise uses regular client. +// Returns the DNS reply on success, or nil on error (error responses are written internally). +func (d *DnsInterceptor) queryUpstreamDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg, upstream string, upstreamIP netip.Addr, peerKey string, logger *log.Entry) *dns.Msg { + startTime := time.Now() + + nsNet := d.wgInterface.GetNet() + var reply *dns.Msg + var err error + + if nsNet != nil { + reply, err = nbdns.ExchangeWithNetstack(ctx, nsNet, r, upstream) + } else { + client, clientErr := nbdns.GetClientPrivate(d.wgInterface.Address().IP, d.wgInterface.Name(), dnsTimeout) + if clientErr != nil { + d.writeDNSError(w, r, logger, fmt.Sprintf("create DNS client: %v", clientErr)) + return nil + } + reply, _, err = nbdns.ExchangeWithFallback(ctx, client, r, upstream) + } + + if err == nil { + return reply + } + + if errors.Is(err, context.DeadlineExceeded) { + elapsed := time.Since(startTime) + peerInfo := d.debugPeerTimeout(upstreamIP, peerKey) + logger.Errorf("peer DNS timeout after %v (timeout=%v) for domain=%s to peer %s (%s)%s - error: %v", + elapsed.Truncate(time.Millisecond), dnsTimeout, r.Question[0].Name, upstreamIP.String(), peerKey, peerInfo, err) + } else { + logger.Errorf("failed to exchange DNS request with %s (%s) for domain=%s: %v", upstreamIP.String(), peerKey, r.Question[0].Name, err) + } + if err := w.WriteMsg(&dns.Msg{MsgHdr: dns.MsgHdr{Rcode: dns.RcodeServerFailure, Id: r.Id}}); err != nil { + logger.Errorf("failed writing DNS response: %v", err) + } + return nil +} + func (d *DnsInterceptor) debugPeerTimeout(peerIP netip.Addr, peerKey string) string { if d.statusRecorder == nil { return "" diff --git a/client/internal/routemanager/iface/iface_common.go b/client/internal/routemanager/iface/iface_common.go index f844f4bed..9b7bce751 100644 --- a/client/internal/routemanager/iface/iface_common.go +++ b/client/internal/routemanager/iface/iface_common.go @@ -4,6 +4,8 @@ import ( "net" "net/netip" + "golang.zx2c4.com/wireguard/tun/netstack" + "github.com/netbirdio/netbird/client/iface/device" "github.com/netbirdio/netbird/client/iface/wgaddr" ) @@ -18,4 +20,5 @@ type wgIfaceBase interface { IsUserspaceBind() bool GetFilter() device.PacketFilter GetDevice() *device.FilteredDevice + GetNet() *netstack.Net } diff --git a/client/server/debug.go b/client/server/debug.go index 056d9df21..dfad41604 100644 --- a/client/server/debug.go +++ b/client/server/debug.go @@ -173,20 +173,9 @@ func (s *Server) SetLogLevel(_ context.Context, req *proto.SetLogLevelRequest) ( log.SetLevel(level) - if s.connectClient == nil { - return nil, fmt.Errorf("connect client not initialized") + if s.connectClient != nil { + s.connectClient.SetLogLevel(level) } - engine := s.connectClient.Engine() - if engine == nil { - return nil, fmt.Errorf("engine not initialized") - } - - fwManager := engine.GetFirewallManager() - if fwManager == nil { - return nil, fmt.Errorf("firewall manager not initialized") - } - - fwManager.SetLogLevel(level) log.Infof("Log level set to %s", level.String()) diff --git a/client/server/event.go b/client/server/event.go index 9a4e0fbf5..b5c12a3a6 100644 --- a/client/server/event.go +++ b/client/server/event.go @@ -1,8 +1,6 @@ package server import ( - "context" - log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/client/proto" @@ -29,8 +27,3 @@ func (s *Server) SubscribeEvents(req *proto.SubscribeRequest, stream proto.Daemo } } } - -func (s *Server) GetEvents(context.Context, *proto.GetEventsRequest) (*proto.GetEventsResponse, error) { - events := s.statusRecorder.GetEventHistory() - return &proto.GetEventsResponse{Events: events}, nil -} diff --git a/client/server/server.go b/client/server/server.go index 7b6c4e98c..d593b3f34 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -13,15 +13,12 @@ import ( "time" "github.com/cenkalti/backoff/v4" - "golang.org/x/exp/maps" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" - "google.golang.org/protobuf/types/known/durationpb" log "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/timestamppb" "github.com/netbirdio/netbird/client/internal/auth" "github.com/netbirdio/netbird/client/internal/profilemanager" @@ -1067,11 +1064,9 @@ func (s *Server) Status( if msg.GetFullPeerStatus { s.runProbes(msg.ShouldRunProbes) fullStatus := s.statusRecorder.GetFullStatus() - pbFullStatus := toProtoFullStatus(fullStatus) + pbFullStatus := fullStatus.ToProto() pbFullStatus.Events = s.statusRecorder.GetEventHistory() - pbFullStatus.SshServerState = s.getSSHServerState() - statusResponse.FullStatus = pbFullStatus } @@ -1600,94 +1595,6 @@ func parseEnvDuration(envVar string, defaultDuration time.Duration) time.Duratio return defaultDuration } -func toProtoFullStatus(fullStatus peer.FullStatus) *proto.FullStatus { - pbFullStatus := proto.FullStatus{ - ManagementState: &proto.ManagementState{}, - SignalState: &proto.SignalState{}, - LocalPeerState: &proto.LocalPeerState{}, - Peers: []*proto.PeerState{}, - } - - pbFullStatus.ManagementState.URL = fullStatus.ManagementState.URL - pbFullStatus.ManagementState.Connected = fullStatus.ManagementState.Connected - if err := fullStatus.ManagementState.Error; err != nil { - pbFullStatus.ManagementState.Error = err.Error() - } - - pbFullStatus.SignalState.URL = fullStatus.SignalState.URL - pbFullStatus.SignalState.Connected = fullStatus.SignalState.Connected - if err := fullStatus.SignalState.Error; err != nil { - pbFullStatus.SignalState.Error = err.Error() - } - - pbFullStatus.LocalPeerState.IP = fullStatus.LocalPeerState.IP - pbFullStatus.LocalPeerState.PubKey = fullStatus.LocalPeerState.PubKey - pbFullStatus.LocalPeerState.KernelInterface = fullStatus.LocalPeerState.KernelInterface - pbFullStatus.LocalPeerState.Fqdn = fullStatus.LocalPeerState.FQDN - pbFullStatus.LocalPeerState.RosenpassPermissive = fullStatus.RosenpassState.Permissive - pbFullStatus.LocalPeerState.RosenpassEnabled = fullStatus.RosenpassState.Enabled - pbFullStatus.LocalPeerState.Networks = maps.Keys(fullStatus.LocalPeerState.Routes) - pbFullStatus.NumberOfForwardingRules = int32(fullStatus.NumOfForwardingRules) - pbFullStatus.LazyConnectionEnabled = fullStatus.LazyConnectionEnabled - - for _, peerState := range fullStatus.Peers { - pbPeerState := &proto.PeerState{ - IP: peerState.IP, - PubKey: peerState.PubKey, - ConnStatus: peerState.ConnStatus.String(), - ConnStatusUpdate: timestamppb.New(peerState.ConnStatusUpdate), - Relayed: peerState.Relayed, - LocalIceCandidateType: peerState.LocalIceCandidateType, - RemoteIceCandidateType: peerState.RemoteIceCandidateType, - LocalIceCandidateEndpoint: peerState.LocalIceCandidateEndpoint, - RemoteIceCandidateEndpoint: peerState.RemoteIceCandidateEndpoint, - RelayAddress: peerState.RelayServerAddress, - Fqdn: peerState.FQDN, - LastWireguardHandshake: timestamppb.New(peerState.LastWireguardHandshake), - BytesRx: peerState.BytesRx, - BytesTx: peerState.BytesTx, - RosenpassEnabled: peerState.RosenpassEnabled, - Networks: maps.Keys(peerState.GetRoutes()), - Latency: durationpb.New(peerState.Latency), - SshHostKey: peerState.SSHHostKey, - } - pbFullStatus.Peers = append(pbFullStatus.Peers, pbPeerState) - } - - for _, relayState := range fullStatus.Relays { - pbRelayState := &proto.RelayState{ - URI: relayState.URI, - Available: relayState.Err == nil, - } - if err := relayState.Err; err != nil { - pbRelayState.Error = err.Error() - } - pbFullStatus.Relays = append(pbFullStatus.Relays, pbRelayState) - } - - for _, dnsState := range fullStatus.NSGroupStates { - var err string - if dnsState.Error != nil { - err = dnsState.Error.Error() - } - - var servers []string - for _, server := range dnsState.Servers { - servers = append(servers, server.String()) - } - - pbDnsState := &proto.NSGroupState{ - Servers: servers, - Domains: dnsState.Domains, - Enabled: dnsState.Enabled, - Error: err, - } - pbFullStatus.DnsServers = append(pbFullStatus.DnsServers, pbDnsState) - } - - return &pbFullStatus -} - // sendTerminalNotification sends a terminal notification message // to inform the user that the NetBird connection session has expired. func sendTerminalNotification() error { diff --git a/client/status/status.go b/client/status/status.go index 4f31f3637..305797eee 100644 --- a/client/status/status.go +++ b/client/status/status.go @@ -325,61 +325,64 @@ func sortPeersByIP(peersStateDetail []PeerStateDetailOutput) { } } -func ParseToJSON(overview OutputOverview) (string, error) { - jsonBytes, err := json.Marshal(overview) +// JSON returns the status overview as a JSON string. +func (o *OutputOverview) JSON() (string, error) { + jsonBytes, err := json.Marshal(o) if err != nil { return "", fmt.Errorf("json marshal failed") } return string(jsonBytes), err } -func ParseToYAML(overview OutputOverview) (string, error) { - yamlBytes, err := yaml.Marshal(overview) +// YAML returns the status overview as a YAML string. +func (o *OutputOverview) YAML() (string, error) { + yamlBytes, err := yaml.Marshal(o) if err != nil { return "", fmt.Errorf("yaml marshal failed") } return string(yamlBytes), nil } -func ParseGeneralSummary(overview OutputOverview, showURL bool, showRelays bool, showNameServers bool, showSSHSessions bool) string { +// GeneralSummary returns a general summary of the status overview. +func (o *OutputOverview) GeneralSummary(showURL bool, showRelays bool, showNameServers bool, showSSHSessions bool) string { var managementConnString string - if overview.ManagementState.Connected { + if o.ManagementState.Connected { managementConnString = "Connected" if showURL { - managementConnString = fmt.Sprintf("%s to %s", managementConnString, overview.ManagementState.URL) + managementConnString = fmt.Sprintf("%s to %s", managementConnString, o.ManagementState.URL) } } else { managementConnString = "Disconnected" - if overview.ManagementState.Error != "" { - managementConnString = fmt.Sprintf("%s, reason: %s", managementConnString, overview.ManagementState.Error) + if o.ManagementState.Error != "" { + managementConnString = fmt.Sprintf("%s, reason: %s", managementConnString, o.ManagementState.Error) } } var signalConnString string - if overview.SignalState.Connected { + if o.SignalState.Connected { signalConnString = "Connected" if showURL { - signalConnString = fmt.Sprintf("%s to %s", signalConnString, overview.SignalState.URL) + signalConnString = fmt.Sprintf("%s to %s", signalConnString, o.SignalState.URL) } } else { signalConnString = "Disconnected" - if overview.SignalState.Error != "" { - signalConnString = fmt.Sprintf("%s, reason: %s", signalConnString, overview.SignalState.Error) + if o.SignalState.Error != "" { + signalConnString = fmt.Sprintf("%s, reason: %s", signalConnString, o.SignalState.Error) } } interfaceTypeString := "Userspace" - interfaceIP := overview.IP - if overview.KernelInterface { + interfaceIP := o.IP + if o.KernelInterface { interfaceTypeString = "Kernel" - } else if overview.IP == "" { + } else if o.IP == "" { interfaceTypeString = "N/A" interfaceIP = "N/A" } var relaysString string if showRelays { - for _, relay := range overview.Relays.Details { + for _, relay := range o.Relays.Details { available := "Available" reason := "" @@ -395,18 +398,18 @@ func ParseGeneralSummary(overview OutputOverview, showURL bool, showRelays bool, relaysString += fmt.Sprintf("\n [%s] is %s%s", relay.URI, available, reason) } } else { - relaysString = fmt.Sprintf("%d/%d Available", overview.Relays.Available, overview.Relays.Total) + relaysString = fmt.Sprintf("%d/%d Available", o.Relays.Available, o.Relays.Total) } networks := "-" - if len(overview.Networks) > 0 { - sort.Strings(overview.Networks) - networks = strings.Join(overview.Networks, ", ") + if len(o.Networks) > 0 { + sort.Strings(o.Networks) + networks = strings.Join(o.Networks, ", ") } var dnsServersString string if showNameServers { - for _, nsServerGroup := range overview.NSServerGroups { + for _, nsServerGroup := range o.NSServerGroups { enabled := "Available" if !nsServerGroup.Enabled { enabled = "Unavailable" @@ -430,25 +433,25 @@ func ParseGeneralSummary(overview OutputOverview, showURL bool, showRelays bool, ) } } else { - dnsServersString = fmt.Sprintf("%d/%d Available", countEnabled(overview.NSServerGroups), len(overview.NSServerGroups)) + dnsServersString = fmt.Sprintf("%d/%d Available", countEnabled(o.NSServerGroups), len(o.NSServerGroups)) } rosenpassEnabledStatus := "false" - if overview.RosenpassEnabled { + if o.RosenpassEnabled { rosenpassEnabledStatus = "true" - if overview.RosenpassPermissive { + if o.RosenpassPermissive { rosenpassEnabledStatus = "true (permissive)" //nolint:gosec } } lazyConnectionEnabledStatus := "false" - if overview.LazyConnectionEnabled { + if o.LazyConnectionEnabled { lazyConnectionEnabledStatus = "true" } sshServerStatus := "Disabled" - if overview.SSHServerState.Enabled { - sessionCount := len(overview.SSHServerState.Sessions) + if o.SSHServerState.Enabled { + sessionCount := len(o.SSHServerState.Sessions) if sessionCount > 0 { sessionWord := "session" if sessionCount > 1 { @@ -460,7 +463,7 @@ func ParseGeneralSummary(overview OutputOverview, showURL bool, showRelays bool, } if showSSHSessions && sessionCount > 0 { - for _, session := range overview.SSHServerState.Sessions { + for _, session := range o.SSHServerState.Sessions { var sessionDisplay string if session.JWTUsername != "" { sessionDisplay = fmt.Sprintf("[%s@%s -> %s] %s", @@ -484,7 +487,7 @@ func ParseGeneralSummary(overview OutputOverview, showURL bool, showRelays bool, } } - peersCountString := fmt.Sprintf("%d/%d Connected", overview.Peers.Connected, overview.Peers.Total) + peersCountString := fmt.Sprintf("%d/%d Connected", o.Peers.Connected, o.Peers.Total) goos := runtime.GOOS goarch := runtime.GOARCH @@ -512,30 +515,31 @@ func ParseGeneralSummary(overview OutputOverview, showURL bool, showRelays bool, "Forwarding rules: %d\n"+ "Peers count: %s\n", fmt.Sprintf("%s/%s%s", goos, goarch, goarm), - overview.DaemonVersion, + o.DaemonVersion, version.NetbirdVersion(), - overview.ProfileName, + o.ProfileName, managementConnString, signalConnString, relaysString, dnsServersString, - domain.Domain(overview.FQDN).SafeString(), + domain.Domain(o.FQDN).SafeString(), interfaceIP, interfaceTypeString, rosenpassEnabledStatus, lazyConnectionEnabledStatus, sshServerStatus, networks, - overview.NumberOfForwardingRules, + o.NumberOfForwardingRules, peersCountString, ) return summary } -func ParseToFullDetailSummary(overview OutputOverview) string { - parsedPeersString := parsePeers(overview.Peers, overview.RosenpassEnabled, overview.RosenpassPermissive) - parsedEventsString := parseEvents(overview.Events) - summary := ParseGeneralSummary(overview, true, true, true, true) +// FullDetailSummary returns a full detailed summary with peer details and events. +func (o *OutputOverview) FullDetailSummary() string { + parsedPeersString := parsePeers(o.Peers, o.RosenpassEnabled, o.RosenpassPermissive) + parsedEventsString := parseEvents(o.Events) + summary := o.GeneralSummary(true, true, true, true) return fmt.Sprintf( "Peers detail:"+ diff --git a/client/status/status_test.go b/client/status/status_test.go index 1dca1e5b1..f4585827b 100644 --- a/client/status/status_test.go +++ b/client/status/status_test.go @@ -268,7 +268,7 @@ func TestSortingOfPeers(t *testing.T) { } func TestParsingToJSON(t *testing.T) { - jsonString, _ := ParseToJSON(overview) + jsonString, _ := overview.JSON() //@formatter:off expectedJSONString := ` @@ -404,7 +404,7 @@ func TestParsingToJSON(t *testing.T) { } func TestParsingToYAML(t *testing.T) { - yaml, _ := ParseToYAML(overview) + yaml, _ := overview.YAML() expectedYAML := `peers: @@ -511,7 +511,7 @@ func TestParsingToDetail(t *testing.T) { lastConnectionUpdate2 := timeAgo(overview.Peers.Details[1].LastStatusUpdate) lastHandshake2 := timeAgo(overview.Peers.Details[1].LastWireguardHandshake) - detail := ParseToFullDetailSummary(overview) + detail := overview.FullDetailSummary() expectedDetail := fmt.Sprintf( `Peers detail: @@ -575,7 +575,7 @@ Peers count: 2/2 Connected } func TestParsingToShortVersion(t *testing.T) { - shortVersion := ParseGeneralSummary(overview, false, false, false, false) + shortVersion := overview.GeneralSummary(false, false, false, false) expectedString := fmt.Sprintf("OS: %s/%s", runtime.GOOS, runtime.GOARCH) + ` Daemon version: 0.14.1 diff --git a/client/ui/debug.go b/client/ui/debug.go index 51fa28575..a057b2a85 100644 --- a/client/ui/debug.go +++ b/client/ui/debug.go @@ -441,7 +441,7 @@ func (s *serviceClient) collectDebugData( var postUpStatusOutput string if postUpStatus != nil { overview := nbstatus.ConvertToStatusOutputOverview(postUpStatus, params.anonymize, "", nil, nil, nil, "", profName) - postUpStatusOutput = nbstatus.ParseToFullDetailSummary(overview) + postUpStatusOutput = overview.FullDetailSummary() } headerPostUp := fmt.Sprintf("----- NetBird post-up - Timestamp: %s", time.Now().Format(time.RFC3339)) statusOutput := fmt.Sprintf("%s\n%s", headerPostUp, postUpStatusOutput) @@ -458,7 +458,7 @@ func (s *serviceClient) collectDebugData( var preDownStatusOutput string if preDownStatus != nil { overview := nbstatus.ConvertToStatusOutputOverview(preDownStatus, params.anonymize, "", nil, nil, nil, "", profName) - preDownStatusOutput = nbstatus.ParseToFullDetailSummary(overview) + preDownStatusOutput = overview.FullDetailSummary() } headerPreDown := fmt.Sprintf("----- NetBird pre-down - Timestamp: %s - Duration: %s", time.Now().Format(time.RFC3339), params.duration) @@ -595,7 +595,7 @@ func (s *serviceClient) createDebugBundle(anonymize bool, systemInfo bool, uploa var statusOutput string if statusResp != nil { overview := nbstatus.ConvertToStatusOutputOverview(statusResp, anonymize, "", nil, nil, nil, "", profName) - statusOutput = nbstatus.ParseToFullDetailSummary(overview) + statusOutput = overview.FullDetailSummary() } request := &proto.DebugBundleRequest{ diff --git a/client/wasm/cmd/main.go b/client/wasm/cmd/main.go index 238e272fa..2647c2f0d 100644 --- a/client/wasm/cmd/main.go +++ b/client/wasm/cmd/main.go @@ -9,20 +9,29 @@ import ( "time" log "github.com/sirupsen/logrus" + "google.golang.org/protobuf/encoding/protojson" netbird "github.com/netbirdio/netbird/client/embed" + "github.com/netbirdio/netbird/client/proto" sshdetection "github.com/netbirdio/netbird/client/ssh/detection" + nbstatus "github.com/netbirdio/netbird/client/status" "github.com/netbirdio/netbird/client/wasm/internal/http" "github.com/netbirdio/netbird/client/wasm/internal/rdp" "github.com/netbirdio/netbird/client/wasm/internal/ssh" "github.com/netbirdio/netbird/util" + "github.com/netbirdio/netbird/version" ) const ( clientStartTimeout = 30 * time.Second clientStopTimeout = 10 * time.Second + pingTimeout = 10 * time.Second defaultLogLevel = "warn" defaultSSHDetectionTimeout = 20 * time.Second + + icmpEchoRequest = 8 + icmpCodeEcho = 0 + pingBufferSize = 1500 ) func main() { @@ -113,18 +122,45 @@ func createStopMethod(client *netbird.Client) js.Func { }) } +// validateSSHArgs validates SSH connection arguments +func validateSSHArgs(args []js.Value) (host string, port int, username string, err js.Value) { + if len(args) < 2 { + return "", 0, "", js.ValueOf("error: requires host and port") + } + + if args[0].Type() != js.TypeString { + return "", 0, "", js.ValueOf("host parameter must be a string") + } + if args[1].Type() != js.TypeNumber { + return "", 0, "", js.ValueOf("port parameter must be a number") + } + + host = args[0].String() + port = args[1].Int() + username = "root" + + if len(args) > 2 { + if args[2].Type() == js.TypeString && args[2].String() != "" { + username = args[2].String() + } else if args[2].Type() != js.TypeString { + return "", 0, "", js.ValueOf("username parameter must be a string") + } + } + + return host, port, username, js.Undefined() +} + // createSSHMethod creates the SSH connection method func createSSHMethod(client *netbird.Client) js.Func { return js.FuncOf(func(this js.Value, args []js.Value) any { - if len(args) < 2 { - return js.ValueOf("error: requires host and port") - } - - host := args[0].String() - port := args[1].Int() - username := "root" - if len(args) > 2 && args[2].String() != "" { - username = args[2].String() + host, port, username, validationErr := validateSSHArgs(args) + if !validationErr.IsUndefined() { + if validationErr.Type() == js.TypeString && validationErr.String() == "error: requires host and port" { + return validationErr + } + return createPromise(func(resolve, reject js.Value) { + reject.Invoke(validationErr) + }) } var jwtToken string @@ -154,6 +190,110 @@ func createSSHMethod(client *netbird.Client) js.Func { }) } +func performPing(client *netbird.Client, hostname string) { + ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + defer cancel() + + start := time.Now() + conn, err := client.Dial(ctx, "ping", hostname) + if err != nil { + js.Global().Get("console").Call("log", fmt.Sprintf("Ping to %s failed: %v", hostname, err)) + return + } + defer func() { + if err := conn.Close(); err != nil { + log.Debugf("failed to close ping connection: %v", err) + } + }() + + icmpData := make([]byte, 8) + icmpData[0] = icmpEchoRequest + icmpData[1] = icmpCodeEcho + + if _, err := conn.Write(icmpData); err != nil { + js.Global().Get("console").Call("log", fmt.Sprintf("Ping to %s write failed: %v", hostname, err)) + return + } + + buf := make([]byte, pingBufferSize) + if _, err := conn.Read(buf); err != nil { + js.Global().Get("console").Call("log", fmt.Sprintf("Ping to %s read failed: %v", hostname, err)) + return + } + + latency := time.Since(start) + js.Global().Get("console").Call("log", fmt.Sprintf("Ping to %s: %dms", hostname, latency.Milliseconds())) +} + +func performPingTCP(client *netbird.Client, hostname string, port int) { + ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + defer cancel() + + address := fmt.Sprintf("%s:%d", hostname, port) + start := time.Now() + conn, err := client.Dial(ctx, "tcp", address) + if err != nil { + js.Global().Get("console").Call("log", fmt.Sprintf("TCP ping to %s failed: %v", address, err)) + return + } + latency := time.Since(start) + + if err := conn.Close(); err != nil { + log.Debugf("failed to close TCP connection: %v", err) + } + + js.Global().Get("console").Call("log", fmt.Sprintf("TCP ping to %s succeeded: %dms", address, latency.Milliseconds())) +} + +// createPingMethod creates the ping method +func createPingMethod(client *netbird.Client) js.Func { + return js.FuncOf(func(this js.Value, args []js.Value) any { + if len(args) < 1 { + return js.ValueOf("error: hostname required") + } + + if args[0].Type() != js.TypeString { + return createPromise(func(resolve, reject js.Value) { + reject.Invoke(js.ValueOf("hostname parameter must be a string")) + }) + } + + hostname := args[0].String() + return createPromise(func(resolve, reject js.Value) { + performPing(client, hostname) + resolve.Invoke(js.Undefined()) + }) + }) +} + +// createPingTCPMethod creates the pingtcp method +func createPingTCPMethod(client *netbird.Client) js.Func { + return js.FuncOf(func(this js.Value, args []js.Value) any { + if len(args) < 2 { + return js.ValueOf("error: hostname and port required") + } + + if args[0].Type() != js.TypeString { + return createPromise(func(resolve, reject js.Value) { + reject.Invoke(js.ValueOf("hostname parameter must be a string")) + }) + } + + if args[1].Type() != js.TypeNumber { + return createPromise(func(resolve, reject js.Value) { + reject.Invoke(js.ValueOf("port parameter must be a number")) + }) + } + + hostname := args[0].String() + port := args[1].Int() + return createPromise(func(resolve, reject js.Value) { + performPingTCP(client, hostname, port) + resolve.Invoke(js.Undefined()) + }) + }) +} + // createProxyRequestMethod creates the proxyRequest method func createProxyRequestMethod(client *netbird.Client) js.Func { return js.FuncOf(func(this js.Value, args []js.Value) any { @@ -162,6 +302,11 @@ func createProxyRequestMethod(client *netbird.Client) js.Func { } request := args[0] + if request.Type() != js.TypeObject { + return createPromise(func(resolve, reject js.Value) { + reject.Invoke(js.ValueOf("request parameter must be an object")) + }) + } return createPromise(func(resolve, reject js.Value) { response, err := http.ProxyRequest(client, request) @@ -181,11 +326,145 @@ func createRDPProxyMethod(client *netbird.Client) js.Func { return js.ValueOf("error: hostname and port required") } + if args[0].Type() != js.TypeString { + return createPromise(func(resolve, reject js.Value) { + reject.Invoke(js.ValueOf("hostname parameter must be a string")) + }) + } + if args[1].Type() != js.TypeString { + return createPromise(func(resolve, reject js.Value) { + reject.Invoke(js.ValueOf("port parameter must be a string")) + }) + } + proxy := rdp.NewRDCleanPathProxy(client) return proxy.CreateProxy(args[0].String(), args[1].String()) }) } +// getStatusOverview is a helper to get the status overview +func getStatusOverview(client *netbird.Client) (nbstatus.OutputOverview, error) { + fullStatus, err := client.Status() + if err != nil { + return nbstatus.OutputOverview{}, err + } + + pbFullStatus := fullStatus.ToProto() + statusResp := &proto.StatusResponse{ + DaemonVersion: version.NetbirdVersion(), + FullStatus: pbFullStatus, + } + + return nbstatus.ConvertToStatusOutputOverview(statusResp, false, "", nil, nil, nil, "", ""), nil +} + +// createStatusMethod creates the status method that returns JSON +func createStatusMethod(client *netbird.Client) js.Func { + return js.FuncOf(func(_ js.Value, args []js.Value) any { + return createPromise(func(resolve, reject js.Value) { + overview, err := getStatusOverview(client) + if err != nil { + reject.Invoke(js.ValueOf(err.Error())) + return + } + + jsonStr, err := overview.JSON() + if err != nil { + reject.Invoke(js.ValueOf(err.Error())) + return + } + jsonObj := js.Global().Get("JSON").Call("parse", jsonStr) + resolve.Invoke(jsonObj) + }) + }) +} + +// createStatusSummaryMethod creates the statusSummary method +func createStatusSummaryMethod(client *netbird.Client) js.Func { + return js.FuncOf(func(_ js.Value, args []js.Value) any { + return createPromise(func(resolve, reject js.Value) { + overview, err := getStatusOverview(client) + if err != nil { + reject.Invoke(js.ValueOf(err.Error())) + return + } + + summary := overview.GeneralSummary(false, false, false, false) + js.Global().Get("console").Call("log", summary) + resolve.Invoke(js.Undefined()) + }) + }) +} + +// createStatusDetailMethod creates the statusDetail method +func createStatusDetailMethod(client *netbird.Client) js.Func { + return js.FuncOf(func(_ js.Value, args []js.Value) any { + return createPromise(func(resolve, reject js.Value) { + overview, err := getStatusOverview(client) + if err != nil { + reject.Invoke(js.ValueOf(err.Error())) + return + } + + detail := overview.FullDetailSummary() + js.Global().Get("console").Call("log", detail) + resolve.Invoke(js.Undefined()) + }) + }) +} + +// createGetSyncResponseMethod creates the getSyncResponse method that returns the latest sync response as JSON +func createGetSyncResponseMethod(client *netbird.Client) js.Func { + return js.FuncOf(func(_ js.Value, args []js.Value) any { + return createPromise(func(resolve, reject js.Value) { + syncResp, err := client.GetLatestSyncResponse() + if err != nil { + reject.Invoke(js.ValueOf(err.Error())) + return + } + + options := protojson.MarshalOptions{ + EmitUnpopulated: true, + UseProtoNames: true, + AllowPartial: true, + } + jsonBytes, err := options.Marshal(syncResp) + if err != nil { + reject.Invoke(js.ValueOf(fmt.Sprintf("marshal sync response: %v", err))) + return + } + + jsonObj := js.Global().Get("JSON").Call("parse", string(jsonBytes)) + resolve.Invoke(jsonObj) + }) + }) +} + +// createSetLogLevelMethod creates the setLogLevel method to dynamically change logging level +func createSetLogLevelMethod(client *netbird.Client) js.Func { + return js.FuncOf(func(_ js.Value, args []js.Value) any { + if len(args) < 1 { + return js.ValueOf("error: log level required") + } + + if args[0].Type() != js.TypeString { + return createPromise(func(resolve, reject js.Value) { + reject.Invoke(js.ValueOf("log level parameter must be a string")) + }) + } + + logLevel := args[0].String() + return createPromise(func(resolve, reject js.Value) { + if err := client.SetLogLevel(logLevel); err != nil { + reject.Invoke(js.ValueOf(fmt.Sprintf("set log level: %v", err))) + return + } + log.Infof("Log level set to: %s", logLevel) + resolve.Invoke(js.ValueOf(true)) + }) + }) +} + // createPromise is a helper to create JavaScript promises func createPromise(handler func(resolve, reject js.Value)) js.Value { return js.Global().Get("Promise").New(js.FuncOf(func(_ js.Value, promiseArgs []js.Value) any { @@ -237,17 +516,24 @@ func createClientObject(client *netbird.Client) js.Value { obj["start"] = createStartMethod(client) obj["stop"] = createStopMethod(client) + obj["ping"] = createPingMethod(client) + obj["pingtcp"] = createPingTCPMethod(client) obj["detectSSHServerType"] = createDetectSSHServerMethod(client) obj["createSSHConnection"] = createSSHMethod(client) obj["proxyRequest"] = createProxyRequestMethod(client) obj["createRDPProxy"] = createRDPProxyMethod(client) + obj["status"] = createStatusMethod(client) + obj["statusSummary"] = createStatusSummaryMethod(client) + obj["statusDetail"] = createStatusDetailMethod(client) + obj["getSyncResponse"] = createGetSyncResponseMethod(client) + obj["setLogLevel"] = createSetLogLevelMethod(client) return js.ValueOf(obj) } // netBirdClientConstructor acts as a JavaScript constructor function -func netBirdClientConstructor(this js.Value, args []js.Value) any { - return js.Global().Get("Promise").New(js.FuncOf(func(this js.Value, promiseArgs []js.Value) any { +func netBirdClientConstructor(_ js.Value, args []js.Value) any { + return js.Global().Get("Promise").New(js.FuncOf(func(_ js.Value, promiseArgs []js.Value) any { resolve := promiseArgs[0] reject := promiseArgs[1] From cac9326d3d39179a93ac32f9f28f5691e653c060 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Wed, 14 Jan 2026 17:09:17 +0100 Subject: [PATCH 047/374] [management] fetch all users data from external cache in one request (#5104) --------- Co-authored-by: pascal --- management/server/cache/idp.go | 25 ++++++++++++++++++++++++ management/server/user.go | 35 ++++++++++++++++++++++++++-------- management/server/user_test.go | 8 ++++++-- 3 files changed, 58 insertions(+), 10 deletions(-) diff --git a/management/server/cache/idp.go b/management/server/cache/idp.go index 19dfc0f38..6ec42e217 100644 --- a/management/server/cache/idp.go +++ b/management/server/cache/idp.go @@ -26,6 +26,8 @@ type UserDataCache interface { Get(ctx context.Context, key string) (*idp.UserData, error) Set(ctx context.Context, key string, value *idp.UserData, expiration time.Duration) error Delete(ctx context.Context, key string) error + GetUsers(ctx context.Context, key string) ([]*idp.UserData, error) + SetUsers(ctx context.Context, key string, users []*idp.UserData, expiration time.Duration) error } // UserDataCacheImpl is a struct that implements the UserDataCache interface. @@ -51,6 +53,29 @@ func (u *UserDataCacheImpl) Delete(ctx context.Context, key string) error { return u.cache.Delete(ctx, key) } +func (u *UserDataCacheImpl) GetUsers(ctx context.Context, key string) ([]*idp.UserData, error) { + var users []*idp.UserData + v, err := u.cache.Get(ctx, key, &users) + if err != nil { + return nil, err + } + + switch v := v.(type) { + case []*idp.UserData: + return v, nil + case *[]*idp.UserData: + return *v, nil + case []byte: + return unmarshalUserData(v) + } + + return nil, fmt.Errorf("unexpected type: %T", v) +} + +func (u *UserDataCacheImpl) SetUsers(ctx context.Context, key string, users []*idp.UserData, expiration time.Duration) error { + return u.cache.Set(ctx, key, users, store.WithExpiration(expiration)) +} + // NewUserDataCache creates a new UserDataCacheImpl object. func NewUserDataCache(store store.StoreInterface) *UserDataCacheImpl { simpleCache := cache.New[any](store) diff --git a/management/server/user.go b/management/server/user.go index 4f9007b61..d12dd4f11 100644 --- a/management/server/user.go +++ b/management/server/user.go @@ -911,10 +911,12 @@ func (am *DefaultAccountManager) GetUsersFromAccount(ctx context.Context, accoun accountUsers := []*types.User{} switch { case allowed: + start := time.Now() accountUsers, err = am.Store.GetAccountUsers(ctx, store.LockingStrengthNone, accountID) if err != nil { return nil, err } + log.WithContext(ctx).Tracef("Got %d users from account %s after %s", len(accountUsers), accountID, time.Since(start)) case user != nil && user.AccountID == accountID: accountUsers = append(accountUsers, user) default: @@ -933,23 +935,40 @@ func (am *DefaultAccountManager) BuildUserInfosForAccount(ctx context.Context, a if !isNil(am.idpManager) && !IsEmbeddedIdp(am.idpManager) { users := make(map[string]userLoggedInOnce, len(accountUsers)) usersFromIntegration := make([]*idp.UserData, 0) + filtered := make(map[string]*idp.UserData, len(accountUsers)) + log.WithContext(ctx).Tracef("Querying users from IDP for account %s", accountID) + start := time.Now() + + integrationKeys := make(map[string]struct{}) for _, user := range accountUsers { if user.Issued == types.UserIssuedIntegration { - key := user.IntegrationReference.CacheKey(accountID, user.Id) - info, err := am.externalCacheManager.Get(am.ctx, key) - if err != nil { - log.WithContext(ctx).Infof("Get ExternalCache for key: %s, error: %s", key, err) - users[user.Id] = true - continue - } - usersFromIntegration = append(usersFromIntegration, info) + integrationKeys[user.IntegrationReference.CacheKey(accountID)] = struct{}{} continue } if !user.IsServiceUser { users[user.Id] = userLoggedInOnce(!user.GetLastLogin().IsZero()) } } + + for key := range integrationKeys { + usersData, err := am.externalCacheManager.GetUsers(am.ctx, key) + if err != nil { + log.WithContext(ctx).Debugf("GetUsers from ExternalCache for key: %s, error: %s", key, err) + continue + } + for _, ud := range usersData { + filtered[ud.ID] = ud + } + } + + for _, ud := range filtered { + usersFromIntegration = append(usersFromIntegration, ud) + } + + log.WithContext(ctx).Tracef("Got user info from external cache after %s", time.Since(start)) + start = time.Now() queriedUsers, err = am.lookupCache(ctx, users, accountID) + log.WithContext(ctx).Tracef("Got user info from cache for %d users after %s", len(queriedUsers), time.Since(start)) if err != nil { return nil, err } diff --git a/management/server/user_test.go b/management/server/user_test.go index 6d356a8b1..2dd1cea2e 100644 --- a/management/server/user_test.go +++ b/management/server/user_test.go @@ -1086,8 +1086,12 @@ func TestDefaultAccountManager_ExternalCache(t *testing.T) { assert.NoError(t, err) cacheManager := am.GetExternalCacheManager() - cacheKey := externalUser.IntegrationReference.CacheKey(mockAccountID, externalUser.Id) - err = cacheManager.Set(context.Background(), cacheKey, &idp.UserData{ID: externalUser.Id, Name: "Test User", Email: "user@example.com"}, time.Minute) + tud := &idp.UserData{ID: externalUser.Id, Name: "Test User", Email: "user@example.com"} + cacheKeyUser := externalUser.IntegrationReference.CacheKey(mockAccountID, externalUser.Id) + err = cacheManager.Set(context.Background(), cacheKeyUser, tud, time.Minute) + assert.NoError(t, err) + cacheKeyAccount := externalUser.IntegrationReference.CacheKey(mockAccountID) + err = cacheManager.SetUsers(context.Background(), cacheKeyAccount, []*idp.UserData{tud}, time.Minute) assert.NoError(t, err) infos, err := am.GetUsersFromAccount(context.Background(), mockAccountID, mockUserID) From efb954b7d63bfca7ea8b6303e6794c0a022941d8 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Thu, 15 Jan 2026 16:39:14 +0100 Subject: [PATCH 048/374] [management] adapt ratelimiting (#5080) --- .../server/http/middleware/auth_middleware.go | 7 +- .../http/middleware/auth_middleware_test.go | 97 +++++++++++++++++++ 2 files changed, 103 insertions(+), 1 deletion(-) diff --git a/management/server/http/middleware/auth_middleware.go b/management/server/http/middleware/auth_middleware.go index 966a6802a..257347153 100644 --- a/management/server/http/middleware/auth_middleware.go +++ b/management/server/http/middleware/auth_middleware.go @@ -178,7 +178,7 @@ func (m *AuthMiddleware) checkPATFromRequest(r *http.Request, authHeaderParts [] m.patUsageTracker.IncrementUsage(token) } - if m.rateLimiter != nil { + if m.rateLimiter != nil && !isTerraformRequest(r) { if !m.rateLimiter.Allow(token) { return r, status.Errorf(status.TooManyRequests, "too many requests") } @@ -214,6 +214,11 @@ func (m *AuthMiddleware) checkPATFromRequest(r *http.Request, authHeaderParts [] return nbcontext.SetUserAuthInRequest(r, userAuth), nil } +func isTerraformRequest(r *http.Request) bool { + ua := strings.ToLower(r.Header.Get("User-Agent")) + return strings.Contains(ua, "terraform") +} + // getTokenFromJWTRequest is a "TokenExtractor" that takes auth header parts and extracts // the JWT token from the Authorization header. func getTokenFromJWTRequest(authHeaderParts []string) (string, error) { diff --git a/management/server/http/middleware/auth_middleware_test.go b/management/server/http/middleware/auth_middleware_test.go index ba4d16796..05ca59419 100644 --- a/management/server/http/middleware/auth_middleware_test.go +++ b/management/server/http/middleware/auth_middleware_test.go @@ -508,6 +508,103 @@ func TestAuthMiddleware_RateLimiting(t *testing.T) { handler.ServeHTTP(rec, req) assert.Equal(t, http.StatusTooManyRequests, rec.Code, "Second request after cleanup should be rate limited again") }) + + t.Run("Terraform User Agent Not Rate Limited", func(t *testing.T) { + rateLimitConfig := &RateLimiterConfig{ + RequestsPerMinute: 1, + Burst: 1, + CleanupInterval: 5 * time.Minute, + LimiterTTL: 10 * time.Minute, + } + + authMiddleware := NewAuthMiddleware( + mockAuth, + func(ctx context.Context, userAuth nbauth.UserAuth) (string, string, error) { + return userAuth.AccountId, userAuth.UserId, nil + }, + func(ctx context.Context, userAuth nbauth.UserAuth) error { + return nil + }, + func(ctx context.Context, userAuth nbauth.UserAuth) (*types.User, error) { + return &types.User{}, nil + }, + rateLimitConfig, + nil, + ) + + handler := authMiddleware.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + // Test various Terraform user agent formats + terraformUserAgents := []string{ + "Terraform/1.5.0", + "terraform/1.0.0", + "Terraform-Provider/2.0.0", + "Mozilla/5.0 (compatible; Terraform/1.3.0)", + } + + for _, userAgent := range terraformUserAgents { + t.Run("UserAgent: "+userAgent, func(t *testing.T) { + successCount := 0 + for i := 0; i < 10; i++ { + req := httptest.NewRequest("GET", "http://testing/test", nil) + req.Header.Set("Authorization", "Token "+PAT) + req.Header.Set("User-Agent", userAgent) + rec := httptest.NewRecorder() + + handler.ServeHTTP(rec, req) + if rec.Code == http.StatusOK { + successCount++ + } + } + + assert.Equal(t, 10, successCount, "All Terraform user agent requests should succeed (not rate limited)") + }) + } + }) + + t.Run("Non-Terraform User Agent With PAT Is Rate Limited", func(t *testing.T) { + rateLimitConfig := &RateLimiterConfig{ + RequestsPerMinute: 1, + Burst: 1, + CleanupInterval: 5 * time.Minute, + LimiterTTL: 10 * time.Minute, + } + + authMiddleware := NewAuthMiddleware( + mockAuth, + func(ctx context.Context, userAuth nbauth.UserAuth) (string, string, error) { + return userAuth.AccountId, userAuth.UserId, nil + }, + func(ctx context.Context, userAuth nbauth.UserAuth) error { + return nil + }, + func(ctx context.Context, userAuth nbauth.UserAuth) (*types.User, error) { + return &types.User{}, nil + }, + rateLimitConfig, + nil, + ) + + handler := authMiddleware.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + req := httptest.NewRequest("GET", "http://testing/test", nil) + req.Header.Set("Authorization", "Token "+PAT) + req.Header.Set("User-Agent", "curl/7.68.0") + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + assert.Equal(t, http.StatusOK, rec.Code, "First request should succeed") + + req = httptest.NewRequest("GET", "http://testing/test", nil) + req.Header.Set("Authorization", "Token "+PAT) + req.Header.Set("User-Agent", "curl/7.68.0") + rec = httptest.NewRecorder() + handler.ServeHTTP(rec, req) + assert.Equal(t, http.StatusTooManyRequests, rec.Code, "Second request should be rate limited") + }) } func TestAuthMiddleware_Handler_Child(t *testing.T) { From 291e640b28a4208906099855e1d1fbc3dc06a270 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Thu, 15 Jan 2026 17:30:10 +0100 Subject: [PATCH 049/374] [client] Change priority between local and dns route handlers (#5106) * Change priority between local and dns route handlers * update priority tests --- client/internal/dns/handler_chain.go | 4 +-- client/internal/dns/server_test.go | 50 ++++++++++++++-------------- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/client/internal/dns/handler_chain.go b/client/internal/dns/handler_chain.go index 7e3eb6d1f..06a2056b1 100644 --- a/client/internal/dns/handler_chain.go +++ b/client/internal/dns/handler_chain.go @@ -16,8 +16,8 @@ import ( const ( PriorityMgmtCache = 150 - PriorityLocal = 100 - PriorityDNSRoute = 75 + PriorityDNSRoute = 100 + PriorityLocal = 75 PriorityUpstream = 50 PriorityDefault = 1 PriorityFallback = -100 diff --git a/client/internal/dns/server_test.go b/client/internal/dns/server_test.go index 31e58b9f5..3606d48b9 100644 --- a/client/internal/dns/server_test.go +++ b/client/internal/dns/server_test.go @@ -185,7 +185,7 @@ func TestUpdateDNSServer(t *testing.T) { expectedLocalQs: []dns.Question{{Name: "peera.netbird.cloud.", Qtype: dns.TypeA, Qclass: dns.ClassINET}}, }, { - name: "New Config Should Succeed", + name: "New Config Should Succeed", initLocalZones: []nbdns.CustomZone{{Domain: "netbird.cloud", Records: []nbdns.SimpleRecord{{Name: "netbird.cloud", Type: 1, Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}}}}, initUpstreamMap: registeredHandlerMap{ generateDummyHandler(zoneRecords[0].Name, nameServers).ID(): handlerWrapper{ @@ -226,19 +226,19 @@ func TestUpdateDNSServer(t *testing.T) { expectedLocalQs: []dns.Question{{Name: zoneRecords[0].Name, Qtype: 1, Qclass: 1}}, }, { - name: "Smaller Config Serial Should Be Skipped", - initLocalZones: []nbdns.CustomZone{}, - initUpstreamMap: make(registeredHandlerMap), - initSerial: 2, - inputSerial: 1, - shouldFail: true, + name: "Smaller Config Serial Should Be Skipped", + initLocalZones: []nbdns.CustomZone{}, + initUpstreamMap: make(registeredHandlerMap), + initSerial: 2, + inputSerial: 1, + shouldFail: true, }, { - name: "Empty NS Group Domain Or Not Primary Element Should Fail", - initLocalZones: []nbdns.CustomZone{}, - initUpstreamMap: make(registeredHandlerMap), - initSerial: 0, - inputSerial: 1, + name: "Empty NS Group Domain Or Not Primary Element Should Fail", + initLocalZones: []nbdns.CustomZone{}, + initUpstreamMap: make(registeredHandlerMap), + initSerial: 0, + inputSerial: 1, inputUpdate: nbdns.Config{ ServiceEnable: true, CustomZones: []nbdns.CustomZone{ @@ -256,11 +256,11 @@ func TestUpdateDNSServer(t *testing.T) { shouldFail: true, }, { - name: "Invalid NS Group Nameservers list Should Fail", - initLocalZones: []nbdns.CustomZone{}, - initUpstreamMap: make(registeredHandlerMap), - initSerial: 0, - inputSerial: 1, + name: "Invalid NS Group Nameservers list Should Fail", + initLocalZones: []nbdns.CustomZone{}, + initUpstreamMap: make(registeredHandlerMap), + initSerial: 0, + inputSerial: 1, inputUpdate: nbdns.Config{ ServiceEnable: true, CustomZones: []nbdns.CustomZone{ @@ -278,11 +278,11 @@ func TestUpdateDNSServer(t *testing.T) { shouldFail: true, }, { - name: "Invalid Custom Zone Records list Should Skip", - initLocalZones: []nbdns.CustomZone{}, - initUpstreamMap: make(registeredHandlerMap), - initSerial: 0, - inputSerial: 1, + name: "Invalid Custom Zone Records list Should Skip", + initLocalZones: []nbdns.CustomZone{}, + initUpstreamMap: make(registeredHandlerMap), + initSerial: 0, + inputSerial: 1, inputUpdate: nbdns.Config{ ServiceEnable: true, CustomZones: []nbdns.CustomZone{ @@ -304,7 +304,7 @@ func TestUpdateDNSServer(t *testing.T) { }}, }, { - name: "Empty Config Should Succeed and Clean Maps", + name: "Empty Config Should Succeed and Clean Maps", initLocalZones: []nbdns.CustomZone{{Domain: "netbird.cloud", Records: []nbdns.SimpleRecord{{Name: "netbird.cloud", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}}}}, initUpstreamMap: registeredHandlerMap{ generateDummyHandler(zoneRecords[0].Name, nameServers).ID(): handlerWrapper{ @@ -320,7 +320,7 @@ func TestUpdateDNSServer(t *testing.T) { expectedLocalQs: []dns.Question{}, }, { - name: "Disabled Service Should clean map", + name: "Disabled Service Should clean map", initLocalZones: []nbdns.CustomZone{{Domain: "netbird.cloud", Records: []nbdns.SimpleRecord{{Name: "netbird.cloud", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}}}}, initUpstreamMap: registeredHandlerMap{ generateDummyHandler(zoneRecords[0].Name, nameServers).ID(): handlerWrapper{ @@ -2052,7 +2052,7 @@ func TestLocalResolverPriorityInServer(t *testing.T) { func TestLocalResolverPriorityConstants(t *testing.T) { // Test that priority constants are ordered correctly - assert.Greater(t, PriorityLocal, PriorityDNSRoute, "Local priority should be higher than DNS route") + assert.Greater(t, PriorityDNSRoute, PriorityLocal, "DNS Route should be higher than Local priority") assert.Greater(t, PriorityLocal, PriorityUpstream, "Local priority should be higher than upstream") assert.Greater(t, PriorityUpstream, PriorityDefault, "Upstream priority should be higher than default") From 067c77e49e88be9698f792b5350a28e7d8932758 Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Fri, 16 Jan 2026 10:12:05 +0100 Subject: [PATCH 050/374] [management] Add custom dns zones (#4849) --- .../network_map/controller/controller.go | 57 +- .../network_map/controller/repository.go | 6 + .../internals/modules/zones/interface.go | 13 + .../internals/modules/zones/manager/api.go | 161 +++++ .../modules/zones/manager/manager.go | 229 +++++++ .../modules/zones/manager/manager_test.go | 553 +++++++++++++++++ .../modules/zones/records/interface.go | 13 + .../modules/zones/records/manager/api.go | 191 ++++++ .../modules/zones/records/manager/manager.go | 236 ++++++++ .../zones/records/manager/manager_test.go | 573 ++++++++++++++++++ .../internals/modules/zones/records/record.go | 129 ++++ management/internals/modules/zones/zone.go | 89 +++ management/internals/server/boot.go | 2 +- management/internals/server/modules.go | 16 + .../internals/shared/grpc/conversion.go | 7 +- management/server/account.go | 16 +- management/server/account_test.go | 36 +- management/server/activity/codes.go | 16 + management/server/http/handler.go | 9 +- .../http/handlers/peers/peers_handler.go | 4 +- .../testing/testing_tools/channel/channel.go | 6 +- management/server/store/sql_store.go | 184 ++++++ management/server/store/sql_store_test.go | 475 +++++++++++++++ management/server/store/store.go | 17 + management/server/types/account.go | 80 ++- management/server/types/account_test.go | 514 ++++++++++++++++ management/server/types/networkmap.go | 4 +- .../server/types/networkmap_golden_test.go | 38 +- management/server/types/networkmapbuilder.go | 34 +- management/server/util/util.go | 11 + shared/management/client/rest/client.go | 7 +- shared/management/client/rest/dns_zones.go | 170 ++++++ .../management/client/rest/dns_zones_test.go | 460 ++++++++++++++ shared/management/http/api/openapi.yml | 437 +++++++++++++ shared/management/http/api/types.gen.go | 97 +++ shared/management/status/error.go | 10 + 36 files changed, 4837 insertions(+), 63 deletions(-) create mode 100644 management/internals/modules/zones/interface.go create mode 100644 management/internals/modules/zones/manager/api.go create mode 100644 management/internals/modules/zones/manager/manager.go create mode 100644 management/internals/modules/zones/manager/manager_test.go create mode 100644 management/internals/modules/zones/records/interface.go create mode 100644 management/internals/modules/zones/records/manager/api.go create mode 100644 management/internals/modules/zones/records/manager/manager.go create mode 100644 management/internals/modules/zones/records/manager/manager_test.go create mode 100644 management/internals/modules/zones/records/record.go create mode 100644 management/internals/modules/zones/zone.go create mode 100644 shared/management/client/rest/dns_zones.go create mode 100644 shared/management/client/rest/dns_zones_test.go diff --git a/management/internals/controllers/network_map/controller/controller.go b/management/internals/controllers/network_map/controller/controller.go index f051e5331..d46737c26 100644 --- a/management/internals/controllers/network_map/controller/controller.go +++ b/management/internals/controllers/network_map/controller/controller.go @@ -20,6 +20,7 @@ import ( "github.com/netbirdio/netbird/management/internals/controllers/network_map" "github.com/netbirdio/netbird/management/internals/controllers/network_map/controller/cache" "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral" + "github.com/netbirdio/netbird/management/internals/modules/zones" "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/internals/shared/grpc" "github.com/netbirdio/netbird/management/server/account" @@ -175,7 +176,7 @@ func (c *Controller) sendUpdateAccountPeers(ctx context.Context, accountID strin dnsCache := &cache.DNSConfigCache{} dnsDomain := c.GetDNSDomain(account.Settings) - customZone := account.GetPeersCustomZone(ctx, dnsDomain) + peersCustomZone := account.GetPeersCustomZone(ctx, dnsDomain) resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() groupIDToUserIDs := account.GetActiveGroupUsers() @@ -197,6 +198,12 @@ func (c *Controller) sendUpdateAccountPeers(ctx context.Context, accountID strin dnsFwdPort := computeForwarderPort(maps.Values(account.Peers), network_map.DnsForwarderPortMinVersion) + accountZones, err := c.repo.GetAccountZones(ctx, account.Id) + if err != nil { + log.WithContext(ctx).Errorf("failed to get account zones: %v", err) + return fmt.Errorf("failed to get account zones: %v", err) + } + for _, peer := range account.Peers { if !c.peersUpdateManager.HasChannel(peer.ID) { log.WithContext(ctx).Tracef("peer %s doesn't have a channel, skipping network map update", peer.ID) @@ -223,9 +230,9 @@ func (c *Controller) sendUpdateAccountPeers(ctx context.Context, accountID strin var remotePeerNetworkMap *types.NetworkMap if c.experimentalNetworkMap(accountID) { - remotePeerNetworkMap = c.getPeerNetworkMapExp(ctx, p.AccountID, p.ID, approvedPeersMap, customZone, c.accountManagerMetrics) + remotePeerNetworkMap = c.getPeerNetworkMapExp(ctx, p.AccountID, p.ID, approvedPeersMap, peersCustomZone, accountZones, c.accountManagerMetrics) } else { - remotePeerNetworkMap = account.GetPeerNetworkMap(ctx, p.ID, customZone, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) + remotePeerNetworkMap = account.GetPeerNetworkMap(ctx, p.ID, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) } c.metrics.CountCalcPeerNetworkMapDuration(time.Since(start)) @@ -318,7 +325,7 @@ func (c *Controller) UpdateAccountPeer(ctx context.Context, accountId string, pe dnsCache := &cache.DNSConfigCache{} dnsDomain := c.GetDNSDomain(account.Settings) - customZone := account.GetPeersCustomZone(ctx, dnsDomain) + peersCustomZone := account.GetPeersCustomZone(ctx, dnsDomain) resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() groupIDToUserIDs := account.GetActiveGroupUsers() @@ -335,12 +342,18 @@ func (c *Controller) UpdateAccountPeer(ctx context.Context, accountId string, pe return err } + accountZones, err := c.repo.GetAccountZones(ctx, account.Id) + if err != nil { + log.WithContext(ctx).Errorf("failed to get account zones: %v", err) + return err + } + var remotePeerNetworkMap *types.NetworkMap if c.experimentalNetworkMap(accountId) { - remotePeerNetworkMap = c.getPeerNetworkMapExp(ctx, peer.AccountID, peer.ID, approvedPeersMap, customZone, c.accountManagerMetrics) + remotePeerNetworkMap = c.getPeerNetworkMapExp(ctx, peer.AccountID, peer.ID, approvedPeersMap, peersCustomZone, accountZones, c.accountManagerMetrics) } else { - remotePeerNetworkMap = account.GetPeerNetworkMap(ctx, peerId, customZone, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) + remotePeerNetworkMap = account.GetPeerNetworkMap(ctx, peerId, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) } proxyNetworkMap, ok := proxyNetworkMaps[peer.ID] @@ -434,7 +447,14 @@ func (c *Controller) GetValidatedPeerWithMap(ctx context.Context, isRequiresAppr } log.WithContext(ctx).Debugf("getPeerPostureChecks took %s", time.Since(startPosture)) - customZone := account.GetPeersCustomZone(ctx, c.GetDNSDomain(account.Settings)) + accountZones, err := c.repo.GetAccountZones(ctx, account.Id) + if err != nil { + log.WithContext(ctx).Errorf("failed to get account zones: %v", err) + return nil, nil, nil, 0, err + } + + dnsDomain := c.GetDNSDomain(account.Settings) + peersCustomZone := account.GetPeersCustomZone(ctx, dnsDomain) proxyNetworkMaps, err := c.proxyController.GetProxyNetworkMaps(ctx, account.Id, peer.ID, account.Peers) if err != nil { @@ -445,11 +465,11 @@ func (c *Controller) GetValidatedPeerWithMap(ctx context.Context, isRequiresAppr var networkMap *types.NetworkMap if c.experimentalNetworkMap(accountID) { - networkMap = c.getPeerNetworkMapExp(ctx, peer.AccountID, peer.ID, approvedPeersMap, customZone, c.accountManagerMetrics) + networkMap = c.getPeerNetworkMapExp(ctx, peer.AccountID, peer.ID, approvedPeersMap, peersCustomZone, accountZones, c.accountManagerMetrics) } else { resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - networkMap = account.GetPeerNetworkMap(ctx, peer.ID, customZone, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, account.GetActiveGroupUsers()) + networkMap = account.GetPeerNetworkMap(ctx, peer.ID, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, account.GetActiveGroupUsers()) } proxyNetworkMap, ok := proxyNetworkMaps[peer.ID] @@ -472,7 +492,8 @@ func (c *Controller) getPeerNetworkMapExp( accountId string, peerId string, validatedPeers map[string]struct{}, - customZone nbdns.CustomZone, + peersCustomZone nbdns.CustomZone, + accountZones []*zones.Zone, metrics *telemetry.AccountManagerMetrics, ) *types.NetworkMap { account := c.getAccountFromHolderOrInit(ctx, accountId) @@ -483,7 +504,7 @@ func (c *Controller) getPeerNetworkMapExp( } } - return account.GetPeerNetworkMapExp(ctx, peerId, customZone, validatedPeers, metrics) + return account.GetPeerNetworkMapExp(ctx, peerId, peersCustomZone, accountZones, validatedPeers, metrics) } func (c *Controller) onPeersAddedUpdNetworkMapCache(account *types.Account, peerIds ...string) { @@ -798,7 +819,15 @@ func (c *Controller) GetNetworkMap(ctx context.Context, peerID string) (*types.N if err != nil { return nil, err } - customZone := account.GetPeersCustomZone(ctx, c.GetDNSDomain(account.Settings)) + + accountZones, err := c.repo.GetAccountZones(ctx, account.Id) + if err != nil { + log.WithContext(ctx).Errorf("failed to get account zones: %v", err) + return nil, err + } + + dnsDomain := c.GetDNSDomain(account.Settings) + peersCustomZone := account.GetPeersCustomZone(ctx, dnsDomain) proxyNetworkMaps, err := c.proxyController.GetProxyNetworkMaps(ctx, account.Id, peerID, account.Peers) if err != nil { @@ -809,11 +838,11 @@ func (c *Controller) GetNetworkMap(ctx context.Context, peerID string) (*types.N var networkMap *types.NetworkMap if c.experimentalNetworkMap(peer.AccountID) { - networkMap = c.getPeerNetworkMapExp(ctx, peer.AccountID, peerID, validatedPeers, customZone, nil) + networkMap = c.getPeerNetworkMapExp(ctx, peer.AccountID, peerID, validatedPeers, peersCustomZone, accountZones, nil) } else { resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - networkMap = account.GetPeerNetworkMap(ctx, peer.ID, customZone, validatedPeers, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + networkMap = account.GetPeerNetworkMap(ctx, peer.ID, peersCustomZone, accountZones, validatedPeers, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) } proxyNetworkMap, ok := proxyNetworkMaps[peer.ID] diff --git a/management/internals/controllers/network_map/controller/repository.go b/management/internals/controllers/network_map/controller/repository.go index 3ed51a5c3..caef362cb 100644 --- a/management/internals/controllers/network_map/controller/repository.go +++ b/management/internals/controllers/network_map/controller/repository.go @@ -3,6 +3,7 @@ package controller import ( "context" + "github.com/netbirdio/netbird/management/internals/modules/zones" "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/server/store" "github.com/netbirdio/netbird/management/server/types" @@ -14,6 +15,7 @@ type Repository interface { GetAccountByPeerID(ctx context.Context, peerID string) (*types.Account, error) GetPeersByIDs(ctx context.Context, accountID string, peerIDs []string) (map[string]*peer.Peer, error) GetPeerByID(ctx context.Context, accountID string, peerID string) (*peer.Peer, error) + GetAccountZones(ctx context.Context, accountID string) ([]*zones.Zone, error) } type repository struct { @@ -47,3 +49,7 @@ func (r *repository) GetPeersByIDs(ctx context.Context, accountID string, peerID func (r *repository) GetPeerByID(ctx context.Context, accountID string, peerID string) (*peer.Peer, error) { return r.store.GetPeerByID(ctx, store.LockingStrengthNone, accountID, peerID) } + +func (r *repository) GetAccountZones(ctx context.Context, accountID string) ([]*zones.Zone, error) { + return r.store.GetAccountZones(ctx, store.LockingStrengthNone, accountID) +} diff --git a/management/internals/modules/zones/interface.go b/management/internals/modules/zones/interface.go new file mode 100644 index 000000000..8e2306230 --- /dev/null +++ b/management/internals/modules/zones/interface.go @@ -0,0 +1,13 @@ +package zones + +import ( + "context" +) + +type Manager interface { + GetAllZones(ctx context.Context, accountID, userID string) ([]*Zone, error) + GetZone(ctx context.Context, accountID, userID, zone string) (*Zone, error) + CreateZone(ctx context.Context, accountID, userID string, zone *Zone) (*Zone, error) + UpdateZone(ctx context.Context, accountID, userID string, zone *Zone) (*Zone, error) + DeleteZone(ctx context.Context, accountID, userID, zoneID string) error +} diff --git a/management/internals/modules/zones/manager/api.go b/management/internals/modules/zones/manager/api.go new file mode 100644 index 000000000..919d77d61 --- /dev/null +++ b/management/internals/modules/zones/manager/api.go @@ -0,0 +1,161 @@ +package manager + +import ( + "encoding/json" + "net/http" + + "github.com/gorilla/mux" + + "github.com/netbirdio/netbird/management/internals/modules/zones" + nbcontext "github.com/netbirdio/netbird/management/server/context" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" + "github.com/netbirdio/netbird/shared/management/status" +) + +type handler struct { + manager zones.Manager +} + +func RegisterEndpoints(router *mux.Router, manager zones.Manager) { + h := &handler{ + manager: manager, + } + + router.HandleFunc("/dns/zones", h.getAllZones).Methods("GET", "OPTIONS") + router.HandleFunc("/dns/zones", h.createZone).Methods("POST", "OPTIONS") + router.HandleFunc("/dns/zones/{zoneId}", h.getZone).Methods("GET", "OPTIONS") + router.HandleFunc("/dns/zones/{zoneId}", h.updateZone).Methods("PUT", "OPTIONS") + router.HandleFunc("/dns/zones/{zoneId}", h.deleteZone).Methods("DELETE", "OPTIONS") +} + +func (h *handler) getAllZones(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + allZones, err := h.manager.GetAllZones(r.Context(), userAuth.AccountId, userAuth.UserId) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + apiZones := make([]*api.Zone, 0, len(allZones)) + for _, zone := range allZones { + apiZones = append(apiZones, zone.ToAPIResponse()) + } + + util.WriteJSONObject(r.Context(), w, apiZones) +} + +func (h *handler) createZone(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + var req api.PostApiDnsZonesJSONRequestBody + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + + zone := new(zones.Zone) + zone.FromAPIRequest(&req) + + if err = zone.Validate(); err != nil { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "%s", err.Error()), w) + return + } + + createdZone, err := h.manager.CreateZone(r.Context(), userAuth.AccountId, userAuth.UserId, zone) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, createdZone.ToAPIResponse()) +} + +func (h *handler) getZone(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + zoneID := mux.Vars(r)["zoneId"] + if zoneID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "zone ID is required"), w) + return + } + + zone, err := h.manager.GetZone(r.Context(), userAuth.AccountId, userAuth.UserId, zoneID) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, zone.ToAPIResponse()) +} + +func (h *handler) updateZone(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + zoneID := mux.Vars(r)["zoneId"] + if zoneID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "zone ID is required"), w) + return + } + + var req api.PutApiDnsZonesZoneIdJSONRequestBody + if err = json.NewDecoder(r.Body).Decode(&req); err != nil { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + + zone := new(zones.Zone) + zone.FromAPIRequest(&req) + zone.ID = zoneID + + if err = zone.Validate(); err != nil { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "%s", err.Error()), w) + return + } + + updatedZone, err := h.manager.UpdateZone(r.Context(), userAuth.AccountId, userAuth.UserId, zone) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, updatedZone.ToAPIResponse()) +} + +func (h *handler) deleteZone(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + zoneID := mux.Vars(r)["zoneId"] + if zoneID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "zone ID is required"), w) + return + } + + if err = h.manager.DeleteZone(r.Context(), userAuth.AccountId, userAuth.UserId, zoneID); err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, util.EmptyObject{}) +} diff --git a/management/internals/modules/zones/manager/manager.go b/management/internals/modules/zones/manager/manager.go new file mode 100644 index 000000000..8548dd48c --- /dev/null +++ b/management/internals/modules/zones/manager/manager.go @@ -0,0 +1,229 @@ +package manager + +import ( + "context" + "fmt" + + "github.com/netbirdio/netbird/management/internals/modules/zones" + "github.com/netbirdio/netbird/management/server/account" + "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/management/server/permissions" + "github.com/netbirdio/netbird/management/server/permissions/modules" + "github.com/netbirdio/netbird/management/server/permissions/operations" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/shared/management/status" +) + +type managerImpl struct { + store store.Store + accountManager account.Manager + permissionsManager permissions.Manager + dnsDomain string +} + +func NewManager(store store.Store, accountManager account.Manager, permissionsManager permissions.Manager, dnsDomain string) zones.Manager { + return &managerImpl{ + store: store, + accountManager: accountManager, + permissionsManager: permissionsManager, + dnsDomain: dnsDomain, + } +} + +func (m *managerImpl) GetAllZones(ctx context.Context, accountID, userID string) ([]*zones.Zone, error) { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Dns, operations.Read) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + return m.store.GetAccountZones(ctx, store.LockingStrengthNone, accountID) +} + +func (m *managerImpl) GetZone(ctx context.Context, accountID, userID, zoneID string) (*zones.Zone, error) { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Dns, operations.Read) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + return m.store.GetZoneByID(ctx, store.LockingStrengthNone, accountID, zoneID) +} + +func (m *managerImpl) CreateZone(ctx context.Context, accountID, userID string, zone *zones.Zone) (*zones.Zone, error) { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Dns, operations.Create) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + if err = m.validateZoneDomainConflict(ctx, accountID, zone.Domain); err != nil { + return nil, err + } + + zone = zones.NewZone(accountID, zone.Name, zone.Domain, zone.Enabled, zone.EnableSearchDomain, zone.DistributionGroups) + err = m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + existingZone, err := transaction.GetZoneByDomain(ctx, accountID, zone.Domain) + if err != nil { + if sErr, ok := status.FromError(err); !ok || sErr.Type() != status.NotFound { + return fmt.Errorf("failed to check existing zone: %w", err) + } + } + if existingZone != nil { + return status.Errorf(status.AlreadyExists, "zone with domain %s already exists", zone.Domain) + } + + for _, groupID := range zone.DistributionGroups { + _, err = transaction.GetGroupByID(ctx, store.LockingStrengthNone, accountID, groupID) + if err != nil { + return status.Errorf(status.InvalidArgument, "%s", err.Error()) + } + } + + if err = transaction.CreateZone(ctx, zone); err != nil { + return fmt.Errorf("failed to create zone: %w", err) + } + + return nil + }) + if err != nil { + return nil, err + } + + m.accountManager.StoreEvent(ctx, userID, zone.ID, accountID, activity.DNSZoneCreated, zone.EventMeta()) + + return zone, nil +} + +func (m *managerImpl) UpdateZone(ctx context.Context, accountID, userID string, updatedZone *zones.Zone) (*zones.Zone, error) { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Dns, operations.Update) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + zone, err := m.store.GetZoneByID(ctx, store.LockingStrengthUpdate, accountID, updatedZone.ID) + if err != nil { + return nil, fmt.Errorf("failed to get zone: %w", err) + } + + if zone.Domain != updatedZone.Domain { + return nil, status.Errorf(status.InvalidArgument, "zone domain cannot be updated") + } + + zone.Name = updatedZone.Name + zone.Enabled = updatedZone.Enabled + zone.EnableSearchDomain = updatedZone.EnableSearchDomain + zone.DistributionGroups = updatedZone.DistributionGroups + + err = m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + for _, groupID := range zone.DistributionGroups { + _, err = transaction.GetGroupByID(ctx, store.LockingStrengthNone, accountID, groupID) + if err != nil { + return status.Errorf(status.InvalidArgument, "%s", err.Error()) + } + } + + if err = transaction.UpdateZone(ctx, zone); err != nil { + return fmt.Errorf("failed to update zone: %w", err) + } + + return nil + }) + if err != nil { + return nil, err + } + + m.accountManager.StoreEvent(ctx, userID, zone.ID, accountID, activity.DNSZoneUpdated, zone.EventMeta()) + + go m.accountManager.UpdateAccountPeers(ctx, accountID) + + return zone, nil +} + +func (m *managerImpl) DeleteZone(ctx context.Context, accountID, userID, zoneID string) error { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Dns, operations.Delete) + if err != nil { + return status.NewPermissionValidationError(err) + } + if !ok { + return status.NewPermissionDeniedError() + } + + zone, err := m.store.GetZoneByID(ctx, store.LockingStrengthUpdate, accountID, zoneID) + if err != nil { + return fmt.Errorf("failed to get zone: %w", err) + } + + var eventsToStore []func() + err = m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + records, err := transaction.GetZoneDNSRecords(ctx, store.LockingStrengthNone, accountID, zoneID) + if err != nil { + return fmt.Errorf("failed to get records: %w", err) + } + + err = transaction.DeleteZoneDNSRecords(ctx, accountID, zoneID) + if err != nil { + return fmt.Errorf("failed to delete zone dns records: %w", err) + } + + err = transaction.DeleteZone(ctx, accountID, zoneID) + if err != nil { + return fmt.Errorf("failed to delete zone: %w", err) + } + + err = transaction.IncrementNetworkSerial(ctx, accountID) + if err != nil { + return fmt.Errorf("failed to increment network serial: %w", err) + } + + for _, record := range records { + eventsToStore = append(eventsToStore, func() { + meta := record.EventMeta(zone.ID, zone.Name) + m.accountManager.StoreEvent(ctx, userID, record.ID, accountID, activity.DNSRecordDeleted, meta) + }) + } + + eventsToStore = append(eventsToStore, func() { + m.accountManager.StoreEvent(ctx, userID, zoneID, accountID, activity.DNSZoneDeleted, zone.EventMeta()) + }) + + return nil + }) + if err != nil { + return err + } + + for _, event := range eventsToStore { + event() + } + + go m.accountManager.UpdateAccountPeers(ctx, accountID) + + return nil +} + +func (m *managerImpl) validateZoneDomainConflict(ctx context.Context, accountID, domain string) error { + if m.dnsDomain != "" && m.dnsDomain == domain { + return status.Errorf(status.InvalidArgument, "zone domain %s conflicts with peer DNS domain", domain) + } + + settings, err := m.store.GetAccountSettings(ctx, store.LockingStrengthNone, accountID) + if err != nil { + return err + } + + if settings.DNSDomain != "" && settings.DNSDomain == domain { + return status.Errorf(status.InvalidArgument, "zone domain %s conflicts with peer DNS domain", domain) + } + + return nil +} diff --git a/management/internals/modules/zones/manager/manager_test.go b/management/internals/modules/zones/manager/manager_test.go new file mode 100644 index 000000000..b45ec7874 --- /dev/null +++ b/management/internals/modules/zones/manager/manager_test.go @@ -0,0 +1,553 @@ +package manager + +import ( + "context" + "fmt" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/internals/modules/zones" + "github.com/netbirdio/netbird/management/internals/modules/zones/records" + "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/management/server/mock_server" + "github.com/netbirdio/netbird/management/server/permissions" + "github.com/netbirdio/netbird/management/server/permissions/modules" + "github.com/netbirdio/netbird/management/server/permissions/operations" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/management/status" +) + +const ( + testAccountID = "test-account-id" + testUserID = "test-user-id" + testZoneID = "test-zone-id" + testGroupID = "test-group-id" + testDNSDomain = "netbird.selfhosted" +) + +func setupTest(t *testing.T) (*managerImpl, store.Store, *mock_server.MockAccountManager, *permissions.MockManager, *gomock.Controller, func()) { + t.Helper() + + ctx := context.Background() + testStore, cleanup, err := store.NewTestStoreFromSQL(ctx, "", t.TempDir()) + require.NoError(t, err) + + err = testStore.SaveAccount(ctx, &types.Account{ + Id: testAccountID, + Groups: map[string]*types.Group{ + testGroupID: { + ID: testGroupID, + Name: "Test Group", + }, + }, + }) + require.NoError(t, err) + + ctrl := gomock.NewController(t) + mockAccountManager := &mock_server.MockAccountManager{} + mockPermissionsManager := permissions.NewMockManager(ctrl) + + manager := &managerImpl{ + store: testStore, + accountManager: mockAccountManager, + permissionsManager: mockPermissionsManager, + dnsDomain: testDNSDomain, + } + + return manager, testStore, mockAccountManager, mockPermissionsManager, ctrl, cleanup +} + +func TestManagerImpl_GetAllZones(t *testing.T) { + ctx := context.Background() + + t.Run("success", func(t *testing.T) { + manager, testStore, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + zone1 := zones.NewZone(testAccountID, "Zone 1", "zone1.example.com", true, true, []string{testGroupID}) + err := testStore.CreateZone(ctx, zone1) + require.NoError(t, err) + + zone2 := zones.NewZone(testAccountID, "Zone 2", "zone2.example.com", false, false, []string{testGroupID}) + err = testStore.CreateZone(ctx, zone2) + require.NoError(t, err) + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Read). + Return(true, nil) + + result, err := manager.GetAllZones(ctx, testAccountID, testUserID) + require.NoError(t, err) + assert.Len(t, result, 2) + assert.Equal(t, zone1.ID, result[0].ID) + assert.Equal(t, zone2.ID, result[1].ID) + }) + + t.Run("permission denied", func(t *testing.T) { + manager, _, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Read). + Return(false, nil) + + result, err := manager.GetAllZones(ctx, testAccountID, testUserID) + require.Error(t, err) + assert.Nil(t, result) + s, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, status.PermissionDenied, s.Type()) + }) + + t.Run("permission validation error", func(t *testing.T) { + manager, _, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Read). + Return(false, status.Errorf(status.Internal, "permission check failed")) + + result, err := manager.GetAllZones(ctx, testAccountID, testUserID) + require.Error(t, err) + assert.Nil(t, result) + }) +} + +func TestManagerImpl_GetZone(t *testing.T) { + ctx := context.Background() + + t.Run("success", func(t *testing.T) { + manager, testStore, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + zone := zones.NewZone(testAccountID, "Test Zone", "test.example.com", true, true, []string{testGroupID}) + err := testStore.CreateZone(ctx, zone) + require.NoError(t, err) + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Read). + Return(true, nil) + + result, err := manager.GetZone(ctx, testAccountID, testUserID, zone.ID) + require.NoError(t, err) + assert.Equal(t, zone.ID, result.ID) + assert.Equal(t, zone.Name, result.Name) + assert.Equal(t, zone.Domain, result.Domain) + }) + + t.Run("permission denied", func(t *testing.T) { + manager, _, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Read). + Return(false, nil) + + result, err := manager.GetZone(ctx, testAccountID, testUserID, testZoneID) + require.Error(t, err) + assert.Nil(t, result) + s, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, status.PermissionDenied, s.Type()) + }) +} + +func TestManagerImpl_CreateZone(t *testing.T) { + ctx := context.Background() + + t.Run("success", func(t *testing.T) { + manager, _, mockAccountManager, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + inputZone := &zones.Zone{ + Name: "New Zone", + Domain: "new.example.com", + Enabled: true, + EnableSearchDomain: true, + DistributionGroups: []string{testGroupID}, + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Create). + Return(true, nil) + + mockAccountManager.StoreEventFunc = func(ctx context.Context, initiatorID, targetID, accountID string, activityID activity.ActivityDescriber, meta map[string]any) { + assert.Equal(t, testUserID, initiatorID) + assert.Equal(t, testAccountID, accountID) + assert.Equal(t, activity.DNSZoneCreated, activityID) + } + + result, err := manager.CreateZone(ctx, testAccountID, testUserID, inputZone) + require.NoError(t, err) + assert.NotNil(t, result) + assert.NotEmpty(t, result.ID) + assert.Equal(t, testAccountID, result.AccountID) + assert.Equal(t, inputZone.Name, result.Name) + assert.Equal(t, inputZone.Domain, result.Domain) + assert.Equal(t, inputZone.Enabled, result.Enabled) + assert.Equal(t, inputZone.EnableSearchDomain, result.EnableSearchDomain) + assert.Equal(t, inputZone.DistributionGroups, result.DistributionGroups) + }) + + t.Run("permission denied", func(t *testing.T) { + manager, _, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + inputZone := &zones.Zone{ + Name: "New Zone", + Domain: "new.example.com", + DistributionGroups: []string{testGroupID}, + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Create). + Return(false, nil) + + result, err := manager.CreateZone(ctx, testAccountID, testUserID, inputZone) + require.Error(t, err) + assert.Nil(t, result) + s, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, status.PermissionDenied, s.Type()) + }) + + t.Run("invalid group", func(t *testing.T) { + manager, _, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + inputZone := &zones.Zone{ + Name: "New Zone", + Domain: "new.example.com", + DistributionGroups: []string{"invalid-group"}, + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Create). + Return(true, nil) + + result, err := manager.CreateZone(ctx, testAccountID, testUserID, inputZone) + require.Error(t, err) + assert.Nil(t, result) + }) + + t.Run("duplicate domain", func(t *testing.T) { + manager, testStore, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + existingZone := zones.NewZone(testAccountID, "Existing Zone", "duplicate.example.com", true, false, []string{testGroupID}) + err := testStore.CreateZone(ctx, existingZone) + require.NoError(t, err) + + inputZone := &zones.Zone{ + Name: "New Zone", + Domain: "duplicate.example.com", + Enabled: true, + EnableSearchDomain: false, + DistributionGroups: []string{testGroupID}, + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Create). + Return(true, nil) + + result, err := manager.CreateZone(ctx, testAccountID, testUserID, inputZone) + require.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), "zone with domain duplicate.example.com already exists") + s, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, status.AlreadyExists, s.Type()) + }) + + t.Run("peer DNS domain conflict", func(t *testing.T) { + manager, testStore, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + account, err := testStore.GetAccount(ctx, testAccountID) + require.NoError(t, err) + account.Settings.DNSDomain = "peers.example.com" + err = testStore.SaveAccount(ctx, account) + require.NoError(t, err) + + inputZone := &zones.Zone{ + Name: "Test Zone", + Domain: "peers.example.com", + Enabled: true, + EnableSearchDomain: false, + DistributionGroups: []string{testGroupID}, + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Create). + Return(true, nil) + + result, err := manager.CreateZone(ctx, testAccountID, testUserID, inputZone) + require.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), "zone domain peers.example.com conflicts with peer DNS domain") + s, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, status.InvalidArgument, s.Type()) + }) + + t.Run("default DNS domain conflict", func(t *testing.T) { + manager, _, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + inputZone := &zones.Zone{ + Name: "Test Zone", + Domain: testDNSDomain, + Enabled: true, + EnableSearchDomain: false, + DistributionGroups: []string{testGroupID}, + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Create). + Return(true, nil) + + result, err := manager.CreateZone(ctx, testAccountID, testUserID, inputZone) + require.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), fmt.Sprintf("zone domain %s conflicts with peer DNS domain", testDNSDomain)) + s, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, status.InvalidArgument, s.Type()) + }) +} + +func TestManagerImpl_UpdateZone(t *testing.T) { + ctx := context.Background() + + t.Run("success", func(t *testing.T) { + manager, testStore, mockAccountManager, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + existingZone := zones.NewZone(testAccountID, "Old Name", "example.com", false, false, []string{testGroupID}) + err := testStore.CreateZone(ctx, existingZone) + require.NoError(t, err) + + updatedZone := &zones.Zone{ + ID: existingZone.ID, + Name: "Updated Name", + Domain: "example.com", + Enabled: true, + EnableSearchDomain: true, + DistributionGroups: []string{testGroupID}, + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Update). + Return(true, nil) + + storeEventCalled := false + mockAccountManager.StoreEventFunc = func(ctx context.Context, initiatorID, targetID, accountID string, activityID activity.ActivityDescriber, meta map[string]any) { + storeEventCalled = true + assert.Equal(t, testUserID, initiatorID) + assert.Equal(t, existingZone.ID, targetID) + assert.Equal(t, testAccountID, accountID) + assert.Equal(t, activity.DNSZoneUpdated, activityID) + } + + result, err := manager.UpdateZone(ctx, testAccountID, testUserID, updatedZone) + require.NoError(t, err) + assert.NotNil(t, result) + assert.Equal(t, updatedZone.Name, result.Name) + assert.Equal(t, updatedZone.Enabled, result.Enabled) + assert.Equal(t, updatedZone.EnableSearchDomain, result.EnableSearchDomain) + assert.True(t, storeEventCalled, "StoreEvent should have been called") + }) + + t.Run("domain change not allowed", func(t *testing.T) { + manager, testStore, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + existingZone := zones.NewZone(testAccountID, "Test Zone", "example.com", true, true, []string{testGroupID}) + err := testStore.CreateZone(ctx, existingZone) + require.NoError(t, err) + + updatedZone := &zones.Zone{ + ID: existingZone.ID, + Name: "Test Zone", + Domain: "different.com", + Enabled: true, + EnableSearchDomain: true, + DistributionGroups: []string{testGroupID}, + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Update). + Return(true, nil) + + result, err := manager.UpdateZone(ctx, testAccountID, testUserID, updatedZone) + require.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), "zone domain cannot be updated") + s, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, status.InvalidArgument, s.Type()) + }) + + t.Run("permission denied", func(t *testing.T) { + manager, _, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + updatedZone := &zones.Zone{ + ID: testZoneID, + Name: "Updated Name", + Domain: "example.com", + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Update). + Return(false, nil) + + result, err := manager.UpdateZone(ctx, testAccountID, testUserID, updatedZone) + require.Error(t, err) + assert.Nil(t, result) + s, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, status.PermissionDenied, s.Type()) + }) + + t.Run("zone not found", func(t *testing.T) { + manager, _, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + updatedZone := &zones.Zone{ + ID: "non-existent-zone", + Name: "Updated Name", + Domain: "example.com", + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Update). + Return(true, nil) + + result, err := manager.UpdateZone(ctx, testAccountID, testUserID, updatedZone) + require.Error(t, err) + assert.Nil(t, result) + }) +} + +func TestManagerImpl_DeleteZone(t *testing.T) { + ctx := context.Background() + + t.Run("success with records", func(t *testing.T) { + manager, testStore, mockAccountManager, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + zone := zones.NewZone(testAccountID, "Test Zone", "example.com", true, true, []string{testGroupID}) + err := testStore.CreateZone(ctx, zone) + require.NoError(t, err) + + record1 := records.NewRecord(testAccountID, zone.ID, "api.example.com", records.RecordTypeA, "192.168.1.1", 300) + err = testStore.CreateDNSRecord(ctx, record1) + require.NoError(t, err) + + record2 := records.NewRecord(testAccountID, zone.ID, "www.example.com", records.RecordTypeA, "192.168.1.2", 300) + err = testStore.CreateDNSRecord(ctx, record2) + require.NoError(t, err) + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Delete). + Return(true, nil) + + storeEventCallCount := 0 + mockAccountManager.StoreEventFunc = func(ctx context.Context, initiatorID, targetID, accountID string, activityID activity.ActivityDescriber, meta map[string]any) { + storeEventCallCount++ + assert.Equal(t, testUserID, initiatorID) + assert.Equal(t, testAccountID, accountID) + } + + err = manager.DeleteZone(ctx, testAccountID, testUserID, zone.ID) + require.NoError(t, err) + assert.Equal(t, 3, storeEventCallCount) + + _, err = testStore.GetZoneByID(ctx, store.LockingStrengthNone, testAccountID, zone.ID) + require.Error(t, err) + + zoneRecords, err := testStore.GetZoneDNSRecords(ctx, store.LockingStrengthNone, testAccountID, zone.ID) + require.NoError(t, err) + assert.Empty(t, zoneRecords) + }) + + t.Run("success without records", func(t *testing.T) { + manager, testStore, mockAccountManager, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + zone := zones.NewZone(testAccountID, "Test Zone", "example.com", true, true, []string{testGroupID}) + err := testStore.CreateZone(ctx, zone) + require.NoError(t, err) + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Delete). + Return(true, nil) + + storeEventCalled := false + mockAccountManager.StoreEventFunc = func(ctx context.Context, initiatorID, targetID, accountID string, activityID activity.ActivityDescriber, meta map[string]any) { + storeEventCalled = true + assert.Equal(t, testUserID, initiatorID) + assert.Equal(t, zone.ID, targetID) + assert.Equal(t, testAccountID, accountID) + assert.Equal(t, activity.DNSZoneDeleted, activityID) + } + + err = manager.DeleteZone(ctx, testAccountID, testUserID, zone.ID) + require.NoError(t, err) + assert.True(t, storeEventCalled, "StoreEvent should have been called") + + _, err = testStore.GetZoneByID(ctx, store.LockingStrengthNone, testAccountID, zone.ID) + require.Error(t, err) + }) + + t.Run("permission denied", func(t *testing.T) { + manager, _, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Delete). + Return(false, nil) + + err := manager.DeleteZone(ctx, testAccountID, testUserID, testZoneID) + require.Error(t, err) + s, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, status.PermissionDenied, s.Type()) + }) + + t.Run("zone not found", func(t *testing.T) { + manager, _, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Delete). + Return(true, nil) + + err := manager.DeleteZone(ctx, testAccountID, testUserID, "non-existent-zone") + require.Error(t, err) + }) +} diff --git a/management/internals/modules/zones/records/interface.go b/management/internals/modules/zones/records/interface.go new file mode 100644 index 000000000..ceb8c5318 --- /dev/null +++ b/management/internals/modules/zones/records/interface.go @@ -0,0 +1,13 @@ +package records + +import ( + "context" +) + +type Manager interface { + GetAllRecords(ctx context.Context, accountID, userID, zoneID string) ([]*Record, error) + GetRecord(ctx context.Context, accountID, userID, zoneID, recordID string) (*Record, error) + CreateRecord(ctx context.Context, accountID, userID, zoneID string, record *Record) (*Record, error) + UpdateRecord(ctx context.Context, accountID, userID, zoneID string, record *Record) (*Record, error) + DeleteRecord(ctx context.Context, accountID, userID, zoneID, recordID string) error +} diff --git a/management/internals/modules/zones/records/manager/api.go b/management/internals/modules/zones/records/manager/api.go new file mode 100644 index 000000000..f8ecfef7d --- /dev/null +++ b/management/internals/modules/zones/records/manager/api.go @@ -0,0 +1,191 @@ +package manager + +import ( + "encoding/json" + "net/http" + + "github.com/gorilla/mux" + + "github.com/netbirdio/netbird/management/internals/modules/zones/records" + nbcontext "github.com/netbirdio/netbird/management/server/context" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" + "github.com/netbirdio/netbird/shared/management/status" +) + +type handler struct { + manager records.Manager +} + +func RegisterEndpoints(router *mux.Router, manager records.Manager) { + h := &handler{ + manager: manager, + } + + router.HandleFunc("/dns/zones/{zoneId}/records", h.getAllRecords).Methods("GET", "OPTIONS") + router.HandleFunc("/dns/zones/{zoneId}/records", h.createRecord).Methods("POST", "OPTIONS") + router.HandleFunc("/dns/zones/{zoneId}/records/{recordId}", h.getRecord).Methods("GET", "OPTIONS") + router.HandleFunc("/dns/zones/{zoneId}/records/{recordId}", h.updateRecord).Methods("PUT", "OPTIONS") + router.HandleFunc("/dns/zones/{zoneId}/records/{recordId}", h.deleteRecord).Methods("DELETE", "OPTIONS") +} + +func (h *handler) getAllRecords(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + zoneID := mux.Vars(r)["zoneId"] + if zoneID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "zone ID is required"), w) + return + } + + allRecords, err := h.manager.GetAllRecords(r.Context(), userAuth.AccountId, userAuth.UserId, zoneID) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + apiRecords := make([]*api.DNSRecord, 0, len(allRecords)) + for _, record := range allRecords { + apiRecords = append(apiRecords, record.ToAPIResponse()) + } + + util.WriteJSONObject(r.Context(), w, apiRecords) +} + +func (h *handler) createRecord(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + zoneID := mux.Vars(r)["zoneId"] + if zoneID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "zone ID is required"), w) + return + } + + var req api.PostApiDnsZonesZoneIdRecordsJSONRequestBody + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + + record := new(records.Record) + record.FromAPIRequest(&req) + + if err = record.Validate(); err != nil { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "%s", err.Error()), w) + return + } + + createdRecord, err := h.manager.CreateRecord(r.Context(), userAuth.AccountId, userAuth.UserId, zoneID, record) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, createdRecord.ToAPIResponse()) +} + +func (h *handler) getRecord(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + zoneID := mux.Vars(r)["zoneId"] + if zoneID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "zone ID is required"), w) + return + } + + recordID := mux.Vars(r)["recordId"] + if recordID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "record ID is required"), w) + return + } + + record, err := h.manager.GetRecord(r.Context(), userAuth.AccountId, userAuth.UserId, zoneID, recordID) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, record.ToAPIResponse()) +} + +func (h *handler) updateRecord(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + zoneID := mux.Vars(r)["zoneId"] + if zoneID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "zone ID is required"), w) + return + } + + recordID := mux.Vars(r)["recordId"] + if recordID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "record ID is required"), w) + return + } + + var req api.PutApiDnsZonesZoneIdRecordsRecordIdJSONRequestBody + if err = json.NewDecoder(r.Body).Decode(&req); err != nil { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + + record := new(records.Record) + record.FromAPIRequest(&req) + record.ID = recordID + + if err = record.Validate(); err != nil { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "%s", err.Error()), w) + return + } + + updatedRecord, err := h.manager.UpdateRecord(r.Context(), userAuth.AccountId, userAuth.UserId, zoneID, record) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, updatedRecord.ToAPIResponse()) +} + +func (h *handler) deleteRecord(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + zoneID := mux.Vars(r)["zoneId"] + if zoneID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "zone ID is required"), w) + return + } + + recordID := mux.Vars(r)["recordId"] + if recordID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "record ID is required"), w) + return + } + + if err = h.manager.DeleteRecord(r.Context(), userAuth.AccountId, userAuth.UserId, zoneID, recordID); err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, util.EmptyObject{}) +} diff --git a/management/internals/modules/zones/records/manager/manager.go b/management/internals/modules/zones/records/manager/manager.go new file mode 100644 index 000000000..5374a2ef2 --- /dev/null +++ b/management/internals/modules/zones/records/manager/manager.go @@ -0,0 +1,236 @@ +package manager + +import ( + "context" + "fmt" + "strings" + + "github.com/netbirdio/netbird/management/internals/modules/zones" + "github.com/netbirdio/netbird/management/internals/modules/zones/records" + "github.com/netbirdio/netbird/management/server/account" + "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/management/server/permissions" + "github.com/netbirdio/netbird/management/server/permissions/modules" + "github.com/netbirdio/netbird/management/server/permissions/operations" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/shared/management/status" +) + +type managerImpl struct { + store store.Store + accountManager account.Manager + permissionsManager permissions.Manager +} + +func NewManager(store store.Store, accountManager account.Manager, permissionsManager permissions.Manager) records.Manager { + return &managerImpl{ + store: store, + accountManager: accountManager, + permissionsManager: permissionsManager, + } +} + +func (m *managerImpl) GetAllRecords(ctx context.Context, accountID, userID, zoneID string) ([]*records.Record, error) { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Dns, operations.Read) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + return m.store.GetZoneDNSRecords(ctx, store.LockingStrengthNone, accountID, zoneID) +} + +func (m *managerImpl) GetRecord(ctx context.Context, accountID, userID, zoneID, recordID string) (*records.Record, error) { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Dns, operations.Read) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + return m.store.GetDNSRecordByID(ctx, store.LockingStrengthNone, accountID, zoneID, recordID) +} + +func (m *managerImpl) CreateRecord(ctx context.Context, accountID, userID, zoneID string, record *records.Record) (*records.Record, error) { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Dns, operations.Create) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + var zone *zones.Zone + + record = records.NewRecord(accountID, zoneID, record.Name, record.Type, record.Content, record.TTL) + err = m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + zone, err = transaction.GetZoneByID(ctx, store.LockingStrengthUpdate, accountID, zoneID) + if err != nil { + return fmt.Errorf("failed to get zone: %w", err) + } + + err = validateRecordConflicts(ctx, transaction, zone, record) + if err != nil { + return err + } + + if err = transaction.CreateDNSRecord(ctx, record); err != nil { + return fmt.Errorf("failed to create dns record: %w", err) + } + + err = transaction.IncrementNetworkSerial(ctx, accountID) + if err != nil { + return fmt.Errorf("failed to increment network serial: %w", err) + } + + return nil + }) + if err != nil { + return nil, err + } + + meta := record.EventMeta(zone.ID, zone.Name) + m.accountManager.StoreEvent(ctx, userID, record.ID, accountID, activity.DNSRecordCreated, meta) + + go m.accountManager.UpdateAccountPeers(ctx, accountID) + + return record, nil +} + +func (m *managerImpl) UpdateRecord(ctx context.Context, accountID, userID, zoneID string, updatedRecord *records.Record) (*records.Record, error) { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Dns, operations.Update) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + var zone *zones.Zone + var record *records.Record + + err = m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + zone, err = transaction.GetZoneByID(ctx, store.LockingStrengthUpdate, accountID, zoneID) + if err != nil { + return fmt.Errorf("failed to get zone: %w", err) + } + + record, err = transaction.GetDNSRecordByID(ctx, store.LockingStrengthUpdate, accountID, zoneID, updatedRecord.ID) + if err != nil { + return fmt.Errorf("failed to get record: %w", err) + } + + hasChanges := record.Name != updatedRecord.Name || record.Type != updatedRecord.Type || record.Content != updatedRecord.Content + + record.Name = updatedRecord.Name + record.Type = updatedRecord.Type + record.Content = updatedRecord.Content + record.TTL = updatedRecord.TTL + + if hasChanges { + if err = validateRecordConflicts(ctx, transaction, zone, record); err != nil { + return err + } + } + + if err = transaction.UpdateDNSRecord(ctx, record); err != nil { + return fmt.Errorf("failed to update dns record: %w", err) + } + + err = transaction.IncrementNetworkSerial(ctx, accountID) + if err != nil { + return fmt.Errorf("failed to increment network serial: %w", err) + } + + return nil + }) + if err != nil { + return nil, err + } + + meta := record.EventMeta(zone.ID, zone.Name) + m.accountManager.StoreEvent(ctx, userID, record.ID, accountID, activity.DNSRecordUpdated, meta) + + go m.accountManager.UpdateAccountPeers(ctx, accountID) + + return record, nil +} + +func (m *managerImpl) DeleteRecord(ctx context.Context, accountID, userID, zoneID, recordID string) error { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Dns, operations.Delete) + if err != nil { + return status.NewPermissionValidationError(err) + } + if !ok { + return status.NewPermissionDeniedError() + } + + var record *records.Record + var zone *zones.Zone + + err = m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + zone, err = transaction.GetZoneByID(ctx, store.LockingStrengthUpdate, accountID, zoneID) + if err != nil { + return fmt.Errorf("failed to get zone: %w", err) + } + + record, err = transaction.GetDNSRecordByID(ctx, store.LockingStrengthUpdate, accountID, zoneID, recordID) + if err != nil { + return fmt.Errorf("failed to get record: %w", err) + } + + err = transaction.DeleteDNSRecord(ctx, accountID, zoneID, recordID) + if err != nil { + return fmt.Errorf("failed to delete dns record: %w", err) + } + + err = transaction.IncrementNetworkSerial(ctx, accountID) + if err != nil { + return fmt.Errorf("failed to increment network serial: %w", err) + } + + return nil + }) + if err != nil { + return err + } + + meta := record.EventMeta(zone.ID, zone.Name) + m.accountManager.StoreEvent(ctx, userID, recordID, accountID, activity.DNSRecordDeleted, meta) + + go m.accountManager.UpdateAccountPeers(ctx, accountID) + + return nil +} + +// validateRecordConflicts checks for duplicate records and CNAME conflicts +func validateRecordConflicts(ctx context.Context, transaction store.Store, zone *zones.Zone, record *records.Record) error { + if record.Name != zone.Domain && !strings.HasSuffix(record.Name, "."+zone.Domain) { + return status.Errorf(status.InvalidArgument, "record name does not belong to zone") + } + + existingRecords, err := transaction.GetZoneDNSRecordsByName(ctx, store.LockingStrengthNone, zone.AccountID, zone.ID, record.Name) + if err != nil { + return fmt.Errorf("failed to check existing records: %w", err) + } + + for _, existing := range existingRecords { + if existing.ID == record.ID { + continue + } + + if existing.Type == record.Type && existing.Content == record.Content { + return status.Errorf(status.AlreadyExists, "identical record already exists") + } + + if record.Type == records.RecordTypeCNAME || existing.Type == records.RecordTypeCNAME { + return status.Errorf(status.InvalidArgument, + "An A, AAAA, or CNAME record with name %s already exists", record.Name) + } + } + + return nil +} diff --git a/management/internals/modules/zones/records/manager/manager_test.go b/management/internals/modules/zones/records/manager/manager_test.go new file mode 100644 index 000000000..0a962e0f4 --- /dev/null +++ b/management/internals/modules/zones/records/manager/manager_test.go @@ -0,0 +1,573 @@ +package manager + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/internals/modules/zones" + "github.com/netbirdio/netbird/management/internals/modules/zones/records" + "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/management/server/mock_server" + "github.com/netbirdio/netbird/management/server/permissions" + "github.com/netbirdio/netbird/management/server/permissions/modules" + "github.com/netbirdio/netbird/management/server/permissions/operations" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/management/status" +) + +const ( + testAccountID = "test-account-id" + testUserID = "test-user-id" + testRecordID = "test-record-id" + testGroupID = "test-group-id" +) + +func setupTest(t *testing.T) (*managerImpl, store.Store, *zones.Zone, *mock_server.MockAccountManager, *permissions.MockManager, *gomock.Controller, func()) { + t.Helper() + + ctx := context.Background() + testStore, cleanup, err := store.NewTestStoreFromSQL(ctx, "", t.TempDir()) + require.NoError(t, err) + + err = testStore.SaveAccount(ctx, &types.Account{ + Id: testAccountID, + Groups: map[string]*types.Group{ + testGroupID: { + ID: testGroupID, + Name: "Test Group", + }, + }, + }) + require.NoError(t, err) + + zone := zones.NewZone(testAccountID, "Test Zone", "example.com", true, true, []string{testGroupID}) + err = testStore.CreateZone(ctx, zone) + require.NoError(t, err) + + ctrl := gomock.NewController(t) + mockAccountManager := &mock_server.MockAccountManager{} + mockPermissionsManager := permissions.NewMockManager(ctrl) + + manager := &managerImpl{ + store: testStore, + accountManager: mockAccountManager, + permissionsManager: mockPermissionsManager, + } + + return manager, testStore, zone, mockAccountManager, mockPermissionsManager, ctrl, cleanup +} + +func TestManagerImpl_GetAllRecords(t *testing.T) { + ctx := context.Background() + + t.Run("success", func(t *testing.T) { + manager, testStore, zone, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + record1 := records.NewRecord(testAccountID, zone.ID, "api.example.com", records.RecordTypeA, "192.168.1.1", 300) + err := testStore.CreateDNSRecord(ctx, record1) + require.NoError(t, err) + + record2 := records.NewRecord(testAccountID, zone.ID, "www.example.com", records.RecordTypeA, "192.168.1.2", 300) + err = testStore.CreateDNSRecord(ctx, record2) + require.NoError(t, err) + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Read). + Return(true, nil) + + result, err := manager.GetAllRecords(ctx, testAccountID, testUserID, zone.ID) + require.NoError(t, err) + assert.Len(t, result, 2) + assert.Equal(t, record1.ID, result[0].ID) + assert.Equal(t, record2.ID, result[1].ID) + }) + + t.Run("permission denied", func(t *testing.T) { + manager, _, zone, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Read). + Return(false, nil) + + result, err := manager.GetAllRecords(ctx, testAccountID, testUserID, zone.ID) + require.Error(t, err) + assert.Nil(t, result) + s, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, status.PermissionDenied, s.Type()) + }) + + t.Run("permission validation error", func(t *testing.T) { + manager, _, zone, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Read). + Return(false, status.Errorf(status.Internal, "permission check failed")) + + result, err := manager.GetAllRecords(ctx, testAccountID, testUserID, zone.ID) + require.Error(t, err) + assert.Nil(t, result) + }) +} + +func TestManagerImpl_GetRecord(t *testing.T) { + ctx := context.Background() + + t.Run("success", func(t *testing.T) { + manager, testStore, zone, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + record := records.NewRecord(testAccountID, zone.ID, "api.example.com", records.RecordTypeA, "192.168.1.1", 300) + err := testStore.CreateDNSRecord(ctx, record) + require.NoError(t, err) + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Read). + Return(true, nil) + + result, err := manager.GetRecord(ctx, testAccountID, testUserID, zone.ID, record.ID) + require.NoError(t, err) + assert.Equal(t, record.ID, result.ID) + assert.Equal(t, record.Name, result.Name) + assert.Equal(t, record.Type, result.Type) + assert.Equal(t, record.Content, result.Content) + assert.Equal(t, record.TTL, result.TTL) + }) + + t.Run("permission denied", func(t *testing.T) { + manager, _, zone, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Read). + Return(false, nil) + + result, err := manager.GetRecord(ctx, testAccountID, testUserID, zone.ID, testRecordID) + require.Error(t, err) + assert.Nil(t, result) + s, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, status.PermissionDenied, s.Type()) + }) +} + +func TestManagerImpl_CreateRecord(t *testing.T) { + ctx := context.Background() + + t.Run("success - A record", func(t *testing.T) { + manager, _, zone, mockAccountManager, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + inputRecord := &records.Record{ + Name: "api.example.com", + Type: records.RecordTypeA, + Content: "192.168.1.1", + TTL: 300, + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Create). + Return(true, nil) + + mockAccountManager.StoreEventFunc = func(ctx context.Context, initiatorID, targetID, accountID string, activityID activity.ActivityDescriber, meta map[string]any) { + assert.Equal(t, testUserID, initiatorID) + assert.Equal(t, testAccountID, accountID) + assert.Equal(t, activity.DNSRecordCreated, activityID) + } + + result, err := manager.CreateRecord(ctx, testAccountID, testUserID, zone.ID, inputRecord) + require.NoError(t, err) + assert.NotNil(t, result) + assert.NotEmpty(t, result.ID) + assert.Equal(t, testAccountID, result.AccountID) + assert.Equal(t, zone.ID, result.ZoneID) + assert.Equal(t, inputRecord.Name, result.Name) + assert.Equal(t, inputRecord.Type, result.Type) + assert.Equal(t, inputRecord.Content, result.Content) + assert.Equal(t, inputRecord.TTL, result.TTL) + }) + + t.Run("success - AAAA record", func(t *testing.T) { + manager, _, zone, mockAccountManager, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + inputRecord := &records.Record{ + Name: "ipv6.example.com", + Type: records.RecordTypeAAAA, + Content: "2001:db8::1", + TTL: 600, + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Create). + Return(true, nil) + + mockAccountManager.StoreEventFunc = func(ctx context.Context, initiatorID, targetID, accountID string, activityID activity.ActivityDescriber, meta map[string]any) { + assert.Equal(t, testUserID, initiatorID) + assert.Equal(t, testAccountID, accountID) + assert.Equal(t, activity.DNSRecordCreated, activityID) + } + + result, err := manager.CreateRecord(ctx, testAccountID, testUserID, zone.ID, inputRecord) + require.NoError(t, err) + assert.NotNil(t, result) + assert.Equal(t, inputRecord.Type, result.Type) + assert.Equal(t, inputRecord.Content, result.Content) + }) + + t.Run("success - CNAME record", func(t *testing.T) { + manager, _, zone, mockAccountManager, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + inputRecord := &records.Record{ + Name: "www.example.com", + Type: records.RecordTypeCNAME, + Content: "example.com", + TTL: 300, + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Create). + Return(true, nil) + + mockAccountManager.StoreEventFunc = func(ctx context.Context, initiatorID, targetID, accountID string, activityID activity.ActivityDescriber, meta map[string]any) { + assert.Equal(t, testUserID, initiatorID) + assert.Equal(t, testAccountID, accountID) + assert.Equal(t, activity.DNSRecordCreated, activityID) + } + + result, err := manager.CreateRecord(ctx, testAccountID, testUserID, zone.ID, inputRecord) + require.NoError(t, err) + assert.NotNil(t, result) + assert.Equal(t, inputRecord.Type, result.Type) + assert.Equal(t, inputRecord.Content, result.Content) + }) + + t.Run("permission denied", func(t *testing.T) { + manager, _, zone, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + inputRecord := &records.Record{ + Name: "api.example.com", + Type: records.RecordTypeA, + Content: "192.168.1.1", + TTL: 300, + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Create). + Return(false, nil) + + result, err := manager.CreateRecord(ctx, testAccountID, testUserID, zone.ID, inputRecord) + require.Error(t, err) + assert.Nil(t, result) + s, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, status.PermissionDenied, s.Type()) + }) + + t.Run("record name not in zone", func(t *testing.T) { + manager, _, zone, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + inputRecord := &records.Record{ + Name: "api.different.com", + Type: records.RecordTypeA, + Content: "192.168.1.1", + TTL: 300, + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Create). + Return(true, nil) + + result, err := manager.CreateRecord(ctx, testAccountID, testUserID, zone.ID, inputRecord) + require.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), "does not belong to zone") + }) + + t.Run("duplicate record", func(t *testing.T) { + manager, testStore, zone, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + existingRecord := records.NewRecord(testAccountID, zone.ID, "api.example.com", records.RecordTypeA, "192.168.1.1", 300) + err := testStore.CreateDNSRecord(ctx, existingRecord) + require.NoError(t, err) + + inputRecord := &records.Record{ + Name: "api.example.com", + Type: records.RecordTypeA, + Content: "192.168.1.1", + TTL: 300, + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Create). + Return(true, nil) + + result, err := manager.CreateRecord(ctx, testAccountID, testUserID, zone.ID, inputRecord) + require.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), "identical record already exists") + }) + + t.Run("CNAME conflict with existing A record", func(t *testing.T) { + manager, testStore, zone, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + existingRecord := records.NewRecord(testAccountID, zone.ID, "api.example.com", records.RecordTypeA, "192.168.1.1", 300) + err := testStore.CreateDNSRecord(ctx, existingRecord) + require.NoError(t, err) + + inputRecord := &records.Record{ + Name: "api.example.com", + Type: records.RecordTypeCNAME, + Content: "example.com", + TTL: 300, + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Create). + Return(true, nil) + + result, err := manager.CreateRecord(ctx, testAccountID, testUserID, zone.ID, inputRecord) + require.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), "already exists") + }) +} + +func TestManagerImpl_UpdateRecord(t *testing.T) { + ctx := context.Background() + + t.Run("success", func(t *testing.T) { + manager, testStore, zone, mockAccountManager, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + existingRecord := records.NewRecord(testAccountID, zone.ID, "api.example.com", records.RecordTypeA, "192.168.1.1", 300) + err := testStore.CreateDNSRecord(ctx, existingRecord) + require.NoError(t, err) + + updatedRecord := &records.Record{ + ID: existingRecord.ID, + Name: "api.example.com", + Type: records.RecordTypeA, + Content: "192.168.1.100", // Changed IP + TTL: 600, // Changed TTL + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Update). + Return(true, nil) + + storeEventCalled := false + mockAccountManager.StoreEventFunc = func(ctx context.Context, initiatorID, targetID, accountID string, activityID activity.ActivityDescriber, meta map[string]any) { + storeEventCalled = true + assert.Equal(t, testUserID, initiatorID) + assert.Equal(t, existingRecord.ID, targetID) + assert.Equal(t, testAccountID, accountID) + assert.Equal(t, activity.DNSRecordUpdated, activityID) + } + + result, err := manager.UpdateRecord(ctx, testAccountID, testUserID, zone.ID, updatedRecord) + require.NoError(t, err) + assert.NotNil(t, result) + assert.Equal(t, updatedRecord.Content, result.Content) + assert.Equal(t, updatedRecord.TTL, result.TTL) + assert.True(t, storeEventCalled, "StoreEvent should have been called") + }) + + t.Run("update only TTL - no validation", func(t *testing.T) { + manager, testStore, zone, mockAccountManager, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + existingRecord := records.NewRecord(testAccountID, zone.ID, "api.example.com", records.RecordTypeA, "192.168.1.1", 300) + err := testStore.CreateDNSRecord(ctx, existingRecord) + require.NoError(t, err) + + updatedRecord := &records.Record{ + ID: existingRecord.ID, + Name: existingRecord.Name, + Type: existingRecord.Type, + Content: existingRecord.Content, + TTL: 600, // Only TTL changed + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Update). + Return(true, nil) + + mockAccountManager.StoreEventFunc = func(ctx context.Context, initiatorID, targetID, accountID string, activityID activity.ActivityDescriber, meta map[string]any) { + // Event should be stored + } + + result, err := manager.UpdateRecord(ctx, testAccountID, testUserID, zone.ID, updatedRecord) + require.NoError(t, err) + assert.NotNil(t, result) + assert.Equal(t, 600, result.TTL) + }) + + t.Run("permission denied", func(t *testing.T) { + manager, _, zone, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + updatedRecord := &records.Record{ + ID: testRecordID, + Name: "api.example.com", + Type: records.RecordTypeA, + Content: "192.168.1.100", + TTL: 600, + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Update). + Return(false, nil) + + result, err := manager.UpdateRecord(ctx, testAccountID, testUserID, zone.ID, updatedRecord) + require.Error(t, err) + assert.Nil(t, result) + s, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, status.PermissionDenied, s.Type()) + }) + + t.Run("record not found", func(t *testing.T) { + manager, _, zone, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + updatedRecord := &records.Record{ + ID: "non-existent-record", + Name: "api.example.com", + Type: records.RecordTypeA, + Content: "192.168.1.100", + TTL: 600, + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Update). + Return(true, nil) + + result, err := manager.UpdateRecord(ctx, testAccountID, testUserID, zone.ID, updatedRecord) + require.Error(t, err) + assert.Nil(t, result) + }) + + t.Run("update creates duplicate", func(t *testing.T) { + manager, testStore, zone, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + record1 := records.NewRecord(testAccountID, zone.ID, "api.example.com", records.RecordTypeA, "192.168.1.1", 300) + err := testStore.CreateDNSRecord(ctx, record1) + require.NoError(t, err) + + record2 := records.NewRecord(testAccountID, zone.ID, "www.example.com", records.RecordTypeA, "192.168.1.2", 300) + err = testStore.CreateDNSRecord(ctx, record2) + require.NoError(t, err) + + updatedRecord := &records.Record{ + ID: record2.ID, + Name: "api.example.com", + Type: records.RecordTypeA, + Content: "192.168.1.1", + TTL: 300, + } + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Update). + Return(true, nil) + + result, err := manager.UpdateRecord(ctx, testAccountID, testUserID, zone.ID, updatedRecord) + require.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), "identical record already exists") + }) +} + +func TestManagerImpl_DeleteRecord(t *testing.T) { + ctx := context.Background() + + t.Run("success", func(t *testing.T) { + manager, testStore, zone, mockAccountManager, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + record := records.NewRecord(testAccountID, zone.ID, "api.example.com", records.RecordTypeA, "192.168.1.1", 300) + err := testStore.CreateDNSRecord(ctx, record) + require.NoError(t, err) + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Delete). + Return(true, nil) + + storeEventCalled := false + mockAccountManager.StoreEventFunc = func(ctx context.Context, initiatorID, targetID, accountID string, activityID activity.ActivityDescriber, meta map[string]any) { + storeEventCalled = true + assert.Equal(t, testUserID, initiatorID) + assert.Equal(t, record.ID, targetID) + assert.Equal(t, testAccountID, accountID) + assert.Equal(t, activity.DNSRecordDeleted, activityID) + } + + err = manager.DeleteRecord(ctx, testAccountID, testUserID, zone.ID, record.ID) + require.NoError(t, err) + assert.True(t, storeEventCalled, "StoreEvent should have been called") + + _, err = testStore.GetDNSRecordByID(ctx, store.LockingStrengthNone, testAccountID, zone.ID, record.ID) + require.Error(t, err) + }) + + t.Run("permission denied", func(t *testing.T) { + manager, _, zone, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Delete). + Return(false, nil) + + err := manager.DeleteRecord(ctx, testAccountID, testUserID, zone.ID, testRecordID) + require.Error(t, err) + s, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, status.PermissionDenied, s.Type()) + }) + + t.Run("record not found", func(t *testing.T) { + manager, _, zone, _, mockPermissionsManager, ctrl, cleanup := setupTest(t) + defer cleanup() + defer ctrl.Finish() + + mockPermissionsManager.EXPECT(). + ValidateUserPermissions(ctx, testAccountID, testUserID, modules.Dns, operations.Delete). + Return(true, nil) + + err := manager.DeleteRecord(ctx, testAccountID, testUserID, zone.ID, "non-existent-record") + require.Error(t, err) + }) +} diff --git a/management/internals/modules/zones/records/record.go b/management/internals/modules/zones/records/record.go new file mode 100644 index 000000000..e44de08f4 --- /dev/null +++ b/management/internals/modules/zones/records/record.go @@ -0,0 +1,129 @@ +package records + +import ( + "errors" + "net" + + "github.com/rs/xid" + + "github.com/netbirdio/netbird/management/server/util" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +type RecordType string + +const ( + RecordTypeA RecordType = "A" + RecordTypeAAAA RecordType = "AAAA" + RecordTypeCNAME RecordType = "CNAME" +) + +type Record struct { + AccountID string `gorm:"index"` + ZoneID string `gorm:"index"` + ID string `gorm:"primaryKey"` + Name string + Type RecordType + Content string + TTL int +} + +func NewRecord(accountID, zoneID, name string, recordType RecordType, content string, ttl int) *Record { + return &Record{ + ID: xid.New().String(), + AccountID: accountID, + ZoneID: zoneID, + Name: name, + Type: recordType, + Content: content, + TTL: ttl, + } +} + +func (r *Record) ToAPIResponse() *api.DNSRecord { + recordType := api.DNSRecordType(r.Type) + return &api.DNSRecord{ + Id: r.ID, + Name: r.Name, + Type: recordType, + Content: r.Content, + Ttl: r.TTL, + } +} + +func (r *Record) FromAPIRequest(req *api.DNSRecordRequest) { + r.Name = req.Name + r.Type = RecordType(req.Type) + r.Content = req.Content + r.TTL = req.Ttl +} + +func (r *Record) Validate() error { + if r.Name == "" { + return errors.New("record name is required") + } + + if !util.IsValidDomain(r.Name) { + return errors.New("invalid record name format") + } + + if r.Type == "" { + return errors.New("record type is required") + } + + switch r.Type { + case RecordTypeA: + if err := validateIPv4(r.Content); err != nil { + return err + } + case RecordTypeAAAA: + if err := validateIPv6(r.Content); err != nil { + return err + } + case RecordTypeCNAME: + if !util.IsValidDomain(r.Content) { + return errors.New("invalid CNAME record format") + } + default: + return errors.New("invalid record type, must be A, AAAA, or CNAME") + } + + if r.TTL < 0 { + return errors.New("TTL cannot be negative") + } + + return nil +} + +func (r *Record) EventMeta(zoneID, zoneName string) map[string]any { + return map[string]any{ + "name": r.Name, + "type": string(r.Type), + "content": r.Content, + "ttl": r.TTL, + "zone_id": zoneID, + "zone_name": zoneName, + } +} + +func validateIPv4(content string) error { + if content == "" { + return errors.New("A record is required") //nolint:staticcheck + } + ip := net.ParseIP(content) + if ip == nil || ip.To4() == nil { + return errors.New("A record must be a valid IPv4 address") //nolint:staticcheck + } + return nil +} + +func validateIPv6(content string) error { + if content == "" { + return errors.New("AAAA record is required") + } + ip := net.ParseIP(content) + if ip == nil || ip.To4() != nil { + return errors.New("AAAA record must be a valid IPv6 address") + } + return nil +} diff --git a/management/internals/modules/zones/zone.go b/management/internals/modules/zones/zone.go new file mode 100644 index 000000000..27adac1ac --- /dev/null +++ b/management/internals/modules/zones/zone.go @@ -0,0 +1,89 @@ +package zones + +import ( + "errors" + + "github.com/rs/xid" + + "github.com/netbirdio/netbird/management/internals/modules/zones/records" + "github.com/netbirdio/netbird/management/server/util" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +type Zone struct { + ID string `gorm:"primaryKey"` + AccountID string `gorm:"index"` + Name string + Domain string + Enabled bool + EnableSearchDomain bool + DistributionGroups []string `gorm:"serializer:json"` + Records []*records.Record `gorm:"foreignKey:ZoneID;references:ID"` +} + +func NewZone(accountID, name, domain string, enabled, enableSearchDomain bool, distributionGroups []string) *Zone { + return &Zone{ + ID: xid.New().String(), + AccountID: accountID, + Name: name, + Domain: domain, + Enabled: enabled, + EnableSearchDomain: enableSearchDomain, + DistributionGroups: distributionGroups, + } +} + +func (z *Zone) ToAPIResponse() *api.Zone { + apiRecords := make([]api.DNSRecord, 0, len(z.Records)) + for _, record := range z.Records { + if apiRecord := record.ToAPIResponse(); apiRecord != nil { + apiRecords = append(apiRecords, *apiRecord) + } + } + + return &api.Zone{ + DistributionGroups: z.DistributionGroups, + Domain: z.Domain, + EnableSearchDomain: z.EnableSearchDomain, + Enabled: z.Enabled, + Id: z.ID, + Name: z.Name, + Records: apiRecords, + } +} + +func (z *Zone) FromAPIRequest(req *api.ZoneRequest) { + z.Name = req.Name + z.Domain = req.Domain + z.EnableSearchDomain = req.EnableSearchDomain + z.DistributionGroups = req.DistributionGroups + + enabled := true + if req.Enabled != nil { + enabled = *req.Enabled + } + z.Enabled = enabled +} + +func (z *Zone) Validate() error { + if z.Name == "" { + return errors.New("zone name is required") + } + if len(z.Name) > 255 { + return errors.New("zone name exceeds maximum length of 255 characters") + } + + if !util.IsValidDomain(z.Domain) { + return errors.New("invalid zone domain format") + } + + if len(z.DistributionGroups) == 0 { + return errors.New("at least one distribution group is required") + } + + return nil +} + +func (z *Zone) EventMeta() map[string]any { + return map[string]any{"name": z.Name, "domain": z.Domain} +} diff --git a/management/internals/server/boot.go b/management/internals/server/boot.go index b2afe214e..5d312ef94 100644 --- a/management/internals/server/boot.go +++ b/management/internals/server/boot.go @@ -92,7 +92,7 @@ func (s *BaseServer) EventStore() activity.Store { func (s *BaseServer) APIHandler() http.Handler { return Create(s, func() http.Handler { - httpAPIHandler, err := nbhttp.NewAPIHandler(context.Background(), s.AccountManager(), s.NetworksManager(), s.ResourcesManager(), s.RoutesManager(), s.GroupsManager(), s.GeoLocationManager(), s.AuthManager(), s.Metrics(), s.IntegratedValidator(), s.ProxyController(), s.PermissionsManager(), s.PeersManager(), s.SettingsManager(), s.NetworkMapController(), s.IdpManager()) + httpAPIHandler, err := nbhttp.NewAPIHandler(context.Background(), s.AccountManager(), s.NetworksManager(), s.ResourcesManager(), s.RoutesManager(), s.GroupsManager(), s.GeoLocationManager(), s.AuthManager(), s.Metrics(), s.IntegratedValidator(), s.ProxyController(), s.PermissionsManager(), s.PeersManager(), s.SettingsManager(), s.ZonesManager(), s.RecordsManager(), s.NetworkMapController(), s.IdpManager()) if err != nil { log.Fatalf("failed to create API handler: %v", err) } diff --git a/management/internals/server/modules.go b/management/internals/server/modules.go index d179f2b68..9649caead 100644 --- a/management/internals/server/modules.go +++ b/management/internals/server/modules.go @@ -8,6 +8,10 @@ import ( "github.com/netbirdio/management-integrations/integrations" "github.com/netbirdio/netbird/management/internals/modules/peers" + "github.com/netbirdio/netbird/management/internals/modules/zones" + zonesManager "github.com/netbirdio/netbird/management/internals/modules/zones/manager" + "github.com/netbirdio/netbird/management/internals/modules/zones/records" + recordsManager "github.com/netbirdio/netbird/management/internals/modules/zones/records/manager" "github.com/netbirdio/netbird/management/server" "github.com/netbirdio/netbird/management/server/account" "github.com/netbirdio/netbird/management/server/geolocation" @@ -158,3 +162,15 @@ func (s *BaseServer) NetworksManager() networks.Manager { return networks.NewManager(s.Store(), s.PermissionsManager(), s.ResourcesManager(), s.RoutesManager(), s.AccountManager()) }) } + +func (s *BaseServer) ZonesManager() zones.Manager { + return Create(s, func() zones.Manager { + return zonesManager.NewManager(s.Store(), s.AccountManager(), s.PermissionsManager(), s.DNSDomain()) + }) +} + +func (s *BaseServer) RecordsManager() records.Manager { + return Create(s, func() records.Manager { + return recordsManager.NewManager(s.Store(), s.AccountManager(), s.PermissionsManager()) + }) +} diff --git a/management/internals/shared/grpc/conversion.go b/management/internals/shared/grpc/conversion.go index c4d2e92f9..ba06b81a0 100644 --- a/management/internals/shared/grpc/conversion.go +++ b/management/internals/shared/grpc/conversion.go @@ -374,9 +374,10 @@ func shouldUsePortRange(rule *proto.FirewallRule) bool { // Helper function to convert nbdns.CustomZone to proto.CustomZone func convertToProtoCustomZone(zone nbdns.CustomZone) *proto.CustomZone { protoZone := &proto.CustomZone{ - Domain: zone.Domain, - Records: make([]*proto.SimpleRecord, 0, len(zone.Records)), - NonAuthoritative: zone.NonAuthoritative, + Domain: zone.Domain, + Records: make([]*proto.SimpleRecord, 0, len(zone.Records)), + SearchDomainDisabled: zone.SearchDomainDisabled, + NonAuthoritative: zone.NonAuthoritative, } for _, record := range zone.Records { protoZone.Records = append(protoZone.Records, &proto.SimpleRecord{ diff --git a/management/server/account.go b/management/server/account.go index 9785f446c..61882411b 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -295,7 +295,7 @@ func (am *DefaultAccountManager) UpdateAccountSettings(ctx context.Context, acco return err } - if err = am.validateSettingsUpdate(ctx, newSettings, oldSettings, userID, accountID); err != nil { + if err = am.validateSettingsUpdate(ctx, transaction, newSettings, oldSettings, userID, accountID); err != nil { return err } @@ -388,7 +388,7 @@ func (am *DefaultAccountManager) UpdateAccountSettings(ctx context.Context, acco return newSettings, nil } -func (am *DefaultAccountManager) validateSettingsUpdate(ctx context.Context, newSettings, oldSettings *types.Settings, userID, accountID string) error { +func (am *DefaultAccountManager) validateSettingsUpdate(ctx context.Context, transaction store.Store, newSettings, oldSettings *types.Settings, userID, accountID string) error { halfYearLimit := 180 * 24 * time.Hour if newSettings.PeerLoginExpiration > halfYearLimit { return status.Errorf(status.InvalidArgument, "peer login expiration can't be larger than 180 days") @@ -402,6 +402,18 @@ func (am *DefaultAccountManager) validateSettingsUpdate(ctx context.Context, new return status.Errorf(status.InvalidArgument, "invalid domain \"%s\" provided for DNS domain", newSettings.DNSDomain) } + if newSettings.DNSDomain != oldSettings.DNSDomain && newSettings.DNSDomain != "" { + existingZone, err := transaction.GetZoneByDomain(ctx, accountID, newSettings.DNSDomain) + if err != nil { + if sErr, ok := status.FromError(err); !ok || sErr.Type() != status.NotFound { + return fmt.Errorf("failed to check existing zone: %w", err) + } + } + if existingZone != nil { + return status.Errorf(status.InvalidArgument, "peer DNS domain %s conflicts with existing custom DNS zone", newSettings.DNSDomain) + } + } + return am.integratedPeerValidator.ValidateExtraSettings(ctx, newSettings.Extra, oldSettings.Extra, userID, accountID) } diff --git a/management/server/account_test.go b/management/server/account_test.go index b5f15ed98..3279a373b 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -27,6 +27,7 @@ import ( "github.com/netbirdio/netbird/management/internals/controllers/network_map/update_channel" "github.com/netbirdio/netbird/management/internals/modules/peers" ephemeral_manager "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" + "github.com/netbirdio/netbird/management/internals/modules/zones" "github.com/netbirdio/netbird/management/internals/server/config" nbAccount "github.com/netbirdio/netbird/management/server/account" "github.com/netbirdio/netbird/management/server/activity" @@ -397,7 +398,7 @@ func TestAccount_GetPeerNetworkMap(t *testing.T) { } customZone := account.GetPeersCustomZone(context.Background(), "netbird.io") - networkMap := account.GetPeerNetworkMap(context.Background(), testCase.peerID, customZone, validatedPeers, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap(), nil, account.GetActiveGroupUsers()) + networkMap := account.GetPeerNetworkMap(context.Background(), testCase.peerID, customZone, nil, validatedPeers, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap(), nil, account.GetActiveGroupUsers()) assert.Len(t, networkMap.Peers, len(testCase.expectedPeers)) assert.Len(t, networkMap.OfflinePeers, len(testCase.expectedOfflinePeers)) } @@ -1676,7 +1677,7 @@ func TestAccount_GetRoutesToSync(t *testing.T) { }, } - routes := account.GetRoutesToSync(context.Background(), "peer-2", []*nbpeer.Peer{{Key: "peer-1"}, {Key: "peer-3"}}) + routes := account.GetRoutesToSync(context.Background(), "peer-2", []*nbpeer.Peer{{Key: "peer-1"}, {Key: "peer-3"}}, account.GetPeerGroups("peer-2")) assert.Len(t, routes, 2) routeIDs := make(map[route.ID]struct{}, 2) @@ -1686,7 +1687,7 @@ func TestAccount_GetRoutesToSync(t *testing.T) { assert.Contains(t, routeIDs, route.ID("route-2")) assert.Contains(t, routeIDs, route.ID("route-3")) - emptyRoutes := account.GetRoutesToSync(context.Background(), "peer-3", []*nbpeer.Peer{{Key: "peer-1"}, {Key: "peer-2"}}) + emptyRoutes := account.GetRoutesToSync(context.Background(), "peer-3", []*nbpeer.Peer{{Key: "peer-1"}, {Key: "peer-2"}}, account.GetPeerGroups("peer-3")) assert.Len(t, emptyRoutes, 0) } @@ -2095,6 +2096,35 @@ func TestDefaultAccountManager_UpdateAccountSettings_PeerApproval(t *testing.T) } } +func TestDefaultAccountManager_UpdateAccountSettings_DNSDomainConflict(t *testing.T) { + manager, _, err := createManager(t) + require.NoError(t, err, "unable to create account manager") + + accountID, err := manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userID}) + require.NoError(t, err, "unable to create an account") + + ctx := context.Background() + err = manager.Store.CreateZone(ctx, &zones.Zone{ + ID: "test-zone-id", + AccountID: accountID, + Name: "Test Zone", + Domain: "custom.example.com", + Enabled: true, + EnableSearchDomain: false, + DistributionGroups: []string{}, + }) + require.NoError(t, err, "unable to create custom DNS zone") + + _, err = manager.UpdateAccountSettings(ctx, accountID, userID, &types.Settings{ + DNSDomain: "custom.example.com", + PeerLoginExpiration: time.Hour, + PeerLoginExpirationEnabled: false, + Extra: &types.ExtraSettings{}, + }) + require.Error(t, err, "expecting to fail when DNS domain conflicts with custom zone") + assert.Contains(t, err.Error(), "conflicts with existing custom DNS zone") +} + func TestAccount_GetExpiredPeers(t *testing.T) { type test struct { name string diff --git a/management/server/activity/codes.go b/management/server/activity/codes.go index 7b939ddff..7593e1230 100644 --- a/management/server/activity/codes.go +++ b/management/server/activity/codes.go @@ -187,6 +187,14 @@ const ( IdentityProviderUpdated Activity = 94 IdentityProviderDeleted Activity = 95 + DNSZoneCreated Activity = 96 + DNSZoneUpdated Activity = 97 + DNSZoneDeleted Activity = 98 + + DNSRecordCreated Activity = 99 + DNSRecordUpdated Activity = 100 + DNSRecordDeleted Activity = 101 + AccountDeleted Activity = 99999 ) @@ -303,6 +311,14 @@ var activityMap = map[Activity]Code{ IdentityProviderCreated: {"Identity provider created", "identityprovider.create"}, IdentityProviderUpdated: {"Identity provider updated", "identityprovider.update"}, IdentityProviderDeleted: {"Identity provider deleted", "identityprovider.delete"}, + + DNSZoneCreated: {"DNS zone created", "dns.zone.create"}, + DNSZoneUpdated: {"DNS zone updated", "dns.zone.update"}, + DNSZoneDeleted: {"DNS zone deleted", "dns.zone.delete"}, + + DNSRecordCreated: {"DNS zone record created", "dns.zone.record.create"}, + DNSRecordUpdated: {"DNS zone record updated", "dns.zone.record.update"}, + DNSRecordDeleted: {"DNS zone record deleted", "dns.zone.record.delete"}, } // StringCode returns a string code of the activity diff --git a/management/server/http/handler.go b/management/server/http/handler.go index bbd6b4750..64f914afe 100644 --- a/management/server/http/handler.go +++ b/management/server/http/handler.go @@ -15,7 +15,10 @@ import ( "github.com/netbirdio/management-integrations/integrations" "github.com/netbirdio/netbird/management/internals/controllers/network_map" - + "github.com/netbirdio/netbird/management/internals/modules/zones" + zonesManager "github.com/netbirdio/netbird/management/internals/modules/zones/manager" + "github.com/netbirdio/netbird/management/internals/modules/zones/records" + recordsManager "github.com/netbirdio/netbird/management/internals/modules/zones/records/manager" "github.com/netbirdio/netbird/management/server/account" "github.com/netbirdio/netbird/management/server/settings" @@ -56,7 +59,7 @@ const ( ) // NewAPIHandler creates the Management service HTTP API handler registering all the available endpoints. -func NewAPIHandler(ctx context.Context, accountManager account.Manager, networksManager nbnetworks.Manager, resourceManager resources.Manager, routerManager routers.Manager, groupsManager nbgroups.Manager, LocationManager geolocation.Geolocation, authManager auth.Manager, appMetrics telemetry.AppMetrics, integratedValidator integrated_validator.IntegratedValidator, proxyController port_forwarding.Controller, permissionsManager permissions.Manager, peersManager nbpeers.Manager, settingsManager settings.Manager, networkMapController network_map.Controller, idpManager idpmanager.Manager) (http.Handler, error) { +func NewAPIHandler(ctx context.Context, accountManager account.Manager, networksManager nbnetworks.Manager, resourceManager resources.Manager, routerManager routers.Manager, groupsManager nbgroups.Manager, LocationManager geolocation.Geolocation, authManager auth.Manager, appMetrics telemetry.AppMetrics, integratedValidator integrated_validator.IntegratedValidator, proxyController port_forwarding.Controller, permissionsManager permissions.Manager, peersManager nbpeers.Manager, settingsManager settings.Manager, zManager zones.Manager, rManager records.Manager, networkMapController network_map.Controller, idpManager idpmanager.Manager) (http.Handler, error) { // Register bypass paths for unauthenticated endpoints if err := bypass.AddBypassPath("/api/instance"); err != nil { @@ -138,6 +141,8 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks dns.AddEndpoints(accountManager, router) events.AddEndpoints(accountManager, router) networks.AddEndpoints(networksManager, resourceManager, routerManager, groupsManager, accountManager, router) + zonesManager.RegisterEndpoints(router, zManager) + recordsManager.RegisterEndpoints(router, rManager) idp.AddEndpoints(accountManager, router) instance.AddEndpoints(instanceManager, router) diff --git a/management/server/http/handlers/peers/peers_handler.go b/management/server/http/handlers/peers/peers_handler.go index a5c9ab0ac..b8fb3ea36 100644 --- a/management/server/http/handlers/peers/peers_handler.go +++ b/management/server/http/handlers/peers/peers_handler.go @@ -10,6 +10,7 @@ import ( "github.com/gorilla/mux" log "github.com/sirupsen/logrus" + "github.com/netbirdio/netbird/dns" "github.com/netbirdio/netbird/management/internals/controllers/network_map" "github.com/netbirdio/netbird/management/server/account" "github.com/netbirdio/netbird/management/server/activity" @@ -298,8 +299,7 @@ func (h *Handler) GetAccessiblePeers(w http.ResponseWriter, r *http.Request) { dnsDomain := h.networkMapController.GetDNSDomain(account.Settings) - customZone := account.GetPeersCustomZone(r.Context(), dnsDomain) - netMap := account.GetPeerNetworkMap(r.Context(), peerID, customZone, validPeers, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap(), nil, account.GetActiveGroupUsers()) + netMap := account.GetPeerNetworkMap(r.Context(), peerID, dns.CustomZone{}, nil, validPeers, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap(), nil, account.GetActiveGroupUsers()) util.WriteJSONObject(r.Context(), w, toAccessiblePeers(netMap, dnsDomain)) } diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index 656f72997..8c8f1a7b2 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -10,6 +10,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/netbirdio/management-integrations/integrations" + zonesManager "github.com/netbirdio/netbird/management/internals/modules/zones/manager" + recordsManager "github.com/netbirdio/netbird/management/internals/modules/zones/records/manager" "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/internals/controllers/network_map" @@ -93,8 +95,10 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee routersManagerMock := routers.NewManagerMock() groupsManagerMock := groups.NewManagerMock() peersManager := peers.NewManager(store, permissionsManager) + customZonesManager := zonesManager.NewManager(store, am, permissionsManager, "") + zoneRecordsManager := recordsManager.NewManager(store, am, permissionsManager) - apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManagerMock, resourcesManagerMock, routersManagerMock, groupsManagerMock, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, networkMapController, nil) + apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManagerMock, resourcesManagerMock, routersManagerMock, groupsManagerMock, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil) if err != nil { t.Fatalf("Failed to create API handler: %v", err) } diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index f407a35e6..7d71030eb 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -27,6 +27,8 @@ import ( "gorm.io/gorm/logger" nbdns "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/management/internals/modules/zones" + "github.com/netbirdio/netbird/management/internals/modules/zones/records" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" networkTypes "github.com/netbirdio/netbird/management/server/networks/types" @@ -123,6 +125,7 @@ func NewSqlStore(ctx context.Context, db *gorm.DB, storeEngine types.Engine, met &types.Account{}, &types.Policy{}, &types.PolicyRule{}, &route.Route{}, &nbdns.NameServerGroup{}, &installation{}, &types.ExtraSettings{}, &posture.Checks{}, &nbpeer.NetworkAddress{}, &networkTypes.Network{}, &routerTypes.NetworkRouter{}, &resourceTypes.NetworkResource{}, &types.AccountOnboarding{}, + &zones.Zone{}, &records.Record{}, ) if err != nil { return nil, fmt.Errorf("auto migratePreAuto: %w", err) @@ -4179,3 +4182,184 @@ func (s *SqlStore) GetUserIDByPeerKey(ctx context.Context, lockStrength LockingS return userID, nil } + +func (s *SqlStore) CreateZone(ctx context.Context, zone *zones.Zone) error { + result := s.db.Create(zone) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to create zone to store: %v", result.Error) + return status.Errorf(status.Internal, "failed to create zone to store") + } + + return nil +} + +func (s *SqlStore) UpdateZone(ctx context.Context, zone *zones.Zone) error { + result := s.db.Select("*").Save(zone) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to update zone to store: %v", result.Error) + return status.Errorf(status.Internal, "failed to update zone to store") + } + + return nil +} + +func (s *SqlStore) DeleteZone(ctx context.Context, accountID, zoneID string) error { + result := s.db.Delete(&zones.Zone{}, accountAndIDQueryCondition, accountID, zoneID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to delete zone from store: %v", result.Error) + return status.Errorf(status.Internal, "failed to delete zone from store") + } + + if result.RowsAffected == 0 { + return status.NewZoneNotFoundError(zoneID) + } + + return nil +} + +func (s *SqlStore) GetZoneByID(ctx context.Context, lockStrength LockingStrength, accountID, zoneID string) (*zones.Zone, error) { + tx := s.db + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var zone *zones.Zone + result := tx.Preload("Records").Take(&zone, accountAndIDQueryCondition, accountID, zoneID) + if result.Error != nil { + if errors.Is(result.Error, gorm.ErrRecordNotFound) { + return nil, status.NewZoneNotFoundError(zoneID) + } + + log.WithContext(ctx).Errorf("failed to get zone from store: %v", result.Error) + return nil, status.Errorf(status.Internal, "failed to get zone from store") + } + + return zone, nil +} + +func (s *SqlStore) GetZoneByDomain(ctx context.Context, accountID, domain string) (*zones.Zone, error) { + var zone *zones.Zone + result := s.db.Where("account_id = ? AND domain = ?", accountID, domain).First(&zone) + if result.Error != nil { + if errors.Is(result.Error, gorm.ErrRecordNotFound) { + return nil, status.NewZoneNotFoundError(domain) + } + + log.WithContext(ctx).Errorf("failed to get zone by domain from store: %v", result.Error) + return nil, status.Errorf(status.Internal, "failed to get zone by domain from store") + } + + return zone, nil +} + +func (s *SqlStore) GetAccountZones(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*zones.Zone, error) { + tx := s.db + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var zones []*zones.Zone + result := tx.Preload("Records").Find(&zones, accountIDCondition, accountID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to get zones from the store: %s", result.Error) + return nil, status.Errorf(status.Internal, "failed to get zones from store") + } + + return zones, nil +} + +func (s *SqlStore) CreateDNSRecord(ctx context.Context, record *records.Record) error { + result := s.db.Create(record) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to create dns record to store: %v", result.Error) + return status.Errorf(status.Internal, "failed to create dns record to store") + } + + return nil +} + +func (s *SqlStore) UpdateDNSRecord(ctx context.Context, record *records.Record) error { + result := s.db.Select("*").Save(record) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to update dns record to store: %v", result.Error) + return status.Errorf(status.Internal, "failed to update dns record to store") + } + + return nil +} + +func (s *SqlStore) DeleteDNSRecord(ctx context.Context, accountID, zoneID, recordID string) error { + result := s.db.Delete(&records.Record{}, "account_id = ? AND zone_id = ? AND id = ?", accountID, zoneID, recordID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to delete dns record from store: %v", result.Error) + return status.Errorf(status.Internal, "failed to delete dns record from store") + } + + if result.RowsAffected == 0 { + return status.NewDNSRecordNotFoundError(recordID) + } + + return nil +} + +func (s *SqlStore) GetDNSRecordByID(ctx context.Context, lockStrength LockingStrength, accountID, zoneID, recordID string) (*records.Record, error) { + tx := s.db + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var record *records.Record + result := tx.Where("account_id = ? AND zone_id = ? AND id = ?", accountID, zoneID, recordID).Take(&record) + if result.Error != nil { + if errors.Is(result.Error, gorm.ErrRecordNotFound) { + return nil, status.NewDNSRecordNotFoundError(recordID) + } + + log.WithContext(ctx).Errorf("failed to get dns record from store: %v", result.Error) + return nil, status.Errorf(status.Internal, "failed to get dns record from store") + } + + return record, nil +} + +func (s *SqlStore) GetZoneDNSRecords(ctx context.Context, lockStrength LockingStrength, accountID, zoneID string) ([]*records.Record, error) { + tx := s.db + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var recordsList []*records.Record + result := tx.Where("account_id = ? AND zone_id = ?", accountID, zoneID).Find(&recordsList) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to get zone dns records from the store: %s", result.Error) + return nil, status.Errorf(status.Internal, "failed to get zone dns records from store") + } + + return recordsList, nil +} + +func (s *SqlStore) GetZoneDNSRecordsByName(ctx context.Context, lockStrength LockingStrength, accountID, zoneID, name string) ([]*records.Record, error) { + tx := s.db + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var recordsList []*records.Record + result := tx.Where("account_id = ? AND zone_id = ? AND name = ?", accountID, zoneID, name).Find(&recordsList) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to get zone dns records by name from the store: %s", result.Error) + return nil, status.Errorf(status.Internal, "failed to get zone dns records by name from store") + } + + return recordsList, nil +} + +func (s *SqlStore) DeleteZoneDNSRecords(ctx context.Context, accountID, zoneID string) error { + result := s.db.Delete(&records.Record{}, "account_id = ? AND zone_id = ?", accountID, zoneID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to delete zone dns records from store: %v", result.Error) + return status.Errorf(status.Internal, "failed to delete zone dns records from store") + } + + return nil +} diff --git a/management/server/store/sql_store_test.go b/management/server/store/sql_store_test.go index 952432252..7cf42c4e8 100644 --- a/management/server/store/sql_store_test.go +++ b/management/server/store/sql_store_test.go @@ -22,6 +22,8 @@ import ( "github.com/stretchr/testify/require" nbdns "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/management/internals/modules/zones" + "github.com/netbirdio/netbird/management/internals/modules/zones/records" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" networkTypes "github.com/netbirdio/netbird/management/server/networks/types" @@ -4025,3 +4027,476 @@ func TestSqlStore_ExecuteInTransaction_Timeout(t *testing.T) { require.Error(t, err) assert.Contains(t, err.Error(), "transaction has already been committed or rolled back", "expected transaction rolled back error, got: %v", err) } + +func TestSqlStore_CreateZone(t *testing.T) { + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) + t.Cleanup(cleanup) + require.NoError(t, err) + + accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + + zone := zones.NewZone(accountID, "Test Zone", "example.com", true, false, []string{"group1"}) + + err = store.CreateZone(context.Background(), zone) + require.NoError(t, err) + + savedZone, err := store.GetZoneByID(context.Background(), LockingStrengthNone, accountID, zone.ID) + require.NoError(t, err) + require.NotNil(t, savedZone) + assert.Equal(t, zone.ID, savedZone.ID) + assert.Equal(t, zone.Name, savedZone.Name) + assert.Equal(t, zone.Domain, savedZone.Domain) + assert.Equal(t, zone.Enabled, savedZone.Enabled) + assert.Equal(t, zone.EnableSearchDomain, savedZone.EnableSearchDomain) + assert.Equal(t, zone.DistributionGroups, savedZone.DistributionGroups) +} + +func TestSqlStore_GetZoneByID(t *testing.T) { + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) + t.Cleanup(cleanup) + require.NoError(t, err) + + accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + + zone := zones.NewZone(accountID, "Test Zone", "example.com", true, false, []string{"group1"}) + err = store.CreateZone(context.Background(), zone) + require.NoError(t, err) + + tests := []struct { + name string + accountID string + zoneID string + expectError bool + }{ + { + name: "retrieve existing zone", + accountID: accountID, + zoneID: zone.ID, + expectError: false, + }, + { + name: "retrieve non-existing zone", + accountID: accountID, + zoneID: "non-existing", + expectError: true, + }, + { + name: "retrieve with empty zone ID", + accountID: accountID, + zoneID: "", + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + savedZone, err := store.GetZoneByID(context.Background(), LockingStrengthNone, tt.accountID, tt.zoneID) + if tt.expectError { + require.Error(t, err) + sErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, sErr.Type(), status.NotFound) + require.Nil(t, savedZone) + } else { + require.NoError(t, err) + require.NotNil(t, savedZone) + assert.Equal(t, tt.zoneID, savedZone.ID) + } + }) + } +} + +func TestSqlStore_GetAccountZones(t *testing.T) { + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) + t.Cleanup(cleanup) + require.NoError(t, err) + + accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + + zone1 := zones.NewZone(accountID, "Zone 1", "example1.com", true, false, []string{"group1"}) + err = store.CreateZone(context.Background(), zone1) + require.NoError(t, err) + + zone2 := zones.NewZone(accountID, "Zone 2", "example2.com", true, true, []string{"group1", "group2"}) + err = store.CreateZone(context.Background(), zone2) + require.NoError(t, err) + + allZones, err := store.GetAccountZones(context.Background(), LockingStrengthNone, accountID) + require.NoError(t, err) + require.NotNil(t, allZones) + assert.GreaterOrEqual(t, len(allZones), 2) + + zoneIDs := make(map[string]bool) + for _, z := range allZones { + zoneIDs[z.ID] = true + } + assert.True(t, zoneIDs[zone1.ID]) + assert.True(t, zoneIDs[zone2.ID]) +} + +func TestSqlStore_GetZoneByDomain(t *testing.T) { + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) + t.Cleanup(cleanup) + require.NoError(t, err) + + accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + otherAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3c" + + zone := zones.NewZone(accountID, "Test Zone", "example.com", true, false, []string{"group1"}) + err = store.CreateZone(context.Background(), zone) + require.NoError(t, err) + + tests := []struct { + name string + accountID string + domain string + expectError bool + errorType status.Type + }{ + { + name: "retrieve existing zone by domain", + accountID: accountID, + domain: "example.com", + expectError: false, + }, + { + name: "retrieve non-existing zone domain", + accountID: accountID, + domain: "non-existing.com", + expectError: true, + errorType: status.NotFound, + }, + { + name: "retrieve with empty domain", + accountID: accountID, + domain: "", + expectError: true, + errorType: status.NotFound, + }, + { + name: "retrieve with different account ID", + accountID: otherAccountID, + domain: "example.com", + expectError: true, + errorType: status.NotFound, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + savedZone, err := store.GetZoneByDomain(context.Background(), tt.accountID, tt.domain) + if tt.expectError { + require.Error(t, err) + sErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, tt.errorType, sErr.Type()) + require.Nil(t, savedZone) + } else { + require.NoError(t, err) + require.NotNil(t, savedZone) + assert.Equal(t, tt.domain, savedZone.Domain) + assert.Equal(t, zone.ID, savedZone.ID) + assert.Equal(t, zone.Name, savedZone.Name) + } + }) + } +} + +func TestSqlStore_UpdateZone(t *testing.T) { + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) + t.Cleanup(cleanup) + require.NoError(t, err) + + accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + + zone := zones.NewZone(accountID, "Test Zone", "example.com", true, false, []string{"group1"}) + err = store.CreateZone(context.Background(), zone) + require.NoError(t, err) + + zone.Name = "Updated Zone" + zone.Domain = "updated.com" + zone.Enabled = false + zone.EnableSearchDomain = true + zone.DistributionGroups = []string{"group2", "group3"} + + err = store.UpdateZone(context.Background(), zone) + require.NoError(t, err) + + updatedZone, err := store.GetZoneByID(context.Background(), LockingStrengthNone, accountID, zone.ID) + require.NoError(t, err) + require.NotNil(t, updatedZone) + assert.Equal(t, "Updated Zone", updatedZone.Name) + assert.Equal(t, "updated.com", updatedZone.Domain) + assert.False(t, updatedZone.Enabled) + assert.True(t, updatedZone.EnableSearchDomain) + assert.Equal(t, []string{"group2", "group3"}, updatedZone.DistributionGroups) +} + +func TestSqlStore_DeleteZone(t *testing.T) { + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) + t.Cleanup(cleanup) + require.NoError(t, err) + + accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + + zone := zones.NewZone(accountID, "Test Zone", "example.com", true, false, []string{"group1"}) + err = store.CreateZone(context.Background(), zone) + require.NoError(t, err) + + err = store.DeleteZone(context.Background(), accountID, zone.ID) + require.NoError(t, err) + + deletedZone, err := store.GetZoneByID(context.Background(), LockingStrengthNone, accountID, zone.ID) + require.Error(t, err) + require.Nil(t, deletedZone) + sErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, sErr.Type(), status.NotFound) +} + +func TestSqlStore_CreateDNSRecord(t *testing.T) { + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) + t.Cleanup(cleanup) + require.NoError(t, err) + + accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + + zone := zones.NewZone(accountID, "Test Zone", "example.com", true, false, []string{"group1"}) + err = store.CreateZone(context.Background(), zone) + require.NoError(t, err) + + record := records.NewRecord(accountID, zone.ID, "www.example.com", records.RecordTypeA, "192.168.1.1", 300) + + err = store.CreateDNSRecord(context.Background(), record) + require.NoError(t, err) + + savedRecord, err := store.GetDNSRecordByID(context.Background(), LockingStrengthNone, accountID, zone.ID, record.ID) + require.NoError(t, err) + require.NotNil(t, savedRecord) + assert.Equal(t, record.ID, savedRecord.ID) + assert.Equal(t, record.Name, savedRecord.Name) + assert.Equal(t, record.Type, savedRecord.Type) + assert.Equal(t, record.Content, savedRecord.Content) + assert.Equal(t, record.TTL, savedRecord.TTL) + assert.Equal(t, zone.ID, savedRecord.ZoneID) +} + +func TestSqlStore_GetDNSRecordByID(t *testing.T) { + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) + t.Cleanup(cleanup) + require.NoError(t, err) + + accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + + zone := zones.NewZone(accountID, "Test Zone", "example.com", true, false, []string{"group1"}) + err = store.CreateZone(context.Background(), zone) + require.NoError(t, err) + + record := records.NewRecord(accountID, zone.ID, "www.example.com", records.RecordTypeA, "192.168.1.1", 300) + err = store.CreateDNSRecord(context.Background(), record) + require.NoError(t, err) + + tests := []struct { + name string + accountID string + zoneID string + recordID string + expectError bool + }{ + { + name: "retrieve existing record", + accountID: accountID, + zoneID: zone.ID, + recordID: record.ID, + expectError: false, + }, + { + name: "retrieve non-existing record", + accountID: accountID, + zoneID: zone.ID, + recordID: "non-existing", + expectError: true, + }, + { + name: "retrieve with empty record ID", + accountID: accountID, + zoneID: zone.ID, + recordID: "", + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + savedRecord, err := store.GetDNSRecordByID(context.Background(), LockingStrengthNone, tt.accountID, tt.zoneID, tt.recordID) + if tt.expectError { + require.Error(t, err) + sErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, sErr.Type(), status.NotFound) + require.Nil(t, savedRecord) + } else { + require.NoError(t, err) + require.NotNil(t, savedRecord) + assert.Equal(t, tt.recordID, savedRecord.ID) + } + }) + } +} + +func TestSqlStore_GetZoneDNSRecords(t *testing.T) { + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) + t.Cleanup(cleanup) + require.NoError(t, err) + + accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + + zone := zones.NewZone(accountID, "Test Zone", "example.com", true, false, []string{"group1"}) + err = store.CreateZone(context.Background(), zone) + require.NoError(t, err) + + recordA := records.NewRecord(accountID, zone.ID, "www.example.com", records.RecordTypeA, "192.168.1.1", 300) + err = store.CreateDNSRecord(context.Background(), recordA) + require.NoError(t, err) + + recordAAAA := records.NewRecord(accountID, zone.ID, "ipv6.example.com", records.RecordTypeAAAA, "2001:db8::1", 300) + err = store.CreateDNSRecord(context.Background(), recordAAAA) + require.NoError(t, err) + + recordCNAME := records.NewRecord(accountID, zone.ID, "alias.example.com", records.RecordTypeCNAME, "www.example.com", 300) + err = store.CreateDNSRecord(context.Background(), recordCNAME) + require.NoError(t, err) + + allRecords, err := store.GetZoneDNSRecords(context.Background(), LockingStrengthNone, accountID, zone.ID) + require.NoError(t, err) + require.NotNil(t, allRecords) + assert.Equal(t, 3, len(allRecords)) + + recordIDs := make(map[string]bool) + for _, r := range allRecords { + recordIDs[r.ID] = true + } + assert.True(t, recordIDs[recordA.ID]) + assert.True(t, recordIDs[recordAAAA.ID]) + assert.True(t, recordIDs[recordCNAME.ID]) +} + +func TestSqlStore_GetZoneDNSRecordsByName(t *testing.T) { + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) + t.Cleanup(cleanup) + require.NoError(t, err) + + accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + + zone := zones.NewZone(accountID, "Test Zone", "example.com", true, false, []string{"group1"}) + err = store.CreateZone(context.Background(), zone) + require.NoError(t, err) + + record1 := records.NewRecord(accountID, zone.ID, "www.example.com", records.RecordTypeA, "192.168.1.1", 300) + err = store.CreateDNSRecord(context.Background(), record1) + require.NoError(t, err) + + record2 := records.NewRecord(accountID, zone.ID, "www.example.com", records.RecordTypeAAAA, "2001:db8::1", 300) + err = store.CreateDNSRecord(context.Background(), record2) + require.NoError(t, err) + + record3 := records.NewRecord(accountID, zone.ID, "mail.example.com", records.RecordTypeA, "192.168.1.2", 600) + err = store.CreateDNSRecord(context.Background(), record3) + require.NoError(t, err) + + recordsByName, err := store.GetZoneDNSRecordsByName(context.Background(), LockingStrengthNone, accountID, zone.ID, "www.example.com") + require.NoError(t, err) + require.NotNil(t, recordsByName) + assert.Equal(t, 2, len(recordsByName)) + + for _, r := range recordsByName { + assert.Equal(t, "www.example.com", r.Name) + } +} + +func TestSqlStore_UpdateDNSRecord(t *testing.T) { + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) + t.Cleanup(cleanup) + require.NoError(t, err) + + accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + + zone := zones.NewZone(accountID, "Test Zone", "example.com", true, false, []string{"group1"}) + err = store.CreateZone(context.Background(), zone) + require.NoError(t, err) + + record := records.NewRecord(accountID, zone.ID, "www.example.com", records.RecordTypeA, "192.168.1.1", 300) + err = store.CreateDNSRecord(context.Background(), record) + require.NoError(t, err) + + record.Name = "api.example.com" + record.Content = "192.168.1.100" + record.TTL = 600 + + err = store.UpdateDNSRecord(context.Background(), record) + require.NoError(t, err) + + updatedRecord, err := store.GetDNSRecordByID(context.Background(), LockingStrengthNone, accountID, zone.ID, record.ID) + require.NoError(t, err) + require.NotNil(t, updatedRecord) + assert.Equal(t, "api.example.com", updatedRecord.Name) + assert.Equal(t, "192.168.1.100", updatedRecord.Content) + assert.Equal(t, 600, updatedRecord.TTL) +} + +func TestSqlStore_DeleteDNSRecord(t *testing.T) { + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) + t.Cleanup(cleanup) + require.NoError(t, err) + + accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + + zone := zones.NewZone(accountID, "Test Zone", "example.com", true, false, []string{"group1"}) + err = store.CreateZone(context.Background(), zone) + require.NoError(t, err) + + record := records.NewRecord(accountID, zone.ID, "www.example.com", records.RecordTypeA, "192.168.1.1", 300) + err = store.CreateDNSRecord(context.Background(), record) + require.NoError(t, err) + + err = store.DeleteDNSRecord(context.Background(), accountID, zone.ID, record.ID) + require.NoError(t, err) + + deletedRecord, err := store.GetDNSRecordByID(context.Background(), LockingStrengthNone, accountID, zone.ID, record.ID) + require.Error(t, err) + require.Nil(t, deletedRecord) + sErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, sErr.Type(), status.NotFound) +} + +func TestSqlStore_DeleteZoneDNSRecords(t *testing.T) { + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) + t.Cleanup(cleanup) + require.NoError(t, err) + + accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + + zone := zones.NewZone(accountID, "Test Zone", "example.com", true, false, []string{"group1"}) + err = store.CreateZone(context.Background(), zone) + require.NoError(t, err) + + record1 := records.NewRecord(accountID, zone.ID, "www.example.com", records.RecordTypeA, "192.168.1.1", 300) + err = store.CreateDNSRecord(context.Background(), record1) + require.NoError(t, err) + + record2 := records.NewRecord(accountID, zone.ID, "mail.example.com", records.RecordTypeA, "192.168.1.2", 600) + err = store.CreateDNSRecord(context.Background(), record2) + require.NoError(t, err) + + allRecords, err := store.GetZoneDNSRecords(context.Background(), LockingStrengthNone, accountID, zone.ID) + require.NoError(t, err) + assert.Equal(t, 2, len(allRecords)) + + err = store.DeleteZoneDNSRecords(context.Background(), accountID, zone.ID) + require.NoError(t, err) + + remainingRecords, err := store.GetZoneDNSRecords(context.Background(), LockingStrengthNone, accountID, zone.ID) + require.NoError(t, err) + assert.Equal(t, 0, len(remainingRecords)) +} diff --git a/management/server/store/store.go b/management/server/store/store.go index 55d11c36a..3838b235e 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -23,6 +23,8 @@ import ( "gorm.io/gorm" "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/management/internals/modules/zones" + "github.com/netbirdio/netbird/management/internals/modules/zones/records" "github.com/netbirdio/netbird/management/server/telemetry" "github.com/netbirdio/netbird/management/server/testutil" "github.com/netbirdio/netbird/management/server/types" @@ -209,6 +211,21 @@ type Store interface { // SetFieldEncrypt sets the field encryptor for encrypting sensitive user data. SetFieldEncrypt(enc *crypt.FieldEncrypt) GetUserIDByPeerKey(ctx context.Context, lockStrength LockingStrength, peerKey string) (string, error) + + CreateZone(ctx context.Context, zone *zones.Zone) error + UpdateZone(ctx context.Context, zone *zones.Zone) error + DeleteZone(ctx context.Context, accountID, zoneID string) error + GetZoneByID(ctx context.Context, lockStrength LockingStrength, accountID, zoneID string) (*zones.Zone, error) + GetZoneByDomain(ctx context.Context, accountID, domain string) (*zones.Zone, error) + GetAccountZones(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*zones.Zone, error) + + CreateDNSRecord(ctx context.Context, record *records.Record) error + UpdateDNSRecord(ctx context.Context, record *records.Record) error + DeleteDNSRecord(ctx context.Context, accountID, zoneID, recordID string) error + GetDNSRecordByID(ctx context.Context, lockStrength LockingStrength, accountID, zoneID, recordID string) (*records.Record, error) + GetZoneDNSRecords(ctx context.Context, lockStrength LockingStrength, accountID, zoneID string) ([]*records.Record, error) + GetZoneDNSRecordsByName(ctx context.Context, lockStrength LockingStrength, accountID, zoneID, name string) ([]*records.Record, error) + DeleteZoneDNSRecords(ctx context.Context, accountID, zoneID string) error } const ( diff --git a/management/server/types/account.go b/management/server/types/account.go index 06170a132..a2b5140d4 100644 --- a/management/server/types/account.go +++ b/management/server/types/account.go @@ -18,6 +18,8 @@ import ( "github.com/netbirdio/netbird/client/ssh/auth" nbdns "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/management/internals/modules/zones" + "github.com/netbirdio/netbird/management/internals/modules/zones/records" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" networkTypes "github.com/netbirdio/netbird/management/server/networks/types" @@ -150,17 +152,16 @@ func (o AccountOnboarding) IsEqual(onboarding AccountOnboarding) bool { // GetRoutesToSync returns the enabled routes for the peer ID and the routes // from the ACL peers that have distribution groups associated with the peer ID. // Please mind, that the returned route.Route objects will contain Peer.Key instead of Peer.ID. -func (a *Account) GetRoutesToSync(ctx context.Context, peerID string, aclPeers []*nbpeer.Peer) []*route.Route { +func (a *Account) GetRoutesToSync(ctx context.Context, peerID string, aclPeers []*nbpeer.Peer, peerGroups LookupMap) []*route.Route { routes, peerDisabledRoutes := a.getRoutingPeerRoutes(ctx, peerID) peerRoutesMembership := make(LookupMap) for _, r := range append(routes, peerDisabledRoutes...) { peerRoutesMembership[string(r.GetHAUniqueID())] = struct{}{} } - groupListMap := a.GetPeerGroups(peerID) for _, peer := range aclPeers { activeRoutes, _ := a.getRoutingPeerRoutes(ctx, peer.ID) - groupFilteredRoutes := a.filterRoutesByGroups(activeRoutes, groupListMap) + groupFilteredRoutes := a.filterRoutesByGroups(activeRoutes, peerGroups) filteredRoutes := a.filterRoutesFromPeersOfSameHAGroup(groupFilteredRoutes, peerRoutesMembership) routes = append(routes, filteredRoutes...) } @@ -274,6 +275,7 @@ func (a *Account) GetPeerNetworkMap( ctx context.Context, peerID string, peersCustomZone nbdns.CustomZone, + accountZones []*zones.Zone, validatedPeersMap map[string]struct{}, resourcePolicies map[string][]*Policy, routers map[string]map[string]*routerTypes.NetworkRouter, @@ -294,6 +296,8 @@ func (a *Account) GetPeerNetworkMap( } } + peerGroups := a.GetPeerGroups(peerID) + aclPeers, firewallRules, authorizedUsers, enableSSH := a.GetPeerConnectionResources(ctx, peer, validatedPeersMap, groupIDToUserIDs) // exclude expired peers var peersToConnect []*nbpeer.Peer @@ -307,7 +311,7 @@ func (a *Account) GetPeerNetworkMap( peersToConnect = append(peersToConnect, p) } - routesUpdate := a.GetRoutesToSync(ctx, peerID, peersToConnect) + routesUpdate := a.GetRoutesToSync(ctx, peerID, peersToConnect, peerGroups) routesFirewallRules := a.GetPeerRoutesFirewallRules(ctx, peerID, validatedPeersMap) isRouter, networkResourcesRoutes, sourcePeers := a.GetNetworkResourcesRoutesToSync(ctx, peerID, resourcePolicies, routers) var networkResourcesFirewallRules []*RouteFirewallRule @@ -323,6 +327,7 @@ func (a *Account) GetPeerNetworkMap( if dnsManagementStatus { var zones []nbdns.CustomZone + if peersCustomZone.Domain != "" { records := filterZoneRecordsForPeers(peer, peersCustomZone, peersToConnectIncludingRouters, expiredPeers) zones = append(zones, nbdns.CustomZone{ @@ -330,6 +335,10 @@ func (a *Account) GetPeerNetworkMap( Records: records, }) } + + filteredAccountZones := filterPeerAppliedZones(ctx, accountZones, peerGroups) + zones = append(zones, filteredAccountZones...) + dnsUpdate.CustomZones = zones dnsUpdate.NameServerGroups = getPeerNSGroups(a, peerID) } @@ -1881,3 +1890,66 @@ func filterZoneRecordsForPeers(peer *nbpeer.Peer, customZone nbdns.CustomZone, p return filteredRecords } + +// filterPeerAppliedZones filters account zones based on the peer's group membership +func filterPeerAppliedZones(ctx context.Context, accountZones []*zones.Zone, peerGroups LookupMap) []nbdns.CustomZone { + var customZones []nbdns.CustomZone + + if len(peerGroups) == 0 { + return customZones + } + + for _, zone := range accountZones { + if !zone.Enabled || len(zone.Records) == 0 { + continue + } + + hasAccess := false + for _, distGroupID := range zone.DistributionGroups { + if _, found := peerGroups[distGroupID]; found { + hasAccess = true + break + } + } + + if !hasAccess { + continue + } + + simpleRecords := make([]nbdns.SimpleRecord, 0, len(zone.Records)) + for _, record := range zone.Records { + var recordType int + rData := record.Content + + switch record.Type { + case records.RecordTypeA: + recordType = int(dns.TypeA) + case records.RecordTypeAAAA: + recordType = int(dns.TypeAAAA) + case records.RecordTypeCNAME: + recordType = int(dns.TypeCNAME) + rData = dns.Fqdn(record.Content) + default: + log.WithContext(ctx).Warnf("unknown DNS record type %s for record %s", record.Type, record.ID) + continue + } + + simpleRecords = append(simpleRecords, nbdns.SimpleRecord{ + Name: dns.Fqdn(record.Name), + Type: recordType, + Class: nbdns.DefaultClass, + TTL: record.TTL, + RData: rData, + }) + } + + customZones = append(customZones, nbdns.CustomZone{ + Domain: dns.Fqdn(zone.Domain), + Records: simpleRecords, + SearchDomainDisabled: !zone.EnableSearchDomain, + NonAuthoritative: true, + }) + } + + return customZones +} diff --git a/management/server/types/account_test.go b/management/server/types/account_test.go index 2c9f2428d..af2896216 100644 --- a/management/server/types/account_test.go +++ b/management/server/types/account_test.go @@ -13,6 +13,8 @@ import ( "github.com/stretchr/testify/require" nbdns "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/management/internals/modules/zones" + "github.com/netbirdio/netbird/management/internals/modules/zones/records" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" networkTypes "github.com/netbirdio/netbird/management/server/networks/types" @@ -1425,3 +1427,515 @@ func Test_FilterZoneRecordsForPeers(t *testing.T) { }) } } + +func Test_filterPeerAppliedZones(t *testing.T) { + ctx := context.Background() + + tests := []struct { + name string + accountZones []*zones.Zone + peerGroups LookupMap + expected []nbdns.CustomZone + }{ + { + name: "empty peer groups returns empty custom zones", + accountZones: []*zones.Zone{}, + peerGroups: LookupMap{}, + expected: []nbdns.CustomZone{}, + }, + { + name: "peer has access to zone with A record", + accountZones: []*zones.Zone{ + { + ID: "zone1", + Domain: "example.com", + Enabled: true, + EnableSearchDomain: false, + DistributionGroups: []string{"group1"}, + Records: []*records.Record{ + { + ID: "record1", + Name: "www.example.com", + Type: records.RecordTypeA, + Content: "192.168.1.1", + TTL: 300, + }, + }, + }, + }, + peerGroups: LookupMap{"group1": struct{}{}}, + expected: []nbdns.CustomZone{ + { + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + { + Name: "www.example.com.", + Type: int(dns.TypeA), + Class: nbdns.DefaultClass, + TTL: 300, + RData: "192.168.1.1", + }, + }, + SearchDomainDisabled: true, + }, + }, + }, + { + name: "peer has access to zone with search domain enabled", + accountZones: []*zones.Zone{ + { + ID: "zone1", + Domain: "internal.local", + Enabled: true, + EnableSearchDomain: true, + DistributionGroups: []string{"group1"}, + Records: []*records.Record{ + { + ID: "record1", + Name: "api.internal.local", + Type: records.RecordTypeA, + Content: "10.0.0.1", + TTL: 600, + }, + }, + }, + }, + peerGroups: LookupMap{"group1": struct{}{}}, + expected: []nbdns.CustomZone{ + { + Domain: "internal.local.", + Records: []nbdns.SimpleRecord{ + { + Name: "api.internal.local.", + Type: int(dns.TypeA), + Class: nbdns.DefaultClass, + TTL: 600, + RData: "10.0.0.1", + }, + }, + SearchDomainDisabled: false, + }, + }, + }, + { + name: "peer has no access to zone", + accountZones: []*zones.Zone{ + { + ID: "zone1", + Domain: "private.com", + Enabled: true, + EnableSearchDomain: false, + DistributionGroups: []string{"group2"}, + Records: []*records.Record{ + { + ID: "record1", + Name: "secret.private.com", + Type: records.RecordTypeA, + Content: "192.168.1.1", + TTL: 300, + }, + }, + }, + }, + peerGroups: LookupMap{"group1": struct{}{}}, + expected: []nbdns.CustomZone{}, + }, + { + name: "disabled zone is filtered out", + accountZones: []*zones.Zone{ + { + ID: "zone1", + Domain: "disabled.com", + Enabled: false, + EnableSearchDomain: false, + DistributionGroups: []string{"group1"}, + Records: []*records.Record{ + { + ID: "record1", + Name: "www.disabled.com", + Type: records.RecordTypeA, + Content: "192.168.1.1", + TTL: 300, + }, + }, + }, + }, + peerGroups: LookupMap{"group1": struct{}{}}, + expected: []nbdns.CustomZone{}, + }, + { + name: "zone with no records is filtered out", + accountZones: []*zones.Zone{ + { + ID: "zone1", + Domain: "empty.com", + Enabled: true, + EnableSearchDomain: false, + DistributionGroups: []string{"group1"}, + Records: []*records.Record{}, + }, + }, + peerGroups: LookupMap{"group1": struct{}{}}, + expected: []nbdns.CustomZone{}, + }, + { + name: "peer has access via multiple groups", + accountZones: []*zones.Zone{ + { + ID: "zone1", + Domain: "multi.com", + Enabled: true, + EnableSearchDomain: false, + DistributionGroups: []string{"group1", "group2", "group3"}, + Records: []*records.Record{ + { + ID: "record1", + Name: "www.multi.com", + Type: records.RecordTypeA, + Content: "192.168.1.1", + TTL: 300, + }, + }, + }, + }, + peerGroups: LookupMap{"group2": struct{}{}}, + expected: []nbdns.CustomZone{ + { + Domain: "multi.com.", + Records: []nbdns.SimpleRecord{ + { + Name: "www.multi.com.", + Type: int(dns.TypeA), + Class: nbdns.DefaultClass, + TTL: 300, + RData: "192.168.1.1", + }, + }, + SearchDomainDisabled: true, + }, + }, + }, + { + name: "multiple zones with mixed access", + accountZones: []*zones.Zone{ + { + ID: "zone1", + Domain: "allowed.com", + Enabled: true, + EnableSearchDomain: false, + DistributionGroups: []string{"group1"}, + Records: []*records.Record{ + { + ID: "record1", + Name: "www.allowed.com", + Type: records.RecordTypeA, + Content: "192.168.1.1", + TTL: 300, + }, + }, + }, + { + ID: "zone2", + Domain: "denied.com", + Enabled: true, + EnableSearchDomain: false, + DistributionGroups: []string{"group2"}, + Records: []*records.Record{ + { + ID: "record2", + Name: "www.denied.com", + Type: records.RecordTypeA, + Content: "192.168.1.2", + TTL: 300, + }, + }, + }, + }, + peerGroups: LookupMap{"group1": struct{}{}}, + expected: []nbdns.CustomZone{ + { + Domain: "allowed.com.", + Records: []nbdns.SimpleRecord{ + { + Name: "www.allowed.com.", + Type: int(dns.TypeA), + Class: nbdns.DefaultClass, + TTL: 300, + RData: "192.168.1.1", + }, + }, + SearchDomainDisabled: true, + }, + }, + }, + { + name: "zone with multiple record types", + accountZones: []*zones.Zone{ + { + ID: "zone1", + Domain: "mixed.com", + Enabled: true, + EnableSearchDomain: false, + DistributionGroups: []string{"group1"}, + Records: []*records.Record{ + { + ID: "record1", + Name: "www.mixed.com", + Type: records.RecordTypeA, + Content: "192.168.1.1", + TTL: 300, + }, + { + ID: "record2", + Name: "ipv6.mixed.com", + Type: records.RecordTypeAAAA, + Content: "2001:db8::1", + TTL: 600, + }, + { + ID: "record3", + Name: "alias.mixed.com", + Type: records.RecordTypeCNAME, + Content: "www.mixed.com", + TTL: 900, + }, + }, + }, + }, + peerGroups: LookupMap{"group1": struct{}{}}, + expected: []nbdns.CustomZone{ + { + Domain: "mixed.com.", + Records: []nbdns.SimpleRecord{ + { + Name: "www.mixed.com.", + Type: int(dns.TypeA), + Class: nbdns.DefaultClass, + TTL: 300, + RData: "192.168.1.1", + }, + { + Name: "ipv6.mixed.com.", + Type: int(dns.TypeAAAA), + Class: nbdns.DefaultClass, + TTL: 600, + RData: "2001:db8::1", + }, + { + Name: "alias.mixed.com.", + Type: int(dns.TypeCNAME), + Class: nbdns.DefaultClass, + TTL: 900, + RData: "www.mixed.com.", + }, + }, + SearchDomainDisabled: true, + }, + }, + }, + { + name: "multiple zones both accessible", + accountZones: []*zones.Zone{ + { + ID: "zone1", + Domain: "first.com", + Enabled: true, + EnableSearchDomain: true, + DistributionGroups: []string{"group1"}, + Records: []*records.Record{ + { + ID: "record1", + Name: "www.first.com", + Type: records.RecordTypeA, + Content: "192.168.1.1", + TTL: 300, + }, + }, + }, + { + ID: "zone2", + Domain: "second.com", + Enabled: true, + EnableSearchDomain: false, + DistributionGroups: []string{"group1"}, + Records: []*records.Record{ + { + ID: "record2", + Name: "www.second.com", + Type: records.RecordTypeA, + Content: "192.168.1.2", + TTL: 600, + }, + }, + }, + }, + peerGroups: LookupMap{"group1": struct{}{}}, + expected: []nbdns.CustomZone{ + { + Domain: "first.com.", + Records: []nbdns.SimpleRecord{ + { + Name: "www.first.com.", + Type: int(dns.TypeA), + Class: nbdns.DefaultClass, + TTL: 300, + RData: "192.168.1.1", + }, + }, + SearchDomainDisabled: false, + }, + { + Domain: "second.com.", + Records: []nbdns.SimpleRecord{ + { + Name: "www.second.com.", + Type: int(dns.TypeA), + Class: nbdns.DefaultClass, + TTL: 600, + RData: "192.168.1.2", + }, + }, + SearchDomainDisabled: true, + }, + }, + }, + { + name: "zone with multiple records of same type", + accountZones: []*zones.Zone{ + { + ID: "zone1", + Domain: "multi-a.com", + Enabled: true, + EnableSearchDomain: false, + DistributionGroups: []string{"group1"}, + Records: []*records.Record{ + { + ID: "record1", + Name: "www.multi-a.com", + Type: records.RecordTypeA, + Content: "192.168.1.1", + TTL: 300, + }, + { + ID: "record2", + Name: "www.multi-a.com", + Type: records.RecordTypeA, + Content: "192.168.1.2", + TTL: 300, + }, + }, + }, + }, + peerGroups: LookupMap{"group1": struct{}{}}, + expected: []nbdns.CustomZone{ + { + Domain: "multi-a.com.", + Records: []nbdns.SimpleRecord{ + { + Name: "www.multi-a.com.", + Type: int(dns.TypeA), + Class: nbdns.DefaultClass, + TTL: 300, + RData: "192.168.1.1", + }, + { + Name: "www.multi-a.com.", + Type: int(dns.TypeA), + Class: nbdns.DefaultClass, + TTL: 300, + RData: "192.168.1.2", + }, + }, + SearchDomainDisabled: true, + }, + }, + }, + { + name: "peer in multiple groups accessing different zones", + accountZones: []*zones.Zone{ + { + ID: "zone1", + Domain: "zone1.com", + Enabled: true, + EnableSearchDomain: false, + DistributionGroups: []string{"group1"}, + Records: []*records.Record{ + { + ID: "record1", + Name: "www.zone1.com", + Type: records.RecordTypeA, + Content: "192.168.1.1", + TTL: 300, + }, + }, + }, + { + ID: "zone2", + Domain: "zone2.com", + Enabled: true, + EnableSearchDomain: false, + DistributionGroups: []string{"group2"}, + Records: []*records.Record{ + { + ID: "record2", + Name: "www.zone2.com", + Type: records.RecordTypeA, + Content: "192.168.1.2", + TTL: 300, + }, + }, + }, + }, + peerGroups: LookupMap{"group1": struct{}{}, "group2": struct{}{}}, + expected: []nbdns.CustomZone{ + { + Domain: "zone1.com.", + Records: []nbdns.SimpleRecord{ + { + Name: "www.zone1.com.", + Type: int(dns.TypeA), + Class: nbdns.DefaultClass, + TTL: 300, + RData: "192.168.1.1", + }, + }, + SearchDomainDisabled: true, + }, + { + Domain: "zone2.com.", + Records: []nbdns.SimpleRecord{ + { + Name: "www.zone2.com.", + Type: int(dns.TypeA), + Class: nbdns.DefaultClass, + TTL: 300, + RData: "192.168.1.2", + }, + }, + SearchDomainDisabled: true, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := filterPeerAppliedZones(ctx, tt.accountZones, tt.peerGroups) + require.Equal(t, len(tt.expected), len(result), "number of custom zones should match") + + for i, expectedZone := range tt.expected { + assert.Equal(t, expectedZone.Domain, result[i].Domain, "domain should match") + assert.Equal(t, expectedZone.SearchDomainDisabled, result[i].SearchDomainDisabled, "search domain disabled flag should match") + assert.Equal(t, len(expectedZone.Records), len(result[i].Records), "number of records should match") + + for j, expectedRecord := range expectedZone.Records { + assert.Equal(t, expectedRecord.Name, result[i].Records[j].Name, "record name should match") + assert.Equal(t, expectedRecord.Type, result[i].Records[j].Type, "record type should match") + assert.Equal(t, expectedRecord.Class, result[i].Records[j].Class, "record class should match") + assert.Equal(t, expectedRecord.TTL, result[i].Records[j].TTL, "record TTL should match") + assert.Equal(t, expectedRecord.RData, result[i].Records[j].RData, "record RData should match") + } + } + }) + } +} diff --git a/management/server/types/networkmap.go b/management/server/types/networkmap.go index ff81e5dc1..68c988a93 100644 --- a/management/server/types/networkmap.go +++ b/management/server/types/networkmap.go @@ -4,6 +4,7 @@ import ( "context" nbdns "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/management/internals/modules/zones" nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/server/telemetry" ) @@ -25,11 +26,12 @@ func (a *Account) GetPeerNetworkMapExp( ctx context.Context, peerID string, peersCustomZone nbdns.CustomZone, + accountZones []*zones.Zone, validatedPeers map[string]struct{}, metrics *telemetry.AccountManagerMetrics, ) *NetworkMap { a.initNetworkMapBuilder(validatedPeers) - return a.NetworkMapCache.GetPeerNetworkMap(ctx, peerID, peersCustomZone, validatedPeers, metrics) + return a.NetworkMapCache.GetPeerNetworkMap(ctx, peerID, peersCustomZone, accountZones, validatedPeers, metrics) } func (a *Account) OnPeerAddedUpdNetworkMapCache(peerId string) error { diff --git a/management/server/types/networkmap_golden_test.go b/management/server/types/networkmap_golden_test.go index 19ccbd688..ef6c51779 100644 --- a/management/server/types/networkmap_golden_test.go +++ b/management/server/types/networkmap_golden_test.go @@ -70,13 +70,13 @@ func TestGetPeerNetworkMap_Golden(t *testing.T) { resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) normalizeAndSortNetworkMap(legacyNetworkMap) legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") require.NoError(t, err, "error marshaling legacy network map to JSON") builder := types.NewNetworkMapBuilder(account, validatedPeersMap) - newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil) normalizeAndSortNetworkMap(newNetworkMap) newJSON, err := json.MarshalIndent(toNetworkMapJSON(newNetworkMap), "", " ") require.NoError(t, err, "error marshaling new network map to JSON") @@ -115,7 +115,7 @@ func BenchmarkGetPeerNetworkMap(b *testing.B) { b.Run("old builder", func(b *testing.B) { for range b.N { for _, peerID := range peerIDs { - _ = account.GetPeerNetworkMap(ctx, peerID, dns.CustomZone{}, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) + _ = account.GetPeerNetworkMap(ctx, peerID, dns.CustomZone{}, nil, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) } } }) @@ -124,7 +124,7 @@ func BenchmarkGetPeerNetworkMap(b *testing.B) { for range b.N { builder := types.NewNetworkMapBuilder(account, validatedPeersMap) for _, peerID := range peerIDs { - _ = builder.GetPeerNetworkMap(ctx, peerID, dns.CustomZone{}, validatedPeersMap, nil) + _ = builder.GetPeerNetworkMap(ctx, peerID, dns.CustomZone{}, nil, validatedPeersMap, nil) } } }) @@ -177,7 +177,7 @@ func TestGetPeerNetworkMap_Golden_WithNewPeer(t *testing.T) { resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) normalizeAndSortNetworkMap(legacyNetworkMap) legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") require.NoError(t, err, "error marshaling legacy network map to JSON") @@ -185,7 +185,7 @@ func TestGetPeerNetworkMap_Golden_WithNewPeer(t *testing.T) { err = builder.OnPeerAddedIncremental(account, newPeerID) require.NoError(t, err, "error adding peer to cache") - newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil) normalizeAndSortNetworkMap(newNetworkMap) newJSON, err := json.MarshalIndent(toNetworkMapJSON(newNetworkMap), "", " ") require.NoError(t, err, "error marshaling new network map to JSON") @@ -240,7 +240,7 @@ func BenchmarkGetPeerNetworkMap_AfterPeerAdded(b *testing.B) { b.Run("old builder after add", func(b *testing.B) { for i := 0; i < b.N; i++ { for _, testingPeerID := range peerIDs { - _ = account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) + _ = account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) } } }) @@ -250,7 +250,7 @@ func BenchmarkGetPeerNetworkMap_AfterPeerAdded(b *testing.B) { for i := 0; i < b.N; i++ { _ = builder.OnPeerAddedIncremental(account, newPeerID) for _, testingPeerID := range peerIDs { - _ = builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + _ = builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil) } } }) @@ -317,7 +317,7 @@ func TestGetPeerNetworkMap_Golden_WithNewRoutingPeer(t *testing.T) { resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) normalizeAndSortNetworkMap(legacyNetworkMap) legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") require.NoError(t, err, "error marshaling legacy network map to JSON") @@ -325,7 +325,7 @@ func TestGetPeerNetworkMap_Golden_WithNewRoutingPeer(t *testing.T) { err = builder.OnPeerAddedIncremental(account, newRouterID) require.NoError(t, err, "error adding router to cache") - newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil) normalizeAndSortNetworkMap(newNetworkMap) newJSON, err := json.MarshalIndent(toNetworkMapJSON(newNetworkMap), "", " ") require.NoError(t, err, "error marshaling new network map to JSON") @@ -402,7 +402,7 @@ func BenchmarkGetPeerNetworkMap_AfterRouterPeerAdded(b *testing.B) { b.Run("old builder after add", func(b *testing.B) { for i := 0; i < b.N; i++ { for _, testingPeerID := range peerIDs { - _ = account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) + _ = account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) } } }) @@ -412,7 +412,7 @@ func BenchmarkGetPeerNetworkMap_AfterRouterPeerAdded(b *testing.B) { for i := 0; i < b.N; i++ { _ = builder.OnPeerAddedIncremental(account, newRouterID) for _, testingPeerID := range peerIDs { - _ = builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + _ = builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil) } } }) @@ -458,7 +458,7 @@ func TestGetPeerNetworkMap_Golden_WithDeletedPeer(t *testing.T) { resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) normalizeAndSortNetworkMap(legacyNetworkMap) legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") require.NoError(t, err, "error marshaling legacy network map to JSON") @@ -466,7 +466,7 @@ func TestGetPeerNetworkMap_Golden_WithDeletedPeer(t *testing.T) { err = builder.OnPeerDeleted(account, deletedPeerID) require.NoError(t, err, "error deleting peer from cache") - newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil) normalizeAndSortNetworkMap(newNetworkMap) newJSON, err := json.MarshalIndent(toNetworkMapJSON(newNetworkMap), "", " ") require.NoError(t, err, "error marshaling new network map to JSON") @@ -537,7 +537,7 @@ func TestGetPeerNetworkMap_Golden_WithDeletedRouterPeer(t *testing.T) { resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) normalizeAndSortNetworkMap(legacyNetworkMap) legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") require.NoError(t, err, "error marshaling legacy network map to JSON") @@ -545,7 +545,7 @@ func TestGetPeerNetworkMap_Golden_WithDeletedRouterPeer(t *testing.T) { err = builder.OnPeerDeleted(account, deletedRouterID) require.NoError(t, err, "error deleting routing peer from cache") - newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil) normalizeAndSortNetworkMap(newNetworkMap) newJSON, err := json.MarshalIndent(toNetworkMapJSON(newNetworkMap), "", " ") require.NoError(t, err, "error marshaling new network map to JSON") @@ -597,7 +597,7 @@ func BenchmarkGetPeerNetworkMap_AfterPeerDeleted(b *testing.B) { b.Run("old builder after delete", func(b *testing.B) { for i := 0; i < b.N; i++ { for _, testingPeerID := range peerIDs { - _ = account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) + _ = account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) } } }) @@ -607,7 +607,7 @@ func BenchmarkGetPeerNetworkMap_AfterPeerDeleted(b *testing.B) { for i := 0; i < b.N; i++ { _ = builder.OnPeerDeleted(account, deletedPeerID) for _, testingPeerID := range peerIDs { - _ = builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + _ = builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil) } } }) @@ -944,7 +944,7 @@ func TestGetPeerNetworkMap_Golden_New_WithOnPeerAddedRouter_Batched(t *testing.T time.Sleep(100 * time.Millisecond) - networkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, validatedPeersMap, nil) + networkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil) normalizeAndSortNetworkMap(networkMap) diff --git a/management/server/types/networkmapbuilder.go b/management/server/types/networkmapbuilder.go index 0acd3a026..6448b8403 100644 --- a/management/server/types/networkmapbuilder.go +++ b/management/server/types/networkmapbuilder.go @@ -14,6 +14,7 @@ import ( "github.com/netbirdio/netbird/client/ssh/auth" nbdns "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/management/internals/modules/zones" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" nbpeer "github.com/netbirdio/netbird/management/server/peer" @@ -1033,7 +1034,7 @@ func (b *NetworkMapBuilder) updateAccountLocked(account *Account) *Account { } func (b *NetworkMapBuilder) GetPeerNetworkMap( - ctx context.Context, peerID string, peersCustomZone nbdns.CustomZone, + ctx context.Context, peerID string, peersCustomZone nbdns.CustomZone, accountZones []*zones.Zone, validatedPeers map[string]struct{}, metrics *telemetry.AccountManagerMetrics, ) *NetworkMap { start := time.Now() @@ -1057,7 +1058,7 @@ func (b *NetworkMapBuilder) GetPeerNetworkMap( return &NetworkMap{Network: account.Network.Copy()} } - nm := b.assembleNetworkMap(account, peer, aclView, routesView, dnsConfig, sshView, peersCustomZone, validatedPeers) + nm := b.assembleNetworkMap(ctx, account, peer, aclView, routesView, dnsConfig, sshView, peersCustomZone, accountZones, validatedPeers) if metrics != nil { objectCount := int64(len(nm.Peers) + len(nm.OfflinePeers) + len(nm.Routes) + len(nm.FirewallRules) + len(nm.RoutesFirewallRules)) @@ -1074,8 +1075,8 @@ func (b *NetworkMapBuilder) GetPeerNetworkMap( } func (b *NetworkMapBuilder) assembleNetworkMap( - account *Account, peer *nbpeer.Peer, aclView *PeerACLView, routesView *PeerRoutesView, - dnsConfig *nbdns.Config, sshView *PeerSSHView, customZone nbdns.CustomZone, validatedPeers map[string]struct{}, + ctx context.Context, account *Account, peer *nbpeer.Peer, aclView *PeerACLView, routesView *PeerRoutesView, + dnsConfig *nbdns.Config, sshView *PeerSSHView, peersCustomZone nbdns.CustomZone, accountZones []*zones.Zone, validatedPeers map[string]struct{}, ) *NetworkMap { var peersToConnect []*nbpeer.Peer @@ -1125,13 +1126,26 @@ func (b *NetworkMapBuilder) assembleNetworkMap( } finalDNSConfig := *dnsConfig - if finalDNSConfig.ServiceEnable && customZone.Domain != "" { + if finalDNSConfig.ServiceEnable { var zones []nbdns.CustomZone - records := filterZoneRecordsForPeers(peer, customZone, peersToConnect, expiredPeers) - zones = append(zones, nbdns.CustomZone{ - Domain: customZone.Domain, - Records: records, - }) + + peerGroupsSlice := b.cache.peerToGroups[peer.ID] + peerGroups := make(LookupMap, len(peerGroupsSlice)) + for _, groupID := range peerGroupsSlice { + peerGroups[groupID] = struct{}{} + } + + if peersCustomZone.Domain != "" { + records := filterZoneRecordsForPeers(peer, peersCustomZone, peersToConnect, expiredPeers) + zones = append(zones, nbdns.CustomZone{ + Domain: peersCustomZone.Domain, + Records: records, + }) + } + + filteredAccountZones := filterPeerAppliedZones(ctx, accountZones, peerGroups) + zones = append(zones, filteredAccountZones...) + finalDNSConfig.CustomZones = zones } diff --git a/management/server/util/util.go b/management/server/util/util.go index 617484274..eea6a72b0 100644 --- a/management/server/util/util.go +++ b/management/server/util/util.go @@ -1,5 +1,9 @@ package util +import "regexp" + +var domainRegex = regexp.MustCompile(`^(\*\.)?([a-zA-Z0-9-]+\.)+[a-zA-Z]{2,}$`) + // Difference returns the elements in `a` that aren't in `b`. func Difference(a, b []string) []string { mb := make(map[string]struct{}, len(b)) @@ -50,3 +54,10 @@ func contains[T comparableObject[T]](slice []T, element T) bool { } return false } + +func IsValidDomain(domain string) bool { + if domain == "" { + return false + } + return domainRegex.MatchString(domain) +} diff --git a/shared/management/client/rest/client.go b/shared/management/client/rest/client.go index 77c960435..ad8328093 100644 --- a/shared/management/client/rest/client.go +++ b/shared/management/client/rest/client.go @@ -59,9 +59,13 @@ type Client struct { Routes *RoutesAPI // DNS NetBird DNS APIs - // see more: https://docs.netbird.io/api/resources/routes + // see more: https://docs.netbird.io/api/resources/dns DNS *DNSAPI + // DNSZones NetBird DNS Zones APIs + // see more: https://docs.netbird.io/api/resources/dns-zones + DNSZones *DNSZonesAPI + // GeoLocation NetBird Geo Location APIs // see more: https://docs.netbird.io/api/resources/geo-locations GeoLocation *GeoLocationAPI @@ -113,6 +117,7 @@ func (c *Client) initialize() { c.Networks = &NetworksAPI{c} c.Routes = &RoutesAPI{c} c.DNS = &DNSAPI{c} + c.DNSZones = &DNSZonesAPI{c} c.GeoLocation = &GeoLocationAPI{c} c.Events = &EventsAPI{c} } diff --git a/shared/management/client/rest/dns_zones.go b/shared/management/client/rest/dns_zones.go new file mode 100644 index 000000000..6ee7d336e --- /dev/null +++ b/shared/management/client/rest/dns_zones.go @@ -0,0 +1,170 @@ +package rest + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/netbirdio/netbird/shared/management/http/api" +) + +// DNSZonesAPI APIs for DNS Zones Management, do not use directly +type DNSZonesAPI struct { + c *Client +} + +// ListZones list all DNS zones +// See more: https://docs.netbird.io/api/resources/dns-zones#list-all-dns-zones +func (a *DNSZonesAPI) ListZones(ctx context.Context) ([]api.Zone, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/dns/zones", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.Zone](resp) + return ret, err +} + +// GetZone get DNS zone info +// See more: https://docs.netbird.io/api/resources/dns-zones#retrieve-a-dns-zone +func (a *DNSZonesAPI) GetZone(ctx context.Context, zoneID string) (*api.Zone, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/dns/zones/"+zoneID, nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.Zone](resp) + return &ret, err +} + +// CreateZone create new DNS zone +// See more: https://docs.netbird.io/api/resources/dns-zones#create-a-dns-zone +func (a *DNSZonesAPI) CreateZone(ctx context.Context, request api.PostApiDnsZonesJSONRequestBody) (*api.Zone, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/dns/zones", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.Zone](resp) + return &ret, err +} + +// UpdateZone update DNS zone info +// See more: https://docs.netbird.io/api/resources/dns-zones#update-a-dns-zone +func (a *DNSZonesAPI) UpdateZone(ctx context.Context, zoneID string, request api.PutApiDnsZonesZoneIdJSONRequestBody) (*api.Zone, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/dns/zones/"+zoneID, bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.Zone](resp) + return &ret, err +} + +// DeleteZone delete DNS zone +// See more: https://docs.netbird.io/api/resources/dns-zones#delete-a-dns-zone +func (a *DNSZonesAPI) DeleteZone(ctx context.Context, zoneID string) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/dns/zones/"+zoneID, nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + + return nil +} + +// ListRecords list all DNS records in a zone +// See more: https://docs.netbird.io/api/resources/dns-zones#list-all-dns-records +func (a *DNSZonesAPI) ListRecords(ctx context.Context, zoneID string) ([]api.DNSRecord, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/dns/zones/"+zoneID+"/records", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.DNSRecord](resp) + return ret, err +} + +// GetRecord get DNS record info +// See more: https://docs.netbird.io/api/resources/dns-zones#retrieve-a-dns-record +func (a *DNSZonesAPI) GetRecord(ctx context.Context, zoneID, recordID string) (*api.DNSRecord, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/dns/zones/"+zoneID+"/records/"+recordID, nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.DNSRecord](resp) + return &ret, err +} + +// CreateRecord create new DNS record in a zone +// See more: https://docs.netbird.io/api/resources/dns-zones#create-a-dns-record +func (a *DNSZonesAPI) CreateRecord(ctx context.Context, zoneID string, request api.PostApiDnsZonesZoneIdRecordsJSONRequestBody) (*api.DNSRecord, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/dns/zones/"+zoneID+"/records", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.DNSRecord](resp) + return &ret, err +} + +// UpdateRecord update DNS record info +// See more: https://docs.netbird.io/api/resources/dns-zones#update-a-dns-record +func (a *DNSZonesAPI) UpdateRecord(ctx context.Context, zoneID, recordID string, request api.PutApiDnsZonesZoneIdRecordsRecordIdJSONRequestBody) (*api.DNSRecord, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/dns/zones/"+zoneID+"/records/"+recordID, bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.DNSRecord](resp) + return &ret, err +} + +// DeleteRecord delete DNS record +// See more: https://docs.netbird.io/api/resources/dns-zones#delete-a-dns-record +func (a *DNSZonesAPI) DeleteRecord(ctx context.Context, zoneID, recordID string) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/dns/zones/"+zoneID+"/records/"+recordID, nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + + return nil +} diff --git a/shared/management/client/rest/dns_zones_test.go b/shared/management/client/rest/dns_zones_test.go new file mode 100644 index 000000000..c04a3ea57 --- /dev/null +++ b/shared/management/client/rest/dns_zones_test.go @@ -0,0 +1,460 @@ +//go:build integration +// +build integration + +package rest_test + +import ( + "context" + "encoding/json" + "io" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/shared/management/client/rest" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" +) + +var ( + testZone = api.Zone{ + Id: "zone123", + Name: "test-zone", + Domain: "example.com", + Enabled: true, + EnableSearchDomain: false, + DistributionGroups: []string{"group1"}, + } + + testDNSRecord = api.DNSRecord{ + Id: "record123", + Name: "www", + Content: "192.168.1.1", + Type: api.DNSRecordTypeA, + Ttl: 300, + } +) + +func TestDNSZone_List_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.Zone{testZone}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.DNSZones.ListZones(context.Background()) + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testZone, ret[0]) + }) +} + +func TestDNSZone_List_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.DNSZones.ListZones(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestDNSZone_Get_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones/zone123", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal(testZone) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.DNSZones.GetZone(context.Background(), "zone123") + require.NoError(t, err) + assert.Equal(t, testZone, *ret) + }) +} + +func TestDNSZone_Get_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones/zone123", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.DNSZones.GetZone(context.Background(), "zone123") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestDNSZone_Create_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.PostApiDnsZonesJSONRequestBody + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "test-zone", req.Name) + assert.Equal(t, "example.com", req.Domain) + retBytes, _ := json.Marshal(testZone) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + enabled := true + ret, err := c.DNSZones.CreateZone(context.Background(), api.PostApiDnsZonesJSONRequestBody{ + Name: "test-zone", + Domain: "example.com", + Enabled: &enabled, + EnableSearchDomain: false, + DistributionGroups: []string{"group1"}, + }) + require.NoError(t, err) + assert.Equal(t, testZone, *ret) + }) +} + +func TestDNSZone_Create_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Invalid request", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.DNSZones.CreateZone(context.Background(), api.PostApiDnsZonesJSONRequestBody{ + Name: "test-zone", + Domain: "example.com", + }) + assert.Error(t, err) + assert.Equal(t, "Invalid request", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestDNSZone_Update_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones/zone123", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "PUT", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.PutApiDnsZonesZoneIdJSONRequestBody + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "updated-zone", req.Name) + retBytes, _ := json.Marshal(testZone) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + enabled := true + ret, err := c.DNSZones.UpdateZone(context.Background(), "zone123", api.PutApiDnsZonesZoneIdJSONRequestBody{ + Name: "updated-zone", + Domain: "example.com", + Enabled: &enabled, + EnableSearchDomain: false, + DistributionGroups: []string{"group1"}, + }) + require.NoError(t, err) + assert.Equal(t, testZone, *ret) + }) +} + +func TestDNSZone_Update_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones/zone123", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Invalid request", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.DNSZones.UpdateZone(context.Background(), "zone123", api.PutApiDnsZonesZoneIdJSONRequestBody{ + Name: "updated-zone", + Domain: "example.com", + }) + assert.Error(t, err) + assert.Equal(t, "Invalid request", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestDNSZone_Delete_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones/zone123", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.DNSZones.DeleteZone(context.Background(), "zone123") + require.NoError(t, err) + }) +} + +func TestDNSZone_Delete_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones/zone123", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.DNSZones.DeleteZone(context.Background(), "zone123") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} + +func TestDNSRecord_List_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones/zone123/records", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.DNSRecord{testDNSRecord}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.DNSZones.ListRecords(context.Background(), "zone123") + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testDNSRecord, ret[0]) + }) +} + +func TestDNSRecord_List_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones/zone123/records", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Zone not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.DNSZones.ListRecords(context.Background(), "zone123") + assert.Error(t, err) + assert.Equal(t, "Zone not found", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestDNSRecord_Get_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones/zone123/records/record123", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal(testDNSRecord) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.DNSZones.GetRecord(context.Background(), "zone123", "record123") + require.NoError(t, err) + assert.Equal(t, testDNSRecord, *ret) + }) +} + +func TestDNSRecord_Get_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones/zone123/records/record123", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.DNSZones.GetRecord(context.Background(), "zone123", "record123") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestDNSRecord_Create_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones/zone123/records", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.PostApiDnsZonesZoneIdRecordsJSONRequestBody + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "www", req.Name) + assert.Equal(t, "192.168.1.1", req.Content) + assert.Equal(t, api.DNSRecordTypeA, req.Type) + retBytes, _ := json.Marshal(testDNSRecord) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.DNSZones.CreateRecord(context.Background(), "zone123", api.PostApiDnsZonesZoneIdRecordsJSONRequestBody{ + Name: "www", + Content: "192.168.1.1", + Type: api.DNSRecordTypeA, + Ttl: 300, + }) + require.NoError(t, err) + assert.Equal(t, testDNSRecord, *ret) + }) +} + +func TestDNSRecord_Create_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones/zone123/records", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Invalid record", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.DNSZones.CreateRecord(context.Background(), "zone123", api.PostApiDnsZonesZoneIdRecordsJSONRequestBody{ + Name: "www", + Content: "192.168.1.1", + Type: api.DNSRecordTypeA, + Ttl: 300, + }) + assert.Error(t, err) + assert.Equal(t, "Invalid record", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestDNSRecord_Update_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones/zone123/records/record123", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "PUT", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.PutApiDnsZonesZoneIdRecordsRecordIdJSONRequestBody + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "api", req.Name) + assert.Equal(t, "192.168.1.2", req.Content) + retBytes, _ := json.Marshal(testDNSRecord) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.DNSZones.UpdateRecord(context.Background(), "zone123", "record123", api.PutApiDnsZonesZoneIdRecordsRecordIdJSONRequestBody{ + Name: "api", + Content: "192.168.1.2", + Type: api.DNSRecordTypeA, + Ttl: 300, + }) + require.NoError(t, err) + assert.Equal(t, testDNSRecord, *ret) + }) +} + +func TestDNSRecord_Update_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones/zone123/records/record123", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Invalid record", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.DNSZones.UpdateRecord(context.Background(), "zone123", "record123", api.PutApiDnsZonesZoneIdRecordsRecordIdJSONRequestBody{ + Name: "api", + Content: "192.168.1.2", + Type: api.DNSRecordTypeA, + Ttl: 300, + }) + assert.Error(t, err) + assert.Equal(t, "Invalid record", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestDNSRecord_Delete_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones/zone123/records/record123", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.DNSZones.DeleteRecord(context.Background(), "zone123", "record123") + require.NoError(t, err) + }) +} + +func TestDNSRecord_Delete_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/dns/zones/zone123/records/record123", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.DNSZones.DeleteRecord(context.Background(), "zone123", "record123") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} + +func TestDNSZones_Integration(t *testing.T) { + enabled := true + zoneReq := api.ZoneRequest{ + Name: "test-zone", + Domain: "test.example.com", + Enabled: &enabled, + EnableSearchDomain: false, + DistributionGroups: []string{"cs1tnh0hhcjnqoiuebeg"}, + } + + recordReq := api.DNSRecordRequest{ + Name: "api.test.example.com", + Content: "192.168.1.100", + Type: api.DNSRecordTypeA, + Ttl: 300, + } + + withBlackBoxServer(t, func(c *rest.Client) { + zone, err := c.DNSZones.CreateZone(context.Background(), zoneReq) + require.NoError(t, err) + assert.Equal(t, "test-zone", zone.Name) + assert.Equal(t, "test.example.com", zone.Domain) + + zones, err := c.DNSZones.ListZones(context.Background()) + require.NoError(t, err) + assert.Equal(t, *zone, zones[0]) + + getZone, err := c.DNSZones.GetZone(context.Background(), zone.Id) + require.NoError(t, err) + assert.Equal(t, *zone, *getZone) + + zoneReq.Name = "updated-zone" + updatedZone, err := c.DNSZones.UpdateZone(context.Background(), zone.Id, zoneReq) + require.NoError(t, err) + assert.Equal(t, "updated-zone", updatedZone.Name) + + record, err := c.DNSZones.CreateRecord(context.Background(), zone.Id, recordReq) + require.NoError(t, err) + assert.Equal(t, "api.test.example.com", record.Name) + assert.Equal(t, "192.168.1.100", record.Content) + + records, err := c.DNSZones.ListRecords(context.Background(), zone.Id) + require.NoError(t, err) + assert.Equal(t, *record, records[0]) + + getRecord, err := c.DNSZones.GetRecord(context.Background(), zone.Id, record.Id) + require.NoError(t, err) + assert.Equal(t, *record, *getRecord) + + recordReq.Name = "www.test.example.com" + updatedRecord, err := c.DNSZones.UpdateRecord(context.Background(), zone.Id, record.Id, recordReq) + require.NoError(t, err) + assert.Equal(t, "www.test.example.com", updatedRecord.Name) + + err = c.DNSZones.DeleteRecord(context.Background(), zone.Id, record.Id) + require.NoError(t, err) + + records, err = c.DNSZones.ListRecords(context.Background(), zone.Id) + require.NoError(t, err) + assert.Len(t, records, 0) + + err = c.DNSZones.DeleteZone(context.Background(), zone.Id) + require.NoError(t, err) + + zones, err = c.DNSZones.ListZones(context.Background()) + require.NoError(t, err) + assert.Len(t, zones, 0) + }) +} diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 64086e7ec..7b9451b15 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -25,6 +25,8 @@ tags: description: Interact with and view information about routes. - name: DNS description: Interact with and view information about DNS configuration. + - name: DNS Zones + description: Interact with and view information about custom DNS zones. - name: Events description: View information about the account and network events. - name: Accounts @@ -1779,6 +1781,100 @@ components: example: ch8i4ug6lnn4g9hqv7m0 required: - disabled_management_groups + ZoneRequest: + type: object + properties: + name: + description: Zone name identifier + type: string + maxLength: 255 + minLength: 1 + example: Office Zone + domain: + description: Zone domain (FQDN) + type: string + example: example.com + enabled: + description: Zone status + type: boolean + default: true + enable_search_domain: + description: Enable this zone as a search domain + type: boolean + example: false + distribution_groups: + description: Group IDs that defines groups of peers that will resolve this zone + type: array + items: + type: string + example: ch8i4ug6lnn4g9hqv7m0 + required: + - name + - domain + - enable_search_domain + - distribution_groups + Zone: + allOf: + - type: object + properties: + id: + description: Zone ID + type: string + example: ch8i4ug6lnn4g9hqv7m0 + records: + description: DNS records associated with this zone + type: array + items: + $ref: '#/components/schemas/DNSRecord' + required: + - id + - enabled + - records + - $ref: '#/components/schemas/ZoneRequest' + DNSRecordType: + type: string + description: DNS record type + enum: + - A + - AAAA + - CNAME + example: A + DNSRecordRequest: + type: object + properties: + name: + description: FQDN for the DNS record. Must be a subdomain within or match the zone's domain. + type: string + example: www.example.com + type: + $ref: '#/components/schemas/DNSRecordType' + content: + description: DNS record content (IP address for A/AAAA, domain for CNAME) + type: string + maxLength: 255 + minLength: 1 + example: 192.168.1.1 + ttl: + description: Time to live in seconds + type: integer + minimum: 0 + example: 300 + required: + - name + - type + - content + - ttl + DNSRecord: + allOf: + - type: object + properties: + id: + description: DNS record ID + type: string + example: ch8i4ug6lnn4g9hqv7m0 + required: + - id + - $ref: '#/components/schemas/DNSRecordRequest' Event: type: object properties: @@ -4733,6 +4829,347 @@ paths: "$ref": "#/components/responses/forbidden" '500': "$ref": "#/components/responses/internal_error" + /api/dns/zones: + get: + summary: List all DNS Zones + description: Returns a list of all custom DNS zones + tags: [ DNS Zones ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + responses: + '200': + description: A JSON Array of DNS Zones + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Zone' + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '500': + "$ref": "#/components/responses/internal_error" + post: + summary: Create a DNS Zone + description: Creates a new custom DNS zone + tags: [ DNS Zones ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + requestBody: + description: A DNS zone object + content: + 'application/json': + schema: + $ref: '#/components/schemas/ZoneRequest' + responses: + '200': + description: A JSON Object of the created DNS Zone + content: + application/json: + schema: + $ref: '#/components/schemas/Zone' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '500': + "$ref": "#/components/responses/internal_error" + /api/dns/zones/{zoneId}: + get: + summary: Retrieve a DNS Zone + description: Returns information about a specific DNS zone + tags: [ DNS Zones ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: zoneId + required: true + schema: + type: string + description: The unique identifier of a zone + example: chacbco6lnnbn6cg5s91 + responses: + '200': + description: A JSON Object of a DNS Zone + content: + application/json: + schema: + $ref: '#/components/schemas/Zone' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + put: + summary: Update a DNS Zone + description: Updates a custom DNS zone + tags: [ DNS Zones ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: zoneId + required: true + schema: + type: string + description: The unique identifier of a zone + example: chacbco6lnnbn6cg5s91 + requestBody: + description: A DNS zone object + content: + 'application/json': + schema: + $ref: '#/components/schemas/ZoneRequest' + responses: + '200': + description: A JSON Object of the updated DNS Zone + content: + application/json: + schema: + $ref: '#/components/schemas/Zone' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + delete: + summary: Delete a DNS Zone + description: Deletes a custom DNS zone + tags: [ DNS Zones ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: zoneId + required: true + schema: + type: string + description: The unique identifier of a zone + example: chacbco6lnnbn6cg5s91 + responses: + '200': + description: Zone deletion successful + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + /api/dns/zones/{zoneId}/records: + get: + summary: List all DNS Records + description: Returns a list of all DNS records in a zone + tags: [ DNS Zones ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: zoneId + required: true + schema: + type: string + description: The unique identifier of a zone + example: chacbco6lnnbn6cg5s91 + responses: + '200': + description: A JSON Array of DNS Records + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/DNSRecord' + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + post: + summary: Create a DNS Record + description: Creates a new DNS record in a zone + tags: [ DNS Zones ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: zoneId + required: true + schema: + type: string + description: The unique identifier of a zone + example: chacbco6lnnbn6cg5s91 + requestBody: + description: A DNS record object + content: + 'application/json': + schema: + $ref: '#/components/schemas/DNSRecordRequest' + responses: + '200': + description: A JSON Object of the created DNS Record + content: + application/json: + schema: + $ref: '#/components/schemas/DNSRecord' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + /api/dns/zones/{zoneId}/records/{recordId}: + get: + summary: Retrieve a DNS Record + description: Returns information about a specific DNS record + tags: [ DNS Zones ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: zoneId + required: true + schema: + type: string + description: The unique identifier of a zone + example: chacbco6lnnbn6cg5s91 + - in: path + name: recordId + required: true + schema: + type: string + description: The unique identifier of a DNS record + example: chacbco6lnnbn6cg5s92 + responses: + '200': + description: A JSON Object of a DNS Record + content: + application/json: + schema: + $ref: '#/components/schemas/DNSRecord' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + put: + summary: Update a DNS Record + description: Updates a DNS record in a zone + tags: [ DNS Zones ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: zoneId + required: true + schema: + type: string + description: The unique identifier of a zone + example: chacbco6lnnbn6cg5s91 + - in: path + name: recordId + required: true + schema: + type: string + description: The unique identifier of a DNS record + example: chacbco6lnnbn6cg5s92 + requestBody: + description: A DNS record object + content: + 'application/json': + schema: + $ref: '#/components/schemas/DNSRecordRequest' + responses: + '200': + description: A JSON Object of the updated DNS Record + content: + application/json: + schema: + $ref: '#/components/schemas/DNSRecord' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + delete: + summary: Delete a DNS Record + description: Deletes a DNS record from a zone + tags: [ DNS Zones ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: zoneId + required: true + schema: + type: string + description: The unique identifier of a zone + example: chacbco6lnnbn6cg5s91 + - in: path + name: recordId + required: true + schema: + type: string + description: The unique identifier of a DNS record + example: chacbco6lnnbn6cg5s92 + responses: + '200': + description: Record deletion successful + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" /api/events/audit: get: summary: List all Audit Events diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index ab5a65cb0..94a52b6e1 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -12,6 +12,13 @@ const ( TokenAuthScopes = "TokenAuth.Scopes" ) +// Defines values for DNSRecordType. +const ( + DNSRecordTypeA DNSRecordType = "A" + DNSRecordTypeAAAA DNSRecordType = "AAAA" + DNSRecordTypeCNAME DNSRecordType = "CNAME" +) + // Defines values for EventActivityCode. const ( EventActivityCodeAccountCreate EventActivityCode = "account.create" @@ -427,6 +434,42 @@ type CreateSetupKeyRequest struct { UsageLimit int `json:"usage_limit"` } +// DNSRecord defines model for DNSRecord. +type DNSRecord struct { + // Content DNS record content (IP address for A/AAAA, domain for CNAME) + Content string `json:"content"` + + // Id DNS record ID + Id string `json:"id"` + + // Name FQDN for the DNS record. Must be a subdomain within or match the zone's domain. + Name string `json:"name"` + + // Ttl Time to live in seconds + Ttl int `json:"ttl"` + + // Type DNS record type + Type DNSRecordType `json:"type"` +} + +// DNSRecordRequest defines model for DNSRecordRequest. +type DNSRecordRequest struct { + // Content DNS record content (IP address for A/AAAA, domain for CNAME) + Content string `json:"content"` + + // Name FQDN for the DNS record. Must be a subdomain within or match the zone's domain. + Name string `json:"name"` + + // Ttl Time to live in seconds + Ttl int `json:"ttl"` + + // Type DNS record type + Type DNSRecordType `json:"type"` +} + +// DNSRecordType DNS record type +type DNSRecordType string + // DNSSettings defines model for DNSSettings. type DNSSettings struct { // DisabledManagementGroups Groups whose DNS management is disabled @@ -1999,6 +2042,48 @@ type UserRequest struct { Role string `json:"role"` } +// Zone defines model for Zone. +type Zone struct { + // DistributionGroups Group IDs that defines groups of peers that will resolve this zone + DistributionGroups []string `json:"distribution_groups"` + + // Domain Zone domain (FQDN) + Domain string `json:"domain"` + + // EnableSearchDomain Enable this zone as a search domain + EnableSearchDomain bool `json:"enable_search_domain"` + + // Enabled Zone status + Enabled bool `json:"enabled"` + + // Id Zone ID + Id string `json:"id"` + + // Name Zone name identifier + Name string `json:"name"` + + // Records DNS records associated with this zone + Records []DNSRecord `json:"records"` +} + +// ZoneRequest defines model for ZoneRequest. +type ZoneRequest struct { + // DistributionGroups Group IDs that defines groups of peers that will resolve this zone + DistributionGroups []string `json:"distribution_groups"` + + // Domain Zone domain (FQDN) + Domain string `json:"domain"` + + // EnableSearchDomain Enable this zone as a search domain + EnableSearchDomain bool `json:"enable_search_domain"` + + // Enabled Zone status + Enabled *bool `json:"enabled,omitempty"` + + // Name Zone name identifier + Name string `json:"name"` +} + // GetApiEventsNetworkTrafficParams defines parameters for GetApiEventsNetworkTraffic. type GetApiEventsNetworkTrafficParams struct { // Page Page number @@ -2083,6 +2168,18 @@ type PutApiDnsNameserversNsgroupIdJSONRequestBody = NameserverGroupRequest // PutApiDnsSettingsJSONRequestBody defines body for PutApiDnsSettings for application/json ContentType. type PutApiDnsSettingsJSONRequestBody = DNSSettings +// PostApiDnsZonesJSONRequestBody defines body for PostApiDnsZones for application/json ContentType. +type PostApiDnsZonesJSONRequestBody = ZoneRequest + +// PutApiDnsZonesZoneIdJSONRequestBody defines body for PutApiDnsZonesZoneId for application/json ContentType. +type PutApiDnsZonesZoneIdJSONRequestBody = ZoneRequest + +// PostApiDnsZonesZoneIdRecordsJSONRequestBody defines body for PostApiDnsZonesZoneIdRecords for application/json ContentType. +type PostApiDnsZonesZoneIdRecordsJSONRequestBody = DNSRecordRequest + +// PutApiDnsZonesZoneIdRecordsRecordIdJSONRequestBody defines body for PutApiDnsZonesZoneIdRecordsRecordId for application/json ContentType. +type PutApiDnsZonesZoneIdRecordsRecordIdJSONRequestBody = DNSRecordRequest + // PostApiGroupsJSONRequestBody defines body for PostApiGroups for application/json ContentType. type PostApiGroupsJSONRequestBody = GroupRequest diff --git a/shared/management/status/error.go b/shared/management/status/error.go index 09676847e..ea02173e9 100644 --- a/shared/management/status/error.go +++ b/shared/management/status/error.go @@ -252,3 +252,13 @@ func NewOperationNotFoundError(operation operations.Operation) error { func NewRouteNotFoundError(routeID string) error { return Errorf(NotFound, "route: %s not found", routeID) } + +// NewZoneNotFoundError creates a new Error with NotFound type for a missing dns zone. +func NewZoneNotFoundError(zoneID string) error { + return Errorf(NotFound, "zone: %s not found", zoneID) +} + +// NewDNSRecordNotFoundError creates a new Error with NotFound type for a missing dns record. +func NewDNSRecordNotFoundError(recordID string) error { + return Errorf(NotFound, "dns record: %s not found", recordID) +} From 1ff7abe909c1f1c4355bcc5a6e82ca75c3794a11 Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Fri, 16 Jan 2026 12:28:17 +0100 Subject: [PATCH 051/374] [management, client] Fix SSH server audience validator (#5105) * **New Features** * SSH server JWT validation now accepts multiple audiences with backward-compatible handling of the previous single-audience setting and a guard ensuring at least one audience is configured. * **Tests** * Test suites updated and new tests added to cover multiple-audience scenarios and compatibility with existing behavior. * **Other** * Startup logging enhanced to report configured audiences for JWT auth. --- client/internal/engine_ssh.go | 9 +- client/ssh/proxy/proxy_test.go | 2 +- client/ssh/server/jwt_test.go | 113 ++- client/ssh/server/server.go | 19 +- .../internals/shared/grpc/conversion.go | 7 + .../internals/shared/grpc/conversion_test.go | 51 ++ shared/management/proto/management.pb.go | 729 +++++++++--------- shared/management/proto/management.proto | 6 +- 8 files changed, 565 insertions(+), 371 deletions(-) diff --git a/client/internal/engine_ssh.go b/client/internal/engine_ssh.go index e683d8cee..a8c05fe0a 100644 --- a/client/internal/engine_ssh.go +++ b/client/internal/engine_ssh.go @@ -72,9 +72,16 @@ func (e *Engine) updateSSH(sshConf *mgmProto.SSHConfig) error { } if protoJWT := sshConf.GetJwtConfig(); protoJWT != nil { + audiences := protoJWT.GetAudiences() + if len(audiences) == 0 && protoJWT.GetAudience() != "" { + audiences = []string{protoJWT.GetAudience()} + } + + log.Debugf("starting SSH server with JWT authentication: audiences=%v", audiences) + jwtConfig := &sshserver.JWTConfig{ Issuer: protoJWT.GetIssuer(), - Audience: protoJWT.GetAudience(), + Audiences: audiences, KeysLocation: protoJWT.GetKeysLocation(), MaxTokenAge: protoJWT.GetMaxTokenAge(), } diff --git a/client/ssh/proxy/proxy_test.go b/client/ssh/proxy/proxy_test.go index 81d588801..dba2e88da 100644 --- a/client/ssh/proxy/proxy_test.go +++ b/client/ssh/proxy/proxy_test.go @@ -132,7 +132,7 @@ func TestSSHProxy_Connect(t *testing.T) { HostKeyPEM: hostKey, JWT: &server.JWTConfig{ Issuer: issuer, - Audience: audience, + Audiences: []string{audience}, KeysLocation: jwksURL, }, } diff --git a/client/ssh/server/jwt_test.go b/client/ssh/server/jwt_test.go index 6eb88accc..dbef011ac 100644 --- a/client/ssh/server/jwt_test.go +++ b/client/ssh/server/jwt_test.go @@ -43,7 +43,7 @@ func TestJWTEnforcement(t *testing.T) { t.Run("blocks_without_jwt", func(t *testing.T) { jwtConfig := &JWTConfig{ Issuer: "test-issuer", - Audience: "test-audience", + Audiences: []string{"test-audience"}, KeysLocation: "test-keys", } serverConfig := &Config{ @@ -202,7 +202,7 @@ func TestJWTDetection(t *testing.T) { jwtConfig := &JWTConfig{ Issuer: issuer, - Audience: audience, + Audiences: []string{audience}, KeysLocation: jwksURL, } serverConfig := &Config{ @@ -329,7 +329,7 @@ func TestJWTFailClose(t *testing.T) { t.Run(tc.name, func(t *testing.T) { jwtConfig := &JWTConfig{ Issuer: issuer, - Audience: audience, + Audiences: []string{audience}, KeysLocation: jwksURL, MaxTokenAge: 3600, } @@ -567,7 +567,7 @@ func TestJWTAuthentication(t *testing.T) { jwtConfig := &JWTConfig{ Issuer: issuer, - Audience: audience, + Audiences: []string{audience}, KeysLocation: jwksURL, } serverConfig := &Config{ @@ -646,3 +646,108 @@ func TestJWTAuthentication(t *testing.T) { }) } } + +// TestJWTMultipleAudiences tests JWT validation with multiple audiences (dashboard and CLI). +func TestJWTMultipleAudiences(t *testing.T) { + if testing.Short() { + t.Skip("Skipping JWT multiple audiences tests in short mode") + } + + jwksServer, privateKey, jwksURL := setupJWKSServer(t) + defer jwksServer.Close() + + const ( + issuer = "https://test-issuer.example.com" + dashboardAudience = "dashboard-audience" + cliAudience = "cli-audience" + ) + + hostKey, err := nbssh.GeneratePrivateKey(nbssh.ED25519) + require.NoError(t, err) + + testCases := []struct { + name string + audience string + wantAuthOK bool + }{ + { + name: "accepts_dashboard_audience", + audience: dashboardAudience, + wantAuthOK: true, + }, + { + name: "accepts_cli_audience", + audience: cliAudience, + wantAuthOK: true, + }, + { + name: "rejects_unknown_audience", + audience: "unknown-audience", + wantAuthOK: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + jwtConfig := &JWTConfig{ + Issuer: issuer, + Audiences: []string{dashboardAudience, cliAudience}, + KeysLocation: jwksURL, + } + serverConfig := &Config{ + HostKeyPEM: hostKey, + JWT: jwtConfig, + } + server := New(serverConfig) + server.SetAllowRootLogin(true) + + testUserHash, err := sshuserhash.HashUserID("test-user") + require.NoError(t, err) + + currentUser := testutil.GetTestUsername(t) + authConfig := &sshauth.Config{ + UserIDClaim: sshauth.DefaultUserIDClaim, + AuthorizedUsers: []sshuserhash.UserIDHash{testUserHash}, + MachineUsers: map[string][]uint32{ + currentUser: {0}, + }, + } + server.UpdateSSHAuth(authConfig) + + serverAddr := StartTestServer(t, server) + defer require.NoError(t, server.Stop()) + + host, portStr, err := net.SplitHostPort(serverAddr) + require.NoError(t, err) + + token := generateValidJWT(t, privateKey, issuer, tc.audience) + config := &cryptossh.ClientConfig{ + User: testutil.GetTestUsername(t), + Auth: []cryptossh.AuthMethod{ + cryptossh.Password(token), + }, + HostKeyCallback: cryptossh.InsecureIgnoreHostKey(), + Timeout: 2 * time.Second, + } + + conn, err := cryptossh.Dial("tcp", net.JoinHostPort(host, portStr), config) + if tc.wantAuthOK { + require.NoError(t, err, "JWT authentication should succeed for audience %s", tc.audience) + defer func() { + if err := conn.Close(); err != nil { + t.Logf("close connection: %v", err) + } + }() + + session, err := conn.NewSession() + require.NoError(t, err) + defer session.Close() + + err = session.Shell() + require.NoError(t, err, "Shell should work with valid audience") + } else { + assert.Error(t, err, "JWT authentication should fail for unknown audience") + } + }) + } +} diff --git a/client/ssh/server/server.go b/client/ssh/server/server.go index 3a8568979..a663614f4 100644 --- a/client/ssh/server/server.go +++ b/client/ssh/server/server.go @@ -176,9 +176,9 @@ type Server struct { type JWTConfig struct { Issuer string - Audience string KeysLocation string MaxTokenAge int64 + Audiences []string } // Config contains all SSH server configuration options @@ -427,18 +427,25 @@ func (s *Server) ensureJWTValidator() error { return fmt.Errorf("JWT config not set") } - log.Debugf("Initializing JWT validator (issuer: %s, audience: %s)", config.Issuer, config.Audience) + if len(config.Audiences) == 0 { + return fmt.Errorf("JWT config has no audiences configured") + } + log.Debugf("Initializing JWT validator (issuer: %s, audiences: %v)", config.Issuer, config.Audiences) validator := jwt.NewValidator( config.Issuer, - []string{config.Audience}, + config.Audiences, config.KeysLocation, true, ) // Use custom userIDClaim from authorizer if available + audience := "" + if len(config.Audiences) != 0 { + audience = config.Audiences[0] + } extractorOptions := []jwt.ClaimsExtractorOption{ - jwt.WithAudience(config.Audience), + jwt.WithAudience(audience), } if authorizer.GetUserIDClaim() != "" { extractorOptions = append(extractorOptions, jwt.WithUserIDClaim(authorizer.GetUserIDClaim())) @@ -475,8 +482,8 @@ func (s *Server) validateJWTToken(tokenString string) (*gojwt.Token, error) { if err != nil { if jwtConfig != nil { if claims, parseErr := s.parseTokenWithoutValidation(tokenString); parseErr == nil { - return nil, fmt.Errorf("validate token (expected issuer=%s, audience=%s, actual issuer=%v, audience=%v): %w", - jwtConfig.Issuer, jwtConfig.Audience, claims["iss"], claims["aud"], err) + return nil, fmt.Errorf("validate token (expected issuer=%s, audiences=%v, actual issuer=%v, audience=%v): %w", + jwtConfig.Issuer, jwtConfig.Audiences, claims["iss"], claims["aud"], err) } } return nil, fmt.Errorf("validate token: %w", err) diff --git a/management/internals/shared/grpc/conversion.go b/management/internals/shared/grpc/conversion.go index ba06b81a0..c74fa2660 100644 --- a/management/internals/shared/grpc/conversion.go +++ b/management/internals/shared/grpc/conversion.go @@ -434,9 +434,16 @@ func buildJWTConfig(config *nbconfig.HttpServerConfig, deviceFlowConfig *nbconfi if config.CLIAuthAudience != "" { audience = config.CLIAuthAudience } + + audiences := []string{config.AuthAudience} + if config.CLIAuthAudience != "" && config.CLIAuthAudience != config.AuthAudience { + audiences = append(audiences, config.CLIAuthAudience) + } + return &proto.JWTConfig{ Issuer: issuer, Audience: audience, + Audiences: audiences, KeysLocation: keysLocation, } } diff --git a/management/internals/shared/grpc/conversion_test.go b/management/internals/shared/grpc/conversion_test.go index 701271345..95ad05eec 100644 --- a/management/internals/shared/grpc/conversion_test.go +++ b/management/internals/shared/grpc/conversion_test.go @@ -6,9 +6,12 @@ import ( "reflect" "testing" + "github.com/stretchr/testify/assert" + nbdns "github.com/netbirdio/netbird/dns" "github.com/netbirdio/netbird/management/internals/controllers/network_map" "github.com/netbirdio/netbird/management/internals/controllers/network_map/controller/cache" + nbconfig "github.com/netbirdio/netbird/management/internals/server/config" ) func TestToProtocolDNSConfigWithCache(t *testing.T) { @@ -148,3 +151,51 @@ func generateTestData(size int) nbdns.Config { return config } + +func TestBuildJWTConfig_Audiences(t *testing.T) { + tests := []struct { + name string + authAudience string + cliAuthAudience string + expectedAudiences []string + expectedAudience string + }{ + { + name: "only_auth_audience", + authAudience: "dashboard-aud", + cliAuthAudience: "", + expectedAudiences: []string{"dashboard-aud"}, + expectedAudience: "dashboard-aud", + }, + { + name: "both_audiences_different", + authAudience: "dashboard-aud", + cliAuthAudience: "cli-aud", + expectedAudiences: []string{"dashboard-aud", "cli-aud"}, + expectedAudience: "cli-aud", + }, + { + name: "both_audiences_same", + authAudience: "same-aud", + cliAuthAudience: "same-aud", + expectedAudiences: []string{"same-aud"}, + expectedAudience: "same-aud", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + config := &nbconfig.HttpServerConfig{ + AuthIssuer: "https://issuer.example.com", + AuthAudience: tc.authAudience, + CLIAuthAudience: tc.cliAuthAudience, + } + + result := buildJWTConfig(config, nil) + + assert.NotNil(t, result) + assert.Equal(t, tc.expectedAudiences, result.Audiences, "audiences should match expected") + assert.Equal(t, tc.expectedAudience, result.Audience, "audience should match expected") + }) + } +} diff --git a/shared/management/proto/management.pb.go b/shared/management/proto/management.pb.go index 077f84ed3..84b74bf8c 100644 --- a/shared/management/proto/management.pb.go +++ b/shared/management/proto/management.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v6.33.1 +// protoc v6.33.3 // source: management.proto package proto @@ -1617,6 +1617,8 @@ type JWTConfig struct { Audience string `protobuf:"bytes,2,opt,name=audience,proto3" json:"audience,omitempty"` KeysLocation string `protobuf:"bytes,3,opt,name=keysLocation,proto3" json:"keysLocation,omitempty"` MaxTokenAge int64 `protobuf:"varint,4,opt,name=maxTokenAge,proto3" json:"maxTokenAge,omitempty"` + // audiences + Audiences []string `protobuf:"bytes,5,rep,name=audiences,proto3" json:"audiences,omitempty"` } func (x *JWTConfig) Reset() { @@ -1679,6 +1681,13 @@ func (x *JWTConfig) GetMaxTokenAge() int64 { return 0 } +func (x *JWTConfig) GetAudiences() []string { + if x != nil { + return x.Audiences + } + return nil +} + // ProtectedHostConfig is similar to HostConfig but has additional user and password // Mostly used for TURN servers type ProtectedHostConfig struct { @@ -2873,7 +2882,9 @@ type CustomZone struct { Domain string `protobuf:"bytes,1,opt,name=Domain,proto3" json:"Domain,omitempty"` Records []*SimpleRecord `protobuf:"bytes,2,rep,name=Records,proto3" json:"Records,omitempty"` SearchDomainDisabled bool `protobuf:"varint,3,opt,name=SearchDomainDisabled,proto3" json:"SearchDomainDisabled,omitempty"` - NonAuthoritative bool `protobuf:"varint,4,opt,name=NonAuthoritative,proto3" json:"NonAuthoritative,omitempty"` + // NonAuthoritative indicates this is a user-created zone (not the built-in peer DNS zone). + // Non-authoritative zones will fallthrough to lower-priority handlers on NXDOMAIN and skip PTR processing. + NonAuthoritative bool `protobuf:"varint,4,opt,name=NonAuthoritative,proto3" json:"NonAuthoritative,omitempty"` } func (x *CustomZone) Reset() { @@ -3918,7 +3929,7 @@ var file_management_proto_rawDesc = []byte{ 0x12, 0x65, 0x78, 0x69, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x6e, 0x73, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x64, 0x6e, 0x73, 0x43, - 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x85, 0x01, 0x0a, 0x09, 0x4a, 0x57, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x09, 0x4a, 0x57, 0x54, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, @@ -3927,364 +3938,366 @@ var file_management_proto_rawDesc = []byte{ 0x09, 0x52, 0x0c, 0x6b, 0x65, 0x79, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x67, - 0x65, 0x22, 0x7d, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x48, 0x6f, - 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x0a, 0x0a, 0x68, 0x6f, 0x73, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x68, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x75, 0x73, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x22, 0xd3, 0x02, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6e, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, - 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x12, 0x0a, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x66, 0x71, 0x64, 0x6e, 0x12, 0x48, 0x0a, 0x1f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, - 0x65, 0x65, 0x72, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x52, - 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x65, 0x72, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, - 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x34, - 0x0a, 0x15, 0x4c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x4c, - 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x74, 0x75, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x03, 0x6d, 0x74, 0x75, 0x12, 0x3e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x6f, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0x52, 0x0a, 0x12, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, - 0x77, 0x61, 0x79, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0xe8, 0x05, 0x0a, 0x0a, 0x4e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x65, 0x72, - 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, - 0x6c, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, - 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3e, 0x0a, 0x0b, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, - 0x72, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x29, 0x0a, 0x06, 0x52, 0x6f, 0x75, - 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x06, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, - 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x40, 0x0a, 0x0c, 0x6f, 0x66, 0x66, - 0x6c, 0x69, 0x6e, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6f, - 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x46, - 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0d, 0x46, 0x69, - 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x66, - 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x66, 0x69, 0x72, 0x65, 0x77, - 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, - 0x4f, 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, - 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, - 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x13, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, - 0x12, 0x3e, 0x0a, 0x1a, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, - 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, - 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x12, 0x44, 0x0a, 0x0f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, - 0x6c, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, - 0x67, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, - 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x73, 0x73, 0x68, 0x41, 0x75, 0x74, - 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x52, 0x07, 0x73, 0x73, - 0x68, 0x41, 0x75, 0x74, 0x68, 0x22, 0x82, 0x02, 0x0a, 0x07, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, - 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x55, 0x73, 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, 0x69, 0x6d, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x55, 0x73, 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, - 0x61, 0x69, 0x6d, 0x12, 0x28, 0x0a, 0x0f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, - 0x64, 0x55, 0x73, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0f, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x4a, 0x0a, - 0x0d, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, - 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6d, 0x61, 0x63, - 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x1a, 0x5f, 0x0a, 0x11, 0x4d, 0x61, 0x63, - 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x61, 0x63, - 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2e, 0x0a, 0x12, 0x4d, 0x61, - 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, - 0x12, 0x18, 0x0a, 0x07, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0d, 0x52, 0x07, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x22, 0xbb, 0x01, 0x0a, 0x10, 0x52, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x1a, 0x0a, 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x49, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x49, 0x70, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, - 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x12, 0x0a, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x66, 0x71, 0x64, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x67, 0x65, 0x6e, - 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x7e, 0x0a, 0x09, 0x53, 0x53, 0x48, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x73, 0x68, 0x45, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x73, 0x73, 0x68, 0x45, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, - 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, - 0x4b, 0x65, 0x79, 0x12, 0x33, 0x0a, 0x09, 0x6a, 0x77, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x57, 0x54, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x6a, - 0x77, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x20, 0x0a, 0x1e, 0x44, 0x65, 0x76, 0x69, - 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, - 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xbf, 0x01, 0x0a, 0x17, 0x44, - 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x48, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, - 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x12, 0x42, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x22, 0x16, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x4f, 0x53, 0x54, 0x45, 0x44, 0x10, 0x00, 0x22, 0x1e, 0x0a, 0x1c, - 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5b, 0x0a, 0x15, - 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x42, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xb8, 0x03, 0x0a, 0x0e, 0x50, 0x72, - 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, - 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, - 0x12, 0x2e, 0x0a, 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, - 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x44, 0x65, - 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, - 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, - 0x55, 0x73, 0x65, 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0a, 0x55, 0x73, 0x65, 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x34, 0x0a, 0x15, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, - 0x4c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, - 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, - 0x74, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, - 0x6c, 0x61, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, - 0x46, 0x6c, 0x61, 0x67, 0x22, 0x93, 0x02, 0x0a, 0x05, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x0e, - 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, - 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x4e, 0x65, 0x74, 0x77, - 0x6f, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x4e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, - 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x16, - 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x1e, 0x0a, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, - 0x72, 0x61, 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x4d, 0x61, 0x73, 0x71, - 0x75, 0x65, 0x72, 0x61, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, - 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x44, - 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, - 0x6f, 0x75, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, - 0x41, 0x70, 0x70, 0x6c, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, - 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x22, 0xde, 0x01, 0x0a, 0x09, 0x44, - 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x47, - 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, - 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x38, 0x0a, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x5a, 0x6f, 0x6e, 0x65, 0x52, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, - 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, - 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x46, 0x6f, - 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xb4, 0x01, 0x0a, 0x0a, - 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x12, 0x32, 0x0a, 0x07, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x52, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, - 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x53, 0x6b, - 0x69, 0x70, 0x50, 0x54, 0x52, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0e, 0x53, 0x6b, 0x69, 0x70, 0x50, 0x54, 0x52, 0x50, 0x72, 0x6f, 0x63, 0x65, - 0x73, 0x73, 0x22, 0x74, 0x0a, 0x0c, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, 0x6f, - 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x43, 0x6c, - 0x61, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x43, 0x6c, 0x61, 0x73, 0x73, - 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, - 0x54, 0x4c, 0x12, 0x14, 0x0a, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x22, 0xb3, 0x01, 0x0a, 0x0f, 0x4e, 0x61, 0x6d, - 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x38, 0x0a, 0x0b, - 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, - 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x53, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, - 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x65, - 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, - 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x48, - 0x0a, 0x0a, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, - 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x12, 0x16, 0x0a, 0x06, - 0x4e, 0x53, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4e, 0x53, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xa7, 0x02, 0x0a, 0x0c, 0x46, 0x69, 0x72, - 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x65, 0x65, - 0x72, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, - 0x50, 0x12, 0x37, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x50, 0x6f, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x50, 0x6f, - 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x49, 0x44, 0x22, 0x38, 0x0a, 0x0e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, - 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x22, 0x1e, 0x0a, 0x06, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0x96, 0x01, 0x0a, - 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x6f, 0x72, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, - 0x32, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, - 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, 0x72, 0x61, - 0x6e, 0x67, 0x65, 0x1a, 0x2f, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x03, 0x65, 0x6e, 0x64, 0x42, 0x0f, 0x0a, 0x0d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x87, 0x03, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, - 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, - 0x2e, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, - 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x49, - 0x6e, 0x66, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x08, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x73, 0x44, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, - 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, - 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x49, 0x44, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x44, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x44, 0x22, - 0xf2, 0x01, 0x0a, 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, - 0x6c, 0x65, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0f, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, - 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x41, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, - 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, - 0x50, 0x6f, 0x72, 0x74, 0x2a, 0x4c, 0x0a, 0x0c, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, - 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, - 0x49, 0x43, 0x4d, 0x50, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, - 0x10, 0x05, 0x2a, 0x20, 0x0a, 0x0d, 0x52, 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x4f, - 0x55, 0x54, 0x10, 0x01, 0x2a, 0x22, 0x0a, 0x0a, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x10, 0x00, 0x12, 0x08, - 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, 0x10, 0x01, 0x32, 0xcd, 0x04, 0x0a, 0x11, 0x4d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, - 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x1c, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x42, 0x0a, - 0x0c, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x11, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x33, 0x0a, 0x09, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x12, 0x11, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, + 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x22, + 0x7d, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x0a, 0x0a, 0x68, 0x6f, 0x73, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x0a, 0x68, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, + 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, + 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0xd3, + 0x02, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, + 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x73, 0x68, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, + 0x0a, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, + 0x64, 0x6e, 0x12, 0x48, 0x0a, 0x1f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x65, + 0x72, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x65, 0x72, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x15, + 0x4c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x4c, 0x61, 0x7a, + 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x74, 0x75, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x6d, 0x74, 0x75, 0x12, 0x3e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x22, 0x52, 0x0a, 0x12, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x77, 0x61, + 0x79, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0xe8, 0x05, 0x0a, 0x0a, 0x4e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, + 0x36, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x65, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3e, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, + 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x29, 0x0a, 0x06, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x06, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x44, 0x4e, + 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x40, 0x0a, 0x0c, 0x6f, 0x66, 0x66, 0x6c, 0x69, + 0x6e, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6f, 0x66, 0x66, + 0x6c, 0x69, 0x6e, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x46, 0x69, 0x72, + 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, + 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0d, 0x46, 0x69, 0x72, 0x65, + 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x66, 0x69, 0x72, + 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x66, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, + 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4f, 0x0a, + 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, 0x69, 0x72, + 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x3e, + 0x0a, 0x1a, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, + 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x1a, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, + 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, + 0x0a, 0x0f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x73, 0x73, 0x68, 0x41, 0x75, 0x74, 0x68, 0x18, + 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x52, 0x07, 0x73, 0x73, 0x68, 0x41, + 0x75, 0x74, 0x68, 0x22, 0x82, 0x02, 0x0a, 0x07, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x12, + 0x20, 0x0a, 0x0b, 0x55, 0x73, 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x55, 0x73, 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, 0x69, + 0x6d, 0x12, 0x28, 0x0a, 0x0f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x55, + 0x73, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0f, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x4a, 0x0a, 0x0d, 0x6d, + 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, + 0x73, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6d, 0x61, 0x63, 0x68, 0x69, + 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x1a, 0x5f, 0x0a, 0x11, 0x4d, 0x61, 0x63, 0x68, 0x69, + 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, + 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2e, 0x0a, 0x12, 0x4d, 0x61, 0x63, 0x68, + 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, + 0x07, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x22, 0xbb, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, + 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x65, 0x64, 0x49, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x49, 0x70, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x73, 0x68, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, + 0x0a, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, + 0x64, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x7e, 0x0a, 0x09, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x73, 0x68, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x73, 0x73, 0x68, 0x45, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, + 0x79, 0x12, 0x33, 0x0a, 0x09, 0x6a, 0x77, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x4a, 0x57, 0x54, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x6a, 0x77, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x20, 0x0a, 0x1e, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, + 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, + 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xbf, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x48, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x42, + 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x22, 0x16, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x0a, + 0x0a, 0x06, 0x48, 0x4f, 0x53, 0x54, 0x45, 0x44, 0x10, 0x00, 0x22, 0x1e, 0x0a, 0x1c, 0x50, 0x4b, + 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, + 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5b, 0x0a, 0x15, 0x50, 0x4b, + 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, + 0x6c, 0x6f, 0x77, 0x12, 0x42, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xb8, 0x03, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2e, + 0x0a, 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x44, 0x65, 0x76, 0x69, + 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x24, + 0x0a, 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x55, 0x73, + 0x65, 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x55, 0x73, 0x65, 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x34, 0x0a, 0x15, 0x41, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x12, 0x22, 0x0a, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x55, 0x52, 0x4c, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, + 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x4c, + 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, + 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, + 0x61, 0x67, 0x22, 0x93, 0x02, 0x0a, 0x05, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, + 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x4e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x12, 0x1e, 0x0a, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, + 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, + 0x72, 0x61, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, + 0x70, 0x6c, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, + 0x75, 0x74, 0x6f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x22, 0xde, 0x01, 0x0a, 0x09, 0x44, 0x4e, 0x53, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x10, + 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x52, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, + 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x38, 0x0a, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, + 0x6f, 0x6e, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, + 0x6e, 0x65, 0x52, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, + 0x28, 0x0a, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x46, 0x6f, 0x72, 0x77, + 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xb8, 0x01, 0x0a, 0x0a, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, + 0x12, 0x32, 0x0a, 0x07, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, + 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x52, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, + 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x41, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x10, 0x4e, 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, + 0x74, 0x69, 0x76, 0x65, 0x22, 0x74, 0x0a, 0x0c, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x43, 0x6c, 0x61, + 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x03, 0x54, 0x54, 0x4c, 0x12, 0x14, 0x0a, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x22, 0xb3, 0x01, 0x0a, 0x0f, 0x4e, + 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x38, + 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x0b, 0x4e, 0x61, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x32, 0x0a, 0x14, + 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, + 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x22, 0x48, 0x0a, 0x0a, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x0e, + 0x0a, 0x02, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x12, 0x16, + 0x0a, 0x06, 0x4e, 0x53, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, + 0x4e, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xa7, 0x02, 0x0a, 0x0c, 0x46, + 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, + 0x65, 0x65, 0x72, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65, + 0x72, 0x49, 0x50, 0x12, 0x37, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, + 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, + 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x49, 0x44, 0x22, 0x38, 0x0a, 0x0e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x12, 0x10, 0x0a, 0x03, + 0x6d, 0x61, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x22, 0x1e, + 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x6c, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0x96, + 0x01, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x04, 0x70, + 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, + 0x74, 0x12, 0x32, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, + 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0x2f, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x42, 0x0f, 0x0a, 0x0d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x87, 0x03, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x22, 0x0a, + 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x73, 0x12, 0x2e, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, + 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, + 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x6f, 0x72, + 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x69, + 0x73, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x69, 0x73, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x49, 0x44, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, + 0x44, 0x22, 0xf2, 0x01, 0x0a, 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, + 0x52, 0x75, 0x6c, 0x65, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0f, 0x64, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, + 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, + 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x2a, 0x4c, 0x0a, 0x0c, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, + 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x03, 0x12, 0x08, + 0x0a, 0x04, 0x49, 0x43, 0x4d, 0x50, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, 0x54, + 0x4f, 0x4d, 0x10, 0x05, 0x2a, 0x20, 0x0a, 0x0d, 0x52, 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x22, 0x0a, 0x0a, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x10, 0x00, + 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, 0x10, 0x01, 0x32, 0xcd, 0x04, 0x0a, 0x11, 0x4d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x45, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, + 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x42, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, + 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x09, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, + 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x44, + 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x22, 0x00, 0x12, 0x58, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, + 0x67, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x4b, 0x43, 0x45, 0x41, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, + 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x08, - 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x06, 0x4c, - 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3d, + 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, + 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, + 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/shared/management/proto/management.proto b/shared/management/proto/management.proto index c4cc43295..e44b49781 100644 --- a/shared/management/proto/management.proto +++ b/shared/management/proto/management.proto @@ -246,12 +246,16 @@ message FlowConfig { bool dnsCollection = 8; } -// JWTConfig represents JWT authentication configuration +// JWTConfig represents JWT authentication configuration for validating tokens. message JWTConfig { string issuer = 1; + // Deprecated: audience is kept for backwards compatibility only. Use audiences instead in the client code but populate this field. string audience = 2; string keysLocation = 3; int64 maxTokenAge = 4; + // audiences contains the list of valid audiences for JWT validation. + // Tokens matching any audience in this list are considered valid. + repeated string audiences = 5; } // ProtectedHostConfig is similar to HostConfig but has additional user and password From daf144917435ad39c3c03ea4421bd9d31f5035d2 Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Fri, 16 Jan 2026 13:25:02 +0100 Subject: [PATCH 052/374] [client] Remove duplicate audiences check (#5117) --- client/ssh/server/server.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/client/ssh/server/server.go b/client/ssh/server/server.go index a663614f4..e897bbade 100644 --- a/client/ssh/server/server.go +++ b/client/ssh/server/server.go @@ -440,12 +440,8 @@ func (s *Server) ensureJWTValidator() error { ) // Use custom userIDClaim from authorizer if available - audience := "" - if len(config.Audiences) != 0 { - audience = config.Audiences[0] - } extractorOptions := []jwt.ClaimsExtractorOption{ - jwt.WithAudience(audience), + jwt.WithAudience(config.Audiences[0]), } if authorizer.GetUserIDClaim() != "" { extractorOptions = append(extractorOptions, jwt.WithUserIDClaim(authorizer.GetUserIDClaim())) From 4c2eb2af734504492e047549ab7576a7ee71b50d Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Fri, 16 Jan 2026 16:01:39 +0100 Subject: [PATCH 053/374] [management] Skip email_verified if not present (#5118) --- idp/dex/provider.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/idp/dex/provider.go b/idp/dex/provider.go index 6a4fe7873..6625d9eaf 100644 --- a/idp/dex/provider.go +++ b/idp/dex/provider.go @@ -798,15 +798,15 @@ func buildOIDCConnectorConfig(cfg *ConnectorConfig, redirectURI string) ([]byte, "redirectURI": redirectURI, "scopes": []string{"openid", "profile", "email"}, "insecureEnableGroups": true, + //some providers don't return email verified, so we need to skip it if not present (e.g., Entra, Okta, Duo) + "insecureSkipEmailVerified": true, } switch cfg.Type { case "zitadel": oidcConfig["getUserInfo"] = true case "entra": - oidcConfig["insecureSkipEmailVerified"] = true oidcConfig["claimMapping"] = map[string]string{"email": "preferred_username"} case "okta": - oidcConfig["insecureSkipEmailVerified"] = true oidcConfig["scopes"] = []string{"openid", "profile", "email", "groups"} case "pocketid": oidcConfig["scopes"] = []string{"openid", "profile", "email", "groups"} From 3ce5d6a4f87c7d7e74b4e99431f9e3bddc75fd6b Mon Sep 17 00:00:00 2001 From: ressys1978 Date: Fri, 16 Jan 2026 11:23:37 -0400 Subject: [PATCH 054/374] [management] Add idp timeout env variable (#4647) Introduced the NETBIRD_IDP_TIMEOUT environment variable to the management service. This allows configuring a timeout for supported IDPs. If the variable is unset or contains an invalid value, a default timeout of 10 seconds is used as a fallback. This is needed for larger IDP environments where 10s is just not enough time. --- management/server/idp/auth0.go | 5 +++-- management/server/idp/authentik.go | 7 +++---- management/server/idp/azure.go | 5 +++-- management/server/idp/google_workspace.go | 4 ++-- management/server/idp/jumpcloud.go | 4 ++-- management/server/idp/keycloak.go | 3 ++- management/server/idp/okta.go | 3 +-- management/server/idp/pocketid.go | 4 ++-- management/server/idp/util.go | 23 +++++++++++++++++++++++ management/server/idp/zitadel.go | 3 ++- 10 files changed, 43 insertions(+), 18 deletions(-) diff --git a/management/server/idp/auth0.go b/management/server/idp/auth0.go index 1eb8434d3..0d4461e89 100644 --- a/management/server/idp/auth0.go +++ b/management/server/idp/auth0.go @@ -135,10 +135,11 @@ func NewAuth0Manager(config Auth0ClientConfig, appMetrics telemetry.AppMetrics) httpTransport := http.DefaultTransport.(*http.Transport).Clone() httpTransport.MaxIdleConns = 5 - httpClient := &http.Client{ - Timeout: 10 * time.Second, + httpClient := &http.Client{ + Timeout: idpTimeout(), Transport: httpTransport, } + helper := JsonParser{} if config.AuthIssuer == "" { diff --git a/management/server/idp/authentik.go b/management/server/idp/authentik.go index 2f87a9bba..0f30cc63d 100644 --- a/management/server/idp/authentik.go +++ b/management/server/idp/authentik.go @@ -48,16 +48,15 @@ type AuthentikCredentials struct { } // NewAuthentikManager creates a new instance of the AuthentikManager. -func NewAuthentikManager(config AuthentikClientConfig, - appMetrics telemetry.AppMetrics) (*AuthentikManager, error) { +func NewAuthentikManager(config AuthentikClientConfig, appMetrics telemetry.AppMetrics) (*AuthentikManager, error) { httpTransport := http.DefaultTransport.(*http.Transport).Clone() httpTransport.MaxIdleConns = 5 httpClient := &http.Client{ - Timeout: 10 * time.Second, + Timeout: idpTimeout(), Transport: httpTransport, } - + helper := JsonParser{} if config.ClientID == "" { diff --git a/management/server/idp/azure.go b/management/server/idp/azure.go index 393a39e3e..e098424b5 100644 --- a/management/server/idp/azure.go +++ b/management/server/idp/azure.go @@ -57,10 +57,11 @@ func NewAzureManager(config AzureClientConfig, appMetrics telemetry.AppMetrics) httpTransport := http.DefaultTransport.(*http.Transport).Clone() httpTransport.MaxIdleConns = 5 - httpClient := &http.Client{ - Timeout: 10 * time.Second, + httpClient := &http.Client{ + Timeout: idpTimeout(), Transport: httpTransport, } + helper := JsonParser{} if config.ClientID == "" { diff --git a/management/server/idp/google_workspace.go b/management/server/idp/google_workspace.go index 09ea8c430..6e417d394 100644 --- a/management/server/idp/google_workspace.go +++ b/management/server/idp/google_workspace.go @@ -5,7 +5,6 @@ import ( "encoding/base64" "fmt" "net/http" - "time" log "github.com/sirupsen/logrus" "golang.org/x/oauth2/google" @@ -49,9 +48,10 @@ func NewGoogleWorkspaceManager(ctx context.Context, config GoogleWorkspaceClient httpTransport.MaxIdleConns = 5 httpClient := &http.Client{ - Timeout: 10 * time.Second, + Timeout: idpTimeout(), Transport: httpTransport, } + helper := JsonParser{} if config.CustomerID == "" { diff --git a/management/server/idp/jumpcloud.go b/management/server/idp/jumpcloud.go index 6345e424a..8c4a9d089 100644 --- a/management/server/idp/jumpcloud.go +++ b/management/server/idp/jumpcloud.go @@ -5,7 +5,6 @@ import ( "fmt" "net/http" "strings" - "time" v1 "github.com/TheJumpCloud/jcapi-go/v1" @@ -46,9 +45,10 @@ func NewJumpCloudManager(config JumpCloudClientConfig, appMetrics telemetry.AppM httpTransport.MaxIdleConns = 5 httpClient := &http.Client{ - Timeout: 10 * time.Second, + Timeout: idpTimeout(), Transport: httpTransport, } + helper := JsonParser{} if config.APIToken == "" { diff --git a/management/server/idp/keycloak.go b/management/server/idp/keycloak.go index c611317ab..b640f7520 100644 --- a/management/server/idp/keycloak.go +++ b/management/server/idp/keycloak.go @@ -63,9 +63,10 @@ func NewKeycloakManager(config KeycloakClientConfig, appMetrics telemetry.AppMet httpTransport.MaxIdleConns = 5 httpClient := &http.Client{ - Timeout: 10 * time.Second, + Timeout: idpTimeout(), Transport: httpTransport, } + helper := JsonParser{} if config.ClientID == "" { diff --git a/management/server/idp/okta.go b/management/server/idp/okta.go index b9cd006be..07f0d8008 100644 --- a/management/server/idp/okta.go +++ b/management/server/idp/okta.go @@ -6,7 +6,6 @@ import ( "net/http" "net/url" "strings" - "time" "github.com/okta/okta-sdk-golang/v2/okta" "github.com/okta/okta-sdk-golang/v2/okta/query" @@ -45,7 +44,7 @@ func NewOktaManager(config OktaClientConfig, appMetrics telemetry.AppMetrics) (* httpTransport.MaxIdleConns = 5 httpClient := &http.Client{ - Timeout: 10 * time.Second, + Timeout: idpTimeout(), Transport: httpTransport, } diff --git a/management/server/idp/pocketid.go b/management/server/idp/pocketid.go index d8d764830..ee8e304ee 100644 --- a/management/server/idp/pocketid.go +++ b/management/server/idp/pocketid.go @@ -8,7 +8,6 @@ import ( "net/url" "slices" "strings" - "time" "github.com/netbirdio/netbird/management/server/telemetry" ) @@ -88,9 +87,10 @@ func NewPocketIdManager(config PocketIdClientConfig, appMetrics telemetry.AppMet httpTransport.MaxIdleConns = 5 httpClient := &http.Client{ - Timeout: 10 * time.Second, + Timeout: idpTimeout(), Transport: httpTransport, } + helper := JsonParser{} if config.ManagementEndpoint == "" { diff --git a/management/server/idp/util.go b/management/server/idp/util.go index df1497114..4310d1388 100644 --- a/management/server/idp/util.go +++ b/management/server/idp/util.go @@ -4,7 +4,9 @@ import ( "encoding/json" "math/rand" "net/url" + "os" "strings" + "time" ) var ( @@ -69,3 +71,24 @@ func baseURL(rawURL string) string { return parsedURL.Scheme + "://" + parsedURL.Host } + +const ( + // Provides the env variable name for use with idpTimeout function + idpTimeoutEnv = "NB_IDP_TIMEOUT" + // Sets the defaultTimeout to 10s. + defaultTimeout = 10 * time.Second +) + +// idpTimeout returns a timeout value for the IDP +func idpTimeout() time.Duration { + timeoutStr, ok := os.LookupEnv(idpTimeoutEnv) + if !ok || timeoutStr == "" { + return defaultTimeout + } + + timeout, err := time.ParseDuration(timeoutStr) + if err != nil { + return defaultTimeout + } + return timeout +} diff --git a/management/server/idp/zitadel.go b/management/server/idp/zitadel.go index 8db3c4796..ea0fd0aa7 100644 --- a/management/server/idp/zitadel.go +++ b/management/server/idp/zitadel.go @@ -164,9 +164,10 @@ func NewZitadelManager(config ZitadelClientConfig, appMetrics telemetry.AppMetri httpTransport.MaxIdleConns = 5 httpClient := &http.Client{ - Timeout: 10 * time.Second, + Timeout: idpTimeout(), Transport: httpTransport, } + helper := JsonParser{} hasPAT := config.PAT != "" From b352ab84c082c51368e587b83787fed2d4591fee Mon Sep 17 00:00:00 2001 From: shuuri-labs <61762328+shuuri-labs@users.noreply.github.com> Date: Fri, 16 Jan 2026 17:42:28 +0100 Subject: [PATCH 055/374] Feat/quickstart reverse proxy assistant (#5100) * add external reverse proxy config steps to quickstart script * remove generated files * - Remove 'press enter' prompt from post-traefik config since traefik requires no manual config - Improve npm flow (ask users for docker network, user container names in config) * fixes for npm flow * nginx flow fixes * caddy flow fixes * Consolidate NPM_NETWORK, NGINX_NETWORK, CADDY_NETWORK into single EXTERNAL_PROXY_NETWORK variable. Add read_proxy_docker_network() function that prompts for Docker network for options 2-4 (Nginx, NPM, Caddy). Generated configs now use container names when a Docker network is specified. * fix https for traefik * fix sonar code smells * fix sonar smell (add return to render_dashboard_env) * added tls instructions to nginx flow * removed unused bind_addr variable from quickstart.sh * Refactor getting-started.sh for improved maintainability Break down large functions into focused, single-responsibility components: - Split init_environment() into 6 initialization functions - Split print_post_setup_instructions() into 6 proxy-specific functions - Add section headers for better code organization - Fix 3 code smell issues (unused bind_addr variables) - Add TLS certificate documentation for Nginx - Link reverse proxy names to docs sections Reduces largest function from 205 to ~90 lines while maintaining single-file distribution. No functional changes. * - Remove duplicate network display logic in Traefik instructions - Use upstream_host instead of bind_addr for NPM forward hostname - Use upstream_host instead of bind_addr in manual proxy route examples - Prevents displaying invalid 0.0.0.0 as connection target in setup instructions * add wait_management_direct to caddy flow to ensure script waits until containers are running/passing healthchecks before reporting 'done!' --- infrastructure_files/getting-started.sh | 1153 ++++++++++++++++++++++- 1 file changed, 1136 insertions(+), 17 deletions(-) diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh index 5a9488fad..8676840a6 100755 --- a/infrastructure_files/getting-started.sh +++ b/infrastructure_files/getting-started.sh @@ -9,6 +9,16 @@ set -e # Sed pattern to strip base64 padding characters SED_STRIP_PADDING='s/=//g' +# Constants for repeated string literals +readonly MSG_STARTING_SERVICES="\nStarting NetBird services\n" +readonly MSG_DONE="\nDone!\n" +readonly MSG_NEXT_STEPS="Next steps:" +readonly MSG_SEPARATOR="==========================================" + +############################################ +# Utility Functions +############################################ + check_docker_compose() { if command -v docker-compose &> /dev/null then @@ -82,6 +92,106 @@ get_turn_external_ip() { return 0 } +read_reverse_proxy_type() { + echo "" > /dev/stderr + echo "Which reverse proxy will you use?" > /dev/stderr + echo " [0] Built-in Caddy (recommended - automatic TLS)" > /dev/stderr + echo " [1] Traefik (labels added to containers)" > /dev/stderr + echo " [2] Nginx (generates config template)" > /dev/stderr + echo " [3] Nginx Proxy Manager (generates config + instructions)" > /dev/stderr + echo " [4] External Caddy (generates Caddyfile snippet)" > /dev/stderr + echo " [5] Other/Manual (displays setup documentation)" > /dev/stderr + echo "" > /dev/stderr + echo -n "Enter choice [0-5] (default: 0): " > /dev/stderr + read -r CHOICE < /dev/tty + + if [[ -z "$CHOICE" ]]; then + CHOICE="0" + fi + + if [[ ! "$CHOICE" =~ ^[0-5]$ ]]; then + echo "Invalid choice. Please enter a number between 0 and 5." > /dev/stderr + read_reverse_proxy_type + return + fi + + echo "$CHOICE" + return 0 +} + +read_traefik_network() { + echo "" > /dev/stderr + echo "If you have an existing Traefik instance, enter its external network name." > /dev/stderr + echo -n "External network (leave empty to create 'netbird' network): " > /dev/stderr + read -r NETWORK < /dev/tty + echo "$NETWORK" + return 0 +} + +read_traefik_entrypoint() { + echo "" > /dev/stderr + echo "Enter the name of your Traefik HTTPS entrypoint." > /dev/stderr + echo -n "HTTPS entrypoint name (default: websecure): " > /dev/stderr + read -r ENTRYPOINT < /dev/tty + if [[ -z "$ENTRYPOINT" ]]; then + ENTRYPOINT="websecure" + fi + echo "$ENTRYPOINT" + return 0 +} + +read_traefik_certresolver() { + echo "" > /dev/stderr + echo "Enter the name of your Traefik certificate resolver (for automatic TLS)." > /dev/stderr + echo "Leave empty if you handle TLS termination elsewhere or use a wildcard cert." > /dev/stderr + echo -n "Certificate resolver name (e.g., letsencrypt): " > /dev/stderr + read -r RESOLVER < /dev/tty + echo "$RESOLVER" + return 0 +} + +read_port_binding_preference() { + echo "" > /dev/stderr + echo "Should container ports be bound to localhost only (127.0.0.1)?" > /dev/stderr + echo "Choose 'yes' if your reverse proxy runs on the same host (more secure)." > /dev/stderr + echo -n "Bind to localhost only? [Y/n]: " > /dev/stderr + read -r CHOICE < /dev/tty + + if [[ "$CHOICE" =~ ^[Nn]$ ]]; then + echo "false" + else + echo "true" + fi + return 0 +} + +read_proxy_docker_network() { + local proxy_name="$1" + echo "" > /dev/stderr + echo "Is ${proxy_name} running in Docker?" > /dev/stderr + echo "If yes, enter the Docker network ${proxy_name} is on (NetBird will join it)." > /dev/stderr + echo -n "Docker network (leave empty if not in Docker): " > /dev/stderr + read -r NETWORK < /dev/tty + echo "$NETWORK" + return 0 +} + +get_bind_address() { + if [[ "$BIND_LOCALHOST_ONLY" == "true" ]]; then + echo "127.0.0.1" + else + echo "0.0.0.0" + fi + return 0 +} + +get_upstream_host() { + # Always return 127.0.0.1 for health checks and upstream targets + # Cannot use 0.0.0.0 as a connection target + echo "127.0.0.1" + return 0 +} + wait_management() { set +e echo -n "Waiting for Management server to become ready" @@ -106,7 +216,35 @@ wait_management() { return 0 } -init_environment() { +wait_management_direct() { + set +e + local upstream_host=$(get_upstream_host) + echo -n "Waiting for Management server to become ready" + counter=1 + while true; do + # Check the embedded IdP endpoint directly (no reverse proxy) + if curl -sk -f -o /dev/null "http://${upstream_host}:${MANAGEMENT_HOST_PORT}/oauth2/.well-known/openid-configuration" 2>/dev/null; then + break + fi + if [[ $counter -eq 60 ]]; then + echo "" + echo "Taking too long. Checking logs..." + $DOCKER_COMPOSE_COMMAND logs --tail=20 management + fi + echo -n " ." + sleep 2 + counter=$((counter + 1)) + done + echo " done" + set -e + return 0 +} + +############################################ +# Initialization and Configuration +############################################ + +initialize_default_values() { CADDY_SECURE_DOMAIN="" NETBIRD_PORT=80 NETBIRD_HTTP_PROTOCOL="http" @@ -120,6 +258,22 @@ init_environment() { TURN_MAX_PORT=65535 TURN_EXTERNAL_IP_CONFIG=$(get_turn_external_ip) + # Reverse proxy configuration + REVERSE_PROXY_TYPE="0" + TRAEFIK_EXTERNAL_NETWORK="" + TRAEFIK_ENTRYPOINT="websecure" + TRAEFIK_CERTRESOLVER="" + DASHBOARD_HOST_PORT="8080" + MANAGEMENT_HOST_PORT="8081" + SIGNAL_HOST_PORT="8083" + SIGNAL_GRPC_PORT="10000" + RELAY_HOST_PORT="8084" + BIND_LOCALHOST_ONLY="true" + EXTERNAL_PROXY_NETWORK="" + return 0 +} + +configure_domain() { if ! check_nb_domain "$NETBIRD_DOMAIN"; then NETBIRD_DOMAIN=$(read_nb_domain) fi @@ -132,41 +286,169 @@ init_environment() { NETBIRD_HTTP_PROTOCOL="https" NETBIRD_RELAY_PROTO="rels" fi + return 0 +} - check_jq +configure_reverse_proxy() { + # Prompt for reverse proxy type + REVERSE_PROXY_TYPE=$(read_reverse_proxy_type) - DOCKER_COMPOSE_COMMAND=$(check_docker_compose) + # Handle Traefik-specific prompts + if [[ "$REVERSE_PROXY_TYPE" == "1" ]]; then + TRAEFIK_EXTERNAL_NETWORK=$(read_traefik_network) + TRAEFIK_ENTRYPOINT=$(read_traefik_entrypoint) + TRAEFIK_CERTRESOLVER=$(read_traefik_certresolver) + fi + # Handle port binding for external proxy options (2-5) + if [[ "$REVERSE_PROXY_TYPE" -ge 2 ]]; then + BIND_LOCALHOST_ONLY=$(read_port_binding_preference) + fi + + # Handle Docker network prompts for external proxies (options 2-4) + case "$REVERSE_PROXY_TYPE" in + 2) EXTERNAL_PROXY_NETWORK=$(read_proxy_docker_network "Nginx") ;; + 3) EXTERNAL_PROXY_NETWORK=$(read_proxy_docker_network "Nginx Proxy Manager") ;; + 4) EXTERNAL_PROXY_NETWORK=$(read_proxy_docker_network "Caddy") ;; + *) ;; # No network prompt for other options + esac + return 0 +} + +check_existing_installation() { if [[ -f management.json ]]; then echo "Generated files already exist, if you want to reinitialize the environment, please remove them first." echo "You can use the following commands:" echo " $DOCKER_COMPOSE_COMMAND down --volumes # to remove all containers and volumes" - echo " rm -f docker-compose.yml Caddyfile dashboard.env turnserver.conf management.json relay.env" + echo " rm -f docker-compose.yml Caddyfile dashboard.env turnserver.conf management.json relay.env nginx-netbird.conf caddyfile-netbird.txt npm-advanced-config.txt" echo "Be aware that this will remove all data from the database, and you will have to reconfigure the dashboard." exit 1 fi + return 0 +} +generate_configuration_files() { echo Rendering initial files... - render_docker_compose > docker-compose.yml - render_caddyfile > Caddyfile + + # Render docker-compose and proxy config based on selection + case "$REVERSE_PROXY_TYPE" in + 0) + render_docker_compose > docker-compose.yml + render_caddyfile > Caddyfile + ;; + 1) + render_docker_compose_traefik > docker-compose.yml + ;; + 2) + render_docker_compose_exposed_ports > docker-compose.yml + render_nginx_conf > nginx-netbird.conf + ;; + 3) + render_docker_compose_exposed_ports > docker-compose.yml + render_npm_advanced_config > npm-advanced-config.txt + ;; + 4) + render_docker_compose_exposed_ports > docker-compose.yml + render_external_caddyfile > caddyfile-netbird.txt + ;; + 5) + render_docker_compose_exposed_ports > docker-compose.yml + ;; + *) + echo "Invalid reverse proxy type: $REVERSE_PROXY_TYPE" > /dev/stderr + exit 1 + ;; + esac + + # Common files for all configurations render_dashboard_env > dashboard.env render_management_json > management.json render_turn_server_conf > turnserver.conf render_relay_env > relay.env - - echo -e "\nStarting NetBird services\n" - $DOCKER_COMPOSE_COMMAND up -d - - # Wait for management (and embedded IdP) to be ready - sleep 3 - wait_management - - echo -e "\nDone!\n" - echo "You can access the NetBird dashboard at $NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN" - echo "Follow the onboarding steps to set up your NetBird instance." return 0 } +start_services_and_show_instructions() { + # For built-in Caddy and Traefik, start containers immediately + # For NPM, start containers first (NPM needs services running to create proxy) + # For other external proxies, show instructions first and wait for user confirmation + if [[ "$REVERSE_PROXY_TYPE" == "0" ]]; then + # Built-in Caddy - handles everything automatically + echo -e "$MSG_STARTING_SERVICES" + $DOCKER_COMPOSE_COMMAND up -d + + sleep 3 + wait_management + + echo -e "$MSG_DONE" + print_post_setup_instructions + elif [[ "$REVERSE_PROXY_TYPE" == "1" ]]; then + # Traefik - start containers first, then show instructions + # Traefik discovers services via Docker labels, so containers must be running + echo -e "$MSG_STARTING_SERVICES" + $DOCKER_COMPOSE_COMMAND up -d + + sleep 3 + wait_management_direct + + echo -e "$MSG_DONE" + print_post_setup_instructions + echo "" + echo "NetBird containers are running. Once Traefik is connected, access the dashboard at:" + echo " $NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN" + elif [[ "$REVERSE_PROXY_TYPE" == "3" ]]; then + # NPM - start containers first, then show instructions + # NPM requires backend services to be running before creating proxy hosts + echo -e "$MSG_STARTING_SERVICES" + $DOCKER_COMPOSE_COMMAND up -d + + sleep 3 + wait_management_direct + + echo -e "$MSG_DONE" + print_post_setup_instructions + echo "" + echo "NetBird containers are running. Configure NPM as shown above, then access:" + echo " $NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN" + else + # External proxies (nginx, external Caddy, other) - need manual config first + print_post_setup_instructions + + echo "" + echo -n "Press Enter when your reverse proxy is configured (or Ctrl+C to exit)... " + read -r < /dev/tty + + echo -e "$MSG_STARTING_SERVICES" + $DOCKER_COMPOSE_COMMAND up -d + + sleep 3 + wait_management_direct + + echo -e "$MSG_DONE" + echo "NetBird is now running. Access the dashboard at:" + echo " $NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN" + fi + return 0 +} + +init_environment() { + initialize_default_values + configure_domain + configure_reverse_proxy + + check_jq + DOCKER_COMPOSE_COMMAND=$(check_docker_compose) + + check_existing_installation + generate_configuration_files + start_services_and_show_instructions + return 0 +} + +############################################ +# Configuration File Renderers +############################################ + render_caddyfile() { cat < ${upstream_host}:${RELAY_HOST_PORT}" + echo " (HTTP with WebSocket upgrade)" + echo "" + echo " /ws-proxy/signal* -> ${upstream_host}:${SIGNAL_HOST_PORT}" + echo " (HTTP with WebSocket upgrade)" + echo "" + echo " /signalexchange.SignalExchange/* -> ${upstream_host}:${SIGNAL_GRPC_PORT}" + echo " (gRPC/h2c - plaintext HTTP/2)" + echo "" + echo " /api/* -> ${upstream_host}:${MANAGEMENT_HOST_PORT}" + echo " (HTTP)" + echo "" + echo " /ws-proxy/management* -> ${upstream_host}:${MANAGEMENT_HOST_PORT}" + echo " (HTTP with WebSocket upgrade)" + echo "" + echo " /management.ManagementService/* -> ${upstream_host}:${MANAGEMENT_HOST_PORT}" + echo " (gRPC/h2c - plaintext HTTP/2)" + echo "" + echo " /oauth2/* -> ${upstream_host}:${MANAGEMENT_HOST_PORT}" + echo " (HTTP - embedded IdP)" + echo "" + echo " /* -> ${upstream_host}:${DASHBOARD_HOST_PORT}" + echo " (HTTP - catch-all for dashboard)" + echo "" + echo "IMPORTANT: gRPC routes require HTTP/2 (h2c) upstream support." + echo "Long-running connections need extended timeouts (recommend 1 day)." + return 0 +} + +print_post_setup_instructions() { + case "$REVERSE_PROXY_TYPE" in + 0) + print_caddy_instructions + ;; + 1) + print_traefik_instructions + ;; + 2) + print_nginx_instructions + ;; + 3) + print_npm_instructions + ;; + 4) + print_external_caddy_instructions + ;; + 5) + print_manual_instructions + ;; + *) + echo "Unknown reverse proxy type: $REVERSE_PROXY_TYPE" > /dev/stderr + ;; + esac + return 0 +} + init_environment From 245481f33be5b37a45806d7a56e7190ca38f30e7 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Fri, 16 Jan 2026 18:05:41 +0100 Subject: [PATCH 056/374] [client] fix: client/Dockerfile to reduce vulnerabilities (#5119) The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-ALPINE322-BUSYBOX-14091698 - https://snyk.io/vuln/SNYK-ALPINE322-BUSYBOX-14091698 - https://snyk.io/vuln/SNYK-ALPINE322-BUSYBOX-14091698 - https://snyk.io/vuln/SNYK-ALPINE322-BUSYBOX-14091701 - https://snyk.io/vuln/SNYK-ALPINE322-BUSYBOX-14091701 Co-authored-by: snyk-bot --- client/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/Dockerfile b/client/Dockerfile index 5cd459357..2ff0cca19 100644 --- a/client/Dockerfile +++ b/client/Dockerfile @@ -4,7 +4,7 @@ # sudo podman build -t localhost/netbird:latest -f client/Dockerfile --ignorefile .dockerignore-client . # sudo podman run --rm -it --cap-add={BPF,NET_ADMIN,NET_RAW} localhost/netbird:latest -FROM alpine:3.22.2 +FROM alpine:3.23.2 # iproute2: busybox doesn't display ip rules properly RUN apk add --no-cache \ bash \ From 58daa674efb14faacff5710274db5bf8d4aa5af5 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Mon, 19 Jan 2026 11:22:16 +0100 Subject: [PATCH 057/374] [Management/Client] Trigger debug bundle runs from API/Dashboard (#4592) (#4832) This PR adds the ability to trigger debug bundle generation remotely from the Management API/Dashboard. --- client/cmd/debug.go | 28 +- client/cmd/status.go | 2 +- client/cmd/testutil_test.go | 7 +- client/cmd/up.go | 2 +- client/embed/embed.go | 2 +- client/internal/connect.go | 20 +- client/internal/debug/debug.go | 45 +- client/internal/debug/upload.go | 101 + .../debug/upload_test.go} | 4 +- client/internal/engine.go | 131 +- client/internal/engine_test.go | 6 +- client/jobexec/executor.go | 76 + client/proto/daemon.pb.go | 15 +- client/proto/daemon.proto | 1 - client/server/debug.go | 99 +- client/server/server.go | 5 +- client/server/server_test.go | 7 +- client/status/status.go | 100 +- client/status/status_test.go | 2 +- client/ui/debug.go | 70 +- client/wasm/cmd/main.go | 7 +- go.mod | 2 + go.sum | 8 + management/internals/modules/peers/manager.go | 5 + .../internals/modules/peers/manager_mock.go | 15 + management/internals/server/boot.go | 2 +- management/internals/server/controllers.go | 8 + management/internals/server/modules.go | 2 +- .../internals/shared/grpc/conversion_test.go | 1 + management/internals/shared/grpc/server.go | 133 +- management/server/account.go | 4 + management/server/account/manager.go | 3 + management/server/account_test.go | 4 +- management/server/activity/codes.go | 4 + management/server/dns_test.go | 4 +- .../http/handlers/peers/peers_handler.go | 118 + .../testing/testing_tools/channel/channel.go | 10 +- management/server/identity_provider_test.go | 6 +- management/server/job/channel.go | 59 + management/server/job/manager.go | 182 ++ management/server/management_proto_test.go | 7 +- management/server/management_test.go | 5 + management/server/mock_server/account_mock.go | 23 + management/server/nameserver_test.go | 4 +- management/server/peer.go | 130 + management/server/peer_test.go | 13 +- .../server/permissions/modules/module.go | 54 +- management/server/route_test.go | 4 +- management/server/store/sql_store.go | 130 +- management/server/store/store.go | 7 + management/server/types/job.go | 228 ++ shared/management/client/client.go | 1 + shared/management/client/client_test.go | 10 +- shared/management/client/grpc.go | 200 +- shared/management/client/mock.go | 8 + shared/management/http/api/generate.sh | 4 +- shared/management/http/api/openapi.yml | 224 ++ shared/management/http/api/types.gen.go | 213 +- shared/management/proto/management.pb.go | 2222 ++++++++++------- shared/management/proto/management.proto | 39 + shared/management/proto/management_grpc.pb.go | 70 + 61 files changed, 3657 insertions(+), 1239 deletions(-) create mode 100644 client/internal/debug/upload.go rename client/{server/debug_test.go => internal/debug/upload_test.go} (93%) create mode 100644 client/jobexec/executor.go create mode 100644 management/server/job/channel.go create mode 100644 management/server/job/manager.go create mode 100644 management/server/types/job.go diff --git a/client/cmd/debug.go b/client/cmd/debug.go index e56f66103..bbb0ef0d6 100644 --- a/client/cmd/debug.go +++ b/client/cmd/debug.go @@ -16,7 +16,6 @@ import ( "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/proto" "github.com/netbirdio/netbird/client/server" - nbstatus "github.com/netbirdio/netbird/client/status" mgmProto "github.com/netbirdio/netbird/shared/management/proto" "github.com/netbirdio/netbird/upload-server/types" ) @@ -98,7 +97,6 @@ func debugBundle(cmd *cobra.Command, _ []string) error { client := proto.NewDaemonServiceClient(conn) request := &proto.DebugBundleRequest{ Anonymize: anonymizeFlag, - Status: getStatusOutput(cmd, anonymizeFlag), SystemInfo: systemInfoFlag, LogFileCount: logFileCount, } @@ -221,9 +219,6 @@ func runForDuration(cmd *cobra.Command, args []string) error { time.Sleep(3 * time.Second) - headerPostUp := fmt.Sprintf("----- NetBird post-up - Timestamp: %s", time.Now().Format(time.RFC3339)) - statusOutput := fmt.Sprintf("%s\n%s", headerPostUp, getStatusOutput(cmd, anonymizeFlag)) - if waitErr := waitForDurationOrCancel(cmd.Context(), duration, cmd); waitErr != nil { return waitErr } @@ -231,11 +226,8 @@ func runForDuration(cmd *cobra.Command, args []string) error { cmd.Println("Creating debug bundle...") - headerPreDown := fmt.Sprintf("----- NetBird pre-down - Timestamp: %s - Duration: %s", time.Now().Format(time.RFC3339), duration) - statusOutput = fmt.Sprintf("%s\n%s\n%s", statusOutput, headerPreDown, getStatusOutput(cmd, anonymizeFlag)) request := &proto.DebugBundleRequest{ Anonymize: anonymizeFlag, - Status: statusOutput, SystemInfo: systemInfoFlag, LogFileCount: logFileCount, } @@ -302,24 +294,6 @@ func setSyncResponsePersistence(cmd *cobra.Command, args []string) error { return nil } -func getStatusOutput(cmd *cobra.Command, anon bool) string { - var statusOutputString string - statusResp, err := getStatus(cmd.Context(), true) - if err != nil { - cmd.PrintErrf("Failed to get status: %v\n", err) - } else { - pm := profilemanager.NewProfileManager() - var profName string - if activeProf, err := pm.GetActiveProfile(); err == nil { - profName = activeProf.Name - } - - overview := nbstatus.ConvertToStatusOutputOverview(statusResp, anon, "", nil, nil, nil, "", profName) - statusOutputString = overview.FullDetailSummary() - } - return statusOutputString -} - func waitForDurationOrCancel(ctx context.Context, duration time.Duration, cmd *cobra.Command) error { ticker := time.NewTicker(1 * time.Second) defer ticker.Stop() @@ -378,7 +352,7 @@ func generateDebugBundle(config *profilemanager.Config, recorder *peer.Status, c InternalConfig: config, StatusRecorder: recorder, SyncResponse: syncResponse, - LogFile: logFilePath, + LogPath: logFilePath, }, debug.BundleConfig{ IncludeSystemInfo: true, diff --git a/client/cmd/status.go b/client/cmd/status.go index 05175663c..f09c35c2c 100644 --- a/client/cmd/status.go +++ b/client/cmd/status.go @@ -99,7 +99,7 @@ func statusFunc(cmd *cobra.Command, args []string) error { profName = activeProf.Name } - var outputInformationHolder = nbstatus.ConvertToStatusOutputOverview(resp, anonymizeFlag, statusFilter, prefixNamesFilter, prefixNamesFilterMap, ipsFilterMap, connectionTypeFilter, profName) + var outputInformationHolder = nbstatus.ConvertToStatusOutputOverview(resp.GetFullStatus(), anonymizeFlag, resp.GetDaemonVersion(), statusFilter, prefixNamesFilter, prefixNamesFilterMap, ipsFilterMap, connectionTypeFilter, profName) var statusOutputString string switch { case detailFlag: diff --git a/client/cmd/testutil_test.go b/client/cmd/testutil_test.go index 2650d6225..4bda33e65 100644 --- a/client/cmd/testutil_test.go +++ b/client/cmd/testutil_test.go @@ -18,6 +18,7 @@ import ( "github.com/netbirdio/netbird/management/internals/modules/peers" "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" + "github.com/netbirdio/netbird/management/server/job" clientProto "github.com/netbirdio/netbird/client/proto" client "github.com/netbirdio/netbird/client/server" @@ -97,6 +98,8 @@ func startManagement(t *testing.T, config *config.Config, testFile string) (*grp peersmanager := peers.NewManager(store, permissionsManagerMock) settingsManagerMock := settings.NewMockManager(ctrl) + jobManager := job.NewJobManager(nil, store, peersmanager) + iv, _ := integrations.NewIntegratedValidator(context.Background(), peersmanager, settingsManagerMock, eventStore) metrics, err := telemetry.NewDefaultAppMetrics(context.Background()) @@ -115,7 +118,7 @@ func startManagement(t *testing.T, config *config.Config, testFile string) (*grp requestBuffer := mgmt.NewAccountRequestBuffer(ctx, store) networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, mgmt.MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), manager.NewEphemeralManager(store, peersmanager), config) - accountManager, err := mgmt.BuildManager(context.Background(), config, store, networkMapController, nil, "", eventStore, nil, false, iv, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false) + accountManager, err := mgmt.BuildManager(context.Background(), config, store, networkMapController, jobManager, nil, "", eventStore, nil, false, iv, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false) if err != nil { t.Fatal(err) } @@ -124,7 +127,7 @@ func startManagement(t *testing.T, config *config.Config, testFile string) (*grp if err != nil { t.Fatal(err) } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, &mgmt.MockIntegratedValidator{}, networkMapController, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &mgmt.MockIntegratedValidator{}, networkMapController, nil) if err != nil { t.Fatal(err) } diff --git a/client/cmd/up.go b/client/cmd/up.go index 057d35268..9559287d5 100644 --- a/client/cmd/up.go +++ b/client/cmd/up.go @@ -200,7 +200,7 @@ func runInForegroundMode(ctx context.Context, cmd *cobra.Command, activeProf *pr connectClient := internal.NewConnectClient(ctx, config, r, false) SetupDebugHandler(ctx, config, r, connectClient, "") - return connectClient.Run(nil) + return connectClient.Run(nil, util.FindFirstLogPath(logFiles)) } func runInDaemonMode(ctx context.Context, cmd *cobra.Command, pm *profilemanager.ProfileManager, activeProf *profilemanager.Profile, profileSwitched bool) error { diff --git a/client/embed/embed.go b/client/embed/embed.go index 43089fc9d..8bbbef0f2 100644 --- a/client/embed/embed.go +++ b/client/embed/embed.go @@ -190,7 +190,7 @@ func (c *Client) Start(startCtx context.Context) error { run := make(chan struct{}) clientErr := make(chan error, 1) go func() { - if err := client.Run(run); err != nil { + if err := client.Run(run, ""); err != nil { clientErr <- err } }() diff --git a/client/internal/connect.go b/client/internal/connect.go index 65637c073..7fc3c9a96 100644 --- a/client/internal/connect.go +++ b/client/internal/connect.go @@ -59,7 +59,6 @@ func NewConnectClient( config *profilemanager.Config, statusRecorder *peer.Status, doInitalAutoUpdate bool, - ) *ConnectClient { return &ConnectClient{ ctx: ctx, @@ -71,8 +70,8 @@ func NewConnectClient( } // Run with main logic. -func (c *ConnectClient) Run(runningChan chan struct{}) error { - return c.run(MobileDependency{}, runningChan) +func (c *ConnectClient) Run(runningChan chan struct{}, logPath string) error { + return c.run(MobileDependency{}, runningChan, logPath) } // RunOnAndroid with main logic on mobile system @@ -93,7 +92,7 @@ func (c *ConnectClient) RunOnAndroid( DnsReadyListener: dnsReadyListener, StateFilePath: stateFilePath, } - return c.run(mobileDependency, nil) + return c.run(mobileDependency, nil, "") } func (c *ConnectClient) RunOniOS( @@ -111,10 +110,10 @@ func (c *ConnectClient) RunOniOS( DnsManager: dnsManager, StateFilePath: stateFilePath, } - return c.run(mobileDependency, nil) + return c.run(mobileDependency, nil, "") } -func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan struct{}) error { +func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan struct{}, logPath string) error { defer func() { if r := recover(); r != nil { rec := c.statusRecorder @@ -284,7 +283,7 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan relayURLs, token := parseRelayInfo(loginResp) peerConfig := loginResp.GetPeerConfig() - engineConfig, err := createEngineConfig(myPrivateKey, c.config, peerConfig) + engineConfig, err := createEngineConfig(myPrivateKey, c.config, peerConfig, logPath) if err != nil { log.Error(err) return wrapErr(err) @@ -472,7 +471,7 @@ func (c *ConnectClient) SetSyncResponsePersistence(enabled bool) { } // createEngineConfig converts configuration received from Management Service to EngineConfig -func createEngineConfig(key wgtypes.Key, config *profilemanager.Config, peerConfig *mgmProto.PeerConfig) (*EngineConfig, error) { +func createEngineConfig(key wgtypes.Key, config *profilemanager.Config, peerConfig *mgmProto.PeerConfig, logPath string) (*EngineConfig, error) { nm := false if config.NetworkMonitor != nil { nm = *config.NetworkMonitor @@ -507,7 +506,10 @@ func createEngineConfig(key wgtypes.Key, config *profilemanager.Config, peerConf LazyConnectionEnabled: config.LazyConnectionEnabled, - MTU: selectMTU(config.MTU, peerConfig.Mtu), + MTU: selectMTU(config.MTU, peerConfig.Mtu), + LogPath: logPath, + + ProfileConfig: config, } if config.PreSharedKey != "" { diff --git a/client/internal/debug/debug.go b/client/internal/debug/debug.go index 01a0377a5..d3b5bc9d4 100644 --- a/client/internal/debug/debug.go +++ b/client/internal/debug/debug.go @@ -28,8 +28,10 @@ import ( "github.com/netbirdio/netbird/client/internal/peer" "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/internal/updatemanager/installer" + nbstatus "github.com/netbirdio/netbird/client/status" mgmProto "github.com/netbirdio/netbird/shared/management/proto" "github.com/netbirdio/netbird/util" + "github.com/netbirdio/netbird/version" ) const readmeContent = `Netbird debug bundle @@ -223,10 +225,9 @@ type BundleGenerator struct { internalConfig *profilemanager.Config statusRecorder *peer.Status syncResponse *mgmProto.SyncResponse - logFile string + logPath string anonymize bool - clientStatus string includeSystemInfo bool logFileCount uint32 @@ -235,7 +236,6 @@ type BundleGenerator struct { type BundleConfig struct { Anonymize bool - ClientStatus string IncludeSystemInfo bool LogFileCount uint32 } @@ -244,7 +244,7 @@ type GeneratorDependencies struct { InternalConfig *profilemanager.Config StatusRecorder *peer.Status SyncResponse *mgmProto.SyncResponse - LogFile string + LogPath string } func NewBundleGenerator(deps GeneratorDependencies, cfg BundleConfig) *BundleGenerator { @@ -260,10 +260,9 @@ func NewBundleGenerator(deps GeneratorDependencies, cfg BundleConfig) *BundleGen internalConfig: deps.InternalConfig, statusRecorder: deps.StatusRecorder, syncResponse: deps.SyncResponse, - logFile: deps.LogFile, + logPath: deps.LogPath, anonymize: cfg.Anonymize, - clientStatus: cfg.ClientStatus, includeSystemInfo: cfg.IncludeSystemInfo, logFileCount: logFileCount, } @@ -309,13 +308,6 @@ func (g *BundleGenerator) createArchive() error { return fmt.Errorf("add status: %w", err) } - if g.statusRecorder != nil { - status := g.statusRecorder.GetFullStatus() - seedFromStatus(g.anonymizer, &status) - } else { - log.Debugf("no status recorder available for seeding") - } - if err := g.addConfig(); err != nil { log.Errorf("failed to add config to debug bundle: %v", err) } @@ -352,7 +344,7 @@ func (g *BundleGenerator) createArchive() error { log.Errorf("failed to add wg show output: %v", err) } - if g.logFile != "" && !slices.Contains(util.SpecialLogs, g.logFile) { + if g.logPath != "" && !slices.Contains(util.SpecialLogs, g.logPath) { if err := g.addLogfile(); err != nil { log.Errorf("failed to add log file to debug bundle: %v", err) if err := g.trySystemdLogFallback(); err != nil { @@ -401,11 +393,26 @@ func (g *BundleGenerator) addReadme() error { } func (g *BundleGenerator) addStatus() error { - if status := g.clientStatus; status != "" { - statusReader := strings.NewReader(status) + if g.statusRecorder != nil { + pm := profilemanager.NewProfileManager() + var profName string + if activeProf, err := pm.GetActiveProfile(); err == nil { + profName = activeProf.Name + } + + fullStatus := g.statusRecorder.GetFullStatus() + protoFullStatus := nbstatus.ToProtoFullStatus(fullStatus) + protoFullStatus.Events = g.statusRecorder.GetEventHistory() + overview := nbstatus.ConvertToStatusOutputOverview(protoFullStatus, g.anonymize, version.NetbirdVersion(), "", nil, nil, nil, "", profName) + statusOutput := overview.FullDetailSummary() + + statusReader := strings.NewReader(statusOutput) if err := g.addFileToZip(statusReader, "status.txt"); err != nil { return fmt.Errorf("add status file to zip: %w", err) } + seedFromStatus(g.anonymizer, &fullStatus) + } else { + log.Debugf("no status recorder available for seeding") } return nil } @@ -710,14 +717,14 @@ func (g *BundleGenerator) addCorruptedStateFiles() error { } func (g *BundleGenerator) addLogfile() error { - if g.logFile == "" { + if g.logPath == "" { log.Debugf("skipping empty log file in debug bundle") return nil } - logDir := filepath.Dir(g.logFile) + logDir := filepath.Dir(g.logPath) - if err := g.addSingleLogfile(g.logFile, clientLogFile); err != nil { + if err := g.addSingleLogfile(g.logPath, clientLogFile); err != nil { return fmt.Errorf("add client log file to zip: %w", err) } diff --git a/client/internal/debug/upload.go b/client/internal/debug/upload.go new file mode 100644 index 000000000..cdf52409d --- /dev/null +++ b/client/internal/debug/upload.go @@ -0,0 +1,101 @@ +package debug + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + + "github.com/netbirdio/netbird/upload-server/types" +) + +const maxBundleUploadSize = 50 * 1024 * 1024 + +func UploadDebugBundle(ctx context.Context, url, managementURL, filePath string) (key string, err error) { + response, err := getUploadURL(ctx, url, managementURL) + if err != nil { + return "", err + } + + err = upload(ctx, filePath, response) + if err != nil { + return "", err + } + return response.Key, nil +} + +func upload(ctx context.Context, filePath string, response *types.GetURLResponse) error { + fileData, err := os.Open(filePath) + if err != nil { + return fmt.Errorf("open file: %w", err) + } + + defer fileData.Close() + + stat, err := fileData.Stat() + if err != nil { + return fmt.Errorf("stat file: %w", err) + } + + if stat.Size() > maxBundleUploadSize { + return fmt.Errorf("file size exceeds maximum limit of %d bytes", maxBundleUploadSize) + } + + req, err := http.NewRequestWithContext(ctx, "PUT", response.URL, fileData) + if err != nil { + return fmt.Errorf("create PUT request: %w", err) + } + + req.ContentLength = stat.Size() + req.Header.Set("Content-Type", "application/octet-stream") + + putResp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("upload failed: %v", err) + } + defer putResp.Body.Close() + + if putResp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(putResp.Body) + return fmt.Errorf("upload status %d: %s", putResp.StatusCode, string(body)) + } + return nil +} + +func getUploadURL(ctx context.Context, url string, managementURL string) (*types.GetURLResponse, error) { + id := getURLHash(managementURL) + getReq, err := http.NewRequestWithContext(ctx, "GET", url+"?id="+id, nil) + if err != nil { + return nil, fmt.Errorf("create GET request: %w", err) + } + + getReq.Header.Set(types.ClientHeader, types.ClientHeaderValue) + + resp, err := http.DefaultClient.Do(getReq) + if err != nil { + return nil, fmt.Errorf("get presigned URL: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("get presigned URL status %d: %s", resp.StatusCode, string(body)) + } + + urlBytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("read response body: %w", err) + } + var response types.GetURLResponse + if err := json.Unmarshal(urlBytes, &response); err != nil { + return nil, fmt.Errorf("unmarshal response: %w", err) + } + return &response, nil +} + +func getURLHash(url string) string { + return fmt.Sprintf("%x", sha256.Sum256([]byte(url))) +} diff --git a/client/server/debug_test.go b/client/internal/debug/upload_test.go similarity index 93% rename from client/server/debug_test.go rename to client/internal/debug/upload_test.go index 53d9ac8ed..e833c196d 100644 --- a/client/server/debug_test.go +++ b/client/internal/debug/upload_test.go @@ -1,4 +1,4 @@ -package server +package debug import ( "context" @@ -38,7 +38,7 @@ func TestUpload(t *testing.T) { fileContent := []byte("test file content") err := os.WriteFile(file, fileContent, 0640) require.NoError(t, err) - key, err := uploadDebugBundle(context.Background(), testURL+types.GetURLPath, testURL, file) + key, err := UploadDebugBundle(context.Background(), testURL+types.GetURLPath, testURL, file) require.NoError(t, err) id := getURLHash(testURL) require.Contains(t, key, id+"/") diff --git a/client/internal/engine.go b/client/internal/engine.go index 0182b2530..c5e2b7c6c 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -31,6 +31,7 @@ import ( "github.com/netbirdio/netbird/client/iface/device" "github.com/netbirdio/netbird/client/iface/udpmux" "github.com/netbirdio/netbird/client/internal/acl" + "github.com/netbirdio/netbird/client/internal/debug" "github.com/netbirdio/netbird/client/internal/dns" dnsconfig "github.com/netbirdio/netbird/client/internal/dns/config" "github.com/netbirdio/netbird/client/internal/dnsfwd" @@ -42,12 +43,14 @@ import ( "github.com/netbirdio/netbird/client/internal/peer/guard" icemaker "github.com/netbirdio/netbird/client/internal/peer/ice" "github.com/netbirdio/netbird/client/internal/peerstore" + "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/internal/relay" "github.com/netbirdio/netbird/client/internal/rosenpass" "github.com/netbirdio/netbird/client/internal/routemanager" "github.com/netbirdio/netbird/client/internal/routemanager/systemops" "github.com/netbirdio/netbird/client/internal/statemanager" "github.com/netbirdio/netbird/client/internal/updatemanager" + "github.com/netbirdio/netbird/client/jobexec" cProto "github.com/netbirdio/netbird/client/proto" "github.com/netbirdio/netbird/shared/management/domain" semaphoregroup "github.com/netbirdio/netbird/util/semaphore-group" @@ -132,6 +135,11 @@ type EngineConfig struct { LazyConnectionEnabled bool MTU uint16 + + // for debug bundle generation + ProfileConfig *profilemanager.Config + + LogPath string } // Engine is a mechanism responsible for reacting on Signal and Management stream events and managing connections to the remote peers. @@ -195,7 +203,8 @@ type Engine struct { stateManager *statemanager.Manager srWatcher *guard.SRWatcher - // Sync response persistence + // Sync response persistence (protected by syncRespMux) + syncRespMux sync.RWMutex persistSyncResponse bool latestSyncResponse *mgmProto.SyncResponse connSemaphore *semaphoregroup.SemaphoreGroup @@ -211,6 +220,9 @@ type Engine struct { shutdownWg sync.WaitGroup probeStunTurn *relay.StunTurnProbe + + jobExecutor *jobexec.Executor + jobExecutorWG sync.WaitGroup } // Peer is an instance of the Connection Peer @@ -224,7 +236,18 @@ type localIpUpdater interface { } // NewEngine creates a new Connection Engine with probes attached -func NewEngine(clientCtx context.Context, clientCancel context.CancelFunc, signalClient signal.Client, mgmClient mgm.Client, relayManager *relayClient.Manager, config *EngineConfig, mobileDep MobileDependency, statusRecorder *peer.Status, checks []*mgmProto.Checks, stateManager *statemanager.Manager) *Engine { +func NewEngine( + clientCtx context.Context, + clientCancel context.CancelFunc, + signalClient signal.Client, + mgmClient mgm.Client, + relayManager *relayClient.Manager, + config *EngineConfig, + mobileDep MobileDependency, + statusRecorder *peer.Status, + checks []*mgmProto.Checks, + stateManager *statemanager.Manager, +) *Engine { engine := &Engine{ clientCtx: clientCtx, clientCancel: clientCancel, @@ -244,6 +267,7 @@ func NewEngine(clientCtx context.Context, clientCancel context.CancelFunc, signa checks: checks, connSemaphore: semaphoregroup.NewSemaphoreGroup(connInitLimit), probeStunTurn: relay.NewStunTurnProbe(relay.DefaultCacheTTL), + jobExecutor: jobexec.NewExecutor(), } log.Infof("I am: %s", config.WgPrivateKey.PublicKey().String()) @@ -312,6 +336,8 @@ func (e *Engine) Stop() error { e.cancel() } + e.jobExecutorWG.Wait() // block until job goroutines finish + e.close() // stop flow manager after wg interface is gone @@ -500,6 +526,7 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) e.receiveSignalEvents() e.receiveManagementEvents() + e.receiveJobEvents() // starting network monitor at the very last to avoid disruptions e.startNetworkMonitor() @@ -828,9 +855,18 @@ func (e *Engine) handleSync(update *mgmProto.SyncResponse) error { return nil } + // Persist sync response under the dedicated lock (syncRespMux), not under syncMsgMux. + // Read the storage-enabled flag under the syncRespMux too. + e.syncRespMux.RLock() + enabled := e.persistSyncResponse + e.syncRespMux.RUnlock() + // Store sync response if persistence is enabled - if e.persistSyncResponse { + if enabled { + e.syncRespMux.Lock() e.latestSyncResponse = update + e.syncRespMux.Unlock() + log.Debugf("sync response persisted with serial %d", nm.GetSerial()) } @@ -960,6 +996,77 @@ func (e *Engine) updateConfig(conf *mgmProto.PeerConfig) error { return nil } +func (e *Engine) receiveJobEvents() { + e.jobExecutorWG.Add(1) + go func() { + defer e.jobExecutorWG.Done() + err := e.mgmClient.Job(e.ctx, func(msg *mgmProto.JobRequest) *mgmProto.JobResponse { + resp := mgmProto.JobResponse{ + ID: msg.ID, + Status: mgmProto.JobStatus_failed, + } + switch params := msg.WorkloadParameters.(type) { + case *mgmProto.JobRequest_Bundle: + bundleResult, err := e.handleBundle(params.Bundle) + if err != nil { + log.Errorf("handling bundle: %v", err) + resp.Reason = []byte(err.Error()) + return &resp + } + resp.Status = mgmProto.JobStatus_succeeded + resp.WorkloadResults = bundleResult + return &resp + default: + resp.Reason = []byte(jobexec.ErrJobNotImplemented.Error()) + return &resp + } + }) + if err != nil { + // happens if management is unavailable for a long time. + // We want to cancel the operation of the whole client + _ = CtxGetState(e.ctx).Wrap(ErrResetConnection) + e.clientCancel() + return + } + log.Info("stopped receiving jobs from Management Service") + }() + log.Info("connecting to Management Service jobs stream") +} + +func (e *Engine) handleBundle(params *mgmProto.BundleParameters) (*mgmProto.JobResponse_Bundle, error) { + log.Infof("handle remote debug bundle request: %s", params.String()) + syncResponse, err := e.GetLatestSyncResponse() + if err != nil { + log.Warnf("get latest sync response: %v", err) + } + + bundleDeps := debug.GeneratorDependencies{ + InternalConfig: e.config.ProfileConfig, + StatusRecorder: e.statusRecorder, + SyncResponse: syncResponse, + LogPath: e.config.LogPath, + } + + bundleJobParams := debug.BundleConfig{ + Anonymize: params.Anonymize, + IncludeSystemInfo: true, + LogFileCount: uint32(params.LogFileCount), + } + + waitFor := time.Duration(params.BundleForTime) * time.Minute + + uploadKey, err := e.jobExecutor.BundleJob(e.ctx, bundleDeps, bundleJobParams, waitFor, e.config.ProfileConfig.ManagementURL.String()) + if err != nil { + return nil, err + } + + response := &mgmProto.JobResponse_Bundle{ + Bundle: &mgmProto.BundleResult{ + UploadKey: uploadKey, + }, + } + return response, nil +} // receiveManagementEvents connects to the Management Service event stream to receive updates from the management service // E.g. when a new peer has been registered and we are allowed to connect to it. @@ -1848,8 +1955,8 @@ func (e *Engine) stopDNSServer() { // SetSyncResponsePersistence enables or disables sync response persistence func (e *Engine) SetSyncResponsePersistence(enabled bool) { - e.syncMsgMux.Lock() - defer e.syncMsgMux.Unlock() + e.syncRespMux.Lock() + defer e.syncRespMux.Unlock() if enabled == e.persistSyncResponse { return @@ -1864,20 +1971,22 @@ func (e *Engine) SetSyncResponsePersistence(enabled bool) { // GetLatestSyncResponse returns the stored sync response if persistence is enabled func (e *Engine) GetLatestSyncResponse() (*mgmProto.SyncResponse, error) { - e.syncMsgMux.Lock() - defer e.syncMsgMux.Unlock() + e.syncRespMux.RLock() + enabled := e.persistSyncResponse + latest := e.latestSyncResponse + e.syncRespMux.RUnlock() - if !e.persistSyncResponse { + if !enabled { return nil, errors.New("sync response persistence is disabled") } - if e.latestSyncResponse == nil { + if latest == nil { //nolint:nilnil return nil, nil } - log.Debugf("Retrieving latest sync response with size %d bytes", proto.Size(e.latestSyncResponse)) - sr, ok := proto.Clone(e.latestSyncResponse).(*mgmProto.SyncResponse) + log.Debugf("Retrieving latest sync response with size %d bytes", proto.Size(latest)) + sr, ok := proto.Clone(latest).(*mgmProto.SyncResponse) if !ok { return nil, fmt.Errorf("failed to clone sync response") } diff --git a/client/internal/engine_test.go b/client/internal/engine_test.go index a15ee0581..56829393c 100644 --- a/client/internal/engine_test.go +++ b/client/internal/engine_test.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc/keepalive" "github.com/netbirdio/netbird/client/internal/stdnet" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/management-integrations/integrations" @@ -1599,6 +1600,7 @@ func startManagement(t *testing.T, dataDir, testFile string) (*grpc.Server, stri permissionsManager := permissions.NewManager(store) peersManager := peers.NewManager(store, permissionsManager) + jobManager := job.NewJobManager(nil, store, peersManager) ia, _ := integrations.NewIntegratedValidator(context.Background(), peersManager, nil, eventStore) @@ -1622,7 +1624,7 @@ func startManagement(t *testing.T, dataDir, testFile string) (*grpc.Server, stri updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := server.NewAccountRequestBuffer(context.Background(), store) networkMapController := controller.NewController(context.Background(), store, metrics, updateManager, requestBuffer, server.MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), manager.NewEphemeralManager(store, peersManager), config) - accountManager, err := server.BuildManager(context.Background(), config, store, networkMapController, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + accountManager, err := server.BuildManager(context.Background(), config, store, networkMapController, jobManager, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) if err != nil { return nil, "", err } @@ -1631,7 +1633,7 @@ func startManagement(t *testing.T, dataDir, testFile string) (*grpc.Server, stri if err != nil { return nil, "", err } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil) if err != nil { return nil, "", err } diff --git a/client/jobexec/executor.go b/client/jobexec/executor.go new file mode 100644 index 000000000..e29cc8840 --- /dev/null +++ b/client/jobexec/executor.go @@ -0,0 +1,76 @@ +package jobexec + +import ( + "context" + "errors" + "fmt" + "os" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/internal/debug" + "github.com/netbirdio/netbird/upload-server/types" +) + +const ( + MaxBundleWaitTime = 60 * time.Minute // maximum wait time for bundle generation (1 hour) +) + +var ( + ErrJobNotImplemented = errors.New("job not implemented") +) + +type Executor struct { +} + +func NewExecutor() *Executor { + return &Executor{} +} + +func (e *Executor) BundleJob(ctx context.Context, debugBundleDependencies debug.GeneratorDependencies, params debug.BundleConfig, waitForDuration time.Duration, mgmURL string) (string, error) { + if waitForDuration > MaxBundleWaitTime { + log.Warnf("bundle wait time %v exceeds maximum %v, capping to maximum", waitForDuration, MaxBundleWaitTime) + waitForDuration = MaxBundleWaitTime + } + + if waitForDuration > 0 { + if err := waitFor(ctx, waitForDuration); err != nil { + return "", err + } + } + + log.Infof("execute debug bundle generation") + + bundleGenerator := debug.NewBundleGenerator(debugBundleDependencies, params) + + path, err := bundleGenerator.Generate() + if err != nil { + return "", fmt.Errorf("generate debug bundle: %w", err) + } + defer func() { + if err := os.Remove(path); err != nil { + log.Errorf("failed to remove debug bundle file: %v", err) + } + }() + + key, err := debug.UploadDebugBundle(ctx, types.DefaultBundleURL, mgmURL, path) + if err != nil { + log.Errorf("failed to upload debug bundle: %v", err) + return "", fmt.Errorf("upload debug bundle: %w", err) + } + + log.Infof("debug bundle has been generated successfully") + return key, nil +} + +func waitFor(ctx context.Context, duration time.Duration) error { + log.Infof("wait for %v minutes before executing debug bundle", duration.Minutes()) + select { + case <-time.After(duration): + return nil + case <-ctx.Done(): + log.Infof("wait cancelled: %v", ctx.Err()) + return ctx.Err() + } +} diff --git a/client/proto/daemon.pb.go b/client/proto/daemon.pb.go index 5d56befc7..9cbe34e1d 100644 --- a/client/proto/daemon.pb.go +++ b/client/proto/daemon.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.36.6 -// protoc v3.21.12 +// protoc v6.33.1 // source: daemon.proto package proto @@ -2757,7 +2757,6 @@ func (x *ForwardingRulesResponse) GetRules() []*ForwardingRule { type DebugBundleRequest struct { state protoimpl.MessageState `protogen:"open.v1"` Anonymize bool `protobuf:"varint,1,opt,name=anonymize,proto3" json:"anonymize,omitempty"` - Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` SystemInfo bool `protobuf:"varint,3,opt,name=systemInfo,proto3" json:"systemInfo,omitempty"` UploadURL string `protobuf:"bytes,4,opt,name=uploadURL,proto3" json:"uploadURL,omitempty"` LogFileCount uint32 `protobuf:"varint,5,opt,name=logFileCount,proto3" json:"logFileCount,omitempty"` @@ -2802,13 +2801,6 @@ func (x *DebugBundleRequest) GetAnonymize() bool { return false } -func (x *DebugBundleRequest) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - func (x *DebugBundleRequest) GetSystemInfo() bool { if x != nil { return x.SystemInfo @@ -5773,10 +5765,9 @@ const file_daemon_proto_rawDesc = "" + "\x12translatedHostname\x18\x04 \x01(\tR\x12translatedHostname\x128\n" + "\x0etranslatedPort\x18\x05 \x01(\v2\x10.daemon.PortInfoR\x0etranslatedPort\"G\n" + "\x17ForwardingRulesResponse\x12,\n" + - "\x05rules\x18\x01 \x03(\v2\x16.daemon.ForwardingRuleR\x05rules\"\xac\x01\n" + + "\x05rules\x18\x01 \x03(\v2\x16.daemon.ForwardingRuleR\x05rules\"\x94\x01\n" + "\x12DebugBundleRequest\x12\x1c\n" + - "\tanonymize\x18\x01 \x01(\bR\tanonymize\x12\x16\n" + - "\x06status\x18\x02 \x01(\tR\x06status\x12\x1e\n" + + "\tanonymize\x18\x01 \x01(\bR\tanonymize\x12\x1e\n" + "\n" + "systemInfo\x18\x03 \x01(\bR\n" + "systemInfo\x12\x1c\n" + diff --git a/client/proto/daemon.proto b/client/proto/daemon.proto index b75ca821a..7a802d830 100644 --- a/client/proto/daemon.proto +++ b/client/proto/daemon.proto @@ -455,7 +455,6 @@ message ForwardingRulesResponse { // DebugBundler message DebugBundleRequest { bool anonymize = 1; - string status = 2; bool systemInfo = 3; string uploadURL = 4; uint32 logFileCount = 5; diff --git a/client/server/debug.go b/client/server/debug.go index dfad41604..104fd30f4 100644 --- a/client/server/debug.go +++ b/client/server/debug.go @@ -4,24 +4,16 @@ package server import ( "context" - "crypto/sha256" - "encoding/json" "errors" "fmt" - "io" - "net/http" - "os" log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/client/internal/debug" "github.com/netbirdio/netbird/client/proto" mgmProto "github.com/netbirdio/netbird/shared/management/proto" - "github.com/netbirdio/netbird/upload-server/types" ) -const maxBundleUploadSize = 50 * 1024 * 1024 - // DebugBundle creates a debug bundle and returns the location. func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) (resp *proto.DebugBundleResponse, err error) { s.mutex.Lock() @@ -37,11 +29,10 @@ func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) ( InternalConfig: s.config, StatusRecorder: s.statusRecorder, SyncResponse: syncResponse, - LogFile: s.logFile, + LogPath: s.logFile, }, debug.BundleConfig{ Anonymize: req.GetAnonymize(), - ClientStatus: req.GetStatus(), IncludeSystemInfo: req.GetSystemInfo(), LogFileCount: req.GetLogFileCount(), }, @@ -55,7 +46,7 @@ func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) ( if req.GetUploadURL() == "" { return &proto.DebugBundleResponse{Path: path}, nil } - key, err := uploadDebugBundle(context.Background(), req.GetUploadURL(), s.config.ManagementURL.String(), path) + key, err := debug.UploadDebugBundle(context.Background(), req.GetUploadURL(), s.config.ManagementURL.String(), path) if err != nil { log.Errorf("failed to upload debug bundle to %s: %v", req.GetUploadURL(), err) return &proto.DebugBundleResponse{Path: path, UploadFailureReason: err.Error()}, nil @@ -66,92 +57,6 @@ func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) ( return &proto.DebugBundleResponse{Path: path, UploadedKey: key}, nil } -func uploadDebugBundle(ctx context.Context, url, managementURL, filePath string) (key string, err error) { - response, err := getUploadURL(ctx, url, managementURL) - if err != nil { - return "", err - } - - err = upload(ctx, filePath, response) - if err != nil { - return "", err - } - return response.Key, nil -} - -func upload(ctx context.Context, filePath string, response *types.GetURLResponse) error { - fileData, err := os.Open(filePath) - if err != nil { - return fmt.Errorf("open file: %w", err) - } - - defer fileData.Close() - - stat, err := fileData.Stat() - if err != nil { - return fmt.Errorf("stat file: %w", err) - } - - if stat.Size() > maxBundleUploadSize { - return fmt.Errorf("file size exceeds maximum limit of %d bytes", maxBundleUploadSize) - } - - req, err := http.NewRequestWithContext(ctx, "PUT", response.URL, fileData) - if err != nil { - return fmt.Errorf("create PUT request: %w", err) - } - - req.ContentLength = stat.Size() - req.Header.Set("Content-Type", "application/octet-stream") - - putResp, err := http.DefaultClient.Do(req) - if err != nil { - return fmt.Errorf("upload failed: %v", err) - } - defer putResp.Body.Close() - - if putResp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(putResp.Body) - return fmt.Errorf("upload status %d: %s", putResp.StatusCode, string(body)) - } - return nil -} - -func getUploadURL(ctx context.Context, url string, managementURL string) (*types.GetURLResponse, error) { - id := getURLHash(managementURL) - getReq, err := http.NewRequestWithContext(ctx, "GET", url+"?id="+id, nil) - if err != nil { - return nil, fmt.Errorf("create GET request: %w", err) - } - - getReq.Header.Set(types.ClientHeader, types.ClientHeaderValue) - - resp, err := http.DefaultClient.Do(getReq) - if err != nil { - return nil, fmt.Errorf("get presigned URL: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("get presigned URL status %d: %s", resp.StatusCode, string(body)) - } - - urlBytes, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("read response body: %w", err) - } - var response types.GetURLResponse - if err := json.Unmarshal(urlBytes, &response); err != nil { - return nil, fmt.Errorf("unmarshal response: %w", err) - } - return &response, nil -} - -func getURLHash(url string) string { - return fmt.Sprintf("%x", sha256.Sum256([]byte(url))) -} - // GetLogLevel gets the current logging level for the server. func (s *Server) GetLogLevel(_ context.Context, _ *proto.GetLogLevelRequest) (*proto.GetLogLevelResponse, error) { s.mutex.Lock() diff --git a/client/server/server.go b/client/server/server.go index d593b3f34..22e80ab25 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -13,9 +13,8 @@ import ( "time" "github.com/cenkalti/backoff/v4" - "golang.zx2c4.com/wireguard/wgctrl/wgtypes" - log "github.com/sirupsen/logrus" + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" @@ -1521,7 +1520,7 @@ func (s *Server) connect(ctx context.Context, config *profilemanager.Config, sta log.Tracef("running client connection") s.connectClient = internal.NewConnectClient(ctx, config, statusRecorder, doInitialAutoUpdate) s.connectClient.SetSyncResponsePersistence(s.persistSyncResponse) - if err := s.connectClient.Run(runningChan); err != nil { + if err := s.connectClient.Run(runningChan, s.logFile); err != nil { return err } return nil diff --git a/client/server/server_test.go b/client/server/server_test.go index 1ed115769..82079c531 100644 --- a/client/server/server_test.go +++ b/client/server/server_test.go @@ -20,6 +20,7 @@ import ( "github.com/netbirdio/netbird/management/internals/modules/peers" "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/groups" @@ -306,6 +307,8 @@ func startManagement(t *testing.T, signalAddr string, counter *int) (*grpc.Serve peersManager := peers.NewManager(store, permissionsManagerMock) settingsManagerMock := settings.NewMockManager(ctrl) + jobManager := job.NewJobManager(nil, store, peersManager) + ia, _ := integrations.NewIntegratedValidator(context.Background(), peersManager, settingsManagerMock, eventStore) metrics, err := telemetry.NewDefaultAppMetrics(context.Background()) @@ -317,7 +320,7 @@ func startManagement(t *testing.T, signalAddr string, counter *int) (*grpc.Serve requestBuffer := server.NewAccountRequestBuffer(context.Background(), store) peersUpdateManager := update_channel.NewPeersUpdateManager(metrics) networkMapController := controller.NewController(context.Background(), store, metrics, peersUpdateManager, requestBuffer, server.MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), manager.NewEphemeralManager(store, peersManager), config) - accountManager, err := server.BuildManager(context.Background(), config, store, networkMapController, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false) + accountManager, err := server.BuildManager(context.Background(), config, store, networkMapController, jobManager, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false) if err != nil { return nil, "", err } @@ -326,7 +329,7 @@ func startManagement(t *testing.T, signalAddr string, counter *int) (*grpc.Serve if err != nil { return nil, "", err } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil) if err != nil { return nil, "", err } diff --git a/client/status/status.go b/client/status/status.go index 305797eee..be28ff67d 100644 --- a/client/status/status.go +++ b/client/status/status.go @@ -11,8 +11,12 @@ import ( "strings" "time" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" "gopkg.in/yaml.v3" + "golang.org/x/exp/maps" + "github.com/netbirdio/netbird/client/anonymize" "github.com/netbirdio/netbird/client/internal/peer" probeRelay "github.com/netbirdio/netbird/client/internal/relay" @@ -116,9 +120,7 @@ type OutputOverview struct { SSHServerState SSHServerStateOutput `json:"sshServer" yaml:"sshServer"` } -func ConvertToStatusOutputOverview(resp *proto.StatusResponse, anon bool, statusFilter string, prefixNamesFilter []string, prefixNamesFilterMap map[string]struct{}, ipsFilter map[string]struct{}, connectionTypeFilter string, profName string) OutputOverview { - pbFullStatus := resp.GetFullStatus() - +func ConvertToStatusOutputOverview(pbFullStatus *proto.FullStatus, anon bool, daemonVersion string, statusFilter string, prefixNamesFilter []string, prefixNamesFilterMap map[string]struct{}, ipsFilter map[string]struct{}, connectionTypeFilter string, profName string) OutputOverview { managementState := pbFullStatus.GetManagementState() managementOverview := ManagementStateOutput{ URL: managementState.GetURL(), @@ -134,13 +136,13 @@ func ConvertToStatusOutputOverview(resp *proto.StatusResponse, anon bool, status } relayOverview := mapRelays(pbFullStatus.GetRelays()) - peersOverview := mapPeers(resp.GetFullStatus().GetPeers(), statusFilter, prefixNamesFilter, prefixNamesFilterMap, ipsFilter, connectionTypeFilter) sshServerOverview := mapSSHServer(pbFullStatus.GetSshServerState()) + peersOverview := mapPeers(pbFullStatus.GetPeers(), statusFilter, prefixNamesFilter, prefixNamesFilterMap, ipsFilter, connectionTypeFilter) overview := OutputOverview{ Peers: peersOverview, CliVersion: version.NetbirdVersion(), - DaemonVersion: resp.GetDaemonVersion(), + DaemonVersion: daemonVersion, ManagementState: managementOverview, SignalState: signalOverview, Relays: relayOverview, @@ -553,6 +555,94 @@ func (o *OutputOverview) FullDetailSummary() string { ) } +func ToProtoFullStatus(fullStatus peer.FullStatus) *proto.FullStatus { + pbFullStatus := proto.FullStatus{ + ManagementState: &proto.ManagementState{}, + SignalState: &proto.SignalState{}, + LocalPeerState: &proto.LocalPeerState{}, + Peers: []*proto.PeerState{}, + } + + pbFullStatus.ManagementState.URL = fullStatus.ManagementState.URL + pbFullStatus.ManagementState.Connected = fullStatus.ManagementState.Connected + if err := fullStatus.ManagementState.Error; err != nil { + pbFullStatus.ManagementState.Error = err.Error() + } + + pbFullStatus.SignalState.URL = fullStatus.SignalState.URL + pbFullStatus.SignalState.Connected = fullStatus.SignalState.Connected + if err := fullStatus.SignalState.Error; err != nil { + pbFullStatus.SignalState.Error = err.Error() + } + + pbFullStatus.LocalPeerState.IP = fullStatus.LocalPeerState.IP + pbFullStatus.LocalPeerState.PubKey = fullStatus.LocalPeerState.PubKey + pbFullStatus.LocalPeerState.KernelInterface = fullStatus.LocalPeerState.KernelInterface + pbFullStatus.LocalPeerState.Fqdn = fullStatus.LocalPeerState.FQDN + pbFullStatus.LocalPeerState.RosenpassPermissive = fullStatus.RosenpassState.Permissive + pbFullStatus.LocalPeerState.RosenpassEnabled = fullStatus.RosenpassState.Enabled + pbFullStatus.LocalPeerState.Networks = maps.Keys(fullStatus.LocalPeerState.Routes) + pbFullStatus.NumberOfForwardingRules = int32(fullStatus.NumOfForwardingRules) + pbFullStatus.LazyConnectionEnabled = fullStatus.LazyConnectionEnabled + + for _, peerState := range fullStatus.Peers { + pbPeerState := &proto.PeerState{ + IP: peerState.IP, + PubKey: peerState.PubKey, + ConnStatus: peerState.ConnStatus.String(), + ConnStatusUpdate: timestamppb.New(peerState.ConnStatusUpdate), + Relayed: peerState.Relayed, + LocalIceCandidateType: peerState.LocalIceCandidateType, + RemoteIceCandidateType: peerState.RemoteIceCandidateType, + LocalIceCandidateEndpoint: peerState.LocalIceCandidateEndpoint, + RemoteIceCandidateEndpoint: peerState.RemoteIceCandidateEndpoint, + RelayAddress: peerState.RelayServerAddress, + Fqdn: peerState.FQDN, + LastWireguardHandshake: timestamppb.New(peerState.LastWireguardHandshake), + BytesRx: peerState.BytesRx, + BytesTx: peerState.BytesTx, + RosenpassEnabled: peerState.RosenpassEnabled, + Networks: maps.Keys(peerState.GetRoutes()), + Latency: durationpb.New(peerState.Latency), + SshHostKey: peerState.SSHHostKey, + } + pbFullStatus.Peers = append(pbFullStatus.Peers, pbPeerState) + } + + for _, relayState := range fullStatus.Relays { + pbRelayState := &proto.RelayState{ + URI: relayState.URI, + Available: relayState.Err == nil, + } + if err := relayState.Err; err != nil { + pbRelayState.Error = err.Error() + } + pbFullStatus.Relays = append(pbFullStatus.Relays, pbRelayState) + } + + for _, dnsState := range fullStatus.NSGroupStates { + var err string + if dnsState.Error != nil { + err = dnsState.Error.Error() + } + + var servers []string + for _, server := range dnsState.Servers { + servers = append(servers, server.String()) + } + + pbDnsState := &proto.NSGroupState{ + Servers: servers, + Domains: dnsState.Domains, + Enabled: dnsState.Enabled, + Error: err, + } + pbFullStatus.DnsServers = append(pbFullStatus.DnsServers, pbDnsState) + } + + return &pbFullStatus +} + func parsePeers(peers PeersStateOutput, rosenpassEnabled, rosenpassPermissive bool) string { var ( peersString = "" diff --git a/client/status/status_test.go b/client/status/status_test.go index f4585827b..ad158722b 100644 --- a/client/status/status_test.go +++ b/client/status/status_test.go @@ -238,7 +238,7 @@ var overview = OutputOverview{ } func TestConversionFromFullStatusToOutputOverview(t *testing.T) { - convertedResult := ConvertToStatusOutputOverview(resp, false, "", nil, nil, nil, "", "") + convertedResult := ConvertToStatusOutputOverview(resp.GetFullStatus(), false, resp.GetDaemonVersion(), "", nil, nil, nil, "", "") assert.Equal(t, overview, convertedResult) } diff --git a/client/ui/debug.go b/client/ui/debug.go index a057b2a85..e9bcfde41 100644 --- a/client/ui/debug.go +++ b/client/ui/debug.go @@ -18,9 +18,7 @@ import ( "github.com/skratchdot/open-golang/open" "github.com/netbirdio/netbird/client/internal" - "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/proto" - nbstatus "github.com/netbirdio/netbird/client/status" uptypes "github.com/netbirdio/netbird/upload-server/types" ) @@ -291,19 +289,18 @@ func (s *serviceClient) handleRunForDuration( return } - statusOutput, err := s.collectDebugData(conn, initialState, params, progressUI) - if err != nil { + defer s.restoreServiceState(conn, initialState) + + if err := s.collectDebugData(conn, initialState, params, progressUI); err != nil { handleError(progressUI, err.Error()) return } - if err := s.createDebugBundleFromCollection(conn, params, statusOutput, progressUI); err != nil { + if err := s.createDebugBundleFromCollection(conn, params, progressUI); err != nil { handleError(progressUI, err.Error()) return } - s.restoreServiceState(conn, initialState) - progressUI.statusLabel.SetText("Bundle created successfully") } @@ -417,68 +414,33 @@ func (s *serviceClient) collectDebugData( state *debugInitialState, params *debugCollectionParams, progress *progressUI, -) (string, error) { +) error { ctx, cancel := context.WithTimeout(s.ctx, params.duration) defer cancel() var wg sync.WaitGroup startProgressTracker(ctx, &wg, params.duration, progress) if err := s.configureServiceForDebug(conn, state, params.enablePersistence); err != nil { - return "", err + return err } - pm := profilemanager.NewProfileManager() - var profName string - if activeProf, err := pm.GetActiveProfile(); err == nil { - profName = activeProf.Name - } - - postUpStatus, err := conn.Status(s.ctx, &proto.StatusRequest{GetFullPeerStatus: true}) - if err != nil { - log.Warnf("Failed to get post-up status: %v", err) - } - - var postUpStatusOutput string - if postUpStatus != nil { - overview := nbstatus.ConvertToStatusOutputOverview(postUpStatus, params.anonymize, "", nil, nil, nil, "", profName) - postUpStatusOutput = overview.FullDetailSummary() - } - headerPostUp := fmt.Sprintf("----- NetBird post-up - Timestamp: %s", time.Now().Format(time.RFC3339)) - statusOutput := fmt.Sprintf("%s\n%s", headerPostUp, postUpStatusOutput) - wg.Wait() progress.progressBar.Hide() progress.statusLabel.SetText("Collecting debug data...") - preDownStatus, err := conn.Status(s.ctx, &proto.StatusRequest{GetFullPeerStatus: true}) - if err != nil { - log.Warnf("Failed to get pre-down status: %v", err) - } - - var preDownStatusOutput string - if preDownStatus != nil { - overview := nbstatus.ConvertToStatusOutputOverview(preDownStatus, params.anonymize, "", nil, nil, nil, "", profName) - preDownStatusOutput = overview.FullDetailSummary() - } - headerPreDown := fmt.Sprintf("----- NetBird pre-down - Timestamp: %s - Duration: %s", - time.Now().Format(time.RFC3339), params.duration) - statusOutput = fmt.Sprintf("%s\n%s\n%s", statusOutput, headerPreDown, preDownStatusOutput) - - return statusOutput, nil + return nil } // Create the debug bundle with collected data func (s *serviceClient) createDebugBundleFromCollection( conn proto.DaemonServiceClient, params *debugCollectionParams, - statusOutput string, progress *progressUI, ) error { progress.statusLabel.SetText("Creating debug bundle with collected logs...") request := &proto.DebugBundleRequest{ Anonymize: params.anonymize, - Status: statusOutput, SystemInfo: params.systemInfo, } @@ -581,26 +543,8 @@ func (s *serviceClient) createDebugBundle(anonymize bool, systemInfo bool, uploa return nil, fmt.Errorf("get client: %v", err) } - pm := profilemanager.NewProfileManager() - var profName string - if activeProf, err := pm.GetActiveProfile(); err == nil { - profName = activeProf.Name - } - - statusResp, err := conn.Status(s.ctx, &proto.StatusRequest{GetFullPeerStatus: true}) - if err != nil { - log.Warnf("failed to get status for debug bundle: %v", err) - } - - var statusOutput string - if statusResp != nil { - overview := nbstatus.ConvertToStatusOutputOverview(statusResp, anonymize, "", nil, nil, nil, "", profName) - statusOutput = overview.FullDetailSummary() - } - request := &proto.DebugBundleRequest{ Anonymize: anonymize, - Status: statusOutput, SystemInfo: systemInfo, } diff --git a/client/wasm/cmd/main.go b/client/wasm/cmd/main.go index 2647c2f0d..26022ffc7 100644 --- a/client/wasm/cmd/main.go +++ b/client/wasm/cmd/main.go @@ -12,7 +12,6 @@ import ( "google.golang.org/protobuf/encoding/protojson" netbird "github.com/netbirdio/netbird/client/embed" - "github.com/netbirdio/netbird/client/proto" sshdetection "github.com/netbirdio/netbird/client/ssh/detection" nbstatus "github.com/netbirdio/netbird/client/status" "github.com/netbirdio/netbird/client/wasm/internal/http" @@ -350,12 +349,8 @@ func getStatusOverview(client *netbird.Client) (nbstatus.OutputOverview, error) } pbFullStatus := fullStatus.ToProto() - statusResp := &proto.StatusResponse{ - DaemonVersion: version.NetbirdVersion(), - FullStatus: pbFullStatus, - } - return nbstatus.ConvertToStatusOutputOverview(statusResp, false, "", nil, nil, nil, "", ""), nil + return nbstatus.ConvertToStatusOutputOverview(pbFullStatus, false, version.NetbirdVersion(), "", nil, nil, nil, "", ""), nil } // createStatusMethod creates the status method that returns JSON diff --git a/go.mod b/go.mod index 773869cb5..cb16fff52 100644 --- a/go.mod +++ b/go.mod @@ -70,6 +70,7 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.2 github.com/netbirdio/management-integrations/integrations v0.0.0-20251203183432-d5400f030847 github.com/netbirdio/signal-dispatcher/dispatcher v0.0.0-20250805121659-6b4ac470ca45 + github.com/oapi-codegen/runtime v1.1.2 github.com/okta/okta-sdk-golang/v2 v2.18.0 github.com/oschwald/maxminddb-golang v1.12.0 github.com/patrickmn/go-cache v2.1.0+incompatible @@ -141,6 +142,7 @@ require ( github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Microsoft/hcsshim v0.12.3 // indirect github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect + github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/awnumar/memcall v0.4.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect diff --git a/go.sum b/go.sum index 4ea00b399..c59acbb23 100644 --- a/go.sum +++ b/go.sum @@ -35,12 +35,15 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/hcsshim v0.12.3 h1:LS9NXqXhMoqNCplK1ApmVSfB4UnVLRDWRapB6EIlxE0= github.com/Microsoft/hcsshim v0.12.3/go.mod h1:Iyl1WVpZzr+UkzjekHZbV8o5Z9ZkxNGx6CtY2Qg/JVQ= +github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= github.com/TheJumpCloud/jcapi-go v3.0.0+incompatible h1:hqcTK6ZISdip65SR792lwYJTa/axESA0889D3UlZbLo= github.com/TheJumpCloud/jcapi-go v3.0.0+incompatible/go.mod h1:6B1nuc1MUs6c62ODZDl7hVE5Pv7O2XGSkgg2olnq34I= github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktpoUAgOJK3OTFc/xug0PCXYCqU0FgDKI= github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= +github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/awnumar/memcall v0.4.0 h1:B7hgZYdfH6Ot1Goaz8jGne/7i8xD4taZie/PNSFZ29g= github.com/awnumar/memcall v0.4.0/go.mod h1:8xOx1YbfyuCg3Fy6TO8DK0kZUua3V42/goA5Ru47E8w= github.com/awnumar/memguard v0.23.0 h1:sJ3a1/SWlcuKIQ7MV+R9p0Pvo9CWsMbGZvcZQtmc68A= @@ -87,6 +90,7 @@ github.com/beevik/etree v1.6.0 h1:u8Kwy8pp9D9XeITj2Z0XtA5qqZEmtJtuXZRQi+j03eE= github.com/beevik/etree v1.6.0/go.mod h1:bh4zJxiIr62SOf9pRzN7UUYaEDa9HEKafK25+sLc0Gc= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= @@ -320,6 +324,7 @@ github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7X github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jsummers/gobmp v0.0.0-20230614200233-a9de23ed2e25 h1:YLvr1eE6cdCqjOe972w/cYF+FjW34v27+9Vo5106B4M= github.com/jsummers/gobmp v0.0.0-20230614200233-a9de23ed2e25/go.mod h1:kLgvv7o6UM+0QSf0QjAse3wReFDsb9qbZJdfexWlrQw= +github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -416,6 +421,8 @@ github.com/nicksnyder/go-i18n/v2 v2.5.1/go.mod h1:DrhgsSDZxoAfvVrBVLXoxZn/pN5TXq github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oapi-codegen/runtime v1.1.2 h1:P2+CubHq8fO4Q6fV1tqDBZHCwpVpvPg7oKiYzQgXIyI= +github.com/oapi-codegen/runtime v1.1.2/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= github.com/okta/okta-sdk-golang/v2 v2.18.0 h1:cfDasMb7CShbZvOrF6n+DnLevWwiHgedWMGJ8M8xKDc= github.com/okta/okta-sdk-golang/v2 v2.18.0/go.mod h1:dz30v3ctAiMb7jpsCngGfQUAEGm1/NsWT92uTbNDQIs= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -522,6 +529,7 @@ github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/srwiley/oksvg v0.0.0-20221011165216-be6e8873101c h1:km8GpoQut05eY3GiYWEedbTT0qnSxrCjsVbb7yKY1KE= github.com/srwiley/oksvg v0.0.0-20221011165216-be6e8873101c/go.mod h1:cNQ3dwVJtS5Hmnjxy6AgTPd0Inb3pW05ftPSX7NZO7Q= github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef h1:Ch6Q+AZUxDBCVqdkI8FSpFyZDtCVBc2VmejdNrm5rRQ= diff --git a/management/internals/modules/peers/manager.go b/management/internals/modules/peers/manager.go index 4935c608e..1551689b4 100644 --- a/management/internals/modules/peers/manager.go +++ b/management/internals/modules/peers/manager.go @@ -31,6 +31,7 @@ type Manager interface { SetNetworkMapController(networkMapController network_map.Controller) SetIntegratedPeerValidator(integratedPeerValidator integrated_validator.IntegratedValidator) SetAccountManager(accountManager account.Manager) + GetPeerID(ctx context.Context, peerKey string) (string, error) } type managerImpl struct { @@ -167,3 +168,7 @@ func (m *managerImpl) DeletePeers(ctx context.Context, accountID string, peerIDs return nil } + +func (m *managerImpl) GetPeerID(ctx context.Context, peerKey string) (string, error) { + return m.store.GetPeerIDByKey(ctx, store.LockingStrengthNone, peerKey) +} diff --git a/management/internals/modules/peers/manager_mock.go b/management/internals/modules/peers/manager_mock.go index 2e3651e88..6feedca2e 100644 --- a/management/internals/modules/peers/manager_mock.go +++ b/management/internals/modules/peers/manager_mock.go @@ -97,6 +97,21 @@ func (mr *MockManagerMockRecorder) GetPeerAccountID(ctx, peerID interface{}) *go return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerAccountID", reflect.TypeOf((*MockManager)(nil).GetPeerAccountID), ctx, peerID) } +// GetPeerID mocks base method. +func (m *MockManager) GetPeerID(ctx context.Context, peerKey string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerID", ctx, peerKey) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerID indicates an expected call of GetPeerID. +func (mr *MockManagerMockRecorder) GetPeerID(ctx, peerKey interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerID", reflect.TypeOf((*MockManager)(nil).GetPeerID), ctx, peerKey) +} + // GetPeersByGroupIDs mocks base method. func (m *MockManager) GetPeersByGroupIDs(ctx context.Context, accountID string, groupsIDs []string) ([]*peer.Peer, error) { m.ctrl.T.Helper() diff --git a/management/internals/server/boot.go b/management/internals/server/boot.go index 5d312ef94..55af17fdf 100644 --- a/management/internals/server/boot.go +++ b/management/internals/server/boot.go @@ -144,7 +144,7 @@ func (s *BaseServer) GRPCServer() *grpc.Server { } gRPCAPIHandler := grpc.NewServer(gRPCOpts...) - srv, err := nbgrpc.NewServer(s.Config, s.AccountManager(), s.SettingsManager(), s.SecretsManager(), s.Metrics(), s.AuthManager(), s.IntegratedValidator(), s.NetworkMapController(), s.OAuthConfigProvider()) + srv, err := nbgrpc.NewServer(s.Config, s.AccountManager(), s.SettingsManager(), s.JobManager(), s.SecretsManager(), s.Metrics(), s.AuthManager(), s.IntegratedValidator(), s.NetworkMapController(), s.OAuthConfigProvider()) if err != nil { log.Fatalf("failed to create management server: %v", err) } diff --git a/management/internals/server/controllers.go b/management/internals/server/controllers.go index 9f35d436f..4ea86900a 100644 --- a/management/internals/server/controllers.go +++ b/management/internals/server/controllers.go @@ -6,6 +6,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/netbirdio/management-integrations/integrations" + "github.com/netbirdio/netbird/management/internals/controllers/network_map" nmapcontroller "github.com/netbirdio/netbird/management/internals/controllers/network_map/controller" "github.com/netbirdio/netbird/management/internals/controllers/network_map/update_channel" @@ -16,6 +17,7 @@ import ( "github.com/netbirdio/netbird/management/server/auth" "github.com/netbirdio/netbird/management/server/integrations/integrated_validator" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" ) func (s *BaseServer) PeersUpdateManager() network_map.PeersUpdateManager { @@ -24,6 +26,12 @@ func (s *BaseServer) PeersUpdateManager() network_map.PeersUpdateManager { }) } +func (s *BaseServer) JobManager() *job.Manager { + return Create(s, func() *job.Manager { + return job.NewJobManager(s.Metrics(), s.Store(), s.PeersManager()) + }) +} + func (s *BaseServer) IntegratedValidator() integrated_validator.IntegratedValidator { return Create(s, func() integrated_validator.IntegratedValidator { integratedPeerValidator, err := integrations.NewIntegratedValidator( diff --git a/management/internals/server/modules.go b/management/internals/server/modules.go index 9649caead..b51e2ebb2 100644 --- a/management/internals/server/modules.go +++ b/management/internals/server/modules.go @@ -87,7 +87,7 @@ func (s *BaseServer) PeersManager() peers.Manager { func (s *BaseServer) AccountManager() account.Manager { return Create(s, func() account.Manager { - accountManager, err := server.BuildManager(context.Background(), s.Config, s.Store(), s.NetworkMapController(), s.IdpManager(), s.mgmtSingleAccModeDomain, s.EventStore(), s.GeoLocationManager(), s.userDeleteFromIDPEnabled, s.IntegratedValidator(), s.Metrics(), s.ProxyController(), s.SettingsManager(), s.PermissionsManager(), s.Config.DisableDefaultPolicy) + accountManager, err := server.BuildManager(context.Background(), s.Config, s.Store(), s.NetworkMapController(), s.JobManager(), s.IdpManager(), s.mgmtSingleAccModeDomain, s.EventStore(), s.GeoLocationManager(), s.userDeleteFromIDPEnabled, s.IntegratedValidator(), s.Metrics(), s.ProxyController(), s.SettingsManager(), s.PermissionsManager(), s.Config.DisableDefaultPolicy) if err != nil { log.Fatalf("failed to create account manager: %v", err) } diff --git a/management/internals/shared/grpc/conversion_test.go b/management/internals/shared/grpc/conversion_test.go index 95ad05eec..1e75caf95 100644 --- a/management/internals/shared/grpc/conversion_test.go +++ b/management/internals/shared/grpc/conversion_test.go @@ -195,6 +195,7 @@ func TestBuildJWTConfig_Audiences(t *testing.T) { assert.NotNil(t, result) assert.Equal(t, tc.expectedAudiences, result.Audiences, "audiences should match expected") + //nolint:staticcheck // SA1019: Testing backwards compatibility - Audience field must still be populated assert.Equal(t, tc.expectedAudience, result.Audience, "audience should match expected") }) } diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index 801c15158..1ff0243f4 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "io" "net" "net/netip" "os" @@ -26,6 +27,7 @@ import ( "github.com/netbirdio/netbird/management/internals/controllers/network_map" nbconfig "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/management/server/integrations/integrated_validator" "github.com/netbirdio/netbird/management/server/store" @@ -57,6 +59,7 @@ type Server struct { accountManager account.Manager settingsManager settings.Manager proto.UnimplementedManagementServiceServer + jobManager *job.Manager config *nbconfig.Config secretsManager SecretsManager appMetrics telemetry.AppMetrics @@ -82,6 +85,7 @@ func NewServer( config *nbconfig.Config, accountManager account.Manager, settingsManager settings.Manager, + jobManager *job.Manager, secretsManager SecretsManager, appMetrics telemetry.AppMetrics, authManager auth.Manager, @@ -114,6 +118,7 @@ func NewServer( } return &Server{ + jobManager: jobManager, accountManager: accountManager, settingsManager: settingsManager, config: config, @@ -169,6 +174,40 @@ func getRealIP(ctx context.Context) net.IP { return nil } +func (s *Server) Job(srv proto.ManagementService_JobServer) error { + reqStart := time.Now() + ctx := srv.Context() + + peerKey, err := s.handleHandshake(ctx, srv) + if err != nil { + return err + } + + accountID, err := s.accountManager.GetAccountIDForPeerKey(ctx, peerKey.String()) + if err != nil { + // nolint:staticcheck + ctx = context.WithValue(ctx, nbContext.AccountIDKey, "UNKNOWN") + log.WithContext(ctx).Tracef("peer %s is not registered", peerKey.String()) + if errStatus, ok := internalStatus.FromError(err); ok && errStatus.Type() == internalStatus.NotFound { + return status.Errorf(codes.PermissionDenied, "peer is not registered") + } + return err + } + // nolint:staticcheck + ctx = context.WithValue(ctx, nbContext.AccountIDKey, accountID) + peer, err := s.accountManager.GetStore().GetPeerByPeerPubKey(ctx, store.LockingStrengthNone, peerKey.String()) + if err != nil { + return status.Errorf(codes.Unauthenticated, "peer is not registered") + } + + s.startResponseReceiver(ctx, srv) + + updates := s.jobManager.CreateJobChannel(ctx, accountID, peer.ID) + log.WithContext(ctx).Debugf("Job: took %v", time.Since(reqStart)) + + return s.sendJobsLoop(ctx, accountID, peerKey, peer, updates, srv) +} + // Sync validates the existence of a connecting peer, sends an initial state (all available for the connecting peers) and // notifies the connected peer of any updates (e.g. new peers under the same account) func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_SyncServer) error { @@ -289,6 +328,70 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S return s.handleUpdates(ctx, accountID, peerKey, peer, updates, srv) } +func (s *Server) handleHandshake(ctx context.Context, srv proto.ManagementService_JobServer) (wgtypes.Key, error) { + hello, err := srv.Recv() + if err != nil { + return wgtypes.Key{}, status.Errorf(codes.InvalidArgument, "missing hello: %v", err) + } + + jobReq := &proto.JobRequest{} + peerKey, err := s.parseRequest(ctx, hello, jobReq) + if err != nil { + return wgtypes.Key{}, err + } + + return peerKey, nil +} + +func (s *Server) startResponseReceiver(ctx context.Context, srv proto.ManagementService_JobServer) { + go func() { + for { + msg, err := srv.Recv() + if err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, context.Canceled) { + return + } + log.WithContext(ctx).Warnf("recv job response error: %v", err) + return + } + + jobResp := &proto.JobResponse{} + if _, err := s.parseRequest(ctx, msg, jobResp); err != nil { + log.WithContext(ctx).Warnf("invalid job response: %v", err) + continue + } + + if err := s.jobManager.HandleResponse(ctx, jobResp, msg.WgPubKey); err != nil { + log.WithContext(ctx).Errorf("handle job response failed: %v", err) + } + } + }() +} + +func (s *Server) sendJobsLoop(ctx context.Context, accountID string, peerKey wgtypes.Key, peer *nbpeer.Peer, updates *job.Channel, srv proto.ManagementService_JobServer) error { + // todo figure out better error handling strategy + defer s.jobManager.CloseChannel(ctx, accountID, peer.ID) + + for { + event, err := updates.Event(ctx) + if err != nil { + if errors.Is(err, job.ErrJobChannelClosed) { + log.WithContext(ctx).Debugf("jobs channel for peer %s was closed", peerKey.String()) + return nil + } + + // happens when connection drops, e.g. client disconnects + log.WithContext(ctx).Debugf("stream of peer %s has been closed", peerKey.String()) + return ctx.Err() + } + + if err := s.sendJob(ctx, peerKey, event, srv); err != nil { + log.WithContext(ctx).Warnf("send job failed: %v", err) + return nil + } + } +} + // handleUpdates sends updates to the connected peer until the updates channel is closed. func (s *Server) handleUpdates(ctx context.Context, accountID string, peerKey wgtypes.Key, peer *nbpeer.Peer, updates chan *network_map.UpdateMessage, srv proto.ManagementService_SyncServer) error { log.WithContext(ctx).Tracef("starting to handle updates for peer %s", peerKey.String()) @@ -306,7 +409,6 @@ func (s *Server) handleUpdates(ctx context.Context, accountID string, peerKey wg return nil } log.WithContext(ctx).Debugf("received an update for peer %s", peerKey.String()) - if err := s.sendUpdate(ctx, accountID, peerKey, peer, update, srv); err != nil { log.WithContext(ctx).Debugf("error while sending an update to peer %s: %v", peerKey.String(), err) return err @@ -336,7 +438,7 @@ func (s *Server) sendUpdate(ctx context.Context, accountID string, peerKey wgtyp s.cancelPeerRoutines(ctx, accountID, peer) return status.Errorf(codes.Internal, "failed processing update message") } - err = srv.SendMsg(&proto.EncryptedMessage{ + err = srv.Send(&proto.EncryptedMessage{ WgPubKey: key.PublicKey().String(), Body: encryptedResp, }) @@ -348,6 +450,31 @@ func (s *Server) sendUpdate(ctx context.Context, accountID string, peerKey wgtyp return nil } +// sendJob encrypts the update message using the peer key and the server's wireguard key, +// then sends the encrypted message to the connected peer via the sync server. +func (s *Server) sendJob(ctx context.Context, peerKey wgtypes.Key, job *job.Event, srv proto.ManagementService_JobServer) error { + wgKey, err := s.secretsManager.GetWGKey() + if err != nil { + log.WithContext(ctx).Errorf("failed to get wg key for peer %s: %v", peerKey.String(), err) + return status.Errorf(codes.Internal, "failed processing job message") + } + + encryptedResp, err := encryption.EncryptMessage(peerKey, wgKey, job.Request) + if err != nil { + log.WithContext(ctx).Errorf("failed to encrypt job for peer %s: %v", peerKey.String(), err) + return status.Errorf(codes.Internal, "failed processing job message") + } + err = srv.Send(&proto.EncryptedMessage{ + WgPubKey: wgKey.PublicKey().String(), + Body: encryptedResp, + }) + if err != nil { + return status.Errorf(codes.Internal, "failed sending job message") + } + log.WithContext(ctx).Debugf("sent a job to peer: %s", peerKey.String()) + return nil +} + func (s *Server) cancelPeerRoutines(ctx context.Context, accountID string, peer *nbpeer.Peer) { unlock := s.acquirePeerLockByUID(ctx, peer.Key) defer unlock() @@ -690,8 +817,8 @@ func (s *Server) IsHealthy(ctx context.Context, req *proto.Empty) (*proto.Empty, // sendInitialSync sends initial proto.SyncResponse to the peer requesting synchronization func (s *Server) sendInitialSync(ctx context.Context, peerKey wgtypes.Key, peer *nbpeer.Peer, networkMap *types.NetworkMap, postureChecks []*posture.Checks, srv proto.ManagementService_SyncServer, dnsFwdPort int64) error { var err error - var turnToken *Token + if s.config.TURNConfig != nil && s.config.TURNConfig.TimeBasedCredentials { turnToken, err = s.secretsManager.GenerateTurnToken() if err != nil { diff --git a/management/server/account.go b/management/server/account.go index 61882411b..d453b87c3 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -15,6 +15,7 @@ import ( "sync" "time" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/shared/auth" cacheStore "github.com/eko/gocache/lib/v4/store" @@ -70,6 +71,7 @@ type DefaultAccountManager struct { // cacheLoading keeps the accountIDs that are currently reloading. The accountID has to be removed once cache has been reloaded cacheLoading map[string]chan struct{} networkMapController network_map.Controller + jobManager *job.Manager idpManager idp.Manager cacheManager *nbcache.AccountUserDataCache externalCacheManager nbcache.UserDataCache @@ -178,6 +180,7 @@ func BuildManager( config *nbconfig.Config, store store.Store, networkMapController network_map.Controller, + jobManager *job.Manager, idpManager idp.Manager, singleAccountModeDomain string, eventStore activity.Store, @@ -200,6 +203,7 @@ func BuildManager( config: config, geo: geo, networkMapController: networkMapController, + jobManager: jobManager, idpManager: idpManager, ctx: context.Background(), cacheMux: sync.Mutex{}, diff --git a/management/server/account/manager.go b/management/server/account/manager.go index 7680a8464..f925af4ec 100644 --- a/management/server/account/manager.go +++ b/management/server/account/manager.go @@ -129,4 +129,7 @@ type Manager interface { CreateIdentityProvider(ctx context.Context, accountID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) UpdateIdentityProvider(ctx context.Context, accountID, idpID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) DeleteIdentityProvider(ctx context.Context, accountID, idpID, userID string) error + CreatePeerJob(ctx context.Context, accountID, peerID, userID string, job *types.Job) error + GetAllPeerJobs(ctx context.Context, accountID, userID, peerID string) ([]*types.Job, error) + GetPeerJobByID(ctx context.Context, accountID, userID, peerID, jobID string) (*types.Job, error) } diff --git a/management/server/account_test.go b/management/server/account_test.go index 3279a373b..86cc69e8b 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -35,6 +35,7 @@ import ( "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" "github.com/netbirdio/netbird/management/server/idp" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" networkTypes "github.com/netbirdio/netbird/management/server/networks/types" @@ -3023,13 +3024,14 @@ func createManager(t testing.TB) (*DefaultAccountManager, *update_channel.PeersU AnyTimes() permissionsManager := permissions.NewManager(store) + peersManager := peers.NewManager(store, permissionsManager) ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, store) networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peers.NewManager(store, permissionsManager)), &config.Config{}) - manager, err := BuildManager(ctx, &config.Config{}, store, networkMapController, nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + manager, err := BuildManager(ctx, &config.Config{}, store, networkMapController, job.NewJobManager(nil, store, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) if err != nil { return nil, nil, err } diff --git a/management/server/activity/codes.go b/management/server/activity/codes.go index 7593e1230..ae8e46db9 100644 --- a/management/server/activity/codes.go +++ b/management/server/activity/codes.go @@ -195,6 +195,8 @@ const ( DNSRecordUpdated Activity = 100 DNSRecordDeleted Activity = 101 + JobCreatedByUser Activity = 102 + AccountDeleted Activity = 99999 ) @@ -319,6 +321,8 @@ var activityMap = map[Activity]Code{ DNSRecordCreated: {"DNS zone record created", "dns.zone.record.create"}, DNSRecordUpdated: {"DNS zone record updated", "dns.zone.record.update"}, DNSRecordDeleted: {"DNS zone record deleted", "dns.zone.record.delete"}, + + JobCreatedByUser: {"Create Job for peer", "peer.job.create"}, } // StringCode returns a string code of the activity diff --git a/management/server/dns_test.go b/management/server/dns_test.go index d1da79380..bd0755d0d 100644 --- a/management/server/dns_test.go +++ b/management/server/dns_test.go @@ -16,6 +16,7 @@ import ( ephemeral_manager "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/settings" "github.com/netbirdio/netbird/management/server/store" @@ -221,13 +222,14 @@ func createDNSManager(t *testing.T) (*DefaultAccountManager, error) { // return empty extra settings for expected calls to UpdateAccountPeers settingsMockManager.EXPECT().GetExtraSettings(gomock.Any(), gomock.Any()).Return(&types.ExtraSettings{}, nil).AnyTimes() permissionsManager := permissions.NewManager(store) + peersManager := peers.NewManager(store, permissionsManager) ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, store) networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.test", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peers.NewManager(store, permissionsManager)), &config.Config{}) - return BuildManager(context.Background(), nil, store, networkMapController, nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + return BuildManager(context.Background(), nil, store, networkMapController, job.NewJobManager(nil, store, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) } func createDNSStore(t *testing.T) (store.Store, error) { diff --git a/management/server/http/handlers/peers/peers_handler.go b/management/server/http/handlers/peers/peers_handler.go index b8fb3ea36..53d8ab055 100644 --- a/management/server/http/handlers/peers/peers_handler.go +++ b/management/server/http/handlers/peers/peers_handler.go @@ -36,6 +36,9 @@ func AddEndpoints(accountManager account.Manager, router *mux.Router, networkMap Methods("GET", "PUT", "DELETE", "OPTIONS") router.HandleFunc("/peers/{peerId}/accessible-peers", peersHandler.GetAccessiblePeers).Methods("GET", "OPTIONS") router.HandleFunc("/peers/{peerId}/temporary-access", peersHandler.CreateTemporaryAccess).Methods("POST", "OPTIONS") + router.HandleFunc("/peers/{peerId}/jobs", peersHandler.ListJobs).Methods("GET", "OPTIONS") + router.HandleFunc("/peers/{peerId}/jobs", peersHandler.CreateJob).Methods("POST", "OPTIONS") + router.HandleFunc("/peers/{peerId}/jobs/{jobId}", peersHandler.GetJob).Methods("GET", "OPTIONS") } // NewHandler creates a new peers Handler @@ -46,6 +49,99 @@ func NewHandler(accountManager account.Manager, networkMapController network_map } } +func (h *Handler) CreateJob(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + userAuth, err := nbcontext.GetUserAuthFromContext(ctx) + if err != nil { + util.WriteError(ctx, err, w) + return + } + + vars := mux.Vars(r) + peerID := vars["peerId"] + + req := &api.JobRequest{} + if err := json.NewDecoder(r.Body).Decode(req); err != nil { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + + job, err := types.NewJob(userAuth.UserId, userAuth.AccountId, peerID, req) + if err != nil { + util.WriteError(ctx, err, w) + return + } + if err := h.accountManager.CreatePeerJob(ctx, userAuth.AccountId, peerID, userAuth.UserId, job); err != nil { + util.WriteError(ctx, err, w) + return + } + + resp, err := toSingleJobResponse(job) + if err != nil { + util.WriteError(ctx, err, w) + return + } + + util.WriteJSONObject(ctx, w, resp) +} + +func (h *Handler) ListJobs(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + userAuth, err := nbcontext.GetUserAuthFromContext(ctx) + if err != nil { + util.WriteError(ctx, err, w) + return + } + + vars := mux.Vars(r) + peerID := vars["peerId"] + + jobs, err := h.accountManager.GetAllPeerJobs(ctx, userAuth.AccountId, userAuth.UserId, peerID) + if err != nil { + util.WriteError(ctx, err, w) + return + } + + respBody := make([]*api.JobResponse, 0, len(jobs)) + for _, job := range jobs { + resp, err := toSingleJobResponse(job) + if err != nil { + util.WriteError(ctx, err, w) + return + } + respBody = append(respBody, resp) + } + + util.WriteJSONObject(ctx, w, respBody) +} + +func (h *Handler) GetJob(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + userAuth, err := nbcontext.GetUserAuthFromContext(ctx) + if err != nil { + util.WriteError(ctx, err, w) + return + } + + vars := mux.Vars(r) + peerID := vars["peerId"] + jobID := vars["jobId"] + + job, err := h.accountManager.GetPeerJobByID(ctx, userAuth.AccountId, userAuth.UserId, peerID, jobID) + if err != nil { + util.WriteError(ctx, err, w) + return + } + + resp, err := toSingleJobResponse(job) + if err != nil { + util.WriteError(ctx, err, w) + return + } + + util.WriteJSONObject(ctx, w, resp) +} + func (h *Handler) getPeer(ctx context.Context, accountID, peerID, userID string, w http.ResponseWriter) { peer, err := h.accountManager.GetPeer(ctx, accountID, peerID, userID) if err != nil { @@ -521,6 +617,28 @@ func toPeerListItemResponse(peer *nbpeer.Peer, groupsInfo []api.GroupMinimum, dn } } +func toSingleJobResponse(job *types.Job) (*api.JobResponse, error) { + workload, err := job.BuildWorkloadResponse() + if err != nil { + return nil, err + } + + var failed *string + if job.FailedReason != "" { + failed = &job.FailedReason + } + + return &api.JobResponse{ + Id: job.ID, + CreatedAt: job.CreatedAt, + CompletedAt: job.CompletedAt, + TriggeredBy: job.TriggeredBy, + Status: api.JobResponseStatus(job.Status), + FailedReason: failed, + Workload: *workload, + }, nil +} + func fqdn(peer *nbpeer.Peer, dnsDomain string) string { fqdn := peer.FQDN(dnsDomain) if fqdn == "" { diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index 8c8f1a7b2..9339c3541 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/netbirdio/management-integrations/integrations" + zonesManager "github.com/netbirdio/netbird/management/internals/modules/zones/manager" recordsManager "github.com/netbirdio/netbird/management/internals/modules/zones/records/manager" "github.com/netbirdio/netbird/management/internals/server/config" @@ -20,6 +21,7 @@ import ( "github.com/netbirdio/netbird/management/internals/modules/peers" ephemeral_manager "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/management/server" "github.com/netbirdio/netbird/management/server/account" @@ -72,11 +74,14 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee userManager := users.NewManager(store) permissionsManager := permissions.NewManager(store) settingsManager := settings.NewManager(store, userManager, integrations.NewManager(&activity.InMemoryEventStore{}), permissionsManager) + peersManager := peers.NewManager(store, permissionsManager) + + jobManager := job.NewJobManager(nil, store, peersManager) ctx := context.Background() requestBuffer := server.NewAccountRequestBuffer(ctx, store) - networkMapController := controller.NewController(ctx, store, metrics, peersUpdateManager, requestBuffer, server.MockIntegratedValidator{}, settingsManager, "", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peers.NewManager(store, permissionsManager)), &config.Config{}) - am, err := server.BuildManager(ctx, nil, store, networkMapController, nil, "", &activity.InMemoryEventStore{}, geoMock, false, validatorMock, metrics, proxyController, settingsManager, permissionsManager, false) + networkMapController := controller.NewController(ctx, store, metrics, peersUpdateManager, requestBuffer, server.MockIntegratedValidator{}, settingsManager, "", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peersManager), &config.Config{}) + am, err := server.BuildManager(ctx, nil, store, networkMapController, jobManager, nil, "", &activity.InMemoryEventStore{}, geoMock, false, validatorMock, metrics, proxyController, settingsManager, permissionsManager, false) if err != nil { t.Fatalf("Failed to create manager: %v", err) } @@ -94,7 +99,6 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee resourcesManagerMock := resources.NewManagerMock() routersManagerMock := routers.NewManagerMock() groupsManagerMock := groups.NewManagerMock() - peersManager := peers.NewManager(store, permissionsManager) customZonesManager := zonesManager.NewManager(store, am, permissionsManager, "") zoneRecordsManager := recordsManager.NewManager(store, am, permissionsManager) diff --git a/management/server/identity_provider_test.go b/management/server/identity_provider_test.go index 78dcbeb74..9fce6b9c0 100644 --- a/management/server/identity_provider_test.go +++ b/management/server/identity_provider_test.go @@ -21,6 +21,7 @@ import ( "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/idp" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/settings" "github.com/netbirdio/netbird/management/server/store" @@ -80,11 +81,12 @@ func createManagerWithEmbeddedIdP(t testing.TB) (*DefaultAccountManager, *update AnyTimes() permissionsManager := permissions.NewManager(testStore) + peersManager := peers.NewManager(testStore, permissionsManager) updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, testStore) - networkMapController := controller.NewController(ctx, testStore, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(testStore, peers.NewManager(testStore, permissionsManager)), &config.Config{}) - manager, err := BuildManager(ctx, &config.Config{}, testStore, networkMapController, idpManager, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + networkMapController := controller.NewController(ctx, testStore, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(testStore, peersManager), &config.Config{}) + manager, err := BuildManager(ctx, &config.Config{}, testStore, networkMapController, job.NewJobManager(nil, testStore, peersManager), idpManager, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) if err != nil { return nil, nil, err } diff --git a/management/server/job/channel.go b/management/server/job/channel.go new file mode 100644 index 000000000..c4dc98a68 --- /dev/null +++ b/management/server/job/channel.go @@ -0,0 +1,59 @@ +package job + +import ( + "context" + "errors" + "fmt" + "sync" + "time" +) + +// todo consider the channel buffer size when we allow to run multiple jobs +const jobChannelBuffer = 1 + +var ( + ErrJobChannelClosed = errors.New("job channel closed") +) + +type Channel struct { + events chan *Event + once sync.Once +} + +func NewChannel() *Channel { + jc := &Channel{ + events: make(chan *Event, jobChannelBuffer), + } + + return jc +} + +func (jc *Channel) AddEvent(ctx context.Context, responseWait time.Duration, event *Event) error { + select { + case <-ctx.Done(): + return ctx.Err() + // todo: timeout is handled in the wrong place. If the peer does not respond with the job response, the server does not clean it up from the pending jobs and cannot apply a new job + case <-time.After(responseWait): + return fmt.Errorf("failed to add the event to the channel") + case jc.events <- event: + } + return nil +} + +func (jc *Channel) Close() { + jc.once.Do(func() { + close(jc.events) + }) +} + +func (jc *Channel) Event(ctx context.Context) (*Event, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case job, open := <-jc.events: + if !open { + return nil, ErrJobChannelClosed + } + return job, nil + } +} diff --git a/management/server/job/manager.go b/management/server/job/manager.go new file mode 100644 index 000000000..0b183ac39 --- /dev/null +++ b/management/server/job/manager.go @@ -0,0 +1,182 @@ +package job + +import ( + "context" + "fmt" + "sync" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/management/internals/modules/peers" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/telemetry" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/management/proto" +) + +type Event struct { + PeerID string + Request *proto.JobRequest + Response *proto.JobResponse +} + +type Manager struct { + mu *sync.RWMutex + jobChannels map[string]*Channel // per-peer job streams + pending map[string]*Event // jobID → event + responseWait time.Duration + metrics telemetry.AppMetrics + Store store.Store + peersManager peers.Manager +} + +func NewJobManager(metrics telemetry.AppMetrics, store store.Store, peersManager peers.Manager) *Manager { + + return &Manager{ + jobChannels: make(map[string]*Channel), + pending: make(map[string]*Event), + responseWait: 5 * time.Minute, + metrics: metrics, + mu: &sync.RWMutex{}, + Store: store, + peersManager: peersManager, + } +} + +// CreateJobChannel creates or replaces a channel for a peer +func (jm *Manager) CreateJobChannel(ctx context.Context, accountID, peerID string) *Channel { + // all pending jobs stored in db for this peer should be failed + if err := jm.Store.MarkAllPendingJobsAsFailed(ctx, accountID, peerID, "Pending job cleanup: marked as failed automatically due to being stuck too long"); err != nil { + log.WithContext(ctx).Error(err.Error()) + } + + jm.mu.Lock() + defer jm.mu.Unlock() + + if ch, ok := jm.jobChannels[peerID]; ok { + ch.Close() + delete(jm.jobChannels, peerID) + } + + ch := NewChannel() + jm.jobChannels[peerID] = ch + return ch +} + +// SendJob sends a job to a peer and tracks it as pending +func (jm *Manager) SendJob(ctx context.Context, accountID, peerID string, req *proto.JobRequest) error { + jm.mu.RLock() + ch, ok := jm.jobChannels[peerID] + jm.mu.RUnlock() + if !ok { + return fmt.Errorf("peer %s has no channel", peerID) + } + + event := &Event{ + PeerID: peerID, + Request: req, + } + + jm.mu.Lock() + jm.pending[string(req.ID)] = event + jm.mu.Unlock() + + if err := ch.AddEvent(ctx, jm.responseWait, event); err != nil { + jm.cleanup(ctx, accountID, string(req.ID), err.Error()) + return err + } + + return nil +} + +// HandleResponse marks a job as finished and moves it to completed +func (jm *Manager) HandleResponse(ctx context.Context, resp *proto.JobResponse, peerKey string) error { + jm.mu.Lock() + defer jm.mu.Unlock() + + // todo: validate job ID and would be nice to use uuid text marshal instead of string + jobID := string(resp.ID) + + // todo: in this map has jobs for all peers in any account. Consider to validate the jobID association for the peer + event, ok := jm.pending[jobID] + if !ok { + return fmt.Errorf("job %s not found", jobID) + } + var job types.Job + // todo: ApplyResponse should be static. Any member value is unusable in this way + if err := job.ApplyResponse(resp); err != nil { + return fmt.Errorf("invalid job response: %v", err) + } + + peerID, err := jm.peersManager.GetPeerID(ctx, peerKey) + if err != nil { + return fmt.Errorf("failed to get peer ID: %v", err) + } + if peerID != event.PeerID { + return fmt.Errorf("peer ID mismatch: %s != %s", peerID, event.PeerID) + } + + // update or create the store for job response + err = jm.Store.CompletePeerJob(ctx, &job) + if err != nil { + return fmt.Errorf("failed to complete job %s: %v", jobID, err) + } + + delete(jm.pending, jobID) + return nil +} + +// CloseChannel closes a peer’s channel and cleans up its jobs +func (jm *Manager) CloseChannel(ctx context.Context, accountID, peerID string) { + jm.mu.Lock() + defer jm.mu.Unlock() + + if ch, ok := jm.jobChannels[peerID]; ok { + ch.Close() + delete(jm.jobChannels, peerID) + } + + for jobID, ev := range jm.pending { + if ev.PeerID == peerID { + // if the client disconnect and there is pending job then mark it as failed + if err := jm.Store.MarkPendingJobsAsFailed(ctx, accountID, peerID, jobID, "Time out peer disconnected"); err != nil { + log.WithContext(ctx).Errorf("failed to mark pending jobs as failed: %v", err) + } + delete(jm.pending, jobID) + } + } +} + +// cleanup removes a pending job safely +func (jm *Manager) cleanup(ctx context.Context, accountID, jobID string, reason string) { + jm.mu.Lock() + defer jm.mu.Unlock() + + if ev, ok := jm.pending[jobID]; ok { + if err := jm.Store.MarkPendingJobsAsFailed(ctx, accountID, ev.PeerID, jobID, reason); err != nil { + log.WithContext(ctx).Errorf("failed to mark pending jobs as failed: %v", err) + } + delete(jm.pending, jobID) + } +} + +func (jm *Manager) IsPeerConnected(peerID string) bool { + jm.mu.RLock() + defer jm.mu.RUnlock() + + _, ok := jm.jobChannels[peerID] + return ok +} + +func (jm *Manager) IsPeerHasPendingJobs(peerID string) bool { + jm.mu.RLock() + defer jm.mu.RUnlock() + + for _, ev := range jm.pending { + if ev.PeerID == peerID { + return true + } + } + return false +} diff --git a/management/server/management_proto_test.go b/management/server/management_proto_test.go index cc302400f..090c99877 100644 --- a/management/server/management_proto_test.go +++ b/management/server/management_proto_test.go @@ -31,6 +31,7 @@ import ( "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/groups" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/settings" @@ -361,13 +362,15 @@ func startManagementForTest(t *testing.T, testFile string, config *config.Config AnyTimes() permissionsManager := permissions.NewManager(store) groupsManager := groups.NewManagerMock() + peersManager := peers.NewManager(store, permissionsManager) + jobManager := job.NewJobManager(nil, store, peersManager) updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, store) ephemeralMgr := manager.NewEphemeralManager(store, peers.NewManager(store, permissionsManager)) networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), ephemeralMgr, config) - accountManager, err := BuildManager(ctx, nil, store, networkMapController, nil, "", + accountManager, err := BuildManager(ctx, nil, store, networkMapController, jobManager, nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) if err != nil { @@ -381,7 +384,7 @@ func startManagementForTest(t *testing.T, testFile string, config *config.Config return nil, nil, "", cleanup, err } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, MockIntegratedValidator{}, networkMapController, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, MockIntegratedValidator{}, networkMapController, nil) if err != nil { return nil, nil, "", cleanup, err } diff --git a/management/server/management_test.go b/management/server/management_test.go index ace372509..0864baadf 100644 --- a/management/server/management_test.go +++ b/management/server/management_test.go @@ -30,6 +30,7 @@ import ( "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/groups" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/settings" "github.com/netbirdio/netbird/management/server/store" @@ -202,6 +203,8 @@ func startServer( AnyTimes() permissionsManager := permissions.NewManager(str) + peersManager := peers.NewManager(str, permissionsManager) + jobManager := job.NewJobManager(nil, str, peersManager) ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) @@ -213,6 +216,7 @@ func startServer( nil, str, networkMapController, + jobManager, nil, "", eventStore, @@ -237,6 +241,7 @@ func startServer( config, accountManager, settingsMockManager, + jobManager, secretsManager, nil, nil, diff --git a/management/server/mock_server/account_mock.go b/management/server/mock_server/account_mock.go index 422829eba..f5caa3bbc 100644 --- a/management/server/mock_server/account_mock.go +++ b/management/server/mock_server/account_mock.go @@ -135,6 +135,29 @@ type MockAccountManager struct { CreateIdentityProviderFunc func(ctx context.Context, accountID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) UpdateIdentityProviderFunc func(ctx context.Context, accountID, idpID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) DeleteIdentityProviderFunc func(ctx context.Context, accountID, idpID, userID string) error + CreatePeerJobFunc func(ctx context.Context, accountID, peerID, userID string, job *types.Job) error + GetAllPeerJobsFunc func(ctx context.Context, accountID, userID, peerID string) ([]*types.Job, error) + GetPeerJobByIDFunc func(ctx context.Context, accountID, userID, peerID, jobID string) (*types.Job, error) +} + +func (am *MockAccountManager) CreatePeerJob(ctx context.Context, accountID, peerID, userID string, job *types.Job) error { + if am.CreatePeerJobFunc != nil { + return am.CreatePeerJobFunc(ctx, accountID, peerID, userID, job) + } + return status.Errorf(codes.Unimplemented, "method CreatePeerJob is not implemented") +} + +func (am *MockAccountManager) GetAllPeerJobs(ctx context.Context, accountID, userID, peerID string) ([]*types.Job, error) { + if am.GetAllPeerJobsFunc != nil { + return am.GetAllPeerJobsFunc(ctx, accountID, userID, peerID) + } + return nil, status.Errorf(codes.Unimplemented, "method GetAllPeerJobs is not implemented") +} +func (am *MockAccountManager) GetPeerJobByID(ctx context.Context, accountID, userID, peerID, jobID string) (*types.Job, error) { + if am.GetPeerJobByIDFunc != nil { + return am.GetPeerJobByIDFunc(ctx, accountID, userID, peerID, jobID) + } + return nil, status.Errorf(codes.Unimplemented, "method GetPeerJobByID is not implemented") } func (am *MockAccountManager) CreateGroup(ctx context.Context, accountID, userID string, group *types.Group) error { diff --git a/management/server/nameserver_test.go b/management/server/nameserver_test.go index 955c6b0ef..0d781e0d4 100644 --- a/management/server/nameserver_test.go +++ b/management/server/nameserver_test.go @@ -18,6 +18,7 @@ import ( "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/settings" @@ -790,13 +791,14 @@ func createNSManager(t *testing.T) (*DefaultAccountManager, error) { AnyTimes() permissionsManager := permissions.NewManager(store) + peersManager := peers.NewManager(store, permissionsManager) ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, store) networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peers.NewManager(store, permissionsManager)), &config.Config{}) - return BuildManager(context.Background(), nil, store, networkMapController, nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + return BuildManager(context.Background(), nil, store, networkMapController, job.NewJobManager(nil, store, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) } func createNSStore(t *testing.T) (store.Store, error) { diff --git a/management/server/peer.go b/management/server/peer.go index 977bd52af..d6eb2aecd 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -31,6 +31,8 @@ import ( "github.com/netbirdio/netbird/shared/management/status" ) +const remoteJobsMinVer = "0.64.0" + // GetPeers returns a list of peers under the given account filtering out peers that do not belong to a user if // the current user is not an admin. func (am *DefaultAccountManager) GetPeers(ctx context.Context, accountID, userID, nameFilter, ipFilter string) ([]*nbpeer.Peer, error) { @@ -324,6 +326,134 @@ func (am *DefaultAccountManager) UpdatePeer(ctx context.Context, accountID, user return peer, nil } +func (am *DefaultAccountManager) CreatePeerJob(ctx context.Context, accountID, peerID, userID string, job *types.Job) error { + allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.RemoteJobs, operations.Create) + if err != nil { + return status.NewPermissionValidationError(err) + } + if !allowed { + return status.NewPermissionDeniedError() + } + + p, err := am.Store.GetPeerByID(ctx, store.LockingStrengthNone, accountID, peerID) + if err != nil { + return err + } + + if p.AccountID != accountID { + return status.NewPeerNotPartOfAccountError() + } + + meetMinVer, err := posture.MeetsMinVersion(remoteJobsMinVer, p.Meta.WtVersion) + if !strings.Contains(p.Meta.WtVersion, "dev") && (!meetMinVer || err != nil) { + return status.Errorf(status.PreconditionFailed, "peer version %s does not meet the minimum required version %s for remote jobs", p.Meta.WtVersion, remoteJobsMinVer) + } + + if !am.jobManager.IsPeerConnected(peerID) { + return status.Errorf(status.BadRequest, "peer not connected") + } + + // check if already has pending jobs + // todo: The job checks here are not protected. The user can run this function from multiple threads, + // and each thread can think there is no job yet. This means entries in the pending job map will be overwritten, + // and only one will be kept, but potentially another one will overwrite it in the queue. + if am.jobManager.IsPeerHasPendingJobs(peerID) { + return status.Errorf(status.BadRequest, "peer already has pending job") + } + + jobStream, err := job.ToStreamJobRequest() + if err != nil { + return status.Errorf(status.BadRequest, "invalid job request %v", err) + } + + // try sending job first + if err := am.jobManager.SendJob(ctx, accountID, peerID, jobStream); err != nil { + return status.Errorf(status.Internal, "failed to send job: %v", err) + } + + var peer *nbpeer.Peer + var eventsToStore func() + + // persist job in DB only if send succeeded + err = am.Store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + peer, err = transaction.GetPeerByID(ctx, store.LockingStrengthUpdate, accountID, peerID) + if err != nil { + return err + } + if err := transaction.CreatePeerJob(ctx, job); err != nil { + return err + } + + jobMeta := map[string]any{ + "for_peer_name": peer.Name, + "job_type": job.Workload.Type, + } + + eventsToStore = func() { + am.StoreEvent(ctx, userID, peer.ID, accountID, activity.JobCreatedByUser, jobMeta) + } + return nil + }) + if err != nil { + return err + } + eventsToStore() + return nil +} + +func (am *DefaultAccountManager) GetAllPeerJobs(ctx context.Context, accountID, userID, peerID string) ([]*types.Job, error) { + // todo: Create permissions for job + allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.RemoteJobs, operations.Read) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !allowed { + return nil, status.NewPermissionDeniedError() + } + + peerAccountID, err := am.Store.GetAccountIDByPeerID(ctx, store.LockingStrengthNone, peerID) + if err != nil { + return nil, err + } + + if peerAccountID != accountID { + return nil, status.NewPeerNotPartOfAccountError() + } + + accountJobs, err := am.Store.GetPeerJobs(ctx, accountID, peerID) + if err != nil { + return nil, err + } + + return accountJobs, nil +} + +func (am *DefaultAccountManager) GetPeerJobByID(ctx context.Context, accountID, userID, peerID, jobID string) (*types.Job, error) { + allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.RemoteJobs, operations.Read) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !allowed { + return nil, status.NewPermissionDeniedError() + } + + peerAccountID, err := am.Store.GetAccountIDByPeerID(ctx, store.LockingStrengthNone, peerID) + if err != nil { + return nil, err + } + + if peerAccountID != accountID { + return nil, status.NewPeerNotPartOfAccountError() + } + + job, err := am.Store.GetPeerJobByID(ctx, accountID, jobID) + if err != nil { + return nil, err + } + + return job, nil +} + // DeletePeer removes peer from the account by its IP func (am *DefaultAccountManager) DeletePeer(ctx context.Context, accountID, peerID, userID string) error { allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Peers, operations.Delete) diff --git a/management/server/peer_test.go b/management/server/peer_test.go index 0160ff586..3846a3e85 100644 --- a/management/server/peer_test.go +++ b/management/server/peer_test.go @@ -34,6 +34,7 @@ import ( "github.com/netbirdio/netbird/management/internals/shared/grpc" "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/settings" "github.com/netbirdio/netbird/shared/management/status" @@ -1289,13 +1290,14 @@ func Test_RegisterPeerByUser(t *testing.T) { t.Cleanup(ctrl.Finish) settingsMockManager := settings.NewMockManager(ctrl) permissionsManager := permissions.NewManager(s) + peersManager := peers.NewManager(s, permissionsManager) ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, s) networkMapController := controller.NewController(ctx, s, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(s, peers.NewManager(s, permissionsManager)), &config.Config{}) - am, err := BuildManager(context.Background(), nil, s, networkMapController, nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + am, err := BuildManager(context.Background(), nil, s, networkMapController, job.NewJobManager(nil, s, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) assert.NoError(t, err) existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" @@ -1374,13 +1376,14 @@ func Test_RegisterPeerBySetupKey(t *testing.T) { Return(&types.ExtraSettings{}, nil). AnyTimes() permissionsManager := permissions.NewManager(s) + peersManager := peers.NewManager(s, permissionsManager) ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, s) networkMapController := controller.NewController(ctx, s, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(s, peers.NewManager(s, permissionsManager)), &config.Config{}) - am, err := BuildManager(context.Background(), nil, s, networkMapController, nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + am, err := BuildManager(context.Background(), nil, s, networkMapController, job.NewJobManager(nil, s, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) assert.NoError(t, err) existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" @@ -1527,13 +1530,14 @@ func Test_RegisterPeerRollbackOnFailure(t *testing.T) { settingsMockManager := settings.NewMockManager(ctrl) permissionsManager := permissions.NewManager(s) + peersManager := peers.NewManager(s, permissionsManager) ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, s) networkMapController := controller.NewController(ctx, s, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(s, peers.NewManager(s, permissionsManager)), &config.Config{}) - am, err := BuildManager(context.Background(), nil, s, networkMapController, nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + am, err := BuildManager(context.Background(), nil, s, networkMapController, job.NewJobManager(nil, s, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) assert.NoError(t, err) existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" @@ -1607,13 +1611,14 @@ func Test_LoginPeer(t *testing.T) { Return(&types.ExtraSettings{}, nil). AnyTimes() permissionsManager := permissions.NewManager(s) + peersManager := peers.NewManager(s, permissionsManager) ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, s) networkMapController := controller.NewController(ctx, s, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(s, peers.NewManager(s, permissionsManager)), &config.Config{}) - am, err := BuildManager(context.Background(), nil, s, networkMapController, nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + am, err := BuildManager(context.Background(), nil, s, networkMapController, job.NewJobManager(nil, s, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) assert.NoError(t, err) existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" diff --git a/management/server/permissions/modules/module.go b/management/server/permissions/modules/module.go index 0ae10d521..f19675d27 100644 --- a/management/server/permissions/modules/module.go +++ b/management/server/permissions/modules/module.go @@ -3,35 +3,37 @@ package modules type Module string const ( - Networks Module = "networks" - Peers Module = "peers" - Groups Module = "groups" - Settings Module = "settings" - Accounts Module = "accounts" - Dns Module = "dns" - Nameservers Module = "nameservers" - Events Module = "events" - Policies Module = "policies" - Routes Module = "routes" - Users Module = "users" - SetupKeys Module = "setup_keys" - Pats Module = "pats" + Networks Module = "networks" + Peers Module = "peers" + RemoteJobs Module = "remote_jobs" + Groups Module = "groups" + Settings Module = "settings" + Accounts Module = "accounts" + Dns Module = "dns" + Nameservers Module = "nameservers" + Events Module = "events" + Policies Module = "policies" + Routes Module = "routes" + Users Module = "users" + SetupKeys Module = "setup_keys" + Pats Module = "pats" IdentityProviders Module = "identity_providers" ) var All = map[Module]struct{}{ - Networks: {}, - Peers: {}, - Groups: {}, - Settings: {}, - Accounts: {}, - Dns: {}, - Nameservers: {}, - Events: {}, - Policies: {}, - Routes: {}, - Users: {}, - SetupKeys: {}, - Pats: {}, + Networks: {}, + Peers: {}, + RemoteJobs: {}, + Groups: {}, + Settings: {}, + Accounts: {}, + Dns: {}, + Nameservers: {}, + Events: {}, + Policies: {}, + Routes: {}, + Users: {}, + SetupKeys: {}, + Pats: {}, IdentityProviders: {}, } diff --git a/management/server/route_test.go b/management/server/route_test.go index 6dc8c4cf4..d4882eff8 100644 --- a/management/server/route_test.go +++ b/management/server/route_test.go @@ -21,6 +21,7 @@ import ( "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" networkTypes "github.com/netbirdio/netbird/management/server/networks/types" @@ -1289,13 +1290,14 @@ func createRouterManager(t *testing.T) (*DefaultAccountManager, *update_channel. Return(&types.ExtraSettings{}, nil) permissionsManager := permissions.NewManager(store) + peersManager := peers.NewManager(store, permissionsManager) ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, store) networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peers.NewManager(store, permissionsManager)), &config.Config{}) - am, err := BuildManager(context.Background(), nil, store, networkMapController, nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + am, err := BuildManager(context.Background(), nil, store, networkMapController, job.NewJobManager(nil, store, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) if err != nil { return nil, nil, err } diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 7d71030eb..0eb687dbb 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -43,14 +43,15 @@ import ( ) const ( - storeSqliteFileName = "store.db" - idQueryCondition = "id = ?" - keyQueryCondition = "key = ?" - mysqlKeyQueryCondition = "`key` = ?" - accountAndIDQueryCondition = "account_id = ? and id = ?" - accountAndIDsQueryCondition = "account_id = ? AND id IN ?" - accountIDCondition = "account_id = ?" - peerNotFoundFMT = "peer %s not found" + storeSqliteFileName = "store.db" + idQueryCondition = "id = ?" + keyQueryCondition = "key = ?" + mysqlKeyQueryCondition = "`key` = ?" + accountAndIDQueryCondition = "account_id = ? and id = ?" + accountAndPeerIDQueryCondition = "account_id = ? and peer_id = ?" + accountAndIDsQueryCondition = "account_id = ? AND id IN ?" + accountIDCondition = "account_id = ?" + peerNotFoundFMT = "peer %s not found" pgMaxConnections = 30 pgMinConnections = 1 @@ -125,7 +126,7 @@ func NewSqlStore(ctx context.Context, db *gorm.DB, storeEngine types.Engine, met &types.Account{}, &types.Policy{}, &types.PolicyRule{}, &route.Route{}, &nbdns.NameServerGroup{}, &installation{}, &types.ExtraSettings{}, &posture.Checks{}, &nbpeer.NetworkAddress{}, &networkTypes.Network{}, &routerTypes.NetworkRouter{}, &resourceTypes.NetworkResource{}, &types.AccountOnboarding{}, - &zones.Zone{}, &records.Record{}, + &types.Job{}, &zones.Zone{}, &records.Record{}, ) if err != nil { return nil, fmt.Errorf("auto migratePreAuto: %w", err) @@ -144,6 +145,97 @@ func GetKeyQueryCondition(s *SqlStore) string { return keyQueryCondition } +// SaveJob persists a job in DB +func (s *SqlStore) CreatePeerJob(ctx context.Context, job *types.Job) error { + result := s.db.Create(job) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to create job in store: %s", result.Error) + return status.Errorf(status.Internal, "failed to create job in store") + } + return nil +} + +func (s *SqlStore) CompletePeerJob(ctx context.Context, job *types.Job) error { + result := s.db. + Model(&types.Job{}). + Where(idQueryCondition, job.ID). + Updates(job) + + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to update job in store: %s", result.Error) + return status.Errorf(status.Internal, "failed to update job in store") + } + return nil +} + +// job was pending for too long and has been cancelled +func (s *SqlStore) MarkPendingJobsAsFailed(ctx context.Context, accountID, peerID, jobID, reason string) error { + now := time.Now().UTC() + result := s.db. + Model(&types.Job{}). + Where(accountAndPeerIDQueryCondition+" AND id = ?"+" AND status = ?", accountID, peerID, jobID, types.JobStatusPending). + Updates(types.Job{ + Status: types.JobStatusFailed, + FailedReason: reason, + CompletedAt: &now, + }) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to mark pending jobs as Failed job in store: %s", result.Error) + return status.Errorf(status.Internal, "failed to mark pending job as Failed in store") + } + return nil +} + +// job was pending for too long and has been cancelled +func (s *SqlStore) MarkAllPendingJobsAsFailed(ctx context.Context, accountID, peerID, reason string) error { + now := time.Now().UTC() + result := s.db. + Model(&types.Job{}). + Where(accountAndPeerIDQueryCondition+" AND status = ?", accountID, peerID, types.JobStatusPending). + Updates(types.Job{ + Status: types.JobStatusFailed, + FailedReason: reason, + CompletedAt: &now, + }) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to mark pending jobs as Failed job in store: %s", result.Error) + return status.Errorf(status.Internal, "failed to mark pending job as Failed in store") + } + return nil +} + +// GetJobByID fetches job by ID +func (s *SqlStore) GetPeerJobByID(ctx context.Context, accountID, jobID string) (*types.Job, error) { + var job types.Job + err := s.db. + Where(accountAndIDQueryCondition, accountID, jobID). + First(&job).Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, status.Errorf(status.NotFound, "job %s not found", jobID) + } + if err != nil { + log.WithContext(ctx).Errorf("failed to fetch job from store: %s", err) + return nil, err + } + return &job, nil +} + +// get all jobs +func (s *SqlStore) GetPeerJobs(ctx context.Context, accountID, peerID string) ([]*types.Job, error) { + var jobs []*types.Job + err := s.db. + Where(accountAndPeerIDQueryCondition, accountID, peerID). + Order("created_at DESC"). + Find(&jobs).Error + + if err != nil { + log.WithContext(ctx).Errorf("failed to fetch jobs from store: %s", err) + return nil, err + } + + return jobs, nil +} + // AcquireGlobalLock acquires global lock across all the accounts and returns a function that releases the lock func (s *SqlStore) AcquireGlobalLock(ctx context.Context) (unlock func()) { log.WithContext(ctx).Tracef("acquiring global lock") @@ -4363,3 +4455,23 @@ func (s *SqlStore) DeleteZoneDNSRecords(ctx context.Context, accountID, zoneID s return nil } + +func (s *SqlStore) GetPeerIDByKey(ctx context.Context, lockStrength LockingStrength, key string) (string, error) { + tx := s.db + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var peerID string + result := tx.Model(&nbpeer.Peer{}). + Select("id"). + Where(GetKeyQueryCondition(s), key). + Limit(1). + Scan(&peerID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to get peer ID by key: %s", result.Error) + return "", status.Errorf(status.Internal, "failed to get peer ID by key") + } + + return peerID, nil +} diff --git a/management/server/store/store.go b/management/server/store/store.go index 3838b235e..02c746592 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -226,6 +226,13 @@ type Store interface { GetZoneDNSRecords(ctx context.Context, lockStrength LockingStrength, accountID, zoneID string) ([]*records.Record, error) GetZoneDNSRecordsByName(ctx context.Context, lockStrength LockingStrength, accountID, zoneID, name string) ([]*records.Record, error) DeleteZoneDNSRecords(ctx context.Context, accountID, zoneID string) error + CreatePeerJob(ctx context.Context, job *types.Job) error + CompletePeerJob(ctx context.Context, job *types.Job) error + GetPeerJobByID(ctx context.Context, accountID, jobID string) (*types.Job, error) + GetPeerJobs(ctx context.Context, accountID, peerID string) ([]*types.Job, error) + MarkPendingJobsAsFailed(ctx context.Context, accountID, peerID, jobID, reason string) error + MarkAllPendingJobsAsFailed(ctx context.Context, accountID, peerID, reason string) error + GetPeerIDByKey(ctx context.Context, lockStrength LockingStrength, key string) (string, error) } const ( diff --git a/management/server/types/job.go b/management/server/types/job.go new file mode 100644 index 000000000..bad8f00ba --- /dev/null +++ b/management/server/types/job.go @@ -0,0 +1,228 @@ +package types + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/google/uuid" + + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/proto" + "github.com/netbirdio/netbird/shared/management/status" +) + +type JobStatus string + +const ( + JobStatusPending JobStatus = "pending" + JobStatusSucceeded JobStatus = "succeeded" + JobStatusFailed JobStatus = "failed" +) + +type JobType string + +const ( + JobTypeBundle JobType = "bundle" +) + +const ( + // MaxJobReasonLength is the maximum length allowed for job failure reasons + MaxJobReasonLength = 4096 +) + +type Job struct { + // ID is the primary identifier + ID string `gorm:"primaryKey"` + + // CreatedAt when job was created (UTC) + CreatedAt time.Time `gorm:"autoCreateTime"` + + // CompletedAt when job finished, null if still running + CompletedAt *time.Time + + // TriggeredBy user that triggered this job + TriggeredBy string `gorm:"index"` + + PeerID string `gorm:"index"` + + AccountID string `gorm:"index"` + + // Status of the job: pending, succeeded, failed + Status JobStatus `gorm:"index;type:varchar(50)"` + + // FailedReason describes why the job failed (if failed) + FailedReason string + + Workload Workload `gorm:"embedded;embeddedPrefix:workload_"` +} + +type Workload struct { + Type JobType `gorm:"column:workload_type;index;type:varchar(50)"` + Parameters json.RawMessage `gorm:"type:json"` + Result json.RawMessage `gorm:"type:json"` +} + +// NewJob creates a new job with default fields and validation +func NewJob(triggeredBy, accountID, peerID string, req *api.JobRequest) (*Job, error) { + if req == nil { + return nil, status.Errorf(status.BadRequest, "job request cannot be nil") + } + + // Determine job type + jobTypeStr, err := req.Workload.Discriminator() + if err != nil { + return nil, status.Errorf(status.BadRequest, "could not determine job type: %v", err) + } + jobType := JobType(jobTypeStr) + + if jobType == "" { + return nil, status.Errorf(status.BadRequest, "job type is required") + } + + var workload Workload + + switch jobType { + case JobTypeBundle: + if err := validateAndBuildBundleParams(req.Workload, &workload); err != nil { + return nil, status.Errorf(status.BadRequest, "%v", err) + } + default: + return nil, status.Errorf(status.BadRequest, "unsupported job type: %s", jobType) + } + + return &Job{ + ID: uuid.New().String(), + TriggeredBy: triggeredBy, + PeerID: peerID, + AccountID: accountID, + Status: JobStatusPending, + CreatedAt: time.Now().UTC(), + Workload: workload, + }, nil +} + +func (j *Job) BuildWorkloadResponse() (*api.WorkloadResponse, error) { + var wl api.WorkloadResponse + + switch j.Workload.Type { + case JobTypeBundle: + if err := j.buildBundleResponse(&wl); err != nil { + return nil, status.Errorf(status.Internal, "failed to process job: %v", err.Error()) + } + return &wl, nil + + default: + return nil, status.Errorf(status.InvalidArgument, "unknown job type: %v", j.Workload.Type) + } +} + +func (j *Job) buildBundleResponse(wl *api.WorkloadResponse) error { + var p api.BundleParameters + if err := json.Unmarshal(j.Workload.Parameters, &p); err != nil { + return fmt.Errorf("invalid parameters for bundle job: %w", err) + } + var r api.BundleResult + if err := json.Unmarshal(j.Workload.Result, &r); err != nil { + return fmt.Errorf("invalid result for bundle job: %w", err) + } + + if err := wl.FromBundleWorkloadResponse(api.BundleWorkloadResponse{ + Type: api.WorkloadTypeBundle, + Parameters: p, + Result: r, + }); err != nil { + return fmt.Errorf("unknown job parameters: %v", err) + } + return nil +} + +func validateAndBuildBundleParams(req api.WorkloadRequest, workload *Workload) error { + bundle, err := req.AsBundleWorkloadRequest() + if err != nil { + return fmt.Errorf("invalid parameters for bundle job") + } + // validate bundle_for_time <= 5 minutes if BundleFor is enabled + if bundle.Parameters.BundleFor && (bundle.Parameters.BundleForTime < 1 || bundle.Parameters.BundleForTime > 5) { + return fmt.Errorf("bundle_for_time must be between 1 and 5, got %d", bundle.Parameters.BundleForTime) + } + // validate log-file-count ≥ 1 and ≤ 1000 + if bundle.Parameters.LogFileCount < 1 || bundle.Parameters.LogFileCount > 1000 { + return fmt.Errorf("log-file-count must be between 1 and 1000, got %d", bundle.Parameters.LogFileCount) + } + + workload.Parameters, err = json.Marshal(bundle.Parameters) + if err != nil { + return fmt.Errorf("failed to marshal workload parameters: %w", err) + } + workload.Result = []byte("{}") + workload.Type = JobType(api.WorkloadTypeBundle) + + return nil +} + +// ApplyResponse validates and maps a proto.JobResponse into the Job fields. +func (j *Job) ApplyResponse(resp *proto.JobResponse) error { + if resp == nil { + return nil + } + + j.ID = string(resp.ID) + now := time.Now().UTC() + j.CompletedAt = &now + switch resp.Status { + case proto.JobStatus_succeeded: + j.Status = JobStatusSucceeded + case proto.JobStatus_failed: + j.Status = JobStatusFailed + if len(resp.Reason) > 0 { + reason := string(resp.Reason) + if len(resp.Reason) > MaxJobReasonLength { + reason = string(resp.Reason[:MaxJobReasonLength]) + "... (truncated)" + } + j.FailedReason = fmt.Sprintf("Client error: '%s'", reason) + } + return nil + default: + return fmt.Errorf("unexpected job status: %v", resp.Status) + } + + // Handle workload results (oneof) + var err error + switch r := resp.WorkloadResults.(type) { + case *proto.JobResponse_Bundle: + if j.Workload.Result, err = json.Marshal(r.Bundle); err != nil { + return fmt.Errorf("failed to marshal workload results: %w", err) + } + default: + return fmt.Errorf("unsupported workload response type: %T", r) + } + return nil +} + +func (j *Job) ToStreamJobRequest() (*proto.JobRequest, error) { + switch j.Workload.Type { + case JobTypeBundle: + return j.buildStreamBundleResponse() + default: + return nil, status.Errorf(status.InvalidArgument, "unknown job type: %v", j.Workload.Type) + } +} + +func (j *Job) buildStreamBundleResponse() (*proto.JobRequest, error) { + var p api.BundleParameters + if err := json.Unmarshal(j.Workload.Parameters, &p); err != nil { + return nil, fmt.Errorf("invalid parameters for bundle job: %w", err) + } + return &proto.JobRequest{ + ID: []byte(j.ID), + WorkloadParameters: &proto.JobRequest_Bundle{ + Bundle: &proto.BundleParameters{ + BundleFor: p.BundleFor, + BundleForTime: int64(p.BundleForTime), + LogFileCount: int32(p.LogFileCount), + Anonymize: p.Anonymize, + }, + }, + }, nil +} diff --git a/shared/management/client/client.go b/shared/management/client/client.go index 3126bcd1f..b92c636c5 100644 --- a/shared/management/client/client.go +++ b/shared/management/client/client.go @@ -14,6 +14,7 @@ import ( type Client interface { io.Closer Sync(ctx context.Context, sysInfo *system.Info, msgHandler func(msg *proto.SyncResponse) error) error + Job(ctx context.Context, msgHandler func(msg *proto.JobRequest) *proto.JobResponse) error GetServerPublicKey() (*wgtypes.Key, error) Register(serverKey wgtypes.Key, setupKey string, jwtToken string, sysInfo *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) Login(serverKey wgtypes.Key, sysInfo *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) diff --git a/shared/management/client/client_test.go b/shared/management/client/client_test.go index 64f6831f2..a11f863a7 100644 --- a/shared/management/client/client_test.go +++ b/shared/management/client/client_test.go @@ -18,12 +18,13 @@ import ( "google.golang.org/grpc/status" "github.com/netbirdio/management-integrations/integrations" + ephemeral_manager "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" "github.com/netbirdio/netbird/management/internals/controllers/network_map/controller" "github.com/netbirdio/netbird/management/internals/controllers/network_map/update_channel" "github.com/netbirdio/netbird/management/internals/modules/peers" - "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/client/system" "github.com/netbirdio/netbird/encryption" @@ -92,6 +93,7 @@ func startManagement(t *testing.T) (*grpc.Server, net.Listener) { peersManger := peers.NewManager(store, permissionsManagerMock) settingsManagerMock := settings.NewMockManager(ctrl) + jobManager := job.NewJobManager(nil, store, peersManger) ia, _ := integrations.NewIntegratedValidator(context.Background(), peersManger, settingsManagerMock, eventStore) @@ -117,8 +119,8 @@ func startManagement(t *testing.T) (*grpc.Server, net.Listener) { ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := mgmt.NewAccountRequestBuffer(ctx, store) - networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, mgmt.MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), manager.NewEphemeralManager(store, peersManger), config) - accountManager, err := mgmt.BuildManager(context.Background(), config, store, networkMapController, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false) + networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, mgmt.MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peersManger), config) + accountManager, err := mgmt.BuildManager(context.Background(), config, store, networkMapController, jobManager, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false) if err != nil { t.Fatal(err) } @@ -129,7 +131,7 @@ func startManagement(t *testing.T) (*grpc.Server, net.Listener) { if err != nil { t.Fatal(err) } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, mgmt.MockIntegratedValidator{}, networkMapController, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, mgmt.MockIntegratedValidator{}, networkMapController, nil) if err != nil { t.Fatal(err) } diff --git a/shared/management/client/grpc.go b/shared/management/client/grpc.go index 89860ac9b..e9dbae892 100644 --- a/shared/management/client/grpc.go +++ b/shared/management/client/grpc.go @@ -12,6 +12,7 @@ import ( gstatus "google.golang.org/grpc/status" "github.com/cenkalti/backoff/v4" + "github.com/google/uuid" log "github.com/sirupsen/logrus" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" "google.golang.org/grpc" @@ -111,8 +112,26 @@ func (c *GrpcClient) ready() bool { // Sync wraps the real client's Sync endpoint call and takes care of retries and encryption/decryption of messages // Blocking request. The result will be sent via msgHandler callback function func (c *GrpcClient) Sync(ctx context.Context, sysInfo *system.Info, msgHandler func(msg *proto.SyncResponse) error) error { - backOff := defaultBackoff(ctx) + return c.withMgmtStream(ctx, func(ctx context.Context, serverPubKey wgtypes.Key) error { + return c.handleSyncStream(ctx, serverPubKey, sysInfo, msgHandler) + }) +} +// Job wraps the real client's Job endpoint call and takes care of retries and encryption/decryption of messages +// Blocking request. The result will be sent via msgHandler callback function +func (c *GrpcClient) Job(ctx context.Context, msgHandler func(msg *proto.JobRequest) *proto.JobResponse) error { + return c.withMgmtStream(ctx, func(ctx context.Context, serverPubKey wgtypes.Key) error { + return c.handleJobStream(ctx, serverPubKey, msgHandler) + }) +} + +// withMgmtStream runs a streaming operation against the ManagementService +// It takes care of retries, connection readiness, and fetching server public key. +func (c *GrpcClient) withMgmtStream( + ctx context.Context, + handler func(ctx context.Context, serverPubKey wgtypes.Key) error, +) error { + backOff := defaultBackoff(ctx) operation := func() error { log.Debugf("management connection state %v", c.conn.GetState()) connState := c.conn.GetState() @@ -130,7 +149,7 @@ func (c *GrpcClient) Sync(ctx context.Context, sysInfo *system.Info, msgHandler return err } - return c.handleStream(ctx, *serverPubKey, sysInfo, msgHandler, backOff) + return handler(ctx, *serverPubKey) } err := backoff.Retry(operation, backOff) @@ -141,12 +160,151 @@ func (c *GrpcClient) Sync(ctx context.Context, sysInfo *system.Info, msgHandler return err } -func (c *GrpcClient) handleStream(ctx context.Context, serverPubKey wgtypes.Key, sysInfo *system.Info, - msgHandler func(msg *proto.SyncResponse) error, backOff backoff.BackOff) error { +func (c *GrpcClient) handleJobStream( + ctx context.Context, + serverPubKey wgtypes.Key, + msgHandler func(msg *proto.JobRequest) *proto.JobResponse, +) error { ctx, cancelStream := context.WithCancel(ctx) defer cancelStream() - stream, err := c.connectToStream(ctx, serverPubKey, sysInfo) + stream, err := c.realClient.Job(ctx) + if err != nil { + log.Errorf("failed to open job stream: %v", err) + return err + } + + // Handshake with the server + if err := c.sendHandshake(ctx, stream, serverPubKey); err != nil { + return err + } + + log.Debug("job stream handshake sent successfully") + + // Main loop: receive, process, respond + for { + jobReq, err := c.receiveJobRequest(ctx, stream, serverPubKey) + if err != nil { + c.notifyDisconnected(err) + if s, ok := gstatus.FromError(err); ok { + switch s.Code() { + case codes.PermissionDenied: + return backoff.Permanent(err) // unrecoverable error, propagate to the upper layer + case codes.Canceled: + log.Debugf("management connection context has been canceled, this usually indicates shutdown") + return err + case codes.Unimplemented: + log.Warn("Job feature is not supported by the current management server version. " + + "Please update the management service to use this feature.") + return nil + default: + log.Warnf("disconnected from the Management service but will retry silently. Reason: %v", err) + return err + } + } else { + // non-gRPC error + log.Warnf("disconnected from the Management service but will retry silently. Reason: %v", err) + return err + } + } + + if jobReq == nil || len(jobReq.ID) == 0 { + log.Debug("received unknown or empty job request, skipping") + continue + } + + log.Infof("received a new job from the management server (ID: %s)", jobReq.ID) + jobResp := c.processJobRequest(ctx, jobReq, msgHandler) + if err := c.sendJobResponse(ctx, stream, serverPubKey, jobResp); err != nil { + return err + } + } +} + +// sendHandshake sends the initial handshake message +func (c *GrpcClient) sendHandshake(ctx context.Context, stream proto.ManagementService_JobClient, serverPubKey wgtypes.Key) error { + handshakeReq := &proto.JobRequest{ + ID: []byte(uuid.New().String()), + } + encHello, err := encryption.EncryptMessage(serverPubKey, c.key, handshakeReq) + if err != nil { + log.Errorf("failed to encrypt handshake message: %v", err) + return err + } + return stream.Send(&proto.EncryptedMessage{ + WgPubKey: c.key.PublicKey().String(), + Body: encHello, + }) +} + +// receiveJobRequest waits for and decrypts a job request +func (c *GrpcClient) receiveJobRequest( + ctx context.Context, + stream proto.ManagementService_JobClient, + serverPubKey wgtypes.Key, +) (*proto.JobRequest, error) { + encryptedMsg, err := stream.Recv() + if err != nil { + return nil, err + } + + jobReq := &proto.JobRequest{} + if err := encryption.DecryptMessage(serverPubKey, c.key, encryptedMsg.Body, jobReq); err != nil { + log.Warnf("failed to decrypt job request: %v", err) + return nil, err + } + + return jobReq, nil +} + +// processJobRequest executes the handler and ensures a valid response +func (c *GrpcClient) processJobRequest( + ctx context.Context, + jobReq *proto.JobRequest, + msgHandler func(msg *proto.JobRequest) *proto.JobResponse, +) *proto.JobResponse { + jobResp := msgHandler(jobReq) + if jobResp == nil { + jobResp = &proto.JobResponse{ + ID: jobReq.ID, + Status: proto.JobStatus_failed, + Reason: []byte("handler returned nil response"), + } + log.Warnf("job handler returned nil for job %s", string(jobReq.ID)) + } + return jobResp +} + +// sendJobResponse encrypts and sends a job response +func (c *GrpcClient) sendJobResponse( + ctx context.Context, + stream proto.ManagementService_JobClient, + serverPubKey wgtypes.Key, + resp *proto.JobResponse, +) error { + encResp, err := encryption.EncryptMessage(serverPubKey, c.key, resp) + if err != nil { + log.Errorf("failed to encrypt job response for job %s: %v", string(resp.ID), err) + return err + } + + if err := stream.Send(&proto.EncryptedMessage{ + WgPubKey: c.key.PublicKey().String(), + Body: encResp, + }); err != nil { + log.Errorf("failed to send job response for job %s: %v", string(resp.ID), err) + return err + } + + log.Infof("job response sent for job %s (status: %s)", string(resp.ID), resp.Status.String()) + return nil +} + +func (c *GrpcClient) handleSyncStream(ctx context.Context, serverPubKey wgtypes.Key, sysInfo *system.Info, msgHandler func(msg *proto.SyncResponse) error) error { + ctx, cancelStream := context.WithCancel(ctx) + defer cancelStream() + + stream, err := c.connectToSyncStream(ctx, serverPubKey, sysInfo) if err != nil { log.Debugf("failed to open Management Service stream: %s", err) if s, ok := gstatus.FromError(err); ok && s.Code() == codes.PermissionDenied { @@ -159,20 +317,22 @@ func (c *GrpcClient) handleStream(ctx context.Context, serverPubKey wgtypes.Key, c.notifyConnected() // blocking until error - err = c.receiveEvents(stream, serverPubKey, msgHandler) - // we need this reset because after a successful connection and a consequent error, backoff lib doesn't - // reset times and next try will start with a long delay - backOff.Reset() + err = c.receiveUpdatesEvents(stream, serverPubKey, msgHandler) if err != nil { c.notifyDisconnected(err) - s, _ := gstatus.FromError(err) - switch s.Code() { - case codes.PermissionDenied: - return backoff.Permanent(err) // unrecoverable error, propagate to the upper layer - case codes.Canceled: - log.Debugf("management connection context has been canceled, this usually indicates shutdown") - return nil - default: + if s, ok := gstatus.FromError(err); ok { + switch s.Code() { + case codes.PermissionDenied: + return backoff.Permanent(err) // unrecoverable error, propagate to the upper layer + case codes.Canceled: + log.Debugf("management connection context has been canceled, this usually indicates shutdown") + return nil + default: + log.Warnf("disconnected from the Management service but will retry silently. Reason: %v", err) + return err + } + } else { + // non-gRPC error log.Warnf("disconnected from the Management service but will retry silently. Reason: %v", err) return err } @@ -191,7 +351,7 @@ func (c *GrpcClient) GetNetworkMap(sysInfo *system.Info) (*proto.NetworkMap, err ctx, cancelStream := context.WithCancel(c.ctx) defer cancelStream() - stream, err := c.connectToStream(ctx, *serverPubKey, sysInfo) + stream, err := c.connectToSyncStream(ctx, *serverPubKey, sysInfo) if err != nil { log.Debugf("failed to open Management Service stream: %s", err) return nil, err @@ -224,7 +384,7 @@ func (c *GrpcClient) GetNetworkMap(sysInfo *system.Info) (*proto.NetworkMap, err return decryptedResp.GetNetworkMap(), nil } -func (c *GrpcClient) connectToStream(ctx context.Context, serverPubKey wgtypes.Key, sysInfo *system.Info) (proto.ManagementService_SyncClient, error) { +func (c *GrpcClient) connectToSyncStream(ctx context.Context, serverPubKey wgtypes.Key, sysInfo *system.Info) (proto.ManagementService_SyncClient, error) { req := &proto.SyncRequest{Meta: infoToMetaData(sysInfo)} myPrivateKey := c.key @@ -243,7 +403,7 @@ func (c *GrpcClient) connectToStream(ctx context.Context, serverPubKey wgtypes.K return sync, nil } -func (c *GrpcClient) receiveEvents(stream proto.ManagementService_SyncClient, serverPubKey wgtypes.Key, msgHandler func(msg *proto.SyncResponse) error) error { +func (c *GrpcClient) receiveUpdatesEvents(stream proto.ManagementService_SyncClient, serverPubKey wgtypes.Key, msgHandler func(msg *proto.SyncResponse) error) error { for { update, err := stream.Recv() if err == io.EOF { diff --git a/shared/management/client/mock.go b/shared/management/client/mock.go index 29006c9c3..ac96f7b36 100644 --- a/shared/management/client/mock.go +++ b/shared/management/client/mock.go @@ -20,6 +20,7 @@ type MockClient struct { GetPKCEAuthorizationFlowFunc func(serverKey wgtypes.Key) (*proto.PKCEAuthorizationFlow, error) SyncMetaFunc func(sysInfo *system.Info) error LogoutFunc func() error + JobFunc func(ctx context.Context, msgHandler func(msg *proto.JobRequest) *proto.JobResponse) error } func (m *MockClient) IsHealthy() bool { @@ -40,6 +41,13 @@ func (m *MockClient) Sync(ctx context.Context, sysInfo *system.Info, msgHandler return m.SyncFunc(ctx, sysInfo, msgHandler) } +func (m *MockClient) Job(ctx context.Context, msgHandler func(msg *proto.JobRequest) *proto.JobResponse) error { + if m.JobFunc == nil { + return nil + } + return m.JobFunc(ctx, msgHandler) +} + func (m *MockClient) GetServerPublicKey() (*wgtypes.Key, error) { if m.GetServerPublicKeyFunc == nil { return nil, nil diff --git a/shared/management/http/api/generate.sh b/shared/management/http/api/generate.sh index 2f24fd903..3770ea90f 100755 --- a/shared/management/http/api/generate.sh +++ b/shared/management/http/api/generate.sh @@ -11,6 +11,6 @@ fi old_pwd=$(pwd) script_path=$(dirname $(realpath "$0")) cd "$script_path" -go install github.com/deepmap/oapi-codegen/cmd/oapi-codegen@4a1477f6a8ba6ca8115cc23bb2fb67f0b9fca18e +go install github.com/oapi-codegen/oapi-codegen/v2/cmd/oapi-codegen@latest oapi-codegen --config cfg.yaml openapi.yml -cd "$old_pwd" \ No newline at end of file +cd "$old_pwd" diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 7b9451b15..29e81f15a 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -38,8 +38,128 @@ tags: description: Interact with and view information about identity providers. - name: Instance description: Instance setup and status endpoints for initial configuration. + - name: Jobs + description: Interact with and view information about remote jobs. + x-experimental: true + components: schemas: + WorkloadType: + type: string + description: | + Identifies the type of workload the job will execute. + Currently only `"bundle"` is supported. + enum: + - bundle + example: "bundle" + BundleParameters: + type: object + description: These parameters control what gets included in the bundle and how it is processed. + properties: + bundle_for: + type: boolean + description: Whether to generate a bundle for the given timeframe. + example: true + bundle_for_time: + type: integer + minimum: 1 + maximum: 5 + description: Time period in minutes for which to generate the bundle. + example: 2 + log_file_count: + type: integer + minimum: 1 + maximum: 1000 + description: Maximum number of log files to include in the bundle. + example: 100 + anonymize: + type: boolean + description: Whether sensitive data should be anonymized in the bundle. + example: false + required: + - bundle_for + - bundle_for_time + - log_file_count + - anonymize + BundleResult: + type: object + properties: + upload_key: + type: string + example: "upload_key_123" + nullable: true + BundleWorkloadRequest: + type: object + properties: + type: + $ref: '#/components/schemas/WorkloadType' + parameters: + $ref: '#/components/schemas/BundleParameters' + required: + - type + - parameters + BundleWorkloadResponse: + type: object + properties: + type: + $ref: '#/components/schemas/WorkloadType' + parameters: + $ref: '#/components/schemas/BundleParameters' + result: + $ref: '#/components/schemas/BundleResult' + required: + - type + - parameters + - result + WorkloadRequest: + oneOf: + - $ref: '#/components/schemas/BundleWorkloadRequest' + discriminator: + propertyName: type + mapping: + bundle: '#/components/schemas/BundleWorkloadRequest' + WorkloadResponse: + oneOf: + - $ref: '#/components/schemas/BundleWorkloadResponse' + discriminator: + propertyName: type + mapping: + bundle: '#/components/schemas/BundleWorkloadResponse' + JobRequest: + type: object + properties: + workload: + $ref: '#/components/schemas/WorkloadRequest' + required: + - workload + JobResponse: + type: object + properties: + id: + type: string + created_at: + type: string + format: date-time + completed_at: + type: string + format: date-time + nullable: true + triggered_by: + type: string + status: + type: string + enum: [pending, succeeded, failed] + failed_reason: + type: string + nullable: true + workload: + $ref: '#/components/schemas/WorkloadResponse' + required: + - id + - created_at + - status + - triggered_by + - workload Account: type: object properties: @@ -2554,6 +2674,110 @@ paths: content: { } '500': "$ref": "#/components/responses/internal_error" + /api/peers/{peerId}/jobs: + get: + summary: List Jobs + description: Retrieve all jobs for a given peer + tags: [ Jobs ] + security: + - BearerAuth: [] + - TokenAuth: [] + parameters: + - in: path + name: peerId + description: The unique identifier of a peer + required: true + schema: + type: string + responses: + '200': + description: List of jobs + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/JobResponse' + '400': + $ref: '#/components/responses/bad_request' + '401': + $ref: '#/components/responses/requires_authentication' + '403': + $ref: '#/components/responses/forbidden' + '500': + $ref: '#/components/responses/internal_error' + post: + summary: Create Job + description: Create a new job for a given peer + tags: [ Jobs ] + security: + - BearerAuth: [] + - TokenAuth: [] + parameters: + - in: path + name: peerId + description: The unique identifier of a peer + required: true + schema: + type: string + requestBody: + description: Create job request + content: + application/json: + schema: + $ref: '#/components/schemas/JobRequest' + required: true + responses: + '201': + description: Job created + content: + application/json: + schema: + $ref: '#/components/schemas/JobResponse' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '500': + "$ref": "#/components/responses/internal_error" + /api/peers/{peerId}/jobs/{jobId}: + get: + summary: Get Job + description: Retrieve details of a specific job + tags: [ Jobs ] + security: + - BearerAuth: [] + - TokenAuth: [] + parameters: + - in: path + name: peerId + required: true + description: The unique identifier of a peer + schema: + type: string + - in: path + name: jobId + required: true + description: The unique identifier of a job + schema: + type: string + responses: + '200': + description: A Job object + content: + application/json: + schema: + $ref: '#/components/schemas/JobResponse' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '500': + "$ref": "#/components/responses/internal_error" /api/accounts: get: summary: List all Accounts diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index 94a52b6e1..7a845b62f 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -1,10 +1,14 @@ // Package api provides primitives to interact with the openapi HTTP API. // -// Code generated by github.com/deepmap/oapi-codegen version v1.11.1-0.20220912230023-4a1477f6a8ba DO NOT EDIT. +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.5.1 DO NOT EDIT. package api import ( + "encoding/json" + "errors" "time" + + "github.com/oapi-codegen/runtime" ) const ( @@ -122,6 +126,13 @@ const ( IngressPortAllocationRequestPortRangeProtocolUdp IngressPortAllocationRequestPortRangeProtocol = "udp" ) +// Defines values for JobResponseStatus. +const ( + JobResponseStatusFailed JobResponseStatus = "failed" + JobResponseStatusPending JobResponseStatus = "pending" + JobResponseStatusSucceeded JobResponseStatus = "succeeded" +) + // Defines values for NameserverNsType. const ( NameserverNsTypeUdp NameserverNsType = "udp" @@ -200,6 +211,11 @@ const ( UserStatusInvited UserStatus = "invited" ) +// Defines values for WorkloadType. +const ( + WorkloadTypeBundle WorkloadType = "bundle" +) + // Defines values for GetApiEventsNetworkTrafficParamsType. const ( GetApiEventsNetworkTrafficParamsTypeTYPEDROP GetApiEventsNetworkTrafficParamsType = "TYPE_DROP" @@ -368,6 +384,47 @@ type AvailablePorts struct { Udp int `json:"udp"` } +// BundleParameters These parameters control what gets included in the bundle and how it is processed. +type BundleParameters struct { + // Anonymize Whether sensitive data should be anonymized in the bundle. + Anonymize bool `json:"anonymize"` + + // BundleFor Whether to generate a bundle for the given timeframe. + BundleFor bool `json:"bundle_for"` + + // BundleForTime Time period in minutes for which to generate the bundle. + BundleForTime int `json:"bundle_for_time"` + + // LogFileCount Maximum number of log files to include in the bundle. + LogFileCount int `json:"log_file_count"` +} + +// BundleResult defines model for BundleResult. +type BundleResult struct { + UploadKey *string `json:"upload_key"` +} + +// BundleWorkloadRequest defines model for BundleWorkloadRequest. +type BundleWorkloadRequest struct { + // Parameters These parameters control what gets included in the bundle and how it is processed. + Parameters BundleParameters `json:"parameters"` + + // Type Identifies the type of workload the job will execute. + // Currently only `"bundle"` is supported. + Type WorkloadType `json:"type"` +} + +// BundleWorkloadResponse defines model for BundleWorkloadResponse. +type BundleWorkloadResponse struct { + // Parameters These parameters control what gets included in the bundle and how it is processed. + Parameters BundleParameters `json:"parameters"` + Result BundleResult `json:"result"` + + // Type Identifies the type of workload the job will execute. + // Currently only `"bundle"` is supported. + Type WorkloadType `json:"type"` +} + // Checks List of objects that perform the actual checks type Checks struct { // GeoLocationCheck Posture check for geo location @@ -755,6 +812,25 @@ type InstanceStatus struct { SetupRequired bool `json:"setup_required"` } +// JobRequest defines model for JobRequest. +type JobRequest struct { + Workload WorkloadRequest `json:"workload"` +} + +// JobResponse defines model for JobResponse. +type JobResponse struct { + CompletedAt *time.Time `json:"completed_at"` + CreatedAt time.Time `json:"created_at"` + FailedReason *string `json:"failed_reason"` + Id string `json:"id"` + Status JobResponseStatus `json:"status"` + TriggeredBy string `json:"triggered_by"` + Workload WorkloadResponse `json:"workload"` +} + +// JobResponseStatus defines model for JobResponse.Status. +type JobResponseStatus string + // Location Describe geographical location information type Location struct { // CityName Commonly used English name of the city @@ -2042,6 +2118,20 @@ type UserRequest struct { Role string `json:"role"` } +// WorkloadRequest defines model for WorkloadRequest. +type WorkloadRequest struct { + union json.RawMessage +} + +// WorkloadResponse defines model for WorkloadResponse. +type WorkloadResponse struct { + union json.RawMessage +} + +// WorkloadType Identifies the type of workload the job will execute. +// Currently only `"bundle"` is supported. +type WorkloadType string + // Zone defines model for Zone. type Zone struct { // DistributionGroups Group IDs that defines groups of peers that will resolve this zone @@ -2225,6 +2315,9 @@ type PostApiPeersPeerIdIngressPortsJSONRequestBody = IngressPortAllocationReques // PutApiPeersPeerIdIngressPortsAllocationIdJSONRequestBody defines body for PutApiPeersPeerIdIngressPortsAllocationId for application/json ContentType. type PutApiPeersPeerIdIngressPortsAllocationIdJSONRequestBody = IngressPortAllocationRequest +// PostApiPeersPeerIdJobsJSONRequestBody defines body for PostApiPeersPeerIdJobs for application/json ContentType. +type PostApiPeersPeerIdJobsJSONRequestBody = JobRequest + // PostApiPeersPeerIdTemporaryAccessJSONRequestBody defines body for PostApiPeersPeerIdTemporaryAccess for application/json ContentType. type PostApiPeersPeerIdTemporaryAccessJSONRequestBody = PeerTemporaryAccessRequest @@ -2263,3 +2356,121 @@ type PutApiUsersUserIdJSONRequestBody = UserRequest // PostApiUsersUserIdTokensJSONRequestBody defines body for PostApiUsersUserIdTokens for application/json ContentType. type PostApiUsersUserIdTokensJSONRequestBody = PersonalAccessTokenRequest + +// AsBundleWorkloadRequest returns the union data inside the WorkloadRequest as a BundleWorkloadRequest +func (t WorkloadRequest) AsBundleWorkloadRequest() (BundleWorkloadRequest, error) { + var body BundleWorkloadRequest + err := json.Unmarshal(t.union, &body) + return body, err +} + +// FromBundleWorkloadRequest overwrites any union data inside the WorkloadRequest as the provided BundleWorkloadRequest +func (t *WorkloadRequest) FromBundleWorkloadRequest(v BundleWorkloadRequest) error { + v.Type = "bundle" + b, err := json.Marshal(v) + t.union = b + return err +} + +// MergeBundleWorkloadRequest performs a merge with any union data inside the WorkloadRequest, using the provided BundleWorkloadRequest +func (t *WorkloadRequest) MergeBundleWorkloadRequest(v BundleWorkloadRequest) error { + v.Type = "bundle" + b, err := json.Marshal(v) + if err != nil { + return err + } + + merged, err := runtime.JSONMerge(t.union, b) + t.union = merged + return err +} + +func (t WorkloadRequest) Discriminator() (string, error) { + var discriminator struct { + Discriminator string `json:"type"` + } + err := json.Unmarshal(t.union, &discriminator) + return discriminator.Discriminator, err +} + +func (t WorkloadRequest) ValueByDiscriminator() (interface{}, error) { + discriminator, err := t.Discriminator() + if err != nil { + return nil, err + } + switch discriminator { + case "bundle": + return t.AsBundleWorkloadRequest() + default: + return nil, errors.New("unknown discriminator value: " + discriminator) + } +} + +func (t WorkloadRequest) MarshalJSON() ([]byte, error) { + b, err := t.union.MarshalJSON() + return b, err +} + +func (t *WorkloadRequest) UnmarshalJSON(b []byte) error { + err := t.union.UnmarshalJSON(b) + return err +} + +// AsBundleWorkloadResponse returns the union data inside the WorkloadResponse as a BundleWorkloadResponse +func (t WorkloadResponse) AsBundleWorkloadResponse() (BundleWorkloadResponse, error) { + var body BundleWorkloadResponse + err := json.Unmarshal(t.union, &body) + return body, err +} + +// FromBundleWorkloadResponse overwrites any union data inside the WorkloadResponse as the provided BundleWorkloadResponse +func (t *WorkloadResponse) FromBundleWorkloadResponse(v BundleWorkloadResponse) error { + v.Type = "bundle" + b, err := json.Marshal(v) + t.union = b + return err +} + +// MergeBundleWorkloadResponse performs a merge with any union data inside the WorkloadResponse, using the provided BundleWorkloadResponse +func (t *WorkloadResponse) MergeBundleWorkloadResponse(v BundleWorkloadResponse) error { + v.Type = "bundle" + b, err := json.Marshal(v) + if err != nil { + return err + } + + merged, err := runtime.JSONMerge(t.union, b) + t.union = merged + return err +} + +func (t WorkloadResponse) Discriminator() (string, error) { + var discriminator struct { + Discriminator string `json:"type"` + } + err := json.Unmarshal(t.union, &discriminator) + return discriminator.Discriminator, err +} + +func (t WorkloadResponse) ValueByDiscriminator() (interface{}, error) { + discriminator, err := t.Discriminator() + if err != nil { + return nil, err + } + switch discriminator { + case "bundle": + return t.AsBundleWorkloadResponse() + default: + return nil, errors.New("unknown discriminator value: " + discriminator) + } +} + +func (t WorkloadResponse) MarshalJSON() ([]byte, error) { + b, err := t.union.MarshalJSON() + return b, err +} + +func (t *WorkloadResponse) UnmarshalJSON(b []byte) error { + err := t.union.UnmarshalJSON(b) + return err +} diff --git a/shared/management/proto/management.pb.go b/shared/management/proto/management.pb.go index 84b74bf8c..dfa9adaf6 100644 --- a/shared/management/proto/management.pb.go +++ b/shared/management/proto/management.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v6.33.3 +// protoc v6.33.1 // source: management.proto package proto @@ -22,6 +22,55 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type JobStatus int32 + +const ( + JobStatus_unknown_status JobStatus = 0 //placeholder + JobStatus_succeeded JobStatus = 1 + JobStatus_failed JobStatus = 2 +) + +// Enum value maps for JobStatus. +var ( + JobStatus_name = map[int32]string{ + 0: "unknown_status", + 1: "succeeded", + 2: "failed", + } + JobStatus_value = map[string]int32{ + "unknown_status": 0, + "succeeded": 1, + "failed": 2, + } +) + +func (x JobStatus) Enum() *JobStatus { + p := new(JobStatus) + *p = x + return p +} + +func (x JobStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (JobStatus) Descriptor() protoreflect.EnumDescriptor { + return file_management_proto_enumTypes[0].Descriptor() +} + +func (JobStatus) Type() protoreflect.EnumType { + return &file_management_proto_enumTypes[0] +} + +func (x JobStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use JobStatus.Descriptor instead. +func (JobStatus) EnumDescriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{0} +} + type RuleProtocol int32 const ( @@ -64,11 +113,11 @@ func (x RuleProtocol) String() string { } func (RuleProtocol) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[0].Descriptor() + return file_management_proto_enumTypes[1].Descriptor() } func (RuleProtocol) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[0] + return &file_management_proto_enumTypes[1] } func (x RuleProtocol) Number() protoreflect.EnumNumber { @@ -77,7 +126,7 @@ func (x RuleProtocol) Number() protoreflect.EnumNumber { // Deprecated: Use RuleProtocol.Descriptor instead. func (RuleProtocol) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{0} + return file_management_proto_rawDescGZIP(), []int{1} } type RuleDirection int32 @@ -110,11 +159,11 @@ func (x RuleDirection) String() string { } func (RuleDirection) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[1].Descriptor() + return file_management_proto_enumTypes[2].Descriptor() } func (RuleDirection) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[1] + return &file_management_proto_enumTypes[2] } func (x RuleDirection) Number() protoreflect.EnumNumber { @@ -123,7 +172,7 @@ func (x RuleDirection) Number() protoreflect.EnumNumber { // Deprecated: Use RuleDirection.Descriptor instead. func (RuleDirection) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{1} + return file_management_proto_rawDescGZIP(), []int{2} } type RuleAction int32 @@ -156,11 +205,11 @@ func (x RuleAction) String() string { } func (RuleAction) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[2].Descriptor() + return file_management_proto_enumTypes[3].Descriptor() } func (RuleAction) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[2] + return &file_management_proto_enumTypes[3] } func (x RuleAction) Number() protoreflect.EnumNumber { @@ -169,7 +218,7 @@ func (x RuleAction) Number() protoreflect.EnumNumber { // Deprecated: Use RuleAction.Descriptor instead. func (RuleAction) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{2} + return file_management_proto_rawDescGZIP(), []int{3} } type HostConfig_Protocol int32 @@ -211,11 +260,11 @@ func (x HostConfig_Protocol) String() string { } func (HostConfig_Protocol) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[3].Descriptor() + return file_management_proto_enumTypes[4].Descriptor() } func (HostConfig_Protocol) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[3] + return &file_management_proto_enumTypes[4] } func (x HostConfig_Protocol) Number() protoreflect.EnumNumber { @@ -224,7 +273,7 @@ func (x HostConfig_Protocol) Number() protoreflect.EnumNumber { // Deprecated: Use HostConfig_Protocol.Descriptor instead. func (HostConfig_Protocol) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{14, 0} + return file_management_proto_rawDescGZIP(), []int{18, 0} } type DeviceAuthorizationFlowProvider int32 @@ -254,11 +303,11 @@ func (x DeviceAuthorizationFlowProvider) String() string { } func (DeviceAuthorizationFlowProvider) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[4].Descriptor() + return file_management_proto_enumTypes[5].Descriptor() } func (DeviceAuthorizationFlowProvider) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[4] + return &file_management_proto_enumTypes[5] } func (x DeviceAuthorizationFlowProvider) Number() protoreflect.EnumNumber { @@ -267,7 +316,7 @@ func (x DeviceAuthorizationFlowProvider) Number() protoreflect.EnumNumber { // Deprecated: Use DeviceAuthorizationFlowProvider.Descriptor instead. func (DeviceAuthorizationFlowProvider) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{27, 0} + return file_management_proto_rawDescGZIP(), []int{31, 0} } type EncryptedMessage struct { @@ -336,6 +385,290 @@ func (x *EncryptedMessage) GetVersion() int32 { return 0 } +type JobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID []byte `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + // Types that are assignable to WorkloadParameters: + // + // *JobRequest_Bundle + WorkloadParameters isJobRequest_WorkloadParameters `protobuf_oneof:"workload_parameters"` +} + +func (x *JobRequest) Reset() { + *x = JobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JobRequest) ProtoMessage() {} + +func (x *JobRequest) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JobRequest.ProtoReflect.Descriptor instead. +func (*JobRequest) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{1} +} + +func (x *JobRequest) GetID() []byte { + if x != nil { + return x.ID + } + return nil +} + +func (m *JobRequest) GetWorkloadParameters() isJobRequest_WorkloadParameters { + if m != nil { + return m.WorkloadParameters + } + return nil +} + +func (x *JobRequest) GetBundle() *BundleParameters { + if x, ok := x.GetWorkloadParameters().(*JobRequest_Bundle); ok { + return x.Bundle + } + return nil +} + +type isJobRequest_WorkloadParameters interface { + isJobRequest_WorkloadParameters() +} + +type JobRequest_Bundle struct { + Bundle *BundleParameters `protobuf:"bytes,10,opt,name=bundle,proto3,oneof"` //OtherParameters other = 11; +} + +func (*JobRequest_Bundle) isJobRequest_WorkloadParameters() {} + +type JobResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID []byte `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Status JobStatus `protobuf:"varint,2,opt,name=status,proto3,enum=management.JobStatus" json:"status,omitempty"` + Reason []byte `protobuf:"bytes,3,opt,name=Reason,proto3" json:"Reason,omitempty"` + // Types that are assignable to WorkloadResults: + // + // *JobResponse_Bundle + WorkloadResults isJobResponse_WorkloadResults `protobuf_oneof:"workload_results"` +} + +func (x *JobResponse) Reset() { + *x = JobResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JobResponse) ProtoMessage() {} + +func (x *JobResponse) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JobResponse.ProtoReflect.Descriptor instead. +func (*JobResponse) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{2} +} + +func (x *JobResponse) GetID() []byte { + if x != nil { + return x.ID + } + return nil +} + +func (x *JobResponse) GetStatus() JobStatus { + if x != nil { + return x.Status + } + return JobStatus_unknown_status +} + +func (x *JobResponse) GetReason() []byte { + if x != nil { + return x.Reason + } + return nil +} + +func (m *JobResponse) GetWorkloadResults() isJobResponse_WorkloadResults { + if m != nil { + return m.WorkloadResults + } + return nil +} + +func (x *JobResponse) GetBundle() *BundleResult { + if x, ok := x.GetWorkloadResults().(*JobResponse_Bundle); ok { + return x.Bundle + } + return nil +} + +type isJobResponse_WorkloadResults interface { + isJobResponse_WorkloadResults() +} + +type JobResponse_Bundle struct { + Bundle *BundleResult `protobuf:"bytes,10,opt,name=bundle,proto3,oneof"` //OtherResult other = 11; +} + +func (*JobResponse_Bundle) isJobResponse_WorkloadResults() {} + +type BundleParameters struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BundleFor bool `protobuf:"varint,1,opt,name=bundle_for,json=bundleFor,proto3" json:"bundle_for,omitempty"` + BundleForTime int64 `protobuf:"varint,2,opt,name=bundle_for_time,json=bundleForTime,proto3" json:"bundle_for_time,omitempty"` + LogFileCount int32 `protobuf:"varint,3,opt,name=log_file_count,json=logFileCount,proto3" json:"log_file_count,omitempty"` + Anonymize bool `protobuf:"varint,4,opt,name=anonymize,proto3" json:"anonymize,omitempty"` +} + +func (x *BundleParameters) Reset() { + *x = BundleParameters{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BundleParameters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BundleParameters) ProtoMessage() {} + +func (x *BundleParameters) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BundleParameters.ProtoReflect.Descriptor instead. +func (*BundleParameters) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{3} +} + +func (x *BundleParameters) GetBundleFor() bool { + if x != nil { + return x.BundleFor + } + return false +} + +func (x *BundleParameters) GetBundleForTime() int64 { + if x != nil { + return x.BundleForTime + } + return 0 +} + +func (x *BundleParameters) GetLogFileCount() int32 { + if x != nil { + return x.LogFileCount + } + return 0 +} + +func (x *BundleParameters) GetAnonymize() bool { + if x != nil { + return x.Anonymize + } + return false +} + +type BundleResult struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UploadKey string `protobuf:"bytes,1,opt,name=upload_key,json=uploadKey,proto3" json:"upload_key,omitempty"` +} + +func (x *BundleResult) Reset() { + *x = BundleResult{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BundleResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BundleResult) ProtoMessage() {} + +func (x *BundleResult) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BundleResult.ProtoReflect.Descriptor instead. +func (*BundleResult) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{4} +} + +func (x *BundleResult) GetUploadKey() string { + if x != nil { + return x.UploadKey + } + return "" +} + type SyncRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -348,7 +681,7 @@ type SyncRequest struct { func (x *SyncRequest) Reset() { *x = SyncRequest{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[1] + mi := &file_management_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -361,7 +694,7 @@ func (x *SyncRequest) String() string { func (*SyncRequest) ProtoMessage() {} func (x *SyncRequest) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[1] + mi := &file_management_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -374,7 +707,7 @@ func (x *SyncRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SyncRequest.ProtoReflect.Descriptor instead. func (*SyncRequest) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{1} + return file_management_proto_rawDescGZIP(), []int{5} } func (x *SyncRequest) GetMeta() *PeerSystemMeta { @@ -407,7 +740,7 @@ type SyncResponse struct { func (x *SyncResponse) Reset() { *x = SyncResponse{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[2] + mi := &file_management_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -420,7 +753,7 @@ func (x *SyncResponse) String() string { func (*SyncResponse) ProtoMessage() {} func (x *SyncResponse) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[2] + mi := &file_management_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -433,7 +766,7 @@ func (x *SyncResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SyncResponse.ProtoReflect.Descriptor instead. func (*SyncResponse) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{2} + return file_management_proto_rawDescGZIP(), []int{6} } func (x *SyncResponse) GetNetbirdConfig() *NetbirdConfig { @@ -490,7 +823,7 @@ type SyncMetaRequest struct { func (x *SyncMetaRequest) Reset() { *x = SyncMetaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[3] + mi := &file_management_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -503,7 +836,7 @@ func (x *SyncMetaRequest) String() string { func (*SyncMetaRequest) ProtoMessage() {} func (x *SyncMetaRequest) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[3] + mi := &file_management_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -516,7 +849,7 @@ func (x *SyncMetaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SyncMetaRequest.ProtoReflect.Descriptor instead. func (*SyncMetaRequest) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{3} + return file_management_proto_rawDescGZIP(), []int{7} } func (x *SyncMetaRequest) GetMeta() *PeerSystemMeta { @@ -545,7 +878,7 @@ type LoginRequest struct { func (x *LoginRequest) Reset() { *x = LoginRequest{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[4] + mi := &file_management_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -558,7 +891,7 @@ func (x *LoginRequest) String() string { func (*LoginRequest) ProtoMessage() {} func (x *LoginRequest) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[4] + mi := &file_management_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -571,7 +904,7 @@ func (x *LoginRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use LoginRequest.ProtoReflect.Descriptor instead. func (*LoginRequest) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{4} + return file_management_proto_rawDescGZIP(), []int{8} } func (x *LoginRequest) GetSetupKey() string { @@ -625,7 +958,7 @@ type PeerKeys struct { func (x *PeerKeys) Reset() { *x = PeerKeys{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[5] + mi := &file_management_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -638,7 +971,7 @@ func (x *PeerKeys) String() string { func (*PeerKeys) ProtoMessage() {} func (x *PeerKeys) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[5] + mi := &file_management_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -651,7 +984,7 @@ func (x *PeerKeys) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerKeys.ProtoReflect.Descriptor instead. func (*PeerKeys) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{5} + return file_management_proto_rawDescGZIP(), []int{9} } func (x *PeerKeys) GetSshPubKey() []byte { @@ -683,7 +1016,7 @@ type Environment struct { func (x *Environment) Reset() { *x = Environment{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[6] + mi := &file_management_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -696,7 +1029,7 @@ func (x *Environment) String() string { func (*Environment) ProtoMessage() {} func (x *Environment) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[6] + mi := &file_management_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -709,7 +1042,7 @@ func (x *Environment) ProtoReflect() protoreflect.Message { // Deprecated: Use Environment.ProtoReflect.Descriptor instead. func (*Environment) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{6} + return file_management_proto_rawDescGZIP(), []int{10} } func (x *Environment) GetCloud() string { @@ -743,7 +1076,7 @@ type File struct { func (x *File) Reset() { *x = File{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[7] + mi := &file_management_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -756,7 +1089,7 @@ func (x *File) String() string { func (*File) ProtoMessage() {} func (x *File) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[7] + mi := &file_management_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -769,7 +1102,7 @@ func (x *File) ProtoReflect() protoreflect.Message { // Deprecated: Use File.ProtoReflect.Descriptor instead. func (*File) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{7} + return file_management_proto_rawDescGZIP(), []int{11} } func (x *File) GetPath() string { @@ -818,7 +1151,7 @@ type Flags struct { func (x *Flags) Reset() { *x = Flags{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[8] + mi := &file_management_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -831,7 +1164,7 @@ func (x *Flags) String() string { func (*Flags) ProtoMessage() {} func (x *Flags) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[8] + mi := &file_management_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -844,7 +1177,7 @@ func (x *Flags) ProtoReflect() protoreflect.Message { // Deprecated: Use Flags.ProtoReflect.Descriptor instead. func (*Flags) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{8} + return file_management_proto_rawDescGZIP(), []int{12} } func (x *Flags) GetRosenpassEnabled() bool { @@ -980,7 +1313,7 @@ type PeerSystemMeta struct { func (x *PeerSystemMeta) Reset() { *x = PeerSystemMeta{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[9] + mi := &file_management_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -993,7 +1326,7 @@ func (x *PeerSystemMeta) String() string { func (*PeerSystemMeta) ProtoMessage() {} func (x *PeerSystemMeta) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[9] + mi := &file_management_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1006,7 +1339,7 @@ func (x *PeerSystemMeta) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerSystemMeta.ProtoReflect.Descriptor instead. func (*PeerSystemMeta) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{9} + return file_management_proto_rawDescGZIP(), []int{13} } func (x *PeerSystemMeta) GetHostname() string { @@ -1144,7 +1477,7 @@ type LoginResponse struct { func (x *LoginResponse) Reset() { *x = LoginResponse{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[10] + mi := &file_management_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1157,7 +1490,7 @@ func (x *LoginResponse) String() string { func (*LoginResponse) ProtoMessage() {} func (x *LoginResponse) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[10] + mi := &file_management_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1170,7 +1503,7 @@ func (x *LoginResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use LoginResponse.ProtoReflect.Descriptor instead. func (*LoginResponse) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{10} + return file_management_proto_rawDescGZIP(), []int{14} } func (x *LoginResponse) GetNetbirdConfig() *NetbirdConfig { @@ -1210,7 +1543,7 @@ type ServerKeyResponse struct { func (x *ServerKeyResponse) Reset() { *x = ServerKeyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[11] + mi := &file_management_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1223,7 +1556,7 @@ func (x *ServerKeyResponse) String() string { func (*ServerKeyResponse) ProtoMessage() {} func (x *ServerKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[11] + mi := &file_management_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1236,7 +1569,7 @@ func (x *ServerKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ServerKeyResponse.ProtoReflect.Descriptor instead. func (*ServerKeyResponse) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{11} + return file_management_proto_rawDescGZIP(), []int{15} } func (x *ServerKeyResponse) GetKey() string { @@ -1269,7 +1602,7 @@ type Empty struct { func (x *Empty) Reset() { *x = Empty{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[12] + mi := &file_management_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1282,7 +1615,7 @@ func (x *Empty) String() string { func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[12] + mi := &file_management_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1295,7 +1628,7 @@ func (x *Empty) ProtoReflect() protoreflect.Message { // Deprecated: Use Empty.ProtoReflect.Descriptor instead. func (*Empty) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{12} + return file_management_proto_rawDescGZIP(), []int{16} } // NetbirdConfig is a common configuration of any Netbird peer. It contains STUN, TURN, Signal and Management servers configurations @@ -1317,7 +1650,7 @@ type NetbirdConfig struct { func (x *NetbirdConfig) Reset() { *x = NetbirdConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[13] + mi := &file_management_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1330,7 +1663,7 @@ func (x *NetbirdConfig) String() string { func (*NetbirdConfig) ProtoMessage() {} func (x *NetbirdConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[13] + mi := &file_management_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1343,7 +1676,7 @@ func (x *NetbirdConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use NetbirdConfig.ProtoReflect.Descriptor instead. func (*NetbirdConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{13} + return file_management_proto_rawDescGZIP(), []int{17} } func (x *NetbirdConfig) GetStuns() []*HostConfig { @@ -1395,7 +1728,7 @@ type HostConfig struct { func (x *HostConfig) Reset() { *x = HostConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[14] + mi := &file_management_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1408,7 +1741,7 @@ func (x *HostConfig) String() string { func (*HostConfig) ProtoMessage() {} func (x *HostConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[14] + mi := &file_management_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1421,7 +1754,7 @@ func (x *HostConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use HostConfig.ProtoReflect.Descriptor instead. func (*HostConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{14} + return file_management_proto_rawDescGZIP(), []int{18} } func (x *HostConfig) GetUri() string { @@ -1451,7 +1784,7 @@ type RelayConfig struct { func (x *RelayConfig) Reset() { *x = RelayConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[15] + mi := &file_management_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1464,7 +1797,7 @@ func (x *RelayConfig) String() string { func (*RelayConfig) ProtoMessage() {} func (x *RelayConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[15] + mi := &file_management_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1477,7 +1810,7 @@ func (x *RelayConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use RelayConfig.ProtoReflect.Descriptor instead. func (*RelayConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{15} + return file_management_proto_rawDescGZIP(), []int{19} } func (x *RelayConfig) GetUrls() []string { @@ -1522,7 +1855,7 @@ type FlowConfig struct { func (x *FlowConfig) Reset() { *x = FlowConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[16] + mi := &file_management_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1535,7 +1868,7 @@ func (x *FlowConfig) String() string { func (*FlowConfig) ProtoMessage() {} func (x *FlowConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[16] + mi := &file_management_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1548,7 +1881,7 @@ func (x *FlowConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use FlowConfig.ProtoReflect.Descriptor instead. func (*FlowConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{16} + return file_management_proto_rawDescGZIP(), []int{20} } func (x *FlowConfig) GetUrl() string { @@ -1607,24 +1940,26 @@ func (x *FlowConfig) GetDnsCollection() bool { return false } -// JWTConfig represents JWT authentication configuration +// JWTConfig represents JWT authentication configuration for validating tokens. type JWTConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Issuer string `protobuf:"bytes,1,opt,name=issuer,proto3" json:"issuer,omitempty"` + Issuer string `protobuf:"bytes,1,opt,name=issuer,proto3" json:"issuer,omitempty"` + // Deprecated: audience is kept for backwards compatibility only. Use audiences instead in the client code but populate this field. Audience string `protobuf:"bytes,2,opt,name=audience,proto3" json:"audience,omitempty"` KeysLocation string `protobuf:"bytes,3,opt,name=keysLocation,proto3" json:"keysLocation,omitempty"` MaxTokenAge int64 `protobuf:"varint,4,opt,name=maxTokenAge,proto3" json:"maxTokenAge,omitempty"` - // audiences + // audiences contains the list of valid audiences for JWT validation. + // Tokens matching any audience in this list are considered valid. Audiences []string `protobuf:"bytes,5,rep,name=audiences,proto3" json:"audiences,omitempty"` } func (x *JWTConfig) Reset() { *x = JWTConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[17] + mi := &file_management_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1637,7 +1972,7 @@ func (x *JWTConfig) String() string { func (*JWTConfig) ProtoMessage() {} func (x *JWTConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[17] + mi := &file_management_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1650,7 +1985,7 @@ func (x *JWTConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use JWTConfig.ProtoReflect.Descriptor instead. func (*JWTConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{17} + return file_management_proto_rawDescGZIP(), []int{21} } func (x *JWTConfig) GetIssuer() string { @@ -1703,7 +2038,7 @@ type ProtectedHostConfig struct { func (x *ProtectedHostConfig) Reset() { *x = ProtectedHostConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[18] + mi := &file_management_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1716,7 +2051,7 @@ func (x *ProtectedHostConfig) String() string { func (*ProtectedHostConfig) ProtoMessage() {} func (x *ProtectedHostConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[18] + mi := &file_management_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1729,7 +2064,7 @@ func (x *ProtectedHostConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtectedHostConfig.ProtoReflect.Descriptor instead. func (*ProtectedHostConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{18} + return file_management_proto_rawDescGZIP(), []int{22} } func (x *ProtectedHostConfig) GetHostConfig() *HostConfig { @@ -1778,7 +2113,7 @@ type PeerConfig struct { func (x *PeerConfig) Reset() { *x = PeerConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[19] + mi := &file_management_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1791,7 +2126,7 @@ func (x *PeerConfig) String() string { func (*PeerConfig) ProtoMessage() {} func (x *PeerConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[19] + mi := &file_management_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1804,7 +2139,7 @@ func (x *PeerConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerConfig.ProtoReflect.Descriptor instead. func (*PeerConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{19} + return file_management_proto_rawDescGZIP(), []int{23} } func (x *PeerConfig) GetAddress() string { @@ -1877,7 +2212,7 @@ type AutoUpdateSettings struct { func (x *AutoUpdateSettings) Reset() { *x = AutoUpdateSettings{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[20] + mi := &file_management_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1890,7 +2225,7 @@ func (x *AutoUpdateSettings) String() string { func (*AutoUpdateSettings) ProtoMessage() {} func (x *AutoUpdateSettings) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[20] + mi := &file_management_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1903,7 +2238,7 @@ func (x *AutoUpdateSettings) ProtoReflect() protoreflect.Message { // Deprecated: Use AutoUpdateSettings.ProtoReflect.Descriptor instead. func (*AutoUpdateSettings) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{20} + return file_management_proto_rawDescGZIP(), []int{24} } func (x *AutoUpdateSettings) GetVersion() string { @@ -1958,7 +2293,7 @@ type NetworkMap struct { func (x *NetworkMap) Reset() { *x = NetworkMap{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[21] + mi := &file_management_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1971,7 +2306,7 @@ func (x *NetworkMap) String() string { func (*NetworkMap) ProtoMessage() {} func (x *NetworkMap) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[21] + mi := &file_management_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1984,7 +2319,7 @@ func (x *NetworkMap) ProtoReflect() protoreflect.Message { // Deprecated: Use NetworkMap.ProtoReflect.Descriptor instead. func (*NetworkMap) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{21} + return file_management_proto_rawDescGZIP(), []int{25} } func (x *NetworkMap) GetSerial() uint64 { @@ -2094,7 +2429,7 @@ type SSHAuth struct { func (x *SSHAuth) Reset() { *x = SSHAuth{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[22] + mi := &file_management_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2107,7 +2442,7 @@ func (x *SSHAuth) String() string { func (*SSHAuth) ProtoMessage() {} func (x *SSHAuth) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[22] + mi := &file_management_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2120,7 +2455,7 @@ func (x *SSHAuth) ProtoReflect() protoreflect.Message { // Deprecated: Use SSHAuth.ProtoReflect.Descriptor instead. func (*SSHAuth) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{22} + return file_management_proto_rawDescGZIP(), []int{26} } func (x *SSHAuth) GetUserIDClaim() string { @@ -2155,7 +2490,7 @@ type MachineUserIndexes struct { func (x *MachineUserIndexes) Reset() { *x = MachineUserIndexes{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[23] + mi := &file_management_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2168,7 +2503,7 @@ func (x *MachineUserIndexes) String() string { func (*MachineUserIndexes) ProtoMessage() {} func (x *MachineUserIndexes) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[23] + mi := &file_management_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2181,7 +2516,7 @@ func (x *MachineUserIndexes) ProtoReflect() protoreflect.Message { // Deprecated: Use MachineUserIndexes.ProtoReflect.Descriptor instead. func (*MachineUserIndexes) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{23} + return file_management_proto_rawDescGZIP(), []int{27} } func (x *MachineUserIndexes) GetIndexes() []uint32 { @@ -2212,7 +2547,7 @@ type RemotePeerConfig struct { func (x *RemotePeerConfig) Reset() { *x = RemotePeerConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[24] + mi := &file_management_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2225,7 +2560,7 @@ func (x *RemotePeerConfig) String() string { func (*RemotePeerConfig) ProtoMessage() {} func (x *RemotePeerConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[24] + mi := &file_management_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2238,7 +2573,7 @@ func (x *RemotePeerConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use RemotePeerConfig.ProtoReflect.Descriptor instead. func (*RemotePeerConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{24} + return file_management_proto_rawDescGZIP(), []int{28} } func (x *RemotePeerConfig) GetWgPubKey() string { @@ -2293,7 +2628,7 @@ type SSHConfig struct { func (x *SSHConfig) Reset() { *x = SSHConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[25] + mi := &file_management_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2306,7 +2641,7 @@ func (x *SSHConfig) String() string { func (*SSHConfig) ProtoMessage() {} func (x *SSHConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[25] + mi := &file_management_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2319,7 +2654,7 @@ func (x *SSHConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use SSHConfig.ProtoReflect.Descriptor instead. func (*SSHConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{25} + return file_management_proto_rawDescGZIP(), []int{29} } func (x *SSHConfig) GetSshEnabled() bool { @@ -2353,7 +2688,7 @@ type DeviceAuthorizationFlowRequest struct { func (x *DeviceAuthorizationFlowRequest) Reset() { *x = DeviceAuthorizationFlowRequest{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[26] + mi := &file_management_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2366,7 +2701,7 @@ func (x *DeviceAuthorizationFlowRequest) String() string { func (*DeviceAuthorizationFlowRequest) ProtoMessage() {} func (x *DeviceAuthorizationFlowRequest) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[26] + mi := &file_management_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2379,7 +2714,7 @@ func (x *DeviceAuthorizationFlowRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeviceAuthorizationFlowRequest.ProtoReflect.Descriptor instead. func (*DeviceAuthorizationFlowRequest) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{26} + return file_management_proto_rawDescGZIP(), []int{30} } // DeviceAuthorizationFlow represents Device Authorization Flow information @@ -2398,7 +2733,7 @@ type DeviceAuthorizationFlow struct { func (x *DeviceAuthorizationFlow) Reset() { *x = DeviceAuthorizationFlow{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[27] + mi := &file_management_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2411,7 +2746,7 @@ func (x *DeviceAuthorizationFlow) String() string { func (*DeviceAuthorizationFlow) ProtoMessage() {} func (x *DeviceAuthorizationFlow) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[27] + mi := &file_management_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2424,7 +2759,7 @@ func (x *DeviceAuthorizationFlow) ProtoReflect() protoreflect.Message { // Deprecated: Use DeviceAuthorizationFlow.ProtoReflect.Descriptor instead. func (*DeviceAuthorizationFlow) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{27} + return file_management_proto_rawDescGZIP(), []int{31} } func (x *DeviceAuthorizationFlow) GetProvider() DeviceAuthorizationFlowProvider { @@ -2451,7 +2786,7 @@ type PKCEAuthorizationFlowRequest struct { func (x *PKCEAuthorizationFlowRequest) Reset() { *x = PKCEAuthorizationFlowRequest{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[28] + mi := &file_management_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2464,7 +2799,7 @@ func (x *PKCEAuthorizationFlowRequest) String() string { func (*PKCEAuthorizationFlowRequest) ProtoMessage() {} func (x *PKCEAuthorizationFlowRequest) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[28] + mi := &file_management_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2477,7 +2812,7 @@ func (x *PKCEAuthorizationFlowRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PKCEAuthorizationFlowRequest.ProtoReflect.Descriptor instead. func (*PKCEAuthorizationFlowRequest) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{28} + return file_management_proto_rawDescGZIP(), []int{32} } // PKCEAuthorizationFlow represents Authorization Code Flow information @@ -2494,7 +2829,7 @@ type PKCEAuthorizationFlow struct { func (x *PKCEAuthorizationFlow) Reset() { *x = PKCEAuthorizationFlow{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[29] + mi := &file_management_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2507,7 +2842,7 @@ func (x *PKCEAuthorizationFlow) String() string { func (*PKCEAuthorizationFlow) ProtoMessage() {} func (x *PKCEAuthorizationFlow) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[29] + mi := &file_management_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2520,7 +2855,7 @@ func (x *PKCEAuthorizationFlow) ProtoReflect() protoreflect.Message { // Deprecated: Use PKCEAuthorizationFlow.ProtoReflect.Descriptor instead. func (*PKCEAuthorizationFlow) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{29} + return file_management_proto_rawDescGZIP(), []int{33} } func (x *PKCEAuthorizationFlow) GetProviderConfig() *ProviderConfig { @@ -2566,7 +2901,7 @@ type ProviderConfig struct { func (x *ProviderConfig) Reset() { *x = ProviderConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[30] + mi := &file_management_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2579,7 +2914,7 @@ func (x *ProviderConfig) String() string { func (*ProviderConfig) ProtoMessage() {} func (x *ProviderConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[30] + mi := &file_management_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2592,7 +2927,7 @@ func (x *ProviderConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ProviderConfig.ProtoReflect.Descriptor instead. func (*ProviderConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{30} + return file_management_proto_rawDescGZIP(), []int{34} } func (x *ProviderConfig) GetClientID() string { @@ -2700,7 +3035,7 @@ type Route struct { func (x *Route) Reset() { *x = Route{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[31] + mi := &file_management_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2713,7 +3048,7 @@ func (x *Route) String() string { func (*Route) ProtoMessage() {} func (x *Route) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[31] + mi := &file_management_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2726,7 +3061,7 @@ func (x *Route) ProtoReflect() protoreflect.Message { // Deprecated: Use Route.ProtoReflect.Descriptor instead. func (*Route) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{31} + return file_management_proto_rawDescGZIP(), []int{35} } func (x *Route) GetID() string { @@ -2815,7 +3150,7 @@ type DNSConfig struct { func (x *DNSConfig) Reset() { *x = DNSConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[32] + mi := &file_management_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2828,7 +3163,7 @@ func (x *DNSConfig) String() string { func (*DNSConfig) ProtoMessage() {} func (x *DNSConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[32] + mi := &file_management_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2841,7 +3176,7 @@ func (x *DNSConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use DNSConfig.ProtoReflect.Descriptor instead. func (*DNSConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{32} + return file_management_proto_rawDescGZIP(), []int{36} } func (x *DNSConfig) GetServiceEnable() bool { @@ -2890,7 +3225,7 @@ type CustomZone struct { func (x *CustomZone) Reset() { *x = CustomZone{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[33] + mi := &file_management_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2903,7 +3238,7 @@ func (x *CustomZone) String() string { func (*CustomZone) ProtoMessage() {} func (x *CustomZone) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[33] + mi := &file_management_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2916,7 +3251,7 @@ func (x *CustomZone) ProtoReflect() protoreflect.Message { // Deprecated: Use CustomZone.ProtoReflect.Descriptor instead. func (*CustomZone) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{33} + return file_management_proto_rawDescGZIP(), []int{37} } func (x *CustomZone) GetDomain() string { @@ -2963,7 +3298,7 @@ type SimpleRecord struct { func (x *SimpleRecord) Reset() { *x = SimpleRecord{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[34] + mi := &file_management_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2976,7 +3311,7 @@ func (x *SimpleRecord) String() string { func (*SimpleRecord) ProtoMessage() {} func (x *SimpleRecord) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[34] + mi := &file_management_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2989,7 +3324,7 @@ func (x *SimpleRecord) ProtoReflect() protoreflect.Message { // Deprecated: Use SimpleRecord.ProtoReflect.Descriptor instead. func (*SimpleRecord) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{34} + return file_management_proto_rawDescGZIP(), []int{38} } func (x *SimpleRecord) GetName() string { @@ -3042,7 +3377,7 @@ type NameServerGroup struct { func (x *NameServerGroup) Reset() { *x = NameServerGroup{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[35] + mi := &file_management_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3055,7 +3390,7 @@ func (x *NameServerGroup) String() string { func (*NameServerGroup) ProtoMessage() {} func (x *NameServerGroup) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[35] + mi := &file_management_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3068,7 +3403,7 @@ func (x *NameServerGroup) ProtoReflect() protoreflect.Message { // Deprecated: Use NameServerGroup.ProtoReflect.Descriptor instead. func (*NameServerGroup) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{35} + return file_management_proto_rawDescGZIP(), []int{39} } func (x *NameServerGroup) GetNameServers() []*NameServer { @@ -3113,7 +3448,7 @@ type NameServer struct { func (x *NameServer) Reset() { *x = NameServer{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[36] + mi := &file_management_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3126,7 +3461,7 @@ func (x *NameServer) String() string { func (*NameServer) ProtoMessage() {} func (x *NameServer) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[36] + mi := &file_management_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3139,7 +3474,7 @@ func (x *NameServer) ProtoReflect() protoreflect.Message { // Deprecated: Use NameServer.ProtoReflect.Descriptor instead. func (*NameServer) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{36} + return file_management_proto_rawDescGZIP(), []int{40} } func (x *NameServer) GetIP() string { @@ -3182,7 +3517,7 @@ type FirewallRule struct { func (x *FirewallRule) Reset() { *x = FirewallRule{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[37] + mi := &file_management_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3195,7 +3530,7 @@ func (x *FirewallRule) String() string { func (*FirewallRule) ProtoMessage() {} func (x *FirewallRule) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[37] + mi := &file_management_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3208,7 +3543,7 @@ func (x *FirewallRule) ProtoReflect() protoreflect.Message { // Deprecated: Use FirewallRule.ProtoReflect.Descriptor instead. func (*FirewallRule) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{37} + return file_management_proto_rawDescGZIP(), []int{41} } func (x *FirewallRule) GetPeerIP() string { @@ -3272,7 +3607,7 @@ type NetworkAddress struct { func (x *NetworkAddress) Reset() { *x = NetworkAddress{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[38] + mi := &file_management_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3285,7 +3620,7 @@ func (x *NetworkAddress) String() string { func (*NetworkAddress) ProtoMessage() {} func (x *NetworkAddress) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[38] + mi := &file_management_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3298,7 +3633,7 @@ func (x *NetworkAddress) ProtoReflect() protoreflect.Message { // Deprecated: Use NetworkAddress.ProtoReflect.Descriptor instead. func (*NetworkAddress) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{38} + return file_management_proto_rawDescGZIP(), []int{42} } func (x *NetworkAddress) GetNetIP() string { @@ -3326,7 +3661,7 @@ type Checks struct { func (x *Checks) Reset() { *x = Checks{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[39] + mi := &file_management_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3339,7 +3674,7 @@ func (x *Checks) String() string { func (*Checks) ProtoMessage() {} func (x *Checks) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[39] + mi := &file_management_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3352,7 +3687,7 @@ func (x *Checks) ProtoReflect() protoreflect.Message { // Deprecated: Use Checks.ProtoReflect.Descriptor instead. func (*Checks) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{39} + return file_management_proto_rawDescGZIP(), []int{43} } func (x *Checks) GetFiles() []string { @@ -3377,7 +3712,7 @@ type PortInfo struct { func (x *PortInfo) Reset() { *x = PortInfo{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[40] + mi := &file_management_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3390,7 +3725,7 @@ func (x *PortInfo) String() string { func (*PortInfo) ProtoMessage() {} func (x *PortInfo) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[40] + mi := &file_management_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3403,7 +3738,7 @@ func (x *PortInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use PortInfo.ProtoReflect.Descriptor instead. func (*PortInfo) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{40} + return file_management_proto_rawDescGZIP(), []int{44} } func (m *PortInfo) GetPortSelection() isPortInfo_PortSelection { @@ -3474,7 +3809,7 @@ type RouteFirewallRule struct { func (x *RouteFirewallRule) Reset() { *x = RouteFirewallRule{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[41] + mi := &file_management_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3487,7 +3822,7 @@ func (x *RouteFirewallRule) String() string { func (*RouteFirewallRule) ProtoMessage() {} func (x *RouteFirewallRule) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[41] + mi := &file_management_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3500,7 +3835,7 @@ func (x *RouteFirewallRule) ProtoReflect() protoreflect.Message { // Deprecated: Use RouteFirewallRule.ProtoReflect.Descriptor instead. func (*RouteFirewallRule) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{41} + return file_management_proto_rawDescGZIP(), []int{45} } func (x *RouteFirewallRule) GetSourceRanges() []string { @@ -3591,7 +3926,7 @@ type ForwardingRule struct { func (x *ForwardingRule) Reset() { *x = ForwardingRule{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[42] + mi := &file_management_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3604,7 +3939,7 @@ func (x *ForwardingRule) String() string { func (*ForwardingRule) ProtoMessage() {} func (x *ForwardingRule) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[42] + mi := &file_management_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3617,7 +3952,7 @@ func (x *ForwardingRule) ProtoReflect() protoreflect.Message { // Deprecated: Use ForwardingRule.ProtoReflect.Descriptor instead. func (*ForwardingRule) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{42} + return file_management_proto_rawDescGZIP(), []int{46} } func (x *ForwardingRule) GetProtocol() RuleProtocol { @@ -3660,7 +3995,7 @@ type PortInfo_Range struct { func (x *PortInfo_Range) Reset() { *x = PortInfo_Range{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[44] + mi := &file_management_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3673,7 +4008,7 @@ func (x *PortInfo_Range) String() string { func (*PortInfo_Range) ProtoMessage() {} func (x *PortInfo_Range) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[44] + mi := &file_management_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3686,7 +4021,7 @@ func (x *PortInfo_Range) ProtoReflect() protoreflect.Message { // Deprecated: Use PortInfo_Range.ProtoReflect.Descriptor instead. func (*PortInfo_Range) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{40, 0} + return file_management_proto_rawDescGZIP(), []int{44, 0} } func (x *PortInfo_Range) GetStart() uint32 { @@ -3717,586 +4052,625 @@ var file_management_proto_rawDesc = []byte{ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x3d, 0x0a, - 0x0b, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, - 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, - 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0xdb, 0x02, 0x0a, - 0x0c, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, - 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, - 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3e, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x50, 0x65, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, - 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x50, 0x65, 0x65, 0x72, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x49, - 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x36, 0x0a, 0x0a, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x4d, 0x61, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, - 0x61, 0x70, 0x52, 0x0a, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x12, 0x2a, - 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x73, 0x52, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x22, 0x41, 0x0a, 0x0f, 0x53, 0x79, - 0x6e, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, - 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, - 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0xc6, 0x01, - 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, - 0x0a, 0x08, 0x73, 0x65, 0x74, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x73, 0x65, 0x74, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x12, 0x2e, 0x0a, 0x04, 0x6d, 0x65, - 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, - 0x4d, 0x65, 0x74, 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x6a, 0x77, - 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x77, - 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4b, 0x65, - 0x79, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x08, - 0x70, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x64, 0x6e, 0x73, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x22, 0x44, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4b, 0x65, - 0x79, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, - 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x22, 0x3f, 0x0a, 0x0b, - 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x22, 0x5c, 0x0a, - 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x69, - 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x65, 0x78, 0x69, 0x73, 0x74, 0x12, - 0x2a, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x73, 0x52, 0x75, 0x6e, 0x6e, - 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x63, 0x65, - 0x73, 0x73, 0x49, 0x73, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0xbf, 0x05, 0x0a, 0x05, - 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x2a, 0x0a, 0x10, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, - 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x10, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x12, 0x30, 0x0a, 0x13, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, - 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x6b, 0x0a, + 0x0a, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, + 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x49, 0x44, 0x12, 0x36, 0x0a, 0x06, 0x62, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x48, 0x00, 0x52, 0x06, 0x62, 0x75, 0x6e, + 0x64, 0x6c, 0x65, 0x42, 0x15, 0x0a, 0x13, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x22, 0xac, 0x01, 0x0a, 0x0b, 0x4a, + 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x49, 0x44, 0x12, 0x2d, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x52, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x12, 0x32, 0x0a, 0x06, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x42, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x48, 0x00, 0x52, 0x06, 0x62, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x9d, 0x01, 0x0a, 0x10, 0x42, 0x75, + 0x6e, 0x64, 0x6c, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1d, + 0x0a, 0x0a, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x26, 0x0a, + 0x0f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x46, 0x6f, + 0x72, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6c, + 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x61, + 0x6e, 0x6f, 0x6e, 0x79, 0x6d, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x61, 0x6e, 0x6f, 0x6e, 0x79, 0x6d, 0x69, 0x7a, 0x65, 0x22, 0x2d, 0x0a, 0x0c, 0x42, 0x75, 0x6e, + 0x64, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4b, 0x65, 0x79, 0x22, 0x3d, 0x0a, 0x0b, 0x53, 0x79, 0x6e, 0x63, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, + 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0xdb, 0x02, 0x0a, 0x0c, 0x53, 0x79, 0x6e, 0x63, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0d, 0x6e, 0x65, 0x74, 0x62, + 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, + 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, 0x6e, 0x65, 0x74, 0x62, + 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x65, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x3e, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, + 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x12, 0x36, 0x0a, 0x0a, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x52, 0x0a, 0x4e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x12, 0x2a, 0x0a, 0x06, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x52, 0x06, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x22, 0x41, 0x0a, 0x0f, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, + 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, + 0x74, 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0xc6, 0x01, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, + 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x74, + 0x75, 0x70, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x74, + 0x75, 0x70, 0x4b, 0x65, 0x79, 0x12, 0x2e, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x52, + 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x6a, 0x77, 0x74, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x77, 0x74, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x50, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4b, + 0x65, 0x79, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x64, 0x6e, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x22, 0x44, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1c, 0x0a, + 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x77, + 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x77, + 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x22, 0x3f, 0x0a, 0x0b, 0x45, 0x6e, 0x76, 0x69, 0x72, + 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x22, 0x5c, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x05, 0x65, 0x78, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x70, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x73, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x73, 0x52, + 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0xbf, 0x05, 0x0a, 0x05, 0x46, 0x6c, 0x61, 0x67, 0x73, + 0x12, 0x2a, 0x0a, 0x10, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x6f, 0x73, 0x65, + 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x76, 0x65, 0x12, 0x2a, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x53, 0x48, - 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x53, 0x48, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x12, - 0x30, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, + 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x72, 0x6f, 0x73, 0x65, 0x6e, + 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x76, 0x65, 0x12, 0x2a, + 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x53, 0x48, 0x41, 0x6c, 0x6c, 0x6f, 0x77, + 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x53, 0x53, 0x48, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x73, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x6f, 0x75, - 0x74, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x4e, - 0x53, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x44, 0x4e, 0x53, 0x12, 0x28, 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, - 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x69, - 0x73, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x12, 0x26, 0x0a, - 0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x41, 0x4e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x41, 0x4e, 0x41, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, - 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x34, 0x0a, 0x15, 0x6c, 0x61, 0x7a, - 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x6c, 0x61, 0x7a, 0x79, 0x43, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, - 0x24, 0x0a, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x6f, 0x6f, 0x74, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, - 0x48, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, - 0x53, 0x48, 0x53, 0x46, 0x54, 0x50, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x53, 0x46, 0x54, 0x50, 0x12, 0x42, 0x0a, 0x1c, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x72, - 0x74, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x0d, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x1c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x4c, 0x6f, 0x63, 0x61, - 0x6c, 0x50, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x12, - 0x44, 0x0a, 0x1d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, + 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1e, + 0x0a, 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x4e, 0x53, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x4e, 0x53, 0x12, 0x28, + 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, + 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x4c, 0x41, 0x4e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x41, 0x4e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x12, 0x22, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x62, + 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x34, 0x0a, 0x15, 0x6c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x15, 0x6c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x6f, 0x6f, 0x74, + 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x53, 0x46, 0x54, + 0x50, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, + 0x53, 0x48, 0x53, 0x46, 0x54, 0x50, 0x12, 0x42, 0x0a, 0x1c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x53, 0x53, 0x48, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, 0x77, + 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x72, 0x74, + 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x44, 0x0a, 0x1d, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, 0x72, + 0x74, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x1d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, - 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, - 0x48, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, 0x77, 0x61, - 0x72, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x26, 0x0a, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, - 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x22, 0xf2, 0x04, - 0x0a, 0x0e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, - 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x67, 0x6f, 0x4f, 0x53, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x6f, 0x4f, 0x53, - 0x12, 0x16, 0x0a, 0x06, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x4f, 0x53, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x4f, 0x53, 0x12, 0x26, 0x0a, 0x0e, 0x6e, 0x65, 0x74, 0x62, - 0x69, 0x72, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0e, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x1c, 0x0a, 0x09, 0x75, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, - 0x0a, 0x0d, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x4f, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4f, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x79, - 0x73, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x79, 0x73, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0e, 0x73, 0x79, 0x73, 0x50, 0x72, 0x6f, 0x64, 0x75, - 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x79, - 0x73, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f, - 0x73, 0x79, 0x73, 0x4d, 0x61, 0x6e, 0x75, 0x66, 0x61, 0x63, 0x74, 0x75, 0x72, 0x65, 0x72, 0x18, - 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x79, 0x73, 0x4d, 0x61, 0x6e, 0x75, 0x66, 0x61, - 0x63, 0x74, 0x75, 0x72, 0x65, 0x72, 0x12, 0x39, 0x0a, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, - 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, - 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, - 0x74, 0x12, 0x26, 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x10, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, - 0x6c, 0x65, 0x52, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x05, 0x66, 0x6c, 0x61, - 0x67, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x05, 0x66, 0x6c, 0x61, - 0x67, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, - 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x52, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x22, 0x79, 0x0a, 0x11, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x38, 0x0a, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xff, 0x01, - 0x0a, 0x0d, 0x4e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x2c, 0x0a, 0x05, 0x73, 0x74, 0x75, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x73, 0x74, 0x75, 0x6e, 0x73, 0x12, 0x35, 0x0a, - 0x05, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, - 0x74, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x74, - 0x75, 0x72, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x18, 0x03, + 0x12, 0x26, 0x0a, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x41, 0x75, + 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x22, 0xf2, 0x04, 0x0a, 0x0e, 0x50, 0x65, 0x65, + 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x68, + 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, + 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x6f, 0x4f, 0x53, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x6f, 0x4f, 0x53, 0x12, 0x16, 0x0a, 0x06, 0x6b, + 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6b, 0x65, 0x72, + 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, + 0x6f, 0x72, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, + 0x6f, 0x72, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x4f, 0x53, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x4f, 0x53, 0x12, 0x26, 0x0a, 0x0e, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6e, 0x65, 0x74, + 0x62, 0x69, 0x72, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x75, + 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x75, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x6b, 0x65, 0x72, + 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x1c, 0x0a, 0x09, 0x4f, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x4f, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, + 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x79, 0x73, 0x53, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x73, 0x79, 0x73, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, + 0x26, 0x0a, 0x0e, 0x73, 0x79, 0x73, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x4e, 0x61, 0x6d, + 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x79, 0x73, 0x50, 0x72, 0x6f, 0x64, + 0x75, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x79, 0x73, 0x4d, 0x61, + 0x6e, 0x75, 0x66, 0x61, 0x63, 0x74, 0x75, 0x72, 0x65, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x73, 0x79, 0x73, 0x4d, 0x61, 0x6e, 0x75, 0x66, 0x61, 0x63, 0x74, 0x75, 0x72, 0x65, + 0x72, 0x12, 0x39, 0x0a, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, + 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x05, + 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x05, 0x66, + 0x69, 0x6c, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x22, 0xb4, 0x01, + 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x3f, 0x0a, 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x73, 0x69, - 0x67, 0x6e, 0x61, 0x6c, 0x12, 0x2d, 0x0a, 0x05, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x72, 0x65, - 0x6c, 0x61, 0x79, 0x12, 0x2a, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, - 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x22, - 0x98, 0x01, 0x0a, 0x0a, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10, - 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x69, - 0x12, 0x3b, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x3b, 0x0a, - 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, - 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, - 0x54, 0x54, 0x50, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x03, - 0x12, 0x08, 0x0a, 0x04, 0x44, 0x54, 0x4c, 0x53, 0x10, 0x04, 0x22, 0x6d, 0x0a, 0x0b, 0x52, 0x65, - 0x6c, 0x61, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, 0x6c, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x6c, 0x73, 0x12, 0x22, 0x0a, - 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xad, 0x02, 0x0a, 0x0a, 0x46, 0x6c, - 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x26, - 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, - 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x18, 0x0a, - 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x65, 0x78, 0x69, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x43, - 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x12, 0x65, 0x78, 0x69, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x6e, 0x73, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x64, 0x6e, 0x73, 0x43, - 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x09, 0x4a, 0x57, - 0x54, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x12, - 0x1a, 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6b, - 0x65, 0x79, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x6b, 0x65, 0x79, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x20, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x67, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x67, - 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x22, - 0x7d, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x0a, 0x0a, 0x68, 0x6f, 0x73, 0x74, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x0a, 0x68, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, - 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, - 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0xd3, - 0x02, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, - 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x73, 0x68, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, - 0x0a, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, - 0x64, 0x6e, 0x12, 0x48, 0x0a, 0x1f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x65, - 0x72, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x52, 0x6f, 0x75, - 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x65, 0x72, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x15, - 0x4c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x4c, 0x61, 0x7a, - 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x74, 0x75, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x03, 0x6d, 0x74, 0x75, 0x12, 0x3e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x22, 0x52, 0x0a, 0x12, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x77, 0x61, - 0x79, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0xe8, 0x05, 0x0a, 0x0a, 0x4e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, - 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, - 0x36, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x65, 0x65, - 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3e, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, - 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x29, 0x0a, 0x06, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x06, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x44, 0x4e, - 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x40, 0x0a, 0x0c, 0x6f, 0x66, 0x66, 0x6c, 0x69, - 0x6e, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6f, 0x66, 0x66, - 0x6c, 0x69, 0x6e, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x46, 0x69, 0x72, - 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, - 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0d, 0x46, 0x69, 0x72, 0x65, - 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x66, 0x69, 0x72, - 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x66, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, - 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4f, 0x0a, - 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, - 0x75, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, 0x69, 0x72, - 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x3e, - 0x0a, 0x1a, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, - 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x1a, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, - 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, - 0x0a, 0x0f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, - 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, - 0x75, 0x6c, 0x65, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, - 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x73, 0x73, 0x68, 0x41, 0x75, 0x74, 0x68, 0x18, - 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x52, 0x07, 0x73, 0x73, 0x68, 0x41, - 0x75, 0x74, 0x68, 0x22, 0x82, 0x02, 0x0a, 0x07, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x12, - 0x20, 0x0a, 0x0b, 0x55, 0x73, 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x55, 0x73, 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, 0x69, - 0x6d, 0x12, 0x28, 0x0a, 0x0f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x55, - 0x73, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0f, 0x41, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x4a, 0x0a, 0x0d, 0x6d, - 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, - 0x73, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6d, 0x61, 0x63, 0x68, 0x69, - 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x1a, 0x5f, 0x0a, 0x11, 0x4d, 0x61, 0x63, 0x68, 0x69, - 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, - 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2e, 0x0a, 0x12, 0x4d, 0x61, 0x63, 0x68, - 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x18, - 0x0a, 0x07, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, - 0x07, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x22, 0xbb, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, - 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x65, 0x64, 0x49, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x49, 0x70, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x73, 0x68, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, - 0x0a, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, - 0x64, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x7e, 0x0a, 0x09, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x73, 0x68, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x73, 0x73, 0x68, 0x45, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, - 0x79, 0x12, 0x33, 0x0a, 0x09, 0x6a, 0x77, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x4a, 0x57, 0x54, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x6a, 0x77, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x20, 0x0a, 0x1e, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, - 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xbf, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x76, - 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x48, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x42, + 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x65, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x52, 0x06, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x73, 0x22, 0x79, 0x0a, 0x11, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x65, + 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, + 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xff, 0x01, 0x0a, 0x0d, 0x4e, 0x65, 0x74, + 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x0a, 0x05, 0x73, 0x74, + 0x75, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x05, 0x73, 0x74, 0x75, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x05, 0x74, 0x75, 0x72, 0x6e, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x48, 0x6f, + 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x12, + 0x2e, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x12, + 0x2d, 0x0a, 0x05, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6c, 0x61, + 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x2a, + 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0x98, 0x01, 0x0a, 0x0a, 0x48, + 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x3b, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x3b, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x10, 0x02, + 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x44, + 0x54, 0x4c, 0x53, 0x10, 0x04, 0x22, 0x6d, 0x0a, 0x0b, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x6c, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x26, 0x0a, 0x0e, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x22, 0xad, 0x02, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x12, 0x35, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x12, 0x2e, + 0x0a, 0x12, 0x65, 0x78, 0x69, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x65, 0x78, 0x69, 0x74, + 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, + 0x0a, 0x0d, 0x64, 0x6e, 0x73, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x64, 0x6e, 0x73, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x09, 0x4a, 0x57, 0x54, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, + 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, + 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6b, 0x65, 0x79, 0x73, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6b, 0x65, + 0x79, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x6d, 0x61, + 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0b, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, + 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x09, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x22, 0x7d, 0x0a, 0x13, 0x50, 0x72, + 0x6f, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x36, 0x0a, 0x0a, 0x68, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x68, + 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x1a, 0x0a, + 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0xd3, 0x02, 0x0a, 0x0a, 0x50, 0x65, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x64, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, + 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x71, 0x64, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x12, 0x48, 0x0a, + 0x1f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x65, 0x72, 0x44, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, + 0x65, 0x65, 0x72, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x15, 0x4c, 0x61, 0x7a, 0x79, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x4c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x10, 0x0a, + 0x03, 0x6d, 0x74, 0x75, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6d, 0x74, 0x75, 0x12, + 0x3e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, + 0x52, 0x0a, 0x12, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x22, 0x0a, 0x0c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x22, 0xe8, 0x05, 0x0a, 0x0a, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, + 0x61, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x65, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x3e, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, + 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x12, 0x29, 0x0a, 0x06, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x06, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x33, 0x0a, + 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x4e, + 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x40, 0x0a, 0x0c, 0x6f, 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65, 0x50, 0x65, 0x65, + 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6f, 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65, 0x50, + 0x65, 0x65, 0x72, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, + 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, + 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0d, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x66, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, + 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x14, 0x66, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4f, 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, + 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, + 0x52, 0x75, 0x6c, 0x65, 0x52, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, + 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x1a, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, + 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0f, 0x66, 0x6f, 0x72, + 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0f, + 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, + 0x2d, 0x0a, 0x07, 0x73, 0x73, 0x68, 0x41, 0x75, 0x74, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, + 0x48, 0x41, 0x75, 0x74, 0x68, 0x52, 0x07, 0x73, 0x73, 0x68, 0x41, 0x75, 0x74, 0x68, 0x22, 0x82, + 0x02, 0x0a, 0x07, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x55, 0x73, + 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x55, 0x73, 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x12, 0x28, 0x0a, 0x0f, + 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, + 0x64, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x4a, 0x0a, 0x0d, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, + 0x65, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x41, 0x75, + 0x74, 0x68, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, + 0x72, 0x73, 0x1a, 0x5f, 0x0a, 0x11, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, + 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, + 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x2e, 0x0a, 0x12, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, + 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x65, 0x73, 0x22, 0xbb, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x67, 0x50, 0x75, + 0x62, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x67, 0x50, 0x75, + 0x62, 0x4b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x49, + 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, + 0x64, 0x49, 0x70, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, + 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x71, 0x64, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x12, 0x22, 0x0a, + 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x7e, 0x0a, 0x09, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, + 0x0a, 0x0a, 0x73, 0x73, 0x68, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0a, 0x73, 0x73, 0x68, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1c, + 0x0a, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x33, 0x0a, 0x09, + 0x6a, 0x77, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x57, 0x54, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x6a, 0x77, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x22, 0x20, 0x0a, 0x1e, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0xbf, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, + 0x48, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, + 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, + 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x16, 0x0a, + 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x4f, 0x53, + 0x54, 0x45, 0x44, 0x10, 0x00, 0x22, 0x1e, 0x0a, 0x1c, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5b, 0x0a, 0x15, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x42, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x22, 0x16, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x0a, - 0x0a, 0x06, 0x48, 0x4f, 0x53, 0x54, 0x45, 0x44, 0x10, 0x00, 0x22, 0x1e, 0x0a, 0x1c, 0x50, 0x4b, - 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, - 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5b, 0x0a, 0x15, 0x50, 0x4b, - 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, - 0x6c, 0x6f, 0x77, 0x12, 0x42, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xb8, 0x03, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2e, - 0x0a, 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x44, 0x65, 0x76, 0x69, - 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x24, - 0x0a, 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x55, 0x73, - 0x65, 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x55, 0x73, 0x65, 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x34, 0x0a, 0x15, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x12, 0x22, 0x0a, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, - 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x55, 0x52, 0x4c, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, - 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x4c, - 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, - 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, - 0x61, 0x67, 0x22, 0x93, 0x02, 0x0a, 0x05, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, - 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, - 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x4e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x12, 0x1e, 0x0a, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, - 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, - 0x72, 0x61, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, - 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, - 0x70, 0x6c, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, - 0x75, 0x74, 0x6f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x22, 0xde, 0x01, 0x0a, 0x09, 0x44, 0x4e, 0x53, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x10, + 0x69, 0x67, 0x22, 0xb8, 0x03, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, + 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, + 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1a, 0x0a, + 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x44, 0x65, 0x76, + 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, + 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x55, 0x73, 0x65, 0x49, 0x44, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x55, 0x73, 0x65, 0x49, 0x44, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x34, 0x0a, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, 0x12, + 0x2e, 0x0a, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, + 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x44, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, + 0x1c, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x22, 0x93, 0x02, + 0x0a, 0x05, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, + 0x1e, 0x0a, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, 0x64, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, 0x64, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x4e, 0x65, 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, + 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, + 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x24, 0x0a, + 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, + 0x70, 0x6c, 0x79, 0x22, 0xde, 0x01, 0x0a, 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, - 0x6f, 0x75, 0x70, 0x52, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x38, 0x0a, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, - 0x6f, 0x6e, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, - 0x6e, 0x65, 0x52, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, - 0x28, 0x0a, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x46, 0x6f, 0x72, 0x77, - 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xb8, 0x01, 0x0a, 0x0a, 0x43, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, - 0x12, 0x32, 0x0a, 0x07, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, - 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, - 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x41, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x10, 0x4e, 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, - 0x74, 0x69, 0x76, 0x65, 0x22, 0x74, 0x0a, 0x0c, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x43, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x43, 0x6c, 0x61, - 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x03, 0x54, 0x54, 0x4c, 0x12, 0x14, 0x0a, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x22, 0xb3, 0x01, 0x0a, 0x0f, 0x4e, - 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x38, - 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x0b, 0x4e, 0x61, 0x6d, - 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x32, 0x0a, 0x14, - 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, - 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x22, 0x48, 0x0a, 0x0a, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x0e, - 0x0a, 0x02, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x12, 0x16, - 0x0a, 0x06, 0x4e, 0x53, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, - 0x4e, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xa7, 0x02, 0x0a, 0x0c, 0x46, - 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, - 0x65, 0x65, 0x72, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65, - 0x72, 0x49, 0x50, 0x12, 0x37, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, - 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, - 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x49, 0x44, 0x22, 0x38, 0x0a, 0x0e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x12, 0x10, 0x0a, 0x03, - 0x6d, 0x61, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x22, 0x1e, - 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x6c, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0x96, - 0x01, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x04, 0x70, - 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, - 0x74, 0x12, 0x32, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, - 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, - 0x72, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0x2f, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x42, 0x0f, 0x0a, 0x0d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x87, 0x03, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x22, 0x0a, - 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x73, 0x12, 0x2e, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, - 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, - 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x6f, 0x72, - 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x69, - 0x73, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, - 0x69, 0x73, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x49, 0x44, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, - 0x44, 0x22, 0xf2, 0x01, 0x0a, 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, - 0x52, 0x75, 0x6c, 0x65, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0f, 0x64, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, + 0x12, 0x38, 0x0a, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x52, 0x0b, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x46, 0x6f, + 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, + 0x50, 0x6f, 0x72, 0x74, 0x22, 0xb8, 0x01, 0x0a, 0x0a, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, + 0x6f, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x32, 0x0a, 0x07, 0x52, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, + 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, + 0x32, 0x0a, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, + 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, + 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, 0x69, 0x73, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x4e, + 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x22, + 0x74, 0x0a, 0x0c, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, + 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, + 0x03, 0x54, 0x54, 0x4c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, + 0x14, 0x0a, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x52, 0x44, 0x61, 0x74, 0x61, 0x22, 0xb3, 0x01, 0x0a, 0x0f, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x38, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x18, 0x0a, + 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, + 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, + 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x48, 0x0a, 0x0a, 0x4e, + 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x50, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x53, 0x54, + 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4e, 0x53, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x04, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xa7, 0x02, 0x0a, 0x0c, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, + 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x50, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x50, 0x12, 0x37, + 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, + 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x44, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x12, 0x0a, + 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x6f, 0x72, + 0x74, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x74, 0x72, - 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, - 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, - 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, - 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x2a, 0x4c, 0x0a, 0x0c, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, - 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x03, 0x12, 0x08, - 0x0a, 0x04, 0x49, 0x43, 0x4d, 0x50, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, 0x54, - 0x4f, 0x4d, 0x10, 0x05, 0x2a, 0x20, 0x0a, 0x0d, 0x52, 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x22, 0x0a, 0x0a, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x10, 0x00, - 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, 0x10, 0x01, 0x32, 0xcd, 0x04, 0x0a, 0x11, 0x4d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x12, 0x45, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, - 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, - 0x42, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, - 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x09, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, - 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x44, - 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x4b, 0x43, 0x45, 0x41, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, + 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x22, + 0x38, 0x0a, 0x0e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x63, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x22, 0x1e, 0x0a, 0x06, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0x96, 0x01, 0x0a, 0x08, 0x50, 0x6f, + 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x32, 0x0a, 0x05, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, + 0x6f, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x1a, 0x2f, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x6e, + 0x64, 0x42, 0x0f, 0x0a, 0x0d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x87, 0x03, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, 0x69, 0x72, 0x65, + 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x06, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, + 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, + 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x70, 0x6f, + 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x73, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x26, + 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x49, 0x44, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x44, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x44, 0x22, 0xf2, 0x01, 0x0a, + 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, + 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, + 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, + 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x72, + 0x74, 0x2a, 0x3a, 0x0a, 0x09, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, + 0x0a, 0x0e, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x10, + 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x02, 0x2a, 0x4c, 0x0a, + 0x0c, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0b, 0x0a, + 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, + 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, + 0x55, 0x44, 0x50, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x43, 0x4d, 0x50, 0x10, 0x04, 0x12, + 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x05, 0x2a, 0x20, 0x0a, 0x0d, 0x52, + 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, + 0x49, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x22, 0x0a, + 0x0a, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, + 0x43, 0x43, 0x45, 0x50, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, 0x10, + 0x01, 0x32, 0x96, 0x05, 0x0a, 0x11, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3d, - 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, - 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, - 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x46, + 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x09, 0x69, 0x73, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, + 0x5a, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x18, 0x47, + 0x65, 0x74, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, + 0x61, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, + 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, 0x1c, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, + 0x00, 0x12, 0x47, 0x0a, 0x03, 0x4a, 0x6f, 0x62, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } @@ -4312,142 +4686,152 @@ func file_management_proto_rawDescGZIP() []byte { return file_management_proto_rawDescData } -var file_management_proto_enumTypes = make([]protoimpl.EnumInfo, 5) -var file_management_proto_msgTypes = make([]protoimpl.MessageInfo, 45) +var file_management_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_management_proto_msgTypes = make([]protoimpl.MessageInfo, 49) var file_management_proto_goTypes = []interface{}{ - (RuleProtocol)(0), // 0: management.RuleProtocol - (RuleDirection)(0), // 1: management.RuleDirection - (RuleAction)(0), // 2: management.RuleAction - (HostConfig_Protocol)(0), // 3: management.HostConfig.Protocol - (DeviceAuthorizationFlowProvider)(0), // 4: management.DeviceAuthorizationFlow.provider - (*EncryptedMessage)(nil), // 5: management.EncryptedMessage - (*SyncRequest)(nil), // 6: management.SyncRequest - (*SyncResponse)(nil), // 7: management.SyncResponse - (*SyncMetaRequest)(nil), // 8: management.SyncMetaRequest - (*LoginRequest)(nil), // 9: management.LoginRequest - (*PeerKeys)(nil), // 10: management.PeerKeys - (*Environment)(nil), // 11: management.Environment - (*File)(nil), // 12: management.File - (*Flags)(nil), // 13: management.Flags - (*PeerSystemMeta)(nil), // 14: management.PeerSystemMeta - (*LoginResponse)(nil), // 15: management.LoginResponse - (*ServerKeyResponse)(nil), // 16: management.ServerKeyResponse - (*Empty)(nil), // 17: management.Empty - (*NetbirdConfig)(nil), // 18: management.NetbirdConfig - (*HostConfig)(nil), // 19: management.HostConfig - (*RelayConfig)(nil), // 20: management.RelayConfig - (*FlowConfig)(nil), // 21: management.FlowConfig - (*JWTConfig)(nil), // 22: management.JWTConfig - (*ProtectedHostConfig)(nil), // 23: management.ProtectedHostConfig - (*PeerConfig)(nil), // 24: management.PeerConfig - (*AutoUpdateSettings)(nil), // 25: management.AutoUpdateSettings - (*NetworkMap)(nil), // 26: management.NetworkMap - (*SSHAuth)(nil), // 27: management.SSHAuth - (*MachineUserIndexes)(nil), // 28: management.MachineUserIndexes - (*RemotePeerConfig)(nil), // 29: management.RemotePeerConfig - (*SSHConfig)(nil), // 30: management.SSHConfig - (*DeviceAuthorizationFlowRequest)(nil), // 31: management.DeviceAuthorizationFlowRequest - (*DeviceAuthorizationFlow)(nil), // 32: management.DeviceAuthorizationFlow - (*PKCEAuthorizationFlowRequest)(nil), // 33: management.PKCEAuthorizationFlowRequest - (*PKCEAuthorizationFlow)(nil), // 34: management.PKCEAuthorizationFlow - (*ProviderConfig)(nil), // 35: management.ProviderConfig - (*Route)(nil), // 36: management.Route - (*DNSConfig)(nil), // 37: management.DNSConfig - (*CustomZone)(nil), // 38: management.CustomZone - (*SimpleRecord)(nil), // 39: management.SimpleRecord - (*NameServerGroup)(nil), // 40: management.NameServerGroup - (*NameServer)(nil), // 41: management.NameServer - (*FirewallRule)(nil), // 42: management.FirewallRule - (*NetworkAddress)(nil), // 43: management.NetworkAddress - (*Checks)(nil), // 44: management.Checks - (*PortInfo)(nil), // 45: management.PortInfo - (*RouteFirewallRule)(nil), // 46: management.RouteFirewallRule - (*ForwardingRule)(nil), // 47: management.ForwardingRule - nil, // 48: management.SSHAuth.MachineUsersEntry - (*PortInfo_Range)(nil), // 49: management.PortInfo.Range - (*timestamppb.Timestamp)(nil), // 50: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 51: google.protobuf.Duration + (JobStatus)(0), // 0: management.JobStatus + (RuleProtocol)(0), // 1: management.RuleProtocol + (RuleDirection)(0), // 2: management.RuleDirection + (RuleAction)(0), // 3: management.RuleAction + (HostConfig_Protocol)(0), // 4: management.HostConfig.Protocol + (DeviceAuthorizationFlowProvider)(0), // 5: management.DeviceAuthorizationFlow.provider + (*EncryptedMessage)(nil), // 6: management.EncryptedMessage + (*JobRequest)(nil), // 7: management.JobRequest + (*JobResponse)(nil), // 8: management.JobResponse + (*BundleParameters)(nil), // 9: management.BundleParameters + (*BundleResult)(nil), // 10: management.BundleResult + (*SyncRequest)(nil), // 11: management.SyncRequest + (*SyncResponse)(nil), // 12: management.SyncResponse + (*SyncMetaRequest)(nil), // 13: management.SyncMetaRequest + (*LoginRequest)(nil), // 14: management.LoginRequest + (*PeerKeys)(nil), // 15: management.PeerKeys + (*Environment)(nil), // 16: management.Environment + (*File)(nil), // 17: management.File + (*Flags)(nil), // 18: management.Flags + (*PeerSystemMeta)(nil), // 19: management.PeerSystemMeta + (*LoginResponse)(nil), // 20: management.LoginResponse + (*ServerKeyResponse)(nil), // 21: management.ServerKeyResponse + (*Empty)(nil), // 22: management.Empty + (*NetbirdConfig)(nil), // 23: management.NetbirdConfig + (*HostConfig)(nil), // 24: management.HostConfig + (*RelayConfig)(nil), // 25: management.RelayConfig + (*FlowConfig)(nil), // 26: management.FlowConfig + (*JWTConfig)(nil), // 27: management.JWTConfig + (*ProtectedHostConfig)(nil), // 28: management.ProtectedHostConfig + (*PeerConfig)(nil), // 29: management.PeerConfig + (*AutoUpdateSettings)(nil), // 30: management.AutoUpdateSettings + (*NetworkMap)(nil), // 31: management.NetworkMap + (*SSHAuth)(nil), // 32: management.SSHAuth + (*MachineUserIndexes)(nil), // 33: management.MachineUserIndexes + (*RemotePeerConfig)(nil), // 34: management.RemotePeerConfig + (*SSHConfig)(nil), // 35: management.SSHConfig + (*DeviceAuthorizationFlowRequest)(nil), // 36: management.DeviceAuthorizationFlowRequest + (*DeviceAuthorizationFlow)(nil), // 37: management.DeviceAuthorizationFlow + (*PKCEAuthorizationFlowRequest)(nil), // 38: management.PKCEAuthorizationFlowRequest + (*PKCEAuthorizationFlow)(nil), // 39: management.PKCEAuthorizationFlow + (*ProviderConfig)(nil), // 40: management.ProviderConfig + (*Route)(nil), // 41: management.Route + (*DNSConfig)(nil), // 42: management.DNSConfig + (*CustomZone)(nil), // 43: management.CustomZone + (*SimpleRecord)(nil), // 44: management.SimpleRecord + (*NameServerGroup)(nil), // 45: management.NameServerGroup + (*NameServer)(nil), // 46: management.NameServer + (*FirewallRule)(nil), // 47: management.FirewallRule + (*NetworkAddress)(nil), // 48: management.NetworkAddress + (*Checks)(nil), // 49: management.Checks + (*PortInfo)(nil), // 50: management.PortInfo + (*RouteFirewallRule)(nil), // 51: management.RouteFirewallRule + (*ForwardingRule)(nil), // 52: management.ForwardingRule + nil, // 53: management.SSHAuth.MachineUsersEntry + (*PortInfo_Range)(nil), // 54: management.PortInfo.Range + (*timestamppb.Timestamp)(nil), // 55: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 56: google.protobuf.Duration } var file_management_proto_depIdxs = []int32{ - 14, // 0: management.SyncRequest.meta:type_name -> management.PeerSystemMeta - 18, // 1: management.SyncResponse.netbirdConfig:type_name -> management.NetbirdConfig - 24, // 2: management.SyncResponse.peerConfig:type_name -> management.PeerConfig - 29, // 3: management.SyncResponse.remotePeers:type_name -> management.RemotePeerConfig - 26, // 4: management.SyncResponse.NetworkMap:type_name -> management.NetworkMap - 44, // 5: management.SyncResponse.Checks:type_name -> management.Checks - 14, // 6: management.SyncMetaRequest.meta:type_name -> management.PeerSystemMeta - 14, // 7: management.LoginRequest.meta:type_name -> management.PeerSystemMeta - 10, // 8: management.LoginRequest.peerKeys:type_name -> management.PeerKeys - 43, // 9: management.PeerSystemMeta.networkAddresses:type_name -> management.NetworkAddress - 11, // 10: management.PeerSystemMeta.environment:type_name -> management.Environment - 12, // 11: management.PeerSystemMeta.files:type_name -> management.File - 13, // 12: management.PeerSystemMeta.flags:type_name -> management.Flags - 18, // 13: management.LoginResponse.netbirdConfig:type_name -> management.NetbirdConfig - 24, // 14: management.LoginResponse.peerConfig:type_name -> management.PeerConfig - 44, // 15: management.LoginResponse.Checks:type_name -> management.Checks - 50, // 16: management.ServerKeyResponse.expiresAt:type_name -> google.protobuf.Timestamp - 19, // 17: management.NetbirdConfig.stuns:type_name -> management.HostConfig - 23, // 18: management.NetbirdConfig.turns:type_name -> management.ProtectedHostConfig - 19, // 19: management.NetbirdConfig.signal:type_name -> management.HostConfig - 20, // 20: management.NetbirdConfig.relay:type_name -> management.RelayConfig - 21, // 21: management.NetbirdConfig.flow:type_name -> management.FlowConfig - 3, // 22: management.HostConfig.protocol:type_name -> management.HostConfig.Protocol - 51, // 23: management.FlowConfig.interval:type_name -> google.protobuf.Duration - 19, // 24: management.ProtectedHostConfig.hostConfig:type_name -> management.HostConfig - 30, // 25: management.PeerConfig.sshConfig:type_name -> management.SSHConfig - 25, // 26: management.PeerConfig.autoUpdate:type_name -> management.AutoUpdateSettings - 24, // 27: management.NetworkMap.peerConfig:type_name -> management.PeerConfig - 29, // 28: management.NetworkMap.remotePeers:type_name -> management.RemotePeerConfig - 36, // 29: management.NetworkMap.Routes:type_name -> management.Route - 37, // 30: management.NetworkMap.DNSConfig:type_name -> management.DNSConfig - 29, // 31: management.NetworkMap.offlinePeers:type_name -> management.RemotePeerConfig - 42, // 32: management.NetworkMap.FirewallRules:type_name -> management.FirewallRule - 46, // 33: management.NetworkMap.routesFirewallRules:type_name -> management.RouteFirewallRule - 47, // 34: management.NetworkMap.forwardingRules:type_name -> management.ForwardingRule - 27, // 35: management.NetworkMap.sshAuth:type_name -> management.SSHAuth - 48, // 36: management.SSHAuth.machine_users:type_name -> management.SSHAuth.MachineUsersEntry - 30, // 37: management.RemotePeerConfig.sshConfig:type_name -> management.SSHConfig - 22, // 38: management.SSHConfig.jwtConfig:type_name -> management.JWTConfig - 4, // 39: management.DeviceAuthorizationFlow.Provider:type_name -> management.DeviceAuthorizationFlow.provider - 35, // 40: management.DeviceAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig - 35, // 41: management.PKCEAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig - 40, // 42: management.DNSConfig.NameServerGroups:type_name -> management.NameServerGroup - 38, // 43: management.DNSConfig.CustomZones:type_name -> management.CustomZone - 39, // 44: management.CustomZone.Records:type_name -> management.SimpleRecord - 41, // 45: management.NameServerGroup.NameServers:type_name -> management.NameServer - 1, // 46: management.FirewallRule.Direction:type_name -> management.RuleDirection - 2, // 47: management.FirewallRule.Action:type_name -> management.RuleAction - 0, // 48: management.FirewallRule.Protocol:type_name -> management.RuleProtocol - 45, // 49: management.FirewallRule.PortInfo:type_name -> management.PortInfo - 49, // 50: management.PortInfo.range:type_name -> management.PortInfo.Range - 2, // 51: management.RouteFirewallRule.action:type_name -> management.RuleAction - 0, // 52: management.RouteFirewallRule.protocol:type_name -> management.RuleProtocol - 45, // 53: management.RouteFirewallRule.portInfo:type_name -> management.PortInfo - 0, // 54: management.ForwardingRule.protocol:type_name -> management.RuleProtocol - 45, // 55: management.ForwardingRule.destinationPort:type_name -> management.PortInfo - 45, // 56: management.ForwardingRule.translatedPort:type_name -> management.PortInfo - 28, // 57: management.SSHAuth.MachineUsersEntry.value:type_name -> management.MachineUserIndexes - 5, // 58: management.ManagementService.Login:input_type -> management.EncryptedMessage - 5, // 59: management.ManagementService.Sync:input_type -> management.EncryptedMessage - 17, // 60: management.ManagementService.GetServerKey:input_type -> management.Empty - 17, // 61: management.ManagementService.isHealthy:input_type -> management.Empty - 5, // 62: management.ManagementService.GetDeviceAuthorizationFlow:input_type -> management.EncryptedMessage - 5, // 63: management.ManagementService.GetPKCEAuthorizationFlow:input_type -> management.EncryptedMessage - 5, // 64: management.ManagementService.SyncMeta:input_type -> management.EncryptedMessage - 5, // 65: management.ManagementService.Logout:input_type -> management.EncryptedMessage - 5, // 66: management.ManagementService.Login:output_type -> management.EncryptedMessage - 5, // 67: management.ManagementService.Sync:output_type -> management.EncryptedMessage - 16, // 68: management.ManagementService.GetServerKey:output_type -> management.ServerKeyResponse - 17, // 69: management.ManagementService.isHealthy:output_type -> management.Empty - 5, // 70: management.ManagementService.GetDeviceAuthorizationFlow:output_type -> management.EncryptedMessage - 5, // 71: management.ManagementService.GetPKCEAuthorizationFlow:output_type -> management.EncryptedMessage - 17, // 72: management.ManagementService.SyncMeta:output_type -> management.Empty - 17, // 73: management.ManagementService.Logout:output_type -> management.Empty - 66, // [66:74] is the sub-list for method output_type - 58, // [58:66] is the sub-list for method input_type - 58, // [58:58] is the sub-list for extension type_name - 58, // [58:58] is the sub-list for extension extendee - 0, // [0:58] is the sub-list for field type_name + 9, // 0: management.JobRequest.bundle:type_name -> management.BundleParameters + 0, // 1: management.JobResponse.status:type_name -> management.JobStatus + 10, // 2: management.JobResponse.bundle:type_name -> management.BundleResult + 19, // 3: management.SyncRequest.meta:type_name -> management.PeerSystemMeta + 23, // 4: management.SyncResponse.netbirdConfig:type_name -> management.NetbirdConfig + 29, // 5: management.SyncResponse.peerConfig:type_name -> management.PeerConfig + 34, // 6: management.SyncResponse.remotePeers:type_name -> management.RemotePeerConfig + 31, // 7: management.SyncResponse.NetworkMap:type_name -> management.NetworkMap + 49, // 8: management.SyncResponse.Checks:type_name -> management.Checks + 19, // 9: management.SyncMetaRequest.meta:type_name -> management.PeerSystemMeta + 19, // 10: management.LoginRequest.meta:type_name -> management.PeerSystemMeta + 15, // 11: management.LoginRequest.peerKeys:type_name -> management.PeerKeys + 48, // 12: management.PeerSystemMeta.networkAddresses:type_name -> management.NetworkAddress + 16, // 13: management.PeerSystemMeta.environment:type_name -> management.Environment + 17, // 14: management.PeerSystemMeta.files:type_name -> management.File + 18, // 15: management.PeerSystemMeta.flags:type_name -> management.Flags + 23, // 16: management.LoginResponse.netbirdConfig:type_name -> management.NetbirdConfig + 29, // 17: management.LoginResponse.peerConfig:type_name -> management.PeerConfig + 49, // 18: management.LoginResponse.Checks:type_name -> management.Checks + 55, // 19: management.ServerKeyResponse.expiresAt:type_name -> google.protobuf.Timestamp + 24, // 20: management.NetbirdConfig.stuns:type_name -> management.HostConfig + 28, // 21: management.NetbirdConfig.turns:type_name -> management.ProtectedHostConfig + 24, // 22: management.NetbirdConfig.signal:type_name -> management.HostConfig + 25, // 23: management.NetbirdConfig.relay:type_name -> management.RelayConfig + 26, // 24: management.NetbirdConfig.flow:type_name -> management.FlowConfig + 4, // 25: management.HostConfig.protocol:type_name -> management.HostConfig.Protocol + 56, // 26: management.FlowConfig.interval:type_name -> google.protobuf.Duration + 24, // 27: management.ProtectedHostConfig.hostConfig:type_name -> management.HostConfig + 35, // 28: management.PeerConfig.sshConfig:type_name -> management.SSHConfig + 30, // 29: management.PeerConfig.autoUpdate:type_name -> management.AutoUpdateSettings + 29, // 30: management.NetworkMap.peerConfig:type_name -> management.PeerConfig + 34, // 31: management.NetworkMap.remotePeers:type_name -> management.RemotePeerConfig + 41, // 32: management.NetworkMap.Routes:type_name -> management.Route + 42, // 33: management.NetworkMap.DNSConfig:type_name -> management.DNSConfig + 34, // 34: management.NetworkMap.offlinePeers:type_name -> management.RemotePeerConfig + 47, // 35: management.NetworkMap.FirewallRules:type_name -> management.FirewallRule + 51, // 36: management.NetworkMap.routesFirewallRules:type_name -> management.RouteFirewallRule + 52, // 37: management.NetworkMap.forwardingRules:type_name -> management.ForwardingRule + 32, // 38: management.NetworkMap.sshAuth:type_name -> management.SSHAuth + 53, // 39: management.SSHAuth.machine_users:type_name -> management.SSHAuth.MachineUsersEntry + 35, // 40: management.RemotePeerConfig.sshConfig:type_name -> management.SSHConfig + 27, // 41: management.SSHConfig.jwtConfig:type_name -> management.JWTConfig + 5, // 42: management.DeviceAuthorizationFlow.Provider:type_name -> management.DeviceAuthorizationFlow.provider + 40, // 43: management.DeviceAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig + 40, // 44: management.PKCEAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig + 45, // 45: management.DNSConfig.NameServerGroups:type_name -> management.NameServerGroup + 43, // 46: management.DNSConfig.CustomZones:type_name -> management.CustomZone + 44, // 47: management.CustomZone.Records:type_name -> management.SimpleRecord + 46, // 48: management.NameServerGroup.NameServers:type_name -> management.NameServer + 2, // 49: management.FirewallRule.Direction:type_name -> management.RuleDirection + 3, // 50: management.FirewallRule.Action:type_name -> management.RuleAction + 1, // 51: management.FirewallRule.Protocol:type_name -> management.RuleProtocol + 50, // 52: management.FirewallRule.PortInfo:type_name -> management.PortInfo + 54, // 53: management.PortInfo.range:type_name -> management.PortInfo.Range + 3, // 54: management.RouteFirewallRule.action:type_name -> management.RuleAction + 1, // 55: management.RouteFirewallRule.protocol:type_name -> management.RuleProtocol + 50, // 56: management.RouteFirewallRule.portInfo:type_name -> management.PortInfo + 1, // 57: management.ForwardingRule.protocol:type_name -> management.RuleProtocol + 50, // 58: management.ForwardingRule.destinationPort:type_name -> management.PortInfo + 50, // 59: management.ForwardingRule.translatedPort:type_name -> management.PortInfo + 33, // 60: management.SSHAuth.MachineUsersEntry.value:type_name -> management.MachineUserIndexes + 6, // 61: management.ManagementService.Login:input_type -> management.EncryptedMessage + 6, // 62: management.ManagementService.Sync:input_type -> management.EncryptedMessage + 22, // 63: management.ManagementService.GetServerKey:input_type -> management.Empty + 22, // 64: management.ManagementService.isHealthy:input_type -> management.Empty + 6, // 65: management.ManagementService.GetDeviceAuthorizationFlow:input_type -> management.EncryptedMessage + 6, // 66: management.ManagementService.GetPKCEAuthorizationFlow:input_type -> management.EncryptedMessage + 6, // 67: management.ManagementService.SyncMeta:input_type -> management.EncryptedMessage + 6, // 68: management.ManagementService.Logout:input_type -> management.EncryptedMessage + 6, // 69: management.ManagementService.Job:input_type -> management.EncryptedMessage + 6, // 70: management.ManagementService.Login:output_type -> management.EncryptedMessage + 6, // 71: management.ManagementService.Sync:output_type -> management.EncryptedMessage + 21, // 72: management.ManagementService.GetServerKey:output_type -> management.ServerKeyResponse + 22, // 73: management.ManagementService.isHealthy:output_type -> management.Empty + 6, // 74: management.ManagementService.GetDeviceAuthorizationFlow:output_type -> management.EncryptedMessage + 6, // 75: management.ManagementService.GetPKCEAuthorizationFlow:output_type -> management.EncryptedMessage + 22, // 76: management.ManagementService.SyncMeta:output_type -> management.Empty + 22, // 77: management.ManagementService.Logout:output_type -> management.Empty + 6, // 78: management.ManagementService.Job:output_type -> management.EncryptedMessage + 70, // [70:79] is the sub-list for method output_type + 61, // [61:70] is the sub-list for method input_type + 61, // [61:61] is the sub-list for extension type_name + 61, // [61:61] is the sub-list for extension extendee + 0, // [0:61] is the sub-list for field type_name } func init() { file_management_proto_init() } @@ -4469,7 +4853,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SyncRequest); i { + switch v := v.(*JobRequest); i { case 0: return &v.state case 1: @@ -4481,7 +4865,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SyncResponse); i { + switch v := v.(*JobResponse); i { case 0: return &v.state case 1: @@ -4493,7 +4877,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SyncMetaRequest); i { + switch v := v.(*BundleParameters); i { case 0: return &v.state case 1: @@ -4505,7 +4889,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoginRequest); i { + switch v := v.(*BundleResult); i { case 0: return &v.state case 1: @@ -4517,7 +4901,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerKeys); i { + switch v := v.(*SyncRequest); i { case 0: return &v.state case 1: @@ -4529,7 +4913,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Environment); i { + switch v := v.(*SyncResponse); i { case 0: return &v.state case 1: @@ -4541,7 +4925,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*File); i { + switch v := v.(*SyncMetaRequest); i { case 0: return &v.state case 1: @@ -4553,7 +4937,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Flags); i { + switch v := v.(*LoginRequest); i { case 0: return &v.state case 1: @@ -4565,7 +4949,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerSystemMeta); i { + switch v := v.(*PeerKeys); i { case 0: return &v.state case 1: @@ -4577,7 +4961,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoginResponse); i { + switch v := v.(*Environment); i { case 0: return &v.state case 1: @@ -4589,7 +4973,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerKeyResponse); i { + switch v := v.(*File); i { case 0: return &v.state case 1: @@ -4601,7 +4985,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Empty); i { + switch v := v.(*Flags); i { case 0: return &v.state case 1: @@ -4613,7 +4997,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NetbirdConfig); i { + switch v := v.(*PeerSystemMeta); i { case 0: return &v.state case 1: @@ -4625,7 +5009,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HostConfig); i { + switch v := v.(*LoginResponse); i { case 0: return &v.state case 1: @@ -4637,7 +5021,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RelayConfig); i { + switch v := v.(*ServerKeyResponse); i { case 0: return &v.state case 1: @@ -4649,7 +5033,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FlowConfig); i { + switch v := v.(*Empty); i { case 0: return &v.state case 1: @@ -4661,7 +5045,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*JWTConfig); i { + switch v := v.(*NetbirdConfig); i { case 0: return &v.state case 1: @@ -4673,7 +5057,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtectedHostConfig); i { + switch v := v.(*HostConfig); i { case 0: return &v.state case 1: @@ -4685,7 +5069,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerConfig); i { + switch v := v.(*RelayConfig); i { case 0: return &v.state case 1: @@ -4697,7 +5081,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AutoUpdateSettings); i { + switch v := v.(*FlowConfig); i { case 0: return &v.state case 1: @@ -4709,7 +5093,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NetworkMap); i { + switch v := v.(*JWTConfig); i { case 0: return &v.state case 1: @@ -4721,7 +5105,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SSHAuth); i { + switch v := v.(*ProtectedHostConfig); i { case 0: return &v.state case 1: @@ -4733,7 +5117,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MachineUserIndexes); i { + switch v := v.(*PeerConfig); i { case 0: return &v.state case 1: @@ -4745,7 +5129,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemotePeerConfig); i { + switch v := v.(*AutoUpdateSettings); i { case 0: return &v.state case 1: @@ -4757,7 +5141,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SSHConfig); i { + switch v := v.(*NetworkMap); i { case 0: return &v.state case 1: @@ -4769,7 +5153,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeviceAuthorizationFlowRequest); i { + switch v := v.(*SSHAuth); i { case 0: return &v.state case 1: @@ -4781,7 +5165,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeviceAuthorizationFlow); i { + switch v := v.(*MachineUserIndexes); i { case 0: return &v.state case 1: @@ -4793,7 +5177,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PKCEAuthorizationFlowRequest); i { + switch v := v.(*RemotePeerConfig); i { case 0: return &v.state case 1: @@ -4805,7 +5189,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PKCEAuthorizationFlow); i { + switch v := v.(*SSHConfig); i { case 0: return &v.state case 1: @@ -4817,7 +5201,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProviderConfig); i { + switch v := v.(*DeviceAuthorizationFlowRequest); i { case 0: return &v.state case 1: @@ -4829,7 +5213,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Route); i { + switch v := v.(*DeviceAuthorizationFlow); i { case 0: return &v.state case 1: @@ -4841,7 +5225,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DNSConfig); i { + switch v := v.(*PKCEAuthorizationFlowRequest); i { case 0: return &v.state case 1: @@ -4853,7 +5237,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CustomZone); i { + switch v := v.(*PKCEAuthorizationFlow); i { case 0: return &v.state case 1: @@ -4865,7 +5249,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SimpleRecord); i { + switch v := v.(*ProviderConfig); i { case 0: return &v.state case 1: @@ -4877,7 +5261,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NameServerGroup); i { + switch v := v.(*Route); i { case 0: return &v.state case 1: @@ -4889,7 +5273,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NameServer); i { + switch v := v.(*DNSConfig); i { case 0: return &v.state case 1: @@ -4901,7 +5285,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FirewallRule); i { + switch v := v.(*CustomZone); i { case 0: return &v.state case 1: @@ -4913,7 +5297,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NetworkAddress); i { + switch v := v.(*SimpleRecord); i { case 0: return &v.state case 1: @@ -4925,7 +5309,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Checks); i { + switch v := v.(*NameServerGroup); i { case 0: return &v.state case 1: @@ -4937,7 +5321,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PortInfo); i { + switch v := v.(*NameServer); i { case 0: return &v.state case 1: @@ -4949,7 +5333,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RouteFirewallRule); i { + switch v := v.(*FirewallRule); i { case 0: return &v.state case 1: @@ -4961,7 +5345,19 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ForwardingRule); i { + switch v := v.(*NetworkAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_management_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Checks); i { case 0: return &v.state case 1: @@ -4973,6 +5369,42 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PortInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_management_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteFirewallRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_management_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ForwardingRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_management_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PortInfo_Range); i { case 0: return &v.state @@ -4985,7 +5417,13 @@ func file_management_proto_init() { } } } - file_management_proto_msgTypes[40].OneofWrappers = []interface{}{ + file_management_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*JobRequest_Bundle)(nil), + } + file_management_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*JobResponse_Bundle)(nil), + } + file_management_proto_msgTypes[44].OneofWrappers = []interface{}{ (*PortInfo_Port)(nil), (*PortInfo_Range_)(nil), } @@ -4994,8 +5432,8 @@ func file_management_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_management_proto_rawDesc, - NumEnums: 5, - NumMessages: 45, + NumEnums: 6, + NumMessages: 49, NumExtensions: 0, NumServices: 1, }, diff --git a/shared/management/proto/management.proto b/shared/management/proto/management.proto index e44b49781..d97d66819 100644 --- a/shared/management/proto/management.proto +++ b/shared/management/proto/management.proto @@ -48,6 +48,9 @@ service ManagementService { // Logout logs out the peer and removes it from the management server rpc Logout(EncryptedMessage) returns (Empty) {} + + // Executes a job on a target peer (e.g., debug bundle) + rpc Job(stream EncryptedMessage) returns (stream EncryptedMessage) {} } message EncryptedMessage { @@ -60,6 +63,42 @@ message EncryptedMessage { int32 version = 3; } +message JobRequest { + bytes ID = 1; + + oneof workload_parameters { + BundleParameters bundle = 10; + //OtherParameters other = 11; + } +} + +enum JobStatus { + unknown_status = 0; //placeholder + succeeded = 1; + failed = 2; +} + +message JobResponse{ + bytes ID = 1; + JobStatus status=2; + bytes Reason=3; + oneof workload_results { + BundleResult bundle = 10; + //OtherResult other = 11; + } +} + +message BundleParameters { + bool bundle_for = 1; + int64 bundle_for_time = 2; + int32 log_file_count = 3; + bool anonymize = 4; +} + +message BundleResult { + string upload_key = 1; +} + message SyncRequest { // Meta data of the peer PeerSystemMeta meta = 1; diff --git a/shared/management/proto/management_grpc.pb.go b/shared/management/proto/management_grpc.pb.go index 5b189334d..b78e21aaa 100644 --- a/shared/management/proto/management_grpc.pb.go +++ b/shared/management/proto/management_grpc.pb.go @@ -50,6 +50,8 @@ type ManagementServiceClient interface { SyncMeta(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*Empty, error) // Logout logs out the peer and removes it from the management server Logout(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*Empty, error) + // Executes a job on a target peer (e.g., debug bundle) + Job(ctx context.Context, opts ...grpc.CallOption) (ManagementService_JobClient, error) } type managementServiceClient struct { @@ -155,6 +157,37 @@ func (c *managementServiceClient) Logout(ctx context.Context, in *EncryptedMessa return out, nil } +func (c *managementServiceClient) Job(ctx context.Context, opts ...grpc.CallOption) (ManagementService_JobClient, error) { + stream, err := c.cc.NewStream(ctx, &ManagementService_ServiceDesc.Streams[1], "/management.ManagementService/Job", opts...) + if err != nil { + return nil, err + } + x := &managementServiceJobClient{stream} + return x, nil +} + +type ManagementService_JobClient interface { + Send(*EncryptedMessage) error + Recv() (*EncryptedMessage, error) + grpc.ClientStream +} + +type managementServiceJobClient struct { + grpc.ClientStream +} + +func (x *managementServiceJobClient) Send(m *EncryptedMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *managementServiceJobClient) Recv() (*EncryptedMessage, error) { + m := new(EncryptedMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // ManagementServiceServer is the server API for ManagementService service. // All implementations must embed UnimplementedManagementServiceServer // for forward compatibility @@ -191,6 +224,8 @@ type ManagementServiceServer interface { SyncMeta(context.Context, *EncryptedMessage) (*Empty, error) // Logout logs out the peer and removes it from the management server Logout(context.Context, *EncryptedMessage) (*Empty, error) + // Executes a job on a target peer (e.g., debug bundle) + Job(ManagementService_JobServer) error mustEmbedUnimplementedManagementServiceServer() } @@ -222,6 +257,9 @@ func (UnimplementedManagementServiceServer) SyncMeta(context.Context, *Encrypted func (UnimplementedManagementServiceServer) Logout(context.Context, *EncryptedMessage) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Logout not implemented") } +func (UnimplementedManagementServiceServer) Job(ManagementService_JobServer) error { + return status.Errorf(codes.Unimplemented, "method Job not implemented") +} func (UnimplementedManagementServiceServer) mustEmbedUnimplementedManagementServiceServer() {} // UnsafeManagementServiceServer may be embedded to opt out of forward compatibility for this service. @@ -382,6 +420,32 @@ func _ManagementService_Logout_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _ManagementService_Job_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ManagementServiceServer).Job(&managementServiceJobServer{stream}) +} + +type ManagementService_JobServer interface { + Send(*EncryptedMessage) error + Recv() (*EncryptedMessage, error) + grpc.ServerStream +} + +type managementServiceJobServer struct { + grpc.ServerStream +} + +func (x *managementServiceJobServer) Send(m *EncryptedMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *managementServiceJobServer) Recv() (*EncryptedMessage, error) { + m := new(EncryptedMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // ManagementService_ServiceDesc is the grpc.ServiceDesc for ManagementService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -424,6 +488,12 @@ var ManagementService_ServiceDesc = grpc.ServiceDesc{ Handler: _ManagementService_Sync_Handler, ServerStreams: true, }, + { + StreamName: "Job", + Handler: _ManagementService_Job_Handler, + ServerStreams: true, + ClientStreams: true, + }, }, Metadata: "management.proto", } From 50da5074e78bc1aaf2d0c0d0c2f867d9bf6f858e Mon Sep 17 00:00:00 2001 From: Diego Romar Date: Tue, 20 Jan 2026 07:14:33 -0300 Subject: [PATCH 058/374] [client] change notifyDisconnected call (#5138) On handleJobStream, when handling error codes from receiveJobRequest in the switch-case, notifying disconnected in cases where it isn't a disconnection breaks connection status reporting on mobile peers. This commit changes it so it isn't called on Canceled or Unimplemented status codes. --- shared/management/client/grpc.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/shared/management/client/grpc.go b/shared/management/client/grpc.go index e9dbae892..d54c8f870 100644 --- a/shared/management/client/grpc.go +++ b/shared/management/client/grpc.go @@ -185,10 +185,10 @@ func (c *GrpcClient) handleJobStream( for { jobReq, err := c.receiveJobRequest(ctx, stream, serverPubKey) if err != nil { - c.notifyDisconnected(err) if s, ok := gstatus.FromError(err); ok { switch s.Code() { case codes.PermissionDenied: + c.notifyDisconnected(err) return backoff.Permanent(err) // unrecoverable error, propagate to the upper layer case codes.Canceled: log.Debugf("management connection context has been canceled, this usually indicates shutdown") @@ -198,11 +198,13 @@ func (c *GrpcClient) handleJobStream( "Please update the management service to use this feature.") return nil default: + c.notifyDisconnected(err) log.Warnf("disconnected from the Management service but will retry silently. Reason: %v", err) return err } } else { // non-gRPC error + c.notifyDisconnected(err) log.Warnf("disconnected from the Management service but will retry silently. Reason: %v", err) return err } From a0b0b664b6ad777cb5730af0d561cc8915e5c314 Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Tue, 20 Jan 2026 14:16:42 +0100 Subject: [PATCH 059/374] Local user password change (embedded IdP) (#5132) --- idp/dex/connector.go | 356 ++++++++++++++++++ idp/dex/provider.go | 326 +--------------- management/server/account/manager.go | 1 + management/server/activity/codes.go | 6 +- .../http/handlers/users/users_handler.go | 44 +++ .../http/handlers/users/users_handler_test.go | 115 ++++++ management/server/idp/embedded.go | 37 +- management/server/idp/embedded_test.go | 65 ++++ management/server/mock_server/account_mock.go | 15 +- management/server/user.go | 46 ++- shared/management/http/api/openapi.yml | 51 +++ shared/management/http/api/types.gen.go | 12 + 12 files changed, 754 insertions(+), 320 deletions(-) create mode 100644 idp/dex/connector.go diff --git a/idp/dex/connector.go b/idp/dex/connector.go new file mode 100644 index 000000000..cad682141 --- /dev/null +++ b/idp/dex/connector.go @@ -0,0 +1,356 @@ +// Package dex provides an embedded Dex OIDC identity provider. +package dex + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/dexidp/dex/storage" +) + +// ConnectorConfig represents the configuration for an identity provider connector +type ConnectorConfig struct { + // ID is the unique identifier for the connector + ID string + // Name is a human-readable name for the connector + Name string + // Type is the connector type (oidc, google, microsoft) + Type string + // Issuer is the OIDC issuer URL (for OIDC-based connectors) + Issuer string + // ClientID is the OAuth2 client ID + ClientID string + // ClientSecret is the OAuth2 client secret + ClientSecret string + // RedirectURI is the OAuth2 redirect URI + RedirectURI string +} + +// CreateConnector creates a new connector in Dex storage. +// It maps the connector config to the appropriate Dex connector type and configuration. +func (p *Provider) CreateConnector(ctx context.Context, cfg *ConnectorConfig) (*ConnectorConfig, error) { + // Fill in the redirect URI if not provided + if cfg.RedirectURI == "" { + cfg.RedirectURI = p.GetRedirectURI() + } + + storageConn, err := p.buildStorageConnector(cfg) + if err != nil { + return nil, fmt.Errorf("failed to build connector: %w", err) + } + + if err := p.storage.CreateConnector(ctx, storageConn); err != nil { + return nil, fmt.Errorf("failed to create connector: %w", err) + } + + p.logger.Info("connector created", "id", cfg.ID, "type", cfg.Type) + return cfg, nil +} + +// GetConnector retrieves a connector by ID from Dex storage. +func (p *Provider) GetConnector(ctx context.Context, id string) (*ConnectorConfig, error) { + conn, err := p.storage.GetConnector(ctx, id) + if err != nil { + if err == storage.ErrNotFound { + return nil, err + } + return nil, fmt.Errorf("failed to get connector: %w", err) + } + + return p.parseStorageConnector(conn) +} + +// ListConnectors returns all connectors from Dex storage (excluding the local connector). +func (p *Provider) ListConnectors(ctx context.Context) ([]*ConnectorConfig, error) { + connectors, err := p.storage.ListConnectors(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list connectors: %w", err) + } + + result := make([]*ConnectorConfig, 0, len(connectors)) + for _, conn := range connectors { + // Skip the local password connector + if conn.ID == "local" && conn.Type == "local" { + continue + } + + cfg, err := p.parseStorageConnector(conn) + if err != nil { + p.logger.Warn("failed to parse connector", "id", conn.ID, "error", err) + continue + } + result = append(result, cfg) + } + + return result, nil +} + +// UpdateConnector updates an existing connector in Dex storage. +// It merges incoming updates with existing values to prevent data loss on partial updates. +func (p *Provider) UpdateConnector(ctx context.Context, cfg *ConnectorConfig) error { + if err := p.storage.UpdateConnector(ctx, cfg.ID, func(old storage.Connector) (storage.Connector, error) { + oldCfg, err := p.parseStorageConnector(old) + if err != nil { + return storage.Connector{}, fmt.Errorf("failed to parse existing connector: %w", err) + } + + mergeConnectorConfig(cfg, oldCfg) + + storageConn, err := p.buildStorageConnector(cfg) + if err != nil { + return storage.Connector{}, fmt.Errorf("failed to build connector: %w", err) + } + return storageConn, nil + }); err != nil { + return fmt.Errorf("failed to update connector: %w", err) + } + + p.logger.Info("connector updated", "id", cfg.ID, "type", cfg.Type) + return nil +} + +// mergeConnectorConfig preserves existing values for empty fields in the update. +func mergeConnectorConfig(cfg, oldCfg *ConnectorConfig) { + if cfg.ClientSecret == "" { + cfg.ClientSecret = oldCfg.ClientSecret + } + if cfg.RedirectURI == "" { + cfg.RedirectURI = oldCfg.RedirectURI + } + if cfg.Issuer == "" && cfg.Type == oldCfg.Type { + cfg.Issuer = oldCfg.Issuer + } + if cfg.ClientID == "" { + cfg.ClientID = oldCfg.ClientID + } + if cfg.Name == "" { + cfg.Name = oldCfg.Name + } +} + +// DeleteConnector removes a connector from Dex storage. +func (p *Provider) DeleteConnector(ctx context.Context, id string) error { + // Prevent deletion of the local connector + if id == "local" { + return fmt.Errorf("cannot delete the local password connector") + } + + if err := p.storage.DeleteConnector(ctx, id); err != nil { + return fmt.Errorf("failed to delete connector: %w", err) + } + + p.logger.Info("connector deleted", "id", id) + return nil +} + +// GetRedirectURI returns the default redirect URI for connectors. +func (p *Provider) GetRedirectURI() string { + if p.config == nil { + return "" + } + issuer := strings.TrimSuffix(p.config.Issuer, "/") + if !strings.HasSuffix(issuer, "/oauth2") { + issuer += "/oauth2" + } + return issuer + "/callback" +} + +// buildStorageConnector creates a storage.Connector from ConnectorConfig. +// It handles the type-specific configuration for each connector type. +func (p *Provider) buildStorageConnector(cfg *ConnectorConfig) (storage.Connector, error) { + redirectURI := p.resolveRedirectURI(cfg.RedirectURI) + + var dexType string + var configData []byte + var err error + + switch cfg.Type { + case "oidc", "zitadel", "entra", "okta", "pocketid", "authentik", "keycloak": + dexType = "oidc" + configData, err = buildOIDCConnectorConfig(cfg, redirectURI) + case "google": + dexType = "google" + configData, err = buildOAuth2ConnectorConfig(cfg, redirectURI) + case "microsoft": + dexType = "microsoft" + configData, err = buildOAuth2ConnectorConfig(cfg, redirectURI) + default: + return storage.Connector{}, fmt.Errorf("unsupported connector type: %s", cfg.Type) + } + if err != nil { + return storage.Connector{}, err + } + + return storage.Connector{ID: cfg.ID, Type: dexType, Name: cfg.Name, Config: configData}, nil +} + +// resolveRedirectURI returns the redirect URI, using a default if not provided +func (p *Provider) resolveRedirectURI(redirectURI string) string { + if redirectURI != "" || p.config == nil { + return redirectURI + } + issuer := strings.TrimSuffix(p.config.Issuer, "/") + if !strings.HasSuffix(issuer, "/oauth2") { + issuer += "/oauth2" + } + return issuer + "/callback" +} + +// buildOIDCConnectorConfig creates config for OIDC-based connectors +func buildOIDCConnectorConfig(cfg *ConnectorConfig, redirectURI string) ([]byte, error) { + oidcConfig := map[string]interface{}{ + "issuer": cfg.Issuer, + "clientID": cfg.ClientID, + "clientSecret": cfg.ClientSecret, + "redirectURI": redirectURI, + "scopes": []string{"openid", "profile", "email"}, + "insecureEnableGroups": true, + //some providers don't return email verified, so we need to skip it if not present (e.g., Entra, Okta, Duo) + "insecureSkipEmailVerified": true, + } + switch cfg.Type { + case "zitadel": + oidcConfig["getUserInfo"] = true + case "entra": + oidcConfig["claimMapping"] = map[string]string{"email": "preferred_username"} + case "okta": + oidcConfig["scopes"] = []string{"openid", "profile", "email", "groups"} + case "pocketid": + oidcConfig["scopes"] = []string{"openid", "profile", "email", "groups"} + } + return encodeConnectorConfig(oidcConfig) +} + +// buildOAuth2ConnectorConfig creates config for OAuth2 connectors (google, microsoft) +func buildOAuth2ConnectorConfig(cfg *ConnectorConfig, redirectURI string) ([]byte, error) { + return encodeConnectorConfig(map[string]interface{}{ + "clientID": cfg.ClientID, + "clientSecret": cfg.ClientSecret, + "redirectURI": redirectURI, + }) +} + +// parseStorageConnector converts a storage.Connector back to ConnectorConfig. +// It infers the original identity provider type from the Dex connector type and ID. +func (p *Provider) parseStorageConnector(conn storage.Connector) (*ConnectorConfig, error) { + cfg := &ConnectorConfig{ + ID: conn.ID, + Name: conn.Name, + } + + if len(conn.Config) == 0 { + cfg.Type = conn.Type + return cfg, nil + } + + var configMap map[string]interface{} + if err := decodeConnectorConfig(conn.Config, &configMap); err != nil { + return nil, fmt.Errorf("failed to parse connector config: %w", err) + } + + // Extract common fields + if v, ok := configMap["clientID"].(string); ok { + cfg.ClientID = v + } + if v, ok := configMap["clientSecret"].(string); ok { + cfg.ClientSecret = v + } + if v, ok := configMap["redirectURI"].(string); ok { + cfg.RedirectURI = v + } + if v, ok := configMap["issuer"].(string); ok { + cfg.Issuer = v + } + + // Infer the original identity provider type from Dex connector type and ID + cfg.Type = inferIdentityProviderType(conn.Type, conn.ID, configMap) + + return cfg, nil +} + +// inferIdentityProviderType determines the original identity provider type +// based on the Dex connector type, connector ID, and configuration. +func inferIdentityProviderType(dexType, connectorID string, _ map[string]interface{}) string { + if dexType != "oidc" { + return dexType + } + return inferOIDCProviderType(connectorID) +} + +// inferOIDCProviderType infers the specific OIDC provider from connector ID +func inferOIDCProviderType(connectorID string) string { + connectorIDLower := strings.ToLower(connectorID) + for _, provider := range []string{"pocketid", "zitadel", "entra", "okta", "authentik", "keycloak"} { + if strings.Contains(connectorIDLower, provider) { + return provider + } + } + return "oidc" +} + +// encodeConnectorConfig serializes connector config to JSON bytes. +func encodeConnectorConfig(config map[string]interface{}) ([]byte, error) { + return json.Marshal(config) +} + +// decodeConnectorConfig deserializes connector config from JSON bytes. +func decodeConnectorConfig(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// ensureLocalConnector creates a local (password) connector if it doesn't exist +func ensureLocalConnector(ctx context.Context, stor storage.Storage) error { + // Check specifically for the local connector + _, err := stor.GetConnector(ctx, "local") + if err == nil { + // Local connector already exists + return nil + } + if !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("failed to get local connector: %w", err) + } + + // Create a local connector for password authentication + localConnector := storage.Connector{ + ID: "local", + Type: "local", + Name: "Email", + } + + if err := stor.CreateConnector(ctx, localConnector); err != nil { + return fmt.Errorf("failed to create local connector: %w", err) + } + + return nil +} + +// ensureStaticConnectors creates or updates static connectors in storage +func ensureStaticConnectors(ctx context.Context, stor storage.Storage, connectors []Connector) error { + for _, conn := range connectors { + storConn, err := conn.ToStorageConnector() + if err != nil { + return fmt.Errorf("failed to convert connector %s: %w", conn.ID, err) + } + _, err = stor.GetConnector(ctx, conn.ID) + if err == storage.ErrNotFound { + if err := stor.CreateConnector(ctx, storConn); err != nil { + return fmt.Errorf("failed to create connector %s: %w", conn.ID, err) + } + continue + } + if err != nil { + return fmt.Errorf("failed to get connector %s: %w", conn.ID, err) + } + if err := stor.UpdateConnector(ctx, conn.ID, func(old storage.Connector) (storage.Connector, error) { + old.Name = storConn.Name + old.Config = storConn.Config + return old, nil + }); err != nil { + return fmt.Errorf("failed to update connector %s: %w", conn.ID, err) + } + } + return nil +} diff --git a/idp/dex/provider.go b/idp/dex/provider.go index 6625d9eaf..6c608dbf5 100644 --- a/idp/dex/provider.go +++ b/idp/dex/provider.go @@ -4,7 +4,6 @@ package dex import ( "context" "encoding/base64" - "encoding/json" "errors" "fmt" "log/slog" @@ -245,34 +244,6 @@ func ensureStaticClients(ctx context.Context, stor storage.Storage, clients []st return nil } -// ensureStaticConnectors creates or updates static connectors in storage -func ensureStaticConnectors(ctx context.Context, stor storage.Storage, connectors []Connector) error { - for _, conn := range connectors { - storConn, err := conn.ToStorageConnector() - if err != nil { - return fmt.Errorf("failed to convert connector %s: %w", conn.ID, err) - } - _, err = stor.GetConnector(ctx, conn.ID) - if errors.Is(err, storage.ErrNotFound) { - if err := stor.CreateConnector(ctx, storConn); err != nil { - return fmt.Errorf("failed to create connector %s: %w", conn.ID, err) - } - continue - } - if err != nil { - return fmt.Errorf("failed to get connector %s: %w", conn.ID, err) - } - if err := stor.UpdateConnector(ctx, conn.ID, func(old storage.Connector) (storage.Connector, error) { - old.Name = storConn.Name - old.Config = storConn.Config - return old, nil - }); err != nil { - return fmt.Errorf("failed to update connector %s: %w", conn.ID, err) - } - } - return nil -} - // buildDexConfig creates a server.Config with defaults applied func buildDexConfig(yamlConfig *YAMLConfig, stor storage.Storage, logger *slog.Logger) server.Config { cfg := yamlConfig.ToServerConfig(stor, logger) @@ -613,294 +584,37 @@ func (p *Provider) ListUsers(ctx context.Context) ([]storage.Password, error) { return p.storage.ListPasswords(ctx) } -// ensureLocalConnector creates a local (password) connector if none exists -func ensureLocalConnector(ctx context.Context, stor storage.Storage) error { - connectors, err := stor.ListConnectors(ctx) +// UpdateUserPassword updates the password for a user identified by userID. +// The userID can be either an encoded Dex ID (base64 protobuf) or a raw UUID. +// It verifies the current password before updating. +func (p *Provider) UpdateUserPassword(ctx context.Context, userID string, oldPassword, newPassword string) error { + // Get the user by ID to find their email + user, err := p.GetUserByID(ctx, userID) if err != nil { - return fmt.Errorf("failed to list connectors: %w", err) + return fmt.Errorf("failed to get user: %w", err) } - // If any connector exists, we're good - if len(connectors) > 0 { - return nil + // Verify old password + if err := bcrypt.CompareHashAndPassword(user.Hash, []byte(oldPassword)); err != nil { + return fmt.Errorf("current password is incorrect") } - // Create a local connector for password authentication - localConnector := storage.Connector{ - ID: "local", - Type: "local", - Name: "Email", - } - - if err := stor.CreateConnector(ctx, localConnector); err != nil { - return fmt.Errorf("failed to create local connector: %w", err) - } - - return nil -} - -// ConnectorConfig represents the configuration for an identity provider connector -type ConnectorConfig struct { - // ID is the unique identifier for the connector - ID string - // Name is a human-readable name for the connector - Name string - // Type is the connector type (oidc, google, microsoft) - Type string - // Issuer is the OIDC issuer URL (for OIDC-based connectors) - Issuer string - // ClientID is the OAuth2 client ID - ClientID string - // ClientSecret is the OAuth2 client secret - ClientSecret string - // RedirectURI is the OAuth2 redirect URI - RedirectURI string -} - -// CreateConnector creates a new connector in Dex storage. -// It maps the connector config to the appropriate Dex connector type and configuration. -func (p *Provider) CreateConnector(ctx context.Context, cfg *ConnectorConfig) (*ConnectorConfig, error) { - // Fill in the redirect URI if not provided - if cfg.RedirectURI == "" { - cfg.RedirectURI = p.GetRedirectURI() - } - - storageConn, err := p.buildStorageConnector(cfg) + // Hash the new password + newHash, err := bcrypt.GenerateFromPassword([]byte(newPassword), bcrypt.DefaultCost) if err != nil { - return nil, fmt.Errorf("failed to build connector: %w", err) + return fmt.Errorf("failed to hash new password: %w", err) } - if err := p.storage.CreateConnector(ctx, storageConn); err != nil { - return nil, fmt.Errorf("failed to create connector: %w", err) - } - - p.logger.Info("connector created", "id", cfg.ID, "type", cfg.Type) - return cfg, nil -} - -// GetConnector retrieves a connector by ID from Dex storage. -func (p *Provider) GetConnector(ctx context.Context, id string) (*ConnectorConfig, error) { - conn, err := p.storage.GetConnector(ctx, id) - if err != nil { - if err == storage.ErrNotFound { - return nil, err - } - return nil, fmt.Errorf("failed to get connector: %w", err) - } - - return p.parseStorageConnector(conn) -} - -// ListConnectors returns all connectors from Dex storage (excluding the local connector). -func (p *Provider) ListConnectors(ctx context.Context) ([]*ConnectorConfig, error) { - connectors, err := p.storage.ListConnectors(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list connectors: %w", err) - } - - result := make([]*ConnectorConfig, 0, len(connectors)) - for _, conn := range connectors { - // Skip the local password connector - if conn.ID == "local" && conn.Type == "local" { - continue - } - - cfg, err := p.parseStorageConnector(conn) - if err != nil { - p.logger.Warn("failed to parse connector", "id", conn.ID, "error", err) - continue - } - result = append(result, cfg) - } - - return result, nil -} - -// UpdateConnector updates an existing connector in Dex storage. -func (p *Provider) UpdateConnector(ctx context.Context, cfg *ConnectorConfig) error { - storageConn, err := p.buildStorageConnector(cfg) - if err != nil { - return fmt.Errorf("failed to build connector: %w", err) - } - - if err := p.storage.UpdateConnector(ctx, cfg.ID, func(old storage.Connector) (storage.Connector, error) { - return storageConn, nil - }); err != nil { - return fmt.Errorf("failed to update connector: %w", err) - } - - p.logger.Info("connector updated", "id", cfg.ID, "type", cfg.Type) - return nil -} - -// DeleteConnector removes a connector from Dex storage. -func (p *Provider) DeleteConnector(ctx context.Context, id string) error { - // Prevent deletion of the local connector - if id == "local" { - return fmt.Errorf("cannot delete the local password connector") - } - - if err := p.storage.DeleteConnector(ctx, id); err != nil { - return fmt.Errorf("failed to delete connector: %w", err) - } - - p.logger.Info("connector deleted", "id", id) - return nil -} - -// buildStorageConnector creates a storage.Connector from ConnectorConfig. -// It handles the type-specific configuration for each connector type. -func (p *Provider) buildStorageConnector(cfg *ConnectorConfig) (storage.Connector, error) { - redirectURI := p.resolveRedirectURI(cfg.RedirectURI) - - var dexType string - var configData []byte - var err error - - switch cfg.Type { - case "oidc", "zitadel", "entra", "okta", "pocketid", "authentik", "keycloak": - dexType = "oidc" - configData, err = buildOIDCConnectorConfig(cfg, redirectURI) - case "google": - dexType = "google" - configData, err = buildOAuth2ConnectorConfig(cfg, redirectURI) - case "microsoft": - dexType = "microsoft" - configData, err = buildOAuth2ConnectorConfig(cfg, redirectURI) - default: - return storage.Connector{}, fmt.Errorf("unsupported connector type: %s", cfg.Type) - } - if err != nil { - return storage.Connector{}, err - } - - return storage.Connector{ID: cfg.ID, Type: dexType, Name: cfg.Name, Config: configData}, nil -} - -// resolveRedirectURI returns the redirect URI, using a default if not provided -func (p *Provider) resolveRedirectURI(redirectURI string) string { - if redirectURI != "" || p.config == nil { - return redirectURI - } - issuer := strings.TrimSuffix(p.config.Issuer, "/") - if !strings.HasSuffix(issuer, "/oauth2") { - issuer += "/oauth2" - } - return issuer + "/callback" -} - -// buildOIDCConnectorConfig creates config for OIDC-based connectors -func buildOIDCConnectorConfig(cfg *ConnectorConfig, redirectURI string) ([]byte, error) { - oidcConfig := map[string]interface{}{ - "issuer": cfg.Issuer, - "clientID": cfg.ClientID, - "clientSecret": cfg.ClientSecret, - "redirectURI": redirectURI, - "scopes": []string{"openid", "profile", "email"}, - "insecureEnableGroups": true, - //some providers don't return email verified, so we need to skip it if not present (e.g., Entra, Okta, Duo) - "insecureSkipEmailVerified": true, - } - switch cfg.Type { - case "zitadel": - oidcConfig["getUserInfo"] = true - case "entra": - oidcConfig["claimMapping"] = map[string]string{"email": "preferred_username"} - case "okta": - oidcConfig["scopes"] = []string{"openid", "profile", "email", "groups"} - case "pocketid": - oidcConfig["scopes"] = []string{"openid", "profile", "email", "groups"} - } - return encodeConnectorConfig(oidcConfig) -} - -// buildOAuth2ConnectorConfig creates config for OAuth2 connectors (google, microsoft) -func buildOAuth2ConnectorConfig(cfg *ConnectorConfig, redirectURI string) ([]byte, error) { - return encodeConnectorConfig(map[string]interface{}{ - "clientID": cfg.ClientID, - "clientSecret": cfg.ClientSecret, - "redirectURI": redirectURI, + // Update the password in storage + err = p.storage.UpdatePassword(ctx, user.Email, func(old storage.Password) (storage.Password, error) { + old.Hash = newHash + return old, nil }) -} - -// parseStorageConnector converts a storage.Connector back to ConnectorConfig. -// It infers the original identity provider type from the Dex connector type and ID. -func (p *Provider) parseStorageConnector(conn storage.Connector) (*ConnectorConfig, error) { - cfg := &ConnectorConfig{ - ID: conn.ID, - Name: conn.Name, + if err != nil { + return fmt.Errorf("failed to update password: %w", err) } - if len(conn.Config) == 0 { - cfg.Type = conn.Type - return cfg, nil - } - - var configMap map[string]interface{} - if err := decodeConnectorConfig(conn.Config, &configMap); err != nil { - return nil, fmt.Errorf("failed to parse connector config: %w", err) - } - - // Extract common fields - if v, ok := configMap["clientID"].(string); ok { - cfg.ClientID = v - } - if v, ok := configMap["clientSecret"].(string); ok { - cfg.ClientSecret = v - } - if v, ok := configMap["redirectURI"].(string); ok { - cfg.RedirectURI = v - } - if v, ok := configMap["issuer"].(string); ok { - cfg.Issuer = v - } - - // Infer the original identity provider type from Dex connector type and ID - cfg.Type = inferIdentityProviderType(conn.Type, conn.ID, configMap) - - return cfg, nil -} - -// inferIdentityProviderType determines the original identity provider type -// based on the Dex connector type, connector ID, and configuration. -func inferIdentityProviderType(dexType, connectorID string, _ map[string]interface{}) string { - if dexType != "oidc" { - return dexType - } - return inferOIDCProviderType(connectorID) -} - -// inferOIDCProviderType infers the specific OIDC provider from connector ID -func inferOIDCProviderType(connectorID string) string { - connectorIDLower := strings.ToLower(connectorID) - for _, provider := range []string{"pocketid", "zitadel", "entra", "okta", "authentik", "keycloak"} { - if strings.Contains(connectorIDLower, provider) { - return provider - } - } - return "oidc" -} - -// encodeConnectorConfig serializes connector config to JSON bytes. -func encodeConnectorConfig(config map[string]interface{}) ([]byte, error) { - return json.Marshal(config) -} - -// decodeConnectorConfig deserializes connector config from JSON bytes. -func decodeConnectorConfig(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// GetRedirectURI returns the default redirect URI for connectors. -func (p *Provider) GetRedirectURI() string { - if p.config == nil { - return "" - } - issuer := strings.TrimSuffix(p.config.Issuer, "/") - if !strings.HasSuffix(issuer, "/oauth2") { - issuer += "/oauth2" - } - return issuer + "/callback" + return nil } // GetIssuer returns the OIDC issuer URL. diff --git a/management/server/account/manager.go b/management/server/account/manager.go index f925af4ec..11af67358 100644 --- a/management/server/account/manager.go +++ b/management/server/account/manager.go @@ -32,6 +32,7 @@ type Manager interface { CreateUser(ctx context.Context, accountID, initiatorUserID string, key *types.UserInfo) (*types.UserInfo, error) DeleteUser(ctx context.Context, accountID, initiatorUserID string, targetUserID string) error DeleteRegularUsers(ctx context.Context, accountID, initiatorUserID string, targetUserIDs []string, userInfos map[string]*types.UserInfo) error + UpdateUserPassword(ctx context.Context, accountID, currentUserID, targetUserID string, oldPassword, newPassword string) error InviteUser(ctx context.Context, accountID string, initiatorUserID string, targetUserID string) error ApproveUser(ctx context.Context, accountID, initiatorUserID, targetUserID string) (*types.UserInfo, error) RejectUser(ctx context.Context, accountID, initiatorUserID, targetUserID string) error diff --git a/management/server/activity/codes.go b/management/server/activity/codes.go index ae8e46db9..e9eaa644b 100644 --- a/management/server/activity/codes.go +++ b/management/server/activity/codes.go @@ -195,7 +195,9 @@ const ( DNSRecordUpdated Activity = 100 DNSRecordDeleted Activity = 101 - JobCreatedByUser Activity = 102 + JobCreatedByUser Activity = 102 + + UserPasswordChanged Activity = 103 AccountDeleted Activity = 99999 ) @@ -323,6 +325,8 @@ var activityMap = map[Activity]Code{ DNSRecordDeleted: {"DNS zone record deleted", "dns.zone.record.delete"}, JobCreatedByUser: {"Create Job for peer", "peer.job.create"}, + + UserPasswordChanged: {"User password changed", "user.password.change"}, } // StringCode returns a string code of the activity diff --git a/management/server/http/handlers/users/users_handler.go b/management/server/http/handlers/users/users_handler.go index 7669d7404..40ad585d2 100644 --- a/management/server/http/handlers/users/users_handler.go +++ b/management/server/http/handlers/users/users_handler.go @@ -33,6 +33,7 @@ func AddEndpoints(accountManager account.Manager, router *mux.Router) { router.HandleFunc("/users/{userId}/invite", userHandler.inviteUser).Methods("POST", "OPTIONS") router.HandleFunc("/users/{userId}/approve", userHandler.approveUser).Methods("POST", "OPTIONS") router.HandleFunc("/users/{userId}/reject", userHandler.rejectUser).Methods("DELETE", "OPTIONS") + router.HandleFunc("/users/{userId}/password", userHandler.changePassword).Methods("PUT", "OPTIONS") addUsersTokensEndpoint(accountManager, router) } @@ -410,3 +411,46 @@ func (h *handler) rejectUser(w http.ResponseWriter, r *http.Request) { util.WriteJSONObject(r.Context(), w, util.EmptyObject{}) } + +// passwordChangeRequest represents the request body for password change +type passwordChangeRequest struct { + OldPassword string `json:"old_password"` + NewPassword string `json:"new_password"` +} + +// changePassword is a PUT request to change user's password. +// Only available when embedded IDP is enabled. +// Users can only change their own password. +func (h *handler) changePassword(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPut { + util.WriteErrorResponse("wrong HTTP method", http.StatusMethodNotAllowed, w) + return + } + + vars := mux.Vars(r) + targetUserID := vars["userId"] + if len(targetUserID) == 0 { + util.WriteErrorResponse("invalid user ID", http.StatusBadRequest, w) + return + } + + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + var req passwordChangeRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + + err = h.accountManager.UpdateUserPassword(r.Context(), userAuth.AccountId, userAuth.UserId, targetUserID, req.OldPassword, req.NewPassword) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, util.EmptyObject{}) +} diff --git a/management/server/http/handlers/users/users_handler_test.go b/management/server/http/handlers/users/users_handler_test.go index 37f0a6c1d..aa77dd843 100644 --- a/management/server/http/handlers/users/users_handler_test.go +++ b/management/server/http/handlers/users/users_handler_test.go @@ -856,3 +856,118 @@ func TestRejectUserEndpoint(t *testing.T) { }) } } + +func TestChangePasswordEndpoint(t *testing.T) { + tt := []struct { + name string + expectedStatus int + requestBody string + targetUserID string + currentUserID string + mockError error + expectMockNotCalled bool + }{ + { + name: "successful password change", + expectedStatus: http.StatusOK, + requestBody: `{"old_password": "OldPass123!", "new_password": "NewPass456!"}`, + targetUserID: existingUserID, + currentUserID: existingUserID, + mockError: nil, + }, + { + name: "missing old password", + expectedStatus: http.StatusUnprocessableEntity, + requestBody: `{"new_password": "NewPass456!"}`, + targetUserID: existingUserID, + currentUserID: existingUserID, + mockError: status.Errorf(status.InvalidArgument, "old password is required"), + }, + { + name: "missing new password", + expectedStatus: http.StatusUnprocessableEntity, + requestBody: `{"old_password": "OldPass123!"}`, + targetUserID: existingUserID, + currentUserID: existingUserID, + mockError: status.Errorf(status.InvalidArgument, "new password is required"), + }, + { + name: "wrong old password", + expectedStatus: http.StatusUnprocessableEntity, + requestBody: `{"old_password": "WrongPass!", "new_password": "NewPass456!"}`, + targetUserID: existingUserID, + currentUserID: existingUserID, + mockError: status.Errorf(status.InvalidArgument, "invalid password"), + }, + { + name: "embedded IDP not enabled", + expectedStatus: http.StatusPreconditionFailed, + requestBody: `{"old_password": "OldPass123!", "new_password": "NewPass456!"}`, + targetUserID: existingUserID, + currentUserID: existingUserID, + mockError: status.Errorf(status.PreconditionFailed, "password change is only available with embedded identity provider"), + }, + { + name: "invalid JSON request", + expectedStatus: http.StatusBadRequest, + requestBody: `{invalid json}`, + targetUserID: existingUserID, + currentUserID: existingUserID, + expectMockNotCalled: true, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + mockCalled := false + am := &mock_server.MockAccountManager{} + am.UpdateUserPasswordFunc = func(ctx context.Context, accountID, currentUserID, targetUserID string, oldPassword, newPassword string) error { + mockCalled = true + return tc.mockError + } + + handler := newHandler(am) + router := mux.NewRouter() + router.HandleFunc("/users/{userId}/password", handler.changePassword).Methods("PUT") + + reqPath := "/users/" + tc.targetUserID + "/password" + req, err := http.NewRequest("PUT", reqPath, bytes.NewBufferString(tc.requestBody)) + require.NoError(t, err) + + userAuth := auth.UserAuth{ + AccountId: existingAccountID, + UserId: tc.currentUserID, + } + ctx := nbcontext.SetUserAuthInContext(req.Context(), userAuth) + req = req.WithContext(ctx) + + rr := httptest.NewRecorder() + router.ServeHTTP(rr, req) + + assert.Equal(t, tc.expectedStatus, rr.Code) + + if tc.expectMockNotCalled { + assert.False(t, mockCalled, "mock should not have been called") + } + }) + } +} + +func TestChangePasswordEndpoint_WrongMethod(t *testing.T) { + am := &mock_server.MockAccountManager{} + handler := newHandler(am) + + req, err := http.NewRequest("POST", "/users/test-user/password", bytes.NewBufferString(`{}`)) + require.NoError(t, err) + + userAuth := auth.UserAuth{ + AccountId: existingAccountID, + UserId: existingUserID, + } + req = nbcontext.SetUserAuthInRequest(req, userAuth) + + rr := httptest.NewRecorder() + handler.changePassword(rr, req) + + assert.Equal(t, http.StatusMethodNotAllowed, rr.Code) +} diff --git a/management/server/idp/embedded.go b/management/server/idp/embedded.go index 0e46b506e..79859525b 100644 --- a/management/server/idp/embedded.go +++ b/management/server/idp/embedded.go @@ -400,7 +400,6 @@ func (m *EmbeddedIdPManager) CreateUserWithPassword(ctx context.Context, email, // InviteUserByID resends an invitation to a user. func (m *EmbeddedIdPManager) InviteUserByID(ctx context.Context, userID string) error { - // TODO: implement return fmt.Errorf("not implemented") } @@ -432,6 +431,33 @@ func (m *EmbeddedIdPManager) DeleteUser(ctx context.Context, userID string) erro return nil } +// UpdateUserPassword updates the password for a user in the embedded IdP. +// It verifies that the current user is changing their own password and +// validates the current password before updating to the new password. +func (m *EmbeddedIdPManager) UpdateUserPassword(ctx context.Context, currentUserID, targetUserID string, oldPassword, newPassword string) error { + // Verify the user is changing their own password + if currentUserID != targetUserID { + return fmt.Errorf("users can only change their own password") + } + + // Verify the new password is different from the old password + if oldPassword == newPassword { + return fmt.Errorf("new password must be different from current password") + } + + err := m.provider.UpdateUserPassword(ctx, targetUserID, oldPassword, newPassword) + if err != nil { + if m.appMetrics != nil { + m.appMetrics.IDPMetrics().CountRequestError() + } + return err + } + + log.WithContext(ctx).Debugf("updated password for user %s in embedded IdP", targetUserID) + + return nil +} + // CreateConnector creates a new identity provider connector in Dex. // Returns the created connector config with the redirect URL populated. func (m *EmbeddedIdPManager) CreateConnector(ctx context.Context, cfg *dex.ConnectorConfig) (*dex.ConnectorConfig, error) { @@ -449,15 +475,8 @@ func (m *EmbeddedIdPManager) ListConnectors(ctx context.Context) ([]*dex.Connect } // UpdateConnector updates an existing identity provider connector. +// Field preservation for partial updates is handled by Provider.UpdateConnector. func (m *EmbeddedIdPManager) UpdateConnector(ctx context.Context, cfg *dex.ConnectorConfig) error { - // Preserve existing secret if not provided in update - if cfg.ClientSecret == "" { - existing, err := m.provider.GetConnector(ctx, cfg.ID) - if err != nil { - return fmt.Errorf("failed to get existing connector: %w", err) - } - cfg.ClientSecret = existing.ClientSecret - } return m.provider.UpdateConnector(ctx, cfg) } diff --git a/management/server/idp/embedded_test.go b/management/server/idp/embedded_test.go index 04e3f0699..d8d3009dd 100644 --- a/management/server/idp/embedded_test.go +++ b/management/server/idp/embedded_test.go @@ -248,6 +248,71 @@ func TestEmbeddedIdPManager_UserIDFormat_MatchesJWT(t *testing.T) { t.Logf(" Connector: %s", connectorID) } +func TestEmbeddedIdPManager_UpdateUserPassword(t *testing.T) { + ctx := context.Background() + + tmpDir, err := os.MkdirTemp("", "embedded-idp-test-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + config := &EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + Storage: EmbeddedStorageConfig{ + Type: "sqlite3", + Config: EmbeddedStorageTypeConfig{ + File: filepath.Join(tmpDir, "dex.db"), + }, + }, + } + + manager, err := NewEmbeddedIdPManager(ctx, config, nil) + require.NoError(t, err) + defer func() { _ = manager.Stop(ctx) }() + + // Create a user with a known password + email := "password-test@example.com" + name := "Password Test User" + initialPassword := "InitialPass123!" + + userData, err := manager.CreateUserWithPassword(ctx, email, initialPassword, name) + require.NoError(t, err) + require.NotNil(t, userData) + + userID := userData.ID + + t.Run("successful password change", func(t *testing.T) { + newPassword := "NewSecurePass456!" + err := manager.UpdateUserPassword(ctx, userID, userID, initialPassword, newPassword) + require.NoError(t, err) + + // Verify the new password works by changing it again + anotherPassword := "AnotherPass789!" + err = manager.UpdateUserPassword(ctx, userID, userID, newPassword, anotherPassword) + require.NoError(t, err) + }) + + t.Run("wrong old password", func(t *testing.T) { + err := manager.UpdateUserPassword(ctx, userID, userID, "wrongpassword", "NewPass123!") + require.Error(t, err) + assert.Contains(t, err.Error(), "current password is incorrect") + }) + + t.Run("cannot change other user password", func(t *testing.T) { + otherUserID := "other-user-id" + err := manager.UpdateUserPassword(ctx, userID, otherUserID, "oldpass", "newpass") + require.Error(t, err) + assert.Contains(t, err.Error(), "users can only change their own password") + }) + + t.Run("same password rejected", func(t *testing.T) { + samePassword := "SamePass123!" + err := manager.UpdateUserPassword(ctx, userID, userID, samePassword, samePassword) + require.Error(t, err) + assert.Contains(t, err.Error(), "new password must be different") + }) +} + func TestEmbeddedIdPManager_GetLocalKeysLocation(t *testing.T) { ctx := context.Background() diff --git a/management/server/mock_server/account_mock.go b/management/server/mock_server/account_mock.go index f5caa3bbc..75e971498 100644 --- a/management/server/mock_server/account_mock.go +++ b/management/server/mock_server/account_mock.go @@ -74,6 +74,7 @@ type MockAccountManager struct { SaveOrAddUsersFunc func(ctx context.Context, accountID, initiatorUserID string, update []*types.User, addIfNotExists bool) ([]*types.UserInfo, error) DeleteUserFunc func(ctx context.Context, accountID string, initiatorUserID string, targetUserID string) error DeleteRegularUsersFunc func(ctx context.Context, accountID, initiatorUserID string, targetUserIDs []string, userInfos map[string]*types.UserInfo) error + UpdateUserPasswordFunc func(ctx context.Context, accountID, currentUserID, targetUserID string, oldPassword, newPassword string) error CreatePATFunc func(ctx context.Context, accountID string, initiatorUserID string, targetUserId string, tokenName string, expiresIn int) (*types.PersonalAccessTokenGenerated, error) DeletePATFunc func(ctx context.Context, accountID string, initiatorUserID string, targetUserId string, tokenID string) error GetPATFunc func(ctx context.Context, accountID string, initiatorUserID string, targetUserId string, tokenID string) (*types.PersonalAccessToken, error) @@ -135,9 +136,9 @@ type MockAccountManager struct { CreateIdentityProviderFunc func(ctx context.Context, accountID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) UpdateIdentityProviderFunc func(ctx context.Context, accountID, idpID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) DeleteIdentityProviderFunc func(ctx context.Context, accountID, idpID, userID string) error - CreatePeerJobFunc func(ctx context.Context, accountID, peerID, userID string, job *types.Job) error - GetAllPeerJobsFunc func(ctx context.Context, accountID, userID, peerID string) ([]*types.Job, error) - GetPeerJobByIDFunc func(ctx context.Context, accountID, userID, peerID, jobID string) (*types.Job, error) + CreatePeerJobFunc func(ctx context.Context, accountID, peerID, userID string, job *types.Job) error + GetAllPeerJobsFunc func(ctx context.Context, accountID, userID, peerID string) ([]*types.Job, error) + GetPeerJobByIDFunc func(ctx context.Context, accountID, userID, peerID, jobID string) (*types.Job, error) } func (am *MockAccountManager) CreatePeerJob(ctx context.Context, accountID, peerID, userID string, job *types.Job) error { @@ -635,6 +636,14 @@ func (am *MockAccountManager) DeleteRegularUsers(ctx context.Context, accountID, return status.Errorf(codes.Unimplemented, "method DeleteRegularUsers is not implemented") } +// UpdateUserPassword mocks UpdateUserPassword of the AccountManager interface +func (am *MockAccountManager) UpdateUserPassword(ctx context.Context, accountID, currentUserID, targetUserID string, oldPassword, newPassword string) error { + if am.UpdateUserPasswordFunc != nil { + return am.UpdateUserPasswordFunc(ctx, accountID, currentUserID, targetUserID, oldPassword, newPassword) + } + return status.Errorf(codes.Unimplemented, "method UpdateUserPassword is not implemented") +} + func (am *MockAccountManager) InviteUser(ctx context.Context, accountID string, initiatorUserID string, targetUserID string) error { if am.InviteUserFunc != nil { return am.InviteUserFunc(ctx, accountID, initiatorUserID, targetUserID) diff --git a/management/server/user.go b/management/server/user.go index d12dd4f11..1f38b749f 100644 --- a/management/server/user.go +++ b/management/server/user.go @@ -249,6 +249,37 @@ func (am *DefaultAccountManager) ListUsers(ctx context.Context, accountID string return am.Store.GetAccountUsers(ctx, store.LockingStrengthNone, accountID) } +// UpdateUserPassword updates the password for a user in the embedded IdP. +// This is only available when the embedded IdP is enabled. +// Users can only change their own password. +func (am *DefaultAccountManager) UpdateUserPassword(ctx context.Context, accountID, currentUserID, targetUserID string, oldPassword, newPassword string) error { + if !IsEmbeddedIdp(am.idpManager) { + return status.Errorf(status.PreconditionFailed, "password change is only available with embedded identity provider") + } + + if oldPassword == "" { + return status.Errorf(status.InvalidArgument, "old password is required") + } + + if newPassword == "" { + return status.Errorf(status.InvalidArgument, "new password is required") + } + + embeddedIdp, ok := am.idpManager.(*idp.EmbeddedIdPManager) + if !ok { + return status.Errorf(status.Internal, "failed to get embedded IdP manager") + } + + err := embeddedIdp.UpdateUserPassword(ctx, currentUserID, targetUserID, oldPassword, newPassword) + if err != nil { + return status.Errorf(status.InvalidArgument, "failed to update password: %v", err) + } + + am.StoreEvent(ctx, currentUserID, targetUserID, accountID, activity.UserPasswordChanged, nil) + + return nil +} + func (am *DefaultAccountManager) deleteServiceUser(ctx context.Context, accountID string, initiatorUserID string, targetUser *types.User) error { if err := am.Store.DeleteUser(ctx, accountID, targetUser.Id); err != nil { return err @@ -806,7 +837,20 @@ func (am *DefaultAccountManager) getUserInfo(ctx context.Context, user *types.Us } return user.ToUserInfo(userData) } - return user.ToUserInfo(nil) + + userInfo, err := user.ToUserInfo(nil) + if err != nil { + return nil, err + } + + // For embedded IDP users, extract the IdPID (connector ID) from the encoded user ID + if IsEmbeddedIdp(am.idpManager) && !user.IsServiceUser { + if _, connectorID, decodeErr := dex.DecodeDexUserID(user.Id); decodeErr == nil && connectorID != "" { + userInfo.IdPID = connectorID + } + } + + return userInfo, nil } // validateUserUpdate validates the update operation for a user. diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 29e81f15a..cc3fa10d8 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -44,6 +44,20 @@ tags: components: schemas: + PasswordChangeRequest: + type: object + properties: + old_password: + description: The current password + type: string + example: "currentPassword123" + new_password: + description: The new password to set + type: string + example: "newSecurePassword456" + required: + - old_password + - new_password WorkloadType: type: string description: | @@ -3205,6 +3219,43 @@ paths: "$ref": "#/components/responses/forbidden" '500': "$ref": "#/components/responses/internal_error" + /api/users/{userId}/password: + put: + summary: Change user password + description: Change the password for a user. Only available when embedded IdP is enabled. Users can only change their own password. + tags: [ Users ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: userId + required: true + schema: + type: string + description: The unique identifier of a user + requestBody: + description: Password change request + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/PasswordChangeRequest' + responses: + '200': + description: Password changed successfully + content: {} + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '412': + description: Precondition failed - embedded IdP is not enabled + content: { } + '500': + "$ref": "#/components/responses/internal_error" /api/users/current: get: summary: Retrieve current user diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index 7a845b62f..17af8b06d 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -1201,6 +1201,15 @@ type OSVersionCheck struct { Windows *MinKernelVersionCheck `json:"windows,omitempty"` } +// PasswordChangeRequest defines model for PasswordChangeRequest. +type PasswordChangeRequest struct { + // NewPassword The new password to set + NewPassword string `json:"new_password"` + + // OldPassword The current password + OldPassword string `json:"old_password"` +} + // Peer defines model for Peer. type Peer struct { // ApprovalRequired (Cloud only) Indicates whether peer needs approval @@ -2354,6 +2363,9 @@ type PostApiUsersJSONRequestBody = UserCreateRequest // PutApiUsersUserIdJSONRequestBody defines body for PutApiUsersUserId for application/json ContentType. type PutApiUsersUserIdJSONRequestBody = UserRequest +// PutApiUsersUserIdPasswordJSONRequestBody defines body for PutApiUsersUserIdPassword for application/json ContentType. +type PutApiUsersUserIdPasswordJSONRequestBody = PasswordChangeRequest + // PostApiUsersUserIdTokensJSONRequestBody defines body for PostApiUsersUserIdTokens for application/json ContentType. type PostApiUsersUserIdTokensJSONRequestBody = PersonalAccessTokenRequest From 4888021ba6e0ef308779a7a99b84f315c2db84dc Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Tue, 20 Jan 2026 15:12:22 +0100 Subject: [PATCH 060/374] Add missing activity events to the API response (#5140) --- shared/management/http/api/openapi.yml | 57 +++++++-- shared/management/http/api/types.gen.go | 152 ++++++++++++++++-------- 2 files changed, 150 insertions(+), 59 deletions(-) diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index cc3fa10d8..f1ff98b16 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -2028,18 +2028,51 @@ components: activity_code: description: The string code of the activity that occurred during the event type: string - enum: [ "user.peer.delete", "user.join", "user.invite", "user.peer.add", "user.group.add", "user.group.delete", - "user.role.update", "user.block", "user.unblock", "user.peer.login", - "setupkey.peer.add", "setupkey.add", "setupkey.update", "setupkey.revoke", "setupkey.overuse", - "setupkey.group.delete", "setupkey.group.add", - "rule.add", "rule.delete", "rule.update", - "policy.add", "policy.delete", "policy.update", - "group.add", "group.update", "dns.setting.disabled.management.group.add", "dns.setting.disabled.management.group.delete", - "account.create", "account.setting.peer.login.expiration.update", "account.setting.peer.login.expiration.disable", "account.setting.peer.login.expiration.enable", - "route.add", "route.delete", "route.update", - "nameserver.group.add", "nameserver.group.delete", "nameserver.group.update", - "peer.ssh.disable", "peer.ssh.enable", "peer.rename", "peer.login.expiration.disable", "peer.login.expiration.enable", "peer.login.expire", - "service.user.create", "personal.access.token.create", "service.user.delete", "personal.access.token.delete" ] + enum: [ + "peer.user.add", "peer.setupkey.add", "user.join", "user.invite", "account.create", "account.delete", + "user.peer.delete", "rule.add", "rule.update", "rule.delete", + "policy.add", "policy.update", "policy.delete", + "setupkey.add", "setupkey.update", "setupkey.revoke", "setupkey.overuse", "setupkey.delete", + "group.add", "group.update", "group.delete", + "peer.group.add", "peer.group.delete", + "user.group.add", "user.group.delete", "user.role.update", + "setupkey.group.add", "setupkey.group.delete", + "dns.setting.disabled.management.group.add", "dns.setting.disabled.management.group.delete", + "route.add", "route.delete", "route.update", + "peer.ssh.enable", "peer.ssh.disable", "peer.rename", + "peer.login.expiration.enable", "peer.login.expiration.disable", + "nameserver.group.add", "nameserver.group.delete", "nameserver.group.update", + "account.setting.peer.login.expiration.update", "account.setting.peer.login.expiration.enable", "account.setting.peer.login.expiration.disable", + "personal.access.token.create", "personal.access.token.delete", + "service.user.create", "service.user.delete", + "user.block", "user.unblock", "user.delete", + "user.peer.login", "peer.login.expire", + "dashboard.login", + "integration.create", "integration.update", "integration.delete", + "account.setting.peer.approval.enable", "account.setting.peer.approval.disable", + "peer.approve", "peer.approval.revoke", + "transferred.owner.role", + "posture.check.create", "posture.check.update", "posture.check.delete", + "peer.inactivity.expiration.enable", "peer.inactivity.expiration.disable", + "account.peer.inactivity.expiration.enable", "account.peer.inactivity.expiration.disable", "account.peer.inactivity.expiration.update", + "account.setting.group.propagation.enable", "account.setting.group.propagation.disable", + "account.setting.routing.peer.dns.resolution.enable", "account.setting.routing.peer.dns.resolution.disable", + "network.create", "network.update", "network.delete", + "network.resource.create", "network.resource.update", "network.resource.delete", + "network.router.create", "network.router.update", "network.router.delete", + "resource.group.add", "resource.group.delete", + "account.dns.domain.update", + "account.setting.lazy.connection.enable", "account.setting.lazy.connection.disable", + "account.network.range.update", + "peer.ip.update", + "user.approve", "user.reject", "user.create", + "account.settings.auto.version.update", + "identityprovider.create", "identityprovider.update", "identityprovider.delete", + "dns.zone.create", "dns.zone.update", "dns.zone.delete", + "dns.zone.record.create", "dns.zone.record.update", "dns.zone.record.delete", + "peer.job.create", + "user.password.change" + ] example: route.add initiator_id: description: The ID of the initiator of the event. E.g., an ID of a user that triggered the event. diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index 17af8b06d..848023689 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -25,53 +25,111 @@ const ( // Defines values for EventActivityCode. const ( - EventActivityCodeAccountCreate EventActivityCode = "account.create" - EventActivityCodeAccountSettingPeerLoginExpirationDisable EventActivityCode = "account.setting.peer.login.expiration.disable" - EventActivityCodeAccountSettingPeerLoginExpirationEnable EventActivityCode = "account.setting.peer.login.expiration.enable" - EventActivityCodeAccountSettingPeerLoginExpirationUpdate EventActivityCode = "account.setting.peer.login.expiration.update" - EventActivityCodeDnsSettingDisabledManagementGroupAdd EventActivityCode = "dns.setting.disabled.management.group.add" - EventActivityCodeDnsSettingDisabledManagementGroupDelete EventActivityCode = "dns.setting.disabled.management.group.delete" - EventActivityCodeGroupAdd EventActivityCode = "group.add" - EventActivityCodeGroupUpdate EventActivityCode = "group.update" - EventActivityCodeNameserverGroupAdd EventActivityCode = "nameserver.group.add" - EventActivityCodeNameserverGroupDelete EventActivityCode = "nameserver.group.delete" - EventActivityCodeNameserverGroupUpdate EventActivityCode = "nameserver.group.update" - EventActivityCodePeerLoginExpirationDisable EventActivityCode = "peer.login.expiration.disable" - EventActivityCodePeerLoginExpirationEnable EventActivityCode = "peer.login.expiration.enable" - EventActivityCodePeerLoginExpire EventActivityCode = "peer.login.expire" - EventActivityCodePeerRename EventActivityCode = "peer.rename" - EventActivityCodePeerSshDisable EventActivityCode = "peer.ssh.disable" - EventActivityCodePeerSshEnable EventActivityCode = "peer.ssh.enable" - EventActivityCodePersonalAccessTokenCreate EventActivityCode = "personal.access.token.create" - EventActivityCodePersonalAccessTokenDelete EventActivityCode = "personal.access.token.delete" - EventActivityCodePolicyAdd EventActivityCode = "policy.add" - EventActivityCodePolicyDelete EventActivityCode = "policy.delete" - EventActivityCodePolicyUpdate EventActivityCode = "policy.update" - EventActivityCodeRouteAdd EventActivityCode = "route.add" - EventActivityCodeRouteDelete EventActivityCode = "route.delete" - EventActivityCodeRouteUpdate EventActivityCode = "route.update" - EventActivityCodeRuleAdd EventActivityCode = "rule.add" - EventActivityCodeRuleDelete EventActivityCode = "rule.delete" - EventActivityCodeRuleUpdate EventActivityCode = "rule.update" - EventActivityCodeServiceUserCreate EventActivityCode = "service.user.create" - EventActivityCodeServiceUserDelete EventActivityCode = "service.user.delete" - EventActivityCodeSetupkeyAdd EventActivityCode = "setupkey.add" - EventActivityCodeSetupkeyGroupAdd EventActivityCode = "setupkey.group.add" - EventActivityCodeSetupkeyGroupDelete EventActivityCode = "setupkey.group.delete" - EventActivityCodeSetupkeyOveruse EventActivityCode = "setupkey.overuse" - EventActivityCodeSetupkeyPeerAdd EventActivityCode = "setupkey.peer.add" - EventActivityCodeSetupkeyRevoke EventActivityCode = "setupkey.revoke" - EventActivityCodeSetupkeyUpdate EventActivityCode = "setupkey.update" - EventActivityCodeUserBlock EventActivityCode = "user.block" - EventActivityCodeUserGroupAdd EventActivityCode = "user.group.add" - EventActivityCodeUserGroupDelete EventActivityCode = "user.group.delete" - EventActivityCodeUserInvite EventActivityCode = "user.invite" - EventActivityCodeUserJoin EventActivityCode = "user.join" - EventActivityCodeUserPeerAdd EventActivityCode = "user.peer.add" - EventActivityCodeUserPeerDelete EventActivityCode = "user.peer.delete" - EventActivityCodeUserPeerLogin EventActivityCode = "user.peer.login" - EventActivityCodeUserRoleUpdate EventActivityCode = "user.role.update" - EventActivityCodeUserUnblock EventActivityCode = "user.unblock" + EventActivityCodeAccountCreate EventActivityCode = "account.create" + EventActivityCodeAccountDelete EventActivityCode = "account.delete" + EventActivityCodeAccountDnsDomainUpdate EventActivityCode = "account.dns.domain.update" + EventActivityCodeAccountNetworkRangeUpdate EventActivityCode = "account.network.range.update" + EventActivityCodeAccountPeerInactivityExpirationDisable EventActivityCode = "account.peer.inactivity.expiration.disable" + EventActivityCodeAccountPeerInactivityExpirationEnable EventActivityCode = "account.peer.inactivity.expiration.enable" + EventActivityCodeAccountPeerInactivityExpirationUpdate EventActivityCode = "account.peer.inactivity.expiration.update" + EventActivityCodeAccountSettingGroupPropagationDisable EventActivityCode = "account.setting.group.propagation.disable" + EventActivityCodeAccountSettingGroupPropagationEnable EventActivityCode = "account.setting.group.propagation.enable" + EventActivityCodeAccountSettingLazyConnectionDisable EventActivityCode = "account.setting.lazy.connection.disable" + EventActivityCodeAccountSettingLazyConnectionEnable EventActivityCode = "account.setting.lazy.connection.enable" + EventActivityCodeAccountSettingPeerApprovalDisable EventActivityCode = "account.setting.peer.approval.disable" + EventActivityCodeAccountSettingPeerApprovalEnable EventActivityCode = "account.setting.peer.approval.enable" + EventActivityCodeAccountSettingPeerLoginExpirationDisable EventActivityCode = "account.setting.peer.login.expiration.disable" + EventActivityCodeAccountSettingPeerLoginExpirationEnable EventActivityCode = "account.setting.peer.login.expiration.enable" + EventActivityCodeAccountSettingPeerLoginExpirationUpdate EventActivityCode = "account.setting.peer.login.expiration.update" + EventActivityCodeAccountSettingRoutingPeerDnsResolutionDisable EventActivityCode = "account.setting.routing.peer.dns.resolution.disable" + EventActivityCodeAccountSettingRoutingPeerDnsResolutionEnable EventActivityCode = "account.setting.routing.peer.dns.resolution.enable" + EventActivityCodeAccountSettingsAutoVersionUpdate EventActivityCode = "account.settings.auto.version.update" + EventActivityCodeDashboardLogin EventActivityCode = "dashboard.login" + EventActivityCodeDnsSettingDisabledManagementGroupAdd EventActivityCode = "dns.setting.disabled.management.group.add" + EventActivityCodeDnsSettingDisabledManagementGroupDelete EventActivityCode = "dns.setting.disabled.management.group.delete" + EventActivityCodeDnsZoneCreate EventActivityCode = "dns.zone.create" + EventActivityCodeDnsZoneDelete EventActivityCode = "dns.zone.delete" + EventActivityCodeDnsZoneRecordCreate EventActivityCode = "dns.zone.record.create" + EventActivityCodeDnsZoneRecordDelete EventActivityCode = "dns.zone.record.delete" + EventActivityCodeDnsZoneRecordUpdate EventActivityCode = "dns.zone.record.update" + EventActivityCodeDnsZoneUpdate EventActivityCode = "dns.zone.update" + EventActivityCodeGroupAdd EventActivityCode = "group.add" + EventActivityCodeGroupDelete EventActivityCode = "group.delete" + EventActivityCodeGroupUpdate EventActivityCode = "group.update" + EventActivityCodeIdentityproviderCreate EventActivityCode = "identityprovider.create" + EventActivityCodeIdentityproviderDelete EventActivityCode = "identityprovider.delete" + EventActivityCodeIdentityproviderUpdate EventActivityCode = "identityprovider.update" + EventActivityCodeIntegrationCreate EventActivityCode = "integration.create" + EventActivityCodeIntegrationDelete EventActivityCode = "integration.delete" + EventActivityCodeIntegrationUpdate EventActivityCode = "integration.update" + EventActivityCodeNameserverGroupAdd EventActivityCode = "nameserver.group.add" + EventActivityCodeNameserverGroupDelete EventActivityCode = "nameserver.group.delete" + EventActivityCodeNameserverGroupUpdate EventActivityCode = "nameserver.group.update" + EventActivityCodeNetworkCreate EventActivityCode = "network.create" + EventActivityCodeNetworkDelete EventActivityCode = "network.delete" + EventActivityCodeNetworkResourceCreate EventActivityCode = "network.resource.create" + EventActivityCodeNetworkResourceDelete EventActivityCode = "network.resource.delete" + EventActivityCodeNetworkResourceUpdate EventActivityCode = "network.resource.update" + EventActivityCodeNetworkRouterCreate EventActivityCode = "network.router.create" + EventActivityCodeNetworkRouterDelete EventActivityCode = "network.router.delete" + EventActivityCodeNetworkRouterUpdate EventActivityCode = "network.router.update" + EventActivityCodeNetworkUpdate EventActivityCode = "network.update" + EventActivityCodePeerApprovalRevoke EventActivityCode = "peer.approval.revoke" + EventActivityCodePeerApprove EventActivityCode = "peer.approve" + EventActivityCodePeerGroupAdd EventActivityCode = "peer.group.add" + EventActivityCodePeerGroupDelete EventActivityCode = "peer.group.delete" + EventActivityCodePeerInactivityExpirationDisable EventActivityCode = "peer.inactivity.expiration.disable" + EventActivityCodePeerInactivityExpirationEnable EventActivityCode = "peer.inactivity.expiration.enable" + EventActivityCodePeerIpUpdate EventActivityCode = "peer.ip.update" + EventActivityCodePeerJobCreate EventActivityCode = "peer.job.create" + EventActivityCodePeerLoginExpirationDisable EventActivityCode = "peer.login.expiration.disable" + EventActivityCodePeerLoginExpirationEnable EventActivityCode = "peer.login.expiration.enable" + EventActivityCodePeerLoginExpire EventActivityCode = "peer.login.expire" + EventActivityCodePeerRename EventActivityCode = "peer.rename" + EventActivityCodePeerSetupkeyAdd EventActivityCode = "peer.setupkey.add" + EventActivityCodePeerSshDisable EventActivityCode = "peer.ssh.disable" + EventActivityCodePeerSshEnable EventActivityCode = "peer.ssh.enable" + EventActivityCodePeerUserAdd EventActivityCode = "peer.user.add" + EventActivityCodePersonalAccessTokenCreate EventActivityCode = "personal.access.token.create" + EventActivityCodePersonalAccessTokenDelete EventActivityCode = "personal.access.token.delete" + EventActivityCodePolicyAdd EventActivityCode = "policy.add" + EventActivityCodePolicyDelete EventActivityCode = "policy.delete" + EventActivityCodePolicyUpdate EventActivityCode = "policy.update" + EventActivityCodePostureCheckCreate EventActivityCode = "posture.check.create" + EventActivityCodePostureCheckDelete EventActivityCode = "posture.check.delete" + EventActivityCodePostureCheckUpdate EventActivityCode = "posture.check.update" + EventActivityCodeResourceGroupAdd EventActivityCode = "resource.group.add" + EventActivityCodeResourceGroupDelete EventActivityCode = "resource.group.delete" + EventActivityCodeRouteAdd EventActivityCode = "route.add" + EventActivityCodeRouteDelete EventActivityCode = "route.delete" + EventActivityCodeRouteUpdate EventActivityCode = "route.update" + EventActivityCodeRuleAdd EventActivityCode = "rule.add" + EventActivityCodeRuleDelete EventActivityCode = "rule.delete" + EventActivityCodeRuleUpdate EventActivityCode = "rule.update" + EventActivityCodeServiceUserCreate EventActivityCode = "service.user.create" + EventActivityCodeServiceUserDelete EventActivityCode = "service.user.delete" + EventActivityCodeSetupkeyAdd EventActivityCode = "setupkey.add" + EventActivityCodeSetupkeyDelete EventActivityCode = "setupkey.delete" + EventActivityCodeSetupkeyGroupAdd EventActivityCode = "setupkey.group.add" + EventActivityCodeSetupkeyGroupDelete EventActivityCode = "setupkey.group.delete" + EventActivityCodeSetupkeyOveruse EventActivityCode = "setupkey.overuse" + EventActivityCodeSetupkeyRevoke EventActivityCode = "setupkey.revoke" + EventActivityCodeSetupkeyUpdate EventActivityCode = "setupkey.update" + EventActivityCodeTransferredOwnerRole EventActivityCode = "transferred.owner.role" + EventActivityCodeUserApprove EventActivityCode = "user.approve" + EventActivityCodeUserBlock EventActivityCode = "user.block" + EventActivityCodeUserCreate EventActivityCode = "user.create" + EventActivityCodeUserDelete EventActivityCode = "user.delete" + EventActivityCodeUserGroupAdd EventActivityCode = "user.group.add" + EventActivityCodeUserGroupDelete EventActivityCode = "user.group.delete" + EventActivityCodeUserInvite EventActivityCode = "user.invite" + EventActivityCodeUserJoin EventActivityCode = "user.join" + EventActivityCodeUserPasswordChange EventActivityCode = "user.password.change" + EventActivityCodeUserPeerDelete EventActivityCode = "user.peer.delete" + EventActivityCodeUserPeerLogin EventActivityCode = "user.peer.login" + EventActivityCodeUserReject EventActivityCode = "user.reject" + EventActivityCodeUserRoleUpdate EventActivityCode = "user.role.update" + EventActivityCodeUserUnblock EventActivityCode = "user.unblock" ) // Defines values for GeoLocationCheckAction. From 202fa47f2b19a0d45ea6f7959cc688e7df530992 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Tue, 20 Jan 2026 17:21:25 +0100 Subject: [PATCH 061/374] [client] Add support to wildcard custom records (#5125) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * **New Features** * Wildcard DNS fallback for eligible query types (excluding NS/SOA): attempts wildcard records when no exact match, rewrites wildcard names back to the original query, and rotates responses; preserves CNAME resolution. * **Tests** * Vastly expanded coverage for wildcard behaviors, precedence, multi-record round‑robin, multi-type chains, multi-hop and cross-zone scenarios, and edge cases (NXDOMAIN/NODATA, fallthrough). * **Chores** * CI lint config updated to ignore an additional codespell entry. --- .github/workflows/golangci-lint.yml | 2 +- client/internal/dns/local/local.go | 62 +- client/internal/dns/local/local_test.go | 1233 ++++++++++++++++++++++- 3 files changed, 1290 insertions(+), 7 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 9ce779dbb..19a3a01e0 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -19,7 +19,7 @@ jobs: - name: codespell uses: codespell-project/actions-codespell@v2 with: - ignore_words_list: erro,clienta,hastable,iif,groupd,testin,groupe,cros + ignore_words_list: erro,clienta,hastable,iif,groupd,testin,groupe,cros,ans skip: go.mod,go.sum golangci: strategy: diff --git a/client/internal/dns/local/local.go b/client/internal/dns/local/local.go index 63c2428ce..ae27b3b56 100644 --- a/client/internal/dns/local/local.go +++ b/client/internal/dns/local/local.go @@ -120,7 +120,7 @@ func (d *Resolver) determineRcode(question dns.Question, result lookupResult) in } // No records found, but domain exists with different record types (NODATA) - if d.hasRecordsForDomain(domain.Domain(question.Name)) { + if d.hasRecordsForDomain(domain.Domain(question.Name), question.Qtype) { return dns.RcodeSuccess } @@ -164,11 +164,15 @@ func (d *Resolver) continueToNext(logger *log.Entry, w dns.ResponseWriter, r *dn } // hasRecordsForDomain checks if any records exist for the given domain name regardless of type -func (d *Resolver) hasRecordsForDomain(domainName domain.Domain) bool { +func (d *Resolver) hasRecordsForDomain(domainName domain.Domain, qType uint16) bool { d.mu.RLock() defer d.mu.RUnlock() _, exists := d.domains[domainName] + if !exists && supportsWildcard(qType) { + testWild := transformDomainToWildcard(string(domainName)) + _, exists = d.domains[domain.Domain(testWild)] + } return exists } @@ -195,6 +199,12 @@ type lookupResult struct { func (d *Resolver) lookupRecords(logger *log.Entry, question dns.Question) lookupResult { d.mu.RLock() records, found := d.records[question] + usingWildcard := false + wildQuestion := transformToWildcard(question) + if !found && supportsWildcard(question.Qtype) { + records, found = d.records[wildQuestion] + usingWildcard = found + } if !found { d.mu.RUnlock() @@ -216,18 +226,53 @@ func (d *Resolver) lookupRecords(logger *log.Entry, question dns.Question) looku // if there's more than one record, rotate them (round-robin) if len(recordsCopy) > 1 { d.mu.Lock() - records = d.records[question] + q := question + if usingWildcard { + q = wildQuestion + } + records = d.records[q] if len(records) > 1 { first := records[0] records = append(records[1:], first) - d.records[question] = records + d.records[q] = records } d.mu.Unlock() } + if usingWildcard { + return responseFromWildRecords(question.Name, wildQuestion.Name, recordsCopy) + } + return lookupResult{records: recordsCopy, rcode: dns.RcodeSuccess} } +func transformToWildcard(question dns.Question) dns.Question { + wildQuestion := question + wildQuestion.Name = transformDomainToWildcard(wildQuestion.Name) + return wildQuestion +} + +func transformDomainToWildcard(domain string) string { + s := strings.Split(domain, ".") + s[0] = "*" + return strings.Join(s, ".") +} + +func supportsWildcard(queryType uint16) bool { + return queryType != dns.TypeNS && queryType != dns.TypeSOA +} + +func responseFromWildRecords(originalName, wildName string, wildRecords []dns.RR) lookupResult { + records := make([]dns.RR, len(wildRecords)) + for i, record := range wildRecords { + copiedRecord := dns.Copy(record) + copiedRecord.Header().Name = originalName + records[i] = copiedRecord + } + + return lookupResult{records: records, rcode: dns.RcodeSuccess} +} + // lookupCNAMEChain follows a CNAME chain and returns the CNAME records along with // the final resolved record of the requested type. This is required for musl libc // compatibility, which expects the full answer chain rather than just the CNAME. @@ -237,6 +282,13 @@ func (d *Resolver) lookupCNAMEChain(logger *log.Entry, cnameQuestion dns.Questio for range maxDepth { cnameRecords := d.getRecords(cnameQuestion) + if len(cnameRecords) == 0 && supportsWildcard(targetType) { + wildQuestion := transformToWildcard(cnameQuestion) + if wildRecords := d.getRecords(wildQuestion); len(wildRecords) > 0 { + cnameRecords = responseFromWildRecords(cnameQuestion.Name, wildQuestion.Name, wildRecords).records + } + } + if len(cnameRecords) == 0 { break } @@ -303,7 +355,7 @@ func (d *Resolver) resolveCNAMETarget(logger *log.Entry, targetName string, targ } // domain exists locally but not this record type (NODATA) - if d.hasRecordsForDomain(domain.Domain(targetName)) { + if d.hasRecordsForDomain(domain.Domain(targetName), targetType) { return lookupResult{rcode: dns.RcodeSuccess} } diff --git a/client/internal/dns/local/local_test.go b/client/internal/dns/local/local_test.go index 1c7cad5d1..dc295cd17 100644 --- a/client/internal/dns/local/local_test.go +++ b/client/internal/dns/local/local_test.go @@ -47,6 +47,24 @@ func TestLocalResolver_ServeDNS(t *testing.T) { RData: "www.netbird.io", } + wild := "wild.netbird.cloud." + + recordWild := nbdns.SimpleRecord{ + Name: "*." + wild, + Type: 1, + Class: nbdns.DefaultClass, + TTL: 300, + RData: "1.2.3.4", + } + + specificRecord := nbdns.SimpleRecord{ + Name: "existing." + wild, + Type: 1, + Class: nbdns.DefaultClass, + TTL: 300, + RData: "5.6.7.8", + } + testCases := []struct { name string inputRecord nbdns.SimpleRecord @@ -69,12 +87,23 @@ func TestLocalResolver_ServeDNS(t *testing.T) { inputMSG: new(dns.Msg).SetQuestion("not.found.com", dns.TypeA), responseShouldBeNil: true, }, + { + name: "Should Resolve A Wild Record", + inputRecord: recordWild, + inputMSG: new(dns.Msg).SetQuestion("test."+wild, dns.TypeA), + }, + { + name: "Should Resolve A more specific Record", + inputRecord: specificRecord, + inputMSG: new(dns.Msg).SetQuestion(specificRecord.Name, dns.TypeA), + }, } for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { resolver := NewResolver() _ = resolver.RegisterRecord(testCase.inputRecord) + _ = resolver.RegisterRecord(recordWild) var responseMSG *dns.Msg responseWriter := &test.MockResponseWriter{ WriteMsgFunc: func(m *dns.Msg) error { @@ -93,7 +122,7 @@ func TestLocalResolver_ServeDNS(t *testing.T) { } answerString := responseMSG.Answer[0].String() - if !strings.Contains(answerString, testCase.inputRecord.Name) { + if !strings.Contains(answerString, testCase.inputMSG.Question[0].Name) { t.Fatalf("answer doesn't contain the same domain name: \nWant: %s\nGot:%s", testCase.name, answerString) } if !strings.Contains(answerString, dns.Type(testCase.inputRecord.Type).String()) { @@ -1341,6 +1370,1208 @@ func TestLocalResolver_FallthroughCaseInsensitive(t *testing.T) { assert.True(t, responseMSG.MsgHdr.Zero, "Should fallthrough for non-authoritative zone with case-insensitive match") } +// TestLocalResolver_WildcardCNAME tests wildcard CNAME record handling for non-CNAME queries +func TestLocalResolver_WildcardCNAME(t *testing.T) { + t.Run("wildcard CNAME resolves A query with internal target", func(t *testing.T) { + resolver := NewResolver() + + // Configure wildcard CNAME pointing to internal A record + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("foo.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should resolve via wildcard CNAME") + require.Len(t, resp.Answer, 2, "Should have CNAME + A record") + + // Verify CNAME has the original query name, not the wildcard + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok, "First answer should be CNAME") + assert.Equal(t, "foo.example.com.", cname.Hdr.Name, "CNAME owner should be rewritten to query name") + assert.Equal(t, "target.example.com.", cname.Target) + + // Verify A record + a, ok := resp.Answer[1].(*dns.A) + require.True(t, ok, "Second answer should be A record") + assert.Equal(t, "10.0.0.1", a.A.String()) + }) + + t.Run("wildcard CNAME resolves AAAA query with internal target", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("bar.example.com.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should resolve via wildcard CNAME") + require.Len(t, resp.Answer, 2, "Should have CNAME + AAAA record") + + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "bar.example.com.", cname.Hdr.Name, "CNAME owner should be rewritten") + + aaaa, ok := resp.Answer[1].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String()) + }) + + t.Run("specific record takes precedence over wildcard CNAME", func(t *testing.T) { + resolver := NewResolver() + + // Both wildcard CNAME and specific A record exist + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "specific.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "192.168.1.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("specific.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1, "Should return specific A record only") + + a, ok := resp.Answer[0].(*dns.A) + require.True(t, ok) + assert.Equal(t, "192.168.1.1", a.A.String()) + }) + + t.Run("specific CNAME takes precedence over wildcard CNAME", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "wildcard-target.example.com."}, + {Name: "specific.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "specific-target.example.com."}, + {Name: "specific-target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.1.1.1"}, + {Name: "wildcard-target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.2.2.2"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("specific.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.GreaterOrEqual(t, len(resp.Answer), 1) + + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "specific-target.example.com.", cname.Target, "Should use specific CNAME, not wildcard") + }) + + t.Run("wildcard CNAME to non-existent internal target returns NXDOMAIN with CNAME", func(t *testing.T) { + resolver := NewResolver() + + // Wildcard CNAME pointing to non-existent internal target + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "nonexistent.example.com."}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("foo.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + // Per RFC 6604, CNAME chains should return the rcode of the final target. + // When the wildcard CNAME target doesn't exist in the managed zone, this + // returns NXDOMAIN with the CNAME record included. + // Note: Current implementation returns NODATA (success) because the wildcard + // domain exists. This test documents the actual behavior. + if resp.Rcode == dns.RcodeNameError { + // RFC-compliant behavior: NXDOMAIN with CNAME + require.Len(t, resp.Answer, 1, "Should include the CNAME pointing to non-existent target") + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "foo.example.com.", cname.Hdr.Name, "CNAME owner should be rewritten") + assert.Equal(t, "nonexistent.example.com.", cname.Target) + } else { + // Current behavior: NODATA (success with CNAME but target not found) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Returns NODATA when wildcard exists but target doesn't") + } + }) + + t.Run("wildcard CNAME with multi-level subdomain", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + // Query with multi-level subdomain - wildcard should only match first label + // Standard DNS wildcards only match a single label, so sub.domain.example.com + // should NOT match *.example.com - this tests current implementation behavior + msg := new(dns.Msg).SetQuestion("sub.domain.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + }) + + t.Run("wildcard CNAME NODATA when target has no matching type", func(t *testing.T) { + resolver := NewResolver() + + // Wildcard CNAME to target that only has A record, query for AAAA + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("foo.example.com.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should return NODATA (success with no answer for AAAA)") + require.Len(t, resp.Answer, 1, "Should have only CNAME") + + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "foo.example.com.", cname.Hdr.Name) + }) + + t.Run("direct CNAME query for wildcard record", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + }, + }}) + + // Direct CNAME query should also work via wildcard + msg := new(dns.Msg).SetQuestion("foo.example.com.", dns.TypeCNAME) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "foo.example.com.", cname.Hdr.Name, "CNAME owner should be rewritten") + assert.Equal(t, "target.example.com.", cname.Target) + }) + + t.Run("wildcard CNAME case insensitive query", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("FOO.EXAMPLE.COM.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode, "Wildcard CNAME should match case-insensitively") + require.Len(t, resp.Answer, 2) + }) + + t.Run("wildcard A and wildcard CNAME coexist - A takes precedence", func(t *testing.T) { + resolver := NewResolver() + + // Both wildcard A and wildcard CNAME exist + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("foo.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + + // A record should be returned, not CNAME + a, ok := resp.Answer[0].(*dns.A) + require.True(t, ok, "Wildcard A should take precedence over wildcard CNAME for A query") + assert.Equal(t, "10.0.0.1", a.A.String()) + }) + + t.Run("wildcard CNAME with chained CNAMEs", func(t *testing.T) { + resolver := NewResolver() + + // Wildcard CNAME -> another CNAME -> A record + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "hop1.example.com."}, + {Name: "hop1.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "final.example.com."}, + {Name: "final.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("anyhost.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 3, "Should have wildcard CNAME + hop1 CNAME + A record") + + // First should be the wildcard CNAME with rewritten name + cname1, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "anyhost.example.com.", cname1.Hdr.Name) + assert.Equal(t, "hop1.example.com.", cname1.Target) + }) +} + +// TestLocalResolver_WildcardAandAAAA tests wildcard A and AAAA record handling +func TestLocalResolver_WildcardAandAAAA(t *testing.T) { + t.Run("wildcard A record resolves with owner name rewriting", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("anyhost.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + + a, ok := resp.Answer[0].(*dns.A) + require.True(t, ok) + assert.Equal(t, "anyhost.example.com.", a.Hdr.Name, "Owner name should be rewritten to query name") + assert.Equal(t, "10.0.0.1", a.A.String()) + }) + + t.Run("wildcard AAAA record resolves with owner name rewriting", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("anyhost.example.com.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + + aaaa, ok := resp.Answer[0].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "anyhost.example.com.", aaaa.Hdr.Name, "Owner name should be rewritten to query name") + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String()) + }) + + t.Run("NODATA when querying AAAA but only wildcard A exists", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("anyhost.example.com.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should return NODATA (success with no answer)") + assert.Len(t, resp.Answer, 0, "Should have no AAAA answer") + }) + + t.Run("NODATA when querying A but only wildcard AAAA exists", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("anyhost.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should return NODATA (success with no answer)") + assert.Len(t, resp.Answer, 0, "Should have no A answer") + }) + + t.Run("dual-stack wildcard returns both A and AAAA separately", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "*.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + // Query A + msgA := new(dns.Msg).SetQuestion("anyhost.example.com.", dns.TypeA) + var respA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respA = m; return nil }}, msgA) + + require.NotNil(t, respA) + require.Equal(t, dns.RcodeSuccess, respA.Rcode) + require.Len(t, respA.Answer, 1) + a, ok := respA.Answer[0].(*dns.A) + require.True(t, ok) + assert.Equal(t, "10.0.0.1", a.A.String()) + + // Query AAAA + msgAAAA := new(dns.Msg).SetQuestion("anyhost.example.com.", dns.TypeAAAA) + var respAAAA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respAAAA = m; return nil }}, msgAAAA) + + require.NotNil(t, respAAAA) + require.Equal(t, dns.RcodeSuccess, respAAAA.Rcode) + require.Len(t, respAAAA.Answer, 1) + aaaa, ok := respAAAA.Answer[0].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String()) + }) + + t.Run("specific A takes precedence over wildcard A", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "specific.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "192.168.1.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("specific.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + + a, ok := resp.Answer[0].(*dns.A) + require.True(t, ok) + assert.Equal(t, "192.168.1.1", a.A.String(), "Specific record should take precedence") + }) + + t.Run("specific AAAA takes precedence over wildcard AAAA", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + {Name: "specific.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::2"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("specific.example.com.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + + aaaa, ok := resp.Answer[0].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2001:db8::2", aaaa.AAAA.String(), "Specific record should take precedence") + }) + + t.Run("multiple wildcard A records round-robin", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.2"}, + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.3"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("anyhost.example.com.", dns.TypeA) + + var firstIPs []string + for i := 0; i < 3; i++ { + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Len(t, resp.Answer, 3, "Should return all 3 A records") + + a, ok := resp.Answer[0].(*dns.A) + require.True(t, ok) + firstIPs = append(firstIPs, a.A.String()) + + // Verify owner name is rewritten for all records + for _, ans := range resp.Answer { + assert.Equal(t, "anyhost.example.com.", ans.Header().Name) + } + } + + // Verify rotation happened + assert.NotEqual(t, firstIPs[0], firstIPs[1], "First record should rotate") + assert.NotEqual(t, firstIPs[1], firstIPs[2], "Second rotation should differ") + }) + + t.Run("wildcard A case insensitive", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("ANYHOST.EXAMPLE.COM.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + }) + + t.Run("wildcard does not match multi-level subdomain", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + // *.example.com should NOT match sub.domain.example.com (standard DNS behavior) + msg := new(dns.Msg).SetQuestion("sub.domain.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + // This depends on implementation - standard DNS wildcards only match single label + // Current implementation replaces first label with *, so it WOULD match + // This test documents the current behavior + }) + + t.Run("wildcard with existing domain but different type returns NODATA", func(t *testing.T) { + resolver := NewResolver() + + // Specific A record exists, but query for TXT on wildcard domain + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("test.example.com.", dns.TypeTXT) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should return NODATA for existing wildcard domain with different type") + assert.Len(t, resp.Answer, 0) + }) + + t.Run("mixed specific and wildcard returns correct records", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "specific.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + // Query A for specific - should use wildcard + msgA := new(dns.Msg).SetQuestion("specific.example.com.", dns.TypeA) + var respA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respA = m; return nil }}, msgA) + + require.NotNil(t, respA) + // This could be NODATA since specific.example.com exists but has no A + // or could return wildcard A - depends on implementation + // The current behavior returns NODATA because specific domain exists + + // Query AAAA for specific - should use specific record + msgAAAA := new(dns.Msg).SetQuestion("specific.example.com.", dns.TypeAAAA) + var respAAAA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respAAAA = m; return nil }}, msgAAAA) + + require.NotNil(t, respAAAA) + require.Equal(t, dns.RcodeSuccess, respAAAA.Rcode) + require.Len(t, respAAAA.Answer, 1) + aaaa, ok := respAAAA.Answer[0].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String()) + }) +} + +// TestLocalResolver_WildcardEdgeCases tests edge cases for wildcard record handling +func TestLocalResolver_WildcardEdgeCases(t *testing.T) { + t.Run("wildcard does not match NS queries", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("foo.example.com.", dns.TypeNS) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeNameError, resp.Rcode, "NS queries should not match wildcards") + assert.Len(t, resp.Answer, 0) + }) + + t.Run("wildcard does not match SOA queries", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("foo.example.com.", dns.TypeSOA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeNameError, resp.Rcode, "SOA queries should not match wildcards") + assert.Len(t, resp.Answer, 0) + }) + + t.Run("apex wildcard query", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + // Query for *.example.com directly (the wildcard itself) + msg := new(dns.Msg).SetQuestion("*.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + + a, ok := resp.Answer[0].(*dns.A) + require.True(t, ok) + assert.Equal(t, "10.0.0.1", a.A.String()) + }) + + t.Run("wildcard TXT record", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeTXT), Class: nbdns.DefaultClass, TTL: 300, RData: "v=spf1 -all"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("mail.example.com.", dns.TypeTXT) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + + txt, ok := resp.Answer[0].(*dns.TXT) + require.True(t, ok) + assert.Equal(t, "mail.example.com.", txt.Hdr.Name, "TXT owner should be rewritten") + }) + + t.Run("wildcard MX record", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeMX), Class: nbdns.DefaultClass, TTL: 300, RData: "10 mail.example.com."}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("sub.example.com.", dns.TypeMX) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + + mx, ok := resp.Answer[0].(*dns.MX) + require.True(t, ok) + assert.Equal(t, "sub.example.com.", mx.Hdr.Name, "MX owner should be rewritten") + }) + + t.Run("non-authoritative zone with wildcard CNAME triggers fallthrough for unmatched names", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + NonAuthoritative: true, + Records: []nbdns.SimpleRecord{ + {Name: "*.sub.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + }, + }}) + + // Query for name not matching the wildcard pattern + msg := new(dns.Msg).SetQuestion("other.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.True(t, resp.MsgHdr.Zero, "Should trigger fallthrough for non-authoritative zone") + }) +} + +// TestLocalResolver_MixedRecordTypes tests scenarios with A, AAAA, and CNAME records combined +func TestLocalResolver_MixedRecordTypes(t *testing.T) { + t.Run("specific A with wildcard CNAME - A query uses specific A", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "specific.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.2"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("specific.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1, "Should return only the specific A record") + + a, ok := resp.Answer[0].(*dns.A) + require.True(t, ok) + assert.Equal(t, "10.0.0.1", a.A.String(), "Should use specific A, not follow wildcard CNAME") + }) + + t.Run("specific AAAA with wildcard CNAME - AAAA query uses specific AAAA", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "specific.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + {Name: "target.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::2"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("specific.example.com.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1, "Should return only the specific AAAA record") + + aaaa, ok := resp.Answer[0].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String(), "Should use specific AAAA, not follow wildcard CNAME") + }) + + t.Run("specific A only - AAAA query returns NODATA", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("host.example.com.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should return NODATA (success with no AAAA)") + assert.Len(t, resp.Answer, 0) + }) + + t.Run("specific AAAA only - A query returns NODATA", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "host.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("host.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should return NODATA (success with no A)") + assert.Len(t, resp.Answer, 0) + }) + + t.Run("CNAME with both A and AAAA target - A query returns CNAME + A", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "target.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("alias.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 2, "Should have CNAME + A") + + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "target.example.com.", cname.Target) + + a, ok := resp.Answer[1].(*dns.A) + require.True(t, ok) + assert.Equal(t, "10.0.0.1", a.A.String()) + }) + + t.Run("CNAME with both A and AAAA target - AAAA query returns CNAME + AAAA", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "target.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("alias.example.com.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 2, "Should have CNAME + AAAA") + + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "target.example.com.", cname.Target) + + aaaa, ok := resp.Answer[1].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String()) + }) + + t.Run("CNAME to target with only A - AAAA query returns CNAME only (NODATA)", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("alias.example.com.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should return NODATA with CNAME") + require.Len(t, resp.Answer, 1, "Should have only CNAME") + + _, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + }) + + t.Run("CNAME to target with only AAAA - A query returns CNAME only (NODATA)", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("alias.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should return NODATA with CNAME") + require.Len(t, resp.Answer, 1, "Should have only CNAME") + + _, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + }) + + t.Run("wildcard A + wildcard AAAA + wildcard CNAME - each query type returns correct record", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "*.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + }, + }}) + + // A query should return wildcard A (not CNAME) + msgA := new(dns.Msg).SetQuestion("any.example.com.", dns.TypeA) + var respA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respA = m; return nil }}, msgA) + + require.NotNil(t, respA) + require.Equal(t, dns.RcodeSuccess, respA.Rcode) + require.Len(t, respA.Answer, 1) + a, ok := respA.Answer[0].(*dns.A) + require.True(t, ok, "A query should return A record, not CNAME") + assert.Equal(t, "10.0.0.1", a.A.String()) + + // AAAA query should return wildcard AAAA (not CNAME) + msgAAAA := new(dns.Msg).SetQuestion("any.example.com.", dns.TypeAAAA) + var respAAAA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respAAAA = m; return nil }}, msgAAAA) + + require.NotNil(t, respAAAA) + require.Equal(t, dns.RcodeSuccess, respAAAA.Rcode) + require.Len(t, respAAAA.Answer, 1) + aaaa, ok := respAAAA.Answer[0].(*dns.AAAA) + require.True(t, ok, "AAAA query should return AAAA record, not CNAME") + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String()) + + // CNAME query should return wildcard CNAME + msgCNAME := new(dns.Msg).SetQuestion("any.example.com.", dns.TypeCNAME) + var respCNAME *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respCNAME = m; return nil }}, msgCNAME) + + require.NotNil(t, respCNAME) + require.Equal(t, dns.RcodeSuccess, respCNAME.Rcode) + require.Len(t, respCNAME.Answer, 1) + cname, ok := respCNAME.Answer[0].(*dns.CNAME) + require.True(t, ok, "CNAME query should return CNAME record") + assert.Equal(t, "target.example.com.", cname.Target) + }) + + t.Run("dual-stack host with both A and AAAA", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.2"}, + {Name: "host.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + {Name: "host.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::2"}, + }, + }}) + + // A query + msgA := new(dns.Msg).SetQuestion("host.example.com.", dns.TypeA) + var respA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respA = m; return nil }}, msgA) + + require.NotNil(t, respA) + require.Equal(t, dns.RcodeSuccess, respA.Rcode) + require.Len(t, respA.Answer, 2, "Should return both A records") + for _, ans := range respA.Answer { + _, ok := ans.(*dns.A) + require.True(t, ok) + } + + // AAAA query + msgAAAA := new(dns.Msg).SetQuestion("host.example.com.", dns.TypeAAAA) + var respAAAA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respAAAA = m; return nil }}, msgAAAA) + + require.NotNil(t, respAAAA) + require.Equal(t, dns.RcodeSuccess, respAAAA.Rcode) + require.Len(t, respAAAA.Answer, 2, "Should return both AAAA records") + for _, ans := range respAAAA.Answer { + _, ok := ans.(*dns.AAAA) + require.True(t, ok) + } + }) + + t.Run("CNAME chain with mixed record types at target", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "alias1.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "alias2.example.com."}, + {Name: "alias2.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "target.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + // A query through chain + msgA := new(dns.Msg).SetQuestion("alias1.example.com.", dns.TypeA) + var respA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respA = m; return nil }}, msgA) + + require.NotNil(t, respA) + require.Equal(t, dns.RcodeSuccess, respA.Rcode) + require.Len(t, respA.Answer, 3, "Should have 2 CNAMEs + 1 A") + + // Verify chain order + cname1, ok := respA.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "alias2.example.com.", cname1.Target) + + cname2, ok := respA.Answer[1].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "target.example.com.", cname2.Target) + + a, ok := respA.Answer[2].(*dns.A) + require.True(t, ok) + assert.Equal(t, "10.0.0.1", a.A.String()) + + // AAAA query through chain + msgAAAA := new(dns.Msg).SetQuestion("alias1.example.com.", dns.TypeAAAA) + var respAAAA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respAAAA = m; return nil }}, msgAAAA) + + require.NotNil(t, respAAAA) + require.Equal(t, dns.RcodeSuccess, respAAAA.Rcode) + require.Len(t, respAAAA.Answer, 3, "Should have 2 CNAMEs + 1 AAAA") + + aaaa, ok := respAAAA.Answer[2].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String()) + }) + + t.Run("wildcard CNAME with dual-stack target", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "target.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + // A query via wildcard CNAME + msgA := new(dns.Msg).SetQuestion("any.example.com.", dns.TypeA) + var respA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respA = m; return nil }}, msgA) + + require.NotNil(t, respA) + require.Equal(t, dns.RcodeSuccess, respA.Rcode) + require.Len(t, respA.Answer, 2, "Should have CNAME + A") + + cname, ok := respA.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "any.example.com.", cname.Hdr.Name, "CNAME owner should be rewritten") + + a, ok := respA.Answer[1].(*dns.A) + require.True(t, ok) + assert.Equal(t, "10.0.0.1", a.A.String()) + + // AAAA query via wildcard CNAME + msgAAAA := new(dns.Msg).SetQuestion("other.example.com.", dns.TypeAAAA) + var respAAAA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respAAAA = m; return nil }}, msgAAAA) + + require.NotNil(t, respAAAA) + require.Equal(t, dns.RcodeSuccess, respAAAA.Rcode) + require.Len(t, respAAAA.Answer, 2, "Should have CNAME + AAAA") + + cname2, ok := respAAAA.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "other.example.com.", cname2.Hdr.Name, "CNAME owner should be rewritten") + + aaaa, ok := respAAAA.Answer[1].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String()) + }) + + t.Run("specific A + wildcard AAAA - each query type returns correct record", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "*.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + // A query for host should return specific A + msgA := new(dns.Msg).SetQuestion("host.example.com.", dns.TypeA) + var respA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respA = m; return nil }}, msgA) + + require.NotNil(t, respA) + require.Equal(t, dns.RcodeSuccess, respA.Rcode) + require.Len(t, respA.Answer, 1) + a, ok := respA.Answer[0].(*dns.A) + require.True(t, ok) + assert.Equal(t, "10.0.0.1", a.A.String()) + + // AAAA query for host should return NODATA (specific A exists, no AAAA for host.example.com) + msgAAAA := new(dns.Msg).SetQuestion("host.example.com.", dns.TypeAAAA) + var respAAAA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respAAAA = m; return nil }}, msgAAAA) + + require.NotNil(t, respAAAA) + // host.example.com exists (has A), so AAAA query returns NODATA, not wildcard + assert.Equal(t, dns.RcodeSuccess, respAAAA.Rcode, "Should return NODATA for existing host without AAAA") + + // AAAA query for other host should return wildcard AAAA + msgAAAAOther := new(dns.Msg).SetQuestion("other.example.com.", dns.TypeAAAA) + var respAAAAOther *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respAAAAOther = m; return nil }}, msgAAAAOther) + + require.NotNil(t, respAAAAOther) + require.Equal(t, dns.RcodeSuccess, respAAAAOther.Rcode) + require.Len(t, respAAAAOther.Answer, 1) + aaaa, ok := respAAAAOther.Answer[0].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String()) + assert.Equal(t, "other.example.com.", aaaa.Hdr.Name, "Owner should be rewritten") + }) + + t.Run("multiple zones with mixed records", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{ + { + Domain: "zone1.com.", + Records: []nbdns.SimpleRecord{ + {Name: "host.zone1.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.1.0.1"}, + {Name: "host.zone1.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8:1::1"}, + }, + }, + { + Domain: "zone2.com.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.zone2.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.zone2.com."}, + {Name: "target.zone2.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.2.0.1"}, + }, + }, + }) + + // Query zone1 A + msg1A := new(dns.Msg).SetQuestion("host.zone1.com.", dns.TypeA) + var resp1A *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp1A = m; return nil }}, msg1A) + + require.NotNil(t, resp1A) + require.Equal(t, dns.RcodeSuccess, resp1A.Rcode) + require.Len(t, resp1A.Answer, 1) + + // Query zone1 AAAA + msg1AAAA := new(dns.Msg).SetQuestion("host.zone1.com.", dns.TypeAAAA) + var resp1AAAA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp1AAAA = m; return nil }}, msg1AAAA) + + require.NotNil(t, resp1AAAA) + require.Equal(t, dns.RcodeSuccess, resp1AAAA.Rcode) + require.Len(t, resp1AAAA.Answer, 1) + + // Query zone2 via CNAME + msg2A := new(dns.Msg).SetQuestion("alias.zone2.com.", dns.TypeA) + var resp2A *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp2A = m; return nil }}, msg2A) + + require.NotNil(t, resp2A) + require.Equal(t, dns.RcodeSuccess, resp2A.Rcode) + require.Len(t, resp2A.Answer, 2, "Should have CNAME + A") + }) +} + // BenchmarkFindZone_BestCase benchmarks zone lookup with immediate match (first label) func BenchmarkFindZone_BestCase(b *testing.B) { resolver := NewResolver() From b3a2992a105825737f94418651f32ac19daa0373 Mon Sep 17 00:00:00 2001 From: Diego Romar Date: Tue, 20 Jan 2026 13:26:51 -0300 Subject: [PATCH 062/374] [client/android] - Fix Rosenpass connectivity for Android peers (#5044) * [client] Add WGConfigurer interface To allow Rosenpass to work both with kernel WireGuard via wgctrl (default behavior) and userspace WireGuard via IPC on Android/iOS using WGUSPConfigurer * [client] Remove Rosenpass debug logs * [client] Return simpler peer configuration in outputKey method ConfigureDevice, the method previously used in outputKey via wgClient to update the device's properties, is now defined in the WGConfigurer interface and implemented both in kernel_unix and usp configurers. PresharedKey datatype was also changed from boolean to [32]byte to compare it to the original NetBird PSK, so that Rosenpass may replace it with its own when necessary. * [client] Remove unused field * [client] Replace usage of WGConfigurer Replaced with preshared key setter interface, which only defines a method to set / update the preshared key. Logic has been migrated from rosenpass/netbird_handler to client/iface. * [client] Use same default peer keepalive value when setting preshared keys * [client] Store PresharedKeySetter iface in rosenpass manager To avoid no-op if SetInterface is called before generateConfig * [client] Add mutex usage in rosenpass netbird handler * [client] change implementation setting Rosenpass preshared key Instead of providing a method to configure a device (device/interface.go), it forwards the new parameters to the configurer (either kernel_unix.go / usp.go). This removes dependency on reading FullStats, and makes use of a common method (buildPresharedKeyConfig in configurer/common.go) to build a minimal WG config that only sets/updates the PSK. netbird_handler.go now keeps s list of initializedPeers to choose whether to set the value of "UpdateOnly" when calling iface.SetPresharedKey. * [client] Address possible race condition Between outputKey calls and peer removal; it checks again if the peer still exists in the peers map before inserting it in the initializedPeers map. * [client] Add psk Rosenpass-initialized check On client/internal/peer/conn.go, the presharedKey function would always return the current key set in wgConfig.presharedKey. This would eventually overwrite a key set by Rosenpass if the feature is active. The purpose here is to set a handler that will check if a given peer has its psk initialized by Rosenpass to skip updating the psk via updatePeer (since it calls presharedKey method in conn.go). * Add missing updateOnly flag setup for usp peers * Change common.go buildPresharedKeyConfig signature PeerKey datatype changed from string to wgTypes.Key. Callers are responsible for parsing a peer key with string datatype. --- client/iface/configurer/common.go | 14 ++ client/iface/configurer/kernel_unix.go | 16 ++- client/iface/configurer/usp.go | 65 ++++++---- client/iface/configurer/wgshow.go | 2 +- client/iface/device/interface.go | 1 + client/iface/iface.go | 13 ++ client/internal/debug/wgshow.go | 2 +- client/internal/engine.go | 6 + client/internal/engine_test.go | 4 + client/internal/iface_common.go | 1 + client/internal/peer/conn.go | 28 +++- client/internal/peer/conn_test.go | 24 ++++ client/internal/rosenpass/manager.go | 37 +++++- client/internal/rosenpass/netbird_handler.go | 130 +++++++++---------- 14 files changed, 238 insertions(+), 105 deletions(-) diff --git a/client/iface/configurer/common.go b/client/iface/configurer/common.go index 088cff69d..10162d703 100644 --- a/client/iface/configurer/common.go +++ b/client/iface/configurer/common.go @@ -3,8 +3,22 @@ package configurer import ( "net" "net/netip" + + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" ) +// buildPresharedKeyConfig creates a wgtypes.Config for setting a preshared key on a peer. +// This is a shared helper used by both kernel and userspace configurers. +func buildPresharedKeyConfig(peerKey wgtypes.Key, psk wgtypes.Key, updateOnly bool) wgtypes.Config { + return wgtypes.Config{ + Peers: []wgtypes.PeerConfig{{ + PublicKey: peerKey, + PresharedKey: &psk, + UpdateOnly: updateOnly, + }}, + } +} + func prefixesToIPNets(prefixes []netip.Prefix) []net.IPNet { ipNets := make([]net.IPNet, len(prefixes)) for i, prefix := range prefixes { diff --git a/client/iface/configurer/kernel_unix.go b/client/iface/configurer/kernel_unix.go index 96b286175..a29fe181a 100644 --- a/client/iface/configurer/kernel_unix.go +++ b/client/iface/configurer/kernel_unix.go @@ -15,8 +15,6 @@ import ( "github.com/netbirdio/netbird/monotime" ) -var zeroKey wgtypes.Key - type KernelConfigurer struct { deviceName string } @@ -48,6 +46,18 @@ func (c *KernelConfigurer) ConfigureInterface(privateKey string, port int) error return nil } +// SetPresharedKey sets the preshared key for a peer. +// If updateOnly is true, only updates the existing peer; if false, creates or updates. +func (c *KernelConfigurer) SetPresharedKey(peerKey string, psk wgtypes.Key, updateOnly bool) error { + parsedPeerKey, err := wgtypes.ParseKey(peerKey) + if err != nil { + return err + } + + cfg := buildPresharedKeyConfig(parsedPeerKey, psk, updateOnly) + return c.configure(cfg) +} + func (c *KernelConfigurer) UpdatePeer(peerKey string, allowedIps []netip.Prefix, keepAlive time.Duration, endpoint *net.UDPAddr, preSharedKey *wgtypes.Key) error { peerKeyParsed, err := wgtypes.ParseKey(peerKey) if err != nil { @@ -279,7 +289,7 @@ func (c *KernelConfigurer) FullStats() (*Stats, error) { TxBytes: p.TransmitBytes, RxBytes: p.ReceiveBytes, LastHandshake: p.LastHandshakeTime, - PresharedKey: p.PresharedKey != zeroKey, + PresharedKey: [32]byte(p.PresharedKey), } if p.Endpoint != nil { peer.Endpoint = *p.Endpoint diff --git a/client/iface/configurer/usp.go b/client/iface/configurer/usp.go index bc875b73c..c4ea349df 100644 --- a/client/iface/configurer/usp.go +++ b/client/iface/configurer/usp.go @@ -22,17 +22,16 @@ import ( ) const ( - privateKey = "private_key" - ipcKeyLastHandshakeTimeSec = "last_handshake_time_sec" - ipcKeyLastHandshakeTimeNsec = "last_handshake_time_nsec" - ipcKeyTxBytes = "tx_bytes" - ipcKeyRxBytes = "rx_bytes" - allowedIP = "allowed_ip" - endpoint = "endpoint" - fwmark = "fwmark" - listenPort = "listen_port" - publicKey = "public_key" - presharedKey = "preshared_key" + privateKey = "private_key" + ipcKeyLastHandshakeTimeSec = "last_handshake_time_sec" + ipcKeyTxBytes = "tx_bytes" + ipcKeyRxBytes = "rx_bytes" + allowedIP = "allowed_ip" + endpoint = "endpoint" + fwmark = "fwmark" + listenPort = "listen_port" + publicKey = "public_key" + presharedKey = "preshared_key" ) var ErrAllowedIPNotFound = fmt.Errorf("allowed IP not found") @@ -72,6 +71,18 @@ func (c *WGUSPConfigurer) ConfigureInterface(privateKey string, port int) error return c.device.IpcSet(toWgUserspaceString(config)) } +// SetPresharedKey sets the preshared key for a peer. +// If updateOnly is true, only updates the existing peer; if false, creates or updates. +func (c *WGUSPConfigurer) SetPresharedKey(peerKey string, psk wgtypes.Key, updateOnly bool) error { + parsedPeerKey, err := wgtypes.ParseKey(peerKey) + if err != nil { + return err + } + + cfg := buildPresharedKeyConfig(parsedPeerKey, psk, updateOnly) + return c.device.IpcSet(toWgUserspaceString(cfg)) +} + func (c *WGUSPConfigurer) UpdatePeer(peerKey string, allowedIps []netip.Prefix, keepAlive time.Duration, endpoint *net.UDPAddr, preSharedKey *wgtypes.Key) error { peerKeyParsed, err := wgtypes.ParseKey(peerKey) if err != nil { @@ -422,23 +433,19 @@ func toWgUserspaceString(wgCfg wgtypes.Config) string { hexKey := hex.EncodeToString(p.PublicKey[:]) sb.WriteString(fmt.Sprintf("public_key=%s\n", hexKey)) + if p.Remove { + sb.WriteString("remove=true\n") + } + + if p.UpdateOnly { + sb.WriteString("update_only=true\n") + } + if p.PresharedKey != nil { preSharedHexKey := hex.EncodeToString(p.PresharedKey[:]) sb.WriteString(fmt.Sprintf("preshared_key=%s\n", preSharedHexKey)) } - if p.Remove { - sb.WriteString("remove=true") - } - - if p.ReplaceAllowedIPs { - sb.WriteString("replace_allowed_ips=true\n") - } - - for _, aip := range p.AllowedIPs { - sb.WriteString(fmt.Sprintf("allowed_ip=%s\n", aip.String())) - } - if p.Endpoint != nil { sb.WriteString(fmt.Sprintf("endpoint=%s\n", p.Endpoint.String())) } @@ -446,6 +453,14 @@ func toWgUserspaceString(wgCfg wgtypes.Config) string { if p.PersistentKeepaliveInterval != nil { sb.WriteString(fmt.Sprintf("persistent_keepalive_interval=%d\n", int(p.PersistentKeepaliveInterval.Seconds()))) } + + if p.ReplaceAllowedIPs { + sb.WriteString("replace_allowed_ips=true\n") + } + + for _, aip := range p.AllowedIPs { + sb.WriteString(fmt.Sprintf("allowed_ip=%s\n", aip.String())) + } } return sb.String() } @@ -599,7 +614,9 @@ func parseStatus(deviceName, ipcStr string) (*Stats, error) { continue } if val != "" && val != "0000000000000000000000000000000000000000000000000000000000000000" { - currentPeer.PresharedKey = true + if pskKey, err := hexToWireguardKey(val); err == nil { + currentPeer.PresharedKey = [32]byte(pskKey) + } } } } diff --git a/client/iface/configurer/wgshow.go b/client/iface/configurer/wgshow.go index 604264026..4a5c31160 100644 --- a/client/iface/configurer/wgshow.go +++ b/client/iface/configurer/wgshow.go @@ -12,7 +12,7 @@ type Peer struct { TxBytes int64 RxBytes int64 LastHandshake time.Time - PresharedKey bool + PresharedKey [32]byte } type Stats struct { diff --git a/client/iface/device/interface.go b/client/iface/device/interface.go index db53d9c3a..7bab7b757 100644 --- a/client/iface/device/interface.go +++ b/client/iface/device/interface.go @@ -17,6 +17,7 @@ type WGConfigurer interface { RemovePeer(peerKey string) error AddAllowedIP(peerKey string, allowedIP netip.Prefix) error RemoveAllowedIP(peerKey string, allowedIP netip.Prefix) error + SetPresharedKey(peerKey string, psk wgtypes.Key, updateOnly bool) error Close() GetStats() (map[string]configurer.WGStats, error) FullStats() (*configurer.Stats, error) diff --git a/client/iface/iface.go b/client/iface/iface.go index 07235a995..71fd433ad 100644 --- a/client/iface/iface.go +++ b/client/iface/iface.go @@ -297,6 +297,19 @@ func (w *WGIface) FullStats() (*configurer.Stats, error) { return w.configurer.FullStats() } +// SetPresharedKey sets or updates the preshared key for a peer. +// If updateOnly is true, only updates existing peer; if false, creates or updates. +func (w *WGIface) SetPresharedKey(peerKey string, psk wgtypes.Key, updateOnly bool) error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.configurer == nil { + return ErrIfaceNotFound + } + + return w.configurer.SetPresharedKey(peerKey, psk, updateOnly) +} + func (w *WGIface) waitUntilRemoved() error { maxWaitTime := 5 * time.Second timeout := time.NewTimer(maxWaitTime) diff --git a/client/internal/debug/wgshow.go b/client/internal/debug/wgshow.go index 8233ca510..1e8a8a6cc 100644 --- a/client/internal/debug/wgshow.go +++ b/client/internal/debug/wgshow.go @@ -60,7 +60,7 @@ func (g *BundleGenerator) toWGShowFormat(s *configurer.Stats) string { } sb.WriteString(fmt.Sprintf(" latest handshake: %s\n", peer.LastHandshake.Format(time.RFC1123))) sb.WriteString(fmt.Sprintf(" transfer: %d B received, %d B sent\n", peer.RxBytes, peer.TxBytes)) - if peer.PresharedKey { + if peer.PresharedKey != [32]byte{} { sb.WriteString(" preshared key: (hidden)\n") } } diff --git a/client/internal/engine.go b/client/internal/engine.go index c5e2b7c6c..25a4e4048 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -505,6 +505,11 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) return fmt.Errorf("up wg interface: %w", err) } + // Set the WireGuard interface for rosenpass after interface is up + if e.rpManager != nil { + e.rpManager.SetInterface(e.wgInterface) + } + // if inbound conns are blocked there is no need to create the ACL manager if e.firewall != nil && !e.config.BlockInbound { e.acl = acl.NewDefaultManager(e.firewall) @@ -1512,6 +1517,7 @@ func (e *Engine) createPeerConn(pubKey string, allowedIPs []netip.Prefix, agentV if e.rpManager != nil { peerConn.SetOnConnected(e.rpManager.OnConnected) peerConn.SetOnDisconnected(e.rpManager.OnDisconnected) + peerConn.SetRosenpassInitializedPresharedKeyValidator(e.rpManager.IsPresharedKeyInitialized) } return peerConn, nil diff --git a/client/internal/engine_test.go b/client/internal/engine_test.go index 56829393c..af9f27a71 100644 --- a/client/internal/engine_test.go +++ b/client/internal/engine_test.go @@ -214,6 +214,10 @@ func (m *MockWGIface) LastActivities() map[string]monotime.Time { return nil } +func (m *MockWGIface) SetPresharedKey(peerKey string, psk wgtypes.Key, updateOnly bool) error { + return nil +} + func TestMain(m *testing.M) { _ = util.InitLog("debug", util.LogConsole) code := m.Run() diff --git a/client/internal/iface_common.go b/client/internal/iface_common.go index 90b06cbd1..f8a433a6e 100644 --- a/client/internal/iface_common.go +++ b/client/internal/iface_common.go @@ -42,4 +42,5 @@ type wgIfaceBase interface { GetNet() *netstack.Net FullStats() (*configurer.Stats, error) LastActivities() map[string]monotime.Time + SetPresharedKey(peerKey string, psk wgtypes.Key, updateOnly bool) error } diff --git a/client/internal/peer/conn.go b/client/internal/peer/conn.go index 80ca36789..ba82354a2 100644 --- a/client/internal/peer/conn.go +++ b/client/internal/peer/conn.go @@ -88,8 +88,9 @@ type Conn struct { relayManager *relayClient.Manager srWatcher *guard.SRWatcher - onConnected func(remoteWireGuardKey string, remoteRosenpassPubKey []byte, wireGuardIP string, remoteRosenpassAddr string) - onDisconnected func(remotePeer string) + onConnected func(remoteWireGuardKey string, remoteRosenpassPubKey []byte, wireGuardIP string, remoteRosenpassAddr string) + onDisconnected func(remotePeer string) + rosenpassInitializedPresharedKeyValidator func(peerKey string) bool statusRelay *worker.AtomicWorkerStatus statusICE *worker.AtomicWorkerStatus @@ -289,6 +290,13 @@ func (conn *Conn) SetOnDisconnected(handler func(remotePeer string)) { conn.onDisconnected = handler } +// SetRosenpassInitializedPresharedKeyValidator sets a function to check if Rosenpass has taken over +// PSK management for a peer. When this returns true, presharedKey() returns nil +// to prevent UpdatePeer from overwriting the Rosenpass-managed PSK. +func (conn *Conn) SetRosenpassInitializedPresharedKeyValidator(handler func(peerKey string) bool) { + conn.rosenpassInitializedPresharedKeyValidator = handler +} + func (conn *Conn) OnRemoteOffer(offer OfferAnswer) { conn.dumpState.RemoteOffer() conn.Log.Infof("OnRemoteOffer, on status ICE: %s, status Relay: %s", conn.statusICE, conn.statusRelay) @@ -759,10 +767,24 @@ func (conn *Conn) presharedKey(remoteRosenpassKey []byte) *wgtypes.Key { return conn.config.WgConfig.PreSharedKey } + // If Rosenpass has already set a PSK for this peer, return nil to prevent + // UpdatePeer from overwriting the Rosenpass-managed key. + if conn.rosenpassInitializedPresharedKeyValidator != nil && conn.rosenpassInitializedPresharedKeyValidator(conn.config.Key) { + return nil + } + + // Use NetBird PSK as the seed for Rosenpass. This same PSK is passed to + // Rosenpass as PeerConfig.PresharedKey, ensuring the derived post-quantum + // key is cryptographically bound to the original secret. + if conn.config.WgConfig.PreSharedKey != nil { + return conn.config.WgConfig.PreSharedKey + } + + // Fallback to deterministic key if no NetBird PSK is configured determKey, err := conn.rosenpassDetermKey() if err != nil { conn.Log.Errorf("failed to generate Rosenpass initial key: %v", err) - return conn.config.WgConfig.PreSharedKey + return nil } return determKey diff --git a/client/internal/peer/conn_test.go b/client/internal/peer/conn_test.go index 6b47f95eb..32383b530 100644 --- a/client/internal/peer/conn_test.go +++ b/client/internal/peer/conn_test.go @@ -284,3 +284,27 @@ func TestConn_presharedKey(t *testing.T) { }) } } + +func TestConn_presharedKey_RosenpassManaged(t *testing.T) { + conn := Conn{ + config: ConnConfig{ + Key: "LLHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=", + LocalKey: "RRHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=", + RosenpassConfig: RosenpassConfig{PubKey: []byte("dummykey")}, + }, + } + + // When Rosenpass has already initialized the PSK for this peer, + // presharedKey must return nil to avoid UpdatePeer overwriting it. + conn.rosenpassInitializedPresharedKeyValidator = func(peerKey string) bool { return true } + if k := conn.presharedKey([]byte("remote")); k != nil { + t.Fatalf("expected nil presharedKey when Rosenpass manages PSK, got %v", k) + } + + // When Rosenpass hasn't taken over yet, presharedKey should provide + // a non-nil initial key (deterministic or from NetBird PSK). + conn.rosenpassInitializedPresharedKeyValidator = func(peerKey string) bool { return false } + if k := conn.presharedKey([]byte("remote")); k == nil { + t.Fatalf("expected non-nil presharedKey before Rosenpass manages PSK") + } +} diff --git a/client/internal/rosenpass/manager.go b/client/internal/rosenpass/manager.go index d2d7408fd..26a1eef58 100644 --- a/client/internal/rosenpass/manager.go +++ b/client/internal/rosenpass/manager.go @@ -34,6 +34,7 @@ type Manager struct { server *rp.Server lock sync.Mutex port int + wgIface PresharedKeySetter } // NewManager creates a new Rosenpass manager @@ -109,7 +110,13 @@ func (m *Manager) generateConfig() (rp.Config, error) { cfg.SecretKey = m.ssk cfg.Peers = []rp.PeerConfig{} - m.rpWgHandler, _ = NewNetbirdHandler(m.preSharedKey, m.ifaceName) + + m.lock.Lock() + m.rpWgHandler = NewNetbirdHandler() + if m.wgIface != nil { + m.rpWgHandler.SetInterface(m.wgIface) + } + m.lock.Unlock() cfg.Handlers = []rp.Handler{m.rpWgHandler} @@ -172,6 +179,20 @@ func (m *Manager) Close() error { return nil } +// SetInterface sets the WireGuard interface for the rosenpass handler. +// This can be called before or after Run() - the interface will be stored +// and passed to the handler when it's created or updated immediately if +// already running. +func (m *Manager) SetInterface(iface PresharedKeySetter) { + m.lock.Lock() + defer m.lock.Unlock() + + m.wgIface = iface + if m.rpWgHandler != nil { + m.rpWgHandler.SetInterface(iface) + } +} + // OnConnected is a handler function that is triggered when a connection to a remote peer establishes func (m *Manager) OnConnected(remoteWireGuardKey string, remoteRosenpassPubKey []byte, wireGuardIP string, remoteRosenpassAddr string) { m.lock.Lock() @@ -192,6 +213,20 @@ func (m *Manager) OnConnected(remoteWireGuardKey string, remoteRosenpassPubKey [ } } +// IsPresharedKeyInitialized returns true if Rosenpass has completed a handshake +// and set a PSK for the given WireGuard peer. +func (m *Manager) IsPresharedKeyInitialized(wireGuardPubKey string) bool { + m.lock.Lock() + defer m.lock.Unlock() + + peerID, ok := m.rpPeerIDs[wireGuardPubKey] + if !ok || peerID == nil { + return false + } + + return m.rpWgHandler.IsPeerInitialized(*peerID) +} + func findRandomAvailableUDPPort() (int, error) { conn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) if err != nil { diff --git a/client/internal/rosenpass/netbird_handler.go b/client/internal/rosenpass/netbird_handler.go index 345f95c01..9de2409ef 100644 --- a/client/internal/rosenpass/netbird_handler.go +++ b/client/internal/rosenpass/netbird_handler.go @@ -1,46 +1,50 @@ package rosenpass import ( - "fmt" - "log/slog" + "sync" rp "cunicu.li/go-rosenpass" log "github.com/sirupsen/logrus" - "golang.zx2c4.com/wireguard/wgctrl" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" ) +// PresharedKeySetter is the interface for setting preshared keys on WireGuard peers. +// This minimal interface allows rosenpass to update PSKs without depending on the full WGIface. +type PresharedKeySetter interface { + SetPresharedKey(peerKey string, psk wgtypes.Key, updateOnly bool) error +} + type wireGuardPeer struct { Interface string PublicKey rp.Key } type NetbirdHandler struct { - ifaceName string - client *wgctrl.Client - peers map[rp.PeerID]wireGuardPeer - presharedKey [32]byte + mu sync.Mutex + iface PresharedKeySetter + peers map[rp.PeerID]wireGuardPeer + initializedPeers map[rp.PeerID]bool } -func NewNetbirdHandler(preSharedKey *[32]byte, wgIfaceName string) (hdlr *NetbirdHandler, err error) { - hdlr = &NetbirdHandler{ - ifaceName: wgIfaceName, - peers: map[rp.PeerID]wireGuardPeer{}, +func NewNetbirdHandler() *NetbirdHandler { + return &NetbirdHandler{ + peers: map[rp.PeerID]wireGuardPeer{}, + initializedPeers: map[rp.PeerID]bool{}, } +} - if preSharedKey != nil { - hdlr.presharedKey = *preSharedKey - } - - if hdlr.client, err = wgctrl.New(); err != nil { - return nil, fmt.Errorf("failed to creat WireGuard client: %w", err) - } - - return hdlr, nil +// SetInterface sets the WireGuard interface for the handler. +// This must be called after the WireGuard interface is created. +func (h *NetbirdHandler) SetInterface(iface PresharedKeySetter) { + h.mu.Lock() + defer h.mu.Unlock() + h.iface = iface } func (h *NetbirdHandler) AddPeer(pid rp.PeerID, intf string, pk rp.Key) { + h.mu.Lock() + defer h.mu.Unlock() h.peers[pid] = wireGuardPeer{ Interface: intf, PublicKey: pk, @@ -48,79 +52,61 @@ func (h *NetbirdHandler) AddPeer(pid rp.PeerID, intf string, pk rp.Key) { } func (h *NetbirdHandler) RemovePeer(pid rp.PeerID) { + h.mu.Lock() + defer h.mu.Unlock() delete(h.peers, pid) + delete(h.initializedPeers, pid) +} + +// IsPeerInitialized returns true if Rosenpass has completed a handshake +// and set a PSK for this peer. +func (h *NetbirdHandler) IsPeerInitialized(pid rp.PeerID) bool { + h.mu.Lock() + defer h.mu.Unlock() + return h.initializedPeers[pid] } func (h *NetbirdHandler) HandshakeCompleted(pid rp.PeerID, key rp.Key) { - log.Debug("Handshake complete") h.outputKey(rp.KeyOutputReasonStale, pid, key) } func (h *NetbirdHandler) HandshakeExpired(pid rp.PeerID) { key, _ := rp.GeneratePresharedKey() - log.Debug("Handshake expired") h.outputKey(rp.KeyOutputReasonStale, pid, key) } func (h *NetbirdHandler) outputKey(_ rp.KeyOutputReason, pid rp.PeerID, psk rp.Key) { + h.mu.Lock() + iface := h.iface wg, ok := h.peers[pid] + isInitialized := h.initializedPeers[pid] + h.mu.Unlock() + + if iface == nil { + log.Warn("rosenpass: interface not set, cannot update preshared key") + return + } + if !ok { return } - device, err := h.client.Device(h.ifaceName) - if err != nil { - log.Errorf("Failed to get WireGuard device: %v", err) + peerKey := wgtypes.Key(wg.PublicKey).String() + pskKey := wgtypes.Key(psk) + + // Use updateOnly=true for later rotations (peer already has Rosenpass PSK) + // Use updateOnly=false for first rotation (peer has original/empty PSK) + if err := iface.SetPresharedKey(peerKey, pskKey, isInitialized); err != nil { + log.Errorf("Failed to apply rosenpass key: %v", err) return } - config := []wgtypes.PeerConfig{ - { - UpdateOnly: true, - PublicKey: wgtypes.Key(wg.PublicKey), - PresharedKey: (*wgtypes.Key)(&psk), - }, - } - for _, peer := range device.Peers { - if peer.PublicKey == wgtypes.Key(wg.PublicKey) { - if publicKeyEmpty(peer.PresharedKey) || peer.PresharedKey == h.presharedKey { - log.Debugf("Restart wireguard connection to peer %s", peer.PublicKey) - config = []wgtypes.PeerConfig{ - { - PublicKey: wgtypes.Key(wg.PublicKey), - PresharedKey: (*wgtypes.Key)(&psk), - Endpoint: peer.Endpoint, - AllowedIPs: peer.AllowedIPs, - }, - } - err = h.client.ConfigureDevice(wg.Interface, wgtypes.Config{ - Peers: []wgtypes.PeerConfig{ - { - Remove: true, - PublicKey: wgtypes.Key(wg.PublicKey), - }, - }, - }) - if err != nil { - slog.Debug("Failed to remove peer") - return - } - } + // Mark peer as isInitialized after the successful first rotation + if !isInitialized { + h.mu.Lock() + if _, exists := h.peers[pid]; exists { + h.initializedPeers[pid] = true } - } - - if err = h.client.ConfigureDevice(wg.Interface, wgtypes.Config{ - Peers: config, - }); err != nil { - log.Errorf("Failed to apply rosenpass key: %v", err) + h.mu.Unlock() } } - -func publicKeyEmpty(key wgtypes.Key) bool { - for _, b := range key { - if b != 0 { - return false - } - } - return true -} From 07e4a5a23c91176c6ad12a0702e6aff23190544a Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 20 Jan 2026 18:22:37 +0100 Subject: [PATCH 063/374] Fixes profile switching and repeated down/up command failures. (#5142) When Down() and Up() are called in quick succession, the connectWithRetryRuns goroutine could set ErrResetConnection after Down() had cleared the state, causing the subsequent Up() to fail. Fix by waiting for the goroutine to exit (via clientGiveUpChan) before Down() returns. Uses a 5-second timeout to prevent RPC timeouts while ensuring the goroutine completes in most cases. --- client/server/server.go | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/client/server/server.go b/client/server/server.go index 22e80ab25..408bd56db 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -66,7 +66,7 @@ type Server struct { proto.UnimplementedDaemonServiceServer clientRunning bool // protected by mutex clientRunningChan chan struct{} - clientGiveUpChan chan struct{} + clientGiveUpChan chan struct{} // closed when connectWithRetryRuns goroutine exits connectClient *internal.ConnectClient @@ -792,9 +792,11 @@ func (s *Server) SwitchProfile(callerCtx context.Context, msg *proto.SwitchProfi // Down engine work in the daemon. func (s *Server) Down(ctx context.Context, _ *proto.DownRequest) (*proto.DownResponse, error) { s.mutex.Lock() - defer s.mutex.Unlock() + + giveUpChan := s.clientGiveUpChan if err := s.cleanupConnection(); err != nil { + s.mutex.Unlock() // todo review to update the status in case any type of error log.Errorf("failed to shut down properly: %v", err) return nil, err @@ -803,6 +805,20 @@ func (s *Server) Down(ctx context.Context, _ *proto.DownRequest) (*proto.DownRes state := internal.CtxGetState(s.rootCtx) state.Set(internal.StatusIdle) + s.mutex.Unlock() + + // Wait for the connectWithRetryRuns goroutine to finish with a short timeout. + // This prevents the goroutine from setting ErrResetConnection after Down() returns. + // The giveUpChan is closed at the end of connectWithRetryRuns. + if giveUpChan != nil { + select { + case <-giveUpChan: + log.Debugf("client goroutine finished successfully") + case <-time.After(5 * time.Second): + log.Warnf("timeout waiting for client goroutine to finish, proceeding anyway") + } + } + return &proto.DownResponse{}, nil } From e01998815e64660ea41535cb57b9dbab3c44f83b Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Tue, 20 Jan 2026 19:01:34 +0100 Subject: [PATCH 064/374] [infra] add embedded STUN to getting started (#5141) --- infrastructure_files/getting-started.sh | 139 ++++++------------------ 1 file changed, 34 insertions(+), 105 deletions(-) diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh index 8676840a6..25599997c 100755 --- a/infrastructure_files/getting-started.sh +++ b/infrastructure_files/getting-started.sh @@ -82,16 +82,6 @@ read_nb_domain() { return 0 } -get_turn_external_ip() { - TURN_EXTERNAL_IP_CONFIG="#external-ip=" - IP=$(curl -s -4 https://jsonip.com | jq -r '.ip') - if [[ "x-$IP" != "x-" ]]; then - TURN_EXTERNAL_IP_CONFIG="external-ip=$IP" - fi - echo "$TURN_EXTERNAL_IP_CONFIG" - return 0 -} - read_reverse_proxy_type() { echo "" > /dev/stderr echo "Which reverse proxy will you use?" > /dev/stderr @@ -249,14 +239,17 @@ initialize_default_values() { NETBIRD_PORT=80 NETBIRD_HTTP_PROTOCOL="http" NETBIRD_RELAY_PROTO="rel" - TURN_USER="self" - TURN_PASSWORD=$(openssl rand -base64 32 | sed "$SED_STRIP_PADDING") NETBIRD_RELAY_AUTH_SECRET=$(openssl rand -base64 32 | sed "$SED_STRIP_PADDING") # Note: DataStoreEncryptionKey must keep base64 padding (=) for Go's base64.StdEncoding DATASTORE_ENCRYPTION_KEY=$(openssl rand -base64 32) - TURN_MIN_PORT=49152 - TURN_MAX_PORT=65535 - TURN_EXTERNAL_IP_CONFIG=$(get_turn_external_ip) + NETBIRD_STUN_PORT=3478 + + # Docker images + CADDY_IMAGE="caddy" + DASHBOARD_IMAGE="netbirdio/dashboard:latest" + SIGNAL_IMAGE="netbirdio/signal:latest" + RELAY_IMAGE="netbirdio/relay:latest" + MANAGEMENT_IMAGE="netbirdio/management:latest" # Reverse proxy configuration REVERSE_PROXY_TYPE="0" @@ -320,7 +313,7 @@ check_existing_installation() { echo "Generated files already exist, if you want to reinitialize the environment, please remove them first." echo "You can use the following commands:" echo " $DOCKER_COMPOSE_COMMAND down --volumes # to remove all containers and volumes" - echo " rm -f docker-compose.yml Caddyfile dashboard.env turnserver.conf management.json relay.env nginx-netbird.conf caddyfile-netbird.txt npm-advanced-config.txt" + echo " rm -f docker-compose.yml Caddyfile dashboard.env management.json relay.env nginx-netbird.conf caddyfile-netbird.txt npm-advanced-config.txt" echo "Be aware that this will remove all data from the database, and you will have to reconfigure the dashboard." exit 1 fi @@ -363,7 +356,6 @@ generate_configuration_files() { # Common files for all configurations render_dashboard_env > dashboard.env render_management_json > management.json - render_turn_server_conf > turnserver.conf render_relay_env > relay.env return 0 } @@ -487,34 +479,13 @@ EOF return 0 } -render_turn_server_conf() { - cat < Date: Wed, 21 Jan 2026 08:48:32 +0100 Subject: [PATCH 065/374] [client] Fix RFC 4592 wildcard matching for existing domain names (#5145) Per RFC 4592 section 2.2.1, wildcards should only match when the queried name does not exist in the zone. Previously, if host.example.com had an A record and *.example.com had an AAAA record, querying AAAA for host.example.com would incorrectly return the wildcard AAAA instead of NODATA. Now the resolver checks if the domain exists (with any record type) before falling back to wildcard matching, returning proper NODATA responses for existing names without the requested record type. --- client/internal/dns/local/local.go | 8 ++++++-- client/internal/dns/local/local_test.go | 4 +++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/client/internal/dns/local/local.go b/client/internal/dns/local/local.go index ae27b3b56..cbdc64997 100644 --- a/client/internal/dns/local/local.go +++ b/client/internal/dns/local/local.go @@ -201,9 +201,13 @@ func (d *Resolver) lookupRecords(logger *log.Entry, question dns.Question) looku records, found := d.records[question] usingWildcard := false wildQuestion := transformToWildcard(question) + // RFC 4592 section 2.2.1: wildcard only matches if the name does NOT exist in the zone. + // If the domain exists with any record type, return NODATA instead of wildcard match. if !found && supportsWildcard(question.Qtype) { - records, found = d.records[wildQuestion] - usingWildcard = found + if _, domainExists := d.domains[domain.Domain(question.Name)]; !domainExists { + records, found = d.records[wildQuestion] + usingWildcard = found + } } if !found { diff --git a/client/internal/dns/local/local_test.go b/client/internal/dns/local/local_test.go index dc295cd17..73f70035f 100644 --- a/client/internal/dns/local/local_test.go +++ b/client/internal/dns/local/local_test.go @@ -2506,8 +2506,10 @@ func TestLocalResolver_MixedRecordTypes(t *testing.T) { resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respAAAA = m; return nil }}, msgAAAA) require.NotNil(t, respAAAA) - // host.example.com exists (has A), so AAAA query returns NODATA, not wildcard + // RFC 4592 section 2.2.1: wildcard should NOT match when the name EXISTS in zone. + // host.example.com exists (has A record), so AAAA query returns NODATA, not wildcard. assert.Equal(t, dns.RcodeSuccess, respAAAA.Rcode, "Should return NODATA for existing host without AAAA") + assert.Len(t, respAAAA.Answer, 0, "RFC 4592: wildcard should not match when name exists") // AAAA query for other host should return wildcard AAAA msgAAAAOther := new(dns.Msg).SetQuestion("other.example.com.", dns.TypeAAAA) From e908dea702eb4520021b0cd0806e695619777127 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Wed, 21 Jan 2026 10:42:13 +0100 Subject: [PATCH 066/374] [client] Extend WG watcher for ICE connection too (#5133) Extend WG watcher for ICE connection too --- client/internal/peer/conn.go | 84 +++++++++++++++++++------ client/internal/peer/wg_watcher.go | 72 ++++++++++----------- client/internal/peer/wg_watcher_test.go | 20 +++--- client/internal/peer/worker_relay.go | 22 +------ 4 files changed, 114 insertions(+), 84 deletions(-) diff --git a/client/internal/peer/conn.go b/client/internal/peer/conn.go index ba82354a2..39133a6d3 100644 --- a/client/internal/peer/conn.go +++ b/client/internal/peer/conn.go @@ -99,7 +99,10 @@ type Conn struct { workerICE *WorkerICE workerRelay *WorkerRelay - wgWatcherWg sync.WaitGroup + + wgWatcher *WGWatcher + wgWatcherWg sync.WaitGroup + wgWatcherCancel context.CancelFunc // used to store the remote Rosenpass key for Relayed connection in case of connection update from ice rosenpassRemoteKey []byte @@ -127,6 +130,7 @@ func NewConn(config ConnConfig, services ServiceDependencies) (*Conn, error) { connLog := log.WithField("peer", config.Key) + dumpState := newStateDump(config.Key, connLog, services.StatusRecorder) var conn = &Conn{ Log: connLog, config: config, @@ -138,8 +142,9 @@ func NewConn(config ConnConfig, services ServiceDependencies) (*Conn, error) { semaphore: services.Semaphore, statusRelay: worker.NewAtomicStatus(), statusICE: worker.NewAtomicStatus(), - dumpState: newStateDump(config.Key, connLog, services.StatusRecorder), + dumpState: dumpState, endpointUpdater: NewEndpointUpdater(connLog, config.WgConfig, isController(config)), + wgWatcher: NewWGWatcher(connLog, config.WgConfig.WgInterface, config.Key, dumpState), } return conn, nil @@ -163,7 +168,7 @@ func (conn *Conn) Open(engineCtx context.Context) error { conn.ctx, conn.ctxCancel = context.WithCancel(engineCtx) - conn.workerRelay = NewWorkerRelay(conn.ctx, conn.Log, isController(conn.config), conn.config, conn, conn.relayManager, conn.dumpState) + conn.workerRelay = NewWorkerRelay(conn.ctx, conn.Log, isController(conn.config), conn.config, conn, conn.relayManager) relayIsSupportedLocally := conn.workerRelay.RelayIsSupportedLocally() workerICE, err := NewWorkerICE(conn.ctx, conn.Log, conn.config, conn, conn.signaler, conn.iFaceDiscover, conn.statusRecorder, relayIsSupportedLocally) @@ -232,7 +237,9 @@ func (conn *Conn) Close(signalToRemote bool) { conn.Log.Infof("close peer connection") conn.ctxCancel() - conn.workerRelay.DisableWgWatcher() + if conn.wgWatcherCancel != nil { + conn.wgWatcherCancel() + } conn.workerRelay.CloseConn() conn.workerICE.Close() @@ -374,9 +381,6 @@ func (conn *Conn) onICEConnectionIsReady(priority conntype.ConnPriority, iceConn ep = directEp } - conn.workerRelay.DisableWgWatcher() - // todo consider to run conn.wgWatcherWg.Wait() here - if conn.wgProxyRelay != nil { conn.wgProxyRelay.Pause() } @@ -398,6 +402,8 @@ func (conn *Conn) onICEConnectionIsReady(priority conntype.ConnPriority, iceConn conn.wgProxyRelay.RedirectAs(ep) } + conn.enableWgWatcherIfNeeded() + conn.currentConnPriority = priority conn.statusICE.SetConnected() conn.updateIceState(iceConnInfo) @@ -431,11 +437,6 @@ func (conn *Conn) onICEStateDisconnected() { conn.Log.Errorf("failed to switch to relay conn: %v", err) } - conn.wgWatcherWg.Add(1) - go func() { - defer conn.wgWatcherWg.Done() - conn.workerRelay.EnableWgWatcher(conn.ctx) - }() conn.wgProxyRelay.Work() conn.currentConnPriority = conntype.Relay } else { @@ -452,15 +453,15 @@ func (conn *Conn) onICEStateDisconnected() { } conn.statusICE.SetDisconnected() + conn.disableWgWatcherIfNeeded() + peerState := State{ PubKey: conn.config.Key, ConnStatus: conn.evalStatus(), Relayed: conn.isRelayed(), ConnStatusUpdate: time.Now(), } - - err := conn.statusRecorder.UpdatePeerICEStateToDisconnected(peerState) - if err != nil { + if err := conn.statusRecorder.UpdatePeerICEStateToDisconnected(peerState); err != nil { conn.Log.Warnf("unable to set peer's state to disconnected ice, got error: %v", err) } } @@ -508,11 +509,7 @@ func (conn *Conn) onRelayConnectionIsReady(rci RelayConnInfo) { return } - conn.wgWatcherWg.Add(1) - go func() { - defer conn.wgWatcherWg.Done() - conn.workerRelay.EnableWgWatcher(conn.ctx) - }() + conn.enableWgWatcherIfNeeded() wgConfigWorkaround() conn.rosenpassRemoteKey = rci.rosenpassPubKey @@ -527,7 +524,11 @@ func (conn *Conn) onRelayConnectionIsReady(rci RelayConnInfo) { func (conn *Conn) onRelayDisconnected() { conn.mu.Lock() defer conn.mu.Unlock() + conn.handleRelayDisconnectedLocked() +} +// handleRelayDisconnectedLocked handles relay disconnection. Caller must hold conn.mu. +func (conn *Conn) handleRelayDisconnectedLocked() { if conn.ctx.Err() != nil { return } @@ -553,6 +554,8 @@ func (conn *Conn) onRelayDisconnected() { } conn.statusRelay.SetDisconnected() + conn.disableWgWatcherIfNeeded() + peerState := State{ PubKey: conn.config.Key, ConnStatus: conn.evalStatus(), @@ -571,6 +574,28 @@ func (conn *Conn) onGuardEvent() { } } +func (conn *Conn) onWGDisconnected() { + conn.mu.Lock() + defer conn.mu.Unlock() + + if conn.ctx.Err() != nil { + return + } + + conn.Log.Warnf("WireGuard handshake timeout detected, closing current connection") + + // Close the active connection based on current priority + switch conn.currentConnPriority { + case conntype.Relay: + conn.workerRelay.CloseConn() + conn.handleRelayDisconnectedLocked() + case conntype.ICEP2P, conntype.ICETurn: + conn.workerICE.Close() + default: + conn.Log.Debugf("No active connection to close on WG timeout") + } +} + func (conn *Conn) updateRelayStatus(relayServerAddr string, rosenpassPubKey []byte) { peerState := State{ PubKey: conn.config.Key, @@ -697,6 +722,25 @@ func (conn *Conn) isConnectedOnAllWay() (connected bool) { return true } +func (conn *Conn) enableWgWatcherIfNeeded() { + if !conn.wgWatcher.IsEnabled() { + wgWatcherCtx, wgWatcherCancel := context.WithCancel(conn.ctx) + conn.wgWatcherCancel = wgWatcherCancel + conn.wgWatcherWg.Add(1) + go func() { + defer conn.wgWatcherWg.Done() + conn.wgWatcher.EnableWgWatcher(wgWatcherCtx, conn.onWGDisconnected) + }() + } +} + +func (conn *Conn) disableWgWatcherIfNeeded() { + if conn.currentConnPriority == conntype.None && conn.wgWatcherCancel != nil { + conn.wgWatcherCancel() + conn.wgWatcherCancel = nil + } +} + func (conn *Conn) newProxy(remoteConn net.Conn) (wgproxy.Proxy, error) { conn.Log.Debugf("setup proxied WireGuard connection") udpAddr := &net.UDPAddr{ diff --git a/client/internal/peer/wg_watcher.go b/client/internal/peer/wg_watcher.go index 0ed200fda..d40ec7a80 100644 --- a/client/internal/peer/wg_watcher.go +++ b/client/internal/peer/wg_watcher.go @@ -30,10 +30,8 @@ type WGWatcher struct { peerKey string stateDump *stateDump - ctx context.Context - ctxCancel context.CancelFunc - ctxLock sync.Mutex - enabledTime time.Time + enabled bool + muEnabled sync.RWMutex } func NewWGWatcher(log *log.Entry, wgIfaceStater WGInterfaceStater, peerKey string, stateDump *stateDump) *WGWatcher { @@ -46,52 +44,44 @@ func NewWGWatcher(log *log.Entry, wgIfaceStater WGInterfaceStater, peerKey strin } // EnableWgWatcher starts the WireGuard watcher. If it is already enabled, it will return immediately and do nothing. -func (w *WGWatcher) EnableWgWatcher(parentCtx context.Context, onDisconnectedFn func()) { - w.log.Debugf("enable WireGuard watcher") - w.ctxLock.Lock() - w.enabledTime = time.Now() - - if w.ctx != nil && w.ctx.Err() == nil { - w.log.Errorf("WireGuard watcher already enabled") - w.ctxLock.Unlock() +// The watcher runs until ctx is cancelled. Caller is responsible for context lifecycle management. +func (w *WGWatcher) EnableWgWatcher(ctx context.Context, onDisconnectedFn func()) { + w.muEnabled.Lock() + if w.enabled { + w.muEnabled.Unlock() return } - ctx, ctxCancel := context.WithCancel(parentCtx) - w.ctx = ctx - w.ctxCancel = ctxCancel - w.ctxLock.Unlock() + w.log.Debugf("enable WireGuard watcher") + enabledTime := time.Now() + w.enabled = true + w.muEnabled.Unlock() initialHandshake, err := w.wgState() if err != nil { w.log.Warnf("failed to read initial wg stats: %v", err) } - w.periodicHandshakeCheck(ctx, ctxCancel, onDisconnectedFn, initialHandshake) + w.periodicHandshakeCheck(ctx, onDisconnectedFn, enabledTime, initialHandshake) + + w.muEnabled.Lock() + w.enabled = false + w.muEnabled.Unlock() } -// DisableWgWatcher stops the WireGuard watcher and wait for the watcher to exit -func (w *WGWatcher) DisableWgWatcher() { - w.ctxLock.Lock() - defer w.ctxLock.Unlock() - - if w.ctxCancel == nil { - return - } - - w.log.Debugf("disable WireGuard watcher") - - w.ctxCancel() - w.ctxCancel = nil +// IsEnabled returns true if the WireGuard watcher is currently enabled +func (w *WGWatcher) IsEnabled() bool { + w.muEnabled.RLock() + defer w.muEnabled.RUnlock() + return w.enabled } // wgStateCheck help to check the state of the WireGuard handshake and relay connection -func (w *WGWatcher) periodicHandshakeCheck(ctx context.Context, ctxCancel context.CancelFunc, onDisconnectedFn func(), initialHandshake time.Time) { +func (w *WGWatcher) periodicHandshakeCheck(ctx context.Context, onDisconnectedFn func(), enabledTime time.Time, initialHandshake time.Time) { w.log.Infof("WireGuard watcher started") timer := time.NewTimer(wgHandshakeOvertime) defer timer.Stop() - defer ctxCancel() lastHandshake := initialHandshake @@ -104,7 +94,7 @@ func (w *WGWatcher) periodicHandshakeCheck(ctx context.Context, ctxCancel contex return } if lastHandshake.IsZero() { - elapsed := handshake.Sub(w.enabledTime).Seconds() + elapsed := calcElapsed(enabledTime, *handshake) w.log.Infof("first wg handshake detected within: %.2fsec, (%s)", elapsed, handshake) } @@ -134,19 +124,19 @@ func (w *WGWatcher) handshakeCheck(lastHandshake time.Time) (*time.Time, bool) { // the current know handshake did not change if handshake.Equal(lastHandshake) { - w.log.Warnf("WireGuard handshake timed out, closing relay connection: %v", handshake) + w.log.Warnf("WireGuard handshake timed out: %v", handshake) return nil, false } // in case if the machine is suspended, the handshake time will be in the past if handshake.Add(checkPeriod).Before(time.Now()) { - w.log.Warnf("WireGuard handshake timed out, closing relay connection: %v", handshake) + w.log.Warnf("WireGuard handshake timed out: %v", handshake) return nil, false } // error handling for handshake time in the future if handshake.After(time.Now()) { - w.log.Warnf("WireGuard handshake is in the future, closing relay connection: %v", handshake) + w.log.Warnf("WireGuard handshake is in the future: %v", handshake) return nil, false } @@ -164,3 +154,13 @@ func (w *WGWatcher) wgState() (time.Time, error) { } return wgState.LastHandshake, nil } + +// calcElapsed calculates elapsed time since watcher was enabled. +// The watcher started after the wg configuration happens, because of this need to normalise the negative value +func calcElapsed(enabledTime, handshake time.Time) float64 { + elapsed := handshake.Sub(enabledTime).Seconds() + if elapsed < 0 { + elapsed = 0 + } + return elapsed +} diff --git a/client/internal/peer/wg_watcher_test.go b/client/internal/peer/wg_watcher_test.go index d7c277eff..f79405a01 100644 --- a/client/internal/peer/wg_watcher_test.go +++ b/client/internal/peer/wg_watcher_test.go @@ -2,6 +2,7 @@ package peer import ( "context" + "sync" "testing" "time" @@ -48,7 +49,6 @@ func TestWGWatcher_EnableWgWatcher(t *testing.T) { case <-time.After(10 * time.Second): t.Errorf("timeout") } - watcher.DisableWgWatcher() } func TestWGWatcher_ReEnable(t *testing.T) { @@ -60,14 +60,21 @@ func TestWGWatcher_ReEnable(t *testing.T) { watcher := NewWGWatcher(mlog, mocWgIface, "", newStateDump("peer", mlog, &Status{})) ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + watcher.EnableWgWatcher(ctx, func() {}) + }() + cancel() + + wg.Wait() + + // Re-enable with a new context + ctx, cancel = context.WithCancel(context.Background()) defer cancel() onDisconnected := make(chan struct{}, 1) - - go watcher.EnableWgWatcher(ctx, func() {}) - time.Sleep(1 * time.Second) - watcher.DisableWgWatcher() - go watcher.EnableWgWatcher(ctx, func() { onDisconnected <- struct{}{} }) @@ -80,5 +87,4 @@ func TestWGWatcher_ReEnable(t *testing.T) { case <-time.After(10 * time.Second): t.Errorf("timeout") } - watcher.DisableWgWatcher() } diff --git a/client/internal/peer/worker_relay.go b/client/internal/peer/worker_relay.go index f584487f5..06309fbaf 100644 --- a/client/internal/peer/worker_relay.go +++ b/client/internal/peer/worker_relay.go @@ -30,11 +30,9 @@ type WorkerRelay struct { relayLock sync.Mutex relaySupportedOnRemotePeer atomic.Bool - - wgWatcher *WGWatcher } -func NewWorkerRelay(ctx context.Context, log *log.Entry, ctrl bool, config ConnConfig, conn *Conn, relayManager *relayClient.Manager, stateDump *stateDump) *WorkerRelay { +func NewWorkerRelay(ctx context.Context, log *log.Entry, ctrl bool, config ConnConfig, conn *Conn, relayManager *relayClient.Manager) *WorkerRelay { r := &WorkerRelay{ peerCtx: ctx, log: log, @@ -42,7 +40,6 @@ func NewWorkerRelay(ctx context.Context, log *log.Entry, ctrl bool, config ConnC config: config, conn: conn, relayManager: relayManager, - wgWatcher: NewWGWatcher(log, config.WgConfig.WgInterface, config.Key, stateDump), } return r } @@ -93,14 +90,6 @@ func (w *WorkerRelay) OnNewOffer(remoteOfferAnswer *OfferAnswer) { }) } -func (w *WorkerRelay) EnableWgWatcher(ctx context.Context) { - w.wgWatcher.EnableWgWatcher(ctx, w.onWGDisconnected) -} - -func (w *WorkerRelay) DisableWgWatcher() { - w.wgWatcher.DisableWgWatcher() -} - func (w *WorkerRelay) RelayInstanceAddress() (string, error) { return w.relayManager.RelayInstanceAddress() } @@ -125,14 +114,6 @@ func (w *WorkerRelay) CloseConn() { } } -func (w *WorkerRelay) onWGDisconnected() { - w.relayLock.Lock() - _ = w.relayedConn.Close() - w.relayLock.Unlock() - - w.conn.onRelayDisconnected() -} - func (w *WorkerRelay) isRelaySupported(answer *OfferAnswer) bool { if !w.relayManager.HasRelayAddress() { return false @@ -148,6 +129,5 @@ func (w *WorkerRelay) preferredRelayServer(myRelayAddress, remoteRelayAddress st } func (w *WorkerRelay) onRelayClientDisconnected() { - w.wgWatcher.DisableWgWatcher() go w.conn.onRelayDisconnected() } From ee54827f94428afe03a7258791e6153aac4653e8 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 22 Jan 2026 10:20:43 +0800 Subject: [PATCH 067/374] [client] Add IPv6 support to usersace bind (#5147) --- client/iface/bind/dual_stack_conn.go | 169 +++++++++ .../iface/bind/dual_stack_conn_bench_test.go | 119 +++++++ client/iface/bind/dual_stack_conn_test.go | 191 +++++++++++ client/iface/bind/ice_bind.go | 97 ++++-- client/iface/bind/ice_bind_test.go | 324 ++++++++++++++++++ client/internal/peer/worker_ice.go | 45 ++- sharedsock/sock_linux.go | 15 +- 7 files changed, 915 insertions(+), 45 deletions(-) create mode 100644 client/iface/bind/dual_stack_conn.go create mode 100644 client/iface/bind/dual_stack_conn_bench_test.go create mode 100644 client/iface/bind/dual_stack_conn_test.go create mode 100644 client/iface/bind/ice_bind_test.go diff --git a/client/iface/bind/dual_stack_conn.go b/client/iface/bind/dual_stack_conn.go new file mode 100644 index 000000000..061016ecc --- /dev/null +++ b/client/iface/bind/dual_stack_conn.go @@ -0,0 +1,169 @@ +package bind + +import ( + "errors" + "net" + "sync" + "time" + + "github.com/hashicorp/go-multierror" + log "github.com/sirupsen/logrus" + + nberrors "github.com/netbirdio/netbird/client/errors" +) + +var ( + errNoIPv4Conn = errors.New("no IPv4 connection available") + errNoIPv6Conn = errors.New("no IPv6 connection available") + errInvalidAddr = errors.New("invalid address type") +) + +// DualStackPacketConn wraps IPv4 and IPv6 UDP connections and routes writes +// to the appropriate connection based on the destination address. +// ReadFrom is not used in the hot path - ICEBind receives packets via +// BatchReader.ReadBatch() directly. This is only used by udpMux for sending. +type DualStackPacketConn struct { + ipv4Conn net.PacketConn + ipv6Conn net.PacketConn + + readFromWarn sync.Once +} + +// NewDualStackPacketConn creates a new dual-stack packet connection. +func NewDualStackPacketConn(ipv4Conn, ipv6Conn net.PacketConn) *DualStackPacketConn { + return &DualStackPacketConn{ + ipv4Conn: ipv4Conn, + ipv6Conn: ipv6Conn, + } +} + +// ReadFrom reads from the available connection (preferring IPv4). +// NOTE: This method is NOT used in the data path. ICEBind receives packets via +// BatchReader.ReadBatch() directly for both IPv4 and IPv6, which is much more efficient. +// This implementation exists only to satisfy the net.PacketConn interface for the udpMux, +// but the udpMux only uses WriteTo() for sending STUN responses - it never calls ReadFrom() +// because STUN packets are filtered and forwarded via HandleSTUNMessage() from the receive path. +func (d *DualStackPacketConn) ReadFrom(b []byte) (n int, addr net.Addr, err error) { + d.readFromWarn.Do(func() { + log.Warn("DualStackPacketConn.ReadFrom called - this is unexpected and may indicate an inefficient code path") + }) + + if d.ipv4Conn != nil { + return d.ipv4Conn.ReadFrom(b) + } + if d.ipv6Conn != nil { + return d.ipv6Conn.ReadFrom(b) + } + return 0, nil, net.ErrClosed +} + +// WriteTo writes to the appropriate connection based on the address type. +func (d *DualStackPacketConn) WriteTo(b []byte, addr net.Addr) (n int, err error) { + udpAddr, ok := addr.(*net.UDPAddr) + if !ok { + return 0, &net.OpError{ + Op: "write", + Net: "udp", + Addr: addr, + Err: errInvalidAddr, + } + } + + if udpAddr.IP.To4() == nil { + if d.ipv6Conn != nil { + return d.ipv6Conn.WriteTo(b, addr) + } + return 0, &net.OpError{ + Op: "write", + Net: "udp6", + Addr: addr, + Err: errNoIPv6Conn, + } + } + + if d.ipv4Conn != nil { + return d.ipv4Conn.WriteTo(b, addr) + } + return 0, &net.OpError{ + Op: "write", + Net: "udp4", + Addr: addr, + Err: errNoIPv4Conn, + } +} + +// Close closes both connections. +func (d *DualStackPacketConn) Close() error { + var result *multierror.Error + if d.ipv4Conn != nil { + if err := d.ipv4Conn.Close(); err != nil { + result = multierror.Append(result, err) + } + } + if d.ipv6Conn != nil { + if err := d.ipv6Conn.Close(); err != nil { + result = multierror.Append(result, err) + } + } + return nberrors.FormatErrorOrNil(result) +} + +// LocalAddr returns the local address of the IPv4 connection if available, +// otherwise the IPv6 connection. +func (d *DualStackPacketConn) LocalAddr() net.Addr { + if d.ipv4Conn != nil { + return d.ipv4Conn.LocalAddr() + } + if d.ipv6Conn != nil { + return d.ipv6Conn.LocalAddr() + } + return nil +} + +// SetDeadline sets the deadline for both connections. +func (d *DualStackPacketConn) SetDeadline(t time.Time) error { + var result *multierror.Error + if d.ipv4Conn != nil { + if err := d.ipv4Conn.SetDeadline(t); err != nil { + result = multierror.Append(result, err) + } + } + if d.ipv6Conn != nil { + if err := d.ipv6Conn.SetDeadline(t); err != nil { + result = multierror.Append(result, err) + } + } + return nberrors.FormatErrorOrNil(result) +} + +// SetReadDeadline sets the read deadline for both connections. +func (d *DualStackPacketConn) SetReadDeadline(t time.Time) error { + var result *multierror.Error + if d.ipv4Conn != nil { + if err := d.ipv4Conn.SetReadDeadline(t); err != nil { + result = multierror.Append(result, err) + } + } + if d.ipv6Conn != nil { + if err := d.ipv6Conn.SetReadDeadline(t); err != nil { + result = multierror.Append(result, err) + } + } + return nberrors.FormatErrorOrNil(result) +} + +// SetWriteDeadline sets the write deadline for both connections. +func (d *DualStackPacketConn) SetWriteDeadline(t time.Time) error { + var result *multierror.Error + if d.ipv4Conn != nil { + if err := d.ipv4Conn.SetWriteDeadline(t); err != nil { + result = multierror.Append(result, err) + } + } + if d.ipv6Conn != nil { + if err := d.ipv6Conn.SetWriteDeadline(t); err != nil { + result = multierror.Append(result, err) + } + } + return nberrors.FormatErrorOrNil(result) +} diff --git a/client/iface/bind/dual_stack_conn_bench_test.go b/client/iface/bind/dual_stack_conn_bench_test.go new file mode 100644 index 000000000..940c44966 --- /dev/null +++ b/client/iface/bind/dual_stack_conn_bench_test.go @@ -0,0 +1,119 @@ +package bind + +import ( + "net" + "testing" +) + +var ( + ipv4Addr = &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 12345} + ipv6Addr = &net.UDPAddr{IP: net.ParseIP("::1"), Port: 12345} + payload = make([]byte, 1200) +) + +func BenchmarkWriteTo_DirectUDPConn(b *testing.B) { + conn, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) + if err != nil { + b.Fatal(err) + } + defer conn.Close() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = conn.WriteTo(payload, ipv4Addr) + } +} + +func BenchmarkWriteTo_DualStack_IPv4Only(b *testing.B) { + conn, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) + if err != nil { + b.Fatal(err) + } + defer conn.Close() + + ds := NewDualStackPacketConn(conn, nil) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = ds.WriteTo(payload, ipv4Addr) + } +} + +func BenchmarkWriteTo_DualStack_IPv6Only(b *testing.B) { + conn, err := net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6zero, Port: 0}) + if err != nil { + b.Skipf("IPv6 not available: %v", err) + } + defer conn.Close() + + ds := NewDualStackPacketConn(nil, conn) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = ds.WriteTo(payload, ipv6Addr) + } +} + +func BenchmarkWriteTo_DualStack_Both_IPv4Traffic(b *testing.B) { + conn4, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) + if err != nil { + b.Fatal(err) + } + defer conn4.Close() + + conn6, err := net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6zero, Port: 0}) + if err != nil { + b.Skipf("IPv6 not available: %v", err) + } + defer conn6.Close() + + ds := NewDualStackPacketConn(conn4, conn6) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = ds.WriteTo(payload, ipv4Addr) + } +} + +func BenchmarkWriteTo_DualStack_Both_IPv6Traffic(b *testing.B) { + conn4, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) + if err != nil { + b.Fatal(err) + } + defer conn4.Close() + + conn6, err := net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6zero, Port: 0}) + if err != nil { + b.Skipf("IPv6 not available: %v", err) + } + defer conn6.Close() + + ds := NewDualStackPacketConn(conn4, conn6) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = ds.WriteTo(payload, ipv6Addr) + } +} + +func BenchmarkWriteTo_DualStack_Both_MixedTraffic(b *testing.B) { + conn4, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) + if err != nil { + b.Fatal(err) + } + defer conn4.Close() + + conn6, err := net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6zero, Port: 0}) + if err != nil { + b.Skipf("IPv6 not available: %v", err) + } + defer conn6.Close() + + ds := NewDualStackPacketConn(conn4, conn6) + addrs := []net.Addr{ipv4Addr, ipv6Addr} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = ds.WriteTo(payload, addrs[i&1]) + } +} diff --git a/client/iface/bind/dual_stack_conn_test.go b/client/iface/bind/dual_stack_conn_test.go new file mode 100644 index 000000000..3007d907f --- /dev/null +++ b/client/iface/bind/dual_stack_conn_test.go @@ -0,0 +1,191 @@ +package bind + +import ( + "net" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDualStackPacketConn_RoutesWritesToCorrectSocket(t *testing.T) { + ipv4Conn := &mockPacketConn{network: "udp4"} + ipv6Conn := &mockPacketConn{network: "udp6"} + dualStack := NewDualStackPacketConn(ipv4Conn, ipv6Conn) + + tests := []struct { + name string + addr *net.UDPAddr + wantSocket string + }{ + { + name: "IPv4 address", + addr: &net.UDPAddr{IP: net.ParseIP("192.168.1.1"), Port: 1234}, + wantSocket: "udp4", + }, + { + name: "IPv6 address", + addr: &net.UDPAddr{IP: net.ParseIP("2001:db8::1"), Port: 1234}, + wantSocket: "udp6", + }, + { + name: "IPv4-mapped IPv6 goes to IPv4", + addr: &net.UDPAddr{IP: net.ParseIP("::ffff:192.168.1.1"), Port: 1234}, + wantSocket: "udp4", + }, + { + name: "IPv4 loopback", + addr: &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}, + wantSocket: "udp4", + }, + { + name: "IPv6 loopback", + addr: &net.UDPAddr{IP: net.ParseIP("::1"), Port: 1234}, + wantSocket: "udp6", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ipv4Conn.writeCount = 0 + ipv6Conn.writeCount = 0 + + n, err := dualStack.WriteTo([]byte("test"), tt.addr) + require.NoError(t, err) + assert.Equal(t, 4, n) + + if tt.wantSocket == "udp4" { + assert.Equal(t, 1, ipv4Conn.writeCount, "expected write to IPv4") + assert.Equal(t, 0, ipv6Conn.writeCount, "expected no write to IPv6") + } else { + assert.Equal(t, 0, ipv4Conn.writeCount, "expected no write to IPv4") + assert.Equal(t, 1, ipv6Conn.writeCount, "expected write to IPv6") + } + }) + } +} + +func TestDualStackPacketConn_IPv4OnlyRejectsIPv6(t *testing.T) { + dualStack := NewDualStackPacketConn(&mockPacketConn{network: "udp4"}, nil) + + // IPv4 works + _, err := dualStack.WriteTo([]byte("test"), &net.UDPAddr{IP: net.ParseIP("192.168.1.1"), Port: 1234}) + require.NoError(t, err) + + // IPv6 fails + _, err = dualStack.WriteTo([]byte("test"), &net.UDPAddr{IP: net.ParseIP("2001:db8::1"), Port: 1234}) + require.Error(t, err) + assert.Contains(t, err.Error(), "no IPv6 connection") +} + +func TestDualStackPacketConn_IPv6OnlyRejectsIPv4(t *testing.T) { + dualStack := NewDualStackPacketConn(nil, &mockPacketConn{network: "udp6"}) + + // IPv6 works + _, err := dualStack.WriteTo([]byte("test"), &net.UDPAddr{IP: net.ParseIP("2001:db8::1"), Port: 1234}) + require.NoError(t, err) + + // IPv4 fails + _, err = dualStack.WriteTo([]byte("test"), &net.UDPAddr{IP: net.ParseIP("192.168.1.1"), Port: 1234}) + require.Error(t, err) + assert.Contains(t, err.Error(), "no IPv4 connection") +} + +// TestDualStackPacketConn_ReadFromIsNotUsedInHotPath documents that ReadFrom +// only reads from one socket (IPv4 preferred). This is fine because the actual +// receive path uses wireguard-go's BatchReader directly, not ReadFrom. +func TestDualStackPacketConn_ReadFromIsNotUsedInHotPath(t *testing.T) { + ipv4Conn := &mockPacketConn{ + network: "udp4", + readData: []byte("from ipv4"), + readAddr: &net.UDPAddr{IP: net.ParseIP("192.168.1.1"), Port: 1234}, + } + ipv6Conn := &mockPacketConn{ + network: "udp6", + readData: []byte("from ipv6"), + readAddr: &net.UDPAddr{IP: net.ParseIP("2001:db8::1"), Port: 1234}, + } + + dualStack := NewDualStackPacketConn(ipv4Conn, ipv6Conn) + + buf := make([]byte, 100) + n, addr, err := dualStack.ReadFrom(buf) + + require.NoError(t, err) + // reads from IPv4 (preferred) - this is expected behavior + assert.Equal(t, "from ipv4", string(buf[:n])) + assert.Equal(t, "192.168.1.1", addr.(*net.UDPAddr).IP.String()) +} + +func TestDualStackPacketConn_LocalAddrPrefersIPv4(t *testing.T) { + ipv4Addr := &net.UDPAddr{IP: net.ParseIP("0.0.0.0"), Port: 51820} + ipv6Addr := &net.UDPAddr{IP: net.ParseIP("::"), Port: 51820} + + tests := []struct { + name string + ipv4 net.PacketConn + ipv6 net.PacketConn + wantAddr net.Addr + }{ + { + name: "both available returns IPv4", + ipv4: &mockPacketConn{localAddr: ipv4Addr}, + ipv6: &mockPacketConn{localAddr: ipv6Addr}, + wantAddr: ipv4Addr, + }, + { + name: "IPv4 only", + ipv4: &mockPacketConn{localAddr: ipv4Addr}, + ipv6: nil, + wantAddr: ipv4Addr, + }, + { + name: "IPv6 only", + ipv4: nil, + ipv6: &mockPacketConn{localAddr: ipv6Addr}, + wantAddr: ipv6Addr, + }, + { + name: "neither returns nil", + ipv4: nil, + ipv6: nil, + wantAddr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dualStack := NewDualStackPacketConn(tt.ipv4, tt.ipv6) + assert.Equal(t, tt.wantAddr, dualStack.LocalAddr()) + }) + } +} + +// mock + +type mockPacketConn struct { + network string + writeCount int + readData []byte + readAddr net.Addr + localAddr net.Addr +} + +func (m *mockPacketConn) ReadFrom(b []byte) (n int, addr net.Addr, err error) { + if m.readData != nil { + return copy(b, m.readData), m.readAddr, nil + } + return 0, nil, nil +} + +func (m *mockPacketConn) WriteTo(b []byte, addr net.Addr) (n int, err error) { + m.writeCount++ + return len(b), nil +} + +func (m *mockPacketConn) Close() error { return nil } +func (m *mockPacketConn) LocalAddr() net.Addr { return m.localAddr } +func (m *mockPacketConn) SetDeadline(t time.Time) error { return nil } +func (m *mockPacketConn) SetReadDeadline(t time.Time) error { return nil } +func (m *mockPacketConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/client/iface/bind/ice_bind.go b/client/iface/bind/ice_bind.go index 0957d2dd5..bf79ecd79 100644 --- a/client/iface/bind/ice_bind.go +++ b/client/iface/bind/ice_bind.go @@ -14,7 +14,6 @@ import ( "github.com/pion/stun/v3" "github.com/pion/transport/v3" log "github.com/sirupsen/logrus" - "golang.org/x/net/ipv4" "golang.org/x/net/ipv6" wgConn "golang.zx2c4.com/wireguard/conn" @@ -28,22 +27,7 @@ type receiverCreator struct { } func (rc receiverCreator) CreateReceiverFn(pc wgConn.BatchReader, conn *net.UDPConn, rxOffload bool, msgPool *sync.Pool) wgConn.ReceiveFunc { - if ipv4PC, ok := pc.(*ipv4.PacketConn); ok { - return rc.iceBind.createIPv4ReceiverFn(ipv4PC, conn, rxOffload, msgPool) - } - // IPv6 is currently not supported in the udpmux, this is a stub for compatibility with the - // wireguard-go ReceiverCreator interface which is called for both IPv4 and IPv6. - return func(bufs [][]byte, sizes []int, eps []wgConn.Endpoint) (n int, err error) { - buf := bufs[0] - size, ep, err := conn.ReadFromUDPAddrPort(buf) - if err != nil { - return 0, err - } - sizes[0] = size - stdEp := &wgConn.StdNetEndpoint{AddrPort: ep} - eps[0] = stdEp - return 1, nil - } + return rc.iceBind.createReceiverFn(pc, conn, rxOffload, msgPool) } // ICEBind is a bind implementation with two main features: @@ -73,6 +57,8 @@ type ICEBind struct { muUDPMux sync.Mutex udpMux *udpmux.UniversalUDPMuxDefault + ipv4Conn *net.UDPConn + ipv6Conn *net.UDPConn } func NewICEBind(transportNet transport.Net, filterFn udpmux.FilterFn, address wgaddr.Address, mtu uint16) *ICEBind { @@ -118,6 +104,12 @@ func (s *ICEBind) Close() error { close(s.closedChan) + s.muUDPMux.Lock() + s.ipv4Conn = nil + s.ipv6Conn = nil + s.udpMux = nil + s.muUDPMux.Unlock() + return s.StdNetBind.Close() } @@ -175,19 +167,18 @@ func (b *ICEBind) Send(bufs [][]byte, ep wgConn.Endpoint) error { return nil } -func (s *ICEBind) createIPv4ReceiverFn(pc *ipv4.PacketConn, conn *net.UDPConn, rxOffload bool, msgsPool *sync.Pool) wgConn.ReceiveFunc { +func (s *ICEBind) createReceiverFn(pc wgConn.BatchReader, conn *net.UDPConn, rxOffload bool, msgsPool *sync.Pool) wgConn.ReceiveFunc { s.muUDPMux.Lock() defer s.muUDPMux.Unlock() - s.udpMux = udpmux.NewUniversalUDPMuxDefault( - udpmux.UniversalUDPMuxParams{ - UDPConn: nbnet.WrapPacketConn(conn), - Net: s.transportNet, - FilterFn: s.filterFn, - WGAddress: s.address, - MTU: s.mtu, - }, - ) + // Detect IPv4 vs IPv6 from connection's local address + if localAddr := conn.LocalAddr().(*net.UDPAddr); localAddr.IP.To4() != nil { + s.ipv4Conn = conn + } else { + s.ipv6Conn = conn + } + s.createOrUpdateMux() + return func(bufs [][]byte, sizes []int, eps []wgConn.Endpoint) (n int, err error) { msgs := getMessages(msgsPool) for i := range bufs { @@ -195,12 +186,13 @@ func (s *ICEBind) createIPv4ReceiverFn(pc *ipv4.PacketConn, conn *net.UDPConn, r (*msgs)[i].OOB = (*msgs)[i].OOB[:cap((*msgs)[i].OOB)] } defer putMessages(msgs, msgsPool) + var numMsgs int if runtime.GOOS == "linux" || runtime.GOOS == "android" { if rxOffload { readAt := len(*msgs) - (wgConn.IdealBatchSize / wgConn.UdpSegmentMaxDatagrams) - //nolint - numMsgs, err = pc.ReadBatch((*msgs)[readAt:], 0) + //nolint:staticcheck + _, err = pc.ReadBatch((*msgs)[readAt:], 0) if err != nil { return 0, err } @@ -222,12 +214,12 @@ func (s *ICEBind) createIPv4ReceiverFn(pc *ipv4.PacketConn, conn *net.UDPConn, r } numMsgs = 1 } + for i := 0; i < numMsgs; i++ { msg := &(*msgs)[i] // todo: handle err - ok, _ := s.filterOutStunMessages(msg.Buffers, msg.N, msg.Addr) - if ok { + if ok, _ := s.filterOutStunMessages(msg.Buffers, msg.N, msg.Addr); ok { continue } sizes[i] = msg.N @@ -248,6 +240,38 @@ func (s *ICEBind) createIPv4ReceiverFn(pc *ipv4.PacketConn, conn *net.UDPConn, r } } +// createOrUpdateMux creates or updates the UDP mux with the available connections. +// Must be called with muUDPMux held. +func (s *ICEBind) createOrUpdateMux() { + var muxConn net.PacketConn + + switch { + case s.ipv4Conn != nil && s.ipv6Conn != nil: + muxConn = NewDualStackPacketConn( + nbnet.WrapPacketConn(s.ipv4Conn), + nbnet.WrapPacketConn(s.ipv6Conn), + ) + case s.ipv4Conn != nil: + muxConn = nbnet.WrapPacketConn(s.ipv4Conn) + case s.ipv6Conn != nil: + muxConn = nbnet.WrapPacketConn(s.ipv6Conn) + default: + return + } + + // Don't close the old mux - it doesn't own the underlying connections. + // The sockets are managed by WireGuard's StdNetBind, not by us. + s.udpMux = udpmux.NewUniversalUDPMuxDefault( + udpmux.UniversalUDPMuxParams{ + UDPConn: muxConn, + Net: s.transportNet, + FilterFn: s.filterFn, + WGAddress: s.address, + MTU: s.mtu, + }, + ) +} + func (s *ICEBind) filterOutStunMessages(buffers [][]byte, n int, addr net.Addr) (bool, error) { for i := range buffers { if !stun.IsMessage(buffers[i]) { @@ -260,9 +284,14 @@ func (s *ICEBind) filterOutStunMessages(buffers [][]byte, n int, addr net.Addr) return true, err } - muxErr := s.udpMux.HandleSTUNMessage(msg, addr) - if muxErr != nil { - log.Warnf("failed to handle STUN packet") + s.muUDPMux.Lock() + mux := s.udpMux + s.muUDPMux.Unlock() + + if mux != nil { + if muxErr := mux.HandleSTUNMessage(msg, addr); muxErr != nil { + log.Warnf("failed to handle STUN packet: %v", muxErr) + } } buffers[i] = []byte{} diff --git a/client/iface/bind/ice_bind_test.go b/client/iface/bind/ice_bind_test.go new file mode 100644 index 000000000..1fdd955c9 --- /dev/null +++ b/client/iface/bind/ice_bind_test.go @@ -0,0 +1,324 @@ +package bind + +import ( + "fmt" + "net" + "net/netip" + "sync" + "testing" + "time" + + "github.com/pion/transport/v3/stdnet" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" + + "github.com/netbirdio/netbird/client/iface/wgaddr" +) + +func TestICEBind_CreatesReceiverForBothIPv4AndIPv6(t *testing.T) { + iceBind := setupICEBind(t) + + ipv4Conn, ipv6Conn := createDualStackConns(t) + defer ipv4Conn.Close() + defer ipv6Conn.Close() + + rc := receiverCreator{iceBind} + pool := createMsgPool() + + // Simulate wireguard-go calling CreateReceiverFn for IPv4 + ipv4RecvFn := rc.CreateReceiverFn(ipv4.NewPacketConn(ipv4Conn), ipv4Conn, false, pool) + require.NotNil(t, ipv4RecvFn) + + iceBind.muUDPMux.Lock() + assert.NotNil(t, iceBind.ipv4Conn, "should store IPv4 connection") + assert.Nil(t, iceBind.ipv6Conn, "IPv6 not added yet") + assert.NotNil(t, iceBind.udpMux, "mux should be created after first connection") + iceBind.muUDPMux.Unlock() + + // Simulate wireguard-go calling CreateReceiverFn for IPv6 + ipv6RecvFn := rc.CreateReceiverFn(ipv6.NewPacketConn(ipv6Conn), ipv6Conn, false, pool) + require.NotNil(t, ipv6RecvFn) + + iceBind.muUDPMux.Lock() + assert.NotNil(t, iceBind.ipv4Conn, "should still have IPv4 connection") + assert.NotNil(t, iceBind.ipv6Conn, "should now have IPv6 connection") + assert.NotNil(t, iceBind.udpMux, "mux should still exist") + iceBind.muUDPMux.Unlock() + + mux, err := iceBind.GetICEMux() + require.NoError(t, err) + require.NotNil(t, mux) +} + +func TestICEBind_WorksWithIPv4Only(t *testing.T) { + iceBind := setupICEBind(t) + + ipv4Conn, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) + require.NoError(t, err) + defer ipv4Conn.Close() + + rc := receiverCreator{iceBind} + recvFn := rc.CreateReceiverFn(ipv4.NewPacketConn(ipv4Conn), ipv4Conn, false, createMsgPool()) + require.NotNil(t, recvFn) + + iceBind.muUDPMux.Lock() + assert.NotNil(t, iceBind.ipv4Conn) + assert.Nil(t, iceBind.ipv6Conn) + assert.NotNil(t, iceBind.udpMux) + iceBind.muUDPMux.Unlock() + + mux, err := iceBind.GetICEMux() + require.NoError(t, err) + require.NotNil(t, mux) +} + +func TestICEBind_WorksWithIPv6Only(t *testing.T) { + iceBind := setupICEBind(t) + + ipv6Conn, err := net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6zero, Port: 0}) + if err != nil { + t.Skipf("IPv6 not available: %v", err) + } + defer ipv6Conn.Close() + + rc := receiverCreator{iceBind} + recvFn := rc.CreateReceiverFn(ipv6.NewPacketConn(ipv6Conn), ipv6Conn, false, createMsgPool()) + require.NotNil(t, recvFn) + + iceBind.muUDPMux.Lock() + assert.Nil(t, iceBind.ipv4Conn) + assert.NotNil(t, iceBind.ipv6Conn) + assert.NotNil(t, iceBind.udpMux) + iceBind.muUDPMux.Unlock() + + mux, err := iceBind.GetICEMux() + require.NoError(t, err) + require.NotNil(t, mux) +} + +// TestICEBind_SendsToIPv4AndIPv6PeersSimultaneously verifies that we can communicate +// with peers on different address families through the same DualStackPacketConn. +func TestICEBind_SendsToIPv4AndIPv6PeersSimultaneously(t *testing.T) { + // two "remote peers" listening on different address families + ipv4Peer := listenUDP(t, "udp4", "127.0.0.1:0") + defer ipv4Peer.Close() + + ipv6Peer, err := net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6loopback, Port: 0}) + if err != nil { + t.Skipf("IPv6 not available: %v", err) + } + defer ipv6Peer.Close() + + // our local dual-stack connection + ipv4Local := listenUDP(t, "udp4", "127.0.0.1:0") + defer ipv4Local.Close() + + ipv6Local := listenUDP(t, "udp6", "[::1]:0") + defer ipv6Local.Close() + + dualStack := NewDualStackPacketConn(ipv4Local, ipv6Local) + + // send to both peers + _, err = dualStack.WriteTo([]byte("to-ipv4"), ipv4Peer.LocalAddr()) + require.NoError(t, err) + + _, err = dualStack.WriteTo([]byte("to-ipv6"), ipv6Peer.LocalAddr()) + require.NoError(t, err) + + // verify IPv4 peer got its packet from the IPv4 socket + buf := make([]byte, 100) + _ = ipv4Peer.SetReadDeadline(time.Now().Add(time.Second)) + n, addr, err := ipv4Peer.ReadFrom(buf) + require.NoError(t, err) + assert.Equal(t, "to-ipv4", string(buf[:n])) + assert.Equal(t, ipv4Local.LocalAddr().(*net.UDPAddr).Port, addr.(*net.UDPAddr).Port) + + // verify IPv6 peer got its packet from the IPv6 socket + _ = ipv6Peer.SetReadDeadline(time.Now().Add(time.Second)) + n, addr, err = ipv6Peer.ReadFrom(buf) + require.NoError(t, err) + assert.Equal(t, "to-ipv6", string(buf[:n])) + assert.Equal(t, ipv6Local.LocalAddr().(*net.UDPAddr).Port, addr.(*net.UDPAddr).Port) +} + +// TestICEBind_HandlesConcurrentMixedTraffic sends packets concurrently to both IPv4 +// and IPv6 peers. Verifies no packets get misrouted (IPv4 peer only gets v4- packets, +// IPv6 peer only gets v6- packets). Some packet loss is acceptable for UDP. +func TestICEBind_HandlesConcurrentMixedTraffic(t *testing.T) { + ipv4Peer := listenUDP(t, "udp4", "127.0.0.1:0") + defer ipv4Peer.Close() + + ipv6Peer, err := net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6loopback, Port: 0}) + if err != nil { + t.Skipf("IPv6 not available: %v", err) + } + defer ipv6Peer.Close() + + ipv4Local := listenUDP(t, "udp4", "127.0.0.1:0") + defer ipv4Local.Close() + + ipv6Local := listenUDP(t, "udp6", "[::1]:0") + defer ipv6Local.Close() + + dualStack := NewDualStackPacketConn(ipv4Local, ipv6Local) + + const packetsPerFamily = 500 + + ipv4Received := make(chan string, packetsPerFamily) + ipv6Received := make(chan string, packetsPerFamily) + + startGate := make(chan struct{}) + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + buf := make([]byte, 100) + for i := 0; i < packetsPerFamily; i++ { + n, _, err := ipv4Peer.ReadFrom(buf) + if err != nil { + return + } + ipv4Received <- string(buf[:n]) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + buf := make([]byte, 100) + for i := 0; i < packetsPerFamily; i++ { + n, _, err := ipv6Peer.ReadFrom(buf) + if err != nil { + return + } + ipv6Received <- string(buf[:n]) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + <-startGate + for i := 0; i < packetsPerFamily; i++ { + _, _ = dualStack.WriteTo([]byte(fmt.Sprintf("v4-%04d", i)), ipv4Peer.LocalAddr()) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + <-startGate + for i := 0; i < packetsPerFamily; i++ { + _, _ = dualStack.WriteTo([]byte(fmt.Sprintf("v6-%04d", i)), ipv6Peer.LocalAddr()) + } + }() + + close(startGate) + + time.AfterFunc(5*time.Second, func() { + _ = ipv4Peer.SetReadDeadline(time.Now()) + _ = ipv6Peer.SetReadDeadline(time.Now()) + }) + + wg.Wait() + close(ipv4Received) + close(ipv6Received) + + ipv4Count := 0 + for pkt := range ipv4Received { + require.True(t, len(pkt) >= 3 && pkt[:3] == "v4-", "IPv4 peer got misrouted packet: %s", pkt) + ipv4Count++ + } + + ipv6Count := 0 + for pkt := range ipv6Received { + require.True(t, len(pkt) >= 3 && pkt[:3] == "v6-", "IPv6 peer got misrouted packet: %s", pkt) + ipv6Count++ + } + + assert.Equal(t, packetsPerFamily, ipv4Count) + assert.Equal(t, packetsPerFamily, ipv6Count) +} + +func TestICEBind_DetectsAddressFamilyFromConnection(t *testing.T) { + tests := []struct { + name string + network string + addr string + wantIPv4 bool + }{ + {"IPv4 any", "udp4", "0.0.0.0:0", true}, + {"IPv4 loopback", "udp4", "127.0.0.1:0", true}, + {"IPv6 any", "udp6", "[::]:0", false}, + {"IPv6 loopback", "udp6", "[::1]:0", false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + addr, err := net.ResolveUDPAddr(tt.network, tt.addr) + require.NoError(t, err) + + conn, err := net.ListenUDP(tt.network, addr) + if err != nil { + t.Skipf("%s not available: %v", tt.network, err) + } + defer conn.Close() + + localAddr := conn.LocalAddr().(*net.UDPAddr) + isIPv4 := localAddr.IP.To4() != nil + assert.Equal(t, tt.wantIPv4, isIPv4) + }) + } +} + +// helpers + +func setupICEBind(t *testing.T) *ICEBind { + t.Helper() + transportNet, err := stdnet.NewNet() + require.NoError(t, err) + + address := wgaddr.Address{ + IP: netip.MustParseAddr("100.64.0.1"), + Network: netip.MustParsePrefix("100.64.0.0/10"), + } + return NewICEBind(transportNet, nil, address, 1280) +} + +func createDualStackConns(t *testing.T) (*net.UDPConn, *net.UDPConn) { + t.Helper() + ipv4Conn, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) + require.NoError(t, err) + + ipv6Conn, err := net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6zero, Port: 0}) + if err != nil { + ipv4Conn.Close() + t.Skipf("IPv6 not available: %v", err) + } + return ipv4Conn, ipv6Conn +} + +func createMsgPool() *sync.Pool { + return &sync.Pool{ + New: func() any { + msgs := make([]ipv6.Message, 1) + for i := range msgs { + msgs[i].Buffers = make(net.Buffers, 1) + msgs[i].OOB = make([]byte, 0, 40) + } + return &msgs + }, + } +} + +func listenUDP(t *testing.T, network, addr string) *net.UDPConn { + t.Helper() + udpAddr, err := net.ResolveUDPAddr(network, addr) + require.NoError(t, err) + conn, err := net.ListenUDP(network, udpAddr) + require.NoError(t, err) + return conn +} diff --git a/client/internal/peer/worker_ice.go b/client/internal/peer/worker_ice.go index 840fc9241..b6b9d2cf4 100644 --- a/client/internal/peer/worker_ice.go +++ b/client/internal/peer/worker_ice.go @@ -5,6 +5,7 @@ import ( "fmt" "net" "net/netip" + "strconv" "sync" "time" @@ -286,8 +287,8 @@ func (w *WorkerICE) connect(ctx context.Context, agent *icemaker.ThreadSafeAgent RosenpassAddr: remoteOfferAnswer.RosenpassAddr, LocalIceCandidateType: pair.Local.Type().String(), RemoteIceCandidateType: pair.Remote.Type().String(), - LocalIceCandidateEndpoint: fmt.Sprintf("%s:%d", pair.Local.Address(), pair.Local.Port()), - RemoteIceCandidateEndpoint: fmt.Sprintf("%s:%d", pair.Remote.Address(), pair.Remote.Port()), + LocalIceCandidateEndpoint: net.JoinHostPort(pair.Local.Address(), strconv.Itoa(pair.Local.Port())), + RemoteIceCandidateEndpoint: net.JoinHostPort(pair.Remote.Address(), strconv.Itoa(pair.Remote.Port())), Relayed: isRelayed(pair), RelayedOnLocal: isRelayCandidate(pair.Local), } @@ -328,13 +329,7 @@ func (w *WorkerICE) closeAgent(agent *icemaker.ThreadSafeAgent, cancel context.C func (w *WorkerICE) punchRemoteWGPort(pair *ice.CandidatePair, remoteWgPort int) { // wait local endpoint configuration time.Sleep(time.Second) - addrString := pair.Remote.Address() - parsed, err := netip.ParseAddr(addrString) - if (err == nil) && (parsed.Is6()) { - addrString = fmt.Sprintf("[%s]", addrString) - //IPv6 Literals need to be wrapped in brackets for Resolve*Addr() - } - addr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", addrString, remoteWgPort)) + addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(pair.Remote.Address(), strconv.Itoa(remoteWgPort))) if err != nil { w.log.Warnf("got an error while resolving the udp address, err: %s", err) return @@ -386,12 +381,44 @@ func (w *WorkerICE) onICESelectedCandidatePair(agent *icemaker.ThreadSafeAgent, } } +func (w *WorkerICE) logSuccessfulPaths(agent *icemaker.ThreadSafeAgent) { + sessionID := w.SessionID() + stats := agent.GetCandidatePairsStats() + localCandidates, _ := agent.GetLocalCandidates() + remoteCandidates, _ := agent.GetRemoteCandidates() + + localMap := make(map[string]ice.Candidate) + for _, c := range localCandidates { + localMap[c.ID()] = c + } + remoteMap := make(map[string]ice.Candidate) + for _, c := range remoteCandidates { + remoteMap[c.ID()] = c + } + + for _, stat := range stats { + if stat.State == ice.CandidatePairStateSucceeded { + local, lok := localMap[stat.LocalCandidateID] + remote, rok := remoteMap[stat.RemoteCandidateID] + if !lok || !rok { + continue + } + w.log.Debugf("successful ICE path %s: [%s %s %s] <-> [%s %s %s] rtt=%.3fms", + sessionID, + local.NetworkType(), local.Type(), local.Address(), + remote.NetworkType(), remote.Type(), remote.Address(), + stat.CurrentRoundTripTime*1000) + } + } +} + func (w *WorkerICE) onConnectionStateChange(agent *icemaker.ThreadSafeAgent, dialerCancel context.CancelFunc) func(ice.ConnectionState) { return func(state ice.ConnectionState) { w.log.Debugf("ICE ConnectionState has changed to %s", state.String()) switch state { case ice.ConnectionStateConnected: w.lastKnownState = ice.ConnectionStateConnected + w.logSuccessfulPaths(agent) return case ice.ConnectionStateFailed, ice.ConnectionStateDisconnected, ice.ConnectionStateClosed: // ice.ConnectionStateClosed happens when we recreate the agent. For the P2P to TURN switch important to diff --git a/sharedsock/sock_linux.go b/sharedsock/sock_linux.go index bc2d4d1be..523beb32b 100644 --- a/sharedsock/sock_linux.go +++ b/sharedsock/sock_linux.go @@ -154,9 +154,20 @@ func (s *SharedSocket) updateRouter() { } } -// LocalAddr returns an IPv4 address using the supplied port +// LocalAddr returns the local address, preferring IPv4 for backward compatibility. func (s *SharedSocket) LocalAddr() net.Addr { - // todo check impact on ipv6 discovery + if s.conn4 != nil { + return &net.UDPAddr{ + IP: net.IPv4zero, + Port: s.port, + } + } + if s.conn6 != nil { + return &net.UDPAddr{ + IP: net.IPv6zero, + Port: s.port, + } + } return &net.UDPAddr{ IP: net.IPv4zero, Port: s.port, From f86022eacef02f1852971c5b4e5b6518fbf9db16 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 22 Jan 2026 17:01:08 +0800 Subject: [PATCH 068/374] [client] Hide forwarding rules in status when count is zero (#5149) Co-authored-by: Claude Opus 4.5 --- client/status/status.go | 9 +++++++-- client/status/status_test.go | 2 -- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/client/status/status.go b/client/status/status.go index be28ff67d..f13163a41 100644 --- a/client/status/status.go +++ b/client/status/status.go @@ -491,6 +491,11 @@ func (o *OutputOverview) GeneralSummary(showURL bool, showRelays bool, showNameS peersCountString := fmt.Sprintf("%d/%d Connected", o.Peers.Connected, o.Peers.Total) + var forwardingRulesString string + if o.NumberOfForwardingRules > 0 { + forwardingRulesString = fmt.Sprintf("Forwarding rules: %d\n", o.NumberOfForwardingRules) + } + goos := runtime.GOOS goarch := runtime.GOARCH goarm := "" @@ -514,7 +519,7 @@ func (o *OutputOverview) GeneralSummary(showURL bool, showRelays bool, showNameS "Lazy connection: %s\n"+ "SSH Server: %s\n"+ "Networks: %s\n"+ - "Forwarding rules: %d\n"+ + "%s"+ "Peers count: %s\n", fmt.Sprintf("%s/%s%s", goos, goarch, goarm), o.DaemonVersion, @@ -531,7 +536,7 @@ func (o *OutputOverview) GeneralSummary(showURL bool, showRelays bool, showNameS lazyConnectionEnabledStatus, sshServerStatus, networks, - o.NumberOfForwardingRules, + forwardingRulesString, peersCountString, ) return summary diff --git a/client/status/status_test.go b/client/status/status_test.go index ad158722b..b02d78d64 100644 --- a/client/status/status_test.go +++ b/client/status/status_test.go @@ -567,7 +567,6 @@ Quantum resistance: false Lazy connection: false SSH Server: Disabled Networks: 10.10.0.0/24 -Forwarding rules: 0 Peers count: 2/2 Connected `, lastConnectionUpdate1, lastHandshake1, lastConnectionUpdate2, lastHandshake2, runtime.GOOS, runtime.GOARCH, overview.CliVersion) @@ -592,7 +591,6 @@ Quantum resistance: false Lazy connection: false SSH Server: Disabled Networks: 10.10.0.0/24 -Forwarding rules: 0 Peers count: 2/2 Connected ` From 8da23daae3aa371d14178ecf1685da8b6f04f926 Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Thu, 22 Jan 2026 12:18:46 +0100 Subject: [PATCH 069/374] [management] Fix activity event initiator for user group changes (#5152) --- management/server/user.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/management/server/user.go b/management/server/user.go index 1f38b749f..0a090d681 100644 --- a/management/server/user.go +++ b/management/server/user.go @@ -704,7 +704,7 @@ func (am *DefaultAccountManager) prepareUserUpdateEvents(ctx context.Context, ac "is_service_user": oldUser.IsServiceUser, "user_name": oldUser.ServiceUserName, } eventsToStore = append(eventsToStore, func() { - am.StoreEvent(ctx, oldUser.Id, oldUser.Id, accountID, activity.GroupAddedToUser, meta) + am.StoreEvent(ctx, initiatorUserID, oldUser.Id, accountID, activity.GroupAddedToUser, meta) }) } @@ -718,7 +718,7 @@ func (am *DefaultAccountManager) prepareUserUpdateEvents(ctx context.Context, ac "is_service_user": oldUser.IsServiceUser, "user_name": oldUser.ServiceUserName, } eventsToStore = append(eventsToStore, func() { - am.StoreEvent(ctx, oldUser.Id, oldUser.Id, accountID, activity.GroupRemovedFromUser, meta) + am.StoreEvent(ctx, initiatorUserID, oldUser.Id, accountID, activity.GroupRemovedFromUser, meta) }) } @@ -1282,7 +1282,7 @@ func (am *DefaultAccountManager) deleteRegularUser(ctx context.Context, accountI addPeerRemovedEvent() } - meta := map[string]any{"name": targetUserInfo.Name, "email": targetUserInfo.Email, "created_at": targetUser.CreatedAt} + meta := map[string]any{"name": targetUserInfo.Name, "email": targetUserInfo.Email, "created_at": targetUser.CreatedAt, "issued": targetUser.Issued} am.StoreEvent(ctx, initiatorUserID, targetUser.Id, accountID, activity.UserDeleted, meta) return updateAccountPeers, nil From d0221a3e72d63a28baefb7ca760b01cd9bf6612f Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 22 Jan 2026 19:24:12 +0800 Subject: [PATCH 070/374] [client] Add cpu profile to debug bundle (#4700) --- client/cmd/debug.go | 23 +++ client/internal/debug/debug.go | 21 +++ client/proto/daemon.pb.go | 294 +++++++++++++++++++++++++-------- client/proto/daemon.proto | 18 ++ client/proto/daemon_grpc.pb.go | 76 +++++++++ client/server/debug.go | 51 ++++++ client/server/server.go | 4 + client/ui/debug.go | 8 + 8 files changed, 429 insertions(+), 66 deletions(-) diff --git a/client/cmd/debug.go b/client/cmd/debug.go index bbb0ef0d6..e480df4d7 100644 --- a/client/cmd/debug.go +++ b/client/cmd/debug.go @@ -219,11 +219,33 @@ func runForDuration(cmd *cobra.Command, args []string) error { time.Sleep(3 * time.Second) + cpuProfilingStarted := false + if _, err := client.StartCPUProfile(cmd.Context(), &proto.StartCPUProfileRequest{}); err != nil { + cmd.PrintErrf("Failed to start CPU profiling: %v\n", err) + } else { + cpuProfilingStarted = true + defer func() { + if cpuProfilingStarted { + if _, err := client.StopCPUProfile(cmd.Context(), &proto.StopCPUProfileRequest{}); err != nil { + cmd.PrintErrf("Failed to stop CPU profiling: %v\n", err) + } + } + }() + } + if waitErr := waitForDurationOrCancel(cmd.Context(), duration, cmd); waitErr != nil { return waitErr } cmd.Println("\nDuration completed") + if cpuProfilingStarted { + if _, err := client.StopCPUProfile(cmd.Context(), &proto.StopCPUProfileRequest{}); err != nil { + cmd.PrintErrf("Failed to stop CPU profiling: %v\n", err) + } else { + cpuProfilingStarted = false + } + } + cmd.Println("Creating debug bundle...") request := &proto.DebugBundleRequest{ @@ -353,6 +375,7 @@ func generateDebugBundle(config *profilemanager.Config, recorder *peer.Status, c StatusRecorder: recorder, SyncResponse: syncResponse, LogPath: logFilePath, + CPUProfile: nil, }, debug.BundleConfig{ IncludeSystemInfo: true, diff --git a/client/internal/debug/debug.go b/client/internal/debug/debug.go index d3b5bc9d4..07a19036a 100644 --- a/client/internal/debug/debug.go +++ b/client/internal/debug/debug.go @@ -59,6 +59,7 @@ block.prof: Block profiling information. heap.prof: Heap profiling information (snapshot of memory allocations). allocs.prof: Allocations profiling information. threadcreate.prof: Thread creation profiling information. +cpu.prof: CPU profiling information. stack_trace.txt: Complete stack traces of all goroutines at the time of bundle creation. @@ -226,6 +227,7 @@ type BundleGenerator struct { statusRecorder *peer.Status syncResponse *mgmProto.SyncResponse logPath string + cpuProfile []byte anonymize bool includeSystemInfo bool @@ -245,6 +247,7 @@ type GeneratorDependencies struct { StatusRecorder *peer.Status SyncResponse *mgmProto.SyncResponse LogPath string + CPUProfile []byte } func NewBundleGenerator(deps GeneratorDependencies, cfg BundleConfig) *BundleGenerator { @@ -261,6 +264,7 @@ func NewBundleGenerator(deps GeneratorDependencies, cfg BundleConfig) *BundleGen statusRecorder: deps.StatusRecorder, syncResponse: deps.SyncResponse, logPath: deps.LogPath, + cpuProfile: deps.CPUProfile, anonymize: cfg.Anonymize, includeSystemInfo: cfg.IncludeSystemInfo, @@ -324,6 +328,10 @@ func (g *BundleGenerator) createArchive() error { log.Errorf("failed to add profiles to debug bundle: %v", err) } + if err := g.addCPUProfile(); err != nil { + log.Errorf("failed to add CPU profile to debug bundle: %v", err) + } + if err := g.addStackTrace(); err != nil { log.Errorf("failed to add stack trace to debug bundle: %v", err) } @@ -542,6 +550,19 @@ func (g *BundleGenerator) addProf() (err error) { return nil } +func (g *BundleGenerator) addCPUProfile() error { + if len(g.cpuProfile) == 0 { + return nil + } + + reader := bytes.NewReader(g.cpuProfile) + if err := g.addFileToZip(reader, "cpu.prof"); err != nil { + return fmt.Errorf("add CPU profile to zip: %w", err) + } + + return nil +} + func (g *BundleGenerator) addStackTrace() error { buf := make([]byte, 5242880) // 5 MB buffer n := runtime.Stack(buf, true) diff --git a/client/proto/daemon.pb.go b/client/proto/daemon.pb.go index 9cbe34e1d..1d9d7233c 100644 --- a/client/proto/daemon.pb.go +++ b/client/proto/daemon.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.36.6 -// protoc v6.33.1 +// protoc v6.32.1 // source: daemon.proto package proto @@ -5364,6 +5364,154 @@ func (x *WaitJWTTokenResponse) GetExpiresIn() int64 { return 0 } +// StartCPUProfileRequest for starting CPU profiling +type StartCPUProfileRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartCPUProfileRequest) Reset() { + *x = StartCPUProfileRequest{} + mi := &file_daemon_proto_msgTypes[79] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartCPUProfileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartCPUProfileRequest) ProtoMessage() {} + +func (x *StartCPUProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[79] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartCPUProfileRequest.ProtoReflect.Descriptor instead. +func (*StartCPUProfileRequest) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{79} +} + +// StartCPUProfileResponse confirms CPU profiling has started +type StartCPUProfileResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartCPUProfileResponse) Reset() { + *x = StartCPUProfileResponse{} + mi := &file_daemon_proto_msgTypes[80] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartCPUProfileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartCPUProfileResponse) ProtoMessage() {} + +func (x *StartCPUProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[80] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartCPUProfileResponse.ProtoReflect.Descriptor instead. +func (*StartCPUProfileResponse) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{80} +} + +// StopCPUProfileRequest for stopping CPU profiling +type StopCPUProfileRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StopCPUProfileRequest) Reset() { + *x = StopCPUProfileRequest{} + mi := &file_daemon_proto_msgTypes[81] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StopCPUProfileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopCPUProfileRequest) ProtoMessage() {} + +func (x *StopCPUProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[81] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopCPUProfileRequest.ProtoReflect.Descriptor instead. +func (*StopCPUProfileRequest) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{81} +} + +// StopCPUProfileResponse confirms CPU profiling has stopped +type StopCPUProfileResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StopCPUProfileResponse) Reset() { + *x = StopCPUProfileResponse{} + mi := &file_daemon_proto_msgTypes[82] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StopCPUProfileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopCPUProfileResponse) ProtoMessage() {} + +func (x *StopCPUProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[82] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopCPUProfileResponse.ProtoReflect.Descriptor instead. +func (*StopCPUProfileResponse) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{82} +} + type InstallerResultRequest struct { state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields @@ -5372,7 +5520,7 @@ type InstallerResultRequest struct { func (x *InstallerResultRequest) Reset() { *x = InstallerResultRequest{} - mi := &file_daemon_proto_msgTypes[79] + mi := &file_daemon_proto_msgTypes[83] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5384,7 +5532,7 @@ func (x *InstallerResultRequest) String() string { func (*InstallerResultRequest) ProtoMessage() {} func (x *InstallerResultRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[79] + mi := &file_daemon_proto_msgTypes[83] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5397,7 +5545,7 @@ func (x *InstallerResultRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use InstallerResultRequest.ProtoReflect.Descriptor instead. func (*InstallerResultRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{79} + return file_daemon_proto_rawDescGZIP(), []int{83} } type InstallerResultResponse struct { @@ -5410,7 +5558,7 @@ type InstallerResultResponse struct { func (x *InstallerResultResponse) Reset() { *x = InstallerResultResponse{} - mi := &file_daemon_proto_msgTypes[80] + mi := &file_daemon_proto_msgTypes[84] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5422,7 +5570,7 @@ func (x *InstallerResultResponse) String() string { func (*InstallerResultResponse) ProtoMessage() {} func (x *InstallerResultResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[80] + mi := &file_daemon_proto_msgTypes[84] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5435,7 +5583,7 @@ func (x *InstallerResultResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use InstallerResultResponse.ProtoReflect.Descriptor instead. func (*InstallerResultResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{80} + return file_daemon_proto_rawDescGZIP(), []int{84} } func (x *InstallerResultResponse) GetSuccess() bool { @@ -5462,7 +5610,7 @@ type PortInfo_Range struct { func (x *PortInfo_Range) Reset() { *x = PortInfo_Range{} - mi := &file_daemon_proto_msgTypes[82] + mi := &file_daemon_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5474,7 +5622,7 @@ func (x *PortInfo_Range) String() string { func (*PortInfo_Range) ProtoMessage() {} func (x *PortInfo_Range) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[82] + mi := &file_daemon_proto_msgTypes[86] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5994,6 +6142,10 @@ const file_daemon_proto_rawDesc = "" + "\x05token\x18\x01 \x01(\tR\x05token\x12\x1c\n" + "\ttokenType\x18\x02 \x01(\tR\ttokenType\x12\x1c\n" + "\texpiresIn\x18\x03 \x01(\x03R\texpiresIn\"\x18\n" + + "\x16StartCPUProfileRequest\"\x19\n" + + "\x17StartCPUProfileResponse\"\x17\n" + + "\x15StopCPUProfileRequest\"\x18\n" + + "\x16StopCPUProfileResponse\"\x18\n" + "\x16InstallerResultRequest\"O\n" + "\x17InstallerResultResponse\x12\x18\n" + "\asuccess\x18\x01 \x01(\bR\asuccess\x12\x1a\n" + @@ -6006,7 +6158,7 @@ const file_daemon_proto_rawDesc = "" + "\x04WARN\x10\x04\x12\b\n" + "\x04INFO\x10\x05\x12\t\n" + "\x05DEBUG\x10\x06\x12\t\n" + - "\x05TRACE\x10\a2\xb4\x13\n" + + "\x05TRACE\x10\a2\xdd\x14\n" + "\rDaemonService\x126\n" + "\x05Login\x12\x14.daemon.LoginRequest\x1a\x15.daemon.LoginResponse\"\x00\x12K\n" + "\fWaitSSOLogin\x12\x1b.daemon.WaitSSOLoginRequest\x1a\x1c.daemon.WaitSSOLoginResponse\"\x00\x12-\n" + @@ -6041,7 +6193,9 @@ const file_daemon_proto_rawDesc = "" + "\vGetFeatures\x12\x1a.daemon.GetFeaturesRequest\x1a\x1b.daemon.GetFeaturesResponse\"\x00\x12Z\n" + "\x11GetPeerSSHHostKey\x12 .daemon.GetPeerSSHHostKeyRequest\x1a!.daemon.GetPeerSSHHostKeyResponse\"\x00\x12Q\n" + "\x0eRequestJWTAuth\x12\x1d.daemon.RequestJWTAuthRequest\x1a\x1e.daemon.RequestJWTAuthResponse\"\x00\x12K\n" + - "\fWaitJWTToken\x12\x1b.daemon.WaitJWTTokenRequest\x1a\x1c.daemon.WaitJWTTokenResponse\"\x00\x12N\n" + + "\fWaitJWTToken\x12\x1b.daemon.WaitJWTTokenRequest\x1a\x1c.daemon.WaitJWTTokenResponse\"\x00\x12T\n" + + "\x0fStartCPUProfile\x12\x1e.daemon.StartCPUProfileRequest\x1a\x1f.daemon.StartCPUProfileResponse\"\x00\x12Q\n" + + "\x0eStopCPUProfile\x12\x1d.daemon.StopCPUProfileRequest\x1a\x1e.daemon.StopCPUProfileResponse\"\x00\x12N\n" + "\x11NotifyOSLifecycle\x12\x1a.daemon.OSLifecycleRequest\x1a\x1b.daemon.OSLifecycleResponse\"\x00\x12W\n" + "\x12GetInstallerResult\x12\x1e.daemon.InstallerResultRequest\x1a\x1f.daemon.InstallerResultResponse\"\x00B\bZ\x06/protob\x06proto3" @@ -6058,7 +6212,7 @@ func file_daemon_proto_rawDescGZIP() []byte { } var file_daemon_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 84) +var file_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 88) var file_daemon_proto_goTypes = []any{ (LogLevel)(0), // 0: daemon.LogLevel (OSLifecycleRequest_CycleType)(0), // 1: daemon.OSLifecycleRequest.CycleType @@ -6143,21 +6297,25 @@ var file_daemon_proto_goTypes = []any{ (*RequestJWTAuthResponse)(nil), // 80: daemon.RequestJWTAuthResponse (*WaitJWTTokenRequest)(nil), // 81: daemon.WaitJWTTokenRequest (*WaitJWTTokenResponse)(nil), // 82: daemon.WaitJWTTokenResponse - (*InstallerResultRequest)(nil), // 83: daemon.InstallerResultRequest - (*InstallerResultResponse)(nil), // 84: daemon.InstallerResultResponse - nil, // 85: daemon.Network.ResolvedIPsEntry - (*PortInfo_Range)(nil), // 86: daemon.PortInfo.Range - nil, // 87: daemon.SystemEvent.MetadataEntry - (*durationpb.Duration)(nil), // 88: google.protobuf.Duration - (*timestamppb.Timestamp)(nil), // 89: google.protobuf.Timestamp + (*StartCPUProfileRequest)(nil), // 83: daemon.StartCPUProfileRequest + (*StartCPUProfileResponse)(nil), // 84: daemon.StartCPUProfileResponse + (*StopCPUProfileRequest)(nil), // 85: daemon.StopCPUProfileRequest + (*StopCPUProfileResponse)(nil), // 86: daemon.StopCPUProfileResponse + (*InstallerResultRequest)(nil), // 87: daemon.InstallerResultRequest + (*InstallerResultResponse)(nil), // 88: daemon.InstallerResultResponse + nil, // 89: daemon.Network.ResolvedIPsEntry + (*PortInfo_Range)(nil), // 90: daemon.PortInfo.Range + nil, // 91: daemon.SystemEvent.MetadataEntry + (*durationpb.Duration)(nil), // 92: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 93: google.protobuf.Timestamp } var file_daemon_proto_depIdxs = []int32{ 1, // 0: daemon.OSLifecycleRequest.type:type_name -> daemon.OSLifecycleRequest.CycleType - 88, // 1: daemon.LoginRequest.dnsRouteInterval:type_name -> google.protobuf.Duration + 92, // 1: daemon.LoginRequest.dnsRouteInterval:type_name -> google.protobuf.Duration 27, // 2: daemon.StatusResponse.fullStatus:type_name -> daemon.FullStatus - 89, // 3: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp - 89, // 4: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp - 88, // 5: daemon.PeerState.latency:type_name -> google.protobuf.Duration + 93, // 3: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp + 93, // 4: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp + 92, // 5: daemon.PeerState.latency:type_name -> google.protobuf.Duration 25, // 6: daemon.SSHServerState.sessions:type_name -> daemon.SSHSessionInfo 22, // 7: daemon.FullStatus.managementState:type_name -> daemon.ManagementState 21, // 8: daemon.FullStatus.signalState:type_name -> daemon.SignalState @@ -6168,8 +6326,8 @@ var file_daemon_proto_depIdxs = []int32{ 57, // 13: daemon.FullStatus.events:type_name -> daemon.SystemEvent 26, // 14: daemon.FullStatus.sshServerState:type_name -> daemon.SSHServerState 33, // 15: daemon.ListNetworksResponse.routes:type_name -> daemon.Network - 85, // 16: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry - 86, // 17: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range + 89, // 16: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry + 90, // 17: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range 34, // 18: daemon.ForwardingRule.destinationPort:type_name -> daemon.PortInfo 34, // 19: daemon.ForwardingRule.translatedPort:type_name -> daemon.PortInfo 35, // 20: daemon.ForwardingRulesResponse.rules:type_name -> daemon.ForwardingRule @@ -6180,10 +6338,10 @@ var file_daemon_proto_depIdxs = []int32{ 54, // 25: daemon.TracePacketResponse.stages:type_name -> daemon.TraceStage 2, // 26: daemon.SystemEvent.severity:type_name -> daemon.SystemEvent.Severity 3, // 27: daemon.SystemEvent.category:type_name -> daemon.SystemEvent.Category - 89, // 28: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp - 87, // 29: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry + 93, // 28: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp + 91, // 29: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry 57, // 30: daemon.GetEventsResponse.events:type_name -> daemon.SystemEvent - 88, // 31: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration + 92, // 31: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration 70, // 32: daemon.ListProfilesResponse.profiles:type_name -> daemon.Profile 32, // 33: daemon.Network.ResolvedIPsEntry.value:type_name -> daemon.IPList 7, // 34: daemon.DaemonService.Login:input_type -> daemon.LoginRequest @@ -6217,43 +6375,47 @@ var file_daemon_proto_depIdxs = []int32{ 77, // 62: daemon.DaemonService.GetPeerSSHHostKey:input_type -> daemon.GetPeerSSHHostKeyRequest 79, // 63: daemon.DaemonService.RequestJWTAuth:input_type -> daemon.RequestJWTAuthRequest 81, // 64: daemon.DaemonService.WaitJWTToken:input_type -> daemon.WaitJWTTokenRequest - 5, // 65: daemon.DaemonService.NotifyOSLifecycle:input_type -> daemon.OSLifecycleRequest - 83, // 66: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest - 8, // 67: daemon.DaemonService.Login:output_type -> daemon.LoginResponse - 10, // 68: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse - 12, // 69: daemon.DaemonService.Up:output_type -> daemon.UpResponse - 14, // 70: daemon.DaemonService.Status:output_type -> daemon.StatusResponse - 16, // 71: daemon.DaemonService.Down:output_type -> daemon.DownResponse - 18, // 72: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse - 29, // 73: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse - 31, // 74: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse - 31, // 75: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse - 36, // 76: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse - 38, // 77: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse - 40, // 78: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse - 42, // 79: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse - 45, // 80: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse - 47, // 81: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse - 49, // 82: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse - 51, // 83: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse - 55, // 84: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse - 57, // 85: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent - 59, // 86: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse - 61, // 87: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse - 63, // 88: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse - 65, // 89: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse - 67, // 90: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse - 69, // 91: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse - 72, // 92: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse - 74, // 93: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse - 76, // 94: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse - 78, // 95: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse - 80, // 96: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse - 82, // 97: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse - 6, // 98: daemon.DaemonService.NotifyOSLifecycle:output_type -> daemon.OSLifecycleResponse - 84, // 99: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse - 67, // [67:100] is the sub-list for method output_type - 34, // [34:67] is the sub-list for method input_type + 83, // 65: daemon.DaemonService.StartCPUProfile:input_type -> daemon.StartCPUProfileRequest + 85, // 66: daemon.DaemonService.StopCPUProfile:input_type -> daemon.StopCPUProfileRequest + 5, // 67: daemon.DaemonService.NotifyOSLifecycle:input_type -> daemon.OSLifecycleRequest + 87, // 68: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest + 8, // 69: daemon.DaemonService.Login:output_type -> daemon.LoginResponse + 10, // 70: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse + 12, // 71: daemon.DaemonService.Up:output_type -> daemon.UpResponse + 14, // 72: daemon.DaemonService.Status:output_type -> daemon.StatusResponse + 16, // 73: daemon.DaemonService.Down:output_type -> daemon.DownResponse + 18, // 74: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse + 29, // 75: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse + 31, // 76: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse + 31, // 77: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse + 36, // 78: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse + 38, // 79: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse + 40, // 80: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse + 42, // 81: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse + 45, // 82: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse + 47, // 83: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse + 49, // 84: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse + 51, // 85: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse + 55, // 86: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse + 57, // 87: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent + 59, // 88: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse + 61, // 89: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse + 63, // 90: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse + 65, // 91: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse + 67, // 92: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse + 69, // 93: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse + 72, // 94: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse + 74, // 95: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse + 76, // 96: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse + 78, // 97: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse + 80, // 98: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse + 82, // 99: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse + 84, // 100: daemon.DaemonService.StartCPUProfile:output_type -> daemon.StartCPUProfileResponse + 86, // 101: daemon.DaemonService.StopCPUProfile:output_type -> daemon.StopCPUProfileResponse + 6, // 102: daemon.DaemonService.NotifyOSLifecycle:output_type -> daemon.OSLifecycleResponse + 88, // 103: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse + 69, // [69:104] is the sub-list for method output_type + 34, // [34:69] is the sub-list for method input_type 34, // [34:34] is the sub-list for extension type_name 34, // [34:34] is the sub-list for extension extendee 0, // [0:34] is the sub-list for field type_name @@ -6283,7 +6445,7 @@ func file_daemon_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_daemon_proto_rawDesc), len(file_daemon_proto_rawDesc)), NumEnums: 4, - NumMessages: 84, + NumMessages: 88, NumExtensions: 0, NumServices: 1, }, diff --git a/client/proto/daemon.proto b/client/proto/daemon.proto index 7a802d830..68b9a9348 100644 --- a/client/proto/daemon.proto +++ b/client/proto/daemon.proto @@ -94,6 +94,12 @@ service DaemonService { // WaitJWTToken waits for JWT authentication completion rpc WaitJWTToken(WaitJWTTokenRequest) returns (WaitJWTTokenResponse) {} +// StartCPUProfile starts CPU profiling in the daemon + rpc StartCPUProfile(StartCPUProfileRequest) returns (StartCPUProfileResponse) {} + + // StopCPUProfile stops CPU profiling in the daemon + rpc StopCPUProfile(StopCPUProfileRequest) returns (StopCPUProfileResponse) {} + rpc NotifyOSLifecycle(OSLifecycleRequest) returns(OSLifecycleResponse) {} rpc GetInstallerResult(InstallerResultRequest) returns (InstallerResultResponse) {} @@ -776,6 +782,18 @@ message WaitJWTTokenResponse { int64 expiresIn = 3; } +// StartCPUProfileRequest for starting CPU profiling +message StartCPUProfileRequest {} + +// StartCPUProfileResponse confirms CPU profiling has started +message StartCPUProfileResponse {} + +// StopCPUProfileRequest for stopping CPU profiling +message StopCPUProfileRequest {} + +// StopCPUProfileResponse confirms CPU profiling has stopped +message StopCPUProfileResponse {} + message InstallerResultRequest { } diff --git a/client/proto/daemon_grpc.pb.go b/client/proto/daemon_grpc.pb.go index fdabb1879..ea9b4df05 100644 --- a/client/proto/daemon_grpc.pb.go +++ b/client/proto/daemon_grpc.pb.go @@ -70,6 +70,10 @@ type DaemonServiceClient interface { RequestJWTAuth(ctx context.Context, in *RequestJWTAuthRequest, opts ...grpc.CallOption) (*RequestJWTAuthResponse, error) // WaitJWTToken waits for JWT authentication completion WaitJWTToken(ctx context.Context, in *WaitJWTTokenRequest, opts ...grpc.CallOption) (*WaitJWTTokenResponse, error) + // StartCPUProfile starts CPU profiling in the daemon + StartCPUProfile(ctx context.Context, in *StartCPUProfileRequest, opts ...grpc.CallOption) (*StartCPUProfileResponse, error) + // StopCPUProfile stops CPU profiling in the daemon + StopCPUProfile(ctx context.Context, in *StopCPUProfileRequest, opts ...grpc.CallOption) (*StopCPUProfileResponse, error) NotifyOSLifecycle(ctx context.Context, in *OSLifecycleRequest, opts ...grpc.CallOption) (*OSLifecycleResponse, error) GetInstallerResult(ctx context.Context, in *InstallerResultRequest, opts ...grpc.CallOption) (*InstallerResultResponse, error) } @@ -384,6 +388,24 @@ func (c *daemonServiceClient) WaitJWTToken(ctx context.Context, in *WaitJWTToken return out, nil } +func (c *daemonServiceClient) StartCPUProfile(ctx context.Context, in *StartCPUProfileRequest, opts ...grpc.CallOption) (*StartCPUProfileResponse, error) { + out := new(StartCPUProfileResponse) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/StartCPUProfile", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) StopCPUProfile(ctx context.Context, in *StopCPUProfileRequest, opts ...grpc.CallOption) (*StopCPUProfileResponse, error) { + out := new(StopCPUProfileResponse) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/StopCPUProfile", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *daemonServiceClient) NotifyOSLifecycle(ctx context.Context, in *OSLifecycleRequest, opts ...grpc.CallOption) (*OSLifecycleResponse, error) { out := new(OSLifecycleResponse) err := c.cc.Invoke(ctx, "/daemon.DaemonService/NotifyOSLifecycle", in, out, opts...) @@ -458,6 +480,10 @@ type DaemonServiceServer interface { RequestJWTAuth(context.Context, *RequestJWTAuthRequest) (*RequestJWTAuthResponse, error) // WaitJWTToken waits for JWT authentication completion WaitJWTToken(context.Context, *WaitJWTTokenRequest) (*WaitJWTTokenResponse, error) + // StartCPUProfile starts CPU profiling in the daemon + StartCPUProfile(context.Context, *StartCPUProfileRequest) (*StartCPUProfileResponse, error) + // StopCPUProfile stops CPU profiling in the daemon + StopCPUProfile(context.Context, *StopCPUProfileRequest) (*StopCPUProfileResponse, error) NotifyOSLifecycle(context.Context, *OSLifecycleRequest) (*OSLifecycleResponse, error) GetInstallerResult(context.Context, *InstallerResultRequest) (*InstallerResultResponse, error) mustEmbedUnimplementedDaemonServiceServer() @@ -560,6 +586,12 @@ func (UnimplementedDaemonServiceServer) RequestJWTAuth(context.Context, *Request func (UnimplementedDaemonServiceServer) WaitJWTToken(context.Context, *WaitJWTTokenRequest) (*WaitJWTTokenResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method WaitJWTToken not implemented") } +func (UnimplementedDaemonServiceServer) StartCPUProfile(context.Context, *StartCPUProfileRequest) (*StartCPUProfileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartCPUProfile not implemented") +} +func (UnimplementedDaemonServiceServer) StopCPUProfile(context.Context, *StopCPUProfileRequest) (*StopCPUProfileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StopCPUProfile not implemented") +} func (UnimplementedDaemonServiceServer) NotifyOSLifecycle(context.Context, *OSLifecycleRequest) (*OSLifecycleResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method NotifyOSLifecycle not implemented") } @@ -1140,6 +1172,42 @@ func _DaemonService_WaitJWTToken_Handler(srv interface{}, ctx context.Context, d return interceptor(ctx, in, info, handler) } +func _DaemonService_StartCPUProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartCPUProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).StartCPUProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/daemon.DaemonService/StartCPUProfile", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).StartCPUProfile(ctx, req.(*StartCPUProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_StopCPUProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StopCPUProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).StopCPUProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/daemon.DaemonService/StopCPUProfile", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).StopCPUProfile(ctx, req.(*StopCPUProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _DaemonService_NotifyOSLifecycle_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(OSLifecycleRequest) if err := dec(in); err != nil { @@ -1303,6 +1371,14 @@ var DaemonService_ServiceDesc = grpc.ServiceDesc{ MethodName: "WaitJWTToken", Handler: _DaemonService_WaitJWTToken_Handler, }, + { + MethodName: "StartCPUProfile", + Handler: _DaemonService_StartCPUProfile_Handler, + }, + { + MethodName: "StopCPUProfile", + Handler: _DaemonService_StopCPUProfile_Handler, + }, { MethodName: "NotifyOSLifecycle", Handler: _DaemonService_NotifyOSLifecycle_Handler, diff --git a/client/server/debug.go b/client/server/debug.go index 104fd30f4..5646cea79 100644 --- a/client/server/debug.go +++ b/client/server/debug.go @@ -3,9 +3,11 @@ package server import ( + "bytes" "context" "errors" "fmt" + "runtime/pprof" log "github.com/sirupsen/logrus" @@ -24,12 +26,21 @@ func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) ( log.Warnf("failed to get latest sync response: %v", err) } + var cpuProfileData []byte + if s.cpuProfileBuf != nil && !s.cpuProfiling { + cpuProfileData = s.cpuProfileBuf.Bytes() + defer func() { + s.cpuProfileBuf = nil + }() + } + bundleGenerator := debug.NewBundleGenerator( debug.GeneratorDependencies{ InternalConfig: s.config, StatusRecorder: s.statusRecorder, SyncResponse: syncResponse, LogPath: s.logFile, + CPUProfile: cpuProfileData, }, debug.BundleConfig{ Anonymize: req.GetAnonymize(), @@ -109,3 +120,43 @@ func (s *Server) getLatestSyncResponse() (*mgmProto.SyncResponse, error) { return cClient.GetLatestSyncResponse() } + +// StartCPUProfile starts CPU profiling in the daemon. +func (s *Server) StartCPUProfile(_ context.Context, _ *proto.StartCPUProfileRequest) (*proto.StartCPUProfileResponse, error) { + s.mutex.Lock() + defer s.mutex.Unlock() + + if s.cpuProfiling { + return nil, fmt.Errorf("CPU profiling already in progress") + } + + s.cpuProfileBuf = &bytes.Buffer{} + s.cpuProfiling = true + if err := pprof.StartCPUProfile(s.cpuProfileBuf); err != nil { + s.cpuProfileBuf = nil + s.cpuProfiling = false + return nil, fmt.Errorf("start CPU profile: %w", err) + } + + log.Info("CPU profiling started") + return &proto.StartCPUProfileResponse{}, nil +} + +// StopCPUProfile stops CPU profiling in the daemon. +func (s *Server) StopCPUProfile(_ context.Context, _ *proto.StopCPUProfileRequest) (*proto.StopCPUProfileResponse, error) { + s.mutex.Lock() + defer s.mutex.Unlock() + + if !s.cpuProfiling { + return nil, fmt.Errorf("CPU profiling not in progress") + } + + pprof.StopCPUProfile() + s.cpuProfiling = false + + if s.cpuProfileBuf != nil { + log.Infof("CPU profiling stopped, captured %d bytes", s.cpuProfileBuf.Len()) + } + + return &proto.StopCPUProfileResponse{}, nil +} diff --git a/client/server/server.go b/client/server/server.go index 408bd56db..e3c95077a 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -1,6 +1,7 @@ package server import ( + "bytes" "context" "errors" "fmt" @@ -77,6 +78,9 @@ type Server struct { persistSyncResponse bool isSessionActive atomic.Bool + cpuProfileBuf *bytes.Buffer + cpuProfiling bool + profileManager *profilemanager.ServiceManager profilesDisabled bool updateSettingsDisabled bool diff --git a/client/ui/debug.go b/client/ui/debug.go index e9bcfde41..29f73a66a 100644 --- a/client/ui/debug.go +++ b/client/ui/debug.go @@ -406,6 +406,10 @@ func (s *serviceClient) configureServiceForDebug( } time.Sleep(time.Second * 3) + if _, err := conn.StartCPUProfile(s.ctx, &proto.StartCPUProfileRequest{}); err != nil { + log.Warnf("failed to start CPU profiling: %v", err) + } + return nil } @@ -428,6 +432,10 @@ func (s *serviceClient) collectDebugData( progress.progressBar.Hide() progress.statusLabel.SetText("Collecting debug data...") + if _, err := conn.StopCPUProfile(s.ctx, &proto.StopCPUProfileRequest{}); err != nil { + log.Warnf("failed to stop CPU profiling: %v", err) + } + return nil } From a1de2b8a986269961226e3997562002f85bc8293 Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Thu, 22 Jan 2026 13:01:13 +0100 Subject: [PATCH 071/374] [management] Move activity store encryption to shared crypt package (#5111) --- go.mod | 2 +- go.sum | 4 +- management/server/activity/store/crypt.go | 136 -------- .../server/activity/store/crypt_test.go | 310 ------------------ management/server/activity/store/migration.go | 9 +- .../server/activity/store/migration_test.go | 5 +- management/server/activity/store/sql_store.go | 9 +- .../server/activity/store/sql_store_test.go | 3 +- util/crypt/crypt_test.go | 139 ++++++++ util/crypt/legacy.go | 71 ++++ util/crypt/legacy_test.go | 164 +++++++++ 11 files changed, 392 insertions(+), 460 deletions(-) delete mode 100644 management/server/activity/store/crypt.go delete mode 100644 management/server/activity/store/crypt_test.go create mode 100644 util/crypt/crypt_test.go create mode 100644 util/crypt/legacy.go create mode 100644 util/crypt/legacy_test.go diff --git a/go.mod b/go.mod index cb16fff52..8ac5613ee 100644 --- a/go.mod +++ b/go.mod @@ -68,7 +68,7 @@ require ( github.com/mdlayher/socket v0.5.1 github.com/miekg/dns v1.1.59 github.com/mitchellh/hashstructure/v2 v2.0.2 - github.com/netbirdio/management-integrations/integrations v0.0.0-20251203183432-d5400f030847 + github.com/netbirdio/management-integrations/integrations v0.0.0-20260122111742-a6f99668844f github.com/netbirdio/signal-dispatcher/dispatcher v0.0.0-20250805121659-6b4ac470ca45 github.com/oapi-codegen/runtime v1.1.2 github.com/okta/okta-sdk-golang/v2 v2.18.0 diff --git a/go.sum b/go.sum index c59acbb23..6adc7f7e8 100644 --- a/go.sum +++ b/go.sum @@ -406,8 +406,8 @@ github.com/netbirdio/go-netroute v0.0.0-20240611143515-f59b0e1d3944 h1:TDtJKmM6S github.com/netbirdio/go-netroute v0.0.0-20240611143515-f59b0e1d3944/go.mod h1:sHA6TRxjQ6RLbnI+3R4DZo2Eseg/iKiPRfNmcuNySVQ= github.com/netbirdio/ice/v4 v4.0.0-20250908184934-6202be846b51 h1:Ov4qdafATOgGMB1wbSuh+0aAHcwz9hdvB6VZjh1mVMI= github.com/netbirdio/ice/v4 v4.0.0-20250908184934-6202be846b51/go.mod h1:ZSIbPdBn5hePO8CpF1PekH2SfpTxg1PDhEwtbqZS7R8= -github.com/netbirdio/management-integrations/integrations v0.0.0-20251203183432-d5400f030847 h1:V0zsYYMU5d2UN1m9zOLPEZCGWpnhtkYcxQVi9Rrx3bY= -github.com/netbirdio/management-integrations/integrations v0.0.0-20251203183432-d5400f030847/go.mod h1:qzLCKeR253jtsWhfZTt4fyegI5zei32jKZykV+oSQOo= +github.com/netbirdio/management-integrations/integrations v0.0.0-20260122111742-a6f99668844f h1:CTBf0je/FpKr2lVSMZLak7m8aaWcS6ur4SOfhSSazFI= +github.com/netbirdio/management-integrations/integrations v0.0.0-20260122111742-a6f99668844f/go.mod h1:y7CxagMYzg9dgu+masRqYM7BQlOGA5Y8US85MCNFPlY= github.com/netbirdio/service v0.0.0-20240911161631-f62744f42502 h1:3tHlFmhTdX9axERMVN63dqyFqnvuD+EMJHzM7mNGON8= github.com/netbirdio/service v0.0.0-20240911161631-f62744f42502/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= github.com/netbirdio/signal-dispatcher/dispatcher v0.0.0-20250805121659-6b4ac470ca45 h1:ujgviVYmx243Ksy7NdSwrdGPSRNE3pb8kEDSpH0QuAQ= diff --git a/management/server/activity/store/crypt.go b/management/server/activity/store/crypt.go deleted file mode 100644 index ce97347d4..000000000 --- a/management/server/activity/store/crypt.go +++ /dev/null @@ -1,136 +0,0 @@ -package store - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "encoding/base64" - "errors" -) - -var iv = []byte{10, 22, 13, 79, 05, 8, 52, 91, 87, 98, 88, 98, 35, 25, 13, 05} - -type FieldEncrypt struct { - block cipher.Block - gcm cipher.AEAD -} - -func GenerateKey() (string, error) { - key := make([]byte, 32) - _, err := rand.Read(key) - if err != nil { - return "", err - } - readableKey := base64.StdEncoding.EncodeToString(key) - return readableKey, nil -} - -func NewFieldEncrypt(key string) (*FieldEncrypt, error) { - binKey, err := base64.StdEncoding.DecodeString(key) - if err != nil { - return nil, err - } - - block, err := aes.NewCipher(binKey) - if err != nil { - return nil, err - } - - gcm, err := cipher.NewGCM(block) - if err != nil { - return nil, err - } - - ec := &FieldEncrypt{ - block: block, - gcm: gcm, - } - - return ec, nil -} - -func (ec *FieldEncrypt) LegacyEncrypt(payload string) string { - plainText := pkcs5Padding([]byte(payload)) - cipherText := make([]byte, len(plainText)) - cbc := cipher.NewCBCEncrypter(ec.block, iv) - cbc.CryptBlocks(cipherText, plainText) - return base64.StdEncoding.EncodeToString(cipherText) -} - -// Encrypt encrypts plaintext using AES-GCM -func (ec *FieldEncrypt) Encrypt(payload string) (string, error) { - plaintext := []byte(payload) - nonceSize := ec.gcm.NonceSize() - - nonce := make([]byte, nonceSize, len(plaintext)+nonceSize+ec.gcm.Overhead()) - if _, err := rand.Read(nonce); err != nil { - return "", err - } - - ciphertext := ec.gcm.Seal(nonce, nonce, plaintext, nil) - - return base64.StdEncoding.EncodeToString(ciphertext), nil -} - -func (ec *FieldEncrypt) LegacyDecrypt(data string) (string, error) { - cipherText, err := base64.StdEncoding.DecodeString(data) - if err != nil { - return "", err - } - cbc := cipher.NewCBCDecrypter(ec.block, iv) - cbc.CryptBlocks(cipherText, cipherText) - payload, err := pkcs5UnPadding(cipherText) - if err != nil { - return "", err - } - - return string(payload), nil -} - -// Decrypt decrypts ciphertext using AES-GCM -func (ec *FieldEncrypt) Decrypt(data string) (string, error) { - cipherText, err := base64.StdEncoding.DecodeString(data) - if err != nil { - return "", err - } - - nonceSize := ec.gcm.NonceSize() - if len(cipherText) < nonceSize { - return "", errors.New("cipher text too short") - } - - nonce, cipherText := cipherText[:nonceSize], cipherText[nonceSize:] - plainText, err := ec.gcm.Open(nil, nonce, cipherText, nil) - if err != nil { - return "", err - } - - return string(plainText), nil -} - -func pkcs5Padding(ciphertext []byte) []byte { - padding := aes.BlockSize - len(ciphertext)%aes.BlockSize - padText := bytes.Repeat([]byte{byte(padding)}, padding) - return append(ciphertext, padText...) -} -func pkcs5UnPadding(src []byte) ([]byte, error) { - srcLen := len(src) - if srcLen == 0 { - return nil, errors.New("input data is empty") - } - - paddingLen := int(src[srcLen-1]) - if paddingLen == 0 || paddingLen > aes.BlockSize || paddingLen > srcLen { - return nil, errors.New("invalid padding size") - } - - // Verify that all padding bytes are the same - for i := 0; i < paddingLen; i++ { - if src[srcLen-1-i] != byte(paddingLen) { - return nil, errors.New("invalid padding") - } - } - - return src[:srcLen-paddingLen], nil -} diff --git a/management/server/activity/store/crypt_test.go b/management/server/activity/store/crypt_test.go deleted file mode 100644 index 700bbcd6b..000000000 --- a/management/server/activity/store/crypt_test.go +++ /dev/null @@ -1,310 +0,0 @@ -package store - -import ( - "bytes" - "testing" -) - -func TestGenerateKey(t *testing.T) { - testData := "exampl@netbird.io" - key, err := GenerateKey() - if err != nil { - t.Fatalf("failed to generate key: %s", err) - } - ee, err := NewFieldEncrypt(key) - if err != nil { - t.Fatalf("failed to init email encryption: %s", err) - } - - encrypted, err := ee.Encrypt(testData) - if err != nil { - t.Fatalf("failed to encrypt data: %s", err) - } - - if encrypted == "" { - t.Fatalf("invalid encrypted text") - } - - decrypted, err := ee.Decrypt(encrypted) - if err != nil { - t.Fatalf("failed to decrypt data: %s", err) - } - - if decrypted != testData { - t.Fatalf("decrypted data is not match with test data: %s, %s", testData, decrypted) - } -} - -func TestGenerateKeyLegacy(t *testing.T) { - testData := "exampl@netbird.io" - key, err := GenerateKey() - if err != nil { - t.Fatalf("failed to generate key: %s", err) - } - ee, err := NewFieldEncrypt(key) - if err != nil { - t.Fatalf("failed to init email encryption: %s", err) - } - - encrypted := ee.LegacyEncrypt(testData) - if encrypted == "" { - t.Fatalf("invalid encrypted text") - } - - decrypted, err := ee.LegacyDecrypt(encrypted) - if err != nil { - t.Fatalf("failed to decrypt data: %s", err) - } - - if decrypted != testData { - t.Fatalf("decrypted data is not match with test data: %s, %s", testData, decrypted) - } -} - -func TestCorruptKey(t *testing.T) { - testData := "exampl@netbird.io" - key, err := GenerateKey() - if err != nil { - t.Fatalf("failed to generate key: %s", err) - } - ee, err := NewFieldEncrypt(key) - if err != nil { - t.Fatalf("failed to init email encryption: %s", err) - } - - encrypted, err := ee.Encrypt(testData) - if err != nil { - t.Fatalf("failed to encrypt data: %s", err) - } - - if encrypted == "" { - t.Fatalf("invalid encrypted text") - } - - newKey, err := GenerateKey() - if err != nil { - t.Fatalf("failed to generate key: %s", err) - } - - ee, err = NewFieldEncrypt(newKey) - if err != nil { - t.Fatalf("failed to init email encryption: %s", err) - } - - res, _ := ee.Decrypt(encrypted) - if res == testData { - t.Fatalf("incorrect decryption, the result is: %s", res) - } -} - -func TestEncryptDecrypt(t *testing.T) { - // Generate a key for encryption/decryption - key, err := GenerateKey() - if err != nil { - t.Fatalf("Failed to generate key: %v", err) - } - - // Initialize the FieldEncrypt with the generated key - ec, err := NewFieldEncrypt(key) - if err != nil { - t.Fatalf("Failed to create FieldEncrypt: %v", err) - } - - // Test cases - testCases := []struct { - name string - input string - }{ - { - name: "Empty String", - input: "", - }, - { - name: "Short String", - input: "Hello", - }, - { - name: "String with Spaces", - input: "Hello, World!", - }, - { - name: "Long String", - input: "The quick brown fox jumps over the lazy dog.", - }, - { - name: "Unicode Characters", - input: "こんにちは世界", - }, - { - name: "Special Characters", - input: "!@#$%^&*()_+-=[]{}|;':\",./<>?", - }, - { - name: "Numeric String", - input: "1234567890", - }, - { - name: "Repeated Characters", - input: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", - }, - { - name: "Multi-block String", - input: "This is a longer string that will span multiple blocks in the encryption algorithm.", - }, - { - name: "Non-ASCII and ASCII Mix", - input: "Hello 世界 123", - }, - } - - for _, tc := range testCases { - t.Run(tc.name+" - Legacy", func(t *testing.T) { - // Legacy Encryption - encryptedLegacy := ec.LegacyEncrypt(tc.input) - if encryptedLegacy == "" { - t.Errorf("LegacyEncrypt returned empty string for input '%s'", tc.input) - } - - // Legacy Decryption - decryptedLegacy, err := ec.LegacyDecrypt(encryptedLegacy) - if err != nil { - t.Errorf("LegacyDecrypt failed for input '%s': %v", tc.input, err) - } - - // Verify that the decrypted value matches the original input - if decryptedLegacy != tc.input { - t.Errorf("LegacyDecrypt output '%s' does not match original input '%s'", decryptedLegacy, tc.input) - } - }) - - t.Run(tc.name+" - New", func(t *testing.T) { - // New Encryption - encryptedNew, err := ec.Encrypt(tc.input) - if err != nil { - t.Errorf("Encrypt failed for input '%s': %v", tc.input, err) - } - if encryptedNew == "" { - t.Errorf("Encrypt returned empty string for input '%s'", tc.input) - } - - // New Decryption - decryptedNew, err := ec.Decrypt(encryptedNew) - if err != nil { - t.Errorf("Decrypt failed for input '%s': %v", tc.input, err) - } - - // Verify that the decrypted value matches the original input - if decryptedNew != tc.input { - t.Errorf("Decrypt output '%s' does not match original input '%s'", decryptedNew, tc.input) - } - }) - } -} - -func TestPKCS5UnPadding(t *testing.T) { - tests := []struct { - name string - input []byte - expected []byte - expectError bool - }{ - { - name: "Valid Padding", - input: append([]byte("Hello, World!"), bytes.Repeat([]byte{4}, 4)...), - expected: []byte("Hello, World!"), - }, - { - name: "Empty Input", - input: []byte{}, - expectError: true, - }, - { - name: "Padding Length Zero", - input: append([]byte("Hello, World!"), bytes.Repeat([]byte{0}, 4)...), - expectError: true, - }, - { - name: "Padding Length Exceeds Block Size", - input: append([]byte("Hello, World!"), bytes.Repeat([]byte{17}, 17)...), - expectError: true, - }, - { - name: "Padding Length Exceeds Input Length", - input: []byte{5, 5, 5}, - expectError: true, - }, - { - name: "Invalid Padding Bytes", - input: append([]byte("Hello, World!"), []byte{2, 3, 4, 5}...), - expectError: true, - }, - { - name: "Valid Single Byte Padding", - input: append([]byte("Hello, World!"), byte(1)), - expected: []byte("Hello, World!"), - }, - { - name: "Invalid Mixed Padding Bytes", - input: append([]byte("Hello, World!"), []byte{3, 3, 2}...), - expectError: true, - }, - { - name: "Valid Full Block Padding", - input: append([]byte("Hello, World!"), bytes.Repeat([]byte{16}, 16)...), - expected: []byte("Hello, World!"), - }, - { - name: "Non-Padding Byte at End", - input: append([]byte("Hello, World!"), []byte{4, 4, 4, 5}...), - expectError: true, - }, - { - name: "Valid Padding with Different Text Length", - input: append([]byte("Test"), bytes.Repeat([]byte{12}, 12)...), - expected: []byte("Test"), - }, - { - name: "Padding Length Equal to Input Length", - input: bytes.Repeat([]byte{8}, 8), - expected: []byte{}, - }, - { - name: "Invalid Padding Length Zero (Again)", - input: append([]byte("Test"), byte(0)), - expectError: true, - }, - { - name: "Padding Length Greater Than Input", - input: []byte{10}, - expectError: true, - }, - { - name: "Input Length Not Multiple of Block Size", - input: append([]byte("Invalid Length"), byte(1)), - expected: []byte("Invalid Length"), - }, - { - name: "Valid Padding with Non-ASCII Characters", - input: append([]byte("こんにちは"), bytes.Repeat([]byte{2}, 2)...), - expected: []byte("こんにちは"), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := pkcs5UnPadding(tt.input) - if tt.expectError { - if err == nil { - t.Errorf("Expected error but got nil") - } - } else { - if err != nil { - t.Errorf("Did not expect error but got: %v", err) - } - if !bytes.Equal(result, tt.expected) { - t.Errorf("Expected output %v, got %v", tt.expected, result) - } - } - }) - } -} diff --git a/management/server/activity/store/migration.go b/management/server/activity/store/migration.go index af19a34eb..d0f165d5f 100644 --- a/management/server/activity/store/migration.go +++ b/management/server/activity/store/migration.go @@ -10,9 +10,10 @@ import ( "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/migration" + "github.com/netbirdio/netbird/util/crypt" ) -func migrate(ctx context.Context, crypt *FieldEncrypt, db *gorm.DB) error { +func migrate(ctx context.Context, crypt *crypt.FieldEncrypt, db *gorm.DB) error { migrations := getMigrations(ctx, crypt) for _, m := range migrations { @@ -26,7 +27,7 @@ func migrate(ctx context.Context, crypt *FieldEncrypt, db *gorm.DB) error { type migrationFunc func(*gorm.DB) error -func getMigrations(ctx context.Context, crypt *FieldEncrypt) []migrationFunc { +func getMigrations(ctx context.Context, crypt *crypt.FieldEncrypt) []migrationFunc { return []migrationFunc{ func(db *gorm.DB) error { return migration.MigrateNewField[activity.DeletedUser](ctx, db, "name", "") @@ -45,7 +46,7 @@ func getMigrations(ctx context.Context, crypt *FieldEncrypt) []migrationFunc { // migrateLegacyEncryptedUsersToGCM migrates previously encrypted data using // legacy CBC encryption with a static IV to the new GCM encryption method. -func migrateLegacyEncryptedUsersToGCM(ctx context.Context, db *gorm.DB, crypt *FieldEncrypt) error { +func migrateLegacyEncryptedUsersToGCM(ctx context.Context, db *gorm.DB, crypt *crypt.FieldEncrypt) error { model := &activity.DeletedUser{} if !db.Migrator().HasTable(model) { @@ -80,7 +81,7 @@ func migrateLegacyEncryptedUsersToGCM(ctx context.Context, db *gorm.DB, crypt *F return nil } -func updateDeletedUserData(transaction *gorm.DB, user activity.DeletedUser, crypt *FieldEncrypt) error { +func updateDeletedUserData(transaction *gorm.DB, user activity.DeletedUser, crypt *crypt.FieldEncrypt) error { var err error var decryptedEmail, decryptedName string diff --git a/management/server/activity/store/migration_test.go b/management/server/activity/store/migration_test.go index e3261d9fa..5c6f5ade8 100644 --- a/management/server/activity/store/migration_test.go +++ b/management/server/activity/store/migration_test.go @@ -12,6 +12,7 @@ import ( "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/migration" "github.com/netbirdio/netbird/management/server/testutil" + "github.com/netbirdio/netbird/util/crypt" ) const ( @@ -40,10 +41,10 @@ func setupDatabase(t *testing.T) *gorm.DB { func TestMigrateLegacyEncryptedUsersToGCM(t *testing.T) { db := setupDatabase(t) - key, err := GenerateKey() + key, err := crypt.GenerateKey() require.NoError(t, err, "Failed to generate key") - crypt, err := NewFieldEncrypt(key) + crypt, err := crypt.NewFieldEncrypt(key) require.NoError(t, err, "Failed to initialize FieldEncrypt") t.Run("empty table, no migration required", func(t *testing.T) { diff --git a/management/server/activity/store/sql_store.go b/management/server/activity/store/sql_store.go index ffecb6b8f..db614d0cd 100644 --- a/management/server/activity/store/sql_store.go +++ b/management/server/activity/store/sql_store.go @@ -18,6 +18,7 @@ import ( "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/util/crypt" ) const ( @@ -45,12 +46,12 @@ type eventWithNames struct { // Store is the implementation of the activity.Store interface backed by SQLite type Store struct { db *gorm.DB - fieldEncrypt *FieldEncrypt + fieldEncrypt *crypt.FieldEncrypt } // NewSqlStore creates a new Store with an event table if not exists. func NewSqlStore(ctx context.Context, dataDir string, encryptionKey string) (*Store, error) { - crypt, err := NewFieldEncrypt(encryptionKey) + fieldEncrypt, err := crypt.NewFieldEncrypt(encryptionKey) if err != nil { return nil, err @@ -61,7 +62,7 @@ func NewSqlStore(ctx context.Context, dataDir string, encryptionKey string) (*St return nil, fmt.Errorf("initialize database: %w", err) } - if err = migrate(ctx, crypt, db); err != nil { + if err = migrate(ctx, fieldEncrypt, db); err != nil { return nil, fmt.Errorf("events database migration: %w", err) } @@ -72,7 +73,7 @@ func NewSqlStore(ctx context.Context, dataDir string, encryptionKey string) (*St return &Store{ db: db, - fieldEncrypt: crypt, + fieldEncrypt: fieldEncrypt, }, nil } diff --git a/management/server/activity/store/sql_store_test.go b/management/server/activity/store/sql_store_test.go index 8c0d159df..d723f1623 100644 --- a/management/server/activity/store/sql_store_test.go +++ b/management/server/activity/store/sql_store_test.go @@ -9,11 +9,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/util/crypt" ) func TestNewSqlStore(t *testing.T) { dataDir := t.TempDir() - key, _ := GenerateKey() + key, _ := crypt.GenerateKey() store, err := NewSqlStore(context.Background(), dataDir, key) if err != nil { t.Fatal(err) diff --git a/util/crypt/crypt_test.go b/util/crypt/crypt_test.go new file mode 100644 index 000000000..143a4bbc2 --- /dev/null +++ b/util/crypt/crypt_test.go @@ -0,0 +1,139 @@ +package crypt + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGenerateKey(t *testing.T) { + key, err := GenerateKey() + require.NoError(t, err) + assert.NotEmpty(t, key) + + _, err = NewFieldEncrypt(key) + assert.NoError(t, err) +} + +func TestNewFieldEncrypt_InvalidKey(t *testing.T) { + tests := []struct { + name string + key string + }{ + {name: "invalid base64", key: "not-valid-base64!!!"}, + {name: "too short", key: "c2hvcnQ="}, + {name: "empty", key: ""}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := NewFieldEncrypt(tt.key) + assert.Error(t, err) + }) + } +} + +func TestEncryptDecrypt(t *testing.T) { + key, err := GenerateKey() + require.NoError(t, err) + + ec, err := NewFieldEncrypt(key) + require.NoError(t, err) + + testCases := []struct { + name string + input string + }{ + {name: "Empty String", input: ""}, + {name: "Short String", input: "Hello"}, + {name: "String with Spaces", input: "Hello, World!"}, + {name: "Long String", input: "The quick brown fox jumps over the lazy dog."}, + {name: "Unicode Characters", input: "こんにちは世界"}, + {name: "Special Characters", input: "!@#$%^&*()_+-=[]{}|;':\",./<>?"}, + {name: "Numeric String", input: "1234567890"}, + {name: "Email Address", input: "user@example.com"}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + encrypted, err := ec.Encrypt(tc.input) + require.NoError(t, err) + + decrypted, err := ec.Decrypt(encrypted) + require.NoError(t, err) + + assert.Equal(t, tc.input, decrypted) + }) + } +} + +func TestEncrypt_DifferentCiphertexts(t *testing.T) { + key, err := GenerateKey() + require.NoError(t, err) + + ec, err := NewFieldEncrypt(key) + require.NoError(t, err) + + plaintext := "same plaintext" + + // Encrypt the same plaintext multiple times + encrypted1, err := ec.Encrypt(plaintext) + require.NoError(t, err) + + encrypted2, err := ec.Encrypt(plaintext) + require.NoError(t, err) + + assert.NotEqual(t, encrypted1, encrypted2, "expected different ciphertexts for same plaintext (random nonce)") + + // Both should decrypt to the same plaintext + decrypted1, err := ec.Decrypt(encrypted1) + require.NoError(t, err) + + decrypted2, err := ec.Decrypt(encrypted2) + require.NoError(t, err) + + assert.Equal(t, plaintext, decrypted1) + assert.Equal(t, plaintext, decrypted2) +} + +func TestDecrypt_InvalidCiphertext(t *testing.T) { + key, err := GenerateKey() + assert.NoError(t, err) + + ec, err := NewFieldEncrypt(key) + assert.NoError(t, err) + + tests := []struct { + name string + ciphertext string + }{ + {name: "invalid base64", ciphertext: "not-valid!!!"}, + {name: "too short", ciphertext: "c2hvcnQ="}, + {name: "corrupted", ciphertext: "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXo="}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + payload, err := ec.Decrypt(tt.ciphertext) + assert.Error(t, err) + assert.Empty(t, payload) + }) + } +} + +func TestDecrypt_WrongKey(t *testing.T) { + key1, _ := GenerateKey() + key2, _ := GenerateKey() + + ec1, _ := NewFieldEncrypt(key1) + ec2, _ := NewFieldEncrypt(key2) + + plaintext := "secret data" + encrypted, _ := ec1.Encrypt(plaintext) + + // Try to decrypt with wrong key + payload, err := ec2.Decrypt(encrypted) + assert.Error(t, err) + assert.Empty(t, payload) +} diff --git a/util/crypt/legacy.go b/util/crypt/legacy.go new file mode 100644 index 000000000..f84e6964f --- /dev/null +++ b/util/crypt/legacy.go @@ -0,0 +1,71 @@ +package crypt + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "encoding/base64" + "fmt" +) + +// legacyIV is the static IV used by the legacy CBC encryption. +// Deprecated: This is kept only for backward compatibility with existing encrypted data. +var legacyIV = []byte{10, 22, 13, 79, 05, 8, 52, 91, 87, 98, 88, 98, 35, 25, 13, 05} + +// LegacyEncrypt encrypts plaintext using AES-CBC with a static IV. +// Deprecated: Use Encrypt instead. This method is kept only for backward compatibility. +func (f *FieldEncrypt) LegacyEncrypt(plaintext string) string { + padded := pkcs5Padding([]byte(plaintext)) + ciphertext := make([]byte, len(padded)) + cbc := cipher.NewCBCEncrypter(f.block, legacyIV) + cbc.CryptBlocks(ciphertext, padded) + return base64.StdEncoding.EncodeToString(ciphertext) +} + +// LegacyDecrypt decrypts ciphertext that was encrypted using AES-CBC with a static IV. +// Deprecated: This method is kept only for backward compatibility with existing encrypted data. +func (f *FieldEncrypt) LegacyDecrypt(ciphertext string) (string, error) { + data, err := base64.StdEncoding.DecodeString(ciphertext) + if err != nil { + return "", fmt.Errorf("decode ciphertext: %w", err) + } + + cbc := cipher.NewCBCDecrypter(f.block, legacyIV) + cbc.CryptBlocks(data, data) + + plaintext, err := pkcs5UnPadding(data) + if err != nil { + return "", fmt.Errorf("unpad plaintext: %w", err) + } + + return string(plaintext), nil +} + +// pkcs5Padding adds PKCS#5 padding to the input. +func pkcs5Padding(data []byte) []byte { + padding := aes.BlockSize - len(data)%aes.BlockSize + padText := bytes.Repeat([]byte{byte(padding)}, padding) + return append(data, padText...) +} + +// pkcs5UnPadding removes PKCS#5 padding from the input. +func pkcs5UnPadding(data []byte) ([]byte, error) { + length := len(data) + if length == 0 { + return nil, fmt.Errorf("input data is empty") + } + + paddingLen := int(data[length-1]) + if paddingLen == 0 || paddingLen > aes.BlockSize || paddingLen > length { + return nil, fmt.Errorf("invalid padding size") + } + + // Verify that all padding bytes are the same + for i := 0; i < paddingLen; i++ { + if data[length-1-i] != byte(paddingLen) { + return nil, fmt.Errorf("invalid padding") + } + } + + return data[:length-paddingLen], nil +} diff --git a/util/crypt/legacy_test.go b/util/crypt/legacy_test.go new file mode 100644 index 000000000..09b75a71f --- /dev/null +++ b/util/crypt/legacy_test.go @@ -0,0 +1,164 @@ +package crypt + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLegacyEncryptDecrypt(t *testing.T) { + testData := "exampl@netbird.io" + key, err := GenerateKey() + require.NoError(t, err) + + ec, err := NewFieldEncrypt(key) + require.NoError(t, err) + + encrypted := ec.LegacyEncrypt(testData) + assert.NotEmpty(t, encrypted) + + decrypted, err := ec.LegacyDecrypt(encrypted) + require.NoError(t, err) + + assert.Equal(t, testData, decrypted) +} + +func TestLegacyEncryptDecryptVariousInputs(t *testing.T) { + key, err := GenerateKey() + require.NoError(t, err) + + ec, err := NewFieldEncrypt(key) + require.NoError(t, err) + + testCases := []struct { + name string + input string + }{ + {name: "Empty String", input: ""}, + {name: "Short String", input: "Hello"}, + {name: "String with Spaces", input: "Hello, World!"}, + {name: "Long String", input: "The quick brown fox jumps over the lazy dog."}, + {name: "Unicode Characters", input: "こんにちは世界"}, + {name: "Special Characters", input: "!@#$%^&*()_+-=[]{}|;':\",./<>?"}, + {name: "Numeric String", input: "1234567890"}, + {name: "Repeated Characters", input: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, + {name: "Multi-block String", input: "This is a longer string that will span multiple blocks in the encryption algorithm."}, + {name: "Non-ASCII and ASCII Mix", input: "Hello 世界 123"}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + encrypted := ec.LegacyEncrypt(tc.input) + assert.NotEmpty(t, encrypted) + + decrypted, err := ec.LegacyDecrypt(encrypted) + require.NoError(t, err) + + assert.Equal(t, tc.input, decrypted) + }) + } +} + +func TestPKCS5UnPadding(t *testing.T) { + tests := []struct { + name string + input []byte + expected []byte + expectError bool + }{ + { + name: "Valid Padding", + input: append([]byte("Hello, World!"), bytes.Repeat([]byte{4}, 4)...), + expected: []byte("Hello, World!"), + }, + { + name: "Empty Input", + input: []byte{}, + expectError: true, + }, + { + name: "Padding Length Zero", + input: append([]byte("Hello, World!"), bytes.Repeat([]byte{0}, 4)...), + expectError: true, + }, + { + name: "Padding Length Exceeds Block Size", + input: append([]byte("Hello, World!"), bytes.Repeat([]byte{17}, 17)...), + expectError: true, + }, + { + name: "Padding Length Exceeds Input Length", + input: []byte{5, 5, 5}, + expectError: true, + }, + { + name: "Invalid Padding Bytes", + input: append([]byte("Hello, World!"), []byte{2, 3, 4, 5}...), + expectError: true, + }, + { + name: "Valid Single Byte Padding", + input: append([]byte("Hello, World!"), byte(1)), + expected: []byte("Hello, World!"), + }, + { + name: "Invalid Mixed Padding Bytes", + input: append([]byte("Hello, World!"), []byte{3, 3, 2}...), + expectError: true, + }, + { + name: "Valid Full Block Padding", + input: append([]byte("Hello, World!"), bytes.Repeat([]byte{16}, 16)...), + expected: []byte("Hello, World!"), + }, + { + name: "Non-Padding Byte at End", + input: append([]byte("Hello, World!"), []byte{4, 4, 4, 5}...), + expectError: true, + }, + { + name: "Valid Padding with Different Text Length", + input: append([]byte("Test"), bytes.Repeat([]byte{12}, 12)...), + expected: []byte("Test"), + }, + { + name: "Padding Length Equal to Input Length", + input: bytes.Repeat([]byte{8}, 8), + expected: []byte{}, + }, + { + name: "Invalid Padding Length Zero (Again)", + input: append([]byte("Test"), byte(0)), + expectError: true, + }, + { + name: "Padding Length Greater Than Input", + input: []byte{10}, + expectError: true, + }, + { + name: "Input Length Not Multiple of Block Size", + input: append([]byte("Invalid Length"), byte(1)), + expected: []byte("Invalid Length"), + }, + { + name: "Valid Padding with Non-ASCII Characters", + input: append([]byte("こんにちは"), bytes.Repeat([]byte{2}, 2)...), + expected: []byte("こんにちは"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := pkcs5UnPadding(tt.input) + if tt.expectError { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tt.expected, result) + } + }) + } +} From 269d5d1cbab7f67b289b885a3ed9d3493631ef16 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Fri, 23 Jan 2026 18:59:52 +0800 Subject: [PATCH 072/374] [client] Try next DNS upstream on SERVFAIL/REFUSED responses (#5163) --- client/internal/dns/local/local.go | 5 +- client/internal/dns/upstream.go | 84 +++++--- client/internal/dns/upstream_test.go | 284 +++++++++++++++++++++++++++ 3 files changed, 346 insertions(+), 27 deletions(-) diff --git a/client/internal/dns/local/local.go b/client/internal/dns/local/local.go index cbdc64997..b374bcc6a 100644 --- a/client/internal/dns/local/local.go +++ b/client/internal/dns/local/local.go @@ -81,7 +81,10 @@ func (d *Resolver) ProbeAvailability() {} // ServeDNS handles a DNS request func (d *Resolver) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { - logger := log.WithField("request_id", resutil.GetRequestID(w)) + logger := log.WithFields(log.Fields{ + "request_id": resutil.GetRequestID(w), + "dns_id": fmt.Sprintf("%04x", r.Id), + }) if len(r.Question) == 0 { logger.Debug("received local resolver request with no question") diff --git a/client/internal/dns/upstream.go b/client/internal/dns/upstream.go index 654d280ef..0fbd32771 100644 --- a/client/internal/dns/upstream.go +++ b/client/internal/dns/upstream.go @@ -71,6 +71,11 @@ type upstreamResolverBase struct { statusRecorder *peer.Status } +type upstreamFailure struct { + upstream netip.AddrPort + reason string +} + func newUpstreamResolverBase(ctx context.Context, statusRecorder *peer.Status, domain string) *upstreamResolverBase { ctx, cancel := context.WithCancel(ctx) @@ -114,7 +119,10 @@ func (u *upstreamResolverBase) Stop() { // ServeDNS handles a DNS request func (u *upstreamResolverBase) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { - logger := log.WithField("request_id", resutil.GetRequestID(w)) + logger := log.WithFields(log.Fields{ + "request_id": resutil.GetRequestID(w), + "dns_id": fmt.Sprintf("%04x", r.Id), + }) u.prepareRequest(r) @@ -123,11 +131,13 @@ func (u *upstreamResolverBase) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { return } - if u.tryUpstreamServers(w, r, logger) { - return + ok, failures := u.tryUpstreamServers(w, r, logger) + if len(failures) > 0 { + u.logUpstreamFailures(r.Question[0].Name, failures, ok, logger) + } + if !ok { + u.writeErrorResponse(w, r, logger) } - - u.writeErrorResponse(w, r, logger) } func (u *upstreamResolverBase) prepareRequest(r *dns.Msg) { @@ -136,7 +146,7 @@ func (u *upstreamResolverBase) prepareRequest(r *dns.Msg) { } } -func (u *upstreamResolverBase) tryUpstreamServers(w dns.ResponseWriter, r *dns.Msg, logger *log.Entry) bool { +func (u *upstreamResolverBase) tryUpstreamServers(w dns.ResponseWriter, r *dns.Msg, logger *log.Entry) (bool, []upstreamFailure) { timeout := u.upstreamTimeout if len(u.upstreamServers) > 1 { maxTotal := 5 * time.Second @@ -149,15 +159,19 @@ func (u *upstreamResolverBase) tryUpstreamServers(w dns.ResponseWriter, r *dns.M } } + var failures []upstreamFailure for _, upstream := range u.upstreamServers { - if u.queryUpstream(w, r, upstream, timeout, logger) { - return true + if failure := u.queryUpstream(w, r, upstream, timeout, logger); failure != nil { + failures = append(failures, *failure) + } else { + return true, failures } } - return false + return false, failures } -func (u *upstreamResolverBase) queryUpstream(w dns.ResponseWriter, r *dns.Msg, upstream netip.AddrPort, timeout time.Duration, logger *log.Entry) bool { +// queryUpstream queries a single upstream server. Returns nil on success, or failure info to try next upstream. +func (u *upstreamResolverBase) queryUpstream(w dns.ResponseWriter, r *dns.Msg, upstream netip.AddrPort, timeout time.Duration, logger *log.Entry) *upstreamFailure { var rm *dns.Msg var t time.Duration var err error @@ -171,31 +185,32 @@ func (u *upstreamResolverBase) queryUpstream(w dns.ResponseWriter, r *dns.Msg, u }() if err != nil { - u.handleUpstreamError(err, upstream, r.Question[0].Name, startTime, timeout, logger) - return false + return u.handleUpstreamError(err, upstream, startTime) } if rm == nil || !rm.Response { - logger.Warnf("no response from upstream %s for question domain=%s", upstream, r.Question[0].Name) - return false + return &upstreamFailure{upstream: upstream, reason: "no response"} } - return u.writeSuccessResponse(w, rm, upstream, r.Question[0].Name, t, logger) + if rm.Rcode == dns.RcodeServerFailure || rm.Rcode == dns.RcodeRefused { + return &upstreamFailure{upstream: upstream, reason: dns.RcodeToString[rm.Rcode]} + } + + u.writeSuccessResponse(w, rm, upstream, r.Question[0].Name, t, logger) + return nil } -func (u *upstreamResolverBase) handleUpstreamError(err error, upstream netip.AddrPort, domain string, startTime time.Time, timeout time.Duration, logger *log.Entry) { +func (u *upstreamResolverBase) handleUpstreamError(err error, upstream netip.AddrPort, startTime time.Time) *upstreamFailure { if !errors.Is(err, context.DeadlineExceeded) && !isTimeout(err) { - logger.Warnf("failed to query upstream %s for question domain=%s: %s", upstream, domain, err) - return + return &upstreamFailure{upstream: upstream, reason: err.Error()} } elapsed := time.Since(startTime) - timeoutMsg := fmt.Sprintf("upstream %s timed out for question domain=%s after %v (timeout=%v)", upstream, domain, elapsed.Truncate(time.Millisecond), timeout) + reason := fmt.Sprintf("timeout after %v", elapsed.Truncate(time.Millisecond)) if peerInfo := u.debugUpstreamTimeout(upstream); peerInfo != "" { - timeoutMsg += " " + peerInfo + reason += " " + peerInfo } - timeoutMsg += fmt.Sprintf(" - error: %v", err) - logger.Warn(timeoutMsg) + return &upstreamFailure{upstream: upstream, reason: reason} } func (u *upstreamResolverBase) writeSuccessResponse(w dns.ResponseWriter, rm *dns.Msg, upstream netip.AddrPort, domain string, t time.Duration, logger *log.Entry) bool { @@ -215,16 +230,34 @@ func (u *upstreamResolverBase) writeSuccessResponse(w dns.ResponseWriter, rm *dn return true } -func (u *upstreamResolverBase) writeErrorResponse(w dns.ResponseWriter, r *dns.Msg, logger *log.Entry) { - logger.Errorf("all queries to the %s failed for question domain=%s", u, r.Question[0].Name) +func (u *upstreamResolverBase) logUpstreamFailures(domain string, failures []upstreamFailure, succeeded bool, logger *log.Entry) { + totalUpstreams := len(u.upstreamServers) + failedCount := len(failures) + failureSummary := formatFailures(failures) + if succeeded { + logger.Warnf("%d/%d upstreams failed for domain=%s: %s", failedCount, totalUpstreams, domain, failureSummary) + } else { + logger.Errorf("%d/%d upstreams failed for domain=%s: %s", failedCount, totalUpstreams, domain, failureSummary) + } +} + +func (u *upstreamResolverBase) writeErrorResponse(w dns.ResponseWriter, r *dns.Msg, logger *log.Entry) { m := new(dns.Msg) m.SetRcode(r, dns.RcodeServerFailure) if err := w.WriteMsg(m); err != nil { - logger.Errorf("failed to write error response for %s for question domain=%s: %s", u, r.Question[0].Name, err) + logger.Errorf("write error response for domain=%s: %s", r.Question[0].Name, err) } } +func formatFailures(failures []upstreamFailure) string { + parts := make([]string, 0, len(failures)) + for _, f := range failures { + parts = append(parts, fmt.Sprintf("%s=%s", f.upstream, f.reason)) + } + return strings.Join(parts, ", ") +} + // ProbeAvailability tests all upstream servers simultaneously and // disables the resolver if none work func (u *upstreamResolverBase) ProbeAvailability() { @@ -468,7 +501,6 @@ func netstackExchange(ctx context.Context, nsNet *netstack.Net, r *dns.Msg, upst return reply, nil } - // FormatPeerStatus formats peer connection status information for debugging DNS timeouts func FormatPeerStatus(peerState *peer.State) string { isConnected := peerState.ConnStatus == peer.StatusConnected diff --git a/client/internal/dns/upstream_test.go b/client/internal/dns/upstream_test.go index 2852f4775..8b06e4475 100644 --- a/client/internal/dns/upstream_test.go +++ b/client/internal/dns/upstream_test.go @@ -2,6 +2,7 @@ package dns import ( "context" + "fmt" "net" "net/netip" "strings" @@ -9,6 +10,8 @@ import ( "time" "github.com/miekg/dns" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.zx2c4.com/wireguard/tun/netstack" "github.com/netbirdio/netbird/client/iface/device" @@ -140,6 +143,23 @@ func (c mockUpstreamResolver) exchange(_ context.Context, _ string, _ *dns.Msg) return c.r, c.rtt, c.err } +type mockUpstreamResponse struct { + msg *dns.Msg + err error +} + +type mockUpstreamResolverPerServer struct { + responses map[string]mockUpstreamResponse + rtt time.Duration +} + +func (c mockUpstreamResolverPerServer) exchange(_ context.Context, upstream string, _ *dns.Msg) (*dns.Msg, time.Duration, error) { + if r, ok := c.responses[upstream]; ok { + return r.msg, c.rtt, r.err + } + return nil, c.rtt, fmt.Errorf("no mock response for %s", upstream) +} + func TestUpstreamResolver_DeactivationReactivation(t *testing.T) { mockClient := &mockUpstreamResolver{ err: dns.ErrTime, @@ -191,3 +211,267 @@ func TestUpstreamResolver_DeactivationReactivation(t *testing.T) { t.Errorf("should be enabled") } } + +func TestUpstreamResolver_Failover(t *testing.T) { + upstream1 := netip.MustParseAddrPort("192.0.2.1:53") + upstream2 := netip.MustParseAddrPort("192.0.2.2:53") + + successAnswer := "192.0.2.100" + timeoutErr := &net.OpError{Op: "read", Err: fmt.Errorf("i/o timeout")} + + testCases := []struct { + name string + upstream1 mockUpstreamResponse + upstream2 mockUpstreamResponse + expectedRcode int + expectAnswer bool + expectTrySecond bool + }{ + { + name: "success on first upstream", + upstream1: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeSuccess, successAnswer)}, + upstream2: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeSuccess, successAnswer)}, + expectedRcode: dns.RcodeSuccess, + expectAnswer: true, + expectTrySecond: false, + }, + { + name: "SERVFAIL from first should try second", + upstream1: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeServerFailure, "")}, + upstream2: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeSuccess, successAnswer)}, + expectedRcode: dns.RcodeSuccess, + expectAnswer: true, + expectTrySecond: true, + }, + { + name: "REFUSED from first should try second", + upstream1: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeRefused, "")}, + upstream2: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeSuccess, successAnswer)}, + expectedRcode: dns.RcodeSuccess, + expectAnswer: true, + expectTrySecond: true, + }, + { + name: "NXDOMAIN from first should NOT try second", + upstream1: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeNameError, "")}, + upstream2: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeSuccess, successAnswer)}, + expectedRcode: dns.RcodeNameError, + expectAnswer: false, + expectTrySecond: false, + }, + { + name: "timeout from first should try second", + upstream1: mockUpstreamResponse{err: timeoutErr}, + upstream2: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeSuccess, successAnswer)}, + expectedRcode: dns.RcodeSuccess, + expectAnswer: true, + expectTrySecond: true, + }, + { + name: "no response from first should try second", + upstream1: mockUpstreamResponse{msg: nil}, + upstream2: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeSuccess, successAnswer)}, + expectedRcode: dns.RcodeSuccess, + expectAnswer: true, + expectTrySecond: true, + }, + { + name: "both upstreams return SERVFAIL", + upstream1: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeServerFailure, "")}, + upstream2: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeServerFailure, "")}, + expectedRcode: dns.RcodeServerFailure, + expectAnswer: false, + expectTrySecond: true, + }, + { + name: "both upstreams timeout", + upstream1: mockUpstreamResponse{err: timeoutErr}, + upstream2: mockUpstreamResponse{err: timeoutErr}, + expectedRcode: dns.RcodeServerFailure, + expectAnswer: false, + expectTrySecond: true, + }, + { + name: "first SERVFAIL then timeout", + upstream1: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeServerFailure, "")}, + upstream2: mockUpstreamResponse{err: timeoutErr}, + expectedRcode: dns.RcodeServerFailure, + expectAnswer: false, + expectTrySecond: true, + }, + { + name: "first timeout then SERVFAIL", + upstream1: mockUpstreamResponse{err: timeoutErr}, + upstream2: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeServerFailure, "")}, + expectedRcode: dns.RcodeServerFailure, + expectAnswer: false, + expectTrySecond: true, + }, + { + name: "first REFUSED then SERVFAIL", + upstream1: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeRefused, "")}, + upstream2: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeServerFailure, "")}, + expectedRcode: dns.RcodeServerFailure, + expectAnswer: false, + expectTrySecond: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var queriedUpstreams []string + mockClient := &mockUpstreamResolverPerServer{ + responses: map[string]mockUpstreamResponse{ + upstream1.String(): tc.upstream1, + upstream2.String(): tc.upstream2, + }, + rtt: time.Millisecond, + } + + trackingClient := &trackingMockClient{ + inner: mockClient, + queriedUpstreams: &queriedUpstreams, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resolver := &upstreamResolverBase{ + ctx: ctx, + upstreamClient: trackingClient, + upstreamServers: []netip.AddrPort{upstream1, upstream2}, + upstreamTimeout: UpstreamTimeout, + } + + var responseMSG *dns.Msg + responseWriter := &test.MockResponseWriter{ + WriteMsgFunc: func(m *dns.Msg) error { + responseMSG = m + return nil + }, + } + + inputMSG := new(dns.Msg).SetQuestion("example.com.", dns.TypeA) + resolver.ServeDNS(responseWriter, inputMSG) + + require.NotNil(t, responseMSG, "should write a response") + assert.Equal(t, tc.expectedRcode, responseMSG.Rcode, "unexpected rcode") + + if tc.expectAnswer { + require.NotEmpty(t, responseMSG.Answer, "expected answer records") + assert.Contains(t, responseMSG.Answer[0].String(), successAnswer) + } + + if tc.expectTrySecond { + assert.Len(t, queriedUpstreams, 2, "should have tried both upstreams") + assert.Equal(t, upstream1.String(), queriedUpstreams[0]) + assert.Equal(t, upstream2.String(), queriedUpstreams[1]) + } else { + assert.Len(t, queriedUpstreams, 1, "should have only tried first upstream") + assert.Equal(t, upstream1.String(), queriedUpstreams[0]) + } + }) + } +} + +type trackingMockClient struct { + inner *mockUpstreamResolverPerServer + queriedUpstreams *[]string +} + +func (t *trackingMockClient) exchange(ctx context.Context, upstream string, r *dns.Msg) (*dns.Msg, time.Duration, error) { + *t.queriedUpstreams = append(*t.queriedUpstreams, upstream) + return t.inner.exchange(ctx, upstream, r) +} + +func buildMockResponse(rcode int, answer string) *dns.Msg { + m := new(dns.Msg) + m.Response = true + m.Rcode = rcode + + if rcode == dns.RcodeSuccess && answer != "" { + m.Answer = []dns.RR{ + &dns.A{ + Hdr: dns.RR_Header{ + Name: "example.com.", + Rrtype: dns.TypeA, + Class: dns.ClassINET, + Ttl: 300, + }, + A: net.ParseIP(answer), + }, + } + } + return m +} + +func TestUpstreamResolver_SingleUpstreamFailure(t *testing.T) { + upstream := netip.MustParseAddrPort("192.0.2.1:53") + + mockClient := &mockUpstreamResolverPerServer{ + responses: map[string]mockUpstreamResponse{ + upstream.String(): {msg: buildMockResponse(dns.RcodeServerFailure, "")}, + }, + rtt: time.Millisecond, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resolver := &upstreamResolverBase{ + ctx: ctx, + upstreamClient: mockClient, + upstreamServers: []netip.AddrPort{upstream}, + upstreamTimeout: UpstreamTimeout, + } + + var responseMSG *dns.Msg + responseWriter := &test.MockResponseWriter{ + WriteMsgFunc: func(m *dns.Msg) error { + responseMSG = m + return nil + }, + } + + inputMSG := new(dns.Msg).SetQuestion("example.com.", dns.TypeA) + resolver.ServeDNS(responseWriter, inputMSG) + + require.NotNil(t, responseMSG, "should write a response") + assert.Equal(t, dns.RcodeServerFailure, responseMSG.Rcode, "single upstream SERVFAIL should return SERVFAIL") +} + +func TestFormatFailures(t *testing.T) { + testCases := []struct { + name string + failures []upstreamFailure + expected string + }{ + { + name: "empty slice", + failures: []upstreamFailure{}, + expected: "", + }, + { + name: "single failure", + failures: []upstreamFailure{ + {upstream: netip.MustParseAddrPort("8.8.8.8:53"), reason: "SERVFAIL"}, + }, + expected: "8.8.8.8:53=SERVFAIL", + }, + { + name: "multiple failures", + failures: []upstreamFailure{ + {upstream: netip.MustParseAddrPort("8.8.8.8:53"), reason: "SERVFAIL"}, + {upstream: netip.MustParseAddrPort("8.8.4.4:53"), reason: "timeout after 2s"}, + }, + expected: "8.8.8.8:53=SERVFAIL, 8.8.4.4:53=timeout after 2s", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := formatFailures(tc.failures) + assert.Equal(t, tc.expected, result) + }) + } +} From 1a32e4c223acc3515e4a5dcf79dd4dadf8e2f60d Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Fri, 23 Jan 2026 22:15:34 +0800 Subject: [PATCH 073/374] [client] Fix IPv4-only in bind proxy (#5154) --- client/iface/wgproxy/bind/proxy.go | 23 ++++++++++++++++++----- client/iface/wgproxy/ebpf/wrapper.go | 4 +++- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/client/iface/wgproxy/bind/proxy.go b/client/iface/wgproxy/bind/proxy.go index eb585d8a2..9978cceee 100644 --- a/client/iface/wgproxy/bind/proxy.go +++ b/client/iface/wgproxy/bind/proxy.go @@ -117,16 +117,29 @@ func (p *ProxyBind) RedirectAs(endpoint *net.UDPAddr) { p.pausedCond.L.Lock() p.paused = false - p.wgCurrentUsed = addrToEndpoint(endpoint) + ep, err := addrToEndpoint(endpoint) + if err != nil { + log.Errorf("failed to convert endpoint address: %v", err) + } else { + p.wgCurrentUsed = ep + } p.pausedCond.Signal() p.pausedCond.L.Unlock() } -func addrToEndpoint(addr *net.UDPAddr) *bind.Endpoint { - ip, _ := netip.AddrFromSlice(addr.IP.To4()) - addrPort := netip.AddrPortFrom(ip, uint16(addr.Port)) - return &bind.Endpoint{AddrPort: addrPort} +func addrToEndpoint(addr *net.UDPAddr) (*bind.Endpoint, error) { + if addr == nil { + return nil, errors.New("nil address") + } + + ip, ok := netip.AddrFromSlice(addr.IP) + if !ok { + return nil, fmt.Errorf("convert %s to netip.Addr", addr) + } + + addrPort := netip.AddrPortFrom(ip.Unmap(), uint16(addr.Port)) + return &bind.Endpoint{AddrPort: addrPort}, nil } func (p *ProxyBind) CloseConn() error { diff --git a/client/iface/wgproxy/ebpf/wrapper.go b/client/iface/wgproxy/ebpf/wrapper.go index ff44d30c0..f1f05a7c9 100644 --- a/client/iface/wgproxy/ebpf/wrapper.go +++ b/client/iface/wgproxy/ebpf/wrapper.go @@ -94,7 +94,9 @@ func (p *ProxyWrapper) RedirectAs(endpoint *net.UDPAddr) { p.pausedCond.L.Lock() p.paused = false - p.wgEndpointCurrentUsedAddr = endpoint + if endpoint != nil && endpoint.IP != nil { + p.wgEndpointCurrentUsedAddr = endpoint + } p.pausedCond.Signal() p.pausedCond.L.Unlock() From ee3a67d2d8007dd380ad9e0f772f07863ab9ff7d Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Fri, 23 Jan 2026 17:06:07 +0100 Subject: [PATCH 074/374] [client] Fix/health result in bundle (#5164) * Add support for optional status refresh callback during debug bundle generation * Always update wg status * Remove duplicated wg status call --- client/internal/debug/debug.go | 7 +++++++ client/internal/engine.go | 24 ++++++------------------ client/internal/peer/status.go | 32 ++++++++++++++++++++++++++++++++ client/server/debug.go | 13 +++++++++++++ client/server/server.go | 4 ++++ 5 files changed, 62 insertions(+), 18 deletions(-) diff --git a/client/internal/debug/debug.go b/client/internal/debug/debug.go index 07a19036a..0f8243e7a 100644 --- a/client/internal/debug/debug.go +++ b/client/internal/debug/debug.go @@ -228,6 +228,7 @@ type BundleGenerator struct { syncResponse *mgmProto.SyncResponse logPath string cpuProfile []byte + refreshStatus func() // Optional callback to refresh status before bundle generation anonymize bool includeSystemInfo bool @@ -248,6 +249,7 @@ type GeneratorDependencies struct { SyncResponse *mgmProto.SyncResponse LogPath string CPUProfile []byte + RefreshStatus func() // Optional callback to refresh status before bundle generation } func NewBundleGenerator(deps GeneratorDependencies, cfg BundleConfig) *BundleGenerator { @@ -265,6 +267,7 @@ func NewBundleGenerator(deps GeneratorDependencies, cfg BundleConfig) *BundleGen syncResponse: deps.SyncResponse, logPath: deps.LogPath, cpuProfile: deps.CPUProfile, + refreshStatus: deps.RefreshStatus, anonymize: cfg.Anonymize, includeSystemInfo: cfg.IncludeSystemInfo, @@ -408,6 +411,10 @@ func (g *BundleGenerator) addStatus() error { profName = activeProf.Name } + if g.refreshStatus != nil { + g.refreshStatus() + } + fullStatus := g.statusRecorder.GetFullStatus() protoFullStatus := nbstatus.ToProtoFullStatus(fullStatus) protoFullStatus.Events = g.statusRecorder.GetEventHistory() diff --git a/client/internal/engine.go b/client/internal/engine.go index 25a4e4048..a391ba22a 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -1050,6 +1050,9 @@ func (e *Engine) handleBundle(params *mgmProto.BundleParameters) (*mgmProto.JobR StatusRecorder: e.statusRecorder, SyncResponse: syncResponse, LogPath: e.config.LogPath, + RefreshStatus: func() { + e.RunHealthProbes(true) + }, } bundleJobParams := debug.BundleConfig{ @@ -1827,7 +1830,7 @@ func (e *Engine) getRosenpassAddr() string { return "" } -// RunHealthProbes executes health checks for Signal, Management, Relay and WireGuard services +// RunHealthProbes executes health checks for Signal, Management, Relay, and WireGuard services // and updates the status recorder with the latest states. func (e *Engine) RunHealthProbes(waitForResult bool) bool { e.syncMsgMux.Lock() @@ -1841,23 +1844,8 @@ func (e *Engine) RunHealthProbes(waitForResult bool) bool { stuns := slices.Clone(e.STUNs) turns := slices.Clone(e.TURNs) - if e.wgInterface != nil { - stats, err := e.wgInterface.GetStats() - if err != nil { - log.Warnf("failed to get wireguard stats: %v", err) - e.syncMsgMux.Unlock() - return false - } - for _, key := range e.peerStore.PeersPubKey() { - // wgStats could be zero value, in which case we just reset the stats - wgStats, ok := stats[key] - if !ok { - continue - } - if err := e.statusRecorder.UpdateWireGuardPeerState(key, wgStats); err != nil { - log.Debugf("failed to update wg stats for peer %s: %s", key, err) - } - } + if err := e.statusRecorder.RefreshWireGuardStats(); err != nil { + log.Debugf("failed to refresh WireGuard stats: %v", err) } e.syncMsgMux.Unlock() diff --git a/client/internal/peer/status.go b/client/internal/peer/status.go index 697bda2ff..abedc208e 100644 --- a/client/internal/peer/status.go +++ b/client/internal/peer/status.go @@ -1145,6 +1145,38 @@ func (d *Status) PeersStatus() (*configurer.Stats, error) { return d.wgIface.FullStats() } +// RefreshWireGuardStats fetches fresh WireGuard statistics from the interface +// and updates the cached peer states. This ensures accurate handshake times and +// transfer statistics in status reports without running full health probes. +func (d *Status) RefreshWireGuardStats() error { + d.mux.Lock() + defer d.mux.Unlock() + + if d.wgIface == nil { + return nil // silently skip if interface not set + } + + stats, err := d.wgIface.FullStats() + if err != nil { + return fmt.Errorf("get wireguard stats: %w", err) + } + + // Update each peer's WireGuard statistics + for _, peerStats := range stats.Peers { + peerState, ok := d.peers[peerStats.PublicKey] + if !ok { + continue + } + + peerState.LastWireguardHandshake = peerStats.LastHandshake + peerState.BytesRx = peerStats.RxBytes + peerState.BytesTx = peerStats.TxBytes + d.peers[peerStats.PublicKey] = peerState + } + + return nil +} + type EventQueue struct { maxSize int events []*proto.SystemEvent diff --git a/client/server/debug.go b/client/server/debug.go index 5646cea79..4c531efba 100644 --- a/client/server/debug.go +++ b/client/server/debug.go @@ -34,6 +34,18 @@ func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) ( }() } + // Prepare refresh callback for health probes + var refreshStatus func() + if s.connectClient != nil { + engine := s.connectClient.Engine() + if engine != nil { + refreshStatus = func() { + log.Debug("refreshing system health status for debug bundle") + engine.RunHealthProbes(true) + } + } + } + bundleGenerator := debug.NewBundleGenerator( debug.GeneratorDependencies{ InternalConfig: s.config, @@ -41,6 +53,7 @@ func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) ( SyncResponse: syncResponse, LogPath: s.logFile, CPUProfile: cpuProfileData, + RefreshStatus: refreshStatus, }, debug.BundleConfig{ Anonymize: req.GetAnonymize(), diff --git a/client/server/server.go b/client/server/server.go index e3c95077a..b291d7f71 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -1327,6 +1327,10 @@ func (s *Server) runProbes(waitForProbeResult bool) { if engine.RunHealthProbes(waitForProbeResult) { s.lastProbe = time.Now() } + } else { + if err := s.statusRecorder.RefreshWireGuardStats(); err != nil { + log.Debugf("failed to refresh WireGuard stats: %v", err) + } } } From 737d6061bffe8a748c2317c1f8b9469783c130b1 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Fri, 23 Jan 2026 18:05:22 +0100 Subject: [PATCH 075/374] [management] ephemeral peers track on login (#5165) --- .../network_map/controller/controller.go | 4 ++++ .../controllers/network_map/interface.go | 2 ++ .../controllers/network_map/interface_mock.go | 16 ++++++++++++++-- management/server/peer.go | 5 +++++ 4 files changed, 25 insertions(+), 2 deletions(-) diff --git a/management/internals/controllers/network_map/controller/controller.go b/management/internals/controllers/network_map/controller/controller.go index d46737c26..5ae64e9f1 100644 --- a/management/internals/controllers/network_map/controller/controller.go +++ b/management/internals/controllers/network_map/controller/controller.go @@ -856,3 +856,7 @@ func (c *Controller) GetNetworkMap(ctx context.Context, peerID string) (*types.N func (c *Controller) DisconnectPeers(ctx context.Context, accountId string, peerIDs []string) { c.peersUpdateManager.CloseChannels(ctx, peerIDs) } + +func (c *Controller) TrackEphemeralPeer(ctx context.Context, peer *nbpeer.Peer) { + c.EphemeralPeersManager.OnPeerDisconnected(ctx, peer) +} diff --git a/management/internals/controllers/network_map/interface.go b/management/internals/controllers/network_map/interface.go index b1de7d017..64caac861 100644 --- a/management/internals/controllers/network_map/interface.go +++ b/management/internals/controllers/network_map/interface.go @@ -36,4 +36,6 @@ type Controller interface { DisconnectPeers(ctx context.Context, accountId string, peerIDs []string) OnPeerConnected(ctx context.Context, accountID string, peerID string) (chan *UpdateMessage, error) OnPeerDisconnected(ctx context.Context, accountID string, peerID string) + + TrackEphemeralPeer(ctx context.Context, peer *nbpeer.Peer) } diff --git a/management/internals/controllers/network_map/interface_mock.go b/management/internals/controllers/network_map/interface_mock.go index 5a98eefa8..4e86d2973 100644 --- a/management/internals/controllers/network_map/interface_mock.go +++ b/management/internals/controllers/network_map/interface_mock.go @@ -1,9 +1,9 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: ./interface.go +// Source: management/internals/controllers/network_map/interface.go // // Generated by this command: // -// mockgen -package network_map -destination=interface_mock.go -source=./interface.go -build_flags=-mod=mod +// mockgen -package network_map -destination=management/internals/controllers/network_map/interface_mock.go -source=management/internals/controllers/network_map/interface.go -build_flags=-mod=mod // // Package network_map is a generated GoMock package. @@ -211,6 +211,18 @@ func (mr *MockControllerMockRecorder) StartWarmup(arg0 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartWarmup", reflect.TypeOf((*MockController)(nil).StartWarmup), arg0) } +// TrackEphemeralPeer mocks base method. +func (m *MockController) TrackEphemeralPeer(ctx context.Context, arg1 *peer.Peer) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "TrackEphemeralPeer", ctx, arg1) +} + +// TrackEphemeralPeer indicates an expected call of TrackEphemeralPeer. +func (mr *MockControllerMockRecorder) TrackEphemeralPeer(ctx, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TrackEphemeralPeer", reflect.TypeOf((*MockController)(nil).TrackEphemeralPeer), ctx, arg1) +} + // UpdateAccountPeer mocks base method. func (m *MockController) UpdateAccountPeer(ctx context.Context, accountId, peerId string) error { m.ctrl.T.Helper() diff --git a/management/server/peer.go b/management/server/peer.go index d6eb2aecd..80c74e209 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -728,6 +728,11 @@ func (am *DefaultAccountManager) AddPeer(ctx context.Context, accountID, setupKe return fmt.Errorf("failed adding peer to All group: %w", err) } + if temporary { + // we should track ephemeral peers to be able to clean them if the peer don't sync and be marked as connected + am.networkMapController.TrackEphemeralPeer(ctx, newPeer) + } + if addedByUser { err := transaction.SaveUserLastLogin(ctx, accountID, userID, newPeer.GetLastLogin()) if err != nil { From c61568ceb4ad2ae9c3dd31ae6b689fff81e0e336 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Fri, 23 Jan 2026 18:06:54 +0100 Subject: [PATCH 076/374] [client] Change default rosenpass log level (#5137) * Change default rosenpass log level - Add support to environment configuration - Change default log level to info * use .String() for print log level --- client/internal/rosenpass/manager.go | 29 ++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/client/internal/rosenpass/manager.go b/client/internal/rosenpass/manager.go index 26a1eef58..1faa22dc5 100644 --- a/client/internal/rosenpass/manager.go +++ b/client/internal/rosenpass/manager.go @@ -17,6 +17,11 @@ import ( "golang.zx2c4.com/wireguard/wgctrl/wgtypes" ) +const ( + defaultLog = slog.LevelInfo + defaultLogLevelVar = "NB_ROSENPASS_LOG_LEVEL" +) + func hashRosenpassKey(key []byte) string { hasher := sha256.New() hasher.Write(key) @@ -45,7 +50,7 @@ func NewManager(preSharedKey *wgtypes.Key, wgIfaceName string) (*Manager, error) } rpKeyHash := hashRosenpassKey(public) - log.Debugf("generated new rosenpass key pair with public key %s", rpKeyHash) + log.Tracef("generated new rosenpass key pair with public key %s", rpKeyHash) return &Manager{ifaceName: wgIfaceName, rpKeyHash: rpKeyHash, spk: public, ssk: secret, preSharedKey: (*[32]byte)(preSharedKey), rpPeerIDs: make(map[string]*rp.PeerID), lock: sync.Mutex{}}, nil } @@ -101,7 +106,7 @@ func (m *Manager) removePeer(wireGuardPubKey string) error { func (m *Manager) generateConfig() (rp.Config, error) { opts := &slog.HandlerOptions{ - Level: slog.LevelDebug, + Level: getLogLevel(), } logger := slog.New(slog.NewTextHandler(os.Stdout, opts)) cfg := rp.Config{Logger: logger} @@ -133,6 +138,26 @@ func (m *Manager) generateConfig() (rp.Config, error) { return cfg, nil } +func getLogLevel() slog.Level { + level, ok := os.LookupEnv(defaultLogLevelVar) + if !ok { + return defaultLog + } + switch strings.ToLower(level) { + case "debug": + return slog.LevelDebug + case "info": + return slog.LevelInfo + case "warn": + return slog.LevelWarn + case "error": + return slog.LevelError + default: + log.Warnf("unknown log level: %s. Using default %s", level, defaultLog.String()) + return defaultLog + } +} + func (m *Manager) OnDisconnected(peerKey string) { m.lock.Lock() defer m.lock.Unlock() From 67211010f7240d53734abd922777c32fccb02754 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Fri, 23 Jan 2026 18:39:45 +0100 Subject: [PATCH 077/374] [client, gui] fix exit nodes menu on reconnect, remove tooltips (#5167) * [client, gui] fix exit nodes menu on reconnect clean s.exitNodeStates when disconnecting * disable tooltip for exit nodes and settings --- client/ui/client_ui.go | 5 ++--- client/ui/const.go | 4 +--- client/ui/event_handler.go | 2 ++ client/ui/network.go | 2 +- 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/client/ui/client_ui.go b/client/ui/client_ui.go index 5d955ed25..0290e17d5 100644 --- a/client/ui/client_ui.go +++ b/client/ui/client_ui.go @@ -1033,7 +1033,7 @@ func (s *serviceClient) onTrayReady() { s.mDown.Disable() systray.AddSeparator() - s.mSettings = systray.AddMenuItem("Settings", settingsMenuDescr) + s.mSettings = systray.AddMenuItem("Settings", disabledMenuDescr) s.mAllowSSH = s.mSettings.AddSubMenuItemCheckbox("Allow SSH", allowSSHMenuDescr, false) s.mAutoConnect = s.mSettings.AddSubMenuItemCheckbox("Connect on Startup", autoConnectMenuDescr, false) s.mEnableRosenpass = s.mSettings.AddSubMenuItemCheckbox("Enable Quantum-Resistance", quantumResistanceMenuDescr, false) @@ -1060,7 +1060,7 @@ func (s *serviceClient) onTrayReady() { } s.exitNodeMu.Lock() - s.mExitNode = systray.AddMenuItem("Exit Node", exitNodeMenuDescr) + s.mExitNode = systray.AddMenuItem("Exit Node", disabledMenuDescr) s.mExitNode.Disable() s.exitNodeMu.Unlock() @@ -1261,7 +1261,6 @@ func (s *serviceClient) setSettingsEnabled(enabled bool) { if s.mSettings != nil { if enabled { s.mSettings.Enable() - s.mSettings.SetTooltip(settingsMenuDescr) } else { s.mSettings.Hide() s.mSettings.SetTooltip("Settings are disabled by daemon") diff --git a/client/ui/const.go b/client/ui/const.go index 332282c17..48619be75 100644 --- a/client/ui/const.go +++ b/client/ui/const.go @@ -1,8 +1,6 @@ package main const ( - settingsMenuDescr = "Settings of the application" - profilesMenuDescr = "Manage your profiles" allowSSHMenuDescr = "Allow SSH connections" autoConnectMenuDescr = "Connect automatically when the service starts" quantumResistanceMenuDescr = "Enable post-quantum security via Rosenpass" @@ -11,7 +9,7 @@ const ( notificationsMenuDescr = "Enable notifications" advancedSettingsMenuDescr = "Advanced settings of the application" debugBundleMenuDescr = "Create and open debug information bundle" - exitNodeMenuDescr = "Select exit node for routing traffic" + disabledMenuDescr = "" networksMenuDescr = "Open the networks management window" latestVersionMenuDescr = "Download latest version" quitMenuDescr = "Quit the client app" diff --git a/client/ui/event_handler.go b/client/ui/event_handler.go index 9ffacd926..cc55c31dd 100644 --- a/client/ui/event_handler.go +++ b/client/ui/event_handler.go @@ -99,6 +99,8 @@ func (h *eventHandler) handleConnectClick() { func (h *eventHandler) handleDisconnectClick() { h.client.mDown.Disable() + h.client.exitNodeStates = []exitNodeState{} + if h.client.connectCancel != nil { log.Debugf("cancelling ongoing connect operation") h.client.connectCancel() diff --git a/client/ui/network.go b/client/ui/network.go index fb73efd7b..371eb975b 100644 --- a/client/ui/network.go +++ b/client/ui/network.go @@ -390,7 +390,7 @@ func (s *serviceClient) recreateExitNodeMenu(exitNodes []*proto.Network) { if runtime.GOOS == "linux" || runtime.GOOS == "freebsd" { s.mExitNode.Remove() - s.mExitNode = systray.AddMenuItem("Exit Node", exitNodeMenuDescr) + s.mExitNode = systray.AddMenuItem("Exit Node", disabledMenuDescr) } var showDeselectAll bool From ded04b7627bd8d48d7e672a7b3b06b55a67b3058 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Fri, 23 Jan 2026 22:28:32 +0100 Subject: [PATCH 078/374] [client] Consolidate authentication logic (#5010) * Consolidate authentication logic - Moving auth functions from client/internal to client/internal/auth package - Creating unified auth.Auth client with NewAuth() constructor - Replacing direct auth function calls with auth client methods - Refactoring device flow and PKCE flow implementations - Updating iOS/Android/server code to use new auth client API * Refactor PKCE auth and login methods - Remove unnecessary internal package reference in PKCE flow test - Adjust context assignment placement in iOS and Android login methods --- client/android/login.go | 109 ++-- client/cmd/login.go | 46 +- client/embed/embed.go | 9 +- client/internal/auth/auth.go | 499 ++++++++++++++++++ client/internal/auth/device_flow.go | 72 ++- client/internal/auth/device_flow_test.go | 44 +- client/internal/auth/oauth.go | 29 +- client/internal/auth/pkce_flow.go | 74 ++- client/internal/auth/pkce_flow_test.go | 3 +- .../internal/auth/pkce_flow_windows_test.go | 4 +- client/internal/device_auth.go | 136 ----- client/internal/login.go | 201 ------- client/internal/pkce_auth.go | 138 ----- client/ios/NetBirdSDK/client.go | 22 +- client/ios/NetBirdSDK/login.go | 127 ++--- client/server/server.go | 16 +- 16 files changed, 805 insertions(+), 724 deletions(-) create mode 100644 client/internal/auth/auth.go delete mode 100644 client/internal/device_auth.go delete mode 100644 client/internal/login.go delete mode 100644 client/internal/pkce_auth.go diff --git a/client/android/login.go b/client/android/login.go index 4d4c7a650..a9422cdbf 100644 --- a/client/android/login.go +++ b/client/android/login.go @@ -3,15 +3,7 @@ package android import ( "context" "fmt" - "time" - "github.com/cenkalti/backoff/v4" - log "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - gstatus "google.golang.org/grpc/status" - - "github.com/netbirdio/netbird/client/cmd" - "github.com/netbirdio/netbird/client/internal" "github.com/netbirdio/netbird/client/internal/auth" "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/system" @@ -84,34 +76,21 @@ func (a *Auth) SaveConfigIfSSOSupported(listener SSOListener) { } func (a *Auth) saveConfigIfSSOSupported() (bool, error) { - supportsSSO := true - err := a.withBackOff(a.ctx, func() (err error) { - _, err = internal.GetPKCEAuthorizationFlowInfo(a.ctx, a.config.PrivateKey, a.config.ManagementURL, nil) - if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.NotFound || s.Code() == codes.Unimplemented) { - _, err = internal.GetDeviceAuthorizationFlowInfo(a.ctx, a.config.PrivateKey, a.config.ManagementURL) - s, ok := gstatus.FromError(err) - if !ok { - return err - } - if s.Code() == codes.NotFound || s.Code() == codes.Unimplemented { - supportsSSO = false - err = nil - } + authClient, err := auth.NewAuth(a.ctx, a.config.PrivateKey, a.config.ManagementURL, a.config) + if err != nil { + return false, fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() - return err - } - - return err - }) + supportsSSO, err := authClient.IsSSOSupported(a.ctx) + if err != nil { + return false, fmt.Errorf("failed to check SSO support: %v", err) + } if !supportsSSO { return false, nil } - if err != nil { - return false, fmt.Errorf("backoff cycle failed: %v", err) - } - err = profilemanager.WriteOutConfig(a.cfgPath, a.config) return true, err } @@ -129,19 +108,17 @@ func (a *Auth) LoginWithSetupKeyAndSaveConfig(resultListener ErrListener, setupK } func (a *Auth) loginWithSetupKeyAndSaveConfig(setupKey string, deviceName string) error { + authClient, err := auth.NewAuth(a.ctx, a.config.PrivateKey, a.config.ManagementURL, a.config) + if err != nil { + return fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() + //nolint ctxWithValues := context.WithValue(a.ctx, system.DeviceNameCtxKey, deviceName) - - err := a.withBackOff(a.ctx, func() error { - backoffErr := internal.Login(ctxWithValues, a.config, setupKey, "") - if s, ok := gstatus.FromError(backoffErr); ok && (s.Code() == codes.PermissionDenied) { - // we got an answer from management, exit backoff earlier - return backoff.Permanent(backoffErr) - } - return backoffErr - }) + err, _ = authClient.Login(ctxWithValues, setupKey, "") if err != nil { - return fmt.Errorf("backoff cycle failed: %v", err) + return fmt.Errorf("login failed: %v", err) } return profilemanager.WriteOutConfig(a.cfgPath, a.config) @@ -160,49 +137,41 @@ func (a *Auth) Login(resultListener ErrListener, urlOpener URLOpener, isAndroidT } func (a *Auth) login(urlOpener URLOpener, isAndroidTV bool) error { - var needsLogin bool + authClient, err := auth.NewAuth(a.ctx, a.config.PrivateKey, a.config.ManagementURL, a.config) + if err != nil { + return fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() // check if we need to generate JWT token - err := a.withBackOff(a.ctx, func() (err error) { - needsLogin, err = internal.IsLoginRequired(a.ctx, a.config) - return - }) + needsLogin, err := authClient.IsLoginRequired(a.ctx) if err != nil { - return fmt.Errorf("backoff cycle failed: %v", err) + return fmt.Errorf("failed to check login requirement: %v", err) } jwtToken := "" if needsLogin { - tokenInfo, err := a.foregroundGetTokenInfo(urlOpener, isAndroidTV) + tokenInfo, err := a.foregroundGetTokenInfo(authClient, urlOpener, isAndroidTV) if err != nil { return fmt.Errorf("interactive sso login failed: %v", err) } jwtToken = tokenInfo.GetTokenToUse() } - err = a.withBackOff(a.ctx, func() error { - err := internal.Login(a.ctx, a.config, "", jwtToken) - - if err == nil { - go urlOpener.OnLoginSuccess() - } - - if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied) { - return nil - } - return err - }) + err, _ = authClient.Login(a.ctx, "", jwtToken) if err != nil { - return fmt.Errorf("backoff cycle failed: %v", err) + return fmt.Errorf("login failed: %v", err) } + go urlOpener.OnLoginSuccess() + return nil } -func (a *Auth) foregroundGetTokenInfo(urlOpener URLOpener, isAndroidTV bool) (*auth.TokenInfo, error) { - oAuthFlow, err := auth.NewOAuthFlow(a.ctx, a.config, false, isAndroidTV, "") +func (a *Auth) foregroundGetTokenInfo(authClient *auth.Auth, urlOpener URLOpener, isAndroidTV bool) (*auth.TokenInfo, error) { + oAuthFlow, err := authClient.GetOAuthFlow(a.ctx, isAndroidTV) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get OAuth flow: %v", err) } flowInfo, err := oAuthFlow.RequestAuthInfo(context.TODO()) @@ -212,22 +181,10 @@ func (a *Auth) foregroundGetTokenInfo(urlOpener URLOpener, isAndroidTV bool) (*a go urlOpener.Open(flowInfo.VerificationURIComplete, flowInfo.UserCode) - waitTimeout := time.Duration(flowInfo.ExpiresIn) * time.Second - waitCTX, cancel := context.WithTimeout(a.ctx, waitTimeout) - defer cancel() - tokenInfo, err := oAuthFlow.WaitToken(waitCTX, flowInfo) + tokenInfo, err := oAuthFlow.WaitToken(a.ctx, flowInfo) if err != nil { return nil, fmt.Errorf("waiting for browser login failed: %v", err) } return &tokenInfo, nil } - -func (a *Auth) withBackOff(ctx context.Context, bf func() error) error { - return backoff.RetryNotify( - bf, - backoff.WithContext(cmd.CLIBackOffSettings, ctx), - func(err error, duration time.Duration) { - log.Warnf("retrying Login to the Management service in %v due to error %v", duration, err) - }) -} diff --git a/client/cmd/login.go b/client/cmd/login.go index 57c010571..64b45e557 100644 --- a/client/cmd/login.go +++ b/client/cmd/login.go @@ -7,7 +7,6 @@ import ( "os/user" "runtime" "strings" - "time" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -277,18 +276,19 @@ func handleSSOLogin(ctx context.Context, cmd *cobra.Command, loginResp *proto.Lo } func foregroundLogin(ctx context.Context, cmd *cobra.Command, config *profilemanager.Config, setupKey, profileName string) error { + authClient, err := auth.NewAuth(ctx, config.PrivateKey, config.ManagementURL, config) + if err != nil { + return fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() + needsLogin := false - err := WithBackOff(func() error { - err := internal.Login(ctx, config, "", "") - if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied) { - needsLogin = true - return nil - } - return err - }) - if err != nil { - return fmt.Errorf("backoff cycle failed: %v", err) + err, isAuthError := authClient.Login(ctx, "", "") + if isAuthError { + needsLogin = true + } else if err != nil { + return fmt.Errorf("login check failed: %v", err) } jwtToken := "" @@ -300,23 +300,9 @@ func foregroundLogin(ctx context.Context, cmd *cobra.Command, config *profileman jwtToken = tokenInfo.GetTokenToUse() } - var lastError error - - err = WithBackOff(func() error { - err := internal.Login(ctx, config, setupKey, jwtToken) - if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied) { - lastError = err - return nil - } - return err - }) - - if lastError != nil { - return fmt.Errorf("login failed: %v", lastError) - } - + err, _ = authClient.Login(ctx, setupKey, jwtToken) if err != nil { - return fmt.Errorf("backoff cycle failed: %v", err) + return fmt.Errorf("login failed: %v", err) } return nil @@ -344,11 +330,7 @@ func foregroundGetTokenInfo(ctx context.Context, cmd *cobra.Command, config *pro openURL(cmd, flowInfo.VerificationURIComplete, flowInfo.UserCode, noBrowser) - waitTimeout := time.Duration(flowInfo.ExpiresIn) * time.Second - waitCTX, c := context.WithTimeout(context.TODO(), waitTimeout) - defer c() - - tokenInfo, err := oAuthFlow.WaitToken(waitCTX, flowInfo) + tokenInfo, err := oAuthFlow.WaitToken(context.TODO(), flowInfo) if err != nil { return nil, fmt.Errorf("waiting for browser login failed: %v", err) } diff --git a/client/embed/embed.go b/client/embed/embed.go index 8bbbef0f2..e266aae28 100644 --- a/client/embed/embed.go +++ b/client/embed/embed.go @@ -16,6 +16,7 @@ import ( "github.com/netbirdio/netbird/client/iface/netstack" "github.com/netbirdio/netbird/client/internal" + "github.com/netbirdio/netbird/client/internal/auth" "github.com/netbirdio/netbird/client/internal/peer" "github.com/netbirdio/netbird/client/internal/profilemanager" sshcommon "github.com/netbirdio/netbird/client/ssh" @@ -176,7 +177,13 @@ func (c *Client) Start(startCtx context.Context) error { // nolint:staticcheck ctx = context.WithValue(ctx, system.DeviceNameCtxKey, c.deviceName) - if err := internal.Login(ctx, c.config, c.setupKey, c.jwtToken); err != nil { + authClient, err := auth.NewAuth(ctx, c.config.PrivateKey, c.config.ManagementURL, c.config) + if err != nil { + return fmt.Errorf("create auth client: %w", err) + } + defer authClient.Close() + + if err, _ := authClient.Login(ctx, c.setupKey, c.jwtToken); err != nil { return fmt.Errorf("login: %w", err) } diff --git a/client/internal/auth/auth.go b/client/internal/auth/auth.go new file mode 100644 index 000000000..44e98bede --- /dev/null +++ b/client/internal/auth/auth.go @@ -0,0 +1,499 @@ +package auth + +import ( + "context" + "net/url" + "sync" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/google/uuid" + log "github.com/sirupsen/logrus" + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/netbirdio/netbird/client/internal/profilemanager" + "github.com/netbirdio/netbird/client/ssh" + "github.com/netbirdio/netbird/client/system" + mgm "github.com/netbirdio/netbird/shared/management/client" + "github.com/netbirdio/netbird/shared/management/client/common" + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +// Auth manages authentication operations with the management server +// It maintains a long-lived connection and automatically handles reconnection with backoff +type Auth struct { + mutex sync.RWMutex + client *mgm.GrpcClient + config *profilemanager.Config + privateKey wgtypes.Key + mgmURL *url.URL + mgmTLSEnabled bool +} + +// NewAuth creates a new Auth instance that manages authentication flows +// It establishes a connection to the management server that will be reused for all operations +// The connection is automatically recreated with backoff if it becomes disconnected +func NewAuth(ctx context.Context, privateKey string, mgmURL *url.URL, config *profilemanager.Config) (*Auth, error) { + // Validate WireGuard private key + myPrivateKey, err := wgtypes.ParseKey(privateKey) + if err != nil { + return nil, err + } + + // Determine TLS setting based on URL scheme + mgmTLSEnabled := mgmURL.Scheme == "https" + + log.Debugf("connecting to Management Service %s", mgmURL.String()) + mgmClient, err := mgm.NewClient(ctx, mgmURL.Host, myPrivateKey, mgmTLSEnabled) + if err != nil { + log.Errorf("failed connecting to Management Service %s: %v", mgmURL.String(), err) + return nil, err + } + + log.Debugf("connected to the Management service %s", mgmURL.String()) + + return &Auth{ + client: mgmClient, + config: config, + privateKey: myPrivateKey, + mgmURL: mgmURL, + mgmTLSEnabled: mgmTLSEnabled, + }, nil +} + +// Close closes the management client connection +func (a *Auth) Close() error { + a.mutex.Lock() + defer a.mutex.Unlock() + + if a.client == nil { + return nil + } + return a.client.Close() +} + +// IsSSOSupported checks if the management server supports SSO by attempting to retrieve auth flow configurations. +// Returns true if either PKCE or Device authorization flow is supported, false otherwise. +// This function encapsulates the SSO detection logic to avoid exposing gRPC error codes to upper layers. +// Automatically retries with backoff and reconnection on connection errors. +func (a *Auth) IsSSOSupported(ctx context.Context) (bool, error) { + var supportsSSO bool + + err := a.withRetry(ctx, func(client *mgm.GrpcClient) error { + // Try PKCE flow first + _, err := a.getPKCEFlow(client) + if err == nil { + supportsSSO = true + return nil + } + + // Check if PKCE is not supported + if s, ok := status.FromError(err); ok && (s.Code() == codes.NotFound || s.Code() == codes.Unimplemented) { + // PKCE not supported, try Device flow + _, err = a.getDeviceFlow(client) + if err == nil { + supportsSSO = true + return nil + } + + // Check if Device flow is also not supported + if s, ok := status.FromError(err); ok && (s.Code() == codes.NotFound || s.Code() == codes.Unimplemented) { + // Neither PKCE nor Device flow is supported + supportsSSO = false + return nil + } + + // Device flow check returned an error other than NotFound/Unimplemented + return err + } + + // PKCE flow check returned an error other than NotFound/Unimplemented + return err + }) + + return supportsSSO, err +} + +// GetOAuthFlow returns an OAuth flow (PKCE or Device) using the existing management connection +// This avoids creating a new connection to the management server +func (a *Auth) GetOAuthFlow(ctx context.Context, forceDeviceAuth bool) (OAuthFlow, error) { + var flow OAuthFlow + var err error + + err = a.withRetry(ctx, func(client *mgm.GrpcClient) error { + if forceDeviceAuth { + flow, err = a.getDeviceFlow(client) + return err + } + + // Try PKCE flow first + flow, err = a.getPKCEFlow(client) + if err != nil { + // If PKCE not supported, try Device flow + if s, ok := status.FromError(err); ok && (s.Code() == codes.NotFound || s.Code() == codes.Unimplemented) { + flow, err = a.getDeviceFlow(client) + return err + } + return err + } + return nil + }) + + return flow, err +} + +// IsLoginRequired checks if login is required by attempting to authenticate with the server +// Automatically retries with backoff and reconnection on connection errors. +func (a *Auth) IsLoginRequired(ctx context.Context) (bool, error) { + pubSSHKey, err := ssh.GeneratePublicKey([]byte(a.config.SSHKey)) + if err != nil { + return false, err + } + + var needsLogin bool + + err = a.withRetry(ctx, func(client *mgm.GrpcClient) error { + _, _, err := a.doMgmLogin(client, ctx, pubSSHKey) + if isLoginNeeded(err) { + needsLogin = true + return nil + } + needsLogin = false + return err + }) + + return needsLogin, err +} + +// Login attempts to log in or register the client with the management server +// Returns error and a boolean indicating if it's an authentication error (permission denied) that should stop retries. +// Automatically retries with backoff and reconnection on connection errors. +func (a *Auth) Login(ctx context.Context, setupKey string, jwtToken string) (error, bool) { + pubSSHKey, err := ssh.GeneratePublicKey([]byte(a.config.SSHKey)) + if err != nil { + return err, false + } + + var isAuthError bool + + err = a.withRetry(ctx, func(client *mgm.GrpcClient) error { + serverKey, _, err := a.doMgmLogin(client, ctx, pubSSHKey) + if serverKey != nil && isRegistrationNeeded(err) { + log.Debugf("peer registration required") + _, err = a.registerPeer(client, ctx, setupKey, jwtToken, pubSSHKey) + if err != nil { + isAuthError = isPermissionDenied(err) + return err + } + } else if err != nil { + isAuthError = isPermissionDenied(err) + return err + } + + isAuthError = false + return nil + }) + + return err, isAuthError +} + +// getPKCEFlow retrieves PKCE authorization flow configuration and creates a flow instance +func (a *Auth) getPKCEFlow(client *mgm.GrpcClient) (*PKCEAuthorizationFlow, error) { + serverKey, err := client.GetServerPublicKey() + if err != nil { + log.Errorf("failed while getting Management Service public key: %v", err) + return nil, err + } + + protoFlow, err := client.GetPKCEAuthorizationFlow(*serverKey) + if err != nil { + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + log.Warnf("server couldn't find pkce flow, contact admin: %v", err) + return nil, err + } + log.Errorf("failed to retrieve pkce flow: %v", err) + return nil, err + } + + protoConfig := protoFlow.GetProviderConfig() + config := &PKCEAuthProviderConfig{ + Audience: protoConfig.GetAudience(), + ClientID: protoConfig.GetClientID(), + ClientSecret: protoConfig.GetClientSecret(), + TokenEndpoint: protoConfig.GetTokenEndpoint(), + AuthorizationEndpoint: protoConfig.GetAuthorizationEndpoint(), + Scope: protoConfig.GetScope(), + RedirectURLs: protoConfig.GetRedirectURLs(), + UseIDToken: protoConfig.GetUseIDToken(), + ClientCertPair: a.config.ClientCertKeyPair, + DisablePromptLogin: protoConfig.GetDisablePromptLogin(), + LoginFlag: common.LoginFlag(protoConfig.GetLoginFlag()), + } + + if err := validatePKCEConfig(config); err != nil { + return nil, err + } + + flow, err := NewPKCEAuthorizationFlow(*config) + if err != nil { + return nil, err + } + + return flow, nil +} + +// getDeviceFlow retrieves device authorization flow configuration and creates a flow instance +func (a *Auth) getDeviceFlow(client *mgm.GrpcClient) (*DeviceAuthorizationFlow, error) { + serverKey, err := client.GetServerPublicKey() + if err != nil { + log.Errorf("failed while getting Management Service public key: %v", err) + return nil, err + } + + protoFlow, err := client.GetDeviceAuthorizationFlow(*serverKey) + if err != nil { + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + log.Warnf("server couldn't find device flow, contact admin: %v", err) + return nil, err + } + log.Errorf("failed to retrieve device flow: %v", err) + return nil, err + } + + protoConfig := protoFlow.GetProviderConfig() + config := &DeviceAuthProviderConfig{ + Audience: protoConfig.GetAudience(), + ClientID: protoConfig.GetClientID(), + ClientSecret: protoConfig.GetClientSecret(), + Domain: protoConfig.Domain, + TokenEndpoint: protoConfig.GetTokenEndpoint(), + DeviceAuthEndpoint: protoConfig.GetDeviceAuthEndpoint(), + Scope: protoConfig.GetScope(), + UseIDToken: protoConfig.GetUseIDToken(), + } + + // Keep compatibility with older management versions + if config.Scope == "" { + config.Scope = "openid" + } + + if err := validateDeviceAuthConfig(config); err != nil { + return nil, err + } + + flow, err := NewDeviceAuthorizationFlow(*config) + if err != nil { + return nil, err + } + + return flow, nil +} + +// doMgmLogin performs the actual login operation with the management service +func (a *Auth) doMgmLogin(client *mgm.GrpcClient, ctx context.Context, pubSSHKey []byte) (*wgtypes.Key, *mgmProto.LoginResponse, error) { + serverKey, err := client.GetServerPublicKey() + if err != nil { + log.Errorf("failed while getting Management Service public key: %v", err) + return nil, nil, err + } + + sysInfo := system.GetInfo(ctx) + a.setSystemInfoFlags(sysInfo) + loginResp, err := client.Login(*serverKey, sysInfo, pubSSHKey, a.config.DNSLabels) + return serverKey, loginResp, err +} + +// registerPeer checks whether setupKey was provided via cmd line and if not then it prompts user to enter a key. +// Otherwise tries to register with the provided setupKey via command line. +func (a *Auth) registerPeer(client *mgm.GrpcClient, ctx context.Context, setupKey string, jwtToken string, pubSSHKey []byte) (*mgmProto.LoginResponse, error) { + serverPublicKey, err := client.GetServerPublicKey() + if err != nil { + log.Errorf("failed while getting Management Service public key: %v", err) + return nil, err + } + + validSetupKey, err := uuid.Parse(setupKey) + if err != nil && jwtToken == "" { + return nil, status.Errorf(codes.InvalidArgument, "invalid setup-key or no sso information provided, err: %v", err) + } + + log.Debugf("sending peer registration request to Management Service") + info := system.GetInfo(ctx) + a.setSystemInfoFlags(info) + loginResp, err := client.Register(*serverPublicKey, validSetupKey.String(), jwtToken, info, pubSSHKey, a.config.DNSLabels) + if err != nil { + log.Errorf("failed registering peer %v", err) + return nil, err + } + + log.Infof("peer has been successfully registered on Management Service") + + return loginResp, nil +} + +// setSystemInfoFlags sets all configuration flags on the provided system info +func (a *Auth) setSystemInfoFlags(info *system.Info) { + info.SetFlags( + a.config.RosenpassEnabled, + a.config.RosenpassPermissive, + a.config.ServerSSHAllowed, + a.config.DisableClientRoutes, + a.config.DisableServerRoutes, + a.config.DisableDNS, + a.config.DisableFirewall, + a.config.BlockLANAccess, + a.config.BlockInbound, + a.config.LazyConnectionEnabled, + a.config.EnableSSHRoot, + a.config.EnableSSHSFTP, + a.config.EnableSSHLocalPortForwarding, + a.config.EnableSSHRemotePortForwarding, + a.config.DisableSSHAuth, + ) +} + +// reconnect closes the current connection and creates a new one +// It checks if the brokenClient is still the current client before reconnecting +// to avoid multiple threads reconnecting unnecessarily +func (a *Auth) reconnect(ctx context.Context, brokenClient *mgm.GrpcClient) error { + a.mutex.Lock() + defer a.mutex.Unlock() + + // Double-check: if client has already been replaced by another thread, skip reconnection + if a.client != brokenClient { + log.Debugf("client already reconnected by another thread, skipping") + return nil + } + + // Create new connection FIRST, before closing the old one + // This ensures a.client is never nil, preventing panics in other threads + log.Debugf("reconnecting to Management Service %s", a.mgmURL.String()) + mgmClient, err := mgm.NewClient(ctx, a.mgmURL.Host, a.privateKey, a.mgmTLSEnabled) + if err != nil { + log.Errorf("failed reconnecting to Management Service %s: %v", a.mgmURL.String(), err) + // Keep the old client if reconnection fails + return err + } + + // Close old connection AFTER new one is successfully created + oldClient := a.client + a.client = mgmClient + + if oldClient != nil { + if err := oldClient.Close(); err != nil { + log.Debugf("error closing old connection: %v", err) + } + } + + log.Debugf("successfully reconnected to Management service %s", a.mgmURL.String()) + return nil +} + +// isConnectionError checks if the error is a connection-related error that should trigger reconnection +func isConnectionError(err error) bool { + if err == nil { + return false + } + s, ok := status.FromError(err) + if !ok { + return false + } + // These error codes indicate connection issues + return s.Code() == codes.Unavailable || + s.Code() == codes.DeadlineExceeded || + s.Code() == codes.Canceled || + s.Code() == codes.Internal +} + +// withRetry wraps an operation with exponential backoff retry logic +// It automatically reconnects on connection errors +func (a *Auth) withRetry(ctx context.Context, operation func(client *mgm.GrpcClient) error) error { + backoffSettings := &backoff.ExponentialBackOff{ + InitialInterval: 500 * time.Millisecond, + RandomizationFactor: 0.5, + Multiplier: 1.5, + MaxInterval: 10 * time.Second, + MaxElapsedTime: 2 * time.Minute, + Stop: backoff.Stop, + Clock: backoff.SystemClock, + } + backoffSettings.Reset() + + return backoff.RetryNotify( + func() error { + // Capture the client BEFORE the operation to ensure we track the correct client + a.mutex.RLock() + currentClient := a.client + a.mutex.RUnlock() + + if currentClient == nil { + return status.Errorf(codes.Unavailable, "client is not initialized") + } + + // Execute operation with the captured client + err := operation(currentClient) + if err == nil { + return nil + } + + // If it's a connection error, attempt reconnection using the client that was actually used + if isConnectionError(err) { + log.Warnf("connection error detected, attempting reconnection: %v", err) + + if reconnectErr := a.reconnect(ctx, currentClient); reconnectErr != nil { + log.Errorf("reconnection failed: %v", reconnectErr) + return reconnectErr + } + // Return the original error to trigger retry with the new connection + return err + } + + // For authentication errors, don't retry + if isAuthenticationError(err) { + return backoff.Permanent(err) + } + + return err + }, + backoff.WithContext(backoffSettings, ctx), + func(err error, duration time.Duration) { + log.Warnf("operation failed, retrying in %v: %v", duration, err) + }, + ) +} + +// isAuthenticationError checks if the error is an authentication-related error that should not be retried. +// Returns true if the error is InvalidArgument or PermissionDenied, indicating that retrying won't help. +func isAuthenticationError(err error) bool { + if err == nil { + return false + } + s, ok := status.FromError(err) + if !ok { + return false + } + return s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied +} + +// isPermissionDenied checks if the error is a PermissionDenied error. +// This is used to determine if early exit from backoff is needed (e.g., when the server responded but denied access). +func isPermissionDenied(err error) bool { + if err == nil { + return false + } + s, ok := status.FromError(err) + if !ok { + return false + } + return s.Code() == codes.PermissionDenied +} + +func isLoginNeeded(err error) bool { + return isAuthenticationError(err) +} + +func isRegistrationNeeded(err error) bool { + return isPermissionDenied(err) +} diff --git a/client/internal/auth/device_flow.go b/client/internal/auth/device_flow.go index 8ca760742..e33765300 100644 --- a/client/internal/auth/device_flow.go +++ b/client/internal/auth/device_flow.go @@ -15,7 +15,6 @@ import ( log "github.com/sirupsen/logrus" - "github.com/netbirdio/netbird/client/internal" "github.com/netbirdio/netbird/util/embeddedroots" ) @@ -26,12 +25,56 @@ const ( var _ OAuthFlow = &DeviceAuthorizationFlow{} +// DeviceAuthProviderConfig has all attributes needed to initiate a device authorization flow +type DeviceAuthProviderConfig struct { + // ClientID An IDP application client id + ClientID string + // ClientSecret An IDP application client secret + ClientSecret string + // Domain An IDP API domain + // Deprecated. Use OIDCConfigEndpoint instead + Domain string + // Audience An Audience for to authorization validation + Audience string + // TokenEndpoint is the endpoint of an IDP manager where clients can obtain access token + TokenEndpoint string + // DeviceAuthEndpoint is the endpoint of an IDP manager where clients can obtain device authorization code + DeviceAuthEndpoint string + // Scopes provides the scopes to be included in the token request + Scope string + // UseIDToken indicates if the id token should be used for authentication + UseIDToken bool + // LoginHint is used to pre-fill the email/username field during authentication + LoginHint string +} + +// validateDeviceAuthConfig validates device authorization provider configuration +func validateDeviceAuthConfig(config *DeviceAuthProviderConfig) error { + errorMsgFormat := "invalid provider configuration received from management: %s value is empty. Contact your NetBird administrator" + + if config.Audience == "" { + return fmt.Errorf(errorMsgFormat, "Audience") + } + if config.ClientID == "" { + return fmt.Errorf(errorMsgFormat, "Client ID") + } + if config.TokenEndpoint == "" { + return fmt.Errorf(errorMsgFormat, "Token Endpoint") + } + if config.DeviceAuthEndpoint == "" { + return fmt.Errorf(errorMsgFormat, "Device Auth Endpoint") + } + if config.Scope == "" { + return fmt.Errorf(errorMsgFormat, "Device Auth Scopes") + } + return nil +} + // DeviceAuthorizationFlow implements the OAuthFlow interface, // for the Device Authorization Flow. type DeviceAuthorizationFlow struct { - providerConfig internal.DeviceAuthProviderConfig - - HTTPClient HTTPClient + providerConfig DeviceAuthProviderConfig + HTTPClient HTTPClient } // RequestDeviceCodePayload used for request device code payload for auth0 @@ -57,7 +100,7 @@ type TokenRequestResponse struct { } // NewDeviceAuthorizationFlow returns device authorization flow client -func NewDeviceAuthorizationFlow(config internal.DeviceAuthProviderConfig) (*DeviceAuthorizationFlow, error) { +func NewDeviceAuthorizationFlow(config DeviceAuthProviderConfig) (*DeviceAuthorizationFlow, error) { httpTransport := http.DefaultTransport.(*http.Transport).Clone() httpTransport.MaxIdleConns = 5 @@ -89,6 +132,11 @@ func (d *DeviceAuthorizationFlow) GetClientID(ctx context.Context) string { return d.providerConfig.ClientID } +// SetLoginHint sets the login hint for the device authorization flow +func (d *DeviceAuthorizationFlow) SetLoginHint(hint string) { + d.providerConfig.LoginHint = hint +} + // RequestAuthInfo requests a device code login flow information from Hosted func (d *DeviceAuthorizationFlow) RequestAuthInfo(ctx context.Context) (AuthFlowInfo, error) { form := url.Values{} @@ -199,14 +247,22 @@ func (d *DeviceAuthorizationFlow) requestToken(info AuthFlowInfo) (TokenRequestR } // WaitToken waits user's login and authorize the app. Once the user's authorize -// it retrieves the access token from Hosted's endpoint and validates it before returning +// it retrieves the access token from Hosted's endpoint and validates it before returning. +// The method creates a timeout context internally based on info.ExpiresIn. func (d *DeviceAuthorizationFlow) WaitToken(ctx context.Context, info AuthFlowInfo) (TokenInfo, error) { + // Create timeout context based on flow expiration + timeout := time.Duration(info.ExpiresIn) * time.Second + waitCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + interval := time.Duration(info.Interval) * time.Second ticker := time.NewTicker(interval) + defer ticker.Stop() + for { select { - case <-ctx.Done(): - return TokenInfo{}, ctx.Err() + case <-waitCtx.Done(): + return TokenInfo{}, waitCtx.Err() case <-ticker.C: tokenResponse, err := d.requestToken(info) diff --git a/client/internal/auth/device_flow_test.go b/client/internal/auth/device_flow_test.go index 466645ee9..6a433cb61 100644 --- a/client/internal/auth/device_flow_test.go +++ b/client/internal/auth/device_flow_test.go @@ -12,8 +12,6 @@ import ( "github.com/golang-jwt/jwt/v5" "github.com/stretchr/testify/require" - - "github.com/netbirdio/netbird/client/internal" ) type mockHTTPClient struct { @@ -115,18 +113,19 @@ func TestHosted_RequestDeviceCode(t *testing.T) { err: testCase.inputReqError, } - deviceFlow := &DeviceAuthorizationFlow{ - providerConfig: internal.DeviceAuthProviderConfig{ - Audience: expectedAudience, - ClientID: expectedClientID, - Scope: expectedScope, - TokenEndpoint: "test.hosted.com/token", - DeviceAuthEndpoint: "test.hosted.com/device/auth", - UseIDToken: false, - }, - HTTPClient: &httpClient, + config := DeviceAuthProviderConfig{ + Audience: expectedAudience, + ClientID: expectedClientID, + Scope: expectedScope, + TokenEndpoint: "test.hosted.com/token", + DeviceAuthEndpoint: "test.hosted.com/device/auth", + UseIDToken: false, } + deviceFlow, err := NewDeviceAuthorizationFlow(config) + require.NoError(t, err, "creating device flow should not fail") + deviceFlow.HTTPClient = &httpClient + authInfo, err := deviceFlow.RequestAuthInfo(context.TODO()) testCase.testingErrFunc(t, err, testCase.expectedErrorMSG) @@ -280,18 +279,19 @@ func TestHosted_WaitToken(t *testing.T) { countResBody: testCase.inputCountResBody, } - deviceFlow := DeviceAuthorizationFlow{ - providerConfig: internal.DeviceAuthProviderConfig{ - Audience: testCase.inputAudience, - ClientID: clientID, - TokenEndpoint: "test.hosted.com/token", - DeviceAuthEndpoint: "test.hosted.com/device/auth", - Scope: "openid", - UseIDToken: false, - }, - HTTPClient: &httpClient, + config := DeviceAuthProviderConfig{ + Audience: testCase.inputAudience, + ClientID: clientID, + TokenEndpoint: "test.hosted.com/token", + DeviceAuthEndpoint: "test.hosted.com/device/auth", + Scope: "openid", + UseIDToken: false, } + deviceFlow, err := NewDeviceAuthorizationFlow(config) + require.NoError(t, err, "creating device flow should not fail") + deviceFlow.HTTPClient = &httpClient + ctx, cancel := context.WithTimeout(context.TODO(), testCase.inputTimeout) defer cancel() tokenInfo, err := deviceFlow.WaitToken(ctx, testCase.inputInfo) diff --git a/client/internal/auth/oauth.go b/client/internal/auth/oauth.go index 85a166005..a50a2ce6f 100644 --- a/client/internal/auth/oauth.go +++ b/client/internal/auth/oauth.go @@ -10,7 +10,6 @@ import ( "google.golang.org/grpc/codes" gstatus "google.golang.org/grpc/status" - "github.com/netbirdio/netbird/client/internal" "github.com/netbirdio/netbird/client/internal/profilemanager" ) @@ -87,19 +86,33 @@ func NewOAuthFlow(ctx context.Context, config *profilemanager.Config, isUnixDesk // authenticateWithPKCEFlow initializes the Proof Key for Code Exchange flow auth flow func authenticateWithPKCEFlow(ctx context.Context, config *profilemanager.Config, hint string) (OAuthFlow, error) { - pkceFlowInfo, err := internal.GetPKCEAuthorizationFlowInfo(ctx, config.PrivateKey, config.ManagementURL, config.ClientCertKeyPair) + authClient, err := NewAuth(ctx, config.PrivateKey, config.ManagementURL, config) + if err != nil { + return nil, fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() + + pkceFlowInfo, err := authClient.getPKCEFlow(authClient.client) if err != nil { return nil, fmt.Errorf("getting pkce authorization flow info failed with error: %v", err) } - pkceFlowInfo.ProviderConfig.LoginHint = hint + if hint != "" { + pkceFlowInfo.SetLoginHint(hint) + } - return NewPKCEAuthorizationFlow(pkceFlowInfo.ProviderConfig) + return pkceFlowInfo, nil } // authenticateWithDeviceCodeFlow initializes the Device Code auth Flow func authenticateWithDeviceCodeFlow(ctx context.Context, config *profilemanager.Config, hint string) (OAuthFlow, error) { - deviceFlowInfo, err := internal.GetDeviceAuthorizationFlowInfo(ctx, config.PrivateKey, config.ManagementURL) + authClient, err := NewAuth(ctx, config.PrivateKey, config.ManagementURL, config) + if err != nil { + return nil, fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() + + deviceFlowInfo, err := authClient.getDeviceFlow(authClient.client) if err != nil { switch s, ok := gstatus.FromError(err); { case ok && s.Code() == codes.NotFound: @@ -114,7 +127,9 @@ func authenticateWithDeviceCodeFlow(ctx context.Context, config *profilemanager. } } - deviceFlowInfo.ProviderConfig.LoginHint = hint + if hint != "" { + deviceFlowInfo.SetLoginHint(hint) + } - return NewDeviceAuthorizationFlow(deviceFlowInfo.ProviderConfig) + return deviceFlowInfo, nil } diff --git a/client/internal/auth/pkce_flow.go b/client/internal/auth/pkce_flow.go index cc43c8648..2e16836d8 100644 --- a/client/internal/auth/pkce_flow.go +++ b/client/internal/auth/pkce_flow.go @@ -20,7 +20,6 @@ import ( log "github.com/sirupsen/logrus" "golang.org/x/oauth2" - "github.com/netbirdio/netbird/client/internal" "github.com/netbirdio/netbird/client/internal/templates" "github.com/netbirdio/netbird/shared/management/client/common" ) @@ -35,17 +34,67 @@ const ( defaultPKCETimeoutSeconds = 300 ) +// PKCEAuthProviderConfig has all attributes needed to initiate PKCE authorization flow +type PKCEAuthProviderConfig struct { + // ClientID An IDP application client id + ClientID string + // ClientSecret An IDP application client secret + ClientSecret string + // Audience An Audience for to authorization validation + Audience string + // TokenEndpoint is the endpoint of an IDP manager where clients can obtain access token + TokenEndpoint string + // AuthorizationEndpoint is the endpoint of an IDP manager where clients can obtain authorization code + AuthorizationEndpoint string + // Scopes provides the scopes to be included in the token request + Scope string + // RedirectURL handles authorization code from IDP manager + RedirectURLs []string + // UseIDToken indicates if the id token should be used for authentication + UseIDToken bool + // ClientCertPair is used for mTLS authentication to the IDP + ClientCertPair *tls.Certificate + // DisablePromptLogin makes the PKCE flow to not prompt the user for login + DisablePromptLogin bool + // LoginFlag is used to configure the PKCE flow login behavior + LoginFlag common.LoginFlag + // LoginHint is used to pre-fill the email/username field during authentication + LoginHint string +} + +// validatePKCEConfig validates PKCE provider configuration +func validatePKCEConfig(config *PKCEAuthProviderConfig) error { + errorMsgFormat := "invalid provider configuration received from management: %s value is empty. Contact your NetBird administrator" + + if config.ClientID == "" { + return fmt.Errorf(errorMsgFormat, "Client ID") + } + if config.TokenEndpoint == "" { + return fmt.Errorf(errorMsgFormat, "Token Endpoint") + } + if config.AuthorizationEndpoint == "" { + return fmt.Errorf(errorMsgFormat, "Authorization Auth Endpoint") + } + if config.Scope == "" { + return fmt.Errorf(errorMsgFormat, "PKCE Auth Scopes") + } + if config.RedirectURLs == nil { + return fmt.Errorf(errorMsgFormat, "PKCE Redirect URLs") + } + return nil +} + // PKCEAuthorizationFlow implements the OAuthFlow interface for // the Authorization Code Flow with PKCE. type PKCEAuthorizationFlow struct { - providerConfig internal.PKCEAuthProviderConfig + providerConfig PKCEAuthProviderConfig state string codeVerifier string oAuthConfig *oauth2.Config } // NewPKCEAuthorizationFlow returns new PKCE authorization code flow. -func NewPKCEAuthorizationFlow(config internal.PKCEAuthProviderConfig) (*PKCEAuthorizationFlow, error) { +func NewPKCEAuthorizationFlow(config PKCEAuthProviderConfig) (*PKCEAuthorizationFlow, error) { var availableRedirectURL string excludedRanges := getSystemExcludedPortRanges() @@ -124,10 +173,21 @@ func (p *PKCEAuthorizationFlow) RequestAuthInfo(ctx context.Context) (AuthFlowIn }, nil } +// SetLoginHint sets the login hint for the PKCE authorization flow +func (p *PKCEAuthorizationFlow) SetLoginHint(hint string) { + p.providerConfig.LoginHint = hint +} + // WaitToken waits for the OAuth token in the PKCE Authorization Flow. // It starts an HTTP server to receive the OAuth token callback and waits for the token or an error. // Once the token is received, it is converted to TokenInfo and validated before returning. -func (p *PKCEAuthorizationFlow) WaitToken(ctx context.Context, _ AuthFlowInfo) (TokenInfo, error) { +// The method creates a timeout context internally based on info.ExpiresIn. +func (p *PKCEAuthorizationFlow) WaitToken(ctx context.Context, info AuthFlowInfo) (TokenInfo, error) { + // Create timeout context based on flow expiration + timeout := time.Duration(info.ExpiresIn) * time.Second + waitCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + tokenChan := make(chan *oauth2.Token, 1) errChan := make(chan error, 1) @@ -138,7 +198,7 @@ func (p *PKCEAuthorizationFlow) WaitToken(ctx context.Context, _ AuthFlowInfo) ( server := &http.Server{Addr: fmt.Sprintf(":%s", parsedURL.Port())} defer func() { - shutdownCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() if err := server.Shutdown(shutdownCtx); err != nil { @@ -149,8 +209,8 @@ func (p *PKCEAuthorizationFlow) WaitToken(ctx context.Context, _ AuthFlowInfo) ( go p.startServer(server, tokenChan, errChan) select { - case <-ctx.Done(): - return TokenInfo{}, ctx.Err() + case <-waitCtx.Done(): + return TokenInfo{}, waitCtx.Err() case token := <-tokenChan: return p.parseOAuthToken(token) case err := <-errChan: diff --git a/client/internal/auth/pkce_flow_test.go b/client/internal/auth/pkce_flow_test.go index b77a17eaa..c487c13df 100644 --- a/client/internal/auth/pkce_flow_test.go +++ b/client/internal/auth/pkce_flow_test.go @@ -9,7 +9,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/netbirdio/netbird/client/internal" mgm "github.com/netbirdio/netbird/shared/management/client/common" ) @@ -50,7 +49,7 @@ func TestPromptLogin(t *testing.T) { for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { - config := internal.PKCEAuthProviderConfig{ + config := PKCEAuthProviderConfig{ ClientID: "test-client-id", Audience: "test-audience", TokenEndpoint: "https://test-token-endpoint.com/token", diff --git a/client/internal/auth/pkce_flow_windows_test.go b/client/internal/auth/pkce_flow_windows_test.go index dd455b2fe..125eb270a 100644 --- a/client/internal/auth/pkce_flow_windows_test.go +++ b/client/internal/auth/pkce_flow_windows_test.go @@ -9,8 +9,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/netbirdio/netbird/client/internal" ) func TestParseExcludedPortRanges(t *testing.T) { @@ -95,7 +93,7 @@ func TestNewPKCEAuthorizationFlow_WithActualExcludedPorts(t *testing.T) { availablePort := 65432 - config := internal.PKCEAuthProviderConfig{ + config := PKCEAuthProviderConfig{ ClientID: "test-client-id", Audience: "test-audience", TokenEndpoint: "https://test-token-endpoint.com/token", diff --git a/client/internal/device_auth.go b/client/internal/device_auth.go deleted file mode 100644 index 7f7d06130..000000000 --- a/client/internal/device_auth.go +++ /dev/null @@ -1,136 +0,0 @@ -package internal - -import ( - "context" - "fmt" - "net/url" - - log "github.com/sirupsen/logrus" - "golang.zx2c4.com/wireguard/wgctrl/wgtypes" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - mgm "github.com/netbirdio/netbird/shared/management/client" -) - -// DeviceAuthorizationFlow represents Device Authorization Flow information -type DeviceAuthorizationFlow struct { - Provider string - ProviderConfig DeviceAuthProviderConfig -} - -// DeviceAuthProviderConfig has all attributes needed to initiate a device authorization flow -type DeviceAuthProviderConfig struct { - // ClientID An IDP application client id - ClientID string - // ClientSecret An IDP application client secret - ClientSecret string - // Domain An IDP API domain - // Deprecated. Use OIDCConfigEndpoint instead - Domain string - // Audience An Audience for to authorization validation - Audience string - // TokenEndpoint is the endpoint of an IDP manager where clients can obtain access token - TokenEndpoint string - // DeviceAuthEndpoint is the endpoint of an IDP manager where clients can obtain device authorization code - DeviceAuthEndpoint string - // Scopes provides the scopes to be included in the token request - Scope string - // UseIDToken indicates if the id token should be used for authentication - UseIDToken bool - // LoginHint is used to pre-fill the email/username field during authentication - LoginHint string -} - -// GetDeviceAuthorizationFlowInfo initialize a DeviceAuthorizationFlow instance and return with it -func GetDeviceAuthorizationFlowInfo(ctx context.Context, privateKey string, mgmURL *url.URL) (DeviceAuthorizationFlow, error) { - // validate our peer's Wireguard PRIVATE key - myPrivateKey, err := wgtypes.ParseKey(privateKey) - if err != nil { - log.Errorf("failed parsing Wireguard key %s: [%s]", privateKey, err.Error()) - return DeviceAuthorizationFlow{}, err - } - - var mgmTLSEnabled bool - if mgmURL.Scheme == "https" { - mgmTLSEnabled = true - } - - log.Debugf("connecting to Management Service %s", mgmURL.String()) - mgmClient, err := mgm.NewClient(ctx, mgmURL.Host, myPrivateKey, mgmTLSEnabled) - if err != nil { - log.Errorf("failed connecting to Management Service %s %v", mgmURL.String(), err) - return DeviceAuthorizationFlow{}, err - } - log.Debugf("connected to the Management service %s", mgmURL.String()) - - defer func() { - err = mgmClient.Close() - if err != nil { - log.Warnf("failed to close the Management service client %v", err) - } - }() - - serverKey, err := mgmClient.GetServerPublicKey() - if err != nil { - log.Errorf("failed while getting Management Service public key: %v", err) - return DeviceAuthorizationFlow{}, err - } - - protoDeviceAuthorizationFlow, err := mgmClient.GetDeviceAuthorizationFlow(*serverKey) - if err != nil { - if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { - log.Warnf("server couldn't find device flow, contact admin: %v", err) - return DeviceAuthorizationFlow{}, err - } - log.Errorf("failed to retrieve device flow: %v", err) - return DeviceAuthorizationFlow{}, err - } - - deviceAuthorizationFlow := DeviceAuthorizationFlow{ - Provider: protoDeviceAuthorizationFlow.Provider.String(), - - ProviderConfig: DeviceAuthProviderConfig{ - Audience: protoDeviceAuthorizationFlow.GetProviderConfig().GetAudience(), - ClientID: protoDeviceAuthorizationFlow.GetProviderConfig().GetClientID(), - ClientSecret: protoDeviceAuthorizationFlow.GetProviderConfig().GetClientSecret(), - Domain: protoDeviceAuthorizationFlow.GetProviderConfig().Domain, - TokenEndpoint: protoDeviceAuthorizationFlow.GetProviderConfig().GetTokenEndpoint(), - DeviceAuthEndpoint: protoDeviceAuthorizationFlow.GetProviderConfig().GetDeviceAuthEndpoint(), - Scope: protoDeviceAuthorizationFlow.GetProviderConfig().GetScope(), - UseIDToken: protoDeviceAuthorizationFlow.GetProviderConfig().GetUseIDToken(), - }, - } - - // keep compatibility with older management versions - if deviceAuthorizationFlow.ProviderConfig.Scope == "" { - deviceAuthorizationFlow.ProviderConfig.Scope = "openid" - } - - err = isDeviceAuthProviderConfigValid(deviceAuthorizationFlow.ProviderConfig) - if err != nil { - return DeviceAuthorizationFlow{}, err - } - - return deviceAuthorizationFlow, nil -} - -func isDeviceAuthProviderConfigValid(config DeviceAuthProviderConfig) error { - errorMSGFormat := "invalid provider configuration received from management: %s value is empty. Contact your NetBird administrator" - if config.Audience == "" { - return fmt.Errorf(errorMSGFormat, "Audience") - } - if config.ClientID == "" { - return fmt.Errorf(errorMSGFormat, "Client ID") - } - if config.TokenEndpoint == "" { - return fmt.Errorf(errorMSGFormat, "Token Endpoint") - } - if config.DeviceAuthEndpoint == "" { - return fmt.Errorf(errorMSGFormat, "Device Auth Endpoint") - } - if config.Scope == "" { - return fmt.Errorf(errorMSGFormat, "Device Auth Scopes") - } - return nil -} diff --git a/client/internal/login.go b/client/internal/login.go deleted file mode 100644 index f528783ef..000000000 --- a/client/internal/login.go +++ /dev/null @@ -1,201 +0,0 @@ -package internal - -import ( - "context" - "net/url" - - "github.com/google/uuid" - log "github.com/sirupsen/logrus" - "golang.zx2c4.com/wireguard/wgctrl/wgtypes" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/netbirdio/netbird/client/internal/profilemanager" - "github.com/netbirdio/netbird/client/ssh" - "github.com/netbirdio/netbird/client/system" - mgm "github.com/netbirdio/netbird/shared/management/client" - mgmProto "github.com/netbirdio/netbird/shared/management/proto" -) - -// IsLoginRequired check that the server is support SSO or not -func IsLoginRequired(ctx context.Context, config *profilemanager.Config) (bool, error) { - mgmURL := config.ManagementURL - mgmClient, err := getMgmClient(ctx, config.PrivateKey, mgmURL) - if err != nil { - return false, err - } - defer func() { - err = mgmClient.Close() - if err != nil { - cStatus, ok := status.FromError(err) - if !ok || ok && cStatus.Code() != codes.Canceled { - log.Warnf("failed to close the Management service client, err: %v", err) - } - } - }() - log.Debugf("connected to the Management service %s", mgmURL.String()) - - pubSSHKey, err := ssh.GeneratePublicKey([]byte(config.SSHKey)) - if err != nil { - return false, err - } - - _, _, err = doMgmLogin(ctx, mgmClient, pubSSHKey, config) - if isLoginNeeded(err) { - return true, nil - } - return false, err -} - -// Login or register the client -func Login(ctx context.Context, config *profilemanager.Config, setupKey string, jwtToken string) error { - mgmClient, err := getMgmClient(ctx, config.PrivateKey, config.ManagementURL) - if err != nil { - return err - } - defer func() { - err = mgmClient.Close() - if err != nil { - cStatus, ok := status.FromError(err) - if !ok || ok && cStatus.Code() != codes.Canceled { - log.Warnf("failed to close the Management service client, err: %v", err) - } - } - }() - log.Debugf("connected to the Management service %s", config.ManagementURL.String()) - - pubSSHKey, err := ssh.GeneratePublicKey([]byte(config.SSHKey)) - if err != nil { - return err - } - - serverKey, _, err := doMgmLogin(ctx, mgmClient, pubSSHKey, config) - if serverKey != nil && isRegistrationNeeded(err) { - log.Debugf("peer registration required") - _, err = registerPeer(ctx, *serverKey, mgmClient, setupKey, jwtToken, pubSSHKey, config) - if err != nil { - return err - } - } else if err != nil { - return err - } - - return nil -} - -func getMgmClient(ctx context.Context, privateKey string, mgmURL *url.URL) (*mgm.GrpcClient, error) { - // validate our peer's Wireguard PRIVATE key - myPrivateKey, err := wgtypes.ParseKey(privateKey) - if err != nil { - log.Errorf("failed parsing Wireguard key %s: [%s]", privateKey, err.Error()) - return nil, err - } - - var mgmTlsEnabled bool - if mgmURL.Scheme == "https" { - mgmTlsEnabled = true - } - - log.Debugf("connecting to the Management service %s", mgmURL.String()) - mgmClient, err := mgm.NewClient(ctx, mgmURL.Host, myPrivateKey, mgmTlsEnabled) - if err != nil { - log.Errorf("failed connecting to the Management service %s %v", mgmURL.String(), err) - return nil, err - } - return mgmClient, err -} - -func doMgmLogin(ctx context.Context, mgmClient *mgm.GrpcClient, pubSSHKey []byte, config *profilemanager.Config) (*wgtypes.Key, *mgmProto.LoginResponse, error) { - serverKey, err := mgmClient.GetServerPublicKey() - if err != nil { - log.Errorf("failed while getting Management Service public key: %v", err) - return nil, nil, err - } - - sysInfo := system.GetInfo(ctx) - sysInfo.SetFlags( - config.RosenpassEnabled, - config.RosenpassPermissive, - config.ServerSSHAllowed, - config.DisableClientRoutes, - config.DisableServerRoutes, - config.DisableDNS, - config.DisableFirewall, - config.BlockLANAccess, - config.BlockInbound, - config.LazyConnectionEnabled, - config.EnableSSHRoot, - config.EnableSSHSFTP, - config.EnableSSHLocalPortForwarding, - config.EnableSSHRemotePortForwarding, - config.DisableSSHAuth, - ) - loginResp, err := mgmClient.Login(*serverKey, sysInfo, pubSSHKey, config.DNSLabels) - return serverKey, loginResp, err -} - -// registerPeer checks whether setupKey was provided via cmd line and if not then it prompts user to enter a key. -// Otherwise tries to register with the provided setupKey via command line. -func registerPeer(ctx context.Context, serverPublicKey wgtypes.Key, client *mgm.GrpcClient, setupKey string, jwtToken string, pubSSHKey []byte, config *profilemanager.Config) (*mgmProto.LoginResponse, error) { - validSetupKey, err := uuid.Parse(setupKey) - if err != nil && jwtToken == "" { - return nil, status.Errorf(codes.InvalidArgument, "invalid setup-key or no sso information provided, err: %v", err) - } - - log.Debugf("sending peer registration request to Management Service") - info := system.GetInfo(ctx) - info.SetFlags( - config.RosenpassEnabled, - config.RosenpassPermissive, - config.ServerSSHAllowed, - config.DisableClientRoutes, - config.DisableServerRoutes, - config.DisableDNS, - config.DisableFirewall, - config.BlockLANAccess, - config.BlockInbound, - config.LazyConnectionEnabled, - config.EnableSSHRoot, - config.EnableSSHSFTP, - config.EnableSSHLocalPortForwarding, - config.EnableSSHRemotePortForwarding, - config.DisableSSHAuth, - ) - loginResp, err := client.Register(serverPublicKey, validSetupKey.String(), jwtToken, info, pubSSHKey, config.DNSLabels) - if err != nil { - log.Errorf("failed registering peer %v", err) - return nil, err - } - - log.Infof("peer has been successfully registered on Management Service") - - return loginResp, nil -} - -func isLoginNeeded(err error) bool { - if err == nil { - return false - } - s, ok := status.FromError(err) - if !ok { - return false - } - if s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied { - return true - } - return false -} - -func isRegistrationNeeded(err error) bool { - if err == nil { - return false - } - s, ok := status.FromError(err) - if !ok { - return false - } - if s.Code() == codes.PermissionDenied { - return true - } - return false -} diff --git a/client/internal/pkce_auth.go b/client/internal/pkce_auth.go deleted file mode 100644 index 23c92e8af..000000000 --- a/client/internal/pkce_auth.go +++ /dev/null @@ -1,138 +0,0 @@ -package internal - -import ( - "context" - "crypto/tls" - "fmt" - "net/url" - - log "github.com/sirupsen/logrus" - "golang.zx2c4.com/wireguard/wgctrl/wgtypes" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - mgm "github.com/netbirdio/netbird/shared/management/client" - "github.com/netbirdio/netbird/shared/management/client/common" -) - -// PKCEAuthorizationFlow represents PKCE Authorization Flow information -type PKCEAuthorizationFlow struct { - ProviderConfig PKCEAuthProviderConfig -} - -// PKCEAuthProviderConfig has all attributes needed to initiate pkce authorization flow -type PKCEAuthProviderConfig struct { - // ClientID An IDP application client id - ClientID string - // ClientSecret An IDP application client secret - ClientSecret string - // Audience An Audience for to authorization validation - Audience string - // TokenEndpoint is the endpoint of an IDP manager where clients can obtain access token - TokenEndpoint string - // AuthorizationEndpoint is the endpoint of an IDP manager where clients can obtain authorization code - AuthorizationEndpoint string - // Scopes provides the scopes to be included in the token request - Scope string - // RedirectURL handles authorization code from IDP manager - RedirectURLs []string - // UseIDToken indicates if the id token should be used for authentication - UseIDToken bool - // ClientCertPair is used for mTLS authentication to the IDP - ClientCertPair *tls.Certificate - // DisablePromptLogin makes the PKCE flow to not prompt the user for login - DisablePromptLogin bool - // LoginFlag is used to configure the PKCE flow login behavior - LoginFlag common.LoginFlag - // LoginHint is used to pre-fill the email/username field during authentication - LoginHint string -} - -// GetPKCEAuthorizationFlowInfo initialize a PKCEAuthorizationFlow instance and return with it -func GetPKCEAuthorizationFlowInfo(ctx context.Context, privateKey string, mgmURL *url.URL, clientCert *tls.Certificate) (PKCEAuthorizationFlow, error) { - // validate our peer's Wireguard PRIVATE key - myPrivateKey, err := wgtypes.ParseKey(privateKey) - if err != nil { - log.Errorf("failed parsing Wireguard key %s: [%s]", privateKey, err.Error()) - return PKCEAuthorizationFlow{}, err - } - - var mgmTLSEnabled bool - if mgmURL.Scheme == "https" { - mgmTLSEnabled = true - } - - log.Debugf("connecting to Management Service %s", mgmURL.String()) - mgmClient, err := mgm.NewClient(ctx, mgmURL.Host, myPrivateKey, mgmTLSEnabled) - if err != nil { - log.Errorf("failed connecting to Management Service %s %v", mgmURL.String(), err) - return PKCEAuthorizationFlow{}, err - } - log.Debugf("connected to the Management service %s", mgmURL.String()) - - defer func() { - err = mgmClient.Close() - if err != nil { - log.Warnf("failed to close the Management service client %v", err) - } - }() - - serverKey, err := mgmClient.GetServerPublicKey() - if err != nil { - log.Errorf("failed while getting Management Service public key: %v", err) - return PKCEAuthorizationFlow{}, err - } - - protoPKCEAuthorizationFlow, err := mgmClient.GetPKCEAuthorizationFlow(*serverKey) - if err != nil { - if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { - log.Warnf("server couldn't find pkce flow, contact admin: %v", err) - return PKCEAuthorizationFlow{}, err - } - log.Errorf("failed to retrieve pkce flow: %v", err) - return PKCEAuthorizationFlow{}, err - } - - authFlow := PKCEAuthorizationFlow{ - ProviderConfig: PKCEAuthProviderConfig{ - Audience: protoPKCEAuthorizationFlow.GetProviderConfig().GetAudience(), - ClientID: protoPKCEAuthorizationFlow.GetProviderConfig().GetClientID(), - ClientSecret: protoPKCEAuthorizationFlow.GetProviderConfig().GetClientSecret(), - TokenEndpoint: protoPKCEAuthorizationFlow.GetProviderConfig().GetTokenEndpoint(), - AuthorizationEndpoint: protoPKCEAuthorizationFlow.GetProviderConfig().GetAuthorizationEndpoint(), - Scope: protoPKCEAuthorizationFlow.GetProviderConfig().GetScope(), - RedirectURLs: protoPKCEAuthorizationFlow.GetProviderConfig().GetRedirectURLs(), - UseIDToken: protoPKCEAuthorizationFlow.GetProviderConfig().GetUseIDToken(), - ClientCertPair: clientCert, - DisablePromptLogin: protoPKCEAuthorizationFlow.GetProviderConfig().GetDisablePromptLogin(), - LoginFlag: common.LoginFlag(protoPKCEAuthorizationFlow.GetProviderConfig().GetLoginFlag()), - }, - } - - err = isPKCEProviderConfigValid(authFlow.ProviderConfig) - if err != nil { - return PKCEAuthorizationFlow{}, err - } - - return authFlow, nil -} - -func isPKCEProviderConfigValid(config PKCEAuthProviderConfig) error { - errorMSGFormat := "invalid provider configuration received from management: %s value is empty. Contact your NetBird administrator" - if config.ClientID == "" { - return fmt.Errorf(errorMSGFormat, "Client ID") - } - if config.TokenEndpoint == "" { - return fmt.Errorf(errorMSGFormat, "Token Endpoint") - } - if config.AuthorizationEndpoint == "" { - return fmt.Errorf(errorMSGFormat, "Authorization Auth Endpoint") - } - if config.Scope == "" { - return fmt.Errorf(errorMSGFormat, "PKCE Auth Scopes") - } - if config.RedirectURLs == nil { - return fmt.Errorf(errorMSGFormat, "PKCE Redirect URLs") - } - return nil -} diff --git a/client/ios/NetBirdSDK/client.go b/client/ios/NetBirdSDK/client.go index 935910fc9..aafef41d3 100644 --- a/client/ios/NetBirdSDK/client.go +++ b/client/ios/NetBirdSDK/client.go @@ -263,7 +263,14 @@ func (c *Client) IsLoginRequired() bool { return true } - needsLogin, err := internal.IsLoginRequired(ctx, cfg) + authClient, err := auth.NewAuth(ctx, cfg.PrivateKey, cfg.ManagementURL, cfg) + if err != nil { + log.Errorf("IsLoginRequired: failed to create auth client: %v", err) + return true // Assume login is required if we can't create auth client + } + defer authClient.Close() + + needsLogin, err := authClient.IsLoginRequired(ctx) if err != nil { log.Errorf("IsLoginRequired: check failed: %v", err) // If the check fails, assume login is required to be safe @@ -314,16 +321,19 @@ func (c *Client) LoginForMobile() string { // This could cause a potential race condition with loading the extension which need to be handled on swift side go func() { - waitTimeout := time.Duration(flowInfo.ExpiresIn) * time.Second - waitCTX, cancel := context.WithTimeout(ctx, waitTimeout) - defer cancel() - tokenInfo, err := oAuthFlow.WaitToken(waitCTX, flowInfo) + tokenInfo, err := oAuthFlow.WaitToken(ctx, flowInfo) if err != nil { log.Errorf("LoginForMobile: WaitToken failed: %v", err) return } jwtToken := tokenInfo.GetTokenToUse() - if err := internal.Login(ctx, cfg, "", jwtToken); err != nil { + authClient, err := auth.NewAuth(ctx, cfg.PrivateKey, cfg.ManagementURL, cfg) + if err != nil { + log.Errorf("LoginForMobile: failed to create auth client: %v", err) + return + } + defer authClient.Close() + if err, _ := authClient.Login(ctx, "", jwtToken); err != nil { log.Errorf("LoginForMobile: Login failed: %v", err) return } diff --git a/client/ios/NetBirdSDK/login.go b/client/ios/NetBirdSDK/login.go index 27fdcf5ef..9d447ef3f 100644 --- a/client/ios/NetBirdSDK/login.go +++ b/client/ios/NetBirdSDK/login.go @@ -7,13 +7,8 @@ import ( "fmt" "time" - "github.com/cenkalti/backoff/v4" log "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - gstatus "google.golang.org/grpc/status" - "github.com/netbirdio/netbird/client/cmd" - "github.com/netbirdio/netbird/client/internal" "github.com/netbirdio/netbird/client/internal/auth" "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/system" @@ -90,34 +85,21 @@ func (a *Auth) SaveConfigIfSSOSupported(listener SSOListener) { } func (a *Auth) saveConfigIfSSOSupported() (bool, error) { - supportsSSO := true - err := a.withBackOff(a.ctx, func() (err error) { - _, err = internal.GetPKCEAuthorizationFlowInfo(a.ctx, a.config.PrivateKey, a.config.ManagementURL, nil) - if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.NotFound || s.Code() == codes.Unimplemented) { - _, err = internal.GetDeviceAuthorizationFlowInfo(a.ctx, a.config.PrivateKey, a.config.ManagementURL) - s, ok := gstatus.FromError(err) - if !ok { - return err - } - if s.Code() == codes.NotFound || s.Code() == codes.Unimplemented { - supportsSSO = false - err = nil - } + authClient, err := auth.NewAuth(a.ctx, a.config.PrivateKey, a.config.ManagementURL, a.config) + if err != nil { + return false, fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() - return err - } - - return err - }) + supportsSSO, err := authClient.IsSSOSupported(a.ctx) + if err != nil { + return false, fmt.Errorf("failed to check SSO support: %v", err) + } if !supportsSSO { return false, nil } - if err != nil { - return false, fmt.Errorf("backoff cycle failed: %v", err) - } - // Use DirectWriteOutConfig to avoid atomic file operations (temp file + rename) // which are blocked by the tvOS sandbox in App Group containers err = profilemanager.DirectWriteOutConfig(a.cfgPath, a.config) @@ -141,19 +123,17 @@ func (a *Auth) LoginWithSetupKeyAndSaveConfig(resultListener ErrListener, setupK } func (a *Auth) loginWithSetupKeyAndSaveConfig(setupKey string, deviceName string) error { + authClient, err := auth.NewAuth(a.ctx, a.config.PrivateKey, a.config.ManagementURL, a.config) + if err != nil { + return fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() + //nolint ctxWithValues := context.WithValue(a.ctx, system.DeviceNameCtxKey, deviceName) - - err := a.withBackOff(a.ctx, func() error { - backoffErr := internal.Login(ctxWithValues, a.config, setupKey, "") - if s, ok := gstatus.FromError(backoffErr); ok && (s.Code() == codes.PermissionDenied) { - // we got an answer from management, exit backoff earlier - return backoff.Permanent(backoffErr) - } - return backoffErr - }) + err, _ = authClient.Login(ctxWithValues, setupKey, "") if err != nil { - return fmt.Errorf("backoff cycle failed: %v", err) + return fmt.Errorf("login failed: %v", err) } // Use DirectWriteOutConfig to avoid atomic file operations (temp file + rename) @@ -164,15 +144,16 @@ func (a *Auth) loginWithSetupKeyAndSaveConfig(setupKey string, deviceName string // LoginSync performs a synchronous login check without UI interaction // Used for background VPN connection where user should already be authenticated func (a *Auth) LoginSync() error { - var needsLogin bool + authClient, err := auth.NewAuth(a.ctx, a.config.PrivateKey, a.config.ManagementURL, a.config) + if err != nil { + return fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() // check if we need to generate JWT token - err := a.withBackOff(a.ctx, func() (err error) { - needsLogin, err = internal.IsLoginRequired(a.ctx, a.config) - return - }) + needsLogin, err := authClient.IsLoginRequired(a.ctx) if err != nil { - return fmt.Errorf("backoff cycle failed: %v", err) + return fmt.Errorf("failed to check login requirement: %v", err) } jwtToken := "" @@ -180,15 +161,12 @@ func (a *Auth) LoginSync() error { return fmt.Errorf("not authenticated") } - err = a.withBackOff(a.ctx, func() error { - err := internal.Login(a.ctx, a.config, "", jwtToken) - if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.PermissionDenied) { - // PermissionDenied means registration is required or peer is blocked - return backoff.Permanent(err) - } - return err - }) + err, isAuthError := authClient.Login(a.ctx, "", jwtToken) if err != nil { + if isAuthError { + // PermissionDenied means registration is required or peer is blocked + return fmt.Errorf("authentication error: %v", err) + } return fmt.Errorf("login failed: %v", err) } @@ -225,8 +203,6 @@ func (a *Auth) LoginWithDeviceName(resultListener ErrListener, urlOpener URLOpen } func (a *Auth) login(urlOpener URLOpener, forceDeviceAuth bool, deviceName string) error { - var needsLogin bool - // Create context with device name if provided ctx := a.ctx if deviceName != "" { @@ -234,33 +210,33 @@ func (a *Auth) login(urlOpener URLOpener, forceDeviceAuth bool, deviceName strin ctx = context.WithValue(a.ctx, system.DeviceNameCtxKey, deviceName) } - // check if we need to generate JWT token - err := a.withBackOff(ctx, func() (err error) { - needsLogin, err = internal.IsLoginRequired(ctx, a.config) - return - }) + authClient, err := auth.NewAuth(ctx, a.config.PrivateKey, a.config.ManagementURL, a.config) if err != nil { - return fmt.Errorf("backoff cycle failed: %v", err) + return fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() + + // check if we need to generate JWT token + needsLogin, err := authClient.IsLoginRequired(ctx) + if err != nil { + return fmt.Errorf("failed to check login requirement: %v", err) } jwtToken := "" if needsLogin { - tokenInfo, err := a.foregroundGetTokenInfo(urlOpener, forceDeviceAuth) + tokenInfo, err := a.foregroundGetTokenInfo(authClient, urlOpener, forceDeviceAuth) if err != nil { return fmt.Errorf("interactive sso login failed: %v", err) } jwtToken = tokenInfo.GetTokenToUse() } - err = a.withBackOff(ctx, func() error { - err := internal.Login(ctx, a.config, "", jwtToken) - if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.PermissionDenied) { - // PermissionDenied means registration is required or peer is blocked - return backoff.Permanent(err) - } - return err - }) + err, isAuthError := authClient.Login(ctx, "", jwtToken) if err != nil { + if isAuthError { + // PermissionDenied means registration is required or peer is blocked + return fmt.Errorf("authentication error: %v", err) + } return fmt.Errorf("login failed: %v", err) } @@ -285,10 +261,10 @@ func (a *Auth) login(urlOpener URLOpener, forceDeviceAuth bool, deviceName strin const authInfoRequestTimeout = 30 * time.Second -func (a *Auth) foregroundGetTokenInfo(urlOpener URLOpener, forceDeviceAuth bool) (*auth.TokenInfo, error) { - oAuthFlow, err := auth.NewOAuthFlow(a.ctx, a.config, false, forceDeviceAuth, "") +func (a *Auth) foregroundGetTokenInfo(authClient *auth.Auth, urlOpener URLOpener, forceDeviceAuth bool) (*auth.TokenInfo, error) { + oAuthFlow, err := authClient.GetOAuthFlow(a.ctx, forceDeviceAuth) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get OAuth flow: %v", err) } // Use a bounded timeout for the auth info request to prevent indefinite hangs @@ -313,15 +289,6 @@ func (a *Auth) foregroundGetTokenInfo(urlOpener URLOpener, forceDeviceAuth bool) return &tokenInfo, nil } -func (a *Auth) withBackOff(ctx context.Context, bf func() error) error { - return backoff.RetryNotify( - bf, - backoff.WithContext(cmd.CLIBackOffSettings, ctx), - func(err error, duration time.Duration) { - log.Warnf("retrying Login to the Management service in %v due to error %v", duration, err) - }) -} - // GetConfigJSON returns the current config as a JSON string. // This can be used by the caller to persist the config via alternative storage // mechanisms (e.g., UserDefaults on tvOS where file writes are blocked). diff --git a/client/server/server.go b/client/server/server.go index b291d7f71..108eab9fe 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -253,10 +253,17 @@ func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profil // loginAttempt attempts to login using the provided information. it returns a status in case something fails func (s *Server) loginAttempt(ctx context.Context, setupKey, jwtToken string) (internal.StatusType, error) { - var status internal.StatusType - err := internal.Login(ctx, s.config, setupKey, jwtToken) + authClient, err := auth.NewAuth(ctx, s.config.PrivateKey, s.config.ManagementURL, s.config) if err != nil { - if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied) { + log.Errorf("failed to create auth client: %v", err) + return internal.StatusLoginFailed, err + } + defer authClient.Close() + + var status internal.StatusType + err, isAuthError := authClient.Login(ctx, setupKey, jwtToken) + if err != nil { + if isAuthError { log.Warnf("failed login: %v", err) status = internal.StatusNeedsLogin } else { @@ -581,8 +588,7 @@ func (s *Server) WaitSSOLogin(callerCtx context.Context, msg *proto.WaitSSOLogin s.oauthAuthFlow.waitCancel() } - waitTimeout := time.Until(s.oauthAuthFlow.expiresAt) - waitCTX, cancel := context.WithTimeout(ctx, waitTimeout) + waitCTX, cancel := context.WithCancel(ctx) defer cancel() s.mutex.Lock() From 2381e216e421379f9cbb70c372fcdefe32b7cc58 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Sat, 24 Jan 2026 17:49:25 +0100 Subject: [PATCH 079/374] Fix validator message with warn (#5168) --- shared/auth/jwt/validator.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/shared/auth/jwt/validator.go b/shared/auth/jwt/validator.go index ede7acea5..aeaa5842c 100644 --- a/shared/auth/jwt/validator.go +++ b/shared/auth/jwt/validator.go @@ -72,8 +72,8 @@ var ( func NewValidator(issuer string, audienceList []string, keysLocation string, idpSignkeyRefreshEnabled bool) *Validator { keys, err := getPemKeys(keysLocation) - if err != nil { - log.WithField("keysLocation", keysLocation).Errorf("could not get keys from location: %s", err) + if err != nil && !strings.Contains(keysLocation, "localhost") { + log.WithField("keysLocation", keysLocation).Warnf("could not get keys from location: %s, it will try again on the next http request", err) } return &Validator{ From 074df56c3d7b47c17b35198894c1aa850e4c0c57 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Mon, 26 Jan 2026 16:30:00 +0800 Subject: [PATCH 080/374] [client] Fix flaky JWT SSH test (#5181) --- client/ssh/server/jwt_test.go | 12 ++++++------ client/ssh/server/test.go | 3 +-- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/client/ssh/server/jwt_test.go b/client/ssh/server/jwt_test.go index dbef011ac..b2f3ac6a0 100644 --- a/client/ssh/server/jwt_test.go +++ b/client/ssh/server/jwt_test.go @@ -54,7 +54,7 @@ func TestJWTEnforcement(t *testing.T) { server.SetAllowRootLogin(true) serverAddr := StartTestServer(t, server) - defer require.NoError(t, server.Stop()) + defer func() { require.NoError(t, server.Stop()) }() host, portStr, err := net.SplitHostPort(serverAddr) require.NoError(t, err) @@ -88,7 +88,7 @@ func TestJWTEnforcement(t *testing.T) { serverNoJWT.SetAllowRootLogin(true) serverAddrNoJWT := StartTestServer(t, serverNoJWT) - defer require.NoError(t, serverNoJWT.Stop()) + defer func() { require.NoError(t, serverNoJWT.Stop()) }() hostNoJWT, portStrNoJWT, err := net.SplitHostPort(serverAddrNoJWT) require.NoError(t, err) @@ -213,7 +213,7 @@ func TestJWTDetection(t *testing.T) { server.SetAllowRootLogin(true) serverAddr := StartTestServer(t, server) - defer require.NoError(t, server.Stop()) + defer func() { require.NoError(t, server.Stop()) }() host, portStr, err := net.SplitHostPort(serverAddr) require.NoError(t, err) @@ -341,7 +341,7 @@ func TestJWTFailClose(t *testing.T) { server.SetAllowRootLogin(true) serverAddr := StartTestServer(t, server) - defer require.NoError(t, server.Stop()) + defer func() { require.NoError(t, server.Stop()) }() host, portStr, err := net.SplitHostPort(serverAddr) require.NoError(t, err) @@ -596,7 +596,7 @@ func TestJWTAuthentication(t *testing.T) { server.UpdateSSHAuth(authConfig) serverAddr := StartTestServer(t, server) - defer require.NoError(t, server.Stop()) + defer func() { require.NoError(t, server.Stop()) }() host, portStr, err := net.SplitHostPort(serverAddr) require.NoError(t, err) @@ -715,7 +715,7 @@ func TestJWTMultipleAudiences(t *testing.T) { server.UpdateSSHAuth(authConfig) serverAddr := StartTestServer(t, server) - defer require.NoError(t, server.Stop()) + defer func() { require.NoError(t, server.Stop()) }() host, portStr, err := net.SplitHostPort(serverAddr) require.NoError(t, err) diff --git a/client/ssh/server/test.go b/client/ssh/server/test.go index f8abd1752..454d3afa3 100644 --- a/client/ssh/server/test.go +++ b/client/ssh/server/test.go @@ -8,19 +8,18 @@ import ( "time" ) +// StartTestServer starts the SSH server and returns the address it's listening on. func StartTestServer(t *testing.T, server *Server) string { started := make(chan string, 1) errChan := make(chan error, 1) go func() { - // Use port 0 to let the OS assign a free port addrPort := netip.MustParseAddrPort("127.0.0.1:0") if err := server.Start(context.Background(), addrPort); err != nil { errChan <- err return } - // Get the actual listening address from the server actualAddr := server.Addr() if actualAddr == nil { errChan <- fmt.Errorf("server started but no listener address available") From 05af39a69b510bb6094588eee1f6776fef165476 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Mon, 26 Jan 2026 14:03:32 +0100 Subject: [PATCH 081/374] [client] Add IPv6 support to UDP WireGuard proxy (#5169) * Add IPv6 support to UDP WireGuard proxy Add IPv6 packet header support in UDP raw socket proxy to handle both IPv4 and IPv6 source addresses. Refactor error handling in proxy bind implementations to validate endpoints before acquiring locks. --- client/iface/wgproxy/bind/proxy.go | 40 +-- client/iface/wgproxy/ebpf/proxy.go | 93 +++++- client/iface/wgproxy/ebpf/wrapper.go | 10 +- client/iface/wgproxy/rawsocket/rawsocket.go | 56 +++- client/iface/wgproxy/redirect_test.go | 353 ++++++++++++++++++++ client/iface/wgproxy/udp/proxy.go | 2 +- client/iface/wgproxy/udp/rawsocket.go | 78 +++-- 7 files changed, 565 insertions(+), 67 deletions(-) create mode 100644 client/iface/wgproxy/redirect_test.go diff --git a/client/iface/wgproxy/bind/proxy.go b/client/iface/wgproxy/bind/proxy.go index 9978cceee..9ac3ea6df 100644 --- a/client/iface/wgproxy/bind/proxy.go +++ b/client/iface/wgproxy/bind/proxy.go @@ -114,34 +114,21 @@ func (p *ProxyBind) Pause() { } func (p *ProxyBind) RedirectAs(endpoint *net.UDPAddr) { + ep, err := addrToEndpoint(endpoint) + if err != nil { + log.Errorf("failed to start package redirection: %v", err) + return + } + p.pausedCond.L.Lock() p.paused = false - ep, err := addrToEndpoint(endpoint) - if err != nil { - log.Errorf("failed to convert endpoint address: %v", err) - } else { - p.wgCurrentUsed = ep - } + p.wgCurrentUsed = ep p.pausedCond.Signal() p.pausedCond.L.Unlock() } -func addrToEndpoint(addr *net.UDPAddr) (*bind.Endpoint, error) { - if addr == nil { - return nil, errors.New("nil address") - } - - ip, ok := netip.AddrFromSlice(addr.IP) - if !ok { - return nil, fmt.Errorf("convert %s to netip.Addr", addr) - } - - addrPort := netip.AddrPortFrom(ip.Unmap(), uint16(addr.Port)) - return &bind.Endpoint{AddrPort: addrPort}, nil -} - func (p *ProxyBind) CloseConn() error { if p.cancel == nil { return fmt.Errorf("proxy not started") @@ -225,3 +212,16 @@ func fakeAddress(peerAddress *net.UDPAddr) (*netip.AddrPort, error) { netipAddr := netip.AddrPortFrom(fakeIP, uint16(peerAddress.Port)) return &netipAddr, nil } + +func addrToEndpoint(addr *net.UDPAddr) (*bind.Endpoint, error) { + if addr == nil { + return nil, fmt.Errorf("invalid address") + } + ip, ok := netip.AddrFromSlice(addr.IP) + if !ok { + return nil, fmt.Errorf("convert %s to netip.Addr", addr) + } + + addrPort := netip.AddrPortFrom(ip.Unmap(), uint16(addr.Port)) + return &bind.Endpoint{AddrPort: addrPort}, nil +} diff --git a/client/iface/wgproxy/ebpf/proxy.go b/client/iface/wgproxy/ebpf/proxy.go index 858143091..0c1c886d7 100644 --- a/client/iface/wgproxy/ebpf/proxy.go +++ b/client/iface/wgproxy/ebpf/proxy.go @@ -27,7 +27,13 @@ const ( ) var ( - localHostNetIP = net.ParseIP("127.0.0.1") + localHostNetIPv4 = net.ParseIP("127.0.0.1") + localHostNetIPv6 = net.ParseIP("::1") + + serializeOpts = gopacket.SerializeOptions{ + ComputeChecksums: true, + FixLengths: true, + } ) // WGEBPFProxy definition for proxy with EBPF support @@ -40,7 +46,8 @@ type WGEBPFProxy struct { turnConnMutex sync.Mutex lastUsedPort uint16 - rawConn net.PacketConn + rawConnIPv4 net.PacketConn + rawConnIPv6 net.PacketConn conn transport.UDPConn ctx context.Context @@ -67,13 +74,28 @@ func (p *WGEBPFProxy) Listen() error { return err } - p.rawConn, err = rawsocket.PrepareSenderRawSocket() + // Prepare IPv4 raw socket (required) + p.rawConnIPv4, err = rawsocket.PrepareSenderRawSocketIPv4() if err != nil { return err } + // Prepare IPv6 raw socket (optional) + p.rawConnIPv6, err = rawsocket.PrepareSenderRawSocketIPv6() + if err != nil { + log.Warnf("failed to prepare IPv6 raw socket, continuing with IPv4 only: %v", err) + } + err = p.ebpfManager.LoadWgProxy(wgPorxyPort, p.localWGListenPort) if err != nil { + if closeErr := p.rawConnIPv4.Close(); closeErr != nil { + log.Warnf("failed to close IPv4 raw socket: %v", closeErr) + } + if p.rawConnIPv6 != nil { + if closeErr := p.rawConnIPv6.Close(); closeErr != nil { + log.Warnf("failed to close IPv6 raw socket: %v", closeErr) + } + } return err } @@ -135,8 +157,16 @@ func (p *WGEBPFProxy) Free() error { result = multierror.Append(result, err) } - if err := p.rawConn.Close(); err != nil { - result = multierror.Append(result, err) + if p.rawConnIPv4 != nil { + if err := p.rawConnIPv4.Close(); err != nil { + result = multierror.Append(result, err) + } + } + + if p.rawConnIPv6 != nil { + if err := p.rawConnIPv6.Close(); err != nil { + result = multierror.Append(result, err) + } } return nberrors.FormatErrorOrNil(result) } @@ -218,31 +248,60 @@ generatePort: } func (p *WGEBPFProxy) sendPkg(data []byte, endpointAddr *net.UDPAddr) error { - payload := gopacket.Payload(data) - ipH := &layers.IPv4{ - DstIP: localHostNetIP, - SrcIP: endpointAddr.IP, - Version: 4, - TTL: 64, - Protocol: layers.IPProtocolUDP, + + var ipH gopacket.SerializableLayer + var networkLayer gopacket.NetworkLayer + var dstIP net.IP + var rawConn net.PacketConn + + if endpointAddr.IP.To4() != nil { + // IPv4 path + ipv4 := &layers.IPv4{ + DstIP: localHostNetIPv4, + SrcIP: endpointAddr.IP, + Version: 4, + TTL: 64, + Protocol: layers.IPProtocolUDP, + } + ipH = ipv4 + networkLayer = ipv4 + dstIP = localHostNetIPv4 + rawConn = p.rawConnIPv4 + } else { + // IPv6 path + if p.rawConnIPv6 == nil { + return fmt.Errorf("IPv6 raw socket not available") + } + ipv6 := &layers.IPv6{ + DstIP: localHostNetIPv6, + SrcIP: endpointAddr.IP, + Version: 6, + HopLimit: 64, + NextHeader: layers.IPProtocolUDP, + } + ipH = ipv6 + networkLayer = ipv6 + dstIP = localHostNetIPv6 + rawConn = p.rawConnIPv6 } + udpH := &layers.UDP{ SrcPort: layers.UDPPort(endpointAddr.Port), DstPort: layers.UDPPort(p.localWGListenPort), } - err := udpH.SetNetworkLayerForChecksum(ipH) - if err != nil { + if err := udpH.SetNetworkLayerForChecksum(networkLayer); err != nil { return fmt.Errorf("set network layer for checksum: %w", err) } layerBuffer := gopacket.NewSerializeBuffer() + payload := gopacket.Payload(data) - err = gopacket.SerializeLayers(layerBuffer, gopacket.SerializeOptions{ComputeChecksums: true, FixLengths: true}, ipH, udpH, payload) - if err != nil { + if err := gopacket.SerializeLayers(layerBuffer, serializeOpts, ipH, udpH, payload); err != nil { return fmt.Errorf("serialize layers: %w", err) } - if _, err = p.rawConn.WriteTo(layerBuffer.Bytes(), &net.IPAddr{IP: localHostNetIP}); err != nil { + + if _, err := rawConn.WriteTo(layerBuffer.Bytes(), &net.IPAddr{IP: dstIP}); err != nil { return fmt.Errorf("write to raw conn: %w", err) } return nil diff --git a/client/iface/wgproxy/ebpf/wrapper.go b/client/iface/wgproxy/ebpf/wrapper.go index f1f05a7c9..5b98be7b4 100644 --- a/client/iface/wgproxy/ebpf/wrapper.go +++ b/client/iface/wgproxy/ebpf/wrapper.go @@ -41,7 +41,7 @@ func NewProxyWrapper(proxy *WGEBPFProxy) *ProxyWrapper { closeListener: listener.NewCloseListener(), } } -func (p *ProxyWrapper) AddTurnConn(ctx context.Context, endpoint *net.UDPAddr, remoteConn net.Conn) error { +func (p *ProxyWrapper) AddTurnConn(ctx context.Context, _ *net.UDPAddr, remoteConn net.Conn) error { addr, err := p.wgeBPFProxy.AddTurnConn(remoteConn) if err != nil { return fmt.Errorf("add turn conn: %w", err) @@ -91,12 +91,14 @@ func (p *ProxyWrapper) Pause() { } func (p *ProxyWrapper) RedirectAs(endpoint *net.UDPAddr) { + if endpoint == nil || endpoint.IP == nil { + log.Errorf("failed to start package redirection, endpoint is nil") + return + } p.pausedCond.L.Lock() p.paused = false - if endpoint != nil && endpoint.IP != nil { - p.wgEndpointCurrentUsedAddr = endpoint - } + p.wgEndpointCurrentUsedAddr = endpoint p.pausedCond.Signal() p.pausedCond.L.Unlock() diff --git a/client/iface/wgproxy/rawsocket/rawsocket.go b/client/iface/wgproxy/rawsocket/rawsocket.go index a11ac46d5..bc785b43a 100644 --- a/client/iface/wgproxy/rawsocket/rawsocket.go +++ b/client/iface/wgproxy/rawsocket/rawsocket.go @@ -8,43 +8,87 @@ import ( "os" "syscall" + log "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" + nbnet "github.com/netbirdio/netbird/client/net" ) -func PrepareSenderRawSocket() (net.PacketConn, error) { +// PrepareSenderRawSocketIPv4 creates and configures a raw socket for sending IPv4 packets +func PrepareSenderRawSocketIPv4() (net.PacketConn, error) { + return prepareSenderRawSocket(syscall.AF_INET, true) +} + +// PrepareSenderRawSocketIPv6 creates and configures a raw socket for sending IPv6 packets +func PrepareSenderRawSocketIPv6() (net.PacketConn, error) { + return prepareSenderRawSocket(syscall.AF_INET6, false) +} + +func prepareSenderRawSocket(family int, isIPv4 bool) (net.PacketConn, error) { // Create a raw socket. - fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_RAW, syscall.IPPROTO_RAW) + fd, err := syscall.Socket(family, syscall.SOCK_RAW, syscall.IPPROTO_RAW) if err != nil { return nil, fmt.Errorf("creating raw socket failed: %w", err) } - // Set the IP_HDRINCL option on the socket to tell the kernel that headers are included in the packet. - err = syscall.SetsockoptInt(fd, syscall.IPPROTO_IP, syscall.IP_HDRINCL, 1) - if err != nil { - return nil, fmt.Errorf("setting IP_HDRINCL failed: %w", err) + // Set the header include option on the socket to tell the kernel that headers are included in the packet. + // For IPv4, we need to set IP_HDRINCL. For IPv6, we need to set IPV6_HDRINCL to accept application-provided IPv6 headers. + if isIPv4 { + err = syscall.SetsockoptInt(fd, syscall.IPPROTO_IP, unix.IP_HDRINCL, 1) + if err != nil { + if closeErr := syscall.Close(fd); closeErr != nil { + log.Warnf("failed to close raw socket fd: %v", closeErr) + } + return nil, fmt.Errorf("setting IP_HDRINCL failed: %w", err) + } + } else { + err = syscall.SetsockoptInt(fd, syscall.IPPROTO_IPV6, unix.IPV6_HDRINCL, 1) + if err != nil { + if closeErr := syscall.Close(fd); closeErr != nil { + log.Warnf("failed to close raw socket fd: %v", closeErr) + } + return nil, fmt.Errorf("setting IPV6_HDRINCL failed: %w", err) + } } // Bind the socket to the "lo" interface. err = syscall.SetsockoptString(fd, syscall.SOL_SOCKET, syscall.SO_BINDTODEVICE, "lo") if err != nil { + if closeErr := syscall.Close(fd); closeErr != nil { + log.Warnf("failed to close raw socket fd: %v", closeErr) + } return nil, fmt.Errorf("binding to lo interface failed: %w", err) } // Set the fwmark on the socket. err = nbnet.SetSocketOpt(fd) if err != nil { + if closeErr := syscall.Close(fd); closeErr != nil { + log.Warnf("failed to close raw socket fd: %v", closeErr) + } return nil, fmt.Errorf("setting fwmark failed: %w", err) } // Convert the file descriptor to a PacketConn. file := os.NewFile(uintptr(fd), fmt.Sprintf("fd %d", fd)) if file == nil { + if closeErr := syscall.Close(fd); closeErr != nil { + log.Warnf("failed to close raw socket fd: %v", closeErr) + } return nil, fmt.Errorf("converting fd to file failed") } packetConn, err := net.FilePacketConn(file) if err != nil { + if closeErr := file.Close(); closeErr != nil { + log.Warnf("failed to close file: %v", closeErr) + } return nil, fmt.Errorf("converting file to packet conn failed: %w", err) } + // Close the original file to release the FD (net.FilePacketConn duplicates it) + if closeErr := file.Close(); closeErr != nil { + log.Warnf("failed to close file after creating packet conn: %v", closeErr) + } + return packetConn, nil } diff --git a/client/iface/wgproxy/redirect_test.go b/client/iface/wgproxy/redirect_test.go new file mode 100644 index 000000000..b52eead25 --- /dev/null +++ b/client/iface/wgproxy/redirect_test.go @@ -0,0 +1,353 @@ +//go:build linux && !android + +package wgproxy + +import ( + "context" + "net" + "testing" + "time" + + "github.com/netbirdio/netbird/client/iface/wgproxy/ebpf" + "github.com/netbirdio/netbird/client/iface/wgproxy/udp" +) + +// compareUDPAddr compares two UDP addresses, ignoring IPv6 zone IDs +// IPv6 link-local addresses include zone IDs (e.g., fe80::1%lo) which we should ignore +func compareUDPAddr(addr1, addr2 net.Addr) bool { + udpAddr1, ok1 := addr1.(*net.UDPAddr) + udpAddr2, ok2 := addr2.(*net.UDPAddr) + + if !ok1 || !ok2 { + return addr1.String() == addr2.String() + } + + // Compare IP and Port, ignoring zone + return udpAddr1.IP.Equal(udpAddr2.IP) && udpAddr1.Port == udpAddr2.Port +} + +// TestRedirectAs_eBPF_IPv4 tests RedirectAs with eBPF proxy using IPv4 addresses +func TestRedirectAs_eBPF_IPv4(t *testing.T) { + wgPort := 51850 + ebpfProxy := ebpf.NewWGEBPFProxy(wgPort, 1280) + if err := ebpfProxy.Listen(); err != nil { + t.Fatalf("failed to initialize ebpf proxy: %v", err) + } + defer func() { + if err := ebpfProxy.Free(); err != nil { + t.Errorf("failed to free ebpf proxy: %v", err) + } + }() + + proxy := ebpf.NewProxyWrapper(ebpfProxy) + + // NetBird UDP address of the remote peer + nbAddr := &net.UDPAddr{ + IP: net.ParseIP("100.108.111.177"), + Port: 38746, + } + + p2pEndpoint := &net.UDPAddr{ + IP: net.ParseIP("192.168.0.56"), + Port: 51820, + } + + testRedirectAs(t, proxy, wgPort, nbAddr, p2pEndpoint) +} + +// TestRedirectAs_eBPF_IPv6 tests RedirectAs with eBPF proxy using IPv6 addresses +func TestRedirectAs_eBPF_IPv6(t *testing.T) { + wgPort := 51851 + ebpfProxy := ebpf.NewWGEBPFProxy(wgPort, 1280) + if err := ebpfProxy.Listen(); err != nil { + t.Fatalf("failed to initialize ebpf proxy: %v", err) + } + defer func() { + if err := ebpfProxy.Free(); err != nil { + t.Errorf("failed to free ebpf proxy: %v", err) + } + }() + + proxy := ebpf.NewProxyWrapper(ebpfProxy) + + // NetBird UDP address of the remote peer + nbAddr := &net.UDPAddr{ + IP: net.ParseIP("100.108.111.177"), + Port: 38746, + } + + p2pEndpoint := &net.UDPAddr{ + IP: net.ParseIP("fe80::56"), + Port: 51820, + } + + testRedirectAs(t, proxy, wgPort, nbAddr, p2pEndpoint) +} + +// TestRedirectAs_UDP_IPv4 tests RedirectAs with UDP proxy using IPv4 addresses +func TestRedirectAs_UDP_IPv4(t *testing.T) { + wgPort := 51852 + proxy := udp.NewWGUDPProxy(wgPort, 1280) + + // NetBird UDP address of the remote peer + nbAddr := &net.UDPAddr{ + IP: net.ParseIP("100.108.111.177"), + Port: 38746, + } + + p2pEndpoint := &net.UDPAddr{ + IP: net.ParseIP("192.168.0.56"), + Port: 51820, + } + + testRedirectAs(t, proxy, wgPort, nbAddr, p2pEndpoint) +} + +// TestRedirectAs_UDP_IPv6 tests RedirectAs with UDP proxy using IPv6 addresses +func TestRedirectAs_UDP_IPv6(t *testing.T) { + wgPort := 51853 + proxy := udp.NewWGUDPProxy(wgPort, 1280) + + // NetBird UDP address of the remote peer + nbAddr := &net.UDPAddr{ + IP: net.ParseIP("100.108.111.177"), + Port: 38746, + } + + p2pEndpoint := &net.UDPAddr{ + IP: net.ParseIP("fe80::56"), + Port: 51820, + } + + testRedirectAs(t, proxy, wgPort, nbAddr, p2pEndpoint) +} + +// testRedirectAs is a helper function that tests the RedirectAs functionality +// It verifies that: +// 1. Initial traffic from relay connection works +// 2. After calling RedirectAs, packets appear to come from the p2p endpoint +// 3. Multiple packets are correctly redirected with the new source address +func testRedirectAs(t *testing.T, proxy Proxy, wgPort int, nbAddr, p2pEndpoint *net.UDPAddr) { + t.Helper() + + ctx := context.Background() + + // Create WireGuard listeners on both IPv4 and IPv6 to support both P2P connection types + // In reality, WireGuard binds to a port and receives from both IPv4 and IPv6 + wgListener4, err := net.ListenUDP("udp4", &net.UDPAddr{ + IP: net.ParseIP("127.0.0.1"), + Port: wgPort, + }) + if err != nil { + t.Fatalf("failed to create IPv4 WireGuard listener: %v", err) + } + defer wgListener4.Close() + + wgListener6, err := net.ListenUDP("udp6", &net.UDPAddr{ + IP: net.ParseIP("::1"), + Port: wgPort, + }) + if err != nil { + t.Fatalf("failed to create IPv6 WireGuard listener: %v", err) + } + defer wgListener6.Close() + + // Determine which listener to use based on the NetBird address IP version + // (this is where initial traffic will come from before RedirectAs is called) + var wgListener *net.UDPConn + if p2pEndpoint.IP.To4() == nil { + wgListener = wgListener6 + } else { + wgListener = wgListener4 + } + + // Create relay server and connection + relayServer, err := net.ListenUDP("udp", &net.UDPAddr{ + IP: net.ParseIP("127.0.0.1"), + Port: 0, // Random port + }) + if err != nil { + t.Fatalf("failed to create relay server: %v", err) + } + defer relayServer.Close() + + relayConn, err := net.Dial("udp", relayServer.LocalAddr().String()) + if err != nil { + t.Fatalf("failed to create relay connection: %v", err) + } + defer relayConn.Close() + + // Add TURN connection to proxy + if err := proxy.AddTurnConn(ctx, nbAddr, relayConn); err != nil { + t.Fatalf("failed to add TURN connection: %v", err) + } + defer func() { + if err := proxy.CloseConn(); err != nil { + t.Errorf("failed to close proxy connection: %v", err) + } + }() + + // Start the proxy + proxy.Work() + + // Phase 1: Test initial relay traffic + msgFromRelay := []byte("hello from relay") + if _, err := relayServer.WriteTo(msgFromRelay, relayConn.LocalAddr()); err != nil { + t.Fatalf("failed to write to relay server: %v", err) + } + + // Set read deadline to avoid hanging + if err := wgListener4.SetReadDeadline(time.Now().Add(2 * time.Second)); err != nil { + t.Fatalf("failed to set read deadline: %v", err) + } + + buf := make([]byte, 1024) + n, _, err := wgListener4.ReadFrom(buf) + if err != nil { + t.Fatalf("failed to read from WireGuard listener: %v", err) + } + + if n != len(msgFromRelay) { + t.Errorf("expected %d bytes, got %d", len(msgFromRelay), n) + } + + if string(buf[:n]) != string(msgFromRelay) { + t.Errorf("expected message %q, got %q", msgFromRelay, buf[:n]) + } + + // Phase 2: Redirect to p2p endpoint + proxy.RedirectAs(p2pEndpoint) + + // Give the proxy a moment to process the redirect + time.Sleep(100 * time.Millisecond) + + // Phase 3: Test redirected traffic + redirectedMessages := [][]byte{ + []byte("redirected message 1"), + []byte("redirected message 2"), + []byte("redirected message 3"), + } + + for i, msg := range redirectedMessages { + if _, err := relayServer.WriteTo(msg, relayConn.LocalAddr()); err != nil { + t.Fatalf("failed to write redirected message %d: %v", i+1, err) + } + + if err := wgListener.SetReadDeadline(time.Now().Add(2 * time.Second)); err != nil { + t.Fatalf("failed to set read deadline: %v", err) + } + + n, srcAddr, err := wgListener.ReadFrom(buf) + if err != nil { + t.Fatalf("failed to read redirected message %d: %v", i+1, err) + } + + // Verify message content + if string(buf[:n]) != string(msg) { + t.Errorf("message %d: expected %q, got %q", i+1, msg, buf[:n]) + } + + // Verify source address matches p2p endpoint (this is the key test) + // Use compareUDPAddr to ignore IPv6 zone IDs + if !compareUDPAddr(srcAddr, p2pEndpoint) { + t.Errorf("message %d: expected source address %s, got %s", + i+1, p2pEndpoint.String(), srcAddr.String()) + } + } +} + +// TestRedirectAs_Multiple_Switches tests switching between multiple endpoints +func TestRedirectAs_Multiple_Switches(t *testing.T) { + wgPort := 51856 + ebpfProxy := ebpf.NewWGEBPFProxy(wgPort, 1280) + if err := ebpfProxy.Listen(); err != nil { + t.Fatalf("failed to initialize ebpf proxy: %v", err) + } + defer func() { + if err := ebpfProxy.Free(); err != nil { + t.Errorf("failed to free ebpf proxy: %v", err) + } + }() + + proxy := ebpf.NewProxyWrapper(ebpfProxy) + + ctx := context.Background() + + // Create WireGuard listener + wgListener, err := net.ListenUDP("udp4", &net.UDPAddr{ + IP: net.ParseIP("127.0.0.1"), + Port: wgPort, + }) + if err != nil { + t.Fatalf("failed to create WireGuard listener: %v", err) + } + defer wgListener.Close() + + // Create relay server and connection + relayServer, err := net.ListenUDP("udp", &net.UDPAddr{ + IP: net.ParseIP("127.0.0.1"), + Port: 0, + }) + if err != nil { + t.Fatalf("failed to create relay server: %v", err) + } + defer relayServer.Close() + + relayConn, err := net.Dial("udp", relayServer.LocalAddr().String()) + if err != nil { + t.Fatalf("failed to create relay connection: %v", err) + } + defer relayConn.Close() + + nbAddr := &net.UDPAddr{ + IP: net.ParseIP("100.108.111.177"), + Port: 38746, + } + + if err := proxy.AddTurnConn(ctx, nbAddr, relayConn); err != nil { + t.Fatalf("failed to add TURN connection: %v", err) + } + defer func() { + if err := proxy.CloseConn(); err != nil { + t.Errorf("failed to close proxy connection: %v", err) + } + }() + + proxy.Work() + + // Test switching between multiple endpoints - using addresses in local subnet + endpoints := []*net.UDPAddr{ + {IP: net.ParseIP("192.168.0.100"), Port: 51820}, + {IP: net.ParseIP("192.168.0.101"), Port: 51821}, + {IP: net.ParseIP("192.168.0.102"), Port: 51822}, + } + + for i, endpoint := range endpoints { + proxy.RedirectAs(endpoint) + time.Sleep(100 * time.Millisecond) + + msg := []byte("test message") + if _, err := relayServer.WriteTo(msg, relayConn.LocalAddr()); err != nil { + t.Fatalf("failed to write message for endpoint %d: %v", i, err) + } + + buf := make([]byte, 1024) + if err := wgListener.SetReadDeadline(time.Now().Add(2 * time.Second)); err != nil { + t.Fatalf("failed to set read deadline: %v", err) + } + + n, srcAddr, err := wgListener.ReadFrom(buf) + if err != nil { + t.Fatalf("failed to read message for endpoint %d: %v", i, err) + } + + if string(buf[:n]) != string(msg) { + t.Errorf("endpoint %d: expected message %q, got %q", i, msg, buf[:n]) + } + + if !compareUDPAddr(srcAddr, endpoint) { + t.Errorf("endpoint %d: expected source %s, got %s", + i, endpoint.String(), srcAddr.String()) + } + } +} diff --git a/client/iface/wgproxy/udp/proxy.go b/client/iface/wgproxy/udp/proxy.go index 4ef2f19c4..6069d1960 100644 --- a/client/iface/wgproxy/udp/proxy.go +++ b/client/iface/wgproxy/udp/proxy.go @@ -56,7 +56,7 @@ func NewWGUDPProxy(wgPort int, mtu uint16) *WGUDPProxy { // the connection is complete, an error is returned. Once successfully // connected, any expiration of the context will not affect the // connection. -func (p *WGUDPProxy) AddTurnConn(ctx context.Context, endpoint *net.UDPAddr, remoteConn net.Conn) error { +func (p *WGUDPProxy) AddTurnConn(ctx context.Context, _ *net.UDPAddr, remoteConn net.Conn) error { dialer := net.Dialer{} localConn, err := dialer.DialContext(ctx, "udp", fmt.Sprintf(":%d", p.localWGListenPort)) if err != nil { diff --git a/client/iface/wgproxy/udp/rawsocket.go b/client/iface/wgproxy/udp/rawsocket.go index fdc911463..cc099d9df 100644 --- a/client/iface/wgproxy/udp/rawsocket.go +++ b/client/iface/wgproxy/udp/rawsocket.go @@ -19,37 +19,56 @@ var ( FixLengths: true, } - localHostNetIPAddr = &net.IPAddr{ + localHostNetIPAddrV4 = &net.IPAddr{ IP: net.ParseIP("127.0.0.1"), } + localHostNetIPAddrV6 = &net.IPAddr{ + IP: net.ParseIP("::1"), + } ) type SrcFaker struct { srcAddr *net.UDPAddr - rawSocket net.PacketConn - ipH gopacket.SerializableLayer - udpH gopacket.SerializableLayer - layerBuffer gopacket.SerializeBuffer + rawSocket net.PacketConn + ipH gopacket.SerializableLayer + udpH gopacket.SerializableLayer + layerBuffer gopacket.SerializeBuffer + localHostAddr *net.IPAddr } func NewSrcFaker(dstPort int, srcAddr *net.UDPAddr) (*SrcFaker, error) { - rawSocket, err := rawsocket.PrepareSenderRawSocket() + // Create only the raw socket for the address family we need + var rawSocket net.PacketConn + var err error + var localHostAddr *net.IPAddr + + if srcAddr.IP.To4() != nil { + rawSocket, err = rawsocket.PrepareSenderRawSocketIPv4() + localHostAddr = localHostNetIPAddrV4 + } else { + rawSocket, err = rawsocket.PrepareSenderRawSocketIPv6() + localHostAddr = localHostNetIPAddrV6 + } if err != nil { return nil, err } ipH, udpH, err := prepareHeaders(dstPort, srcAddr) if err != nil { + if closeErr := rawSocket.Close(); closeErr != nil { + log.Warnf("failed to close raw socket: %v", closeErr) + } return nil, err } f := &SrcFaker{ - srcAddr: srcAddr, - rawSocket: rawSocket, - ipH: ipH, - udpH: udpH, - layerBuffer: gopacket.NewSerializeBuffer(), + srcAddr: srcAddr, + rawSocket: rawSocket, + ipH: ipH, + udpH: udpH, + layerBuffer: gopacket.NewSerializeBuffer(), + localHostAddr: localHostAddr, } return f, nil @@ -72,7 +91,7 @@ func (f *SrcFaker) SendPkg(data []byte) (int, error) { if err != nil { return 0, fmt.Errorf("serialize layers: %w", err) } - n, err := f.rawSocket.WriteTo(f.layerBuffer.Bytes(), localHostNetIPAddr) + n, err := f.rawSocket.WriteTo(f.layerBuffer.Bytes(), f.localHostAddr) if err != nil { return 0, fmt.Errorf("write to raw conn: %w", err) } @@ -80,19 +99,40 @@ func (f *SrcFaker) SendPkg(data []byte) (int, error) { } func prepareHeaders(dstPort int, srcAddr *net.UDPAddr) (gopacket.SerializableLayer, gopacket.SerializableLayer, error) { - ipH := &layers.IPv4{ - DstIP: net.ParseIP("127.0.0.1"), - SrcIP: srcAddr.IP, - Version: 4, - TTL: 64, - Protocol: layers.IPProtocolUDP, + var ipH gopacket.SerializableLayer + var networkLayer gopacket.NetworkLayer + + // Check if source IP is IPv4 or IPv6 + if srcAddr.IP.To4() != nil { + // IPv4 + ipv4 := &layers.IPv4{ + DstIP: localHostNetIPAddrV4.IP, + SrcIP: srcAddr.IP, + Version: 4, + TTL: 64, + Protocol: layers.IPProtocolUDP, + } + ipH = ipv4 + networkLayer = ipv4 + } else { + // IPv6 + ipv6 := &layers.IPv6{ + DstIP: localHostNetIPAddrV6.IP, + SrcIP: srcAddr.IP, + Version: 6, + HopLimit: 64, + NextHeader: layers.IPProtocolUDP, + } + ipH = ipv6 + networkLayer = ipv6 } + udpH := &layers.UDP{ SrcPort: layers.UDPPort(srcAddr.Port), DstPort: layers.UDPPort(dstPort), // dst is the localhost WireGuard port } - err := udpH.SetNetworkLayerForChecksum(ipH) + err := udpH.SetNetworkLayerForChecksum(networkLayer) if err != nil { return nil, nil, fmt.Errorf("set network layer for checksum: %w", err) } From 11f50d6c38033bcfbaf9238b58c755f56216364d Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Mon, 26 Jan 2026 22:26:29 +0100 Subject: [PATCH 082/374] Include default groups claim in CLI audience (#5186) --- management/server/idp/embedded.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/management/server/idp/embedded.go b/management/server/idp/embedded.go index 79859525b..db7a91fa3 100644 --- a/management/server/idp/embedded.go +++ b/management/server/idp/embedded.go @@ -20,7 +20,7 @@ const ( staticClientCLI = "netbird-cli" defaultCLIRedirectURL1 = "http://localhost:53000/" defaultCLIRedirectURL2 = "http://localhost:54000/" - defaultScopes = "openid profile email" + defaultScopes = "openid profile email groups" defaultUserIDClaim = "sub" ) From 44ab454a13712c2b422e95a51c728001cc88c9bd Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Mon, 26 Jan 2026 23:15:34 +0100 Subject: [PATCH 083/374] [management] Fix peer deletion error handling (#5188) When a deleted peer tries to reconnect, GetUserIDByPeerKey was returning Internal error instead of NotFound, causing clients to retry indefinitely instead of recognizing the unrecoverable PermissionDenied error. This fix: 1. Updates GetUserIDByPeerKey to properly return NotFound when peer doesn't exist 2. Updates Sync handler to convert NotFound to PermissionDenied with message 'peer is not registered', matching the behavior of GetAccountIDForPeerKey Fixes the regression introduced in v0.61.1 where deleted peers would see: - Before: 'rpc error: code = Internal desc = failed handling request' (retry loop) - After: 'rpc error: code = PermissionDenied desc = peer is not registered' (exits) --- management/internals/shared/grpc/server.go | 3 +++ management/server/store/sql_store.go | 3 +++ 2 files changed, 6 insertions(+) diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index 1ff0243f4..32049d044 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -232,6 +232,9 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S userID, err := s.accountManager.GetUserIDByPeerKey(ctx, peerKey.String()) if err != nil { s.syncSem.Add(-1) + if errStatus, ok := internalStatus.FromError(err); ok && errStatus.Type() == internalStatus.NotFound { + return status.Errorf(codes.PermissionDenied, "peer is not registered") + } return mapError(ctx, err) } diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 0eb687dbb..4fe800636 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -4269,6 +4269,9 @@ func (s *SqlStore) GetUserIDByPeerKey(ctx context.Context, lockStrength LockingS Take(&userID, GetKeyQueryCondition(s), peerKey) if result.Error != nil { + if errors.Is(result.Error, gorm.ErrRecordNotFound) { + return "", status.Errorf(status.NotFound, "peer not found: index lookup failed") + } return "", status.Errorf(status.Internal, "failed to get user ID by peer key") } From 7d791620a624b8fd81f8b8e8aecf6fcf888e57c1 Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Tue, 27 Jan 2026 09:42:20 +0100 Subject: [PATCH 084/374] Add user invite link feature for embedded IdP (#5157) --- management/server/account/manager.go | 6 + management/server/activity/codes.go | 10 + management/server/http/handler.go | 10 + .../handlers/instance/instance_handler.go | 35 + .../instance/instance_handler_test.go | 54 + .../http/handlers/users/invites_handler.go | 263 +++++ .../handlers/users/invites_handler_test.go | 642 +++++++++++ .../server/http/middleware/rate_limiter.go | 26 + .../http/middleware/rate_limiter_test.go | 158 +++ management/server/instance/manager.go | 175 +++ management/server/instance/version_test.go | 285 +++++ management/server/mock_server/account_mock.go | 48 + management/server/store/sql_store.go | 126 +- .../store/sql_store_user_invite_test.go | 520 +++++++++ management/server/store/store.go | 7 + management/server/types/user_invite.go | 201 ++++ management/server/types/user_invite_test.go | 355 ++++++ management/server/user.go | 366 ++++++ management/server/user_invite_test.go | 1010 +++++++++++++++++ shared/management/http/api/openapi.yml | 416 ++++++- shared/management/http/api/types.gen.go | 121 ++ 21 files changed, 4832 insertions(+), 2 deletions(-) create mode 100644 management/server/http/handlers/users/invites_handler.go create mode 100644 management/server/http/handlers/users/invites_handler_test.go create mode 100644 management/server/http/middleware/rate_limiter_test.go create mode 100644 management/server/instance/version_test.go create mode 100644 management/server/store/sql_store_user_invite_test.go create mode 100644 management/server/types/user_invite.go create mode 100644 management/server/types/user_invite_test.go create mode 100644 management/server/user_invite_test.go diff --git a/management/server/account/manager.go b/management/server/account/manager.go index 11af67358..5e9bb42a2 100644 --- a/management/server/account/manager.go +++ b/management/server/account/manager.go @@ -30,6 +30,12 @@ type Manager interface { autoGroups []string, usageLimit int, userID string, ephemeral bool, allowExtraDNSLabels bool) (*types.SetupKey, error) SaveSetupKey(ctx context.Context, accountID string, key *types.SetupKey, userID string) (*types.SetupKey, error) CreateUser(ctx context.Context, accountID, initiatorUserID string, key *types.UserInfo) (*types.UserInfo, error) + CreateUserInvite(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) + AcceptUserInvite(ctx context.Context, token, password string) error + RegenerateUserInvite(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) + GetUserInviteInfo(ctx context.Context, token string) (*types.UserInviteInfo, error) + ListUserInvites(ctx context.Context, accountID, initiatorUserID string) ([]*types.UserInvite, error) + DeleteUserInvite(ctx context.Context, accountID, initiatorUserID, inviteID string) error DeleteUser(ctx context.Context, accountID, initiatorUserID string, targetUserID string) error DeleteRegularUsers(ctx context.Context, accountID, initiatorUserID string, targetUserIDs []string, userInfos map[string]*types.UserInfo) error UpdateUserPassword(ctx context.Context, accountID, currentUserID, targetUserID string, oldPassword, newPassword string) error diff --git a/management/server/activity/codes.go b/management/server/activity/codes.go index e9eaa644b..e83eeb90a 100644 --- a/management/server/activity/codes.go +++ b/management/server/activity/codes.go @@ -199,6 +199,11 @@ const ( UserPasswordChanged Activity = 103 + UserInviteLinkCreated Activity = 104 + UserInviteLinkAccepted Activity = 105 + UserInviteLinkRegenerated Activity = 106 + UserInviteLinkDeleted Activity = 107 + AccountDeleted Activity = 99999 ) @@ -327,6 +332,11 @@ var activityMap = map[Activity]Code{ JobCreatedByUser: {"Create Job for peer", "peer.job.create"}, UserPasswordChanged: {"User password changed", "user.password.change"}, + + UserInviteLinkCreated: {"User invite link created", "user.invite.link.create"}, + UserInviteLinkAccepted: {"User invite link accepted", "user.invite.link.accept"}, + UserInviteLinkRegenerated: {"User invite link regenerated", "user.invite.link.regenerate"}, + UserInviteLinkDeleted: {"User invite link deleted", "user.invite.link.delete"}, } // StringCode returns a string code of the activity diff --git a/management/server/http/handler.go b/management/server/http/handler.go index 64f914afe..32a97ff44 100644 --- a/management/server/http/handler.go +++ b/management/server/http/handler.go @@ -68,6 +68,13 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks if err := bypass.AddBypassPath("/api/setup"); err != nil { return nil, fmt.Errorf("failed to add bypass path: %w", err) } + // Public invite endpoints (tokens start with nbi_) + if err := bypass.AddBypassPath("/api/users/invites/nbi_*"); err != nil { + return nil, fmt.Errorf("failed to add bypass path: %w", err) + } + if err := bypass.AddBypassPath("/api/users/invites/nbi_*/accept"); err != nil { + return nil, fmt.Errorf("failed to add bypass path: %w", err) + } var rateLimitingConfig *middleware.RateLimiterConfig if os.Getenv(rateLimitingEnabledKey) == "true" { @@ -132,6 +139,8 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks accounts.AddEndpoints(accountManager, settingsManager, embeddedIdpEnabled, router) peers.AddEndpoints(accountManager, router, networkMapController) users.AddEndpoints(accountManager, router) + users.AddInvitesEndpoints(accountManager, router) + users.AddPublicInvitesEndpoints(accountManager, router) setup_keys.AddEndpoints(accountManager, router) policies.AddEndpoints(accountManager, LocationManager, router) policies.AddPostureCheckEndpoints(accountManager, LocationManager, router) @@ -145,6 +154,7 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks recordsManager.RegisterEndpoints(router, rManager) idp.AddEndpoints(accountManager, router) instance.AddEndpoints(instanceManager, router) + instance.AddVersionEndpoint(instanceManager, router) // Mount embedded IdP handler at /oauth2 path if configured if embeddedIdpEnabled { diff --git a/management/server/http/handlers/instance/instance_handler.go b/management/server/http/handlers/instance/instance_handler.go index 889c3133e..5d8baaf8d 100644 --- a/management/server/http/handlers/instance/instance_handler.go +++ b/management/server/http/handlers/instance/instance_handler.go @@ -28,6 +28,15 @@ func AddEndpoints(instanceManager nbinstance.Manager, router *mux.Router) { router.HandleFunc("/setup", h.setup).Methods("POST", "OPTIONS") } +// AddVersionEndpoint registers the authenticated version endpoint. +func AddVersionEndpoint(instanceManager nbinstance.Manager, router *mux.Router) { + h := &handler{ + instanceManager: instanceManager, + } + + router.HandleFunc("/instance/version", h.getVersionInfo).Methods("GET", "OPTIONS") +} + // getInstanceStatus returns the instance status including whether setup is required. // This endpoint is unauthenticated. func (h *handler) getInstanceStatus(w http.ResponseWriter, r *http.Request) { @@ -65,3 +74,29 @@ func (h *handler) setup(w http.ResponseWriter, r *http.Request) { Email: userData.Email, }) } + +// getVersionInfo returns version information for NetBird components. +// This endpoint requires authentication. +func (h *handler) getVersionInfo(w http.ResponseWriter, r *http.Request) { + versionInfo, err := h.instanceManager.GetVersionInfo(r.Context()) + if err != nil { + log.WithContext(r.Context()).Errorf("failed to get version info: %v", err) + util.WriteErrorResponse("failed to get version info", http.StatusInternalServerError, w) + return + } + + resp := api.InstanceVersionInfo{ + ManagementCurrentVersion: versionInfo.CurrentVersion, + ManagementUpdateAvailable: versionInfo.ManagementUpdateAvailable, + } + + if versionInfo.DashboardVersion != "" { + resp.DashboardAvailableVersion = &versionInfo.DashboardVersion + } + + if versionInfo.ManagementVersion != "" { + resp.ManagementAvailableVersion = &versionInfo.ManagementVersion + } + + util.WriteJSONObject(r.Context(), w, resp) +} diff --git a/management/server/http/handlers/instance/instance_handler_test.go b/management/server/http/handlers/instance/instance_handler_test.go index 7a3a2bc88..470079c85 100644 --- a/management/server/http/handlers/instance/instance_handler_test.go +++ b/management/server/http/handlers/instance/instance_handler_test.go @@ -25,6 +25,7 @@ type mockInstanceManager struct { isSetupRequired bool isSetupRequiredFn func(ctx context.Context) (bool, error) createOwnerUserFn func(ctx context.Context, email, password, name string) (*idp.UserData, error) + getVersionInfoFn func(ctx context.Context) (*nbinstance.VersionInfo, error) } func (m *mockInstanceManager) IsSetupRequired(ctx context.Context) (bool, error) { @@ -66,6 +67,18 @@ func (m *mockInstanceManager) CreateOwnerUser(ctx context.Context, email, passwo }, nil } +func (m *mockInstanceManager) GetVersionInfo(ctx context.Context) (*nbinstance.VersionInfo, error) { + if m.getVersionInfoFn != nil { + return m.getVersionInfoFn(ctx) + } + return &nbinstance.VersionInfo{ + CurrentVersion: "0.34.0", + DashboardVersion: "2.0.0", + ManagementVersion: "0.35.0", + ManagementUpdateAvailable: true, + }, nil +} + var _ nbinstance.Manager = (*mockInstanceManager)(nil) func setupTestRouter(manager nbinstance.Manager) *mux.Router { @@ -279,3 +292,44 @@ func TestSetup_ManagerError(t *testing.T) { assert.Equal(t, http.StatusInternalServerError, rec.Code) } + +func TestGetVersionInfo_Success(t *testing.T) { + manager := &mockInstanceManager{} + router := mux.NewRouter() + AddVersionEndpoint(manager, router) + + req := httptest.NewRequest(http.MethodGet, "/instance/version", nil) + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + + var response api.InstanceVersionInfo + err := json.NewDecoder(rec.Body).Decode(&response) + require.NoError(t, err) + + assert.Equal(t, "0.34.0", response.ManagementCurrentVersion) + assert.NotNil(t, response.DashboardAvailableVersion) + assert.Equal(t, "2.0.0", *response.DashboardAvailableVersion) + assert.NotNil(t, response.ManagementAvailableVersion) + assert.Equal(t, "0.35.0", *response.ManagementAvailableVersion) + assert.True(t, response.ManagementUpdateAvailable) +} + +func TestGetVersionInfo_Error(t *testing.T) { + manager := &mockInstanceManager{ + getVersionInfoFn: func(ctx context.Context) (*nbinstance.VersionInfo, error) { + return nil, errors.New("failed to fetch versions") + }, + } + router := mux.NewRouter() + AddVersionEndpoint(manager, router) + + req := httptest.NewRequest(http.MethodGet, "/instance/version", nil) + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusInternalServerError, rec.Code) +} diff --git a/management/server/http/handlers/users/invites_handler.go b/management/server/http/handlers/users/invites_handler.go new file mode 100644 index 000000000..0f0f57c29 --- /dev/null +++ b/management/server/http/handlers/users/invites_handler.go @@ -0,0 +1,263 @@ +package users + +import ( + "encoding/json" + "errors" + "io" + "net/http" + "time" + + "github.com/gorilla/mux" + + "github.com/netbirdio/netbird/management/server/account" + nbcontext "github.com/netbirdio/netbird/management/server/context" + "github.com/netbirdio/netbird/management/server/http/middleware" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" + "github.com/netbirdio/netbird/shared/management/status" +) + +// publicInviteRateLimiter limits public invite requests by IP address to prevent brute-force attacks +var publicInviteRateLimiter = middleware.NewAPIRateLimiter(&middleware.RateLimiterConfig{ + RequestsPerMinute: 10, // 10 attempts per minute per IP + Burst: 5, // Allow burst of 5 requests + CleanupInterval: 10 * time.Minute, + LimiterTTL: 30 * time.Minute, +}) + +// toUserInviteResponse converts a UserInvite to an API response. +func toUserInviteResponse(invite *types.UserInvite) api.UserInvite { + autoGroups := invite.UserInfo.AutoGroups + if autoGroups == nil { + autoGroups = []string{} + } + var inviteLink *string + if invite.InviteToken != "" { + inviteLink = &invite.InviteToken + } + return api.UserInvite{ + Id: invite.UserInfo.ID, + Email: invite.UserInfo.Email, + Name: invite.UserInfo.Name, + Role: invite.UserInfo.Role, + AutoGroups: autoGroups, + ExpiresAt: invite.InviteExpiresAt.UTC(), + CreatedAt: invite.InviteCreatedAt.UTC(), + Expired: time.Now().After(invite.InviteExpiresAt), + InviteToken: inviteLink, + } +} + +// invitesHandler handles user invite operations +type invitesHandler struct { + accountManager account.Manager +} + +// AddInvitesEndpoints registers invite-related endpoints +func AddInvitesEndpoints(accountManager account.Manager, router *mux.Router) { + h := &invitesHandler{accountManager: accountManager} + + // Authenticated endpoints (require admin) + router.HandleFunc("/users/invites", h.listInvites).Methods("GET", "OPTIONS") + router.HandleFunc("/users/invites", h.createInvite).Methods("POST", "OPTIONS") + router.HandleFunc("/users/invites/{inviteId}", h.deleteInvite).Methods("DELETE", "OPTIONS") + router.HandleFunc("/users/invites/{inviteId}/regenerate", h.regenerateInvite).Methods("POST", "OPTIONS") +} + +// AddPublicInvitesEndpoints registers public (unauthenticated) invite endpoints with rate limiting +func AddPublicInvitesEndpoints(accountManager account.Manager, router *mux.Router) { + h := &invitesHandler{accountManager: accountManager} + + // Create a subrouter for public invite endpoints with rate limiting middleware + publicRouter := router.PathPrefix("/users/invites").Subrouter() + publicRouter.Use(publicInviteRateLimiter.Middleware) + + // Public endpoints (no auth required, protected by token and rate limited) + publicRouter.HandleFunc("/{token}", h.getInviteInfo).Methods("GET", "OPTIONS") + publicRouter.HandleFunc("/{token}/accept", h.acceptInvite).Methods("POST", "OPTIONS") +} + +// listInvites handles GET /api/users/invites +func (h *invitesHandler) listInvites(w http.ResponseWriter, r *http.Request) { + + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + invites, err := h.accountManager.ListUserInvites(r.Context(), userAuth.AccountId, userAuth.UserId) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + resp := make([]api.UserInvite, 0, len(invites)) + for _, invite := range invites { + resp = append(resp, toUserInviteResponse(invite)) + } + + util.WriteJSONObject(r.Context(), w, resp) +} + +// createInvite handles POST /api/users/invites +func (h *invitesHandler) createInvite(w http.ResponseWriter, r *http.Request) { + + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + var req api.UserInviteCreateRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + + invite := &types.UserInfo{ + Email: req.Email, + Name: req.Name, + Role: req.Role, + AutoGroups: req.AutoGroups, + } + + expiresIn := 0 + if req.ExpiresIn != nil { + expiresIn = *req.ExpiresIn + } + + result, err := h.accountManager.CreateUserInvite(r.Context(), userAuth.AccountId, userAuth.UserId, invite, expiresIn) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + result.InviteCreatedAt = time.Now().UTC() + resp := toUserInviteResponse(result) + util.WriteJSONObject(r.Context(), w, &resp) +} + +// getInviteInfo handles GET /api/users/invites/{token} +func (h *invitesHandler) getInviteInfo(w http.ResponseWriter, r *http.Request) { + + vars := mux.Vars(r) + token := vars["token"] + if token == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "token is required"), w) + return + } + + info, err := h.accountManager.GetUserInviteInfo(r.Context(), token) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + expiresAt := info.ExpiresAt.UTC() + util.WriteJSONObject(r.Context(), w, &api.UserInviteInfo{ + Email: info.Email, + Name: info.Name, + ExpiresAt: expiresAt, + Valid: info.Valid, + InvitedBy: info.InvitedBy, + }) +} + +// acceptInvite handles POST /api/users/invites/{token}/accept +func (h *invitesHandler) acceptInvite(w http.ResponseWriter, r *http.Request) { + + vars := mux.Vars(r) + token := vars["token"] + if token == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "token is required"), w) + return + } + + var req api.UserInviteAcceptRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + + err := h.accountManager.AcceptUserInvite(r.Context(), token, req.Password) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, &api.UserInviteAcceptResponse{Success: true}) +} + +// regenerateInvite handles POST /api/users/invites/{inviteId}/regenerate +func (h *invitesHandler) regenerateInvite(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + util.WriteErrorResponse("wrong HTTP method", http.StatusMethodNotAllowed, w) + return + } + + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + vars := mux.Vars(r) + inviteID := vars["inviteId"] + if inviteID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "invite ID is required"), w) + return + } + + var req api.UserInviteRegenerateRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + // Allow empty body (io.EOF) - expiresIn is optional + if !errors.Is(err, io.EOF) { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + } + + expiresIn := 0 + if req.ExpiresIn != nil { + expiresIn = *req.ExpiresIn + } + + result, err := h.accountManager.RegenerateUserInvite(r.Context(), userAuth.AccountId, userAuth.UserId, inviteID, expiresIn) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + expiresAt := result.InviteExpiresAt.UTC() + util.WriteJSONObject(r.Context(), w, &api.UserInviteRegenerateResponse{ + InviteToken: result.InviteToken, + InviteExpiresAt: expiresAt, + }) +} + +// deleteInvite handles DELETE /api/users/invites/{inviteId} +func (h *invitesHandler) deleteInvite(w http.ResponseWriter, r *http.Request) { + + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + vars := mux.Vars(r) + inviteID := vars["inviteId"] + if inviteID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "invite ID is required"), w) + return + } + + err = h.accountManager.DeleteUserInvite(r.Context(), userAuth.AccountId, userAuth.UserId, inviteID) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, util.EmptyObject{}) +} diff --git a/management/server/http/handlers/users/invites_handler_test.go b/management/server/http/handlers/users/invites_handler_test.go new file mode 100644 index 000000000..80826b9d4 --- /dev/null +++ b/management/server/http/handlers/users/invites_handler_test.go @@ -0,0 +1,642 @@ +package users + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/gorilla/mux" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + nbcontext "github.com/netbirdio/netbird/management/server/context" + "github.com/netbirdio/netbird/management/server/mock_server" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/auth" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/status" +) + +const ( + testAccountID = "test-account-id" + testUserID = "test-user-id" + testInviteID = "test-invite-id" + testInviteToken = "nbi_testtoken123456789012345678" + testEmail = "invite@example.com" + testName = "Test User" +) + +func setupInvitesTestHandler(am *mock_server.MockAccountManager) *invitesHandler { + return &invitesHandler{ + accountManager: am, + } +} + +func TestListInvites(t *testing.T) { + now := time.Now().UTC() + testInvites := []*types.UserInvite{ + { + UserInfo: &types.UserInfo{ + ID: "invite-1", + Email: "user1@example.com", + Name: "User One", + Role: "user", + AutoGroups: []string{"group-1"}, + }, + InviteExpiresAt: now.Add(24 * time.Hour), + InviteCreatedAt: now, + }, + { + UserInfo: &types.UserInfo{ + ID: "invite-2", + Email: "user2@example.com", + Name: "User Two", + Role: "admin", + AutoGroups: nil, + }, + InviteExpiresAt: now.Add(-1 * time.Hour), // Expired + InviteCreatedAt: now.Add(-48 * time.Hour), + }, + } + + tt := []struct { + name string + expectedStatus int + mockFunc func(ctx context.Context, accountID, initiatorUserID string) ([]*types.UserInvite, error) + expectedCount int + }{ + { + name: "successful list", + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string) ([]*types.UserInvite, error) { + return testInvites, nil + }, + expectedCount: 2, + }, + { + name: "empty list", + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string) ([]*types.UserInvite, error) { + return []*types.UserInvite{}, nil + }, + expectedCount: 0, + }, + { + name: "permission denied", + expectedStatus: http.StatusForbidden, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string) ([]*types.UserInvite, error) { + return nil, status.NewPermissionDeniedError() + }, + expectedCount: 0, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + am := &mock_server.MockAccountManager{ + ListUserInvitesFunc: tc.mockFunc, + } + handler := setupInvitesTestHandler(am) + + req := httptest.NewRequest(http.MethodGet, "/api/users/invites", nil) + req = nbcontext.SetUserAuthInRequest(req, auth.UserAuth{ + UserId: testUserID, + AccountId: testAccountID, + }) + + rr := httptest.NewRecorder() + handler.listInvites(rr, req) + + assert.Equal(t, tc.expectedStatus, rr.Code) + + if tc.expectedStatus == http.StatusOK { + var resp []api.UserInvite + err := json.NewDecoder(rr.Body).Decode(&resp) + require.NoError(t, err) + assert.Len(t, resp, tc.expectedCount) + } + }) + } +} + +func TestCreateInvite(t *testing.T) { + now := time.Now().UTC() + expiresAt := now.Add(72 * time.Hour) + + tt := []struct { + name string + requestBody string + expectedStatus int + mockFunc func(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) + }{ + { + name: "successful create", + requestBody: `{"email":"test@example.com","name":"Test User","role":"user","auto_groups":["group-1"]}`, + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) { + return &types.UserInvite{ + UserInfo: &types.UserInfo{ + ID: testInviteID, + Email: invite.Email, + Name: invite.Name, + Role: invite.Role, + AutoGroups: invite.AutoGroups, + Status: string(types.UserStatusInvited), + }, + InviteToken: testInviteToken, + InviteExpiresAt: expiresAt, + }, nil + }, + }, + { + name: "successful create with custom expiration", + requestBody: `{"email":"test@example.com","name":"Test User","role":"admin","auto_groups":[],"expires_in":3600}`, + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) { + assert.Equal(t, 3600, expiresIn) + return &types.UserInvite{ + UserInfo: &types.UserInfo{ + ID: testInviteID, + Email: invite.Email, + Name: invite.Name, + Role: invite.Role, + AutoGroups: []string{}, + Status: string(types.UserStatusInvited), + }, + InviteToken: testInviteToken, + InviteExpiresAt: expiresAt, + }, nil + }, + }, + { + name: "user already exists", + requestBody: `{"email":"existing@example.com","name":"Existing User","role":"user","auto_groups":[]}`, + expectedStatus: http.StatusConflict, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) { + return nil, status.Errorf(status.UserAlreadyExists, "user with this email already exists") + }, + }, + { + name: "invite already exists", + requestBody: `{"email":"invited@example.com","name":"Invited User","role":"user","auto_groups":[]}`, + expectedStatus: http.StatusConflict, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) { + return nil, status.Errorf(status.AlreadyExists, "invite already exists for this email") + }, + }, + { + name: "permission denied", + requestBody: `{"email":"test@example.com","name":"Test User","role":"user","auto_groups":[]}`, + expectedStatus: http.StatusForbidden, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) { + return nil, status.NewPermissionDeniedError() + }, + }, + { + name: "embedded IDP not enabled", + requestBody: `{"email":"test@example.com","name":"Test User","role":"user","auto_groups":[]}`, + expectedStatus: http.StatusPreconditionFailed, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) { + return nil, status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") + }, + }, + { + name: "invalid JSON", + requestBody: `{invalid json}`, + expectedStatus: http.StatusBadRequest, + mockFunc: nil, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + am := &mock_server.MockAccountManager{ + CreateUserInviteFunc: tc.mockFunc, + } + handler := setupInvitesTestHandler(am) + + req := httptest.NewRequest(http.MethodPost, "/api/users/invites", bytes.NewBufferString(tc.requestBody)) + req = nbcontext.SetUserAuthInRequest(req, auth.UserAuth{ + UserId: testUserID, + AccountId: testAccountID, + }) + + rr := httptest.NewRecorder() + handler.createInvite(rr, req) + + assert.Equal(t, tc.expectedStatus, rr.Code) + + if tc.expectedStatus == http.StatusOK { + var resp api.UserInvite + err := json.NewDecoder(rr.Body).Decode(&resp) + require.NoError(t, err) + assert.Equal(t, testInviteID, resp.Id) + assert.NotNil(t, resp.InviteToken) + assert.NotEmpty(t, *resp.InviteToken) + } + }) + } +} + +func TestGetInviteInfo(t *testing.T) { + now := time.Now().UTC() + + tt := []struct { + name string + token string + expectedStatus int + mockFunc func(ctx context.Context, token string) (*types.UserInviteInfo, error) + }{ + { + name: "successful get valid invite", + token: testInviteToken, + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, token string) (*types.UserInviteInfo, error) { + return &types.UserInviteInfo{ + Email: testEmail, + Name: testName, + ExpiresAt: now.Add(24 * time.Hour), + Valid: true, + InvitedBy: "Admin User", + }, nil + }, + }, + { + name: "successful get expired invite", + token: testInviteToken, + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, token string) (*types.UserInviteInfo, error) { + return &types.UserInviteInfo{ + Email: testEmail, + Name: testName, + ExpiresAt: now.Add(-24 * time.Hour), + Valid: false, + InvitedBy: "Admin User", + }, nil + }, + }, + { + name: "invite not found", + token: "nbi_invalidtoken1234567890123456", + expectedStatus: http.StatusNotFound, + mockFunc: func(ctx context.Context, token string) (*types.UserInviteInfo, error) { + return nil, status.Errorf(status.NotFound, "invite not found") + }, + }, + { + name: "invalid token format", + token: "invalid", + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: func(ctx context.Context, token string) (*types.UserInviteInfo, error) { + return nil, status.Errorf(status.InvalidArgument, "invalid invite token") + }, + }, + { + name: "missing token", + token: "", + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: nil, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + am := &mock_server.MockAccountManager{ + GetUserInviteInfoFunc: tc.mockFunc, + } + handler := setupInvitesTestHandler(am) + + req := httptest.NewRequest(http.MethodGet, "/api/users/invites/"+tc.token, nil) + if tc.token != "" { + req = mux.SetURLVars(req, map[string]string{"token": tc.token}) + } + + rr := httptest.NewRecorder() + handler.getInviteInfo(rr, req) + + assert.Equal(t, tc.expectedStatus, rr.Code) + + if tc.expectedStatus == http.StatusOK { + var resp api.UserInviteInfo + err := json.NewDecoder(rr.Body).Decode(&resp) + require.NoError(t, err) + assert.Equal(t, testEmail, resp.Email) + assert.Equal(t, testName, resp.Name) + } + }) + } +} + +func TestAcceptInvite(t *testing.T) { + tt := []struct { + name string + token string + requestBody string + expectedStatus int + mockFunc func(ctx context.Context, token, password string) error + }{ + { + name: "successful accept", + token: testInviteToken, + requestBody: `{"password":"SecurePass123!"}`, + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, token, password string) error { + return nil + }, + }, + { + name: "invite not found", + token: "nbi_invalidtoken1234567890123456", + requestBody: `{"password":"SecurePass123!"}`, + expectedStatus: http.StatusNotFound, + mockFunc: func(ctx context.Context, token, password string) error { + return status.Errorf(status.NotFound, "invite not found") + }, + }, + { + name: "invite expired", + token: testInviteToken, + requestBody: `{"password":"SecurePass123!"}`, + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: func(ctx context.Context, token, password string) error { + return status.Errorf(status.InvalidArgument, "invite has expired") + }, + }, + { + name: "embedded IDP not enabled", + token: testInviteToken, + requestBody: `{"password":"SecurePass123!"}`, + expectedStatus: http.StatusPreconditionFailed, + mockFunc: func(ctx context.Context, token, password string) error { + return status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") + }, + }, + { + name: "missing token", + token: "", + requestBody: `{"password":"SecurePass123!"}`, + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: nil, + }, + { + name: "invalid JSON", + token: testInviteToken, + requestBody: `{invalid}`, + expectedStatus: http.StatusBadRequest, + mockFunc: nil, + }, + { + name: "password too short", + token: testInviteToken, + requestBody: `{"password":"Short1!"}`, + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: func(ctx context.Context, token, password string) error { + return status.Errorf(status.InvalidArgument, "password must be at least 8 characters long") + }, + }, + { + name: "password missing digit", + token: testInviteToken, + requestBody: `{"password":"NoDigitPass!"}`, + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: func(ctx context.Context, token, password string) error { + return status.Errorf(status.InvalidArgument, "password must contain at least one digit") + }, + }, + { + name: "password missing uppercase", + token: testInviteToken, + requestBody: `{"password":"nouppercase1!"}`, + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: func(ctx context.Context, token, password string) error { + return status.Errorf(status.InvalidArgument, "password must contain at least one uppercase letter") + }, + }, + { + name: "password missing special character", + token: testInviteToken, + requestBody: `{"password":"NoSpecial123"}`, + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: func(ctx context.Context, token, password string) error { + return status.Errorf(status.InvalidArgument, "password must contain at least one special character") + }, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + am := &mock_server.MockAccountManager{ + AcceptUserInviteFunc: tc.mockFunc, + } + handler := setupInvitesTestHandler(am) + + req := httptest.NewRequest(http.MethodPost, "/api/users/invites/"+tc.token+"/accept", bytes.NewBufferString(tc.requestBody)) + if tc.token != "" { + req = mux.SetURLVars(req, map[string]string{"token": tc.token}) + } + + rr := httptest.NewRecorder() + handler.acceptInvite(rr, req) + + assert.Equal(t, tc.expectedStatus, rr.Code) + + if tc.expectedStatus == http.StatusOK { + var resp api.UserInviteAcceptResponse + err := json.NewDecoder(rr.Body).Decode(&resp) + require.NoError(t, err) + assert.True(t, resp.Success) + } + }) + } +} + +func TestRegenerateInvite(t *testing.T) { + now := time.Now().UTC() + expiresAt := now.Add(72 * time.Hour) + + tt := []struct { + name string + inviteID string + requestBody string + expectedStatus int + mockFunc func(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) + }{ + { + name: "successful regenerate with empty body", + inviteID: testInviteID, + requestBody: "", + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) { + assert.Equal(t, 0, expiresIn) + return &types.UserInvite{ + UserInfo: &types.UserInfo{ + ID: inviteID, + Email: testEmail, + }, + InviteToken: "nbi_newtoken12345678901234567890", + InviteExpiresAt: expiresAt, + }, nil + }, + }, + { + name: "successful regenerate with custom expiration", + inviteID: testInviteID, + requestBody: `{"expires_in":7200}`, + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) { + assert.Equal(t, 7200, expiresIn) + return &types.UserInvite{ + UserInfo: &types.UserInfo{ + ID: inviteID, + Email: testEmail, + }, + InviteToken: "nbi_newtoken12345678901234567890", + InviteExpiresAt: expiresAt, + }, nil + }, + }, + { + name: "invite not found", + inviteID: "non-existent-invite", + requestBody: "", + expectedStatus: http.StatusNotFound, + mockFunc: func(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) { + return nil, status.Errorf(status.NotFound, "invite not found") + }, + }, + { + name: "permission denied", + inviteID: testInviteID, + requestBody: "", + expectedStatus: http.StatusForbidden, + mockFunc: func(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) { + return nil, status.NewPermissionDeniedError() + }, + }, + { + name: "missing invite ID", + inviteID: "", + requestBody: "", + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: nil, + }, + { + name: "invalid JSON should return error", + inviteID: testInviteID, + requestBody: `{invalid json}`, + expectedStatus: http.StatusBadRequest, + mockFunc: nil, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + am := &mock_server.MockAccountManager{ + RegenerateUserInviteFunc: tc.mockFunc, + } + handler := setupInvitesTestHandler(am) + + var body io.Reader + if tc.requestBody != "" { + body = bytes.NewBufferString(tc.requestBody) + } + + req := httptest.NewRequest(http.MethodPost, "/api/users/invites/"+tc.inviteID+"/regenerate", body) + req = nbcontext.SetUserAuthInRequest(req, auth.UserAuth{ + UserId: testUserID, + AccountId: testAccountID, + }) + if tc.inviteID != "" { + req = mux.SetURLVars(req, map[string]string{"inviteId": tc.inviteID}) + } + + rr := httptest.NewRecorder() + handler.regenerateInvite(rr, req) + + assert.Equal(t, tc.expectedStatus, rr.Code) + + if tc.expectedStatus == http.StatusOK { + var resp api.UserInviteRegenerateResponse + err := json.NewDecoder(rr.Body).Decode(&resp) + require.NoError(t, err) + assert.NotEmpty(t, resp.InviteToken) + } + }) + } +} + +func TestDeleteInvite(t *testing.T) { + tt := []struct { + name string + inviteID string + expectedStatus int + mockFunc func(ctx context.Context, accountID, initiatorUserID, inviteID string) error + }{ + { + name: "successful delete", + inviteID: testInviteID, + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, accountID, initiatorUserID, inviteID string) error { + return nil + }, + }, + { + name: "invite not found", + inviteID: "non-existent-invite", + expectedStatus: http.StatusNotFound, + mockFunc: func(ctx context.Context, accountID, initiatorUserID, inviteID string) error { + return status.Errorf(status.NotFound, "invite not found") + }, + }, + { + name: "permission denied", + inviteID: testInviteID, + expectedStatus: http.StatusForbidden, + mockFunc: func(ctx context.Context, accountID, initiatorUserID, inviteID string) error { + return status.NewPermissionDeniedError() + }, + }, + { + name: "embedded IDP not enabled", + inviteID: testInviteID, + expectedStatus: http.StatusPreconditionFailed, + mockFunc: func(ctx context.Context, accountID, initiatorUserID, inviteID string) error { + return status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") + }, + }, + { + name: "missing invite ID", + inviteID: "", + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: nil, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + am := &mock_server.MockAccountManager{ + DeleteUserInviteFunc: tc.mockFunc, + } + handler := setupInvitesTestHandler(am) + + req := httptest.NewRequest(http.MethodDelete, "/api/users/invites/"+tc.inviteID, nil) + req = nbcontext.SetUserAuthInRequest(req, auth.UserAuth{ + UserId: testUserID, + AccountId: testAccountID, + }) + if tc.inviteID != "" { + req = mux.SetURLVars(req, map[string]string{"inviteId": tc.inviteID}) + } + + rr := httptest.NewRecorder() + handler.deleteInvite(rr, req) + + assert.Equal(t, tc.expectedStatus, rr.Code) + }) + } +} diff --git a/management/server/http/middleware/rate_limiter.go b/management/server/http/middleware/rate_limiter.go index a6266d4f3..936b34319 100644 --- a/management/server/http/middleware/rate_limiter.go +++ b/management/server/http/middleware/rate_limiter.go @@ -2,10 +2,14 @@ package middleware import ( "context" + "net" + "net/http" "sync" "time" "golang.org/x/time/rate" + + "github.com/netbirdio/netbird/shared/management/http/util" ) // RateLimiterConfig holds configuration for the API rate limiter @@ -144,3 +148,25 @@ func (rl *APIRateLimiter) Reset(key string) { defer rl.mu.Unlock() delete(rl.limiters, key) } + +// Middleware returns an HTTP middleware that rate limits requests by client IP. +// Returns 429 Too Many Requests if the rate limit is exceeded. +func (rl *APIRateLimiter) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + clientIP := getClientIP(r) + if !rl.Allow(clientIP) { + util.WriteErrorResponse("rate limit exceeded, please try again later", http.StatusTooManyRequests, w) + return + } + next.ServeHTTP(w, r) + }) +} + +// getClientIP extracts the client IP address from the request. +func getClientIP(r *http.Request) string { + ip, _, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + return r.RemoteAddr + } + return ip +} diff --git a/management/server/http/middleware/rate_limiter_test.go b/management/server/http/middleware/rate_limiter_test.go new file mode 100644 index 000000000..68f804e57 --- /dev/null +++ b/management/server/http/middleware/rate_limiter_test.go @@ -0,0 +1,158 @@ +package middleware + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestAPIRateLimiter_Allow(t *testing.T) { + rl := NewAPIRateLimiter(&RateLimiterConfig{ + RequestsPerMinute: 60, // 1 per second + Burst: 2, + CleanupInterval: time.Minute, + LimiterTTL: time.Minute, + }) + defer rl.Stop() + + // First two requests should be allowed (burst) + assert.True(t, rl.Allow("test-key")) + assert.True(t, rl.Allow("test-key")) + + // Third request should be denied (exceeded burst) + assert.False(t, rl.Allow("test-key")) + + // Different key should be allowed + assert.True(t, rl.Allow("different-key")) +} + +func TestAPIRateLimiter_Middleware(t *testing.T) { + rl := NewAPIRateLimiter(&RateLimiterConfig{ + RequestsPerMinute: 60, // 1 per second + Burst: 2, + CleanupInterval: time.Minute, + LimiterTTL: time.Minute, + }) + defer rl.Stop() + + // Create a simple handler that returns 200 OK + nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + // Wrap with rate limiter middleware + handler := rl.Middleware(nextHandler) + + // First two requests should pass (burst) + for i := 0; i < 2; i++ { + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.RemoteAddr = "192.168.1.1:12345" + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + assert.Equal(t, http.StatusOK, rr.Code, "request %d should be allowed", i+1) + } + + // Third request should be rate limited + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.RemoteAddr = "192.168.1.1:12345" + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + assert.Equal(t, http.StatusTooManyRequests, rr.Code) +} + +func TestAPIRateLimiter_Middleware_DifferentIPs(t *testing.T) { + rl := NewAPIRateLimiter(&RateLimiterConfig{ + RequestsPerMinute: 60, + Burst: 1, + CleanupInterval: time.Minute, + LimiterTTL: time.Minute, + }) + defer rl.Stop() + + nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + handler := rl.Middleware(nextHandler) + + // Request from first IP + req1 := httptest.NewRequest(http.MethodGet, "/test", nil) + req1.RemoteAddr = "192.168.1.1:12345" + rr1 := httptest.NewRecorder() + handler.ServeHTTP(rr1, req1) + assert.Equal(t, http.StatusOK, rr1.Code) + + // Second request from first IP should be rate limited + req2 := httptest.NewRequest(http.MethodGet, "/test", nil) + req2.RemoteAddr = "192.168.1.1:12345" + rr2 := httptest.NewRecorder() + handler.ServeHTTP(rr2, req2) + assert.Equal(t, http.StatusTooManyRequests, rr2.Code) + + // Request from different IP should be allowed + req3 := httptest.NewRequest(http.MethodGet, "/test", nil) + req3.RemoteAddr = "192.168.1.2:12345" + rr3 := httptest.NewRecorder() + handler.ServeHTTP(rr3, req3) + assert.Equal(t, http.StatusOK, rr3.Code) +} + +func TestGetClientIP(t *testing.T) { + tests := []struct { + name string + remoteAddr string + expected string + }{ + { + name: "remote addr with port", + remoteAddr: "192.168.1.1:12345", + expected: "192.168.1.1", + }, + { + name: "remote addr without port", + remoteAddr: "192.168.1.1", + expected: "192.168.1.1", + }, + { + name: "IPv6 with port", + remoteAddr: "[::1]:12345", + expected: "::1", + }, + { + name: "IPv6 without port", + remoteAddr: "::1", + expected: "::1", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.RemoteAddr = tc.remoteAddr + assert.Equal(t, tc.expected, getClientIP(req)) + }) + } +} + +func TestAPIRateLimiter_Reset(t *testing.T) { + rl := NewAPIRateLimiter(&RateLimiterConfig{ + RequestsPerMinute: 60, + Burst: 1, + CleanupInterval: time.Minute, + LimiterTTL: time.Minute, + }) + defer rl.Stop() + + // Use up the burst + assert.True(t, rl.Allow("test-key")) + assert.False(t, rl.Allow("test-key")) + + // Reset the limiter + rl.Reset("test-key") + + // Should be allowed again + assert.True(t, rl.Allow("test-key")) +} diff --git a/management/server/instance/manager.go b/management/server/instance/manager.go index 6f50e3ff7..6a0509ebd 100644 --- a/management/server/instance/manager.go +++ b/management/server/instance/manager.go @@ -2,18 +2,54 @@ package instance import ( "context" + "encoding/json" "errors" "fmt" + "io" + "net/http" "net/mail" + "strings" "sync" + "time" + goversion "github.com/hashicorp/go-version" log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/management/server/idp" "github.com/netbirdio/netbird/management/server/store" "github.com/netbirdio/netbird/shared/management/status" + "github.com/netbirdio/netbird/version" ) +const ( + // Version endpoints + managementVersionURL = "https://pkgs.netbird.io/releases/latest/version" + dashboardReleasesURL = "https://api.github.com/repos/netbirdio/dashboard/releases/latest" + + // Cache TTL for version information + versionCacheTTL = 60 * time.Minute + + // HTTP client timeout + httpTimeout = 5 * time.Second +) + +// VersionInfo contains version information for NetBird components +type VersionInfo struct { + // CurrentVersion is the running management server version + CurrentVersion string + // DashboardVersion is the latest available dashboard version from GitHub + DashboardVersion string + // ManagementVersion is the latest available management version from GitHub + ManagementVersion string + // ManagementUpdateAvailable indicates if a newer management version is available + ManagementUpdateAvailable bool +} + +// githubRelease represents a GitHub release response +type githubRelease struct { + TagName string `json:"tag_name"` +} + // Manager handles instance-level operations like initial setup. type Manager interface { // IsSetupRequired checks if instance setup is required. @@ -23,6 +59,9 @@ type Manager interface { // CreateOwnerUser creates the initial owner user in the embedded IDP. // This should only be called when IsSetupRequired returns true. CreateOwnerUser(ctx context.Context, email, password, name string) (*idp.UserData, error) + + // GetVersionInfo returns version information for NetBird components. + GetVersionInfo(ctx context.Context) (*VersionInfo, error) } // DefaultManager is the default implementation of Manager. @@ -32,6 +71,12 @@ type DefaultManager struct { setupRequired bool setupMu sync.RWMutex + + // Version caching + httpClient *http.Client + versionMu sync.RWMutex + cachedVersions *VersionInfo + lastVersionFetch time.Time } // NewManager creates a new instance manager. @@ -43,6 +88,9 @@ func NewManager(ctx context.Context, store store.Store, idpManager idp.Manager) store: store, embeddedIdpManager: embeddedIdp, setupRequired: false, + httpClient: &http.Client{ + Timeout: httpTimeout, + }, } if embeddedIdp != nil { @@ -134,3 +182,130 @@ func (m *DefaultManager) validateSetupInfo(email, password, name string) error { } return nil } + +// GetVersionInfo returns version information for NetBird components. +func (m *DefaultManager) GetVersionInfo(ctx context.Context) (*VersionInfo, error) { + m.versionMu.RLock() + if m.cachedVersions != nil && time.Since(m.lastVersionFetch) < versionCacheTTL { + cached := *m.cachedVersions + m.versionMu.RUnlock() + return &cached, nil + } + m.versionMu.RUnlock() + + return m.fetchVersionInfo(ctx) +} + +func (m *DefaultManager) fetchVersionInfo(ctx context.Context) (*VersionInfo, error) { + m.versionMu.Lock() + // Double-check after acquiring write lock + if m.cachedVersions != nil && time.Since(m.lastVersionFetch) < versionCacheTTL { + cached := *m.cachedVersions + m.versionMu.Unlock() + return &cached, nil + } + m.versionMu.Unlock() + + info := &VersionInfo{ + CurrentVersion: version.NetbirdVersion(), + } + + // Fetch management version from pkgs.netbird.io (plain text) + mgmtVersion, err := m.fetchPlainTextVersion(ctx, managementVersionURL) + if err != nil { + log.WithContext(ctx).Warnf("failed to fetch management version: %v", err) + } else { + info.ManagementVersion = mgmtVersion + info.ManagementUpdateAvailable = isNewerVersion(info.CurrentVersion, mgmtVersion) + } + + // Fetch dashboard version from GitHub + dashVersion, err := m.fetchGitHubRelease(ctx, dashboardReleasesURL) + if err != nil { + log.WithContext(ctx).Warnf("failed to fetch dashboard version from GitHub: %v", err) + } else { + info.DashboardVersion = dashVersion + } + + // Update cache + m.versionMu.Lock() + m.cachedVersions = info + m.lastVersionFetch = time.Now() + m.versionMu.Unlock() + + return info, nil +} + +// isNewerVersion returns true if latestVersion is greater than currentVersion +func isNewerVersion(currentVersion, latestVersion string) bool { + current, err := goversion.NewVersion(currentVersion) + if err != nil { + return false + } + + latest, err := goversion.NewVersion(latestVersion) + if err != nil { + return false + } + + return latest.GreaterThan(current) +} + +func (m *DefaultManager) fetchPlainTextVersion(ctx context.Context, url string) (string, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return "", fmt.Errorf("create request: %w", err) + } + + req.Header.Set("User-Agent", "NetBird-Management/"+version.NetbirdVersion()) + + resp, err := m.httpClient.Do(req) + if err != nil { + return "", fmt.Errorf("execute request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + + body, err := io.ReadAll(io.LimitReader(resp.Body, 100)) + if err != nil { + return "", fmt.Errorf("read response: %w", err) + } + + return strings.TrimSpace(string(body)), nil +} + +func (m *DefaultManager) fetchGitHubRelease(ctx context.Context, url string) (string, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return "", fmt.Errorf("create request: %w", err) + } + + req.Header.Set("Accept", "application/vnd.github.v3+json") + req.Header.Set("User-Agent", "NetBird-Management/"+version.NetbirdVersion()) + + resp, err := m.httpClient.Do(req) + if err != nil { + return "", fmt.Errorf("execute request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + + var release githubRelease + if err := json.NewDecoder(resp.Body).Decode(&release); err != nil { + return "", fmt.Errorf("decode response: %w", err) + } + + // Remove 'v' prefix if present + tag := release.TagName + if len(tag) > 0 && tag[0] == 'v' { + tag = tag[1:] + } + + return tag, nil +} diff --git a/management/server/instance/version_test.go b/management/server/instance/version_test.go new file mode 100644 index 000000000..35ba66db8 --- /dev/null +++ b/management/server/instance/version_test.go @@ -0,0 +1,285 @@ +package instance + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// mockRoundTripper implements http.RoundTripper for testing +type mockRoundTripper struct { + callCount atomic.Int32 + managementVersion string + dashboardVersion string +} + +func (m *mockRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + m.callCount.Add(1) + + var body string + if strings.Contains(req.URL.String(), "pkgs.netbird.io") { + // Plain text response for management version + body = m.managementVersion + } else if strings.Contains(req.URL.String(), "github.com") { + // JSON response for dashboard version + jsonResp, _ := json.Marshal(githubRelease{TagName: "v" + m.dashboardVersion}) + body = string(jsonResp) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(bytes.NewBufferString(body)), + Header: make(http.Header), + }, nil +} + +func TestDefaultManager_GetVersionInfo_ReturnsCurrentVersion(t *testing.T) { + mockTransport := &mockRoundTripper{ + managementVersion: "0.65.0", + dashboardVersion: "2.10.0", + } + + m := &DefaultManager{ + httpClient: &http.Client{Transport: mockTransport}, + } + + ctx := context.Background() + + info, err := m.GetVersionInfo(ctx) + require.NoError(t, err) + + // CurrentVersion should always be set + assert.NotEmpty(t, info.CurrentVersion) + assert.Equal(t, "0.65.0", info.ManagementVersion) + assert.Equal(t, "2.10.0", info.DashboardVersion) + assert.Equal(t, int32(2), mockTransport.callCount.Load()) // 2 calls: management + dashboard +} + +func TestDefaultManager_GetVersionInfo_CachesResults(t *testing.T) { + mockTransport := &mockRoundTripper{ + managementVersion: "0.65.0", + dashboardVersion: "2.10.0", + } + + m := &DefaultManager{ + httpClient: &http.Client{Transport: mockTransport}, + } + + ctx := context.Background() + + // First call + info1, err := m.GetVersionInfo(ctx) + require.NoError(t, err) + assert.NotEmpty(t, info1.CurrentVersion) + assert.Equal(t, "0.65.0", info1.ManagementVersion) + + initialCallCount := mockTransport.callCount.Load() + + // Second call should use cache (no additional HTTP calls) + info2, err := m.GetVersionInfo(ctx) + require.NoError(t, err) + assert.Equal(t, info1.CurrentVersion, info2.CurrentVersion) + assert.Equal(t, info1.ManagementVersion, info2.ManagementVersion) + assert.Equal(t, info1.DashboardVersion, info2.DashboardVersion) + + // Verify no additional HTTP calls were made (cache was used) + assert.Equal(t, initialCallCount, mockTransport.callCount.Load()) +} + +func TestDefaultManager_FetchGitHubRelease_ParsesTagName(t *testing.T) { + tests := []struct { + name string + tagName string + expected string + shouldError bool + }{ + { + name: "tag with v prefix", + tagName: "v1.2.3", + expected: "1.2.3", + }, + { + name: "tag without v prefix", + tagName: "1.2.3", + expected: "1.2.3", + }, + { + name: "tag with prerelease", + tagName: "v2.0.0-beta.1", + expected: "2.0.0-beta.1", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(githubRelease{TagName: tc.tagName}) + })) + defer server.Close() + + m := &DefaultManager{ + httpClient: &http.Client{Timeout: 5 * time.Second}, + } + + version, err := m.fetchGitHubRelease(context.Background(), server.URL) + + if tc.shouldError { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tc.expected, version) + } + }) + } +} + +func TestDefaultManager_FetchGitHubRelease_HandlesErrors(t *testing.T) { + tests := []struct { + name string + statusCode int + body string + }{ + { + name: "not found", + statusCode: http.StatusNotFound, + body: `{"message": "Not Found"}`, + }, + { + name: "rate limited", + statusCode: http.StatusForbidden, + body: `{"message": "API rate limit exceeded"}`, + }, + { + name: "server error", + statusCode: http.StatusInternalServerError, + body: `{"message": "Internal Server Error"}`, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(tc.statusCode) + _, _ = w.Write([]byte(tc.body)) + })) + defer server.Close() + + m := &DefaultManager{ + httpClient: &http.Client{Timeout: 5 * time.Second}, + } + + _, err := m.fetchGitHubRelease(context.Background(), server.URL) + assert.Error(t, err) + }) + } +} + +func TestDefaultManager_FetchGitHubRelease_InvalidJSON(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(`{invalid json}`)) + })) + defer server.Close() + + m := &DefaultManager{ + httpClient: &http.Client{Timeout: 5 * time.Second}, + } + + _, err := m.fetchGitHubRelease(context.Background(), server.URL) + assert.Error(t, err) +} + +func TestDefaultManager_FetchGitHubRelease_ContextCancellation(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(1 * time.Second) + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(githubRelease{TagName: "v1.0.0"}) + })) + defer server.Close() + + m := &DefaultManager{ + httpClient: &http.Client{Timeout: 5 * time.Second}, + } + + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately + + _, err := m.fetchGitHubRelease(ctx, server.URL) + assert.Error(t, err) +} + +func TestIsNewerVersion(t *testing.T) { + tests := []struct { + name string + currentVersion string + latestVersion string + expected bool + }{ + { + name: "latest is newer - minor version", + currentVersion: "0.64.1", + latestVersion: "0.65.0", + expected: true, + }, + { + name: "latest is newer - patch version", + currentVersion: "0.64.1", + latestVersion: "0.64.2", + expected: true, + }, + { + name: "latest is newer - major version", + currentVersion: "0.64.1", + latestVersion: "1.0.0", + expected: true, + }, + { + name: "versions are equal", + currentVersion: "0.64.1", + latestVersion: "0.64.1", + expected: false, + }, + { + name: "current is newer - minor version", + currentVersion: "0.65.0", + latestVersion: "0.64.1", + expected: false, + }, + { + name: "current is newer - patch version", + currentVersion: "0.64.2", + latestVersion: "0.64.1", + expected: false, + }, + { + name: "development version", + currentVersion: "development", + latestVersion: "0.65.0", + expected: false, + }, + { + name: "invalid latest version", + currentVersion: "0.64.1", + latestVersion: "invalid", + expected: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + result := isNewerVersion(tc.currentVersion, tc.latestVersion) + assert.Equal(t, tc.expected, result) + }) + } +} diff --git a/management/server/mock_server/account_mock.go b/management/server/mock_server/account_mock.go index 75e971498..026989898 100644 --- a/management/server/mock_server/account_mock.go +++ b/management/server/mock_server/account_mock.go @@ -139,6 +139,12 @@ type MockAccountManager struct { CreatePeerJobFunc func(ctx context.Context, accountID, peerID, userID string, job *types.Job) error GetAllPeerJobsFunc func(ctx context.Context, accountID, userID, peerID string) ([]*types.Job, error) GetPeerJobByIDFunc func(ctx context.Context, accountID, userID, peerID, jobID string) (*types.Job, error) + CreateUserInviteFunc func(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) + AcceptUserInviteFunc func(ctx context.Context, token, password string) error + RegenerateUserInviteFunc func(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) + GetUserInviteInfoFunc func(ctx context.Context, token string) (*types.UserInviteInfo, error) + ListUserInvitesFunc func(ctx context.Context, accountID, initiatorUserID string) ([]*types.UserInvite, error) + DeleteUserInviteFunc func(ctx context.Context, accountID, initiatorUserID, inviteID string) error } func (am *MockAccountManager) CreatePeerJob(ctx context.Context, accountID, peerID, userID string, job *types.Job) error { @@ -713,6 +719,48 @@ func (am *MockAccountManager) CreateUser(ctx context.Context, accountID, userID return nil, status.Errorf(codes.Unimplemented, "method CreateUser is not implemented") } +func (am *MockAccountManager) CreateUserInvite(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) { + if am.CreateUserInviteFunc != nil { + return am.CreateUserInviteFunc(ctx, accountID, initiatorUserID, invite, expiresIn) + } + return nil, status.Errorf(codes.Unimplemented, "method CreateUserInvite is not implemented") +} + +func (am *MockAccountManager) AcceptUserInvite(ctx context.Context, token, password string) error { + if am.AcceptUserInviteFunc != nil { + return am.AcceptUserInviteFunc(ctx, token, password) + } + return status.Errorf(codes.Unimplemented, "method AcceptUserInvite is not implemented") +} + +func (am *MockAccountManager) RegenerateUserInvite(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) { + if am.RegenerateUserInviteFunc != nil { + return am.RegenerateUserInviteFunc(ctx, accountID, initiatorUserID, inviteID, expiresIn) + } + return nil, status.Errorf(codes.Unimplemented, "method RegenerateUserInvite is not implemented") +} + +func (am *MockAccountManager) GetUserInviteInfo(ctx context.Context, token string) (*types.UserInviteInfo, error) { + if am.GetUserInviteInfoFunc != nil { + return am.GetUserInviteInfoFunc(ctx, token) + } + return nil, status.Errorf(codes.Unimplemented, "method GetUserInviteInfo is not implemented") +} + +func (am *MockAccountManager) ListUserInvites(ctx context.Context, accountID, initiatorUserID string) ([]*types.UserInvite, error) { + if am.ListUserInvitesFunc != nil { + return am.ListUserInvitesFunc(ctx, accountID, initiatorUserID) + } + return nil, status.Errorf(codes.Unimplemented, "method ListUserInvites is not implemented") +} + +func (am *MockAccountManager) DeleteUserInvite(ctx context.Context, accountID, initiatorUserID, inviteID string) error { + if am.DeleteUserInviteFunc != nil { + return am.DeleteUserInviteFunc(ctx, accountID, initiatorUserID, inviteID) + } + return status.Errorf(codes.Unimplemented, "method DeleteUserInvite is not implemented") +} + func (am *MockAccountManager) GetAccountIDFromUserAuth(ctx context.Context, userAuth auth.UserAuth) (string, string, error) { if am.GetAccountIDFromUserAuthFunc != nil { return am.GetAccountIDFromUserAuthFunc(ctx, userAuth) diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 4fe800636..7f48f510e 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -126,7 +126,7 @@ func NewSqlStore(ctx context.Context, db *gorm.DB, storeEngine types.Engine, met &types.Account{}, &types.Policy{}, &types.PolicyRule{}, &route.Route{}, &nbdns.NameServerGroup{}, &installation{}, &types.ExtraSettings{}, &posture.Checks{}, &nbpeer.NetworkAddress{}, &networkTypes.Network{}, &routerTypes.NetworkRouter{}, &resourceTypes.NetworkResource{}, &types.AccountOnboarding{}, - &types.Job{}, &zones.Zone{}, &records.Record{}, + &types.Job{}, &zones.Zone{}, &records.Record{}, &types.UserInviteRecord{}, ) if err != nil { return nil, fmt.Errorf("auto migratePreAuto: %w", err) @@ -815,6 +815,130 @@ func (s *SqlStore) GetAccountOwner(ctx context.Context, lockStrength LockingStre return &user, nil } +// SaveUserInvite saves a user invite to the database +func (s *SqlStore) SaveUserInvite(ctx context.Context, invite *types.UserInviteRecord) error { + inviteCopy := invite.Copy() + if err := inviteCopy.EncryptSensitiveData(s.fieldEncrypt); err != nil { + return fmt.Errorf("encrypt invite: %w", err) + } + + result := s.db.Save(inviteCopy) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to save user invite to store: %s", result.Error) + return status.Errorf(status.Internal, "failed to save user invite to store") + } + return nil +} + +// GetUserInviteByID retrieves a user invite by its ID and account ID +func (s *SqlStore) GetUserInviteByID(ctx context.Context, lockStrength LockingStrength, accountID, inviteID string) (*types.UserInviteRecord, error) { + tx := s.db + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var invite types.UserInviteRecord + result := tx.Where("account_id = ?", accountID).Take(&invite, idQueryCondition, inviteID) + if result.Error != nil { + if errors.Is(result.Error, gorm.ErrRecordNotFound) { + return nil, status.Errorf(status.NotFound, "user invite not found") + } + log.WithContext(ctx).Errorf("failed to get user invite from store: %s", result.Error) + return nil, status.Errorf(status.Internal, "failed to get user invite from store") + } + + if err := invite.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt invite: %w", err) + } + + return &invite, nil +} + +// GetUserInviteByHashedToken retrieves a user invite by its hashed token +func (s *SqlStore) GetUserInviteByHashedToken(ctx context.Context, lockStrength LockingStrength, hashedToken string) (*types.UserInviteRecord, error) { + tx := s.db + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var invite types.UserInviteRecord + result := tx.Take(&invite, "hashed_token = ?", hashedToken) + if result.Error != nil { + if errors.Is(result.Error, gorm.ErrRecordNotFound) { + return nil, status.Errorf(status.NotFound, "user invite not found") + } + log.WithContext(ctx).Errorf("failed to get user invite from store: %s", result.Error) + return nil, status.Errorf(status.Internal, "failed to get user invite from store") + } + + if err := invite.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt invite: %w", err) + } + + return &invite, nil +} + +// GetUserInviteByEmail retrieves a user invite by account ID and email. +// Since email is encrypted with random IVs, we fetch all invites for the account +// and compare emails in memory after decryption. +func (s *SqlStore) GetUserInviteByEmail(ctx context.Context, lockStrength LockingStrength, accountID, email string) (*types.UserInviteRecord, error) { + tx := s.db + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var invites []*types.UserInviteRecord + result := tx.Find(&invites, "account_id = ?", accountID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to get user invites from store: %s", result.Error) + return nil, status.Errorf(status.Internal, "failed to get user invites from store") + } + + for _, invite := range invites { + if err := invite.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt invite: %w", err) + } + if strings.EqualFold(invite.Email, email) { + return invite, nil + } + } + + return nil, status.Errorf(status.NotFound, "user invite not found for email") +} + +// GetAccountUserInvites retrieves all user invites for an account +func (s *SqlStore) GetAccountUserInvites(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types.UserInviteRecord, error) { + tx := s.db + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var invites []*types.UserInviteRecord + result := tx.Find(&invites, "account_id = ?", accountID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to get user invites from store: %s", result.Error) + return nil, status.Errorf(status.Internal, "failed to get user invites from store") + } + + for _, invite := range invites { + if err := invite.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt invite: %w", err) + } + } + + return invites, nil +} + +// DeleteUserInvite deletes a user invite by its ID +func (s *SqlStore) DeleteUserInvite(ctx context.Context, inviteID string) error { + result := s.db.Delete(&types.UserInviteRecord{}, idQueryCondition, inviteID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to delete user invite from store: %s", result.Error) + return status.Errorf(status.Internal, "failed to delete user invite from store") + } + return nil +} + func (s *SqlStore) GetAccountGroups(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types.Group, error) { tx := s.db if lockStrength != LockingStrengthNone { diff --git a/management/server/store/sql_store_user_invite_test.go b/management/server/store/sql_store_user_invite_test.go new file mode 100644 index 000000000..fb6934a2e --- /dev/null +++ b/management/server/store/sql_store_user_invite_test.go @@ -0,0 +1,520 @@ +package store + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/server/types" +) + +func TestSqlStore_SaveUserInvite(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + invite := &types.UserInviteRecord{ + ID: "invite-1", + AccountID: "account-1", + Email: "test@example.com", + Name: "Test User", + Role: "user", + AutoGroups: []string{"group-1", "group-2"}, + HashedToken: "hashed-token-123", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + // Verify the invite was saved + retrieved, err := store.GetUserInviteByID(ctx, LockingStrengthNone, invite.AccountID, invite.ID) + require.NoError(t, err) + assert.Equal(t, invite.ID, retrieved.ID) + assert.Equal(t, invite.Email, retrieved.Email) + assert.Equal(t, invite.Name, retrieved.Name) + assert.Equal(t, invite.Role, retrieved.Role) + assert.Equal(t, invite.AutoGroups, retrieved.AutoGroups) + assert.Equal(t, invite.CreatedBy, retrieved.CreatedBy) + }) +} + +func TestSqlStore_SaveUserInvite_Update(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + invite := &types.UserInviteRecord{ + ID: "invite-update", + AccountID: "account-1", + Email: "test@example.com", + Name: "Test User", + Role: "user", + AutoGroups: []string{"group-1"}, + HashedToken: "hashed-token-123", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + // Update the invite with a new token + invite.HashedToken = "new-hashed-token" + invite.ExpiresAt = time.Now().Add(24 * time.Hour) + + err = store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + // Verify the update + retrieved, err := store.GetUserInviteByID(ctx, LockingStrengthNone, invite.AccountID, invite.ID) + require.NoError(t, err) + assert.Equal(t, "new-hashed-token", retrieved.HashedToken) + }) +} + +func TestSqlStore_GetUserInviteByID(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + invite := &types.UserInviteRecord{ + ID: "invite-get-by-id", + AccountID: "account-1", + Email: "getbyid@example.com", + Name: "Get By ID User", + Role: "admin", + AutoGroups: []string{}, + HashedToken: "hashed-token-get", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + // Get by ID - success + retrieved, err := store.GetUserInviteByID(ctx, LockingStrengthNone, invite.AccountID, invite.ID) + require.NoError(t, err) + assert.Equal(t, invite.ID, retrieved.ID) + assert.Equal(t, invite.Email, retrieved.Email) + + // Get by ID - wrong account + _, err = store.GetUserInviteByID(ctx, LockingStrengthNone, "wrong-account", invite.ID) + assert.Error(t, err) + + // Get by ID - not found + _, err = store.GetUserInviteByID(ctx, LockingStrengthNone, invite.AccountID, "non-existent") + assert.Error(t, err) + }) +} + +func TestSqlStore_GetUserInviteByHashedToken(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + invite := &types.UserInviteRecord{ + ID: "invite-get-by-token", + AccountID: "account-1", + Email: "getbytoken@example.com", + Name: "Get By Token User", + Role: "user", + AutoGroups: []string{"group-1"}, + HashedToken: "unique-hashed-token-456", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + // Get by hashed token - success + retrieved, err := store.GetUserInviteByHashedToken(ctx, LockingStrengthNone, invite.HashedToken) + require.NoError(t, err) + assert.Equal(t, invite.ID, retrieved.ID) + assert.Equal(t, invite.Email, retrieved.Email) + + // Get by hashed token - not found + _, err = store.GetUserInviteByHashedToken(ctx, LockingStrengthNone, "non-existent-token") + assert.Error(t, err) + }) +} + +func TestSqlStore_GetUserInviteByEmail(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + invite := &types.UserInviteRecord{ + ID: "invite-get-by-email", + AccountID: "account-email-test", + Email: "unique-email@example.com", + Name: "Get By Email User", + Role: "user", + AutoGroups: []string{}, + HashedToken: "hashed-token-email", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + // Get by email - success + retrieved, err := store.GetUserInviteByEmail(ctx, LockingStrengthNone, invite.AccountID, invite.Email) + require.NoError(t, err) + assert.Equal(t, invite.ID, retrieved.ID) + + // Get by email - case insensitive + retrieved, err = store.GetUserInviteByEmail(ctx, LockingStrengthNone, invite.AccountID, "UNIQUE-EMAIL@EXAMPLE.COM") + require.NoError(t, err) + assert.Equal(t, invite.ID, retrieved.ID) + + // Get by email - wrong account + _, err = store.GetUserInviteByEmail(ctx, LockingStrengthNone, "wrong-account", invite.Email) + assert.Error(t, err) + + // Get by email - not found + _, err = store.GetUserInviteByEmail(ctx, LockingStrengthNone, invite.AccountID, "nonexistent@example.com") + assert.Error(t, err) + }) +} + +func TestSqlStore_GetAccountUserInvites(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + accountID := "account-list-invites" + + invites := []*types.UserInviteRecord{ + { + ID: "invite-list-1", + AccountID: accountID, + Email: "user1@example.com", + Name: "User One", + Role: "user", + AutoGroups: []string{"group-1"}, + HashedToken: "hashed-token-list-1", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + }, + { + ID: "invite-list-2", + AccountID: accountID, + Email: "user2@example.com", + Name: "User Two", + Role: "admin", + AutoGroups: []string{"group-2"}, + HashedToken: "hashed-token-list-2", + ExpiresAt: time.Now().Add(24 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + }, + { + ID: "invite-list-3", + AccountID: "different-account", + Email: "user3@example.com", + Name: "User Three", + Role: "user", + AutoGroups: []string{}, + HashedToken: "hashed-token-list-3", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + }, + } + + for _, invite := range invites { + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + } + + // Get all invites for the account + retrieved, err := store.GetAccountUserInvites(ctx, LockingStrengthNone, accountID) + require.NoError(t, err) + assert.Len(t, retrieved, 2) + + // Verify the invites belong to the correct account + for _, invite := range retrieved { + assert.Equal(t, accountID, invite.AccountID) + } + + // Get invites for account with no invites + retrieved, err = store.GetAccountUserInvites(ctx, LockingStrengthNone, "empty-account") + require.NoError(t, err) + assert.Len(t, retrieved, 0) + }) +} + +func TestSqlStore_DeleteUserInvite(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + invite := &types.UserInviteRecord{ + ID: "invite-delete", + AccountID: "account-delete-test", + Email: "delete@example.com", + Name: "Delete User", + Role: "user", + AutoGroups: []string{}, + HashedToken: "hashed-token-delete", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + // Verify invite exists + _, err = store.GetUserInviteByID(ctx, LockingStrengthNone, invite.AccountID, invite.ID) + require.NoError(t, err) + + // Delete the invite + err = store.DeleteUserInvite(ctx, invite.ID) + require.NoError(t, err) + + // Verify invite is deleted + _, err = store.GetUserInviteByID(ctx, LockingStrengthNone, invite.AccountID, invite.ID) + assert.Error(t, err) + }) +} + +func TestSqlStore_UserInvite_EncryptedFields(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + invite := &types.UserInviteRecord{ + ID: "invite-encrypted", + AccountID: "account-encrypted", + Email: "sensitive-email@example.com", + Name: "Sensitive Name", + Role: "user", + AutoGroups: []string{"group-1"}, + HashedToken: "hashed-token-encrypted", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + // Retrieve and verify decryption works + retrieved, err := store.GetUserInviteByID(ctx, LockingStrengthNone, invite.AccountID, invite.ID) + require.NoError(t, err) + assert.Equal(t, "sensitive-email@example.com", retrieved.Email) + assert.Equal(t, "Sensitive Name", retrieved.Name) + }) +} + +func TestSqlStore_DeleteUserInvite_NonExistent(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + // Deleting a non-existent invite should not return an error + err := store.DeleteUserInvite(ctx, "non-existent-invite-id") + require.NoError(t, err) + }) +} + +func TestSqlStore_UserInvite_SameEmailDifferentAccounts(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + email := "shared-email@example.com" + + // Create invite in first account + invite1 := &types.UserInviteRecord{ + ID: "invite-account1", + AccountID: "account-1", + Email: email, + Name: "User Account 1", + Role: "user", + AutoGroups: []string{}, + HashedToken: "hashed-token-account1", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-1", + } + + // Create invite in second account with same email + invite2 := &types.UserInviteRecord{ + ID: "invite-account2", + AccountID: "account-2", + Email: email, + Name: "User Account 2", + Role: "admin", + AutoGroups: []string{"group-1"}, + HashedToken: "hashed-token-account2", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-2", + } + + err := store.SaveUserInvite(ctx, invite1) + require.NoError(t, err) + + err = store.SaveUserInvite(ctx, invite2) + require.NoError(t, err) + + // Verify each account gets the correct invite by email + retrieved1, err := store.GetUserInviteByEmail(ctx, LockingStrengthNone, "account-1", email) + require.NoError(t, err) + assert.Equal(t, "invite-account1", retrieved1.ID) + assert.Equal(t, "User Account 1", retrieved1.Name) + + retrieved2, err := store.GetUserInviteByEmail(ctx, LockingStrengthNone, "account-2", email) + require.NoError(t, err) + assert.Equal(t, "invite-account2", retrieved2.ID) + assert.Equal(t, "User Account 2", retrieved2.Name) + }) +} + +func TestSqlStore_UserInvite_LockingStrength(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + invite := &types.UserInviteRecord{ + ID: "invite-locking", + AccountID: "account-locking", + Email: "locking@example.com", + Name: "Locking Test User", + Role: "user", + AutoGroups: []string{}, + HashedToken: "hashed-token-locking", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + // Test with different locking strengths + lockStrengths := []LockingStrength{LockingStrengthNone, LockingStrengthShare, LockingStrengthUpdate} + + for _, strength := range lockStrengths { + retrieved, err := store.GetUserInviteByID(ctx, strength, invite.AccountID, invite.ID) + require.NoError(t, err) + assert.Equal(t, invite.ID, retrieved.ID) + + retrieved, err = store.GetUserInviteByHashedToken(ctx, strength, invite.HashedToken) + require.NoError(t, err) + assert.Equal(t, invite.ID, retrieved.ID) + + retrieved, err = store.GetUserInviteByEmail(ctx, strength, invite.AccountID, invite.Email) + require.NoError(t, err) + assert.Equal(t, invite.ID, retrieved.ID) + + invites, err := store.GetAccountUserInvites(ctx, strength, invite.AccountID) + require.NoError(t, err) + assert.Len(t, invites, 1) + } + }) +} + +func TestSqlStore_UserInvite_EmptyAutoGroups(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + // Test with nil AutoGroups + invite := &types.UserInviteRecord{ + ID: "invite-nil-autogroups", + AccountID: "account-autogroups", + Email: "nilgroups@example.com", + Name: "Nil Groups User", + Role: "user", + AutoGroups: nil, + HashedToken: "hashed-token-nil", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + retrieved, err := store.GetUserInviteByID(ctx, LockingStrengthNone, invite.AccountID, invite.ID) + require.NoError(t, err) + // Should return empty slice or nil, both are acceptable + assert.Empty(t, retrieved.AutoGroups) + }) +} + +func TestSqlStore_UserInvite_TimestampPrecision(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + now := time.Now().UTC().Truncate(time.Millisecond) + expiresAt := now.Add(72 * time.Hour) + + invite := &types.UserInviteRecord{ + ID: "invite-timestamp", + AccountID: "account-timestamp", + Email: "timestamp@example.com", + Name: "Timestamp User", + Role: "user", + AutoGroups: []string{}, + HashedToken: "hashed-token-timestamp", + ExpiresAt: expiresAt, + CreatedAt: now, + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + retrieved, err := store.GetUserInviteByID(ctx, LockingStrengthNone, invite.AccountID, invite.ID) + require.NoError(t, err) + + // Verify timestamps are preserved (within reasonable precision) + assert.WithinDuration(t, now, retrieved.CreatedAt, time.Second) + assert.WithinDuration(t, expiresAt, retrieved.ExpiresAt, time.Second) + }) +} diff --git a/management/server/store/store.go b/management/server/store/store.go index 02c746592..be0d29768 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -92,6 +92,13 @@ type Store interface { DeleteHashedPAT2TokenIDIndex(hashedToken string) error DeleteTokenID2UserIDIndex(tokenID string) error + SaveUserInvite(ctx context.Context, invite *types.UserInviteRecord) error + GetUserInviteByID(ctx context.Context, lockStrength LockingStrength, accountID, inviteID string) (*types.UserInviteRecord, error) + GetUserInviteByHashedToken(ctx context.Context, lockStrength LockingStrength, hashedToken string) (*types.UserInviteRecord, error) + GetUserInviteByEmail(ctx context.Context, lockStrength LockingStrength, accountID, email string) (*types.UserInviteRecord, error) + GetAccountUserInvites(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types.UserInviteRecord, error) + DeleteUserInvite(ctx context.Context, inviteID string) error + GetPATByID(ctx context.Context, lockStrength LockingStrength, userID, patID string) (*types.PersonalAccessToken, error) GetUserPATs(ctx context.Context, lockStrength LockingStrength, userID string) ([]*types.PersonalAccessToken, error) GetPATByHashedToken(ctx context.Context, lockStrength LockingStrength, hashedToken string) (*types.PersonalAccessToken, error) diff --git a/management/server/types/user_invite.go b/management/server/types/user_invite.go new file mode 100644 index 000000000..1544b0ff3 --- /dev/null +++ b/management/server/types/user_invite.go @@ -0,0 +1,201 @@ +package types + +import ( + "crypto/sha256" + b64 "encoding/base64" + "fmt" + "hash/crc32" + "strings" + "time" + + b "github.com/hashicorp/go-secure-stdlib/base62" + "github.com/rs/xid" + + "github.com/netbirdio/netbird/base62" + "github.com/netbirdio/netbird/util/crypt" +) + +const ( + // InviteTokenPrefix is the prefix for invite tokens + InviteTokenPrefix = "nbi_" + // InviteTokenSecretLength is the length of the random secret part + InviteTokenSecretLength = 30 + // InviteTokenChecksumLength is the length of the encoded checksum + InviteTokenChecksumLength = 6 + // InviteTokenLength is the total length of the token (4 + 30 + 6 = 40) + InviteTokenLength = 40 + // DefaultInviteExpirationSeconds is the default expiration time for invites (72 hours) + DefaultInviteExpirationSeconds = 259200 + // MinInviteExpirationSeconds is the minimum expiration time for invites (1 hour) + MinInviteExpirationSeconds = 3600 +) + +// UserInviteRecord represents an invitation for a user to set up their account (database model) +type UserInviteRecord struct { + ID string `gorm:"primaryKey"` + AccountID string `gorm:"index;not null"` + Email string `gorm:"index;not null"` + Name string `gorm:"not null"` + Role string `gorm:"not null"` + AutoGroups []string `gorm:"serializer:json"` + HashedToken string `gorm:"index;not null"` // SHA-256 hash of the token (base64 encoded) + ExpiresAt time.Time `gorm:"not null"` + CreatedAt time.Time `gorm:"not null"` + CreatedBy string `gorm:"not null"` +} + +// TableName returns the table name for GORM +func (UserInviteRecord) TableName() string { + return "user_invites" +} + +// GenerateInviteToken creates a new invite token with the format: nbi_ +// Returns the hashed token (for storage) and the plain token (to give to the user) +func GenerateInviteToken() (hashedToken string, plainToken string, err error) { + secret, err := b.Random(InviteTokenSecretLength) + if err != nil { + return "", "", fmt.Errorf("failed to generate random secret: %w", err) + } + + checksum := crc32.ChecksumIEEE([]byte(secret)) + encodedChecksum := base62.Encode(checksum) + // Left-pad with '0' to ensure exactly 6 characters (fmt.Sprintf %s pads with spaces which breaks base62.Decode) + paddedChecksum := encodedChecksum + if len(paddedChecksum) < InviteTokenChecksumLength { + paddedChecksum = strings.Repeat("0", InviteTokenChecksumLength-len(paddedChecksum)) + paddedChecksum + } + + plainToken = InviteTokenPrefix + secret + paddedChecksum + hash := sha256.Sum256([]byte(plainToken)) + hashedToken = b64.StdEncoding.EncodeToString(hash[:]) + + return hashedToken, plainToken, nil +} + +// HashInviteToken creates a SHA-256 hash of the token (base64 encoded) +func HashInviteToken(token string) string { + hash := sha256.Sum256([]byte(token)) + return b64.StdEncoding.EncodeToString(hash[:]) +} + +// ValidateInviteToken validates the token format and checksum. +// Returns an error if the token is invalid. +func ValidateInviteToken(token string) error { + if len(token) != InviteTokenLength { + return fmt.Errorf("invalid token length") + } + + prefix := token[:len(InviteTokenPrefix)] + if prefix != InviteTokenPrefix { + return fmt.Errorf("invalid token prefix") + } + + secret := token[len(InviteTokenPrefix) : len(InviteTokenPrefix)+InviteTokenSecretLength] + encodedChecksum := token[len(InviteTokenPrefix)+InviteTokenSecretLength:] + + verificationChecksum, err := base62.Decode(encodedChecksum) + if err != nil { + return fmt.Errorf("checksum decoding failed: %w", err) + } + + secretChecksum := crc32.ChecksumIEEE([]byte(secret)) + if secretChecksum != verificationChecksum { + return fmt.Errorf("checksum does not match") + } + + return nil +} + +// IsExpired checks if the invite has expired +func (i *UserInviteRecord) IsExpired() bool { + return time.Now().After(i.ExpiresAt) +} + +// UserInvite contains the result of creating or regenerating an invite +type UserInvite struct { + UserInfo *UserInfo + InviteToken string + InviteExpiresAt time.Time + InviteCreatedAt time.Time +} + +// UserInviteInfo contains public information about an invite (for unauthenticated endpoint) +type UserInviteInfo struct { + Email string `json:"email"` + Name string `json:"name"` + ExpiresAt time.Time `json:"expires_at"` + Valid bool `json:"valid"` + InvitedBy string `json:"invited_by"` +} + +// NewInviteID generates a new invite ID using xid +func NewInviteID() string { + return xid.New().String() +} + +// EncryptSensitiveData encrypts the invite's sensitive fields (Email and Name) in place. +func (i *UserInviteRecord) EncryptSensitiveData(enc *crypt.FieldEncrypt) error { + if enc == nil { + return nil + } + + var err error + if i.Email != "" { + i.Email, err = enc.Encrypt(i.Email) + if err != nil { + return fmt.Errorf("encrypt email: %w", err) + } + } + + if i.Name != "" { + i.Name, err = enc.Encrypt(i.Name) + if err != nil { + return fmt.Errorf("encrypt name: %w", err) + } + } + + return nil +} + +// DecryptSensitiveData decrypts the invite's sensitive fields (Email and Name) in place. +func (i *UserInviteRecord) DecryptSensitiveData(enc *crypt.FieldEncrypt) error { + if enc == nil { + return nil + } + + var err error + if i.Email != "" { + i.Email, err = enc.Decrypt(i.Email) + if err != nil { + return fmt.Errorf("decrypt email: %w", err) + } + } + + if i.Name != "" { + i.Name, err = enc.Decrypt(i.Name) + if err != nil { + return fmt.Errorf("decrypt name: %w", err) + } + } + + return nil +} + +// Copy creates a deep copy of the UserInviteRecord +func (i *UserInviteRecord) Copy() *UserInviteRecord { + autoGroups := make([]string, len(i.AutoGroups)) + copy(autoGroups, i.AutoGroups) + + return &UserInviteRecord{ + ID: i.ID, + AccountID: i.AccountID, + Email: i.Email, + Name: i.Name, + Role: i.Role, + AutoGroups: autoGroups, + HashedToken: i.HashedToken, + ExpiresAt: i.ExpiresAt, + CreatedAt: i.CreatedAt, + CreatedBy: i.CreatedBy, + } +} diff --git a/management/server/types/user_invite_test.go b/management/server/types/user_invite_test.go new file mode 100644 index 000000000..09dae3800 --- /dev/null +++ b/management/server/types/user_invite_test.go @@ -0,0 +1,355 @@ +package types + +import ( + "crypto/sha256" + b64 "encoding/base64" + "hash/crc32" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/base62" + "github.com/netbirdio/netbird/util/crypt" +) + +func TestUserInviteRecord_TableName(t *testing.T) { + invite := UserInviteRecord{} + assert.Equal(t, "user_invites", invite.TableName()) +} + +func TestGenerateInviteToken_Success(t *testing.T) { + hashedToken, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + assert.NotEmpty(t, hashedToken) + assert.NotEmpty(t, plainToken) +} + +func TestGenerateInviteToken_Length(t *testing.T) { + _, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + assert.Len(t, plainToken, InviteTokenLength) +} + +func TestGenerateInviteToken_Prefix(t *testing.T) { + _, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + assert.True(t, strings.HasPrefix(plainToken, InviteTokenPrefix)) +} + +func TestGenerateInviteToken_Hashing(t *testing.T) { + hashedToken, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + + expectedHash := sha256.Sum256([]byte(plainToken)) + expectedHashedToken := b64.StdEncoding.EncodeToString(expectedHash[:]) + assert.Equal(t, expectedHashedToken, hashedToken) +} + +func TestGenerateInviteToken_Checksum(t *testing.T) { + _, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + + // Extract parts + secret := plainToken[len(InviteTokenPrefix) : len(InviteTokenPrefix)+InviteTokenSecretLength] + checksumStr := plainToken[len(InviteTokenPrefix)+InviteTokenSecretLength:] + + // Verify checksum + expectedChecksum := crc32.ChecksumIEEE([]byte(secret)) + actualChecksum, err := base62.Decode(checksumStr) + require.NoError(t, err) + assert.Equal(t, expectedChecksum, actualChecksum) +} + +func TestGenerateInviteToken_Uniqueness(t *testing.T) { + tokens := make(map[string]bool) + for i := 0; i < 100; i++ { + _, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + assert.False(t, tokens[plainToken], "Token should be unique") + tokens[plainToken] = true + } +} + +func TestHashInviteToken(t *testing.T) { + token := "nbi_testtoken123456789012345678901234" + hashedToken := HashInviteToken(token) + + expectedHash := sha256.Sum256([]byte(token)) + expectedHashedToken := b64.StdEncoding.EncodeToString(expectedHash[:]) + assert.Equal(t, expectedHashedToken, hashedToken) +} + +func TestHashInviteToken_Consistency(t *testing.T) { + token := "nbi_testtoken123456789012345678901234" + hash1 := HashInviteToken(token) + hash2 := HashInviteToken(token) + assert.Equal(t, hash1, hash2) +} + +func TestHashInviteToken_DifferentTokens(t *testing.T) { + token1 := "nbi_testtoken123456789012345678901234" + token2 := "nbi_testtoken123456789012345678901235" + hash1 := HashInviteToken(token1) + hash2 := HashInviteToken(token2) + assert.NotEqual(t, hash1, hash2) +} + +func TestValidateInviteToken_Success(t *testing.T) { + _, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + + err = ValidateInviteToken(plainToken) + assert.NoError(t, err) +} + +func TestValidateInviteToken_InvalidLength(t *testing.T) { + testCases := []struct { + name string + token string + }{ + {"empty", ""}, + {"too short", "nbi_abc"}, + {"too long", "nbi_" + strings.Repeat("a", 50)}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := ValidateInviteToken(tc.token) + require.Error(t, err) + assert.Contains(t, err.Error(), "invalid token length") + }) + } +} + +func TestValidateInviteToken_InvalidPrefix(t *testing.T) { + // Create a token with wrong prefix but correct length + token := "xyz_" + strings.Repeat("a", 30) + "000000" + err := ValidateInviteToken(token) + require.Error(t, err) + assert.Contains(t, err.Error(), "invalid token prefix") +} + +func TestValidateInviteToken_InvalidChecksum(t *testing.T) { + // Create a token with correct format but invalid checksum + token := InviteTokenPrefix + strings.Repeat("a", InviteTokenSecretLength) + "ZZZZZZ" + err := ValidateInviteToken(token) + require.Error(t, err) + assert.Contains(t, err.Error(), "checksum") +} + +func TestValidateInviteToken_ModifiedToken(t *testing.T) { + _, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + + // Modify one character in the secret part + modifiedToken := plainToken[:5] + "X" + plainToken[6:] + err = ValidateInviteToken(modifiedToken) + require.Error(t, err) +} + +func TestUserInviteRecord_IsExpired(t *testing.T) { + t.Run("not expired", func(t *testing.T) { + invite := &UserInviteRecord{ + ExpiresAt: time.Now().Add(time.Hour), + } + assert.False(t, invite.IsExpired()) + }) + + t.Run("expired", func(t *testing.T) { + invite := &UserInviteRecord{ + ExpiresAt: time.Now().Add(-time.Hour), + } + assert.True(t, invite.IsExpired()) + }) + + t.Run("just expired", func(t *testing.T) { + invite := &UserInviteRecord{ + ExpiresAt: time.Now().Add(-time.Second), + } + assert.True(t, invite.IsExpired()) + }) +} + +func TestNewInviteID(t *testing.T) { + id := NewInviteID() + assert.NotEmpty(t, id) + assert.Len(t, id, 20) // xid generates 20 character IDs +} + +func TestNewInviteID_Uniqueness(t *testing.T) { + ids := make(map[string]bool) + for i := 0; i < 100; i++ { + id := NewInviteID() + assert.False(t, ids[id], "ID should be unique") + ids[id] = true + } +} + +func TestUserInviteRecord_EncryptDecryptSensitiveData(t *testing.T) { + key, err := crypt.GenerateKey() + require.NoError(t, err) + fieldEncrypt, err := crypt.NewFieldEncrypt(key) + require.NoError(t, err) + + t.Run("encrypt and decrypt", func(t *testing.T) { + invite := &UserInviteRecord{ + ID: "test-invite", + AccountID: "test-account", + Email: "test@example.com", + Name: "Test User", + Role: "user", + } + + // Encrypt + err := invite.EncryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + // Verify encrypted values are different from original + assert.NotEqual(t, "test@example.com", invite.Email) + assert.NotEqual(t, "Test User", invite.Name) + + // Decrypt + err = invite.DecryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + // Verify decrypted values match original + assert.Equal(t, "test@example.com", invite.Email) + assert.Equal(t, "Test User", invite.Name) + }) + + t.Run("encrypt empty fields", func(t *testing.T) { + invite := &UserInviteRecord{ + ID: "test-invite", + AccountID: "test-account", + Email: "", + Name: "", + Role: "user", + } + + err := invite.EncryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + assert.Equal(t, "", invite.Email) + assert.Equal(t, "", invite.Name) + + err = invite.DecryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + assert.Equal(t, "", invite.Email) + assert.Equal(t, "", invite.Name) + }) + + t.Run("nil encryptor", func(t *testing.T) { + invite := &UserInviteRecord{ + ID: "test-invite", + AccountID: "test-account", + Email: "test@example.com", + Name: "Test User", + Role: "user", + } + + err := invite.EncryptSensitiveData(nil) + require.NoError(t, err) + assert.Equal(t, "test@example.com", invite.Email) + assert.Equal(t, "Test User", invite.Name) + + err = invite.DecryptSensitiveData(nil) + require.NoError(t, err) + assert.Equal(t, "test@example.com", invite.Email) + assert.Equal(t, "Test User", invite.Name) + }) +} + +func TestUserInviteRecord_Copy(t *testing.T) { + now := time.Now() + expiresAt := now.Add(72 * time.Hour) + + original := &UserInviteRecord{ + ID: "invite-id", + AccountID: "account-id", + Email: "test@example.com", + Name: "Test User", + Role: "user", + AutoGroups: []string{"group1", "group2"}, + HashedToken: "hashed-token", + ExpiresAt: expiresAt, + CreatedAt: now, + CreatedBy: "creator-id", + } + + copied := original.Copy() + + // Verify all fields are copied + assert.Equal(t, original.ID, copied.ID) + assert.Equal(t, original.AccountID, copied.AccountID) + assert.Equal(t, original.Email, copied.Email) + assert.Equal(t, original.Name, copied.Name) + assert.Equal(t, original.Role, copied.Role) + assert.Equal(t, original.AutoGroups, copied.AutoGroups) + assert.Equal(t, original.HashedToken, copied.HashedToken) + assert.Equal(t, original.ExpiresAt, copied.ExpiresAt) + assert.Equal(t, original.CreatedAt, copied.CreatedAt) + assert.Equal(t, original.CreatedBy, copied.CreatedBy) + + // Verify deep copy of AutoGroups (modifying copy doesn't affect original) + copied.AutoGroups[0] = "modified" + assert.NotEqual(t, original.AutoGroups[0], copied.AutoGroups[0]) + assert.Equal(t, "group1", original.AutoGroups[0]) +} + +func TestUserInviteRecord_Copy_EmptyAutoGroups(t *testing.T) { + original := &UserInviteRecord{ + ID: "invite-id", + AccountID: "account-id", + AutoGroups: []string{}, + } + + copied := original.Copy() + assert.NotNil(t, copied.AutoGroups) + assert.Len(t, copied.AutoGroups, 0) +} + +func TestUserInviteRecord_Copy_NilAutoGroups(t *testing.T) { + original := &UserInviteRecord{ + ID: "invite-id", + AccountID: "account-id", + AutoGroups: nil, + } + + copied := original.Copy() + assert.NotNil(t, copied.AutoGroups) + assert.Len(t, copied.AutoGroups, 0) +} + +func TestInviteTokenConstants(t *testing.T) { + // Verify constants are consistent + expectedLength := len(InviteTokenPrefix) + InviteTokenSecretLength + InviteTokenChecksumLength + assert.Equal(t, InviteTokenLength, expectedLength) + assert.Equal(t, 4, len(InviteTokenPrefix)) + assert.Equal(t, 30, InviteTokenSecretLength) + assert.Equal(t, 6, InviteTokenChecksumLength) + assert.Equal(t, 40, InviteTokenLength) + assert.Equal(t, 259200, DefaultInviteExpirationSeconds) // 72 hours + assert.Equal(t, 3600, MinInviteExpirationSeconds) // 1 hour +} + +func TestGenerateInviteToken_ValidatesOwnOutput(t *testing.T) { + // Generate multiple tokens and ensure they all validate + for i := 0; i < 50; i++ { + _, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + + err = ValidateInviteToken(plainToken) + assert.NoError(t, err, "Generated token should always be valid") + } +} + +func TestHashInviteToken_MatchesGeneratedHash(t *testing.T) { + hashedToken, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + + // HashInviteToken should produce the same hash as GenerateInviteToken + rehashedToken := HashInviteToken(plainToken) + assert.Equal(t, hashedToken, rehashedToken) +} diff --git a/management/server/user.go b/management/server/user.go index 0a090d681..51da7a633 100644 --- a/management/server/user.go +++ b/management/server/user.go @@ -6,6 +6,7 @@ import ( "fmt" "strings" "time" + "unicode" nbcontext "github.com/netbirdio/netbird/management/server/context" "github.com/netbirdio/netbird/shared/auth" @@ -1453,3 +1454,368 @@ func (am *DefaultAccountManager) RejectUser(ctx context.Context, accountID, init return nil } + +// CreateUserInvite creates an invite link for a new user in the embedded IdP. +// The user is NOT created until the invite is accepted. +func (am *DefaultAccountManager) CreateUserInvite(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) { + if !IsEmbeddedIdp(am.idpManager) { + return nil, status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") + } + + if err := validateUserInvite(invite); err != nil { + return nil, err + } + + allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, initiatorUserID, modules.Users, operations.Create) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !allowed { + return nil, status.NewPermissionDeniedError() + } + + // Check if user already exists in NetBird DB + existingUsers, err := am.Store.GetAccountUsers(ctx, store.LockingStrengthNone, accountID) + if err != nil { + return nil, err + } + for _, user := range existingUsers { + if strings.EqualFold(user.Email, invite.Email) { + return nil, status.Errorf(status.UserAlreadyExists, "user with this email already exists") + } + } + + // Check if invite already exists for this email + existingInvite, err := am.Store.GetUserInviteByEmail(ctx, store.LockingStrengthNone, accountID, invite.Email) + if err != nil { + if sErr, ok := status.FromError(err); !ok || sErr.Type() != status.NotFound { + return nil, fmt.Errorf("failed to check existing invites: %w", err) + } + } + if existingInvite != nil { + return nil, status.Errorf(status.AlreadyExists, "invite already exists for this email") + } + + // Calculate expiration time + if expiresIn <= 0 { + expiresIn = types.DefaultInviteExpirationSeconds + } + + if expiresIn < types.MinInviteExpirationSeconds { + return nil, status.Errorf(status.InvalidArgument, "invite expiration must be at least 1 hour") + } + expiresAt := time.Now().UTC().Add(time.Duration(expiresIn) * time.Second) + + // Generate invite token + inviteID := types.NewInviteID() + hashedToken, plainToken, err := types.GenerateInviteToken() + if err != nil { + return nil, fmt.Errorf("failed to generate invite token: %w", err) + } + + // Create the invite record (no user created yet) + userInvite := &types.UserInviteRecord{ + ID: inviteID, + AccountID: accountID, + Email: invite.Email, + Name: invite.Name, + Role: invite.Role, + AutoGroups: invite.AutoGroups, + HashedToken: hashedToken, + ExpiresAt: expiresAt, + CreatedAt: time.Now().UTC(), + CreatedBy: initiatorUserID, + } + + if err := am.Store.SaveUserInvite(ctx, userInvite); err != nil { + return nil, err + } + + am.StoreEvent(ctx, initiatorUserID, inviteID, accountID, activity.UserInviteLinkCreated, map[string]any{"email": invite.Email}) + + return &types.UserInvite{ + UserInfo: &types.UserInfo{ + ID: inviteID, + Email: invite.Email, + Name: invite.Name, + Role: invite.Role, + AutoGroups: invite.AutoGroups, + Status: string(types.UserStatusInvited), + Issued: types.UserIssuedAPI, + }, + InviteToken: plainToken, + InviteExpiresAt: expiresAt, + }, nil +} + +// GetUserInviteInfo retrieves invite information from a token (public endpoint). +func (am *DefaultAccountManager) GetUserInviteInfo(ctx context.Context, token string) (*types.UserInviteInfo, error) { + if err := types.ValidateInviteToken(token); err != nil { + return nil, status.Errorf(status.InvalidArgument, "invalid invite token: %v", err) + } + + hashedToken := types.HashInviteToken(token) + invite, err := am.Store.GetUserInviteByHashedToken(ctx, store.LockingStrengthNone, hashedToken) + if err != nil { + return nil, err + } + + // Get the inviter's name + invitedBy := "" + if invite.CreatedBy != "" { + inviter, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthNone, invite.CreatedBy) + if err == nil && inviter != nil { + invitedBy = inviter.Name + } + } + + return &types.UserInviteInfo{ + Email: invite.Email, + Name: invite.Name, + ExpiresAt: invite.ExpiresAt, + Valid: !invite.IsExpired(), + InvitedBy: invitedBy, + }, nil +} + +// ListUserInvites returns all invites for an account. +func (am *DefaultAccountManager) ListUserInvites(ctx context.Context, accountID, initiatorUserID string) ([]*types.UserInvite, error) { + if !IsEmbeddedIdp(am.idpManager) { + return nil, status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") + } + + allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, initiatorUserID, modules.Users, operations.Read) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !allowed { + return nil, status.NewPermissionDeniedError() + } + + records, err := am.Store.GetAccountUserInvites(ctx, store.LockingStrengthNone, accountID) + if err != nil { + return nil, err + } + + invites := make([]*types.UserInvite, 0, len(records)) + for _, record := range records { + invites = append(invites, &types.UserInvite{ + UserInfo: &types.UserInfo{ + ID: record.ID, + Email: record.Email, + Name: record.Name, + Role: record.Role, + AutoGroups: record.AutoGroups, + }, + InviteExpiresAt: record.ExpiresAt, + InviteCreatedAt: record.CreatedAt, + }) + } + + return invites, nil +} + +// AcceptUserInvite accepts an invite and creates the user in both IdP and NetBird DB. +func (am *DefaultAccountManager) AcceptUserInvite(ctx context.Context, token, password string) error { + if !IsEmbeddedIdp(am.idpManager) { + return status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") + } + + if password == "" { + return status.Errorf(status.InvalidArgument, "password is required") + } + + if err := validatePassword(password); err != nil { + return status.Errorf(status.InvalidArgument, "invalid password: %v", err) + } + + if err := types.ValidateInviteToken(token); err != nil { + return status.Errorf(status.InvalidArgument, "invalid invite token: %v", err) + } + + hashedToken := types.HashInviteToken(token) + invite, err := am.Store.GetUserInviteByHashedToken(ctx, store.LockingStrengthUpdate, hashedToken) + if err != nil { + return err + } + + if invite.IsExpired() { + return status.Errorf(status.InvalidArgument, "invite has expired") + } + + // Create user in Dex with the provided password + embeddedIdp, ok := am.idpManager.(*idp.EmbeddedIdPManager) + if !ok { + return status.Errorf(status.Internal, "failed to get embedded IdP manager") + } + + idpUser, err := embeddedIdp.CreateUserWithPassword(ctx, invite.Email, password, invite.Name) + if err != nil { + return fmt.Errorf("failed to create user in IdP: %w", err) + } + + // Create user in NetBird DB + newUser := &types.User{ + Id: idpUser.ID, + AccountID: invite.AccountID, + Role: types.StrRoleToUserRole(invite.Role), + AutoGroups: invite.AutoGroups, + Issued: types.UserIssuedAPI, + CreatedAt: time.Now().UTC(), + Email: invite.Email, + Name: invite.Name, + } + + err = am.Store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + if err := transaction.SaveUser(ctx, newUser); err != nil { + return fmt.Errorf("failed to save user: %w", err) + } + if err := transaction.DeleteUserInvite(ctx, invite.ID); err != nil { + return fmt.Errorf("failed to delete invite: %w", err) + } + return nil + }) + if err != nil { + // Best-effort rollback: delete the IdP user to avoid orphaned records + if deleteErr := embeddedIdp.DeleteUser(ctx, idpUser.ID); deleteErr != nil { + log.WithContext(ctx).WithError(deleteErr).Errorf("failed to rollback IdP user %s after transaction failure", idpUser.ID) + } + return err + } + + am.StoreEvent(ctx, newUser.Id, newUser.Id, invite.AccountID, activity.UserInviteLinkAccepted, map[string]any{"email": invite.Email}) + + return nil +} + +// RegenerateUserInvite creates a new invite token for an existing invite, invalidating the previous one. +func (am *DefaultAccountManager) RegenerateUserInvite(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) { + if !IsEmbeddedIdp(am.idpManager) { + return nil, status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") + } + + allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, initiatorUserID, modules.Users, operations.Update) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !allowed { + return nil, status.NewPermissionDeniedError() + } + + // Get existing invite + existingInvite, err := am.Store.GetUserInviteByID(ctx, store.LockingStrengthUpdate, accountID, inviteID) + if err != nil { + return nil, err + } + + // Calculate expiration time + if expiresIn <= 0 { + expiresIn = types.DefaultInviteExpirationSeconds + } + if expiresIn < types.MinInviteExpirationSeconds { + return nil, status.Errorf(status.InvalidArgument, "invite expiration must be at least 1 hour") + } + expiresAt := time.Now().UTC().Add(time.Duration(expiresIn) * time.Second) + + // Generate new invite token + hashedToken, plainToken, err := types.GenerateInviteToken() + if err != nil { + return nil, fmt.Errorf("failed to generate invite token: %w", err) + } + + // Update existing invite with new token and expiration + existingInvite.HashedToken = hashedToken + existingInvite.ExpiresAt = expiresAt + existingInvite.CreatedBy = initiatorUserID + + err = am.Store.SaveUserInvite(ctx, existingInvite) + if err != nil { + return nil, err + } + + am.StoreEvent(ctx, initiatorUserID, existingInvite.ID, accountID, activity.UserInviteLinkRegenerated, map[string]any{"email": existingInvite.Email}) + + return &types.UserInvite{ + UserInfo: &types.UserInfo{ + ID: existingInvite.ID, + Email: existingInvite.Email, + Name: existingInvite.Name, + Role: existingInvite.Role, + AutoGroups: existingInvite.AutoGroups, + Status: string(types.UserStatusInvited), + Issued: types.UserIssuedAPI, + }, + InviteToken: plainToken, + InviteExpiresAt: expiresAt, + }, nil +} + +// DeleteUserInvite deletes an existing invite by ID. +func (am *DefaultAccountManager) DeleteUserInvite(ctx context.Context, accountID, initiatorUserID, inviteID string) error { + if !IsEmbeddedIdp(am.idpManager) { + return status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") + } + + allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, initiatorUserID, modules.Users, operations.Delete) + if err != nil { + return status.NewPermissionValidationError(err) + } + if !allowed { + return status.NewPermissionDeniedError() + } + + invite, err := am.Store.GetUserInviteByID(ctx, store.LockingStrengthUpdate, accountID, inviteID) + if err != nil { + return err + } + + if err := am.Store.DeleteUserInvite(ctx, inviteID); err != nil { + return err + } + + am.StoreEvent(ctx, initiatorUserID, inviteID, accountID, activity.UserInviteLinkDeleted, map[string]any{"email": invite.Email}) + + return nil +} + +const minPasswordLength = 8 + +// validatePassword checks password strength requirements: +// - Minimum 8 characters +// - At least 1 digit +// - At least 1 uppercase letter +// - At least 1 special character +func validatePassword(password string) error { + if len(password) < minPasswordLength { + return errors.New("password must be at least 8 characters long") + } + + var hasDigit, hasUpper, hasSpecial bool + for _, c := range password { + switch { + case unicode.IsDigit(c): + hasDigit = true + case unicode.IsUpper(c): + hasUpper = true + case !unicode.IsLetter(c) && !unicode.IsDigit(c): + hasSpecial = true + } + } + + var missing []string + if !hasDigit { + missing = append(missing, "one digit") + } + if !hasUpper { + missing = append(missing, "one uppercase letter") + } + if !hasSpecial { + missing = append(missing, "one special character") + } + + if len(missing) > 0 { + return errors.New("password must contain at least " + strings.Join(missing, ", ")) + } + + return nil +} diff --git a/management/server/user_invite_test.go b/management/server/user_invite_test.go new file mode 100644 index 000000000..6256ed44a --- /dev/null +++ b/management/server/user_invite_test.go @@ -0,0 +1,1010 @@ +package server + +import ( + "context" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/management/server/permissions" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/management/status" + "github.com/netbirdio/netbird/util/crypt" +) + +const ( + testAccountID = "testAccountID" + testAdminUserID = "testAdminUserID" + testRegularUserID = "testRegularUserID" +) + +// setupInviteTestManagerWithEmbeddedIdP creates a test manager with a real embedded IdP +// and store encryption enabled. This is required for tests that need to pass the IsEmbeddedIdp check. +func setupInviteTestManagerWithEmbeddedIdP(t *testing.T) (*DefaultAccountManager, func()) { + t.Helper() + ctx := context.Background() + + tmpDir := t.TempDir() + dexDataDir := tmpDir + "/dex" + require.NoError(t, os.MkdirAll(dexDataDir, 0700)) + + // Create test store + s, cleanup, err := store.NewTestStoreFromSQL(ctx, "", tmpDir) + require.NoError(t, err, "Error when creating store") + + // Enable encryption + key, err := crypt.GenerateKey() + require.NoError(t, err) + fieldEncrypt, err := crypt.NewFieldEncrypt(key) + require.NoError(t, err) + s.SetFieldEncrypt(fieldEncrypt) + + // Create embedded IDP config + embeddedIdPConfig := &idp.EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + Storage: idp.EmbeddedStorageConfig{ + Type: "sqlite3", + Config: idp.EmbeddedStorageTypeConfig{ + File: dexDataDir + "/dex.db", + }, + }, + } + + // Create embedded IDP manager + embeddedIdp, err := idp.NewEmbeddedIdPManager(ctx, embeddedIdPConfig, nil) + require.NoError(t, err) + + account := newAccountWithId(ctx, testAccountID, testAdminUserID, "", "admin@test.com", "Admin User", false) + account.Users[testRegularUserID] = &types.User{ + Id: testRegularUserID, + AccountID: testAccountID, + Role: types.UserRoleUser, + Email: "regular@test.com", + Name: "Regular User", + } + + err = s.SaveAccount(ctx, account) + require.NoError(t, err, "Error when saving account") + + permissionsManager := permissions.NewManager(s) + + am := DefaultAccountManager{ + Store: s, + eventStore: &activity.InMemoryEventStore{}, + permissionsManager: permissionsManager, + idpManager: embeddedIdp, + } + + cleanupFunc := func() { + _ = embeddedIdp.Stop(ctx) + cleanup() + } + + return &am, cleanupFunc +} + +func TestCreateUserInvite_Success(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + require.NotNil(t, result) + + assert.Equal(t, "newuser@test.com", result.UserInfo.Email) + assert.Equal(t, "New User", result.UserInfo.Name) + assert.Equal(t, "user", result.UserInfo.Role) + assert.Equal(t, string(types.UserStatusInvited), result.UserInfo.Status) + assert.NotEmpty(t, result.InviteToken) + assert.True(t, result.InviteExpiresAt.After(time.Now())) + + // Verify invite is stored in DB + invites, err := am.Store.GetAccountUserInvites(context.Background(), store.LockingStrengthNone, testAccountID) + require.NoError(t, err) + assert.Len(t, invites, 1) + assert.Equal(t, "newuser@test.com", invites[0].Email) +} + +func TestCreateUserInvite_DuplicateEmail(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + // Create first invite + _, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Try to create duplicate invite + _, err = am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.AlreadyExists, sErr.Type()) +} + +func TestCreateUserInvite_ExistingUserEmail(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Try to invite with an email that already exists as a user + invite := &types.UserInfo{ + Email: "regular@test.com", // Already exists as a user + Name: "Duplicate User", + Role: "user", + AutoGroups: []string{}, + } + + _, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.UserAlreadyExists, sErr.Type()) +} + +func TestCreateUserInvite_PermissionDenied(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + // Regular user should not be able to create invites + _, err := am.CreateUserInvite(context.Background(), testAccountID, testRegularUserID, invite, 0) + require.Error(t, err) +} + +func TestCreateUserInvite_InvalidEmail(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invite := &types.UserInfo{ + Email: "", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + _, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.InvalidArgument, sErr.Type()) +} + +func TestCreateUserInvite_InvalidName(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "", + Role: "user", + AutoGroups: []string{}, + } + + _, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.InvalidArgument, sErr.Type()) +} + +func TestCreateUserInvite_OwnerRole(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invite := &types.UserInfo{ + Email: "newowner@test.com", + Name: "New Owner", + Role: "owner", + AutoGroups: []string{}, + } + + _, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.InvalidArgument, sErr.Type()) +} + +func TestCreateUserInvite_ExpirationTooShort(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + // Try to create with expiration less than 1 hour + _, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 1800) // 30 minutes + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.InvalidArgument, sErr.Type()) + assert.Contains(t, err.Error(), "at least 1 hour") +} + +func TestCreateUserInvite_CustomExpiration(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + expiresIn := 7200 // 2 hours + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, expiresIn) + require.NoError(t, err) + + // Verify expiration is approximately 2 hours from now + expectedExpiration := time.Now().Add(time.Duration(expiresIn) * time.Second) + assert.WithinDuration(t, expectedExpiration, result.InviteExpiresAt, time.Minute) +} + +func TestCreateUserInvite_WithAutoGroups(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{"group1", "group2"}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + assert.Equal(t, []string{"group1", "group2"}, result.UserInfo.AutoGroups) + + // Verify invite in DB has auto groups + invites, err := am.Store.GetAccountUserInvites(context.Background(), store.LockingStrengthNone, testAccountID) + require.NoError(t, err) + require.Len(t, invites, 1) + assert.Equal(t, []string{"group1", "group2"}, invites[0].AutoGroups) +} + +func TestGetUserInviteInfo_Success(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite first + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Get the invite info using the token + info, err := am.GetUserInviteInfo(context.Background(), result.InviteToken) + require.NoError(t, err) + require.NotNil(t, info) + + assert.Equal(t, "newuser@test.com", info.Email) + assert.Equal(t, "New User", info.Name) + assert.True(t, info.Valid) + assert.Equal(t, "Admin User", info.InvitedBy) +} + +func TestGetUserInviteInfo_InvalidToken(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + _, err := am.GetUserInviteInfo(context.Background(), "invalid_token") + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.InvalidArgument, sErr.Type()) +} + +func TestGetUserInviteInfo_TokenNotFound(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Generate a valid token format that doesn't exist in DB + _, validToken, err := types.GenerateInviteToken() + require.NoError(t, err) + + _, err = am.GetUserInviteInfo(context.Background(), validToken) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.NotFound, sErr.Type()) +} + +func TestGetUserInviteInfo_ExpiredInvite(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite with valid expiration + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Manually set the invite to expired by updating the store directly + inviteRecord, err := am.Store.GetUserInviteByID(context.Background(), store.LockingStrengthUpdate, testAccountID, result.UserInfo.ID) + require.NoError(t, err) + inviteRecord.ExpiresAt = time.Now().Add(-time.Hour) // Set to 1 hour ago + err = am.Store.SaveUserInvite(context.Background(), inviteRecord) + require.NoError(t, err) + + // Get the invite info - should still return info but Valid should be false + info, err := am.GetUserInviteInfo(context.Background(), result.InviteToken) + require.NoError(t, err) + assert.False(t, info.Valid) +} + +func TestListUserInvites_Success(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create multiple invites + for i, email := range []string{"user1@test.com", "user2@test.com", "user3@test.com"} { + invite := &types.UserInfo{ + Email: email, + Name: "User " + string(rune('1'+i)), + Role: "user", + AutoGroups: []string{}, + } + _, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + } + + // List invites + invites, err := am.ListUserInvites(context.Background(), testAccountID, testAdminUserID) + require.NoError(t, err) + assert.Len(t, invites, 3) +} + +func TestListUserInvites_Empty(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invites, err := am.ListUserInvites(context.Background(), testAccountID, testAdminUserID) + require.NoError(t, err) + assert.Len(t, invites, 0) +} + +func TestListUserInvites_PermissionDenied(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + _, err := am.ListUserInvites(context.Background(), testAccountID, testRegularUserID) + require.Error(t, err) +} + +func TestRegenerateUserInvite_Success(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite first + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + originalResult, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Regenerate the invite + newResult, err := am.RegenerateUserInvite(context.Background(), testAccountID, testAdminUserID, originalResult.UserInfo.ID, 0) + require.NoError(t, err) + require.NotNil(t, newResult) + + // Verify invite ID remains the same (stable ID for clients) + assert.Equal(t, originalResult.UserInfo.ID, newResult.UserInfo.ID) + + // Verify new token is different + assert.NotEqual(t, originalResult.InviteToken, newResult.InviteToken) + assert.Equal(t, "newuser@test.com", newResult.UserInfo.Email) + assert.Equal(t, "New User", newResult.UserInfo.Name) + + // Verify old token no longer works + _, err = am.GetUserInviteInfo(context.Background(), originalResult.InviteToken) + require.Error(t, err) + + // Verify new token works + info, err := am.GetUserInviteInfo(context.Background(), newResult.InviteToken) + require.NoError(t, err) + assert.Equal(t, "newuser@test.com", info.Email) +} + +func TestRegenerateUserInvite_NotFound(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + _, err := am.RegenerateUserInvite(context.Background(), testAccountID, testAdminUserID, "nonexistent-id", 0) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.NotFound, sErr.Type()) +} + +func TestRegenerateUserInvite_PermissionDenied(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite first + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Regular user should not be able to regenerate + _, err = am.RegenerateUserInvite(context.Background(), testAccountID, testRegularUserID, result.UserInfo.ID, 0) + require.Error(t, err) +} + +func TestDeleteUserInvite_Success(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite first + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Delete the invite + err = am.DeleteUserInvite(context.Background(), testAccountID, testAdminUserID, result.UserInfo.ID) + require.NoError(t, err) + + // Verify invite is deleted + invites, err := am.Store.GetAccountUserInvites(context.Background(), store.LockingStrengthNone, testAccountID) + require.NoError(t, err) + assert.Len(t, invites, 0) + + // Verify token no longer works + _, err = am.GetUserInviteInfo(context.Background(), result.InviteToken) + require.Error(t, err) +} + +func TestDeleteUserInvite_NotFound(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + err := am.DeleteUserInvite(context.Background(), testAccountID, testAdminUserID, "nonexistent-id") + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.NotFound, sErr.Type()) +} + +func TestDeleteUserInvite_PermissionDenied(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite first + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Regular user should not be able to delete + err = am.DeleteUserInvite(context.Background(), testAccountID, testRegularUserID, result.UserInfo.ID) + require.Error(t, err) +} + +func TestDeleteUserInvite_WrongAccount(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Create another account + anotherAccountID := "anotherAccountID" + anotherAdminID := "anotherAdminID" + anotherAccount := newAccountWithId(context.Background(), anotherAccountID, anotherAdminID, "", "otheradmin@test.com", "Other Admin", false) + err = am.Store.SaveAccount(context.Background(), anotherAccount) + require.NoError(t, err) + + // Try to delete from wrong account + err = am.DeleteUserInvite(context.Background(), anotherAccountID, anotherAdminID, result.UserInfo.ID) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.NotFound, sErr.Type()) +} + +func TestAcceptUserInvite_Success(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Accept the invite with a valid password + err = am.AcceptUserInvite(context.Background(), result.InviteToken, "Password1!") + require.NoError(t, err) + + // Verify user is created in DB + users, err := am.Store.GetAccountUsers(context.Background(), store.LockingStrengthNone, testAccountID) + require.NoError(t, err) + + var foundUser *types.User + for _, u := range users { + if u.Email == "newuser@test.com" { + foundUser = u + break + } + } + require.NotNil(t, foundUser, "User should be created in DB") + assert.Equal(t, "New User", foundUser.Name) + assert.Equal(t, types.UserRoleUser, foundUser.Role) + + // Verify invite is deleted + invites, err := am.Store.GetAccountUserInvites(context.Background(), store.LockingStrengthNone, testAccountID) + require.NoError(t, err) + assert.Len(t, invites, 0) +} + +func TestAcceptUserInvite_InvalidToken(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + err := am.AcceptUserInvite(context.Background(), "invalid_token", "Password1!") + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.InvalidArgument, sErr.Type()) +} + +func TestAcceptUserInvite_TokenNotFound(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Generate a valid token format that doesn't exist in DB + _, validToken, err := types.GenerateInviteToken() + require.NoError(t, err) + + err = am.AcceptUserInvite(context.Background(), validToken, "Password1!") + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.NotFound, sErr.Type()) +} + +func TestAcceptUserInvite_ExpiredToken(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite with valid expiration + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Manually set the invite to expired by updating the store directly + inviteRecord, err := am.Store.GetUserInviteByID(context.Background(), store.LockingStrengthUpdate, testAccountID, result.UserInfo.ID) + require.NoError(t, err) + inviteRecord.ExpiresAt = time.Now().Add(-time.Hour) // Set to 1 hour ago + err = am.Store.SaveUserInvite(context.Background(), inviteRecord) + require.NoError(t, err) + + err = am.AcceptUserInvite(context.Background(), result.InviteToken, "Password1!") + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.InvalidArgument, sErr.Type()) + assert.Contains(t, err.Error(), "expired") +} + +func TestAcceptUserInvite_EmptyPassword(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + err = am.AcceptUserInvite(context.Background(), result.InviteToken, "") + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.InvalidArgument, sErr.Type()) + assert.Contains(t, err.Error(), "password is required") +} + +func TestAcceptUserInvite_WeakPassword(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + testCases := []struct { + name string + password string + expectedMsg string + }{ + {"too short", "Pass1!", "at least 8 characters"}, + {"no digit", "Password!", "one digit"}, + {"no uppercase", "password1!", "one uppercase"}, + {"no special", "Password1", "one special character"}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := am.AcceptUserInvite(context.Background(), result.InviteToken, tc.password) + require.Error(t, err) + assert.Contains(t, err.Error(), tc.expectedMsg) + }) + } +} + +func TestValidatePassword(t *testing.T) { + testCases := []struct { + name string + password string + expectError bool + errorMsg string + }{ + {"valid password", "Password1!", false, ""}, + {"valid complex password", "MyP@ssw0rd#2024", false, ""}, + {"too short", "Pass1!", true, "at least 8 characters"}, + {"no digit", "Password!", true, "one digit"}, + {"no uppercase", "password1!", true, "one uppercase"}, + {"no special", "Password1", true, "one special character"}, + {"only lowercase", "password", true, "one digit"}, + {"no uppercase no special", "password1", true, "one uppercase"}, + {"all lowercase short", "pass", true, "at least 8 characters"}, + {"empty", "", true, "at least 8 characters"}, + {"spaces count as special", "Pass word1", false, ""}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := validatePassword(tc.password) + if tc.expectError { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.errorMsg) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestInviteToken_GenerateAndValidate(t *testing.T) { + hashedToken, plainToken, err := types.GenerateInviteToken() + require.NoError(t, err) + require.NotEmpty(t, hashedToken) + require.NotEmpty(t, plainToken) + + // Validate token format + assert.Len(t, plainToken, types.InviteTokenLength) + assert.True(t, len(plainToken) > len(types.InviteTokenPrefix)) + assert.Equal(t, types.InviteTokenPrefix, plainToken[:len(types.InviteTokenPrefix)]) + + // Validate checksum + err = types.ValidateInviteToken(plainToken) + require.NoError(t, err) + + // Verify hashing is consistent + hashedAgain := types.HashInviteToken(plainToken) + assert.Equal(t, hashedToken, hashedAgain) +} + +func TestInviteToken_ValidateInvalid(t *testing.T) { + testCases := []struct { + name string + token string + }{ + {"empty", ""}, + {"too short", "nbi_abc"}, + {"wrong prefix", "xyz_123456789012345678901234567890"}, + {"invalid checksum", "nbi_123456789012345678901234567890abcdef"}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := types.ValidateInviteToken(tc.token) + require.Error(t, err) + }) + } +} + +func TestUserInviteRecord_IsExpired(t *testing.T) { + // Not expired + invite := &types.UserInviteRecord{ + ExpiresAt: time.Now().Add(time.Hour), + } + assert.False(t, invite.IsExpired()) + + // Expired + invite = &types.UserInviteRecord{ + ExpiresAt: time.Now().Add(-time.Hour), + } + assert.True(t, invite.IsExpired()) +} + +func TestUserInviteRecord_Copy(t *testing.T) { + original := &types.UserInviteRecord{ + ID: "invite-id", + AccountID: "account-id", + Email: "test@example.com", + Name: "Test User", + Role: "user", + AutoGroups: []string{"group1", "group2"}, + HashedToken: "hashed-token", + ExpiresAt: time.Now().Add(time.Hour), + CreatedAt: time.Now(), + CreatedBy: "creator-id", + } + + copied := original.Copy() + + assert.Equal(t, original.ID, copied.ID) + assert.Equal(t, original.AccountID, copied.AccountID) + assert.Equal(t, original.Email, copied.Email) + assert.Equal(t, original.Name, copied.Name) + assert.Equal(t, original.Role, copied.Role) + assert.Equal(t, original.AutoGroups, copied.AutoGroups) + assert.Equal(t, original.HashedToken, copied.HashedToken) + assert.Equal(t, original.ExpiresAt, copied.ExpiresAt) + assert.Equal(t, original.CreatedAt, copied.CreatedAt) + assert.Equal(t, original.CreatedBy, copied.CreatedBy) + + // Verify deep copy of AutoGroups + copied.AutoGroups[0] = "modified" + assert.NotEqual(t, original.AutoGroups[0], copied.AutoGroups[0]) +} + +func TestCreateUserInvite_NonEmbeddedIdP(t *testing.T) { + s, cleanup, err := store.NewTestStoreFromSQL(context.Background(), "", t.TempDir()) + require.NoError(t, err) + defer cleanup() + + account := newAccountWithId(context.Background(), testAccountID, testAdminUserID, "", "admin@test.com", "Admin User", false) + err = s.SaveAccount(context.Background(), account) + require.NoError(t, err) + + permissionsManager := permissions.NewManager(s) + + // Use nil IDP manager (non-embedded) + am := DefaultAccountManager{ + Store: s, + eventStore: &activity.InMemoryEventStore{}, + permissionsManager: permissionsManager, + idpManager: nil, + } + + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + _, err = am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.PreconditionFailed, sErr.Type()) + assert.Contains(t, err.Error(), "embedded identity provider") +} + +func TestAcceptUserInvite_WithAutoGroups(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite with auto groups + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "admin", + AutoGroups: []string{"group1", "group2"}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Accept the invite + err = am.AcceptUserInvite(context.Background(), result.InviteToken, "Password1!") + require.NoError(t, err) + + // Verify user has the auto groups and role + users, err := am.Store.GetAccountUsers(context.Background(), store.LockingStrengthNone, testAccountID) + require.NoError(t, err) + + var foundUser *types.User + for _, u := range users { + if u.Email == "newuser@test.com" { + foundUser = u + break + } + } + require.NotNil(t, foundUser) + assert.Equal(t, types.UserRoleAdmin, foundUser.Role) + assert.Equal(t, []string{"group1", "group2"}, foundUser.AutoGroups) +} + +func TestUserInvite_EncryptDecryptSensitiveData(t *testing.T) { + key, err := crypt.GenerateKey() + require.NoError(t, err) + fieldEncrypt, err := crypt.NewFieldEncrypt(key) + require.NoError(t, err) + + t.Run("encrypt and decrypt", func(t *testing.T) { + invite := &types.UserInviteRecord{ + ID: "test-invite", + AccountID: "test-account", + Email: "test@example.com", + Name: "Test User", + Role: "user", + } + + // Encrypt + err := invite.EncryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + // Verify encrypted values are different from original + assert.NotEqual(t, "test@example.com", invite.Email) + assert.NotEqual(t, "Test User", invite.Name) + + // Decrypt + err = invite.DecryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + // Verify decrypted values match original + assert.Equal(t, "test@example.com", invite.Email) + assert.Equal(t, "Test User", invite.Name) + }) + + t.Run("encrypt empty fields", func(t *testing.T) { + invite := &types.UserInviteRecord{ + ID: "test-invite", + AccountID: "test-account", + Email: "", + Name: "", + Role: "user", + } + + // Encrypt empty fields + err := invite.EncryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + // Empty strings should remain empty + assert.Equal(t, "", invite.Email) + assert.Equal(t, "", invite.Name) + + // Decrypt empty fields + err = invite.DecryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + // Should still be empty + assert.Equal(t, "", invite.Email) + assert.Equal(t, "", invite.Name) + }) + + t.Run("nil encryptor", func(t *testing.T) { + invite := &types.UserInviteRecord{ + ID: "test-invite", + AccountID: "test-account", + Email: "test@example.com", + Name: "Test User", + Role: "user", + } + + // Encrypt with nil encryptor should be no-op + err := invite.EncryptSensitiveData(nil) + require.NoError(t, err) + assert.Equal(t, "test@example.com", invite.Email) + assert.Equal(t, "Test User", invite.Name) + + // Decrypt with nil encryptor should be no-op + err = invite.DecryptSensitiveData(nil) + require.NoError(t, err) + assert.Equal(t, "test@example.com", invite.Email) + assert.Equal(t, "Test User", invite.Name) + }) +} diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index f1ff98b16..26d2387d1 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -488,6 +488,171 @@ components: - role - auto_groups - is_service_user + UserInviteCreateRequest: + type: object + description: Request to create a user invite link + properties: + email: + description: User's email address + type: string + example: user@example.com + name: + description: User's full name + type: string + example: John Doe + role: + description: User's NetBird account role + type: string + example: user + auto_groups: + description: Group IDs to auto-assign to peers registered by this user + type: array + items: + type: string + example: ch8i4ug6lnn4g9hqv7m0 + expires_in: + description: Invite expiration time in seconds (default 72 hours) + type: integer + example: 259200 + required: + - email + - name + - role + - auto_groups + UserInvite: + type: object + description: A user invite + properties: + id: + description: Invite ID + type: string + example: d5p7eedra0h0lt6f59hg + email: + description: User's email address + type: string + example: user@example.com + name: + description: User's full name + type: string + example: John Doe + role: + description: User's NetBird account role + type: string + example: user + auto_groups: + description: Group IDs to auto-assign to peers registered by this user + type: array + items: + type: string + example: ch8i4ug6lnn4g9hqv7m0 + expires_at: + description: Invite expiration time + type: string + format: date-time + example: "2024-01-25T10:00:00Z" + created_at: + description: Invite creation time + type: string + format: date-time + example: "2024-01-22T10:00:00Z" + expired: + description: Whether the invite has expired + type: boolean + example: false + invite_token: + description: The invite link to be shared with the user. Only returned when the invite is created or regenerated. + type: string + example: nbi_Xk5Lz9mP2vQwRtYu1aN3bC4dE5fGh0ABC123 + required: + - id + - email + - name + - role + - auto_groups + - expires_at + - created_at + - expired + UserInviteInfo: + type: object + description: Public information about an invite + properties: + email: + description: User's email address + type: string + example: user@example.com + name: + description: User's full name + type: string + example: John Doe + expires_at: + description: Invite expiration time + type: string + format: date-time + example: "2024-01-25T10:00:00Z" + valid: + description: Whether the invite is still valid (not expired) + type: boolean + example: true + invited_by: + description: Name of the user who sent the invite + type: string + example: Admin User + required: + - email + - name + - expires_at + - valid + - invited_by + UserInviteAcceptRequest: + type: object + description: Request to accept an invite and set password + properties: + password: + description: >- + The password the user wants to set. Must be at least 8 characters long + and contain at least one uppercase letter, one digit, and one special + character (any character that is not a letter or digit, including spaces). + type: string + format: password + minLength: 8 + pattern: '^(?=.*[0-9])(?=.*[A-Z])(?=.*[^a-zA-Z0-9]).{8,}$' + example: SecurePass123! + required: + - password + UserInviteAcceptResponse: + type: object + description: Response after accepting an invite + properties: + success: + description: Whether the invite was accepted successfully + type: boolean + example: true + required: + - success + UserInviteRegenerateRequest: + type: object + description: Request to regenerate an invite link + properties: + expires_in: + description: Invite expiration time in seconds (default 72 hours) + type: integer + example: 259200 + UserInviteRegenerateResponse: + type: object + description: Response after regenerating an invite + properties: + invite_token: + description: The new invite token + type: string + example: nbi_Xk5Lz9mP2vQwRtYu1aN3bC4dE5fGh0ABC123 + invite_expires_at: + description: New invite expiration time + type: string + format: date-time + example: "2024-01-28T10:00:00Z" + required: + - invite_token + - invite_expires_at PeerMinimum: type: object properties: @@ -2071,7 +2236,8 @@ components: "dns.zone.create", "dns.zone.update", "dns.zone.delete", "dns.zone.record.create", "dns.zone.record.update", "dns.zone.record.delete", "peer.job.create", - "user.password.change" + "user.password.change", + "user.invite.link.create", "user.invite.link.accept", "user.invite.link.regenerate", "user.invite.link.delete" ] example: route.add initiator_id: @@ -2642,6 +2808,29 @@ components: required: - user_id - email + InstanceVersionInfo: + type: object + description: Version information for NetBird components + properties: + management_current_version: + description: The current running version of the management server + type: string + example: "0.35.0" + dashboard_available_version: + description: The latest available version of the dashboard (from GitHub releases) + type: string + example: "2.10.0" + management_available_version: + description: The latest available version of the management server (from GitHub releases) + type: string + example: "0.35.0" + management_update_available: + description: Indicates if a newer management version is available + type: boolean + example: true + required: + - management_current_version + - management_update_available responses: not_found: description: Resource not found @@ -2694,6 +2883,27 @@ paths: $ref: '#/components/schemas/InstanceStatus' '500': "$ref": "#/components/responses/internal_error" + /api/instance/version: + get: + summary: Get Version Info + description: Returns version information for NetBird components including the current management server version and latest available versions from GitHub. + tags: [ Instance ] + security: + - BearerAuth: [] + - TokenAuth: [] + responses: + '200': + description: Version information + content: + application/json: + schema: + $ref: '#/components/schemas/InstanceVersionInfo' + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '500': + "$ref": "#/components/responses/internal_error" /api/setup: post: summary: Setup Instance @@ -3312,6 +3522,210 @@ paths: "$ref": "#/components/responses/forbidden" '500': "$ref": "#/components/responses/internal_error" + /api/users/invites: + get: + summary: List user invites + description: Lists all pending invites for the account. Only available when embedded IdP is enabled. + tags: [ Users ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + responses: + '200': + description: List of invites + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/UserInvite' + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '412': + description: Precondition failed - embedded IdP is not enabled + content: { } + '500': + "$ref": "#/components/responses/internal_error" + post: + summary: Create a user invite + description: Creates an invite link for a new user. Only available when embedded IdP is enabled. The user is not created until they accept the invite. + tags: [ Users ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + requestBody: + description: User invite information + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UserInviteCreateRequest' + responses: + '200': + description: Invite created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/UserInvite' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '409': + description: User or invite already exists + content: { } + '412': + description: Precondition failed - embedded IdP is not enabled + content: { } + '422': + "$ref": "#/components/responses/validation_failed" + '500': + "$ref": "#/components/responses/internal_error" + /api/users/invites/{inviteId}: + delete: + summary: Delete a user invite + description: Deletes a pending invite. Only available when embedded IdP is enabled. + tags: [ Users ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: inviteId + required: true + schema: + type: string + description: The ID of the invite to delete + responses: + '200': + description: Invite deleted successfully + content: { } + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + description: Invite not found + content: { } + '412': + description: Precondition failed - embedded IdP is not enabled + content: { } + '500': + "$ref": "#/components/responses/internal_error" + /api/users/invites/{inviteId}/regenerate: + post: + summary: Regenerate a user invite + description: Regenerates an invite link for an existing invite. Invalidates the previous token and creates a new one. + tags: [ Users ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: inviteId + required: true + schema: + type: string + description: The ID of the invite to regenerate + requestBody: + description: Regenerate options + content: + application/json: + schema: + $ref: '#/components/schemas/UserInviteRegenerateRequest' + responses: + '200': + description: Invite regenerated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/UserInviteRegenerateResponse' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + description: Invite not found + content: { } + '412': + description: Precondition failed - embedded IdP is not enabled + content: { } + '422': + "$ref": "#/components/responses/validation_failed" + '500': + "$ref": "#/components/responses/internal_error" + /api/users/invites/{token}: + get: + summary: Get invite information + description: Retrieves public information about an invite. This endpoint is unauthenticated and protected by the token itself. + tags: [ Users ] + security: [] + parameters: + - in: path + name: token + required: true + schema: + type: string + description: The invite token + responses: + '200': + description: Invite information + content: + application/json: + schema: + $ref: '#/components/schemas/UserInviteInfo' + '400': + "$ref": "#/components/responses/bad_request" + '404': + description: Invite not found or invalid token + content: { } + '500': + "$ref": "#/components/responses/internal_error" + /api/users/invites/{token}/accept: + post: + summary: Accept an invite + description: Accepts an invite and creates the user with the provided password. This endpoint is unauthenticated and protected by the token itself. + tags: [ Users ] + security: [] + parameters: + - in: path + name: token + required: true + schema: + type: string + description: The invite token + requestBody: + description: Password to set for the new user + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UserInviteAcceptRequest' + responses: + '200': + description: Invite accepted successfully + content: + application/json: + schema: + $ref: '#/components/schemas/UserInviteAcceptResponse' + '400': + "$ref": "#/components/responses/bad_request" + '404': + description: Invite not found or invalid token + content: { } + '412': + description: Precondition failed - embedded IdP is not enabled or invite expired + content: { } + '422': + "$ref": "#/components/responses/validation_failed" + '500': + "$ref": "#/components/responses/internal_error" /api/peers: get: summary: List all Peers diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index 848023689..e8c044b32 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -123,6 +123,10 @@ const ( EventActivityCodeUserGroupAdd EventActivityCode = "user.group.add" EventActivityCodeUserGroupDelete EventActivityCode = "user.group.delete" EventActivityCodeUserInvite EventActivityCode = "user.invite" + EventActivityCodeUserInviteLinkAccept EventActivityCode = "user.invite.link.accept" + EventActivityCodeUserInviteLinkCreate EventActivityCode = "user.invite.link.create" + EventActivityCodeUserInviteLinkDelete EventActivityCode = "user.invite.link.delete" + EventActivityCodeUserInviteLinkRegenerate EventActivityCode = "user.invite.link.regenerate" EventActivityCodeUserJoin EventActivityCode = "user.join" EventActivityCodeUserPasswordChange EventActivityCode = "user.password.change" EventActivityCodeUserPeerDelete EventActivityCode = "user.peer.delete" @@ -870,6 +874,21 @@ type InstanceStatus struct { SetupRequired bool `json:"setup_required"` } +// InstanceVersionInfo Version information for NetBird components +type InstanceVersionInfo struct { + // DashboardAvailableVersion The latest available version of the dashboard (from GitHub releases) + DashboardAvailableVersion *string `json:"dashboard_available_version,omitempty"` + + // ManagementAvailableVersion The latest available version of the management server (from GitHub releases) + ManagementAvailableVersion *string `json:"management_available_version,omitempty"` + + // ManagementCurrentVersion The current running version of the management server + ManagementCurrentVersion string `json:"management_current_version"` + + // ManagementUpdateAvailable Indicates if a newer management version is available + ManagementUpdateAvailable bool `json:"management_update_available"` +} + // JobRequest defines model for JobRequest. type JobRequest struct { Workload WorkloadRequest `json:"workload"` @@ -2166,6 +2185,99 @@ type UserCreateRequest struct { Role string `json:"role"` } +// UserInvite A user invite +type UserInvite struct { + // AutoGroups Group IDs to auto-assign to peers registered by this user + AutoGroups []string `json:"auto_groups"` + + // CreatedAt Invite creation time + CreatedAt time.Time `json:"created_at"` + + // Email User's email address + Email string `json:"email"` + + // Expired Whether the invite has expired + Expired bool `json:"expired"` + + // ExpiresAt Invite expiration time + ExpiresAt time.Time `json:"expires_at"` + + // Id Invite ID + Id string `json:"id"` + + // InviteToken The invite link to be shared with the user. Only returned when the invite is created or regenerated. + InviteToken *string `json:"invite_token,omitempty"` + + // Name User's full name + Name string `json:"name"` + + // Role User's NetBird account role + Role string `json:"role"` +} + +// UserInviteAcceptRequest Request to accept an invite and set password +type UserInviteAcceptRequest struct { + // Password The password the user wants to set. Must be at least 8 characters long and contain at least one uppercase letter, one digit, and one special character (any character that is not a letter or digit, including spaces). + Password string `json:"password"` +} + +// UserInviteAcceptResponse Response after accepting an invite +type UserInviteAcceptResponse struct { + // Success Whether the invite was accepted successfully + Success bool `json:"success"` +} + +// UserInviteCreateRequest Request to create a user invite link +type UserInviteCreateRequest struct { + // AutoGroups Group IDs to auto-assign to peers registered by this user + AutoGroups []string `json:"auto_groups"` + + // Email User's email address + Email string `json:"email"` + + // ExpiresIn Invite expiration time in seconds (default 72 hours) + ExpiresIn *int `json:"expires_in,omitempty"` + + // Name User's full name + Name string `json:"name"` + + // Role User's NetBird account role + Role string `json:"role"` +} + +// UserInviteInfo Public information about an invite +type UserInviteInfo struct { + // Email User's email address + Email string `json:"email"` + + // ExpiresAt Invite expiration time + ExpiresAt time.Time `json:"expires_at"` + + // InvitedBy Name of the user who sent the invite + InvitedBy string `json:"invited_by"` + + // Name User's full name + Name string `json:"name"` + + // Valid Whether the invite is still valid (not expired) + Valid bool `json:"valid"` +} + +// UserInviteRegenerateRequest Request to regenerate an invite link +type UserInviteRegenerateRequest struct { + // ExpiresIn Invite expiration time in seconds (default 72 hours) + ExpiresIn *int `json:"expires_in,omitempty"` +} + +// UserInviteRegenerateResponse Response after regenerating an invite +type UserInviteRegenerateResponse struct { + // InviteExpiresAt New invite expiration time + InviteExpiresAt time.Time `json:"invite_expires_at"` + + // InviteToken The new invite token + InviteToken string `json:"invite_token"` +} + // UserPermissions defines model for UserPermissions. type UserPermissions struct { // IsRestricted Indicates whether this User's Peers view is restricted @@ -2418,6 +2530,15 @@ type PutApiSetupKeysKeyIdJSONRequestBody = SetupKeyRequest // PostApiUsersJSONRequestBody defines body for PostApiUsers for application/json ContentType. type PostApiUsersJSONRequestBody = UserCreateRequest +// PostApiUsersInvitesJSONRequestBody defines body for PostApiUsersInvites for application/json ContentType. +type PostApiUsersInvitesJSONRequestBody = UserInviteCreateRequest + +// PostApiUsersInvitesInviteIdRegenerateJSONRequestBody defines body for PostApiUsersInvitesInviteIdRegenerate for application/json ContentType. +type PostApiUsersInvitesInviteIdRegenerateJSONRequestBody = UserInviteRegenerateRequest + +// PostApiUsersInvitesTokenAcceptJSONRequestBody defines body for PostApiUsersInvitesTokenAccept for application/json ContentType. +type PostApiUsersInvitesTokenAcceptJSONRequestBody = UserInviteAcceptRequest + // PutApiUsersUserIdJSONRequestBody defines body for PutApiUsersUserId for application/json ContentType. type PutApiUsersUserIdJSONRequestBody = UserRequest From 5299549eb66575dba25efc7783ea635ec19ac050 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Tue, 27 Jan 2026 09:52:55 +0100 Subject: [PATCH 085/374] [client] Update fyne and add exit menu retry (#5187) * Update fyne and add exit menu retry - Fix an extra arrow on macos by updating fyne/systray * use systray.TrayOpenedCh instead of loop and retry --- client/ui/event_handler.go | 2 ++ client/ui/network.go | 1 - go.mod | 2 +- go.sum | 4 ++-- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/client/ui/event_handler.go b/client/ui/event_handler.go index cc55c31dd..2216c8aeb 100644 --- a/client/ui/event_handler.go +++ b/client/ui/event_handler.go @@ -63,6 +63,8 @@ func (h *eventHandler) listen(ctx context.Context) { h.handleNetworksClick() case <-h.client.mNotifications.ClickedCh: h.handleNotificationsClick() + case <-systray.TrayOpenedCh: + h.client.updateExitNodes() } } } diff --git a/client/ui/network.go b/client/ui/network.go index 371eb975b..9a5ad7662 100644 --- a/client/ui/network.go +++ b/client/ui/network.go @@ -341,7 +341,6 @@ func (s *serviceClient) updateExitNodes() { log.Errorf("get client: %v", err) return } - exitNodes, err := s.getExitNodes(conn) if err != nil { log.Errorf("get exit nodes: %v", err) diff --git a/go.mod b/go.mod index 8ac5613ee..2a6c311ce 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( require ( fyne.io/fyne/v2 v2.7.0 - fyne.io/systray v1.11.1-0.20250603113521-ca66a66d8b58 + fyne.io/systray v1.12.1-0.20260116214250-81f8e1a496f9 github.com/TheJumpCloud/jcapi-go v3.0.0+incompatible github.com/awnumar/memguard v0.23.0 github.com/aws/aws-sdk-go-v2 v1.36.3 diff --git a/go.sum b/go.sum index 6adc7f7e8..17e5c8ffa 100644 --- a/go.sum +++ b/go.sum @@ -13,8 +13,8 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= fyne.io/fyne/v2 v2.7.0 h1:GvZSpE3X0liU/fqstInVvRsaboIVpIWQ4/sfjDGIGGQ= fyne.io/fyne/v2 v2.7.0/go.mod h1:xClVlrhxl7D+LT+BWYmcrW4Nf+dJTvkhnPgji7spAwE= -fyne.io/systray v1.11.1-0.20250603113521-ca66a66d8b58 h1:eA5/u2XRd8OUkoMqEv3IBlFYSruNlXD8bRHDiqm0VNI= -fyne.io/systray v1.11.1-0.20250603113521-ca66a66d8b58/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs= +fyne.io/systray v1.12.1-0.20260116214250-81f8e1a496f9 h1:829+77I4TaMrcg9B3wf+gHhdSgoCVEgH2czlPXPbfj4= +fyne.io/systray v1.12.1-0.20260116214250-81f8e1a496f9/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/AppsFlyer/go-sundheit v0.6.0 h1:d2hBvCjBSb2lUsEWGfPigr4MCOt04sxB+Rppl0yUMSk= From d4f7df271aa1ddd330e872aab70a7f0451e31405 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Tue, 27 Jan 2026 18:04:23 +0800 Subject: [PATCH 086/374] [cllient] Don't track ebpf traffic in conntrack (#5166) --- client/firewall/iptables/manager_linux.go | 127 ++++++++++++++ client/firewall/manager/firewall.go | 4 + client/firewall/nftables/manager_linux.go | 191 +++++++++++++++++++++- client/firewall/uspfilter/filter.go | 8 + client/iface/iface.go | 7 + client/iface/wgproxy/ebpf/proxy.go | 15 +- client/iface/wgproxy/factory_kernel.go | 8 + client/iface/wgproxy/factory_usp.go | 5 + client/internal/engine.go | 22 +++ client/internal/engine_test.go | 8 + client/internal/iface_common.go | 1 + 11 files changed, 389 insertions(+), 7 deletions(-) diff --git a/client/firewall/iptables/manager_linux.go b/client/firewall/iptables/manager_linux.go index 2563a9052..716385705 100644 --- a/client/firewall/iptables/manager_linux.go +++ b/client/firewall/iptables/manager_linux.go @@ -83,6 +83,10 @@ func (m *Manager) Init(stateManager *statemanager.Manager) error { return fmt.Errorf("acl manager init: %w", err) } + if err := m.initNoTrackChain(); err != nil { + return fmt.Errorf("init notrack chain: %w", err) + } + // persist early to ensure cleanup of chains go func() { if err := stateManager.PersistState(context.Background()); err != nil { @@ -177,6 +181,10 @@ func (m *Manager) Close(stateManager *statemanager.Manager) error { var merr *multierror.Error + if err := m.cleanupNoTrackChain(); err != nil { + merr = multierror.Append(merr, fmt.Errorf("cleanup notrack chain: %w", err)) + } + if err := m.aclMgr.Reset(); err != nil { merr = multierror.Append(merr, fmt.Errorf("reset acl manager: %w", err)) } @@ -277,6 +285,125 @@ func (m *Manager) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Prot return m.router.RemoveInboundDNAT(localAddr, protocol, sourcePort, targetPort) } +const ( + chainNameRaw = "NETBIRD-RAW" + chainOUTPUT = "OUTPUT" + tableRaw = "raw" +) + +// SetupEBPFProxyNoTrack creates notrack rules for eBPF proxy loopback traffic. +// This prevents conntrack from tracking WireGuard proxy traffic on loopback, which +// can interfere with MASQUERADE rules (e.g., from container runtimes like Podman/netavark). +// +// Traffic flows that need NOTRACK: +// +// 1. Egress: WireGuard -> fake endpoint (before eBPF rewrite) +// src=127.0.0.1:wgPort -> dst=127.0.0.1:fakePort +// Matched by: sport=wgPort +// +// 2. Egress: Proxy -> WireGuard (via raw socket) +// src=127.0.0.1:fakePort -> dst=127.0.0.1:wgPort +// Matched by: dport=wgPort +// +// 3. Ingress: Packets to WireGuard +// dst=127.0.0.1:wgPort +// Matched by: dport=wgPort +// +// 4. Ingress: Packets to proxy (after eBPF rewrite) +// dst=127.0.0.1:proxyPort +// Matched by: dport=proxyPort +// +// Rules are cleaned up when the firewall manager is closed. +func (m *Manager) SetupEBPFProxyNoTrack(proxyPort, wgPort uint16) error { + m.mutex.Lock() + defer m.mutex.Unlock() + + wgPortStr := fmt.Sprintf("%d", wgPort) + proxyPortStr := fmt.Sprintf("%d", proxyPort) + + // Egress rules: match outgoing loopback UDP packets + outputRuleSport := []string{"-o", "lo", "-s", "127.0.0.1", "-d", "127.0.0.1", "-p", "udp", "--sport", wgPortStr, "-j", "NOTRACK"} + if err := m.ipv4Client.AppendUnique(tableRaw, chainNameRaw, outputRuleSport...); err != nil { + return fmt.Errorf("add output sport notrack rule: %w", err) + } + + outputRuleDport := []string{"-o", "lo", "-s", "127.0.0.1", "-d", "127.0.0.1", "-p", "udp", "--dport", wgPortStr, "-j", "NOTRACK"} + if err := m.ipv4Client.AppendUnique(tableRaw, chainNameRaw, outputRuleDport...); err != nil { + return fmt.Errorf("add output dport notrack rule: %w", err) + } + + // Ingress rules: match incoming loopback UDP packets + preroutingRuleWg := []string{"-i", "lo", "-s", "127.0.0.1", "-d", "127.0.0.1", "-p", "udp", "--dport", wgPortStr, "-j", "NOTRACK"} + if err := m.ipv4Client.AppendUnique(tableRaw, chainNameRaw, preroutingRuleWg...); err != nil { + return fmt.Errorf("add prerouting wg notrack rule: %w", err) + } + + preroutingRuleProxy := []string{"-i", "lo", "-s", "127.0.0.1", "-d", "127.0.0.1", "-p", "udp", "--dport", proxyPortStr, "-j", "NOTRACK"} + if err := m.ipv4Client.AppendUnique(tableRaw, chainNameRaw, preroutingRuleProxy...); err != nil { + return fmt.Errorf("add prerouting proxy notrack rule: %w", err) + } + + log.Debugf("set up ebpf proxy notrack rules for ports %d,%d", proxyPort, wgPort) + return nil +} + +func (m *Manager) initNoTrackChain() error { + if err := m.cleanupNoTrackChain(); err != nil { + log.Debugf("cleanup notrack chain: %v", err) + } + + if err := m.ipv4Client.NewChain(tableRaw, chainNameRaw); err != nil { + return fmt.Errorf("create chain: %w", err) + } + + jumpRule := []string{"-j", chainNameRaw} + + if err := m.ipv4Client.InsertUnique(tableRaw, chainOUTPUT, 1, jumpRule...); err != nil { + if delErr := m.ipv4Client.DeleteChain(tableRaw, chainNameRaw); delErr != nil { + log.Debugf("delete orphan chain: %v", delErr) + } + return fmt.Errorf("add output jump rule: %w", err) + } + + if err := m.ipv4Client.InsertUnique(tableRaw, chainPREROUTING, 1, jumpRule...); err != nil { + if delErr := m.ipv4Client.DeleteIfExists(tableRaw, chainOUTPUT, jumpRule...); delErr != nil { + log.Debugf("delete output jump rule: %v", delErr) + } + if delErr := m.ipv4Client.DeleteChain(tableRaw, chainNameRaw); delErr != nil { + log.Debugf("delete orphan chain: %v", delErr) + } + return fmt.Errorf("add prerouting jump rule: %w", err) + } + + return nil +} + +func (m *Manager) cleanupNoTrackChain() error { + exists, err := m.ipv4Client.ChainExists(tableRaw, chainNameRaw) + if err != nil { + return fmt.Errorf("check chain exists: %w", err) + } + if !exists { + return nil + } + + jumpRule := []string{"-j", chainNameRaw} + + if err := m.ipv4Client.DeleteIfExists(tableRaw, chainOUTPUT, jumpRule...); err != nil { + return fmt.Errorf("remove output jump rule: %w", err) + } + + if err := m.ipv4Client.DeleteIfExists(tableRaw, chainPREROUTING, jumpRule...); err != nil { + return fmt.Errorf("remove prerouting jump rule: %w", err) + } + + if err := m.ipv4Client.ClearAndDeleteChain(tableRaw, chainNameRaw); err != nil { + return fmt.Errorf("clear and delete chain: %w", err) + } + + return nil +} + func getConntrackEstablished() []string { return []string{"-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"} } diff --git a/client/firewall/manager/firewall.go b/client/firewall/manager/firewall.go index 72e6a5c68..3511a5463 100644 --- a/client/firewall/manager/firewall.go +++ b/client/firewall/manager/firewall.go @@ -168,6 +168,10 @@ type Manager interface { // RemoveInboundDNAT removes inbound DNAT rule RemoveInboundDNAT(localAddr netip.Addr, protocol Protocol, sourcePort, targetPort uint16) error + + // SetupEBPFProxyNoTrack creates static notrack rules for eBPF proxy loopback traffic. + // This prevents conntrack from interfering with WireGuard proxy communication. + SetupEBPFProxyNoTrack(proxyPort, wgPort uint16) error } func GenKey(format string, pair RouterPair) string { diff --git a/client/firewall/nftables/manager_linux.go b/client/firewall/nftables/manager_linux.go index bd19f1067..acf482f86 100644 --- a/client/firewall/nftables/manager_linux.go +++ b/client/firewall/nftables/manager_linux.go @@ -12,6 +12,7 @@ import ( "github.com/google/nftables/binaryutil" "github.com/google/nftables/expr" log "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" firewall "github.com/netbirdio/netbird/client/firewall/manager" "github.com/netbirdio/netbird/client/iface/wgaddr" @@ -48,8 +49,10 @@ type Manager struct { rConn *nftables.Conn wgIface iFaceMapper - router *router - aclManager *AclManager + router *router + aclManager *AclManager + notrackOutputChain *nftables.Chain + notrackPreroutingChain *nftables.Chain } // Create nftables firewall manager @@ -91,6 +94,10 @@ func (m *Manager) Init(stateManager *statemanager.Manager) error { return fmt.Errorf("acl manager init: %w", err) } + if err := m.initNoTrackChains(workTable); err != nil { + return fmt.Errorf("init notrack chains: %w", err) + } + stateManager.RegisterState(&ShutdownState{}) // We only need to record minimal interface state for potential recreation. @@ -288,7 +295,15 @@ func (m *Manager) Flush() error { m.mutex.Lock() defer m.mutex.Unlock() - return m.aclManager.Flush() + if err := m.aclManager.Flush(); err != nil { + return err + } + + if err := m.refreshNoTrackChains(); err != nil { + log.Errorf("failed to refresh notrack chains: %v", err) + } + + return nil } // AddDNATRule adds a DNAT rule @@ -331,6 +346,176 @@ func (m *Manager) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Prot return m.router.RemoveInboundDNAT(localAddr, protocol, sourcePort, targetPort) } +const ( + chainNameRawOutput = "netbird-raw-out" + chainNameRawPrerouting = "netbird-raw-pre" +) + +// SetupEBPFProxyNoTrack creates notrack rules for eBPF proxy loopback traffic. +// This prevents conntrack from tracking WireGuard proxy traffic on loopback, which +// can interfere with MASQUERADE rules (e.g., from container runtimes like Podman/netavark). +// +// Traffic flows that need NOTRACK: +// +// 1. Egress: WireGuard -> fake endpoint (before eBPF rewrite) +// src=127.0.0.1:wgPort -> dst=127.0.0.1:fakePort +// Matched by: sport=wgPort +// +// 2. Egress: Proxy -> WireGuard (via raw socket) +// src=127.0.0.1:fakePort -> dst=127.0.0.1:wgPort +// Matched by: dport=wgPort +// +// 3. Ingress: Packets to WireGuard +// dst=127.0.0.1:wgPort +// Matched by: dport=wgPort +// +// 4. Ingress: Packets to proxy (after eBPF rewrite) +// dst=127.0.0.1:proxyPort +// Matched by: dport=proxyPort +// +// Rules are cleaned up when the firewall manager is closed. +func (m *Manager) SetupEBPFProxyNoTrack(proxyPort, wgPort uint16) error { + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.notrackOutputChain == nil || m.notrackPreroutingChain == nil { + return fmt.Errorf("notrack chains not initialized") + } + + proxyPortBytes := binaryutil.BigEndian.PutUint16(proxyPort) + wgPortBytes := binaryutil.BigEndian.PutUint16(wgPort) + loopback := []byte{127, 0, 0, 1} + + // Egress rules: match outgoing loopback UDP packets + m.rConn.AddRule(&nftables.Rule{ + Table: m.notrackOutputChain.Table, + Chain: m.notrackOutputChain, + Exprs: []expr.Any{ + &expr.Meta{Key: expr.MetaKeyOIFNAME, Register: 1}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: ifname("lo")}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseNetworkHeader, Offset: 12, Len: 4}, // saddr + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: loopback}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseNetworkHeader, Offset: 16, Len: 4}, // daddr + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: loopback}, + &expr.Meta{Key: expr.MetaKeyL4PROTO, Register: 1}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: []byte{unix.IPPROTO_UDP}}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseTransportHeader, Offset: 0, Len: 2}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: wgPortBytes}, // sport=wgPort + &expr.Counter{}, + &expr.Notrack{}, + }, + }) + m.rConn.AddRule(&nftables.Rule{ + Table: m.notrackOutputChain.Table, + Chain: m.notrackOutputChain, + Exprs: []expr.Any{ + &expr.Meta{Key: expr.MetaKeyOIFNAME, Register: 1}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: ifname("lo")}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseNetworkHeader, Offset: 12, Len: 4}, // saddr + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: loopback}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseNetworkHeader, Offset: 16, Len: 4}, // daddr + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: loopback}, + &expr.Meta{Key: expr.MetaKeyL4PROTO, Register: 1}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: []byte{unix.IPPROTO_UDP}}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseTransportHeader, Offset: 2, Len: 2}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: wgPortBytes}, // dport=wgPort + &expr.Counter{}, + &expr.Notrack{}, + }, + }) + + // Ingress rules: match incoming loopback UDP packets + m.rConn.AddRule(&nftables.Rule{ + Table: m.notrackPreroutingChain.Table, + Chain: m.notrackPreroutingChain, + Exprs: []expr.Any{ + &expr.Meta{Key: expr.MetaKeyIIFNAME, Register: 1}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: ifname("lo")}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseNetworkHeader, Offset: 12, Len: 4}, // saddr + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: loopback}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseNetworkHeader, Offset: 16, Len: 4}, // daddr + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: loopback}, + &expr.Meta{Key: expr.MetaKeyL4PROTO, Register: 1}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: []byte{unix.IPPROTO_UDP}}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseTransportHeader, Offset: 2, Len: 2}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: wgPortBytes}, // dport=wgPort + &expr.Counter{}, + &expr.Notrack{}, + }, + }) + m.rConn.AddRule(&nftables.Rule{ + Table: m.notrackPreroutingChain.Table, + Chain: m.notrackPreroutingChain, + Exprs: []expr.Any{ + &expr.Meta{Key: expr.MetaKeyIIFNAME, Register: 1}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: ifname("lo")}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseNetworkHeader, Offset: 12, Len: 4}, // saddr + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: loopback}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseNetworkHeader, Offset: 16, Len: 4}, // daddr + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: loopback}, + &expr.Meta{Key: expr.MetaKeyL4PROTO, Register: 1}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: []byte{unix.IPPROTO_UDP}}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseTransportHeader, Offset: 2, Len: 2}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: proxyPortBytes}, // dport=proxyPort + &expr.Counter{}, + &expr.Notrack{}, + }, + }) + + if err := m.rConn.Flush(); err != nil { + return fmt.Errorf("flush notrack rules: %w", err) + } + + log.Debugf("set up ebpf proxy notrack rules for ports %d,%d", proxyPort, wgPort) + return nil +} + +func (m *Manager) initNoTrackChains(table *nftables.Table) error { + m.notrackOutputChain = m.rConn.AddChain(&nftables.Chain{ + Name: chainNameRawOutput, + Table: table, + Type: nftables.ChainTypeFilter, + Hooknum: nftables.ChainHookOutput, + Priority: nftables.ChainPriorityRaw, + }) + + m.notrackPreroutingChain = m.rConn.AddChain(&nftables.Chain{ + Name: chainNameRawPrerouting, + Table: table, + Type: nftables.ChainTypeFilter, + Hooknum: nftables.ChainHookPrerouting, + Priority: nftables.ChainPriorityRaw, + }) + + if err := m.rConn.Flush(); err != nil { + return fmt.Errorf("flush chain creation: %w", err) + } + + return nil +} + +func (m *Manager) refreshNoTrackChains() error { + chains, err := m.rConn.ListChainsOfTableFamily(nftables.TableFamilyIPv4) + if err != nil { + return fmt.Errorf("list chains: %w", err) + } + + tableName := getTableName() + for _, c := range chains { + if c.Table.Name != tableName { + continue + } + switch c.Name { + case chainNameRawOutput: + m.notrackOutputChain = c + case chainNameRawPrerouting: + m.notrackPreroutingChain = c + } + } + + return nil +} + func (m *Manager) createWorkTable() (*nftables.Table, error) { tables, err := m.rConn.ListTablesOfFamily(nftables.TableFamilyIPv4) if err != nil { diff --git a/client/firewall/uspfilter/filter.go b/client/firewall/uspfilter/filter.go index 8caa1a0ad..aacc4ca1c 100644 --- a/client/firewall/uspfilter/filter.go +++ b/client/firewall/uspfilter/filter.go @@ -570,6 +570,14 @@ func (m *Manager) SetLegacyManagement(isLegacy bool) error { // Flush doesn't need to be implemented for this manager func (m *Manager) Flush() error { return nil } +// SetupEBPFProxyNoTrack creates notrack rules for eBPF proxy loopback traffic. +func (m *Manager) SetupEBPFProxyNoTrack(proxyPort, wgPort uint16) error { + if m.nativeFirewall == nil { + return nil + } + return m.nativeFirewall.SetupEBPFProxyNoTrack(proxyPort, wgPort) +} + // UpdateSet updates the rule destinations associated with the given set // by merging the existing prefixes with the new ones, then deduplicating. func (m *Manager) UpdateSet(set firewall.Set, prefixes []netip.Prefix) error { diff --git a/client/iface/iface.go b/client/iface/iface.go index 71fd433ad..e5623c979 100644 --- a/client/iface/iface.go +++ b/client/iface/iface.go @@ -50,6 +50,7 @@ func ValidateMTU(mtu uint16) error { type wgProxyFactory interface { GetProxy() wgproxy.Proxy + GetProxyPort() uint16 Free() error } @@ -80,6 +81,12 @@ func (w *WGIface) GetProxy() wgproxy.Proxy { return w.wgProxyFactory.GetProxy() } +// GetProxyPort returns the proxy port used by the WireGuard proxy. +// Returns 0 if no proxy port is used (e.g., for userspace WireGuard). +func (w *WGIface) GetProxyPort() uint16 { + return w.wgProxyFactory.GetProxyPort() +} + // GetBind returns the EndpointManager userspace bind mode. func (w *WGIface) GetBind() device.EndpointManager { w.mu.Lock() diff --git a/client/iface/wgproxy/ebpf/proxy.go b/client/iface/wgproxy/ebpf/proxy.go index 0c1c886d7..5458519fa 100644 --- a/client/iface/wgproxy/ebpf/proxy.go +++ b/client/iface/wgproxy/ebpf/proxy.go @@ -39,6 +39,7 @@ var ( // WGEBPFProxy definition for proxy with EBPF support type WGEBPFProxy struct { localWGListenPort int + proxyPort int mtu uint16 ebpfManager ebpfMgr.Manager @@ -69,10 +70,11 @@ func NewWGEBPFProxy(wgPort int, mtu uint16) *WGEBPFProxy { // Listen load ebpf program and listen the proxy func (p *WGEBPFProxy) Listen() error { pl := portLookup{} - wgPorxyPort, err := pl.searchFreePort() + proxyPort, err := pl.searchFreePort() if err != nil { return err } + p.proxyPort = proxyPort // Prepare IPv4 raw socket (required) p.rawConnIPv4, err = rawsocket.PrepareSenderRawSocketIPv4() @@ -86,7 +88,7 @@ func (p *WGEBPFProxy) Listen() error { log.Warnf("failed to prepare IPv6 raw socket, continuing with IPv4 only: %v", err) } - err = p.ebpfManager.LoadWgProxy(wgPorxyPort, p.localWGListenPort) + err = p.ebpfManager.LoadWgProxy(proxyPort, p.localWGListenPort) if err != nil { if closeErr := p.rawConnIPv4.Close(); closeErr != nil { log.Warnf("failed to close IPv4 raw socket: %v", closeErr) @@ -100,7 +102,7 @@ func (p *WGEBPFProxy) Listen() error { } addr := net.UDPAddr{ - Port: wgPorxyPort, + Port: proxyPort, IP: net.ParseIP(loopbackAddr), } @@ -116,7 +118,7 @@ func (p *WGEBPFProxy) Listen() error { p.conn = conn go p.proxyToRemote() - log.Infof("local wg proxy listening on: %d", wgPorxyPort) + log.Infof("local wg proxy listening on: %d", proxyPort) return nil } @@ -171,6 +173,11 @@ func (p *WGEBPFProxy) Free() error { return nberrors.FormatErrorOrNil(result) } +// GetProxyPort returns the proxy listening port. +func (p *WGEBPFProxy) GetProxyPort() uint16 { + return uint16(p.proxyPort) +} + // proxyToRemote read messages from local WireGuard interface and forward it to remote conn // From this go routine has only one instance. func (p *WGEBPFProxy) proxyToRemote() { diff --git a/client/iface/wgproxy/factory_kernel.go b/client/iface/wgproxy/factory_kernel.go index 2714c5774..7821df3de 100644 --- a/client/iface/wgproxy/factory_kernel.go +++ b/client/iface/wgproxy/factory_kernel.go @@ -54,6 +54,14 @@ func (w *KernelFactory) GetProxy() Proxy { return ebpf.NewProxyWrapper(w.ebpfProxy) } +// GetProxyPort returns the eBPF proxy port, or 0 if eBPF is not active. +func (w *KernelFactory) GetProxyPort() uint16 { + if w.ebpfProxy == nil { + return 0 + } + return w.ebpfProxy.GetProxyPort() +} + func (w *KernelFactory) Free() error { if w.ebpfProxy == nil { return nil diff --git a/client/iface/wgproxy/factory_usp.go b/client/iface/wgproxy/factory_usp.go index a1b1c34d7..bbd67e076 100644 --- a/client/iface/wgproxy/factory_usp.go +++ b/client/iface/wgproxy/factory_usp.go @@ -24,6 +24,11 @@ func (w *USPFactory) GetProxy() Proxy { return proxyBind.NewProxyBind(w.bind, w.mtu) } +// GetProxyPort returns 0 as userspace WireGuard doesn't use a separate proxy port. +func (w *USPFactory) GetProxyPort() uint16 { + return 0 +} + func (w *USPFactory) Free() error { return nil } diff --git a/client/internal/engine.go b/client/internal/engine.go index a391ba22a..f0693e82c 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -505,6 +505,10 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) return fmt.Errorf("up wg interface: %w", err) } + // Set up notrack rules immediately after proxy is listening to prevent + // conntrack entries from being created before the rules are in place + e.setupWGProxyNoTrack() + // Set the WireGuard interface for rosenpass after interface is up if e.rpManager != nil { e.rpManager.SetInterface(e.wgInterface) @@ -617,6 +621,23 @@ func (e *Engine) initFirewall() error { return nil } +// setupWGProxyNoTrack configures connection tracking exclusion for WireGuard proxy traffic. +// This prevents conntrack/MASQUERADE from affecting loopback traffic between WireGuard and the eBPF proxy. +func (e *Engine) setupWGProxyNoTrack() { + if e.firewall == nil { + return + } + + proxyPort := e.wgInterface.GetProxyPort() + if proxyPort == 0 { + return + } + + if err := e.firewall.SetupEBPFProxyNoTrack(proxyPort, uint16(e.config.WgPort)); err != nil { + log.Warnf("failed to setup ebpf proxy notrack: %v", err) + } +} + func (e *Engine) blockLanAccess() { if e.config.BlockInbound { // no need to set up extra deny rules if inbound is already blocked in general @@ -1644,6 +1665,7 @@ func (e *Engine) parseNATExternalIPMappings() []string { func (e *Engine) close() { log.Debugf("removing Netbird interface %s", e.config.WgIfaceName) + if e.wgInterface != nil { if err := e.wgInterface.Close(); err != nil { log.Errorf("failed closing Netbird interface %s %v", e.config.WgIfaceName, err) diff --git a/client/internal/engine_test.go b/client/internal/engine_test.go index af9f27a71..012c8ad6e 100644 --- a/client/internal/engine_test.go +++ b/client/internal/engine_test.go @@ -107,6 +107,7 @@ type MockWGIface struct { GetStatsFunc func() (map[string]configurer.WGStats, error) GetInterfaceGUIDStringFunc func() (string, error) GetProxyFunc func() wgproxy.Proxy + GetProxyPortFunc func() uint16 GetNetFunc func() *netstack.Net LastActivitiesFunc func() map[string]monotime.Time } @@ -203,6 +204,13 @@ func (m *MockWGIface) GetProxy() wgproxy.Proxy { return m.GetProxyFunc() } +func (m *MockWGIface) GetProxyPort() uint16 { + if m.GetProxyPortFunc != nil { + return m.GetProxyPortFunc() + } + return 0 +} + func (m *MockWGIface) GetNet() *netstack.Net { return m.GetNetFunc() } diff --git a/client/internal/iface_common.go b/client/internal/iface_common.go index f8a433a6e..39e9bacfa 100644 --- a/client/internal/iface_common.go +++ b/client/internal/iface_common.go @@ -28,6 +28,7 @@ type wgIfaceBase interface { Up() (*udpmux.UniversalUDPMuxDefault, error) UpdateAddr(newAddr string) error GetProxy() wgproxy.Proxy + GetProxyPort() uint16 UpdatePeer(peerKey string, allowedIps []netip.Prefix, keepAlive time.Duration, endpoint *net.UDPAddr, preSharedKey *wgtypes.Key) error RemoveEndpointAddress(key string) error RemovePeer(peerKey string) error From 06966da0121382297297d6838a90dd6e87d048ac Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Tue, 27 Jan 2026 18:05:04 +0800 Subject: [PATCH 087/374] [client] Support non-PTY no-command interactive SSH sessions (#5093) --- client/ssh/proxy/proxy.go | 26 +- client/ssh/server/command_execution.go | 25 +- client/ssh/server/command_execution_js.go | 6 +- client/ssh/server/command_execution_unix.go | 63 +++-- .../ssh/server/command_execution_windows.go | 79 ++---- client/ssh/server/compatibility_test.go | 230 +++++++++++++++++- client/ssh/server/executor_unix.go | 50 +++- client/ssh/server/executor_windows.go | 98 +++++--- client/ssh/server/port_forwarding.go | 7 - client/ssh/server/server.go | 2 +- client/ssh/server/server_config_test.go | 42 +--- client/ssh/server/server_test.go | 10 +- client/ssh/server/session_handlers.go | 54 +--- client/ssh/server/session_handlers_js.go | 4 +- client/ssh/server/userswitching_unix.go | 8 +- client/ssh/server/userswitching_windows.go | 23 +- client/ssh/server/winpty/conpty.go | 4 +- 17 files changed, 461 insertions(+), 270 deletions(-) diff --git a/client/ssh/proxy/proxy.go b/client/ssh/proxy/proxy.go index cb1c36e13..8897b9c7e 100644 --- a/client/ssh/proxy/proxy.go +++ b/client/ssh/proxy/proxy.go @@ -207,8 +207,6 @@ func (p *SSHProxy) handleProxyExitCode(session ssh.Session, err error) { } func (p *SSHProxy) handleNonInteractiveSession(session ssh.Session, sshClient *cryptossh.Client) { - // Create a backend session to mirror the client's session request. - // This keeps the connection alive on the server side while port forwarding channels operate. serverSession, err := sshClient.NewSession() if err != nil { _, _ = fmt.Fprintf(p.stderr, "create server session: %v\n", err) @@ -216,10 +214,28 @@ func (p *SSHProxy) handleNonInteractiveSession(session ssh.Session, sshClient *c } defer func() { _ = serverSession.Close() }() - <-session.Context().Done() + serverSession.Stdin = session + serverSession.Stdout = session + serverSession.Stderr = session.Stderr() - if err := session.Exit(0); err != nil { - log.Debugf("session exit: %v", err) + if err := serverSession.Shell(); err != nil { + log.Debugf("start shell: %v", err) + return + } + + done := make(chan error, 1) + go func() { + done <- serverSession.Wait() + }() + + select { + case <-session.Context().Done(): + return + case err := <-done: + if err != nil { + log.Debugf("shell session: %v", err) + p.handleProxyExitCode(session, err) + } } } diff --git a/client/ssh/server/command_execution.go b/client/ssh/server/command_execution.go index 7a01ce4f6..b0a85fe4b 100644 --- a/client/ssh/server/command_execution.go +++ b/client/ssh/server/command_execution.go @@ -12,8 +12,8 @@ import ( log "github.com/sirupsen/logrus" ) -// handleCommand executes an SSH command with privilege validation -func (s *Server) handleCommand(logger *log.Entry, session ssh.Session, privilegeResult PrivilegeCheckResult, winCh <-chan ssh.Window) { +// handleExecution executes an SSH command or shell with privilege validation +func (s *Server) handleExecution(logger *log.Entry, session ssh.Session, privilegeResult PrivilegeCheckResult, ptyReq ssh.Pty, winCh <-chan ssh.Window) { hasPty := winCh != nil commandType := "command" @@ -23,7 +23,7 @@ func (s *Server) handleCommand(logger *log.Entry, session ssh.Session, privilege logger.Infof("executing %s: %s", commandType, safeLogCommand(session.Command())) - execCmd, cleanup, err := s.createCommand(privilegeResult, session, hasPty) + execCmd, cleanup, err := s.createCommand(logger, privilegeResult, session, hasPty) if err != nil { logger.Errorf("%s creation failed: %v", commandType, err) @@ -51,13 +51,12 @@ func (s *Server) handleCommand(logger *log.Entry, session ssh.Session, privilege defer cleanup() - ptyReq, _, _ := session.Pty() if s.executeCommandWithPty(logger, session, execCmd, privilegeResult, ptyReq, winCh) { logger.Debugf("%s execution completed", commandType) } } -func (s *Server) createCommand(privilegeResult PrivilegeCheckResult, session ssh.Session, hasPty bool) (*exec.Cmd, func(), error) { +func (s *Server) createCommand(logger *log.Entry, privilegeResult PrivilegeCheckResult, session ssh.Session, hasPty bool) (*exec.Cmd, func(), error) { localUser := privilegeResult.User if localUser == nil { return nil, nil, errors.New("no user in privilege result") @@ -66,28 +65,28 @@ func (s *Server) createCommand(privilegeResult PrivilegeCheckResult, session ssh // If PTY requested but su doesn't support --pty, skip su and use executor // This ensures PTY functionality is provided (executor runs within our allocated PTY) if hasPty && !s.suSupportsPty { - log.Debugf("PTY requested but su doesn't support --pty, using executor for PTY functionality") - cmd, cleanup, err := s.createExecutorCommand(session, localUser, hasPty) + logger.Debugf("PTY requested but su doesn't support --pty, using executor for PTY functionality") + cmd, cleanup, err := s.createExecutorCommand(logger, session, localUser, hasPty) if err != nil { return nil, nil, fmt.Errorf("create command with privileges: %w", err) } - cmd.Env = s.prepareCommandEnv(localUser, session) + cmd.Env = s.prepareCommandEnv(logger, localUser, session) return cmd, cleanup, nil } // Try su first for system integration (PAM/audit) when privileged - cmd, err := s.createSuCommand(session, localUser, hasPty) + cmd, err := s.createSuCommand(logger, session, localUser, hasPty) if err != nil || privilegeResult.UsedFallback { - log.Debugf("su command failed, falling back to executor: %v", err) - cmd, cleanup, err := s.createExecutorCommand(session, localUser, hasPty) + logger.Debugf("su command failed, falling back to executor: %v", err) + cmd, cleanup, err := s.createExecutorCommand(logger, session, localUser, hasPty) if err != nil { return nil, nil, fmt.Errorf("create command with privileges: %w", err) } - cmd.Env = s.prepareCommandEnv(localUser, session) + cmd.Env = s.prepareCommandEnv(logger, localUser, session) return cmd, cleanup, nil } - cmd.Env = s.prepareCommandEnv(localUser, session) + cmd.Env = s.prepareCommandEnv(logger, localUser, session) return cmd, func() {}, nil } diff --git a/client/ssh/server/command_execution_js.go b/client/ssh/server/command_execution_js.go index 01759a337..3aeaa135c 100644 --- a/client/ssh/server/command_execution_js.go +++ b/client/ssh/server/command_execution_js.go @@ -15,17 +15,17 @@ import ( var errNotSupported = errors.New("SSH server command execution not supported on WASM/JS platform") // createSuCommand is not supported on JS/WASM -func (s *Server) createSuCommand(_ ssh.Session, _ *user.User, _ bool) (*exec.Cmd, error) { +func (s *Server) createSuCommand(_ *log.Entry, _ ssh.Session, _ *user.User, _ bool) (*exec.Cmd, error) { return nil, errNotSupported } // createExecutorCommand is not supported on JS/WASM -func (s *Server) createExecutorCommand(_ ssh.Session, _ *user.User, _ bool) (*exec.Cmd, func(), error) { +func (s *Server) createExecutorCommand(_ *log.Entry, _ ssh.Session, _ *user.User, _ bool) (*exec.Cmd, func(), error) { return nil, nil, errNotSupported } // prepareCommandEnv is not supported on JS/WASM -func (s *Server) prepareCommandEnv(_ *user.User, _ ssh.Session) []string { +func (s *Server) prepareCommandEnv(_ *log.Entry, _ *user.User, _ ssh.Session) []string { return nil } diff --git a/client/ssh/server/command_execution_unix.go b/client/ssh/server/command_execution_unix.go index db1a9bcfe..279b89341 100644 --- a/client/ssh/server/command_execution_unix.go +++ b/client/ssh/server/command_execution_unix.go @@ -10,6 +10,7 @@ import ( "os" "os/exec" "os/user" + "path/filepath" "runtime" "strings" "sync" @@ -99,40 +100,52 @@ func (s *Server) detectUtilLinuxLogin(ctx context.Context) bool { return isUtilLinux } -// createSuCommand creates a command using su -l -c for privilege switching -func (s *Server) createSuCommand(session ssh.Session, localUser *user.User, hasPty bool) (*exec.Cmd, error) { +// createSuCommand creates a command using su - for privilege switching. +func (s *Server) createSuCommand(logger *log.Entry, session ssh.Session, localUser *user.User, hasPty bool) (*exec.Cmd, error) { + if err := validateUsername(localUser.Username); err != nil { + return nil, fmt.Errorf("invalid username %q: %w", localUser.Username, err) + } + suPath, err := exec.LookPath("su") if err != nil { return nil, fmt.Errorf("su command not available: %w", err) } - command := session.RawCommand() - if command == "" { - return nil, fmt.Errorf("no command specified for su execution") - } - - args := []string{"-l"} + args := []string{"-"} if hasPty && s.suSupportsPty { args = append(args, "--pty") } - args = append(args, localUser.Username, "-c", command) + args = append(args, localUser.Username) + command := session.RawCommand() + if command != "" { + args = append(args, "-c", command) + } + + logger.Debugf("creating su command: %s %v", suPath, args) cmd := exec.CommandContext(session.Context(), suPath, args...) cmd.Dir = localUser.HomeDir return cmd, nil } -// getShellCommandArgs returns the shell command and arguments for executing a command string +// getShellCommandArgs returns the shell command and arguments for executing a command string. func (s *Server) getShellCommandArgs(shell, cmdString string) []string { if cmdString == "" { - return []string{shell, "-l"} + return []string{shell} } - return []string{shell, "-l", "-c", cmdString} + return []string{shell, "-c", cmdString} +} + +// createShellCommand creates an exec.Cmd configured as a login shell by setting argv[0] to "-shellname". +func (s *Server) createShellCommand(ctx context.Context, shell string, args []string) *exec.Cmd { + cmd := exec.CommandContext(ctx, shell, args[1:]...) + cmd.Args[0] = "-" + filepath.Base(shell) + return cmd } // prepareCommandEnv prepares environment variables for command execution on Unix -func (s *Server) prepareCommandEnv(localUser *user.User, session ssh.Session) []string { +func (s *Server) prepareCommandEnv(_ *log.Entry, localUser *user.User, session ssh.Session) []string { env := prepareUserEnv(localUser, getUserShell(localUser.Uid)) env = append(env, prepareSSHEnv(session)...) for _, v := range session.Environ() { @@ -154,7 +167,7 @@ func (s *Server) executeCommandWithPty(logger *log.Entry, session ssh.Session, e return s.runPtyCommand(logger, session, execCmd, ptyReq, winCh) } -func (s *Server) handlePty(logger *log.Entry, session ssh.Session, privilegeResult PrivilegeCheckResult, ptyReq ssh.Pty, winCh <-chan ssh.Window) bool { +func (s *Server) handlePtyLogin(logger *log.Entry, session ssh.Session, privilegeResult PrivilegeCheckResult, ptyReq ssh.Pty, winCh <-chan ssh.Window) bool { execCmd, err := s.createPtyCommand(privilegeResult, ptyReq, session) if err != nil { logger.Errorf("Pty command creation failed: %v", err) @@ -244,11 +257,6 @@ func (s *Server) handlePtyIO(logger *log.Entry, session ssh.Session, ptyMgr *pty }() go func() { - defer func() { - if err := session.Close(); err != nil && !errors.Is(err, io.EOF) { - logger.Debugf("session close error: %v", err) - } - }() if _, err := io.Copy(session, ptmx); err != nil { if !errors.Is(err, io.EOF) && !errors.Is(err, syscall.EIO) { logger.Warnf("Pty output copy error: %v", err) @@ -268,7 +276,7 @@ func (s *Server) waitForPtyCompletion(logger *log.Entry, session ssh.Session, ex case <-ctx.Done(): s.handlePtySessionCancellation(logger, session, execCmd, ptyMgr, done) case err := <-done: - s.handlePtyCommandCompletion(logger, session, err) + s.handlePtyCommandCompletion(logger, session, ptyMgr, err) } } @@ -296,17 +304,20 @@ func (s *Server) handlePtySessionCancellation(logger *log.Entry, session ssh.Ses } } -func (s *Server) handlePtyCommandCompletion(logger *log.Entry, session ssh.Session, err error) { +func (s *Server) handlePtyCommandCompletion(logger *log.Entry, session ssh.Session, ptyMgr *ptyManager, err error) { if err != nil { logger.Debugf("Pty command execution failed: %v", err) s.handleSessionExit(session, err, logger) - return + } else { + logger.Debugf("Pty command completed successfully") + if err := session.Exit(0); err != nil { + logSessionExitError(logger, err) + } } - // Normal completion - logger.Debugf("Pty command completed successfully") - if err := session.Exit(0); err != nil { - logSessionExitError(logger, err) + // Close PTY to unblock io.Copy goroutines + if err := ptyMgr.Close(); err != nil { + logger.Debugf("Pty close after completion: %v", err) } } diff --git a/client/ssh/server/command_execution_windows.go b/client/ssh/server/command_execution_windows.go index 998796871..e1ba777f6 100644 --- a/client/ssh/server/command_execution_windows.go +++ b/client/ssh/server/command_execution_windows.go @@ -20,32 +20,32 @@ import ( // getUserEnvironment retrieves the Windows environment for the target user. // Follows OpenSSH's resilient approach with graceful degradation on failures. -func (s *Server) getUserEnvironment(username, domain string) ([]string, error) { - userToken, err := s.getUserToken(username, domain) +func (s *Server) getUserEnvironment(logger *log.Entry, username, domain string) ([]string, error) { + userToken, err := s.getUserToken(logger, username, domain) if err != nil { return nil, fmt.Errorf("get user token: %w", err) } defer func() { if err := windows.CloseHandle(userToken); err != nil { - log.Debugf("close user token: %v", err) + logger.Debugf("close user token: %v", err) } }() - return s.getUserEnvironmentWithToken(userToken, username, domain) + return s.getUserEnvironmentWithToken(logger, userToken, username, domain) } // getUserEnvironmentWithToken retrieves the Windows environment using an existing token. -func (s *Server) getUserEnvironmentWithToken(userToken windows.Handle, username, domain string) ([]string, error) { +func (s *Server) getUserEnvironmentWithToken(logger *log.Entry, userToken windows.Handle, username, domain string) ([]string, error) { userProfile, err := s.loadUserProfile(userToken, username, domain) if err != nil { - log.Debugf("failed to load user profile for %s\\%s: %v", domain, username, err) + logger.Debugf("failed to load user profile for %s\\%s: %v", domain, username, err) userProfile = fmt.Sprintf("C:\\Users\\%s", username) } envMap := make(map[string]string) if err := s.loadSystemEnvironment(envMap); err != nil { - log.Debugf("failed to load system environment from registry: %v", err) + logger.Debugf("failed to load system environment from registry: %v", err) } s.setUserEnvironmentVariables(envMap, userProfile, username, domain) @@ -59,8 +59,8 @@ func (s *Server) getUserEnvironmentWithToken(userToken windows.Handle, username, } // getUserToken creates a user token for the specified user. -func (s *Server) getUserToken(username, domain string) (windows.Handle, error) { - privilegeDropper := NewPrivilegeDropper() +func (s *Server) getUserToken(logger *log.Entry, username, domain string) (windows.Handle, error) { + privilegeDropper := NewPrivilegeDropper(WithLogger(logger)) token, err := privilegeDropper.createToken(username, domain) if err != nil { return 0, fmt.Errorf("generate S4U user token: %w", err) @@ -242,9 +242,9 @@ func (s *Server) setUserEnvironmentVariables(envMap map[string]string, userProfi } // prepareCommandEnv prepares environment variables for command execution on Windows -func (s *Server) prepareCommandEnv(localUser *user.User, session ssh.Session) []string { +func (s *Server) prepareCommandEnv(logger *log.Entry, localUser *user.User, session ssh.Session) []string { username, domain := s.parseUsername(localUser.Username) - userEnv, err := s.getUserEnvironment(username, domain) + userEnv, err := s.getUserEnvironment(logger, username, domain) if err != nil { log.Debugf("failed to get user environment for %s\\%s, using fallback: %v", domain, username, err) env := prepareUserEnv(localUser, getUserShell(localUser.Uid)) @@ -267,22 +267,16 @@ func (s *Server) prepareCommandEnv(localUser *user.User, session ssh.Session) [] return env } -func (s *Server) handlePty(logger *log.Entry, session ssh.Session, privilegeResult PrivilegeCheckResult, ptyReq ssh.Pty, winCh <-chan ssh.Window) bool { +func (s *Server) handlePtyLogin(logger *log.Entry, session ssh.Session, privilegeResult PrivilegeCheckResult, ptyReq ssh.Pty, _ <-chan ssh.Window) bool { if privilegeResult.User == nil { logger.Errorf("no user in privilege result") return false } - cmd := session.Command() shell := getUserShell(privilegeResult.User.Uid) + logger.Infof("starting interactive shell: %s", shell) - if len(cmd) == 0 { - logger.Infof("starting interactive shell: %s", shell) - } else { - logger.Infof("executing command: %s", safeLogCommand(cmd)) - } - - s.handlePtyWithUserSwitching(logger, session, privilegeResult, ptyReq, winCh, cmd) + s.executeCommandWithPty(logger, session, nil, privilegeResult, ptyReq, nil) return true } @@ -294,11 +288,6 @@ func (s *Server) getShellCommandArgs(shell, cmdString string) []string { return []string{shell, "-Command", cmdString} } -func (s *Server) handlePtyWithUserSwitching(logger *log.Entry, session ssh.Session, privilegeResult PrivilegeCheckResult, ptyReq ssh.Pty, _ <-chan ssh.Window, _ []string) { - logger.Info("starting interactive shell") - s.executeConPtyCommand(logger, session, privilegeResult, ptyReq, session.RawCommand()) -} - type PtyExecutionRequest struct { Shell string Command string @@ -308,25 +297,25 @@ type PtyExecutionRequest struct { Domain string } -func executePtyCommandWithUserToken(ctx context.Context, session ssh.Session, req PtyExecutionRequest) error { - log.Tracef("executing Windows ConPty command with user switching: shell=%s, command=%s, user=%s\\%s, size=%dx%d", +func executePtyCommandWithUserToken(logger *log.Entry, session ssh.Session, req PtyExecutionRequest) error { + logger.Tracef("executing Windows ConPty command with user switching: shell=%s, command=%s, user=%s\\%s, size=%dx%d", req.Shell, req.Command, req.Domain, req.Username, req.Width, req.Height) - privilegeDropper := NewPrivilegeDropper() + privilegeDropper := NewPrivilegeDropper(WithLogger(logger)) userToken, err := privilegeDropper.createToken(req.Username, req.Domain) if err != nil { return fmt.Errorf("create user token: %w", err) } defer func() { if err := windows.CloseHandle(userToken); err != nil { - log.Debugf("close user token: %v", err) + logger.Debugf("close user token: %v", err) } }() server := &Server{} - userEnv, err := server.getUserEnvironmentWithToken(userToken, req.Username, req.Domain) + userEnv, err := server.getUserEnvironmentWithToken(logger, userToken, req.Username, req.Domain) if err != nil { - log.Debugf("failed to get user environment for %s\\%s, using system environment: %v", req.Domain, req.Username, err) + logger.Debugf("failed to get user environment for %s\\%s, using system environment: %v", req.Domain, req.Username, err) userEnv = os.Environ() } @@ -348,8 +337,8 @@ func executePtyCommandWithUserToken(ctx context.Context, session ssh.Session, re Environment: userEnv, } - log.Debugf("executePtyCommandWithUserToken: calling winpty execution with working dir: %s", workingDir) - return winpty.ExecutePtyWithUserToken(ctx, session, ptyConfig, userConfig) + logger.Debugf("executePtyCommandWithUserToken: calling winpty execution with working dir: %s", workingDir) + return winpty.ExecutePtyWithUserToken(session, ptyConfig, userConfig) } func getUserHomeFromEnv(env []string) string { @@ -371,10 +360,8 @@ func (s *Server) killProcessGroup(cmd *exec.Cmd) { return } - logger := log.WithField("pid", cmd.Process.Pid) - if err := cmd.Process.Kill(); err != nil { - logger.Debugf("kill process failed: %v", err) + log.Debugf("kill process %d failed: %v", cmd.Process.Pid, err) } } @@ -389,21 +376,7 @@ func (s *Server) detectUtilLinuxLogin(context.Context) bool { } // executeCommandWithPty executes a command with PTY allocation on Windows using ConPty -func (s *Server) executeCommandWithPty(logger *log.Entry, session ssh.Session, execCmd *exec.Cmd, privilegeResult PrivilegeCheckResult, ptyReq ssh.Pty, winCh <-chan ssh.Window) bool { - command := session.RawCommand() - if command == "" { - logger.Error("no command specified for PTY execution") - if err := session.Exit(1); err != nil { - logSessionExitError(logger, err) - } - return false - } - - return s.executeConPtyCommand(logger, session, privilegeResult, ptyReq, command) -} - -// executeConPtyCommand executes a command using ConPty (common for interactive and command execution) -func (s *Server) executeConPtyCommand(logger *log.Entry, session ssh.Session, privilegeResult PrivilegeCheckResult, ptyReq ssh.Pty, command string) bool { +func (s *Server) executeCommandWithPty(logger *log.Entry, session ssh.Session, _ *exec.Cmd, privilegeResult PrivilegeCheckResult, ptyReq ssh.Pty, _ <-chan ssh.Window) bool { localUser := privilegeResult.User if localUser == nil { logger.Errorf("no user in privilege result") @@ -415,14 +388,14 @@ func (s *Server) executeConPtyCommand(logger *log.Entry, session ssh.Session, pr req := PtyExecutionRequest{ Shell: shell, - Command: command, + Command: session.RawCommand(), Width: ptyReq.Window.Width, Height: ptyReq.Window.Height, Username: username, Domain: domain, } - if err := executePtyCommandWithUserToken(session.Context(), session, req); err != nil { + if err := executePtyCommandWithUserToken(logger, session, req); err != nil { logger.Errorf("ConPty execution failed: %v", err) if err := session.Exit(1); err != nil { logSessionExitError(logger, err) diff --git a/client/ssh/server/compatibility_test.go b/client/ssh/server/compatibility_test.go index 34ffccfd2..7fe2d6c5e 100644 --- a/client/ssh/server/compatibility_test.go +++ b/client/ssh/server/compatibility_test.go @@ -4,12 +4,15 @@ import ( "context" "crypto/ed25519" "crypto/rand" + "errors" "fmt" "io" "net" "os" "os/exec" + "path/filepath" "runtime" + "slices" "strings" "testing" "time" @@ -23,25 +26,67 @@ import ( "github.com/netbirdio/netbird/client/ssh/testutil" ) -// TestMain handles package-level setup and cleanup func TestMain(m *testing.M) { - // Guard against infinite recursion when test binary is called as "netbird ssh exec" - // This happens when running tests as non-privileged user with fallback + // On platforms where su doesn't support --pty (macOS, FreeBSD, Windows), the SSH server + // spawns an executor subprocess via os.Executable(). During tests, this invokes the test + // binary with "ssh exec" args. We handle that here to properly execute commands and + // propagate exit codes. if len(os.Args) > 2 && os.Args[1] == "ssh" && os.Args[2] == "exec" { - // Just exit with error to break the recursion - fmt.Fprintf(os.Stderr, "Test binary called as 'ssh exec' - preventing infinite recursion\n") - os.Exit(1) + runTestExecutor() + return } - // Run tests code := m.Run() - - // Cleanup any created test users testutil.CleanupTestUsers() - os.Exit(code) } +// runTestExecutor emulates the netbird executor for tests. +// Parses --shell and --cmd args, runs the command, and exits with the correct code. +func runTestExecutor() { + if os.Getenv("_NETBIRD_TEST_EXECUTOR") != "" { + fmt.Fprintf(os.Stderr, "executor recursion detected\n") + os.Exit(1) + } + os.Setenv("_NETBIRD_TEST_EXECUTOR", "1") + + shell := "/bin/sh" + var command string + for i := 3; i < len(os.Args); i++ { + switch os.Args[i] { + case "--shell": + if i+1 < len(os.Args) { + shell = os.Args[i+1] + i++ + } + case "--cmd": + if i+1 < len(os.Args) { + command = os.Args[i+1] + i++ + } + } + } + + var cmd *exec.Cmd + if command == "" { + cmd = exec.Command(shell) + } else { + cmd = exec.Command(shell, "-c", command) + } + cmd.Args[0] = "-" + filepath.Base(shell) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + os.Exit(exitErr.ExitCode()) + } + os.Exit(1) + } + os.Exit(0) +} + // TestSSHServerCompatibility tests that our SSH server is compatible with the system SSH client func TestSSHServerCompatibility(t *testing.T) { if testing.Short() { @@ -405,6 +450,171 @@ func createTempKeyFile(t *testing.T, privateKey []byte) (string, func()) { return createTempKeyFileFromBytes(t, privateKey) } +// TestSSHPtyModes tests different PTY allocation modes (-T, -t, -tt flags) +// This ensures our implementation matches OpenSSH behavior for: +// - ssh host command (no PTY - default when no TTY) +// - ssh -T host command (explicit no PTY) +// - ssh -t host command (force PTY) +// - ssh -T host (no PTY shell - our implementation) +func TestSSHPtyModes(t *testing.T) { + if testing.Short() { + t.Skip("Skipping SSH PTY mode tests in short mode") + } + + if !isSSHClientAvailable() { + t.Skip("SSH client not available on this system") + } + + if runtime.GOOS == "windows" && testutil.IsCI() { + t.Skip("Skipping Windows SSH PTY tests in CI due to S4U authentication issues") + } + + hostKey, err := nbssh.GeneratePrivateKey(nbssh.ED25519) + require.NoError(t, err) + + clientPrivKeyOpenSSH, _, err := generateOpenSSHKey(t) + require.NoError(t, err) + + serverConfig := &Config{ + HostKeyPEM: hostKey, + JWT: nil, + } + server := New(serverConfig) + server.SetAllowRootLogin(true) + + serverAddr := StartTestServer(t, server) + defer func() { + err := server.Stop() + require.NoError(t, err) + }() + + clientKeyFile, cleanupKey := createTempKeyFileFromBytes(t, clientPrivKeyOpenSSH) + defer cleanupKey() + + host, portStr, err := net.SplitHostPort(serverAddr) + require.NoError(t, err) + + username := testutil.GetTestUsername(t) + + baseArgs := []string{ + "-i", clientKeyFile, + "-p", portStr, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + "-o", "ConnectTimeout=5", + "-o", "BatchMode=yes", + } + + t.Run("command_default_no_pty", func(t *testing.T) { + args := append(slices.Clone(baseArgs), fmt.Sprintf("%s@%s", username, host), "echo", "no_pty_default") + cmd := exec.Command("ssh", args...) + + output, err := cmd.CombinedOutput() + require.NoError(t, err, "Command (default no PTY) failed: %s", output) + assert.Contains(t, string(output), "no_pty_default") + }) + + t.Run("command_explicit_no_pty", func(t *testing.T) { + args := append(slices.Clone(baseArgs), "-T", fmt.Sprintf("%s@%s", username, host), "echo", "explicit_no_pty") + cmd := exec.Command("ssh", args...) + + output, err := cmd.CombinedOutput() + require.NoError(t, err, "Command (-T explicit no PTY) failed: %s", output) + assert.Contains(t, string(output), "explicit_no_pty") + }) + + t.Run("command_force_pty", func(t *testing.T) { + args := append(slices.Clone(baseArgs), "-tt", fmt.Sprintf("%s@%s", username, host), "echo", "force_pty") + cmd := exec.Command("ssh", args...) + + output, err := cmd.CombinedOutput() + require.NoError(t, err, "Command (-tt force PTY) failed: %s", output) + assert.Contains(t, string(output), "force_pty") + }) + + t.Run("shell_explicit_no_pty", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + args := append(slices.Clone(baseArgs), "-T", fmt.Sprintf("%s@%s", username, host)) + cmd := exec.CommandContext(ctx, "ssh", args...) + + stdin, err := cmd.StdinPipe() + require.NoError(t, err) + + stdout, err := cmd.StdoutPipe() + require.NoError(t, err) + + require.NoError(t, cmd.Start(), "Shell (-T no PTY) start failed") + + go func() { + defer stdin.Close() + time.Sleep(100 * time.Millisecond) + _, err := stdin.Write([]byte("echo shell_no_pty_test\n")) + assert.NoError(t, err, "write echo command") + time.Sleep(100 * time.Millisecond) + _, err = stdin.Write([]byte("exit 0\n")) + assert.NoError(t, err, "write exit command") + }() + + output, _ := io.ReadAll(stdout) + err = cmd.Wait() + + require.NoError(t, err, "Shell (-T no PTY) failed: %s", output) + assert.Contains(t, string(output), "shell_no_pty_test") + }) + + t.Run("exit_code_preserved_no_pty", func(t *testing.T) { + args := append(slices.Clone(baseArgs), "-T", fmt.Sprintf("%s@%s", username, host), "exit", "42") + cmd := exec.Command("ssh", args...) + + err := cmd.Run() + require.Error(t, err, "Command should exit with non-zero") + + var exitErr *exec.ExitError + require.True(t, errors.As(err, &exitErr), "Should be an exit error: %v", err) + assert.Equal(t, 42, exitErr.ExitCode(), "Exit code should be preserved with -T") + }) + + t.Run("exit_code_preserved_with_pty", func(t *testing.T) { + args := append(slices.Clone(baseArgs), "-tt", fmt.Sprintf("%s@%s", username, host), "sh -c 'exit 43'") + cmd := exec.Command("ssh", args...) + + err := cmd.Run() + require.Error(t, err, "PTY command should exit with non-zero") + + var exitErr *exec.ExitError + require.True(t, errors.As(err, &exitErr), "Should be an exit error: %v", err) + assert.Equal(t, 43, exitErr.ExitCode(), "Exit code should be preserved with -tt") + }) + + t.Run("stderr_works_no_pty", func(t *testing.T) { + args := append(slices.Clone(baseArgs), "-T", fmt.Sprintf("%s@%s", username, host), + "sh -c 'echo stdout_msg; echo stderr_msg >&2'") + cmd := exec.Command("ssh", args...) + + var stdout, stderr strings.Builder + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + require.NoError(t, cmd.Run(), "stderr test failed") + assert.Contains(t, stdout.String(), "stdout_msg", "stdout should have stdout_msg") + assert.Contains(t, stderr.String(), "stderr_msg", "stderr should have stderr_msg") + assert.NotContains(t, stdout.String(), "stderr_msg", "stdout should NOT have stderr_msg") + }) + + t.Run("stderr_merged_with_pty", func(t *testing.T) { + args := append(slices.Clone(baseArgs), "-tt", fmt.Sprintf("%s@%s", username, host), + "sh -c 'echo stdout_msg; echo stderr_msg >&2'") + cmd := exec.Command("ssh", args...) + + output, err := cmd.CombinedOutput() + require.NoError(t, err, "PTY stderr test failed: %s", output) + assert.Contains(t, string(output), "stdout_msg") + assert.Contains(t, string(output), "stderr_msg") + }) +} + // TestSSHServerFeatureCompatibility tests specific SSH features for compatibility func TestSSHServerFeatureCompatibility(t *testing.T) { if testing.Short() { diff --git a/client/ssh/server/executor_unix.go b/client/ssh/server/executor_unix.go index 8adc824ef..ee0b0ff78 100644 --- a/client/ssh/server/executor_unix.go +++ b/client/ssh/server/executor_unix.go @@ -8,6 +8,7 @@ import ( "fmt" "os" "os/exec" + "path/filepath" "runtime" "strings" "syscall" @@ -35,11 +36,35 @@ type ExecutorConfig struct { } // PrivilegeDropper handles secure privilege dropping in child processes -type PrivilegeDropper struct{} +type PrivilegeDropper struct { + logger *log.Entry +} + +// PrivilegeDropperOption is a functional option for configuring PrivilegeDropper +type PrivilegeDropperOption func(*PrivilegeDropper) // NewPrivilegeDropper creates a new privilege dropper -func NewPrivilegeDropper() *PrivilegeDropper { - return &PrivilegeDropper{} +func NewPrivilegeDropper(opts ...PrivilegeDropperOption) *PrivilegeDropper { + pd := &PrivilegeDropper{} + for _, opt := range opts { + opt(pd) + } + return pd +} + +// WithLogger sets the logger for the PrivilegeDropper +func WithLogger(logger *log.Entry) PrivilegeDropperOption { + return func(pd *PrivilegeDropper) { + pd.logger = logger + } +} + +// log returns the logger, falling back to standard logger if none set +func (pd *PrivilegeDropper) log() *log.Entry { + if pd.logger != nil { + return pd.logger + } + return log.NewEntry(log.StandardLogger()) } // CreateExecutorCommand creates a command that spawns netbird ssh exec for privilege dropping @@ -83,7 +108,7 @@ func (pd *PrivilegeDropper) CreateExecutorCommand(ctx context.Context, config Ex break } } - log.Tracef("creating executor command: %s %v", netbirdPath, safeArgs) + pd.log().Tracef("creating executor command: %s %v", netbirdPath, safeArgs) return exec.CommandContext(ctx, netbirdPath, args...), nil } @@ -206,17 +231,22 @@ func (pd *PrivilegeDropper) ExecuteWithPrivilegeDrop(ctx context.Context, config var execCmd *exec.Cmd if config.Command == "" { - os.Exit(ExitCodeSuccess) + execCmd = exec.CommandContext(ctx, config.Shell) + } else { + execCmd = exec.CommandContext(ctx, config.Shell, "-c", config.Command) } - - execCmd = exec.CommandContext(ctx, config.Shell, "-c", config.Command) + execCmd.Args[0] = "-" + filepath.Base(config.Shell) execCmd.Stdin = os.Stdin execCmd.Stdout = os.Stdout execCmd.Stderr = os.Stderr - cmdParts := strings.Fields(config.Command) - safeCmd := safeLogCommand(cmdParts) - log.Tracef("executing %s -c %s", execCmd.Path, safeCmd) + if config.Command == "" { + log.Tracef("executing login shell: %s", execCmd.Path) + } else { + cmdParts := strings.Fields(config.Command) + safeCmd := safeLogCommand(cmdParts) + log.Tracef("executing %s -c %s", execCmd.Path, safeCmd) + } if err := execCmd.Run(); err != nil { var exitError *exec.ExitError if errors.As(err, &exitError) { diff --git a/client/ssh/server/executor_windows.go b/client/ssh/server/executor_windows.go index d3504e056..51c995ec3 100644 --- a/client/ssh/server/executor_windows.go +++ b/client/ssh/server/executor_windows.go @@ -28,22 +28,45 @@ const ( ) type WindowsExecutorConfig struct { - Username string - Domain string - WorkingDir string - Shell string - Command string - Args []string - Interactive bool - Pty bool - PtyWidth int - PtyHeight int + Username string + Domain string + WorkingDir string + Shell string + Command string + Args []string + Pty bool + PtyWidth int + PtyHeight int } -type PrivilegeDropper struct{} +type PrivilegeDropper struct { + logger *log.Entry +} -func NewPrivilegeDropper() *PrivilegeDropper { - return &PrivilegeDropper{} +// PrivilegeDropperOption is a functional option for configuring PrivilegeDropper +type PrivilegeDropperOption func(*PrivilegeDropper) + +func NewPrivilegeDropper(opts ...PrivilegeDropperOption) *PrivilegeDropper { + pd := &PrivilegeDropper{} + for _, opt := range opts { + opt(pd) + } + return pd +} + +// WithLogger sets the logger for the PrivilegeDropper +func WithLogger(logger *log.Entry) PrivilegeDropperOption { + return func(pd *PrivilegeDropper) { + pd.logger = logger + } +} + +// log returns the logger, falling back to standard logger if none set +func (pd *PrivilegeDropper) log() *log.Entry { + if pd.logger != nil { + return pd.logger + } + return log.NewEntry(log.StandardLogger()) } var ( @@ -56,7 +79,6 @@ const ( // Common error messages commandFlag = "-Command" - closeTokenErrorMsg = "close token error: %v" // #nosec G101 -- This is an error message template, not credentials convertUsernameError = "convert username to UTF16: %w" convertDomainError = "convert domain to UTF16: %w" ) @@ -80,7 +102,7 @@ func (pd *PrivilegeDropper) CreateWindowsExecutorCommand(ctx context.Context, co shellArgs = []string{shell} } - log.Tracef("creating Windows direct shell command: %s %v", shellArgs[0], shellArgs) + pd.log().Tracef("creating Windows direct shell command: %s %v", shellArgs[0], shellArgs) cmd, token, err := pd.CreateWindowsProcessAsUser( ctx, shellArgs[0], shellArgs, config.Username, config.Domain, config.WorkingDir) @@ -180,10 +202,10 @@ func newLsaString(s string) lsaString { // generateS4UUserToken creates a Windows token using S4U authentication // This is the exact approach OpenSSH for Windows uses for public key authentication -func generateS4UUserToken(username, domain string) (windows.Handle, error) { +func generateS4UUserToken(logger *log.Entry, username, domain string) (windows.Handle, error) { userCpn := buildUserCpn(username, domain) - pd := NewPrivilegeDropper() + pd := NewPrivilegeDropper(WithLogger(logger)) isDomainUser := !pd.isLocalUser(domain) lsaHandle, err := initializeLsaConnection() @@ -197,12 +219,12 @@ func generateS4UUserToken(username, domain string) (windows.Handle, error) { return 0, err } - logonInfo, logonInfoSize, err := prepareS4ULogonStructure(username, domain, isDomainUser) + logonInfo, logonInfoSize, err := prepareS4ULogonStructure(logger, username, domain, isDomainUser) if err != nil { return 0, err } - return performS4ULogon(lsaHandle, authPackageId, logonInfo, logonInfoSize, userCpn, isDomainUser) + return performS4ULogon(logger, lsaHandle, authPackageId, logonInfo, logonInfoSize, userCpn, isDomainUser) } // buildUserCpn constructs the user principal name @@ -310,21 +332,21 @@ func lookupPrincipalName(username, domain string) (string, error) { } // prepareS4ULogonStructure creates the appropriate S4U logon structure -func prepareS4ULogonStructure(username, domain string, isDomainUser bool) (unsafe.Pointer, uintptr, error) { +func prepareS4ULogonStructure(logger *log.Entry, username, domain string, isDomainUser bool) (unsafe.Pointer, uintptr, error) { if isDomainUser { - return prepareDomainS4ULogon(username, domain) + return prepareDomainS4ULogon(logger, username, domain) } - return prepareLocalS4ULogon(username) + return prepareLocalS4ULogon(logger, username) } // prepareDomainS4ULogon creates S4U logon structure for domain users -func prepareDomainS4ULogon(username, domain string) (unsafe.Pointer, uintptr, error) { +func prepareDomainS4ULogon(logger *log.Entry, username, domain string) (unsafe.Pointer, uintptr, error) { upn, err := lookupPrincipalName(username, domain) if err != nil { return nil, 0, fmt.Errorf("lookup principal name: %w", err) } - log.Debugf("using KerbS4ULogon for domain user with UPN: %s", upn) + logger.Debugf("using KerbS4ULogon for domain user with UPN: %s", upn) upnUtf16, err := windows.UTF16FromString(upn) if err != nil { @@ -357,8 +379,8 @@ func prepareDomainS4ULogon(username, domain string) (unsafe.Pointer, uintptr, er } // prepareLocalS4ULogon creates S4U logon structure for local users -func prepareLocalS4ULogon(username string) (unsafe.Pointer, uintptr, error) { - log.Debugf("using Msv1_0S4ULogon for local user: %s", username) +func prepareLocalS4ULogon(logger *log.Entry, username string) (unsafe.Pointer, uintptr, error) { + logger.Debugf("using Msv1_0S4ULogon for local user: %s", username) usernameUtf16, err := windows.UTF16FromString(username) if err != nil { @@ -406,11 +428,11 @@ func prepareLocalS4ULogon(username string) (unsafe.Pointer, uintptr, error) { } // performS4ULogon executes the S4U logon operation -func performS4ULogon(lsaHandle windows.Handle, authPackageId uint32, logonInfo unsafe.Pointer, logonInfoSize uintptr, userCpn string, isDomainUser bool) (windows.Handle, error) { +func performS4ULogon(logger *log.Entry, lsaHandle windows.Handle, authPackageId uint32, logonInfo unsafe.Pointer, logonInfoSize uintptr, userCpn string, isDomainUser bool) (windows.Handle, error) { var tokenSource tokenSource copy(tokenSource.SourceName[:], "netbird") if ret, _, _ := procAllocateLocallyUniqueId.Call(uintptr(unsafe.Pointer(&tokenSource.SourceIdentifier))); ret == 0 { - log.Debugf("AllocateLocallyUniqueId failed") + logger.Debugf("AllocateLocallyUniqueId failed") } originName := newLsaString("netbird") @@ -441,7 +463,7 @@ func performS4ULogon(lsaHandle windows.Handle, authPackageId uint32, logonInfo u if profile != 0 { if ret, _, _ := procLsaFreeReturnBuffer.Call(profile); ret != StatusSuccess { - log.Debugf("LsaFreeReturnBuffer failed: 0x%x", ret) + logger.Debugf("LsaFreeReturnBuffer failed: 0x%x", ret) } } @@ -449,7 +471,7 @@ func performS4ULogon(lsaHandle windows.Handle, authPackageId uint32, logonInfo u return 0, fmt.Errorf("LsaLogonUser S4U for %s: NTSTATUS=0x%x, SubStatus=0x%x", userCpn, ret, subStatus) } - log.Debugf("created S4U %s token for user %s", + logger.Debugf("created S4U %s token for user %s", map[bool]string{true: "domain", false: "local"}[isDomainUser], userCpn) return token, nil } @@ -497,8 +519,8 @@ func (pd *PrivilegeDropper) isLocalUser(domain string) bool { // authenticateLocalUser handles authentication for local users func (pd *PrivilegeDropper) authenticateLocalUser(username, fullUsername string) (windows.Handle, error) { - log.Debugf("using S4U authentication for local user %s", fullUsername) - token, err := generateS4UUserToken(username, ".") + pd.log().Debugf("using S4U authentication for local user %s", fullUsername) + token, err := generateS4UUserToken(pd.log(), username, ".") if err != nil { return 0, fmt.Errorf("S4U authentication for local user %s: %w", fullUsername, err) } @@ -507,12 +529,12 @@ func (pd *PrivilegeDropper) authenticateLocalUser(username, fullUsername string) // authenticateDomainUser handles authentication for domain users func (pd *PrivilegeDropper) authenticateDomainUser(username, domain, fullUsername string) (windows.Handle, error) { - log.Debugf("using S4U authentication for domain user %s", fullUsername) - token, err := generateS4UUserToken(username, domain) + pd.log().Debugf("using S4U authentication for domain user %s", fullUsername) + token, err := generateS4UUserToken(pd.log(), username, domain) if err != nil { return 0, fmt.Errorf("S4U authentication for domain user %s: %w", fullUsername, err) } - log.Debugf("Successfully created S4U token for domain user %s", fullUsername) + pd.log().Debugf("successfully created S4U token for domain user %s", fullUsername) return token, nil } @@ -526,7 +548,7 @@ func (pd *PrivilegeDropper) CreateWindowsProcessAsUser(ctx context.Context, exec defer func() { if err := windows.CloseHandle(token); err != nil { - log.Debugf("close impersonation token: %v", err) + pd.log().Debugf("close impersonation token: %v", err) } }() @@ -564,7 +586,7 @@ func (pd *PrivilegeDropper) createProcessWithToken(ctx context.Context, sourceTo return cmd, primaryToken, nil } -// createSuCommand creates a command using su -l -c for privilege switching (Windows stub) -func (s *Server) createSuCommand(ssh.Session, *user.User, bool) (*exec.Cmd, error) { +// createSuCommand creates a command using su - for privilege switching (Windows stub). +func (s *Server) createSuCommand(*log.Entry, ssh.Session, *user.User, bool) (*exec.Cmd, error) { return nil, fmt.Errorf("su command not available on Windows") } diff --git a/client/ssh/server/port_forwarding.go b/client/ssh/server/port_forwarding.go index c60cf4f58..e16ff5d46 100644 --- a/client/ssh/server/port_forwarding.go +++ b/client/ssh/server/port_forwarding.go @@ -271,13 +271,6 @@ func (s *Server) isRemotePortForwardingAllowed() bool { return s.allowRemotePortForwarding } -// isPortForwardingEnabled checks if any port forwarding (local or remote) is enabled -func (s *Server) isPortForwardingEnabled() bool { - s.mu.RLock() - defer s.mu.RUnlock() - return s.allowLocalPortForwarding || s.allowRemotePortForwarding -} - // parseTcpipForwardRequest parses the SSH request payload func (s *Server) parseTcpipForwardRequest(req *cryptossh.Request) (*tcpipForwardMsg, error) { var payload tcpipForwardMsg diff --git a/client/ssh/server/server.go b/client/ssh/server/server.go index e897bbade..1ddb60f8e 100644 --- a/client/ssh/server/server.go +++ b/client/ssh/server/server.go @@ -335,7 +335,7 @@ func (s *Server) GetStatus() (enabled bool, sessions []SessionInfo) { sessions = append(sessions, info) } - // Add authenticated connections without sessions (e.g., -N/-T or port-forwarding only) + // Add authenticated connections without sessions (e.g., -N or port-forwarding only) for key, connState := range s.connections { remoteAddr := string(key) if reportedAddrs[remoteAddr] { diff --git a/client/ssh/server/server_config_test.go b/client/ssh/server/server_config_test.go index d85d85a51..f70e29963 100644 --- a/client/ssh/server/server_config_test.go +++ b/client/ssh/server/server_config_test.go @@ -483,12 +483,11 @@ func TestServer_IsPrivilegedUser(t *testing.T) { } } -func TestServer_PortForwardingOnlySession(t *testing.T) { - // Test that sessions without PTY and command are allowed when port forwarding is enabled +func TestServer_NonPtyShellSession(t *testing.T) { + // Test that non-PTY shell sessions (ssh -T) work regardless of port forwarding settings. currentUser, err := user.Current() require.NoError(t, err, "Should be able to get current user") - // Generate host key for server hostKey, err := ssh.GeneratePrivateKey(ssh.ED25519) require.NoError(t, err) @@ -496,36 +495,26 @@ func TestServer_PortForwardingOnlySession(t *testing.T) { name string allowLocalForwarding bool allowRemoteForwarding bool - expectAllowed bool - description string }{ { - name: "session_allowed_with_local_forwarding", + name: "shell_with_local_forwarding_enabled", allowLocalForwarding: true, allowRemoteForwarding: false, - expectAllowed: true, - description: "Port-forwarding-only session should be allowed when local forwarding is enabled", }, { - name: "session_allowed_with_remote_forwarding", + name: "shell_with_remote_forwarding_enabled", allowLocalForwarding: false, allowRemoteForwarding: true, - expectAllowed: true, - description: "Port-forwarding-only session should be allowed when remote forwarding is enabled", }, { - name: "session_allowed_with_both", + name: "shell_with_both_forwarding_enabled", allowLocalForwarding: true, allowRemoteForwarding: true, - expectAllowed: true, - description: "Port-forwarding-only session should be allowed when both forwarding types enabled", }, { - name: "session_denied_without_forwarding", + name: "shell_with_forwarding_disabled", allowLocalForwarding: false, allowRemoteForwarding: false, - expectAllowed: false, - description: "Port-forwarding-only session should be denied when all forwarding is disabled", }, } @@ -545,7 +534,6 @@ func TestServer_PortForwardingOnlySession(t *testing.T) { _ = server.Stop() }() - // Connect to the server without requesting PTY or command ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -557,20 +545,10 @@ func TestServer_PortForwardingOnlySession(t *testing.T) { _ = client.Close() }() - // Execute a command without PTY - this simulates ssh -T with no command - // The server should either allow it (port forwarding enabled) or reject it - output, err := client.ExecuteCommand(ctx, "") - if tt.expectAllowed { - // When allowed, the session stays open until cancelled - // ExecuteCommand with empty command should return without error - assert.NoError(t, err, "Session should be allowed when port forwarding is enabled") - assert.NotContains(t, output, "port forwarding is disabled", - "Output should not contain port forwarding disabled message") - } else if err != nil { - // When denied, we expect an error message about port forwarding being disabled - assert.Contains(t, err.Error(), "port forwarding is disabled", - "Should get port forwarding disabled message") - } + // Execute without PTY and no command - simulates ssh -T (shell without PTY) + // Should always succeed regardless of port forwarding settings + _, err = client.ExecuteCommand(ctx, "") + assert.NoError(t, err, "Non-PTY shell session should be allowed") }) } } diff --git a/client/ssh/server/server_test.go b/client/ssh/server/server_test.go index 661068539..89fab717f 100644 --- a/client/ssh/server/server_test.go +++ b/client/ssh/server/server_test.go @@ -405,12 +405,14 @@ func TestSSHServer_WindowsShellHandling(t *testing.T) { assert.Equal(t, "-Command", args[1]) assert.Equal(t, "echo test", args[2]) } else { - // Test Unix shell behavior args := server.getShellCommandArgs("/bin/sh", "echo test") assert.Equal(t, "/bin/sh", args[0]) - assert.Equal(t, "-l", args[1]) - assert.Equal(t, "-c", args[2]) - assert.Equal(t, "echo test", args[3]) + assert.Equal(t, "-c", args[1]) + assert.Equal(t, "echo test", args[2]) + + args = server.getShellCommandArgs("/bin/sh", "") + assert.Equal(t, "/bin/sh", args[0]) + assert.Len(t, args, 1) } } diff --git a/client/ssh/server/session_handlers.go b/client/ssh/server/session_handlers.go index 3fd578064..f12a75961 100644 --- a/client/ssh/server/session_handlers.go +++ b/client/ssh/server/session_handlers.go @@ -62,54 +62,12 @@ func (s *Server) sessionHandler(session ssh.Session) { ptyReq, winCh, isPty := session.Pty() hasCommand := len(session.Command()) > 0 - switch { - case isPty && hasCommand: - // ssh -t - Pty command execution - s.handleCommand(logger, session, privilegeResult, winCh) - case isPty: - // ssh - Pty interactive session (login) - s.handlePty(logger, session, privilegeResult, ptyReq, winCh) - case hasCommand: - // ssh - non-Pty command execution - s.handleCommand(logger, session, privilegeResult, nil) - default: - // ssh -T (or ssh -N) - no PTY, no command - s.handleNonInteractiveSession(logger, session) - } -} - -// handleNonInteractiveSession handles sessions that have no PTY and no command. -// These are typically used for port forwarding (ssh -L/-R) or tunneling (ssh -N). -func (s *Server) handleNonInteractiveSession(logger *log.Entry, session ssh.Session) { - s.updateSessionType(session, cmdNonInteractive) - - if !s.isPortForwardingEnabled() { - if _, err := io.WriteString(session, "port forwarding is disabled on this server\n"); err != nil { - logger.Debugf(errWriteSession, err) - } - if err := session.Exit(1); err != nil { - logSessionExitError(logger, err) - } - logger.Infof("rejected non-interactive session: port forwarding disabled") - return - } - - <-session.Context().Done() - - if err := session.Exit(0); err != nil { - logSessionExitError(logger, err) - } -} - -func (s *Server) updateSessionType(session ssh.Session, sessionType string) { - s.mu.Lock() - defer s.mu.Unlock() - - for _, state := range s.sessions { - if state.session == session { - state.sessionType = sessionType - return - } + if isPty && !hasCommand { + // ssh - PTY interactive session (login) + s.handlePtyLogin(logger, session, privilegeResult, ptyReq, winCh) + } else { + // ssh , ssh -t , ssh -T - command or shell execution + s.handleExecution(logger, session, privilegeResult, ptyReq, winCh) } } diff --git a/client/ssh/server/session_handlers_js.go b/client/ssh/server/session_handlers_js.go index c35e4da0b..4a6cf3d92 100644 --- a/client/ssh/server/session_handlers_js.go +++ b/client/ssh/server/session_handlers_js.go @@ -9,8 +9,8 @@ import ( log "github.com/sirupsen/logrus" ) -// handlePty is not supported on JS/WASM -func (s *Server) handlePty(logger *log.Entry, session ssh.Session, _ PrivilegeCheckResult, _ ssh.Pty, _ <-chan ssh.Window) bool { +// handlePtyLogin is not supported on JS/WASM +func (s *Server) handlePtyLogin(logger *log.Entry, session ssh.Session, _ PrivilegeCheckResult, _ ssh.Pty, _ <-chan ssh.Window) bool { errorMsg := "PTY sessions are not supported on WASM/JS platform\n" if _, err := fmt.Fprint(session.Stderr(), errorMsg); err != nil { logger.Debugf(errWriteSession, err) diff --git a/client/ssh/server/userswitching_unix.go b/client/ssh/server/userswitching_unix.go index bc1557419..d80b77042 100644 --- a/client/ssh/server/userswitching_unix.go +++ b/client/ssh/server/userswitching_unix.go @@ -181,8 +181,8 @@ func (s *Server) getSupplementaryGroups(username string) ([]uint32, error) { // createExecutorCommand creates a command that spawns netbird ssh exec for privilege dropping. // Returns the command and a cleanup function (no-op on Unix). -func (s *Server) createExecutorCommand(session ssh.Session, localUser *user.User, hasPty bool) (*exec.Cmd, func(), error) { - log.Debugf("creating executor command for user %s (Pty: %v)", localUser.Username, hasPty) +func (s *Server) createExecutorCommand(logger *log.Entry, session ssh.Session, localUser *user.User, hasPty bool) (*exec.Cmd, func(), error) { + logger.Debugf("creating executor command for user %s (Pty: %v)", localUser.Username, hasPty) if err := validateUsername(localUser.Username); err != nil { return nil, nil, fmt.Errorf("invalid username %q: %w", localUser.Username, err) @@ -192,7 +192,7 @@ func (s *Server) createExecutorCommand(session ssh.Session, localUser *user.User if err != nil { return nil, nil, fmt.Errorf("parse user credentials: %w", err) } - privilegeDropper := NewPrivilegeDropper() + privilegeDropper := NewPrivilegeDropper(WithLogger(logger)) config := ExecutorConfig{ UID: uid, GID: gid, @@ -233,7 +233,7 @@ func (s *Server) createDirectPtyCommand(session ssh.Session, localUser *user.Use shell := getUserShell(localUser.Uid) args := s.getShellCommandArgs(shell, session.RawCommand()) - cmd := exec.CommandContext(session.Context(), args[0], args[1:]...) + cmd := s.createShellCommand(session.Context(), shell, args) cmd.Dir = localUser.HomeDir cmd.Env = s.preparePtyEnv(localUser, ptyReq, session) diff --git a/client/ssh/server/userswitching_windows.go b/client/ssh/server/userswitching_windows.go index 5a5f75fa4..260e1301e 100644 --- a/client/ssh/server/userswitching_windows.go +++ b/client/ssh/server/userswitching_windows.go @@ -88,20 +88,20 @@ func validateUsernameFormat(username string) error { // createExecutorCommand creates a command using Windows executor for privilege dropping. // Returns the command and a cleanup function that must be called after starting the process. -func (s *Server) createExecutorCommand(session ssh.Session, localUser *user.User, hasPty bool) (*exec.Cmd, func(), error) { - log.Debugf("creating Windows executor command for user %s (Pty: %v)", localUser.Username, hasPty) +func (s *Server) createExecutorCommand(logger *log.Entry, session ssh.Session, localUser *user.User, hasPty bool) (*exec.Cmd, func(), error) { + logger.Debugf("creating Windows executor command for user %s (Pty: %v)", localUser.Username, hasPty) username, _ := s.parseUsername(localUser.Username) if err := validateUsername(username); err != nil { return nil, nil, fmt.Errorf("invalid username %q: %w", username, err) } - return s.createUserSwitchCommand(localUser, session, hasPty) + return s.createUserSwitchCommand(logger, session, localUser) } // createUserSwitchCommand creates a command with Windows user switching. // Returns the command and a cleanup function that must be called after starting the process. -func (s *Server) createUserSwitchCommand(localUser *user.User, session ssh.Session, interactive bool) (*exec.Cmd, func(), error) { +func (s *Server) createUserSwitchCommand(logger *log.Entry, session ssh.Session, localUser *user.User) (*exec.Cmd, func(), error) { username, domain := s.parseUsername(localUser.Username) shell := getUserShell(localUser.Uid) @@ -113,15 +113,14 @@ func (s *Server) createUserSwitchCommand(localUser *user.User, session ssh.Sessi } config := WindowsExecutorConfig{ - Username: username, - Domain: domain, - WorkingDir: localUser.HomeDir, - Shell: shell, - Command: command, - Interactive: interactive || (rawCmd == ""), + Username: username, + Domain: domain, + WorkingDir: localUser.HomeDir, + Shell: shell, + Command: command, } - dropper := NewPrivilegeDropper() + dropper := NewPrivilegeDropper(WithLogger(logger)) cmd, token, err := dropper.CreateWindowsExecutorCommand(session.Context(), config) if err != nil { return nil, nil, err @@ -130,7 +129,7 @@ func (s *Server) createUserSwitchCommand(localUser *user.User, session ssh.Sessi cleanup := func() { if token != 0 { if err := windows.CloseHandle(windows.Handle(token)); err != nil { - log.Debugf("close primary token: %v", err) + logger.Debugf("close primary token: %v", err) } } } diff --git a/client/ssh/server/winpty/conpty.go b/client/ssh/server/winpty/conpty.go index 0f3659ffe..c08ccfd05 100644 --- a/client/ssh/server/winpty/conpty.go +++ b/client/ssh/server/winpty/conpty.go @@ -56,7 +56,7 @@ var ( ) // ExecutePtyWithUserToken executes a command with ConPty using user token. -func ExecutePtyWithUserToken(ctx context.Context, session ssh.Session, ptyConfig PtyConfig, userConfig UserConfig) error { +func ExecutePtyWithUserToken(session ssh.Session, ptyConfig PtyConfig, userConfig UserConfig) error { args := buildShellArgs(ptyConfig.Shell, ptyConfig.Command) commandLine := buildCommandLine(args) @@ -64,7 +64,7 @@ func ExecutePtyWithUserToken(ctx context.Context, session ssh.Session, ptyConfig Pty: ptyConfig, User: userConfig, Session: session, - Context: ctx, + Context: session.Context(), } return executeConPtyWithConfig(commandLine, config) From 2248ff392f659f4e941d8f337683f8e2344c60de Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 27 Jan 2026 20:10:59 +0100 Subject: [PATCH 088/374] Remove redundant square bracket trimming in USP endpoint parsing (#5197) --- client/iface/configurer/usp.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/iface/configurer/usp.go b/client/iface/configurer/usp.go index c4ea349df..1298c609d 100644 --- a/client/iface/configurer/usp.go +++ b/client/iface/configurer/usp.go @@ -558,7 +558,7 @@ func parseStatus(deviceName, ipcStr string) (*Stats, error) { continue } - host, portStr, err := net.SplitHostPort(strings.Trim(val, "[]")) + host, portStr, err := net.SplitHostPort(val) if err != nil { log.Errorf("failed to parse endpoint: %v", err) continue From b55262d4a21cee277614e2d0ddd156d99112a9fc Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Wed, 28 Jan 2026 15:06:59 +0100 Subject: [PATCH 089/374] [client] Refactor/optimise raw socket headers (#5174) Pre-create and reuse packet headers to eliminate per-packet allocations. --- client/iface/wgproxy/ebpf/proxy.go | 72 ------------- client/iface/wgproxy/ebpf/wrapper.go | 153 +++++++++++++++++++++++++-- 2 files changed, 147 insertions(+), 78 deletions(-) diff --git a/client/iface/wgproxy/ebpf/proxy.go b/client/iface/wgproxy/ebpf/proxy.go index 5458519fa..1b1a8ce1c 100644 --- a/client/iface/wgproxy/ebpf/proxy.go +++ b/client/iface/wgproxy/ebpf/proxy.go @@ -8,8 +8,6 @@ import ( "net" "sync" - "github.com/google/gopacket" - "github.com/google/gopacket/layers" "github.com/hashicorp/go-multierror" "github.com/pion/transport/v3" log "github.com/sirupsen/logrus" @@ -26,16 +24,6 @@ const ( loopbackAddr = "127.0.0.1" ) -var ( - localHostNetIPv4 = net.ParseIP("127.0.0.1") - localHostNetIPv6 = net.ParseIP("::1") - - serializeOpts = gopacket.SerializeOptions{ - ComputeChecksums: true, - FixLengths: true, - } -) - // WGEBPFProxy definition for proxy with EBPF support type WGEBPFProxy struct { localWGListenPort int @@ -253,63 +241,3 @@ generatePort: } return p.lastUsedPort, nil } - -func (p *WGEBPFProxy) sendPkg(data []byte, endpointAddr *net.UDPAddr) error { - - var ipH gopacket.SerializableLayer - var networkLayer gopacket.NetworkLayer - var dstIP net.IP - var rawConn net.PacketConn - - if endpointAddr.IP.To4() != nil { - // IPv4 path - ipv4 := &layers.IPv4{ - DstIP: localHostNetIPv4, - SrcIP: endpointAddr.IP, - Version: 4, - TTL: 64, - Protocol: layers.IPProtocolUDP, - } - ipH = ipv4 - networkLayer = ipv4 - dstIP = localHostNetIPv4 - rawConn = p.rawConnIPv4 - } else { - // IPv6 path - if p.rawConnIPv6 == nil { - return fmt.Errorf("IPv6 raw socket not available") - } - ipv6 := &layers.IPv6{ - DstIP: localHostNetIPv6, - SrcIP: endpointAddr.IP, - Version: 6, - HopLimit: 64, - NextHeader: layers.IPProtocolUDP, - } - ipH = ipv6 - networkLayer = ipv6 - dstIP = localHostNetIPv6 - rawConn = p.rawConnIPv6 - } - - udpH := &layers.UDP{ - SrcPort: layers.UDPPort(endpointAddr.Port), - DstPort: layers.UDPPort(p.localWGListenPort), - } - - if err := udpH.SetNetworkLayerForChecksum(networkLayer); err != nil { - return fmt.Errorf("set network layer for checksum: %w", err) - } - - layerBuffer := gopacket.NewSerializeBuffer() - payload := gopacket.Payload(data) - - if err := gopacket.SerializeLayers(layerBuffer, serializeOpts, ipH, udpH, payload); err != nil { - return fmt.Errorf("serialize layers: %w", err) - } - - if _, err := rawConn.WriteTo(layerBuffer.Bytes(), &net.IPAddr{IP: dstIP}); err != nil { - return fmt.Errorf("write to raw conn: %w", err) - } - return nil -} diff --git a/client/iface/wgproxy/ebpf/wrapper.go b/client/iface/wgproxy/ebpf/wrapper.go index 5b98be7b4..6e80945c4 100644 --- a/client/iface/wgproxy/ebpf/wrapper.go +++ b/client/iface/wgproxy/ebpf/wrapper.go @@ -10,12 +10,89 @@ import ( "net" "sync" + "github.com/google/gopacket" + "github.com/google/gopacket/layers" log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/client/iface/bufsize" "github.com/netbirdio/netbird/client/iface/wgproxy/listener" ) +var ( + errIPv6ConnNotAvailable = errors.New("IPv6 endpoint but rawConnIPv6 is not available") + errIPv4ConnNotAvailable = errors.New("IPv4 endpoint but rawConnIPv4 is not available") + + localHostNetIPv4 = net.ParseIP("127.0.0.1") + localHostNetIPv6 = net.ParseIP("::1") + + serializeOpts = gopacket.SerializeOptions{ + ComputeChecksums: true, + FixLengths: true, + } +) + +// PacketHeaders holds pre-created headers and buffers for efficient packet sending +type PacketHeaders struct { + ipH gopacket.SerializableLayer + udpH *layers.UDP + layerBuffer gopacket.SerializeBuffer + localHostAddr net.IP + isIPv4 bool +} + +func NewPacketHeaders(localWGListenPort int, endpoint *net.UDPAddr) (*PacketHeaders, error) { + var ipH gopacket.SerializableLayer + var networkLayer gopacket.NetworkLayer + var localHostAddr net.IP + var isIPv4 bool + + // Check if source address is IPv4 or IPv6 + if endpoint.IP.To4() != nil { + // IPv4 path + ipv4 := &layers.IPv4{ + DstIP: localHostNetIPv4, + SrcIP: endpoint.IP, + Version: 4, + TTL: 64, + Protocol: layers.IPProtocolUDP, + } + ipH = ipv4 + networkLayer = ipv4 + localHostAddr = localHostNetIPv4 + isIPv4 = true + } else { + // IPv6 path + ipv6 := &layers.IPv6{ + DstIP: localHostNetIPv6, + SrcIP: endpoint.IP, + Version: 6, + HopLimit: 64, + NextHeader: layers.IPProtocolUDP, + } + ipH = ipv6 + networkLayer = ipv6 + localHostAddr = localHostNetIPv6 + isIPv4 = false + } + + udpH := &layers.UDP{ + SrcPort: layers.UDPPort(endpoint.Port), + DstPort: layers.UDPPort(localWGListenPort), + } + + if err := udpH.SetNetworkLayerForChecksum(networkLayer); err != nil { + return nil, fmt.Errorf("set network layer for checksum: %w", err) + } + + return &PacketHeaders{ + ipH: ipH, + udpH: udpH, + layerBuffer: gopacket.NewSerializeBuffer(), + localHostAddr: localHostAddr, + isIPv4: isIPv4, + }, nil +} + // ProxyWrapper help to keep the remoteConn instance for net.Conn.Close function call type ProxyWrapper struct { wgeBPFProxy *WGEBPFProxy @@ -24,8 +101,10 @@ type ProxyWrapper struct { ctx context.Context cancel context.CancelFunc - wgRelayedEndpointAddr *net.UDPAddr - wgEndpointCurrentUsedAddr *net.UDPAddr + wgRelayedEndpointAddr *net.UDPAddr + headers *PacketHeaders + headerCurrentUsed *PacketHeaders + rawConn net.PacketConn paused bool pausedCond *sync.Cond @@ -41,15 +120,32 @@ func NewProxyWrapper(proxy *WGEBPFProxy) *ProxyWrapper { closeListener: listener.NewCloseListener(), } } + func (p *ProxyWrapper) AddTurnConn(ctx context.Context, _ *net.UDPAddr, remoteConn net.Conn) error { addr, err := p.wgeBPFProxy.AddTurnConn(remoteConn) if err != nil { return fmt.Errorf("add turn conn: %w", err) } + + headers, err := NewPacketHeaders(p.wgeBPFProxy.localWGListenPort, addr) + if err != nil { + return fmt.Errorf("create packet sender: %w", err) + } + + // Check if required raw connection is available + if !headers.isIPv4 && p.wgeBPFProxy.rawConnIPv6 == nil { + return errIPv6ConnNotAvailable + } + if headers.isIPv4 && p.wgeBPFProxy.rawConnIPv4 == nil { + return errIPv4ConnNotAvailable + } + p.remoteConn = remoteConn p.ctx, p.cancel = context.WithCancel(ctx) p.wgRelayedEndpointAddr = addr - return err + p.headers = headers + p.rawConn = p.selectRawConn(headers) + return nil } func (p *ProxyWrapper) EndpointAddr() *net.UDPAddr { @@ -68,7 +164,8 @@ func (p *ProxyWrapper) Work() { p.pausedCond.L.Lock() p.paused = false - p.wgEndpointCurrentUsedAddr = p.wgRelayedEndpointAddr + p.headerCurrentUsed = p.headers + p.rawConn = p.selectRawConn(p.headerCurrentUsed) if !p.isStarted { p.isStarted = true @@ -95,10 +192,28 @@ func (p *ProxyWrapper) RedirectAs(endpoint *net.UDPAddr) { log.Errorf("failed to start package redirection, endpoint is nil") return } + + header, err := NewPacketHeaders(p.wgeBPFProxy.localWGListenPort, endpoint) + if err != nil { + log.Errorf("failed to create packet headers: %s", err) + return + } + + // Check if required raw connection is available + if !header.isIPv4 && p.wgeBPFProxy.rawConnIPv6 == nil { + log.Error(errIPv6ConnNotAvailable) + return + } + if header.isIPv4 && p.wgeBPFProxy.rawConnIPv4 == nil { + log.Error(errIPv4ConnNotAvailable) + return + } + p.pausedCond.L.Lock() p.paused = false - p.wgEndpointCurrentUsedAddr = endpoint + p.headerCurrentUsed = header + p.rawConn = p.selectRawConn(header) p.pausedCond.Signal() p.pausedCond.L.Unlock() @@ -140,7 +255,7 @@ func (p *ProxyWrapper) proxyToLocal(ctx context.Context) { p.pausedCond.Wait() } - err = p.wgeBPFProxy.sendPkg(buf[:n], p.wgEndpointCurrentUsedAddr) + err = p.sendPkg(buf[:n], p.headerCurrentUsed) p.pausedCond.L.Unlock() if err != nil { @@ -166,3 +281,29 @@ func (p *ProxyWrapper) readFromRemote(ctx context.Context, buf []byte) (int, err } return n, nil } + +func (p *ProxyWrapper) sendPkg(data []byte, header *PacketHeaders) error { + defer func() { + if err := header.layerBuffer.Clear(); err != nil { + log.Errorf("failed to clear layer buffer: %s", err) + } + }() + + payload := gopacket.Payload(data) + + if err := gopacket.SerializeLayers(header.layerBuffer, serializeOpts, header.ipH, header.udpH, payload); err != nil { + return fmt.Errorf("serialize layers: %w", err) + } + + if _, err := p.rawConn.WriteTo(header.layerBuffer.Bytes(), &net.IPAddr{IP: header.localHostAddr}); err != nil { + return fmt.Errorf("write to raw conn: %w", err) + } + return nil +} + +func (p *ProxyWrapper) selectRawConn(header *PacketHeaders) net.PacketConn { + if header.isIPv4 { + return p.wgeBPFProxy.rawConnIPv4 + } + return p.wgeBPFProxy.rawConnIPv6 +} From cead3f38ee4912fe0d3962bf2a1d14d927a7eed3 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Wed, 28 Jan 2026 18:24:12 +0100 Subject: [PATCH 090/374] [management] fix ephemeral peers being not removed (#5203) --- management/internals/shared/grpc/server.go | 4 +++- management/server/peer.go | 10 +++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index 32049d044..219baaf6d 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -17,13 +17,14 @@ import ( pb "github.com/golang/protobuf/proto" // nolint "github.com/golang/protobuf/ptypes/timestamp" "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/realip" - "github.com/netbirdio/netbird/shared/management/client/common" log "github.com/sirupsen/logrus" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" "google.golang.org/grpc/codes" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" + "github.com/netbirdio/netbird/shared/management/client/common" + "github.com/netbirdio/netbird/management/internals/controllers/network_map" nbconfig "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/idp" @@ -304,6 +305,7 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S if err != nil { log.WithContext(ctx).Debugf("error while sending initial sync for %s: %v", peerKey.String(), err) s.syncSem.Add(-1) + s.cancelPeerRoutines(ctx, accountID, peer) return err } diff --git a/management/server/peer.go b/management/server/peer.go index 80c74e209..ab72d3051 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -728,11 +728,6 @@ func (am *DefaultAccountManager) AddPeer(ctx context.Context, accountID, setupKe return fmt.Errorf("failed adding peer to All group: %w", err) } - if temporary { - // we should track ephemeral peers to be able to clean them if the peer don't sync and be marked as connected - am.networkMapController.TrackEphemeralPeer(ctx, newPeer) - } - if addedByUser { err := transaction.SaveUserLastLogin(ctx, accountID, userID, newPeer.GetLastLogin()) if err != nil { @@ -760,6 +755,11 @@ func (am *DefaultAccountManager) AddPeer(ctx context.Context, accountID, setupKe return fmt.Errorf("failed to increment network serial: %w", err) } + if ephemeral { + // we should track ephemeral peers to be able to clean them if the peer doesn't sync and isn't marked as connected + am.networkMapController.TrackEphemeralPeer(ctx, newPeer) + } + log.WithContext(ctx).Debugf("Peer %s added to account %s", newPeer.ID, accountID) return nil }) From 0169e4540fbbe04c88c8600ae0cd0033211f893c Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Thu, 29 Jan 2026 10:58:45 +0100 Subject: [PATCH 091/374] [management] fix skip of ephemeral peers on deletion (#5206) --- .../modules/peers/ephemeral/manager/ephemeral.go | 4 ++-- management/internals/modules/peers/manager.go | 12 +++++++++++- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/management/internals/modules/peers/ephemeral/manager/ephemeral.go b/management/internals/modules/peers/ephemeral/manager/ephemeral.go index 15119045b..758f643d0 100644 --- a/management/internals/modules/peers/ephemeral/manager/ephemeral.go +++ b/management/internals/modules/peers/ephemeral/manager/ephemeral.go @@ -187,10 +187,10 @@ func (e *EphemeralManager) cleanup(ctx context.Context) { } for accountID, peerIDs := range peerIDsPerAccount { - log.WithContext(ctx).Debugf("delete ephemeral peers for account: %s", accountID) + log.WithContext(ctx).Tracef("cleanup: deleting %d ephemeral peers for account %s", len(peerIDs), accountID) err := e.peersManager.DeletePeers(ctx, accountID, peerIDs, activity.SystemInitiator, true) if err != nil { - log.WithContext(ctx).Errorf("failed to delete ephemeral peer: %s", err) + log.WithContext(ctx).Errorf("failed to delete ephemeral peers: %s", err) } } } diff --git a/management/internals/modules/peers/manager.go b/management/internals/modules/peers/manager.go index 1551689b4..7ac2e379f 100644 --- a/management/internals/modules/peers/manager.go +++ b/management/internals/modules/peers/manager.go @@ -108,10 +108,19 @@ func (m *managerImpl) DeletePeers(ctx context.Context, accountID string, peerIDs err = m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { peer, err := transaction.GetPeerByID(ctx, store.LockingStrengthNone, accountID, peerID) if err != nil { + if e, ok := status.FromError(err); ok && e.Type() == status.NotFound { + log.WithContext(ctx).Tracef("DeletePeers: peer %s not found, skipping", peerID) + return nil + } return err } if checkConnected && (peer.Status.Connected || peer.Status.LastSeen.After(time.Now().Add(-(ephemeral.EphemeralLifeTime - 10*time.Second)))) { + log.WithContext(ctx).Tracef("DeletePeers: peer %s skipped (connected=%t, lastSeen=%s, threshold=%s, ephemeral=%t)", + peerID, peer.Status.Connected, + peer.Status.LastSeen.Format(time.RFC3339), + time.Now().Add(-(ephemeral.EphemeralLifeTime - 10*time.Second)).Format(time.RFC3339), + peer.Ephemeral) return nil } @@ -150,7 +159,8 @@ func (m *managerImpl) DeletePeers(ctx context.Context, accountID string, peerIDs return nil }) if err != nil { - return err + log.WithContext(ctx).Errorf("DeletePeers: failed to delete peer %s: %v", peerID, err) + continue } if m.integratedPeerValidator != nil { From f74bc48d16c1c7df28ef47e640507804c9e80b55 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 29 Jan 2026 18:05:06 +0800 Subject: [PATCH 092/374] [Client] Stop NetBird on firewall init failure (#5208) --- client/internal/engine.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/client/internal/engine.go b/client/internal/engine.go index f0693e82c..63ba1c9f2 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -573,9 +573,11 @@ func (e *Engine) createFirewall() error { var err error e.firewall, err = firewall.NewFirewall(e.wgInterface, e.stateManager, e.flowManager.GetLogger(), e.config.DisableServerRoutes, e.config.MTU) - if err != nil || e.firewall == nil { - log.Errorf("failed creating firewall manager: %s", err) - return nil + if err != nil { + return fmt.Errorf("create firewall manager: %w", err) + } + if e.firewall == nil { + return fmt.Errorf("create firewall manager: received nil manager") } if err := e.initFirewall(); err != nil { From 81c11df1034956b82a036f209e6a1b0af2f037f2 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 29 Jan 2026 20:51:44 +0800 Subject: [PATCH 093/374] [management] Streamline domain validation (#5211) --- client/internal/dns/handler_chain_test.go | 48 +++++ management/cmd/management.go | 7 +- .../internals/modules/zones/records/record.go | 8 +- management/internals/modules/zones/zone.go | 4 +- management/server/account.go | 11 +- management/server/nameserver.go | 25 +-- management/server/nameserver_test.go | 85 +++----- .../server/networks/resources/manager_test.go | 6 +- .../networks/resources/types/resource.go | 4 +- .../networks/resources/types/resource_test.go | 6 +- management/server/util/util.go | 10 - shared/management/domain/validate.go | 30 ++- shared/management/domain/validate_test.go | 200 +++++++++++++++++- 13 files changed, 339 insertions(+), 105 deletions(-) diff --git a/client/internal/dns/handler_chain_test.go b/client/internal/dns/handler_chain_test.go index 72c0004d5..fa9525069 100644 --- a/client/internal/dns/handler_chain_test.go +++ b/client/internal/dns/handler_chain_test.go @@ -112,6 +112,54 @@ func TestHandlerChain_ServeDNS_DomainMatching(t *testing.T) { matchSubdomains: false, shouldMatch: false, }, + { + name: "single letter TLD exact match", + handlerDomain: "example.x.", + queryDomain: "example.x.", + isWildcard: false, + matchSubdomains: false, + shouldMatch: true, + }, + { + name: "single letter TLD subdomain match", + handlerDomain: "example.x.", + queryDomain: "sub.example.x.", + isWildcard: false, + matchSubdomains: true, + shouldMatch: true, + }, + { + name: "single letter TLD wildcard match", + handlerDomain: "*.example.x.", + queryDomain: "sub.example.x.", + isWildcard: true, + matchSubdomains: false, + shouldMatch: true, + }, + { + name: "two letter domain labels", + handlerDomain: "a.b.", + queryDomain: "a.b.", + isWildcard: false, + matchSubdomains: false, + shouldMatch: true, + }, + { + name: "single character domain", + handlerDomain: "x.", + queryDomain: "x.", + isWildcard: false, + matchSubdomains: false, + shouldMatch: true, + }, + { + name: "single character domain with subdomain match", + handlerDomain: "x.", + queryDomain: "sub.x.", + isWildcard: false, + matchSubdomains: true, + shouldMatch: true, + }, } for _, tt := range tests { diff --git a/management/cmd/management.go b/management/cmd/management.go index 7da04074b..511168823 100644 --- a/management/cmd/management.go +++ b/management/cmd/management.go @@ -16,13 +16,13 @@ import ( "strings" "syscall" - "github.com/miekg/dns" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/netbirdio/netbird/formatter/hook" "github.com/netbirdio/netbird/management/internals/server" nbconfig "github.com/netbirdio/netbird/management/internals/server/config" + nbdomain "github.com/netbirdio/netbird/shared/management/domain" "github.com/netbirdio/netbird/util" "github.com/netbirdio/netbird/util/crypt" ) @@ -78,9 +78,8 @@ var ( } } - _, valid := dns.IsDomainName(dnsDomain) - if !valid || len(dnsDomain) > 192 { - return fmt.Errorf("failed parsing the provided dns-domain. Valid status: %t, Length: %d", valid, len(dnsDomain)) + if !nbdomain.IsValidDomainNoWildcard(dnsDomain) { + return fmt.Errorf("invalid dns-domain: %s", dnsDomain) } return nil diff --git a/management/internals/modules/zones/records/record.go b/management/internals/modules/zones/records/record.go index e44de08f4..1488febb9 100644 --- a/management/internals/modules/zones/records/record.go +++ b/management/internals/modules/zones/records/record.go @@ -6,7 +6,7 @@ import ( "github.com/rs/xid" - "github.com/netbirdio/netbird/management/server/util" + "github.com/netbirdio/netbird/shared/management/domain" "github.com/netbirdio/netbird/shared/management/http/api" ) @@ -63,7 +63,7 @@ func (r *Record) Validate() error { return errors.New("record name is required") } - if !util.IsValidDomain(r.Name) { + if !domain.IsValidDomain(r.Name) { return errors.New("invalid record name format") } @@ -81,8 +81,8 @@ func (r *Record) Validate() error { return err } case RecordTypeCNAME: - if !util.IsValidDomain(r.Content) { - return errors.New("invalid CNAME record format") + if !domain.IsValidDomainNoWildcard(r.Content) { + return errors.New("invalid CNAME target format") } default: return errors.New("invalid record type, must be A, AAAA, or CNAME") diff --git a/management/internals/modules/zones/zone.go b/management/internals/modules/zones/zone.go index 27adac1ac..f5ebed26c 100644 --- a/management/internals/modules/zones/zone.go +++ b/management/internals/modules/zones/zone.go @@ -6,7 +6,7 @@ import ( "github.com/rs/xid" "github.com/netbirdio/netbird/management/internals/modules/zones/records" - "github.com/netbirdio/netbird/management/server/util" + "github.com/netbirdio/netbird/shared/management/domain" "github.com/netbirdio/netbird/shared/management/http/api" ) @@ -73,7 +73,7 @@ func (z *Zone) Validate() error { return errors.New("zone name exceeds maximum length of 255 characters") } - if !util.IsValidDomain(z.Domain) { + if !domain.IsValidDomainNoWildcard(z.Domain) { return errors.New("invalid zone domain format") } diff --git a/management/server/account.go b/management/server/account.go index d453b87c3..ba5f0cffa 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -26,6 +26,7 @@ import ( "golang.org/x/exp/maps" nbdns "github.com/netbirdio/netbird/dns" + nbdomain "github.com/netbirdio/netbird/shared/management/domain" "github.com/netbirdio/netbird/formatter/hook" "github.com/netbirdio/netbird/management/internals/controllers/network_map" nbconfig "github.com/netbirdio/netbird/management/internals/server/config" @@ -231,7 +232,7 @@ func BuildManager( // enable single account mode only if configured by user and number of existing accounts is not grater than 1 am.singleAccountMode = singleAccountModeDomain != "" && accountsCounter <= 1 if am.singleAccountMode { - if !isDomainValid(singleAccountModeDomain) { + if !nbdomain.IsValidDomainNoWildcard(singleAccountModeDomain) { return nil, status.Errorf(status.InvalidArgument, "invalid domain \"%s\" provided for a single account mode. Please review your input for --single-account-mode-domain", singleAccountModeDomain) } am.singleAccountModeDomain = singleAccountModeDomain @@ -402,7 +403,7 @@ func (am *DefaultAccountManager) validateSettingsUpdate(ctx context.Context, tra return status.Errorf(status.InvalidArgument, "peer login expiration can't be smaller than one hour") } - if newSettings.DNSDomain != "" && !isDomainValid(newSettings.DNSDomain) { + if newSettings.DNSDomain != "" && !nbdomain.IsValidDomainNoWildcard(newSettings.DNSDomain) { return status.Errorf(status.InvalidArgument, "invalid domain \"%s\" provided for DNS domain", newSettings.DNSDomain) } @@ -1691,10 +1692,12 @@ func (am *DefaultAccountManager) SyncPeerMeta(ctx context.Context, peerPubKey st return nil } -var invalidDomainRegexp = regexp.MustCompile(`^([a-z0-9]+(-[a-z0-9]+)*\.)+[a-z]{2,}$`) +// isDomainValid validates public/IDP domains using stricter rules than internal DNS domains. +// Requires at least 2-char alphabetic TLD and no single-label domains. +var publicDomainRegexp = regexp.MustCompile(`^([a-z0-9]+(-[a-z0-9]+)*\.)+[a-z]{2,}$`) func isDomainValid(domain string) bool { - return invalidDomainRegexp.MatchString(domain) + return publicDomainRegexp.MatchString(domain) } func (am *DefaultAccountManager) onPeersInvalidated(ctx context.Context, accountID string, peerIDs []string) { diff --git a/management/server/nameserver.go b/management/server/nameserver.go index a3eb4ae2e..3d8c78912 100644 --- a/management/server/nameserver.go +++ b/management/server/nameserver.go @@ -3,10 +3,10 @@ package server import ( "context" "errors" - "regexp" + "fmt" + "strings" "unicode/utf8" - "github.com/miekg/dns" "github.com/rs/xid" nbdns "github.com/netbirdio/netbird/dns" @@ -15,11 +15,10 @@ import ( "github.com/netbirdio/netbird/management/server/permissions/operations" "github.com/netbirdio/netbird/management/server/store" "github.com/netbirdio/netbird/management/server/types" + nbdomain "github.com/netbirdio/netbird/shared/management/domain" "github.com/netbirdio/netbird/shared/management/status" ) -const domainPattern = `^(?i)[a-z0-9]+([\-\.]{1}[a-z0-9]+)*[*.a-z]{1,}$` - var errInvalidDomainName = errors.New("invalid domain name") // GetNameServerGroup gets a nameserver group object from account and nameserver group IDs @@ -305,16 +304,18 @@ func validateGroups(list []string, groups map[string]*types.Group) error { return nil } -var domainMatcher = regexp.MustCompile(domainPattern) - -func validateDomain(domain string) error { - if !domainMatcher.MatchString(domain) { - return errors.New("domain should consists of only letters, numbers, and hyphens with no leading, trailing hyphens, or spaces") +// validateDomain validates a nameserver match domain. +// Converts unicode to punycode. Wildcards are not allowed for nameservers. +func validateDomain(d string) error { + if strings.HasPrefix(d, "*.") { + return errors.New("wildcards not allowed") } - _, valid := dns.IsDomainName(domain) - if !valid { - return errInvalidDomainName + // Nameservers allow trailing dot (FQDN format) + toValidate := strings.TrimSuffix(d, ".") + + if _, err := nbdomain.ValidateDomains([]string{toValidate}); err != nil { + return fmt.Errorf("%w: %w", errInvalidDomainName, err) } return nil diff --git a/management/server/nameserver_test.go b/management/server/nameserver_test.go index 0d781e0d4..90b4b9687 100644 --- a/management/server/nameserver_test.go +++ b/management/server/nameserver_test.go @@ -901,82 +901,53 @@ func initTestNSAccount(t *testing.T, am *DefaultAccountManager) (*types.Account, return account, nil } +// TestValidateDomain tests nameserver-specific domain validation. +// Core domain validation is tested in shared/management/domain/validate_test.go. +// This test only covers nameserver-specific behavior: wildcard rejection and unicode support. func TestValidateDomain(t *testing.T) { testCases := []struct { name string domain string errFunc require.ErrorAssertionFunc }{ + // Nameserver-specific: wildcards not allowed { - name: "Valid domain name with multiple labels", - domain: "123.example.com", + name: "Wildcard prefix rejected", + domain: "*.example.com", + errFunc: require.Error, + }, + { + name: "Wildcard in middle rejected", + domain: "a.*.example.com", + errFunc: require.Error, + }, + // Nameserver-specific: unicode converted to punycode + { + name: "Unicode domain converted to punycode", + domain: "münchen.de", errFunc: require.NoError, }, { - name: "Valid domain name with hyphen", - domain: "test-example.com", + name: "Unicode domain all labels", + domain: "中国.中国", + errFunc: require.NoError, + }, + // Basic validation still works (delegates to shared validation) + { + name: "Valid multi-label domain", + domain: "example.com", errFunc: require.NoError, }, { - name: "Valid domain name with only one label", - domain: "example", + name: "Valid single label", + domain: "internal", errFunc: require.NoError, }, { - name: "Valid domain name with trailing dot", - domain: "example.", - errFunc: require.NoError, - }, - { - name: "Invalid wildcard domain name", - domain: "*.example", - errFunc: require.Error, - }, - { - name: "Invalid domain name with leading dot", - domain: ".com", - errFunc: require.Error, - }, - { - name: "Invalid domain name with dot only", - domain: ".", - errFunc: require.Error, - }, - { - name: "Invalid domain name with double hyphen", - domain: "test--example.com", - errFunc: require.Error, - }, - { - name: "Invalid domain name with a label exceeding 63 characters", - domain: "dnsdnsdnsdnsdnsdnsdnsdnsdnsdnsdnsdnsdnsdnsdnsdnsdnsdnsdnsdnsdnsdns.com", - errFunc: require.Error, - }, - { - name: "Invalid domain name starting with a hyphen", + name: "Invalid leading hyphen", domain: "-example.com", errFunc: require.Error, }, - { - name: "Invalid domain name ending with a hyphen", - domain: "example.com-", - errFunc: require.Error, - }, - { - name: "Invalid domain with unicode", - domain: "example?,.com", - errFunc: require.Error, - }, - { - name: "Invalid domain with space before top-level domain", - domain: "space .example.com", - errFunc: require.Error, - }, - { - name: "Invalid domain with trailing space", - domain: "example.com ", - errFunc: require.Error, - }, } for _, testCase := range testCases { diff --git a/management/server/networks/resources/manager_test.go b/management/server/networks/resources/manager_test.go index e2dea2c6b..29b0af2cc 100644 --- a/management/server/networks/resources/manager_test.go +++ b/management/server/networks/resources/manager_test.go @@ -203,7 +203,7 @@ func Test_CreateResourceFailsWithInvalidAddress(t *testing.T) { NetworkID: "testNetworkId", Name: "testResourceId", Description: "description", - Address: "invalid-address", + Address: "-invalid", } store, cleanUp, err := store.NewTestStoreFromSQL(context.Background(), "../../testdata/networks.sql", t.TempDir()) @@ -227,9 +227,9 @@ func Test_CreateResourceFailsWithUsedName(t *testing.T) { resource := &types.NetworkResource{ AccountID: "testAccountId", NetworkID: "testNetworkId", - Name: "testResourceId", + Name: "used-name", Description: "description", - Address: "invalid-address", + Address: "example.com", } store, cleanUp, err := store.NewTestStoreFromSQL(context.Background(), "../../testdata/networks.sql", t.TempDir()) diff --git a/management/server/networks/resources/types/resource.go b/management/server/networks/resources/types/resource.go index 6b8cf9412..1fa908393 100644 --- a/management/server/networks/resources/types/resource.go +++ b/management/server/networks/resources/types/resource.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "net/netip" - "regexp" "github.com/rs/xid" @@ -166,8 +165,7 @@ func GetResourceType(address string) (NetworkResourceType, string, netip.Prefix, return Host, "", netip.PrefixFrom(ip, ip.BitLen()), nil } - domainRegex := regexp.MustCompile(`^(\*\.)?([a-zA-Z0-9-]+\.)+[a-zA-Z]{2,}$`) - if domainRegex.MatchString(address) { + if _, err := nbDomain.ValidateDomains([]string{address}); err == nil { return Domain, address, netip.Prefix{}, nil } diff --git a/management/server/networks/resources/types/resource_test.go b/management/server/networks/resources/types/resource_test.go index 02e802300..a842b0a28 100644 --- a/management/server/networks/resources/types/resource_test.go +++ b/management/server/networks/resources/types/resource_test.go @@ -23,10 +23,12 @@ func TestGetResourceType(t *testing.T) { {"example.com", Domain, false, "example.com", netip.Prefix{}}, {"*.example.com", Domain, false, "*.example.com", netip.Prefix{}}, {"sub.example.com", Domain, false, "sub.example.com", netip.Prefix{}}, + {"example.x", Domain, false, "example.x", netip.Prefix{}}, + {"internal", Domain, false, "internal", netip.Prefix{}}, // Invalid inputs - {"invalid", "", true, "", netip.Prefix{}}, {"1.1.1.1/abc", "", true, "", netip.Prefix{}}, - {"1234", "", true, "", netip.Prefix{}}, + {"-invalid.com", "", true, "", netip.Prefix{}}, + {"", "", true, "", netip.Prefix{}}, } for _, tt := range tests { diff --git a/management/server/util/util.go b/management/server/util/util.go index eea6a72b0..ce9759864 100644 --- a/management/server/util/util.go +++ b/management/server/util/util.go @@ -1,9 +1,5 @@ package util -import "regexp" - -var domainRegex = regexp.MustCompile(`^(\*\.)?([a-zA-Z0-9-]+\.)+[a-zA-Z]{2,}$`) - // Difference returns the elements in `a` that aren't in `b`. func Difference(a, b []string) []string { mb := make(map[string]struct{}, len(b)) @@ -55,9 +51,3 @@ func contains[T comparableObject[T]](slice []T, element T) bool { return false } -func IsValidDomain(domain string) bool { - if domain == "" { - return false - } - return domainRegex.MatchString(domain) -} diff --git a/shared/management/domain/validate.go b/shared/management/domain/validate.go index bf2af7116..1858b5d55 100644 --- a/shared/management/domain/validate.go +++ b/shared/management/domain/validate.go @@ -10,7 +10,30 @@ const maxDomains = 32 var domainRegex = regexp.MustCompile(`^(?:\*\.)?(?:(?:xn--)?[a-zA-Z0-9_](?:[a-zA-Z0-9-_]{0,61}[a-zA-Z0-9])?\.)*(?:xn--)?[a-zA-Z0-9](?:[a-zA-Z0-9-_]{0,61}[a-zA-Z0-9])?$`) -// ValidateDomains checks if each domain in the list is valid and returns a punycode-encoded DomainList. +// IsValidDomain checks if a single domain string is valid. +// Does not convert unicode to punycode - domain must already be ASCII/punycode. +// Allows wildcard prefix (*.example.com). +func IsValidDomain(domain string) bool { + if domain == "" { + return false + } + return domainRegex.MatchString(strings.ToLower(domain)) +} + +// IsValidDomainNoWildcard checks if a single domain string is valid without wildcard prefix. +// Use for zone domains and CNAME targets where wildcards are not allowed. +func IsValidDomainNoWildcard(domain string) bool { + if domain == "" { + return false + } + if strings.HasPrefix(domain, "*.") { + return false + } + return domainRegex.MatchString(strings.ToLower(domain)) +} + +// ValidateDomains validates domains and converts unicode to punycode. +// Allows wildcard prefix (*.example.com). Maximum 32 domains. func ValidateDomains(domains []string) (List, error) { if len(domains) == 0 { return nil, fmt.Errorf("domains list is empty") @@ -37,7 +60,10 @@ func ValidateDomains(domains []string) (List, error) { return domainList, nil } -// ValidateDomainsList checks if each domain in the list is valid +// ValidateDomainsList validates domains without punycode conversion. +// Use this for domains that must already be in ASCII/punycode format (e.g., extra DNS labels). +// Unlike ValidateDomains, this does not convert unicode to punycode - unicode domains will fail. +// Allows wildcard prefix (*.example.com). Maximum 32 domains. func ValidateDomainsList(domains []string) error { if len(domains) == 0 { return nil diff --git a/shared/management/domain/validate_test.go b/shared/management/domain/validate_test.go index 30efcd9a9..9dbcd8ac8 100644 --- a/shared/management/domain/validate_test.go +++ b/shared/management/domain/validate_test.go @@ -2,12 +2,16 @@ package domain import ( "fmt" + "strings" "testing" "github.com/stretchr/testify/assert" ) func TestValidateDomains(t *testing.T) { + label63 := strings.Repeat("a", 63) + label64 := strings.Repeat("a", 64) + tests := []struct { name string domains []string @@ -26,6 +30,48 @@ func TestValidateDomains(t *testing.T) { expected: List{"sub.ex-ample.com"}, wantErr: false, }, + { + name: "Valid uppercase domain normalized to lowercase", + domains: []string{"EXAMPLE.COM"}, + expected: List{"example.com"}, + wantErr: false, + }, + { + name: "Valid mixed case domain", + domains: []string{"ExAmPlE.CoM"}, + expected: List{"example.com"}, + wantErr: false, + }, + { + name: "Single letter TLD", + domains: []string{"example.x"}, + expected: List{"example.x"}, + wantErr: false, + }, + { + name: "Two letter domain labels", + domains: []string{"a.b"}, + expected: List{"a.b"}, + wantErr: false, + }, + { + name: "Single character domain", + domains: []string{"x"}, + expected: List{"x"}, + wantErr: false, + }, + { + name: "Wildcard with single letter TLD", + domains: []string{"*.x"}, + expected: List{"*.x"}, + wantErr: false, + }, + { + name: "Multi-level with single letter labels", + domains: []string{"a.b.c"}, + expected: List{"a.b.c"}, + wantErr: false, + }, { name: "Valid Unicode domain", domains: []string{"münchen.de"}, @@ -45,17 +91,92 @@ func TestValidateDomains(t *testing.T) { wantErr: false, }, { - name: "Invalid domain format", + name: "Valid domain starting with digit", + domains: []string{"123.example.com"}, + expected: List{"123.example.com"}, + wantErr: false, + }, + // Numeric TLDs are allowed for internal/private DNS use cases. + // While ICANN doesn't issue all-numeric gTLDs, the DNS protocol permits them + // and resolvers like systemd-resolved handle them correctly. + { + name: "Numeric TLD allowed", + domains: []string{"example.123"}, + expected: List{"example.123"}, + wantErr: false, + }, + { + name: "Single digit TLD allowed", + domains: []string{"example.1"}, + expected: List{"example.1"}, + wantErr: false, + }, + { + name: "All numeric labels allowed", + domains: []string{"123.456"}, + expected: List{"123.456"}, + wantErr: false, + }, + { + name: "Single numeric label allowed", + domains: []string{"123"}, + expected: List{"123"}, + wantErr: false, + }, + { + name: "Valid domain with double hyphen", + domains: []string{"test--example.com"}, + expected: List{"test--example.com"}, + wantErr: false, + }, + { + name: "Invalid leading hyphen", domains: []string{"-example.com"}, expected: nil, wantErr: true, }, { - name: "Invalid domain format 2", + name: "Invalid trailing hyphen", domains: []string{"example.com-"}, expected: nil, wantErr: true, }, + { + name: "Invalid leading dot", + domains: []string{".com"}, + expected: nil, + wantErr: true, + }, + { + name: "Invalid dot only", + domains: []string{"."}, + expected: nil, + wantErr: true, + }, + { + name: "Invalid double dot", + domains: []string{"example..com"}, + expected: nil, + wantErr: true, + }, + { + name: "Invalid special characters", + domains: []string{"example?,.com"}, + expected: nil, + wantErr: true, + }, + { + name: "Invalid space in domain", + domains: []string{"space .example.com"}, + expected: nil, + wantErr: true, + }, + { + name: "Invalid trailing space", + domains: []string{"example.com "}, + expected: nil, + wantErr: true, + }, { name: "Multiple domains valid and invalid", domains: []string{"google.com", "invalid,nbdomain.com", "münchen.de"}, @@ -86,6 +207,30 @@ func TestValidateDomains(t *testing.T) { expected: nil, wantErr: true, }, + { + name: "Valid 63 char label (max)", + domains: []string{label63 + ".com"}, + expected: List{Domain(label63 + ".com")}, + wantErr: false, + }, + { + name: "Invalid 64 char label (exceeds max)", + domains: []string{label64 + ".com"}, + expected: nil, + wantErr: true, + }, + { + name: "Valid 253 char domain (max)", + domains: []string{strings.Repeat("a.", 126) + "a"}, + expected: List{Domain(strings.Repeat("a.", 126) + "a")}, + wantErr: false, + }, + { + name: "Invalid 254+ char domain (exceeds max)", + domains: []string{strings.Repeat("ab.", 85)}, + expected: nil, + wantErr: true, + }, } for _, tt := range tests { @@ -118,6 +263,57 @@ func TestValidateDomainsList(t *testing.T) { domains: []string{"sub.ex-ample.com"}, wantErr: false, }, + { + name: "Uppercase domain accepted", + domains: []string{"EXAMPLE.COM"}, + wantErr: false, + }, + { + name: "Single letter TLD", + domains: []string{"example.x"}, + wantErr: false, + }, + { + name: "Two letter domain labels", + domains: []string{"a.b"}, + wantErr: false, + }, + { + name: "Single character domain", + domains: []string{"x"}, + wantErr: false, + }, + { + name: "Wildcard with single letter TLD", + domains: []string{"*.x"}, + wantErr: false, + }, + { + name: "Multi-level with single letter labels", + domains: []string{"a.b.c"}, + wantErr: false, + }, + // Numeric TLDs are allowed for internal/private DNS use cases. + { + name: "Numeric TLD allowed", + domains: []string{"example.123"}, + wantErr: false, + }, + { + name: "Single digit TLD allowed", + domains: []string{"example.1"}, + wantErr: false, + }, + { + name: "All numeric labels allowed", + domains: []string{"123.456"}, + wantErr: false, + }, + { + name: "Single numeric label allowed", + domains: []string{"123"}, + wantErr: false, + }, { name: "Underscores in labels", domains: []string{"_jabber._tcp.gmail.com"}, From 5333e55a8134b7f0feb58777e92c32509abcd037 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Thu, 29 Jan 2026 16:58:10 +0100 Subject: [PATCH 094/374] Fix WG watcher missing initial handshake (#5213) Start the WireGuard watcher before configuring the WG endpoint to ensure it captures the initial handshake timestamp. Previously, the watcher was started after endpoint configuration, causing it to miss the handshake that occurred during setup. --- client/internal/peer/conn.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/client/internal/peer/conn.go b/client/internal/peer/conn.go index 39133a6d3..eb455431d 100644 --- a/client/internal/peer/conn.go +++ b/client/internal/peer/conn.go @@ -390,6 +390,8 @@ func (conn *Conn) onICEConnectionIsReady(priority conntype.ConnPriority, iceConn } conn.Log.Infof("configure WireGuard endpoint to: %s", ep.String()) + conn.enableWgWatcherIfNeeded() + presharedKey := conn.presharedKey(iceConnInfo.RosenpassPubKey) if err = conn.endpointUpdater.ConfigureWGEndpoint(ep, presharedKey); err != nil { conn.handleConfigurationFailure(err, wgProxy) @@ -402,8 +404,6 @@ func (conn *Conn) onICEConnectionIsReady(priority conntype.ConnPriority, iceConn conn.wgProxyRelay.RedirectAs(ep) } - conn.enableWgWatcherIfNeeded() - conn.currentConnPriority = priority conn.statusICE.SetConnected() conn.updateIceState(iceConnInfo) @@ -501,6 +501,9 @@ func (conn *Conn) onRelayConnectionIsReady(rci RelayConnInfo) { wgProxy.Work() presharedKey := conn.presharedKey(rci.rosenpassPubKey) + + conn.enableWgWatcherIfNeeded() + if err := conn.endpointUpdater.ConfigureWGEndpoint(wgProxy.EndpointAddr(), presharedKey); err != nil { if err := wgProxy.CloseConn(); err != nil { conn.Log.Warnf("Failed to close relay connection: %v", err) @@ -509,8 +512,6 @@ func (conn *Conn) onRelayConnectionIsReady(rci RelayConnInfo) { return } - conn.enableWgWatcherIfNeeded() - wgConfigWorkaround() conn.rosenpassRemoteKey = rci.rosenpassPubKey conn.currentConnPriority = conntype.Relay From 101c813e9846bc30ce6c00766e6befdaf7dc965c Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Fri, 30 Jan 2026 17:42:14 +0800 Subject: [PATCH 095/374] [client] Add macOS default resolvers as fallback (#5201) --- client/internal/dns/host_darwin.go | 25 +++- client/internal/dns/host_darwin_test.go | 166 ++++++++++++++++++++++++ client/internal/dns/server.go | 3 +- client/internal/dns/test/mock.go | 6 + 4 files changed, 196 insertions(+), 4 deletions(-) diff --git a/client/internal/dns/host_darwin.go b/client/internal/dns/host_darwin.go index 71badf0d4..af84c8a85 100644 --- a/client/internal/dns/host_darwin.go +++ b/client/internal/dns/host_darwin.go @@ -9,8 +9,10 @@ import ( "io" "net/netip" "os/exec" + "slices" "strconv" "strings" + "sync" log "github.com/sirupsen/logrus" "golang.org/x/exp/maps" @@ -38,6 +40,9 @@ const ( type systemConfigurator struct { createdKeys map[string]struct{} systemDNSSettings SystemDNSSettings + + mu sync.RWMutex + origNameservers []netip.Addr } func newHostManager() (*systemConfigurator, error) { @@ -218,6 +223,7 @@ func (s *systemConfigurator) getSystemDNSSettings() (SystemDNSSettings, error) { } var dnsSettings SystemDNSSettings + var serverAddresses []netip.Addr inSearchDomainsArray := false inServerAddressesArray := false @@ -244,9 +250,12 @@ func (s *systemConfigurator) getSystemDNSSettings() (SystemDNSSettings, error) { dnsSettings.Domains = append(dnsSettings.Domains, searchDomain) } else if inServerAddressesArray { address := strings.Split(line, " : ")[1] - if ip, err := netip.ParseAddr(address); err == nil && ip.Is4() { - dnsSettings.ServerIP = ip.Unmap() - inServerAddressesArray = false // Stop reading after finding the first IPv4 address + if ip, err := netip.ParseAddr(address); err == nil && !ip.IsUnspecified() { + ip = ip.Unmap() + serverAddresses = append(serverAddresses, ip) + if !dnsSettings.ServerIP.IsValid() && ip.Is4() { + dnsSettings.ServerIP = ip + } } } } @@ -258,9 +267,19 @@ func (s *systemConfigurator) getSystemDNSSettings() (SystemDNSSettings, error) { // default to 53 port dnsSettings.ServerPort = DefaultPort + s.mu.Lock() + s.origNameservers = serverAddresses + s.mu.Unlock() + return dnsSettings, nil } +func (s *systemConfigurator) getOriginalNameservers() []netip.Addr { + s.mu.RLock() + defer s.mu.RUnlock() + return slices.Clone(s.origNameservers) +} + func (s *systemConfigurator) addSearchDomains(key, domains string, ip netip.Addr, port int) error { err := s.addDNSState(key, domains, ip, port, true) if err != nil { diff --git a/client/internal/dns/host_darwin_test.go b/client/internal/dns/host_darwin_test.go index c4efd17b0..28915de65 100644 --- a/client/internal/dns/host_darwin_test.go +++ b/client/internal/dns/host_darwin_test.go @@ -109,3 +109,169 @@ func removeTestDNSKey(key string) error { _, err := cmd.CombinedOutput() return err } + +func TestGetOriginalNameservers(t *testing.T) { + configurator := &systemConfigurator{ + createdKeys: make(map[string]struct{}), + origNameservers: []netip.Addr{ + netip.MustParseAddr("8.8.8.8"), + netip.MustParseAddr("1.1.1.1"), + }, + } + + servers := configurator.getOriginalNameservers() + assert.Len(t, servers, 2) + assert.Equal(t, netip.MustParseAddr("8.8.8.8"), servers[0]) + assert.Equal(t, netip.MustParseAddr("1.1.1.1"), servers[1]) +} + +func TestGetOriginalNameserversFromSystem(t *testing.T) { + configurator := &systemConfigurator{ + createdKeys: make(map[string]struct{}), + } + + _, err := configurator.getSystemDNSSettings() + require.NoError(t, err) + + servers := configurator.getOriginalNameservers() + + require.NotEmpty(t, servers, "expected at least one DNS server from system configuration") + + for _, server := range servers { + assert.True(t, server.IsValid(), "server address should be valid") + assert.False(t, server.IsUnspecified(), "server address should not be unspecified") + } + + t.Logf("found %d original nameservers: %v", len(servers), servers) +} + +func setupTestConfigurator(t *testing.T) (*systemConfigurator, *statemanager.Manager, func()) { + t.Helper() + + tmpDir := t.TempDir() + stateFile := filepath.Join(tmpDir, "state.json") + sm := statemanager.New(stateFile) + sm.RegisterState(&ShutdownState{}) + sm.Start() + + configurator := &systemConfigurator{ + createdKeys: make(map[string]struct{}), + } + + searchKey := getKeyWithInput(netbirdDNSStateKeyFormat, searchSuffix) + matchKey := getKeyWithInput(netbirdDNSStateKeyFormat, matchSuffix) + localKey := getKeyWithInput(netbirdDNSStateKeyFormat, localSuffix) + + cleanup := func() { + _ = sm.Stop(context.Background()) + for _, key := range []string{searchKey, matchKey, localKey} { + _ = removeTestDNSKey(key) + } + } + + return configurator, sm, cleanup +} + +func TestOriginalNameserversNoTransition(t *testing.T) { + netbirdIP := netip.MustParseAddr("100.64.0.1") + + testCases := []struct { + name string + routeAll bool + }{ + {"routeall_false", false}, + {"routeall_true", true}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + configurator, sm, cleanup := setupTestConfigurator(t) + defer cleanup() + + _, err := configurator.getSystemDNSSettings() + require.NoError(t, err) + initialServers := configurator.getOriginalNameservers() + t.Logf("Initial servers: %v", initialServers) + require.NotEmpty(t, initialServers) + + for _, srv := range initialServers { + require.NotEqual(t, netbirdIP, srv, "initial servers should not contain NetBird IP") + } + + config := HostDNSConfig{ + ServerIP: netbirdIP, + ServerPort: 53, + RouteAll: tc.routeAll, + Domains: []DomainConfig{{Domain: "example.com", MatchOnly: true}}, + } + + for i := 1; i <= 2; i++ { + err = configurator.applyDNSConfig(config, sm) + require.NoError(t, err) + + servers := configurator.getOriginalNameservers() + t.Logf("After apply %d (RouteAll=%v): %v", i, tc.routeAll, servers) + assert.Equal(t, initialServers, servers) + } + }) + } +} + +func TestOriginalNameserversRouteAllTransition(t *testing.T) { + netbirdIP := netip.MustParseAddr("100.64.0.1") + + testCases := []struct { + name string + initialRoute bool + }{ + {"start_with_routeall_false", false}, + {"start_with_routeall_true", true}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + configurator, sm, cleanup := setupTestConfigurator(t) + defer cleanup() + + _, err := configurator.getSystemDNSSettings() + require.NoError(t, err) + initialServers := configurator.getOriginalNameservers() + t.Logf("Initial servers: %v", initialServers) + require.NotEmpty(t, initialServers) + + config := HostDNSConfig{ + ServerIP: netbirdIP, + ServerPort: 53, + RouteAll: tc.initialRoute, + Domains: []DomainConfig{{Domain: "example.com", MatchOnly: true}}, + } + + // First apply + err = configurator.applyDNSConfig(config, sm) + require.NoError(t, err) + servers := configurator.getOriginalNameservers() + t.Logf("After first apply (RouteAll=%v): %v", tc.initialRoute, servers) + assert.Equal(t, initialServers, servers) + + // Toggle RouteAll + config.RouteAll = !tc.initialRoute + err = configurator.applyDNSConfig(config, sm) + require.NoError(t, err) + servers = configurator.getOriginalNameservers() + t.Logf("After toggle (RouteAll=%v): %v", config.RouteAll, servers) + assert.Equal(t, initialServers, servers) + + // Toggle back + config.RouteAll = tc.initialRoute + err = configurator.applyDNSConfig(config, sm) + require.NoError(t, err) + servers = configurator.getOriginalNameservers() + t.Logf("After toggle back (RouteAll=%v): %v", config.RouteAll, servers) + assert.Equal(t, initialServers, servers) + + for _, srv := range servers { + assert.NotEqual(t, netbirdIP, srv, "servers should not contain NetBird IP") + } + }) + } +} diff --git a/client/internal/dns/server.go b/client/internal/dns/server.go index 1ce7bf1c6..4d4fcc06e 100644 --- a/client/internal/dns/server.go +++ b/client/internal/dns/server.go @@ -615,7 +615,7 @@ func (s *DefaultServer) applyHostConfig() { s.registerFallback(config) } -// registerFallback registers original nameservers as low-priority fallback handlers +// registerFallback registers original nameservers as low-priority fallback handlers. func (s *DefaultServer) registerFallback(config HostDNSConfig) { hostMgrWithNS, ok := s.hostManager.(hostManagerWithOriginalNS) if !ok { @@ -624,6 +624,7 @@ func (s *DefaultServer) registerFallback(config HostDNSConfig) { originalNameservers := hostMgrWithNS.getOriginalNameservers() if len(originalNameservers) == 0 { + s.deregisterHandler([]string{nbdns.RootZone}, PriorityFallback) return } diff --git a/client/internal/dns/test/mock.go b/client/internal/dns/test/mock.go index 1db452805..8d16689bf 100644 --- a/client/internal/dns/test/mock.go +++ b/client/internal/dns/test/mock.go @@ -8,15 +8,21 @@ import ( type MockResponseWriter struct { WriteMsgFunc func(m *dns.Msg) error + lastResponse *dns.Msg } func (rw *MockResponseWriter) WriteMsg(m *dns.Msg) error { + rw.lastResponse = m if rw.WriteMsgFunc != nil { return rw.WriteMsgFunc(m) } return nil } +func (rw *MockResponseWriter) GetLastResponse() *dns.Msg { + return rw.lastResponse +} + func (rw *MockResponseWriter) LocalAddr() net.Addr { return nil } func (rw *MockResponseWriter) RemoteAddr() net.Addr { return nil } func (rw *MockResponseWriter) Write([]byte) (int, error) { return 0, nil } From 0c990ab6623530b2ad6925a8dce04bdcc2455baa Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Fri, 30 Jan 2026 17:42:39 +0800 Subject: [PATCH 096/374] [client] Add block inbound option to the embed client (#5215) --- client/embed/embed.go | 3 +++ client/internal/networkmonitor/monitor.go | 6 ++++++ client/internal/wg_iface_monitor.go | 7 +++++++ 3 files changed, 16 insertions(+) diff --git a/client/embed/embed.go b/client/embed/embed.go index e266aae28..e73f37e35 100644 --- a/client/embed/embed.go +++ b/client/embed/embed.go @@ -69,6 +69,8 @@ type Options struct { StatePath string // DisableClientRoutes disables the client routes DisableClientRoutes bool + // BlockInbound blocks all inbound connections from peers + BlockInbound bool } // validateCredentials checks that exactly one credential type is provided @@ -137,6 +139,7 @@ func New(opts Options) (*Client, error) { PreSharedKey: &opts.PreSharedKey, DisableServerRoutes: &t, DisableClientRoutes: &opts.DisableClientRoutes, + BlockInbound: &opts.BlockInbound, } if opts.ConfigPath != "" { config, err = profilemanager.UpdateOrCreateConfig(input) diff --git a/client/internal/networkmonitor/monitor.go b/client/internal/networkmonitor/monitor.go index 6d019258d..6dd81f68c 100644 --- a/client/internal/networkmonitor/monitor.go +++ b/client/internal/networkmonitor/monitor.go @@ -14,6 +14,7 @@ import ( "github.com/cenkalti/backoff/v4" log "github.com/sirupsen/logrus" + "github.com/netbirdio/netbird/client/iface/netstack" "github.com/netbirdio/netbird/client/internal/routemanager/systemops" ) @@ -37,6 +38,11 @@ func New() *NetworkMonitor { // Listen begins monitoring network changes. When a change is detected, this function will return without error. func (nw *NetworkMonitor) Listen(ctx context.Context) (err error) { + if netstack.IsEnabled() { + log.Debugf("Network monitor: skipping in netstack mode") + return nil + } + nw.mu.Lock() if nw.cancel != nil { nw.mu.Unlock() diff --git a/client/internal/wg_iface_monitor.go b/client/internal/wg_iface_monitor.go index 78d70c15b..a870c1145 100644 --- a/client/internal/wg_iface_monitor.go +++ b/client/internal/wg_iface_monitor.go @@ -9,6 +9,8 @@ import ( "time" log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/iface/netstack" ) // WGIfaceMonitor monitors the WireGuard interface lifecycle and restarts the engine @@ -35,6 +37,11 @@ func (m *WGIfaceMonitor) Start(ctx context.Context, ifaceName string) (shouldRes return false, errors.New("not supported on mobile platforms") } + if netstack.IsEnabled() { + log.Debugf("Interface monitor: skipped in netstack mode") + return false, nil + } + if ifaceName == "" { log.Debugf("Interface monitor: empty interface name, skipping monitor") return false, errors.New("empty interface name") From 3a0cf230a179e767434018814a0dee5be2c526dd Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Sun, 1 Feb 2026 14:26:22 +0100 Subject: [PATCH 097/374] Disable local users for a smooth single-idp mode (#5226) Add LocalAuthDisabled option to embedded IdP configuration This adds the ability to disable local (email/password) authentication when using the embedded Dex identity provider. When disabled, users can only authenticate via external identity providers (Google, OIDC, etc.). This simplifies user login when there is only one external IdP configured. The login page will redirect directly to the IdP login page. Key changes: Added LocalAuthDisabled field to EmbeddedIdPConfig Added methods to check and toggle local auth: IsLocalAuthEnabled, HasNonLocalConnectors, DisableLocalAuth, EnableLocalAuth Validation prevents disabling local auth if no external connectors are configured Existing local users are preserved when disabled and can login again when re-enabled Operations are idempotent (disabling already disabled is a no-op) --- idp/dex/connector.go | 54 ++++ management/internals/server/modules.go | 9 +- management/server/account.go | 15 +- management/server/http/handler.go | 4 +- .../handlers/accounts/accounts_handler.go | 25 +- .../accounts/accounts_handler_test.go | 7 +- .../handlers/instance/instance_handler.go | 2 +- .../handlers/users/invites_handler_test.go | 17 ++ .../testing/testing_tools/channel/channel.go | 2 +- management/server/idp/embedded.go | 50 ++++ management/server/idp/embedded_test.go | 231 ++++++++++++++++++ management/server/instance/manager.go | 11 +- management/server/settings/manager.go | 15 +- management/server/types/settings.go | 10 + management/server/user.go | 12 + shared/management/http/api/openapi.yml | 5 + shared/management/http/api/types.gen.go | 3 + 17 files changed, 450 insertions(+), 22 deletions(-) diff --git a/idp/dex/connector.go b/idp/dex/connector.go index cad682141..ba2bb1f00 100644 --- a/idp/dex/connector.go +++ b/idp/dex/connector.go @@ -327,6 +327,60 @@ func ensureLocalConnector(ctx context.Context, stor storage.Storage) error { return nil } +// HasNonLocalConnectors checks if there are any connectors other than the local connector. +func (p *Provider) HasNonLocalConnectors(ctx context.Context) (bool, error) { + connectors, err := p.storage.ListConnectors(ctx) + if err != nil { + return false, fmt.Errorf("failed to list connectors: %w", err) + } + + p.logger.Info("checking for non-local connectors", "total_connectors", len(connectors)) + for _, conn := range connectors { + p.logger.Info("found connector in storage", "id", conn.ID, "type", conn.Type, "name", conn.Name) + if conn.ID != "local" || conn.Type != "local" { + p.logger.Info("found non-local connector", "id", conn.ID) + return true, nil + } + } + p.logger.Info("no non-local connectors found") + return false, nil +} + +// DisableLocalAuth removes the local (password) connector. +// Returns an error if no other connectors are configured. +func (p *Provider) DisableLocalAuth(ctx context.Context) error { + hasOthers, err := p.HasNonLocalConnectors(ctx) + if err != nil { + return err + } + if !hasOthers { + return fmt.Errorf("cannot disable local authentication: no other identity providers configured") + } + + // Check if local connector exists + _, err = p.storage.GetConnector(ctx, "local") + if errors.Is(err, storage.ErrNotFound) { + // Already disabled + return nil + } + if err != nil { + return fmt.Errorf("failed to check local connector: %w", err) + } + + // Delete the local connector + if err := p.storage.DeleteConnector(ctx, "local"); err != nil { + return fmt.Errorf("failed to delete local connector: %w", err) + } + + p.logger.Info("local authentication disabled") + return nil +} + +// EnableLocalAuth creates the local (password) connector if it doesn't exist. +func (p *Provider) EnableLocalAuth(ctx context.Context) error { + return ensureLocalConnector(ctx, p.storage) +} + // ensureStaticConnectors creates or updates static connectors in storage func ensureStaticConnectors(ctx context.Context, stor storage.Storage, connectors []Connector) error { for _, conn := range connectors { diff --git a/management/internals/server/modules.go b/management/internals/server/modules.go index b51e2ebb2..31badf9d0 100644 --- a/management/internals/server/modules.go +++ b/management/internals/server/modules.go @@ -69,7 +69,14 @@ func (s *BaseServer) UsersManager() users.Manager { func (s *BaseServer) SettingsManager() settings.Manager { return Create(s, func() settings.Manager { extraSettingsManager := integrations.NewManager(s.EventStore()) - return settings.NewManager(s.Store(), s.UsersManager(), extraSettingsManager, s.PermissionsManager()) + + idpConfig := settings.IdpConfig{} + if s.Config.EmbeddedIdP != nil && s.Config.EmbeddedIdP.Enabled { + idpConfig.EmbeddedIdpEnabled = true + idpConfig.LocalAuthDisabled = s.Config.EmbeddedIdP.LocalAuthDisabled + } + + return settings.NewManager(s.Store(), s.UsersManager(), extraSettingsManager, s.PermissionsManager(), idpConfig) }) } diff --git a/management/server/account.go b/management/server/account.go index ba5f0cffa..8f9dad031 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -26,7 +26,6 @@ import ( "golang.org/x/exp/maps" nbdns "github.com/netbirdio/netbird/dns" - nbdomain "github.com/netbirdio/netbird/shared/management/domain" "github.com/netbirdio/netbird/formatter/hook" "github.com/netbirdio/netbird/management/internals/controllers/network_map" nbconfig "github.com/netbirdio/netbird/management/internals/server/config" @@ -49,6 +48,7 @@ import ( "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/management/server/util" "github.com/netbirdio/netbird/route" + nbdomain "github.com/netbirdio/netbird/shared/management/domain" "github.com/netbirdio/netbird/shared/management/status" ) @@ -795,6 +795,19 @@ func IsEmbeddedIdp(i idp.Manager) bool { return ok } +// IsLocalAuthDisabled checks if local (email/password) authentication is disabled. +// Returns true only when using embedded IDP with local auth disabled in config. +func IsLocalAuthDisabled(ctx context.Context, i idp.Manager) bool { + if isNil(i) { + return false + } + embeddedIdp, ok := i.(*idp.EmbeddedIdPManager) + if !ok { + return false + } + return embeddedIdp.IsLocalAuthDisabled() +} + // addAccountIDToIDPAppMeta update user's app metadata in idp manager func (am *DefaultAccountManager) addAccountIDToIDPAppMeta(ctx context.Context, userID string, accountID string) error { if !isNil(am.idpManager) && !IsEmbeddedIdp(am.idpManager) { diff --git a/management/server/http/handler.go b/management/server/http/handler.go index 32a97ff44..79431a0a3 100644 --- a/management/server/http/handler.go +++ b/management/server/http/handler.go @@ -129,14 +129,14 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks return nil, fmt.Errorf("register integrations endpoints: %w", err) } - // Check if embedded IdP is enabled + // Check if embedded IdP is enabled for instance manager embeddedIdP, embeddedIdpEnabled := idpManager.(*idpmanager.EmbeddedIdPManager) instanceManager, err := nbinstance.NewManager(ctx, accountManager.GetStore(), embeddedIdP) if err != nil { return nil, fmt.Errorf("failed to create instance manager: %w", err) } - accounts.AddEndpoints(accountManager, settingsManager, embeddedIdpEnabled, router) + accounts.AddEndpoints(accountManager, settingsManager, router) peers.AddEndpoints(accountManager, router, networkMapController) users.AddEndpoints(accountManager, router) users.AddInvitesEndpoints(accountManager, router) diff --git a/management/server/http/handlers/accounts/accounts_handler.go b/management/server/http/handlers/accounts/accounts_handler.go index de778d59a..122c061ce 100644 --- a/management/server/http/handlers/accounts/accounts_handler.go +++ b/management/server/http/handlers/accounts/accounts_handler.go @@ -36,24 +36,22 @@ const ( // handler is a handler that handles the server.Account HTTP endpoints type handler struct { - accountManager account.Manager - settingsManager settings.Manager - embeddedIdpEnabled bool + accountManager account.Manager + settingsManager settings.Manager } -func AddEndpoints(accountManager account.Manager, settingsManager settings.Manager, embeddedIdpEnabled bool, router *mux.Router) { - accountsHandler := newHandler(accountManager, settingsManager, embeddedIdpEnabled) +func AddEndpoints(accountManager account.Manager, settingsManager settings.Manager, router *mux.Router) { + accountsHandler := newHandler(accountManager, settingsManager) router.HandleFunc("/accounts/{accountId}", accountsHandler.updateAccount).Methods("PUT", "OPTIONS") router.HandleFunc("/accounts/{accountId}", accountsHandler.deleteAccount).Methods("DELETE", "OPTIONS") router.HandleFunc("/accounts", accountsHandler.getAllAccounts).Methods("GET", "OPTIONS") } // newHandler creates a new handler HTTP handler -func newHandler(accountManager account.Manager, settingsManager settings.Manager, embeddedIdpEnabled bool) *handler { +func newHandler(accountManager account.Manager, settingsManager settings.Manager) *handler { return &handler{ - accountManager: accountManager, - settingsManager: settingsManager, - embeddedIdpEnabled: embeddedIdpEnabled, + accountManager: accountManager, + settingsManager: settingsManager, } } @@ -165,7 +163,7 @@ func (h *handler) getAllAccounts(w http.ResponseWriter, r *http.Request) { return } - resp := toAccountResponse(accountID, settings, meta, onboarding, h.embeddedIdpEnabled) + resp := toAccountResponse(accountID, settings, meta, onboarding) util.WriteJSONObject(r.Context(), w, []*api.Account{resp}) } @@ -292,7 +290,7 @@ func (h *handler) updateAccount(w http.ResponseWriter, r *http.Request) { return } - resp := toAccountResponse(accountID, updatedSettings, meta, updatedOnboarding, h.embeddedIdpEnabled) + resp := toAccountResponse(accountID, updatedSettings, meta, updatedOnboarding) util.WriteJSONObject(r.Context(), w, &resp) } @@ -321,7 +319,7 @@ func (h *handler) deleteAccount(w http.ResponseWriter, r *http.Request) { util.WriteJSONObject(r.Context(), w, util.EmptyObject{}) } -func toAccountResponse(accountID string, settings *types.Settings, meta *types.AccountMeta, onboarding *types.AccountOnboarding, embeddedIdpEnabled bool) *api.Account { +func toAccountResponse(accountID string, settings *types.Settings, meta *types.AccountMeta, onboarding *types.AccountOnboarding) *api.Account { jwtAllowGroups := settings.JWTAllowGroups if jwtAllowGroups == nil { jwtAllowGroups = []string{} @@ -341,7 +339,8 @@ func toAccountResponse(accountID string, settings *types.Settings, meta *types.A LazyConnectionEnabled: &settings.LazyConnectionEnabled, DnsDomain: &settings.DNSDomain, AutoUpdateVersion: &settings.AutoUpdateVersion, - EmbeddedIdpEnabled: &embeddedIdpEnabled, + EmbeddedIdpEnabled: &settings.EmbeddedIdpEnabled, + LocalAuthDisabled: &settings.LocalAuthDisabled, } if settings.NetworkRange.IsValid() { diff --git a/management/server/http/handlers/accounts/accounts_handler_test.go b/management/server/http/handlers/accounts/accounts_handler_test.go index e455372c8..6cbd5908d 100644 --- a/management/server/http/handlers/accounts/accounts_handler_test.go +++ b/management/server/http/handlers/accounts/accounts_handler_test.go @@ -33,7 +33,6 @@ func initAccountsTestData(t *testing.T, account *types.Account) *handler { AnyTimes() return &handler{ - embeddedIdpEnabled: false, accountManager: &mock_server.MockAccountManager{ GetAccountSettingsFunc: func(ctx context.Context, accountID string, userID string) (*types.Settings, error) { return account.Settings, nil @@ -124,6 +123,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { DnsDomain: sr(""), AutoUpdateVersion: sr(""), EmbeddedIdpEnabled: br(false), + LocalAuthDisabled: br(false), }, expectedArray: true, expectedID: accountID, @@ -148,6 +148,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { DnsDomain: sr(""), AutoUpdateVersion: sr(""), EmbeddedIdpEnabled: br(false), + LocalAuthDisabled: br(false), }, expectedArray: false, expectedID: accountID, @@ -172,6 +173,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { DnsDomain: sr(""), AutoUpdateVersion: sr("latest"), EmbeddedIdpEnabled: br(false), + LocalAuthDisabled: br(false), }, expectedArray: false, expectedID: accountID, @@ -196,6 +198,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { DnsDomain: sr(""), AutoUpdateVersion: sr(""), EmbeddedIdpEnabled: br(false), + LocalAuthDisabled: br(false), }, expectedArray: false, expectedID: accountID, @@ -220,6 +223,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { DnsDomain: sr(""), AutoUpdateVersion: sr(""), EmbeddedIdpEnabled: br(false), + LocalAuthDisabled: br(false), }, expectedArray: false, expectedID: accountID, @@ -244,6 +248,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { DnsDomain: sr(""), AutoUpdateVersion: sr(""), EmbeddedIdpEnabled: br(false), + LocalAuthDisabled: br(false), }, expectedArray: false, expectedID: accountID, diff --git a/management/server/http/handlers/instance/instance_handler.go b/management/server/http/handlers/instance/instance_handler.go index 5d8baaf8d..cd9fae6b8 100644 --- a/management/server/http/handlers/instance/instance_handler.go +++ b/management/server/http/handlers/instance/instance_handler.go @@ -46,7 +46,7 @@ func (h *handler) getInstanceStatus(w http.ResponseWriter, r *http.Request) { util.WriteErrorResponse("failed to check instance status", http.StatusInternalServerError, w) return } - + log.WithContext(r.Context()).Infof("instance setup status: %v", setupRequired) util.WriteJSONObject(r.Context(), w, api.InstanceStatus{ SetupRequired: setupRequired, }) diff --git a/management/server/http/handlers/users/invites_handler_test.go b/management/server/http/handlers/users/invites_handler_test.go index 80826b9d4..529ea24d6 100644 --- a/management/server/http/handlers/users/invites_handler_test.go +++ b/management/server/http/handlers/users/invites_handler_test.go @@ -205,6 +205,14 @@ func TestCreateInvite(t *testing.T) { return nil, status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") }, }, + { + name: "local auth disabled", + requestBody: `{"email":"test@example.com","name":"Test User","role":"user","auto_groups":[]}`, + expectedStatus: http.StatusPreconditionFailed, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) { + return nil, status.Errorf(status.PreconditionFailed, "local user creation is disabled - use an external identity provider") + }, + }, { name: "invalid JSON", requestBody: `{invalid json}`, @@ -376,6 +384,15 @@ func TestAcceptInvite(t *testing.T) { return status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") }, }, + { + name: "local auth disabled", + token: testInviteToken, + requestBody: `{"password":"SecurePass123!"}`, + expectedStatus: http.StatusPreconditionFailed, + mockFunc: func(ctx context.Context, token, password string) error { + return status.Errorf(status.PreconditionFailed, "local user creation is disabled - use an external identity provider") + }, + }, { name: "missing token", token: "", diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index 9339c3541..1fd4c9bad 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -73,7 +73,7 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee proxyController := integrations.NewController(store) userManager := users.NewManager(store) permissionsManager := permissions.NewManager(store) - settingsManager := settings.NewManager(store, userManager, integrations.NewManager(&activity.InMemoryEventStore{}), permissionsManager) + settingsManager := settings.NewManager(store, userManager, integrations.NewManager(&activity.InMemoryEventStore{}), permissionsManager, settings.IdpConfig{}) peersManager := peers.NewManager(store, permissionsManager) jobManager := job.NewJobManager(nil, store, peersManager) diff --git a/management/server/idp/embedded.go b/management/server/idp/embedded.go index db7a91fa3..a27050a26 100644 --- a/management/server/idp/embedded.go +++ b/management/server/idp/embedded.go @@ -43,6 +43,11 @@ type EmbeddedIdPConfig struct { Owner *OwnerConfig // SignKeyRefreshEnabled enables automatic key rotation for signing keys SignKeyRefreshEnabled bool + // LocalAuthDisabled disables the local (email/password) authentication connector. + // When true, users cannot authenticate via email/password, only via external identity providers. + // Existing local users are preserved and will be able to login again if re-enabled. + // Cannot be enabled if no external identity provider connectors are configured. + LocalAuthDisabled bool } // EmbeddedStorageConfig holds storage configuration for the embedded IdP. @@ -105,6 +110,8 @@ func (c *EmbeddedIdPConfig) ToYAMLConfig() (*dex.YAMLConfig, error) { Issuer: "NetBird", Theme: "light", }, + // Always enable password DB initially - we disable the local connector after startup if needed. + // This ensures Dex has at least one connector during initialization. EnablePasswordDB: true, StaticClients: []storage.Client{ { @@ -192,11 +199,32 @@ func NewEmbeddedIdPManager(ctx context.Context, config *EmbeddedIdPConfig, appMe return nil, err } + log.WithContext(ctx).Debugf("initializing embedded Dex IDP with config: %+v", config) + provider, err := dex.NewProviderFromYAML(ctx, yamlConfig) if err != nil { return nil, fmt.Errorf("failed to create embedded IdP provider: %w", err) } + // If local auth is disabled, validate that other connectors exist + if config.LocalAuthDisabled { + hasOthers, err := provider.HasNonLocalConnectors(ctx) + if err != nil { + _ = provider.Stop(ctx) + return nil, fmt.Errorf("failed to check connectors: %w", err) + } + if !hasOthers { + _ = provider.Stop(ctx) + return nil, fmt.Errorf("cannot disable local authentication: no other identity providers configured") + } + // Ensure local connector is removed (it might exist from a previous run) + if err := provider.DisableLocalAuth(ctx); err != nil { + _ = provider.Stop(ctx) + return nil, fmt.Errorf("failed to disable local auth: %w", err) + } + log.WithContext(ctx).Info("local authentication disabled - only external identity providers can be used") + } + log.WithContext(ctx).Infof("embedded Dex IDP initialized with issuer: %s", yamlConfig.Issuer) return &EmbeddedIdPManager{ @@ -281,6 +309,8 @@ func (m *EmbeddedIdPManager) GetAllAccounts(ctx context.Context) (map[string][]* return nil, fmt.Errorf("failed to list users: %w", err) } + log.WithContext(ctx).Debugf("retrieved %d users from embedded IdP", len(users)) + indexedUsers := make(map[string][]*UserData) for _, user := range users { indexedUsers[UnsetAccountID] = append(indexedUsers[UnsetAccountID], &UserData{ @@ -290,11 +320,17 @@ func (m *EmbeddedIdPManager) GetAllAccounts(ctx context.Context) (map[string][]* }) } + log.WithContext(ctx).Debugf("retrieved %d users from embedded IdP", len(indexedUsers[UnsetAccountID])) + return indexedUsers, nil } // CreateUser creates a new user in the embedded IdP. func (m *EmbeddedIdPManager) CreateUser(ctx context.Context, email, name, accountID, invitedByEmail string) (*UserData, error) { + if m.config.LocalAuthDisabled { + return nil, fmt.Errorf("local user creation is disabled") + } + if m.appMetrics != nil { m.appMetrics.IDPMetrics().CountCreateUser() } @@ -364,6 +400,10 @@ func (m *EmbeddedIdPManager) GetUserByEmail(ctx context.Context, email string) ( // Unlike CreateUser which auto-generates a password, this method uses the provided password. // This is useful for instance setup where the user provides their own password. func (m *EmbeddedIdPManager) CreateUserWithPassword(ctx context.Context, email, password, name string) (*UserData, error) { + if m.config.LocalAuthDisabled { + return nil, fmt.Errorf("local user creation is disabled") + } + if m.appMetrics != nil { m.appMetrics.IDPMetrics().CountCreateUser() } @@ -553,3 +593,13 @@ func (m *EmbeddedIdPManager) GetClientIDs() []string { func (m *EmbeddedIdPManager) GetUserIDClaim() string { return defaultUserIDClaim } + +// IsLocalAuthDisabled returns whether local authentication is disabled based on configuration. +func (m *EmbeddedIdPManager) IsLocalAuthDisabled() bool { + return m.config.LocalAuthDisabled +} + +// HasNonLocalConnectors checks if there are any identity provider connectors other than local. +func (m *EmbeddedIdPManager) HasNonLocalConnectors(ctx context.Context) (bool, error) { + return m.provider.HasNonLocalConnectors(ctx) +} diff --git a/management/server/idp/embedded_test.go b/management/server/idp/embedded_test.go index d8d3009dd..4dda483fb 100644 --- a/management/server/idp/embedded_test.go +++ b/management/server/idp/embedded_test.go @@ -370,3 +370,234 @@ func TestEmbeddedIdPManager_GetLocalKeysLocation(t *testing.T) { }) } } + +func TestEmbeddedIdPManager_LocalAuthDisabled(t *testing.T) { + ctx := context.Background() + + t.Run("cannot start with local auth disabled without other connectors", func(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "embedded-idp-test-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + config := &EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + LocalAuthDisabled: true, + Storage: EmbeddedStorageConfig{ + Type: "sqlite3", + Config: EmbeddedStorageTypeConfig{ + File: filepath.Join(tmpDir, "dex.db"), + }, + }, + } + + _, err = NewEmbeddedIdPManager(ctx, config, nil) + require.Error(t, err) + assert.Contains(t, err.Error(), "no other identity providers configured") + }) + + t.Run("local auth enabled by default", func(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "embedded-idp-test-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + config := &EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + Storage: EmbeddedStorageConfig{ + Type: "sqlite3", + Config: EmbeddedStorageTypeConfig{ + File: filepath.Join(tmpDir, "dex.db"), + }, + }, + } + + manager, err := NewEmbeddedIdPManager(ctx, config, nil) + require.NoError(t, err) + defer func() { _ = manager.Stop(ctx) }() + + // Verify local auth is enabled by default + assert.False(t, manager.IsLocalAuthDisabled()) + }) + + t.Run("start with local auth disabled when connector exists", func(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "embedded-idp-test-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + dbFile := filepath.Join(tmpDir, "dex.db") + + // First, create a manager with local auth enabled and add a connector + config1 := &EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + Storage: EmbeddedStorageConfig{ + Type: "sqlite3", + Config: EmbeddedStorageTypeConfig{ + File: dbFile, + }, + }, + } + + manager1, err := NewEmbeddedIdPManager(ctx, config1, nil) + require.NoError(t, err) + + // Create a user + userData, err := manager1.CreateUser(ctx, "preserved@example.com", "Preserved User", "account1", "admin@example.com") + require.NoError(t, err) + userID := userData.ID + + // Add an external connector (Google doesn't require OIDC discovery) + _, err = manager1.CreateConnector(ctx, &dex.ConnectorConfig{ + ID: "google-test", + Name: "Google Test", + Type: "google", + ClientID: "test-client-id", + ClientSecret: "test-client-secret", + }) + require.NoError(t, err) + + // Stop the first manager + err = manager1.Stop(ctx) + require.NoError(t, err) + + // Now create a new manager with local auth disabled + config2 := &EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + LocalAuthDisabled: true, + Storage: EmbeddedStorageConfig{ + Type: "sqlite3", + Config: EmbeddedStorageTypeConfig{ + File: dbFile, + }, + }, + } + + manager2, err := NewEmbeddedIdPManager(ctx, config2, nil) + require.NoError(t, err) + defer func() { _ = manager2.Stop(ctx) }() + + // Verify local auth is disabled via config + assert.True(t, manager2.IsLocalAuthDisabled()) + + // Verify the user still exists in storage (just can't login via local) + lookedUp, err := manager2.GetUserDataByID(ctx, userID, AppMetadata{}) + require.NoError(t, err) + assert.Equal(t, "preserved@example.com", lookedUp.Email) + }) + + t.Run("CreateUser fails when local auth is disabled", func(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "embedded-idp-test-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + dbFile := filepath.Join(tmpDir, "dex.db") + + // First, create a manager and add an external connector + config1 := &EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + Storage: EmbeddedStorageConfig{ + Type: "sqlite3", + Config: EmbeddedStorageTypeConfig{ + File: dbFile, + }, + }, + } + + manager1, err := NewEmbeddedIdPManager(ctx, config1, nil) + require.NoError(t, err) + + _, err = manager1.CreateConnector(ctx, &dex.ConnectorConfig{ + ID: "google-test", + Name: "Google Test", + Type: "google", + ClientID: "test-client-id", + ClientSecret: "test-client-secret", + }) + require.NoError(t, err) + + err = manager1.Stop(ctx) + require.NoError(t, err) + + // Create manager with local auth disabled + config2 := &EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + LocalAuthDisabled: true, + Storage: EmbeddedStorageConfig{ + Type: "sqlite3", + Config: EmbeddedStorageTypeConfig{ + File: dbFile, + }, + }, + } + + manager2, err := NewEmbeddedIdPManager(ctx, config2, nil) + require.NoError(t, err) + defer func() { _ = manager2.Stop(ctx) }() + + // Try to create a user - should fail + _, err = manager2.CreateUser(ctx, "newuser@example.com", "New User", "account1", "admin@example.com") + require.Error(t, err) + assert.Contains(t, err.Error(), "local user creation is disabled") + }) + + t.Run("CreateUserWithPassword fails when local auth is disabled", func(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "embedded-idp-test-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + dbFile := filepath.Join(tmpDir, "dex.db") + + // First, create a manager and add an external connector + config1 := &EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + Storage: EmbeddedStorageConfig{ + Type: "sqlite3", + Config: EmbeddedStorageTypeConfig{ + File: dbFile, + }, + }, + } + + manager1, err := NewEmbeddedIdPManager(ctx, config1, nil) + require.NoError(t, err) + + _, err = manager1.CreateConnector(ctx, &dex.ConnectorConfig{ + ID: "google-test", + Name: "Google Test", + Type: "google", + ClientID: "test-client-id", + ClientSecret: "test-client-secret", + }) + require.NoError(t, err) + + err = manager1.Stop(ctx) + require.NoError(t, err) + + // Create manager with local auth disabled + config2 := &EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + LocalAuthDisabled: true, + Storage: EmbeddedStorageConfig{ + Type: "sqlite3", + Config: EmbeddedStorageTypeConfig{ + File: dbFile, + }, + }, + } + + manager2, err := NewEmbeddedIdPManager(ctx, config2, nil) + require.NoError(t, err) + defer func() { _ = manager2.Stop(ctx) }() + + // Try to create a user with password - should fail + _, err = manager2.CreateUserWithPassword(ctx, "newuser@example.com", "SecurePass123!", "New User") + require.Error(t, err) + assert.Contains(t, err.Error(), "local user creation is disabled") + }) +} diff --git a/management/server/instance/manager.go b/management/server/instance/manager.go index 6a0509ebd..19e3abdc0 100644 --- a/management/server/instance/manager.go +++ b/management/server/instance/manager.go @@ -104,13 +104,22 @@ func NewManager(ctx context.Context, store store.Store, idpManager idp.Manager) } func (m *DefaultManager) loadSetupRequired(ctx context.Context) error { + // Check if there are any accounts in the NetBird store + numAccounts, err := m.store.GetAccountsCounter(ctx) + if err != nil { + return err + } + hasAccounts := numAccounts > 0 + + // Check if there are any users in the embedded IdP (Dex) users, err := m.embeddedIdpManager.GetAllAccounts(ctx) if err != nil { return err } + hasLocalUsers := len(users) > 0 m.setupMu.Lock() - m.setupRequired = len(users) == 0 + m.setupRequired = !(hasAccounts || hasLocalUsers) m.setupMu.Unlock() return nil diff --git a/management/server/settings/manager.go b/management/server/settings/manager.go index 2b2896572..74af0a3ef 100644 --- a/management/server/settings/manager.go +++ b/management/server/settings/manager.go @@ -24,19 +24,28 @@ type Manager interface { UpdateExtraSettings(ctx context.Context, accountID, userID string, extraSettings *types.ExtraSettings) (bool, error) } +// IdpConfig holds IdP-related configuration that is set at runtime +// and not stored in the database. +type IdpConfig struct { + EmbeddedIdpEnabled bool + LocalAuthDisabled bool +} + type managerImpl struct { store store.Store extraSettingsManager extra_settings.Manager userManager users.Manager permissionsManager permissions.Manager + idpConfig IdpConfig } -func NewManager(store store.Store, userManager users.Manager, extraSettingsManager extra_settings.Manager, permissionsManager permissions.Manager) Manager { +func NewManager(store store.Store, userManager users.Manager, extraSettingsManager extra_settings.Manager, permissionsManager permissions.Manager, idpConfig IdpConfig) Manager { return &managerImpl{ store: store, extraSettingsManager: extraSettingsManager, userManager: userManager, permissionsManager: permissionsManager, + idpConfig: idpConfig, } } @@ -74,6 +83,10 @@ func (m *managerImpl) GetSettings(ctx context.Context, accountID, userID string) settings.Extra.FlowDnsCollectionEnabled = extraSettings.FlowDnsCollectionEnabled } + // Fill in IdP-related runtime settings + settings.EmbeddedIdpEnabled = m.idpConfig.EmbeddedIdpEnabled + settings.LocalAuthDisabled = m.idpConfig.LocalAuthDisabled + return settings, nil } diff --git a/management/server/types/settings.go b/management/server/types/settings.go index 867e12bef..a94e01b78 100644 --- a/management/server/types/settings.go +++ b/management/server/types/settings.go @@ -55,6 +55,14 @@ type Settings struct { // AutoUpdateVersion client auto-update version AutoUpdateVersion string `gorm:"default:'disabled'"` + + // EmbeddedIdpEnabled indicates if the embedded identity provider is enabled. + // This is a runtime-only field, not stored in the database. + EmbeddedIdpEnabled bool `gorm:"-"` + + // LocalAuthDisabled indicates if local (email/password) authentication is disabled. + // This is a runtime-only field, not stored in the database. + LocalAuthDisabled bool `gorm:"-"` } // Copy copies the Settings struct @@ -76,6 +84,8 @@ func (s *Settings) Copy() *Settings { DNSDomain: s.DNSDomain, NetworkRange: s.NetworkRange, AutoUpdateVersion: s.AutoUpdateVersion, + EmbeddedIdpEnabled: s.EmbeddedIdpEnabled, + LocalAuthDisabled: s.LocalAuthDisabled, } if s.Extra != nil { settings.Extra = s.Extra.Copy() diff --git a/management/server/user.go b/management/server/user.go index 51da7a633..48005f325 100644 --- a/management/server/user.go +++ b/management/server/user.go @@ -191,6 +191,10 @@ func (am *DefaultAccountManager) createNewIdpUser(ctx context.Context, accountID // Unlike createNewIdpUser, this method fetches user data directly from the database // since the embedded IdP usage ensures the username and email are stored locally in the User table. func (am *DefaultAccountManager) createEmbeddedIdpUser(ctx context.Context, accountID string, inviterID string, invite *types.UserInfo) (*idp.UserData, error) { + if IsLocalAuthDisabled(ctx, am.idpManager) { + return nil, status.Errorf(status.PreconditionFailed, "local user creation is disabled - use an external identity provider") + } + inviter, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthNone, inviterID) if err != nil { return nil, fmt.Errorf("failed to get inviter user: %w", err) @@ -1462,6 +1466,10 @@ func (am *DefaultAccountManager) CreateUserInvite(ctx context.Context, accountID return nil, status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") } + if IsLocalAuthDisabled(ctx, am.idpManager) { + return nil, status.Errorf(status.PreconditionFailed, "local user creation is disabled - use an external identity provider") + } + if err := validateUserInvite(invite); err != nil { return nil, err } @@ -1621,6 +1629,10 @@ func (am *DefaultAccountManager) AcceptUserInvite(ctx context.Context, token, pa return status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") } + if IsLocalAuthDisabled(ctx, am.idpManager) { + return status.Errorf(status.PreconditionFailed, "local user creation is disabled - use an external identity provider") + } + if password == "" { return status.Errorf(status.InvalidArgument, "password is required") } diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 26d2387d1..b9a8eae3a 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -294,6 +294,11 @@ components: type: boolean readOnly: true example: false + local_auth_disabled: + description: Indicates whether local (email/password) authentication is disabled. When true, users can only authenticate via external identity providers. This is a read-only field. + type: boolean + readOnly: true + example: false required: - peer_login_expiration_enabled - peer_login_expiration diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index e8c044b32..fd7c61917 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -415,6 +415,9 @@ type AccountSettings struct { // LazyConnectionEnabled Enables or disables experimental lazy connection LazyConnectionEnabled *bool `json:"lazy_connection_enabled,omitempty"` + // LocalAuthDisabled Indicates whether local (email/password) authentication is disabled. When true, users can only authenticate via external identity providers. This is a read-only field. + LocalAuthDisabled *bool `json:"local_auth_disabled,omitempty"` + // NetworkRange Allows to define a custom network range for the account in CIDR format NetworkRange *string `json:"network_range,omitempty"` From 7b830d8f72b6fa997f03d99292de814faa33a562 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Sun, 1 Feb 2026 14:37:00 +0100 Subject: [PATCH 098/374] disable sync lim (#5233) --- management/internals/shared/grpc/server.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index 219baaf6d..6757cca13 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -77,8 +77,9 @@ type Server struct { oAuthConfigProvider idp.OAuthConfigProvider - syncSem atomic.Int32 - syncLim int32 + syncSem atomic.Int32 + syncLimEnabled bool + syncLim int32 } // NewServer creates a new Management server @@ -108,6 +109,7 @@ func NewServer( blockPeersWithSameConfig := strings.ToLower(os.Getenv(envBlockPeers)) == "true" syncLim := int32(defaultSyncLim) + syncLimEnabled := true if syncLimStr := os.Getenv(envConcurrentSyncs); syncLimStr != "" { syncLimParsed, err := strconv.Atoi(syncLimStr) if err != nil { @@ -115,6 +117,9 @@ func NewServer( } else { //nolint:gosec syncLim = int32(syncLimParsed) + if syncLim < 0 { + syncLimEnabled = false + } } } @@ -134,7 +139,8 @@ func NewServer( loginFilter: newLoginFilter(), - syncLim: syncLim, + syncLim: syncLim, + syncLimEnabled: syncLimEnabled, }, nil } @@ -212,7 +218,7 @@ func (s *Server) Job(srv proto.ManagementService_JobServer) error { // Sync validates the existence of a connecting peer, sends an initial state (all available for the connecting peers) and // notifies the connected peer of any updates (e.g. new peers under the same account) func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_SyncServer) error { - if s.syncSem.Load() >= s.syncLim { + if s.syncLimEnabled && s.syncSem.Load() >= s.syncLim { return status.Errorf(codes.ResourceExhausted, "too many concurrent sync requests, please try again later") } s.syncSem.Add(1) From 893129334376ef0cf65198dfbf5f428290ef6244 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Sun, 1 Feb 2026 15:44:27 +0100 Subject: [PATCH 099/374] [management] run cancelPeerRoutinesWithoutLock in sync (#5234) --- management/internals/shared/grpc/server.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index 6757cca13..3704b3188 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -311,7 +311,7 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S if err != nil { log.WithContext(ctx).Debugf("error while sending initial sync for %s: %v", peerKey.String(), err) s.syncSem.Add(-1) - s.cancelPeerRoutines(ctx, accountID, peer) + s.cancelPeerRoutinesWithoutLock(ctx, accountID, peer) return err } @@ -319,7 +319,7 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S if err != nil { log.WithContext(ctx).Debugf("error while notify peer connected for %s: %v", peerKey.String(), err) s.syncSem.Add(-1) - s.cancelPeerRoutines(ctx, accountID, peer) + s.cancelPeerRoutinesWithoutLock(ctx, accountID, peer) return err } @@ -490,6 +490,10 @@ func (s *Server) cancelPeerRoutines(ctx context.Context, accountID string, peer unlock := s.acquirePeerLockByUID(ctx, peer.Key) defer unlock() + s.cancelPeerRoutinesWithoutLock(ctx, accountID, peer) +} + +func (s *Server) cancelPeerRoutinesWithoutLock(ctx context.Context, accountID string, peer *nbpeer.Peer) { err := s.accountManager.OnPeerDisconnected(ctx, accountID, peer.Key) if err != nil { log.WithContext(ctx).Errorf("failed to disconnect peer %s properly: %v", peer.Key, err) From b20d4849720754928ce08a88d8970877a5b0daa5 Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Sun, 1 Feb 2026 16:06:36 +0100 Subject: [PATCH 100/374] [docs] Add selfhosting video (#5235) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 8f4c04641..bca81c20b 100644 --- a/README.md +++ b/README.md @@ -60,8 +60,8 @@ https://github.com/user-attachments/assets/10cec749-bb56-4ab3-97af-4e38850108d2 -### NetBird on Lawrence Systems (Video) -[![Watch the video](https://img.youtube.com/vi/Kwrff6h0rEw/0.jpg)](https://www.youtube.com/watch?v=Kwrff6h0rEw) +### Self-Host NetBird (Video) +[![Watch the video](https://img.youtube.com/vi/bZAgpT6nzaQ/0.jpg)](https://youtu.be/bZAgpT6nzaQ) ### Key features From 6fdc00ff4185849cf9d3e65d38971684a0ffe65e Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Tue, 3 Feb 2026 17:30:02 +0100 Subject: [PATCH 101/374] [management] adding account id validation to accessible peers handler (#5246) --- management/server/http/handler.go | 5 +++-- .../http/handlers/peers/peers_handler.go | 19 ++++++++++++++----- .../http/handlers/peers/peers_handler_test.go | 11 +++++++++-- 3 files changed, 26 insertions(+), 9 deletions(-) diff --git a/management/server/http/handler.go b/management/server/http/handler.go index 79431a0a3..17355d1d9 100644 --- a/management/server/http/handler.go +++ b/management/server/http/handler.go @@ -9,10 +9,11 @@ import ( "time" "github.com/gorilla/mux" - idpmanager "github.com/netbirdio/netbird/management/server/idp" "github.com/rs/cors" log "github.com/sirupsen/logrus" + idpmanager "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/management-integrations/integrations" "github.com/netbirdio/netbird/management/internals/controllers/network_map" "github.com/netbirdio/netbird/management/internals/modules/zones" @@ -137,7 +138,7 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks } accounts.AddEndpoints(accountManager, settingsManager, router) - peers.AddEndpoints(accountManager, router, networkMapController) + peers.AddEndpoints(accountManager, router, networkMapController, permissionsManager) users.AddEndpoints(accountManager, router) users.AddInvitesEndpoints(accountManager, router) users.AddPublicInvitesEndpoints(accountManager, router) diff --git a/management/server/http/handlers/peers/peers_handler.go b/management/server/http/handlers/peers/peers_handler.go index 53d8ab055..783cfe11b 100644 --- a/management/server/http/handlers/peers/peers_handler.go +++ b/management/server/http/handlers/peers/peers_handler.go @@ -17,6 +17,7 @@ import ( nbcontext "github.com/netbirdio/netbird/management/server/context" "github.com/netbirdio/netbird/management/server/groups" nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/shared/management/http/api" "github.com/netbirdio/netbird/shared/management/http/util" @@ -26,11 +27,12 @@ import ( // Handler is a handler that returns peers of the account type Handler struct { accountManager account.Manager + permissionsManager permissions.Manager networkMapController network_map.Controller } -func AddEndpoints(accountManager account.Manager, router *mux.Router, networkMapController network_map.Controller) { - peersHandler := NewHandler(accountManager, networkMapController) +func AddEndpoints(accountManager account.Manager, router *mux.Router, networkMapController network_map.Controller, permissionsManager permissions.Manager) { + peersHandler := NewHandler(accountManager, networkMapController, permissionsManager) router.HandleFunc("/peers", peersHandler.GetAllPeers).Methods("GET", "OPTIONS") router.HandleFunc("/peers/{peerId}", peersHandler.HandlePeer). Methods("GET", "PUT", "DELETE", "OPTIONS") @@ -42,10 +44,11 @@ func AddEndpoints(accountManager account.Manager, router *mux.Router, networkMap } // NewHandler creates a new peers Handler -func NewHandler(accountManager account.Manager, networkMapController network_map.Controller) *Handler { +func NewHandler(accountManager account.Manager, networkMapController network_map.Controller, permissionsManager permissions.Manager) *Handler { return &Handler{ accountManager: accountManager, networkMapController: networkMapController, + permissionsManager: permissionsManager, } } @@ -359,13 +362,19 @@ func (h *Handler) GetAccessiblePeers(w http.ResponseWriter, r *http.Request) { return } - account, err := h.accountManager.GetAccountByID(r.Context(), accountID, activity.SystemInitiator) + user, err := h.accountManager.GetUserByID(r.Context(), userID) if err != nil { util.WriteError(r.Context(), err, w) return } - user, err := h.accountManager.GetUserByID(r.Context(), userID) + err = h.permissionsManager.ValidateAccountAccess(r.Context(), accountID, user, false) + if err != nil { + util.WriteError(r.Context(), status.NewPermissionDeniedError(), w) + return + } + + account, err := h.accountManager.GetAccountByID(r.Context(), accountID, activity.SystemInitiator) if err != nil { util.WriteError(r.Context(), err, w) return diff --git a/management/server/http/handlers/peers/peers_handler_test.go b/management/server/http/handlers/peers/peers_handler_test.go index 869a39b5e..786c144fc 100644 --- a/management/server/http/handlers/peers/peers_handler_test.go +++ b/management/server/http/handlers/peers/peers_handler_test.go @@ -13,13 +13,15 @@ import ( "testing" "time" + "github.com/golang/mock/gomock" "github.com/gorilla/mux" - "go.uber.org/mock/gomock" + ugomock "go.uber.org/mock/gomock" "golang.org/x/exp/maps" "github.com/netbirdio/netbird/management/internals/controllers/network_map" nbcontext "github.com/netbirdio/netbird/management/server/context" nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/shared/auth" "github.com/netbirdio/netbird/shared/management/http/api" @@ -102,7 +104,7 @@ func initTestMetaData(t *testing.T, peers ...*nbpeer.Peer) *Handler { }, } - ctrl := gomock.NewController(t) + ctrl := ugomock.NewController(t) networkMapController := network_map.NewMockController(ctrl) networkMapController.EXPECT(). @@ -110,6 +112,10 @@ func initTestMetaData(t *testing.T, peers ...*nbpeer.Peer) *Handler { Return("domain"). AnyTimes() + ctrl2 := gomock.NewController(t) + permissionsManager := permissions.NewMockManager(ctrl2) + permissionsManager.EXPECT().ValidateAccountAccess(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + return &Handler{ accountManager: &mock_server.MockAccountManager{ UpdatePeerFunc: func(_ context.Context, accountID, userID string, update *nbpeer.Peer) (*nbpeer.Peer, error) { @@ -199,6 +205,7 @@ func initTestMetaData(t *testing.T, peers ...*nbpeer.Peer) *Handler { }, }, networkMapController: networkMapController, + permissionsManager: permissionsManager, } } From d488f583115402c62d9974e787a5b23f1b73fc32 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Wed, 4 Feb 2026 11:44:46 +0100 Subject: [PATCH 102/374] [management] fix set disconnected status for connected peer (#5247) --- management/internals/shared/grpc/server.go | 32 ++++++----- management/server/account.go | 16 +++++- management/server/account/manager.go | 2 +- management/server/account_test.go | 55 +++++++++++++++++++ management/server/mock_server/account_mock.go | 5 +- 5 files changed, 89 insertions(+), 21 deletions(-) diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index 3704b3188..befcd2adf 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -307,11 +307,13 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S return mapError(ctx, err) } + streamStartTime := time.Now().UTC() + err = s.sendInitialSync(ctx, peerKey, peer, netMap, postureChecks, srv, dnsFwdPort) if err != nil { log.WithContext(ctx).Debugf("error while sending initial sync for %s: %v", peerKey.String(), err) s.syncSem.Add(-1) - s.cancelPeerRoutinesWithoutLock(ctx, accountID, peer) + s.cancelPeerRoutinesWithoutLock(ctx, accountID, peer, streamStartTime) return err } @@ -319,7 +321,7 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S if err != nil { log.WithContext(ctx).Debugf("error while notify peer connected for %s: %v", peerKey.String(), err) s.syncSem.Add(-1) - s.cancelPeerRoutinesWithoutLock(ctx, accountID, peer) + s.cancelPeerRoutinesWithoutLock(ctx, accountID, peer, streamStartTime) return err } @@ -336,7 +338,7 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S s.syncSem.Add(-1) - return s.handleUpdates(ctx, accountID, peerKey, peer, updates, srv) + return s.handleUpdates(ctx, accountID, peerKey, peer, updates, srv, streamStartTime) } func (s *Server) handleHandshake(ctx context.Context, srv proto.ManagementService_JobServer) (wgtypes.Key, error) { @@ -404,7 +406,7 @@ func (s *Server) sendJobsLoop(ctx context.Context, accountID string, peerKey wgt } // handleUpdates sends updates to the connected peer until the updates channel is closed. -func (s *Server) handleUpdates(ctx context.Context, accountID string, peerKey wgtypes.Key, peer *nbpeer.Peer, updates chan *network_map.UpdateMessage, srv proto.ManagementService_SyncServer) error { +func (s *Server) handleUpdates(ctx context.Context, accountID string, peerKey wgtypes.Key, peer *nbpeer.Peer, updates chan *network_map.UpdateMessage, srv proto.ManagementService_SyncServer, streamStartTime time.Time) error { log.WithContext(ctx).Tracef("starting to handle updates for peer %s", peerKey.String()) for { select { @@ -416,11 +418,11 @@ func (s *Server) handleUpdates(ctx context.Context, accountID string, peerKey wg if !open { log.WithContext(ctx).Debugf("updates channel for peer %s was closed", peerKey.String()) - s.cancelPeerRoutines(ctx, accountID, peer) + s.cancelPeerRoutines(ctx, accountID, peer, streamStartTime) return nil } log.WithContext(ctx).Debugf("received an update for peer %s", peerKey.String()) - if err := s.sendUpdate(ctx, accountID, peerKey, peer, update, srv); err != nil { + if err := s.sendUpdate(ctx, accountID, peerKey, peer, update, srv, streamStartTime); err != nil { log.WithContext(ctx).Debugf("error while sending an update to peer %s: %v", peerKey.String(), err) return err } @@ -429,7 +431,7 @@ func (s *Server) handleUpdates(ctx context.Context, accountID string, peerKey wg case <-srv.Context().Done(): // happens when connection drops, e.g. client disconnects log.WithContext(ctx).Debugf("stream of peer %s has been closed", peerKey.String()) - s.cancelPeerRoutines(ctx, accountID, peer) + s.cancelPeerRoutines(ctx, accountID, peer, streamStartTime) return srv.Context().Err() } } @@ -437,16 +439,16 @@ func (s *Server) handleUpdates(ctx context.Context, accountID string, peerKey wg // sendUpdate encrypts the update message using the peer key and the server's wireguard key, // then sends the encrypted message to the connected peer via the sync server. -func (s *Server) sendUpdate(ctx context.Context, accountID string, peerKey wgtypes.Key, peer *nbpeer.Peer, update *network_map.UpdateMessage, srv proto.ManagementService_SyncServer) error { +func (s *Server) sendUpdate(ctx context.Context, accountID string, peerKey wgtypes.Key, peer *nbpeer.Peer, update *network_map.UpdateMessage, srv proto.ManagementService_SyncServer, streamStartTime time.Time) error { key, err := s.secretsManager.GetWGKey() if err != nil { - s.cancelPeerRoutines(ctx, accountID, peer) + s.cancelPeerRoutines(ctx, accountID, peer, streamStartTime) return status.Errorf(codes.Internal, "failed processing update message") } encryptedResp, err := encryption.EncryptMessage(peerKey, key, update.Update) if err != nil { - s.cancelPeerRoutines(ctx, accountID, peer) + s.cancelPeerRoutines(ctx, accountID, peer, streamStartTime) return status.Errorf(codes.Internal, "failed processing update message") } err = srv.Send(&proto.EncryptedMessage{ @@ -454,7 +456,7 @@ func (s *Server) sendUpdate(ctx context.Context, accountID string, peerKey wgtyp Body: encryptedResp, }) if err != nil { - s.cancelPeerRoutines(ctx, accountID, peer) + s.cancelPeerRoutines(ctx, accountID, peer, streamStartTime) return status.Errorf(codes.Internal, "failed sending update message") } log.WithContext(ctx).Debugf("sent an update to peer %s", peerKey.String()) @@ -486,15 +488,15 @@ func (s *Server) sendJob(ctx context.Context, peerKey wgtypes.Key, job *job.Even return nil } -func (s *Server) cancelPeerRoutines(ctx context.Context, accountID string, peer *nbpeer.Peer) { +func (s *Server) cancelPeerRoutines(ctx context.Context, accountID string, peer *nbpeer.Peer, streamStartTime time.Time) { unlock := s.acquirePeerLockByUID(ctx, peer.Key) defer unlock() - s.cancelPeerRoutinesWithoutLock(ctx, accountID, peer) + s.cancelPeerRoutinesWithoutLock(ctx, accountID, peer, streamStartTime) } -func (s *Server) cancelPeerRoutinesWithoutLock(ctx context.Context, accountID string, peer *nbpeer.Peer) { - err := s.accountManager.OnPeerDisconnected(ctx, accountID, peer.Key) +func (s *Server) cancelPeerRoutinesWithoutLock(ctx context.Context, accountID string, peer *nbpeer.Peer, streamStartTime time.Time) { + err := s.accountManager.OnPeerDisconnected(ctx, accountID, peer.Key, streamStartTime) if err != nil { log.WithContext(ctx).Errorf("failed to disconnect peer %s properly: %v", peer.Key, err) } diff --git a/management/server/account.go b/management/server/account.go index 8f9dad031..4f53415f5 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -1684,8 +1684,20 @@ func (am *DefaultAccountManager) SyncAndMarkPeer(ctx context.Context, accountID return peer, netMap, postureChecks, dnsfwdPort, nil } -func (am *DefaultAccountManager) OnPeerDisconnected(ctx context.Context, accountID string, peerPubKey string) error { - err := am.MarkPeerConnected(ctx, peerPubKey, false, nil, accountID) +func (am *DefaultAccountManager) OnPeerDisconnected(ctx context.Context, accountID string, peerPubKey string, streamStartTime time.Time) error { + peer, err := am.Store.GetPeerByPeerPubKey(ctx, store.LockingStrengthNone, peerPubKey) + if err != nil { + log.WithContext(ctx).Warnf("failed to get peer %s for disconnect check: %v", peerPubKey, err) + return nil + } + + if peer.Status.LastSeen.After(streamStartTime) { + log.WithContext(ctx).Tracef("peer %s has newer activity (lastSeen=%s > streamStart=%s), skipping disconnect", + peerPubKey, peer.Status.LastSeen.Format(time.RFC3339), streamStartTime.Format(time.RFC3339)) + return nil + } + + err = am.MarkPeerConnected(ctx, peerPubKey, false, nil, accountID) if err != nil { log.WithContext(ctx).Warnf("failed marking peer as disconnected %s %v", peerPubKey, err) } diff --git a/management/server/account/manager.go b/management/server/account/manager.go index 5e9bb42a2..eed7739da 100644 --- a/management/server/account/manager.go +++ b/management/server/account/manager.go @@ -115,7 +115,7 @@ type Manager interface { GroupValidation(ctx context.Context, accountId string, groups []string) (bool, error) GetValidatedPeers(ctx context.Context, accountID string) (map[string]struct{}, map[string]string, error) SyncAndMarkPeer(ctx context.Context, accountID string, peerPubKey string, meta nbpeer.PeerSystemMeta, realIP net.IP) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, int64, error) - OnPeerDisconnected(ctx context.Context, accountID string, peerPubKey string) error + OnPeerDisconnected(ctx context.Context, accountID string, peerPubKey string, streamStartTime time.Time) error SyncPeerMeta(ctx context.Context, peerPubKey string, meta nbpeer.PeerSystemMeta) error FindExistingPostureCheck(accountID string, checks *posture.ChecksDefinition) (*posture.Checks, error) GetAccountIDForPeerKey(ctx context.Context, peerKey string) (string, error) diff --git a/management/server/account_test.go b/management/server/account_test.go index 86cc69e8b..f3d98916c 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -1961,6 +1961,61 @@ func TestDefaultAccountManager_MarkPeerConnected_PeerLoginExpiration(t *testing. } } +func TestDefaultAccountManager_OnPeerDisconnected_LastSeenCheck(t *testing.T) { + manager, _, err := createManager(t) + require.NoError(t, err, "unable to create account manager") + + accountID, err := manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userID}) + require.NoError(t, err, "unable to create an account") + + key, err := wgtypes.GenerateKey() + require.NoError(t, err, "unable to generate WireGuard key") + peerPubKey := key.PublicKey().String() + + _, _, _, err = manager.AddPeer(context.Background(), "", "", userID, &nbpeer.Peer{ + Key: peerPubKey, + Meta: nbpeer.PeerSystemMeta{Hostname: "test-peer"}, + }, false) + require.NoError(t, err, "unable to add peer") + + t.Run("disconnect peer when streamStartTime is after LastSeen", func(t *testing.T) { + err = manager.MarkPeerConnected(context.Background(), peerPubKey, true, nil, accountID) + require.NoError(t, err, "unable to mark peer connected") + + peer, err := manager.Store.GetPeerByPeerPubKey(context.Background(), store.LockingStrengthNone, peerPubKey) + require.NoError(t, err, "unable to get peer") + require.True(t, peer.Status.Connected, "peer should be connected") + + streamStartTime := time.Now().UTC() + + err = manager.OnPeerDisconnected(context.Background(), accountID, peerPubKey, streamStartTime) + require.NoError(t, err) + + peer, err = manager.Store.GetPeerByPeerPubKey(context.Background(), store.LockingStrengthNone, peerPubKey) + require.NoError(t, err) + require.False(t, peer.Status.Connected, "peer should be disconnected") + }) + + t.Run("skip disconnect when LastSeen is after streamStartTime (zombie stream protection)", func(t *testing.T) { + err = manager.MarkPeerConnected(context.Background(), peerPubKey, true, nil, accountID) + require.NoError(t, err, "unable to mark peer connected") + + peer, err := manager.Store.GetPeerByPeerPubKey(context.Background(), store.LockingStrengthNone, peerPubKey) + require.NoError(t, err) + require.True(t, peer.Status.Connected, "peer should be connected") + + streamStartTime := peer.Status.LastSeen.Add(-1 * time.Hour) + + err = manager.OnPeerDisconnected(context.Background(), accountID, peerPubKey, streamStartTime) + require.NoError(t, err) + + peer, err = manager.Store.GetPeerByPeerPubKey(context.Background(), store.LockingStrengthNone, peerPubKey) + require.NoError(t, err) + require.True(t, peer.Status.Connected, + "peer should remain connected because LastSeen > streamStartTime (zombie stream protection)") + }) +} + func TestDefaultAccountManager_UpdateAccountSettings_PeerLoginExpiration(t *testing.T) { manager, _, err := createManager(t) require.NoError(t, err, "unable to create account manager") diff --git a/management/server/mock_server/account_mock.go b/management/server/mock_server/account_mock.go index 026989898..a4754d180 100644 --- a/management/server/mock_server/account_mock.go +++ b/management/server/mock_server/account_mock.go @@ -221,9 +221,8 @@ func (am *MockAccountManager) SyncAndMarkPeer(ctx context.Context, accountID str return nil, nil, nil, 0, status.Errorf(codes.Unimplemented, "method MarkPeerConnected is not implemented") } -func (am *MockAccountManager) OnPeerDisconnected(_ context.Context, accountID string, peerPubKey string) error { - // TODO implement me - panic("implement me") +func (am *MockAccountManager) OnPeerDisconnected(_ context.Context, accountID string, peerPubKey string, streamStartTime time.Time) error { + return nil } func (am *MockAccountManager) GetValidatedPeers(ctx context.Context, accountID string) (map[string]struct{}, map[string]string, error) { From f7732557fa42622c85c96aa894a648ac7f5cb58f Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 5 Feb 2026 01:07:27 +0800 Subject: [PATCH 103/374] [client] Add missing bsd flags in debug bundle (#5254) --- .../routemanager/systemops/routeflags_bsd.go | 40 ++++++++++++------- .../systemops/routeflags_freebsd.go | 38 +++++++++++------- 2 files changed, 49 insertions(+), 29 deletions(-) diff --git a/client/internal/routemanager/systemops/routeflags_bsd.go b/client/internal/routemanager/systemops/routeflags_bsd.go index ad32e5029..33280bfb3 100644 --- a/client/internal/routemanager/systemops/routeflags_bsd.go +++ b/client/internal/routemanager/systemops/routeflags_bsd.go @@ -4,16 +4,17 @@ package systemops import ( "strings" - "syscall" + + "golang.org/x/sys/unix" ) // filterRoutesByFlags returns true if the route message should be ignored based on its flags. func filterRoutesByFlags(routeMessageFlags int) bool { - if routeMessageFlags&syscall.RTF_UP == 0 { + if routeMessageFlags&unix.RTF_UP == 0 { return true } - if routeMessageFlags&(syscall.RTF_REJECT|syscall.RTF_BLACKHOLE|syscall.RTF_WASCLONED) != 0 { + if routeMessageFlags&(unix.RTF_REJECT|unix.RTF_BLACKHOLE|unix.RTF_WASCLONED) != 0 { return true } @@ -24,42 +25,51 @@ func filterRoutesByFlags(routeMessageFlags int) bool { func formatBSDFlags(flags int) string { var flagStrs []string - if flags&syscall.RTF_UP != 0 { + if flags&unix.RTF_UP != 0 { flagStrs = append(flagStrs, "U") } - if flags&syscall.RTF_GATEWAY != 0 { + if flags&unix.RTF_GATEWAY != 0 { flagStrs = append(flagStrs, "G") } - if flags&syscall.RTF_HOST != 0 { + if flags&unix.RTF_HOST != 0 { flagStrs = append(flagStrs, "H") } - if flags&syscall.RTF_REJECT != 0 { + if flags&unix.RTF_REJECT != 0 { flagStrs = append(flagStrs, "R") } - if flags&syscall.RTF_DYNAMIC != 0 { + if flags&unix.RTF_DYNAMIC != 0 { flagStrs = append(flagStrs, "D") } - if flags&syscall.RTF_MODIFIED != 0 { + if flags&unix.RTF_MODIFIED != 0 { flagStrs = append(flagStrs, "M") } - if flags&syscall.RTF_STATIC != 0 { + if flags&unix.RTF_STATIC != 0 { flagStrs = append(flagStrs, "S") } - if flags&syscall.RTF_LLINFO != 0 { + if flags&unix.RTF_LLINFO != 0 { flagStrs = append(flagStrs, "L") } - if flags&syscall.RTF_LOCAL != 0 { + if flags&unix.RTF_LOCAL != 0 { flagStrs = append(flagStrs, "l") } - if flags&syscall.RTF_BLACKHOLE != 0 { + if flags&unix.RTF_BLACKHOLE != 0 { flagStrs = append(flagStrs, "B") } - if flags&syscall.RTF_CLONING != 0 { + if flags&unix.RTF_CLONING != 0 { flagStrs = append(flagStrs, "C") } - if flags&syscall.RTF_WASCLONED != 0 { + if flags&unix.RTF_WASCLONED != 0 { flagStrs = append(flagStrs, "W") } + if flags&unix.RTF_PROTO1 != 0 { + flagStrs = append(flagStrs, "1") + } + if flags&unix.RTF_PROTO2 != 0 { + flagStrs = append(flagStrs, "2") + } + if flags&unix.RTF_PROTO3 != 0 { + flagStrs = append(flagStrs, "3") + } if len(flagStrs) == 0 { return "-" diff --git a/client/internal/routemanager/systemops/routeflags_freebsd.go b/client/internal/routemanager/systemops/routeflags_freebsd.go index 2338fe5d8..a8c82b3ed 100644 --- a/client/internal/routemanager/systemops/routeflags_freebsd.go +++ b/client/internal/routemanager/systemops/routeflags_freebsd.go @@ -4,17 +4,18 @@ package systemops import ( "strings" - "syscall" + + "golang.org/x/sys/unix" ) // filterRoutesByFlags returns true if the route message should be ignored based on its flags. func filterRoutesByFlags(routeMessageFlags int) bool { - if routeMessageFlags&syscall.RTF_UP == 0 { + if routeMessageFlags&unix.RTF_UP == 0 { return true } - // NOTE: syscall.RTF_WASCLONED deprecated in FreeBSD 8.0 - if routeMessageFlags&(syscall.RTF_REJECT|syscall.RTF_BLACKHOLE) != 0 { + // NOTE: RTF_WASCLONED deprecated in FreeBSD 8.0 + if routeMessageFlags&(unix.RTF_REJECT|unix.RTF_BLACKHOLE) != 0 { return true } @@ -25,37 +26,46 @@ func filterRoutesByFlags(routeMessageFlags int) bool { func formatBSDFlags(flags int) string { var flagStrs []string - if flags&syscall.RTF_UP != 0 { + if flags&unix.RTF_UP != 0 { flagStrs = append(flagStrs, "U") } - if flags&syscall.RTF_GATEWAY != 0 { + if flags&unix.RTF_GATEWAY != 0 { flagStrs = append(flagStrs, "G") } - if flags&syscall.RTF_HOST != 0 { + if flags&unix.RTF_HOST != 0 { flagStrs = append(flagStrs, "H") } - if flags&syscall.RTF_REJECT != 0 { + if flags&unix.RTF_REJECT != 0 { flagStrs = append(flagStrs, "R") } - if flags&syscall.RTF_DYNAMIC != 0 { + if flags&unix.RTF_DYNAMIC != 0 { flagStrs = append(flagStrs, "D") } - if flags&syscall.RTF_MODIFIED != 0 { + if flags&unix.RTF_MODIFIED != 0 { flagStrs = append(flagStrs, "M") } - if flags&syscall.RTF_STATIC != 0 { + if flags&unix.RTF_STATIC != 0 { flagStrs = append(flagStrs, "S") } - if flags&syscall.RTF_LLINFO != 0 { + if flags&unix.RTF_LLINFO != 0 { flagStrs = append(flagStrs, "L") } - if flags&syscall.RTF_LOCAL != 0 { + if flags&unix.RTF_LOCAL != 0 { flagStrs = append(flagStrs, "l") } - if flags&syscall.RTF_BLACKHOLE != 0 { + if flags&unix.RTF_BLACKHOLE != 0 { flagStrs = append(flagStrs, "B") } // Note: RTF_CLONING and RTF_WASCLONED deprecated in FreeBSD 8.0 + if flags&unix.RTF_PROTO1 != 0 { + flagStrs = append(flagStrs, "1") + } + if flags&unix.RTF_PROTO2 != 0 { + flagStrs = append(flagStrs, "2") + } + if flags&unix.RTF_PROTO3 != 0 { + flagStrs = append(flagStrs, "3") + } if len(flagStrs) == 0 { return "-" From 194a986926cfcce284e77ee6eca84732d8976ace Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Wed, 4 Feb 2026 22:22:37 +0100 Subject: [PATCH 104/374] Cache the result of wgInterface.ToInterface() using sync.Once (#5256) Avoid repeated conversions during route setup. The toInterface helper ensures the conversion happens only once regardless of how many routes are added or removed. --- client/internal/routemanager/manager.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/client/internal/routemanager/manager.go b/client/internal/routemanager/manager.go index 2baa0e668..077b9521b 100644 --- a/client/internal/routemanager/manager.go +++ b/client/internal/routemanager/manager.go @@ -173,12 +173,21 @@ func (m *DefaultManager) setupAndroidRoutes(config ManagerConfig) { } func (m *DefaultManager) setupRefCounters(useNoop bool) { + var once sync.Once + var wgIface *net.Interface + toInterface := func() *net.Interface { + once.Do(func() { + wgIface = m.wgInterface.ToInterface() + }) + return wgIface + } + m.routeRefCounter = refcounter.New( func(prefix netip.Prefix, _ struct{}) (struct{}, error) { - return struct{}{}, m.sysOps.AddVPNRoute(prefix, m.wgInterface.ToInterface()) + return struct{}{}, m.sysOps.AddVPNRoute(prefix, toInterface()) }, func(prefix netip.Prefix, _ struct{}) error { - return m.sysOps.RemoveVPNRoute(prefix, m.wgInterface.ToInterface()) + return m.sysOps.RemoveVPNRoute(prefix, toInterface()) }, ) From d2f9653cea8c3b7d119c657366e967eb5869dd07 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Thu, 5 Feb 2026 12:06:28 +0100 Subject: [PATCH 105/374] Fix nil pointer panic in ICE agent during sleep/wake cycles (#5261) Add defensive nil checks in ThreadSafeAgent.Close() to prevent panic when agent field is nil. This can occur during Windows suspend/resume when network interfaces are disrupted or the pion/ice library returns nil without error. Also capture agent pointer in local variable before goroutine execution to prevent race conditions. Fixes service crashes on laptop wake-up. --- client/internal/peer/ice/agent.go | 51 +++++++++++++++++++----------- client/internal/peer/worker_ice.go | 6 ++-- 2 files changed, 37 insertions(+), 20 deletions(-) diff --git a/client/internal/peer/ice/agent.go b/client/internal/peer/ice/agent.go index 79f68d279..c74b46d10 100644 --- a/client/internal/peer/ice/agent.go +++ b/client/internal/peer/ice/agent.go @@ -2,6 +2,7 @@ package ice import ( "context" + "fmt" "sync" "time" @@ -32,24 +33,6 @@ type ThreadSafeAgent struct { once sync.Once } -func (a *ThreadSafeAgent) Close() error { - var err error - a.once.Do(func() { - done := make(chan error, 1) - go func() { - done <- a.Agent.Close() - }() - - select { - case err = <-done: - case <-time.After(iceAgentCloseTimeout): - log.Warnf("ICE agent close timed out after %v, proceeding with cleanup", iceAgentCloseTimeout) - err = nil - } - }) - return err -} - func NewAgent(ctx context.Context, iFaceDiscover stdnet.ExternalIFaceDiscover, config Config, candidateTypes []ice.CandidateType, ufrag string, pwd string) (*ThreadSafeAgent, error) { iceKeepAlive := iceKeepAlive() iceDisconnectedTimeout := iceDisconnectedTimeout() @@ -93,9 +76,41 @@ func NewAgent(ctx context.Context, iFaceDiscover stdnet.ExternalIFaceDiscover, c return nil, err } + if agent == nil { + return nil, fmt.Errorf("ice.NewAgent returned nil agent without error") + } + return &ThreadSafeAgent{Agent: agent}, nil } +func (a *ThreadSafeAgent) Close() error { + var err error + a.once.Do(func() { + // Defensive check to prevent nil pointer dereference + // This can happen during sleep/wake transitions or memory corruption scenarios + // github.com/netbirdio/netbird/client/internal/peer/ice.(*ThreadSafeAgent).Close(0x40006883f0?) + // [signal 0xc0000005 code=0x0 addr=0x0 pc=0x7ff7e73af83c] + agent := a.Agent + if agent == nil { + log.Warnf("ICE agent is nil during close, skipping") + return + } + + done := make(chan error, 1) + go func() { + done <- agent.Close() + }() + + select { + case err = <-done: + case <-time.After(iceAgentCloseTimeout): + log.Warnf("ICE agent close timed out after %v, proceeding with cleanup", iceAgentCloseTimeout) + err = nil + } + }) + return err +} + func GenerateICECredentials() (string, string, error) { ufrag, err := randutil.GenerateCryptoRandomString(lenUFrag, runesAlpha) if err != nil { diff --git a/client/internal/peer/worker_ice.go b/client/internal/peer/worker_ice.go index b6b9d2cf4..464f57bff 100644 --- a/client/internal/peer/worker_ice.go +++ b/client/internal/peer/worker_ice.go @@ -107,8 +107,10 @@ func (w *WorkerICE) OnNewOffer(remoteOfferAnswer *OfferAnswer) { } w.log.Debugf("agent already exists, recreate the connection") w.agentDialerCancel() - if err := w.agent.Close(); err != nil { - w.log.Warnf("failed to close ICE agent: %s", err) + if w.agent != nil { + if err := w.agent.Close(); err != nil { + w.log.Warnf("failed to close ICE agent: %s", err) + } } sessionID, err := NewICESessionID() From 1b96648d4d190ec2d57b3a195004445dd5e10b16 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 5 Feb 2026 21:34:35 +0800 Subject: [PATCH 106/374] [client] Always log dns forwader responses (#5262) --- client/internal/dnsfwd/forwarder.go | 125 ++++++++++------------- client/internal/dnsfwd/forwarder_test.go | 90 +++++++--------- 2 files changed, 93 insertions(+), 122 deletions(-) diff --git a/client/internal/dnsfwd/forwarder.go b/client/internal/dnsfwd/forwarder.go index 1230a4e46..5c7cb31fc 100644 --- a/client/internal/dnsfwd/forwarder.go +++ b/client/internal/dnsfwd/forwarder.go @@ -190,50 +190,75 @@ func (f *DNSForwarder) Close(ctx context.Context) error { return nberrors.FormatErrorOrNil(result) } -func (f *DNSForwarder) handleDNSQuery(logger *log.Entry, w dns.ResponseWriter, query *dns.Msg) *dns.Msg { +func (f *DNSForwarder) handleDNSQuery(logger *log.Entry, w dns.ResponseWriter, query *dns.Msg, startTime time.Time) { if len(query.Question) == 0 { - return nil + return } question := query.Question[0] - logger.Tracef("received DNS request for DNS forwarder: domain=%s type=%s class=%s", - question.Name, dns.TypeToString[question.Qtype], dns.ClassToString[question.Qclass]) + qname := strings.ToLower(question.Name) - domain := strings.ToLower(question.Name) + logger.Tracef("question: domain=%s type=%s class=%s", + qname, dns.TypeToString[question.Qtype], dns.ClassToString[question.Qclass]) resp := query.SetReply(query) network := resutil.NetworkForQtype(question.Qtype) if network == "" { resp.Rcode = dns.RcodeNotImplemented - if err := w.WriteMsg(resp); err != nil { - logger.Errorf("failed to write DNS response: %v", err) - } - return nil + f.writeResponse(logger, w, resp, qname, startTime) + return } - mostSpecificResId, matchingEntries := f.getMatchingEntries(strings.TrimSuffix(domain, ".")) - // query doesn't match any configured domain + mostSpecificResId, matchingEntries := f.getMatchingEntries(strings.TrimSuffix(qname, ".")) if mostSpecificResId == "" { resp.Rcode = dns.RcodeRefused - if err := w.WriteMsg(resp); err != nil { - logger.Errorf("failed to write DNS response: %v", err) - } - return nil + f.writeResponse(logger, w, resp, qname, startTime) + return } ctx, cancel := context.WithTimeout(context.Background(), upstreamTimeout) defer cancel() - result := resutil.LookupIP(ctx, f.resolver, network, domain, question.Qtype) + result := resutil.LookupIP(ctx, f.resolver, network, qname, question.Qtype) if result.Err != nil { - f.handleDNSError(ctx, logger, w, question, resp, domain, result) - return nil + f.handleDNSError(ctx, logger, w, question, resp, qname, result, startTime) + return } f.updateInternalState(result.IPs, mostSpecificResId, matchingEntries) - resp.Answer = append(resp.Answer, resutil.IPsToRRs(domain, result.IPs, f.ttl)...) - f.cache.set(domain, question.Qtype, result.IPs) + resp.Answer = append(resp.Answer, resutil.IPsToRRs(qname, result.IPs, f.ttl)...) + f.cache.set(qname, question.Qtype, result.IPs) - return resp + f.writeResponse(logger, w, resp, qname, startTime) +} + +func (f *DNSForwarder) writeResponse(logger *log.Entry, w dns.ResponseWriter, resp *dns.Msg, qname string, startTime time.Time) { + if err := w.WriteMsg(resp); err != nil { + logger.Errorf("failed to write DNS response: %v", err) + return + } + + logger.Tracef("response: domain=%s rcode=%s answers=%s took=%s", + qname, dns.RcodeToString[resp.Rcode], resutil.FormatAnswers(resp.Answer), time.Since(startTime)) +} + +// udpResponseWriter wraps a dns.ResponseWriter to handle UDP-specific truncation. +type udpResponseWriter struct { + dns.ResponseWriter + query *dns.Msg +} + +func (u *udpResponseWriter) WriteMsg(resp *dns.Msg) error { + opt := u.query.IsEdns0() + maxSize := dns.MinMsgSize + if opt != nil { + maxSize = int(opt.UDPSize()) + } + + if resp.Len() > maxSize { + resp.Truncate(maxSize) + } + + return u.ResponseWriter.WriteMsg(resp) } func (f *DNSForwarder) handleDNSQueryUDP(w dns.ResponseWriter, query *dns.Msg) { @@ -243,30 +268,7 @@ func (f *DNSForwarder) handleDNSQueryUDP(w dns.ResponseWriter, query *dns.Msg) { "dns_id": fmt.Sprintf("%04x", query.Id), }) - resp := f.handleDNSQuery(logger, w, query) - if resp == nil { - return - } - - opt := query.IsEdns0() - maxSize := dns.MinMsgSize - if opt != nil { - // client advertised a larger EDNS0 buffer - maxSize = int(opt.UDPSize()) - } - - // if our response is too big, truncate and set the TC bit - if resp.Len() > maxSize { - resp.Truncate(maxSize) - } - - if err := w.WriteMsg(resp); err != nil { - logger.Errorf("failed to write DNS response: %v", err) - return - } - - logger.Tracef("response: domain=%s rcode=%s answers=%s took=%s", - query.Question[0].Name, dns.RcodeToString[resp.Rcode], resutil.FormatAnswers(resp.Answer), time.Since(startTime)) + f.handleDNSQuery(logger, &udpResponseWriter{ResponseWriter: w, query: query}, query, startTime) } func (f *DNSForwarder) handleDNSQueryTCP(w dns.ResponseWriter, query *dns.Msg) { @@ -276,18 +278,7 @@ func (f *DNSForwarder) handleDNSQueryTCP(w dns.ResponseWriter, query *dns.Msg) { "dns_id": fmt.Sprintf("%04x", query.Id), }) - resp := f.handleDNSQuery(logger, w, query) - if resp == nil { - return - } - - if err := w.WriteMsg(resp); err != nil { - logger.Errorf("failed to write DNS response: %v", err) - return - } - - logger.Tracef("response: domain=%s rcode=%s answers=%s took=%s", - query.Question[0].Name, dns.RcodeToString[resp.Rcode], resutil.FormatAnswers(resp.Answer), time.Since(startTime)) + f.handleDNSQuery(logger, w, query, startTime) } func (f *DNSForwarder) updateInternalState(ips []netip.Addr, mostSpecificResId route.ResID, matchingEntries []*ForwarderEntry) { @@ -334,6 +325,7 @@ func (f *DNSForwarder) handleDNSError( resp *dns.Msg, domain string, result resutil.LookupResult, + startTime time.Time, ) { qType := question.Qtype qTypeName := dns.TypeToString[qType] @@ -343,9 +335,7 @@ func (f *DNSForwarder) handleDNSError( // NotFound: cache negative result and respond if result.Rcode == dns.RcodeNameError || result.Rcode == dns.RcodeSuccess { f.cache.set(domain, question.Qtype, nil) - if writeErr := w.WriteMsg(resp); writeErr != nil { - logger.Errorf("failed to write failure DNS response: %v", writeErr) - } + f.writeResponse(logger, w, resp, domain, startTime) return } @@ -355,9 +345,7 @@ func (f *DNSForwarder) handleDNSError( logger.Debugf("serving cached DNS response after upstream failure: domain=%s type=%s", domain, qTypeName) resp.Answer = append(resp.Answer, resutil.IPsToRRs(domain, ips, f.ttl)...) resp.Rcode = dns.RcodeSuccess - if writeErr := w.WriteMsg(resp); writeErr != nil { - logger.Errorf("failed to write cached DNS response: %v", writeErr) - } + f.writeResponse(logger, w, resp, domain, startTime) return } @@ -365,9 +353,7 @@ func (f *DNSForwarder) handleDNSError( verifyResult := resutil.LookupIP(ctx, f.resolver, resutil.NetworkForQtype(qType), domain, qType) if verifyResult.Rcode == dns.RcodeNameError || verifyResult.Rcode == dns.RcodeSuccess { resp.Rcode = verifyResult.Rcode - if writeErr := w.WriteMsg(resp); writeErr != nil { - logger.Errorf("failed to write failure DNS response: %v", writeErr) - } + f.writeResponse(logger, w, resp, domain, startTime) return } } @@ -375,15 +361,12 @@ func (f *DNSForwarder) handleDNSError( // No cache or verification failed. Log with or without the server field for more context. var dnsErr *net.DNSError if errors.As(result.Err, &dnsErr) && dnsErr.Server != "" { - logger.Warnf("failed to resolve: type=%s domain=%s server=%s: %v", qTypeName, domain, dnsErr.Server, result.Err) + logger.Warnf("upstream failure: type=%s domain=%s server=%s: %v", qTypeName, domain, dnsErr.Server, result.Err) } else { logger.Warnf(errResolveFailed, domain, result.Err) } - // Write final failure response. - if writeErr := w.WriteMsg(resp); writeErr != nil { - logger.Errorf("failed to write failure DNS response: %v", writeErr) - } + f.writeResponse(logger, w, resp, domain, startTime) } // getMatchingEntries retrieves the resource IDs for a given domain. diff --git a/client/internal/dnsfwd/forwarder_test.go b/client/internal/dnsfwd/forwarder_test.go index 6416c2f21..7325ef8a7 100644 --- a/client/internal/dnsfwd/forwarder_test.go +++ b/client/internal/dnsfwd/forwarder_test.go @@ -318,8 +318,9 @@ func TestDNSForwarder_UnauthorizedDomainAccess(t *testing.T) { query.SetQuestion(dns.Fqdn(tt.queryDomain), dns.TypeA) mockWriter := &test.MockResponseWriter{} - resp := forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, query) + forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, query, time.Now()) + resp := mockWriter.GetLastResponse() if tt.shouldResolve { require.NotNil(t, resp, "Expected response for authorized domain") require.Equal(t, dns.RcodeSuccess, resp.Rcode, "Expected successful response") @@ -329,10 +330,9 @@ func TestDNSForwarder_UnauthorizedDomainAccess(t *testing.T) { mockFirewall.AssertExpectations(t) mockResolver.AssertExpectations(t) } else { - if resp != nil { - assert.True(t, len(resp.Answer) == 0 || resp.Rcode != dns.RcodeSuccess, - "Unauthorized domain should not return successful answers") - } + require.NotNil(t, resp, "Expected response") + assert.True(t, len(resp.Answer) == 0 || resp.Rcode != dns.RcodeSuccess, + "Unauthorized domain should not return successful answers") mockFirewall.AssertNotCalled(t, "UpdateSet") mockResolver.AssertNotCalled(t, "LookupNetIP") } @@ -466,14 +466,16 @@ func TestDNSForwarder_FirewallSetUpdates(t *testing.T) { dnsQuery.SetQuestion(dns.Fqdn(tt.query), dns.TypeA) mockWriter := &test.MockResponseWriter{} - resp := forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, dnsQuery) + forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, dnsQuery, time.Now()) // Verify response + resp := mockWriter.GetLastResponse() if tt.shouldResolve { require.NotNil(t, resp, "Expected response for authorized domain") require.Equal(t, dns.RcodeSuccess, resp.Rcode) require.NotEmpty(t, resp.Answer) - } else if resp != nil { + } else { + require.NotNil(t, resp, "Expected response") assert.True(t, resp.Rcode == dns.RcodeRefused || len(resp.Answer) == 0, "Unauthorized domain should be refused or have no answers") } @@ -528,9 +530,10 @@ func TestDNSForwarder_MultipleIPsInSingleUpdate(t *testing.T) { query.SetQuestion("example.com.", dns.TypeA) mockWriter := &test.MockResponseWriter{} - resp := forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, query) + forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, query, time.Now()) // Verify response contains all IPs + resp := mockWriter.GetLastResponse() require.NotNil(t, resp) require.Equal(t, dns.RcodeSuccess, resp.Rcode) require.Len(t, resp.Answer, 3, "Should have 3 answer records") @@ -605,7 +608,7 @@ func TestDNSForwarder_ResponseCodes(t *testing.T) { }, } - _ = forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, query) + forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, query, time.Now()) // Check the response written to the writer require.NotNil(t, writtenResp, "Expected response to be written") @@ -675,7 +678,8 @@ func TestDNSForwarder_ServeFromCacheOnUpstreamFailure(t *testing.T) { q1 := &dns.Msg{} q1.SetQuestion(dns.Fqdn("example.com"), dns.TypeA) w1 := &test.MockResponseWriter{} - resp1 := forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), w1, q1) + forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), w1, q1, time.Now()) + resp1 := w1.GetLastResponse() require.NotNil(t, resp1) require.Equal(t, dns.RcodeSuccess, resp1.Rcode) require.Len(t, resp1.Answer, 1) @@ -683,13 +687,13 @@ func TestDNSForwarder_ServeFromCacheOnUpstreamFailure(t *testing.T) { // Second query: serve from cache after upstream failure q2 := &dns.Msg{} q2.SetQuestion(dns.Fqdn("example.com"), dns.TypeA) - var writtenResp *dns.Msg - w2 := &test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { writtenResp = m; return nil }} - _ = forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), w2, q2) + w2 := &test.MockResponseWriter{} + forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), w2, q2, time.Now()) - require.NotNil(t, writtenResp, "expected response to be written") - require.Equal(t, dns.RcodeSuccess, writtenResp.Rcode) - require.Len(t, writtenResp.Answer, 1) + resp2 := w2.GetLastResponse() + require.NotNil(t, resp2, "expected response to be written") + require.Equal(t, dns.RcodeSuccess, resp2.Rcode) + require.Len(t, resp2.Answer, 1) mockResolver.AssertExpectations(t) } @@ -715,7 +719,8 @@ func TestDNSForwarder_CacheNormalizationCasingAndDot(t *testing.T) { q1 := &dns.Msg{} q1.SetQuestion(mixedQuery+".", dns.TypeA) w1 := &test.MockResponseWriter{} - resp1 := forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), w1, q1) + forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), w1, q1, time.Now()) + resp1 := w1.GetLastResponse() require.NotNil(t, resp1) require.Equal(t, dns.RcodeSuccess, resp1.Rcode) require.Len(t, resp1.Answer, 1) @@ -727,13 +732,13 @@ func TestDNSForwarder_CacheNormalizationCasingAndDot(t *testing.T) { q2 := &dns.Msg{} q2.SetQuestion("EXAMPLE.COM", dns.TypeA) - var writtenResp *dns.Msg - w2 := &test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { writtenResp = m; return nil }} - _ = forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), w2, q2) + w2 := &test.MockResponseWriter{} + forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), w2, q2, time.Now()) - require.NotNil(t, writtenResp) - require.Equal(t, dns.RcodeSuccess, writtenResp.Rcode) - require.Len(t, writtenResp.Answer, 1) + resp2 := w2.GetLastResponse() + require.NotNil(t, resp2) + require.Equal(t, dns.RcodeSuccess, resp2.Rcode) + require.Len(t, resp2.Answer, 1) mockResolver.AssertExpectations(t) } @@ -784,8 +789,9 @@ func TestDNSForwarder_MultipleOverlappingPatterns(t *testing.T) { query.SetQuestion("smtp.mail.example.com.", dns.TypeA) mockWriter := &test.MockResponseWriter{} - resp := forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, query) + forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, query, time.Now()) + resp := mockWriter.GetLastResponse() require.NotNil(t, resp) assert.Equal(t, dns.RcodeSuccess, resp.Rcode) @@ -897,26 +903,15 @@ func TestDNSForwarder_NodataVsNxdomain(t *testing.T) { query := &dns.Msg{} query.SetQuestion(dns.Fqdn("example.com"), tt.queryType) - var writtenResp *dns.Msg - mockWriter := &test.MockResponseWriter{ - WriteMsgFunc: func(m *dns.Msg) error { - writtenResp = m - return nil - }, - } + mockWriter := &test.MockResponseWriter{} + forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, query, time.Now()) - resp := forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, query) - - // If a response was returned, it means it should be written (happens in wrapper functions) - if resp != nil && writtenResp == nil { - writtenResp = resp - } - - require.NotNil(t, writtenResp, "Expected response to be written") - assert.Equal(t, tt.expectedCode, writtenResp.Rcode, tt.description) + resp := mockWriter.GetLastResponse() + require.NotNil(t, resp, "Expected response to be written") + assert.Equal(t, tt.expectedCode, resp.Rcode, tt.description) if tt.expectNoAnswer { - assert.Empty(t, writtenResp.Answer, "Response should have no answer records") + assert.Empty(t, resp.Answer, "Response should have no answer records") } mockResolver.AssertExpectations(t) @@ -931,15 +926,8 @@ func TestDNSForwarder_EmptyQuery(t *testing.T) { query := &dns.Msg{} // Don't set any question - writeCalled := false - mockWriter := &test.MockResponseWriter{ - WriteMsgFunc: func(m *dns.Msg) error { - writeCalled = true - return nil - }, - } - resp := forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, query) + mockWriter := &test.MockResponseWriter{} + forwarder.handleDNSQuery(log.NewEntry(log.StandardLogger()), mockWriter, query, time.Now()) - assert.Nil(t, resp, "Should return nil for empty query") - assert.False(t, writeCalled, "Should not write response for empty query") + assert.Nil(t, mockWriter.GetLastResponse(), "Should not write response for empty query") } From 0119f3e9f4f87e49af92c3ee5a4a79c7eebe9a1d Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Fri, 6 Feb 2026 17:03:01 +0800 Subject: [PATCH 107/374] [client] Fix netstack detection and add wireguard port option (#5251) - Add WireguardPort option to embed.Options for custom port configuration - Fix KernelInterface detection to account for netstack mode - Skip SSH config updates when running in netstack mode - Skip interface removal wait when running in netstack mode - Use BindListener for netstack to avoid port conflicts on same host --- client/embed/embed.go | 3 +++ client/iface/iface.go | 5 +++++ client/internal/connect.go | 3 ++- client/internal/engine.go | 2 +- client/internal/engine_ssh.go | 9 +++++++++ client/internal/lazyconn/activity/manager.go | 8 +++++--- 6 files changed, 25 insertions(+), 5 deletions(-) diff --git a/client/embed/embed.go b/client/embed/embed.go index e73f37e35..2ad025ff0 100644 --- a/client/embed/embed.go +++ b/client/embed/embed.go @@ -71,6 +71,8 @@ type Options struct { DisableClientRoutes bool // BlockInbound blocks all inbound connections from peers BlockInbound bool + // WireguardPort is the port for the WireGuard interface. Use 0 for a random port. + WireguardPort *int } // validateCredentials checks that exactly one credential type is provided @@ -140,6 +142,7 @@ func New(opts Options) (*Client, error) { DisableServerRoutes: &t, DisableClientRoutes: &opts.DisableClientRoutes, BlockInbound: &opts.BlockInbound, + WireguardPort: opts.WireguardPort, } if opts.ConfigPath != "" { config, err = profilemanager.UpdateOrCreateConfig(input) diff --git a/client/iface/iface.go b/client/iface/iface.go index e5623c979..9b331d68c 100644 --- a/client/iface/iface.go +++ b/client/iface/iface.go @@ -18,6 +18,7 @@ import ( "github.com/netbirdio/netbird/client/errors" "github.com/netbirdio/netbird/client/iface/configurer" "github.com/netbirdio/netbird/client/iface/device" + nbnetstack "github.com/netbirdio/netbird/client/iface/netstack" "github.com/netbirdio/netbird/client/iface/udpmux" "github.com/netbirdio/netbird/client/iface/wgaddr" "github.com/netbirdio/netbird/client/iface/wgproxy" @@ -228,6 +229,10 @@ func (w *WGIface) Close() error { result = multierror.Append(result, fmt.Errorf("failed to close wireguard interface %s: %w", w.Name(), err)) } + if nbnetstack.IsEnabled() { + return errors.FormatErrorOrNil(result) + } + if err := w.waitUntilRemoved(); err != nil { log.Warnf("failed to remove WireGuard interface %s: %v", w.Name(), err) if err := w.Destroy(); err != nil { diff --git a/client/internal/connect.go b/client/internal/connect.go index 7fc3c9a96..17fc20c42 100644 --- a/client/internal/connect.go +++ b/client/internal/connect.go @@ -20,6 +20,7 @@ import ( "github.com/netbirdio/netbird/client/iface" "github.com/netbirdio/netbird/client/iface/device" + "github.com/netbirdio/netbird/client/iface/netstack" "github.com/netbirdio/netbird/client/internal/dns" "github.com/netbirdio/netbird/client/internal/listener" "github.com/netbirdio/netbird/client/internal/peer" @@ -244,7 +245,7 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan localPeerState := peer.LocalPeerState{ IP: loginResp.GetPeerConfig().GetAddress(), PubKey: myPrivateKey.PublicKey().String(), - KernelInterface: device.WireGuardModuleIsLoaded(), + KernelInterface: device.WireGuardModuleIsLoaded() && !netstack.IsEnabled(), FQDN: loginResp.GetPeerConfig().GetFqdn(), } c.statusRecorder.UpdateLocalPeerState(localPeerState) diff --git a/client/internal/engine.go b/client/internal/engine.go index 63ba1c9f2..597ac7c2d 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -1017,7 +1017,7 @@ func (e *Engine) updateConfig(conf *mgmProto.PeerConfig) error { state := e.statusRecorder.GetLocalPeerState() state.IP = e.wgInterface.Address().String() state.PubKey = e.config.WgPrivateKey.PublicKey().String() - state.KernelInterface = device.WireGuardModuleIsLoaded() + state.KernelInterface = !e.wgInterface.IsUserspaceBind() state.FQDN = conf.GetFqdn() e.statusRecorder.UpdateLocalPeerState(state) diff --git a/client/internal/engine_ssh.go b/client/internal/engine_ssh.go index a8c05fe0a..1419bc262 100644 --- a/client/internal/engine_ssh.go +++ b/client/internal/engine_ssh.go @@ -10,6 +10,7 @@ import ( log "github.com/sirupsen/logrus" firewallManager "github.com/netbirdio/netbird/client/firewall/manager" + "github.com/netbirdio/netbird/client/iface/netstack" nftypes "github.com/netbirdio/netbird/client/internal/netflow/types" sshauth "github.com/netbirdio/netbird/client/ssh/auth" sshconfig "github.com/netbirdio/netbird/client/ssh/config" @@ -94,6 +95,10 @@ func (e *Engine) updateSSH(sshConf *mgmProto.SSHConfig) error { // updateSSHClientConfig updates the SSH client configuration with peer information func (e *Engine) updateSSHClientConfig(remotePeers []*mgmProto.RemotePeerConfig) error { + if netstack.IsEnabled() { + return nil + } + peerInfo := e.extractPeerSSHInfo(remotePeers) if len(peerInfo) == 0 { log.Debug("no SSH-enabled peers found, skipping SSH config update") @@ -216,6 +221,10 @@ func (e *Engine) GetPeerSSHKey(peerAddress string) ([]byte, bool) { // cleanupSSHConfig removes NetBird SSH client configuration on shutdown func (e *Engine) cleanupSSHConfig() { + if netstack.IsEnabled() { + return + } + configMgr := sshconfig.New() if err := configMgr.RemoveSSHClientConfig(); err != nil { diff --git a/client/internal/lazyconn/activity/manager.go b/client/internal/lazyconn/activity/manager.go index db283ec9a..1c11378c8 100644 --- a/client/internal/lazyconn/activity/manager.go +++ b/client/internal/lazyconn/activity/manager.go @@ -11,6 +11,7 @@ import ( log "github.com/sirupsen/logrus" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" + "github.com/netbirdio/netbird/client/iface/netstack" "github.com/netbirdio/netbird/client/iface/wgaddr" "github.com/netbirdio/netbird/client/internal/lazyconn" peerid "github.com/netbirdio/netbird/client/internal/peer/id" @@ -74,12 +75,13 @@ func (m *Manager) createListener(peerCfg lazyconn.PeerConfig) (listener, error) return NewUDPListener(m.wgIface, peerCfg) } - // BindListener is only used on Windows and JS platforms: + // BindListener is used on Windows, JS, and netstack platforms: // - JS: Cannot listen to UDP sockets // - Windows: IP_UNICAST_IF socket option forces packets out the interface the default // gateway points to, preventing them from reaching the loopback interface. - // BindListener bypasses this by passing data directly through the bind. - if runtime.GOOS != "windows" && runtime.GOOS != "js" { + // - Netstack: Allows multiple instances on the same host without port conflicts. + // BindListener bypasses these issues by passing data directly through the bind. + if runtime.GOOS != "windows" && runtime.GOOS != "js" && !netstack.IsEnabled() { return NewUDPListener(m.wgIface, peerCfg) } From c3f176f34835151da3a3bf493960b3e8a7857421 Mon Sep 17 00:00:00 2001 From: eyJhb Date: Fri, 6 Feb 2026 11:23:36 +0100 Subject: [PATCH 108/374] [client] Fix wrong URL being logged for DefaultAdminURL (#5252) - DefaultManagementURL was being logged instead of DefaultAdminURL --- client/internal/profilemanager/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/internal/profilemanager/config.go b/client/internal/profilemanager/config.go index f2fda84e0..8f3ff8b11 100644 --- a/client/internal/profilemanager/config.go +++ b/client/internal/profilemanager/config.go @@ -252,7 +252,7 @@ func (config *Config) apply(input ConfigInput) (updated bool, err error) { } if config.AdminURL == nil { - log.Infof("using default Admin URL %s", DefaultManagementURL) + log.Infof("using default Admin URL %s", DefaultAdminURL) config.AdminURL, err = parseURL("Admin URL", DefaultAdminURL) if err != nil { return false, err From af8f730bdac9109b02f3432efd6b6d24a251cea9 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Fri, 6 Feb 2026 18:00:43 +0100 Subject: [PATCH 109/374] [management] check stream start time for connecting peer (#5267) --- management/internals/shared/grpc/server.go | 10 +++--- management/server/account.go | 6 ++-- management/server/account/manager.go | 4 +-- management/server/account_test.go | 33 +++++++++++++++---- management/server/mock_server/account_mock.go | 12 +++---- management/server/peer.go | 20 ++++++++--- 6 files changed, 58 insertions(+), 27 deletions(-) diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index befcd2adf..98c68ebda 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -300,20 +300,18 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S metahash := metaHash(peerMeta, realIP.String()) s.loginFilter.addLogin(peerKey.String(), metahash) - peer, netMap, postureChecks, dnsFwdPort, err := s.accountManager.SyncAndMarkPeer(ctx, accountID, peerKey.String(), peerMeta, realIP) + peer, netMap, postureChecks, dnsFwdPort, err := s.accountManager.SyncAndMarkPeer(ctx, accountID, peerKey.String(), peerMeta, realIP, reqStart) if err != nil { log.WithContext(ctx).Debugf("error while syncing peer %s: %v", peerKey.String(), err) s.syncSem.Add(-1) return mapError(ctx, err) } - streamStartTime := time.Now().UTC() - err = s.sendInitialSync(ctx, peerKey, peer, netMap, postureChecks, srv, dnsFwdPort) if err != nil { log.WithContext(ctx).Debugf("error while sending initial sync for %s: %v", peerKey.String(), err) s.syncSem.Add(-1) - s.cancelPeerRoutinesWithoutLock(ctx, accountID, peer, streamStartTime) + s.cancelPeerRoutinesWithoutLock(ctx, accountID, peer, reqStart) return err } @@ -321,7 +319,7 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S if err != nil { log.WithContext(ctx).Debugf("error while notify peer connected for %s: %v", peerKey.String(), err) s.syncSem.Add(-1) - s.cancelPeerRoutinesWithoutLock(ctx, accountID, peer, streamStartTime) + s.cancelPeerRoutinesWithoutLock(ctx, accountID, peer, reqStart) return err } @@ -338,7 +336,7 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S s.syncSem.Add(-1) - return s.handleUpdates(ctx, accountID, peerKey, peer, updates, srv, streamStartTime) + return s.handleUpdates(ctx, accountID, peerKey, peer, updates, srv, reqStart) } func (s *Server) handleHandshake(ctx context.Context, srv proto.ManagementService_JobServer) (wgtypes.Key, error) { diff --git a/management/server/account.go b/management/server/account.go index 4f53415f5..a9f59773a 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -1670,13 +1670,13 @@ func domainIsUpToDate(domain string, domainCategory string, userAuth auth.UserAu return domainCategory == types.PrivateCategory || userAuth.DomainCategory != types.PrivateCategory || domain != userAuth.Domain } -func (am *DefaultAccountManager) SyncAndMarkPeer(ctx context.Context, accountID string, peerPubKey string, meta nbpeer.PeerSystemMeta, realIP net.IP) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, int64, error) { +func (am *DefaultAccountManager) SyncAndMarkPeer(ctx context.Context, accountID string, peerPubKey string, meta nbpeer.PeerSystemMeta, realIP net.IP, syncTime time.Time) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, int64, error) { peer, netMap, postureChecks, dnsfwdPort, err := am.SyncPeer(ctx, types.PeerSync{WireGuardPubKey: peerPubKey, Meta: meta}, accountID) if err != nil { return nil, nil, nil, 0, fmt.Errorf("error syncing peer: %w", err) } - err = am.MarkPeerConnected(ctx, peerPubKey, true, realIP, accountID) + err = am.MarkPeerConnected(ctx, peerPubKey, true, realIP, accountID, syncTime) if err != nil { log.WithContext(ctx).Warnf("failed marking peer as connected %s %v", peerPubKey, err) } @@ -1697,7 +1697,7 @@ func (am *DefaultAccountManager) OnPeerDisconnected(ctx context.Context, account return nil } - err = am.MarkPeerConnected(ctx, peerPubKey, false, nil, accountID) + err = am.MarkPeerConnected(ctx, peerPubKey, false, nil, accountID, time.Now().UTC()) if err != nil { log.WithContext(ctx).Warnf("failed marking peer as disconnected %s %v", peerPubKey, err) } diff --git a/management/server/account/manager.go b/management/server/account/manager.go index eed7739da..1d25b0af7 100644 --- a/management/server/account/manager.go +++ b/management/server/account/manager.go @@ -58,7 +58,7 @@ type Manager interface { GetUserFromUserAuth(ctx context.Context, userAuth auth.UserAuth) (*types.User, error) ListUsers(ctx context.Context, accountID string) ([]*types.User, error) GetPeers(ctx context.Context, accountID, userID, nameFilter, ipFilter string) ([]*nbpeer.Peer, error) - MarkPeerConnected(ctx context.Context, peerKey string, connected bool, realIP net.IP, accountID string) error + MarkPeerConnected(ctx context.Context, peerKey string, connected bool, realIP net.IP, accountID string, syncTime time.Time) error DeletePeer(ctx context.Context, accountID, peerID, userID string) error UpdatePeer(ctx context.Context, accountID, userID string, peer *nbpeer.Peer) (*nbpeer.Peer, error) UpdatePeerIP(ctx context.Context, accountID, userID, peerID string, newIP netip.Addr) error @@ -114,7 +114,7 @@ type Manager interface { UpdateIntegratedValidator(ctx context.Context, accountID, userID, validator string, groups []string) error GroupValidation(ctx context.Context, accountId string, groups []string) (bool, error) GetValidatedPeers(ctx context.Context, accountID string) (map[string]struct{}, map[string]string, error) - SyncAndMarkPeer(ctx context.Context, accountID string, peerPubKey string, meta nbpeer.PeerSystemMeta, realIP net.IP) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, int64, error) + SyncAndMarkPeer(ctx context.Context, accountID string, peerPubKey string, meta nbpeer.PeerSystemMeta, realIP net.IP, syncTime time.Time) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, int64, error) OnPeerDisconnected(ctx context.Context, accountID string, peerPubKey string, streamStartTime time.Time) error SyncPeerMeta(ctx context.Context, peerPubKey string, meta nbpeer.PeerSystemMeta) error FindExistingPostureCheck(accountID string, checks *posture.ChecksDefinition) (*posture.Checks, error) diff --git a/management/server/account_test.go b/management/server/account_test.go index f3d98916c..443e6344e 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -1881,7 +1881,7 @@ func TestDefaultAccountManager_UpdatePeer_PeerLoginExpiration(t *testing.T) { accountID, err := manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userID}) require.NoError(t, err, "unable to get the account") - err = manager.MarkPeerConnected(context.Background(), key.PublicKey().String(), true, nil, accountID) + err = manager.MarkPeerConnected(context.Background(), key.PublicKey().String(), true, nil, accountID, time.Now().UTC()) require.NoError(t, err, "unable to mark peer connected") _, err = manager.UpdateAccountSettings(context.Background(), accountID, userID, &types.Settings{ @@ -1952,7 +1952,7 @@ func TestDefaultAccountManager_MarkPeerConnected_PeerLoginExpiration(t *testing. require.NoError(t, err, "unable to get the account") // when we mark peer as connected, the peer login expiration routine should trigger - err = manager.MarkPeerConnected(context.Background(), key.PublicKey().String(), true, nil, accountID) + err = manager.MarkPeerConnected(context.Background(), key.PublicKey().String(), true, nil, accountID, time.Now().UTC()) require.NoError(t, err, "unable to mark peer connected") failed := waitTimeout(wg, time.Second) @@ -1979,7 +1979,7 @@ func TestDefaultAccountManager_OnPeerDisconnected_LastSeenCheck(t *testing.T) { require.NoError(t, err, "unable to add peer") t.Run("disconnect peer when streamStartTime is after LastSeen", func(t *testing.T) { - err = manager.MarkPeerConnected(context.Background(), peerPubKey, true, nil, accountID) + err = manager.MarkPeerConnected(context.Background(), peerPubKey, true, nil, accountID, time.Now().UTC()) require.NoError(t, err, "unable to mark peer connected") peer, err := manager.Store.GetPeerByPeerPubKey(context.Background(), store.LockingStrengthNone, peerPubKey) @@ -1997,7 +1997,7 @@ func TestDefaultAccountManager_OnPeerDisconnected_LastSeenCheck(t *testing.T) { }) t.Run("skip disconnect when LastSeen is after streamStartTime (zombie stream protection)", func(t *testing.T) { - err = manager.MarkPeerConnected(context.Background(), peerPubKey, true, nil, accountID) + err = manager.MarkPeerConnected(context.Background(), peerPubKey, true, nil, accountID, time.Now().UTC()) require.NoError(t, err, "unable to mark peer connected") peer, err := manager.Store.GetPeerByPeerPubKey(context.Background(), store.LockingStrengthNone, peerPubKey) @@ -2014,6 +2014,27 @@ func TestDefaultAccountManager_OnPeerDisconnected_LastSeenCheck(t *testing.T) { require.True(t, peer.Status.Connected, "peer should remain connected because LastSeen > streamStartTime (zombie stream protection)") }) + + t.Run("skip stale connect when peer already has newer LastSeen (blocked goroutine protection)", func(t *testing.T) { + node2SyncTime := time.Now().UTC() + err = manager.MarkPeerConnected(context.Background(), peerPubKey, true, nil, accountID, node2SyncTime) + require.NoError(t, err, "node 2 should connect peer") + + peer, err := manager.Store.GetPeerByPeerPubKey(context.Background(), store.LockingStrengthNone, peerPubKey) + require.NoError(t, err) + require.True(t, peer.Status.Connected, "peer should be connected") + require.Equal(t, node2SyncTime.Unix(), peer.Status.LastSeen.Unix(), "LastSeen should be node2SyncTime") + + node1StaleSyncTime := node2SyncTime.Add(-1 * time.Minute) + err = manager.MarkPeerConnected(context.Background(), peerPubKey, true, nil, accountID, node1StaleSyncTime) + require.NoError(t, err, "stale connect should not return error") + + peer, err = manager.Store.GetPeerByPeerPubKey(context.Background(), store.LockingStrengthNone, peerPubKey) + require.NoError(t, err) + require.True(t, peer.Status.Connected, "peer should still be connected") + require.Equal(t, node2SyncTime.Unix(), peer.Status.LastSeen.Unix(), + "LastSeen should NOT be overwritten by stale syncTime from blocked goroutine") + }) } func TestDefaultAccountManager_UpdateAccountSettings_PeerLoginExpiration(t *testing.T) { @@ -2038,7 +2059,7 @@ func TestDefaultAccountManager_UpdateAccountSettings_PeerLoginExpiration(t *test account, err := manager.Store.GetAccount(context.Background(), accountID) require.NoError(t, err, "unable to get the account") - err = manager.MarkPeerConnected(context.Background(), key.PublicKey().String(), true, nil, accountID) + err = manager.MarkPeerConnected(context.Background(), key.PublicKey().String(), true, nil, accountID, time.Now().UTC()) require.NoError(t, err, "unable to mark peer connected") wg := &sync.WaitGroup{} @@ -3231,7 +3252,7 @@ func BenchmarkSyncAndMarkPeer(b *testing.B) { b.ResetTimer() start := time.Now() for i := 0; i < b.N; i++ { - _, _, _, _, err := manager.SyncAndMarkPeer(context.Background(), account.Id, account.Peers["peer-1"].Key, nbpeer.PeerSystemMeta{Hostname: strconv.Itoa(i)}, net.IP{1, 1, 1, 1}) + _, _, _, _, err := manager.SyncAndMarkPeer(context.Background(), account.Id, account.Peers["peer-1"].Key, nbpeer.PeerSystemMeta{Hostname: strconv.Itoa(i)}, net.IP{1, 1, 1, 1}, time.Now().UTC()) assert.NoError(b, err) } diff --git a/management/server/mock_server/account_mock.go b/management/server/mock_server/account_mock.go index a4754d180..8471d0a94 100644 --- a/management/server/mock_server/account_mock.go +++ b/management/server/mock_server/account_mock.go @@ -37,8 +37,8 @@ type MockAccountManager struct { GetUserFromUserAuthFunc func(ctx context.Context, userAuth auth.UserAuth) (*types.User, error) ListUsersFunc func(ctx context.Context, accountID string) ([]*types.User, error) GetPeersFunc func(ctx context.Context, accountID, userID, nameFilter, ipFilter string) ([]*nbpeer.Peer, error) - MarkPeerConnectedFunc func(ctx context.Context, peerKey string, connected bool, realIP net.IP) error - SyncAndMarkPeerFunc func(ctx context.Context, accountID string, peerPubKey string, meta nbpeer.PeerSystemMeta, realIP net.IP) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, int64, error) + MarkPeerConnectedFunc func(ctx context.Context, peerKey string, connected bool, realIP net.IP, syncTime time.Time) error + SyncAndMarkPeerFunc func(ctx context.Context, accountID string, peerPubKey string, meta nbpeer.PeerSystemMeta, realIP net.IP, syncTime time.Time) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, int64, error) DeletePeerFunc func(ctx context.Context, accountID, peerKey, userID string) error GetNetworkMapFunc func(ctx context.Context, peerKey string) (*types.NetworkMap, error) GetPeerNetworkFunc func(ctx context.Context, peerKey string) (*types.Network, error) @@ -214,9 +214,9 @@ func (am *MockAccountManager) DeleteSetupKey(ctx context.Context, accountID, use return status.Errorf(codes.Unimplemented, "method DeleteSetupKey is not implemented") } -func (am *MockAccountManager) SyncAndMarkPeer(ctx context.Context, accountID string, peerPubKey string, meta nbpeer.PeerSystemMeta, realIP net.IP) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, int64, error) { +func (am *MockAccountManager) SyncAndMarkPeer(ctx context.Context, accountID string, peerPubKey string, meta nbpeer.PeerSystemMeta, realIP net.IP, syncTime time.Time) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, int64, error) { if am.SyncAndMarkPeerFunc != nil { - return am.SyncAndMarkPeerFunc(ctx, accountID, peerPubKey, meta, realIP) + return am.SyncAndMarkPeerFunc(ctx, accountID, peerPubKey, meta, realIP, syncTime) } return nil, nil, nil, 0, status.Errorf(codes.Unimplemented, "method MarkPeerConnected is not implemented") } @@ -322,9 +322,9 @@ func (am *MockAccountManager) GetAccountIDByUserID(ctx context.Context, userAuth } // MarkPeerConnected mock implementation of MarkPeerConnected from server.AccountManager interface -func (am *MockAccountManager) MarkPeerConnected(ctx context.Context, peerKey string, connected bool, realIP net.IP, accountID string) error { +func (am *MockAccountManager) MarkPeerConnected(ctx context.Context, peerKey string, connected bool, realIP net.IP, accountID string, syncTime time.Time) error { if am.MarkPeerConnectedFunc != nil { - return am.MarkPeerConnectedFunc(ctx, peerKey, connected, realIP) + return am.MarkPeerConnectedFunc(ctx, peerKey, connected, realIP, syncTime) } return status.Errorf(codes.Unimplemented, "method MarkPeerConnected is not implemented") } diff --git a/management/server/peer.go b/management/server/peer.go index ab72d3051..a4bdc784d 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -103,11 +103,13 @@ func (am *DefaultAccountManager) getUserAccessiblePeers(ctx context.Context, acc } // MarkPeerConnected marks peer as connected (true) or disconnected (false) -func (am *DefaultAccountManager) MarkPeerConnected(ctx context.Context, peerPubKey string, connected bool, realIP net.IP, accountID string) error { +// syncTime is used as the LastSeen timestamp and for stale request detection +func (am *DefaultAccountManager) MarkPeerConnected(ctx context.Context, peerPubKey string, connected bool, realIP net.IP, accountID string, syncTime time.Time) error { var peer *nbpeer.Peer var settings *types.Settings var expired bool var err error + var skipped bool err = am.Store.ExecuteInTransaction(ctx, func(transaction store.Store) error { peer, err = transaction.GetPeerByPeerPubKey(ctx, store.LockingStrengthUpdate, peerPubKey) @@ -115,9 +117,19 @@ func (am *DefaultAccountManager) MarkPeerConnected(ctx context.Context, peerPubK return err } - expired, err = updatePeerStatusAndLocation(ctx, am.geo, transaction, peer, connected, realIP, accountID) + if connected && !syncTime.After(peer.Status.LastSeen) { + log.WithContext(ctx).Tracef("peer %s has newer activity (lastSeen=%s >= syncTime=%s), skipping connect", + peer.ID, peer.Status.LastSeen.Format(time.RFC3339), syncTime.Format(time.RFC3339)) + skipped = true + return nil + } + + expired, err = updatePeerStatusAndLocation(ctx, am.geo, transaction, peer, connected, realIP, accountID, syncTime) return err }) + if skipped { + return nil + } if err != nil { return err } @@ -147,10 +159,10 @@ func (am *DefaultAccountManager) MarkPeerConnected(ctx context.Context, peerPubK return nil } -func updatePeerStatusAndLocation(ctx context.Context, geo geolocation.Geolocation, transaction store.Store, peer *nbpeer.Peer, connected bool, realIP net.IP, accountID string) (bool, error) { +func updatePeerStatusAndLocation(ctx context.Context, geo geolocation.Geolocation, transaction store.Store, peer *nbpeer.Peer, connected bool, realIP net.IP, accountID string, syncTime time.Time) (bool, error) { oldStatus := peer.Status.Copy() newStatus := oldStatus - newStatus.LastSeen = time.Now().UTC() + newStatus.LastSeen = syncTime newStatus.Connected = connected // whenever peer got connected that means that it logged in successfully if newStatus.Connected { From 3be16d19a0d5167d2ceeaac321b987b17f3120f4 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Fri, 6 Feb 2026 19:47:38 +0100 Subject: [PATCH 110/374] [management] Feature/grpc debounce msgtype (#5239) * Add gRPC update debouncing mechanism Implements backpressure handling for peer network map updates to efficiently handle rapid changes. First update is sent immediately, subsequent rapid updates are coalesced, ensuring only the latest update is sent after a 1-second quiet period. * Enhance unit test to verify peer count synchronization with debouncing and timeout handling * Debounce based on type * Refactor test to validate timer restart after pending update dispatch * Simplify timer reset for Go 1.23+ automatic channel draining Remove manual channel drain in resetTimer() since Go 1.23+ automatically drains the timer channel when Stop() returns false, making the select-case pattern unnecessary. --- .../network_map/controller/controller.go | 11 +- .../update_channel/updatechannel_test.go | 22 +- .../controllers/network_map/update_message.go | 15 +- management/internals/shared/grpc/server.go | 33 +- management/internals/shared/grpc/token_mgr.go | 10 +- .../internals/shared/grpc/update_debouncer.go | 103 +++ .../shared/grpc/update_debouncer_test.go | 587 ++++++++++++++++++ management/server/management_test.go | 59 +- 8 files changed, 818 insertions(+), 22 deletions(-) create mode 100644 management/internals/shared/grpc/update_debouncer.go create mode 100644 management/internals/shared/grpc/update_debouncer_test.go diff --git a/management/internals/controllers/network_map/controller/controller.go b/management/internals/controllers/network_map/controller/controller.go index 5ae64e9f1..3e28e1380 100644 --- a/management/internals/controllers/network_map/controller/controller.go +++ b/management/internals/controllers/network_map/controller/controller.go @@ -247,7 +247,10 @@ func (c *Controller) sendUpdateAccountPeers(ctx context.Context, accountID strin update := grpc.ToSyncResponse(ctx, nil, c.config.HttpConfig, c.config.DeviceAuthorizationFlow, p, nil, nil, remotePeerNetworkMap, dnsDomain, postureChecks, dnsCache, account.Settings, extraSetting, maps.Keys(peerGroups), dnsFwdPort) c.metrics.CountToSyncResponseDuration(time.Since(start)) - c.peersUpdateManager.SendUpdate(ctx, p.ID, &network_map.UpdateMessage{Update: update}) + c.peersUpdateManager.SendUpdate(ctx, p.ID, &network_map.UpdateMessage{ + Update: update, + MessageType: network_map.MessageTypeNetworkMap, + }) }(peer) } @@ -370,7 +373,10 @@ func (c *Controller) UpdateAccountPeer(ctx context.Context, accountId string, pe dnsFwdPort := computeForwarderPort(maps.Values(account.Peers), network_map.DnsForwarderPortMinVersion) update := grpc.ToSyncResponse(ctx, nil, c.config.HttpConfig, c.config.DeviceAuthorizationFlow, peer, nil, nil, remotePeerNetworkMap, dnsDomain, postureChecks, dnsCache, account.Settings, extraSettings, maps.Keys(peerGroups), dnsFwdPort) - c.peersUpdateManager.SendUpdate(ctx, peer.ID, &network_map.UpdateMessage{Update: update}) + c.peersUpdateManager.SendUpdate(ctx, peer.ID, &network_map.UpdateMessage{ + Update: update, + MessageType: network_map.MessageTypeNetworkMap, + }) return nil } @@ -778,6 +784,7 @@ func (c *Controller) OnPeersDeleted(ctx context.Context, accountID string, peerI }, }, }, + MessageType: network_map.MessageTypeNetworkMap, }) c.peersUpdateManager.CloseChannel(ctx, peerID) diff --git a/management/internals/controllers/network_map/update_channel/updatechannel_test.go b/management/internals/controllers/network_map/update_channel/updatechannel_test.go index afc1e2c32..c73baf81f 100644 --- a/management/internals/controllers/network_map/update_channel/updatechannel_test.go +++ b/management/internals/controllers/network_map/update_channel/updatechannel_test.go @@ -25,11 +25,14 @@ func TestCreateChannel(t *testing.T) { func TestSendUpdate(t *testing.T) { peer := "test-sendupdate" peersUpdater := NewPeersUpdateManager(nil) - update1 := &network_map.UpdateMessage{Update: &proto.SyncResponse{ - NetworkMap: &proto.NetworkMap{ - Serial: 0, + update1 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{ + NetworkMap: &proto.NetworkMap{ + Serial: 0, + }, }, - }} + MessageType: network_map.MessageTypeNetworkMap, + } _ = peersUpdater.CreateChannel(context.Background(), peer) if _, ok := peersUpdater.peerChannels[peer]; !ok { t.Error("Error creating the channel") @@ -45,11 +48,14 @@ func TestSendUpdate(t *testing.T) { peersUpdater.SendUpdate(context.Background(), peer, update1) } - update2 := &network_map.UpdateMessage{Update: &proto.SyncResponse{ - NetworkMap: &proto.NetworkMap{ - Serial: 10, + update2 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{ + NetworkMap: &proto.NetworkMap{ + Serial: 10, + }, }, - }} + MessageType: network_map.MessageTypeNetworkMap, + } peersUpdater.SendUpdate(context.Background(), peer, update2) timeout := time.After(5 * time.Second) diff --git a/management/internals/controllers/network_map/update_message.go b/management/internals/controllers/network_map/update_message.go index 33643bcbd..0ffddf8b2 100644 --- a/management/internals/controllers/network_map/update_message.go +++ b/management/internals/controllers/network_map/update_message.go @@ -4,6 +4,19 @@ import ( "github.com/netbirdio/netbird/shared/management/proto" ) +// MessageType indicates the type of update message for debouncing strategy +type MessageType int + +const ( + // MessageTypeNetworkMap represents network map updates (peers, routes, DNS, firewall) + // These updates can be safely debounced - only the latest state matters + MessageTypeNetworkMap MessageType = iota + // MessageTypeControlConfig represents control/config updates (tokens, peer expiration) + // These updates should not be dropped as they contain time-sensitive information + MessageTypeControlConfig +) + type UpdateMessage struct { - Update *proto.SyncResponse + Update *proto.SyncResponse + MessageType MessageType } diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index 98c68ebda..ff9d7ea05 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -404,11 +404,20 @@ func (s *Server) sendJobsLoop(ctx context.Context, accountID string, peerKey wgt } // handleUpdates sends updates to the connected peer until the updates channel is closed. +// It implements a backpressure mechanism that sends the first update immediately, +// then debounces subsequent rapid updates, ensuring only the latest update is sent +// after a quiet period. func (s *Server) handleUpdates(ctx context.Context, accountID string, peerKey wgtypes.Key, peer *nbpeer.Peer, updates chan *network_map.UpdateMessage, srv proto.ManagementService_SyncServer, streamStartTime time.Time) error { log.WithContext(ctx).Tracef("starting to handle updates for peer %s", peerKey.String()) + + // Create a debouncer for this peer connection + debouncer := NewUpdateDebouncer(1000 * time.Millisecond) + defer debouncer.Stop() + for { select { // condition when there are some updates + // todo set the updates channel size to 1 case update, open := <-updates: if s.appMetrics != nil { s.appMetrics.GRPCMetrics().UpdateChannelQueueLength(len(updates) + 1) @@ -419,10 +428,28 @@ func (s *Server) handleUpdates(ctx context.Context, accountID string, peerKey wg s.cancelPeerRoutines(ctx, accountID, peer, streamStartTime) return nil } + log.WithContext(ctx).Debugf("received an update for peer %s", peerKey.String()) - if err := s.sendUpdate(ctx, accountID, peerKey, peer, update, srv, streamStartTime); err != nil { - log.WithContext(ctx).Debugf("error while sending an update to peer %s: %v", peerKey.String(), err) - return err + if debouncer.ProcessUpdate(update) { + // Send immediately (first update or after quiet period) + if err := s.sendUpdate(ctx, accountID, peerKey, peer, update, srv, streamStartTime); err != nil { + log.WithContext(ctx).Debugf("error while sending an update to peer %s: %v", peerKey.String(), err) + return err + } + } + + // Timer expired - quiet period reached, send pending updates if any + case <-debouncer.TimerChannel(): + pendingUpdates := debouncer.GetPendingUpdates() + if len(pendingUpdates) == 0 { + continue + } + log.WithContext(ctx).Debugf("sending %d debounced update(s) for peer %s", len(pendingUpdates), peerKey.String()) + for _, pendingUpdate := range pendingUpdates { + if err := s.sendUpdate(ctx, accountID, peerKey, peer, pendingUpdate, srv, streamStartTime); err != nil { + log.WithContext(ctx).Debugf("error while sending an update to peer %s: %v", peerKey.String(), err) + return err + } } // condition when client <-> server connection has been terminated diff --git a/management/internals/shared/grpc/token_mgr.go b/management/internals/shared/grpc/token_mgr.go index ccb32202f..65e58ad41 100644 --- a/management/internals/shared/grpc/token_mgr.go +++ b/management/internals/shared/grpc/token_mgr.go @@ -242,7 +242,10 @@ func (m *TimeBasedAuthSecretsManager) pushNewTURNAndRelayTokens(ctx context.Cont m.extendNetbirdConfig(ctx, peerID, accountID, update) log.WithContext(ctx).Debugf("sending new TURN credentials to peer %s", peerID) - m.updateManager.SendUpdate(ctx, peerID, &network_map.UpdateMessage{Update: update}) + m.updateManager.SendUpdate(ctx, peerID, &network_map.UpdateMessage{ + Update: update, + MessageType: network_map.MessageTypeControlConfig, + }) } func (m *TimeBasedAuthSecretsManager) pushNewRelayTokens(ctx context.Context, accountID, peerID string) { @@ -266,7 +269,10 @@ func (m *TimeBasedAuthSecretsManager) pushNewRelayTokens(ctx context.Context, ac m.extendNetbirdConfig(ctx, peerID, accountID, update) log.WithContext(ctx).Debugf("sending new relay credentials to peer %s", peerID) - m.updateManager.SendUpdate(ctx, peerID, &network_map.UpdateMessage{Update: update}) + m.updateManager.SendUpdate(ctx, peerID, &network_map.UpdateMessage{ + Update: update, + MessageType: network_map.MessageTypeControlConfig, + }) } func (m *TimeBasedAuthSecretsManager) extendNetbirdConfig(ctx context.Context, peerID, accountID string, update *proto.SyncResponse) { diff --git a/management/internals/shared/grpc/update_debouncer.go b/management/internals/shared/grpc/update_debouncer.go new file mode 100644 index 000000000..8af9c2656 --- /dev/null +++ b/management/internals/shared/grpc/update_debouncer.go @@ -0,0 +1,103 @@ +package grpc + +import ( + "time" + + "github.com/netbirdio/netbird/management/internals/controllers/network_map" +) + +// UpdateDebouncer implements a backpressure mechanism that: +// - Sends the first update immediately +// - Coalesces rapid subsequent network map updates (only latest matters) +// - Queues control/config updates (all must be delivered) +// - Preserves the order of messages (important for control configs between network maps) +// - Ensures pending updates are sent after a quiet period +type UpdateDebouncer struct { + debounceInterval time.Duration + timer *time.Timer + pendingUpdates []*network_map.UpdateMessage // Queue that preserves order + timerC <-chan time.Time +} + +// NewUpdateDebouncer creates a new debouncer with the specified interval +func NewUpdateDebouncer(interval time.Duration) *UpdateDebouncer { + return &UpdateDebouncer{ + debounceInterval: interval, + } +} + +// ProcessUpdate handles an incoming update and returns whether it should be sent immediately +func (d *UpdateDebouncer) ProcessUpdate(update *network_map.UpdateMessage) bool { + if d.timer == nil { + // No active debounce timer, signal to send immediately + // and start the debounce period + d.startTimer() + return true + } + + // Already in debounce period, accumulate this update preserving order + // Check if we should coalesce with the last pending update + if len(d.pendingUpdates) > 0 && + update.MessageType == network_map.MessageTypeNetworkMap && + d.pendingUpdates[len(d.pendingUpdates)-1].MessageType == network_map.MessageTypeNetworkMap { + // Replace the last network map with this one (coalesce consecutive network maps) + d.pendingUpdates[len(d.pendingUpdates)-1] = update + } else { + // Append to the queue (preserves order for control configs and non-consecutive network maps) + d.pendingUpdates = append(d.pendingUpdates, update) + } + d.resetTimer() + return false +} + +// TimerChannel returns the timer channel for select statements +func (d *UpdateDebouncer) TimerChannel() <-chan time.Time { + if d.timer == nil { + return nil + } + return d.timerC +} + +// GetPendingUpdates returns and clears all pending updates after timer expiration. +// Updates are returned in the order they were received, with consecutive network maps +// already coalesced to only the latest one. +// If there were pending updates, it restarts the timer to continue debouncing. +// If there were no pending updates, it clears the timer (true quiet period). +func (d *UpdateDebouncer) GetPendingUpdates() []*network_map.UpdateMessage { + updates := d.pendingUpdates + d.pendingUpdates = nil + + if len(updates) > 0 { + // There were pending updates, so updates are still coming rapidly + // Restart the timer to continue debouncing mode + if d.timer != nil { + d.timer.Reset(d.debounceInterval) + } + } else { + // No pending updates means true quiet period - return to immediate mode + d.timer = nil + d.timerC = nil + } + + return updates +} + +// Stop stops the debouncer and cleans up resources +func (d *UpdateDebouncer) Stop() { + if d.timer != nil { + d.timer.Stop() + d.timer = nil + d.timerC = nil + } + d.pendingUpdates = nil +} + +func (d *UpdateDebouncer) startTimer() { + d.timer = time.NewTimer(d.debounceInterval) + d.timerC = d.timer.C +} + +func (d *UpdateDebouncer) resetTimer() { + d.timer.Stop() + d.timer.Reset(d.debounceInterval) +} diff --git a/management/internals/shared/grpc/update_debouncer_test.go b/management/internals/shared/grpc/update_debouncer_test.go new file mode 100644 index 000000000..075994a2d --- /dev/null +++ b/management/internals/shared/grpc/update_debouncer_test.go @@ -0,0 +1,587 @@ +package grpc + +import ( + "testing" + "time" + + "github.com/netbirdio/netbird/management/internals/controllers/network_map" + "github.com/netbirdio/netbird/shared/management/proto" +) + +func TestUpdateDebouncer_FirstUpdateSentImmediately(t *testing.T) { + debouncer := NewUpdateDebouncer(50 * time.Millisecond) + defer debouncer.Stop() + + update := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{}, + MessageType: network_map.MessageTypeNetworkMap, + } + + shouldSend := debouncer.ProcessUpdate(update) + + if !shouldSend { + t.Error("First update should be sent immediately") + } + + if debouncer.TimerChannel() == nil { + t.Error("Timer should be started after first update") + } +} + +func TestUpdateDebouncer_RapidUpdatesCoalesced(t *testing.T) { + debouncer := NewUpdateDebouncer(50 * time.Millisecond) + defer debouncer.Stop() + + update1 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{}, + MessageType: network_map.MessageTypeNetworkMap, + } + update2 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{}, + MessageType: network_map.MessageTypeNetworkMap, + } + update3 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{}, + MessageType: network_map.MessageTypeNetworkMap, + } + + // First update should be sent immediately + if !debouncer.ProcessUpdate(update1) { + t.Error("First update should be sent immediately") + } + + // Rapid subsequent updates should be coalesced + if debouncer.ProcessUpdate(update2) { + t.Error("Second rapid update should not be sent immediately") + } + + if debouncer.ProcessUpdate(update3) { + t.Error("Third rapid update should not be sent immediately") + } + + // Wait for debounce period + select { + case <-debouncer.TimerChannel(): + pendingUpdates := debouncer.GetPendingUpdates() + if len(pendingUpdates) != 1 { + t.Errorf("Should get exactly 1 pending update, got %d", len(pendingUpdates)) + } + if pendingUpdates[0] != update3 { + t.Error("Should get the last update (update3)") + } + case <-time.After(100 * time.Millisecond): + t.Error("Timer should have fired") + } +} + +func TestUpdateDebouncer_LastUpdateAlwaysSent(t *testing.T) { + debouncer := NewUpdateDebouncer(30 * time.Millisecond) + defer debouncer.Stop() + + update1 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{}, + MessageType: network_map.MessageTypeNetworkMap, + } + update2 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{}, + MessageType: network_map.MessageTypeNetworkMap, + } + + // Send first update + debouncer.ProcessUpdate(update1) + + // Send second update within debounce period + debouncer.ProcessUpdate(update2) + + // Wait for timer + select { + case <-debouncer.TimerChannel(): + pendingUpdates := debouncer.GetPendingUpdates() + if len(pendingUpdates) != 1 { + t.Errorf("Should get exactly 1 pending update, got %d", len(pendingUpdates)) + } + if pendingUpdates[0] != update2 { + t.Error("Should get the last update") + } + if pendingUpdates[0] == update1 { + t.Error("Should not get the first update") + } + case <-time.After(100 * time.Millisecond): + t.Error("Timer should have fired") + } +} + +func TestUpdateDebouncer_TimerResetOnNewUpdate(t *testing.T) { + debouncer := NewUpdateDebouncer(50 * time.Millisecond) + defer debouncer.Stop() + + update1 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{}, + MessageType: network_map.MessageTypeNetworkMap, + } + update2 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{}, + MessageType: network_map.MessageTypeNetworkMap, + } + update3 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{}, + MessageType: network_map.MessageTypeNetworkMap, + } + + // Send first update + debouncer.ProcessUpdate(update1) + + // Wait a bit, but not the full debounce period + time.Sleep(30 * time.Millisecond) + + // Send second update - should reset timer + debouncer.ProcessUpdate(update2) + + // Wait a bit more + time.Sleep(30 * time.Millisecond) + + // Send third update - should reset timer again + debouncer.ProcessUpdate(update3) + + // Now wait for the timer (should fire after last update's reset) + select { + case <-debouncer.TimerChannel(): + pendingUpdates := debouncer.GetPendingUpdates() + if len(pendingUpdates) != 1 { + t.Errorf("Should get exactly 1 pending update, got %d", len(pendingUpdates)) + } + if pendingUpdates[0] != update3 { + t.Error("Should get the last update (update3)") + } + // Timer should be restarted since there was a pending update + if debouncer.TimerChannel() == nil { + t.Error("Timer should be restarted after sending pending update") + } + case <-time.After(150 * time.Millisecond): + t.Error("Timer should have fired") + } +} + +func TestUpdateDebouncer_TimerRestartsAfterPendingUpdateSent(t *testing.T) { + debouncer := NewUpdateDebouncer(30 * time.Millisecond) + defer debouncer.Stop() + + update1 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{}, + MessageType: network_map.MessageTypeNetworkMap, + } + update2 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{}, + MessageType: network_map.MessageTypeNetworkMap, + } + update3 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{}, + MessageType: network_map.MessageTypeNetworkMap, + } + + // First update sent immediately + debouncer.ProcessUpdate(update1) + + // Second update coalesced + debouncer.ProcessUpdate(update2) + + // Wait for timer to expire + select { + case <-debouncer.TimerChannel(): + pendingUpdates := debouncer.GetPendingUpdates() + + if len(pendingUpdates) == 0 { + t.Fatal("Should have pending update") + } + + // After sending pending update, timer is restarted, so next update is NOT immediate + if debouncer.ProcessUpdate(update3) { + t.Error("Update after debounced send should not be sent immediately (timer restarted)") + } + + // Wait for the restarted timer and verify update3 is pending + select { + case <-debouncer.TimerChannel(): + finalUpdates := debouncer.GetPendingUpdates() + if len(finalUpdates) != 1 || finalUpdates[0] != update3 { + t.Error("Should get update3 as pending") + } + case <-time.After(100 * time.Millisecond): + t.Error("Timer should have fired for restarted timer") + } + case <-time.After(100 * time.Millisecond): + t.Error("Timer should have fired") + } +} + +func TestUpdateDebouncer_StopCleansUp(t *testing.T) { + debouncer := NewUpdateDebouncer(50 * time.Millisecond) + + update := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{}, + MessageType: network_map.MessageTypeNetworkMap, + } + + // Send update to start timer + debouncer.ProcessUpdate(update) + + // Stop should clean up + debouncer.Stop() + + // Multiple stops should be safe + debouncer.Stop() +} + +func TestUpdateDebouncer_HighFrequencyUpdates(t *testing.T) { + debouncer := NewUpdateDebouncer(50 * time.Millisecond) + defer debouncer.Stop() + + // Simulate high-frequency updates + var lastUpdate *network_map.UpdateMessage + sentImmediately := 0 + for i := 0; i < 100; i++ { + update := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{ + NetworkMap: &proto.NetworkMap{ + Serial: uint64(i), + }, + }, + MessageType: network_map.MessageTypeNetworkMap, + } + lastUpdate = update + if debouncer.ProcessUpdate(update) { + sentImmediately++ + } + time.Sleep(1 * time.Millisecond) // Very rapid updates + } + + // Only first update should be sent immediately + if sentImmediately != 1 { + t.Errorf("Expected only 1 update sent immediately, got %d", sentImmediately) + } + + // Wait for debounce period + select { + case <-debouncer.TimerChannel(): + pendingUpdates := debouncer.GetPendingUpdates() + if len(pendingUpdates) != 1 { + t.Errorf("Should get exactly 1 pending update, got %d", len(pendingUpdates)) + } + if pendingUpdates[0] != lastUpdate { + t.Error("Should get the very last update") + } + if pendingUpdates[0].Update.NetworkMap.Serial != 99 { + t.Errorf("Expected serial 99, got %d", pendingUpdates[0].Update.NetworkMap.Serial) + } + case <-time.After(200 * time.Millisecond): + t.Error("Timer should have fired") + } +} + +func TestUpdateDebouncer_NoUpdatesAfterFirst(t *testing.T) { + debouncer := NewUpdateDebouncer(30 * time.Millisecond) + defer debouncer.Stop() + + update := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{}, + MessageType: network_map.MessageTypeNetworkMap, + } + + // Send first update + if !debouncer.ProcessUpdate(update) { + t.Error("First update should be sent immediately") + } + + // Wait for timer to expire with no additional updates (true quiet period) + select { + case <-debouncer.TimerChannel(): + pendingUpdates := debouncer.GetPendingUpdates() + if len(pendingUpdates) != 0 { + t.Error("Should have no pending updates") + } + // After true quiet period, timer should be cleared + if debouncer.TimerChannel() != nil { + t.Error("Timer should be cleared after quiet period") + } + case <-time.After(100 * time.Millisecond): + t.Error("Timer should have fired") + } +} + +func TestUpdateDebouncer_IntermediateUpdatesDropped(t *testing.T) { + debouncer := NewUpdateDebouncer(50 * time.Millisecond) + defer debouncer.Stop() + + updates := make([]*network_map.UpdateMessage, 5) + for i := range updates { + updates[i] = &network_map.UpdateMessage{ + Update: &proto.SyncResponse{ + NetworkMap: &proto.NetworkMap{ + Serial: uint64(i), + }, + }, + MessageType: network_map.MessageTypeNetworkMap, + } + } + + // First update sent immediately + debouncer.ProcessUpdate(updates[0]) + + // Send updates 1, 2, 3, 4 rapidly - only last one should remain pending + debouncer.ProcessUpdate(updates[1]) + debouncer.ProcessUpdate(updates[2]) + debouncer.ProcessUpdate(updates[3]) + debouncer.ProcessUpdate(updates[4]) + + // Wait for debounce + <-debouncer.TimerChannel() + pendingUpdates := debouncer.GetPendingUpdates() + + if len(pendingUpdates) != 1 { + t.Errorf("Should get exactly 1 pending update, got %d", len(pendingUpdates)) + } + if pendingUpdates[0].Update.NetworkMap.Serial != 4 { + t.Errorf("Expected only the last update (serial 4), got serial %d", pendingUpdates[0].Update.NetworkMap.Serial) + } +} + +func TestUpdateDebouncer_TrueQuietPeriodResetsToImmediateMode(t *testing.T) { + debouncer := NewUpdateDebouncer(30 * time.Millisecond) + defer debouncer.Stop() + + update1 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{}, + MessageType: network_map.MessageTypeNetworkMap, + } + update2 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{}, + MessageType: network_map.MessageTypeNetworkMap, + } + + // First update sent immediately + if !debouncer.ProcessUpdate(update1) { + t.Error("First update should be sent immediately") + } + + // Wait for timer without sending any more updates (true quiet period) + <-debouncer.TimerChannel() + pendingUpdates := debouncer.GetPendingUpdates() + + if len(pendingUpdates) != 0 { + t.Error("Should have no pending updates during quiet period") + } + + // After true quiet period, next update should be sent immediately + if !debouncer.ProcessUpdate(update2) { + t.Error("Update after true quiet period should be sent immediately") + } +} + +func TestUpdateDebouncer_ContinuousHighFrequencyStaysInDebounceMode(t *testing.T) { + debouncer := NewUpdateDebouncer(50 * time.Millisecond) + defer debouncer.Stop() + + // Simulate continuous high-frequency updates + for i := 0; i < 10; i++ { + update := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{ + NetworkMap: &proto.NetworkMap{ + Serial: uint64(i), + }, + }, + MessageType: network_map.MessageTypeNetworkMap, + } + + if i == 0 { + // First one sent immediately + if !debouncer.ProcessUpdate(update) { + t.Error("First update should be sent immediately") + } + } else { + // All others should be coalesced (not sent immediately) + if debouncer.ProcessUpdate(update) { + t.Errorf("Update %d should not be sent immediately", i) + } + } + + // Wait a bit but send next update before debounce expires + time.Sleep(20 * time.Millisecond) + } + + // Now wait for final debounce + select { + case <-debouncer.TimerChannel(): + pendingUpdates := debouncer.GetPendingUpdates() + if len(pendingUpdates) == 0 { + t.Fatal("Should have the last update pending") + } + if pendingUpdates[0].Update.NetworkMap.Serial != 9 { + t.Errorf("Expected serial 9, got %d", pendingUpdates[0].Update.NetworkMap.Serial) + } + case <-time.After(200 * time.Millisecond): + t.Error("Timer should have fired") + } +} + +func TestUpdateDebouncer_ControlConfigMessagesQueued(t *testing.T) { + debouncer := NewUpdateDebouncer(50 * time.Millisecond) + defer debouncer.Stop() + + netmapUpdate := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{NetworkMap: &proto.NetworkMap{Serial: 1}}, + MessageType: network_map.MessageTypeNetworkMap, + } + tokenUpdate1 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{NetbirdConfig: &proto.NetbirdConfig{}}, + MessageType: network_map.MessageTypeControlConfig, + } + tokenUpdate2 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{NetbirdConfig: &proto.NetbirdConfig{}}, + MessageType: network_map.MessageTypeControlConfig, + } + + // First update sent immediately + debouncer.ProcessUpdate(netmapUpdate) + + // Send multiple control config updates - they should all be queued + debouncer.ProcessUpdate(tokenUpdate1) + debouncer.ProcessUpdate(tokenUpdate2) + + // Wait for debounce period + select { + case <-debouncer.TimerChannel(): + pendingUpdates := debouncer.GetPendingUpdates() + // Should get both control config updates + if len(pendingUpdates) != 2 { + t.Errorf("Expected 2 control config updates, got %d", len(pendingUpdates)) + } + // Control configs should come first + if pendingUpdates[0] != tokenUpdate1 { + t.Error("First pending update should be tokenUpdate1") + } + if pendingUpdates[1] != tokenUpdate2 { + t.Error("Second pending update should be tokenUpdate2") + } + case <-time.After(200 * time.Millisecond): + t.Error("Timer should have fired") + } +} + +func TestUpdateDebouncer_MixedMessageTypes(t *testing.T) { + debouncer := NewUpdateDebouncer(50 * time.Millisecond) + defer debouncer.Stop() + + netmapUpdate1 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{NetworkMap: &proto.NetworkMap{Serial: 1}}, + MessageType: network_map.MessageTypeNetworkMap, + } + netmapUpdate2 := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{NetworkMap: &proto.NetworkMap{Serial: 2}}, + MessageType: network_map.MessageTypeNetworkMap, + } + tokenUpdate := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{NetbirdConfig: &proto.NetbirdConfig{}}, + MessageType: network_map.MessageTypeControlConfig, + } + + // First update sent immediately + debouncer.ProcessUpdate(netmapUpdate1) + + // Send token update and network map update + debouncer.ProcessUpdate(tokenUpdate) + debouncer.ProcessUpdate(netmapUpdate2) + + // Wait for debounce period + select { + case <-debouncer.TimerChannel(): + pendingUpdates := debouncer.GetPendingUpdates() + // Should get 2 updates in order: token, then network map + if len(pendingUpdates) != 2 { + t.Errorf("Expected 2 pending updates, got %d", len(pendingUpdates)) + } + // Token update should come first (preserves order) + if pendingUpdates[0] != tokenUpdate { + t.Error("First pending update should be tokenUpdate") + } + // Network map update should come second + if pendingUpdates[1] != netmapUpdate2 { + t.Error("Second pending update should be netmapUpdate2") + } + case <-time.After(200 * time.Millisecond): + t.Error("Timer should have fired") + } +} + +func TestUpdateDebouncer_OrderPreservation(t *testing.T) { + debouncer := NewUpdateDebouncer(50 * time.Millisecond) + defer debouncer.Stop() + + // Simulate: 50 network maps -> 1 control config -> 50 network maps + // Expected result: 3 messages (netmap, controlConfig, netmap) + + // Send first network map immediately + firstNetmap := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{NetworkMap: &proto.NetworkMap{Serial: 0}}, + MessageType: network_map.MessageTypeNetworkMap, + } + if !debouncer.ProcessUpdate(firstNetmap) { + t.Error("First update should be sent immediately") + } + + // Send 49 more network maps (will be coalesced to last one) + var lastNetmapBatch1 *network_map.UpdateMessage + for i := 1; i < 50; i++ { + lastNetmapBatch1 = &network_map.UpdateMessage{ + Update: &proto.SyncResponse{NetworkMap: &proto.NetworkMap{Serial: uint64(i)}}, + MessageType: network_map.MessageTypeNetworkMap, + } + debouncer.ProcessUpdate(lastNetmapBatch1) + } + + // Send 1 control config + controlConfig := &network_map.UpdateMessage{ + Update: &proto.SyncResponse{NetbirdConfig: &proto.NetbirdConfig{}}, + MessageType: network_map.MessageTypeControlConfig, + } + debouncer.ProcessUpdate(controlConfig) + + // Send 50 more network maps (will be coalesced to last one) + var lastNetmapBatch2 *network_map.UpdateMessage + for i := 50; i < 100; i++ { + lastNetmapBatch2 = &network_map.UpdateMessage{ + Update: &proto.SyncResponse{NetworkMap: &proto.NetworkMap{Serial: uint64(i)}}, + MessageType: network_map.MessageTypeNetworkMap, + } + debouncer.ProcessUpdate(lastNetmapBatch2) + } + + // Wait for debounce period + select { + case <-debouncer.TimerChannel(): + pendingUpdates := debouncer.GetPendingUpdates() + // Should get exactly 3 updates: netmap, controlConfig, netmap + if len(pendingUpdates) != 3 { + t.Errorf("Expected 3 pending updates, got %d", len(pendingUpdates)) + } + // First should be the last netmap from batch 1 + if pendingUpdates[0] != lastNetmapBatch1 { + t.Error("First pending update should be last netmap from batch 1") + } + if pendingUpdates[0].Update.NetworkMap.Serial != 49 { + t.Errorf("Expected serial 49, got %d", pendingUpdates[0].Update.NetworkMap.Serial) + } + // Second should be the control config + if pendingUpdates[1] != controlConfig { + t.Error("Second pending update should be control config") + } + // Third should be the last netmap from batch 2 + if pendingUpdates[2] != lastNetmapBatch2 { + t.Error("Third pending update should be last netmap from batch 2") + } + if pendingUpdates[2].Update.NetworkMap.Serial != 99 { + t.Errorf("Expected serial 99, got %d", pendingUpdates[2].Update.NetworkMap.Serial) + } + case <-time.After(200 * time.Millisecond): + t.Error("Timer should have fired") + } +} diff --git a/management/server/management_test.go b/management/server/management_test.go index 0864baadf..de02855bf 100644 --- a/management/server/management_test.go +++ b/management/server/management_test.go @@ -610,6 +610,7 @@ func TestSync10PeersGetUpdates(t *testing.T) { initialPeers := 10 additionalPeers := 10 + expectedPeerCount := initialPeers + additionalPeers - 1 // -1 because peer doesn't see itself var peers []wgtypes.Key for i := 0; i < initialPeers; i++ { @@ -618,8 +619,19 @@ func TestSync10PeersGetUpdates(t *testing.T) { peers = append(peers, key) } + // Track the maximum peer count each peer has seen + type peerState struct { + mu sync.Mutex + maxPeerCount int + done bool + } + peerStates := make(map[string]*peerState) + for _, pk := range peers { + peerStates[pk.PublicKey().String()] = &peerState{} + } + var wg sync.WaitGroup - wg.Add(initialPeers + initialPeers*additionalPeers) + wg.Add(initialPeers) // One completion per initial peer var syncClients []mgmtProto.ManagementService_SyncClient for _, pk := range peers { @@ -643,6 +655,9 @@ func TestSync10PeersGetUpdates(t *testing.T) { syncClients = append(syncClients, s) go func(pk wgtypes.Key, syncStream mgmtProto.ManagementService_SyncClient) { + pubKey := pk.PublicKey().String() + state := peerStates[pubKey] + for { encMsg := &mgmtProto.EncryptedMessage{} err := syncStream.RecvMsg(encMsg) @@ -651,19 +666,28 @@ func TestSync10PeersGetUpdates(t *testing.T) { } decryptedBytes, decErr := encryption.Decrypt(encMsg.Body, ts.serverPubKey, pk) if decErr != nil { - t.Errorf("failed to decrypt SyncResponse for peer %s: %v", pk.PublicKey().String(), decErr) + t.Errorf("failed to decrypt SyncResponse for peer %s: %v", pubKey, decErr) return } resp := &mgmtProto.SyncResponse{} umErr := pb.Unmarshal(decryptedBytes, resp) if umErr != nil { - t.Errorf("failed to unmarshal SyncResponse for peer %s: %v", pk.PublicKey().String(), umErr) + t.Errorf("failed to unmarshal SyncResponse for peer %s: %v", pubKey, umErr) return } - // We only count if there's a new peer update - if len(resp.GetRemotePeers()) > 0 { + + // Track the maximum peer count seen (due to debouncing, updates are coalesced) + peerCount := len(resp.GetRemotePeers()) + state.mu.Lock() + if peerCount > state.maxPeerCount { + state.maxPeerCount = peerCount + } + // Signal completion when this peer has seen all expected peers + if !state.done && state.maxPeerCount >= expectedPeerCount { + state.done = true wg.Done() } + state.mu.Unlock() } }(pk, s) } @@ -677,7 +701,30 @@ func TestSync10PeersGetUpdates(t *testing.T) { time.Sleep(time.Duration(n) * time.Millisecond) } - wg.Wait() + // Wait for debouncer to flush final updates (debounce interval is 1000ms) + time.Sleep(1500 * time.Millisecond) + + // Wait with timeout + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + select { + case <-done: + // Success - all peers received expected peer count + case <-time.After(5 * time.Second): + // Timeout - report which peers didn't receive all updates + t.Error("Timeout waiting for all peers to receive updates") + for pubKey, state := range peerStates { + state.mu.Lock() + if state.maxPeerCount < expectedPeerCount { + t.Errorf("Peer %s only saw %d peers, expected %d", pubKey, state.maxPeerCount, expectedPeerCount) + } + state.mu.Unlock() + } + } for _, sc := range syncClients { err := sc.CloseSend() From 7bc85107eb6f76e250b874419092c3934ee8deff Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Fri, 6 Feb 2026 19:50:48 +0100 Subject: [PATCH 111/374] Adds timing measurement to handleSync to help diagnose sync performance issues (#5228) --- client/internal/engine.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/client/internal/engine.go b/client/internal/engine.go index 597ac7c2d..4dbd5f45e 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -828,6 +828,10 @@ func (e *Engine) handleAutoUpdateVersion(autoUpdateSettings *mgmProto.AutoUpdate } func (e *Engine) handleSync(update *mgmProto.SyncResponse) error { + started := time.Now() + defer func() { + log.Infof("sync finished in %s", time.Since(started)) + }() e.syncMsgMux.Lock() defer e.syncMsgMux.Unlock() From 391221a986813463345eed059d2593802fdcf823 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Mon, 9 Feb 2026 17:14:02 +0800 Subject: [PATCH 112/374] [client] Fix uspfilter duplicate firewall rules (#5269) --- client/firewall/uspfilter/allow_netbird.go | 34 +- .../uspfilter/allow_netbird_windows.go | 31 +- client/firewall/uspfilter/filter.go | 60 ++- .../uspfilter/filter_routeacl_test.go | 376 ++++++++++++++++++ client/firewall/uspfilter/filter_test.go | 152 +++++++ client/internal/acl/manager_test.go | 206 ++++++++++ 6 files changed, 791 insertions(+), 68 deletions(-) create mode 100644 client/firewall/uspfilter/filter_routeacl_test.go diff --git a/client/firewall/uspfilter/allow_netbird.go b/client/firewall/uspfilter/allow_netbird.go index 22e6fca1f..6a6533344 100644 --- a/client/firewall/uspfilter/allow_netbird.go +++ b/client/firewall/uspfilter/allow_netbird.go @@ -3,12 +3,6 @@ package uspfilter import ( - "context" - "net/netip" - "time" - - log "github.com/sirupsen/logrus" - "github.com/netbirdio/netbird/client/internal/statemanager" ) @@ -17,33 +11,7 @@ func (m *Manager) Close(stateManager *statemanager.Manager) error { m.mutex.Lock() defer m.mutex.Unlock() - m.outgoingRules = make(map[netip.Addr]RuleSet) - m.incomingDenyRules = make(map[netip.Addr]RuleSet) - m.incomingRules = make(map[netip.Addr]RuleSet) - - if m.udpTracker != nil { - m.udpTracker.Close() - } - - if m.icmpTracker != nil { - m.icmpTracker.Close() - } - - if m.tcpTracker != nil { - m.tcpTracker.Close() - } - - if fwder := m.forwarder.Load(); fwder != nil { - fwder.Stop() - } - - if m.logger != nil { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - if err := m.logger.Stop(ctx); err != nil { - log.Errorf("failed to shutdown logger: %v", err) - } - } + m.resetState() if m.nativeFirewall != nil { return m.nativeFirewall.Close(stateManager) diff --git a/client/firewall/uspfilter/allow_netbird_windows.go b/client/firewall/uspfilter/allow_netbird_windows.go index 8a56b0862..6aef2ecfd 100644 --- a/client/firewall/uspfilter/allow_netbird_windows.go +++ b/client/firewall/uspfilter/allow_netbird_windows.go @@ -1,12 +1,9 @@ package uspfilter import ( - "context" "fmt" - "net/netip" "os/exec" "syscall" - "time" log "github.com/sirupsen/logrus" @@ -26,33 +23,7 @@ func (m *Manager) Close(*statemanager.Manager) error { m.mutex.Lock() defer m.mutex.Unlock() - m.outgoingRules = make(map[netip.Addr]RuleSet) - m.incomingDenyRules = make(map[netip.Addr]RuleSet) - m.incomingRules = make(map[netip.Addr]RuleSet) - - if m.udpTracker != nil { - m.udpTracker.Close() - } - - if m.icmpTracker != nil { - m.icmpTracker.Close() - } - - if m.tcpTracker != nil { - m.tcpTracker.Close() - } - - if fwder := m.forwarder.Load(); fwder != nil { - fwder.Stop() - } - - if m.logger != nil { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - if err := m.logger.Stop(ctx); err != nil { - log.Errorf("failed to shutdown logger: %v", err) - } - } + m.resetState() if !isWindowsFirewallReachable() { return nil diff --git a/client/firewall/uspfilter/filter.go b/client/firewall/uspfilter/filter.go index aacc4ca1c..df2e274eb 100644 --- a/client/firewall/uspfilter/filter.go +++ b/client/firewall/uspfilter/filter.go @@ -1,6 +1,7 @@ package uspfilter import ( + "context" "encoding/binary" "errors" "fmt" @@ -12,11 +13,13 @@ import ( "strings" "sync" "sync/atomic" + "time" "github.com/google/gopacket" "github.com/google/gopacket/layers" "github.com/google/uuid" log "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" firewall "github.com/netbirdio/netbird/client/firewall/manager" "github.com/netbirdio/netbird/client/firewall/uspfilter/common" @@ -24,6 +27,7 @@ import ( "github.com/netbirdio/netbird/client/firewall/uspfilter/forwarder" nblog "github.com/netbirdio/netbird/client/firewall/uspfilter/log" "github.com/netbirdio/netbird/client/iface/netstack" + nbid "github.com/netbirdio/netbird/client/internal/acl/id" nftypes "github.com/netbirdio/netbird/client/internal/netflow/types" "github.com/netbirdio/netbird/client/internal/statemanager" ) @@ -89,6 +93,7 @@ type Manager struct { incomingDenyRules map[netip.Addr]RuleSet incomingRules map[netip.Addr]RuleSet routeRules RouteRules + routeRulesMap map[nbid.RuleID]*RouteRule decoders sync.Pool wgIface common.IFaceMapper nativeFirewall firewall.Manager @@ -229,6 +234,7 @@ func create(iface common.IFaceMapper, nativeFirewall firewall.Manager, disableSe flowLogger: flowLogger, netstack: netstack.IsEnabled(), localForwarding: enableLocalForwarding, + routeRulesMap: make(map[nbid.RuleID]*RouteRule), dnatMappings: make(map[netip.Addr]netip.Addr), portDNATRules: []portDNATRule{}, netstackServices: make(map[serviceKey]struct{}), @@ -480,11 +486,15 @@ func (m *Manager) addRouteFiltering( return m.nativeFirewall.AddRouteFiltering(id, sources, destination, proto, sPort, dPort, action) } - ruleID := uuid.New().String() + ruleKey := nbid.GenerateRouteRuleKey(sources, destination, proto, sPort, dPort, action) + + if existingRule, ok := m.routeRulesMap[ruleKey]; ok { + return existingRule, nil + } rule := RouteRule{ // TODO: consolidate these IDs - id: ruleID, + id: string(ruleKey), mgmtId: id, sources: sources, dstSet: destination.Set, @@ -499,6 +509,7 @@ func (m *Manager) addRouteFiltering( m.routeRules = append(m.routeRules, &rule) m.routeRules.Sort() + m.routeRulesMap[ruleKey] = &rule return &rule, nil } @@ -515,15 +526,20 @@ func (m *Manager) deleteRouteRule(rule firewall.Rule) error { return m.nativeFirewall.DeleteRouteRule(rule) } - ruleID := rule.ID() + ruleKey := nbid.RuleID(rule.ID()) + if _, ok := m.routeRulesMap[ruleKey]; !ok { + return fmt.Errorf("route rule not found: %s", ruleKey) + } + idx := slices.IndexFunc(m.routeRules, func(r *RouteRule) bool { - return r.id == ruleID + return r.id == string(ruleKey) }) if idx < 0 { - return fmt.Errorf("route rule not found: %s", ruleID) + return fmt.Errorf("route rule not found in slice: %s", ruleKey) } m.routeRules = slices.Delete(m.routeRules, idx, idx+1) + delete(m.routeRulesMap, ruleKey) return nil } @@ -570,6 +586,40 @@ func (m *Manager) SetLegacyManagement(isLegacy bool) error { // Flush doesn't need to be implemented for this manager func (m *Manager) Flush() error { return nil } +// resetState clears all firewall rules and closes connection trackers. +// Must be called with m.mutex held. +func (m *Manager) resetState() { + maps.Clear(m.outgoingRules) + maps.Clear(m.incomingDenyRules) + maps.Clear(m.incomingRules) + maps.Clear(m.routeRulesMap) + m.routeRules = m.routeRules[:0] + + if m.udpTracker != nil { + m.udpTracker.Close() + } + + if m.icmpTracker != nil { + m.icmpTracker.Close() + } + + if m.tcpTracker != nil { + m.tcpTracker.Close() + } + + if fwder := m.forwarder.Load(); fwder != nil { + fwder.Stop() + } + + if m.logger != nil { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + if err := m.logger.Stop(ctx); err != nil { + log.Errorf("failed to shutdown logger: %v", err) + } + } +} + // SetupEBPFProxyNoTrack creates notrack rules for eBPF proxy loopback traffic. func (m *Manager) SetupEBPFProxyNoTrack(proxyPort, wgPort uint16) error { if m.nativeFirewall == nil { diff --git a/client/firewall/uspfilter/filter_routeacl_test.go b/client/firewall/uspfilter/filter_routeacl_test.go new file mode 100644 index 000000000..68572a01c --- /dev/null +++ b/client/firewall/uspfilter/filter_routeacl_test.go @@ -0,0 +1,376 @@ +package uspfilter + +import ( + "net/netip" + "testing" + + "github.com/golang/mock/gomock" + "github.com/google/gopacket/layers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + wgdevice "golang.zx2c4.com/wireguard/device" + + fw "github.com/netbirdio/netbird/client/firewall/manager" + "github.com/netbirdio/netbird/client/iface" + "github.com/netbirdio/netbird/client/iface/device" + "github.com/netbirdio/netbird/client/iface/mocks" + "github.com/netbirdio/netbird/client/iface/wgaddr" +) + +// TestAddRouteFilteringReturnsExistingRule verifies that adding the same route +// filtering rule twice returns the same rule ID (idempotent behavior). +func TestAddRouteFilteringReturnsExistingRule(t *testing.T) { + manager := setupTestManager(t) + + sources := []netip.Prefix{ + netip.MustParsePrefix("100.64.1.0/24"), + netip.MustParsePrefix("100.64.2.0/24"), + } + destination := fw.Network{Prefix: netip.MustParsePrefix("192.168.1.0/24")} + + // Add rule first time + rule1, err := manager.AddRouteFiltering( + []byte("policy-1"), + sources, + destination, + fw.ProtocolTCP, + nil, + &fw.Port{Values: []uint16{443}}, + fw.ActionAccept, + ) + require.NoError(t, err) + require.NotNil(t, rule1) + + // Add the same rule again + rule2, err := manager.AddRouteFiltering( + []byte("policy-1"), + sources, + destination, + fw.ProtocolTCP, + nil, + &fw.Port{Values: []uint16{443}}, + fw.ActionAccept, + ) + require.NoError(t, err) + require.NotNil(t, rule2) + + // These should be the same (idempotent) like nftables/iptables implementations + assert.Equal(t, rule1.ID(), rule2.ID(), + "Adding the same rule twice should return the same rule ID (idempotent)") + + manager.mutex.RLock() + ruleCount := len(manager.routeRules) + manager.mutex.RUnlock() + + assert.Equal(t, 2, ruleCount, + "Should have exactly 2 rules (1 user rule + 1 block rule)") +} + +// TestAddRouteFilteringDifferentRulesGetDifferentIDs verifies that rules with +// different parameters get distinct IDs. +func TestAddRouteFilteringDifferentRulesGetDifferentIDs(t *testing.T) { + manager := setupTestManager(t) + + sources := []netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")} + + // Add first rule + rule1, err := manager.AddRouteFiltering( + []byte("policy-1"), + sources, + fw.Network{Prefix: netip.MustParsePrefix("192.168.1.0/24")}, + fw.ProtocolTCP, + nil, + &fw.Port{Values: []uint16{443}}, + fw.ActionAccept, + ) + require.NoError(t, err) + + // Add different rule (different destination) + rule2, err := manager.AddRouteFiltering( + []byte("policy-2"), + sources, + fw.Network{Prefix: netip.MustParsePrefix("192.168.2.0/24")}, // Different! + fw.ProtocolTCP, + nil, + &fw.Port{Values: []uint16{443}}, + fw.ActionAccept, + ) + require.NoError(t, err) + + assert.NotEqual(t, rule1.ID(), rule2.ID(), + "Different rules should have different IDs") + + manager.mutex.RLock() + ruleCount := len(manager.routeRules) + manager.mutex.RUnlock() + + assert.Equal(t, 3, ruleCount, "Should have 3 rules (2 user rules + 1 block rule)") +} + +// TestRouteRuleUpdateDoesNotCauseGap verifies that re-adding the same route +// rule during a network map update does not disrupt existing traffic. +func TestRouteRuleUpdateDoesNotCauseGap(t *testing.T) { + manager := setupTestManager(t) + + sources := []netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")} + destination := fw.Network{Prefix: netip.MustParsePrefix("192.168.1.0/24")} + + rule1, err := manager.AddRouteFiltering( + []byte("policy-1"), + sources, + destination, + fw.ProtocolTCP, + nil, + nil, + fw.ActionAccept, + ) + require.NoError(t, err) + + srcIP := netip.MustParseAddr("100.64.1.5") + dstIP := netip.MustParseAddr("192.168.1.10") + _, pass := manager.routeACLsPass(srcIP, dstIP, layers.LayerTypeTCP, 12345, 443) + require.True(t, pass, "Traffic should pass with rule in place") + + // Re-add same rule (simulates network map update) + rule2, err := manager.AddRouteFiltering( + []byte("policy-1"), + sources, + destination, + fw.ProtocolTCP, + nil, + nil, + fw.ActionAccept, + ) + require.NoError(t, err) + + // Idempotent IDs mean rule1.ID() == rule2.ID(), so the ACL manager + // won't delete rule1 during cleanup. If IDs differed, deleting rule1 + // would remove the only matching rule and cause a traffic gap. + if rule1.ID() != rule2.ID() { + err = manager.DeleteRouteRule(rule1) + require.NoError(t, err) + } + + _, passAfter := manager.routeACLsPass(srcIP, dstIP, layers.LayerTypeTCP, 12345, 443) + assert.True(t, passAfter, + "Traffic should still pass after rule update - no gap should occur") +} + +// TestBlockInvalidRoutedIdempotent verifies that blockInvalidRouted creates +// exactly one drop rule for the WireGuard network prefix, and calling it again +// returns the same rule without duplicating. +func TestBlockInvalidRoutedIdempotent(t *testing.T) { + ctrl := gomock.NewController(t) + dev := mocks.NewMockDevice(ctrl) + dev.EXPECT().MTU().Return(1500, nil).AnyTimes() + + wgNet := netip.MustParsePrefix("100.64.0.1/16") + + ifaceMock := &IFaceMock{ + SetFilterFunc: func(device.PacketFilter) error { return nil }, + AddressFunc: func() wgaddr.Address { + return wgaddr.Address{ + IP: wgNet.Addr(), + Network: wgNet, + } + }, + GetDeviceFunc: func() *device.FilteredDevice { + return &device.FilteredDevice{Device: dev} + }, + GetWGDeviceFunc: func() *wgdevice.Device { + return &wgdevice.Device{} + }, + } + + manager, err := Create(ifaceMock, false, flowLogger, iface.DefaultMTU) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, manager.Close(nil)) + }) + + // Call blockInvalidRouted directly multiple times + rule1, err := manager.blockInvalidRouted(ifaceMock) + require.NoError(t, err) + require.NotNil(t, rule1) + + rule2, err := manager.blockInvalidRouted(ifaceMock) + require.NoError(t, err) + require.NotNil(t, rule2) + + rule3, err := manager.blockInvalidRouted(ifaceMock) + require.NoError(t, err) + require.NotNil(t, rule3) + + // All should return the same rule + assert.Equal(t, rule1.ID(), rule2.ID(), "Second call should return same rule") + assert.Equal(t, rule2.ID(), rule3.ID(), "Third call should return same rule") + + // Should have exactly 1 route rule + manager.mutex.RLock() + ruleCount := len(manager.routeRules) + manager.mutex.RUnlock() + + assert.Equal(t, 1, ruleCount, "Should have exactly 1 block rule after 3 calls") + + // Verify the rule blocks traffic to the WG network + srcIP := netip.MustParseAddr("10.0.0.1") + dstIP := netip.MustParseAddr("100.64.0.50") + _, pass := manager.routeACLsPass(srcIP, dstIP, layers.LayerTypeTCP, 12345, 80) + assert.False(t, pass, "Block rule should deny traffic to WG prefix") +} + +// TestBlockRuleNotAccumulatedOnRepeatedEnableRouting verifies that calling +// EnableRouting multiple times (as happens on each route update) does not +// accumulate duplicate block rules in the routeRules slice. +func TestBlockRuleNotAccumulatedOnRepeatedEnableRouting(t *testing.T) { + ctrl := gomock.NewController(t) + dev := mocks.NewMockDevice(ctrl) + dev.EXPECT().MTU().Return(1500, nil).AnyTimes() + + wgNet := netip.MustParsePrefix("100.64.0.1/16") + + ifaceMock := &IFaceMock{ + SetFilterFunc: func(device.PacketFilter) error { return nil }, + AddressFunc: func() wgaddr.Address { + return wgaddr.Address{ + IP: wgNet.Addr(), + Network: wgNet, + } + }, + GetDeviceFunc: func() *device.FilteredDevice { + return &device.FilteredDevice{Device: dev} + }, + GetWGDeviceFunc: func() *wgdevice.Device { + return &wgdevice.Device{} + }, + } + + manager, err := Create(ifaceMock, false, flowLogger, iface.DefaultMTU) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, manager.Close(nil)) + }) + + // Call EnableRouting multiple times (simulating repeated route updates) + for i := 0; i < 5; i++ { + require.NoError(t, manager.EnableRouting()) + } + + manager.mutex.RLock() + ruleCount := len(manager.routeRules) + manager.mutex.RUnlock() + + assert.Equal(t, 1, ruleCount, + "Repeated EnableRouting should not accumulate block rules") +} + +// TestRouteRuleCountStableAcrossUpdates verifies that adding the same route +// rule multiple times does not create duplicate entries. +func TestRouteRuleCountStableAcrossUpdates(t *testing.T) { + manager := setupTestManager(t) + + sources := []netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")} + destination := fw.Network{Prefix: netip.MustParsePrefix("192.168.1.0/24")} + + // Simulate 5 network map updates with the same route rule + for i := 0; i < 5; i++ { + rule, err := manager.AddRouteFiltering( + []byte("policy-1"), + sources, + destination, + fw.ProtocolTCP, + nil, + &fw.Port{Values: []uint16{443}}, + fw.ActionAccept, + ) + require.NoError(t, err) + require.NotNil(t, rule) + } + + manager.mutex.RLock() + ruleCount := len(manager.routeRules) + manager.mutex.RUnlock() + + assert.Equal(t, 2, ruleCount, + "Should have exactly 2 rules (1 user rule + 1 block rule) after 5 updates") +} + +// TestDeleteRouteRuleAfterIdempotentAdd verifies that deleting a route rule +// after adding it multiple times works correctly. +func TestDeleteRouteRuleAfterIdempotentAdd(t *testing.T) { + manager := setupTestManager(t) + + sources := []netip.Prefix{netip.MustParsePrefix("100.64.1.0/24")} + destination := fw.Network{Prefix: netip.MustParsePrefix("192.168.1.0/24")} + + // Add same rule twice + rule1, err := manager.AddRouteFiltering( + []byte("policy-1"), + sources, + destination, + fw.ProtocolTCP, + nil, + nil, + fw.ActionAccept, + ) + require.NoError(t, err) + + rule2, err := manager.AddRouteFiltering( + []byte("policy-1"), + sources, + destination, + fw.ProtocolTCP, + nil, + nil, + fw.ActionAccept, + ) + require.NoError(t, err) + + require.Equal(t, rule1.ID(), rule2.ID(), "Should return same rule ID") + + // Delete using first reference + err = manager.DeleteRouteRule(rule1) + require.NoError(t, err) + + // Verify traffic no longer passes + srcIP := netip.MustParseAddr("100.64.1.5") + dstIP := netip.MustParseAddr("192.168.1.10") + _, pass := manager.routeACLsPass(srcIP, dstIP, layers.LayerTypeTCP, 12345, 443) + assert.False(t, pass, "Traffic should not pass after rule deletion") +} + +func setupTestManager(t *testing.T) *Manager { + t.Helper() + + ctrl := gomock.NewController(t) + dev := mocks.NewMockDevice(ctrl) + dev.EXPECT().MTU().Return(1500, nil).AnyTimes() + + wgNet := netip.MustParsePrefix("100.64.0.1/16") + + ifaceMock := &IFaceMock{ + SetFilterFunc: func(device.PacketFilter) error { return nil }, + AddressFunc: func() wgaddr.Address { + return wgaddr.Address{ + IP: wgNet.Addr(), + Network: wgNet, + } + }, + GetDeviceFunc: func() *device.FilteredDevice { + return &device.FilteredDevice{Device: dev} + }, + GetWGDeviceFunc: func() *wgdevice.Device { + return &wgdevice.Device{} + }, + } + + manager, err := Create(ifaceMock, false, flowLogger, iface.DefaultMTU) + require.NoError(t, err) + require.NoError(t, manager.EnableRouting()) + + t.Cleanup(func() { + require.NoError(t, manager.Close(nil)) + }) + + return manager +} diff --git a/client/firewall/uspfilter/filter_test.go b/client/firewall/uspfilter/filter_test.go index c6a4ebeb8..55a8e723c 100644 --- a/client/firewall/uspfilter/filter_test.go +++ b/client/firewall/uspfilter/filter_test.go @@ -263,6 +263,158 @@ func TestAddUDPPacketHook(t *testing.T) { } } +// TestPeerRuleLifecycleDenyRules verifies that deny rules are correctly added +// to the deny map and can be cleanly deleted without leaving orphans. +func TestPeerRuleLifecycleDenyRules(t *testing.T) { + ifaceMock := &IFaceMock{ + SetFilterFunc: func(device.PacketFilter) error { return nil }, + } + + m, err := Create(ifaceMock, false, flowLogger, nbiface.DefaultMTU) + require.NoError(t, err) + defer func() { + require.NoError(t, m.Close(nil)) + }() + + ip := net.ParseIP("192.168.1.1") + addr := netip.MustParseAddr("192.168.1.1") + + // Add multiple deny rules for different ports + rule1, err := m.AddPeerFiltering(nil, ip, fw.ProtocolTCP, nil, + &fw.Port{Values: []uint16{22}}, fw.ActionDrop, "") + require.NoError(t, err) + + rule2, err := m.AddPeerFiltering(nil, ip, fw.ProtocolTCP, nil, + &fw.Port{Values: []uint16{80}}, fw.ActionDrop, "") + require.NoError(t, err) + + m.mutex.RLock() + denyCount := len(m.incomingDenyRules[addr]) + m.mutex.RUnlock() + require.Equal(t, 2, denyCount, "Should have exactly 2 deny rules") + + // Delete the first deny rule + err = m.DeletePeerRule(rule1[0]) + require.NoError(t, err) + + m.mutex.RLock() + denyCount = len(m.incomingDenyRules[addr]) + m.mutex.RUnlock() + require.Equal(t, 1, denyCount, "Should have 1 deny rule after deleting first") + + // Delete the second deny rule + err = m.DeletePeerRule(rule2[0]) + require.NoError(t, err) + + m.mutex.RLock() + _, exists := m.incomingDenyRules[addr] + m.mutex.RUnlock() + require.False(t, exists, "Deny rules IP entry should be cleaned up when empty") +} + +// TestPeerRuleAddAndDeleteDontLeak verifies that repeatedly adding and deleting +// peer rules (simulating network map updates) does not leak rules in the maps. +func TestPeerRuleAddAndDeleteDontLeak(t *testing.T) { + ifaceMock := &IFaceMock{ + SetFilterFunc: func(device.PacketFilter) error { return nil }, + } + + m, err := Create(ifaceMock, false, flowLogger, nbiface.DefaultMTU) + require.NoError(t, err) + defer func() { + require.NoError(t, m.Close(nil)) + }() + + ip := net.ParseIP("192.168.1.1") + addr := netip.MustParseAddr("192.168.1.1") + + // Simulate 10 network map updates: add rule, delete old, add new + for i := 0; i < 10; i++ { + // Add a deny rule + rules, err := m.AddPeerFiltering(nil, ip, fw.ProtocolTCP, nil, + &fw.Port{Values: []uint16{22}}, fw.ActionDrop, "") + require.NoError(t, err) + + // Add an allow rule + allowRules, err := m.AddPeerFiltering(nil, ip, fw.ProtocolTCP, nil, + &fw.Port{Values: []uint16{80}}, fw.ActionAccept, "") + require.NoError(t, err) + + // Delete them (simulating ACL manager cleanup) + for _, r := range rules { + require.NoError(t, m.DeletePeerRule(r)) + } + for _, r := range allowRules { + require.NoError(t, m.DeletePeerRule(r)) + } + } + + m.mutex.RLock() + denyCount := len(m.incomingDenyRules[addr]) + allowCount := len(m.incomingRules[addr]) + m.mutex.RUnlock() + + require.Equal(t, 0, denyCount, "No deny rules should remain after cleanup") + require.Equal(t, 0, allowCount, "No allow rules should remain after cleanup") +} + +// TestMixedAllowDenyRulesSameIP verifies that allow and deny rules for the same +// IP are stored in separate maps and don't interfere with each other. +func TestMixedAllowDenyRulesSameIP(t *testing.T) { + ifaceMock := &IFaceMock{ + SetFilterFunc: func(device.PacketFilter) error { return nil }, + } + + m, err := Create(ifaceMock, false, flowLogger, nbiface.DefaultMTU) + require.NoError(t, err) + defer func() { + require.NoError(t, m.Close(nil)) + }() + + ip := net.ParseIP("192.168.1.1") + + // Add allow rule for port 80 + allowRule, err := m.AddPeerFiltering(nil, ip, fw.ProtocolTCP, nil, + &fw.Port{Values: []uint16{80}}, fw.ActionAccept, "") + require.NoError(t, err) + + // Add deny rule for port 22 + denyRule, err := m.AddPeerFiltering(nil, ip, fw.ProtocolTCP, nil, + &fw.Port{Values: []uint16{22}}, fw.ActionDrop, "") + require.NoError(t, err) + + addr := netip.MustParseAddr("192.168.1.1") + m.mutex.RLock() + allowCount := len(m.incomingRules[addr]) + denyCount := len(m.incomingDenyRules[addr]) + m.mutex.RUnlock() + + require.Equal(t, 1, allowCount, "Should have 1 allow rule") + require.Equal(t, 1, denyCount, "Should have 1 deny rule") + + // Delete allow rule should not affect deny rule + err = m.DeletePeerRule(allowRule[0]) + require.NoError(t, err) + + m.mutex.RLock() + denyCountAfter := len(m.incomingDenyRules[addr]) + m.mutex.RUnlock() + + require.Equal(t, 1, denyCountAfter, "Deny rule should still exist after deleting allow rule") + + // Delete deny rule + err = m.DeletePeerRule(denyRule[0]) + require.NoError(t, err) + + m.mutex.RLock() + _, denyExists := m.incomingDenyRules[addr] + _, allowExists := m.incomingRules[addr] + m.mutex.RUnlock() + + require.False(t, denyExists, "Deny rules should be empty") + require.False(t, allowExists, "Allow rules should be empty") +} + func TestManagerReset(t *testing.T) { ifaceMock := &IFaceMock{ SetFilterFunc: func(device.PacketFilter) error { return nil }, diff --git a/client/internal/acl/manager_test.go b/client/internal/acl/manager_test.go index 4bc0fd800..bd7adfaef 100644 --- a/client/internal/acl/manager_test.go +++ b/client/internal/acl/manager_test.go @@ -189,6 +189,212 @@ func TestDefaultManagerStateless(t *testing.T) { }) } +// TestDenyRulesNotAccumulatedOnRepeatedApply verifies that applying the same +// deny rules repeatedly does not accumulate duplicate rules in the uspfilter. +// This tests the full ACL manager -> uspfilter integration. +func TestDenyRulesNotAccumulatedOnRepeatedApply(t *testing.T) { + t.Setenv("NB_WG_KERNEL_DISABLED", "true") + + networkMap := &mgmProto.NetworkMap{ + FirewallRules: []*mgmProto.FirewallRule{ + { + PeerIP: "10.93.0.1", + Direction: mgmProto.RuleDirection_IN, + Action: mgmProto.RuleAction_DROP, + Protocol: mgmProto.RuleProtocol_TCP, + Port: "22", + }, + { + PeerIP: "10.93.0.2", + Direction: mgmProto.RuleDirection_IN, + Action: mgmProto.RuleAction_DROP, + Protocol: mgmProto.RuleProtocol_TCP, + Port: "80", + }, + { + PeerIP: "10.93.0.3", + Direction: mgmProto.RuleDirection_IN, + Action: mgmProto.RuleAction_ACCEPT, + Protocol: mgmProto.RuleProtocol_TCP, + Port: "443", + }, + }, + FirewallRulesIsEmpty: false, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ifaceMock := mocks.NewMockIFaceMapper(ctrl) + ifaceMock.EXPECT().IsUserspaceBind().Return(true).AnyTimes() + ifaceMock.EXPECT().SetFilter(gomock.Any()) + network := netip.MustParsePrefix("172.0.0.1/32") + ifaceMock.EXPECT().Name().Return("lo").AnyTimes() + ifaceMock.EXPECT().Address().Return(wgaddr.Address{ + IP: network.Addr(), + Network: network, + }).AnyTimes() + ifaceMock.EXPECT().GetWGDevice().Return(nil).AnyTimes() + + fw, err := firewall.NewFirewall(ifaceMock, nil, flowLogger, false, iface.DefaultMTU) + require.NoError(t, err) + defer func() { + require.NoError(t, fw.Close(nil)) + }() + + acl := NewDefaultManager(fw) + + // Apply the same rules 5 times (simulating repeated network map updates) + for i := 0; i < 5; i++ { + acl.ApplyFiltering(networkMap, false) + } + + // The ACL manager should track exactly 3 rule pairs (2 deny + 1 accept inbound) + assert.Equal(t, 3, len(acl.peerRulesPairs), + "Should have exactly 3 rule pairs after 5 identical updates") +} + +// TestDenyRulesCleanedUpOnRemoval verifies that deny rules are properly cleaned +// up when they're removed from the network map in a subsequent update. +func TestDenyRulesCleanedUpOnRemoval(t *testing.T) { + t.Setenv("NB_WG_KERNEL_DISABLED", "true") + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ifaceMock := mocks.NewMockIFaceMapper(ctrl) + ifaceMock.EXPECT().IsUserspaceBind().Return(true).AnyTimes() + ifaceMock.EXPECT().SetFilter(gomock.Any()) + network := netip.MustParsePrefix("172.0.0.1/32") + ifaceMock.EXPECT().Name().Return("lo").AnyTimes() + ifaceMock.EXPECT().Address().Return(wgaddr.Address{ + IP: network.Addr(), + Network: network, + }).AnyTimes() + ifaceMock.EXPECT().GetWGDevice().Return(nil).AnyTimes() + + fw, err := firewall.NewFirewall(ifaceMock, nil, flowLogger, false, iface.DefaultMTU) + require.NoError(t, err) + defer func() { + require.NoError(t, fw.Close(nil)) + }() + + acl := NewDefaultManager(fw) + + // First update: add deny and accept rules + networkMap1 := &mgmProto.NetworkMap{ + FirewallRules: []*mgmProto.FirewallRule{ + { + PeerIP: "10.93.0.1", + Direction: mgmProto.RuleDirection_IN, + Action: mgmProto.RuleAction_DROP, + Protocol: mgmProto.RuleProtocol_TCP, + Port: "22", + }, + { + PeerIP: "10.93.0.2", + Direction: mgmProto.RuleDirection_IN, + Action: mgmProto.RuleAction_ACCEPT, + Protocol: mgmProto.RuleProtocol_TCP, + Port: "443", + }, + }, + FirewallRulesIsEmpty: false, + } + + acl.ApplyFiltering(networkMap1, false) + assert.Equal(t, 2, len(acl.peerRulesPairs), "Should have 2 rules after first update") + + // Second update: remove the deny rule, keep only accept + networkMap2 := &mgmProto.NetworkMap{ + FirewallRules: []*mgmProto.FirewallRule{ + { + PeerIP: "10.93.0.2", + Direction: mgmProto.RuleDirection_IN, + Action: mgmProto.RuleAction_ACCEPT, + Protocol: mgmProto.RuleProtocol_TCP, + Port: "443", + }, + }, + FirewallRulesIsEmpty: false, + } + + acl.ApplyFiltering(networkMap2, false) + assert.Equal(t, 1, len(acl.peerRulesPairs), + "Should have 1 rule after removing deny rule") + + // Third update: remove all rules + networkMap3 := &mgmProto.NetworkMap{ + FirewallRules: []*mgmProto.FirewallRule{}, + FirewallRulesIsEmpty: true, + } + + acl.ApplyFiltering(networkMap3, false) + assert.Equal(t, 0, len(acl.peerRulesPairs), + "Should have 0 rules after removing all rules") +} + +// TestRuleUpdateChangingAction verifies that when a rule's action changes from +// accept to deny (or vice versa), the old rule is properly removed and the new +// one added without leaking. +func TestRuleUpdateChangingAction(t *testing.T) { + t.Setenv("NB_WG_KERNEL_DISABLED", "true") + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ifaceMock := mocks.NewMockIFaceMapper(ctrl) + ifaceMock.EXPECT().IsUserspaceBind().Return(true).AnyTimes() + ifaceMock.EXPECT().SetFilter(gomock.Any()) + network := netip.MustParsePrefix("172.0.0.1/32") + ifaceMock.EXPECT().Name().Return("lo").AnyTimes() + ifaceMock.EXPECT().Address().Return(wgaddr.Address{ + IP: network.Addr(), + Network: network, + }).AnyTimes() + ifaceMock.EXPECT().GetWGDevice().Return(nil).AnyTimes() + + fw, err := firewall.NewFirewall(ifaceMock, nil, flowLogger, false, iface.DefaultMTU) + require.NoError(t, err) + defer func() { + require.NoError(t, fw.Close(nil)) + }() + + acl := NewDefaultManager(fw) + + // First update: accept rule + networkMap := &mgmProto.NetworkMap{ + FirewallRules: []*mgmProto.FirewallRule{ + { + PeerIP: "10.93.0.1", + Direction: mgmProto.RuleDirection_IN, + Action: mgmProto.RuleAction_ACCEPT, + Protocol: mgmProto.RuleProtocol_TCP, + Port: "22", + }, + }, + FirewallRulesIsEmpty: false, + } + acl.ApplyFiltering(networkMap, false) + assert.Equal(t, 1, len(acl.peerRulesPairs)) + + // Second update: change to deny (same IP/port/proto, different action) + networkMap.FirewallRules = []*mgmProto.FirewallRule{ + { + PeerIP: "10.93.0.1", + Direction: mgmProto.RuleDirection_IN, + Action: mgmProto.RuleAction_DROP, + Protocol: mgmProto.RuleProtocol_TCP, + Port: "22", + }, + } + acl.ApplyFiltering(networkMap, false) + + // Should still have exactly 1 rule (the old accept removed, new deny added) + assert.Equal(t, 1, len(acl.peerRulesPairs), + "Changing action should result in exactly 1 rule, not 2") +} + func TestPortInfoEmpty(t *testing.T) { tests := []struct { name string From 08403f64aa07c230ea288ed8764596eff3a33d26 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Mon, 9 Feb 2026 18:09:11 +0800 Subject: [PATCH 113/374] [client] Add env var to skip DNS probing (#5270) --- client/internal/dns/server.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/client/internal/dns/server.go b/client/internal/dns/server.go index 4d4fcc06e..c2b01de62 100644 --- a/client/internal/dns/server.go +++ b/client/internal/dns/server.go @@ -6,7 +6,9 @@ import ( "fmt" "net/netip" "net/url" + "os" "runtime" + "strconv" "strings" "sync" @@ -27,6 +29,8 @@ import ( "github.com/netbirdio/netbird/shared/management/domain" ) +const envSkipDNSProbe = "NB_SKIP_DNS_PROBE" + // ReadyListener is a notification mechanism what indicate the server is ready to handle host dns address changes type ReadyListener interface { OnReady() @@ -439,6 +443,17 @@ func (s *DefaultServer) SearchDomains() []string { // ProbeAvailability tests each upstream group's servers for availability // and deactivates the group if no server responds func (s *DefaultServer) ProbeAvailability() { + if val := os.Getenv(envSkipDNSProbe); val != "" { + skipProbe, err := strconv.ParseBool(val) + if err != nil { + log.Warnf("failed to parse %s: %v", envSkipDNSProbe, err) + } + if skipProbe { + log.Infof("skipping DNS probe due to %s", envSkipDNSProbe) + return + } + } + var wg sync.WaitGroup for _, mux := range s.dnsMuxMap { wg.Add(1) From 6981fdce7e86640b9ff1c222a807390296fc684c Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Mon, 9 Feb 2026 11:34:24 +0100 Subject: [PATCH 114/374] [client] Fix race condition and ensure correct message ordering in Relay (#5265) * Fix race condition and ensure correct message ordering in connection establishment Reorder operations in OpenConn to register the connection before waiting for peer availability. This ensures: - Connection is ready to receive messages before peer subscription completes - Transport messages and onconnected events maintain proper ordering - No messages are lost during the connection establishment window - Concurrent OpenConn calls cannot create duplicate connections If peer availability check fails, the pre-registered connection is properly cleaned up. * Handle service shutdown during relay connection initialization Ensure relay connections are properly cleaned up when the service is not running by verifying `serviceIsRunning` and removing stale entries from `c.conns` to prevent unintended behaviors. --- shared/relay/client/client.go | 51 ++++++++++++++++++++--------------- 1 file changed, 29 insertions(+), 22 deletions(-) diff --git a/shared/relay/client/client.go b/shared/relay/client/client.go index 57a98614d..0acadaa4b 100644 --- a/shared/relay/client/client.go +++ b/shared/relay/client/client.go @@ -225,35 +225,42 @@ func (c *Client) OpenConn(ctx context.Context, dstPeerID string) (net.Conn, erro c.mu.Unlock() return nil, ErrConnAlreadyExists } - c.mu.Unlock() - if err := c.stateSubscription.WaitToBeOnlineAndSubscribe(ctx, peerID); err != nil { - c.log.Errorf("peer not available: %s, %s", peerID, err) - return nil, err - } - - c.log.Infof("remote peer is available, prepare the relayed connection: %s", peerID) - msgChannel := make(chan Msg, 100) - - c.mu.Lock() - if !c.serviceIsRunning { - c.mu.Unlock() - return nil, fmt.Errorf("relay connection is not established") - } + c.log.Infof("prepare the relayed connection, waiting for remote peer: %s", peerID) c.muInstanceURL.Lock() instanceURL := c.instanceURL c.muInstanceURL.Unlock() - conn := NewConn(c, peerID, msgChannel, instanceURL) - _, ok = c.conns[peerID] - if ok { - c.mu.Unlock() - _ = conn.Close() - return nil, ErrConnAlreadyExists - } - c.conns[peerID] = newConnContainer(c.log, conn, msgChannel) + msgChannel := make(chan Msg, 100) + conn := NewConn(c, peerID, msgChannel, instanceURL) + container := newConnContainer(c.log, conn, msgChannel) + c.conns[peerID] = container c.mu.Unlock() + + if err := c.stateSubscription.WaitToBeOnlineAndSubscribe(ctx, peerID); err != nil { + c.log.Errorf("peer not available: %s, %s", peerID, err) + c.mu.Lock() + if savedContainer, ok := c.conns[peerID]; ok && savedContainer == container { + delete(c.conns, peerID) + } + c.mu.Unlock() + container.close() + return nil, err + } + + c.mu.Lock() + if !c.serviceIsRunning { + if savedContainer, ok := c.conns[peerID]; ok && savedContainer == container { + delete(c.conns, peerID) + } + c.mu.Unlock() + container.close() + return nil, fmt.Errorf("relay connection is not established") + } + c.mu.Unlock() + + c.log.Infof("remote peer is available: %s", peerID) return conn, nil } From fc88399c232efe88b736dd41e248a8f809bdd837 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Tue, 10 Feb 2026 18:31:15 +0100 Subject: [PATCH 115/374] [management] fixed ischild check (#5279) --- go.mod | 2 +- go.sum | 4 ++-- .../server/http/middleware/auth_middleware.go | 13 +++++++++---- .../server/http/middleware/auth_middleware_test.go | 10 ++++------ 4 files changed, 16 insertions(+), 13 deletions(-) diff --git a/go.mod b/go.mod index 2a6c311ce..801d52483 100644 --- a/go.mod +++ b/go.mod @@ -68,7 +68,7 @@ require ( github.com/mdlayher/socket v0.5.1 github.com/miekg/dns v1.1.59 github.com/mitchellh/hashstructure/v2 v2.0.2 - github.com/netbirdio/management-integrations/integrations v0.0.0-20260122111742-a6f99668844f + github.com/netbirdio/management-integrations/integrations v0.0.0-20260210160626-df4b180c7b25 github.com/netbirdio/signal-dispatcher/dispatcher v0.0.0-20250805121659-6b4ac470ca45 github.com/oapi-codegen/runtime v1.1.2 github.com/okta/okta-sdk-golang/v2 v2.18.0 diff --git a/go.sum b/go.sum index 17e5c8ffa..23a12ff68 100644 --- a/go.sum +++ b/go.sum @@ -406,8 +406,8 @@ github.com/netbirdio/go-netroute v0.0.0-20240611143515-f59b0e1d3944 h1:TDtJKmM6S github.com/netbirdio/go-netroute v0.0.0-20240611143515-f59b0e1d3944/go.mod h1:sHA6TRxjQ6RLbnI+3R4DZo2Eseg/iKiPRfNmcuNySVQ= github.com/netbirdio/ice/v4 v4.0.0-20250908184934-6202be846b51 h1:Ov4qdafATOgGMB1wbSuh+0aAHcwz9hdvB6VZjh1mVMI= github.com/netbirdio/ice/v4 v4.0.0-20250908184934-6202be846b51/go.mod h1:ZSIbPdBn5hePO8CpF1PekH2SfpTxg1PDhEwtbqZS7R8= -github.com/netbirdio/management-integrations/integrations v0.0.0-20260122111742-a6f99668844f h1:CTBf0je/FpKr2lVSMZLak7m8aaWcS6ur4SOfhSSazFI= -github.com/netbirdio/management-integrations/integrations v0.0.0-20260122111742-a6f99668844f/go.mod h1:y7CxagMYzg9dgu+masRqYM7BQlOGA5Y8US85MCNFPlY= +github.com/netbirdio/management-integrations/integrations v0.0.0-20260210160626-df4b180c7b25 h1:iwAq/Ncaq0etl4uAlVsbNBzC1yY52o0AmY7uCm2AMTs= +github.com/netbirdio/management-integrations/integrations v0.0.0-20260210160626-df4b180c7b25/go.mod h1:y7CxagMYzg9dgu+masRqYM7BQlOGA5Y8US85MCNFPlY= github.com/netbirdio/service v0.0.0-20240911161631-f62744f42502 h1:3tHlFmhTdX9axERMVN63dqyFqnvuD+EMJHzM7mNGON8= github.com/netbirdio/service v0.0.0-20240911161631-f62744f42502/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= github.com/netbirdio/signal-dispatcher/dispatcher v0.0.0-20250805121659-6b4ac470ca45 h1:ujgviVYmx243Ksy7NdSwrdGPSRNE3pb8kEDSpH0QuAQ= diff --git a/management/server/http/middleware/auth_middleware.go b/management/server/http/middleware/auth_middleware.go index 257347153..63be672e6 100644 --- a/management/server/http/middleware/auth_middleware.go +++ b/management/server/http/middleware/auth_middleware.go @@ -11,6 +11,7 @@ import ( log "github.com/sirupsen/logrus" "go.opentelemetry.io/otel/metric" + "github.com/netbirdio/management-integrations/integrations" serverauth "github.com/netbirdio/netbird/management/server/auth" nbcontext "github.com/netbirdio/netbird/management/server/context" "github.com/netbirdio/netbird/management/server/http/middleware/bypass" @@ -130,8 +131,10 @@ func (m *AuthMiddleware) checkJWTFromRequest(r *http.Request, authHeaderParts [] } if impersonate, ok := r.URL.Query()["account"]; ok && len(impersonate) == 1 { - userAuth.AccountId = impersonate[0] - userAuth.IsChild = ok + if integrations.IsValidChildAccount(ctx, userAuth.UserId, userAuth.AccountId, impersonate[0]) { + userAuth.AccountId = impersonate[0] + userAuth.IsChild = true + } } // Email is now extracted in ToUserAuth (from claims or userinfo endpoint) @@ -207,8 +210,10 @@ func (m *AuthMiddleware) checkPATFromRequest(r *http.Request, authHeaderParts [] } if impersonate, ok := r.URL.Query()["account"]; ok && len(impersonate) == 1 { - userAuth.AccountId = impersonate[0] - userAuth.IsChild = ok + if integrations.IsValidChildAccount(r.Context(), userAuth.UserId, userAuth.AccountId, impersonate[0]) { + userAuth.AccountId = impersonate[0] + userAuth.IsChild = true + } } return nbcontext.SetUserAuthInRequest(r, userAuth), nil diff --git a/management/server/http/middleware/auth_middleware_test.go b/management/server/http/middleware/auth_middleware_test.go index 05ca59419..f397c63a4 100644 --- a/management/server/http/middleware/auth_middleware_test.go +++ b/management/server/http/middleware/auth_middleware_test.go @@ -627,15 +627,14 @@ func TestAuthMiddleware_Handler_Child(t *testing.T) { }, }, { - name: "Valid PAT Token accesses child", + name: "PAT Token with account param ignored in public version", path: "/test?account=xyz", authHeader: "Token " + PAT, expectedUserAuth: &nbauth.UserAuth{ - AccountId: "xyz", + AccountId: accountID, UserId: userID, Domain: testAccount.Domain, DomainCategory: testAccount.DomainCategory, - IsChild: true, IsPAT: true, }, }, @@ -652,15 +651,14 @@ func TestAuthMiddleware_Handler_Child(t *testing.T) { }, { - name: "Valid JWT Token with child", + name: "JWT Token with account param ignored in public version", path: "/test?account=xyz", authHeader: "Bearer " + JWT, expectedUserAuth: &nbauth.UserAuth{ - AccountId: "xyz", + AccountId: accountID, UserId: userID, Domain: testAccount.Domain, DomainCategory: testAccount.DomainCategory, - IsChild: true, }, }, } From 2de19490186123c2412b1c11b5000686f1168399 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Wed, 11 Feb 2026 21:42:36 +0100 Subject: [PATCH 116/374] [client] Check if login is required on foreground mode (#5295) --- client/cmd/login.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/client/cmd/login.go b/client/cmd/login.go index 64b45e557..4521a67c9 100644 --- a/client/cmd/login.go +++ b/client/cmd/login.go @@ -282,13 +282,9 @@ func foregroundLogin(ctx context.Context, cmd *cobra.Command, config *profileman } defer authClient.Close() - needsLogin := false - - err, isAuthError := authClient.Login(ctx, "", "") - if isAuthError { - needsLogin = true - } else if err != nil { - return fmt.Errorf("login check failed: %v", err) + needsLogin, err := authClient.IsLoginRequired(ctx) + if err != nil { + return fmt.Errorf("check login required: %v", err) } jwtToken := "" From 1ddc9ce2bf32833b973a70ee2eda2a335b3c3e57 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 12 Feb 2026 16:15:42 +0800 Subject: [PATCH 117/374] [client] Fix nil pointer panic in device and engine code (#5287) --- client/iface/device/device_filter.go | 19 +++++++++++++++++-- client/iface/device/device_netstack.go | 4 +++- client/iface/netstack/tun.go | 2 +- client/internal/engine.go | 3 ++- 4 files changed, 23 insertions(+), 5 deletions(-) diff --git a/client/iface/device/device_filter.go b/client/iface/device/device_filter.go index 015f71ff4..708f38d26 100644 --- a/client/iface/device/device_filter.go +++ b/client/iface/device/device_filter.go @@ -29,8 +29,9 @@ type PacketFilter interface { type FilteredDevice struct { tun.Device - filter PacketFilter - mutex sync.RWMutex + filter PacketFilter + mutex sync.RWMutex + closeOnce sync.Once } // newDeviceFilter constructor function @@ -40,6 +41,20 @@ func newDeviceFilter(device tun.Device) *FilteredDevice { } } +// Close closes the underlying tun device exactly once. +// wireguard-go's netTun.Close() panics on double-close due to a bare close(channel), +// and multiple code paths can trigger Close on the same device. +func (d *FilteredDevice) Close() error { + var err error + d.closeOnce.Do(func() { + err = d.Device.Close() + }) + if err != nil { + return err + } + return nil +} + // Read wraps read method with filtering feature func (d *FilteredDevice) Read(bufs [][]byte, sizes []int, offset int) (n int, err error) { if n, err = d.Device.Read(bufs, sizes, offset); err != nil { diff --git a/client/iface/device/device_netstack.go b/client/iface/device/device_netstack.go index 40d8fdac8..e457657f7 100644 --- a/client/iface/device/device_netstack.go +++ b/client/iface/device/device_netstack.go @@ -82,7 +82,9 @@ func (t *TunNetstackDevice) create() (WGConfigurer, error) { t.configurer = configurer.NewUSPConfigurer(t.device, t.name, t.bind.ActivityRecorder()) err = t.configurer.ConfigureInterface(t.key, t.port) if err != nil { - _ = tunIface.Close() + if cErr := tunIface.Close(); cErr != nil { + log.Debugf("failed to close tun device: %v", cErr) + } return nil, fmt.Errorf("error configuring interface: %s", err) } diff --git a/client/iface/netstack/tun.go b/client/iface/netstack/tun.go index b2506b50d..346ae29ec 100644 --- a/client/iface/netstack/tun.go +++ b/client/iface/netstack/tun.go @@ -66,7 +66,7 @@ func (t *NetStackTun) Create() (tun.Device, *netstack.Net, error) { } }() - return nsTunDev, tunNet, nil + return t.tundev, tunNet, nil } func (t *NetStackTun) Close() error { diff --git a/client/internal/engine.go b/client/internal/engine.go index 4dbd5f45e..631910eb6 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -543,11 +543,12 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) // monitor WireGuard interface lifecycle and restart engine on changes e.wgIfaceMonitor = NewWGIfaceMonitor() e.shutdownWg.Add(1) + wgIfaceName := e.wgInterface.Name() go func() { defer e.shutdownWg.Done() - if shouldRestart, err := e.wgIfaceMonitor.Start(e.ctx, e.wgInterface.Name()); shouldRestart { + if shouldRestart, err := e.wgIfaceMonitor.Start(e.ctx, wgIfaceName); shouldRestart { log.Infof("WireGuard interface monitor: %s, restarting engine", err) e.triggerClientRestart() } else if err != nil { From 3dfa97dcbde64006d0e7e21160cd8242c66e0b6e Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 12 Feb 2026 16:15:57 +0800 Subject: [PATCH 118/374] [client] Fix stale entries in nftables with no handle (#5272) --- client/firewall/nftables/router_linux.go | 179 +++++++++++++----- client/firewall/nftables/router_linux_test.go | 135 +++++++++++++ 2 files changed, 266 insertions(+), 48 deletions(-) diff --git a/client/firewall/nftables/router_linux.go b/client/firewall/nftables/router_linux.go index b6e0cf5b2..fde654c20 100644 --- a/client/firewall/nftables/router_linux.go +++ b/client/firewall/nftables/router_linux.go @@ -483,7 +483,12 @@ func (r *router) DeleteRouteRule(rule firewall.Rule) error { } if nftRule.Handle == 0 { - return fmt.Errorf("route rule %s has no handle", ruleKey) + log.Warnf("route rule %s has no handle, removing stale entry", ruleKey) + if err := r.decrementSetCounter(nftRule); err != nil { + log.Warnf("decrement set counter for stale rule %s: %v", ruleKey, err) + } + delete(r.rules, ruleKey) + return nil } if err := r.deleteNftRule(nftRule, ruleKey); err != nil { @@ -660,13 +665,32 @@ func (r *router) AddNatRule(pair firewall.RouterPair) error { } if err := r.conn.Flush(); err != nil { - // TODO: rollback ipset counter - return fmt.Errorf("insert rules for %s: %v", pair.Destination, err) + r.rollbackRules(pair) + return fmt.Errorf("insert rules for %s: %w", pair.Destination, err) } return nil } +// rollbackRules cleans up unflushed rules and their set counters after a flush failure. +func (r *router) rollbackRules(pair firewall.RouterPair) { + keys := []string{ + firewall.GenKey(firewall.ForwardingFormat, pair), + firewall.GenKey(firewall.PreroutingFormat, pair), + firewall.GenKey(firewall.PreroutingFormat, firewall.GetInversePair(pair)), + } + for _, key := range keys { + rule, ok := r.rules[key] + if !ok { + continue + } + if err := r.decrementSetCounter(rule); err != nil { + log.Warnf("rollback set counter for %s: %v", key, err) + } + delete(r.rules, key) + } +} + // addNatRule inserts a nftables rule to the conn client flush queue func (r *router) addNatRule(pair firewall.RouterPair) error { sourceExp, err := r.applyNetwork(pair.Source, nil, true) @@ -928,18 +952,30 @@ func (r *router) addLegacyRouteRule(pair firewall.RouterPair) error { func (r *router) removeLegacyRouteRule(pair firewall.RouterPair) error { ruleKey := firewall.GenKey(firewall.ForwardingFormat, pair) - if rule, exists := r.rules[ruleKey]; exists { - if err := r.conn.DelRule(rule); err != nil { - return fmt.Errorf("remove legacy forwarding rule %s -> %s: %v", pair.Source, pair.Destination, err) - } - - log.Debugf("removed legacy forwarding rule %s -> %s", pair.Source, pair.Destination) - - delete(r.rules, ruleKey) + rule, exists := r.rules[ruleKey] + if !exists { + return nil + } + if rule.Handle == 0 { + log.Warnf("legacy forwarding rule %s has no handle, removing stale entry", ruleKey) if err := r.decrementSetCounter(rule); err != nil { - return fmt.Errorf("decrement set counter: %w", err) + log.Warnf("decrement set counter for stale rule %s: %v", ruleKey, err) } + delete(r.rules, ruleKey) + return nil + } + + if err := r.conn.DelRule(rule); err != nil { + return fmt.Errorf("remove legacy forwarding rule %s -> %s: %w", pair.Source, pair.Destination, err) + } + + log.Debugf("removed legacy forwarding rule %s -> %s", pair.Source, pair.Destination) + + delete(r.rules, ruleKey) + + if err := r.decrementSetCounter(rule); err != nil { + return fmt.Errorf("decrement set counter: %w", err) } return nil @@ -1329,65 +1365,89 @@ func (r *router) RemoveNatRule(pair firewall.RouterPair) error { return fmt.Errorf(refreshRulesMapError, err) } + var merr *multierror.Error + if pair.Masquerade { if err := r.removeNatRule(pair); err != nil { - return fmt.Errorf("remove prerouting rule: %w", err) + merr = multierror.Append(merr, fmt.Errorf("remove prerouting rule: %w", err)) } if err := r.removeNatRule(firewall.GetInversePair(pair)); err != nil { - return fmt.Errorf("remove inverse prerouting rule: %w", err) + merr = multierror.Append(merr, fmt.Errorf("remove inverse prerouting rule: %w", err)) } } if err := r.removeLegacyRouteRule(pair); err != nil { - return fmt.Errorf("remove legacy routing rule: %w", err) + merr = multierror.Append(merr, fmt.Errorf("remove legacy routing rule: %w", err)) } + // Set counters are decremented in the sub-methods above before flush. If flush fails, + // counters will be off until the next successful removal or refresh cycle. if err := r.conn.Flush(); err != nil { - // TODO: rollback set counter - return fmt.Errorf("remove nat rules rule %s: %v", pair.Destination, err) + merr = multierror.Append(merr, fmt.Errorf("flush remove nat rules %s: %w", pair.Destination, err)) } - return nil + return nberrors.FormatErrorOrNil(merr) } func (r *router) removeNatRule(pair firewall.RouterPair) error { ruleKey := firewall.GenKey(firewall.PreroutingFormat, pair) - if rule, exists := r.rules[ruleKey]; exists { - if err := r.conn.DelRule(rule); err != nil { - return fmt.Errorf("remove prerouting rule %s -> %s: %v", pair.Source, pair.Destination, err) - } - - log.Debugf("removed prerouting rule %s -> %s", pair.Source, pair.Destination) - - delete(r.rules, ruleKey) - - if err := r.decrementSetCounter(rule); err != nil { - return fmt.Errorf("decrement set counter: %w", err) - } - } else { + rule, exists := r.rules[ruleKey] + if !exists { log.Debugf("prerouting rule %s not found", ruleKey) + return nil + } + + if rule.Handle == 0 { + log.Warnf("prerouting rule %s has no handle, removing stale entry", ruleKey) + if err := r.decrementSetCounter(rule); err != nil { + log.Warnf("decrement set counter for stale rule %s: %v", ruleKey, err) + } + delete(r.rules, ruleKey) + return nil + } + + if err := r.conn.DelRule(rule); err != nil { + return fmt.Errorf("remove prerouting rule %s -> %s: %w", pair.Source, pair.Destination, err) + } + + log.Debugf("removed prerouting rule %s -> %s", pair.Source, pair.Destination) + + delete(r.rules, ruleKey) + + if err := r.decrementSetCounter(rule); err != nil { + return fmt.Errorf("decrement set counter: %w", err) } return nil } -// refreshRulesMap refreshes the rule map with the latest rules. this is useful to avoid -// duplicates and to get missing attributes that we don't have when adding new rules +// refreshRulesMap rebuilds the rule map from the kernel. This removes stale entries +// (e.g. from failed flushes) and updates handles for all existing rules. func (r *router) refreshRulesMap() error { + var merr *multierror.Error + newRules := make(map[string]*nftables.Rule) for _, chain := range r.chains { rules, err := r.conn.GetRules(chain.Table, chain) if err != nil { - return fmt.Errorf("list rules: %w", err) + merr = multierror.Append(merr, fmt.Errorf("list rules for chain %s: %w", chain.Name, err)) + // preserve existing entries for this chain since we can't verify their state + for k, v := range r.rules { + if v.Chain != nil && v.Chain.Name == chain.Name { + newRules[k] = v + } + } + continue } for _, rule := range rules { if len(rule.UserData) > 0 { - r.rules[string(rule.UserData)] = rule + newRules[string(rule.UserData)] = rule } } } - return nil + r.rules = newRules + return nberrors.FormatErrorOrNil(merr) } func (r *router) AddDNATRule(rule firewall.ForwardRule) (firewall.Rule, error) { @@ -1629,20 +1689,34 @@ func (r *router) DeleteDNATRule(rule firewall.Rule) error { } var merr *multierror.Error + var needsFlush bool + if dnatRule, exists := r.rules[ruleKey+dnatSuffix]; exists { - if err := r.conn.DelRule(dnatRule); err != nil { + if dnatRule.Handle == 0 { + log.Warnf("dnat rule %s has no handle, removing stale entry", ruleKey+dnatSuffix) + delete(r.rules, ruleKey+dnatSuffix) + } else if err := r.conn.DelRule(dnatRule); err != nil { merr = multierror.Append(merr, fmt.Errorf("delete dnat rule: %w", err)) + } else { + needsFlush = true } } if masqRule, exists := r.rules[ruleKey+snatSuffix]; exists { - if err := r.conn.DelRule(masqRule); err != nil { + if masqRule.Handle == 0 { + log.Warnf("snat rule %s has no handle, removing stale entry", ruleKey+snatSuffix) + delete(r.rules, ruleKey+snatSuffix) + } else if err := r.conn.DelRule(masqRule); err != nil { merr = multierror.Append(merr, fmt.Errorf("delete snat rule: %w", err)) + } else { + needsFlush = true } } - if err := r.conn.Flush(); err != nil { - merr = multierror.Append(merr, fmt.Errorf(flushError, err)) + if needsFlush { + if err := r.conn.Flush(); err != nil { + merr = multierror.Append(merr, fmt.Errorf(flushError, err)) + } } if merr == nil { @@ -1757,16 +1831,25 @@ func (r *router) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Proto ruleID := fmt.Sprintf("inbound-dnat-%s-%s-%d-%d", localAddr.String(), protocol, sourcePort, targetPort) - if rule, exists := r.rules[ruleID]; exists { - if err := r.conn.DelRule(rule); err != nil { - return fmt.Errorf("delete inbound DNAT rule %s: %w", ruleID, err) - } - if err := r.conn.Flush(); err != nil { - return fmt.Errorf("flush delete inbound DNAT rule: %w", err) - } - delete(r.rules, ruleID) + rule, exists := r.rules[ruleID] + if !exists { + return nil } + if rule.Handle == 0 { + log.Warnf("inbound DNAT rule %s has no handle, removing stale entry", ruleID) + delete(r.rules, ruleID) + return nil + } + + if err := r.conn.DelRule(rule); err != nil { + return fmt.Errorf("delete inbound DNAT rule %s: %w", ruleID, err) + } + if err := r.conn.Flush(); err != nil { + return fmt.Errorf("flush delete inbound DNAT rule: %w", err) + } + delete(r.rules, ruleID) + return nil } diff --git a/client/firewall/nftables/router_linux_test.go b/client/firewall/nftables/router_linux_test.go index 3531b014b..f0e34d211 100644 --- a/client/firewall/nftables/router_linux_test.go +++ b/client/firewall/nftables/router_linux_test.go @@ -18,6 +18,7 @@ import ( firewall "github.com/netbirdio/netbird/client/firewall/manager" "github.com/netbirdio/netbird/client/firewall/test" "github.com/netbirdio/netbird/client/iface" + "github.com/netbirdio/netbird/client/internal/acl/id" ) const ( @@ -719,3 +720,137 @@ func deleteWorkTable() { } } } + +func TestRouter_RefreshRulesMap_RemovesStaleEntries(t *testing.T) { + if check() != NFTABLES { + t.Skip("nftables not supported on this system") + } + + workTable, err := createWorkTable() + require.NoError(t, err) + defer deleteWorkTable() + + r, err := newRouter(workTable, ifaceMock, iface.DefaultMTU) + require.NoError(t, err) + require.NoError(t, r.init(workTable)) + defer func() { require.NoError(t, r.Reset()) }() + + // Add a real rule to the kernel + ruleKey, err := r.AddRouteFiltering( + nil, + []netip.Prefix{netip.MustParsePrefix("192.168.1.0/24")}, + firewall.Network{Prefix: netip.MustParsePrefix("10.0.0.0/24")}, + firewall.ProtocolTCP, + nil, + &firewall.Port{Values: []uint16{80}}, + firewall.ActionAccept, + ) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, r.DeleteRouteRule(ruleKey)) + }) + + // Inject a stale entry with Handle=0 (simulates store-before-flush failure) + staleKey := "stale-rule-that-does-not-exist" + r.rules[staleKey] = &nftables.Rule{ + Table: r.workTable, + Chain: r.chains[chainNameRoutingFw], + Handle: 0, + UserData: []byte(staleKey), + } + + require.Contains(t, r.rules, staleKey, "stale entry should be in map before refresh") + + err = r.refreshRulesMap() + require.NoError(t, err) + + assert.NotContains(t, r.rules, staleKey, "stale entry should be removed after refresh") + + realRule, ok := r.rules[ruleKey.ID()] + assert.True(t, ok, "real rule should still exist after refresh") + assert.NotZero(t, realRule.Handle, "real rule should have a valid handle") +} + +func TestRouter_DeleteRouteRule_StaleHandle(t *testing.T) { + if check() != NFTABLES { + t.Skip("nftables not supported on this system") + } + + workTable, err := createWorkTable() + require.NoError(t, err) + defer deleteWorkTable() + + r, err := newRouter(workTable, ifaceMock, iface.DefaultMTU) + require.NoError(t, err) + require.NoError(t, r.init(workTable)) + defer func() { require.NoError(t, r.Reset()) }() + + // Inject a stale entry with Handle=0 + staleKey := "stale-route-rule" + r.rules[staleKey] = &nftables.Rule{ + Table: r.workTable, + Chain: r.chains[chainNameRoutingFw], + Handle: 0, + UserData: []byte(staleKey), + } + + // DeleteRouteRule should not return an error for stale handles + err = r.DeleteRouteRule(id.RuleID(staleKey)) + assert.NoError(t, err, "deleting a stale rule should not error") + assert.NotContains(t, r.rules, staleKey, "stale entry should be cleaned up") +} + +func TestRouter_AddNatRule_WithStaleEntry(t *testing.T) { + if check() != NFTABLES { + t.Skip("nftables not supported on this system") + } + + manager, err := Create(ifaceMock, iface.DefaultMTU) + require.NoError(t, err) + require.NoError(t, manager.Init(nil)) + t.Cleanup(func() { + require.NoError(t, manager.Close(nil)) + }) + + pair := firewall.RouterPair{ + ID: "staletest", + Source: firewall.Network{Prefix: netip.MustParsePrefix("100.100.100.1/32")}, + Destination: firewall.Network{Prefix: netip.MustParsePrefix("100.100.200.0/24")}, + Masquerade: true, + } + + rtr := manager.router + + // First add succeeds + err = rtr.AddNatRule(pair) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, rtr.RemoveNatRule(pair)) + }) + + // Corrupt the handle to simulate stale state + natRuleKey := firewall.GenKey(firewall.PreroutingFormat, pair) + if rule, exists := rtr.rules[natRuleKey]; exists { + rule.Handle = 0 + } + inverseKey := firewall.GenKey(firewall.PreroutingFormat, firewall.GetInversePair(pair)) + if rule, exists := rtr.rules[inverseKey]; exists { + rule.Handle = 0 + } + + // Adding the same rule again should succeed despite stale handles + err = rtr.AddNatRule(pair) + assert.NoError(t, err, "AddNatRule should succeed even with stale entries") + + // Verify rules exist in kernel + rules, err := rtr.conn.GetRules(rtr.workTable, rtr.chains[chainNameManglePrerouting]) + require.NoError(t, err) + + found := 0 + for _, rule := range rules { + if len(rule.UserData) > 0 && string(rule.UserData) == natRuleKey { + found++ + } + } + assert.Equal(t, 1, found, "NAT rule should exist in kernel") +} From 69d4b5d821b609eb834dc1f93c267b88699f94d0 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Thu, 12 Feb 2026 11:31:49 +0100 Subject: [PATCH 119/374] [misc] Update sign pipeline version (#5296) --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 84f6f64ed..967e0c7d7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -9,7 +9,7 @@ on: pull_request: env: - SIGN_PIPE_VER: "v0.1.0" + SIGN_PIPE_VER: "v0.1.1" GORELEASER_VER: "v2.3.2" PRODUCT_NAME: "NetBird" COPYRIGHT: "NetBird GmbH" From 64b849c801eba6046a555770b3ddb885dae84d62 Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Thu, 12 Feb 2026 19:24:43 +0100 Subject: [PATCH 120/374] [self-hosted] add netbird server (#5232) * Unified NetBird combined server (Management, Signal, Relay, STUN) as a single executable with richer YAML configuration, validation, and defaults. * Official Dockerfile/image for single-container deployment. * Optional in-process profiling endpoint for diagnostics. * Multiplexing to route HTTP/gRPC/WebSocket traffic via one port; runtime hooks to inject custom handlers. * **Chores** * Updated deployment scripts, compose files, and reverse-proxy templates to target the combined server; added example configs and getting-started updates. --- .goreleaser.yaml | 94 +++ combined/Dockerfile | 5 + combined/cmd/config.go | 715 +++++++++++++++++++ combined/cmd/pprof.go | 33 + combined/cmd/root.go | 711 +++++++++++++++++++ combined/config-simple.yaml.example | 111 +++ combined/config.yaml.example | 115 +++ combined/main.go | 13 + infrastructure_files/getting-started.sh | 778 +++++++-------------- management/cmd/management.go | 46 +- management/cmd/management_test.go | 2 +- management/internals/server/server.go | 33 +- management/server/store/sql_store.go | 4 +- management/server/store/store.go | 18 +- management/server/telemetry/app_metrics.go | 50 ++ relay/cmd/root.go | 2 +- relay/server/server.go | 8 + {signal => shared}/metrics/metrics.go | 0 signal/cmd/root.go | 1 - signal/cmd/run.go | 28 +- signal/metrics/app.go | 28 +- signal/server/signal.go | 4 +- stun/server.go | 2 +- 23 files changed, 2198 insertions(+), 603 deletions(-) create mode 100644 combined/Dockerfile create mode 100644 combined/cmd/config.go create mode 100644 combined/cmd/pprof.go create mode 100644 combined/cmd/root.go create mode 100644 combined/config-simple.yaml.example create mode 100644 combined/config.yaml.example create mode 100644 combined/main.go rename {signal => shared}/metrics/metrics.go (100%) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 7c6651f83..743822649 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -106,6 +106,26 @@ builds: - -s -w -X github.com/netbirdio/netbird/version.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} -X main.builtBy=goreleaser mod_timestamp: "{{ .CommitTimestamp }}" + - id: netbird-server + dir: combined + env: + - CGO_ENABLED=1 + - >- + {{- if eq .Runtime.Goos "linux" }} + {{- if eq .Arch "arm64"}}CC=aarch64-linux-gnu-gcc{{- end }} + {{- if eq .Arch "arm"}}CC=arm-linux-gnueabihf-gcc{{- end }} + {{- end }} + binary: netbird-server + goos: + - linux + goarch: + - amd64 + - arm64 + - arm + ldflags: + - -s -w -X github.com/netbirdio/netbird/version.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} -X main.builtBy=goreleaser + mod_timestamp: "{{ .CommitTimestamp }}" + - id: netbird-upload dir: upload-server env: [CGO_ENABLED=0] @@ -520,6 +540,55 @@ dockers: - "--label=org.opencontainers.image.revision={{.FullCommit}}" - "--label=org.opencontainers.image.source=https://github.com/netbirdio/{{.ProjectName}}" - "--label=maintainer=dev@netbird.io" + - image_templates: + - netbirdio/netbird-server:{{ .Version }}-amd64 + - ghcr.io/netbirdio/netbird-server:{{ .Version }}-amd64 + ids: + - netbird-server + goarch: amd64 + use: buildx + dockerfile: combined/Dockerfile + build_flag_templates: + - "--platform=linux/amd64" + - "--label=org.opencontainers.image.created={{.Date}}" + - "--label=org.opencontainers.image.title={{.ProjectName}}" + - "--label=org.opencontainers.image.version={{.Version}}" + - "--label=org.opencontainers.image.revision={{.FullCommit}}" + - "--label=org.opencontainers.image.source=https://github.com/netbirdio/{{.ProjectName}}" + - "--label=maintainer=dev@netbird.io" + - image_templates: + - netbirdio/netbird-server:{{ .Version }}-arm64v8 + - ghcr.io/netbirdio/netbird-server:{{ .Version }}-arm64v8 + ids: + - netbird-server + goarch: arm64 + use: buildx + dockerfile: combined/Dockerfile + build_flag_templates: + - "--platform=linux/arm64" + - "--label=org.opencontainers.image.created={{.Date}}" + - "--label=org.opencontainers.image.title={{.ProjectName}}" + - "--label=org.opencontainers.image.version={{.Version}}" + - "--label=org.opencontainers.image.revision={{.FullCommit}}" + - "--label=org.opencontainers.image.source=https://github.com/netbirdio/{{.ProjectName}}" + - "--label=maintainer=dev@netbird.io" + - image_templates: + - netbirdio/netbird-server:{{ .Version }}-arm + - ghcr.io/netbirdio/netbird-server:{{ .Version }}-arm + ids: + - netbird-server + goarch: arm + goarm: 6 + use: buildx + dockerfile: combined/Dockerfile + build_flag_templates: + - "--platform=linux/arm" + - "--label=org.opencontainers.image.created={{.Date}}" + - "--label=org.opencontainers.image.title={{.ProjectName}}" + - "--label=org.opencontainers.image.version={{.Version}}" + - "--label=org.opencontainers.image.revision={{.FullCommit}}" + - "--label=org.opencontainers.image.source=https://github.com/netbirdio/{{.ProjectName}}" + - "--label=maintainer=dev@netbird.io" docker_manifests: - name_template: netbirdio/netbird:{{ .Version }} image_templates: @@ -598,6 +667,18 @@ docker_manifests: - netbirdio/upload:{{ .Version }}-arm - netbirdio/upload:{{ .Version }}-amd64 + - name_template: netbirdio/netbird-server:{{ .Version }} + image_templates: + - netbirdio/netbird-server:{{ .Version }}-arm64v8 + - netbirdio/netbird-server:{{ .Version }}-arm + - netbirdio/netbird-server:{{ .Version }}-amd64 + + - name_template: netbirdio/netbird-server:latest + image_templates: + - netbirdio/netbird-server:{{ .Version }}-arm64v8 + - netbirdio/netbird-server:{{ .Version }}-arm + - netbirdio/netbird-server:{{ .Version }}-amd64 + - name_template: ghcr.io/netbirdio/netbird:{{ .Version }} image_templates: - ghcr.io/netbirdio/netbird:{{ .Version }}-arm64v8 @@ -675,6 +756,19 @@ docker_manifests: - ghcr.io/netbirdio/upload:{{ .Version }}-arm64v8 - ghcr.io/netbirdio/upload:{{ .Version }}-arm - ghcr.io/netbirdio/upload:{{ .Version }}-amd64 + + - name_template: ghcr.io/netbirdio/netbird-server:{{ .Version }} + image_templates: + - ghcr.io/netbirdio/netbird-server:{{ .Version }}-arm64v8 + - ghcr.io/netbirdio/netbird-server:{{ .Version }}-arm + - ghcr.io/netbirdio/netbird-server:{{ .Version }}-amd64 + + - name_template: ghcr.io/netbirdio/netbird-server:latest + image_templates: + - ghcr.io/netbirdio/netbird-server:{{ .Version }}-arm64v8 + - ghcr.io/netbirdio/netbird-server:{{ .Version }}-arm + - ghcr.io/netbirdio/netbird-server:{{ .Version }}-amd64 + brews: - ids: - default diff --git a/combined/Dockerfile b/combined/Dockerfile new file mode 100644 index 000000000..357e10cf8 --- /dev/null +++ b/combined/Dockerfile @@ -0,0 +1,5 @@ +FROM ubuntu:24.04 +RUN apt update && apt install -y ca-certificates && rm -fr /var/cache/apt +ENTRYPOINT [ "/go/bin/netbird-server" ] +CMD ["--config", "/etc/netbird/config.yaml"] +COPY netbird-server /go/bin/netbird-server \ No newline at end of file diff --git a/combined/cmd/config.go b/combined/cmd/config.go new file mode 100644 index 000000000..72c63b7c7 --- /dev/null +++ b/combined/cmd/config.go @@ -0,0 +1,715 @@ +package cmd + +import ( + "context" + "fmt" + "net" + "net/netip" + "os" + "path" + "strings" + "time" + + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" + + "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/util" + "github.com/netbirdio/netbird/util/crypt" + + nbconfig "github.com/netbirdio/netbird/management/internals/server/config" +) + +// CombinedConfig is the root configuration for the combined server. +// The combined server is primarily a Management server with optional embedded +// Signal, Relay, and STUN services. +// +// Architecture: +// - Management: Always runs locally (this IS the management server) +// - Signal: Runs locally by default; disabled if server.signalUri is set +// - Relay: Runs locally by default; disabled if server.relays is set +// - STUN: Runs locally on port 3478 by default; disabled if server.stuns is set +// +// All user-facing settings are under "server". The relay/signal/management +// fields are internal and populated automatically from server settings. +type CombinedConfig struct { + Server ServerConfig `yaml:"server"` + + // Internal configs - populated from Server settings, not user-configurable + Relay RelayConfig `yaml:"-"` + Signal SignalConfig `yaml:"-"` + Management ManagementConfig `yaml:"-"` +} + +// ServerConfig contains server-wide settings +// In simplified mode, this contains all configuration +type ServerConfig struct { + ListenAddress string `yaml:"listenAddress"` + MetricsPort int `yaml:"metricsPort"` + HealthcheckAddress string `yaml:"healthcheckAddress"` + LogLevel string `yaml:"logLevel"` + LogFile string `yaml:"logFile"` + TLS TLSConfig `yaml:"tls"` + + // Simplified config fields (used when relay/signal/management sections are omitted) + ExposedAddress string `yaml:"exposedAddress"` // Public address with protocol (e.g., "https://example.com:443") + StunPorts []int `yaml:"stunPorts"` // STUN ports (empty to disable local STUN) + AuthSecret string `yaml:"authSecret"` // Shared secret for relay authentication + DataDir string `yaml:"dataDir"` // Data directory for all services + + // External service overrides (simplified mode) + // When these are set, the corresponding local service is NOT started + // and these values are used for client configuration instead + Stuns []HostConfig `yaml:"stuns"` // External STUN servers (disables local STUN) + Relays RelaysConfig `yaml:"relays"` // External relay servers (disables local relay) + SignalURI string `yaml:"signalUri"` // External signal server (disables local signal) + + // Management settings (simplified mode) + DisableAnonymousMetrics bool `yaml:"disableAnonymousMetrics"` + DisableGeoliteUpdate bool `yaml:"disableGeoliteUpdate"` + Auth AuthConfig `yaml:"auth"` + Store StoreConfig `yaml:"store"` + ReverseProxy ReverseProxyConfig `yaml:"reverseProxy"` +} + +// TLSConfig contains TLS/HTTPS settings +type TLSConfig struct { + CertFile string `yaml:"certFile"` + KeyFile string `yaml:"keyFile"` + LetsEncrypt LetsEncryptConfig `yaml:"letsencrypt"` +} + +// LetsEncryptConfig contains Let's Encrypt settings +type LetsEncryptConfig struct { + Enabled bool `yaml:"enabled"` + DataDir string `yaml:"dataDir"` + Domains []string `yaml:"domains"` + Email string `yaml:"email"` + AWSRoute53 bool `yaml:"awsRoute53"` +} + +// RelayConfig contains relay service settings +type RelayConfig struct { + Enabled bool `yaml:"enabled"` + ExposedAddress string `yaml:"exposedAddress"` + AuthSecret string `yaml:"authSecret"` + LogLevel string `yaml:"logLevel"` + Stun StunConfig `yaml:"stun"` +} + +// StunConfig contains embedded STUN service settings +type StunConfig struct { + Enabled bool `yaml:"enabled"` + Ports []int `yaml:"ports"` + LogLevel string `yaml:"logLevel"` +} + +// SignalConfig contains signal service settings +type SignalConfig struct { + Enabled bool `yaml:"enabled"` + LogLevel string `yaml:"logLevel"` +} + +// ManagementConfig contains management service settings +type ManagementConfig struct { + Enabled bool `yaml:"enabled"` + LogLevel string `yaml:"logLevel"` + DataDir string `yaml:"dataDir"` + DnsDomain string `yaml:"dnsDomain"` + DisableAnonymousMetrics bool `yaml:"disableAnonymousMetrics"` + DisableGeoliteUpdate bool `yaml:"disableGeoliteUpdate"` + DisableDefaultPolicy bool `yaml:"disableDefaultPolicy"` + Auth AuthConfig `yaml:"auth"` + Stuns []HostConfig `yaml:"stuns"` + Relays RelaysConfig `yaml:"relays"` + SignalURI string `yaml:"signalUri"` + Store StoreConfig `yaml:"store"` + ReverseProxy ReverseProxyConfig `yaml:"reverseProxy"` +} + +// AuthConfig contains authentication/identity provider settings +type AuthConfig struct { + Issuer string `yaml:"issuer"` + LocalAuthDisabled bool `yaml:"localAuthDisabled"` + SignKeyRefreshEnabled bool `yaml:"signKeyRefreshEnabled"` + Storage AuthStorageConfig `yaml:"storage"` + DashboardRedirectURIs []string `yaml:"dashboardRedirectURIs"` + CLIRedirectURIs []string `yaml:"cliRedirectURIs"` + Owner *AuthOwnerConfig `yaml:"owner,omitempty"` +} + +// AuthStorageConfig contains auth storage settings +type AuthStorageConfig struct { + Type string `yaml:"type"` + File string `yaml:"file"` +} + +// AuthOwnerConfig contains initial admin user settings +type AuthOwnerConfig struct { + Email string `yaml:"email"` + Password string `yaml:"password"` +} + +// HostConfig represents a STUN/TURN/Signal host +type HostConfig struct { + URI string `yaml:"uri"` + Proto string `yaml:"proto,omitempty"` // udp, dtls, tcp, http, https - defaults based on URI scheme + Username string `yaml:"username,omitempty"` + Password string `yaml:"password,omitempty"` +} + +// RelaysConfig contains external relay server settings for clients +type RelaysConfig struct { + Addresses []string `yaml:"addresses"` + CredentialsTTL string `yaml:"credentialsTTL"` + Secret string `yaml:"secret"` +} + +// StoreConfig contains database settings +type StoreConfig struct { + Engine string `yaml:"engine"` + EncryptionKey string `yaml:"encryptionKey"` + DSN string `yaml:"dsn"` // Connection string for postgres or mysql engines +} + +// ReverseProxyConfig contains reverse proxy settings +type ReverseProxyConfig struct { + TrustedHTTPProxies []string `yaml:"trustedHTTPProxies"` + TrustedHTTPProxiesCount uint `yaml:"trustedHTTPProxiesCount"` + TrustedPeers []string `yaml:"trustedPeers"` +} + +// DefaultConfig returns a CombinedConfig with default values +func DefaultConfig() *CombinedConfig { + return &CombinedConfig{ + Server: ServerConfig{ + ListenAddress: ":443", + MetricsPort: 9090, + HealthcheckAddress: ":9000", + LogLevel: "info", + LogFile: "console", + StunPorts: []int{3478}, + DataDir: "/var/lib/netbird/", + Auth: AuthConfig{ + Storage: AuthStorageConfig{ + Type: "sqlite3", + }, + }, + Store: StoreConfig{ + Engine: "sqlite", + }, + }, + Relay: RelayConfig{ + // LogLevel inherited from Server.LogLevel via ApplySimplifiedDefaults + Stun: StunConfig{ + Enabled: false, + Ports: []int{3478}, + // LogLevel inherited from Server.LogLevel via ApplySimplifiedDefaults + }, + }, + Signal: SignalConfig{ + // LogLevel inherited from Server.LogLevel via ApplySimplifiedDefaults + }, + Management: ManagementConfig{ + DataDir: "/var/lib/netbird/", + Auth: AuthConfig{ + Storage: AuthStorageConfig{ + Type: "sqlite3", + }, + }, + Relays: RelaysConfig{ + CredentialsTTL: "12h", + }, + Store: StoreConfig{ + Engine: "sqlite", + }, + }, + } +} + +// hasRequiredSettings returns true if the configuration has the required server settings +func (c *CombinedConfig) hasRequiredSettings() bool { + return c.Server.ExposedAddress != "" +} + +// parseExposedAddress extracts protocol, host, and host:port from the exposed address +// Input format: "https://example.com:443" or "http://example.com:8080" or "example.com:443" +// Returns: protocol ("https" or "http"), hostname only, and host:port +func parseExposedAddress(exposedAddress string) (protocol, hostname, hostPort string) { + // Default to https if no protocol specified + protocol = "https" + hostPort = exposedAddress + + // Check for protocol prefix + if strings.HasPrefix(exposedAddress, "https://") { + protocol = "https" + hostPort = strings.TrimPrefix(exposedAddress, "https://") + } else if strings.HasPrefix(exposedAddress, "http://") { + protocol = "http" + hostPort = strings.TrimPrefix(exposedAddress, "http://") + } + + // Extract hostname (without port) + hostname = hostPort + if host, _, err := net.SplitHostPort(hostPort); err == nil { + hostname = host + } + + return protocol, hostname, hostPort +} + +// ApplySimplifiedDefaults populates internal relay/signal/management configs from server settings. +// Management is always enabled. Signal, Relay, and STUN are enabled unless external +// overrides are configured (server.signalUri, server.relays, server.stuns). +func (c *CombinedConfig) ApplySimplifiedDefaults() { + if !c.hasRequiredSettings() { + return + } + + // Parse exposed address to extract protocol and hostname + exposedProto, exposedHost, exposedHostPort := parseExposedAddress(c.Server.ExposedAddress) + + // Check for external service overrides + hasExternalRelay := len(c.Server.Relays.Addresses) > 0 + hasExternalSignal := c.Server.SignalURI != "" + hasExternalStuns := len(c.Server.Stuns) > 0 + + // Default stunPorts to [3478] if not specified and no external STUN + if len(c.Server.StunPorts) == 0 && !hasExternalStuns { + c.Server.StunPorts = []int{3478} + } + + c.applyRelayDefaults(exposedProto, exposedHostPort, hasExternalRelay, hasExternalStuns) + c.applySignalDefaults(hasExternalSignal) + c.applyManagementDefaults(exposedHost) + + // Auto-configure client settings (stuns, relays, signalUri) + c.autoConfigureClientSettings(exposedProto, exposedHost, exposedHostPort, hasExternalStuns, hasExternalRelay, hasExternalSignal) +} + +// applyRelayDefaults configures the relay service if no external relay is configured. +func (c *CombinedConfig) applyRelayDefaults(exposedProto, exposedHostPort string, hasExternalRelay, hasExternalStuns bool) { + if hasExternalRelay { + return + } + + c.Relay.Enabled = true + relayProto := "rel" + if exposedProto == "https" { + relayProto = "rels" + } + c.Relay.ExposedAddress = fmt.Sprintf("%s://%s", relayProto, exposedHostPort) + c.Relay.AuthSecret = c.Server.AuthSecret + if c.Relay.LogLevel == "" { + c.Relay.LogLevel = c.Server.LogLevel + } + + // Enable local STUN only if no external STUN servers and stunPorts are configured + if !hasExternalStuns && len(c.Server.StunPorts) > 0 { + c.Relay.Stun.Enabled = true + c.Relay.Stun.Ports = c.Server.StunPorts + if c.Relay.Stun.LogLevel == "" { + c.Relay.Stun.LogLevel = c.Server.LogLevel + } + } +} + +// applySignalDefaults configures the signal service if no external signal is configured. +func (c *CombinedConfig) applySignalDefaults(hasExternalSignal bool) { + if hasExternalSignal { + return + } + + c.Signal.Enabled = true + if c.Signal.LogLevel == "" { + c.Signal.LogLevel = c.Server.LogLevel + } +} + +// applyManagementDefaults configures the management service (always enabled). +func (c *CombinedConfig) applyManagementDefaults(exposedHost string) { + c.Management.Enabled = true + if c.Management.LogLevel == "" { + c.Management.LogLevel = c.Server.LogLevel + } + if c.Management.DataDir == "" || c.Management.DataDir == "/var/lib/netbird/" { + c.Management.DataDir = c.Server.DataDir + } + c.Management.DnsDomain = exposedHost + c.Management.DisableAnonymousMetrics = c.Server.DisableAnonymousMetrics + c.Management.DisableGeoliteUpdate = c.Server.DisableGeoliteUpdate + // Copy auth config from server if management auth issuer is not set + if c.Management.Auth.Issuer == "" && c.Server.Auth.Issuer != "" { + c.Management.Auth = c.Server.Auth + } + + // Copy store config from server if not set + if c.Management.Store.Engine == "" || c.Management.Store.Engine == "sqlite" { + if c.Server.Store.Engine != "" { + c.Management.Store = c.Server.Store + } + } + + // Copy reverse proxy config from server + if len(c.Server.ReverseProxy.TrustedHTTPProxies) > 0 || c.Server.ReverseProxy.TrustedHTTPProxiesCount > 0 || len(c.Server.ReverseProxy.TrustedPeers) > 0 { + c.Management.ReverseProxy = c.Server.ReverseProxy + } +} + +// autoConfigureClientSettings sets up STUN/relay/signal URIs for clients +// External overrides from server config take precedence over auto-generated values +func (c *CombinedConfig) autoConfigureClientSettings(exposedProto, exposedHost, exposedHostPort string, hasExternalStuns, hasExternalRelay, hasExternalSignal bool) { + // Determine relay protocol from exposed protocol + relayProto := "rel" + if exposedProto == "https" { + relayProto = "rels" + } + + // Configure STUN servers for clients + if hasExternalStuns { + // Use external STUN servers from server config + c.Management.Stuns = c.Server.Stuns + } else if len(c.Server.StunPorts) > 0 && len(c.Management.Stuns) == 0 { + // Auto-configure local STUN servers for all ports + for _, port := range c.Server.StunPorts { + c.Management.Stuns = append(c.Management.Stuns, HostConfig{ + URI: fmt.Sprintf("stun:%s:%d", exposedHost, port), + }) + } + } + + // Configure relay for clients + if hasExternalRelay { + // Use external relay config from server + c.Management.Relays = c.Server.Relays + } else if len(c.Management.Relays.Addresses) == 0 { + // Auto-configure local relay + c.Management.Relays.Addresses = []string{ + fmt.Sprintf("%s://%s", relayProto, exposedHostPort), + } + } + if c.Management.Relays.Secret == "" { + c.Management.Relays.Secret = c.Server.AuthSecret + } + if c.Management.Relays.CredentialsTTL == "" { + c.Management.Relays.CredentialsTTL = "12h" + } + + // Configure signal for clients + if hasExternalSignal { + // Use external signal URI from server config + c.Management.SignalURI = c.Server.SignalURI + } else if c.Management.SignalURI == "" { + // Auto-configure local signal + c.Management.SignalURI = fmt.Sprintf("%s://%s", exposedProto, exposedHostPort) + } +} + +// LoadConfig loads configuration from a YAML file +func LoadConfig(configPath string) (*CombinedConfig, error) { + cfg := DefaultConfig() + + if configPath == "" { + return cfg, nil + } + + data, err := os.ReadFile(configPath) + if err != nil { + return nil, fmt.Errorf("failed to read config file: %w", err) + } + + if err := yaml.Unmarshal(data, cfg); err != nil { + return nil, fmt.Errorf("failed to parse config file: %w", err) + } + + // Populate internal configs from server settings + cfg.ApplySimplifiedDefaults() + + return cfg, nil +} + +// Validate validates the configuration +func (c *CombinedConfig) Validate() error { + if c.Server.ExposedAddress == "" { + return fmt.Errorf("server.exposedAddress is required") + } + if c.Server.DataDir == "" { + return fmt.Errorf("server.dataDir is required") + } + + // Validate STUN ports + seen := make(map[int]bool) + for _, port := range c.Server.StunPorts { + if port <= 0 || port > 65535 { + return fmt.Errorf("invalid server.stunPorts value %d: must be between 1 and 65535", port) + } + if seen[port] { + return fmt.Errorf("duplicate STUN port %d in server.stunPorts", port) + } + seen[port] = true + } + + // authSecret is required only if running local relay (no external relay configured) + hasExternalRelay := len(c.Server.Relays.Addresses) > 0 + if !hasExternalRelay && c.Server.AuthSecret == "" { + return fmt.Errorf("server.authSecret is required when running local relay") + } + + return nil +} + +// HasTLSCert returns true if TLS certificate files are configured +func (c *CombinedConfig) HasTLSCert() bool { + return c.Server.TLS.CertFile != "" && c.Server.TLS.KeyFile != "" +} + +// HasLetsEncrypt returns true if Let's Encrypt is configured +func (c *CombinedConfig) HasLetsEncrypt() bool { + return c.Server.TLS.LetsEncrypt.Enabled && + c.Server.TLS.LetsEncrypt.DataDir != "" && + len(c.Server.TLS.LetsEncrypt.Domains) > 0 +} + +// parseExplicitProtocol parses an explicit protocol string to nbconfig.Protocol +func parseExplicitProtocol(proto string) (nbconfig.Protocol, bool) { + switch strings.ToLower(proto) { + case "udp": + return nbconfig.UDP, true + case "dtls": + return nbconfig.DTLS, true + case "tcp": + return nbconfig.TCP, true + case "http": + return nbconfig.HTTP, true + case "https": + return nbconfig.HTTPS, true + default: + return "", false + } +} + +// parseStunProtocol determines protocol for STUN/TURN servers. +// stun: → UDP, stuns: → DTLS, turn: → UDP, turns: → DTLS +// Explicit proto overrides URI scheme. Defaults to UDP. +func parseStunProtocol(uri, proto string) nbconfig.Protocol { + if proto != "" { + if p, ok := parseExplicitProtocol(proto); ok { + return p + } + } + + uri = strings.ToLower(uri) + switch { + case strings.HasPrefix(uri, "stuns:"): + return nbconfig.DTLS + case strings.HasPrefix(uri, "turns:"): + return nbconfig.DTLS + default: + // stun:, turn:, or no scheme - default to UDP + return nbconfig.UDP + } +} + +// parseSignalProtocol determines protocol for Signal servers. +// https:// → HTTPS, http:// → HTTP. Defaults to HTTPS. +func parseSignalProtocol(uri string) nbconfig.Protocol { + uri = strings.ToLower(uri) + switch { + case strings.HasPrefix(uri, "http://"): + return nbconfig.HTTP + default: + // https:// or no scheme - default to HTTPS + return nbconfig.HTTPS + } +} + +// stripSignalProtocol removes the protocol prefix from a signal URI. +// Returns just the host:port (e.g., "selfhosted2.demo.netbird.io:443"). +func stripSignalProtocol(uri string) string { + uri = strings.TrimPrefix(uri, "https://") + uri = strings.TrimPrefix(uri, "http://") + return uri +} + +// ToManagementConfig converts CombinedConfig to management server config +func (c *CombinedConfig) ToManagementConfig() (*nbconfig.Config, error) { + mgmt := c.Management + + // Build STUN hosts + var stuns []*nbconfig.Host + for _, s := range mgmt.Stuns { + stuns = append(stuns, &nbconfig.Host{ + URI: s.URI, + Proto: parseStunProtocol(s.URI, s.Proto), + Username: s.Username, + Password: s.Password, + }) + } + + // Build relay config + var relayConfig *nbconfig.Relay + if len(mgmt.Relays.Addresses) > 0 || mgmt.Relays.Secret != "" { + var ttl time.Duration + if mgmt.Relays.CredentialsTTL != "" { + var err error + ttl, err = time.ParseDuration(mgmt.Relays.CredentialsTTL) + if err != nil { + return nil, fmt.Errorf("invalid relay credentials TTL %q: %w", mgmt.Relays.CredentialsTTL, err) + } + } + relayConfig = &nbconfig.Relay{ + Addresses: mgmt.Relays.Addresses, + CredentialsTTL: util.Duration{Duration: ttl}, + Secret: mgmt.Relays.Secret, + } + } + + // Build signal config + var signalConfig *nbconfig.Host + if mgmt.SignalURI != "" { + signalConfig = &nbconfig.Host{ + URI: stripSignalProtocol(mgmt.SignalURI), + Proto: parseSignalProtocol(mgmt.SignalURI), + } + } + + // Build store config + storeConfig := nbconfig.StoreConfig{ + Engine: types.Engine(mgmt.Store.Engine), + } + + // Build reverse proxy config + reverseProxy := nbconfig.ReverseProxy{ + TrustedHTTPProxiesCount: mgmt.ReverseProxy.TrustedHTTPProxiesCount, + } + for _, p := range mgmt.ReverseProxy.TrustedHTTPProxies { + if prefix, err := netip.ParsePrefix(p); err == nil { + reverseProxy.TrustedHTTPProxies = append(reverseProxy.TrustedHTTPProxies, prefix) + } + } + for _, p := range mgmt.ReverseProxy.TrustedPeers { + if prefix, err := netip.ParsePrefix(p); err == nil { + reverseProxy.TrustedPeers = append(reverseProxy.TrustedPeers, prefix) + } + } + + // Build HTTP config (required, even if empty) + httpConfig := &nbconfig.HttpServerConfig{} + + // Build embedded IDP config (always enabled in combined server) + storageFile := mgmt.Auth.Storage.File + if storageFile == "" { + storageFile = path.Join(mgmt.DataDir, "idp.db") + } + + embeddedIdP := &idp.EmbeddedIdPConfig{ + Enabled: true, + Issuer: mgmt.Auth.Issuer, + LocalAuthDisabled: mgmt.Auth.LocalAuthDisabled, + SignKeyRefreshEnabled: mgmt.Auth.SignKeyRefreshEnabled, + Storage: idp.EmbeddedStorageConfig{ + Type: mgmt.Auth.Storage.Type, + Config: idp.EmbeddedStorageTypeConfig{ + File: storageFile, + }, + }, + DashboardRedirectURIs: mgmt.Auth.DashboardRedirectURIs, + CLIRedirectURIs: mgmt.Auth.CLIRedirectURIs, + } + + if mgmt.Auth.Owner != nil && mgmt.Auth.Owner.Email != "" { + embeddedIdP.Owner = &idp.OwnerConfig{ + Email: mgmt.Auth.Owner.Email, + Hash: mgmt.Auth.Owner.Password, // Will be hashed if plain text + } + } + + // Set HTTP config fields for embedded IDP + httpConfig.AuthIssuer = mgmt.Auth.Issuer + httpConfig.IdpSignKeyRefreshEnabled = mgmt.Auth.SignKeyRefreshEnabled + + return &nbconfig.Config{ + Stuns: stuns, + Relay: relayConfig, + Signal: signalConfig, + Datadir: mgmt.DataDir, + DataStoreEncryptionKey: mgmt.Store.EncryptionKey, + HttpConfig: httpConfig, + StoreConfig: storeConfig, + ReverseProxy: reverseProxy, + DisableDefaultPolicy: mgmt.DisableDefaultPolicy, + EmbeddedIdP: embeddedIdP, + }, nil +} + +// ApplyEmbeddedIdPConfig applies embedded IdP configuration to the management config. +// This mirrors the logic in management/cmd/management.go ApplyEmbeddedIdPConfig. +func ApplyEmbeddedIdPConfig(ctx context.Context, cfg *nbconfig.Config, mgmtPort int, disableSingleAccMode bool) error { + if cfg.EmbeddedIdP == nil || !cfg.EmbeddedIdP.Enabled { + return nil + } + + // Embedded IdP requires single account mode + if disableSingleAccMode { + return fmt.Errorf("embedded IdP requires single account mode; multiple account mode is not supported with embedded IdP") + } + + // Set LocalAddress for embedded IdP, used for internal JWT validation + cfg.EmbeddedIdP.LocalAddress = fmt.Sprintf("localhost:%d", mgmtPort) + + // Set storage defaults based on Datadir + if cfg.EmbeddedIdP.Storage.Type == "" { + cfg.EmbeddedIdP.Storage.Type = "sqlite3" + } + if cfg.EmbeddedIdP.Storage.Config.File == "" && cfg.Datadir != "" { + cfg.EmbeddedIdP.Storage.Config.File = path.Join(cfg.Datadir, "idp.db") + } + + issuer := cfg.EmbeddedIdP.Issuer + + // Ensure HttpConfig exists + if cfg.HttpConfig == nil { + cfg.HttpConfig = &nbconfig.HttpServerConfig{} + } + + // Set HttpConfig values from EmbeddedIdP + cfg.HttpConfig.AuthIssuer = issuer + cfg.HttpConfig.AuthAudience = "netbird-dashboard" + cfg.HttpConfig.CLIAuthAudience = "netbird-cli" + cfg.HttpConfig.AuthUserIDClaim = "sub" + cfg.HttpConfig.AuthKeysLocation = issuer + "/keys" + cfg.HttpConfig.OIDCConfigEndpoint = issuer + "/.well-known/openid-configuration" + cfg.HttpConfig.IdpSignKeyRefreshEnabled = true + + return nil +} + +// EnsureEncryptionKey generates an encryption key if not set. +// Unlike management server, we don't write back to the config file. +func EnsureEncryptionKey(ctx context.Context, cfg *nbconfig.Config) error { + if cfg.DataStoreEncryptionKey != "" { + return nil + } + + log.WithContext(ctx).Infof("DataStoreEncryptionKey is not set, generating a new key") + key, err := crypt.GenerateKey() + if err != nil { + return fmt.Errorf("failed to generate datastore encryption key: %v", err) + } + cfg.DataStoreEncryptionKey = key + keyPreview := key[:8] + "..." + log.WithContext(ctx).Warnf("DataStoreEncryptionKey generated (%s); add it to your config file under 'server.store.encryptionKey' to persist across restarts", keyPreview) + + return nil +} + +// LogConfigInfo logs informational messages about the loaded configuration +func LogConfigInfo(cfg *nbconfig.Config) { + if cfg.EmbeddedIdP != nil && cfg.EmbeddedIdP.Enabled { + log.Infof("running with the embedded IdP: %v", cfg.EmbeddedIdP.Issuer) + } + if cfg.Relay != nil { + log.Infof("Relay addresses: %v", cfg.Relay.Addresses) + } +} diff --git a/combined/cmd/pprof.go b/combined/cmd/pprof.go new file mode 100644 index 000000000..37efd35f0 --- /dev/null +++ b/combined/cmd/pprof.go @@ -0,0 +1,33 @@ +//go:build pprof +// +build pprof + +package cmd + +import ( + "net/http" + _ "net/http/pprof" + "os" + + log "github.com/sirupsen/logrus" +) + +func init() { + addr := pprofAddr() + go pprof(addr) +} + +func pprofAddr() string { + listenAddr := os.Getenv("NB_PPROF_ADDR") + if listenAddr == "" { + return "localhost:6969" + } + + return listenAddr +} + +func pprof(listenAddr string) { + log.Infof("listening pprof on: %s\n", listenAddr) + if err := http.ListenAndServe(listenAddr, nil); err != nil { + log.Fatalf("Failed to start pprof: %v", err) + } +} diff --git a/combined/cmd/root.go b/combined/cmd/root.go new file mode 100644 index 000000000..8837fea44 --- /dev/null +++ b/combined/cmd/root.go @@ -0,0 +1,711 @@ +package cmd + +import ( + "context" + "crypto/sha256" + "crypto/tls" + "errors" + "fmt" + "net" + "net/http" + "os" + "os/signal" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/coder/websocket" + "github.com/hashicorp/go-multierror" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "go.opentelemetry.io/otel/metric" + "google.golang.org/grpc" + + "github.com/netbirdio/netbird/encryption" + mgmtServer "github.com/netbirdio/netbird/management/internals/server" + nbconfig "github.com/netbirdio/netbird/management/internals/server/config" + "github.com/netbirdio/netbird/management/server/telemetry" + "github.com/netbirdio/netbird/relay/healthcheck" + relayServer "github.com/netbirdio/netbird/relay/server" + "github.com/netbirdio/netbird/relay/server/listener/ws" + sharedMetrics "github.com/netbirdio/netbird/shared/metrics" + "github.com/netbirdio/netbird/shared/relay/auth" + "github.com/netbirdio/netbird/shared/signal/proto" + signalServer "github.com/netbirdio/netbird/signal/server" + "github.com/netbirdio/netbird/stun" + "github.com/netbirdio/netbird/util" + "github.com/netbirdio/netbird/util/wsproxy" + wsproxyserver "github.com/netbirdio/netbird/util/wsproxy/server" +) + +var ( + configPath string + config *CombinedConfig + + rootCmd = &cobra.Command{ + Use: "combined", + Short: "Combined Netbird server (Management + Signal + Relay + STUN)", + Long: `Combined Netbird server for self-hosted deployments. + +All services (Management, Signal, Relay) are multiplexed on a single port. +Optional STUN server runs on separate UDP ports. + +Configuration is loaded from a YAML file specified with --config.`, + SilenceUsage: true, + SilenceErrors: true, + RunE: execute, + } +) + +func init() { + rootCmd.PersistentFlags().StringVarP(&configPath, "config", "c", "", "path to YAML configuration file (required)") + _ = rootCmd.MarkPersistentFlagRequired("config") +} + +func Execute() error { + return rootCmd.Execute() +} + +func waitForExitSignal() { + osSigs := make(chan os.Signal, 1) + signal.Notify(osSigs, syscall.SIGINT, syscall.SIGTERM) + <-osSigs +} + +func execute(cmd *cobra.Command, _ []string) error { + if err := initializeConfig(); err != nil { + return err + } + + // Management is required as the base server when signal or relay are enabled + if (config.Signal.Enabled || config.Relay.Enabled) && !config.Management.Enabled { + return fmt.Errorf("management must be enabled when signal or relay are enabled (provides the base HTTP server)") + } + + servers, err := createAllServers(cmd.Context(), config) + if err != nil { + return err + } + + // Register services with management's gRPC server using AfterInit hook + setupServerHooks(servers, config) + + // Start management server (this also starts the HTTP listener) + if servers.mgmtSrv != nil { + if err := servers.mgmtSrv.Start(cmd.Context()); err != nil { + cleanupSTUNListeners(servers.stunListeners) + return fmt.Errorf("failed to start management server: %w", err) + } + } + + // Start all other servers + wg := sync.WaitGroup{} + startServers(&wg, servers.relaySrv, servers.healthcheck, servers.stunServer, servers.metricsServer) + + waitForExitSignal() + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + err = shutdownServers(ctx, servers.relaySrv, servers.healthcheck, servers.stunServer, servers.mgmtSrv, servers.metricsServer) + wg.Wait() + return err +} + +// initializeConfig loads and validates the configuration, then initializes logging. +func initializeConfig() error { + var err error + config, err = LoadConfig(configPath) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + if err := config.Validate(); err != nil { + return fmt.Errorf("invalid config: %w", err) + } + + if err := util.InitLog(config.Server.LogLevel, config.Server.LogFile); err != nil { + return fmt.Errorf("failed to initialize log: %w", err) + } + + if dsn := config.Server.Store.DSN; dsn != "" { + switch strings.ToLower(config.Server.Store.Engine) { + case "postgres": + os.Setenv("NB_STORE_ENGINE_POSTGRES_DSN", dsn) + case "mysql": + os.Setenv("NB_STORE_ENGINE_MYSQL_DSN", dsn) + } + } + + log.Infof("Starting combined NetBird server") + logConfig(config) + logEnvVars() + return nil +} + +// serverInstances holds all server instances created during startup. +type serverInstances struct { + relaySrv *relayServer.Server + mgmtSrv *mgmtServer.BaseServer + signalSrv *signalServer.Server + healthcheck *healthcheck.Server + stunServer *stun.Server + stunListeners []*net.UDPConn + metricsServer *sharedMetrics.Metrics +} + +// createAllServers creates all server instances based on configuration. +func createAllServers(ctx context.Context, cfg *CombinedConfig) (*serverInstances, error) { + metricsServer, err := sharedMetrics.NewServer(cfg.Server.MetricsPort, "") + if err != nil { + return nil, fmt.Errorf("failed to create metrics server: %w", err) + } + servers := &serverInstances{ + metricsServer: metricsServer, + } + + _, tlsSupport, err := handleTLSConfig(cfg) + if err != nil { + return nil, fmt.Errorf("failed to setup TLS config: %w", err) + } + + if err := servers.createRelayServer(cfg, tlsSupport); err != nil { + return nil, err + } + + if err := servers.createManagementServer(ctx, cfg); err != nil { + return nil, err + } + + if err := servers.createSignalServer(ctx, cfg); err != nil { + return nil, err + } + + if err := servers.createHealthcheckServer(cfg); err != nil { + return nil, err + } + + return servers, nil +} + +func (s *serverInstances) createRelayServer(cfg *CombinedConfig, tlsSupport bool) error { + if !cfg.Relay.Enabled { + return nil + } + + var err error + s.stunListeners, err = createSTUNListeners(cfg) + if err != nil { + return err + } + + hashedSecret := sha256.Sum256([]byte(cfg.Relay.AuthSecret)) + authenticator := auth.NewTimedHMACValidator(hashedSecret[:], 24*time.Hour) + + relayCfg := relayServer.Config{ + Meter: s.metricsServer.Meter, + ExposedAddress: cfg.Relay.ExposedAddress, + AuthValidator: authenticator, + TLSSupport: tlsSupport, + } + + s.relaySrv, err = createRelayServer(relayCfg, s.stunListeners) + if err != nil { + return err + } + + log.Infof("Relay server created") + + if len(s.stunListeners) > 0 { + s.stunServer = stun.NewServer(s.stunListeners, cfg.Relay.Stun.LogLevel) + } + + return nil +} + +func (s *serverInstances) createManagementServer(ctx context.Context, cfg *CombinedConfig) error { + if !cfg.Management.Enabled { + return nil + } + + mgmtConfig, err := cfg.ToManagementConfig() + if err != nil { + return fmt.Errorf("failed to create management config: %w", err) + } + + _, portStr, portErr := net.SplitHostPort(cfg.Server.ListenAddress) + if portErr != nil { + portStr = "443" + } + mgmtPort, _ := strconv.Atoi(portStr) + + if err := ApplyEmbeddedIdPConfig(ctx, mgmtConfig, mgmtPort, false); err != nil { + cleanupSTUNListeners(s.stunListeners) + return fmt.Errorf("failed to apply embedded IdP config: %w", err) + } + + if err := EnsureEncryptionKey(ctx, mgmtConfig); err != nil { + cleanupSTUNListeners(s.stunListeners) + return fmt.Errorf("failed to ensure encryption key: %w", err) + } + + LogConfigInfo(mgmtConfig) + + s.mgmtSrv, err = createManagementServer(cfg, mgmtConfig) + if err != nil { + cleanupSTUNListeners(s.stunListeners) + return fmt.Errorf("failed to create management server: %w", err) + } + + // Inject externally-managed AppMetrics so management uses the shared metrics server + appMetrics, err := telemetry.NewAppMetricsWithMeter(ctx, s.metricsServer.Meter) + if err != nil { + cleanupSTUNListeners(s.stunListeners) + return fmt.Errorf("failed to create management app metrics: %w", err) + } + mgmtServer.Inject[telemetry.AppMetrics](s.mgmtSrv, appMetrics) + + log.Infof("Management server created") + return nil +} + +func (s *serverInstances) createSignalServer(ctx context.Context, cfg *CombinedConfig) error { + if !cfg.Signal.Enabled { + return nil + } + + var err error + s.signalSrv, err = signalServer.NewServer(ctx, s.metricsServer.Meter, "signal_") + if err != nil { + cleanupSTUNListeners(s.stunListeners) + return fmt.Errorf("failed to create signal server: %w", err) + } + + log.Infof("Signal server created") + return nil +} + +func (s *serverInstances) createHealthcheckServer(cfg *CombinedConfig) error { + hCfg := healthcheck.Config{ + ListenAddress: cfg.Server.HealthcheckAddress, + ServiceChecker: s.relaySrv, + } + + var err error + s.healthcheck, err = createHealthCheck(hCfg, s.stunListeners) + return err +} + +// setupServerHooks registers services with management's gRPC server. +func setupServerHooks(servers *serverInstances, cfg *CombinedConfig) { + if servers.mgmtSrv == nil { + return + } + + servers.mgmtSrv.AfterInit(func(s *mgmtServer.BaseServer) { + grpcSrv := s.GRPCServer() + + if servers.signalSrv != nil { + proto.RegisterSignalExchangeServer(grpcSrv, servers.signalSrv) + log.Infof("Signal server registered on port %s", cfg.Server.ListenAddress) + } + + s.SetHandlerFunc(createCombinedHandler(grpcSrv, s.APIHandler(), servers.relaySrv, servers.metricsServer.Meter, cfg)) + if servers.relaySrv != nil { + log.Infof("Relay WebSocket handler added (path: /relay)") + } + }) +} + +func startServers(wg *sync.WaitGroup, srv *relayServer.Server, httpHealthcheck *healthcheck.Server, stunServer *stun.Server, metricsServer *sharedMetrics.Metrics) { + if srv != nil { + instanceURL := srv.InstanceURL() + log.Infof("Relay server instance URL: %s", instanceURL.String()) + log.Infof("Relay WebSocket multiplexed on management port (no separate relay listener)") + } + + wg.Add(1) + go func() { + defer wg.Done() + log.Infof("running metrics server: %s%s", metricsServer.Addr, metricsServer.Endpoint) + if err := metricsServer.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) { + log.Fatalf("failed to start metrics server: %v", err) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + if err := httpHealthcheck.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) { + log.Fatalf("failed to start healthcheck server: %v", err) + } + }() + + if stunServer != nil { + wg.Add(1) + go func() { + defer wg.Done() + if err := stunServer.Listen(); err != nil { + if errors.Is(err, stun.ErrServerClosed) { + return + } + log.Errorf("STUN server error: %v", err) + } + }() + } +} + +func shutdownServers(ctx context.Context, srv *relayServer.Server, httpHealthcheck *healthcheck.Server, stunServer *stun.Server, mgmtSrv *mgmtServer.BaseServer, metricsServer *sharedMetrics.Metrics) error { + var errs error + + if err := httpHealthcheck.Shutdown(ctx); err != nil { + errs = multierror.Append(errs, fmt.Errorf("failed to close healthcheck server: %w", err)) + } + + if stunServer != nil { + if err := stunServer.Shutdown(); err != nil { + errs = multierror.Append(errs, fmt.Errorf("failed to close STUN server: %w", err)) + } + } + + if srv != nil { + if err := srv.Shutdown(ctx); err != nil { + errs = multierror.Append(errs, fmt.Errorf("failed to close relay server: %w", err)) + } + } + + if mgmtSrv != nil { + log.Infof("shutting down management and signal servers") + if err := mgmtSrv.Stop(); err != nil { + errs = multierror.Append(errs, fmt.Errorf("failed to close management server: %w", err)) + } + } + + if metricsServer != nil { + log.Infof("shutting down metrics server") + if err := metricsServer.Shutdown(ctx); err != nil { + errs = multierror.Append(errs, fmt.Errorf("failed to close metrics server: %w", err)) + } + } + + return errs +} + +func createHealthCheck(hCfg healthcheck.Config, stunListeners []*net.UDPConn) (*healthcheck.Server, error) { + httpHealthcheck, err := healthcheck.NewServer(hCfg) + if err != nil { + cleanupSTUNListeners(stunListeners) + return nil, fmt.Errorf("failed to create healthcheck server: %w", err) + } + return httpHealthcheck, nil +} + +func createRelayServer(cfg relayServer.Config, stunListeners []*net.UDPConn) (*relayServer.Server, error) { + srv, err := relayServer.NewServer(cfg) + if err != nil { + cleanupSTUNListeners(stunListeners) + return nil, fmt.Errorf("failed to create relay server: %w", err) + } + return srv, nil +} + +func cleanupSTUNListeners(stunListeners []*net.UDPConn) { + for _, l := range stunListeners { + _ = l.Close() + } +} + +func createSTUNListeners(cfg *CombinedConfig) ([]*net.UDPConn, error) { + var stunListeners []*net.UDPConn + if cfg.Relay.Stun.Enabled { + for _, port := range cfg.Relay.Stun.Ports { + listener, err := net.ListenUDP("udp", &net.UDPAddr{Port: port}) + if err != nil { + cleanupSTUNListeners(stunListeners) + return nil, fmt.Errorf("failed to create STUN listener on port %d: %w", port, err) + } + stunListeners = append(stunListeners, listener) + log.Infof("STUN server listening on UDP port %d", port) + } + } + return stunListeners, nil +} + +func handleTLSConfig(cfg *CombinedConfig) (*tls.Config, bool, error) { + tlsCfg := cfg.Server.TLS + + if tlsCfg.LetsEncrypt.AWSRoute53 { + log.Debugf("using Let's Encrypt DNS resolver with Route 53 support") + r53 := encryption.Route53TLS{ + DataDir: tlsCfg.LetsEncrypt.DataDir, + Email: tlsCfg.LetsEncrypt.Email, + Domains: tlsCfg.LetsEncrypt.Domains, + } + tc, err := r53.GetCertificate() + if err != nil { + return nil, false, err + } + return tc, true, nil + } + + if cfg.HasLetsEncrypt() { + log.Infof("setting up TLS with Let's Encrypt") + certManager, err := encryption.CreateCertManager(tlsCfg.LetsEncrypt.DataDir, tlsCfg.LetsEncrypt.Domains...) + if err != nil { + return nil, false, fmt.Errorf("failed creating LetsEncrypt cert manager: %w", err) + } + return certManager.TLSConfig(), true, nil + } + + if cfg.HasTLSCert() { + log.Debugf("using file based TLS config") + tc, err := encryption.LoadTLSConfig(tlsCfg.CertFile, tlsCfg.KeyFile) + if err != nil { + return nil, false, err + } + return tc, true, nil + } + + return nil, false, nil +} + +func createManagementServer(cfg *CombinedConfig, mgmtConfig *nbconfig.Config) (*mgmtServer.BaseServer, error) { + mgmt := cfg.Management + + dnsDomain := mgmt.DnsDomain + singleAccModeDomain := dnsDomain + + // Extract port from listen address + _, portStr, err := net.SplitHostPort(cfg.Server.ListenAddress) + if err != nil { + // If no port specified, assume default + portStr = "443" + } + mgmtPort, _ := strconv.Atoi(portStr) + + mgmtSrv := mgmtServer.NewServer( + mgmtConfig, + dnsDomain, + singleAccModeDomain, + mgmtPort, + cfg.Server.MetricsPort, + mgmt.DisableAnonymousMetrics, + mgmt.DisableGeoliteUpdate, + // Always enable user deletion from IDP in combined server (embedded IdP is always enabled) + true, + ) + + return mgmtSrv, nil +} + +// createCombinedHandler creates an HTTP handler that multiplexes Management, Signal (via wsproxy), and Relay WebSocket traffic +func createCombinedHandler(grpcServer *grpc.Server, httpHandler http.Handler, relaySrv *relayServer.Server, meter metric.Meter, cfg *CombinedConfig) http.Handler { + wsProxy := wsproxyserver.New(grpcServer, wsproxyserver.WithOTelMeter(meter)) + + var relayAcceptFn func(conn net.Conn) + if relaySrv != nil { + relayAcceptFn = relaySrv.RelayAccept() + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch { + // Native gRPC traffic (HTTP/2 with gRPC content-type) + case r.ProtoMajor == 2 && (strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") || + strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc+proto")): + grpcServer.ServeHTTP(w, r) + + // WebSocket proxy for Management gRPC + case r.URL.Path == wsproxy.ProxyPath+wsproxy.ManagementComponent: + wsProxy.Handler().ServeHTTP(w, r) + + // WebSocket proxy for Signal gRPC + case r.URL.Path == wsproxy.ProxyPath+wsproxy.SignalComponent: + if cfg.Signal.Enabled { + wsProxy.Handler().ServeHTTP(w, r) + } else { + http.Error(w, "Signal service not enabled", http.StatusNotFound) + } + + // Relay WebSocket + case r.URL.Path == "/relay": + if relayAcceptFn != nil { + handleRelayWebSocket(w, r, relayAcceptFn, cfg) + } else { + http.Error(w, "Relay service not enabled", http.StatusNotFound) + } + + // Management HTTP API (default) + default: + httpHandler.ServeHTTP(w, r) + } + }) +} + +// handleRelayWebSocket handles incoming WebSocket connections for the relay service +func handleRelayWebSocket(w http.ResponseWriter, r *http.Request, acceptFn func(conn net.Conn), cfg *CombinedConfig) { + acceptOptions := &websocket.AcceptOptions{ + OriginPatterns: []string{"*"}, + } + + wsConn, err := websocket.Accept(w, r, acceptOptions) + if err != nil { + log.Errorf("failed to accept relay ws connection: %s", err) + return + } + + connRemoteAddr := r.RemoteAddr + if r.Header.Get("X-Real-Ip") != "" && r.Header.Get("X-Real-Port") != "" { + connRemoteAddr = net.JoinHostPort(r.Header.Get("X-Real-Ip"), r.Header.Get("X-Real-Port")) + } + + rAddr, err := net.ResolveTCPAddr("tcp", connRemoteAddr) + if err != nil { + _ = wsConn.Close(websocket.StatusInternalError, "internal error") + return + } + + lAddr, err := net.ResolveTCPAddr("tcp", cfg.Server.ListenAddress) + if err != nil { + _ = wsConn.Close(websocket.StatusInternalError, "internal error") + return + } + + log.Debugf("Relay WS client connected from: %s", rAddr) + + conn := ws.NewConn(wsConn, lAddr, rAddr) + acceptFn(conn) +} + +// logConfig prints all configuration parameters for debugging +func logConfig(cfg *CombinedConfig) { + log.Info("=== Configuration ===") + logServerConfig(cfg) + logComponentsConfig(cfg) + logRelayConfig(cfg) + logManagementConfig(cfg) + log.Info("=== End Configuration ===") +} + +func logServerConfig(cfg *CombinedConfig) { + log.Info("--- Server ---") + log.Infof(" Listen address: %s", cfg.Server.ListenAddress) + log.Infof(" Exposed address: %s", cfg.Server.ExposedAddress) + log.Infof(" Healthcheck address: %s", cfg.Server.HealthcheckAddress) + log.Infof(" Metrics port: %d", cfg.Server.MetricsPort) + log.Infof(" Log level: %s", cfg.Server.LogLevel) + log.Infof(" Data dir: %s", cfg.Server.DataDir) + + switch { + case cfg.HasTLSCert(): + log.Infof(" TLS: cert=%s, key=%s", cfg.Server.TLS.CertFile, cfg.Server.TLS.KeyFile) + case cfg.HasLetsEncrypt(): + log.Infof(" TLS: Let's Encrypt (domains=%v)", cfg.Server.TLS.LetsEncrypt.Domains) + default: + log.Info(" TLS: disabled (using reverse proxy)") + } +} + +func logComponentsConfig(cfg *CombinedConfig) { + log.Info("--- Components ---") + log.Infof(" Management: %v (log level: %s)", cfg.Management.Enabled, cfg.Management.LogLevel) + log.Infof(" Signal: %v (log level: %s)", cfg.Signal.Enabled, cfg.Signal.LogLevel) + log.Infof(" Relay: %v (log level: %s)", cfg.Relay.Enabled, cfg.Relay.LogLevel) +} + +func logRelayConfig(cfg *CombinedConfig) { + if !cfg.Relay.Enabled { + return + } + log.Info("--- Relay ---") + log.Infof(" Exposed address: %s", cfg.Relay.ExposedAddress) + log.Infof(" Auth secret: %s...", maskSecret(cfg.Relay.AuthSecret)) + if cfg.Relay.Stun.Enabled { + log.Infof(" STUN ports: %v (log level: %s)", cfg.Relay.Stun.Ports, cfg.Relay.Stun.LogLevel) + } else { + log.Info(" STUN: disabled") + } +} + +func logManagementConfig(cfg *CombinedConfig) { + if !cfg.Management.Enabled { + return + } + log.Info("--- Management ---") + log.Infof(" Data dir: %s", cfg.Management.DataDir) + log.Infof(" DNS domain: %s", cfg.Management.DnsDomain) + log.Infof(" Store engine: %s", cfg.Management.Store.Engine) + if cfg.Server.Store.DSN != "" { + log.Infof(" Store DSN: %s", maskDSNPassword(cfg.Server.Store.DSN)) + } + + log.Info(" Auth (embedded IdP):") + log.Infof(" Issuer: %s", cfg.Management.Auth.Issuer) + log.Infof(" Dashboard redirect URIs: %v", cfg.Management.Auth.DashboardRedirectURIs) + log.Infof(" CLI redirect URIs: %v", cfg.Management.Auth.CLIRedirectURIs) + + log.Info(" Client settings:") + log.Infof(" Signal URI: %s", cfg.Management.SignalURI) + for _, s := range cfg.Management.Stuns { + log.Infof(" STUN: %s", s.URI) + } + if len(cfg.Management.Relays.Addresses) > 0 { + log.Infof(" Relay addresses: %v", cfg.Management.Relays.Addresses) + log.Infof(" Relay credentials TTL: %s", cfg.Management.Relays.CredentialsTTL) + } +} + +// logEnvVars logs all NB_ environment variables that are currently set +func logEnvVars() { + log.Info("=== Environment Variables ===") + found := false + for _, env := range os.Environ() { + if strings.HasPrefix(env, "NB_") { + key, _, _ := strings.Cut(env, "=") + value := os.Getenv(key) + if strings.Contains(strings.ToLower(key), "secret") || strings.Contains(strings.ToLower(key), "key") || strings.Contains(strings.ToLower(key), "password") { + value = maskSecret(value) + } + log.Infof(" %s=%s", key, value) + found = true + } + } + if !found { + log.Info(" (none set)") + } + log.Info("=== End Environment Variables ===") +} + +// maskDSNPassword masks the password in a DSN string. +// Handles both key=value format ("password=secret") and URI format ("user:secret@host"). +func maskDSNPassword(dsn string) string { + // Key=value format: "host=localhost user=nb password=secret dbname=nb" + if strings.Contains(dsn, "password=") { + parts := strings.Fields(dsn) + for i, p := range parts { + if strings.HasPrefix(p, "password=") { + parts[i] = "password=****" + } + } + return strings.Join(parts, " ") + } + + // URI format: "user:password@host..." + if atIdx := strings.Index(dsn, "@"); atIdx != -1 { + prefix := dsn[:atIdx] + if colonIdx := strings.Index(prefix, ":"); colonIdx != -1 { + return prefix[:colonIdx+1] + "****" + dsn[atIdx:] + } + } + + return dsn +} + +// maskSecret returns first 4 chars of secret followed by "..." +func maskSecret(secret string) string { + if len(secret) <= 4 { + return "****" + } + return secret[:4] + "..." +} diff --git a/combined/config-simple.yaml.example b/combined/config-simple.yaml.example new file mode 100644 index 000000000..4a90adda8 --- /dev/null +++ b/combined/config-simple.yaml.example @@ -0,0 +1,111 @@ +# NetBird Combined Server Configuration +# Copy this file to config.yaml and customize for your deployment +# +# This is a Management server with optional embedded Signal, Relay, and STUN services. +# By default, all services run locally. You can use external services instead by +# setting the corresponding override fields. +# +# Architecture: +# - Management: Always runs locally (this IS the management server) +# - Signal: Local by default; set 'signalUri' to use external (disables local) +# - Relay: Local by default; set 'relays' to use external (disables local) +# - STUN: Local on port 3478 by default; set 'stuns' to use external instead + +server: + # Main HTTP/gRPC port for all services (Management, Signal, Relay) + listenAddress: ":443" + + # Public address that peers will use to connect to this server + # Used for relay connections and management DNS domain + # Format: protocol://hostname:port (e.g., https://server.mycompany.com:443) + exposedAddress: "https://server.mycompany.com:443" + + # STUN server ports (defaults to [3478] if not specified; set 'stuns' to use external) + # stunPorts: + # - 3478 + + # Metrics endpoint port + metricsPort: 9090 + + # Healthcheck endpoint address + healthcheckAddress: ":9000" + + # Logging configuration + logLevel: "info" # Default log level for all components: panic, fatal, error, warn, info, debug, trace + logFile: "console" # "console" or path to log file + + # TLS configuration (optional) + tls: + certFile: "" + keyFile: "" + letsencrypt: + enabled: false + dataDir: "" + domains: [] + email: "" + awsRoute53: false + + # Shared secret for relay authentication (required when running local relay) + authSecret: "your-secret-key-here" + + # Data directory for all services + dataDir: "/var/lib/netbird/" + + # ============================================================================ + # External Service Overrides (optional) + # Use these to point to external Signal, Relay, or STUN servers instead of + # running them locally. When set, the corresponding local service is disabled. + # ============================================================================ + + # External STUN servers - disables local STUN server + # stuns: + # - uri: "stun:stun.example.com:3478" + # - uri: "stun:stun.example.com:3479" + + # External relay servers - disables local relay server + # relays: + # addresses: + # - "rels://relay.example.com:443" + # credentialsTTL: "12h" + # secret: "relay-shared-secret" + + # External signal server - disables local signal server + # signalUri: "https://signal.example.com:443" + + # ============================================================================ + # Management Settings + # ============================================================================ + + # Metrics and updates + disableAnonymousMetrics: false + disableGeoliteUpdate: false + + # Embedded authentication/identity provider (Dex) configuration (always enabled) + auth: + # OIDC issuer URL - must be publicly accessible + issuer: "https://server.mycompany.com/oauth2" + localAuthDisabled: false + signKeyRefreshEnabled: false + # OAuth2 redirect URIs for dashboard + dashboardRedirectURIs: + - "https://app.netbird.io/nb-auth" + - "https://app.netbird.io/nb-silent-auth" + # OAuth2 redirect URIs for CLI + cliRedirectURIs: + - "http://localhost:53000/" + # Optional initial admin user + # owner: + # email: "admin@example.com" + # password: "initial-password" + + # Store configuration + store: + engine: "sqlite" # sqlite, postgres, or mysql + dsn: "" # Connection string for postgres or mysql + encryptionKey: "" + + # Reverse proxy settings (optional) + # reverseProxy: + # trustedHTTPProxies: [] + # trustedHTTPProxiesCount: 0 + # trustedPeers: [] \ No newline at end of file diff --git a/combined/config.yaml.example b/combined/config.yaml.example new file mode 100644 index 000000000..6cb10e04d --- /dev/null +++ b/combined/config.yaml.example @@ -0,0 +1,115 @@ +# Simplified Combined NetBird Server Configuration +# Copy this file to config.yaml and customize for your deployment + +# Server-wide settings +server: + # Main HTTP/gRPC port for all services (Management, Signal, Relay) + listenAddress: ":443" + + # Metrics endpoint port + metricsPort: 9090 + + # Healthcheck endpoint address + healthcheckAddress: ":9000" + + # Logging configuration + logLevel: "info" # panic, fatal, error, warn, info, debug, trace + logFile: "console" # "console" or path to log file + + # TLS configuration (optional) + tls: + certFile: "" + keyFile: "" + letsencrypt: + enabled: false + dataDir: "" + domains: [] + email: "" + awsRoute53: false + +# Relay service configuration +relay: + # Enable/disable the relay service + enabled: true + + # Public address that peers will use to connect to this relay + # Format: hostname:port or ip:port + exposedAddress: "relay.example.com:443" + + # Shared secret for relay authentication (required when enabled) + authSecret: "your-secret-key-here" + + # Log level for relay (reserved for future use, currently uses global log level) + logLevel: "info" + + # Embedded STUN server (optional) + stun: + enabled: false + ports: [3478] + logLevel: "info" + +# Signal service configuration +signal: + # Enable/disable the signal service + enabled: true + + # Log level for signal (reserved for future use, currently uses global log level) + logLevel: "info" + +# Management service configuration +management: + # Enable/disable the management service + enabled: true + + # Data directory for management service + dataDir: "/var/lib/netbird/" + + # DNS domain for the management server + dnsDomain: "" + + # Metrics and updates + disableAnonymousMetrics: false + disableGeoliteUpdate: false + + auth: + # OIDC issuer URL - must be publicly accessible + issuer: "https://management.example.com/oauth2" + localAuthDisabled: false + signKeyRefreshEnabled: false + # OAuth2 redirect URIs for dashboard + dashboardRedirectURIs: + - "https://app.example.com/nb-auth" + - "https://app.example.com/nb-silent-auth" + # OAuth2 redirect URIs for CLI + cliRedirectURIs: + - "http://localhost:53000/" + # Optional initial admin user + # owner: + # email: "admin@example.com" + # password: "initial-password" + + # External STUN servers (for client config) + stuns: [] + # - uri: "stun:stun.example.com:3478" + + # External relay servers (for client config) + relays: + addresses: [] + # - "rels://relay.example.com:443" + credentialsTTL: "12h" + secret: "" + + # External signal server URI (for client config) + signalUri: "" + + # Store configuration + store: + engine: "sqlite" # sqlite, postgres, or mysql + dsn: "" # Connection string for postgres or mysql + encryptionKey: "" + + # Reverse proxy settings + reverseProxy: + trustedHTTPProxies: [] + trustedHTTPProxiesCount: 0 + trustedPeers: [] diff --git a/combined/main.go b/combined/main.go new file mode 100644 index 000000000..6740ac93e --- /dev/null +++ b/combined/main.go @@ -0,0 +1,13 @@ +package main + +import ( + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/combined/cmd" +) + +func main() { + if err := cmd.Execute(); err != nil { + log.Fatalf("failed to execute command: %v", err) + } +} diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh index 25599997c..fd50c4871 100755 --- a/infrastructure_files/getting-started.sh +++ b/infrastructure_files/getting-started.sh @@ -85,8 +85,8 @@ read_nb_domain() { read_reverse_proxy_type() { echo "" > /dev/stderr echo "Which reverse proxy will you use?" > /dev/stderr - echo " [0] Built-in Caddy (recommended - automatic TLS)" > /dev/stderr - echo " [1] Traefik (labels added to containers)" > /dev/stderr + echo " [0] Traefik (recommended - automatic TLS, included in Docker Compose)" > /dev/stderr + echo " [1] Existing Traefik (labels for external Traefik instance)" > /dev/stderr echo " [2] Nginx (generates config template)" > /dev/stderr echo " [3] Nginx Proxy Manager (generates config + instructions)" > /dev/stderr echo " [4] External Caddy (generates Caddyfile snippet)" > /dev/stderr @@ -182,20 +182,21 @@ get_upstream_host() { return 0 } -wait_management() { +wait_management_proxy() { + local proxy_container="${1:-traefik}" set +e - echo -n "Waiting for Management server to become ready" + echo -n "Waiting for NetBird server to become ready" counter=1 while true; do - # Check the embedded IdP endpoint + # Check the embedded IdP endpoint through the reverse proxy if curl -sk -f -o /dev/null "$NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN/oauth2/.well-known/openid-configuration" 2>/dev/null; then break fi if [[ $counter -eq 60 ]]; then echo "" echo "Taking too long. Checking logs..." - $DOCKER_COMPOSE_COMMAND logs --tail=20 caddy - $DOCKER_COMPOSE_COMMAND logs --tail=20 management + $DOCKER_COMPOSE_COMMAND logs --tail=20 "$proxy_container" + $DOCKER_COMPOSE_COMMAND logs --tail=20 netbird-server fi echo -n " ." sleep 2 @@ -209,7 +210,7 @@ wait_management() { wait_management_direct() { set +e local upstream_host=$(get_upstream_host) - echo -n "Waiting for Management server to become ready" + echo -n "Waiting for NetBird server to become ready" counter=1 while true; do # Check the embedded IdP endpoint directly (no reverse proxy) @@ -219,7 +220,7 @@ wait_management_direct() { if [[ $counter -eq 60 ]]; then echo "" echo "Taking too long. Checking logs..." - $DOCKER_COMPOSE_COMMAND logs --tail=20 management + $DOCKER_COMPOSE_COMMAND logs --tail=20 netbird-server fi echo -n " ." sleep 2 @@ -235,7 +236,6 @@ wait_management_direct() { ############################################ initialize_default_values() { - CADDY_SECURE_DOMAIN="" NETBIRD_PORT=80 NETBIRD_HTTP_PROTOCOL="http" NETBIRD_RELAY_PROTO="rel" @@ -245,11 +245,9 @@ initialize_default_values() { NETBIRD_STUN_PORT=3478 # Docker images - CADDY_IMAGE="caddy" DASHBOARD_IMAGE="netbirdio/dashboard:latest" - SIGNAL_IMAGE="netbirdio/signal:latest" - RELAY_IMAGE="netbirdio/relay:latest" - MANAGEMENT_IMAGE="netbirdio/management:latest" + # Combined server replaces separate signal, relay, and management containers + NETBIRD_SERVER_IMAGE="netbirdio/netbird-server:latest" # Reverse proxy configuration REVERSE_PROXY_TYPE="0" @@ -257,10 +255,7 @@ initialize_default_values() { TRAEFIK_ENTRYPOINT="websecure" TRAEFIK_CERTRESOLVER="" DASHBOARD_HOST_PORT="8080" - MANAGEMENT_HOST_PORT="8081" - SIGNAL_HOST_PORT="8083" - SIGNAL_GRPC_PORT="10000" - RELAY_HOST_PORT="8084" + MANAGEMENT_HOST_PORT="8081" # Combined server port (management + signal + relay) BIND_LOCALHOST_ONLY="true" EXTERNAL_PROXY_NETWORK="" return 0 @@ -275,7 +270,6 @@ configure_domain() { NETBIRD_DOMAIN=$(get_main_ip_address) else NETBIRD_PORT=443 - CADDY_SECURE_DOMAIN=", $NETBIRD_DOMAIN:$NETBIRD_PORT" NETBIRD_HTTP_PROTOCOL="https" NETBIRD_RELAY_PROTO="rels" fi @@ -286,7 +280,7 @@ configure_reverse_proxy() { # Prompt for reverse proxy type REVERSE_PROXY_TYPE=$(read_reverse_proxy_type) - # Handle Traefik-specific prompts + # Handle Traefik-specific prompts (only for external Traefik) if [[ "$REVERSE_PROXY_TYPE" == "1" ]]; then TRAEFIK_EXTERNAL_NETWORK=$(read_traefik_network) TRAEFIK_ENTRYPOINT=$(read_traefik_entrypoint) @@ -309,11 +303,11 @@ configure_reverse_proxy() { } check_existing_installation() { - if [[ -f management.json ]]; then + if [[ -f config.yaml ]]; then echo "Generated files already exist, if you want to reinitialize the environment, please remove them first." echo "You can use the following commands:" echo " $DOCKER_COMPOSE_COMMAND down --volumes # to remove all containers and volumes" - echo " rm -f docker-compose.yml Caddyfile dashboard.env management.json relay.env nginx-netbird.conf caddyfile-netbird.txt npm-advanced-config.txt" + echo " rm -f docker-compose.yml dashboard.env config.yaml nginx-netbird.conf caddyfile-netbird.txt npm-advanced-config.txt" echo "Be aware that this will remove all data from the database, and you will have to reconfigure the dashboard." exit 1 fi @@ -326,8 +320,7 @@ generate_configuration_files() { # Render docker-compose and proxy config based on selection case "$REVERSE_PROXY_TYPE" in 0) - render_docker_compose > docker-compose.yml - render_caddyfile > Caddyfile + render_docker_compose_traefik_builtin > docker-compose.yml ;; 1) render_docker_compose_traefik > docker-compose.yml @@ -355,27 +348,26 @@ generate_configuration_files() { # Common files for all configurations render_dashboard_env > dashboard.env - render_management_json > management.json - render_relay_env > relay.env + render_combined_yaml > config.yaml return 0 } start_services_and_show_instructions() { - # For built-in Caddy and Traefik, start containers immediately + # For built-in Traefik, start containers immediately # For NPM, start containers first (NPM needs services running to create proxy) # For other external proxies, show instructions first and wait for user confirmation if [[ "$REVERSE_PROXY_TYPE" == "0" ]]; then - # Built-in Caddy - handles everything automatically + # Built-in Traefik - handles everything automatically (TLS via Let's Encrypt) echo -e "$MSG_STARTING_SERVICES" $DOCKER_COMPOSE_COMMAND up -d sleep 3 - wait_management + wait_management_proxy traefik echo -e "$MSG_DONE" print_post_setup_instructions elif [[ "$REVERSE_PROXY_TYPE" == "1" ]]; then - # Traefik - start containers first, then show instructions + # External Traefik - start containers, then show instructions # Traefik discovers services via Docker labels, so containers must be running echo -e "$MSG_STARTING_SERVICES" $DOCKER_COMPOSE_COMMAND up -d @@ -441,73 +433,136 @@ init_environment() { # Configuration File Renderers ############################################ -render_caddyfile() { +render_docker_compose_traefik_builtin() { cat < ${upstream_host}:${RELAY_HOST_PORT}" - echo " (HTTP with WebSocket upgrade)" + echo " WebSocket (relay, signal, management WS proxy):" + echo " /relay*, /ws-proxy/* -> ${upstream_host}:${MANAGEMENT_HOST_PORT}" + echo " (HTTP with WebSocket upgrade, extended timeout)" echo "" - echo " /ws-proxy/signal* -> ${upstream_host}:${SIGNAL_HOST_PORT}" - echo " (HTTP with WebSocket upgrade)" - echo "" - echo " /signalexchange.SignalExchange/* -> ${upstream_host}:${SIGNAL_GRPC_PORT}" + echo " Native gRPC (signal + management):" + echo " /signalexchange.SignalExchange/* -> ${upstream_host}:${MANAGEMENT_HOST_PORT}" + echo " /management.ManagementService/* -> ${upstream_host}:${MANAGEMENT_HOST_PORT}" echo " (gRPC/h2c - plaintext HTTP/2)" echo "" - echo " /api/* -> ${upstream_host}:${MANAGEMENT_HOST_PORT}" - echo " (HTTP)" + echo " HTTP (API + embedded IdP):" + echo " /api/*, /oauth2/* -> ${upstream_host}:${MANAGEMENT_HOST_PORT}" echo "" - echo " /ws-proxy/management* -> ${upstream_host}:${MANAGEMENT_HOST_PORT}" - echo " (HTTP with WebSocket upgrade)" - echo "" - echo " /management.ManagementService/* -> ${upstream_host}:${MANAGEMENT_HOST_PORT}" - echo " (gRPC/h2c - plaintext HTTP/2)" - echo "" - echo " /oauth2/* -> ${upstream_host}:${MANAGEMENT_HOST_PORT}" - echo " (HTTP - embedded IdP)" - echo "" - echo " /* -> ${upstream_host}:${DASHBOARD_HOST_PORT}" - echo " (HTTP - catch-all for dashboard)" + echo " Dashboard (catch-all):" + echo " /* -> ${upstream_host}:${DASHBOARD_HOST_PORT}" echo "" echo "IMPORTANT: gRPC routes require HTTP/2 (h2c) upstream support." - echo "Long-running connections need extended timeouts (recommend 1 day)." + echo "WebSocket and gRPC connections need extended timeouts (recommend 1 day)." return 0 } print_post_setup_instructions() { case "$REVERSE_PROXY_TYPE" in 0) - print_caddy_instructions + print_builtin_traefik_instructions ;; 1) print_traefik_instructions diff --git a/management/cmd/management.go b/management/cmd/management.go index 511168823..b064524d8 100644 --- a/management/cmd/management.go +++ b/management/cmd/management.go @@ -55,7 +55,7 @@ var ( // detect whether user specified a port userPort := cmd.Flag("port").Changed - config, err = loadMgmtConfig(ctx, nbconfig.MgmtConfigPath) + config, err = LoadMgmtConfig(ctx, nbconfig.MgmtConfigPath) if err != nil { return fmt.Errorf("failed reading provided config file: %s: %v", nbconfig.MgmtConfigPath, err) } @@ -133,35 +133,35 @@ var ( } ) -func loadMgmtConfig(ctx context.Context, mgmtConfigPath string) (*nbconfig.Config, error) { +func LoadMgmtConfig(ctx context.Context, mgmtConfigPath string) (*nbconfig.Config, error) { loadedConfig := &nbconfig.Config{} if _, err := util.ReadJsonWithEnvSub(mgmtConfigPath, loadedConfig); err != nil { return nil, err } - applyCommandLineOverrides(loadedConfig) + ApplyCommandLineOverrides(loadedConfig) // Apply EmbeddedIdP config to HttpConfig if embedded IdP is enabled - err := applyEmbeddedIdPConfig(ctx, loadedConfig) + err := ApplyEmbeddedIdPConfig(ctx, loadedConfig) if err != nil { return nil, err } - if err := applyOIDCConfig(ctx, loadedConfig); err != nil { + if err := ApplyOIDCConfig(ctx, loadedConfig); err != nil { return nil, err } - logConfigInfo(loadedConfig) + LogConfigInfo(loadedConfig) - if err := ensureEncryptionKey(ctx, mgmtConfigPath, loadedConfig); err != nil { + if err := EnsureEncryptionKey(ctx, mgmtConfigPath, loadedConfig); err != nil { return nil, err } return loadedConfig, nil } -// applyCommandLineOverrides applies command-line flag overrides to the config -func applyCommandLineOverrides(cfg *nbconfig.Config) { +// ApplyCommandLineOverrides applies command-line flag overrides to the config +func ApplyCommandLineOverrides(cfg *nbconfig.Config) { if mgmtLetsencryptDomain != "" { cfg.HttpConfig.LetsEncryptDomain = mgmtLetsencryptDomain } @@ -174,9 +174,9 @@ func applyCommandLineOverrides(cfg *nbconfig.Config) { } } -// applyEmbeddedIdPConfig populates HttpConfig and EmbeddedIdP storage from config when embedded IdP is enabled. +// ApplyEmbeddedIdPConfig populates HttpConfig and EmbeddedIdP storage from config when embedded IdP is enabled. // This allows users to only specify EmbeddedIdP config without duplicating values in HttpConfig. -func applyEmbeddedIdPConfig(ctx context.Context, cfg *nbconfig.Config) error { +func ApplyEmbeddedIdPConfig(ctx context.Context, cfg *nbconfig.Config) error { if cfg.EmbeddedIdP == nil || !cfg.EmbeddedIdP.Enabled { return nil } @@ -222,8 +222,8 @@ func applyEmbeddedIdPConfig(ctx context.Context, cfg *nbconfig.Config) error { return nil } -// applyOIDCConfig fetches and applies OIDC configuration if endpoint is specified -func applyOIDCConfig(ctx context.Context, cfg *nbconfig.Config) error { +// ApplyOIDCConfig fetches and applies OIDC configuration if endpoint is specified +func ApplyOIDCConfig(ctx context.Context, cfg *nbconfig.Config) error { oidcEndpoint := cfg.HttpConfig.OIDCConfigEndpoint if oidcEndpoint == "" { return nil @@ -249,16 +249,16 @@ func applyOIDCConfig(ctx context.Context, cfg *nbconfig.Config) error { oidcConfig.JwksURI, cfg.HttpConfig.AuthKeysLocation) cfg.HttpConfig.AuthKeysLocation = oidcConfig.JwksURI - if err := applyDeviceAuthFlowConfig(ctx, cfg, &oidcConfig, oidcEndpoint); err != nil { + if err := ApplyDeviceAuthFlowConfig(ctx, cfg, &oidcConfig, oidcEndpoint); err != nil { return err } - applyPKCEFlowConfig(ctx, cfg, &oidcConfig) + ApplyPKCEFlowConfig(ctx, cfg, &oidcConfig) return nil } -// applyDeviceAuthFlowConfig applies OIDC config to DeviceAuthorizationFlow if enabled -func applyDeviceAuthFlowConfig(ctx context.Context, cfg *nbconfig.Config, oidcConfig *OIDCConfigResponse, oidcEndpoint string) error { +// ApplyDeviceAuthFlowConfig applies OIDC config to DeviceAuthorizationFlow if enabled +func ApplyDeviceAuthFlowConfig(ctx context.Context, cfg *nbconfig.Config, oidcConfig *OIDCConfigResponse, oidcEndpoint string) error { if cfg.DeviceAuthorizationFlow == nil || strings.ToLower(cfg.DeviceAuthorizationFlow.Provider) == string(nbconfig.NONE) { return nil } @@ -285,8 +285,8 @@ func applyDeviceAuthFlowConfig(ctx context.Context, cfg *nbconfig.Config, oidcCo return nil } -// applyPKCEFlowConfig applies OIDC config to PKCEAuthorizationFlow if configured -func applyPKCEFlowConfig(ctx context.Context, cfg *nbconfig.Config, oidcConfig *OIDCConfigResponse) { +// ApplyPKCEFlowConfig applies OIDC config to PKCEAuthorizationFlow if configured +func ApplyPKCEFlowConfig(ctx context.Context, cfg *nbconfig.Config, oidcConfig *OIDCConfigResponse) { if cfg.PKCEAuthorizationFlow == nil { return } @@ -299,8 +299,8 @@ func applyPKCEFlowConfig(ctx context.Context, cfg *nbconfig.Config, oidcConfig * cfg.PKCEAuthorizationFlow.ProviderConfig.AuthorizationEndpoint = oidcConfig.AuthorizationEndpoint } -// logConfigInfo logs informational messages about the loaded configuration -func logConfigInfo(cfg *nbconfig.Config) { +// LogConfigInfo logs informational messages about the loaded configuration +func LogConfigInfo(cfg *nbconfig.Config) { if cfg.EmbeddedIdP != nil { log.Infof("running with the embedded IdP: %v", cfg.EmbeddedIdP.Issuer) } @@ -309,8 +309,8 @@ func logConfigInfo(cfg *nbconfig.Config) { } } -// ensureEncryptionKey generates and saves a DataStoreEncryptionKey if not set -func ensureEncryptionKey(ctx context.Context, configPath string, cfg *nbconfig.Config) error { +// EnsureEncryptionKey generates and saves a DataStoreEncryptionKey if not set +func EnsureEncryptionKey(ctx context.Context, configPath string, cfg *nbconfig.Config) error { if cfg.DataStoreEncryptionKey != "" { return nil } diff --git a/management/cmd/management_test.go b/management/cmd/management_test.go index 244d86254..f0c89dd3f 100644 --- a/management/cmd/management_test.go +++ b/management/cmd/management_test.go @@ -30,7 +30,7 @@ func Test_loadMgmtConfig(t *testing.T) { t.Fatalf("failed to create config: %s", err) } - cfg, err := loadMgmtConfig(context.Background(), tmpFile) + cfg, err := LoadMgmtConfig(context.Background(), tmpFile) if err != nil { t.Fatalf("failed to load management config: %s", err) } diff --git a/management/internals/server/server.go b/management/internals/server/server.go index cd8d8e8fb..0f985c4ed 100644 --- a/management/internals/server/server.go +++ b/management/internals/server/server.go @@ -11,7 +11,6 @@ import ( "time" "github.com/google/uuid" - "github.com/netbirdio/netbird/management/server/idp" log "github.com/sirupsen/logrus" "go.opentelemetry.io/otel/metric" "golang.org/x/crypto/acme/autocert" @@ -19,6 +18,8 @@ import ( "golang.org/x/net/http2/h2c" "google.golang.org/grpc" + "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/encryption" nbconfig "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/metrics" @@ -138,6 +139,14 @@ func (s *BaseServer) Start(ctx context.Context) error { go metricsWorker.Run(srvCtx) } + // Run afterInit hooks before starting any servers + // This allows registering additional gRPC services (e.g., Signal) before Serve() is called + for _, fn := range s.afterInit { + if fn != nil { + fn(s) + } + } + var compatListener net.Listener if s.mgmtPort != ManagementLegacyPort { // The Management gRPC server was running on port 33073 previously. Old agents that are already connected to it @@ -178,12 +187,6 @@ func (s *BaseServer) Start(ctx context.Context) error { } } - for _, fn := range s.afterInit { - if fn != nil { - fn(s) - } - } - log.WithContext(ctx).Infof("management server version %s", version.NetbirdVersion()) log.WithContext(ctx).Infof("running HTTP server and gRPC server on the same port: %s", s.listener.Addr().String()) s.serveGRPCWithHTTP(ctx, s.listener, rootHandler, tlsEnabled) @@ -255,7 +258,23 @@ func (s *BaseServer) SetContainer(key string, container any) { log.Tracef("container with key %s set successfully", key) } +// SetHandlerFunc allows overriding the default HTTP handler function. +// This is useful for multiplexing additional services on the same port. +func (s *BaseServer) SetHandlerFunc(handler http.Handler) { + s.container["customHandler"] = handler + log.Tracef("custom handler set successfully") +} + func (s *BaseServer) handlerFunc(_ context.Context, gRPCHandler *grpc.Server, httpHandler http.Handler, meter metric.Meter) http.Handler { + // Check if a custom handler was set (for multiplexing additional services) + if customHandler, ok := s.GetContainer("customHandler"); ok { + if handler, ok := customHandler.(http.Handler); ok { + log.Tracef("using custom handler") + return handler + } + } + + // Use default handler wsProxy := wsproxyserver.New(gRPCHandler, wsproxyserver.WithOTelMeter(meter)) return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 7f48f510e..f9ad1987c 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -2643,7 +2643,7 @@ func getGormConfig() *gorm.Config { // newPostgresStore initializes a new Postgres store. func newPostgresStore(ctx context.Context, metrics telemetry.AppMetrics, skipMigration bool) (Store, error) { - dsn, ok := os.LookupEnv(postgresDsnEnv) + dsn, ok := lookupDSNEnv(postgresDsnEnv, postgresDsnEnvLegacy) if !ok { return nil, fmt.Errorf("%s is not set", postgresDsnEnv) } @@ -2652,7 +2652,7 @@ func newPostgresStore(ctx context.Context, metrics telemetry.AppMetrics, skipMig // newMysqlStore initializes a new MySQL store. func newMysqlStore(ctx context.Context, metrics telemetry.AppMetrics, skipMigration bool) (Store, error) { - dsn, ok := os.LookupEnv(mysqlDsnEnv) + dsn, ok := lookupDSNEnv(mysqlDsnEnv, mysqlDsnEnvLegacy) if !ok { return nil, fmt.Errorf("%s is not set", mysqlDsnEnv) } diff --git a/management/server/store/store.go b/management/server/store/store.go index be0d29768..3928ce3f0 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -243,10 +243,20 @@ type Store interface { } const ( - postgresDsnEnv = "NETBIRD_STORE_ENGINE_POSTGRES_DSN" - mysqlDsnEnv = "NETBIRD_STORE_ENGINE_MYSQL_DSN" + postgresDsnEnv = "NB_STORE_ENGINE_POSTGRES_DSN" + postgresDsnEnvLegacy = "NETBIRD_STORE_ENGINE_POSTGRES_DSN" + mysqlDsnEnv = "NB_STORE_ENGINE_MYSQL_DSN" + mysqlDsnEnvLegacy = "NETBIRD_STORE_ENGINE_MYSQL_DSN" ) +// lookupDSNEnv checks the NB_ env var first, then falls back to the legacy NETBIRD_ env var. +func lookupDSNEnv(nbKey, legacyKey string) (string, bool) { + if v, ok := os.LookupEnv(nbKey); ok { + return v, true + } + return os.LookupEnv(legacyKey) +} + var supportedEngines = []types.Engine{types.SqliteStoreEngine, types.PostgresStoreEngine, types.MysqlStoreEngine} func getStoreEngineFromEnv() types.Engine { @@ -531,7 +541,7 @@ func getSqlStoreEngine(ctx context.Context, store *SqlStore, kind types.Engine) } func newReusedPostgresStore(ctx context.Context, store *SqlStore, kind types.Engine) (*SqlStore, func(), error) { - dsn, ok := os.LookupEnv(postgresDsnEnv) + dsn, ok := lookupDSNEnv(postgresDsnEnv, postgresDsnEnvLegacy) if !ok || dsn == "" { var err error _, dsn, err = testutil.CreatePostgresTestContainer() @@ -569,7 +579,7 @@ func newReusedPostgresStore(ctx context.Context, store *SqlStore, kind types.Eng } func newReusedMysqlStore(ctx context.Context, store *SqlStore, kind types.Engine) (*SqlStore, func(), error) { - dsn, ok := os.LookupEnv(mysqlDsnEnv) + dsn, ok := lookupDSNEnv(mysqlDsnEnv, mysqlDsnEnvLegacy) if !ok || dsn == "" { var err error _, dsn, err = testutil.CreateMysqlTestContainer() diff --git a/management/server/telemetry/app_metrics.go b/management/server/telemetry/app_metrics.go index 988f91779..1fd78bc3a 100644 --- a/management/server/telemetry/app_metrics.go +++ b/management/server/telemetry/app_metrics.go @@ -122,6 +122,7 @@ type defaultAppMetrics struct { Meter metric2.Meter listener net.Listener ctx context.Context + externallyManaged bool idpMetrics *IDPMetrics httpMiddleware *HTTPMiddleware grpcMetrics *GRPCMetrics @@ -171,6 +172,9 @@ func (appMetrics *defaultAppMetrics) Close() error { // Expose metrics on a given port and endpoint. If endpoint is empty a defaultEndpoint one will be used. // Exposes metrics in the Prometheus format https://prometheus.io/ func (appMetrics *defaultAppMetrics) Expose(ctx context.Context, port int, endpoint string) error { + if appMetrics.externallyManaged { + return nil + } if endpoint == "" { endpoint = defaultEndpoint } @@ -252,3 +256,49 @@ func NewDefaultAppMetrics(ctx context.Context) (AppMetrics, error) { accountManagerMetrics: accountManagerMetrics, }, nil } + +// NewAppMetricsWithMeter creates AppMetrics using an externally provided meter. +// The caller is responsible for exposing metrics via HTTP. Expose() and Close() are no-ops. +func NewAppMetricsWithMeter(ctx context.Context, meter metric2.Meter) (AppMetrics, error) { + idpMetrics, err := NewIDPMetrics(ctx, meter) + if err != nil { + return nil, fmt.Errorf("failed to initialize IDP metrics: %w", err) + } + + middleware, err := NewMetricsMiddleware(ctx, meter) + if err != nil { + return nil, fmt.Errorf("failed to initialize HTTP middleware metrics: %w", err) + } + + grpcMetrics, err := NewGRPCMetrics(ctx, meter) + if err != nil { + return nil, fmt.Errorf("failed to initialize gRPC metrics: %w", err) + } + + storeMetrics, err := NewStoreMetrics(ctx, meter) + if err != nil { + return nil, fmt.Errorf("failed to initialize store metrics: %w", err) + } + + updateChannelMetrics, err := NewUpdateChannelMetrics(ctx, meter) + if err != nil { + return nil, fmt.Errorf("failed to initialize update channel metrics: %w", err) + } + + accountManagerMetrics, err := NewAccountManagerMetrics(ctx, meter) + if err != nil { + return nil, fmt.Errorf("failed to initialize account manager metrics: %w", err) + } + + return &defaultAppMetrics{ + Meter: meter, + ctx: ctx, + externallyManaged: true, + idpMetrics: idpMetrics, + httpMiddleware: middleware, + grpcMetrics: grpcMetrics, + storeMetrics: storeMetrics, + updateChannelMetrics: updateChannelMetrics, + accountManagerMetrics: accountManagerMetrics, + }, nil +} diff --git a/relay/cmd/root.go b/relay/cmd/root.go index 20c565c3d..b1949ca11 100644 --- a/relay/cmd/root.go +++ b/relay/cmd/root.go @@ -21,8 +21,8 @@ import ( "github.com/netbirdio/netbird/encryption" "github.com/netbirdio/netbird/relay/healthcheck" "github.com/netbirdio/netbird/relay/server" + "github.com/netbirdio/netbird/shared/metrics" "github.com/netbirdio/netbird/shared/relay/auth" - "github.com/netbirdio/netbird/signal/metrics" "github.com/netbirdio/netbird/stun" "github.com/netbirdio/netbird/util" ) diff --git a/relay/server/server.go b/relay/server/server.go index 8e4333064..a0f7eb73c 100644 --- a/relay/server/server.go +++ b/relay/server/server.go @@ -3,6 +3,7 @@ package server import ( "context" "crypto/tls" + "net" "net/url" "sync" @@ -134,3 +135,10 @@ func (r *Server) ListenerProtocols() []protocol.Protocol { func (r *Server) InstanceURL() url.URL { return r.relay.InstanceURL() } + +// RelayAccept returns the relay's Accept function for handling incoming connections. +// This allows external HTTP handlers to route connections to the relay without +// starting the relay's own listeners. +func (r *Server) RelayAccept() func(conn net.Conn) { + return r.relay.Accept +} diff --git a/signal/metrics/metrics.go b/shared/metrics/metrics.go similarity index 100% rename from signal/metrics/metrics.go rename to shared/metrics/metrics.go diff --git a/signal/cmd/root.go b/signal/cmd/root.go index 7fa75d923..155790482 100644 --- a/signal/cmd/root.go +++ b/signal/cmd/root.go @@ -40,7 +40,6 @@ func Execute() error { func init() { stopCh = make(chan int) defaultLogFile = "/var/log/netbird/signal.log" - defaultSignalSSLDir = "/var/lib/netbird/" if runtime.GOOS == "windows" { defaultLogFile = os.Getenv("PROGRAMDATA") + "\\Netbird\\" + "signal.log" diff --git a/signal/cmd/run.go b/signal/cmd/run.go index d7662a886..681222403 100644 --- a/signal/cmd/run.go +++ b/signal/cmd/run.go @@ -18,7 +18,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" - "github.com/netbirdio/netbird/signal/metrics" + "github.com/netbirdio/netbird/shared/metrics" "github.com/netbirdio/netbird/encryption" "github.com/netbirdio/netbird/shared/signal/proto" @@ -38,13 +38,13 @@ import ( const legacyGRPCPort = 10000 var ( - signalPort int - metricsPort int - signalLetsencryptDomain string - signalSSLDir string - defaultSignalSSLDir string - signalCertFile string - signalCertKey string + signalPort int + metricsPort int + signalLetsencryptDomain string + signalLetsencryptEmail string + signalLetsencryptDataDir string + signalCertFile string + signalCertKey string signalKaep = grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ MinTime: 5 * time.Second, @@ -216,7 +216,7 @@ func getTLSConfigurations() ([]grpc.ServerOption, *autocert.Manager, *tls.Config } if signalLetsencryptDomain != "" { - certManager, err = encryption.CreateCertManager(signalSSLDir, signalLetsencryptDomain) + certManager, err = encryption.CreateCertManager(signalLetsencryptDataDir, signalLetsencryptDomain) if err != nil { return nil, certManager, nil, err } @@ -326,9 +326,11 @@ func loadTLSConfig(certFile string, certKey string) (*tls.Config, error) { func init() { runCmd.PersistentFlags().IntVar(&signalPort, "port", 80, "Server port to listen on (defaults to 443 if TLS is enabled, 80 otherwise") runCmd.Flags().IntVar(&metricsPort, "metrics-port", 9090, "metrics endpoint http port. Metrics are accessible under host:metrics-port/metrics") - runCmd.Flags().StringVar(&signalSSLDir, "ssl-dir", defaultSignalSSLDir, "server ssl directory location. *Required only for Let's Encrypt certificates.") - runCmd.Flags().StringVar(&signalLetsencryptDomain, "letsencrypt-domain", "", "a domain to issue Let's Encrypt certificate for. Enables TLS using Let's Encrypt. Will fetch and renew certificate, and run the server with TLS") - runCmd.Flags().StringVar(&signalCertFile, "cert-file", "", "Location of your SSL certificate. Can be used when you have an existing certificate and don't want a new certificate be generated automatically. If letsencrypt-domain is specified this property has no effect") - runCmd.Flags().StringVar(&signalCertKey, "cert-key", "", "Location of your SSL certificate private key. Can be used when you have an existing certificate and don't want a new certificate be generated automatically. If letsencrypt-domain is specified this property has no effect") + runCmd.PersistentFlags().StringVar(&signalLetsencryptDataDir, "letsencrypt-data-dir", "", "a directory to store Let's Encrypt data. Required if Let's Encrypt is enabled.") + runCmd.PersistentFlags().StringVar(&signalLetsencryptDataDir, "ssl-dir", "", "server ssl directory location. *Required only for Let's Encrypt certificates. Deprecated: use --letsencrypt-data-dir") + runCmd.PersistentFlags().StringVar(&signalLetsencryptDomain, "letsencrypt-domain", "", "a domain to issue Let's Encrypt certificate for. Enables TLS using Let's Encrypt. Will fetch and renew certificate, and run the server with TLS") + runCmd.PersistentFlags().StringVar(&signalLetsencryptEmail, "letsencrypt-email", "", "email address to use for Let's Encrypt certificate registration") + runCmd.PersistentFlags().StringVar(&signalCertFile, "cert-file", "", "Location of your SSL certificate. Can be used when you have an existing certificate and don't want a new certificate be generated automatically. If letsencrypt-domain is specified this property has no effect") + runCmd.PersistentFlags().StringVar(&signalCertKey, "cert-key", "", "Location of your SSL certificate private key. Can be used when you have an existing certificate and don't want a new certificate be generated automatically. If letsencrypt-domain is specified this property has no effect") setFlagsFromEnvVars(runCmd) } diff --git a/signal/metrics/app.go b/signal/metrics/app.go index e3b1c67cd..759b51913 100644 --- a/signal/metrics/app.go +++ b/signal/metrics/app.go @@ -24,15 +24,19 @@ type AppMetrics struct { MessageSize metric.Int64Histogram } -func NewAppMetrics(meter metric.Meter) (*AppMetrics, error) { - activePeers, err := meter.Int64UpDownCounter("active_peers", +func NewAppMetrics(meter metric.Meter, prefix ...string) (*AppMetrics, error) { + p := "" + if len(prefix) > 0 { + p = prefix[0] + } + activePeers, err := meter.Int64UpDownCounter(p+"active_peers", metric.WithDescription("Number of active connected peers"), ) if err != nil { return nil, err } - peerConnectionDuration, err := meter.Int64Histogram("peer_connection_duration_seconds", + peerConnectionDuration, err := meter.Int64Histogram(p+"peer_connection_duration_seconds", metric.WithExplicitBucketBoundaries(getPeerConnectionDurationBucketBoundaries()...), metric.WithDescription("Duration of how long a peer was connected"), ) @@ -40,28 +44,28 @@ func NewAppMetrics(meter metric.Meter) (*AppMetrics, error) { return nil, err } - registrations, err := meter.Int64Counter("registrations_total", + registrations, err := meter.Int64Counter(p+"registrations_total", metric.WithDescription("Total number of peer registrations"), ) if err != nil { return nil, err } - deregistrations, err := meter.Int64Counter("deregistrations_total", + deregistrations, err := meter.Int64Counter(p+"deregistrations_total", metric.WithDescription("Total number of peer deregistrations"), ) if err != nil { return nil, err } - registrationFailures, err := meter.Int64Counter("registration_failures_total", + registrationFailures, err := meter.Int64Counter(p+"registration_failures_total", metric.WithDescription("Total number of peer registration failures"), ) if err != nil { return nil, err } - registrationDelay, err := meter.Float64Histogram("registration_delay_milliseconds", + registrationDelay, err := meter.Float64Histogram(p+"registration_delay_milliseconds", metric.WithExplicitBucketBoundaries(getStandardBucketBoundaries()...), metric.WithDescription("Duration of how long it takes to register a peer"), ) @@ -69,7 +73,7 @@ func NewAppMetrics(meter metric.Meter) (*AppMetrics, error) { return nil, err } - getRegistrationDelay, err := meter.Float64Histogram("get_registration_delay_milliseconds", + getRegistrationDelay, err := meter.Float64Histogram(p+"get_registration_delay_milliseconds", metric.WithExplicitBucketBoundaries(getStandardBucketBoundaries()...), metric.WithDescription("Duration of how long it takes to load a connection from the registry"), ) @@ -77,21 +81,21 @@ func NewAppMetrics(meter metric.Meter) (*AppMetrics, error) { return nil, err } - messagesForwarded, err := meter.Int64Counter("messages_forwarded_total", + messagesForwarded, err := meter.Int64Counter(p+"messages_forwarded_total", metric.WithDescription("Total number of messages forwarded to peers"), ) if err != nil { return nil, err } - messageForwardFailures, err := meter.Int64Counter("message_forward_failures_total", + messageForwardFailures, err := meter.Int64Counter(p+"message_forward_failures_total", metric.WithDescription("Total number of message forwarding failures"), ) if err != nil { return nil, err } - messageForwardLatency, err := meter.Float64Histogram("message_forward_latency_milliseconds", + messageForwardLatency, err := meter.Float64Histogram(p+"message_forward_latency_milliseconds", metric.WithExplicitBucketBoundaries(getStandardBucketBoundaries()...), metric.WithDescription("Duration of how long it takes to forward a message to a peer"), ) @@ -100,7 +104,7 @@ func NewAppMetrics(meter metric.Meter) (*AppMetrics, error) { } messageSize, err := meter.Int64Histogram( - "message.size.bytes", + p+"message.size.bytes", metric.WithUnit("bytes"), metric.WithExplicitBucketBoundaries(getMessageSizeBucketBoundaries()...), metric.WithDescription("Records the size of each message sent"), diff --git a/signal/server/signal.go b/signal/server/signal.go index 47f01edae..c46df56d2 100644 --- a/signal/server/signal.go +++ b/signal/server/signal.go @@ -62,8 +62,8 @@ type Server struct { } // NewServer creates a new Signal server -func NewServer(ctx context.Context, meter metric.Meter) (*Server, error) { - appMetrics, err := metrics.NewAppMetrics(meter) +func NewServer(ctx context.Context, meter metric.Meter, metricsPrefix ...string) (*Server, error) { + appMetrics, err := metrics.NewAppMetrics(meter, metricsPrefix...) if err != nil { return nil, fmt.Errorf("creating app metrics: %v", err) } diff --git a/stun/server.go b/stun/server.go index be5717d48..01558f09c 100644 --- a/stun/server.go +++ b/stun/server.go @@ -48,7 +48,7 @@ func NewServer(conns []*net.UDPConn, logLevel string) *Server { // Use the formatter package to set up formatter, ReportCaller, and context hook formatter.SetTextFormatter(stunLogger) - logger := stunLogger.WithField("component", "stun-server") + logger := stunLogger.WithField("component", "stun") logger.Infof("STUN server log level set to: %s", level.String()) return &Server{ From 7ebf37ef20d2f5320a1fc79050fe7927241edf21 Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Fri, 13 Feb 2026 10:46:43 +0100 Subject: [PATCH 121/374] [management] Enforce access control on accessible peers (#5301) --- .../http/handlers/peers/peers_handler.go | 15 +++-- .../http/handlers/peers/peers_handler_test.go | 61 ++++++++++++++++++- 2 files changed, 69 insertions(+), 7 deletions(-) diff --git a/management/server/http/handlers/peers/peers_handler.go b/management/server/http/handlers/peers/peers_handler.go index 783cfe11b..0bee7cbab 100644 --- a/management/server/http/handlers/peers/peers_handler.go +++ b/management/server/http/handlers/peers/peers_handler.go @@ -18,6 +18,8 @@ import ( "github.com/netbirdio/netbird/management/server/groups" nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/server/permissions" + "github.com/netbirdio/netbird/management/server/permissions/modules" + "github.com/netbirdio/netbird/management/server/permissions/operations" "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/shared/management/http/api" "github.com/netbirdio/netbird/shared/management/http/util" @@ -368,9 +370,9 @@ func (h *Handler) GetAccessiblePeers(w http.ResponseWriter, r *http.Request) { return } - err = h.permissionsManager.ValidateAccountAccess(r.Context(), accountID, user, false) + allowed, err := h.permissionsManager.ValidateUserPermissions(r.Context(), accountID, userID, modules.Peers, operations.Read) if err != nil { - util.WriteError(r.Context(), status.NewPermissionDeniedError(), w) + util.WriteError(r.Context(), status.NewPermissionValidationError(err), w) return } @@ -380,9 +382,12 @@ func (h *Handler) GetAccessiblePeers(w http.ResponseWriter, r *http.Request) { return } - // If the user is regular user and does not own the peer - // with the given peerID return an empty list - if !user.HasAdminPower() && !user.IsServiceUser && !userAuth.IsChild { + if !allowed && !userAuth.IsChild { + if account.Settings.RegularUsersViewBlocked { + util.WriteJSONObject(r.Context(), w, []api.AccessiblePeer{}) + return + } + peer, ok := account.Peers[peerID] if !ok { util.WriteError(r.Context(), status.Errorf(status.NotFound, "peer not found"), w) diff --git a/management/server/http/handlers/peers/peers_handler_test.go b/management/server/http/handlers/peers/peers_handler_test.go index 786c144fc..6b3616597 100644 --- a/management/server/http/handlers/peers/peers_handler_test.go +++ b/management/server/http/handlers/peers/peers_handler_test.go @@ -22,6 +22,8 @@ import ( nbcontext "github.com/netbirdio/netbird/management/server/context" nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/server/permissions" + "github.com/netbirdio/netbird/management/server/permissions/modules" + "github.com/netbirdio/netbird/management/server/permissions/operations" "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/shared/auth" "github.com/netbirdio/netbird/shared/management/http/api" @@ -115,6 +117,16 @@ func initTestMetaData(t *testing.T, peers ...*nbpeer.Peer) *Handler { ctrl2 := gomock.NewController(t) permissionsManager := permissions.NewMockManager(ctrl2) permissionsManager.EXPECT().ValidateAccountAccess(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + permissionsManager.EXPECT(). + ValidateUserPermissions(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Eq(modules.Peers), gomock.Eq(operations.Read)). + DoAndReturn(func(ctx context.Context, accountID, userID string, module modules.Module, operation operations.Operation) (bool, error) { + user, ok := account.Users[userID] + if !ok { + return false, fmt.Errorf("user not found") + } + return user.HasAdminPower() || user.IsServiceUser, nil + }). + AnyTimes() return &Handler{ accountManager: &mock_server.MockAccountManager{ @@ -383,12 +395,11 @@ func TestGetAccessiblePeers(t *testing.T) { UserID: regularUser, } - p := initTestMetaData(t, peer1, peer2, peer3) - tt := []struct { name string peerID string callerUserID string + viewBlocked bool expectedStatus int expectedPeers []string }{ @@ -427,10 +438,56 @@ func TestGetAccessiblePeers(t *testing.T) { expectedStatus: http.StatusOK, expectedPeers: []string{"peer1", "peer2"}, }, + { + name: "regular user gets empty for owned peer list when view blocked", + peerID: "peer1", + callerUserID: regularUser, + viewBlocked: true, + expectedStatus: http.StatusOK, + expectedPeers: []string{}, + }, + { + name: "regular user gets empty list for unowned peer when view blocked", + peerID: "peer2", + callerUserID: regularUser, + viewBlocked: true, + expectedStatus: http.StatusOK, + expectedPeers: []string{}, + }, + { + name: "admin user still sees accessible peers when view blocked", + peerID: "peer2", + callerUserID: adminUser, + viewBlocked: true, + expectedStatus: http.StatusOK, + expectedPeers: []string{"peer1", "peer3"}, + }, + { + name: "service user still sees accessible peers when view blocked", + peerID: "peer3", + callerUserID: serviceUser, + viewBlocked: true, + expectedStatus: http.StatusOK, + expectedPeers: []string{"peer1", "peer2"}, + }, } for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { + p := initTestMetaData(t, peer1, peer2, peer3) + + if tc.viewBlocked { + mockAM := p.accountManager.(*mock_server.MockAccountManager) + originalGetAccountByIDFunc := mockAM.GetAccountByIDFunc + mockAM.GetAccountByIDFunc = func(ctx context.Context, accountID string, userID string) (*types.Account, error) { + account, err := originalGetAccountByIDFunc(ctx, accountID, userID) + if err != nil { + return nil, err + } + account.Settings.RegularUsersViewBlocked = true + return account, nil + } + } recorder := httptest.NewRecorder() req := httptest.NewRequest(http.MethodGet, fmt.Sprintf("/api/peers/%s/accessible-peers", tc.peerID), nil) From d3eeb6d8ee80d30d07f4756c0cfe3980717375f7 Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Fri, 13 Feb 2026 13:08:47 +0100 Subject: [PATCH 122/374] [misc] Add cloud api spec to public open api with rest client (#5222) --- shared/management/client/rest/billing.go | 82 + shared/management/client/rest/billing_test.go | 194 ++ shared/management/client/rest/client.go | 40 + shared/management/client/rest/edr.go | 307 ++ shared/management/client/rest/edr_test.go | 422 +++ .../management/client/rest/event_streaming.go | 92 + .../client/rest/event_streaming_test.go | 194 ++ shared/management/client/rest/events.go | 97 +- shared/management/client/rest/events_test.go | 53 +- .../client/rest/identity_providers.go | 92 + .../client/rest/identity_providers_test.go | 183 ++ shared/management/client/rest/ingress.go | 92 + shared/management/client/rest/ingress_test.go | 184 ++ shared/management/client/rest/instance.go | 46 + .../management/client/rest/instance_test.go | 96 + shared/management/client/rest/msp.go | 122 + shared/management/client/rest/msp_test.go | 251 ++ shared/management/client/rest/networks.go | 14 + .../management/client/rest/networks_test.go | 29 + shared/management/client/rest/peers.go | 170 + shared/management/client/rest/peers_test.go | 273 ++ shared/management/client/rest/scim.go | 119 + shared/management/client/rest/scim_test.go | 262 ++ shared/management/client/rest/users.go | 142 + shared/management/client/rest/users_test.go | 280 ++ shared/management/http/api/openapi.yml | 2873 ++++++++++++++++- shared/management/http/api/types.gen.go | 754 +++++ 27 files changed, 7369 insertions(+), 94 deletions(-) create mode 100644 shared/management/client/rest/billing.go create mode 100644 shared/management/client/rest/billing_test.go create mode 100644 shared/management/client/rest/edr.go create mode 100644 shared/management/client/rest/edr_test.go create mode 100644 shared/management/client/rest/event_streaming.go create mode 100644 shared/management/client/rest/event_streaming_test.go create mode 100644 shared/management/client/rest/identity_providers.go create mode 100644 shared/management/client/rest/identity_providers_test.go create mode 100644 shared/management/client/rest/ingress.go create mode 100644 shared/management/client/rest/ingress_test.go create mode 100644 shared/management/client/rest/instance.go create mode 100644 shared/management/client/rest/instance_test.go create mode 100644 shared/management/client/rest/msp.go create mode 100644 shared/management/client/rest/msp_test.go create mode 100644 shared/management/client/rest/scim.go create mode 100644 shared/management/client/rest/scim_test.go diff --git a/shared/management/client/rest/billing.go b/shared/management/client/rest/billing.go new file mode 100644 index 000000000..4ac9cdf55 --- /dev/null +++ b/shared/management/client/rest/billing.go @@ -0,0 +1,82 @@ +package rest + +import ( + "context" + + "github.com/netbirdio/netbird/shared/management/http/api" +) + +// BillingAPI APIs for billing and invoices +type BillingAPI struct { + c *Client +} + +// GetUsage retrieves current usage statistics for the account +// See more: https://docs.netbird.io/api/resources/billing#get-current-usage +func (a *BillingAPI) GetUsage(ctx context.Context) (*api.UsageStats, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/billing/usage", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.UsageStats](resp) + return &ret, err +} + +// GetSubscription retrieves the current subscription details +// See more: https://docs.netbird.io/api/resources/billing#get-current-subscription +func (a *BillingAPI) GetSubscription(ctx context.Context) (*api.Subscription, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/billing/subscription", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.Subscription](resp) + return &ret, err +} + +// GetInvoices retrieves the account's paid invoices +// See more: https://docs.netbird.io/api/resources/billing#list-all-invoices +func (a *BillingAPI) GetInvoices(ctx context.Context) ([]api.InvoiceResponse, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/billing/invoices", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.InvoiceResponse](resp) + return ret, err +} + +// GetInvoicePDF retrieves the invoice PDF URL +// See more: https://docs.netbird.io/api/resources/billing#get-invoice-pdf +func (a *BillingAPI) GetInvoicePDF(ctx context.Context, invoiceID string) (*api.InvoicePDFResponse, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/billing/invoices/"+invoiceID+"/pdf", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.InvoicePDFResponse](resp) + return &ret, err +} + +// GetInvoiceCSV retrieves the invoice CSV content +// See more: https://docs.netbird.io/api/resources/billing#get-invoice-csv +func (a *BillingAPI) GetInvoiceCSV(ctx context.Context, invoiceID string) (string, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/billing/invoices/"+invoiceID+"/csv", nil, nil) + if err != nil { + return "", err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[string](resp) + return ret, err +} diff --git a/shared/management/client/rest/billing_test.go b/shared/management/client/rest/billing_test.go new file mode 100644 index 000000000..060e459f6 --- /dev/null +++ b/shared/management/client/rest/billing_test.go @@ -0,0 +1,194 @@ +//go:build integration + +package rest_test + +import ( + "context" + "encoding/json" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/shared/management/client/rest" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" +) + +var ( + testUsageStats = api.UsageStats{ + ActiveUsers: 15, + TotalUsers: 20, + ActivePeers: 10, + TotalPeers: 25, + } + + testSubscription = api.Subscription{ + Active: true, + PlanTier: "basic", + PriceId: "price_1HhxOp", + Currency: "USD", + Price: 1000, + Provider: "stripe", + UpdatedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + } + + testInvoice = api.InvoiceResponse{ + Id: "inv_123", + PeriodStart: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + PeriodEnd: time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC), + Type: "invoice", + } + + testInvoicePDF = api.InvoicePDFResponse{ + Url: "https://example.com/invoice.pdf", + } +) + +func TestBilling_GetUsage_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/billing/usage", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal(testUsageStats) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Billing.GetUsage(context.Background()) + require.NoError(t, err) + assert.Equal(t, testUsageStats, *ret) + }) +} + +func TestBilling_GetUsage_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/billing/usage", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Billing.GetUsage(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestBilling_GetSubscription_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/billing/subscription", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal(testSubscription) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Billing.GetSubscription(context.Background()) + require.NoError(t, err) + assert.Equal(t, testSubscription, *ret) + }) +} + +func TestBilling_GetSubscription_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/billing/subscription", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Billing.GetSubscription(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestBilling_GetInvoices_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/billing/invoices", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.InvoiceResponse{testInvoice}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Billing.GetInvoices(context.Background()) + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testInvoice, ret[0]) + }) +} + +func TestBilling_GetInvoices_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/billing/invoices", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Billing.GetInvoices(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestBilling_GetInvoicePDF_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/billing/invoices/inv_123/pdf", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal(testInvoicePDF) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Billing.GetInvoicePDF(context.Background(), "inv_123") + require.NoError(t, err) + assert.Equal(t, testInvoicePDF, *ret) + }) +} + +func TestBilling_GetInvoicePDF_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/billing/invoices/inv_123/pdf", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Billing.GetInvoicePDF(context.Background(), "inv_123") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestBilling_GetInvoiceCSV_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/billing/invoices/inv_123/csv", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal("col1,col2\nval1,val2") + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Billing.GetInvoiceCSV(context.Background(), "inv_123") + require.NoError(t, err) + assert.Equal(t, "col1,col2\nval1,val2", ret) + }) +} + +func TestBilling_GetInvoiceCSV_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/billing/invoices/inv_123/csv", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Billing.GetInvoiceCSV(context.Background(), "inv_123") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Empty(t, ret) + }) +} diff --git a/shared/management/client/rest/client.go b/shared/management/client/rest/client.go index ad8328093..99d8eb594 100644 --- a/shared/management/client/rest/client.go +++ b/shared/management/client/rest/client.go @@ -73,6 +73,38 @@ type Client struct { // Events NetBird Events APIs // see more: https://docs.netbird.io/api/resources/events Events *EventsAPI + + // Billing NetBird Billing APIs for subscriptions, plans, and invoices + // see more: https://docs.netbird.io/api/resources/billing + Billing *BillingAPI + + // MSP NetBird MSP tenant management APIs + // see more: https://docs.netbird.io/api/resources/msp + MSP *MSPAPI + + // EDR NetBird EDR integration APIs (Intune, SentinelOne, Falcon, Huntress) + // see more: https://docs.netbird.io/api/resources/edr + EDR *EDRAPI + + // SCIM NetBird SCIM IDP integration APIs + // see more: https://docs.netbird.io/api/resources/scim + SCIM *SCIMAPI + + // EventStreaming NetBird Event Streaming integration APIs + // see more: https://docs.netbird.io/api/resources/event-streaming + EventStreaming *EventStreamingAPI + + // IdentityProviders NetBird Identity Providers APIs + // see more: https://docs.netbird.io/api/resources/identity-providers + IdentityProviders *IdentityProvidersAPI + + // Ingress NetBird Ingress Peers APIs + // see more: https://docs.netbird.io/api/resources/ingress-ports + Ingress *IngressAPI + + // Instance NetBird Instance API + // see more: https://docs.netbird.io/api/resources/instance + Instance *InstanceAPI } // New initialize new Client instance using PAT token @@ -120,6 +152,14 @@ func (c *Client) initialize() { c.DNSZones = &DNSZonesAPI{c} c.GeoLocation = &GeoLocationAPI{c} c.Events = &EventsAPI{c} + c.Billing = &BillingAPI{c} + c.MSP = &MSPAPI{c} + c.EDR = &EDRAPI{c} + c.SCIM = &SCIMAPI{c} + c.EventStreaming = &EventStreamingAPI{c} + c.IdentityProviders = &IdentityProvidersAPI{c} + c.Ingress = &IngressAPI{c} + c.Instance = &InstanceAPI{c} } // NewRequest creates and executes new management API request diff --git a/shared/management/client/rest/edr.go b/shared/management/client/rest/edr.go new file mode 100644 index 000000000..7dfc891c2 --- /dev/null +++ b/shared/management/client/rest/edr.go @@ -0,0 +1,307 @@ +package rest + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/netbirdio/netbird/shared/management/http/api" +) + +// EDRAPI APIs for EDR integrations (Intune, SentinelOne, Falcon, Huntress) +type EDRAPI struct { + c *Client +} + +// GetIntuneIntegration retrieves the EDR Intune integration +// See more: https://docs.netbird.io/api/resources/edr#get-intune-integration +func (a *EDRAPI) GetIntuneIntegration(ctx context.Context) (*api.EDRIntuneResponse, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/edr/intune", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.EDRIntuneResponse](resp) + return &ret, err +} + +// CreateIntuneIntegration creates a new EDR Intune integration +// See more: https://docs.netbird.io/api/resources/edr#create-intune-integration +func (a *EDRAPI) CreateIntuneIntegration(ctx context.Context, request api.EDRIntuneRequest) (*api.EDRIntuneResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/edr/intune", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.EDRIntuneResponse](resp) + return &ret, err +} + +// UpdateIntuneIntegration updates an existing EDR Intune integration +// See more: https://docs.netbird.io/api/resources/edr#update-intune-integration +func (a *EDRAPI) UpdateIntuneIntegration(ctx context.Context, request api.EDRIntuneRequest) (*api.EDRIntuneResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/integrations/edr/intune", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.EDRIntuneResponse](resp) + return &ret, err +} + +// DeleteIntuneIntegration deletes the EDR Intune integration +// See more: https://docs.netbird.io/api/resources/edr#delete-intune-integration +func (a *EDRAPI) DeleteIntuneIntegration(ctx context.Context) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/integrations/edr/intune", nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} + +// GetSentinelOneIntegration retrieves the EDR SentinelOne integration +// See more: https://docs.netbird.io/api/resources/edr#get-sentinelone-integration +func (a *EDRAPI) GetSentinelOneIntegration(ctx context.Context) (*api.EDRSentinelOneResponse, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/edr/sentinelone", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.EDRSentinelOneResponse](resp) + return &ret, err +} + +// CreateSentinelOneIntegration creates a new EDR SentinelOne integration +// See more: https://docs.netbird.io/api/resources/edr#create-sentinelone-integration +func (a *EDRAPI) CreateSentinelOneIntegration(ctx context.Context, request api.EDRSentinelOneRequest) (*api.EDRSentinelOneResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/edr/sentinelone", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.EDRSentinelOneResponse](resp) + return &ret, err +} + +// UpdateSentinelOneIntegration updates an existing EDR SentinelOne integration +// See more: https://docs.netbird.io/api/resources/edr#update-sentinelone-integration +func (a *EDRAPI) UpdateSentinelOneIntegration(ctx context.Context, request api.EDRSentinelOneRequest) (*api.EDRSentinelOneResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/integrations/edr/sentinelone", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.EDRSentinelOneResponse](resp) + return &ret, err +} + +// DeleteSentinelOneIntegration deletes the EDR SentinelOne integration +// See more: https://docs.netbird.io/api/resources/edr#delete-sentinelone-integration +func (a *EDRAPI) DeleteSentinelOneIntegration(ctx context.Context) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/integrations/edr/sentinelone", nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} + +// GetFalconIntegration retrieves the EDR Falcon integration +// See more: https://docs.netbird.io/api/resources/edr#get-falcon-integration +func (a *EDRAPI) GetFalconIntegration(ctx context.Context) (*api.EDRFalconResponse, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/edr/falcon", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.EDRFalconResponse](resp) + return &ret, err +} + +// CreateFalconIntegration creates a new EDR Falcon integration +// See more: https://docs.netbird.io/api/resources/edr#create-falcon-integration +func (a *EDRAPI) CreateFalconIntegration(ctx context.Context, request api.EDRFalconRequest) (*api.EDRFalconResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/edr/falcon", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.EDRFalconResponse](resp) + return &ret, err +} + +// UpdateFalconIntegration updates an existing EDR Falcon integration +// See more: https://docs.netbird.io/api/resources/edr#update-falcon-integration +func (a *EDRAPI) UpdateFalconIntegration(ctx context.Context, request api.EDRFalconRequest) (*api.EDRFalconResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/integrations/edr/falcon", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.EDRFalconResponse](resp) + return &ret, err +} + +// DeleteFalconIntegration deletes the EDR Falcon integration +// See more: https://docs.netbird.io/api/resources/edr#delete-falcon-integration +func (a *EDRAPI) DeleteFalconIntegration(ctx context.Context) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/integrations/edr/falcon", nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} + +// GetHuntressIntegration retrieves the EDR Huntress integration +// See more: https://docs.netbird.io/api/resources/edr#get-huntress-integration +func (a *EDRAPI) GetHuntressIntegration(ctx context.Context) (*api.EDRHuntressResponse, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/edr/huntress", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.EDRHuntressResponse](resp) + return &ret, err +} + +// CreateHuntressIntegration creates a new EDR Huntress integration +// See more: https://docs.netbird.io/api/resources/edr#create-huntress-integration +func (a *EDRAPI) CreateHuntressIntegration(ctx context.Context, request api.EDRHuntressRequest) (*api.EDRHuntressResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/edr/huntress", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.EDRHuntressResponse](resp) + return &ret, err +} + +// UpdateHuntressIntegration updates an existing EDR Huntress integration +// See more: https://docs.netbird.io/api/resources/edr#update-huntress-integration +func (a *EDRAPI) UpdateHuntressIntegration(ctx context.Context, request api.EDRHuntressRequest) (*api.EDRHuntressResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/integrations/edr/huntress", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.EDRHuntressResponse](resp) + return &ret, err +} + +// DeleteHuntressIntegration deletes the EDR Huntress integration +// See more: https://docs.netbird.io/api/resources/edr#delete-huntress-integration +func (a *EDRAPI) DeleteHuntressIntegration(ctx context.Context) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/integrations/edr/huntress", nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} + +// BypassPeerCompliance bypasses compliance for a non-compliant peer +// See more: https://docs.netbird.io/api/resources/edr#bypass-peer-compliance +func (a *EDRAPI) BypassPeerCompliance(ctx context.Context, peerID string) (*api.BypassResponse, error) { + resp, err := a.c.NewRequest(ctx, "POST", "/api/peers/"+peerID+"/edr/bypass", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.BypassResponse](resp) + return &ret, err +} + +// RevokePeerBypass revokes the compliance bypass for a peer +// See more: https://docs.netbird.io/api/resources/edr#revoke-peer-bypass +func (a *EDRAPI) RevokePeerBypass(ctx context.Context, peerID string) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/peers/"+peerID+"/edr/bypass", nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} + +// ListBypassedPeers returns all peers that have compliance bypassed +// See more: https://docs.netbird.io/api/resources/edr#list-all-bypassed-peers +func (a *EDRAPI) ListBypassedPeers(ctx context.Context) ([]api.BypassResponse, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/peers/edr/bypassed", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.BypassResponse](resp) + return ret, err +} diff --git a/shared/management/client/rest/edr_test.go b/shared/management/client/rest/edr_test.go new file mode 100644 index 000000000..a2a48858c --- /dev/null +++ b/shared/management/client/rest/edr_test.go @@ -0,0 +1,422 @@ +//go:build integration + +package rest_test + +import ( + "context" + "encoding/json" + "io" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/shared/management/client/rest" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" +) + +var ( + testIntuneResponse = api.EDRIntuneResponse{ + AccountId: "acc-1", + ClientId: "client-1", + TenantId: "tenant-1", + Enabled: true, + Id: 1, + Groups: []api.Group{}, + LastSyncedInterval: 24, + CreatedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + LastSyncedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + UpdatedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + CreatedBy: "user-1", + } + + testSentinelOneResponse = api.EDRSentinelOneResponse{ + AccountId: "acc-1", + ApiUrl: "https://sentinelone.example.com", + Enabled: true, + Id: 2, + Groups: []api.Group{}, + LastSyncedInterval: 24, + MatchAttributes: api.SentinelOneMatchAttributes{}, + CreatedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + LastSyncedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + UpdatedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + CreatedBy: "user-1", + } + + testFalconResponse = api.EDRFalconResponse{ + AccountId: "acc-1", + CloudId: "us-1", + Enabled: true, + Id: 3, + Groups: []api.Group{}, + ZtaScoreThreshold: 50, + CreatedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + LastSyncedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + UpdatedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + CreatedBy: "user-1", + } + + testHuntressResponse = api.EDRHuntressResponse{ + AccountId: "acc-1", + Enabled: true, + Id: 4, + Groups: []api.Group{}, + LastSyncedInterval: 24, + MatchAttributes: api.HuntressMatchAttributes{}, + CreatedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + LastSyncedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + UpdatedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + CreatedBy: "user-1", + } + + testBypassResponse = api.BypassResponse{ + PeerId: "peer-1", + } +) + +// Intune tests + +func TestEDR_GetIntuneIntegration_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/edr/intune", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal(testIntuneResponse) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EDR.GetIntuneIntegration(context.Background()) + require.NoError(t, err) + assert.Equal(t, testIntuneResponse, *ret) + }) +} + +func TestEDR_GetIntuneIntegration_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/edr/intune", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EDR.GetIntuneIntegration(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestEDR_CreateIntuneIntegration_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/edr/intune", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.EDRIntuneRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "client-1", req.ClientId) + retBytes, _ := json.Marshal(testIntuneResponse) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EDR.CreateIntuneIntegration(context.Background(), api.EDRIntuneRequest{ + ClientId: "client-1", + Secret: "secret", + TenantId: "tenant-1", + Groups: []string{"group-1"}, + LastSyncedInterval: 24, + }) + require.NoError(t, err) + assert.Equal(t, testIntuneResponse, *ret) + }) +} + +func TestEDR_CreateIntuneIntegration_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/edr/intune", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EDR.CreateIntuneIntegration(context.Background(), api.EDRIntuneRequest{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestEDR_UpdateIntuneIntegration_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/edr/intune", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "PUT", r.Method) + retBytes, _ := json.Marshal(testIntuneResponse) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EDR.UpdateIntuneIntegration(context.Background(), api.EDRIntuneRequest{ + ClientId: "client-1", + Secret: "new-secret", + TenantId: "tenant-1", + Groups: []string{"group-1"}, + }) + require.NoError(t, err) + assert.Equal(t, testIntuneResponse, *ret) + }) +} + +func TestEDR_DeleteIntuneIntegration_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/edr/intune", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.EDR.DeleteIntuneIntegration(context.Background()) + require.NoError(t, err) + }) +} + +func TestEDR_DeleteIntuneIntegration_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/edr/intune", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.EDR.DeleteIntuneIntegration(context.Background()) + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} + +// SentinelOne tests + +func TestEDR_GetSentinelOneIntegration_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/edr/sentinelone", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal(testSentinelOneResponse) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EDR.GetSentinelOneIntegration(context.Background()) + require.NoError(t, err) + assert.Equal(t, testSentinelOneResponse, *ret) + }) +} + +func TestEDR_CreateSentinelOneIntegration_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/edr/sentinelone", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + retBytes, _ := json.Marshal(testSentinelOneResponse) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EDR.CreateSentinelOneIntegration(context.Background(), api.EDRSentinelOneRequest{ + ApiToken: "token", + ApiUrl: "https://sentinelone.example.com", + Groups: []string{"group-1"}, + LastSyncedInterval: 24, + MatchAttributes: api.SentinelOneMatchAttributes{}, + }) + require.NoError(t, err) + assert.Equal(t, testSentinelOneResponse, *ret) + }) +} + +func TestEDR_DeleteSentinelOneIntegration_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/edr/sentinelone", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.EDR.DeleteSentinelOneIntegration(context.Background()) + require.NoError(t, err) + }) +} + +// Falcon tests + +func TestEDR_GetFalconIntegration_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/edr/falcon", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal(testFalconResponse) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EDR.GetFalconIntegration(context.Background()) + require.NoError(t, err) + assert.Equal(t, testFalconResponse, *ret) + }) +} + +func TestEDR_CreateFalconIntegration_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/edr/falcon", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + retBytes, _ := json.Marshal(testFalconResponse) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EDR.CreateFalconIntegration(context.Background(), api.EDRFalconRequest{ + ClientId: "client-1", + Secret: "secret", + CloudId: "us-1", + Groups: []string{"group-1"}, + ZtaScoreThreshold: 50, + }) + require.NoError(t, err) + assert.Equal(t, testFalconResponse, *ret) + }) +} + +func TestEDR_DeleteFalconIntegration_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/edr/falcon", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.EDR.DeleteFalconIntegration(context.Background()) + require.NoError(t, err) + }) +} + +// Huntress tests + +func TestEDR_GetHuntressIntegration_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/edr/huntress", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal(testHuntressResponse) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EDR.GetHuntressIntegration(context.Background()) + require.NoError(t, err) + assert.Equal(t, testHuntressResponse, *ret) + }) +} + +func TestEDR_CreateHuntressIntegration_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/edr/huntress", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + retBytes, _ := json.Marshal(testHuntressResponse) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EDR.CreateHuntressIntegration(context.Background(), api.EDRHuntressRequest{ + ApiKey: "key", + ApiSecret: "secret", + Groups: []string{"group-1"}, + LastSyncedInterval: 24, + MatchAttributes: api.HuntressMatchAttributes{}, + }) + require.NoError(t, err) + assert.Equal(t, testHuntressResponse, *ret) + }) +} + +func TestEDR_DeleteHuntressIntegration_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/edr/huntress", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.EDR.DeleteHuntressIntegration(context.Background()) + require.NoError(t, err) + }) +} + +// Peer bypass tests + +func TestEDR_BypassPeerCompliance_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/peer-1/edr/bypass", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + retBytes, _ := json.Marshal(testBypassResponse) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EDR.BypassPeerCompliance(context.Background(), "peer-1") + require.NoError(t, err) + assert.Equal(t, testBypassResponse, *ret) + }) +} + +func TestEDR_BypassPeerCompliance_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/peer-1/edr/bypass", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Bad request", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EDR.BypassPeerCompliance(context.Background(), "peer-1") + assert.Error(t, err) + assert.Equal(t, "Bad request", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestEDR_RevokePeerBypass_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/peer-1/edr/bypass", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.EDR.RevokePeerBypass(context.Background(), "peer-1") + require.NoError(t, err) + }) +} + +func TestEDR_RevokePeerBypass_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/peer-1/edr/bypass", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.EDR.RevokePeerBypass(context.Background(), "peer-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} + +func TestEDR_ListBypassedPeers_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/edr/bypassed", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.BypassResponse{testBypassResponse}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EDR.ListBypassedPeers(context.Background()) + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testBypassResponse, ret[0]) + }) +} + +func TestEDR_ListBypassedPeers_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/edr/bypassed", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EDR.ListBypassedPeers(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} diff --git a/shared/management/client/rest/event_streaming.go b/shared/management/client/rest/event_streaming.go new file mode 100644 index 000000000..99a02bd33 --- /dev/null +++ b/shared/management/client/rest/event_streaming.go @@ -0,0 +1,92 @@ +package rest + +import ( + "bytes" + "context" + "encoding/json" + "strconv" + + "github.com/netbirdio/netbird/shared/management/http/api" +) + +// EventStreamingAPI APIs for event streaming integrations +type EventStreamingAPI struct { + c *Client +} + +// List retrieves all event streaming integrations +// See more: https://docs.netbird.io/api/resources/event-streaming#list-all-event-streaming-integrations +func (a *EventStreamingAPI) List(ctx context.Context) ([]api.IntegrationResponse, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/event-streaming", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.IntegrationResponse](resp) + return ret, err +} + +// Get retrieves a specific event streaming integration by ID +// See more: https://docs.netbird.io/api/resources/event-streaming#retrieve-an-event-streaming-integration +func (a *EventStreamingAPI) Get(ctx context.Context, integrationID int) (*api.IntegrationResponse, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/event-streaming/"+strconv.Itoa(integrationID), nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.IntegrationResponse](resp) + return &ret, err +} + +// Create creates a new event streaming integration +// See more: https://docs.netbird.io/api/resources/event-streaming#create-an-event-streaming-integration +func (a *EventStreamingAPI) Create(ctx context.Context, request api.CreateIntegrationRequest) (*api.IntegrationResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/event-streaming", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.IntegrationResponse](resp) + return &ret, err +} + +// Update updates an existing event streaming integration +// See more: https://docs.netbird.io/api/resources/event-streaming#update-an-event-streaming-integration +func (a *EventStreamingAPI) Update(ctx context.Context, integrationID int, request api.CreateIntegrationRequest) (*api.IntegrationResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/event-streaming/"+strconv.Itoa(integrationID), bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.IntegrationResponse](resp) + return &ret, err +} + +// Delete deletes an event streaming integration +// See more: https://docs.netbird.io/api/resources/event-streaming#delete-an-event-streaming-integration +func (a *EventStreamingAPI) Delete(ctx context.Context, integrationID int) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/event-streaming/"+strconv.Itoa(integrationID), nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} diff --git a/shared/management/client/rest/event_streaming_test.go b/shared/management/client/rest/event_streaming_test.go new file mode 100644 index 000000000..eebe291e4 --- /dev/null +++ b/shared/management/client/rest/event_streaming_test.go @@ -0,0 +1,194 @@ +//go:build integration + +package rest_test + +import ( + "context" + "encoding/json" + "io" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/shared/management/client/rest" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" +) + +var ( + testIntegrationResponse = api.IntegrationResponse{ + Id: ptr[int64](1), + AccountId: ptr("acc-1"), + Platform: (*api.IntegrationResponsePlatform)(ptr("datadog")), + Enabled: ptr(true), + Config: &map[string]string{"api_key": "****"}, + CreatedAt: ptr(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)), + UpdatedAt: ptr(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)), + } +) + +func TestEventStreaming_List_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/event-streaming", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.IntegrationResponse{testIntegrationResponse}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EventStreaming.List(context.Background()) + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testIntegrationResponse, ret[0]) + }) +} + +func TestEventStreaming_List_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/event-streaming", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EventStreaming.List(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestEventStreaming_Get_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/event-streaming/1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal(testIntegrationResponse) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EventStreaming.Get(context.Background(), 1) + require.NoError(t, err) + assert.Equal(t, testIntegrationResponse, *ret) + }) +} + +func TestEventStreaming_Get_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/event-streaming/1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EventStreaming.Get(context.Background(), 1) + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestEventStreaming_Create_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/event-streaming", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.CreateIntegrationRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, api.CreateIntegrationRequestPlatformDatadog, req.Platform) + assert.Equal(t, true, req.Enabled) + retBytes, _ := json.Marshal(testIntegrationResponse) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EventStreaming.Create(context.Background(), api.CreateIntegrationRequest{ + Platform: api.CreateIntegrationRequestPlatformDatadog, + Enabled: true, + Config: map[string]string{"api_key": "test-key"}, + }) + require.NoError(t, err) + assert.Equal(t, testIntegrationResponse, *ret) + }) +} + +func TestEventStreaming_Create_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/event-streaming", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EventStreaming.Create(context.Background(), api.CreateIntegrationRequest{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestEventStreaming_Update_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/event-streaming/1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "PUT", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.CreateIntegrationRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, false, req.Enabled) + retBytes, _ := json.Marshal(testIntegrationResponse) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EventStreaming.Update(context.Background(), 1, api.CreateIntegrationRequest{ + Platform: api.CreateIntegrationRequestPlatformDatadog, + Enabled: false, + Config: map[string]string{"api_key": "updated-key"}, + }) + require.NoError(t, err) + assert.Equal(t, testIntegrationResponse, *ret) + }) +} + +func TestEventStreaming_Update_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/event-streaming/1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.EventStreaming.Update(context.Background(), 1, api.CreateIntegrationRequest{}) + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestEventStreaming_Delete_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/event-streaming/1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.EventStreaming.Delete(context.Background(), 1) + require.NoError(t, err) + }) +} + +func TestEventStreaming_Delete_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/event-streaming/1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.EventStreaming.Delete(context.Background(), 1) + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} diff --git a/shared/management/client/rest/events.go b/shared/management/client/rest/events.go index 2d25333ae..348d0698a 100644 --- a/shared/management/client/rest/events.go +++ b/shared/management/client/rest/events.go @@ -2,6 +2,8 @@ package rest import ( "context" + "fmt" + "time" "github.com/netbirdio/netbird/shared/management/http/api" ) @@ -11,10 +13,79 @@ type EventsAPI struct { c *Client } -// List list all events -// See more: https://docs.netbird.io/api/resources/events#list-all-events -func (a *EventsAPI) List(ctx context.Context) ([]api.Event, error) { - resp, err := a.c.NewRequest(ctx, "GET", "/api/events", nil, nil) +// NetworkTrafficOption options for ListNetworkTrafficEvents API +type NetworkTrafficOption func(query map[string]string) + +func NetworkTrafficPage(page int) NetworkTrafficOption { + return func(query map[string]string) { + query["page"] = fmt.Sprintf("%d", page) + } +} + +func NetworkTrafficPageSize(pageSize int) NetworkTrafficOption { + return func(query map[string]string) { + query["page_size"] = fmt.Sprintf("%d", pageSize) + } +} + +func NetworkTrafficUserID(userID string) NetworkTrafficOption { + return func(query map[string]string) { + query["user_id"] = userID + } +} + +func NetworkTrafficReporterID(reporterID string) NetworkTrafficOption { + return func(query map[string]string) { + query["reporter_id"] = reporterID + } +} + +func NetworkTrafficProtocol(protocol int) NetworkTrafficOption { + return func(query map[string]string) { + query["protocol"] = fmt.Sprintf("%d", protocol) + } +} + +func NetworkTrafficType(t api.GetApiEventsNetworkTrafficParamsType) NetworkTrafficOption { + return func(query map[string]string) { + query["type"] = string(t) + } +} + +func NetworkTrafficConnectionType(ct api.GetApiEventsNetworkTrafficParamsConnectionType) NetworkTrafficOption { + return func(query map[string]string) { + query["connection_type"] = string(ct) + } +} + +func NetworkTrafficDirection(d api.GetApiEventsNetworkTrafficParamsDirection) NetworkTrafficOption { + return func(query map[string]string) { + query["direction"] = string(d) + } +} + +func NetworkTrafficSearch(search string) NetworkTrafficOption { + return func(query map[string]string) { + query["search"] = search + } +} + +func NetworkTrafficStartDate(t time.Time) NetworkTrafficOption { + return func(query map[string]string) { + query["start_date"] = t.Format(time.RFC3339) + } +} + +func NetworkTrafficEndDate(t time.Time) NetworkTrafficOption { + return func(query map[string]string) { + query["end_date"] = t.Format(time.RFC3339) + } +} + +// ListAuditEvents list all audit events +// See more: https://docs.netbird.io/api/resources/events#list-all-audit-events +func (a *EventsAPI) ListAuditEvents(ctx context.Context) ([]api.Event, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/events/audit", nil, nil) if err != nil { return nil, err } @@ -24,3 +95,21 @@ func (a *EventsAPI) List(ctx context.Context) ([]api.Event, error) { ret, err := parseResponse[[]api.Event](resp) return ret, err } + +// ListNetworkTrafficEvents list network traffic events +// See more: https://docs.netbird.io/api/resources/events#list-network-traffic-events +func (a *EventsAPI) ListNetworkTrafficEvents(ctx context.Context, opts ...NetworkTrafficOption) (*api.NetworkTrafficEventsResponse, error) { + query := make(map[string]string) + for _, o := range opts { + o(query) + } + resp, err := a.c.NewRequest(ctx, "GET", "/api/events/network-traffic", nil, query) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.NetworkTrafficEventsResponse](resp) + return &ret, err +} diff --git a/shared/management/client/rest/events_test.go b/shared/management/client/rest/events_test.go index 1ee10eb6e..d4bdae15d 100644 --- a/shared/management/client/rest/events_test.go +++ b/shared/management/client/rest/events_test.go @@ -21,37 +21,76 @@ var ( Activity: "AccountCreate", ActivityCode: api.EventActivityCodeAccountCreate, } + + testNetworkTrafficResponse = api.NetworkTrafficEventsResponse{ + Data: []api.NetworkTrafficEvent{}, + Page: 1, + PageSize: 50, + } ) -func TestEvents_List_200(t *testing.T) { +func TestEvents_ListAuditEvents_200(t *testing.T) { withMockClient(func(c *rest.Client, mux *http.ServeMux) { - mux.HandleFunc("/api/events", func(w http.ResponseWriter, r *http.Request) { + mux.HandleFunc("/api/events/audit", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal([]api.Event{testEvent}) _, err := w.Write(retBytes) require.NoError(t, err) }) - ret, err := c.Events.List(context.Background()) + ret, err := c.Events.ListAuditEvents(context.Background()) require.NoError(t, err) assert.Len(t, ret, 1) assert.Equal(t, testEvent, ret[0]) }) } -func TestEvents_List_Err(t *testing.T) { +func TestEvents_ListAuditEvents_Err(t *testing.T) { withMockClient(func(c *rest.Client, mux *http.ServeMux) { - mux.HandleFunc("/api/events", func(w http.ResponseWriter, r *http.Request) { + mux.HandleFunc("/api/events/audit", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) _, err := w.Write(retBytes) require.NoError(t, err) }) - ret, err := c.Events.List(context.Background()) + ret, err := c.Events.ListAuditEvents(context.Background()) assert.Error(t, err) assert.Equal(t, "No", err.Error()) assert.Empty(t, ret) }) } +func TestEvents_ListNetworkTrafficEvents_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/events/network-traffic", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "1", r.URL.Query().Get("page")) + assert.Equal(t, "50", r.URL.Query().Get("page_size")) + retBytes, _ := json.Marshal(testNetworkTrafficResponse) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Events.ListNetworkTrafficEvents(context.Background(), + rest.NetworkTrafficPage(1), + rest.NetworkTrafficPageSize(50), + ) + require.NoError(t, err) + assert.Equal(t, testNetworkTrafficResponse, *ret) + }) +} + +func TestEvents_ListNetworkTrafficEvents_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/events/network-traffic", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Events.ListNetworkTrafficEvents(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + func TestEvents_Integration(t *testing.T) { withBlackBoxServer(t, func(c *rest.Client) { // Do something that would trigger any event @@ -62,7 +101,7 @@ func TestEvents_Integration(t *testing.T) { }) require.NoError(t, err) - events, err := c.Events.List(context.Background()) + events, err := c.Events.ListAuditEvents(context.Background()) require.NoError(t, err) assert.NotEmpty(t, events) }) diff --git a/shared/management/client/rest/identity_providers.go b/shared/management/client/rest/identity_providers.go new file mode 100644 index 000000000..2a725183d --- /dev/null +++ b/shared/management/client/rest/identity_providers.go @@ -0,0 +1,92 @@ +package rest + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/netbirdio/netbird/shared/management/http/api" +) + +// IdentityProvidersAPI APIs for Identity Providers, do not use directly +type IdentityProvidersAPI struct { + c *Client +} + +// List all identity providers +// See more: https://docs.netbird.io/api/resources/identity-providers#list-all-identity-providers +func (a *IdentityProvidersAPI) List(ctx context.Context) ([]api.IdentityProvider, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/identity-providers", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.IdentityProvider](resp) + return ret, err +} + +// Get identity provider info +// See more: https://docs.netbird.io/api/resources/identity-providers#retrieve-an-identity-provider +func (a *IdentityProvidersAPI) Get(ctx context.Context, idpID string) (*api.IdentityProvider, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/identity-providers/"+idpID, nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.IdentityProvider](resp) + return &ret, err +} + +// Create new identity provider +// See more: https://docs.netbird.io/api/resources/identity-providers#create-an-identity-provider +func (a *IdentityProvidersAPI) Create(ctx context.Context, request api.PostApiIdentityProvidersJSONRequestBody) (*api.IdentityProvider, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/identity-providers", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.IdentityProvider](resp) + return &ret, err +} + +// Update update identity provider +// See more: https://docs.netbird.io/api/resources/identity-providers#update-an-identity-provider +func (a *IdentityProvidersAPI) Update(ctx context.Context, idpID string, request api.PutApiIdentityProvidersIdpIdJSONRequestBody) (*api.IdentityProvider, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/identity-providers/"+idpID, bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.IdentityProvider](resp) + return &ret, err +} + +// Delete delete identity provider +// See more: https://docs.netbird.io/api/resources/identity-providers#delete-an-identity-provider +func (a *IdentityProvidersAPI) Delete(ctx context.Context, idpID string) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/identity-providers/"+idpID, nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + + return nil +} diff --git a/shared/management/client/rest/identity_providers_test.go b/shared/management/client/rest/identity_providers_test.go new file mode 100644 index 000000000..e6edab549 --- /dev/null +++ b/shared/management/client/rest/identity_providers_test.go @@ -0,0 +1,183 @@ +//go:build integration + +package rest_test + +import ( + "context" + "encoding/json" + "io" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/shared/management/client/rest" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" +) + +var testIdentityProvider = api.IdentityProvider{ + ClientId: "test-client-id", + Id: ptr("Test"), +} + +func TestIdentityProviders_List_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/identity-providers", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal([]api.IdentityProvider{testIdentityProvider}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.IdentityProviders.List(context.Background()) + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testIdentityProvider, ret[0]) + }) +} + +func TestIdentityProviders_List_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/identity-providers", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.IdentityProviders.List(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestIdentityProviders_Get_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/identity-providers/Test", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(testIdentityProvider) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.IdentityProviders.Get(context.Background(), "Test") + require.NoError(t, err) + assert.Equal(t, testIdentityProvider, *ret) + }) +} + +func TestIdentityProviders_Get_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/identity-providers/Test", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.IdentityProviders.Get(context.Background(), "Test") + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestIdentityProviders_Create_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/identity-providers", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.PostApiIdentityProvidersJSONRequestBody + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "new-client-id", req.ClientId) + retBytes, _ := json.Marshal(testIdentityProvider) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.IdentityProviders.Create(context.Background(), api.PostApiIdentityProvidersJSONRequestBody{ + ClientId: "new-client-id", + }) + require.NoError(t, err) + assert.Equal(t, testIdentityProvider, *ret) + }) +} + +func TestIdentityProviders_Create_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/identity-providers", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.IdentityProviders.Create(context.Background(), api.PostApiIdentityProvidersJSONRequestBody{ + ClientId: "new-client-id", + }) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestIdentityProviders_Update_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/identity-providers/Test", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "PUT", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.PutApiIdentityProvidersIdpIdJSONRequestBody + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "updated-client-id", req.ClientId) + retBytes, _ := json.Marshal(testIdentityProvider) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.IdentityProviders.Update(context.Background(), "Test", api.PutApiIdentityProvidersIdpIdJSONRequestBody{ + ClientId: "updated-client-id", + }) + require.NoError(t, err) + assert.Equal(t, testIdentityProvider, *ret) + }) +} + +func TestIdentityProviders_Update_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/identity-providers/Test", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.IdentityProviders.Update(context.Background(), "Test", api.PutApiIdentityProvidersIdpIdJSONRequestBody{ + ClientId: "updated-client-id", + }) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestIdentityProviders_Delete_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/identity-providers/Test", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.IdentityProviders.Delete(context.Background(), "Test") + require.NoError(t, err) + }) +} + +func TestIdentityProviders_Delete_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/identity-providers/Test", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.IdentityProviders.Delete(context.Background(), "Test") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} diff --git a/shared/management/client/rest/ingress.go b/shared/management/client/rest/ingress.go new file mode 100644 index 000000000..f69288d7e --- /dev/null +++ b/shared/management/client/rest/ingress.go @@ -0,0 +1,92 @@ +package rest + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/netbirdio/netbird/shared/management/http/api" +) + +// IngressAPI APIs for Ingress Peers, do not use directly +type IngressAPI struct { + c *Client +} + +// List all ingress peers +// See more: https://docs.netbird.io/api/resources/ingress#list-all-ingress-peers +func (a *IngressAPI) List(ctx context.Context) ([]api.IngressPeer, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/ingress/peers", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.IngressPeer](resp) + return ret, err +} + +// Get ingress peer info +// See more: https://docs.netbird.io/api/resources/ingress#retrieve-an-ingress-peer +func (a *IngressAPI) Get(ctx context.Context, ingressPeerID string) (*api.IngressPeer, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/ingress/peers/"+ingressPeerID, nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.IngressPeer](resp) + return &ret, err +} + +// Create new ingress peer +// See more: https://docs.netbird.io/api/resources/ingress#create-an-ingress-peer +func (a *IngressAPI) Create(ctx context.Context, request api.PostApiIngressPeersJSONRequestBody) (*api.IngressPeer, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/ingress/peers", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.IngressPeer](resp) + return &ret, err +} + +// Update update ingress peer +// See more: https://docs.netbird.io/api/resources/ingress#update-an-ingress-peer +func (a *IngressAPI) Update(ctx context.Context, ingressPeerID string, request api.PutApiIngressPeersIngressPeerIdJSONRequestBody) (*api.IngressPeer, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/ingress/peers/"+ingressPeerID, bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.IngressPeer](resp) + return &ret, err +} + +// Delete delete ingress peer +// See more: https://docs.netbird.io/api/resources/ingress#delete-an-ingress-peer +func (a *IngressAPI) Delete(ctx context.Context, ingressPeerID string) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/ingress/peers/"+ingressPeerID, nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + + return nil +} diff --git a/shared/management/client/rest/ingress_test.go b/shared/management/client/rest/ingress_test.go new file mode 100644 index 000000000..c915db094 --- /dev/null +++ b/shared/management/client/rest/ingress_test.go @@ -0,0 +1,184 @@ +//go:build integration + +package rest_test + +import ( + "context" + "encoding/json" + "io" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/shared/management/client/rest" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" +) + +var testIngressPeer = api.IngressPeer{ + Connected: true, + Enabled: true, + Id: "Test", +} + +func TestIngress_List_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/ingress/peers", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal([]api.IngressPeer{testIngressPeer}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Ingress.List(context.Background()) + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testIngressPeer, ret[0]) + }) +} + +func TestIngress_List_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/ingress/peers", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Ingress.List(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestIngress_Get_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/ingress/peers/Test", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(testIngressPeer) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Ingress.Get(context.Background(), "Test") + require.NoError(t, err) + assert.Equal(t, testIngressPeer, *ret) + }) +} + +func TestIngress_Get_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/ingress/peers/Test", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Ingress.Get(context.Background(), "Test") + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestIngress_Create_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/ingress/peers", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.PostApiIngressPeersJSONRequestBody + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "peer-id", req.PeerId) + retBytes, _ := json.Marshal(testIngressPeer) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Ingress.Create(context.Background(), api.PostApiIngressPeersJSONRequestBody{ + PeerId: "peer-id", + }) + require.NoError(t, err) + assert.Equal(t, testIngressPeer, *ret) + }) +} + +func TestIngress_Create_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/ingress/peers", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Ingress.Create(context.Background(), api.PostApiIngressPeersJSONRequestBody{ + PeerId: "peer-id", + }) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestIngress_Update_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/ingress/peers/Test", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "PUT", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.PutApiIngressPeersIngressPeerIdJSONRequestBody + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, true, req.Enabled) + retBytes, _ := json.Marshal(testIngressPeer) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Ingress.Update(context.Background(), "Test", api.PutApiIngressPeersIngressPeerIdJSONRequestBody{ + Enabled: true, + }) + require.NoError(t, err) + assert.Equal(t, testIngressPeer, *ret) + }) +} + +func TestIngress_Update_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/ingress/peers/Test", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Ingress.Update(context.Background(), "Test", api.PutApiIngressPeersIngressPeerIdJSONRequestBody{ + Enabled: true, + }) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestIngress_Delete_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/ingress/peers/Test", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.Ingress.Delete(context.Background(), "Test") + require.NoError(t, err) + }) +} + +func TestIngress_Delete_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/ingress/peers/Test", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.Ingress.Delete(context.Background(), "Test") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} diff --git a/shared/management/client/rest/instance.go b/shared/management/client/rest/instance.go new file mode 100644 index 000000000..041879b41 --- /dev/null +++ b/shared/management/client/rest/instance.go @@ -0,0 +1,46 @@ +package rest + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/netbirdio/netbird/shared/management/http/api" +) + +// InstanceAPI APIs for Instance status and version, do not use directly +type InstanceAPI struct { + c *Client +} + +// GetStatus get instance status +// See more: https://docs.netbird.io/api/resources/instance#get-instance-status +func (a *InstanceAPI) GetStatus(ctx context.Context) (*api.InstanceStatus, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/instance", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.InstanceStatus](resp) + return &ret, err +} + +// Setup perform initial instance setup +// See more: https://docs.netbird.io/api/resources/instance#setup-instance +func (a *InstanceAPI) Setup(ctx context.Context, request api.PostApiSetupJSONRequestBody) (*api.SetupResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/setup", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.SetupResponse](resp) + return &ret, err +} diff --git a/shared/management/client/rest/instance_test.go b/shared/management/client/rest/instance_test.go new file mode 100644 index 000000000..52125838d --- /dev/null +++ b/shared/management/client/rest/instance_test.go @@ -0,0 +1,96 @@ +//go:build integration + +package rest_test + +import ( + "context" + "encoding/json" + "io" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/shared/management/client/rest" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" +) + +var ( + testInstanceStatus = api.InstanceStatus{ + SetupRequired: true, + } + + testSetupResponse = api.SetupResponse{ + Email: "admin@example.com", + UserId: "user-123", + } +) + +func TestInstance_GetStatus_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/instance", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(testInstanceStatus) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Instance.GetStatus(context.Background()) + require.NoError(t, err) + assert.Equal(t, testInstanceStatus, *ret) + }) +} + +func TestInstance_GetStatus_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/instance", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Instance.GetStatus(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestInstance_Setup_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/setup", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.PostApiSetupJSONRequestBody + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "admin@example.com", req.Email) + retBytes, _ := json.Marshal(testSetupResponse) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Instance.Setup(context.Background(), api.PostApiSetupJSONRequestBody{ + Email: "admin@example.com", + }) + require.NoError(t, err) + assert.Equal(t, testSetupResponse, *ret) + }) +} + +func TestInstance_Setup_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/setup", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Instance.Setup(context.Background(), api.PostApiSetupJSONRequestBody{ + Email: "admin@example.com", + }) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} diff --git a/shared/management/client/rest/msp.go b/shared/management/client/rest/msp.go new file mode 100644 index 000000000..d820ccbde --- /dev/null +++ b/shared/management/client/rest/msp.go @@ -0,0 +1,122 @@ +package rest + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/netbirdio/netbird/shared/management/http/api" +) + +// MSPAPI APIs for MSP tenant management +type MSPAPI struct { + c *Client +} + +// ListTenants retrieves all MSP tenants +// See more: https://docs.netbird.io/api/resources/msp#list-all-tenants +func (a *MSPAPI) ListTenants(ctx context.Context) (*api.GetTenantsResponse, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/msp/tenants", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.GetTenantsResponse](resp) + return &ret, err +} + +// CreateTenant creates a new MSP tenant +// See more: https://docs.netbird.io/api/resources/msp#create-a-tenant +func (a *MSPAPI) CreateTenant(ctx context.Context, request api.CreateTenantRequest) (*api.TenantResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/msp/tenants", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.TenantResponse](resp) + return &ret, err +} + +// UpdateTenant updates an existing MSP tenant +// See more: https://docs.netbird.io/api/resources/msp#update-a-tenant +func (a *MSPAPI) UpdateTenant(ctx context.Context, tenantID string, request api.UpdateTenantRequest) (*api.TenantResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/integrations/msp/tenants/"+tenantID, bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.TenantResponse](resp) + return &ret, err +} + +// DeleteTenant deletes an MSP tenant +// See more: https://docs.netbird.io/api/resources/msp#delete-a-tenant +func (a *MSPAPI) DeleteTenant(ctx context.Context, tenantID string) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/integrations/msp/tenants/"+tenantID, nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} + +// UnlinkTenant unlinks a tenant from the MSP account +// See more: https://docs.netbird.io/api/resources/msp#unlink-a-tenant +func (a *MSPAPI) UnlinkTenant(ctx context.Context, tenantID, owner string) error { + params := map[string]string{"owner": owner} + requestBytes, err := json.Marshal(params) + if err != nil { + return err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/msp/tenants/"+tenantID+"/unlink", bytes.NewReader(requestBytes), nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} + +// VerifyTenantDNS verifies a tenant domain DNS challenge +// See more: https://docs.netbird.io/api/resources/msp#verify-tenant-dns +func (a *MSPAPI) VerifyTenantDNS(ctx context.Context, tenantID string) error { + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/msp/tenants/"+tenantID+"/dns", nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} + +// InviteTenant invites an existing account as a tenant to the MSP account +// See more: https://docs.netbird.io/api/resources/msp#invite-a-tenant +func (a *MSPAPI) InviteTenant(ctx context.Context, tenantID string) (*api.TenantResponse, error) { + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/msp/tenants/"+tenantID+"/invite", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.TenantResponse](resp) + return &ret, err +} diff --git a/shared/management/client/rest/msp_test.go b/shared/management/client/rest/msp_test.go new file mode 100644 index 000000000..7078346f3 --- /dev/null +++ b/shared/management/client/rest/msp_test.go @@ -0,0 +1,251 @@ +//go:build integration + +package rest_test + +import ( + "context" + "encoding/json" + "io" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/shared/management/client/rest" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" +) + +var ( + testTenant = api.TenantResponse{ + Id: "tenant-1", + Name: "Test Tenant", + Domain: "test.example.com", + DnsChallenge: "challenge-123", + Status: "active", + Groups: []api.TenantGroupResponse{}, + CreatedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + UpdatedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + } +) + +func TestMSP_ListTenants_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/msp/tenants", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.TenantResponse{testTenant}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.MSP.ListTenants(context.Background()) + require.NoError(t, err) + assert.Len(t, *ret, 1) + assert.Equal(t, testTenant, (*ret)[0]) + }) +} + +func TestMSP_ListTenants_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/msp/tenants", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.MSP.ListTenants(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestMSP_CreateTenant_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/msp/tenants", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.CreateTenantRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "Test Tenant", req.Name) + assert.Equal(t, "test.example.com", req.Domain) + retBytes, _ := json.Marshal(testTenant) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.MSP.CreateTenant(context.Background(), api.CreateTenantRequest{ + Name: "Test Tenant", + Domain: "test.example.com", + Groups: []api.TenantGroupResponse{}, + }) + require.NoError(t, err) + assert.Equal(t, testTenant, *ret) + }) +} + +func TestMSP_CreateTenant_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/msp/tenants", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.MSP.CreateTenant(context.Background(), api.CreateTenantRequest{ + Name: "Test Tenant", + Domain: "test.example.com", + Groups: []api.TenantGroupResponse{}, + }) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestMSP_UpdateTenant_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/msp/tenants/tenant-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "PUT", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.UpdateTenantRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "Updated Tenant", req.Name) + retBytes, _ := json.Marshal(testTenant) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.MSP.UpdateTenant(context.Background(), "tenant-1", api.UpdateTenantRequest{ + Name: "Updated Tenant", + Groups: []api.TenantGroupResponse{}, + }) + require.NoError(t, err) + assert.Equal(t, testTenant, *ret) + }) +} + +func TestMSP_UpdateTenant_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/msp/tenants/tenant-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.MSP.UpdateTenant(context.Background(), "tenant-1", api.UpdateTenantRequest{ + Name: "Updated Tenant", + Groups: []api.TenantGroupResponse{}, + }) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestMSP_DeleteTenant_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/msp/tenants/tenant-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.MSP.DeleteTenant(context.Background(), "tenant-1") + require.NoError(t, err) + }) +} + +func TestMSP_DeleteTenant_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/msp/tenants/tenant-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.MSP.DeleteTenant(context.Background(), "tenant-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} + +func TestMSP_UnlinkTenant_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/msp/tenants/tenant-1/unlink", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + w.WriteHeader(200) + }) + err := c.MSP.UnlinkTenant(context.Background(), "tenant-1", "owner-1") + require.NoError(t, err) + }) +} + +func TestMSP_UnlinkTenant_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/msp/tenants/tenant-1/unlink", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.MSP.UnlinkTenant(context.Background(), "tenant-1", "owner-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} + +func TestMSP_VerifyTenantDNS_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/msp/tenants/tenant-1/dns", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + w.WriteHeader(200) + }) + err := c.MSP.VerifyTenantDNS(context.Background(), "tenant-1") + require.NoError(t, err) + }) +} + +func TestMSP_VerifyTenantDNS_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/msp/tenants/tenant-1/dns", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Failed", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.MSP.VerifyTenantDNS(context.Background(), "tenant-1") + assert.Error(t, err) + assert.Equal(t, "Failed", err.Error()) + }) +} + +func TestMSP_InviteTenant_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/msp/tenants/tenant-1/invite", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + retBytes, _ := json.Marshal(testTenant) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.MSP.InviteTenant(context.Background(), "tenant-1") + require.NoError(t, err) + assert.Equal(t, testTenant, *ret) + }) +} + +func TestMSP_InviteTenant_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/msp/tenants/tenant-1/invite", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.MSP.InviteTenant(context.Background(), "tenant-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Nil(t, ret) + }) +} diff --git a/shared/management/client/rest/networks.go b/shared/management/client/rest/networks.go index cb25dcbef..86dd20c7b 100644 --- a/shared/management/client/rest/networks.go +++ b/shared/management/client/rest/networks.go @@ -91,6 +91,20 @@ func (a *NetworksAPI) Delete(ctx context.Context, networkID string) error { return nil } +// ListAllRouters list all routers across all networks +// See more: https://docs.netbird.io/api/resources/networks#list-all-network-routers +func (a *NetworksAPI) ListAllRouters(ctx context.Context) ([]api.NetworkRouter, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/networks/routers", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.NetworkRouter](resp) + return ret, err +} + // NetworkResourcesAPI APIs for Network Resources, do not use directly type NetworkResourcesAPI struct { c *Client diff --git a/shared/management/client/rest/networks_test.go b/shared/management/client/rest/networks_test.go index 2bf1a0d3b..33c9e72bb 100644 --- a/shared/management/client/rest/networks_test.go +++ b/shared/management/client/rest/networks_test.go @@ -219,6 +219,35 @@ func TestNetworks_Integration(t *testing.T) { }) } +func TestNetworks_ListAllRouters_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/networks/routers", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal([]api.NetworkRouter{testNetworkRouter}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Networks.ListAllRouters(context.Background()) + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testNetworkRouter, ret[0]) + }) +} + +func TestNetworks_ListAllRouters_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/networks/routers", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Networks.ListAllRouters(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + func TestNetworkResources_List_200(t *testing.T) { withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/resources", func(w http.ResponseWriter, r *http.Request) { diff --git a/shared/management/client/rest/peers.go b/shared/management/client/rest/peers.go index 359c21e42..b22bcae67 100644 --- a/shared/management/client/rest/peers.go +++ b/shared/management/client/rest/peers.go @@ -106,3 +106,173 @@ func (a *PeersAPI) ListAccessiblePeers(ctx context.Context, peerID string) ([]ap ret, err := parseResponse[[]api.Peer](resp) return ret, err } + +// CreateTemporaryAccess create temporary access for a peer +// See more: https://docs.netbird.io/api/resources/peers#create-temporary-access +func (a *PeersAPI) CreateTemporaryAccess(ctx context.Context, peerID string, request api.PostApiPeersPeerIdTemporaryAccessJSONRequestBody) (*api.PeerTemporaryAccessResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/peers/"+peerID+"/temporary-access", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.PeerTemporaryAccessResponse](resp) + return &ret, err +} + +// PeerIngressPortsAPI APIs for Peer Ingress Ports, do not use directly +type PeerIngressPortsAPI struct { + c *Client + peerID string +} + +// IngressPorts APIs for peer ingress ports +func (a *PeersAPI) IngressPorts(peerID string) *PeerIngressPortsAPI { + return &PeerIngressPortsAPI{ + c: a.c, + peerID: peerID, + } +} + +// List list all ingress port allocations for a peer +// See more: https://docs.netbird.io/api/resources/peers#list-all-ingress-port-allocations +func (a *PeerIngressPortsAPI) List(ctx context.Context) ([]api.IngressPortAllocation, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/peers/"+a.peerID+"/ingress/ports", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.IngressPortAllocation](resp) + return ret, err +} + +// Get get ingress port allocation info +// See more: https://docs.netbird.io/api/resources/peers#retrieve-an-ingress-port-allocation +func (a *PeerIngressPortsAPI) Get(ctx context.Context, allocationID string) (*api.IngressPortAllocation, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/peers/"+a.peerID+"/ingress/ports/"+allocationID, nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.IngressPortAllocation](resp) + return &ret, err +} + +// Create create new ingress port allocation +// See more: https://docs.netbird.io/api/resources/peers#create-an-ingress-port-allocation +func (a *PeerIngressPortsAPI) Create(ctx context.Context, request api.PostApiPeersPeerIdIngressPortsJSONRequestBody) (*api.IngressPortAllocation, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/peers/"+a.peerID+"/ingress/ports", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.IngressPortAllocation](resp) + return &ret, err +} + +// Update update ingress port allocation +// See more: https://docs.netbird.io/api/resources/peers#update-an-ingress-port-allocation +func (a *PeerIngressPortsAPI) Update(ctx context.Context, allocationID string, request api.PutApiPeersPeerIdIngressPortsAllocationIdJSONRequestBody) (*api.IngressPortAllocation, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/peers/"+a.peerID+"/ingress/ports/"+allocationID, bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.IngressPortAllocation](resp) + return &ret, err +} + +// Delete delete ingress port allocation +// See more: https://docs.netbird.io/api/resources/peers#delete-an-ingress-port-allocation +func (a *PeerIngressPortsAPI) Delete(ctx context.Context, allocationID string) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/peers/"+a.peerID+"/ingress/ports/"+allocationID, nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + + return nil +} + +// PeerJobsAPI APIs for Peer Jobs, do not use directly +type PeerJobsAPI struct { + c *Client + peerID string +} + +// Jobs APIs for peer jobs +func (a *PeersAPI) Jobs(peerID string) *PeerJobsAPI { + return &PeerJobsAPI{ + c: a.c, + peerID: peerID, + } +} + +// List list all jobs for a peer +// See more: https://docs.netbird.io/api/resources/peers#list-all-peer-jobs +func (a *PeerJobsAPI) List(ctx context.Context) ([]api.JobResponse, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/peers/"+a.peerID+"/jobs", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.JobResponse](resp) + return ret, err +} + +// Get get job info +// See more: https://docs.netbird.io/api/resources/peers#retrieve-a-peer-job +func (a *PeerJobsAPI) Get(ctx context.Context, jobID string) (*api.JobResponse, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/peers/"+a.peerID+"/jobs/"+jobID, nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.JobResponse](resp) + return &ret, err +} + +// Create create new job for a peer +// See more: https://docs.netbird.io/api/resources/peers#create-a-peer-job +func (a *PeerJobsAPI) Create(ctx context.Context, request api.PostApiPeersPeerIdJobsJSONRequestBody) (*api.JobResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/peers/"+a.peerID+"/jobs", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.JobResponse](resp) + return &ret, err +} diff --git a/shared/management/client/rest/peers_test.go b/shared/management/client/rest/peers_test.go index c464de7ed..5724b57f9 100644 --- a/shared/management/client/rest/peers_test.go +++ b/shared/management/client/rest/peers_test.go @@ -25,6 +25,21 @@ var ( DnsLabel: "test", Id: "Test", } + + testPeerTemporaryAccess = api.PeerTemporaryAccessResponse{ + Id: "Test", + Name: "test-peer", + } + + testIngressPortAllocation = api.IngressPortAllocation{ + Enabled: true, + Id: "alloc-1", + } + + testJobResponse = api.JobResponse{ + Id: "job-1", + Status: "pending", + } ) func TestPeers_List_200(t *testing.T) { @@ -177,6 +192,264 @@ func TestPeers_ListAccessiblePeers_Err(t *testing.T) { }) } +func TestPeers_CreateTemporaryAccess_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/Test/temporary-access", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + retBytes, _ := json.Marshal(testPeerTemporaryAccess) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Peers.CreateTemporaryAccess(context.Background(), "Test", api.PostApiPeersPeerIdTemporaryAccessJSONRequestBody{}) + require.NoError(t, err) + assert.Equal(t, testPeerTemporaryAccess, *ret) + }) +} + +func TestPeers_CreateTemporaryAccess_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/Test/temporary-access", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Peers.CreateTemporaryAccess(context.Background(), "Test", api.PostApiPeersPeerIdTemporaryAccessJSONRequestBody{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestPeerIngressPorts_List_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/Test/ingress/ports", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal([]api.IngressPortAllocation{testIngressPortAllocation}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Peers.IngressPorts("Test").List(context.Background()) + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testIngressPortAllocation, ret[0]) + }) +} + +func TestPeerIngressPorts_List_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/Test/ingress/ports", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Peers.IngressPorts("Test").List(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestPeerIngressPorts_Get_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/Test/ingress/ports/alloc-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(testIngressPortAllocation) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Peers.IngressPorts("Test").Get(context.Background(), "alloc-1") + require.NoError(t, err) + assert.Equal(t, testIngressPortAllocation, *ret) + }) +} + +func TestPeerIngressPorts_Get_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/Test/ingress/ports/alloc-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Peers.IngressPorts("Test").Get(context.Background(), "alloc-1") + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestPeerIngressPorts_Create_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/Test/ingress/ports", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + retBytes, _ := json.Marshal(testIngressPortAllocation) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Peers.IngressPorts("Test").Create(context.Background(), api.PostApiPeersPeerIdIngressPortsJSONRequestBody{}) + require.NoError(t, err) + assert.Equal(t, testIngressPortAllocation, *ret) + }) +} + +func TestPeerIngressPorts_Create_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/Test/ingress/ports", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Peers.IngressPorts("Test").Create(context.Background(), api.PostApiPeersPeerIdIngressPortsJSONRequestBody{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestPeerIngressPorts_Update_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/Test/ingress/ports/alloc-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "PUT", r.Method) + retBytes, _ := json.Marshal(testIngressPortAllocation) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Peers.IngressPorts("Test").Update(context.Background(), "alloc-1", api.PutApiPeersPeerIdIngressPortsAllocationIdJSONRequestBody{}) + require.NoError(t, err) + assert.Equal(t, testIngressPortAllocation, *ret) + }) +} + +func TestPeerIngressPorts_Update_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/Test/ingress/ports/alloc-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Peers.IngressPorts("Test").Update(context.Background(), "alloc-1", api.PutApiPeersPeerIdIngressPortsAllocationIdJSONRequestBody{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestPeerIngressPorts_Delete_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/Test/ingress/ports/alloc-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.Peers.IngressPorts("Test").Delete(context.Background(), "alloc-1") + require.NoError(t, err) + }) +} + +func TestPeerIngressPorts_Delete_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/Test/ingress/ports/alloc-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.Peers.IngressPorts("Test").Delete(context.Background(), "alloc-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} + +func TestPeerJobs_List_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/Test/jobs", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal([]api.JobResponse{testJobResponse}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Peers.Jobs("Test").List(context.Background()) + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testJobResponse.Id, ret[0].Id) + assert.Equal(t, testJobResponse.Status, ret[0].Status) + }) +} + +func TestPeerJobs_List_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/Test/jobs", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Peers.Jobs("Test").List(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestPeerJobs_Get_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/Test/jobs/job-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(testJobResponse) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Peers.Jobs("Test").Get(context.Background(), "job-1") + require.NoError(t, err) + assert.Equal(t, testJobResponse.Id, ret.Id) + assert.Equal(t, testJobResponse.Status, ret.Status) + }) +} + +func TestPeerJobs_Get_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/Test/jobs/job-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Peers.Jobs("Test").Get(context.Background(), "job-1") + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestPeerJobs_Create_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/Test/jobs", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + retBytes, _ := json.Marshal(testJobResponse) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Peers.Jobs("Test").Create(context.Background(), api.PostApiPeersPeerIdJobsJSONRequestBody{}) + require.NoError(t, err) + assert.Equal(t, testJobResponse.Id, ret.Id) + assert.Equal(t, testJobResponse.Status, ret.Status) + }) +} + +func TestPeerJobs_Create_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/peers/Test/jobs", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Peers.Jobs("Test").Create(context.Background(), api.PostApiPeersPeerIdJobsJSONRequestBody{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + func TestPeers_Integration(t *testing.T) { withBlackBoxServer(t, func(c *rest.Client) { peers, err := c.Peers.List(context.Background()) diff --git a/shared/management/client/rest/scim.go b/shared/management/client/rest/scim.go new file mode 100644 index 000000000..f9a33fee7 --- /dev/null +++ b/shared/management/client/rest/scim.go @@ -0,0 +1,119 @@ +package rest + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/netbirdio/netbird/shared/management/http/api" +) + +// SCIMAPI APIs for SCIM IDP integrations +type SCIMAPI struct { + c *Client +} + +// List retrieves all SCIM IDP integrations +// See more: https://docs.netbird.io/api/resources/scim#list-all-scim-integrations +func (a *SCIMAPI) List(ctx context.Context) ([]api.ScimIntegration, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/scim-idp", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.ScimIntegration](resp) + return ret, err +} + +// Get retrieves a specific SCIM IDP integration by ID +// See more: https://docs.netbird.io/api/resources/scim#retrieve-a-scim-integration +func (a *SCIMAPI) Get(ctx context.Context, integrationID string) (*api.ScimIntegration, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/scim-idp/"+integrationID, nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.ScimIntegration](resp) + return &ret, err +} + +// Create creates a new SCIM IDP integration +// See more: https://docs.netbird.io/api/resources/scim#create-a-scim-integration +func (a *SCIMAPI) Create(ctx context.Context, request api.CreateScimIntegrationRequest) (*api.ScimIntegration, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/scim-idp", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.ScimIntegration](resp) + return &ret, err +} + +// Update updates an existing SCIM IDP integration +// See more: https://docs.netbird.io/api/resources/scim#update-a-scim-integration +func (a *SCIMAPI) Update(ctx context.Context, integrationID string, request api.UpdateScimIntegrationRequest) (*api.ScimIntegration, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/integrations/scim-idp/"+integrationID, bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.ScimIntegration](resp) + return &ret, err +} + +// Delete deletes a SCIM IDP integration +// See more: https://docs.netbird.io/api/resources/scim#delete-a-scim-integration +func (a *SCIMAPI) Delete(ctx context.Context, integrationID string) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/integrations/scim-idp/"+integrationID, nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} + +// RegenerateToken regenerates the SCIM API token for an integration +// See more: https://docs.netbird.io/api/resources/scim#regenerate-scim-token +func (a *SCIMAPI) RegenerateToken(ctx context.Context, integrationID string) (*api.ScimTokenResponse, error) { + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/scim-idp/"+integrationID+"/token", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.ScimTokenResponse](resp) + return &ret, err +} + +// GetLogs retrieves synchronization logs for an SCIM IDP integration +// See more: https://docs.netbird.io/api/resources/scim#get-scim-sync-logs +func (a *SCIMAPI) GetLogs(ctx context.Context, integrationID string) ([]api.IdpIntegrationSyncLog, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/scim-idp/"+integrationID+"/logs", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.IdpIntegrationSyncLog](resp) + return ret, err +} diff --git a/shared/management/client/rest/scim_test.go b/shared/management/client/rest/scim_test.go new file mode 100644 index 000000000..08581b482 --- /dev/null +++ b/shared/management/client/rest/scim_test.go @@ -0,0 +1,262 @@ +//go:build integration + +package rest_test + +import ( + "context" + "encoding/json" + "io" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/shared/management/client/rest" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" +) + +var ( + testScimIntegration = api.ScimIntegration{ + Id: 1, + AuthToken: "****", + Enabled: true, + GroupPrefixes: []string{"eng-"}, + UserGroupPrefixes: []string{"dev-"}, + Provider: "okta", + LastSyncedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + } + + testScimToken = api.ScimTokenResponse{ + AuthToken: "new-token-123", + } + + testSyncLog = api.IdpIntegrationSyncLog{ + Id: 1, + Level: "info", + Message: "Sync completed", + Timestamp: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + } +) + +func TestSCIM_List_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/scim-idp", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.ScimIntegration{testScimIntegration}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.SCIM.List(context.Background()) + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testScimIntegration, ret[0]) + }) +} + +func TestSCIM_List_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/scim-idp", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.SCIM.List(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestSCIM_Get_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/scim-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal(testScimIntegration) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.SCIM.Get(context.Background(), "int-1") + require.NoError(t, err) + assert.Equal(t, testScimIntegration, *ret) + }) +} + +func TestSCIM_Get_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/scim-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.SCIM.Get(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestSCIM_Create_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/scim-idp", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.CreateScimIntegrationRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "okta", req.Provider) + assert.Equal(t, "scim-", req.Prefix) + retBytes, _ := json.Marshal(testScimIntegration) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.SCIM.Create(context.Background(), api.CreateScimIntegrationRequest{ + Provider: "okta", + Prefix: "scim-", + GroupPrefixes: &[]string{"eng-"}, + }) + require.NoError(t, err) + assert.Equal(t, testScimIntegration, *ret) + }) +} + +func TestSCIM_Create_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/scim-idp", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.SCIM.Create(context.Background(), api.CreateScimIntegrationRequest{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestSCIM_Update_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/scim-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "PUT", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.UpdateScimIntegrationRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, true, *req.Enabled) + retBytes, _ := json.Marshal(testScimIntegration) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.SCIM.Update(context.Background(), "int-1", api.UpdateScimIntegrationRequest{ + Enabled: ptr(true), + }) + require.NoError(t, err) + assert.Equal(t, testScimIntegration, *ret) + }) +} + +func TestSCIM_Update_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/scim-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.SCIM.Update(context.Background(), "int-1", api.UpdateScimIntegrationRequest{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestSCIM_Delete_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/scim-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.SCIM.Delete(context.Background(), "int-1") + require.NoError(t, err) + }) +} + +func TestSCIM_Delete_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/scim-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.SCIM.Delete(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} + +func TestSCIM_RegenerateToken_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/scim-idp/int-1/token", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + retBytes, _ := json.Marshal(testScimToken) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.SCIM.RegenerateToken(context.Background(), "int-1") + require.NoError(t, err) + assert.Equal(t, testScimToken, *ret) + }) +} + +func TestSCIM_RegenerateToken_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/scim-idp/int-1/token", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.SCIM.RegenerateToken(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestSCIM_GetLogs_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/scim-idp/int-1/logs", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.IdpIntegrationSyncLog{testSyncLog}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.SCIM.GetLogs(context.Background(), "int-1") + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testSyncLog, ret[0]) + }) +} + +func TestSCIM_GetLogs_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/scim-idp/int-1/logs", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.SCIM.GetLogs(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Empty(t, ret) + }) +} diff --git a/shared/management/client/rest/users.go b/shared/management/client/rest/users.go index b0ea46d55..98d84895b 100644 --- a/shared/management/client/rest/users.go +++ b/shared/management/client/rest/users.go @@ -105,3 +105,145 @@ func (a *UsersAPI) Current(ctx context.Context) (*api.User, error) { ret, err := parseResponse[api.User](resp) return &ret, err } + +// ListInvites list all user invites +// See more: https://docs.netbird.io/api/resources/users#list-all-user-invites +func (a *UsersAPI) ListInvites(ctx context.Context) ([]api.UserInvite, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/users/invites", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.UserInvite](resp) + return ret, err +} + +// CreateInvite create a user invite +// See more: https://docs.netbird.io/api/resources/users#create-a-user-invite +func (a *UsersAPI) CreateInvite(ctx context.Context, request api.PostApiUsersInvitesJSONRequestBody) (*api.UserInvite, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/users/invites", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.UserInvite](resp) + return &ret, err +} + +// DeleteInvite delete a user invite +// See more: https://docs.netbird.io/api/resources/users#delete-a-user-invite +func (a *UsersAPI) DeleteInvite(ctx context.Context, inviteID string) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/users/invites/"+inviteID, nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + + return nil +} + +// RegenerateInvite regenerate a user invite token +// See more: https://docs.netbird.io/api/resources/users#regenerate-a-user-invite +func (a *UsersAPI) RegenerateInvite(ctx context.Context, inviteID string, request api.PostApiUsersInvitesInviteIdRegenerateJSONRequestBody) (*api.UserInviteRegenerateResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/users/invites/"+inviteID+"/regenerate", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.UserInviteRegenerateResponse](resp) + return &ret, err +} + +// GetInviteByToken get a user invite by token +// See more: https://docs.netbird.io/api/resources/users#get-a-user-invite-by-token +func (a *UsersAPI) GetInviteByToken(ctx context.Context, token string) (*api.UserInviteInfo, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/users/invites/"+token, nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.UserInviteInfo](resp) + return &ret, err +} + +// AcceptInvite accept a user invite +// See more: https://docs.netbird.io/api/resources/users#accept-a-user-invite +func (a *UsersAPI) AcceptInvite(ctx context.Context, token string, request api.PostApiUsersInvitesTokenAcceptJSONRequestBody) (*api.UserInviteAcceptResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/users/invites/"+token+"/accept", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.UserInviteAcceptResponse](resp) + return &ret, err +} + +// Approve approve a pending user +// See more: https://docs.netbird.io/api/resources/users#approve-a-user +func (a *UsersAPI) Approve(ctx context.Context, userID string) (*api.User, error) { + resp, err := a.c.NewRequest(ctx, "POST", "/api/users/"+userID+"/approve", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.User](resp) + return &ret, err +} + +// ChangePassword change a user's password +// See more: https://docs.netbird.io/api/resources/users#change-user-password +func (a *UsersAPI) ChangePassword(ctx context.Context, userID string, request api.PutApiUsersUserIdPasswordJSONRequestBody) error { + requestBytes, err := json.Marshal(request) + if err != nil { + return err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/users/"+userID+"/password", bytes.NewReader(requestBytes), nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + + return nil +} + +// Reject reject a pending user +// See more: https://docs.netbird.io/api/resources/users#reject-a-user +func (a *UsersAPI) Reject(ctx context.Context, userID string) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/users/"+userID+"/reject", nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + + return nil +} diff --git a/shared/management/client/rest/users_test.go b/shared/management/client/rest/users_test.go index 68815d4f9..66690833a 100644 --- a/shared/management/client/rest/users_test.go +++ b/shared/management/client/rest/users_test.go @@ -32,6 +32,23 @@ var ( Role: "user", Status: api.UserStatusActive, } + + testUserInvite = api.UserInvite{ + AutoGroups: []string{"group1"}, + Id: "invite-1", + } + + testUserInviteInfo = api.UserInviteInfo{ + Email: "invite@test.com", + } + + testUserInviteAcceptResponse = api.UserInviteAcceptResponse{ + Success: true, + } + + testUserInviteRegenerateResponse = api.UserInviteRegenerateResponse{ + InviteToken: "new-token", + } ) func TestUsers_List_200(t *testing.T) { @@ -220,6 +237,269 @@ func TestUsers_Current_Err(t *testing.T) { }) } +func TestUsers_ListInvites_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/users/invites", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal([]api.UserInvite{testUserInvite}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Users.ListInvites(context.Background()) + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testUserInvite, ret[0]) + }) +} + +func TestUsers_ListInvites_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/users/invites", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Users.ListInvites(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestUsers_CreateInvite_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/users/invites", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.PostApiUsersInvitesJSONRequestBody + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "invite@test.com", req.Email) + retBytes, _ := json.Marshal(testUserInvite) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Users.CreateInvite(context.Background(), api.PostApiUsersInvitesJSONRequestBody{ + Email: "invite@test.com", + }) + require.NoError(t, err) + assert.Equal(t, testUserInvite, *ret) + }) +} + +func TestUsers_CreateInvite_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/users/invites", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Users.CreateInvite(context.Background(), api.PostApiUsersInvitesJSONRequestBody{ + Email: "invite@test.com", + }) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestUsers_DeleteInvite_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/users/invites/invite-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.Users.DeleteInvite(context.Background(), "invite-1") + require.NoError(t, err) + }) +} + +func TestUsers_DeleteInvite_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/users/invites/invite-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.Users.DeleteInvite(context.Background(), "invite-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} + +func TestUsers_RegenerateInvite_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/users/invites/invite-1/regenerate", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + retBytes, _ := json.Marshal(testUserInviteRegenerateResponse) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Users.RegenerateInvite(context.Background(), "invite-1", api.PostApiUsersInvitesInviteIdRegenerateJSONRequestBody{}) + require.NoError(t, err) + assert.Equal(t, testUserInviteRegenerateResponse, *ret) + }) +} + +func TestUsers_RegenerateInvite_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/users/invites/invite-1/regenerate", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Users.RegenerateInvite(context.Background(), "invite-1", api.PostApiUsersInvitesInviteIdRegenerateJSONRequestBody{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestUsers_GetInviteByToken_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/users/invites/some-token", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(testUserInviteInfo) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Users.GetInviteByToken(context.Background(), "some-token") + require.NoError(t, err) + assert.Equal(t, testUserInviteInfo, *ret) + }) +} + +func TestUsers_GetInviteByToken_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/users/invites/some-token", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Users.GetInviteByToken(context.Background(), "some-token") + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestUsers_AcceptInvite_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/users/invites/some-token/accept", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + retBytes, _ := json.Marshal(testUserInviteAcceptResponse) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Users.AcceptInvite(context.Background(), "some-token", api.PostApiUsersInvitesTokenAcceptJSONRequestBody{}) + require.NoError(t, err) + assert.Equal(t, testUserInviteAcceptResponse, *ret) + }) +} + +func TestUsers_AcceptInvite_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/users/invites/some-token/accept", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Users.AcceptInvite(context.Background(), "some-token", api.PostApiUsersInvitesTokenAcceptJSONRequestBody{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestUsers_Approve_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/users/Test/approve", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + retBytes, _ := json.Marshal(testUser) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Users.Approve(context.Background(), "Test") + require.NoError(t, err) + assert.Equal(t, testUser, *ret) + }) +} + +func TestUsers_Approve_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/users/Test/approve", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.Users.Approve(context.Background(), "Test") + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestUsers_ChangePassword_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/users/Test/password", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "PUT", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.PutApiUsersUserIdPasswordJSONRequestBody + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + w.WriteHeader(200) + }) + err := c.Users.ChangePassword(context.Background(), "Test", api.PutApiUsersUserIdPasswordJSONRequestBody{}) + require.NoError(t, err) + }) +} + +func TestUsers_ChangePassword_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/users/Test/password", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.Users.ChangePassword(context.Background(), "Test", api.PutApiUsersUserIdPasswordJSONRequestBody{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + }) +} + +func TestUsers_Reject_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/users/Test/reject", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.Users.Reject(context.Background(), "Test") + require.NoError(t, err) + }) +} + +func TestUsers_Reject_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/users/Test/reject", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.Users.Reject(context.Background(), "Test") + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + }) +} + func TestUsers_Integration(t *testing.T) { withBlackBoxServer(t, func(c *rest.Client) { // rest client PAT is owner's diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index b9a8eae3a..5a504c471 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -42,6 +42,52 @@ tags: description: Interact with and view information about remote jobs. x-experimental: true + - name: Usage + description: Retrieve current usage statistics for the account. + x-cloud-only: true + - name: Subscription + description: Manage and view information about account subscriptions. + x-cloud-only: true + - name: Plans + description: Retrieve available plans and products. + x-cloud-only: true + - name: Checkout + description: Manage checkout sessions for plan subscriptions. + x-cloud-only: true + - name: AWS Marketplace + description: Manage AWS Marketplace subscriptions. + x-cloud-only: true + - name: Portal + description: Access customer portal for subscription management. + x-cloud-only: true + - name: Invoice + description: Manage and retrieve account invoices. + x-cloud-only: true + - name: MSP + description: MSP portal for Tenant management. + x-cloud-only: true + - name: IDP + description: Manage identity provider integrations for user and group sync. + x-cloud-only: true + - name: EDR Intune Integrations + description: Manage Microsoft Intune EDR integrations. + x-cloud-only: true + - name: EDR SentinelOne Integrations + description: Manage SentinelOne EDR integrations. + x-cloud-only: true + - name: EDR Falcon Integrations + description: Manage CrowdStrike Falcon EDR integrations. + x-cloud-only: true + - name: EDR Huntress Integrations + description: Manage Huntress EDR integrations. + x-cloud-only: true + - name: EDR Peers + description: Manage EDR compliance bypass for peers. + x-cloud-only: true + - name: Event Streaming Integrations + description: Manage event streaming integrations. + x-cloud-only: true + components: schemas: PasswordChangeRequest: @@ -61,8 +107,8 @@ components: WorkloadType: type: string description: | - Identifies the type of workload the job will execute. - Currently only `"bundle"` is supported. + Identifies the type of workload the job will execute. + Currently only `"bundle"` is supported. enum: - bundle example: "bundle" @@ -110,8 +156,8 @@ components: parameters: $ref: '#/components/schemas/BundleParameters' required: - - type - - parameters + - type + - parameters BundleWorkloadResponse: type: object properties: @@ -162,7 +208,7 @@ components: type: string status: type: string - enum: [pending, succeeded, failed] + enum: [ pending, succeeded, failed ] failed_reason: type: string nullable: true @@ -371,7 +417,7 @@ components: status: description: User's status type: string - enum: [ "active","invited","blocked" ] + enum: [ "active", "invited", "blocked" ] example: active last_login: description: Last time this user performed a login to the dashboard @@ -439,7 +485,7 @@ components: propertyNames: type: string description: The module name - example: {"networks": { "read": true, "create": false, "update": false, "delete": false}, "peers": { "read": false, "create": false, "update": false, "delete": false} } + example: { "networks": { "read": true, "create": false, "update": false, "delete": false }, "peers": { "read": false, "create": false, "update": false, "delete": false } } required: - modules - is_restricted @@ -1234,7 +1280,7 @@ components: issued: description: How the group was issued (api, integration, jwt) type: string - enum: ["api", "integration", "jwt"] + enum: [ "api", "integration", "jwt" ] example: api required: - id @@ -1295,7 +1341,7 @@ components: action: description: Policy rule accept or drops packets type: string - enum: ["accept","drop"] + enum: [ "accept", "drop" ] example: "accept" bidirectional: description: Define if the rule is applicable in both directions, sources, and destinations. @@ -1304,7 +1350,7 @@ components: protocol: description: Policy rule type of the traffic type: string - enum: ["all", "tcp", "udp", "icmp", "netbird-ssh"] + enum: [ "all", "tcp", "udp", "icmp", "netbird-ssh" ] example: "tcp" ports: description: Policy rule affected ports @@ -1615,7 +1661,7 @@ components: type: array items: type: string - example: ["192.168.1.0/24", "10.0.0.0/8", "2001:db8:1234:1a00::/56"] + example: [ "192.168.1.0/24", "10.0.0.0/8", "2001:db8:1234:1a00::/56" ] action: description: Action to take upon policy match type: string @@ -1786,11 +1832,11 @@ components: - description - network_id - enabled - # Only one property has to be set - #- peer - #- peer_groups - # Only one property has to be set - #- network + # Only one property has to be set + #- peer + #- peer_groups + # Only one property has to be set + #- network #- domains - metric - masquerade @@ -1829,7 +1875,7 @@ components: allOf: - $ref: '#/components/schemas/NetworkResourceType' - type: string - enum: ["peer"] + enum: [ "peer" ] example: peer NetworkRequest: type: object @@ -2198,52 +2244,7 @@ components: activity_code: description: The string code of the activity that occurred during the event type: string - enum: [ - "peer.user.add", "peer.setupkey.add", "user.join", "user.invite", "account.create", "account.delete", - "user.peer.delete", "rule.add", "rule.update", "rule.delete", - "policy.add", "policy.update", "policy.delete", - "setupkey.add", "setupkey.update", "setupkey.revoke", "setupkey.overuse", "setupkey.delete", - "group.add", "group.update", "group.delete", - "peer.group.add", "peer.group.delete", - "user.group.add", "user.group.delete", "user.role.update", - "setupkey.group.add", "setupkey.group.delete", - "dns.setting.disabled.management.group.add", "dns.setting.disabled.management.group.delete", - "route.add", "route.delete", "route.update", - "peer.ssh.enable", "peer.ssh.disable", "peer.rename", - "peer.login.expiration.enable", "peer.login.expiration.disable", - "nameserver.group.add", "nameserver.group.delete", "nameserver.group.update", - "account.setting.peer.login.expiration.update", "account.setting.peer.login.expiration.enable", "account.setting.peer.login.expiration.disable", - "personal.access.token.create", "personal.access.token.delete", - "service.user.create", "service.user.delete", - "user.block", "user.unblock", "user.delete", - "user.peer.login", "peer.login.expire", - "dashboard.login", - "integration.create", "integration.update", "integration.delete", - "account.setting.peer.approval.enable", "account.setting.peer.approval.disable", - "peer.approve", "peer.approval.revoke", - "transferred.owner.role", - "posture.check.create", "posture.check.update", "posture.check.delete", - "peer.inactivity.expiration.enable", "peer.inactivity.expiration.disable", - "account.peer.inactivity.expiration.enable", "account.peer.inactivity.expiration.disable", "account.peer.inactivity.expiration.update", - "account.setting.group.propagation.enable", "account.setting.group.propagation.disable", - "account.setting.routing.peer.dns.resolution.enable", "account.setting.routing.peer.dns.resolution.disable", - "network.create", "network.update", "network.delete", - "network.resource.create", "network.resource.update", "network.resource.delete", - "network.router.create", "network.router.update", "network.router.delete", - "resource.group.add", "resource.group.delete", - "account.dns.domain.update", - "account.setting.lazy.connection.enable", "account.setting.lazy.connection.disable", - "account.network.range.update", - "peer.ip.update", - "user.approve", "user.reject", "user.create", - "account.settings.auto.version.update", - "identityprovider.create", "identityprovider.update", "identityprovider.delete", - "dns.zone.create", "dns.zone.update", "dns.zone.delete", - "dns.zone.record.create", "dns.zone.record.update", "dns.zone.record.delete", - "peer.job.create", - "user.password.change", - "user.invite.link.create", "user.invite.link.accept", "user.invite.link.regenerate", "user.invite.link.delete" - ] + enum: [ "peer.user.add", "peer.setupkey.add", "user.join", "user.invite", "account.create", "account.delete", "user.peer.delete", "rule.add", "rule.update", "rule.delete", "policy.add", "policy.update", "policy.delete", "setupkey.add", "setupkey.update", "setupkey.revoke", "setupkey.overuse", "setupkey.delete", "group.add", "group.update", "group.delete", "peer.group.add", "peer.group.delete", "user.group.add", "user.group.delete", "user.role.update", "setupkey.group.add", "setupkey.group.delete", "dns.setting.disabled.management.group.add", "dns.setting.disabled.management.group.delete", "route.add", "route.delete", "route.update", "peer.ssh.enable", "peer.ssh.disable", "peer.rename", "peer.login.expiration.enable", "peer.login.expiration.disable", "nameserver.group.add", "nameserver.group.delete", "nameserver.group.update", "account.setting.peer.login.expiration.update", "account.setting.peer.login.expiration.enable", "account.setting.peer.login.expiration.disable", "personal.access.token.create", "personal.access.token.delete", "service.user.create", "service.user.delete", "user.block", "user.unblock", "user.delete", "user.peer.login", "peer.login.expire", "dashboard.login", "integration.create", "integration.update", "integration.delete", "account.setting.peer.approval.enable", "account.setting.peer.approval.disable", "peer.approve", "peer.approval.revoke", "transferred.owner.role", "posture.check.create", "posture.check.update", "posture.check.delete", "peer.inactivity.expiration.enable", "peer.inactivity.expiration.disable", "account.peer.inactivity.expiration.enable", "account.peer.inactivity.expiration.disable", "account.peer.inactivity.expiration.update", "account.setting.group.propagation.enable", "account.setting.group.propagation.disable", "account.setting.routing.peer.dns.resolution.enable", "account.setting.routing.peer.dns.resolution.disable", "network.create", "network.update", "network.delete", "network.resource.create", "network.resource.update", "network.resource.delete", "network.router.create", "network.router.update", "network.router.delete", "resource.group.add", "resource.group.delete", "account.dns.domain.update", "account.setting.lazy.connection.enable", "account.setting.lazy.connection.disable", "account.network.range.update", "peer.ip.update", "user.approve", "user.reject", "user.create", "account.settings.auto.version.update", "identityprovider.create", "identityprovider.update", "identityprovider.delete", "dns.zone.create", "dns.zone.update", "dns.zone.delete", "dns.zone.record.create", "dns.zone.record.update", "dns.zone.record.delete", "peer.job.create", "user.password.change", "user.invite.link.create", "user.invite.link.accept", "user.invite.link.regenerate", "user.invite.link.delete" ] example: route.add initiator_id: description: The ID of the initiator of the event. E.g., an ID of a user that triggered the event. @@ -2266,7 +2267,7 @@ components: type: object additionalProperties: type: string - example: { "name": "my route", "network_range": "10.64.0.0/24", "peer_id": "chacbco6lnnbn6cg5s91"} + example: { "name": "my route", "network_range": "10.64.0.0/24", "peer_id": "chacbco6lnnbn6cg5s91" } required: - id - timestamp @@ -2558,9 +2559,9 @@ components: description: "Email of the user who initiated the event (if any)." example: "alice@netbird.io" name: - type: string - description: "Name of the user who initiated the event (if any)." - example: "Alice Smith" + type: string + description: "Name of the user who initiated the event (if any)." + example: "Alice Smith" required: - id - email @@ -2836,6 +2837,980 @@ components: required: - management_current_version - management_update_available + UsageStats: + type: object + properties: + active_users: + type: integer + format: int64 + description: Number of active users. + example: 15 + total_users: + type: integer + format: int64 + description: Total number of users. + example: 20 + active_peers: + type: integer + format: int64 + description: Number of active peers. + example: 10 + total_peers: + type: integer + format: int64 + description: Total number of peers. + example: 25 + required: + - active_users + - total_users + - active_peers + - total_peers + Product: + type: object + properties: + name: + type: string + description: Name of the product. + example: "Basic Plan" + description: + type: string + description: Detailed description of the product. + example: "This is the basic plan with limited features." + features: + type: array + description: List of features provided by the product. + items: + type: string + example: [ "5 free users", "Basic support" ] + prices: + type: array + description: List of prices for the product in different currencies + items: + $ref: "#/components/schemas/Price" + free: + type: boolean + description: Indicates whether the product is free or not. + example: false + required: + - name + - description + - features + - prices + - free + Price: + type: object + properties: + price_id: + type: string + description: Unique identifier for the price. + example: "price_H2KmRb4u1tP0sR7s" + currency: + type: string + description: Currency code for this price. + example: "USD" + price: + type: integer + description: Price amount in minor units (e.g., cents). + example: 1000 + unit: + type: string + description: Unit of measurement for this price (e.g., per user). + example: "user" + required: + - price_id + - currency + - price + - unit + Subscription: + type: object + properties: + active: + type: boolean + description: Indicates whether the subscription is active or not. + example: true + plan_tier: + type: string + description: The tier of the plan for the subscription. + example: "basic" + price_id: + type: string + description: Unique identifier for the price of the subscription. + example: "price_1HhxOpBzq4JbCqRmJxkpzL2V" + remaining_trial: + type: integer + description: The remaining time for the trial period, in seconds. + example: 3600 + features: + type: array + description: List of features included in the subscription. + items: + type: string + example: [ "free", "idp-sync", "audit-logs" ] + currency: + type: string + description: Currency code of the subscription. + example: "USD" + price: + type: integer + description: Price amount in minor units (e.g., cents). + example: 1000 + provider: + type: string + description: The provider of the subscription. + example: [ "stripe", "aws" ] + updated_at: + type: string + format: date-time + description: The date and time when the subscription was last updated. + example: "2021-08-01T12:00:00Z" + required: + - active + - plan_tier + - price_id + - updated_at + - currency + - price + - provider + PortalResponse: + type: object + properties: + session_id: + type: string + description: The unique identifier for the customer portal session. + example: "cps_test_123456789" + url: + type: string + description: URL to redirect the user to the customer portal. + example: "https://billing.stripe.com/session/a1b2c3d4e5f6g7h8i9j0k" + required: + - session_id + - url + CheckoutResponse: + type: object + properties: + session_id: + type: string + description: The unique identifier for the checkout session. + example: "cs_test_a1b2c3d4e5f6g7h8i9j0" + url: + type: string + description: URL to redirect the user to the checkout session. + example: "https://checkout.stripe.com/pay/cs_test_a1b2c3d4e5f6g7h8i9j0" + required: + - session_id + - url + StripeWebhookEvent: + type: object + properties: + type: + type: string + description: The type of event received from Stripe. + example: "customer.subscription.updated" + data: + type: object + description: The data associated with the event from Stripe. + example: + object: + id: "sub_123456789" + object: "subscription" + status: "active" + items: + object: "list" + data: + - id: "si_123456789" + object: "subscription_item" + price: + id: "price_1HhxOpBzq4JbCqRmJxkpzL2V" + object: "price" + unit_amount: 2000 + currency: "usd" + billing_cycle_anchor: 1609459200 + InvoiceResponse: + type: object + properties: + id: + type: string + description: The Stripe invoice id + example: "in_1MtHbELkdIwHu7ixl4OzzPMv" + type: + type: string + description: The invoice type + enum: + - account + - tenants + period_start: + type: string + format: date-time + description: The start date of the invoice period. + example: "2021-08-01T12:00:00Z" + period_end: + type: string + format: date-time + description: The end date of the invoice period. + example: "2021-08-31T12:00:00Z" + required: + - id + - type + - period_start + - period_end + InvoicePDFResponse: + type: object + properties: + url: + type: string + description: URL to redirect the user to invoice. + example: "https://invoice.stripe.com/i/acct_1M2DaBKina4I2KUb/test_YWNjdF8xTTJEdVBLaW5hM0kyS1ViLF1SeFpQdEJZd3lUOGNEajNqeWdrdXY2RFM4aHcyCnpsLDEzMjg3GTgyNQ02000JoIHc1X?s=db" + required: + - url + CreateTenantRequest: + type: object + properties: + name: + type: string + description: The name for the MSP tenant + example: "My new tenant" + domain: + type: string + description: The name for the MSP tenant + example: "tenant.com" + groups: + description: MSP users Groups that can access the Tenant and Roles to assume + type: array + items: + $ref: "#/components/schemas/TenantGroupResponse" + required: + - name + - domain + - groups + UpdateTenantRequest: + type: object + properties: + name: + type: string + description: The name for the MSP tenant + example: "My new tenant" + groups: + description: MSP users Groups that can access the Tenant and Roles to assume + type: array + items: + $ref: "#/components/schemas/TenantGroupResponse" + required: + - name + - groups + GetTenantsResponse: + type: array + items: + $ref: "#/components/schemas/TenantResponse" + DNSChallengeResponse: + type: object + properties: + dns_challenge: + type: string + description: The DNS challenge to set in a TXT record + example: YXNkYSBkYXNhc2Rhc2RhIGFzZGFzZDJhc2QyNDUxNQ + required: + - dns_challenge + TenantGroupResponse: + type: object + properties: + id: + type: string + description: The Group ID + example: ch8i4ug6lnn4g9hqv7m0 + role: + type: string + description: The Role name + example: "admin" + required: + - id + - role + TenantResponse: + type: object + properties: + id: + type: string + description: The updated MSP tenant account ID + example: ch8i4ug6lnn4g9hqv7m0 + name: + type: string + description: The name for the MSP tenant + example: "My new tenant" + domain: + type: string + description: The tenant account domain + example: "tenant.com" + groups: + description: MSP users Groups that can access the Tenant and Roles to assume + type: array + items: + $ref: "#/components/schemas/TenantGroupResponse" + activated_at: + type: string + format: date-time + description: The date and time when the tenant was activated. + example: "2021-08-01T12:00:00Z" + dns_challenge: + type: string + description: The DNS challenge to set in a TXT record + example: YXNkYSBkYXNhc2Rhc2RhIGFzZGFzZDJhc2QyNDUxNQ + created_at: + type: string + format: date-time + description: The date and time when the tenant was created. + example: "2021-08-01T12:00:00Z" + updated_at: + type: string + format: date-time + description: The date and time when the tenant was last updated. + example: "2021-08-01T12:00:00Z" + invited_at: + type: string + format: date-time + description: The date and time when the existing tenant was invited. + example: "2021-08-01T12:00:00Z" + status: + type: string + description: The status of the tenant + enum: + - existing + - invited + - pending + - active + example: "active" + required: + - id + - name + - domain + - groups + - created_at + - updated_at + - status + - dns_challenge + CreateIntegrationRequest: + type: object + description: "Request payload for creating a new event streaming integration. Also used as the structure for the PUT request body, but not all fields are applicable for updates (see PUT operation description)." + required: + - platform + - config + - enabled + properties: + platform: + type: string + description: The event streaming platform to integrate with (e.g., "datadog", "s3", "firehose"). This field is used for creation. For updates (PUT), this field, if sent, is ignored by the backend. + enum: [ "datadog", "s3", "firehose", "generic_http" ] + example: "s3" + config: + type: object + additionalProperties: + type: string + description: Platform-specific configuration as key-value pairs. For creation, all necessary credentials and settings must be provided. For updates, provide the fields to change or the entire new configuration. + example: { "bucket_name": "my-event-logs", "region": "us-east-1", "access_key_id": "AKIA...", "secret_access_key": "YOUR_SECRET_KEY" } + enabled: + type: boolean + description: "Specifies whether the integration is enabled. During creation (POST), this value is sent by the client, but the provided backend manager function `CreateIntegration` does not appear to use it directly, so its effect on creation should be verified. During updates (PUT), this field is used to enable or disable the integration." + example: true + IntegrationResponse: + type: object + description: Represents an event streaming integration. + properties: + id: + type: integer + format: int64 + description: The unique numeric identifier for the integration. + example: 123 + minimum: 0 + account_id: + type: string + description: The identifier of the account this integration belongs to. + example: "acc_abcdef123456" + enabled: + type: boolean + description: Whether the integration is currently active. + example: true + platform: + type: string + description: The event streaming platform. + enum: [ "datadog", "s3", "firehose", "generic_http" ] + example: "datadog" + created_at: + type: string + format: date-time + description: Timestamp of when the integration was created. + example: "2023-05-15T10:30:00Z" + updated_at: + type: string + format: date-time + description: Timestamp of when the integration was last updated. + example: "2023-05-16T11:45:00Z" + config: + type: object + additionalProperties: + type: string + description: Configuration for the integration. Sensitive keys (like API keys, secret keys) are masked with '****' in responses, as indicated by the GetIntegration handler logic. + example: { "api_key": "****", "site": "datadoghq.com", "region": "us-east-1" } + EDRIntuneRequest: + type: object + description: "Request payload for creating or updating a EDR Intune integration." + required: + - client_id + - tenant_id + - secret + - groups + - last_synced_interval + properties: + client_id: + type: string + description: The Azure application client id + tenant_id: + type: string + description: The Azure tenant id + secret: + type: string + description: The Azure application client secret + groups: + type: array + description: The Groups this integrations applies to + items: + type: string + last_synced_interval: + type: integer + description: The devices last sync requirement interval in hours. Minimum value is 24 hours. + minimum: 24 + enabled: + type: boolean + description: Indicates whether the integration is enabled + default: true + EDRIntuneResponse: + type: object + description: Represents a Intune EDR integration configuration + required: + - id + - account_id + - created_by + - last_synced_at + - created_at + - updated_at + - client_id + - tenant_id + - groups + - last_synced_interval + - enabled + properties: + id: + type: integer + format: int64 + description: The unique numeric identifier for the integration. + example: 123 + minimum: 0 + account_id: + type: string + description: The identifier of the account this integration belongs to. + example: "acc_abcdef123456" + last_synced_at: + type: string + format: date-time + description: Timestamp of when the integration was last synced. + example: "2023-05-15T10:30:00Z" + created_by: + type: string + description: The user id that created the integration + created_at: + type: string + format: date-time + description: Timestamp of when the integration was created. + example: "2023-05-15T10:30:00Z" + updated_at: + type: string + format: date-time + description: Timestamp of when the integration was last updated. + example: "2023-05-16T11:45:00Z" + client_id: + type: string + description: The Azure application client id + example: "acc_abcdef123456" + tenant_id: + type: string + description: The Azure tenant id + example: "acc_abcdef123456" + groups: + type: array + description: List of groups + items: + $ref: '#/components/schemas/Group' + last_synced_interval: + type: integer + description: The devices last sync requirement interval in hours. + enabled: + type: boolean + description: Indicates whether the integration is enabled + EDRSentinelOneRequest: + type: object + description: Request payload for creating or updating a EDR SentinelOne integration + properties: + api_token: + type: string + description: SentinelOne API token + api_url: + type: string + description: The Base URL of SentinelOne API + groups: + type: array + description: The Groups this integrations applies to + items: + type: string + last_synced_interval: + type: integer + description: The devices last sync requirement interval in hours. Minimum value is 24 hours. + minimum: 24 + enabled: + type: boolean + description: Indicates whether the integration is enabled + default: true + match_attributes: + $ref: '#/components/schemas/SentinelOneMatchAttributes' + required: + - api_token + - api_url + - groups + - last_synced_interval + - match_attributes + EDRSentinelOneResponse: + type: object + description: Represents a SentinelOne EDR integration configuration + required: + - id + - account_id + - created_by + - last_synced_at + - created_at + - updated_at + - api_url + - groups + - last_synced_interval + - match_attributes + - enabled + properties: + id: + type: integer + format: int64 + description: The unique numeric identifier for the integration. + example: 123 + account_id: + type: string + description: The identifier of the account this integration belongs to. + example: "ch8i4ug6lnn4g9hqv7l0" + last_synced_at: + type: string + format: date-time + description: Timestamp of when the integration was last synced. + example: "2023-05-15T10:30:00Z" + created_by: + type: string + description: The user id that created the integration + created_at: + type: string + format: date-time + description: Timestamp of when the integration was created. + example: "2023-05-15T10:30:00Z" + updated_at: + type: string + format: date-time + description: Timestamp of when the integration was last updated. + example: "2023-05-16T11:45:00Z" + api_url: + type: string + description: The Base URL of SentinelOne API + groups: + type: array + description: List of groups + items: + $ref: '#/components/schemas/Group' + last_synced_interval: + type: integer + description: The devices last sync requirement interval in hours. + match_attributes: + $ref: '#/components/schemas/SentinelOneMatchAttributes' + enabled: + type: boolean + description: Indicates whether the integration is enabled + SentinelOneMatchAttributes: + type: object + description: Attribute conditions to match when approving agents + additionalProperties: false + properties: + active_threats: + description: The maximum allowed number of active threats on the agent + type: integer + example: 0 + encrypted_applications: + description: Whether disk encryption is enabled on the agent + type: boolean + firewall_enabled: + description: Whether the agent firewall is enabled + type: boolean + infected: + description: Whether the agent is currently flagged as infected + type: boolean + is_active: + description: Whether the agent has been recently active and reporting + type: boolean + is_up_to_date: + description: Whether the agent is running the latest available version + type: boolean + network_status: + description: The current network connectivity status of the device + type: string + enum: [ "connected", "disconnected", "quarantined" ] + operational_state: + description: The current operational state of the agent + type: string + + EDRFalconRequest: + type: object + description: Request payload for creating or updating a EDR Falcon integration + properties: + client_id: + type: string + description: CrowdStrike API client ID + secret: + type: string + description: CrowdStrike API client secret + cloud_id: + type: string + description: CrowdStrike cloud identifier (e.g., "us-1", "us-2", "eu-1") + groups: + type: array + description: The Groups this integration applies to + items: + type: string + zta_score_threshold: + type: integer + description: The minimum Zero Trust Assessment score required for agent approval (0-100) + minimum: 0 + maximum: 100 + example: 75 + enabled: + type: boolean + description: Indicates whether the integration is enabled + default: true + required: + - client_id + - secret + - cloud_id + - groups + - zta_score_threshold + EDRFalconResponse: + type: object + description: Represents a Falcon EDR integration + required: + - id + - account_id + - last_synced_at + - created_by + - created_at + - updated_at + - cloud_id + - groups + - zta_score_threshold + - enabled + properties: + id: + type: integer + format: int64 + description: The unique numeric identifier for the integration. + example: 123 + account_id: + type: string + description: The identifier of the account this integration belongs to. + example: "ch8i4ug6lnn4g9hqv7l0" + last_synced_at: + type: string + format: date-time + description: Timestamp of when the integration was last synced. + example: "2023-05-15T10:30:00Z" + created_by: + type: string + description: The user id that created the integration + created_at: + type: string + format: date-time + description: Timestamp of when the integration was created. + example: "2023-05-15T10:30:00Z" + updated_at: + type: string + format: date-time + description: Timestamp of when the integration was last updated. + example: "2023-05-16T11:45:00Z" + cloud_id: + type: string + description: CrowdStrike cloud identifier + groups: + type: array + description: List of groups + items: + $ref: '#/components/schemas/Group' + zta_score_threshold: + type: integer + description: The minimum Zero Trust Assessment score required for agent approval (0-100) + enabled: + type: boolean + description: Indicates whether the integration is enabled + + EDRHuntressRequest: + type: object + description: Request payload for creating or updating a EDR Huntress integration + properties: + api_key: + type: string + description: Huntress API key + api_secret: + type: string + description: Huntress API secret + groups: + type: array + description: The Groups this integrations applies to + items: + type: string + last_synced_interval: + type: integer + description: The devices last sync requirement interval in hours. Minimum value is 24 hours + minimum: 24 + enabled: + type: boolean + description: Indicates whether the integration is enabled + default: true + match_attributes: + $ref: '#/components/schemas/HuntressMatchAttributes' + required: + - api_key + - api_secret + - groups + - last_synced_interval + - match_attributes + EDRHuntressResponse: + type: object + description: Represents a Huntress EDR integration configuration + required: + - id + - account_id + - created_by + - last_synced_at + - created_at + - updated_at + - groups + - last_synced_interval + - match_attributes + - enabled + properties: + id: + type: integer + format: int64 + description: The unique numeric identifier for the integration. + example: 123 + account_id: + type: string + description: The identifier of the account this integration belongs to. + example: "ch8i4ug6lnn4g9hqv7l0" + last_synced_at: + type: string + format: date-time + description: Timestamp of when the integration was last synced. + example: "2023-05-15T10:30:00Z" + created_by: + type: string + description: The user id that created the integration + created_at: + type: string + format: date-time + description: Timestamp of when the integration was created. + example: "2023-05-15T10:30:00Z" + updated_at: + type: string + format: date-time + description: Timestamp of when the integration was last updated. + example: "2023-05-16T11:45:00Z" + groups: + type: array + description: List of groups + items: + $ref: '#/components/schemas/Group' + last_synced_interval: + type: integer + description: The devices last sync requirement interval in hours. + enabled: + type: boolean + description: Indicates whether the integration is enabled + default: true + match_attributes: + $ref: '#/components/schemas/HuntressMatchAttributes' + + HuntressMatchAttributes: + type: object + description: Attribute conditions to match when approving agents + additionalProperties: false + properties: + defender_policy_status: + type: string + description: Policy status of Defender AV for Managed Antivirus. + example: "Compliant" + defender_status: + type: string + description: Status of Defender AV Managed Antivirus. + example: "Healthy" + defender_substatus: + type: string + description: Sub-status of Defender AV Managed Antivirus. + example: "Up to date" + firewall_status: + type: string + description: Status of agent firewall. Can be one of Disabled, Enabled, Pending Isolation, Isolated, Pending Release. + example: "Enabled" + + CreateScimIntegrationRequest: + type: object + description: Request payload for creating an SCIM IDP integration + required: + - prefix + - provider + properties: + prefix: + type: string + description: The connection prefix used for the SCIM provider + provider: + type: string + description: Name of the SCIM identity provider + group_prefixes: + type: array + description: List of start_with string patterns for groups to sync + items: + type: string + example: [ "Engineering", "Sales" ] + user_group_prefixes: + type: array + description: List of start_with string patterns for groups which users to sync + items: + type: string + example: [ "Users" ] + UpdateScimIntegrationRequest: + type: object + description: Request payload for updating an SCIM IDP integration + properties: + enabled: + type: boolean + description: Indicates whether the integration is enabled + example: true + group_prefixes: + type: array + description: List of start_with string patterns for groups to sync + items: + type: string + example: [ "Engineering", "Sales" ] + user_group_prefixes: + type: array + description: List of start_with string patterns for groups which users to sync + items: + type: string + example: [ "Users" ] + ScimIntegration: + type: object + description: Represents a SCIM IDP integration + required: + - id + - enabled + - provider + - group_prefixes + - user_group_prefixes + - auth_token + - last_synced_at + properties: + id: + type: integer + format: int64 + description: The unique identifier for the integration + example: 123 + enabled: + type: boolean + description: Indicates whether the integration is enabled + example: true + provider: + type: string + description: Name of the SCIM identity provider + group_prefixes: + type: array + description: List of start_with string patterns for groups to sync + items: + type: string + example: [ "Engineering", "Sales" ] + user_group_prefixes: + type: array + description: List of start_with string patterns for groups which users to sync + items: + type: string + example: [ "Users" ] + auth_token: + type: string + description: SCIM API token (full on creation, masked otherwise) + example: "nbs_abc***********************************" + last_synced_at: + type: string + format: date-time + description: Timestamp of when the integration was last synced + example: "2023-05-15T10:30:00Z" + IdpIntegrationSyncLog: + type: object + description: Represents a synchronization log entry for an integration + required: + - id + - level + - timestamp + - message + properties: + id: + type: integer + format: int64 + description: The unique identifier for the sync log + example: 123 + level: + type: string + description: The log level + example: "info" + timestamp: + type: string + format: date-time + description: Timestamp of when the log was created + example: "2023-05-15T10:30:00Z" + message: + type: string + description: Log message + example: "Successfully synchronized users and groups" + ScimTokenResponse: + type: object + description: Response containing the regenerated SCIM token + required: + - auth_token + properties: + auth_token: + type: string + description: The newly generated SCIM API token + example: "nbs_F3f0d..." + BypassResponse: + type: object + description: Response for bypassed peer operations. + required: + - peer_id + properties: + peer_id: + type: string + description: The ID of the bypassed peer. + example: "chacbco6lnnbn6cg5s91" + ErrorResponse: + type: object + description: "Standard error response. Note: The exact structure of this error response is inferred from `util.WriteErrorResponse` and `util.WriteError` usage in the provided Go code, as a specific Go struct for errors was not provided." + properties: + message: + type: string + description: A human-readable error message. + example: "couldn't parse JSON request" responses: not_found: description: Resource not found @@ -2894,8 +3869,8 @@ paths: description: Returns version information for NetBird components including the current management server version and latest available versions from GitHub. tags: [ Instance ] security: - - BearerAuth: [] - - TokenAuth: [] + - BearerAuth: [ ] + - TokenAuth: [ ] responses: '200': description: Version information @@ -2942,8 +3917,8 @@ paths: description: Retrieve all jobs for a given peer tags: [ Jobs ] security: - - BearerAuth: [] - - TokenAuth: [] + - BearerAuth: [ ] + - TokenAuth: [ ] parameters: - in: path name: peerId @@ -2973,8 +3948,8 @@ paths: description: Create a new job for a given peer tags: [ Jobs ] security: - - BearerAuth: [] - - TokenAuth: [] + - BearerAuth: [ ] + - TokenAuth: [ ] parameters: - in: path name: peerId @@ -3010,8 +3985,8 @@ paths: description: Retrieve details of a specific job tags: [ Jobs ] security: - - BearerAuth: [] - - TokenAuth: [] + - BearerAuth: [ ] + - TokenAuth: [ ] parameters: - in: path name: peerId @@ -3401,7 +4376,7 @@ paths: responses: '200': description: Invite status code - content: {} + content: { } '400': "$ref": "#/components/responses/bad_request" '401': @@ -3458,7 +4433,7 @@ paths: responses: '200': description: User rejected successfully - content: {} + content: { } '400': "$ref": "#/components/responses/bad_request" '401': @@ -3492,7 +4467,7 @@ paths: responses: '200': description: Password changed successfully - content: {} + content: { } '400': "$ref": "#/components/responses/bad_request" '401': @@ -3670,7 +4645,7 @@ paths: summary: Get invite information description: Retrieves public information about an invite. This endpoint is unauthenticated and protected by the token itself. tags: [ Users ] - security: [] + security: [ ] parameters: - in: path name: token @@ -3697,7 +4672,7 @@ paths: summary: Accept an invite description: Accepts an invite and creates the user with the provided password. This endpoint is unauthenticated and protected by the token itself. tags: [ Users ] - security: [] + security: [ ] parameters: - in: path name: token @@ -5971,21 +6946,21 @@ paths: required: false schema: type: string - enum: [TYPE_UNKNOWN, TYPE_START, TYPE_END, TYPE_DROP] + enum: [ TYPE_UNKNOWN, TYPE_START, TYPE_END, TYPE_DROP ] - name: connection_type in: query description: Filter by connection type required: false schema: type: string - enum: [P2P, ROUTED] + enum: [ P2P, ROUTED ] - name: direction in: query description: Filter by direction required: false schema: type: string - enum: [INGRESS, EGRESS, DIRECTION_UNKNOWN] + enum: [ INGRESS, EGRESS, DIRECTION_UNKNOWN ] - name: search in: query description: Case-insensitive partial match on user email, source/destination names, and source/destination addresses @@ -6356,3 +7331,1735 @@ paths: "$ref": "#/components/responses/forbidden" '500': "$ref": "#/components/responses/internal_error" + /api/integrations/billing/usage: + get: + summary: Get current usage + tags: + - Usage + responses: + "200": + description: Current usage data + content: + application/json: + schema: + $ref: "#/components/schemas/UsageStats" + "401": + $ref: "#/components/responses/requires_authentication" + "500": + $ref: "#/components/responses/internal_error" + /api/integrations/billing/subscription: + get: + summary: Get current subscription + tags: + - Subscription + responses: + "200": + description: Subscription details + content: + application/json: + schema: + $ref: "#/components/schemas/Subscription" + "401": + $ref: "#/components/responses/requires_authentication" + "404": + description: No subscription found + "500": + $ref: "#/components/responses/internal_error" + put: + summary: Change subscription + tags: + - Subscription + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + priceID: + type: string + description: The Price ID to change the subscription to. + example: "price_1HhxOpBzq4JbCqRmJxkpzL2V" + plan_tier: + type: string + description: The plan tier to change the subscription to. + example: business + responses: + "200": + description: Subscription successfully changed + "400": + $ref: "#/components/responses/bad_request" + "401": + $ref: "#/components/responses/requires_authentication" + "500": + $ref: "#/components/responses/internal_error" + /api/integrations/billing/plans: + get: + summary: Get available plans + tags: + - Plans + responses: + "200": + description: List of available plans + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/Product" + "401": + $ref: "#/components/responses/requires_authentication" + "500": + $ref: "#/components/responses/internal_error" + /api/integrations/billing/checkout: + post: + summary: Create checkout session + tags: + - Checkout + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + baseURL: + type: string + description: The base URL for the redirect after checkout. + example: "https://app.netbird.io/plans/success" + priceID: + type: string + description: The Price ID for checkout. + example: "price_1HhxOpBzq4JbCqRmJxkpzL2V" + enableTrial: + type: boolean + description: Enables a 14-day trial for the account. + required: + - baseURL + - priceID + responses: + "200": + description: Checkout session URL + content: + application/json: + schema: + $ref: "#/components/schemas/CheckoutResponse" + "400": + $ref: "#/components/responses/bad_request" + "401": + $ref: "#/components/responses/requires_authentication" + "500": + $ref: "#/components/responses/internal_error" + /api/integrations/billing/portal: + get: + summary: Get customer portal URL + tags: + - Portal + parameters: + - in: query + name: baseURL + schema: + type: string + required: true + description: The base URL for the redirect after accessing the portal. + example: "https://app.netbird.io/plans" + responses: + "200": + description: Customer portal URL + content: + application/json: + schema: + $ref: "#/components/schemas/PortalResponse" + "400": + $ref: "#/components/responses/bad_request" + "401": + $ref: "#/components/responses/requires_authentication" + "500": + $ref: "#/components/responses/internal_error" + /api/integrations/billing/invoices: + get: + summary: Get account's paid invoices + tags: + - Invoice + responses: + "200": + description: The account's paid invoices + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/InvoiceResponse" + "400": + $ref: "#/components/responses/bad_request" + "401": + $ref: "#/components/responses/requires_authentication" + "500": + $ref: "#/components/responses/internal_error" + /api/integrations/billing/invoices/{id}/pdf: + get: + summary: Get account invoice URL to Stripe. + tags: + - Invoice + parameters: + - in: path + name: id + required: true + schema: + type: string + description: The unique identifier of the invoice + responses: + "200": + description: The invoice URL to Stripe + content: + application/json: + schema: + $ref: "#/components/schemas/InvoicePDFResponse" + "400": + $ref: "#/components/responses/bad_request" + "401": + $ref: "#/components/responses/requires_authentication" + "500": + $ref: "#/components/responses/internal_error" + /api/integrations/billing/invoices/{id}/csv: + get: + summary: Get account invoice CSV. + tags: + - Invoice + parameters: + - in: path + name: id + required: true + schema: + type: string + description: The unique identifier of the invoice + responses: + "200": + description: The invoice CSV + headers: + Content-Disposition: + schema: + type: string + example: attachment; filename=in_1MtHbELkdIwHu7ixl4OzzPMv.csv + content: + text/csv: + schema: + type: string + example: | + description,qty,unit_price,amount + line item 2, 5, 1.00, 5.00 + line item 1, 10, 0.50, 5.00 + "400": + $ref: "#/components/responses/bad_request" + "401": + $ref: "#/components/responses/requires_authentication" + "500": + $ref: "#/components/responses/internal_error" + /api/integrations/billing/aws/marketplace/activate: + post: + summary: Activate AWS Marketplace subscription. + tags: + - AWS Marketplace + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + plan_tier: + type: string + description: The plan tier to activate the subscription for. + example: business + required: + - plan_tier + responses: + "200": + description: AWS subscription successfully activated + "400": + $ref: "#/components/responses/bad_request" + "401": + $ref: "#/components/responses/requires_authentication" + "500": + $ref: "#/components/responses/internal_error" + /api/integrations/billing/aws/marketplace/enrich: + post: + summary: Enrich AWS Marketplace subscription with Account ID. + tags: + - AWS Marketplace + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + aws_user_id: + type: string + description: The AWS user ID. + example: eRF345hgdgFyu + required: + - aws_user_id + responses: + "200": + description: AWS subscription successfully enriched with Account ID. + "400": + $ref: "#/components/responses/bad_request" + "401": + $ref: "#/components/responses/requires_authentication" + "500": + $ref: "#/components/responses/internal_error" + /api/integrations/msp/tenants: + get: + summary: Get MSP tenants + tags: + - MSP + responses: + "200": + description: Get MSP tenants response + content: + application/json: + schema: + $ref: "#/components/schemas/GetTenantsResponse" + "400": + $ref: "#/components/responses/bad_request" + "403": + $ref: "#/components/responses/requires_authentication" + "500": + $ref: "#/components/responses/internal_error" + post: + summary: Create MSP tenant + tags: + - MSP + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateTenantRequest" + responses: + "200": + description: Create MSP tenant Response + content: + application/json: + schema: + $ref: "#/components/schemas/TenantResponse" + "400": + $ref: "#/components/responses/bad_request" + "403": + $ref: "#/components/responses/requires_authentication" + "500": + $ref: "#/components/responses/internal_error" + /api/integrations/msp/tenants/{id}: + put: + summary: Update MSP tenant + tags: + - MSP + parameters: + - in: path + name: id + required: true + schema: + type: string + description: The unique identifier of a tenant account + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateTenantRequest" + responses: + "200": + description: Update MSP tenant Response + content: + application/json: + schema: + $ref: "#/components/schemas/TenantResponse" + "400": + $ref: "#/components/responses/bad_request" + "403": + $ref: "#/components/responses/requires_authentication" + "500": + $ref: "#/components/responses/internal_error" + /api/integrations/msp/tenants/{id}/unlink: + post: + summary: Unlink a tenant + tags: + - MSP + parameters: + - in: path + name: id + required: true + schema: + type: string + description: The unique identifier of a tenant account + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + owner: + type: string + description: The new owners user ID. + example: "google-oauth2|123456789012345678901" + required: + - owner + responses: + "200": + description: Successfully unlinked the tenant + "400": + $ref: "#/components/responses/bad_request" + "403": + $ref: "#/components/responses/requires_authentication" + "404": + description: The tenant was not found + "500": + $ref: "#/components/responses/internal_error" + /api/integrations/msp/tenants/{id}/dns: + post: + summary: Verify a tenant domain DNS challenge + tags: + - MSP + parameters: + - in: path + name: id + required: true + schema: + type: string + description: The unique identifier of a tenant account + responses: + "200": + description: Successfully verified the DNS challenge + "400": + $ref: "#/components/responses/bad_request" + "403": + $ref: "#/components/responses/requires_authentication" + "404": + description: The tenant was not found + "500": + $ref: "#/components/responses/internal_error" + "501": + description: DNS Challenge Failed Response + content: + application/json: + schema: + $ref: "#/components/schemas/DNSChallengeResponse" + /api/integrations/msp/tenants/{id}/subscription: + post: + summary: Create subscription for Tenant + tags: + - MSP + parameters: + - in: path + name: id + required: true + schema: + type: string + description: The unique identifier of a tenant account + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + priceID: + type: string + description: The Price ID to change the subscription to. + example: "price_1HhxOpBzq4JbCqRmJxkpzL2V" + required: + - priceID + responses: + "200": + description: Successfully created subscription for Tenant + "400": + $ref: "#/components/responses/bad_request" + "403": + $ref: "#/components/responses/requires_authentication" + "404": + description: The tenant was not found + "500": + $ref: "#/components/responses/internal_error" + /api/integrations/msp/tenants/{id}/invite: + post: + summary: Invite existing account as a Tenant to the MSP account + tags: + - MSP + parameters: + - in: path + name: id + required: true + schema: + type: string + description: The unique identifier of an existing tenant account + responses: + "200": + description: Successfully invited existing Tenant to the MSP account + content: + application/json: + schema: + $ref: "#/components/schemas/TenantResponse" + "400": + $ref: "#/components/responses/bad_request" + "403": + $ref: "#/components/responses/requires_authentication" + "404": + description: The tenant was not found + "500": + $ref: "#/components/responses/internal_error" + put: + summary: Response by the invited Tenant account owner + tags: + - MSP + parameters: + - in: path + name: id + required: true + schema: + type: string + description: The unique identifier of an existing tenant account + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + value: + type: string + description: Accept or decline the invitation. + enum: + - accept + - decline + required: + - value + responses: + "200": + description: Successful response + "400": + $ref: "#/components/responses/bad_request" + "403": + $ref: "#/components/responses/requires_authentication" + "404": + description: The tenant was not found + "500": + $ref: "#/components/responses/internal_error" + /api/integrations/edr/intune: + post: + tags: + - EDR Intune Integrations + summary: Create EDR Intune Integration + description: | + Creates a new EDR Intune integration for the authenticated account. + operationId: createEDRIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/EDRIntuneRequest' + responses: + '200': + description: Integration created successfully. Returns the created integration. + content: + application/json: + schema: + $ref: '#/components/schemas/EDRIntuneResponse' + '400': + description: Bad Request (e.g., invalid JSON, missing required fields, validation error). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized (e.g., missing or invalid authentication token). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + get: + tags: + - EDR Intune Integrations + summary: Get EDR Intune Integration + description: Retrieves a specific EDR Intune integration by its ID. + operationId: getEDRIntegration + responses: + '200': + description: Successfully retrieved the integration details. Config keys are masked. + content: + application/json: + schema: + $ref: '#/components/schemas/EDRIntuneResponse' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found (e.g., integration with the given ID does not exist). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + put: + tags: + - EDR Intune Integrations + summary: Update EDR Intune Integration + description: | + Updates an existing EDR Intune Integration. The request body structure is `EDRIntuneRequest`. + operationId: updateEDRIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/EDRIntuneRequest' + responses: + '200': + description: Integration updated successfully. Returns the updated integration. + content: + application/json: + schema: + $ref: '#/components/schemas/EDRIntuneResponse' + '400': + description: Bad Request (e.g., invalid JSON, validation error, invalid ID). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - EDR Intune Integrations + summary: Delete EDR Intune Integration + description: Deletes an EDR Intune Integration by its ID. + operationId: deleteIntegration + responses: + '200': + description: Integration deleted successfully. Returns an empty object. + content: + application/json: + schema: + type: object + example: { } + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/edr/sentinelone: + post: + tags: + - EDR SentinelOne Integrations + summary: Create EDR SentinelOne Integration + description: Creates a new EDR SentinelOne integration + operationId: createSentinelOneEDRIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/EDRSentinelOneRequest' + responses: + '200': + description: Integration created successfully. Returns the created integration. + content: + application/json: + schema: + $ref: '#/components/schemas/EDRSentinelOneResponse' + '400': + description: Bad Request (e.g., invalid JSON, missing required fields, validation error). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized (e.g., missing or invalid authentication token). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + get: + tags: + - EDR SentinelOne Integrations + summary: Get EDR SentinelOne Integration + description: Retrieves a specific EDR SentinelOne integration by its ID. + responses: + '200': + description: Successfully retrieved the integration details. + content: + application/json: + schema: + $ref: '#/components/schemas/EDRSentinelOneResponse' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found (e.g., integration with the given ID does not exist). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + put: + tags: + - EDR SentinelOne Integrations + summary: Update EDR SentinelOne Integration + description: Updates an existing EDR SentinelOne Integration. + operationId: updateSentinelOneEDRIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/EDRSentinelOneRequest' + responses: + '200': + description: Integration updated successfully. Returns the updated integration. + content: + application/json: + schema: + $ref: '#/components/schemas/EDRSentinelOneResponse' + '400': + description: Bad Request (e.g., invalid JSON, validation error, invalid ID). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - EDR SentinelOne Integrations + summary: Delete EDR SentinelOne Integration + description: Deletes an EDR SentinelOne Integration by its ID. + responses: + '200': + description: Integration deleted successfully. Returns an empty object. + content: + application/json: + schema: + type: object + example: { } + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + /api/integrations/edr/falcon: + post: + tags: + - EDR Falcon Integrations + summary: Create EDR Falcon Integration + description: Creates a new EDR Falcon integration + operationId: createFalconEDRIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/EDRFalconRequest' + responses: + '200': + description: Integration created successfully. Returns the created integration. + content: + application/json: + schema: + $ref: '#/components/schemas/EDRFalconResponse' + '400': + description: Bad Request (e.g., invalid JSON, missing required fields, validation error). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized (e.g., missing or invalid authentication token). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + get: + tags: + - EDR Falcon Integrations + summary: Get EDR Falcon Integration + description: Retrieves a specific EDR Falcon integration by its ID. + responses: + '200': + description: Successfully retrieved the integration details. + content: + application/json: + schema: + $ref: '#/components/schemas/EDRFalconResponse' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found (e.g., integration with the given ID does not exist). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + put: + tags: + - EDR Falcon Integrations + summary: Update EDR Falcon Integration + description: Updates an existing EDR Falcon Integration. + operationId: updateFalconEDRIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/EDRFalconRequest' + responses: + '200': + description: Integration updated successfully. Returns the updated integration. + content: + application/json: + schema: + $ref: '#/components/schemas/EDRFalconResponse' + '400': + description: Bad Request (e.g., invalid JSON, validation error, invalid ID). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - EDR Falcon Integrations + summary: Delete EDR Falcon Integration + description: Deletes an existing EDR Falcon Integration by its ID. + responses: + '202': + description: Integration deleted successfully. Typically returns no content. + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + /api/integrations/scim-idp: + post: + tags: + - IDP + summary: Create SCIM IDP Integration + description: Creates a new SCIM integration + operationId: createSCIMIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateScimIntegrationRequest' + responses: + '200': + description: Integration created successfully. Returns the created integration. + content: + application/json: + schema: + $ref: '#/components/schemas/ScimIntegration' + '400': + description: Bad Request (e.g., invalid JSON, missing required fields, validation error). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized (e.g., missing or invalid authentication token). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + get: + tags: + - IDP + summary: Get All SCIM IDP Integrations + description: Retrieves all SCIM IDP integrations for the authenticated account + operationId: getAllSCIMIntegrations + responses: + '200': + description: A list of SCIM IDP integrations. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ScimIntegration' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/scim-idp/{id}: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the SCIM IDP integration. + schema: + type: string + example: "ch8i4ug6lnn4g9hqv7m0" + get: + tags: + - IDP + summary: Get SCIM IDP Integration + description: Retrieves an SCIM IDP integration by ID. + operationId: getSCIMIntegration + responses: + '200': + description: Successfully retrieved the integration details. + content: + application/json: + schema: + $ref: '#/components/schemas/ScimIntegration' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found (e.g., integration with the given ID does not exist). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + put: + tags: + - IDP + summary: Update SCIM IDP Integration + description: Updates an existing SCIM IDP Integration. + operationId: updateSCIMIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateScimIntegrationRequest' + responses: + '200': + description: Integration updated successfully. Returns the updated integration. + content: + application/json: + schema: + $ref: '#/components/schemas/ScimIntegration' + '400': + description: Bad Request (e.g., invalid JSON, validation error, invalid ID). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - IDP + summary: Delete SCIM IDP Integration + description: Deletes an SCIM IDP integration by ID. + operationId: deleteSCIMIntegration + responses: + '200': + description: Integration deleted successfully. Returns an empty object. + content: + application/json: + schema: + type: object + example: { } + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/scim-idp/{id}/token: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the SCIM IDP integration. + schema: + type: string + example: "ch8i4ug6lnn4g9hqv7m0" + post: + tags: + - IDP + summary: Regenerate SCIM Token + description: Regenerates the SCIM API token for an SCIM IDP integration. + operationId: regenerateSCIMToken + responses: + '200': + description: Token regenerated successfully. Returns the new token. + content: + application/json: + schema: + $ref: '#/components/schemas/ScimTokenResponse' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/scim-idp/{id}/logs: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the SCIM IDP integration. + schema: + type: string + example: "ch8i4ug6lnn4g9hqv7m0" + get: + tags: + - IDP + summary: Get SCIM Integration Sync Logs + description: Retrieves synchronization logs for a SCIM IDP integration. + operationId: getSCIMIntegrationLogs + responses: + '200': + description: Successfully retrieved the integration sync logs. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/IdpIntegrationSyncLog' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + /api/integrations/edr/huntress: + post: + tags: + - EDR Huntress Integrations + summary: Create EDR Huntress Integration + description: Creates a new EDR Huntress integration + operationId: createHuntressEDRIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/EDRHuntressRequest' + responses: + '200': + description: Integration created successfully. Returns the created integration. + content: + application/json: + schema: + $ref: '#/components/schemas/EDRHuntressResponse' + '400': + description: Bad Request (e.g., invalid JSON, missing required fields, validation error). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized (e.g., missing or invalid authentication token). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + get: + tags: + - EDR Huntress Integrations + summary: Get EDR Huntress Integration + description: Retrieves a specific EDR Huntress integration by its ID. + responses: + '200': + description: Successfully retrieved the integration details. + content: + application/json: + schema: + $ref: '#/components/schemas/EDRHuntressResponse' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found (e.g., integration with the given ID does not exist). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + put: + tags: + - EDR Huntress Integrations + summary: Update EDR Huntress Integration + description: Updates an existing EDR Huntress Integration. + operationId: updateHuntressEDRIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/EDRHuntressRequest' + responses: + '200': + description: Integration updated successfully. Returns the updated integration. + content: + application/json: + schema: + $ref: '#/components/schemas/EDRHuntressResponse' + '400': + description: Bad Request (e.g., invalid JSON, validation error, invalid ID). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - EDR Huntress Integrations + summary: Delete EDR Huntress Integration + description: Deletes an EDR Huntress Integration by its ID. + responses: + '200': + description: Integration deleted successfully. Returns an empty object. + content: + application/json: + schema: + type: object + example: { } + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + /api/peers/{peer-id}/edr/bypass: + parameters: + - name: peer-id + in: path + required: true + schema: + type: string + description: The unique identifier of the peer + post: + tags: + - EDR Peers + summary: Bypass compliance for a non-compliant peer + description: | + Allows an admin to bypass EDR compliance checks for a specific peer. + The peer will remain bypassed until the admin revokes it OR the device becomes + naturally compliant in the EDR system. + operationId: bypassCompliance + responses: + '200': + description: Peer compliance bypassed successfully + content: + application/json: + schema: + $ref: '#/components/schemas/BypassResponse' + '400': + description: Bad Request (peer not in non-compliant state) + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '403': + description: Forbidden + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - EDR Peers + summary: Revoke compliance bypass for a peer + description: Removes the compliance bypass, subjecting the peer to normal EDR validation. + operationId: revokeBypass + responses: + '200': + description: Compliance bypass revoked successfully + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '403': + description: Forbidden + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + /api/peers/edr/bypassed: + get: + tags: + - EDR Peers + summary: List all bypassed peers + description: Returns all peers that have compliance bypassed by an admin. + operationId: listBypassedPeers + responses: + '200': + description: List of bypassed peers + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/BypassResponse' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '403': + description: Forbidden + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + /api/event-streaming: + post: + tags: + - Event Streaming Integrations + summary: Create Event Streaming Integration + description: | + Creates a new event streaming integration for the authenticated account. + The request body should conform to `CreateIntegrationRequest`. + Note: Based on the provided Go code, the `enabled` field from the request is part of the `CreateIntegrationRequest` struct, + but the backend `manager.CreateIntegration` function signature shown does not directly use this `enabled` field. + The actual behavior for `enabled` during creation should be confirmed (e.g., it might have a server-side default or be handled by other logic). + operationId: createIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateIntegrationRequest' + responses: + '200': + description: Integration created successfully. Returns the created integration. + content: + application/json: + schema: + $ref: '#/components/schemas/IntegrationResponse' + '400': + description: Bad Request (e.g., invalid JSON, missing required fields, validation error). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized (e.g., missing or invalid authentication token). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + get: + tags: + - Event Streaming Integrations + summary: List Event Streaming Integrations + description: Retrieves all event streaming integrations for the authenticated account. + operationId: getAllIntegrations + responses: + '200': + description: A list of event streaming integrations. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/IntegrationResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/event-streaming/{id}: + parameters: + - name: id + in: path + required: true + description: The unique numeric identifier of the event streaming integration. + schema: + type: integer + example: 123 + get: + tags: + - Event Streaming Integrations + summary: Get Event Streaming Integration + description: Retrieves a specific event streaming integration by its ID. + operationId: getIntegration + responses: + '200': + description: Successfully retrieved the integration details. Config keys are masked. + content: + application/json: + schema: + $ref: '#/components/schemas/IntegrationResponse' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found (e.g., integration with the given ID does not exist). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + put: + tags: + - Event Streaming Integrations + summary: Update Event Streaming Integration + description: | + Updates an existing event streaming integration. The request body structure is `CreateIntegrationRequest`. + However, for updates: + - The `platform` field, if provided in the body, is ignored by the backend manager function, as the platform of an existing integration is typically immutable. + - The `enabled` and `config` fields from the request body are used to update the integration. + operationId: updateIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateIntegrationRequest' + responses: + '200': + description: Integration updated successfully. Returns the updated integration. + content: + application/json: + schema: + $ref: '#/components/schemas/IntegrationResponse' + '400': + description: Bad Request (e.g., invalid JSON, validation error, invalid ID). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - Event Streaming Integrations + summary: Delete Event Streaming Integration + description: Deletes an event streaming integration by its ID. + operationId: deleteIntegration + responses: + '200': + description: Integration deleted successfully. Returns an empty object. + content: + application/json: + schema: + type: object + example: { } + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index fd7c61917..3f16af46b 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -16,6 +16,14 @@ const ( TokenAuthScopes = "TokenAuth.Scopes" ) +// Defines values for CreateIntegrationRequestPlatform. +const ( + CreateIntegrationRequestPlatformDatadog CreateIntegrationRequestPlatform = "datadog" + CreateIntegrationRequestPlatformFirehose CreateIntegrationRequestPlatform = "firehose" + CreateIntegrationRequestPlatformGenericHttp CreateIntegrationRequestPlatform = "generic_http" + CreateIntegrationRequestPlatformS3 CreateIntegrationRequestPlatform = "s3" +) + // Defines values for DNSRecordType. const ( DNSRecordTypeA DNSRecordType = "A" @@ -188,6 +196,20 @@ const ( IngressPortAllocationRequestPortRangeProtocolUdp IngressPortAllocationRequestPortRangeProtocol = "udp" ) +// Defines values for IntegrationResponsePlatform. +const ( + IntegrationResponsePlatformDatadog IntegrationResponsePlatform = "datadog" + IntegrationResponsePlatformFirehose IntegrationResponsePlatform = "firehose" + IntegrationResponsePlatformGenericHttp IntegrationResponsePlatform = "generic_http" + IntegrationResponsePlatformS3 IntegrationResponsePlatform = "s3" +) + +// Defines values for InvoiceResponseType. +const ( + InvoiceResponseTypeAccount InvoiceResponseType = "account" + InvoiceResponseTypeTenants InvoiceResponseType = "tenants" +) + // Defines values for JobResponseStatus. const ( JobResponseStatusFailed JobResponseStatus = "failed" @@ -266,6 +288,21 @@ const ( ResourceTypeSubnet ResourceType = "subnet" ) +// Defines values for SentinelOneMatchAttributesNetworkStatus. +const ( + SentinelOneMatchAttributesNetworkStatusConnected SentinelOneMatchAttributesNetworkStatus = "connected" + SentinelOneMatchAttributesNetworkStatusDisconnected SentinelOneMatchAttributesNetworkStatus = "disconnected" + SentinelOneMatchAttributesNetworkStatusQuarantined SentinelOneMatchAttributesNetworkStatus = "quarantined" +) + +// Defines values for TenantResponseStatus. +const ( + TenantResponseStatusActive TenantResponseStatus = "active" + TenantResponseStatusExisting TenantResponseStatus = "existing" + TenantResponseStatusInvited TenantResponseStatus = "invited" + TenantResponseStatusPending TenantResponseStatus = "pending" +) + // Defines values for UserStatus. const ( UserStatusActive UserStatus = "active" @@ -299,6 +336,12 @@ const ( GetApiEventsNetworkTrafficParamsDirectionINGRESS GetApiEventsNetworkTrafficParamsDirection = "INGRESS" ) +// Defines values for PutApiIntegrationsMspTenantsIdInviteJSONBodyValue. +const ( + PutApiIntegrationsMspTenantsIdInviteJSONBodyValueAccept PutApiIntegrationsMspTenantsIdInviteJSONBodyValue = "accept" + PutApiIntegrationsMspTenantsIdInviteJSONBodyValueDecline PutApiIntegrationsMspTenantsIdInviteJSONBodyValue = "decline" +) + // AccessiblePeer defines model for AccessiblePeer. type AccessiblePeer struct { // CityName Commonly used English name of the city @@ -490,6 +533,21 @@ type BundleWorkloadResponse struct { Type WorkloadType `json:"type"` } +// BypassResponse Response for bypassed peer operations. +type BypassResponse struct { + // PeerId The ID of the bypassed peer. + PeerId string `json:"peer_id"` +} + +// CheckoutResponse defines model for CheckoutResponse. +type CheckoutResponse struct { + // SessionId The unique identifier for the checkout session. + SessionId string `json:"session_id"` + + // Url URL to redirect the user to the checkout session. + Url string `json:"url"` +} + // Checks List of objects that perform the actual checks type Checks struct { // GeoLocationCheck Posture check for geo location @@ -532,6 +590,36 @@ type Country struct { // CountryCode 2-letter ISO 3166-1 alpha-2 code that represents the country type CountryCode = string +// CreateIntegrationRequest Request payload for creating a new event streaming integration. Also used as the structure for the PUT request body, but not all fields are applicable for updates (see PUT operation description). +type CreateIntegrationRequest struct { + // Config Platform-specific configuration as key-value pairs. For creation, all necessary credentials and settings must be provided. For updates, provide the fields to change or the entire new configuration. + Config map[string]string `json:"config"` + + // Enabled Specifies whether the integration is enabled. During creation (POST), this value is sent by the client, but the provided backend manager function `CreateIntegration` does not appear to use it directly, so its effect on creation should be verified. During updates (PUT), this field is used to enable or disable the integration. + Enabled bool `json:"enabled"` + + // Platform The event streaming platform to integrate with (e.g., "datadog", "s3", "firehose"). This field is used for creation. For updates (PUT), this field, if sent, is ignored by the backend. + Platform CreateIntegrationRequestPlatform `json:"platform"` +} + +// CreateIntegrationRequestPlatform The event streaming platform to integrate with (e.g., "datadog", "s3", "firehose"). This field is used for creation. For updates (PUT), this field, if sent, is ignored by the backend. +type CreateIntegrationRequestPlatform string + +// CreateScimIntegrationRequest Request payload for creating an SCIM IDP integration +type CreateScimIntegrationRequest struct { + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes *[]string `json:"group_prefixes,omitempty"` + + // Prefix The connection prefix used for the SCIM provider + Prefix string `json:"prefix"` + + // Provider Name of the SCIM identity provider + Provider string `json:"provider"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes *[]string `json:"user_group_prefixes,omitempty"` +} + // CreateSetupKeyRequest defines model for CreateSetupKeyRequest. type CreateSetupKeyRequest struct { // AllowExtraDnsLabels Allow extra DNS labels to be added to the peer @@ -556,6 +644,24 @@ type CreateSetupKeyRequest struct { UsageLimit int `json:"usage_limit"` } +// CreateTenantRequest defines model for CreateTenantRequest. +type CreateTenantRequest struct { + // Domain The name for the MSP tenant + Domain string `json:"domain"` + + // Groups MSP users Groups that can access the Tenant and Roles to assume + Groups []TenantGroupResponse `json:"groups"` + + // Name The name for the MSP tenant + Name string `json:"name"` +} + +// DNSChallengeResponse defines model for DNSChallengeResponse. +type DNSChallengeResponse struct { + // DnsChallenge The DNS challenge to set in a TXT record + DnsChallenge string `json:"dns_challenge"` +} + // DNSRecord defines model for DNSRecord. type DNSRecord struct { // Content DNS record content (IP address for A/AAAA, domain for CNAME) @@ -598,6 +704,234 @@ type DNSSettings struct { DisabledManagementGroups []string `json:"disabled_management_groups"` } +// EDRFalconRequest Request payload for creating or updating a EDR Falcon integration +type EDRFalconRequest struct { + // ClientId CrowdStrike API client ID + ClientId string `json:"client_id"` + + // CloudId CrowdStrike cloud identifier (e.g., "us-1", "us-2", "eu-1") + CloudId string `json:"cloud_id"` + + // Enabled Indicates whether the integration is enabled + Enabled *bool `json:"enabled,omitempty"` + + // Groups The Groups this integration applies to + Groups []string `json:"groups"` + + // Secret CrowdStrike API client secret + Secret string `json:"secret"` + + // ZtaScoreThreshold The minimum Zero Trust Assessment score required for agent approval (0-100) + ZtaScoreThreshold int `json:"zta_score_threshold"` +} + +// EDRFalconResponse Represents a Falcon EDR integration +type EDRFalconResponse struct { + // AccountId The identifier of the account this integration belongs to. + AccountId string `json:"account_id"` + + // CloudId CrowdStrike cloud identifier + CloudId string `json:"cloud_id"` + + // CreatedAt Timestamp of when the integration was created. + CreatedAt time.Time `json:"created_at"` + + // CreatedBy The user id that created the integration + CreatedBy string `json:"created_by"` + + // Enabled Indicates whether the integration is enabled + Enabled bool `json:"enabled"` + + // Groups List of groups + Groups []Group `json:"groups"` + + // Id The unique numeric identifier for the integration. + Id int64 `json:"id"` + + // LastSyncedAt Timestamp of when the integration was last synced. + LastSyncedAt time.Time `json:"last_synced_at"` + + // UpdatedAt Timestamp of when the integration was last updated. + UpdatedAt time.Time `json:"updated_at"` + + // ZtaScoreThreshold The minimum Zero Trust Assessment score required for agent approval (0-100) + ZtaScoreThreshold int `json:"zta_score_threshold"` +} + +// EDRHuntressRequest Request payload for creating or updating a EDR Huntress integration +type EDRHuntressRequest struct { + // ApiKey Huntress API key + ApiKey string `json:"api_key"` + + // ApiSecret Huntress API secret + ApiSecret string `json:"api_secret"` + + // Enabled Indicates whether the integration is enabled + Enabled *bool `json:"enabled,omitempty"` + + // Groups The Groups this integrations applies to + Groups []string `json:"groups"` + + // LastSyncedInterval The devices last sync requirement interval in hours. Minimum value is 24 hours + LastSyncedInterval int `json:"last_synced_interval"` + + // MatchAttributes Attribute conditions to match when approving agents + MatchAttributes HuntressMatchAttributes `json:"match_attributes"` +} + +// EDRHuntressResponse Represents a Huntress EDR integration configuration +type EDRHuntressResponse struct { + // AccountId The identifier of the account this integration belongs to. + AccountId string `json:"account_id"` + + // CreatedAt Timestamp of when the integration was created. + CreatedAt time.Time `json:"created_at"` + + // CreatedBy The user id that created the integration + CreatedBy string `json:"created_by"` + + // Enabled Indicates whether the integration is enabled + Enabled bool `json:"enabled"` + + // Groups List of groups + Groups []Group `json:"groups"` + + // Id The unique numeric identifier for the integration. + Id int64 `json:"id"` + + // LastSyncedAt Timestamp of when the integration was last synced. + LastSyncedAt time.Time `json:"last_synced_at"` + + // LastSyncedInterval The devices last sync requirement interval in hours. + LastSyncedInterval int `json:"last_synced_interval"` + + // MatchAttributes Attribute conditions to match when approving agents + MatchAttributes HuntressMatchAttributes `json:"match_attributes"` + + // UpdatedAt Timestamp of when the integration was last updated. + UpdatedAt time.Time `json:"updated_at"` +} + +// EDRIntuneRequest Request payload for creating or updating a EDR Intune integration. +type EDRIntuneRequest struct { + // ClientId The Azure application client id + ClientId string `json:"client_id"` + + // Enabled Indicates whether the integration is enabled + Enabled *bool `json:"enabled,omitempty"` + + // Groups The Groups this integrations applies to + Groups []string `json:"groups"` + + // LastSyncedInterval The devices last sync requirement interval in hours. Minimum value is 24 hours. + LastSyncedInterval int `json:"last_synced_interval"` + + // Secret The Azure application client secret + Secret string `json:"secret"` + + // TenantId The Azure tenant id + TenantId string `json:"tenant_id"` +} + +// EDRIntuneResponse Represents a Intune EDR integration configuration +type EDRIntuneResponse struct { + // AccountId The identifier of the account this integration belongs to. + AccountId string `json:"account_id"` + + // ClientId The Azure application client id + ClientId string `json:"client_id"` + + // CreatedAt Timestamp of when the integration was created. + CreatedAt time.Time `json:"created_at"` + + // CreatedBy The user id that created the integration + CreatedBy string `json:"created_by"` + + // Enabled Indicates whether the integration is enabled + Enabled bool `json:"enabled"` + + // Groups List of groups + Groups []Group `json:"groups"` + + // Id The unique numeric identifier for the integration. + Id int64 `json:"id"` + + // LastSyncedAt Timestamp of when the integration was last synced. + LastSyncedAt time.Time `json:"last_synced_at"` + + // LastSyncedInterval The devices last sync requirement interval in hours. + LastSyncedInterval int `json:"last_synced_interval"` + + // TenantId The Azure tenant id + TenantId string `json:"tenant_id"` + + // UpdatedAt Timestamp of when the integration was last updated. + UpdatedAt time.Time `json:"updated_at"` +} + +// EDRSentinelOneRequest Request payload for creating or updating a EDR SentinelOne integration +type EDRSentinelOneRequest struct { + // ApiToken SentinelOne API token + ApiToken string `json:"api_token"` + + // ApiUrl The Base URL of SentinelOne API + ApiUrl string `json:"api_url"` + + // Enabled Indicates whether the integration is enabled + Enabled *bool `json:"enabled,omitempty"` + + // Groups The Groups this integrations applies to + Groups []string `json:"groups"` + + // LastSyncedInterval The devices last sync requirement interval in hours. Minimum value is 24 hours. + LastSyncedInterval int `json:"last_synced_interval"` + + // MatchAttributes Attribute conditions to match when approving agents + MatchAttributes SentinelOneMatchAttributes `json:"match_attributes"` +} + +// EDRSentinelOneResponse Represents a SentinelOne EDR integration configuration +type EDRSentinelOneResponse struct { + // AccountId The identifier of the account this integration belongs to. + AccountId string `json:"account_id"` + + // ApiUrl The Base URL of SentinelOne API + ApiUrl string `json:"api_url"` + + // CreatedAt Timestamp of when the integration was created. + CreatedAt time.Time `json:"created_at"` + + // CreatedBy The user id that created the integration + CreatedBy string `json:"created_by"` + + // Enabled Indicates whether the integration is enabled + Enabled bool `json:"enabled"` + + // Groups List of groups + Groups []Group `json:"groups"` + + // Id The unique numeric identifier for the integration. + Id int64 `json:"id"` + + // LastSyncedAt Timestamp of when the integration was last synced. + LastSyncedAt time.Time `json:"last_synced_at"` + + // LastSyncedInterval The devices last sync requirement interval in hours. + LastSyncedInterval int `json:"last_synced_interval"` + + // MatchAttributes Attribute conditions to match when approving agents + MatchAttributes SentinelOneMatchAttributes `json:"match_attributes"` + + // UpdatedAt Timestamp of when the integration was last updated. + UpdatedAt time.Time `json:"updated_at"` +} + +// ErrorResponse Standard error response. Note: The exact structure of this error response is inferred from `util.WriteErrorResponse` and `util.WriteError` usage in the provided Go code, as a specific Go struct for errors was not provided. +type ErrorResponse struct { + // Message A human-readable error message. + Message *string `json:"message,omitempty"` +} + // Event defines model for Event. type Event struct { // Activity The activity that occurred during the event @@ -643,6 +977,9 @@ type GeoLocationCheck struct { // GeoLocationCheckAction Action to take upon policy match type GeoLocationCheckAction string +// GetTenantsResponse defines model for GetTenantsResponse. +type GetTenantsResponse = []TenantResponse + // Group defines model for Group. type Group struct { // Id Group ID @@ -699,6 +1036,21 @@ type GroupRequest struct { Resources *[]Resource `json:"resources,omitempty"` } +// HuntressMatchAttributes Attribute conditions to match when approving agents +type HuntressMatchAttributes struct { + // DefenderPolicyStatus Policy status of Defender AV for Managed Antivirus. + DefenderPolicyStatus *string `json:"defender_policy_status,omitempty"` + + // DefenderStatus Status of Defender AV Managed Antivirus. + DefenderStatus *string `json:"defender_status,omitempty"` + + // DefenderSubstatus Sub-status of Defender AV Managed Antivirus. + DefenderSubstatus *string `json:"defender_substatus,omitempty"` + + // FirewallStatus Status of agent firewall. Can be one of Disabled, Enabled, Pending Isolation, Isolated, Pending Release. + FirewallStatus *string `json:"firewall_status,omitempty"` +} + // IdentityProvider defines model for IdentityProvider. type IdentityProvider struct { // ClientId OAuth2 client ID @@ -738,6 +1090,21 @@ type IdentityProviderRequest struct { // IdentityProviderType Type of identity provider type IdentityProviderType string +// IdpIntegrationSyncLog Represents a synchronization log entry for an integration +type IdpIntegrationSyncLog struct { + // Id The unique identifier for the sync log + Id int64 `json:"id"` + + // Level The log level + Level string `json:"level"` + + // Message Log message + Message string `json:"message"` + + // Timestamp Timestamp of when the log was created + Timestamp time.Time `json:"timestamp"` +} + // IngressPeer defines model for IngressPeer. type IngressPeer struct { AvailablePorts AvailablePorts `json:"available_ports"` @@ -892,6 +1259,57 @@ type InstanceVersionInfo struct { ManagementUpdateAvailable bool `json:"management_update_available"` } +// IntegrationResponse Represents an event streaming integration. +type IntegrationResponse struct { + // AccountId The identifier of the account this integration belongs to. + AccountId *string `json:"account_id,omitempty"` + + // Config Configuration for the integration. Sensitive keys (like API keys, secret keys) are masked with '****' in responses, as indicated by the GetIntegration handler logic. + Config *map[string]string `json:"config,omitempty"` + + // CreatedAt Timestamp of when the integration was created. + CreatedAt *time.Time `json:"created_at,omitempty"` + + // Enabled Whether the integration is currently active. + Enabled *bool `json:"enabled,omitempty"` + + // Id The unique numeric identifier for the integration. + Id *int64 `json:"id,omitempty"` + + // Platform The event streaming platform. + Platform *IntegrationResponsePlatform `json:"platform,omitempty"` + + // UpdatedAt Timestamp of when the integration was last updated. + UpdatedAt *time.Time `json:"updated_at,omitempty"` +} + +// IntegrationResponsePlatform The event streaming platform. +type IntegrationResponsePlatform string + +// InvoicePDFResponse defines model for InvoicePDFResponse. +type InvoicePDFResponse struct { + // Url URL to redirect the user to invoice. + Url string `json:"url"` +} + +// InvoiceResponse defines model for InvoiceResponse. +type InvoiceResponse struct { + // Id The Stripe invoice id + Id string `json:"id"` + + // PeriodEnd The end date of the invoice period. + PeriodEnd time.Time `json:"period_end"` + + // PeriodStart The start date of the invoice period. + PeriodStart time.Time `json:"period_start"` + + // Type The invoice type + Type InvoiceResponseType `json:"type"` +} + +// InvoiceResponseType The invoice type +type InvoiceResponseType string + // JobRequest defines model for JobRequest. type JobRequest struct { Workload WorkloadRequest `json:"workload"` @@ -1797,6 +2215,15 @@ type PolicyUpdate struct { SourcePostureChecks *[]string `json:"source_posture_checks,omitempty"` } +// PortalResponse defines model for PortalResponse. +type PortalResponse struct { + // SessionId The unique identifier for the customer portal session. + SessionId string `json:"session_id"` + + // Url URL to redirect the user to the customer portal. + Url string `json:"url"` +} + // PostureCheck defines model for PostureCheck. type PostureCheck struct { // Checks List of objects that perform the actual checks @@ -1824,6 +2251,21 @@ type PostureCheckUpdate struct { Name string `json:"name"` } +// Price defines model for Price. +type Price struct { + // Currency Currency code for this price. + Currency string `json:"currency"` + + // Price Price amount in minor units (e.g., cents). + Price int `json:"price"` + + // PriceId Unique identifier for the price. + PriceId string `json:"price_id"` + + // Unit Unit of measurement for this price (e.g., per user). + Unit string `json:"unit"` +} + // Process Describes the operational activity within a peer's system. type Process struct { // LinuxPath Path to the process executable file in a Linux operating system @@ -1841,6 +2283,24 @@ type ProcessCheck struct { Processes []Process `json:"processes"` } +// Product defines model for Product. +type Product struct { + // Description Detailed description of the product. + Description string `json:"description"` + + // Features List of features provided by the product. + Features []string `json:"features"` + + // Free Indicates whether the product is free or not. + Free bool `json:"free"` + + // Name Name of the product. + Name string `json:"name"` + + // Prices List of prices for the product in different currencies + Prices []Price `json:"prices"` +} + // Resource defines model for Resource. type Resource struct { // Id ID of the resource @@ -1950,6 +2410,66 @@ type RulePortRange struct { Start int `json:"start"` } +// ScimIntegration Represents a SCIM IDP integration +type ScimIntegration struct { + // AuthToken SCIM API token (full on creation, masked otherwise) + AuthToken string `json:"auth_token"` + + // Enabled Indicates whether the integration is enabled + Enabled bool `json:"enabled"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes []string `json:"group_prefixes"` + + // Id The unique identifier for the integration + Id int64 `json:"id"` + + // LastSyncedAt Timestamp of when the integration was last synced + LastSyncedAt time.Time `json:"last_synced_at"` + + // Provider Name of the SCIM identity provider + Provider string `json:"provider"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes []string `json:"user_group_prefixes"` +} + +// ScimTokenResponse Response containing the regenerated SCIM token +type ScimTokenResponse struct { + // AuthToken The newly generated SCIM API token + AuthToken string `json:"auth_token"` +} + +// SentinelOneMatchAttributes Attribute conditions to match when approving agents +type SentinelOneMatchAttributes struct { + // ActiveThreats The maximum allowed number of active threats on the agent + ActiveThreats *int `json:"active_threats,omitempty"` + + // EncryptedApplications Whether disk encryption is enabled on the agent + EncryptedApplications *bool `json:"encrypted_applications,omitempty"` + + // FirewallEnabled Whether the agent firewall is enabled + FirewallEnabled *bool `json:"firewall_enabled,omitempty"` + + // Infected Whether the agent is currently flagged as infected + Infected *bool `json:"infected,omitempty"` + + // IsActive Whether the agent has been recently active and reporting + IsActive *bool `json:"is_active,omitempty"` + + // IsUpToDate Whether the agent is running the latest available version + IsUpToDate *bool `json:"is_up_to_date,omitempty"` + + // NetworkStatus The current network connectivity status of the device + NetworkStatus *SentinelOneMatchAttributesNetworkStatus `json:"network_status,omitempty"` + + // OperationalState The current operational state of the agent + OperationalState *string `json:"operational_state,omitempty"` +} + +// SentinelOneMatchAttributesNetworkStatus The current network connectivity status of the device +type SentinelOneMatchAttributesNetworkStatus string + // SetupKey defines model for SetupKey. type SetupKey struct { // AllowExtraDnsLabels Allow extra DNS labels to be added to the peer @@ -2121,6 +2641,117 @@ type SetupResponse struct { UserId string `json:"user_id"` } +// Subscription defines model for Subscription. +type Subscription struct { + // Active Indicates whether the subscription is active or not. + Active bool `json:"active"` + + // Currency Currency code of the subscription. + Currency string `json:"currency"` + + // Features List of features included in the subscription. + Features *[]string `json:"features,omitempty"` + + // PlanTier The tier of the plan for the subscription. + PlanTier string `json:"plan_tier"` + + // Price Price amount in minor units (e.g., cents). + Price int `json:"price"` + + // PriceId Unique identifier for the price of the subscription. + PriceId string `json:"price_id"` + + // Provider The provider of the subscription. + Provider string `json:"provider"` + + // RemainingTrial The remaining time for the trial period, in seconds. + RemainingTrial *int `json:"remaining_trial,omitempty"` + + // UpdatedAt The date and time when the subscription was last updated. + UpdatedAt time.Time `json:"updated_at"` +} + +// TenantGroupResponse defines model for TenantGroupResponse. +type TenantGroupResponse struct { + // Id The Group ID + Id string `json:"id"` + + // Role The Role name + Role string `json:"role"` +} + +// TenantResponse defines model for TenantResponse. +type TenantResponse struct { + // ActivatedAt The date and time when the tenant was activated. + ActivatedAt *time.Time `json:"activated_at,omitempty"` + + // CreatedAt The date and time when the tenant was created. + CreatedAt time.Time `json:"created_at"` + + // DnsChallenge The DNS challenge to set in a TXT record + DnsChallenge string `json:"dns_challenge"` + + // Domain The tenant account domain + Domain string `json:"domain"` + + // Groups MSP users Groups that can access the Tenant and Roles to assume + Groups []TenantGroupResponse `json:"groups"` + + // Id The updated MSP tenant account ID + Id string `json:"id"` + + // InvitedAt The date and time when the existing tenant was invited. + InvitedAt *time.Time `json:"invited_at,omitempty"` + + // Name The name for the MSP tenant + Name string `json:"name"` + + // Status The status of the tenant + Status TenantResponseStatus `json:"status"` + + // UpdatedAt The date and time when the tenant was last updated. + UpdatedAt time.Time `json:"updated_at"` +} + +// TenantResponseStatus The status of the tenant +type TenantResponseStatus string + +// UpdateScimIntegrationRequest Request payload for updating an SCIM IDP integration +type UpdateScimIntegrationRequest struct { + // Enabled Indicates whether the integration is enabled + Enabled *bool `json:"enabled,omitempty"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes *[]string `json:"group_prefixes,omitempty"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes *[]string `json:"user_group_prefixes,omitempty"` +} + +// UpdateTenantRequest defines model for UpdateTenantRequest. +type UpdateTenantRequest struct { + // Groups MSP users Groups that can access the Tenant and Roles to assume + Groups []TenantGroupResponse `json:"groups"` + + // Name The name for the MSP tenant + Name string `json:"name"` +} + +// UsageStats defines model for UsageStats. +type UsageStats struct { + // ActivePeers Number of active peers. + ActivePeers int64 `json:"active_peers"` + + // ActiveUsers Number of active users. + ActiveUsers int64 `json:"active_users"` + + // TotalPeers Total number of peers. + TotalPeers int64 `json:"total_peers"` + + // TotalUsers Total number of users. + TotalUsers int64 `json:"total_users"` +} + // User defines model for User. type User struct { // AutoGroups Group IDs to auto-assign to peers registered by this user @@ -2407,6 +3038,66 @@ type GetApiGroupsParams struct { Name *string `form:"name,omitempty" json:"name,omitempty"` } +// PostApiIntegrationsBillingAwsMarketplaceActivateJSONBody defines parameters for PostApiIntegrationsBillingAwsMarketplaceActivate. +type PostApiIntegrationsBillingAwsMarketplaceActivateJSONBody struct { + // PlanTier The plan tier to activate the subscription for. + PlanTier string `json:"plan_tier"` +} + +// PostApiIntegrationsBillingAwsMarketplaceEnrichJSONBody defines parameters for PostApiIntegrationsBillingAwsMarketplaceEnrich. +type PostApiIntegrationsBillingAwsMarketplaceEnrichJSONBody struct { + // AwsUserId The AWS user ID. + AwsUserId string `json:"aws_user_id"` +} + +// PostApiIntegrationsBillingCheckoutJSONBody defines parameters for PostApiIntegrationsBillingCheckout. +type PostApiIntegrationsBillingCheckoutJSONBody struct { + // BaseURL The base URL for the redirect after checkout. + BaseURL string `json:"baseURL"` + + // EnableTrial Enables a 14-day trial for the account. + EnableTrial *bool `json:"enableTrial,omitempty"` + + // PriceID The Price ID for checkout. + PriceID string `json:"priceID"` +} + +// GetApiIntegrationsBillingPortalParams defines parameters for GetApiIntegrationsBillingPortal. +type GetApiIntegrationsBillingPortalParams struct { + // BaseURL The base URL for the redirect after accessing the portal. + BaseURL string `form:"baseURL" json:"baseURL"` +} + +// PutApiIntegrationsBillingSubscriptionJSONBody defines parameters for PutApiIntegrationsBillingSubscription. +type PutApiIntegrationsBillingSubscriptionJSONBody struct { + // PlanTier The plan tier to change the subscription to. + PlanTier *string `json:"plan_tier,omitempty"` + + // PriceID The Price ID to change the subscription to. + PriceID *string `json:"priceID,omitempty"` +} + +// PutApiIntegrationsMspTenantsIdInviteJSONBody defines parameters for PutApiIntegrationsMspTenantsIdInvite. +type PutApiIntegrationsMspTenantsIdInviteJSONBody struct { + // Value Accept or decline the invitation. + Value PutApiIntegrationsMspTenantsIdInviteJSONBodyValue `json:"value"` +} + +// PutApiIntegrationsMspTenantsIdInviteJSONBodyValue defines parameters for PutApiIntegrationsMspTenantsIdInvite. +type PutApiIntegrationsMspTenantsIdInviteJSONBodyValue string + +// PostApiIntegrationsMspTenantsIdSubscriptionJSONBody defines parameters for PostApiIntegrationsMspTenantsIdSubscription. +type PostApiIntegrationsMspTenantsIdSubscriptionJSONBody struct { + // PriceID The Price ID to change the subscription to. + PriceID string `json:"priceID"` +} + +// PostApiIntegrationsMspTenantsIdUnlinkJSONBody defines parameters for PostApiIntegrationsMspTenantsIdUnlink. +type PostApiIntegrationsMspTenantsIdUnlinkJSONBody struct { + // Owner The new owners user ID. + Owner string `json:"owner"` +} + // GetApiPeersParams defines parameters for GetApiPeers. type GetApiPeersParams struct { // Name Filter peers by name @@ -2452,6 +3143,12 @@ type PostApiDnsZonesZoneIdRecordsJSONRequestBody = DNSRecordRequest // PutApiDnsZonesZoneIdRecordsRecordIdJSONRequestBody defines body for PutApiDnsZonesZoneIdRecordsRecordId for application/json ContentType. type PutApiDnsZonesZoneIdRecordsRecordIdJSONRequestBody = DNSRecordRequest +// CreateIntegrationJSONRequestBody defines body for CreateIntegration for application/json ContentType. +type CreateIntegrationJSONRequestBody = CreateIntegrationRequest + +// UpdateIntegrationJSONRequestBody defines body for UpdateIntegration for application/json ContentType. +type UpdateIntegrationJSONRequestBody = CreateIntegrationRequest + // PostApiGroupsJSONRequestBody defines body for PostApiGroups for application/json ContentType. type PostApiGroupsJSONRequestBody = GroupRequest @@ -2470,6 +3167,63 @@ type PostApiIngressPeersJSONRequestBody = IngressPeerCreateRequest // PutApiIngressPeersIngressPeerIdJSONRequestBody defines body for PutApiIngressPeersIngressPeerId for application/json ContentType. type PutApiIngressPeersIngressPeerIdJSONRequestBody = IngressPeerUpdateRequest +// PostApiIntegrationsBillingAwsMarketplaceActivateJSONRequestBody defines body for PostApiIntegrationsBillingAwsMarketplaceActivate for application/json ContentType. +type PostApiIntegrationsBillingAwsMarketplaceActivateJSONRequestBody PostApiIntegrationsBillingAwsMarketplaceActivateJSONBody + +// PostApiIntegrationsBillingAwsMarketplaceEnrichJSONRequestBody defines body for PostApiIntegrationsBillingAwsMarketplaceEnrich for application/json ContentType. +type PostApiIntegrationsBillingAwsMarketplaceEnrichJSONRequestBody PostApiIntegrationsBillingAwsMarketplaceEnrichJSONBody + +// PostApiIntegrationsBillingCheckoutJSONRequestBody defines body for PostApiIntegrationsBillingCheckout for application/json ContentType. +type PostApiIntegrationsBillingCheckoutJSONRequestBody PostApiIntegrationsBillingCheckoutJSONBody + +// PutApiIntegrationsBillingSubscriptionJSONRequestBody defines body for PutApiIntegrationsBillingSubscription for application/json ContentType. +type PutApiIntegrationsBillingSubscriptionJSONRequestBody PutApiIntegrationsBillingSubscriptionJSONBody + +// CreateFalconEDRIntegrationJSONRequestBody defines body for CreateFalconEDRIntegration for application/json ContentType. +type CreateFalconEDRIntegrationJSONRequestBody = EDRFalconRequest + +// UpdateFalconEDRIntegrationJSONRequestBody defines body for UpdateFalconEDRIntegration for application/json ContentType. +type UpdateFalconEDRIntegrationJSONRequestBody = EDRFalconRequest + +// CreateHuntressEDRIntegrationJSONRequestBody defines body for CreateHuntressEDRIntegration for application/json ContentType. +type CreateHuntressEDRIntegrationJSONRequestBody = EDRHuntressRequest + +// UpdateHuntressEDRIntegrationJSONRequestBody defines body for UpdateHuntressEDRIntegration for application/json ContentType. +type UpdateHuntressEDRIntegrationJSONRequestBody = EDRHuntressRequest + +// CreateEDRIntegrationJSONRequestBody defines body for CreateEDRIntegration for application/json ContentType. +type CreateEDRIntegrationJSONRequestBody = EDRIntuneRequest + +// UpdateEDRIntegrationJSONRequestBody defines body for UpdateEDRIntegration for application/json ContentType. +type UpdateEDRIntegrationJSONRequestBody = EDRIntuneRequest + +// CreateSentinelOneEDRIntegrationJSONRequestBody defines body for CreateSentinelOneEDRIntegration for application/json ContentType. +type CreateSentinelOneEDRIntegrationJSONRequestBody = EDRSentinelOneRequest + +// UpdateSentinelOneEDRIntegrationJSONRequestBody defines body for UpdateSentinelOneEDRIntegration for application/json ContentType. +type UpdateSentinelOneEDRIntegrationJSONRequestBody = EDRSentinelOneRequest + +// PostApiIntegrationsMspTenantsJSONRequestBody defines body for PostApiIntegrationsMspTenants for application/json ContentType. +type PostApiIntegrationsMspTenantsJSONRequestBody = CreateTenantRequest + +// PutApiIntegrationsMspTenantsIdJSONRequestBody defines body for PutApiIntegrationsMspTenantsId for application/json ContentType. +type PutApiIntegrationsMspTenantsIdJSONRequestBody = UpdateTenantRequest + +// PutApiIntegrationsMspTenantsIdInviteJSONRequestBody defines body for PutApiIntegrationsMspTenantsIdInvite for application/json ContentType. +type PutApiIntegrationsMspTenantsIdInviteJSONRequestBody PutApiIntegrationsMspTenantsIdInviteJSONBody + +// PostApiIntegrationsMspTenantsIdSubscriptionJSONRequestBody defines body for PostApiIntegrationsMspTenantsIdSubscription for application/json ContentType. +type PostApiIntegrationsMspTenantsIdSubscriptionJSONRequestBody PostApiIntegrationsMspTenantsIdSubscriptionJSONBody + +// PostApiIntegrationsMspTenantsIdUnlinkJSONRequestBody defines body for PostApiIntegrationsMspTenantsIdUnlink for application/json ContentType. +type PostApiIntegrationsMspTenantsIdUnlinkJSONRequestBody PostApiIntegrationsMspTenantsIdUnlinkJSONBody + +// CreateSCIMIntegrationJSONRequestBody defines body for CreateSCIMIntegration for application/json ContentType. +type CreateSCIMIntegrationJSONRequestBody = CreateScimIntegrationRequest + +// UpdateSCIMIntegrationJSONRequestBody defines body for UpdateSCIMIntegration for application/json ContentType. +type UpdateSCIMIntegrationJSONRequestBody = UpdateScimIntegrationRequest + // PostApiNetworksJSONRequestBody defines body for PostApiNetworks for application/json ContentType. type PostApiNetworksJSONRequestBody = NetworkRequest From 841b2d26c673990662adf847a8ea21ef861a072f Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Fri, 13 Feb 2026 15:41:26 +0100 Subject: [PATCH 123/374] Add early message buffer for relay client (#5282) Add early message buffer to capture transport messages arriving before OpenConn completes, ensuring correct message ordering and no dropped messages. --- shared/relay/client/client.go | 26 +- shared/relay/client/early_msg_buffer.go | 175 +++++++ shared/relay/client/early_msg_buffer_test.go | 485 +++++++++++++++++++ 3 files changed, 684 insertions(+), 2 deletions(-) create mode 100644 shared/relay/client/early_msg_buffer.go create mode 100644 shared/relay/client/early_msg_buffer_test.go diff --git a/shared/relay/client/client.go b/shared/relay/client/client.go index 0acadaa4b..e0e894eb1 100644 --- a/shared/relay/client/client.go +++ b/shared/relay/client/client.go @@ -130,6 +130,7 @@ type Client struct { relayConn net.Conn conns map[messages.PeerID]*connContainer + earlyMsgs *earlyMsgBuffer serviceIsRunning bool mu sync.Mutex // protect serviceIsRunning and conns readLoopMutex sync.Mutex @@ -165,6 +166,8 @@ func NewClient(serverURL string, authTokenStore *auth.TokenStore, peerID string, conns: make(map[messages.PeerID]*connContainer), } + c.earlyMsgs = newEarlyMsgBuffer() + c.log.Infof("create new relay connection: local peerID: %s, local peer hashedID: %s", peerID, hashedID) return c } @@ -236,8 +239,14 @@ func (c *Client) OpenConn(ctx context.Context, dstPeerID string) (net.Conn, erro conn := NewConn(c, peerID, msgChannel, instanceURL) container := newConnContainer(c.log, conn, msgChannel) c.conns[peerID] = container + earlyMsg, hasEarly := c.earlyMsgs.pop(peerID) c.mu.Unlock() + if hasEarly { + container.writeMsg(earlyMsg) + c.log.Tracef("flushed buffered early message for peer: %s", peerID) + } + if err := c.stateSubscription.WaitToBeOnlineAndSubscribe(ctx, peerID); err != nil { c.log.Errorf("peer not available: %s, %s", peerID, err) c.mu.Lock() @@ -466,10 +475,20 @@ func (c *Client) handleTransportMsg(buf []byte, bufPtr *[]byte, internallyStoppe return false } container, ok := c.conns[*peerID] + earlyBuf := c.earlyMsgs c.mu.Unlock() if !ok { - c.log.Errorf("peer not found: %s", peerID.String()) - c.bufPool.Put(bufPtr) + msg := Msg{ + bufPool: c.bufPool, + bufPtr: bufPtr, + Payload: payload, + } + if earlyBuf == nil || !earlyBuf.put(*peerID, msg) { + c.log.Warnf("failed to buffer early message for peer: %s", peerID.String()) + c.bufPool.Put(bufPtr) + } else { + c.log.Debugf("buffered early transport message for peer: %s", peerID.String()) + } return true } msg := Msg{ @@ -537,6 +556,9 @@ func (c *Client) closeAllConns() { container.close() } c.conns = make(map[messages.PeerID]*connContainer) + + c.earlyMsgs.close() + c.earlyMsgs = newEarlyMsgBuffer() } func (c *Client) closeConnsByPeerID(peerIDs []messages.PeerID) { diff --git a/shared/relay/client/early_msg_buffer.go b/shared/relay/client/early_msg_buffer.go new file mode 100644 index 000000000..3ead94de1 --- /dev/null +++ b/shared/relay/client/early_msg_buffer.go @@ -0,0 +1,175 @@ +package client + +import ( + "container/list" + "sync" + "time" + + "github.com/netbirdio/netbird/shared/relay/messages" +) + +const ( + earlyMsgTTL = 5 * time.Second + earlyMsgCapacity = 1000 +) + +// earlyMsgBuffer buffers transport messages that arrive before the corresponding +// OpenConn call. This happens during reconnection when the remote peer sends data +// before the local side has set up the relay connection. +// +// It stores at most one message per peer (the first WireGuard handshake) and +// caps the total number of entries to prevent unbounded memory growth. +// A cleanup timer runs only when there are buffered entries and fires when the +// oldest entry expires. Entries are kept in a linked list ordered by insertion +// time so cleanup only needs to walk from the front. +type earlyMsgBuffer struct { + mu sync.Mutex + index map[messages.PeerID]*list.Element + order *list.List // front = oldest + timer *time.Timer + closed bool +} + +type earlyMsg struct { + peerID messages.PeerID + msg Msg + createdAt time.Time +} + +func newEarlyMsgBuffer() *earlyMsgBuffer { + return &earlyMsgBuffer{ + index: make(map[messages.PeerID]*list.Element), + order: list.New(), + } +} + +// put stores or overwrites a message for the given peer. If a message for the +// peer already exists, it is replaced with the new one. Returns false if the +// message was not stored (buffer full or buffer closed). +func (b *earlyMsgBuffer) put(peerID messages.PeerID, msg Msg) bool { + b.mu.Lock() + defer b.mu.Unlock() + + if b.closed { + return false + } + + if existing, exists := b.index[peerID]; exists { + old := b.order.Remove(existing).(earlyMsg) + old.msg.Free() + delete(b.index, peerID) + } + + if b.order.Len() >= earlyMsgCapacity { + return false + } + + entry := earlyMsg{ + peerID: peerID, + msg: msg, + createdAt: time.Now(), + } + elem := b.order.PushBack(entry) + b.index[peerID] = elem + + // Start the cleanup timer if this is the first entry + if b.order.Len() == 1 { + b.scheduleCleanup(earlyMsgTTL) + } + + return true +} + +// pop retrieves and removes the buffered message for the given peer. +// Returns the message and true if found, zero value and false otherwise. +func (b *earlyMsgBuffer) pop(peerID messages.PeerID) (Msg, bool) { + b.mu.Lock() + defer b.mu.Unlock() + + elem, ok := b.index[peerID] + if !ok { + return Msg{}, false + } + + entry := b.order.Remove(elem).(earlyMsg) + delete(b.index, peerID) + + if b.order.Len() == 0 { + b.stopCleanup() + } + + return entry.msg, true +} + +// close stops the cleanup timer and frees all buffered messages. +func (b *earlyMsgBuffer) close() { + b.mu.Lock() + defer b.mu.Unlock() + + if b.closed { + return + } + b.closed = true + b.stopCleanup() + + for elem := b.order.Front(); elem != nil; elem = elem.Next() { + entry := elem.Value.(earlyMsg) + entry.msg.Free() + } + b.order.Init() + b.index = make(map[messages.PeerID]*list.Element) +} + +// scheduleCleanup starts or resets the timer. Caller must hold b.mu. +func (b *earlyMsgBuffer) scheduleCleanup(d time.Duration) { + if b.timer != nil { + b.timer.Stop() + } + b.timer = time.AfterFunc(d, b.removeExpired) +} + +// stopCleanup stops the timer. Caller must hold b.mu. +func (b *earlyMsgBuffer) stopCleanup() { + if b.timer != nil { + b.timer.Stop() + b.timer = nil + } +} + +func (b *earlyMsgBuffer) removeExpired() { + b.mu.Lock() + defer b.mu.Unlock() + + if b.closed { + return + } + + now := time.Now() + for elem := b.order.Front(); elem != nil; { + entry := elem.Value.(earlyMsg) + if now.Sub(entry.createdAt) <= earlyMsgTTL { + // Entries are ordered by time, so the rest are newer + break + } + next := elem.Next() + b.order.Remove(elem) + delete(b.index, entry.peerID) + entry.msg.Free() + elem = next + } + + if b.order.Len() == 0 { + b.timer = nil + return + } + + // Schedule next cleanup based on when the oldest entry expires + front := b.order.Front() + if front == nil { + b.timer = nil + return + } + oldest := front.Value.(earlyMsg).createdAt + nextCleanup := earlyMsgTTL - now.Sub(oldest) + b.scheduleCleanup(nextCleanup) +} diff --git a/shared/relay/client/early_msg_buffer_test.go b/shared/relay/client/early_msg_buffer_test.go new file mode 100644 index 000000000..1073378e1 --- /dev/null +++ b/shared/relay/client/early_msg_buffer_test.go @@ -0,0 +1,485 @@ +package client + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/netbirdio/netbird/shared/relay/messages" +) + +func newTestPool() *sync.Pool { + return &sync.Pool{ + New: func() any { + buf := make([]byte, 64) + return &buf + }, + } +} + +func newTestMsg(pool *sync.Pool, payload string) Msg { + bufPtr := pool.Get().(*[]byte) + copy(*bufPtr, payload) + return Msg{ + bufPool: pool, + bufPtr: bufPtr, + Payload: (*bufPtr)[:len(payload)], + } +} + +func peerID(id string) messages.PeerID { + return messages.HashID(id) +} + +func TestEarlyMsgBuffer_PutAndPop(t *testing.T) { + buf := newEarlyMsgBuffer() + defer buf.close() + + pool := newTestPool() + peer := peerID("peer1") + msg := newTestMsg(pool, "hello") + + if !buf.put(peer, msg) { + t.Fatal("put should succeed") + } + + got, ok := buf.pop(peer) + if !ok { + t.Fatal("pop should find the message") + } + if string(got.Payload) != "hello" { + t.Fatalf("expected payload 'hello', got '%s'", got.Payload) + } + got.Free() +} + +func TestEarlyMsgBuffer_PopNotFound(t *testing.T) { + buf := newEarlyMsgBuffer() + defer buf.close() + + _, ok := buf.pop(peerID("nonexistent")) + if ok { + t.Fatal("pop should return false for unknown peer") + } +} + +func TestEarlyMsgBuffer_PopAfterPopReturnsFalse(t *testing.T) { + buf := newEarlyMsgBuffer() + defer buf.close() + + pool := newTestPool() + peer := peerID("peer1") + + buf.put(peer, newTestMsg(pool, "data")) + + got, ok := buf.pop(peer) + if !ok { + t.Fatal("first pop should succeed") + } + got.Free() + + _, ok = buf.pop(peer) + if ok { + t.Fatal("second pop for the same peer should return false") + } +} + +func TestEarlyMsgBuffer_OverwriteSamePeer(t *testing.T) { + buf := newEarlyMsgBuffer() + defer buf.close() + + pool := newTestPool() + peer := peerID("peer1") + + if !buf.put(peer, newTestMsg(pool, "first")) { + t.Fatal("first put should succeed") + } + if !buf.put(peer, newTestMsg(pool, "second")) { + t.Fatal("second put (overwrite) should succeed") + } + + got, ok := buf.pop(peer) + if !ok { + t.Fatal("pop should find the message") + } + if string(got.Payload) != "second" { + t.Fatalf("expected payload 'second', got '%s'", got.Payload) + } + got.Free() + + // No more messages should be present for this peer + _, ok = buf.pop(peer) + if ok { + t.Fatal("pop should return false after the only message was already popped") + } +} + +func TestEarlyMsgBuffer_MultiplePeers(t *testing.T) { + buf := newEarlyMsgBuffer() + defer buf.close() + + pool := newTestPool() + peers := []messages.PeerID{peerID("a"), peerID("b"), peerID("c")} + + for i, p := range peers { + msg := newTestMsg(pool, fmt.Sprintf("msg-%d", i)) + if !buf.put(p, msg) { + t.Fatalf("put should succeed for peer %d", i) + } + } + + // Pop in reverse order to verify independence + for i := len(peers) - 1; i >= 0; i-- { + got, ok := buf.pop(peers[i]) + if !ok { + t.Fatalf("pop should find message for peer %d", i) + } + expected := fmt.Sprintf("msg-%d", i) + if string(got.Payload) != expected { + t.Fatalf("expected payload '%s', got '%s'", expected, got.Payload) + } + got.Free() + } +} + +func TestEarlyMsgBuffer_Capacity(t *testing.T) { + buf := newEarlyMsgBuffer() + defer buf.close() + + pool := newTestPool() + + // Fill to capacity + for i := 0; i < earlyMsgCapacity; i++ { + peer := peerID(fmt.Sprintf("peer-%d", i)) + msg := newTestMsg(pool, fmt.Sprintf("msg-%d", i)) + if !buf.put(peer, msg) { + t.Fatalf("put should succeed for peer %d", i) + } + } + + // Next put for a new peer should fail + msg := newTestMsg(pool, "overflow") + if buf.put(peerID("overflow-peer"), msg) { + t.Fatal("put should fail when buffer is at capacity") + } + msg.Free() + + // Overwriting an existing peer should still work (it removes then adds) + overwrite := newTestMsg(pool, "overwritten") + if !buf.put(peerID("peer-0"), overwrite) { + t.Fatal("overwrite should succeed even at capacity") + } + + got, ok := buf.pop(peerID("peer-0")) + if !ok { + t.Fatal("pop should find overwritten message") + } + if string(got.Payload) != "overwritten" { + t.Fatalf("expected 'overwritten', got '%s'", got.Payload) + } + got.Free() + + // Clean up remaining + for i := 1; i < earlyMsgCapacity; i++ { + peer := peerID(fmt.Sprintf("peer-%d", i)) + if m, ok := buf.pop(peer); ok { + m.Free() + } + } +} + +func TestEarlyMsgBuffer_CapacityAfterPop(t *testing.T) { + buf := newEarlyMsgBuffer() + defer buf.close() + + pool := newTestPool() + + // Fill to capacity + for i := 0; i < earlyMsgCapacity; i++ { + peer := peerID(fmt.Sprintf("peer-%d", i)) + if !buf.put(peer, newTestMsg(pool, "x")) { + t.Fatalf("put should succeed for peer %d", i) + } + } + + // Pop one entry to free a slot + got, ok := buf.pop(peerID("peer-0")) + if !ok { + t.Fatal("pop should succeed") + } + got.Free() + + // Now a new peer should fit + if !buf.put(peerID("new-peer"), newTestMsg(pool, "new")) { + t.Fatal("put should succeed after popping one entry") + } + + // Clean up + for i := 1; i < earlyMsgCapacity; i++ { + if m, ok := buf.pop(peerID(fmt.Sprintf("peer-%d", i))); ok { + m.Free() + } + } + if m, ok := buf.pop(peerID("new-peer")); ok { + m.Free() + } +} + +func TestEarlyMsgBuffer_PutAfterClose(t *testing.T) { + buf := newEarlyMsgBuffer() + + pool := newTestPool() + buf.close() + + msg := newTestMsg(pool, "too late") + if buf.put(peerID("peer1"), msg) { + t.Fatal("put should fail after close") + } + msg.Free() +} + +func TestEarlyMsgBuffer_PopAfterClose(t *testing.T) { + buf := newEarlyMsgBuffer() + + pool := newTestPool() + buf.put(peerID("peer1"), newTestMsg(pool, "data")) + buf.close() + + // Messages are freed on close, so pop should not find anything + _, ok := buf.pop(peerID("peer1")) + if ok { + t.Fatal("pop should return false after close") + } +} + +func TestEarlyMsgBuffer_DoubleClose(t *testing.T) { + buf := newEarlyMsgBuffer() + buf.close() + buf.close() // should not panic +} + +func TestEarlyMsgBuffer_TTLExpiry(t *testing.T) { + buf := newEarlyMsgBuffer() + defer buf.close() + + pool := newTestPool() + peer := peerID("peer1") + + buf.put(peer, newTestMsg(pool, "expiring")) + + // Wait for the TTL to expire plus some margin + time.Sleep(earlyMsgTTL + 500*time.Millisecond) + + _, ok := buf.pop(peer) + if ok { + t.Fatal("message should have been expired by cleanup") + } +} + +func TestEarlyMsgBuffer_PartialExpiry(t *testing.T) { + buf := newEarlyMsgBuffer() + defer buf.close() + + pool := newTestPool() + + // Insert first message + buf.put(peerID("peer1"), newTestMsg(pool, "old")) + + // Wait half the TTL, then insert second message + time.Sleep(earlyMsgTTL / 2) + + buf.put(peerID("peer2"), newTestMsg(pool, "new")) + + // Wait for the first to expire but not the second + time.Sleep(earlyMsgTTL/2 + 500*time.Millisecond) + + // First should be gone + _, ok := buf.pop(peerID("peer1")) + if ok { + t.Fatal("peer1 message should have expired") + } + + // Second should still be there + got, ok := buf.pop(peerID("peer2")) + if !ok { + t.Fatal("peer2 message should still be present") + } + if string(got.Payload) != "new" { + t.Fatalf("expected payload 'new', got '%s'", got.Payload) + } + got.Free() +} + +func TestEarlyMsgBuffer_BulkExpiry(t *testing.T) { + buf := newEarlyMsgBuffer() + defer buf.close() + + pool := newTestPool() + + for i := 0; i < 50; i++ { + peer := peerID(fmt.Sprintf("peer-%d", i)) + buf.put(peer, newTestMsg(pool, fmt.Sprintf("msg-%d", i))) + } + + // All should expire together + time.Sleep(earlyMsgTTL + 500*time.Millisecond) + + for i := 0; i < 50; i++ { + _, ok := buf.pop(peerID(fmt.Sprintf("peer-%d", i))) + if ok { + t.Fatalf("peer-%d should have expired", i) + } + } +} + +func TestEarlyMsgBuffer_ConcurrentPutAndPop(t *testing.T) { + buf := newEarlyMsgBuffer() + defer buf.close() + + pool := newTestPool() + var wg sync.WaitGroup + + // Concurrent puts + for i := 0; i < 100; i++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + peer := peerID(fmt.Sprintf("peer-%d", id)) + msg := newTestMsg(pool, fmt.Sprintf("msg-%d", id)) + if !buf.put(peer, msg) { + msg.Free() + } + }(i) + } + wg.Wait() + + // Concurrent pops + var popped int64 + var mu sync.Mutex + for i := 0; i < 100; i++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + peer := peerID(fmt.Sprintf("peer-%d", id)) + if msg, ok := buf.pop(peer); ok { + msg.Free() + mu.Lock() + popped++ + mu.Unlock() + } + }(i) + } + wg.Wait() + + if popped != 100 { + t.Fatalf("expected to pop 100 messages, got %d", popped) + } +} + +func TestEarlyMsgBuffer_ConcurrentPutPopAndClose(t *testing.T) { + buf := newEarlyMsgBuffer() + + pool := newTestPool() + var wg sync.WaitGroup + + // Concurrent puts + for i := 0; i < 50; i++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + peer := peerID(fmt.Sprintf("peer-%d", id)) + msg := newTestMsg(pool, fmt.Sprintf("msg-%d", id)) + if !buf.put(peer, msg) { + msg.Free() + } + }(i) + } + + // Concurrent pops + for i := 0; i < 50; i++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + peer := peerID(fmt.Sprintf("peer-%d", id)) + if msg, ok := buf.pop(peer); ok { + msg.Free() + } + }(i) + } + + // Close concurrently + wg.Add(1) + go func() { + defer wg.Done() + buf.close() + }() + + wg.Wait() // should not panic or deadlock +} + +func TestEarlyMsgBuffer_OverwriteDoesNotLeak(t *testing.T) { + buf := newEarlyMsgBuffer() + defer buf.close() + + // Use a dedicated pool to detect that overwritten message's Free was called + freeCalled := make(chan struct{}, 1) + origPool := &sync.Pool{ + New: func() any { + b := make([]byte, 64) + return &b + }, + } + + b := make([]byte, 64) + copy(b, "original") + bufPtr := &b + origMsg := Msg{ + bufPool: origPool, + bufPtr: bufPtr, + Payload: b[:8], + } + + peer := peerID("peer1") + buf.put(peer, origMsg) + + // Now check if the original buffer was freed by trying to get from pool + // We need a wrapper pool that signals when Put is called + trackPool := &sync.Pool{ + New: func() any { + b := make([]byte, 64) + return &b + }, + } + _ = trackPool + + // Simpler approach: overwrite and check that only new value is returned + newPool := newTestPool() + buf.put(peer, newTestMsg(newPool, "replaced")) + + // After overwrite, only the new message should be retrievable + got, ok := buf.pop(peer) + if !ok { + t.Fatal("pop should find the message") + } + if string(got.Payload) != "replaced" { + t.Fatalf("expected 'replaced', got '%s'", got.Payload) + } + got.Free() + close(freeCalled) +} + +func TestEarlyMsgBuffer_EmptyBuffer(t *testing.T) { + buf := newEarlyMsgBuffer() + defer buf.close() + + // Pop from empty buffer + _, ok := buf.pop(peerID("anything")) + if ok { + t.Fatal("pop from empty buffer should return false") + } + + // Close empty buffer should be fine + buf2 := newEarlyMsgBuffer() + buf2.close() +} From edce11b34d3317eac80f80a74dcbdf6b7cc58602 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Fri, 13 Feb 2026 15:48:08 +0100 Subject: [PATCH 124/374] [client] Refactor/relay conn container (#5271) * Fix race condition and ensure correct message ordering in connection establishment Reorder operations in OpenConn to register the connection before waiting for peer availability. This ensures: - Connection is ready to receive messages before peer subscription completes - Transport messages and onconnected events maintain proper ordering - No messages are lost during the connection establishment window - Concurrent OpenConn calls cannot create duplicate connections If peer availability check fails, the pre-registered connection is properly cleaned up. * Handle service shutdown during relay connection initialization Ensure relay connections are properly cleaned up when the service is not running by verifying `serviceIsRunning` and removing stale entries from `c.conns` to prevent unintended behaviors. * Refactor relay client Conn/connContainer ownership and decouple Conn from Client Conn previously held a direct *Client pointer and called client methods (writeTo, closeConn, LocalAddr) directly, creating a tight bidirectional coupling. The message channel was also created externally in OpenConn and shared between Conn and connContainer with unclear ownership. Now connContainer fully owns the lifecycle of both the channel and the Conn it wraps: - connContainer creates the channel (sized by connChannelSize const) and the Conn internally via newConnContainer - connContainer feeds messages into the channel (writeMsg), closes and drains it on shutdown (close) - Conn reads from the channel (Read) but never closes it Conn is decoupled from *Client by replacing the *Client field with three function closures (writeFn, closeFn, localAddrFn) that are wired by newConnContainer at construction time. Write, Close, and LocalAddr delegate to these closures. This removes the direct dependency while keeping the identity-check logic: writeTo and closeConn now compare connContainer pointers instead of Conn pointers to verify the caller is the current active connection for that peer. --- shared/relay/client/client.go | 53 ++++++++++++++++++++++++----------- shared/relay/client/conn.go | 32 ++++++--------------- 2 files changed, 46 insertions(+), 39 deletions(-) diff --git a/shared/relay/client/client.go b/shared/relay/client/client.go index e0e894eb1..ed1b63435 100644 --- a/shared/relay/client/client.go +++ b/shared/relay/client/client.go @@ -18,6 +18,7 @@ import ( const ( bufferSize = 8820 serverResponseTimeout = 8 * time.Second + connChannelSize = 100 ) var ( @@ -69,15 +70,37 @@ type connContainer struct { cancel context.CancelFunc } -func newConnContainer(log *log.Entry, conn *Conn, messages chan Msg) *connContainer { +func newConnContainer(log *log.Entry, c *Client, peerID messages.PeerID, instanceURL *RelayAddr) *connContainer { ctx, cancel := context.WithCancel(context.Background()) - return &connContainer{ + msgChan := make(chan Msg, connChannelSize) + cn := &Conn{ + dstID: peerID, + messageChan: msgChan, + instanceURL: instanceURL, + } + cc := &connContainer{ log: log, - conn: conn, - messages: messages, + conn: cn, + messages: msgChan, ctx: ctx, cancel: cancel, } + + // bind conn to client + cn.writeFn = func(dstID messages.PeerID, payload []byte) (int, error) { + return c.writeTo(cc, dstID, payload) + } + cn.closeFn = func(dstID messages.PeerID) error { + return c.closeConn(cc, dstID) + } + cn.localAddrFn = func() net.Addr { + return c.relayConn.LocalAddr() + } + return cc +} + +func (cc *connContainer) netConn() net.Conn { + return cc.conn } func (cc *connContainer) writeMsg(msg Msg) { @@ -235,9 +258,7 @@ func (c *Client) OpenConn(ctx context.Context, dstPeerID string) (net.Conn, erro instanceURL := c.instanceURL c.muInstanceURL.Unlock() - msgChannel := make(chan Msg, 100) - conn := NewConn(c, peerID, msgChannel, instanceURL) - container := newConnContainer(c.log, conn, msgChannel) + container := newConnContainer(c.log, c, peerID, instanceURL) c.conns[peerID] = container earlyMsg, hasEarly := c.earlyMsgs.pop(peerID) c.mu.Unlock() @@ -270,7 +291,7 @@ func (c *Client) OpenConn(ctx context.Context, dstPeerID string) (net.Conn, erro c.mu.Unlock() c.log.Infof("remote peer is available: %s", peerID) - return conn, nil + return container.netConn(), nil } // ServerInstanceURL returns the address of the relay server. It could change after the close and reopen the connection. @@ -500,15 +521,15 @@ func (c *Client) handleTransportMsg(buf []byte, bufPtr *[]byte, internallyStoppe return true } -func (c *Client) writeTo(connReference *Conn, dstID messages.PeerID, payload []byte) (int, error) { +func (c *Client) writeTo(containerRef *connContainer, dstID messages.PeerID, payload []byte) (int, error) { c.mu.Lock() - conn, ok := c.conns[dstID] + current, ok := c.conns[dstID] c.mu.Unlock() if !ok { return 0, net.ErrClosed } - if conn.conn != connReference { + if current != containerRef { return 0, net.ErrClosed } @@ -582,26 +603,26 @@ func (c *Client) closeConnsByPeerID(peerIDs []messages.PeerID) { } } -func (c *Client) closeConn(connReference *Conn, id messages.PeerID) error { +func (c *Client) closeConn(containerRef *connContainer, id messages.PeerID) error { c.mu.Lock() defer c.mu.Unlock() - container, ok := c.conns[id] + current, ok := c.conns[id] if !ok { return net.ErrClosed } - if container.conn != connReference { + if current != containerRef { return fmt.Errorf("conn reference mismatch") } if err := c.stateSubscription.UnsubscribeStateChange([]messages.PeerID{id}); err != nil { - container.log.Errorf("failed to unsubscribe from peer state change: %s", err) + current.log.Errorf("failed to unsubscribe from peer state change: %s", err) } c.log.Infof("free up connection to peer: %s", id) delete(c.conns, id) - container.close() + current.close() return nil } diff --git a/shared/relay/client/conn.go b/shared/relay/client/conn.go index 4e151aaa4..9e2279790 100644 --- a/shared/relay/client/conn.go +++ b/shared/relay/client/conn.go @@ -9,49 +9,35 @@ import ( // Conn represent a connection to a relayed remote peer. type Conn struct { - client *Client dstID messages.PeerID messageChan chan Msg instanceURL *RelayAddr -} - -// NewConn creates a new connection to a relayed remote peer. -// client: the client instance, it used to send messages to the destination peer -// dstID: the destination peer ID -// messageChan: the channel where the messages will be received -// instanceURL: the relay instance URL, it used to get the proper server instance address for the remote peer -func NewConn(client *Client, dstID messages.PeerID, messageChan chan Msg, instanceURL *RelayAddr) *Conn { - c := &Conn{ - client: client, - dstID: dstID, - messageChan: messageChan, - instanceURL: instanceURL, - } - - return c + writeFn func(messages.PeerID, []byte) (int, error) + closeFn func(messages.PeerID) error + localAddrFn func() net.Addr } func (c *Conn) Write(p []byte) (n int, err error) { - return c.client.writeTo(c, c.dstID, p) + return c.writeFn(c.dstID, p) } func (c *Conn) Read(b []byte) (n int, err error) { - msg, ok := <-c.messageChan + m, ok := <-c.messageChan if !ok { return 0, net.ErrClosed } - n = copy(b, msg.Payload) - msg.Free() + n = copy(b, m.Payload) + m.Free() return n, nil } func (c *Conn) Close() error { - return c.client.closeConn(c, c.dstID) + return c.closeFn(c.dstID) } func (c *Conn) LocalAddr() net.Addr { - return c.client.relayConn.LocalAddr() + return c.localAddrFn() } func (c *Conn) RemoteAddr() net.Addr { From f53155562f0c87d163423a5ee012a8bff8711739 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Fri, 13 Feb 2026 19:37:43 +0100 Subject: [PATCH 125/374] [management, reverse proxy] Add reverse proxy feature (#5291) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * implement reverse proxy --------- Co-authored-by: Alisdair MacLeod Co-authored-by: mlsmaycon Co-authored-by: Eduard Gert Co-authored-by: Viktor Liu Co-authored-by: Diego Noguês Co-authored-by: Diego Noguês <49420+diegocn@users.noreply.github.com> Co-authored-by: Bethuel Mmbaga Co-authored-by: Zoltan Papp Co-authored-by: Ashley Mensah --- .dockerignore | 6 + .../workflows/check-license-dependencies.yml | 10 +- .github/workflows/golang-test-darwin.yml | 2 +- .github/workflows/golang-test-freebsd.yml | 1 - .github/workflows/golang-test-linux.yml | 61 +- .github/workflows/golang-test-windows.yml | 2 +- .github/workflows/golangci-lint.yml | 4 +- .github/workflows/release.yml | 16 +- .gitignore | 1 + .goreleaser.yaml | 87 + LICENSE | 2 +- client/embed/embed.go | 22 +- client/firewall/uspfilter/conntrack/tcp.go | 15 +- .../firewall/uspfilter/conntrack/tcp_test.go | 255 ++ client/firewall/uspfilter/log/log.go | 15 +- client/internal/engine.go | 3 +- client/internal/networkmonitor/monitor.go | 6 - combined/Dockerfile.multistage | 25 + combined/LICENSE | 661 +++ combined/cmd/config.go | 8 + combined/cmd/root.go | 2 + combined/cmd/token.go | 60 + go.mod | 2 +- infrastructure_files/getting-started.sh | 240 +- management/Dockerfile.multistage | 17 + management/cmd/management.go | 5 + management/cmd/root.go | 4 + management/cmd/token.go | 55 + management/cmd/token/token.go | 185 + management/cmd/token/token_test.go | 101 + .../network_map/controller/controller.go | 5 + management/internals/modules/peers/manager.go | 35 + .../internals/modules/peers/manager_mock.go | 14 + .../reverseproxy/accesslogs/accesslogentry.go | 105 + .../modules/reverseproxy/accesslogs/filter.go | 109 + .../reverseproxy/accesslogs/filter_test.go | 371 ++ .../reverseproxy/accesslogs/interface.go | 10 + .../reverseproxy/accesslogs/manager/api.go | 64 + .../accesslogs/manager/manager.go | 108 + .../modules/reverseproxy/domain/domain.go | 17 + .../modules/reverseproxy/domain/interface.go | 12 + .../reverseproxy/domain/manager/api.go | 136 + .../reverseproxy/domain/manager/manager.go | 279 ++ .../modules/reverseproxy/domain/validator.go | 88 + .../reverseproxy/domain/validator_test.go | 56 + .../modules/reverseproxy/interface.go | 23 + .../modules/reverseproxy/interface_mock.go | 225 + .../modules/reverseproxy/manager/api.go | 170 + .../modules/reverseproxy/manager/manager.go | 541 +++ .../reverseproxy/manager/manager_test.go | 375 ++ .../modules/reverseproxy/reverseproxy.go | 463 ++ .../modules/reverseproxy/reverseproxy_test.go | 405 ++ .../reverseproxy/sessionkey/sessionkey.go | 69 + management/internals/server/boot.go | 53 +- management/internals/server/config/config.go | 4 + management/internals/server/modules.go | 23 +- management/internals/server/server.go | 17 +- .../internals/shared/grpc/onetime_token.go | 167 + management/internals/shared/grpc/proxy.go | 1083 +++++ .../internals/shared/grpc/proxy_auth.go | 234 + .../shared/grpc/proxy_auth_ratelimit.go | 134 + .../shared/grpc/proxy_auth_ratelimit_test.go | 98 + .../shared/grpc/proxy_group_access_test.go | 381 ++ .../internals/shared/grpc/proxy_test.go | 232 + .../shared/grpc/validate_session_test.go | 304 ++ management/server/account.go | 13 +- management/server/account/manager.go | 2 + management/server/account_test.go | 12 + management/server/activity/codes.go | 8 + management/server/group_test.go | 2 +- management/server/http/handler.go | 28 +- .../http/handlers/peers/peers_handler.go | 8 + management/server/http/handlers/proxy/auth.go | 208 + .../proxy/auth_callback_integration_test.go | 523 +++ .../server/http/handlers/proxy/auth_test.go | 185 + .../testing/testing_tools/channel/channel.go | 14 +- management/server/idp/auth0.go | 2 +- management/server/idp/authentik.go | 2 +- management/server/idp/azure.go | 4 +- management/server/idp/embedded.go | 8 +- management/server/idp/google_workspace.go | 2 +- management/server/idp/keycloak.go | 2 +- management/server/idp/pocketid.go | 2 +- management/server/idp/util.go | 2 +- management/server/idp/zitadel.go | 2 +- management/server/mock_server/account_mock.go | 5 + management/server/networks/manager_test.go | 20 +- .../server/networks/resources/manager.go | 39 +- .../server/networks/resources/manager_test.go | 69 +- management/server/peer.go | 195 +- management/server/peer/peer.go | 9 + management/server/peer_test.go | 249 ++ .../server/permissions/modules/module.go | 58 +- management/server/store/sql_store.go | 584 ++- .../server/store/sqlstore_bench_test.go | 3 +- management/server/store/store.go | 30 + management/server/store/store_mock.go | 2745 ++++++++++++ management/server/testdata/auth_callback.sql | 17 + management/server/types/account.go | 116 +- .../server/types/networkmap_golden_test.go | 19 +- management/server/types/proxy.go | 7 + management/server/types/proxy_access_token.go | 137 + .../server/types/proxy_access_token_test.go | 155 + management/server/util/util.go | 1 - proxy/Dockerfile | 19 + proxy/Dockerfile.multistage | 37 + proxy/LICENSE | 661 +++ proxy/README.md | 80 + proxy/auth/auth.go | 76 + proxy/cmd/proxy/cmd/debug.go | 173 + proxy/cmd/proxy/cmd/root.go | 210 + proxy/cmd/proxy/main.go | 26 + proxy/handle_mapping_stream_test.go | 94 + proxy/internal/accesslog/logger.go | 105 + proxy/internal/accesslog/middleware.go | 74 + proxy/internal/accesslog/requestip.go | 16 + proxy/internal/accesslog/statuswriter.go | 26 + proxy/internal/acme/locker.go | 102 + proxy/internal/acme/locker_k8s.go | 197 + proxy/internal/acme/locker_test.go | 65 + proxy/internal/acme/manager.go | 336 ++ proxy/internal/acme/manager_test.go | 102 + proxy/internal/auth/auth.gohtml | 18 + proxy/internal/auth/middleware.go | 364 ++ proxy/internal/auth/middleware_test.go | 660 +++ proxy/internal/auth/oidc.go | 65 + proxy/internal/auth/password.go | 61 + proxy/internal/auth/pin.go | 61 + proxy/internal/certwatch/watcher.go | 279 ++ proxy/internal/certwatch/watcher_test.go | 292 ++ proxy/internal/debug/client.go | 388 ++ proxy/internal/debug/client_test.go | 71 + proxy/internal/debug/handler.go | 712 +++ proxy/internal/debug/templates/base.html | 101 + .../debug/templates/client_detail.html | 19 + proxy/internal/debug/templates/clients.html | 33 + proxy/internal/debug/templates/index.html | 58 + proxy/internal/debug/templates/tools.html | 142 + proxy/internal/flock/flock_other.go | 20 + proxy/internal/flock/flock_test.go | 79 + proxy/internal/flock/flock_unix.go | 77 + proxy/internal/grpc/auth.go | 48 + proxy/internal/health/health.go | 405 ++ proxy/internal/health/health_test.go | 473 ++ proxy/internal/k8s/lease.go | 281 ++ proxy/internal/k8s/lease_test.go | 102 + proxy/internal/metrics/metrics.go | 149 + proxy/internal/metrics/metrics_test.go | 67 + proxy/internal/proxy/context.go | 187 + proxy/internal/proxy/proxy_bench_test.go | 130 + proxy/internal/proxy/reverseproxy.go | 406 ++ proxy/internal/proxy/reverseproxy_test.go | 966 ++++ proxy/internal/proxy/servicemapping.go | 84 + proxy/internal/proxy/trustedproxy.go | 60 + proxy/internal/proxy/trustedproxy_test.go | 129 + proxy/internal/roundtrip/netbird.go | 575 +++ .../internal/roundtrip/netbird_bench_test.go | 107 + proxy/internal/roundtrip/netbird_test.go | 328 ++ proxy/internal/roundtrip/transport.go | 152 + proxy/internal/types/types.go | 5 + proxy/log.go | 21 + proxy/management_integration_test.go | 548 +++ proxy/server.go | 653 +++ proxy/server_test.go | 48 + proxy/trustedproxy.go | 43 + proxy/trustedproxy_test.go | 90 + proxy/web/.gitignore | 23 + .../Inter-Italic-VariableFont_opsz_wght.ttf | Bin 0 -> 904532 bytes .../assets/Inter-VariableFont_opsz_wght.ttf | Bin 0 -> 874708 bytes proxy/web/dist/assets/favicon.ico | Bin 0 -> 15086 bytes proxy/web/dist/assets/index.js | 9 + proxy/web/dist/assets/netbird-full.svg | 19 + proxy/web/dist/assets/style.css | 1 + proxy/web/dist/index.html | 19 + proxy/web/dist/robots.txt | 2 + proxy/web/eslint.config.js | 23 + proxy/web/index.html | 18 + proxy/web/package-lock.json | 3952 +++++++++++++++++ proxy/web/package.json | 36 + proxy/web/public/robots.txt | 2 + proxy/web/src/App.tsx | 227 + proxy/web/src/ErrorPage.tsx | 73 + proxy/web/src/assets/favicon.ico | Bin 0 -> 15086 bytes .../Inter-Italic-VariableFont_opsz,wght.ttf | Bin 0 -> 904532 bytes .../fonts/Inter-VariableFont_opsz,wght.ttf | Bin 0 -> 874708 bytes proxy/web/src/assets/netbird-full.svg | 19 + proxy/web/src/assets/netbird.svg | 5 + proxy/web/src/components/Button.tsx | 156 + proxy/web/src/components/Card.tsx | 23 + proxy/web/src/components/ConnectionLine.tsx | 26 + proxy/web/src/components/Description.tsx | 14 + proxy/web/src/components/ErrorMessage.tsx | 7 + .../components/GradientFadedBackground.tsx | 22 + proxy/web/src/components/HelpText.tsx | 19 + proxy/web/src/components/Input.tsx | 137 + proxy/web/src/components/Label.tsx | 19 + proxy/web/src/components/NetBirdLogo.tsx | 46 + proxy/web/src/components/PinCodeInput.tsx | 109 + proxy/web/src/components/PoweredByNetBird.tsx | 17 + proxy/web/src/components/SegmentedTabs.tsx | 145 + proxy/web/src/components/Separator.tsx | 10 + proxy/web/src/components/StatusCard.tsx | 38 + proxy/web/src/components/TabContext.tsx | 13 + proxy/web/src/components/Title.tsx | 14 + proxy/web/src/data.ts | 54 + proxy/web/src/index.css | 213 + proxy/web/src/main.tsx | 18 + proxy/web/src/utils/helpers.ts | 6 + proxy/web/src/vite-env.d.ts | 6 + proxy/web/tsconfig.json | 22 + proxy/web/vite.config.ts | 32 + proxy/web/web.go | 189 + shared/hash/argon2id/argon2id.go | 136 + shared/hash/argon2id/argon2id_test.go | 327 ++ shared/management/http/api/openapi.yml | 777 +++- shared/management/http/api/types.gen.go | 349 ++ shared/management/proto/generate.sh | 1 + shared/management/proto/management.pb.go | 2 +- shared/management/proto/proxy_service.pb.go | 2061 +++++++++ shared/management/proto/proxy_service.proto | 185 + .../management/proto/proxy_service_grpc.pb.go | 349 ++ shared/management/status/error.go | 8 + util/log.go | 29 +- util/syslog_nonwindows.go | 8 +- util/syslog_windows.go | 7 + 225 files changed, 35513 insertions(+), 235 deletions(-) create mode 100644 .dockerignore create mode 100644 combined/Dockerfile.multistage create mode 100644 combined/LICENSE create mode 100644 combined/cmd/token.go create mode 100644 management/Dockerfile.multistage create mode 100644 management/cmd/token.go create mode 100644 management/cmd/token/token.go create mode 100644 management/cmd/token/token_test.go create mode 100644 management/internals/modules/reverseproxy/accesslogs/accesslogentry.go create mode 100644 management/internals/modules/reverseproxy/accesslogs/filter.go create mode 100644 management/internals/modules/reverseproxy/accesslogs/filter_test.go create mode 100644 management/internals/modules/reverseproxy/accesslogs/interface.go create mode 100644 management/internals/modules/reverseproxy/accesslogs/manager/api.go create mode 100644 management/internals/modules/reverseproxy/accesslogs/manager/manager.go create mode 100644 management/internals/modules/reverseproxy/domain/domain.go create mode 100644 management/internals/modules/reverseproxy/domain/interface.go create mode 100644 management/internals/modules/reverseproxy/domain/manager/api.go create mode 100644 management/internals/modules/reverseproxy/domain/manager/manager.go create mode 100644 management/internals/modules/reverseproxy/domain/validator.go create mode 100644 management/internals/modules/reverseproxy/domain/validator_test.go create mode 100644 management/internals/modules/reverseproxy/interface.go create mode 100644 management/internals/modules/reverseproxy/interface_mock.go create mode 100644 management/internals/modules/reverseproxy/manager/api.go create mode 100644 management/internals/modules/reverseproxy/manager/manager.go create mode 100644 management/internals/modules/reverseproxy/manager/manager_test.go create mode 100644 management/internals/modules/reverseproxy/reverseproxy.go create mode 100644 management/internals/modules/reverseproxy/reverseproxy_test.go create mode 100644 management/internals/modules/reverseproxy/sessionkey/sessionkey.go create mode 100644 management/internals/shared/grpc/onetime_token.go create mode 100644 management/internals/shared/grpc/proxy.go create mode 100644 management/internals/shared/grpc/proxy_auth.go create mode 100644 management/internals/shared/grpc/proxy_auth_ratelimit.go create mode 100644 management/internals/shared/grpc/proxy_auth_ratelimit_test.go create mode 100644 management/internals/shared/grpc/proxy_group_access_test.go create mode 100644 management/internals/shared/grpc/proxy_test.go create mode 100644 management/internals/shared/grpc/validate_session_test.go create mode 100644 management/server/http/handlers/proxy/auth.go create mode 100644 management/server/http/handlers/proxy/auth_callback_integration_test.go create mode 100644 management/server/http/handlers/proxy/auth_test.go create mode 100644 management/server/store/store_mock.go create mode 100644 management/server/testdata/auth_callback.sql create mode 100644 management/server/types/proxy.go create mode 100644 management/server/types/proxy_access_token.go create mode 100644 management/server/types/proxy_access_token_test.go create mode 100644 proxy/Dockerfile create mode 100644 proxy/Dockerfile.multistage create mode 100644 proxy/LICENSE create mode 100644 proxy/README.md create mode 100644 proxy/auth/auth.go create mode 100644 proxy/cmd/proxy/cmd/debug.go create mode 100644 proxy/cmd/proxy/cmd/root.go create mode 100644 proxy/cmd/proxy/main.go create mode 100644 proxy/handle_mapping_stream_test.go create mode 100644 proxy/internal/accesslog/logger.go create mode 100644 proxy/internal/accesslog/middleware.go create mode 100644 proxy/internal/accesslog/requestip.go create mode 100644 proxy/internal/accesslog/statuswriter.go create mode 100644 proxy/internal/acme/locker.go create mode 100644 proxy/internal/acme/locker_k8s.go create mode 100644 proxy/internal/acme/locker_test.go create mode 100644 proxy/internal/acme/manager.go create mode 100644 proxy/internal/acme/manager_test.go create mode 100644 proxy/internal/auth/auth.gohtml create mode 100644 proxy/internal/auth/middleware.go create mode 100644 proxy/internal/auth/middleware_test.go create mode 100644 proxy/internal/auth/oidc.go create mode 100644 proxy/internal/auth/password.go create mode 100644 proxy/internal/auth/pin.go create mode 100644 proxy/internal/certwatch/watcher.go create mode 100644 proxy/internal/certwatch/watcher_test.go create mode 100644 proxy/internal/debug/client.go create mode 100644 proxy/internal/debug/client_test.go create mode 100644 proxy/internal/debug/handler.go create mode 100644 proxy/internal/debug/templates/base.html create mode 100644 proxy/internal/debug/templates/client_detail.html create mode 100644 proxy/internal/debug/templates/clients.html create mode 100644 proxy/internal/debug/templates/index.html create mode 100644 proxy/internal/debug/templates/tools.html create mode 100644 proxy/internal/flock/flock_other.go create mode 100644 proxy/internal/flock/flock_test.go create mode 100644 proxy/internal/flock/flock_unix.go create mode 100644 proxy/internal/grpc/auth.go create mode 100644 proxy/internal/health/health.go create mode 100644 proxy/internal/health/health_test.go create mode 100644 proxy/internal/k8s/lease.go create mode 100644 proxy/internal/k8s/lease_test.go create mode 100644 proxy/internal/metrics/metrics.go create mode 100644 proxy/internal/metrics/metrics_test.go create mode 100644 proxy/internal/proxy/context.go create mode 100644 proxy/internal/proxy/proxy_bench_test.go create mode 100644 proxy/internal/proxy/reverseproxy.go create mode 100644 proxy/internal/proxy/reverseproxy_test.go create mode 100644 proxy/internal/proxy/servicemapping.go create mode 100644 proxy/internal/proxy/trustedproxy.go create mode 100644 proxy/internal/proxy/trustedproxy_test.go create mode 100644 proxy/internal/roundtrip/netbird.go create mode 100644 proxy/internal/roundtrip/netbird_bench_test.go create mode 100644 proxy/internal/roundtrip/netbird_test.go create mode 100644 proxy/internal/roundtrip/transport.go create mode 100644 proxy/internal/types/types.go create mode 100644 proxy/log.go create mode 100644 proxy/management_integration_test.go create mode 100644 proxy/server.go create mode 100644 proxy/server_test.go create mode 100644 proxy/trustedproxy.go create mode 100644 proxy/trustedproxy_test.go create mode 100644 proxy/web/.gitignore create mode 100644 proxy/web/dist/assets/Inter-Italic-VariableFont_opsz_wght.ttf create mode 100644 proxy/web/dist/assets/Inter-VariableFont_opsz_wght.ttf create mode 100644 proxy/web/dist/assets/favicon.ico create mode 100644 proxy/web/dist/assets/index.js create mode 100644 proxy/web/dist/assets/netbird-full.svg create mode 100644 proxy/web/dist/assets/style.css create mode 100644 proxy/web/dist/index.html create mode 100644 proxy/web/dist/robots.txt create mode 100644 proxy/web/eslint.config.js create mode 100644 proxy/web/index.html create mode 100644 proxy/web/package-lock.json create mode 100644 proxy/web/package.json create mode 100644 proxy/web/public/robots.txt create mode 100644 proxy/web/src/App.tsx create mode 100644 proxy/web/src/ErrorPage.tsx create mode 100644 proxy/web/src/assets/favicon.ico create mode 100644 proxy/web/src/assets/fonts/Inter-Italic-VariableFont_opsz,wght.ttf create mode 100644 proxy/web/src/assets/fonts/Inter-VariableFont_opsz,wght.ttf create mode 100644 proxy/web/src/assets/netbird-full.svg create mode 100644 proxy/web/src/assets/netbird.svg create mode 100644 proxy/web/src/components/Button.tsx create mode 100644 proxy/web/src/components/Card.tsx create mode 100644 proxy/web/src/components/ConnectionLine.tsx create mode 100644 proxy/web/src/components/Description.tsx create mode 100644 proxy/web/src/components/ErrorMessage.tsx create mode 100644 proxy/web/src/components/GradientFadedBackground.tsx create mode 100644 proxy/web/src/components/HelpText.tsx create mode 100644 proxy/web/src/components/Input.tsx create mode 100644 proxy/web/src/components/Label.tsx create mode 100644 proxy/web/src/components/NetBirdLogo.tsx create mode 100644 proxy/web/src/components/PinCodeInput.tsx create mode 100644 proxy/web/src/components/PoweredByNetBird.tsx create mode 100644 proxy/web/src/components/SegmentedTabs.tsx create mode 100644 proxy/web/src/components/Separator.tsx create mode 100644 proxy/web/src/components/StatusCard.tsx create mode 100644 proxy/web/src/components/TabContext.tsx create mode 100644 proxy/web/src/components/Title.tsx create mode 100644 proxy/web/src/data.ts create mode 100644 proxy/web/src/index.css create mode 100644 proxy/web/src/main.tsx create mode 100644 proxy/web/src/utils/helpers.ts create mode 100644 proxy/web/src/vite-env.d.ts create mode 100644 proxy/web/tsconfig.json create mode 100644 proxy/web/vite.config.ts create mode 100644 proxy/web/web.go create mode 100644 shared/hash/argon2id/argon2id.go create mode 100644 shared/hash/argon2id/argon2id_test.go create mode 100644 shared/management/proto/proxy_service.pb.go create mode 100644 shared/management/proto/proxy_service.proto create mode 100644 shared/management/proto/proxy_service_grpc.pb.go diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..a546f5f5e --- /dev/null +++ b/.dockerignore @@ -0,0 +1,6 @@ +.env +.env.* +*.pem +*.key +*.crt +*.p12 diff --git a/.github/workflows/check-license-dependencies.yml b/.github/workflows/check-license-dependencies.yml index 543ba2ab2..d1d2a8e50 100644 --- a/.github/workflows/check-license-dependencies.yml +++ b/.github/workflows/check-license-dependencies.yml @@ -23,7 +23,7 @@ jobs: - name: Check for problematic license dependencies run: | - echo "Checking for dependencies on management/, signal/, and relay/ packages..." + echo "Checking for dependencies on management/, signal/, relay/, and proxy/ packages..." echo "" # Find all directories except the problematic ones and system dirs @@ -31,7 +31,7 @@ jobs: while IFS= read -r dir; do echo "=== Checking $dir ===" # Search for problematic imports, excluding test files - RESULTS=$(grep -r "github.com/netbirdio/netbird/\(management\|signal\|relay\)" "$dir" --include="*.go" 2>/dev/null | grep -v "_test.go" | grep -v "test_" | grep -v "/test/" || true) + RESULTS=$(grep -r "github.com/netbirdio/netbird/\(management\|signal\|relay\|proxy\)" "$dir" --include="*.go" 2>/dev/null | grep -v "_test.go" | grep -v "test_" | grep -v "/test/" || true) if [ -n "$RESULTS" ]; then echo "❌ Found problematic dependencies:" echo "$RESULTS" @@ -39,11 +39,11 @@ jobs: else echo "✓ No problematic dependencies found" fi - done < <(find . -maxdepth 1 -type d -not -name "." -not -name "management" -not -name "signal" -not -name "relay" -not -name ".git*" | sort) + done < <(find . -maxdepth 1 -type d -not -name "." -not -name "management" -not -name "signal" -not -name "relay" -not -name "proxy" -not -name "combined" -not -name ".git*" | sort) echo "" if [ $FOUND_ISSUES -eq 1 ]; then - echo "❌ Found dependencies on management/, signal/, or relay/ packages" + echo "❌ Found dependencies on management/, signal/, relay/, or proxy/ packages" echo "These packages are licensed under AGPLv3 and must not be imported by BSD-licensed code" exit 1 else @@ -88,7 +88,7 @@ jobs: IMPORTERS=$(go list -json -deps ./... 2>/dev/null | jq -r "select(.Imports[]? == \"$package\") | .ImportPath") # Check if any importer is NOT in management/signal/relay - BSD_IMPORTER=$(echo "$IMPORTERS" | grep -v "github.com/netbirdio/netbird/\(management\|signal\|relay\)" | head -1) + BSD_IMPORTER=$(echo "$IMPORTERS" | grep -v "github.com/netbirdio/netbird/\(management\|signal\|relay\|proxy\|combined\)" | head -1) if [ -n "$BSD_IMPORTER" ]; then echo "❌ $package ($license) is imported by BSD-licensed code: $BSD_IMPORTER" diff --git a/.github/workflows/golang-test-darwin.yml b/.github/workflows/golang-test-darwin.yml index 9c4c35d21..0528ed086 100644 --- a/.github/workflows/golang-test-darwin.yml +++ b/.github/workflows/golang-test-darwin.yml @@ -43,5 +43,5 @@ jobs: run: git --no-pager diff --exit-code - name: Test - run: NETBIRD_STORE_ENGINE=${{ matrix.store }} CI=true go test -tags=devcert -exec 'sudo --preserve-env=CI,NETBIRD_STORE_ENGINE' -timeout 5m -p 1 $(go list ./... | grep -v /management) + run: NETBIRD_STORE_ENGINE=${{ matrix.store }} CI=true go test -tags=devcert -exec 'sudo --preserve-env=CI,NETBIRD_STORE_ENGINE' -timeout 5m -p 1 $(go list ./... | grep -v -e /management -e /signal -e /relay -e /proxy -e /combined) diff --git a/.github/workflows/golang-test-freebsd.yml b/.github/workflows/golang-test-freebsd.yml index df64e86bb..2c029b117 100644 --- a/.github/workflows/golang-test-freebsd.yml +++ b/.github/workflows/golang-test-freebsd.yml @@ -46,6 +46,5 @@ jobs: time go test -timeout 1m -failfast ./client/iface/... time go test -timeout 1m -failfast ./route/... time go test -timeout 1m -failfast ./sharedsock/... - time go test -timeout 1m -failfast ./signal/... time go test -timeout 1m -failfast ./util/... time go test -timeout 1m -failfast ./version/... diff --git a/.github/workflows/golang-test-linux.yml b/.github/workflows/golang-test-linux.yml index 195a37a1f..3c4674fc6 100644 --- a/.github/workflows/golang-test-linux.yml +++ b/.github/workflows/golang-test-linux.yml @@ -97,6 +97,16 @@ jobs: working-directory: relay run: CGO_ENABLED=1 GOARCH=386 go build -o relay-386 . + - name: Build combined + if: steps.cache.outputs.cache-hit != 'true' + working-directory: combined + run: CGO_ENABLED=1 go build . + + - name: Build combined 386 + if: steps.cache.outputs.cache-hit != 'true' + working-directory: combined + run: CGO_ENABLED=1 GOARCH=386 go build -o combined-386 . + test: name: "Client / Unit" needs: [build-cache] @@ -144,7 +154,7 @@ jobs: run: git --no-pager diff --exit-code - name: Test - run: CGO_ENABLED=1 GOARCH=${{ matrix.arch }} CI=true go test -tags devcert -exec 'sudo' -timeout 10m -p 1 $(go list ./... | grep -v -e /management -e /signal -e /relay) + run: CGO_ENABLED=1 GOARCH=${{ matrix.arch }} CI=true go test -tags devcert -exec 'sudo' -timeout 10m -p 1 $(go list ./... | grep -v -e /management -e /signal -e /relay -e /proxy -e /combined) test_client_on_docker: name: "Client (Docker) / Unit" @@ -204,7 +214,7 @@ jobs: sh -c ' \ apk update; apk add --no-cache \ ca-certificates iptables ip6tables dbus dbus-dev libpcap-dev build-base; \ - go test -buildvcs=false -tags devcert -v -timeout 10m -p 1 $(go list -buildvcs=false ./... | grep -v -e /management -e /signal -e /relay -e /client/ui -e /upload-server) + go test -buildvcs=false -tags devcert -v -timeout 10m -p 1 $(go list -buildvcs=false ./... | grep -v -e /management -e /signal -e /relay -e /proxy -e /combined -e /client/ui -e /upload-server) ' test_relay: @@ -261,6 +271,53 @@ jobs: -exec 'sudo' \ -timeout 10m -p 1 ./relay/... ./shared/relay/... + test_proxy: + name: "Proxy / Unit" + needs: [build-cache] + strategy: + fail-fast: false + matrix: + arch: [ '386','amd64' ] + runs-on: ubuntu-22.04 + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version-file: "go.mod" + cache: false + + - name: Install dependencies + run: sudo apt update && sudo apt install -y gcc-multilib g++-multilib libc6-dev-i386 + + - name: Get Go environment + run: | + echo "cache=$(go env GOCACHE)" >> $GITHUB_ENV + echo "modcache=$(go env GOMODCACHE)" >> $GITHUB_ENV + + - name: Cache Go modules + uses: actions/cache/restore@v4 + with: + path: | + ${{ env.cache }} + ${{ env.modcache }} + key: ${{ runner.os }}-gotest-cache-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-gotest-cache- + + - name: Install modules + run: go mod tidy + + - name: check git status + run: git --no-pager diff --exit-code + + - name: Test + run: | + CGO_ENABLED=1 GOARCH=${{ matrix.arch }} \ + go test -timeout 10m -p 1 ./proxy/... + test_signal: name: "Signal / Unit" needs: [build-cache] diff --git a/.github/workflows/golang-test-windows.yml b/.github/workflows/golang-test-windows.yml index 43357c45f..8af4046a7 100644 --- a/.github/workflows/golang-test-windows.yml +++ b/.github/workflows/golang-test-windows.yml @@ -63,7 +63,7 @@ jobs: - run: PsExec64 -s -w ${{ github.workspace }} C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe env -w GOMODCACHE=${{ env.cache }} - run: PsExec64 -s -w ${{ github.workspace }} C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe env -w GOCACHE=${{ env.modcache }} - run: PsExec64 -s -w ${{ github.workspace }} C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe mod tidy - - run: echo "files=$(go list ./... | ForEach-Object { $_ } | Where-Object { $_ -notmatch '/management' } | Where-Object { $_ -notmatch '/relay' } | Where-Object { $_ -notmatch '/signal' })" >> $env:GITHUB_ENV + - run: echo "files=$(go list ./... | ForEach-Object { $_ } | Where-Object { $_ -notmatch '/management' } | Where-Object { $_ -notmatch '/relay' } | Where-Object { $_ -notmatch '/signal' } | Where-Object { $_ -notmatch '/proxy' } | Where-Object { $_ -notmatch '/combined' })" >> $env:GITHUB_ENV - name: test run: PsExec64 -s -w ${{ github.workspace }} cmd.exe /c "C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe test -tags=devcert -timeout 10m -p 1 ${{ env.files }} > test-out.txt 2>&1" diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 19a3a01e0..56450d45f 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -19,8 +19,8 @@ jobs: - name: codespell uses: codespell-project/actions-codespell@v2 with: - ignore_words_list: erro,clienta,hastable,iif,groupd,testin,groupe,cros,ans - skip: go.mod,go.sum + ignore_words_list: erro,clienta,hastable,iif,groupd,testin,groupe,cros,ans,deriver + skip: go.mod,go.sum,**/proxy/web/** golangci: strategy: fail-fast: false diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 967e0c7d7..d1f085b47 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -160,7 +160,7 @@ jobs: username: ${{ secrets.DOCKER_USER }} password: ${{ secrets.DOCKER_TOKEN }} - name: Log in to the GitHub container registry - if: github.event_name != 'pull_request' + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository uses: docker/login-action@v3 with: registry: ghcr.io @@ -176,6 +176,7 @@ jobs: - name: Generate windows syso arm64 run: goversioninfo -arm -64 -icon client/ui/assets/netbird.ico -manifest client/manifest.xml -product-name ${{ env.PRODUCT_NAME }} -copyright "${{ env.COPYRIGHT }}" -ver-major ${{ steps.semver_parser.outputs.major }} -ver-minor ${{ steps.semver_parser.outputs.minor }} -ver-patch ${{ steps.semver_parser.outputs.patch }} -ver-build 0 -file-version ${{ steps.semver_parser.outputs.fullversion }}.0 -product-version ${{ steps.semver_parser.outputs.fullversion }}.0 -o client/resources_windows_arm64.syso - name: Run GoReleaser + id: goreleaser uses: goreleaser/goreleaser-action@v4 with: version: ${{ env.GORELEASER_VER }} @@ -185,6 +186,19 @@ jobs: HOMEBREW_TAP_GITHUB_TOKEN: ${{ secrets.HOMEBREW_TAP_GITHUB_TOKEN }} UPLOAD_DEBIAN_SECRET: ${{ secrets.PKG_UPLOAD_SECRET }} UPLOAD_YUM_SECRET: ${{ secrets.PKG_UPLOAD_SECRET }} + - name: Tag and push PR images (amd64 only) + if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository + run: | + PR_TAG="pr-${{ github.event.pull_request.number }}" + echo '${{ steps.goreleaser.outputs.artifacts }}' | \ + jq -r '.[] | select(.type == "Docker Image") | select(.goarch == "amd64") | .name' | \ + grep '^ghcr.io/' | while read -r SRC; do + IMG_NAME="${SRC%%:*}" + DST="${IMG_NAME}:${PR_TAG}" + echo "Tagging ${SRC} -> ${DST}" + docker tag "$SRC" "$DST" + docker push "$DST" + done - name: upload non tags for debug purposes uses: actions/upload-artifact@v4 with: diff --git a/.gitignore b/.gitignore index 89024d190..a0f128933 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ .run *.iml dist/ +!proxy/web/dist/ bin/ .env conf.json diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 743822649..c0a5efbbe 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -140,6 +140,20 @@ builds: - -s -w -X github.com/netbirdio/netbird/version.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} -X main.builtBy=goreleaser mod_timestamp: "{{ .CommitTimestamp }}" + - id: netbird-proxy + dir: proxy/cmd/proxy + env: [CGO_ENABLED=0] + binary: netbird-proxy + goos: + - linux + goarch: + - amd64 + - arm64 + - arm + ldflags: + - -s -w -X main.Version={{.Version}} -X main.Commit={{.Commit}} -X main.BuildDate={{.CommitDate}} + mod_timestamp: "{{ .CommitTimestamp }}" + universal_binaries: - id: netbird @@ -589,6 +603,55 @@ dockers: - "--label=org.opencontainers.image.revision={{.FullCommit}}" - "--label=org.opencontainers.image.source=https://github.com/netbirdio/{{.ProjectName}}" - "--label=maintainer=dev@netbird.io" + - image_templates: + - netbirdio/reverse-proxy:{{ .Version }}-amd64 + - ghcr.io/netbirdio/reverse-proxy:{{ .Version }}-amd64 + ids: + - netbird-proxy + goarch: amd64 + use: buildx + dockerfile: proxy/Dockerfile + build_flag_templates: + - "--platform=linux/amd64" + - "--label=org.opencontainers.image.created={{.Date}}" + - "--label=org.opencontainers.image.title={{.ProjectName}}" + - "--label=org.opencontainers.image.version={{.Version}}" + - "--label=org.opencontainers.image.revision={{.FullCommit}}" + - "--label=org.opencontainers.image.source=https://github.com/netbirdio/{{.ProjectName}}" + - "--label=maintainer=dev@netbird.io" + - image_templates: + - netbirdio/reverse-proxy:{{ .Version }}-arm64v8 + - ghcr.io/netbirdio/reverse-proxy:{{ .Version }}-arm64v8 + ids: + - netbird-proxy + goarch: arm64 + use: buildx + dockerfile: proxy/Dockerfile + build_flag_templates: + - "--platform=linux/arm64" + - "--label=org.opencontainers.image.created={{.Date}}" + - "--label=org.opencontainers.image.title={{.ProjectName}}" + - "--label=org.opencontainers.image.version={{.Version}}" + - "--label=org.opencontainers.image.revision={{.FullCommit}}" + - "--label=org.opencontainers.image.source=https://github.com/netbirdio/{{.ProjectName}}" + - "--label=maintainer=dev@netbird.io" + - image_templates: + - netbirdio/reverse-proxy:{{ .Version }}-arm + - ghcr.io/netbirdio/reverse-proxy:{{ .Version }}-arm + ids: + - netbird-proxy + goarch: arm + goarm: 6 + use: buildx + dockerfile: proxy/Dockerfile + build_flag_templates: + - "--platform=linux/arm" + - "--label=org.opencontainers.image.created={{.Date}}" + - "--label=org.opencontainers.image.title={{.ProjectName}}" + - "--label=org.opencontainers.image.version={{.Version}}" + - "--label=org.opencontainers.image.revision={{.FullCommit}}" + - "--label=org.opencontainers.image.source=https://github.com/netbirdio/{{.ProjectName}}" + - "--label=maintainer=dev@netbird.io" docker_manifests: - name_template: netbirdio/netbird:{{ .Version }} image_templates: @@ -769,6 +832,30 @@ docker_manifests: - ghcr.io/netbirdio/netbird-server:{{ .Version }}-arm - ghcr.io/netbirdio/netbird-server:{{ .Version }}-amd64 + - name_template: netbirdio/reverse-proxy:{{ .Version }} + image_templates: + - netbirdio/reverse-proxy:{{ .Version }}-arm64v8 + - netbirdio/reverse-proxy:{{ .Version }}-arm + - netbirdio/reverse-proxy:{{ .Version }}-amd64 + + - name_template: netbirdio/reverse-proxy:latest + image_templates: + - netbirdio/reverse-proxy:{{ .Version }}-arm64v8 + - netbirdio/reverse-proxy:{{ .Version }}-arm + - netbirdio/reverse-proxy:{{ .Version }}-amd64 + + - name_template: ghcr.io/netbirdio/reverse-proxy:{{ .Version }} + image_templates: + - ghcr.io/netbirdio/reverse-proxy:{{ .Version }}-arm64v8 + - ghcr.io/netbirdio/reverse-proxy:{{ .Version }}-arm + - ghcr.io/netbirdio/reverse-proxy:{{ .Version }}-amd64 + + - name_template: ghcr.io/netbirdio/reverse-proxy:latest + image_templates: + - ghcr.io/netbirdio/reverse-proxy:{{ .Version }}-arm64v8 + - ghcr.io/netbirdio/reverse-proxy:{{ .Version }}-arm + - ghcr.io/netbirdio/reverse-proxy:{{ .Version }}-amd64 + brews: - ids: - default diff --git a/LICENSE b/LICENSE index 594691464..d922f155a 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -This BSD‑3‑Clause license applies to all parts of the repository except for the directories management/, signal/ and relay/. +This BSD‑3‑Clause license applies to all parts of the repository except for the directories management/, signal/, relay/ and combined/. Those directories are licensed under the GNU Affero General Public License version 3.0 (AGPLv3). See the respective LICENSE files inside each directory. BSD 3-Clause License diff --git a/client/embed/embed.go b/client/embed/embed.go index 2ad025ff0..4fbe0eada 100644 --- a/client/embed/embed.go +++ b/client/embed/embed.go @@ -31,6 +31,14 @@ var ( ErrConfigNotInitialized = errors.New("config not initialized") ) +// PeerConnStatus is a peer's connection status. +type PeerConnStatus = peer.ConnStatus + +const ( + // PeerStatusConnected indicates the peer is in connected state. + PeerStatusConnected = peer.StatusConnected +) + // Client manages a netbird embedded client instance. type Client struct { deviceName string @@ -162,6 +170,7 @@ func New(opts Options) (*Client, error) { setupKey: opts.SetupKey, jwtToken: opts.JWTToken, config: config, + recorder: peer.NewRecorder(config.ManagementURL.String()), }, nil } @@ -183,6 +192,7 @@ func (c *Client) Start(startCtx context.Context) error { // nolint:staticcheck ctx = context.WithValue(ctx, system.DeviceNameCtxKey, c.deviceName) + authClient, err := auth.NewAuth(ctx, c.config.PrivateKey, c.config.ManagementURL, c.config) if err != nil { return fmt.Errorf("create auth client: %w", err) @@ -192,10 +202,7 @@ func (c *Client) Start(startCtx context.Context) error { if err, _ := authClient.Login(ctx, c.setupKey, c.jwtToken); err != nil { return fmt.Errorf("login: %w", err) } - - recorder := peer.NewRecorder(c.config.ManagementURL.String()) - c.recorder = recorder - client := internal.NewConnectClient(ctx, c.config, recorder, false) + client := internal.NewConnectClient(ctx, c.config, c.recorder, false) client.SetSyncResponsePersistence(true) // either startup error (permanent backoff err) or nil err (successful engine up) @@ -348,14 +355,9 @@ func (c *Client) NewHTTPClient() *http.Client { // Status returns the current status of the client. func (c *Client) Status() (peer.FullStatus, error) { c.mu.Lock() - recorder := c.recorder connect := c.connect c.mu.Unlock() - if recorder == nil { - return peer.FullStatus{}, errors.New("client not started") - } - if connect != nil { engine := connect.Engine() if engine != nil { @@ -363,7 +365,7 @@ func (c *Client) Status() (peer.FullStatus, error) { } } - return recorder.GetFullStatus(), nil + return c.recorder.GetFullStatus(), nil } // GetLatestSyncResponse returns the latest sync response from the management server. diff --git a/client/firewall/uspfilter/conntrack/tcp.go b/client/firewall/uspfilter/conntrack/tcp.go index 8d64412e0..335a3abab 100644 --- a/client/firewall/uspfilter/conntrack/tcp.go +++ b/client/firewall/uspfilter/conntrack/tcp.go @@ -115,6 +115,17 @@ func (t *TCPConnTrack) IsTombstone() bool { return t.tombstone.Load() } +// IsSupersededBy returns true if this connection should be replaced by a new one +// carrying the given flags. Tombstoned connections are always superseded; TIME-WAIT +// connections are superseded by a pure SYN (a new connection attempt for the same +// four-tuple, as contemplated by RFC 1122 §4.2.2.13 and RFC 6191). +func (t *TCPConnTrack) IsSupersededBy(flags uint8) bool { + if t.tombstone.Load() { + return true + } + return flags&TCPSyn != 0 && flags&TCPAck == 0 && TCPState(t.state.Load()) == TCPStateTimeWait +} + // SetTombstone safely marks the connection for deletion func (t *TCPConnTrack) SetTombstone() { t.tombstone.Store(true) @@ -169,7 +180,7 @@ func (t *TCPTracker) updateIfExists(srcIP, dstIP netip.Addr, srcPort, dstPort ui conn, exists := t.connections[key] t.mutex.RUnlock() - if exists { + if exists && !conn.IsSupersededBy(flags) { t.updateState(key, conn, flags, direction, size) return key, uint16(conn.DNATOrigPort.Load()), true } @@ -241,7 +252,7 @@ func (t *TCPTracker) IsValidInbound(srcIP, dstIP netip.Addr, srcPort, dstPort ui conn, exists := t.connections[key] t.mutex.RUnlock() - if !exists || conn.IsTombstone() { + if !exists || conn.IsSupersededBy(flags) { return false } diff --git a/client/firewall/uspfilter/conntrack/tcp_test.go b/client/firewall/uspfilter/conntrack/tcp_test.go index bb440f70a..f46c5c1ab 100644 --- a/client/firewall/uspfilter/conntrack/tcp_test.go +++ b/client/firewall/uspfilter/conntrack/tcp_test.go @@ -485,6 +485,261 @@ func TestTCPAbnormalSequences(t *testing.T) { }) } +// TestTCPPortReuseTombstone verifies that a new connection on a port with a +// tombstoned (closed) conntrack entry is properly tracked. Without the fix, +// updateIfExists treats tombstoned entries as live, causing track() to skip +// creating a new connection. The subsequent SYN-ACK then fails IsValidInbound +// because the entry is tombstoned, and the response packet gets dropped by ACL. +func TestTCPPortReuseTombstone(t *testing.T) { + srcIP := netip.MustParseAddr("100.64.0.1") + dstIP := netip.MustParseAddr("100.64.0.2") + srcPort := uint16(12345) + dstPort := uint16(80) + + t.Run("Outbound port reuse after graceful close", func(t *testing.T) { + tracker := NewTCPTracker(DefaultTCPTimeout, logger, flowLogger) + defer tracker.Close() + + key := ConnKey{SrcIP: srcIP, DstIP: dstIP, SrcPort: srcPort, DstPort: dstPort} + + // Establish and gracefully close a connection (server-initiated close) + establishConnection(t, tracker, srcIP, dstIP, srcPort, dstPort) + + // Server sends FIN + valid := tracker.IsValidInbound(dstIP, srcIP, dstPort, srcPort, TCPFin|TCPAck, 0) + require.True(t, valid) + + // Client sends FIN-ACK + tracker.TrackOutbound(srcIP, dstIP, srcPort, dstPort, TCPFin|TCPAck, 0) + + // Server sends final ACK + valid = tracker.IsValidInbound(dstIP, srcIP, dstPort, srcPort, TCPAck, 0) + require.True(t, valid) + + // Connection should be tombstoned + conn := tracker.connections[key] + require.NotNil(t, conn, "old connection should still be in map") + require.True(t, conn.IsTombstone(), "old connection should be tombstoned") + + // Now reuse the same port for a new connection + tracker.TrackOutbound(srcIP, dstIP, srcPort, dstPort, TCPSyn, 100) + + // The old tombstoned entry should be replaced with a new one + newConn := tracker.connections[key] + require.NotNil(t, newConn, "new connection should exist") + require.False(t, newConn.IsTombstone(), "new connection should not be tombstoned") + require.Equal(t, TCPStateSynSent, newConn.GetState()) + + // SYN-ACK for the new connection should be valid + valid = tracker.IsValidInbound(dstIP, srcIP, dstPort, srcPort, TCPSyn|TCPAck, 100) + require.True(t, valid, "SYN-ACK for new connection on reused port should be accepted") + require.Equal(t, TCPStateEstablished, newConn.GetState()) + + // Data transfer should work + tracker.TrackOutbound(srcIP, dstIP, srcPort, dstPort, TCPAck, 100) + valid = tracker.IsValidInbound(dstIP, srcIP, dstPort, srcPort, TCPPush|TCPAck, 500) + require.True(t, valid, "data should be allowed on new connection") + }) + + t.Run("Outbound port reuse after RST", func(t *testing.T) { + tracker := NewTCPTracker(DefaultTCPTimeout, logger, flowLogger) + defer tracker.Close() + + key := ConnKey{SrcIP: srcIP, DstIP: dstIP, SrcPort: srcPort, DstPort: dstPort} + + // Establish and RST a connection + establishConnection(t, tracker, srcIP, dstIP, srcPort, dstPort) + valid := tracker.IsValidInbound(dstIP, srcIP, dstPort, srcPort, TCPRst|TCPAck, 0) + require.True(t, valid) + + conn := tracker.connections[key] + require.True(t, conn.IsTombstone(), "RST connection should be tombstoned") + + // Reuse the same port + tracker.TrackOutbound(srcIP, dstIP, srcPort, dstPort, TCPSyn, 100) + + newConn := tracker.connections[key] + require.NotNil(t, newConn) + require.False(t, newConn.IsTombstone()) + require.Equal(t, TCPStateSynSent, newConn.GetState()) + + valid = tracker.IsValidInbound(dstIP, srcIP, dstPort, srcPort, TCPSyn|TCPAck, 100) + require.True(t, valid, "SYN-ACK should be accepted after RST tombstone") + }) + + t.Run("Inbound port reuse after close", func(t *testing.T) { + tracker := NewTCPTracker(DefaultTCPTimeout, logger, flowLogger) + defer tracker.Close() + + clientIP := srcIP + serverIP := dstIP + clientPort := srcPort + serverPort := dstPort + key := ConnKey{SrcIP: clientIP, DstIP: serverIP, SrcPort: clientPort, DstPort: serverPort} + + // Inbound connection: client SYN → server SYN-ACK → client ACK + tracker.TrackInbound(clientIP, serverIP, clientPort, serverPort, TCPSyn, nil, 100, 0) + tracker.TrackOutbound(serverIP, clientIP, serverPort, clientPort, TCPSyn|TCPAck, 100) + tracker.TrackInbound(clientIP, serverIP, clientPort, serverPort, TCPAck, nil, 100, 0) + + conn := tracker.connections[key] + require.Equal(t, TCPStateEstablished, conn.GetState()) + + // Server-initiated close to reach Closed/tombstoned: + // Server FIN (opposite dir) → CloseWait + tracker.TrackOutbound(serverIP, clientIP, serverPort, clientPort, TCPFin|TCPAck, 100) + require.Equal(t, TCPStateCloseWait, conn.GetState()) + // Client FIN-ACK (same dir as conn) → LastAck + tracker.TrackInbound(clientIP, serverIP, clientPort, serverPort, TCPFin|TCPAck, nil, 100, 0) + require.Equal(t, TCPStateLastAck, conn.GetState()) + // Server final ACK (opposite dir) → Closed → tombstoned + tracker.TrackOutbound(serverIP, clientIP, serverPort, clientPort, TCPAck, 100) + + require.True(t, conn.IsTombstone()) + + // New inbound connection on same ports + tracker.TrackInbound(clientIP, serverIP, clientPort, serverPort, TCPSyn, nil, 100, 0) + + newConn := tracker.connections[key] + require.NotNil(t, newConn) + require.False(t, newConn.IsTombstone()) + require.Equal(t, TCPStateSynReceived, newConn.GetState()) + + // Complete handshake: server SYN-ACK, then client ACK + tracker.TrackOutbound(serverIP, clientIP, serverPort, clientPort, TCPSyn|TCPAck, 100) + tracker.TrackInbound(clientIP, serverIP, clientPort, serverPort, TCPAck, nil, 100, 0) + require.Equal(t, TCPStateEstablished, newConn.GetState()) + }) + + t.Run("Late ACK on tombstoned connection is harmless", func(t *testing.T) { + tracker := NewTCPTracker(DefaultTCPTimeout, logger, flowLogger) + defer tracker.Close() + + key := ConnKey{SrcIP: srcIP, DstIP: dstIP, SrcPort: srcPort, DstPort: dstPort} + + // Establish and close via passive close (server-initiated FIN → Closed → tombstoned) + establishConnection(t, tracker, srcIP, dstIP, srcPort, dstPort) + tracker.IsValidInbound(dstIP, srcIP, dstPort, srcPort, TCPFin|TCPAck, 0) // CloseWait + tracker.TrackOutbound(srcIP, dstIP, srcPort, dstPort, TCPFin|TCPAck, 0) // LastAck + tracker.IsValidInbound(dstIP, srcIP, dstPort, srcPort, TCPAck, 0) // Closed + + conn := tracker.connections[key] + require.True(t, conn.IsTombstone()) + + // Late ACK should be rejected (tombstoned) + valid := tracker.IsValidInbound(dstIP, srcIP, dstPort, srcPort, TCPAck, 0) + require.False(t, valid, "late ACK on tombstoned connection should be rejected") + + // Late outbound ACK should not create a new connection (not a SYN) + tracker.TrackOutbound(srcIP, dstIP, srcPort, dstPort, TCPAck, 0) + require.True(t, tracker.connections[key].IsTombstone(), "late outbound ACK should not replace tombstoned entry") + }) +} + +func TestTCPPortReuseTimeWait(t *testing.T) { + srcIP := netip.MustParseAddr("100.64.0.1") + dstIP := netip.MustParseAddr("100.64.0.2") + srcPort := uint16(12345) + dstPort := uint16(80) + + t.Run("Outbound port reuse during TIME-WAIT (active close)", func(t *testing.T) { + tracker := NewTCPTracker(DefaultTCPTimeout, logger, flowLogger) + defer tracker.Close() + + key := ConnKey{SrcIP: srcIP, DstIP: dstIP, SrcPort: srcPort, DstPort: dstPort} + + // Establish connection + establishConnection(t, tracker, srcIP, dstIP, srcPort, dstPort) + + // Active close: client (outbound initiator) sends FIN first + tracker.TrackOutbound(srcIP, dstIP, srcPort, dstPort, TCPFin|TCPAck, 0) + conn := tracker.connections[key] + require.Equal(t, TCPStateFinWait1, conn.GetState()) + + // Server ACKs the FIN + valid := tracker.IsValidInbound(dstIP, srcIP, dstPort, srcPort, TCPAck, 0) + require.True(t, valid) + require.Equal(t, TCPStateFinWait2, conn.GetState()) + + // Server sends its own FIN + valid = tracker.IsValidInbound(dstIP, srcIP, dstPort, srcPort, TCPFin|TCPAck, 0) + require.True(t, valid) + require.Equal(t, TCPStateTimeWait, conn.GetState()) + + // Client sends final ACK (TIME-WAIT stays, not tombstoned) + tracker.TrackOutbound(srcIP, dstIP, srcPort, dstPort, TCPAck, 0) + require.False(t, conn.IsTombstone(), "TIME-WAIT should not be tombstoned") + + // New outbound SYN on the same port (port reuse during TIME-WAIT) + tracker.TrackOutbound(srcIP, dstIP, srcPort, dstPort, TCPSyn, 100) + + // Per RFC 1122/6191, new SYN during TIME-WAIT should start a new connection + newConn := tracker.connections[key] + require.NotNil(t, newConn, "new connection should exist") + require.False(t, newConn.IsTombstone(), "new connection should not be tombstoned") + require.Equal(t, TCPStateSynSent, newConn.GetState(), "new connection should be in SYN-SENT") + + // SYN-ACK for new connection should be valid + valid = tracker.IsValidInbound(dstIP, srcIP, dstPort, srcPort, TCPSyn|TCPAck, 100) + require.True(t, valid, "SYN-ACK for new connection should be accepted") + require.Equal(t, TCPStateEstablished, newConn.GetState()) + }) + + t.Run("Inbound SYN during TIME-WAIT falls through to normal tracking", func(t *testing.T) { + tracker := NewTCPTracker(DefaultTCPTimeout, logger, flowLogger) + defer tracker.Close() + + key := ConnKey{SrcIP: srcIP, DstIP: dstIP, SrcPort: srcPort, DstPort: dstPort} + + // Establish outbound connection and close via active close → TIME-WAIT + establishConnection(t, tracker, srcIP, dstIP, srcPort, dstPort) + tracker.TrackOutbound(srcIP, dstIP, srcPort, dstPort, TCPFin|TCPAck, 0) + tracker.IsValidInbound(dstIP, srcIP, dstPort, srcPort, TCPAck, 0) + tracker.IsValidInbound(dstIP, srcIP, dstPort, srcPort, TCPFin|TCPAck, 0) + tracker.TrackOutbound(srcIP, dstIP, srcPort, dstPort, TCPAck, 0) + + conn := tracker.connections[key] + require.Equal(t, TCPStateTimeWait, conn.GetState()) + + // Inbound SYN on same ports during TIME-WAIT: IsValidInbound returns false + // so the filter falls through to ACL check + TrackInbound (which creates + // a new connection via track() → updateIfExists skips TIME-WAIT for SYN) + valid := tracker.IsValidInbound(dstIP, srcIP, dstPort, srcPort, TCPSyn, 0) + require.False(t, valid, "inbound SYN during TIME-WAIT should fail conntrack validation") + + // Simulate what the filter does next: TrackInbound via the normal path + tracker.TrackInbound(dstIP, srcIP, dstPort, srcPort, TCPSyn, nil, 100, 0) + + // The new inbound connection uses the inverted key (dst→src becomes src→dst in track) + invertedKey := ConnKey{SrcIP: dstIP, DstIP: srcIP, SrcPort: dstPort, DstPort: srcPort} + newConn := tracker.connections[invertedKey] + require.NotNil(t, newConn, "new inbound connection should be tracked") + require.Equal(t, TCPStateSynReceived, newConn.GetState()) + require.False(t, newConn.IsTombstone()) + }) + + t.Run("Late retransmit during TIME-WAIT still allowed", func(t *testing.T) { + tracker := NewTCPTracker(DefaultTCPTimeout, logger, flowLogger) + defer tracker.Close() + + key := ConnKey{SrcIP: srcIP, DstIP: dstIP, SrcPort: srcPort, DstPort: dstPort} + + // Establish and active close → TIME-WAIT + establishConnection(t, tracker, srcIP, dstIP, srcPort, dstPort) + tracker.TrackOutbound(srcIP, dstIP, srcPort, dstPort, TCPFin|TCPAck, 0) + tracker.IsValidInbound(dstIP, srcIP, dstPort, srcPort, TCPAck, 0) + tracker.IsValidInbound(dstIP, srcIP, dstPort, srcPort, TCPFin|TCPAck, 0) + tracker.TrackOutbound(srcIP, dstIP, srcPort, dstPort, TCPAck, 0) + + conn := tracker.connections[key] + require.Equal(t, TCPStateTimeWait, conn.GetState()) + + // Late ACK retransmits during TIME-WAIT should still be accepted + valid := tracker.IsValidInbound(dstIP, srcIP, dstPort, srcPort, TCPAck, 0) + require.True(t, valid, "retransmitted ACK during TIME-WAIT should be accepted") + }) +} + func TestTCPTimeoutHandling(t *testing.T) { // Create tracker with a very short timeout for testing shortTimeout := 100 * time.Millisecond diff --git a/client/firewall/uspfilter/log/log.go b/client/firewall/uspfilter/log/log.go index 66308defc..c6ca55e70 100644 --- a/client/firewall/uspfilter/log/log.go +++ b/client/firewall/uspfilter/log/log.go @@ -5,6 +5,8 @@ import ( "context" "fmt" "io" + "os" + "strconv" "sync" "sync/atomic" "time" @@ -16,9 +18,18 @@ const ( maxBatchSize = 1024 * 16 maxMessageSize = 1024 * 2 defaultFlushInterval = 2 * time.Second - logChannelSize = 1000 + defaultLogChanSize = 1000 ) +func getLogChannelSize() int { + if v := os.Getenv("NB_USPFILTER_LOG_BUFFER"); v != "" { + if n, err := strconv.Atoi(v); err == nil && n > 0 { + return n + } + } + return defaultLogChanSize +} + type Level uint32 const ( @@ -69,7 +80,7 @@ type Logger struct { func NewFromLogrus(logrusLogger *log.Logger) *Logger { l := &Logger{ output: logrusLogger.Out, - msgChannel: make(chan logMessage, logChannelSize), + msgChannel: make(chan logMessage, getLogChannelSize()), shutdown: make(chan struct{}), bufPool: sync.Pool{ New: func() any { diff --git a/client/internal/engine.go b/client/internal/engine.go index 631910eb6..4f3cf0998 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -28,6 +28,7 @@ import ( "github.com/netbirdio/netbird/client/firewall" firewallManager "github.com/netbirdio/netbird/client/firewall/manager" "github.com/netbirdio/netbird/client/iface" + nbnetstack "github.com/netbirdio/netbird/client/iface/netstack" "github.com/netbirdio/netbird/client/iface/device" "github.com/netbirdio/netbird/client/iface/udpmux" "github.com/netbirdio/netbird/client/internal/acl" @@ -1923,7 +1924,7 @@ func (e *Engine) triggerClientRestart() { } func (e *Engine) startNetworkMonitor() { - if !e.config.NetworkMonitor { + if !e.config.NetworkMonitor || nbnetstack.IsEnabled() { log.Infof("Network monitor is disabled, not starting") return } diff --git a/client/internal/networkmonitor/monitor.go b/client/internal/networkmonitor/monitor.go index 6dd81f68c..6d019258d 100644 --- a/client/internal/networkmonitor/monitor.go +++ b/client/internal/networkmonitor/monitor.go @@ -14,7 +14,6 @@ import ( "github.com/cenkalti/backoff/v4" log "github.com/sirupsen/logrus" - "github.com/netbirdio/netbird/client/iface/netstack" "github.com/netbirdio/netbird/client/internal/routemanager/systemops" ) @@ -38,11 +37,6 @@ func New() *NetworkMonitor { // Listen begins monitoring network changes. When a change is detected, this function will return without error. func (nw *NetworkMonitor) Listen(ctx context.Context) (err error) { - if netstack.IsEnabled() { - log.Debugf("Network monitor: skipping in netstack mode") - return nil - } - nw.mu.Lock() if nw.cancel != nil { nw.mu.Unlock() diff --git a/combined/Dockerfile.multistage b/combined/Dockerfile.multistage new file mode 100644 index 000000000..ef3d68c6e --- /dev/null +++ b/combined/Dockerfile.multistage @@ -0,0 +1,25 @@ +FROM golang:1.25-bookworm AS builder +WORKDIR /app + +# Install build dependencies +RUN apt-get update && apt-get install -y gcc libc6-dev git && rm -rf /var/lib/apt/lists/* + +COPY go.mod go.sum ./ +RUN go mod download + +COPY . . + +# Build with version info from git (matching goreleaser ldflags) +RUN CGO_ENABLED=1 GOOS=linux go build \ + -ldflags="-s -w \ + -X github.com/netbirdio/netbird/version.version=$(git describe --tags --always --dirty 2>/dev/null || echo 'dev') \ + -X main.commit=$(git rev-parse --short HEAD 2>/dev/null || echo 'unknown') \ + -X main.date=$(date -u +%Y-%m-%dT%H:%M:%SZ) \ + -X main.builtBy=docker" \ + -o netbird-server ./combined + +FROM ubuntu:24.04 +RUN apt update && apt install -y ca-certificates && rm -fr /var/cache/apt +ENTRYPOINT [ "/go/bin/netbird-server" ] +CMD ["--config", "/etc/netbird/config.yaml"] +COPY --from=builder /app/netbird-server /go/bin/netbird-server diff --git a/combined/LICENSE b/combined/LICENSE new file mode 100644 index 000000000..be3f7b28e --- /dev/null +++ b/combined/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/combined/cmd/config.go b/combined/cmd/config.go index 72c63b7c7..04155f72e 100644 --- a/combined/cmd/config.go +++ b/combined/cmd/config.go @@ -627,7 +627,15 @@ func (c *CombinedConfig) ToManagementConfig() (*nbconfig.Config, error) { // Set HTTP config fields for embedded IDP httpConfig.AuthIssuer = mgmt.Auth.Issuer + httpConfig.AuthAudience = "netbird-dashboard" + httpConfig.AuthClientID = httpConfig.AuthAudience + httpConfig.CLIAuthAudience = "netbird-cli" + httpConfig.AuthUserIDClaim = "sub" + httpConfig.AuthKeysLocation = mgmt.Auth.Issuer + "/keys" + httpConfig.OIDCConfigEndpoint = mgmt.Auth.Issuer + "/.well-known/openid-configuration" httpConfig.IdpSignKeyRefreshEnabled = mgmt.Auth.SignKeyRefreshEnabled + callbackURL := strings.TrimSuffix(httpConfig.AuthIssuer, "/oauth2") + httpConfig.AuthCallbackURL = callbackURL + types.ProxyCallbackEndpointFull return &nbconfig.Config{ Stuns: stuns, diff --git a/combined/cmd/root.go b/combined/cmd/root.go index 8837fea44..0ec0e9480 100644 --- a/combined/cmd/root.go +++ b/combined/cmd/root.go @@ -62,6 +62,8 @@ Configuration is loaded from a YAML file specified with --config.`, func init() { rootCmd.PersistentFlags().StringVarP(&configPath, "config", "c", "", "path to YAML configuration file (required)") _ = rootCmd.MarkPersistentFlagRequired("config") + + rootCmd.AddCommand(newTokenCommands()) } func Execute() error { diff --git a/combined/cmd/token.go b/combined/cmd/token.go new file mode 100644 index 000000000..9393c6c46 --- /dev/null +++ b/combined/cmd/token.go @@ -0,0 +1,60 @@ +package cmd + +import ( + "context" + "fmt" + "os" + "strings" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/netbirdio/netbird/formatter/hook" + tokencmd "github.com/netbirdio/netbird/management/cmd/token" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/util" +) + +// newTokenCommands creates the token command tree with combined-specific store opener. +func newTokenCommands() *cobra.Command { + return tokencmd.NewCommands(withTokenStore) +} + +// withTokenStore loads the combined YAML config, initializes the store, and calls fn. +func withTokenStore(cmd *cobra.Command, fn func(ctx context.Context, s store.Store) error) error { + if err := util.InitLog("error", "console"); err != nil { + return fmt.Errorf("init log: %w", err) + } + + ctx := context.WithValue(cmd.Context(), hook.ExecutionContextKey, hook.SystemSource) //nolint:staticcheck + + cfg, err := LoadConfig(configPath) + if err != nil { + return fmt.Errorf("load config: %w", err) + } + + if dsn := cfg.Server.Store.DSN; dsn != "" { + switch strings.ToLower(cfg.Server.Store.Engine) { + case "postgres": + os.Setenv("NB_STORE_ENGINE_POSTGRES_DSN", dsn) + case "mysql": + os.Setenv("NB_STORE_ENGINE_MYSQL_DSN", dsn) + } + } + + datadir := cfg.Management.DataDir + engine := types.Engine(cfg.Management.Store.Engine) + + s, err := store.NewStore(ctx, engine, datadir, nil, true) + if err != nil { + return fmt.Errorf("create store: %w", err) + } + defer func() { + if err := s.Close(ctx); err != nil { + log.Debugf("close store: %v", err) + } + }() + + return fn(ctx, s) +} diff --git a/go.mod b/go.mod index 801d52483..ff9105761 100644 --- a/go.mod +++ b/go.mod @@ -42,6 +42,7 @@ require ( github.com/cilium/ebpf v0.15.0 github.com/coder/websocket v1.8.13 github.com/coreos/go-iptables v0.7.0 + github.com/coreos/go-oidc/v3 v3.14.1 github.com/creack/pty v1.1.24 github.com/dexidp/dex v0.0.0-00010101000000-000000000000 github.com/dexidp/dex/api/v2 v2.4.0 @@ -167,7 +168,6 @@ require ( github.com/containerd/containerd v1.7.29 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect - github.com/coreos/go-oidc/v3 v3.14.1 // indirect github.com/cpuguy83/dockercfg v0.3.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh index fd50c4871..b96598622 100755 --- a/infrastructure_files/getting-started.sh +++ b/infrastructure_files/getting-started.sh @@ -166,6 +166,65 @@ read_proxy_docker_network() { return 0 } +read_enable_proxy() { + echo "" > /dev/stderr + echo "Do you want to enable the NetBird Proxy service?" > /dev/stderr + echo "The proxy exposes internal NetBird network resources to the internet." > /dev/stderr + echo -n "Enable proxy? [y/N]: " > /dev/stderr + read -r CHOICE < /dev/tty + + if [[ "$CHOICE" =~ ^[Yy]$ ]]; then + echo "true" + else + echo "false" + fi + return 0 +} + +read_proxy_domain() { + echo "" > /dev/stderr + echo "WARNING: The proxy domain MUST NOT be a subdomain of the NetBird management" > /dev/stderr + echo "domain ($NETBIRD_DOMAIN). Using a subdomain will cause TLS certificate conflicts." > /dev/stderr + echo "" > /dev/stderr + echo -n "Enter the domain for the NetBird Proxy (e.g. proxy.my-domain.com): " > /dev/stderr + read -r READ_PROXY_DOMAIN < /dev/tty + + if [[ -z "$READ_PROXY_DOMAIN" ]]; then + echo "The proxy domain cannot be empty." > /dev/stderr + read_proxy_domain + return + fi + + if [[ "$READ_PROXY_DOMAIN" == "$NETBIRD_DOMAIN" ]]; then + echo "The proxy domain cannot be the same as the management domain ($NETBIRD_DOMAIN)." > /dev/stderr + read_proxy_domain + return + fi + + if [[ "$READ_PROXY_DOMAIN" == *".${NETBIRD_DOMAIN}" ]]; then + echo "The proxy domain cannot be a subdomain of the management domain ($NETBIRD_DOMAIN)." > /dev/stderr + read_proxy_domain + return + fi + + echo "$READ_PROXY_DOMAIN" + return 0 +} + +read_traefik_acme_email() { + echo "" > /dev/stderr + echo "Enter your email for Let's Encrypt certificate notifications." > /dev/stderr + echo -n "Email address: " > /dev/stderr + read -r EMAIL < /dev/tty + if [[ -z "$EMAIL" ]]; then + echo "Email is required for Let's Encrypt." > /dev/stderr + read_traefik_acme_email + return + fi + echo "$EMAIL" + return 0 +} + get_bind_address() { if [[ "$BIND_LOCALHOST_ONLY" == "true" ]]; then echo "127.0.0.1" @@ -248,16 +307,23 @@ initialize_default_values() { DASHBOARD_IMAGE="netbirdio/dashboard:latest" # Combined server replaces separate signal, relay, and management containers NETBIRD_SERVER_IMAGE="netbirdio/netbird-server:latest" + NETBIRD_PROXY_IMAGE="netbirdio/reverse-proxy:latest" # Reverse proxy configuration REVERSE_PROXY_TYPE="0" TRAEFIK_EXTERNAL_NETWORK="" TRAEFIK_ENTRYPOINT="websecure" TRAEFIK_CERTRESOLVER="" + TRAEFIK_ACME_EMAIL="" DASHBOARD_HOST_PORT="8080" MANAGEMENT_HOST_PORT="8081" # Combined server port (management + signal + relay) BIND_LOCALHOST_ONLY="true" EXTERNAL_PROXY_NETWORK="" + + # NetBird Proxy configuration + ENABLE_PROXY="false" + PROXY_DOMAIN="" + PROXY_TOKEN="" return 0 } @@ -280,7 +346,16 @@ configure_reverse_proxy() { # Prompt for reverse proxy type REVERSE_PROXY_TYPE=$(read_reverse_proxy_type) - # Handle Traefik-specific prompts (only for external Traefik) + # Handle built-in Traefik prompts (option 0) + if [[ "$REVERSE_PROXY_TYPE" == "0" ]]; then + TRAEFIK_ACME_EMAIL=$(read_traefik_acme_email) + ENABLE_PROXY=$(read_enable_proxy) + if [[ "$ENABLE_PROXY" == "true" ]]; then + PROXY_DOMAIN=$(read_proxy_domain) + fi + fi + + # Handle external Traefik-specific prompts (option 1) if [[ "$REVERSE_PROXY_TYPE" == "1" ]]; then TRAEFIK_EXTERNAL_NETWORK=$(read_traefik_network) TRAEFIK_ENTRYPOINT=$(read_traefik_entrypoint) @@ -307,7 +382,7 @@ check_existing_installation() { echo "Generated files already exist, if you want to reinitialize the environment, please remove them first." echo "You can use the following commands:" echo " $DOCKER_COMPOSE_COMMAND down --volumes # to remove all containers and volumes" - echo " rm -f docker-compose.yml dashboard.env config.yaml nginx-netbird.conf caddyfile-netbird.txt npm-advanced-config.txt" + echo " rm -f docker-compose.yml dashboard.env config.yaml proxy.env nginx-netbird.conf caddyfile-netbird.txt npm-advanced-config.txt" echo "Be aware that this will remove all data from the database, and you will have to reconfigure the dashboard." exit 1 fi @@ -321,6 +396,12 @@ generate_configuration_files() { case "$REVERSE_PROXY_TYPE" in 0) render_docker_compose_traefik_builtin > docker-compose.yml + if [[ "$ENABLE_PROXY" == "true" ]]; then + # Create placeholder proxy.env so docker-compose can validate + # This will be overwritten with the actual token after netbird-server starts + echo "# Placeholder - will be updated with token after netbird-server starts" > proxy.env + echo "NB_PROXY_TOKEN=placeholder" >> proxy.env + fi ;; 1) render_docker_compose_traefik > docker-compose.yml @@ -357,12 +438,45 @@ start_services_and_show_instructions() { # For NPM, start containers first (NPM needs services running to create proxy) # For other external proxies, show instructions first and wait for user confirmation if [[ "$REVERSE_PROXY_TYPE" == "0" ]]; then - # Built-in Traefik - handles everything automatically (TLS via Let's Encrypt) + # Built-in Traefik - two-phase startup if proxy is enabled echo -e "$MSG_STARTING_SERVICES" - $DOCKER_COMPOSE_COMMAND up -d - sleep 3 - wait_management_proxy traefik + if [[ "$ENABLE_PROXY" == "true" ]]; then + # Phase 1: Start core services (without proxy) + echo "Starting core services..." + $DOCKER_COMPOSE_COMMAND up -d traefik dashboard netbird-server + + sleep 3 + wait_management_proxy traefik + + # Phase 2: Create proxy token and start proxy + echo "" + echo "Creating proxy access token..." + # Use docker exec with bash to run the token command directly + PROXY_TOKEN=$($DOCKER_COMPOSE_COMMAND exec -T netbird-server \ + /go/bin/netbird-server token create --name "default-proxy" --config /etc/netbird/config.yaml 2>/dev/null | grep "^Token:" | awk '{print $2}') + + if [[ -z "$PROXY_TOKEN" ]]; then + echo "ERROR: Failed to create proxy token. Check netbird-server logs." > /dev/stderr + $DOCKER_COMPOSE_COMMAND logs --tail=20 netbird-server + exit 1 + fi + + echo "Proxy token created successfully." + + # Generate proxy.env with the token + render_proxy_env > proxy.env + + # Start proxy service + echo "Starting proxy service..." + $DOCKER_COMPOSE_COMMAND up -d proxy + else + # No proxy - start all services at once + $DOCKER_COMPOSE_COMMAND up -d + + sleep 3 + wait_management_proxy traefik + fi echo -e "$MSG_DONE" print_post_setup_instructions @@ -434,6 +548,45 @@ init_environment() { ############################################ render_docker_compose_traefik_builtin() { + # Generate proxy service section if enabled + local proxy_service="" + local proxy_volumes="" + if [[ "$ENABLE_PROXY" == "true" ]]; then + proxy_service=" + # NetBird Proxy - exposes internal resources to the internet + proxy: + image: $NETBIRD_PROXY_IMAGE + container_name: netbird-proxy + # Hairpin NAT fix: route domain back to traefik's static IP within Docker + extra_hosts: + - \"$NETBIRD_DOMAIN:172.30.0.10\" + restart: unless-stopped + networks: [netbird] + depends_on: + - netbird-server + env_file: + - ./proxy.env + volumes: + - netbird_proxy_certs:/certs + labels: + # TCP passthrough for any unmatched domain (proxy handles its own TLS) + - traefik.enable=true + - traefik.tcp.routers.proxy-passthrough.entrypoints=websecure + - traefik.tcp.routers.proxy-passthrough.rule=HostSNI(\`*\`) + - traefik.tcp.routers.proxy-passthrough.tls.passthrough=true + - traefik.tcp.routers.proxy-passthrough.service=proxy-tls + - traefik.tcp.routers.proxy-passthrough.priority=1 + - traefik.tcp.services.proxy-tls.loadbalancer.server.port=8443 + logging: + driver: \"json-file\" + options: + max-size: \"500m\" + max-file: \"2\" +" + proxy_volumes=" + netbird_proxy_certs:" + fi + cat <= 400 { + a.Reason = "Request failed" + } +} + +// ToAPIResponse converts an AccessLogEntry to the API ProxyAccessLog type +func (a *AccessLogEntry) ToAPIResponse() *api.ProxyAccessLog { + var sourceIP *string + if a.GeoLocation.ConnectionIP != nil { + ip := a.GeoLocation.ConnectionIP.String() + sourceIP = &ip + } + + var reason *string + if a.Reason != "" { + reason = &a.Reason + } + + var userID *string + if a.UserId != "" { + userID = &a.UserId + } + + var authMethod *string + if a.AuthMethodUsed != "" { + authMethod = &a.AuthMethodUsed + } + + var countryCode *string + if a.GeoLocation.CountryCode != "" { + countryCode = &a.GeoLocation.CountryCode + } + + var cityName *string + if a.GeoLocation.CityName != "" { + cityName = &a.GeoLocation.CityName + } + + return &api.ProxyAccessLog{ + Id: a.ID, + ServiceId: a.ServiceID, + Timestamp: a.Timestamp, + Method: a.Method, + Host: a.Host, + Path: a.Path, + DurationMs: int(a.Duration.Milliseconds()), + StatusCode: a.StatusCode, + SourceIp: sourceIP, + Reason: reason, + UserId: userID, + AuthMethodUsed: authMethod, + CountryCode: countryCode, + CityName: cityName, + } +} diff --git a/management/internals/modules/reverseproxy/accesslogs/filter.go b/management/internals/modules/reverseproxy/accesslogs/filter.go new file mode 100644 index 000000000..f4b0a2048 --- /dev/null +++ b/management/internals/modules/reverseproxy/accesslogs/filter.go @@ -0,0 +1,109 @@ +package accesslogs + +import ( + "net/http" + "strconv" + "time" +) + +const ( + // DefaultPageSize is the default number of records per page + DefaultPageSize = 50 + // MaxPageSize is the maximum number of records allowed per page + MaxPageSize = 100 +) + +// AccessLogFilter holds pagination and filtering parameters for access logs +type AccessLogFilter struct { + // Page is the current page number (1-indexed) + Page int + // PageSize is the number of records per page + PageSize int + + // Filtering parameters + Search *string // General search across log ID, host, path, source IP, and user fields + SourceIP *string // Filter by source IP address + Host *string // Filter by host header + Path *string // Filter by request path (supports LIKE pattern) + UserID *string // Filter by authenticated user ID + UserEmail *string // Filter by user email (requires user lookup) + UserName *string // Filter by user name (requires user lookup) + Method *string // Filter by HTTP method + Status *string // Filter by status: "success" (2xx/3xx) or "failed" (1xx/4xx/5xx) + StatusCode *int // Filter by HTTP status code + StartDate *time.Time // Filter by timestamp >= start_date + EndDate *time.Time // Filter by timestamp <= end_date +} + +// ParseFromRequest parses pagination and filter parameters from HTTP request query parameters +func (f *AccessLogFilter) ParseFromRequest(r *http.Request) { + queryParams := r.URL.Query() + + f.Page = parsePositiveInt(queryParams.Get("page"), 1) + f.PageSize = min(parsePositiveInt(queryParams.Get("page_size"), DefaultPageSize), MaxPageSize) + + f.Search = parseOptionalString(queryParams.Get("search")) + f.SourceIP = parseOptionalString(queryParams.Get("source_ip")) + f.Host = parseOptionalString(queryParams.Get("host")) + f.Path = parseOptionalString(queryParams.Get("path")) + f.UserID = parseOptionalString(queryParams.Get("user_id")) + f.UserEmail = parseOptionalString(queryParams.Get("user_email")) + f.UserName = parseOptionalString(queryParams.Get("user_name")) + f.Method = parseOptionalString(queryParams.Get("method")) + f.Status = parseOptionalString(queryParams.Get("status")) + f.StatusCode = parseOptionalInt(queryParams.Get("status_code")) + f.StartDate = parseOptionalRFC3339(queryParams.Get("start_date")) + f.EndDate = parseOptionalRFC3339(queryParams.Get("end_date")) +} + +// parsePositiveInt parses a positive integer from a string, returning defaultValue if invalid +func parsePositiveInt(s string, defaultValue int) int { + if s == "" { + return defaultValue + } + if val, err := strconv.Atoi(s); err == nil && val > 0 { + return val + } + return defaultValue +} + +// parseOptionalString returns a pointer to the string if non-empty, otherwise nil +func parseOptionalString(s string) *string { + if s == "" { + return nil + } + return &s +} + +// parseOptionalInt parses an optional positive integer from a string +func parseOptionalInt(s string) *int { + if s == "" { + return nil + } + if val, err := strconv.Atoi(s); err == nil && val > 0 { + v := val + return &v + } + return nil +} + +// parseOptionalRFC3339 parses an optional RFC3339 timestamp from a string +func parseOptionalRFC3339(s string) *time.Time { + if s == "" { + return nil + } + if t, err := time.Parse(time.RFC3339, s); err == nil { + return &t + } + return nil +} + +// GetOffset calculates the database offset for pagination +func (f *AccessLogFilter) GetOffset() int { + return (f.Page - 1) * f.PageSize +} + +// GetLimit returns the page size for database queries +func (f *AccessLogFilter) GetLimit() int { + return f.PageSize +} diff --git a/management/internals/modules/reverseproxy/accesslogs/filter_test.go b/management/internals/modules/reverseproxy/accesslogs/filter_test.go new file mode 100644 index 000000000..5d48ea9d2 --- /dev/null +++ b/management/internals/modules/reverseproxy/accesslogs/filter_test.go @@ -0,0 +1,371 @@ +package accesslogs + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAccessLogFilter_ParseFromRequest(t *testing.T) { + tests := []struct { + name string + queryParams map[string]string + expectedPage int + expectedPageSize int + }{ + { + name: "default values when no params provided", + queryParams: map[string]string{}, + expectedPage: 1, + expectedPageSize: DefaultPageSize, + }, + { + name: "valid page and page_size", + queryParams: map[string]string{ + "page": "2", + "page_size": "25", + }, + expectedPage: 2, + expectedPageSize: 25, + }, + { + name: "page_size exceeds max, should cap at MaxPageSize", + queryParams: map[string]string{ + "page": "1", + "page_size": "200", + }, + expectedPage: 1, + expectedPageSize: MaxPageSize, + }, + { + name: "invalid page number, should use default", + queryParams: map[string]string{ + "page": "invalid", + "page_size": "10", + }, + expectedPage: 1, + expectedPageSize: 10, + }, + { + name: "invalid page_size, should use default", + queryParams: map[string]string{ + "page": "2", + "page_size": "invalid", + }, + expectedPage: 2, + expectedPageSize: DefaultPageSize, + }, + { + name: "zero page number, should use default", + queryParams: map[string]string{ + "page": "0", + "page_size": "10", + }, + expectedPage: 1, + expectedPageSize: 10, + }, + { + name: "negative page number, should use default", + queryParams: map[string]string{ + "page": "-1", + "page_size": "10", + }, + expectedPage: 1, + expectedPageSize: 10, + }, + { + name: "zero page_size, should use default", + queryParams: map[string]string{ + "page": "1", + "page_size": "0", + }, + expectedPage: 1, + expectedPageSize: DefaultPageSize, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/test", nil) + q := req.URL.Query() + for key, value := range tt.queryParams { + q.Set(key, value) + } + req.URL.RawQuery = q.Encode() + + filter := &AccessLogFilter{} + filter.ParseFromRequest(req) + + assert.Equal(t, tt.expectedPage, filter.Page, "Page mismatch") + assert.Equal(t, tt.expectedPageSize, filter.PageSize, "PageSize mismatch") + }) + } +} + +func TestAccessLogFilter_GetOffset(t *testing.T) { + tests := []struct { + name string + page int + pageSize int + expectedOffset int + }{ + { + name: "first page", + page: 1, + pageSize: 50, + expectedOffset: 0, + }, + { + name: "second page", + page: 2, + pageSize: 50, + expectedOffset: 50, + }, + { + name: "third page with page size 25", + page: 3, + pageSize: 25, + expectedOffset: 50, + }, + { + name: "page 10 with page size 10", + page: 10, + pageSize: 10, + expectedOffset: 90, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + filter := &AccessLogFilter{ + Page: tt.page, + PageSize: tt.pageSize, + } + + offset := filter.GetOffset() + assert.Equal(t, tt.expectedOffset, offset) + }) + } +} + +func TestAccessLogFilter_GetLimit(t *testing.T) { + filter := &AccessLogFilter{ + Page: 2, + PageSize: 25, + } + + limit := filter.GetLimit() + assert.Equal(t, 25, limit, "GetLimit should return PageSize") +} + +func TestAccessLogFilter_ParseFromRequest_FilterParams(t *testing.T) { + startDate := "2024-01-15T10:30:00Z" + endDate := "2024-01-16T15:45:00Z" + + req := httptest.NewRequest(http.MethodGet, "/test", nil) + q := req.URL.Query() + q.Set("search", "test query") + q.Set("source_ip", "192.168.1.1") + q.Set("host", "example.com") + q.Set("path", "/api/users") + q.Set("user_id", "user123") + q.Set("user_email", "user@example.com") + q.Set("user_name", "John Doe") + q.Set("method", "GET") + q.Set("status", "success") + q.Set("status_code", "200") + q.Set("start_date", startDate) + q.Set("end_date", endDate) + req.URL.RawQuery = q.Encode() + + filter := &AccessLogFilter{} + filter.ParseFromRequest(req) + + require.NotNil(t, filter.Search) + assert.Equal(t, "test query", *filter.Search) + + require.NotNil(t, filter.SourceIP) + assert.Equal(t, "192.168.1.1", *filter.SourceIP) + + require.NotNil(t, filter.Host) + assert.Equal(t, "example.com", *filter.Host) + + require.NotNil(t, filter.Path) + assert.Equal(t, "/api/users", *filter.Path) + + require.NotNil(t, filter.UserID) + assert.Equal(t, "user123", *filter.UserID) + + require.NotNil(t, filter.UserEmail) + assert.Equal(t, "user@example.com", *filter.UserEmail) + + require.NotNil(t, filter.UserName) + assert.Equal(t, "John Doe", *filter.UserName) + + require.NotNil(t, filter.Method) + assert.Equal(t, "GET", *filter.Method) + + require.NotNil(t, filter.Status) + assert.Equal(t, "success", *filter.Status) + + require.NotNil(t, filter.StatusCode) + assert.Equal(t, 200, *filter.StatusCode) + + require.NotNil(t, filter.StartDate) + expectedStart, _ := time.Parse(time.RFC3339, startDate) + assert.Equal(t, expectedStart, *filter.StartDate) + + require.NotNil(t, filter.EndDate) + expectedEnd, _ := time.Parse(time.RFC3339, endDate) + assert.Equal(t, expectedEnd, *filter.EndDate) +} + +func TestAccessLogFilter_ParseFromRequest_EmptyFilters(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/test", nil) + + filter := &AccessLogFilter{} + filter.ParseFromRequest(req) + + assert.Nil(t, filter.Search) + assert.Nil(t, filter.SourceIP) + assert.Nil(t, filter.Host) + assert.Nil(t, filter.Path) + assert.Nil(t, filter.UserID) + assert.Nil(t, filter.UserEmail) + assert.Nil(t, filter.UserName) + assert.Nil(t, filter.Method) + assert.Nil(t, filter.Status) + assert.Nil(t, filter.StatusCode) + assert.Nil(t, filter.StartDate) + assert.Nil(t, filter.EndDate) +} + +func TestAccessLogFilter_ParseFromRequest_InvalidFilters(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/test", nil) + q := req.URL.Query() + q.Set("status_code", "invalid") + q.Set("start_date", "not-a-date") + q.Set("end_date", "2024-99-99") + req.URL.RawQuery = q.Encode() + + filter := &AccessLogFilter{} + filter.ParseFromRequest(req) + + assert.Nil(t, filter.StatusCode, "invalid status_code should be nil") + assert.Nil(t, filter.StartDate, "invalid start_date should be nil") + assert.Nil(t, filter.EndDate, "invalid end_date should be nil") +} + +func TestParsePositiveInt(t *testing.T) { + tests := []struct { + name string + input string + defaultValue int + expected int + }{ + {"empty string", "", 10, 10}, + {"valid positive int", "25", 10, 25}, + {"zero", "0", 10, 10}, + {"negative", "-5", 10, 10}, + {"invalid string", "abc", 10, 10}, + {"float", "3.14", 10, 10}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := parsePositiveInt(tt.input, tt.defaultValue) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestParseOptionalString(t *testing.T) { + tests := []struct { + name string + input string + expected *string + }{ + {"empty string", "", nil}, + {"valid string", "hello", strPtr("hello")}, + {"whitespace", " ", strPtr(" ")}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := parseOptionalString(tt.input) + if tt.expected == nil { + assert.Nil(t, result) + } else { + require.NotNil(t, result) + assert.Equal(t, *tt.expected, *result) + } + }) + } +} + +func TestParseOptionalInt(t *testing.T) { + tests := []struct { + name string + input string + expected *int + }{ + {"empty string", "", nil}, + {"valid positive int", "42", intPtr(42)}, + {"zero", "0", nil}, + {"negative", "-10", nil}, + {"invalid string", "abc", nil}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := parseOptionalInt(tt.input) + if tt.expected == nil { + assert.Nil(t, result) + } else { + require.NotNil(t, result) + assert.Equal(t, *tt.expected, *result) + } + }) + } +} + +func TestParseOptionalRFC3339(t *testing.T) { + validDate := "2024-01-15T10:30:00Z" + expectedTime, _ := time.Parse(time.RFC3339, validDate) + + tests := []struct { + name string + input string + expected *time.Time + }{ + {"empty string", "", nil}, + {"valid RFC3339", validDate, &expectedTime}, + {"invalid format", "2024-01-15", nil}, + {"invalid date", "not-a-date", nil}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := parseOptionalRFC3339(tt.input) + if tt.expected == nil { + assert.Nil(t, result) + } else { + require.NotNil(t, result) + assert.Equal(t, *tt.expected, *result) + } + }) + } +} + +// Helper functions for creating pointers +func strPtr(s string) *string { + return &s +} + +func intPtr(i int) *int { + return &i +} diff --git a/management/internals/modules/reverseproxy/accesslogs/interface.go b/management/internals/modules/reverseproxy/accesslogs/interface.go new file mode 100644 index 000000000..1c51a8a7d --- /dev/null +++ b/management/internals/modules/reverseproxy/accesslogs/interface.go @@ -0,0 +1,10 @@ +package accesslogs + +import ( + "context" +) + +type Manager interface { + SaveAccessLog(ctx context.Context, proxyLog *AccessLogEntry) error + GetAllAccessLogs(ctx context.Context, accountID, userID string, filter *AccessLogFilter) ([]*AccessLogEntry, int64, error) +} diff --git a/management/internals/modules/reverseproxy/accesslogs/manager/api.go b/management/internals/modules/reverseproxy/accesslogs/manager/api.go new file mode 100644 index 000000000..1e1414ca5 --- /dev/null +++ b/management/internals/modules/reverseproxy/accesslogs/manager/api.go @@ -0,0 +1,64 @@ +package manager + +import ( + "net/http" + + "github.com/gorilla/mux" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" + nbcontext "github.com/netbirdio/netbird/management/server/context" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" +) + +type handler struct { + manager accesslogs.Manager +} + +func RegisterEndpoints(router *mux.Router, manager accesslogs.Manager) { + h := &handler{ + manager: manager, + } + + router.HandleFunc("/events/proxy", h.getAccessLogs).Methods("GET", "OPTIONS") +} + +func (h *handler) getAccessLogs(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + var filter accesslogs.AccessLogFilter + filter.ParseFromRequest(r) + + logs, totalCount, err := h.manager.GetAllAccessLogs(r.Context(), userAuth.AccountId, userAuth.UserId, &filter) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + apiLogs := make([]api.ProxyAccessLog, 0, len(logs)) + for _, log := range logs { + apiLogs = append(apiLogs, *log.ToAPIResponse()) + } + + response := &api.ProxyAccessLogsResponse{ + Data: apiLogs, + Page: filter.Page, + PageSize: filter.PageSize, + TotalRecords: int(totalCount), + TotalPages: getTotalPageCount(int(totalCount), filter.PageSize), + } + + util.WriteJSONObject(r.Context(), w, response) +} + +// getTotalPageCount calculates the total number of pages +func getTotalPageCount(totalCount, pageSize int) int { + if pageSize <= 0 { + return 0 + } + return (totalCount + pageSize - 1) / pageSize +} diff --git a/management/internals/modules/reverseproxy/accesslogs/manager/manager.go b/management/internals/modules/reverseproxy/accesslogs/manager/manager.go new file mode 100644 index 000000000..7bcdecb1b --- /dev/null +++ b/management/internals/modules/reverseproxy/accesslogs/manager/manager.go @@ -0,0 +1,108 @@ +package manager + +import ( + "context" + "strings" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" + "github.com/netbirdio/netbird/management/server/geolocation" + "github.com/netbirdio/netbird/management/server/permissions" + "github.com/netbirdio/netbird/management/server/permissions/modules" + "github.com/netbirdio/netbird/management/server/permissions/operations" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/shared/management/status" +) + +type managerImpl struct { + store store.Store + permissionsManager permissions.Manager + geo geolocation.Geolocation +} + +func NewManager(store store.Store, permissionsManager permissions.Manager, geo geolocation.Geolocation) accesslogs.Manager { + return &managerImpl{ + store: store, + permissionsManager: permissionsManager, + geo: geo, + } +} + +// SaveAccessLog saves an access log entry to the database after enriching it +func (m *managerImpl) SaveAccessLog(ctx context.Context, logEntry *accesslogs.AccessLogEntry) error { + if m.geo != nil && logEntry.GeoLocation.ConnectionIP != nil { + location, err := m.geo.Lookup(logEntry.GeoLocation.ConnectionIP) + if err != nil { + log.WithContext(ctx).Warnf("failed to get location for access log source IP [%s]: %v", logEntry.GeoLocation.ConnectionIP.String(), err) + } else { + logEntry.GeoLocation.CountryCode = location.Country.ISOCode + logEntry.GeoLocation.CityName = location.City.Names.En + logEntry.GeoLocation.GeoNameID = location.City.GeonameID + } + } + + if err := m.store.CreateAccessLog(ctx, logEntry); err != nil { + log.WithContext(ctx).WithFields(log.Fields{ + "service_id": logEntry.ServiceID, + "method": logEntry.Method, + "host": logEntry.Host, + "path": logEntry.Path, + "status": logEntry.StatusCode, + }).Errorf("failed to save access log: %v", err) + return err + } + + return nil +} + +// GetAllAccessLogs retrieves access logs for an account with pagination and filtering +func (m *managerImpl) GetAllAccessLogs(ctx context.Context, accountID, userID string, filter *accesslogs.AccessLogFilter) ([]*accesslogs.AccessLogEntry, int64, error) { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Read) + if err != nil { + return nil, 0, status.NewPermissionValidationError(err) + } + if !ok { + return nil, 0, status.NewPermissionDeniedError() + } + + if err := m.resolveUserFilters(ctx, accountID, filter); err != nil { + log.WithContext(ctx).Warnf("failed to resolve user filters: %v", err) + } + + logs, totalCount, err := m.store.GetAccountAccessLogs(ctx, store.LockingStrengthNone, accountID, *filter) + if err != nil { + return nil, 0, err + } + + return logs, totalCount, nil +} + +// resolveUserFilters converts user email/name filters to user ID filter +func (m *managerImpl) resolveUserFilters(ctx context.Context, accountID string, filter *accesslogs.AccessLogFilter) error { + if filter.UserEmail == nil && filter.UserName == nil { + return nil + } + + users, err := m.store.GetAccountUsers(ctx, store.LockingStrengthNone, accountID) + if err != nil { + return err + } + + var matchingUserIDs []string + for _, user := range users { + if filter.UserEmail != nil && strings.Contains(strings.ToLower(user.Email), strings.ToLower(*filter.UserEmail)) { + matchingUserIDs = append(matchingUserIDs, user.Id) + continue + } + if filter.UserName != nil && strings.Contains(strings.ToLower(user.Name), strings.ToLower(*filter.UserName)) { + matchingUserIDs = append(matchingUserIDs, user.Id) + } + } + + if len(matchingUserIDs) > 0 { + filter.UserID = &matchingUserIDs[0] + } + + return nil +} diff --git a/management/internals/modules/reverseproxy/domain/domain.go b/management/internals/modules/reverseproxy/domain/domain.go new file mode 100644 index 000000000..da3432626 --- /dev/null +++ b/management/internals/modules/reverseproxy/domain/domain.go @@ -0,0 +1,17 @@ +package domain + +type Type string + +const ( + TypeFree Type = "free" + TypeCustom Type = "custom" +) + +type Domain struct { + ID string `gorm:"unique;primaryKey;autoIncrement"` + Domain string `gorm:"unique"` // Domain records must be unique, this avoids domain reuse across accounts. + AccountID string `gorm:"index"` + TargetCluster string // The proxy cluster this domain should be validated against + Type Type `gorm:"-"` + Validated bool +} diff --git a/management/internals/modules/reverseproxy/domain/interface.go b/management/internals/modules/reverseproxy/domain/interface.go new file mode 100644 index 000000000..d40e9b637 --- /dev/null +++ b/management/internals/modules/reverseproxy/domain/interface.go @@ -0,0 +1,12 @@ +package domain + +import ( + "context" +) + +type Manager interface { + GetDomains(ctx context.Context, accountID, userID string) ([]*Domain, error) + CreateDomain(ctx context.Context, accountID, userID, domainName, targetCluster string) (*Domain, error) + DeleteDomain(ctx context.Context, accountID, userID, domainID string) error + ValidateDomain(ctx context.Context, accountID, userID, domainID string) +} diff --git a/management/internals/modules/reverseproxy/domain/manager/api.go b/management/internals/modules/reverseproxy/domain/manager/api.go new file mode 100644 index 000000000..2fbcdd5b8 --- /dev/null +++ b/management/internals/modules/reverseproxy/domain/manager/api.go @@ -0,0 +1,136 @@ +package manager + +import ( + "encoding/json" + "net/http" + + "github.com/gorilla/mux" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" + nbcontext "github.com/netbirdio/netbird/management/server/context" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" + "github.com/netbirdio/netbird/shared/management/status" +) + +type handler struct { + manager Manager +} + +func RegisterEndpoints(router *mux.Router, manager Manager) { + h := &handler{ + manager: manager, + } + + router.HandleFunc("/domains", h.getAllDomains).Methods("GET", "OPTIONS") + router.HandleFunc("/domains", h.createCustomDomain).Methods("POST", "OPTIONS") + router.HandleFunc("/domains/{domainId}", h.deleteCustomDomain).Methods("DELETE", "OPTIONS") + router.HandleFunc("/domains/{domainId}/validate", h.triggerCustomDomainValidation).Methods("GET", "OPTIONS") +} + +func domainTypeToApi(t domain.Type) api.ReverseProxyDomainType { + switch t { + case domain.TypeCustom: + return api.ReverseProxyDomainTypeCustom + case domain.TypeFree: + return api.ReverseProxyDomainTypeFree + } + // By default return as a "free" domain as that is more restrictive. + // TODO: is this correct? + return api.ReverseProxyDomainTypeFree +} + +func domainToApi(d *domain.Domain) api.ReverseProxyDomain { + resp := api.ReverseProxyDomain{ + Domain: d.Domain, + Id: d.ID, + Type: domainTypeToApi(d.Type), + Validated: d.Validated, + } + if d.TargetCluster != "" { + resp.TargetCluster = &d.TargetCluster + } + return resp +} + +func (h *handler) getAllDomains(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + domains, err := h.manager.GetDomains(r.Context(), userAuth.AccountId, userAuth.UserId) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + ret := make([]api.ReverseProxyDomain, 0) + for _, d := range domains { + ret = append(ret, domainToApi(d)) + } + + util.WriteJSONObject(r.Context(), w, ret) +} + +func (h *handler) createCustomDomain(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + var req api.PostApiReverseProxiesDomainsJSONRequestBody + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + + domain, err := h.manager.CreateDomain(r.Context(), userAuth.AccountId, userAuth.UserId, req.Domain, req.TargetCluster) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, domainToApi(domain)) +} + +func (h *handler) deleteCustomDomain(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + domainID := mux.Vars(r)["domainId"] + if domainID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "domain ID is required"), w) + return + } + + if err := h.manager.DeleteDomain(r.Context(), userAuth.AccountId, userAuth.UserId, domainID); err != nil { + util.WriteError(r.Context(), err, w) + return + } + + w.WriteHeader(http.StatusNoContent) +} + +func (h *handler) triggerCustomDomainValidation(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + domainID := mux.Vars(r)["domainId"] + if domainID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "domain ID is required"), w) + return + } + + go h.manager.ValidateDomain(r.Context(), userAuth.AccountId, userAuth.UserId, domainID) + + w.WriteHeader(http.StatusAccepted) +} diff --git a/management/internals/modules/reverseproxy/domain/manager/manager.go b/management/internals/modules/reverseproxy/domain/manager/manager.go new file mode 100644 index 000000000..1125f428f --- /dev/null +++ b/management/internals/modules/reverseproxy/domain/manager/manager.go @@ -0,0 +1,279 @@ +package manager + +import ( + "context" + "fmt" + "net" + "strings" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" + "github.com/netbirdio/netbird/management/server/permissions" + "github.com/netbirdio/netbird/management/server/permissions/modules" + "github.com/netbirdio/netbird/management/server/permissions/operations" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/management/status" +) + +type store interface { + GetAccount(ctx context.Context, accountID string) (*types.Account, error) + + GetCustomDomain(ctx context.Context, accountID string, domainID string) (*domain.Domain, error) + ListFreeDomains(ctx context.Context, accountID string) ([]string, error) + ListCustomDomains(ctx context.Context, accountID string) ([]*domain.Domain, error) + CreateCustomDomain(ctx context.Context, accountID string, domainName string, targetCluster string, validated bool) (*domain.Domain, error) + UpdateCustomDomain(ctx context.Context, accountID string, d *domain.Domain) (*domain.Domain, error) + DeleteCustomDomain(ctx context.Context, accountID string, domainID string) error +} + +type proxyURLProvider interface { + GetConnectedProxyURLs() []string +} + +type Manager struct { + store store + validator domain.Validator + proxyURLProvider proxyURLProvider + permissionsManager permissions.Manager +} + +func NewManager(store store, proxyURLProvider proxyURLProvider, permissionsManager permissions.Manager) Manager { + return Manager{ + store: store, + proxyURLProvider: proxyURLProvider, + validator: domain.Validator{ + Resolver: net.DefaultResolver, + }, + permissionsManager: permissionsManager, + } +} + +func (m Manager) GetDomains(ctx context.Context, accountID, userID string) ([]*domain.Domain, error) { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Read) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + domains, err := m.store.ListCustomDomains(ctx, accountID) + if err != nil { + return nil, fmt.Errorf("list custom domains: %w", err) + } + + var ret []*domain.Domain + + // Add connected proxy clusters as free domains. + // The cluster address itself is the free domain base (e.g., "eu.proxy.netbird.io"). + allowList := m.proxyURLAllowList() + log.WithFields(log.Fields{ + "accountID": accountID, + "proxyAllowList": allowList, + }).Debug("getting domains with proxy allow list") + + for _, cluster := range allowList { + ret = append(ret, &domain.Domain{ + Domain: cluster, + AccountID: accountID, + Type: domain.TypeFree, + Validated: true, + }) + } + + // Add custom domains. + for _, d := range domains { + ret = append(ret, &domain.Domain{ + ID: d.ID, + Domain: d.Domain, + AccountID: accountID, + TargetCluster: d.TargetCluster, + Type: domain.TypeCustom, + Validated: d.Validated, + }) + } + + return ret, nil +} + +func (m Manager) CreateDomain(ctx context.Context, accountID, userID, domainName, targetCluster string) (*domain.Domain, error) { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Create) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + // Verify the target cluster is in the available clusters + allowList := m.proxyURLAllowList() + clusterValid := false + for _, cluster := range allowList { + if cluster == targetCluster { + clusterValid = true + break + } + } + if !clusterValid { + return nil, fmt.Errorf("target cluster %s is not available", targetCluster) + } + + // Attempt an initial validation against the specified cluster only + var validated bool + if m.validator.IsValid(ctx, domainName, []string{targetCluster}) { + validated = true + } + + d, err := m.store.CreateCustomDomain(ctx, accountID, domainName, targetCluster, validated) + if err != nil { + return d, fmt.Errorf("create domain in store: %w", err) + } + return d, nil +} + +func (m Manager) DeleteDomain(ctx context.Context, accountID, userID, domainID string) error { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Delete) + if err != nil { + return status.NewPermissionValidationError(err) + } + if !ok { + return status.NewPermissionDeniedError() + } + + if err := m.store.DeleteCustomDomain(ctx, accountID, domainID); err != nil { + // TODO: check for "no records" type error. Because that is a success condition. + return fmt.Errorf("delete domain from store: %w", err) + } + return nil +} + +func (m Manager) ValidateDomain(ctx context.Context, accountID, userID, domainID string) { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Create) + if err != nil { + log.WithFields(log.Fields{ + "accountID": accountID, + "domainID": domainID, + }).WithError(err).Error("validate domain") + return + } + if !ok { + log.WithFields(log.Fields{ + "accountID": accountID, + "domainID": domainID, + }).WithError(err).Error("validate domain") + } + + log.WithFields(log.Fields{ + "accountID": accountID, + "domainID": domainID, + }).Info("starting domain validation") + + d, err := m.store.GetCustomDomain(context.Background(), accountID, domainID) + if err != nil { + log.WithFields(log.Fields{ + "accountID": accountID, + "domainID": domainID, + }).WithError(err).Error("get custom domain from store") + return + } + + // Validate only against the domain's target cluster + targetCluster := d.TargetCluster + if targetCluster == "" { + log.WithFields(log.Fields{ + "accountID": accountID, + "domainID": domainID, + "domain": d.Domain, + }).Warn("domain has no target cluster set, skipping validation") + return + } + + log.WithFields(log.Fields{ + "accountID": accountID, + "domainID": domainID, + "domain": d.Domain, + "targetCluster": targetCluster, + }).Info("validating domain against target cluster") + + if m.validator.IsValid(context.Background(), d.Domain, []string{targetCluster}) { + log.WithFields(log.Fields{ + "accountID": accountID, + "domainID": domainID, + "domain": d.Domain, + }).Info("domain validated successfully") + d.Validated = true + if _, err := m.store.UpdateCustomDomain(context.Background(), accountID, d); err != nil { + log.WithFields(log.Fields{ + "accountID": accountID, + "domainID": domainID, + "domain": d.Domain, + }).WithError(err).Error("update custom domain in store") + return + } + } else { + log.WithFields(log.Fields{ + "accountID": accountID, + "domainID": domainID, + "domain": d.Domain, + "targetCluster": targetCluster, + }).Warn("domain validation failed - CNAME does not match target cluster") + } +} + +// proxyURLAllowList retrieves a list of currently connected proxies and +// their URLs +func (m Manager) proxyURLAllowList() []string { + var reverseProxyAddresses []string + if m.proxyURLProvider != nil { + reverseProxyAddresses = m.proxyURLProvider.GetConnectedProxyURLs() + } + return reverseProxyAddresses +} + +// DeriveClusterFromDomain determines the proxy cluster for a given domain. +// For free domains (those ending with a known cluster suffix), the cluster is extracted from the domain. +// For custom domains, the cluster is determined by checking the registered custom domain's target cluster. +func (m Manager) DeriveClusterFromDomain(ctx context.Context, accountID, domain string) (string, error) { + allowList := m.proxyURLAllowList() + if len(allowList) == 0 { + return "", fmt.Errorf("no proxy clusters available") + } + + if cluster, ok := ExtractClusterFromFreeDomain(domain, allowList); ok { + return cluster, nil + } + + customDomains, err := m.store.ListCustomDomains(ctx, accountID) + if err != nil { + return "", fmt.Errorf("list custom domains: %w", err) + } + + targetCluster, valid := extractClusterFromCustomDomains(domain, customDomains) + if valid { + return targetCluster, nil + } + + return "", fmt.Errorf("domain %s does not match any available proxy cluster", domain) +} + +func extractClusterFromCustomDomains(domain string, customDomains []*domain.Domain) (string, bool) { + for _, customDomain := range customDomains { + if strings.HasSuffix(domain, "."+customDomain.Domain) { + return customDomain.TargetCluster, true + } + } + return "", false +} + +// ExtractClusterFromFreeDomain extracts the cluster address from a free domain. +// Free domains have the format: .. (e.g., myapp.abc123.eu.proxy.netbird.io) +// It matches the domain suffix against available clusters and returns the matching cluster. +func ExtractClusterFromFreeDomain(domain string, availableClusters []string) (string, bool) { + for _, cluster := range availableClusters { + if strings.HasSuffix(domain, "."+cluster) { + return cluster, true + } + } + return "", false +} diff --git a/management/internals/modules/reverseproxy/domain/validator.go b/management/internals/modules/reverseproxy/domain/validator.go new file mode 100644 index 000000000..9c23c1192 --- /dev/null +++ b/management/internals/modules/reverseproxy/domain/validator.go @@ -0,0 +1,88 @@ +package domain + +import ( + "context" + "net" + "strings" + + log "github.com/sirupsen/logrus" +) + +type resolver interface { + LookupCNAME(context.Context, string) (string, error) +} + +type Validator struct { + Resolver resolver +} + +// NewValidator initializes a validator with a specific DNS Resolver. +// If a Validator is used without specifying a Resolver, then it will +// use the net.DefaultResolver. +func NewValidator(resolver resolver) *Validator { + return &Validator{ + Resolver: resolver, + } +} + +// IsValid looks up the CNAME record for the passed domain with a prefix +// and compares it against the acceptable domains. +// If the returned CNAME matches any accepted domain, it will return true, +// otherwise, including in the event of a DNS error, it will return false. +// The comparison is very simple, so wildcards will not match if included +// in the acceptable domain list. +func (v *Validator) IsValid(ctx context.Context, domain string, accept []string) bool { + _, valid := v.ValidateWithCluster(ctx, domain, accept) + return valid +} + +// ValidateWithCluster validates a custom domain and returns the matched cluster address. +// Returns the cluster address and true if valid, or empty string and false if invalid. +func (v *Validator) ValidateWithCluster(ctx context.Context, domain string, accept []string) (string, bool) { + if v.Resolver == nil { + v.Resolver = net.DefaultResolver + } + + lookupDomain := "validation." + domain + log.WithFields(log.Fields{ + "domain": domain, + "lookupDomain": lookupDomain, + "acceptList": accept, + }).Debug("looking up CNAME for domain validation") + + cname, err := v.Resolver.LookupCNAME(ctx, lookupDomain) + if err != nil { + log.WithFields(log.Fields{ + "domain": domain, + "lookupDomain": lookupDomain, + }).WithError(err).Warn("CNAME lookup failed for domain validation") + return "", false + } + + nakedCNAME := strings.TrimSuffix(cname, ".") + log.WithFields(log.Fields{ + "domain": domain, + "cname": cname, + "nakedCNAME": nakedCNAME, + "acceptList": accept, + }).Debug("CNAME lookup result for domain validation") + + for _, acceptDomain := range accept { + normalizedAccept := strings.TrimSuffix(acceptDomain, ".") + if nakedCNAME == normalizedAccept { + log.WithFields(log.Fields{ + "domain": domain, + "cname": nakedCNAME, + "cluster": acceptDomain, + }).Info("domain CNAME matched cluster") + return acceptDomain, true + } + } + + log.WithFields(log.Fields{ + "domain": domain, + "cname": nakedCNAME, + "acceptList": accept, + }).Warn("domain CNAME does not match any accepted cluster") + return "", false +} diff --git a/management/internals/modules/reverseproxy/domain/validator_test.go b/management/internals/modules/reverseproxy/domain/validator_test.go new file mode 100644 index 000000000..1f9583728 --- /dev/null +++ b/management/internals/modules/reverseproxy/domain/validator_test.go @@ -0,0 +1,56 @@ +package domain_test + +import ( + "context" + "testing" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" +) + +type resolver struct { + CNAME string +} + +func (r resolver) LookupCNAME(_ context.Context, _ string) (string, error) { + return r.CNAME, nil +} + +func TestIsValid(t *testing.T) { + tests := map[string]struct { + resolver interface { + LookupCNAME(context.Context, string) (string, error) + } + domain string + accept []string + expect bool + }{ + "match": { + resolver: resolver{"bar.example.com."}, // Including trailing "." in response. + domain: "foo.example.com", + accept: []string{"bar.example.com"}, + expect: true, + }, + "no match": { + resolver: resolver{"invalid"}, + domain: "foo.example.com", + accept: []string{"bar.example.com"}, + expect: false, + }, + "accept trailing dot": { + resolver: resolver{"bar.example.com."}, + domain: "foo.example.com", + accept: []string{"bar.example.com."}, // Including trailing "." in accept. + expect: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + validator := domain.NewValidator(test.resolver) + actual := validator.IsValid(t.Context(), test.domain, test.accept) + if test.expect != actual { + t.Errorf("Incorrect return value:\nexpect: %v\nactual: %v", test.expect, actual) + } + }) + } +} diff --git a/management/internals/modules/reverseproxy/interface.go b/management/internals/modules/reverseproxy/interface.go new file mode 100644 index 000000000..7614b3ce5 --- /dev/null +++ b/management/internals/modules/reverseproxy/interface.go @@ -0,0 +1,23 @@ +package reverseproxy + +//go:generate go run github.com/golang/mock/mockgen -package reverseproxy -destination=interface_mock.go -source=./interface.go -build_flags=-mod=mod + +import ( + "context" +) + +type Manager interface { + GetAllServices(ctx context.Context, accountID, userID string) ([]*Service, error) + GetService(ctx context.Context, accountID, userID, serviceID string) (*Service, error) + CreateService(ctx context.Context, accountID, userID string, service *Service) (*Service, error) + UpdateService(ctx context.Context, accountID, userID string, service *Service) (*Service, error) + DeleteService(ctx context.Context, accountID, userID, serviceID string) error + SetCertificateIssuedAt(ctx context.Context, accountID, serviceID string) error + SetStatus(ctx context.Context, accountID, serviceID string, status ProxyStatus) error + ReloadAllServicesForAccount(ctx context.Context, accountID string) error + ReloadService(ctx context.Context, accountID, serviceID string) error + GetGlobalServices(ctx context.Context) ([]*Service, error) + GetServiceByID(ctx context.Context, accountID, serviceID string) (*Service, error) + GetAccountServices(ctx context.Context, accountID string) ([]*Service, error) + GetServiceIDByTargetID(ctx context.Context, accountID string, resourceID string) (string, error) +} diff --git a/management/internals/modules/reverseproxy/interface_mock.go b/management/internals/modules/reverseproxy/interface_mock.go new file mode 100644 index 000000000..d5f38c38a --- /dev/null +++ b/management/internals/modules/reverseproxy/interface_mock.go @@ -0,0 +1,225 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./interface.go + +// Package reverseproxy is a generated GoMock package. +package reverseproxy + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockManager is a mock of Manager interface. +type MockManager struct { + ctrl *gomock.Controller + recorder *MockManagerMockRecorder +} + +// MockManagerMockRecorder is the mock recorder for MockManager. +type MockManagerMockRecorder struct { + mock *MockManager +} + +// NewMockManager creates a new mock instance. +func NewMockManager(ctrl *gomock.Controller) *MockManager { + mock := &MockManager{ctrl: ctrl} + mock.recorder = &MockManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockManager) EXPECT() *MockManagerMockRecorder { + return m.recorder +} + +// CreateService mocks base method. +func (m *MockManager) CreateService(ctx context.Context, accountID, userID string, service *Service) (*Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateService", ctx, accountID, userID, service) + ret0, _ := ret[0].(*Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateService indicates an expected call of CreateService. +func (mr *MockManagerMockRecorder) CreateService(ctx, accountID, userID, service interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateService", reflect.TypeOf((*MockManager)(nil).CreateService), ctx, accountID, userID, service) +} + +// DeleteService mocks base method. +func (m *MockManager) DeleteService(ctx context.Context, accountID, userID, serviceID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteService", ctx, accountID, userID, serviceID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteService indicates an expected call of DeleteService. +func (mr *MockManagerMockRecorder) DeleteService(ctx, accountID, userID, serviceID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteService", reflect.TypeOf((*MockManager)(nil).DeleteService), ctx, accountID, userID, serviceID) +} + +// GetAccountServices mocks base method. +func (m *MockManager) GetAccountServices(ctx context.Context, accountID string) ([]*Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountServices", ctx, accountID) + ret0, _ := ret[0].([]*Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountServices indicates an expected call of GetAccountServices. +func (mr *MockManagerMockRecorder) GetAccountServices(ctx, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountServices", reflect.TypeOf((*MockManager)(nil).GetAccountServices), ctx, accountID) +} + +// GetAllServices mocks base method. +func (m *MockManager) GetAllServices(ctx context.Context, accountID, userID string) ([]*Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAllServices", ctx, accountID, userID) + ret0, _ := ret[0].([]*Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAllServices indicates an expected call of GetAllServices. +func (mr *MockManagerMockRecorder) GetAllServices(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllServices", reflect.TypeOf((*MockManager)(nil).GetAllServices), ctx, accountID, userID) +} + +// GetGlobalServices mocks base method. +func (m *MockManager) GetGlobalServices(ctx context.Context) ([]*Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGlobalServices", ctx) + ret0, _ := ret[0].([]*Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetGlobalServices indicates an expected call of GetGlobalServices. +func (mr *MockManagerMockRecorder) GetGlobalServices(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGlobalServices", reflect.TypeOf((*MockManager)(nil).GetGlobalServices), ctx) +} + +// GetService mocks base method. +func (m *MockManager) GetService(ctx context.Context, accountID, userID, serviceID string) (*Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetService", ctx, accountID, userID, serviceID) + ret0, _ := ret[0].(*Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetService indicates an expected call of GetService. +func (mr *MockManagerMockRecorder) GetService(ctx, accountID, userID, serviceID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetService", reflect.TypeOf((*MockManager)(nil).GetService), ctx, accountID, userID, serviceID) +} + +// GetServiceByID mocks base method. +func (m *MockManager) GetServiceByID(ctx context.Context, accountID, serviceID string) (*Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetServiceByID", ctx, accountID, serviceID) + ret0, _ := ret[0].(*Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetServiceByID indicates an expected call of GetServiceByID. +func (mr *MockManagerMockRecorder) GetServiceByID(ctx, accountID, serviceID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceByID", reflect.TypeOf((*MockManager)(nil).GetServiceByID), ctx, accountID, serviceID) +} + +// GetServiceIDByTargetID mocks base method. +func (m *MockManager) GetServiceIDByTargetID(ctx context.Context, accountID, resourceID string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetServiceIDByTargetID", ctx, accountID, resourceID) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetServiceIDByTargetID indicates an expected call of GetServiceIDByTargetID. +func (mr *MockManagerMockRecorder) GetServiceIDByTargetID(ctx, accountID, resourceID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceIDByTargetID", reflect.TypeOf((*MockManager)(nil).GetServiceIDByTargetID), ctx, accountID, resourceID) +} + +// ReloadAllServicesForAccount mocks base method. +func (m *MockManager) ReloadAllServicesForAccount(ctx context.Context, accountID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReloadAllServicesForAccount", ctx, accountID) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReloadAllServicesForAccount indicates an expected call of ReloadAllServicesForAccount. +func (mr *MockManagerMockRecorder) ReloadAllServicesForAccount(ctx, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReloadAllServicesForAccount", reflect.TypeOf((*MockManager)(nil).ReloadAllServicesForAccount), ctx, accountID) +} + +// ReloadService mocks base method. +func (m *MockManager) ReloadService(ctx context.Context, accountID, serviceID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReloadService", ctx, accountID, serviceID) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReloadService indicates an expected call of ReloadService. +func (mr *MockManagerMockRecorder) ReloadService(ctx, accountID, serviceID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReloadService", reflect.TypeOf((*MockManager)(nil).ReloadService), ctx, accountID, serviceID) +} + +// SetCertificateIssuedAt mocks base method. +func (m *MockManager) SetCertificateIssuedAt(ctx context.Context, accountID, serviceID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetCertificateIssuedAt", ctx, accountID, serviceID) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetCertificateIssuedAt indicates an expected call of SetCertificateIssuedAt. +func (mr *MockManagerMockRecorder) SetCertificateIssuedAt(ctx, accountID, serviceID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCertificateIssuedAt", reflect.TypeOf((*MockManager)(nil).SetCertificateIssuedAt), ctx, accountID, serviceID) +} + +// SetStatus mocks base method. +func (m *MockManager) SetStatus(ctx context.Context, accountID, serviceID string, status ProxyStatus) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetStatus", ctx, accountID, serviceID, status) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetStatus indicates an expected call of SetStatus. +func (mr *MockManagerMockRecorder) SetStatus(ctx, accountID, serviceID, status interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetStatus", reflect.TypeOf((*MockManager)(nil).SetStatus), ctx, accountID, serviceID, status) +} + +// UpdateService mocks base method. +func (m *MockManager) UpdateService(ctx context.Context, accountID, userID string, service *Service) (*Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateService", ctx, accountID, userID, service) + ret0, _ := ret[0].(*Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateService indicates an expected call of UpdateService. +func (mr *MockManagerMockRecorder) UpdateService(ctx, accountID, userID, service interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateService", reflect.TypeOf((*MockManager)(nil).UpdateService), ctx, accountID, userID, service) +} diff --git a/management/internals/modules/reverseproxy/manager/api.go b/management/internals/modules/reverseproxy/manager/api.go new file mode 100644 index 000000000..9117ecd38 --- /dev/null +++ b/management/internals/modules/reverseproxy/manager/api.go @@ -0,0 +1,170 @@ +package manager + +import ( + "encoding/json" + "net/http" + + "github.com/gorilla/mux" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" + accesslogsmanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs/manager" + domainmanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain/manager" + nbcontext "github.com/netbirdio/netbird/management/server/context" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" + "github.com/netbirdio/netbird/shared/management/status" +) + +type handler struct { + manager reverseproxy.Manager +} + +// RegisterEndpoints registers all service HTTP endpoints. +func RegisterEndpoints(manager reverseproxy.Manager, domainManager domainmanager.Manager, accessLogsManager accesslogs.Manager, router *mux.Router) { + h := &handler{ + manager: manager, + } + + domainRouter := router.PathPrefix("/reverse-proxies").Subrouter() + domainmanager.RegisterEndpoints(domainRouter, domainManager) + + accesslogsmanager.RegisterEndpoints(router, accessLogsManager) + + router.HandleFunc("/reverse-proxies/services", h.getAllServices).Methods("GET", "OPTIONS") + router.HandleFunc("/reverse-proxies/services", h.createService).Methods("POST", "OPTIONS") + router.HandleFunc("/reverse-proxies/services/{serviceId}", h.getService).Methods("GET", "OPTIONS") + router.HandleFunc("/reverse-proxies/services/{serviceId}", h.updateService).Methods("PUT", "OPTIONS") + router.HandleFunc("/reverse-proxies/services/{serviceId}", h.deleteService).Methods("DELETE", "OPTIONS") +} + +func (h *handler) getAllServices(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + allServices, err := h.manager.GetAllServices(r.Context(), userAuth.AccountId, userAuth.UserId) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + apiServices := make([]*api.Service, 0, len(allServices)) + for _, service := range allServices { + apiServices = append(apiServices, service.ToAPIResponse()) + } + + util.WriteJSONObject(r.Context(), w, apiServices) +} + +func (h *handler) createService(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + var req api.ServiceRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + + service := new(reverseproxy.Service) + service.FromAPIRequest(&req, userAuth.AccountId) + + if err = service.Validate(); err != nil { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "%s", err.Error()), w) + return + } + + createdService, err := h.manager.CreateService(r.Context(), userAuth.AccountId, userAuth.UserId, service) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, createdService.ToAPIResponse()) +} + +func (h *handler) getService(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + serviceID := mux.Vars(r)["serviceId"] + if serviceID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "service ID is required"), w) + return + } + + service, err := h.manager.GetService(r.Context(), userAuth.AccountId, userAuth.UserId, serviceID) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, service.ToAPIResponse()) +} + +func (h *handler) updateService(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + serviceID := mux.Vars(r)["serviceId"] + if serviceID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "service ID is required"), w) + return + } + + var req api.ServiceRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + + service := new(reverseproxy.Service) + service.ID = serviceID + service.FromAPIRequest(&req, userAuth.AccountId) + + if err = service.Validate(); err != nil { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "%s", err.Error()), w) + return + } + + updatedService, err := h.manager.UpdateService(r.Context(), userAuth.AccountId, userAuth.UserId, service) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, updatedService.ToAPIResponse()) +} + +func (h *handler) deleteService(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + serviceID := mux.Vars(r)["serviceId"] + if serviceID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "service ID is required"), w) + return + } + + if err := h.manager.DeleteService(r.Context(), userAuth.AccountId, userAuth.UserId, serviceID); err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, util.EmptyObject{}) +} diff --git a/management/internals/modules/reverseproxy/manager/manager.go b/management/internals/modules/reverseproxy/manager/manager.go new file mode 100644 index 000000000..2a93fdff6 --- /dev/null +++ b/management/internals/modules/reverseproxy/manager/manager.go @@ -0,0 +1,541 @@ +package manager + +import ( + "context" + "fmt" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/sessionkey" + nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" + "github.com/netbirdio/netbird/management/server/account" + "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/management/server/permissions" + "github.com/netbirdio/netbird/management/server/permissions/modules" + "github.com/netbirdio/netbird/management/server/permissions/operations" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/shared/management/status" +) + +const unknownHostPlaceholder = "unknown" + +// ClusterDeriver derives the proxy cluster from a domain. +type ClusterDeriver interface { + DeriveClusterFromDomain(ctx context.Context, accountID, domain string) (string, error) +} + +type managerImpl struct { + store store.Store + accountManager account.Manager + permissionsManager permissions.Manager + proxyGRPCServer *nbgrpc.ProxyServiceServer + clusterDeriver ClusterDeriver +} + +// NewManager creates a new service manager. +func NewManager(store store.Store, accountManager account.Manager, permissionsManager permissions.Manager, proxyGRPCServer *nbgrpc.ProxyServiceServer, clusterDeriver ClusterDeriver) reverseproxy.Manager { + return &managerImpl{ + store: store, + accountManager: accountManager, + permissionsManager: permissionsManager, + proxyGRPCServer: proxyGRPCServer, + clusterDeriver: clusterDeriver, + } +} + +func (m *managerImpl) GetAllServices(ctx context.Context, accountID, userID string) ([]*reverseproxy.Service, error) { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Read) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + services, err := m.store.GetAccountServices(ctx, store.LockingStrengthNone, accountID) + if err != nil { + return nil, fmt.Errorf("failed to get services: %w", err) + } + + for _, service := range services { + err = m.replaceHostByLookup(ctx, accountID, service) + if err != nil { + return nil, fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err) + } + } + + return services, nil +} + +func (m *managerImpl) replaceHostByLookup(ctx context.Context, accountID string, service *reverseproxy.Service) error { + for _, target := range service.Targets { + switch target.TargetType { + case reverseproxy.TargetTypePeer: + peer, err := m.store.GetPeerByID(ctx, store.LockingStrengthNone, accountID, target.TargetId) + if err != nil { + log.WithContext(ctx).Warnf("failed to get peer by id %s for service %s: %v", target.TargetId, service.ID, err) + target.Host = unknownHostPlaceholder + continue + } + target.Host = peer.IP.String() + case reverseproxy.TargetTypeHost: + resource, err := m.store.GetNetworkResourceByID(ctx, store.LockingStrengthNone, accountID, target.TargetId) + if err != nil { + log.WithContext(ctx).Warnf("failed to get resource by id %s for service %s: %v", target.TargetId, service.ID, err) + target.Host = unknownHostPlaceholder + continue + } + target.Host = resource.Prefix.Addr().String() + case reverseproxy.TargetTypeDomain: + resource, err := m.store.GetNetworkResourceByID(ctx, store.LockingStrengthNone, accountID, target.TargetId) + if err != nil { + log.WithContext(ctx).Warnf("failed to get resource by id %s for service %s: %v", target.TargetId, service.ID, err) + target.Host = unknownHostPlaceholder + continue + } + target.Host = resource.Domain + case reverseproxy.TargetTypeSubnet: + // For subnets we do not do any lookups on the resource + default: + return fmt.Errorf("unknown target type: %s", target.TargetType) + } + } + return nil +} + +func (m *managerImpl) GetService(ctx context.Context, accountID, userID, serviceID string) (*reverseproxy.Service, error) { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Read) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + service, err := m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, serviceID) + if err != nil { + return nil, fmt.Errorf("failed to get service: %w", err) + } + + err = m.replaceHostByLookup(ctx, accountID, service) + if err != nil { + return nil, fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err) + } + return service, nil +} + +func (m *managerImpl) CreateService(ctx context.Context, accountID, userID string, service *reverseproxy.Service) (*reverseproxy.Service, error) { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Create) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + if err := m.initializeServiceForCreate(ctx, accountID, service); err != nil { + return nil, err + } + + if err := m.persistNewService(ctx, accountID, service); err != nil { + return nil, err + } + + m.accountManager.StoreEvent(ctx, userID, service.ID, accountID, activity.ServiceCreated, service.EventMeta()) + + err = m.replaceHostByLookup(ctx, accountID, service) + if err != nil { + return nil, fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err) + } + + m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Create, "", m.proxyGRPCServer.GetOIDCValidationConfig()), service.ProxyCluster) + + m.accountManager.UpdateAccountPeers(ctx, accountID) + + return service, nil +} + +func (m *managerImpl) initializeServiceForCreate(ctx context.Context, accountID string, service *reverseproxy.Service) error { + if m.clusterDeriver != nil { + proxyCluster, err := m.clusterDeriver.DeriveClusterFromDomain(ctx, accountID, service.Domain) + if err != nil { + log.WithError(err).Warnf("could not derive cluster from domain %s, updates will broadcast to all proxy servers", service.Domain) + return status.Errorf(status.PreconditionFailed, "could not derive cluster from domain %s: %v", service.Domain, err) + } + service.ProxyCluster = proxyCluster + } + + service.AccountID = accountID + service.InitNewRecord() + + if err := service.Auth.HashSecrets(); err != nil { + return fmt.Errorf("hash secrets: %w", err) + } + + keyPair, err := sessionkey.GenerateKeyPair() + if err != nil { + return fmt.Errorf("generate session keys: %w", err) + } + service.SessionPrivateKey = keyPair.PrivateKey + service.SessionPublicKey = keyPair.PublicKey + + return nil +} + +func (m *managerImpl) persistNewService(ctx context.Context, accountID string, service *reverseproxy.Service) error { + return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + if err := m.checkDomainAvailable(ctx, transaction, accountID, service.Domain, ""); err != nil { + return err + } + + if err := validateTargetReferences(ctx, transaction, accountID, service.Targets); err != nil { + return err + } + + if err := transaction.CreateService(ctx, service); err != nil { + return fmt.Errorf("failed to create service: %w", err) + } + + return nil + }) +} + +func (m *managerImpl) checkDomainAvailable(ctx context.Context, transaction store.Store, accountID, domain, excludeServiceID string) error { + existingService, err := transaction.GetServiceByDomain(ctx, accountID, domain) + if err != nil { + if sErr, ok := status.FromError(err); !ok || sErr.Type() != status.NotFound { + return fmt.Errorf("failed to check existing service: %w", err) + } + return nil + } + + if existingService != nil && existingService.ID != excludeServiceID { + return status.Errorf(status.AlreadyExists, "service with domain %s already exists", domain) + } + + return nil +} + +func (m *managerImpl) UpdateService(ctx context.Context, accountID, userID string, service *reverseproxy.Service) (*reverseproxy.Service, error) { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Update) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + if err := service.Auth.HashSecrets(); err != nil { + return nil, fmt.Errorf("hash secrets: %w", err) + } + + updateInfo, err := m.persistServiceUpdate(ctx, accountID, service) + if err != nil { + return nil, err + } + + m.accountManager.StoreEvent(ctx, userID, service.ID, accountID, activity.ServiceUpdated, service.EventMeta()) + + if err := m.replaceHostByLookup(ctx, accountID, service); err != nil { + return nil, fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err) + } + + m.sendServiceUpdateNotifications(service, updateInfo) + m.accountManager.UpdateAccountPeers(ctx, accountID) + + return service, nil +} + +type serviceUpdateInfo struct { + oldCluster string + domainChanged bool + serviceEnabledChanged bool +} + +func (m *managerImpl) persistServiceUpdate(ctx context.Context, accountID string, service *reverseproxy.Service) (*serviceUpdateInfo, error) { + var updateInfo serviceUpdateInfo + + err := m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + existingService, err := transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, service.ID) + if err != nil { + return err + } + + updateInfo.oldCluster = existingService.ProxyCluster + updateInfo.domainChanged = existingService.Domain != service.Domain + + if updateInfo.domainChanged { + if err := m.handleDomainChange(ctx, transaction, accountID, service); err != nil { + return err + } + } else { + service.ProxyCluster = existingService.ProxyCluster + } + + m.preserveExistingAuthSecrets(service, existingService) + m.preserveServiceMetadata(service, existingService) + updateInfo.serviceEnabledChanged = existingService.Enabled != service.Enabled + + if err := validateTargetReferences(ctx, transaction, accountID, service.Targets); err != nil { + return err + } + + if err := transaction.UpdateService(ctx, service); err != nil { + return fmt.Errorf("update service: %w", err) + } + + return nil + }) + + return &updateInfo, err +} + +func (m *managerImpl) handleDomainChange(ctx context.Context, transaction store.Store, accountID string, service *reverseproxy.Service) error { + if err := m.checkDomainAvailable(ctx, transaction, accountID, service.Domain, service.ID); err != nil { + return err + } + + if m.clusterDeriver != nil { + newCluster, err := m.clusterDeriver.DeriveClusterFromDomain(ctx, accountID, service.Domain) + if err != nil { + log.WithError(err).Warnf("could not derive cluster from domain %s", service.Domain) + } else { + service.ProxyCluster = newCluster + } + } + + return nil +} + +func (m *managerImpl) preserveExistingAuthSecrets(service, existingService *reverseproxy.Service) { + if service.Auth.PasswordAuth != nil && service.Auth.PasswordAuth.Enabled && + existingService.Auth.PasswordAuth != nil && existingService.Auth.PasswordAuth.Enabled && + service.Auth.PasswordAuth.Password == "" { + service.Auth.PasswordAuth = existingService.Auth.PasswordAuth + } + + if service.Auth.PinAuth != nil && service.Auth.PinAuth.Enabled && + existingService.Auth.PinAuth != nil && existingService.Auth.PinAuth.Enabled && + service.Auth.PinAuth.Pin == "" { + service.Auth.PinAuth = existingService.Auth.PinAuth + } +} + +func (m *managerImpl) preserveServiceMetadata(service, existingService *reverseproxy.Service) { + service.Meta = existingService.Meta + service.SessionPrivateKey = existingService.SessionPrivateKey + service.SessionPublicKey = existingService.SessionPublicKey +} + +func (m *managerImpl) sendServiceUpdateNotifications(service *reverseproxy.Service, updateInfo *serviceUpdateInfo) { + oidcCfg := m.proxyGRPCServer.GetOIDCValidationConfig() + + switch { + case updateInfo.domainChanged && updateInfo.oldCluster != service.ProxyCluster: + m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Delete, "", oidcCfg), updateInfo.oldCluster) + m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Create, "", oidcCfg), service.ProxyCluster) + case !service.Enabled && updateInfo.serviceEnabledChanged: + m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Delete, "", oidcCfg), service.ProxyCluster) + case service.Enabled && updateInfo.serviceEnabledChanged: + m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Create, "", oidcCfg), service.ProxyCluster) + default: + m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Update, "", oidcCfg), service.ProxyCluster) + } +} + +// validateTargetReferences checks that all target IDs reference existing peers or resources in the account. +func validateTargetReferences(ctx context.Context, transaction store.Store, accountID string, targets []*reverseproxy.Target) error { + for _, target := range targets { + switch target.TargetType { + case reverseproxy.TargetTypePeer: + if _, err := transaction.GetPeerByID(ctx, store.LockingStrengthShare, accountID, target.TargetId); err != nil { + if sErr, ok := status.FromError(err); ok && sErr.Type() == status.NotFound { + return status.Errorf(status.InvalidArgument, "peer target %q not found in account", target.TargetId) + } + return fmt.Errorf("look up peer target %q: %w", target.TargetId, err) + } + case reverseproxy.TargetTypeHost, reverseproxy.TargetTypeSubnet, reverseproxy.TargetTypeDomain: + if _, err := transaction.GetNetworkResourceByID(ctx, store.LockingStrengthShare, accountID, target.TargetId); err != nil { + if sErr, ok := status.FromError(err); ok && sErr.Type() == status.NotFound { + return status.Errorf(status.InvalidArgument, "resource target %q not found in account", target.TargetId) + } + return fmt.Errorf("look up resource target %q: %w", target.TargetId, err) + } + } + } + return nil +} + +func (m *managerImpl) DeleteService(ctx context.Context, accountID, userID, serviceID string) error { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Delete) + if err != nil { + return status.NewPermissionValidationError(err) + } + if !ok { + return status.NewPermissionDeniedError() + } + + var service *reverseproxy.Service + err = m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + var err error + service, err = transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID) + if err != nil { + return err + } + + if err = transaction.DeleteService(ctx, accountID, serviceID); err != nil { + return fmt.Errorf("failed to delete service: %w", err) + } + + return nil + }) + if err != nil { + return err + } + + m.accountManager.StoreEvent(ctx, userID, serviceID, accountID, activity.ServiceDeleted, service.EventMeta()) + + m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Delete, "", m.proxyGRPCServer.GetOIDCValidationConfig()), service.ProxyCluster) + + m.accountManager.UpdateAccountPeers(ctx, accountID) + + return nil +} + +// SetCertificateIssuedAt sets the certificate issued timestamp to the current time. +// Call this when receiving a gRPC notification that the certificate was issued. +func (m *managerImpl) SetCertificateIssuedAt(ctx context.Context, accountID, serviceID string) error { + return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + service, err := transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID) + if err != nil { + return fmt.Errorf("failed to get service: %w", err) + } + + service.Meta.CertificateIssuedAt = time.Now() + + if err = transaction.UpdateService(ctx, service); err != nil { + return fmt.Errorf("failed to update service certificate timestamp: %w", err) + } + + return nil + }) +} + +// SetStatus updates the status of the service (e.g., "active", "tunnel_not_created", etc.) +func (m *managerImpl) SetStatus(ctx context.Context, accountID, serviceID string, status reverseproxy.ProxyStatus) error { + return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + service, err := transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID) + if err != nil { + return fmt.Errorf("failed to get service: %w", err) + } + + service.Meta.Status = string(status) + + if err = transaction.UpdateService(ctx, service); err != nil { + return fmt.Errorf("failed to update service status: %w", err) + } + + return nil + }) +} + +func (m *managerImpl) ReloadService(ctx context.Context, accountID, serviceID string) error { + service, err := m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, serviceID) + if err != nil { + return fmt.Errorf("failed to get service: %w", err) + } + + err = m.replaceHostByLookup(ctx, accountID, service) + if err != nil { + return fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err) + } + + m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Update, "", m.proxyGRPCServer.GetOIDCValidationConfig()), service.ProxyCluster) + + m.accountManager.UpdateAccountPeers(ctx, accountID) + + return nil +} + +func (m *managerImpl) ReloadAllServicesForAccount(ctx context.Context, accountID string) error { + services, err := m.store.GetAccountServices(ctx, store.LockingStrengthNone, accountID) + if err != nil { + return fmt.Errorf("failed to get services: %w", err) + } + + for _, service := range services { + err = m.replaceHostByLookup(ctx, accountID, service) + if err != nil { + return fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err) + } + m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Update, "", m.proxyGRPCServer.GetOIDCValidationConfig()), service.ProxyCluster) + } + + m.accountManager.UpdateAccountPeers(ctx, accountID) + + return nil +} + +func (m *managerImpl) GetGlobalServices(ctx context.Context) ([]*reverseproxy.Service, error) { + services, err := m.store.GetServices(ctx, store.LockingStrengthNone) + if err != nil { + return nil, fmt.Errorf("failed to get services: %w", err) + } + + for _, service := range services { + err = m.replaceHostByLookup(ctx, service.AccountID, service) + if err != nil { + return nil, fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err) + } + } + + return services, nil +} + +func (m *managerImpl) GetServiceByID(ctx context.Context, accountID, serviceID string) (*reverseproxy.Service, error) { + service, err := m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, serviceID) + if err != nil { + return nil, fmt.Errorf("failed to get service: %w", err) + } + + err = m.replaceHostByLookup(ctx, accountID, service) + if err != nil { + return nil, fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err) + } + + return service, nil +} + +func (m *managerImpl) GetAccountServices(ctx context.Context, accountID string) ([]*reverseproxy.Service, error) { + services, err := m.store.GetAccountServices(ctx, store.LockingStrengthNone, accountID) + if err != nil { + return nil, fmt.Errorf("failed to get services: %w", err) + } + + for _, service := range services { + err = m.replaceHostByLookup(ctx, accountID, service) + if err != nil { + return nil, fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err) + } + } + + return services, nil +} + +func (m *managerImpl) GetServiceIDByTargetID(ctx context.Context, accountID string, resourceID string) (string, error) { + target, err := m.store.GetServiceTargetByTargetID(ctx, store.LockingStrengthNone, accountID, resourceID) + if err != nil { + if s, ok := status.FromError(err); ok && s.Type() == status.NotFound { + return "", nil + } + return "", fmt.Errorf("failed to get service target by resource ID: %w", err) + } + + if target == nil { + return "", nil + } + + return target.ServiceID, nil +} diff --git a/management/internals/modules/reverseproxy/manager/manager_test.go b/management/internals/modules/reverseproxy/manager/manager_test.go new file mode 100644 index 000000000..266b0066f --- /dev/null +++ b/management/internals/modules/reverseproxy/manager/manager_test.go @@ -0,0 +1,375 @@ +package manager + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/shared/management/status" +) + +func TestInitializeServiceForCreate(t *testing.T) { + ctx := context.Background() + accountID := "test-account" + + t.Run("successful initialization without cluster deriver", func(t *testing.T) { + mgr := &managerImpl{ + clusterDeriver: nil, + } + + service := &reverseproxy.Service{ + Domain: "example.com", + Auth: reverseproxy.AuthConfig{}, + } + + err := mgr.initializeServiceForCreate(ctx, accountID, service) + + assert.NoError(t, err) + assert.Equal(t, accountID, service.AccountID) + assert.Empty(t, service.ProxyCluster, "proxy cluster should be empty when no deriver") + assert.NotEmpty(t, service.ID, "service ID should be initialized") + assert.NotEmpty(t, service.SessionPrivateKey, "session private key should be generated") + assert.NotEmpty(t, service.SessionPublicKey, "session public key should be generated") + }) + + t.Run("verifies session keys are different", func(t *testing.T) { + mgr := &managerImpl{ + clusterDeriver: nil, + } + + service1 := &reverseproxy.Service{Domain: "test1.com", Auth: reverseproxy.AuthConfig{}} + service2 := &reverseproxy.Service{Domain: "test2.com", Auth: reverseproxy.AuthConfig{}} + + err1 := mgr.initializeServiceForCreate(ctx, accountID, service1) + err2 := mgr.initializeServiceForCreate(ctx, accountID, service2) + + assert.NoError(t, err1) + assert.NoError(t, err2) + assert.NotEqual(t, service1.SessionPrivateKey, service2.SessionPrivateKey, "private keys should be unique") + assert.NotEqual(t, service1.SessionPublicKey, service2.SessionPublicKey, "public keys should be unique") + }) +} + +func TestCheckDomainAvailable(t *testing.T) { + ctx := context.Background() + accountID := "test-account" + + tests := []struct { + name string + domain string + excludeServiceID string + setupMock func(*store.MockStore) + expectedError bool + errorType status.Type + }{ + { + name: "domain available - not found", + domain: "available.com", + excludeServiceID: "", + setupMock: func(ms *store.MockStore) { + ms.EXPECT(). + GetServiceByDomain(ctx, accountID, "available.com"). + Return(nil, status.Errorf(status.NotFound, "not found")) + }, + expectedError: false, + }, + { + name: "domain already exists", + domain: "exists.com", + excludeServiceID: "", + setupMock: func(ms *store.MockStore) { + ms.EXPECT(). + GetServiceByDomain(ctx, accountID, "exists.com"). + Return(&reverseproxy.Service{ID: "existing-id", Domain: "exists.com"}, nil) + }, + expectedError: true, + errorType: status.AlreadyExists, + }, + { + name: "domain exists but excluded (same ID)", + domain: "exists.com", + excludeServiceID: "service-123", + setupMock: func(ms *store.MockStore) { + ms.EXPECT(). + GetServiceByDomain(ctx, accountID, "exists.com"). + Return(&reverseproxy.Service{ID: "service-123", Domain: "exists.com"}, nil) + }, + expectedError: false, + }, + { + name: "domain exists with different ID", + domain: "exists.com", + excludeServiceID: "service-456", + setupMock: func(ms *store.MockStore) { + ms.EXPECT(). + GetServiceByDomain(ctx, accountID, "exists.com"). + Return(&reverseproxy.Service{ID: "service-123", Domain: "exists.com"}, nil) + }, + expectedError: true, + errorType: status.AlreadyExists, + }, + { + name: "store error (non-NotFound)", + domain: "error.com", + excludeServiceID: "", + setupMock: func(ms *store.MockStore) { + ms.EXPECT(). + GetServiceByDomain(ctx, accountID, "error.com"). + Return(nil, errors.New("database error")) + }, + expectedError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockStore := store.NewMockStore(ctrl) + tt.setupMock(mockStore) + + mgr := &managerImpl{} + err := mgr.checkDomainAvailable(ctx, mockStore, accountID, tt.domain, tt.excludeServiceID) + + if tt.expectedError { + require.Error(t, err) + if tt.errorType != 0 { + sErr, ok := status.FromError(err) + require.True(t, ok, "error should be a status error") + assert.Equal(t, tt.errorType, sErr.Type()) + } + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestCheckDomainAvailable_EdgeCases(t *testing.T) { + ctx := context.Background() + accountID := "test-account" + + t.Run("empty domain", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockStore := store.NewMockStore(ctrl) + mockStore.EXPECT(). + GetServiceByDomain(ctx, accountID, ""). + Return(nil, status.Errorf(status.NotFound, "not found")) + + mgr := &managerImpl{} + err := mgr.checkDomainAvailable(ctx, mockStore, accountID, "", "") + + assert.NoError(t, err) + }) + + t.Run("empty exclude ID with existing service", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockStore := store.NewMockStore(ctrl) + mockStore.EXPECT(). + GetServiceByDomain(ctx, accountID, "test.com"). + Return(&reverseproxy.Service{ID: "some-id", Domain: "test.com"}, nil) + + mgr := &managerImpl{} + err := mgr.checkDomainAvailable(ctx, mockStore, accountID, "test.com", "") + + assert.Error(t, err) + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.AlreadyExists, sErr.Type()) + }) + + t.Run("nil existing service with nil error", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockStore := store.NewMockStore(ctrl) + mockStore.EXPECT(). + GetServiceByDomain(ctx, accountID, "nil.com"). + Return(nil, nil) + + mgr := &managerImpl{} + err := mgr.checkDomainAvailable(ctx, mockStore, accountID, "nil.com", "") + + assert.NoError(t, err) + }) +} + +func TestPersistNewService(t *testing.T) { + ctx := context.Background() + accountID := "test-account" + + t.Run("successful service creation with no targets", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockStore := store.NewMockStore(ctrl) + service := &reverseproxy.Service{ + ID: "service-123", + Domain: "new.com", + Targets: []*reverseproxy.Target{}, + } + + // Mock ExecuteInTransaction to execute the function immediately + mockStore.EXPECT(). + ExecuteInTransaction(ctx, gomock.Any()). + DoAndReturn(func(ctx context.Context, fn func(store.Store) error) error { + // Create another mock for the transaction + txMock := store.NewMockStore(ctrl) + txMock.EXPECT(). + GetServiceByDomain(ctx, accountID, "new.com"). + Return(nil, status.Errorf(status.NotFound, "not found")) + txMock.EXPECT(). + CreateService(ctx, service). + Return(nil) + + return fn(txMock) + }) + + mgr := &managerImpl{store: mockStore} + err := mgr.persistNewService(ctx, accountID, service) + + assert.NoError(t, err) + }) + + t.Run("domain already exists", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockStore := store.NewMockStore(ctrl) + service := &reverseproxy.Service{ + ID: "service-123", + Domain: "existing.com", + Targets: []*reverseproxy.Target{}, + } + + mockStore.EXPECT(). + ExecuteInTransaction(ctx, gomock.Any()). + DoAndReturn(func(ctx context.Context, fn func(store.Store) error) error { + txMock := store.NewMockStore(ctrl) + txMock.EXPECT(). + GetServiceByDomain(ctx, accountID, "existing.com"). + Return(&reverseproxy.Service{ID: "other-id", Domain: "existing.com"}, nil) + + return fn(txMock) + }) + + mgr := &managerImpl{store: mockStore} + err := mgr.persistNewService(ctx, accountID, service) + + require.Error(t, err) + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.AlreadyExists, sErr.Type()) + }) +} +func TestPreserveExistingAuthSecrets(t *testing.T) { + mgr := &managerImpl{} + + t.Run("preserve password when empty", func(t *testing.T) { + existing := &reverseproxy.Service{ + Auth: reverseproxy.AuthConfig{ + PasswordAuth: &reverseproxy.PasswordAuthConfig{ + Enabled: true, + Password: "hashed-password", + }, + }, + } + + updated := &reverseproxy.Service{ + Auth: reverseproxy.AuthConfig{ + PasswordAuth: &reverseproxy.PasswordAuthConfig{ + Enabled: true, + Password: "", + }, + }, + } + + mgr.preserveExistingAuthSecrets(updated, existing) + + assert.Equal(t, existing.Auth.PasswordAuth, updated.Auth.PasswordAuth) + }) + + t.Run("preserve pin when empty", func(t *testing.T) { + existing := &reverseproxy.Service{ + Auth: reverseproxy.AuthConfig{ + PinAuth: &reverseproxy.PINAuthConfig{ + Enabled: true, + Pin: "hashed-pin", + }, + }, + } + + updated := &reverseproxy.Service{ + Auth: reverseproxy.AuthConfig{ + PinAuth: &reverseproxy.PINAuthConfig{ + Enabled: true, + Pin: "", + }, + }, + } + + mgr.preserveExistingAuthSecrets(updated, existing) + + assert.Equal(t, existing.Auth.PinAuth, updated.Auth.PinAuth) + }) + + t.Run("do not preserve when password is provided", func(t *testing.T) { + existing := &reverseproxy.Service{ + Auth: reverseproxy.AuthConfig{ + PasswordAuth: &reverseproxy.PasswordAuthConfig{ + Enabled: true, + Password: "old-password", + }, + }, + } + + updated := &reverseproxy.Service{ + Auth: reverseproxy.AuthConfig{ + PasswordAuth: &reverseproxy.PasswordAuthConfig{ + Enabled: true, + Password: "new-password", + }, + }, + } + + mgr.preserveExistingAuthSecrets(updated, existing) + + assert.Equal(t, "new-password", updated.Auth.PasswordAuth.Password) + assert.NotEqual(t, existing.Auth.PasswordAuth, updated.Auth.PasswordAuth) + }) +} + +func TestPreserveServiceMetadata(t *testing.T) { + mgr := &managerImpl{} + + existing := &reverseproxy.Service{ + Meta: reverseproxy.ServiceMeta{ + CertificateIssuedAt: time.Now(), + Status: "active", + }, + SessionPrivateKey: "private-key", + SessionPublicKey: "public-key", + } + + updated := &reverseproxy.Service{ + Domain: "updated.com", + } + + mgr.preserveServiceMetadata(updated, existing) + + assert.Equal(t, existing.Meta, updated.Meta) + assert.Equal(t, existing.SessionPrivateKey, updated.SessionPrivateKey) + assert.Equal(t, existing.SessionPublicKey, updated.SessionPublicKey) +} diff --git a/management/internals/modules/reverseproxy/reverseproxy.go b/management/internals/modules/reverseproxy/reverseproxy.go new file mode 100644 index 000000000..0cbbe450b --- /dev/null +++ b/management/internals/modules/reverseproxy/reverseproxy.go @@ -0,0 +1,463 @@ +package reverseproxy + +import ( + "errors" + "fmt" + "net" + "net/url" + "strconv" + "time" + + "github.com/rs/xid" + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/shared/hash/argon2id" + "github.com/netbirdio/netbird/util/crypt" + + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/proto" +) + +type Operation string + +const ( + Create Operation = "create" + Update Operation = "update" + Delete Operation = "delete" +) + +type ProxyStatus string + +const ( + StatusPending ProxyStatus = "pending" + StatusActive ProxyStatus = "active" + StatusTunnelNotCreated ProxyStatus = "tunnel_not_created" + StatusCertificatePending ProxyStatus = "certificate_pending" + StatusCertificateFailed ProxyStatus = "certificate_failed" + StatusError ProxyStatus = "error" + + TargetTypePeer = "peer" + TargetTypeHost = "host" + TargetTypeDomain = "domain" + TargetTypeSubnet = "subnet" +) + +type Target struct { + ID uint `gorm:"primaryKey" json:"-"` + AccountID string `gorm:"index:idx_target_account;not null" json:"-"` + ServiceID string `gorm:"index:idx_service_targets;not null" json:"-"` + Path *string `json:"path,omitempty"` + Host string `json:"host"` // the Host field is only used for subnet targets, otherwise ignored + Port int `gorm:"index:idx_target_port" json:"port"` + Protocol string `gorm:"index:idx_target_protocol" json:"protocol"` + TargetId string `gorm:"index:idx_target_id" json:"target_id"` + TargetType string `gorm:"index:idx_target_type" json:"target_type"` + Enabled bool `gorm:"index:idx_target_enabled" json:"enabled"` +} + +type PasswordAuthConfig struct { + Enabled bool `json:"enabled"` + Password string `json:"password"` +} + +type PINAuthConfig struct { + Enabled bool `json:"enabled"` + Pin string `json:"pin"` +} + +type BearerAuthConfig struct { + Enabled bool `json:"enabled"` + DistributionGroups []string `json:"distribution_groups,omitempty" gorm:"serializer:json"` +} + +type AuthConfig struct { + PasswordAuth *PasswordAuthConfig `json:"password_auth,omitempty" gorm:"serializer:json"` + PinAuth *PINAuthConfig `json:"pin_auth,omitempty" gorm:"serializer:json"` + BearerAuth *BearerAuthConfig `json:"bearer_auth,omitempty" gorm:"serializer:json"` +} + +func (a *AuthConfig) HashSecrets() error { + if a.PasswordAuth != nil && a.PasswordAuth.Enabled && a.PasswordAuth.Password != "" { + hashedPassword, err := argon2id.Hash(a.PasswordAuth.Password) + if err != nil { + return fmt.Errorf("hash password: %w", err) + } + a.PasswordAuth.Password = hashedPassword + } + + if a.PinAuth != nil && a.PinAuth.Enabled && a.PinAuth.Pin != "" { + hashedPin, err := argon2id.Hash(a.PinAuth.Pin) + if err != nil { + return fmt.Errorf("hash pin: %w", err) + } + a.PinAuth.Pin = hashedPin + } + + return nil +} + +func (a *AuthConfig) ClearSecrets() { + if a.PasswordAuth != nil { + a.PasswordAuth.Password = "" + } + if a.PinAuth != nil { + a.PinAuth.Pin = "" + } +} + +type OIDCValidationConfig struct { + Issuer string + Audiences []string + KeysLocation string + MaxTokenAgeSeconds int64 +} + +type ServiceMeta struct { + CreatedAt time.Time + CertificateIssuedAt time.Time + Status string +} + +type Service struct { + ID string `gorm:"primaryKey"` + AccountID string `gorm:"index"` + Name string + Domain string `gorm:"index"` + ProxyCluster string `gorm:"index"` + Targets []*Target `gorm:"foreignKey:ServiceID;constraint:OnDelete:CASCADE"` + Enabled bool + PassHostHeader bool + RewriteRedirects bool + Auth AuthConfig `gorm:"serializer:json"` + Meta ServiceMeta `gorm:"embedded;embeddedPrefix:meta_"` + SessionPrivateKey string `gorm:"column:session_private_key"` + SessionPublicKey string `gorm:"column:session_public_key"` +} + +func NewService(accountID, name, domain, proxyCluster string, targets []*Target, enabled bool) *Service { + for _, target := range targets { + target.AccountID = accountID + } + + s := &Service{ + AccountID: accountID, + Name: name, + Domain: domain, + ProxyCluster: proxyCluster, + Targets: targets, + Enabled: enabled, + } + s.InitNewRecord() + return s +} + +// InitNewRecord generates a new unique ID and resets metadata for a newly created +// Service record. This overwrites any existing ID and Meta fields and should +// only be called during initial creation, not for updates. +func (s *Service) InitNewRecord() { + s.ID = xid.New().String() + s.Meta = ServiceMeta{ + CreatedAt: time.Now(), + Status: string(StatusPending), + } +} + +func (s *Service) ToAPIResponse() *api.Service { + s.Auth.ClearSecrets() + + authConfig := api.ServiceAuthConfig{} + + if s.Auth.PasswordAuth != nil { + authConfig.PasswordAuth = &api.PasswordAuthConfig{ + Enabled: s.Auth.PasswordAuth.Enabled, + Password: s.Auth.PasswordAuth.Password, + } + } + + if s.Auth.PinAuth != nil { + authConfig.PinAuth = &api.PINAuthConfig{ + Enabled: s.Auth.PinAuth.Enabled, + Pin: s.Auth.PinAuth.Pin, + } + } + + if s.Auth.BearerAuth != nil { + authConfig.BearerAuth = &api.BearerAuthConfig{ + Enabled: s.Auth.BearerAuth.Enabled, + DistributionGroups: &s.Auth.BearerAuth.DistributionGroups, + } + } + + // Convert internal targets to API targets + apiTargets := make([]api.ServiceTarget, 0, len(s.Targets)) + for _, target := range s.Targets { + apiTargets = append(apiTargets, api.ServiceTarget{ + Path: target.Path, + Host: &target.Host, + Port: target.Port, + Protocol: api.ServiceTargetProtocol(target.Protocol), + TargetId: target.TargetId, + TargetType: api.ServiceTargetTargetType(target.TargetType), + Enabled: target.Enabled, + }) + } + + meta := api.ServiceMeta{ + CreatedAt: s.Meta.CreatedAt, + Status: api.ServiceMetaStatus(s.Meta.Status), + } + + if !s.Meta.CertificateIssuedAt.IsZero() { + meta.CertificateIssuedAt = &s.Meta.CertificateIssuedAt + } + + resp := &api.Service{ + Id: s.ID, + Name: s.Name, + Domain: s.Domain, + Targets: apiTargets, + Enabled: s.Enabled, + PassHostHeader: &s.PassHostHeader, + RewriteRedirects: &s.RewriteRedirects, + Auth: authConfig, + Meta: meta, + } + + if s.ProxyCluster != "" { + resp.ProxyCluster = &s.ProxyCluster + } + + return resp +} + +func (s *Service) ToProtoMapping(operation Operation, authToken string, oidcConfig OIDCValidationConfig) *proto.ProxyMapping { + pathMappings := make([]*proto.PathMapping, 0, len(s.Targets)) + for _, target := range s.Targets { + if !target.Enabled { + continue + } + + // TODO: Make path prefix stripping configurable per-target. + // Currently the matching prefix is baked into the target URL path, + // so the proxy strips-then-re-adds it (effectively a no-op). + targetURL := url.URL{ + Scheme: target.Protocol, + Host: target.Host, + Path: "/", // TODO: support service path + } + if target.Port > 0 && !isDefaultPort(target.Protocol, target.Port) { + targetURL.Host = net.JoinHostPort(targetURL.Host, strconv.Itoa(target.Port)) + } + + path := "/" + if target.Path != nil { + path = *target.Path + } + pathMappings = append(pathMappings, &proto.PathMapping{ + Path: path, + Target: targetURL.String(), + }) + } + + auth := &proto.Authentication{ + SessionKey: s.SessionPublicKey, + MaxSessionAgeSeconds: int64((time.Hour * 24).Seconds()), + } + + if s.Auth.PasswordAuth != nil && s.Auth.PasswordAuth.Enabled { + auth.Password = true + } + + if s.Auth.PinAuth != nil && s.Auth.PinAuth.Enabled { + auth.Pin = true + } + + if s.Auth.BearerAuth != nil && s.Auth.BearerAuth.Enabled { + auth.Oidc = true + } + + return &proto.ProxyMapping{ + Type: operationToProtoType(operation), + Id: s.ID, + Domain: s.Domain, + Path: pathMappings, + AuthToken: authToken, + Auth: auth, + AccountId: s.AccountID, + PassHostHeader: s.PassHostHeader, + RewriteRedirects: s.RewriteRedirects, + } +} + +func operationToProtoType(op Operation) proto.ProxyMappingUpdateType { + switch op { + case Create: + return proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED + case Update: + return proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED + case Delete: + return proto.ProxyMappingUpdateType_UPDATE_TYPE_REMOVED + default: + log.Fatalf("unknown operation type: %v", op) + return proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED + } +} + +// isDefaultPort reports whether port is the standard default for the given scheme +// (443 for https, 80 for http). +func isDefaultPort(scheme string, port int) bool { + return (scheme == "https" && port == 443) || (scheme == "http" && port == 80) +} + +func (s *Service) FromAPIRequest(req *api.ServiceRequest, accountID string) { + s.Name = req.Name + s.Domain = req.Domain + s.AccountID = accountID + + targets := make([]*Target, 0, len(req.Targets)) + for _, apiTarget := range req.Targets { + target := &Target{ + AccountID: accountID, + Path: apiTarget.Path, + Port: apiTarget.Port, + Protocol: string(apiTarget.Protocol), + TargetId: apiTarget.TargetId, + TargetType: string(apiTarget.TargetType), + Enabled: apiTarget.Enabled, + } + if apiTarget.Host != nil { + target.Host = *apiTarget.Host + } + targets = append(targets, target) + } + s.Targets = targets + + s.Enabled = req.Enabled + + if req.PassHostHeader != nil { + s.PassHostHeader = *req.PassHostHeader + } + + if req.RewriteRedirects != nil { + s.RewriteRedirects = *req.RewriteRedirects + } + + if req.Auth.PasswordAuth != nil { + s.Auth.PasswordAuth = &PasswordAuthConfig{ + Enabled: req.Auth.PasswordAuth.Enabled, + Password: req.Auth.PasswordAuth.Password, + } + } + + if req.Auth.PinAuth != nil { + s.Auth.PinAuth = &PINAuthConfig{ + Enabled: req.Auth.PinAuth.Enabled, + Pin: req.Auth.PinAuth.Pin, + } + } + + if req.Auth.BearerAuth != nil { + bearerAuth := &BearerAuthConfig{ + Enabled: req.Auth.BearerAuth.Enabled, + } + if req.Auth.BearerAuth.DistributionGroups != nil { + bearerAuth.DistributionGroups = *req.Auth.BearerAuth.DistributionGroups + } + s.Auth.BearerAuth = bearerAuth + } +} + +func (s *Service) Validate() error { + if s.Name == "" { + return errors.New("service name is required") + } + if len(s.Name) > 255 { + return errors.New("service name exceeds maximum length of 255 characters") + } + + if s.Domain == "" { + return errors.New("service domain is required") + } + + if len(s.Targets) == 0 { + return errors.New("at least one target is required") + } + + for i, target := range s.Targets { + switch target.TargetType { + case TargetTypePeer, TargetTypeHost, TargetTypeDomain: + // host field will be ignored + case TargetTypeSubnet: + if target.Host == "" { + return fmt.Errorf("target %d has empty host but target_type is %q", i, target.TargetType) + } + default: + return fmt.Errorf("target %d has invalid target_type %q", i, target.TargetType) + } + if target.TargetId == "" { + return fmt.Errorf("target %d has empty target_id", i) + } + } + + return nil +} + +func (s *Service) EventMeta() map[string]any { + return map[string]any{"name": s.Name, "domain": s.Domain, "proxy_cluster": s.ProxyCluster} +} + +func (s *Service) Copy() *Service { + targets := make([]*Target, len(s.Targets)) + for i, target := range s.Targets { + targetCopy := *target + targets[i] = &targetCopy + } + + return &Service{ + ID: s.ID, + AccountID: s.AccountID, + Name: s.Name, + Domain: s.Domain, + ProxyCluster: s.ProxyCluster, + Targets: targets, + Enabled: s.Enabled, + PassHostHeader: s.PassHostHeader, + RewriteRedirects: s.RewriteRedirects, + Auth: s.Auth, + Meta: s.Meta, + SessionPrivateKey: s.SessionPrivateKey, + SessionPublicKey: s.SessionPublicKey, + } +} + +func (s *Service) EncryptSensitiveData(enc *crypt.FieldEncrypt) error { + if enc == nil { + return nil + } + + if s.SessionPrivateKey != "" { + var err error + s.SessionPrivateKey, err = enc.Encrypt(s.SessionPrivateKey) + if err != nil { + return err + } + } + + return nil +} + +func (s *Service) DecryptSensitiveData(enc *crypt.FieldEncrypt) error { + if enc == nil { + return nil + } + + if s.SessionPrivateKey != "" { + var err error + s.SessionPrivateKey, err = enc.Decrypt(s.SessionPrivateKey) + if err != nil { + return err + } + } + + return nil +} diff --git a/management/internals/modules/reverseproxy/reverseproxy_test.go b/management/internals/modules/reverseproxy/reverseproxy_test.go new file mode 100644 index 000000000..546e80b31 --- /dev/null +++ b/management/internals/modules/reverseproxy/reverseproxy_test.go @@ -0,0 +1,405 @@ +package reverseproxy + +import ( + "errors" + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/shared/hash/argon2id" + "github.com/netbirdio/netbird/shared/management/proto" +) + +func validProxy() *Service { + return &Service{ + Name: "test", + Domain: "example.com", + Targets: []*Target{ + {TargetId: "peer-1", TargetType: TargetTypePeer, Host: "10.0.0.1", Port: 80, Protocol: "http", Enabled: true}, + }, + } +} + +func TestValidate_Valid(t *testing.T) { + require.NoError(t, validProxy().Validate()) +} + +func TestValidate_EmptyName(t *testing.T) { + rp := validProxy() + rp.Name = "" + assert.ErrorContains(t, rp.Validate(), "name is required") +} + +func TestValidate_EmptyDomain(t *testing.T) { + rp := validProxy() + rp.Domain = "" + assert.ErrorContains(t, rp.Validate(), "domain is required") +} + +func TestValidate_NoTargets(t *testing.T) { + rp := validProxy() + rp.Targets = nil + assert.ErrorContains(t, rp.Validate(), "at least one target") +} + +func TestValidate_EmptyTargetId(t *testing.T) { + rp := validProxy() + rp.Targets[0].TargetId = "" + assert.ErrorContains(t, rp.Validate(), "empty target_id") +} + +func TestValidate_InvalidTargetType(t *testing.T) { + rp := validProxy() + rp.Targets[0].TargetType = "invalid" + assert.ErrorContains(t, rp.Validate(), "invalid target_type") +} + +func TestValidate_ResourceTarget(t *testing.T) { + rp := validProxy() + rp.Targets = append(rp.Targets, &Target{ + TargetId: "resource-1", + TargetType: TargetTypeHost, + Host: "example.org", + Port: 443, + Protocol: "https", + Enabled: true, + }) + require.NoError(t, rp.Validate()) +} + +func TestValidate_MultipleTargetsOneInvalid(t *testing.T) { + rp := validProxy() + rp.Targets = append(rp.Targets, &Target{ + TargetId: "", + TargetType: TargetTypePeer, + Host: "10.0.0.2", + Port: 80, + Protocol: "http", + Enabled: true, + }) + err := rp.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "target 1") + assert.Contains(t, err.Error(), "empty target_id") +} + +func TestIsDefaultPort(t *testing.T) { + tests := []struct { + scheme string + port int + want bool + }{ + {"http", 80, true}, + {"https", 443, true}, + {"http", 443, false}, + {"https", 80, false}, + {"http", 8080, false}, + {"https", 8443, false}, + {"http", 0, false}, + {"https", 0, false}, + } + for _, tt := range tests { + t.Run(fmt.Sprintf("%s/%d", tt.scheme, tt.port), func(t *testing.T) { + assert.Equal(t, tt.want, isDefaultPort(tt.scheme, tt.port)) + }) + } +} + +func TestToProtoMapping_PortInTargetURL(t *testing.T) { + oidcConfig := OIDCValidationConfig{} + + tests := []struct { + name string + protocol string + host string + port int + wantTarget string + }{ + { + name: "http with default port 80 omits port", + protocol: "http", + host: "10.0.0.1", + port: 80, + wantTarget: "http://10.0.0.1/", + }, + { + name: "https with default port 443 omits port", + protocol: "https", + host: "10.0.0.1", + port: 443, + wantTarget: "https://10.0.0.1/", + }, + { + name: "port 0 omits port", + protocol: "http", + host: "10.0.0.1", + port: 0, + wantTarget: "http://10.0.0.1/", + }, + { + name: "non-default port is included", + protocol: "http", + host: "10.0.0.1", + port: 8080, + wantTarget: "http://10.0.0.1:8080/", + }, + { + name: "https with non-default port is included", + protocol: "https", + host: "10.0.0.1", + port: 8443, + wantTarget: "https://10.0.0.1:8443/", + }, + { + name: "http port 443 is included", + protocol: "http", + host: "10.0.0.1", + port: 443, + wantTarget: "http://10.0.0.1:443/", + }, + { + name: "https port 80 is included", + protocol: "https", + host: "10.0.0.1", + port: 80, + wantTarget: "https://10.0.0.1:80/", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rp := &Service{ + ID: "test-id", + AccountID: "acc-1", + Domain: "example.com", + Targets: []*Target{ + { + TargetId: "peer-1", + TargetType: TargetTypePeer, + Host: tt.host, + Port: tt.port, + Protocol: tt.protocol, + Enabled: true, + }, + }, + } + pm := rp.ToProtoMapping(Create, "token", oidcConfig) + require.Len(t, pm.Path, 1, "should have one path mapping") + assert.Equal(t, tt.wantTarget, pm.Path[0].Target) + }) + } +} + +func TestToProtoMapping_DisabledTargetSkipped(t *testing.T) { + rp := &Service{ + ID: "test-id", + AccountID: "acc-1", + Domain: "example.com", + Targets: []*Target{ + {TargetId: "peer-1", TargetType: TargetTypePeer, Host: "10.0.0.1", Port: 8080, Protocol: "http", Enabled: false}, + {TargetId: "peer-2", TargetType: TargetTypePeer, Host: "10.0.0.2", Port: 9090, Protocol: "http", Enabled: true}, + }, + } + pm := rp.ToProtoMapping(Create, "token", OIDCValidationConfig{}) + require.Len(t, pm.Path, 1) + assert.Equal(t, "http://10.0.0.2:9090/", pm.Path[0].Target) +} + +func TestToProtoMapping_OperationTypes(t *testing.T) { + rp := validProxy() + tests := []struct { + op Operation + want proto.ProxyMappingUpdateType + }{ + {Create, proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED}, + {Update, proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED}, + {Delete, proto.ProxyMappingUpdateType_UPDATE_TYPE_REMOVED}, + } + for _, tt := range tests { + t.Run(string(tt.op), func(t *testing.T) { + pm := rp.ToProtoMapping(tt.op, "", OIDCValidationConfig{}) + assert.Equal(t, tt.want, pm.Type) + }) + } +} + +func TestAuthConfig_HashSecrets(t *testing.T) { + tests := []struct { + name string + config *AuthConfig + wantErr bool + validate func(*testing.T, *AuthConfig) + }{ + { + name: "hash password successfully", + config: &AuthConfig{ + PasswordAuth: &PasswordAuthConfig{ + Enabled: true, + Password: "testPassword123", + }, + }, + wantErr: false, + validate: func(t *testing.T, config *AuthConfig) { + if !strings.HasPrefix(config.PasswordAuth.Password, "$argon2id$") { + t.Errorf("Password not hashed with argon2id, got: %s", config.PasswordAuth.Password) + } + // Verify the hash can be verified + if err := argon2id.Verify("testPassword123", config.PasswordAuth.Password); err != nil { + t.Errorf("Hash verification failed: %v", err) + } + }, + }, + { + name: "hash PIN successfully", + config: &AuthConfig{ + PinAuth: &PINAuthConfig{ + Enabled: true, + Pin: "123456", + }, + }, + wantErr: false, + validate: func(t *testing.T, config *AuthConfig) { + if !strings.HasPrefix(config.PinAuth.Pin, "$argon2id$") { + t.Errorf("PIN not hashed with argon2id, got: %s", config.PinAuth.Pin) + } + // Verify the hash can be verified + if err := argon2id.Verify("123456", config.PinAuth.Pin); err != nil { + t.Errorf("Hash verification failed: %v", err) + } + }, + }, + { + name: "hash both password and PIN", + config: &AuthConfig{ + PasswordAuth: &PasswordAuthConfig{ + Enabled: true, + Password: "password", + }, + PinAuth: &PINAuthConfig{ + Enabled: true, + Pin: "9999", + }, + }, + wantErr: false, + validate: func(t *testing.T, config *AuthConfig) { + if !strings.HasPrefix(config.PasswordAuth.Password, "$argon2id$") { + t.Errorf("Password not hashed with argon2id") + } + if !strings.HasPrefix(config.PinAuth.Pin, "$argon2id$") { + t.Errorf("PIN not hashed with argon2id") + } + if err := argon2id.Verify("password", config.PasswordAuth.Password); err != nil { + t.Errorf("Password hash verification failed: %v", err) + } + if err := argon2id.Verify("9999", config.PinAuth.Pin); err != nil { + t.Errorf("PIN hash verification failed: %v", err) + } + }, + }, + { + name: "skip disabled password auth", + config: &AuthConfig{ + PasswordAuth: &PasswordAuthConfig{ + Enabled: false, + Password: "password", + }, + }, + wantErr: false, + validate: func(t *testing.T, config *AuthConfig) { + if config.PasswordAuth.Password != "password" { + t.Errorf("Disabled password auth should not be hashed") + } + }, + }, + { + name: "skip empty password", + config: &AuthConfig{ + PasswordAuth: &PasswordAuthConfig{ + Enabled: true, + Password: "", + }, + }, + wantErr: false, + validate: func(t *testing.T, config *AuthConfig) { + if config.PasswordAuth.Password != "" { + t.Errorf("Empty password should remain empty") + } + }, + }, + { + name: "skip nil password auth", + config: &AuthConfig{ + PasswordAuth: nil, + PinAuth: &PINAuthConfig{ + Enabled: true, + Pin: "1234", + }, + }, + wantErr: false, + validate: func(t *testing.T, config *AuthConfig) { + if config.PasswordAuth != nil { + t.Errorf("PasswordAuth should remain nil") + } + if !strings.HasPrefix(config.PinAuth.Pin, "$argon2id$") { + t.Errorf("PIN should still be hashed") + } + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.config.HashSecrets() + if (err != nil) != tt.wantErr { + t.Errorf("HashSecrets() error = %v, wantErr %v", err, tt.wantErr) + return + } + if tt.validate != nil { + tt.validate(t, tt.config) + } + }) + } +} + +func TestAuthConfig_HashSecrets_VerifyIncorrectSecret(t *testing.T) { + config := &AuthConfig{ + PasswordAuth: &PasswordAuthConfig{ + Enabled: true, + Password: "correctPassword", + }, + } + + if err := config.HashSecrets(); err != nil { + t.Fatalf("HashSecrets() error = %v", err) + } + + // Verify with wrong password should fail + err := argon2id.Verify("wrongPassword", config.PasswordAuth.Password) + if !errors.Is(err, argon2id.ErrMismatchedHashAndPassword) { + t.Errorf("Expected ErrMismatchedHashAndPassword, got %v", err) + } +} + +func TestAuthConfig_ClearSecrets(t *testing.T) { + config := &AuthConfig{ + PasswordAuth: &PasswordAuthConfig{ + Enabled: true, + Password: "hashedPassword", + }, + PinAuth: &PINAuthConfig{ + Enabled: true, + Pin: "hashedPin", + }, + } + + config.ClearSecrets() + + if config.PasswordAuth.Password != "" { + t.Errorf("Password not cleared, got: %s", config.PasswordAuth.Password) + } + if config.PinAuth.Pin != "" { + t.Errorf("PIN not cleared, got: %s", config.PinAuth.Pin) + } +} diff --git a/management/internals/modules/reverseproxy/sessionkey/sessionkey.go b/management/internals/modules/reverseproxy/sessionkey/sessionkey.go new file mode 100644 index 000000000..aacbe5dca --- /dev/null +++ b/management/internals/modules/reverseproxy/sessionkey/sessionkey.go @@ -0,0 +1,69 @@ +package sessionkey + +import ( + "crypto/ed25519" + "crypto/rand" + "encoding/base64" + "fmt" + "time" + + "github.com/golang-jwt/jwt/v5" + + "github.com/netbirdio/netbird/proxy/auth" +) + +type KeyPair struct { + PrivateKey string + PublicKey string +} + +type Claims struct { + jwt.RegisteredClaims + Method auth.Method `json:"method"` +} + +func GenerateKeyPair() (*KeyPair, error) { + pub, priv, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + return nil, fmt.Errorf("generate ed25519 key: %w", err) + } + + return &KeyPair{ + PrivateKey: base64.StdEncoding.EncodeToString(priv), + PublicKey: base64.StdEncoding.EncodeToString(pub), + }, nil +} + +func SignToken(privKeyB64, userID, domain string, method auth.Method, expiration time.Duration) (string, error) { + privKeyBytes, err := base64.StdEncoding.DecodeString(privKeyB64) + if err != nil { + return "", fmt.Errorf("decode private key: %w", err) + } + + if len(privKeyBytes) != ed25519.PrivateKeySize { + return "", fmt.Errorf("invalid private key size: got %d, want %d", len(privKeyBytes), ed25519.PrivateKeySize) + } + + privKey := ed25519.PrivateKey(privKeyBytes) + + now := time.Now() + claims := Claims{ + RegisteredClaims: jwt.RegisteredClaims{ + Issuer: auth.SessionJWTIssuer, + Subject: userID, + Audience: jwt.ClaimStrings{domain}, + ExpiresAt: jwt.NewNumericDate(now.Add(expiration)), + IssuedAt: jwt.NewNumericDate(now), + NotBefore: jwt.NewNumericDate(now), + }, + Method: method, + } + + token := jwt.NewWithClaims(jwt.SigningMethodEdDSA, claims) + signedToken, err := token.SignedString(privKey) + if err != nil { + return "", fmt.Errorf("sign token: %w", err) + } + + return signedToken, nil +} diff --git a/management/internals/server/boot.go b/management/internals/server/boot.go index 55af17fdf..7da1e6898 100644 --- a/management/internals/server/boot.go +++ b/management/internals/server/boot.go @@ -21,6 +21,8 @@ import ( "github.com/netbirdio/management-integrations/integrations" "github.com/netbirdio/netbird/encryption" "github.com/netbirdio/netbird/formatter/hook" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" + accesslogsmanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs/manager" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" "github.com/netbirdio/netbird/management/server/activity" nbContext "github.com/netbirdio/netbird/management/server/context" @@ -92,7 +94,7 @@ func (s *BaseServer) EventStore() activity.Store { func (s *BaseServer) APIHandler() http.Handler { return Create(s, func() http.Handler { - httpAPIHandler, err := nbhttp.NewAPIHandler(context.Background(), s.AccountManager(), s.NetworksManager(), s.ResourcesManager(), s.RoutesManager(), s.GroupsManager(), s.GeoLocationManager(), s.AuthManager(), s.Metrics(), s.IntegratedValidator(), s.ProxyController(), s.PermissionsManager(), s.PeersManager(), s.SettingsManager(), s.ZonesManager(), s.RecordsManager(), s.NetworkMapController(), s.IdpManager()) + httpAPIHandler, err := nbhttp.NewAPIHandler(context.Background(), s.AccountManager(), s.NetworksManager(), s.ResourcesManager(), s.RoutesManager(), s.GroupsManager(), s.GeoLocationManager(), s.AuthManager(), s.Metrics(), s.IntegratedValidator(), s.ProxyController(), s.PermissionsManager(), s.PeersManager(), s.SettingsManager(), s.ZonesManager(), s.RecordsManager(), s.NetworkMapController(), s.IdpManager(), s.ReverseProxyManager(), s.ReverseProxyDomainManager(), s.AccessLogsManager(), s.ReverseProxyGRPCServer(), s.Config.ReverseProxy.TrustedHTTPProxies) if err != nil { log.Fatalf("failed to create API handler: %v", err) } @@ -120,11 +122,13 @@ func (s *BaseServer) GRPCServer() *grpc.Server { realip.WithTrustedProxiesCount(trustedProxiesCount), realip.WithHeaders([]string{realip.XForwardedFor, realip.XRealIp}), } + proxyUnary, proxyStream, proxyAuthClose := nbgrpc.NewProxyAuthInterceptors(s.Store()) + s.proxyAuthClose = proxyAuthClose gRPCOpts := []grpc.ServerOption{ grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp), - grpc.ChainUnaryInterceptor(realip.UnaryServerInterceptorOpts(realipOpts...), unaryInterceptor), - grpc.ChainStreamInterceptor(realip.StreamServerInterceptorOpts(realipOpts...), streamInterceptor), + grpc.ChainUnaryInterceptor(realip.UnaryServerInterceptorOpts(realipOpts...), unaryInterceptor, proxyUnary), + grpc.ChainStreamInterceptor(realip.StreamServerInterceptorOpts(realipOpts...), streamInterceptor, proxyStream), } if s.Config.HttpConfig.LetsEncryptDomain != "" { @@ -150,10 +154,53 @@ func (s *BaseServer) GRPCServer() *grpc.Server { } mgmtProto.RegisterManagementServiceServer(gRPCAPIHandler, srv) + mgmtProto.RegisterProxyServiceServer(gRPCAPIHandler, s.ReverseProxyGRPCServer()) + log.Info("ProxyService registered on gRPC server") + return gRPCAPIHandler }) } +func (s *BaseServer) ReverseProxyGRPCServer() *nbgrpc.ProxyServiceServer { + return Create(s, func() *nbgrpc.ProxyServiceServer { + proxyService := nbgrpc.NewProxyServiceServer(s.AccessLogsManager(), s.ProxyTokenStore(), s.proxyOIDCConfig(), s.PeersManager(), s.UsersManager()) + s.AfterInit(func(s *BaseServer) { + proxyService.SetProxyManager(s.ReverseProxyManager()) + }) + return proxyService + }) +} + +func (s *BaseServer) proxyOIDCConfig() nbgrpc.ProxyOIDCConfig { + return Create(s, func() nbgrpc.ProxyOIDCConfig { + return nbgrpc.ProxyOIDCConfig{ + Issuer: s.Config.HttpConfig.AuthIssuer, + // todo: double check auth clientID value + ClientID: s.Config.HttpConfig.AuthClientID, // Reuse dashboard client + Scopes: []string{"openid", "profile", "email"}, + CallbackURL: s.Config.HttpConfig.AuthCallbackURL, + HMACKey: []byte(s.Config.DataStoreEncryptionKey), // Use the datastore encryption key for OIDC state HMACs, this should ensure all management instances are using the same key. + Audience: s.Config.HttpConfig.AuthAudience, + KeysLocation: s.Config.HttpConfig.AuthKeysLocation, + } + }) +} + +func (s *BaseServer) ProxyTokenStore() *nbgrpc.OneTimeTokenStore { + return Create(s, func() *nbgrpc.OneTimeTokenStore { + tokenStore := nbgrpc.NewOneTimeTokenStore(1 * time.Minute) + log.Info("One-time token store initialized for proxy authentication") + return tokenStore + }) +} + +func (s *BaseServer) AccessLogsManager() accesslogs.Manager { + return Create(s, func() accesslogs.Manager { + accessLogManager := accesslogsmanager.NewManager(s.Store(), s.PermissionsManager(), s.GeoLocationManager()) + return accessLogManager + }) +} + func loadTLSConfig(certFile string, certKey string) (*tls.Config, error) { // Load server's certificate and private key serverCert, err := tls.LoadX509KeyPair(certFile, certKey) diff --git a/management/internals/server/config/config.go b/management/internals/server/config/config.go index 7b8783943..5ed1c3ede 100644 --- a/management/internals/server/config/config.go +++ b/management/internals/server/config/config.go @@ -100,6 +100,8 @@ type HttpServerConfig struct { CertFile string // CertKey is the location of the certificate private key CertKey string + // AuthClientID is the client id used for proxy SSO auth + AuthClientID string // AuthAudience identifies the recipients that the JWT is intended for (aud in JWT) AuthAudience string // CLIAuthAudience identifies the client app recipients that the JWT is intended for (aud in JWT) @@ -117,6 +119,8 @@ type HttpServerConfig struct { IdpSignKeyRefreshEnabled bool // Extra audience ExtraAuthAudience string + // AuthCallbackDomain contains the callback domain + AuthCallbackURL string } // Host represents a Netbird host (e.g. STUN, TURN, Signal) diff --git a/management/internals/server/modules.go b/management/internals/server/modules.go index 31badf9d0..58125c0a3 100644 --- a/management/internals/server/modules.go +++ b/management/internals/server/modules.go @@ -8,6 +8,9 @@ import ( "github.com/netbirdio/management-integrations/integrations" "github.com/netbirdio/netbird/management/internals/modules/peers" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain/manager" + nbreverseproxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/manager" "github.com/netbirdio/netbird/management/internals/modules/zones" zonesManager "github.com/netbirdio/netbird/management/internals/modules/zones/manager" "github.com/netbirdio/netbird/management/internals/modules/zones/records" @@ -98,6 +101,11 @@ func (s *BaseServer) AccountManager() account.Manager { if err != nil { log.Fatalf("failed to create account manager: %v", err) } + + s.AfterInit(func(s *BaseServer) { + accountManager.SetServiceManager(s.ReverseProxyManager()) + }) + return accountManager }) } @@ -154,7 +162,7 @@ func (s *BaseServer) GroupsManager() groups.Manager { func (s *BaseServer) ResourcesManager() resources.Manager { return Create(s, func() resources.Manager { - return resources.NewManager(s.Store(), s.PermissionsManager(), s.GroupsManager(), s.AccountManager()) + return resources.NewManager(s.Store(), s.PermissionsManager(), s.GroupsManager(), s.AccountManager(), s.ReverseProxyManager()) }) } @@ -181,3 +189,16 @@ func (s *BaseServer) RecordsManager() records.Manager { return recordsManager.NewManager(s.Store(), s.AccountManager(), s.PermissionsManager()) }) } + +func (s *BaseServer) ReverseProxyManager() reverseproxy.Manager { + return Create(s, func() reverseproxy.Manager { + return nbreverseproxy.NewManager(s.Store(), s.AccountManager(), s.PermissionsManager(), s.ReverseProxyGRPCServer(), s.ReverseProxyDomainManager()) + }) +} + +func (s *BaseServer) ReverseProxyDomainManager() *manager.Manager { + return Create(s, func() *manager.Manager { + m := manager.NewManager(s.Store(), s.ReverseProxyGRPCServer(), s.PermissionsManager()) + return &m + }) +} diff --git a/management/internals/server/server.go b/management/internals/server/server.go index 0f985c4ed..55c7a271f 100644 --- a/management/internals/server/server.go +++ b/management/internals/server/server.go @@ -18,10 +18,9 @@ import ( "golang.org/x/net/http2/h2c" "google.golang.org/grpc" - "github.com/netbirdio/netbird/management/server/idp" - "github.com/netbirdio/netbird/encryption" nbconfig "github.com/netbirdio/netbird/management/internals/server/config" + "github.com/netbirdio/netbird/management/server/idp" "github.com/netbirdio/netbird/management/server/metrics" "github.com/netbirdio/netbird/management/server/store" "github.com/netbirdio/netbird/util/wsproxy" @@ -59,6 +58,8 @@ type BaseServer struct { mgmtMetricsPort int mgmtPort int + proxyAuthClose func() + listener net.Listener certManager *autocert.Manager update *version.Update @@ -139,8 +140,11 @@ func (s *BaseServer) Start(ctx context.Context) error { go metricsWorker.Run(srvCtx) } - // Run afterInit hooks before starting any servers - // This allows registering additional gRPC services (e.g., Signal) before Serve() is called + // Eagerly create the gRPC server so that all AfterInit hooks are registered + // before we iterate them. Lazy creation after the loop would miss hooks + // registered during GRPCServer() construction (e.g., SetProxyManager). + s.GRPCServer() + for _, fn := range s.afterInit { if fn != nil { fn(s) @@ -218,6 +222,11 @@ func (s *BaseServer) Stop() error { _ = s.certManager.Listener().Close() } s.GRPCServer().Stop() + s.ReverseProxyGRPCServer().Close() + if s.proxyAuthClose != nil { + s.proxyAuthClose() + s.proxyAuthClose = nil + } _ = s.Store().Close(ctx) _ = s.EventStore().Close(ctx) if s.update != nil { diff --git a/management/internals/shared/grpc/onetime_token.go b/management/internals/shared/grpc/onetime_token.go new file mode 100644 index 000000000..dcc37c639 --- /dev/null +++ b/management/internals/shared/grpc/onetime_token.go @@ -0,0 +1,167 @@ +package grpc + +import ( + "crypto/rand" + "crypto/subtle" + "encoding/base64" + "fmt" + "sync" + "time" + + log "github.com/sirupsen/logrus" +) + +// OneTimeTokenStore manages short-lived, single-use authentication tokens +// for proxy-to-management RPC authentication. Tokens are generated when +// a service is created and must be used exactly once by the proxy +// to authenticate a subsequent RPC call. +type OneTimeTokenStore struct { + tokens map[string]*tokenMetadata + mu sync.RWMutex + cleanup *time.Ticker + cleanupDone chan struct{} +} + +// tokenMetadata stores information about a one-time token +type tokenMetadata struct { + ServiceID string + AccountID string + ExpiresAt time.Time + CreatedAt time.Time +} + +// NewOneTimeTokenStore creates a new token store with automatic cleanup +// of expired tokens. The cleanupInterval determines how often expired +// tokens are removed from memory. +func NewOneTimeTokenStore(cleanupInterval time.Duration) *OneTimeTokenStore { + store := &OneTimeTokenStore{ + tokens: make(map[string]*tokenMetadata), + cleanup: time.NewTicker(cleanupInterval), + cleanupDone: make(chan struct{}), + } + + // Start background cleanup goroutine + go store.cleanupExpired() + + return store +} + +// GenerateToken creates a new cryptographically secure one-time token +// with the specified TTL. The token is associated with a specific +// accountID and serviceID for validation purposes. +// +// Returns the generated token string or an error if random generation fails. +func (s *OneTimeTokenStore) GenerateToken(accountID, serviceID string, ttl time.Duration) (string, error) { + // Generate 32 bytes (256 bits) of cryptographically secure random data + randomBytes := make([]byte, 32) + if _, err := rand.Read(randomBytes); err != nil { + return "", fmt.Errorf("failed to generate random token: %w", err) + } + + // Encode as URL-safe base64 for easy transmission in gRPC + token := base64.URLEncoding.EncodeToString(randomBytes) + + s.mu.Lock() + defer s.mu.Unlock() + + s.tokens[token] = &tokenMetadata{ + ServiceID: serviceID, + AccountID: accountID, + ExpiresAt: time.Now().Add(ttl), + CreatedAt: time.Now(), + } + + log.Debugf("Generated one-time token for proxy %s in account %s (expires in %s)", + serviceID, accountID, ttl) + + return token, nil +} + +// ValidateAndConsume verifies the token against the provided accountID and +// serviceID, checks expiration, and then deletes it to enforce single-use. +// +// This method uses constant-time comparison to prevent timing attacks. +// +// Returns nil on success, or an error if: +// - Token doesn't exist +// - Token has expired +// - Account ID doesn't match +// - Reverse proxy ID doesn't match +func (s *OneTimeTokenStore) ValidateAndConsume(token, accountID, serviceID string) error { + s.mu.Lock() + defer s.mu.Unlock() + + metadata, exists := s.tokens[token] + if !exists { + log.Warnf("Token validation failed: token not found (proxy: %s, account: %s)", + serviceID, accountID) + return fmt.Errorf("invalid token") + } + + // Check expiration + if time.Now().After(metadata.ExpiresAt) { + delete(s.tokens, token) + log.Warnf("Token validation failed: token expired (proxy: %s, account: %s)", + serviceID, accountID) + return fmt.Errorf("token expired") + } + + // Validate account ID using constant-time comparison (prevents timing attacks) + if subtle.ConstantTimeCompare([]byte(metadata.AccountID), []byte(accountID)) != 1 { + log.Warnf("Token validation failed: account ID mismatch (expected: %s, got: %s)", + metadata.AccountID, accountID) + return fmt.Errorf("account ID mismatch") + } + + // Validate service ID using constant-time comparison + if subtle.ConstantTimeCompare([]byte(metadata.ServiceID), []byte(serviceID)) != 1 { + log.Warnf("Token validation failed: service ID mismatch (expected: %s, got: %s)", + metadata.ServiceID, serviceID) + return fmt.Errorf("service ID mismatch") + } + + // Delete token immediately to enforce single-use + delete(s.tokens, token) + + log.Infof("Token validated and consumed for proxy %s in account %s", + serviceID, accountID) + + return nil +} + +// cleanupExpired removes expired tokens in the background to prevent memory leaks +func (s *OneTimeTokenStore) cleanupExpired() { + for { + select { + case <-s.cleanup.C: + s.mu.Lock() + now := time.Now() + removed := 0 + for token, metadata := range s.tokens { + if now.After(metadata.ExpiresAt) { + delete(s.tokens, token) + removed++ + } + } + if removed > 0 { + log.Debugf("Cleaned up %d expired one-time tokens", removed) + } + s.mu.Unlock() + case <-s.cleanupDone: + return + } + } +} + +// Close stops the cleanup goroutine and releases resources +func (s *OneTimeTokenStore) Close() { + s.cleanup.Stop() + close(s.cleanupDone) +} + +// GetTokenCount returns the current number of tokens in the store (for debugging/metrics) +func (s *OneTimeTokenStore) GetTokenCount() int { + s.mu.RLock() + defer s.mu.RUnlock() + return len(s.tokens) +} diff --git a/management/internals/shared/grpc/proxy.go b/management/internals/shared/grpc/proxy.go new file mode 100644 index 000000000..4771d35af --- /dev/null +++ b/management/internals/shared/grpc/proxy.go @@ -0,0 +1,1083 @@ +package grpc + +import ( + "context" + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "net/url" + "strings" + "sync" + "time" + + "github.com/coreos/go-oidc/v3/oidc" + log "github.com/sirupsen/logrus" + "golang.org/x/oauth2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" + + "github.com/netbirdio/netbird/shared/management/domain" + + "github.com/netbirdio/netbird/management/internals/modules/peers" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/sessionkey" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/management/server/users" + proxyauth "github.com/netbirdio/netbird/proxy/auth" + "github.com/netbirdio/netbird/shared/hash/argon2id" + "github.com/netbirdio/netbird/shared/management/proto" +) + +type ProxyOIDCConfig struct { + Issuer string + ClientID string + Scopes []string + CallbackURL string + HMACKey []byte + + Audience string + KeysLocation string +} + +// ClusterInfo contains information about a proxy cluster. +type ClusterInfo struct { + Address string + ConnectedProxies int +} + +// ProxyServiceServer implements the ProxyService gRPC server +type ProxyServiceServer struct { + proto.UnimplementedProxyServiceServer + + // Map of connected proxies: proxy_id -> proxy connection + connectedProxies sync.Map + + // Map of cluster address -> set of proxy IDs + clusterProxies sync.Map + + // Channel for broadcasting reverse proxy updates to all proxies + updatesChan chan *proto.ProxyMapping + + // Manager for access logs + accessLogManager accesslogs.Manager + + // Manager for reverse proxy operations + reverseProxyManager reverseproxy.Manager + + // Manager for peers + peersManager peers.Manager + + // Manager for users + usersManager users.Manager + + // Store for one-time authentication tokens + tokenStore *OneTimeTokenStore + + // OIDC configuration for proxy authentication + oidcConfig ProxyOIDCConfig + + // TODO: use database to store these instead? + // pkceVerifiers stores PKCE code verifiers keyed by OAuth state. + // Entries expire after pkceVerifierTTL to prevent unbounded growth. + pkceVerifiers sync.Map + pkceCleanupCancel context.CancelFunc +} + +const pkceVerifierTTL = 10 * time.Minute + +type pkceEntry struct { + verifier string + createdAt time.Time +} + +// proxyConnection represents a connected proxy +type proxyConnection struct { + proxyID string + address string + stream proto.ProxyService_GetMappingUpdateServer + sendChan chan *proto.ProxyMapping + ctx context.Context + cancel context.CancelFunc +} + +// NewProxyServiceServer creates a new proxy service server. +func NewProxyServiceServer(accessLogMgr accesslogs.Manager, tokenStore *OneTimeTokenStore, oidcConfig ProxyOIDCConfig, peersManager peers.Manager, usersManager users.Manager) *ProxyServiceServer { + ctx, cancel := context.WithCancel(context.Background()) + s := &ProxyServiceServer{ + updatesChan: make(chan *proto.ProxyMapping, 100), + accessLogManager: accessLogMgr, + oidcConfig: oidcConfig, + tokenStore: tokenStore, + peersManager: peersManager, + usersManager: usersManager, + pkceCleanupCancel: cancel, + } + go s.cleanupPKCEVerifiers(ctx) + return s +} + +// cleanupPKCEVerifiers periodically removes expired PKCE verifiers. +func (s *ProxyServiceServer) cleanupPKCEVerifiers(ctx context.Context) { + ticker := time.NewTicker(pkceVerifierTTL) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + now := time.Now() + s.pkceVerifiers.Range(func(key, value any) bool { + if entry, ok := value.(pkceEntry); ok && now.Sub(entry.createdAt) > pkceVerifierTTL { + s.pkceVerifiers.Delete(key) + } + return true + }) + } + } +} + +// Close stops background goroutines. +func (s *ProxyServiceServer) Close() { + s.pkceCleanupCancel() +} + +func (s *ProxyServiceServer) SetProxyManager(manager reverseproxy.Manager) { + s.reverseProxyManager = manager +} + +// GetMappingUpdate handles the control stream with proxy clients +func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest, stream proto.ProxyService_GetMappingUpdateServer) error { + ctx := stream.Context() + + peerInfo := "" + if p, ok := peer.FromContext(ctx); ok { + peerInfo = p.Addr.String() + } + + log.Infof("New proxy connection from %s", peerInfo) + + proxyID := req.GetProxyId() + if proxyID == "" { + return status.Errorf(codes.InvalidArgument, "proxy_id is required") + } + + proxyAddress := req.GetAddress() + if !isProxyAddressValid(proxyAddress) { + return status.Errorf(codes.InvalidArgument, "proxy address is invalid") + } + + connCtx, cancel := context.WithCancel(ctx) + conn := &proxyConnection{ + proxyID: proxyID, + address: proxyAddress, + stream: stream, + sendChan: make(chan *proto.ProxyMapping, 100), + ctx: connCtx, + cancel: cancel, + } + + s.connectedProxies.Store(proxyID, conn) + s.addToCluster(conn.address, proxyID) + log.WithFields(log.Fields{ + "proxy_id": proxyID, + "address": proxyAddress, + "cluster_addr": proxyAddress, + "total_proxies": len(s.GetConnectedProxies()), + }).Info("Proxy registered in cluster") + defer func() { + s.connectedProxies.Delete(proxyID) + s.removeFromCluster(conn.address, proxyID) + cancel() + log.Infof("Proxy %s disconnected", proxyID) + }() + + if err := s.sendSnapshot(ctx, conn); err != nil { + return fmt.Errorf("send snapshot to proxy %s: %w", proxyID, err) + } + + errChan := make(chan error, 2) + go s.sender(conn, errChan) + + select { + case err := <-errChan: + return fmt.Errorf("send update to proxy %s: %w", proxyID, err) + case <-connCtx.Done(): + return connCtx.Err() + } +} + +// sendSnapshot sends the initial snapshot of services to the connecting proxy. +// Only services matching the proxy's cluster address are sent. +func (s *ProxyServiceServer) sendSnapshot(ctx context.Context, conn *proxyConnection) error { + services, err := s.reverseProxyManager.GetGlobalServices(ctx) + if err != nil { + return fmt.Errorf("get services from store: %w", err) + } + + if !isProxyAddressValid(conn.address) { + return fmt.Errorf("proxy address is invalid") + } + + var filtered []*reverseproxy.Service + for _, service := range services { + if !service.Enabled { + continue + } + if service.ProxyCluster == "" || service.ProxyCluster != conn.address { + continue + } + filtered = append(filtered, service) + } + + if len(filtered) == 0 { + if err := conn.stream.Send(&proto.GetMappingUpdateResponse{ + InitialSyncComplete: true, + }); err != nil { + return fmt.Errorf("send snapshot completion: %w", err) + } + return nil + } + + for i, service := range filtered { + // Generate one-time authentication token for each service in the snapshot + // Tokens are not persistent on the proxy, so we need to generate new ones on reconnection + token, err := s.tokenStore.GenerateToken(service.AccountID, service.ID, 5*time.Minute) + if err != nil { + log.WithFields(log.Fields{ + "service": service.Name, + "account": service.AccountID, + }).WithError(err).Error("failed to generate auth token for snapshot") + continue + } + + if err := conn.stream.Send(&proto.GetMappingUpdateResponse{ + Mapping: []*proto.ProxyMapping{ + service.ToProtoMapping( + reverseproxy.Create, // Initial snapshot, all records are "new" for the proxy. + token, + s.GetOIDCValidationConfig(), + ), + }, + InitialSyncComplete: i == len(filtered)-1, + }); err != nil { + log.WithFields(log.Fields{ + "domain": service.Domain, + "account": service.AccountID, + }).WithError(err).Error("failed to send proxy mapping") + return fmt.Errorf("send proxy mapping: %w", err) + } + } + + return nil +} + +// isProxyAddressValid validates a proxy address +func isProxyAddressValid(addr string) bool { + _, err := domain.ValidateDomains([]string{addr}) + return err == nil +} + +// sender handles sending messages to proxy +func (s *ProxyServiceServer) sender(conn *proxyConnection, errChan chan<- error) { + for { + select { + case msg := <-conn.sendChan: + if err := conn.stream.Send(&proto.GetMappingUpdateResponse{Mapping: []*proto.ProxyMapping{msg}}); err != nil { + errChan <- err + return + } + case <-conn.ctx.Done(): + return + } + } +} + +// SendAccessLog processes access log from proxy +func (s *ProxyServiceServer) SendAccessLog(ctx context.Context, req *proto.SendAccessLogRequest) (*proto.SendAccessLogResponse, error) { + accessLog := req.GetLog() + + fields := log.Fields{ + "service_id": accessLog.GetServiceId(), + "account_id": accessLog.GetAccountId(), + "host": accessLog.GetHost(), + "source_ip": accessLog.GetSourceIp(), + } + if mechanism := accessLog.GetAuthMechanism(); mechanism != "" { + fields["auth_mechanism"] = mechanism + } + if userID := accessLog.GetUserId(); userID != "" { + fields["user_id"] = userID + } + if !accessLog.GetAuthSuccess() { + fields["auth_success"] = false + } + log.WithFields(fields).Debugf("%s %s %d (%dms)", + accessLog.GetMethod(), + accessLog.GetPath(), + accessLog.GetResponseCode(), + accessLog.GetDurationMs(), + ) + + logEntry := &accesslogs.AccessLogEntry{} + logEntry.FromProto(accessLog) + + if err := s.accessLogManager.SaveAccessLog(ctx, logEntry); err != nil { + log.WithContext(ctx).Errorf("failed to save access log: %v", err) + return nil, status.Errorf(codes.Internal, "save access log: %v", err) + } + + return &proto.SendAccessLogResponse{}, nil +} + +// SendServiceUpdate broadcasts a service update to all connected proxy servers. +// Management should call this when services are created/updated/removed. +// For create/update operations a unique one-time auth token is generated per +// proxy so that every replica can independently authenticate with management. +func (s *ProxyServiceServer) SendServiceUpdate(update *proto.ProxyMapping) { + log.Debugf("Broadcasting service update to all connected proxy servers") + s.connectedProxies.Range(func(key, value interface{}) bool { + conn := value.(*proxyConnection) + msg := s.perProxyMessage(update, conn.proxyID) + if msg == nil { + return true + } + select { + case conn.sendChan <- msg: + log.Debugf("Sent service update with id %s to proxy server %s", update.Id, conn.proxyID) + default: + log.Warnf("Failed to send service update to proxy server %s (channel full)", conn.proxyID) + } + return true + }) +} + +// GetConnectedProxies returns a list of connected proxy IDs +func (s *ProxyServiceServer) GetConnectedProxies() []string { + var proxies []string + s.connectedProxies.Range(func(key, value interface{}) bool { + proxies = append(proxies, key.(string)) + return true + }) + return proxies +} + +// GetConnectedProxyURLs returns a deduplicated list of URLs from all connected proxies. +func (s *ProxyServiceServer) GetConnectedProxyURLs() []string { + seenUrls := make(map[string]struct{}) + var urls []string + var proxyCount int + s.connectedProxies.Range(func(key, value interface{}) bool { + proxyCount++ + conn := value.(*proxyConnection) + log.WithFields(log.Fields{ + "proxy_id": conn.proxyID, + "address": conn.address, + }).Debug("checking connected proxy for URL") + if _, seen := seenUrls[conn.address]; conn.address != "" && !seen { + seenUrls[conn.address] = struct{}{} + urls = append(urls, conn.address) + } + return true + }) + log.WithFields(log.Fields{ + "total_proxies": proxyCount, + "unique_urls": len(urls), + "connected_urls": urls, + }).Debug("GetConnectedProxyURLs result") + return urls +} + +// addToCluster registers a proxy in a cluster. +func (s *ProxyServiceServer) addToCluster(clusterAddr, proxyID string) { + if clusterAddr == "" { + return + } + proxySet, _ := s.clusterProxies.LoadOrStore(clusterAddr, &sync.Map{}) + proxySet.(*sync.Map).Store(proxyID, struct{}{}) + log.Debugf("Added proxy %s to cluster %s", proxyID, clusterAddr) +} + +// removeFromCluster removes a proxy from a cluster. +func (s *ProxyServiceServer) removeFromCluster(clusterAddr, proxyID string) { + if clusterAddr == "" { + return + } + if proxySet, ok := s.clusterProxies.Load(clusterAddr); ok { + proxySet.(*sync.Map).Delete(proxyID) + log.Debugf("Removed proxy %s from cluster %s", proxyID, clusterAddr) + } +} + +// SendServiceUpdateToCluster sends a service update to all proxy servers in a specific cluster. +// If clusterAddr is empty, broadcasts to all connected proxy servers (backward compatibility). +// For create/update operations a unique one-time auth token is generated per +// proxy so that every replica can independently authenticate with management. +func (s *ProxyServiceServer) SendServiceUpdateToCluster(update *proto.ProxyMapping, clusterAddr string) { + if clusterAddr == "" { + s.SendServiceUpdate(update) + return + } + + proxySet, ok := s.clusterProxies.Load(clusterAddr) + if !ok { + log.Debugf("No proxies connected for cluster %s", clusterAddr) + return + } + + log.Debugf("Sending service update to cluster %s", clusterAddr) + proxySet.(*sync.Map).Range(func(key, _ interface{}) bool { + proxyID := key.(string) + if connVal, ok := s.connectedProxies.Load(proxyID); ok { + conn := connVal.(*proxyConnection) + msg := s.perProxyMessage(update, proxyID) + if msg == nil { + return true + } + select { + case conn.sendChan <- msg: + log.Debugf("Sent service update with id %s to proxy %s in cluster %s", update.Id, proxyID, clusterAddr) + default: + log.Warnf("Failed to send service update to proxy %s in cluster %s (channel full)", proxyID, clusterAddr) + } + } + return true + }) +} + +// perProxyMessage returns a copy of update with a fresh one-time token for +// create/update operations. For delete operations the original message is +// returned unchanged because proxies do not need to authenticate for removal. +// Returns nil if token generation fails (the proxy should be skipped). +func (s *ProxyServiceServer) perProxyMessage(update *proto.ProxyMapping, proxyID string) *proto.ProxyMapping { + if update.Type == proto.ProxyMappingUpdateType_UPDATE_TYPE_REMOVED || update.AccountId == "" { + return update + } + + token, err := s.tokenStore.GenerateToken(update.AccountId, update.Id, 5*time.Minute) + if err != nil { + log.Warnf("Failed to generate token for proxy %s: %v", proxyID, err) + return nil + } + + msg := shallowCloneMapping(update) + msg.AuthToken = token + return msg +} + +// shallowCloneMapping creates a shallow copy of a ProxyMapping, reusing the +// same slice/pointer fields. Only scalar fields that differ per proxy (AuthToken) +// should be set on the copy. +func shallowCloneMapping(m *proto.ProxyMapping) *proto.ProxyMapping { + return &proto.ProxyMapping{ + Type: m.Type, + Id: m.Id, + AccountId: m.AccountId, + Domain: m.Domain, + Path: m.Path, + Auth: m.Auth, + PassHostHeader: m.PassHostHeader, + RewriteRedirects: m.RewriteRedirects, + } +} + +// GetAvailableClusters returns information about all connected proxy clusters. +func (s *ProxyServiceServer) GetAvailableClusters() []ClusterInfo { + clusterCounts := make(map[string]int) + s.clusterProxies.Range(func(key, value interface{}) bool { + clusterAddr := key.(string) + proxySet := value.(*sync.Map) + count := 0 + proxySet.Range(func(_, _ interface{}) bool { + count++ + return true + }) + if count > 0 { + clusterCounts[clusterAddr] = count + } + return true + }) + + clusters := make([]ClusterInfo, 0, len(clusterCounts)) + for addr, count := range clusterCounts { + clusters = append(clusters, ClusterInfo{ + Address: addr, + ConnectedProxies: count, + }) + } + return clusters +} + +func (s *ProxyServiceServer) Authenticate(ctx context.Context, req *proto.AuthenticateRequest) (*proto.AuthenticateResponse, error) { + service, err := s.reverseProxyManager.GetServiceByID(ctx, req.GetAccountId(), req.GetId()) + if err != nil { + log.WithContext(ctx).Debugf("failed to get service from store: %v", err) + return nil, status.Errorf(codes.FailedPrecondition, "get service from store: %v", err) + } + + authenticated, userId, method := s.authenticateRequest(ctx, req, service) + + token, err := s.generateSessionToken(ctx, authenticated, service, userId, method) + if err != nil { + return nil, err + } + + return &proto.AuthenticateResponse{ + Success: authenticated, + SessionToken: token, + }, nil +} + +func (s *ProxyServiceServer) authenticateRequest(ctx context.Context, req *proto.AuthenticateRequest, service *reverseproxy.Service) (bool, string, proxyauth.Method) { + switch v := req.GetRequest().(type) { + case *proto.AuthenticateRequest_Pin: + return s.authenticatePIN(ctx, req.GetId(), v, service.Auth.PinAuth) + case *proto.AuthenticateRequest_Password: + return s.authenticatePassword(ctx, req.GetId(), v, service.Auth.PasswordAuth) + default: + return false, "", "" + } +} + +func (s *ProxyServiceServer) authenticatePIN(ctx context.Context, serviceID string, req *proto.AuthenticateRequest_Pin, auth *reverseproxy.PINAuthConfig) (bool, string, proxyauth.Method) { + if auth == nil || !auth.Enabled { + log.WithContext(ctx).Debugf("PIN authentication attempted but not enabled for service %s", serviceID) + return false, "", "" + } + + if err := argon2id.Verify(req.Pin.GetPin(), auth.Pin); err != nil { + s.logAuthenticationError(ctx, err, "PIN") + return false, "", "" + } + + return true, "pin-user", proxyauth.MethodPIN +} + +func (s *ProxyServiceServer) authenticatePassword(ctx context.Context, serviceID string, req *proto.AuthenticateRequest_Password, auth *reverseproxy.PasswordAuthConfig) (bool, string, proxyauth.Method) { + if auth == nil || !auth.Enabled { + log.WithContext(ctx).Debugf("password authentication attempted but not enabled for service %s", serviceID) + return false, "", "" + } + + if err := argon2id.Verify(req.Password.GetPassword(), auth.Password); err != nil { + s.logAuthenticationError(ctx, err, "Password") + return false, "", "" + } + + return true, "password-user", proxyauth.MethodPassword +} + +func (s *ProxyServiceServer) logAuthenticationError(ctx context.Context, err error, authType string) { + if errors.Is(err, argon2id.ErrMismatchedHashAndPassword) { + log.WithContext(ctx).Tracef("%s authentication failed: invalid credentials", authType) + } else { + log.WithContext(ctx).Errorf("%s authentication error: %v", authType, err) + } +} + +func (s *ProxyServiceServer) generateSessionToken(ctx context.Context, authenticated bool, service *reverseproxy.Service, userId string, method proxyauth.Method) (string, error) { + if !authenticated || service.SessionPrivateKey == "" { + return "", nil + } + + token, err := sessionkey.SignToken( + service.SessionPrivateKey, + userId, + service.Domain, + method, + proxyauth.DefaultSessionExpiry, + ) + if err != nil { + log.WithContext(ctx).WithError(err).Error("failed to sign session token") + return "", status.Errorf(codes.Internal, "sign session token: %v", err) + } + + return token, nil +} + +// SendStatusUpdate handles status updates from proxy clients +func (s *ProxyServiceServer) SendStatusUpdate(ctx context.Context, req *proto.SendStatusUpdateRequest) (*proto.SendStatusUpdateResponse, error) { + accountID := req.GetAccountId() + serviceID := req.GetServiceId() + protoStatus := req.GetStatus() + certificateIssued := req.GetCertificateIssued() + + log.WithFields(log.Fields{ + "service_id": serviceID, + "account_id": accountID, + "status": protoStatus, + "certificate_issued": certificateIssued, + "error_message": req.GetErrorMessage(), + }).Debug("Status update from proxy server") + + if serviceID == "" || accountID == "" { + return nil, status.Errorf(codes.InvalidArgument, "service_id and account_id are required") + } + + if certificateIssued { + if err := s.reverseProxyManager.SetCertificateIssuedAt(ctx, accountID, serviceID); err != nil { + log.WithContext(ctx).WithError(err).Error("failed to set certificate issued timestamp") + return nil, status.Errorf(codes.Internal, "update certificate timestamp: %v", err) + } + log.WithFields(log.Fields{ + "service_id": serviceID, + "account_id": accountID, + }).Info("Certificate issued timestamp updated") + } + + internalStatus := protoStatusToInternal(protoStatus) + + if err := s.reverseProxyManager.SetStatus(ctx, accountID, serviceID, internalStatus); err != nil { + log.WithContext(ctx).WithError(err).Error("failed to update service status") + return nil, status.Errorf(codes.Internal, "update service status: %v", err) + } + + log.WithFields(log.Fields{ + "service_id": serviceID, + "account_id": accountID, + "status": internalStatus, + }).Info("Service status updated") + + return &proto.SendStatusUpdateResponse{}, nil +} + +// protoStatusToInternal maps proto status to internal status +func protoStatusToInternal(protoStatus proto.ProxyStatus) reverseproxy.ProxyStatus { + switch protoStatus { + case proto.ProxyStatus_PROXY_STATUS_PENDING: + return reverseproxy.StatusPending + case proto.ProxyStatus_PROXY_STATUS_ACTIVE: + return reverseproxy.StatusActive + case proto.ProxyStatus_PROXY_STATUS_TUNNEL_NOT_CREATED: + return reverseproxy.StatusTunnelNotCreated + case proto.ProxyStatus_PROXY_STATUS_CERTIFICATE_PENDING: + return reverseproxy.StatusCertificatePending + case proto.ProxyStatus_PROXY_STATUS_CERTIFICATE_FAILED: + return reverseproxy.StatusCertificateFailed + case proto.ProxyStatus_PROXY_STATUS_ERROR: + return reverseproxy.StatusError + default: + return reverseproxy.StatusError + } +} + +// CreateProxyPeer handles proxy peer creation with one-time token authentication +func (s *ProxyServiceServer) CreateProxyPeer(ctx context.Context, req *proto.CreateProxyPeerRequest) (*proto.CreateProxyPeerResponse, error) { + serviceID := req.GetServiceId() + accountID := req.GetAccountId() + token := req.GetToken() + cluster := req.GetCluster() + key := req.WireguardPublicKey + + log.WithFields(log.Fields{ + "service_id": serviceID, + "account_id": accountID, + "cluster": cluster, + }).Debug("CreateProxyPeer request received") + + if serviceID == "" || accountID == "" || token == "" { + log.Warn("CreateProxyPeer: missing required fields") + return &proto.CreateProxyPeerResponse{ + Success: false, + ErrorMessage: strPtr("missing required fields: service_id, account_id, and token are required"), + }, nil + } + + if err := s.tokenStore.ValidateAndConsume(token, accountID, serviceID); err != nil { + log.WithFields(log.Fields{ + "service_id": serviceID, + "account_id": accountID, + }).WithError(err).Warn("CreateProxyPeer: token validation failed") + return &proto.CreateProxyPeerResponse{ + Success: false, + ErrorMessage: strPtr("authentication failed: invalid or expired token"), + }, status.Errorf(codes.Unauthenticated, "token validation: %v", err) + } + + err := s.peersManager.CreateProxyPeer(ctx, accountID, key, cluster) + if err != nil { + log.WithFields(log.Fields{ + "service_id": serviceID, + "account_id": accountID, + }).WithError(err).Error("failed to create proxy peer") + return &proto.CreateProxyPeerResponse{ + Success: false, + ErrorMessage: strPtr(fmt.Sprintf("create proxy peer: %v", err)), + }, status.Errorf(codes.Internal, "create proxy peer: %v", err) + } + + return &proto.CreateProxyPeerResponse{ + Success: true, + }, nil +} + +// strPtr is a helper to create a string pointer for optional proto fields +func strPtr(s string) *string { + return &s +} + +func (s *ProxyServiceServer) GetOIDCURL(ctx context.Context, req *proto.GetOIDCURLRequest) (*proto.GetOIDCURLResponse, error) { + redirectURL, err := url.Parse(req.GetRedirectUrl()) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "parse redirect url: %v", err) + } + // Validate redirectURL against known service endpoints to avoid abuse of OIDC redirection. + services, err := s.reverseProxyManager.GetAccountServices(ctx, req.GetAccountId()) + if err != nil { + log.WithContext(ctx).Errorf("failed to get account services: %v", err) + return nil, status.Errorf(codes.FailedPrecondition, "get account services: %v", err) + } + var found bool + for _, service := range services { + if service.Domain == redirectURL.Hostname() { + found = true + break + } + } + if !found { + log.WithContext(ctx).Debugf("OIDC redirect URL %q does not match any service domain", redirectURL.Hostname()) + return nil, status.Errorf(codes.FailedPrecondition, "service not found in store") + } + + provider, err := oidc.NewProvider(ctx, s.oidcConfig.Issuer) + if err != nil { + log.WithContext(ctx).Errorf("failed to create OIDC provider: %v", err) + return nil, status.Errorf(codes.FailedPrecondition, "create OIDC provider: %v", err) + } + + scopes := s.oidcConfig.Scopes + if len(scopes) == 0 { + scopes = []string{oidc.ScopeOpenID, "profile", "email"} + } + + // Generate a random nonce to ensure each OIDC request gets a unique state. + // Without this, multiple requests to the same URL would generate the same state + // but different PKCE verifiers, causing the later verifier to overwrite the earlier one. + nonce := make([]byte, 16) + if _, err := rand.Read(nonce); err != nil { + return nil, status.Errorf(codes.Internal, "generate nonce: %v", err) + } + nonceB64 := base64.URLEncoding.EncodeToString(nonce) + + // Using an HMAC here to avoid redirection state being modified. + // State format: base64(redirectURL)|nonce|hmac(redirectURL|nonce) + payload := redirectURL.String() + "|" + nonceB64 + hmacSum := s.generateHMAC(payload) + state := fmt.Sprintf("%s|%s|%s", base64.URLEncoding.EncodeToString([]byte(redirectURL.String())), nonceB64, hmacSum) + + codeVerifier := oauth2.GenerateVerifier() + s.pkceVerifiers.Store(state, pkceEntry{verifier: codeVerifier, createdAt: time.Now()}) + + return &proto.GetOIDCURLResponse{ + Url: (&oauth2.Config{ + ClientID: s.oidcConfig.ClientID, + Endpoint: provider.Endpoint(), + RedirectURL: s.oidcConfig.CallbackURL, + Scopes: scopes, + }).AuthCodeURL(state, oauth2.S256ChallengeOption(codeVerifier)), + }, nil +} + +// GetOIDCConfig returns the OIDC configuration for token validation. +func (s *ProxyServiceServer) GetOIDCConfig() ProxyOIDCConfig { + return s.oidcConfig +} + +// GetOIDCValidationConfig returns the OIDC configuration for token validation +// in the format needed by ToProtoMapping. +func (s *ProxyServiceServer) GetOIDCValidationConfig() reverseproxy.OIDCValidationConfig { + return reverseproxy.OIDCValidationConfig{ + Issuer: s.oidcConfig.Issuer, + Audiences: []string{s.oidcConfig.Audience}, + KeysLocation: s.oidcConfig.KeysLocation, + MaxTokenAgeSeconds: 0, // No max token age by default + } +} + +func (s *ProxyServiceServer) generateHMAC(input string) string { + mac := hmac.New(sha256.New, s.oidcConfig.HMACKey) + mac.Write([]byte(input)) + return hex.EncodeToString(mac.Sum(nil)) +} + +// ValidateState validates the state parameter from an OAuth callback. +// Returns the original redirect URL if valid, or an error if invalid. +func (s *ProxyServiceServer) ValidateState(state string) (verifier, redirectURL string, err error) { + v, ok := s.pkceVerifiers.LoadAndDelete(state) + if !ok { + return "", "", errors.New("no verifier for state") + } + entry, ok := v.(pkceEntry) + if !ok { + return "", "", errors.New("invalid verifier for state") + } + if time.Since(entry.createdAt) > pkceVerifierTTL { + return "", "", errors.New("PKCE verifier expired") + } + verifier = entry.verifier + + // State format: base64(redirectURL)|nonce|hmac(redirectURL|nonce) + parts := strings.Split(state, "|") + if len(parts) != 3 { + return "", "", errors.New("invalid state format") + } + + encodedURL := parts[0] + nonce := parts[1] + providedHMAC := parts[2] + + redirectURLBytes, err := base64.URLEncoding.DecodeString(encodedURL) + if err != nil { + return "", "", fmt.Errorf("invalid state encoding: %w", err) + } + redirectURL = string(redirectURLBytes) + + payload := redirectURL + "|" + nonce + expectedHMAC := s.generateHMAC(payload) + + if !hmac.Equal([]byte(providedHMAC), []byte(expectedHMAC)) { + return "", "", errors.New("invalid state signature") + } + + return verifier, redirectURL, nil +} + +// GenerateSessionToken creates a signed session JWT for the given domain and user. +func (s *ProxyServiceServer) GenerateSessionToken(ctx context.Context, domain, userID string, method proxyauth.Method) (string, error) { + // Find the service by domain to get its signing key + services, err := s.reverseProxyManager.GetGlobalServices(ctx) + if err != nil { + return "", fmt.Errorf("get services: %w", err) + } + + var service *reverseproxy.Service + for _, svc := range services { + if svc.Domain == domain { + service = svc + break + } + } + if service == nil { + return "", fmt.Errorf("service not found for domain: %s", domain) + } + + if service.SessionPrivateKey == "" { + return "", fmt.Errorf("no session key configured for domain: %s", domain) + } + + return sessionkey.SignToken( + service.SessionPrivateKey, + userID, + domain, + method, + proxyauth.DefaultSessionExpiry, + ) +} + +// ValidateUserGroupAccess checks if a user has access to a service. +// It looks up the service within the user's account only, then optionally checks +// group membership if BearerAuth with DistributionGroups is configured. +func (s *ProxyServiceServer) ValidateUserGroupAccess(ctx context.Context, domain, userID string) error { + user, err := s.usersManager.GetUser(ctx, userID) + if err != nil { + return fmt.Errorf("user not found: %s", userID) + } + + service, err := s.getAccountServiceByDomain(ctx, user.AccountID, domain) + if err != nil { + return err + } + + if service.Auth.BearerAuth == nil || !service.Auth.BearerAuth.Enabled { + return nil + } + + allowedGroups := service.Auth.BearerAuth.DistributionGroups + if len(allowedGroups) == 0 { + return nil + } + + allowedSet := make(map[string]bool, len(allowedGroups)) + for _, groupID := range allowedGroups { + allowedSet[groupID] = true + } + + for _, groupID := range user.AutoGroups { + if allowedSet[groupID] { + log.WithFields(log.Fields{ + "user_id": user.Id, + "group_id": groupID, + "domain": domain, + }).Debug("User granted access via group membership") + return nil + } + } + + return fmt.Errorf("user %s not in allowed groups for domain %s", user.Id, domain) +} + +func (s *ProxyServiceServer) getAccountServiceByDomain(ctx context.Context, accountID, domain string) (*reverseproxy.Service, error) { + services, err := s.reverseProxyManager.GetAccountServices(ctx, accountID) + if err != nil { + return nil, fmt.Errorf("get account services: %w", err) + } + + for _, service := range services { + if service.Domain == domain { + return service, nil + } + } + + return nil, fmt.Errorf("service not found for domain %s in account %s", domain, accountID) +} + +// ValidateSession validates a session token and checks if the user has access to the domain. +func (s *ProxyServiceServer) ValidateSession(ctx context.Context, req *proto.ValidateSessionRequest) (*proto.ValidateSessionResponse, error) { + domain := req.GetDomain() + sessionToken := req.GetSessionToken() + + if domain == "" || sessionToken == "" { + return &proto.ValidateSessionResponse{ + Valid: false, + DeniedReason: "missing domain or session_token", + }, nil + } + + service, err := s.getServiceByDomain(ctx, domain) + if err != nil { + log.WithFields(log.Fields{ + "domain": domain, + "error": err.Error(), + }).Debug("ValidateSession: service not found") + //nolint:nilerr + return &proto.ValidateSessionResponse{ + Valid: false, + DeniedReason: "service_not_found", + }, nil + } + + pubKeyBytes, err := base64.StdEncoding.DecodeString(service.SessionPublicKey) + if err != nil { + log.WithFields(log.Fields{ + "domain": domain, + "error": err.Error(), + }).Error("ValidateSession: decode public key") + //nolint:nilerr + return &proto.ValidateSessionResponse{ + Valid: false, + DeniedReason: "invalid_service_config", + }, nil + } + + userID, _, err := proxyauth.ValidateSessionJWT(sessionToken, domain, pubKeyBytes) + if err != nil { + log.WithFields(log.Fields{ + "domain": domain, + "error": err.Error(), + }).Debug("ValidateSession: invalid session token") + //nolint:nilerr + return &proto.ValidateSessionResponse{ + Valid: false, + DeniedReason: "invalid_token", + }, nil + } + + user, err := s.usersManager.GetUser(ctx, userID) + if err != nil { + log.WithFields(log.Fields{ + "domain": domain, + "user_id": userID, + "error": err.Error(), + }).Debug("ValidateSession: user not found") + //nolint:nilerr + return &proto.ValidateSessionResponse{ + Valid: false, + DeniedReason: "user_not_found", + }, nil + } + + if user.AccountID != service.AccountID { + log.WithFields(log.Fields{ + "domain": domain, + "user_id": userID, + "user_account": user.AccountID, + "service_account": service.AccountID, + }).Debug("ValidateSession: user account mismatch") + //nolint:nilerr + return &proto.ValidateSessionResponse{ + Valid: false, + DeniedReason: "account_mismatch", + }, nil + } + + if err := s.checkGroupAccess(service, user); err != nil { + log.WithFields(log.Fields{ + "domain": domain, + "user_id": userID, + "error": err.Error(), + }).Debug("ValidateSession: access denied") + //nolint:nilerr + return &proto.ValidateSessionResponse{ + Valid: false, + UserId: user.Id, + UserEmail: user.Email, + DeniedReason: "not_in_group", + }, nil + } + + log.WithFields(log.Fields{ + "domain": domain, + "user_id": userID, + "email": user.Email, + }).Debug("ValidateSession: access granted") + + return &proto.ValidateSessionResponse{ + Valid: true, + UserId: user.Id, + UserEmail: user.Email, + }, nil +} + +func (s *ProxyServiceServer) getServiceByDomain(ctx context.Context, domain string) (*reverseproxy.Service, error) { + services, err := s.reverseProxyManager.GetGlobalServices(ctx) + if err != nil { + return nil, fmt.Errorf("get services: %w", err) + } + + for _, service := range services { + if service.Domain == domain { + return service, nil + } + } + + return nil, fmt.Errorf("service not found for domain: %s", domain) +} + +func (s *ProxyServiceServer) checkGroupAccess(service *reverseproxy.Service, user *types.User) error { + if service.Auth.BearerAuth == nil || !service.Auth.BearerAuth.Enabled { + return nil + } + + allowedGroups := service.Auth.BearerAuth.DistributionGroups + if len(allowedGroups) == 0 { + return nil + } + + allowedSet := make(map[string]bool, len(allowedGroups)) + for _, groupID := range allowedGroups { + allowedSet[groupID] = true + } + + for _, groupID := range user.AutoGroups { + if allowedSet[groupID] { + return nil + } + } + + return fmt.Errorf("user not in allowed groups") +} diff --git a/management/internals/shared/grpc/proxy_auth.go b/management/internals/shared/grpc/proxy_auth.go new file mode 100644 index 000000000..6daeab5f2 --- /dev/null +++ b/management/internals/shared/grpc/proxy_auth.go @@ -0,0 +1,234 @@ +package grpc + +import ( + "context" + "strings" + "sync" + "time" + + log "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" +) + +const ( + // lastUsedUpdateInterval is the minimum interval between last_used updates for the same token. + lastUsedUpdateInterval = time.Minute + // lastUsedCleanupInterval is how often stale lastUsed entries are removed. + lastUsedCleanupInterval = 2 * time.Minute +) + +type proxyTokenContextKey struct{} + +// ProxyTokenContextKey is the typed key used to store validated token info in context. +var ProxyTokenContextKey = proxyTokenContextKey{} + +// proxyTokenID identifies a proxy access token by its database ID. +type proxyTokenID = string + +// proxyTokenStore defines the store interface needed for token validation +type proxyTokenStore interface { + GetProxyAccessTokenByHashedToken(ctx context.Context, lockStrength store.LockingStrength, hashedToken types.HashedProxyToken) (*types.ProxyAccessToken, error) + MarkProxyAccessTokenUsed(ctx context.Context, tokenID string) error +} + +// proxyAuthInterceptor holds state for proxy authentication interceptors. +type proxyAuthInterceptor struct { + store proxyTokenStore + failureLimiter *authFailureLimiter + + // lastUsedMu protects lastUsedTimes + lastUsedMu sync.Mutex + lastUsedTimes map[proxyTokenID]time.Time + cancel context.CancelFunc +} + +func newProxyAuthInterceptor(tokenStore proxyTokenStore) *proxyAuthInterceptor { + ctx, cancel := context.WithCancel(context.Background()) + i := &proxyAuthInterceptor{ + store: tokenStore, + failureLimiter: newAuthFailureLimiter(), + lastUsedTimes: make(map[proxyTokenID]time.Time), + cancel: cancel, + } + go i.lastUsedCleanupLoop(ctx) + return i +} + +// NewProxyAuthInterceptors creates gRPC unary and stream interceptors that validate proxy access tokens. +// They only intercept ProxyService methods. Both interceptors share state for last-used and failure rate limiting. +// The returned close function must be called on shutdown to stop background goroutines. +func NewProxyAuthInterceptors(tokenStore proxyTokenStore) (grpc.UnaryServerInterceptor, grpc.StreamServerInterceptor, func()) { + interceptor := newProxyAuthInterceptor(tokenStore) + + unary := func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { + if !strings.HasPrefix(info.FullMethod, "/management.ProxyService/") { + return handler(ctx, req) + } + + token, err := interceptor.validateProxyToken(ctx) + if err != nil { + // Log auth failures explicitly; gRPC doesn't log these by default. + log.WithContext(ctx).Warnf("proxy auth failed: %v", err) + return nil, err + } + + ctx = context.WithValue(ctx, ProxyTokenContextKey, token) + return handler(ctx, req) + } + + stream := func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if !strings.HasPrefix(info.FullMethod, "/management.ProxyService/") { + return handler(srv, ss) + } + + token, err := interceptor.validateProxyToken(ss.Context()) + if err != nil { + // Log auth failures explicitly; gRPC doesn't log these by default. + log.WithContext(ss.Context()).Warnf("proxy auth failed: %v", err) + return err + } + + ctx := context.WithValue(ss.Context(), ProxyTokenContextKey, token) + wrapped := &wrappedServerStream{ + ServerStream: ss, + ctx: ctx, + } + + return handler(srv, wrapped) + } + + return unary, stream, interceptor.close +} + +func (i *proxyAuthInterceptor) validateProxyToken(ctx context.Context) (*types.ProxyAccessToken, error) { + clientIP := peerIPFromContext(ctx) + + if clientIP != "" && i.failureLimiter.isLimited(clientIP) { + return nil, status.Errorf(codes.ResourceExhausted, "too many failed authentication attempts") + } + + token, err := i.doValidateProxyToken(ctx) + if err != nil { + if clientIP != "" { + i.failureLimiter.recordFailure(clientIP) + } + return nil, err + } + + i.maybeUpdateLastUsed(ctx, token.ID) + + return token, nil +} + +func (i *proxyAuthInterceptor) doValidateProxyToken(ctx context.Context) (*types.ProxyAccessToken, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Errorf(codes.Unauthenticated, "missing metadata") + } + + authValues := md.Get("authorization") + if len(authValues) == 0 { + return nil, status.Errorf(codes.Unauthenticated, "missing authorization header") + } + + authValue := authValues[0] + if !strings.HasPrefix(authValue, "Bearer ") { + return nil, status.Errorf(codes.Unauthenticated, "invalid authorization format") + } + + plainToken := types.PlainProxyToken(strings.TrimPrefix(authValue, "Bearer ")) + + if err := plainToken.Validate(); err != nil { + return nil, status.Errorf(codes.Unauthenticated, "invalid token format") + } + + token, err := i.store.GetProxyAccessTokenByHashedToken(ctx, store.LockingStrengthNone, plainToken.Hash()) + if err != nil { + return nil, status.Errorf(codes.Unauthenticated, "invalid token") + } + + // TODO: Enforce AccountID scope for "bring your own proxy" feature. + // Currently tokens are management-wide; AccountID field is reserved for future use. + + if !token.IsValid() { + return nil, status.Errorf(codes.Unauthenticated, "token expired or revoked") + } + + return token, nil +} + +// maybeUpdateLastUsed updates the last_used timestamp if enough time has passed since the last update. +func (i *proxyAuthInterceptor) maybeUpdateLastUsed(ctx context.Context, tokenID string) { + now := time.Now() + + i.lastUsedMu.Lock() + lastUpdate, exists := i.lastUsedTimes[tokenID] + if exists && now.Sub(lastUpdate) < lastUsedUpdateInterval { + i.lastUsedMu.Unlock() + return + } + i.lastUsedTimes[tokenID] = now + i.lastUsedMu.Unlock() + + if err := i.store.MarkProxyAccessTokenUsed(ctx, tokenID); err != nil { + log.WithContext(ctx).Debugf("failed to mark proxy token as used: %v", err) + } +} + +func (i *proxyAuthInterceptor) lastUsedCleanupLoop(ctx context.Context) { + ticker := time.NewTicker(lastUsedCleanupInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + i.cleanupStaleLastUsed() + case <-ctx.Done(): + return + } + } +} + +// cleanupStaleLastUsed removes entries older than 2x the update interval. +func (i *proxyAuthInterceptor) cleanupStaleLastUsed() { + i.lastUsedMu.Lock() + defer i.lastUsedMu.Unlock() + + now := time.Now() + staleThreshold := 2 * lastUsedUpdateInterval + for id, lastUpdate := range i.lastUsedTimes { + if now.Sub(lastUpdate) > staleThreshold { + delete(i.lastUsedTimes, id) + } + } +} + +func (i *proxyAuthInterceptor) close() { + i.cancel() + i.failureLimiter.stop() +} + +// GetProxyTokenFromContext retrieves the validated proxy token from the context +func GetProxyTokenFromContext(ctx context.Context) *types.ProxyAccessToken { + token, ok := ctx.Value(ProxyTokenContextKey).(*types.ProxyAccessToken) + if !ok { + return nil + } + return token +} + +// wrappedServerStream wraps a grpc.ServerStream to provide a custom context +type wrappedServerStream struct { + grpc.ServerStream + ctx context.Context +} + +func (w *wrappedServerStream) Context() context.Context { + return w.ctx +} diff --git a/management/internals/shared/grpc/proxy_auth_ratelimit.go b/management/internals/shared/grpc/proxy_auth_ratelimit.go new file mode 100644 index 000000000..447e531b0 --- /dev/null +++ b/management/internals/shared/grpc/proxy_auth_ratelimit.go @@ -0,0 +1,134 @@ +package grpc + +import ( + "context" + "net" + "sync" + "time" + + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/realip" + "golang.org/x/time/rate" + "google.golang.org/grpc/peer" +) + +const ( + // proxyAuthFailureBurst is the maximum number of failed attempts before rate limiting kicks in. + proxyAuthFailureBurst = 5 + // proxyAuthLimiterCleanup is how often stale limiters are removed. + proxyAuthLimiterCleanup = 5 * time.Minute + // proxyAuthLimiterTTL is how long a limiter is kept after the last failure. + proxyAuthLimiterTTL = 15 * time.Minute +) + +// defaultProxyAuthFailureRate is the token replenishment rate for failed auth attempts. +// One token every 12 seconds = 5 per minute. +var defaultProxyAuthFailureRate = rate.Every(12 * time.Second) + +// clientIP identifies a client by its IP address for rate limiting purposes. +type clientIP = string + +type limiterEntry struct { + limiter *rate.Limiter + lastAccess time.Time +} + +// authFailureLimiter tracks per-IP rate limits for failed proxy authentication attempts. +type authFailureLimiter struct { + mu sync.Mutex + limiters map[clientIP]*limiterEntry + failureRate rate.Limit + cancel context.CancelFunc +} + +func newAuthFailureLimiter() *authFailureLimiter { + return newAuthFailureLimiterWithRate(defaultProxyAuthFailureRate) +} + +func newAuthFailureLimiterWithRate(failureRate rate.Limit) *authFailureLimiter { + ctx, cancel := context.WithCancel(context.Background()) + l := &authFailureLimiter{ + limiters: make(map[clientIP]*limiterEntry), + failureRate: failureRate, + cancel: cancel, + } + go l.cleanupLoop(ctx) + return l +} + +// isLimited returns true if the given IP has exhausted its failure budget. +func (l *authFailureLimiter) isLimited(ip clientIP) bool { + l.mu.Lock() + defer l.mu.Unlock() + + entry, exists := l.limiters[ip] + if !exists { + return false + } + + return entry.limiter.Tokens() < 1 +} + +// recordFailure consumes a token from the rate limiter for the given IP. +func (l *authFailureLimiter) recordFailure(ip clientIP) { + l.mu.Lock() + defer l.mu.Unlock() + + now := time.Now() + entry, exists := l.limiters[ip] + if !exists { + entry = &limiterEntry{ + limiter: rate.NewLimiter(l.failureRate, proxyAuthFailureBurst), + } + l.limiters[ip] = entry + } + entry.lastAccess = now + entry.limiter.Allow() +} + +func (l *authFailureLimiter) cleanupLoop(ctx context.Context) { + ticker := time.NewTicker(proxyAuthLimiterCleanup) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + l.cleanup() + case <-ctx.Done(): + return + } + } +} + +func (l *authFailureLimiter) cleanup() { + l.mu.Lock() + defer l.mu.Unlock() + + now := time.Now() + for ip, entry := range l.limiters { + if now.Sub(entry.lastAccess) > proxyAuthLimiterTTL { + delete(l.limiters, ip) + } + } +} + +func (l *authFailureLimiter) stop() { + l.cancel() +} + +// peerIPFromContext extracts the client IP from the gRPC context. +// Uses realip (from trusted proxy headers) first, falls back to the transport peer address. +func peerIPFromContext(ctx context.Context) clientIP { + if addr, ok := realip.FromContext(ctx); ok { + return addr.String() + } + + if p, ok := peer.FromContext(ctx); ok { + host, _, err := net.SplitHostPort(p.Addr.String()) + if err != nil { + return p.Addr.String() + } + return host + } + + return "" +} diff --git a/management/internals/shared/grpc/proxy_auth_ratelimit_test.go b/management/internals/shared/grpc/proxy_auth_ratelimit_test.go new file mode 100644 index 000000000..3577baeb8 --- /dev/null +++ b/management/internals/shared/grpc/proxy_auth_ratelimit_test.go @@ -0,0 +1,98 @@ +package grpc + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/time/rate" +) + +func TestAuthFailureLimiter_NotLimitedInitially(t *testing.T) { + l := newAuthFailureLimiter() + defer l.stop() + + assert.False(t, l.isLimited("192.168.1.1"), "new IP should not be rate limited") +} + +func TestAuthFailureLimiter_LimitedAfterBurst(t *testing.T) { + l := newAuthFailureLimiter() + defer l.stop() + + ip := "192.168.1.1" + for i := 0; i < proxyAuthFailureBurst; i++ { + l.recordFailure(ip) + } + + assert.True(t, l.isLimited(ip), "IP should be limited after exhausting burst") +} + +func TestAuthFailureLimiter_DifferentIPsIndependent(t *testing.T) { + l := newAuthFailureLimiter() + defer l.stop() + + for i := 0; i < proxyAuthFailureBurst; i++ { + l.recordFailure("192.168.1.1") + } + + assert.True(t, l.isLimited("192.168.1.1")) + assert.False(t, l.isLimited("192.168.1.2"), "different IP should not be affected") +} + +func TestAuthFailureLimiter_RecoveryOverTime(t *testing.T) { + l := newAuthFailureLimiterWithRate(rate.Limit(100)) // 100 tokens/sec for fast recovery + defer l.stop() + + ip := "10.0.0.1" + + // Exhaust burst + for i := 0; i < proxyAuthFailureBurst; i++ { + l.recordFailure(ip) + } + require.True(t, l.isLimited(ip)) + + // Wait for token replenishment + time.Sleep(50 * time.Millisecond) + + assert.False(t, l.isLimited(ip), "should recover after tokens replenish") +} + +func TestAuthFailureLimiter_Cleanup(t *testing.T) { + l := newAuthFailureLimiter() + defer l.stop() + + l.recordFailure("10.0.0.1") + + l.mu.Lock() + require.Len(t, l.limiters, 1) + // Backdate the entry so it looks stale + l.limiters["10.0.0.1"].lastAccess = time.Now().Add(-proxyAuthLimiterTTL - time.Minute) + l.mu.Unlock() + + l.cleanup() + + l.mu.Lock() + assert.Empty(t, l.limiters, "stale entries should be cleaned up") + l.mu.Unlock() +} + +func TestAuthFailureLimiter_CleanupKeepsFresh(t *testing.T) { + l := newAuthFailureLimiter() + defer l.stop() + + l.recordFailure("10.0.0.1") + l.recordFailure("10.0.0.2") + + l.mu.Lock() + // Only backdate one entry + l.limiters["10.0.0.1"].lastAccess = time.Now().Add(-proxyAuthLimiterTTL - time.Minute) + l.mu.Unlock() + + l.cleanup() + + l.mu.Lock() + assert.Len(t, l.limiters, 1, "only stale entries should be removed") + assert.Contains(t, l.limiters, "10.0.0.2") + l.mu.Unlock() +} diff --git a/management/internals/shared/grpc/proxy_group_access_test.go b/management/internals/shared/grpc/proxy_group_access_test.go new file mode 100644 index 000000000..84fb54923 --- /dev/null +++ b/management/internals/shared/grpc/proxy_group_access_test.go @@ -0,0 +1,381 @@ +package grpc + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/server/types" +) + +type mockReverseProxyManager struct { + proxiesByAccount map[string][]*reverseproxy.Service + err error +} + +func (m *mockReverseProxyManager) GetAccountServices(ctx context.Context, accountID string) ([]*reverseproxy.Service, error) { + if m.err != nil { + return nil, m.err + } + return m.proxiesByAccount[accountID], nil +} + +func (m *mockReverseProxyManager) GetGlobalServices(ctx context.Context) ([]*reverseproxy.Service, error) { + return nil, nil +} + +func (m *mockReverseProxyManager) GetAllServices(ctx context.Context, accountID, userID string) ([]*reverseproxy.Service, error) { + return []*reverseproxy.Service{}, nil +} + +func (m *mockReverseProxyManager) GetService(ctx context.Context, accountID, userID, reverseProxyID string) (*reverseproxy.Service, error) { + return &reverseproxy.Service{}, nil +} + +func (m *mockReverseProxyManager) CreateService(ctx context.Context, accountID, userID string, rp *reverseproxy.Service) (*reverseproxy.Service, error) { + return &reverseproxy.Service{}, nil +} + +func (m *mockReverseProxyManager) UpdateService(ctx context.Context, accountID, userID string, rp *reverseproxy.Service) (*reverseproxy.Service, error) { + return &reverseproxy.Service{}, nil +} + +func (m *mockReverseProxyManager) DeleteService(ctx context.Context, accountID, userID, reverseProxyID string) error { + return nil +} + +func (m *mockReverseProxyManager) SetCertificateIssuedAt(ctx context.Context, accountID, reverseProxyID string) error { + return nil +} + +func (m *mockReverseProxyManager) SetStatus(ctx context.Context, accountID, reverseProxyID string, status reverseproxy.ProxyStatus) error { + return nil +} + +func (m *mockReverseProxyManager) ReloadAllServicesForAccount(ctx context.Context, accountID string) error { + return nil +} + +func (m *mockReverseProxyManager) ReloadService(ctx context.Context, accountID, reverseProxyID string) error { + return nil +} + +func (m *mockReverseProxyManager) GetServiceByID(ctx context.Context, accountID, reverseProxyID string) (*reverseproxy.Service, error) { + return &reverseproxy.Service{}, nil +} + +func (m *mockReverseProxyManager) GetServiceIDByTargetID(_ context.Context, _, _ string) (string, error) { + return "", nil +} + +type mockUsersManager struct { + users map[string]*types.User + err error +} + +func (m *mockUsersManager) GetUser(ctx context.Context, userID string) (*types.User, error) { + if m.err != nil { + return nil, m.err + } + user, ok := m.users[userID] + if !ok { + return nil, errors.New("user not found") + } + return user, nil +} + +func TestValidateUserGroupAccess(t *testing.T) { + tests := []struct { + name string + domain string + userID string + proxiesByAccount map[string][]*reverseproxy.Service + users map[string]*types.User + proxyErr error + userErr error + expectErr bool + expectErrMsg string + }{ + { + name: "user not found", + domain: "app.example.com", + userID: "unknown-user", + proxiesByAccount: map[string][]*reverseproxy.Service{ + "account1": {{Domain: "app.example.com", AccountID: "account1"}}, + }, + users: map[string]*types.User{}, + expectErr: true, + expectErrMsg: "user not found", + }, + { + name: "proxy not found in user's account", + domain: "app.example.com", + userID: "user1", + proxiesByAccount: map[string][]*reverseproxy.Service{}, + users: map[string]*types.User{ + "user1": {Id: "user1", AccountID: "account1"}, + }, + expectErr: true, + expectErrMsg: "service not found", + }, + { + name: "proxy exists in different account - not accessible", + domain: "app.example.com", + userID: "user1", + proxiesByAccount: map[string][]*reverseproxy.Service{ + "account2": {{Domain: "app.example.com", AccountID: "account2"}}, + }, + users: map[string]*types.User{ + "user1": {Id: "user1", AccountID: "account1"}, + }, + expectErr: true, + expectErrMsg: "service not found", + }, + { + name: "no bearer auth configured - same account allows access", + domain: "app.example.com", + userID: "user1", + proxiesByAccount: map[string][]*reverseproxy.Service{ + "account1": {{Domain: "app.example.com", AccountID: "account1", Auth: reverseproxy.AuthConfig{}}}, + }, + users: map[string]*types.User{ + "user1": {Id: "user1", AccountID: "account1"}, + }, + expectErr: false, + }, + { + name: "bearer auth disabled - same account allows access", + domain: "app.example.com", + userID: "user1", + proxiesByAccount: map[string][]*reverseproxy.Service{ + "account1": {{ + Domain: "app.example.com", + AccountID: "account1", + Auth: reverseproxy.AuthConfig{ + BearerAuth: &reverseproxy.BearerAuthConfig{Enabled: false}, + }, + }}, + }, + users: map[string]*types.User{ + "user1": {Id: "user1", AccountID: "account1"}, + }, + expectErr: false, + }, + { + name: "bearer auth enabled but no groups configured - same account allows access", + domain: "app.example.com", + userID: "user1", + proxiesByAccount: map[string][]*reverseproxy.Service{ + "account1": {{ + Domain: "app.example.com", + AccountID: "account1", + Auth: reverseproxy.AuthConfig{ + BearerAuth: &reverseproxy.BearerAuthConfig{ + Enabled: true, + DistributionGroups: []string{}, + }, + }, + }}, + }, + users: map[string]*types.User{ + "user1": {Id: "user1", AccountID: "account1"}, + }, + expectErr: false, + }, + { + name: "user not in allowed groups", + domain: "app.example.com", + userID: "user1", + proxiesByAccount: map[string][]*reverseproxy.Service{ + "account1": {{ + Domain: "app.example.com", + AccountID: "account1", + Auth: reverseproxy.AuthConfig{ + BearerAuth: &reverseproxy.BearerAuthConfig{ + Enabled: true, + DistributionGroups: []string{"group1", "group2"}, + }, + }, + }}, + }, + users: map[string]*types.User{ + "user1": {Id: "user1", AccountID: "account1", AutoGroups: []string{"group3", "group4"}}, + }, + expectErr: true, + expectErrMsg: "not in allowed groups", + }, + { + name: "user in one of the allowed groups - allow access", + domain: "app.example.com", + userID: "user1", + proxiesByAccount: map[string][]*reverseproxy.Service{ + "account1": {{ + Domain: "app.example.com", + AccountID: "account1", + Auth: reverseproxy.AuthConfig{ + BearerAuth: &reverseproxy.BearerAuthConfig{ + Enabled: true, + DistributionGroups: []string{"group1", "group2"}, + }, + }, + }}, + }, + users: map[string]*types.User{ + "user1": {Id: "user1", AccountID: "account1", AutoGroups: []string{"group2", "group3"}}, + }, + expectErr: false, + }, + { + name: "user in all allowed groups - allow access", + domain: "app.example.com", + userID: "user1", + proxiesByAccount: map[string][]*reverseproxy.Service{ + "account1": {{ + Domain: "app.example.com", + AccountID: "account1", + Auth: reverseproxy.AuthConfig{ + BearerAuth: &reverseproxy.BearerAuthConfig{ + Enabled: true, + DistributionGroups: []string{"group1", "group2"}, + }, + }, + }}, + }, + users: map[string]*types.User{ + "user1": {Id: "user1", AccountID: "account1", AutoGroups: []string{"group1", "group2", "group3"}}, + }, + expectErr: false, + }, + { + name: "proxy manager error", + domain: "app.example.com", + userID: "user1", + proxiesByAccount: nil, + proxyErr: errors.New("database error"), + users: map[string]*types.User{ + "user1": {Id: "user1", AccountID: "account1"}, + }, + expectErr: true, + expectErrMsg: "get account services", + }, + { + name: "multiple proxies in account - finds correct one", + domain: "app2.example.com", + userID: "user1", + proxiesByAccount: map[string][]*reverseproxy.Service{ + "account1": { + {Domain: "app1.example.com", AccountID: "account1"}, + {Domain: "app2.example.com", AccountID: "account1", Auth: reverseproxy.AuthConfig{}}, + {Domain: "app3.example.com", AccountID: "account1"}, + }, + }, + users: map[string]*types.User{ + "user1": {Id: "user1", AccountID: "account1"}, + }, + expectErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + server := &ProxyServiceServer{ + reverseProxyManager: &mockReverseProxyManager{ + proxiesByAccount: tt.proxiesByAccount, + err: tt.proxyErr, + }, + usersManager: &mockUsersManager{ + users: tt.users, + err: tt.userErr, + }, + } + + err := server.ValidateUserGroupAccess(context.Background(), tt.domain, tt.userID) + + if tt.expectErr { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.expectErrMsg) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestGetAccountProxyByDomain(t *testing.T) { + tests := []struct { + name string + accountID string + domain string + proxiesByAccount map[string][]*reverseproxy.Service + err error + expectProxy bool + expectErr bool + }{ + { + name: "proxy found", + accountID: "account1", + domain: "app.example.com", + proxiesByAccount: map[string][]*reverseproxy.Service{ + "account1": { + {Domain: "other.example.com", AccountID: "account1"}, + {Domain: "app.example.com", AccountID: "account1"}, + }, + }, + expectProxy: true, + expectErr: false, + }, + { + name: "proxy not found in account", + accountID: "account1", + domain: "unknown.example.com", + proxiesByAccount: map[string][]*reverseproxy.Service{ + "account1": {{Domain: "app.example.com", AccountID: "account1"}}, + }, + expectProxy: false, + expectErr: true, + }, + { + name: "empty proxy list for account", + accountID: "account1", + domain: "app.example.com", + proxiesByAccount: map[string][]*reverseproxy.Service{}, + expectProxy: false, + expectErr: true, + }, + { + name: "manager error", + accountID: "account1", + domain: "app.example.com", + proxiesByAccount: nil, + err: errors.New("database error"), + expectProxy: false, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + server := &ProxyServiceServer{ + reverseProxyManager: &mockReverseProxyManager{ + proxiesByAccount: tt.proxiesByAccount, + err: tt.err, + }, + } + + proxy, err := server.getAccountServiceByDomain(context.Background(), tt.accountID, tt.domain) + + if tt.expectErr { + require.Error(t, err) + assert.Nil(t, proxy) + } else { + require.NoError(t, err) + require.NotNil(t, proxy) + assert.Equal(t, tt.domain, proxy.Domain) + } + }) + } +} diff --git a/management/internals/shared/grpc/proxy_test.go b/management/internals/shared/grpc/proxy_test.go new file mode 100644 index 000000000..4c84e6010 --- /dev/null +++ b/management/internals/shared/grpc/proxy_test.go @@ -0,0 +1,232 @@ +package grpc + +import ( + "crypto/rand" + "encoding/base64" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/shared/management/proto" +) + +// registerFakeProxy adds a fake proxy connection to the server's internal maps +// and returns the channel where messages will be received. +func registerFakeProxy(s *ProxyServiceServer, proxyID, clusterAddr string) chan *proto.ProxyMapping { + ch := make(chan *proto.ProxyMapping, 10) + conn := &proxyConnection{ + proxyID: proxyID, + address: clusterAddr, + sendChan: ch, + } + s.connectedProxies.Store(proxyID, conn) + + proxySet, _ := s.clusterProxies.LoadOrStore(clusterAddr, &sync.Map{}) + proxySet.(*sync.Map).Store(proxyID, struct{}{}) + + return ch +} + +func drainChannel(ch chan *proto.ProxyMapping) *proto.ProxyMapping { + select { + case msg := <-ch: + return msg + case <-time.After(time.Second): + return nil + } +} + +func TestSendServiceUpdateToCluster_UniqueTokensPerProxy(t *testing.T) { + tokenStore := NewOneTimeTokenStore(time.Hour) + defer tokenStore.Close() + + s := &ProxyServiceServer{ + tokenStore: tokenStore, + updatesChan: make(chan *proto.ProxyMapping, 100), + } + + const cluster = "proxy.example.com" + const numProxies = 3 + + channels := make([]chan *proto.ProxyMapping, numProxies) + for i := range numProxies { + id := "proxy-" + string(rune('a'+i)) + channels[i] = registerFakeProxy(s, id, cluster) + } + + update := &proto.ProxyMapping{ + Type: proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, + Id: "service-1", + AccountId: "account-1", + Domain: "test.example.com", + Path: []*proto.PathMapping{ + {Path: "/", Target: "http://10.0.0.1:8080/"}, + }, + } + + s.SendServiceUpdateToCluster(update, cluster) + + tokens := make([]string, numProxies) + for i, ch := range channels { + msg := drainChannel(ch) + require.NotNil(t, msg, "proxy %d should receive a message", i) + assert.Equal(t, update.Domain, msg.Domain) + assert.Equal(t, update.Id, msg.Id) + assert.NotEmpty(t, msg.AuthToken, "proxy %d should have a non-empty token", i) + tokens[i] = msg.AuthToken + } + + // All tokens must be unique + tokenSet := make(map[string]struct{}) + for i, tok := range tokens { + _, exists := tokenSet[tok] + assert.False(t, exists, "proxy %d got duplicate token", i) + tokenSet[tok] = struct{}{} + } + + // Each token must be independently consumable + for i, tok := range tokens { + err := tokenStore.ValidateAndConsume(tok, "account-1", "service-1") + assert.NoError(t, err, "proxy %d token should validate successfully", i) + } +} + +func TestSendServiceUpdateToCluster_DeleteNoToken(t *testing.T) { + tokenStore := NewOneTimeTokenStore(time.Hour) + defer tokenStore.Close() + + s := &ProxyServiceServer{ + tokenStore: tokenStore, + updatesChan: make(chan *proto.ProxyMapping, 100), + } + + const cluster = "proxy.example.com" + ch1 := registerFakeProxy(s, "proxy-a", cluster) + ch2 := registerFakeProxy(s, "proxy-b", cluster) + + update := &proto.ProxyMapping{ + Type: proto.ProxyMappingUpdateType_UPDATE_TYPE_REMOVED, + Id: "service-1", + AccountId: "account-1", + Domain: "test.example.com", + } + + s.SendServiceUpdateToCluster(update, cluster) + + msg1 := drainChannel(ch1) + msg2 := drainChannel(ch2) + require.NotNil(t, msg1) + require.NotNil(t, msg2) + + // Delete operations should not generate tokens + assert.Empty(t, msg1.AuthToken) + assert.Empty(t, msg2.AuthToken) + + // No tokens should have been created + assert.Equal(t, 0, tokenStore.GetTokenCount()) +} + +func TestSendServiceUpdate_UniqueTokensPerProxy(t *testing.T) { + tokenStore := NewOneTimeTokenStore(time.Hour) + defer tokenStore.Close() + + s := &ProxyServiceServer{ + tokenStore: tokenStore, + updatesChan: make(chan *proto.ProxyMapping, 100), + } + + // Register proxies in different clusters (SendServiceUpdate broadcasts to all) + ch1 := registerFakeProxy(s, "proxy-a", "cluster-a") + ch2 := registerFakeProxy(s, "proxy-b", "cluster-b") + + update := &proto.ProxyMapping{ + Type: proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, + Id: "service-1", + AccountId: "account-1", + Domain: "test.example.com", + } + + s.SendServiceUpdate(update) + + msg1 := drainChannel(ch1) + msg2 := drainChannel(ch2) + require.NotNil(t, msg1) + require.NotNil(t, msg2) + + assert.NotEmpty(t, msg1.AuthToken) + assert.NotEmpty(t, msg2.AuthToken) + assert.NotEqual(t, msg1.AuthToken, msg2.AuthToken, "tokens must be unique per proxy") + + // Both tokens should validate + assert.NoError(t, tokenStore.ValidateAndConsume(msg1.AuthToken, "account-1", "service-1")) + assert.NoError(t, tokenStore.ValidateAndConsume(msg2.AuthToken, "account-1", "service-1")) +} + +// generateState creates a state using the same format as GetOIDCURL. +func generateState(s *ProxyServiceServer, redirectURL string) string { + nonce := make([]byte, 16) + _, _ = rand.Read(nonce) + nonceB64 := base64.URLEncoding.EncodeToString(nonce) + + payload := redirectURL + "|" + nonceB64 + hmacSum := s.generateHMAC(payload) + return base64.URLEncoding.EncodeToString([]byte(redirectURL)) + "|" + nonceB64 + "|" + hmacSum +} + +func TestOAuthState_NeverTheSame(t *testing.T) { + s := &ProxyServiceServer{ + oidcConfig: ProxyOIDCConfig{ + HMACKey: []byte("test-hmac-key"), + }, + } + + redirectURL := "https://app.example.com/callback" + + // Generate 100 states for the same redirect URL + states := make(map[string]bool) + for i := 0; i < 100; i++ { + state := generateState(s, redirectURL) + + // State must have 3 parts: base64(url)|nonce|hmac + parts := strings.Split(state, "|") + require.Equal(t, 3, len(parts), "state must have 3 parts") + + // State must be unique + require.False(t, states[state], "state %d is a duplicate", i) + states[state] = true + } +} + +func TestValidateState_RejectsOldTwoPartFormat(t *testing.T) { + s := &ProxyServiceServer{ + oidcConfig: ProxyOIDCConfig{ + HMACKey: []byte("test-hmac-key"), + }, + } + + // Old format had only 2 parts: base64(url)|hmac + s.pkceVerifiers.Store("base64url|hmac", pkceEntry{verifier: "test", createdAt: time.Now()}) + + _, _, err := s.ValidateState("base64url|hmac") + require.Error(t, err) + assert.Contains(t, err.Error(), "invalid state format") +} + +func TestValidateState_RejectsInvalidHMAC(t *testing.T) { + s := &ProxyServiceServer{ + oidcConfig: ProxyOIDCConfig{ + HMACKey: []byte("test-hmac-key"), + }, + } + + // Store with tampered HMAC + s.pkceVerifiers.Store("dGVzdA==|nonce|wrong-hmac", pkceEntry{verifier: "test", createdAt: time.Now()}) + + _, _, err := s.ValidateState("dGVzdA==|nonce|wrong-hmac") + require.Error(t, err) + assert.Contains(t, err.Error(), "invalid state signature") +} diff --git a/management/internals/shared/grpc/validate_session_test.go b/management/internals/shared/grpc/validate_session_test.go new file mode 100644 index 000000000..f76d3ada0 --- /dev/null +++ b/management/internals/shared/grpc/validate_session_test.go @@ -0,0 +1,304 @@ +//go:build integration + +package grpc + +import ( + "context" + "crypto/ed25519" + "crypto/rand" + "encoding/base64" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/sessionkey" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/proxy/auth" + "github.com/netbirdio/netbird/shared/management/proto" +) + +type validateSessionTestSetup struct { + proxyService *ProxyServiceServer + store store.Store + cleanup func() +} + +func setupValidateSessionTest(t *testing.T) *validateSessionTestSetup { + t.Helper() + + ctx := context.Background() + testStore, storeCleanup, err := store.NewTestStoreFromSQL(ctx, "../../../server/testdata/auth_callback.sql", t.TempDir()) + require.NoError(t, err) + + proxyManager := &testValidateSessionProxyManager{store: testStore} + usersManager := &testValidateSessionUsersManager{store: testStore} + + proxyService := NewProxyServiceServer(nil, NewOneTimeTokenStore(time.Minute), ProxyOIDCConfig{}, nil, usersManager) + proxyService.SetProxyManager(proxyManager) + + createTestProxies(t, ctx, testStore) + + return &validateSessionTestSetup{ + proxyService: proxyService, + store: testStore, + cleanup: storeCleanup, + } +} + +func createTestProxies(t *testing.T, ctx context.Context, testStore store.Store) { + t.Helper() + + pubKey, privKey := generateSessionKeyPair(t) + + testProxy := &reverseproxy.Service{ + ID: "testProxyId", + AccountID: "testAccountId", + Name: "Test Proxy", + Domain: "test-proxy.example.com", + Enabled: true, + SessionPrivateKey: privKey, + SessionPublicKey: pubKey, + Auth: reverseproxy.AuthConfig{ + BearerAuth: &reverseproxy.BearerAuthConfig{ + Enabled: true, + }, + }, + } + require.NoError(t, testStore.CreateService(ctx, testProxy)) + + restrictedProxy := &reverseproxy.Service{ + ID: "restrictedProxyId", + AccountID: "testAccountId", + Name: "Restricted Proxy", + Domain: "restricted-proxy.example.com", + Enabled: true, + SessionPrivateKey: privKey, + SessionPublicKey: pubKey, + Auth: reverseproxy.AuthConfig{ + BearerAuth: &reverseproxy.BearerAuthConfig{ + Enabled: true, + DistributionGroups: []string{"allowedGroupId"}, + }, + }, + } + require.NoError(t, testStore.CreateService(ctx, restrictedProxy)) +} + +func generateSessionKeyPair(t *testing.T) (string, string) { + t.Helper() + pub, priv, err := ed25519.GenerateKey(rand.Reader) + require.NoError(t, err) + return base64.StdEncoding.EncodeToString(pub), base64.StdEncoding.EncodeToString(priv) +} + +func createSessionToken(t *testing.T, privKeyB64, userID, domain string) string { + t.Helper() + token, err := sessionkey.SignToken(privKeyB64, userID, domain, auth.MethodOIDC, time.Hour) + require.NoError(t, err) + return token +} + +func TestValidateSession_UserAllowed(t *testing.T) { + setup := setupValidateSessionTest(t) + defer setup.cleanup() + + proxy, err := setup.store.GetServiceByID(context.Background(), store.LockingStrengthNone, "testAccountId", "testProxyId") + require.NoError(t, err) + + token := createSessionToken(t, proxy.SessionPrivateKey, "allowedUserId", "test-proxy.example.com") + + resp, err := setup.proxyService.ValidateSession(context.Background(), &proto.ValidateSessionRequest{ + Domain: "test-proxy.example.com", + SessionToken: token, + }) + + require.NoError(t, err) + assert.True(t, resp.Valid, "User should be allowed access") + assert.Equal(t, "allowedUserId", resp.UserId) + assert.Empty(t, resp.DeniedReason) +} + +func TestValidateSession_UserNotInAllowedGroup(t *testing.T) { + setup := setupValidateSessionTest(t) + defer setup.cleanup() + + proxy, err := setup.store.GetServiceByID(context.Background(), store.LockingStrengthNone, "testAccountId", "restrictedProxyId") + require.NoError(t, err) + + token := createSessionToken(t, proxy.SessionPrivateKey, "nonGroupUserId", "restricted-proxy.example.com") + + resp, err := setup.proxyService.ValidateSession(context.Background(), &proto.ValidateSessionRequest{ + Domain: "restricted-proxy.example.com", + SessionToken: token, + }) + + require.NoError(t, err) + assert.False(t, resp.Valid, "User not in group should be denied") + assert.Equal(t, "not_in_group", resp.DeniedReason) + assert.Equal(t, "nonGroupUserId", resp.UserId) +} + +func TestValidateSession_UserInDifferentAccount(t *testing.T) { + setup := setupValidateSessionTest(t) + defer setup.cleanup() + + proxy, err := setup.store.GetServiceByID(context.Background(), store.LockingStrengthNone, "testAccountId", "testProxyId") + require.NoError(t, err) + + token := createSessionToken(t, proxy.SessionPrivateKey, "otherAccountUserId", "test-proxy.example.com") + + resp, err := setup.proxyService.ValidateSession(context.Background(), &proto.ValidateSessionRequest{ + Domain: "test-proxy.example.com", + SessionToken: token, + }) + + require.NoError(t, err) + assert.False(t, resp.Valid, "User in different account should be denied") + assert.Equal(t, "account_mismatch", resp.DeniedReason) +} + +func TestValidateSession_UserNotFound(t *testing.T) { + setup := setupValidateSessionTest(t) + defer setup.cleanup() + + proxy, err := setup.store.GetServiceByID(context.Background(), store.LockingStrengthNone, "testAccountId", "testProxyId") + require.NoError(t, err) + + token := createSessionToken(t, proxy.SessionPrivateKey, "nonExistentUserId", "test-proxy.example.com") + + resp, err := setup.proxyService.ValidateSession(context.Background(), &proto.ValidateSessionRequest{ + Domain: "test-proxy.example.com", + SessionToken: token, + }) + + require.NoError(t, err) + assert.False(t, resp.Valid, "Non-existent user should be denied") + assert.Equal(t, "user_not_found", resp.DeniedReason) +} + +func TestValidateSession_ProxyNotFound(t *testing.T) { + setup := setupValidateSessionTest(t) + defer setup.cleanup() + + proxy, err := setup.store.GetServiceByID(context.Background(), store.LockingStrengthNone, "testAccountId", "testProxyId") + require.NoError(t, err) + + token := createSessionToken(t, proxy.SessionPrivateKey, "allowedUserId", "unknown-proxy.example.com") + + resp, err := setup.proxyService.ValidateSession(context.Background(), &proto.ValidateSessionRequest{ + Domain: "unknown-proxy.example.com", + SessionToken: token, + }) + + require.NoError(t, err) + assert.False(t, resp.Valid, "Unknown proxy should be denied") + assert.Equal(t, "proxy_not_found", resp.DeniedReason) +} + +func TestValidateSession_InvalidToken(t *testing.T) { + setup := setupValidateSessionTest(t) + defer setup.cleanup() + + resp, err := setup.proxyService.ValidateSession(context.Background(), &proto.ValidateSessionRequest{ + Domain: "test-proxy.example.com", + SessionToken: "invalid-token", + }) + + require.NoError(t, err) + assert.False(t, resp.Valid, "Invalid token should be denied") + assert.Equal(t, "invalid_token", resp.DeniedReason) +} + +func TestValidateSession_MissingDomain(t *testing.T) { + setup := setupValidateSessionTest(t) + defer setup.cleanup() + + resp, err := setup.proxyService.ValidateSession(context.Background(), &proto.ValidateSessionRequest{ + SessionToken: "some-token", + }) + + require.NoError(t, err) + assert.False(t, resp.Valid) + assert.Contains(t, resp.DeniedReason, "missing") +} + +func TestValidateSession_MissingToken(t *testing.T) { + setup := setupValidateSessionTest(t) + defer setup.cleanup() + + resp, err := setup.proxyService.ValidateSession(context.Background(), &proto.ValidateSessionRequest{ + Domain: "test-proxy.example.com", + }) + + require.NoError(t, err) + assert.False(t, resp.Valid) + assert.Contains(t, resp.DeniedReason, "missing") +} + +type testValidateSessionProxyManager struct { + store store.Store +} + +func (m *testValidateSessionProxyManager) GetAllServices(_ context.Context, _, _ string) ([]*reverseproxy.Service, error) { + return nil, nil +} + +func (m *testValidateSessionProxyManager) GetService(_ context.Context, _, _, _ string) (*reverseproxy.Service, error) { + return nil, nil +} + +func (m *testValidateSessionProxyManager) CreateService(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { + return nil, nil +} + +func (m *testValidateSessionProxyManager) UpdateService(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { + return nil, nil +} + +func (m *testValidateSessionProxyManager) DeleteService(_ context.Context, _, _, _ string) error { + return nil +} + +func (m *testValidateSessionProxyManager) SetCertificateIssuedAt(_ context.Context, _, _ string) error { + return nil +} + +func (m *testValidateSessionProxyManager) SetStatus(_ context.Context, _, _ string, _ reverseproxy.ProxyStatus) error { + return nil +} + +func (m *testValidateSessionProxyManager) ReloadAllServicesForAccount(_ context.Context, _ string) error { + return nil +} + +func (m *testValidateSessionProxyManager) ReloadService(_ context.Context, _, _ string) error { + return nil +} + +func (m *testValidateSessionProxyManager) GetGlobalServices(ctx context.Context) ([]*reverseproxy.Service, error) { + return m.store.GetServices(ctx, store.LockingStrengthNone) +} + +func (m *testValidateSessionProxyManager) GetServiceByID(ctx context.Context, accountID, proxyID string) (*reverseproxy.Service, error) { + return m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, proxyID) +} + +func (m *testValidateSessionProxyManager) GetAccountServices(ctx context.Context, accountID string) ([]*reverseproxy.Service, error) { + return m.store.GetAccountServices(ctx, store.LockingStrengthNone, accountID) +} + +func (m *testValidateSessionProxyManager) GetServiceIDByTargetID(_ context.Context, _, _ string) (string, error) { + return "", nil +} + +type testValidateSessionUsersManager struct { + store store.Store +} + +func (m *testValidateSessionUsersManager) GetUser(ctx context.Context, userID string) (*types.User, error) { + return m.store.GetUserByUserID(ctx, store.LockingStrengthNone, userID) +} diff --git a/management/server/account.go b/management/server/account.go index a9f59773a..7b858c223 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -15,6 +15,7 @@ import ( "sync" "time" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/shared/auth" @@ -82,8 +83,9 @@ type DefaultAccountManager struct { requestBuffer *AccountRequestBuffer - proxyController port_forwarding.Controller - settingsManager settings.Manager + proxyController port_forwarding.Controller + settingsManager settings.Manager + reverseProxyManager reverseproxy.Manager // config contains the management server configuration config *nbconfig.Config @@ -113,6 +115,10 @@ type DefaultAccountManager struct { var _ account.Manager = (*DefaultAccountManager)(nil) +func (am *DefaultAccountManager) SetServiceManager(serviceManager reverseproxy.Manager) { + am.reverseProxyManager = serviceManager +} + func isUniqueConstraintError(err error) bool { switch { case strings.Contains(err.Error(), "(SQLSTATE 23505)"), @@ -321,6 +327,9 @@ func (am *DefaultAccountManager) UpdateAccountSettings(ctx context.Context, acco if err = am.reallocateAccountPeerIPs(ctx, transaction, accountID, newSettings.NetworkRange); err != nil { return err } + if err = am.reverseProxyManager.ReloadAllServicesForAccount(ctx, accountID); err != nil { + log.WithContext(ctx).Warnf("failed to reload all services for account %s: %v", accountID, err) + } updateAccountPeers = true } diff --git a/management/server/account/manager.go b/management/server/account/manager.go index 1d25b0af7..207ab71d6 100644 --- a/management/server/account/manager.go +++ b/management/server/account/manager.go @@ -6,6 +6,7 @@ import ( "net/netip" "time" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" "github.com/netbirdio/netbird/shared/auth" nbdns "github.com/netbirdio/netbird/dns" @@ -139,4 +140,5 @@ type Manager interface { CreatePeerJob(ctx context.Context, accountID, peerID, userID string, job *types.Job) error GetAllPeerJobs(ctx context.Context, accountID, userID, peerID string) ([]*types.Job, error) GetPeerJobByID(ctx context.Context, accountID, userID, peerID, jobID string) (*types.Job, error) + SetServiceManager(serviceManager reverseproxy.Manager) } diff --git a/management/server/account_test.go b/management/server/account_test.go index 443e6344e..44bb0fb1c 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -27,6 +27,8 @@ import ( "github.com/netbirdio/netbird/management/internals/controllers/network_map/update_channel" "github.com/netbirdio/netbird/management/internals/modules/peers" ephemeral_manager "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + reverseproxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/manager" "github.com/netbirdio/netbird/management/internals/modules/zones" "github.com/netbirdio/netbird/management/internals/server/config" nbAccount "github.com/netbirdio/netbird/management/server/account" @@ -1800,6 +1802,14 @@ func TestAccount_Copy(t *testing.T) { Address: "172.12.6.1/24", }, }, + Services: []*reverseproxy.Service{ + { + ID: "service1", + Name: "test-service", + AccountID: "account1", + Targets: []*reverseproxy.Target{}, + }, + }, NetworkMapCache: &types.NetworkMapBuilder{}, } account.InitOnce() @@ -3112,6 +3122,8 @@ func createManager(t testing.TB) (*DefaultAccountManager, *update_channel.PeersU return nil, nil, err } + manager.SetServiceManager(reverseproxymanager.NewManager(store, manager, permissionsManager, nil, nil)) + return manager, updateManager, nil } diff --git a/management/server/activity/codes.go b/management/server/activity/codes.go index e83eeb90a..e1b7e5300 100644 --- a/management/server/activity/codes.go +++ b/management/server/activity/codes.go @@ -204,6 +204,10 @@ const ( UserInviteLinkRegenerated Activity = 106 UserInviteLinkDeleted Activity = 107 + ServiceCreated Activity = 108 + ServiceUpdated Activity = 109 + ServiceDeleted Activity = 110 + AccountDeleted Activity = 99999 ) @@ -337,6 +341,10 @@ var activityMap = map[Activity]Code{ UserInviteLinkAccepted: {"User invite link accepted", "user.invite.link.accept"}, UserInviteLinkRegenerated: {"User invite link regenerated", "user.invite.link.regenerate"}, UserInviteLinkDeleted: {"User invite link deleted", "user.invite.link.delete"}, + + ServiceCreated: {"Service created", "service.create"}, + ServiceUpdated: {"Service updated", "service.update"}, + ServiceDeleted: {"Service deleted", "service.delete"}, } // StringCode returns a string code of the activity diff --git a/management/server/group_test.go b/management/server/group_test.go index f7cc8d60c..dba917dbb 100644 --- a/management/server/group_test.go +++ b/management/server/group_test.go @@ -703,7 +703,7 @@ func TestGroupAccountPeersUpdate(t *testing.T) { t.Run("saving group linked to network router", func(t *testing.T) { permissionsManager := permissions.NewManager(manager.Store) groupsManager := groups.NewManager(manager.Store, permissionsManager, manager) - resourcesManager := resources.NewManager(manager.Store, permissionsManager, groupsManager, manager) + resourcesManager := resources.NewManager(manager.Store, permissionsManager, groupsManager, manager, manager.reverseProxyManager) routersManager := routers.NewManager(manager.Store, permissionsManager, manager) networksManager := networks.NewManager(manager.Store, permissionsManager, resourcesManager, routersManager, manager) diff --git a/management/server/http/handler.go b/management/server/http/handler.go index 17355d1d9..9d2384cae 100644 --- a/management/server/http/handler.go +++ b/management/server/http/handler.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "net/netip" "os" "strconv" "time" @@ -12,9 +13,19 @@ import ( "github.com/rs/cors" log "github.com/sirupsen/logrus" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain/manager" + + "github.com/netbirdio/netbird/management/server/types" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" + reverseproxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/manager" + + nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" idpmanager "github.com/netbirdio/netbird/management/server/idp" "github.com/netbirdio/management-integrations/integrations" + "github.com/netbirdio/netbird/management/internals/controllers/network_map" "github.com/netbirdio/netbird/management/internals/modules/zones" zonesManager "github.com/netbirdio/netbird/management/internals/modules/zones/manager" @@ -26,6 +37,8 @@ import ( "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" "github.com/netbirdio/netbird/management/server/permissions" + "github.com/netbirdio/netbird/management/server/http/handlers/proxy" + nbpeers "github.com/netbirdio/netbird/management/internals/modules/peers" "github.com/netbirdio/netbird/management/server/auth" "github.com/netbirdio/netbird/management/server/geolocation" @@ -60,7 +73,7 @@ const ( ) // NewAPIHandler creates the Management service HTTP API handler registering all the available endpoints. -func NewAPIHandler(ctx context.Context, accountManager account.Manager, networksManager nbnetworks.Manager, resourceManager resources.Manager, routerManager routers.Manager, groupsManager nbgroups.Manager, LocationManager geolocation.Geolocation, authManager auth.Manager, appMetrics telemetry.AppMetrics, integratedValidator integrated_validator.IntegratedValidator, proxyController port_forwarding.Controller, permissionsManager permissions.Manager, peersManager nbpeers.Manager, settingsManager settings.Manager, zManager zones.Manager, rManager records.Manager, networkMapController network_map.Controller, idpManager idpmanager.Manager) (http.Handler, error) { +func NewAPIHandler(ctx context.Context, accountManager account.Manager, networksManager nbnetworks.Manager, resourceManager resources.Manager, routerManager routers.Manager, groupsManager nbgroups.Manager, LocationManager geolocation.Geolocation, authManager auth.Manager, appMetrics telemetry.AppMetrics, integratedValidator integrated_validator.IntegratedValidator, proxyController port_forwarding.Controller, permissionsManager permissions.Manager, peersManager nbpeers.Manager, settingsManager settings.Manager, zManager zones.Manager, rManager records.Manager, networkMapController network_map.Controller, idpManager idpmanager.Manager, reverseProxyManager reverseproxy.Manager, reverseProxyDomainManager *manager.Manager, reverseProxyAccessLogsManager accesslogs.Manager, proxyGRPCServer *nbgrpc.ProxyServiceServer, trustedHTTPProxies []netip.Prefix) (http.Handler, error) { // Register bypass paths for unauthenticated endpoints if err := bypass.AddBypassPath("/api/instance"); err != nil { @@ -76,6 +89,10 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks if err := bypass.AddBypassPath("/api/users/invites/nbi_*/accept"); err != nil { return nil, fmt.Errorf("failed to add bypass path: %w", err) } + // OAuth callback for proxy authentication + if err := bypass.AddBypassPath(types.ProxyCallbackEndpointFull); err != nil { + return nil, fmt.Errorf("failed to add bypass path: %w", err) + } var rateLimitingConfig *middleware.RateLimiterConfig if os.Getenv(rateLimitingEnabledKey) == "true" { @@ -156,6 +173,15 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks idp.AddEndpoints(accountManager, router) instance.AddEndpoints(instanceManager, router) instance.AddVersionEndpoint(instanceManager, router) + if reverseProxyManager != nil && reverseProxyDomainManager != nil { + reverseproxymanager.RegisterEndpoints(reverseProxyManager, *reverseProxyDomainManager, reverseProxyAccessLogsManager, router) + } + + // Register OAuth callback handler for proxy authentication + if proxyGRPCServer != nil { + oauthHandler := proxy.NewAuthCallbackHandler(proxyGRPCServer, trustedHTTPProxies) + oauthHandler.RegisterEndpoints(router) + } // Mount embedded IdP handler at /oauth2 path if configured if embeddedIdpEnabled { diff --git a/management/server/http/handlers/peers/peers_handler.go b/management/server/http/handlers/peers/peers_handler.go index 0bee7cbab..6b9a69f04 100644 --- a/management/server/http/handlers/peers/peers_handler.go +++ b/management/server/http/handlers/peers/peers_handler.go @@ -154,6 +154,11 @@ func (h *Handler) getPeer(ctx context.Context, accountID, peerID, userID string, return } + if peer.ProxyMeta.Embedded { + util.WriteError(ctx, status.Errorf(status.InvalidArgument, "not allowed to read peer"), w) + return + } + settings, err := h.accountManager.GetAccountSettings(ctx, accountID, activity.SystemInitiator) if err != nil { util.WriteError(ctx, err, w) @@ -321,6 +326,9 @@ func (h *Handler) GetAllPeers(w http.ResponseWriter, r *http.Request) { grpsInfoMap := groups.ToGroupsInfoMap(grps, len(peers)) respBody := make([]*api.PeerBatch, 0, len(peers)) for _, peer := range peers { + if peer.ProxyMeta.Embedded { + continue + } respBody = append(respBody, toPeerListItemResponse(peer, grpsInfoMap[peer.ID], dnsDomain, 0)) } diff --git a/management/server/http/handlers/proxy/auth.go b/management/server/http/handlers/proxy/auth.go new file mode 100644 index 000000000..0120fad0e --- /dev/null +++ b/management/server/http/handlers/proxy/auth.go @@ -0,0 +1,208 @@ +package proxy + +import ( + "context" + "net" + "net/http" + "net/netip" + "net/url" + "strings" + "time" + + "github.com/coreos/go-oidc/v3/oidc" + "github.com/gorilla/mux" + log "github.com/sirupsen/logrus" + "golang.org/x/oauth2" + + nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" + "github.com/netbirdio/netbird/management/server/http/middleware" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/proxy/auth" +) + +// AuthCallbackHandler handles OAuth callbacks for proxy authentication. +type AuthCallbackHandler struct { + proxyService *nbgrpc.ProxyServiceServer + rateLimiter *middleware.APIRateLimiter + trustedProxies []netip.Prefix +} + +// NewAuthCallbackHandler creates a new OAuth callback handler. +func NewAuthCallbackHandler(proxyService *nbgrpc.ProxyServiceServer, trustedProxies []netip.Prefix) *AuthCallbackHandler { + rateLimiterConfig := &middleware.RateLimiterConfig{ + RequestsPerMinute: 10, + Burst: 15, + CleanupInterval: 5 * time.Minute, + LimiterTTL: 10 * time.Minute, + } + + return &AuthCallbackHandler{ + proxyService: proxyService, + rateLimiter: middleware.NewAPIRateLimiter(rateLimiterConfig), + trustedProxies: trustedProxies, + } +} + +// RegisterEndpoints registers the OAuth callback endpoint. +func (h *AuthCallbackHandler) RegisterEndpoints(router *mux.Router) { + router.HandleFunc(types.ProxyCallbackEndpoint, h.handleCallback).Methods(http.MethodGet) +} + +func (h *AuthCallbackHandler) handleCallback(w http.ResponseWriter, r *http.Request) { + clientIP := h.resolveClientIP(r) + if !h.rateLimiter.Allow(clientIP) { + log.WithField("client_ip", clientIP).Warn("OAuth callback rate limit exceeded") + http.Error(w, "Too many requests. Please try again later.", http.StatusTooManyRequests) + return + } + + state := r.URL.Query().Get("state") + + codeVerifier, originalURL, err := h.proxyService.ValidateState(state) + if err != nil { + log.WithError(err).Error("OAuth callback state validation failed") + http.Error(w, "Invalid state parameter", http.StatusBadRequest) + return + } + + redirectURL, err := url.Parse(originalURL) + if err != nil { + log.WithError(err).Error("Failed to parse redirect URL") + http.Error(w, "Invalid redirect URL", http.StatusBadRequest) + return + } + + oidcConfig := h.proxyService.GetOIDCConfig() + + provider, err := oidc.NewProvider(r.Context(), oidcConfig.Issuer) + if err != nil { + log.WithError(err).Error("Failed to create OIDC provider") + http.Error(w, "Failed to create OIDC provider", http.StatusInternalServerError) + return + } + + token, err := (&oauth2.Config{ + ClientID: oidcConfig.ClientID, + Endpoint: provider.Endpoint(), + RedirectURL: oidcConfig.CallbackURL, + }).Exchange(r.Context(), r.URL.Query().Get("code"), oauth2.VerifierOption(codeVerifier)) + if err != nil { + log.WithError(err).Error("Failed to exchange code for token") + http.Error(w, "Failed to exchange code for token", http.StatusInternalServerError) + return + } + + userID := extractUserIDFromToken(r.Context(), provider, oidcConfig, token) + if userID == "" { + log.Error("Failed to extract user ID from OIDC token") + http.Error(w, "Failed to validate token", http.StatusUnauthorized) + return + } + + // Group validation is performed by the proxy via ValidateSession gRPC call. + // This allows the proxy to show 403 pages directly without redirect dance. + + sessionToken, err := h.proxyService.GenerateSessionToken(r.Context(), redirectURL.Hostname(), userID, auth.MethodOIDC) + if err != nil { + log.WithError(err).Error("Failed to create session token") + redirectURL.Scheme = "https" + query := redirectURL.Query() + query.Set("error", "access_denied") + query.Set("error_description", "Service configuration error") + redirectURL.RawQuery = query.Encode() + http.Redirect(w, r, redirectURL.String(), http.StatusFound) + return + } + + redirectURL.Scheme = "https" + + query := redirectURL.Query() + query.Set("session_token", sessionToken) + redirectURL.RawQuery = query.Encode() + + log.WithField("redirect", redirectURL.Host).Debug("OAuth callback: redirecting user with session token") + http.Redirect(w, r, redirectURL.String(), http.StatusFound) +} + +func extractUserIDFromToken(ctx context.Context, provider *oidc.Provider, config nbgrpc.ProxyOIDCConfig, token *oauth2.Token) string { + rawIDToken, ok := token.Extra("id_token").(string) + if !ok { + log.Warn("No id_token in OIDC response") + return "" + } + + verifier := provider.Verifier(&oidc.Config{ + ClientID: config.ClientID, + }) + + idToken, err := verifier.Verify(ctx, rawIDToken) + if err != nil { + log.WithError(err).Warn("Failed to verify ID token") + return "" + } + + var claims struct { + Subject string `json:"sub"` + } + if err := idToken.Claims(&claims); err != nil { + log.WithError(err).Warn("Failed to extract claims from ID token") + return "" + } + + return claims.Subject +} + +// resolveClientIP extracts the real client IP from the request. +// When trustedProxies is non-empty and the direct peer is trusted, +// it walks X-Forwarded-For right-to-left skipping trusted IPs. +// Otherwise it returns RemoteAddr directly. +func (h *AuthCallbackHandler) resolveClientIP(r *http.Request) string { + remoteIP := extractHost(r.RemoteAddr) + + if len(h.trustedProxies) == 0 || !isTrustedProxy(remoteIP, h.trustedProxies) { + return remoteIP + } + + xff := r.Header.Get("X-Forwarded-For") + if xff == "" { + return remoteIP + } + + parts := strings.Split(xff, ",") + for i := len(parts) - 1; i >= 0; i-- { + ip := strings.TrimSpace(parts[i]) + if ip == "" { + continue + } + if !isTrustedProxy(ip, h.trustedProxies) { + return ip + } + } + + // All IPs in XFF are trusted; return the leftmost as best guess. + if first := strings.TrimSpace(parts[0]); first != "" { + return first + } + return remoteIP +} + +func extractHost(remoteAddr string) string { + host, _, err := net.SplitHostPort(remoteAddr) + if err != nil { + return remoteAddr + } + return host +} + +func isTrustedProxy(ipStr string, trusted []netip.Prefix) bool { + addr, err := netip.ParseAddr(ipStr) + if err != nil { + return false + } + for _, prefix := range trusted { + if prefix.Contains(addr) { + return true + } + } + return false +} diff --git a/management/server/http/handlers/proxy/auth_callback_integration_test.go b/management/server/http/handlers/proxy/auth_callback_integration_test.go new file mode 100644 index 000000000..0a9a560cd --- /dev/null +++ b/management/server/http/handlers/proxy/auth_callback_integration_test.go @@ -0,0 +1,523 @@ +//go:build integration + +package proxy + +import ( + "context" + "crypto/ed25519" + "crypto/rand" + "encoding/base64" + "encoding/json" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "github.com/golang-jwt/jwt/v5" + "github.com/gorilla/mux" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" + nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/management/server/users" + "github.com/netbirdio/netbird/shared/management/proto" +) + +// fakeOIDCServer creates a minimal OIDC provider for testing. +type fakeOIDCServer struct { + server *httptest.Server + issuer string + signingKey ed25519.PrivateKey + publicKey ed25519.PublicKey + keyID string + tokenSubject string + tokenExpiry time.Duration + failExchange bool +} + +func newFakeOIDCServer() *fakeOIDCServer { + pub, priv, _ := ed25519.GenerateKey(rand.Reader) + f := &fakeOIDCServer{ + signingKey: priv, + publicKey: pub, + keyID: "test-key-1", + tokenExpiry: time.Hour, + } + f.server = httptest.NewServer(f) + f.issuer = f.server.URL + return f +} + +func (f *fakeOIDCServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/.well-known/openid-configuration": + f.handleDiscovery(w, r) + case "/token": + f.handleToken(w, r) + case "/keys": + f.handleJWKS(w, r) + default: + http.NotFound(w, r) + } +} + +func (f *fakeOIDCServer) handleDiscovery(w http.ResponseWriter, _ *http.Request) { + discovery := map[string]interface{}{ + "issuer": f.issuer, + "authorization_endpoint": f.issuer + "/auth", + "token_endpoint": f.issuer + "/token", + "jwks_uri": f.issuer + "/keys", + "response_types_supported": []string{ + "code", + "id_token", + "token id_token", + }, + "subject_types_supported": []string{"public"}, + "id_token_signing_alg_values_supported": []string{"EdDSA"}, + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(discovery) +} + +func (f *fakeOIDCServer) handleToken(w http.ResponseWriter, r *http.Request) { + if f.failExchange { + http.Error(w, "invalid_grant", http.StatusBadRequest) + return + } + + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + + idToken := f.createIDToken() + + response := map[string]interface{}{ + "access_token": "test-access-token", + "token_type": "Bearer", + "expires_in": 3600, + "id_token": idToken, + "refresh_token": "test-refresh-token", + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(response) +} + +func (f *fakeOIDCServer) createIDToken() string { + now := time.Now() + claims := jwt.MapClaims{ + "iss": f.issuer, + "sub": f.tokenSubject, + "aud": "test-client-id", + "exp": now.Add(f.tokenExpiry).Unix(), + "iat": now.Unix(), + "nbf": now.Unix(), + } + + token := jwt.NewWithClaims(jwt.SigningMethodEdDSA, claims) + token.Header["kid"] = f.keyID + signed, _ := token.SignedString(f.signingKey) + return signed +} + +func (f *fakeOIDCServer) handleJWKS(w http.ResponseWriter, _ *http.Request) { + jwks := map[string]interface{}{ + "keys": []map[string]interface{}{ + { + "kty": "OKP", + "crv": "Ed25519", + "kid": f.keyID, + "x": base64.RawURLEncoding.EncodeToString(f.publicKey), + "use": "sig", + }, + }, + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(jwks) +} + +func (f *fakeOIDCServer) Close() { + f.server.Close() +} + +// testSetup contains all test dependencies. +type testSetup struct { + store store.Store + oidcServer *fakeOIDCServer + proxyService *nbgrpc.ProxyServiceServer + handler *AuthCallbackHandler + router *mux.Router + cleanup func() +} + +// testAccessLogManager is a minimal mock for accesslogs.Manager. +type testAccessLogManager struct{} + +func (m *testAccessLogManager) SaveAccessLog(_ context.Context, _ *accesslogs.AccessLogEntry) error { + return nil +} + +func (m *testAccessLogManager) GetAllAccessLogs(_ context.Context, _, _ string, _ *accesslogs.AccessLogFilter) ([]*accesslogs.AccessLogEntry, int64, error) { + return nil, 0, nil +} + +func setupAuthCallbackTest(t *testing.T) *testSetup { + t.Helper() + + ctx := context.Background() + + testStore, cleanup, err := store.NewTestStoreFromSQL(ctx, "", t.TempDir()) + require.NoError(t, err) + + createTestAccountsAndUsers(t, ctx, testStore) + createTestReverseProxies(t, ctx, testStore) + + oidcServer := newFakeOIDCServer() + + tokenStore := nbgrpc.NewOneTimeTokenStore(time.Minute) + + usersManager := users.NewManager(testStore) + + oidcConfig := nbgrpc.ProxyOIDCConfig{ + Issuer: oidcServer.issuer, + ClientID: "test-client-id", + Scopes: []string{"openid", "profile", "email"}, + CallbackURL: "https://management.example.com/reverse-proxy/callback", + HMACKey: []byte("test-hmac-key-for-state-signing"), + } + + proxyService := nbgrpc.NewProxyServiceServer( + &testAccessLogManager{}, + tokenStore, + oidcConfig, + nil, + usersManager, + ) + + proxyService.SetProxyManager(&testServiceManager{store: testStore}) + + handler := NewAuthCallbackHandler(proxyService, nil) + + router := mux.NewRouter() + handler.RegisterEndpoints(router) + + return &testSetup{ + store: testStore, + oidcServer: oidcServer, + proxyService: proxyService, + handler: handler, + router: router, + cleanup: func() { + cleanup() + oidcServer.Close() + }, + } +} + +func createTestReverseProxies(t *testing.T, ctx context.Context, testStore store.Store) { + t.Helper() + + pub, priv, err := ed25519.GenerateKey(rand.Reader) + require.NoError(t, err) + + pubKey := base64.StdEncoding.EncodeToString(pub) + privKey := base64.StdEncoding.EncodeToString(priv) + + testProxy := &reverseproxy.Service{ + ID: "testProxyId", + AccountID: "testAccountId", + Name: "Test Proxy", + Domain: "test-proxy.example.com", + Targets: []*reverseproxy.Target{{ + Path: strPtr("/"), + Host: "localhost", + Port: 8080, + Protocol: "http", + TargetId: "peer1", + TargetType: "peer", + Enabled: true, + }}, + Enabled: true, + Auth: reverseproxy.AuthConfig{ + BearerAuth: &reverseproxy.BearerAuthConfig{ + Enabled: true, + DistributionGroups: []string{"allowedGroupId"}, + }, + }, + SessionPrivateKey: privKey, + SessionPublicKey: pubKey, + } + require.NoError(t, testStore.CreateService(ctx, testProxy)) + + restrictedProxy := &reverseproxy.Service{ + ID: "restrictedProxyId", + AccountID: "testAccountId", + Name: "Restricted Proxy", + Domain: "restricted-proxy.example.com", + Targets: []*reverseproxy.Target{{ + Path: strPtr("/"), + Host: "localhost", + Port: 8080, + Protocol: "http", + TargetId: "peer1", + TargetType: "peer", + Enabled: true, + }}, + Enabled: true, + Auth: reverseproxy.AuthConfig{ + BearerAuth: &reverseproxy.BearerAuthConfig{ + Enabled: true, + DistributionGroups: []string{"restrictedGroupId"}, + }, + }, + SessionPrivateKey: privKey, + SessionPublicKey: pubKey, + } + require.NoError(t, testStore.CreateService(ctx, restrictedProxy)) + + noAuthProxy := &reverseproxy.Service{ + ID: "noAuthProxyId", + AccountID: "testAccountId", + Name: "No Auth Proxy", + Domain: "no-auth-proxy.example.com", + Targets: []*reverseproxy.Target{{ + Path: strPtr("/"), + Host: "localhost", + Port: 8080, + Protocol: "http", + TargetId: "peer1", + TargetType: "peer", + Enabled: true, + }}, + Enabled: true, + Auth: reverseproxy.AuthConfig{ + BearerAuth: &reverseproxy.BearerAuthConfig{ + Enabled: false, + }, + }, + SessionPrivateKey: privKey, + SessionPublicKey: pubKey, + } + require.NoError(t, testStore.CreateService(ctx, noAuthProxy)) +} + +func strPtr(s string) *string { + return &s +} + +func createTestAccountsAndUsers(t *testing.T, ctx context.Context, testStore store.Store) { + t.Helper() + + testAccount := &types.Account{ + Id: "testAccountId", + Domain: "test.com", + DomainCategory: "private", + IsDomainPrimaryAccount: true, + CreatedAt: time.Now(), + } + require.NoError(t, testStore.SaveAccount(ctx, testAccount)) + + allowedGroup := &types.Group{ + ID: "allowedGroupId", + AccountID: "testAccountId", + Name: "Allowed Group", + Issued: "api", + } + require.NoError(t, testStore.CreateGroup(ctx, allowedGroup)) + + allowedUser := &types.User{ + Id: "allowedUserId", + AccountID: "testAccountId", + Role: types.UserRoleUser, + AutoGroups: []string{"allowedGroupId"}, + CreatedAt: time.Now(), + Issued: "api", + } + require.NoError(t, testStore.SaveUser(ctx, allowedUser)) +} + +// testServiceManager is a minimal implementation for testing. +type testServiceManager struct { + store store.Store +} + +func (m *testServiceManager) GetAllServices(_ context.Context, _, _ string) ([]*reverseproxy.Service, error) { + return nil, nil +} + +func (m *testServiceManager) GetService(_ context.Context, _, _, _ string) (*reverseproxy.Service, error) { + return nil, nil +} + +func (m *testServiceManager) CreateService(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { + return nil, nil +} + +func (m *testServiceManager) UpdateService(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { + return nil, nil +} + +func (m *testServiceManager) DeleteService(_ context.Context, _, _, _ string) error { + return nil +} + +func (m *testServiceManager) SetCertificateIssuedAt(_ context.Context, _, _ string) error { + return nil +} + +func (m *testServiceManager) SetStatus(_ context.Context, _, _ string, _ reverseproxy.ProxyStatus) error { + return nil +} + +func (m *testServiceManager) ReloadAllServicesForAccount(_ context.Context, _ string) error { + return nil +} + +func (m *testServiceManager) ReloadService(_ context.Context, _, _ string) error { + return nil +} + +func (m *testServiceManager) GetGlobalServices(ctx context.Context) ([]*reverseproxy.Service, error) { + return m.store.GetServices(ctx, store.LockingStrengthNone) +} + +func (m *testServiceManager) GetServiceByID(ctx context.Context, accountID, proxyID string) (*reverseproxy.Service, error) { + return m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, proxyID) +} + +func (m *testServiceManager) GetAccountServices(ctx context.Context, accountID string) ([]*reverseproxy.Service, error) { + return m.store.GetAccountServices(ctx, store.LockingStrengthNone, accountID) +} + +func (m *testServiceManager) GetServiceIDByTargetID(_ context.Context, _, _ string) (string, error) { + return "", nil +} + +func createTestState(t *testing.T, ps *nbgrpc.ProxyServiceServer, redirectURL string) string { + t.Helper() + + resp, err := ps.GetOIDCURL(context.Background(), &proto.GetOIDCURLRequest{ + RedirectUrl: redirectURL, + AccountId: "testAccountId", + }) + require.NoError(t, err) + + parsedURL, err := url.Parse(resp.Url) + require.NoError(t, err) + + return parsedURL.Query().Get("state") +} + +func TestAuthCallback_UserAllowedToLogin(t *testing.T) { + setup := setupAuthCallbackTest(t) + defer setup.cleanup() + + setup.oidcServer.tokenSubject = "allowedUserId" + + state := createTestState(t, setup.proxyService, "https://test-proxy.example.com/dashboard") + + req := httptest.NewRequest(http.MethodGet, "/reverse-proxy/callback?code=test-auth-code&state="+url.QueryEscape(state), nil) + rec := httptest.NewRecorder() + + setup.router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusFound, rec.Code) + + location := rec.Header().Get("Location") + require.NotEmpty(t, location) + + parsedLocation, err := url.Parse(location) + require.NoError(t, err) + + require.Equal(t, "test-proxy.example.com", parsedLocation.Host) + require.NotEmpty(t, parsedLocation.Query().Get("session_token"), "Should include session token") + require.Empty(t, parsedLocation.Query().Get("error"), "Should not have error parameter") +} + +func TestAuthCallback_ProxyNotFound(t *testing.T) { + setup := setupAuthCallbackTest(t) + defer setup.cleanup() + + setup.oidcServer.tokenSubject = "allowedUserId" + + state := createTestState(t, setup.proxyService, "https://test-proxy.example.com/") + + require.NoError(t, setup.store.DeleteService(context.Background(), "testAccountId", "testProxyId")) + + req := httptest.NewRequest(http.MethodGet, "/reverse-proxy/callback?code=test-auth-code&state="+url.QueryEscape(state), nil) + rec := httptest.NewRecorder() + + setup.router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusFound, rec.Code) + + location := rec.Header().Get("Location") + parsedLocation, err := url.Parse(location) + require.NoError(t, err) + + require.Equal(t, "access_denied", parsedLocation.Query().Get("error")) +} + +func TestAuthCallback_InvalidToken(t *testing.T) { + setup := setupAuthCallbackTest(t) + defer setup.cleanup() + + setup.oidcServer.failExchange = true + + state := createTestState(t, setup.proxyService, "https://test-proxy.example.com/") + + req := httptest.NewRequest(http.MethodGet, "/reverse-proxy/callback?code=invalid-code&state="+url.QueryEscape(state), nil) + rec := httptest.NewRecorder() + + setup.router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusInternalServerError, rec.Code) + require.Contains(t, rec.Body.String(), "Failed to exchange code") +} + +func TestAuthCallback_ExpiredToken(t *testing.T) { + setup := setupAuthCallbackTest(t) + defer setup.cleanup() + + setup.oidcServer.tokenSubject = "allowedUserId" + setup.oidcServer.tokenExpiry = -time.Hour + + state := createTestState(t, setup.proxyService, "https://test-proxy.example.com/") + + req := httptest.NewRequest(http.MethodGet, "/reverse-proxy/callback?code=test-auth-code&state="+url.QueryEscape(state), nil) + rec := httptest.NewRecorder() + + setup.router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusUnauthorized, rec.Code) + require.Contains(t, rec.Body.String(), "Failed to validate token") +} + +func TestAuthCallback_InvalidState(t *testing.T) { + setup := setupAuthCallbackTest(t) + defer setup.cleanup() + + req := httptest.NewRequest(http.MethodGet, "/reverse-proxy/callback?code=test-auth-code&state=invalid-state", nil) + rec := httptest.NewRecorder() + + setup.router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusBadRequest, rec.Code) + require.Contains(t, rec.Body.String(), "Invalid state") +} + +func TestAuthCallback_MissingState(t *testing.T) { + setup := setupAuthCallbackTest(t) + defer setup.cleanup() + + req := httptest.NewRequest(http.MethodGet, "/reverse-proxy/callback?code=test-auth-code", nil) + rec := httptest.NewRecorder() + + setup.router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusBadRequest, rec.Code) +} diff --git a/management/server/http/handlers/proxy/auth_test.go b/management/server/http/handlers/proxy/auth_test.go new file mode 100644 index 000000000..360405474 --- /dev/null +++ b/management/server/http/handlers/proxy/auth_test.go @@ -0,0 +1,185 @@ +package proxy + +import ( + "net/http" + "net/http/httptest" + "net/netip" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" +) + +func TestAuthCallbackHandler_RateLimiting(t *testing.T) { + handler := NewAuthCallbackHandler(&nbgrpc.ProxyServiceServer{}, nil) + require.NotNil(t, handler.rateLimiter, "Rate limiter should be initialized") + + req := httptest.NewRequest(http.MethodGet, "/callback?state=test&code=test", nil) + req.RemoteAddr = "192.168.1.100:12345" + + t.Run("allows requests under limit", func(t *testing.T) { + for i := 0; i < 15; i++ { + allowed := handler.rateLimiter.Allow("192.168.1.100") + assert.True(t, allowed, "Request %d should be allowed", i+1) + } + }) + + t.Run("blocks requests over limit", func(t *testing.T) { + handler.rateLimiter.Reset("192.168.1.200") + + for i := 0; i < 15; i++ { + handler.rateLimiter.Allow("192.168.1.200") + } + + allowed := handler.rateLimiter.Allow("192.168.1.200") + assert.False(t, allowed, "Request over limit should be blocked") + }) + + t.Run("different IPs have separate limits", func(t *testing.T) { + ip1 := "192.168.1.201" + ip2 := "192.168.1.202" + + handler.rateLimiter.Reset(ip1) + handler.rateLimiter.Reset(ip2) + + for i := 0; i < 15; i++ { + handler.rateLimiter.Allow(ip1) + } + + assert.False(t, handler.rateLimiter.Allow(ip1), "IP1 should be blocked") + + assert.True(t, handler.rateLimiter.Allow(ip2), "IP2 should be allowed") + }) +} + +func TestAuthCallbackHandler_RateLimitInHandleCallback(t *testing.T) { + handler := NewAuthCallbackHandler(&nbgrpc.ProxyServiceServer{}, nil) + testIP := "10.0.0.50" + + handler.rateLimiter.Reset(testIP) + + t.Run("returns 429 when rate limited", func(t *testing.T) { + for i := 0; i < 15; i++ { + handler.rateLimiter.Allow(testIP) + } + + req := httptest.NewRequest(http.MethodGet, "/callback?state=test&code=test", nil) + req.RemoteAddr = testIP + ":12345" + + rr := httptest.NewRecorder() + handler.handleCallback(rr, req) + + assert.Equal(t, http.StatusTooManyRequests, rr.Code, "Should return 429 status code") + assert.Contains(t, rr.Body.String(), "Too many requests", "Should contain rate limit message") + }) +} + +func TestResolveClientIP(t *testing.T) { + trusted := []netip.Prefix{ + netip.MustParsePrefix("10.0.0.0/8"), + netip.MustParsePrefix("172.16.0.0/12"), + } + + tests := []struct { + name string + remoteAddr string + xForwardedFor string + trustedProxy []netip.Prefix + expectedIP string + }{ + { + name: "no trusted proxies returns RemoteAddr", + remoteAddr: "203.0.113.50:9999", + xForwardedFor: "1.2.3.4", + trustedProxy: nil, + expectedIP: "203.0.113.50", + }, + { + name: "untrusted RemoteAddr ignores XFF", + remoteAddr: "203.0.113.50:9999", + xForwardedFor: "1.2.3.4, 10.0.0.1", + trustedProxy: trusted, + expectedIP: "203.0.113.50", + }, + { + name: "trusted RemoteAddr with single client in XFF", + remoteAddr: "10.0.0.1:5000", + xForwardedFor: "203.0.113.50", + trustedProxy: trusted, + expectedIP: "203.0.113.50", + }, + { + name: "trusted RemoteAddr walks past trusted entries in XFF", + remoteAddr: "10.0.0.1:5000", + xForwardedFor: "203.0.113.50, 10.0.0.2, 172.16.0.5", + trustedProxy: trusted, + expectedIP: "203.0.113.50", + }, + { + name: "trusted RemoteAddr with empty XFF falls back to RemoteAddr", + remoteAddr: "10.0.0.1:5000", + trustedProxy: trusted, + expectedIP: "10.0.0.1", + }, + { + name: "all XFF IPs trusted returns leftmost", + remoteAddr: "10.0.0.1:5000", + xForwardedFor: "10.0.0.2, 172.16.0.1, 10.0.0.3", + trustedProxy: trusted, + expectedIP: "10.0.0.2", + }, + { + name: "XFF with whitespace", + remoteAddr: "10.0.0.1:5000", + xForwardedFor: " 203.0.113.50 , 10.0.0.2 ", + trustedProxy: trusted, + expectedIP: "203.0.113.50", + }, + { + name: "multi-hop with mixed trust", + remoteAddr: "10.0.0.1:5000", + xForwardedFor: "8.8.8.8, 203.0.113.50, 172.16.0.1", + trustedProxy: trusted, + expectedIP: "203.0.113.50", + }, + { + name: "RemoteAddr without port", + remoteAddr: "192.168.1.100", + expectedIP: "192.168.1.100", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + handler := NewAuthCallbackHandler(&nbgrpc.ProxyServiceServer{}, tt.trustedProxy) + + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.RemoteAddr = tt.remoteAddr + if tt.xForwardedFor != "" { + req.Header.Set("X-Forwarded-For", tt.xForwardedFor) + } + + ip := handler.resolveClientIP(req) + assert.Equal(t, tt.expectedIP, ip) + }) + } +} + +func TestAuthCallbackHandler_RateLimiterConfiguration(t *testing.T) { + handler := NewAuthCallbackHandler(&nbgrpc.ProxyServiceServer{}, nil) + + require.NotNil(t, handler.rateLimiter, "Rate limiter should be initialized") + + testIP := "192.168.1.250" + handler.rateLimiter.Reset(testIP) + + for i := 0; i < 15; i++ { + allowed := handler.rateLimiter.Allow(testIP) + assert.True(t, allowed, "Should allow request %d within burst limit", i+1) + } + + allowed := handler.rateLimiter.Allow(testIP) + assert.False(t, allowed, "Should block request that exceeds burst limit") +} diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index 1fd4c9bad..f5c2aafa6 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -10,6 +10,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/netbirdio/management-integrations/integrations" + accesslogsmanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs/manager" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain/manager" + reverseproxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/manager" + nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" zonesManager "github.com/netbirdio/netbird/management/internals/modules/zones/manager" recordsManager "github.com/netbirdio/netbird/management/internals/modules/zones/records/manager" @@ -86,6 +90,14 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee t.Fatalf("Failed to create manager: %v", err) } + accessLogsManager := accesslogsmanager.NewManager(store, permissionsManager, nil) + proxyTokenStore := nbgrpc.NewOneTimeTokenStore(1 * time.Minute) + proxyServiceServer := nbgrpc.NewProxyServiceServer(accessLogsManager, proxyTokenStore, nbgrpc.ProxyOIDCConfig{}, peersManager, userManager) + domainManager := manager.NewManager(store, proxyServiceServer, permissionsManager) + reverseProxyManager := reverseproxymanager.NewManager(store, am, permissionsManager, proxyServiceServer, domainManager) + proxyServiceServer.SetProxyManager(reverseProxyManager) + am.SetServiceManager(reverseProxyManager) + // @note this is required so that PAT's validate from store, but JWT's are mocked authManager := serverauth.NewManager(store, "", "", "", "", []string{}, false) authManagerMock := &serverauth.MockManager{ @@ -102,7 +114,7 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee customZonesManager := zonesManager.NewManager(store, am, permissionsManager, "") zoneRecordsManager := recordsManager.NewManager(store, am, permissionsManager) - apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManagerMock, resourcesManagerMock, routersManagerMock, groupsManagerMock, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil) + apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManagerMock, resourcesManagerMock, routersManagerMock, groupsManagerMock, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, reverseProxyManager, nil, nil, nil, nil) if err != nil { t.Fatalf("Failed to create API handler: %v", err) } diff --git a/management/server/idp/auth0.go b/management/server/idp/auth0.go index 0d4461e89..7d3837190 100644 --- a/management/server/idp/auth0.go +++ b/management/server/idp/auth0.go @@ -135,7 +135,7 @@ func NewAuth0Manager(config Auth0ClientConfig, appMetrics telemetry.AppMetrics) httpTransport := http.DefaultTransport.(*http.Transport).Clone() httpTransport.MaxIdleConns = 5 - httpClient := &http.Client{ + httpClient := &http.Client{ Timeout: idpTimeout(), Transport: httpTransport, } diff --git a/management/server/idp/authentik.go b/management/server/idp/authentik.go index 0f30cc63d..ebd79b715 100644 --- a/management/server/idp/authentik.go +++ b/management/server/idp/authentik.go @@ -56,7 +56,7 @@ func NewAuthentikManager(config AuthentikClientConfig, appMetrics telemetry.AppM Timeout: idpTimeout(), Transport: httpTransport, } - + helper := JsonParser{} if config.ClientID == "" { diff --git a/management/server/idp/azure.go b/management/server/idp/azure.go index e098424b5..320ca7a83 100644 --- a/management/server/idp/azure.go +++ b/management/server/idp/azure.go @@ -57,11 +57,11 @@ func NewAzureManager(config AzureClientConfig, appMetrics telemetry.AppMetrics) httpTransport := http.DefaultTransport.(*http.Transport).Clone() httpTransport.MaxIdleConns = 5 - httpClient := &http.Client{ + httpClient := &http.Client{ Timeout: idpTimeout(), Transport: httpTransport, } - + helper := JsonParser{} if config.ClientID == "" { diff --git a/management/server/idp/embedded.go b/management/server/idp/embedded.go index a27050a26..8ab4ce0dc 100644 --- a/management/server/idp/embedded.go +++ b/management/server/idp/embedded.go @@ -91,6 +91,12 @@ func (c *EmbeddedIdPConfig) ToYAMLConfig() (*dex.YAMLConfig, error) { cliRedirectURIs = append(cliRedirectURIs, "/device/callback") cliRedirectURIs = append(cliRedirectURIs, c.Issuer+"/device/callback") + // Build dashboard redirect URIs including the OAuth callback for proxy authentication + dashboardRedirectURIs := c.DashboardRedirectURIs + baseURL := strings.TrimSuffix(c.Issuer, "/oauth2") + // todo: resolve import cycle + dashboardRedirectURIs = append(dashboardRedirectURIs, baseURL+"/api/reverse-proxy/callback") + cfg := &dex.YAMLConfig{ Issuer: c.Issuer, Storage: dex.Storage{ @@ -118,7 +124,7 @@ func (c *EmbeddedIdPConfig) ToYAMLConfig() (*dex.YAMLConfig, error) { ID: staticClientDashboard, Name: "NetBird Dashboard", Public: true, - RedirectURIs: c.DashboardRedirectURIs, + RedirectURIs: dashboardRedirectURIs, }, { ID: staticClientCLI, diff --git a/management/server/idp/google_workspace.go b/management/server/idp/google_workspace.go index 6e417d394..48e4f3000 100644 --- a/management/server/idp/google_workspace.go +++ b/management/server/idp/google_workspace.go @@ -51,7 +51,7 @@ func NewGoogleWorkspaceManager(ctx context.Context, config GoogleWorkspaceClient Timeout: idpTimeout(), Transport: httpTransport, } - + helper := JsonParser{} if config.CustomerID == "" { diff --git a/management/server/idp/keycloak.go b/management/server/idp/keycloak.go index b640f7520..1cf26394f 100644 --- a/management/server/idp/keycloak.go +++ b/management/server/idp/keycloak.go @@ -66,7 +66,7 @@ func NewKeycloakManager(config KeycloakClientConfig, appMetrics telemetry.AppMet Timeout: idpTimeout(), Transport: httpTransport, } - + helper := JsonParser{} if config.ClientID == "" { diff --git a/management/server/idp/pocketid.go b/management/server/idp/pocketid.go index ee8e304ee..fc338b86b 100644 --- a/management/server/idp/pocketid.go +++ b/management/server/idp/pocketid.go @@ -90,7 +90,7 @@ func NewPocketIdManager(config PocketIdClientConfig, appMetrics telemetry.AppMet Timeout: idpTimeout(), Transport: httpTransport, } - + helper := JsonParser{} if config.ManagementEndpoint == "" { diff --git a/management/server/idp/util.go b/management/server/idp/util.go index 4310d1388..ed82fb9e3 100644 --- a/management/server/idp/util.go +++ b/management/server/idp/util.go @@ -76,7 +76,7 @@ const ( // Provides the env variable name for use with idpTimeout function idpTimeoutEnv = "NB_IDP_TIMEOUT" // Sets the defaultTimeout to 10s. - defaultTimeout = 10 * time.Second + defaultTimeout = 10 * time.Second ) // idpTimeout returns a timeout value for the IDP diff --git a/management/server/idp/zitadel.go b/management/server/idp/zitadel.go index ea0fd0aa7..320f0c131 100644 --- a/management/server/idp/zitadel.go +++ b/management/server/idp/zitadel.go @@ -167,7 +167,7 @@ func NewZitadelManager(config ZitadelClientConfig, appMetrics telemetry.AppMetri Timeout: idpTimeout(), Transport: httpTransport, } - + helper := JsonParser{} hasPAT := config.PAT != "" diff --git a/management/server/mock_server/account_mock.go b/management/server/mock_server/account_mock.go index 8471d0a94..032b1150f 100644 --- a/management/server/mock_server/account_mock.go +++ b/management/server/mock_server/account_mock.go @@ -12,6 +12,7 @@ import ( "google.golang.org/grpc/status" nbdns "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" "github.com/netbirdio/netbird/management/server/account" "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/idp" @@ -147,6 +148,10 @@ type MockAccountManager struct { DeleteUserInviteFunc func(ctx context.Context, accountID, initiatorUserID, inviteID string) error } +func (am *MockAccountManager) SetServiceManager(serviceManager reverseproxy.Manager) { + // Mock implementation - no-op +} + func (am *MockAccountManager) CreatePeerJob(ctx context.Context, accountID, peerID, userID string, job *types.Job) error { if am.CreatePeerJobFunc != nil { return am.CreatePeerJobFunc(ctx, accountID, peerID, userID, job) diff --git a/management/server/networks/manager_test.go b/management/server/networks/manager_test.go index bf196fcb3..6fb19d157 100644 --- a/management/server/networks/manager_test.go +++ b/management/server/networks/manager_test.go @@ -29,7 +29,7 @@ func Test_GetAllNetworksReturnsNetworks(t *testing.T) { permissionsManager := permissions.NewManager(s) groupsManager := groups.NewManagerMock() routerManager := routers.NewManagerMock() - resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am) + resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am, nil) manager := NewManager(s, permissionsManager, resourcesManager, routerManager, &am) networks, err := manager.GetAllNetworks(ctx, accountID, userID) @@ -52,7 +52,7 @@ func Test_GetAllNetworksReturnsPermissionDenied(t *testing.T) { permissionsManager := permissions.NewManager(s) groupsManager := groups.NewManagerMock() routerManager := routers.NewManagerMock() - resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am) + resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am, nil) manager := NewManager(s, permissionsManager, resourcesManager, routerManager, &am) networks, err := manager.GetAllNetworks(ctx, accountID, userID) @@ -75,7 +75,7 @@ func Test_GetNetworkReturnsNetwork(t *testing.T) { permissionsManager := permissions.NewManager(s) groupsManager := groups.NewManagerMock() routerManager := routers.NewManagerMock() - resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am) + resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am, nil) manager := NewManager(s, permissionsManager, resourcesManager, routerManager, &am) networks, err := manager.GetNetwork(ctx, accountID, userID, networkID) @@ -98,7 +98,7 @@ func Test_GetNetworkReturnsPermissionDenied(t *testing.T) { permissionsManager := permissions.NewManager(s) groupsManager := groups.NewManagerMock() routerManager := routers.NewManagerMock() - resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am) + resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am, nil) manager := NewManager(s, permissionsManager, resourcesManager, routerManager, &am) network, err := manager.GetNetwork(ctx, accountID, userID, networkID) @@ -123,7 +123,7 @@ func Test_CreateNetworkSuccessfully(t *testing.T) { permissionsManager := permissions.NewManager(s) groupsManager := groups.NewManagerMock() routerManager := routers.NewManagerMock() - resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am) + resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am, nil) manager := NewManager(s, permissionsManager, resourcesManager, routerManager, &am) createdNetwork, err := manager.CreateNetwork(ctx, userID, network) @@ -148,7 +148,7 @@ func Test_CreateNetworkFailsWithPermissionDenied(t *testing.T) { permissionsManager := permissions.NewManager(s) groupsManager := groups.NewManagerMock() routerManager := routers.NewManagerMock() - resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am) + resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am, nil) manager := NewManager(s, permissionsManager, resourcesManager, routerManager, &am) createdNetwork, err := manager.CreateNetwork(ctx, userID, network) @@ -171,7 +171,7 @@ func Test_DeleteNetworkSuccessfully(t *testing.T) { permissionsManager := permissions.NewManager(s) groupsManager := groups.NewManagerMock() routerManager := routers.NewManagerMock() - resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am) + resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am, nil) manager := NewManager(s, permissionsManager, resourcesManager, routerManager, &am) err = manager.DeleteNetwork(ctx, accountID, userID, networkID) @@ -193,7 +193,7 @@ func Test_DeleteNetworkFailsWithPermissionDenied(t *testing.T) { permissionsManager := permissions.NewManager(s) groupsManager := groups.NewManagerMock() routerManager := routers.NewManagerMock() - resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am) + resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am, nil) manager := NewManager(s, permissionsManager, resourcesManager, routerManager, &am) err = manager.DeleteNetwork(ctx, accountID, userID, networkID) @@ -218,7 +218,7 @@ func Test_UpdateNetworkSuccessfully(t *testing.T) { permissionsManager := permissions.NewManager(s) groupsManager := groups.NewManagerMock() routerManager := routers.NewManagerMock() - resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am) + resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am, nil) manager := NewManager(s, permissionsManager, resourcesManager, routerManager, &am) updatedNetwork, err := manager.UpdateNetwork(ctx, userID, network) @@ -245,7 +245,7 @@ func Test_UpdateNetworkFailsWithPermissionDenied(t *testing.T) { permissionsManager := permissions.NewManager(s) groupsManager := groups.NewManagerMock() routerManager := routers.NewManagerMock() - resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am) + resourcesManager := resources.NewManager(s, permissionsManager, groupsManager, &am, nil) manager := NewManager(s, permissionsManager, resourcesManager, routerManager, &am) updatedNetwork, err := manager.UpdateNetwork(ctx, userID, network) diff --git a/management/server/networks/resources/manager.go b/management/server/networks/resources/manager.go index 66484d120..843ca93e5 100644 --- a/management/server/networks/resources/manager.go +++ b/management/server/networks/resources/manager.go @@ -5,6 +5,9 @@ import ( "errors" "fmt" + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" "github.com/netbirdio/netbird/management/server/account" "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/groups" @@ -30,21 +33,23 @@ type Manager interface { } type managerImpl struct { - store store.Store - permissionsManager permissions.Manager - groupsManager groups.Manager - accountManager account.Manager + store store.Store + permissionsManager permissions.Manager + groupsManager groups.Manager + accountManager account.Manager + reverseProxyManager reverseproxy.Manager } type mockManager struct { } -func NewManager(store store.Store, permissionsManager permissions.Manager, groupsManager groups.Manager, accountManager account.Manager) Manager { +func NewManager(store store.Store, permissionsManager permissions.Manager, groupsManager groups.Manager, accountManager account.Manager, reverseproxyManager reverseproxy.Manager) Manager { return &managerImpl{ - store: store, - permissionsManager: permissionsManager, - groupsManager: groupsManager, - accountManager: accountManager, + store: store, + permissionsManager: permissionsManager, + groupsManager: groupsManager, + accountManager: accountManager, + reverseProxyManager: reverseproxyManager, } } @@ -257,6 +262,14 @@ func (m *managerImpl) UpdateResource(ctx context.Context, userID string, resourc event() } + // TODO: optimize to only reload reverse proxies that are affected by the resource update instead of all of them + go func() { + err := m.reverseProxyManager.ReloadAllServicesForAccount(ctx, resource.AccountID) + if err != nil { + log.WithContext(ctx).Warnf("failed to reload all proxies for account: %v", err) + } + }() + go m.accountManager.UpdateAccountPeers(ctx, resource.AccountID) return resource, nil @@ -309,6 +322,14 @@ func (m *managerImpl) DeleteResource(ctx context.Context, accountID, userID, net return status.NewPermissionDeniedError() } + serviceID, err := m.reverseProxyManager.GetServiceIDByTargetID(ctx, accountID, resourceID) + if err != nil { + return fmt.Errorf("failed to check if resource is used by service: %w", err) + } + if serviceID != "" { + return status.NewResourceInUseError(resourceID, serviceID) + } + var events []func() err = m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { events, err = m.DeleteResourceInTransaction(ctx, transaction, accountID, userID, networkID, resourceID) diff --git a/management/server/networks/resources/manager_test.go b/management/server/networks/resources/manager_test.go index 29b0af2cc..99de484e5 100644 --- a/management/server/networks/resources/manager_test.go +++ b/management/server/networks/resources/manager_test.go @@ -4,8 +4,10 @@ import ( "context" "testing" + "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" "github.com/netbirdio/netbird/management/server/groups" "github.com/netbirdio/netbird/management/server/mock_server" "github.com/netbirdio/netbird/management/server/networks/resources/types" @@ -28,7 +30,9 @@ func Test_GetAllResourcesInNetworkReturnsResources(t *testing.T) { permissionsManager := permissions.NewManager(store) am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() - manager := NewManager(store, permissionsManager, groupsManager, &am) + ctrl := gomock.NewController(t) + reverseProxyManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) resources, err := manager.GetAllResourcesInNetwork(ctx, accountID, userID, networkID) require.NoError(t, err) @@ -49,7 +53,9 @@ func Test_GetAllResourcesInNetworkReturnsPermissionDenied(t *testing.T) { permissionsManager := permissions.NewManager(store) am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() - manager := NewManager(store, permissionsManager, groupsManager, &am) + ctrl := gomock.NewController(t) + reverseProxyManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) resources, err := manager.GetAllResourcesInNetwork(ctx, accountID, userID, networkID) require.Error(t, err) @@ -69,7 +75,9 @@ func Test_GetAllResourcesInAccountReturnsResources(t *testing.T) { permissionsManager := permissions.NewManager(store) am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() - manager := NewManager(store, permissionsManager, groupsManager, &am) + ctrl := gomock.NewController(t) + reverseProxyManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) resources, err := manager.GetAllResourcesInAccount(ctx, accountID, userID) require.NoError(t, err) @@ -89,7 +97,9 @@ func Test_GetAllResourcesInAccountReturnsPermissionDenied(t *testing.T) { permissionsManager := permissions.NewManager(store) am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() - manager := NewManager(store, permissionsManager, groupsManager, &am) + ctrl := gomock.NewController(t) + reverseProxyManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) resources, err := manager.GetAllResourcesInAccount(ctx, accountID, userID) require.Error(t, err) @@ -112,7 +122,9 @@ func Test_GetResourceInNetworkReturnsResources(t *testing.T) { permissionsManager := permissions.NewManager(store) am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() - manager := NewManager(store, permissionsManager, groupsManager, &am) + ctrl := gomock.NewController(t) + reverseProxyManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) resource, err := manager.GetResource(ctx, accountID, userID, networkID, resourceID) require.NoError(t, err) @@ -134,7 +146,9 @@ func Test_GetResourceInNetworkReturnsPermissionDenied(t *testing.T) { permissionsManager := permissions.NewManager(store) am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() - manager := NewManager(store, permissionsManager, groupsManager, &am) + ctrl := gomock.NewController(t) + reverseProxyManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) resources, err := manager.GetResource(ctx, accountID, userID, networkID, resourceID) require.Error(t, err) @@ -161,7 +175,10 @@ func Test_CreateResourceSuccessfully(t *testing.T) { permissionsManager := permissions.NewManager(store) am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() - manager := NewManager(store, permissionsManager, groupsManager, &am) + ctrl := gomock.NewController(t) + reverseProxyManager := reverseproxy.NewMockManager(ctrl) + reverseProxyManager.EXPECT().ReloadAllServicesForAccount(gomock.Any(), resource.AccountID).Return(nil).AnyTimes() + manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) createdResource, err := manager.CreateResource(ctx, userID, resource) require.NoError(t, err) @@ -187,7 +204,9 @@ func Test_CreateResourceFailsWithPermissionDenied(t *testing.T) { permissionsManager := permissions.NewManager(store) am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() - manager := NewManager(store, permissionsManager, groupsManager, &am) + ctrl := gomock.NewController(t) + reverseProxyManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) createdResource, err := manager.CreateResource(ctx, userID, resource) require.Error(t, err) @@ -214,7 +233,9 @@ func Test_CreateResourceFailsWithInvalidAddress(t *testing.T) { permissionsManager := permissions.NewManager(store) am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() - manager := NewManager(store, permissionsManager, groupsManager, &am) + ctrl := gomock.NewController(t) + reverseProxyManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) createdResource, err := manager.CreateResource(ctx, userID, resource) require.Error(t, err) @@ -240,7 +261,9 @@ func Test_CreateResourceFailsWithUsedName(t *testing.T) { permissionsManager := permissions.NewManager(store) am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() - manager := NewManager(store, permissionsManager, groupsManager, &am) + ctrl := gomock.NewController(t) + reverseProxyManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) createdResource, err := manager.CreateResource(ctx, userID, resource) require.Error(t, err) @@ -270,7 +293,10 @@ func Test_UpdateResourceSuccessfully(t *testing.T) { permissionsManager := permissions.NewManager(store) am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() - manager := NewManager(store, permissionsManager, groupsManager, &am) + ctrl := gomock.NewController(t) + reverseProxyManager := reverseproxy.NewMockManager(ctrl) + reverseProxyManager.EXPECT().ReloadAllServicesForAccount(gomock.Any(), accountID).Return(nil).AnyTimes() + manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) updatedResource, err := manager.UpdateResource(ctx, userID, resource) require.NoError(t, err) @@ -302,7 +328,9 @@ func Test_UpdateResourceFailsWithResourceNotFound(t *testing.T) { permissionsManager := permissions.NewManager(store) am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() - manager := NewManager(store, permissionsManager, groupsManager, &am) + ctrl := gomock.NewController(t) + reverseProxyManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) updatedResource, err := manager.UpdateResource(ctx, userID, resource) require.Error(t, err) @@ -332,7 +360,9 @@ func Test_UpdateResourceFailsWithNameInUse(t *testing.T) { permissionsManager := permissions.NewManager(store) am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() - manager := NewManager(store, permissionsManager, groupsManager, &am) + ctrl := gomock.NewController(t) + reverseProxyManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) updatedResource, err := manager.UpdateResource(ctx, userID, resource) require.Error(t, err) @@ -361,7 +391,9 @@ func Test_UpdateResourceFailsWithPermissionDenied(t *testing.T) { permissionsManager := permissions.NewManager(store) am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() - manager := NewManager(store, permissionsManager, groupsManager, &am) + ctrl := gomock.NewController(t) + reverseProxyManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) updatedResource, err := manager.UpdateResource(ctx, userID, resource) require.Error(t, err) @@ -383,7 +415,10 @@ func Test_DeleteResourceSuccessfully(t *testing.T) { permissionsManager := permissions.NewManager(store) am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() - manager := NewManager(store, permissionsManager, groupsManager, &am) + ctrl := gomock.NewController(t) + reverseProxyManager := reverseproxy.NewMockManager(ctrl) + reverseProxyManager.EXPECT().GetServiceIDByTargetID(gomock.Any(), accountID, resourceID).Return("", nil).AnyTimes() + manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) err = manager.DeleteResource(ctx, accountID, userID, networkID, resourceID) require.NoError(t, err) @@ -404,7 +439,9 @@ func Test_DeleteResourceFailsWithPermissionDenied(t *testing.T) { permissionsManager := permissions.NewManager(store) am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() - manager := NewManager(store, permissionsManager, groupsManager, &am) + ctrl := gomock.NewController(t) + reverseProxyManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) err = manager.DeleteResource(ctx, accountID, userID, networkID, resourceID) require.Error(t, err) diff --git a/management/server/peer.go b/management/server/peer.go index a4bdc784d..a2ca97208 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -221,6 +221,10 @@ func (am *DefaultAccountManager) UpdatePeer(ctx context.Context, accountID, user return err } + if peer.ProxyMeta.Embedded { + return fmt.Errorf("not allowed to update peer") + } + settings, err = transaction.GetAccountSettings(ctx, store.LockingStrengthNone, accountID) if err != nil { return err @@ -489,6 +493,14 @@ func (am *DefaultAccountManager) DeletePeer(ctx context.Context, accountID, peer var settings *types.Settings var eventsToStore []func() + serviceID, err := am.reverseProxyManager.GetServiceIDByTargetID(ctx, accountID, peerID) + if err != nil { + return fmt.Errorf("failed to check if resource is used by service: %w", err) + } + if serviceID != "" { + return status.NewPeerInUseError(peerID, serviceID) + } + err = am.Store.ExecuteInTransaction(ctx, func(transaction store.Store) error { peer, err = transaction.GetPeerByID(ctx, store.LockingStrengthNone, accountID, peerID) if err != nil { @@ -549,6 +561,99 @@ func (am *DefaultAccountManager) GetPeerNetwork(ctx context.Context, peerID stri return account.Network.Copy(), err } +type peerAddAuthConfig struct { + AccountID string + SetupKeyID string + SetupKeyName string + GroupsToAdd []string + AllowExtraDNSLabels bool + Ephemeral bool +} + +func (am *DefaultAccountManager) processPeerAddAuth(ctx context.Context, accountID, userID, encodedHashedKey string, peer *nbpeer.Peer, temporary, addedByUser, addedBySetupKey bool, opEvent *activity.Event) (*peerAddAuthConfig, error) { + config := &peerAddAuthConfig{ + AccountID: accountID, + Ephemeral: peer.Ephemeral, + } + + switch { + case addedByUser: + if err := am.handleUserAddedPeer(ctx, accountID, userID, temporary, opEvent, config); err != nil { + return nil, err + } + case addedBySetupKey: + if err := am.handleSetupKeyAddedPeer(ctx, encodedHashedKey, peer, opEvent, config); err != nil { + return nil, err + } + default: + if peer.ProxyMeta.Embedded { + log.WithContext(ctx).Debugf("adding peer for proxy embedded, accountID: %s", accountID) + } else { + log.WithContext(ctx).Warnf("adding peer without setup key or userID, accountID: %s", accountID) + } + } + + opEvent.AccountID = config.AccountID + if temporary { + config.Ephemeral = true + } + + return config, nil +} + +func (am *DefaultAccountManager) handleUserAddedPeer(ctx context.Context, accountID, userID string, temporary bool, opEvent *activity.Event, config *peerAddAuthConfig) error { + user, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthNone, userID) + if err != nil { + return status.Errorf(status.NotFound, "failed adding new peer: user not found") + } + if user.PendingApproval { + return status.Errorf(status.PermissionDenied, "user pending approval cannot add peers") + } + + if temporary { + allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Peers, operations.Create) + if err != nil { + return status.NewPermissionValidationError(err) + } + if !allowed { + return status.NewPermissionDeniedError() + } + } else { + config.AccountID = user.AccountID + config.GroupsToAdd = user.AutoGroups + } + + opEvent.InitiatorID = userID + opEvent.Activity = activity.PeerAddedByUser + return nil +} + +func (am *DefaultAccountManager) handleSetupKeyAddedPeer(ctx context.Context, encodedHashedKey string, peer *nbpeer.Peer, opEvent *activity.Event, config *peerAddAuthConfig) error { + sk, err := am.Store.GetSetupKeyBySecret(ctx, store.LockingStrengthNone, encodedHashedKey) + if err != nil { + return status.Errorf(status.NotFound, "couldn't add peer: setup key is invalid") + } + + if !sk.IsValid() { + return status.Errorf(status.NotFound, "couldn't add peer: setup key is invalid") + } + + if !sk.AllowExtraDNSLabels && len(peer.ExtraDNSLabels) > 0 { + return status.Errorf(status.PreconditionFailed, "couldn't add peer: setup key doesn't allow extra DNS labels") + } + + opEvent.InitiatorID = sk.Id + opEvent.Activity = activity.PeerAddedWithSetupKey + config.GroupsToAdd = sk.AutoGroups + config.Ephemeral = sk.Ephemeral + config.SetupKeyID = sk.Id + config.SetupKeyName = sk.Name + config.AllowExtraDNSLabels = sk.AllowExtraDNSLabels + config.AccountID = sk.AccountID + + return nil +} + // AddPeer adds a new peer to the Store. // Each Account has a list of pre-authorized SetupKey and if no Account has a given key err with a code status.PermissionDenied // will be returned, meaning the setup key is invalid or not found. @@ -557,7 +662,7 @@ func (am *DefaultAccountManager) GetPeerNetwork(ctx context.Context, peerID stri // Each new Peer will be assigned a new next net.IP from the Account.Network and Account.Network.LastIP will be updated (IP's are not reused). // The peer property is just a placeholder for the Peer properties to pass further func (am *DefaultAccountManager) AddPeer(ctx context.Context, accountID, setupKey, userID string, peer *nbpeer.Peer, temporary bool) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, error) { - if setupKey == "" && userID == "" { + if setupKey == "" && userID == "" && !peer.ProxyMeta.Embedded { // no auth method provided => reject access return nil, nil, nil, status.Errorf(status.Unauthenticated, "no peer auth method provided, please use a setup key or interactive SSO login") } @@ -566,6 +671,7 @@ func (am *DefaultAccountManager) AddPeer(ctx context.Context, accountID, setupKe hashedKey := sha256.Sum256([]byte(upperKey)) encodedHashedKey := b64.StdEncoding.EncodeToString(hashedKey[:]) addedByUser := len(userID) > 0 + addedBySetupKey := len(setupKey) > 0 // This is a handling for the case when the same machine (with the same WireGuard pub key) tries to register twice. // Such case is possible when AddPeer function takes long time to finish after AcquireWriteLockByUID (e.g., database is slow) @@ -583,63 +689,12 @@ func (am *DefaultAccountManager) AddPeer(ctx context.Context, accountID, setupKe var newPeer *nbpeer.Peer - var setupKeyID string - var setupKeyName string - var ephemeral bool - var groupsToAdd []string - var allowExtraDNSLabels bool - if addedByUser { - user, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthNone, userID) - if err != nil { - return nil, nil, nil, status.Errorf(status.NotFound, "failed adding new peer: user not found") - } - if user.PendingApproval { - return nil, nil, nil, status.Errorf(status.PermissionDenied, "user pending approval cannot add peers") - } - if temporary { - allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Peers, operations.Create) - if err != nil { - return nil, nil, nil, status.NewPermissionValidationError(err) - } - - if !allowed { - return nil, nil, nil, status.NewPermissionDeniedError() - } - } else { - accountID = user.AccountID - groupsToAdd = user.AutoGroups - } - opEvent.InitiatorID = userID - opEvent.Activity = activity.PeerAddedByUser - } else { - // Validate the setup key - sk, err := am.Store.GetSetupKeyBySecret(ctx, store.LockingStrengthNone, encodedHashedKey) - if err != nil { - return nil, nil, nil, status.Errorf(status.NotFound, "couldn't add peer: setup key is invalid") - } - - // we will check key twice for early return - if !sk.IsValid() { - return nil, nil, nil, status.Errorf(status.NotFound, "couldn't add peer: setup key is invalid") - } - - opEvent.InitiatorID = sk.Id - opEvent.Activity = activity.PeerAddedWithSetupKey - groupsToAdd = sk.AutoGroups - ephemeral = sk.Ephemeral - setupKeyID = sk.Id - setupKeyName = sk.Name - allowExtraDNSLabels = sk.AllowExtraDNSLabels - accountID = sk.AccountID - if !sk.AllowExtraDNSLabels && len(peer.ExtraDNSLabels) > 0 { - return nil, nil, nil, status.Errorf(status.PreconditionFailed, "couldn't add peer: setup key doesn't allow extra DNS labels") - } - } - opEvent.AccountID = accountID - - if temporary { - ephemeral = true + peerAddConfig, err := am.processPeerAddAuth(ctx, accountID, userID, encodedHashedKey, peer, temporary, addedByUser, addedBySetupKey, opEvent) + if err != nil { + return nil, nil, nil, err } + accountID = peerAddConfig.AccountID + ephemeral := peerAddConfig.Ephemeral if (strings.ToLower(peer.Meta.Hostname) == "iphone" || strings.ToLower(peer.Meta.Hostname) == "ipad") && userID != "" { if am.idpManager != nil { @@ -669,10 +724,11 @@ func (am *DefaultAccountManager) AddPeer(ctx context.Context, accountID, setupKe CreatedAt: registrationTime, LoginExpirationEnabled: addedByUser && !temporary, Ephemeral: ephemeral, + ProxyMeta: peer.ProxyMeta, Location: peer.Location, InactivityExpirationEnabled: addedByUser && !temporary, ExtraDNSLabels: peer.ExtraDNSLabels, - AllowExtraDNSLabels: allowExtraDNSLabels, + AllowExtraDNSLabels: peerAddConfig.AllowExtraDNSLabels, } settings, err := am.Store.GetAccountSettings(ctx, store.LockingStrengthNone, accountID) if err != nil { @@ -690,7 +746,7 @@ func (am *DefaultAccountManager) AddPeer(ctx context.Context, accountID, setupKe } } - newPeer = am.integratedPeerValidator.PreparePeer(ctx, accountID, newPeer, groupsToAdd, settings.Extra, temporary) + newPeer = am.integratedPeerValidator.PreparePeer(ctx, accountID, newPeer, peerAddConfig.GroupsToAdd, settings.Extra, temporary) network, err := am.Store.GetAccountNetwork(ctx, store.LockingStrengthNone, accountID) if err != nil { @@ -726,8 +782,8 @@ func (am *DefaultAccountManager) AddPeer(ctx context.Context, accountID, setupKe return err } - if len(groupsToAdd) > 0 { - for _, g := range groupsToAdd { + if len(peerAddConfig.GroupsToAdd) > 0 { + for _, g := range peerAddConfig.GroupsToAdd { err = transaction.AddPeerToGroup(ctx, newPeer.AccountID, newPeer.ID, g) if err != nil { return err @@ -735,17 +791,20 @@ func (am *DefaultAccountManager) AddPeer(ctx context.Context, accountID, setupKe } } - err = transaction.AddPeerToAllGroup(ctx, accountID, newPeer.ID) - if err != nil { - return fmt.Errorf("failed adding peer to All group: %w", err) + if !peer.ProxyMeta.Embedded { + err = transaction.AddPeerToAllGroup(ctx, accountID, newPeer.ID) + if err != nil { + return fmt.Errorf("failed adding peer to All group: %w", err) + } } - if addedByUser { + switch { + case addedByUser: err := transaction.SaveUserLastLogin(ctx, accountID, userID, newPeer.GetLastLogin()) if err != nil { log.WithContext(ctx).Debugf("failed to update user last login: %v", err) } - } else { + case addedBySetupKey: sk, err := transaction.GetSetupKeyBySecret(ctx, store.LockingStrengthUpdate, encodedHashedKey) if err != nil { return fmt.Errorf("failed to get setup key: %w", err) @@ -756,7 +815,7 @@ func (am *DefaultAccountManager) AddPeer(ctx context.Context, accountID, setupKe return status.Errorf(status.PreconditionFailed, "couldn't add peer: setup key is invalid") } - err = transaction.IncrementSetupKeyUsage(ctx, setupKeyID) + err = transaction.IncrementSetupKeyUsage(ctx, peerAddConfig.SetupKeyID) if err != nil { return fmt.Errorf("failed to increment setup key usage: %w", err) } @@ -797,7 +856,7 @@ func (am *DefaultAccountManager) AddPeer(ctx context.Context, accountID, setupKe opEvent.TargetID = newPeer.ID opEvent.Meta = newPeer.EventMeta(am.networkMapController.GetDNSDomain(settings)) if !addedByUser { - opEvent.Meta["setup_key_name"] = setupKeyName + opEvent.Meta["setup_key_name"] = peerAddConfig.SetupKeyName } am.StoreEvent(ctx, opEvent.InitiatorID, opEvent.TargetID, opEvent.AccountID, opEvent.Activity, opEvent.Meta) diff --git a/management/server/peer/peer.go b/management/server/peer/peer.go index 2439e8a22..269b30822 100644 --- a/management/server/peer/peer.go +++ b/management/server/peer/peer.go @@ -24,6 +24,8 @@ type Peer struct { IP net.IP `gorm:"serializer:json"` // uniqueness index per accountID (check migrations) // Meta is a Peer system meta data Meta PeerSystemMeta `gorm:"embedded;embeddedPrefix:meta_"` + // ProxyMeta is metadata related to proxy peers + ProxyMeta ProxyMeta `gorm:"embedded;embeddedPrefix:proxy_meta_"` // Name is peer's name (machine name) Name string `gorm:"index"` // DNSLabel is the parsed peer name for domain resolution. It is used to form an FQDN by appending the account's @@ -48,6 +50,7 @@ type Peer struct { CreatedAt time.Time // Indicate ephemeral peer attribute Ephemeral bool `gorm:"index"` + // Geo location based on connection IP Location Location `gorm:"embedded;embeddedPrefix:location_"` @@ -57,6 +60,11 @@ type Peer struct { AllowExtraDNSLabels bool } +type ProxyMeta struct { + Embedded bool `gorm:"index"` + Cluster string `gorm:"index"` +} + type PeerStatus struct { //nolint:revive // LastSeen is the last time peer was connected to the management service LastSeen time.Time @@ -224,6 +232,7 @@ func (p *Peer) Copy() *Peer { LastLogin: p.LastLogin, CreatedAt: p.CreatedAt, Ephemeral: p.Ephemeral, + ProxyMeta: p.ProxyMeta, Location: p.Location, InactivityExpirationEnabled: p.InactivityExpirationEnabled, ExtraDNSLabels: slices.Clone(p.ExtraDNSLabels), diff --git a/management/server/peer_test.go b/management/server/peer_test.go index 3846a3e85..b17757ffd 100644 --- a/management/server/peer_test.go +++ b/management/server/peer_test.go @@ -2489,3 +2489,252 @@ func TestLoginPeer_ApprovedUserCanLogin(t *testing.T) { _, _, _, err = manager.LoginPeer(context.Background(), login) require.NoError(t, err, "Regular user should be able to login peers") } + +func TestHandleUserAddedPeer(t *testing.T) { + manager, _, err := createManager(t) + require.NoError(t, err) + + account := newAccountWithId(context.Background(), "test-account", "owner", "", "", "", false) + err = manager.Store.SaveAccount(context.Background(), account) + require.NoError(t, err) + + t.Run("regular user can add peer", func(t *testing.T) { + regularUser := types.NewRegularUser("regular-user-1", "", "") + regularUser.AccountID = account.Id + regularUser.AutoGroups = []string{"group1", "group2"} + err = manager.Store.SaveUser(context.Background(), regularUser) + require.NoError(t, err) + + opEvent := &activity.Event{} + config := &peerAddAuthConfig{} + + err = manager.handleUserAddedPeer(context.Background(), account.Id, regularUser.Id, false, opEvent, config) + require.NoError(t, err) + assert.Equal(t, account.Id, config.AccountID) + assert.Equal(t, regularUser.AutoGroups, config.GroupsToAdd) + assert.Equal(t, regularUser.Id, opEvent.InitiatorID) + assert.Equal(t, activity.PeerAddedByUser, opEvent.Activity) + }) + + t.Run("pending approval user cannot add peer", func(t *testing.T) { + pendingUser := types.NewRegularUser("pending-user", "", "") + pendingUser.AccountID = account.Id + pendingUser.PendingApproval = true + err = manager.Store.SaveUser(context.Background(), pendingUser) + require.NoError(t, err) + + opEvent := &activity.Event{} + config := &peerAddAuthConfig{} + + err = manager.handleUserAddedPeer(context.Background(), account.Id, pendingUser.Id, false, opEvent, config) + require.Error(t, err) + assert.Contains(t, err.Error(), "user pending approval cannot add peers") + }) + + t.Run("user not found", func(t *testing.T) { + opEvent := &activity.Event{} + config := &peerAddAuthConfig{} + + err = manager.handleUserAddedPeer(context.Background(), account.Id, "non-existent-user", false, opEvent, config) + require.Error(t, err) + assert.Contains(t, err.Error(), "user not found") + }) + + t.Run("temporary peer requires permissions", func(t *testing.T) { + regularUser := types.NewRegularUser("regular-user-2", "", "") + regularUser.AccountID = account.Id + err = manager.Store.SaveUser(context.Background(), regularUser) + require.NoError(t, err) + + opEvent := &activity.Event{} + config := &peerAddAuthConfig{} + + // Should fail because user doesn't have permissions for temporary peers + err = manager.handleUserAddedPeer(context.Background(), account.Id, regularUser.Id, true, opEvent, config) + require.Error(t, err) + }) +} + +func TestHandleSetupKeyAddedPeer(t *testing.T) { + manager, _, err := createManager(t) + require.NoError(t, err) + + account := newAccountWithId(context.Background(), "test-account", "owner", "", "", "", false) + err = manager.Store.SaveAccount(context.Background(), account) + require.NoError(t, err) + + // Create admin user for setup key creation + adminUser := types.NewAdminUser("admin-user") + adminUser.AccountID = account.Id + err = manager.Store.SaveUser(context.Background(), adminUser) + require.NoError(t, err) + + t.Run("valid setup key", func(t *testing.T) { + setupKey, err := manager.CreateSetupKey(context.Background(), account.Id, "test-key", types.SetupKeyReusable, time.Hour, []string{}, 0, adminUser.Id, false, false) + require.NoError(t, err) + + upperKey := strings.ToUpper(setupKey.Key) + hashedKey := sha256.Sum256([]byte(upperKey)) + encodedHashedKey := b64.StdEncoding.EncodeToString(hashedKey[:]) + + opEvent := &activity.Event{} + config := &peerAddAuthConfig{} + peer := &nbpeer.Peer{ExtraDNSLabels: []string{}} + + err = manager.handleSetupKeyAddedPeer(context.Background(), encodedHashedKey, peer, opEvent, config) + require.NoError(t, err) + assert.Equal(t, setupKey.Id, config.SetupKeyID) + assert.Equal(t, setupKey.Name, config.SetupKeyName) + assert.Equal(t, setupKey.AutoGroups, config.GroupsToAdd) + assert.Equal(t, setupKey.Ephemeral, config.Ephemeral) + assert.Equal(t, setupKey.Id, opEvent.InitiatorID) + assert.Equal(t, activity.PeerAddedWithSetupKey, opEvent.Activity) + }) + + t.Run("invalid setup key", func(t *testing.T) { + invalidKey := "invalid-key" + hashedKey := sha256.Sum256([]byte(invalidKey)) + encodedHashedKey := b64.StdEncoding.EncodeToString(hashedKey[:]) + + opEvent := &activity.Event{} + config := &peerAddAuthConfig{} + peer := &nbpeer.Peer{} + + err = manager.handleSetupKeyAddedPeer(context.Background(), encodedHashedKey, peer, opEvent, config) + require.Error(t, err) + assert.Contains(t, err.Error(), "setup key is invalid") + }) + + t.Run("expired setup key", func(t *testing.T) { + setupKey, err := manager.CreateSetupKey(context.Background(), account.Id, "expired-key", types.SetupKeyReusable, time.Millisecond, []string{}, 0, adminUser.Id, false, false) + require.NoError(t, err) + + // Wait for key to expire + time.Sleep(10 * time.Millisecond) + + upperKey := strings.ToUpper(setupKey.Key) + hashedKey := sha256.Sum256([]byte(upperKey)) + encodedHashedKey := b64.StdEncoding.EncodeToString(hashedKey[:]) + + opEvent := &activity.Event{} + config := &peerAddAuthConfig{} + peer := &nbpeer.Peer{} + + err = manager.handleSetupKeyAddedPeer(context.Background(), encodedHashedKey, peer, opEvent, config) + require.Error(t, err) + assert.Contains(t, err.Error(), "setup key is invalid") + }) + + t.Run("extra DNS labels not allowed", func(t *testing.T) { + setupKey, err := manager.CreateSetupKey(context.Background(), account.Id, "no-dns-key", types.SetupKeyReusable, time.Hour, []string{}, 0, adminUser.Id, false, false) + require.NoError(t, err) + + upperKey := strings.ToUpper(setupKey.Key) + hashedKey := sha256.Sum256([]byte(upperKey)) + encodedHashedKey := b64.StdEncoding.EncodeToString(hashedKey[:]) + + opEvent := &activity.Event{} + config := &peerAddAuthConfig{} + peer := &nbpeer.Peer{ExtraDNSLabels: []string{"custom.label"}} + + err = manager.handleSetupKeyAddedPeer(context.Background(), encodedHashedKey, peer, opEvent, config) + require.Error(t, err) + assert.Contains(t, err.Error(), "doesn't allow extra DNS labels") + }) + + t.Run("extra DNS labels allowed", func(t *testing.T) { + setupKey, err := manager.CreateSetupKey(context.Background(), account.Id, "dns-key", types.SetupKeyReusable, time.Hour, []string{}, 0, adminUser.Id, false, true) + require.NoError(t, err) + + upperKey := strings.ToUpper(setupKey.Key) + hashedKey := sha256.Sum256([]byte(upperKey)) + encodedHashedKey := b64.StdEncoding.EncodeToString(hashedKey[:]) + + opEvent := &activity.Event{} + config := &peerAddAuthConfig{} + peer := &nbpeer.Peer{ExtraDNSLabels: []string{"custom.label"}} + + err = manager.handleSetupKeyAddedPeer(context.Background(), encodedHashedKey, peer, opEvent, config) + require.NoError(t, err) + assert.True(t, config.AllowExtraDNSLabels) + }) +} + +func TestProcessPeerAddAuth(t *testing.T) { + manager, _, err := createManager(t) + require.NoError(t, err) + + account := newAccountWithId(context.Background(), "test-account", "owner", "", "", "", false) + err = manager.Store.SaveAccount(context.Background(), account) + require.NoError(t, err) + + adminUser := types.NewAdminUser("admin") + adminUser.AccountID = account.Id + err = manager.Store.SaveUser(context.Background(), adminUser) + require.NoError(t, err) + + t.Run("user authentication flow", func(t *testing.T) { + regularUser := types.NewRegularUser("user-auth-test", "", "") + regularUser.AccountID = account.Id + regularUser.AutoGroups = []string{"group1"} + err = manager.Store.SaveUser(context.Background(), regularUser) + require.NoError(t, err) + + opEvent := &activity.Event{Timestamp: time.Now()} + peer := &nbpeer.Peer{Ephemeral: false} + + config, err := manager.processPeerAddAuth(context.Background(), account.Id, regularUser.Id, "", peer, false, true, false, opEvent) + require.NoError(t, err) + assert.Equal(t, account.Id, config.AccountID) + assert.False(t, config.Ephemeral) + assert.Equal(t, regularUser.AutoGroups, config.GroupsToAdd) + assert.Equal(t, account.Id, opEvent.AccountID) + }) + + t.Run("setup key authentication flow", func(t *testing.T) { + setupKey, err := manager.CreateSetupKey(context.Background(), account.Id, "auth-test-key", types.SetupKeyReusable, time.Hour, []string{}, 0, adminUser.Id, true, false) + require.NoError(t, err) + + upperKey := strings.ToUpper(setupKey.Key) + hashedKey := sha256.Sum256([]byte(upperKey)) + encodedHashedKey := b64.StdEncoding.EncodeToString(hashedKey[:]) + + opEvent := &activity.Event{Timestamp: time.Now()} + peer := &nbpeer.Peer{Ephemeral: false} + + config, err := manager.processPeerAddAuth(context.Background(), account.Id, "", encodedHashedKey, peer, false, false, true, opEvent) + require.NoError(t, err) + assert.Equal(t, account.Id, config.AccountID) + assert.True(t, config.Ephemeral) // setupKey.Ephemeral is true + assert.Equal(t, setupKey.AutoGroups, config.GroupsToAdd) + assert.Equal(t, account.Id, opEvent.AccountID) + }) + + t.Run("temporary flag overrides ephemeral", func(t *testing.T) { + regularUser := types.NewRegularUser("temp-user", "", "") + regularUser.AccountID = account.Id + err = manager.Store.SaveUser(context.Background(), regularUser) + require.NoError(t, err) + + opEvent := &activity.Event{Timestamp: time.Now()} + peer := &nbpeer.Peer{Ephemeral: false} + + config, err := manager.processPeerAddAuth(context.Background(), account.Id, regularUser.Id, "", peer, true, true, false, opEvent) + require.Error(t, err) // Will fail permission check but that's expected + _ = config // avoid unused warning + }) + + t.Run("proxy embedded peer (no auth)", func(t *testing.T) { + opEvent := &activity.Event{Timestamp: time.Now()} + peer := &nbpeer.Peer{ + Ephemeral: false, + ProxyMeta: nbpeer.ProxyMeta{Embedded: true}, + } + + config, err := manager.processPeerAddAuth(context.Background(), account.Id, "", "", peer, false, false, false, opEvent) + require.NoError(t, err) + assert.Equal(t, account.Id, config.AccountID) + assert.False(t, config.Ephemeral) + assert.Empty(t, config.GroupsToAdd) + }) +} diff --git a/management/server/permissions/modules/module.go b/management/server/permissions/modules/module.go index f19675d27..93007d4c1 100644 --- a/management/server/permissions/modules/module.go +++ b/management/server/permissions/modules/module.go @@ -3,37 +3,39 @@ package modules type Module string const ( - Networks Module = "networks" - Peers Module = "peers" - RemoteJobs Module = "remote_jobs" - Groups Module = "groups" - Settings Module = "settings" - Accounts Module = "accounts" - Dns Module = "dns" - Nameservers Module = "nameservers" - Events Module = "events" - Policies Module = "policies" - Routes Module = "routes" - Users Module = "users" - SetupKeys Module = "setup_keys" - Pats Module = "pats" + Networks Module = "networks" + Peers Module = "peers" + RemoteJobs Module = "remote_jobs" + Groups Module = "groups" + Settings Module = "settings" + Accounts Module = "accounts" + Dns Module = "dns" + Nameservers Module = "nameservers" + Events Module = "events" + Policies Module = "policies" + Routes Module = "routes" + Users Module = "users" + SetupKeys Module = "setup_keys" + Pats Module = "pats" IdentityProviders Module = "identity_providers" + Services Module = "services" ) var All = map[Module]struct{}{ - Networks: {}, - Peers: {}, - RemoteJobs: {}, - Groups: {}, - Settings: {}, - Accounts: {}, - Dns: {}, - Nameservers: {}, - Events: {}, - Policies: {}, - Routes: {}, - Users: {}, - SetupKeys: {}, - Pats: {}, + Networks: {}, + Peers: {}, + RemoteJobs: {}, + Groups: {}, + Settings: {}, + Accounts: {}, + Dns: {}, + Nameservers: {}, + Events: {}, + Policies: {}, + Routes: {}, + Users: {}, + SetupKeys: {}, + Pats: {}, IdentityProviders: {}, + Services: {}, } diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index f9ad1987c..db7cfd32d 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -18,6 +18,7 @@ import ( "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgxpool" + "github.com/rs/xid" log "github.com/sirupsen/logrus" "gorm.io/driver/mysql" "gorm.io/driver/postgres" @@ -27,6 +28,9 @@ import ( "gorm.io/gorm/logger" nbdns "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" "github.com/netbirdio/netbird/management/internals/modules/zones" "github.com/netbirdio/netbird/management/internals/modules/zones/records" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" @@ -122,11 +126,13 @@ func NewSqlStore(ctx context.Context, db *gorm.DB, storeEngine types.Engine, met return nil, fmt.Errorf("migratePreAuto: %w", err) } err = db.AutoMigrate( - &types.SetupKey{}, &nbpeer.Peer{}, &types.User{}, &types.PersonalAccessToken{}, &types.Group{}, &types.GroupPeer{}, + &types.SetupKey{}, &nbpeer.Peer{}, &types.User{}, &types.PersonalAccessToken{}, &types.ProxyAccessToken{}, + &types.Group{}, &types.GroupPeer{}, &types.Account{}, &types.Policy{}, &types.PolicyRule{}, &route.Route{}, &nbdns.NameServerGroup{}, &installation{}, &types.ExtraSettings{}, &posture.Checks{}, &nbpeer.NetworkAddress{}, &networkTypes.Network{}, &routerTypes.NetworkRouter{}, &resourceTypes.NetworkResource{}, &types.AccountOnboarding{}, - &types.Job{}, &zones.Zone{}, &records.Record{}, &types.UserInviteRecord{}, + &types.Job{}, &zones.Zone{}, &records.Record{}, &types.UserInviteRecord{}, &reverseproxy.Service{}, &reverseproxy.Target{}, &domain.Domain{}, + &accesslogs.AccessLogEntry{}, ) if err != nil { return nil, fmt.Errorf("auto migratePreAuto: %w", err) @@ -1094,6 +1100,7 @@ func (s *SqlStore) getAccountGorm(ctx context.Context, accountID string) (*types Preload("NetworkRouters"). Preload("NetworkResources"). Preload("Onboarding"). + Preload("Services.Targets"). Take(&account, idQueryCondition, accountID) if result.Error != nil { log.WithContext(ctx).Errorf("error when getting account %s from the store: %s", accountID, result.Error) @@ -1271,6 +1278,17 @@ func (s *SqlStore) getAccountPgx(ctx context.Context, accountID string) (*types. account.PostureChecks = checks }() + wg.Add(1) + go func() { + defer wg.Done() + services, err := s.getServices(ctx, accountID) + if err != nil { + errChan <- err + return + } + account.Services = services + }() + wg.Add(1) go func() { defer wg.Done() @@ -1672,7 +1690,7 @@ func (s *SqlStore) getPeers(ctx context.Context, accountID string) ([]nbpeer.Pee meta_kernel_version, meta_network_addresses, meta_system_serial_number, meta_system_product_name, meta_system_manufacturer, meta_environment, meta_flags, meta_files, peer_status_last_seen, peer_status_connected, peer_status_login_expired, peer_status_requires_approval, location_connection_ip, location_country_code, location_city_name, - location_geo_name_id FROM peers WHERE account_id = $1` + location_geo_name_id, proxy_meta_embedded, proxy_meta_cluster FROM peers WHERE account_id = $1` rows, err := s.pool.Query(ctx, query, accountID) if err != nil { return nil, err @@ -1685,12 +1703,12 @@ func (s *SqlStore) getPeers(ctx context.Context, accountID string) ([]nbpeer.Pee lastLogin, createdAt sql.NullTime sshEnabled, loginExpirationEnabled, inactivityExpirationEnabled, ephemeral, allowExtraDNSLabels sql.NullBool peerStatusLastSeen sql.NullTime - peerStatusConnected, peerStatusLoginExpired, peerStatusRequiresApproval sql.NullBool + peerStatusConnected, peerStatusLoginExpired, peerStatusRequiresApproval, proxyEmbedded sql.NullBool ip, extraDNS, netAddr, env, flags, files, connIP []byte metaHostname, metaGoOS, metaKernel, metaCore, metaPlatform sql.NullString metaOS, metaOSVersion, metaWtVersion, metaUIVersion, metaKernelVersion sql.NullString metaSystemSerialNumber, metaSystemProductName, metaSystemManufacturer sql.NullString - locationCountryCode, locationCityName sql.NullString + locationCountryCode, locationCityName, proxyCluster sql.NullString locationGeoNameID sql.NullInt64 ) @@ -1700,7 +1718,7 @@ func (s *SqlStore) getPeers(ctx context.Context, accountID string) ([]nbpeer.Pee &metaOS, &metaOSVersion, &metaWtVersion, &metaUIVersion, &metaKernelVersion, &netAddr, &metaSystemSerialNumber, &metaSystemProductName, &metaSystemManufacturer, &env, &flags, &files, &peerStatusLastSeen, &peerStatusConnected, &peerStatusLoginExpired, &peerStatusRequiresApproval, &connIP, - &locationCountryCode, &locationCityName, &locationGeoNameID) + &locationCountryCode, &locationCityName, &locationGeoNameID, &proxyEmbedded, &proxyCluster) if err == nil { if lastLogin.Valid { @@ -1784,6 +1802,12 @@ func (s *SqlStore) getPeers(ctx context.Context, accountID string) ([]nbpeer.Pee if locationGeoNameID.Valid { p.Location.GeoNameID = uint(locationGeoNameID.Int64) } + if proxyEmbedded.Valid { + p.ProxyMeta.Embedded = proxyEmbedded.Bool + } + if proxyCluster.Valid { + p.ProxyMeta.Cluster = proxyCluster.String + } if ip != nil { _ = json.Unmarshal(ip, &p.IP) } @@ -2039,6 +2063,131 @@ func (s *SqlStore) getPostureChecks(ctx context.Context, accountID string) ([]*p return checks, nil } +func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*reverseproxy.Service, error) { + const serviceQuery = `SELECT id, account_id, name, domain, enabled, auth, + meta_created_at, meta_certificate_issued_at, meta_status, proxy_cluster, + pass_host_header, rewrite_redirects, session_private_key, session_public_key + FROM services WHERE account_id = $1` + + const targetsQuery = `SELECT id, account_id, service_id, path, host, port, protocol, + target_id, target_type, enabled + FROM targets WHERE service_id = ANY($1)` + + serviceRows, err := s.pool.Query(ctx, serviceQuery, accountID) + if err != nil { + return nil, err + } + + services, err := pgx.CollectRows(serviceRows, func(row pgx.CollectableRow) (*reverseproxy.Service, error) { + var s reverseproxy.Service + var auth []byte + var createdAt, certIssuedAt sql.NullTime + var status, proxyCluster, sessionPrivateKey, sessionPublicKey sql.NullString + err := row.Scan( + &s.ID, + &s.AccountID, + &s.Name, + &s.Domain, + &s.Enabled, + &auth, + &createdAt, + &certIssuedAt, + &status, + &proxyCluster, + &s.PassHostHeader, + &s.RewriteRedirects, + &sessionPrivateKey, + &sessionPublicKey, + ) + if err != nil { + return nil, err + } + + if auth != nil { + if err := json.Unmarshal(auth, &s.Auth); err != nil { + return nil, err + } + } + + s.Meta = reverseproxy.ServiceMeta{} + if createdAt.Valid { + s.Meta.CreatedAt = createdAt.Time + } + if certIssuedAt.Valid { + s.Meta.CertificateIssuedAt = certIssuedAt.Time + } + if status.Valid { + s.Meta.Status = status.String + } + if proxyCluster.Valid { + s.ProxyCluster = proxyCluster.String + } + if sessionPrivateKey.Valid { + s.SessionPrivateKey = sessionPrivateKey.String + } + if sessionPublicKey.Valid { + s.SessionPublicKey = sessionPublicKey.String + } + + s.Targets = []*reverseproxy.Target{} + return &s, nil + }) + if err != nil { + return nil, err + } + + if len(services) == 0 { + return services, nil + } + + serviceIDs := make([]string, len(services)) + serviceMap := make(map[string]*reverseproxy.Service) + for i, s := range services { + serviceIDs[i] = s.ID + serviceMap[s.ID] = s + } + + targetRows, err := s.pool.Query(ctx, targetsQuery, serviceIDs) + if err != nil { + return nil, err + } + + targets, err := pgx.CollectRows(targetRows, func(row pgx.CollectableRow) (*reverseproxy.Target, error) { + var t reverseproxy.Target + var path sql.NullString + err := row.Scan( + &t.ID, + &t.AccountID, + &t.ServiceID, + &path, + &t.Host, + &t.Port, + &t.Protocol, + &t.TargetId, + &t.TargetType, + &t.Enabled, + ) + if err != nil { + return nil, err + } + if path.Valid { + t.Path = &path.String + } + return &t, nil + }) + if err != nil { + return nil, err + } + + for _, target := range targets { + if service, ok := serviceMap[target.ServiceID]; ok { + service.Targets = append(service.Targets, target) + } + } + + return services, nil +} + func (s *SqlStore) getNetworks(ctx context.Context, accountID string) ([]*networkTypes.Network, error) { const query = `SELECT id, account_id, name, description FROM networks WHERE account_id = $1` rows, err := s.pool.Query(ctx, query, accountID) @@ -4230,6 +4379,79 @@ func (s *SqlStore) DeletePAT(ctx context.Context, userID, patID string) error { return nil } +// GetProxyAccessTokenByHashedToken retrieves a proxy access token by its hashed value. +func (s *SqlStore) GetProxyAccessTokenByHashedToken(ctx context.Context, lockStrength LockingStrength, hashedToken types.HashedProxyToken) (*types.ProxyAccessToken, error) { + tx := s.db.WithContext(ctx) + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var token types.ProxyAccessToken + result := tx.Take(&token, "hashed_token = ?", hashedToken) + if result.Error != nil { + if errors.Is(result.Error, gorm.ErrRecordNotFound) { + return nil, status.Errorf(status.NotFound, "proxy access token not found") + } + return nil, status.Errorf(status.Internal, "get proxy access token: %v", result.Error) + } + + return &token, nil +} + +// GetAllProxyAccessTokens retrieves all proxy access tokens. +func (s *SqlStore) GetAllProxyAccessTokens(ctx context.Context, lockStrength LockingStrength) ([]*types.ProxyAccessToken, error) { + tx := s.db.WithContext(ctx) + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var tokens []*types.ProxyAccessToken + result := tx.Find(&tokens) + if result.Error != nil { + return nil, status.Errorf(status.Internal, "get proxy access tokens: %v", result.Error) + } + + return tokens, nil +} + +// SaveProxyAccessToken saves a proxy access token to the database. +func (s *SqlStore) SaveProxyAccessToken(ctx context.Context, token *types.ProxyAccessToken) error { + if result := s.db.WithContext(ctx).Create(token); result.Error != nil { + return status.Errorf(status.Internal, "save proxy access token: %v", result.Error) + } + return nil +} + +// RevokeProxyAccessToken revokes a proxy access token by its ID. +func (s *SqlStore) RevokeProxyAccessToken(ctx context.Context, tokenID string) error { + result := s.db.WithContext(ctx).Model(&types.ProxyAccessToken{}).Where(idQueryCondition, tokenID).Update("revoked", true) + if result.Error != nil { + return status.Errorf(status.Internal, "revoke proxy access token: %v", result.Error) + } + + if result.RowsAffected == 0 { + return status.Errorf(status.NotFound, "proxy access token not found") + } + + return nil +} + +// MarkProxyAccessTokenUsed updates the last used timestamp for a proxy access token. +func (s *SqlStore) MarkProxyAccessTokenUsed(ctx context.Context, tokenID string) error { + result := s.db.WithContext(ctx).Model(&types.ProxyAccessToken{}). + Where(idQueryCondition, tokenID). + Update("last_used", time.Now().UTC()) + if result.Error != nil { + return status.Errorf(status.Internal, "mark proxy access token as used: %v", result.Error) + } + + if result.RowsAffected == 0 { + return status.Errorf(status.NotFound, "proxy access token not found") + } + + return nil +} + func (s *SqlStore) GetPeerByIP(ctx context.Context, lockStrength LockingStrength, accountID string, ip net.IP) (*nbpeer.Peer, error) { tx := s.db if lockStrength != LockingStrengthNone { @@ -4602,3 +4824,353 @@ func (s *SqlStore) GetPeerIDByKey(ctx context.Context, lockStrength LockingStren return peerID, nil } + +func (s *SqlStore) CreateService(ctx context.Context, service *reverseproxy.Service) error { + serviceCopy := service.Copy() + if err := serviceCopy.EncryptSensitiveData(s.fieldEncrypt); err != nil { + return fmt.Errorf("encrypt service data: %w", err) + } + result := s.db.Create(serviceCopy) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to create service to store: %v", result.Error) + return status.Errorf(status.Internal, "failed to create service to store") + } + + return nil +} + +func (s *SqlStore) UpdateService(ctx context.Context, service *reverseproxy.Service) error { + serviceCopy := service.Copy() + if err := serviceCopy.EncryptSensitiveData(s.fieldEncrypt); err != nil { + return fmt.Errorf("encrypt service data: %w", err) + } + + // Use a transaction to ensure atomic updates of the service and its targets + err := s.db.Transaction(func(tx *gorm.DB) error { + // Delete existing targets + if err := tx.Where("service_id = ?", serviceCopy.ID).Delete(&reverseproxy.Target{}).Error; err != nil { + return err + } + + // Update the service and create new targets + if err := tx.Session(&gorm.Session{FullSaveAssociations: true}).Save(serviceCopy).Error; err != nil { + return err + } + + return nil + }) + + if err != nil { + log.WithContext(ctx).Errorf("failed to update service to store: %v", err) + return status.Errorf(status.Internal, "failed to update service to store") + } + + return nil +} + +func (s *SqlStore) DeleteService(ctx context.Context, accountID, serviceID string) error { + result := s.db.Delete(&reverseproxy.Service{}, accountAndIDQueryCondition, accountID, serviceID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to delete service from store: %v", result.Error) + return status.Errorf(status.Internal, "failed to delete service from store") + } + + if result.RowsAffected == 0 { + return status.Errorf(status.NotFound, "service %s not found", serviceID) + } + + return nil +} + +func (s *SqlStore) GetServiceByID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) (*reverseproxy.Service, error) { + tx := s.db.Preload("Targets") + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var service *reverseproxy.Service + result := tx.Take(&service, accountAndIDQueryCondition, accountID, serviceID) + if result.Error != nil { + if errors.Is(result.Error, gorm.ErrRecordNotFound) { + return nil, status.Errorf(status.NotFound, "service %s not found", serviceID) + } + + log.WithContext(ctx).Errorf("failed to get service from store: %v", result.Error) + return nil, status.Errorf(status.Internal, "failed to get service from store") + } + + if err := service.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt service data: %w", err) + } + + return service, nil +} + +func (s *SqlStore) GetServiceByDomain(ctx context.Context, accountID, domain string) (*reverseproxy.Service, error) { + var service *reverseproxy.Service + result := s.db.Preload("Targets").Where("account_id = ? AND domain = ?", accountID, domain).First(&service) + if result.Error != nil { + if errors.Is(result.Error, gorm.ErrRecordNotFound) { + return nil, status.Errorf(status.NotFound, "service with domain %s not found", domain) + } + + log.WithContext(ctx).Errorf("failed to get service by domain from store: %v", result.Error) + return nil, status.Errorf(status.Internal, "failed to get service by domain from store") + } + + if err := service.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt service data: %w", err) + } + + return service, nil +} + +func (s *SqlStore) GetServices(ctx context.Context, lockStrength LockingStrength) ([]*reverseproxy.Service, error) { + tx := s.db.Preload("Targets") + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var serviceList []*reverseproxy.Service + result := tx.Find(&serviceList) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to get services from the store: %s", result.Error) + return nil, status.Errorf(status.Internal, "failed to get services from store") + } + + for _, service := range serviceList { + if err := service.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt service data: %w", err) + } + } + + return serviceList, nil +} + +func (s *SqlStore) GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) { + tx := s.db.Preload("Targets") + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var serviceList []*reverseproxy.Service + result := tx.Find(&serviceList, accountIDCondition, accountID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to get services from the store: %s", result.Error) + return nil, status.Errorf(status.Internal, "failed to get services from store") + } + + for _, service := range serviceList { + if err := service.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt service data: %w", err) + } + } + + return serviceList, nil +} + +func (s *SqlStore) GetCustomDomain(ctx context.Context, accountID string, domainID string) (*domain.Domain, error) { + tx := s.db + + customDomain := &domain.Domain{} + result := tx.Take(&customDomain, accountAndIDQueryCondition, accountID, domainID) + if result.Error != nil { + if errors.Is(result.Error, gorm.ErrRecordNotFound) { + return nil, status.Errorf(status.NotFound, "custom domain %s not found", domainID) + } + + log.WithContext(ctx).Errorf("failed to get custom domain from store: %v", result.Error) + return nil, status.Errorf(status.Internal, "failed to get custom domain from store") + } + + return customDomain, nil +} + +func (s *SqlStore) ListFreeDomains(ctx context.Context, accountID string) ([]string, error) { + return nil, nil +} + +func (s *SqlStore) ListCustomDomains(ctx context.Context, accountID string) ([]*domain.Domain, error) { + tx := s.db + + var domains []*domain.Domain + result := tx.Find(&domains, accountIDCondition, accountID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to get reverse proxy custom domains from the store: %s", result.Error) + return nil, status.Errorf(status.Internal, "failed to get reverse proxy custom domains from store") + } + + return domains, nil +} + +func (s *SqlStore) CreateCustomDomain(ctx context.Context, accountID string, domainName string, targetCluster string, validated bool) (*domain.Domain, error) { + newDomain := &domain.Domain{ + ID: xid.New().String(), // Generate our own ID because gorm doesn't always configure the database to handle this for us. + Domain: domainName, + AccountID: accountID, + TargetCluster: targetCluster, + Type: domain.TypeCustom, + Validated: validated, + } + result := s.db.Create(newDomain) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to create reverse proxy custom domain to store: %v", result.Error) + return nil, status.Errorf(status.Internal, "failed to create reverse proxy custom domain to store") + } + + return newDomain, nil +} + +func (s *SqlStore) UpdateCustomDomain(ctx context.Context, accountID string, d *domain.Domain) (*domain.Domain, error) { + d.AccountID = accountID + result := s.db.Select("*").Save(d) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to update reverse proxy custom domain to store: %v", result.Error) + return nil, status.Errorf(status.Internal, "failed to update reverse proxy custom domain to store") + } + + return d, nil +} + +func (s *SqlStore) DeleteCustomDomain(ctx context.Context, accountID string, domainID string) error { + result := s.db.Delete(domain.Domain{}, accountAndIDQueryCondition, accountID, domainID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to delete reverse proxy custom domain from store: %v", result.Error) + return status.Errorf(status.Internal, "failed to delete reverse proxy custom domain from store") + } + + if result.RowsAffected == 0 { + return status.Errorf(status.NotFound, "reverse proxy custom domain %s not found", domainID) + } + + return nil +} + +// CreateAccessLog creates a new access log entry in the database +func (s *SqlStore) CreateAccessLog(ctx context.Context, logEntry *accesslogs.AccessLogEntry) error { + result := s.db.Create(logEntry) + if result.Error != nil { + log.WithContext(ctx).WithFields(log.Fields{ + "service_id": logEntry.ServiceID, + "method": logEntry.Method, + "host": logEntry.Host, + "path": logEntry.Path, + }).Errorf("failed to create access log entry in store: %v", result.Error) + return status.Errorf(status.Internal, "failed to create access log entry in store") + } + return nil +} + +// GetAccountAccessLogs retrieves access logs for a given account with pagination and filtering +func (s *SqlStore) GetAccountAccessLogs(ctx context.Context, lockStrength LockingStrength, accountID string, filter accesslogs.AccessLogFilter) ([]*accesslogs.AccessLogEntry, int64, error) { + var logs []*accesslogs.AccessLogEntry + var totalCount int64 + + baseQuery := s.db.WithContext(ctx). + Model(&accesslogs.AccessLogEntry{}). + Where(accountIDCondition, accountID) + + baseQuery = s.applyAccessLogFilters(baseQuery, filter) + + if err := baseQuery.Count(&totalCount).Error; err != nil { + log.WithContext(ctx).Errorf("failed to count access logs: %v", err) + return nil, 0, status.Errorf(status.Internal, "failed to count access logs") + } + + query := s.db.WithContext(ctx). + Where(accountIDCondition, accountID) + + query = s.applyAccessLogFilters(query, filter) + + query = query. + Order("timestamp DESC"). + Limit(filter.GetLimit()). + Offset(filter.GetOffset()) + + if lockStrength != LockingStrengthNone { + query = query.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + result := query.Find(&logs) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to get access logs from store: %v", result.Error) + return nil, 0, status.Errorf(status.Internal, "failed to get access logs from store") + } + + return logs, totalCount, nil +} + +// applyAccessLogFilters applies filter conditions to the query +func (s *SqlStore) applyAccessLogFilters(query *gorm.DB, filter accesslogs.AccessLogFilter) *gorm.DB { + if filter.Search != nil { + searchPattern := "%" + *filter.Search + "%" + query = query.Where( + "id LIKE ? OR location_connection_ip LIKE ? OR host LIKE ? OR path LIKE ? OR CONCAT(host, path) LIKE ? OR user_id IN (SELECT id FROM users WHERE email LIKE ? OR name LIKE ?)", + searchPattern, searchPattern, searchPattern, searchPattern, searchPattern, searchPattern, searchPattern, + ) + } + + if filter.SourceIP != nil { + query = query.Where("location_connection_ip = ?", *filter.SourceIP) + } + + if filter.Host != nil { + query = query.Where("host = ?", *filter.Host) + } + + if filter.Path != nil { + // Support LIKE pattern for path filtering + query = query.Where("path LIKE ?", "%"+*filter.Path+"%") + } + + if filter.UserID != nil { + query = query.Where("user_id = ?", *filter.UserID) + } + + if filter.Method != nil { + query = query.Where("method = ?", *filter.Method) + } + + if filter.Status != nil { + switch *filter.Status { + case "success": + query = query.Where("status_code >= ? AND status_code < ?", 200, 400) + case "failed": + query = query.Where("status_code < ? OR status_code >= ?", 200, 400) + } + } + + if filter.StatusCode != nil { + query = query.Where("status_code = ?", *filter.StatusCode) + } + + if filter.StartDate != nil { + query = query.Where("timestamp >= ?", *filter.StartDate) + } + + if filter.EndDate != nil { + query = query.Where("timestamp <= ?", *filter.EndDate) + } + + return query +} + +func (s *SqlStore) GetServiceTargetByTargetID(ctx context.Context, lockStrength LockingStrength, accountID string, targetID string) (*reverseproxy.Target, error) { + tx := s.db + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var target *reverseproxy.Target + result := tx.Take(&target, "account_id = ? AND target_id = ?", accountID, targetID) + if result.Error != nil { + if errors.Is(result.Error, gorm.ErrRecordNotFound) { + return nil, status.Errorf(status.NotFound, "service target with ID %s not found", targetID) + } + + log.WithContext(ctx).Errorf("failed to get service target from store: %v", result.Error) + return nil, status.Errorf(status.Internal, "failed to get service target from store") + } + + return target, nil +} diff --git a/management/server/store/sqlstore_bench_test.go b/management/server/store/sqlstore_bench_test.go index 350a1da83..fa9a9dbf5 100644 --- a/management/server/store/sqlstore_bench_test.go +++ b/management/server/store/sqlstore_bench_test.go @@ -20,6 +20,7 @@ import ( "github.com/stretchr/testify/assert" nbdns "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" networkTypes "github.com/netbirdio/netbird/management/server/networks/types" @@ -263,7 +264,7 @@ func setupBenchmarkDB(b testing.TB) (*SqlStore, func(), string) { &types.Policy{}, &types.PolicyRule{}, &route.Route{}, &nbdns.NameServerGroup{}, &posture.Checks{}, &networkTypes.Network{}, &routerTypes.NetworkRouter{}, &resourceTypes.NetworkResource{}, - &types.AccountOnboarding{}, + &types.AccountOnboarding{}, &reverseproxy.Service{}, &reverseproxy.Target{}, } for i := len(models) - 1; i >= 0; i-- { diff --git a/management/server/store/store.go b/management/server/store/store.go index 3928ce3f0..a8e44a438 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -1,5 +1,7 @@ package store +//go:generate go run github.com/golang/mock/mockgen -package store -destination=store_mock.go -source=./store.go -build_flags=-mod=mod + import ( "context" "errors" @@ -23,6 +25,9 @@ import ( "gorm.io/gorm" "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" "github.com/netbirdio/netbird/management/internals/modules/zones" "github.com/netbirdio/netbird/management/internals/modules/zones/records" "github.com/netbirdio/netbird/management/server/telemetry" @@ -106,6 +111,12 @@ type Store interface { SavePAT(ctx context.Context, pat *types.PersonalAccessToken) error DeletePAT(ctx context.Context, userID, patID string) error + GetProxyAccessTokenByHashedToken(ctx context.Context, lockStrength LockingStrength, hashedToken types.HashedProxyToken) (*types.ProxyAccessToken, error) + GetAllProxyAccessTokens(ctx context.Context, lockStrength LockingStrength) ([]*types.ProxyAccessToken, error) + SaveProxyAccessToken(ctx context.Context, token *types.ProxyAccessToken) error + RevokeProxyAccessToken(ctx context.Context, tokenID string) error + MarkProxyAccessTokenUsed(ctx context.Context, tokenID string) error + GetAccountGroups(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types.Group, error) GetResourceGroups(ctx context.Context, lockStrength LockingStrength, accountID, resourceID string) ([]*types.Group, error) GetGroupByID(ctx context.Context, lockStrength LockingStrength, accountID, groupID string) (*types.Group, error) @@ -240,6 +251,25 @@ type Store interface { MarkPendingJobsAsFailed(ctx context.Context, accountID, peerID, jobID, reason string) error MarkAllPendingJobsAsFailed(ctx context.Context, accountID, peerID, reason string) error GetPeerIDByKey(ctx context.Context, lockStrength LockingStrength, key string) (string, error) + + CreateService(ctx context.Context, service *reverseproxy.Service) error + UpdateService(ctx context.Context, service *reverseproxy.Service) error + DeleteService(ctx context.Context, accountID, serviceID string) error + GetServiceByID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) (*reverseproxy.Service, error) + GetServiceByDomain(ctx context.Context, accountID, domain string) (*reverseproxy.Service, error) + GetServices(ctx context.Context, lockStrength LockingStrength) ([]*reverseproxy.Service, error) + GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) + + GetCustomDomain(ctx context.Context, accountID string, domainID string) (*domain.Domain, error) + ListFreeDomains(ctx context.Context, accountID string) ([]string, error) + ListCustomDomains(ctx context.Context, accountID string) ([]*domain.Domain, error) + CreateCustomDomain(ctx context.Context, accountID string, domainName string, targetCluster string, validated bool) (*domain.Domain, error) + UpdateCustomDomain(ctx context.Context, accountID string, d *domain.Domain) (*domain.Domain, error) + DeleteCustomDomain(ctx context.Context, accountID string, domainID string) error + + CreateAccessLog(ctx context.Context, log *accesslogs.AccessLogEntry) error + GetAccountAccessLogs(ctx context.Context, lockStrength LockingStrength, accountID string, filter accesslogs.AccessLogFilter) ([]*accesslogs.AccessLogEntry, int64, error) + GetServiceTargetByTargetID(ctx context.Context, lockStrength LockingStrength, accountID string, targetID string) (*reverseproxy.Target, error) } const ( diff --git a/management/server/store/store_mock.go b/management/server/store/store_mock.go new file mode 100644 index 000000000..2f451dc43 --- /dev/null +++ b/management/server/store/store_mock.go @@ -0,0 +1,2745 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./store.go + +// Package store is a generated GoMock package. +package store + +import ( + context "context" + net "net" + reflect "reflect" + time "time" + + gomock "github.com/golang/mock/gomock" + dns "github.com/netbirdio/netbird/dns" + reverseproxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + accesslogs "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" + domain "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" + zones "github.com/netbirdio/netbird/management/internals/modules/zones" + records "github.com/netbirdio/netbird/management/internals/modules/zones/records" + types "github.com/netbirdio/netbird/management/server/networks/resources/types" + types0 "github.com/netbirdio/netbird/management/server/networks/routers/types" + types1 "github.com/netbirdio/netbird/management/server/networks/types" + peer "github.com/netbirdio/netbird/management/server/peer" + posture "github.com/netbirdio/netbird/management/server/posture" + types2 "github.com/netbirdio/netbird/management/server/types" + route "github.com/netbirdio/netbird/route" + crypt "github.com/netbirdio/netbird/util/crypt" +) + +// MockStore is a mock of Store interface. +type MockStore struct { + ctrl *gomock.Controller + recorder *MockStoreMockRecorder +} + +// MockStoreMockRecorder is the mock recorder for MockStore. +type MockStoreMockRecorder struct { + mock *MockStore +} + +// NewMockStore creates a new mock instance. +func NewMockStore(ctrl *gomock.Controller) *MockStore { + mock := &MockStore{ctrl: ctrl} + mock.recorder = &MockStoreMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStore) EXPECT() *MockStoreMockRecorder { + return m.recorder +} + +// AccountExists mocks base method. +func (m *MockStore) AccountExists(ctx context.Context, lockStrength LockingStrength, id string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AccountExists", ctx, lockStrength, id) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AccountExists indicates an expected call of AccountExists. +func (mr *MockStoreMockRecorder) AccountExists(ctx, lockStrength, id interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AccountExists", reflect.TypeOf((*MockStore)(nil).AccountExists), ctx, lockStrength, id) +} + +// AcquireGlobalLock mocks base method. +func (m *MockStore) AcquireGlobalLock(ctx context.Context) func() { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AcquireGlobalLock", ctx) + ret0, _ := ret[0].(func()) + return ret0 +} + +// AcquireGlobalLock indicates an expected call of AcquireGlobalLock. +func (mr *MockStoreMockRecorder) AcquireGlobalLock(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcquireGlobalLock", reflect.TypeOf((*MockStore)(nil).AcquireGlobalLock), ctx) +} + +// AddPeerToAccount mocks base method. +func (m *MockStore) AddPeerToAccount(ctx context.Context, peer *peer.Peer) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddPeerToAccount", ctx, peer) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddPeerToAccount indicates an expected call of AddPeerToAccount. +func (mr *MockStoreMockRecorder) AddPeerToAccount(ctx, peer interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPeerToAccount", reflect.TypeOf((*MockStore)(nil).AddPeerToAccount), ctx, peer) +} + +// AddPeerToAllGroup mocks base method. +func (m *MockStore) AddPeerToAllGroup(ctx context.Context, accountID, peerID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddPeerToAllGroup", ctx, accountID, peerID) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddPeerToAllGroup indicates an expected call of AddPeerToAllGroup. +func (mr *MockStoreMockRecorder) AddPeerToAllGroup(ctx, accountID, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPeerToAllGroup", reflect.TypeOf((*MockStore)(nil).AddPeerToAllGroup), ctx, accountID, peerID) +} + +// AddPeerToGroup mocks base method. +func (m *MockStore) AddPeerToGroup(ctx context.Context, accountID, peerId, groupID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddPeerToGroup", ctx, accountID, peerId, groupID) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddPeerToGroup indicates an expected call of AddPeerToGroup. +func (mr *MockStoreMockRecorder) AddPeerToGroup(ctx, accountID, peerId, groupID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPeerToGroup", reflect.TypeOf((*MockStore)(nil).AddPeerToGroup), ctx, accountID, peerId, groupID) +} + +// AddResourceToGroup mocks base method. +func (m *MockStore) AddResourceToGroup(ctx context.Context, accountId, groupID string, resource *types2.Resource) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddResourceToGroup", ctx, accountId, groupID, resource) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddResourceToGroup indicates an expected call of AddResourceToGroup. +func (mr *MockStoreMockRecorder) AddResourceToGroup(ctx, accountId, groupID, resource interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddResourceToGroup", reflect.TypeOf((*MockStore)(nil).AddResourceToGroup), ctx, accountId, groupID, resource) +} + +// ApproveAccountPeers mocks base method. +func (m *MockStore) ApproveAccountPeers(ctx context.Context, accountID string) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ApproveAccountPeers", ctx, accountID) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ApproveAccountPeers indicates an expected call of ApproveAccountPeers. +func (mr *MockStoreMockRecorder) ApproveAccountPeers(ctx, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApproveAccountPeers", reflect.TypeOf((*MockStore)(nil).ApproveAccountPeers), ctx, accountID) +} + +// Close mocks base method. +func (m *MockStore) Close(ctx context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close", ctx) + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockStoreMockRecorder) Close(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockStore)(nil).Close), ctx) +} + +// CompletePeerJob mocks base method. +func (m *MockStore) CompletePeerJob(ctx context.Context, job *types2.Job) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CompletePeerJob", ctx, job) + ret0, _ := ret[0].(error) + return ret0 +} + +// CompletePeerJob indicates an expected call of CompletePeerJob. +func (mr *MockStoreMockRecorder) CompletePeerJob(ctx, job interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompletePeerJob", reflect.TypeOf((*MockStore)(nil).CompletePeerJob), ctx, job) +} + +// CountAccountsByPrivateDomain mocks base method. +func (m *MockStore) CountAccountsByPrivateDomain(ctx context.Context, domain string) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CountAccountsByPrivateDomain", ctx, domain) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CountAccountsByPrivateDomain indicates an expected call of CountAccountsByPrivateDomain. +func (mr *MockStoreMockRecorder) CountAccountsByPrivateDomain(ctx, domain interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountAccountsByPrivateDomain", reflect.TypeOf((*MockStore)(nil).CountAccountsByPrivateDomain), ctx, domain) +} + +// CreateAccessLog mocks base method. +func (m *MockStore) CreateAccessLog(ctx context.Context, log *accesslogs.AccessLogEntry) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateAccessLog", ctx, log) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateAccessLog indicates an expected call of CreateAccessLog. +func (mr *MockStoreMockRecorder) CreateAccessLog(ctx, log interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAccessLog", reflect.TypeOf((*MockStore)(nil).CreateAccessLog), ctx, log) +} + +// CreateCustomDomain mocks base method. +func (m *MockStore) CreateCustomDomain(ctx context.Context, accountID, domainName, targetCluster string, validated bool) (*domain.Domain, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateCustomDomain", ctx, accountID, domainName, targetCluster, validated) + ret0, _ := ret[0].(*domain.Domain) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateCustomDomain indicates an expected call of CreateCustomDomain. +func (mr *MockStoreMockRecorder) CreateCustomDomain(ctx, accountID, domainName, targetCluster, validated interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateCustomDomain", reflect.TypeOf((*MockStore)(nil).CreateCustomDomain), ctx, accountID, domainName, targetCluster, validated) +} + +// CreateDNSRecord mocks base method. +func (m *MockStore) CreateDNSRecord(ctx context.Context, record *records.Record) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateDNSRecord", ctx, record) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateDNSRecord indicates an expected call of CreateDNSRecord. +func (mr *MockStoreMockRecorder) CreateDNSRecord(ctx, record interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateDNSRecord", reflect.TypeOf((*MockStore)(nil).CreateDNSRecord), ctx, record) +} + +// CreateGroup mocks base method. +func (m *MockStore) CreateGroup(ctx context.Context, group *types2.Group) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateGroup", ctx, group) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateGroup indicates an expected call of CreateGroup. +func (mr *MockStoreMockRecorder) CreateGroup(ctx, group interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateGroup", reflect.TypeOf((*MockStore)(nil).CreateGroup), ctx, group) +} + +// CreateGroups mocks base method. +func (m *MockStore) CreateGroups(ctx context.Context, accountID string, groups []*types2.Group) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateGroups", ctx, accountID, groups) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateGroups indicates an expected call of CreateGroups. +func (mr *MockStoreMockRecorder) CreateGroups(ctx, accountID, groups interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateGroups", reflect.TypeOf((*MockStore)(nil).CreateGroups), ctx, accountID, groups) +} + +// CreatePeerJob mocks base method. +func (m *MockStore) CreatePeerJob(ctx context.Context, job *types2.Job) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreatePeerJob", ctx, job) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreatePeerJob indicates an expected call of CreatePeerJob. +func (mr *MockStoreMockRecorder) CreatePeerJob(ctx, job interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePeerJob", reflect.TypeOf((*MockStore)(nil).CreatePeerJob), ctx, job) +} + +// CreatePolicy mocks base method. +func (m *MockStore) CreatePolicy(ctx context.Context, policy *types2.Policy) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreatePolicy", ctx, policy) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreatePolicy indicates an expected call of CreatePolicy. +func (mr *MockStoreMockRecorder) CreatePolicy(ctx, policy interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePolicy", reflect.TypeOf((*MockStore)(nil).CreatePolicy), ctx, policy) +} + +// CreateService mocks base method. +func (m *MockStore) CreateService(ctx context.Context, service *reverseproxy.Service) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateService", ctx, service) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateService indicates an expected call of CreateService. +func (mr *MockStoreMockRecorder) CreateService(ctx, service interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateService", reflect.TypeOf((*MockStore)(nil).CreateService), ctx, service) +} + +// CreateZone mocks base method. +func (m *MockStore) CreateZone(ctx context.Context, zone *zones.Zone) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateZone", ctx, zone) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateZone indicates an expected call of CreateZone. +func (mr *MockStoreMockRecorder) CreateZone(ctx, zone interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateZone", reflect.TypeOf((*MockStore)(nil).CreateZone), ctx, zone) +} + +// DeleteAccount mocks base method. +func (m *MockStore) DeleteAccount(ctx context.Context, account *types2.Account) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteAccount", ctx, account) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteAccount indicates an expected call of DeleteAccount. +func (mr *MockStoreMockRecorder) DeleteAccount(ctx, account interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAccount", reflect.TypeOf((*MockStore)(nil).DeleteAccount), ctx, account) +} + +// DeleteCustomDomain mocks base method. +func (m *MockStore) DeleteCustomDomain(ctx context.Context, accountID, domainID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteCustomDomain", ctx, accountID, domainID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteCustomDomain indicates an expected call of DeleteCustomDomain. +func (mr *MockStoreMockRecorder) DeleteCustomDomain(ctx, accountID, domainID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCustomDomain", reflect.TypeOf((*MockStore)(nil).DeleteCustomDomain), ctx, accountID, domainID) +} + +// DeleteDNSRecord mocks base method. +func (m *MockStore) DeleteDNSRecord(ctx context.Context, accountID, zoneID, recordID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteDNSRecord", ctx, accountID, zoneID, recordID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteDNSRecord indicates an expected call of DeleteDNSRecord. +func (mr *MockStoreMockRecorder) DeleteDNSRecord(ctx, accountID, zoneID, recordID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteDNSRecord", reflect.TypeOf((*MockStore)(nil).DeleteDNSRecord), ctx, accountID, zoneID, recordID) +} + +// DeleteGroup mocks base method. +func (m *MockStore) DeleteGroup(ctx context.Context, accountID, groupID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteGroup", ctx, accountID, groupID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteGroup indicates an expected call of DeleteGroup. +func (mr *MockStoreMockRecorder) DeleteGroup(ctx, accountID, groupID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteGroup", reflect.TypeOf((*MockStore)(nil).DeleteGroup), ctx, accountID, groupID) +} + +// DeleteGroups mocks base method. +func (m *MockStore) DeleteGroups(ctx context.Context, accountID string, groupIDs []string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteGroups", ctx, accountID, groupIDs) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteGroups indicates an expected call of DeleteGroups. +func (mr *MockStoreMockRecorder) DeleteGroups(ctx, accountID, groupIDs interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteGroups", reflect.TypeOf((*MockStore)(nil).DeleteGroups), ctx, accountID, groupIDs) +} + +// DeleteHashedPAT2TokenIDIndex mocks base method. +func (m *MockStore) DeleteHashedPAT2TokenIDIndex(hashedToken string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteHashedPAT2TokenIDIndex", hashedToken) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteHashedPAT2TokenIDIndex indicates an expected call of DeleteHashedPAT2TokenIDIndex. +func (mr *MockStoreMockRecorder) DeleteHashedPAT2TokenIDIndex(hashedToken interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteHashedPAT2TokenIDIndex", reflect.TypeOf((*MockStore)(nil).DeleteHashedPAT2TokenIDIndex), hashedToken) +} + +// DeleteNameServerGroup mocks base method. +func (m *MockStore) DeleteNameServerGroup(ctx context.Context, accountID, nameServerGroupID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteNameServerGroup", ctx, accountID, nameServerGroupID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteNameServerGroup indicates an expected call of DeleteNameServerGroup. +func (mr *MockStoreMockRecorder) DeleteNameServerGroup(ctx, accountID, nameServerGroupID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNameServerGroup", reflect.TypeOf((*MockStore)(nil).DeleteNameServerGroup), ctx, accountID, nameServerGroupID) +} + +// DeleteNetwork mocks base method. +func (m *MockStore) DeleteNetwork(ctx context.Context, accountID, networkID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteNetwork", ctx, accountID, networkID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteNetwork indicates an expected call of DeleteNetwork. +func (mr *MockStoreMockRecorder) DeleteNetwork(ctx, accountID, networkID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNetwork", reflect.TypeOf((*MockStore)(nil).DeleteNetwork), ctx, accountID, networkID) +} + +// DeleteNetworkResource mocks base method. +func (m *MockStore) DeleteNetworkResource(ctx context.Context, accountID, resourceID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteNetworkResource", ctx, accountID, resourceID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteNetworkResource indicates an expected call of DeleteNetworkResource. +func (mr *MockStoreMockRecorder) DeleteNetworkResource(ctx, accountID, resourceID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNetworkResource", reflect.TypeOf((*MockStore)(nil).DeleteNetworkResource), ctx, accountID, resourceID) +} + +// DeleteNetworkRouter mocks base method. +func (m *MockStore) DeleteNetworkRouter(ctx context.Context, accountID, routerID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteNetworkRouter", ctx, accountID, routerID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteNetworkRouter indicates an expected call of DeleteNetworkRouter. +func (mr *MockStoreMockRecorder) DeleteNetworkRouter(ctx, accountID, routerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNetworkRouter", reflect.TypeOf((*MockStore)(nil).DeleteNetworkRouter), ctx, accountID, routerID) +} + +// DeletePAT mocks base method. +func (m *MockStore) DeletePAT(ctx context.Context, userID, patID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePAT", ctx, userID, patID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeletePAT indicates an expected call of DeletePAT. +func (mr *MockStoreMockRecorder) DeletePAT(ctx, userID, patID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePAT", reflect.TypeOf((*MockStore)(nil).DeletePAT), ctx, userID, patID) +} + +// DeletePeer mocks base method. +func (m *MockStore) DeletePeer(ctx context.Context, accountID, peerID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePeer", ctx, accountID, peerID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeletePeer indicates an expected call of DeletePeer. +func (mr *MockStoreMockRecorder) DeletePeer(ctx, accountID, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePeer", reflect.TypeOf((*MockStore)(nil).DeletePeer), ctx, accountID, peerID) +} + +// DeletePolicy mocks base method. +func (m *MockStore) DeletePolicy(ctx context.Context, accountID, policyID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePolicy", ctx, accountID, policyID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeletePolicy indicates an expected call of DeletePolicy. +func (mr *MockStoreMockRecorder) DeletePolicy(ctx, accountID, policyID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePolicy", reflect.TypeOf((*MockStore)(nil).DeletePolicy), ctx, accountID, policyID) +} + +// DeletePostureChecks mocks base method. +func (m *MockStore) DeletePostureChecks(ctx context.Context, accountID, postureChecksID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePostureChecks", ctx, accountID, postureChecksID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeletePostureChecks indicates an expected call of DeletePostureChecks. +func (mr *MockStoreMockRecorder) DeletePostureChecks(ctx, accountID, postureChecksID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePostureChecks", reflect.TypeOf((*MockStore)(nil).DeletePostureChecks), ctx, accountID, postureChecksID) +} + +// DeleteRoute mocks base method. +func (m *MockStore) DeleteRoute(ctx context.Context, accountID, routeID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteRoute", ctx, accountID, routeID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteRoute indicates an expected call of DeleteRoute. +func (mr *MockStoreMockRecorder) DeleteRoute(ctx, accountID, routeID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRoute", reflect.TypeOf((*MockStore)(nil).DeleteRoute), ctx, accountID, routeID) +} + +// DeleteService mocks base method. +func (m *MockStore) DeleteService(ctx context.Context, accountID, serviceID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteService", ctx, accountID, serviceID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteService indicates an expected call of DeleteService. +func (mr *MockStoreMockRecorder) DeleteService(ctx, accountID, serviceID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteService", reflect.TypeOf((*MockStore)(nil).DeleteService), ctx, accountID, serviceID) +} + +// DeleteSetupKey mocks base method. +func (m *MockStore) DeleteSetupKey(ctx context.Context, accountID, keyID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteSetupKey", ctx, accountID, keyID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteSetupKey indicates an expected call of DeleteSetupKey. +func (mr *MockStoreMockRecorder) DeleteSetupKey(ctx, accountID, keyID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSetupKey", reflect.TypeOf((*MockStore)(nil).DeleteSetupKey), ctx, accountID, keyID) +} + +// DeleteTokenID2UserIDIndex mocks base method. +func (m *MockStore) DeleteTokenID2UserIDIndex(tokenID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteTokenID2UserIDIndex", tokenID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteTokenID2UserIDIndex indicates an expected call of DeleteTokenID2UserIDIndex. +func (mr *MockStoreMockRecorder) DeleteTokenID2UserIDIndex(tokenID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTokenID2UserIDIndex", reflect.TypeOf((*MockStore)(nil).DeleteTokenID2UserIDIndex), tokenID) +} + +// DeleteUser mocks base method. +func (m *MockStore) DeleteUser(ctx context.Context, accountID, userID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteUser", ctx, accountID, userID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteUser indicates an expected call of DeleteUser. +func (mr *MockStoreMockRecorder) DeleteUser(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUser", reflect.TypeOf((*MockStore)(nil).DeleteUser), ctx, accountID, userID) +} + +// DeleteUserInvite mocks base method. +func (m *MockStore) DeleteUserInvite(ctx context.Context, inviteID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteUserInvite", ctx, inviteID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteUserInvite indicates an expected call of DeleteUserInvite. +func (mr *MockStoreMockRecorder) DeleteUserInvite(ctx, inviteID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUserInvite", reflect.TypeOf((*MockStore)(nil).DeleteUserInvite), ctx, inviteID) +} + +// DeleteZone mocks base method. +func (m *MockStore) DeleteZone(ctx context.Context, accountID, zoneID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteZone", ctx, accountID, zoneID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteZone indicates an expected call of DeleteZone. +func (mr *MockStoreMockRecorder) DeleteZone(ctx, accountID, zoneID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteZone", reflect.TypeOf((*MockStore)(nil).DeleteZone), ctx, accountID, zoneID) +} + +// DeleteZoneDNSRecords mocks base method. +func (m *MockStore) DeleteZoneDNSRecords(ctx context.Context, accountID, zoneID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteZoneDNSRecords", ctx, accountID, zoneID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteZoneDNSRecords indicates an expected call of DeleteZoneDNSRecords. +func (mr *MockStoreMockRecorder) DeleteZoneDNSRecords(ctx, accountID, zoneID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteZoneDNSRecords", reflect.TypeOf((*MockStore)(nil).DeleteZoneDNSRecords), ctx, accountID, zoneID) +} + +// ExecuteInTransaction mocks base method. +func (m *MockStore) ExecuteInTransaction(ctx context.Context, f func(Store) error) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteInTransaction", ctx, f) + ret0, _ := ret[0].(error) + return ret0 +} + +// ExecuteInTransaction indicates an expected call of ExecuteInTransaction. +func (mr *MockStoreMockRecorder) ExecuteInTransaction(ctx, f interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteInTransaction", reflect.TypeOf((*MockStore)(nil).ExecuteInTransaction), ctx, f) +} + +// GetAccount mocks base method. +func (m *MockStore) GetAccount(ctx context.Context, accountID string) (*types2.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccount", ctx, accountID) + ret0, _ := ret[0].(*types2.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccount indicates an expected call of GetAccount. +func (mr *MockStoreMockRecorder) GetAccount(ctx, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccount", reflect.TypeOf((*MockStore)(nil).GetAccount), ctx, accountID) +} + +// GetAccountAccessLogs mocks base method. +func (m *MockStore) GetAccountAccessLogs(ctx context.Context, lockStrength LockingStrength, accountID string, filter accesslogs.AccessLogFilter) ([]*accesslogs.AccessLogEntry, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountAccessLogs", ctx, lockStrength, accountID, filter) + ret0, _ := ret[0].([]*accesslogs.AccessLogEntry) + ret1, _ := ret[1].(int64) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetAccountAccessLogs indicates an expected call of GetAccountAccessLogs. +func (mr *MockStoreMockRecorder) GetAccountAccessLogs(ctx, lockStrength, accountID, filter interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountAccessLogs", reflect.TypeOf((*MockStore)(nil).GetAccountAccessLogs), ctx, lockStrength, accountID, filter) +} + +// GetAccountByPeerID mocks base method. +func (m *MockStore) GetAccountByPeerID(ctx context.Context, peerID string) (*types2.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountByPeerID", ctx, peerID) + ret0, _ := ret[0].(*types2.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountByPeerID indicates an expected call of GetAccountByPeerID. +func (mr *MockStoreMockRecorder) GetAccountByPeerID(ctx, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountByPeerID", reflect.TypeOf((*MockStore)(nil).GetAccountByPeerID), ctx, peerID) +} + +// GetAccountByPeerPubKey mocks base method. +func (m *MockStore) GetAccountByPeerPubKey(ctx context.Context, peerKey string) (*types2.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountByPeerPubKey", ctx, peerKey) + ret0, _ := ret[0].(*types2.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountByPeerPubKey indicates an expected call of GetAccountByPeerPubKey. +func (mr *MockStoreMockRecorder) GetAccountByPeerPubKey(ctx, peerKey interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountByPeerPubKey", reflect.TypeOf((*MockStore)(nil).GetAccountByPeerPubKey), ctx, peerKey) +} + +// GetAccountByPrivateDomain mocks base method. +func (m *MockStore) GetAccountByPrivateDomain(ctx context.Context, domain string) (*types2.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountByPrivateDomain", ctx, domain) + ret0, _ := ret[0].(*types2.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountByPrivateDomain indicates an expected call of GetAccountByPrivateDomain. +func (mr *MockStoreMockRecorder) GetAccountByPrivateDomain(ctx, domain interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountByPrivateDomain", reflect.TypeOf((*MockStore)(nil).GetAccountByPrivateDomain), ctx, domain) +} + +// GetAccountBySetupKey mocks base method. +func (m *MockStore) GetAccountBySetupKey(ctx context.Context, setupKey string) (*types2.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountBySetupKey", ctx, setupKey) + ret0, _ := ret[0].(*types2.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountBySetupKey indicates an expected call of GetAccountBySetupKey. +func (mr *MockStoreMockRecorder) GetAccountBySetupKey(ctx, setupKey interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountBySetupKey", reflect.TypeOf((*MockStore)(nil).GetAccountBySetupKey), ctx, setupKey) +} + +// GetAccountByUser mocks base method. +func (m *MockStore) GetAccountByUser(ctx context.Context, userID string) (*types2.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountByUser", ctx, userID) + ret0, _ := ret[0].(*types2.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountByUser indicates an expected call of GetAccountByUser. +func (mr *MockStoreMockRecorder) GetAccountByUser(ctx, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountByUser", reflect.TypeOf((*MockStore)(nil).GetAccountByUser), ctx, userID) +} + +// GetAccountCreatedBy mocks base method. +func (m *MockStore) GetAccountCreatedBy(ctx context.Context, lockStrength LockingStrength, accountID string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountCreatedBy", ctx, lockStrength, accountID) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountCreatedBy indicates an expected call of GetAccountCreatedBy. +func (mr *MockStoreMockRecorder) GetAccountCreatedBy(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountCreatedBy", reflect.TypeOf((*MockStore)(nil).GetAccountCreatedBy), ctx, lockStrength, accountID) +} + +// GetAccountDNSSettings mocks base method. +func (m *MockStore) GetAccountDNSSettings(ctx context.Context, lockStrength LockingStrength, accountID string) (*types2.DNSSettings, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountDNSSettings", ctx, lockStrength, accountID) + ret0, _ := ret[0].(*types2.DNSSettings) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountDNSSettings indicates an expected call of GetAccountDNSSettings. +func (mr *MockStoreMockRecorder) GetAccountDNSSettings(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountDNSSettings", reflect.TypeOf((*MockStore)(nil).GetAccountDNSSettings), ctx, lockStrength, accountID) +} + +// GetAccountDomainAndCategory mocks base method. +func (m *MockStore) GetAccountDomainAndCategory(ctx context.Context, lockStrength LockingStrength, accountID string) (string, string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountDomainAndCategory", ctx, lockStrength, accountID) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(string) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetAccountDomainAndCategory indicates an expected call of GetAccountDomainAndCategory. +func (mr *MockStoreMockRecorder) GetAccountDomainAndCategory(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountDomainAndCategory", reflect.TypeOf((*MockStore)(nil).GetAccountDomainAndCategory), ctx, lockStrength, accountID) +} + +// GetAccountGroupPeers mocks base method. +func (m *MockStore) GetAccountGroupPeers(ctx context.Context, lockStrength LockingStrength, accountID string) (map[string]map[string]struct{}, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountGroupPeers", ctx, lockStrength, accountID) + ret0, _ := ret[0].(map[string]map[string]struct{}) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountGroupPeers indicates an expected call of GetAccountGroupPeers. +func (mr *MockStoreMockRecorder) GetAccountGroupPeers(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountGroupPeers", reflect.TypeOf((*MockStore)(nil).GetAccountGroupPeers), ctx, lockStrength, accountID) +} + +// GetAccountGroups mocks base method. +func (m *MockStore) GetAccountGroups(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types2.Group, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountGroups", ctx, lockStrength, accountID) + ret0, _ := ret[0].([]*types2.Group) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountGroups indicates an expected call of GetAccountGroups. +func (mr *MockStoreMockRecorder) GetAccountGroups(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountGroups", reflect.TypeOf((*MockStore)(nil).GetAccountGroups), ctx, lockStrength, accountID) +} + +// GetAccountIDByPeerID mocks base method. +func (m *MockStore) GetAccountIDByPeerID(ctx context.Context, lockStrength LockingStrength, peerID string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountIDByPeerID", ctx, lockStrength, peerID) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountIDByPeerID indicates an expected call of GetAccountIDByPeerID. +func (mr *MockStoreMockRecorder) GetAccountIDByPeerID(ctx, lockStrength, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountIDByPeerID", reflect.TypeOf((*MockStore)(nil).GetAccountIDByPeerID), ctx, lockStrength, peerID) +} + +// GetAccountIDByPeerPubKey mocks base method. +func (m *MockStore) GetAccountIDByPeerPubKey(ctx context.Context, peerKey string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountIDByPeerPubKey", ctx, peerKey) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountIDByPeerPubKey indicates an expected call of GetAccountIDByPeerPubKey. +func (mr *MockStoreMockRecorder) GetAccountIDByPeerPubKey(ctx, peerKey interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountIDByPeerPubKey", reflect.TypeOf((*MockStore)(nil).GetAccountIDByPeerPubKey), ctx, peerKey) +} + +// GetAccountIDByPrivateDomain mocks base method. +func (m *MockStore) GetAccountIDByPrivateDomain(ctx context.Context, lockStrength LockingStrength, domain string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountIDByPrivateDomain", ctx, lockStrength, domain) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountIDByPrivateDomain indicates an expected call of GetAccountIDByPrivateDomain. +func (mr *MockStoreMockRecorder) GetAccountIDByPrivateDomain(ctx, lockStrength, domain interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountIDByPrivateDomain", reflect.TypeOf((*MockStore)(nil).GetAccountIDByPrivateDomain), ctx, lockStrength, domain) +} + +// GetAccountIDBySetupKey mocks base method. +func (m *MockStore) GetAccountIDBySetupKey(ctx context.Context, peerKey string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountIDBySetupKey", ctx, peerKey) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountIDBySetupKey indicates an expected call of GetAccountIDBySetupKey. +func (mr *MockStoreMockRecorder) GetAccountIDBySetupKey(ctx, peerKey interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountIDBySetupKey", reflect.TypeOf((*MockStore)(nil).GetAccountIDBySetupKey), ctx, peerKey) +} + +// GetAccountIDByUserID mocks base method. +func (m *MockStore) GetAccountIDByUserID(ctx context.Context, lockStrength LockingStrength, userID string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountIDByUserID", ctx, lockStrength, userID) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountIDByUserID indicates an expected call of GetAccountIDByUserID. +func (mr *MockStoreMockRecorder) GetAccountIDByUserID(ctx, lockStrength, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountIDByUserID", reflect.TypeOf((*MockStore)(nil).GetAccountIDByUserID), ctx, lockStrength, userID) +} + +// GetAccountMeta mocks base method. +func (m *MockStore) GetAccountMeta(ctx context.Context, lockStrength LockingStrength, accountID string) (*types2.AccountMeta, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountMeta", ctx, lockStrength, accountID) + ret0, _ := ret[0].(*types2.AccountMeta) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountMeta indicates an expected call of GetAccountMeta. +func (mr *MockStoreMockRecorder) GetAccountMeta(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountMeta", reflect.TypeOf((*MockStore)(nil).GetAccountMeta), ctx, lockStrength, accountID) +} + +// GetAccountNameServerGroups mocks base method. +func (m *MockStore) GetAccountNameServerGroups(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*dns.NameServerGroup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountNameServerGroups", ctx, lockStrength, accountID) + ret0, _ := ret[0].([]*dns.NameServerGroup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountNameServerGroups indicates an expected call of GetAccountNameServerGroups. +func (mr *MockStoreMockRecorder) GetAccountNameServerGroups(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountNameServerGroups", reflect.TypeOf((*MockStore)(nil).GetAccountNameServerGroups), ctx, lockStrength, accountID) +} + +// GetAccountNetwork mocks base method. +func (m *MockStore) GetAccountNetwork(ctx context.Context, lockStrength LockingStrength, accountId string) (*types2.Network, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountNetwork", ctx, lockStrength, accountId) + ret0, _ := ret[0].(*types2.Network) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountNetwork indicates an expected call of GetAccountNetwork. +func (mr *MockStoreMockRecorder) GetAccountNetwork(ctx, lockStrength, accountId interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountNetwork", reflect.TypeOf((*MockStore)(nil).GetAccountNetwork), ctx, lockStrength, accountId) +} + +// GetAccountNetworks mocks base method. +func (m *MockStore) GetAccountNetworks(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types1.Network, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountNetworks", ctx, lockStrength, accountID) + ret0, _ := ret[0].([]*types1.Network) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountNetworks indicates an expected call of GetAccountNetworks. +func (mr *MockStoreMockRecorder) GetAccountNetworks(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountNetworks", reflect.TypeOf((*MockStore)(nil).GetAccountNetworks), ctx, lockStrength, accountID) +} + +// GetAccountOnboarding mocks base method. +func (m *MockStore) GetAccountOnboarding(ctx context.Context, accountID string) (*types2.AccountOnboarding, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountOnboarding", ctx, accountID) + ret0, _ := ret[0].(*types2.AccountOnboarding) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountOnboarding indicates an expected call of GetAccountOnboarding. +func (mr *MockStoreMockRecorder) GetAccountOnboarding(ctx, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountOnboarding", reflect.TypeOf((*MockStore)(nil).GetAccountOnboarding), ctx, accountID) +} + +// GetAccountOwner mocks base method. +func (m *MockStore) GetAccountOwner(ctx context.Context, lockStrength LockingStrength, accountID string) (*types2.User, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountOwner", ctx, lockStrength, accountID) + ret0, _ := ret[0].(*types2.User) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountOwner indicates an expected call of GetAccountOwner. +func (mr *MockStoreMockRecorder) GetAccountOwner(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountOwner", reflect.TypeOf((*MockStore)(nil).GetAccountOwner), ctx, lockStrength, accountID) +} + +// GetAccountPeers mocks base method. +func (m *MockStore) GetAccountPeers(ctx context.Context, lockStrength LockingStrength, accountID, nameFilter, ipFilter string) ([]*peer.Peer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountPeers", ctx, lockStrength, accountID, nameFilter, ipFilter) + ret0, _ := ret[0].([]*peer.Peer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountPeers indicates an expected call of GetAccountPeers. +func (mr *MockStoreMockRecorder) GetAccountPeers(ctx, lockStrength, accountID, nameFilter, ipFilter interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountPeers", reflect.TypeOf((*MockStore)(nil).GetAccountPeers), ctx, lockStrength, accountID, nameFilter, ipFilter) +} + +// GetAccountPeersWithExpiration mocks base method. +func (m *MockStore) GetAccountPeersWithExpiration(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*peer.Peer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountPeersWithExpiration", ctx, lockStrength, accountID) + ret0, _ := ret[0].([]*peer.Peer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountPeersWithExpiration indicates an expected call of GetAccountPeersWithExpiration. +func (mr *MockStoreMockRecorder) GetAccountPeersWithExpiration(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountPeersWithExpiration", reflect.TypeOf((*MockStore)(nil).GetAccountPeersWithExpiration), ctx, lockStrength, accountID) +} + +// GetAccountPeersWithInactivity mocks base method. +func (m *MockStore) GetAccountPeersWithInactivity(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*peer.Peer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountPeersWithInactivity", ctx, lockStrength, accountID) + ret0, _ := ret[0].([]*peer.Peer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountPeersWithInactivity indicates an expected call of GetAccountPeersWithInactivity. +func (mr *MockStoreMockRecorder) GetAccountPeersWithInactivity(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountPeersWithInactivity", reflect.TypeOf((*MockStore)(nil).GetAccountPeersWithInactivity), ctx, lockStrength, accountID) +} + +// GetAccountPolicies mocks base method. +func (m *MockStore) GetAccountPolicies(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types2.Policy, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountPolicies", ctx, lockStrength, accountID) + ret0, _ := ret[0].([]*types2.Policy) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountPolicies indicates an expected call of GetAccountPolicies. +func (mr *MockStoreMockRecorder) GetAccountPolicies(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountPolicies", reflect.TypeOf((*MockStore)(nil).GetAccountPolicies), ctx, lockStrength, accountID) +} + +// GetAccountPostureChecks mocks base method. +func (m *MockStore) GetAccountPostureChecks(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*posture.Checks, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountPostureChecks", ctx, lockStrength, accountID) + ret0, _ := ret[0].([]*posture.Checks) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountPostureChecks indicates an expected call of GetAccountPostureChecks. +func (mr *MockStoreMockRecorder) GetAccountPostureChecks(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountPostureChecks", reflect.TypeOf((*MockStore)(nil).GetAccountPostureChecks), ctx, lockStrength, accountID) +} + +// GetAccountRoutes mocks base method. +func (m *MockStore) GetAccountRoutes(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*route.Route, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountRoutes", ctx, lockStrength, accountID) + ret0, _ := ret[0].([]*route.Route) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountRoutes indicates an expected call of GetAccountRoutes. +func (mr *MockStoreMockRecorder) GetAccountRoutes(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountRoutes", reflect.TypeOf((*MockStore)(nil).GetAccountRoutes), ctx, lockStrength, accountID) +} + +// GetAccountServices mocks base method. +func (m *MockStore) GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountServices", ctx, lockStrength, accountID) + ret0, _ := ret[0].([]*reverseproxy.Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountServices indicates an expected call of GetAccountServices. +func (mr *MockStoreMockRecorder) GetAccountServices(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountServices", reflect.TypeOf((*MockStore)(nil).GetAccountServices), ctx, lockStrength, accountID) +} + +// GetAccountSettings mocks base method. +func (m *MockStore) GetAccountSettings(ctx context.Context, lockStrength LockingStrength, accountID string) (*types2.Settings, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountSettings", ctx, lockStrength, accountID) + ret0, _ := ret[0].(*types2.Settings) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountSettings indicates an expected call of GetAccountSettings. +func (mr *MockStoreMockRecorder) GetAccountSettings(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountSettings", reflect.TypeOf((*MockStore)(nil).GetAccountSettings), ctx, lockStrength, accountID) +} + +// GetAccountSetupKeys mocks base method. +func (m *MockStore) GetAccountSetupKeys(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types2.SetupKey, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountSetupKeys", ctx, lockStrength, accountID) + ret0, _ := ret[0].([]*types2.SetupKey) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountSetupKeys indicates an expected call of GetAccountSetupKeys. +func (mr *MockStoreMockRecorder) GetAccountSetupKeys(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountSetupKeys", reflect.TypeOf((*MockStore)(nil).GetAccountSetupKeys), ctx, lockStrength, accountID) +} + +// GetAccountUserInvites mocks base method. +func (m *MockStore) GetAccountUserInvites(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types2.UserInviteRecord, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountUserInvites", ctx, lockStrength, accountID) + ret0, _ := ret[0].([]*types2.UserInviteRecord) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountUserInvites indicates an expected call of GetAccountUserInvites. +func (mr *MockStoreMockRecorder) GetAccountUserInvites(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountUserInvites", reflect.TypeOf((*MockStore)(nil).GetAccountUserInvites), ctx, lockStrength, accountID) +} + +// GetAccountUsers mocks base method. +func (m *MockStore) GetAccountUsers(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types2.User, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountUsers", ctx, lockStrength, accountID) + ret0, _ := ret[0].([]*types2.User) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountUsers indicates an expected call of GetAccountUsers. +func (mr *MockStoreMockRecorder) GetAccountUsers(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountUsers", reflect.TypeOf((*MockStore)(nil).GetAccountUsers), ctx, lockStrength, accountID) +} + +// GetAccountZones mocks base method. +func (m *MockStore) GetAccountZones(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*zones.Zone, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountZones", ctx, lockStrength, accountID) + ret0, _ := ret[0].([]*zones.Zone) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountZones indicates an expected call of GetAccountZones. +func (mr *MockStoreMockRecorder) GetAccountZones(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountZones", reflect.TypeOf((*MockStore)(nil).GetAccountZones), ctx, lockStrength, accountID) +} + +// GetAccountsCounter mocks base method. +func (m *MockStore) GetAccountsCounter(ctx context.Context) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountsCounter", ctx) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountsCounter indicates an expected call of GetAccountsCounter. +func (mr *MockStoreMockRecorder) GetAccountsCounter(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountsCounter", reflect.TypeOf((*MockStore)(nil).GetAccountsCounter), ctx) +} + +// GetAllAccounts mocks base method. +func (m *MockStore) GetAllAccounts(ctx context.Context) []*types2.Account { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAllAccounts", ctx) + ret0, _ := ret[0].([]*types2.Account) + return ret0 +} + +// GetAllAccounts indicates an expected call of GetAllAccounts. +func (mr *MockStoreMockRecorder) GetAllAccounts(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllAccounts", reflect.TypeOf((*MockStore)(nil).GetAllAccounts), ctx) +} + +// GetAllEphemeralPeers mocks base method. +func (m *MockStore) GetAllEphemeralPeers(ctx context.Context, lockStrength LockingStrength) ([]*peer.Peer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAllEphemeralPeers", ctx, lockStrength) + ret0, _ := ret[0].([]*peer.Peer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAllEphemeralPeers indicates an expected call of GetAllEphemeralPeers. +func (mr *MockStoreMockRecorder) GetAllEphemeralPeers(ctx, lockStrength interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllEphemeralPeers", reflect.TypeOf((*MockStore)(nil).GetAllEphemeralPeers), ctx, lockStrength) +} + +// GetAllProxyAccessTokens mocks base method. +func (m *MockStore) GetAllProxyAccessTokens(ctx context.Context, lockStrength LockingStrength) ([]*types2.ProxyAccessToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAllProxyAccessTokens", ctx, lockStrength) + ret0, _ := ret[0].([]*types2.ProxyAccessToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAllProxyAccessTokens indicates an expected call of GetAllProxyAccessTokens. +func (mr *MockStoreMockRecorder) GetAllProxyAccessTokens(ctx, lockStrength interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllProxyAccessTokens", reflect.TypeOf((*MockStore)(nil).GetAllProxyAccessTokens), ctx, lockStrength) +} + +// GetAnyAccountID mocks base method. +func (m *MockStore) GetAnyAccountID(ctx context.Context) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAnyAccountID", ctx) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAnyAccountID indicates an expected call of GetAnyAccountID. +func (mr *MockStoreMockRecorder) GetAnyAccountID(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAnyAccountID", reflect.TypeOf((*MockStore)(nil).GetAnyAccountID), ctx) +} + +// GetCustomDomain mocks base method. +func (m *MockStore) GetCustomDomain(ctx context.Context, accountID, domainID string) (*domain.Domain, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCustomDomain", ctx, accountID, domainID) + ret0, _ := ret[0].(*domain.Domain) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCustomDomain indicates an expected call of GetCustomDomain. +func (mr *MockStoreMockRecorder) GetCustomDomain(ctx, accountID, domainID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCustomDomain", reflect.TypeOf((*MockStore)(nil).GetCustomDomain), ctx, accountID, domainID) +} + +// GetDNSRecordByID mocks base method. +func (m *MockStore) GetDNSRecordByID(ctx context.Context, lockStrength LockingStrength, accountID, zoneID, recordID string) (*records.Record, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDNSRecordByID", ctx, lockStrength, accountID, zoneID, recordID) + ret0, _ := ret[0].(*records.Record) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDNSRecordByID indicates an expected call of GetDNSRecordByID. +func (mr *MockStoreMockRecorder) GetDNSRecordByID(ctx, lockStrength, accountID, zoneID, recordID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDNSRecordByID", reflect.TypeOf((*MockStore)(nil).GetDNSRecordByID), ctx, lockStrength, accountID, zoneID, recordID) +} + +// GetGroupByID mocks base method. +func (m *MockStore) GetGroupByID(ctx context.Context, lockStrength LockingStrength, accountID, groupID string) (*types2.Group, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGroupByID", ctx, lockStrength, accountID, groupID) + ret0, _ := ret[0].(*types2.Group) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetGroupByID indicates an expected call of GetGroupByID. +func (mr *MockStoreMockRecorder) GetGroupByID(ctx, lockStrength, accountID, groupID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupByID", reflect.TypeOf((*MockStore)(nil).GetGroupByID), ctx, lockStrength, accountID, groupID) +} + +// GetGroupByName mocks base method. +func (m *MockStore) GetGroupByName(ctx context.Context, lockStrength LockingStrength, groupName, accountID string) (*types2.Group, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGroupByName", ctx, lockStrength, groupName, accountID) + ret0, _ := ret[0].(*types2.Group) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetGroupByName indicates an expected call of GetGroupByName. +func (mr *MockStoreMockRecorder) GetGroupByName(ctx, lockStrength, groupName, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupByName", reflect.TypeOf((*MockStore)(nil).GetGroupByName), ctx, lockStrength, groupName, accountID) +} + +// GetGroupsByIDs mocks base method. +func (m *MockStore) GetGroupsByIDs(ctx context.Context, lockStrength LockingStrength, accountID string, groupIDs []string) (map[string]*types2.Group, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGroupsByIDs", ctx, lockStrength, accountID, groupIDs) + ret0, _ := ret[0].(map[string]*types2.Group) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetGroupsByIDs indicates an expected call of GetGroupsByIDs. +func (mr *MockStoreMockRecorder) GetGroupsByIDs(ctx, lockStrength, accountID, groupIDs interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupsByIDs", reflect.TypeOf((*MockStore)(nil).GetGroupsByIDs), ctx, lockStrength, accountID, groupIDs) +} + +// GetInstallationID mocks base method. +func (m *MockStore) GetInstallationID() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetInstallationID") + ret0, _ := ret[0].(string) + return ret0 +} + +// GetInstallationID indicates an expected call of GetInstallationID. +func (mr *MockStoreMockRecorder) GetInstallationID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstallationID", reflect.TypeOf((*MockStore)(nil).GetInstallationID)) +} + +// GetNameServerGroupByID mocks base method. +func (m *MockStore) GetNameServerGroupByID(ctx context.Context, lockStrength LockingStrength, nameServerGroupID, accountID string) (*dns.NameServerGroup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNameServerGroupByID", ctx, lockStrength, nameServerGroupID, accountID) + ret0, _ := ret[0].(*dns.NameServerGroup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNameServerGroupByID indicates an expected call of GetNameServerGroupByID. +func (mr *MockStoreMockRecorder) GetNameServerGroupByID(ctx, lockStrength, nameServerGroupID, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNameServerGroupByID", reflect.TypeOf((*MockStore)(nil).GetNameServerGroupByID), ctx, lockStrength, nameServerGroupID, accountID) +} + +// GetNetworkByID mocks base method. +func (m *MockStore) GetNetworkByID(ctx context.Context, lockStrength LockingStrength, accountID, networkID string) (*types1.Network, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNetworkByID", ctx, lockStrength, accountID, networkID) + ret0, _ := ret[0].(*types1.Network) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNetworkByID indicates an expected call of GetNetworkByID. +func (mr *MockStoreMockRecorder) GetNetworkByID(ctx, lockStrength, accountID, networkID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetworkByID", reflect.TypeOf((*MockStore)(nil).GetNetworkByID), ctx, lockStrength, accountID, networkID) +} + +// GetNetworkResourceByID mocks base method. +func (m *MockStore) GetNetworkResourceByID(ctx context.Context, lockStrength LockingStrength, accountID, resourceID string) (*types.NetworkResource, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNetworkResourceByID", ctx, lockStrength, accountID, resourceID) + ret0, _ := ret[0].(*types.NetworkResource) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNetworkResourceByID indicates an expected call of GetNetworkResourceByID. +func (mr *MockStoreMockRecorder) GetNetworkResourceByID(ctx, lockStrength, accountID, resourceID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetworkResourceByID", reflect.TypeOf((*MockStore)(nil).GetNetworkResourceByID), ctx, lockStrength, accountID, resourceID) +} + +// GetNetworkResourceByName mocks base method. +func (m *MockStore) GetNetworkResourceByName(ctx context.Context, lockStrength LockingStrength, accountID, resourceName string) (*types.NetworkResource, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNetworkResourceByName", ctx, lockStrength, accountID, resourceName) + ret0, _ := ret[0].(*types.NetworkResource) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNetworkResourceByName indicates an expected call of GetNetworkResourceByName. +func (mr *MockStoreMockRecorder) GetNetworkResourceByName(ctx, lockStrength, accountID, resourceName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetworkResourceByName", reflect.TypeOf((*MockStore)(nil).GetNetworkResourceByName), ctx, lockStrength, accountID, resourceName) +} + +// GetNetworkResourcesByAccountID mocks base method. +func (m *MockStore) GetNetworkResourcesByAccountID(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types.NetworkResource, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNetworkResourcesByAccountID", ctx, lockStrength, accountID) + ret0, _ := ret[0].([]*types.NetworkResource) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNetworkResourcesByAccountID indicates an expected call of GetNetworkResourcesByAccountID. +func (mr *MockStoreMockRecorder) GetNetworkResourcesByAccountID(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetworkResourcesByAccountID", reflect.TypeOf((*MockStore)(nil).GetNetworkResourcesByAccountID), ctx, lockStrength, accountID) +} + +// GetNetworkResourcesByNetID mocks base method. +func (m *MockStore) GetNetworkResourcesByNetID(ctx context.Context, lockStrength LockingStrength, accountID, netID string) ([]*types.NetworkResource, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNetworkResourcesByNetID", ctx, lockStrength, accountID, netID) + ret0, _ := ret[0].([]*types.NetworkResource) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNetworkResourcesByNetID indicates an expected call of GetNetworkResourcesByNetID. +func (mr *MockStoreMockRecorder) GetNetworkResourcesByNetID(ctx, lockStrength, accountID, netID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetworkResourcesByNetID", reflect.TypeOf((*MockStore)(nil).GetNetworkResourcesByNetID), ctx, lockStrength, accountID, netID) +} + +// GetNetworkRouterByID mocks base method. +func (m *MockStore) GetNetworkRouterByID(ctx context.Context, lockStrength LockingStrength, accountID, routerID string) (*types0.NetworkRouter, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNetworkRouterByID", ctx, lockStrength, accountID, routerID) + ret0, _ := ret[0].(*types0.NetworkRouter) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNetworkRouterByID indicates an expected call of GetNetworkRouterByID. +func (mr *MockStoreMockRecorder) GetNetworkRouterByID(ctx, lockStrength, accountID, routerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetworkRouterByID", reflect.TypeOf((*MockStore)(nil).GetNetworkRouterByID), ctx, lockStrength, accountID, routerID) +} + +// GetNetworkRoutersByAccountID mocks base method. +func (m *MockStore) GetNetworkRoutersByAccountID(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types0.NetworkRouter, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNetworkRoutersByAccountID", ctx, lockStrength, accountID) + ret0, _ := ret[0].([]*types0.NetworkRouter) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNetworkRoutersByAccountID indicates an expected call of GetNetworkRoutersByAccountID. +func (mr *MockStoreMockRecorder) GetNetworkRoutersByAccountID(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetworkRoutersByAccountID", reflect.TypeOf((*MockStore)(nil).GetNetworkRoutersByAccountID), ctx, lockStrength, accountID) +} + +// GetNetworkRoutersByNetID mocks base method. +func (m *MockStore) GetNetworkRoutersByNetID(ctx context.Context, lockStrength LockingStrength, accountID, netID string) ([]*types0.NetworkRouter, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNetworkRoutersByNetID", ctx, lockStrength, accountID, netID) + ret0, _ := ret[0].([]*types0.NetworkRouter) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNetworkRoutersByNetID indicates an expected call of GetNetworkRoutersByNetID. +func (mr *MockStoreMockRecorder) GetNetworkRoutersByNetID(ctx, lockStrength, accountID, netID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetworkRoutersByNetID", reflect.TypeOf((*MockStore)(nil).GetNetworkRoutersByNetID), ctx, lockStrength, accountID, netID) +} + +// GetPATByHashedToken mocks base method. +func (m *MockStore) GetPATByHashedToken(ctx context.Context, lockStrength LockingStrength, hashedToken string) (*types2.PersonalAccessToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPATByHashedToken", ctx, lockStrength, hashedToken) + ret0, _ := ret[0].(*types2.PersonalAccessToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPATByHashedToken indicates an expected call of GetPATByHashedToken. +func (mr *MockStoreMockRecorder) GetPATByHashedToken(ctx, lockStrength, hashedToken interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPATByHashedToken", reflect.TypeOf((*MockStore)(nil).GetPATByHashedToken), ctx, lockStrength, hashedToken) +} + +// GetPATByID mocks base method. +func (m *MockStore) GetPATByID(ctx context.Context, lockStrength LockingStrength, userID, patID string) (*types2.PersonalAccessToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPATByID", ctx, lockStrength, userID, patID) + ret0, _ := ret[0].(*types2.PersonalAccessToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPATByID indicates an expected call of GetPATByID. +func (mr *MockStoreMockRecorder) GetPATByID(ctx, lockStrength, userID, patID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPATByID", reflect.TypeOf((*MockStore)(nil).GetPATByID), ctx, lockStrength, userID, patID) +} + +// GetPeerByID mocks base method. +func (m *MockStore) GetPeerByID(ctx context.Context, lockStrength LockingStrength, accountID, peerID string) (*peer.Peer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerByID", ctx, lockStrength, accountID, peerID) + ret0, _ := ret[0].(*peer.Peer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerByID indicates an expected call of GetPeerByID. +func (mr *MockStoreMockRecorder) GetPeerByID(ctx, lockStrength, accountID, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerByID", reflect.TypeOf((*MockStore)(nil).GetPeerByID), ctx, lockStrength, accountID, peerID) +} + +// GetPeerByIP mocks base method. +func (m *MockStore) GetPeerByIP(ctx context.Context, lockStrength LockingStrength, accountID string, ip net.IP) (*peer.Peer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerByIP", ctx, lockStrength, accountID, ip) + ret0, _ := ret[0].(*peer.Peer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerByIP indicates an expected call of GetPeerByIP. +func (mr *MockStoreMockRecorder) GetPeerByIP(ctx, lockStrength, accountID, ip interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerByIP", reflect.TypeOf((*MockStore)(nil).GetPeerByIP), ctx, lockStrength, accountID, ip) +} + +// GetPeerByPeerPubKey mocks base method. +func (m *MockStore) GetPeerByPeerPubKey(ctx context.Context, lockStrength LockingStrength, peerKey string) (*peer.Peer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerByPeerPubKey", ctx, lockStrength, peerKey) + ret0, _ := ret[0].(*peer.Peer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerByPeerPubKey indicates an expected call of GetPeerByPeerPubKey. +func (mr *MockStoreMockRecorder) GetPeerByPeerPubKey(ctx, lockStrength, peerKey interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerByPeerPubKey", reflect.TypeOf((*MockStore)(nil).GetPeerByPeerPubKey), ctx, lockStrength, peerKey) +} + +// GetPeerGroupIDs mocks base method. +func (m *MockStore) GetPeerGroupIDs(ctx context.Context, lockStrength LockingStrength, accountId, peerId string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerGroupIDs", ctx, lockStrength, accountId, peerId) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerGroupIDs indicates an expected call of GetPeerGroupIDs. +func (mr *MockStoreMockRecorder) GetPeerGroupIDs(ctx, lockStrength, accountId, peerId interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerGroupIDs", reflect.TypeOf((*MockStore)(nil).GetPeerGroupIDs), ctx, lockStrength, accountId, peerId) +} + +// GetPeerGroups mocks base method. +func (m *MockStore) GetPeerGroups(ctx context.Context, lockStrength LockingStrength, accountId, peerId string) ([]*types2.Group, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerGroups", ctx, lockStrength, accountId, peerId) + ret0, _ := ret[0].([]*types2.Group) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerGroups indicates an expected call of GetPeerGroups. +func (mr *MockStoreMockRecorder) GetPeerGroups(ctx, lockStrength, accountId, peerId interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerGroups", reflect.TypeOf((*MockStore)(nil).GetPeerGroups), ctx, lockStrength, accountId, peerId) +} + +// GetPeerIDByKey mocks base method. +func (m *MockStore) GetPeerIDByKey(ctx context.Context, lockStrength LockingStrength, key string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerIDByKey", ctx, lockStrength, key) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerIDByKey indicates an expected call of GetPeerIDByKey. +func (mr *MockStoreMockRecorder) GetPeerIDByKey(ctx, lockStrength, key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerIDByKey", reflect.TypeOf((*MockStore)(nil).GetPeerIDByKey), ctx, lockStrength, key) +} + +// GetPeerIdByLabel mocks base method. +func (m *MockStore) GetPeerIdByLabel(ctx context.Context, lockStrength LockingStrength, accountID, hostname string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerIdByLabel", ctx, lockStrength, accountID, hostname) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerIdByLabel indicates an expected call of GetPeerIdByLabel. +func (mr *MockStoreMockRecorder) GetPeerIdByLabel(ctx, lockStrength, accountID, hostname interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerIdByLabel", reflect.TypeOf((*MockStore)(nil).GetPeerIdByLabel), ctx, lockStrength, accountID, hostname) +} + +// GetPeerJobByID mocks base method. +func (m *MockStore) GetPeerJobByID(ctx context.Context, accountID, jobID string) (*types2.Job, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerJobByID", ctx, accountID, jobID) + ret0, _ := ret[0].(*types2.Job) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerJobByID indicates an expected call of GetPeerJobByID. +func (mr *MockStoreMockRecorder) GetPeerJobByID(ctx, accountID, jobID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerJobByID", reflect.TypeOf((*MockStore)(nil).GetPeerJobByID), ctx, accountID, jobID) +} + +// GetPeerJobs mocks base method. +func (m *MockStore) GetPeerJobs(ctx context.Context, accountID, peerID string) ([]*types2.Job, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerJobs", ctx, accountID, peerID) + ret0, _ := ret[0].([]*types2.Job) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerJobs indicates an expected call of GetPeerJobs. +func (mr *MockStoreMockRecorder) GetPeerJobs(ctx, accountID, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerJobs", reflect.TypeOf((*MockStore)(nil).GetPeerJobs), ctx, accountID, peerID) +} + +// GetPeerLabelsInAccount mocks base method. +func (m *MockStore) GetPeerLabelsInAccount(ctx context.Context, lockStrength LockingStrength, accountId, hostname string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerLabelsInAccount", ctx, lockStrength, accountId, hostname) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerLabelsInAccount indicates an expected call of GetPeerLabelsInAccount. +func (mr *MockStoreMockRecorder) GetPeerLabelsInAccount(ctx, lockStrength, accountId, hostname interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerLabelsInAccount", reflect.TypeOf((*MockStore)(nil).GetPeerLabelsInAccount), ctx, lockStrength, accountId, hostname) +} + +// GetPeersByGroupIDs mocks base method. +func (m *MockStore) GetPeersByGroupIDs(ctx context.Context, accountID string, groupIDs []string) ([]*peer.Peer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeersByGroupIDs", ctx, accountID, groupIDs) + ret0, _ := ret[0].([]*peer.Peer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeersByGroupIDs indicates an expected call of GetPeersByGroupIDs. +func (mr *MockStoreMockRecorder) GetPeersByGroupIDs(ctx, accountID, groupIDs interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeersByGroupIDs", reflect.TypeOf((*MockStore)(nil).GetPeersByGroupIDs), ctx, accountID, groupIDs) +} + +// GetPeersByIDs mocks base method. +func (m *MockStore) GetPeersByIDs(ctx context.Context, lockStrength LockingStrength, accountID string, peerIDs []string) (map[string]*peer.Peer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeersByIDs", ctx, lockStrength, accountID, peerIDs) + ret0, _ := ret[0].(map[string]*peer.Peer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeersByIDs indicates an expected call of GetPeersByIDs. +func (mr *MockStoreMockRecorder) GetPeersByIDs(ctx, lockStrength, accountID, peerIDs interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeersByIDs", reflect.TypeOf((*MockStore)(nil).GetPeersByIDs), ctx, lockStrength, accountID, peerIDs) +} + +// GetPolicyByID mocks base method. +func (m *MockStore) GetPolicyByID(ctx context.Context, lockStrength LockingStrength, accountID, policyID string) (*types2.Policy, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPolicyByID", ctx, lockStrength, accountID, policyID) + ret0, _ := ret[0].(*types2.Policy) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPolicyByID indicates an expected call of GetPolicyByID. +func (mr *MockStoreMockRecorder) GetPolicyByID(ctx, lockStrength, accountID, policyID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPolicyByID", reflect.TypeOf((*MockStore)(nil).GetPolicyByID), ctx, lockStrength, accountID, policyID) +} + +// GetPolicyRulesByResourceID mocks base method. +func (m *MockStore) GetPolicyRulesByResourceID(ctx context.Context, lockStrength LockingStrength, accountID, peerID string) ([]*types2.PolicyRule, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPolicyRulesByResourceID", ctx, lockStrength, accountID, peerID) + ret0, _ := ret[0].([]*types2.PolicyRule) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPolicyRulesByResourceID indicates an expected call of GetPolicyRulesByResourceID. +func (mr *MockStoreMockRecorder) GetPolicyRulesByResourceID(ctx, lockStrength, accountID, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPolicyRulesByResourceID", reflect.TypeOf((*MockStore)(nil).GetPolicyRulesByResourceID), ctx, lockStrength, accountID, peerID) +} + +// GetPostureCheckByChecksDefinition mocks base method. +func (m *MockStore) GetPostureCheckByChecksDefinition(accountID string, checks *posture.ChecksDefinition) (*posture.Checks, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPostureCheckByChecksDefinition", accountID, checks) + ret0, _ := ret[0].(*posture.Checks) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPostureCheckByChecksDefinition indicates an expected call of GetPostureCheckByChecksDefinition. +func (mr *MockStoreMockRecorder) GetPostureCheckByChecksDefinition(accountID, checks interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPostureCheckByChecksDefinition", reflect.TypeOf((*MockStore)(nil).GetPostureCheckByChecksDefinition), accountID, checks) +} + +// GetPostureChecksByID mocks base method. +func (m *MockStore) GetPostureChecksByID(ctx context.Context, lockStrength LockingStrength, accountID, postureCheckID string) (*posture.Checks, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPostureChecksByID", ctx, lockStrength, accountID, postureCheckID) + ret0, _ := ret[0].(*posture.Checks) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPostureChecksByID indicates an expected call of GetPostureChecksByID. +func (mr *MockStoreMockRecorder) GetPostureChecksByID(ctx, lockStrength, accountID, postureCheckID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPostureChecksByID", reflect.TypeOf((*MockStore)(nil).GetPostureChecksByID), ctx, lockStrength, accountID, postureCheckID) +} + +// GetPostureChecksByIDs mocks base method. +func (m *MockStore) GetPostureChecksByIDs(ctx context.Context, lockStrength LockingStrength, accountID string, postureChecksIDs []string) (map[string]*posture.Checks, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPostureChecksByIDs", ctx, lockStrength, accountID, postureChecksIDs) + ret0, _ := ret[0].(map[string]*posture.Checks) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPostureChecksByIDs indicates an expected call of GetPostureChecksByIDs. +func (mr *MockStoreMockRecorder) GetPostureChecksByIDs(ctx, lockStrength, accountID, postureChecksIDs interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPostureChecksByIDs", reflect.TypeOf((*MockStore)(nil).GetPostureChecksByIDs), ctx, lockStrength, accountID, postureChecksIDs) +} + +// GetProxyAccessTokenByHashedToken mocks base method. +func (m *MockStore) GetProxyAccessTokenByHashedToken(ctx context.Context, lockStrength LockingStrength, hashedToken types2.HashedProxyToken) (*types2.ProxyAccessToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProxyAccessTokenByHashedToken", ctx, lockStrength, hashedToken) + ret0, _ := ret[0].(*types2.ProxyAccessToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProxyAccessTokenByHashedToken indicates an expected call of GetProxyAccessTokenByHashedToken. +func (mr *MockStoreMockRecorder) GetProxyAccessTokenByHashedToken(ctx, lockStrength, hashedToken interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProxyAccessTokenByHashedToken", reflect.TypeOf((*MockStore)(nil).GetProxyAccessTokenByHashedToken), ctx, lockStrength, hashedToken) +} + +// GetResourceGroups mocks base method. +func (m *MockStore) GetResourceGroups(ctx context.Context, lockStrength LockingStrength, accountID, resourceID string) ([]*types2.Group, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetResourceGroups", ctx, lockStrength, accountID, resourceID) + ret0, _ := ret[0].([]*types2.Group) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetResourceGroups indicates an expected call of GetResourceGroups. +func (mr *MockStoreMockRecorder) GetResourceGroups(ctx, lockStrength, accountID, resourceID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResourceGroups", reflect.TypeOf((*MockStore)(nil).GetResourceGroups), ctx, lockStrength, accountID, resourceID) +} + +// GetRouteByID mocks base method. +func (m *MockStore) GetRouteByID(ctx context.Context, lockStrength LockingStrength, accountID, routeID string) (*route.Route, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRouteByID", ctx, lockStrength, accountID, routeID) + ret0, _ := ret[0].(*route.Route) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRouteByID indicates an expected call of GetRouteByID. +func (mr *MockStoreMockRecorder) GetRouteByID(ctx, lockStrength, accountID, routeID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRouteByID", reflect.TypeOf((*MockStore)(nil).GetRouteByID), ctx, lockStrength, accountID, routeID) +} + +// GetServiceByDomain mocks base method. +func (m *MockStore) GetServiceByDomain(ctx context.Context, accountID, domain string) (*reverseproxy.Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetServiceByDomain", ctx, accountID, domain) + ret0, _ := ret[0].(*reverseproxy.Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetServiceByDomain indicates an expected call of GetServiceByDomain. +func (mr *MockStoreMockRecorder) GetServiceByDomain(ctx, accountID, domain interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceByDomain", reflect.TypeOf((*MockStore)(nil).GetServiceByDomain), ctx, accountID, domain) +} + +// GetServiceByID mocks base method. +func (m *MockStore) GetServiceByID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) (*reverseproxy.Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetServiceByID", ctx, lockStrength, accountID, serviceID) + ret0, _ := ret[0].(*reverseproxy.Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetServiceByID indicates an expected call of GetServiceByID. +func (mr *MockStoreMockRecorder) GetServiceByID(ctx, lockStrength, accountID, serviceID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceByID", reflect.TypeOf((*MockStore)(nil).GetServiceByID), ctx, lockStrength, accountID, serviceID) +} + +// GetServiceTargetByTargetID mocks base method. +func (m *MockStore) GetServiceTargetByTargetID(ctx context.Context, lockStrength LockingStrength, accountID, targetID string) (*reverseproxy.Target, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetServiceTargetByTargetID", ctx, lockStrength, accountID, targetID) + ret0, _ := ret[0].(*reverseproxy.Target) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetServiceTargetByTargetID indicates an expected call of GetServiceTargetByTargetID. +func (mr *MockStoreMockRecorder) GetServiceTargetByTargetID(ctx, lockStrength, accountID, targetID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceTargetByTargetID", reflect.TypeOf((*MockStore)(nil).GetServiceTargetByTargetID), ctx, lockStrength, accountID, targetID) +} + +// GetServices mocks base method. +func (m *MockStore) GetServices(ctx context.Context, lockStrength LockingStrength) ([]*reverseproxy.Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetServices", ctx, lockStrength) + ret0, _ := ret[0].([]*reverseproxy.Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetServices indicates an expected call of GetServices. +func (mr *MockStoreMockRecorder) GetServices(ctx, lockStrength interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServices", reflect.TypeOf((*MockStore)(nil).GetServices), ctx, lockStrength) +} + +// GetSetupKeyByID mocks base method. +func (m *MockStore) GetSetupKeyByID(ctx context.Context, lockStrength LockingStrength, accountID, setupKeyID string) (*types2.SetupKey, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSetupKeyByID", ctx, lockStrength, accountID, setupKeyID) + ret0, _ := ret[0].(*types2.SetupKey) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSetupKeyByID indicates an expected call of GetSetupKeyByID. +func (mr *MockStoreMockRecorder) GetSetupKeyByID(ctx, lockStrength, accountID, setupKeyID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSetupKeyByID", reflect.TypeOf((*MockStore)(nil).GetSetupKeyByID), ctx, lockStrength, accountID, setupKeyID) +} + +// GetSetupKeyBySecret mocks base method. +func (m *MockStore) GetSetupKeyBySecret(ctx context.Context, lockStrength LockingStrength, key string) (*types2.SetupKey, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSetupKeyBySecret", ctx, lockStrength, key) + ret0, _ := ret[0].(*types2.SetupKey) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSetupKeyBySecret indicates an expected call of GetSetupKeyBySecret. +func (mr *MockStoreMockRecorder) GetSetupKeyBySecret(ctx, lockStrength, key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSetupKeyBySecret", reflect.TypeOf((*MockStore)(nil).GetSetupKeyBySecret), ctx, lockStrength, key) +} + +// GetStoreEngine mocks base method. +func (m *MockStore) GetStoreEngine() types2.Engine { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetStoreEngine") + ret0, _ := ret[0].(types2.Engine) + return ret0 +} + +// GetStoreEngine indicates an expected call of GetStoreEngine. +func (mr *MockStoreMockRecorder) GetStoreEngine() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStoreEngine", reflect.TypeOf((*MockStore)(nil).GetStoreEngine)) +} + +// GetTakenIPs mocks base method. +func (m *MockStore) GetTakenIPs(ctx context.Context, lockStrength LockingStrength, accountId string) ([]net.IP, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTakenIPs", ctx, lockStrength, accountId) + ret0, _ := ret[0].([]net.IP) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTakenIPs indicates an expected call of GetTakenIPs. +func (mr *MockStoreMockRecorder) GetTakenIPs(ctx, lockStrength, accountId interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTakenIPs", reflect.TypeOf((*MockStore)(nil).GetTakenIPs), ctx, lockStrength, accountId) +} + +// GetTokenIDByHashedToken mocks base method. +func (m *MockStore) GetTokenIDByHashedToken(ctx context.Context, secret string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTokenIDByHashedToken", ctx, secret) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTokenIDByHashedToken indicates an expected call of GetTokenIDByHashedToken. +func (mr *MockStoreMockRecorder) GetTokenIDByHashedToken(ctx, secret interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTokenIDByHashedToken", reflect.TypeOf((*MockStore)(nil).GetTokenIDByHashedToken), ctx, secret) +} + +// GetUserByPATID mocks base method. +func (m *MockStore) GetUserByPATID(ctx context.Context, lockStrength LockingStrength, patID string) (*types2.User, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUserByPATID", ctx, lockStrength, patID) + ret0, _ := ret[0].(*types2.User) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUserByPATID indicates an expected call of GetUserByPATID. +func (mr *MockStoreMockRecorder) GetUserByPATID(ctx, lockStrength, patID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserByPATID", reflect.TypeOf((*MockStore)(nil).GetUserByPATID), ctx, lockStrength, patID) +} + +// GetUserByUserID mocks base method. +func (m *MockStore) GetUserByUserID(ctx context.Context, lockStrength LockingStrength, userID string) (*types2.User, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUserByUserID", ctx, lockStrength, userID) + ret0, _ := ret[0].(*types2.User) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUserByUserID indicates an expected call of GetUserByUserID. +func (mr *MockStoreMockRecorder) GetUserByUserID(ctx, lockStrength, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserByUserID", reflect.TypeOf((*MockStore)(nil).GetUserByUserID), ctx, lockStrength, userID) +} + +// GetUserIDByPeerKey mocks base method. +func (m *MockStore) GetUserIDByPeerKey(ctx context.Context, lockStrength LockingStrength, peerKey string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUserIDByPeerKey", ctx, lockStrength, peerKey) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUserIDByPeerKey indicates an expected call of GetUserIDByPeerKey. +func (mr *MockStoreMockRecorder) GetUserIDByPeerKey(ctx, lockStrength, peerKey interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserIDByPeerKey", reflect.TypeOf((*MockStore)(nil).GetUserIDByPeerKey), ctx, lockStrength, peerKey) +} + +// GetUserInviteByEmail mocks base method. +func (m *MockStore) GetUserInviteByEmail(ctx context.Context, lockStrength LockingStrength, accountID, email string) (*types2.UserInviteRecord, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUserInviteByEmail", ctx, lockStrength, accountID, email) + ret0, _ := ret[0].(*types2.UserInviteRecord) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUserInviteByEmail indicates an expected call of GetUserInviteByEmail. +func (mr *MockStoreMockRecorder) GetUserInviteByEmail(ctx, lockStrength, accountID, email interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserInviteByEmail", reflect.TypeOf((*MockStore)(nil).GetUserInviteByEmail), ctx, lockStrength, accountID, email) +} + +// GetUserInviteByHashedToken mocks base method. +func (m *MockStore) GetUserInviteByHashedToken(ctx context.Context, lockStrength LockingStrength, hashedToken string) (*types2.UserInviteRecord, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUserInviteByHashedToken", ctx, lockStrength, hashedToken) + ret0, _ := ret[0].(*types2.UserInviteRecord) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUserInviteByHashedToken indicates an expected call of GetUserInviteByHashedToken. +func (mr *MockStoreMockRecorder) GetUserInviteByHashedToken(ctx, lockStrength, hashedToken interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserInviteByHashedToken", reflect.TypeOf((*MockStore)(nil).GetUserInviteByHashedToken), ctx, lockStrength, hashedToken) +} + +// GetUserInviteByID mocks base method. +func (m *MockStore) GetUserInviteByID(ctx context.Context, lockStrength LockingStrength, accountID, inviteID string) (*types2.UserInviteRecord, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUserInviteByID", ctx, lockStrength, accountID, inviteID) + ret0, _ := ret[0].(*types2.UserInviteRecord) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUserInviteByID indicates an expected call of GetUserInviteByID. +func (mr *MockStoreMockRecorder) GetUserInviteByID(ctx, lockStrength, accountID, inviteID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserInviteByID", reflect.TypeOf((*MockStore)(nil).GetUserInviteByID), ctx, lockStrength, accountID, inviteID) +} + +// GetUserPATs mocks base method. +func (m *MockStore) GetUserPATs(ctx context.Context, lockStrength LockingStrength, userID string) ([]*types2.PersonalAccessToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUserPATs", ctx, lockStrength, userID) + ret0, _ := ret[0].([]*types2.PersonalAccessToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUserPATs indicates an expected call of GetUserPATs. +func (mr *MockStoreMockRecorder) GetUserPATs(ctx, lockStrength, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserPATs", reflect.TypeOf((*MockStore)(nil).GetUserPATs), ctx, lockStrength, userID) +} + +// GetUserPeers mocks base method. +func (m *MockStore) GetUserPeers(ctx context.Context, lockStrength LockingStrength, accountID, userID string) ([]*peer.Peer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUserPeers", ctx, lockStrength, accountID, userID) + ret0, _ := ret[0].([]*peer.Peer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUserPeers indicates an expected call of GetUserPeers. +func (mr *MockStoreMockRecorder) GetUserPeers(ctx, lockStrength, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserPeers", reflect.TypeOf((*MockStore)(nil).GetUserPeers), ctx, lockStrength, accountID, userID) +} + +// GetZoneByDomain mocks base method. +func (m *MockStore) GetZoneByDomain(ctx context.Context, accountID, domain string) (*zones.Zone, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetZoneByDomain", ctx, accountID, domain) + ret0, _ := ret[0].(*zones.Zone) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetZoneByDomain indicates an expected call of GetZoneByDomain. +func (mr *MockStoreMockRecorder) GetZoneByDomain(ctx, accountID, domain interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetZoneByDomain", reflect.TypeOf((*MockStore)(nil).GetZoneByDomain), ctx, accountID, domain) +} + +// GetZoneByID mocks base method. +func (m *MockStore) GetZoneByID(ctx context.Context, lockStrength LockingStrength, accountID, zoneID string) (*zones.Zone, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetZoneByID", ctx, lockStrength, accountID, zoneID) + ret0, _ := ret[0].(*zones.Zone) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetZoneByID indicates an expected call of GetZoneByID. +func (mr *MockStoreMockRecorder) GetZoneByID(ctx, lockStrength, accountID, zoneID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetZoneByID", reflect.TypeOf((*MockStore)(nil).GetZoneByID), ctx, lockStrength, accountID, zoneID) +} + +// GetZoneDNSRecords mocks base method. +func (m *MockStore) GetZoneDNSRecords(ctx context.Context, lockStrength LockingStrength, accountID, zoneID string) ([]*records.Record, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetZoneDNSRecords", ctx, lockStrength, accountID, zoneID) + ret0, _ := ret[0].([]*records.Record) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetZoneDNSRecords indicates an expected call of GetZoneDNSRecords. +func (mr *MockStoreMockRecorder) GetZoneDNSRecords(ctx, lockStrength, accountID, zoneID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetZoneDNSRecords", reflect.TypeOf((*MockStore)(nil).GetZoneDNSRecords), ctx, lockStrength, accountID, zoneID) +} + +// GetZoneDNSRecordsByName mocks base method. +func (m *MockStore) GetZoneDNSRecordsByName(ctx context.Context, lockStrength LockingStrength, accountID, zoneID, name string) ([]*records.Record, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetZoneDNSRecordsByName", ctx, lockStrength, accountID, zoneID, name) + ret0, _ := ret[0].([]*records.Record) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetZoneDNSRecordsByName indicates an expected call of GetZoneDNSRecordsByName. +func (mr *MockStoreMockRecorder) GetZoneDNSRecordsByName(ctx, lockStrength, accountID, zoneID, name interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetZoneDNSRecordsByName", reflect.TypeOf((*MockStore)(nil).GetZoneDNSRecordsByName), ctx, lockStrength, accountID, zoneID, name) +} + +// IncrementNetworkSerial mocks base method. +func (m *MockStore) IncrementNetworkSerial(ctx context.Context, accountId string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IncrementNetworkSerial", ctx, accountId) + ret0, _ := ret[0].(error) + return ret0 +} + +// IncrementNetworkSerial indicates an expected call of IncrementNetworkSerial. +func (mr *MockStoreMockRecorder) IncrementNetworkSerial(ctx, accountId interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IncrementNetworkSerial", reflect.TypeOf((*MockStore)(nil).IncrementNetworkSerial), ctx, accountId) +} + +// IncrementSetupKeyUsage mocks base method. +func (m *MockStore) IncrementSetupKeyUsage(ctx context.Context, setupKeyID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IncrementSetupKeyUsage", ctx, setupKeyID) + ret0, _ := ret[0].(error) + return ret0 +} + +// IncrementSetupKeyUsage indicates an expected call of IncrementSetupKeyUsage. +func (mr *MockStoreMockRecorder) IncrementSetupKeyUsage(ctx, setupKeyID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IncrementSetupKeyUsage", reflect.TypeOf((*MockStore)(nil).IncrementSetupKeyUsage), ctx, setupKeyID) +} + +// IsPrimaryAccount mocks base method. +func (m *MockStore) IsPrimaryAccount(ctx context.Context, accountID string) (bool, string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsPrimaryAccount", ctx, accountID) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(string) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// IsPrimaryAccount indicates an expected call of IsPrimaryAccount. +func (mr *MockStoreMockRecorder) IsPrimaryAccount(ctx, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsPrimaryAccount", reflect.TypeOf((*MockStore)(nil).IsPrimaryAccount), ctx, accountID) +} + +// ListCustomDomains mocks base method. +func (m *MockStore) ListCustomDomains(ctx context.Context, accountID string) ([]*domain.Domain, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListCustomDomains", ctx, accountID) + ret0, _ := ret[0].([]*domain.Domain) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListCustomDomains indicates an expected call of ListCustomDomains. +func (mr *MockStoreMockRecorder) ListCustomDomains(ctx, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListCustomDomains", reflect.TypeOf((*MockStore)(nil).ListCustomDomains), ctx, accountID) +} + +// ListFreeDomains mocks base method. +func (m *MockStore) ListFreeDomains(ctx context.Context, accountID string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListFreeDomains", ctx, accountID) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListFreeDomains indicates an expected call of ListFreeDomains. +func (mr *MockStoreMockRecorder) ListFreeDomains(ctx, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListFreeDomains", reflect.TypeOf((*MockStore)(nil).ListFreeDomains), ctx, accountID) +} + +// MarkAccountPrimary mocks base method. +func (m *MockStore) MarkAccountPrimary(ctx context.Context, accountID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarkAccountPrimary", ctx, accountID) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarkAccountPrimary indicates an expected call of MarkAccountPrimary. +func (mr *MockStoreMockRecorder) MarkAccountPrimary(ctx, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkAccountPrimary", reflect.TypeOf((*MockStore)(nil).MarkAccountPrimary), ctx, accountID) +} + +// MarkAllPendingJobsAsFailed mocks base method. +func (m *MockStore) MarkAllPendingJobsAsFailed(ctx context.Context, accountID, peerID, reason string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarkAllPendingJobsAsFailed", ctx, accountID, peerID, reason) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarkAllPendingJobsAsFailed indicates an expected call of MarkAllPendingJobsAsFailed. +func (mr *MockStoreMockRecorder) MarkAllPendingJobsAsFailed(ctx, accountID, peerID, reason interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkAllPendingJobsAsFailed", reflect.TypeOf((*MockStore)(nil).MarkAllPendingJobsAsFailed), ctx, accountID, peerID, reason) +} + +// MarkPATUsed mocks base method. +func (m *MockStore) MarkPATUsed(ctx context.Context, patID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarkPATUsed", ctx, patID) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarkPATUsed indicates an expected call of MarkPATUsed. +func (mr *MockStoreMockRecorder) MarkPATUsed(ctx, patID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkPATUsed", reflect.TypeOf((*MockStore)(nil).MarkPATUsed), ctx, patID) +} + +// MarkPendingJobsAsFailed mocks base method. +func (m *MockStore) MarkPendingJobsAsFailed(ctx context.Context, accountID, peerID, jobID, reason string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarkPendingJobsAsFailed", ctx, accountID, peerID, jobID, reason) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarkPendingJobsAsFailed indicates an expected call of MarkPendingJobsAsFailed. +func (mr *MockStoreMockRecorder) MarkPendingJobsAsFailed(ctx, accountID, peerID, jobID, reason interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkPendingJobsAsFailed", reflect.TypeOf((*MockStore)(nil).MarkPendingJobsAsFailed), ctx, accountID, peerID, jobID, reason) +} + +// MarkProxyAccessTokenUsed mocks base method. +func (m *MockStore) MarkProxyAccessTokenUsed(ctx context.Context, tokenID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarkProxyAccessTokenUsed", ctx, tokenID) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarkProxyAccessTokenUsed indicates an expected call of MarkProxyAccessTokenUsed. +func (mr *MockStoreMockRecorder) MarkProxyAccessTokenUsed(ctx, tokenID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkProxyAccessTokenUsed", reflect.TypeOf((*MockStore)(nil).MarkProxyAccessTokenUsed), ctx, tokenID) +} + +// RemovePeerFromAllGroups mocks base method. +func (m *MockStore) RemovePeerFromAllGroups(ctx context.Context, peerID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemovePeerFromAllGroups", ctx, peerID) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemovePeerFromAllGroups indicates an expected call of RemovePeerFromAllGroups. +func (mr *MockStoreMockRecorder) RemovePeerFromAllGroups(ctx, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemovePeerFromAllGroups", reflect.TypeOf((*MockStore)(nil).RemovePeerFromAllGroups), ctx, peerID) +} + +// RemovePeerFromGroup mocks base method. +func (m *MockStore) RemovePeerFromGroup(ctx context.Context, peerID, groupID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemovePeerFromGroup", ctx, peerID, groupID) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemovePeerFromGroup indicates an expected call of RemovePeerFromGroup. +func (mr *MockStoreMockRecorder) RemovePeerFromGroup(ctx, peerID, groupID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemovePeerFromGroup", reflect.TypeOf((*MockStore)(nil).RemovePeerFromGroup), ctx, peerID, groupID) +} + +// RemoveResourceFromGroup mocks base method. +func (m *MockStore) RemoveResourceFromGroup(ctx context.Context, accountId, groupID, resourceID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveResourceFromGroup", ctx, accountId, groupID, resourceID) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveResourceFromGroup indicates an expected call of RemoveResourceFromGroup. +func (mr *MockStoreMockRecorder) RemoveResourceFromGroup(ctx, accountId, groupID, resourceID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveResourceFromGroup", reflect.TypeOf((*MockStore)(nil).RemoveResourceFromGroup), ctx, accountId, groupID, resourceID) +} + +// RevokeProxyAccessToken mocks base method. +func (m *MockStore) RevokeProxyAccessToken(ctx context.Context, tokenID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RevokeProxyAccessToken", ctx, tokenID) + ret0, _ := ret[0].(error) + return ret0 +} + +// RevokeProxyAccessToken indicates an expected call of RevokeProxyAccessToken. +func (mr *MockStoreMockRecorder) RevokeProxyAccessToken(ctx, tokenID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RevokeProxyAccessToken", reflect.TypeOf((*MockStore)(nil).RevokeProxyAccessToken), ctx, tokenID) +} + +// SaveAccount mocks base method. +func (m *MockStore) SaveAccount(ctx context.Context, account *types2.Account) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveAccount", ctx, account) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveAccount indicates an expected call of SaveAccount. +func (mr *MockStoreMockRecorder) SaveAccount(ctx, account interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveAccount", reflect.TypeOf((*MockStore)(nil).SaveAccount), ctx, account) +} + +// SaveAccountOnboarding mocks base method. +func (m *MockStore) SaveAccountOnboarding(ctx context.Context, onboarding *types2.AccountOnboarding) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveAccountOnboarding", ctx, onboarding) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveAccountOnboarding indicates an expected call of SaveAccountOnboarding. +func (mr *MockStoreMockRecorder) SaveAccountOnboarding(ctx, onboarding interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveAccountOnboarding", reflect.TypeOf((*MockStore)(nil).SaveAccountOnboarding), ctx, onboarding) +} + +// SaveAccountSettings mocks base method. +func (m *MockStore) SaveAccountSettings(ctx context.Context, accountID string, settings *types2.Settings) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveAccountSettings", ctx, accountID, settings) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveAccountSettings indicates an expected call of SaveAccountSettings. +func (mr *MockStoreMockRecorder) SaveAccountSettings(ctx, accountID, settings interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveAccountSettings", reflect.TypeOf((*MockStore)(nil).SaveAccountSettings), ctx, accountID, settings) +} + +// SaveDNSSettings mocks base method. +func (m *MockStore) SaveDNSSettings(ctx context.Context, accountID string, settings *types2.DNSSettings) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveDNSSettings", ctx, accountID, settings) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveDNSSettings indicates an expected call of SaveDNSSettings. +func (mr *MockStoreMockRecorder) SaveDNSSettings(ctx, accountID, settings interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveDNSSettings", reflect.TypeOf((*MockStore)(nil).SaveDNSSettings), ctx, accountID, settings) +} + +// SaveInstallationID mocks base method. +func (m *MockStore) SaveInstallationID(ctx context.Context, ID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveInstallationID", ctx, ID) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveInstallationID indicates an expected call of SaveInstallationID. +func (mr *MockStoreMockRecorder) SaveInstallationID(ctx, ID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveInstallationID", reflect.TypeOf((*MockStore)(nil).SaveInstallationID), ctx, ID) +} + +// SaveNameServerGroup mocks base method. +func (m *MockStore) SaveNameServerGroup(ctx context.Context, nameServerGroup *dns.NameServerGroup) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveNameServerGroup", ctx, nameServerGroup) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveNameServerGroup indicates an expected call of SaveNameServerGroup. +func (mr *MockStoreMockRecorder) SaveNameServerGroup(ctx, nameServerGroup interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveNameServerGroup", reflect.TypeOf((*MockStore)(nil).SaveNameServerGroup), ctx, nameServerGroup) +} + +// SaveNetwork mocks base method. +func (m *MockStore) SaveNetwork(ctx context.Context, network *types1.Network) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveNetwork", ctx, network) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveNetwork indicates an expected call of SaveNetwork. +func (mr *MockStoreMockRecorder) SaveNetwork(ctx, network interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveNetwork", reflect.TypeOf((*MockStore)(nil).SaveNetwork), ctx, network) +} + +// SaveNetworkResource mocks base method. +func (m *MockStore) SaveNetworkResource(ctx context.Context, resource *types.NetworkResource) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveNetworkResource", ctx, resource) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveNetworkResource indicates an expected call of SaveNetworkResource. +func (mr *MockStoreMockRecorder) SaveNetworkResource(ctx, resource interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveNetworkResource", reflect.TypeOf((*MockStore)(nil).SaveNetworkResource), ctx, resource) +} + +// SaveNetworkRouter mocks base method. +func (m *MockStore) SaveNetworkRouter(ctx context.Context, router *types0.NetworkRouter) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveNetworkRouter", ctx, router) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveNetworkRouter indicates an expected call of SaveNetworkRouter. +func (mr *MockStoreMockRecorder) SaveNetworkRouter(ctx, router interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveNetworkRouter", reflect.TypeOf((*MockStore)(nil).SaveNetworkRouter), ctx, router) +} + +// SavePAT mocks base method. +func (m *MockStore) SavePAT(ctx context.Context, pat *types2.PersonalAccessToken) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SavePAT", ctx, pat) + ret0, _ := ret[0].(error) + return ret0 +} + +// SavePAT indicates an expected call of SavePAT. +func (mr *MockStoreMockRecorder) SavePAT(ctx, pat interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SavePAT", reflect.TypeOf((*MockStore)(nil).SavePAT), ctx, pat) +} + +// SavePeer mocks base method. +func (m *MockStore) SavePeer(ctx context.Context, accountID string, peer *peer.Peer) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SavePeer", ctx, accountID, peer) + ret0, _ := ret[0].(error) + return ret0 +} + +// SavePeer indicates an expected call of SavePeer. +func (mr *MockStoreMockRecorder) SavePeer(ctx, accountID, peer interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SavePeer", reflect.TypeOf((*MockStore)(nil).SavePeer), ctx, accountID, peer) +} + +// SavePeerLocation mocks base method. +func (m *MockStore) SavePeerLocation(ctx context.Context, accountID string, peer *peer.Peer) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SavePeerLocation", ctx, accountID, peer) + ret0, _ := ret[0].(error) + return ret0 +} + +// SavePeerLocation indicates an expected call of SavePeerLocation. +func (mr *MockStoreMockRecorder) SavePeerLocation(ctx, accountID, peer interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SavePeerLocation", reflect.TypeOf((*MockStore)(nil).SavePeerLocation), ctx, accountID, peer) +} + +// SavePeerStatus mocks base method. +func (m *MockStore) SavePeerStatus(ctx context.Context, accountID, peerID string, status peer.PeerStatus) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SavePeerStatus", ctx, accountID, peerID, status) + ret0, _ := ret[0].(error) + return ret0 +} + +// SavePeerStatus indicates an expected call of SavePeerStatus. +func (mr *MockStoreMockRecorder) SavePeerStatus(ctx, accountID, peerID, status interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SavePeerStatus", reflect.TypeOf((*MockStore)(nil).SavePeerStatus), ctx, accountID, peerID, status) +} + +// SavePolicy mocks base method. +func (m *MockStore) SavePolicy(ctx context.Context, policy *types2.Policy) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SavePolicy", ctx, policy) + ret0, _ := ret[0].(error) + return ret0 +} + +// SavePolicy indicates an expected call of SavePolicy. +func (mr *MockStoreMockRecorder) SavePolicy(ctx, policy interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SavePolicy", reflect.TypeOf((*MockStore)(nil).SavePolicy), ctx, policy) +} + +// SavePostureChecks mocks base method. +func (m *MockStore) SavePostureChecks(ctx context.Context, postureCheck *posture.Checks) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SavePostureChecks", ctx, postureCheck) + ret0, _ := ret[0].(error) + return ret0 +} + +// SavePostureChecks indicates an expected call of SavePostureChecks. +func (mr *MockStoreMockRecorder) SavePostureChecks(ctx, postureCheck interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SavePostureChecks", reflect.TypeOf((*MockStore)(nil).SavePostureChecks), ctx, postureCheck) +} + +// SaveProxyAccessToken mocks base method. +func (m *MockStore) SaveProxyAccessToken(ctx context.Context, token *types2.ProxyAccessToken) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveProxyAccessToken", ctx, token) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveProxyAccessToken indicates an expected call of SaveProxyAccessToken. +func (mr *MockStoreMockRecorder) SaveProxyAccessToken(ctx, token interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveProxyAccessToken", reflect.TypeOf((*MockStore)(nil).SaveProxyAccessToken), ctx, token) +} + +// SaveRoute mocks base method. +func (m *MockStore) SaveRoute(ctx context.Context, route *route.Route) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveRoute", ctx, route) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveRoute indicates an expected call of SaveRoute. +func (mr *MockStoreMockRecorder) SaveRoute(ctx, route interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveRoute", reflect.TypeOf((*MockStore)(nil).SaveRoute), ctx, route) +} + +// SaveSetupKey mocks base method. +func (m *MockStore) SaveSetupKey(ctx context.Context, setupKey *types2.SetupKey) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveSetupKey", ctx, setupKey) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveSetupKey indicates an expected call of SaveSetupKey. +func (mr *MockStoreMockRecorder) SaveSetupKey(ctx, setupKey interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveSetupKey", reflect.TypeOf((*MockStore)(nil).SaveSetupKey), ctx, setupKey) +} + +// SaveUser mocks base method. +func (m *MockStore) SaveUser(ctx context.Context, user *types2.User) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveUser", ctx, user) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveUser indicates an expected call of SaveUser. +func (mr *MockStoreMockRecorder) SaveUser(ctx, user interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveUser", reflect.TypeOf((*MockStore)(nil).SaveUser), ctx, user) +} + +// SaveUserInvite mocks base method. +func (m *MockStore) SaveUserInvite(ctx context.Context, invite *types2.UserInviteRecord) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveUserInvite", ctx, invite) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveUserInvite indicates an expected call of SaveUserInvite. +func (mr *MockStoreMockRecorder) SaveUserInvite(ctx, invite interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveUserInvite", reflect.TypeOf((*MockStore)(nil).SaveUserInvite), ctx, invite) +} + +// SaveUserLastLogin mocks base method. +func (m *MockStore) SaveUserLastLogin(ctx context.Context, accountID, userID string, lastLogin time.Time) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveUserLastLogin", ctx, accountID, userID, lastLogin) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveUserLastLogin indicates an expected call of SaveUserLastLogin. +func (mr *MockStoreMockRecorder) SaveUserLastLogin(ctx, accountID, userID, lastLogin interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveUserLastLogin", reflect.TypeOf((*MockStore)(nil).SaveUserLastLogin), ctx, accountID, userID, lastLogin) +} + +// SaveUsers mocks base method. +func (m *MockStore) SaveUsers(ctx context.Context, users []*types2.User) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveUsers", ctx, users) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveUsers indicates an expected call of SaveUsers. +func (mr *MockStoreMockRecorder) SaveUsers(ctx, users interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveUsers", reflect.TypeOf((*MockStore)(nil).SaveUsers), ctx, users) +} + +// SetFieldEncrypt mocks base method. +func (m *MockStore) SetFieldEncrypt(enc *crypt.FieldEncrypt) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetFieldEncrypt", enc) +} + +// SetFieldEncrypt indicates an expected call of SetFieldEncrypt. +func (mr *MockStoreMockRecorder) SetFieldEncrypt(enc interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFieldEncrypt", reflect.TypeOf((*MockStore)(nil).SetFieldEncrypt), enc) +} + +// UpdateAccountDomainAttributes mocks base method. +func (m *MockStore) UpdateAccountDomainAttributes(ctx context.Context, accountID, domain, category string, isPrimaryDomain bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateAccountDomainAttributes", ctx, accountID, domain, category, isPrimaryDomain) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateAccountDomainAttributes indicates an expected call of UpdateAccountDomainAttributes. +func (mr *MockStoreMockRecorder) UpdateAccountDomainAttributes(ctx, accountID, domain, category, isPrimaryDomain interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccountDomainAttributes", reflect.TypeOf((*MockStore)(nil).UpdateAccountDomainAttributes), ctx, accountID, domain, category, isPrimaryDomain) +} + +// UpdateAccountNetwork mocks base method. +func (m *MockStore) UpdateAccountNetwork(ctx context.Context, accountID string, ipNet net.IPNet) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateAccountNetwork", ctx, accountID, ipNet) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateAccountNetwork indicates an expected call of UpdateAccountNetwork. +func (mr *MockStoreMockRecorder) UpdateAccountNetwork(ctx, accountID, ipNet interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccountNetwork", reflect.TypeOf((*MockStore)(nil).UpdateAccountNetwork), ctx, accountID, ipNet) +} + +// UpdateCustomDomain mocks base method. +func (m *MockStore) UpdateCustomDomain(ctx context.Context, accountID string, d *domain.Domain) (*domain.Domain, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateCustomDomain", ctx, accountID, d) + ret0, _ := ret[0].(*domain.Domain) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateCustomDomain indicates an expected call of UpdateCustomDomain. +func (mr *MockStoreMockRecorder) UpdateCustomDomain(ctx, accountID, d interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateCustomDomain", reflect.TypeOf((*MockStore)(nil).UpdateCustomDomain), ctx, accountID, d) +} + +// UpdateDNSRecord mocks base method. +func (m *MockStore) UpdateDNSRecord(ctx context.Context, record *records.Record) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateDNSRecord", ctx, record) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateDNSRecord indicates an expected call of UpdateDNSRecord. +func (mr *MockStoreMockRecorder) UpdateDNSRecord(ctx, record interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDNSRecord", reflect.TypeOf((*MockStore)(nil).UpdateDNSRecord), ctx, record) +} + +// UpdateGroup mocks base method. +func (m *MockStore) UpdateGroup(ctx context.Context, group *types2.Group) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateGroup", ctx, group) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateGroup indicates an expected call of UpdateGroup. +func (mr *MockStoreMockRecorder) UpdateGroup(ctx, group interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateGroup", reflect.TypeOf((*MockStore)(nil).UpdateGroup), ctx, group) +} + +// UpdateGroups mocks base method. +func (m *MockStore) UpdateGroups(ctx context.Context, accountID string, groups []*types2.Group) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateGroups", ctx, accountID, groups) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateGroups indicates an expected call of UpdateGroups. +func (mr *MockStoreMockRecorder) UpdateGroups(ctx, accountID, groups interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateGroups", reflect.TypeOf((*MockStore)(nil).UpdateGroups), ctx, accountID, groups) +} + +// UpdateService mocks base method. +func (m *MockStore) UpdateService(ctx context.Context, service *reverseproxy.Service) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateService", ctx, service) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateService indicates an expected call of UpdateService. +func (mr *MockStoreMockRecorder) UpdateService(ctx, service interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateService", reflect.TypeOf((*MockStore)(nil).UpdateService), ctx, service) +} + +// UpdateZone mocks base method. +func (m *MockStore) UpdateZone(ctx context.Context, zone *zones.Zone) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateZone", ctx, zone) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateZone indicates an expected call of UpdateZone. +func (mr *MockStoreMockRecorder) UpdateZone(ctx, zone interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateZone", reflect.TypeOf((*MockStore)(nil).UpdateZone), ctx, zone) +} diff --git a/management/server/testdata/auth_callback.sql b/management/server/testdata/auth_callback.sql new file mode 100644 index 000000000..fdd91a6d5 --- /dev/null +++ b/management/server/testdata/auth_callback.sql @@ -0,0 +1,17 @@ +-- Schema definitions (must match GORM auto-migrate order) +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); + +-- Test accounts +INSERT INTO accounts VALUES('testAccountId','','2024-10-02 16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',0,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO accounts VALUES('otherAccountId','','2024-10-02 16:01:38.000000000+00:00','other.com','private',1,'otherNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',0,86400000000000,0,0,0,'',NULL,NULL,NULL); + +-- Test groups +INSERT INTO "groups" VALUES('allowedGroupId','testAccountId','Allowed Group','api','[]',0,''); +INSERT INTO "groups" VALUES('restrictedGroupId','testAccountId','Restricted Group','api','[]',0,''); + +-- Test users +INSERT INTO users VALUES('allowedUserId','testAccountId','user',0,0,'','["allowedGroupId"]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('nonGroupUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherAccountUserId','otherAccountId','user',0,0,'','["allowedGroupId"]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); diff --git a/management/server/types/account.go b/management/server/types/account.go index a2b5140d4..3208cc89a 100644 --- a/management/server/types/account.go +++ b/management/server/types/account.go @@ -18,6 +18,7 @@ import ( "github.com/netbirdio/netbird/client/ssh/auth" nbdns "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" "github.com/netbirdio/netbird/management/internals/modules/zones" "github.com/netbirdio/netbird/management/internals/modules/zones/records" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" @@ -99,6 +100,7 @@ type Account struct { NameServerGroupsG []nbdns.NameServerGroup `json:"-" gorm:"foreignKey:AccountID;references:id"` DNSSettings DNSSettings `gorm:"embedded;embeddedPrefix:dns_settings_"` PostureChecks []*posture.Checks `gorm:"foreignKey:AccountID;references:id"` + Services []*reverseproxy.Service `gorm:"foreignKey:AccountID;references:id"` // Settings is a dictionary of Account settings Settings *Settings `gorm:"embedded;embeddedPrefix:settings_"` Networks []*networkTypes.Network `gorm:"foreignKey:AccountID;references:id"` @@ -108,6 +110,8 @@ type Account struct { NetworkMapCache *NetworkMapBuilder `gorm:"-"` nmapInitOnce *sync.Once `gorm:"-"` + + ReverseProxyFreeDomainNonce string } func (a *Account) InitOnce() { @@ -902,6 +906,11 @@ func (a *Account) Copy() *Account { networkResources = append(networkResources, resource.Copy()) } + services := []*reverseproxy.Service{} + for _, service := range a.Services { + services = append(services, service.Copy()) + } + return &Account{ Id: a.Id, CreatedBy: a.CreatedBy, @@ -923,6 +932,7 @@ func (a *Account) Copy() *Account { Networks: nets, NetworkRouters: networkRouters, NetworkResources: networkResources, + Services: services, Onboarding: a.Onboarding, NetworkMapCache: a.NetworkMapCache, nmapInitOnce: a.nmapInitOnce, @@ -1213,7 +1223,7 @@ func (a *Account) getAllPeersFromGroups(ctx context.Context, groups []string, pe filteredPeers := make([]*nbpeer.Peer, 0, len(uniquePeerIDs)) for _, p := range uniquePeerIDs { peer, ok := a.Peers[p] - if !ok || peer == nil { + if !ok || peer == nil || peer.ProxyMeta.Embedded { continue } @@ -1776,6 +1786,110 @@ func (a *Account) GetActiveGroupUsers() map[string][]string { return groups } +func (a *Account) GetProxyPeers() map[string][]*nbpeer.Peer { + proxyPeers := make(map[string][]*nbpeer.Peer) + for _, peer := range a.Peers { + if peer.ProxyMeta.Embedded { + proxyPeers[peer.ProxyMeta.Cluster] = append(proxyPeers[peer.ProxyMeta.Cluster], peer) + } + } + return proxyPeers +} + +func (a *Account) InjectProxyPolicies(ctx context.Context) { + if len(a.Services) == 0 { + return + } + + proxyPeersByCluster := a.GetProxyPeers() + if len(proxyPeersByCluster) == 0 { + return + } + + for _, service := range a.Services { + if !service.Enabled { + continue + } + a.injectServiceProxyPolicies(ctx, service, proxyPeersByCluster) + } +} + +func (a *Account) injectServiceProxyPolicies(ctx context.Context, service *reverseproxy.Service, proxyPeersByCluster map[string][]*nbpeer.Peer) { + for _, target := range service.Targets { + if !target.Enabled { + continue + } + a.injectTargetProxyPolicies(ctx, service, target, proxyPeersByCluster[service.ProxyCluster]) + } +} + +func (a *Account) injectTargetProxyPolicies(ctx context.Context, service *reverseproxy.Service, target *reverseproxy.Target, proxyPeers []*nbpeer.Peer) { + port, ok := a.resolveTargetPort(ctx, target) + if !ok { + return + } + + path := "" + if target.Path != nil { + path = *target.Path + } + + for _, proxyPeer := range proxyPeers { + policy := a.createProxyPolicy(service, target, proxyPeer, port, path) + a.Policies = append(a.Policies, policy) + } +} + +func (a *Account) resolveTargetPort(ctx context.Context, target *reverseproxy.Target) (int, bool) { + if target.Port != 0 { + return target.Port, true + } + + switch target.Protocol { + case "https": + return 443, true + case "http": + return 80, true + default: + log.WithContext(ctx).Warnf("unsupported protocol %s for proxy target %s, skipping policy injection", target.Protocol, target.TargetId) + return 0, false + } +} + +func (a *Account) createProxyPolicy(service *reverseproxy.Service, target *reverseproxy.Target, proxyPeer *nbpeer.Peer, port int, path string) *Policy { + policyID := fmt.Sprintf("proxy-access-%s-%s-%s", service.ID, proxyPeer.ID, path) + return &Policy{ + ID: policyID, + Name: fmt.Sprintf("Proxy Access to %s", service.Name), + Enabled: true, + Rules: []*PolicyRule{ + { + ID: policyID, + PolicyID: policyID, + Name: fmt.Sprintf("Allow access to %s", service.Name), + Enabled: true, + SourceResource: Resource{ + ID: proxyPeer.ID, + Type: ResourceTypePeer, + }, + DestinationResource: Resource{ + ID: target.TargetId, + Type: ResourceType(target.TargetType), + }, + Bidirectional: false, + Protocol: PolicyRuleProtocolTCP, + Action: PolicyTrafficActionAccept, + PortRanges: []RulePortRange{ + { + Start: uint16(port), + End: uint16(port), + }, + }, + }, + }, + } +} + // expandPortsAndRanges expands Ports and PortRanges of a rule into individual firewall rules func expandPortsAndRanges(base FirewallRule, rule *PolicyRule, peer *nbpeer.Peer) []*FirewallRule { features := peerSupportedFirewallFeatures(peer.Meta.WtVersion) diff --git a/management/server/types/networkmap_golden_test.go b/management/server/types/networkmap_golden_test.go index ef6c51779..53261f22d 100644 --- a/management/server/types/networkmap_golden_test.go +++ b/management/server/types/networkmap_golden_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/require" "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/management/internals/modules/zones" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" networkTypes "github.com/netbirdio/netbird/management/server/networks/types" @@ -70,7 +71,7 @@ func TestGetPeerNetworkMap_Golden(t *testing.T) { resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, []*zones.Zone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) normalizeAndSortNetworkMap(legacyNetworkMap) legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") require.NoError(t, err, "error marshaling legacy network map to JSON") @@ -115,7 +116,7 @@ func BenchmarkGetPeerNetworkMap(b *testing.B) { b.Run("old builder", func(b *testing.B) { for range b.N { for _, peerID := range peerIDs { - _ = account.GetPeerNetworkMap(ctx, peerID, dns.CustomZone{}, nil, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) + _ = account.GetPeerNetworkMap(ctx, peerID, dns.CustomZone{}, []*zones.Zone{}, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) } } }) @@ -177,7 +178,7 @@ func TestGetPeerNetworkMap_Golden_WithNewPeer(t *testing.T) { resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, []*zones.Zone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) normalizeAndSortNetworkMap(legacyNetworkMap) legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") require.NoError(t, err, "error marshaling legacy network map to JSON") @@ -240,7 +241,7 @@ func BenchmarkGetPeerNetworkMap_AfterPeerAdded(b *testing.B) { b.Run("old builder after add", func(b *testing.B) { for i := 0; i < b.N; i++ { for _, testingPeerID := range peerIDs { - _ = account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) + _ = account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, []*zones.Zone{}, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) } } }) @@ -317,7 +318,7 @@ func TestGetPeerNetworkMap_Golden_WithNewRoutingPeer(t *testing.T) { resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, []*zones.Zone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) normalizeAndSortNetworkMap(legacyNetworkMap) legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") require.NoError(t, err, "error marshaling legacy network map to JSON") @@ -402,7 +403,7 @@ func BenchmarkGetPeerNetworkMap_AfterRouterPeerAdded(b *testing.B) { b.Run("old builder after add", func(b *testing.B) { for i := 0; i < b.N; i++ { for _, testingPeerID := range peerIDs { - _ = account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) + _ = account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, []*zones.Zone{}, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) } } }) @@ -458,7 +459,7 @@ func TestGetPeerNetworkMap_Golden_WithDeletedPeer(t *testing.T) { resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, []*zones.Zone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) normalizeAndSortNetworkMap(legacyNetworkMap) legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") require.NoError(t, err, "error marshaling legacy network map to JSON") @@ -537,7 +538,7 @@ func TestGetPeerNetworkMap_Golden_WithDeletedRouterPeer(t *testing.T) { resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, []*zones.Zone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) normalizeAndSortNetworkMap(legacyNetworkMap) legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") require.NoError(t, err, "error marshaling legacy network map to JSON") @@ -597,7 +598,7 @@ func BenchmarkGetPeerNetworkMap_AfterPeerDeleted(b *testing.B) { b.Run("old builder after delete", func(b *testing.B) { for i := 0; i < b.N; i++ { for _, testingPeerID := range peerIDs { - _ = account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) + _ = account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, []*zones.Zone{}, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) } } }) diff --git a/management/server/types/proxy.go b/management/server/types/proxy.go new file mode 100644 index 000000000..1b80e80d1 --- /dev/null +++ b/management/server/types/proxy.go @@ -0,0 +1,7 @@ +package types + +// ProxyCallbackEndpoint holds the proxy callback endpoint +const ProxyCallbackEndpoint = "/reverse-proxy/callback" + +// ProxyCallbackEndpointFull holds the proxy callback endpoint with api suffix +const ProxyCallbackEndpointFull = "/api" + ProxyCallbackEndpoint diff --git a/management/server/types/proxy_access_token.go b/management/server/types/proxy_access_token.go new file mode 100644 index 000000000..b20b83bc1 --- /dev/null +++ b/management/server/types/proxy_access_token.go @@ -0,0 +1,137 @@ +package types + +import ( + "crypto/sha256" + "encoding/base64" + "fmt" + "hash/crc32" + "strings" + "time" + + b "github.com/hashicorp/go-secure-stdlib/base62" + "github.com/rs/xid" + + "github.com/netbirdio/netbird/base62" + "github.com/netbirdio/netbird/management/server/util" +) + +const ( + // ProxyTokenPrefix is the globally used prefix for proxy access tokens + ProxyTokenPrefix = "nbx_" + // ProxyTokenSecretLength is the number of characters used for the secret + ProxyTokenSecretLength = 30 + // ProxyTokenChecksumLength is the number of characters used for the encoded checksum + ProxyTokenChecksumLength = 6 + // ProxyTokenLength is the total number of characters used for the token + ProxyTokenLength = 40 +) + +// HashedProxyToken is a SHA-256 hash of a plain proxy token, base64-encoded. +type HashedProxyToken string + +// PlainProxyToken is the raw token string displayed once at creation time. +type PlainProxyToken string + +// ProxyAccessToken holds information about a proxy access token including a hashed version for verification +type ProxyAccessToken struct { + ID string `gorm:"primaryKey"` + Name string + HashedToken HashedProxyToken `gorm:"type:varchar(255);uniqueIndex"` + // AccountID is nil for management-wide tokens, set for account-scoped tokens + AccountID *string `gorm:"index"` + ExpiresAt *time.Time + CreatedBy string + CreatedAt time.Time + LastUsed *time.Time + Revoked bool +} + +// IsExpired returns true if the token has expired +func (t *ProxyAccessToken) IsExpired() bool { + if t.ExpiresAt == nil { + return false + } + return time.Now().After(*t.ExpiresAt) +} + +// IsValid returns true if the token is not revoked and not expired +func (t *ProxyAccessToken) IsValid() bool { + return !t.Revoked && !t.IsExpired() +} + +// ProxyAccessTokenGenerated holds the new token and the plain text version +type ProxyAccessTokenGenerated struct { + PlainToken PlainProxyToken + ProxyAccessToken +} + +// CreateNewProxyAccessToken generates a new proxy access token. +// Returns the token with hashed value stored and plain token for one-time display. +func CreateNewProxyAccessToken(name string, expiresIn time.Duration, accountID *string, createdBy string) (*ProxyAccessTokenGenerated, error) { + hashedToken, plainToken, err := generateProxyToken() + if err != nil { + return nil, err + } + + currentTime := time.Now().UTC() + var expiresAt *time.Time + if expiresIn > 0 { + expiresAt = util.ToPtr(currentTime.Add(expiresIn)) + } + + return &ProxyAccessTokenGenerated{ + ProxyAccessToken: ProxyAccessToken{ + ID: xid.New().String(), + Name: name, + HashedToken: hashedToken, + AccountID: accountID, + ExpiresAt: expiresAt, + CreatedBy: createdBy, + CreatedAt: currentTime, + Revoked: false, + }, + PlainToken: plainToken, + }, nil +} + +func generateProxyToken() (HashedProxyToken, PlainProxyToken, error) { + secret, err := b.Random(ProxyTokenSecretLength) + if err != nil { + return "", "", err + } + + checksum := crc32.ChecksumIEEE([]byte(secret)) + encodedChecksum := base62.Encode(checksum) + paddedChecksum := fmt.Sprintf("%06s", encodedChecksum) + plainToken := PlainProxyToken(ProxyTokenPrefix + secret + paddedChecksum) + return plainToken.Hash(), plainToken, nil +} + +// Hash returns the SHA-256 hash of the plain token, base64-encoded. +func (t PlainProxyToken) Hash() HashedProxyToken { + h := sha256.Sum256([]byte(t)) + return HashedProxyToken(base64.StdEncoding.EncodeToString(h[:])) +} + +// Validate checks the format of a proxy token without checking the database. +func (t PlainProxyToken) Validate() error { + if !strings.HasPrefix(string(t), ProxyTokenPrefix) { + return fmt.Errorf("invalid token prefix") + } + + if len(t) != ProxyTokenLength { + return fmt.Errorf("invalid token length") + } + + secret := t[len(ProxyTokenPrefix) : len(t)-ProxyTokenChecksumLength] + checksumStr := t[len(t)-ProxyTokenChecksumLength:] + + expectedChecksum := crc32.ChecksumIEEE([]byte(secret)) + expectedChecksumStr := fmt.Sprintf("%06s", base62.Encode(expectedChecksum)) + + if string(checksumStr) != expectedChecksumStr { + return fmt.Errorf("invalid token checksum") + } + + return nil +} diff --git a/management/server/types/proxy_access_token_test.go b/management/server/types/proxy_access_token_test.go new file mode 100644 index 000000000..aa1a4d2dd --- /dev/null +++ b/management/server/types/proxy_access_token_test.go @@ -0,0 +1,155 @@ +package types + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPlainProxyToken_Validate(t *testing.T) { + tests := []struct { + name string + token PlainProxyToken + wantErr bool + errMsg string + }{ + { + name: "valid token", + token: "", // will be generated + wantErr: false, + }, + { + name: "wrong prefix", + token: "xyz_8FbPkxioCFmlvCTJbD1RafygfVmS9z15lyNM", + wantErr: true, + errMsg: "invalid token prefix", + }, + { + name: "too short", + token: "nbx_short", + wantErr: true, + errMsg: "invalid token length", + }, + { + name: "too long", + token: "nbx_8FbPkxioCFmlvCTJbD1RafygfVmS9z15lyNMextra", + wantErr: true, + errMsg: "invalid token length", + }, + { + name: "correct length but invalid checksum", + token: "nbx_invalidtoken123456789012345678901234", // exactly 40 chars, invalid checksum + wantErr: true, + errMsg: "invalid token checksum", + }, + { + name: "empty token", + token: "", + wantErr: true, + errMsg: "invalid token prefix", + }, + { + name: "only prefix", + token: "nbx_", + wantErr: true, + errMsg: "invalid token length", + }, + } + + // Generate a valid token for the first test + generated, err := CreateNewProxyAccessToken("test", 0, nil, "test") + require.NoError(t, err) + tests[0].token = generated.PlainToken + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.token.Validate() + if tt.wantErr { + assert.Error(t, err) + if tt.errMsg != "" { + assert.Contains(t, err.Error(), tt.errMsg) + } + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestPlainProxyToken_Hash(t *testing.T) { + token1 := PlainProxyToken("nbx_8FbPkxioCFmlvCTJbD1RafygfVmS9z15lyNM") + token2 := PlainProxyToken("nbx_8FbPkxioCFmlvCTJbD1RafygfVmS9z15lyNM") + token3 := PlainProxyToken("nbx_differenttoken1234567890123456789X") + + hash1 := token1.Hash() + hash2 := token2.Hash() + hash3 := token3.Hash() + + assert.Equal(t, hash1, hash2, "same token should produce same hash") + assert.NotEqual(t, hash1, hash3, "different tokens should produce different hashes") + assert.NotEmpty(t, hash1) +} + +func TestCreateNewProxyAccessToken(t *testing.T) { + t.Run("creates valid token", func(t *testing.T) { + generated, err := CreateNewProxyAccessToken("test-token", 0, nil, "test-user") + require.NoError(t, err) + + assert.NotEmpty(t, generated.ID) + assert.Equal(t, "test-token", generated.Name) + assert.Equal(t, "test-user", generated.CreatedBy) + assert.NotEmpty(t, generated.HashedToken) + assert.NotEmpty(t, generated.PlainToken) + assert.Nil(t, generated.ExpiresAt) + assert.False(t, generated.Revoked) + + assert.NoError(t, generated.PlainToken.Validate()) + assert.Equal(t, ProxyTokenLength, len(generated.PlainToken)) + assert.Equal(t, ProxyTokenPrefix, string(generated.PlainToken[:len(ProxyTokenPrefix)])) + }) + + t.Run("tokens are unique", func(t *testing.T) { + gen1, err := CreateNewProxyAccessToken("test1", 0, nil, "user") + require.NoError(t, err) + + gen2, err := CreateNewProxyAccessToken("test2", 0, nil, "user") + require.NoError(t, err) + + assert.NotEqual(t, gen1.PlainToken, gen2.PlainToken) + assert.NotEqual(t, gen1.HashedToken, gen2.HashedToken) + assert.NotEqual(t, gen1.ID, gen2.ID) + }) +} + +func TestProxyAccessToken_IsExpired(t *testing.T) { + past := time.Now().Add(-1 * time.Hour) + future := time.Now().Add(1 * time.Hour) + + t.Run("expired token", func(t *testing.T) { + token := &ProxyAccessToken{ExpiresAt: &past} + assert.True(t, token.IsExpired()) + }) + + t.Run("not expired token", func(t *testing.T) { + token := &ProxyAccessToken{ExpiresAt: &future} + assert.False(t, token.IsExpired()) + }) + + t.Run("no expiration", func(t *testing.T) { + token := &ProxyAccessToken{ExpiresAt: nil} + assert.False(t, token.IsExpired()) + }) +} + +func TestProxyAccessToken_IsValid(t *testing.T) { + token := &ProxyAccessToken{ + Revoked: false, + } + + assert.True(t, token.IsValid()) + + token.Revoked = true + assert.False(t, token.IsValid()) +} diff --git a/management/server/util/util.go b/management/server/util/util.go index ce9759864..617484274 100644 --- a/management/server/util/util.go +++ b/management/server/util/util.go @@ -50,4 +50,3 @@ func contains[T comparableObject[T]](slice []T, element T) bool { } return false } - diff --git a/proxy/Dockerfile b/proxy/Dockerfile new file mode 100644 index 000000000..096c71f21 --- /dev/null +++ b/proxy/Dockerfile @@ -0,0 +1,19 @@ +FROM golang:1.25-alpine AS builder +WORKDIR /app + +RUN echo "netbird:x:1000:1000:netbird:/var/lib/netbird:/sbin/nologin" > /tmp/passwd && \ + echo "netbird:x:1000:netbird" > /tmp/group && \ + mkdir -p /tmp/var/lib/netbird && \ + mkdir -p /tmp/certs + +FROM gcr.io/distroless/base:debug +COPY netbird-proxy /go/bin/netbird-proxy +COPY --from=builder /tmp/passwd /etc/passwd +COPY --from=builder /tmp/group /etc/group +COPY --from=builder /tmp/var/lib/netbird /var/lib/netbird +COPY --from=builder --chown=1000:1000 --chmod=755 /tmp/certs /certs +USER netbird:netbird +ENV HOME=/var/lib/netbird +ENV NB_PROXY_ADDRESS=":8443" +EXPOSE 8443 +ENTRYPOINT ["/go/bin/netbird-proxy"] diff --git a/proxy/Dockerfile.multistage b/proxy/Dockerfile.multistage new file mode 100644 index 000000000..2e3ac3561 --- /dev/null +++ b/proxy/Dockerfile.multistage @@ -0,0 +1,37 @@ +FROM golang:1.25-alpine AS builder +WORKDIR /app + +COPY go.mod go.sum ./ +RUN go mod download + +COPY client ./client +COPY dns ./dns +COPY encryption ./encryption +COPY flow ./flow +COPY formatter ./formatter +COPY monotime ./monotime +COPY proxy ./proxy +COPY route ./route +COPY shared ./shared +COPY sharedsock ./sharedsock +COPY upload-server ./upload-server +COPY util ./util +COPY version ./version +RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o netbird-proxy ./proxy/cmd/proxy + +RUN echo "netbird:x:1000:1000:netbird:/var/lib/netbird:/sbin/nologin" > /tmp/passwd && \ + echo "netbird:x:1000:netbird" > /tmp/group && \ + mkdir -p /tmp/var/lib/netbird && \ + mkdir -p /tmp/certs + +FROM gcr.io/distroless/base:debug +COPY --from=builder /app/netbird-proxy /usr/bin/netbird-proxy +COPY --from=builder /tmp/passwd /etc/passwd +COPY --from=builder /tmp/group /etc/group +COPY --from=builder /tmp/var/lib/netbird /var/lib/netbird +COPY --from=builder --chown=1000:1000 --chmod=755 /tmp/certs /certs +USER netbird:netbird +ENV HOME=/var/lib/netbird +ENV NB_PROXY_ADDRESS=":8443" +EXPOSE 8443 +ENTRYPOINT ["/usr/bin/netbird-proxy"] diff --git a/proxy/LICENSE b/proxy/LICENSE new file mode 100644 index 000000000..be3f7b28e --- /dev/null +++ b/proxy/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/proxy/README.md b/proxy/README.md new file mode 100644 index 000000000..6af7cadd2 --- /dev/null +++ b/proxy/README.md @@ -0,0 +1,80 @@ +# Netbird Reverse Proxy + +The NetBird Reverse Proxy is a separate service that can act as a public entrypoint to certain resources within a NetBird network. +At a high level, the way that it operates is: +- Configured routes are communicated from the Management server to the proxy. +- For each route the proxy creates a NetBird connection to the NetBird Peer that hosts the resource. +- When traffic hits the proxy at the address and path configured for the proxied resource, the NetBird Proxy brings up a relevant authentication method for that resource. +- On successful authentication the proxy will forward traffic onwards to the NetBird Peer. + +Proxy Authentication methods supported are: +- No authentication +- Oauth2/OIDC +- Emailed Magic Link +- Simple PIN +- HTTP Basic Auth Username and Password + +## Management Connection and Authentication + +The Proxy communicates with the Management server over a gRPC connection. +Proxies act as clients to the Management server, the following RPCs are used: +- Server-side streaming for proxied service updates. +- Client-side streaming for proxy logs. + +To authenticate with the Management server, the proxy server uses Machine-to-Machine OAuth2. +If you are using the embedded IdP //TODO: explain how to get credentials. +Otherwise, create a new machine-to-machine profile in your IdP for proxy servers and set the relevant settings in the proxy's environment or flags (see below). + +## User Authentication + +When a request hits the Proxy, it looks up the permitted authentication methods for the Host domain. +If no authentication methods are registered for the Host domain, then no authentication will be applied (for fully public resources). +If any authentication methods are registered for the Host domain, then the Proxy will first serve an authentication page allowing the user to select an authentication method (from the permitted methods) and enter the required information for that authentication method. +If the user is successfully authenticated, their request will be forwarded through to the Proxy to be proxied to the relevant Peer. +Successful authentication does not guarantee a successful forwarding of the request as there may be failures behind the Proxy, such as with Peer connectivity or the underlying resource. + +## TLS + +Due to the authentication provided, the Proxy uses HTTPS for its endpoint, even if the underlying service is HTTP. +Certificate generation can either be via ACME (by default, using Let's Encrypt, but alternative ACME providers can be used) or through certificate files. +When not using ACME, the proxy server attempts to load a certificate and key from the files `tls.crt` and `tls.key` in a specified certificate directory. +When using ACME, the proxy server will store generated certificates in the specified certificate directory. + + +## Auth UI + +The authentication UI is a Vite + React application located in the `web/` directory. It is embedded into the Go binary at build time. + +To build the UI: +```bash +cd web +npm install +npm run build +``` + +For UI development with hot reload (served at http://localhost:3031): +```bash +npm run dev +``` + +The built assets in `web/dist/` are embedded via `//go:embed` and served by the `web.ServeHTTP` handler. + +## Configuration + +NetBird Proxy deployment configuration is via flags or environment variables, with flags taking precedence over the environment. +The following deployment configuration is available: + +| Flag | Env | Purpose | Default | +|------------------|----------------------------------|------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------| +| `-debug` | `NB_PROXY_DEBUG_LOGS` | Enable debug logging | `false` | +| `-mgmt` | `NB_PROXY_MANAGEMENT_ADDRESS` | The address of the management server for the proxy to get configuration from. | `"https://api.netbird.io:443"` | +| `-addr` | `NB_PROXY_ADDRESS` | The address that the reverse proxy will listen on. | `":443` | +| `-url` | `NB_PROXY_URL` | The URL that the proxy will be reached at (where endpoints will be CNAMEd to). If unset, this will fall back to the proxy address. | `"proxy.netbird.io"` | +| `-cert-dir` | `NB_PROXY_CERTIFICATE_DIRECTORY` | The location that certificates are stored in. | `"./certs"` | +| `-acme-certs` | `NB_PROXY_ACME_CERTIFICATES` | Whether to use ACME to generate certificates. | `false` | +| `-acme-addr` | `NB_PROXY_ACME_ADDRESS` | The HTTP address the proxy will listen on to respond to HTTP-01 ACME challenges | `":80"` | +| `-acme-dir` | `NB_PROXY_ACME_DIRECTORY` | The directory URL of the ACME server to be used | `"https://acme-v02.api.letsencrypt.org/directory"` | +| `-oidc-id` | `NB_PROXY_OIDC_CLIENT_ID` | The OAuth2 Client ID for OIDC User Authentication | `"netbird-proxy"` | +| `-oidc-secret` | `NB_PROXY_OIDC_CLIENT_SECRET` | The OAuth2 Client Secret for OIDC User Authentication | `""` | +| `-oidc-endpoint` | `NB_PROXY_OIDC_ENDPOINT` | The OAuth2 provider endpoint for OIDC User Authentication | `"https://api.netbird.io/oauth2"` | +| `-oidc-scopes` | `NB_PROXY_OIDC_SCOPES` | The OAuth2 scopes for OIDC User Authentication, comma separated | `"openid,profile,email"` | diff --git a/proxy/auth/auth.go b/proxy/auth/auth.go new file mode 100644 index 000000000..14caa03b3 --- /dev/null +++ b/proxy/auth/auth.go @@ -0,0 +1,76 @@ +// Package auth contains exported proxy auth values. +// These are used to ensure coherent usage across management and proxy implementations. +package auth + +import ( + "crypto/ed25519" + "crypto/tls" + "fmt" + "time" + + "github.com/golang-jwt/jwt/v5" +) + +type Method string + +var ( + MethodPassword Method = "password" + MethodPIN Method = "pin" + MethodOIDC Method = "oidc" +) + +func (m Method) String() string { + return string(m) +} + +const ( + SessionCookieName = "nb_session" + DefaultSessionExpiry = 24 * time.Hour + SessionJWTIssuer = "netbird-management" +) + +// ResolveProto determines the protocol scheme based on the forwarded proto +// configuration. When set to "http" or "https" the value is used directly. +// Otherwise TLS state is used: if conn is non-nil "https" is returned, else "http". +func ResolveProto(forwardedProto string, conn *tls.ConnectionState) string { + switch forwardedProto { + case "http", "https": + return forwardedProto + default: + if conn != nil { + return "https" + } + return "http" + } +} + +// ValidateSessionJWT validates a session JWT and returns the user ID and method. +func ValidateSessionJWT(tokenString, domain string, publicKey ed25519.PublicKey) (userID, method string, err error) { + if publicKey == nil { + return "", "", fmt.Errorf("no public key configured for domain") + } + + token, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) { + if _, ok := t.Method.(*jwt.SigningMethodEd25519); !ok { + return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"]) + } + return publicKey, nil + }, jwt.WithAudience(domain), jwt.WithIssuer(SessionJWTIssuer)) + if err != nil { + return "", "", fmt.Errorf("parse token: %w", err) + } + + claims, ok := token.Claims.(jwt.MapClaims) + if !ok || !token.Valid { + return "", "", fmt.Errorf("invalid token claims") + } + + sub, _ := claims.GetSubject() + if sub == "" { + return "", "", fmt.Errorf("missing subject claim") + } + + methodClaim, _ := claims["method"].(string) + + return sub, methodClaim, nil +} diff --git a/proxy/cmd/proxy/cmd/debug.go b/proxy/cmd/proxy/cmd/debug.go new file mode 100644 index 000000000..59f7a6b65 --- /dev/null +++ b/proxy/cmd/proxy/cmd/debug.go @@ -0,0 +1,173 @@ +package cmd + +import ( + "fmt" + "strconv" + + "github.com/spf13/cobra" + + "github.com/netbirdio/netbird/proxy/internal/debug" +) + +var ( + debugAddr string + jsonOutput bool + + // status filters + statusFilterByIPs []string + statusFilterByNames []string + statusFilterByStatus string + statusFilterByConnectionType string +) + +var debugCmd = &cobra.Command{ + Use: "debug", + Short: "Debug commands for inspecting proxy state", + Long: "Debug commands for inspecting the reverse proxy state via the debug HTTP endpoint.", +} + +var debugHealthCmd = &cobra.Command{ + Use: "health", + Short: "Show proxy health status", + RunE: runDebugHealth, + SilenceUsage: true, +} + +var debugClientsCmd = &cobra.Command{ + Use: "clients", + Aliases: []string{"list"}, + Short: "List all connected clients", + RunE: runDebugClients, + SilenceUsage: true, +} + +var debugStatusCmd = &cobra.Command{ + Use: "status ", + Short: "Show client status", + Args: cobra.ExactArgs(1), + RunE: runDebugStatus, + SilenceUsage: true, +} + +var debugSyncCmd = &cobra.Command{ + Use: "sync-response ", + Short: "Show client sync response", + Args: cobra.ExactArgs(1), + RunE: runDebugSync, + SilenceUsage: true, +} + +var pingTimeout string + +var debugPingCmd = &cobra.Command{ + Use: "ping [port]", + Short: "TCP ping through a client", + Long: "Perform a TCP ping through a client's network to test connectivity.\nPort defaults to 80 if not specified.", + Args: cobra.RangeArgs(2, 3), + RunE: runDebugPing, + SilenceUsage: true, +} + +var debugLogCmd = &cobra.Command{ + Use: "log", + Short: "Manage client logging", + Long: "Commands to manage logging settings for a client connected through the proxy.", +} + +var debugLogLevelCmd = &cobra.Command{ + Use: "level ", + Short: "Set client log level", + Long: "Set the log level for a client (trace, debug, info, warn, error).", + Args: cobra.ExactArgs(2), + RunE: runDebugLogLevel, + SilenceUsage: true, +} + +var debugStartCmd = &cobra.Command{ + Use: "start ", + Short: "Start a client", + Args: cobra.ExactArgs(1), + RunE: runDebugStart, + SilenceUsage: true, +} + +var debugStopCmd = &cobra.Command{ + Use: "stop ", + Short: "Stop a client", + Args: cobra.ExactArgs(1), + RunE: runDebugStop, + SilenceUsage: true, +} + +func init() { + debugCmd.PersistentFlags().StringVar(&debugAddr, "addr", envStringOrDefault("NB_PROXY_DEBUG_ADDRESS", "localhost:8444"), "Debug endpoint address") + debugCmd.PersistentFlags().BoolVar(&jsonOutput, "json", false, "Output JSON instead of pretty format") + + debugStatusCmd.Flags().StringSliceVar(&statusFilterByIPs, "filter-by-ips", nil, "Filter by peer IPs (comma-separated)") + debugStatusCmd.Flags().StringSliceVar(&statusFilterByNames, "filter-by-names", nil, "Filter by peer names (comma-separated)") + debugStatusCmd.Flags().StringVar(&statusFilterByStatus, "filter-by-status", "", "Filter by status (idle|connecting|connected)") + debugStatusCmd.Flags().StringVar(&statusFilterByConnectionType, "filter-by-connection-type", "", "Filter by connection type (P2P|Relayed)") + + debugPingCmd.Flags().StringVar(&pingTimeout, "timeout", "", "Ping timeout (e.g., 10s)") + + debugCmd.AddCommand(debugHealthCmd) + debugCmd.AddCommand(debugClientsCmd) + debugCmd.AddCommand(debugStatusCmd) + debugCmd.AddCommand(debugSyncCmd) + debugCmd.AddCommand(debugPingCmd) + debugLogCmd.AddCommand(debugLogLevelCmd) + debugCmd.AddCommand(debugLogCmd) + debugCmd.AddCommand(debugStartCmd) + debugCmd.AddCommand(debugStopCmd) + + rootCmd.AddCommand(debugCmd) +} + +func getDebugClient(cmd *cobra.Command) *debug.Client { + return debug.NewClient(debugAddr, jsonOutput, cmd.OutOrStdout()) +} + +func runDebugHealth(cmd *cobra.Command, _ []string) error { + return getDebugClient(cmd).Health(cmd.Context()) +} + +func runDebugClients(cmd *cobra.Command, _ []string) error { + return getDebugClient(cmd).ListClients(cmd.Context()) +} + +func runDebugStatus(cmd *cobra.Command, args []string) error { + return getDebugClient(cmd).ClientStatus(cmd.Context(), args[0], debug.StatusFilters{ + IPs: statusFilterByIPs, + Names: statusFilterByNames, + Status: statusFilterByStatus, + ConnectionType: statusFilterByConnectionType, + }) +} + +func runDebugSync(cmd *cobra.Command, args []string) error { + return getDebugClient(cmd).ClientSyncResponse(cmd.Context(), args[0]) +} + +func runDebugPing(cmd *cobra.Command, args []string) error { + port := 80 + if len(args) > 2 { + p, err := strconv.Atoi(args[2]) + if err != nil { + return fmt.Errorf("invalid port: %w", err) + } + port = p + } + return getDebugClient(cmd).PingTCP(cmd.Context(), args[0], args[1], port, pingTimeout) +} + +func runDebugLogLevel(cmd *cobra.Command, args []string) error { + return getDebugClient(cmd).SetLogLevel(cmd.Context(), args[0], args[1]) +} + +func runDebugStart(cmd *cobra.Command, args []string) error { + return getDebugClient(cmd).StartClient(cmd.Context(), args[0]) +} + +func runDebugStop(cmd *cobra.Command, args []string) error { + return getDebugClient(cmd).StopClient(cmd.Context(), args[0]) +} diff --git a/proxy/cmd/proxy/cmd/root.go b/proxy/cmd/proxy/cmd/root.go new file mode 100644 index 000000000..e6593ade5 --- /dev/null +++ b/proxy/cmd/proxy/cmd/root.go @@ -0,0 +1,210 @@ +package cmd + +import ( + "context" + "fmt" + "os" + "os/signal" + "strconv" + "strings" + "syscall" + + "github.com/netbirdio/netbird/shared/management/domain" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "golang.org/x/crypto/acme" + + "github.com/netbirdio/netbird/proxy" + nbacme "github.com/netbirdio/netbird/proxy/internal/acme" + "github.com/netbirdio/netbird/util" +) + +const DefaultManagementURL = "https://api.netbird.io:443" + +// envProxyToken is the environment variable name for the proxy access token. +// +//nolint:gosec +const envProxyToken = "NB_PROXY_TOKEN" + +var ( + Version = "dev" + Commit = "unknown" + BuildDate = "unknown" + GoVersion = "unknown" +) + +var ( + debugLogs bool + mgmtAddr string + addr string + proxyDomain string + certDir string + acmeCerts bool + acmeAddr string + acmeDir string + acmeChallengeType string + debugEndpoint bool + debugEndpointAddr string + healthAddr string + oidcClientID string + oidcClientSecret string + oidcEndpoint string + oidcScopes string + forwardedProto string + trustedProxies string + certFile string + certKeyFile string + certLockMethod string + wgPort int +) + +var rootCmd = &cobra.Command{ + Use: "proxy", + Short: "NetBird reverse proxy server", + Long: "NetBird reverse proxy server for proxying traffic to NetBird networks.", + Version: Version, + SilenceUsage: true, + RunE: runServer, +} + +func init() { + rootCmd.PersistentFlags().BoolVar(&debugLogs, "debug", envBoolOrDefault("NB_PROXY_DEBUG_LOGS", false), "Enable debug logs") + rootCmd.Flags().StringVar(&mgmtAddr, "mgmt", envStringOrDefault("NB_PROXY_MANAGEMENT_ADDRESS", DefaultManagementURL), "Management address to connect to") + rootCmd.Flags().StringVar(&addr, "addr", envStringOrDefault("NB_PROXY_ADDRESS", ":443"), "Reverse proxy address to listen on") + rootCmd.Flags().StringVar(&proxyDomain, "domain", envStringOrDefault("NB_PROXY_DOMAIN", ""), "The Domain at which this proxy will be reached. e.g., netbird.example.com") + rootCmd.Flags().StringVar(&certDir, "cert-dir", envStringOrDefault("NB_PROXY_CERTIFICATE_DIRECTORY", "./certs"), "Directory to store certificates") + rootCmd.Flags().BoolVar(&acmeCerts, "acme-certs", envBoolOrDefault("NB_PROXY_ACME_CERTIFICATES", false), "Generate ACME certificates automatically") + rootCmd.Flags().StringVar(&acmeAddr, "acme-addr", envStringOrDefault("NB_PROXY_ACME_ADDRESS", ":80"), "HTTP address for ACME HTTP-01 challenges (only used when acme-challenge-type is http-01)") + rootCmd.Flags().StringVar(&acmeDir, "acme-dir", envStringOrDefault("NB_PROXY_ACME_DIRECTORY", acme.LetsEncryptURL), "URL of ACME challenge directory") + rootCmd.Flags().StringVar(&acmeChallengeType, "acme-challenge-type", envStringOrDefault("NB_PROXY_ACME_CHALLENGE_TYPE", "tls-alpn-01"), "ACME challenge type: tls-alpn-01 (default, port 443 only) or http-01 (requires port 80)") + rootCmd.Flags().BoolVar(&debugEndpoint, "debug-endpoint", envBoolOrDefault("NB_PROXY_DEBUG_ENDPOINT", false), "Enable debug HTTP endpoint") + rootCmd.Flags().StringVar(&debugEndpointAddr, "debug-endpoint-addr", envStringOrDefault("NB_PROXY_DEBUG_ENDPOINT_ADDRESS", "localhost:8444"), "Address for the debug HTTP endpoint") + rootCmd.Flags().StringVar(&healthAddr, "health-addr", envStringOrDefault("NB_PROXY_HEALTH_ADDRESS", "localhost:8080"), "Address for the health probe endpoint (liveness/readiness/startup)") + rootCmd.Flags().StringVar(&oidcClientID, "oidc-id", envStringOrDefault("NB_PROXY_OIDC_CLIENT_ID", "netbird-proxy"), "The OAuth2 Client ID for OIDC User Authentication") + rootCmd.Flags().StringVar(&oidcClientSecret, "oidc-secret", envStringOrDefault("NB_PROXY_OIDC_CLIENT_SECRET", ""), "The OAuth2 Client Secret for OIDC User Authentication") + rootCmd.Flags().StringVar(&oidcEndpoint, "oidc-endpoint", envStringOrDefault("NB_PROXY_OIDC_ENDPOINT", ""), "The OIDC Endpoint for OIDC User Authentication") + rootCmd.Flags().StringVar(&oidcScopes, "oidc-scopes", envStringOrDefault("NB_PROXY_OIDC_SCOPES", "openid,profile,email"), "The OAuth2 scopes for OIDC User Authentication, comma separated") + rootCmd.Flags().StringVar(&forwardedProto, "forwarded-proto", envStringOrDefault("NB_PROXY_FORWARDED_PROTO", "auto"), "X-Forwarded-Proto value for backends: auto, http, or https") + rootCmd.Flags().StringVar(&trustedProxies, "trusted-proxies", envStringOrDefault("NB_PROXY_TRUSTED_PROXIES", ""), "Comma-separated list of trusted upstream proxy CIDR ranges (e.g. '10.0.0.0/8,192.168.1.1')") + rootCmd.Flags().StringVar(&certFile, "cert-file", envStringOrDefault("NB_PROXY_CERTIFICATE_FILE", "tls.crt"), "TLS certificate filename within the certificate directory") + rootCmd.Flags().StringVar(&certKeyFile, "cert-key-file", envStringOrDefault("NB_PROXY_CERTIFICATE_KEY_FILE", "tls.key"), "TLS certificate key filename within the certificate directory") + rootCmd.Flags().StringVar(&certLockMethod, "cert-lock-method", envStringOrDefault("NB_PROXY_CERT_LOCK_METHOD", "auto"), "Certificate lock method for cross-replica coordination: auto, flock, or k8s-lease") + rootCmd.Flags().IntVar(&wgPort, "wg-port", envIntOrDefault("NB_PROXY_WG_PORT", 0), "WireGuard listen port (0 = random). Fixed port only works with single-account deployments") +} + +// Execute runs the root command. +func Execute() { + if err := rootCmd.Execute(); err != nil { + os.Exit(1) + } +} + +// SetVersionInfo sets version information for the CLI. +func SetVersionInfo(version, commit, buildDate, goVersion string) { + Version = version + Commit = commit + BuildDate = buildDate + GoVersion = goVersion + rootCmd.Version = version + rootCmd.SetVersionTemplate("Version: {{.Version}}, Commit: " + Commit + ", BuildDate: " + BuildDate + ", Go: " + GoVersion + "\n") +} + +func runServer(cmd *cobra.Command, args []string) error { + proxyToken := os.Getenv(envProxyToken) + if proxyToken == "" { + return fmt.Errorf("proxy token is required: set %s environment variable", envProxyToken) + } + + level := "error" + if debugLogs { + level = "debug" + } + logger := log.New() + + _ = util.InitLogger(logger, level, util.LogConsole) + + logger.Infof("configured log level: %s", level) + + switch forwardedProto { + case "auto", "http", "https": + default: + return fmt.Errorf("invalid --forwarded-proto value %q: must be auto, http, or https", forwardedProto) + } + + _, err := domain.ValidateDomains([]string{proxyDomain}) + if err != nil { + return fmt.Errorf("invalid domain value %q: %w", proxyDomain, err) + } + + parsedTrustedProxies, err := proxy.ParseTrustedProxies(trustedProxies) + if err != nil { + return fmt.Errorf("invalid --trusted-proxies: %w", err) + } + + srv := proxy.Server{ + Logger: logger, + Version: Version, + ManagementAddress: mgmtAddr, + ProxyURL: proxyDomain, + ProxyToken: proxyToken, + CertificateDirectory: certDir, + CertificateFile: certFile, + CertificateKeyFile: certKeyFile, + GenerateACMECertificates: acmeCerts, + ACMEChallengeAddress: acmeAddr, + ACMEDirectory: acmeDir, + ACMEChallengeType: acmeChallengeType, + DebugEndpointEnabled: debugEndpoint, + DebugEndpointAddress: debugEndpointAddr, + HealthAddress: healthAddr, + OIDCClientId: oidcClientID, + OIDCClientSecret: oidcClientSecret, + OIDCEndpoint: oidcEndpoint, + OIDCScopes: strings.Split(oidcScopes, ","), + ForwardedProto: forwardedProto, + TrustedProxies: parsedTrustedProxies, + CertLockMethod: nbacme.CertLockMethod(certLockMethod), + WireguardPort: wgPort, + } + + ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM, syscall.SIGINT) + defer stop() + + if err := srv.ListenAndServe(ctx, addr); err != nil { + logger.Error(err) + return err + } + return nil +} + +func envBoolOrDefault(key string, def bool) bool { + v, exists := os.LookupEnv(key) + if !exists { + return def + } + parsed, err := strconv.ParseBool(v) + if err != nil { + return def + } + return parsed +} + +func envStringOrDefault(key string, def string) string { + v, exists := os.LookupEnv(key) + if !exists { + return def + } + return v +} + +func envIntOrDefault(key string, def int) int { + v, exists := os.LookupEnv(key) + if !exists { + return def + } + parsed, err := strconv.Atoi(v) + if err != nil { + return def + } + return parsed +} diff --git a/proxy/cmd/proxy/main.go b/proxy/cmd/proxy/main.go new file mode 100644 index 000000000..14e540a2e --- /dev/null +++ b/proxy/cmd/proxy/main.go @@ -0,0 +1,26 @@ +package main + +import ( + "runtime" + + "github.com/netbirdio/netbird/proxy/cmd/proxy/cmd" +) + +var ( + // Version is the application version (set via ldflags during build) + Version = "dev" + + // Commit is the git commit hash (set via ldflags during build) + Commit = "unknown" + + // BuildDate is the build date (set via ldflags during build) + BuildDate = "unknown" + + // GoVersion is the Go version used to build the binary + GoVersion = runtime.Version() +) + +func main() { + cmd.SetVersionInfo(Version, Commit, BuildDate, GoVersion) + cmd.Execute() +} diff --git a/proxy/handle_mapping_stream_test.go b/proxy/handle_mapping_stream_test.go new file mode 100644 index 000000000..d2ad3f67e --- /dev/null +++ b/proxy/handle_mapping_stream_test.go @@ -0,0 +1,94 @@ +package proxy + +import ( + "context" + "io" + "testing" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + + "github.com/netbirdio/netbird/proxy/internal/health" + "github.com/netbirdio/netbird/shared/management/proto" +) + +type mockMappingStream struct { + grpc.ClientStream + messages []*proto.GetMappingUpdateResponse + idx int +} + +func (m *mockMappingStream) Recv() (*proto.GetMappingUpdateResponse, error) { + if m.idx >= len(m.messages) { + return nil, io.EOF + } + msg := m.messages[m.idx] + m.idx++ + return msg, nil +} + +func (m *mockMappingStream) Header() (metadata.MD, error) { + return nil, nil //nolint:nilnil +} +func (m *mockMappingStream) Trailer() metadata.MD { return nil } +func (m *mockMappingStream) CloseSend() error { return nil } +func (m *mockMappingStream) Context() context.Context { return context.Background() } +func (m *mockMappingStream) SendMsg(any) error { return nil } +func (m *mockMappingStream) RecvMsg(any) error { return nil } + +func TestHandleMappingStream_SyncCompleteFlag(t *testing.T) { + checker := health.NewChecker(nil, nil) + s := &Server{ + Logger: log.StandardLogger(), + healthChecker: checker, + } + + stream := &mockMappingStream{ + messages: []*proto.GetMappingUpdateResponse{ + {InitialSyncComplete: true}, + }, + } + + syncDone := false + err := s.handleMappingStream(context.Background(), stream, &syncDone) + assert.NoError(t, err) + assert.True(t, syncDone, "initial sync should be marked done when flag is set") +} + +func TestHandleMappingStream_NoSyncFlagDoesNotMarkDone(t *testing.T) { + checker := health.NewChecker(nil, nil) + s := &Server{ + Logger: log.StandardLogger(), + healthChecker: checker, + } + + stream := &mockMappingStream{ + messages: []*proto.GetMappingUpdateResponse{ + {}, // no sync flag + }, + } + + syncDone := false + err := s.handleMappingStream(context.Background(), stream, &syncDone) + assert.NoError(t, err) + assert.False(t, syncDone, "initial sync should not be marked done without flag") +} + +func TestHandleMappingStream_NilHealthChecker(t *testing.T) { + s := &Server{ + Logger: log.StandardLogger(), + } + + stream := &mockMappingStream{ + messages: []*proto.GetMappingUpdateResponse{ + {InitialSyncComplete: true}, + }, + } + + syncDone := false + err := s.handleMappingStream(context.Background(), stream, &syncDone) + assert.NoError(t, err) + assert.True(t, syncDone, "sync done flag should be set even without health checker") +} diff --git a/proxy/internal/accesslog/logger.go b/proxy/internal/accesslog/logger.go new file mode 100644 index 000000000..9e204be65 --- /dev/null +++ b/proxy/internal/accesslog/logger.go @@ -0,0 +1,105 @@ +package accesslog + +import ( + "context" + "net/netip" + "time" + + log "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/netbirdio/netbird/proxy/auth" + "github.com/netbirdio/netbird/shared/management/proto" +) + +type gRPCClient interface { + SendAccessLog(ctx context.Context, in *proto.SendAccessLogRequest, opts ...grpc.CallOption) (*proto.SendAccessLogResponse, error) +} + +// Logger sends access log entries to the management server via gRPC. +type Logger struct { + client gRPCClient + logger *log.Logger + trustedProxies []netip.Prefix +} + +// NewLogger creates a new access log Logger. The trustedProxies parameter +// configures which upstream proxy IP ranges are trusted for extracting +// the real client IP from X-Forwarded-For headers. +func NewLogger(client gRPCClient, logger *log.Logger, trustedProxies []netip.Prefix) *Logger { + if logger == nil { + logger = log.StandardLogger() + } + return &Logger{ + client: client, + logger: logger, + trustedProxies: trustedProxies, + } +} + +type logEntry struct { + ID string + AccountID string + ServiceId string + Host string + Path string + DurationMs int64 + Method string + ResponseCode int32 + SourceIp string + AuthMechanism string + UserId string + AuthSuccess bool +} + +func (l *Logger) log(ctx context.Context, entry logEntry) { + // Fire off the log request in a separate routine. + // This increases the possibility of losing a log message + // (although it should still get logged in the event of an error), + // but it will reduce latency returning the request in the + // middleware. + // There is also a chance that log messages will arrive at + // the server out of order; however, the timestamp should + // allow for resolving that on the server. + now := timestamppb.Now() // Grab the timestamp before launching the goroutine to try to prevent weird timing issues. This is probably unnecessary. + go func() { + logCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if entry.AuthMechanism != auth.MethodOIDC.String() { + entry.UserId = "" + } + if _, err := l.client.SendAccessLog(logCtx, &proto.SendAccessLogRequest{ + Log: &proto.AccessLog{ + LogId: entry.ID, + AccountId: entry.AccountID, + Timestamp: now, + ServiceId: entry.ServiceId, + Host: entry.Host, + Path: entry.Path, + DurationMs: entry.DurationMs, + Method: entry.Method, + ResponseCode: entry.ResponseCode, + SourceIp: entry.SourceIp, + AuthMechanism: entry.AuthMechanism, + UserId: entry.UserId, + AuthSuccess: entry.AuthSuccess, + }, + }); err != nil { + // If it fails to send on the gRPC connection, then at least log it to the error log. + l.logger.WithFields(log.Fields{ + "service_id": entry.ServiceId, + "host": entry.Host, + "path": entry.Path, + "duration": entry.DurationMs, + "method": entry.Method, + "response_code": entry.ResponseCode, + "source_ip": entry.SourceIp, + "auth_mechanism": entry.AuthMechanism, + "user_id": entry.UserId, + "auth_success": entry.AuthSuccess, + "error": err, + }).Error("Error sending access log on gRPC connection") + } + }() +} diff --git a/proxy/internal/accesslog/middleware.go b/proxy/internal/accesslog/middleware.go new file mode 100644 index 000000000..ca7556bfd --- /dev/null +++ b/proxy/internal/accesslog/middleware.go @@ -0,0 +1,74 @@ +package accesslog + +import ( + "net" + "net/http" + "strings" + "time" + + "github.com/rs/xid" + + "github.com/netbirdio/netbird/proxy/internal/proxy" + "github.com/netbirdio/netbird/proxy/web" +) + +func (l *Logger) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Skip logging for internal proxy assets (CSS, JS, etc.) + if strings.HasPrefix(r.URL.Path, web.PathPrefix+"/") { + next.ServeHTTP(w, r) + return + } + + // Generate request ID early so it can be used by error pages and log correlation. + requestID := xid.New().String() + + l.logger.Debugf("request: request_id=%s method=%s host=%s path=%s", requestID, r.Method, r.Host, r.URL.Path) + + // Use a response writer wrapper so we can access the status code later. + sw := &statusWriter{ + w: w, + status: http.StatusOK, + } + + // Resolve the source IP using trusted proxy configuration before passing + // the request on, as the proxy will modify forwarding headers. + sourceIp := extractSourceIP(r, l.trustedProxies) + + // Create a mutable struct to capture data from downstream handlers. + // We pass a pointer in the context - the pointer itself flows down immutably, + // but the struct it points to can be mutated by inner handlers. + capturedData := &proxy.CapturedData{RequestID: requestID} + capturedData.SetClientIP(sourceIp) + ctx := proxy.WithCapturedData(r.Context(), capturedData) + + start := time.Now() + next.ServeHTTP(sw, r.WithContext(ctx)) + duration := time.Since(start) + + host, _, err := net.SplitHostPort(r.Host) + if err != nil { + // Fallback to just using the full host value. + host = r.Host + } + + entry := logEntry{ + ID: requestID, + ServiceId: capturedData.GetServiceId(), + AccountID: string(capturedData.GetAccountId()), + Host: host, + Path: r.URL.Path, + DurationMs: duration.Milliseconds(), + Method: r.Method, + ResponseCode: int32(sw.status), + SourceIp: sourceIp, + AuthMechanism: capturedData.GetAuthMethod(), + UserId: capturedData.GetUserID(), + AuthSuccess: sw.status != http.StatusUnauthorized && sw.status != http.StatusForbidden, + } + l.logger.Debugf("response: request_id=%s method=%s host=%s path=%s status=%d duration=%dms source=%s origin=%s service=%s account=%s", + requestID, r.Method, host, r.URL.Path, sw.status, duration.Milliseconds(), sourceIp, capturedData.GetOrigin(), capturedData.GetServiceId(), capturedData.GetAccountId()) + + l.log(r.Context(), entry) + }) +} diff --git a/proxy/internal/accesslog/requestip.go b/proxy/internal/accesslog/requestip.go new file mode 100644 index 000000000..f111c1322 --- /dev/null +++ b/proxy/internal/accesslog/requestip.go @@ -0,0 +1,16 @@ +package accesslog + +import ( + "net/http" + "net/netip" + + "github.com/netbirdio/netbird/proxy/internal/proxy" +) + +// extractSourceIP resolves the real client IP from the request using trusted +// proxy configuration. When trustedProxies is non-empty and the direct +// connection is from a trusted source, it walks X-Forwarded-For right-to-left +// skipping trusted IPs. Otherwise it returns RemoteAddr directly. +func extractSourceIP(r *http.Request, trustedProxies []netip.Prefix) string { + return proxy.ResolveClientIP(r.RemoteAddr, r.Header.Get("X-Forwarded-For"), trustedProxies) +} diff --git a/proxy/internal/accesslog/statuswriter.go b/proxy/internal/accesslog/statuswriter.go new file mode 100644 index 000000000..56ef90efa --- /dev/null +++ b/proxy/internal/accesslog/statuswriter.go @@ -0,0 +1,26 @@ +package accesslog + +import ( + "net/http" +) + +// statusWriter is a simple wrapper around an http.ResponseWriter +// that captures the setting of the status code via the WriteHeader +// function and stores it so that it can be retrieved later. +type statusWriter struct { + w http.ResponseWriter + status int +} + +func (w *statusWriter) Header() http.Header { + return w.w.Header() +} + +func (w *statusWriter) Write(data []byte) (int, error) { + return w.w.Write(data) +} + +func (w *statusWriter) WriteHeader(status int) { + w.status = status + w.w.WriteHeader(status) +} diff --git a/proxy/internal/acme/locker.go b/proxy/internal/acme/locker.go new file mode 100644 index 000000000..2f0f18885 --- /dev/null +++ b/proxy/internal/acme/locker.go @@ -0,0 +1,102 @@ +package acme + +import ( + "context" + "path/filepath" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/proxy/internal/flock" + "github.com/netbirdio/netbird/proxy/internal/k8s" +) + +// certLocker provides distributed mutual exclusion for certificate operations. +// Implementations must be safe for concurrent use from multiple goroutines. +type certLocker interface { + // Lock acquires an exclusive lock for the given domain. + // It blocks until the lock is acquired, the context is cancelled, or an + // unrecoverable error occurs. The returned function releases the lock; + // callers must call it exactly once when the critical section is complete. + Lock(ctx context.Context, domain string) (unlock func(), err error) +} + +// CertLockMethod controls how ACME certificate locks are coordinated. +type CertLockMethod string + +const ( + // CertLockAuto detects the environment and selects k8s-lease if running + // in a Kubernetes pod, otherwise flock. + CertLockAuto CertLockMethod = "auto" + // CertLockFlock uses advisory file locks via flock(2). + CertLockFlock CertLockMethod = "flock" + // CertLockK8sLease uses Kubernetes coordination Leases. + CertLockK8sLease CertLockMethod = "k8s-lease" +) + +func newCertLocker(method CertLockMethod, certDir string, logger *log.Logger) certLocker { + if logger == nil { + logger = log.StandardLogger() + } + + if method == "" || method == CertLockAuto { + if k8s.InCluster() { + method = CertLockK8sLease + } else { + method = CertLockFlock + } + logger.Infof("auto-detected cert lock method: %s", method) + } + + switch method { + case CertLockK8sLease: + locker, err := newK8sLeaseLocker(logger) + if err != nil { + logger.Warnf("create k8s lease locker, falling back to flock: %v", err) + return newFlockLocker(certDir, logger) + } + logger.Infof("using k8s lease locker in namespace %s", locker.client.Namespace()) + return locker + default: + logger.Infof("using flock cert locker in %s", certDir) + return newFlockLocker(certDir, logger) + } +} + +type flockLocker struct { + certDir string + logger *log.Logger +} + +func newFlockLocker(certDir string, logger *log.Logger) *flockLocker { + if logger == nil { + logger = log.StandardLogger() + } + return &flockLocker{certDir: certDir, logger: logger} +} + +// Lock acquires an advisory file lock for the given domain. +func (l *flockLocker) Lock(ctx context.Context, domain string) (func(), error) { + lockPath := filepath.Join(l.certDir, domain+".lock") + lockFile, err := flock.Lock(ctx, lockPath) + if err != nil { + return nil, err + } + + // nil lockFile means locking is not supported (non-unix). + if lockFile == nil { + return func() { /* no-op: locking unsupported on this platform */ }, nil + } + + return func() { + if err := flock.Unlock(lockFile); err != nil { + l.logger.Debugf("release cert lock for domain %q: %v", domain, err) + } + }, nil +} + +type noopLocker struct{} + +// Lock is a no-op that always succeeds immediately. +func (noopLocker) Lock(context.Context, string) (func(), error) { + return func() { /* no-op: locker disabled */ }, nil +} diff --git a/proxy/internal/acme/locker_k8s.go b/proxy/internal/acme/locker_k8s.go new file mode 100644 index 000000000..a3f8043e6 --- /dev/null +++ b/proxy/internal/acme/locker_k8s.go @@ -0,0 +1,197 @@ +package acme + +import ( + "context" + "errors" + "fmt" + "os" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/proxy/internal/k8s" +) + +const ( + // leaseDurationSec is the Kubernetes Lease TTL. If the holder crashes without + // releasing the lock, other replicas must wait this long before taking over. + // This is intentionally generous: in the worst case two replicas may both + // issue an ACME request for the same domain, which is harmless (the CA + // deduplicates and the cache converges). + leaseDurationSec = 300 + retryBaseBackoff = 500 * time.Millisecond + retryMaxBackoff = 10 * time.Second +) + +type k8sLeaseLocker struct { + client *k8s.LeaseClient + identity string + logger *log.Logger +} + +func newK8sLeaseLocker(logger *log.Logger) (*k8sLeaseLocker, error) { + client, err := k8s.NewLeaseClient() + if err != nil { + return nil, fmt.Errorf("create k8s lease client: %w", err) + } + + identity, err := os.Hostname() + if err != nil { + return nil, fmt.Errorf("get hostname: %w", err) + } + + return &k8sLeaseLocker{ + client: client, + identity: identity, + logger: logger, + }, nil +} + +// Lock acquires a Kubernetes Lease for the given domain using optimistic +// concurrency. It retries with exponential backoff until the lease is +// acquired or the context is cancelled. +func (l *k8sLeaseLocker) Lock(ctx context.Context, domain string) (func(), error) { + leaseName := k8s.LeaseNameForDomain(domain) + backoff := retryBaseBackoff + + for { + acquired, err := l.tryAcquire(ctx, leaseName, domain) + if err != nil { + return nil, fmt.Errorf("acquire lease %s for %q: %w", leaseName, domain, err) + } + if acquired { + l.logger.Debugf("k8s lease %s acquired for domain %q", leaseName, domain) + return l.unlockFunc(leaseName, domain), nil + } + + l.logger.Debugf("k8s lease %s held by another replica, retrying in %s", leaseName, backoff) + + timer := time.NewTimer(backoff) + select { + case <-ctx.Done(): + timer.Stop() + return nil, ctx.Err() + case <-timer.C: + } + + backoff *= 2 + if backoff > retryMaxBackoff { + backoff = retryMaxBackoff + } + } +} + +// tryAcquire attempts to create or take over a Lease. Returns (true, nil) +// on success, (false, nil) if the lease is held and not stale, or an error. +func (l *k8sLeaseLocker) tryAcquire(ctx context.Context, name, domain string) (bool, error) { + existing, err := l.client.Get(ctx, name) + if err != nil { + return false, err + } + + now := k8s.MicroTime{Time: time.Now().UTC()} + dur := int32(leaseDurationSec) + + if existing == nil { + lease := &k8s.Lease{ + Metadata: k8s.LeaseMetadata{ + Name: name, + Annotations: map[string]string{ + "netbird.io/domain": domain, + }, + }, + Spec: k8s.LeaseSpec{ + HolderIdentity: &l.identity, + LeaseDurationSeconds: &dur, + AcquireTime: &now, + RenewTime: &now, + }, + } + + if _, err := l.client.Create(ctx, lease); errors.Is(err, k8s.ErrConflict) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil + } + + if !l.canTakeover(existing) { + return false, nil + } + + existing.Spec.HolderIdentity = &l.identity + existing.Spec.LeaseDurationSeconds = &dur + existing.Spec.AcquireTime = &now + existing.Spec.RenewTime = &now + + if _, err := l.client.Update(ctx, existing); errors.Is(err, k8s.ErrConflict) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} + +// canTakeover returns true if the lease is free (no holder) or stale +// (renewTime + leaseDuration has passed). +func (l *k8sLeaseLocker) canTakeover(lease *k8s.Lease) bool { + holder := lease.Spec.HolderIdentity + if holder == nil || *holder == "" { + return true + } + + // We already hold it (e.g. from a previous crashed attempt). + if *holder == l.identity { + return true + } + + if lease.Spec.RenewTime == nil || lease.Spec.LeaseDurationSeconds == nil { + return true + } + + expiry := lease.Spec.RenewTime.Add(time.Duration(*lease.Spec.LeaseDurationSeconds) * time.Second) + if time.Now().After(expiry) { + l.logger.Infof("k8s lease %s held by %q is stale (expired %s ago), taking over", + lease.Metadata.Name, *holder, time.Since(expiry).Round(time.Second)) + return true + } + + return false +} + +// unlockFunc returns a closure that releases the lease by clearing the holder. +func (l *k8sLeaseLocker) unlockFunc(name, domain string) func() { + return func() { + // Use a fresh context: the parent may already be cancelled. + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Re-GET to get current resourceVersion (ours may be stale if + // the lock was held for a long time and something updated it). + current, err := l.client.Get(ctx, name) + if err != nil { + l.logger.Debugf("release k8s lease %s for %q: get: %v", name, domain, err) + return + } + if current == nil { + return + } + + // Only clear if we're still the holder. + if current.Spec.HolderIdentity == nil || *current.Spec.HolderIdentity != l.identity { + l.logger.Debugf("k8s lease %s for %q: holder changed to %v, skip release", + name, domain, current.Spec.HolderIdentity) + return + } + + empty := "" + current.Spec.HolderIdentity = &empty + current.Spec.AcquireTime = nil + current.Spec.RenewTime = nil + + if _, err := l.client.Update(ctx, current); err != nil { + l.logger.Debugf("release k8s lease %s for %q: update: %v", name, domain, err) + } + } +} diff --git a/proxy/internal/acme/locker_test.go b/proxy/internal/acme/locker_test.go new file mode 100644 index 000000000..39245df0c --- /dev/null +++ b/proxy/internal/acme/locker_test.go @@ -0,0 +1,65 @@ +package acme + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFlockLockerRoundTrip(t *testing.T) { + dir := t.TempDir() + locker := newFlockLocker(dir, nil) + + unlock, err := locker.Lock(context.Background(), "example.com") + require.NoError(t, err) + require.NotNil(t, unlock) + + // Lock file should exist. + assert.FileExists(t, filepath.Join(dir, "example.com.lock")) + + unlock() +} + +func TestNoopLocker(t *testing.T) { + locker := noopLocker{} + unlock, err := locker.Lock(context.Background(), "example.com") + require.NoError(t, err) + require.NotNil(t, unlock) + unlock() +} + +func TestNewCertLockerDefaultsToFlock(t *testing.T) { + dir := t.TempDir() + + // t.Setenv registers cleanup to restore the original value. + // os.Unsetenv is needed because the production code uses LookupEnv, + // which distinguishes "empty" from "not set". + t.Setenv("KUBERNETES_SERVICE_HOST", "") + os.Unsetenv("KUBERNETES_SERVICE_HOST") + locker := newCertLocker(CertLockAuto, dir, nil) + + _, ok := locker.(*flockLocker) + assert.True(t, ok, "auto without k8s env should select flockLocker") +} + +func TestNewCertLockerExplicitFlock(t *testing.T) { + dir := t.TempDir() + locker := newCertLocker(CertLockFlock, dir, nil) + + _, ok := locker.(*flockLocker) + assert.True(t, ok, "explicit flock should select flockLocker") +} + +func TestNewCertLockerK8sFallsBackToFlock(t *testing.T) { + dir := t.TempDir() + + // k8s-lease without SA files should fall back to flock. + locker := newCertLocker(CertLockK8sLease, dir, nil) + + _, ok := locker.(*flockLocker) + assert.True(t, ok, "k8s-lease without SA should fall back to flockLocker") +} diff --git a/proxy/internal/acme/manager.go b/proxy/internal/acme/manager.go new file mode 100644 index 000000000..a663b8138 --- /dev/null +++ b/proxy/internal/acme/manager.go @@ -0,0 +1,336 @@ +package acme + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/asn1" + "encoding/binary" + "fmt" + "net" + "slices" + "sync" + "time" + + log "github.com/sirupsen/logrus" + "golang.org/x/crypto/acme" + "golang.org/x/crypto/acme/autocert" + + "github.com/netbirdio/netbird/shared/management/domain" +) + +// OID for the SCT list extension (1.3.6.1.4.1.11129.2.4.2) +var oidSCTList = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2} + +type certificateNotifier interface { + NotifyCertificateIssued(ctx context.Context, accountID, serviceID, domain string) error +} + +type domainState int + +const ( + domainPending domainState = iota + domainReady + domainFailed +) + +type domainInfo struct { + accountID string + serviceID string + state domainState + err string +} + +// Manager wraps autocert.Manager with domain tracking and cross-replica +// coordination via a pluggable locking strategy. The locker prevents +// duplicate ACME requests when multiple replicas share a certificate cache. +type Manager struct { + *autocert.Manager + + certDir string + locker certLocker + mu sync.RWMutex + domains map[domain.Domain]*domainInfo + + certNotifier certificateNotifier + logger *log.Logger +} + +// NewManager creates a new ACME certificate manager. The certDir is used +// for caching certificates. The lockMethod controls cross-replica +// coordination strategy (see CertLockMethod constants). +func NewManager(certDir, acmeURL string, notifier certificateNotifier, logger *log.Logger, lockMethod CertLockMethod) *Manager { + if logger == nil { + logger = log.StandardLogger() + } + mgr := &Manager{ + certDir: certDir, + locker: newCertLocker(lockMethod, certDir, logger), + domains: make(map[domain.Domain]*domainInfo), + certNotifier: notifier, + logger: logger, + } + mgr.Manager = &autocert.Manager{ + Prompt: autocert.AcceptTOS, + HostPolicy: mgr.hostPolicy, + Cache: autocert.DirCache(certDir), + Client: &acme.Client{ + DirectoryURL: acmeURL, + }, + } + return mgr +} + +func (mgr *Manager) hostPolicy(_ context.Context, host string) error { + if h, _, err := net.SplitHostPort(host); err == nil { + host = h + } + mgr.mu.RLock() + _, exists := mgr.domains[domain.Domain(host)] + mgr.mu.RUnlock() + if !exists { + return fmt.Errorf("unknown domain %q", host) + } + return nil +} + +// AddDomain registers a domain for ACME certificate prefetching. +func (mgr *Manager) AddDomain(d domain.Domain, accountID, serviceID string) { + mgr.mu.Lock() + mgr.domains[d] = &domainInfo{ + accountID: accountID, + serviceID: serviceID, + state: domainPending, + } + mgr.mu.Unlock() + + go mgr.prefetchCertificate(d) +} + +// prefetchCertificate proactively triggers certificate generation for a domain. +// It acquires a distributed lock to prevent multiple replicas from issuing +// duplicate ACME requests. The second replica will block until the first +// finishes, then find the certificate in the cache. +func (mgr *Manager) prefetchCertificate(d domain.Domain) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + name := d.PunycodeString() + + mgr.logger.Infof("acquiring cert lock for domain %q", name) + lockStart := time.Now() + unlock, err := mgr.locker.Lock(ctx, name) + if err != nil { + mgr.logger.Warnf("acquire cert lock for domain %q, proceeding without lock: %v", name, err) + } else { + mgr.logger.Infof("acquired cert lock for domain %q in %s", name, time.Since(lockStart)) + defer unlock() + } + + hello := &tls.ClientHelloInfo{ + ServerName: name, + Conn: &dummyConn{ctx: ctx}, + } + + start := time.Now() + cert, err := mgr.GetCertificate(hello) + elapsed := time.Since(start) + if err != nil { + mgr.logger.Warnf("prefetch certificate for domain %q: %v", name, err) + mgr.setDomainState(d, domainFailed, err.Error()) + return + } + + mgr.setDomainState(d, domainReady, "") + + now := time.Now() + if cert != nil && cert.Leaf != nil { + leaf := cert.Leaf + mgr.logger.Infof("certificate for domain %q ready in %s: serial=%s SANs=%v notBefore=%s, notAfter=%s, now=%s", + name, elapsed.Round(time.Millisecond), + leaf.SerialNumber.Text(16), + leaf.DNSNames, + leaf.NotBefore.UTC().Format(time.RFC3339), + leaf.NotAfter.UTC().Format(time.RFC3339), + now.UTC().Format(time.RFC3339), + ) + mgr.logCertificateDetails(name, leaf, now) + } else { + mgr.logger.Infof("certificate for domain %q ready in %s", name, elapsed.Round(time.Millisecond)) + } + + mgr.mu.RLock() + info := mgr.domains[d] + mgr.mu.RUnlock() + + if info != nil && mgr.certNotifier != nil { + if err := mgr.certNotifier.NotifyCertificateIssued(ctx, info.accountID, info.serviceID, name); err != nil { + mgr.logger.Warnf("notify certificate ready for domain %q: %v", name, err) + } + } +} + +func (mgr *Manager) setDomainState(d domain.Domain, state domainState, errMsg string) { + mgr.mu.Lock() + defer mgr.mu.Unlock() + if info, ok := mgr.domains[d]; ok { + info.state = state + info.err = errMsg + } +} + +// logCertificateDetails logs certificate validity and SCT timestamps. +func (mgr *Manager) logCertificateDetails(domain string, cert *x509.Certificate, now time.Time) { + if cert.NotBefore.After(now) { + mgr.logger.Warnf("certificate for %q NotBefore is in the future by %v", domain, cert.NotBefore.Sub(now)) + } + + sctTimestamps := mgr.parseSCTTimestamps(cert) + if len(sctTimestamps) == 0 { + return + } + + for i, sctTime := range sctTimestamps { + if sctTime.After(now) { + mgr.logger.Warnf("certificate for %q SCT[%d] timestamp is in the future: %v (by %v)", + domain, i, sctTime.UTC(), sctTime.Sub(now)) + } else { + mgr.logger.Debugf("certificate for %q SCT[%d] timestamp: %v (%v in the past)", + domain, i, sctTime.UTC(), now.Sub(sctTime)) + } + } +} + +// parseSCTTimestamps extracts SCT timestamps from a certificate. +func (mgr *Manager) parseSCTTimestamps(cert *x509.Certificate) []time.Time { + var timestamps []time.Time + + for _, ext := range cert.Extensions { + if !ext.Id.Equal(oidSCTList) { + continue + } + + // The extension value is an OCTET STRING containing the SCT list + var sctListBytes []byte + if _, err := asn1.Unmarshal(ext.Value, &sctListBytes); err != nil { + mgr.logger.Debugf("failed to unmarshal SCT list outer wrapper: %v", err) + continue + } + + // SCT list format: 2-byte length prefix, then concatenated SCTs + if len(sctListBytes) < 2 { + continue + } + + listLen := int(binary.BigEndian.Uint16(sctListBytes[:2])) + data := sctListBytes[2:] + if len(data) < listLen { + continue + } + + // Parse individual SCTs + offset := 0 + for offset < listLen { + if offset+2 > len(data) { + break + } + sctLen := int(binary.BigEndian.Uint16(data[offset : offset+2])) + offset += 2 + + if offset+sctLen > len(data) { + break + } + sctData := data[offset : offset+sctLen] + offset += sctLen + + // SCT format: version (1) + log_id (32) + timestamp (8) + ... + if len(sctData) < 41 { + continue + } + + // Timestamp is at offset 33 (after version + log_id), 8 bytes, milliseconds since epoch + tsMillis := binary.BigEndian.Uint64(sctData[33:41]) + ts := time.UnixMilli(int64(tsMillis)) + timestamps = append(timestamps, ts) + } + } + + return timestamps +} + +// dummyConn implements net.Conn to provide context for certificate fetching. +type dummyConn struct { + ctx context.Context +} + +func (c *dummyConn) Read(b []byte) (n int, err error) { return 0, nil } +func (c *dummyConn) Write(b []byte) (n int, err error) { return len(b), nil } +func (c *dummyConn) Close() error { return nil } +func (c *dummyConn) LocalAddr() net.Addr { return nil } +func (c *dummyConn) RemoteAddr() net.Addr { return nil } +func (c *dummyConn) SetDeadline(t time.Time) error { return nil } +func (c *dummyConn) SetReadDeadline(t time.Time) error { return nil } +func (c *dummyConn) SetWriteDeadline(t time.Time) error { return nil } + +// RemoveDomain removes a domain from tracking. +func (mgr *Manager) RemoveDomain(d domain.Domain) { + mgr.mu.Lock() + defer mgr.mu.Unlock() + delete(mgr.domains, d) +} + +// PendingCerts returns the number of certificates currently being prefetched. +func (mgr *Manager) PendingCerts() int { + mgr.mu.RLock() + defer mgr.mu.RUnlock() + var n int + for _, info := range mgr.domains { + if info.state == domainPending { + n++ + } + } + return n +} + +// TotalDomains returns the total number of registered domains. +func (mgr *Manager) TotalDomains() int { + mgr.mu.RLock() + defer mgr.mu.RUnlock() + return len(mgr.domains) +} + +// PendingDomains returns the domain names currently being prefetched. +func (mgr *Manager) PendingDomains() []string { + return mgr.domainsByState(domainPending) +} + +// ReadyDomains returns domain names that have successfully obtained certificates. +func (mgr *Manager) ReadyDomains() []string { + return mgr.domainsByState(domainReady) +} + +// FailedDomains returns domain names that failed certificate prefetch, mapped to their error. +func (mgr *Manager) FailedDomains() map[string]string { + mgr.mu.RLock() + defer mgr.mu.RUnlock() + result := make(map[string]string) + for d, info := range mgr.domains { + if info.state == domainFailed { + result[d.PunycodeString()] = info.err + } + } + return result +} + +func (mgr *Manager) domainsByState(state domainState) []string { + mgr.mu.RLock() + defer mgr.mu.RUnlock() + var domains []string + for d, info := range mgr.domains { + if info.state == state { + domains = append(domains, d.PunycodeString()) + } + } + slices.Sort(domains) + return domains +} diff --git a/proxy/internal/acme/manager_test.go b/proxy/internal/acme/manager_test.go new file mode 100644 index 000000000..3b554e360 --- /dev/null +++ b/proxy/internal/acme/manager_test.go @@ -0,0 +1,102 @@ +package acme + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHostPolicy(t *testing.T) { + mgr := NewManager(t.TempDir(), "https://acme.example.com/directory", nil, nil, "") + mgr.AddDomain("example.com", "acc1", "rp1") + + // Wait for the background prefetch goroutine to finish so the temp dir + // can be cleaned up without a race. + t.Cleanup(func() { + assert.Eventually(t, func() bool { + return mgr.PendingCerts() == 0 + }, 30*time.Second, 50*time.Millisecond) + }) + + tests := []struct { + name string + host string + wantErr bool + }{ + { + name: "exact domain match", + host: "example.com", + }, + { + name: "domain with port", + host: "example.com:443", + }, + { + name: "unknown domain", + host: "unknown.com", + wantErr: true, + }, + { + name: "unknown domain with port", + host: "unknown.com:443", + wantErr: true, + }, + { + name: "empty host", + host: "", + wantErr: true, + }, + { + name: "port only", + host: ":443", + wantErr: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + err := mgr.hostPolicy(context.Background(), tc.host) + if tc.wantErr { + require.Error(t, err) + assert.Contains(t, err.Error(), "unknown domain") + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestDomainStates(t *testing.T) { + mgr := NewManager(t.TempDir(), "https://acme.example.com/directory", nil, nil, "") + + assert.Equal(t, 0, mgr.PendingCerts(), "initially zero") + assert.Equal(t, 0, mgr.TotalDomains(), "initially zero domains") + assert.Empty(t, mgr.PendingDomains()) + assert.Empty(t, mgr.ReadyDomains()) + assert.Empty(t, mgr.FailedDomains()) + + // AddDomain starts as pending, then the prefetch goroutine will fail + // (no real ACME server) and transition to failed. + mgr.AddDomain("a.example.com", "acc1", "rp1") + mgr.AddDomain("b.example.com", "acc1", "rp1") + + assert.Equal(t, 2, mgr.TotalDomains(), "two domains registered") + + // Pending domains should eventually drain after prefetch goroutines finish. + assert.Eventually(t, func() bool { + return mgr.PendingCerts() == 0 + }, 30*time.Second, 100*time.Millisecond, "pending certs should return to zero after prefetch completes") + + assert.Empty(t, mgr.PendingDomains()) + assert.Equal(t, 2, mgr.TotalDomains(), "total domains unchanged") + + // With a fake ACME URL, both should have failed. + failed := mgr.FailedDomains() + assert.Len(t, failed, 2, "both domains should have failed") + assert.Contains(t, failed, "a.example.com") + assert.Contains(t, failed, "b.example.com") + assert.Empty(t, mgr.ReadyDomains()) +} diff --git a/proxy/internal/auth/auth.gohtml b/proxy/internal/auth/auth.gohtml new file mode 100644 index 000000000..9cd36b796 --- /dev/null +++ b/proxy/internal/auth/auth.gohtml @@ -0,0 +1,18 @@ + +{{ range $method, $value := .Methods }} +{{ if eq $method "pin" }} +
+ + + +
+{{ else if eq $method "password" }} +
+ + + +
+{{ else if eq $method "oidc" }} +Click here to log in with SSO +{{ end }} +{{ end }} diff --git a/proxy/internal/auth/middleware.go b/proxy/internal/auth/middleware.go new file mode 100644 index 000000000..8a966faa3 --- /dev/null +++ b/proxy/internal/auth/middleware.go @@ -0,0 +1,364 @@ +package auth + +import ( + "context" + "crypto/ed25519" + "encoding/base64" + "fmt" + "net" + "net/http" + "net/url" + "sync" + "time" + + log "github.com/sirupsen/logrus" + "google.golang.org/grpc" + + "github.com/netbirdio/netbird/proxy/auth" + "github.com/netbirdio/netbird/proxy/internal/proxy" + "github.com/netbirdio/netbird/proxy/internal/types" + "github.com/netbirdio/netbird/proxy/web" + "github.com/netbirdio/netbird/shared/management/proto" +) + +type authenticator interface { + Authenticate(ctx context.Context, in *proto.AuthenticateRequest, opts ...grpc.CallOption) (*proto.AuthenticateResponse, error) +} + +// SessionValidator validates session tokens and checks user access permissions. +type SessionValidator interface { + ValidateSession(ctx context.Context, in *proto.ValidateSessionRequest, opts ...grpc.CallOption) (*proto.ValidateSessionResponse, error) +} + +// Scheme defines an authentication mechanism for a domain. +type Scheme interface { + Type() auth.Method + // Authenticate checks the request and determines whether it represents + // an authenticated user. An empty token indicates an unauthenticated + // request; optionally, promptData may be returned for the login UI. + // An error indicates an infrastructure failure (e.g. gRPC unavailable). + Authenticate(*http.Request) (token string, promptData string, err error) +} + +type DomainConfig struct { + Schemes []Scheme + SessionPublicKey ed25519.PublicKey + SessionExpiration time.Duration + AccountID string + ServiceID string +} + +type validationResult struct { + UserID string + Valid bool + DeniedReason string +} + +type Middleware struct { + domainsMux sync.RWMutex + domains map[string]DomainConfig + logger *log.Logger + sessionValidator SessionValidator +} + +// NewMiddleware creates a new authentication middleware. +// The sessionValidator is optional; if nil, OIDC session tokens will be validated +// locally without group access checks. +func NewMiddleware(logger *log.Logger, sessionValidator SessionValidator) *Middleware { + if logger == nil { + logger = log.StandardLogger() + } + return &Middleware{ + domains: make(map[string]DomainConfig), + logger: logger, + sessionValidator: sessionValidator, + } +} + +// Protect applies authentication middleware to the passed handler. +// For each incoming request it will be checked against the middleware's +// internal list of protected domains. +// If the Host domain in the inbound request is not present, then it will +// simply be passed through. +// However, if the Host domain is present, then the specified authentication +// schemes for that domain will be applied to the request. +// In the event that no authentication schemes are defined for the domain, +// then the request will also be simply passed through. +func (mw *Middleware) Protect(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + host, _, err := net.SplitHostPort(r.Host) + if err != nil { + host = r.Host + } + + config, exists := mw.getDomainConfig(host) + mw.logger.Debugf("checking authentication for host: %s, exists: %t", host, exists) + + // Domains that are not configured here or have no authentication schemes applied should simply pass through. + if !exists || len(config.Schemes) == 0 { + next.ServeHTTP(w, r) + return + } + + // Set account and service IDs in captured data for access logging. + setCapturedIDs(r, config) + + if mw.handleOAuthCallbackError(w, r) { + return + } + + if mw.forwardWithSessionCookie(w, r, host, config, next) { + return + } + + mw.authenticateWithSchemes(w, r, host, config) + }) +} + +func (mw *Middleware) getDomainConfig(host string) (DomainConfig, bool) { + mw.domainsMux.RLock() + defer mw.domainsMux.RUnlock() + config, exists := mw.domains[host] + return config, exists +} + +func setCapturedIDs(r *http.Request, config DomainConfig) { + if cd := proxy.CapturedDataFromContext(r.Context()); cd != nil { + cd.SetAccountId(types.AccountID(config.AccountID)) + cd.SetServiceId(config.ServiceID) + } +} + +// handleOAuthCallbackError checks for error query parameters from an OAuth +// callback and renders the access denied page if present. +func (mw *Middleware) handleOAuthCallbackError(w http.ResponseWriter, r *http.Request) bool { + errCode := r.URL.Query().Get("error") + if errCode == "" { + return false + } + + var requestID string + if cd := proxy.CapturedDataFromContext(r.Context()); cd != nil { + cd.SetOrigin(proxy.OriginAuth) + cd.SetAuthMethod(auth.MethodOIDC.String()) + requestID = cd.GetRequestID() + } + errDesc := r.URL.Query().Get("error_description") + if errDesc == "" { + errDesc = "An error occurred during authentication" + } + web.ServeAccessDeniedPage(w, r, http.StatusForbidden, "Access Denied", errDesc, requestID) + return true +} + +// forwardWithSessionCookie checks for a valid session cookie and, if found, +// sets the user identity on the request context and forwards to the next handler. +func (mw *Middleware) forwardWithSessionCookie(w http.ResponseWriter, r *http.Request, host string, config DomainConfig, next http.Handler) bool { + cookie, err := r.Cookie(auth.SessionCookieName) + if err != nil { + return false + } + userID, method, err := auth.ValidateSessionJWT(cookie.Value, host, config.SessionPublicKey) + if err != nil { + return false + } + if cd := proxy.CapturedDataFromContext(r.Context()); cd != nil { + cd.SetUserID(userID) + cd.SetAuthMethod(method) + } + next.ServeHTTP(w, r) + return true +} + +// authenticateWithSchemes tries each configured auth scheme in order. +// On success it sets a session cookie and redirects; on failure it renders the login page. +func (mw *Middleware) authenticateWithSchemes(w http.ResponseWriter, r *http.Request, host string, config DomainConfig) { + methods := make(map[string]string) + var attemptedMethod string + + for _, scheme := range config.Schemes { + token, promptData, err := scheme.Authenticate(r) + if err != nil { + mw.logger.WithField("scheme", scheme.Type().String()).Warnf("authentication infrastructure error: %v", err) + if cd := proxy.CapturedDataFromContext(r.Context()); cd != nil { + cd.SetOrigin(proxy.OriginAuth) + } + http.Error(w, "authentication service unavailable", http.StatusBadGateway) + return + } + + // Track if credentials were submitted but auth failed + if token == "" && wasCredentialSubmitted(r, scheme.Type()) { + attemptedMethod = scheme.Type().String() + } + + if token != "" { + mw.handleAuthenticatedToken(w, r, host, token, config, scheme) + return + } + methods[scheme.Type().String()] = promptData + } + + if cd := proxy.CapturedDataFromContext(r.Context()); cd != nil { + cd.SetOrigin(proxy.OriginAuth) + if attemptedMethod != "" { + cd.SetAuthMethod(attemptedMethod) + } + } + web.ServeHTTP(w, r, map[string]any{"methods": methods}, http.StatusUnauthorized) +} + +// handleAuthenticatedToken validates the token, handles denied access, and on +// success sets a session cookie and redirects to the original URL. +func (mw *Middleware) handleAuthenticatedToken(w http.ResponseWriter, r *http.Request, host, token string, config DomainConfig, scheme Scheme) { + result, err := mw.validateSessionToken(r.Context(), host, token, config.SessionPublicKey, scheme.Type()) + if err != nil { + if cd := proxy.CapturedDataFromContext(r.Context()); cd != nil { + cd.SetOrigin(proxy.OriginAuth) + cd.SetAuthMethod(scheme.Type().String()) + } + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + if !result.Valid { + var requestID string + if cd := proxy.CapturedDataFromContext(r.Context()); cd != nil { + cd.SetOrigin(proxy.OriginAuth) + cd.SetUserID(result.UserID) + cd.SetAuthMethod(scheme.Type().String()) + requestID = cd.GetRequestID() + } + web.ServeAccessDeniedPage(w, r, http.StatusForbidden, "Access Denied", "You are not authorized to access this service", requestID) + return + } + + expiration := config.SessionExpiration + if expiration == 0 { + expiration = auth.DefaultSessionExpiry + } + http.SetCookie(w, &http.Cookie{ + Name: auth.SessionCookieName, + Value: token, + HttpOnly: true, + Secure: true, + SameSite: http.SameSiteLaxMode, + MaxAge: int(expiration.Seconds()), + }) + + // Redirect instead of forwarding the auth POST to the backend. + // The browser will follow with a GET carrying the new session cookie. + if cd := proxy.CapturedDataFromContext(r.Context()); cd != nil { + cd.SetOrigin(proxy.OriginAuth) + cd.SetUserID(result.UserID) + cd.SetAuthMethod(scheme.Type().String()) + } + redirectURL := stripSessionTokenParam(r.URL) + http.Redirect(w, r, redirectURL, http.StatusSeeOther) +} + +// wasCredentialSubmitted checks if credentials were submitted for the given auth method. +func wasCredentialSubmitted(r *http.Request, method auth.Method) bool { + switch method { + case auth.MethodPIN: + return r.FormValue("pin") != "" + case auth.MethodPassword: + return r.FormValue("password") != "" + case auth.MethodOIDC: + return r.URL.Query().Get("session_token") != "" + } + return false +} + +// AddDomain registers authentication schemes for the given domain. +// If schemes are provided, a valid session public key is required to sign/verify +// session JWTs. Returns an error if the key is missing or invalid. +// Callers must not serve the domain if this returns an error, to avoid +// exposing an unauthenticated service. +func (mw *Middleware) AddDomain(domain string, schemes []Scheme, publicKeyB64 string, expiration time.Duration, accountID, serviceID string) error { + if len(schemes) == 0 { + mw.domainsMux.Lock() + defer mw.domainsMux.Unlock() + mw.domains[domain] = DomainConfig{ + AccountID: accountID, + ServiceID: serviceID, + } + return nil + } + + pubKeyBytes, err := base64.StdEncoding.DecodeString(publicKeyB64) + if err != nil { + return fmt.Errorf("decode session public key for domain %s: %w", domain, err) + } + if len(pubKeyBytes) != ed25519.PublicKeySize { + return fmt.Errorf("invalid session public key size for domain %s: got %d, want %d", domain, len(pubKeyBytes), ed25519.PublicKeySize) + } + + mw.domainsMux.Lock() + defer mw.domainsMux.Unlock() + mw.domains[domain] = DomainConfig{ + Schemes: schemes, + SessionPublicKey: pubKeyBytes, + SessionExpiration: expiration, + AccountID: accountID, + ServiceID: serviceID, + } + return nil +} + +func (mw *Middleware) RemoveDomain(domain string) { + mw.domainsMux.Lock() + defer mw.domainsMux.Unlock() + delete(mw.domains, domain) +} + +// validateSessionToken validates a session token, optionally checking group access via gRPC. +// For OIDC tokens with a configured validator, it calls ValidateSession to check group access. +// For other auth methods (PIN, password), it validates the JWT locally. +// Returns a validationResult with user ID and validity status, or error for invalid tokens. +func (mw *Middleware) validateSessionToken(ctx context.Context, host, token string, publicKey ed25519.PublicKey, method auth.Method) (*validationResult, error) { + // For OIDC with a session validator, call the gRPC service to check group access + if method == auth.MethodOIDC && mw.sessionValidator != nil { + resp, err := mw.sessionValidator.ValidateSession(ctx, &proto.ValidateSessionRequest{ + Domain: host, + SessionToken: token, + }) + if err != nil { + mw.logger.WithError(err).Error("ValidateSession gRPC call failed") + return nil, fmt.Errorf("session validation failed") + } + if !resp.Valid { + mw.logger.WithFields(log.Fields{ + "domain": host, + "denied_reason": resp.DeniedReason, + "user_id": resp.UserId, + }).Debug("Session validation denied") + return &validationResult{ + UserID: resp.UserId, + Valid: false, + DeniedReason: resp.DeniedReason, + }, nil + } + return &validationResult{UserID: resp.UserId, Valid: true}, nil + } + + // For non-OIDC methods or when no validator is configured, validate JWT locally + userID, _, err := auth.ValidateSessionJWT(token, host, publicKey) + if err != nil { + return nil, err + } + return &validationResult{UserID: userID, Valid: true}, nil +} + +// stripSessionTokenParam returns the request URI with the session_token query +// parameter removed so it doesn't linger in the browser's address bar or history. +func stripSessionTokenParam(u *url.URL) string { + q := u.Query() + if !q.Has("session_token") { + return u.RequestURI() + } + q.Del("session_token") + clean := *u + clean.RawQuery = q.Encode() + return clean.RequestURI() +} diff --git a/proxy/internal/auth/middleware_test.go b/proxy/internal/auth/middleware_test.go new file mode 100644 index 000000000..7d9ac1bd5 --- /dev/null +++ b/proxy/internal/auth/middleware_test.go @@ -0,0 +1,660 @@ +package auth + +import ( + "crypto/ed25519" + "crypto/rand" + "encoding/base64" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + "time" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/sessionkey" + "github.com/netbirdio/netbird/proxy/auth" + "github.com/netbirdio/netbird/proxy/internal/proxy" +) + +func generateTestKeyPair(t *testing.T) *sessionkey.KeyPair { + t.Helper() + kp, err := sessionkey.GenerateKeyPair() + require.NoError(t, err) + return kp +} + +// stubScheme is a minimal Scheme implementation for testing. +type stubScheme struct { + method auth.Method + token string + promptID string + authFn func(*http.Request) (string, string, error) +} + +func (s *stubScheme) Type() auth.Method { return s.method } + +func (s *stubScheme) Authenticate(r *http.Request) (string, string, error) { + if s.authFn != nil { + return s.authFn(r) + } + return s.token, s.promptID, nil +} + +func newPassthroughHandler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("backend")) + }) +} + +func TestAddDomain_ValidKey(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + kp := generateTestKeyPair(t) + + scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} + err := mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "") + require.NoError(t, err) + + mw.domainsMux.RLock() + config, exists := mw.domains["example.com"] + mw.domainsMux.RUnlock() + + assert.True(t, exists, "domain should be registered") + assert.Len(t, config.Schemes, 1) + assert.Equal(t, ed25519.PublicKeySize, len(config.SessionPublicKey)) + assert.Equal(t, time.Hour, config.SessionExpiration) +} + +func TestAddDomain_EmptyKey(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + + scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} + err := mw.AddDomain("example.com", []Scheme{scheme}, "", time.Hour, "", "") + require.Error(t, err) + assert.Contains(t, err.Error(), "invalid session public key size") + + mw.domainsMux.RLock() + _, exists := mw.domains["example.com"] + mw.domainsMux.RUnlock() + assert.False(t, exists, "domain must not be registered with an empty session key") +} + +func TestAddDomain_InvalidBase64(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + + scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} + err := mw.AddDomain("example.com", []Scheme{scheme}, "not-valid-base64!!!", time.Hour, "", "") + require.Error(t, err) + assert.Contains(t, err.Error(), "decode session public key") + + mw.domainsMux.RLock() + _, exists := mw.domains["example.com"] + mw.domainsMux.RUnlock() + assert.False(t, exists, "domain must not be registered with invalid base64 key") +} + +func TestAddDomain_WrongKeySize(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + + shortKey := base64.StdEncoding.EncodeToString([]byte("tooshort")) + scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} + err := mw.AddDomain("example.com", []Scheme{scheme}, shortKey, time.Hour, "", "") + require.Error(t, err) + assert.Contains(t, err.Error(), "invalid session public key size") + + mw.domainsMux.RLock() + _, exists := mw.domains["example.com"] + mw.domainsMux.RUnlock() + assert.False(t, exists, "domain must not be registered with a wrong-size key") +} + +func TestAddDomain_NoSchemes_NoKeyRequired(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + + err := mw.AddDomain("example.com", nil, "", time.Hour, "", "") + require.NoError(t, err, "domains with no auth schemes should not require a key") + + mw.domainsMux.RLock() + _, exists := mw.domains["example.com"] + mw.domainsMux.RUnlock() + assert.True(t, exists) +} + +func TestAddDomain_OverwritesPreviousConfig(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + kp1 := generateTestKeyPair(t) + kp2 := generateTestKeyPair(t) + + scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} + + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp1.PublicKey, time.Hour, "", "")) + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp2.PublicKey, 2*time.Hour, "", "")) + + mw.domainsMux.RLock() + config := mw.domains["example.com"] + mw.domainsMux.RUnlock() + + pubKeyBytes, _ := base64.StdEncoding.DecodeString(kp2.PublicKey) + assert.Equal(t, ed25519.PublicKey(pubKeyBytes), config.SessionPublicKey, "should use the latest key") + assert.Equal(t, 2*time.Hour, config.SessionExpiration) +} + +func TestRemoveDomain(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + kp := generateTestKeyPair(t) + + scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + + mw.RemoveDomain("example.com") + + mw.domainsMux.RLock() + _, exists := mw.domains["example.com"] + mw.domainsMux.RUnlock() + assert.False(t, exists) +} + +func TestProtect_UnknownDomainPassesThrough(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + handler := mw.Protect(newPassthroughHandler()) + + req := httptest.NewRequest(http.MethodGet, "http://unknown.com/", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + assert.Equal(t, "backend", rec.Body.String()) +} + +func TestProtect_DomainWithNoSchemesPassesThrough(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + require.NoError(t, mw.AddDomain("example.com", nil, "", time.Hour, "", "")) + + handler := mw.Protect(newPassthroughHandler()) + + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + assert.Equal(t, "backend", rec.Body.String()) +} + +func TestProtect_UnauthenticatedRequestIsBlocked(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + kp := generateTestKeyPair(t) + + scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + + var backendCalled bool + backend := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + backendCalled = true + w.WriteHeader(http.StatusOK) + }) + handler := mw.Protect(backend) + + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.False(t, backendCalled, "unauthenticated request should not reach backend") +} + +func TestProtect_HostWithPortIsMatched(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + kp := generateTestKeyPair(t) + + scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + + var backendCalled bool + backend := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + backendCalled = true + w.WriteHeader(http.StatusOK) + }) + handler := mw.Protect(backend) + + req := httptest.NewRequest(http.MethodGet, "http://example.com:8443/", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.False(t, backendCalled, "host with port should still match the protected domain") +} + +func TestProtect_ValidSessionCookiePassesThrough(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + kp := generateTestKeyPair(t) + + scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + + token, err := sessionkey.SignToken(kp.PrivateKey, "test-user", "example.com", auth.MethodPIN, time.Hour) + require.NoError(t, err) + + capturedData := &proxy.CapturedData{} + handler := mw.Protect(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + cd := proxy.CapturedDataFromContext(r.Context()) + require.NotNil(t, cd) + assert.Equal(t, "test-user", cd.GetUserID()) + assert.Equal(t, "pin", cd.GetAuthMethod()) + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("authenticated")) + })) + + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + req = req.WithContext(proxy.WithCapturedData(req.Context(), capturedData)) + req.AddCookie(&http.Cookie{Name: auth.SessionCookieName, Value: token}) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + assert.Equal(t, "authenticated", rec.Body.String()) +} + +func TestProtect_ExpiredSessionCookieIsRejected(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + kp := generateTestKeyPair(t) + + scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + + // Sign a token that expired 1 second ago. + token, err := sessionkey.SignToken(kp.PrivateKey, "test-user", "example.com", auth.MethodPIN, -time.Second) + require.NoError(t, err) + + var backendCalled bool + backend := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + backendCalled = true + w.WriteHeader(http.StatusOK) + }) + handler := mw.Protect(backend) + + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + req.AddCookie(&http.Cookie{Name: auth.SessionCookieName, Value: token}) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.False(t, backendCalled, "expired session should not reach the backend") +} + +func TestProtect_WrongDomainCookieIsRejected(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + kp := generateTestKeyPair(t) + + scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + + // Token signed for a different domain audience. + token, err := sessionkey.SignToken(kp.PrivateKey, "test-user", "other.com", auth.MethodPIN, time.Hour) + require.NoError(t, err) + + var backendCalled bool + backend := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + backendCalled = true + w.WriteHeader(http.StatusOK) + }) + handler := mw.Protect(backend) + + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + req.AddCookie(&http.Cookie{Name: auth.SessionCookieName, Value: token}) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.False(t, backendCalled, "cookie for wrong domain should be rejected") +} + +func TestProtect_WrongKeyCookieIsRejected(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + kp1 := generateTestKeyPair(t) + kp2 := generateTestKeyPair(t) + + scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp1.PublicKey, time.Hour, "", "")) + + // Token signed with a different private key. + token, err := sessionkey.SignToken(kp2.PrivateKey, "test-user", "example.com", auth.MethodPIN, time.Hour) + require.NoError(t, err) + + var backendCalled bool + backend := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + backendCalled = true + w.WriteHeader(http.StatusOK) + }) + handler := mw.Protect(backend) + + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + req.AddCookie(&http.Cookie{Name: auth.SessionCookieName, Value: token}) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.False(t, backendCalled, "cookie signed by wrong key should be rejected") +} + +func TestProtect_SchemeAuthRedirectsWithCookie(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + kp := generateTestKeyPair(t) + + token, err := sessionkey.SignToken(kp.PrivateKey, "pin-user", "example.com", auth.MethodPIN, time.Hour) + require.NoError(t, err) + + scheme := &stubScheme{ + method: auth.MethodPIN, + authFn: func(r *http.Request) (string, string, error) { + if r.FormValue("pin") == "111111" { + return token, "", nil + } + return "", "pin", nil + }, + } + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + + var backendCalled bool + backend := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + backendCalled = true + w.WriteHeader(http.StatusOK) + }) + handler := mw.Protect(backend) + + // Submit the PIN via form POST. + form := url.Values{"pin": {"111111"}} + req := httptest.NewRequest(http.MethodPost, "http://example.com/somepath", strings.NewReader(form.Encode())) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.False(t, backendCalled, "backend should not be called during auth, only a redirect should be returned") + assert.Equal(t, http.StatusSeeOther, rec.Code) + assert.Equal(t, "/somepath", rec.Header().Get("Location"), "redirect should point to the original request URI") + + cookies := rec.Result().Cookies() + var sessionCookie *http.Cookie + for _, c := range cookies { + if c.Name == auth.SessionCookieName { + sessionCookie = c + break + } + } + require.NotNil(t, sessionCookie, "session cookie should be set after successful auth") + assert.True(t, sessionCookie.HttpOnly) + assert.True(t, sessionCookie.Secure) + assert.Equal(t, http.SameSiteLaxMode, sessionCookie.SameSite) +} + +func TestProtect_FailedAuthDoesNotSetCookie(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + kp := generateTestKeyPair(t) + + scheme := &stubScheme{ + method: auth.MethodPIN, + authFn: func(_ *http.Request) (string, string, error) { + return "", "pin", nil + }, + } + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + + handler := mw.Protect(newPassthroughHandler()) + + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + for _, c := range rec.Result().Cookies() { + assert.NotEqual(t, auth.SessionCookieName, c.Name, "no session cookie should be set on failed auth") + } +} + +func TestProtect_MultipleSchemes(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + kp := generateTestKeyPair(t) + + token, err := sessionkey.SignToken(kp.PrivateKey, "password-user", "example.com", auth.MethodPassword, time.Hour) + require.NoError(t, err) + + // First scheme (PIN) always fails, second scheme (password) succeeds. + pinScheme := &stubScheme{ + method: auth.MethodPIN, + authFn: func(_ *http.Request) (string, string, error) { + return "", "pin", nil + }, + } + passwordScheme := &stubScheme{ + method: auth.MethodPassword, + authFn: func(r *http.Request) (string, string, error) { + if r.FormValue("password") == "secret" { + return token, "", nil + } + return "", "password", nil + }, + } + require.NoError(t, mw.AddDomain("example.com", []Scheme{pinScheme, passwordScheme}, kp.PublicKey, time.Hour, "", "")) + + var backendCalled bool + backend := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + backendCalled = true + w.WriteHeader(http.StatusOK) + }) + handler := mw.Protect(backend) + + form := url.Values{"password": {"secret"}} + req := httptest.NewRequest(http.MethodPost, "http://example.com/", strings.NewReader(form.Encode())) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.False(t, backendCalled, "backend should not be called during auth") + assert.Equal(t, http.StatusSeeOther, rec.Code) +} + +func TestProtect_InvalidTokenFromSchemeReturns400(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + kp := generateTestKeyPair(t) + + // Return a garbage token that won't validate. + scheme := &stubScheme{ + method: auth.MethodPIN, + authFn: func(_ *http.Request) (string, string, error) { + return "invalid-jwt-token", "", nil + }, + } + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + + handler := mw.Protect(newPassthroughHandler()) + + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusBadRequest, rec.Code) +} + +func TestAddDomain_RandomBytes32NotEd25519(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + + // 32 random bytes that happen to be valid base64 and correct size + // but are actually a valid ed25519 public key length-wise. + // This should succeed because ed25519 public keys are just 32 bytes. + randomBytes := make([]byte, ed25519.PublicKeySize) + _, err := rand.Read(randomBytes) + require.NoError(t, err) + + key := base64.StdEncoding.EncodeToString(randomBytes) + scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} + + err = mw.AddDomain("example.com", []Scheme{scheme}, key, time.Hour, "", "") + require.NoError(t, err, "any 32-byte key should be accepted at registration time") +} + +func TestAddDomain_InvalidKeyDoesNotCorruptExistingConfig(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + kp := generateTestKeyPair(t) + + scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + + // Attempt to overwrite with an invalid key. + err := mw.AddDomain("example.com", []Scheme{scheme}, "bad", time.Hour, "", "") + require.Error(t, err) + + // The original valid config should still be intact. + mw.domainsMux.RLock() + config, exists := mw.domains["example.com"] + mw.domainsMux.RUnlock() + + assert.True(t, exists, "original config should still exist") + assert.Len(t, config.Schemes, 1) + assert.Equal(t, time.Hour, config.SessionExpiration) +} + +func TestProtect_FailedPinAuthCapturesAuthMethod(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + kp := generateTestKeyPair(t) + + // Scheme that always fails authentication (returns empty token) + scheme := &stubScheme{ + method: auth.MethodPIN, + authFn: func(_ *http.Request) (string, string, error) { + return "", "pin", nil + }, + } + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + + capturedData := &proxy.CapturedData{} + handler := mw.Protect(newPassthroughHandler()) + + // Submit wrong PIN - should capture auth method + form := url.Values{"pin": {"wrong-pin"}} + req := httptest.NewRequest(http.MethodPost, "http://example.com/", strings.NewReader(form.Encode())) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req = req.WithContext(proxy.WithCapturedData(req.Context(), capturedData)) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusUnauthorized, rec.Code) + assert.Equal(t, "pin", capturedData.GetAuthMethod(), "Auth method should be captured for failed PIN auth") +} + +func TestProtect_FailedPasswordAuthCapturesAuthMethod(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + kp := generateTestKeyPair(t) + + scheme := &stubScheme{ + method: auth.MethodPassword, + authFn: func(_ *http.Request) (string, string, error) { + return "", "password", nil + }, + } + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + + capturedData := &proxy.CapturedData{} + handler := mw.Protect(newPassthroughHandler()) + + // Submit wrong password - should capture auth method + form := url.Values{"password": {"wrong-password"}} + req := httptest.NewRequest(http.MethodPost, "http://example.com/", strings.NewReader(form.Encode())) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req = req.WithContext(proxy.WithCapturedData(req.Context(), capturedData)) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusUnauthorized, rec.Code) + assert.Equal(t, "password", capturedData.GetAuthMethod(), "Auth method should be captured for failed password auth") +} + +func TestProtect_NoCredentialsDoesNotCaptureAuthMethod(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil) + kp := generateTestKeyPair(t) + + scheme := &stubScheme{ + method: auth.MethodPIN, + authFn: func(_ *http.Request) (string, string, error) { + return "", "pin", nil + }, + } + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + + capturedData := &proxy.CapturedData{} + handler := mw.Protect(newPassthroughHandler()) + + // No credentials submitted - should not capture auth method + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + req = req.WithContext(proxy.WithCapturedData(req.Context(), capturedData)) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusUnauthorized, rec.Code) + assert.Empty(t, capturedData.GetAuthMethod(), "Auth method should not be captured when no credentials submitted") +} + +func TestWasCredentialSubmitted(t *testing.T) { + tests := []struct { + name string + method auth.Method + formData url.Values + query url.Values + expected bool + }{ + { + name: "PIN submitted", + method: auth.MethodPIN, + formData: url.Values{"pin": {"123456"}}, + expected: true, + }, + { + name: "PIN not submitted", + method: auth.MethodPIN, + formData: url.Values{}, + expected: false, + }, + { + name: "Password submitted", + method: auth.MethodPassword, + formData: url.Values{"password": {"secret"}}, + expected: true, + }, + { + name: "Password not submitted", + method: auth.MethodPassword, + formData: url.Values{}, + expected: false, + }, + { + name: "OIDC token in query", + method: auth.MethodOIDC, + query: url.Values{"session_token": {"abc123"}}, + expected: true, + }, + { + name: "OIDC token not in query", + method: auth.MethodOIDC, + query: url.Values{}, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + reqURL := "http://example.com/" + if len(tt.query) > 0 { + reqURL += "?" + tt.query.Encode() + } + + var body *strings.Reader + if len(tt.formData) > 0 { + body = strings.NewReader(tt.formData.Encode()) + } else { + body = strings.NewReader("") + } + + req := httptest.NewRequest(http.MethodPost, reqURL, body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + result := wasCredentialSubmitted(req, tt.method) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/proxy/internal/auth/oidc.go b/proxy/internal/auth/oidc.go new file mode 100644 index 000000000..bf178d432 --- /dev/null +++ b/proxy/internal/auth/oidc.go @@ -0,0 +1,65 @@ +package auth + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "google.golang.org/grpc" + + "github.com/netbirdio/netbird/proxy/auth" + "github.com/netbirdio/netbird/shared/management/proto" +) + +type urlGenerator interface { + GetOIDCURL(context.Context, *proto.GetOIDCURLRequest, ...grpc.CallOption) (*proto.GetOIDCURLResponse, error) +} + +type OIDC struct { + id string + accountId string + forwardedProto string + client urlGenerator +} + +// NewOIDC creates a new OIDC authentication scheme +func NewOIDC(client urlGenerator, id, accountId, forwardedProto string) OIDC { + return OIDC{ + id: id, + accountId: accountId, + forwardedProto: forwardedProto, + client: client, + } +} + +func (OIDC) Type() auth.Method { + return auth.MethodOIDC +} + +// Authenticate checks for an OIDC session token or obtains the OIDC redirect URL. +func (o OIDC) Authenticate(r *http.Request) (string, string, error) { + // Check for the session_token query param (from OIDC redirects). + // The management server passes the token in the URL because it cannot set + // cookies for the proxy's domain (cookies are domain-scoped per RFC 6265). + if token := r.URL.Query().Get("session_token"); token != "" { + return token, "", nil + } + + redirectURL := &url.URL{ + Scheme: auth.ResolveProto(o.forwardedProto, r.TLS), + Host: r.Host, + Path: r.URL.Path, + } + + res, err := o.client.GetOIDCURL(r.Context(), &proto.GetOIDCURLRequest{ + Id: o.id, + AccountId: o.accountId, + RedirectUrl: redirectURL.String(), + }) + if err != nil { + return "", "", fmt.Errorf("get OIDC URL: %w", err) + } + + return "", res.GetUrl(), nil +} diff --git a/proxy/internal/auth/password.go b/proxy/internal/auth/password.go new file mode 100644 index 000000000..208423465 --- /dev/null +++ b/proxy/internal/auth/password.go @@ -0,0 +1,61 @@ +package auth + +import ( + "fmt" + "net/http" + + "github.com/netbirdio/netbird/proxy/auth" + "github.com/netbirdio/netbird/shared/management/proto" +) + +const passwordFormId = "password" + +type Password struct { + id, accountId string + client authenticator +} + +func NewPassword(client authenticator, id, accountId string) Password { + return Password{ + id: id, + accountId: accountId, + client: client, + } +} + +func (Password) Type() auth.Method { + return auth.MethodPassword +} + +// Authenticate attempts to authenticate the request using a form +// value passed in the request. +// If authentication fails, the required HTTP form ID is returned +// so that it can be injected into a request from the UI so that +// authentication may be successful. +func (p Password) Authenticate(r *http.Request) (string, string, error) { + password := r.FormValue(passwordFormId) + + if password == "" { + // No password submitted; return the form ID so the UI can prompt the user. + return "", passwordFormId, nil + } + + res, err := p.client.Authenticate(r.Context(), &proto.AuthenticateRequest{ + Id: p.id, + AccountId: p.accountId, + Request: &proto.AuthenticateRequest_Password{ + Password: &proto.PasswordRequest{ + Password: password, + }, + }, + }) + if err != nil { + return "", "", fmt.Errorf("authenticate password: %w", err) + } + + if res.GetSuccess() { + return res.GetSessionToken(), "", nil + } + + return "", passwordFormId, nil +} diff --git a/proxy/internal/auth/pin.go b/proxy/internal/auth/pin.go new file mode 100644 index 000000000..c1eb56071 --- /dev/null +++ b/proxy/internal/auth/pin.go @@ -0,0 +1,61 @@ +package auth + +import ( + "fmt" + "net/http" + + "github.com/netbirdio/netbird/proxy/auth" + "github.com/netbirdio/netbird/shared/management/proto" +) + +const pinFormId = "pin" + +type Pin struct { + id, accountId string + client authenticator +} + +func NewPin(client authenticator, id, accountId string) Pin { + return Pin{ + id: id, + accountId: accountId, + client: client, + } +} + +func (Pin) Type() auth.Method { + return auth.MethodPIN +} + +// Authenticate attempts to authenticate the request using a form +// value passed in the request. +// If authentication fails, the required HTTP form ID is returned +// so that it can be injected into a request from the UI so that +// authentication may be successful. +func (p Pin) Authenticate(r *http.Request) (string, string, error) { + pin := r.FormValue(pinFormId) + + if pin == "" { + // No PIN submitted; return the form ID so the UI can prompt the user. + return "", pinFormId, nil + } + + res, err := p.client.Authenticate(r.Context(), &proto.AuthenticateRequest{ + Id: p.id, + AccountId: p.accountId, + Request: &proto.AuthenticateRequest_Pin{ + Pin: &proto.PinRequest{ + Pin: pin, + }, + }, + }) + if err != nil { + return "", "", fmt.Errorf("authenticate pin: %w", err) + } + + if res.GetSuccess() { + return res.GetSessionToken(), "", nil + } + + return "", pinFormId, nil +} diff --git a/proxy/internal/certwatch/watcher.go b/proxy/internal/certwatch/watcher.go new file mode 100644 index 000000000..78ad1ab7c --- /dev/null +++ b/proxy/internal/certwatch/watcher.go @@ -0,0 +1,279 @@ +// Package certwatch watches TLS certificate files on disk and provides +// a hot-reloading GetCertificate callback for tls.Config. +package certwatch + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "path/filepath" + "sync" + "time" + + "github.com/fsnotify/fsnotify" + log "github.com/sirupsen/logrus" +) + +const ( + defaultPollInterval = 30 * time.Second + debounceDelay = 500 * time.Millisecond +) + +// Watcher monitors TLS certificate files on disk and caches the loaded +// certificate in memory. It detects changes via fsnotify (with a polling +// fallback for filesystems like NFS that lack inotify support) and +// reloads the certificate pair automatically. +type Watcher struct { + certPath string + keyPath string + + mu sync.RWMutex + cert *tls.Certificate + leaf *x509.Certificate + + pollInterval time.Duration + logger *log.Logger +} + +// NewWatcher creates a Watcher that monitors the given cert and key files. +// It performs an initial load of the certificate and returns an error +// if the initial load fails. +func NewWatcher(certPath, keyPath string, logger *log.Logger) (*Watcher, error) { + if logger == nil { + logger = log.StandardLogger() + } + + w := &Watcher{ + certPath: certPath, + keyPath: keyPath, + pollInterval: defaultPollInterval, + logger: logger, + } + + if err := w.reload(); err != nil { + return nil, fmt.Errorf("initial certificate load: %w", err) + } + + return w, nil +} + +// GetCertificate returns the current in-memory certificate. +// It is safe for concurrent use and compatible with tls.Config.GetCertificate. +func (w *Watcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.cert, nil +} + +// Watch starts watching for certificate file changes. It blocks until +// ctx is cancelled. It uses fsnotify for immediate detection and falls +// back to polling if fsnotify is unavailable (e.g. on NFS). +// Even with fsnotify active, a periodic poll runs as a safety net. +func (w *Watcher) Watch(ctx context.Context) { + // Watch the parent directory rather than individual files. Some volume + // mounts use an atomic symlink swap (..data -> timestamped dir), so + // watching the parent directory catches the link replacement. + certDir := filepath.Dir(w.certPath) + keyDir := filepath.Dir(w.keyPath) + + watcher, err := fsnotify.NewWatcher() + if err != nil { + w.logger.Warnf("fsnotify unavailable, using polling only: %v", err) + w.pollLoop(ctx) + return + } + defer func() { + if err := watcher.Close(); err != nil { + w.logger.Debugf("close fsnotify watcher: %v", err) + } + }() + + if err := watcher.Add(certDir); err != nil { + w.logger.Warnf("fsnotify watch on %s failed, using polling only: %v", certDir, err) + w.pollLoop(ctx) + return + } + + if keyDir != certDir { + if err := watcher.Add(keyDir); err != nil { + w.logger.Warnf("fsnotify watch on %s failed: %v", keyDir, err) + } + } + + w.logger.Infof("watching certificate files in %s", certDir) + w.fsnotifyLoop(ctx, watcher) +} + +func (w *Watcher) fsnotifyLoop(ctx context.Context, watcher *fsnotify.Watcher) { + certBase := filepath.Base(w.certPath) + keyBase := filepath.Base(w.keyPath) + + var debounce *time.Timer + defer func() { + if debounce != nil { + debounce.Stop() + } + }() + + // Periodic poll as a safety net for missed fsnotify events. + pollTicker := time.NewTicker(w.pollInterval) + defer pollTicker.Stop() + + for { + select { + case <-ctx.Done(): + return + + case event, ok := <-watcher.Events: + if !ok { + return + } + + base := filepath.Base(event.Name) + if !isRelevantFile(base, certBase, keyBase) { + w.logger.Debugf("fsnotify: ignoring event %s on %s", event.Op, event.Name) + continue + } + if !event.Has(fsnotify.Create) && !event.Has(fsnotify.Write) && !event.Has(fsnotify.Rename) { + w.logger.Debugf("fsnotify: ignoring op %s on %s", event.Op, base) + continue + } + + w.logger.Debugf("fsnotify: detected %s on %s, scheduling reload", event.Op, base) + + // Debounce: cert-manager may write cert and key as separate + // operations. Wait briefly to load both at once. + if debounce != nil { + debounce.Stop() + } + debounce = time.AfterFunc(debounceDelay, func() { + if ctx.Err() != nil { + return + } + w.tryReload() + }) + + case err, ok := <-watcher.Errors: + if !ok { + return + } + w.logger.Warnf("fsnotify error: %v", err) + + case <-pollTicker.C: + w.tryReload() + } + } +} + +func (w *Watcher) pollLoop(ctx context.Context) { + ticker := time.NewTicker(w.pollInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + w.tryReload() + } + } +} + +// reload loads the certificate from disk and updates the in-memory cache. +func (w *Watcher) reload() error { + cert, err := tls.LoadX509KeyPair(w.certPath, w.keyPath) + if err != nil { + return err + } + + // Parse the leaf for comparison on subsequent reloads. + if cert.Leaf == nil && len(cert.Certificate) > 0 { + leaf, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return fmt.Errorf("parse leaf certificate: %w", err) + } + cert.Leaf = leaf + } + + w.mu.Lock() + w.cert = &cert + w.leaf = cert.Leaf + w.mu.Unlock() + + w.logCertDetails("loaded certificate", cert.Leaf) + + return nil +} + +// tryReload attempts to reload the certificate. It skips the update +// if the certificate on disk is identical to the one in memory (same +// serial number and issuer) to avoid redundant log noise. +func (w *Watcher) tryReload() { + cert, err := tls.LoadX509KeyPair(w.certPath, w.keyPath) + if err != nil { + w.logger.Warnf("reload certificate: %v", err) + return + } + + if cert.Leaf == nil && len(cert.Certificate) > 0 { + leaf, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + w.logger.Warnf("parse reloaded leaf certificate: %v", err) + return + } + cert.Leaf = leaf + } + + w.mu.Lock() + + if w.leaf != nil && cert.Leaf != nil && + w.leaf.SerialNumber.Cmp(cert.Leaf.SerialNumber) == 0 && + w.leaf.Issuer.CommonName == cert.Leaf.Issuer.CommonName { + w.mu.Unlock() + return + } + + prev := w.leaf + w.cert = &cert + w.leaf = cert.Leaf + w.mu.Unlock() + + w.logCertChange(prev, cert.Leaf) +} + +func (w *Watcher) logCertDetails(msg string, leaf *x509.Certificate) { + if leaf == nil { + w.logger.Info(msg) + return + } + + w.logger.Infof("%s: subject=%q serial=%s SANs=%v notAfter=%s", + msg, + leaf.Subject.CommonName, + leaf.SerialNumber.Text(16), + leaf.DNSNames, + leaf.NotAfter.UTC().Format(time.RFC3339), + ) +} + +func (w *Watcher) logCertChange(prev, next *x509.Certificate) { + if prev == nil || next == nil { + w.logCertDetails("certificate reloaded from disk", next) + return + } + + w.logger.Infof("certificate reloaded from disk: subject=%q -> %q serial=%s -> %s notAfter=%s -> %s", + prev.Subject.CommonName, next.Subject.CommonName, + prev.SerialNumber.Text(16), next.SerialNumber.Text(16), + prev.NotAfter.UTC().Format(time.RFC3339), next.NotAfter.UTC().Format(time.RFC3339), + ) +} + +// isRelevantFile returns true if the changed file name is one we care about. +// This includes the cert/key files themselves and the ..data symlink used +// by atomic volume mounts. +func isRelevantFile(changed, certBase, keyBase string) bool { + return changed == certBase || changed == keyBase || changed == "..data" +} diff --git a/proxy/internal/certwatch/watcher_test.go b/proxy/internal/certwatch/watcher_test.go new file mode 100644 index 000000000..06b0a4bb8 --- /dev/null +++ b/proxy/internal/certwatch/watcher_test.go @@ -0,0 +1,292 @@ +package certwatch + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func generateSelfSignedCert(t *testing.T, serial int64) (certPEM, keyPEM []byte) { + t.Helper() + + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + + template := &x509.Certificate{ + SerialNumber: big.NewInt(serial), + Subject: pkix.Name{CommonName: "test"}, + NotBefore: time.Now().Add(-time.Hour), + NotAfter: time.Now().Add(24 * time.Hour), + } + + certDER, err := x509.CreateCertificate(rand.Reader, template, template, &key.PublicKey, key) + require.NoError(t, err) + + certPEM = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER}) + + keyDER, err := x509.MarshalECPrivateKey(key) + require.NoError(t, err) + keyPEM = pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyDER}) + + return certPEM, keyPEM +} + +func writeCert(t *testing.T, dir string, certPEM, keyPEM []byte) { + t.Helper() + + require.NoError(t, os.WriteFile(filepath.Join(dir, "tls.crt"), certPEM, 0o600)) + require.NoError(t, os.WriteFile(filepath.Join(dir, "tls.key"), keyPEM, 0o600)) +} + +func TestNewWatcher(t *testing.T) { + dir := t.TempDir() + certPEM, keyPEM := generateSelfSignedCert(t, 1) + writeCert(t, dir, certPEM, keyPEM) + + w, err := NewWatcher( + filepath.Join(dir, "tls.crt"), + filepath.Join(dir, "tls.key"), + nil, + ) + require.NoError(t, err) + + cert, err := w.GetCertificate(nil) + require.NoError(t, err) + require.NotNil(t, cert) + assert.Equal(t, int64(1), cert.Leaf.SerialNumber.Int64()) +} + +func TestNewWatcherMissingFiles(t *testing.T) { + dir := t.TempDir() + + _, err := NewWatcher( + filepath.Join(dir, "tls.crt"), + filepath.Join(dir, "tls.key"), + nil, + ) + assert.Error(t, err) +} + +func TestReload(t *testing.T) { + dir := t.TempDir() + certPEM1, keyPEM1 := generateSelfSignedCert(t, 100) + writeCert(t, dir, certPEM1, keyPEM1) + + w, err := NewWatcher( + filepath.Join(dir, "tls.crt"), + filepath.Join(dir, "tls.key"), + nil, + ) + require.NoError(t, err) + + cert1, err := w.GetCertificate(nil) + require.NoError(t, err) + assert.Equal(t, int64(100), cert1.Leaf.SerialNumber.Int64()) + + // Write a new cert with a different serial. + certPEM2, keyPEM2 := generateSelfSignedCert(t, 200) + writeCert(t, dir, certPEM2, keyPEM2) + + // Manually trigger reload. + w.tryReload() + + cert2, err := w.GetCertificate(nil) + require.NoError(t, err) + assert.Equal(t, int64(200), cert2.Leaf.SerialNumber.Int64()) +} + +func TestTryReloadSkipsUnchanged(t *testing.T) { + dir := t.TempDir() + certPEM, keyPEM := generateSelfSignedCert(t, 42) + writeCert(t, dir, certPEM, keyPEM) + + w, err := NewWatcher( + filepath.Join(dir, "tls.crt"), + filepath.Join(dir, "tls.key"), + nil, + ) + require.NoError(t, err) + + cert1, err := w.GetCertificate(nil) + require.NoError(t, err) + + // Reload with same cert - pointer should remain the same. + w.tryReload() + + cert2, err := w.GetCertificate(nil) + require.NoError(t, err) + assert.Same(t, cert1, cert2, "cert pointer should not change when content is the same") +} + +func TestWatchDetectsChanges(t *testing.T) { + dir := t.TempDir() + certPEM1, keyPEM1 := generateSelfSignedCert(t, 1) + writeCert(t, dir, certPEM1, keyPEM1) + + w, err := NewWatcher( + filepath.Join(dir, "tls.crt"), + filepath.Join(dir, "tls.key"), + nil, + ) + require.NoError(t, err) + + // Use a short poll interval for the test. + w.pollInterval = 100 * time.Millisecond + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go w.Watch(ctx) + + // Write new cert. + certPEM2, keyPEM2 := generateSelfSignedCert(t, 999) + writeCert(t, dir, certPEM2, keyPEM2) + + // Wait for the watcher to pick it up. + require.Eventually(t, func() bool { + cert, err := w.GetCertificate(nil) + if err != nil { + return false + } + return cert.Leaf.SerialNumber.Int64() == 999 + }, 5*time.Second, 50*time.Millisecond, "watcher should detect cert change") +} + +func TestIsRelevantFile(t *testing.T) { + assert.True(t, isRelevantFile("tls.crt", "tls.crt", "tls.key")) + assert.True(t, isRelevantFile("tls.key", "tls.crt", "tls.key")) + assert.True(t, isRelevantFile("..data", "tls.crt", "tls.key")) + assert.False(t, isRelevantFile("other.txt", "tls.crt", "tls.key")) +} + +// TestWatchSymlinkRotation simulates Kubernetes secret volume updates where +// the data directory is atomically swapped via a ..data symlink. +func TestWatchSymlinkRotation(t *testing.T) { + base := t.TempDir() + + // Create initial target directory with certs. + dir1 := filepath.Join(base, "dir1") + require.NoError(t, os.Mkdir(dir1, 0o755)) + certPEM1, keyPEM1 := generateSelfSignedCert(t, 1) + require.NoError(t, os.WriteFile(filepath.Join(dir1, "tls.crt"), certPEM1, 0o600)) + require.NoError(t, os.WriteFile(filepath.Join(dir1, "tls.key"), keyPEM1, 0o600)) + + // Create ..data symlink pointing to dir1. + dataLink := filepath.Join(base, "..data") + require.NoError(t, os.Symlink(dir1, dataLink)) + + // Create tls.crt and tls.key as symlinks to ..data/{file}. + certLink := filepath.Join(base, "tls.crt") + keyLink := filepath.Join(base, "tls.key") + require.NoError(t, os.Symlink(filepath.Join(dataLink, "tls.crt"), certLink)) + require.NoError(t, os.Symlink(filepath.Join(dataLink, "tls.key"), keyLink)) + + w, err := NewWatcher(certLink, keyLink, nil) + require.NoError(t, err) + + cert, err := w.GetCertificate(nil) + require.NoError(t, err) + assert.Equal(t, int64(1), cert.Leaf.SerialNumber.Int64()) + + w.pollInterval = 100 * time.Millisecond + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go w.Watch(ctx) + + // Simulate k8s atomic rotation: create dir2, swap ..data symlink. + dir2 := filepath.Join(base, "dir2") + require.NoError(t, os.Mkdir(dir2, 0o755)) + certPEM2, keyPEM2 := generateSelfSignedCert(t, 777) + require.NoError(t, os.WriteFile(filepath.Join(dir2, "tls.crt"), certPEM2, 0o600)) + require.NoError(t, os.WriteFile(filepath.Join(dir2, "tls.key"), keyPEM2, 0o600)) + + // Atomic swap: create temp link, then rename over ..data. + tmpLink := filepath.Join(base, "..data_tmp") + require.NoError(t, os.Symlink(dir2, tmpLink)) + require.NoError(t, os.Rename(tmpLink, dataLink)) + + require.Eventually(t, func() bool { + cert, err := w.GetCertificate(nil) + if err != nil { + return false + } + return cert.Leaf.SerialNumber.Int64() == 777 + }, 5*time.Second, 50*time.Millisecond, "watcher should detect symlink rotation") +} + +// TestPollLoopDetectsChanges verifies the poll-only fallback path works. +func TestPollLoopDetectsChanges(t *testing.T) { + dir := t.TempDir() + certPEM1, keyPEM1 := generateSelfSignedCert(t, 1) + writeCert(t, dir, certPEM1, keyPEM1) + + w, err := NewWatcher( + filepath.Join(dir, "tls.crt"), + filepath.Join(dir, "tls.key"), + nil, + ) + require.NoError(t, err) + + w.pollInterval = 100 * time.Millisecond + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Directly use pollLoop to test the fallback path. + go w.pollLoop(ctx) + + certPEM2, keyPEM2 := generateSelfSignedCert(t, 555) + writeCert(t, dir, certPEM2, keyPEM2) + + require.Eventually(t, func() bool { + cert, err := w.GetCertificate(nil) + if err != nil { + return false + } + return cert.Leaf.SerialNumber.Int64() == 555 + }, 5*time.Second, 50*time.Millisecond, "poll loop should detect cert change") +} + +func TestGetCertificateConcurrency(t *testing.T) { + dir := t.TempDir() + certPEM, keyPEM := generateSelfSignedCert(t, 1) + writeCert(t, dir, certPEM, keyPEM) + + w, err := NewWatcher( + filepath.Join(dir, "tls.crt"), + filepath.Join(dir, "tls.key"), + nil, + ) + require.NoError(t, err) + + // Hammer GetCertificate concurrently while reloading. + done := make(chan struct{}) + go func() { + for i := 0; i < 100; i++ { + w.tryReload() + } + close(done) + }() + + for i := 0; i < 1000; i++ { + cert, err := w.GetCertificate(&tls.ClientHelloInfo{}) + assert.NoError(t, err) + assert.NotNil(t, cert) + } + + <-done +} diff --git a/proxy/internal/debug/client.go b/proxy/internal/debug/client.go new file mode 100644 index 000000000..885c574bc --- /dev/null +++ b/proxy/internal/debug/client.go @@ -0,0 +1,388 @@ +// Package debug provides HTTP debug endpoints and CLI client for the proxy server. +package debug + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" +) + +// StatusFilters contains filter options for status queries. +type StatusFilters struct { + IPs []string + Names []string + Status string + ConnectionType string +} + +// Client provides CLI access to debug endpoints. +type Client struct { + baseURL string + jsonOutput bool + httpClient *http.Client + out io.Writer +} + +// NewClient creates a new debug client. +func NewClient(baseURL string, jsonOutput bool, out io.Writer) *Client { + if !strings.HasPrefix(baseURL, "http://") && !strings.HasPrefix(baseURL, "https://") { + baseURL = "http://" + baseURL + } + baseURL = strings.TrimSuffix(baseURL, "/") + + return &Client{ + baseURL: baseURL, + jsonOutput: jsonOutput, + out: out, + httpClient: &http.Client{ + Timeout: 30 * time.Second, + }, + } +} + +// Health fetches the health status. +func (c *Client) Health(ctx context.Context) error { + return c.fetchAndPrint(ctx, "/debug/health", c.printHealth) +} + +func (c *Client) printHealth(data map[string]any) { + _, _ = fmt.Fprintf(c.out, "Status: %v\n", data["status"]) + _, _ = fmt.Fprintf(c.out, "Uptime: %v\n", data["uptime"]) + _, _ = fmt.Fprintf(c.out, "Management Connected: %s\n", boolIcon(data["management_connected"])) + _, _ = fmt.Fprintf(c.out, "All Clients Healthy: %s\n", boolIcon(data["all_clients_healthy"])) + + total, _ := data["certs_total"].(float64) + ready, _ := data["certs_ready"].(float64) + pending, _ := data["certs_pending"].(float64) + failed, _ := data["certs_failed"].(float64) + if total > 0 { + _, _ = fmt.Fprintf(c.out, "Certificates: %d ready, %d pending, %d failed (%d total)\n", + int(ready), int(pending), int(failed), int(total)) + } + if domains, ok := data["certs_ready_domains"].([]any); ok && len(domains) > 0 { + _, _ = fmt.Fprintf(c.out, " Ready:\n") + for _, d := range domains { + _, _ = fmt.Fprintf(c.out, " %v\n", d) + } + } + if domains, ok := data["certs_pending_domains"].([]any); ok && len(domains) > 0 { + _, _ = fmt.Fprintf(c.out, " Pending:\n") + for _, d := range domains { + _, _ = fmt.Fprintf(c.out, " %v\n", d) + } + } + if domains, ok := data["certs_failed_domains"].(map[string]any); ok && len(domains) > 0 { + _, _ = fmt.Fprintf(c.out, " Failed:\n") + for d, errMsg := range domains { + _, _ = fmt.Fprintf(c.out, " %s: %v\n", d, errMsg) + } + } + + c.printHealthClients(data) +} + +func (c *Client) printHealthClients(data map[string]any) { + clients, ok := data["clients"].(map[string]any) + if !ok || len(clients) == 0 { + return + } + + _, _ = fmt.Fprintf(c.out, "\n%-38s %-9s %-7s %-8s %-8s %-16s %s\n", + "ACCOUNT ID", "HEALTHY", "MGMT", "SIGNAL", "RELAYS", "PEERS (P2P/RLY)", "DEGRADED") + _, _ = fmt.Fprintln(c.out, strings.Repeat("-", 110)) + + for accountID, v := range clients { + ch, ok := v.(map[string]any) + if !ok { + continue + } + + healthy := boolIcon(ch["healthy"]) + mgmt := boolIcon(ch["management_connected"]) + signal := boolIcon(ch["signal_connected"]) + + relaysConn, _ := ch["relays_connected"].(float64) + relaysTotal, _ := ch["relays_total"].(float64) + relays := fmt.Sprintf("%d/%d", int(relaysConn), int(relaysTotal)) + + peersConnected, _ := ch["peers_connected"].(float64) + peersTotal, _ := ch["peers_total"].(float64) + peersP2P, _ := ch["peers_p2p"].(float64) + peersRelayed, _ := ch["peers_relayed"].(float64) + peersDegraded, _ := ch["peers_degraded"].(float64) + peers := fmt.Sprintf("%d/%d (%d/%d)", int(peersConnected), int(peersTotal), int(peersP2P), int(peersRelayed)) + degraded := fmt.Sprintf("%d", int(peersDegraded)) + + _, _ = fmt.Fprintf(c.out, "%-38s %-9s %-7s %-8s %-8s %-16s %s", accountID, healthy, mgmt, signal, relays, peers, degraded) + if errMsg, ok := ch["error"].(string); ok && errMsg != "" { + _, _ = fmt.Fprintf(c.out, " (%s)", errMsg) + } + _, _ = fmt.Fprintln(c.out) + } +} + +func boolIcon(v any) string { + b, ok := v.(bool) + if !ok { + return "?" + } + if b { + return "yes" + } + return "no" +} + +// ListClients fetches the list of all clients. +func (c *Client) ListClients(ctx context.Context) error { + return c.fetchAndPrint(ctx, "/debug/clients", c.printClients) +} + +func (c *Client) printClients(data map[string]any) { + _, _ = fmt.Fprintf(c.out, "Uptime: %v\n", data["uptime"]) + _, _ = fmt.Fprintf(c.out, "Clients: %v\n\n", data["client_count"]) + + clients, ok := data["clients"].([]any) + if !ok || len(clients) == 0 { + _, _ = fmt.Fprintln(c.out, "No clients connected.") + return + } + + _, _ = fmt.Fprintf(c.out, "%-38s %-12s %-40s %s\n", "ACCOUNT ID", "AGE", "DOMAINS", "HAS CLIENT") + _, _ = fmt.Fprintln(c.out, strings.Repeat("-", 110)) + + for _, item := range clients { + c.printClientRow(item) + } +} + +func (c *Client) printClientRow(item any) { + client, ok := item.(map[string]any) + if !ok { + return + } + + domains := c.extractDomains(client) + hasClient := "no" + if hc, ok := client["has_client"].(bool); ok && hc { + hasClient = "yes" + } + + _, _ = fmt.Fprintf(c.out, "%-38s %-12v %s %s\n", + client["account_id"], + client["age"], + domains, + hasClient, + ) +} + +func (c *Client) extractDomains(client map[string]any) string { + d, ok := client["domains"].([]any) + if !ok || len(d) == 0 { + return "-" + } + + parts := make([]string, len(d)) + for i, domain := range d { + parts[i] = fmt.Sprint(domain) + } + return strings.Join(parts, ", ") +} + +// ClientStatus fetches the status of a specific client. +func (c *Client) ClientStatus(ctx context.Context, accountID string, filters StatusFilters) error { + params := url.Values{} + if len(filters.IPs) > 0 { + params.Set("filter-by-ips", strings.Join(filters.IPs, ",")) + } + if len(filters.Names) > 0 { + params.Set("filter-by-names", strings.Join(filters.Names, ",")) + } + if filters.Status != "" { + params.Set("filter-by-status", filters.Status) + } + if filters.ConnectionType != "" { + params.Set("filter-by-connection-type", filters.ConnectionType) + } + + path := "/debug/clients/" + url.PathEscape(accountID) + if len(params) > 0 { + path += "?" + params.Encode() + } + return c.fetchAndPrint(ctx, path, c.printClientStatus) +} + +func (c *Client) printClientStatus(data map[string]any) { + _, _ = fmt.Fprintf(c.out, "Account: %v\n\n", data["account_id"]) + if status, ok := data["status"].(string); ok { + _, _ = fmt.Fprint(c.out, status) + } +} + +// ClientSyncResponse fetches the sync response of a specific client. +func (c *Client) ClientSyncResponse(ctx context.Context, accountID string) error { + path := "/debug/clients/" + url.PathEscape(accountID) + "/syncresponse" + return c.fetchAndPrintJSON(ctx, path) +} + +// PingTCP performs a TCP ping through a client. +func (c *Client) PingTCP(ctx context.Context, accountID, host string, port int, timeout string) error { + params := url.Values{} + params.Set("host", host) + params.Set("port", fmt.Sprintf("%d", port)) + if timeout != "" { + params.Set("timeout", timeout) + } + + path := fmt.Sprintf("/debug/clients/%s/pingtcp?%s", url.PathEscape(accountID), params.Encode()) + return c.fetchAndPrint(ctx, path, c.printPingResult) +} + +func (c *Client) printPingResult(data map[string]any) { + success, _ := data["success"].(bool) + if success { + _, _ = fmt.Fprintf(c.out, "Success: %v:%v\n", data["host"], data["port"]) + _, _ = fmt.Fprintf(c.out, "Latency: %v\n", data["latency"]) + } else { + _, _ = fmt.Fprintf(c.out, "Failed: %v:%v\n", data["host"], data["port"]) + c.printError(data) + } +} + +// SetLogLevel sets the log level of a specific client. +func (c *Client) SetLogLevel(ctx context.Context, accountID, level string) error { + params := url.Values{} + params.Set("level", level) + + path := fmt.Sprintf("/debug/clients/%s/loglevel?%s", url.PathEscape(accountID), params.Encode()) + return c.fetchAndPrint(ctx, path, c.printLogLevelResult) +} + +func (c *Client) printLogLevelResult(data map[string]any) { + success, _ := data["success"].(bool) + if success { + _, _ = fmt.Fprintf(c.out, "Log level set to: %v\n", data["level"]) + } else { + _, _ = fmt.Fprintln(c.out, "Failed to set log level") + c.printError(data) + } +} + +// StartClient starts a specific client. +func (c *Client) StartClient(ctx context.Context, accountID string) error { + path := "/debug/clients/" + url.PathEscape(accountID) + "/start" + return c.fetchAndPrint(ctx, path, c.printStartResult) +} + +func (c *Client) printStartResult(data map[string]any) { + success, _ := data["success"].(bool) + if success { + _, _ = fmt.Fprintln(c.out, "Client started") + } else { + _, _ = fmt.Fprintln(c.out, "Failed to start client") + c.printError(data) + } +} + +// StopClient stops a specific client. +func (c *Client) StopClient(ctx context.Context, accountID string) error { + path := "/debug/clients/" + url.PathEscape(accountID) + "/stop" + return c.fetchAndPrint(ctx, path, c.printStopResult) +} + +func (c *Client) printStopResult(data map[string]any) { + success, _ := data["success"].(bool) + if success { + _, _ = fmt.Fprintln(c.out, "Client stopped") + } else { + _, _ = fmt.Fprintln(c.out, "Failed to stop client") + c.printError(data) + } +} + +func (c *Client) printError(data map[string]any) { + if errMsg, ok := data["error"].(string); ok { + _, _ = fmt.Fprintf(c.out, "Error: %s\n", errMsg) + } +} + +func (c *Client) fetchAndPrint(ctx context.Context, path string, printer func(map[string]any)) error { + data, raw, err := c.fetch(ctx, path) + if err != nil { + return err + } + + if c.jsonOutput { + return c.writeJSON(data) + } + + if data != nil { + printer(data) + return nil + } + + _, _ = fmt.Fprintln(c.out, string(raw)) + return nil +} + +func (c *Client) fetchAndPrintJSON(ctx context.Context, path string) error { + data, raw, err := c.fetch(ctx, path) + if err != nil { + return err + } + + if data != nil { + return c.writeJSON(data) + } + + _, _ = fmt.Fprintln(c.out, string(raw)) + return nil +} + +func (c *Client) writeJSON(data map[string]any) error { + enc := json.NewEncoder(c.out) + enc.SetIndent("", " ") + return enc.Encode(data) +} + +func (c *Client) fetch(ctx context.Context, path string) (map[string]any, []byte, error) { + fullURL := c.baseURL + path + if !strings.Contains(path, "format=json") { + if strings.Contains(path, "?") { + fullURL += "&format=json" + } else { + fullURL += "?format=json" + } + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, fullURL, nil) + if err != nil { + return nil, nil, fmt.Errorf("create request: %w", err) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, nil, fmt.Errorf("request failed: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, nil, fmt.Errorf("read response: %w", err) + } + + if resp.StatusCode >= 400 { + return nil, nil, fmt.Errorf("server error (%d): %s", resp.StatusCode, strings.TrimSpace(string(body))) + } + + var data map[string]any + if err := json.Unmarshal(body, &data); err != nil { + return nil, body, nil + } + + return data, body, nil +} diff --git a/proxy/internal/debug/client_test.go b/proxy/internal/debug/client_test.go new file mode 100644 index 000000000..0d627a94e --- /dev/null +++ b/proxy/internal/debug/client_test.go @@ -0,0 +1,71 @@ +package debug + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPrintHealth_WithCertsAndClients(t *testing.T) { + var buf bytes.Buffer + c := NewClient("localhost:8444", false, &buf) + + data := map[string]any{ + "status": "ok", + "uptime": "1h30m", + "management_connected": true, + "all_clients_healthy": true, + "certs_total": float64(3), + "certs_ready": float64(2), + "certs_pending": float64(1), + "certs_failed": float64(0), + "certs_ready_domains": []any{"a.example.com", "b.example.com"}, + "certs_pending_domains": []any{"c.example.com"}, + "clients": map[string]any{ + "acc-1": map[string]any{ + "healthy": true, + "management_connected": true, + "signal_connected": true, + "relays_connected": float64(1), + "relays_total": float64(2), + "peers_connected": float64(3), + "peers_total": float64(5), + "peers_p2p": float64(2), + "peers_relayed": float64(1), + "peers_degraded": float64(0), + }, + }, + } + + c.printHealth(data) + out := buf.String() + + assert.Contains(t, out, "Status: ok") + assert.Contains(t, out, "Uptime: 1h30m") + assert.Contains(t, out, "yes") // management_connected + assert.Contains(t, out, "2 ready, 1 pending, 0 failed (3 total)") + assert.Contains(t, out, "a.example.com") + assert.Contains(t, out, "c.example.com") + assert.Contains(t, out, "acc-1") +} + +func TestPrintHealth_Minimal(t *testing.T) { + var buf bytes.Buffer + c := NewClient("localhost:8444", false, &buf) + + data := map[string]any{ + "status": "ok", + "uptime": "5m", + "management_connected": false, + "all_clients_healthy": false, + } + + c.printHealth(data) + out := buf.String() + + assert.Contains(t, out, "Status: ok") + assert.Contains(t, out, "Uptime: 5m") + assert.NotContains(t, out, "Certificates") + assert.NotContains(t, out, "ACCOUNT ID") +} diff --git a/proxy/internal/debug/handler.go b/proxy/internal/debug/handler.go new file mode 100644 index 000000000..ab75c8b72 --- /dev/null +++ b/proxy/internal/debug/handler.go @@ -0,0 +1,712 @@ +// Package debug provides HTTP debug endpoints for the proxy server. +package debug + +import ( + "cmp" + "context" + "embed" + "encoding/json" + "fmt" + "html/template" + "maps" + "net/http" + "slices" + "strconv" + "strings" + "sync" + "time" + + log "github.com/sirupsen/logrus" + "google.golang.org/protobuf/encoding/protojson" + + nbembed "github.com/netbirdio/netbird/client/embed" + nbstatus "github.com/netbirdio/netbird/client/status" + "github.com/netbirdio/netbird/proxy/internal/health" + "github.com/netbirdio/netbird/proxy/internal/roundtrip" + "github.com/netbirdio/netbird/proxy/internal/types" + "github.com/netbirdio/netbird/version" +) + +//go:embed templates/*.html +var templateFS embed.FS + +const defaultPingTimeout = 10 * time.Second + +// formatDuration formats a duration with 2 decimal places using appropriate units. +func formatDuration(d time.Duration) string { + switch { + case d >= time.Hour: + return fmt.Sprintf("%.2fh", d.Hours()) + case d >= time.Minute: + return fmt.Sprintf("%.2fm", d.Minutes()) + case d >= time.Second: + return fmt.Sprintf("%.2fs", d.Seconds()) + case d >= time.Millisecond: + return fmt.Sprintf("%.2fms", float64(d.Microseconds())/1000) + case d >= time.Microsecond: + return fmt.Sprintf("%.2fµs", float64(d.Nanoseconds())/1000) + default: + return fmt.Sprintf("%dns", d.Nanoseconds()) + } +} + +func sortedAccountIDs(m map[types.AccountID]roundtrip.ClientDebugInfo) []types.AccountID { + return slices.Sorted(maps.Keys(m)) +} + +// clientProvider provides access to NetBird clients. +type clientProvider interface { + GetClient(accountID types.AccountID) (*nbembed.Client, bool) + ListClientsForDebug() map[types.AccountID]roundtrip.ClientDebugInfo +} + +// healthChecker provides health probe state. +type healthChecker interface { + ReadinessProbe() bool + StartupProbe(ctx context.Context) bool + CheckClientsConnected(ctx context.Context) (bool, map[types.AccountID]health.ClientHealth) +} + +type certStatus interface { + TotalDomains() int + PendingDomains() []string + ReadyDomains() []string + FailedDomains() map[string]string +} + +// Handler provides HTTP debug endpoints. +type Handler struct { + provider clientProvider + health healthChecker + certStatus certStatus + logger *log.Logger + startTime time.Time + templates *template.Template + templateMu sync.RWMutex +} + +// NewHandler creates a new debug handler. +func NewHandler(provider clientProvider, healthChecker healthChecker, logger *log.Logger) *Handler { + if logger == nil { + logger = log.StandardLogger() + } + h := &Handler{ + provider: provider, + health: healthChecker, + logger: logger, + startTime: time.Now(), + } + if err := h.loadTemplates(); err != nil { + logger.Errorf("failed to load embedded templates: %v", err) + } + return h +} + +// SetCertStatus sets the certificate status provider for ACME prefetch observability. +func (h *Handler) SetCertStatus(cs certStatus) { + h.certStatus = cs +} + +func (h *Handler) loadTemplates() error { + tmpl, err := template.ParseFS(templateFS, "templates/*.html") + if err != nil { + return fmt.Errorf("parse embedded templates: %w", err) + } + + h.templateMu.Lock() + h.templates = tmpl + h.templateMu.Unlock() + + return nil +} + +func (h *Handler) getTemplates() *template.Template { + h.templateMu.RLock() + defer h.templateMu.RUnlock() + return h.templates +} + +// ServeHTTP handles debug requests. +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + wantJSON := r.URL.Query().Get("format") == "json" || strings.HasSuffix(path, "/json") + path = strings.TrimSuffix(path, "/json") + + switch path { + case "/debug", "/debug/": + h.handleIndex(w, r, wantJSON) + case "/debug/clients": + h.handleListClients(w, r, wantJSON) + case "/debug/health": + h.handleHealth(w, r, wantJSON) + default: + if h.handleClientRoutes(w, r, path, wantJSON) { + return + } + http.NotFound(w, r) + } +} + +func (h *Handler) handleClientRoutes(w http.ResponseWriter, r *http.Request, path string, wantJSON bool) bool { + if !strings.HasPrefix(path, "/debug/clients/") { + return false + } + + rest := strings.TrimPrefix(path, "/debug/clients/") + parts := strings.SplitN(rest, "/", 2) + accountID := types.AccountID(parts[0]) + + if len(parts) == 1 { + h.handleClientStatus(w, r, accountID, wantJSON) + return true + } + + switch parts[1] { + case "syncresponse": + h.handleClientSyncResponse(w, r, accountID, wantJSON) + case "tools": + h.handleClientTools(w, r, accountID) + case "pingtcp": + h.handlePingTCP(w, r, accountID) + case "loglevel": + h.handleLogLevel(w, r, accountID) + case "start": + h.handleClientStart(w, r, accountID) + case "stop": + h.handleClientStop(w, r, accountID) + default: + return false + } + return true +} + +type failedDomain struct { + Domain string + Error string +} + +type indexData struct { + Version string + Uptime string + ClientCount int + TotalDomains int + CertsTotal int + CertsReady int + CertsPending int + CertsFailed int + CertsPendingDomains []string + CertsReadyDomains []string + CertsFailedDomains []failedDomain + Clients []clientData +} + +type clientData struct { + AccountID string + Domains string + Age string + Status string +} + +func (h *Handler) handleIndex(w http.ResponseWriter, _ *http.Request, wantJSON bool) { + clients := h.provider.ListClientsForDebug() + sortedIDs := sortedAccountIDs(clients) + + totalDomains := 0 + for _, info := range clients { + totalDomains += info.DomainCount + } + + var certsTotal, certsReady, certsPending, certsFailed int + var certsPendingDomains, certsReadyDomains []string + var certsFailedDomains map[string]string + if h.certStatus != nil { + certsTotal = h.certStatus.TotalDomains() + certsPendingDomains = h.certStatus.PendingDomains() + certsReadyDomains = h.certStatus.ReadyDomains() + certsFailedDomains = h.certStatus.FailedDomains() + certsReady = len(certsReadyDomains) + certsPending = len(certsPendingDomains) + certsFailed = len(certsFailedDomains) + } + + if wantJSON { + clientsJSON := make([]map[string]interface{}, 0, len(clients)) + for _, id := range sortedIDs { + info := clients[id] + clientsJSON = append(clientsJSON, map[string]interface{}{ + "account_id": info.AccountID, + "domain_count": info.DomainCount, + "domains": info.Domains, + "has_client": info.HasClient, + "created_at": info.CreatedAt, + "age": time.Since(info.CreatedAt).Round(time.Second).String(), + }) + } + resp := map[string]interface{}{ + "version": version.NetbirdVersion(), + "uptime": time.Since(h.startTime).Round(time.Second).String(), + "client_count": len(clients), + "total_domains": totalDomains, + "certs_total": certsTotal, + "certs_ready": certsReady, + "certs_pending": certsPending, + "certs_failed": certsFailed, + "clients": clientsJSON, + } + if len(certsPendingDomains) > 0 { + resp["certs_pending_domains"] = certsPendingDomains + } + if len(certsReadyDomains) > 0 { + resp["certs_ready_domains"] = certsReadyDomains + } + if len(certsFailedDomains) > 0 { + resp["certs_failed_domains"] = certsFailedDomains + } + h.writeJSON(w, resp) + return + } + + sortedFailed := make([]failedDomain, 0, len(certsFailedDomains)) + for d, e := range certsFailedDomains { + sortedFailed = append(sortedFailed, failedDomain{Domain: d, Error: e}) + } + slices.SortFunc(sortedFailed, func(a, b failedDomain) int { + return cmp.Compare(a.Domain, b.Domain) + }) + + data := indexData{ + Version: version.NetbirdVersion(), + Uptime: time.Since(h.startTime).Round(time.Second).String(), + ClientCount: len(clients), + TotalDomains: totalDomains, + CertsTotal: certsTotal, + CertsReady: certsReady, + CertsPending: certsPending, + CertsFailed: certsFailed, + CertsPendingDomains: certsPendingDomains, + CertsReadyDomains: certsReadyDomains, + CertsFailedDomains: sortedFailed, + Clients: make([]clientData, 0, len(clients)), + } + + for _, id := range sortedIDs { + info := clients[id] + domains := info.Domains.SafeString() + if domains == "" { + domains = "-" + } + status := "No client" + if info.HasClient { + status = "Active" + } + data.Clients = append(data.Clients, clientData{ + AccountID: string(info.AccountID), + Domains: domains, + Age: time.Since(info.CreatedAt).Round(time.Second).String(), + Status: status, + }) + } + + h.renderTemplate(w, "index", data) +} + +type clientsData struct { + Uptime string + Clients []clientData +} + +func (h *Handler) handleListClients(w http.ResponseWriter, _ *http.Request, wantJSON bool) { + clients := h.provider.ListClientsForDebug() + sortedIDs := sortedAccountIDs(clients) + + if wantJSON { + clientsJSON := make([]map[string]interface{}, 0, len(clients)) + for _, id := range sortedIDs { + info := clients[id] + clientsJSON = append(clientsJSON, map[string]interface{}{ + "account_id": info.AccountID, + "domain_count": info.DomainCount, + "domains": info.Domains, + "has_client": info.HasClient, + "created_at": info.CreatedAt, + "age": time.Since(info.CreatedAt).Round(time.Second).String(), + }) + } + h.writeJSON(w, map[string]interface{}{ + "uptime": time.Since(h.startTime).Round(time.Second).String(), + "client_count": len(clients), + "clients": clientsJSON, + }) + return + } + + data := clientsData{ + Uptime: time.Since(h.startTime).Round(time.Second).String(), + Clients: make([]clientData, 0, len(clients)), + } + + for _, id := range sortedIDs { + info := clients[id] + domains := info.Domains.SafeString() + if domains == "" { + domains = "-" + } + status := "No client" + if info.HasClient { + status = "Active" + } + data.Clients = append(data.Clients, clientData{ + AccountID: string(info.AccountID), + Domains: domains, + Age: time.Since(info.CreatedAt).Round(time.Second).String(), + Status: status, + }) + } + + h.renderTemplate(w, "clients", data) +} + +type clientDetailData struct { + AccountID string + ActiveTab string + Content string +} + +func (h *Handler) handleClientStatus(w http.ResponseWriter, r *http.Request, accountID types.AccountID, wantJSON bool) { + client, ok := h.provider.GetClient(accountID) + if !ok { + http.Error(w, "Client not found: "+string(accountID), http.StatusNotFound) + return + } + + fullStatus, err := client.Status() + if err != nil { + http.Error(w, "Error getting status: "+err.Error(), http.StatusInternalServerError) + return + } + + // Parse filter parameters + query := r.URL.Query() + statusFilter := query.Get("filter-by-status") + connectionTypeFilter := query.Get("filter-by-connection-type") + + var prefixNamesFilter []string + var prefixNamesFilterMap map[string]struct{} + if names := query.Get("filter-by-names"); names != "" { + prefixNamesFilter = strings.Split(names, ",") + prefixNamesFilterMap = make(map[string]struct{}) + for _, name := range prefixNamesFilter { + prefixNamesFilterMap[strings.ToLower(strings.TrimSpace(name))] = struct{}{} + } + } + + var ipsFilterMap map[string]struct{} + if ips := query.Get("filter-by-ips"); ips != "" { + ipsFilterMap = make(map[string]struct{}) + for _, ip := range strings.Split(ips, ",") { + ipsFilterMap[strings.TrimSpace(ip)] = struct{}{} + } + } + + pbStatus := nbstatus.ToProtoFullStatus(fullStatus) + overview := nbstatus.ConvertToStatusOutputOverview( + pbStatus, + false, + version.NetbirdVersion(), + statusFilter, + prefixNamesFilter, + prefixNamesFilterMap, + ipsFilterMap, + connectionTypeFilter, + "", + ) + + if wantJSON { + h.writeJSON(w, map[string]interface{}{ + "account_id": accountID, + "status": overview.FullDetailSummary(), + }) + return + } + + data := clientDetailData{ + AccountID: string(accountID), + ActiveTab: "status", + Content: overview.FullDetailSummary(), + } + + h.renderTemplate(w, "clientDetail", data) +} + +func (h *Handler) handleClientSyncResponse(w http.ResponseWriter, _ *http.Request, accountID types.AccountID, wantJSON bool) { + client, ok := h.provider.GetClient(accountID) + if !ok { + http.Error(w, "Client not found: "+string(accountID), http.StatusNotFound) + return + } + + syncResp, err := client.GetLatestSyncResponse() + if err != nil { + http.Error(w, "Error getting sync response: "+err.Error(), http.StatusInternalServerError) + return + } + + if syncResp == nil { + http.Error(w, "No sync response available for client: "+string(accountID), http.StatusNotFound) + return + } + + opts := protojson.MarshalOptions{ + EmitUnpopulated: true, + UseProtoNames: true, + Indent: " ", + AllowPartial: true, + } + + jsonBytes, err := opts.Marshal(syncResp) + if err != nil { + http.Error(w, "Error marshaling sync response: "+err.Error(), http.StatusInternalServerError) + return + } + + if wantJSON { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(jsonBytes) + return + } + + data := clientDetailData{ + AccountID: string(accountID), + ActiveTab: "syncresponse", + Content: string(jsonBytes), + } + + h.renderTemplate(w, "clientDetail", data) +} + +type toolsData struct { + AccountID string +} + +func (h *Handler) handleClientTools(w http.ResponseWriter, _ *http.Request, accountID types.AccountID) { + _, ok := h.provider.GetClient(accountID) + if !ok { + http.Error(w, "Client not found: "+string(accountID), http.StatusNotFound) + return + } + + data := toolsData{ + AccountID: string(accountID), + } + + h.renderTemplate(w, "tools", data) +} + +func (h *Handler) handlePingTCP(w http.ResponseWriter, r *http.Request, accountID types.AccountID) { + client, ok := h.provider.GetClient(accountID) + if !ok { + h.writeJSON(w, map[string]interface{}{"error": "client not found"}) + return + } + + host := r.URL.Query().Get("host") + portStr := r.URL.Query().Get("port") + if host == "" || portStr == "" { + h.writeJSON(w, map[string]interface{}{"error": "host and port parameters required"}) + return + } + + port, err := strconv.Atoi(portStr) + if err != nil || port < 1 || port > 65535 { + h.writeJSON(w, map[string]interface{}{"error": "invalid port"}) + return + } + + timeout := defaultPingTimeout + if t := r.URL.Query().Get("timeout"); t != "" { + if d, err := time.ParseDuration(t); err == nil { + timeout = d + } + } + + ctx, cancel := context.WithTimeout(r.Context(), timeout) + defer cancel() + + address := fmt.Sprintf("%s:%d", host, port) + start := time.Now() + + conn, err := client.Dial(ctx, "tcp", address) + if err != nil { + h.writeJSON(w, map[string]interface{}{ + "success": false, + "host": host, + "port": port, + "error": err.Error(), + }) + return + } + if err := conn.Close(); err != nil { + h.logger.Debugf("close tcp ping connection: %v", err) + } + + latency := time.Since(start) + h.writeJSON(w, map[string]interface{}{ + "success": true, + "host": host, + "port": port, + "latency_ms": latency.Milliseconds(), + "latency": formatDuration(latency), + }) +} + +func (h *Handler) handleLogLevel(w http.ResponseWriter, r *http.Request, accountID types.AccountID) { + client, ok := h.provider.GetClient(accountID) + if !ok { + h.writeJSON(w, map[string]interface{}{"error": "client not found"}) + return + } + + level := r.URL.Query().Get("level") + if level == "" { + h.writeJSON(w, map[string]interface{}{"error": "level parameter required (trace, debug, info, warn, error)"}) + return + } + + if err := client.SetLogLevel(level); err != nil { + h.writeJSON(w, map[string]interface{}{ + "success": false, + "error": err.Error(), + }) + return + } + + h.writeJSON(w, map[string]interface{}{ + "success": true, + "level": level, + }) +} + +const clientActionTimeout = 30 * time.Second + +func (h *Handler) handleClientStart(w http.ResponseWriter, r *http.Request, accountID types.AccountID) { + client, ok := h.provider.GetClient(accountID) + if !ok { + h.writeJSON(w, map[string]interface{}{"error": "client not found"}) + return + } + + ctx, cancel := context.WithTimeout(r.Context(), clientActionTimeout) + defer cancel() + + if err := client.Start(ctx); err != nil { + h.writeJSON(w, map[string]interface{}{ + "success": false, + "error": err.Error(), + }) + return + } + + h.writeJSON(w, map[string]interface{}{ + "success": true, + "message": "client started", + }) +} + +func (h *Handler) handleClientStop(w http.ResponseWriter, r *http.Request, accountID types.AccountID) { + client, ok := h.provider.GetClient(accountID) + if !ok { + h.writeJSON(w, map[string]interface{}{"error": "client not found"}) + return + } + + ctx, cancel := context.WithTimeout(r.Context(), clientActionTimeout) + defer cancel() + + if err := client.Stop(ctx); err != nil { + h.writeJSON(w, map[string]interface{}{ + "success": false, + "error": err.Error(), + }) + return + } + + h.writeJSON(w, map[string]interface{}{ + "success": true, + "message": "client stopped", + }) +} + +func (h *Handler) handleHealth(w http.ResponseWriter, r *http.Request, wantJSON bool) { + if !wantJSON { + http.Redirect(w, r, "/debug", http.StatusSeeOther) + return + } + + uptime := time.Since(h.startTime).Round(10 * time.Millisecond).String() + + ready := h.health.ReadinessProbe() + allHealthy, clientHealth := h.health.CheckClientsConnected(r.Context()) + + status := "ok" + // No clients is not a health issue; only degrade when actual clients are unhealthy + if !ready || (!allHealthy && len(clientHealth) > 0) { + status = "degraded" + } + + var certsTotal, certsReady, certsPending, certsFailed int + var certsPendingDomains, certsReadyDomains []string + var certsFailedDomains map[string]string + if h.certStatus != nil { + certsTotal = h.certStatus.TotalDomains() + certsPendingDomains = h.certStatus.PendingDomains() + certsReadyDomains = h.certStatus.ReadyDomains() + certsFailedDomains = h.certStatus.FailedDomains() + certsReady = len(certsReadyDomains) + certsPending = len(certsPendingDomains) + certsFailed = len(certsFailedDomains) + } + + resp := map[string]any{ + "status": status, + "uptime": uptime, + "management_connected": ready, + "all_clients_healthy": allHealthy, + "certs_total": certsTotal, + "certs_ready": certsReady, + "certs_pending": certsPending, + "certs_failed": certsFailed, + "clients": clientHealth, + } + if len(certsPendingDomains) > 0 { + resp["certs_pending_domains"] = certsPendingDomains + } + if len(certsReadyDomains) > 0 { + resp["certs_ready_domains"] = certsReadyDomains + } + if len(certsFailedDomains) > 0 { + resp["certs_failed_domains"] = certsFailedDomains + } + h.writeJSON(w, resp) +} + +func (h *Handler) renderTemplate(w http.ResponseWriter, name string, data interface{}) { + w.Header().Set("Content-Type", "text/html; charset=utf-8") + tmpl := h.getTemplates() + if tmpl == nil { + http.Error(w, "Templates not loaded", http.StatusInternalServerError) + return + } + if err := tmpl.ExecuteTemplate(w, name, data); err != nil { + h.logger.Errorf("execute template %s: %v", name, err) + http.Error(w, "Template error", http.StatusInternalServerError) + } +} + +func (h *Handler) writeJSON(w http.ResponseWriter, v interface{}) { + w.Header().Set("Content-Type", "application/json") + enc := json.NewEncoder(w) + enc.SetIndent("", " ") + if err := enc.Encode(v); err != nil { + h.logger.Errorf("encode JSON response: %v", err) + } +} diff --git a/proxy/internal/debug/templates/base.html b/proxy/internal/debug/templates/base.html new file mode 100644 index 000000000..737bd5b85 --- /dev/null +++ b/proxy/internal/debug/templates/base.html @@ -0,0 +1,101 @@ +{{define "style"}} +body { + font-family: monospace; + margin: 20px; + background: #1a1a1a; + color: #eee; +} +a { + color: #6cf; +} +h1, h2, h3 { + color: #fff; +} +.info { + color: #aaa; +} +table { + border-collapse: collapse; + margin: 10px 0; +} +th, td { + border: 1px solid #444; + padding: 8px; + text-align: left; +} +th { + background: #333; +} +.nav { + margin-bottom: 20px; +} +.nav a { + margin-right: 15px; + padding: 8px 16px; + background: #333; + text-decoration: none; + border-radius: 4px; +} +.nav a.active { + background: #6cf; + color: #000; +} +pre { + background: #222; + padding: 15px; + border-radius: 4px; + overflow-x: auto; + white-space: pre-wrap; +} +input, select, textarea { + background: #333; + color: #eee; + border: 1px solid #555; + padding: 8px; + border-radius: 4px; + font-family: monospace; +} +input:focus, select:focus, textarea:focus { + outline: none; + border-color: #6cf; +} +button { + background: #6cf; + color: #000; + border: none; + padding: 8px 16px; + border-radius: 4px; + cursor: pointer; + font-family: monospace; +} +button:hover { + background: #5be; +} +button:disabled { + background: #555; + color: #888; + cursor: not-allowed; +} +.form-group { + margin-bottom: 15px; +} +.form-group label { + display: block; + margin-bottom: 5px; + color: #aaa; +} +.form-row { + display: flex; + gap: 10px; + align-items: flex-end; +} +.result { + margin-top: 20px; +} +.success { + color: #5f5; +} +.error { + color: #f55; +} +{{end}} diff --git a/proxy/internal/debug/templates/client_detail.html b/proxy/internal/debug/templates/client_detail.html new file mode 100644 index 000000000..8eb27b1e5 --- /dev/null +++ b/proxy/internal/debug/templates/client_detail.html @@ -0,0 +1,19 @@ +{{define "clientDetail"}} + + + + Client {{.AccountID}} + + + +

Client: {{.AccountID}}

+ +
{{.Content}}
+ + +{{end}} diff --git a/proxy/internal/debug/templates/clients.html b/proxy/internal/debug/templates/clients.html new file mode 100644 index 000000000..4d455b2bb --- /dev/null +++ b/proxy/internal/debug/templates/clients.html @@ -0,0 +1,33 @@ +{{define "clients"}} + + + + Clients + + + +

All Clients

+

Uptime: {{.Uptime}} | ← Back

+ {{if .Clients}} + + + + + + + + {{range .Clients}} + + + + + + + {{end}} +
Account IDDomainsAgeStatus
{{.AccountID}}{{.Domains}}{{.Age}}{{.Status}}
+ {{else}} +

No clients connected

+ {{end}} + + +{{end}} diff --git a/proxy/internal/debug/templates/index.html b/proxy/internal/debug/templates/index.html new file mode 100644 index 000000000..16ab3d979 --- /dev/null +++ b/proxy/internal/debug/templates/index.html @@ -0,0 +1,58 @@ +{{define "index"}} + + + + NetBird Proxy Debug + + + +

NetBird Proxy Debug

+

Version: {{.Version}} | Uptime: {{.Uptime}}

+

Certificates: {{.CertsReady}} ready, {{.CertsPending}} pending, {{.CertsFailed}} failed ({{.CertsTotal}} total)

+ {{if .CertsReadyDomains}} +
+ Ready domains ({{.CertsReady}}) +
    {{range .CertsReadyDomains}}
  • {{.}}
  • {{end}}
+
+ {{end}} + {{if .CertsPendingDomains}} +
+ Pending domains ({{.CertsPending}}) +
    {{range .CertsPendingDomains}}
  • {{.}}
  • {{end}}
+
+ {{end}} + {{if .CertsFailedDomains}} +
+ Failed domains ({{.CertsFailed}}) +
    {{range .CertsFailedDomains}}
  • {{.Domain}}: {{.Error}}
  • {{end}}
+
+ {{end}} +

Clients ({{.ClientCount}}) | Domains ({{.TotalDomains}})

+ {{if .Clients}} + + + + + + + + {{range .Clients}} + + + + + + + {{end}} +
Account IDDomainsAgeStatus
{{.AccountID}}{{.Domains}}{{.Age}}{{.Status}}
+ {{else}} +

No clients connected

+ {{end}} +

Endpoints

+ +

Add ?format=json or /json suffix for JSON output

+ + +{{end}} diff --git a/proxy/internal/debug/templates/tools.html b/proxy/internal/debug/templates/tools.html new file mode 100644 index 000000000..216a44693 --- /dev/null +++ b/proxy/internal/debug/templates/tools.html @@ -0,0 +1,142 @@ +{{define "tools"}} + + + + Client {{.AccountID}} - Tools + + + +

Client: {{.AccountID}}

+ + +

Client Control

+
+
+   + +
+
+   + +
+
+
+ +

Log Level

+
+
+ + +
+
+   + +
+
+
+ +

TCP Ping

+
+
+ + +
+
+ + +
+
+   + +
+
+
+ + + + +{{end}} diff --git a/proxy/internal/flock/flock_other.go b/proxy/internal/flock/flock_other.go new file mode 100644 index 000000000..a3916a442 --- /dev/null +++ b/proxy/internal/flock/flock_other.go @@ -0,0 +1,20 @@ +//go:build !unix + +package flock + +import ( + "context" + "os" +) + +// Lock is a no-op on non-Unix platforms. Returns (nil, nil) to indicate +// that no lock was acquired; callers must treat a nil file as "proceed +// without lock" rather than "lock held by someone else." +func Lock(_ context.Context, _ string) (*os.File, error) { + return nil, nil //nolint:nilnil // intentional: nil file signals locking unsupported on this platform +} + +// Unlock is a no-op on non-Unix platforms. +func Unlock(_ *os.File) error { + return nil +} diff --git a/proxy/internal/flock/flock_test.go b/proxy/internal/flock/flock_test.go new file mode 100644 index 000000000..501a173f7 --- /dev/null +++ b/proxy/internal/flock/flock_test.go @@ -0,0 +1,79 @@ +//go:build unix + +package flock + +import ( + "context" + "os" + "path/filepath" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLockUnlock(t *testing.T) { + lockPath := filepath.Join(t.TempDir(), "test.lock") + + f, err := Lock(context.Background(), lockPath) + require.NoError(t, err) + require.NotNil(t, f) + + _, err = os.Stat(lockPath) + assert.NoError(t, err, "lock file should exist") + + err = Unlock(f) + assert.NoError(t, err) +} + +func TestUnlockNil(t *testing.T) { + err := Unlock(nil) + assert.NoError(t, err, "unlocking nil should be a no-op") +} + +func TestLockRespectsContext(t *testing.T) { + lockPath := filepath.Join(t.TempDir(), "test.lock") + + f1, err := Lock(context.Background(), lockPath) + require.NoError(t, err) + defer func() { require.NoError(t, Unlock(f1)) }() + + ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond) + defer cancel() + + _, err = Lock(ctx, lockPath) + assert.ErrorIs(t, err, context.DeadlineExceeded) +} + +func TestLockBlocks(t *testing.T) { + lockPath := filepath.Join(t.TempDir(), "test.lock") + + f1, err := Lock(context.Background(), lockPath) + require.NoError(t, err) + + var wg sync.WaitGroup + wg.Add(1) + + start := time.Now() + var elapsed time.Duration + + go func() { + defer wg.Done() + f2, err := Lock(context.Background(), lockPath) + elapsed = time.Since(start) + assert.NoError(t, err) + if f2 != nil { + assert.NoError(t, Unlock(f2)) + } + }() + + // Hold the lock for 200ms, then release. + time.Sleep(200 * time.Millisecond) + require.NoError(t, Unlock(f1)) + + wg.Wait() + assert.GreaterOrEqual(t, elapsed, 150*time.Millisecond, + "Lock should have blocked for at least ~200ms") +} diff --git a/proxy/internal/flock/flock_unix.go b/proxy/internal/flock/flock_unix.go new file mode 100644 index 000000000..738859a6f --- /dev/null +++ b/proxy/internal/flock/flock_unix.go @@ -0,0 +1,77 @@ +//go:build unix + +// Package flock provides best-effort advisory file locking using flock(2). +// +// This is used for cross-replica coordination (e.g. preventing duplicate +// ACME requests). Note that flock(2) does NOT work reliably on NFS volumes: +// on NFSv3 it depends on the NLM daemon, on NFSv4 Linux emulates it via +// fcntl locks with different semantics. Callers must treat lock failures +// as non-fatal and proceed without the lock. +package flock + +import ( + "context" + "errors" + "fmt" + "os" + "syscall" + "time" + + log "github.com/sirupsen/logrus" +) + +const retryInterval = 100 * time.Millisecond + +// Lock acquires an exclusive advisory lock on the given file path. +// It creates the lock file if it does not exist. The lock attempt +// respects context cancellation by using non-blocking flock with polling. +// The caller must call Unlock with the returned *os.File when done. +func Lock(ctx context.Context, path string) (*os.File, error) { + f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o600) + if err != nil { + return nil, fmt.Errorf("open lock file %s: %w", path, err) + } + + timer := time.NewTimer(retryInterval) + defer timer.Stop() + + for { + if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err == nil { + return f, nil + } else if !errors.Is(err, syscall.EWOULDBLOCK) { + if cerr := f.Close(); cerr != nil { + log.Debugf("close lock file %s: %v", path, cerr) + } + return nil, fmt.Errorf("acquire lock on %s: %w", path, err) + } + + select { + case <-ctx.Done(): + if cerr := f.Close(); cerr != nil { + log.Debugf("close lock file %s: %v", path, cerr) + } + return nil, ctx.Err() + case <-timer.C: + timer.Reset(retryInterval) + } + } +} + +// Unlock releases the lock and closes the file. +func Unlock(f *os.File) error { + if f == nil { + return nil + } + + defer func() { + if cerr := f.Close(); cerr != nil { + log.Debugf("close lock file: %v", cerr) + } + }() + + if err := syscall.Flock(int(f.Fd()), syscall.LOCK_UN); err != nil { + return fmt.Errorf("release lock: %w", err) + } + + return nil +} diff --git a/proxy/internal/grpc/auth.go b/proxy/internal/grpc/auth.go new file mode 100644 index 000000000..ce1a23f68 --- /dev/null +++ b/proxy/internal/grpc/auth.go @@ -0,0 +1,48 @@ +// Package grpc provides gRPC utilities for the proxy client. +package grpc + +import ( + "context" + "os" + "strconv" + + log "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +// EnvProxyAllowInsecure controls whether the proxy token can be sent over non-TLS connections. +const EnvProxyAllowInsecure = "NB_PROXY_ALLOW_INSECURE" + +var _ credentials.PerRPCCredentials = (*proxyAuthToken)(nil) + +type proxyAuthToken struct { + token string + allowInsecure bool +} + +func (t proxyAuthToken) GetRequestMetadata(context.Context, ...string) (map[string]string, error) { + return map[string]string{ + "authorization": "Bearer " + t.token, + }, nil +} + +// RequireTransportSecurity returns true by default to protect the token in transit. +// Set NB_PROXY_ALLOW_INSECURE=true to allow non-TLS connections (not recommended for production). +func (t proxyAuthToken) RequireTransportSecurity() bool { + return !t.allowInsecure +} + +// WithProxyToken returns a DialOption that sets the proxy access token on each outbound RPC. +func WithProxyToken(token string) grpc.DialOption { + allowInsecure := false + if val := os.Getenv(EnvProxyAllowInsecure); val != "" { + parsed, err := strconv.ParseBool(val) + if err != nil { + log.Warnf("invalid value for %s: %v", EnvProxyAllowInsecure, err) + } else { + allowInsecure = parsed + } + } + return grpc.WithPerRPCCredentials(proxyAuthToken{token: token, allowInsecure: allowInsecure}) +} diff --git a/proxy/internal/health/health.go b/proxy/internal/health/health.go new file mode 100644 index 000000000..60ce7f8ef --- /dev/null +++ b/proxy/internal/health/health.go @@ -0,0 +1,405 @@ +// Package health provides health probes for the proxy server. +package health + +import ( + "context" + "encoding/json" + "net" + "net/http" + "strings" + "sync" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/embed" + "github.com/netbirdio/netbird/proxy/internal/types" +) + +const handshakeStaleThreshold = 5 * time.Minute + +const ( + maxConcurrentChecks = 3 + maxClientCheckTimeout = 5 * time.Minute +) + +// clientProvider provides access to NetBird clients for health checks. +type clientProvider interface { + ListClientsForStartup() map[types.AccountID]*embed.Client +} + +// Checker tracks health state and provides probe endpoints. +type Checker struct { + logger *log.Logger + provider clientProvider + + mu sync.RWMutex + managementConnected bool + initialSyncComplete bool + shuttingDown bool + + // checkSem limits concurrent client health checks. + checkSem chan struct{} + + // checkHealth checks the health of a single client. + // Defaults to checkClientHealth; overridable in tests. + checkHealth func(*embed.Client) ClientHealth +} + +// ClientHealth represents the health status of a single NetBird client. +type ClientHealth struct { + Healthy bool `json:"healthy"` + ManagementConnected bool `json:"management_connected"` + SignalConnected bool `json:"signal_connected"` + RelaysConnected int `json:"relays_connected"` + RelaysTotal int `json:"relays_total"` + PeersTotal int `json:"peers_total"` + PeersConnected int `json:"peers_connected"` + PeersP2P int `json:"peers_p2p"` + PeersRelayed int `json:"peers_relayed"` + PeersDegraded int `json:"peers_degraded"` + Error string `json:"error,omitempty"` +} + +// ProbeResponse represents the JSON response for health probes. +type ProbeResponse struct { + Status string `json:"status"` + Checks map[string]bool `json:"checks,omitempty"` + Clients map[types.AccountID]ClientHealth `json:"clients,omitempty"` +} + +// Server runs the health probe HTTP server on a dedicated port. +type Server struct { + server *http.Server + logger *log.Logger + checker *Checker +} + +// SetManagementConnected updates the management connection state. +func (c *Checker) SetManagementConnected(connected bool) { + c.mu.Lock() + defer c.mu.Unlock() + c.managementConnected = connected +} + +// SetInitialSyncComplete marks that the initial mapping sync has completed. +func (c *Checker) SetInitialSyncComplete() { + c.mu.Lock() + defer c.mu.Unlock() + c.initialSyncComplete = true +} + +// SetShuttingDown marks the server as shutting down. +// This causes ReadinessProbe to return false so load balancers stop routing traffic. +func (c *Checker) SetShuttingDown() { + c.mu.Lock() + defer c.mu.Unlock() + c.shuttingDown = true +} + +// CheckClientsConnected verifies all clients are connected to management/signal/relay. +// Uses the provided context for timeout/cancellation, with a maximum bound of maxClientCheckTimeout. +// Limits concurrent checks via semaphore. +func (c *Checker) CheckClientsConnected(ctx context.Context) (bool, map[types.AccountID]ClientHealth) { + // Apply upper bound timeout in case parent context has no deadline + ctx, cancel := context.WithTimeout(ctx, maxClientCheckTimeout) + defer cancel() + + clients := c.provider.ListClientsForStartup() + + // No clients is not a health issue + if len(clients) == 0 { + return true, make(map[types.AccountID]ClientHealth) + } + + type result struct { + accountID types.AccountID + health ClientHealth + } + + resultsCh := make(chan result, len(clients)) + var wg sync.WaitGroup + + for accountID, client := range clients { + wg.Add(1) + go func(id types.AccountID, cl *embed.Client) { + defer wg.Done() + + // Acquire semaphore + select { + case c.checkSem <- struct{}{}: + defer func() { <-c.checkSem }() + case <-ctx.Done(): + resultsCh <- result{id, ClientHealth{Healthy: false, Error: ctx.Err().Error()}} + return + } + + resultsCh <- result{id, c.checkHealth(cl)} + }(accountID, client) + } + + go func() { + wg.Wait() + close(resultsCh) + }() + + results := make(map[types.AccountID]ClientHealth) + allHealthy := true + for r := range resultsCh { + results[r.accountID] = r.health + if !r.health.Healthy { + allHealthy = false + } + } + + return allHealthy, results +} + +// LivenessProbe returns true if the process is alive. +// This should always return true if we can respond. +func (c *Checker) LivenessProbe() bool { + return true +} + +// ReadinessProbe returns true if the server can accept traffic. +func (c *Checker) ReadinessProbe() bool { + c.mu.RLock() + defer c.mu.RUnlock() + if c.shuttingDown { + return false + } + return c.managementConnected +} + +// StartupProbe checks if initial startup is complete. +// Checks management connection, initial sync, and all client health directly. +// Uses the provided context for timeout/cancellation. +func (c *Checker) StartupProbe(ctx context.Context) bool { + c.mu.RLock() + mgmt := c.managementConnected + sync := c.initialSyncComplete + c.mu.RUnlock() + + if !mgmt || !sync { + return false + } + + // Check all clients are connected to management/signal/relay. + // Returns true when no clients exist (nothing to check). + allHealthy, _ := c.CheckClientsConnected(ctx) + return allHealthy +} + +// Handler returns an http.Handler for health probe endpoints. +func (c *Checker) Handler() http.Handler { + mux := http.NewServeMux() + mux.HandleFunc("/healthz/live", c.handleLiveness) + mux.HandleFunc("/healthz/ready", c.handleReadiness) + mux.HandleFunc("/healthz/startup", c.handleStartup) + mux.HandleFunc("/healthz", c.handleFull) + return mux +} + +func (c *Checker) handleLiveness(w http.ResponseWriter, r *http.Request) { + if c.LivenessProbe() { + c.writeProbeResponse(w, http.StatusOK, "ok", nil, nil) + return + } + c.writeProbeResponse(w, http.StatusServiceUnavailable, "fail", nil, nil) +} + +func (c *Checker) handleReadiness(w http.ResponseWriter, r *http.Request) { + c.mu.RLock() + checks := map[string]bool{ + "management_connected": c.managementConnected, + } + c.mu.RUnlock() + + if c.ReadinessProbe() { + c.writeProbeResponse(w, http.StatusOK, "ok", checks, nil) + return + } + c.writeProbeResponse(w, http.StatusServiceUnavailable, "fail", checks, nil) +} + +func (c *Checker) handleStartup(w http.ResponseWriter, r *http.Request) { + c.mu.RLock() + mgmt := c.managementConnected + syncComplete := c.initialSyncComplete + c.mu.RUnlock() + + allClientsHealthy, clientHealth := c.CheckClientsConnected(r.Context()) + + checks := map[string]bool{ + "management_connected": mgmt, + "initial_sync_complete": syncComplete, + "all_clients_healthy": allClientsHealthy, + } + + ready := mgmt && syncComplete && allClientsHealthy + if ready { + c.writeProbeResponse(w, http.StatusOK, "ok", checks, clientHealth) + return + } + c.writeProbeResponse(w, http.StatusServiceUnavailable, "fail", checks, clientHealth) +} + +func (c *Checker) handleFull(w http.ResponseWriter, r *http.Request) { + c.mu.RLock() + mgmt := c.managementConnected + sync := c.initialSyncComplete + c.mu.RUnlock() + + allClientsHealthy, clientHealth := c.CheckClientsConnected(r.Context()) + + checks := map[string]bool{ + "management_connected": mgmt, + "initial_sync_complete": sync, + "all_clients_healthy": allClientsHealthy, + } + + status := "ok" + statusCode := http.StatusOK + if !c.ReadinessProbe() { + status = "fail" + statusCode = http.StatusServiceUnavailable + } + + c.writeProbeResponse(w, statusCode, status, checks, clientHealth) +} + +func (c *Checker) writeProbeResponse(w http.ResponseWriter, statusCode int, status string, checks map[string]bool, clients map[types.AccountID]ClientHealth) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(statusCode) + + resp := ProbeResponse{ + Status: status, + Checks: checks, + Clients: clients, + } + if err := json.NewEncoder(w).Encode(resp); err != nil { + c.logger.Debugf("write health response: %v", err) + } +} + +// ListenAndServe starts the health probe server. +func (s *Server) ListenAndServe() error { + s.logger.Infof("starting health probe server on %s", s.server.Addr) + return s.server.ListenAndServe() +} + +// Serve starts the health probe server on the given listener. +func (s *Server) Serve(l net.Listener) error { + s.logger.Infof("starting health probe server on %s", l.Addr()) + return s.server.Serve(l) +} + +// Shutdown gracefully shuts down the health probe server. +func (s *Server) Shutdown(ctx context.Context) error { + return s.server.Shutdown(ctx) +} + +// NewChecker creates a new health checker. +func NewChecker(logger *log.Logger, provider clientProvider) *Checker { + if logger == nil { + logger = log.StandardLogger() + } + return &Checker{ + logger: logger, + provider: provider, + checkSem: make(chan struct{}, maxConcurrentChecks), + checkHealth: checkClientHealth, + } +} + +// NewServer creates a new health probe server. +// If metricsHandler is non-nil, it is mounted at /metrics on the same port. +func NewServer(addr string, checker *Checker, logger *log.Logger, metricsHandler http.Handler) *Server { + if logger == nil { + logger = log.StandardLogger() + } + + handler := checker.Handler() + if metricsHandler != nil { + mux := http.NewServeMux() + mux.Handle("/metrics", metricsHandler) + mux.Handle("/", handler) + handler = mux + } + + return &Server{ + server: &http.Server{ + Addr: addr, + Handler: handler, + ReadTimeout: 5 * time.Second, + WriteTimeout: 5 * time.Second, + }, + logger: logger, + checker: checker, + } +} + +func checkClientHealth(client *embed.Client) ClientHealth { + if client == nil { + return ClientHealth{ + Healthy: false, + Error: "client not initialized", + } + } + + status, err := client.Status() + if err != nil { + return ClientHealth{ + Healthy: false, + Error: err.Error(), + } + } + + // Count only rel:// and rels:// relays (not stun/turn) + var relayCount, relaysConnected int + for _, relay := range status.Relays { + if !strings.HasPrefix(relay.URI, "rel://") && !strings.HasPrefix(relay.URI, "rels://") { + continue + } + relayCount++ + if relay.Err == nil { + relaysConnected++ + } + } + + // Count peer connection stats + now := time.Now() + var peersConnected, peersP2P, peersRelayed, peersDegraded int + for _, p := range status.Peers { + if p.ConnStatus != embed.PeerStatusConnected { + continue + } + peersConnected++ + if p.Relayed { + peersRelayed++ + } else { + peersP2P++ + } + if p.LastWireguardHandshake.IsZero() || now.Sub(p.LastWireguardHandshake) > handshakeStaleThreshold { + peersDegraded++ + } + } + + // Client is healthy if connected to management, signal, and at least one relay (if any are defined) + healthy := status.ManagementState.Connected && + status.SignalState.Connected && + (relayCount == 0 || relaysConnected > 0) + + return ClientHealth{ + Healthy: healthy, + ManagementConnected: status.ManagementState.Connected, + SignalConnected: status.SignalState.Connected, + RelaysConnected: relaysConnected, + RelaysTotal: relayCount, + PeersTotal: len(status.Peers), + PeersConnected: peersConnected, + PeersP2P: peersP2P, + PeersRelayed: peersRelayed, + PeersDegraded: peersDegraded, + } +} diff --git a/proxy/internal/health/health_test.go b/proxy/internal/health/health_test.go new file mode 100644 index 000000000..47b5f250f --- /dev/null +++ b/proxy/internal/health/health_test.go @@ -0,0 +1,473 @@ +package health + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/client/embed" + "github.com/netbirdio/netbird/proxy/internal/types" +) + +type mockClientProvider struct { + clients map[types.AccountID]*embed.Client +} + +func (m *mockClientProvider) ListClientsForStartup() map[types.AccountID]*embed.Client { + return m.clients +} + +// newTestChecker creates a checker with a mock health function for testing. +// The health function returns the provided ClientHealth for every client. +func newTestChecker(provider clientProvider, healthResult ClientHealth) *Checker { + c := NewChecker(nil, provider) + c.checkHealth = func(_ *embed.Client) ClientHealth { + return healthResult + } + return c +} + +func TestChecker_LivenessProbe(t *testing.T) { + checker := NewChecker(nil, &mockClientProvider{}) + + // Liveness should always return true if we can respond. + assert.True(t, checker.LivenessProbe()) +} + +func TestChecker_ReadinessProbe(t *testing.T) { + checker := NewChecker(nil, &mockClientProvider{}) + + // Initially not ready (management not connected). + assert.False(t, checker.ReadinessProbe()) + + // After management connects, should be ready. + checker.SetManagementConnected(true) + assert.True(t, checker.ReadinessProbe()) + + // If management disconnects, should not be ready. + checker.SetManagementConnected(false) + assert.False(t, checker.ReadinessProbe()) +} + +// TestStartupProbe_EmptyServiceList covers the scenario where management has +// no services configured for this proxy. The proxy should become ready once +// management is connected and the initial sync completes, even with zero clients. +func TestStartupProbe_EmptyServiceList(t *testing.T) { + checker := NewChecker(nil, &mockClientProvider{}) + + // No management connection = not ready. + assert.False(t, checker.StartupProbe(context.Background())) + + // Management connected but no sync = not ready. + checker.SetManagementConnected(true) + assert.False(t, checker.StartupProbe(context.Background())) + + // Management + sync complete + no clients = ready. + checker.SetInitialSyncComplete() + assert.True(t, checker.StartupProbe(context.Background())) +} + +// TestStartupProbe_WithUnhealthyClients verifies that when services exist +// and clients have been created but are not yet fully connected (to mgmt, +// signal, relays), the startup probe does NOT pass. +func TestStartupProbe_WithUnhealthyClients(t *testing.T) { + provider := &mockClientProvider{ + clients: map[types.AccountID]*embed.Client{ + "account-1": nil, // concrete client not needed; checkHealth is mocked + "account-2": nil, + }, + } + checker := newTestChecker(provider, ClientHealth{Healthy: false, Error: "not connected yet"}) + checker.SetManagementConnected(true) + checker.SetInitialSyncComplete() + + assert.False(t, checker.StartupProbe(context.Background()), + "startup probe must not pass when clients are unhealthy") +} + +// TestStartupProbe_WithHealthyClients verifies that once all clients are +// connected and healthy, the startup probe passes. +func TestStartupProbe_WithHealthyClients(t *testing.T) { + provider := &mockClientProvider{ + clients: map[types.AccountID]*embed.Client{ + "account-1": nil, + "account-2": nil, + }, + } + checker := newTestChecker(provider, ClientHealth{ + Healthy: true, + ManagementConnected: true, + SignalConnected: true, + RelaysConnected: 1, + RelaysTotal: 1, + }) + checker.SetManagementConnected(true) + checker.SetInitialSyncComplete() + + assert.True(t, checker.StartupProbe(context.Background()), + "startup probe must pass when all clients are healthy") +} + +// TestStartupProbe_MixedHealthClients verifies that if any single client is +// unhealthy, the startup probe fails (all-or-nothing). +func TestStartupProbe_MixedHealthClients(t *testing.T) { + provider := &mockClientProvider{ + clients: map[types.AccountID]*embed.Client{ + "healthy-account": nil, + "unhealthy-account": nil, + }, + } + + checker := NewChecker(nil, provider) + checker.checkHealth = func(cl *embed.Client) ClientHealth { + // We identify accounts by their position in the map iteration; since we + // can't control map order, make exactly one unhealthy via counter. + return ClientHealth{Healthy: false} + } + checker.SetManagementConnected(true) + checker.SetInitialSyncComplete() + + assert.False(t, checker.StartupProbe(context.Background()), + "startup probe must fail if any client is unhealthy") +} + +// TestStartupProbe_RequiresAllConditions ensures that each individual +// prerequisite (management, sync, clients) is necessary. The probe must not +// pass if any one is missing. +func TestStartupProbe_RequiresAllConditions(t *testing.T) { + provider := &mockClientProvider{ + clients: map[types.AccountID]*embed.Client{ + "account-1": nil, + }, + } + + t.Run("no management", func(t *testing.T) { + checker := newTestChecker(provider, ClientHealth{Healthy: true}) + checker.SetInitialSyncComplete() + // management NOT connected + assert.False(t, checker.StartupProbe(context.Background())) + }) + + t.Run("no sync", func(t *testing.T) { + checker := newTestChecker(provider, ClientHealth{Healthy: true}) + checker.SetManagementConnected(true) + // sync NOT complete + assert.False(t, checker.StartupProbe(context.Background())) + }) + + t.Run("unhealthy client", func(t *testing.T) { + checker := newTestChecker(provider, ClientHealth{Healthy: false}) + checker.SetManagementConnected(true) + checker.SetInitialSyncComplete() + assert.False(t, checker.StartupProbe(context.Background())) + }) + + t.Run("all conditions met", func(t *testing.T) { + checker := newTestChecker(provider, ClientHealth{Healthy: true}) + checker.SetManagementConnected(true) + checker.SetInitialSyncComplete() + assert.True(t, checker.StartupProbe(context.Background())) + }) +} + +// TestStartupProbe_ConcurrentAccess runs the startup probe from many +// goroutines simultaneously to check for races. +func TestStartupProbe_ConcurrentAccess(t *testing.T) { + provider := &mockClientProvider{ + clients: map[types.AccountID]*embed.Client{ + "account-1": nil, + "account-2": nil, + }, + } + checker := newTestChecker(provider, ClientHealth{Healthy: true}) + checker.SetManagementConnected(true) + checker.SetInitialSyncComplete() + + var wg sync.WaitGroup + const goroutines = 50 + results := make([]bool, goroutines) + + for i := range goroutines { + wg.Add(1) + go func(idx int) { + defer wg.Done() + results[idx] = checker.StartupProbe(context.Background()) + }(i) + } + wg.Wait() + + for i, r := range results { + assert.True(t, r, "goroutine %d got unexpected result", i) + } +} + +// TestStartupProbe_CancelledContext verifies that a cancelled context causes +// the probe to report unhealthy when client checks are needed. +func TestStartupProbe_CancelledContext(t *testing.T) { + t.Run("no management bypasses context", func(t *testing.T) { + checker := NewChecker(nil, &mockClientProvider{}) + ctx, cancel := context.WithCancel(context.Background()) + cancel() + // Should be false because management isn't connected, context is irrelevant. + assert.False(t, checker.StartupProbe(ctx)) + }) + + t.Run("with clients and cancelled context", func(t *testing.T) { + provider := &mockClientProvider{ + clients: map[types.AccountID]*embed.Client{ + "account-1": nil, + }, + } + checker := NewChecker(nil, provider) + // Use the real checkHealth path — a cancelled context should cause + // the semaphore acquisition to fail, reporting unhealthy. + checker.SetManagementConnected(true) + checker.SetInitialSyncComplete() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + assert.False(t, checker.StartupProbe(ctx), + "cancelled context must result in unhealthy when clients exist") + }) +} + +// TestHandler_Startup_EmptyServiceList verifies the HTTP startup endpoint +// returns 200 when management is connected, sync is complete, and there are +// no services/clients. +func TestHandler_Startup_EmptyServiceList(t *testing.T) { + checker := NewChecker(nil, &mockClientProvider{}) + checker.SetManagementConnected(true) + checker.SetInitialSyncComplete() + handler := checker.Handler() + + req := httptest.NewRequest(http.MethodGet, "/healthz/startup", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + + var resp ProbeResponse + require.NoError(t, json.NewDecoder(rec.Body).Decode(&resp)) + assert.Equal(t, "ok", resp.Status) + assert.True(t, resp.Checks["management_connected"]) + assert.True(t, resp.Checks["initial_sync_complete"]) + assert.True(t, resp.Checks["all_clients_healthy"]) + assert.Empty(t, resp.Clients) +} + +// TestHandler_Startup_WithUnhealthyClients verifies that the HTTP startup +// endpoint returns 503 when clients exist but are not yet healthy. +func TestHandler_Startup_WithUnhealthyClients(t *testing.T) { + provider := &mockClientProvider{ + clients: map[types.AccountID]*embed.Client{ + "account-1": nil, + }, + } + checker := newTestChecker(provider, ClientHealth{Healthy: false, Error: "starting"}) + checker.SetManagementConnected(true) + checker.SetInitialSyncComplete() + handler := checker.Handler() + + req := httptest.NewRequest(http.MethodGet, "/healthz/startup", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusServiceUnavailable, rec.Code) + + var resp ProbeResponse + require.NoError(t, json.NewDecoder(rec.Body).Decode(&resp)) + assert.Equal(t, "fail", resp.Status) + assert.True(t, resp.Checks["management_connected"]) + assert.True(t, resp.Checks["initial_sync_complete"]) + assert.False(t, resp.Checks["all_clients_healthy"]) + require.Contains(t, resp.Clients, types.AccountID("account-1")) + assert.Equal(t, "starting", resp.Clients["account-1"].Error) +} + +// TestHandler_Startup_WithHealthyClients verifies the HTTP startup endpoint +// returns 200 once clients are healthy. +func TestHandler_Startup_WithHealthyClients(t *testing.T) { + provider := &mockClientProvider{ + clients: map[types.AccountID]*embed.Client{ + "account-1": nil, + }, + } + checker := newTestChecker(provider, ClientHealth{ + Healthy: true, + ManagementConnected: true, + SignalConnected: true, + RelaysConnected: 1, + RelaysTotal: 1, + }) + checker.SetManagementConnected(true) + checker.SetInitialSyncComplete() + handler := checker.Handler() + + req := httptest.NewRequest(http.MethodGet, "/healthz/startup", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + + var resp ProbeResponse + require.NoError(t, json.NewDecoder(rec.Body).Decode(&resp)) + assert.Equal(t, "ok", resp.Status) + assert.True(t, resp.Checks["all_clients_healthy"]) +} + +// TestHandler_Startup_NotComplete verifies the startup handler returns 503 +// when prerequisites aren't met. +func TestHandler_Startup_NotComplete(t *testing.T) { + checker := NewChecker(nil, &mockClientProvider{}) + handler := checker.Handler() + + req := httptest.NewRequest(http.MethodGet, "/healthz/startup", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusServiceUnavailable, rec.Code) + + var resp ProbeResponse + require.NoError(t, json.NewDecoder(rec.Body).Decode(&resp)) + assert.Equal(t, "fail", resp.Status) +} + +func TestChecker_Handler_Liveness(t *testing.T) { + checker := NewChecker(nil, &mockClientProvider{}) + handler := checker.Handler() + + req := httptest.NewRequest(http.MethodGet, "/healthz/live", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + + var resp ProbeResponse + require.NoError(t, json.NewDecoder(rec.Body).Decode(&resp)) + assert.Equal(t, "ok", resp.Status) +} + +func TestChecker_Handler_Readiness_NotReady(t *testing.T) { + checker := NewChecker(nil, &mockClientProvider{}) + handler := checker.Handler() + + req := httptest.NewRequest(http.MethodGet, "/healthz/ready", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusServiceUnavailable, rec.Code) + + var resp ProbeResponse + require.NoError(t, json.NewDecoder(rec.Body).Decode(&resp)) + assert.Equal(t, "fail", resp.Status) + assert.False(t, resp.Checks["management_connected"]) +} + +func TestChecker_Handler_Readiness_Ready(t *testing.T) { + checker := NewChecker(nil, &mockClientProvider{}) + checker.SetManagementConnected(true) + handler := checker.Handler() + + req := httptest.NewRequest(http.MethodGet, "/healthz/ready", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + + var resp ProbeResponse + require.NoError(t, json.NewDecoder(rec.Body).Decode(&resp)) + assert.Equal(t, "ok", resp.Status) + assert.True(t, resp.Checks["management_connected"]) +} + +func TestChecker_Handler_Full(t *testing.T) { + checker := NewChecker(nil, &mockClientProvider{}) + checker.SetManagementConnected(true) + handler := checker.Handler() + + req := httptest.NewRequest(http.MethodGet, "/healthz", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + + var resp ProbeResponse + require.NoError(t, json.NewDecoder(rec.Body).Decode(&resp)) + assert.Equal(t, "ok", resp.Status) + assert.NotNil(t, resp.Checks) + // Clients may be empty map when no clients exist. + assert.Empty(t, resp.Clients) +} + +func TestChecker_SetShuttingDown(t *testing.T) { + checker := NewChecker(nil, &mockClientProvider{}) + checker.SetManagementConnected(true) + + assert.True(t, checker.ReadinessProbe(), "should be ready before shutdown") + + checker.SetShuttingDown() + + assert.False(t, checker.ReadinessProbe(), "should not be ready after shutdown") +} + +func TestChecker_Handler_Readiness_ShuttingDown(t *testing.T) { + checker := NewChecker(nil, &mockClientProvider{}) + checker.SetManagementConnected(true) + checker.SetShuttingDown() + handler := checker.Handler() + + req := httptest.NewRequest(http.MethodGet, "/healthz/ready", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusServiceUnavailable, rec.Code) + + var resp ProbeResponse + require.NoError(t, json.NewDecoder(rec.Body).Decode(&resp)) + assert.Equal(t, "fail", resp.Status) +} + +func TestNewServer_WithMetricsHandler(t *testing.T) { + checker := NewChecker(nil, &mockClientProvider{}) + checker.SetManagementConnected(true) + + metricsHandler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("metrics")) + }) + + srv := NewServer(":0", checker, nil, metricsHandler) + require.NotNil(t, srv) + + // Verify health endpoint still works through the mux. + req := httptest.NewRequest(http.MethodGet, "/healthz/live", nil) + rec := httptest.NewRecorder() + srv.server.Handler.ServeHTTP(rec, req) + assert.Equal(t, http.StatusOK, rec.Code) + + // Verify metrics endpoint is mounted. + req = httptest.NewRequest(http.MethodGet, "/metrics", nil) + rec = httptest.NewRecorder() + srv.server.Handler.ServeHTTP(rec, req) + assert.Equal(t, http.StatusOK, rec.Code) + assert.Equal(t, "metrics", rec.Body.String()) +} + +func TestNewServer_WithoutMetricsHandler(t *testing.T) { + checker := NewChecker(nil, &mockClientProvider{}) + checker.SetManagementConnected(true) + + srv := NewServer(":0", checker, nil, nil) + require.NotNil(t, srv) + + req := httptest.NewRequest(http.MethodGet, "/healthz/live", nil) + rec := httptest.NewRecorder() + srv.server.Handler.ServeHTTP(rec, req) + assert.Equal(t, http.StatusOK, rec.Code) +} diff --git a/proxy/internal/k8s/lease.go b/proxy/internal/k8s/lease.go new file mode 100644 index 000000000..9677e0e27 --- /dev/null +++ b/proxy/internal/k8s/lease.go @@ -0,0 +1,281 @@ +// Package k8s provides a lightweight Kubernetes API client for coordination +// Leases. It uses raw HTTP calls against the mounted service account +// credentials, avoiding a dependency on client-go. +package k8s + +import ( + "bytes" + "context" + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "strings" + "time" +) + +const ( + saTokenPath = "/var/run/secrets/kubernetes.io/serviceaccount/token" //nolint:gosec + saNamespacePath = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" + saCACertPath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + + leaseAPIPath = "/apis/coordination.k8s.io/v1" +) + +// ErrConflict is returned when a Lease update fails due to a +// resourceVersion mismatch (another writer updated the object first). +var ErrConflict = errors.New("conflict: resource version mismatch") + +// Lease represents a coordination.k8s.io/v1 Lease object with only the +// fields needed for distributed locking. +type Lease struct { + APIVersion string `json:"apiVersion"` + Kind string `json:"kind"` + Metadata LeaseMetadata `json:"metadata"` + Spec LeaseSpec `json:"spec"` +} + +// LeaseMetadata holds the standard k8s object metadata fields used by Leases. +type LeaseMetadata struct { + Name string `json:"name"` + Namespace string `json:"namespace,omitempty"` + ResourceVersion string `json:"resourceVersion,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +// LeaseSpec holds the Lease specification fields. +type LeaseSpec struct { + HolderIdentity *string `json:"holderIdentity"` + LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"` + AcquireTime *MicroTime `json:"acquireTime"` + RenewTime *MicroTime `json:"renewTime"` +} + +// MicroTime wraps time.Time with Kubernetes MicroTime JSON formatting. +type MicroTime struct { + time.Time +} + +const microTimeFormat = "2006-01-02T15:04:05.000000Z" + +// MarshalJSON implements json.Marshaler with k8s MicroTime format. +func (t *MicroTime) MarshalJSON() ([]byte, error) { + return json.Marshal(t.UTC().Format(microTimeFormat)) +} + +// UnmarshalJSON implements json.Unmarshaler with k8s MicroTime format. +func (t *MicroTime) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + t.Time = time.Time{} + return nil + } + + parsed, err := time.Parse(microTimeFormat, s) + if err != nil { + return fmt.Errorf("parse MicroTime %q: %w", s, err) + } + t.Time = parsed + return nil +} + +// LeaseClient talks to the Kubernetes coordination API using raw HTTP. +type LeaseClient struct { + baseURL string + namespace string + httpClient *http.Client +} + +// NewLeaseClient creates a client that authenticates via the pod's +// mounted service account. It reads the namespace and CA certificate +// at construction time (they don't rotate) but reads the bearer token +// fresh on each request (tokens rotate). +func NewLeaseClient() (*LeaseClient, error) { + host := os.Getenv("KUBERNETES_SERVICE_HOST") + port := os.Getenv("KUBERNETES_SERVICE_PORT") + if host == "" || port == "" { + return nil, fmt.Errorf("KUBERNETES_SERVICE_HOST/PORT not set") + } + + ns, err := os.ReadFile(saNamespacePath) + if err != nil { + return nil, fmt.Errorf("read namespace from %s: %w", saNamespacePath, err) + } + + caCert, err := os.ReadFile(saCACertPath) + if err != nil { + return nil, fmt.Errorf("read CA cert from %s: %w", saCACertPath, err) + } + + pool := x509.NewCertPool() + if !pool.AppendCertsFromPEM(caCert) { + return nil, fmt.Errorf("parse CA certificate from %s", saCACertPath) + } + + return &LeaseClient{ + baseURL: fmt.Sprintf("https://%s:%s", host, port), + namespace: strings.TrimSpace(string(ns)), + httpClient: &http.Client{ + Timeout: 10 * time.Second, + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: pool, + }, + }, + }, + }, nil +} + +// Namespace returns the namespace this client operates in. +func (c *LeaseClient) Namespace() string { + return c.namespace +} + +// Get retrieves a Lease by name. Returns (nil, nil) if the Lease does not exist. +func (c *LeaseClient) Get(ctx context.Context, name string) (*Lease, error) { + url := fmt.Sprintf("%s%s/namespaces/%s/leases/%s", c.baseURL, leaseAPIPath, c.namespace, name) + + resp, err := c.doRequest(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode == http.StatusNotFound { + return nil, nil //nolint:nilnil + } + if resp.StatusCode != http.StatusOK { + return nil, c.readError(resp) + } + + var lease Lease + if err := json.NewDecoder(resp.Body).Decode(&lease); err != nil { + return nil, fmt.Errorf("decode lease response: %w", err) + } + return &lease, nil +} + +// Create creates a new Lease. Returns the created Lease with server-assigned +// fields like resourceVersion populated. +func (c *LeaseClient) Create(ctx context.Context, lease *Lease) (*Lease, error) { + url := fmt.Sprintf("%s%s/namespaces/%s/leases", c.baseURL, leaseAPIPath, c.namespace) + + lease.APIVersion = "coordination.k8s.io/v1" + lease.Kind = "Lease" + if lease.Metadata.Namespace == "" { + lease.Metadata.Namespace = c.namespace + } + + resp, err := c.doRequest(ctx, http.MethodPost, url, lease) + if err != nil { + return nil, err + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode == http.StatusConflict { + return nil, ErrConflict + } + if resp.StatusCode != http.StatusCreated { + return nil, c.readError(resp) + } + + var created Lease + if err := json.NewDecoder(resp.Body).Decode(&created); err != nil { + return nil, fmt.Errorf("decode created lease: %w", err) + } + return &created, nil +} + +// Update replaces a Lease. The lease.Metadata.ResourceVersion must match +// the current server value (optimistic concurrency). Returns ErrConflict +// on version mismatch. +func (c *LeaseClient) Update(ctx context.Context, lease *Lease) (*Lease, error) { + url := fmt.Sprintf("%s%s/namespaces/%s/leases/%s", c.baseURL, leaseAPIPath, c.namespace, lease.Metadata.Name) + + lease.APIVersion = "coordination.k8s.io/v1" + lease.Kind = "Lease" + + resp, err := c.doRequest(ctx, http.MethodPut, url, lease) + if err != nil { + return nil, err + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode == http.StatusConflict { + return nil, ErrConflict + } + if resp.StatusCode != http.StatusOK { + return nil, c.readError(resp) + } + + var updated Lease + if err := json.NewDecoder(resp.Body).Decode(&updated); err != nil { + return nil, fmt.Errorf("decode updated lease: %w", err) + } + return &updated, nil +} + +func (c *LeaseClient) doRequest(ctx context.Context, method, url string, body any) (*http.Response, error) { + token, err := readToken() + if err != nil { + return nil, fmt.Errorf("read service account token: %w", err) + } + + var bodyReader io.Reader + if body != nil { + data, err := json.Marshal(body) + if err != nil { + return nil, fmt.Errorf("marshal request body: %w", err) + } + bodyReader = bytes.NewReader(data) + } + + req, err := http.NewRequestWithContext(ctx, method, url, bodyReader) + if err != nil { + return nil, fmt.Errorf("create request: %w", err) + } + + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Set("Accept", "application/json") + if body != nil { + req.Header.Set("Content-Type", "application/json") + } + + return c.httpClient.Do(req) +} + +func readToken() (string, error) { + data, err := os.ReadFile(saTokenPath) + if err != nil { + return "", fmt.Errorf("read %s: %w", saTokenPath, err) + } + return strings.TrimSpace(string(data)), nil +} + +func (c *LeaseClient) readError(resp *http.Response) error { + body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024)) + return fmt.Errorf("k8s API %s %d: %s", resp.Request.URL.Path, resp.StatusCode, string(body)) +} + +// LeaseNameForDomain returns a deterministic, DNS-label-safe Lease name +// for the given domain. The domain is hashed to avoid dots and length issues. +func LeaseNameForDomain(domain string) string { + h := sha256.Sum256([]byte(domain)) + return "cert-lock-" + hex.EncodeToString(h[:8]) +} + +// InCluster reports whether the process is running inside a Kubernetes pod +// by checking for the KUBERNETES_SERVICE_HOST environment variable. +func InCluster() bool { + _, exists := os.LookupEnv("KUBERNETES_SERVICE_HOST") + return exists +} diff --git a/proxy/internal/k8s/lease_test.go b/proxy/internal/k8s/lease_test.go new file mode 100644 index 000000000..9d5d3c6ce --- /dev/null +++ b/proxy/internal/k8s/lease_test.go @@ -0,0 +1,102 @@ +package k8s + +import ( + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLeaseNameForDomain(t *testing.T) { + tests := []struct { + domain string + }{ + {"example.com"}, + {"app.example.com"}, + {"another.domain.io"}, + } + + seen := make(map[string]string) + for _, tc := range tests { + name := LeaseNameForDomain(tc.domain) + + assert.True(t, len(name) <= 63, "must be valid DNS label length") + assert.Regexp(t, `^cert-lock-[0-9a-f]{16}$`, name, + "must match expected format for domain %q", tc.domain) + + // Same input produces same output. + assert.Equal(t, name, LeaseNameForDomain(tc.domain), "must be deterministic") + + // Different domains produce different names. + if prev, ok := seen[name]; ok { + t.Errorf("collision: %q and %q both map to %s", prev, tc.domain, name) + } + seen[name] = tc.domain + } +} + +func TestMicroTimeJSON(t *testing.T) { + ts := time.Date(2024, 6, 15, 10, 30, 0, 0, time.UTC) + mt := &MicroTime{Time: ts} + + data, err := json.Marshal(mt) + require.NoError(t, err) + assert.Equal(t, `"2024-06-15T10:30:00.000000Z"`, string(data)) + + var decoded MicroTime + require.NoError(t, json.Unmarshal(data, &decoded)) + assert.True(t, ts.Equal(decoded.Time), "round-trip should preserve time") +} + +func TestMicroTimeNullJSON(t *testing.T) { + // Null pointer serializes as JSON null via the Lease struct. + spec := LeaseSpec{ + HolderIdentity: nil, + AcquireTime: nil, + RenewTime: nil, + } + + data, err := json.Marshal(spec) + require.NoError(t, err) + assert.Contains(t, string(data), `"acquireTime":null`) + assert.Contains(t, string(data), `"renewTime":null`) +} + +func TestLeaseJSONRoundTrip(t *testing.T) { + holder := "pod-abc" + dur := int32(300) + now := MicroTime{Time: time.Now().UTC().Truncate(time.Microsecond)} + + original := Lease{ + APIVersion: "coordination.k8s.io/v1", + Kind: "Lease", + Metadata: LeaseMetadata{ + Name: "cert-lock-abcdef0123456789", + Namespace: "default", + ResourceVersion: "12345", + Annotations: map[string]string{ + "netbird.io/domain": "app.example.com", + }, + }, + Spec: LeaseSpec{ + HolderIdentity: &holder, + LeaseDurationSeconds: &dur, + AcquireTime: &now, + RenewTime: &now, + }, + } + + data, err := json.Marshal(original) + require.NoError(t, err) + + var decoded Lease + require.NoError(t, json.Unmarshal(data, &decoded)) + + assert.Equal(t, original.Metadata.Name, decoded.Metadata.Name) + assert.Equal(t, original.Metadata.ResourceVersion, decoded.Metadata.ResourceVersion) + assert.Equal(t, *original.Spec.HolderIdentity, *decoded.Spec.HolderIdentity) + assert.Equal(t, *original.Spec.LeaseDurationSeconds, *decoded.Spec.LeaseDurationSeconds) + assert.True(t, original.Spec.AcquireTime.Equal(decoded.Spec.AcquireTime.Time)) +} diff --git a/proxy/internal/metrics/metrics.go b/proxy/internal/metrics/metrics.go new file mode 100644 index 000000000..951ce73dd --- /dev/null +++ b/proxy/internal/metrics/metrics.go @@ -0,0 +1,149 @@ +package metrics + +import ( + "net/http" + "strconv" + "time" + + "github.com/netbirdio/netbird/proxy/internal/proxy" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +type Metrics struct { + requestsTotal prometheus.Counter + activeRequests prometheus.Gauge + configuredDomains prometheus.Gauge + pathsPerDomain *prometheus.GaugeVec + requestDuration *prometheus.HistogramVec + backendDuration *prometheus.HistogramVec +} + +func New(reg prometheus.Registerer) *Metrics { + promFactory := promauto.With(reg) + return &Metrics{ + requestsTotal: promFactory.NewCounter(prometheus.CounterOpts{ + Name: "netbird_proxy_requests_total", + Help: "Total number of requests made to the netbird proxy", + }), + activeRequests: promFactory.NewGauge(prometheus.GaugeOpts{ + Name: "netbird_proxy_active_requests_count", + Help: "Current in-flight requests handled by the netbird proxy", + }), + configuredDomains: promFactory.NewGauge(prometheus.GaugeOpts{ + Name: "netbird_proxy_domains_count", + Help: "Current number of domains configured on the netbird proxy", + }), + pathsPerDomain: promFactory.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "netbird_proxy_paths_count", + Help: "Current number of paths configured on the netbird proxy labelled by domain", + }, + []string{"domain"}, + ), + requestDuration: promFactory.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "netbird_proxy_request_duration_seconds", + Help: "Duration of requests made to the netbird proxy", + Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10}, + }, + []string{"status", "size", "method", "host", "path"}, + ), + backendDuration: promFactory.NewHistogramVec(prometheus.HistogramOpts{ + Name: "netbird_proxy_backend_duration_seconds", + Help: "Duration of peer round trip time from the netbird proxy", + Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10}, + }, + []string{"status", "size", "method", "host", "path"}, + ), + } +} + +type responseInterceptor struct { + http.ResponseWriter + status int + size int +} + +func (w *responseInterceptor) WriteHeader(status int) { + w.status = status + w.ResponseWriter.WriteHeader(status) +} + +func (w *responseInterceptor) Write(b []byte) (int, error) { + size, err := w.ResponseWriter.Write(b) + w.size += size + return size, err +} + +func (m *Metrics) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + m.requestsTotal.Inc() + m.activeRequests.Inc() + + interceptor := &responseInterceptor{ResponseWriter: w} + + start := time.Now() + next.ServeHTTP(interceptor, r) + duration := time.Since(start) + + m.activeRequests.Desc() + m.requestDuration.With(prometheus.Labels{ + "status": strconv.Itoa(interceptor.status), + "size": strconv.Itoa(interceptor.size), + "method": r.Method, + "host": r.Host, + "path": r.URL.Path, + }).Observe(duration.Seconds()) + }) +} + +type roundTripperFunc func(*http.Request) (*http.Response, error) + +func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return f(r) +} + +func (m *Metrics) RoundTripper(next http.RoundTripper) http.RoundTripper { + return roundTripperFunc(func(req *http.Request) (*http.Response, error) { + labels := prometheus.Labels{ + "method": req.Method, + "host": req.Host, + // Fill potentially empty labels with default values to avoid cardinality issues. + "path": "/", + "status": "0", + "size": "0", + } + if req.URL != nil { + labels["path"] = req.URL.Path + } + + start := time.Now() + res, err := next.RoundTrip(req) + duration := time.Since(start) + + // Not all labels will be available if there was an error. + if res != nil { + labels["status"] = strconv.Itoa(res.StatusCode) + labels["size"] = strconv.Itoa(int(res.ContentLength)) + } + + m.backendDuration.With(labels).Observe(duration.Seconds()) + + return res, err + }) +} + +func (m *Metrics) AddMapping(mapping proxy.Mapping) { + m.configuredDomains.Inc() + m.pathsPerDomain.With(prometheus.Labels{ + "domain": mapping.Host, + }).Set(float64(len(mapping.Paths))) +} + +func (m *Metrics) RemoveMapping(mapping proxy.Mapping) { + m.configuredDomains.Dec() + m.pathsPerDomain.With(prometheus.Labels{ + "domain": mapping.Host, + }).Set(0) +} diff --git a/proxy/internal/metrics/metrics_test.go b/proxy/internal/metrics/metrics_test.go new file mode 100644 index 000000000..31e00ae64 --- /dev/null +++ b/proxy/internal/metrics/metrics_test.go @@ -0,0 +1,67 @@ +package metrics_test + +import ( + "net/http" + "net/url" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/netbirdio/netbird/proxy/internal/metrics" + "github.com/prometheus/client_golang/prometheus" +) + +type testRoundTripper struct { + response *http.Response + err error +} + +func (t *testRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return t.response, t.err +} + +func TestMetrics_RoundTripper(t *testing.T) { + testResponse := http.Response{ + StatusCode: http.StatusOK, + Body: http.NoBody, + } + + tests := map[string]struct { + roundTripper http.RoundTripper + request *http.Request + response *http.Response + err error + }{ + "ok": { + roundTripper: &testRoundTripper{response: &testResponse}, + request: &http.Request{Method: "GET", URL: &url.URL{Path: "/foo"}}, + response: &testResponse, + }, + "nil url": { + roundTripper: &testRoundTripper{response: &testResponse}, + request: &http.Request{Method: "GET", URL: nil}, + response: &testResponse, + }, + "nil response": { + roundTripper: &testRoundTripper{response: nil}, + request: &http.Request{Method: "GET", URL: &url.URL{Path: "/foo"}}, + }, + } + + m := metrics.New(prometheus.NewRegistry()) + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + rt := m.RoundTripper(test.roundTripper) + res, err := rt.RoundTrip(test.request) + if res != nil && res.Body != nil { + defer res.Body.Close() + } + if diff := cmp.Diff(test.err, err); diff != "" { + t.Errorf("Incorrect error (-want +got):\n%s", diff) + } + if diff := cmp.Diff(test.response, res); diff != "" { + t.Errorf("Incorrect response (-want +got):\n%s", diff) + } + }) + } +} diff --git a/proxy/internal/proxy/context.go b/proxy/internal/proxy/context.go new file mode 100644 index 000000000..22ebbf371 --- /dev/null +++ b/proxy/internal/proxy/context.go @@ -0,0 +1,187 @@ +package proxy + +import ( + "context" + "sync" + + "github.com/netbirdio/netbird/proxy/internal/types" +) + +type requestContextKey string + +const ( + serviceIdKey requestContextKey = "serviceId" + accountIdKey requestContextKey = "accountId" + capturedDataKey requestContextKey = "capturedData" +) + +// ResponseOrigin indicates where a response was generated. +type ResponseOrigin int + +const ( + // OriginBackend means the response came from the backend service. + OriginBackend ResponseOrigin = iota + // OriginNoRoute means the proxy had no matching host or path. + OriginNoRoute + // OriginProxyError means the proxy failed to reach the backend. + OriginProxyError + // OriginAuth means the proxy intercepted the request for authentication. + OriginAuth +) + +func (o ResponseOrigin) String() string { + switch o { + case OriginNoRoute: + return "no_route" + case OriginProxyError: + return "proxy_error" + case OriginAuth: + return "auth" + default: + return "backend" + } +} + +// CapturedData is a mutable struct that allows downstream handlers +// to pass data back up the middleware chain. +type CapturedData struct { + mu sync.RWMutex + RequestID string + ServiceId string + AccountId types.AccountID + Origin ResponseOrigin + ClientIP string + UserID string + AuthMethod string +} + +// GetRequestID safely gets the request ID +func (c *CapturedData) GetRequestID() string { + c.mu.RLock() + defer c.mu.RUnlock() + return c.RequestID +} + +// SetServiceId safely sets the service ID +func (c *CapturedData) SetServiceId(serviceId string) { + c.mu.Lock() + defer c.mu.Unlock() + c.ServiceId = serviceId +} + +// GetServiceId safely gets the service ID +func (c *CapturedData) GetServiceId() string { + c.mu.RLock() + defer c.mu.RUnlock() + return c.ServiceId +} + +// SetAccountId safely sets the account ID +func (c *CapturedData) SetAccountId(accountId types.AccountID) { + c.mu.Lock() + defer c.mu.Unlock() + c.AccountId = accountId +} + +// GetAccountId safely gets the account ID +func (c *CapturedData) GetAccountId() types.AccountID { + c.mu.RLock() + defer c.mu.RUnlock() + return c.AccountId +} + +// SetOrigin safely sets the response origin +func (c *CapturedData) SetOrigin(origin ResponseOrigin) { + c.mu.Lock() + defer c.mu.Unlock() + c.Origin = origin +} + +// GetOrigin safely gets the response origin +func (c *CapturedData) GetOrigin() ResponseOrigin { + c.mu.RLock() + defer c.mu.RUnlock() + return c.Origin +} + +// SetClientIP safely sets the resolved client IP. +func (c *CapturedData) SetClientIP(ip string) { + c.mu.Lock() + defer c.mu.Unlock() + c.ClientIP = ip +} + +// GetClientIP safely gets the resolved client IP. +func (c *CapturedData) GetClientIP() string { + c.mu.RLock() + defer c.mu.RUnlock() + return c.ClientIP +} + +// SetUserID safely sets the authenticated user ID. +func (c *CapturedData) SetUserID(userID string) { + c.mu.Lock() + defer c.mu.Unlock() + c.UserID = userID +} + +// GetUserID safely gets the authenticated user ID. +func (c *CapturedData) GetUserID() string { + c.mu.RLock() + defer c.mu.RUnlock() + return c.UserID +} + +// SetAuthMethod safely sets the authentication method used. +func (c *CapturedData) SetAuthMethod(method string) { + c.mu.Lock() + defer c.mu.Unlock() + c.AuthMethod = method +} + +// GetAuthMethod safely gets the authentication method used. +func (c *CapturedData) GetAuthMethod() string { + c.mu.RLock() + defer c.mu.RUnlock() + return c.AuthMethod +} + +// WithCapturedData adds a CapturedData struct to the context +func WithCapturedData(ctx context.Context, data *CapturedData) context.Context { + return context.WithValue(ctx, capturedDataKey, data) +} + +// CapturedDataFromContext retrieves the CapturedData from context +func CapturedDataFromContext(ctx context.Context) *CapturedData { + v := ctx.Value(capturedDataKey) + data, ok := v.(*CapturedData) + if !ok { + return nil + } + return data +} + +func withServiceId(ctx context.Context, serviceId string) context.Context { + return context.WithValue(ctx, serviceIdKey, serviceId) +} + +func ServiceIdFromContext(ctx context.Context) string { + v := ctx.Value(serviceIdKey) + serviceId, ok := v.(string) + if !ok { + return "" + } + return serviceId +} +func withAccountId(ctx context.Context, accountId types.AccountID) context.Context { + return context.WithValue(ctx, accountIdKey, accountId) +} + +func AccountIdFromContext(ctx context.Context) types.AccountID { + v := ctx.Value(accountIdKey) + accountId, ok := v.(types.AccountID) + if !ok { + return "" + } + return accountId +} diff --git a/proxy/internal/proxy/proxy_bench_test.go b/proxy/internal/proxy/proxy_bench_test.go new file mode 100644 index 000000000..b7526e26b --- /dev/null +++ b/proxy/internal/proxy/proxy_bench_test.go @@ -0,0 +1,130 @@ +package proxy_test + +import ( + "crypto/rand" + "fmt" + "math/big" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/netbirdio/netbird/proxy/internal/proxy" + "github.com/netbirdio/netbird/proxy/internal/types" +) + +type nopTransport struct{} + +func (nopTransport) RoundTrip(*http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: http.NoBody, + }, nil +} + +func BenchmarkServeHTTP(b *testing.B) { + rp := proxy.NewReverseProxy(nopTransport{}, "http", nil, nil) + rp.AddMapping(proxy.Mapping{ + ID: rand.Text(), + AccountID: types.AccountID(rand.Text()), + Host: "app.example.com", + Paths: map[string]*url.URL{ + "/": { + Scheme: "http", + Host: "10.0.0.1:8080", + }, + }, + }) + + req := httptest.NewRequest(http.MethodGet, "http://app.example.com", nil) + req.Host = "app.example.com" + req.RemoteAddr = "203.0.113.50:12345" + + for b.Loop() { + rp.ServeHTTP(httptest.NewRecorder(), req) + } +} + +func BenchmarkServeHTTPHostCount(b *testing.B) { + hostCounts := []int{1, 10, 100, 1_000, 10_000} + + for _, hostCount := range hostCounts { + b.Run(fmt.Sprintf("hosts=%d", hostCount), func(b *testing.B) { + rp := proxy.NewReverseProxy(nopTransport{}, "http", nil, nil) + + var target string + targetIndex, err := rand.Int(rand.Reader, big.NewInt(int64(hostCount))) + if err != nil { + b.Fatal(err) + } + for i := range hostCount { + id := rand.Text() + host := fmt.Sprintf("%s.example.com", id) + if int64(i) == targetIndex.Int64() { + target = id + } + rp.AddMapping(proxy.Mapping{ + ID: id, + AccountID: types.AccountID(rand.Text()), + Host: host, + Paths: map[string]*url.URL{ + "/": { + Scheme: "http", + Host: "10.0.0.1:8080", + }, + }, + }) + } + + req := httptest.NewRequest(http.MethodGet, "http://"+target+"/", nil) + req.Host = target + req.RemoteAddr = "203.0.113.50:12345" + + for b.Loop() { + rp.ServeHTTP(httptest.NewRecorder(), req) + } + }) + } +} + +func BenchmarkServeHTTPPathCount(b *testing.B) { + pathCounts := []int{1, 5, 10, 25, 50} + + for _, pathCount := range pathCounts { + b.Run(fmt.Sprintf("paths=%d", pathCount), func(b *testing.B) { + rp := proxy.NewReverseProxy(nopTransport{}, "http", nil, nil) + + var target string + targetIndex, err := rand.Int(rand.Reader, big.NewInt(int64(pathCount))) + if err != nil { + b.Fatal(err) + } + + paths := make(map[string]*url.URL, pathCount) + for i := range pathCount { + path := "/" + rand.Text() + if int64(i) == targetIndex.Int64() { + target = path + } + paths[path] = &url.URL{ + Scheme: "http", + Host: "10.0.0.1:" + fmt.Sprintf("%d", 8080+i), + } + } + rp.AddMapping(proxy.Mapping{ + ID: rand.Text(), + AccountID: types.AccountID(rand.Text()), + Host: "app.example.com", + Paths: paths, + }) + + req := httptest.NewRequest(http.MethodGet, "http://app.example.com"+target, nil) + req.Host = "app.example.com" + req.RemoteAddr = "203.0.113.50:12345" + + for b.Loop() { + rp.ServeHTTP(httptest.NewRecorder(), req) + } + }) + } +} diff --git a/proxy/internal/proxy/reverseproxy.go b/proxy/internal/proxy/reverseproxy.go new file mode 100644 index 000000000..16607689a --- /dev/null +++ b/proxy/internal/proxy/reverseproxy.go @@ -0,0 +1,406 @@ +package proxy + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "net/http/httputil" + "net/netip" + "net/url" + "strings" + "sync" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/proxy/auth" + "github.com/netbirdio/netbird/proxy/internal/roundtrip" + "github.com/netbirdio/netbird/proxy/web" +) + +type ReverseProxy struct { + transport http.RoundTripper + // forwardedProto overrides the X-Forwarded-Proto header value. + // Valid values: "auto" (detect from TLS), "http", "https". + forwardedProto string + // trustedProxies is a list of IP prefixes for trusted upstream proxies. + // When the direct connection comes from a trusted proxy, forwarding + // headers are preserved and appended to instead of being stripped. + trustedProxies []netip.Prefix + mappingsMux sync.RWMutex + mappings map[string]Mapping + logger *log.Logger +} + +// NewReverseProxy configures a new NetBird ReverseProxy. +// This is a wrapper around an httputil.ReverseProxy set +// to dynamically route requests based on internal mapping +// between requested URLs and targets. +// The internal mappings can be modified using the AddMapping +// and RemoveMapping functions. +func NewReverseProxy(transport http.RoundTripper, forwardedProto string, trustedProxies []netip.Prefix, logger *log.Logger) *ReverseProxy { + if logger == nil { + logger = log.StandardLogger() + } + return &ReverseProxy{ + transport: transport, + forwardedProto: forwardedProto, + trustedProxies: trustedProxies, + mappings: make(map[string]Mapping), + logger: logger, + } +} + +func (p *ReverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { + result, exists := p.findTargetForRequest(r) + if !exists { + if cd := CapturedDataFromContext(r.Context()); cd != nil { + cd.SetOrigin(OriginNoRoute) + } + requestID := getRequestID(r) + web.ServeErrorPage(w, r, http.StatusNotFound, "Service Not Found", + "The requested service could not be found. Please check the URL, try refreshing, or check if the peer is running. If that doesn't work, see our documentation for help.", + requestID, web.ErrorStatus{Proxy: true, Destination: false}) + return + } + + // Set the serviceId in the context for later retrieval. + ctx := withServiceId(r.Context(), result.serviceID) + // Set the accountId in the context for later retrieval (for middleware). + ctx = withAccountId(ctx, result.accountID) + // Set the accountId in the context for the roundtripper to use. + ctx = roundtrip.WithAccountID(ctx, result.accountID) + + // Also populate captured data if it exists (allows middleware to read after handler completes). + // This solves the problem of passing data UP the middleware chain: we put a mutable struct + // pointer in the context, and mutate the struct here so outer middleware can read it. + if capturedData := CapturedDataFromContext(ctx); capturedData != nil { + capturedData.SetServiceId(result.serviceID) + capturedData.SetAccountId(result.accountID) + } + + rp := &httputil.ReverseProxy{ + Rewrite: p.rewriteFunc(result.url, result.matchedPath, result.passHostHeader), + Transport: p.transport, + ErrorHandler: proxyErrorHandler, + } + if result.rewriteRedirects { + rp.ModifyResponse = p.rewriteLocationFunc(result.url, result.matchedPath, r) //nolint:bodyclose + } + rp.ServeHTTP(w, r.WithContext(ctx)) +} + +// rewriteFunc returns a Rewrite function for httputil.ReverseProxy that rewrites +// inbound requests to target the backend service while setting security-relevant +// forwarding headers and stripping proxy authentication credentials. +// When passHostHeader is true, the original client Host header is preserved +// instead of being rewritten to the backend's address. +func (p *ReverseProxy) rewriteFunc(target *url.URL, matchedPath string, passHostHeader bool) func(r *httputil.ProxyRequest) { + return func(r *httputil.ProxyRequest) { + // Strip the matched path prefix from the incoming request path before + // SetURL joins it with the target's base path, avoiding path duplication. + if matchedPath != "" && matchedPath != "/" { + r.Out.URL.Path = strings.TrimPrefix(r.Out.URL.Path, matchedPath) + if r.Out.URL.Path == "" { + r.Out.URL.Path = "/" + } + r.Out.URL.RawPath = "" + } + + r.SetURL(target) + if passHostHeader { + r.Out.Host = r.In.Host + } else { + r.Out.Host = target.Host + } + + clientIP := extractClientIP(r.In.RemoteAddr) + + if IsTrustedProxy(clientIP, p.trustedProxies) { + p.setTrustedForwardingHeaders(r, clientIP) + } else { + p.setUntrustedForwardingHeaders(r, clientIP) + } + + stripSessionCookie(r) + stripSessionTokenQuery(r) + } +} + +// rewriteLocationFunc returns a ModifyResponse function that rewrites Location +// headers in backend responses when they point to the backend's address, +// replacing them with the public-facing host and scheme. +func (p *ReverseProxy) rewriteLocationFunc(target *url.URL, matchedPath string, inReq *http.Request) func(*http.Response) error { + publicHost := inReq.Host + publicScheme := auth.ResolveProto(p.forwardedProto, inReq.TLS) + + return func(resp *http.Response) error { + location := resp.Header.Get("Location") + if location == "" { + return nil + } + + locURL, err := url.Parse(location) + if err != nil { + return fmt.Errorf("parse Location header %q: %w", location, err) + } + + // Only rewrite absolute URLs that point to the backend. + if locURL.Host == "" || !hostsEqual(locURL, target) { + return nil + } + + locURL.Host = publicHost + locURL.Scheme = publicScheme + + // Re-add the stripped path prefix so the client reaches the correct route. + // TrimRight prevents double slashes when matchedPath has a trailing slash. + if matchedPath != "" && matchedPath != "/" { + locURL.Path = strings.TrimRight(matchedPath, "/") + "/" + strings.TrimLeft(locURL.Path, "/") + } + + resp.Header.Set("Location", locURL.String()) + return nil + } +} + +// hostsEqual compares two URL authorities, normalizing default ports per +// RFC 3986 Section 6.2.3 (https://443 == https, http://80 == http). +func hostsEqual(a, b *url.URL) bool { + return normalizeHost(a) == normalizeHost(b) +} + +// normalizeHost strips the port from a URL's Host field if it matches the +// scheme's default port (443 for https, 80 for http). +func normalizeHost(u *url.URL) string { + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + return u.Host + } + if (u.Scheme == "https" && port == "443") || (u.Scheme == "http" && port == "80") { + return host + } + return u.Host +} + +// setTrustedForwardingHeaders appends to the existing forwarding header chain +// and preserves upstream-provided headers when the direct connection is from +// a trusted proxy. +func (p *ReverseProxy) setTrustedForwardingHeaders(r *httputil.ProxyRequest, clientIP string) { + // Append the direct connection IP to the existing X-Forwarded-For chain. + if existing := r.In.Header.Get("X-Forwarded-For"); existing != "" { + r.Out.Header.Set("X-Forwarded-For", existing+", "+clientIP) + } else { + r.Out.Header.Set("X-Forwarded-For", clientIP) + } + + // Preserve upstream X-Real-IP if present; otherwise resolve through the chain. + if realIP := r.In.Header.Get("X-Real-IP"); realIP != "" { + r.Out.Header.Set("X-Real-IP", realIP) + } else { + resolved := ResolveClientIP(r.In.RemoteAddr, r.In.Header.Get("X-Forwarded-For"), p.trustedProxies) + r.Out.Header.Set("X-Real-IP", resolved) + } + + // Preserve upstream X-Forwarded-Host if present. + if fwdHost := r.In.Header.Get("X-Forwarded-Host"); fwdHost != "" { + r.Out.Header.Set("X-Forwarded-Host", fwdHost) + } else { + r.Out.Header.Set("X-Forwarded-Host", r.In.Host) + } + + // Trust upstream X-Forwarded-Proto; fall back to local resolution. + if fwdProto := r.In.Header.Get("X-Forwarded-Proto"); fwdProto != "" { + r.Out.Header.Set("X-Forwarded-Proto", fwdProto) + } else { + r.Out.Header.Set("X-Forwarded-Proto", auth.ResolveProto(p.forwardedProto, r.In.TLS)) + } + + // Trust upstream X-Forwarded-Port; fall back to local computation. + if fwdPort := r.In.Header.Get("X-Forwarded-Port"); fwdPort != "" { + r.Out.Header.Set("X-Forwarded-Port", fwdPort) + } else { + resolvedProto := r.Out.Header.Get("X-Forwarded-Proto") + r.Out.Header.Set("X-Forwarded-Port", extractForwardedPort(r.In.Host, resolvedProto)) + } +} + +// setUntrustedForwardingHeaders strips all incoming forwarding headers and +// sets them fresh based on the direct connection. This is the default +// behavior when no trusted proxies are configured or the direct connection +// is from an untrusted source. +func (p *ReverseProxy) setUntrustedForwardingHeaders(r *httputil.ProxyRequest, clientIP string) { + proto := auth.ResolveProto(p.forwardedProto, r.In.TLS) + r.Out.Header.Set("X-Forwarded-For", clientIP) + r.Out.Header.Set("X-Real-IP", clientIP) + r.Out.Header.Set("X-Forwarded-Host", r.In.Host) + r.Out.Header.Set("X-Forwarded-Proto", proto) + r.Out.Header.Set("X-Forwarded-Port", extractForwardedPort(r.In.Host, proto)) +} + +// stripSessionCookie removes the proxy's session cookie from the outgoing +// request while preserving all other cookies. +func stripSessionCookie(r *httputil.ProxyRequest) { + cookies := r.In.Cookies() + r.Out.Header.Del("Cookie") + for _, c := range cookies { + if c.Name != auth.SessionCookieName { + r.Out.AddCookie(c) + } + } +} + +// stripSessionTokenQuery removes the OIDC session_token query parameter from +// the outgoing URL to prevent credential leakage to backends. +func stripSessionTokenQuery(r *httputil.ProxyRequest) { + q := r.Out.URL.Query() + if q.Has("session_token") { + q.Del("session_token") + r.Out.URL.RawQuery = q.Encode() + } +} + +// extractClientIP extracts the IP address from an http.Request.RemoteAddr +// which is always in host:port format. +func extractClientIP(remoteAddr string) string { + ip, _, err := net.SplitHostPort(remoteAddr) + if err != nil { + return remoteAddr + } + return ip +} + +// extractForwardedPort returns the port from the Host header if present, +// otherwise defaults to the standard port for the resolved protocol. +func extractForwardedPort(host, resolvedProto string) string { + _, port, err := net.SplitHostPort(host) + if err == nil && port != "" { + return port + } + if resolvedProto == "https" { + return "443" + } + return "80" +} + +// proxyErrorHandler handles errors from the reverse proxy and serves +// user-friendly error pages instead of raw error responses. +func proxyErrorHandler(w http.ResponseWriter, r *http.Request, err error) { + if cd := CapturedDataFromContext(r.Context()); cd != nil { + cd.SetOrigin(OriginProxyError) + } + requestID := getRequestID(r) + clientIP := getClientIP(r) + title, message, code, status := classifyProxyError(err) + + log.Warnf("proxy error: request_id=%s client_ip=%s method=%s host=%s path=%s status=%d title=%q err=%v", + requestID, clientIP, r.Method, r.Host, r.URL.Path, code, title, err) + + web.ServeErrorPage(w, r, code, title, message, requestID, status) +} + +// getClientIP retrieves the resolved client IP from context. +func getClientIP(r *http.Request) string { + if capturedData := CapturedDataFromContext(r.Context()); capturedData != nil { + return capturedData.GetClientIP() + } + return "" +} + +// getRequestID retrieves the request ID from context or returns empty string. +func getRequestID(r *http.Request) string { + if capturedData := CapturedDataFromContext(r.Context()); capturedData != nil { + return capturedData.GetRequestID() + } + return "" +} + +// classifyProxyError determines the appropriate error title, message, HTTP +// status code, and component status based on the error type. +func classifyProxyError(err error) (title, message string, code int, status web.ErrorStatus) { + switch { + case errors.Is(err, context.DeadlineExceeded), + isNetTimeout(err): + return "Request Timeout", + "The request timed out while trying to reach the service. Please refresh the page and try again.", + http.StatusGatewayTimeout, + web.ErrorStatus{Proxy: true, Destination: false} + + case errors.Is(err, context.Canceled): + return "Request Canceled", + "The request was canceled before it could be completed. Please refresh the page and try again.", + http.StatusBadGateway, + web.ErrorStatus{Proxy: true, Destination: false} + + case errors.Is(err, roundtrip.ErrNoAccountID): + return "Configuration Error", + "The request could not be processed due to a configuration issue. Please refresh the page and try again.", + http.StatusInternalServerError, + web.ErrorStatus{Proxy: false, Destination: false} + + case errors.Is(err, roundtrip.ErrNoPeerConnection), + errors.Is(err, roundtrip.ErrClientStartFailed): + return "Proxy Not Connected", + "The proxy is not connected to the NetBird network. Please try again later or contact your administrator.", + http.StatusBadGateway, + web.ErrorStatus{Proxy: false, Destination: false} + + case errors.Is(err, roundtrip.ErrTooManyInflight): + return "Service Overloaded", + "The service is currently handling too many requests. Please try again shortly.", + http.StatusServiceUnavailable, + web.ErrorStatus{Proxy: true, Destination: false} + + case isConnectionRefused(err): + return "Service Unavailable", + "The connection to the service was refused. Please verify that the service is running and try again.", + http.StatusBadGateway, + web.ErrorStatus{Proxy: true, Destination: false} + + case isHostUnreachable(err): + return "Peer Not Connected", + "The connection to the peer could not be established. Please ensure the peer is running and connected to the NetBird network.", + http.StatusBadGateway, + web.ErrorStatus{Proxy: true, Destination: false} + } + + return "Connection Error", + "An unexpected error occurred while connecting to the service. Please try again later.", + http.StatusBadGateway, + web.ErrorStatus{Proxy: true, Destination: false} +} + +// isConnectionRefused checks for connection refused errors by inspecting +// the inner error of a *net.OpError. This handles both standard net errors +// (where the inner error is a *os.SyscallError with "connection refused") +// and gVisor netstack errors ("connection was refused"). +func isConnectionRefused(err error) bool { + return opErrorContains(err, "refused") +} + +// isHostUnreachable checks for host/network unreachable errors by inspecting +// the inner error of a *net.OpError. Covers standard net ("no route to host", +// "network is unreachable") and gVisor ("host is unreachable", etc.). +func isHostUnreachable(err error) bool { + return opErrorContains(err, "unreachable") || opErrorContains(err, "no route to host") +} + +// isNetTimeout checks whether the error is a network timeout using the +// net.Error interface. +func isNetTimeout(err error) bool { + var netErr net.Error + return errors.As(err, &netErr) && netErr.Timeout() +} + +// opErrorContains extracts the inner error from a *net.OpError and checks +// whether its message contains the given substring. This handles gVisor +// netstack errors which wrap tcpip errors as plain strings rather than +// syscall.Errno values. +func opErrorContains(err error, substr string) bool { + var opErr *net.OpError + if errors.As(err, &opErr) && opErr.Err != nil { + return strings.Contains(opErr.Err.Error(), substr) + } + return false +} diff --git a/proxy/internal/proxy/reverseproxy_test.go b/proxy/internal/proxy/reverseproxy_test.go new file mode 100644 index 000000000..f7f231db4 --- /dev/null +++ b/proxy/internal/proxy/reverseproxy_test.go @@ -0,0 +1,966 @@ +package proxy + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/netip" + "net/url" + "os" + "syscall" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/proxy/auth" + "github.com/netbirdio/netbird/proxy/internal/roundtrip" + "github.com/netbirdio/netbird/proxy/web" +) + +func TestRewriteFunc_HostRewriting(t *testing.T) { + target, _ := url.Parse("http://backend.internal:8080") + p := &ReverseProxy{forwardedProto: "auto"} + + t.Run("rewrites host to backend by default", func(t *testing.T) { + rewrite := p.rewriteFunc(target, "", false) + pr := newProxyRequest(t, "https://public.example.com/path", "203.0.113.1:12345") + + rewrite(pr) + + assert.Equal(t, "backend.internal:8080", pr.Out.Host) + }) + + t.Run("preserves original host when passHostHeader is true", func(t *testing.T) { + rewrite := p.rewriteFunc(target, "", true) + pr := newProxyRequest(t, "https://public.example.com/path", "203.0.113.1:12345") + + rewrite(pr) + + assert.Equal(t, "public.example.com", pr.Out.Host, + "Host header should be the original client host") + assert.Equal(t, "backend.internal:8080", pr.Out.URL.Host, + "URL host (used for TLS/SNI) must still point to the backend") + }) +} + +func TestRewriteFunc_XForwardedForStripping(t *testing.T) { + target, _ := url.Parse("http://backend.internal:8080") + p := &ReverseProxy{forwardedProto: "auto"} + rewrite := p.rewriteFunc(target, "", false) + + t.Run("sets X-Forwarded-For from direct connection IP", func(t *testing.T) { + pr := newProxyRequest(t, "http://example.com/", "203.0.113.50:9999") + + rewrite(pr) + + assert.Equal(t, "203.0.113.50", pr.Out.Header.Get("X-Forwarded-For"), + "should be set to the connecting client IP") + }) + + t.Run("strips spoofed X-Forwarded-For from client", func(t *testing.T) { + pr := newProxyRequest(t, "http://example.com/", "203.0.113.50:9999") + pr.In.Header.Set("X-Forwarded-For", "10.0.0.1, 172.16.0.1") + + rewrite(pr) + + assert.Equal(t, "203.0.113.50", pr.Out.Header.Get("X-Forwarded-For"), + "spoofed XFF must be replaced, not appended to") + }) + + t.Run("strips spoofed X-Real-IP from client", func(t *testing.T) { + pr := newProxyRequest(t, "http://example.com/", "203.0.113.50:9999") + pr.In.Header.Set("X-Real-IP", "10.0.0.1") + + rewrite(pr) + + assert.Equal(t, "203.0.113.50", pr.Out.Header.Get("X-Real-IP"), + "spoofed X-Real-IP must be replaced") + }) +} + +func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { + target, _ := url.Parse("http://backend.internal:8080") + + t.Run("sets X-Forwarded-Host to original host", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto"} + rewrite := p.rewriteFunc(target, "", false) + pr := newProxyRequest(t, "http://myapp.example.com:8443/path", "1.2.3.4:5000") + + rewrite(pr) + + assert.Equal(t, "myapp.example.com:8443", pr.Out.Header.Get("X-Forwarded-Host")) + }) + + t.Run("sets X-Forwarded-Port from explicit host port", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto"} + rewrite := p.rewriteFunc(target, "", false) + pr := newProxyRequest(t, "http://example.com:8443/path", "1.2.3.4:5000") + + rewrite(pr) + + assert.Equal(t, "8443", pr.Out.Header.Get("X-Forwarded-Port")) + }) + + t.Run("defaults X-Forwarded-Port to 443 for https", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto"} + rewrite := p.rewriteFunc(target, "", false) + pr := newProxyRequest(t, "https://example.com/", "1.2.3.4:5000") + pr.In.TLS = &tls.ConnectionState{} + + rewrite(pr) + + assert.Equal(t, "443", pr.Out.Header.Get("X-Forwarded-Port")) + }) + + t.Run("defaults X-Forwarded-Port to 80 for http", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto"} + rewrite := p.rewriteFunc(target, "", false) + pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") + + rewrite(pr) + + assert.Equal(t, "80", pr.Out.Header.Get("X-Forwarded-Port")) + }) + + t.Run("auto detects https from TLS", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto"} + rewrite := p.rewriteFunc(target, "", false) + pr := newProxyRequest(t, "https://example.com/", "1.2.3.4:5000") + pr.In.TLS = &tls.ConnectionState{} + + rewrite(pr) + + assert.Equal(t, "https", pr.Out.Header.Get("X-Forwarded-Proto")) + }) + + t.Run("auto detects http without TLS", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto"} + rewrite := p.rewriteFunc(target, "", false) + pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") + + rewrite(pr) + + assert.Equal(t, "http", pr.Out.Header.Get("X-Forwarded-Proto")) + }) + + t.Run("forced proto overrides TLS detection", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "https"} + rewrite := p.rewriteFunc(target, "", false) + pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") + // No TLS, but forced to https + + rewrite(pr) + + assert.Equal(t, "https", pr.Out.Header.Get("X-Forwarded-Proto")) + }) + + t.Run("forced http proto", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "http"} + rewrite := p.rewriteFunc(target, "", false) + pr := newProxyRequest(t, "https://example.com/", "1.2.3.4:5000") + pr.In.TLS = &tls.ConnectionState{} + + rewrite(pr) + + assert.Equal(t, "http", pr.Out.Header.Get("X-Forwarded-Proto")) + }) +} + +func TestRewriteFunc_SessionCookieStripping(t *testing.T) { + target, _ := url.Parse("http://backend.internal:8080") + p := &ReverseProxy{forwardedProto: "auto"} + rewrite := p.rewriteFunc(target, "", false) + + t.Run("strips nb_session cookie", func(t *testing.T) { + pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") + pr.In.AddCookie(&http.Cookie{Name: auth.SessionCookieName, Value: "jwt-token-here"}) + + rewrite(pr) + + cookies := pr.Out.Cookies() + for _, c := range cookies { + assert.NotEqual(t, auth.SessionCookieName, c.Name, + "proxy session cookie must not be forwarded to backend") + } + }) + + t.Run("preserves other cookies", func(t *testing.T) { + pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") + pr.In.AddCookie(&http.Cookie{Name: auth.SessionCookieName, Value: "jwt-token"}) + pr.In.AddCookie(&http.Cookie{Name: "app_session", Value: "app-value"}) + pr.In.AddCookie(&http.Cookie{Name: "tracking", Value: "track-value"}) + + rewrite(pr) + + cookies := pr.Out.Cookies() + cookieNames := make([]string, 0, len(cookies)) + for _, c := range cookies { + cookieNames = append(cookieNames, c.Name) + } + assert.Contains(t, cookieNames, "app_session", "non-proxy cookies should be preserved") + assert.Contains(t, cookieNames, "tracking", "non-proxy cookies should be preserved") + assert.NotContains(t, cookieNames, auth.SessionCookieName, "proxy cookie must be stripped") + }) + + t.Run("handles request with no cookies", func(t *testing.T) { + pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") + + rewrite(pr) + + assert.Empty(t, pr.Out.Header.Get("Cookie")) + }) +} + +func TestRewriteFunc_SessionTokenQueryStripping(t *testing.T) { + target, _ := url.Parse("http://backend.internal:8080") + p := &ReverseProxy{forwardedProto: "auto"} + rewrite := p.rewriteFunc(target, "", false) + + t.Run("strips session_token query parameter", func(t *testing.T) { + pr := newProxyRequest(t, "http://example.com/callback?session_token=secret123&other=keep", "1.2.3.4:5000") + + rewrite(pr) + + assert.Empty(t, pr.Out.URL.Query().Get("session_token"), + "OIDC session token must be stripped from backend request") + assert.Equal(t, "keep", pr.Out.URL.Query().Get("other"), + "other query parameters must be preserved") + }) + + t.Run("preserves query when no session_token present", func(t *testing.T) { + pr := newProxyRequest(t, "http://example.com/api?foo=bar&baz=qux", "1.2.3.4:5000") + + rewrite(pr) + + assert.Equal(t, "bar", pr.Out.URL.Query().Get("foo")) + assert.Equal(t, "qux", pr.Out.URL.Query().Get("baz")) + }) +} + +func TestRewriteFunc_URLRewriting(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto"} + + t.Run("rewrites URL to target with path prefix", func(t *testing.T) { + target, _ := url.Parse("http://backend.internal:8080/app") + rewrite := p.rewriteFunc(target, "", false) + pr := newProxyRequest(t, "http://example.com/somepath", "1.2.3.4:5000") + + rewrite(pr) + + assert.Equal(t, "http", pr.Out.URL.Scheme) + assert.Equal(t, "backend.internal:8080", pr.Out.URL.Host) + assert.Equal(t, "/app/somepath", pr.Out.URL.Path, + "SetURL should join the target base path with the request path") + }) + + t.Run("strips matched path prefix to avoid duplication", func(t *testing.T) { + target, _ := url.Parse("https://backend.example.org:443/app") + rewrite := p.rewriteFunc(target, "/app", false) + pr := newProxyRequest(t, "http://example.com/app", "1.2.3.4:5000") + + rewrite(pr) + + assert.Equal(t, "https", pr.Out.URL.Scheme) + assert.Equal(t, "backend.example.org:443", pr.Out.URL.Host) + assert.Equal(t, "/app/", pr.Out.URL.Path, + "matched path prefix should be stripped before joining with target path") + }) + + t.Run("strips matched prefix and preserves subpath", func(t *testing.T) { + target, _ := url.Parse("https://backend.example.org:443/app") + rewrite := p.rewriteFunc(target, "/app", false) + pr := newProxyRequest(t, "http://example.com/app/article/123", "1.2.3.4:5000") + + rewrite(pr) + + assert.Equal(t, "/app/article/123", pr.Out.URL.Path, + "subpath after matched prefix should be preserved") + }) +} + +func TestExtractClientIP(t *testing.T) { + tests := []struct { + name string + remoteAddr string + expected string + }{ + {"IPv4 with port", "192.168.1.1:12345", "192.168.1.1"}, + {"IPv6 with port", "[::1]:12345", "::1"}, + {"IPv6 full with port", "[2001:db8::1]:443", "2001:db8::1"}, + {"IPv4 without port fallback", "192.168.1.1", "192.168.1.1"}, + {"IPv6 without brackets fallback", "::1", "::1"}, + {"empty string fallback", "", ""}, + {"public IP", "203.0.113.50:9999", "203.0.113.50"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.expected, extractClientIP(tt.remoteAddr)) + }) + } +} + +func TestExtractForwardedPort(t *testing.T) { + tests := []struct { + name string + host string + resolvedProto string + expected string + }{ + {"explicit port in host", "example.com:8443", "https", "8443"}, + {"explicit port overrides proto default", "example.com:9090", "http", "9090"}, + {"no port defaults to 443 for https", "example.com", "https", "443"}, + {"no port defaults to 80 for http", "example.com", "http", "80"}, + {"IPv6 host with port", "[::1]:8080", "http", "8080"}, + {"IPv6 host without port", "::1", "https", "443"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.expected, extractForwardedPort(tt.host, tt.resolvedProto)) + }) + } +} + +func TestRewriteFunc_TrustedProxy(t *testing.T) { + target, _ := url.Parse("http://backend.internal:8080") + trusted := []netip.Prefix{netip.MustParsePrefix("10.0.0.0/8")} + + t.Run("appends to X-Forwarded-For", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} + rewrite := p.rewriteFunc(target, "", false) + + pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") + pr.In.Header.Set("X-Forwarded-For", "203.0.113.50") + + rewrite(pr) + + assert.Equal(t, "203.0.113.50, 10.0.0.1", pr.Out.Header.Get("X-Forwarded-For")) + }) + + t.Run("preserves upstream X-Real-IP", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} + rewrite := p.rewriteFunc(target, "", false) + + pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") + pr.In.Header.Set("X-Forwarded-For", "203.0.113.50") + pr.In.Header.Set("X-Real-IP", "203.0.113.50") + + rewrite(pr) + + assert.Equal(t, "203.0.113.50", pr.Out.Header.Get("X-Real-IP")) + }) + + t.Run("resolves X-Real-IP from XFF when not set by upstream", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} + rewrite := p.rewriteFunc(target, "", false) + + pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") + pr.In.Header.Set("X-Forwarded-For", "203.0.113.50, 10.0.0.2") + + rewrite(pr) + + assert.Equal(t, "203.0.113.50", pr.Out.Header.Get("X-Real-IP"), + "should resolve real client through trusted chain") + }) + + t.Run("preserves upstream X-Forwarded-Host", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} + rewrite := p.rewriteFunc(target, "", false) + + pr := newProxyRequest(t, "http://proxy.internal/", "10.0.0.1:5000") + pr.In.Header.Set("X-Forwarded-Host", "original.example.com") + + rewrite(pr) + + assert.Equal(t, "original.example.com", pr.Out.Header.Get("X-Forwarded-Host")) + }) + + t.Run("preserves upstream X-Forwarded-Proto", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} + rewrite := p.rewriteFunc(target, "", false) + + pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") + pr.In.Header.Set("X-Forwarded-Proto", "https") + + rewrite(pr) + + assert.Equal(t, "https", pr.Out.Header.Get("X-Forwarded-Proto")) + }) + + t.Run("preserves upstream X-Forwarded-Port", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} + rewrite := p.rewriteFunc(target, "", false) + + pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") + pr.In.Header.Set("X-Forwarded-Port", "8443") + + rewrite(pr) + + assert.Equal(t, "8443", pr.Out.Header.Get("X-Forwarded-Port")) + }) + + t.Run("falls back to local proto when upstream does not set it", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "https", trustedProxies: trusted} + rewrite := p.rewriteFunc(target, "", false) + + pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") + + rewrite(pr) + + assert.Equal(t, "https", pr.Out.Header.Get("X-Forwarded-Proto"), + "should use configured forwardedProto as fallback") + }) + + t.Run("sets X-Forwarded-Host from request when upstream does not set it", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} + rewrite := p.rewriteFunc(target, "", false) + + pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") + + rewrite(pr) + + assert.Equal(t, "example.com", pr.Out.Header.Get("X-Forwarded-Host")) + }) + + t.Run("untrusted RemoteAddr strips headers even with trusted list", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} + rewrite := p.rewriteFunc(target, "", false) + + pr := newProxyRequest(t, "http://example.com/", "203.0.113.50:9999") + pr.In.Header.Set("X-Forwarded-For", "10.0.0.1, 172.16.0.1") + pr.In.Header.Set("X-Real-IP", "evil") + pr.In.Header.Set("X-Forwarded-Host", "evil.example.com") + pr.In.Header.Set("X-Forwarded-Proto", "https") + pr.In.Header.Set("X-Forwarded-Port", "9999") + + rewrite(pr) + + assert.Equal(t, "203.0.113.50", pr.Out.Header.Get("X-Forwarded-For"), + "untrusted: XFF must be replaced") + assert.Equal(t, "203.0.113.50", pr.Out.Header.Get("X-Real-IP"), + "untrusted: X-Real-IP must be replaced") + assert.Equal(t, "example.com", pr.Out.Header.Get("X-Forwarded-Host"), + "untrusted: host must be from direct connection") + assert.Equal(t, "http", pr.Out.Header.Get("X-Forwarded-Proto"), + "untrusted: proto must be locally resolved") + assert.Equal(t, "80", pr.Out.Header.Get("X-Forwarded-Port"), + "untrusted: port must be locally computed") + }) + + t.Run("empty trusted list behaves as untrusted", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto", trustedProxies: nil} + rewrite := p.rewriteFunc(target, "", false) + + pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") + pr.In.Header.Set("X-Forwarded-For", "203.0.113.50") + + rewrite(pr) + + assert.Equal(t, "10.0.0.1", pr.Out.Header.Get("X-Forwarded-For"), + "nil trusted list: should strip and use RemoteAddr") + }) + + t.Run("XFF starts fresh when trusted proxy has no upstream XFF", func(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} + rewrite := p.rewriteFunc(target, "", false) + + pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") + + rewrite(pr) + + assert.Equal(t, "10.0.0.1", pr.Out.Header.Get("X-Forwarded-For"), + "no upstream XFF: should set direct connection IP") + }) +} + +// TestRewriteFunc_PathForwarding verifies what path the backend actually +// receives given different configurations. This simulates the full pipeline: +// management builds a target URL (with matching prefix baked into the path), +// then the proxy strips the prefix and SetURL re-joins with the target path. +func TestRewriteFunc_PathForwarding(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto"} + + // Simulate what ToProtoMapping does: target URL includes the matching + // prefix as its path component, so the proxy strips-then-re-adds. + t.Run("path prefix baked into target URL is a no-op", func(t *testing.T) { + // Management builds: path="/heise", target="https://heise.de:443/heise" + target, _ := url.Parse("https://heise.de:443/heise") + rewrite := p.rewriteFunc(target, "/heise", false) + pr := newProxyRequest(t, "http://external.test/heise", "1.2.3.4:5000") + + rewrite(pr) + + assert.Equal(t, "/heise/", pr.Out.URL.Path, + "backend sees /heise/ because prefix is stripped then re-added by SetURL") + }) + + t.Run("subpath under prefix also preserved", func(t *testing.T) { + target, _ := url.Parse("https://heise.de:443/heise") + rewrite := p.rewriteFunc(target, "/heise", false) + pr := newProxyRequest(t, "http://external.test/heise/article/123", "1.2.3.4:5000") + + rewrite(pr) + + assert.Equal(t, "/heise/article/123", pr.Out.URL.Path, + "subpath is preserved on top of the re-added prefix") + }) + + // What the behavior WOULD be if target URL had no path (true stripping) + t.Run("target without path prefix gives true stripping", func(t *testing.T) { + target, _ := url.Parse("https://heise.de:443") + rewrite := p.rewriteFunc(target, "/heise", false) + pr := newProxyRequest(t, "http://external.test/heise", "1.2.3.4:5000") + + rewrite(pr) + + assert.Equal(t, "/", pr.Out.URL.Path, + "without path in target URL, backend sees / (true prefix stripping)") + }) + + t.Run("target without path prefix strips and preserves subpath", func(t *testing.T) { + target, _ := url.Parse("https://heise.de:443") + rewrite := p.rewriteFunc(target, "/heise", false) + pr := newProxyRequest(t, "http://external.test/heise/article/123", "1.2.3.4:5000") + + rewrite(pr) + + assert.Equal(t, "/article/123", pr.Out.URL.Path, + "without path in target URL, prefix is truly stripped") + }) + + // Root path "/" — no stripping expected + t.Run("root path forwards full request path unchanged", func(t *testing.T) { + target, _ := url.Parse("https://backend.example.com:443/") + rewrite := p.rewriteFunc(target, "/", false) + pr := newProxyRequest(t, "http://external.test/heise", "1.2.3.4:5000") + + rewrite(pr) + + assert.Equal(t, "/heise", pr.Out.URL.Path, + "root path match must not strip anything") + }) +} + +func TestRewriteLocationFunc(t *testing.T) { + target, _ := url.Parse("http://backend.internal:8080") + newProxy := func(proto string) *ReverseProxy { return &ReverseProxy{forwardedProto: proto} } + newReq := func(rawURL string) *http.Request { + t.Helper() + r := httptest.NewRequest(http.MethodGet, rawURL, nil) + parsed, _ := url.Parse(rawURL) + r.Host = parsed.Host + return r + } + run := func(p *ReverseProxy, matchedPath string, inReq *http.Request, location string) (*http.Response, error) { + t.Helper() + modifyResp := p.rewriteLocationFunc(target, matchedPath, inReq) //nolint:bodyclose + resp := &http.Response{Header: http.Header{}} + if location != "" { + resp.Header.Set("Location", location) + } + err := modifyResp(resp) + return resp, err + } + + t.Run("rewrites Location pointing to backend", func(t *testing.T) { + resp, err := run(newProxy("https"), "", newReq("https://public.example.com/page"), //nolint:bodyclose + "http://backend.internal:8080/login") + + require.NoError(t, err) + assert.Equal(t, "https://public.example.com/login", resp.Header.Get("Location")) + }) + + t.Run("does not rewrite Location pointing to other host", func(t *testing.T) { + resp, err := run(newProxy("https"), "", newReq("https://public.example.com/"), //nolint:bodyclose + "https://other.example.com/path") + + require.NoError(t, err) + assert.Equal(t, "https://other.example.com/path", resp.Header.Get("Location")) + }) + + t.Run("does not rewrite relative Location", func(t *testing.T) { + resp, err := run(newProxy("https"), "", newReq("https://public.example.com/"), //nolint:bodyclose + "/dashboard") + + require.NoError(t, err) + assert.Equal(t, "/dashboard", resp.Header.Get("Location")) + }) + + t.Run("re-adds stripped path prefix", func(t *testing.T) { + resp, err := run(newProxy("https"), "/api", newReq("https://public.example.com/api/users"), //nolint:bodyclose + "http://backend.internal:8080/users") + + require.NoError(t, err) + assert.Equal(t, "https://public.example.com/api/users", resp.Header.Get("Location")) + }) + + t.Run("uses resolved proto for scheme", func(t *testing.T) { + resp, err := run(newProxy("auto"), "", newReq("http://public.example.com/"), //nolint:bodyclose + "http://backend.internal:8080/path") + + require.NoError(t, err) + assert.Equal(t, "http://public.example.com/path", resp.Header.Get("Location")) + }) + + t.Run("no-op when Location header is empty", func(t *testing.T) { + resp, err := run(newProxy("https"), "", newReq("https://public.example.com/"), "") //nolint:bodyclose + + require.NoError(t, err) + assert.Empty(t, resp.Header.Get("Location")) + }) + + t.Run("does not prepend root path prefix", func(t *testing.T) { + resp, err := run(newProxy("https"), "/", newReq("https://public.example.com/login"), //nolint:bodyclose + "http://backend.internal:8080/login") + + require.NoError(t, err) + assert.Equal(t, "https://public.example.com/login", resp.Header.Get("Location")) + }) + + // --- Edge cases: query parameters and fragments --- + + t.Run("preserves query parameters", func(t *testing.T) { + resp, err := run(newProxy("https"), "", newReq("https://public.example.com/"), //nolint:bodyclose + "http://backend.internal:8080/login?redirect=%2Fdashboard&lang=en") + + require.NoError(t, err) + assert.Equal(t, "https://public.example.com/login?redirect=%2Fdashboard&lang=en", resp.Header.Get("Location")) + }) + + t.Run("preserves fragment", func(t *testing.T) { + resp, err := run(newProxy("https"), "", newReq("https://public.example.com/"), //nolint:bodyclose + "http://backend.internal:8080/docs#section-2") + + require.NoError(t, err) + assert.Equal(t, "https://public.example.com/docs#section-2", resp.Header.Get("Location")) + }) + + t.Run("preserves query parameters and fragment together", func(t *testing.T) { + resp, err := run(newProxy("https"), "", newReq("https://public.example.com/"), //nolint:bodyclose + "http://backend.internal:8080/search?q=test&page=1#results") + + require.NoError(t, err) + assert.Equal(t, "https://public.example.com/search?q=test&page=1#results", resp.Header.Get("Location")) + }) + + t.Run("preserves query parameters with path prefix re-added", func(t *testing.T) { + resp, err := run(newProxy("https"), "/api", newReq("https://public.example.com/api/search"), //nolint:bodyclose + "http://backend.internal:8080/search?q=hello") + + require.NoError(t, err) + assert.Equal(t, "https://public.example.com/api/search?q=hello", resp.Header.Get("Location")) + }) + + // --- Edge cases: slash handling --- + + t.Run("no double slash when matchedPath has trailing slash", func(t *testing.T) { + resp, err := run(newProxy("https"), "/api/", newReq("https://public.example.com/api/users"), //nolint:bodyclose + "http://backend.internal:8080/users") + + require.NoError(t, err) + assert.Equal(t, "https://public.example.com/api/users", resp.Header.Get("Location")) + }) + + t.Run("backend redirect to root with path prefix", func(t *testing.T) { + resp, err := run(newProxy("https"), "/app", newReq("https://public.example.com/app/"), //nolint:bodyclose + "http://backend.internal:8080/") + + require.NoError(t, err) + assert.Equal(t, "https://public.example.com/app/", resp.Header.Get("Location")) + }) + + t.Run("backend redirect to root with trailing-slash path prefix", func(t *testing.T) { + resp, err := run(newProxy("https"), "/app/", newReq("https://public.example.com/app/"), //nolint:bodyclose + "http://backend.internal:8080/") + + require.NoError(t, err) + assert.Equal(t, "https://public.example.com/app/", resp.Header.Get("Location")) + }) + + t.Run("preserves trailing slash on redirect path", func(t *testing.T) { + resp, err := run(newProxy("https"), "", newReq("https://public.example.com/"), //nolint:bodyclose + "http://backend.internal:8080/path/") + + require.NoError(t, err) + assert.Equal(t, "https://public.example.com/path/", resp.Header.Get("Location")) + }) + + t.Run("backend redirect to bare root", func(t *testing.T) { + resp, err := run(newProxy("https"), "", newReq("https://public.example.com/page"), //nolint:bodyclose + "http://backend.internal:8080/") + + require.NoError(t, err) + assert.Equal(t, "https://public.example.com/", resp.Header.Get("Location")) + }) + + // --- Edge cases: host/port matching --- + + t.Run("does not rewrite when backend host matches but port differs", func(t *testing.T) { + resp, err := run(newProxy("https"), "", newReq("https://public.example.com/"), //nolint:bodyclose + "http://backend.internal:9090/other") + + require.NoError(t, err) + assert.Equal(t, "http://backend.internal:9090/other", resp.Header.Get("Location"), + "Different port means different host authority, must not rewrite") + }) + + t.Run("rewrites when redirect omits default port matching target", func(t *testing.T) { + // Target is backend.internal:8080, redirect is to backend.internal (no port). + // These are different authorities, so should NOT rewrite. + resp, err := run(newProxy("https"), "", newReq("https://public.example.com/"), //nolint:bodyclose + "http://backend.internal/path") + + require.NoError(t, err) + assert.Equal(t, "http://backend.internal/path", resp.Header.Get("Location"), + "backend.internal != backend.internal:8080, must not rewrite") + }) + + t.Run("rewrites when target has :443 but redirect omits it for https", func(t *testing.T) { + // Target: heise.de:443, redirect: https://heise.de/path (no :443 because it's default) + // Per RFC 3986, these are the same authority. + target443, _ := url.Parse("https://heise.de:443") + p := newProxy("https") + modifyResp := p.rewriteLocationFunc(target443, "", newReq("https://public.example.com/")) //nolint:bodyclose + resp := &http.Response{Header: http.Header{}} + resp.Header.Set("Location", "https://heise.de/path") + + err := modifyResp(resp) + + require.NoError(t, err) + assert.Equal(t, "https://public.example.com/path", resp.Header.Get("Location"), + "heise.de:443 and heise.de are the same for https") + }) + + t.Run("rewrites when target has :80 but redirect omits it for http", func(t *testing.T) { + target80, _ := url.Parse("http://backend.local:80") + p := newProxy("http") + modifyResp := p.rewriteLocationFunc(target80, "", newReq("http://public.example.com/")) //nolint:bodyclose + resp := &http.Response{Header: http.Header{}} + resp.Header.Set("Location", "http://backend.local/path") + + err := modifyResp(resp) + + require.NoError(t, err) + assert.Equal(t, "http://public.example.com/path", resp.Header.Get("Location"), + "backend.local:80 and backend.local are the same for http") + }) + + t.Run("rewrites when redirect has :443 but target omits it", func(t *testing.T) { + targetNoPort, _ := url.Parse("https://heise.de") + p := newProxy("https") + modifyResp := p.rewriteLocationFunc(targetNoPort, "", newReq("https://public.example.com/")) //nolint:bodyclose + resp := &http.Response{Header: http.Header{}} + resp.Header.Set("Location", "https://heise.de:443/path") + + err := modifyResp(resp) + + require.NoError(t, err) + assert.Equal(t, "https://public.example.com/path", resp.Header.Get("Location"), + "heise.de and heise.de:443 are the same for https") + }) + + t.Run("does not conflate non-default ports", func(t *testing.T) { + target8443, _ := url.Parse("https://backend.internal:8443") + p := newProxy("https") + modifyResp := p.rewriteLocationFunc(target8443, "", newReq("https://public.example.com/")) //nolint:bodyclose + resp := &http.Response{Header: http.Header{}} + resp.Header.Set("Location", "https://backend.internal/path") + + err := modifyResp(resp) + + require.NoError(t, err) + assert.Equal(t, "https://backend.internal/path", resp.Header.Get("Location"), + "backend.internal:8443 != backend.internal (port 443), must not rewrite") + }) + + // --- Edge cases: encoded paths --- + + t.Run("preserves percent-encoded path segments", func(t *testing.T) { + resp, err := run(newProxy("https"), "", newReq("https://public.example.com/"), //nolint:bodyclose + "http://backend.internal:8080/path%20with%20spaces/file%2Fname") + + require.NoError(t, err) + loc := resp.Header.Get("Location") + assert.Contains(t, loc, "public.example.com") + parsed, err := url.Parse(loc) + require.NoError(t, err) + assert.Equal(t, "/path with spaces/file/name", parsed.Path) + }) + + t.Run("preserves encoded query parameters with path prefix", func(t *testing.T) { + resp, err := run(newProxy("https"), "/v1", newReq("https://public.example.com/v1/"), //nolint:bodyclose + "http://backend.internal:8080/redirect?url=http%3A%2F%2Fexample.com") + + require.NoError(t, err) + assert.Equal(t, "https://public.example.com/v1/redirect?url=http%3A%2F%2Fexample.com", resp.Header.Get("Location")) + }) +} + +// newProxyRequest creates an httputil.ProxyRequest suitable for testing +// the Rewrite function. It simulates what httputil.ReverseProxy does internally: +// Out is a shallow clone of In with headers copied. +func newProxyRequest(t *testing.T, rawURL, remoteAddr string) *httputil.ProxyRequest { + t.Helper() + + parsed, err := url.Parse(rawURL) + require.NoError(t, err) + + in := httptest.NewRequest(http.MethodGet, rawURL, nil) + in.RemoteAddr = remoteAddr + in.Host = parsed.Host + + out := in.Clone(in.Context()) + out.Header = in.Header.Clone() + + return &httputil.ProxyRequest{In: in, Out: out} +} + +func TestClassifyProxyError(t *testing.T) { + tests := []struct { + name string + err error + wantTitle string + wantCode int + wantStatus web.ErrorStatus + }{ + { + name: "context deadline exceeded", + err: context.DeadlineExceeded, + wantTitle: "Request Timeout", + wantCode: http.StatusGatewayTimeout, + wantStatus: web.ErrorStatus{Proxy: true, Destination: false}, + }, + { + name: "wrapped deadline exceeded", + err: fmt.Errorf("dial: %w", context.DeadlineExceeded), + wantTitle: "Request Timeout", + wantCode: http.StatusGatewayTimeout, + wantStatus: web.ErrorStatus{Proxy: true, Destination: false}, + }, + { + name: "context canceled", + err: context.Canceled, + wantTitle: "Request Canceled", + wantCode: http.StatusBadGateway, + wantStatus: web.ErrorStatus{Proxy: true, Destination: false}, + }, + { + name: "no account ID", + err: roundtrip.ErrNoAccountID, + wantTitle: "Configuration Error", + wantCode: http.StatusInternalServerError, + wantStatus: web.ErrorStatus{Proxy: false, Destination: false}, + }, + { + name: "no peer connection", + err: fmt.Errorf("%w for account: abc", roundtrip.ErrNoPeerConnection), + wantTitle: "Proxy Not Connected", + wantCode: http.StatusBadGateway, + wantStatus: web.ErrorStatus{Proxy: false, Destination: false}, + }, + { + name: "client not started", + err: fmt.Errorf("%w: %w", roundtrip.ErrClientStartFailed, errors.New("engine init failed")), + wantTitle: "Proxy Not Connected", + wantCode: http.StatusBadGateway, + wantStatus: web.ErrorStatus{Proxy: false, Destination: false}, + }, + { + name: "syscall ECONNREFUSED via os.SyscallError", + err: &net.OpError{ + Op: "dial", + Net: "tcp", + Err: &os.SyscallError{Syscall: "connect", Err: syscall.ECONNREFUSED}, + }, + wantTitle: "Service Unavailable", + wantCode: http.StatusBadGateway, + wantStatus: web.ErrorStatus{Proxy: true, Destination: false}, + }, + { + name: "gvisor connection was refused", + err: &net.OpError{ + Op: "connect", + Net: "tcp", + Err: errors.New("connection was refused"), + }, + wantTitle: "Service Unavailable", + wantCode: http.StatusBadGateway, + wantStatus: web.ErrorStatus{Proxy: true, Destination: false}, + }, + { + name: "syscall EHOSTUNREACH via os.SyscallError", + err: &net.OpError{ + Op: "dial", + Net: "tcp", + Err: &os.SyscallError{Syscall: "connect", Err: syscall.EHOSTUNREACH}, + }, + wantTitle: "Peer Not Connected", + wantCode: http.StatusBadGateway, + wantStatus: web.ErrorStatus{Proxy: true, Destination: false}, + }, + { + name: "syscall ENETUNREACH via os.SyscallError", + err: &net.OpError{ + Op: "dial", + Net: "tcp", + Err: &os.SyscallError{Syscall: "connect", Err: syscall.ENETUNREACH}, + }, + wantTitle: "Peer Not Connected", + wantCode: http.StatusBadGateway, + wantStatus: web.ErrorStatus{Proxy: true, Destination: false}, + }, + { + name: "gvisor host is unreachable", + err: &net.OpError{ + Op: "connect", + Net: "tcp", + Err: errors.New("host is unreachable"), + }, + wantTitle: "Peer Not Connected", + wantCode: http.StatusBadGateway, + wantStatus: web.ErrorStatus{Proxy: true, Destination: false}, + }, + { + name: "gvisor network is unreachable", + err: &net.OpError{ + Op: "connect", + Net: "tcp", + Err: errors.New("network is unreachable"), + }, + wantTitle: "Peer Not Connected", + wantCode: http.StatusBadGateway, + wantStatus: web.ErrorStatus{Proxy: true, Destination: false}, + }, + { + name: "standard no route to host", + err: &net.OpError{ + Op: "dial", + Net: "tcp", + Err: &os.SyscallError{Syscall: "connect", Err: syscall.EHOSTUNREACH}, + }, + wantTitle: "Peer Not Connected", + wantCode: http.StatusBadGateway, + wantStatus: web.ErrorStatus{Proxy: true, Destination: false}, + }, + { + name: "unknown error falls to default", + err: errors.New("something unexpected"), + wantTitle: "Connection Error", + wantCode: http.StatusBadGateway, + wantStatus: web.ErrorStatus{Proxy: true, Destination: false}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + title, _, code, status := classifyProxyError(tt.err) + assert.Equal(t, tt.wantTitle, title, "title") + assert.Equal(t, tt.wantCode, code, "status code") + assert.Equal(t, tt.wantStatus, status, "component status") + }) + } +} diff --git a/proxy/internal/proxy/servicemapping.go b/proxy/internal/proxy/servicemapping.go new file mode 100644 index 000000000..6f5829ebb --- /dev/null +++ b/proxy/internal/proxy/servicemapping.go @@ -0,0 +1,84 @@ +package proxy + +import ( + "net" + "net/http" + "net/url" + "sort" + "strings" + + "github.com/netbirdio/netbird/proxy/internal/types" +) + +type Mapping struct { + ID string + AccountID types.AccountID + Host string + Paths map[string]*url.URL + PassHostHeader bool + RewriteRedirects bool +} + +type targetResult struct { + url *url.URL + matchedPath string + serviceID string + accountID types.AccountID + passHostHeader bool + rewriteRedirects bool +} + +func (p *ReverseProxy) findTargetForRequest(req *http.Request) (targetResult, bool) { + p.mappingsMux.RLock() + defer p.mappingsMux.RUnlock() + + // Strip port from host if present (e.g., "external.test:8443" -> "external.test") + host := req.Host + if h, _, err := net.SplitHostPort(host); err == nil { + host = h + } + + m, exists := p.mappings[host] + if !exists { + p.logger.Debugf("no mapping found for host: %s", host) + return targetResult{}, false + } + + // Sort paths by length (longest first) in a naive attempt to match the most specific route first. + paths := make([]string, 0, len(m.Paths)) + for path := range m.Paths { + paths = append(paths, path) + } + sort.Slice(paths, func(i, j int) bool { + return len(paths[i]) > len(paths[j]) + }) + + for _, path := range paths { + if strings.HasPrefix(req.URL.Path, path) { + target := m.Paths[path] + p.logger.Debugf("matched host: %s, path: %s -> %s", host, path, target) + return targetResult{ + url: target, + matchedPath: path, + serviceID: m.ID, + accountID: m.AccountID, + passHostHeader: m.PassHostHeader, + rewriteRedirects: m.RewriteRedirects, + }, true + } + } + p.logger.Debugf("no path match for host: %s, path: %s", host, req.URL.Path) + return targetResult{}, false +} + +func (p *ReverseProxy) AddMapping(m Mapping) { + p.mappingsMux.Lock() + defer p.mappingsMux.Unlock() + p.mappings[m.Host] = m +} + +func (p *ReverseProxy) RemoveMapping(m Mapping) { + p.mappingsMux.Lock() + defer p.mappingsMux.Unlock() + delete(p.mappings, m.Host) +} diff --git a/proxy/internal/proxy/trustedproxy.go b/proxy/internal/proxy/trustedproxy.go new file mode 100644 index 000000000..ad9a5b6c0 --- /dev/null +++ b/proxy/internal/proxy/trustedproxy.go @@ -0,0 +1,60 @@ +package proxy + +import ( + "net/netip" + "strings" +) + +// IsTrustedProxy checks if the given IP string falls within any of the trusted prefixes. +func IsTrustedProxy(ipStr string, trusted []netip.Prefix) bool { + if len(trusted) == 0 { + return false + } + + addr, err := netip.ParseAddr(ipStr) + if err != nil { + return false + } + + for _, prefix := range trusted { + if prefix.Contains(addr) { + return true + } + } + return false +} + +// ResolveClientIP extracts the real client IP from X-Forwarded-For using the trusted proxy list. +// It walks the XFF chain right-to-left, skipping IPs that match trusted prefixes. +// The first untrusted IP is the real client. +// +// If the trusted list is empty or remoteAddr is not trusted, it returns the +// remoteAddr IP directly (ignoring any forwarding headers). +func ResolveClientIP(remoteAddr, xff string, trusted []netip.Prefix) string { + remoteIP := extractClientIP(remoteAddr) + + if len(trusted) == 0 || !IsTrustedProxy(remoteIP, trusted) { + return remoteIP + } + + if xff == "" { + return remoteIP + } + + parts := strings.Split(xff, ",") + for i := len(parts) - 1; i >= 0; i-- { + ip := strings.TrimSpace(parts[i]) + if ip == "" { + continue + } + if !IsTrustedProxy(ip, trusted) { + return ip + } + } + + // All IPs in XFF are trusted; return the leftmost as best guess. + if first := strings.TrimSpace(parts[0]); first != "" { + return first + } + return remoteIP +} diff --git a/proxy/internal/proxy/trustedproxy_test.go b/proxy/internal/proxy/trustedproxy_test.go new file mode 100644 index 000000000..827b7babf --- /dev/null +++ b/proxy/internal/proxy/trustedproxy_test.go @@ -0,0 +1,129 @@ +package proxy + +import ( + "net/netip" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIsTrustedProxy(t *testing.T) { + trusted := []netip.Prefix{ + netip.MustParsePrefix("10.0.0.0/8"), + netip.MustParsePrefix("192.168.1.0/24"), + netip.MustParsePrefix("fd00::/8"), + } + + tests := []struct { + name string + ip string + trusted []netip.Prefix + want bool + }{ + {"empty trusted list", "10.0.0.1", nil, false}, + {"IP within /8 prefix", "10.1.2.3", trusted, true}, + {"IP within /24 prefix", "192.168.1.100", trusted, true}, + {"IP outside all prefixes", "203.0.113.50", trusted, false}, + {"boundary IP just outside prefix", "192.168.2.1", trusted, false}, + {"unparsable IP", "not-an-ip", trusted, false}, + {"IPv6 in trusted range", "fd00::1", trusted, true}, + {"IPv6 outside range", "2001:db8::1", trusted, false}, + {"empty string", "", trusted, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.want, IsTrustedProxy(tt.ip, tt.trusted)) + }) + } +} + +func TestResolveClientIP(t *testing.T) { + trusted := []netip.Prefix{ + netip.MustParsePrefix("10.0.0.0/8"), + netip.MustParsePrefix("172.16.0.0/12"), + } + + tests := []struct { + name string + remoteAddr string + xff string + trusted []netip.Prefix + want string + }{ + { + name: "empty trusted list returns RemoteAddr", + remoteAddr: "203.0.113.50:9999", + xff: "1.2.3.4", + trusted: nil, + want: "203.0.113.50", + }, + { + name: "untrusted RemoteAddr ignores XFF", + remoteAddr: "203.0.113.50:9999", + xff: "1.2.3.4, 10.0.0.1", + trusted: trusted, + want: "203.0.113.50", + }, + { + name: "trusted RemoteAddr with single client in XFF", + remoteAddr: "10.0.0.1:5000", + xff: "203.0.113.50", + trusted: trusted, + want: "203.0.113.50", + }, + { + name: "trusted RemoteAddr walks past trusted entries in XFF", + remoteAddr: "10.0.0.1:5000", + xff: "203.0.113.50, 10.0.0.2, 172.16.0.5", + trusted: trusted, + want: "203.0.113.50", + }, + { + name: "trusted RemoteAddr with empty XFF falls back to RemoteAddr", + remoteAddr: "10.0.0.1:5000", + xff: "", + trusted: trusted, + want: "10.0.0.1", + }, + { + name: "all XFF IPs trusted returns leftmost", + remoteAddr: "10.0.0.1:5000", + xff: "10.0.0.2, 172.16.0.1, 10.0.0.3", + trusted: trusted, + want: "10.0.0.2", + }, + { + name: "XFF with whitespace", + remoteAddr: "10.0.0.1:5000", + xff: " 203.0.113.50 , 10.0.0.2 ", + trusted: trusted, + want: "203.0.113.50", + }, + { + name: "XFF with empty segments", + remoteAddr: "10.0.0.1:5000", + xff: "203.0.113.50,,10.0.0.2", + trusted: trusted, + want: "203.0.113.50", + }, + { + name: "multi-hop with mixed trust", + remoteAddr: "10.0.0.1:5000", + xff: "8.8.8.8, 203.0.113.50, 172.16.0.1", + trusted: trusted, + want: "203.0.113.50", + }, + { + name: "RemoteAddr without port", + remoteAddr: "10.0.0.1", + xff: "203.0.113.50", + trusted: trusted, + want: "203.0.113.50", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.want, ResolveClientIP(tt.remoteAddr, tt.xff, tt.trusted)) + }) + } +} diff --git a/proxy/internal/roundtrip/netbird.go b/proxy/internal/roundtrip/netbird.go new file mode 100644 index 000000000..d7fd2746f --- /dev/null +++ b/proxy/internal/roundtrip/netbird.go @@ -0,0 +1,575 @@ +package roundtrip + +import ( + "context" + "errors" + "fmt" + "net/http" + "sync" + "time" + + "github.com/hashicorp/go-multierror" + log "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" + "google.golang.org/grpc" + + "github.com/netbirdio/netbird/client/embed" + nberrors "github.com/netbirdio/netbird/client/errors" + "github.com/netbirdio/netbird/proxy/internal/types" + "github.com/netbirdio/netbird/shared/management/domain" + "github.com/netbirdio/netbird/shared/management/proto" + "github.com/netbirdio/netbird/util" +) + +const deviceNamePrefix = "ingress-proxy-" + +// backendKey identifies a backend by its host:port from the target URL. +type backendKey = string + +var ( + // ErrNoAccountID is returned when a request context is missing the account ID. + ErrNoAccountID = errors.New("no account ID in request context") + // ErrNoPeerConnection is returned when no embedded client exists for the account. + ErrNoPeerConnection = errors.New("no peer connection found") + // ErrClientStartFailed is returned when the embedded client fails to start. + ErrClientStartFailed = errors.New("client start failed") + // ErrTooManyInflight is returned when the per-backend in-flight limit is reached. + ErrTooManyInflight = errors.New("too many in-flight requests") +) + +// domainInfo holds metadata about a registered domain. +type domainInfo struct { + serviceID string +} + +type domainNotification struct { + domain domain.Domain + serviceID string +} + +// clientEntry holds an embedded NetBird client and tracks which domains use it. +type clientEntry struct { + client *embed.Client + transport *http.Transport + domains map[domain.Domain]domainInfo + createdAt time.Time + started bool + // Per-backend in-flight limiting keyed by target host:port. + // TODO: clean up stale entries when backend targets change. + inflightMu sync.Mutex + inflightMap map[backendKey]chan struct{} + maxInflight int +} + +// acquireInflight attempts to acquire an in-flight slot for the given backend. +// It returns a release function that must always be called, and true on success. +func (e *clientEntry) acquireInflight(backend backendKey) (release func(), ok bool) { + noop := func() {} + if e.maxInflight <= 0 { + return noop, true + } + + e.inflightMu.Lock() + sem, exists := e.inflightMap[backend] + if !exists { + sem = make(chan struct{}, e.maxInflight) + e.inflightMap[backend] = sem + } + e.inflightMu.Unlock() + + select { + case sem <- struct{}{}: + return func() { <-sem }, true + default: + return noop, false + } +} + +type statusNotifier interface { + NotifyStatus(ctx context.Context, accountID, serviceID, domain string, connected bool) error +} + +type managementClient interface { + CreateProxyPeer(ctx context.Context, req *proto.CreateProxyPeerRequest, opts ...grpc.CallOption) (*proto.CreateProxyPeerResponse, error) +} + +// NetBird provides an http.RoundTripper implementation +// backed by underlying NetBird connections. +// Clients are keyed by AccountID, allowing multiple domains to share the same connection. +type NetBird struct { + mgmtAddr string + proxyID string + proxyAddr string + wgPort int + logger *log.Logger + mgmtClient managementClient + transportCfg transportConfig + + clientsMux sync.RWMutex + clients map[types.AccountID]*clientEntry + initLogOnce sync.Once + statusNotifier statusNotifier +} + +// ClientDebugInfo contains debug information about a client. +type ClientDebugInfo struct { + AccountID types.AccountID + DomainCount int + Domains domain.List + HasClient bool + CreatedAt time.Time +} + +// accountIDContextKey is the context key for storing the account ID. +type accountIDContextKey struct{} + +// AddPeer registers a domain for an account. If the account doesn't have a client yet, +// one is created by authenticating with the management server using the provided token. +// Multiple domains can share the same client. +func (n *NetBird) AddPeer(ctx context.Context, accountID types.AccountID, d domain.Domain, authToken, serviceID string) error { + n.clientsMux.Lock() + + entry, exists := n.clients[accountID] + if exists { + // Client already exists for this account, just register the domain + entry.domains[d] = domainInfo{serviceID: serviceID} + started := entry.started + n.clientsMux.Unlock() + + n.logger.WithFields(log.Fields{ + "account_id": accountID, + "domain": d, + }).Debug("registered domain with existing client") + + // If client is already started, notify this domain as connected immediately + if started && n.statusNotifier != nil { + if err := n.statusNotifier.NotifyStatus(ctx, string(accountID), serviceID, string(d), true); err != nil { + n.logger.WithFields(log.Fields{ + "account_id": accountID, + "domain": d, + }).WithError(err).Warn("failed to notify status for existing client") + } + } + return nil + } + + entry, err := n.createClientEntry(ctx, accountID, d, authToken, serviceID) + if err != nil { + n.clientsMux.Unlock() + return err + } + + n.clients[accountID] = entry + n.clientsMux.Unlock() + + n.logger.WithFields(log.Fields{ + "account_id": accountID, + "domain": d, + }).Info("created new client for account") + + // Attempt to start the client in the background; if this fails we will + // retry on the first request via RoundTrip. + go n.runClientStartup(ctx, accountID, entry.client) + + return nil +} + +// createClientEntry generates a WireGuard keypair, authenticates with management, +// and creates an embedded NetBird client. Must be called with clientsMux held. +func (n *NetBird) createClientEntry(ctx context.Context, accountID types.AccountID, d domain.Domain, authToken, serviceID string) (*clientEntry, error) { + n.logger.WithFields(log.Fields{ + "account_id": accountID, + "service_id": serviceID, + }).Debug("generating WireGuard keypair for new peer") + + privateKey, err := wgtypes.GeneratePrivateKey() + if err != nil { + return nil, fmt.Errorf("generate wireguard private key: %w", err) + } + publicKey := privateKey.PublicKey() + + n.logger.WithFields(log.Fields{ + "account_id": accountID, + "service_id": serviceID, + "public_key": publicKey.String(), + }).Debug("authenticating new proxy peer with management") + + resp, err := n.mgmtClient.CreateProxyPeer(ctx, &proto.CreateProxyPeerRequest{ + ServiceId: serviceID, + AccountId: string(accountID), + Token: authToken, + WireguardPublicKey: publicKey.String(), + Cluster: n.proxyAddr, + }) + if err != nil { + return nil, fmt.Errorf("authenticate proxy peer with management: %w", err) + } + if resp != nil && !resp.GetSuccess() { + errMsg := "unknown error" + if resp.ErrorMessage != nil { + errMsg = *resp.ErrorMessage + } + return nil, fmt.Errorf("proxy peer authentication failed: %s", errMsg) + } + + n.logger.WithFields(log.Fields{ + "account_id": accountID, + "service_id": serviceID, + "public_key": publicKey.String(), + }).Info("proxy peer authenticated successfully with management") + + n.initLogOnce.Do(func() { + if err := util.InitLog(log.WarnLevel.String(), util.LogConsole); err != nil { + n.logger.WithField("account_id", accountID).Warnf("failed to initialize embedded client logging: %v", err) + } + }) + + // Create embedded NetBird client with the generated private key. + // The peer has already been created via CreateProxyPeer RPC with the public key. + client, err := embed.New(embed.Options{ + DeviceName: deviceNamePrefix + n.proxyID, + ManagementURL: n.mgmtAddr, + PrivateKey: privateKey.String(), + LogLevel: log.WarnLevel.String(), + BlockInbound: true, + WireguardPort: &n.wgPort, + }) + if err != nil { + return nil, fmt.Errorf("create netbird client: %w", err) + } + + // Create a transport using the client dialer. We do this instead of using + // the client's HTTPClient to avoid issues with request validation that do + // not work with reverse proxied requests. + return &clientEntry{ + client: client, + domains: map[domain.Domain]domainInfo{d: {serviceID: serviceID}}, + transport: &http.Transport{ + DialContext: client.DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: n.transportCfg.maxIdleConns, + MaxIdleConnsPerHost: n.transportCfg.maxIdleConnsPerHost, + MaxConnsPerHost: n.transportCfg.maxConnsPerHost, + IdleConnTimeout: n.transportCfg.idleConnTimeout, + TLSHandshakeTimeout: n.transportCfg.tlsHandshakeTimeout, + ExpectContinueTimeout: n.transportCfg.expectContinueTimeout, + ResponseHeaderTimeout: n.transportCfg.responseHeaderTimeout, + WriteBufferSize: n.transportCfg.writeBufferSize, + ReadBufferSize: n.transportCfg.readBufferSize, + DisableCompression: n.transportCfg.disableCompression, + }, + createdAt: time.Now(), + started: false, + inflightMap: make(map[backendKey]chan struct{}), + maxInflight: n.transportCfg.maxInflight, + }, nil +} + +// runClientStartup starts the client and notifies registered domains on success. +func (n *NetBird) runClientStartup(ctx context.Context, accountID types.AccountID, client *embed.Client) { + startCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + if err := client.Start(startCtx); err != nil { + if errors.Is(err, context.DeadlineExceeded) { + n.logger.WithField("account_id", accountID).Warn("netbird client start timed out, will retry on first request") + } else { + n.logger.WithField("account_id", accountID).WithError(err).Error("failed to start netbird client") + } + return + } + + // Mark client as started and collect domains to notify outside the lock. + n.clientsMux.Lock() + entry, exists := n.clients[accountID] + if exists { + entry.started = true + } + var domainsToNotify []domainNotification + if exists { + for dom, info := range entry.domains { + domainsToNotify = append(domainsToNotify, domainNotification{domain: dom, serviceID: info.serviceID}) + } + } + n.clientsMux.Unlock() + + if n.statusNotifier == nil { + return + } + for _, dn := range domainsToNotify { + if err := n.statusNotifier.NotifyStatus(ctx, string(accountID), dn.serviceID, string(dn.domain), true); err != nil { + n.logger.WithFields(log.Fields{ + "account_id": accountID, + "domain": dn.domain, + }).WithError(err).Warn("failed to notify tunnel connection status") + } else { + n.logger.WithFields(log.Fields{ + "account_id": accountID, + "domain": dn.domain, + }).Info("notified management about tunnel connection") + } + } +} + +// RemovePeer unregisters a domain from an account. The client is only stopped +// when no domains are using it anymore. +func (n *NetBird) RemovePeer(ctx context.Context, accountID types.AccountID, d domain.Domain) error { + n.clientsMux.Lock() + + entry, exists := n.clients[accountID] + if !exists { + n.clientsMux.Unlock() + n.logger.WithField("account_id", accountID).Debug("remove peer: account not found") + return nil + } + + // Get domain info before deleting + domInfo, domainExists := entry.domains[d] + if !domainExists { + n.clientsMux.Unlock() + n.logger.WithFields(log.Fields{ + "account_id": accountID, + "domain": d, + }).Debug("remove peer: domain not registered") + return nil + } + + delete(entry.domains, d) + + // If there are still domains using this client, keep it running + if len(entry.domains) > 0 { + n.clientsMux.Unlock() + + n.logger.WithFields(log.Fields{ + "account_id": accountID, + "domain": d, + "remaining_domains": len(entry.domains), + }).Debug("unregistered domain, client still in use") + + // Notify this domain as disconnected + if n.statusNotifier != nil { + if err := n.statusNotifier.NotifyStatus(ctx, string(accountID), domInfo.serviceID, string(d), false); err != nil { + n.logger.WithFields(log.Fields{ + "account_id": accountID, + "domain": d, + }).WithError(err).Warn("failed to notify tunnel disconnection status") + } + } + return nil + } + + // No more domains using this client, stop it + n.logger.WithFields(log.Fields{ + "account_id": accountID, + }).Info("stopping client, no more domains") + + client := entry.client + transport := entry.transport + delete(n.clients, accountID) + n.clientsMux.Unlock() + + // Notify disconnection before stopping + if n.statusNotifier != nil { + if err := n.statusNotifier.NotifyStatus(ctx, string(accountID), domInfo.serviceID, string(d), false); err != nil { + n.logger.WithFields(log.Fields{ + "account_id": accountID, + "domain": d, + }).WithError(err).Warn("failed to notify tunnel disconnection status") + } + } + + transport.CloseIdleConnections() + + if err := client.Stop(ctx); err != nil { + n.logger.WithFields(log.Fields{ + "account_id": accountID, + }).WithError(err).Warn("failed to stop netbird client") + } + + return nil +} + +// RoundTrip implements http.RoundTripper. It looks up the client for the account +// specified in the request context and uses it to dial the backend. +func (n *NetBird) RoundTrip(req *http.Request) (*http.Response, error) { + accountID := AccountIDFromContext(req.Context()) + if accountID == "" { + return nil, ErrNoAccountID + } + + // Copy references while holding lock, then unlock early to avoid blocking + // other requests during the potentially slow RoundTrip. + n.clientsMux.RLock() + entry, exists := n.clients[accountID] + if !exists { + n.clientsMux.RUnlock() + return nil, fmt.Errorf("%w for account: %s", ErrNoPeerConnection, accountID) + } + client := entry.client + transport := entry.transport + n.clientsMux.RUnlock() + + release, ok := entry.acquireInflight(req.URL.Host) + defer release() + if !ok { + return nil, ErrTooManyInflight + } + + // Attempt to start the client, if the client is already running then + // it will return an error that we ignore, if this hits a timeout then + // this request is unprocessable. + startCtx, cancel := context.WithTimeout(req.Context(), 30*time.Second) + defer cancel() + if err := client.Start(startCtx); err != nil { + if !errors.Is(err, embed.ErrClientAlreadyStarted) { + return nil, fmt.Errorf("%w: %w", ErrClientStartFailed, err) + } + } + + start := time.Now() + resp, err := transport.RoundTrip(req) + duration := time.Since(start) + + if err != nil { + n.logger.Debugf("roundtrip: method=%s host=%s url=%s account=%s duration=%s err=%v", + req.Method, req.Host, req.URL.String(), accountID, duration.Truncate(time.Millisecond), err) + return nil, err + } + + n.logger.Debugf("roundtrip: method=%s host=%s url=%s account=%s status=%d duration=%s", + req.Method, req.Host, req.URL.String(), accountID, resp.StatusCode, duration.Truncate(time.Millisecond)) + return resp, nil +} + +// StopAll stops all clients. +func (n *NetBird) StopAll(ctx context.Context) error { + n.clientsMux.Lock() + defer n.clientsMux.Unlock() + + var merr *multierror.Error + for accountID, entry := range n.clients { + entry.transport.CloseIdleConnections() + if err := entry.client.Stop(ctx); err != nil { + n.logger.WithFields(log.Fields{ + "account_id": accountID, + }).WithError(err).Warn("failed to stop netbird client during shutdown") + merr = multierror.Append(merr, err) + } + } + maps.Clear(n.clients) + + return nberrors.FormatErrorOrNil(merr) +} + +// HasClient returns true if there is a client for the given account. +func (n *NetBird) HasClient(accountID types.AccountID) bool { + n.clientsMux.RLock() + defer n.clientsMux.RUnlock() + _, exists := n.clients[accountID] + return exists +} + +// DomainCount returns the number of domains registered for the given account. +// Returns 0 if the account has no client. +func (n *NetBird) DomainCount(accountID types.AccountID) int { + n.clientsMux.RLock() + defer n.clientsMux.RUnlock() + entry, exists := n.clients[accountID] + if !exists { + return 0 + } + return len(entry.domains) +} + +// ClientCount returns the total number of active clients. +func (n *NetBird) ClientCount() int { + n.clientsMux.RLock() + defer n.clientsMux.RUnlock() + return len(n.clients) +} + +// GetClient returns the embed.Client for the given account ID. +func (n *NetBird) GetClient(accountID types.AccountID) (*embed.Client, bool) { + n.clientsMux.RLock() + defer n.clientsMux.RUnlock() + entry, exists := n.clients[accountID] + if !exists { + return nil, false + } + return entry.client, true +} + +// ListClientsForDebug returns information about all clients for debug purposes. +func (n *NetBird) ListClientsForDebug() map[types.AccountID]ClientDebugInfo { + n.clientsMux.RLock() + defer n.clientsMux.RUnlock() + + result := make(map[types.AccountID]ClientDebugInfo) + for accountID, entry := range n.clients { + domains := make(domain.List, 0, len(entry.domains)) + for d := range entry.domains { + domains = append(domains, d) + } + result[accountID] = ClientDebugInfo{ + AccountID: accountID, + DomainCount: len(entry.domains), + Domains: domains, + HasClient: entry.client != nil, + CreatedAt: entry.createdAt, + } + } + return result +} + +// ListClientsForStartup returns all embed.Client instances for health checks. +func (n *NetBird) ListClientsForStartup() map[types.AccountID]*embed.Client { + n.clientsMux.RLock() + defer n.clientsMux.RUnlock() + + result := make(map[types.AccountID]*embed.Client) + for accountID, entry := range n.clients { + if entry.client != nil { + result[accountID] = entry.client + } + } + return result +} + +// NewNetBird creates a new NetBird transport. Set wgPort to 0 for a random +// OS-assigned port. A fixed port only works with single-account deployments; +// multiple accounts will fail to bind the same port. +func NewNetBird(mgmtAddr, proxyID, proxyAddr string, wgPort int, logger *log.Logger, notifier statusNotifier, mgmtClient managementClient) *NetBird { + if logger == nil { + logger = log.StandardLogger() + } + return &NetBird{ + mgmtAddr: mgmtAddr, + proxyID: proxyID, + proxyAddr: proxyAddr, + wgPort: wgPort, + logger: logger, + clients: make(map[types.AccountID]*clientEntry), + statusNotifier: notifier, + mgmtClient: mgmtClient, + transportCfg: loadTransportConfig(logger), + } +} + +// WithAccountID adds the account ID to the context. +func WithAccountID(ctx context.Context, accountID types.AccountID) context.Context { + return context.WithValue(ctx, accountIDContextKey{}, accountID) +} + +// AccountIDFromContext retrieves the account ID from the context. +func AccountIDFromContext(ctx context.Context) types.AccountID { + v := ctx.Value(accountIDContextKey{}) + if v == nil { + return "" + } + accountID, ok := v.(types.AccountID) + if !ok { + return "" + } + return accountID +} diff --git a/proxy/internal/roundtrip/netbird_bench_test.go b/proxy/internal/roundtrip/netbird_bench_test.go new file mode 100644 index 000000000..e89213c33 --- /dev/null +++ b/proxy/internal/roundtrip/netbird_bench_test.go @@ -0,0 +1,107 @@ +package roundtrip + +import ( + "crypto/rand" + "math/big" + "sync" + "testing" + "time" + + "github.com/netbirdio/netbird/proxy/internal/types" + "github.com/netbirdio/netbird/shared/management/domain" +) + +// Simple benchmark for comparison with AddPeer contention. +func BenchmarkHasClient(b *testing.B) { + // Knobs for dialling in: + initialClientCount := 100 // Size of initial peer map to generate. + + nb := mockNetBird() + + var target types.AccountID + targetIndex, err := rand.Int(rand.Reader, big.NewInt(int64(initialClientCount))) + if err != nil { + b.Fatal(err) + } + for i := range initialClientCount { + id := types.AccountID(rand.Text()) + if int64(i) == targetIndex.Int64() { + target = id + } + nb.clients[id] = &clientEntry{ + domains: map[domain.Domain]domainInfo{ + domain.Domain(rand.Text()): { + serviceID: rand.Text(), + }, + }, + createdAt: time.Now(), + started: true, + } + } + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + nb.HasClient(target) + } + }) + b.StopTimer() +} + +func BenchmarkHasClientDuringAddPeer(b *testing.B) { + // Knobs for dialling in: + initialClientCount := 100 // Size of initial peer map to generate. + addPeerWorkers := 5 // Number of workers to concurrently call AddPeer. + + nb := mockNetBird() + + // Add random client entries to the netbird instance. + // We're trying to test map lock contention, so starting with + // a populated map should help with this. + // Pick a random one to target for retrieval later. + var target types.AccountID + targetIndex, err := rand.Int(rand.Reader, big.NewInt(int64(initialClientCount))) + if err != nil { + b.Fatal(err) + } + for i := range initialClientCount { + id := types.AccountID(rand.Text()) + if int64(i) == targetIndex.Int64() { + target = id + } + nb.clients[id] = &clientEntry{ + domains: map[domain.Domain]domainInfo{ + domain.Domain(rand.Text()): { + serviceID: rand.Text(), + }, + }, + createdAt: time.Now(), + started: true, + } + } + + // Launch workers that continuously call AddPeer with new random accountIDs. + var wg sync.WaitGroup + for range addPeerWorkers { + wg.Go(func() { + for { + if err := nb.AddPeer(b.Context(), + types.AccountID(rand.Text()), + domain.Domain(rand.Text()), + rand.Text(), + rand.Text()); err != nil { + b.Log(err) + } + } + }) + } + + // Benchmark calling HasClient during AddPeer contention. + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + nb.HasClient(target) + } + }) + b.StopTimer() +} diff --git a/proxy/internal/roundtrip/netbird_test.go b/proxy/internal/roundtrip/netbird_test.go new file mode 100644 index 000000000..3e76af9da --- /dev/null +++ b/proxy/internal/roundtrip/netbird_test.go @@ -0,0 +1,328 @@ +package roundtrip + +import ( + "context" + "net/http" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + + "github.com/netbirdio/netbird/proxy/internal/types" + "github.com/netbirdio/netbird/shared/management/domain" + "github.com/netbirdio/netbird/shared/management/proto" +) + +type mockMgmtClient struct{} + +func (m *mockMgmtClient) CreateProxyPeer(_ context.Context, _ *proto.CreateProxyPeerRequest, _ ...grpc.CallOption) (*proto.CreateProxyPeerResponse, error) { + return &proto.CreateProxyPeerResponse{Success: true}, nil +} + +type mockStatusNotifier struct { + mu sync.Mutex + statuses []statusCall +} + +type statusCall struct { + accountID string + serviceID string + domain string + connected bool +} + +func (m *mockStatusNotifier) NotifyStatus(_ context.Context, accountID, serviceID, domain string, connected bool) error { + m.mu.Lock() + defer m.mu.Unlock() + m.statuses = append(m.statuses, statusCall{accountID, serviceID, domain, connected}) + return nil +} + +func (m *mockStatusNotifier) calls() []statusCall { + m.mu.Lock() + defer m.mu.Unlock() + return append([]statusCall{}, m.statuses...) +} + +// mockNetBird creates a NetBird instance for testing without actually connecting. +// It uses an invalid management URL to prevent real connections. +func mockNetBird() *NetBird { + return NewNetBird("http://invalid.test:9999", "test-proxy", "invalid.test", 0, nil, nil, &mockMgmtClient{}) +} + +func TestNetBird_AddPeer_CreatesClientForNewAccount(t *testing.T) { + nb := mockNetBird() + accountID := types.AccountID("account-1") + + // Initially no client exists. + assert.False(t, nb.HasClient(accountID), "should not have client before AddPeer") + assert.Equal(t, 0, nb.DomainCount(accountID), "domain count should be 0") + + // Add first domain - this should create a new client. + // Note: This will fail to actually connect since we use an invalid URL, + // but the client entry should still be created. + err := nb.AddPeer(context.Background(), accountID, domain.Domain("domain1.test"), "setup-key-1", "proxy-1") + require.NoError(t, err) + + assert.True(t, nb.HasClient(accountID), "should have client after AddPeer") + assert.Equal(t, 1, nb.DomainCount(accountID), "domain count should be 1") +} + +func TestNetBird_AddPeer_ReuseClientForSameAccount(t *testing.T) { + nb := mockNetBird() + accountID := types.AccountID("account-1") + + // Add first domain. + err := nb.AddPeer(context.Background(), accountID, domain.Domain("domain1.test"), "setup-key-1", "proxy-1") + require.NoError(t, err) + assert.Equal(t, 1, nb.DomainCount(accountID)) + + // Add second domain for the same account - should reuse existing client. + err = nb.AddPeer(context.Background(), accountID, domain.Domain("domain2.test"), "setup-key-1", "proxy-2") + require.NoError(t, err) + assert.Equal(t, 2, nb.DomainCount(accountID), "domain count should be 2 after adding second domain") + + // Add third domain. + err = nb.AddPeer(context.Background(), accountID, domain.Domain("domain3.test"), "setup-key-1", "proxy-3") + require.NoError(t, err) + assert.Equal(t, 3, nb.DomainCount(accountID), "domain count should be 3 after adding third domain") + + // Still only one client. + assert.True(t, nb.HasClient(accountID)) +} + +func TestNetBird_AddPeer_SeparateClientsForDifferentAccounts(t *testing.T) { + nb := mockNetBird() + account1 := types.AccountID("account-1") + account2 := types.AccountID("account-2") + + // Add domain for account 1. + err := nb.AddPeer(context.Background(), account1, domain.Domain("domain1.test"), "setup-key-1", "proxy-1") + require.NoError(t, err) + + // Add domain for account 2. + err = nb.AddPeer(context.Background(), account2, domain.Domain("domain2.test"), "setup-key-2", "proxy-2") + require.NoError(t, err) + + // Both accounts should have their own clients. + assert.True(t, nb.HasClient(account1), "account1 should have client") + assert.True(t, nb.HasClient(account2), "account2 should have client") + assert.Equal(t, 1, nb.DomainCount(account1), "account1 domain count should be 1") + assert.Equal(t, 1, nb.DomainCount(account2), "account2 domain count should be 1") +} + +func TestNetBird_RemovePeer_KeepsClientWhenDomainsRemain(t *testing.T) { + nb := mockNetBird() + accountID := types.AccountID("account-1") + + // Add multiple domains. + err := nb.AddPeer(context.Background(), accountID, domain.Domain("domain1.test"), "setup-key-1", "proxy-1") + require.NoError(t, err) + err = nb.AddPeer(context.Background(), accountID, domain.Domain("domain2.test"), "setup-key-1", "proxy-2") + require.NoError(t, err) + err = nb.AddPeer(context.Background(), accountID, domain.Domain("domain3.test"), "setup-key-1", "proxy-3") + require.NoError(t, err) + assert.Equal(t, 3, nb.DomainCount(accountID)) + + // Remove one domain - client should remain. + err = nb.RemovePeer(context.Background(), accountID, "domain1.test") + require.NoError(t, err) + assert.True(t, nb.HasClient(accountID), "client should remain after removing one domain") + assert.Equal(t, 2, nb.DomainCount(accountID), "domain count should be 2") + + // Remove another domain - client should still remain. + err = nb.RemovePeer(context.Background(), accountID, "domain2.test") + require.NoError(t, err) + assert.True(t, nb.HasClient(accountID), "client should remain after removing second domain") + assert.Equal(t, 1, nb.DomainCount(accountID), "domain count should be 1") +} + +func TestNetBird_RemovePeer_RemovesClientWhenLastDomainRemoved(t *testing.T) { + nb := mockNetBird() + accountID := types.AccountID("account-1") + + // Add single domain. + err := nb.AddPeer(context.Background(), accountID, domain.Domain("domain1.test"), "setup-key-1", "proxy-1") + require.NoError(t, err) + assert.True(t, nb.HasClient(accountID)) + + // Remove the only domain - client should be removed. + // Note: Stop() may fail since the client never actually connected, + // but the entry should still be removed from the map. + _ = nb.RemovePeer(context.Background(), accountID, "domain1.test") + + // After removing all domains, client should be gone. + assert.False(t, nb.HasClient(accountID), "client should be removed after removing last domain") + assert.Equal(t, 0, nb.DomainCount(accountID), "domain count should be 0") +} + +func TestNetBird_RemovePeer_NonExistentAccountIsNoop(t *testing.T) { + nb := mockNetBird() + accountID := types.AccountID("nonexistent-account") + + // Removing from non-existent account should not error. + err := nb.RemovePeer(context.Background(), accountID, "domain1.test") + assert.NoError(t, err, "removing from non-existent account should not error") +} + +func TestNetBird_RemovePeer_NonExistentDomainIsNoop(t *testing.T) { + nb := mockNetBird() + accountID := types.AccountID("account-1") + + // Add one domain. + err := nb.AddPeer(context.Background(), accountID, domain.Domain("domain1.test"), "setup-key-1", "proxy-1") + require.NoError(t, err) + + // Remove non-existent domain - should not affect existing domain. + err = nb.RemovePeer(context.Background(), accountID, domain.Domain("nonexistent.test")) + require.NoError(t, err) + + // Original domain should still be registered. + assert.True(t, nb.HasClient(accountID)) + assert.Equal(t, 1, nb.DomainCount(accountID), "original domain should remain") +} + +func TestWithAccountID_AndAccountIDFromContext(t *testing.T) { + ctx := context.Background() + accountID := types.AccountID("test-account") + + // Initially no account ID in context. + retrieved := AccountIDFromContext(ctx) + assert.True(t, retrieved == "", "should be empty when not set") + + // Add account ID to context. + ctx = WithAccountID(ctx, accountID) + retrieved = AccountIDFromContext(ctx) + assert.Equal(t, accountID, retrieved, "should retrieve the same account ID") +} + +func TestAccountIDFromContext_ReturnsEmptyForWrongType(t *testing.T) { + // Create context with wrong type for account ID key. + ctx := context.WithValue(context.Background(), accountIDContextKey{}, "wrong-type-string") + + retrieved := AccountIDFromContext(ctx) + assert.True(t, retrieved == "", "should return empty for wrong type") +} + +func TestNetBird_StopAll_StopsAllClients(t *testing.T) { + nb := mockNetBird() + account1 := types.AccountID("account-1") + account2 := types.AccountID("account-2") + account3 := types.AccountID("account-3") + + // Add domains for multiple accounts. + err := nb.AddPeer(context.Background(), account1, domain.Domain("domain1.test"), "key-1", "proxy-1") + require.NoError(t, err) + err = nb.AddPeer(context.Background(), account2, domain.Domain("domain2.test"), "key-2", "proxy-2") + require.NoError(t, err) + err = nb.AddPeer(context.Background(), account3, domain.Domain("domain3.test"), "key-3", "proxy-3") + require.NoError(t, err) + + assert.Equal(t, 3, nb.ClientCount(), "should have 3 clients") + + // Stop all clients. + // Note: StopAll may return errors since clients never actually connected, + // but the clients should still be removed from the map. + _ = nb.StopAll(context.Background()) + + assert.Equal(t, 0, nb.ClientCount(), "should have 0 clients after StopAll") + assert.False(t, nb.HasClient(account1), "account1 should not have client") + assert.False(t, nb.HasClient(account2), "account2 should not have client") + assert.False(t, nb.HasClient(account3), "account3 should not have client") +} + +func TestNetBird_ClientCount(t *testing.T) { + nb := mockNetBird() + + assert.Equal(t, 0, nb.ClientCount(), "should start with 0 clients") + + // Add clients for different accounts. + err := nb.AddPeer(context.Background(), types.AccountID("account-1"), domain.Domain("domain1.test"), "key-1", "proxy-1") + require.NoError(t, err) + assert.Equal(t, 1, nb.ClientCount()) + + err = nb.AddPeer(context.Background(), types.AccountID("account-2"), domain.Domain("domain2.test"), "key-2", "proxy-2") + require.NoError(t, err) + assert.Equal(t, 2, nb.ClientCount()) + + // Adding domain to existing account should not increase count. + err = nb.AddPeer(context.Background(), types.AccountID("account-1"), domain.Domain("domain1b.test"), "key-1", "proxy-1b") + require.NoError(t, err) + assert.Equal(t, 2, nb.ClientCount(), "adding domain to existing account should not increase client count") +} + +func TestNetBird_RoundTrip_RequiresAccountIDInContext(t *testing.T) { + nb := mockNetBird() + + // Create a request without account ID in context. + req, err := http.NewRequest("GET", "http://example.com/", nil) + require.NoError(t, err) + + // RoundTrip should fail because no account ID in context. + _, err = nb.RoundTrip(req) //nolint:bodyclose + require.ErrorIs(t, err, ErrNoAccountID) +} + +func TestNetBird_RoundTrip_RequiresExistingClient(t *testing.T) { + nb := mockNetBird() + accountID := types.AccountID("nonexistent-account") + + // Create a request with account ID but no client exists. + req, err := http.NewRequest("GET", "http://example.com/", nil) + require.NoError(t, err) + req = req.WithContext(WithAccountID(req.Context(), accountID)) + + // RoundTrip should fail because no client for this account. + _, err = nb.RoundTrip(req) //nolint:bodyclose // Error case, no response body + assert.Error(t, err) + assert.Contains(t, err.Error(), "no peer connection found for account") +} + +func TestNetBird_AddPeer_ExistingStartedClient_NotifiesStatus(t *testing.T) { + notifier := &mockStatusNotifier{} + nb := NewNetBird("http://invalid.test:9999", "test-proxy", "invalid.test", 0, nil, notifier, &mockMgmtClient{}) + accountID := types.AccountID("account-1") + + // Add first domain — creates a new client entry. + err := nb.AddPeer(context.Background(), accountID, domain.Domain("domain1.test"), "key-1", "svc-1") + require.NoError(t, err) + + // Manually mark client as started to simulate background startup completing. + nb.clientsMux.Lock() + nb.clients[accountID].started = true + nb.clientsMux.Unlock() + + // Add second domain — should notify immediately since client is already started. + err = nb.AddPeer(context.Background(), accountID, domain.Domain("domain2.test"), "key-1", "svc-2") + require.NoError(t, err) + + calls := notifier.calls() + require.Len(t, calls, 1) + assert.Equal(t, string(accountID), calls[0].accountID) + assert.Equal(t, "svc-2", calls[0].serviceID) + assert.Equal(t, "domain2.test", calls[0].domain) + assert.True(t, calls[0].connected) +} + +func TestNetBird_RemovePeer_NotifiesDisconnection(t *testing.T) { + notifier := &mockStatusNotifier{} + nb := NewNetBird("http://invalid.test:9999", "test-proxy", "invalid.test", 0, nil, notifier, &mockMgmtClient{}) + accountID := types.AccountID("account-1") + + err := nb.AddPeer(context.Background(), accountID, domain.Domain("domain1.test"), "key-1", "svc-1") + require.NoError(t, err) + err = nb.AddPeer(context.Background(), accountID, domain.Domain("domain2.test"), "key-1", "svc-2") + require.NoError(t, err) + + // Remove one domain — client stays, but disconnection notification fires. + err = nb.RemovePeer(context.Background(), accountID, "domain1.test") + require.NoError(t, err) + assert.True(t, nb.HasClient(accountID)) + + calls := notifier.calls() + require.Len(t, calls, 1) + assert.Equal(t, "domain1.test", calls[0].domain) + assert.False(t, calls[0].connected) +} diff --git a/proxy/internal/roundtrip/transport.go b/proxy/internal/roundtrip/transport.go new file mode 100644 index 000000000..7c450bbb7 --- /dev/null +++ b/proxy/internal/roundtrip/transport.go @@ -0,0 +1,152 @@ +package roundtrip + +import ( + "os" + "strconv" + "time" + + log "github.com/sirupsen/logrus" +) + +// Environment variable names for tuning the backend HTTP transport. +const ( + EnvMaxIdleConns = "NB_PROXY_MAX_IDLE_CONNS" + EnvMaxIdleConnsPerHost = "NB_PROXY_MAX_IDLE_CONNS_PER_HOST" + EnvMaxConnsPerHost = "NB_PROXY_MAX_CONNS_PER_HOST" + EnvIdleConnTimeout = "NB_PROXY_IDLE_CONN_TIMEOUT" + EnvTLSHandshakeTimeout = "NB_PROXY_TLS_HANDSHAKE_TIMEOUT" + EnvExpectContinueTimeout = "NB_PROXY_EXPECT_CONTINUE_TIMEOUT" + EnvResponseHeaderTimeout = "NB_PROXY_RESPONSE_HEADER_TIMEOUT" + EnvWriteBufferSize = "NB_PROXY_WRITE_BUFFER_SIZE" + EnvReadBufferSize = "NB_PROXY_READ_BUFFER_SIZE" + EnvDisableCompression = "NB_PROXY_DISABLE_COMPRESSION" + EnvMaxInflight = "NB_PROXY_MAX_INFLIGHT" +) + +// transportConfig holds tunable parameters for the per-account HTTP transport. +type transportConfig struct { + maxIdleConns int + maxIdleConnsPerHost int + maxConnsPerHost int + idleConnTimeout time.Duration + tlsHandshakeTimeout time.Duration + expectContinueTimeout time.Duration + responseHeaderTimeout time.Duration + writeBufferSize int + readBufferSize int + disableCompression bool + // maxInflight limits per-backend concurrent requests. 0 means unlimited. + maxInflight int +} + +func defaultTransportConfig() transportConfig { + return transportConfig{ + maxIdleConns: 100, + maxIdleConnsPerHost: 100, + maxConnsPerHost: 0, // unlimited + idleConnTimeout: 90 * time.Second, + tlsHandshakeTimeout: 10 * time.Second, + expectContinueTimeout: 1 * time.Second, + } +} + +func loadTransportConfig(logger *log.Logger) transportConfig { + cfg := defaultTransportConfig() + + if v, ok := envInt(EnvMaxIdleConns, logger); ok { + cfg.maxIdleConns = v + } + if v, ok := envInt(EnvMaxIdleConnsPerHost, logger); ok { + cfg.maxIdleConnsPerHost = v + } + if v, ok := envInt(EnvMaxConnsPerHost, logger); ok { + cfg.maxConnsPerHost = v + } + if v, ok := envDuration(EnvIdleConnTimeout, logger); ok { + cfg.idleConnTimeout = v + } + if v, ok := envDuration(EnvTLSHandshakeTimeout, logger); ok { + cfg.tlsHandshakeTimeout = v + } + if v, ok := envDuration(EnvExpectContinueTimeout, logger); ok { + cfg.expectContinueTimeout = v + } + if v, ok := envDuration(EnvResponseHeaderTimeout, logger); ok { + cfg.responseHeaderTimeout = v + } + if v, ok := envInt(EnvWriteBufferSize, logger); ok { + cfg.writeBufferSize = v + } + if v, ok := envInt(EnvReadBufferSize, logger); ok { + cfg.readBufferSize = v + } + if v, ok := envBool(EnvDisableCompression, logger); ok { + cfg.disableCompression = v + } + if v, ok := envInt(EnvMaxInflight, logger); ok { + cfg.maxInflight = v + } + + logger.WithFields(log.Fields{ + "max_idle_conns": cfg.maxIdleConns, + "max_idle_conns_per_host": cfg.maxIdleConnsPerHost, + "max_conns_per_host": cfg.maxConnsPerHost, + "idle_conn_timeout": cfg.idleConnTimeout, + "tls_handshake_timeout": cfg.tlsHandshakeTimeout, + "expect_continue_timeout": cfg.expectContinueTimeout, + "response_header_timeout": cfg.responseHeaderTimeout, + "write_buffer_size": cfg.writeBufferSize, + "read_buffer_size": cfg.readBufferSize, + "disable_compression": cfg.disableCompression, + "max_inflight": cfg.maxInflight, + }).Debug("backend transport configuration") + + return cfg +} + +func envInt(key string, logger *log.Logger) (int, bool) { + s := os.Getenv(key) + if s == "" { + return 0, false + } + v, err := strconv.Atoi(s) + if err != nil { + logger.Warnf("failed to parse %s=%q as int: %v", key, s, err) + return 0, false + } + if v < 0 { + logger.Warnf("ignoring negative value for %s=%d", key, v) + return 0, false + } + return v, true +} + +func envDuration(key string, logger *log.Logger) (time.Duration, bool) { + s := os.Getenv(key) + if s == "" { + return 0, false + } + v, err := time.ParseDuration(s) + if err != nil { + logger.Warnf("failed to parse %s=%q as duration: %v", key, s, err) + return 0, false + } + if v < 0 { + logger.Warnf("ignoring negative value for %s=%s", key, v) + return 0, false + } + return v, true +} + +func envBool(key string, logger *log.Logger) (bool, bool) { + s := os.Getenv(key) + if s == "" { + return false, false + } + v, err := strconv.ParseBool(s) + if err != nil { + logger.Warnf("failed to parse %s=%q as bool: %v", key, s, err) + return false, false + } + return v, true +} diff --git a/proxy/internal/types/types.go b/proxy/internal/types/types.go new file mode 100644 index 000000000..41acfef40 --- /dev/null +++ b/proxy/internal/types/types.go @@ -0,0 +1,5 @@ +// Package types defines common types used across the proxy package. +package types + +// AccountID represents a unique identifier for a NetBird account. +type AccountID string diff --git a/proxy/log.go b/proxy/log.go new file mode 100644 index 000000000..79562989e --- /dev/null +++ b/proxy/log.go @@ -0,0 +1,21 @@ +package proxy + +import ( + stdlog "log" + + log "github.com/sirupsen/logrus" +) + +const ( + // HTTP server type identifiers for logging + logtagFieldHTTPServer = "http-server" + logtagValueHTTPS = "https" + logtagValueACME = "acme" + logtagValueDebug = "debug" +) + +// newHTTPServerLogger creates a standard library logger that writes to logrus +// with the specified server type field. +func newHTTPServerLogger(logger *log.Logger, serverType string) *stdlog.Logger { + return stdlog.New(logger.WithField(logtagFieldHTTPServer, serverType).WriterLevel(log.WarnLevel), "", 0) +} diff --git a/proxy/management_integration_test.go b/proxy/management_integration_test.go new file mode 100644 index 000000000..53d7019f7 --- /dev/null +++ b/proxy/management_integration_test.go @@ -0,0 +1,548 @@ +package proxy + +import ( + "context" + "crypto/ed25519" + "crypto/rand" + "encoding/base64" + "errors" + "net" + "sync" + "sync/atomic" + "testing" + "time" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" + nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/management/server/users" + "github.com/netbirdio/netbird/proxy/internal/auth" + "github.com/netbirdio/netbird/proxy/internal/proxy" + proxytypes "github.com/netbirdio/netbird/proxy/internal/types" + "github.com/netbirdio/netbird/shared/management/proto" +) + +// integrationTestSetup contains all real components for testing. +type integrationTestSetup struct { + store store.Store + proxyService *nbgrpc.ProxyServiceServer + grpcServer *grpc.Server + grpcAddr string + cleanup func() + services []*reverseproxy.Service +} + +func setupIntegrationTest(t *testing.T) *integrationTestSetup { + t.Helper() + + ctx := context.Background() + + // Create real SQLite store + testStore, cleanup, err := store.NewTestStoreFromSQL(ctx, "", t.TempDir()) + require.NoError(t, err) + + // Create test account + testAccount := &types.Account{ + Id: "test-account-1", + Domain: "test.com", + DomainCategory: "private", + IsDomainPrimaryAccount: true, + CreatedAt: time.Now(), + } + require.NoError(t, testStore.SaveAccount(ctx, testAccount)) + + // Generate session keys for reverse proxies + pub, priv, err := ed25519.GenerateKey(rand.Reader) + require.NoError(t, err) + pubKey := base64.StdEncoding.EncodeToString(pub) + privKey := base64.StdEncoding.EncodeToString(priv) + + // Create test services in the store + services := []*reverseproxy.Service{ + { + ID: "rp-1", + AccountID: "test-account-1", + Name: "Test App 1", + Domain: "app1.test.proxy.io", + Targets: []*reverseproxy.Target{{ + Path: strPtr("/"), + Host: "10.0.0.1", + Port: 8080, + Protocol: "http", + TargetId: "peer1", + TargetType: "peer", + Enabled: true, + }}, + Enabled: true, + ProxyCluster: "test.proxy.io", + SessionPrivateKey: privKey, + SessionPublicKey: pubKey, + }, + { + ID: "rp-2", + AccountID: "test-account-1", + Name: "Test App 2", + Domain: "app2.test.proxy.io", + Targets: []*reverseproxy.Target{{ + Path: strPtr("/"), + Host: "10.0.0.2", + Port: 8080, + Protocol: "http", + TargetId: "peer2", + TargetType: "peer", + Enabled: true, + }}, + Enabled: true, + ProxyCluster: "test.proxy.io", + SessionPrivateKey: privKey, + SessionPublicKey: pubKey, + }, + } + + for _, svc := range services { + require.NoError(t, testStore.CreateService(ctx, svc)) + } + + // Create real token store + tokenStore := nbgrpc.NewOneTimeTokenStore(5 * time.Minute) + + // Create real users manager + usersManager := users.NewManager(testStore) + + // Create real proxy service server with minimal config + oidcConfig := nbgrpc.ProxyOIDCConfig{ + Issuer: "https://fake-issuer.example.com", + ClientID: "test-client", + HMACKey: []byte("test-hmac-key"), + } + + proxyService := nbgrpc.NewProxyServiceServer( + &testAccessLogManager{}, + tokenStore, + oidcConfig, + nil, + usersManager, + ) + + // Use store-backed service manager + svcMgr := &storeBackedServiceManager{store: testStore, tokenStore: tokenStore} + proxyService.SetProxyManager(svcMgr) + + // Start real gRPC server + lis, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + grpcServer := grpc.NewServer() + proto.RegisterProxyServiceServer(grpcServer, proxyService) + + go func() { + if err := grpcServer.Serve(lis); err != nil { + t.Logf("gRPC server error: %v", err) + } + }() + + return &integrationTestSetup{ + store: testStore, + proxyService: proxyService, + grpcServer: grpcServer, + grpcAddr: lis.Addr().String(), + services: services, + cleanup: func() { + grpcServer.GracefulStop() + cleanup() + }, + } +} + +// testAccessLogManager provides access log storage for testing. +type testAccessLogManager struct{} + +func (m *testAccessLogManager) SaveAccessLog(_ context.Context, _ *accesslogs.AccessLogEntry) error { + return nil +} + +func (m *testAccessLogManager) GetAllAccessLogs(_ context.Context, _, _ string, _ *accesslogs.AccessLogFilter) ([]*accesslogs.AccessLogEntry, int64, error) { + return nil, 0, nil +} + +// storeBackedServiceManager reads directly from the real store. +type storeBackedServiceManager struct { + store store.Store + tokenStore *nbgrpc.OneTimeTokenStore +} + +func (m *storeBackedServiceManager) GetAllServices(ctx context.Context, accountID, userID string) ([]*reverseproxy.Service, error) { + return m.store.GetAccountServices(ctx, store.LockingStrengthNone, accountID) +} + +func (m *storeBackedServiceManager) GetService(ctx context.Context, accountID, userID, serviceID string) (*reverseproxy.Service, error) { + return m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, serviceID) +} + +func (m *storeBackedServiceManager) CreateService(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { + return nil, errors.New("not implemented") +} + +func (m *storeBackedServiceManager) UpdateService(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { + return nil, errors.New("not implemented") +} + +func (m *storeBackedServiceManager) DeleteService(ctx context.Context, accountID, userID, serviceID string) error { + return nil +} + +func (m *storeBackedServiceManager) SetCertificateIssuedAt(ctx context.Context, accountID, serviceID string) error { + return nil +} + +func (m *storeBackedServiceManager) SetStatus(ctx context.Context, accountID, serviceID string, status reverseproxy.ProxyStatus) error { + return nil +} + +func (m *storeBackedServiceManager) ReloadAllServicesForAccount(ctx context.Context, accountID string) error { + return nil +} + +func (m *storeBackedServiceManager) ReloadService(ctx context.Context, accountID, serviceID string) error { + return nil +} + +func (m *storeBackedServiceManager) GetGlobalServices(ctx context.Context) ([]*reverseproxy.Service, error) { + return m.store.GetAccountServices(ctx, store.LockingStrengthNone, "test-account-1") +} + +func (m *storeBackedServiceManager) GetServiceByID(ctx context.Context, accountID, serviceID string) (*reverseproxy.Service, error) { + return m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, serviceID) +} + +func (m *storeBackedServiceManager) GetAccountServices(ctx context.Context, accountID string) ([]*reverseproxy.Service, error) { + return m.store.GetAccountServices(ctx, store.LockingStrengthNone, accountID) +} + +func (m *storeBackedServiceManager) GetServiceIDByTargetID(ctx context.Context, accountID string, targetID string) (string, error) { + return "", nil +} + +func strPtr(s string) *string { + return &s +} + +func TestIntegration_ProxyConnection_HappyPath(t *testing.T) { + setup := setupIntegrationTest(t) + defer setup.cleanup() + + conn, err := grpc.NewClient(setup.grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + require.NoError(t, err) + defer conn.Close() + + client := proto.NewProxyServiceClient(conn) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + stream, err := client.GetMappingUpdate(ctx, &proto.GetMappingUpdateRequest{ + ProxyId: "test-proxy-1", + Version: "test-v1", + Address: "test.proxy.io", + }) + require.NoError(t, err) + + // Receive all mappings from the snapshot - server sends each mapping individually + mappingsByID := make(map[string]*proto.ProxyMapping) + for i := 0; i < 2; i++ { + msg, err := stream.Recv() + require.NoError(t, err) + for _, m := range msg.GetMapping() { + mappingsByID[m.GetId()] = m + } + } + + // Should receive 2 mappings total + assert.Len(t, mappingsByID, 2, "Should receive 2 reverse proxy mappings") + + rp1 := mappingsByID["rp-1"] + require.NotNil(t, rp1) + assert.Equal(t, "app1.test.proxy.io", rp1.GetDomain()) + assert.Equal(t, "test-account-1", rp1.GetAccountId()) + assert.Equal(t, proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, rp1.GetType()) + assert.NotEmpty(t, rp1.GetAuthToken(), "Should have auth token for peer creation") + + rp2 := mappingsByID["rp-2"] + require.NotNil(t, rp2) + assert.Equal(t, "app2.test.proxy.io", rp2.GetDomain()) +} + +func TestIntegration_ProxyConnection_SendsClusterAddress(t *testing.T) { + setup := setupIntegrationTest(t) + defer setup.cleanup() + + conn, err := grpc.NewClient(setup.grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + require.NoError(t, err) + defer conn.Close() + + client := proto.NewProxyServiceClient(conn) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + clusterAddress := "test.proxy.io" + + stream, err := client.GetMappingUpdate(ctx, &proto.GetMappingUpdateRequest{ + ProxyId: "test-proxy-cluster", + Version: "test-v1", + Address: clusterAddress, + }) + require.NoError(t, err) + + // Receive all mappings - server sends each mapping individually + mappings := make([]*proto.ProxyMapping, 0) + for i := 0; i < 2; i++ { + msg, err := stream.Recv() + require.NoError(t, err) + mappings = append(mappings, msg.GetMapping()...) + } + + // Should receive the 2 mappings matching the cluster + assert.Len(t, mappings, 2, "Should receive mappings for the cluster") + + for _, mapping := range mappings { + t.Logf("Received mapping: id=%s domain=%s", mapping.GetId(), mapping.GetDomain()) + } +} + +func TestIntegration_ProxyConnection_Reconnect_ReceivesSameConfig(t *testing.T) { + setup := setupIntegrationTest(t) + defer setup.cleanup() + + conn, err := grpc.NewClient(setup.grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + require.NoError(t, err) + defer conn.Close() + + client := proto.NewProxyServiceClient(conn) + + clusterAddress := "test.proxy.io" + proxyID := "test-proxy-reconnect" + + // Helper to receive all mappings from a stream + receiveMappings := func(stream proto.ProxyService_GetMappingUpdateClient, count int) []*proto.ProxyMapping { + var mappings []*proto.ProxyMapping + for i := 0; i < count; i++ { + msg, err := stream.Recv() + require.NoError(t, err) + mappings = append(mappings, msg.GetMapping()...) + } + return mappings + } + + // First connection + ctx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Second) + stream1, err := client.GetMappingUpdate(ctx1, &proto.GetMappingUpdateRequest{ + ProxyId: proxyID, + Version: "test-v1", + Address: clusterAddress, + }) + require.NoError(t, err) + + firstMappings := receiveMappings(stream1, 2) + cancel1() + + time.Sleep(100 * time.Millisecond) + + // Second connection (simulating reconnect) + ctx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel2() + + stream2, err := client.GetMappingUpdate(ctx2, &proto.GetMappingUpdateRequest{ + ProxyId: proxyID, + Version: "test-v1", + Address: clusterAddress, + }) + require.NoError(t, err) + + secondMappings := receiveMappings(stream2, 2) + + // Should receive the same mappings + assert.Equal(t, len(firstMappings), len(secondMappings), + "Should receive same number of mappings on reconnect") + + firstIDs := make(map[string]bool) + for _, m := range firstMappings { + firstIDs[m.GetId()] = true + } + + for _, m := range secondMappings { + assert.True(t, firstIDs[m.GetId()], + "Mapping %s should be present in both connections", m.GetId()) + } +} + +func TestIntegration_ProxyConnection_ReconnectDoesNotDuplicateState(t *testing.T) { + setup := setupIntegrationTest(t) + defer setup.cleanup() + + conn, err := grpc.NewClient(setup.grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + require.NoError(t, err) + defer conn.Close() + + client := proto.NewProxyServiceClient(conn) + + // Use real auth middleware and proxy to verify idempotency + logger := log.New() + logger.SetLevel(log.WarnLevel) + + authMw := auth.NewMiddleware(logger, nil) + proxyHandler := proxy.NewReverseProxy(nil, "auto", nil, logger) + + clusterAddress := "test.proxy.io" + proxyID := "test-proxy-idempotent" + + var addMappingCalls atomic.Int32 + + applyMappings := func(mappings []*proto.ProxyMapping) { + for _, mapping := range mappings { + if mapping.GetType() == proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED { + addMappingCalls.Add(1) + + // Apply to real auth middleware (idempotent) + err := authMw.AddDomain( + mapping.GetDomain(), + nil, + "", + 0, + mapping.GetAccountId(), + mapping.GetId(), + ) + require.NoError(t, err) + + // Apply to real proxy (idempotent) + proxyHandler.AddMapping(proxy.Mapping{ + Host: mapping.GetDomain(), + ID: mapping.GetId(), + AccountID: proxytypes.AccountID(mapping.GetAccountId()), + }) + } + } + } + + // Helper to receive and apply all mappings + receiveAndApply := func(stream proto.ProxyService_GetMappingUpdateClient) { + for i := 0; i < 2; i++ { + msg, err := stream.Recv() + require.NoError(t, err) + applyMappings(msg.GetMapping()) + } + } + + // First connection + ctx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Second) + stream1, err := client.GetMappingUpdate(ctx1, &proto.GetMappingUpdateRequest{ + ProxyId: proxyID, + Version: "test-v1", + Address: clusterAddress, + }) + require.NoError(t, err) + + receiveAndApply(stream1) + cancel1() + + firstCallCount := addMappingCalls.Load() + t.Logf("First connection: applied %d mappings", firstCallCount) + + time.Sleep(100 * time.Millisecond) + + // Second connection + ctx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second) + stream2, err := client.GetMappingUpdate(ctx2, &proto.GetMappingUpdateRequest{ + ProxyId: proxyID, + Version: "test-v1", + Address: clusterAddress, + }) + require.NoError(t, err) + + receiveAndApply(stream2) + cancel2() + + time.Sleep(100 * time.Millisecond) + + // Third connection + ctx3, cancel3 := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel3() + + stream3, err := client.GetMappingUpdate(ctx3, &proto.GetMappingUpdateRequest{ + ProxyId: proxyID, + Version: "test-v1", + Address: clusterAddress, + }) + require.NoError(t, err) + + receiveAndApply(stream3) + + totalCalls := addMappingCalls.Load() + t.Logf("After three connections: total applied %d mappings", totalCalls) + + // Should have called addMapping 6 times (2 mappings x 3 connections) + // But internal state is NOT duplicated because auth and proxy use maps keyed by domain/host + assert.Equal(t, int32(6), totalCalls, "Should have 6 total calls (2 mappings x 3 connections)") +} + +func TestIntegration_ProxyConnection_MultipleProxiesReceiveUpdates(t *testing.T) { + setup := setupIntegrationTest(t) + defer setup.cleanup() + + clusterAddress := "test.proxy.io" + + var wg sync.WaitGroup + var mu sync.Mutex + receivedByProxy := make(map[string]int) + + for i := 1; i <= 3; i++ { + wg.Add(1) + go func(proxyNum int) { + defer wg.Done() + + conn, err := grpc.NewClient(setup.grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + require.NoError(t, err) + defer conn.Close() + + client := proto.NewProxyServiceClient(conn) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + proxyID := "test-proxy-" + string(rune('A'+proxyNum-1)) + + stream, err := client.GetMappingUpdate(ctx, &proto.GetMappingUpdateRequest{ + ProxyId: proxyID, + Version: "test-v1", + Address: clusterAddress, + }) + require.NoError(t, err) + + // Receive all mappings - server sends each mapping individually + count := 0 + for i := 0; i < 2; i++ { + msg, err := stream.Recv() + require.NoError(t, err) + count += len(msg.GetMapping()) + } + + mu.Lock() + receivedByProxy[proxyID] = count + mu.Unlock() + }(i) + } + + wg.Wait() + + for proxyID, count := range receivedByProxy { + assert.Equal(t, 2, count, "Proxy %s should receive 2 mappings", proxyID) + } +} diff --git a/proxy/server.go b/proxy/server.go new file mode 100644 index 000000000..c1be69529 --- /dev/null +++ b/proxy/server.go @@ -0,0 +1,653 @@ +// Package proxy runs a NetBird proxy server. +// It attempts to do everything it needs to do within the context +// of a single request to the server to try to reduce the amount +// of concurrency coordination that is required. However, it does +// run two additional routines in an error group for handling +// updates from the management server and running a separate +// HTTP server to handle ACME HTTP-01 challenges (if configured). +package proxy + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/netip" + "net/url" + "path/filepath" + "sync" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/keepalive" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/netbirdio/netbird/proxy/internal/accesslog" + "github.com/netbirdio/netbird/proxy/internal/acme" + "github.com/netbirdio/netbird/proxy/internal/auth" + "github.com/netbirdio/netbird/proxy/internal/certwatch" + "github.com/netbirdio/netbird/proxy/internal/debug" + proxygrpc "github.com/netbirdio/netbird/proxy/internal/grpc" + "github.com/netbirdio/netbird/proxy/internal/health" + "github.com/netbirdio/netbird/proxy/internal/k8s" + "github.com/netbirdio/netbird/proxy/internal/metrics" + "github.com/netbirdio/netbird/proxy/internal/proxy" + "github.com/netbirdio/netbird/proxy/internal/roundtrip" + "github.com/netbirdio/netbird/proxy/internal/types" + "github.com/netbirdio/netbird/proxy/web" + "github.com/netbirdio/netbird/shared/management/domain" + "github.com/netbirdio/netbird/shared/management/proto" + "github.com/netbirdio/netbird/util/embeddedroots" +) + +type Server struct { + mgmtClient proto.ProxyServiceClient + proxy *proxy.ReverseProxy + netbird *roundtrip.NetBird + acme *acme.Manager + auth *auth.Middleware + http *http.Server + https *http.Server + debug *http.Server + healthServer *health.Server + healthChecker *health.Checker + meter *metrics.Metrics + + // Mostly used for debugging on management. + startTime time.Time + + ID string + Logger *log.Logger + Version string + ProxyURL string + ManagementAddress string + CertificateDirectory string + CertificateFile string + CertificateKeyFile string + GenerateACMECertificates bool + ACMEChallengeAddress string + ACMEDirectory string + // ACMEChallengeType specifies the ACME challenge type: "http-01" or "tls-alpn-01". + // Defaults to "tls-alpn-01" if not specified. + ACMEChallengeType string + // CertLockMethod controls how ACME certificate locks are coordinated + // across replicas. Default: CertLockAuto (detect environment). + CertLockMethod acme.CertLockMethod + OIDCClientId string + OIDCClientSecret string + OIDCEndpoint string + OIDCScopes []string + + // DebugEndpointEnabled enables the debug HTTP endpoint. + DebugEndpointEnabled bool + // DebugEndpointAddress is the address for the debug HTTP endpoint (default: ":8444"). + DebugEndpointAddress string + // HealthAddress is the address for the health probe endpoint (default: "localhost:8080"). + HealthAddress string + // ProxyToken is the access token for authenticating with the management server. + ProxyToken string + // ForwardedProto overrides the X-Forwarded-Proto value sent to backends. + // Valid values: "auto" (detect from TLS), "http", "https". + ForwardedProto string + // TrustedProxies is a list of IP prefixes for trusted upstream proxies. + // When set, forwarding headers from these sources are preserved and + // appended to instead of being stripped. + TrustedProxies []netip.Prefix + // WireguardPort is the port for the WireGuard interface. Use 0 for a + // random OS-assigned port. A fixed port only works with single-account + // deployments; multiple accounts will fail to bind the same port. + WireguardPort int +} + +// NotifyStatus sends a status update to management about tunnel connectivity +func (s *Server) NotifyStatus(ctx context.Context, accountID, serviceID, domain string, connected bool) error { + status := proto.ProxyStatus_PROXY_STATUS_TUNNEL_NOT_CREATED + if connected { + status = proto.ProxyStatus_PROXY_STATUS_ACTIVE + } + + _, err := s.mgmtClient.SendStatusUpdate(ctx, &proto.SendStatusUpdateRequest{ + ServiceId: serviceID, + AccountId: accountID, + Status: status, + CertificateIssued: false, + }) + return err +} + +// NotifyCertificateIssued sends a notification to management that a certificate was issued +func (s *Server) NotifyCertificateIssued(ctx context.Context, accountID, serviceID, domain string) error { + _, err := s.mgmtClient.SendStatusUpdate(ctx, &proto.SendStatusUpdateRequest{ + ServiceId: serviceID, + AccountId: accountID, + Status: proto.ProxyStatus_PROXY_STATUS_ACTIVE, + CertificateIssued: true, + }) + return err +} + +func (s *Server) ListenAndServe(ctx context.Context, addr string) (err error) { + s.startTime = time.Now() + + // If no ID is set then one can be generated. + if s.ID == "" { + s.ID = "netbird-proxy-" + s.startTime.Format("20060102150405") + } + // Fallback version option in case it is not set. + if s.Version == "" { + s.Version = "dev" + } + + // If no logger is specified fallback to the standard logger. + if s.Logger == nil { + s.Logger = log.StandardLogger() + } + + // Start up metrics gathering + reg := prometheus.NewRegistry() + s.meter = metrics.New(reg) + + mgmtConn, err := s.dialManagement() + if err != nil { + return err + } + defer func() { + if err := mgmtConn.Close(); err != nil { + s.Logger.Debugf("management connection close: %v", err) + } + }() + s.mgmtClient = proto.NewProxyServiceClient(mgmtConn) + go s.newManagementMappingWorker(ctx, s.mgmtClient) + + // Initialize the netbird client, this is required to build peer connections + // to proxy over. + s.netbird = roundtrip.NewNetBird(s.ManagementAddress, s.ID, s.ProxyURL, s.WireguardPort, s.Logger, s, s.mgmtClient) + + tlsConfig, err := s.configureTLS(ctx) + if err != nil { + return err + } + + // Configure the reverse proxy using NetBird's HTTP Client Transport for proxying. + s.proxy = proxy.NewReverseProxy(s.meter.RoundTripper(s.netbird), s.ForwardedProto, s.TrustedProxies, s.Logger) + + // Configure the authentication middleware with session validator for OIDC group checks. + s.auth = auth.NewMiddleware(s.Logger, s.mgmtClient) + + // Configure Access logs to management server. + accessLog := accesslog.NewLogger(s.mgmtClient, s.Logger, s.TrustedProxies) + + s.healthChecker = health.NewChecker(s.Logger, s.netbird) + + if s.DebugEndpointEnabled { + debugAddr := debugEndpointAddr(s.DebugEndpointAddress) + debugHandler := debug.NewHandler(s.netbird, s.healthChecker, s.Logger) + if s.acme != nil { + debugHandler.SetCertStatus(s.acme) + } + s.debug = &http.Server{ + Addr: debugAddr, + Handler: debugHandler, + ErrorLog: newHTTPServerLogger(s.Logger, logtagValueDebug), + } + go func() { + s.Logger.Infof("starting debug endpoint on %s", debugAddr) + if err := s.debug.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { + s.Logger.Errorf("debug endpoint error: %v", err) + } + }() + } + + // Start health probe server. + healthAddr := s.HealthAddress + if healthAddr == "" { + healthAddr = "localhost:8080" + } + s.healthServer = health.NewServer(healthAddr, s.healthChecker, s.Logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{})) + healthListener, err := net.Listen("tcp", healthAddr) + if err != nil { + return fmt.Errorf("health probe server listen on %s: %w", healthAddr, err) + } + go func() { + if err := s.healthServer.Serve(healthListener); err != nil && !errors.Is(err, http.ErrServerClosed) { + s.Logger.Errorf("health probe server: %v", err) + } + }() + + // Start the reverse proxy HTTPS server. + s.https = &http.Server{ + Addr: addr, + Handler: s.meter.Middleware(accessLog.Middleware(web.AssetHandler(s.auth.Protect(s.proxy)))), + TLSConfig: tlsConfig, + ErrorLog: newHTTPServerLogger(s.Logger, logtagValueHTTPS), + } + + httpsErr := make(chan error, 1) + go func() { + s.Logger.Debugf("starting reverse proxy server on %s", addr) + httpsErr <- s.https.ListenAndServeTLS("", "") + }() + + select { + case err := <-httpsErr: + s.shutdownServices() + if !errors.Is(err, http.ErrServerClosed) { + return fmt.Errorf("https server: %w", err) + } + return nil + case <-ctx.Done(): + s.gracefulShutdown() + return nil + } +} + +const ( + // shutdownPreStopDelay is the time to wait after receiving a shutdown signal + // before draining connections. This allows the load balancer to propagate + // the endpoint removal. + shutdownPreStopDelay = 5 * time.Second + + // shutdownDrainTimeout is the maximum time to wait for in-flight HTTP + // requests to complete during graceful shutdown. + shutdownDrainTimeout = 30 * time.Second + + // shutdownServiceTimeout is the maximum time to wait for auxiliary + // services (health probe, debug endpoint, ACME) to shut down. + shutdownServiceTimeout = 5 * time.Second +) + +func (s *Server) dialManagement() (*grpc.ClientConn, error) { + mgmtURL, err := url.Parse(s.ManagementAddress) + if err != nil { + return nil, fmt.Errorf("parse management address: %w", err) + } + creds := insecure.NewCredentials() + // Assume management TLS is enabled for gRPC as well if using HTTPS for the API. + if mgmtURL.Scheme == "https" { + certPool, err := x509.SystemCertPool() + if err != nil || certPool == nil { + // Fall back to embedded CAs if no OS-provided ones are available. + certPool = embeddedroots.Get() + } + creds = credentials.NewTLS(&tls.Config{ + RootCAs: certPool, + }) + } + s.Logger.WithFields(log.Fields{ + "gRPC_address": mgmtURL.Host, + "TLS_enabled": mgmtURL.Scheme == "https", + }).Debug("starting management gRPC client") + conn, err := grpc.NewClient(mgmtURL.Host, + grpc.WithTransportCredentials(creds), + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 20 * time.Second, + Timeout: 10 * time.Second, + PermitWithoutStream: true, + }), + proxygrpc.WithProxyToken(s.ProxyToken), + ) + if err != nil { + return nil, fmt.Errorf("create management connection: %w", err) + } + return conn, nil +} + +func (s *Server) configureTLS(ctx context.Context) (*tls.Config, error) { + tlsConfig := &tls.Config{} + if !s.GenerateACMECertificates { + s.Logger.Debug("ACME certificates disabled, using static certificates with file watching") + certPath := filepath.Join(s.CertificateDirectory, s.CertificateFile) + keyPath := filepath.Join(s.CertificateDirectory, s.CertificateKeyFile) + + certWatcher, err := certwatch.NewWatcher(certPath, keyPath, s.Logger) + if err != nil { + return nil, fmt.Errorf("initialize certificate watcher: %w", err) + } + go certWatcher.Watch(ctx) + tlsConfig.GetCertificate = certWatcher.GetCertificate + return tlsConfig, nil + } + + if s.ACMEChallengeType == "" { + s.ACMEChallengeType = "tls-alpn-01" + } + s.Logger.WithFields(log.Fields{ + "acme_server": s.ACMEDirectory, + "challenge_type": s.ACMEChallengeType, + }).Debug("ACME certificates enabled, configuring certificate manager") + s.acme = acme.NewManager(s.CertificateDirectory, s.ACMEDirectory, s, s.Logger, s.CertLockMethod) + + if s.ACMEChallengeType == "http-01" { + s.http = &http.Server{ + Addr: s.ACMEChallengeAddress, + Handler: s.acme.HTTPHandler(nil), + ErrorLog: newHTTPServerLogger(s.Logger, logtagValueACME), + } + go func() { + if err := s.http.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { + s.Logger.WithError(err).Error("ACME HTTP-01 challenge server failed") + } + }() + } + tlsConfig = s.acme.TLSConfig() + + // ServerName needs to be set to allow for ACME to work correctly + // when using CNAME URLs to access the proxy. + tlsConfig.ServerName = s.ProxyURL + + s.Logger.WithFields(log.Fields{ + "ServerName": s.ProxyURL, + "challenge_type": s.ACMEChallengeType, + }).Debug("ACME certificate manager configured") + return tlsConfig, nil +} + +// gracefulShutdown performs a zero-downtime shutdown sequence. It marks the +// readiness probe as failing, waits for load balancer propagation, drains +// in-flight connections, and then stops all background services. +func (s *Server) gracefulShutdown() { + s.Logger.Info("shutdown signal received, starting graceful shutdown") + + // Step 1: Fail readiness probe so load balancers stop routing new traffic. + if s.healthChecker != nil { + s.healthChecker.SetShuttingDown() + } + + // Step 2: When running behind a load balancer, wait for endpoint removal + // to propagate before draining connections. + if k8s.InCluster() { + s.Logger.Infof("waiting %s for load balancer propagation", shutdownPreStopDelay) + time.Sleep(shutdownPreStopDelay) + } + + // Step 3: Stop accepting new connections and drain in-flight requests. + drainCtx, drainCancel := context.WithTimeout(context.Background(), shutdownDrainTimeout) + defer drainCancel() + + s.Logger.Info("draining in-flight connections") + if err := s.https.Shutdown(drainCtx); err != nil { + s.Logger.Warnf("https server drain: %v", err) + } + + // Step 4: Stop all remaining background services. + s.shutdownServices() + s.Logger.Info("graceful shutdown complete") +} + +// shutdownServices stops all background services concurrently and waits for +// them to finish. +func (s *Server) shutdownServices() { + var wg sync.WaitGroup + + shutdownHTTP := func(name string, shutdown func(context.Context) error) { + wg.Add(1) + go func() { + defer wg.Done() + ctx, cancel := context.WithTimeout(context.Background(), shutdownServiceTimeout) + defer cancel() + if err := shutdown(ctx); err != nil { + s.Logger.Debugf("%s shutdown: %v", name, err) + } + }() + } + + if s.healthServer != nil { + shutdownHTTP("health probe", s.healthServer.Shutdown) + } + if s.debug != nil { + shutdownHTTP("debug endpoint", s.debug.Shutdown) + } + if s.http != nil { + shutdownHTTP("acme http", s.http.Shutdown) + } + + if s.netbird != nil { + wg.Add(1) + go func() { + defer wg.Done() + ctx, cancel := context.WithTimeout(context.Background(), shutdownDrainTimeout) + defer cancel() + if err := s.netbird.StopAll(ctx); err != nil { + s.Logger.Warnf("stop netbird clients: %v", err) + } + }() + } + + wg.Wait() +} + +func (s *Server) newManagementMappingWorker(ctx context.Context, client proto.ProxyServiceClient) { + bo := &backoff.ExponentialBackOff{ + InitialInterval: 800 * time.Millisecond, + RandomizationFactor: 1, + Multiplier: 1.7, + MaxInterval: 10 * time.Second, + MaxElapsedTime: 0, // retry indefinitely until context is canceled + Stop: backoff.Stop, + Clock: backoff.SystemClock, + } + + initialSyncDone := false + + operation := func() error { + s.Logger.Debug("connecting to management mapping stream") + + if s.healthChecker != nil { + s.healthChecker.SetManagementConnected(false) + } + + mappingClient, err := client.GetMappingUpdate(ctx, &proto.GetMappingUpdateRequest{ + ProxyId: s.ID, + Version: s.Version, + StartedAt: timestamppb.New(s.startTime), + Address: s.ProxyURL, + }) + if err != nil { + return fmt.Errorf("create mapping stream: %w", err) + } + + if s.healthChecker != nil { + s.healthChecker.SetManagementConnected(true) + } + s.Logger.Debug("management mapping stream established") + + // Stream established — reset backoff so the next failure retries quickly. + bo.Reset() + + streamErr := s.handleMappingStream(ctx, mappingClient, &initialSyncDone) + + if s.healthChecker != nil { + s.healthChecker.SetManagementConnected(false) + } + + if streamErr == nil { + return fmt.Errorf("stream closed by server") + } + + return fmt.Errorf("mapping stream: %w", streamErr) + } + + notify := func(err error, next time.Duration) { + s.Logger.Warnf("management connection failed, retrying in %s: %v", next.Truncate(time.Millisecond), err) + } + + if err := backoff.RetryNotify(operation, backoff.WithContext(bo, ctx), notify); err != nil { + s.Logger.WithError(err).Debug("management mapping worker exiting") + } +} + +func (s *Server) handleMappingStream(ctx context.Context, mappingClient proto.ProxyService_GetMappingUpdateClient, initialSyncDone *bool) error { + for { + // Check for context completion to gracefully shutdown. + select { + case <-ctx.Done(): + // Shutting down. + return ctx.Err() + default: + msg, err := mappingClient.Recv() + switch { + case errors.Is(err, io.EOF): + // Mapping connection gracefully terminated by server. + return nil + case err != nil: + // Something has gone horribly wrong, return and hope the parent retries the connection. + return fmt.Errorf("receive msg: %w", err) + } + s.Logger.Debug("Received mapping update, starting processing") + s.processMappings(ctx, msg.GetMapping()) + s.Logger.Debug("Processing mapping update completed") + + if !*initialSyncDone && msg.GetInitialSyncComplete() { + if s.healthChecker != nil { + s.healthChecker.SetInitialSyncComplete() + } + *initialSyncDone = true + s.Logger.Info("Initial mapping sync complete") + } + } + } +} + +func (s *Server) processMappings(ctx context.Context, mappings []*proto.ProxyMapping) { + for _, mapping := range mappings { + s.Logger.WithFields(log.Fields{ + "type": mapping.GetType(), + "domain": mapping.GetDomain(), + "path": mapping.GetPath(), + "id": mapping.GetId(), + }).Debug("Processing mapping update") + switch mapping.GetType() { + case proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED: + if err := s.addMapping(ctx, mapping); err != nil { + // TODO: Retry this? Or maybe notify the management server that this mapping has failed? + s.Logger.WithFields(log.Fields{ + "service_id": mapping.GetId(), + "domain": mapping.GetDomain(), + "error": err, + }).Error("Error adding new mapping, ignoring this mapping and continuing processing") + } + case proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED: + if err := s.updateMapping(ctx, mapping); err != nil { + s.Logger.WithFields(log.Fields{ + "service_id": mapping.GetId(), + "domain": mapping.GetDomain(), + }).Errorf("failed to update mapping: %v", err) + } + case proto.ProxyMappingUpdateType_UPDATE_TYPE_REMOVED: + s.removeMapping(ctx, mapping) + } + } +} + +func (s *Server) addMapping(ctx context.Context, mapping *proto.ProxyMapping) error { + d := domain.Domain(mapping.GetDomain()) + accountID := types.AccountID(mapping.GetAccountId()) + serviceID := mapping.GetId() + authToken := mapping.GetAuthToken() + + if err := s.netbird.AddPeer(ctx, accountID, d, authToken, serviceID); err != nil { + return fmt.Errorf("create peer for domain %q: %w", d, err) + } + if s.acme != nil { + s.acme.AddDomain(d, string(accountID), serviceID) + } + + // Pass the mapping through to the update function to avoid duplicating the + // setup, currently update is simply a subset of this function, so this + // separation makes sense...to me at least. + if err := s.updateMapping(ctx, mapping); err != nil { + s.removeMapping(ctx, mapping) + return fmt.Errorf("update mapping for domain %q: %w", d, err) + } + return nil +} + +func (s *Server) updateMapping(ctx context.Context, mapping *proto.ProxyMapping) error { + // Very simple implementation here, we don't touch the existing peer + // connection or any existing TLS configuration, we simply overwrite + // the auth and proxy mappings. + // Note: this does require the management server to always send a + // full mapping rather than deltas during a modification. + var schemes []auth.Scheme + if mapping.GetAuth().GetPassword() { + schemes = append(schemes, auth.NewPassword(s.mgmtClient, mapping.GetId(), mapping.GetAccountId())) + } + if mapping.GetAuth().GetPin() { + schemes = append(schemes, auth.NewPin(s.mgmtClient, mapping.GetId(), mapping.GetAccountId())) + } + if mapping.GetAuth().GetOidc() { + schemes = append(schemes, auth.NewOIDC(s.mgmtClient, mapping.GetId(), mapping.GetAccountId(), s.ForwardedProto)) + } + + maxSessionAge := time.Duration(mapping.GetAuth().GetMaxSessionAgeSeconds()) * time.Second + if err := s.auth.AddDomain(mapping.GetDomain(), schemes, mapping.GetAuth().GetSessionKey(), maxSessionAge, mapping.GetAccountId(), mapping.GetId()); err != nil { + return fmt.Errorf("auth setup for domain %s: %w", mapping.GetDomain(), err) + } + s.proxy.AddMapping(s.protoToMapping(mapping)) + s.meter.AddMapping(s.protoToMapping(mapping)) + return nil +} + +func (s *Server) removeMapping(ctx context.Context, mapping *proto.ProxyMapping) { + d := domain.Domain(mapping.GetDomain()) + accountID := types.AccountID(mapping.GetAccountId()) + if err := s.netbird.RemovePeer(ctx, accountID, d); err != nil { + s.Logger.WithFields(log.Fields{ + "account_id": accountID, + "domain": d, + "error": err, + }).Error("Error removing NetBird peer connection for domain, continuing additional domain cleanup but peer connection may still exist") + } + if s.acme != nil { + s.acme.RemoveDomain(d) + } + s.auth.RemoveDomain(mapping.GetDomain()) + s.proxy.RemoveMapping(s.protoToMapping(mapping)) + s.meter.RemoveMapping(s.protoToMapping(mapping)) +} + +func (s *Server) protoToMapping(mapping *proto.ProxyMapping) proxy.Mapping { + paths := make(map[string]*url.URL) + for _, pathMapping := range mapping.GetPath() { + targetURL, err := url.Parse(pathMapping.GetTarget()) + if err != nil { + // TODO: Should we warn management about this so it can be bubbled up to a user to reconfigure? + s.Logger.WithFields(log.Fields{ + "service_id": mapping.GetId(), + "account_id": mapping.GetAccountId(), + "domain": mapping.GetDomain(), + "path": pathMapping.GetPath(), + "target": pathMapping.GetTarget(), + }).WithError(err).Error("failed to parse target URL for path, skipping") + continue + } + paths[pathMapping.GetPath()] = targetURL + } + return proxy.Mapping{ + ID: mapping.GetId(), + AccountID: types.AccountID(mapping.GetAccountId()), + Host: mapping.GetDomain(), + Paths: paths, + PassHostHeader: mapping.GetPassHostHeader(), + RewriteRedirects: mapping.GetRewriteRedirects(), + } +} + +// debugEndpointAddr returns the address for the debug endpoint. +// If addr is empty, it defaults to localhost:8444 for security. +func debugEndpointAddr(addr string) string { + if addr == "" { + return "localhost:8444" + } + return addr +} diff --git a/proxy/server_test.go b/proxy/server_test.go new file mode 100644 index 000000000..b4fb4f8ba --- /dev/null +++ b/proxy/server_test.go @@ -0,0 +1,48 @@ +package proxy + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDebugEndpointDisabledByDefault(t *testing.T) { + s := &Server{} + assert.False(t, s.DebugEndpointEnabled, "debug endpoint should be disabled by default") +} + +func TestDebugEndpointAddr(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + { + name: "empty defaults to localhost", + input: "", + expected: "localhost:8444", + }, + { + name: "explicit localhost preserved", + input: "localhost:9999", + expected: "localhost:9999", + }, + { + name: "explicit address preserved", + input: "0.0.0.0:8444", + expected: "0.0.0.0:8444", + }, + { + name: "127.0.0.1 preserved", + input: "127.0.0.1:8444", + expected: "127.0.0.1:8444", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + result := debugEndpointAddr(tc.input) + assert.Equal(t, tc.expected, result) + }) + } +} diff --git a/proxy/trustedproxy.go b/proxy/trustedproxy.go new file mode 100644 index 000000000..3a1f0ad37 --- /dev/null +++ b/proxy/trustedproxy.go @@ -0,0 +1,43 @@ +package proxy + +import ( + "fmt" + "net/netip" + "strings" +) + +// ParseTrustedProxies parses a comma-separated list of CIDR prefixes or bare IPs +// into a slice of netip.Prefix values suitable for trusted proxy configuration. +// Bare IPs are converted to single-host prefixes (/32 or /128). +func ParseTrustedProxies(raw string) ([]netip.Prefix, error) { + if raw == "" { + return nil, nil + } + + parts := strings.Split(raw, ",") + prefixes := make([]netip.Prefix, 0, len(parts)) + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "" { + continue + } + + prefix, err := netip.ParsePrefix(part) + if err == nil { + prefixes = append(prefixes, prefix) + continue + } + + addr, addrErr := netip.ParseAddr(part) + if addrErr != nil { + return nil, fmt.Errorf("parse trusted proxy %q: not a valid CIDR or IP: %w", part, addrErr) + } + + bits := 32 + if addr.Is6() { + bits = 128 + } + prefixes = append(prefixes, netip.PrefixFrom(addr, bits)) + } + return prefixes, nil +} diff --git a/proxy/trustedproxy_test.go b/proxy/trustedproxy_test.go new file mode 100644 index 000000000..974e56863 --- /dev/null +++ b/proxy/trustedproxy_test.go @@ -0,0 +1,90 @@ +package proxy + +import ( + "net/netip" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParseTrustedProxies(t *testing.T) { + tests := []struct { + name string + raw string + want []netip.Prefix + wantErr bool + }{ + { + name: "empty string returns nil", + raw: "", + want: nil, + }, + { + name: "single CIDR", + raw: "10.0.0.0/8", + want: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/8")}, + }, + { + name: "single bare IPv4", + raw: "1.2.3.4", + want: []netip.Prefix{netip.MustParsePrefix("1.2.3.4/32")}, + }, + { + name: "single bare IPv6", + raw: "::1", + want: []netip.Prefix{netip.MustParsePrefix("::1/128")}, + }, + { + name: "comma-separated CIDRs", + raw: "10.0.0.0/8, 192.168.1.0/24", + want: []netip.Prefix{ + netip.MustParsePrefix("10.0.0.0/8"), + netip.MustParsePrefix("192.168.1.0/24"), + }, + }, + { + name: "mixed CIDRs and bare IPs", + raw: "10.0.0.0/8, 1.2.3.4, fd00::/8", + want: []netip.Prefix{ + netip.MustParsePrefix("10.0.0.0/8"), + netip.MustParsePrefix("1.2.3.4/32"), + netip.MustParsePrefix("fd00::/8"), + }, + }, + { + name: "whitespace around entries", + raw: " 10.0.0.0/8 , 192.168.0.0/16 ", + want: []netip.Prefix{ + netip.MustParsePrefix("10.0.0.0/8"), + netip.MustParsePrefix("192.168.0.0/16"), + }, + }, + { + name: "trailing comma produces no extra entry", + raw: "10.0.0.0/8,", + want: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/8")}, + }, + { + name: "invalid entry", + raw: "not-an-ip", + wantErr: true, + }, + { + name: "partially invalid", + raw: "10.0.0.0/8, garbage", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ParseTrustedProxies(tt.raw) + if tt.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/proxy/web/.gitignore b/proxy/web/.gitignore new file mode 100644 index 000000000..251ce6d2b --- /dev/null +++ b/proxy/web/.gitignore @@ -0,0 +1,23 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist-ssr +*.local + +# Editor directories and files +.vscode/* +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? diff --git a/proxy/web/dist/assets/Inter-Italic-VariableFont_opsz_wght.ttf b/proxy/web/dist/assets/Inter-Italic-VariableFont_opsz_wght.ttf new file mode 100644 index 0000000000000000000000000000000000000000..43ed4f5ee6cb01173b448af26edb9d7459f9d365 GIT binary patch literal 904532 zcmd>{cU)9Q|L?!2?AZb=#ia_kq9S(1uBh01H)>3x(O47PV~M7lX8MzuW@3yn8jU8# zSYqrQ3o0lAvUJ#`vvgS6?wBZ>?RoWbJb4!?zLtNyJy4;^eoFh#$dAoGg<& zTymK}Z;>nggOtqI;ve%PM0v^Xg$~)+;A%Wuy>{f?h!R4s2q11pMU3Lroi3R;A&fm{L;~u z0%*!bRD^J!kb+3jTa1zOX$Ny@6CXn#^53CXNFm^Mp2RHTWiR?(b|eciW9HAq$4%Cu35YOMbaXSp$CYtY1#4(gx*FuNPy)) zffYuU7@2#SsZpBH_)k9Lm$+n^K+pOcIxM#M>Ofoa=9T+o#n^M z2wOud^jW`Q-5M4GcRgHwqRA{`f+MRRX8`J1jX z*&O~T1pUUb6d+m8i1HP7kDS_c=bk4ikUos-zK7Wbhf=cs&=E!qbtRs)CpVNFZOGyJ ziVX`_qFs0jq3$$@#?TC!PtVfx^alNtcF<4sJ6)kzN})WYh5B*B9IfnV0WOk_J4ZNi z!yP#^g^wWEt22Bg@^02gk>5joG)l?BaR)bs^_o*3OXxMHK92Po%8z<-<5{oq=oRO+ z2^@OPsZV4*=hP>)h%;HxKMbGZuoTu}NCt76{zVEBOJ7vO`-xKP)H%{9MW@b_t5oOI z1?n!{a_S=aNynVJg8E4>J9P=+i=DcXW=S4ST}3L1^##jIO?pLvQ+I)W)2X|*(A~HY zalbQMOZ~*PPTik;#Cc6RD&Z%NaOxbniv66r4srBO-J1eMXI(fS>MW!5H!>I>A@9xxFQ-Gjz>VXu%H#qem^5UI!;eyG7H#o!FApE*h*Hc%%Ij^?l z$G_nW51|nLX{X+fy7S>qy*&l;K2E&@g>vOiJ(Rq-45!`^`W>epM&Vp@`hCfVd&?Q# zk3zUbPQ5?*aTA>SfJc3bc_4u6=?ou4!JL;cgRb?$k#>|In$Agx*}vQP7?C!;OZ%!5L=^^p#G1EXL7Xr#_A}G{vcp z$9Rl%>Ju=3_HpVHF*e&f^+`x4(5X+x*wi@nDbR>UAdiLg3_VAqX*2R(2j2)kt!hJPbMXFy*Djis`VCLwegEk`S3sNFiW*g}V>ef_8J*`KJvUXqV zXpNERw^h(te{Mj?2KsZzQiLo-j7KSqMZ8T8|8i*Sacv{g8{_ED4b%;3tZ}5i0XeNh zX_nAq@UL~0tDB=d>!ItB!XMLN91ngInOKc&bhIn08PnIG9hRVV z{&gydn#agr}B{m+`;YufrOjgg;j@E2h2R(&4O2uqt71CT!|1PITEz3sy zGOz)6%t0>ABmK{#lC?K0#dzf1WWO!PB%2#Hptsgzz0m{znRT(AW4*%SH;r5t!tAO$ zLf0YJrV+9p(*drJj1BWDI?alKnAXHX>MQ7GgEkWTOkwkF392j+2G(je#Xj zcf{O)HqoPe|2r-8=ewAdtpeR4m!O?zIC^>&EP|D4IBL<Ej*c zYObSVA}GkLKq;w`w1hwkQ7gqQOo}>qh+? z|NG#H@C>dj!vB`#Xqj5eyJw((CxgK)V*fGkmUli%k?R%*}C<3O0g34L%o<4 zvi^G%Ur*BzcMaCeOHe*G=MF~;GaF-e%UU|@U;49Y3@t~zMX2vHh`9>=)7<_&kaP36 z>4cT4;8;!LK`rbDd04%E1%APmU$LU&I6v-v$Pc-%A-~}akSSa}WFwEe`7ZoY$mRT7 zkX!k!kniyaAP@1sLLTLhL7wEVLEaDs5+_6mV<5)~vmqZB=0QFwJPA2p*a*2vxC(hq z$bifi@*xX_LdasF7_vfCV128^3YQaIMK?%y(F@XB^n(l#yFhjmyFvC4dqegWr$9~< zr$NpTXF@(EB42T?xBzmYxEb6n}*LS-b*yUAzf-OS}siC&od>iaDjAFctCn7P(p>T!WYtC5eONq&_jkP!XP^< zIzx6>bcgJz=mj}IF#vLqVi4q5#W={x3e-aJs^WFXcNFhJ)+*|VlXwaH3`vsOLbjJe zA;TrqTIw$igB&i+fP74v1NjVg44kw=dKPk(vk_PO5Z_#FMSWWSNZ|+ zC+R21!_r~MW76-C=cIFxH>JBINZC>@amv2RzK~Ot%ORgtJ_or@xejuJ5@!?250v{M z4=4{p9#I~FJf=Jb8Ldo$G$^r(SLUcj6Q>%dLXN7Fs#B0>RhMv1a8(6sQQcDAf=pJy z22=$q>;P1isw(I;DzvkzUeyR`!?uc3^J?sR)UImme$^gow7J?x?E~qr_J?e(Mw_dH z)xnS<>JZ3KHQGuYrVfMbtVa8*`>6Xsj#E#BoUMjUs-ILp4Y^3Y2=ZAqY+L=28ttRr zr-p^7e^O&~sE?^pBlYj<3y@dT*~Do&YbKGPnW~u$xm0r+@{Z;Xq(PI6s~MUM;x#3j z5`>g$N)b|~DTCgi!CptxsId|U!-0jns9j*;E}kx)keysQId-0GU&+o9*dE_oik5Ck zx1~E$4BG4*@>-@@u34dZR`Z-@rDm(0XVa0A3)U3 z3a)?y;8oCvDC9Iz+bZw{ksfC&dYrVi=|~iO0&K&f1Wp?y#Fs|kROKhk7bq9@W$awU z6K4i3PYt@s`{e%g1jWnH5QAwE@;WM&U!*hgQ2BXi|A2M|XA1@jCW9O!M{+0SujJR{ zLU|%xmb2w&a35iBL~?tka5!bKmpOJ7Bpc;d$)7UmYkQsi3U!jdGh%ZALPN1ny2+=k+4>sW`9 zNLxoj31=b09LfWFCsOW@IQQx8k@5h!8+M6r^_2(90@c&^ljRZi46Z%B-&r1Mzsb#_ zgM;Le@<7^6?@e$h-!o;b97-qY?Y0i(EWI5hk3+~QdMijCFZaP=%$x4=M0*tX7=7IZ z$~Epa+7=>Dk~L`M-5upwvP4<*i60atr=fS*QlMQ`ckEwY;OX-^;dU0Fc|^l1ku61w31 zKzYBsAN~C$Ohk@CSN$|iPLk=E)1cg=9dh1n(%`-Uv^$ty zlr!lodE;*Cjptko>qNJG&dqwEuRR-2|aI7B@?vG_7hB5{?5W(;bzX4GxLNBIXyrF4&FU>rkzb(K>_geaSw{Aa9U|(ZPX(<;}2$ z?=~pl4xs3jR(fClg|a`pg#9@?y=*#1V`q<}xUdgYD)~9O^@(DZ{2b@?iD!4Y12<=z z$y*-6J-#j3TYiiiv90G&*^?XgOqY((Y4BLe>wBS}ypeLh7dpwy=-QqN zUVehs>@)S1*VFgESbX8^Va_HxdbFsDIme2s?3LVz6M<1^;>6Sb7v$A+@&ftDld1G_ ziV42c(UnSh1n~yU*c7O!MTc2} zBg}Y{AmBeHeDulFm{)c~&jDL7csJ4(481M%ru+&DyIo#^%AJrWVh%r#$!8?p#C(Nb z#`M*n8e|DWyT3i!9?k8rFSRe_zOgT{FX67*huDYE1oXTD7J#Xq$)OI!MC`(;flirKNfV;8#j!*Q#E5+FI~*R3{6{NcLIPI3E= zH*$1!|K)}%syn#d*M;6W_>1pI`tE3qtfJixnR(`;&hGT)_52)}KECm7u9a@wI3lPi z1yV)7-MDObqjzs4X}i$-(Vtf5P(t+A_1&lnvJIz(BsvzoD^*F?ASL=V`nG#V`t??H zqlUKK-ddAR$8Y~uucVJ+zYLmBAKv@A=qvgvE=FigpWja{FQG5u-x3Pw>-cvI6X~4? zZ{>VXpBPTny2+~z#;`z2PyN6aK#8fJS{vzD>W(5m+Ml`y@@(oJgv6!pQ^eC7X+IQ& zQcT)GEihIj4U=hg)s|{XXs}mT(P`^3PEAMc z=g@jP)c5l5&x`Mg|UVDoyOR(F*^&xykRR~f(aHHi-<_UZ67=1rF{YsJqm+GhdwzOWm%JyYt1x9xOgAPrxQFeD+2|g& z|G?v|-1Z+_Z|}DM;3j*g{pZ%(JM2HVSq|NQbDSl3|Lv9Cy6=yRsO|jY!40-fKOUcv z>h&XUL2z={qgSdPCb5PR}bubMyEb-a;dfTfm3s=?S8^-oqvj%7INX|>CuFyDtuDfAn5Qw}@lLpwNQjJ2%SrjvqKRAPiz) zCnmZb|7Bj#o#Q7*D*GM3I)dwY{OU-En3rh%@ygN@ce=R^K5?&8+u{?c z{l%OU#?BS|iOQj6BTv*0EggBHabk)6gk3rs`TGyEI%od=;|uu#zh4jY>HGVQQ18LN z=Zq?`{%#pmJnH0)88xmaV`hfzIhj6E=ymFRh<3~=7IrFrl=r<;hAE|Wr&7G!dYnq{ zuhE~%=&#nF${A&DI8`*Ga-G+talJ z6g|&;^`dY5nVa3cx}CY(S=;N3VM>YROvbUW@H0ibAcD8{hIm(>#rM~u}^JQ)8x}UEf%7tBs>aOj4A*z?Q+l8B> z^5Z_tT?Se6!&%2Q4@9K4-aF&#Rp`?vH@Iq-D8^kO{{>9&>tJ+*V`9ed` z#e{y^+>6D*r7jmsTEkm1jW4)Z@(iDUk?U{mdeJS{=5Z;;uIPIyWtP!=39XiSDXX6< z^OAXdR^LnJ30ZwEl?K)aUb2K2PrCg380o>~><*3om$T;x*_X5D2|1Tdfz}R}3+JhG zE|&+_`d==eDd$|S=vW~dYZ^3Ip7qp}BH76;XbUHS5b!68>tW@Q&$DHtfU zyHYriZ+FEUU~O}yrhQeXEA>n?w6E-RMV2}4YW&DX&#NheJW{W+?!H>kS8ji`u)V$N zYHh2crYc-uMq1S4ex2|`M=QV3PbLcg>hI6^5jj*^}zuu1zxqiJr-}d^= zk!|l>PwcKKyeZN6cgS8uxk18IAs zv~?xKxO&^ooE|FE&DQd$wZCX&99Bq2K)(~B2 zZI}{W*r6dPy12a}C%QbiJSe&%xGX5Tc5Fr9t@t5sez%g^XgqIadDMs9%IPXq-zwKt zi?@{3y3*S*%iYD>@nPC_x8nzRW!}#7wU^&6Y^@2rZ5@_9^tM|^p~IbDRw}#Qx$4!c z_nljzAsz0-trWW5N$_oGdndDxBKJ$GjBcfHVi9w zjcFKOu8CLW^A znSeH=#3h#5Mr@BWW#M;EjIIp5dtshUe>b^bdBEMww5pDG^SxC5cMAqu^>>T)nxea9 zqf6CyYkKB*+^rp5t-fm?TBN?~>SAes?{p6-?%ur-)m`ox^sb}sC2#Z(x|h2UqGVV; zf3IXjsdTSoWU1m_$*5BCUJdTOS2MO+dC!Uj?pcSWLW?X?-g8w}cDTo^B<`#J9S6mI z_nfzD+`&mzZQ@S8Q0*I+(WfdX4wj>eGj%TTiZgX7@Qf?e+biOVrxm%zm9$0_7?LW^ zIygxcXX{?1ifbF4;d6iQGEcYrsSC;j?x((5?s`9Mh}r9Yp%7Ygzpz(m-u>c$TE+d+ zz$%~nwIi$4@h4xX^o~C@L@0>A-%B>eC-&r9$EQY^z2dV56??}U2Nd|km$XI{X1nps zcHPWRxYgy>1`^$Bs25D(gv`X^)#EcQ;w?#HPUvPTwqCYYEvr(eEL zLe9VTI{V)Que-9%<=fSnH9+eNSP4K9Bz)bXkVMu}dgVZ(ws$g8jJ@!Pc|6MW5NwaO@zi^Y|RUXf)TZDhT>_t8bhg~x~-vX zxKeK@n_#pX$|f=~$!Imyw=%UdG>p&I7__E*hLHtFS$l?&+_MQQ7dV{L8LjI;fEWMo`jVROld zT4wdkxc+QiZN`nMVo}D8X<}gpv*!$E&lz#gTB|Z1bT9DBNEw}`%t#wj?3s}_B+osg z)U(<EYfA>Pb}1B76<0} zXO?)@cxRThl6^AkCg!of2OuJ>&7yOn3WKT zJ#xLXl6&RqvXa?_*8lvvLO)w#v%uUC=r!Z!9k456<<>${(7e%_;~g^vf#H=k?1f9FIH- zryvhA3o8jKfS8?Z&#LQNsLb-nKonmKUz)|nR@P`u%A%|>A7*)EpL?>^&Fss|tqs|i zS6J(_Z@yMvoy`iKo!q*{JKHok*F8J8M{Zy?D@Jxc6GcHex@@y|Zcw(_H@9`R*+17m zyQEcao9t>e!m8cit?q8tW!H|uy>)Vq&Uoh8x^m;0RdrRy^FcLMG__)*9Oz%|1E)#?tP{vQo;?8|x%^8(i}>#zxmXjnT^Ef|#4%!>D@l z&P}6Raejd5#9CXW>BM?ljVXF%on(qxUZgU`FfDdvtz?QD#2HP==n7Nvz&t-wT5zSO zDWiR^n<-15=Vi)jhYO~UYHbQO$*LU87RsEM71dHs>X;NoPU_ec zF(+eiEuWL;ksFv(F(KQUQ=`rf%#jt@9y#i{H*VyF6`mZQdwpJ(S8iIZ$tAbcJx9zf z^~zD^R<|?ga=D5e-(0_T3A$W;x%BqVol*Q)@j6(jtpa7i9I6a|=wZ()$;f zeAD|CHjlN%ke! z%+qv6bII`$a_^F~u8k!n<-P`MNx7e)u|%jfxRhSqR9IJfb#q~DX>4aNvDD;|Qd?>Y zGIT7>afMc?eo$MM?q_XPmKBoPp)A|?LAUY;y>qL|Gg}u3Au_C2eUW|mY{XX+B`tMgmeYO9OfD%`3|T<%v@ zv)ig+2hiN&>Z>bNxKOJSE33JN%FflQ+62#o@x!2_TLa^Iv*c~-BWiBo? zH8n0SmMG1bFiWf@0zx+?++y^)Z?+h{@0VF}ePW9(`Mw3RrO4Y*W-+5rEZCLnAaoW> zx%O_Er9vB9W~m-}w%CHsGXjmqVnLr92_T6DNtdt+9;wKlbFwM%VL zK)Fk8ndV-7ZG~HrTx%<86;i9rb5Yf$wk>R|D{=KGt*fe1y47*Dr9pKrm2I^3_j^dL z^|g(S?dn}BgWVd~=C+}REAww)^=+uv2o()%n+p+8-yrC3yEd?GZsXC>oT~BqSgvQ| zjWvPAjWOXiFYECo?w-~gYg!jsvjd__tZWZ!t?hV6VdZ+dCtHPt;Cd?#OY5x~`KH?H zQW_X+JHABgVLLlSR@&|jlY7`Q^;#dBIpDU*RxaHvv(+2?ax$mt!do>d^zR`Jh%a~tH_Qn$TO1sT&3AS@g z@b;QuIlUv!8OzZUa>ei}QLb)vJzuU4vMJ^Idd^L*cjM~ihRWWha$||MLYCdR3K@Og zMizV&jk2;qZs2|$+JWN=f^L|&619s5SE?=LxpEhcCs!Vzv2itAl^<6lR<-6V6&ej^ zsnn=BOO-~&S*kTk&Z0)-TA!;Xt|7mJAJ@>(tC)!ruCWeL8m$_UvlXiSIGa^~Cf2v+ zL|a*FPFw1t zG`aDOb!sKwXjKcmtx)9)q4wi#jqOF=W^2dub{j5eOEs-{e=l_zA8bes<-?6}UP9E^ zD>=fA$4>t&qy+jF3Tf65UZ~4gss)ycVB>`VW^Kzsgz#c@iON;1ml~_Z`W}~TqEKg3h*AxQPlcLPO0j#D zwNde4L0!EfX+cAcg59R5E>XHD#D+$tl=_6lB5}L|ouCjJlpHVGRlhz~5};~O@b#)* zWf~640TZt-YIinCU8~ObP}gem-PH>J+8WK?Crevt_AV&((d;*dhiQyDa}>r;7$!oE z1wx0hC$~GpX*g?+heoNaFL8`EjV9P))VL|D8#Owt*EN_iEgGF$WtAqF4ao5Qu{Sm0 z;n4;x;=|1@ag)x*yWC$JQ|FTTba|OeL!H*@;^J9TaE8I#a+%9*k9Cf=|%N~$gzRgLywr`sUMeY|SRR_2$YZ@xu!?cYB9_I$zNZ_qR~THQ(pnmQ0bv_Gv|BsZ<|o)d1^>q0fbQC86R4&@>F#Bt-+JU^9*aS-18g| z5?|o8Z?VzEYu^&1#w$LwPT8v7Pnp$x_or;9#S% zO?r5dv5k9{pHiP5UYMF}*BnjG0h7i!nmSp0^saF2Y=1;^J=Esf)0fEre-2tRX+tzo9a`q@f|) z3`(^&-+V3aSKyWG(n^Pi0iFb;!`3j^VlanW48{m^lo20CVmY!8aO35}&LfLA@4Kog zIbsYCJ~epUxKkU)jXPrq)|@epi8y<9^yu>gZJo~#vUR;AT;ZY&8e%n#vP5X2KkVN= z_HOv#yVbpX5+qw?;+tXJlF~bMGwcoPYcPlD3_4?MT8hq|btlUs>yCa_R+dI@EX)Zu z)ddCReI5{)mla^mE6wyMd{eigup%_SxVV$IITl065~efP=`@vUZ7txc`=6<-DUkCp zP}stQ8a|iiHspEg8}mHc*lbm8Y&uNg6+GEdKD*k7qr{|Al6A=>73#>WcJK0C=D~4 z4Rpke*{!m00?RD8{#Fkk>*+b!6pzn97{g-89A>2QNMa^PurX@DC!i6Ng5e)ywkS?_ z6D1p2S!EnwTwvDbu<+RZ!DH@sxmTX5jkue2CZNt}jwmwXyww;StjWn3Tc3A%O^u_1 z1~XY;g%%^aU1v1%zYcAW!#c-Qq6yZ;>I`Gdp#hn8!j~|bU)a67dGiMwH||)vbmtk( z&c(rdpL*(tC!hRjw&94ujB$Kqn(oN7X-B6{{e8lOQ{%=(MHpjs#=8bhf=dw=u?DnX zm?hb0NaMJykTA?yC{b?hk43yttnn+>`I#}ASoi2kU8M?7-)h%_8jaRsFkoEPs?{=w zS5>a$7?hk#5t_oax}g;e$bd%#Y+8qZ0UQ_PYHptv7G|_`N-Y?2)c{KwS6R7Y`*!N! ztOe^gVd%RC+Pn9uz4R2u-`SE@XKC~$Vdw+uY%qiwF#as&ybAXk(l(O9di{>oCfZU2 znY3IX&ctO-lKFB04`p7k^Gd?2Prj91XeEnEe59bURFS!1rbTIMC! zc432uj2=7fsl|Bd>v$2n_iU1}M%HM9yY~toJ^d+srqE%u?|ty$!L!${-%m`>Fy$2# znoDXMtaeGIc5!w0)cFSKL)vxk(|6G5G1KQfwS2=y6c29$Bk{lGzvgcrHH9@@WwJT^ zQ3zfpM$(Vs5x8&5N&n9|K}rRX;weWeqGTxS1MEb5}^Npl$Dz5lE9VfiVFDMso-g;F0% zR<5QTG;l51CinnM#yZ$fbI*oFkmPd;Ym)a|8(H?(~e;}R%iRd2;=5MJxu2-U5 zl_Ub@{tMDTdX0tSds&WaN+ZotIpZnaC`LJ%3YD`dS+$LFR0rU0hr0{)s^D4yPvs77 z2aCtj`0wJfID9DTJE_TKIzDgPjSrO{bLhq<4|D(22WfByh>U@j}+Je%*8ZgSquaTFQ~{aXZ5 zC_ZIB@&7R+TIR1DNgZ1v=F{+pWYCC*It$N}t>R~xlRhIar_1#JntWVqhSeLo)RE>Z zUTzWId>yXpBk-Qn6&!Im@f)}fWaA^fIqW~~ZsH5&5xiBN$$xLJ<3E-=^H0HrF0Fw3 zI$+nCi#XC*d8SxE{(p){me5n~#v_fU z>+%kMEz{+EC*$DyI-c*(P$++xy6|i8UG16FOK6X0K6~!>kyH6g4m}KhAioFZvh;+0 z)LO~Ya3{~gWiD;im!$Q{IU7#Gfd`mbE+XBw_(g?KTr(TB)(@p?=De^_UE z{h1h);bD3He*Q`m#%&x;RJX@H`IrZ?0CWEUy9O}U1^h+^@DN1#M{tk9U!k@9Wwg5_ z2^* zjFCUWH;3StOZe}pweuRg9`s0;Z{zw7hyQhYoPV7<1LgwR?$jqjKLlQ4e%QsDhtxnn zCfo3=_IMc9^7{V?o#pXo&eB@gJ~T=E9(J@| zPQse)L&b3F$@VB%lXO;UXp_=~DkMGWr8?~EWULp5VZAsGYlb(mPQ_@P)^hz--X+(xQccCZ0rkm&^^T|z}y`)SLs7We4}lS5^<&X zu@7Z?yk6Mf6%k&#K~JOws$jUGayGujH%FBLJ&FFI%%*kt7Ts_~4eX_cW-9hjEWS@? z6c5l?#Wbu>|1Th3lWh)LJA`>+BK30c9z{CbNi^j)N`WD2;%vKQ4YFpK@?oJ|%TVd4c#lNWk^yoI3NL zK>BBCw&FFcO*>-_+6rzog=38ELOFIZSNZ~JusmMC{bBUDA{Tlo@&U?aa;&l|{GU*N z=@Q)K@@?@k8lhYO{Zq7A1dUgW#d!IE+{7C4Mw*|oeH+%zui~2oD@7xD3BmHK;$=Kv zzoQVvY&^3b!ycq7{ib%qIG#?66lZ9PayNysxQ??n4_YpTAuc=rX+9H+$DEJ7ol6@E zkuoU~<9M#(9u2~`O6MqIVc!Yzxrb+IF0@t{LC3^wJWCmD?&wBS#q%^v%%tbVDwOLz zT7>WkVl^$nw|>_O;gpK<&veAwhj>5G5$qMe0&jz-!3r=9Yz6Ou-@$V52562q6Z%xA zzlIN$6LGFG3`Bwj%rEFES7;-f;LZ~C4)<%JE&5zfH3+YQyBOijJ&5pXMNe)DVELa0 z&w;g0euT^T8Jq#zobDR92LZ~-jfI=gq|-Za8CRTKhnolvIo)^RMuFYnCdy0A;LL8Y z=uOx7$!NnJG!cU70$+r>7h{Yw%yI=oc#b|5 zo|Vg$Cy_=m#Uf5F++{qzlZ|V8glF;NWG88Z!jmEtcktUlM)@#*kmm87s1Mf23BnQj zMC5Uf+>@55;4&~S>Oq~jh3gLV=;JpqcAh|3DdO)zd@sb`gt%U~&hP|d!9c|Q25~n) zSMtj!-N90>y>JF3QMwZ~!T~o(Na6-@(R4ugfDR}o(*dPB9pIvI=>Gw}{4^Pd^<|ud zI1)cuE@W*bb$lak}jKr{HsN75oOia1sZXkpN8Kg45j&_ZKI*!hZQ(d`thLP(&MqYZ$LQ z9mCn(HN`CYNI9L(H6!Lwh8RoVE54u%MIPpieAY&CK821bF_-9bg=aL6XLzmS2m3q(i97{Q^a0=-vA?w9ce)Zt!=EBQ z!G)CY2f*J~@TbQFElm>wXcC7p&za?TE{VEwNpdP&51>W*uQ8CvHRN#?d2Ga26Sp9b z*OC594CL_@(tH)!_3Uj0i+9IXF{|jwYzC>HF9(+lPhJOovB~{_N0s1C{e+>V7SX<&6 zd$uT3x&E48x&Cf%a{V#hXQ?DQtGo`9>5%dS>~cTt7V5b+{Ab)?zBA`5p5?xg=5yZ& zQ!!>-xTl49ZjPX#M&T=J#QebVi|D8@mD4Dm;lANJ$o1%pXnY6r0uk*d$FB@>_@zN^ z4tSa?lXHlyF-#el*6&VTxrgI5XE@$_BgZ2w7uR#8zH|Za$3B<( za-nd)W%p>Q>0ahahp>)6L=UizSPl10{8mFUsKfmQh+Ba43z~JL?}_w1k-jIws&SoB zfOH&jnTtG*Ag{4-FCgy}Wh<&w`fyLH@OcQ852>-ve?$2gjdJ=`5B+^)lX4mzcFbMV zXc2VA;$~FKX-f-USA9rP4!Tnzo@MzCp5#<2#xOEGgrjrP2u_ej&~<4z-I3sS zAXN{Vk2;h#qdr4-zFJJ`&PifA z?Q#hwF9)vj7k_Y@!~cJAS^fV%#Ie?8?=hY4veJ1cjbk+(QQb)H4Db7vq|JIG1e~!-#FdJg$EbP38?bjUk!p?VaHsd(k!dVVG zw_*K*Hr~Yc$`AL+tbMRQWqVn+cV%ZuI7f(V&YSOxH5twzn*GehSYd1Q*`QOii}m_! zz6*_H`x=Cu>~~n47g4?uz`KRJ6yduRbGW_D2(NQ4c=GNA_-zvh-Qfb9k)GxD zauuA1w1C^o-XpMgDrmbZmN&Z}W!j2$(RV@wCj#vCxm_SlJOcL!+-Cn-_|I}_SXX=k zX5e#{PcZ(*{BZ?+BOo6(e_VCWAEVJ%*y&;IiQm7%FKsjKf;jl^f;i5O{pu~sRYcNU z&0X{nK3jrc#+uAr^wT7TE8f*wXqneVNTeiUW4`XH?)p7(mLT=+5tP9;W#H{XQfTJIM#M- zPG<8co5R_B$Y+Z{o~TkKgy2%jSROYv_}B8m{hy`xju}jWb>a&NnsR(g%+7 zlIHuJ^Cde6MV@om_+)3Vj`P(P;}!K?fU{P|7#;KeX2;Bw}qoLhJ@V&f(ZOl4E>ho@H&~Ja2!Goinkq?>M_cpa08T!18FG zFI&tBjyddYoO547+u#|iNI-d69pSP*XYVq^WAu_^9(k3Xa=2p<#_YidXIeUX5_4jt z;ud|0K6@U0(UG11I?i2@C+|G_bmZ+kbChtF=vd=posT~ImbD-I%#XAV?f5gwz@AY* zQ>;3UVqI#Xe}gdASKr8K>dTl1x5Dpy7PIFlA42^EBgzwx_CwmRt9KOF=^fQV`b;we zbM{mgCL5%wa*ire&SCEWVW)&KI)gn^uooYqh@m~wHu{up3@`?~uphjFakX6d0ej7F zkfr1#`loDcMpw#c`{8C(Fy=^=z2!n@5Eg+!duWWd&e z@SQwg*hOjVy)GZZ%|-eK*s~Gsm4Y_-3M_@bh|k3yFN-9;6YZ4%pY6HwMme0$8(9ms$--p#j{cMCh^SYf+- z2m4+F(pfkuV2{R}aXJ~Q4 zVz?_Qjvs^b6aYK-;7&qRrPsRQc&!%qxK1@m|8;4IkQzJMEEbzbDggq9| zhkC&FsU?Vl{2O&B2f`bFLpe4>XB+@%A8P`_(Kc2+(!oC~t9Kn>_po>yz{`kp4i^ss zRK_|9{(Vrt0xUjg%trlh;@TDXk*5`TacD~p{lQ#PBWxpB20llexqu}X2|x0)W&u0W z!LQ;qvUCb??H#xSKsa=4pKW6S(sT5~NVsPJ%3!^Yd#*KwGo7X5u#H}D5zh+yVEx{R z_Os%XtqrVBb;3Pf;4`^C%uZozDx7PE@|k$=(UCGh7~hfW3VXdGe8qVQU(;O{k9&^p z2=SQXn{59e_Z<6OJ?o2n1Q$tiby+xV0GXi@1MdU(Ghf`UdA9qxmm6KR!&3#;=Oa6MD-(L%+k1k)!!B z_M5yQpXA5d(H{*-u%~gvSy!SR_v7rZ7-wf_bE^veG_VV8jo%7wjDZ^kK4&(J^~)|JkRXZCShD)>{>6wWqtYgL<)luvZ3tZ;-gz0CNVn z73>1P1B^Q^4<|mTHy;f8fhk}qcolpJ4uk6;16XhZs0E>5D3}dagRNjcxBwo2QalkP zfOuj*fH5g90-M1n-~hM;44@oO74(xr4+elK;4p{+3BZhBniN3@hy+iAEnqje1hTN= zAm9gP0hB|Dawt&_WgbqoFs4N|@n9i%0ib<+(Qdw}0PW+4_VH^627{U4Ya)N#>yLZ= zaj*YofVT8U`TK=%?2YG-n5D8G%pw(b2*adzEu^mUOlZW}EK1w+AXfVgcCx6M%y4YENkk=`A21n4V0`bv+!(xb2R-+`0h zF31Nmex=nH^Z;YQ0i9bN8XN`D0A=fl>tQHcSSK(XU`%#e4sL)Yx zI`|qK1<@cI)Dw001f9Te@Hlt@Yy$_uWsnRih=qqrUXfWzOcmkLQekO_t1{gaL7&{Rd4-ps-5g0oW z7&{TyK?cCs8G^AhBoqt<7&}8&gRNi}_#MQ8JYXjp>I1riFjyVEu0?cD55{>l(u=}yF`>{`h&EOM&cw-T7EQ?o1G)@cJ zgFyf`F%C8`?hUX5zy`*_2F4jd1JQWczNTY8t@L- zPBaO5ObP{ui6)~glTnt*xNq_SqA8sK+GHxqFcmgF6*fM#o@g3uWg4zc8x9tNe}LCP z4bgNr@EiW-*K$xoH1irr2e>v1*JgD9L%?HT713j`)5kCw%vOOgfP81e7UoO`D*)1+ zgE2VgS8xMlf?A@vsMlPiH5X~kMOt&81Bg2papxWdaiEasaSr%`9srI1IPBr^O#pR& z{6_$LcsvoHtn*fbc>EmpIDj%d84eKVNyK?F3FHGCewx}FbOW%7r;x||DaL1aX%l?oz~EinvP=cj-#-7T5#MfP0{TXc+<6-m>ms0)U+?dl7sJ(C*8w zgAAhOXxrr-0qVE>DX<=V0MM?>VVlb>L@O487r-`f5TK1#pdDA#5Is8-Am3;2gCe5m zcn|=>!Al?tWP@6wmB?!)`f=qy0K!(m23NrbSHT8X?FXpmss{jhuGWH3@C-oSt3L+& zh}Lukqrj739e5w0?rW}q6i`Vx4hA9M5WYSJyZQ&xTlYDDe;vxRE{$k?4}dzaze%)V zJdlYtz79~wji}?s%ODw45IwI3ZNWe=9iaTr#}I8oe{Pxx76RDWW>3%w3;-jAvZ z+JGToCV-qr9mY2civff;x+OrKkH&SQaorfCW2^%9fRo@JcuN%0lW1I40C|ni08r+} zF9r9BCLm4|)&PVtu`Fl|;D6#cun6n`$G~myooEu$GYPVpgs>)eBbpLS6q*V60LU&B zvJ1ueP{=NH1%Nz4&w@upVH!X@!jR^v1wbOvG{}A03~-BRdM~0GB|r#3-!KDlpJ@-= zK}mr8o7o*8ZZi?LnTY=^l*3slhqF))XF)!*VgO`0>jlwl732o}z+`|l&v62X+Z>el zaFn%hl=pCy_wecfVTB{CaL76wVTB{CaMay!_?=4t`7*ZAe9I+Y11Ne=2Mc6_CAcKX-mxT+!R&a!9Q7~u%E&!x& zF=VkAvRDjREUp1MfJAT$yd!MS0;54PK)x)U2EGz4L*6e#-Y-MmFT*{S^#@^K6*vHp z_sfu`Wgm%_qwFou3y`Mei@|Pi0^9{(h*ls@D+U6Dz2X|SVIXZQA=j1X0OX1wX%*zU z$`2sFR;>d`0P$Hp3hV>8*BZoq&1-ymv?)LxSc`bBLp;|Zp6e=smH_uzhx@EU*F2M7c>zY%p~QxQ-TK*pPKelyN*_5q#2 z9-=Lf@0K>8510%f+pRsp1aO{c8_LEul#OjD8{0M!ZFc}DYui!Qw#NXJ>mA-;2hq;l z0O{XV4y*)_(=NzqcQc|r@UsVg_u{&}y})J=PqeQifLtQ&0LoS*@*%Q5K-r2M2Nr={ z;5fKLw4VX^+g|~+2jjph5C`sn&qPs<0O^jZ2D$>2k*Fm^2arDpP#zCLJ_j#>XGDjl z0_6MQQs6pKG-MKe65In{iDEJV$SbB2XbBz>#g+yQ0pt`5ImIIESX>v2>tbKj;Kde&WLcWEF1#kY7C7 zkOb791jsre2-E^7TM5Ge%0R+K0KW;>z$>C7{fUk`1B8F{F%hZ{CAtDXu!-oHJ3u;) zjRbSR1`rFbf|o=|8ps2R1H>~4X-OIjBESxC6eNSUM91yHGNKb+U>4CyPf!Ll0?6}| z6Twoj7n}n3!8f8)nL$wif2Rh4d0;QN1l|#y&J2nIr1dnyIgM~muO&K@1so(g>kI0G zaRBl=3)!ASJv-MIApG-1KwS_9(2iX|oxjiotO5@J{9i;^7xRI#pf>0Xb`o964$%Hw zY6WobOSt|r|+sBB*<3~h)AWcuY0LbSl?)@|tT*b0(Z!n4IInw{D5#jOar*)!xnG|%b*nj()%$2Aif{>gJa-#fV6#LAO|Q0s({7-ar-l> z@%elJ90$1WGp_sm8EeY+AQvbMkk4OggVq3P{xTZO0L#EO5DiX)Wbhp5Sl@L9c>(VI zwHjy+kdCjo_AAo(brnE)F#a3(3R!-|eZS(qzw*t3wP3{eyB7!sW55EeNf!X+K{N0K zYv0p};mf;>27*K2J~5UHR0U&+VMUtpHN+%*$4N>ASHMH?ff&|lnd}1aEhKp;;j2`j z8t4M%0DR{N|LQU9hF~f-CaVZn#rL(;ZUAAacY&3dRuxPG8v(xLq$PoC;4#2Ahy=b` zgePN|@Bj6T=Igcz)CDDOjpFqbri8|IRX4;!#%Sj zjoA@)_NByfAg(zPpIqg^OmLW3?%bd)xJN8cArL{#O$CsJ+ZAH&H307IQ48RDj~m3W z2FUUb2MdUKB7Dzw0Dki22Z(RJYs9=z-n zyvu^4#C#C8&kztvtdJMjL#(h5XhO^v@$#KdtO)+EDS|MIOa>@}MJ^FTH^lsqZoldP z-{|s#eEg6He#iqqq}gvTNCb$RAM(KO8!><6fxjC-9{3{<{BaNeZUFh^EnCy7L?IqlO0R0=UT7v-m)keH)FC|t7 z@vHNUSY61rF4A8wGe92HgIoQ@#2TO;HHZR9;2J=FG=R(-Laq&6K><(#R0qvKH!uu@ zf`wo+K$&ZJ20*5b5Whz7-w5%-IvQ()ve5{6-3aF!eIVA@9YCIq7Xaj6la>H-ZL*43 zQ{-V&gxgF8o?rynO00P;VlB!Owwi%?;0}0AtW`k(|E+cdgx$I=u{Nk*Z4ho-l>N3< z0OZmR;k7Fbju2}P`L>TD)*&P41K_{I6Ji|$K`*ctARV1@f+hg@-3f8;oE@ONcZR*o zY+_v{kO|}k2)irN+qEY^UUXdo_JEV%9{5VE8~k>Ith-eLEx|xA4Xgo&z(w$sSa%8J z0D+(;=mUDP0$gH1arYA5DTt?=fno1%npX%!TtdC8EbZI@D^f2@`BQ!Ay^O4#ty9m5dP5b z#D--B#X(ch7fb;wz%4C zmjTMiM3j+<`9N9F81x1c0Pa0;7dQrz0n#%G>6wJ|OezWLgYIA)fZs`o=Op-@1izEs z6Pug?Al%6acQV4AjBqDU0?R-&!2KuV{!?)ODLH^Y_zfWKQwD=+U=26~&I8CP6mkm9 z2H-aoenZ=X;b0Ef1meIQ@R?YcBghYM|1gyEFr+^W_YYeF5N6m#@RZn8ggG@UC<3a1 z)?gsO{ihaS6yCXmtv$ug*@SfNl4d5PgP_O5p4Vse(Zh;TP!X1DY2nJ06;vPO3ECuM#!qIPq zqaOh7V(W?n7kJ&Jx@B8)y$;--LYM1pk}hXR{lCthb;pY&lG9Yj5y7v28s-6tV4H!DC`OkggpG z#CCQfw#$#$Zlq<83E=#mp#bsRI}AjF)5P{cHv5ndkw|;wXkz;d1IRQgC%`pPtBD=( z1<11l$fE;@%Yl!?4mtvq--ELN?#>WyMtAuj^vO0o%I^qf-rz6!tJ8+%YQKb7Q{3JF92=^GuMiS(jGz3fs z7&9dy50Wkcl(nR9#ExeMg+OJ1x_P_@Ks=8x1lz$8fUu8$Aa=qGJU~T&_?$$!JXs6u zC3eaOR01sl%KoX5;3TorD5Ixw{TY;*GlpzK`O3UIFr zH^Dn%7fk^7x>yP{0Np?cI0O*xr78er?-I(>r5ONWU)l#yHZDCUb~y_u3{Vy?!{6n> zU=o-Q;P>)AfN(CqCUzx1KzLW+?+W}~!TBo>h+UOHCJ+cZf`I_>y*d}HA$H9P%mGNB z!S5*Rzt<*q-3PP*C}YL$umayfu9myG;NMjj`B zB6drFq5$!{?FCQ{ZbKHg_kc&lQu2VNU^+kcyT<|Q;@!8z?jis0^#O}P0)PzfLoW9lf*BwZq!4?6GVq`T=mchiIARYu=m4$& zo!FxV#2zE9kHd*!`HTHg4y*t#h&^cpP7r(Q0wBkymx(*xp!3n>f}4b?R{JDhSBf*ar|aV0-E4?YlA1*ikM13ZDFUL~&O0?ok$ zupOKOZ-`^av7;6Cx(9v}ojK6%Wb zAHZ=Qlz}|(p9eC@V*%$t3V21_4Ki}W7n9u@g8|?mad(sv_d(z=aSz7yBQp%yJedCnrazOir22#m&{y!*fZq{oQ$0wkfIc4<+W> zY<;D_Q?6pP7zyp<4&b8d4UHKUrBA2e6u`~SgpKVNJTd@)}vLJj?& zs!DvZxg7kes=GO>Iiop;Ij8EN<~ICGS&TC{@A+>i$t=u@naucKz$}|J;|izTX4NqH zQodBmXXv8hPbFuUv&n@;Makc${L)P0sF%r^SN@A?!T)R3jJHtjrB-RGQ@ZVk%fDlG zl71-o#;0jA%2~|?%=yed=0akg;qo(7d7~6;Q-#G8DVRIqObe;o->5pGoK!<9_fJ%5 zlRaPXXC*uHhG{Nccz@pIKUDp3jZI08a#5QqA^)oUWhmhx=D;j1O3J1GL3NT#o6F0k zRS$C}b1pTHabAotT;wwUsQ4mY`2Ua!=BcVUH&i2=UYwWZ`TkZF<%TM3=wh(VZgWvH zh!8acUjr9Er~sR8*#2WB6*5=g!YLPT%Y`}ElvOHZ%4)7)4&c6jqdM{qyiS^`#cS~q zypG&KZkF!$<6Ip+gU?VM4gKFxv-m726Q5v$t&S#rlH$Zgj7xT`&A(gKUK|N>^4``#PNM5o662h zrP)%C|6RQlu1i(f|HbwG@^XA#DM-r6gVIznUJSYJ_(Mrmr7AqbkE37AXHre>!d>Jp zhWv0?jn6|P8Aye_YcYoY%$ zC6D~6{?U)@n5s+}|EgrKRK>kzC#VoPqs=tzf3JLHLpgCDlk#_phw(O|s?>_hJkn(1 zaeSs!M3{tx?|NxwH**%VyV*m{CM%NtKe!a(`$Q&T2G#jS-cG76+X*+3U8*e?k_+&W zyg5SMEc;4z<#FD zzsR@p0sjp(T6W~&oN;E7pmUQfvKdFesK&hJf2gkW^8d9`J-MB{ikC=J6?k8<;2-n# zA6?|U!dcGmApK`bwBhA=SE;R`r8#k1-M}&KH==@F^mhZrH}Dm3AB6COi6LGLcll4qGRnX-UPCMT1VoKwytuTFDGJL2c% zx_q@9AnNiM{!I$Oy+WiAnK_WmWX3=8575)x|LkfEYUNCV%o)s%W=9-JW(T>Fxr*7# zDb3_N%$3zFY8EH%WHwAFn<+U7JD~|7RDSZmqomPt8@ajYBDdxD_!g|*MgXfSMeyzU8H(aA1O>;DNU2cN(#Pn zh>UW;YkvOoE6hP=Z>JpQVuG2g3NB=kMPv{zW&H(Ts%C*}ZX&)!DrU+cD$2#AR&q)|4`3oI_vdFN zZ~mUY|2zLb{H0!nE2H=vxuM)Zt}i$JVM6!J7bo`jk!rVj1(g<+N*_8l8czddl03z7iK53vzk-QWiDd2hwhv{ul zW6TXO?#69ZGKJo@m0dx;>E!UE3@$I~;2o_yMXE9Xd5w-Xz zeod|?YKRW91I9P*Vm^PUx`+Xi#uv&}_#=LU$Meg)O{$U$@ko@>zI*{j!v4Zt@)c!K z{@e43@?buY&y9#nu;55&bz1PtJL4fQQ2SWD*2|4xKc-2qOj~Km6gYe zMPf3qF6YM>sRYj_<{Kk-FwtrU#2&3|kqLH6RU_;=K&0T?R{kiF%a+#O}4 zA4rzH#czD22ol}o>ihx5zYJqlqr_x0%8rA?|A0Dbto%4-F_)B!noF9qJK32_$o}RM z=4@D5=_|SN%2Gzv(aBj=uw*mLYTS*!CanX}MlIzHIAmd&9jcCDl zqGxfC28rVQHGjh&^T+ZM$xhZpWqw5Tl^e^AMPKCAAkl~KYeBjpuSaSv?2 z`QQ6VL!br=C!vVU!d_k^FLKH&Tpb)mbJ0w~9_ar!e&(|Lvt%bKi4yz^Pl7tlpNLVS zzhvTfQO6;DytRtt8BQKzhSWwH#Y5!zVu^_0Rk$0*`7=Zh(OT|}5yLpSGFp$`(in4T zuFHMoKE@G76TNZ7(XwntC|&<7FTPteLmy(N3VuTBD0MJplV&=(%RWwd4B1KJNs~k6 zP}0cAMopRk-7H*rEtpIiNs@6)j(C@hYA#I~NWnFO;Hr|0YmCn_n#BsE^}_d{%~Ccg z8-Gjq&qZpEe{Ae=tqCZ2GuH^QP%M=f%00w#d4s%0UhkkfWN^^r={!YrG}fwJUKsDI zalxOv4*1i|bdSjH94>8u6ir9UiXu8|5mq7cM0kh=t}*^ilE2o2o#dVJPHQQ>8Z*;Q z(yVRu2%bT|sAsUY({tk8#|rsaJLwZ>8%?I%)~>o8#o?9?)^MvG%fX7UlGa0dN%p&5 zpTew%tt06)J!g*A>$*E@uh*tI*84hV1t^ldtPicvX)JA_2i7P0LDojUtGBV)M!?p< z`WfHFcBd&cMK@c|vTn4L8tA#KS6CfdLWOh>y$!3Ox1nLWr|w7(X(jd0^I0#jx-^Y) z!FG`irwNo<&!>mdRQOx17uQV`g;470rS+_MTkS$>qL}`PTX*WCsC)>g=?Hbi0*Pkmhx8J7 zB}^}TjnikJA7bB2*OLxX7rnLq_{t?ZuaDKcq_|UpK2jfbKOZe4Ss(KvjFyw4k9rvj zQ_@Gi3}fZtUY{1zcRE7Z@ve_&da&-3*BvP;uiq;lOWk4aFK?mWZ1G7i2iE@9t<1gG zQ0r9d4klVwgt?OiG%RU7rm~SbO%=6<%;#4o{?3|cu7~RCt2C0R_*^y;yz-W~CZ z4fcXrjYc;3!I(+BHgS8O& z)nM*mGhmLeexcZ=B~ed0vaT7OP%5iYXi^=*zl^r_MNVyD^nx^TZ{0(BGphB>cObPu5qA>XL3PxN6_XR)t2w)MKBkEJThN9kxYW+XSU>W66a(I-c3|E*&W0n&m7 z8#SsOs}k}eP9H#tA>|9{S!mn1QC-l)+#HwuQFo@@6a9pqi|$V>>Sg_%c}?;uk5G5R zFpegB+l~yQGu;cl@(Pz92rKeQFUmZpMrP0p(aLF!i|N17qnR0s>)DxU_P0w2?9%KK z-}MIcX7;OIdIfqg$NCWhnlbNwEj_1YUAdTEN-u(z1?37fg`S_x}U-f`2_;(8Od zbd$10FTjd!z7nJlWzJi&eztbtBeq^Ds%K@Lw?4F@IMm$gQAaP!Xj`!y$m}!Q+;ixS z>BY7WfqG-saNFA))*xPe`-K2pHy(zbo7FOmMlAdG^M$ROn6~4*zx6Ad0t4S&9%&dw z>Hf}he)?z{v(xo5DtiB2XNw>yeRrLyOi$?Pu2bIDFKqGdGrqbL>t+}(tc77zWe%xE zM#Jbyr+44;N4vRX_gR0G1B>D4Ob_;)F0AKg84Safb=Z5V5acq*FuKv6z4smTY^>wH zlRkP~dbiIbKp)Ji?K_zbW#o^2uV3nd4U0VCt((~>!x%sl4Py-TgJF$jllC7kXnoH{ z8^(9G3P%fzjWvcbmSSO`!3>E?DgdA1hT+Sq8pa5kXc!usa^P5g zJr^?{IOYPw;lMFhJuho(IJT!A2c#!@dp6EE>dWdHMt@2+jErp5!9*{;BFk(T9jKFG zbfTGtF_JnSOe}y7J~IapcWP8Ac6yV~KOq>(O}QXegbq#1%yE&?wGAsYY(SA4SFO zaMfGTx}!}^dJmQ%@v|Qa-<{)Vp_YRe>{D}i0*_le^}7_hh56(h2}Wy`o{BNWM%h}QjI6|-x>j@`=XNI7MSodqSB1u?ftCf8<6y)^RI;EDe_t0}5+a#?!}+M* zl)}URD5W>2l^cD%A+VcpZ!` z=H|S4^$cV2{Dx?W(~RZ&U%DasJNCZ0idwca^3@d>JCa}TfDxVidXwRJ>(zD`yKcSO z1_AAe`d~E3Q=+FggpvI6{$^zBjtkE>p-}C-{A2?p^T&!ix%Ck&aK+nqINGs4{22yB z)92r-3ggQB_c>u4-+!A>=axRnzKu3P6vpt$7_gv0rjU@$Uc0P2Lnx#?#g0pUu!OdZ zyL&5`rjM5rXVUdaQ4b$eNLUmrL7S#8IgpDDoVjGlLW-RI^!O_pAHFJgIEBtza#QTpBD869*wx}>*&Kc) zh*oX3T=Au;TN3e5@QN*$FK442TW(w~#`10TzSNNRZeRMWAT8a#^;$6+zjJl&SCqJG z-nr_Ov}@G^M?A;A{agS&+I8w21_isP=37a7_C&~zv~SP((}n5F-q2UCXy)GarwY;3 zz0Xd$(dB*PKH~+A`zD?Ap-ub3pkLj$=;ceBZl-In+tc`cdrlUj#K`F{UZ9m*dBU3x zMy`dP7`YoRlOoTY^rmh5C*a!gQK3l%&|FQ2zAI`j^vO}Hp+`m$^n?Qwj^(Ev2PQ#3 za9|4b2?xTU&o~ePJ@jCBq8Cj$xB~X^2RFjL;-H>v!dnU=j^?9rhc+F}$3`Fe3j6fK z>yLQS#KRk*huZXshquB$*`_ZzY;VoT`bN)B$V-c&S3nPo-UR(j^xjW`wTu3g)QHx_ zM8xN%c`>WwJ!opoX6O@Rw#R$0_A#H2HKdy{Ry1IJVj95xT^=xIM9XXkHvU;-D9fGsWbg`SBZKbJMAWnb&X9 z-Xp6EJY@XH-VYsV*0GC^{3tGI?T0t0s?jeBP|C^G?>*?+$@neVXyK_hm!8qK)2rU) zrI6F>p8C@LGY22%rHHdPZXAN)&Z%dgf5Nl)O@DLhF*ZB|jqT zt;r)i$dckVc_!X_FvsfyMOliun|Mdi(T7X1(Ju5!=yMmg_UU=bLh;XHADAfg z)$H%>Sgkj^9n2K+E_wCu&~L52PB9;DnoM-KXk&+^3HIRyb) zQc@_f+`-xB7#@+f4KC9ruj68y+T9u#Ut4K5KCX`3c*2?i+51d*JNQ}6iQ7kb?4NwJ z<(DQ?wvT@76c#@CLya)m`Ca*`ah)?IPL1!9@#wUNm0mucxnSt)8nX@#`o42^#Nb!8 z=1ds$d%-zTqn!Qc#P&;k7yc#thdT2PG|hiv-l0bMPs~3w!rp)WnX20L1si(jJ-*;_ z;FGEgt_1#3H6pZ2*;5gR8hV{sc(_5nvkR}4%wK)c;rgEE7F{j&N2NtB-JfS!$ohce3sJ7 zVk+baSQb+aTEc+eD=s@z>_MevUtU6E#qL#Fez1Z2+2yg7aukP_v-omL#T+FKt@LtB zrR*h_pQIt+#}W?%SKcd~QeoAav9G*V?W^Qk ze3hlrZNJsV;p!K?Zs%EZr~K_wYqQuVmxFe@+**%1hsv(o(dvf%y4OQ*WH+=N>)s5# z4sF==?CTA;^=}4W&$(e|C;L4ccJ+4Nu_3ad^SKQP4NleCkW|Cjf5ZLOmrHI885CD! zpV8_HE zTE!hV%U|%=IZ2yeWar8O=lpiws(!+6*N#pf19$z={EWwL=W7dm_N?yzyy%`??N1lp zv%l$Sr#<^Sil{w@swMdCT{iS|;k~;CpFXj7@1RqK_8uB|vf$oC-wTEJp6P+Zr-2uW z>|5Pay}IvEw*YwD0^-*seD_QDEPVoDY5XJsEf+-@cb0&U^2BIT|NeCTJ$} z$!w8p$DHtvJlq6YbU(H(($Xl&JMu)I69pnq7dsIUd9Ke%pUC^|PIyIr_y8{->YVW2 zKdsf#y!$tdK3;IYr9Z9RpU}YByx+KD|EWH3yEFt|uKAt_+J7S-4j&c0QFQ;?w<&|8 zrnOG=jGEd1O@^qABa#Y4ZEXf^Zx3iuC7xxAIyelrv!2f~MVj~{ZcW4LuK#OhI^QpeE@g2=>vGJYk^TZx+9_Jo=A!odM z>~*)#)notAarl`fxy3Rbn>*I0P3#BD0yngi+xr;W5NLZoxqDdl4z;*iB1hoxU_WS< zZidzaT6`;LM>;@DY6tDqeQ4Lq8(LLpw@MjWSxauqonf%u4S;sv1KMNb#1l_wFL_)+ z%V&pUSuCH69(!r|!V>dXzNr=u3$4Xjx?pHV!wt>dk})G}x#1Q!ZfI3ClaUUna4gPGJTO75t)Q+Ft>WF82OuOiS_){%n+~Uvs$7YIu5ER4XUlF?P zFLbd%!WQM2lyFvxUkIs~Mt=(gXV-I^qJ0xA|vf=6R zBLR_Zj$iAs|KjnNwfAH@5uG>Q^JHSt$W~_#x7E*|J=k79bM{aN>*=#Mz4h`J&-c2Y z`{MPU+nQXw(_?GnOV~=v)^aZygV?KDqVA{)Nl!&HYY2y}iBNs#Uim%Vj-#`)~i;Y#%28O>rBf( z-w)}Ov+VtGopO|Yc(?MR$j6sGKfQQ-!y&uFIn?K$i7n>SuFKfHaLf5!U{S3Tc8|8S3Tvrngk#g|X# zD~gYwK7A76^Y$v)lRxjQ;+p(*Sr5hc+t!}W?%$^eNz(VTwYdDfSrtjr?__)PO}AP( z({;?|b#kkfLa?EOrVn_r$ZM-32J1MF2N$gJxuW>01GF1jw6Dtt`{ZozzB=scwCKI# zoA-<20vF~!{RQizSf+-OVOZ5K;*-N0*T>l(=DzEe;+0Y$rBI4*N3~RKNDl~GVgSG3{ za-(hy>ehB67ecbg%XXD*#T)|=6>Ikb>+L42(VMZV?|>D4Cvv8YlnMT`VsMiU>tH#s zLY#|oQyy|7ck-aTSbNWhkn;bBA!hkM9%ANyBShqbEuQ)Q?Ueu5=`PT|eXl|MR!2j> z(6Rl%&iquzj=k~!X~!Xc{(K|!06q_TAP<3Fl=p&Oj5mWG#4AHD&i$a5;5ng}`8XFUq1i_3GXgj{^4T+na6fGN64&HoaHhj=k7qn;v4*NAwxodjK2M zcR;5;tXDrAur31;E7opc_dZ=%i-80E{8>Hd0jw(YKvo8NQ5FEb7%Kohh~19k|*4r%Q7;?ok_V*awvUGu-fR9o~%e^Jlo#gj>#^POzzvhVhS`QTI(aW>gru zOukfv0w@r6ZgXwc*rDEuy6x(g3Vs-TBzSY_TBZF;yOanjk-J3h;x~&QF1|GAR!~Tg zci^kQyMgh6;equ)0LT~^9`FjB0_(vT&hA1DM?z4Jt{$x z%rD?S=*(!9r?VE!i^+JM+is-9NR0rhh9?;DekeT0m*&RFjWXcc?${5}Q|g7CVSVr~ zdq1f^{(&7W#vsiS{;h0*W%70sp31@g$PSRs&#SQk5IbLc@!kT@wZYyEJNE~OGQar9 ziIN$C|K=9qU#lhfw{98!%UVGzMFE7(X$IllR@yu5-Je2r!54zOl~Ssg+EneUhH8T} zBNak%Q~cF@Y7@1OIz{WL8H+}#DdVVZ9>$U~J)`IJf?lF4evSJ1mfq2ObXXth6Md#H z^c9~@`HoMapd(`p+h!$fUR3akD~$@Zqgt4($pvI6S;{n ze5u|&{hp8}>`)S(vP_RhGb$*}m6k|5rM1$0X|*&@S}1Lm7Dzj!&C+%$LRu*;m6l5@ zkY8J*WzueGk+e(NCasdzNb95x(ne{Mv|fxAAz~bMxMOb&cF0T?Q*5~~KpKo(7%GL6 zEINvw$hV(&^(04rj-N;FU&LPH%lrzz%CBLM<8|y5xhX~nJe!5QiQ`95h!c4t>`DAA zDcE6B6Ya#$a16N@R?4bH)OPAHb&fVv+lRA4$*&Ywebu(=P<6J}U)%AQD+(!PR3EjK zI!K+N4bk@6uJBZfs@`f#b)Y(3>!WS`%N0IKX|;gbT6CuswTS2ih|vBI!T!83S2edB4$N7_T}k@ko7K)bI!(VlA0wCCCj?UnXgd!xP7-fHi} z@1lUXu6@uxYVT1RK5JjJuP6@%g}3m*9^tv-hW1U|)V^z0O&27R1+(pgH1@Ab*uiQN zw}d_Rl4lUNu~*7TI0$Ev5j)E>i)`2{nH75-T|`ciUE~nCL~iT=a>LUt?%18;De?&~ z?0+tdor6V$pYRs}*hN%S6vG~x;-aJ|B}$7DVwgB8%80U}oG6c7L>2IiQYGwbsVu6B zBO*mq7r%)+Mowd=%Uz=^h+3kyxF_m}y4c~=L^Q%K!^WbixG$Pv-$)DbK(rQZuv@wn z_Rl;N?L`OF`A0@ufEJ;n=q5UeF1DRYJ+U{c$3K=;(M$9eeMDc;U-T0Luy1OR7%YZ} zWnzU`E_RDuVvpD-_KGcHtJomci_K!A*eN!NZDO4|PF*bGM1qJH`^96Ch@D5r#C8!W zqQn7lP#hA6MYM?v>=27( z7M93Pu(RwOJI^lSD>*mWEtbOGu=h;oD)tSV@!W?4c9s_4{&-3#m{-R>s203E+S7h$ zM~Ct0*r7KQJO1Zm&*3V*9(#;-^Mlw~mc&o+Q`jeS8TH(#<@d3h>V<8G(kIC*IZ9cu zFU}8p!YX3NV`Zr>cKX)GuJnf3`P%`zTsmQ|S87Y&AA5qwN#msn(nRd`oF+||!qIju zMayKgQ7h3dtw-w=DMd-qQj8QQ#Y>6ODe0_qPkM;m_Yv|!C0Hq^lvgS!6_o+X5aqBE zt;8r6B~D3DjwnafvFbwgka}2+SC6Vm>KVJ`cF*lzVW(X?Q+rbfQx{WL(-c#82^gbjNhh^uYAc^vLws^u#`+eJ1;Y_TKhB_P+Lb#yY))T|m{fY1(vchBi~1rOnpn zXyMviZJst?TcAZ~3$;bsVr_}GR9mJk*H&mNwN=_`ZH=~8Tc@qpHfS5QP1aDZm!nwisFtY3YYEyBo9&qK z)c=7L1W@3&k8}np^SOqqejbR~d2EN&}ktMN{>;k*SUMV@1 zTuN^AYwn7N;;H0QywKJbPzs{=^HB<+EjFY&T?to~D<_px%30;SazVMMTvDznzbiMD zTgq)EMY*GPQy;0%)X(Y{^{c9DvgWL1{6%8(sG3$stE<)1>T3oLYqAV zeZw$qxHdu?2{lR^t&P#fY9ZQqZNguU#%WWuP;H_%Ntt)0=% zYUi}`+6C>Rc1gReUD2*;x3t^ZO)Xiwr`^?3v^(1G+I2NVU8G&p@J<5E0p`*&v}(qV zuJJ4!?Z^qV9xu>J)J3b%%jg@?Gj3OQC_9y1%5K!<1Y7-0vU~MQEP7xp{J$t2l>HJY zV|cb3qsD(2hy61tp%&#NUj>SapwxCFKh*X0&tCCxKohqf2R?1+M%?M+RZs6RJ*CXJGX6qnxL;4+zc8R#QDuNBxxmr6@*4`7mbj#@MI{MlLNe zLh6I@%Rr2irl?`+bc~ja+S3R1#%Oi6{yK8PyBt>j-MILFDqTCr)baSgDuaJF-v6JD zT7PM?N-94joHt#@jnbvuD_zc$(k1<~O-XN!G2SYOk&Q84vCTyOW~6~Pgg}Nf@b}X! zyfMT0D&<_f#(q9#6i016`f>afb&^hDKj0ZUOXui3`uK}^z}C>8SmD(O(}GT z?$SNHKlcH49X_JR^aop%JXsMch2cpYqsN^g&6H+Iv!yvw zIQjykCosxak@OPgkzU3crkApw>E&!qu}#xY}spr`uaSP7GrkIE+lxZ;5)^lM-FN6dtb z*|8_)a1z{$0%LW-xUM{O_<|qf4BpZ5=QB8mcQufN8jM*xQttsf(r=qv{kW?!D?z)U zx~mP;9_j>J`(VsUaw@*6n_6G(u8v2oUvImgkrGZJ_)@7aWawv;Vjya2F(pVTj&fN^ zsjO5}sw-8Ls>*Ll4W+hHN2#gQQtB%8ei_TuR~je{l}1Wq1z%TInkmhd7D`K{mC{;i ztF%+vD;<=MN++eW(naa2bW^%3J(QkG8>N@hTj`_p#r>WCF+FH~yvd}DQpS+I5~75V zgEC&3K#s~JWh(rKE8&zCz5H@=HAYdC%@{@D17`_J0_8&Ae;?mTexf|366!hiJe5*! zsyFG^S+|$iY%dDWozOQn3G-A6`!6qpGT= z3hcBtsrGojF9Y7v;HWyO&UgnzCOnyuMRhTr6;ZRP+0`7zlRRo}H4k#vL-n+k|AJ^` z3#ogx`sgz?1iuj;EGa;vZ-LstKi)_u#JFiK)T!TbhjZk z+W=({##n}HYDgUO11Yr){pp4^IJ|KO@7($63+o!Z$KSQ2hBVNAbi-`jSUF*6CAy;Y zs|a7FYuG34jTjoWAsGCqCnaDvuEOex?VQIi=YkNTv1dGe6~-uGc=iXYjhHo5AeC3; zRS}zRygQkVwHR`+h2w;0)nf3CUJJiT*`#7paeV)`v{Z#k+QO=gc=e~M(jI9KHIw#9 zQPf;Ih&P3|#%j_P>LJ~bZqjgDTsj~wM$I+a2YeI^I^*HcIquj4K8$*5)brFaXAo>z zFlU*|=kbMj|Hcx$Ib#)`i(ZeXs5kM=d<&j^-p+T}<}&+uBz!Z-$_P2N?)Je~yHjlC z(Gc7^Pm#0PN`^VW@+?Y2XE8z5Y;5a+JJMo+*!#yWwfVQnH(DmOe5b_P&aA{4 z*liL|Ju|{|&1B4(C*$quhM9T=Bct~8f$t6`OS&t^cRkzS2;*3}mML|W#t4PWMX4&> zQ4UN-*+>n8n?{*N;m8}VhWR7pw9;g}zuo3$tfE%rIO6v!>tshV@t|&})Xv>SE z#Vo0m#fr8>n&$FT(RjR#-dM~15x%N^`k|;3a@G3FImKu>ZLMc@oVokU85y%wD2)D@ zlJtwaq;6I><9{Ef>`Zq>e3dCiJk*&#l`S5|vur=((GR0Ro3h2@)i381tYz60W{ym` z^vfApEoCTMJP!WiE}<>-u*G9_x~mb7Fk3v(y#Dyv;t`bY2Q82C_=hs$p}PKZPQk3w zrfl)}_RAR=a{wq?Jbsmvq%2kz+v0H|-M0~sNr;E74Um3@Z{+ACWg2pHxw4To)Yv%6 zh+2CewdpB(GdIjuov@~g^2y$@ces+_o3rV5gl{XMm?>Z5-*7G-;{}^7{Y(st!P7yE zZv-&Y+&Np%PrwB=f~`-rr=OY5cjC+tgk-a&yAR?E;f}Ixm}#zzqipf&2&D~cW7yN( zd%)cfVN^v6_|vr!Tv4_;%ru`U4O}G!t39a-_2cIm_Kalq%uw9xPwtm+$4l%IH+r&E z86X@MgZss>Pk(aT4EOu#E)45fj4gz}h3o>Z#4{;arLrmXJB*=*GmFh#W_=8WwRi@d zv5huYi8W)*C=|!dK)B6~v&u$G0G+WhHWx-6U=IMjQjb|H*gatX5nIM;z{Pkb#tW)^ znlGbg_=Z}Z5&g6=USt?O+)g`ZvpKW*4eq>rs2~KD-Y; z`PPq*qF#8;ZY~YS+So#xiGF4UK8=MnGJKA#}nA^DS>~K3et5vbzO{QkcvyCnX6P*s={)it)JclI0l z%B!rilq@}BQ}Mj-XEtB{Du3lI)d%VW-U@52k9cc4C%bCAv)x#`)qI)VM!Q%Zhm_jc z`WLeGFOsc)k!}5pX6s+D*1x#f`WJUx>1JFF8QEi2BaAgA%y5ojo$>(I zmcr;J{^wc4TCsKP1slX(u`et}x+Yy`$EC+I{x6lkDcQIy#)V~hkkU`-$D3h232!b@ zW+}6HN9C*Xm3LB?s7rZgb-B8ncU4!at9UncgSvtDP&cVtc~5nR8p->rDQXHIs%6q# z_%JPpmWz+l@@hVOtmdl)@=z^E3+6MiLVg=>>UnMFz?a)Ku^Yn=+pV$Nz#pItXsp{$ zmDZ~uNjRgjIzJSiq_X{!XmeLsP^!Y(EyFlB)wj&t429CeP^yfd(Qe?CT^REjEsyat zTA~n}KE}8+{tNo4Qu{N*moXZiS*1q2Ir?1&8;A}V%7REna4VsF?G z8zP8eLu`nMy?5+1!Cnz|X`Vw?t-9j(tLVPXvT>&A}yKY*a3cHPM#JF@%vy<7%bmS`J81aq@tWy|%kZ$K>KCFO< zk8I}FyT>O77w{`0~)+-U|9AD`Tn`$;JE%fqqX%uiezj$@Sl z2*(&@M^l}afkuUboN2_Wl!UFw(jiKW=qo)w^`I;H(UCjW*4S({u~Z=xn?zrTjS4}8 zRp>$fJPkiD;%Qyd%{^{_$`Q!{+NXJ3yXVt3E$6x|W8+!Q7%4gO+)Z9Ytmzp}Ot*|w zmP4Gcc}Pxt!tZ0#JLkakqBpTReHxPM!8VjV*`uN7PD+d8_uVs}E}2g&t`L@dk#OvX z=Vd7+?`OYOr_U*R4YAVhrQs~clTfo)rDyf_rlQHjCe=-<8_#GwwDHi&>81r!C?-{2 zQ+avi`IV<-wJ`DtD>ergG!Tj+MpA=G!L$GS^c1`lK1omUyYt^lw_ROf~$?S@@en4S#bM{^n4_Pt3y8{-yEg z3(8c(Pt3wk%)*b)!jI3wkI%x7&-On)3qL*!KQ0SDE(<>{3s0+?9%oz@eq0uQY!-fO z7M{A6hU2+7Q$1gLt_sr2f_;dQyKXMKCYD{}%dT-{R~}carEzcOc)2T&D>lph^0;C* z^%KkGt~{<-FZaviiUo7OJg!(#{lt#BD~~I-%>DAXVo&uGi{`F8u2?nq%j1e=bH6;U zSXceTzPT%pD>lyk^0>L3L3 z3000OWmM89etF!9<>TgdmS6MZ=62=?<+xH}buYP{<=6bUxt&Q#`M6ScHAZe{`87Y^ z+|J^cA2+u%XHkwTC0X~9+gX0ikDJ?>v(1hxE^pm)c5P8dqFuE6%YTH@CC+ z<#BU6i(ei$w=-5L$Ib05etF#7&f=HH&FzdG%W-o%i(ei$x3l==adSK4Q;wV4S^V<2 zxt+x?kDJ?>eU#(ob{4-pZfdO#V?PW+gbebxVfDHU)Dyz6;lzm)#bGkCr}hF+|LnXl6`TFzbH_;fptt>+x&FVXz( zvFYEONv7)0S)_lH%1qUt6h@zNb(GpazU&%TcICgy{xlqxDZBDtWv}`x=U#T{uf)uL z6_@@huKd@0&uQ6CeV3Zl-a?mH(RWInA%^ zSIMu=%s%VEOp{HyU$17h_(t5V|HO>7CETmO&glBn+^J7-6B#kO)QxbbFoxCN$;#6^ zagW}Gm04T6HCzQFAK%d{I>)|bpSBO%X||2&%su~CZW(KNzR3vvv+iN;_a`w|?sD$- zPouB!DDL%pF*>#lclsS!k+r!?oUuR9_BQ1SqMEgt8#!lxq8Dz7{gByy zuaLqAnddc;QJPDc^>qsS?oZ!pPrDO67@IO5tetIbo6~m{P)<)XCl+}5o#*v0*v{eT z=eXfKsq|xQ>^*r->B@?7?RZLIAwByWvj>+mi|}p6fTlCkaGJf9zTVOHLVJcCY6p=1 z-nNI`j$?GP)wTskaLj#fV0OcY%z${s@FFg&7EPw#_Zs>aX_sq&1#YNN&ugpGA6Sw6BM8+GNH5cjL75FAV=b9;eMy zZRr7Poc2A*Ie5N)J%+QPH`3mko|*LTGAi-s5Yo>Y9G~?jDC%(fiNob%(-IXEXW z+w0`$6lQ##9-R@L8J!iK9i79xxf}kp4=ffi_k)`M_y5zRxhHLk?aMS1ke0J4;E4{Y{TgdpeS_1VW`FleC-&Ukys{a}9#*|+D)#;78FdCW8 za=MJZsnOAu(U@p#G%mV28c%Q4wb6t$Y+5GW^nVjxLQWv;)e`fsM`VmJ9knDCjPa8D zgUb7}ZYx^NJblisM+^TI<8O|ztC=m>l@~j%`gvhW@F3<`jY!| zchQ4h&9qd>zSIBu#Tj}p`AhbsD)}M*Wcd>bC%@#bRlf!4Z~E&h_p0AzO)LE`@&EkS zsxVPGjPB>Jf3FHx_)|Cy`FEdHVWq76`M(_fU;S2vkN*-{VoHpE@?8}t`Ik5v>!1Bs z#glnR|N38!_wRnuxHcS}#&hXRv!b!->@=IsN?^Q&u@gcN#-;PMtn`!P!~ASLS{hqR zwlf_mm;cmeNNaH}?ZuhWdJs?Mjs5FqQ1qGuA=IFfaKmNC=m-%dJn^oftwAyJ23@T(i z;wApFym|Q#zq=Xv?~K&ub=@+P(J|&GdvSIxexbhzyQh0f|6+a2Kl`z7$(N?-R*x+2 z6H=S)7}nnHx9Z4YgW1;2WlrgnobhDlMvZl&{`cMvg_iq>w>cD{IY9A*>!H%A(atWr7_H3wn5CDw&MUi^ zub27pow>|)TG@4K*~J`=?AMdauHj|ZNoCiGW!JE>YiQZUe9dex%yr6Kd`B#E4Jo_$ zu3YAKOxZQK>>5;d4J^9`lwC)cT}PE&N0wdv%dR8Ju6|_~-#W{V%lqt^>#(xx(6Z~0 zvg_co>!7lW`C3_AzSowy`jlOKpDpv-uk6~l?BYvp*{}PQUA@Y#y;E1YX89K=S$1DX8 zlwG`Ik@?+McHLWc-BWf=E4%J4yQY?1ca>dJ%C0-huE}NB9c9<;W!G(G*R5sOEoIlF zvg_uuYhu}TQ`vQ6*>ywNb$!`&UD-9E?7Ft>x~A+JUv^zxcJVfFW}&Oft}$iTm1P(A z09m*z%C5^Z*S7y_Z#j+T8SO9cg#Gy~u=EYEa_us5zpr)jvNTFb2L& zyeyu^yRHvc+{)XZeJdIjJ}h))mQd@0W3Jc4Yz%pBcy)M2*ov{?`N8$UnBbIPy>x81 z4`aH{O?T(eFLOP2);;YuyqR%-cI~2OhF$r#=y0i{VaL>!&Nv8|!H^6HDwx4Ale3Vy zGBOWz0Gy*qd6n=?zI6Ynt;O)LKz zVUG24(vW;-GMeMxVEdYX9iJh7Ub@e@Y-jmd_zd#22FU>b5!+#YCjKDvA!rZ9pLQV` z$Wz#+R;a4T=TUc%UmQbAOP0 z?k~Z1P5mggwfo@+kmej3|Zl-a-JiCM!BOhXP} zZ%zHtY^(fHgq-V-#D5_7SuPps`{OgiAAx)DKkR|)T4g7{Pa3ON8mp&-^n0hV_Dc7# zC)+`O4}50+!ya}{kGo^KhwkYfcEBh0-SC<1w@>%BUAnhz(>-W8=#qxqF!lL!-mQ(# zT)!6E*L*9&4Dl^-&+;wOkd$>+@|Scp|DWZa>Es)cnhkv=`}*7`Y(Mon0IUk`_wdvK+*?A1OvE9#+)DiCxSwzja6jXI&HaIUkoz6? zOy=fFZpF;X+K!n<8?UN^?mPC;lvVG^DfbPb=DM$mH_&}0G2EB92fGH`Gu#(!XF6&u zW!tB1QU$XIC5F`5fo`$&#uh`$eIo|w!}`rA1>t|`fKcRTxw z-EG94?QTuu-NAMUvsA^J6Y-zpxDH`Qt~}WECbomzjrh!TH?W;mw!sAURO=?=H`m=l zs9}F^Md-nhv|q`#(oy5t8}*OS)IZMqGPZ-A)agNgZ5I2v|b^8%(sFQjZJMJa1$f0asbBEwR#2t)# zuA@G3{9bH_xxLvAbW$sianwiB*aPn(?Q!&xt^o#NRD;V0t|3#Y`vGdYUb0waAE#2!Jd_BY!;%n=6L=hNd%$7im62=_4iAYnrL0PbV${cMl1_wnl- zdoSD9>^=Alu+!KMvUlS%(@teO%RXm(G>2cFMPI}BapRK#_&+AfaUMd%Osr*j|XwYSsQTWWYN3wm*_9x5`dj#%TwqF{OmdhTUy4GPFxX!%K_}2_G zoj0TI=U!+M{oMa0x9z^^aeJjH+?zekwzNj%=ALW^TiPv-co6<`c{hUg!qPI4%d|}7 z?{4_dvE6YGush;@jZsCh)ULQ^+FjVrvIk23^49^W&Z6(0d?x(^_>$wl6w}Jsu`X8V*hWo@Eb7b*A};>#*I7v^BMxvaPb4aP+x$W70FwZj`2^3+};o zL)QWg(@r+0LTO zNUi(ZlLaM+t1TblKKI^oG_LpTjao&xakytt4kf14 ztz)=5m*aEhtWhJrEg7|0HDdc;@7>J_sU41I+teJ(9>iv1lffKmHmlIfnVyLM5Z1ww zRMUQvj)824@hl)mq|T5IsatbcFGtQ#>ee75b!#S13u2Sw(h@t9=Ladlv&^Y!$$O1F zN=u|UIsJPUt*DgLzuS)MKB=SraPHKf$|`xAkhE~O!14RA{o8fgQ2X%fnf@+ya1i+~ zso4kjaI**7@^zZ~G1;Tkxw$;k$sV0tWwJLMMpuFigEnl%D)r1UAHqr=D8IK(btdPKwg;^Ar`B(k{yj_H zluWH}#SW&i`Oke3JK3FCqb6UW=B`z1)X-PooG+oS>aRaO>#-lloDJ!-e_ea+nTzo& zJ@&{1C^^)GJ3#U1B@I+|>`&gqzJ>Txi6?7F@!mBdSGSWO@P9B5{~N!;FY|T&T|bvMlV4yq{-eB~Je8I0Ze&LOXx6bfpEWK{Va32> zcw@OQGxK}%wb@;KH)iK=z});6e8ue#=F%)d$Ok>W@B>IT1X6DN%dp>8; z_phtl{G0KmCn>G6>Wz#tHI&DhwqvAeZ^oMX^KQX^8*f^*-j$5X$;gwes4(^)R?m4x z-g;vF2I-^wH!CZY*Vl1{k1Fn^XYT!q<<$J^qU+g?h|XiY>GQBJ+a=*)Y!`=zvRxF? zeuniS?I-WaAH*9s0~ifIBU~7579Pl7M>EEKdbl9kG^9Oc#fCok4P@l|Oy1ntJlr3@ zkHY=fei-h{_JgoD+xJ86EyC`F4=e8CuN^Ax=db6(ZWU8_U+3EBoN)UJN**KV=Z4!A zKB%~pl-A3NdWE`*FWD}t_>}G9iqF`7T(N@f{EE-nF05!^yP)C=_Bk=64d-3{UD#d{ z?#zCV3P(koggtN_8D1Q19PUaw`!i0@`(V*V;ZFQ@1S9-sg%?F#LR$0i#fpd7K40+& z+vyb#v3;iE0k*s!L)s2!R=}x|tU(bn;&>%9GS&|_$9)(x22PIF3pc}k2=fO{iaLdx z;yyUKEIKh-H{1mGfzc(rrQIRi7>jO!`<;rV zxOa^OIh$lD3Q@M69uDT}I)t#f};ZOYjR>fle+KF*qt~ZRla=j_MS+R)i z8x{5VbdR|96ke~W!@WbqHG?tQrWLzj*X_ffqb3zQ;J;nOb%+sMu0xFE%5`Y#i0dCC zyIhSJ;pJ+?NN<6-TSQU#Bm0|Iv5@WDiUn+6tN57hoQnBuXIFg0c2>oQY-d({K+VhA zbb4*K34i>r>nL{o_iHE?ek-q^Xe(U!6W@_uwSHo{dZHcszrK26zINhE>AH!sa?a`H zH5327Vq&^pqThSfdWpV&wqD}@E2|~4mdZcP2IwSn5B_)7ajCdFSy3^K^)~*zGR{A( zywQtw8(FdOKdgo`q`Z>jE4nI9wDP~N$C0l1_)C%^@$tt0u-ap@@=A}g z&f^Y$S?N*Nc^vbYu3MCl_+0xa;~mVl)kb0Y4ln2Iq$fB5j8|#^5)~$(Kpez z(Rb1J(GSs&(ND5A<^R#jlr(_pJt%imJ+IE(?I?@1jQs6+&M=JBG?HLFhsu_rwo$6- zR$9<%C3+pLQ9^kGyp0l0L)$9h2D>6jre@ zMpgoc@C{VfWPtE#R93zQW^NnCt5R5#%CHi43UiwbBV~r3eiODvWp(V$a95+U^2nCB zC5>At;dE5iMhDijGK}h^@J6oTsU#(whi<2YbI|RT@D#M05?+YzkRf-s-7~~*#|&0m z=NVAh58XNAaCDc9OVC|2u0Z8Idf^t3_`=JuyJ95|_fTw8bWep@KSov@Pgfn6^X;iv zvE|;1m9+Iz?5QX#o(sIqZs^BIu_I7cOc$)AN74=U29y=o(^c1}pnViO17*c_!HNwJ zRP3ARL5h7JWyL7LeuW;QgmT57-~bB5wSM$c5j^U<@E@JjS-g|*-eqp~UCd*~!3dIP;hkr~j8;-&<@ zqqix+O7wOml=5?j68wQqR>Cppok}Qirzp`==v_+q1v*vXF3|8L=ag_JI!y_eqW3D{ z3iLiDdJdI*g763Q0VR9~eGsr&Bz5FrB@(+mqIj|Eqe>{{N`8azB~E76o|5dmtLjohu##SrziW^&_FfMIu8-;!zW7{h3bhMo!ZK17I80|K;y&`R! z?Vz|z(2k1KPrI(-u0<&ag49>rSz(;s$hADZp1+Q+uP};l>;_6&hd0Cx^wJsIMUgtq z=t_!{G)taAg_N016n8JWX@=zaW(so*jNLqAHB|Bsn2lg;S0$D-ZkZvr-%4SIgRxs@ zNIpoJ0Omg!yKM&fVYgG53t{Z`3Zpf~c2k%&VdUN=Me>0*NMKGy+BQl1l-gh?6qvtZ z?9Lftk6jdIcNj*!6tPVYg}EQb?w%p%w};|BMWxONyMdHZU9+7GR{X2P`udbz>EvfgB0fZ7{&mV(dZ$Hmoyxj zaV2_~;^ln$W{g1(SG=5GzYM7hM<`y-wSUHq=#h$-uu_MGn?ULPnk^2jKtzu@P6Ea$&*D3A^^m@g;h~A*Y>!CL)afC`4 z6~sOh6}JMtIYY|Dq>P=>TNL*zdaDv!^fo2l0KGj!^7{_Oi~nRL=7jB?8Is;9itmHo zrBwWgPF3Po=-rAx7@d|O$Gk_0H%0HwkYnDLA^Veh0$$4U14E=tCJFpbsnl zRrHYz$;U?(zW{wqsrVUvT&WP7h~1y0ju+9Vl(2+8t%PmSXOws|^jRgYMW0jRUD4_A z0{iKSO8!B-7y7ay&w|XJNpU-&l2)kr1D%l}$CB~^PSQI|iKV>F&gh2DQCM@)Fn1?~ zu~K8_D$b+x6!RAPy5c1N-^kb#eN*ucm2iUC@@>VBK;Ov_yS}ShP@{Q0Ql zhal?evl16`i6I{K;Nq%3@vA^G^Z;>ETrG9<6RP`s4M2BqQ`^h+gf zjeey#vF+DN#W!e%ISlu=itCPkr!cp{*zXm0Ci;US&x`g)B`%;+)`g8g$~QPUrlbQ5 z1xWfA1lJP%O)=Y}zbmc=m9i)t4J#GD4sqCpV6cy)?MR`mb9f4j*&8WyDdNT!>2uFu zG|cFYMvC-9q>F=*cDFB7DDuqaNNdV%s2l_2`AwdeQf@~n6KT2F8*Qw3IZhKr?q8&Q z2wsj!xe??##Rx+rp=D9>Pmm%QCrkv70>qWIb9rWvoIn`L~4Zm#%4(JeBjpmNSzQp`cF@q&@G zh>bw{2jy8VMbf^lV#R*jDRQl2mTQXN9qp$0I#kL8#FFOjN-X={Q87ECJ1NqpJNX;p zN^}=R`mvH4N9|Q*zW-as(!1lrZ=wV7AVf!k9 z#6LXaF0^09aP$Zz7Mu6aI2%1u30_2xQsOnxqccXL17ILr0E05FM+Ymx8R#(?_o2sT zyo3(Pcm+K!<7M=CC8q4U6JRLR!>|m=_Y*TdM^DOFj>`E6w8wH?5xm&*6vcN!PgUX# z(bJTeaxc%=f)_hWx*?W&aHit7M9)#Yl(BOazZE(H&Li$F==qA5awB$uxCMHl5=$CJ zDsfBnV#P~djZ(bi=Ov1l^j->=u^%bFQqI6Xj9#JmC(zM~e-FJ<@$=C!iWeJ>RpNEf zaY|f;UaiD!(eX;$5xquOjhD8&^wh__A^ECQa|p>ka9FtiL24O zVH$Sc7QIJ_YtVZ^%CF?r{R%S;jC(+d<$UFQAZm&}q(m)I$tQ@VZa$*K`=XC3ac}f7 zC2Ea6u0%yt^7To&Y(egOgyco^X(g1jKcgfspwBAFEL3a?$xL)QJdgV&RKlSw5M-Rx zH%Kh{iW15GUR5FqKSN2xPxdvN@RFW6O2XgnH6@X-bCpE)H&002Pl-iKOdI zg+*tLdrL_Y^lc@nLf=u65PerkD$w_oq=3G!M6%Bh;6w64&h;ZDlJw74BC*rQN+kA{ zG6zXJbfJ<+U8qyy-_Uv`S{+@aM6J-p3UgwNTcRYj=u#yqqRW(|8vR5`+Mvsoqz3&| zNlNHv@Hy#`yjTHW;BJIAD4~?cFO?)hzk(kL(**rVNn-S8ScyOFk2gw0*^oXXAtFEJ z8AD)BrjdJSAtFEJzFCOKXSr7snA@4&XUM$x_+f45vD7*f0CGFc9jT_{d$zPfA zEjmUCB#l=oGOq5&Dy9cI4z7kzVZ34_{56U^llW^Db00b(gYxIEQw;UVU$02NxW7S> z@oj&jV%J4)Qly{NPs~_>-mKUG=%ftFw7*43`><}qEi}OGiu7&xI}~|#^pi8bMCCV- z=UqPq?gHw8pQ@OTP{}ip=Sx3L2{uRXQG$KZdzFCl;pO}xkbIH+fIw_RJ1+#Ap%20% zxc5dMg~xFBLm!7HaG#D!Ie=Kw^^_7wem)J<=~&YGtP)&-K9|uNl`{7{`;k0<0Vpql z>Y72_*gRDzVr@@&ID7-+U#Oy!=>+#l~O3*MxZp{YH`J3IDAk*GB)HB7Ls@ zdnFeCAC$zPKPyJ^?ia;&LVr_&vFPuxl4DVJ0^$U;b!H^$GB!cI5+8sPC}n*#RHQFZ z`nKe}Wvnl#P;3)41`-prhsum>`jj5Fd^<&1i|PrkF11>KU@XH59We z+DwW2pv^O6e`_k{BD6(@>|6FL$o{CSf{djEYbi2*60Dse$5|&s_DA^>WLzkqtOzpZ z926Dl^A1WtdlnypwuN>e$Ej7~eyG?*kmGd7kbQTA^?)`i5L<5u;wR@1^6VXK1d=~5 z0p%k_%J*i9Jflb*PML*n0djtn@jz@1@%gBnr!Wt;&UhQ$CgVMH+l&v;?G(AT1lwmU zK)Wek(zSyk&%QzTjD_fqiqw^0r;IvuXT`sW?vk+>m3$SJLJ!ytmc#C_2Ye2DW_*V3 z1$)ES&~&=mfr^| z@x|yMC6=&*m3S0-j1o({W0m+4bchnmK8{o3OVQ(%SoU{<5?_W6RbtuqFeSbmJu#yN zdXnO6(cu{zqbFzlik_mxlCD#g_zLtiC6+XvuK3RA2qiun6}v-xD|)^X|Bi~?u|Xo` zK+c)-OQc-L`GAah267IR^JjM?Y}B_4uar^J$$>lGP;4{peK1HCb$8G2L3o9HCPu8H1~ zA>~l)1?Fz_wu}$a+ZA&UdPl}b=;Vy`&^t4}N2g?LfZmnyBRW-yk3;1c5R3h$De>{B z90Ovp<-JNQ3KS1L-d)9pV4QPco_Pe5=;J1hvzxR6VVrxSjxhS8O_m`GG0et zR^s94D;aW*QntW|U1wy>M`tQV>^dvsV^qoz#3!S3lz2S)nj-TZ(sDl+eIMp2@|+h) zI>2m%zM)9}ZXo3eWPWELWeB9NH+Wky-B3v@NS|)-u41JAy{AZjSb97;W*7KCiKQM$ z`GfcrRLUI0QYYj*AwCtA^g%oUU7*CLp$nB*>PlUPq`yAn7j%(gw?-E$@%88uMdm}K z*O;a3{|4MrKEO)eNtpn7j!LgR%kh^y`c#Q;L_brk*!*)Pz6o8S#HXWQDDgzJLGfaX zFO~G(@hjZm#Wr6naxWQtqj<5?w~E|X2Hz?18R+*)d^7rk5+8{EsKg`CpOpAa^k*fW zgo=%X#_(D>g}%CUaW5WD`N#Al%^m6$drq;3e3|4p`6!nKX5GD_S7vf(OT#sN+iec zuSBG`>PRJ$V{%QcqRd8;Hu6}ADp2etM3v}MN>oChQ)23T)pU4~>&`VObzI;rLQ_Q@ zucF>2*Q3-~A=v_*4R7MU8KrJjVZWp+`ks>9iM|h%!(d zxeNUbR^ood7^-TD!6rqk$T&~YDW*H>71sd`6hodAL&bGO$s57!gpxOcjI|W8i(try zVywt`NHI|iwl88YLB=kM1Qo&s>nRT;%18UxfVB9WZqwK3q|@zi(M5f_S;gCzS81WiWOUKtw_IV zaT~>oUAI-F54E_RV#Q`sZb14Si`^6}_LK4f(g#_Tat~H)DP&(tN149eu_N}m1BUH{FMEIJsp*OftS3M_+Uq(QjWokZKS?|y%?1` z2GUnpJXEox(8CljHtVa%oTlR8iWmF!Q)JFj@d(AsIrUfU)##Cmm-9PHvE$LB6))#H zK(W`L0~IglJxH1ZY(yv(@qS#63af+8?90MadhGKF;D9ip5%*!cCxDNwKe|xytS*|o#HaZK97E)_$$!mik1BQRFSct;%ADLy!~8}v7q7##Y#SZp~$#Uu|ctt=U*x^ zMpXPtv6BB^D>7bG{6?{IuHPy$K2ZElv2xzuD>7D4{6Vo|gC7+cHz@w3$Q;1p&x(vC z6n|0TM(D3fyaW215;sPFSK{vIA4=Q=U8%%78mWe+L^~nr<7vJyXzo}vUJ(NmT93G_6@w?j`?;-}Cv6zMN3ovFl6qh~47&r}lILHsN_ z0xl-~Vw+KLr7>m`bPQaJ{}6Nn+>F2MXA<0rznseyxR3Mw9lakO#=i$Dzj02|w_TER zc#^oCQAr;d@~I^I0<$&xjFKFPN}3>k4$Vjoz&%|_jz+0hLed|70jM9+Ut6NwmtMs! zcA5dSapnS)wyi|FX2foD6hoZSYf5r3N_`NLL(zFk(ieRl-ejNHsPvW+b3Uba;XV8> zMc;>y*#8prVTHLrD%Yrkc~PuB30_ZFDtQoxEe;)oUo>)o3#%9EUas>T)<9ZK1@ZtGcBU zbNuR7uohwNLf3|MaNm!%h9Yk2T6IYYA4Ew+bq)U5tGbO6UW>L>!t2p?N{r2_Yn71l zRoz|*u~l^kCC0whlu;pi6YT;U;g&RP44ZPShtSPn3*3*PU6q*jp?XW$3jbHpt(BPk zsoq8jPeQj5nM!P97`Ch$)64T~XcUNM{MfHwKcpbWv5)DFkR>E`9 zU6k-VbXVv>+D=7xQ^J$c-Ib8Gs(KG4B=4*DguMuV1KLxGzd`p_;;+zNun+nEE!ta2 z)$aiqPNhYN+fwG$A+j0I$Vh)Pb7V(;4k}>bf1oUJydLW7H;yR zn)*R`l{vpA(C{R4;SDjebpB#(ShhiFcSZR(Tm{{+=rpm^XkiS zAB|oCqj4XBj!~j5(5sY4@_eikNj{8IBC+4qN-TCAuSAmn*8q7K?SNjdM3Qe}_Z#t- zyqcsW9no8quqk@060VNkri5#uw<}?S-l2re(aB2K7`;;oBXo)qwnXnz!Wf;ZglnRA zD^AijO$j}Ej}lg(_riUYnKjS{l(2w4sDzTg55dEPq3x)CLFs2}Rv0OO`s2(MKLc}@LU^^k=oNBO_5G_KRDM@>jv#B{;up{hlz0(Jof6`m(2HOsZb|1QN^}!C zMu|Q_uTr9)QSwHJR-ogQ=oggy6Qb`?@=1t(L#aD8*WnhMNct$R(eLQ}@BsPS9eqeC z?0`z1K|#{?h*Ic=QpReY!hcKjbtU=?eN%}fzus1&A5qFq%|gQOiY|erxOYb7e4wx) zD)FJv#h5lu30I<_5(Q|ac*3=5szerDUEycbW(_4IEp66TBFbVL%8wBKfl?NPNRG3) z625@;P{Man@=FLgzcxp~(YRkjv1J?VSRgOkT&on$M<*zS^SBk~ctT-glynP)O;OH8 zC~SgK_k_YmXsuEpEp6LF2mH4|NuyApT(%vd6gERIRtj67mnns==v<|+JvvV*Y-dcn zwo1Z&+i_0q2C>h<=%YZmLQfQ%3I%?vodmby#$L5km5_2+`=C;2j#5s90_RZ6euP2` z^f9G??Q1C~Lh4U>sg*J#{8F_=H4&=Dnm`-t%Cq2z~9z=rLQPzvj! zM=OO+D0UMH>#!sb=Oz>=n;keep+MQ}&_^k>LyuAl;#GjzwE75ZF z2POIf{ZWa;?#tN;Ez@k!N(s+2X7bHSvY#<`5=MykG3G9gC&bH) zsWVEUJxZOdtHk44v=Lw%^BGDy1a}>}r{X4|w9|sS6Qvvr?rwB%#XW%bfdEXD5UnJ-H zAUuToNc3SPqAb-tqC|2|kHTYwISzeXiH4$20QEFF5v7jSQ75C5(HTH~MyI3mm58>k z?qemo5M2NZ2{RI{Q=&`IdRT=2W$0p9ihDFF`3;fS?OULHCm*mpNu(qkr#^-Re~wd6 z8K`fHn`6|k4wTP?{nfXGR=C+$eGRn1y&P?)B$S!@P6}(cn)=SL1!b6XtjE6fTjD1D z^;^NVxXI&s@~^%R?#bu@a4_!a=poP-_sb~u6`~gCi%LlT)xQa12g-Ln=Po2GP;9Vh zIB}Pt)Q3f{@^0O!D7IbnK5kLYe-Ytj+(_&s`TiV45tHQHV&Ci)cZonj)s|GVbpo<30Wx@Xb1lIIDm#;Ei?r-t$`H%e% z{+HnN;HF?@xM7p+tC-PIwNBNJRl8N~Rn@Dich!Mahg2P1bz{|}s;8=ERlQmDUeza6 zpH_WewX%3aab>AcYFb*eR8{Iw+OV`mY5UU7rM*fAlnyE#QaY@3cxhnigwolii%VCQ zt}WeMy0dg&>EY5#rMF7&l|CwcS^BQ@M|EX&hwA;S52!x8dRX=7>Km)?tA3>V$?E5- z7gR5={;I~-_?oCDu4z`&vZhT<`tZNs*$+IDE$ zx$TIy7q`8v?Ywqv+YM?ruia<0x7JRreW>=a+GlE?uYI+4cI~{{Wwl?jEOpd=oAyVv zKf3*-4$V5W?9ivfQ5{Bg__D(<>o)7uwR6Z11-&2K0)Bn@jcG;e6xP}{I`!>$d(8ZKiw$a zRi9P;R%8sVM89-twNk56b*W>iOQ~zATd7B>SGgSYD-9|QEsZFRDvc>kC`~F&DcxUs zwDd~p-O~G|k4xW_eyFC8ta`(=91N`06`+!aybb9r5uc)9DJXZgEjuK9Jsnhb$itHsymEwaBSUq zb))LW)?Hh7d)`E}n?4&wSo^{dyntglJSLFf9e_1o1SP=82T4&JP%AS^m_(TqhO zG}wl?p;1~6sv6o;4tg}4NI4kQFotq)6XoFThJ{%F5RMjou#|gAH8(P`b(DftiOHnf<@I!cV9Gm(G`m>Uv$~hy_V2!Ety_- z_mcVb?UuG!vj36^5^KrBa?M{d$Czd8kG&kejO)W+|9yJTr!$v6vXnM>=?9-SliwE2 zUi!q+#}^*FHH1n*M9KQJ7>;+?Y$$9eB_AU z{T}bozIwd$rL-9V4eg$(4QjcQsNDe`;0}G+y6>+0_PTGa`{ugy)_rx|SJr*LJ=fFr zgW3;ne@wfF+AghKf8EOVuJ*U~ruK79OY;%U?^<)uHIHlYOAD@2tp~2PPwT_i+NO2i z)?w?zj9D$@{)jT*0yo{gA2bU7h+4CT_=&7`J(WD)s?a+>BT;z{%>Na76!t9i!LMIo zWTBz(6)QIXQSp+$Gw$YvZl>CSv!-JN-T0pF^b~H~72H-~Vphiv6W^ zF%6&n)%Z`juqwts`x3u<8kegSl6EQdW|zXnBB~N=5TX~xx(CHUbcyCYS*&u?0T%De5yUC z{7s1&cD8-fzHh%`-TbRqIsemeS@gueS*ia&e+l~h@Jd$Qe=m9?93K46J;@cpAK{qr z_voqci}1&AX1Fw55j_|F5WW+&i)y3C{3+qD;Y-mgtl$1qI4hbI-5A{xo^C?!j0z?& zo_nNi%^s$oIl}ZeN1EAYj^TSS72n(S`3A+V+&ArLyN7Sto$OS5H{Tw($A0LS`vcr+ z_BZO#+OE>I;QJUG@b!W{+@7w7yMeDd-{i&zo4QZjZ|--#fAF*WDSX@g;!pFZb2UB5 z&vc#qD}2SFnQ3HyH;r9$v!~n8?B%+co^B(vx7*nCaobdU>$WgQxxLKMuBRE`_BI1u zFEhyPW5&2s%vJ7GGuEAE#<|nYZSGQYm%GHwb=R7CZlZbJ-E7`)x0pBGxQgH1y|&Ul zWLI-f#+~ijZl+zw&9bfC?246chOKcg*bUqlc0<=-ySOjy7VcBKn_FpjXZhkiytSwJ zb?te6TYJ9W&R*cRx1;?Y_Da8}eAC)q<$Ky2eLs7XKO%PiaC@&GZtwGF+WY-k_5pvk zebAp{AM)qgIsQrek>?v=_G`b$e&ZM0Z~YQ?ylZE6srcH}#-5e4dYj$d`sPrxf5mrh zD^|KXqv9vlvbr=h=E@*(7n%3XOxw~u;a-f_izByu zTbT{aK5jkR#!mLT`?@$`MUb8CtE~BPy4%e*a#QWOep7q7-@-2TGu%4yYVqo>ecXkw zacmi{8Lt(u9qb?U2@VVnaCf=kaT|Ab@M63McaRtGJaK(+hP#h%n!n;+j@NhfZjq~V zpS$Jm2jAXziZ_fm@?GPN{g!@fz6!F7-_`FHw}?B%E%{!^A^y~O=Xe+Yuz!N*jQPGk zUMH@K+r}N^4so++X}nFapF1bsjc=Ab=N9-);&yR+x1-xR-Xh+_ALdSstK)6s?cy4D zu^SaNj`xe2MlIw0<9*}aT+@o-li^dzi{aGdg=AJTldsgioVdg%L1Lq4qL-3alP8m> zlBbi|$&BQci5DJ9P&&nAtM#^Jq5#OmQKl9oxUWUXZFWSyjSG>fmG@trQd;dV?ikZ;cnNsf)? zB}0>8@gMO@yInjzzB`^4-xI!K8%Iw@FVn64vN_A#Z$7k3{kOckTw{9pPwCe_)BbGk z^Pig!Y%8;kUDs~!SJ(mm3wxGtuy@)e{!9BF_vK%kkL)tO;<9hhKR7G6HrOoKJLnbc z6Z8)H1xN7gc1AoUxIDfqo*G=?TL+_qD}!;t)xr4SnqZ25DVQ4E9h@CZ@KwQe!L<1H z`2P5Tpb%69ad21sV0>SETYPW)Q2eml)IDNPh#yIe+1GXo#>RKVljA$hNON(pNqj)C zzMWu}n@??%pncFGsE!Y|TgHd;)@2vo2HiF8!@Hgr1x5g*e$L9t=`2G0; z&OPqY;LG4EcV2v9e1U%}SP~x*UljKb8sa14k?~RS#qrVcsCYnpNjxyVG#(UR77vau z4_f%zVB_G|_?Y;L_*maLK0CfO{+;g%^-A_hdMEoPdnG;Xw!z%^oZt(4hF_ois#oKK z;`4&e!FR!G!R&ZQJleJh-UylnZ`w6^qvmejfVn65C|ED}KG-1mF+MK7GCn>Y6Q2-Y z6%UQa+INCQ!8*ZDK~=DfZ}NN+l-yOphQZJAuy|a2Vti73bMdHHTqGJ)?VjZ3aiZb)uSZb~L5Tks90Et9R1t&=;G zDZIDP+^uE0^WMRZykoEv?-TTLo0tQ5PvAheojJ&DZw_|d%pqlwGsRtLX1NJwhP%eD;hwh5+%vYhd)D$L zSX*?j*^--UtKB?X>lWMgZi(&SmfDTo*LD;4jos9JYd3S>+5LT`?c*ES1AJq9pl@PN z^BdaJeHS~@cefY&9qlN;lfA_6Y{&Ut_G-V69q)VFYy7_UTECyY*&k^q`J?PD{%Cuv zA7G#I7u%=(DEo}R#6IgUwa@v>>`ecNo#h|3ulc9!T>rG4=O4GP`)BMM{#pB$f8M_B zU$6`O9Q&PLYQOi(><|7EH^vY2&-vHfnr>*ky}dr}X4Z5~Obcd^wsfIsbD}%Moa7EQ!P3afh2zT|aZ0JHoun zEXMcTWP6ZrY7h3S*+cy5_E5iuJQ?siuidmH;;uhSot?UfyroxFqGhrJnIlQ+lcF?w-J_#&JWJyAJH zxnFredEA@n9pW8koND~hIL&>)o8=wq%{Hzwt~Rc5AHtpD=NQ)-*QsBrU#s8X?x^!*7}3B zy*1HFTNPHN6}J*rIc^!+A2$#kh}(%~;^v}5ajVg6-0d_McRAJLo}>(J@X1;GxO?Gl zpBmf&RA*&zZ_pHL4{vL;)EsPXVtVEfv)HuE;ihX2)qGPo9ka|VH*K@TjG0B|Ak#Dr zQ#FT~8t!5r<(==H3+--i>n`g~>wWWEb(wm)`ET<-=D)1#t(&b|aC`gZ)|J*(*45TE z*0t7k)(zHQtsAYItV`Vw-K_hO`!R0W_!;+Wtiz2O>+KplYfrKa{Yy(R*VxC|OYIZv zyV<+iL+lauFngr@mD|JZq4p;7H`X3!nf9jE=eVuoN8H-+FY71m zVr_-IEADo^Qr{W3Q_)RawYVYcXWWgHv{Jais2VpK?TmYlCgT>Sskob|!D_@!P4jV! zQ#)>W>cZ_$y|@`_5$?Pu&~?dR<0?HBCT_KWsQ_RID^ z>{skp?KSpm_6MZmSss+d}&7ON%Dtp=%s)iSkQ9ik3ZhpEHW5zw|qs-x7=@(!EL)h*O9kn+Yt z0~@D~S0|`jLuS}k-46QL_Uc4+2Q{uHRB_8qTCGsdRx8zvTBVFrtJNLVoz$JxUDRFG z-IVd_B&AX56F1=~8=(J)+W~!Lg4Bklz<#)=x|h1Qx{o?l-B;Nf8dH<LKc(>S5|^^>DRbouf9WjcSuRS8Y~X)K+z#I$v#57hqO) zsGaIUwF{DckJ_tjqaFd>aj~+kxvHuB+y{4-dbWCwdak^!=K}Rl>V@h>>czM#?o##7>SgL*)XQ;m+!gAT>Q(C1 z&=anO1a-Z7L!j9ye^75xo>y;GZ&PnqUJ!a9ZkxMPy-U4YS*PBk-izDk?pGgBA5{OQ zK7_mI9#&VWkEoBTkExHVPpD6-f5)wfPphk;=RT`Gr#`Q~puVWS1fBO~^&jdh>Z|H& z(0<>*J$7$F8+u25SA7q++I^sYsD7k=tbU??s(z+^u706@DfpJMz2scqs{c|Zs{dC1 zqke~5@P1H#RM)CMK_1)zx8nVxu2=iiyt)Ad_MxVN-^Vo_8mS4pzpXh~6B5uHJ?N)J zS~2d@E7b;RgS9fP9QQd6h0Z!$8=-9i{dJT!T1hI+%B|2UH&tF%UWE3z88q50v@zP2 zxc6}@Z5;07o1ks2ZKG|gZKwS~+aB8P4q9AG;HJJ5WWoxqQp*TULEBN=N!wZ51-JO^ zrcKgnv|32(SuLme+GJ?PQ?xxG-=?IVytlTGHWeCe8X9edHci_fzKQ9uO3%;^(hk;U zYO`=B;Gs&Tb{N*GUzCrv!?k*Cj+Eq@w7FWd)}mx0GqPRl&^on+S{Lr7>`|(qmA$I` zLwN=I@)6o1ZLzjQI})CcqqSpT^*&BpsvWPLAT^hhp}|0R&`#4%*OqC^wKKFcAxTuj zLvoI?Bdp`+Y3FO=UZx9`op6id#kfZi+P$)~cA4@OH0=kq%fXed(5}?3f=A^V*wU}V zEtWUne#0B3oOuhhqTAqY-Ua&0oyw=$UE=0N?OxoPc|X2imWB5BAZ|o_NLvZ*W|j5` zw75sL$Dl_&0Zr=fkbj=ip2mHJ&p<;Iw?RFxy#Q}@yY`Z{Mtd2O(Lc0Tv{$v)loi_R z8t#cwa@t$k+uA$YySU%*edV8uFElZwL&<9&LE=%gPjKtuXUb$%g=Y4J_9Zm5ueEP* z58}7*0EqjgzSF+fe$akYc89F6R#~WYLT>m8whP?r$##qlx}vMPrt9z=neZ9dx}&@B zA;sW5DAr5hNgAXN*2|PB@F)#|PidGwTpyutqK|}kX|%G3zNx;MzPa+VzJ)#pcSDZV zx6;SyGR{aEhV`zed{6Z8||GdWp5ML!icWS#~; z+A?@h&VUc)ESy1@1}*OxNn{(>$m8);x5kH^*eCW>RtNXxS8`_{XXc? z59kjnx8tVHhm`&Gm5?4+>5u4-DhKF~>5nVZ^(XWv^}pjT&!_d(`ZM~o`g6G1^9B7y z{Uv>k{xW3$SM*nv1EGt*4lUqKWxetV*1)&n0en|~5BGq6pns^GrhlZ&&_C8c!Oftb z!4md`{-ypE?g{-y|EK<~&_r=-=y&?}`Va6LuEiaqKkMuCUzCHC;~{tT;a1TN%5m^V zE;STGH8evv48t@m);2vO2F^Zxrl>}Dx=!iQJHD%WbAD0V(e<{W=t|_ zj9O)uQD8iyE% z8iyIPg_f;+2Hm>>UvK%wXfoy+%|?sSYRog{8*Rn{quuB*I*o-!m(g92XN6vF90h&- z7~@#uIQZ3$H%>54G)^*3Hclbg);Qf*W-K?(kQ%#?YtIe!_6v=RpsinGTx$Fop4q<` zmm4dLD~u~i3KsIOalLVa)a-9EZZ>W)ZdJZCZZmE-?lA5&?lSH+?lJB)?lbN;9xxs> z{$@O6tTY}rRtXu|c#QS!zZ*{(PebE=M(Ete^U%3pga*6Dcp19)E5@tPzh5`rFy4d~ z`?m3p@viZn@xJka@uBgN@v-p<^w!U$CjO<=#lJEBDYS0V$e|PeVEkyTg?{_9vCjC# zSPwgH9+q4M5~&7R)qr$rK~{B?OCi5{QerKJliWXLA>pDQnDHv(C(# zx&JTs+;u`5=z_M;gPr3xw3$CCCo9*&k}fobCFYUlQPKu~EVPHE&>v2Ko%^JIcj7HG zmz!rOYhjx|ODR`|C_~M&m0`+oWrTT-vWYTM8KsPd?fyJ@bKakRb$8z7<_hx)^Gfq7 z^J?=N^IG#d^LnZC+z2ng%~IzP+Rq)V;oL(yj*gDZYrctt5N|10nQz0&^^W#7Rp&liSn)TFXcYvJmos2-YSOPRto#aU|2xP zts$_246}yAHn@p$wNk5`pgannNU=3iIbB(1jj~27%dJhV&8*F#?~Q>5dztbVYfEda z@<(ecSkBH>E>bR5E>Unhr8Q2u-x?2%aBHa(ZpT{T4pJjbLc<~bFvFV8PEyy|71ozY zp$?dX7C0H!m?^kv78V)kLHkHeXg_Nj^vMIHMmYmI<-t;`JcRYi!=+ByU^QAz)?BOE zYJt`?4|-FZ)SNn?J1vyjQxEG;i>)Qrk=9Yx(bh56vAD%}sdYSbsuQ7Aoh zpz;ft+HtV`jExlC%7E37N}X_wcB`sK~gJ#Q6S zw{-_J)4Qy@t$VC{t^2I|tp}9vtp}~YSq~{cC_lnV`!FoFk4T&Clxfr|D5%_^@8;xyj1Tgb=FJDnbsP37T!?aRNk^)R^C?L!G^PcDDNuoTd!EJ zTCZ8JTW?rzT5nlzE4uZL^{(}v^}h9i^`Z5V^|AGd_37`qRT3KUI_no}z13&sH+loA zYkPLgF0za561&tMWDmB>;Bg-UEqEC8;Stb>N7|$8(e|eHX7=Xx7WNo>OM9%nl|9ZL zZ%?qdwzsjjwYRhXU~g|vw0E%Mb^l$!P_fv#;YvzOav*k{^j*=O75K&w8_KHt8;{*!&7eUW{!eTjXk{b&0! z`!Dw8_6qw7`%3#N`)d0d`&#=t`+EBZ`>*zm_D%N9_AU0U_HFj<_8s<}_FeYf_C5B! z_I>vK_5=2V_TTJ>?3MPz_A2`k`%(Kb=xiiEW>I`#+J0qM; zoRQ8bXSB1avzfEGvxPIp+0q&7Y~_q|#yb<7t(|S0ZJq6$KRDYv6P+ELxRY>_PRdC; z6;7p-ajKkZXGdozXJ=;@XIE!8XOdIn)H-!e*2y`(GdaBDcrRyfN8E0_ud|;s&Dq~M zz?tqG=*)2B4ad$b=MZ_v@oeXCdB1Uk)95rgbDd_V#c6fsIrE)1XMxl1bU2;PLZ{2= zc6yv%=Llz!vzTvBKH3pCA|K~0b&hvVa87hia!z(maZYvq=$z)9?ksbbJ7+j&I%hd& zJLfp(I_Ej(I~O>AaxQc(!i~U}IF~wqb}n=N;#}^maISE!bgpu)cCK-*b*^)+cW%H< z#5X!O;Wpx1oLimSoZFo{oI9PnoV%TSoO_-7oco;zoClr1IS)B2orj%OxRv-(=P~DT z=LzRY=kLx_&eP6n=Nac&=Q-zj=LP3Q=Ot&2^Rn{~=N0Ew=QZbb=MCpg=Pl=L=N;!= z=RN0r=L6?M=OgE1=M(2s=QHPX=L_dc=PT!H=Nspr&bQ9LoPRt2alUiDcYbhwbk@R) z`Lnam`Ndi9^f`HFgR3ZCz-Ot!=V`d6Yq_@Tz>DX(F<3~8VI3_MUN^VQEq8~&=Qhk8 z?v4u%+abH}?A;3wM#p0w@Y7v0{S2v1wwO}I%n<)-1~ ztb~`WN_hUYz5k8W+@Q<{>H!{zi5AVhTcrQBOxmYN@6Fu-o907mB zV)zt}g#X}Z_)Cs;kApAac=rVNM0j3KhOgyR_*qVak7b#=9A1?(;Q=`t-j8$P;W*#D z06vQg;jg&Zy~MrL{WH81e}Ok*g?k13E?2=rB|T5>^}?e?el&QfZWVqj_^aTla_@HU zaqo5SbMJQ_a36I4=04=EbRUM#?Gg7;_c8Zz_X+n&_wVjg?$hpS_ZjzD_c`}@_XYPw z_a%3Y`?C8F_Z9b5_cix*_YL<=_bvBr_Z|0L_dWN0_XF5FVeNE3aX)oGb3b>#fW7xC z_iOhXSbx8TefQt6^1{YT7GBttVPSTEao4+j!t3Y3tjIn{6COhwenuA_$C&UTdL>?| zH^>|8m3igf5N~LIZz8;so5A0>1-y=1dSl^_9OsSqCU{$W+j!f0+j)QRw)ZA_J9u#~ z;U&G4m-Z^W%82J>XKxp8S8q3Ol2_x^dUamb%Xz*x+1uTl;_cz>>Fp)_R}pX40p4`) zKv>@o@(zZjeHLu(hkA#3vpr!)pW`)njb4*C*K77#yjFNO=fk(Tz-#w9gm=^H^18hq zuh%=mTjVYFmUu^cM|nqk$9Ttj$9YS={@8Qz)R zS>D;+Io`SO9-Z%95c-k|o}|mY72XxzmEKj})!sGUwbGw-gZEeOM(-x?X73j7R_`|N zcJB`FPVX-7ZtouNUhh8de)jSTPwm6-@IC@>@MF>!{3JZUPr)y|8Xn+hz305=y%)R} zy_dW--pk%UyjQ$ez1O_gy*Io!y|=u#y?4BKz4yHLy$`$(y^p+)y-&PPz0bVQy)V2k zy|28ly>GmKdf$5g^8W4p$NSFv-uuD((Oc{N3qzaI7p=9vc!H8XFcH9vczcBsMZODmFT{X>7CD=CLhe zV`5vz#>Tdajf;(sO^9tB+a|VcY`fSWV%x_i#&(FsV~JQYmWrig6|u@#CRP=zj_nxR zDYmn)M8|fEO^VgTYGZY=Y%CY^W0PaM$EL*gi0v8ME4FuRpV-vczOnsc(_;I_4)AKa zTHBlJ8+v=1V)C`4xud;lzFo7RzM-q5-Kv?>)pSIYUBlqs_SSf!E^EtYHb>7~Tt1WW z;F+XwvQj(~If5s0H8LJg8INb0;%hivO%;c0_*utKSx+KY%lT_Le=X;)tu$-eI$P>J z`M0N|y`x)Z^y5^ZAFt*x73{}rIZPG!aoM4SAE%1^xKHu5oWGXy*UJ3yWQxnFmF2|K z3F>${8$79jeii4h;`~*duZruh;`*z&{wl7&DyYBC+N*xSoW^?5oOmLav}u z<^IPRAMqe8@flC1QKa4czU)doQB`AQ8|pzJoSb@=i}R2%CUb12kYg1QKcPlf@6n84coeCHoae;)N;!dTwQz`CLiaV9% z{8`SQWu#;oc|2{?@r*uYVj?yrs68Z>TtGn8nGjY7>YCErRewa2c#-@M)H$U$#G%e2YG|@50u*%%CzO%C)E07QlWF?7It+iLkK@vGC8_%F@CO#q) zEFO|fATR{+L@v2Bw;?UtfQg*d_U@H6qo-caEpArV5Ui&M_KjiH@f-mD)bkq(ohN+8il?^pLUQWrUC>tF+e7%t@&cEmUVwrmJW0zUW+%<@Br&0Ql9&&k3=ak; zP4g_5W`tER!YX*s>NviR<4N#HY~GYT58c#78?pT&qY@qeJd(wG|Zc_LK+ znVc$klq-mo(Jfu(O^XmtMWr>ZKsZU3)RGBCR0SiZ zBB+WHQy~$XOeB15+F}~Rbe*-oEI79Rz_}Ml8|#duDfQ{UZWA3&I(hNp7+0rkD+ zbV0+`bXroTGbvA(q-;(XNsneRE)%t>*ujPJ8Nb}es+@JOtdEKh#4De> z#8V)NIX#_)M}EqARmy>WZS*U6epT?is^Ixl!3!?q$-F+_hT4cUa;(JWSb5DQ z7+(o$Eo2TNSH@qWuF`B=($U;vGTu+LR*`do|?kCv7XUbCxafd zkewqP2099lA?rdp)`4;?MCVws&XFPw+F=^0Wg4sFKGZRW>zI}TO&~?}r)hnRrz?n7 z(%FpJBnG)?c4vK8&&0X21y|JPPE6p}YU0NdSuO!N%$nQO-aIj3LY-^wj&;jNPglqM zrV{yHzJ;0ZEJR>tv4 zL_{b=S7=(KX~{Dt;-bf>MUySuf&vf^8KeCgL2DynvMoC4X{Cns2%pEqCxs4{ zC2D}r;+f9~_A@L1XHrH-Fl*CfM1Z)TF*}+9N=RpDUZu$v3elFws3LgMd`puq8Baog zUC0xtD>{+VS+h&>N%@yzin`{ucDD#V3FEdTlw>C7bTPSET@i|@$XHzkih)8v$!Sr= zNHC?ae3oPRELY(hU7{-xM2-;8?r4)zzSS+Oh;Y?eT6!CNJ)hYl6TpkyMji=fD0?X3*#BDu=I8j{N-f>>byvU(+FG<)T| z6H_bB(q&SZ)OjBE%6Vw^N*$)R|7`5H%#ehe2n?yD6l3|3ItZRphk))Pag8U>nhcLY zhQ}Ziz&dt$ikLd+*_cO z07Drzk%<<=BGemb4q?8)vXskUK*+EgSe8U@Ll|3@&_a~s^s=c<8n1FWRANi~2i4{R zGgZzH2P+p2YAzhq90s+t-*9k+K&*J9(F;n=$P#F{M2%uFMPnoZgo(ncGia%Kgdk$# z*crxc#&;J5crX{qdB3QDIcpK=gNwom4(*Rei+waXw(I8DN0TGp45ac3y&=>BSyxK? zu_cAli8BQ2J^OHS)z#LLs8+~C;&ZiuA%uN3X)-*38A>97@+oH;o+)!l&|SnzH5gD5 zPSQ{vAj%8Jov{S$#yB#V*hv!b;0ub|M-nfvj+DLkj>P&N_4ovSg=!{u2$+-W7c8ik z-AYxb%>!DX5^4vvYWudD`?oePsMq&xY1Q}bCe=8_`ke3AdQ%rPHAijmWcPr5o@R{C z<_w?Ni_c~azmi7LXY+>7?8YafV8Ukyp&wLLL)HVIR7k{=6~Jdz)2|_GfzK+d@7Fo3 zWs|Cv;)OcJYF{dwwq&0f>CElZq$>m6#3|Bj8H&@ih59@C;;cqOvq8!mvjKa9%{&8<{3?8Tl9HxcG?NIyqdHW*Kub3Pk< ze4g??`;&dPVEF7;_Sur*vwztq#3%gPj4=mOz-$)N!fb46gD2(NWSK?|I}w?-e=QeovnjZacs|>Wd}bFu+mC!^8$MADL~BMm%?*Tk z@Yu8Jvr6x?ht($!Yr?O~m_35g7_DGWgTU&6Eu?>$pNtE(ELc% zCCzThE_>>G?Oq~=A^hyux6%Q?A`VAJksSd(YXUwy1bo&Ad^VN)EG7G-{KDuEOdaN! zZ1DG4;`hl70yDbgF?eS5&X!gOzh>by)X{3QLvVFZ{*APVZQedhtUlYmeU@B(wt@RB z!TM|q_gRwl*>dai<`AFEecDm11`x#5l zTW0M6qD*qX%Jbe;^*waKmiH`N^SzbX-4qvJmEW5_|K$r(84fl-tKtVM0fOcM>IP>%1eIx;iFG?c0GsCk!PCyQc-S5K{2mj$WvhfH>W)i{*>k-Wnv5 z8GGhhA{QP#i#qUdC~MDLTjmwdRyfiTFG@gsH@0`Ri
=9V73*s{c)xy4)_wIxQS z$~rx64iXn}sS+NeBISD#mm&jBLkCt&BzHTTx>`FLMRgLncp)rLY=D7Ql-Jz^V}roE z2*7yT)ZW%KS2U|w2D-SBMRTB}Hw95L5JZ)T(EKJCEoHt^4hGSK3x#r>Ws#B@G`LWF zkSJ0IgUX{7F>FY`+8F4zEa_~KNG{rbc-9sRM}$ucNc6eNJy%TT>7BDOJ+b)mq=)+}6|& z%$$rEda|OXCKe&)7|gUNR9046s6>WiO$)-f7!^tB3N_2HFiiu6DH$m5--c*{x@Z6n z2lP*7MU!NpcwsMQhuF5-1@U{33}M6qfZ5sAx}YgWTd(9h^_y0DxU@2kgd%-OB#{WB zO`UB`L9ECYPx6A9tgR^m$CVz5`o^X%kC>Q5zUWsnRq56X(r;^Ol`9XnnB-hJ|78aP z4j=+0{pZ!-Urj2nFr$k5O(!>;OI~3j75AG(vB(rE=|6u44V*ls17;5R1ak(_Osh8m zeS{Xi1R2Y)uOhCaj}oKUE##oiMx+wP% z;eOml0Qz$uS+pcO@y28)Ux`S?s~puiG^%)rQN=@yDjuRMc!*KOLyRgOVpQ=Eql$+Z zRXoI~;vq&A4>78Eh*8Bupc4-)kPHdf_(sZQf{c~e zXvpn}$4lxP8?nc_8*^q}Q$vp_jVF*~Y2)=oS$8q4#*htL8_1^|M22>jcq$3`L2SeY zvgWwbp&;I*43c6?aw?v7rN2bPI1okVG_`dsf`SF*OTIR62FSOx^785(XXM7oG7F)X zHDTHzpFAm!u|=qFu5#G+sk_!Mk*6rh56)F3@`m<46BIio;nK7c#5a!_Lv z+uAyM7Q^}7 zTHg%($!Bk;9N-F^@R8vzqLT_TFeNLMGqUxrOxvlMD+IIKAAWbEw z$mxE%; z0|HMb_60B-CtnQ0#CZ~Ij>A@dSzLmQ>d6G{SjUr!ttGcbvV#1jKsKwXw6z60Zpc8# z&a-5ihc!(H!4RgIoaXLe!<(#+om+?}ww@-#9l}&^nhuI3(@ZXDa%&(={YjJk7hxKj zG}(C(CbDAtEy7$s*>Mr(`e}zh!dySugAwNXX`^p4P3Ctz32$jKzaz};U=m4_`5kZ? zo-~=?5w0pDl|{(46N%Cr2T5fG(nkPbi6aaT4h@k(ia}CVjbs^2;%Ed?LdKIY$HHF0 zykY6%TFbEF&wCd`-X4( zg>TcsxBbJn1H!lI;oE`X+l=t-pz!VB@NH)BRzze;otD;TMnFtzSp+X;?TSKVjOJuT z(#{>^k<`g~SUcvB6z;-GiD)S*Ix)DM!o*-eI59XJnHWKqaAE{W3KN4N!Ndr1gcE}S z(TNdcVcQ!AX<~37oERMDi4i0vt&V%oiXDXp&4uM$&PYx>D3~JweggYzP?KEfF=aR$ znKD6&V9NAMA*T#OqEjXSMWzgc`cD}K3Ml~ltCLb|CZrN(<-BBc;?z65vu2)W*`tx#ytG*zg|hDN!P4 zOHwmS3TIp+xwy#4dDKE=U9=0SXwQk7xqO^ME#yf@v6YVINmDZj2SlM<01lpt0!pU# zLkJH=p$+12Vx*?JC{Alatw6w}7F2#umrVMODjn-2cEv+(I z8tq)qluZA1j}loqAGtx9LV4H-DJQ3lK|oHR1XJXBG6B?tei8`!NFMKTWA3mlaej&?;VtEN5E!r7fj%6@H5Mw^+WW(qU4m`Sbu zs--63z)OG%agc=qeELNOgmIe*9lJE6jY?5f;Xw>0L|T%9Kr}--ick2_f^wlmMJYKQ z9RsHRl8#@s9TUHRXmK_nQeIsY>$!e4MU&AY$W5(?HnpNqG`K*tw905xE2B-VjCK!u z%nRMCj1~>!K_n(x4>p{On3BRy=^!DfG&&uzUpf*~nBGWMn5YOu=QH+B2YC`=XLKQe zt<8}D;in|rr5uT?inbgZ!3(*nq8O^8@=BKcYRJGJqUeWBNEA_8*y$T75})`GkwTb} zgelH#NeHn6aWIa8;W?m4dZIz*FtP-E3<1Rvgh~(__#*|1egDxQ_KC~jpu%Vr;^lZ0 zf(f~UJk0#p%~AzgM4&)^ z2t@NReQ>n}dW&Npw5+t>_DcyFxNlJcH-U>!d<-X`M`^VVTa8a#6qZ{A3OL0!!)Q#D zk6;6$ETAOxnag0{K@gN%5R%2K*fAua{pICMaTAQ{Xk9mDXA6W#h+P6~eh{H@a_0yu zG|W?tGPS@*H(gTa9G}YsoqoZck&_HbLIKn2Y9_&GsPf@x%r+&^7xp&)X=_+}_69a_KvUBXL}4le*7j4wV~ zQ^7|AE3#RqnfuKJq)VFNW9q4I14rrveg^js;@IZ^2-*_OG9UqB0im)J+1NaB0tAc= z1sBa1?$P!(9PMmvaOv0qz-F8Hc7x22sKIevu{WO%l29QPX|Z{~t-hzVJtj}S0-=1A z8fRr`q!P3t8r_yo29SvyA;e230Y*?d5pW2d$%IarV976@#ITFp*}0 zN%M!C()1k#oS7n8Pt&Oygb6cg{?JpJkKeJICeH2|Jf&L;PcA?BG7z1N160fadpxz(_(pTp#~!tnuO5bT~pSh}5rkSnnS`IeuY zk3VFUrc)Kb3sFSc=XmlKV*e}Wqum)uN0gT436^FGO4B)M92W@s$?YLG75dHjf^*Jv zQV?*W)HIz81Rgjaoyo;<3!1=brpPo?c$)U-0}q^@_uaGG4*d`M%lU&3){z@gh}VP< zd>2MA&NQF#Nz)l$A!IY0+ecsdK>xWsqIlqk>Q6C#**l7}LX6H!}WAi9AI7J6vA+d7-`0|N3qZU*XJS#0(T*AOAQb_S9owBvQ(}hnZv_qJq7f8^agh<)Ym9N5& z$ZuFvC>L=Pdhc0;jUr8yYi{e@o+U*x366zh z6HenGsuguHpjZO%#faW+a9Dw398ad*F$uy9!g5mMDSM13`=t3SN`fXO;F4(Zl#>Hb zIrwlLeF>Tt~J$PHY+~ ziyUD*<9zY}>v>UBrsm{SZJfRrA~YR_2gPzcojJqCXfBp_Ud4ImRlK621Y50pS{v%c zdIdJ@kaKGG>|%Op7hlS#pI?k2fP1&01LFW*Em!mi>#a%_!2#UVRo^BSy!oZk00ex| zHp~;^mq~J~$CKoC$CKt|l8;>_`8#FFV1eiFl_mM?Ka?t2UXl;~Cix(6GWcL$@Wqbc zJ7atdJIUW@N(LWOVt$z9`JN2g7sx_9|C4-jILXJ#l6*EW$@4zR^FGOZBAHFohk>BU zH?;FBw!Q?f*!U8>Vs}jNid{2uQwI*N68lVMbEHF7R}{8eC$Jwy?&6Z$?|Jh|xFd`| zHAYid6C%jZwN_}o9AKP~|SfFNw-my;D-T25w^%jK~E z0CNCMDE@L+igx}t)J6zgxq>axmZj}@uxCUe6&5qN7Yk&6g|a9 z&Qp9^GR1;qiccY?m`+oC+AzhZ4O4u|FvX_~Q+)a`#it2Ve3~%DrwLR1{jU_UsCX)% zVirSEe7Z2jpG!&cH=$DeX|)u8)G5V!LW=c-6n}&(#bQc|#g-I*r6EQ8CgUmEg@Y%r zt-K2d))YA!S>6o^g(_}g$UD{ zoaOI=W|?}jOf^}imMkA^&ho+LEFWyn@&|&lLA&@ff?57_NtQpqm}M%bSy0XM3 z;p*Xj@L}96AI8n{Vcaas99dTVvP_j(rur-^2JwLH@<+Cke9AS+Vrr7w7f-U-m8AZ~ zll&RKWbj2XR&0`dyfVqupJeejS(kI#AWn$o%O#sgu+k)1LQJw)ljIFqXd-`9u4cB_ zF2Mo%YU(ih+?@Qn7?#4>u({w^SmY*6ZG~-a3BG;T#bwY(ok}{Jg5AbKH=-2+URB`%5CMG64;#HW zfCElR85k1SgrWwrB5B`Z^P4y=By{)LVx<;K55x{aDok8MgX{!h8&+6o!RFQ0P|{Z4 zg-wFu@S5n%(2W|ailv=Owgsh!m;L{Dsc-HVg{eFIyW)RcLK*!{9lapHyG)}-Uf+lD322b;$s3&V6^yEhaHgi&Drg8V+{l*aB@F4Q35Fh z&s!t}7eFLb*Doc!645x+DK18!Qs6C!gz%*v8FJ-cymyKwgR4{mwZ*ct0Vx+t1O*|7 zzt>X}yi5&VrUx(e!An!{(hb2Sg;*$XSRwiVxS<8*2ZR$lmW#A;81A;#Q(1PIj!xDy`rqaVv``tg^6 zU|k&o5ECE3;QU1a8xs+wL4ffYhBv8YmDXEa(Ix4&KO!iXb4)VqjdnloEx6cKw)~ z>cB5(wD688CYmABA|evN^xc?gdn3T!hB9ieya7ejElQFHV!16PV5#U0{5j3CjRC_i z3tnamgv}Q5#k5zAyITr|JYVeckA_8B4i6IX60r;@767={$iXp4wo5D#+>lsMA$ZH+ z;C*&CK9|-52r(X*&M}MSxwRmoWVS3wW`NGnAGHnbIN*nQBylKvA3zFhFMv=n0z(b$ z!wNGdnl%EB<{w%}E$Eu-i6o8|U5@Al_4KhSi5*loBnlJXglPg0cR^m4Kiqi1o}}nX zF>ZYvin|>r;C}21WhZ5_qNro=&mNN*o5_r^Q@TE;+#Z{e&#`t2>iyWv1QClX_Ye|E z>~h3Q7{xGDpV7Ckx>&ny{oS5A$$RbVSbkOP^^3MuCvAK9s-dgy-cFsg-Kt@eVv~lh z+G4wyI%(Xbkzd7DjausES9wbZ#V3_c>K&&}iY*;^qsV^tptD8(&O!OZo%erICQUl7 zW|F#e1MZ4nwNzato=Yb!UA1)5`zw}CS}|$T;p!HLPkMh60+SBM|BANZHAOMLH`mAx zD`Rov`&fOvHX&tMwpxx(Y@-i(Tit%u4aHjz-)g@l+vayXag^3K%3QN?5@C;x>E5_U0;Asy!jRHGo^s;hyvY{PYmM8aKah4F0rs{Knv3 z3vQ1uq+b`5<9(x^qbNbW&N~4-w!Qi?!Sj0=KiNV036vh~?b9zP2_eJ#EVn{=FTRV?Co>5rJP7)Ejfv zSrPc60PZzw4#E5WC5AeL4_2>We9YX8w6*k143 zLV8nk?g-P@1@Hm&%W)o9Kk#S?IuGy^;??qg{`zklHsIb~#oP+F%wjgg6~*!nO1~XK z-wvS%j}vJ>MuJ-0dpkkV)O|2_EN`YO;BVmdV1_qI_$((#zdOSZknlso^ivr=OTrJ8 z=}$&muC&LB@|9a8{IFT8Q9i-TBk4I7O*37VmF(xgqVV$Dr8(!N` zYi6u7!A%7(RaAAXs}1BYW@dfrTK&0S{EhM%>&&0-viJJAYoq*zF}x+ZMRMY^cSR@845x-g<28w)HEY8KvEVmFBv9`Stn1*O?R6y-`?aa9gV2&%2;?w)L!jVldAE z7krZ7PaEG9;BvkSxUS3q6@y=+J)45^l~nndW5;Ch zCSJ#mnXy*${`}ySvXX<3o$Mb|TV8g;m$&BcP!GQSmO(|;#~hMBZ=_l`^~}cNLG#be z-)yc~aBs(~qxUTLj9J$m(Rpjrb>}tiafH93cG13v9*<#^4X_1+t`QyZn$@R8LzNj| zJhYH7TscxOV!)S0;nuSoPL>S%Kh%-1oEJ9SlYdAtlnXJUgV4bkuBYBcNhwNsrm_+r zDI2?`W!vSyr@I#xjkxVEhmJm^ZjYj(@&A{u8#DJf|B~x!cHjM%YkwCOFv*} zTiw(B$XQd>m3ro|qjz8a?xoY_+|;?(ImgWz{qTfS?tF2`*=q5Fx2b!p?w@Oqy}M_x zo_)7j`g;C{)0eNR-)7&Qy}IsNS_ck^ig6qB1)$grh`$`DOAZ@ugC2lBUHzX;F|E3x zx1Kg8Q!=7-i`a|ODl0X7eBq?=j z+J+wlTp&-*1qrvF)}EInHUN&b^_mD=kR+AUtWA%=#hfB|Ymi>l8QhC{3z_doIWs{YKi>nqeDDBNq=aF0w6LNvjeAub#+3Jfx~ z^QX*p+;s#Mt<>K!p9r{-yUrRWQUgAL;G!4S?g*=rhrcQDEEbS(QJb+=Up~4oxvyBu z=IpP%CKs?xuFHGZ5k`zRB}Qh8x~$de<^e|f!JpQ4-54B`b!Y@GL>E~O6s_42xL6pe z{AZPaMBri)Q~G&oRRk_3J;9sRCkPIO27L?R3$?QXoJ8A+5skJdEFTKWVUhBMf+EX@ zf--{3KdfK*ooa8Pd@0ydd)6th6v~Of&kNxB-TRf(vf&Y?jOHlZdTv8K!%=UDe=sdk zFUB;)2iD(cznc}NG_13Qd^QxJI|}Wxg(4^03&r-{0B#;rXfG6`O#`^`W(1y#z`u#W z{RsR`sUQ*lCI@iQ&PYEbT?jcsv=_{G#)ewsP04&qnfXR2^htf%wd-%me>IR$B}-i? zw>_7Z|? zmrHo4B-v*({2mDpl_dK(hF>G$p^{{u!tk3UoP_6V(atk5gn2vqxz1SFUCo^uJ33F*tz_4 zL!>Sh%Zi>6GErrwJf&OK*e%Da;mAKYa?s|n19MJ%Wpe0^XN<|HM!y2{hh45i@KDF* zUpeMPQ-$zs+mJIteiI!&dA+-n1+zQ=aLCq}Chp}Sq`lCi`f1loW%qZony$?IdIn!p8$xinjr#e@h$?OLAs7D9wgYx9Zlu)7qX@$KJE}$+MIiWduG?eKZ0W?4Hs$ zYf}lHFBiiSz^$j$FC{q-ET?tjk?RU?nlZ@lr6YryvNA(Pyk(IbScCzjE!$h3o&_X|CxzT03doMD5zXnHWDK zf0)RFvS7x&x&v#=#ly};ceRa z>$Phyn0#)#c68t3tKXPU%a|M?ON<=C&tYV~L!}d7P9Gqb*eNNOTrp5Cv3Hho!xaPN z5@!*?2g)VRnG7EwmpI2Te1KfyoW$^mT%y8kX1^wlw^yheB&AE^tvXo3g)uLHOCz3o z3E)em9CL+wy-eS*-+$|`oEcJzA-sH%FYDKTVPu>q%VGHp@lia8_-+}`a$3HuA3hLo zkkL6bg7(9Uh=Gm@3SU^tkki zNw_yR|6Rb2^c|(hPw=<%KT9}PNbPAV#}{3G;ol=*)1IDh7FDB6nc8};?}vaHX@e!R zBlzkKcLt?vKSbb9D{BI{9xK418s8AY%cUa2Rq<$j^UVAYZBbm+;Aj2?jB=|svVLZz40Sv>qo#EF=LJk!f<35HDU>HVBGK?n%G7P{4 z;RkRgc(i{bqbUep-^l2zU4So_4CV=SsccWfHva=}{qVa{Ji^NW=B7UZyh8{WOWLMX zebmva`*42kXzhj3`JWzEozd4DCtQF1FUMXFf)iOjkkbnyD^~lt8?F{ZFX3Ow=|%9j zH~ck(qY99afMa?cBBs}de6y&Sp|)%(nbtHSh8$O$IlF;_V>0uR%BZOU41&#Zp!Y2I+BoM4y`W{aEoJDsB|OyZtveY0jf962!n&W~-%B{J2pH-j zIU&B(CQEp@f&wlkL;xo@m>9>6;$IM`WQYC6OF0aR@De#8z6>n~aCl-|AY+(YOIGrw z+8}F|whtOH!GgpH@Cg2jj9|^V zW9K!)Z(n))*!(v`?!BROTy0gldehk4w&lx=t$%s_A77$YlxHpy6rp-(4Wwg@EO-L0 zp`HN#g0?M`Wtm>ouUR7fz%aSe%vH$eLUkS@BvW`}uuDdYfx4}aQ8TK##faUGI<`hD zR<*wJSl?M|AD&)abg1@GzVR6KU+Q0<@s}RCv-&~bf$I9#%r$u}|L~!GtF<4`Uw;Iu zn*p^0#iO}m3AUkjfd2_N(6LH(L&9N*JWvqiKseNn;|LC6K2i==pj&Ar$S;e+VOoAb z8qna6w*@+MbV~~D<&Zy9@M5TAu|GyvNA=}L-43z#MQz%reTS-#f1F?ZW=AO5F49ix zo7kslSN9zv`>@KqL7>(slPG`Lz`~B9JqUgH*8EC%MTN?fQY$Nk%>nyvP>HP{^yA7U zW!mj^=Ps*m*m`blzD)i6(|p%k`$4!JtDUf0McKsVo0jaH-);R1+D$Lz#|532V-?CP z`bVhPWSz=}HuaWv$7skbuYz|<_WqGXBJ4)ObrT2)`ErpDNGJGH>b?Qe^^dF(0X(+5 zkS=8hXGPNkxicBLfQ#88%b7rM)DsF3`DM|3fH!fz-J@^_DK`@wvoF%F*sjW{8$L(< z5VZ8XK(M+A#CX{CKcH3}W}Rp00*c;E(5^R5k?3zWPL=2f3E5j9*B=CS#*}ZS4~P2) zvqD-^Z(zOpRcZO}Ui8pA>LtNgQ1TCmmnoAv!CUb*xe`MUP>IYwA&JM;- z!YxrwP@klgjf6?oH6bDiZ4<<)3a2Y(Hpq0WDsiF-oV=;7DoIq29BP&gaV}iB_N{#9 zCKb8#=+fbXZGnKvn=QQXpaU;GDXyQ3?(bW9)Ga40dElh2*WW_*o8Jh5Qw9Ff-{;hw z!wLM2!~?;f)*j>jmW%d;>7P=u#)RbvekRj{5nQs-a#pKSE|3i38;MU!->mHs;4@l2 zX!SeB=dvgqG@1`+Gz!N`?+eBuY_S3kT_4`2U)y3gj>(N!X8#*%f33p*Hmccl5H@Ke zrh|N`hJ*F(sgo|AzW@0>WA*#{QAL%c zuGQ+b1E@m6VQ+sp0vDW$(mzWZ>6ltz>mfM&Y@afn3j_XTr zTl%^hTeke+dSNKXa}F9NEWR%aaaygB4VtU`OLmyxV!BE=rt9t!-o0Us`4zUo1?itx z9z{M~lw-}buEg3lPVS+|B!#a%1M{wMN=(!un<##>Px|>oci1_z_px)!^v(aeS;?FO zC)6BvY5v;(+EH`f(S~IiWh?KQKD}sA(Yh1F4uRj_aWSIQ!PV+*k&YMOPirr13=S(J zQ5YdgP!g5%tlG5E^z(Sf52Y`Zf8TGUZw<=N?ss#7mSAHy{FTi7QxLkzbMg0)#lfO103Msz4_-*WPMy2aa?T6j5cr~aYF0K#zC8fnUYk}Ilw*|?+5?a6 zEdktoF#^y12Hful$Cg&c=i~q`+9NpzjgO=kG2;vEvS!NooS1kJTthnR2anKY6yC2N zGv&HUw7kZ6~LhxN({9BCI#;C}T>JX)eZ80Y6Ov9ADUntD7@E&Ke&KzT*h4O_y-i_yFIT;cXJm8Xw@>F?^PUv&IK_lHqeC zoHRZc^lO99|H^vwsue5N-}QU> z&2QmR(x=!m#P}w9{SAgJ({;#kDddZUB_l@|bS4=&Ru*kDk4Hvnd024?L(&efI-(>H{mz z&HsG+E%~3%TM^w!AoMk{A#u1AwrRIzVRt1qKnV#ufAh0@4jX#TkGJG69Ieh?a@mZM zl5J01oL~Kxx#pNxPd@pLl!UuTbPy9Jv{(Wz7?Fg-6avd9 zIVxlebm>4*p=1TyAp5sNpQ~ES^XvMK`7iC!SLWjfsxPj;=vNIxMHhnDgvK8eYxhR? zUG}dqy7g-Of2lOG=jvD02KbO72KtTg8)(J_!W5z|Qcso>025EH%sp}f*w`!p5mikM zA6`Co!UVhEj{nUCS^M~&6SjEto@2-S^S@36bXP3z7to|*^3yE?b6D)P zg8T&AdS$rCNN&MfvY&}b zlVLJNm0_TVDPU|+AqP@5hh_XqWfZ^LUm?`d-TP9Vja0CTazJ6Z|RlbP_2sA0p|Y z2#t{o*o^*gEd28#aIx^qaM0qs--#*WCcSwh$H$ke36&1UvfFL4L6qF*0 z(gX!W6zmOqLu0{)JsM+)m{?Jh7^Bf>dNFy^OpkA3@NT}B0V`pbC9y##bfSNl+mTzcAtVx-8xWvkG;H=?? zYfid4?EG`xy3eNAoL29y99KQw(kgCt_tia@l=ZiB*wo;bb8dE0VRt0CM~&|79Mb93 zwmEfqIVVaAX7n$N7@pbPwykYw+SFVjPvrsqP(#l!johhmA~d!jy6di#K`2)r&( ze@@VET6#f24u<2j%ymBURl0*+LDklwiz4 zPh6X6seAE-low|^zTvq~?^gQejNCP8J?+}s_Q)I_HK}iC_K@Nexq0X3Y(Cp1#Hsg~ z=<+q)3zKG@%az@e!@;X_d~{Q);~*y}?a%7bE#K z?~ZeFthu-5uKV3J=N+9^tvTgF{uD&}Nek^x34`9aQ6t*hWSw3x?{d-pRSV{?uj1My z=1ryh=;JU!L7I#U%@(^5pc=RE<0Rd(xDl9KSG;hR9#$ zh8n+Mpbf^9X@iS8h(Gv%U+BG59JoV6dBgWsJ6LB%_HY~DFF(fH(&3?z!ZVV)ksAM) zDbW>*{+8kj*94YHil7`#<2Q7(l)tsVq4O}~w^{b6`e)N3Nf?9ADRGHvedAB+&p-^I z#Z(boJqBe&f7J-wz7zkOi9#2 z{G|rp>bsSgDLT}=0vCfh+djaNvUdS?K>*8slvSP7yV}g9*5F5I_@w2L)PvSC$HAlh zEe#3AV{>2(M$;;2`nKe3Y!1K5oo0;G3=UnkRsp9*J?jU?`R58aJ#k@HhpXPjs-iV!!{C?=ZY4`&nMGkNe~bnDS`JNMRO4^- z3OhA+Xeh04=j%z@R$AiX(Ys%3vh33ot%Vuq?$M`jYqH=?eesuB=B-Fza9pqm4c;(0 zdehc~gsq#R`6}h_h+Z5^|7hK^k0v&ZkPtSDV$1jrMHn&EC2V;y5hYx6ja%0u{3?G; z0jFv}>L*(^wWWUOz9hkO5SF4oHV}kP+yp0IQ-Uk8Z@-Kc!nRiiuV%VjL1wI7?wUR zob+w@Wo5%DQqh0(=>Gl2jP5U1B3jKxWJb>Wi~>2Fief0k>5L?Jwu&jp#xIaPNHH|P zKM;mf1p|0e6Zl8`c?mAf2%87(M`EuJ@qHUCxvmL+@uYsDIQiXA{JJ|2>zmz|l>Mw9 zI*}XC7m2Oo*B6z%5KF#kSgYRwAwW->HtHWC1lWxUwnfUF{W&S(3R8o7L(@jk|N1HE zLiUHPSvN4c%fzl?tBG~lTtk~S+{gOGR1jn5V**Ma&9{dO)(X59}NCw$6S_bOLr zSXs3K4#gjJRO9T=EUVKWUaovp&eD+*}5i(x9YqjZ{Tcr$|Ai$5w~Hl+i{q zALFMqC5rkOuRnEO*k+ZhQ%utEEkfhh>NcQ5D_GhHK*LTWKH30h{=0_HJOnG}1`j^a zqf*#+U}=3iyA)9Fg>a}97X5zsNh zb5v8<6+%`HNo{?ie)r2h=|grE z)Xkc8I%mq6snaeNW6jy_99H6}!j9u&KB;WfUME2($Y-knf~|o`=d{-nJeni{A!Qpw2divQGBB1V2apWRyE#g$JA*9dJ7K4Mm*;TvTUHA!7odvqm6GUOwSwm3C%Jk8#DZmc#c)Pp-DM zNuQXvCc$#BbUsYtl2LOvrn-LT7H>3x%}dUw7s9rDX2*qcU^q_KQ6D` zUpHV|cGz`wE211;dLbE&5m>ngKwER4lpQ>O-2daE!`Y_^f zBMK_|+Sq#?c~F~X<2GV(-ptzZu~i@Jn6M?Ud-?o{nxD#5z_b>*9k_E0Ua7kYLCq(5IgpBAVp?{ z^AWE4-=2w@y4RL1kB&YAKj1M4WLdBNnn_1bC1FA(!Z2xRzAM4M$oNVwXko)~eJ7QL zT03~`tvVmqF3hXRb#!buN8j4X;pKI0LTy}ay{zJs zTRC~VM5lWl=4&iHQ=?Xi`eVn%`qkA_U!Pu9R|J>hoJ)ng=Y@skmF!w$=k5zH7$~Ya zZ=fi&NP8V1)DSLZ16a-@o{c&xn{{Mfx49T*2tEiFyLSP;?;Kdv(_ zvDz+qJuh+FSE~|NO$xU-ruOTb*du$}q#oT%QhVo&4Abpzle=J}OYytQ7eBf(t$jji zbYT0)5Rc3q=hG9H#(DRO?a+H@X?%O=6MIa04<^l1rh`FFDAPS{WP3rgqW%$?`W)kn z8QRi^E6MWbK!Z;Gp4uJuUMS`L02ZIqGJmzR^RrOmYyI1-+t{X~m5XCX+wNVg0`%vf z13TXN+~I9%egn@BjJVP8>a&mpLuG(gSRXJHXoc;kh+fQ~aJp_x<1o19D*3S`IP&@G z6mV?oaz8l)`_kk6z`&8)({YXc(+UF^mm2BHap;ah`!n=o_#10X@uNAr2F8|_ms*+d zW6D;hQqhs$Ui&0={Rfp(%6nN`rk#9n)Jx}*xGZYo=Im}2vnOoM>0a(~@7AJ*cVbSUq<8`LPRV9x%pKXEEWUrzfa6%$BOT4BRWHxY$6*@ClYy z@~g_$H2!`Tc7krjT*tmA`n4JyFGX7Qr=EqbICB16QXp>V(TT-b$@-_G4>V!3s$Yi| zv{MDCpan^D{RTXr!&v7(cuc+N$2_0?&hKoZ<{F}(Vb4?2%F9dxf$e}Z&MD7B&O*J5 zD<7e6^(-P{19{~1Ou|7%)JS1~->8QwlC7^|fqMi522clDpg-F!aC&+k1UpWh>2KNd zWM9k3?pQM5-KNZQ1(*k;L~v)n)brO*a)#+bk?32+O< z*t){A6-q+cc~$G9g^lw}*&5`+tAvIx+ys@A-j?HLxjpLtEYdFEJ6qN)Z}`-*=DD%RTOJ?@E|FdVr|7spv z9+SSKz{aY3_3|Z1{`P)v-@wCf@`bR6aI2hQc(OjQpkmz7j?>LWrt>}gB z+H6!N>8w2tr&u-v+=6hBJDHp@ihlBxqZ;9G3up|j{WLgoOTFXBeNpNK?qY(3 zq!@vlL1GwRh>Z%VKs-G^8p=AbLyWQDpfwlm*w){^gLSx7RHF4~4!L^^0wevp+70y@ zk!JUq{rIh7n4m>ct~PFz+PiE|lBHR-AY@K>^q;~FF;EJL7-S9x;b|rZ4gNiKDBjXV zKhfpj(+3nXs;d6^_9=sk4>72(a!?o#Po5ul+(q2xB6|OSj@sA`qrT4QiK#wynl!&; zT=U_qT`hH}*eFvq-IX8W+KjY>+ee5@TJ_>BTT~pI>DuQS6MLd|{nN)JUtm}!?~Zy5h^dn<F_c_2F=vV~{H0bV#maaHA+6)79I z@WVH8-H^BEAIWvuYS(k}xaHTJ{cT+nD)XkF%pShs{Y8ld1AWaMy^daV+4g;(oK0Ds zD`t<|oY!NH7&&-SpCqrC&?U=07koyAWz{X6_xD?6!Gi`lsZZ3;U3(&lD;V>_$+US} z#%wy3K6@i%0XWKshF-KdV@rny@aw-oyQTBPz<)iHW=Dxf2mal}pzP?x2D5`NQoXT= zH@QiTF%n$z{BsgKSA{ih0iI7YhmCN$g-Y#nNVWowD3b)Yh?eu=*rGF)EVa*Txqlpl zzcTg1Hgwt^8~QY?f@*BzJz=C!Xz=P7Y+y%UWMmrjF^o*9JQ&C6mb{slxc$rJ6=(BL zx9XD>F|N9x=eikb8^&9=6W?+r=bT+Oer;2DZDr-7x2C4=pAwedHFe9m%(WYbR;E~2 zEm;L0Ge?J1w->&}MPJ#S@dSScHi(w7;P3j_cCJYG=?TY}r`4!%J1u+~&&G?ZKQwA> zGINd<@95uf|EXvaS}0hc>3Gf{6Qb2{2C!YiNsB)u4>FlBrS?JXq}JVLS0xtLM7Wfl zEOja=jaxB}+o2y1N9XLk*`euYSL=IouPs~~xflZcGYl5=iM5o%phbVsm6u17q15a7 zspR#9JAh(f1E&|97trX24T}Z)o`4n27vpGIG*@eH%j8%y&ss$P#L6e2+ThGpIwPJa zY~*m9cnP6EA-W~x$%o6AW9=xav1;k)j|{tJj!mA?DNypFo9J}=v4ej5DYa)_4t9&V zKD)GLu*HQm=_Fl&hVxXW6dtmWV-h?)+_1bl zmP}`_!%Cxi?!bq&skYsZFD%(M(8cvtC&ww%S`T}FUdh%0&Jot5!p$9ZT33h8 ze*Lo}-4RhKm_A~~INK?;qL{%Q-?Av<+)A=Hxj)Boo3wmjY;lY@Y3_!|%1q#b;V||{ zHa1Qf;0ny**~2qL=te2@UNPS>`Z-Yfs8#p~3vXK&^EPdn^X$x1(RvWt5so!Z%{@ud z4RD%a!1MwVoElfNi~*(@6!5$zaJnZ@Jwl_&{iXIW>rMKF!iTZLhtv-}hDq(SMX7|^ zKry^Y`~1d>|3p)K;D}N514m464q36U-WpyVi+il>Rp zA^f1zwWo!Q`rT4{z6PI2?X}!`a*y?cjez@%ovvv=xI86pV{iyJ_Z>UdNboPj-Ntdx zu|pGsqkIgCk3hfnMb#z~J^>6LTy@v8EJXS##Y@K41V4sRVvVzqzXdI9+n6umOwO=< zRvPPCBZ7Jti z^Mc>$$iJYn(_@r1&k02$a zCkXW_Q~Q1s zI+l*_KQ*-**)54&;{*)#8=KW}QBoCOGX|rSKEalmQ5uXIZ-jrPfafXT%_h?F^g!W( zproFSc?pxIH2ecUDGeztv46vgn2(K6gIscJ5H*w*dco5as6UAGCDOs)hwJo}evgCL zS-z9n!to1;b-NK^&>LKB%3|s`YcKBrsqZkd-dM`N*<|NV3UZrG5KQTb551HSu%$ zTU(7;moX%#zk@~p+Ay=={;j+Fsx{rZkq2oHUwyzOg_S2omq!Q36%G_vlcgP_yhFT# z`g5O4<281GobAGRt?BJl;BGRlVDmMP_^`QzJ~O1#md@^}K^rYHz18na+{p77ULwC% zeV%OLrbuv(6*Ondf^ezCjg8eJ*3Y;w8qM2Z$UM#WVU7p7h+2y%TBQ%m^&|AYsWsL6 zKTzJKaP9|sN0Id&D8XqON+V0lZHy9ED8HwHK14)4~!W#!h>)7#42b%@!La^ds(#Ml9Xuvu%4sRlGY!4;oT&Y>Ttiu#(8QXiICYgSlC!{vD&uw4vR|4VSCUkmgdBEsHB15VIh$ zUEfYa26t*7TcxjQJ`qrX7bN89Dqt833&2_OW zTgw!zr2>w%MD{Aim$e0KNXkouCXaIeff|CMpw^;~*8OyC4Y}_sWHj{IB54=R7NLZm zfbqFj&f0P|W^d8cxRj*@i8UOf2A(+@WD_uLS;VN4UYt{#iXEdSY>af8RXfMJLu$_v zbK)##)XuZbP6=O-VN)t(JW{7k>p66GoMqzlu;iB)=pDHK#b>r1TpXzn=G9~K2h19y zA3_hmxHe5Ou~GG5Y|!*H{@l0;+nXKE>)rnawoOi+FQuXK|k$--a)MgP!B8bJmzza6$ zZj7gTKI|k8)ofuV&{vW?$w_kHT)^pRhrw~WSSIcGxCI2hFu@7(w_K|W zGeSK`EjdEC%`JI8B-DghcJM*;4kCRch3=2kACixOXx<3ZPs;s~Zpk71V?*;lG}|)u z=`G{ZWE&L*w}hCC0-dXbf*e<&XlUS0c!FODCarB-bDE%jQc6&-egX#cp#(WkUFiQr z1TxeGK!D~}P#P$7@#FjK6KEoE&)j+&K7JC8Ps^bYdbf;qqd)az8OVkWq^U}2dF7@x zQy*l+6Hsd6)oXF#z)?8$2yrwYN*HG}Ut<~bQx2nfRA6?>d7_SD3MQA}Zxh&$297kf zM_!iX=7z8c%6_h?E-KnnA!7aDOpq=2pQ`NNg*w4MysO1Vevch>(I}|QfqB%k$>X! zFXHX?1Kcb%)^?WpLWX|+`2BIc^NR;@b8#4$KX(7f&XdX`^>ZNvG&);|)wH9LKPgQ? zvJ@2gve>GmgjtrPxc#e@@LVVD{Ax+g_WT|_O472o<@cyCANzwpGzCL zbOg;%oO2j7XX7w;$pg#@Y4T=tB^Z?H1_ssK$ON=eZ2UmDA*=wsrM90CdICdRHBonD z_0GD1ETM*KVQgCfBO0?|T0;GZE;uzkt-U?(UE|)WYeKKl7a!1^!j>n)-!J3T0&icp zqt`Hh+xkD{T*9Z%jbY+}g40k-3%wz?L2Fz!=7wSTBe&s_ z5u-wST|r*AudPk3jZllu*(NBJDTxNkoJmt3^vc6hQ{T+3#%N=H$Jom(ZF|Fr79EYy zLPgt0bBrS@pz9?l+>Z*nK!%f;Og39P;Q%k1)kUw;a{o%o$mjlpdw14(_-N+nvf={!ckk@#*4lEWF6ZBfpRn20 z$2G(wt<=rOEwJ4D9#s@z3PvKsl1ggDNZJ5%s#cmN-BL!-P=++9CSz-@`9HKm zk`eB0WZV?8qeb{=TDzl#_G9F`Iue7K`bjk~09D8u6Fg0YO`(N;(wnq5DjD-E;%RDc z6nKsK$AYXxnYyQ?%>uCK@;z^34Apc=!l3l|2d)kGU5W3LwIx@+VS|3k^XSOO$jccd z*R&sK3y^IiYXxoy9|bPQ#X9UwzhVcpU=_&}wa0uUX9%(e*rgX*EdtS}g45hvZu;MQ zkl$QIT`1?|+Hj@nospg%`|fY5b*;_ZQ{?2>ZBb^eyO2>Wp4%iI+p@I&YX*3^qm?75*|yX_RgiECrlhGvTG{MTYw&up4pfgeOgM6@jik zU3eC@uqYVqs4(-vlGfx`zgE+oNW+3hmHDYr5>tR&EWzhTi)f)*D!=~^4ahFbx&;` zWM5x0QTee96j0bY8({QAeP z1UxIho|)BrH(|0|TU{$fw&jdE4K=BccvuD>pWx8L*2}V=&xqxzR`9zV9+|_DlbqS$ zX~Xp4&`h@CNY;ss>?>{J=Vj`F1x$29+M`+f^iQ(nFDt)Zg?sa)ET_~GW{IWDlOsamjvH!9d?i`{$ zx5SaGA6xo+pHw+S||MeojgA;kbhYVmxa7 zWW9sr#^8QOFZT4rBq>PK3Jjd(vg&M_cPDSHhizB?KDp60v$)^Z?T_Sma**TNq$TT} zK>?@?z6ZF2r-=qwNWP`T6e15tPH1WUnE!?bw2XwM{DN%-Cs2L_(?o3Ha8~WWG|pDk z(>EoX{LxTt8+N$G}$}lCulM4_%2%={#Gz%+hyi$mO$dT@*4#@0^sgZPb== z8CkgvD2gX)q@IQC=-B}QGv;ok#wL2iHHfbCL3V^vLdc-gCw4ZhU1Mugutq%TN|L`k zR8B z%$;%GmTs=mNT)OYM4XenQFI{7oRA}CCFA1fjW9C{i#t&@*THt)8>PDry7OwX&nh`L zqAb?Td}zk*syPmp3G?^6gjJ+yt+bwLBf2JZ={`gk?6;#LeRbTzY;#8oufAy=hj;BA z?i}Q|WBQO8DV@QAA40WI{OnW_EaGbP)FXxjA*8m(zd`xyk52}fv#2wDH{W~J&UrkiF(&+s%=PooK>(_yJZ|7ud7hR27r_YBJP z=;%MIPgq*G*9J2E|1hhA?@bOOot#R229X|l(WKzoznK)|l(7<%NNTUrLq!df$fj_O zdG2&7zx|+Eu&|IY)*AWv&{I+`g4);%q+M>UQ?^FX7C}qnm{hK%UUEpF1c&@8d(Tl> zu@fzD5iGS`4ec%3k_Co-gjU*MBfOnzzNsIkH)#LR1GFC&5tql2#0NGK{hX*e;_o|P zS9t4Xl?zX*mj06iUy{uPJCWLoRDyASJRDeYxMaqWL|ff~eV0a#HMg8Sbz)V(e&1ym zN*A0OWve?_b2Ka0!fM9&DHYy)dVEEA;`+>@tpUNh*xp03Mx_R1E(%Yn9$&o4Kgc<1 zKvdeqUO*%nZoKulg#%O1p!^voRB+{V4mLbX>@Xn0C!ca$@zMO*r&AmpcTVP3ZE$y3 z`SHSio_k|wC0IJlt(@EgOt$L=8?q+G;b5JjHyOPm4O9lV> z+k*dwE7`+#%o>_GB)mhn*r?=+)CsF&y0qTr)M?bPKFPxpCJh@9(>Hd|q^!kIt0|3t zawpWe*x#tFfO8Bf zJ99JJ@Gip_hT94mTecAMN1Y-fUA0_|#wKh*CXQLF8h_&nI;6pGFgi-5B zkWfQ5J<|^_&e>DDNAJo-yE;70@lJO0pEuw(D!n;?Rr(2qqI2=YEH+r+Vl<((zH$W7 zP#{qbBg#T%#F%j%_?})Y;O}g|F~Y<1#fHr@j%NxdwS^_)_Z2xhhAhe&SQg*PtaIN! z#U%pTWfJK_o@9O39v2Co*QlyHQj&oNC-vGLVf8vD-{ zf56LIoN6=(vw;yt7HPh@NN;cvPr3L%Cel?^%|*H$M4EQ>G?8xJd%MR)x~1WoiFC8E zL8j4I1dNU8<;sh&lq~|L#|N`TU~nvgC0&F@5sP4Ik3|ThOU8)77D4JIN7aEX0&4gw zTGK@s%7}d!i(u*nhONl1l{L=9A}HWkgzan*C^iCH1gZaQ@d_4!sHUJA4NXsWYNk7I z`eMn-NU}Ivv~{5KwAvEK!m_v(<83FOJbk2tOYDc%Y3EkcoaA}yBPx-PU%#8!Y@dgj zupVjXgf9U-5ISMhU{D%5k)U!|6;O4&dK8k>e4q|nDtHR(*@O!J&|yp)jc<()UQW@+ zw~QA5!;Cf`zcq4XgJnzj;moOrbA-;=A4dSDca$-CT3MDRKUt&X1uD!!S&FqqcHN4H z4xc~TuBdiRtDt_KZhp(AJKD9iA2p?@Lx+ip0rqX@QID8@O#Z^Y0~Y3R`x|zKw%PWC; z)S3i}}~=x+=qvhACv|7e?qp1@wC( zv_b*>#soEvhfqCOsb}cIZ8OLunv^rQA#*Mr@JIL zl{N(~cQTBl(y)*`oA1gPwAq+Bs^O+SjF@W?B1tq*P#w?U)7a`OR=EjW!4*n)(|(ww z(tcP11YJ!;DIlr9k@b#E0@y%*6v1jN2V29OG1(qQiNZ!Mt~wR%Tv}UV+qQ62^85tb zlG>@x%RY0M_{M7PoIZBk?0)@bkK<10!&knUjbT`KY!d5DlVH;Gn=C!9&80^%m?S%# zd5c3b(TRP&agEH~3?Cy~HisKTrwB1~kOO35_+#N~*rGLUZ(uhGN>wFoZBUgA%9uog zO3YmZc2M@5Eq-V0Sz>yrwZ!zz`;eGk8jlK-)K(Iv<~`@AVvRlH1yI9WjOs+{#R!)e z2WteltjxWipv)CZ)kKwVXmP=enq2ebMUy#ruZdYG6B&7x!NIG>2{mbNkOYP}o2b;@ zAW_nUD7h%yAPIE(l3rs(l=T{gmNU&kp<1KnU~uN1l3Gi;ltG!qOA8@Md=Ye`Vu>wK zUV%xr)Hl_ztZ$=+mHIX=C*4yGBBWp+NyijHQz`m}rV3;7iyjdw$Y52DQF)jqYhc5r zbru%?d2YUggVmr8qe4mA4gEIcW#r}D$jrQ+%WbOPjg1uQpZ`tzD$&vM#DT0lu`l5s znZ7FSy}?ajsmz;luhTE<^Mnjw$;M?`=7dDs-W2!sU~*uCnNldM9aSfW4ke^`1JQ}j&VT93tl5i4T$WIgNcL68S z1(<3gcOFiqIFWyhI8gzVaLOe+8J|eF8sVtrey~OORqkyCoVsMBesZ`t365+#P}>A2 zJ(xm}`loy-wV%X6>w^||z(YBpg}X{B*6b{=528#c)(i(3V+^ea=jS56c`dhX_tqIb zGQ(f_SpUG0#9p5n)UEZ{Zt3ASxrNKQiJv7^g@;!qeWqW(SiihJVb#Eas}kyAHM238 zd*>osha3`0ah2gzg;F?F=zB&eW}snEuJJ|Kd^-htK7gIcvz0;M6{a4Xc!cmBB|i($ zrq0E*&iP zdnJt^FX#_)?{IB;cd^{>Jvv#O!3}SC2c)@+(v(!ZVDs^z&Q8>sWa3|p#m>LFD7(<~ z#Gp?1AaFWK;L>mC!#Yo0G9oxaoU~1_vWkgI7bZyZcO!?}^r${%YENrSpY_k!^RhJk zZxN87Q1u=4*D8Azy!Cj$81EFCoWgkX;z@|`UqyNKiOd`v5awuQ?P=C0vYmZQef-m~ z6EaryN)7W-3#&Qe*4by_BdaH3E8)ec!CsA_hu|mEL>TVW(9vbx)5hk#Dl6+_pE#b8 zI#eJib*P{75@Q;PYDPE=-_0gCQ`U@{n(N7wnWWZKOjs|7x<;8=)3Ts4d=kM;8)JK8 z@7OnYo8U~WjQvotlKVHd$9~$~)SjtG37Z>KMM|w1YXfTdn{8fFulcI8CioNPLdAvk zX`cP<<9)zXPo`oG{Vr5#%%t-QyGNmIqUM>eMyPshiNrlOxQks%mc|7~s{fLgr+yn} z)c( zCuQgl?{i=Rvmku9)9_e{^|EO3!z&w{eQkTRjSM2%FZ2(h$!Tt%xGl6`#E61WGC`jz z4nZH#7eaURVFB-~u>_Ql*2kIqgVQ5)GQ>si$8iPjm3)B3fi(@^SnkyEA429Gri^NN_!xnOkcP^SaIg%gGkceSux;N-@}9Vzr?RE5588f%5KKtUt^ z)n=jNhM~==)+}&r7}`Wc#@LLA5Z=R-u_fZNoj@XP4hd1fsTfJ{Jk`P$;kgDlYz@%Q z*gi+)z;-zwq{AxUxKYy@HVCI022R+AyCKqx6L5|YH7e{-A3Y82u@C{Gd}PqYhjqlA z%-eM}$H z7@fXK#;dfF1i|x6z0dg9*7Z`k6&FZ5{G=^?my95}#j$G_DF4oLJ+e}XS!sx%mZ znd;P^+^@Uc@F$}Qu3oigWFf3v*=n>zdKiQNVF@iB^QHs%AlEw(78nwbkaA4(eSEa- z+{wMRu@QZotn3HI`itkCgcuL~p|o}VESv_9>_jx?;cyvH^oR`0vKY+PSZ@i$nrO{!#^-~3@RQ|>%D7y;w z;19t!ka@A_B^uv00u3-ZQ9FbC{ib+(k&n+WWbCaaw}|f|Zzu_e^UtC`IeRlTub)@W3N34)HmX^MP+LqGY z2RHCTd)TAI6hwl~S*yhY=;-T)2EV_(GwpNL55Q|tl{+r><8L? zS_E)_4|Z?BGg7ZLHquvT0mrKjM!4}+2hC>GF{JZFH6*mp+DcX9&!T;kc?Gt*!1rgL z%k#tM%K6dHhp^A(d5ZQ;<{4m)d8)LnE(i;lD*X(fE9Z;v3tQOt(W62LjRi`V!C#lCJU-xKxg)_t?CJ9ps*={DB#z(;Kl{IqP#{dMd9*}C%ky@4Mc zuuK)Nf4D>Z=Td(DCDIDWG(d1RtAC;bnZ}h7QKE+?9f6iIn(DY}O#UAGmU-JY zR;|ri`FHn^8sB<3>Djv^!#}+BAhoBpcZjn%YAn}}d&$pRt=?~E!||CVVk?dxn{8%& zNUiqv(~s1@A<2Q3$B==7mP}&wl;ne{yv?jrDih}{(5c-!ge8PMh?x!}+8-Lv_o+pVL&N?~Ftrk%y@_^Xm&Ra_^Pujn_ zvV57;qgso}@i3EA^cq~Kw7Xiz9*%7-@339TyJFWGw#~wx{!vU4{s$ZaT8)Bz6JXKj z8_ItljiTqO55$WaUwmdub9)dxz;1)BiN;Cjo0s@c!)ZE1+x(|+bomz2M_4(XJ zefNfcxDEOO%n5H5v2Ftdn8H}+4>W6;Jdc3W>ImdTqXsi#-9dl#K@4w6sgL40(!joW z$hdCOEiT~)eL8vbjyU6Uyl|tR&Mnh-)T_9?`ZUlv1>dB_%X!d-?m2R)0SR%Q3)@w= zOI&=vZZb(Dm%q?Al7LUdMsdp|Aw-`eW}~-ku3GP^cjOl8XVc!q4;la2va?5Mh_{Ld z2ZlN^7NDz0F+<|=dv2c)S7O)6bH}}E*Hv$}_IdS9{qg(f^Y(GMcN=csE9PqUidV(E zFFjbapWHb4`gi(F3Gs$^^_iHi8Ib-SNZZpbObRqnL&Jwh^vBFSXvo+o|KIt|59)5~ z-@oEByx@hL1HE@_;4e_)qv6vj3&w)` znvE3-Q8N06m>*!Ddv0X5cYs*?=FB$jTHAH+mRS&?|3u%hZC2IPgTcMSg}GvQ`Hc$` zCe~fAAm{4+h7=6|xo0%KsF|Q?$1X-d*)_F2=+3XSJy>#!WP7}c&9lVY`xY5|eZdW4 zRovS9eewLY^pfqrtaovZy|SaE`nL_PF7a=7Ck}PuZ+5$tsQZHZ?u$C{mF_X_L=~^@T2u4rT`)Et$$a=<61HEY2asD>HrUbV z3GOF)+>xSa)Va*66`*5o&8Ax^9>eN}*$-GbZq6@xt#ki)8M4C#kAMF~a!m{^SQ?fT zex}bbJ>D*254PRHve7;R1lUYeLWD3TA@W$zkP`eo@}I^RWPUd@;y}*wQTp{6D) zZ4kZrri{ai@eGH1M}PyrfBbs~#UA9U0S8*boY4k5on|(EvxoMhF~VlO_UCA?YS2!2 zbKCWJ_aSvd?E5XtobzLzSHz}+zkOZzwHW9j9QNQ>O?-c!7)OpxUNQia4D9DiE3b4qtc9xN>TR)F9mkw*hhA;=F%WFIp-Ek*xyZezBw%#1=g>Oc)HSJ zQo)rat8Nxy><^+AjZU65h_Vz$xDR{-1b;fR>ay?)eo6ioJn{aZu=6E@opB($_}jdWe#hkwH8$q#C?_!75<)fGbTrwj_Eccez|-QsX# zaa7z5lhmIYM{2~p14HOVMZ>$~*zA3&`w#~iC=;4F#N|%CuzVP)<9m;v7hU<^L#@3N zFI7gbnw9BU7r5-z4~j1mi;}>A@5I->cIRq69Kz?GEFLy(M9>nVy>O9emITF3i!D7l zXV4k(;IF@mFP`O2gDnM`e5SnxH0VK5dc>WcS;6m+?`NJ{x!dkyru%zTxC)FD*Lj!rA@A1C62`s?J&&&9Ut_E8H{7j8N@VyxIn(~9}ggBQ+SG}h1&Sqsuj^6a4wWHHc#Fn9EV_*Y3_ z;2jcuvH1MT;p75ayT$)KP2$?YzNk$%v|?@mFmU@EAn-hPs>#H9KGg39z9VVyx(}0VO}# zNg}^)1dErdY}7|kH&*^UFmqt2ij(AV}e$m(BXEOpkt3DvY8qdn# z4|#iy`r^<=;{RZulYQ6nwA78+I$eiZ<2ueu?KUmaD=0nRaoE7<>-Q`7-e0!lZuL&F z;c{j8S(5VeapG|$|JYLT(S=ju!&OVc~skZ*@3dC9;jK2 z83n1}fK?kOK%Zy=H~MHL{f={kJrKv0W=daLCdWyNC4};Mb>jV^K+iSr6V1j;|I29O zUSH+T-|g+%>%hvT_g?b$PPn*e_=>Vj-!}u6z4~cG<#9_3O|P(Ej}F@xF{=Of7;(Fj ze?)xeojv<4HvUpsDb`;A;g_|GpBi0oV*U`Jdu-%DyZyWA!_tWTq1Y?Ml~NyXA%h`9 zr2`EP-5k3Prx0C~YM+1Ue`!AM!eoyI9m8nZy_X=nb&E!*?IDcc6axHSwd@f3OoyuibuR*wFe6oDO`^+qaDeH1zQR z!{gQ8axg2Q5{4iZI9<~Lzj@Ir@Pq`iXJTZ2a}I&h3WeLD#V2xvk@~3~+zS1` z+`8k>rM*VAPkaC7sM&olh)@zlYw@_4h0KNy^6Eq-C(3jMoW*X}D8j>e!&T7caKlwl z2G@=@;*%lQJ7^9cO9nGrOzOupFT>T*7@YcJ=@nEqU-$$_0h_mReoy$^IL{}1Zk+EE zJ~z($37;G1|AfztbfBLb>0rX&NRKCc{v^5>Zrn3^(FhQui{XyEL@fgqO`|oTCe8Mj zxJRNUXKDfTTR)G!)SDKhDKnx^8-hq~~j6$5!=$~nBX zhLm%Jj&VPg?Hj(z1bAF{gRKiq8voJ>OR-)Ys&Me+>zQZsV-~!aw#{Z%^y0)h#c7LE zd${zPo)$H8Sa!h@-MXsnePb5AG_If~uUk;^@;4lYEEttEeVAWT*R-&KqoOO;ZwyPB zIk01L|DMssgK~Q%FB{vn=cMX!6K-ADlw!adqNNci)|{#ru;rCxHdJS)h6d$!=6S>I zMQSF=spk^C>6NZi6$er@r_@1d@e67!%{u0esI=NPZ~JfuGrP67xox_|(<^ZirKoVe z@7{n}Co1OEjcEs?i z?Dk{hLxA5f;5PvHxyft*i$}6BC^9YLhYAbyw7|k*b7Q+{rvL9<`%hAN_J;Co^Z^lx-w_($x-%J6V5oSAr_~XzMh*gEIuF1 zFuudss(9cxGJNQ`aVIL~zq>AW`1-sNGX}vL0R7a{fS(;U-XU0i8u`?QqOz)FS0Wlo zA#%n0=b@?aM?$;@f7+FFBHEzNZk;SNUf~V<{!er!UkoUWm{`4M)cP^j!VD{&X#2c` zH!up@(Ioaxksn5t*q-im5_~j)fz(3#F{)iH!c$f2jBqDK|7oh@MmVQ{rz_fX3USL) z{lvtrxA=_^1~nE8;yLgyuGv7zx0LSmNsF_Geg$Qbn+3ux#-u;q>P?(a0L ze>cb8f=12lS|<9=+*%$zrl8C8`ENq>TLRY-kZaMuiY{3++>BBuj=TDEd!R;Rx75`*cP7JZ3>i_+*f1WBYssd@LDigvTl7 zl||s`NB^xvdm+v^Za}LGaOP^w4YO1Oz}2{o7{L_gI~Y8YwM{5unff-{)C!@i@rkz@Rh z5h{%X8z_e!ZYIMNxTgLEN{rPnl@4?4L{E>MaJmF`MU8rU6mZ~pEoaYMFmBtgM*3O! zy+YKxcD)l3WEu3DsGqy{({@hn%J=U1tesQ)IhC$FadoLB(cgxDMTWk5QhfF6@bAS} zkL@C!L{&*t#8dpPQB^56h(B_U8#h9JXaUemQnCzi2!#L{uh+2xpleSqvvTX@+1kp& zX^2^61)tq;Wmp&rfNCs2lo-^q^NH3Mi{Z zF1I$+BB!l8GWL^Nr*x&QDYU%;I@*UqBLLlDglf&lH0HArY0=UB3V3Sc7zQ5+_*V*e zS|i3lKeYW71w37DPe;AaL`T9U<6{FZ4;0X`pU6W?pchFMy8=phO9o@8BXweHHF#`EGQy-gx6=M_+DBv4DY#(ktuYRcOU}HR zASCQx9ByO3^5R%A-;MWhUipjI=%ib_?t8a~r7!2&IBwiOdii`OZ}FQ;er@f>zA|&x zmUvG`hdxs?iWl_NsO`3_HM5#IZ*qA}|FV%4D@rQgWD@sKZN@lVM|X?n-0lz7r&7k> znw#ygq;{T>>l;=cJ8|@?t1#b{@_z@tYS=(Q*rc&AqqkZ4%k0u1t=v)#lSD_8vUmiW z8Rbncd>alkH0sNI0>W<3z4tf0a3#UrWAA@g=1;?OzT?OS>G|IB{Olb$c&2zJQcZ6i zC1mw7{gGPneidF()*t8E7hRk_?My+=shP7cP6Ge1p&2LBkUCY{Z!x}V4j)ZUGI*+l zD)@)Bw+Io>Hl$v((_RxXhMd^xlfuo;kLG0gfYH4Lcu~P>%;zA{}m{V`Lk>ufds9`f#g&lgA zov7IPa5d9>f5HrBMMxMgP@@H*T!RtGjb5|TxkOy+##gzCRg*0{MM>9-tv9XU4({5e zpBO!a-7SVPVF~#xHxsFc^55&INK5xEG?xz>pye!Q7p{^;iKhqIKzRmf!ux!b;=oVQ@~T@*L|C}PgDJ;#rEmS_EZJR{bvxU9Yg;L_*l}% z1gBeo+&+sSYG-J#fE%upF??FQFkzyk);v>n&eRLtSy}JOD;nigHOga-SE1+`W9e~!AF}|c*^bjb{T;DnxVs5A;pMMJdZ*N;xQ1?=T08Y* zxhsD6J}mz4*nQHjR{YpuIkCRXIqH9{zMP(Zxtjj=y7=cRu|#YB@IKaHP~#sOXGmKY z>4;9rPD_GH;%2O>3qeO9N=`h%ueeCIx9mWKhg;doKG%kBoa*9{T3oo)z3tJq?h!@P zH+#4Qjp=@^V}hTn$GV#pnM$Vh& z(z~CRP}U$M=MD1q3+~XS!@>&w@A?%z#<-W$I=WFX5^27UztO{oEgr+l(uk@bAqGzq zCp#dGCJQI5y_M#v@r*Cs@m^*4FkelLb^Ml!sV|l%`>5NkKRv$uq{4+0{RB>VQ^e?Z+Oh9A25Y>fYuZ4OKfbj+DzT!3Ozs-TZDw$&|o-C27^YH^nx)ZjAma`L&XQ>ny#KY<7~SuC#;~S zwRQ0;CE}}O;)qf!>$uT)jFYq;ztGYmCrkWR$kaW@*-;cc=+p z0w!vb)`e<(#Dnt|cYZ{CM#t60C6n`?=zk^SJ`-<`99}zoq}Y$^#rcc=_QG;% zC3oSL$w$Ump3=-1GpI1i%(}cd+A(_kDocyeBa_xxAGMobmp^$^xQ*SqG%~Z$-gePd zt0D7KdJXTC&@FL7WmeqK^rVO}OJWjMrc9V=XXdaXKVxlzw3_rDHqCbNp|hoaArOJT zbh6S)pZn73+QpV!^&vi>;d2z-`h;)GMEs(_9-R{4i$NVBW|u%UzR$|DG6oSSMJ=W5 z3}cQQ-r$OxLjH3{|1+6zN4!%#93CYPa!3E2{sQ?>45oY|Hn4B%5buC#?IYQ-+Ca!0 zXhIoDPHVGpBd*g=&kC`#pMR;;ZTNyw77jLTFnpOsn*hP&c z{XYGht~RRi$?XIbWJ9}XdE>lD|Awy!f1}+8dCkEe?%-IlZEB|M6_9V<%PY>9&+fEkk&$HJB%`Q4Q<;8rRecQQ? zr0+@DoIOg^YuAePM-Ph)Yd4j@KDp@1oH;|4Li>a3$4yxJn&~DM%Ztfqc}e zkKKmq`xKZ$f}CVT8#&3sZL}UVWrEclsgcXkO@r*8esmZ z%r&wZiS~daSyY05Nao{nj-FP8v1m`r*)W3uvLzYFKv;)AIRDo#x9M}Tuj7pTf|D5@ z{mSPpJbm&s$+&TYWQ*0arw6s(@91qWGN!`#J1`FIFF^QS9zUJoC69xhNrJyeVny6q zrQEDZM0==B493#x=+FAv9c0H2>A%yDUWCewqMX2=l3pIfhCsg*Akg3dCa;diPl=ld z;Mjtir?IhJ`~NZb9Z*qSTf=kiy;B%qC^K}VN)x2_-h1!8iS#DD3n&PR4GVTrBR1?E zqb6z+lc+Ijs%bB#nDR{37-cU1KKITbro6mw{pIl@vL}6jNqXkR>8-<0t zDB}cx{tRe?SH<7di}&5s^Ht+n0-69}&oF>*@c@W5-R94}u`~FZb-B!Q&Th5g_L7?UC+4kPFadNXlZd64h~*UUe|y>{1pka;_5-3sD;B)d(^7LO#zCjId8}SH% z_)g!O{1-232t2$0tc4!hpoh}`{>E`;%%6aRtMEVFIu5+xn}B|Ez&vwzC}voIQG$IB z>L^Aia5BcORAiOx?$D|gC`yOk_wi{R_12qf(fP@Sx6sh*} zRu9Kc;#-#F$OYz0&WC}2JF7A5iV9SN$s8UAeJ%NEeIbClyH@wli>R;X5(P<8u{+xG{83(Yq$bw;Tm>W=C@m3|VA2V|% z-q*|Rk5g2O-cU$?tBW)@e^R2O5KzDSbNN|Af4)U4y(O%0dCVxPe|N3>@};HRGWqcX z0|SjAqpFmx9r<;0f6!B6jzS9z<}jPYwB96PX<4vLPl?qf>K0u!?(-6TB`=tk_iv=P z!^+WPG$;6|nG;tZ!Kf=6FVt3_Z!mC+p6d|Rm}Cu02kyb(M1bD%9IPEbhT~=RH_x<| zvcb!=m|D}eX;7+QCDNoP{YM8_~-4NFIU&T)LM6DDAyhqJot7~zrbXXK~L_#kSSX!lzDQ4{x;RvDzt9tE9JR+KVDclnqF{?e)(hV z(~nmBRirK0US#LmUR<%qdXF#t5PkRhSm6?BbLAV$*C6th+Ni;t+`eRs0S(7wKmVAj z0~eDL`(ktA?G|qY`mRB@z$FmC)^O)cU7~?YU;@Oy1PJhJ_*xOC89s}s;a{(8Rzexh z+iIh9!!5nFXe$`&GGb40>2}jOlPj3jXU-*goO@@t`ARv!JLR_QV?er5Xn@j7LtfR5Pl(UUro?LV(x{Xvb&S?ckl^n0DZ1JrMiOqLW zwNiDTdvuZp z;*30E=6XgpBv^wuT?sw-n&|<{Aw$d~67z9(<2pZ z?D)cc0w91HsE{neaI|6AF9E3G^(m?d4umeBzPr_1)s)t@90CW?88QDSPFJcAwBY zt}dzCmNTb7FI3r5FQr^no$}l`sawA}j;pYfM-`-!Uu~>v+Lf%LxP#*5l-bD5D`Ga{k-3Eb74a9iEM$Xz zqICzF-soz1rBS*c;tdY+FcMAxo{Oe84J(bw5J48ce&ID@jo#Budel1jKZ`p4G=@69 zXAd%DA}Zi2R>5s%ilR7i9Pma!j2{_0O{Vw+mbq!M2*q{}mM)Qud6@N@l2I|apPw1D z4R<0wbFgA;gM{_J^gpZV3sO|pF0Vdu_rfF8uxG+YQ_^#fhxU=0-o9~VihPly{eV}( zT#5nd>VaV4et~(Uw{ND5`%3B5N@k1HDbndzG< zD`|!}Msx?O5;1$?d&9zelkrMvdb7LtdLvS`)CqJ*@UsZ8SFuv}j>~ZhY{_v+Tomc) z6lZH0ZfoOiqG+p~-5H*o+{KIGHstqUKGS3<0C|&P;PLD+gExoRCg6BcgBEp7>&c`xbxr%p zTcqS8H20-_%9dKQ6I$ocp2R1hSvUe}nL11Z$%Zh*xp0zBoE(C&mjZdc4#<7LsC8~xkuoUL*k6APqI zcG0%Gq<+Wu%}I^Z*LBMGO3BTu>KRLb#Q+qV=PAPkxFX0G18{Z_&;9I$d^p|tx5X^IyE*aw@t*fqX}ZuM`h>T>R>#D* z3@h~ND)1RSieiy)Ygo_m%J8~G>+bjIM@w(tt-ji?Hnwl+$6J#DC(EV+xhHr-@Qcs9 z6&0i@OtHzo>le!mL6i^mv-G8W!h0w4|Hi(_rPVjk>{InGuZ)8@IJex6@S#sOCl-#^ zMz3C2sDII-|M*Sb(Blby=;H~BANqR{4mCT1VvxcX98wkC5N`#&d$@Gyr2?z?1}3CR zhOQF~x`0EIag*}Mx98C{-7O-7!4>)6bV!b;|Dq>gf#)89WiG*P)M%P{0^K=d6w=g$ zD^}o;ke|UUzbK3MVgk{)PFYIUvolv^DO(+HJ8me{nYS+uHIcNGxPRU?#|vp+Br(ng=-(k4 z>d6wTxW;I*?qnxed)N~?sOtnw>F0no`B91WuQEml6K_V=jMkMZ=@OUVzox%FRjg0X zy?fy<<-3LccCBO+eSakA=bI z1JoNk40bz(>h!|Q% z(?^fcPgX_b7&6{R7n|$PRhOLYXuntq%h!LZm)8hJGm>$xfPIC9e3{4tzvM6e(vq7M znN%zhana-#$4SGghSRMYIaz*PxhgfMCQcbDYR_97R@R}dq*JjvK6j~)R_m!YkyEz) zf>m73lU{C@^t?2Pc)zPWtT#&v_e)OLXL~m!*=KpD9a|=~BtA)=XFUwPN(U-v4+5St z_G-Y|M>b3wv#E?$6AKBz&+ts)I)&gU5i%bkoCsw(E(&RaRF&;*hYW!=8kvDN^k90`$ez1I_MO^D~Ec3d&?OFjKj@m zQ{D6eI6Bj&LaE0L>D2-yl#Ife$$3z|#&afJMpYzduF<>5xrM$)EBr0>D4=pj`lkIrm*jV=m`TT!H}S}{x?WN7aM_g2;Enyii}v5?Bz(^PwS z{+=~OkxikaATsCl0NS2)iWNysVePuXI)Nnt3=`~M_WQr;CZS{9Vtoic(hQpxn?vxC z?)#tgnA?wd^b`E~9m1s9U;i)w=EhwJ9me67VCP_*cH|3=@f(&0akG?~Fdw@fWNCQ{ zDZKE)^$<(T6ZCJ(s*iLS8k%(1R2=CtGBoXxoIt5R|AJCao}dr>{E*&vd~79BK0kq! zRxP7{x%49a5WCAAgH3J;Q!KL%nIbnc((<>74~2Av34#>~%&7_e@Y)ZM)dyC?4(Oi| zu-0%l7q)Y<>u_4@NMa)y-$#oGo7W$usO(r3o4l$(bW(HVv%dZd84{sO!R7^YH$fcI z8zgkHmbQn))p&<=q%3}|w)T9ZPhCv$s%12NB zhU_*|f(Mhd(jNM4loLV0-4QUzWEkWos0O_F|6p^+=;m86H|el1jAKk*4;Nj`6P@Ar z)Msq3QBn2i&I;P*bm%szsxMvDA#h$BGw^bnj`r?{63`|UE=pQgP`EC|+EkPHd=DLC z?$Go3@QTm6q4Q19>Wk1SBY8l#-LRH#hQ0qU+a^r(>5uf0&VSbfxX@?M6W#yZjY{ao zHJD)&`J1sA4Xi68lbg!coBY%f&2VO6+-o!mmz)!eolT5e54_jab-s+4P^ho1yU=f- zS5-`O zogXZW`7n@IxZgaR5jQ7B!RzW~%4otcJEdhnFnke@#0p0_UBF#e>Uyb+cu`PWzu@8$ z10|=>3>5??*b_WeE~;=b+X`mD$~%w(Npr4 zl6Lv(WL;j|nkLhozfoTbO?}L_dK3pR`&|}x-4sB;D{8`&yzW2`L%F=xpK%}ST3 zz^>#>*zO{m6$5K^(948$a2DCFEq$AM;ckJ3#^zrnxyL#pOPsx1v*HI+M$l=cJ(BU5N{5<5;|GUW)v zW!^>05Gt5@ESk%FfoASbpuw0u{&nfgDOyVY9rMtx>(cdNz4U7D!PC)NTHC&d}IDX~%Q*}D9LyE*Y-*~=-~iUI9Kbk2369C}dY!}j3YYTN^8IVELv5%i>) z`&HK`t3pRkHPI@>d$KOP-~PugYyc#@16>efq81=A-9vM~{~ z>_VYU{-(RDmtHKi&fWOYin6WMA%+V(nXhh1$Af(hNQ49*F6#PW{{s3U{rK+Uv4$5v zyPJ08yzC3c-3Q}#XGpaoK8nFx2N+U(a`)+y8+FsBPw&yuXXiH@s`*gt3iOr7H890~ z3}w*w*O+G`CXff5Myb6}EstxAaC$$aUt^w4n9!X;c>adTKYJEVKLsNOEk5KoOUBPw zp@K;_k85s#pL^l?b08c%t_7H#Nsb&>ButnUTyDi~(|^DiYGgL*Z~qZ*vQ7Fw#U3c^ zF!r9`tPBpNKR>@?M@MJwSs0sq&oWnKz_WZi@j&FnW5;8~*S+w#Ho^EATydy73~uIz z!1L5Q^hT5ccVF?j`{~C|XvpC=^$W^GAmMP&|AMUHKVXrVZXDf;7@RO9`t#;DeO0_R&vpbgchaA` z=Y*I|&YPRNuH=ix5|n2ioa;e)=4@?M>8KT(sRop9B|459 z93%S8LUP=ZL1!}>vCUgs{8iKZNorlwLZt7J8*HA3N*lf?UXyQ2hNDvm=40bD4!Z&| z4!?jdjxjjx{sqt1!>KT`g0~Fg*bOATsb8i-i3HwKjAH{V`4vHWIg=iu%BF`AzcbIF z_?feBx_nn6PXBc(j5x)lca!jpVhJnqG4U46>M$F-f%1v9l#ujfAU8P>Gz71=v7z3i z3Zcz6)o`CG;JK4|zIF>g*K$Irx2a}Gzy7oQc@y(|gL&SA^r$stJ*2<&6Hed5v8C34 zEX?EHWS$Rjil`xK4m`ij}v_oH>xF;WSh-^2M)7%@bxhF!#m0JwF`zPiJC zi~5So1<0Ear@RZ70mudha&7A03}gxCZBXojB+b1JtoRV*!H}a2tYKbA^NK9R#P6OmNN66;k zd0EYASvBkBb87$EOJo)IW@1;tUnQhfk<7wqb|o>9Vqx<>8=$a~QG7Jyvk zOi{b3+klJjNRVPVUO-VUazA^Hmt=S?Mmqtgs}`Jma?=i$3;102)MaeISS<`&1^ux8 zYN}Xu{??-34I_OO6r*Ci3jKu&MnQ84%XesB{}Klk)tZBq^&0|Jg+avu^pVneN{S9O z@tG$#SBnp77nB&=rhOr)z1WnutTkYl#3RuyDA^&+!=+4;jsnIqW& z3-YZ$4sMAIE%%IE`cmWim#X#W#zn#~UIAO`epphVTtbZh4{Agu(r@&LH|R#n|oZGyBqE)^hn^w^t6e=KobQb9U)HyJMkYYT7&Z9ZSNAUv@r6a|%C4n;UQO8=XY_{OYU&=3l!#xbNKZCWBDyoj z`x|Diqn+x(gWXQPOXkN{xFY0Gp4xsSr~E|iqSt+$BU3}w@x=mkf_#<1DfO6cA@isT zz!}@a|BN;8$QD0`Io!i}3D83&mr_6PrfkU>-8Hy}5n||JM$&W-BNY{r9bu&ll86`8 zgO)CI_8FKTSLtG3k(z)M-(`RgYsG( zgn0s6+SRp8&56(TI>+-4m+q_6))aVq9L{ki@^_;7D5`xseULu2YBBu-ee?Lo=n}X; zyaU{V6L1Sq4T-C9CBV$2$z0IIx31qYPg9%g?tH}6hp#vL%@^qVN5JP?(l?AEP}25x z`Xs%1*9WxEhtQog=#CC_2UJaBWDL3yCw>}oMk{nar|Hji=x_MJ8-IU*-#aSLh<%tq z^$(5*vh2i=4t-Te`m7M~-i8g6>av1p>L)mpkFzb1@VXjPFx#oI)1Z} zr@rwqe#5*x!v1H4*cN03NzO2gVFR@AKjM@|{}(wWh|MP+J?qea$tivP zzs4zH;!QX5=X1AlBCxM5GMUXv;-I_6))lx$1k1Rlpryy(tmh2eq04$igCY-`V<=Wg zXDu`!Nt1t2h>(4s(r4Zbo^5&%2~qBy8_4{azmA$)-loQ=N)IHMy`^FCPERe(W!IVu zJH$fK_G+PRU_?Qnp2pn4e;+(RfA-0+fq}zTq%aY;bCIFge&vq1g1!W?#H=zSVNt3` zMLVBvZ2JzSee^C${C7v+J^G6cW$&M?wu*N0o!9%Xf?4K}Vg9}Z<}iq54jJaJ z1k$aT^e*NZ0rPj6W&W7woshnldG2MNIq=M5o)mY_;ttmoK_Mtp!o8|j}X-_WD7XHB{x)e-t<<~NoZ(dPB*>G_P!eEWtC z93IC8tca(muVLNemm=7JPRkj~yO`gA8-lPFiv&scIn@ucaMQvcE_}rBA$r@d_Pc6o z^j!S9z3ug79-5kqFWoQ5*gKT3s+Dx4x%vI|nwklRi5-RC7t^nf4efopsNnk|Vr=PN zN#21k`})4vpO?Gui~jyE59I9(@7|l=d#9`OcJIPF3rUrZ8y!#kZ?s+O=(yHKd~&1X z1`w@7Q$`@tXu~-Idgh-Lg^@?>HeUAYylZFRjxj-WnT&G!9`!!EbpQrLRB9k*CckLaFbG@2Atg*L%8F zkN93*4*a^N%L(-LAG76PnL(X64K8p5ZW-G69jTLR$YIPxG1}Z|%?zQJHQ=YBQn@KP zb4h@TsA3aR&)SfgzBD0wBr|PU0$|`V6kW#@#kRKTt%?~-(4~Peh*i-Liz-U%^%1F5 zjizTT@>3Dj?g)tMQ5NOJ#CD5B*+@NgbwgWb~lst3M6^yn?=s7Uw7@`>epi!KZdoS!!;eK1NUGTsbXP){P%ql|rI%|MGQ zh7WKxBElHzAq?m+T{J(P-mHhtzH`e|bNMH9>AB_S_Kp(jqaR=IyD*=`;b;$#Ejdg! zFoVF?#>NuC`O+832I&M$#ZF=^84bc2k2|c6MKg5}9c-EBHOzA}ag?GVe2K?hD^C}& z>Fe+_QU_&oO25gobYK=WJngC<^o@w4C!chCs>`EGAEAT_Ed_Dd|KI zgMK$~hoC!odNJ8;9;f z61kMA;xtp_G}|7F-4nRi0+5MuEaGVb1;-Q%zS*;hO0-V#&Xs{YNHwg_ckI{OW!+l#gh?xXccwwj+cOdXtP~YAGPF!1x)NgPua1E6%f;EN4;hSET_JRh(6% zF6EC!STpR4hR_jsZN;ZIXiMb13cZisr!8qq$fpGK|16si984jf5+q%@fr4n`>1@z6 zR0lCo{(!32m^pY&b|nZDr{Wm+RR(?)!}0nArOpH>brcxAJvhejEf9aNCOK&^`II7f z#E3B==t3V|p${nO3+V&E!E-i3kppZI-~q6%4E|H=CMjc*fFjbWS!4krpl4q~DMEcE zlyZfh4aBw!v^AfCc8CNr$q-3glnc#q`PAIW1U>Tb60h#C>tu> zIz=Cc3QmDOYA5KUuvf&i``?rvIiNQQ?%V@<#PzW;Iu=e15|q)l6u4M0S+Udr+`rt@ zlkD9o9o=dAtQbV_+$6TkiV~nTD*}zp9JUo43x?Swf;+lzlvdOyU(qijU!C+s%g4t7 zlUG6GwE{F=vJ;(JF>5kSkJt)n1iTlpmO2C4FQ&IB%Quiu&G9pF z3Fq_DVlBK4c!E?H*KnaAk=U|-zjQp+!^F)f*4HhBcw`=6=@gda?S$*cGr1r1Zmf3+ zIC%mah(9n^W+En!NwDeXCY?qUiw4gp;QqDH4G}cOQ53D!@d+^mR)D#J6NTF z1!I%tlYg7CUM+cXM)1-I7dMlD6y=>NNqw0?ZShtiHvWmCy=qB|GlK`C96U^!w8X_( z{w=9iA=ds$B4T;PhD5IjXXBjY@(qbT@m3}o>2!D*z#<(?vQx@7CV8Y-8KtMfq^t$C z;Y*-41Wx7ed$?9yx*|HVHq_SF+%HXer&`I1n5gPt+dvD?bQN^EeSNTBsI7TsTI-si zpdeergnZbUmqE4oj!fC8$EfK}0OVCx*$Ay+#oank8$OaL8?_i^BZgd*DH~bH+n{Vb zFH<(MrQU`-=Vi)97IFuajVEQwMiz1pIEH;Bt88Q;7mzk}hk{o)fwEDbvlm*w2g=61 zGG!y1X9Sdumu1REhHu~;1ZCsLGG!y1=O8f`)UGc;J@3iR`!A6f6-0c)Xyq^~M3PB! zrSo;k;4tKcRJPN=?o16#Ws*3u7?wV8TyA89-ejg8>^*{W5%rV!U3OedlMp^MnJt8N z&0cV9K?!|OhZO3dezb?!4X=rn7&xSmlpQ zUFn;M%K6Ef7(zY=x`v0l48lAP!QH*{2iUQ>!9J$<(&yP&O}bes@X&8Lrf`b?p-8b1$S^Sn9mPy?N% z!!#Za-1NLQ__GGQx02#4N15CJ(1l?$qZJNKW90n55WTS8PE*s&!buBQT3IM9V~55Z zyD3TZw_NFWlSn$xecsY~rO#a=>6-YwAp5}bJQYdmiMFf*EAWq#NG+o`i+0LDswUyq zLSbmS*=5mo9Y2Ax``Lfb6L(<^CC zhMm2=DUIK3^WxMuyjx%lGz7nn5a_>56p_1hK&QbSQ?6*lqXR=KegPf6$``KK#lj_f z+bNfcz296qnn@fX8jxCmPVn+*dO%7 zx}{HFsq4A7X6Yn+eL>uBxzdoap&&9ptR*76IV>-#U{glp6=_Ss;co{Hqkzfbf(6b8QtFiWw{NUSfp78vdSc`)2s$-4Ji zN;V`BM~LF&h(%e7%1#v#DeH8P7z~{+X*rrAz4DKPX};2cP{ziB$O07F5+2cl$Fng5 zp~fqdga4BeVa1jQ-WFIK0wv9i#UuXbS!wG=k!$-U;xp^t)4Z6Htso3e87#N@ht8vH zlE0>*et1#1r)Q|Nn_m9U{pE5#At$&H*aWHxAtMcHLZ3nW2A4_36%;lf{>Cf-xo)_y z@LnPPB7HqGKONP;zsc&vyu{NYv|EXsAhhXLv_yKJ&}KjQ{o&|Jax(0?9povl5NISe ziaO%3Kzuy2H=tl^C>(u^nqM9b2s=+p3qoRfJnZjsd6~-Td|NN~06uTF zgJ(dvP^mjEEmM@vxAbuK;!-9So^fGH$_uZj=cR97!}RvT){Ojg@>FV~wS|eP(QJWG zG1@;q459dBtJ#L8`i6X=a#B!KEX8*F*${vTW7IHKGa!rj+6f(IIw5e)2~6EAPPD*& zi%FgnpIDcPBGvAD-4{vI;`8D$QbhGCpQ)6j7es%}peLDLl`Kcf$H~cG1H!zMp_y&u z1aA(^fC8{DI6Mu4gT>&QX37V*H$EP?DWD(luLZmqRz7XZL~bhYX-}w~HF+Dkq%F+L z|HfE^>{Xi4tL3yOQlRfwA`Ltr$y*ZN#lu3o>8#G^=uY{}ux)bnXUw|ApJpB7n_aUE z#RHc>xYZE+LD06fg+=$^U2F7>%)E3|n_iHf4IekrCXY--RmEWUZ7SOeYgJVg;(78PW>J7nbL}8rbJ{LQf&A2OFlW@=3kd?I|Cuc)u=7t>SBuB?2XQvb=rxemB zYeROZ#1e80OtGf08{|46yaQNm5RY}C#~uQv zSig%Kb#s#&l2A`dw~w-F%UDWV6(1=^rFv%isc*7bv?1E1p}<}xdrYigveGGXL5M(I zl$LK0U#}=kBbIp8CFT{+RZte&P{GkgP6igyIdgRU>N2Ib>m8H&T~h4J71b3fp@*-h z5A+X0@5m?6ziAw>ytGVbC8x`*E7D}=zAz2VB{w=d_l0X}3|^m~)DoqvY>=0fh`$YU ziGiw1J&P_^R$gAzbGdS5+@iR+#qs!WQ5>|?OWa3y_*!rhU{6n2ax?9Sy}jFeLo99h z0zWe&J1*CkxKC@ANOZ-Nw8-$rdUB&wAi%<(06?FpM_l z9wWlIhkij19T|NsR#S8RkK3j99-VzeSd!FO`<=dlJ8jbUv7#7iiJ)&$&cLPka}wG0 zArF!Thd>Jp#x^~}|4|+=|ldhV6WeWqViMx)|PGVOn)gx3Yq{ zpW&2eoQY5vSszAuIw-5wAFrtzj8xjL9#x#*q9ia2Fr>X>jD^CO`XFkKy^27k^<0~N zZjvHj;%G%nXQf9g^30B*eKzI*VU0FK+VoSrAQeJ?i4}2BRXD&9^Ej zNsVxca@H8LD5>-Gs|n}vXUC>H;`bN~sp}*LT7~-Pydc(33a||G*N3H0#QB16LMelG z+#g@1gW&9k<3I}!23xKHpKLHDAcHYlvNJ`>127>Uq_8fo+-Vm|9v`YOfY20ISHdi2 zVX2k&Y&DUsN$~mB>O*-7{K)2!~>Xh>RB?L$}lm-+eH8~W+%d6+o(_?GzjmG~Ms7?_5Nbo`7A zoD6--VHGR{M}?!X3Je*mEyP8$W8t${U~a)?Z9lL(wV22XmiYr>VM`E~l1FR>QOh!7 zhr)ybs~Rr6<2^@cwD;l2<||Qpl-A8|U3vr~eQ=FSfk9+I)bRO|L&D#qSlpk$P*72OMuund_1xhBe z@@0awh=?}y(z6i-se~tqK9`j4sGX%(Xa$zA9LryKBnMQ#!4Hk1`&(l=!ZyoKB#(zuTS0^4o8WCNYB4(;S&p7<;sN0qh+vws(~0`$5K8+T)|2VHw(bzV2ds?ttJ!)ZRqNo{G}mi z{rigjNKg83DRH0pc0_tmI=*%-kxV3wNHwL}P|-oMjoici1*ohMOOIxl8|6n7dv*bp zK}VC?UDd>8!?F2UN__En@qTp${RXw93T075QhJF<6i&8{%~A>fz=GFrG5?V|sxvz(o&N9>_etD05*5wS&}mwTUZohz_-la#32ZnKM8(uUVKi zR_2oFZen1k7hCHb7&6PzV75oLD~#wYjObMukp%lC&tNFDSPn~ez%zG#FfF2qh=$*lI)PrXK0n;g<6C!opSdrHaav`Lt?4X)v zL)Q8nShB}MVA+!%SM8)2IPRkm78VNc#H5E|aqf*w@+^jcLR9S<7zjgz>d<}Oy}xFP z#*4>G3@z>Xe19`5TRz{9>?JCs>xlD?CZ=xi2<^Ig9<2EV{Gd-DJMYls9nywP+s;Zdx zN2DeRB>JgkZWRrx!Q%l6evSdT0-vz}fro2guD}=LXcsp#Dz8v6C~kIRK0Ochy4d+S z(#y~y2gg818V7LU@eRVlf&h8kFL~q7nni2850s2*@XUKN6RI2(1IB$7LW3hqsc-2G zM8ya%G{L7dh;Bv=uHNA=2@^nB7DIJxjHQWuSenS%-d56|Mr&jB-Ppbha@a;@DGD}`-yGj2Bw=pATf?+kVaVk5KJq6ZprsA->1pf ztIrLs54B3}NiWJ(5_)_zO&_7MxK_J(=h`fv#dTrv*7QDTy>z&rj= zn+!eI9H~3*AvCN?V`*4PzEGgCkB*xrU$8P0oB~-AmS)pQt35}(U^2Gqd9d72){ zG!4@Vc9v1Vj4`8a02=>BT1%)hLfeEv0Vz=kSv$5T&nL;$RT=GrDu_TMiXQHEAz-Sq zcI-*dCy4ybWV2nQLaNE74cbREZA^%(6?1Ad#AhzSvkpJWHS1`l$jGo0S;-iFfaDJJ_HbjXYTyBHg|N`DVg>I5 z#iN>(#Q@pSRfJa3!)^fqZm3$iZX_ZZ>bec9|1#7C%nla6fma3^22~=|bLeo2suI^^ zmW#>73AzuXoIdZ}5f;l9ptEO{rDT=V53Un=24s)-v0ko7fa20On!~eih2ozH=f--$XUg z&TUS~8G2m%v*QW+CTbxD0oH9mYnN_BZDrOac7e&(Wpw`t-O@d`$YE~j+-_*2nb=6a zCTqhD2864B4nII#P0X9axkjG$CIaPS$KZWpq^Y5+>8oex_*wBa(I$b!kv7C5lyo&s zh-$!GcW_J##$1wZ^jsxqVHb=d6n{@kUC&d{4R2~Tx8AEA0pvNsR) zaQn;YrQCj4OQ*>e@;lxcm@llAnznZR!Kp;~_%373mWk$LJB>73&gAEHYHG&i<#oY- zvgPiT#pen?dS%f>;fkTY{uL|x7B2^yJhNVGOZ%Ie!Bnb6{C(}gKIp@MzieX8EFxP6 zM*S;H@G&?AFvAbj)|Bt&MjY!uSS|QT_Z~hCh>blPIw0RKknaV^hu0+=3ucPd+a#(j zQ_zafvDQLy>rK}zfn4pBskLNTy!YfCD0Uc-dI->ib!oB?TkL1Up35LVsu0Hf!xGfv z8ez#*ND2unQWz88ST(X;jb{+llzjl9lUihNJ5o0HbTM%C^K-2m>?<`6iq7_~;V*;g zCjk2sP`wRyzU@Btb3NSVhL(Hx6$ED~@{|i>LMnZr zALpPScK|ywRn;?{D)hsQ4XShl=^0c41W~j;M&T$~5D{OgAjmID6(a$n6SN4RFiUvV zTceeg>JO1E9%W%xj{a_-KRU)8YNIDDL>_ff(KX(%;*JAKPeNON2+HMx8?zI69Nxg# zViZLmMTCbIeQlN~e`h}>=qHR`rpEvm-$D69-bjt+!hqj`0iT9W$XMwa1BTgU0(#}=A&ePKeQhExwh(_8xKmfpN2R%+ZdayT-PGjj@Rf@hN^8+Per7 zZN&Ja1Gg#HgvsqKveOq@oA&0yCkmku(v@u?)@*;VIcIS&agd4#2rdm0@h#RTC%1Sg ziK_M`$2JLtS#d5|9we1awpeCwTCnI+Wk6N%9Q%NlB#)55`pBg2@Wn|!K8cnAmR?EF znPYIYb;97~vJs;-$C4>?q_JIamFo~e5c?);b(Bak!XG&tpoeT#gEH-VR!cei?mYxFfHcZK$u`CIzLoB3J!#4||mCC8y%*sI`p zz!0dn2o*%@X%U;Xqz_>4ZQ(9kE-e7^aae{gL(uetog1IPfUz0MDrPq@N~&pm0lI)? znU)_3vTSRtP!L?=jl$v;g{7mpxkKSX#kwfunhYX)-O-eUMu9?pTwIfaAbUk{8Mt$Y zgp>#biDZjUiNAfarvhJ89Ozr%;Zf)lSS;cTJV7^8;?ox5>>M2I>>Ps1{q5|0ee7)m z0Hv>iyz~<9f_#3Vw_tl>WIh?AgLNy+`$lE1vzUqiYQM|Ivr5MJ29*Sh_?GLFl3MYs z9$66HmSm)+*?!L2DchSMlVjoL?YMVfwB8sM1w6 zD8r$5l^D;kW2!q}m>LZe4KvI;>)VgC${rG z$9e*M9|8EcLnCmEo0}s-C8ErCTW*I$iElA`tG^9(JME?8AwBf>ZQ;XXB~91|w2JtW zP~zRi@wy=Mfm^$U6U;Dlv?{w^qG@8c+suNes@arFeA)MPM;{V51s;7lr}t466g@{g zAO?6}FiQ*DJiw50ZZ9&E^mg@X5D&;lT~ctyMVvm!mMF{S1_lu@toLc)JjiSh@t})% z(lsdr^!-fUCFk=LU>)LP1;`T6Kt4K^vm%>5s6}LJk#`AO>8+b9FuVT?b%315^j^9^N5)^J~x)r0H6ABi>*q$ORgNd#+i?T3cw$3qYjHT4z!2JsoZ ziW$Z`GxaFRAYanAh|5ep%FiIkm~etpA{iuD`alLzd6tKGn<=IG3<9y< zxX)_OAXE$r28jAI2yuyNT`YtA0^>T*KqNBAMd<^m8Dm332KgBvpE3~5XZ4^TP$=Zl zl0iO&dRmyav}KTc^s8tEdK2>K$RJPXYs5#$2OzpEgrfvwpJX69_#5+w}>wph>a}I{pnJ3WqCNT4u2;^Fm_uRL<9Qs8KD3W zI~l}+_D1g$jsUTj)iXxBpb)|uAPzFfue2{j(>nviQ3kmHkYxniIF_~&1_9F19<+)-A2hkurk;g%;iT!8b^rOYpZ~LAq$V?|HQ@;^! zlP;|8B`=>tJZ0h0ATC_!d$#OVG0t#t~o zYn{dV6XHPsP&?#{f_zuWxtJq%5`zX06tEzH153QSR-m`YSwqn#H(tA^pV)zd{Cdm- zEGBP5A#kxIwgOt9_fCJu4|CN~grkoUCGdB4Pf}Q?KGxc`O3c;wDobi3P`*YL(zOos zG;sCub*$*@E!6Xlsf?;oz|>h4aJ~;Jw`NfD-*Lkrotnq?gei=YF>apeLdBS{0QErc zY(7{w+%g2~m2w7S+ZL-S7Du4$c#--A1;@MqBRfB5_kaa~&UW6eHbF)1eT6}pN&@Bl z*wRJN2{q`%>wrFBg<*c4>x3V{@GKTACyeGK^o9y{ph#E0Oa*~&Y?As%h#u7-jMeEE z1=&jil$G+=C^+N>nav6Ga89UrnZK}%{>D_rt1&XH!V3-ukkg3O&=A81yG>tC!!ogo z5;R_G7)pPS&`k~ck)BH7swG`*g!4Iiov^Fx1s5WTVT0v$Fl;cc1EhcO2*sQ<)X=ns zUW3qkxV*akKoTx5{Sb=paz!Q#7i>B8h&T*g0B!sq#`*^?7!D<5nb7R6xRLMa;#)3M zjPTVD(d}J;-qxV^z#B;OJxiLzlJi2ZVn17tAV=GGb8i(!u(U8<62OEfFC6~@Z*jUu$I&yzl=FybNtev7eW|s8H`r~5rXW9 zVZmd@mr=9+k@rUM0wZ6M(8mQSuBKmi*J$-Ct(S-jhP=G@NH2sD|E8ibQ3?LzG1cXUhf z<%xVi&=F>1Sx#T31f6qXTv`}oCGbIDdYsuwpg=PjUSyxP6dYMP<9x!E)yA9R+XIA( zWdSH6L!_9yIwhb;AP5ccnXe!S?Dk9tg|Dkynu3Clf_I5uM4hrgR2-7g=9%vmQY7Ll zH%59F`_x2wx`ao#J4d7HNCyXhe@BN1K%wr`ufzceaga^qG@~mEAp09O*C4jZpV~@z z*u~Xo@r?(==2fVRibk^Ri(-t#8fBX-Eu)<%uCKrUJOx33Qb0|xo$KZy-{|Evsf)t6 zqzk%1{e5Sv@WXI2hSw^ za5ZxEkhBJrEf9+eR(Se=x<0_$y--Ohh;;R7O7(5@rLUj`x>h~)NgW~0$$^21aY4b! z&|V6sn^;BcWm!4wlgzM$-0bdb`lf`ikaQE4()ZtM#fs^86i+NgcksD_m2t#U4e4DC zbZ7Bms3w|ogV;pi8$@A6$nQ^W)*wV0(%0~RbZg;4da}D4s!5wViXMbamL^rwwMLwhQl9)e}_HWbcg3-h7yGgf+TXr`l3GHcGT zu{mEw+>{qryKm6H5g9KDJa;v{@nH)zY%}!;slcnRfw;_!ELdmjCEy2J+Pm|4fx=*i zIqo60R$kE5UHV^0je}jjIV8r*>fKiX@(K-UK+%zSY^)1@wua=wfZp zKvpcZBjeSO3S~VD+aki8++CE!i++uqoms#oNIgeS5U~=%>Q+KsqA7Ev2sapjbO7E` zAn3Vifp!wAFcm3lnmGvg$_ot6nW&V-d>G?rH&--qJF^5Fau>pJ6~Mg<$9@^7mKb{u zQZ9qMLW~jN*uKfDkU`D?#GQK^AeAymBQZuQGxb!-AaKtDsl!03Wsn|#crf+U$RO=d z3Net{KS37AARFNFKSL%@oeVMn_2@G-*UKPn#1`TMCQrk3o-q=e&H21W8KfCX88M}r zWRRuACQ^_4Ipk@UL3TrbDDFo9X^}x*A5xH!h%|JKfplUB%-05@8?{3~6)YpH{bNZioFiz)IwHECEhJ7f zT84Vbc)XB6YfnbQLIMaZB$%`gv>6RRWqe*Yr;=C+kT!G#HIrDb=mE%5fTRIU{|!)L z!A>KjloG*A5DTR6%4eiluqlwEioDL`SUB|?KLU_v!7dZ{E+xTx7;;ooqih{hzwz(| zGV%|W9nd*2vY|bftWhB$@}qK51-XhO9n7Gx}m4*nL3z7h3g>hF?H0E zpUL|1h_~u5`%y>!i^rH+Cg0501hM-I~pvv8TQfO9sC zrH(oz>&I`r*1zmWy$l~403Y^D4rXC9V|mKdQBN&q>wwYXW&LNIHd0>Gt#tlnjwV?v z-Ovi&EEc>!VyuaRmm?vL8*deh6T0ag>||yP0rL(L<)EO=Op>AHVW&Y}%iD z+AQlw+SG4?BY%+tO5hxTxiapj@XbYTQ}=lP0qQh&f#F171aN!pn|$ znIa2|8JG|K+f;m+G2xcqI7eAol({;$pkaWOTfuvZ75)L^6EMC2Rj7!&91( zen`LMr3tYKOOukAujEB#L9xN5rNOa5Ws}B#n^epQO`zSFx~Fh*#>f&ld7OFwm!tcC zFm_TAkoKRj_xqgx3*;>N2ec_*v?&e#0c{I73ptDaS5f!>17baibY1e%WEYeEkRSOM z=@N)t!|CHwU-PTD-$8la)MH#f9B!CM2q?~Ett{Q|FVeMT>7!hNFy(1-c)Zu49Dxj} zA1MJmGmItpn1ON=jIPJlj9sl-`@!qfkI0<<5Sc@SsTcJl9NoC!8M491_7Z_cM4_glHPm=Z|vZ8OQp}-`rRSPtc<~ICuyI36o1^JfQj+ z4_u8<2ZxXl2Zzu7`-(>w@gBPB~1EJYB%vNZwowEF!c+ey%kLPGQcMuwl?)8?>A=HU>Bar z!OR>gmb9rcz&IZ zMg`zB%o8qvE}*FkG+3jjmdY9Hz*T3@1EiTA1m;RU{jlgTuQWRG`j1=!N$Br6XnKYr z4V&Q6o2y>WWETLe6PSMT5{#2wCy;CmAJVqvYA|g!{(7@~q0n$Y%ruU1MOd}HQCs@1 zHupAGdn5UL0F#8Br>2dfjAtx%8X$RQL7w(vePdpicwU@^uZz8ouAx%9xP)u0W31z( zpWUEqtZOl+paw?%1)atDVJZSHv@W6V{T|9eli|`k(q&WC5K96cq2Snvg$Fn3GLg_N z#l6HfTuC$>=BuiR0#ee7mDN>2V0gNk zs)2WR6hGaBUWOPrt<+5=1*Mx(Q#X|slx#{JuE;D%FRMr`$SH;Oe1GbDg+Oq^H3zp8 z*yo%Gas{&v??ca9AWT3GA0)wl9P$G-;J(DYMeslr!;9lR@WK3*|Aa6HAri_9|BY5N z`If;wAUC;R5#|UONE|IqBoK9w2VdK}n2H6ThS}hDilroc0W0G*3K0krxS@fuPk}cb z9|CjC&B-@JIadpZ-!0IUv>$oBVDLmbHA%bM_eJMj9*Eo=U`H6(+jPHL>gv4_aW*x) zu_MnAb@DpQ%w7^Kt$GnR%e#;(sog0u^ zTEy>QfOuT+Xl`X~g|)GDYKj#_=s%^c6638jO%hV$jgAmQ#8AU6^Q5H6Sw;#Xg)s#c zfzhnUr2oU(cYsxOH2vQ_=iCBFFBiCgAWg7Kvw?t$poj=aQ;HQ(R76Cvg2t|>*id6b z#jc6H5H)Yq#NLvqDJE~KNz^1JHn{ij|LvZ80ri@E-}C?SMBvPsGc&t8J3BkOJDccp zOX6U1=YPvV>Fo02r~fgB`dYgRxvLDeU5+vk2v0as_a!gO)BSBT*!9s&vOvLAaLcFT z3RBS!z3EQkuphqsvrLRmlUunh1@a0r;R@}BoZmy|o^P!NdkqGGS;YwrQ>S~h7 z_WOd4$>0AYIx0`i3-yP}8OG=k0xeNIKpnw$dHAB;L(q%Zy*pTrQ@7=OeX)P{4}A-? z@J~+iuM=Ga&VN9BOLd#^1+Ctj$GIEoUuB8d>#I*r`?Y~4+bFlnBAI_JYVjM9x2bq!SyGAPtr z$SAo{+-$K2$UOT|qw+rc;)F)IWWk$ckmO57-Me*m;xZTNwmSNI+t?7LnWag`bNsq$Q!5iQb%#JVU8`d5bGPs) zPyhClN~Z^lpQ8h;1dG)Y3vW*(-J~0fmBg1Wx#BO{%!01Eqlt5KcT2&wTX#?Gp`m=7 zXG}lemJz%5MT?(n_;u%qfAI9uN$veTqr%;E{>9v9Zh;-tW+qmqH06sAG8ciT8BTr;h2Ym|N3v2lwhwaU zJ}cJwyGHi(l1f4RQHl*z=@2D>ZHAN=%P*bEvdc_pc;^;d8Y3v;TV4mbt zUeR6ME4p1f_w8n-X}{4GHR(~W8#MD?{}*zq&i;d(&%^)!m%Yh#%!WT947~NQAKVLC_jDvaaf8<~qW8m=g zSW^zBH$Qx4`Ju_&f961BS{OzJr^AEI2Pju4|PS3-|O3#CXnZ*Ej zJ7D&#Ko?3|u&aAWc^P=HLm3KJ@Ze?cbS(}eO%w9i?zOchl%rmQ|N29U2&9 zOYGfyM0QbA{?*moC0?a{(S(Oy1Xd_21Ijg9_fpYY*_`&LCJg9BkW$4#8=6$8p{#0d ztM1!*2i$u`bXBwZd?3|k+OVN*o(`dPJr8xADAn}?rP>8GExpsdq?OPhfgieIV?CpS zUTFq&!^R52AVqt>>b62AYx{cdhc8KwK+#jFY(d5IpF9)(o3xGj7M9&+rNwGu7hcLz zJM8>>q1NN=T`6fGVyEOxc(7=6_jMJ&;P2)YKGFhg6oKxF(;7 z3DnP)8Pc=KPwe@D{yD{Ni+l>+|6+JnJYvtk8lFvg_WT=t#u7k=;yL^Nq2bxol084t zKM%6IBuA=a#bfsU-?CKTPsQ`^hG*c@=ReppVQq&v#aRlrm1Sn7)Bus&KB&Ra`mAS0Qx>k@dtoYv~=je@_x9 zUyFNsw1XjBBQKPKc$n~vU9vccT;W3~Q~AQWGXmYm28UOfz*qwl%)LE`O4!S7tQF+E z7)Yh47U&%mje-DAZo=UvP9}^v;Ip~}Ceh35a-u5*YNK30RhtM*vRX@FkJgvTrz_`5 zxiXq#9K6@C0?Xt)Z$bLX|aIwOxkWap54M|7QZa|pnpi` z`1sBt{mDT>ug2Mr>=xf5y9M@m$n^;?Y4Qr1@DjO^1U_MlSbMOq#(C=#;Rw z|0RB*F1bTATQ(4z8xu?Jh(B)GAU?h^5rr^h%g|pd@Nd~L_~?I0v7r96J={= z;xV7~;D^4D-@^awWbaVamGlbgMqhd(y$D07{Zp8Gwr&c=s}|-$^x;5NgNg;#Y-JuT zLg;ZSSQw;@S9p5Uo9F2XXAdm+W7j{4ec-dxc1eLCD;#FK({}NAt(>pz7+|tfZtkL0 z1hhBVC0IYe%gK`qwX*S(s(nLvZF6(~V68%94b?ZWqW1;2pqhsK2LHk^UEayD_7l^8 zXDc{XXs7(&^0s2Ulm4AI7h54ly_GkSmDW3|S}0DU24p6@szqIe_KGltd!TL4-{9%l z&&K!q+@B_vigeLNTuol^Ek!GFGkL6Y6IDo-_cz}b@JjMb-b_p;yUBiSTQPy`C%eUD z={KHJ$xgB#4cnBe%tYD1H&VAtJDbZg@YnX>ZtLW_Ht5@BkkgyVDam1rm>xTAXTr<9 zedW?mZxfC_=zo!4#WmzP$r1gC*G=(DZWe@lN`{E-h}#wMD?UKnK>lQp^sV?C($vwn z?TIVik}i_ub@)%)o=PnyJbhYMH>D9gg_co*Vn%tcK1n4gW*~^u9I-~4<)a<4(}c*n zN*Q*r3W9-B8nan4gN8%d4h}R5M$>z-X&*JccxGl8o)I=16QOPT+;9uag6)IT=R{ao z6mIVskRE1Y>fN_P$MjGWRf|4p(dB)J^8hj?Nj%_4PCAHbVr?-|XfFY-n>&DvP1KzN z+;N4d1(9Xlj+;L1sj*z<$oq|0#K(DHKKV$ucq zQ^gO-Nipi)NIZ}(<>iFF7B9fmH6_+JzP8(r%k zD1Sk;+m@bDmJ{mi1_xt~#eP!nn&9qtA2`9-!@_ayk>Q9aEn%PUg1cAQexH z)2$?@#mIRF$q90pv(Y_9VQT8{E9XG7y2_$uNw{Zwj10kXxwL-_9)Z+D)O1H4BGh3V z`wJluYyg++1fmu+=ZytNg9{;^QUVLx089Xs(%&9zm%jcHS@Y-{PrB$zOYmvxtS}3U zF&opW+f}E`4zsizw`pMNtnL;r#8qQ4c2jCvS*V4@xQ!|F^{g;Ui?JJ1Q)h)*I*Z?H z0FG^&7OrxsY=@twJ-;}rG3yc4IxQSmbo0F{+oVRQ%z8$)O$P==GYju^*u}i37(-mz zi9fU;1DBq9YRMF`tnC?%pruw1zGRQzop(S z;ztNkk-~A@N%D7zn9n;Fa>=^gh2%Tv%muPd(TuqwP803 zi~Pz{xZ@gKbe*W-BFKNpR~p?JFFX(r4ztm%dBAzgY3U<&QXE)J*)FIGZ$xWr|O;1s{Vbr6V*_I<~ z=W=#)b)^G2;j(63?VjFFPMd!h@2{tSI#=6wnsaW_FyP-9&&Re? z5ykumrs7g^n9!7@l=WQ$YdQ}{F=?JTWebE43GAwZ@M*Uo%rNR5r2x$6Z^t~ zVcF1xF7Qg3Ik2~d*)Qrw({2#L!q~ zQd}?>TUR5V9?v}=udSp%K@ZwsLVa@q9#=D((h&YR0%V{^)ta)&d?p{ujnZz`{+YoS z5B?1#xv6yng$3Luv3VUf)zEYqB#4sJnXn)AGA@$q35u!EG@6QoPBUo?uEF<5Jcgw= zO?{^L;n~Zt<&|e^bNM_*S^IPlKM|XGn6z4n;CLo0hrB@FC}v^d3=5+yT+N$s3GL;d zx38VXkL3!!!9L;poCz<}Vf9J&h3t;e`b@;1xKxR+bjR;}6WD%WW4@T0`O11QU?DlF!DVr-m+0_qu>a8zvSA#%&b z3L<_@KM_xy+eS~$KrM0c`&O+&*ibhNJjdbNNsZsq4CwffZrzlZK-XTpIJfPbnFa*7 zHB__ej?nLTPypY)uzr^s<&_f;Dlavk{55d}n&*d)w|}C2so{@m9LbTn!m+v-!HG&N zt3Z~OrNV{=iK+9)zMD=3*3MbBt%B5QgeVO*|Db}km3*$Yk__Fp4c}hDx0f2fg~W1w zafSYnxOS#Sh;}7&mu_8#v*Wa$wrvwP((h#gK|zI5g+*fe0*{nwwwY;jvX4XZRV!Xon9SowDJ@{+M@SG>G%H-;dCrLwX4ABphuRkDy|5T}|J zEyDJQ&&1squZhKCmH6{15BIRWI7*xRZ$j>w@gur=(bdaj3~Bdu$;_WHw^(rfqPSZ8 z^sC9ZNoAF;%It`$Et^BpEGi-JyxDx880Py#Ki?+`hpAKY?MS}e>M5$qDWmMfm3Awq zJRhyP=;SVtV!_e*d1t2hIU#S9q0C`1QBo-+T-HEsBqURZky(X%Lpu-wU4?p$ui_y&WDKsf=4@r1-o#hh0FLtlhEb67p+gWGa&A&N-+)JOkBq`& zxDuoABwR6Mj7}Ec!YF(J4pO#VGzHTo7Z7&J3=Y4n=R!Ddk=m#a|+ssyFc0bH?S| z$t^QV=M&DTK42(1VHE$CWD4Kldzbs#DE=SP(EB<*h9aoro8^_e^6X^B8~AR#vr&9^ zuGT2rgZs=VyakU4j1j(GyqQtBw{d;IP=uJF5jwtaq0{m$_29>!GfKyw``swK75Bg> zyfxp^D7+0118qdkLLu2GyqKS96u$T^?Nz~lNQa^~+Uqi0BHTjp2K)+FZxnw0E%-Oz zf`5zRW3ANS`Q8{Jf8{lhRfu{gY_v-r4y}AtleTEM>z03)4Cp~U;FvEkd}h34 zl#UI9m+IlG8Gc{DdA7&bAdfWvh|o2$>WCP+#z`` z;As-}Xo}ql(3K3DD?wv1r>=hpIh^Xp(=(yAbXzy_YQAM86SZ6r9jJBqAs($S&TM$`UDs3}ON*h=VUfsw-rn0C zH#iDzo&Bx5bVoLxPQks*Ow+~B+=i5ArR2tjhW6_2PFlC+1EK;Q`JWPkgX7}51ktO1 zpYyl)mvWuVM|q#s+I=`Rs2{@d#{>cAGkuibV1k7bAR>wyqWD}C@E!3$cXmQB{RwqU z;UQfZ$F+0|9T;SRP?RFbE1;W+Nm_WX9+p-EOXRx2Zaqc}P;he3(CDsR;zp!pl@F6) zdkHQr>kn3xBwY!4z0#57%FQh-$p}g0$&yI&LW{7NE?xUkl0!k#0wfbb61t6ztqoO1 z1#GBG@^P0mRN%1Q;bA5wi9z}b?Bd5Ca~)cqm73G9bLZIZ9;A&_X;>r!QNf%Mh5ddj zS-SpXXrfp6@ny#Oi6+M3#^o4nOgOY-d+6Fsy{@%qy4F#=&IbbG3tj72=SbU7_9J~$ ze@9Hhe*J~8A!u5eujwcr5{(Aaik|JGE1vxiWQ#uU(8?##y{A&H~2Kf0V_%*~tf{RJ~ zdCFH$r>1^C;|o0%cHo$)52*uvjE6Ez^r$|{QjEUOr3ZvMJw_O^b4Y=R|50SyC4AgsNGBomz0G0WQ#qfbKm7+Y!c^SPjI0E?dDIEh` zHlN9w34H9;1-z9We+;8f;ombl@WUCMd`2e|bl5zA2ESODf+RYCd(k@KdIJvIDEmPF zKE8oZAmc;dqs%H=3d+T+0ZOIKm$*}ELb0A2Eghp$pD2d&Gn8e3=StLxA2|RzPGXc) zquc~Nd<3HdJ1oS}mc=RomG+bd`!o$ zrS-%()%U{j+NX>YwJ3lxdQoB~HxNs*K1c?KIyVcbcdXxH_=dH3lNlAhX~)Wfvh%2O zeDxudu9}?YCPKPDAzkB43w193m*P`6_Y<}L4BuCRvr)Gn>z$ydSj%MlhS96x=1X-C ztF~4s#CLi)k$Nrgg?A4FQihj`eblg zM+_g_DV&u?`YyHp32&?T3h(Lsm}>pc#1!-Z$9=nv@ETcP2@d^foKK^a^}`vTarI9G zZ0!b}LeN3Gu|CD1wD0JlR~1y&acG|@LZ%){c}gQE#h%>+`?`cpDK~-MY=lyc&{M{7 z8Y7fzWHFL-X!!y1WMEH}+R^~0v=wy?R6Z}JRNy%X)l5FLEYlNzMrA7sww0Cfm{eDE zBmw^GaCq(#9DQahqa)$hkPehi{SW9fIgHL(&_P|@0+rXGs97S#Odg5JLN&uaFJXUA zv0sH!Y*=CjH^@%0DZB@h-2*&>7!`zt8JvYDWgaMN5R*|4msv6ysdYO*;d-lABejwE z0yf)|+3Y-tj_ew<*(voecyDHhXbeZGswQWs9g=kfoyOP>)t44b9>BSunH{3|z7m|- zA<5fKgZ}-^^iLWaPT`gfs+SbLvI)FK)|$Zwf?i`phw|yg_!Qu@Irc4~g(X`r=_P|Q zosyuEUKXG(N?2V`DAh|G`3FymE$O9%E$QX!G$g%bP^Om>uB4Z*Q?8LkOOzY*a+|(3 z^>8XR(@Tkpq?ajZ!?(;pjn3>Ki~K)yycF6?<&j-yo)A1j<*y9RqJrqF$C>Sq2iN@+ zE_c`C4`Oie)z7Rt>ES;!IGYJkd@@cCmu5&=;9JG$x75RTG5qbIGlszt?4k85OJi_o zfV`B!UG#7kQJ|3k#WQ7;%t!DB+*fuU*B7(zNN@TbTE?hnD4PV^8A{un8wq^$ub=h( zOM$yCQ5%=^T>i^?sVDB2|AfJ*UkB`Wfh~iTm+YBvRe~~mMm?Oy(aFd3bAf%q#;6dy z>&fW+DSdI1(%A?)Sq%E41bwQfE{7#S$!LjuxfT51_)TkM8Iwti8mVM8qSV7HSWSi3 zzmR+sWI<#2dLM;p9p&}AUgB6d?1F3sUyMj%GJLR+IO@mKZK(mu zWS?Js9y|ctPyC(1Ef5E7%%w+91o>IHv<)>iv0CEuYb)E?+2Zd*`giGU^FVw#z6=~F zy~hGLg9ELZYT0}~1aL}Eg70_*4t#|S{3U!}RW;X7dM~WQZ;qzE;_T2u?9?1R!hzd> zlzZR3azy+AqLOd;zqIC}2Sny%`_aViNTA_CgSP}RP(%RPF;04FQp#MKl`rW+rxj@BrmI#>JA4c_bl5iHC5p5GM>J{#|H}ZICt)hXx^W zg^nlgj&cZ~dBp>WtxI;AZL42>mN=N0dbV}h{;Th=;_qeSiSmI>=Pv#EpD9^X4}Ykk zdO)0358zW|_c$8?X@S7b3)>|r+r5Gn-k`&f1bm@TRX?Pd_7FpQ-oc*f`!66CD*U0; zKt-&9D4qld9-}A0N6PLIQP0Urae+DHy@!Dt#PLo@=`RE8VqNIcmpw-7!-Y8+_~ zD%t|q*y0~eib=M5obw>l@Iy?exo<4a;lGb)z8fRo(Tl-RQ*!*{7flcTkXW4aIH%i$ zAM%^{M2$}oB-eA}6sluT8J?KQE%*;{2AOhiPBj5H7vUdoGzkqcgDBtPU(wr=(J%=~ z=&LDd5QwI1=_X#LnFc1Tc$Az}Fa*J^BFvC_YU;QCh(oXtE)Te+i2OY{a> zp?7iEAHR7Y>9@QHzfr-;|Hu(LrkIK^j9>zTVxrlt#8mmo`fy64{-l^ne{)Lw28NI% zrs}JW$`4CRRmva@o*|9Y7>Jii<--!Np@g(58%o(&QrZ&r!!dP22m+I!_eH8hH8M<6 z>gx^83g9_MB{*|d>i-gxWOwUn!D{qFJT)IMN2n5*5517kB&lstthsQnE!h{$DL^;d zB;4T^D_cm-00TiU9aL`m`=xj?6*Vo~A%5Z=GVk0uw3r|~n5}yo_u!> zzm84VbBGgv-gBD!1s^G72M(BsPl=gHB2%zPMK*sywFqKfmW3fU$&-y}`v@i7{7W}k zH`EQ?_e(Z{0<9PEI8C`EX-@=AyoBO>rI;nmRNVokt4%0P2PG<#RijiPv=p?exxEdd zW#|bjhZ)4v?V-I#Pxlm~d%X!=-zE~$G*1SS$5&j;5EJ#(6#c1e;+ZCtMleb@n@}2{ zc-l}_n&(MRNfNyM~b?p zlzsJ-`IVHiLQh$Kth~IG5=PYzk}K$h%y2Hzcxov_QF-!~UM+?gNGo3L*rcDn0D3;q zygSU|F4C=Qy5y^RK;T|LBgZ$u*RX-h08atD$&xSd_cDCIUkSrMASnj;E8+16FeOjl!px-py`+!Vi_z}8wO_zMZw*!UO|3|m3NgzU@dO)WyG9J*zX#t>K6iR6{ z4H`TkSkeKFk05*F)Ed}7Q0!1Zn@)Kj5K|^eFWtH(Q&eD&q}V=yHck)##e9j>f1_I` z)7B56X9l2LO;h)JSUCHIn5ZmfdfxJ}E=KL8Mg;{}-F!5X#xMy6tuQv&?YK*c;~6^x zsW*XVXxa{fIU(TJ`Fs2wYM4}ySR*~6v01=x0*=@;tm6*ZP7Gc6f}4Qf^!lHBbajOa zL}=axoJz##O;1L1&0ckYzb%g^&l*|DFNsgWL(UT;yAN3GQ>Qy&@oTM&5>tB8K zE&zY8Vfcq|_>RZI6@-=op}&(W5MH|ZnZhmTS}jhFAX6932SL*}(bL-0=kfGNea0_x z(IGW=!PaX@4i2lHY!Ua6g5i}}&1@I05s@BthuC%0+1ax`F4*3GW^{ff_(2a%f2RCo zY(b9vAwSP52_a1RJ_)XYw`SJuCn^Q}5P4hqVMz)PdM<|^y~W`e9B#6uhsEg?!9u9+ z)6Lfh+B>ZNyNV?B)WvJ~d17*kxVv(*wV7|>@Npl{n*GrjQu%)EOU`Q5nCPi}A%`)3 zL>3G8_YAzdK=75AHNES{v&lidX5sTP2CmGpKI>TZm)*FtQ!-Z%wQSbny&0p977RZ) zF=k{)=-58}3cGKQnmV9gaYRT?@8W~QA+sB#nuR)2@Q*QrAhH}f<%6`Ccp|jLuI{BihF)LDE{!t zD2Ty{(Q-tC=msEqft7UMkd)cPaQ_=UNhaNLZF|-D%i#gmlELMh=)DvW|G>_Rue9Gd@TO7 zW$u+(?HpWdh+{P;#;FB=H|>{Xjnnw@KQ=qsMN~~4eP*7z$Lb$T2k-b{-T`8<8J(c{ zqCL$-)%wEQ6*DgtCsz)Gu67e+g@Ljnu3j~262^Y3f2!hgpQlmLW zTrL({JIr1*cu9)c9lK4>Rxkg2q_g7z5qEKtO_LLIJsp}K6@L;}e&%|JG`~Gd?GQ)? zyc<+FZ`;V(%evrFg~GQ?Z1%6ue8(9CxHHu3SjFZk=?qVLI6PCoY1rQtaj zmWmMqS$_2-+9A!`uIvMk{4Pn}#ztNMK2k$pQ z@>QQ+dv-oO(`o9}6=RnNYxZB;Z{r@@A#YKFZf2(AjO#1&D}r41-`H>K(XVx0dEy&KNVgE%e~DV>F=@wK$rH{y zI;TwtJ;=(E9nk@o-eQiNEI51io%qRJ61-EhhqU|sT_-ae`@W^=d22^`MaJ$QbM(;= zH&fS(;uR6S;KgEP(aVy3nw9s7?V|&u%EyrTK>yc*pjxp!rT{@Poh+BNWK%=dFaqR^@+^}tJ_rP85 z!R}+es3M=$wq*+H1vP5NC^ly^adyU>8awnkx@pWyO*zmI083ost3O)qtG7HrgHc4} z0%nNzPwt@O+6NOI)Q82tHaV{Uwb0q=2r;X4eD}JedgBJi^*7Y&sttab>+>uvqSG~F ztK;n)4~g29e&f!}3$?YM_t98Q))*_Rm}UILAs1r>*Q!mKQcN zchZT`Yo0G(x%}Dc!(uSG1ZIem$Q4XKO=L0;-Z#L6!*z6m(+%@#;w28#t^AF*+%Ek1 z2>DchtkcMEQJaTP#^V!%jdUXAuz5rl4PQYaKLk$(Q!{o2Agfo{wqP&hKSTmI$@a1E z5SNrPd2;a$H`iV#>HHn-=qUcnTIWIU4$0h*HGE_0kaq{2^oj~h7~mZp93O|uz(68b zp%F3k1t1R_)krmteA9-*CC(wzAQ0S^M>7&S4A6Y9DcfC_d|Ns0z@pCLxQbJ;_Ufv? zDq`kGr1<3ZUi`j;g=h4@zP3x9NlvSxLE{$0SXT32?|D$%ePpQE-(<|GnM=-%baLKq z?OstjAg7CyViQ@?qU*@2bgd_vjIGCGO;EvB*g07wqP6RhhTnlU7PY*-p7gk|GJr|S ze52PBxo`tVPw#GF<#O=EL02osghly_&cwUAZv1h5-sW7~^*eQR&W4fpc79VGqcfZD z6EY7qAJlu1Sby-aShr+L!I|QsQ)6>aPMh|@Xz1K=)Xf%fca?>rnXuQC)eiFlJVP9q zdLaED>cm^AA0g>xONS68+hZvu%OxIp-PWg#m@&MoeLH6}>+Q2$e0&6@Q+TjvU}$@f zNFjTbeW1Hn_kLp%gHuA3a%WGMX=T=CF`0hxi)PP{O`FOmm3GaT_i%O4qKe3zX}d{F zC$I5G7Nm_0Xq{2&+1ev<`M8*}94{9?vb$td#Ii-5$DLjhmx(FU3#`ujK=u}pT<_*G zBcpF)+(4^uOz1i}5nh;R@sZuCPm8M0hC3!Wt^9M_?DzUxe5A_FAG;{lD#_A1M|>EV z9+JI%7eS<`Y^nC@Ws}|yXp#%7N(3^D#hAHGlgt0PMOXw3d zT5i1Bwjt<>s!~f%C-;Z0qboUX+Nqb@;CH%N^UfRoShfD6evZ8LqHmVkG;8M7zh7_H z?!2{I{_r7-ldXB1_-?k|PPr@7#raz-Bl@}yI&yBj-EQ5Ya_x5eNtfr@a7bN|kdWZV zd)gHq%E>uYXg7IZYo&b1W+;41^nMKlWxh8QN>XuThc*lq{RO+wAwk0;k1dmx3$PxGNAv~ZXeD0$+mr{Z|hK%X-LPQtY-J^a<}Ds z_w?;x73&&5*m`(eMP@~D!la1a(Z5c@^~Ii+3T2npf#H2(&=!5g}IMg_$W@Qe;gpff(UP=t*s3T`B7jHA!;vqCXE)^3tf2nxi|>20qN*bZ!( z>LMtR*(#*{X6~xJ9jcpVL-JJyc}vEfUUyEu%-@`vdX=OT{xk8xrpVDL<2Ma&7d$0r z)Xr8r!iZV*!V7tsJHO`so6z5xpF&KD;M3l@oteU|r(@p*8JP=*$GB8z0=p-75AQc- z&zZ!8IdK{B?cQ0Nf3bXps5@UkQsSofvRdq&JTJ~dm~3u~qS%X*g-vvYMebk!9}(Af zN)XhzuLPkJBH2wgOz~oduo$?(Xa{NBXXZz(^wry8RuH<5K~qs;mP8NOqlZzQotnIK zMB8TO*_A_wtRC4PtaKXB)&g*;z2nfUDsmbBCtr{Y9yt z8rI(${lyJM`HSLgOl%xRjmdmhecoZ!?FqZjg}Ir!Eq%DN=xOGi!JKdYp3HWcd-BI8 zWmk`=e72o8m$g6JKS!^7e%4|3tc& zXeu7skMh|5)A~;~-sT3~npXti1|1L2{_r zb&#wx&kRv};c1sNArF8r?2e;@vJH zuybrdB5n=76BZxRF+8j9kd;Zf+XuxYd;9j_!`H4wbtAwl45vGoP1t(AS%Az|_cP38 z03Gn4h*GtCN=a**h|^FnwrF=Qy@PAJ9j7B4E!E4P>@3?KZ+>1mEI+?I-qO<2E;l*h zMgsp$_3Gyp8RgF_^A3+4v~cX+jEszZ^K+UV(MD)c-yQ4@s{V!a!2Pc=x1wcV>2p!@U_95XZ4!ryZQj?O- ztBGxhFX1;JsVyf<6QdC-qp=oh>|rd_l4XB&2r|qbNZgUfxYpT>ba9 zxapa0u{V1IQG4y?pRI^ng`*wG6?59g_`}U@xtZ{lEu%Y#Asktu zb{uznY|F_zQu!;jGU3?_LgqoRftbsALof|Bs6;6_LbOIguu!FdglxL*r4B*jSZ3K< zz$3;R$?K(#Fu&dP+_PB@7N&jU<5#*hU*5ds&+m;})XUt`uJ|(X-RXLs@a6t?mNAoe zjXO}$viVYXJNJan@na$_9J*f@@~fxboHcNJe#Oz6laEC)qw;84$qz?HRacKb^8MuW zW0ivm_xNPZ#O0Brwfzf)l4G~|xL;lL79qQEOywHAF@4N;5tW0ej za`TFoX+CmZd}~hC=}2c2$2GsdJM&Nq85A-&xNu);$go2N72{`|9lb`kHh9^yH97R2 z%h_U)Ktq?dGCwJ>N}Weco|x%D#WZB+l00t<u15jyz_{dn8|~~EbwQi;eBFC28Y>`;A^+;?^g}(*G&@2c!2Rle{5feOneF|cIRF5hvb%o(P;@)32#np>y z$&r)umfDx?F0tqmXYH*vbLuVbbmSr(#jNHIcKepM2(fgsax)*0V&dR#vAL92>U>iM z2*NrOGyaQ;iZ*9vm&}z5t2nNG(Cjm){X*C=7om%OVDzO6Q%V|WXDd^*qhJAq3d8I- z9UV#Wd%L)Ot|E!>oYCdbK3D6>*VjZZDG0H+WIlRG(L0uw8TmL| zBx25v9@f@VFU%HwPaNJzyhkoGx5ytMp5_W;$!2lf;LSNBw`B{N+UU^(lE;Gh91#CT zo{L2BlC!5*1NeHN+UV-h8!#|7H8^?*U{RO-?1JSEM-K0^=oCxtOj+5?v{%BRH4|;E zW?!7VZ-@16m48Z~;j7I|;xe|bnrvr1wjqF>?1>jb)H3|M~wAHg2x!x+$S(AHdwZ5DeJlwk`Jbta$QeDQ&967O8`^@7=(yPG%jUw*M{-oEhPc`N(j!%H`J z^d@a{Qid%|;sWiq)i3X}_)=Nu-oIy+BU!v%!J)i;Q@$K9CqDP!eL<{k%emX_CbIHb zPxk<&YQ4 zD=DhUrtEK23Ag%-;JwygYDHF%(#<_AUM5t(Ik1-f5_ovhApr z_9F+PS`hGVbMr>+E&ePk(agWzYf_>eGrvrn8#ns!n1%}3{S=E^4Hc3!r(fC^Z3bF>}v(TZS)wqqyR+Qp2`n`Iw+v(GK= zgM}Xs&#qZKe*dttITMQ3ET3E}hQd3JgxwFuu3?2h7t#b-IQr}X=E+d!%o+m%H92$^ z8Q@6`IUESNyGHwyE5BSjajcqOnwO%{s$3U7JNlx!x|(mLZMTZQSdFuYEVQqp;UvLS zeh_VC3L9$=m%tsv0v4;W>qiM7S(FSny?SQ9qy`Auae@xCT5W91I#BOux4{}a-ls%A-=GA^q&dw1- zxo9zbKK((?>W;Gq^3fb>k5Svh5O`o45Iq`4DcWyBXkA3!IPdzy-sTa_ef)!x3#@)~ z*6nZ>cD#RDrKna7&+4{r9ctAb@!1ZjMTIcnKIpMcmUPl<*!cuH-qh{IKVg$b>+ej~ zJUjY~^Lughg}h@`)n;0GtOBK|yr6LUA4cU9Q;N3qa*(3(X^6@@7uOK7y~D)wmAOu4 z_U%UXNSvNvsgKI9x-+}{Qzvd`R)fk_G;B9P-W3Cj77C#-LlFfEiH@pWoMW7 zv@G!@U;L^oZaKajZhZ@JpNC*oyVJXOKteoXTX6$UW^Blk{_nJvzhC>ufC*v$2R_=O z+l@Y2>A_cgwB>~}&gAQTwA>xwh3G%-eaxqUG;b+GEFva6lBd#;9$f|CB_9?GZ?LE# z)P2MH2;Ok9!{!d~Q>x6Ioh_7tTWrN-C%fM(?~ORSVQmsFHk=9l5?v5;V{k(O z;!RJSroR{KIQ-gh>z0E&oI>;ZT8Ef=wRQ>d=`^MD(7wD`C&$HCD)(kxsp7Wl1{Kd} zvv+U1=@ZG+o?T5uTn~2A%Rp^h@{uMC?NZOOAw{f&*@*7-q z>s=<%`Dh7#aLZxVuF{z({Rbt%r}78*3&sp?lPzNS*qTD4ZS3kW#Mat~N)L^BSFHi4UG@+=g?l@$@4p4!IEZDukHWOiY6KT0o zT&CeGG-Aa@{&;n@HX2d&S_B4UWIo;6f{8M_6Iq%sQnyndB}8LUV~2GevSY&LD0_ut z`SJAPCGBmkxzWyI9A_0ds7pq;x!ijA;2uL-4{yJZ%pbO|z41;` zf(tD5F#Rng>=5m%#aUmF8A6d*_9>aco!u=u?%>yovpyp;mn@NcF0GA|A7uZ&G(6E^ zfm_U9meYG^;3$Jo0HM1m^aHSGh?-U=EMLuM#QL^qA+DW!B7aHpK&3LI@5fF*WR-Mi z)+}p{kU-M4-}3Id>5=VQcVACpOL|9@#8BcShN}c|3)+WlL!`w;DOd}0${NTpg9<71 zP`7dEKiImxP1@8bY&iKv6VTB)Jj|@wsuRnVZVq<-=G@#5%$iNgEI2S+5bw%WLf6r~ z`C{#wmd$YoE+<=uf-F;7U}&d0Ky{U^UFat_`o2p?W~K!S^6RQo>zcLdAFk=xUhQJq z($3B|zk_YoqJc%lE(1@++jQ&JDIri{r=85Nh#uWdu2Ssg6fU|tVf=d|7FUY`xicyu zG#6|WxEb6uRz%y^i%6#ho(Zg2nU<_r_Fnb5r{*qgQjnuuk@j0^n4#x%>!BR7L&q_E z-gD6fe0lmgw&vA-W?0@9?$OK$Cg>D6lXa2n%1)o8h-c}i2Ds^MdJ-BMXC{%-jA)}m#x{R8?9Ne;_3Zt2 zd_RWKb)Z1F|C#j{7m z1npvSAE}}Rs%nbIdY{dn^?FLbk%xZ3p>tKP+4IxS`O6t6GIZm4`!6L%YA?n}EO3^4 zBF;=@)@HoI4|iHNGL+L9r~(_g;3PX?XC%F?MA~}5X5`>YXX3ESxKJX1=TT0jC+6$O zPwt_N2z4y~zLNa1`on&X-A;$9r_K|d=Iv33k^^NmW!=Te_eq#vl#+|Qr%M-8HT-G~ z8N2LaUd|^gko3A>enrK6+_z<>eaemDM`(YJP2#P!>$wk+^IAzOL3L6u6W!0^gVC-X z9b;fMjLAiZU+XvI<0`v3F8g6M`K9v1zK)?EbW=~8BRbC6r|zaBsJQML{(a4>g5HIo zX_BBTKFP_uxQq(AaM`kjbPp~jhzqN$b;U+S#-JjG3p14y5zh&dg`q1M0vZj|CDb3X zQ_%?&Y7;R>8h@Y>9y%qaV;_RZAp!PKqY#onS}(bjjdi)-SFW&IzHz;^UC{#ZR`9aA z$8PrT{<=6ICe}pNGSV%l``9k#_McVyC(cPTw_CDyDe1iI>I(5cx!-c~TbcVNP1u(; zWAU(p>AJr^`C#6MBfAB4jB0B+*ClOM%KYiwCnuN96Rz&v+YGz1t(&1E{s}$!lbIN4 zFZ(q-n=zBok(r{)GAR|%A~D(Y96F{xl?#7ff&H}jJ{~LQ`(c9g2O&)h$YRMfiHXcK z`B2jY(^J54^-qESop`qC213)@+yI6TzAQC{f?y;Dp5fVtK4a+ZDmx^u2RuSNtC&Oi z3D6AC{ghtm4mfm&rP@)@)I5@j!@*5zQY{(YC~g@usVeaQkr^KBC&Tx*1WX$7{SClN zp?f}p3xnSl!WkS#`fxB52^pk=p{65oh*xhyL7&1U`Xk9PMn4|T@qP-IyE8f?W$6qq z9rUF5Y+(p=Sjs2a4V1f-(UJI$lvOc&Y7m|&i(fy`X*!Zb;dl#YFySzn7+GmYpk7A+ z)*|2+;sVwHtZwN2L3oDe9Rv%9Iuq`OTFkm#<<-W^-lJ8|xUR(NTV0Bp9J_tNy_Kgu zZe;BfB%b6>&7G_3#YZgR-NbXfvIj?mXAJ2{x@mu1tUXSqgr%j0g{G&4iS$ykg^+y~ zZsGKz6K`)CKGd6JwP#xzWMc}?6dhhEjsI}-NvZw@1X!&~Oxr>PT!3$zs!7xe>XqzYmUO{I;t(8-xRL6Bs<#xU(WEa>-p}-Z-Ok74;6v%n7vVB4f7@H&%H6o zVsR1^rgb@syV=Y*GSezGGjp2N%tJG*QU(v6ZUuN+F%#JJ5Z?xHAcJ8CXGI+*YE7 zQG}hu?pnGtneV!*{f2s7rC+na9?lW@_U&dwIJfEUqw>>La^u!-7L#$2NZ-1ur_nw6 z^@08S2d?LuhqZqyWQs?$kKnJNedJpiBYJ3yj&F<^bED~92)0I<3g3V!k=1TGarA_Z z6M5k@xGDSA-F;Opb!)hB?`|=$r?WHi&ieJ@v?hYMg>`5YM1UXx5Txlai%BXS&XjZ~ zHzxELW)URSkF$tC8OB*b<43GNBRxO8>g#-1W7DoMQLIUtoob^YgR%}~m55*5(rLPx zk_-2Ae~a%pbAM>6{hFn}{7(p$g~-C8$ofc7!=y<6hX=donhm(pB74Koi!&#-`U8a_#H*mPhoxoy zo-ut!mzkT79Ie$n99@b(Mv0ZqB)4R1A1lkLCkq~`v;{i`SXoZ0De04zZ)%#B(boB+ zhQoehX4I^-v{_M!vHcP^C-&=?u({9tjLeBa?ITLFvKGho2n*}6S$W)W5D->lHdd=% zAeE*<*v+g35*7$QGHX!>E1^(aX2;+?QOsl@Nr40PnANtW<+CPvysmZ}9Y0179A z7(5wM4a~+drRdAxw+ZM1e+BSqY7T`E&z=|PpMB^vo6P>Ar8{)ZA(b6XIV8N4=yJU& zK25TrkZjo{1|KD5RhDf8zbt5wkMN_!kI!KAU)doBy6joO8BzSDxgwo0gQv+ zvae}sfx8r&m0Dpsay=~rv$HfIjFehHab;oyl;KjV3|vNmB^+o-Wuu@82S9;AnH0}~ zt4~8R>#)=#WA)A86>KUGlXIPirIS~JeeQ9O4k*Dt7fW^i`TgP-f3+mn7*rnU@h zo)wtU^E!Dpcdls4&0ffjxR+GXvu8!pJ>7~qx`lNKOClncB-CL9j>R#BKFqqiVg!cU zu6~{&H(RCh8539*`=%%6kDPkNwRS9e> zn0ahpl>8B2LTw0^O>pNJV*KtNVN+*Ba^^2}wx+uB=cfmDYL>}$noXLELzj|2$nMKg zrBk|*|LT&77BgUx6VGJmE{SoYNHq6srIw~epdQs0c%*Dh#a*reZZtw-?C6%jEXiIiT(ZN5hV_6 zDd)G0FX}hp$jr!-;;u&z#a-0pFwD92j;;IyE`}QixLzD|!|hNACE#WZPE{BiP&kDN zx-Ghu4&9Ju^id$U19c6i;{82gBt{4fQ5X)`-XOKDUUBH0>-brd*6o~}+(u8c8fItL zETYZ85R!3D_bxxf-*f$dBvFxbJu~}i4!5SR3iOy#LnZXpg7rgMEp)DdKpVzaUxb$F zaSa8c2$vHUjp|`~SneI3)~R6Y2pe19X$2ue;zRAWd*;2fW^VYu`JkZZ?JV9!T$`_mJKjDTF{MA%s8zp_e4I5CQ2` z5kisPL_kEWD0VJ3^jZ-W6&1T;Lm}C7zW%YL#F95-h05Hm~q=0Vxp%HPpBK5YO~+7e&I%PxF{y3 zh+fiwX=hj5e(hhgg34y*`2}T1x>j$0V&tG@#qRM%{S)$MmIeVaLvffYE33L825IT< z6b@qo#7rtqwMtfI5=4llIAEjC7(F$IY(7^lR$UM&ABfJ69Sw zk(82iuwzoFl?$+I(zc>t?gXQ<0P5)e&}r(#vx<-hCII^7WpobhGLfJHI<%gc7zZ6) zuWwV#Z?`>E;^egD`_0>)O!71HJxa_T7_l_nLQ}cBIcw*L!9|VzOG`y{Soz>p6ASGN z{uP?HqSn%B)Vi7#gDo8=v-eJTWy7YA=iN;bzsviajC(m{c4c}?sbkP)cc%=`fWoSS zA1%xub|7QX=(`>oys2jGU1;AUfcIf^d)m%)C>l)LsHy=N8`)IoRS`NJb27snw(%** z^*Hw1ENjMm+OhH5H@VGpyyKmP%TE^DN52wdKe2hjU5+!Y;xZCPTCjc6_AYXah>B*- z_L^I&S9k2Up0<6^>0{dLJ71YF{p<>oEj_kq>+1E)-#1Q+<#A@Dr>H9V+;%dCGjbPt|od-ufxNzu!CxPZh45L-b6k?|cfzXot^yQ{> zVrZLKp7M4!Mt0~M+3(JGSl7<6Dk-t>v2bzBcFCKP<76>{S(Z07+sUHpt2vgX11-G! zxH#m{Ru{y4Qd`WiJ4MrwVGGN(Z~qoLY+)G_dQxjQwvgp^YSh4^QSybQHC<&AtVmMm za$zi9Kznqcx-N&lEG7AREN4*Go#=)(N<>4x!p=El5?PDWp9I%XlKT;&O3kFxb3MtN7i=T#mzOV%FS3y*ZMIv>xNpgW*!qI->7)cJ`OXV zA~p{XV=MQ*zr4W4(ld1yxqrmAQLbU3j<$!SRco=vVbzBV_Yc2oh~3oV_4UW6+6}pD zIC_^wIf-zS&K@|G>>$9%b3ci6lU|#&s;G}( z=`^1ByJ$aj-nSnsJp4BPv~K^ndushBAF3wj_wL0+L(iitQm97QJAn?YCATZ|PdGXT zqqvvp-F0+dIwqWA#eKBiT_>TzHe^hu;ywrNf~MAq(m4fieO#;t#YJ63&1LD!F6xSg zR<;%FN|kPTG@H8XwlM-W^Of+_eSO!4O2YGp6)H-;EI24>-mAlf8Hy*7uS(X~TVMqp z*0BCcXP!q1dQ2c22IcnYx4WNc?b65E)XFkKvK}@3)Z+o(3{xivm{0XlF~2?uP^{{1 zW)AzRa8@z1p-Kls1?Uw}XfaelNTqRTE_96yd2ei6nwx{$(uc>^Z%T2sce(x1;+iSW zZq6xHBd5Febx9}tv`;uPI22|4`Fob!^H%NHw+=2j@ZN;GH|#mFXWgm;CwAXO(zl`L zdWfJDvMGX!lytjAtDZM38%Ajl?R3XpUDO?{+4EeXQgnc$w(9g3$r*h-DiWi+-wNc6 z5)R+udK!*?)A+ouA;8__;ZAqJr|#5WM41%l>W z2m5G!DE@LUq5O2Al)c6->SD2AhIFunO+8|X>;w93MwWt-+k(C?un#CK1HSo7G&KEI zpedqEO&xn)fd)2B-JeRLNhkzhJxWrN`8qRe`}(J+Ik-AUY`uNky`iu9?Ypw;>cLl~R}bFy9)|JVQX4-VM24qg3FFX{ zGBT{&(-9URuEBY6VZ|})kB@uo>q>VQ_a{lf6MJ5*GC50{?q2fDY)2P+kJT$H@0k3< z($|)fwA)guqYshrR}Q^MA`Z8Fu=cL|7Jael(2B3t#FQTHc!TYaQS1)nJ^&hKdi<<8 zN{%={t*oenU>rr#$Okq%By2l&{@?F14{7`EDrTNw+M9fE$4}bf_hUO+ z9J)z<;G)m*JxJ+QbZ;m|r@tD%*K~K-0$iM%Qd_DkVR7WW{@V zCuF*(&#sD%t%AuimPxH)JIAjyRJ^3l65O0x9iPw(@s=NC%fUHwYB z3~^h%Y9SxZ`y&@!>snmE7E>jYqR^qEF3jhYb6`zO z73l-37Nljg4o7!7h9eGMV)K=QQ${oSaDin6nSlL91pW!71D_ZC*{zr;MYQuWJOwP+ zS;oD?y{X`7g9;0Rs7-e&5#`twdmc>4=wQb0y5;}ZyA#)`?1l#Ens(MUW?4s?I&(l` zlv>PBj1MQ1)!B)O5o&c_VtiPeIx8Uo_DOm1iD6_C^^aDIc}ejR7nZQD%Q`Nx&dWMJ zB?H5{H*lDBWVjNi098c3Hx%k9#XX#mGfsP=lhcueX;cr&KB1Ga+`>X%clYe_-K@lC<-(^x=?}loP}v z;zp?pmZW6Sz=6SqA=TLf%NG{>X4V56;w&w}Y9!3S{8#npxlq;Aw_ij1I?Oon)$B4D zc`9eqQ1dcM5TT7xHXTDN4xaIw*qU3s;jsr--x&MaM2P?Aww zoi(s@VIg|b9MvrL5__g|#84SV1eOkY^w8~BI*vH8gRV(^W|JTGuDx@ebapUvau9iI zom9qDkJIjA4s{%12X%a3$BY_>Y8WQDvk!~Mk@2;365`jAU}e(rX9AelnO9W;e@7dg zbh>?`vZ|zNHUlx}(4|u7bkORoi*v_FXZ9s}m*gEg2-&d%A>LQL#a`eKD+2+bL%fA$ z1~6G=LBY~>g_MfzWc;WJdZYuj++!GNxw-kiR<=%#HhqUyHofH> z;%k{u?2ozyLCw9-CqpBRp@tVB6eK>eJiUM*FS3==6(Ww5u6%ivcuCq}*zsSGu8tj z>{yF_k~_IwJJ^<(ll|H%PUK@8sD>J&5+@gO+EI!p_fQGm zOw|T-4oZ8_rjP2ROYq54nOig4$ToXvG}B+IBbyhmL$BFPjxzBe#r!anYhtGXLSJy}*~dX(cJ@V{_Ql5?wG*6}DfZfWM}b|w{PUG7(byhAHXk#T z1N2ldTcLAuF6mb>@Lrqi$1;NPYwHFu@`B7cTv9K}Xd7oGZ-qVzE zx0a3)d#Rt<6QsX%uSf5yteDH*H&Jv>Pf7_Ch ze`%zH!84{mDSbM9a>zhL+N!-w_R0@pXmn9{Tf>TDbO6eV>Nruses*?UxT)#jd0E-x z@GPGf5mBmE4+sk{RhuMeFDKQdB-bS+)g`CYB~4Ec3rk503C%!<+^V|5oMK#|;C@k6 z-2rZxG;x<%2|6+;lLlabBl$eTUZE@7p#pz{4u!%CDkz`Od7qMUnJ45C)Ygbs2weRf zpcB<%4q)OJn-xt-j!X`T4hTq9FE=lqk{p;AmEaeUx!0^nfhqx~8UoVfG17>eQa7hcQ@e#^ zG0(>{56O2cAWcyeXjYVf_rS%lw$fO;#Kf%$%+Z9c3DUzYEtjNiq?TSt<7LTm(!>#Z!X;z=_c}{8;3I%6My>$aY8RllZOi_pV(_hDry~ zm%-UGKk3a-l`b2JM3|p0zy9Mc7pc-sXIQ#_KR{xBVtzCZ$ls#oOT+U0q~ZN^a69@aVcU9!)xiZ0(Oq!0vz4&l zaksM;0@Wtb!H5z7lmbmJCofjqBRw&T_JvjNjI*K~rKi%yx6LtjS5V^}ty@5r6iGyQB_cKj&4wZG5pUq3$I-oiOLr@&@`h3$~RqnsaombrH7q2g_`c1qVy z)om$lJGZPdZPW~{FnFZ*7NjXaQ+rvpik6|$A<&G3!s8KaO8C)yOz?z+$X4( zdB<6M#Oc|?Cl53EPTF=SX0wC#kTWyp&Y7>S*tp`g87CiWoLIW5a!ci!K~mUVcd@hE zf8;ErA$Q(M?orI;Gch!M3VOpFV<)t#5asD=0%ND~J5QA#qtXXz)UhSO=>tv0*b>&f z+h=<#_v=0FChR*^>A`tH%B`cl>-}|V=BHw`#oR%8g76V|WlGQKYeXT6>GA@>!3%Vq zNC{=V?Yf=Pz^Ig5IuNM#3=U2ZT1;1u9W}|ymlMS03=?BMH8zdsy@UFug|POc^UDWo zOxHE?-dBSAdwZDVjviO@iG8k*bKDQ*5pHgt&OVldJhF!tlnhf5Z6&u`o(fzj(_mB+ zC~#^pLy^#7L0h?$J9KhDAI_$7PR9HyTeURQg7_1Y;G~2C3)VC-BQo19!DAY?J7-RT z?Z^e`J7$Cq7GlyXhS?Sj@^a7i$wtnvamOXCcmVwkBs`t`JN0+yLDKnQONMEL2rn-W zfe-Ta@ew@UGi&3264!N#U0JPy@n z7MU{^;TiN{9?9+A(qh)KZ5wV|CRY`jl@1?OWL8=U_(M=JbA1g(nVRbH{HuI(ruCR+)5@+fYo}SM~kb~NDAbT1&PFz58q=(ZJOHsCEKzAI_ z)wL;zn{}*h52+3{#FFAhjQ6nM9NKQ1TBBaThC~D;`-rvSQYQ+i;=(#6y7%^c=s z&e+{Ku3?C&sZDmhb6k;=B}t7QK+7nEcV>=ud}W!}Qi_tgZBAC%w8Y72u~U1M;sP`- zYvbjI4bxbuK4spnw-;{DTe#!h*~0llhotYMuZ|o6kAfXEJCm2fMp_M*8PP<Rh*JF z7k$>ERhWEkC38xzP^JlkPEtQcYp+_Xy#p~d%r{-7WNoVX)QD`J^9l;6u`NjZ?#^dG0~!~6N%Z2BZw+T>2A{1F;qr(&Uk*ZQ*OFn!WJ zPCZp&K&H#;0+t;x81O@h7-&$#tf3nUK?7)iO=xm2b{^=HQ>w}KY8+-!bzAtDX|{|- zMRR&?SwB&*%^UBSSzvEVqVh6IF^7b)=RzVYg52X$$i6w+&&tP!ytZSuS^0vTthr?_ zgXU&T%!r*Dk%#%DbU4N`KNQm#QLS=U@Y16pf)dgrtZ!c&X zFV5xqCl?f(o0*0WjL5PypKDE+K!?rT!P_UbGF7%)M;-4SsJB04EVZRffanj30_4Ap`_?_ffx@vndl(CoZ? z6=9uJchY)CK(rtP`z$O$R!tg4o0|+RR*M!^VsuJYfs>~fzMOGCqeiaM7A@E)=y!r(5R{KSvM*{ zz2so0{kFerMy^Lmjish!TyRQ$v5DZ}=8;@8Y5fGDtkABnWmS26MG}bTK3ukKRAldarL$~_0Nnh99);{7UbsE&(+)|FR!d3w!D9s zXI~_K4mq1Fo>NHZuBH4+N0CM>uCZJ^$jZ~%UE^Tx5v%5oGu1V4=*L zl0o14Y5VNj2jK^6sll2&Ee$HPBquWZTT6?rmG(MJimM@~Cr?Iv2V}eo8Mg#cs~bXY zJYwjq9`r=Bfe^CMem1VY4>2vCU65VqjP{UKW0zL$Dv-pKfaG9v#xjgJ&dKDhta*{) zZGxQ=f-FL~gLgKohb}G5om13r=)p#n=i1poK}Mj7CNolG!L%tB);niLjgB>qEGSGb z(3qu0Cr=qY$D+@sG5yVg5^&O0e0abJ^XA2k7NZsjL#9y^3bpJBY(l_$! z9`g6TxLM`i147X&FhB-^V_{cyeCfZwTz0u z3(h!E@pjV3!Z2m&Fg-BGL0YuP_DPoj-tL8YYi1yTKW`@&=>o8HmWw>|Vs?q`)BJ@O zGH0v-9*2R)H60$<3c{eFqnLpC>B}70<#=CYURG6Z@GuK*nP`1*VR3yy8ozxBk`Ifq z^sSlc9qOgxv?^}5$`;fgo%N{p9ej|NBJBi`(K$j~$M}M62hLKZ?E|(6BNh)MYiG}K z;;e?W7Q~K@(KIpPp;(He;WQReAz{g;jplbFVhDgvEjAoaTLma9_IHTIb&xG9SJ??X8 z#NDYdkDkJ&A0X&*4vZULn46hL#tVE%q>r^v(|B|9Mcn7P-roKCH53+=l>Ft`|K>ht zASP5)?6 zTgzUL*pE2=tp{E>1Wt$9Bm=PPF{8k4fZ0|BS~tWLXpaCjcd-&hPUVi*Rk)2_JFSe% zI>{~=SrTzwJ9w~{gQSdeK(=>3OA49kp8R#i+)MK9*R_O-UX$<2@^#6xs+GJW`VKh*ZmVex8pahP&vJdHkn!NGgHV`f-brt$SC@ufN<-Vuw8 za6B8#cR)9)B<`wm^d3!*&c;S*7^ipRL%Udu@C3EGEIb^DluCg{A7pHA*}s|{9-fsI z9-eJ@lCmCkKu8{U8NJRP=%E3iS+~Op}hEMPc(tFW%Y3NyjpuzCz&+qrfO2lhJilur6p*=LztD{YE3Uu>G66*4edTx>pO9oJsi zTAWqkmmN}kTLBvR{oFq0ZDAW|Q=Npi7d0f=k~cBR)NI(ITs+N8hb@eX9;{ZUL_`fX zF-hX~WzNXTo|c(8Ejw#QW=no_bWUzmQ~^-Cf;x%;YSdDVE+5n_VzI{#Igc$JX<=f! z(q`p=0vB5^?(%Xw>qhtZf?(vmh3sS6+0VdZVN%jF0I^KFyJ_aQ)cW{5OAq$5N7L6W ziYUryt2}w@;ia?ed#W&gF|9WnYW0SNFR@|7KWkWIUEeStR}+45?fO;zMLvb)qvD`J zx(Z{4R?yC{n*gEGIp>HX*dkD?qE%yTm>_t^mt~GpTZ}3(x0o|msv7U+?cMiMSOr&5 zQdtflFW@Nq(WoH+p?1t*X&{~lK~3S|ZSUwgmS$6iA1#`p5xo6fBW!&=s-(i&s+`5K zmf_XOzW!FGw^@0>xbj;lXQx3qi;c+Xsf*ac=CC|lKEORndkBjiVJbUT+|D*~k6wF< zPld0o{ETXmU`c&#;Jc{Zq?!9wu)KO)7zp0+EcdefxKIRN-L^yMMutV59cQ6+wYMYN z>=IJ9rgATn+oT1AZE2CT^i7uAPiQAf39!}_g_vtEUCqP{-YhxDljQqhsQRyt!&lpY z$b=NA(4Rr>FT9WMyCW0h=%?-OI&0&aLN}$f2!bqa`kmh4HfcdrYkRIh0~G- zC6oMQ4~c1`rJ~J{E5jCDPhrAsV^=v$!$oE&CieV zO9@({&YPro1o&nMOHFedlHv;Tq61R+W9s<9p?0ozVP5K(if|9lkY7z>%R_BlY(u<- zxFHc6-g_}9es}G8$zI;o+2|ku;bVOqK={XM zL&heJlTO$YoAB)pQnvOynR;d3zcW|QBYdtbcU<6P$TvPj!(uy{nZw|$f8#2pU--|J z@kLPRAkmk0t61rQp<~P~)|6ZL=a{I&gScU2(%{M&RU|1iC>7#xtu41+UMcJVUjktR zR2K>}uG^K`*oJZWUQJw1VX4Vpi>kY-)-IR&tFz-0Vqlb*fRk-kCzlOQESa0LRnms0 zr-kC8O{SEY;d4~*ipS`Q@{qtN}qk2dy0#*XG(r|gbW$*(4+A~Le@lWT&LpN_embo zzvbsTnP!#lEDF8QRvG5>LL9-tAlRYe+-^SYCrk?_Cyi{d)Sx+6O`Ib}M0>{ja6)LL zSEBDF_zkFfoZvEgk zw<5kep1Qe5_Y2MRh(?QPL$P4lbX5n3LU-g*Ed*#)Vd zt(MyuQL{>=x_SoDE^s^9YHa*PiFb@ie#)9sDCe<%MZ>~(_D^%$>sYgF@33L_ZE5t_ z8{YQp=iJVyArs?WlcPL5qLW+`CJuo)Wc$Z_ylN}-)^s$6AZM#n`zR;=b-F+NwKLKt zd7&JJs$DI8eia+Lbv(me;70JjbxFg_7{heoU1E|?ZHsN+zP9nm&SCCuHiSQ`Fta_3zuAzz zj@3)|jTmuk^GuJuk(;0UtYzMt2b;OOU6NxwJ!6tw2TUFu5nc}2QpXAHo6J+41B5a- zV2;6{0C7-7pKFu)a7T}mdxlCm$B8&q`efut=?CJJ@ZpD1Akh4H-XvJCR!qgt@yZkD zB`Z&UCY~@I!+FWje-V1p?3?=2aFsMD7oj_2_H%klFV2^3(;v|bpu&Y400l4#6ph|Q zD&SAaI8vE^LU*(>mg6c%EAvm#qm_}VS==`H6~zWgM-pLIKfQQXnG1~P1u68-%wR4;NH8yg1-WWO8O`Xj1Chh}5i*kmNL^P=^%uAO%xB z(PcEdP*coqDcoz46Q3NeRu{x2hf4SUdPRCvnyb!AOrRFi2?^o6X=oY+n`uIiO5PNb zmI@m%9EsPlLHnlmJ^29L@|f4bw_xdNIc0-L-CjeWdSa*v3yb4eC?ngkA*>-HD_Sjt zCk7J|k{BomnC4g}J*{t6T@=qpri8*dA&}#u(;}>`J#*bxJ3A-)_VchbHPM)uJL6}n zHWAJFfxJtgo3j%>OiWC1YKEJwwVA1jy(Na-q1;3A&w`7N5MUj6Ka4YCMyymEd!qL9 zYQbgE#YI+&HY`F5RdMggn@~tR`Y9Xf0CM5bq0fBDDn|~5ddDV&NJWS@^&w(CFe_$gSY>L zEelTb?@_t0jfc{D^s?nxkE7b=s10RnR@&?}i6|~AGGPgyk(AKZ6kkz+{@H#8F}LYr zvQYZKwnIlMg1>#XWMMz;JV!=ShZhtUo9wrFf_ph3sV%-@a6(fXaQuQhz_#<>Q6|HN z7ey$iVtw9Y-MP=TjoR>6Id%&F-DSs=*Ir9;{tkt>lyjB)@gFI3yTVR(iQDO$lvy*o zCt*5@|EMQnibfr$eN*D)HHKN8n*GvC#4MuCZ@r_H6wSYD-dQWH<#g#QV!vDZf;i5c z37JkugybZ8%FASnr@g=6n;v}^y_F}xgxO8**8AM%RSL*l3DDKq}U+?14D*J zRHR>XbaHq15Aq7DO7`{jbOZyT<+%AWJ*?Chm8#5%P;2Ulb(Ez}K?Q-o2v$ky8V>gM z(g8Ogo_CC}3@Hj!YicWUGAu+#S2tfl9q3|}Jyc`bke8QXYSNc`->08TB0H1>6vbwY z%`f)#bLki8$ySmPIXOd0lfz+3tIgqBPgn6CDT%B~kIGF190E)@0eGubyIKnVXS1($qX5wO^RMsar08M`-<^fzy-s-8p&N_!R2~+w2u{ zYsL&r$;~h^DYmvnzba#2RSZ}9=;Y`+*2GVD3YuT6VkV>;1+J6m!IXt5SG$F%)uV<8 z?*2UQ>s9ebrY%QZ1dsmVDI}LzI@z5pvgkE>4EwOkwFb5v9kbWwRDOk+Oi=tg!~q{cbYxYMosovNbmYZ zr{tQmrtxXvx$g1qGb+j&Ld;n6p$oX(+h&DPqk!RqsZoGOrf(KC78ur=iIRT|`kC?2 z?I>n7iuSOFhUar?OR{eHSgYI1EZv=4&FxJ+F~-7}MEg2ZJ!VXorl$>%0#R6Ykj7H^ z3)n)cj-o@e9$(}@yrC-=w0KEB$C%`Wc=>v`+WGP^Cb^-m0shW*zVR!74c4(b1#TA|%Jb!Q0k61Ye$HFLa}9$7%K_dAuSe zrkm4)b#q|RnhbVPV1K(?adn)2z3TOLy(L$3BP_WN)MXWRS#m{iA;c_6x^mx*?YY?Y z=h;(|zg{;=O`|Y^L>lyulwL$@9N_^fZkcjYcyPb?DBXQtFVNtER$Q4c(}K@qw0o&m$qE5nXj@V}ZI{yKJ20;tFdo z+rAombN4t!gV$NF{k1!5#tK{=;e*2Uejp#QdVA$xKa@$m022qnfB^x_0)=zu)t_k(q zvb-F`!71dsB};x8tntr?@Z#r@s!yfI6>}we>$F`Wnb21&W{|UB3`m7&{S`YH-JqhI z+Kk>oSs1ljsyzN36G`pIVw8uUs~`mVc=@&S&=tE2LVquB??7r;5|r3!Itk)#8DV40 zr$uGJC_hj!og9;^7y!)@tKI$jySe%MySfJjy1V=MXg`OMiI=C&ma<=(mviio<>7t% zDn=%rS&A(srwzwIF>U%>Hbn$iekJ{5h zgr}D`&xiPVqIAQ2ecgCI(9h4qCHwZ_wa%e7^QY+i4q@p?l^u&CF#<Im`D#3{m%&|k#Z z!QozB;a(aDLm(XqM^2A%TjjUJxyUKg;mG8wxN9@DAF7h&$2!%T;FdwPLxkajGq>q# zD#Zw}XyV6vr4{XmZOPlj-i(p1NCRz|6VJW3Z*G+K;sZDoH-Mc#)pMYk$FzA*N}rQ= zr1u{{DgMS?l4Z;$)Uq)a+P`jWxb~~auM)pI)9|?W-q6nzzIuMbDG%<_mDW{FGuJNs z?YBkiX3kjGdPSBQme1m{_%|S#ds%gu$`42s&oB=|3`-YzY*1XbLNI#@E`so4wF+Wj zP>W0*yGlox)58{Q(3F@uH!0rbwYIZuI8=eT)G-qQu+bHQE;9tB){{!mWp2`Rw+(Ht zwJ~n{_me3!EZD6eDASM1Z_|Y3l~Z9iHqt@ym%ce-TZKR!`!8OEyf>KrhiAWgE;Ois)5*#@#HlK6QLz0p!W=J({uh_As41nIYD& z_PJ*zn+#vH$v{t*j+&Y}Vwo!g$F=T8tdOiiBQALe#n*^NyYh(i!^e zXFHH7)Lw~wdM}ADYN)odGG)_BJ)FIdZ}POVp7P+F^=q}?Jev9_|7~La>bkLMEoE&N z&x~z&Xz~H=E@?4U@<_*+Q%VJ3A&^o5FLMjXJvhk$6+ov~a!=;wbiqj)N9c(W3DQbt zp6o=Mj!wL6Sh#h)gpD+pO|a;*w#*_ZOD%>4aYM=Yp(CeNQwduV0VsoHO3*B!k}7c9 zwyLc%-MU=mfONNJZ_O^3>IuZe9do$jsvu=n6tNtF?xO63(|S|{bQ`5?V8!p{hfXDW ziInrFu8Ou>N5bjU3Ye(1j%1UCR2Q{m*b>%lgYZHTZin{R3wl(Q$k$b$c(j#MfZx;x{EpY%@b84@DB&iL0!DW%h9`PB|DEiDT7 zfvvf!oULPdWzhjuKVx;b7kCL}hi8$JnB}DGjaA1JEG?iFPVrkwY{_g}wvK7g-X-Vl zDjTvrN1or*gmSoFew_~h4@t`b>sP#I@WS+vVf>r0sZ$ML_A}?$V5M^a(FFmFDRTl#G$OvH(UEo4grClMuLx_v2RGKsk_|~Z>QsLPu zwIcR?bNc7OEPyMFNw{?1WONfj1WMK0|Du1`zlqKLz63y0M2{pHa8 zK<_X)$k{OuQKg)X&f=?Uv{yRW<;<|^+L+9`ih#tbD4d>Dn~+&II51=!IhYht5fvI5 zog5oq5f&O62VAuB8{Es>N{S~f*~$^zj1m2ZaaC9~K{x;4W?-WV3@wq!g9om3oF2vW z+5X_8^sFfMk@nqCuN=eJR>Wb;MCML3TDq^lr91)AhcOXQAr7LTl}@vD$;l=BppFk& z52X7u$s^zLM-=le7*Gl5x?sJOjF9L@mfJ6Bu?@F!GXr9rr*SMnO`5j{Y27-o99N{8Lwn|5*)#9-X@+SCf2RII1Y2z?Vf1qsRAM3t{{ zkhiGu=4|96XySKqSDAN&Uw{on7RI>HVV(>z(rc1JNA25=G*O*)lXSGU>a$a-08n(+ z#vqKdL&5c)9j2cFb`SM%R~m6QQDhh~dQW4?oiB{+t;kT{-tqyqgKeWUI&aYVVTjQw z2&meR^CI@H4V&L`a^BW<_ggNNZp+?Wz4e`HC#S=bw6*fs;f-4#IJNiKH%$vK-F4T; zEhrf(#gs%C%Ik%GC;=a%3N|hsootg~daG_K@my-qLmQqDln&h9eehcWJ*8>#kQ^m973*wgb1}4GtTg^mp>!yeMkU0@`Z)S>LWob% z$r;r++)}XuPg&zcM+cpTR%cZ-?x6?nu8U1K(Ts?VOV^l;V5JU*`=B^3Zs5SUxMIZq zM&MZs@tVF`I|V*;Yj@*yyyQaRv}iNa^7%PMQ)A3bE9MV~sL*Hz#83tL0D;eK&Pkt` zn$wh%IVq(jJ1V*W3iNz5M7q_{5llK5w>;>eg^PQreb=w2-e*2}{}h&|y~PIsiC?)7 z*<93)CAKuC9oJyQsu%%aR0nB6&1dYX=KV@flK|iDs`w9$3pe_8KQHfp?u>qy5Sc=mz zES8Qj8|Y~DHO7-pO$6X%f#PleeXUFw*aYSr^=Fj6M2I6ix9UAnr^JzVJiVu4DL^Z- z7fXSCRV`E?d#re>sp#Y1H^SQ2W0X`_GdgEctYt)1l3#$e29^Stm4@Mrol)#sN?OwP zoXlH8_{>;vT}B5n)$^?TOy2d_BirgshM10CcX0pe;cA#4C_rI;Ft(<2X7$*G4-@Xe zGtvhyt*FY&3G?+gz`qyp12BJJcX|cB0L?t#1$@bXjLq7%)7GV`N+Ix&A)s=%V^UQ1yA=-GYDrVlxqKGzh;x`o-%5~!#h znLjtmGGKU`Pk@!CRVe{#bMYqODNCxix$vP%5FLe~x49TUki-R!ACo2U?m>op*H-1O zE_-e0vWX*ty@K@FmCnF?o@e~2l?)XIBx71I&hE(CCQGxLHQP5#vYYQXe%acV!8Er9 zLymDHbLuN=794L`_uS&jocw6LJqK#(VdNEz51ns;qc8Rw=)`Y&QMt2*h$UV&QGFwk z$)Qq3LT&Qd4Qa{x3{srP_hk3);xu~YmNoU(^KEMvt(rTiJ5ITy^YW`#Pnf%FTv}|p z0Vk*;-{EoaJenuA257GaZeZT7VZR@cN4Y$sdI>&o!8&uebWD`?YQKll7%uHEU)>EB ze!sLsdJ~EOWuY0bv!$miCP5WpotaYjy07*}Mw_NYq~&XBOmn6dRE;H*Ycw%wk!7Z% z&GILwO&K{oE+Zo@Yhv=!dczCG!>?f%HCzZ)=o2cz?b6&c1BcF!vsi3inU&RKzSv^i zCLy$Bd3pZGk@@AzOMv_Hk{uH*r-P_7kv6lVK$l2ZhkcndiM7vAL%)H zIb57OEM!c3?c1gP(h~VjrOQ)0FLYN*>{CXhefxl|1FY=HHrv<%n-iq|WSq3+ixo&W4Keyo1~d+i(29nux0?qBR^L*4&5b1$tR6-n1rJJ8`(K9pH+|3ie;zU?WXG%rbpiYTQyCr2ibd)y$J-%|5 zYrso^8q3%)b1y3wmmI0+zQe!%npo^d_}!uiam&-%AGOc})7Hf6mp(Y?Fo^At+JXgJ zCsSw9TJ(3%E3?d&e)jQW$vDX&pxe_ka{OJ+eZQuib zf`*kGq>o`3rG&lOhQjQ@XQNWpd|v9OJtrSU0xxv*y?O?%{dVnhs;6b_z@k|qy??bI!UReE zq!KK&5H$_$bFR)oPu(l6l)jhef#rtot=qZ;!`fY?cWGzUTWuUaq26k|CSgFUw0OMr zT@xnMS=DRelUk*f6K}tL;(*k74NK?)$!sTY$O3Z!1Hvoq$sNa$v#+H(X^@;qU9PR9 zO{u+JIwJ?jlXZE;lWC8TOk^iTH~>NiXC5ed{oJ`@xC=Xn?ygk6d$%OxJS0ZDoqJb) zKV=N?f#@1ZhfBsE6IYOPC z5ElideOz=5)!#yG|5g7?Y$8U@vIxh0!!8qpAt9qn%&AojwZ$~1-O0n96fMay;*kWE zftXD;uu14?=|VMoTD=qFP#~p8f!AKnowXFs;*IXD2EiXB_Px`Q2ai2+e&_GelFlbi zNJ8fHOl%C3IV~ftZa{N-M0jo{H9=)nhd`~0>JM%*tzE|+I=}vR(ozSUIH7(2#bkD_hi)b%`_ZMusO!Uyl9DEfJ6c+N3v0xK>Qwl7+tnXij?f+eHa<|_P4dm zF!YpRTP!S{%qqM=6QiF3p|4dNso4BCM%#Sh~AUM{e4tlsS%M zl|yGxBy>$0B;7CHi-e#x&>qAzb&#u*-WD1$ETMoDRlTAw7r|>|9LG?sJ!tRIpXHn# zM0LuzR9hEcjntC%CqY4$cMz`Ak!X1HFsZRSXKP8*Gp5eBmTMUWi|c|$n^1C@X|k!*Y9 z-45v-3=k7T1u;7@AygPhMCmFC3rf5EufGC>GN?}9;G zyR+a1a0ffNZ-!=r-7cme8}3FRV{bU98lkthw^B2t@pg{_gN+xropA>>i3U&UT@sRf zVmh~-%)!xKgxGVhF&RRWVw~TpOCk1lnuH?nG2zLkcHC>XB~KpSFFR>sYBPpd^br*> zPCx{Wssv}K9L7e3)z}JlDs~lZg*J-I%q}cfO&>FR`b1S@-Sn{ys>X?n}-S+!BQADuLMdV^}`q=rcis&xuv0?9Q)Qz70^$kEhx>Ysb{&x86W-H(y_ zA5)#cvj*2=xCFFGqIwP&J(Yx2oyI2EZ^NCTUnE)e1w*0hazXE~6GJEl7tI$Bm^@&J zs1*uJ=dSk!nMde9rvzavQ3fQN83b8$w)3bc70EhXhygI`<$*Q0pUXOp`$t(1;r>*~ z4N975=!lPQZrNfJX7Bu<_J?5#@_y2WEYQa6_{?kRGEVVFQ(?>RqHjzv~+T z@noOQW^c&}&OVfLG3TM|>DhDeW|=dO{<9b6q~h`&!=Ayft{z|6%?AMdkTo zer{gh+(+`Ab6@QCKb04k`yBpw;_Wy5&*yH)eJ;-s`*7~3osg)T4%(Ld6}da{{|^5b z5&IEb_{wAQEOYng{oEPjcjE=3ngEe^?Gl9#_sZ$O_ZupesWvayTPrur#ybvIr;Tj7kcxj?()f5 zpZ=bIZ+T`JdDhJ*EAP4au)h~lKGq1v@9+7^XPXz_2$G!Ld{b@05LXsv zePwvR@pIN?gS)Jnb*1zDV$*TM``__2^ozd()}MhP-*@w|{`0ubPQH2U{(+DqXLYFh zHtJ@)e*paNf&9Dr-tVPxBR{$IMy@w@`_H$B8{=`kYa^vU{=%;ji+=vfuMsT5t^efx z-Q&osW|vSGd1#|^cb~2=xvNiabxg}eY;%c7aFln;`*Qb zdLu-8z5i%?kUwec(Px{Fc8}3W>8t;PzsCH3I#UoDcK+nfx4-ssEA#+ONj7R}hLnx* zGPo&Wjqo*g@9kY_KYIE|iI4WAJRfU(NcX1yhV%^Ma)}Hbrt}MSYpjr~n!roze)4$W z*-GD=3Uw~m-<7ls@V7ONkWV(&7>AOlbbZO*jURP=>E7g-jrF>>W=De`EgSh<<7D|t z<4l7aeadBxbBw>Qd-rc_HFlF*8<+H?59zmZMK^Y%9HlQB?=ZyG<=ODn^)#MKde)U( zrVB^gf&67xoLhO5-)dB9ts5=j*0|jO!`Qty9BEkTf+5BqKQSimH4a7Fg5f7$Y&>Lm z@9p_}c=Q(MzrnpHEp3=H3}r0q`^v4(&2n7h<2RE(S?O=RheSVmrgz`&KJv4TXS%y| z|1#vMyX%d=(8tbFAM~9!m1kmCIF#A>uCE)v_0}GGbH6c^f5x?EpS_h-jK?mtjZ3!@ z_h!S&XPdsD*Ug64g=_rg*24e!(DK>FAJHy_71~yBp2=Po(g7!TjgT z;_t##2Z!O4%9H4L6hT|0`h(vZb!} zpyR3kr|1ItnJAx{`!9MPh$SpV0Y1ee+|x!VjIfApoh3om|JnX8JV#$ z?P^Td8*u3@J*sQ^qq6uT{~N(JPS+Tp8-@L++#qv&fxFWUbD)0PX8-u^rRo; z@4KMM-!uzdU%IzjneXM9&1OC0-~4y8_3wxMbNV9S)c5z{{{6o4m1b|1syXo1AvbUA z8AtZgxfpCmZe*X-Gmi21b#^uF3^TLh`fC18+7g4JY5%Tz*sGvFC;1*K>R`xcBzF`SlvTLHHf_ky1;CjW}cZNn@W|dza7a!pq;@IG%j@#_ru*?Yn*b zkABG8W@rA7B4Cs_=JJ~)6=cDZ~3v=YV=o&-u(Z6beGT0_Wi5*maojV z{i`r^@n<{#J#cOY%53+Zq5eu)80aViOuz~@y#LQUXGi?cr2E%WHKry08l?aIv5jeo z|NXi7>p76WzRCIRuP1fu5J(c;HHK)W;;z!Yn_4L@(-ar%;-X?SA(}Akz@kce1lKcg z!CB_QS>aXjocaq*qXy@>3ol~D3iby}I)n?{P0}e`XK=la3!0y#ONh@2F9@%wRO*xJ z$5p&$oMr;H(E48UJvKBlPkJcnsiYT^-b}iX^hMH-Ngc^TvSqS!vUhR_O^tsXC*PeC z-&OBdA5ddo7L^?!y(qjSyezyzp#fH%rd~5%GeOg!nW$N!S*^KK^Qh)qK)*)!O-)9* zBx`VCyie|n3)FTp#+hWuEt9ixmEanNs}|Q}T+O%^;#!3Zvg_oXxRA4CPyw5L_?OV-Y2Wb9x{ULBj1sk;H zoz|;-GN~SC7}EPGdKcfNclCbden7b&qIZ=!4{2ypIv=3ir{SK9*e@iNBR0LCQtn^E zeIWM0rgzYDNdo~7zJN#X2bB9EdPf`bWxwKC-i`O+~WF@`x4$9qG zxw|O$waR@P?vL>u^j-Lf-o?Jky`OS-SMDCl-BYaz`U*K0 z(ayd>EBl5Il3M|N51_9CoNWG*bW){8KF0GtxYpyk2WcLW6KS6W^h7{URGIK=kmlpS z>Iqz@a9u*W@5-~#I%c75%u*jh-c1m$)?T#R_@airiqcpky^C35!_u5a;v1z`3F0D+VkjZukdI2SNYfYH~6>sxB2t@d;I(S2mD9;Mg9~1GxWsI`7ih{ z`LFn|`EU4d`S1D5{1v`Km?bO~?i0Qez7`pgRr;o@=!QP|zW9N70X=h{zHe$YrWk4S z@E?THc9d!vM%-PhBdTcCeb_NAoivh0)ljmUY*tm0t=NWo9LC%Os>w_c6Rn!Y#Hc)kl&1EZ8huKPYnCc{3#ZFT_!C7%u*!?|{i&VXi zQ#@W)y@8W4uBzVTIykNBEn0S}cX(?Y2>UMYfKlSSpcd4s_XLfgQN1si3+Ad11V?PX zdqId7;#D6Bi9(|4V*!()>Y|V*p>IZ zMT#_~5s@OLNYjXjlu}A*jEIp^L`ow?M2eJBO1YGBDIy|r5hF!PDMgADF(M*TiWvXD zway?~?fu_c_Gi|7v-acc$6D*`Gjqr(4drJr?~b(wOj^)J@HDE+OkTmPmEut}R#vTQfoZdS5wU$HeQIksuG z=}Mk$mTi_&Xj^Q1SaI1NwLPj7*_PRUtQ6ZGvpuG`ZBN>sQarYw+n!PUwwG)#DIwcm zY=2S0N+eeO&nY}TRvq2@c~W>*bZ>ZHcz(D&ye!-qULW2P-WlHaQA_KhpGe`Z8-It- zgfE1zMzly$BqNd)DT;)`$HQl$>;73AT^A{j9{TJQsfk|stQ1T9tQ6fG-5eQ1^7Eva zGrGe}k*4c(<2^DZGBbMYU#G~N=;`R`$im1HdjCW^AN^E{$>^1sE0!KT6j?!Q6}=K! z6WJKq7TFy+5IGV#895iZ6zPdtqbbpTNO@6DG#0IlHblooo1@dBtF6D$Lq3 z9P9kYR@YpQ>+^}(izR+E8|L_KjKb`v*}J_l8FR*5*IPtAC-nCmA}EPy@- zViRIzcvZ(nBUXGJuR-xx5yQUhIWY-4vscBP;*W|=v?@apF`iQ9Ds#o3m37MV;)HTk zIVyUTW6Im&L*;$hUrCcu8C9YbJ(QU2kdG?k<&Wf3$^^MV{zCbt{H5Hid`tdaEl|F# zx>T3)lp0V2$~twX`fcTD^i z_7r~9xm)X&soF*DqI6K4k!kwh^(!*nz(1KmQAOTtBpFGvui-Ep@)o17(O32}`s064 zzSRgDVcDM|j2vJLF^1r`GKU+(Wu{SU)XFTQ(P)&}6m4XVaoTuY7FvRqpd4svur$ab ziaD~_GS)IymRKfOCP=rX$rm@(`E~0k>sUG6dWZFH`EBb=Ym1y~z1MoL{I2yo)*s6G*2UJv@*!)7b*Wrz zU2a`2AF)1eeOz`}S6P25AGNNwu8}{o?y>HX%dH<+Kah{v>^8exK~YOSZmYIc%ayio z+Gfa~*k;;h%AeZqx7{y0DQd|lC~C>oww1P@$ThZ3Tc=!W`>pMF@+sSH+itnu_MGiG z`E%Rzw&&#r+kV@A`Hby=?SR~9>#}vpU)cU&`-9wMd)4-;{H5)f?U>wbJ8nBJf0gjZ zgg?qHu)6Qyg=-QqR8+$P$A}4H7|vMFh&r6Bo)eFY=V5=Fm75fYcu&cI6~0easEk+U zDQ(K_xMFxrnM~&}TroTeo16oS{D-oME%K1emW9eCoYjWn?_o$Oa=4rxj+u0^PH^5dG1l!0IOXmzf4vuzd=@uQooVUq1^Loft*jaD^IIMYE-_ima0|q zT{UkTwt5q+`B^oetXXY=HJ?y_puVR54Zm&-n^rs2cQjpnR7=y+)peQ^ zzaYMz>|6c0R-hHA8#I^ZQlHU^wPJOn=FvRrFX+swZqkBUgZfMDYuYUJMe+pouiDSG z-RgVVVeQXaZ|xLPAMyNo4R zXj$YxS}xpYfR+yr8KMm}h8m-^8sn7lx;CDCKx={zG-_Y9e8DnS`wDJs^d{>EHlcr& z&bay=WWo9*vS9sATb3yg^w>_hO$F|wF zS^u7Gt8J^kz_!h{P5(YTVW+;3JV9@>?X~UIAGEz_dr@z<9kd^?b?J+3M{Gy* zM{Iwx{Ymezov^*8KT6iG{}`6vf!73%yU&Q*#V?d}u^v`FS4m;}zK`vD0o!*6+xH5# z?}M=CLghSJw!D=sJHVD5WXrxyPKV7-V9TD!mOT@ey$*kqO+U&u{bRQ2$JnOVkxi=! zWYcOdvT5~ZvS~GkZQ9Q^9b}uXhfSYQSFkm2QQtuNE!*@Cw&~w#7R{!1X^C2I^$*%j zT8jEATlt@0lNsutwSKVm6Kv~$Wm`WDTOX*t&bIzH*t#Fr`DEqY+Gy>InxTD38>?Bh zaoSh41hRWAMVqZXh&m5z9a<^*fL2aEfYN)lecDuQzxI;$4ee#5Z)vZ?h0MC;hY^>-}}7 z_BeL`1==dzrMtAB>Lq%K)~S1Rul9r<(IeW=^q3yg*04uC36ILuo-%TbJndQbtG)26 z(b`MKmyFxA*Nm?lQ?=8^H;iv;XK=;+J?#x#buZH1G*%ibwe!X*W0m%{@r3b&cER|W zu||8xSZ}P?-ZeHF8?{Tu4r7P*9(Kg9YnRzS--mxr(XLshTJF~SSY}#g>Ni_jEG>Fp z_~%;v7Rx%z(|Xvl$+Ah0S~goY>oM|AeURm}bvQh&8veGy`Wx#u!)ATf`kc{=yv|4> zuQP6i*IhRHTmNBw-*8$#w0>x0lkXWh(Foe6+NK&Y?2cy`gW!!180EI_*cKRdwufv>jM4DDCycSQFE;KZ ze>0|#zZuiuZ##`|kdGOy5A9ce>l@cDV!YGIzCmw0pw!_S}gkeMWxF|2dL-vOnhD>Ym}A z?Oxzs>|X9(?cU(t`cX@J{YdUzB>#2tEwA6p)-%H+6 z-tqo%{&keRlg#(eFaNg|ywfqpJTrM`U8jHZeY1bFcfKF;b0&`;$=goyUnTD{l<4%X zhYij9KOJS2cZ$LLz3S6^NxlqUmaoVc@|F8)d}Dk~zA3&b@cK&M zOy3;eLf;bK3f~&vM&CByZr=gl5#LGQIo~B;&-Dnw@7E*2b+7%T-+bD_O`E;$rysY; z>(+F`I(WrDTIu+@d~7w$`ML5LGY~^8pt))AFGc-TA1g5qInB^cW8c|}GHv*aN>RCW zT$0AJ1Fu8=W3bfIc%AoO(U*r0{->#=vt)hAmXe(%`%1b>j+dOd-d@QvlRh*5XgHoq zE)d1qO0LG*Zlq{=WZCCQZY{FRJ;t5n&TwbBi`*f1xx2w0h=w+Ng$Ssb>|}JRbR2 zip~4yk7QBg`y`Dp!W1iEV^PVeSNtzoi zdegyYKApTyuPbulvy(RvUGZ5deEB~~-m=&=^+9ST`lusw`R`1qGshOfl(IoG#>wn#NkDdNkDN+@w`gopqzxkKhdg1gFaNg|d}AY{d=n$1%;cNun~HVMpyX>onrrgMrEgI*>t82dN3nUb6Q)Ct;-%<1Z zv!(A8N_0o&|8w$P_D}K)B!|DhzrgSFm-?&xqx|Fj37^tTif)Xo_fL-=i0<~!^3RWM zi>`=yK1YiEoB#eV^ENK_ZzX>_rRWl*h1XNe8tw5f^LIwiMK1W)`?vUaM)pNl`1jE{ z+~4It?my$d;J+Hs0!e|4KvtkA5DJtBY64>dO@S$anSnWhg@Gl36@fK@je%`}-GKvv zBY~5FbAd~N9(=u%66_bu3wnaFU}dl&I4;;6oEB^i&I`5$mj+h_*9A8RcLeta4+W0} zPe+c24@TAp&j+uBWGFF|9&(0Up+KlCR2>=}nh=^Cnh}~ES`bg~x^`hNp&G!gIrm!X4q2;kDsS;qBo)*d=sD zj>qQV>~;p{m8-EGhy_XTp)B}A==wf~-+2!cYrC=2i0m`>3fIr~)Q)+6y>Yhu=$vMr z&En@SI!}H2taSY>L+2LroWkdduIqb(kDnzd#z#+b{JtKm5pm5ZdOb!WUSd3);xX)# zaVat?9xHCd2#x}u;n(bSk?G`Tale8G#ZE`szzY69Ep!*jinSWi}k}R4?Y@$mi%ZOTAhY@o`?PD(%7oly4YsS)85!2MEldm4@a!= zRJ(V$_qq>_SU6%0B~JkVGt(OPF=F>=_j&h~xL$e^nHGA|k(?eEl|%W_o(Z1Go*ACm zo&}!8p5-Ivpas&GLC*11e=48ywt7~3Hh8v<*yh;F(_Z8sX=PUJ9)zwh) zjq;86O~U(h-z?w!>VDPzeC@tvzD~Ta$7>5|%;ekW>+&7DB2*U|8=6QO zyjrL%jxx7?T4+(IBeXKK7PP6lxo%2mJ6?N22SZ0gr|PCu&%Ag3&da7Pmy$;`Rw}-33qwp>F`0ymsKsVid2h9r4zs~Wz_V6<3 zJMmiI*b?5-*iyZhz8epB(f8cddmHD{H{FeMDb=sy@4-Wnne<(F{VM!j#ByOI6e*9? zM8-s#=o{w9oXEoZRU>6&Nn{0mBaHvKES~BvMYfG}MRwE6{Em2JAaV|`OLa*2uDG)9 zQq)QuO~DJ_853Vm^!;(&rI9Xthumy_i#&1yCHgLz-Wx`n3EwEkzgecgN6w~%di)+; zXTEKhM^yhysbIfD)kIy&gopl;CRx;d0;!0_pVrJ}LBvAZ+@$x+(B zvAb?zX#w?^Qe&jlhrdfpt4c>**OiWMtZUd+I;nJeV_j)i>Ao6!X?y7`@O-@5OP7^) zHr9dGlf02ix0LRrzfgK#X&3U3<8|gn{wE3TUl^QGdUbHdU~O;`WY*v;*u)Y_gNu+t zgUdlRgU1YR8a$e$KMasZM>ca zA2Dh0$(pHy&y_VmUMj0Biw*86vzDdQOr=!T&wK~vm3gjnJTF#O2|emHjxC#4Hg2e+ zthsC&xOHek+3^6m9Y8*~0IseDiQ!H<d-SoFATj}p;aVRWK?8T6jg*O z$}4Ir##A&_OsSYzF{ff7(vpf56>BOsR&1--U2&k|NX5yDa}}2=dWKntr3~wblsC*X zEH3xuJ3^-ih~D9)yhL;SF46hhoH+<~yiNmK3Zy7#!_@dz*!&eSpJABjd?ZfvBKZyTt z^wjX~;g_pKRYH}cs()2Mm9MI_s;X*K)%dDORnx0xRn4zzuUb~siT~eoOV!S*eN|mm z$E(g%U8uS`LK~4ZB4b3>h@ugp5#=LlMvNKJg#Qn8CU#ggw5z~gU`pLgGfE=HA#NZt zY{b5D;fN*JJ+8oOBW8T}hy#r)M;xh57;zGNxl1E@s;$*2)%|K6jZ>@huscFa)t>5D z@6BRbagN$Ba_h)lBXO1) zd3NN9#>b4>6t~*e7r0!(hx%#Dbml_t-_0(JIQ*cJktM|aiDjO!$kE?IMYg~PE z{WS8R`c~|0=GC_`V^7mKwSHawX2h|*^@r+@)t|0EUw@@>Zi8${WG{9$xEcZtWewHX zTTEz}+%N;ax}afk!}5mJ4I3J^(%GUdTrVkg~#oz)_`qQIVYQ{(nVI)fi=JcYCH(v_N3<)yfJ_p7AbWGKdh=22^2gBh5_U-ZN7axPp+O36+2E4+pN65a2n$Oc(hwx= zVlt${xps}lVwcw-Ej%u}Tm;7dP0W(|oXBJB%_Fj_mC$Iy0cqE025cfkEbfHFtUy}1 z4|cT*+?#uD6B=_3BxZ=^t&mn8OK)x=AwE|$cKqMUkO^F}x2RxE5u{zc4btHL*hCsQ zh5Km}VanBLq|zLhLMxRdGDxrNgtT#=Z9GOB_fzNglZ6Ypc=iU;D$-ffy4y8ck=|ku zWD@r?iD%NTz7LH>+ze?G#gKMo6J&2*U%NVnxi{oZaVw%#=q(x`6Zq)3sSs>e@z4N{ zLTnRWNaRD}|BPl%Wl2ZD{~yhIS_{d2wyUGT3EV3w=va1nfi+|ay?As&^aTqZ9gYC* z&9kcV{MdMnY;56nt)6qKt|sWXHW7hTdAtccE}h3>SIJH^?q@HaE4y$*W7lF_D~~w{ z*;|mbt6zlFxrLipCh@A>BsA#pf7C&0+0M1S02KTQnKTlH2kP`^)~D~j}aFjt>GU;n=F>p#$cAj0|& z^&g6ezF2=)L~-wZhZuzW?^lY!`YL^u7)E#Ci%Q&sze!Z#?)zVh2Hb(aQ#9(k^j%`K z{;d9-_@e&2zF&;R-T1GH@wf~Bw73&@-oGVg&>i>UUfgqkS=^_;kEd5=8&1P5<`@AZ zDB6uL7|mj_ai=j=bm9)4Z-}+V4C5a0l+j{*TRd&tYs?ZqH|E0Fo-q~}-xr&Vhm42B zX5(SwVeuAVhitW~h2w^>L!Mr1hoAF%Gt$4DjKUQc6R>&#(=$Tlj0a&M8%ypig z2MwNnK*>TplN6wPpr9G34@8+Fy$G{kj1~3qiduO^t-PXEtmPy;k8_uP7oM)c^Ag}` zSY4G@Hv_ACpUBZ?>$63Me!qS{c#b{?mhpi8fJoKnV(n9S?NhP#^TFTKzX$ySeF2`3 z`aZ0|%GO|IYp}u^7K<`G+4L~R@`(NjZ0^VU3LNWk{c%{*O4x_WmZ8Ejo)$@XLSsEF z5>IJ}L_DYQ3^W_{jnMo8c4K3^>BV+qVY|^`H_r-Pe-74@2Xz+{&7*BYJbb7`^B;zTMH=*y=yF~$>_IL~9 zdRu=Rd;wON&Q_QVD|{a#x&~Ve!WJ`yVPqNEA|Lmc<%pq1u8{{#zEL1DjY7i(9%u{{ zgYi69k%$?^MhP@-Sg@ZhI2RT?7SG7tX51$F8{>>GgU1`=!M7W?gC`ghM5*x=<0~R+ zG{M#-Y#mS7;n}b$qJ-{T6Mfn4%h~Rou=^I_!qX|=77s?KNk4U29aUi2rts%MdyUg`lj_w zag(*%+ARvLZ&}|0pSPX|zYQO|iG56GA5+=KH2Bz$*~e5_C9A|u>}f&vv_$r_685y4 zIObW{yHxfrJbMW5N@MSGvUioUcNMaCW#CD?9OROZMcBvObcZ1{G~9X3YdIM>UZmRgUQo${T}@ucqTQ?qBslTjup(Od0t{>J9gUP$3{wm_P>0?qqq5lO; zo~G-s>92vw-=uz8e;rI-m&#seXRp)Q>vGuZEbMhT>~*Q^b$0f;0qk|EA@QVGfPK%# zzL)(m-y6ujcMJQTjeW0-eQyx^o{fDkWZ}&vhP{g_f$*B5)xtdK$Shv${uL^uO67d9@v{b(8?ZY zum@V%10B|9tj`FA{m`&(vThOvdt(B7V=jARK6_&>dt*L(V;*~>!QQB|H>yJInQkl% zT@$oBc}9rP6(Z#;pi4wOk6ZeNE--DjoCs!3YhJU=YIr@s=~6)pljslOB2fqtp- z?|N5gTbSxT3fh8RtJwO<(57$#bSn%sv>}Wu6IqW|)`hy_v?jD0G#tDKBDnTA2Qf+!>)c@p98b zGetU&)XMu^gRim-fejY^(Uk^f0@AMC4r%Z%wzsGPYrIF-xJ+-}@Aei^=&f8=Eeg#AO$9B$3ad(9PgfA% z41sb$O=w#cgF)j%W1*9vv7tKXeg+yHsz6V3KKuv1xgUdEI@+ip%HbK_i}-G<3G@Pp zbB)0tzK2|Ro$dwIVWw4FQ;1+S^;G>LsFFwzfy$X;pi-s+)QALkAQxqV0n({8AP?sj zgNm5EpaLSX4U|J9X?C5g`w0lE2wfH^g9&W~9Yo51gX~OWK#5F2kd@1!A3?N)x=|1S zgq*-Nrc0nJOh49|0+;*~(B4YuIL^thfB0UYJ3wA*f$l7q`vr8T1Mpg9J#=hg%1a=& zF#0tFI*Hkh`0Y zF$%=?XW&{fu$^=U)n)rL9s#la8L+7U+n?46Lch^J8qr!J<#rHiVU#mKtC;Qstsqjj zfR-`62ONCcd$i-6d6wSgeET@7{`z%uH=)T!O$J|VoqfR=%^QSDrfg}`PX<+Lw@hWd;FdLwHW0R=vI3%r}Dpqj(eQf zfLfvJ^kPnx`#~$co6w%F-sNBJUnNwFAAMdyeYR97e%!-^NT`;pv&~%XmvWzfg?BD; z52D;MF4qUd)~VxfKU=5uix_VkW=pj;sxRW+f~~0WlJcH^zJEVv3+m8YZY3V>YssD_M(+({sm}-jy8pAXtW-axGrqNKhZzgTvgob zO=E+n_}O-OjZE9^hHfm^fW`Vp`^RBkXhdU3ryK^s!%_DkQ*Gb#L)MEZx& zl@n=~6&-iGRba+{q<-07hI~~!_falv$jpVUtu}KF%&}Qlfp7T{D`6Y(XFseKtx(+n zw@kextA~A(tbKx*xQfb^irg#k32=E3M*;9l>m( z1`)5NS|x<<5bN9^LX$$`A_yb&USvYd_MT^I24S60_t&5^ zAJHi$^v!#MNO>7_j7UuoBD{eK^Bi7Bq&0xnFx?62WWt<;S2E#f;pIf?M$poaXbF+L z1-f=7tZ#S`Qz4Evlj$kYbSBt=cWM~kqwWDsCY|~}AdCR>X9YDe4FHX2N&<~#f^~XF z6RGn+=q+dgsD_C4X4rXyWHof9OlZN&RxEv>0P7H`y==u=8K{VKI>z8FU|I;uA<|%B zUMG=y0K_e;?}0K%hy9xHI+&;xyEh#=8z_Zz;vfk924U{Jm}|^Eti)?@4O}mHr3(>L z^yvwEdb}EPvGQJIf?9e4o@-3!gz#M98V_hqo=dJh$bAF43#1eO0CjV@DCjH`*3xsD ziOQX1`X=Z&k=h@0lt^OFBYyrp-A2?jTbB2Hhy84?uN9T%(%HVOQ^|bR9-} z@F;T!g|lCL2PNQ*9=1Q_5ArcjLGf9%*8|EaJ^{L@&i53#a2}MsK?Sa*&|#16$#J!T z-T^sX^WwUGu2#?>=rUZ>K);uddmJ9R^11|Fx(nwN1-m3qimL(Z`&-$Gdm#@%cN99! z6$2?C;qu^UzgFkFuetKfy6!8kejr?bxG%YIH7icYweAb zu&2viP`CnA1U+p+gZ2}#S8||W8SHro&Jb*WbTv`3lXB^-R>Gb~S7W{^u7RjfvVll1 z1Fd5UA-9An3?2KHJ{CGB>%IbAJ5vm_h{=!KBqr1?F}=qpN=sUOF0?!nI`$qN-d!@C z%OMVxu=mjZw1mBf`iwmv^#w=s!b?G=$ZhbHgUUfQtn-1m_f*$w_8A^p!qHp9SW6H) zP@@vLONi9JLYK!n>OmG0Mp)9nIn5g$jOmwsaCd_2~Agg(0$ zGNDY-RU&17&xoSSMTm7uC3M^?{6dc?y2QC?ujm5T*aqrm8U#Wg(eilEX{NoPlT44m z>e`v^2DK9DoygtCy8A(Un9!r5T}(K~7wsTY&VaTRAzEl?rHE}v=>ox0a5P-^6m4d* zfHpF{4qDH3(et9UM6??zTFtq1j8#kuXa&=85L+18Ur`6^V4WpwFA_1LXhBh%Icr7c zT+wmoQ#rL1G?z$F|7H^@uY!1#3eCYx&c%_6rgJX(Ry39A6lgM$3VSYU=5nz1q9)e; z)N*I>6(7z$>XVkX*u*ki&!}rHZN=xx5T8j`d}eTGTz9Gr=Ph-ejuh`J#uX`AnO?lV_z>t1*7d~)iV>;R6_ydj2TQG> zcD-xhQSQrXOKb72vh=uaKkFzLCJ?Wd_8SndmX-kG)nek+G7KDT z+8~_ij1-J$%HS&4#y-8v^fL7hjALT)WE>+1U2~L1bP?s6qSWWNkvrZ;b3nSWr5&Jm zL8AwwS8^I`yn*|2E9%w|sSU`jDy7+|McoSDB+wnum6g(L+=jly1|@;Mirf&+DyrIy zTwiG0}G z0JMup-v(kE$M5!ax!_CC?F4OMdJeRSNc|ts1|ls0TE{x{s%USi54m5pv=(hG#a@7V zTC};e6qKxYnZAG#-CMMtJ}c0MK)06bcB9#2GM0IBmB1@#YtEKItknXYCo#E}nz8Ks;;OWf0Gr;RW%m8Py=3 zH6sk-Su@H&JZt#Iu?xE^nrEU;BDobdd5K7Q7sPQ*xs0Ax#STOFcT3yAQ?bn;=mwq$ zj|W|_+&Sv&R z;Ii06jPfb;en$+}Dc6A(MNh_c3+NMd`2c;I<7teG@hvzG?octD;Q0rJqa@Xl8F71``0fz564)E z5oLrCZPgUy`lyDAs8Ucug#NV-6w$?K1=jAm7TgcI8D4RP%l!qEO)w&;>@6`XOVk2LN@s~&>?)n zVbX!fHqdF**oXa>s?ssoPT>n}t^@c)M^#T)M!2@p_g3l|ORH;h%v1_WlTQ@#kj^dQ6m(NpxktjFGO*ObS2?Z&^?#~my2T9-DoebXeB5Yb+foGeBafd z>07ALH*^uXGePONE;lJ9G#T_w)J-ay2pWhQ30&iTlrusU|7Iaq7FB@$Y-ua(p-%(U zW0uy!tKlS2mt{oZWqj|!xh{K%R*TA=4N?3%0Xj|JTd5~4j~AXSgw?U`6d#w$ouKcn z)K@Kc79MkzfR-X69pN6}ykFSm>WkSujB*EksLQlJguZBhfNpQd33?fQ*&UkCbZ_C# zfrvRcci&sMeIWb;)LOVTGzs)LYHSXkXI)$2M&B~fk5O*@K$?RU$UPK13tEcft}I;5 zxvhn(LIqesoON*(id8j!09wX`Ggx5<5q@p9t8g**nObQJ?n2#%F)s`06K3sMHhi)dm1u|(mKJ8Q0zD9cC)C^jMQI_Vy$%WHEH|tjm zr}Uw<9BBy^PAVj8x*6pr_kpdU+#Q9@1F82^W8ACnD8WcCxR!pwELU(Py$96W$S=4Q+z!(8 zcM2|~9{>%t+)>a?pPYc23r@Lcyj1rz=UNO|a5(*NTz4oPkwMNidKVm^D|-1Q&_24N z$FErSF4#j?^z!SL%>}y(FsH2BL09zh8_3;8SM;2_yA_pG|;;AF{pb*f2CkeI((n*FevEs!{6jq(8_d-O~F2*V7dEH zoR<3F`wI3E1xx6&0#Iu~hkqJs3`UK`>5HIy471UehW;r(#W9wrVRn^H(3~`kjdioq zHh`AmxGib0V%E(_+Z3m1Y19hUn36`vSdDp}gm2sU&W3`C={ca4n8^ugm}ih$FfMH& zhjo>p3 zRrw{(g8}j*R22njdN+;gkwEwoY9){ucf(=tLyLNUvW%?ZZ~w7 z@~=V%yDunm%rGgx+ffY?`YZWo9kk}K`~1@mvWO%jKmVlP3W9&+A9rA`)C$WT`6u#^ zGBxKP&OZjC8b=&-HbBeyT@E{lj(aelY?pEmp26`Fgc2ck8B+z!pI8Xv;tD^xl8g)0P!Xo4sKpRu%#dYgbF)wlq zdcW3J18P9at5Z8b-$dP2o(Z7mP-8{v0?;1NvQ#QZ)R7tk{T?+Ir_!uVL5;T5lUOa> z6O+F%72{Bz(5K)R+SY*1SbFEr%EvfBiTN%0?V#tOn~~qjl$bv=e-_4u`+xFVQWMbL zTWDoQ>JsR%g89=@VD02l`BPG0?W~)Vf@3g^PuU29=jD%0*#W{GpZTM`)E?;?QsBv4 zt|ny_T7i}1SEb;%e2j|JqIhmu3hbH7#Zo54>xNP=yWEN|1+zxE1uIi9yL^lse1~AR z=S;~1{SOALtFt zsYppgD=!;r-nA6TB=fE$Uj(6*yh|w=Hr=UqrX2YMaUoxBNj3N_9qAByYFC1YNw zFL}q3;mKU?NOC9WnEpy$SMo~GDMRKROkR%T-fUR&_UEB4&OIaY4ip^#jkC1o?JGHr zazDUv_u$h$>Q#E)uH>1Z|H09AB$L%0f^J)KL7cWE=b$cjL3x|VleO=HHZY9?@l}k5 z-EbaX#c2CMos?@}Z zUe-<5K#1dc{cpO0a_3O4uYCd4MY;5wu;yfmc_}whw1rpXCEY~pwgTl6>=U5F9cp=o zeKhEYAZf2gZbWO!?Xj0Z_af-3JpkQPpv!g_Xe8(&_X>Bd<({{vqg)&4oE?^^{Tg(} zPL{X}bc%b`4mxqudFW2*{c?}xoqzuk!%iRq+r97UqFb5v5EQW4=E@Fi8Rp{pC zz`~S=p_`Yx0`zs6l`|)2K4=Mab8`_v#7?CrXLhaz+AsY%tvPc*zlUyC&IKH8EXuXe zah0W@8GPJ!(6n5{4P`rMN^Uu58E8^&2-F`mF}Dab88jg`3+?SyUd$PnbG?=Exkab*!`bk0@i?g2H>ag_%_H92QM-vCwR90wgluPSoJgMJ4p%NYgQ1B&HTft~|} za!NtJQj2mTIT#yLfJUUOh0c?MImK_U7v&V?U{00IpaSZZ%+|Yda>zQFoJ_d0E2kfk z{wOd110{HX!%AMeV(L^X^`y{pePCd9SuGfYv?DJJ+9 zt|h1(cKX?ei7dlGhgesS+yg|~HRyJ;4iU4@P9_&@eiG$LF}P+#b{)0q6ooy~;V$ZlnFgJv=nf_j*!6^_qF zAJ9bBQMn0BxVFz8$AofyGMEs3vTJxIv0B+xq|-NmDmZtL-j#lt3D@!20Ve9Rhl!3` z#FT?_JZlE7)U$a#@QcD-*?l<|bDzG0NP8DLUTLEM#A|6}B6rR8R`mFUhdz-WtBc`kEs;t$y)p1&teIA6!pLUXK+>YEO zS<9fKadc$S-r{f2EzRBw8b-OQXE6eJEZU!ipg$Rt# zP^z**If&@==|`52`l7u6T}e(}oLpJ(3hhC?%fVizb|AMO{2RRoWniZ16NjoS2Rzs$ zdln+F+5}x<))Ek{n;8#iHnc26DSXCQl_jz$@;9O7IXr9lTp;tJWhZJ7owuw6VSO{t zS!flA&RA$p#~JOJr!o;m)xLU0=7~JSUF=J`GV#d}+WQc>M>3CtevjOvd5F7owVip` zQb)NccgTX(XF6cPoYM7v=03|Q&<@LYGxuaq2VFtAT^7uNf_2N>k)08zZJFqwf_2Me z`@=q`D|4%H1v;Y52Ic@YMr3Zx!dU~KR&-^qH%`XeTWcIaU$B?TTy5-%_j#4E8-%!# zxx(0na#f&Z2Ids>N@j<#74%=wEykU4s&c|Op4papElvwFv6kG^`38*~`=-pfmcAhD zvomK~5S7)}P`A|>7pIxV7|_pGb5QDpiWRAa}j71$dfr21fR|<%4|U^ zWuSu0snlmsPUb|=3G~^iQxA@U`sr91`6wtuF9*>|JMhiFX+w6s2s(T^otdc9$h$#S zJp;Kv0O9FZ&=1iv9@xP+Xw9W|)ojJtqg{Xw$8cWNx}n>otOna1l~$)?Az0@bh%5#70m z3Gv>!i-?|0adJc_+i-5v$X=|VEx1=ymG^@-;r>i~uA)zIZg6e`eHXOOxe>>O*E;zu zN#B?_*K*7E8tu;2OuMX=&Q;DeDEDpXI$4*ZzUW+ud|XkXui6kdHgSjP!W! zLCz)Lr(Uhlrr@}L!ZDUPvC?uLbjzKqK)>N!td@EYXqkr9QYTv4oE>~z>`R=B`M8fj z*XG30=<{r6JL_(TZjo~{biW2IaJGRQ$X&>}*qJ-$GkpOxmq^==aLgY{z5-pfJODZZs+5>#iBF@P<@m0fKjU$hsy&&Kya-Itg;h3D7+b>L*8o za5d)4kQKnP&c0fch=g-wK&51ha>v@@tmcNXl&H za+WW#=0_~=BdNZ@@+?X5J(9{-m}jzVflT6-lgwW&5cjbDn=FTrRPZ$-WHZY}kiGbK z5A6I41NdKANiPDNyNqQa%XXGDcDwR(lJW_b9VFGam|tPOljZ#^UnePoB&C%#B_y?* zSWk1P36(WG8)^lye3o;+LsFsgX*}m?A_wdcG-D3og-jPokg1|Sq+N-Tlr#gWBAGZo z7Fvrm{zZs1);pLT{HqJeLPKsc|C&Tv{5ikpp|Q)SNs2?v>$yxd^GcHHmq{x3vYzKh zoMO%QA$#%Da&{$!**v<9a#gky#ZRmagG}SGrwM~KI!W`vQW8il2=?lMY*zpOY)kk3B<~;SkEgc8ktjB&*N3T2u>Aj z+k)6G|3XsZF)CjrRvoP28CL3um4_j1@l}2SoE)Fqo02$?R(eo{py%OaLE##ElAG(kN~6Et22mvjg_=Sr4r zhYDF{D$l%w?Zm-7NfR3MY5a6!8fzTvO%B%J|Di>0dR)(TtNsxhyGnhw3yKEG>`e~- z#TW-$rh{+0bMTs`vS+0V8oNXIsFw2Ikf~fNjqT7OXm0Tf9+1f#H&TU#Oa7H5=^cV* zBb7bEE+&v(Zih^c_hB&SlARsGyP6prWW|A&;&EWC|5|MdPr(_FwbRn zvm6DPz#eT^zsApgHJ$aVSik8Lo<`gHgA`kz=>>WcKI5(S3H`TXA@KQhTg&B za`17J*-Dd5YmwB?RE`*Stq<6)>?Nu4nI&#HH0KVs3J1@;gCnhj`;aWCWU3%9w##=| zzRi-`QO*#vr>6-!*uf=}c|PsZJXez5F3H-{c%&)jD3o=iSIKAX@>e7kj#o-w)?{+- z4@vTjrSrJ#lA?!QyuoECCfL>Qu$%{(%)gbCEGkK(oF^&Cv+N2*47)NFQWqLzvKS6& zmn9^{n~+K7IY?@xQGN=U5cjx|V4I+LWmk7X_7WWZl=n!ZXpqV5%XZ~S)=VU+Qv6Ic zEkXX0G^YLYlhA4GM`@hvV0OgUg8Vd@%dYV|PB&oKs+|80!$FA;S z$@ZomWj;(&bFoZi%|(_xGb)c!UC)}|vb;c2t6=^vOSU)lHDXCCYggG0)mNFHC#lg$ z?Hb##N|v7#k4`j8c69@5l9}1w)UOcBF(eh!O1Cl(V#yIzS;NeIQ|KtY_;q{+V-MB&iOC#4exNJQvVx%eW_+Gm}KQvW_K1OS`g< zIm+xKsj;2QFN2dgZrkOUEw{R9Kg(WB8!M6uZ!XVrwX!ayH>_! zrm!Z+OrC4u^GK>WcI89XSF@Z2nJRLL+L3#=K=@{26VPc5e1%B8FsNK)Zh zlDtMr18aIX_Y2HlV_C|Y@hr)1lg;BQG!J%#^l5w^Pm7Y=yGC6KAYL2Rxdmrskw)fBEv>*C-zlgwv?WZo}1 z*vB35UXj12any3ecQv$L4nDIxc%IYay9C0UVOP9mxNgZUX|jvjI@xOaS)b(Qq$y_`$FY*%kmr2k#I086C zA8>Nq>d02@lE*7aZiUi zgI&#J*^6a0WUu&M?j^9*9ECcHa|>D4vTP)&c}SX9wW1E};2o?Z?!S4g$%jl9vQU1X49hQ4o9%lI*OY%XxOkjSOWf#kfEdNMSvV}{YD`{g*Gs_7qzru13%llbQ zV`*o37t5(6H7E0AlJV<(>aBz4Cz&ImUGjLP;F2`bRMWcTH(7Hx%bQuUe~IrB3mR!} z^L%Q?O#Up$9@nsMnvqRVJh2NNwV-vki-|1fkyO0Q4=|5q*~XHOtGdC-@pWHFda;7# z_gV7Fis{7ix2)k~EOSXJ4>Geoh!>du%JKlqvn+SA`~Wf`zIzXoMg~}ZkEBL6Y?t#%3XVleC$Yjk zS4_>Ftht9}3rk*ch33kxEMU2klBWnl4o8eAF->{ zezF+Kau{jkH%Ka%SpJ12&$+UNnOjqML`pJi%y`116aP&b!F9z6aAG`C9bx^mEcdcB z`}{Vz7hezB1@*@o=gVA%{Y#j>!yY02PI~2skZGpX$=#%p6o>7Sk0SRFi?>+Pd7*ba zmOcsYB{bHsuZbqGU92Xl%w&F!<G;#+?^(sp~=PJD7l2>2(8<$}V zRA@f!Y6eNAKTGyLr5`i6ce=EzISXPo0uQOM$q>{bj=Y`ui zcMVH!hp!6lVmxc;EMr#|vwVm(%z~{>{D3uVOJWuC0+#o&d>k^J{auS+xrd=i=4(2; z;8BZdT#|iRjA8x~Np&D(s^AgH&8&B@j6tUHrvYi4>tJ@6`_K3ly&2`UQF)~pGS%D{ zDYc~0QXrG#S7G&}k?b$>c4i+*h1Wu5E0wvd$zf^s;djhSSWafibrtphvG*qMQB~*v z|J{bU6T-fVhzN+NC@$c_CMpSQQ^zmGl@T{2uBZgmTANbERwFL8HAO@vZl&r__cB(k z0wO{ZLReJXaYIGi*E0Y2^O+1JAz{(>_x*lg;UmOM z59TVIX~UZpww7t5^ZKSvW)xgbOsksp=oU-kd?Xf#(ivBp=FBA~mT8kfZRJKRjenrg zwNca3ze<$;`;S>TkUn^?SlE*m`DZI!XmqN@$Hhy;O-6UU!VinqYb~88#i>Sq)PkO7 z3FprWuhG^aqH5|aP*^SOJge{zqDm6%ZedsXIGS6WxVB3FofQi2Eo#njFH`stqx&oI zbfdS}=qpcut-@`@yT!iZt74{o4kz4Jb6wjSd$OIjKB#!>ucnU0w8{J);htw(og2kG zqkpQxDo>CZtqH}eeS%?xn`hw52AgSRY>_#O@ulbf>q`Gu zaX!p-q|d%;8XT+mdB$K*aiC&s)HK&QOUocr&Oa$$E$OS>{LdA0mna?fA65A8#$Y$t zOxHql9h*0ri!J>srFlRxR+1K)nVZRuYU#gVTLYVmn(GX!WzgGVJoS-RY+=8N7*!he zcyN|tMncyK6doZ)MCp(JwZhMdQ$&p~Kd`X(fv8dD%~n`_c0D-rEYV6RX9Q;!EOTM*mBr&RbhLs+XhjUZa=k zyPgWqGzLc~e5=BmH~g;@qm|LOvbA#hTa15$;@?pCTG9HYl_Smf-qY4oj2hQ1-ANX9 z*D3xrv5#WZR_+;!v372ivGCnzEGC$scv&J50=I+Ib4AmE3Z| zt#sX^rIU&O1u=mWz?QCN0Ow+bA22%7Sl7zwXvJ*lq!n-TUQ73C@mQmGhd5Qd(ij}5 zuyXSjfH>{%;ktMzlGwrJIS7=D5==#yiUBWXAYzgxh57I@e~EmhNAn7F}R5!5&8EW$_lH8^Si3mUsIpUV7*((bgixysGeC;-8J4^-fDq zW%I@={I#et<*v4{tFhp$Rrq{uJ;vz#UOXL!nU%%Ol%!bkrk^cqe%m_8!rpMjw1Vz_ zGn}EQMU!yCf6MzmyzAC`=S&IDw|CBzhTh@KaAs_5Sk=(BZN&|~zxlLnZQp9!;&95= z@7RL&#jyn)I_l4Y`5VdBVTXA+v$oo|!IC$SYF7Qa|L4BP#&*4-?ULBojs;C;@oGH7`vzml6bFYYnTk52^p0=mJzY#hh? zB5col8*SOpwph1@VjGQ1)Y+1VU1M)6Lnc(8+4}6@XI1!9--$L|!$eLH-j_3_`B&k6 zZSFViZ~23bZ`z#L*d(Ks7=B@*_kEk|HG#6$v|;Qn>b4^#B7IFu^i+S$tf75M)JBAw zI^N zoMWidG3`$7ygrkn)wFEd;X`xw$=Qc`!^}IjE_@?4@IUd+9@9Q;j*G3^@(!oO7GyNu z^u=_d+x$9fk*yE4Rs&0)-GiR0vwB#+*34$iVww3%t)88m)Q1{XHNDB0*ESoO*|Etu z)kxdCXEtrkXHBnWM&(wTKu!rAA+!CyklInoFUZaIx1-hULu<_1y7u9HUFNjgAl;at z5k#F-FUmyiEVnPLeTbWM;6r>{K<+kYXSL0aU@7=s>zJ9cXSSVhdcWmud)L&sDz+|X zisQy6rH>u`C&vvwOphLYyyHgpa-7j)`M!Y9F^=n%Iq}h#>vQ2~{;R=zg!mIlw;-Jw z-QRKDm{T(PG~)k6x&w%Rk?rK8{dy`sZ*&j-XXoN{>FCSZD=~VS<9W9^d7}^W=D@*h zJDGhB@yEe|ju*5bWq#0|{QEkddw05U^kDZMBinXi+dh%i>G`9(IG*zYd$bQfH}Z7G zu1>qry_@U?dp7A}VeU)3J3W4M7xso$(@{@2fmcfUzFs-(?^VDdk#}Lf6Ikj|8!h#y zjg)`fsIRooHBR7fOm`ghMbJ5&9JN6u>k;!PQAf5=uTgjT?^4>pf6r)j95t8YG)>PM z^_ABqednkjyiVy0Mtk0@^rF!rTgn_~)D!+hcyr`M@|+oYC!HE~zZ21x+1hfMwiGux zo-I!{Il<^e+}HSg#(hJ4Q+&%9eLX#I)FOVDe8I^ZHPdQ1>UgW+sD#yU)ETr9@jPBl zX-9r#r5!caN;_(-m3HJ09HnGb2}glW6KBtK;iz++e()ma5O|4m7`(zc9A4@4hu1hw z+*#~D$-T`;`y@tPPS158b#~B-7MVsX6^)ugURI*eN;Gnjm1yKrE78a*Yx9w7R4#tc z@opj2aP9>*TBkVhYHAL3IY{V=kJ^~1N|q`DBp|2dr>|}`HX)N z=ZO!CkBEtq5sFNGp!$ zWvw{;AZx`Dy{r{S^s-hQL9YI*>D2H;d`203U1Y2gW~@QR8hl$^C!#OJFrFaeY4{%LdrUPnw1q&R!CVPWgR|E`AxU74xeUa9X`#< zI(!<}*sgKn!zS4LoBuxTN|<&v{zaT8J}f>WJ}N#YerTj!A?*rjSLn2`ejm2P`W-SV zA-xajeOTs99=6tWfG|1$(Sc#@O$UZOVLCAEA=3fE=)kaUrUS#4m<|kE!kIEUI&8Yl z2>CylrVg8K^Fsc7(^taiD@0!*`U=rkh`vJf6>_xvXKV(^U*w^?!=~GD@|T(h51Vcp z44Lcle=t4fSnVRmq?7r5BWH>e#EIfL;!Wagae=s8tP-om)#7X7>*AZ@I`JK2laIuY z#SO;r0i)B-yNS}2dVdga7E8rHi?@oiMAk%7-XY#8vOW@Xw|I|uuXw-sppmC>V0ffB zRLm2n8l86T;`FTi9_|a`67j`!Nq!f1sklrm7c0a{ak(+bvGhSl@gQTko7ly+hP#Vh zjj=@*AA4TZ5o0={)86!R=uxyneCRW@g7IdtRQ$7et2j%%O}sw1>$nCN~{)Fi?4~Vi*JhS z#CMG7g7IT}QkHh?(E_4>m@zVU8LfwieqM&3c0g@qDAok~%tBd^5jGk&C)kHrl}v=%xY zO<#^2ZTbSy7l^(<^aY|X5PgB@3q)Ta`U24xh`vDd1)?tyeSzo;L|-8Ka^z^UV~;%6 zwC2dsrZo_)IdZgV4Mb~>9PMBJFtp*zM|;%y>* zM9l5t9path9I;HiTf9fS*J$H@$Rr!_L#Cid#GpsU?&85lG|1S-hz1!G;`v6EX#5d9 zD3NiNc$+v|yj{FQyi=SbmWiAPu@~n-@LnUOFm@HYiw7IgW=L6#ZH$z~m=MnwIqw?$ zl(UQJ$Ps~Q2c&Hw+Hpic-<2Nm1bt_mAWjs|5pNP_iwnf%VwG4ez9zmdzA3H~-!al_ z#*f7fM$@t*a_Bq4nQ{$2*UAMc7o=Q+&!t?+K{F|r5z7H$IY2B2h~)sW93Ykh#BzXG z4iL)$VlzN&28hi7u^AvX!=Raz%ZSYYu^9%r;~Fxq2j!YoJm@~NiXm1p#3~+ipIOBNA2O?W(0yhVLoDt=xn^+>$~6ml(0xvK zvycZ4H49|GBD0VO4mCRjVuwKN5QrTDu|pts2*eJ7*dY)*1Y(Ck>=1|@0G1rD*mAr?5q0*6@O1BaU3GH|Hb;RA=7^#ZY8Al3`SGKbhO1BaS@ zK5(ehof9Tn?)@4f%}-n4zbuFRyAAyEZ!>45^oc+=ShP-5ARUSo#Gr3+n)Hl z6~0HjSG-@u=4b1J3S)~8m_iLWmf8OUJDUAJu(jF$gt5y9OruV0wX=YM``B3kR_^tSzF}lha4ik0UP{$2*+|b%)UTjyf zvp7opjd+8RKcc$Y>C*oWTE$3@9$rG*Sa`1ZS8Md7HmAFEzHquT; z{xcVzB%Ume6BFVo;&|~?@ig&t@eJ`yae{c3k#;ivQCuwQxU`eSe<*GczY)I`zccbL zuSVMA@Y%E(VcN{7GSFrgRvBnB3xDb4^}mOfBOL22juL+(-e7b_aAnXrwA%5-Ktwf( zX(C3&P()XW&k>u7xneW1x!6K%DYg<@i*3ZVVmpy@PIBuYb`*CJJBjo+Tj_7Oo7hF% zUF<6EA$AkHi#^0W#h&6`VlQ!TaUXGCvA4LNxWCv(JU~29JV-oP>?`&Y4-pR)4-*d; z`-=m_f#M)>uy}+x#OSX!M%Rh&i1aNn?}_h=9~fi9MaCZCtBuaFj+ZVu6kicyAV$O{ zVpI&pn3y9r6?4UAVso*D*ivjIwiernZN+wCd$EJqQQSrBBbOV(~ZPH1W6MmEu*R%HvR5Sys)60?aQHW9=og0ujn1t2yN#3q8+M36p#^hw_(W)tR{U(@Q*l&*6g>Zt1T}T+a5L&6`m|X}Lh}eaMu?rz~A;d0(lnqigX#FzB z>_SLM`pq%B5MmcX?81I?!qZ_@c!p?4uL|wxRpD8pm7yvu6wel|JXL19^_yU}8>D~x zE-~8;Vqf-~V-{S$2|1j3_N#Jk@^($HJorKHb@2`HZE>CWj`*(lp7_4_fw*4$Nc>pb zAbuil6h9R|6F(Qf5Wh6~y~X{+{lz}w0pfvTU$LKfhsUvp{SXh|L1ASs)e) z#6p2sC=d$;Vxd4R6nLa~lz6l_RLm3qU<`gPo+@Io6LY$VwL@m29P zQTi50-vVh@AngjIU3en1+@wE&G$)YeL{!J<3fmg3Hio0cpNR#cdNw>x;p4><#1loe zVW<`h)k>jSDOAgZYLif93stt*2-_?6Yq40ICjM5uQk*WTFJj6$rZ$f~sQ9GOxzEPj zz6PQ z0pAhd72gwSb>eAtNUOt-#E-=d;wK`#!PZYj`h@W3B0WQxp4oSSjcK@_xWCv(JU~29 z>?<;+Ny(Uohl&Hlfg)p?c*Zn5LL4G8rinR9JX#zo=83fDz6)$jL&h{bO*~yZLp)QQ zAf6>Mrb)?|hKy;*n1<(y=ZWWw7l@O@3&o4Xi^a(zW14*#({QSIiFmzugLtEe4v>a1 z4bex44nTAOqDK%-fM^0l6Cm0O(N>67K(qp)6|hXaOGHD6K|>%K0vC#l#OK9jV!2o$ zR*K8TDsi>AMr2Hr4`Uj>BCZt~)5I{Qq4dqhG+}9%jcLNtF1w1mXvP0bED+V>Hr7dhyvSH5%vgu?0(+^2ZNw9!Hnb5> zSnXybp0G-8BYxinHsay0#bR-q_*?Nxak{9UvJp>u^@xpl!bu}nDCQw(HD|rYIs=?B z&dGecjCY>%d#A+7ckXl+IV1V4e5o^ue>7d;Om^ArAL z?{I&L{|E0F|9OA0cfP;cU+qouKkz^BF7!Y2KlCp0KlV3z7YDgPGjD3pB52`V8nh1D zc$Wq3f?d4JgU&%`?>E8j!5$uWA_cv?tAgG^Z~XWj9Q5@{f-%7uZ$?lOlz7(%GlHA& zhIe!D7w?b3qTmJZuffuw+)DY-FiHwSj^43Ov7WtX? zYUG&6G5GB~GctjH0GJq==)D;^CvpzH+Aoh>?!6tkB65YdE;21L&3h+uW#lUF-N@CE ztG)LlzmNRh`yg_Ci-iFA6$O7+^$kNDCZ)0S6q{{m= zQXQ$rSNZD5YVV84YmwKzFC%Y8-t@kXtc$F}7vj5-_q}iV2c?g_A0itg8~N{ruOr`k zo0@cJ(gClHhc+3@e=8i{v@q0!f)ulxDYbs}MY8~ePDjX4>=wBU<56|Hr@1};A z_;-ig!|vZ3UK3vH-ydEV&hQ@$Zw_zv=Y_X~xB7nzXNR}@kA-u>IsOyjUEy8+-@^yO z2mJYV$D^MLUkP9JpA6p&*ZEI}?}qRB&xK!xU-}DUUd;0s#deA9;y)kj66@kGj_nuQ z&wn8{JT}5#5*rm8&&C$` z-^8AeE%U#REsre^yqq;TYXU##qnwX|AZJ6)h9KfR<9?q$y~kpY`v|)~V)qetpKtdO zc7Mj(SMj~Y{lxvnKH>r5f#N~p!D3&rpLmFPsCbxoxY%DDAPy7#3AC5;!)yI zF;C1Fhl#_*5#mU3lsH=anOGqHTs%gMi^q!pC5{n)A&wP~6OR{95Kk0O5>FP#i3#x( zalCk{c$#>+c!qeUI6=HvoGccJQ^cv_CE}&xW#TWz%SG<;?7qnRTH$ZRZ^iG#OrQRs z@V~@OV%q3CqAPl$F9u>nY$8U*P>hK=VpB0!Y$i4rTZk>iR$^?U>>dx(3AJ;lAmUgF-OwAz=(`qJ1y8XHJs18HnZKpGoJV*_byaI{iNZv*LVAiWKww}JFFklqH;+dz67NN)q_ zZ6Li3ZdN|`iGLB7if@RYieHJ+hd@0Zsz#w|6sks{Y80wQp=uPWMxp8wYFvaG7oo;Q zsBsaFv7EzSh+{>SCoEO?&&HVgE~dVVU9XrwiMNQe#M{K#;vDfYqg&{3U)k>S9Dcup z{C)?y#|(0h8O#@liNnP);xELp;&I~f;tAr3;z{Dk;y5uOo+6GHPZduSPZ!S+&lD$! zXNeQVLh)?z9PwQ7Jn?+-0&$Xfp?Hyau{c>Q5~qmVM@LOB5ib>gDPAdFB~BNw7Jn!H zUc6SkQ=B7~iFb*2i`>^o{@m9G?-Tzb&K3VEa=#y2`Rx^Q*B?xZ^aJ6)i4Tbni;swp zijRqpi%*Du7w3y9@k#L?;#1<&;xpnu#b?Fm#0BC)agn%0d{JB~E*GoB72+y!wYWxn zNqk>iZ**ITEk!&H64P32BeoUWiyg#{;x1w*aaXakxSQBT++FM{?jd#)yNf-J(bf_IX#urQ+Yg<$5VN{ z>$L9-@p|zF@ka4+@d@$o;(Srs?n&D{X}c$F_ny&K>A5F8_nuRX^xRvh@bltg@da^- z_@cN}Tqc%_6=J2hQk4FCs})`&z9haZz9Oy_Up4y2iN}i*MYXDbw!&&#Uv2BFZGE+^ ze}T5Dh5ZW^eqa1R{MZ;Y6>~+6uR!A~(D({ED87@ZQ4?sq1R5`aMoJ(Z2-N$*-b%Tz z*xMLU4iV)L*;O&>qsR{yZt{jPs!utERoh0@wo$chRBanot45b= zFU>GfwQ6*|Vm=gA)940;KM^;IpNgM}pNn6LUx{Cf--+LgDs%K-3U3nA#?TR6(Gzn- zwQZ=j4Vx)OGeD>rAk>Hsk5K#&akw}_94U?xHO51Y@$fRm{8GGJ{FQix_&f1BQMwvR zmqO`MC|wF)(^j>7_@=^diSHR>EkyM~Y(Is^ipPm3iYJLDi>HdGi)V^wiPF;8MGBXR zcZtu68dEWishG+cTcUXNW6nnw&QX84e>S_L!(6jF;L+kxF;C1Fhl#WwX~u}Z5Ggw` zlpQj9;0Yr2AWS_V^?>8Vgm{WLUZj3(Jx!#RgwGJEE8z(uH6~1rA>$IBEuJHuE1oAZ zF4@Ysgp5nbxP-J5yjYwp7Ku~DsUliH8ngh?|L{ujDv|k{n5#v!fbj3dYsEXoIU>Em z*1N>J#e2ki#rwp+h;zliiua2Th!2WMah~`$@gebH@e%P+@iFmn@d@$o;(ReBJ}Le~ zd`f&;d`A4Ii0)8wbO$aF7mABSG>G^Y#ib(JL`;>qLR=-T7T1U`iSLW+jb?YimSQWh zwb({%E4CLqh#kdU#7^R_VrOwTv5UC7$k-zPJ;ZKecd>`Kr`S{6OY9}?E$$=kEA|!{ zqvW=~*hf4-JWxDHJXq{2_7e{k4-*d;`-=m_f#M)>u*fXMQHF?9#Y@CX#mhwLq}d&$ z{FPWN{zjZ8{#KOUn%zN4>8;rvgr&D;cM!ftl=hn4LCj2}*&R@=WOfH(wUXH#gw;xB zcMvWV&k@fRH3rS@V5>%-*&T#cPP01*tDI(c5LP+O?jWpkdMcaQAjGI_W`hvEPMjfL zFWw;DD3*zji%*Du7w3!8f3rl$NBVD;2;pZ$X}(z^#7O7O1|j^sxLABaTq3?GE)|!F z9ER;>+SI;#%=l@lEk9quC?ycyXer);4>Dc(u9NBZSrFW{(iQ zKvc_{JwnVy;>DuoJzw*lKSeRpG+&zLOVfO5nlDZBrD?u2&A&qF)tkP0(^qf$>P=t0 z>8m$=^`@`h^rdyaw9Z%0`s!Ipctx45s^+h|r5R1RiU5mwpEs_HP;tf~%k z&8mXR->fRaI*M6Uga?T)ic3Xli&<4{m7bVYMYu{_Ev^?o6jf8Rsz~#RxKaF6{7n2@ z{8Ic%{961@{9aU<&8i}wO=8+;RuyzbPs|b3wq{kawV9~-&8#Y728&0CL&V|Y2yvu1 zN<3XWLp)QQAf6>ktHVNt&lb-SrHy8baV%+LC~XX-jiIzLls1OatMEFd)R;9}jJ?*1 zuZpjU>Myg!*!q_Eo~U*+i>$-kn0nAGGQwlUVyFRR7J(;-)P^v%fz$?$6BFVo;&_o-vh_5Px)P=*AvK2dB&6PudP8~=(vy&$ zgy)LqiRX(Kh|EuHy-=jJ2s1xH<|oMf1eu@UR1u9Jj7C6o0bVIyB{I_xbG3*@5dOV* zt$3$6N2E{KdY5>&c#n9mc%S$eajy7R@qY0E@j)>u&J+J8J|sRYJ|aFUJ|;dcJ|X^H zoG+%tC&hn=Pl->9&xrpN(I85W2Ehg5LUECZ9ufbdxKu=^h^Z1+h^xfaBD%)bm&EtQ z^+vPHU`w%;*jj8OwiVlp9mI~}E@CHfSFy9Wo7hF%U1Wrj{~lsDvAftq+*9l+?j`mT z_ZIgN_Z54Kj8t;lU+g0uARZ_lBpxjG75j;Yiie4Zi~Yp`;y`hbI9Oz!<0wPKsp2K# zrQ&6xbkgiHQvOOT7Jnm76Mrj8Z_O?vrS#V9GQ!eZv&#sVh%=4csgl>cz$`MtY9F)6 z2&;X}A|tH!F^i0Fp?HpXuBg#x78zSL?#v=1tP+|JXcie^mB(x`!q?H0k?jd#)dx(3A8ee7=vhTj4wc8l(YvQ=v*JM0RJY76P zJX4$?o+Vlx3%IYzXmu>$z9ys9u^_NI76ewug23un5Lg`x0;^*|aFKYiI9V(br-)O< zOT_EN8^jw$8-oSh*JQj!EER1G7I0sa(MDnc_ca-9Bo=UAlks-ZMr8r_H5uoKW#V1p z-J*@q0`6-vE)*Au&x>4Pvl8-TK9a@>kn6(bA%wxDe%F;&Gyl=z{PB@kDVd93Gl}jxk#vUMBuhyj=X1 zc!l^o@jCGqSipT^#@V8DlKaFgMj9BJzKt<^ADTXlv1_mJHSrDcP4O*J8p3^Lmcx4% zV^%-xD()v*tqaWdC*JBcCT4XR6FW&fSv*y=Q9s6Pe`q6pjM@J1BJtO-AXY3+6MrjS zDNYw>imD@btXcXpIL6Kx;Dd@uiq9(MIdOrgy4%?UDW#Kk#(+iF(s#H`&T|47Iynl5 zVr$r1e&=J(Ai};pUz3(_GyUI>blSpZ@*VFg z=cv=1n3%$zL-h%F5%+|F(^O&GHzu#mp3{P`BR{sj(tFCm6`Sc_D_o};X`=dvA>RoI zb4;-(Y#AI3TRFy-fgKAk;;@-&(O#u(=j^BW-mtZDZml$}l}~FYSMgTb)=m@1AArNC zV=WL>8}b*ngt?IrHk0q;P@a4}<%3sq;ywA4Z>Cm_)f~mzzrB-be=eEOVl5S8$MV(k zq5co4eNBs5+d581(bBjoTcEAEig#5bPy0q|X!$o&pN38kg}cI-O5&*&_&_5jR4sx` z5AHXF{&0f9In)0;-eLKUy<-Qod@HuD>5|yG@Y(R$RPnRm0&oTcHDym#=ZKQ&!!Rqc0J-fDL9n_JCpHM>Qx-Fvi~+&Ra> z9oBYO+iLcfdhB>8w|VoH&8lLvYrmV`pMAHutnDQ&F3Y^zl(x96O{u-JFTRZ(AIrS^^VvW5p4xS2+Nt)pOjtVcWbNECPwhlY?*|d|r zW79I9KZ*{2*yzOWulrWq{M-ZVv$1zMi_4l$3a6|7)3YsOGeB%;ZgcxC-=g*Wlx{B$kGMC5CG1<$(7X9-IqhSM z8+$kXDBOg0)bw-PufpmbX+xFtCXTvO?`8+o>iuWYVsuyU{?^xMgv7MNLQQAHhIZ(t zmf{<6=&RoSjkNsGu$1>~KJSW!c2{uta`^Hd8$0z6=Z154?P}lb&7PUh&N=VKket!H z-_7Z-cOoaT%ek@Eu^UXfp4g%?e7WU5J#*P(Ugq6uNKP-_vvS8;c_}|7Wl#3AypT!C zuhzPjna}aR%09Vc>(1`6Rni8nC0piTdS3IZ&8nF>Q?q345nIzgH7zjT+QV9;`LUa4 zh)jEC_NkkCXw4Y28RgjqdbdXRGg@wXOiRzN8GYqV?>F7#XqKntzSwdwZL-g78(NEm zPu6QC+L^U;F)PxLwoT}zYHPt9bU&=NR|2Y=nt66yYY*Gc z-a2+cY-rAO^?bFJwpOV!HMa5%wU%dFL2mQh=9U8`qyL+B!h7wz7+Egmr5o6wlV6BwYmYM25%PTbwxF6XV!rFF1MTqtNlp z!>c`2jR#zg$g!`*!|nKVX>cjIT3j*v77^E+xEInT?7@@U>{TqkxJf)TB?-O6)*Q>l zLf%7^-EuyboQtsPI2v~|#dSnKTM9fl#xvV2-n?qyD;Ljec)%?r*FwkV==tV}S0(T% z`8aA^O1^9(RWYfGiAgy={vqS$5t6oe+wrq?;v3s61zuKDk*CrF_?8JBt8GE#FX^Jl z1L>mZTI4m#8u&aQ%ss-(iF=RZ!(yXE(-pb9)Pc@61?{Eyt;WwG9O9i zAxUZa;H%eKuQb3blsx;YwQAb5)QQlhN!qz6xDKIhgU{JB!uHL%M? z$hFQbjMtJTZ?LVYo14zHKJ)SHLA{G@%uw&r$Rfforu#%z&>mUsGv9<;^kqldqFIBb zIi%qd;k7DHrE0FWnM)5mh-}e#>ya%jxkblQQT$urDJ+F2+4_C>BspYefUWwIo=eel zDS9qN&!zb165E^BsG|itmBS9#mn$ATPn*XRw0YJWo9o_MW#~jXhBL2zgM6DV^`Af% zF{&<2Czv}-hx-KAZ~Bt?sE=m58T8mw%)bAW#wO5+5~np9Ul!n@B={AAFOmRHBmsU% z0=$p}=yz}xg6EL{zas%&M}lh*JdW6!5Z*?c+=1LlyR>bvOq0_|yrcB_QDti=Q46G zBj+-5E+gkMaxNq1GIA~>=Q46GBj+-5E+gkMaxNq1GCU%dk$V}rmyvrJxtEc9nQ~8( zdy?FfBYC9lEfSdP9rKwyLi`^h3is^rMercH0ggr4x1e z$(mZ*mS|LKcl}YbL*040!4<6jjIUXvvzwF7b(EyXIcuV=1oda(dNbaS*1g?1 zVv-{!IbxC{COKl#+u7OhN#^>D#glAo#iqVVR=6Z9T#^+osrv3NI}5)}=C#RuHkl_+ z^Vbx?x0HEoGJj2VES&{>$8VE)Z8D!tvOBSV&0CZCYBEnv*a)d6*E{2yFrv**lX+>X zZyA}FC1;NvYFXwGS~ul$1V(p?(Vg(Soa%}h@pYzZa&Pts9-5H;Le1z-g zUIb}^@M{5gYdKyF$w8VTxkxjlInn}YiL^pmBW;kjNIRrG(gEp+?1FScIwQLwU69?8 zuE-upH?G;gi@e9>{QZ#qkv_-)$brZ~6!O3wYS&j;Ijn8*mYZEg(6c0ZmPF5z=vfjy zOQL7nUR}1|?j?2Fy@)oRg4{#fZ>#+eGB2g(qtrZ~1 zDY6VHM=Fp?1W(&OezyG;2%fh6mB=ateb704J?k`kJ|8?szke?aNtSAOF5!!ii;>Cn zZ7s_+b{*euL^xBA&BY2IQfIc9o=%x>Nb?N2`3&qLRvWxS`uK+QSw+k*qw6i<+b@|7fX_=oy^O6{{)|-b!^N$#^-kWd4@N9&2-#i{VM|zaTqcq-3 zWG?bo1dZ@m`8`&CZw12Y@1KC2h@6C+jEqAP$SKHpwj&V;g5X(6Jl3kx3(YR zn$K@%TU|TCu4~q{Bk=XjnMYjq!~=YqO@W_p^YU#zzRkn8`S&*O-odU;UeFckjvS1b z-GQg?C}%ZcYorZgHpo$2ojn@K+9G)UHhUz-8H{=TjwSd!AN%BI|C>gJ|DUoOcVHA{ zZA*=&GnqLPtPcs+hXm_Gg7qQ6`jB9KNU%O6SRWFs4+++X1nWbB^&!FfP{{gF$of#| za@~=C%PDl1A>~K~Qi&`_s*;rsT7x*;rsT7x z*;rsT7x*;rsT7x*`{088DihPEAj(mZ9iF}28jeLWA zi+q&Gw$QoA2s$IgWUBaqe;#^Fx?DX{^dZn0NDW+G7>6IkClB8FX^h%Om zNzyAxdL>Eg7t;ELw0lf1cg|vPltzSs% z7t;ELte+{?&lKxt%6}Yr0-2Ab5YBP^e;`jGPa_MEg$VOB>uZYjHO2axVtq}qzNT1T zQ>?Ek*4Gs4Yl`(X#rm3JeNC~xrdVH7tgk87*A(k(iry@wH;d`PVtR1Pb+Lo^&b2Yt z*c5AQD)Kew6W<`;BHv+K)m$^9M~msvBt4p>N0anqAw5}0PZqKsr^54)^AWD`ur{Yy zn^UaKDc0r^dREuk=+#1cwU9MB#TuPrjZU#fr|8*YdbXIJEv9FSoj*9N=#GyBNCatu zM3E4QAy|P9XW0&C*$!vf4rkd;3#28|3gMcT!!<3ZErO-vv`0E1SdGpu2E&?CDFIiPV{Yrv#=;Eqj9Co5lQ97NIU#)#2+`JRl~8~O}ozEldHM0tNFwt zMVm^|rc$)2)LG|@pk+rQqma?a&yWJ-=g2Wg961*GFXS3$p>rLdGmz_%8;~23n~*;s zHzR*U{)F5@i`l$$h_lc=lJHT;(a2CF56MS{A(Yak3@&AGDT7NHT*}~52A49pl)m)*M^Xsa#N2-%Pa#hu&mdd_ai2w=Llz(lkwwV!$YNIA7my|C z1lM;LVr4DFvRa5`wGhi{A(qubEUSfBRtuSz6U@sA=H&$QZi0C?!MvMb-c2y?CYW~< z%)1HZ-30S)f_XQ=yqjR&O)&2!n0FJ*yZCcM__r+P-30S)f_XRLf5=GsM5Ac{qbU(I zcP8^sOl3hkKHGB++mX0k__Pt$Io+L=)qbxi4SKS@7xDY)JU5{^+J57Fj`aLK%3Pg@ zzQBn22>F5$;v)ePL7E^@Bt&9Jb`Eb#xE;cA!wyJCgky)D5JqaqY{)#H;2QTr=JW*f zc!GI6!91P_uVEWwB7BJvW+Sh?voMAZYV@H~G4x3z5S?LuPcXkHnBNo3?+Hd^f)SZu zL?#%K3Fm!0u&2D~{1(I+f`?VlIavwkWF_9)gx4YOAXxO?d&v992grKlBjjU*GYan$ zWFzt^@)`0u@&)oGt6gtoKV*NT4{`u;;G>j|)41J1+(&cuQV$XUolgmb{)Y~&n-)j42w4p^N7R_9<6av{Ph z9k5CVtkOXdG6mthH@F00g$}MqZa`S616JzbPY7pg0cUFgXKMjxYr(C^EaWz1HgY?1 z2XZIESzJ(t+=bkY+=JYUEI<|_ST+HcO|T3pM=Fp?WI0lWtVY%#FCi}@uOMp?&IE$j z5OgJ2hoB#LPfy`HJ%#7=RDdqvH9dvT^i-rXvIqAb^x(4>vM*uk&-L)42<2wo%4OZk zW!)OUx;21xYXDbQi@Ca5%+=LmuC5ky7CJk!IK755(bbN3R?-Ag!kFX7z1 zgmd>2&fQBmcP|NPYy7pR@YbHfS9=Oi?J4}U+wa-~SPKVmWw)3syTz=D1GutV%$40@ zJh-Ru-=1QP%w>(t#fy6iAMUAGF)|JLEpjC?9ijj5+MdE^dkT;3saO)>{D89@?hJAs zMFw(pZv=N74B&awYq`VcuiSAkhC6^>;Cb5>PPKD_o8xwOPI8C1L!Bb|ioML8>R#c@ z#aHZo&O`28_n&y3d>)UnYur^__kCBsV)w;YY(IB@?@&C(9*M`;!`-7i-tN)fP%jT3 zvgS87Uw&gJ$Zu?+{KlT+P4*_c=ixo}D))T6$KL4{d1d&Joq-S8XWW_Iv);4rU-2Hh z$i3fN?7iSV;4Sl(xk+z1{$%ILlk6jSl6}v8T%Kf~lqcDz-|W~T%>K@PrUbMY{HnLNy15ws85d)VbcCwZ9d>RlOh z3%YyLgFS=2z2C{t>~+CGK>;3Sj|t-Vll`w?y!SwGX>h5R3Vs><(t9%aRq!kCA3n?spBo3cm8Y z2j2$Y`aOd0gYW%4%?qvH)4b67y&|zl%-`F*(E9sCI!8MD`$qPR?CJN8>=o(d?-w~M za+cpGa&F{Y|A5G($Rz*3$mGam|DedFkxTu9BfpIN((j9J*&F?Sky|3O{6ivhB4z%- z$X_CJ{lSq3BJ=zqkw+ts`a>g6MxOHXBF{vg@rOsAi!AU*M4pc<@kg2eS^wve6_FMG zF_AYSZ}{=ZJCS$&V@nVfH)!7m-boP5uedw$Zl!iP2r7yZR^LH+HFia%Sj97(VEKEFZEP@FBa_|0H}Pe8c}Vd>c=)pUIQ# z7vTrtdjG5Nv+y(j8}7sX(*HJ0hiUw}ns3=3hKJelL2m4{*qK4I*m*Jj1u}MV?BbxEd6*5_n}^w;BOYe240bUO zvq9I`b+PM$ZuptKDd=u~W`n)V&up-d`I!w4jLnVB4f@8Cv1HIMwlKCRI3%_t_F`~Y zY-4O=&_5?9r)e-io@WQS5AbY%oG1I^Jl7xR>RX&EPH~58QO+=AI5GzL1u_;n4mln< z0XY#l2{{=Vha`|wknzZ=$Z5#w$Qj6)$OPmpWFk_CoQ<4=oQs@?oR3_9OhPV1EwM1R$P>ulk@-jp zc@p^t@)Ytk@(l7%UPP86%aJN%1+ofRjjTalLf%K#a~-q=(h_Nf zv_{$>ZIO1)IJZ5Y9gvR5E=VV2SEMtt8`1^Y9qEeffpkNWFKT- zq&Ko3vOm%XIRH5jIS4rz>5KG3>~4)i`8*6c9O;h?Kn5a%kip0i$Pi>IatU%Nav4G& z^0a!KXVv38bQE`ggG@tyi_o_`pC0Gw^f=F^$9Xb6&U5K;yamPGYmpLUCUPrnFbla2 znT_0z+=1MQ%t82Fm}|u2xJEpVYsBMthCS|4Z=PR|^YnV0XV>FAxgO`a^*B$h$Gvlq za}jisr`6*;s~+b`^|(j*cuGCaGwN}kP>=I`dYq@zTW>Y826+j28F>X+i@b_(XAXXZ;yiC1_a`E>vwt>1Tl184+^4O5+S};7kX^ao75(O#`#5~-#PO^X=i2+Y z0R0Z==YT%u`RRDDFVY)1m~C8>$FEKtuR8I_t_Xb<`N4_T+&vV>pH3WaI`L>%ggm3< z$@Tkjc+!dUWNw`2a^pOe8|RtaI8Wrpc^)^;)41^{Z4h0Le27r)=mz8yWFzt^@)`0u z@+I;W@-^}u@;$=wqyIuSA!&XKa1a;qkQ{_Iz!Od!KR9uIM;OQN2;)M=A--_pc*2SE zG;N$`Y2!7&E$Cj&oivv$ zZ&K&`PyVu^|4+^$n?YVBcM@#pHE!7?rKsI**us2SnW$PhK?oO21Xb9W$9%x@tv`E?vPLF^-ddWkpNE-0iacSGs%^ zXM4pBZwW$iMBybz~>~iS@xU?gZNUwWDRNyJz*4b-$DUPi?>XRp-~w)4r$YRZY&u zqtlnApQ|d)e$RfU=T}Y7hV8TVKDFbuEmc*rC9dkGEn)V%C-cqPtp1B*Y*oIU9XIL2 zmfcZ5-`d~P{C`kmFX~>a+>NEGA752VD>K^~YCqf3P@T8rwp{(KHt4ea8l_*e-0seD z(rcFgiKn4zUdv}m-wKz{NguoXKDAHNpX8k` zS^kWL?bT2}WVS3{l&u4eAE zebTd*ag}k|{pkw`_o$ke&8PNfrrb68OWDs$pAX;L5Q-Ht%ij$Gw>iz3R7rLuqQqrHd%;nby(Q0OYnhAd+9Ns37C26M?&dVTmMyU`=_>X4#xj0q=lh!F zUu{maIk0StooM@futNK2bKCOP*<|UD%9|+MW!YPs!`Y1ue5T8nVM8w4(7-0m6Tc1E_eXZ8|Dm6^{0}YHzoS@r zEAIbyWK%m2TaRjg|L+KAbuGIN)h@&Tg!0uc&klum;{3_hl%0E(_t?4gKQZ+*ZhWuv zD(_h5>rQ_u@3Pao{9ATtpxyGy_xZ_LcP$^Dey#jmr)&8|mRDv?ET9 z%G0%+dr-2ewtQNg>aZoM!BBSJbZN!24JO!`$eQ-ryvw*6JF|AXZQfXZ#pb%##5Fc< zu!kF1^Vun<{7<#7H<%5c{;K>Mr%So)=go&wU}d#^XgxMNB3YiXL2I`i`PJQLN57TN z%BD=;Q$EMS>Zf8p%eWrWrTji@n+La?BVM_b-F@r&SovzF&xX^~AsgP-r`2IoX?c~U zv2&gV%kasza>#5`d(A3;ZTl%n5wDv{`a%2EjotpY^!SRN>5__r($7_N()rifEiPB`YbHpsB_AiW26hqe^5MQ3;o+<)a!!^FB{D2O}eZiv^d+J^Oxop z*0?OLXlr49m(xpSP=0LZ%piOI=r|Q!ErFF!3G2b?u<8AZ-u0sOb#ob}v%k$3PQ_!k zL1p1Qa7SL*JS?rsZS8ea#bI^oZQE$KY<@Z~=ZhPu7`i!*x@FrnbAHZu=68FgXH^`N z4QFDuY|DOEx?0zU>crQZ&oxeJ>RLP9_QMrVX49mbEI(j#cx>epo5PKiA-nzG@tH2K z7~5Dr>HLb58{1~_wadM&bV^&N-L}+S+v>Gn`mGgfx0m0K-QMW+h##BbcK2J^si7o| zo!d8*_CFW5^)IiN zCf1JsclVQBFK_HsyWEbGv8(E)t=(?fEn8oA%3hnyrucv1vmsmH|Mq$}tlJgm*RHqh zyuymfwc{+Do?r29{kVp{r?Iy9y>0V#KkVeo8cJtzb@jKgRR3q<>s+h+KXXVYUA3}d z+o%Eir~Y|{+2IYf-;T~dvX`*CvakOa$3wR6KmN1&nTGeR);U#nTl%Z&tT~pqnS7YVdp`Dc|J-MQI$5a1jQr2z1>NSlYWn0@C-z&Q%U9#e# zZ1~^unfK#!;e3oUqyJs6AJcxeUTpitZ#MN_y6#q1QBp7ZKlHWYrtM|C{W@fFs6AhA zd!GK0*NwJU(3l+1|GFYb)-l zlV<+Yk?CQq?cRw)U;Pt+6uJPE$YJ*mz36RX=6J-_wUx+?8&m z-gWkK|ZBl372EH_Qe^B=Na(0*hWoSG_!&|m_FYf#mmuL5?JwMhC zXSdsD`m@Zp^qLhzGNHQP(>3{Vjs7F6cl~s=_ffdsyk~LNep{v6PDJf{ino(=^X9Gl zeeNsf&fla5RCY@rTe0Og+(zo& z*!WHtt*mwJwfd3l?zPL1jmv(f=T!z4&i=-m{k=bZY&J#aH`?sC%=XHh?DzVgo6iYq zHl#;awzTy$eG)RSa;tNDOIEwwn_jEzoXvAnc8#yf#%%l3+Hcz_w!O8ncCEda>e9eD zz|PnVjjiFekMsYBe|ih-tSy{5Vr8$Ne&qkHIn=HDk88gzb?5hvo?E^(9J1d;($B3t zsQ%WSS)a&b^Zg#mXGhnk9VlP@a{W8MZ&mIc%6#SK`&hEc8*0Dow(WiH$edR@j|R_k zYbV&5@aF4AJG0YIOS`l6t$(z1dFB4~;~M}9{O}Az~-*whnd#&I4t>4~z?cZ;obN0!d%RPtO^@ZgN*Xt+lr@{uz zwe4K&7w+ag7l8AX&;O+G6u(sqg5y?B4{r;<2Kmft-g(h(mbcwoOZY5X4KL08=k>{5 zJ~h9RC%@*@)LA+IsgZ(Xmna#2WDSi-6%;oO;D zSL*vUp1ndgJAK}#$vCI`Mk~`#>t9$!?&nqfE}!#J*yPS}+u1zlqcD-@@IAG{#1QxN z!};h-0s|4gs<<}NDrS|VJRW~!lje1p*fr^7nbAm zhsO%resZjLT(_#!DQ(Vlw|Z_)>%89s49m-X|NCUy_!DDj zkLyH9jN4Ylo-E~)JEyfHpZ;fCb*A_G!g}YmS6F_2cJt~6tvWe(uWi^SdGlU5_g}7d zp=ZIY;%TKRBfRG$1jntal)I^y_r9l)_x>I2s_AV>UV8eU?o-hBpW;_F4_()C`sK`D z>kz&^apatKKIJ@jBJXJ49I~@k)z4e+m3h9+318Q{#%3>D)kxQm2)90m>yxLqmWRDE zSKSttQ0VJf9%I1?=_)v3Rr^9^o#V#5-_tqA?tl2^|1JCF_5IG(@TZ(h=i2!X-ahgg3+zN&|Qox7^{udQeBn^paO?J<6;{QtRQ;4H6C zKh<7m*DrNKzU}h;e8Z}N`L_MD@+Vm!@9})%zCn&OeV-u5C;LeQ&?)@hx!3=U*QzA1 z)mi-I=M$erw_mGCUc3GLaemr!-SoIT*DW-D>VmV;<4n(WD_F*=C3&}n?*rzoFQ^=j zp9|W4_Vu}+>sD~DGwr9m=R?7gLR*6Gu38&hw2J6It&e^$Va2KxZ%bHC`1MS33fq46 zGSDF>=F-n4$aCK{_j#CmJZ}!U%Z25GBMMln3+t3;TgYB{Dhn#+c@9f6-N=$zB z=Tet@oD==xQ_K_joQ&5lJYM)Ucbo7W^4*r1QYPFMIdS29-q(C}$=ir>Fh?F^CU~1E zSN2}Lzq6GQYJF;oygH}WoVgTSo6;cMD!3%&>TuqB&GtUycG;aZOg9*KRsWlyxe`xc->CQ z-TJ5J^WNwDsgh3Ew-5oj?Zx@lIo!1H`%WqUbxo-&)yv@8wR zkzeTLxsJ^{S@&~txj)O3J71X0-1-#O?$2#!vcMU?|9B<=IopFNDJ#6}nf$l>#BnU8 z=2xFz-5+ADoSTx7-`s!Gx;iQAe$(;KVQ|*4jB{vyT2uV6^Y(Xz@+$jVpASFhcZKrK z*-Z6i@{kFPZ-m(7s<<$*C&z1WHe}Al?Ni;mW_dW{h z{=2O|(`U_ow-ftcQ#}9Y+MeqybFUW##gXSK^OtIWzUlKz&Fi<6^Go^VzvUc$FJu3& z$MnCK{g9uY!9J@y28XTg9^U5VgNs&o3D@!3%-a?&hufXYYwo_}TZa3RZxhZJ_KH63 z3f;~e3fulmZ_7QO+_F8pHRP7>-2?n7pIyi;ewt$DZ1ykY^BuKl5@+>YKVx zxa<}BcnQz{_#kz%gtvQDx&Fex+mNg3WY+s`Ma7fF0&{ui@@(n{1-A8M=4~IgpVz6d z?{gHkjjq3^YeV?nyEo{j|M>0I0-`DHu zza#nk(*IO#gPTt+|C*)XC#SCc{QhQ({KrdcoqzrLU+aB7sL*=|jndkl|Fbw34by(H z=e*>-w{tF9rleh1m<@3t|=Y?N>mL|z>9V^TiFKpXc4*82W3+_CJXSx?p8?6;6o;L26P=xyq z6Z4f*JZ(z8^@UXyPn&&WbMdrkx$-kl{p;aXxjyjiJh5Mm`&Vt*R=&mf)W* zH~7)1_eRe~(ogAgHgf-4ZGP?h@xi%i**|A4-fxipoc@25zQLL45&6wExFS7}U;Qs% z7y1p8klft)Q%Cwe`P`>!o~v|AdbwPs-+eZCPgzH|63bj@wI-(vj#!kEGB>D~Ve!~8kK&$K{rb9%3zDdH!0|0TX- zdE#>?Wzq-zWJSD=1@sRtN+0&Kg0R0!@qt7d<^My{AiA_2LNR3w(v zl?zA%Rx>X(W|BsW=$hNS)XXA{k&?RJAdQp&sZSKCp<6~taqVw zEG;uOM_O5LtNEke>gicRA2}iylP@mb9*Na8k$R5B^&XGZ@}u=ukp$WVjLM{@-WQIx zq0+}N#}xHG-8Y~@_OIf#SKdAfs5^9eJE>;dvVeT)T+P_Ih5O=DdjvcH$|M;ayGK8ngu!7)pHM6*XnxDO3`|DZjR->BOzdV zw>|>ua=rCdoo_)JnbXG8MD#f;Asbp>P!lbl%!r(|BD9itc8dtj*J?}N)<ZH>-3xDTpseY8jGv)(U?tv;<` zbWXju#)^1nicecCU>+s!if7ws@hpiYNFBYWe7Yr{CZ#5zyt6W(?jeoRYgdfkg8_A) zuJ?|#fa*x<*LIGUCgfxE$`+wz1}yI!@#|wFN?YF1bM)(T*~vNIJ^T2@+rM~fq|th{ zbVKW~@>UPgvx^q*ytB1rQ(MEY_4I3N__Rz{&%)3a@XIB%FRIUufbwkamrA4o>s3;p z9xF!czSI`R;HG3 z>iSqMS+q4u=p)~svkG~uilxsfrO!3?1~o-=O+?OE?Wi$Dk&2!X=OTGmUu#s(aqb;O zzMNwwQE&C^8L+l{sj8%FywsngiDwgLtW)o}bwrUGdc+c1rkk_2>!VuTz}A3y%u6kA zg%Q1yN9&e;y;dNb`e=Qm`St2bTWX?0voJHsyLz2;wUq+qZr$EnKcn?A@9Oo+udS-I z4gGrWDDl<}GiNNXy?TTSM!KmoFlURP?y9y1lE9b5~0aD3h%L zb(NP|uaUa?SPEF)ksnarxUSYGph7E>ccqTXSSYcvuQZ=DXPC{+nP#fl&rCPJHvevJGIyA7n7ho~=DX$|^9S=i^RQLboNHCL zt~2*o&8-(L*Lt7-+pG_*Io9pgTx*SWr?sB{N3E}|?N&c)r?tl#Z0+NJn2JzEtY=hF zRmOT=l~d)cS5yU6)f%sAsG8Or>QdFl`lq^0-DZ8J9#@ZB%hVI<3F~u}q@J>ttEbgy zD@Bb_pI95!VwI_4)L!+ys;_=fM^z(rT>YpT+c9=PHMNV`#nkn7NxP(KZkMsks~hYD z`vP^7eUV*7-E3F0YpPrA8|)j@9rlg(jjEk}lYNuA({63IR_*P3?H=kbyRY3>-ETi; z4^-XkBs)p6?|X$HrX|NTilE7x^7LkpZ%EI-yLW#c89so+RNPM+>!PQcZ@s6 zUg^H|L#t<)7>fVG<$2~{MG%{9ot{iU(<2?&HXnxKL3sW z8y&yDt-qra;qTwnj2;$P_h!nxMJ%D>9F z*}vbv-)S8Y5fSO!60tU7t#fNcW<;iQTg2Xoz0U1o8oM~(61h?|mgcNxH^{A=o43hh z2Utb$+P?$@*L-<;{04f{!`A$$ud$$*+>tYSR--9T4S5U7`u&3(tXB0(*4FUDPtTrj$@T9vy3=qS+lHIW;wGW`3ubp#V{{2 ztC3bWn~2G(a~)|jvzgTORv+@+%x+TMe87BwHr>tcw!Uiqo%}@eAJk7WCzF5O zd|ld`Q_LyUykY*6nm5h2q?|d;e4F;u&FR#DbbscGQtF1&?cUbMjU=_Vzd{#H>Ve-ALzlm%0wfag&*45GE zU$kDNJu9z7u=36({g*XIE@1urH|YZF6N$GLS?ScTvDS#~t;pnAk)^D)!`g|=UDhtC zY3;Uli(-Y|Lk(**`R}an~Emf@I2vo&Yaq3H` z5~5T|Rg#)gsuaCStJ2hzQDvwptIA3dR{09#6I25E3sgn&7pe=%S5g;|udFH~=VEm+ z`6{XkeXFXfzlj)Y7Aq&q+X(atokSQZ>qPbnWm;mC-t^kDADQ@^@&8NMQV|>;>z?XZI-H~wEs+f zCe_q3^|{2W2GgYP(<@&dm{64i0Is4UqwtlC+L&5=dfc*FBdn|BJaqOu>>M-?3 z)G^v0SI5ydOJ&I|Tr+=^y53b&F7mFIQk?5$S<-TLIcaE@x68|QTssq_xqX3sfm~%* zvMW)4k$sVLwkz9}rHx(1u0l;!yDBZK+0|%S-L6i{8g>ooV%M~5N>jUq-9j$5TiPv2 zTiLBhxzawePdLlE2@+UrO8E>~7>Au)9(snuza@3-Y4$X! zVZUupms<8a_B+(fuxC*7u02z#cvpS$T=k_gSN%^U!Cqu9Lc(HuG3`IKKb1s#sl8Mh z+n?E=kzZynBmafHl3A~^Q)s!`Ud>pkb}IQaJDvJ9_8Ri*?61jhv^SB@urp}0+1^Zk zi~SAtTkUVjZ?|_y1AC{vOD?l_+q-F#X=h3^dyl;b3+%J^k>79cr_CYz5N!_I$E3D> z+&<2p%CfVhxMMn|+~x2mmGDA*a*5+Q5#%GCNNMXtIZ@=JooMP~oEW*&DdH574o<)+ zN=>X&OfJP=iK9(%r#Sf%P6_fQos#5BIi=)Yr?gXAx;kZ@vT}n{&M7DNJLR47(#?r? z;;FgVsUp`nmpYfql}=r!E@?gIGP%~N@6?yuoXefo!qwSnE2W%v&Yg0#bC+`$`MaIF z?cAw42jS?(jSzxz)*VGMHDUvsVJnK4-tgdj62q#2+e4TFzHqF7n0uDoSyD zqxy1>=Npm7Hxh+!v_cHum%cBhm~W*oh5Tw?s#v}>Uz$Yu(tYXT^R4l%q5do1S5n!x z&bM9?d>eclq>k@v-`7&Zx6!we{3hQf@>_i0NEP2!-%hc8yL`LpmFdf5ls&%h$shC` zB>#i&F!>|Cqf*j$%r(VuEtd$~jc}vM$GAnMm>cUBlOk@MTTc9LdAGb&aO2$sspwwd zR+KpRLiZwxaVxtQOBp=5I^wu>-Fo!B%)L@fw~^aOs=HUYSCPNk#b|m0+6XUlKZ&>kv7a7M*58VjFfbTyTh5?v+lEK z{+#<9^&{Pp)Zl9}*30h8j5W@Eh5W1TtK|Rg{#{DA6WxE1pX9zqezH56{1kTz=^O4F zQp=s{{!=QuZ@O@EJHwqp{Y-Zz>0*~_y63Yp;(B)@`AzO7 zES%wPA^(lbI_!Sye#=a^x!aiOc6U4VJKbGU#m#gx<$8CoyO$InS6X=9t>D}GiLw19 z{3WCuK5iK?{bl{-#OL|A)Z^WfHupCd+kb=q2C+OJm#uC6cTnHn-+}aQ|J|be9sM1V z*2&*V;{5me@1_1P{=blR@pqx`eg6B%-|z26`k?5~N2mOt5_%;F z+WsZ}&!mEXnSUAi&;4JZ3H~>E{BKF{{BJ4k`QPGt{>+{2sh>AHF$WBfj~wQV)-OEYD|ong5dVwoK>V z#5?>)%MAW)?W331-g$ZLmzT#MUoYjgM_yk0;^nm$?&5`)l2S%#qawcYg~o;W&58Km ze*EvIVj0&N*U4Q*GvjaM`{I%Nv`1bIk9;6CPvVzXGCaS$r1r~i(0+MI{PJ;l+pplA z7uVi-9NzgqB^tl{O>vF4jJKqNG0pf7`Gv+3scn3UzwT;(y@vMJtKhGvN?9Wvue~f@ z`#y=%p8AExaeQ=B`{*_C(c{Tiz*Db@r%wFf-6xPr+#zTroy;cq>*c)r1f-s)Zg}?z zhk!B@wFe_j0Acdw4`{wDdi@Y)^iwfnW# zUR`_be!TX7v2_mqdI{~XoA~Ps$S=lgucWucZL(O$cW*S;AIx8S>9 zfbYH?op+c!Xt@hN-qC)%$^DOm)c=4t?>7&d$FR(C^SC@{X5rZz+Oxk%d-evNeI2~; zy7>1O;osMjO4em~`4LtFs{tPW71kBhG_)F$_x%0{?e}v}6TiPWet&y$th=ncXw$*! zK>lv)ZanplL;?94(iM|=MUe*a5&$)3k=YmeVhp2z=J z?eQ1Y9)Bdh{$lRpEy351(7t|s?dw;@*Z+bUtxzlQ+P_p^5?!oRE2&w9-(Ld1Kb3qM z-oJzQzlQu;wU+!>>MQc=)H?F(715g7pf*tRwfdTxjl>D2juRquoM7lUp)zs8H&S11 zB~CDj6SmQEyV^m1r`k#VE~16Yb+q8u(Skv=u%DXmh!6}NA(-4h`+=H6>JT-Di4$rP zCmf;gQFW9WFItGy(L!e(EhG{x#K`@25n_g$ygP09uU_Qvkd7Q~)R9AT9XV9hk;8R5 za%in%hKF^`aGiZIF++15EnKIgh1NP+ctl4F-Mu?=a;bM`j+8rdq+YZTtD}WtI$DU; z(L#cb7Gia@P)tV)u{v5v&~ZXD9Vgtb#;S{)}e(Q(4H zI!n!T1e2* zLL(h5G|u}I zXrp6>TXoD(OvendI%Y`FF+;458H(wcAy&r>#dORNt7C=)9W%u0n4uUk!&YqT#SBe# z%+OfJ3<)}BXs2U_hB{_wr(=euI%a6BV}>hr%urRw3_XYwT)E8gJAP^9L=Y`prlW=X zbhK~_aY7M%_ka_?%P&ft(25A5IBmSx;1(Sl^whCIFC82F%_-xQk-j=YXs;uLemX*U zOh*XqoeE9`dEC1jihs}DQ0eX64V9MO-B405T4<}Ig)TZ;h|$qPY3B~-4!K#!45gj+ zPJ8lRyxZ@F)|RPe1JekiHqhdAPgl~RKEVYQgPRHBFo9Yq9)BG!nb zd{t)^SB;9alu^xT2hnEBwS2{p2Ed z0MUh`qYIyoF3RiZ!qU-2SshncI<7EuTw&_C!f?kESC~4kD5K*FTgMe;bX;NUxWd+P z#T7cPxJ1VlwRBu@iH<9p>9`_R#}&1>3%-asdU3_|I)0b&#~yLsopt4#UUP}2p z5P?+F5lFm_K%#X7Qr6$u-x--+1QJgKav!k-cj6hhhrfqhtz(aP9eYIU*dt0u9c6XY zVdkk}&`-3Ivw6BP_0_9&ra4@1WuO2;0NI`%Mh>=B`3k4XQE{uiYt z_wrw&mluZ^Iu0>)9AfzYK^$W0IHa;Q~eCLUVDAU-5T+xSdATvY|S0qrZ$ zK7n$=0FjD}Td6&a;6p7Rh+I4zmWotypedktmAPi++3*omETh9@> zB?i#{meGJc+mLTF5wb*X?Etfc3p+G{5x^X8$7Z*;fN_u_(l!>_z-XX+2jx2`-!T#x zuN`BwYXy|sQNA+@t_F0tbBRcMlM9U1X$-6rxiV;$if7n6V$332g6!j(xhrR9GwWw+LXY zzLfiJ6?wE0bb?8cA<{1a+QS%FA<}=B$P*$ma5yX%d6M}&iB3xp6UfNVZX@G+CX_I<)M_Hru=jdp!_uDVU&mUgc-0`pD`PpM4BhX<)f0zrDpQ{H+FiT{lg4Qq|80-0FFaZvTjKZFy%K>AJ zo&*^pFUCSs7$Wi#a>m{UV<1&z9Bp6OCNjPhbP}1+QDh?Zlj7hu7zHc&07d{>1LfD2 ziA<*7WX`qM7m2*lL1gM|k+-nrTiEg~Z28twk!db4?li`oHea|I1&J^k=r@Bg-eted ztOvbe7GRtAu+4ke;yuoR_cn;kIwJD^P>~Oq?}tsGkI3w?B6BK2XQ17jEh7I%`Tr>Y zALaj}JU0QD)7(jrA@Wf?pzBA-`DmTUJmxTu@;qeCV~!tlEPdPpI5s{;zxfK9!VsYR zZ_5A1z6%D4{HG0!0s4KiP-GF?7O@`|4Fl}67+WlEF0uqYKCJ{@fw_LVO=M{q$QD^v z1-ip@VBViI@6USycKDp~^4fr1zF@vzya`z%D;R$T`mNX~@+I59OajJVX#(S|oB+&i z)eu-BlHvgMDfCI9yqb2Y?Eo93uHZS)i6U#!eXaL80Op8%(49=(cVF zWQnYA0O+-TCS;0ir~+MJA~62fB>)?JJrc0TM)cU&1O~!}}|?EfEI543Qn9V5P{e43RxUfq7sa+1pTLKX(3Zo5=TUJ2VIu05T4v-(mDSjDCj? ziX5p7?L>~1gAO8D%stqWPX{#M9sut~t8uVS44e0T?ba{?_)LR?>*Lmkequ!OK6z9? zjOZg`#MBWZ&;a_tOvn_YXcg!JlYsl|vAjPV+ZILx@1+-ug61#?=7|x9?6`)|7iIzX zfQ$2qg5unhEIt{yhf$&ubb|4aE=H+^Vw7nmM!9lgl;?e$3dp_yeJ>aelq)*W48}mJ z7#Efh<0A4^QpBi=?p2v{RotlhrTu-~})5T~`xjA|@A14NW zfpG)&yOF*(b^z*b#1<`?e@ptbMAl8{)GAwyn;SxZm6{U4aDeLM~rSwAPE?!8{>4Z0nD!lvU?)07rOQC36p>^9w`O4 z!6;ZRMjr=|(PscG6r(R=JURn*iP5hzbQ7aL`}lF%Jx;sF2f|!9B*p;7AAsxuv>$Lx zj3?MvPxJ=LPtboL{Rei2iLg$LCrdyZzz$Dhha|>ILeC_|PMRY|vI|Xt`6SO1bL(yRQj*G1llz zFpcRY;+I~v; z)0Qw&jHTFo89FXQr)Bj2JRUm21V|TSc>r3A!L`HqVj9rq%bsGaswD>30)uOT!L`6x zeKnx-YINo|g$;gt%;0yD41PDj;2FV2S^{){agZto-|aWjTfi__2-#w+X$07F&1^u% z+B(n+rbDI}>!P5!80)dadhE9WT{p1rHjIIlVtnmD6F`rz(c^1mY($TZ*lr_zHqs}f z8<5W+zZso2GuO@Ifw^vph1P&wwyY52n+RwMNibK8t$oG#wjuO`55(9$5ax@qlR4~M zD#k7sngiusOT^fXKABx%3NYTD5@PJ7et#KgC&qz6Vtjv0jDrmT*#~C>I{(1De_-A} z%!IvS9I64GU?OZ3<8V1>FUAqZK9ViQ(L_Mj(FI~0n;^#V4nW&1T(#+t!l~OQOOg{5rI?Z9em@c2fiJ&erQ_Scoa7fG;w#AGE?ywiB1H*tj z?*Z}wWCv2jEXuoQMSH+pF=LS%+ZU)WRvUPStJqpG7UtS$mq8bxMd?59-$v^K$gOd=~5#^9nw_cvX~`*US*J z@i?GQlR0ouOs*s5_053!UXPuwKPG1L`p_HDvH1=$Z>R*FU_4--7V$v21-iD_D&~!q zp&OvXjoZX*SrNLxB-kS6O|-v>_BYeEHSdSqT1U*=M*@1ZYXd{XY|pm4TZ`EV{W_(J zd2aw(0`tFjk(iwY8j0DZg_!rD>wV~YAG&r$*RJT=mAQ4@CFcF;LL6q^KM~f6*^T|u zts|gIH*|TR5_E+(f%aTC%0%DWe#2^sIeZY%XLz=l&o+Vq zFc*%9IRY6YkTK!|I3VV8^?`XlM|30?DCVd#fK6YZ-J%sF;%&>$P5DPVNGefWEKC zLTfnK{iaO@`n{bY=Ja^LZtp|@y3FV(=DW=8 z-GgGjhd#5g)BEhZ4~B?28(DLDi1~kv`%yJ9=dmB=qtAc(iMg<)n8Yc(bqpKCT-sjD zWo^X#ysemDbQAN-Mq;j{UrH%4S04~F4O!{6#ay>s%ni)#YjoH+UCfMaVs2*4t;pV1 zMa&)8X&3X!q`WTyc8Pg#w3vs+iFqVe%%f~SIzYUyN}0!z_2WD-gM9AK=nKchvPQyE zv7BmR`5dv_Wnx8qAXao0m@QU-PcFtD6f17JSjG9gV@c{u^3Fi%wPKZ>BUX9j#K%EP z;PW2w^MLjhYCsp50I6bKFkh^SZ$gGx7nXw7FbrsaA^KFJeWl*On3cAQbx}F7E{+nb zN|snv(XDD{K$og3;E-6=;-M8FquK(os+-UdkWqaaY!RzQ8E6B;VIeTznsuN%Oonx0 z)rtk=)S^wTC1PEIoJ$%3^SWdv>=LVXW#|MGAYH6W>2qm|Q>lmk^(KmS8TwqdOsx9T z0Xtp6T(9T=^t}?@xld;$CWzG-8I7~WYBEBsYnuZ$YKjicCW+OYaa)c8+TTRGn;7e+ zL4dt)LT0NvkR{eFRiHOu+cvFW9AKMUZ-ZH4-4=i*V%^?AthUVe4)*UIt;K4GUD|CE z>(2Vn6Q;sOvD%k_X3!sI0CjgU{$14F)fYC1)d6`OXy1W(+|8Wt?giMtqXPDK$2Y~g zhjH&|1I*2%J)fIj3uLw)T>PDLfdO)^V-9HeEdtlas6U6FKTdcn_UQguoVw~Qz@7*3o1N-I? z6R^o6$a-W7a2)iB1@=RqB$y3*#rhk4{)XJXygrH^k8TjFAM*M;VhzA11G|fr#C!+Q ze=vQXS}NAiBVr9h)-waddX{!0`iM1>Hlq??hgdIc5oBZ0YP91v@BC1?k<-%P*F*m_F>Aa~0Um;<}S`lcMT0_OeA zEMT0iae(c&GOw-Ef%$$*-M7?z+aJ*HTk5u<$F`=>7qIg-^xYl>iO?Ic@%B`)b~w-g zy2AumA$&*$>OfauE<35)RYt7cEyT*C-<|}({`=~Q^&R;Gqs98ZqgV%N^TQai4kw9q zGy+D5bv#9^9~X!fYywL~Nky0}3ZGJ$twmXUlg;WbN{tm|_l9Y(MU+zp+5n$qbQZ!f zQNB7rTOa*>>qNP+&;ka*TsR=g&!^q}onRcS6cypZJW-MH&>5z}E>TepVH9i;6S7b>19Q80BBY4oJsVXO9jo?)NpMIM>%FQrRaEtQFhW!f z#;LJPR88t@Vo%m|RTFtNvqjZnj9Rq{pcD1zTWdb-2yrPoTzWMOgjuj(RGn&~>N3a6 zkX3(*sLPv*YCygLeHwNXbtSKjXn)mhqOPXxHR#s3vZyB5zA3NGR*Jfzzo-_>^TxTN zZmKV;)k0CNhlsirIk$HZb;l@J0F2j;KJD7VP?!zdMBNz+O`tbS1m=6^F;VRquYC`g z0rb7A4onl(p%Em(B2jm{&=Qu2>PUUZnWFBY-#sfub)ql+uex`usLt%mzaYO$swn(k z)wL_2)BP<(bsH|K`vFlsW{K+AOVmTuJ=7P*^44Q(zy=Sm74-;UCKGer$X$HDX;f*qf#4C$hVjuiEDYf-~!H;i$g87*o! zGM;5WKi5vw$Z@a~GDSU4pXWOPHhv!cN5w#M7zFd+kf;}`0Q$T@n-^#^nl__pGnzJ| z=fe?EFQWg8*yP2jkRfVJIbggo!(bs~iF&C%pvOzo0K1Ki2lN?>eq)*Y%jo-ZBJ_t3 zV6Uigj6IIA$4vm{_DTR+!U&+x_y|Ck@k2#TK$i*GqW<0p`oeVBCTb$%aBWZ%$3lvz ze<-LA-GDj$13CYo-=x~WSd+%U5@24hRfG1xc&{PifQhhD)SCk6 z{U&v9P6GDDTeO?j2atu&t=>i!*EscdFQCoa^qX!1`)7Jjm;rl5y;B9c0J7g%Cu&9s zXagf*nW%RiXaa*^zNnel?Y&aaUDW$cVK^ZB!*Zf#cN6umrK0AJhP9$TstwrTBjz!W zKJ(fG`)?lGKBoLJ<9tlpkC8b)3OWLM%-<;L-)#SPI~WIRMJ=Gbfc6V$zkv4ti3M!> zAIAR=`)DD0FKi3!vxV5>lNjg(%>9!LQHv6Q@fWdu5!)BX!EL}i7qfi{+n02L@sKI% z(;CnfrovWHOXHyfuzl$|QJ>NOvsOU=&#==n^j}7K8Tu?+F6#3bXbtH5`3h0XDKBpU zY+sJOzChnE(B})}eX&c_iiXe^X2D)jU$S4m#GYSH2JEpC{Z`U`^ z>7r7Qm(mtS!7@>+*>9`aZ>t%9HS$Y`TFt zZD39tHj4VX9JB*;`g*OXjb(uH#<76To7l&jTEPfFhYS-M0Wvd?xfvZcHv`7pya3Q) zOKs>5Q(=p!Z{h)&-;9A2QCpF@wGE60#=z%M-`)mn|CaJL?69pZpyRe3qPACp&M*`|~pR1g`#nPLmyWsvcZ zDz-6BY>Vd^SWO`b7Kp6`8bV)~1$)J|t3g+o3>(G9|FWI7Fcwn8_VMalF18zhRxnj; z{{UDdb_8QaAS0?WAR`JH(V1e$R1rHc5@-|qf!J|;(mIaU679t{iUJRTHsqP8a)@ zQDWbEHK5n+rr2%Ah~2ImAmh$1V&Bz4?7J(9eNQ{FJJ%BXFX(EGDE*1OndSVYi_JD0&?EQtXk8{k(|%LS?{)FE#?^^kS;mV}m9$#yqAmhqqm5 z27_Qe91wdtHkjT6Cc+l6-${TDK;L&ZiajG9D9@lgV~5x?ne!~#&SK2>(FOIw%D7{Z!>n;ioL#N{BLQ$4c)i(0%Wi@*{n_WcE;Jx z_}kZsy@TUm2lcy{=PugpLXX|-pWTeRoAU1cVrQa9<~X1|>yW*NvG+`Zm16HTfid=W zg~@u2Sg#3|% zVjnvu_HlH{Y9#iLjP)bB{x}2pHiz)Jd}E$Cyhq@eTf|XSpo=*6BypSp;`m0waz20Q zKqBxRp$Ld<1ic_doalPMr}krpic^Gq5%PiCV81v;`OZdB%CXgeeC#N3iZMp9iQ>dH z1U~s+yr(!N7Qr!bO47e%A8|_chiq|5)4mMfqA!DfWt+nWamq1HIkuJW3rEC>?+3hA z;59)YQ=Ez|#i`eQ=^cJTJHo0$xIQP@<0pxUFA|@|K9X@q_sGGm|Gv__BZtI+ff|u#XFCVhXdmDL;rpo#d(Z5KgPI^%@(KsesP{? zF3ywamQ)egUmEI@=ZQ0z`oU9SB^(oH2sRmVNSvpb&r{fCSS*Z&`Qkj2F3xad3~wdQ zv)E_^@}I*#&mnVU3z!J#F{*<&FEFkN04P9UyFz-#Y;j=f+rm5m&Fh2LfoD7bwEsi+fpu<-7 z%{JPwE;>7CvvY_zyJLVpyOFbdk~oa; za4Jc*l#90v1(l~oX@LFr_mMvd4 z4mQYMk(3x5XgCh(;9>`M9{ur${ofxrvg5m5`(*oe$B}&~b|B+;FsODPkfXuiQCb~~ zEo1W?e8cwra9H*oC>|?Ox>eemH5+M_wr5Y;U_JYY1QDKjDSXp|R|9w-%P>r%#D~V! z?1EIL^Hh>iLo4=FlIdeOUXCO0sgV5&ln@h=us$T=uX!YFIb8yGY){qUs*r?iPr@M2 zGK~CrNP)Fp_rhneJJj0>jekXW{DUC@-<=%a71Q1_kT?(c`XA3CAG%N}qOc(xd`k4VtRs<#8M{Sr?Cud6%}vIheg z<%Huncu$fi#K?*=E(z@eZwAD9oGm>0kiE!zr;=pt{}BO0ldPEf2pF0qI}>GSl4nNh z-d~FdUJreVB11z`ytBpA#M=)pnz#w3J`^tX42d2Wd3rRKnMj~C&(nd|O($kj;E}K~ zv?mUPOc5P6MRv#(MNhY6M8e4dT$12Iu?zP28%-) zoYqwGXh_0=lM?bjD>6d+QlHXE z#-4EZPeR>)^t$V_I<#_!&I)e@_vajo)`tckB8q;+s3cOvJ0m#UWA+&vL4AUNEVm+nlZ>qmJNfGBrzGQB(rl0U$MUm zS$1#8vToS2Ipri1*G+|I$~;WyZ!E69I7*veWWyUE23wuSaY0SYPN#&#N57Cnqm`DJDASF-$bXd*QWGjljXY9gu+>SacW9~J`r}EhZgyc56K-%Py>s`T5%I&4)UX&yE3axF5mTUI&w7oe;>I5zI z*^tzt6{UP0si$gts?>&B+tpfXqDK9(bQjFvk%B-gz_;&SAAM{ZP}eQwTPgJmD2 z?d5~bWu?jgh5YWp=6x&Cv=RAjgRQ#zr6Rf5!B(XUq&B(3Ir}{<(bF`16q$=XiT!DH z4HC=dNIX6`QAQcdk$FE`(g#}?@{}fW5rZvtkp#&3!pBm=>3*F)u+Q2b`MDmE_U&@c zEYG7N4)GMwUK2h|V_CwvbwnBmIm<9L_1Zf%8b=ukp|h(Rmc9ZBd>TWlaTN3EGbx$D zU%nB_usjh;<6zfd*O+zd3JO$)6&R=l`XtJ7v)ft4E5izoSG216#w^3~#4IKE^OtYj z;|n7tpcjkaU~Ah} zIiL?0eO0oNc&;mFfmismKEeu=aMtlxe%EKU_gilPPG$jCG2eSDa55{fb+}|`2~K7S zR#~OIH8`0ySQU%)7U5(TVQq=L61`}`*!!)w2q$wYBGFrglUXyZ67@sNa5Bp<61{ae znH5vn%3FwY;)(#44Y02Q?5hCBuqUr^uv_q+oVC~!+BkSsuwu??>`9hK6Xj(iv?6=L z8_RTAs#l{|Ln91zHqYNiy@L1WTmeEeXc(-Jvs#B{Fj*GntkWr?>^1MOtm;L3h_$TryE$v@?jgo~D--1@bk_^_5LI{o&7qZh zh^lnKg`uT;h}JwT*W0@%8t!3|q*6w6Ej@3t@V`%kGX7yGVUgIs_$t>SW zS>F216UK7Z@BAZ(!=VV`+mL&ez*Sn`7bq8UuY#V7r}Zk&e5-KOU4mZaIqJ%D)D;P> znp<=3>v(>VFMLmjyE}Pq=OlS9(`hT@kOFVwuxC%ddBeRKeaq%x=$4K4y?o)@HEh&( zYWQAxXtEO-DG6oDByhSmobY1ocs+OBP79WUz~ zIg*tn1|uImnw@>F@ z!tStyBOwX9PD;rC+VPC%WzQk;j6~M(k_KU~yAAu*dnz=aPn0(7*IU`IRkwY+f7cP8 zQh~V0$dYjpk^W;lzg@R$i}FWCWbIzRUSf*+{gDTcZ;(yNgQNZaEaeLxF%BP#DCT4* z`;Q(yy2p))G);4mlYRVX1P8<5s`cs}UAOS#k3L$mV%f4U{`2AN4?o%*7!~nj-A0YC zxUj@KuU2b(L*r|2D(%jZEl3QMFHuHi?)iTIww;F$9!)Ox^wUqj`;A=OuwlccOP79Q z$K3qbV~-6kHhp@0jhi}m>U?L@x^-LLbML*KZcMx>Q6?CNrZou;3_c!w)O!Bo4UZ?v zaPynb#x)7{2|g5jKs~j*o0Ll)92F5`#})HOx_--a)u1Nws`nq=wQWOV-MV%4>eVZ= zZ}odkf)9BAgR5p_q>K3cqZe(Ih*`}Xe0*u6gc`+a-UK3lhI*W!OI&&=Gl zY1vNByTQ?i538)>$~C!1eBALR`&~?RC zRz;;qTU0=#cS47R8c1)G-g}+NWS;MTpF2Ym$Uw6DzTfZrGQZ68Ofvs-?{n|D=bn4c zx#wOjC@64tc2-#1t9YKb$tt_1s0d+eD7)<}xRBY-BGA5IH*jAdEO*jOu8rVRHHztL z@1X0p-fJ-LLTc|AQ_MvWZciY!c1SW|?ch2s+{~WUn+$w(EAswAZa(225s>o*S<|;<2Mnk^eyiz|MKI*r!SoNWobY^`F!%1lnGlqp!78}|GV$GaA1CFc6MLs5*xI}Iq_Vxcu^5V2fV;+tttGLnH zI8v$;ZP`?ypGoB_UT+`}X3OA)(1g5{i#b)rMOEcF8I^+=PNKN9GQAW-rs782z=oDs zY--*a$3Hk)<6Q3%JL;Z6j5N_yOSWEGQ&ZE>I(1N^&MtSQ;6o|+kXbtlshlmQZ-Mkp zuy|N>Oj{W|Vm^2su~Fc0F~NX>tj78+x}~b3qN3g;63d;OoSnTq+})hS-AxrmMMZ^h za=Ey=`?$I~Da52v7iF=?)k8D1t}R-$`0?oO+Or!!|NQg+TrO5koBzTKS8|&dQ8l2v54@LJ(=UxY`+!G9&a@W+qdjw3^LI;mBx_VaPpOFJu4_U>Ihi~ke< z4F9O`dPDrZQ8j%Q`2-#T`r1tXDShq12cLf^B5Bx}v^+@3%!{W^9y@gCz`>oncJDcK z@(%IGymB0d5p^J;c+JRqt)Ej zZq^5Mn@#Wvb~MxFWs8ZoNHiJ^nqDd`&V)NwkY?|rhyRKmMxK1~K6?01=;0||K0az! zjm*WxxuV3ZadjaB@9{}3*3OQO4rn+$TA#DF444C^JofaHZl%{woz1+SdLgSI``REl z!c{!lTPXFlS6+JQrAJ4L2g@P}{3{TozYo~ash<4AjM0Abi1>tA{~g=7?p>SV>guW> zNy|R@Z5h;{PrG)UDSS;p6E(7K> z_wu5W?5tAKSk_`$P8$`nHbZ!WMkanQihRo ziw=AnUcvC?48Mcn-5GxT0Pvm+KZ47o`0 zU@hkaKdv~8Hi4EnC1JjQP-qA(aZA#QdD<9S(uUh=+933nwdpgU`6A>3b`_ClAQ!Ns zlZ-6G&md%hVc_C_WZ=T`t%Dfw%LX|GxF;A@Z~S-T`Q!g?8W>3pm4g_vhJnuoGKg`_ zz{lmsgP9NPGydD?cRJ$>D>Jjq&BMdPnd|ImX=rGuFRQ4mYV7D%dieSI1-P19>uM@X zpuaSC&>0_+rn#fFjm>CIH)ph#c5`dr8Eu%#TA7`lT~N_tu4+&W3HjoSm1{4wN{7#d z1b-mhwPNr0Uw#?p%{O*{b@dXto0pqHFZUZ6ifJW%lWmTdt~%Q4HTie8`CtBQ45Xbg zqcTbD6%qgBlTXf#8x!qXla*;qAVnZ zKkX?KCVi2$xuU97FID(WeDJ~7W+4;$X4@qHwfL2g_LH;t4{S=nGq5A9-n)0hp1h=C zsmXhG?b>zl^rg#b85x4X}}t6VW1tDT*Kg2N($JYBi=diDw3d&7@@=0WR(;fi@^<*7FXxelIEuBlF~9`PF`MlIZ4*(B%Pg|=5C!>qEIOG z5}5uaq7?z>ORnZL4}bc_R~Ozhl&|qr)MR92q@3KeY5aKVFjXyEwroSTDEhgjOPA^; zB8f!AuaHJp$;Z`vmI&SJDgJSOiPJB|munQB(Gw<2932x+^*-^oZP?qdOJLn3dBf9E z`eg#^BZG*ya-RgWmz37mmK7Ehr0hRbTyi5lucWxBy0yC6Xe1|4QB+VS)`=~suwKIR zR*Q8-!0doP|L|~sZx2#i?jInL`b0-ZPrffQQllZmP*Fg9d`Mlb05O}aBFPG2E_x8{ zv~G2zv}9JzXO-_|YP%)~SP zo?igPWvP63QDuGcsqM>Q+v0xs;y$RV&kL`4SRIbwM~IFbf!XVWK1;oRTf5i7?*5*I z9cd)Lkt}#maS4kfJ*NvBzbdqN70qz0OyYIqSl+w@Y?G8N@ zm2*BNtHM}VQJQt5{EofVdvVrp&)L^vVYao{9Xqo3>Z{*QWwW6@N~*Vppa+k$C@Cp3 zzv2K0s#pYorEr-CHt9#dkb)P5<$}@02!(bp`HSdk?ZCe?eOf4jn1@s3L&8qLuEW&E0C3X-Vc{33s)D3nknYm_-Q}c!E1exL1eS zqq+JkmBi>sweDkanJc-0%X1y%1zaji$WORr^FkeHG@}w@kH)guYpg^&Yq?}FKCPFi zf$X6d5qX4OPLRX6aKpf>Nw}eS74S5YjK%_>LEz;DUX=W`KmhS#2)o}yKqmlnqCh7AbOJ!frMx^mB0oPMurSxppIm`Sm;R{LCR38j z`0*z*4t;ktZRb}w^!D9GNg-`SK*mwr%q!zaB?E7d{_F1l}Br3Zb>WM6tMB0C8LzL8u?z?xTCl7=9ANr!ssq!z%}X4`BFC zhTp*O!x?_T0PtfNUcvCk7+%Wo9s|HX#PClud;-HCV)%{$;7tr4$?)qK-oo%;y1TYt z6vIzv_>Bzj%J8Zle1GFbE1C)pLqC1Uz;z6q!@%VfeCKsjAS>I%;I_7pVet7qW!rh@ zw)ojMd$(O2|3@vWKsKX*sIj{~y)UD1PoLJSWcX)#@b;(gT?4HskUjkv0UXEAvK8;b z;G(-ff4_ZykinaKO1D41UEbW3>vzJp_uw7LX7?*pN_ON?DfxX2m6Fjsm6ET;*`=f- zWXKgRig2!Z9OeW|43+94o=YX1B90r!<^w02j6)^|LB>&kv{oEwLX+ue$gg+|<=ACf zG@FPrEySD)=t{6ZDx}fqgapI{|Lq=YtvDbU z{cj6t?2Jbf2qBH)4L_n8SU}@&XOI#C8pR8I(Hu0Ok#W=zaMTdYcF~}S^DFTyB5q}+ z#bvk5c*}ZbJhu^}5A3^T)?3yy>lyl=_Lki`?YSA@HoIl&Th=r6xzPpCEz{q!p6QR( z-ac1{qV_lE>Xw#$do7k@N6psc<7gNs zrl$V+&q*$S`&(A?$sexQ?)hg*8QM)R%+2CGdmemHsCbggkRgpt-X02-x2L5Wtw~jY zjt;ApssN)$2Zx6Qc>9NVIcdJP{S{pjdW62k&^MmG#o5+tRF|O< zfPG?wj+@En&=6-LPVW!geNb!e$0r&Ma+LoQ6qpKpLXePJLtJo&AfXO7G6{F~m;_3b zMmtj3p(3_u$35wU#>gSs@tm&^frMC_IjQ6w{0*^=0ZqgocZhY2jUaF0?oP3e;xpM~ z6~2S8iK9V?KM*1lpGO2CS%G`RI{MWUU)&)oQ4>lY)v;O39_vV-v?qG-RUvld9)VgM zKb1z;@IC@X8s~@D8gfXW8AsL&Q6t|nx{@$5|BiH@6zE3S=#HZiDZHzV?hzV~LdtD) ze-!AhXLMsi$s2d1`=&tmUK`zsLVamuABTv)jY54d+30>D&|S&swuO-igP=P}sIP~O z?j@nV9X7fv1iH%^-P+KbQJ+++wsR8I@TgWCM?S*|XV2(9rg09Gkro2hX^7+0n0ey3 z$@o-n5UW%~NxzbSnM992h1ev<{|(-hqLws*t*QBg@w zsw&AzqZv0cBzjiD&`9zg{#;n7R=2b;Sv48;nv8lqih3cpDSlxo%FoXi%bPGI~KGiOQ4thgM12yK3?1h@93n9qnewW zO}8{dKvh-MspIKIMq_nVWl2$Sai3<<%ESbb79w6J<-4dG>Sj9y_ylOoa%XQZb;R(P zsG)xHKJDhp`}qe21x1B4Hir4C)oS_8hGcV-LtQ0gzpyr&j{bs{zn)#3&_Vd5K!@1Y zX1zHCY8m5@+}!>9N(+Dbsjy7o5R=Jh%+5}7I(|IT{hs-w0umk(IArU`?(cy{M(E}Xq*$*&_K5$ElbiVOUpV;a)r5FU=TST(Z?sr>6KR!<~;fE z=y8ueA}~m3XmhiRi_QAD5FA2%3vYo#kja)j0S-|#H`l_e+|(`Bg@`0liF-hxhm2U7 zD&SQ%HaEg_?5e`%M;Ym8B1O7k7K_4NT zf9h;XdI9{)B{wq8A3V7K$k7uglFwhyBI|X-B$Ceh#-?@?Jj}e*$<@_aWJU|MwYIl5 zpvB}8nbit2tJ%cE$J`Fa4{I$eEUj(pvY7MIvx{qLs+xHm`r|Gpb%g~rg~jb{ZRjGH zTy)a5`o@;d?xX;V$z*PCfswhsuDP=j8kdrEx3$3!VCv#+J??0|pef=w!kulxY&(5lRm~@B$WO7t1rL& z@}pxoy=1TqHzaM>man##n#1FU2TEJiv9b63_Z%qBUzc2?=S)pA`S;-0fXL^UidL+w z_^&u~TF0Rs2hJQjdM-KnM0s`k(PJkD#i~>9;hdadR~PeEIvwzw>mYd7OeGJFBULtVKm-k;&48GbjzH!*x=58iG?>21fLcI+Zr&lUz&GjJ^f zmr<}VJ{>;0D=VAI)ShQ|X7KSnWjn;17(SHYb6N2>7`|cv_!$hJ$nf(R{s)Fn@4+vE zd|3kdvJ~=V5#-AYkT0VoT@97xWmU}fz_(DiJjp3yZL-ZK^h_U5Cex2y*5S zujA|_mm}P21fpD=NM}bkosTT-_W8(!c}qs)dGMo_SrOyYP3NVgQC40Z=AmaoocVpNi%0orQ2fOl&;ue zBtgGRgcoNIqMTQW9~~#z9nXKrFHxVsq;fj5_{OO!>yxu-bh;E#q*8>d$}Fvzp?l9! z?}<){rY9o!Pl$n%zo$5SIOlRktxhtLPF6{yPNZby7aH@fUpRf{RPwRpUE6l-J8>$b zth}_S#CRh&b^pHazFEEX;OT>ia5;4B7+Iqmfk~r{AUu{%RuYwcvXW~=t!5!?%*CbI z46h&&(@867R6%7s5lOn6D@vZ z#+puAogE?ZX0#l4`6CCQMtdN{OVasi8wB5!305X|s-IF1O;InTvC$Ggs@H zzB5-6I14$v$VPBh5;#i_&YE#e?XMB5oH&I-UR==Pq>>Mk)xc}__A`AAhWTdPHiHe4 z8^MmZPhVQy`+wPc^6;V4M~@yW zsYp9?_}Cy>8@zpQKeO9ln0>}=6Wk!VAMD@HG&k7s;80!%Fkfa3sigA$Fquj(Jo6TY zkdV91RVIe_WB3CMf0f~z27r$$7UtS9Ox_=X}=qwt8FUQaJ0~{{8$1VY{N)P@4Oo0~k7? z2i?1Gq-fil79}TEI(F{R>i$Mc*E4i`58Ba8BRWWZDpMOd_;h~7vWIZp=c!Lc;(J%0 ziWe)FW943CPur|#z78#s3DD92;sJ>IUF`qd7e8?xCmcv8f!1Q&nyhwGzQ#c z-)H7_9c$?uH&RkEGA>@s$Vg2A4=%HFl-t*v{MEj`pK2pVuU|iU%BoMvDV=z|Lb3iP zYthY)o5VS#Sy}O92R0a-IS@}iC!cc%evT(Uk{w!Dmc`GGAOGx4PIIemXsT^&G@I+| zJ3H&^O=gmf@@(AZ78X7+dc3rB0+_P zg;He;BAW^djb#<(_~|o3Zns$3A28^?@sr{b#!pXt=*ZrKox|>Z;FckDyUofzrsDX& zYrfuipoljr{9Ig+9R`v!9 zGIclSRn>QPG|F6@mGZ8tTZZfHHmkm^+5h-0CBIV67r|RW$M1()>QK{)ib)#1Sg^a2 zjbA&u?@FYEjsCwgHcn@3T*BD6o3gPWnJF)Zq8~@}=0zH@+NbY!_Tt53X?b({bWed! z%f0oM!QLa%cb2+^!gKX&@`zHC!cFy}jI_U!pPx3K3twD;F*T^{4^d+4bP$B$jE zBego2rMosSZ`w5a&^oSkUjqMk`ckYqnim{gT3FX@khEUewCdmZJC*Tv!LM5V*^95f zGtqQo(+VAuf5~P)@x)Aj;*F|!kDjn|Cv6rT7qujjnhNpyLbiCq)vHkvZb`virkaAh z+zRZ+sjS49s;G9hwskAzVlV$7jg^>nGKCzORJyth;7FZICya?cUdqnmV&dr`Hv|U< zDVmz=!w`iR8Qt2fQac5OMUNOcYW&P;_aFiPLwFoZmxWYU8^D!17k3` zS!~{i{yhZ;bxAt#7v9wL?L2++)v+}I^_ki|Aa=i(2);BhGG>OEH@#`k;Zu({Eft@b zH;v}lVBR$9DYARhDAilNX(D*j;G4s@m_Kbl?wCJqKJJ)5tps<>pEetJc7NKf)P%VQ z{xmpf@Ga(1+m1WzTZcz&3GSFbtq6C_pB8N!N4@=N*qbZNVsSk4r_pRBG|LC`r^N{L z?EbW!0?jzGUQ|zBVN~Hw!;U{;boNh`dDGN3s?3`fZllV)X(=|UKZ5G-7*+Vv67EQq z`O>0nRGBX=(ngi}(tfj1{Q^`MFsksRxeS6T^Q6%-IC$uoCoR`Tbp@zC%BaGRCb3bq z_fT(-tY{D2Do*=*68xuru*lP_A#`^!@`jML_=LP6QH=<;6Y_>23rPs>dgl$H9Au|R zV>v{pD8-NPE%f=h0D&US8giKKJ0}hfXwvieW)Xe$JN*VOqieo$(???QZw*-D%v?-T8E1 zMDJJ!@t&XA5EJr4oXhq_3(@=#k~xZ0xPFD~5CM@*qy)Ff4iP&hg3QLPEjz@m6lsf# z&m3mMjPCcB9K8}mwgbQ!B+m!afhJ^!@QbL2HBHD%pz#PL(e_9|d*%b0mw@`NDLrbS zc~cHNKk2E>Ndh&}fm>;9XK`WM($PlupDy{(gu!M@aKW`&-Xxt01P{(hnEv@|MtFOh!=KJ8o^`4!|V zVJr&dYa2o_7<*-epuEv1BgCGc*>KchI7-fC)A6WdFeC*jXXg()RT_aRK6Q`{Nr6AkWz#W9vq4}?ivGd|rH~B5Cyb6tnhZizSu@n2 z!$cv_>|ZBpXF8EP(}|LqPUOUNBJoX~i2Cl4Jrq8B#14bMXnmwYeExpja;Xq?pQT%V zGl)JGe-wh^Q*a}Mx=++CXJeXD>K$84=NB!vAF}369aOk1a3e0U zS(kY*Pi70rqJVr7OD~;ZK3rVLMEkm|_sWV&rkg7(8N*u{K8xYo7+%>6kJ{6mO?@I) zR3<{_Xx<-d%nmD{C!ot1f-txY#x1K)>pL+EC8hm1UeirVSo<9hbTNDqAVovkbD8f|F3sIKoy6 z)i$bhr9fK-b;U~I#XHuOtrUjh+bnk>jS^&%KF$_(cg;qXt`umH8>nKXFnkbH*-F74 zBgj??T{fz8r7(|C#Y(|$1+n+IU9YEECD}@WJ{Mal&>rtuDICKmwo;(me34-gD~0K} zbF@+rC<-furT7+GDNu?%D}^e2Vk-r@LPUPS%Aq6}cX#FL0ZL+rqhf$FfupqwO!i_$ zG$}<@u~un~<5gNAcNbf$*bVy6g>WHvTdVvpV_w>?w7Qe*ao8RMloF9BF%lY1EXK9n z5-T-#uHV>YMutT`fm{7`JU@ev*T(Uqby{&}r$V63SgO2<{iSXFz1rqeC7g_^~BUgQqq*|jY-mWz|e2#Yeb0A+UdyZWDa~x&Q(cJSK4ykOiQ)#hN@w8JZ zXH<}T;jV3Z&`xEKoeIqvCba1gMrGjusGPG?Nwrg%W2a*4UrP@a+ODdQ+j6#(YDNTH z;>D=MBgS(%&D*|%)&y4*B_i~V!&dpmUXx;bO)Bj*X=Kkcyr(8~^wM?Kt^68~#6=ix zlu23tJZ7hn!f3Sg&~R9jGxnNLi=)8vwRS3EM#XsmR1Vpx?6p(5*+0RI%7OtANB$6z%ZgV0JLwdh`2|M;6aD4myY%lx|@<|HfCXrc_Fv)CEK$Kf6spCiG+ zumlO(n17S7D*)Qq)Clqy{H?b()|;ZBjr|65lC=omLgwEibbrA_+(RFmM$Iy(a1VXV zw_cdypp68ElILu^+`r~DkJ@8|tejNGVR@8y3l!rpLST&HRvnO0~QtjOxTt^1&UcE)=MKYNI-VT8^y0+NkP; zx+-i`SAyy@j4Bs)7khADsw_kGP8-!xVwZ;I+S=OUVq;NVT}jD) zy$Z3hX5TO4&xNM&19l2OvlA|B)K@VN}`HXDRVBWK;V8;OL=)?LuIZ`fw*d!94i*F&QGBZ z4h|j?8t&%{H=d7=r&=oJf&v5GP=Y%`{CH00i5weVKE5HLp`pIcI*|pb;DmYEp*}2O z)(Q=t658HbP{JjGy;o317*F4FZFfR#2akwF8FZJ-Ec8BtEbuEL-UU^a6{W@1IXTFnlA8lJX>m?Q zHgXPVC81u(YO3QTV!f!t!s$3Umn5r8Nz2YJ4oQH@zLft!F@F5U^*?Of*mWbG|A>DM z1MGF3^v&y+&zwA`<2)65QSDK)j+>d1B#VDw=IHT{??^yo;s){wJi0TH<7PW{Dtrp- zdFo5?geS+eab!1!z%!C^tJkE(I_{T4`Z#ion^%oLE<%zh@5=mw!s60Aq#!jKYicX2 zYf5Vy>*$;$DXcMzq&m*htrsaJypA*RN#2_1k@5HVy9WmPYBT|XA^riLfdN6VsY?A@MiASg@o$-((t&S^WG@R`;csq?sMgs%cPJ>Dqdrpa7s|T)o57j!n z>7fkJI@0kNRPz^=QnQzv8Y+kI6pqlN_aJ)*8E-=)UATZ=UrIB0nADst$EiQcjs}m7 z2aoxK$A*H(+;elUU(d@+yLLJA%B3q+#n;o2MdP0w&nKzcm7@a3kKgoN&ZL#_Ny
  • YkcmBK%&UtRej3lY2>A>}$5sjk-yG+@aKn{^gazvCd>b*Fk1>gL3 zGc@9{#{|s2eMzqENLhpv*|>;+@S)*K4X?wt(0@XTB)JFr1cwI(j-4P58D<&g+7dgK z1fa}L;2bGVK~}i zBGC7tEfB1WUu2RlE6dHvDJd(-%gWEm&c2ael$)Ike$qRcOkxS?=HyC+0)tH_bNjb%`N;N{`A(;R2?_gDT(`8 z*njptStH8h;^L7a^`Q9u=YRiUJSQQYBFnR{&`o!7$i%IWCu_(W$+;qqYe>YJ;T8Tq zem*2zJpT~?N|G0%cWUdeU8^!Tl-1Q$wKo>j)Yg(qsHa@s)W%DAlc}>)tm8UONnY+6 z|G+?v+TB0cHy|h=EX)&WjL1mgtKHp1%$qvQofwpz9k#iQN-n!qR*H5JWaH(wsZx?6>TX;lub^1Cpn3LJ{T3`Xs<8N=YeJP9IOYn zu#q3QnK1@^ykH`b(}$`oC)e=}g(3;t#v(^d9t(-A=fc8RIrcd~igM&A=cd=E@}v;W z7Y_0tYe}gA3{VC_xG1Se?>htZM$<^olk|Ho4BAAYJcA+=wT)Noa|g{{V=q@LvNGgp z!Q&Q3zN1hDIYy~D_mpbaDC8l1tOcCtB*r^;znOz?2(MK{!F6A!gIzPP&-#M;Bq@hG z;Y&_vSEOBtMZsFmnRpbgpj)=^z1 zoP_k?UbEL_BI@$}bt*bK=RtJzNFK@;w?0SvNe3slzgh)}1$SiSuAX5xC3V`n_Yo#4!@YDj@^V5V%N@0f?h( z9&u$c&EW&p?xsDKYMxZuQY^$Nera^?$>R{zAL^EA#pW{(Jc3!?QEA3j^wZe1fk>u_A)k~K$uiwZi zG+r&r$;rBrh5)eAva%Ag0i&&~qQ1*LVswD0t*O!A6Rx z;Y?_fHQY1YaW2P@Xsxr>^8v_`zbz%@w?yPbpJ3&!b-Dy=9WUn>ig%=>>`a6-4Yf8} z>yl!PMeW#1SPf06rm?A^v6#2E;&7CN&`sq9s(%kQNFN#6#?QMt@^YE4l<$MhzFSVkfa;O{`et z`oRC_h>>E%+^#JPp1FEDYb>E}LR#t}I@V7AZy=_iR`i5{$I%v3Fpj5S90!$`U&+bM zymt922Jz+0OP4ANDoTx2#l?u-%q3fOdSyes$#0TJb6L?K@Md&=H420(>qS%d*o>pSy7J%2)5b_wA&p@Cgqrcz)6I z&tQ~kNvKYLKlc`wzy%=lUN&AgkYC|x?k&Up)~nVIJ_zJ}@QUKytR>dVNkK+qYkgf) zXIE{_mu4rKbGMW8Gb9npJsUD0Pq~cKf>?_8D7Qk<{rGG z@!q+Wj`w*(>3H8CO2@lAn~rydpMAVLLJr?bHG{VVnTJV$~)B-_H&|UxUVnk{&A{T;B7Ggw>yge_(9Y^TxarcgfY~byg zBL*;pZ;#F&%$U9{>x7hLofyDCcNoz>7y&Ty_AC{56bQHP0&pZDM&6#s;*JvI_FV#w zL`YxW_)BlzICbh&Rb6K4#fv9ToxYHol5!X6JqX@-`}sw0EP4L*KfGUFSQ7U5Ywr$7 zy#H^!;kJD3FQ+pOZ~f1zH~#YWu#!s~2PFUh&#;A9EY8nv=rWrnVo`UO#d4QYQZW_EUZTH4VomoDa~!%TKPBQw9s z)L_iGUT_IgU#C>N$VK8)#_~$5L3y>bvc=<(iC$vZGok`3ex zxj@c|wXmGU(@hWZq_2MbdDGXcf1x3^O%T4&mwmVh9RKpGFn1r}IB3g$hD1!ckvtCf z5$5^#h0|8%e$2i1)l8V;Uc!k#f998p^{ang9gmH1bj!p8FtDYNbJ!=no@~ZSe-qyK zlXUT#_aNGJ_ggPpOL-SQRG7^}d4Fr0^{Rmm{AZEvKAq0|ST2Qg_i=B19As*!t7~j- zR&|=qRZTFxwKO)=6kf`#Xlku8VS>;pn_EoQ?t4T0eMJW4=s;hWy5CZ2y19>olzewr zYkmEsXm_{JP*~x@Lr0E@(6|Hz(#TLh3M|RYNbhbfucg4~u_Ilcn?5$2>#?TYYI_kh z+bL|OiWg?8IR1UAv|!>r%H}G0&s^njuFPQ+Ca}5A!0?R>A2$H}NQQr%;UgJ-2g9oe zfOltjABNw+@U$uM7%@F~C-5R&o7;243M)%-P61dBn*)=yWI`O>oHmp$Ck=W~!L_jx zWN?j}2RgR6W2*b!Zg1JDOF{@OvQ>!^$iq}$79!MeT$V5nDCTwz8yFqR!&@kQ%cgIB zHiUg0Of3mL>)@@Mu?~Ko)@bO%a8WIzorUEa{X=#h*+j2?f$)#8mGE4Y`V^k{HPDz# zIApUHk)bVhl`!_#x0%{4V&poov^2D|n33p9B{H?Q)mIc16;(7M(mfpc5H#4`F2i>^ z8=6}>%ob!pXr|}AU>`rugOPEjR>RQpMg=|dMQLfTNk4u1bV_Tt3+SOm?AVuOqIiT$Bg=u*Ex^wOr)8Pz zN-J6k%e?cA`1SDi6EpeuZCQC<7k`tdoDolteK8D8%gU$cyj;0><-*AmCr(^Q%ScO0 zJ$d->;Zql{T+O8EcnZ>SAY3?-8Z@@Go6JaRpm0;WC}k2XGu8BL7o>)Orf%lt&IS?A zXVDKaHPtuQmX(y0R5!P`HB}TKKYmedV~0qAQ(kbYBsL{T+Z)?jJG%6~P9Cn#G7&-A z0vEZs3rpn2Rx=bm7u!w+hvO}h<;7?dw&LOZc)EQ~REm1yQh^fgrk4jZyoKR4eejoi z@Ki6Sdr9b8)*ff56@3fd_*l$$0nC}r2CXQW;>A!M*XcN!41aivk`TE5T~JsG3ZH_) z`{2Ldga4+qnY!ejfj(#sjfUoXl8Z%M1LDbYowOE*EQ&b8$dC|lr9{%*Qad0n)lP0| z?pC_HDH9)_dH(8+TDj=bmcs+$++rP>znt8olaG6N@#4kLKmFt&8F`p%n74y?MbMrT!{TICMdgbE9)le$?f-B$kN;TQuJi`dD1Kx4w&n45k1#KEIDfA9QxWVa`jry zK*q^F9SQkVUP*@+=MnP1kC}%V&ktcdzmf6$RyJl%_wc;KG1tZL5e%Ql@YfhVe*k!- z$wJJVsEpxvGrWr7M+^WT&+xMu{&9w1&G4xMz}GXp2gC1V_*{nX9svGbhPUOiKF#pi z3|}$;yc@%p_j!IL!^iaC`?m!IQ<n`!&ux{)>@n=u{s|4V(^U(2d+fK?&4r~VG{E({;tr~S_{{HVTo zswdoLKlGz`JwvCk(u){+Ag%AqN^fU)mTW;(#qc-!mVT?pUCo#7|+)ZAhD|DVLW z-d2Aan?%&C!!n+hL1z zl?yY0aH6VKlm}%UGVPjh95i;%?Ypb?_AtW_K@1sy8yS8p!=D}i-d27f!`sSFW%!B# z;DcHD7KYcb@=F>1au41iFP96vES&4QYo@@(Tn&=ZmsPV7ho<^u`5pr&q;S(a%{506lNgbhr#B@7~h+&(39M5xW zmH_dtbn-8J8+(6Be93X#VeikFRPq+?u=fYMm(Su3(Wa9kNGtB_(WZ1&NNM%n`y)P; zEoAM+9+2!HL?Y0-iSDD3rps`TNK@=wrZK083H1vl>EIgb^XWg*l)yIXpjI>g$3ta|IMcb*6#c|WcE{BuLkkr|4{mi%)Uua>qn#*weZAKq~P zAd&1N2N0Y0E%$&b9+u~?xjA$0C-v|lOSl-)LUK5FZVIuGB5fR|3!S8_&>HBgPoPPS z_=S=m8?B0k31ka>Ap^xC1aI^lH>LO`rNCI2UzB+~9u~FN6uYk%Wzsdgr znTg)!)~}=aWI*D9vX=zWA@)om2q3m^3N4N?tBkC4;Mzpq! z3U`${YrMRDy4u&T-?e?`-b4F#9zK(dz4$uGkO*sDu^vGk_4!5C@F;h8cSFyC9>UBt z7X39A{WSyqH5UDa6UvBRIig1D2;W^-ZTan2S({0zax%Awk?Sr+XK;6Ncb56a#>PfP zMFqHd`MKd#jwHYF<7FI6o4uGq{?I&u^!6 z$BrHAZoIHNDFOZhypsHkMxs%5n!21^yL|&%Wn4pjUALqKsq;>vz%G+jYO1WPL}sUU zb6ayOP9sV3(`Y(8edJ1~0DlrcCw5|_M(q?lDjGTUB7iV;Sj3Gu9R-JFibzWtjnKZ` zTTf@0o8J1W6hX6$#uGv_%tUQ*DP=KlD$u-}T~NjFqZs~IhUXaGsRut1{c{ic=OOgZ zJt&_>y$5w=Tt@V`gFZT`Ti$OU1zFCHSR5N$zVDmWt5>f%(R_4kMZP5>dZb5}WY=0m z?29i>eP7joXL;;Ut9R_#GkMIMMbADp&eNCktK7dL5&5tG#J|P=N%Zacx8SnXV!tm5 zi^rzOcS(ffo(n22uPQAlu&Nd99VVqqm$zTDgs-cu=@KDlEQ;N;>uBv>-*p|MkAJl$Qx#*K)KK{WdEe!G$Bg%6+|pGP~=^LZaYJ3fqd9NPPw zNr&x;lNpm^1LzS8a=#JD{qw&|?_(yB{t+=-Gx?!#d(qc${t;fwKgdr+%JARIKHatN z@Q}I9jpiPJ$g1h*d`ZWhG_?2el6M$ty$_e{zt6<-?)E)#{39Wr znr5G|<7V7p6!tz_@(v@i|2a)aOmLL0EyC&BN%|I{vyEZ&eeX2^9mC?Y*}|Ftdi>cS zVdD()mOOTaysNYC7o$61plymnZ=TZl6zIGL9{MYI=qd0JJ?nQQ%~dV@49wHBd|+DQ z$T{ao<+(ZI9vYygsN=pnuj4{m2dOLuZBUiG@XEF0%l-?6k()mseqXUDIM|&oeg^Ll z8VRS9@H0T|arwZsN0qUtsHm#3(>O2<@{@sTkZ+^+|AyYDk*;r}_uoeEPnO9vZUI3m zVri}_uc&H*M@j0BnZ{hvs)jwIxx1q(FE_Wi!Ntkl%}pkkH{=)MOb$+s)wLd)pG+Sr zQK%F$3H1$Hd7iYkHZ<{`Uc&jhoY>N;R3e^U2|WfoIjlsYG)&&GAv%8cxFD&*#ltTk zILKGy9PH<*j&80Zp)up5y!<NwXLsI z_Y2M(nHk0f4aPc8EBm9HTE6-T(dHZjF``e4rq+lZ)gJPw!>WWTzW!Dcs{P5kk zwDiyqK3M(rH)~IwJc}(+m#^GNzjWgG>a{!5h)TI}jJv^}G4SCTe?}k{5 zKTkj$KVFiLv&g8wBXgIyxjAOBPPzDv4?g%{=|`NY*)aL|@x6O~-g@}Vh3k2R1$j48 z&zw&FW!v7psRh^e@BQt}_4AkWs;a9?G7cQty=TwyW5+To%afCjpWYAqgI>h*W__ro zv%S-7>4tv=JwcqE-CVia8XPrY?&iBC&K(^H`&Z&@V=E3Tv6F* z<_X_gQB;2AJmMnD+N!TzNy)D+uCb_`oh2=2&!=Zx%`Pfx>yR~;v5JRIUn+!+eFxOPq10wOp>FJ65n;rS{-= z>(;IR@pM~lR!OckCU(4w`+-f5J@lU~&}j{B>fo^GiE(jpXU?1{kcTBgPs7eBxuB~> zPG2~G-bl53)QsZY2B)zAZoh{PR8c+~Z%F zw#BBe>5ytODbQ%FBj&R5if&m;Sw&HE9fn4ev8wvuiNZ`|mNK}tH+AyZn_&XCTUr}i za0eBhOA6FzN+mdu~E-iQpe)Ti56oK;g( zgI!d2i!ruT3Hn_P@V&fvotoSq?YH{J1NhM&RkpD}z6!*>h-uV(mAhCjmaRSf^&0PsN! zAHnd+46k8$a}VB;Rnn=7S|vlza)`G)GL~8;MXA&(sdTqnCHuD!J0+mmZ96+c>B9>< zJH$s`Kn2*|4&+g%W+=M1Ls@}}+`hNt)>!Z9RWyQghESL1g}SUyr4gJ>?)C`I>EM7_ zko!+!1llvrh7}qMjQRO@8i#qhPaKWIFw18b=S>fWF1~x!>eb(Uf2^TAEkC_8B05&? zyld@)vwvKV%^T8UUzKxgt;0b{PRC`tUyz5K#3V z&Y)T@=;czqcER(=(I)k+%yX`p;p)W*h&&Dh}8`3wt49%T~pWNKsqQbl)oQGCXoS$2imy0~V*;%=HxjA$f znwaNBoKB6&va8F&V>^(W8plTHA^GU9K|a3u*v4(!fBsZR80q%OxnDbLlw&7N59b>s z`!|g@S0^B75%d1|yVn;j!D-eme{c$#Yu>)JKSNwye{N+0O$Ea*(sM(G&|gEivAnFZs-m(QlUQkGO_|YH zR8U-5TV7LZtgftYs&B&PQEX@vbDWjesp&ZlQVBK$wbnObKG8#J(qBWk#@oxs&)?r4 zzwSQ%UK)3IwTEAzw;#5a`UMRM48U3#d);oO($>)2n~Gp3n1+6dN54FXewl`TnTCD| zDJh{R(>b1ta&*fbje(FXjrD9qMBY!Iu3Y)WzxI^noxh%1?;jk|E!w&&e%d!*!{H&G zHZ3K^A^&^LsV2{!JUM&X6K^efJuxEs-X}XRtX>W~`bVOFXZ$UWd@KFp^H-OU4`D<- zKXc|Cxjw{LOmluao}9MeIUMkOh?|?Mo2O@`Le5IPv_9~I*|adr?< z0DDJ{UxJo>3oT3Ic9x)JY242E#^UUf#*(Zvm#f&h2dRYlVPY7n?E+;yq({OLA2ntxd|9RKOrynTYviF3-@71IT9p!&bbfc zYZq;_;Yb91=#--GYm-qZ~{-_Cu-!wIpXXn>8RT-}q6xNx|?Y_ZvSwDThKO^nB zu?#-9u~uvK=`C9hl%Z9{gm-clcO!EYPRBv3%fjR0BB8igWiIViZJb0iG&V@0lv)kr zJv1RvJ|m~hogEbvHge=BPx%n9>7z%*PMk7&*ytIv6DNm^o;Y#L*x1LO9TV8x;5BLN zxCzln4(ij{S(p6jr(5&jfHQS;NL5@HX>)gX66<|jqNYv_!zdTaRn6r%2heTQy~E@x zuJbPW0P0f(y(fUp-SsRVKmm0nYDIoD9VwH;4s{nX!{;&l8HV>^_}6>zQgHRH5wLou zA$SN-Y~#~1FxRVaLDo1dTf)lD?^CungTLBScKAPiE)lWR^*N++0L5 z?5ey#rD0c>q&la#op3k+(QFYz{IzE!Cl_?lk;_@F-R(Frv%!oIe~a(Tg$wWT5*fx< zD8iy5Ls~DUHp-{X89R1Tq_i<}-==lLV*@Q!nftf=wC!?A<}Y9W@cpVCIo&~#(O-S@ z^F?x+mepQadSc_IeJ3v!hy(q`ErRG<2!n-80$C{cj=VQv+C3u_RmDdkDmSNQWtE#m z1{ZHd@%f7ve*PhuFK$eR+u*PKhthAZ*2`xtg#lcB;LzzZdDIhcy#DgT6Bo??C;{^C zS8`CgI5wgq^$fls`TNTgq4~Nh6jphdt0rpJ+!@nHNwRm%pO2nQGleXkZ4;K8AvhsEMfZ6?5*DOom6cXHN4OVV`sSM>$8Z$knX9?RqU;-) z8JA8bpFVRYxwA?puC17BxF;TIj#xH#Tjp>mc-0+bjBg2PJnKE)j%M)3FXdJi_=#YnGxcxxOVEzy}c1yW_bq$<76buUo-^1~(?HNcg( z=oL#F+S*#7zqRRI!3!rELmq$p@%rgYmo5!YOHysy_*1U;2QNgcC;ovR7pwF5==bAV zCeC%LUif@$(A{8y#*d^hTXpF~ldj96JaE(T-_MbQJgAs|3Ke^!Pg$oz@2R!~Ts{Y86K|{nvetu9f z33htwmdP7mAJ^5Ygn7vYXk>)INgg4Ulm_b&r12o$=#e1w2t*=&MMRl?qZa0XvU23M zB`{XNV1b-N$}vHk4{rN5dK%pI1f~d>Bgmf(l0!E>TKpkS?Gqo_fbILpuY$A>aJbPs zgR-`6XoQOz^RkYW{|ndY+=g}ohcAN)c8D#i)Zi|6wcbB|Cvg{N-tsH;S4;Kft&AM zwt{8+31slA3?9kgEj?vBnyL8xbfy||io@#s@;o|Ig}c$2ig&ipRF05i_tUlXy#n$r z0eQlWuB9uS?Q3aA$d>(dH)Q%LE|hTR=h5AeGu-HI$hpq;-H;=}H6y__bp0_BTr(P6 z<5kkuQkIclkda+hm06g7_1e`d**S>m5d1Z;wo-r1O=j{MHGPux_|?*klp}|JDZBdB zf4*Dh9fLp;k>uG}y$q(VH=dW=DQcX%pIj#pu8aJC8!6k6d zl4aZy62}EHzshXNfq1mNQ+N*z4jLNf?H&;rJUnLjh_F!NqZ3>wG6>9y z8*w)oRqP_RAo1Xn8h`K3pvhBadH8#4)SlK(q+AxfBQn^w8d|5`WZ^rEsG6$0d|^cW zr}v2JDDH1WH7M>dqOM&x78;8(GvFfZHKK-%9uXN4lD?}=FvAd`sQrY*XX#MfG11DliA}3d~$O0uu=K? z_3LTrIR&s0lpY;S$6jp1OU}x{v5q_DxphNB9;hBVa?62ytPiEEl$IZI` zAofgLfPn+b6L0R+DVo!N)M~XqT&&WGIh?+2kTx5Wf88Y{3;Kk7A$fO<<>Ds^7=8=+ z$HY-FQNB8)Ns(aoaSzvV!ycnfQ{hx`1a&+=Q78765t+e%`D05iv=vzzJxf2hc(w0k zx>-81UXML8it^*@*WbCF!%|X&{V3oO$-l-~IUl3(uzc>n<-@!@UFnYshao!X zgyCqej}?`zFm&9ZFC4Bc$8Q%_#XjIUOw?Mi4=!g|mb@DDux-9}h%aXNK!!ia@bwIj zDUdS5o+$*Y?k_(kHUg ze`KY%Gq|d!bVUf;i>e4=`C=3yLOw?-gYNOM(er`mGM zB@Wv1j_Xug?#iXwvhSF?Y0KT)xhR~1Eg-7}q&1i3F&B@q=P`FwX2W)>E$3b0pe=ub zR8@qF$fery;xTq@+1_5aJ*}<1>)F#*veuHZr|s%_T8H?)<9rmu+s1h>yrY_T39S_= zJo8H7nH557dE9I*hb7x2tS76vO~O{Pnh!+674H%xd>~r~9VEg5O7-MxDmf%HAsW+w z9F{+x;e8lh%kXCyzGeV;Tlt=S%CBU2^#JfvR(>nP*Rk?BhHvY^J8bz~luskwD4(v( zqI|0On)0c~a66xlfqaM~Y=t!mddei|DFNkh@HkLVl6A{&*XEQtrE1xMw92}=dTM)k zmVb(W%jJh%XIlM-gn#wbSF1LX9G${>{(NT#%7`){wzPe8u(htSI*}wJ1;k~t%jJ(# zRp-`yyYiLSUztB02ceQ;wx7ND&WcBXvAEO-Ykq%9Ub4>MNXrXwb=SB$P%=UQbD&-@ z?g>I0jzSyuKHt~1qU>6FT4ur3?40z1oGhBXQr?Wb#8y#P^D@p?4MUUk$dLoTo%$i3 zCc@xC{{bJC7F&Gu3D#z-g@2gahf_u$wYFOE^%V0=Vj=5c3fw^cEqY<$pW!6oOGrNF zqm3h(I(<^54~KCgu^1MR89YBZa48< zLv>SI-OYcx`1`By&ptEeZE{z&R~SvX!cHXaZ7@zI9Y`vcO*D;OSc};C@s_h3IDRkX zN0-&$N8$lAxFDx18jytu@7GQc^E3BcGvH_z->!zd&qQ zv&N9XFTj0>uAjdd63|WSh+g#Y!%O3lEO>nq*YN*P_a5+3RoffrIn#T3@4b*ffY4h) z02NTNf?W~4R;*yX*PKiMyH^BMnu$)rqq-<~r=OE44f|NVaN z&B&BEYoC4gUVH7eSNYb!d`w;fbr$Hs+o^5z3vb>9?8NP`$MO-j`4yhA!gt=Ceb=(r zR;NZ&sZ^1u)!TE*gjG-9J0*%L#spu%)#)X47gwVXb%h?#BHm&E9&xasrXOkMy;^N&Pfzb4)z{Ob1$JGW*xE{@ z>+aTxtgM-vJ}ZSCjlF(;`pyp0n1IWbJ34Rx5x^OQHCF0ihh_~_9~x`zs;uZ}QahO& zi?}x?=H3XlG4p379o>rb9QxN^5otzuN3+&{v-BOpZ`zYaemjc^SQAT2!EeT}od_0c z&-iw${2tQ6G`0l|+rpr^g><%sB=R;w3-xRZTDFBqa|;@_g(?eLuw`3FV_OI^w;*9# zXttmQ5!(WnZ9!&k!JciQ`bGM;o%<>|V3rA~tjCjDxO6u+|v$6vG zF;~lo1&H%*Xz0`Q3gWrq>YlzR!(;N(zwbMG^xUa4$F~K?f4>%q8xp6~f`W{UKhK}L zn1__VSQwH~vt~SOx~~Y=ZrpzC*iRb)C;SRS2c2catDk-JzUkBNee~JY@m%$6U+eim z3;csRfkNm&=;q%uFJ}JSYcW(Jj6vtAN>O28$fTGB4?puRi+s`@8-=vISo*$QjQGR% zmqy)xfArG#K4j>Vix|$@PuCJ0!YA;2o3m!!+ISxgkb(#G5<$PFN7Gc_*Mb-ePVYdM z+CUq0LW!0O+e$3WAHZF9cJ?+js5?6a46Wm16eMDSy}eK@8Ho4k0+x%We-J421G@h1 z?gs6^psq(V(BIqN-`lO$(vX(1G>;6Gz;}|rf26N~G(v{5qqe@UuR+y0U_z+9q{?!}_oVUy#&8ICT5TTkQ>be8(2-*cOu57Q)RfSg|d%-e|$>r@VQEk<-WTzogpS-j)rf~}Z4MFyv2$}%U1c1gyRi`R0F9A$zRYPq9 zf{2VxOdcaNh@=xI5-4_x7tisXKsIRkv16AqB?24y13P!Dr;akQsFNmar|e>4Y_#nR z^=v$c?i!d&X~2S8=}+E{qLkcM=8sLC3n6}m$^*w;0m)8A=rwWv^UueO^J1u-@itw+ zN$JnSaM)ZV2>WU$@|QvvuXDR?Snoak95yS#r1R-*Y4yo7_E|qIyit0)|@%>|RD#`NU+g zM!V{RXbk;#)H4kAjB){@evMCqmdXdf`kZI)hQCrzdsFq)B^1E=6Nw5ooLQ4Fu?mL! zC>{*|(LZVpgMBfEe;Hmzl0*zV7Su(hLwkE`du@-pOO4V_y*(Wbog_0PogqQeJVqjd z@0;T21~jd(AL^72j(AwRx_Wxx;pXBBuzWik6e~3S8o?J>vk1uG8h;&2IOvXRz$D?V zFhXF#e!m^+T6X|I*AE&R=1`}ez)%TXG;l$B|hvNi5fo!57%J6_#1 zi$eD5*JrWQc;_5Tc3BC#%ht0kyaZWj)HY1}bT&JbWDG+v2N76vv$5tvu;xM_SAdd_ z$L+G^JVeoLvPH0IhrVYx`SK|cc%q_uZ2&_blG>NAvQ37EOx$ghQXS8{@4_tpk+a#mLw)uv!%>YxI zPZ`^^V%z+Lv`I!0yfDwU@#t!CLU~t+7pdq z>re60s_L4`s;0(P%rkZ~PCVJhJGzJ{E>5S$0AhbYv1`nnVC)RQE- zVj-c8rq;8xKNxcrIzr9y6$AlxCivu0@QE44-3_3NJW*eLeSKMZWkVhIrd#+%prhck zMBu!b7Kz1T9(A3M9A}RiJ7wDV$_lB?^lr|)@Y=?&S#<% zsYPA9_JX{`xzrD^R{hL>!e%h*PRw^JZ0|>JGs5MeU0q%MxAUYMa=H`Gr6bj+v7xS< zu+ZS*3gywyn-Tju zz~u`ci<#MtJWbXvshfH0iN_c-Yh*0vbW)3hmYX@MmgNWi4SpbU=?=SOx^b6WLv~4H zTx6(umz)I}f~4>m=8{CN%>o~Llvk7&V2Aa{9op zr_UEtxi=cv3yHyB`on=^yT6@BuzvCUx8WnsfUZv|{?GGs$NAbDT5H^E!F*wpJK?UyY)+DFdt( z>hdbppq0(MhbDRlhs~HVAu{l;XQJiyp8f$rK|U_xZq!`rVo;4r&g1oV(bi%FOlDth zY*gCI*Hk|c=z27&Iqe; zc6PGobL6%TQV~L2_lBt*&U?39VR7#UB%)nTD)>YKREcJbHO>g_(# zSk6UE4>lVCTYjhy1AuRG^c8PzxB#nRnfK_My=JkTlPs;8rA=jNomko#BhVgUX**fk zt1PVtOFQ>EtrUDW6c6oWJ%|~~Q5nlshY?l)szNL`H3f~ZaA;08Xo!VFn=uQGF@Vha zn=vrmNAGa=G4py~h93jV?qhh`qwE-T+%g94mW^K#OPj>fX0YQopQU|iggzItv{!GT zUBJ@5G6HQTOPj#bUSY3$A4~hrb=u+Qt?N% zT5moh(AKiFa+Wrer441T>wcZqtep|-qX2t^0NN8#PV2W3yL#9;8vfOkXroo#jp%i= zCYbfQ;d@-kUbTn4YBhV+vFugF*RMJ}tpiI-v$P2;Z7oa78G+U$5%nysNg@g@8|6Iq zx)PSwBoPtV$UvIDKEidyEUk$%-eCKjXW9MMu(T+wNv`C}(wZc3!3ftq&(fMCi5E+& zV6QuM1lnwt)`q?AUY0hMrS-Z_OE|-Pb|v{cX3p5YjZUOsfy*W2^!{kt9p7MEjU^4x zffLLz8#hmWc~15a*6G|2iyBS zZ11nIy`O%)_rufHv9wU^$cTrrw5GKvyH0D?o8G@!Z@PLp;m>pDQ*v^(y_r{X@>M0- zxfcq1TT?UBl2cN03UV%J#qz zWQ>WRw^yVv)HTI%yL)YHlsvUsF6oUXI90rF^6AjmOyz)iE4=CQjYtYn2216EGpA4W zmP+!I8V9@ED=Qo8)wzEaD+D?XEZ+SbyO8nY#)gnhQm)lX6oyU>VjafLa&l5arA#kE zaR5C>*ULdjq^(5P)!CyA7P$L+`ve3z)AfCTM{_`YvYoH2slmgQ#HhNud5(?n_x1=H zgE^Bc6neG6)gB%I{y;&2l4oVrqfs!j!G0YOKg41eCtZE3$x>%Nx9~Bu#GXRV;tZxN zp1~O7C7Qgrk(UX~-|1ifiw72?69Cdp&)ins`uC1vi zLAND^Wfe85+QOVGdD&SR8HgLm%gIHgGD$8k%c^JwWLR}pVnSk4T3TANXL4R0d?tnY z`T1pKDM{H`ATKOD`{$*U%Ng0Fu#GWc0~!g1%o$WPEGxt5v|p_ut`$91m5Pk4%jBs4I0^LJk!%xm+AUN~!(m~J@n-FI$IvVq2eOXm_wTC{*r zaSE9>ZQ7*qkuxG(Q8dCzAlJ9%3GYMV*n0A`fA3z=;q|{BJag{cxjd2Qn8)%@sVNN; z3cpelmuf|rOiRn%cQ0JH(6tR2r=K4e-yJaVU;p~od^swp5G)R@z`BWBFI`|h?jHODz{@hh*q;`2kaVT$2>X@aWzc%_rm!x z%b$Pmxe$+@zm8|u>x43EzekWKwOF2?R@_vdck$vyRiD#}1z4dw;pW(fS3Jcpj29>E zf9JiYsi};cUa#&@_yz=rj0qnL1iq4@YT4vxpM7?kpjUltE_gua&Dl;=`5%OLGj;pV4ZEbFAtgor6ZtJExd(;|DTU&c)cNg%YnwpxM z8`bqC1qB7QYMzj%#|Y~@b-+VuY-t0KNn@i{-=hY3Tbo$Q(bAfpCaMnnCJ^^hs0{8E z80hB*v<0D5OuS+o$}19xD>G-$iH?ejj*gxi9VHS9xN#w3VRx@0boT7ov!d^co*Wh! z7#a$~Ab(UspEz-*Oe2s=AXzj$R~zEzNRX%{8|amYCHPM&l{+~)+Pm0WNrWP62R~~Y z2M;e9-O|eUcH@bFn&aZ{C?eC(cMJ#)@B+pFDwo^R5{cXj#ULdT0JS(O5CmW~V_c+% zuQM{vCoz;1sJUh?1TuD9%PO}ME7?zwe$FVzE~!2R%zr2+2J}Cb$0dx*Q8V zcsqar#ak_U_IKyMVwQFWOWViNs#x0I>$Jm5N+Z~@hR8?MreQ#Xmx(4xNp?+wbU|u( zJq(FOqMJf;`v*&|m6q43N(z#T`g_u@U4rzua5+6cI~^6hQqHDj;ls6S1*pS{q{_N7 zy9KKRHpyhrbL4h5BG?;#yEb4m8}^1L zCh$5_zJjZFGYrSauy%K2;qAd@xd5McP+PDR4`9>$g7@xAufP8KTq+ipp1dgfm?7uK!A%( z;N$A*9T*rm0XdT7;fYKNepeSy1QZo{SQxJtT-cmnoRM7(TSR|Xr@9tWM8`qE9lIBr zZQ(cD6O4xi)pj}`8es971aAhF7Yx zvq!-t6b%=@g`5(u4*vGnQE&_ODue$lV;Qb{lA+=Mif^I}s}LEq>L0LAxT>vxwtAvRj(>6EYGgv zYSOP=y^?gTu(l{A@k&xkLPBN%YAz#3vanR8szx1q=r2bOoi8fRN~d;{1yY^Y)5Am@ zK3rJX+Xx|Yt)RA>=5e`Ah1G495I9+-)s21JBZ>V3u*Yi#wF8j&{r!nYl+&xbOHQ4< zbTl}4|K|0(@*4B5o!Wo!@WtZtMoD1Uct0seoq6=&zLV*}!Q;Z3Pwv>TY4=W~SRdH) z%ML0DVulWX?KS1t4I6S1iVE>f$t2W<4F}8hUJtHZxpI+{UUhr}YR)?wFS`8Mr=NcM z?lEwaE}(t{O79==C+x)_9HFl!l4SP1)GrA2`U8LYk^j+~@4x%@yYQ^=BIy&KBB$(Q z-q%UGajRnB)7l09`BveI#H17FMGK#a0oL$Nn5mVVmmdSv$hT0DexrVl1E=m$OC^5 zIiPrQ*L_1(DW~^dK2_8Tw5W=T(wfT$(>Z+4i4oy;{am}?2@&Ih?C9S1ww9*$!HUMp z%9_T;Zs=k5j{KJ9o<;$z;m0O-+}wAizX&s#A@fQYzu}sWB5glo8YM4ReG7 zcrClgoSmH9 zP!mh8<64J#IQmQt39_|8`P%VB#7Pt#9gHvXwnGH{5OOVb2)V?bt#IVFv|_{7>bYEf zzZKQm9VZc5scI@KEAtB|E7tmGJ_n1uQA^GH#yPlsJ8e%v>e&;ihxhSB?BRV3k$be` z44^C&5(`TpYhjsTwy@lZ(sw)YoJ}@TzHFqt6SEAcFOUc zyY`&ER#Ybq88<0FN_8aR9J~Lfjo5wT#vJ0FQ+Md)QtZQfOLaa=u@5hC)>R!O`|!6o zvK|0$|BMx|%d`*wihcMO?9T_N4L0wud}rmWD?f=aoc?NE+;`XX;cu{7_ixk&&NDy| z0IzGhrBbp3ixqKv2N$Y)z~l#b)z46|#k~J+$Tx%_tZf^77pbYueJxGce>=2|$yxma zG<-7sogF$_JD}kSG~K=0!NIN;AY1|j!aYPTSGorUNu9AtPYL&w>9stbTd0?{8x)UF zKQA{0PZ=z1>UQvPaz$#NPneIkUE~$y7d$4!B_Lu#XpryN2tO|`A74KoA8H!mOp&a! zQz;7x5eBoM=~%5bY|funeqPUXq=X zRgjl-=3HV*MtX8a9%N!+NpW^gdeX(q>4nwRMaUs%{Iy+8Re7l6m3scrwoPX*Bxm51 zSk}eU}UJIR5k4C&5K%xuzI$#fm1 zrR`D|TSdRBs*~sB$n7k;n%|{T4R}RPacIb|K)FQ5-|OMktL}4yhGp&6d2mY+mxB|t zV%AIVJ{Jy0T>rqB*Y4#Psp3N%?Co%zaQF9h5R0rm{M_9TR^;gB;bdoLXJhM(Y!EL; zKR+kAoSMZ5wc0^Ft*{ZtxeFC`Ha?1hPPEyh=Zoy%I)bZ6+tN_i0!@~c56dvKPh(~Y zk~7hYo+4V&-jd>dqnO<_7S3+CmCdOWqnP2?(6M7Fi)Y#Oxw68?|Jh6{|JhWBPc0~z zJc{X#rL(i+ESz%AibQD?_4{aMUTN{v8+V3Pn0X~;{%6em`kz}WrNhzrh8 zpF98Osei^(!u@g)PT@Qbso(iN?ha4Mk4L`!M?z(qV9~PsAAV%%z4zVo>@%-Czu09I zTu1Fd1e*NB@#Du2?caas=+UG5|9jX{!81+&*ZPe=U+Qk{Y;VYDZ0_?Lg&?AOqYQ62 z{4oglNPR7=g%y`hpbq0`WRuzYaBHMU4fYcp@I0^utbyf!1q$G@-(AR?7$WfR>dcNWDs3A8D&xQxNoyr9f40}@s<;6sXxj>4=Ru0TI;Xom$vl_<(e+3{O4(55e5N=qtE+<7P`?HW-O zlJJ^~M->b;h=(wAG!&&nF(h#zO8btE+!|h+Ork=@^PwH? zO4UdriIrwN-F)FIrO){S7_L*#?*HwZ|8C#9dHY8nZT)%I_VvHab}BoybLY--&zOJD z!=dOe<1?1vNg*mk4K+>|$CYpz8c0+~Tery^6U)6%)(D#^8y2$WaI@6{qdI6WtEs8! z;Mm&Vy>wboSOmgCCWkCsKHt$RFbI|tPdhmdIPLAkIf!BClQ=t*u#l{a_4P`7YwO7& zFvb8ZQox5XMk}^=bV682xE;!rxcHBquxN&-kDq5ykhi;I_bJo)?}N|21P{DTnb6-;pg&$af9cZio43NEf9_hI#XR9577OY2c5?b< zk=-rh5#fniC%ZAbzWE{s(OB=|`R}mU_22HWoO7(i;)aapp3699#&oxgmH1u-za857 z38~Zb73ZzTUm-f@djJ1CJ89G`=GfI0vfRsS`Kx9m_ffOl)Jc;f#z)SXG2`JEo}V>q zW^|{KKZ*=P#T-VR?L-&rmatr4eh> zJv{t5kC{7hyt|ukfaPIxT%(1?`6dgE<#BY6gZq|&`&NSc9tZb5 z2JQ<>%Fn-Oxr`&gUKZ|sVQq85zqee9(eP1Nv;_wAhtKW*9a(BxHu= z(g-QomTrn+ZW39L*9S4z-(s%+i@Bb2H6<-QJ>wD{S76Xe zxp?LB<>d5??9{YtNf+S1P8^93L^l+mIIdpH;V}Lvu4SN*gbp7FpM%ek!W;w2npmVB z@l^^hJ9{K?d?5Yys{^?MlV;4-=KcjR_jv|6t(y{M_$cD|nWPg34U=YuNW00FQZTIU4V`=OR`42zAK!lTkA!y zpLy!X>}it%P##UDX`@EHm(H0wX~OvN(As9qm^Nj~kyxTvgr zBxzw57Ke=EYsqOaQf6giYpsw;#4?F!q_uBe`p#(H^3^Q{*NDJ1#0r5-68uAq8~pQx z2(ZZZ_$Im3E7gbkR z)fVId^R%=CR)L&~s_H6u0xJ;qlbM{H`FH#%yV;0vn(?rfaUoO1!0&Xxd6#$X0(lJZ zIDDy#)5abF$2)?)4D6Jp5DwWU|Rqis5xIM#x85t5X}e5QRQlK7Q*hgW;)~wk|D~tL5|KXrah5<`%s7=KcQi zmV+cX>jT5PLMBmSH|D^86aind%X31b*oLJ%EJ^EcWYW~cKasF;@ z?k<7Buvs`cd*k8h?&j`+ASvI#z>x5Wv46K#WH(z*S{TL+dniZK)>Yd~f(tcJtZ7=H z)eovWyLz>8);gV~yX(}L*}*P$PCh=Kfqu4JaZ-{qYQrcLaw}Uqdk0S+XIt+v9)GvW zWH%dNTo_45uL0YAUrTdYQ=B5Cqq`d&L*)-cPitGh(pulwVbL;VEi5D(d1w|)+7)a! zv2=b5a$llyiS3JO#>YZ$>^lqHTOJhN zq_otOl=RF@qp$EvGOT%(gFLlP4|^70IN2{gtckWqu&te3ZY$}5QS93B!?Wgp_k8kfP5V}P~M zKQU33eDJjomQj-!#)%^k8+v<85sYV2N=j~xV3d8nw^;A}7#e&i@}9BrVQnRdb1f?? z$;&EhXhQY8!tC7K{Ni%tS!QQu=9N_=jJC0zE2kvxJW9nNrB1@B%&m{}tVzo+ZyTV2 zw%*xLQIUxeDNinwQwD?<2H!W+D?TiU)6nNQX2E?k{Qcp{^$GBIk;$Z1_6T5fKr~{I zpPRM7ZcKzSHTr*Xc@>pkhP1_4|8!~g`kD8>C?K^kRv8_UsZWWg+o%(QA zVl9HWl%wJo&Wi_oqNrW}jBVD8l5N7v5m?kzkPSrRqJpfnB9w%xtH{Ml$tJPQWmsrg zfI2R(P&FIGT7fe|)i7%;W}S%XGpnmd$3AN;XCGGb46o<96aI$K)`5Qz-BId|&wZOEU-sb?u6$NK_8Pv4`W zUq#eT3~l{)vsB4mxs$!JX|#gaD?fMr%EQmngj+^yBirkImezIzTCzD{mL}dZS}KS|b zm_*KQzAAEddo||fvzq|&^!DtyzHM|6MfVfz0@`;HIj3q>1nX87W5&82-WRALObI32 zpKvAd1-=GQ?n&e?G~rCV3kYkPe1UiBcds!wZ;bKkSB+N}s7P*}dyM&%JUoAxR&fwJ z$EH=>$d2@lJ!E*=R(6hsENuWg!>0X*e|@Bfr!8P<3FZ}8EP$JL?L*p1lBl zFflVZF#*a$a&BH~Ms8M0BF0vjn3$QJn3&rd=Y<5LzP_HWuBIjwGS>D(Rja<5kapz~ z)Y)@?e))5v(y=!Ej}s?O{CVNRUzdteZiMUVDe?7EZH*z7qVfF1X5X<&8#qC1+S=>R zZ1;&!V8)O{q@nF_az+5EwzY1cUzM2|8ag4uuJzANn>Ky*L$>|IWzd6{J}}3nPZ{Vq zW-*d*o_lGjtCj>P$*YT3imJu#+(hq1j z5*hSCYoCai1(O2;va%`-;~)ChTq>SCWviD4a;kR6$HyN?J(V3Frjb~qCnAZpy-BA7D=-*@LMvw(n9`BuPeqSLl=}QzOTX88c>_x3{~yPjKX{2xnU_ zcgKJL!lywsr+4nVS`7ufS0_T*KAP6)2722X)Hue~CGFaJfke}CgziozFZErR>qVIB zWti)E6peoz%2r_$He;MeXv=`MuC~1)|BoFT<6-TVZTJQ`9L1GIrD_G0$lzegTjqFUY?5(7%d}t>z#MseRsu+za&()Ds98OIdx?QK}&Z_ zP1U)J4gU8&gCJ-n@1e!7J^%4Mm_mM|E+VL4142xmW$gO;dVAF${~LwHSH1lGA9c84 zT1z5~Oiy}8Cd$Wi9(U_Q7d4gHPcVi=YEQxptYuO=1^?snrIKN0_(5t*p7VAg{9M>b~#R5{J=i(lxQ$b{@Hwm6BFxfbr-(#+SHBMw%9+ zG1#vW^zQiW@R@|&`#xL0t)xt^X}q}Qn;65}s3-QZ;oqFS2lvlIQ2BeP(eZ@gc{&!2 zKgz@kSFAwNo<;K&jjUFsT|G5@taVSb+!g?7V-pP9$n?#4F%zWo_>D5{tgP)3C}?SO-L68L}2n zUM#CAt?09+KC^5p{f}HwtIDfddIxLjE~ghcxlErJU?{kDaQo>$R80-Xj;;Uxr@yH6 zqnbI0K)011lLK4^EAsmcsuopg@u|P6JQgf_U;HSO;N$sx|*iOj$W$Qk{O#i#oob1QgN*^?~<#X z6s3`DIDp?N+#R6IJE#iP3Y)i=Uvb}9x$@=tR2-m@ey5Uw#)|mt(5|ji>AD+h2h=UyH5`M0 zFS98wd0+|Z^mq+jZ&I87p6g_OpTZ{DH0I}VdG;KKXCs&i=rdP zg-1kAY4_R@04j(^w z__TSsl@uYdo)Zf^VT-2Gd&r8b?a^v_`^+m&*Bq@sqEC#a{yVw}9J1z?BL5+lxBR*1 zZd-Ozzi3)OwZ~HT{*(C|vh)hllFExR%}cMcdQv1Z94wkY{~@bS<{1L2KCB_XSC z(v$%Wpn5Hzy*r(!DnFyS6z#Jfr*^t^6%CkPaW7)-Xw^cb2^pm=@u`qnw#4IU_Nm0u zz@tQ51^jnk;ln3v8QW+ofu0N$K{2rk5o^nQvp0{N2+e0n95wNoL*XifG9KQ??Rf2{ zJ|?=o0Uz}sykQMABR~-x4&u*=16)>E3|ioEPgIe%{7n~|0W zBV9>edh+i-A4xB`c>Yw<)yrwAmoBDcXJ=ncxt5U)|5XVN6H}msM?MD!8?^+qEDuzPqcTrcnd+ji*W3ypiFG0rDA5GV^5{&vl4MBQmXOh0|<` zP9RFQ!ZZ4#7$~&HC|St4*GFtb2stC@PrawLk%N| znrKgb_~D9~0+l&pmLI#joErb3a++0LEzktAFXUNyjvF^^+>+PswX+$32cm;xWjplS}zN4#X@dM)k?qI_W54nF1hE6VV@NgF4 zd{S}cT5A_$*Dtr>dw4k8h(#o(mSgAR72u(q7Qk}~x3TrIw?TG>qie*xXO~6#JA>LM zeB!vdQDF}D-Y5aYnHK6~#YI^OscYDzg)t`n-wN*g1>8q+R=0rrHiP@-oJ+iR zEh#baG>)m?Z`_q|{?zf&@uTz>hMY4ru5olMD!z$F=M0T>939t6Z(-ZvVjurEPIlb7 zU6mHLU%X`!(UZ-81C6B#-2}#==QsuNzKiFH{$ZU9V%sv8&~!A%+&#JZh?)(YBla-Mo&X$V!9@GBx8{!r25?q52)rQ}LpPDwrB zycxb>pu4@ZFV3~8p3k?rYxU~YPfZrKUjro2!45Bf|8wVDoTTw`Z&@>3=5+%KdzYCH z-yMy-kX5|x)wUDgMnNiI1mVeBF-~>?mZ}fLpZCk3Tl&cSaKNJ@;8o$^6!5eI7 zI7f_4_ZXkAaCbCWFOM-5kx+`f0FCt6W6mN&d_ZqU9bC^<)paP9p`*H68k>z^MJ+Ar zp1}bams(n(GVu`@KYq1b?N=tH?H9r8#;d<4)41k}JFM;UCONLg~> zV0UkSU$2I**J}IK-NaGO;|h3ufmjWQ9t5wT%&jL)4|by!Jwh934woxN)n97~XQ00k zr>8EIdf}_pgM&KSwBLPz8GR2kx(PE%auin+ZR^KUwG)c2%V2RD=uc!q(0PXgUu>QD=fuES>ggRzBf=eOc!Y9l3b^#EFEWYuT3aK|Z74`&$(pIB?+T zNmSKvahV(iBNA@b66Wyi5Tb^F^Vmntl`L(>^pRm#7-~e|iUoLa9v9_K%Jyb8_(@K@bizm^Ht@U6@8X6ABP#oy;7zFwu6I5cPA9yv@NK#H5zV=1UE*4 z8}9`-MuHn7!Hq6Cr3gpMO*r@GAAcM>otStnKP^2cGdnkr+Qm?*N+?5Di8lZ--;5wF z2LdHOU+Hp)fEMgNbKvmVgbSBaGLn}pdooPae)SO5$xus_VlOWlfho|-gBih;#Sj@U zJfI{n@&uA#5g2DPwG02&0diw2wZjM+bci}6$i8x}*={`S{6EA}?=hsbKxb!JeH^E= zjlUOO9mKLRpqqDWOub09paKeUWlK&^ zG^^Rg#o5Y`DT|gp5_9*G2jCMh4lT42BEj_gfi2 zVq$=?+1X>2(s3@$88uxJH$!br_I&CvHipyG2czH&eo@`Z=P+0jz`@fWe~7HHe=TRY zfQP>$x9|=W1qBr;!`nL=+B@2kuatGPcMSAvTJOX`Rh4ywizEav9=UiVCxPJtK>k*) zl9;#(d7C_CxP!ReARJJ2_XCMVVkJjz;_#d%6g>G9=2iksc|&Lza=>OzWw^kYzm@wW zChjw~ZREgZ-~xhP@d>y9&?QFniqL|R+S;m;k}N>R9X^;)Sd^KTU0PS1lXorw)gw<{ zxNt4+T547XLbj;i7~7#|qEtqbn3$D`GTb+>?E)pj>LbEiPW`&^`#ryH*|PKC-W|Ul z{xu|c?}0z}C8Z=MrDmlpS^7k{pdEDo563=<%aX1;)@4%BvtO=GEdlr!1lzr8q6t%Pvm(>lC)6!(p8{Wr^lWf18 zm~pd}X8P^xel#3#ix6;t5U`vhrcYh`*rW6AdEmjA`3oPob}3mp8V)gD*DM_zARR2_ zm|H}}Tvk*FKls06qgy1101{*v%%+jpYI<9HeOr5L(&f_jwsvTaEu&$xTO^DFB#fnO zH~n^TGa5F$MfM0FdyGkeBeUgoIrId$lGqMO7T_{S$gy>`#U+5uC@O;8ueerKS5sMr z>UqWGwWPF2aS6)Im6lgx-H~{}Qe@%BK{goTpgqS4xdNd`gbc7a4!ppOh#u_g2b|cT zo}u(>$3nyS<@+BGZr{4)r{6ZXM@$qp7MweLct>TuILJR@&-N{AzfI_J@(yrbzh%n> z74;_*PwC@41|`Ag&ri7r-s$B_mXIxvV~C0}G@V+%etnjI6vK^(bmJ)LTmEY%u~R{erdjt04L8$!b~Ht8)>!{}c5af4;aN^=fhq?1aA}M{A?tmE}+R*5+r- z1IXA>)V8=pZH}eh!5x*Bw#IQfZq-v()BE$|I93WZL#r3h$MyCYpGT1^9ma6NX86XB zg${#9E3iMCn;I)BFiiE04UJ9p73CFGRn3j~V^h7Vs-n83xuuCz#xagl9HP?WggWRp zC`GBqXwn8oWDo(DQ;4x-VE*LBkF~OKadL8TcXK0PL{`=c8+#`gXID2UyErwVES{^A zlY>2wIq}iWg$lVt%h>uv20q#a#wLfMIBb5yWRn&d11`9sMW&oTpORhQ(9+i0(^gws zl37%go1dGV4gJHZmw3JbuV_-q5);*JCY5aN{mIGqFPt(jI&#AJ@Ic?-Uw+(lxsek( zm*|m)mEx+Z9^($&UC0QMkKxF<<)D%-%!&pC^C2nldCNN6OY$?XB_$D)>=Eh=^(BAB zUD66{5>%*?Yh~rbRI=Y=bt35t|(Ru`H=2eW={a7=^ZyD6qLeo5Dv}yK>>|S*Y6=&z%Ai%ISopbak_F6fwH-&UT7H zIdm0uqiSH#2Vv9-k*2q$AbF=U#_%Rem_BJ(Ci&{%l{SIE?yIjzX;z=Assa3-?karv zF`)Xs^wP`JATA^FiGDcwuyLRduXPL=;}tQ($HUdxLChVL2y|Tyl=aMK|8qAuzm#gD z>QK#5fGRJ?sEypm!UMgcDIKMYjiN5Ye`948630_m>kof1pT&q^UunHIjxP~b9*zO< z7?}=C2>np#+90_Vbof?P8^GkWwc7q(&A{NG-iEIS76hWoIeeM5Lhj@Q%+**QD=Si~ z8-hcN=oui-4pMx4=hFi{QU~K)N!A9ncX2hf#W{_90goGW*L)uXSKu&xVuY`~!d_u5 z<@0InU_TryHcYIoy*)0e?d|UCX;m8^9A2EXt+wBbwldD5wJDnLHO%7&n8yv6$Ja2A zuVNk}n*cc5OxGpfEwG^`q zqeSPOXIZ>v*SRJ#%WIfVTdBf21=4g8DEz7GWgIUt=*M2Gw24NE;iO z+-;wLL>iHV>1LMxt)%rGF#{f-hbjtYggdPL@8B+mW-m-Y2F@TCfq?)Sf*8ShB1 z+hwepUCN$03)nNKCwu06s!6_YDZyJ0aIYBmL~146xrM*Y9L_ z+5whU&HlQFrPZ+C1S8NISlUkZ*VAvgZs&DcDeielW*UWh!Y3Jq3POCcH)(*4S}vb~ zM%buj9_a+Cy5R@(AQ}VoH>)9tF(YQ5{!Muh2n#|TM>I%xrVy5&v68U7CWQpb4+bMp zp5?q@d&AW=8+&6Qy%8I)+l`OqdSh>Z%*y3C8vja@-nbik)A@JSuCeUM*nzGgw*?`|Dzs*2ERwBhZ>^crIdT?_+5b*>CCBX>X1L!K|99y9|;gyf&7* zh}=|3lw?v9%aF@Pui=me#rvbndjHtmm!s-)cd zdK?EC&b&ZL91jjC-%zE(&OIn#a?}KCJ4c0w7d3|b-|4P#JSaaC?>1=`ce>}MlVne` z&ZL{*I9o}Cjng$6;Z;6k@M@Iv!;L=A(5MBtMJ%O{=jj!JR24op#q$ija_W72^pEFh z?F_~raN>Dfe*pk6Vkr*0gZ=ZK49~lk?R~@3TDIGe5g;*a(wp|pSg!8mG2>phE|%w= z(nW2?w#Y4Cd9QK5!{2!D%WlNqcy#a>>LJsf8Hknw&{70i@&NyMfPeUdea8=Wbw=Z) z_B8$L-e|)L!;7&|hNVobSglS^PfSctSF7XsuA+XAFjo>Q_SyGyajpZvXe@~#7O!9b z`s)V6>#wh0AI~q63O`SzJ3!XP$-g2P>Nu?VeoE&E)on@$9_@4$`WH~ z-dGM7%*pB^*u#u5z0YAJW@049V z23I_T4ATt{(m($kO}=4}DTZ;@)DZsDg9||6uqCZ zZ)j*zb9hqtd(WRg-_fTJ2@ZiBnYUz)o3>${Pc8DOYf0(aN>o^`sHjv`*Hu=YJPk`) zO|4PuLLm;G!2nChI*J#E2#;uk%pi-!EPUo;YimOs(1@9&)xHFoPgBHeNPZiG;>}SDJ8Zn7F^4T*s+75 zJ)Gj{`8Dy>oAa^Wj~LQnk7=a7Fe~r$sRyGe2&P!114J2C)2mi7baHY${r%*4>$Wbz z(G$I0gvZizE6Ona9LKOQhW7G`r#%AWt)1Nh;L0L(P@ENZ&MwqghND*~7?@q+DIL3l z@4_7|#vQ$cJDP_(nu|N~9qO0J&pwxYB|W>gIJ3CCFfD`H!?+FgKbY>CLZw`C;|@b> zMeP1B!;dz+WLPaw1_k}{3pX5~scQ=Qb0vDCZ6`kah`aJLPqZ+>M#K`{mtA)HdVwx+tdvb|e)?4+g} z_T1)-yvlOw0%Jea(0}u=``TN}6?&dcxTllvw9sHXTNig1_dwWhoosAeT&$4oM2$0f zljJvflWw1x;klg;_QFao0{7r;BACLsuCegR@*ayfaz>PZQEy+}#+YXPKBEPpj3uvN z$vueNAi-iFXQjg(=^I#jHY2T4h#(?C4Ut;5gE=C^-M7q z>KS|WJLwh^_mQ*sL?h|@M$&EP$w|C-tobDF4{i&k(XoGoOAM4BZ${ z|LGCW;g?*BLjAzRFPoyF@g1NuGEl#g=cgl4iP#TN`cWlB8#a!R~)cKR8GG$kF? zH1fMzs%psk#2R5}lVq@Vv=h5IJ34!MIy!p0IXaN_$?581Xf8LN;<7X8H=M*N$Vo`K znvp{`(~6?>OlmJ9zHROOe^%(QGi=T$kYS$8HVl3#;#g4gd>wxWf>WUMuQM7ixJgM+ zKGyM@5HLdi$ajW^7UzI*l&cG9oykoSPM&Q#s1pB(mJr; zJVv0USy~)k9ZpZ&(aP8(|C(rLK+P{`a+$Xth-s)qXd(z3G4$4-=#lor&QMgt`r zMKmKusG8A`%oDG3xX>2h6S8B=_n*hce)i?|Z3j)mgowyhU_IQSxG# zfKH*D&UQHEH^K384yw(U)N{lz#Evbut1EOlBVPWB8Nzp+LyZc+IZcg?w4Dt+TkX}R z!RKl;eFjEsAV`nsvsSatfiJXoumT=Z; z;&6pxRE-nx`S{194FF=a5ns)yY^bfuIr87wd9ay34c+l6!y9~cRejo_?J-EWdJ^S* zme9(*bM;yPyFJV)Ey2dWfKe=2vgE#*L7r0F5D&q1(eNd1lR8DA*x6IFLxm;9#p|Ld zgUF@1psKREKVtcO>?^fYH`PqlQ#z@ai+kk#k3Rd-tFJG+e=aSd1+;(@Hin9#Z5bg@ z?$2Jjkl7kX7jKUS7}b2~*|R_G-tgUq-_B>M44?V-R9021>grp&Iy!s0JK8nvb@lai z&8;o%?aeqpb~QA@$}WUqd{Af5>f#i7UB4#4ZxBwE!G1(MFcfE~03NmQ5I#|QwtVk0&ow11lG(mr6+$qTZ{}^V3WGLT- z8MzBH5>SDz)zz2cQI5dX>Jl;)b@fd^-m5e^iA}s|!U1&{9tZSO+YXmdPcLcw)*nt+ z@S_&oE3CK*1kU>n&eo#2YZET#od4s`vl%Al9Sv~jI{r&j+ENQH%mYjRh6s~ys938v z9({1hqPrI_u**CCZJ*wE(E>!IRKm$s3}(O1=#2Z{!fk#UP+wbBSy@3`x((#q(%jJ4 zsH&_et*)x6Gv*VUmVpU~(qVXH%ufRxp}AXIIiu*kBXX8)f%xlWXQhxr$G0c$TvyW? z8KIdQdm+(V&30*_H-TH_(25JR0_TV~472I`u#CQ>AEvLutazU8!6ed@42sa-YXS|Y zjGBNOE1X2Es8X@tjK-qj&oVp;>S zI&Jz@)i_7z=2tf6rX}qY#xFS(oFC#8dp_GxjQ==CtZL9*_-GYAk>hqM^^u51s zJK910%5Vi8f1N@_5oGNNnGKu1{dxds9K5F`skvA0!Ad%eP(HFb`~qeAJH&8pB@%ab>rK`zaz>NkZi!Za zv#v2ykL#wR$}tTFB`2*9GW%urvlSl}nji7mPRVuaN9Da(l#}CjbE!7tCll|ZJ;{*v zMChk6+poF}Ii$vxZLHDSr}#`-8wXE-u{F1;$SNeCZf#A0-jNd|$s^=CyCO>6*qXHj zxRT^k6t#Rjr7ZT(N!!phfN4L&JR`OehS2?lgFwCzo9*Y;p zc-k52?^wLXr2O3a4Z}D(!Bo;d83>@#6Maw@1bcFDu)e-aN-SZSmWq%H~ z`I`RETQT$u8YV`P*uHe(LK^-L{2`hSr~PSn+L3mqohdzxEpkS3{(NJfWK2|46n`J7 z%!`m=tKxjbEw^d~;bUi?zY{->sp#)R=3Z5C?xjEfJc~dDU>Wug3=ULqxhIQwdaXci zU7lRprIzZ_seX)JH==Fg1Oi(ZZ*M1Y9B075R}N)gObIOz4E70qyyY?iSBHm0B9n?; z9l8V}Az%LgF!$YoQB~>N_fBugr1yk`R1!iBy`>R40wPv)#V)S8x^`D{XChcu0V#^0 zB1jcP>Ai%OMo1^UOxmPOdM`8IbLJ$GGAa1mKfa7+CLzx~_nz~f_q_eJv62|%mX3N| z88e0x*45QY6cQEk3f9Ub77iZn7Ipp5I^hX-^7USRVs_BT*Izf-8~OTI>PG(^KFq1TUA6>b}PJ-e~InI3c@}c!o#qVxbhKe@b|$zK$q`Wd#4Ot|GRO!8yUI|&p{YEy<&g&610RU>v*qb?OCuC8uvGgB~P{2?LiD6yU0Ft4$r(Zrbi!p^Q|ZdPK{Cot+) z7EoRp`l(P%PnC86Htt?Uv~CNMocEjbxUpE=nX>G%s0CKzcG z9bHnAejO@y%UuOlneE zdPc_WyyWPsSLe@Lv~aA2u<-oJqf;h^j4+g3I(T3wQ_ZO?8){oRA=?^SwS`rrM|*R# zO44P&=&iTjdSRvmC$1WWPmfO@9|b$wa!kedU_{>{hL2d(x8eH^WG!sQp?4Cm!|1?w z2}j>$CR$vQeyhqMV9|d_0O60!Z=!<4IKBIXS3~i~+nGHu!+*)CtP;{HNu{NZ%)Q4o z-FVkkwsY+6&%$B!f5Q0s7rW&*lwl4>8!`Hef3iPbj|wbAH}Jaha`Xz%PfE_qDM45u z`9d<328MF-d&r+frDdd8CzHUb_$|$?-72BJy{WmIT`TG8B<)(;I=VPw#vOv*fQV0h%RAdTsrIoF%wVAmwdeGUK7?V{9ha+`3Wiki<0B7=1 zZf4F&@PQOqk}5c{oP5G>DgFHilt8l0kAM>10VRA4`kPT&QVq+#+`P4f1c>I9KOkk? z70T>@P-a*RhfFCm#?D^$(2UUe3#QKun}79uwBP|L=dO5W9^#pYPD=8SfbODZflHue zOLIvWs(qxQxDGMY{*MXluE=H{BAfp<#my+vWEW`)&z`yk?@M(RBIX~A0`CfO<{`v+ zC?q+mKJC=Z|(SiBzjkbGY=8Y4@$G7!W!XTBAig;}38Bw<@Y!gJvu+^*8gaYD#hKtX7ch=;+Qad_vdzXm#M#tF!0LpEq;N$WdO7E=aVuwN*NgfT@4iPdm=$Y89g-+cuS3cgY?Zh1WB&vFCBYH7j>)2Nx;h5LLA&Jsoe9JyRH7c;Wy0{oE zreTYz>Flhnt0+R0EtpfUZd|M>0`ICSFN5z(V`Nn_xsj zz=?%W?crA zA7J^%E?JV4^nfI=#&W16z=bi1c>N#gV9np9gRz;Jg@q4D3-DA9g%-x5PBuax9*7>+ zSUng$^jdiDuGgA}{S7%c5PR{nNY>uCQ}A;M=ooKpeHIAQXgx&-QFLuDnx*LWK6J1B z;vS0u_8DX&;scpf&B9-yBTfV21xX+RFAOEU77UW>IS#ZuAGACUv^);9?9hLP7=|~U z59ZK+_7`J1*DK_K(z%!c(zYD>Ptsycr)0q$(q94=aym*n%%haSSg#3C5 z3}P<~p+CcfbA!b5qsrj}rQ{i10J6Va00XS4Ut@DkZEZ(;yQQZ`XJ>~2w(9;8@uFi%SN6R-S2iIbp`o=L)feIu z5;M|L2Uyui5y?)5xfKZ^5~S=4`d!&aLT881nH4@4s~aOj0&f2+8!1}%Evhv9c7qc^ z^u#qyudF1YxXxhJU0#|JYFyou{I~q9v=6v1!AZV?*0Ywp*pJ@gNRAw`ZUfFd6KpD#0}Bk(nccG2u>5d0lC&~=*`*H&(a1>nvA4Nv6v^fFn4e=_v(LX#i$pB z-Wu-i9LZ`!hOOzPHCh8m_f8IbmQ-bU6MObe?AghQ$*BF55sxo4f=EK$&V+=d^o*?3 zw3J&hvH0qL5BE4S>`gl|`sJ)v4k61T=8Qjh^-`fY@a5Ov87;0o9|9{{u;-Xpe=pZ{ zYKw|XTl=Slq4+$+JWxpSu;%VP)W-DoA#f}aZ4zA7~{jcv`sw7R+Hzh(Cd5US<+-_o#yC;jo zxyLqlr_3SllR46;%rTbA915DMqN7qrYoF9%8rw~=dWxMzu@Z`H9|T)Tv5gd4OtIb+ z8$Jluhhj%kY%0ZGrP%yIu+0=ZnPN*QRz|UHec1l2t!=xB<#%cWMB$#CAW=3EghkH1 zmfv3FJ#sV~u$$OvA0x=V%$#4diKVv2c`!?D^=@LRt!rRmsm*5bYA~O60P{;(F zQ_iJ1my(mxbB>)jeeU#$Lno6jT|9d{H#(p`mIl+9AaYTosUUL`QzyEQ)fO~wk+YXh2aq|zwq3%&jd4@B9WG)n#+c; zqmc0@!OIHSexunC^<4c$eX71$--uVCK23j9e=*9ru&~2M(ON}H63Wc%?DS=Id}VgN zR;Q~(Sz(o=w$mvv&_`TTtCm>B`co(TbEn&-V#Kz{;cd|W}XyPX$71W=@43etB3owIT$r%U@DJslO zjk|X4J+m)z>~h-pQboe%j;I!?rJbX-R3++YsI6_*3qm2puS4ne7siN6cZSlc z?E>|$xuS`|%sRDKL#3t=5eh|>ORSy8rP|Bg3(I2R=;iC~He$kr38Q`7{XK{K1_lQD zx!F3Sj2@~wXuu>n<$u@)FIDc#Wy%XCPQUTgPN@2%S<# z^7I7GH&`G*o<#*U60XoY`oBSAZ{P}RaD~@#g||Rsd=yGK5P}L-AC1|)d-wkRzyA6w z-mVRW@_ynh&m<%!qPtjJoJJEJO?(_@hC+-JJ8N}ic^T;zP8L7y+H)i(JHK@(G#BP$Xmidy z@zm}eJ9eypK$@P|SW{&aG6w-mq){y5nCx4JLggAxB4~jzh!_IZR$ZvwAsPzR^WBDx zdODavZsh!{0pU<}k_@ruT+B zrEfP%-$yBZ$I-pf&_|u7v56GxNwL)w+eoqUL9oRXTSBo{D0U>pJ~0T^hGJ(^>~j=* zmSWon!Ct0VD~i2DvE39KFbH-I#m=DEG>Yw{Sj$1MB@|nF*YgHY>|=dcB4pS#0*>{k{?ATjAU3v z@>h~cgMbi1W)1HGGHsDeSyUwQHIO75fggC(j%|9)C(`HWnfvv04zlTUNc*0{G`5gp zed!$Z%zXvLt{engO|b@w&8Fk>DR!{AucFvyigl(~ImPk@8Ml^V{VDb;#ad9TVi0W4 zxRw-~L9sRzI~bk*U+(#Ctj}G~s~qH>P42BT^q!mk57@RoY_BYRZ_Z@H79xcQrn8X3 zqsJ3-v{@yQ!kr>|rEt?!)x(9H=`8nfAsxNr!A|!ay1Ka(dxp+b5XC;%cgLp3{r?I( z_TGCo6?+)TFK)mC(@fWrKs+?fbiERd2d9~?Bh7Fj+7;^wCrm(JrmN{WNJQ0o2vMFi zF~1tQefX)7(U3PeGZSz9*FJQoST~A2O0jVi+b{^WXI$sI#??`*Z69_wxakhaR3{-* z{S0on>n@pU)&t5^H()i);tG03K*v&n<16ON8v zW2aA_9^~R8sHmtYNWs0PYBXJr>i@IU=9rUJ#kaL}bhI~Dy4l&v+R6$vg}LSBS)hXd zt2Ad1OK>YBGJabdO6T!Wc~>MtcdoWp^{l@o=D~W{)Qs+C4Yl>=_|)8j|5{o)+FL+w zgUNsXCbS-f!i+eaVG~5@EM!_j0m8dNq3)FnXFaG~_{h?Q^XJbE4-K6#C2a1(#ml4* zKfL1c$6k2uaqNVF1jf<*X;Y~1TmjXpT%&2i3>vy1Z|Zrx+QJ?!t6vWOlaYhx8aen> zBL~+QIrz+84lcotB+<3Rj-tShgnR?}1nGubxHST{&I_6n@pd3IT2C1H8iB#00m>38 zyFptbx%Yim4I!>{e-zsmVkt`}-^P<~mE>ClH;_D2Lr;J?^aS{vo&Y`P`(S5fk4z_~ zSVxLYr<{1O^ZhCvS4y!xG6`Y6;G7P9Cl7hbdvoYi6uE#RE9j`lDRQv$j-}Ye6q`)3 zmnl}zH>zn~-$t?1?i#n}Y5y8`fID76k+F2tXgaFvAfxu^-|tfFPKs4htn(mPJ-uTa zifyOZN{W^AVTl!eIcSp9RwHqSBnsN^W@23IQ1t9A9KTSbDIAJs?PuwEc?>2!{N zbNu+5Ls6d{t1IWue^7;aJSP`+b|U!af$Tnh=teRmt+8k%}TfcZ)!GZ-egU z0yP&X4^fv?-q0#nv~`#(n@bCd3i7kEi!3awbJMf3b23Z1t?ks2(&*^eSPcsCcXX(% zS}Qeqx2|4^hU}1(UsBT!J6sgU`|iNk!G?eFHtt4QgI66l?CRiIyEr*n3)_>634*&0T85{q5WG?Vb!>PSUA;`}U!w zXKYT){(VPxYU<=3NQRWPRV5zXwQE-la-dX-&6|^mO<7MGno3kQ2EP|ygk^bme23*s z7?z(LC#aA6T8#kH2xcF%6R-X7^6q2468!sRWGK!=n&B}@ih2!r>=1cyw?u{2i zX^lt0+7m4<%Y&I8)guZEwZ-M}N0S9o&&iV~k8)Q^GGj_xt1Crv2X9!4odgKGhCtR0 zH;>Aztn6wke7;O~L#3`A6XZWZE@u=9en(wRC2C@J2*ieNg?faAh25}Ww$0TdXFn9q zvlHosTAjIzy@Q97le>?P&p5PUC0}mL6i#LB+8#_E582Qwr$%kFURB#CiU@gc=8X_qK^7uT~#>UCh)3Loy zHf$ssGC0Y2(&~CEcV`!GH#aYTfB%Uf3Gy|Z8P4&EMg0M2;zQ8H`=E(GK@%Tw|0CvNs;0ZKzQ*66 zxoP(o;T|#tI&+ry7v~r%Jx;s74)+5vD_S4^&R$VNBh`VtXSLK$V?`2XBYBqK;>Od> znY`@+gB-}lV?nWw6k9>DVxvQz_iG>4w6un1j2v+I7D~P$;DG~za@t3*tBB10B6bzY zTr{4gR}y10qj&$2o{(*`Gx^??UGwt(E!pE2I7WSWTa86@;KqfMh zH2qeHM77O084T@gS4X#suMnep2t7AZ;ZIS~+Y?am*&kKP2;;dqeeA@M^PZh;Yt@EQ zShYMW+xbt;2pBbfy4^5eKO74lj@H696qaL22PFi&kM+iM=ovFdd(!#u777h>N`*qq z=Zi_xzb@3Kw#C8ei(Y=C7R?Vt?=&kbN3T)i76f}ig!e^>9|ug312TWemynr+i`Lc2 z#5gazB|@1(f~&W5IXI{lD4Ei7e!u;u++a>$E5&CShwc+tV{zx?lyKdfKBb=Obdeg6PtV1i2yl`?|9 z|LLnO`R)^*`S8WZXN`_{>E(~!eQ;Xomks_6BAVtq*3U0s?AU2=&Oi0^qM-59$36gA znP7uMrnG6p-JD$%R>P)^^d2*3@~InTKzI8vlhedvZ+7 z&4kzs(b4 zEhVo2YTz(Zsk^q8R8JvwxCO|zVH%3l;=pyZ%~@Fpu(4@uyj_A$Ee&Nwc^P0nWpzdA zrCn+t|omw`{Q=G1;rR@N!C< zt&^K&YkBUe?>2n3m2@~&l3Lx*K6~qEZ#@=HE|nYVTgaWHZr-f6S-pCU0Mh>Nnr??B z;82fwbm#2>w^0C^{kVf zif=BhtyLrIsIgT>Vv2thyyb7m*wicjZ!a(mUrpE;2ye{Tpjp$VEq~#;88fDb1X-9j z=NFZBDlNkw9`Eiqa@w?UK|XVy2{E&A^YSKKP1;ylU4wEC&YUYP)J+P-&`L4jKn<*g z6znbKT1AE0#>{M-uK>zH7hh@skE?Crn^`E-lPxTUxjT3S1O$gHnHuOl$z`Oyxs|1v z+-dCmsSXZauEU3W472c=JJ;3$K|A1jHbS16m91QY27IpO=61vD>nBg{tvJ)rz>~A( z)!=;{k`W`+g8F)eq=8ciMEq{T0?RA;4%YNU3k1!L2F;Ov2mzqE0MMLmY0j-%H?Hs6 zxqI`v4I8&^{w)Um^hmt`OQf^LkX+VF7vqp1kdT#|NGhG}+xyFJ7j8y>`_<0AhSKs% zWIC&q<>mD-z#AKC)zlQ_S4Y`eSU92O+o-UxhgLkca>c__-K=dKn1%RLG|a;h(IvD% z1FnwB0yv_HuDl4du>`a6CT4>~T#+8%-Ws&pDa*;ty?W$Wc6Lq%>Q9)Q9hwZ`@>enh zq~rT$%lqB}M*Ym*eFcsBnNC{fXdX`O`^p^go*mj$-&X=@pt<+Hw>nZkYjIz-Bom9{ zeQ*7weirM#dP@DR-o5r?nLDifyed#GL`E~vB;LAd**05}+iwhUa9PNOx% zO$-{=SyFt1c2~cD$QZ-rdqv)7ow|dg|mL;})o^YllCG*ICyIPnF$pryiMz z^toS!S$GMvuo!!Q^!)I-dIe^o3m4A+w&}a`=P#VOaIK$dp_@R?!fn&j`@z&a3AS8q zs%u+>iFnhn8j0hN7*?Q6)GBm)c-!!S;fwxfu{SyBa9@W96*4?c%&XuD%aWS>MKA_l zhhgvtY(;yRi^wHB!|Y~0L&3*>XPxc_*{YZN`ZcJ;{jY-$%R^ZzRB~||I|^Bzaxv4- z!t`bX5swn)6RjCkh`qdLseDo`MtF@FKWS?4^!anAO$fEi?Pu+=T4JK$voHb&xXSSM_wu>3T4`a@Ym@(`BqVk`L!d5 zF69Rq-IKi(P zzsMBec#Dm)84>93;4sRjDmkgN+CE5n^S8ab5ZoOpf$ok_8U6zfQ__I+3p=+Q*i2-dKkc=CO} zV@XB?Es$!gg(F+o>&Z7AUt~~VSR@uB($JSYGA)Ip`4q$V)Hb_{TZm*xDI6KL0X~LP ziLdug?#-|mx1MBJ%qDk2GAwAtRbwrjhK0Qu79_)A3U*ZpcGVQ@swvo2$Z*Jtk4sKT ziHpn5&92KWEiNd~C1>YUC0#-t>PTw{&++M~^N0xAyu7^PqGXtLIaZ^Ia`yEtyu5Gs z-hGGTs)~}djU&g5cQ8BpOL)kkgG>y7=rS#__wXG*b?Q{qgAxp%_S$Q&k?`&)>l-%~ zE_m_NkKS7mG&=lgVZx@bKii#8WbR*un>N1lA+sDz1mG@h@`*ir_wJoFYgT76!s|&l z+NU@cWzwCCibNJDxoz#_C@<5thz)ooSduET8d2sFBjc1X=(|}4E!#FaE zFcx>~YisMe+VbrkZ0vBf+1NTc+f0~f%~((%l%n>=va&M5po=wVQN^+C?R4fRWA-Lu zs8Gz_M9khq%$`$ePD)OPt_0N zMJrnL;XmSB3W2h{OE#hQkNrCn%qPvCyCj4`u!;0P&n;N6;k$pYCK(zuEn%JF$#GJc z&F0OUH*=(um}8pppV|pEMXJulCf>Sv^J=;FN;Y~2W+j}7Psqx`iK2oQEJg1*e97f< zoEB*7&BK3cC!`)Yb)soEdbK$^30+)WU7ZyjR_2awj2%#@45u`cAvOoy59DSRQVwbL zj5}_NXeh5?)>dQIdgJw9$E;1r&W9haqN=(q^JYe3Lj0Ap$F8SWmZe0WI(7VT%%KCj zcKm#1|M5SMUA&Q;c;x6WzZ}0DpO}gQ-`p5hZtv{rE0drXTSFVKv#Pu}KYIU(Yf$Zu zZ`$z9SNmgNzl^_hAr1m+k-$~SH$+XyEMsjQ9Go1tZaJD!Wf$n}%5M|E3}D05Rm7e? z^2dP#!-ut0-Q4}%&->4wi~fGi4||SXOhAJ})o6W3SEVlF?E3ZVf78gE$4{O-)~uqo z1Ct|Z>acPMd>UT8#SX1i331nNwK`acc~Rndu0?TZ7oQdaH{TDIFF!T1IV(9fyH=x# zJF(|dr}H!6@Ca^)aIuNmD9k8m@tpTgCbuI-^7W*kFE-F zmI~C7L5(`Jw5zPD(;D~+v?la$7E~wQ$f@dJh3@9I_QQs`dyJ7-3Tw)8b>{Y?r};Wt zDW!~@8!wZY%M5xOYkyMU!$HXtcXTsCv74nHlie&4=ql3E5bM-!rr;?=V!mG6j_O0| z33g5{wq^>MbhvMjt+%VvewhDoD}%tn&(md8fX&F!K@&zg2Dm!=1bREWj}8c(?B(DZ z;LnWZ#<#b(RB7_FGHz=Lqu}Z4J5c-7Y=oo@yVfmix)*A;LDB7&+Dk<|fuSt1M!;{U zJ#)SVMQs8_k!ZzD*oT`yQL`#5>g!tTDk|d6o=Yy)kk)SxMq}bWBC{k`sF5RyyvYL} zVbGQB@6es(qRw-e*ZyC3tRaahpH7eZ;>+#Zb{_eg)QFqX-m0rJc8c4!^?Pzte}g~; z%cL2F1@obkZG~VH1(D_pP~17@8YphN^gsXjU(>IDS()pKg$q}#TD^MpYj3>STw780 zcPM(6rZ^|J01X;=mKL2Al@&T|5%WMaF76}pS;W+3VPWhzXFxIl^qL1FdtndV&)QQg z;p_R9*0vo2hvCk)!)p&e*iI1lZG~A*PLi7BWUo=+_hSM_$}R1k9&B?6@7x`0u{$>OeXj*^Y^yIx%21Gox61Q#~*?XpUj9l z5mPSr95eiH?TCY%W3n8+UJ-kHov z^ADz5mZYXN{SWs7`#54<{>zD{hCTAan{U4UZZH$c26?Jn0+uaq`CI#;wyD0dyt24X z#H*@;3Rz#9|M&L8C2m%Qwz>f($hww}I#kJPu5Zx&&HZ3&Zy^RJH&-gm%q`3m=4Oh& zw;$Y5>=UI2{u|W#V4L91J#epGjb<6fL4tq+qN2pDYlkkU+u%1*2d6Pb~7|AqK)~HN2b~s+J}6IkEGJH9eH%BjX9FX z@6*PF;I{WiS8paRc=X8TZ~=UilW$`=szs34rg^Zi6(q#R#bjg@Bo#3mxPY}kpM_H_F*6_MVsTlm zt_C}gFB<7coX1TiB?Y-TIT@Mh$w-)pJFE_Lbhd8EjybUPhwZzsCLQ0j{)_!LwB|NH zZ2LVvr_RjT$w8{E>FLe4rt z=BFoIk4s`5Lm>XXFjg(RaVp1m#V7L-*K`_Z4l!JQiHdIPjd4yV1Ru?EtIY|8NE^c( zw0L=}qb}`sRs%c3xitT_*|axE+0!2)nU6W%<}(#4Z+&g{^>bI^Q`)U36BpaK$0CN| zdF1XBLuO8z6rilQ3Ssx;d39h}MRB#z%6rV{(UZJn9bKKBJf+j zOC4zC=_Hn#lgj+wKDN+%oZS4JZCve@3Of&T3F-<91qPl}CYSI!tb|zAfVqL@P&hjp zo4ZI3VwXS!<9buowVYZUxMFLBT?%-D_O1>*NuN%%5;X8SXn;gGlK8ikpn*|ksBl(FK&`(fMnpU*bD1WVrAhX0`8>8AqKXKSzj z8Ekl4_(L{NWPUu@@DiJHKG^WSVt=ED-x%ArR#2?*66CM zsH`7IP*vS`WI7#?NkI zKj`j?8U_6q{Tu9@q5dNO*~8rpYOpi0*P)Svje|XU2-w@ei(zZ$L~12Wc%X|Lb>-JV z7w>^Cz64#6+;*ZXPt3^&WuSCpE>YIA(RAbX?W}Bk%FZS2%L?*PCKN3>N>SvIbT%tN zGYCy?E_0M)Sk6cx=)u8*DFhwOy3obGy}e82z~gmwMTA2ykYpbhT z$aqWg~B-Q>V71x}hxP^5x{jGL?g!%l3^RnLRgCt5|bULun=7 zZ`!Q)KX^YxEqVXrg9i^@@AiM`Auy2bu)Xgm_6#;)~bWSliIebhR70JK8%8>WTJtXg_6UVdH>46OK+Ut}v}Q6Y6nvu(l)ZL2Rt8 z3DtqBEUjRDK`xZFgFT9u8%G1>qEUz*+nr^20yJsw8BM^Kkk9yEfO~-{%s^Hf-D!zE zhWHFaKw}Z0u|Uum@i5rJ1K6zK@kQP3`0MPP(z;FyGlXS};oXSN%*?M*N!pq^;KVVn z%7)MIb;GATaZ5uO+MPdxc=eCjZ@&nGcK}UW49^O;{FzjxMXdK?E|PuiH5GewYp~%{ z*_Q7&Z`2Fr(wg+k>JUUZujN%RWpfb(e~Y<|QqI4^%XJ)F;b+lXGXn+M8pz9=xX2L0 zyM}jF?Cdv4umpLnXV#0P25>gzD77t*?d;Mb)uK$J?UuURwH2erDWk-wZEY$uTSr%s z0ISg1+-l|QsJ5LO&J)?WjqnT%4qLin&f70PG{(`HnTtQJu2z}NK$m;8pKm2z9m`(3 z74a$(yA`P!^$_OiAiko`{&Klkqw8nl99W}^DC?M&)zS0i2uBl| znn_7+Tw-SC!S4^GhGXh{47P?q!+&_^lM9AC#n$4Ve?ffu`p?m=@TYtv$jKSgb1pfT zl;0-FIClC%D8d+iV73b0nq<#r4s|Ik4IjRbBYEjasWg082Rt^dNT(lQaeQoSZOTd< zLdKiBgsymeioJtbKdWWk+1&}+^qgB+Id7IN0vuWy6U*l_Gh(Vsz@j%_SvYsqQ!l*y z=z>xGtfw_Ql4#M(UpE+nCwX8#q>PCLHtqxBVVa8dU4-==i}fYlFzrzwg5klp*xB8I zErWUy-8=!frvZc_7ORxq9pIlRDvTY4ZPnF{z3`IZ1D=HJg;%f_o-w>`zV^EfUvJ&~ z&E5+!MGdutirH6QQ7I1}{ej~+)f()4AZzom3BHG`zk;opj}V|FCIiRhKCH_I9GTm} z{5FW*T}22r ztN=WZHFO}g6S*{zuwH{D2_+&S_yY=;qO&EBRolyD5EqmRbD3BsHkXUdrD8rgCPX~| zKH4RKe@JjH;BXM|du+KRn)F@F8IfH{H0cV=*@&td)E;bvdq7)RU9GDrM<*&>Lt_iH z(rTPVWn~RjMcS6;>o>qpaPYw7B7plF_rdZ=pD2V)qKyWtl9PMqWoGhXk+h>0o+)nB zR-}g-788f7M795x%)&l-|AtUZv7=tYrYM39OQc_{`|(TlXKS}MG=?A<;x$&>Le}H` zNM;9zD;Hn76k>=lL@3fOBB=j-zBs_kt#MPZA&jlt5MofGt;(rWrxNq31zjB5wVGpA zqZ!Cr!3wjgmcrT0d9bE9lv?|l`NsU6f4%YQORHZP*OdB)y=%y;;V_sPkl@qK#Ioh( zAuyg@V=KT?zhfdHxTJCJ21BdEqc?AMy9EaayGgq>IqA1fe4jiy$P1JyZ_%NpD}(-Z zq(S66NU$(NB{&HQ1x0@-b2B18n8Q0QmnoFCl90Fp#w|bbz_)^&QA_A>vRsAsCJ|=y0mi)8i{pz|Ww7Oi;jA zpn$JH0kgD?s6g3VUr|z`$uH1WL5MA_s@7E$mlW5uceHo5wxBp%V@)+Y#Px(2%Bw4D znC{M&rpES~`g(0~i-1taRgUv)uP!lGfVy_^(j^%3Q!{dlDx0z6I$1GfT@WMooQgP> zGBfq0yB6}IaKG^K@HxJbZme>(4e$)qmn2`<^W!gmV;Tx#wuB)%V>QU=Bl*7T8ikkd zsN!S4e60$F@cXjiBhlgb^fH~ck%ZH&GCXVX*^x^pa+8k#wDsEq2M?c5?xUz<95+I* z=XH@V5x&&IVh*_BLym^x!%#`KpSaO9apFWDb5UglOxCRh7dgudry=Nk3+MbblBb?t z!x#x{A&StS0-=0vd~Jd^AcfJhBUpcQRC)CI74QX|Wh&T-?2GJb_I37Eb_x2f*nl&j zeI9g$Z(*gqiq}OZp1H*A61@^6)K*rN&jtm?;lhVu!P?Hg`R{pP9~u+}ld`_-V!R$= zXW*wk3T1hm2-VCD*wrdQ(SfKaY$|XU!#jeX3s~EyNbdx^EdM=8UXhfPlvyXURN$HA zc8;8d`*J+>BlDkpe8GYROC!QO1-(=}%US|1A(C0y+dI0tSSuZzUF9{;a2#&kvs+v2fAs>9ZCro-r*9 z3Ey+)KQ!ITHFV{wRnPz9=~q5{|AiM<|MQg>!w>~IdeX$OB@17Ca(dX)&piI{lBLhQ zfXBeTc#acccj4l!%2{gX=GI9{aka2c4!9MWTu$n3*_o4D;WgH?oaMMcVD_CvVm0%C*Tk*|!TzAieW>Z92`(B=E;btF|_#va%!{ zPS@DPn2X1b9lNbnvxT=p4RZ{S8J?1BY8z9|UDLMeb*FwL6pWyZNGfV=+N5F^y%fxF zvR7Yz^`Az$Ct#MEZ7~Z)&e103cX`sa^Qm>U1(nUiSG+{x7JozBB9*eBZhWa?&u$1u zMe(TGSpMcpRxt$MVc6w{> zIkt5$`hEl}rVWYurevuQ2&12};-(>sPNKHwpKt3&S_;7y_=?rF4plr7B>wlmi33YS zj(irvglUnLk)S7-DY?fnERly`0F0L>G8+Y@uPdkU(!3i1sz}1H@?jM{`)uHz?eLk79qCxUP9k{2^qbFkX(Z9 zyxwQHH&S90xdfxfz9Dzrh)qB&l;%oep|qacYoY89{cIlMEEpGxCWK-{A5^laW!X8m zH`1a%^zb~?vSf}Cw1=^r{3cS*(qGA~!ef2EwczXOZDjPs7CDT8ZKK+T_r+1`J zucFUx{@1e;EpH6gk3>n2z!i*I-sQ{1#rbLJnYZGTlWrY6bTay;rhqvNEe~J!YI(N5 z9_!Zm?cBWKNX*6UU$6Ntp6I+`e5{SyP*zUD(VznAhdNoWY2>&+gOaLT5=877m`S zzEfu`SQ7HjRrCC3hqs={x`FIrb@hawn#7PrvPIl_#sWR=cueM(J<4dB= zyzRL0__2%qOp*zW&cv)qtr&s1xhAntx`qU-EHFIhc(hU6-&~r|-b_!fSdk$d}7+~a9MDCc3$*qBrie4Lkn8b`NGeX}pLVpz#KGqz1Cm%C~Y&uM8@>WK%9_XI- zF5zLr#e|1F6?!KRGx@{{`oz`ri96{Nx6vn-{Pn~^*s&zGJs3N-ucL;p5uI*wl1b6n z^wbnY`oyK&DzB(PEnkZcbPsHcj&80-w2DsGf@C3neLd5mc9=SOw6}-f&hIzHRPqBt zLzl$H&I_NsU?zev1$sTdDU%b&{`uvXUv5AAhninhg!(zc)SGc}H;hetNSDoZ!WZT$ z3NlkdAYmUCmE@q~aIznZ6121Yq#retX}g_STt*VHvM zHq>=?Hn!yFl87*Y2zs~#ZM$p~3Wc?`OwCs+8IjtdIKSCkse}dD#@q}(99AZ)>r&_| zQ0o`TPEGY~oS232&D_e`Le00a>FDaw$Ay?9;(ZchjtD)Ib3%p50L|7D0%A#hXBEaZ z5|b0&$S&yJLwBM`ClzD&4KYQA-hnK`ZBXQVj5i14t;BdUF+j#+lKY{)+-H*NHVNv6^uO}NI!WZ_e_w}C45V=ld`kYQ_<8fB{lY9 z3>x>v#-${srr;|TP5R(b%FfQpX3iJ`6ZLvxcx&s_qdOLTrZ65lh$a%&(Eb%+H*Pbze??MBbR2r+yo%osffbya2V9TE}} zJS}LHQNBp@adNh|G*?Q&wB-sk;FHT>Z6kjmLfLwAD|`IA6C)eS z#f*D!I@T`?>-P}WZz|SrD%Q`X8wMQ z7QYD6Dl0At$I>J)X;2e6!SjBdO*J8~I>bi`I%-fWH5h&#{-kl^CJnGtT<`=P)nz(3 zLs2V-NBk2afl?`!HPlrxt(>G0T+uW)bUAfcFFt)N9xdN=HMNx`#c;peE-bvBqDi~R zoJ0G=)KtYld*e?2Chp;ikvw0@-~1_mODFs-l6RHz#@s#*XBwMMv7;&0oMLqp+dK%i zr_z5N#gPNmq36nl_j+Xulerr3ED+d{D@j6`m|a}aFLY`sOXk5X(Z#a5I=H0)nyPJz-eATST2wr3a zx8$(#4BO(wj%NmzbsLObBx>qeNUYVyE)p&|L}Go!saIk(z4v=&nWaxVgU*zYK5cd1 z)AnKq+`mzG@87@Bcg~g6oyELSYm44ESCfs8+dv){(jC$Ym|V;OnJ&{>36TOxnJuF{ z=S%Oqhi8@3`!)=6-|5UPvROwP{~9f>iF|~gbxZ+yYi9iLcBdqH_fz{)v_D0s_oA;- zbnYPNp0R!J8M~aK4JOgOvM-Thjdw1l=ukSgiek(9u)Xt5EJJtA_bn2)HaQ2h$u!`h z>5Lcj%(!3QjE@Fw6WwGs@FY8J9A?};v#_*81KlJWF&aqKONqaHBQ_?H*~6jCUuR8p zw62x30q$(oMMu}Pe`e3R1bDeaE&1-x+hxrjvm=(oUA}g+6n$AppipHZC%JZc_cz~s zv-|Qj2&GzsKrNi+rc1olqn4ax4v0Q|baZ!7c2bzJozzk08{VVH9)jd)>+8?4Gtj_u_y~er@_*7bvBZ%k)p%o{wmI_HauGE@i zt;C<%D+EGw3#Glit+`ZgE`hCGZYH;}LnRae&h<6{CqWG!ssCPHq zeIBJ_E{s%V?C+_ZyQA;2@#D3UI%9)9kFN?-3sZ5_(BCj zp;%yUWnn3YaKtCVk;w&dCbE$xJEx?qRHv&dDJUw*zm4Rq;-bpxUeTzZg&Bxf#L$&- zqg{b|a^MJ2V7ctE*mv*tyH~Y}V(Tfkn67GHC!>L}l@wcd7uJW48__pz?{2-v_Q6)~ zAvTD5+85{@qYc8?7wBP^fjRmNI_gq7>L2uu)99$peWMCNhxhB&pkNin83?YYKP4#* zG|(J1(h=np*+EC_rXzOuVf%Yd;U2=3Bn{w7ENjf`Yd3aXlDhPsn5Ore$x*xR9o00p z!PFh=DOTTi$EL9+#~t$1wi%yxn(=8h#;2t{rUZjLtuMV}e>%shbdLMFVhy}LJ)Fsx zVtY7KIepr{);6MtGd&B=M07S{{TP0S^`oBy;09?Hhc9aUFv+Ke-(mUa=Uhm=)9}#` zpAz0{`55fPU>^m)TGwOxkPy?yz+7wi9oCP24w7>2zorh7;dfX*`Z-e&)YfD87!n7& zTjoe-ZV+ZQ8d;3+YblJ6g`agyIr()fBIfA+?A>Gc`h?nUkUcgnm7M$<%!$#AT2ACr z!I*n@+1(@i(vkh?$f>;}U!^1G_l?}USMEml+_&#N_gzlWsD99M-*=+#ly;0yEvD#T zI<|^pwfAA~=G%+uewjyUzKQOa=Am(ia*7>Ku|4NSF~!cik2~D`#Lg5;xi3oTMADIu zQ*`Nrp;dRIYx+e?u~#E`sB1vT;Z@^(2Q%w?&@INXgY`L)_u=oJvl5D*N}u~Yz4L?g z&O1$F?_4Y6*kXzeCfL62#shPw9$w5+Dr?FTc>}EVYYISawId`%v-r~ zo^5+{bd++`s2#D_lhS2ULF=4DhahjO#9#b+yQ=E;?l0D`r!#_?KP0cc_~ep@A9-YH zMhI+tX_180s6r6nwvPYC?AZwZU&m}+qiDQchTzj?O+^FW%!OkbaUF5>$z8inR*Bar z#S#mJg^f!^O$*N;RUnoh|5%@#lapL8USqs>KU4R9>EyK=C<*fCsdI6KHOaVlCYobo z4~3yr%L{mYz#fXluhhyzho)KC$wkSjw@WsS8fAQh_nsVKYdhk}_tu0UB=5AOz|%iy z*7RvpX9hE;c^PMo8%#xl%--a0IC~ahWdAa}v?kKr#f*=rEK3t0gn78zUMcK$ZynP?2GIirUg39^UsID``*Y#urEcidrzSTn&h?T zR?G{V89cKZNd%HrPd)kYlJLpnQbU+a%uQKIQwuATdySqs|6$|Kf1gvKnY-}a7gw)- z{S$mbcXqc|H#7PY`ZDjXenSa-!AVWm&IGni?7$5W4#P6H^fxS`=Sm>#epHZA43x zoSeFXi@$7q+(-xP3U(rMhF!TblsUqVW>;{b_|1f#-yB8j%N6P{{SW$62ASa%cKh}) z#P^x$&*-MEbt$Q{b8+?d8y_4IjB@LzehWn~R1~92n}7> zLkQU^rCnV*dLN z#rslx3dKiLyzQX)^S$_y6fdXvDvBR9C_cXzKc3=?DPBVHqwm1ONOqUqv8OW}QvgCG z9C2bbWIQ(OeInrrlfe&}+1O^i9MSaCQ+|gHG>MLWj*fnSjy~>=(PvkKG6M*P}+ptUZ73+&i}!`$3s**j7EjI>j!!OR}XYyViWc*KaQZQ{9bH}4xk;x#l zt|P_g z-si2mc$V_HJLTZNJRM!emI6rP-AVXb1=d4F*xg9n65dLT#eDj=SBfY)lcGt)fe}5N zqIG@fUW)*cu!LA!68*se8^r+`h4Vv2KDv=_bzIMGoCkWC2YMiJNArPM0*q&2)~$@( ztkm?>^xH+1<%PG?np>J$T44%ms4T0f&OW;L50d|ea*zC$TI5=&Y`Y9HJ#*vA4N?ge zLF*STpKC>@Qnlw&o6^Ga2F6T2%*8)+$x_!V!4t-MTZtr1t<|@$U+GwORYNNAhTIa_fElsq(HBsQhqMBa*{L{YgqjyO<@Hd+a(~IF*|(kxX1)@=8{yTK zn_`db1F2_SQ>VnfmOuW{Q2fC@<{-POG8k1$J(v%0?N2^g{q{#6fBdnEr|=jRY?u{E z0_?wA8)o>3ES%weA%>~Sg*bQ&d^Jq%*(oITJftEdBpBM-x&C?w0s(@97r@=ea(Q^j%rP!OW8A0Bi{#+Es(gSt!NL zSh_4V<(|{AUX!t2z1GKRSTC2Zn_lLq6_cyUNK4AjuGMN%&;EA&Vr+V8s{}%tQ=_h4 zuV~Lu_R7aLkdKR7(cx6mp?2v^xO%g+LqBfRFEQ7WlEngEYs8_=KcC8OpQ)(@ljS6PE_1*h<{3 zXt=GZLvmVywt-c+Sah@e%FI#-mEv}x&_-_N--?^6i~jS^=sGdSbL(zg>*Cz3fVb95 ziJ6x>c@%0Yn{}+bQ#AIWU~klc5Z8(=ZRPgU8h~rMm6LO;5xDNW5*1=^h9KWIQA@pz z@lmi@ZK-^li!0OfXcbn3$f%^oFv*wx2UcYCIpjj-qo@*!m0#b#=Tuz6^^*fFQUVtk zO)3WAtF^P4gm2WAZ?4Xgp zvzc$OA}9fb*H^MfJ(&JhSzLPXOnQg^q8CCTlWb)^7ku#S!>is8B}>U94w(eL=OXzV z4rey1*p`|I6sUX)Whu$)J*4SH4V5}ZE39SB#d#%JH{-Px)~;fKqmSi41cP=kj)KPA zWK>>C%gRi}TOQ-g^hZOZo3z@3#OTXOO;v?DbGDxC8b)c2c;pDPat7QEx zF^L-6FWbAne$3Lxp9!(bs|ZCL1@bvWuRQbEQ_rnx4@Zmz5<;0`W+(rJg%Got1DxQ* z`t>JFQ@n(4`zGs=))*H^}f;X*4)!6niv!yjX?6IPkx!AGX_CB zP_!FGLxZE}EQ&tehwioa^jb2dJrVj^qu!ry)cYR?n8>-!MDK_1SPBU(d_b<;vue(C z^uAT=qND%Cl})c&1-}o&y(9KmNgDb_>_z`?>7>U-Gw2!CJNn&pGK=2(40`X<-g}qPxz6mn_u1fUB!lP? z@U>|Si~o_F_hSN*uN?pTM^fSWlUJ_AUc7kV$f@i3H6>X|SB~vmk1LCBr6*rK5nFxq zKGT<|ApYE$GiPq7T?&=<<}F2e`wvaEwv)?Hnxka%sEFyFj5hNYZc5z#&wu{s->d$C zc-8lw8Sm*i{+ahUUZ9g$#TeAEhY3D>eA&WRL$S4PGM9;^EM&p5#dGEcGk=OQ4*h;Z zGW%)tJBuXb%hZM;z3x3!<^O>9?S)Q`7|`D0uRD7d(*2u;mpIi>Un{Sf18O$b+k31MQ2uZi1b9=AHQ7z1 zpE!~2CGKorzRT!i>`VrhV=v}&?z0f6OdTA4I!|;0yx;884SM6d?_6QRc~~2Afk<8| z!AG*Qu*e-mCy^MDNP;(Bs5ds9#7p0Ig;n$lF7ygPy;ta_S1A4K3Z|*7lv0@;rLy_G zR2ECA?B9J<=8YNf#`vQ!1KyxAZ%~1J|TMnYoTh2L+)6TQSt3$spXou#F& zBp1qRl;zYpfm@T)N^7lcJiMO2a`_nS)RAUO4Tjn_*2TrpTx&2ajpAj-q=z8Y|8wz& zD;Ga9!$I^*C{&4E%+W~ZQ~dRh27`Ox)aA>kjYK}`k5QJ@Rhi{AkkKpn4NaYj_I7lT zjWla#*{;Ugv@}s0a$eh`ETv+pRAFwew6HT*NF*W&l3*guT3Ob`ePTesWDjSSZSB#* zhT+*f@N9v2wq6UjqOA1yLvb;iw_LjX^*UG=UO|7muFT|QWZt0xT9nw`a@Di*CT{yJ z1gUp(`QPqlu0bZ*_sItzaQtUfYu84xO}r4;1()(O>znnlzc+g~ zX`1eqZn`LYj}8hV!!0U^BH-)5t*?r+H!1XWfTA)42O#1rLsT|YHiZ^SDQ)R)lD6rd zY11ZY#{YYATSQ-9{QZ9aiTh$lJJa-aa#D$Yk^9QH$}PF-VnV zxOxS!PCc3p;jUY&-4DhGQJ%9d?ws)AG~6J$w&EUE(=B$o7I;gG@#&#JB34FY1ky1A zLs^JEtqmp=3b;8824wHnc}9X01ZWBQ@1xOerGj*_DWk_nM1`pyTfz>@JokKos{Y`y z^XQl}+OE;Eqa!4vlXbpjv`$dp(`)Lxb(m(8VWJj6zx@Wr|1}x7RnEDHXzi^-Hj^DP z)YF)2J*U%@nd%Hph1WG2GJ1A(f z8-RHD=+lj9kcJN<3+xN#F83t&NcTARSa&2aAjTr){YgM+EO5`nZSL#mxmSQ+m%$Q~tu+&>_b znY{N?hdGfdmoB>Sx{r@%n#_)VK!)}^kr~JVHi_FgFyL?^bcly|B_d$}=|28s`?g>HLTs=# z02BqkvNZre;1)5$bsf}xjKm|^BhNl}@8mm|y}TsZ=hh{2i)3Mu{-w*F9V!uX>#`Y= zG$G-Uz>q=Rs^oj`j*E*O5%wp^!{YiYFwO)_00pL~TO?7)q$zjINt<{7{ZBpf=CTJD zJf41Y8d%B17Pe4Knmkmth5ECk`m_Di6N^Zn_N*3cduk%2GB`lpAQu6<7fLaae{Fka zTN{U32ePfJ*w&G3YuVqlzA@Cggl%2Pww}SZe)w-%mkhNo{zL0zw)LW$t^aIw9+J?_ z5EI1etiC|Q>Ri8oXye1}(8g)iVmh7W&7rnKn*2~(N8I%eNiF}7%2fViQjl-LsDP%k*$1ytzTVV(Xg?7U z%0b+AU?v%tFJG}}tk{tK>#x7+f{>|UFd1v=+S=Re4W_1gV2#%s_4Q5l4OAYQArnKc z7Q=Z((x1|?QYM~?XPa>aVlj~^016NY zxD>G<4$6HmjG7ll%?G3Ag;Db&@<4B>u4-t&0i2+{tPz$pAKf`_9Q33Ndey2moXNwU zhKZNEcQ4U1QVHVN8ewBYbBn=%@C$;ovPS5q3X4JS6Y;BFKj__6qbK5B4@FOE^rS{l z;)bTO%8DkVkvhcaxHg+eq@xwSX>d5MbWh_3U#kGutzvX^Ru+Lp|Cf%opHBl1bkgx$ z>!L-N>SjhalcdLxB11Z{xy$8twp%O+_0<8!%WC!f$?59CpE`&B#PS-+gI$WdA@zdp z$K4QnbR0}$z2-Kk0uAlGR?7g%d4ZwWY_@TL2Tofpz3mOaRcq)(B@JDq!y!~kCCKIA z@vNQZo_@Qdx3|aK+wT-0rA;Ok2@ut&iyNK3ds9P~2!P;fv7@iG1V|WpB}StyY&yW= zQi2_IC%!>&>(*0L6~nzadGbYuPY-mpby~FoH{VO65+j-ZJ@l4 z{j7^at+B$Qa$<5UyOw_~?;PT}@`_4}imsf?&dxrcf32`g4-7W_4P+|mB6&Ppm!;1J zKx>CcrI3pSsGTKO$i!Sb8EK0Z1^rMVum`VRJD8fbn>%~j2|`_KPk*1eNl(zm+Pke> zF$R=BK-Dp;`F&lM-u?ltzZ6w31soJ@@sbO<1EhQiAt9722l+Y_^Bao!9fkP~#r%e1 ze&y%S<>uvFxPrPFd6y|2v=%7T3ZYOTWN`eE3wz62<<>fDkg(NFm|d8aMUBtS9>0pA zR_W;crc~-I^#gsylnRCY6Z!g(vXG z6R7Y6DzaYmK#TXR&0~y~tWEKW<8vSLtj-SnfvnDD%RWwXzk)~x&k9vAh|(Y{RNUB9 zQ|(!!m+(cbQL#{DZu6{CKm6h5DkaE91bJ^Ydc7CDlG^sePyZGw)SuqH>xX~mbn#A| zJ^@jtuWW89-uroW_UfHQZ5>tm!e2b9;EPO#%(7rg5Z`WfbJ{NKsV8&t!mz?6@=)7>1`MeY>?5biQ$TjKm(z*q7Q^=dC$!3Y7o`1;FBmZB2fVaiCi z`K7tT+;iOT3%>pSlMkUPX419&sW1#bFZkC%eQV)q9K4@%zsS8%yI^b@K=!JrwVWrH zy+WYDj#K;T)Uk7@5~`6Z2gLeMfI_OIL+AkRiw{y0m<+^5NO&%3)U@QC`>xbhrl2H4 zIvHuamji?GQzi}S>rxq{*PYP%$EHG5JN_iYzhD>@O$21@i(Jr5<4>%bDYw4iLAq0j2w|Paly!u|40w>QTeEZ!-oY>Nf~K zhvba~@HPU8`ia=+18So{Dwl8ExbaW}9X$V8yzUI(G#%Nf>g-hMhEs96v{r)w0mhWmP0}o(T@$QA z;92S7z10$$rtCdtp+MmWV8qCn;WjUk1PEE`pfHJAC6UU-F!lKQB9rAmN%914>i{9? z8gTJAEJ=3%n~IqwwlspqGh|B(H33kyq`0OY&}B^xCfqb$B_%bD4W_Cp6BH&!f?X9l z1YaVTav3pCA|}6xCA_TA$S>D#*tFw7P}ImVqY}sdxNZBHc6!sE*5JT|9Xk@Jd^9a+ za*=QIoec$y_X#Ji1L?b~<|NFzTv2%Fn{9g_3cOx_?-9xMw^AGS z?mYbd`-iFVAaG>sCmZnkV$%;t81dfHV5nT_!KHh%LZLP_cXc-!0RjvVP~1CSU0sbe zdP8%w0h$vd!I>2L-9TTD#mQhB?kB(W_c^jcl@Sr)N#hfuv2f&G%82OU<7Z4($yFM) zEIL{SUO-bPaCHX;to=Y%68nVvlF{@H_sQ~D6B5HH)NJRWEE1QR9YzCL0}+D+1s5+s zr;<->FAWXGdgv4l4fRHRpsbpq%cHkP^D8J5Of&1&>Qn>* zzQkK!CK&e~iG(;tU7)_;-aD3vH&5jb;p?w09t9?QF zPJfc&ph5{Fa6?AW@(?Z8g;mES(7d&u<_};cdJ?n&lSOkBGv9j~G{HqYuFGn(ySO|) zk58jGO9rsWV&emN&~2SsF+M0>KzyuH$?*OCgS=%tEuSa%4)XUOv?GE0WtcYw=1l^* zCc(UMFWO;QBz^%-#vRZ*pTy5J;0f2bUxl!*!*5MGuAh>+fuGzuu0{oeBjP*;cWcRi zp~&DnLlhsw&%aZw!fz}^;9U(;?8i?-6lI_&!97ZDxkpYmIfns127Z5Tg+DN=GpJ@A zS9O_s3O`-eaihAa7X0L+z>i1F#Yn z_}!rT5=(VXIF3BGyVKV_R7VX`z0-52!#z|v&yhPlJ4o|O(0rVwsg6L&pg+?5r|UuWA(pBn+!I1YEZ##iO~!PH8#&pfcwREo6EV{Po|$gd zaUQvbd)$w|Ht4vcx{&jgL3!)AG2uYY%Aoph;YBjnf8g%RrPOMC2XkE!%4n8M23lnN-0BS0CKb0}a-I;0b1yr#6Vf^ml?@aE>{23TR z4})i>Q9rsnsGl=f9?#@OLiRPVO3<&Uu(+ zb#mvaKnDKB!|a}$pOZ$oosIv#_1=G{{<{0cL4Iec-|2o}kmCt;?sLg3%d@n5!OLy^Tq@?~&rV1HQfq}ZEr zDZCDW*bJZ)ngEbB2 z0WD|1T(3*0t;QilZvc+H2_@880cc-S2@STYrmhhq*^25u4Ry8kh8le>Qj_@ZE`syI zD1irTbGSUoy--V>Hb8LT?|le8LEQijOk=tPrI$nwbr5(hD2f*n9O&x<$Z>^Cu2lQ^ z`fCDUVDt0E+09#}q=tjAm4G4t6=8wR)(>=A=NfG99v->_eDD;X}Q=4^jAz|)j>aq=Y9$DYb80u z9-{Ag9oARRo1rol5=UoupL&ccCJ;p%9D;Nz6BFY3Z~CA2;Om*{n)789C52ayohm7) z>l*&*E7MxJ&&+{sXdMpRN2nd(y^jdi9sS08sQ{(fg+A6Y8MAQ~eiOj?Z+YJDDl>UD zJVo=7`mN=}d`4x?#?7(+`2xPUGBcvRtsONB+5x28X14SV*n7>rh>o<8d}k!dSo->@ zdJKxqlXPvjQ*J&EmM$lUPtH^n$tkv5?Y7K_@UZa6i16^Ru<)>msOb2($neOxxP%dj z2?=q4ABu}lAQdHVA%g+|jsd$H;KBesy-CJ1cYmqj721(7;nDC`6P$C!BGVN$W!y#R9;z)@)_kdwFZNy&Q(L5zOk~}0JCltDkd0eYmGHE zV8G? z`RT4EQ_0DlA7ymS0mja2Fm*hDYS0hB6a2XQ6`}U4ZQH*4b`u^z#=sI13@gmt+z-CZ z>C$oxKWt3L_wPeCiP|k-6isKgeD>w%h{|2JW&Nfc#7f?CCw(6PHD}O^=_T|ce2$^o zs8fQsqhg0gB|UH-4(uINJe`Cjgj(dk*eNRoEEiz(9D{v@;DPS}C;o=ve+NC&S~`l_ z%uOCK!dJ_SnLJ_QJ!w$M0_aReG48P!ADMU8qD6~l&wu2F$HwVmDq5OMrl#g517vY| zc|}=yMRjdOJw^%#_U1+tOdU0_Y2Y}?_YP3HP88X63+w-{>msCi*DRMs{E%cBk7!n_iH~(Rcp7=HVVKYaak%sz4xL%MRgt9zUm9@rZKvZZUd&%Jtcgm1}W)9_X zY4|IZzG}Z9FS(z_CpbDL#8-tApC1M+0WI3wVU1^70CCFY=x=Ll8`OOLFh+hDqbQ6K z@iF*8zDkYtO{PJQ#vV8%e0&&UIh8*5NM*;MX9E>MUtzefP_Un-y6<*B!%L3--O1q% zd+s9I;lSshw?k?&8I6M;k3uGsSW~53v06Up^_aj2P~ESAx@l7-el3C_OZ-}JdwKLq zUcAE(80l+xCZP{ihU*eKfqD+trj`o;m0H6q=Ux9@GaU;GGeChvK8*#1v3Rb)qU+a) zHNLR$(!~oog{X0Kp|Hr4l0+)%@@z1>qfja=t*~;#_B>#<>6AetJ5TNda^E#5s7`z(>qaI&{D8G@{=ud}aSyTrink_jvLs#TZv?%t&n6?G`Qr2K*2 z-kE?YD+2Pn0Rx!^^VvR3A$=ts(mxmI4^>nrq&`F*F{8q{&V6;Z#sQN7v3#=5nx*cg z9EbPP6zUsYU}sk?++cXMv{qLY7TRnbMtut@TSMjIDNvCJW>9pK2^|O4dcH`g3yh3K z{T8wdxVR9ta755CmTz!C@@*z+aNo<($4vcvo_+OHVS8SE;*LONp~ zVdFy2(`ASY6;qL9fB6aSxYg|b!UqLDDDW|6e2f_%V}|1blgUt}?;|@IKO^m&&s_NYtYb&3I8&odxA2nk}&czp?e@%IU#RYZ zeo)?38gB)%w|rDy{())<@J0iv5o}8+J~#t#I0y{5rR7a-Nx(Qx!Z^l)TVla2J|0xb z>xISn`K4v~xp`Nv6%|weW-|CV5+GpH``2fb3ARf)Y_y*P28EY#ljHasU z(&|<~SX#PGP30u%#Q-+V;GIl>eZ_^9m-{9x+>f<_k8!eki#=^Qksuc z+6sn~eY$S7&}ZzzrFTvUwa=oVfx{tKilfeV{N;0}B&YYyq$#Q!^(!`UUVRHI8ev_t z@W15!3$=|}gN^S)YBm3f2VZ^lfyanA`;g(>6T@E})YIE%aXS0V19obly{Wd&*wE7n zwHrqqvW@Yf31O4T_)zvwBcYOE#fqY~3m2!Ukh*41rj=AnAN|kTi8y~%43-uc_T%k~V(LD&a zFq_-E9SR?X0yRYrx6{7!Ud3yUn$vxJt+K02iCPSuMOX3*%6e6*5%cc9=k7U)@~9~K zP#VsVP-6HSZ`kcYPtT;b1A%=Hl_^Nywd>6{sSrkmM|048xC`MyDqrYczv(uL?c`~BP-6^>2V-Yen+l0L!#AP_%%4YpcZM(hI9M9ob52&^hovf=61xq z3>ONW{cZu`S8O((h-Y;Og^0c3+TC0S)?}Ak;OcWLyOhc@N`QN85vaIvq~u0P?q$%+A{r%;rRtHvdr$3)iP^k5e)_%ILh{CFxX0=opqzJ2+-@78?}5L^0XER+nQSCfA_lBh>g z*4I#LGS$dN>~Qq-I9)wvaA3FD<$y|>MKnON`KUu9pMQE}Ow6d!G10(|h>9II5_<+L zcCZI}z7rV8?(8G*L`is}ad@JkIA6b3WKdyeudFODy-{LpXl!V0Y^)}SB}7(XdzX;- zOuR@X$t*ty9Xv8VcJr1^pX|736!|4aaAwT-=_dqW^QKLk8Y^QYQtOQ~XU-Hf{A2#K zEPj7`$%Tu#M7`Y!BW;HK{`)4j{jzVbe#@4ZUIHKr@ABpArandzlJe-XWy_L%UAdRD z{3@zp;DhtBrLCj0t6pEDuc)bMY9cy;3m!17gvNf37pcjZH;y{XPxrDY&M(T3=I*K>*xxs}ZVKO$|^X8sKo(V<->~ zfThl$_#i^`a1bWql@C=@NDR=yrTM?^*!9E236p1zmUI=I-gDr5(cUlPri~8Se-5W; z-h~UfRTf6pCyGx=Nf{fob>FEA&olJ%p2#J^h%xgXU;HRew(q&$6n}TUux-Yh@4mNu zdMfnz&xOD3IbOVQ8WqLxLPG;ZJ&f$izdwdXvb(6&D>8H)L#-PMXbL81&r%t=FGMGZ z0%0Bh}pZN~+9O$k2tJU?>k)1z{C-29k;V%i@pQxt{BbsPI>56O{935KJ_V`WpYAl|}w#QJz!9*K`l6{%xmD z&|Ve~q%&tPAWhk0D*KrwhZ0^_+uClbYi~C|_7S@Q`IjYE#w5r^(Ft-PQu?7dGB|CL ze}9t3w`QwWL}n1)AzBd0atbLbLLp;g zEo@Loa|G{PFD$F7MW9`6b$Kb6M<>LX!*b$uXG(|!b9ll>XkEz2;J^{bIE|daEpM>Wq}Bu|8J-+5mAadIceTzvYy-Ix;06eDJ~ciRYVc^xf!Vs>Ir zz+F$Tcp^>KuxAh9m#tE(;lLN4e7G&I)(5a!g92Rm=9_OmvtZJ21&;TSTh;ME0ve*WopXyMu9;*tPO{Rp%0G=l#$gK+r zjSTXQ8VyhdxjcCKqM)GB<6{El_$F848wP9vhus0(?aFo)!!VYb_)3 z*OIV5W)Wr3=W*nHduTI!(R~M#L6ay_l9Gh5+Dqi(PedXs%ZSxwEM{UFW@5*Nez0vI>V-#6IimOw=jnzYKOjK#ac_TAOOj-PmH{ zgS(+p#9Aq!0=yrCDdQWWB$va3X3{B`}!TiL+MZymr={% zWcu1?iGTSyQGq2sJ!N`m)7h-7C2tgGe=-xB z+=~GGuviXUkK6EmJXqt8FEbodIM~iIwF~o`vywg=2k?Sbq>HV zhCAr$fPnNKim{5YJ&o>>!RA z-Tec=mGSlS5db)ii&Vlm1VwR0QmMdYMewvh?(ViDELub%5VF@s!BAtfxWobn&*|jp z61Y89c|^qLpRe6;!emW)bosLnjSaJ${N}SSBK@3g-Q0l&6jg7u7oU!q3d)SoFMb#T z5KF&j-M;VfwidgGFH0jj6r~j|jzh?szPL#%wpkIv=ZT5d@xwYVT_`mWyWCM~Gw+4j zQ~XY)A)M?L_cQ!&I=upC0(4^w^}QlDC*LRti<|xEBMZ|Ld7-ITSNCO5F>sTF@VuA9wY>e%p-ZNItyn5^nJv`5^z?nYgezB(hIs?F zD}8mz$=(0ny?5vCGr2`D0#%fk7GKRf@ypNqfBNOriR|-dsdY>o4`Gj1i^a|nVX*8y zw3h&bXQjl@UxXCAd_ojfxAPoaF5JG-h`12BG$=46)K}b(8m=8iLodfmJ=Q`N)}a?8EgH`4_WyA$oG_u3vx6>sAaKHcZu?fAHgU6ged2dLMT` z$N2<@L*e-e6V8K=UCjS34C544~XOQH5_VU$&8`TiE)g>@jlORs+|1dv60tuiZ3V)B?tsQ3T?l89@ zrKqmO+-K=-tSJZRbX!+H2Mbfmv%?Gq+X+qo&+`T zuoyIc7O}IT3FTDlNo8&>mxTAJVA6wO29{YDexf-NZ&@a>r~5}tsBgLQ!*^d_&#mli z2$(Qt%$TuW6`y?;7EXjPg0pha76I~)jSaG(94n3rW5R<2&=!Yl?#~-%JeQ7n`W4>O zZ0g_A6@m_~sPR&Mp+(N|9L?#fhEznwAI!ihF(VB@?szL!A1&&Gwzzxu?p;TUJ84Aq z%QJ(GhUQ8=RJ`IAQ(eiK{Q6qswQCKnt-WpiZftZJDws*2@59E*!B1{iUq^2y#w;_q zt+}t;t@PG(^tHFd2?YUuzG9yV98Q0|nM<&cRIRP8wT5P5Q-Rp-b#}FwiSLa)@Bj5_ z*8o=m&_xoTmq>Sxm$yBz260UT7aOd(PlMWyS8-c@`p$9yH9QW1g`aR?xY7Ff2^Rg zPb|tk6hun2Feo)c+3Z?jQ?VZW9=CWYbB3C_#;M@mFd}6$`Hs?Tb7q|`oytg zzaBVn;OObHCd5mdsxF*7a_HBe4jnpjBr zo(+*>w|FvcS()DI-~d#f?rX)FyRD<9y1dfVBl4C>#40amm&stL>Cj(3SJc$(Kpks5 zKtBkJw8IeY4LRP8@RK%<7stVah*vA6e12_d^DQ!c9oFs^tlbNk`*m2mYq55daXRMn zD2LNwg-L;6YayV<#iPtkO^v3;wri_jzECpH_4vrG@HB3 zBs`|i&cS(57Z*0=nP(oGIeOgKF-h^T9>yl7J-qC`2_Zf~fdLAtyNzf3`LlH&Y$*?% z`A#~F)dvwnaf;fhdG3y|@_+C6>gUVXs(HM?sNn$$nIAQYQ9B%_k_-8*P8$h$!Xow> z7AV*9g<`J5N*ugU7I+V69L8OPtQD~SV4W-Mk+;QF?d>yQwZ9g5@9vWG^yyx@i^FOYNcG#`&%hr6)q;5+XIq z&{2~oPaYTL9Tpt~KDC%BiA)H6o+P@h;^er;5Osv)!2L{pE?=JLZQ8qj{rZ2`NPUxE zoQ-{K-QZ64HMNt&?XzYk=UmJ=eda>$rQCd2w2)$2dZW1D;+YFY)zvkIn)2e@yi#oO zwN+(h#RZr1@-F3^&n<)@^+u&0N3^oC%Im!A1$i)iCqqkdbGYsS2ZwlTEx;;JKJBQ+Xt z|>qP>^$M&yLOOKUuef2(jKtR6k2%6jS-o@<+j{@q5jtlKktC z-HlLXh=ZBqL<9_BN}nmWJfuKD^*Zp7Lbru|NFEPbjRX&^iw}(*Icjvs!j+eIef^7p z8ov0Ax8Hwv@mOWqFW>D=oe(0G_$r;9UEOw(FL@@hGGWTOb5o~Iq4y3Q%l0-?0SgD& zP5Ago0Zp5#+O>R`Y6N0_rr6q7TTpQ0@Q#UnjhZeyAPS=|Fi+P15*A0gcw$#)b%dh921SZI_GuMj^UDh6mJXESHym;=+>2sIz zsBMHJ+H!7LuXZyf=j>@e+H#+ocE#2?`7i@K*C}0}eMW3(y1( zPl}hJ)^S)=s7fs0OOz_aaWKJkC-#aELRQ>PJZ|<5hm3=HH(Rhdxw#xIU+KfO_6}-@ ze9ZKJMzUNY&g)|G@elIx1NmrWSe-HoEl9-O6!1nQMN<*r4V9&*qtyf(0&yOhs@rQ| z4-tD51w4=0tfO;sbmFB`dw$K>urvGYHG;Lvfe-p^?w(CJj;25oF4YbN+{IT zRLY)EZ>t4gMTh`&=4fgLx$xL9R3RK&tQNciBSqqxiFIQIMrs8{YE)fYld-O@o`n3F zOt2{-BeE4TzO}JdUtLvGTVtrh=GagV9SjjS6;&`}C((Q^>+tj#4H3}mbYP{?E?XaT z1~(0r29|9arDFwL0;Zyi^qcX(NcphH;NalM(bJ~f_1vnLpMPM%ijWDTlzjsZ8`UpN zdlQQxOZr%Bh_ZO=hINNpC|@QiVS!gWMT_Jp@k9`FP;Fav=R^02l`7D-LY)xrLvrlOMTx%v9@KmC04`1dmr@Hz#N)swu}xE8$j z(u+$bO%Uf3R2-vn%9}F*mQA8jH=)WxG85YR(HS^a9RqY1#7C=a(yUpFCe`ik^d2{D zR8)+b_MEU}D<;MUlx*F)^|ClL;ZZ=&=tddJs_SYE0E#fc><{z5USCsLUR6<9Q-?!g zV{N0U4*yewf?EwRNVm7bgq;LwN(_Q7w}beP5k$)6aa;~JM+nGRKJ6ADu#`i?ieX1a z9sF*(QQl%4QM^?Ei~#(M!V5};Tq1_AN1@jE`TME;G-`q?q|#{o0)i0Hijemt_W1XFTWdKO&rG`M6WYK7ssfFQ8@ zi=DX@H!5?B^3LzydEnZ$>!{RPT4}hr@5dAQxu+@*|NPC`jVBTJ#mKEKb>-I(8E;Km zkeqB}G)C_bJABnQY8u^R#nM;CZK>;W59E}t zlZ>4ZE$2vkPHz5u?e_eRilTa9u;kOyd~56F>>{8TGccpk*fDti@d2HkT6)UcB!2Nz z2?7!B4dt8mqsVI-f&yo|S5nzpo>ZFqYjLJR)>~C_qtrOy9qh~N?HQShoq4VBw<>*E z6=(Eav#>+|>d~(cQ0oQ%oTqH7s=7dcX?F?(uaxxp1XC}gS6e?V6li&hsHwAN={U-! zooN7!!<)CEN<8MDY1rxUMmUF62cKU-y(M#Gbp!001k@y8VzjHnT<;vJq(oZf(QnQIw5cCj6S5!V z^WS+)H`Al<=2E-q2SvJA|6#%2fLoBM!qXP7cuaFSaZ*i12bbIK^wRkH%gxno1GeVM zNr}HrOr<`fKHxnu!H*h;*^+tze+Z@u)5$M%x*3eO`_zw@$q~g4#9f?P;^$9wGn6|O z>d8F?*tSPVTP@pCa63C&cJm$VGUgM zWVCQiCCo9H!}nh(kQ*ly2!9X?gh2=e!XbnLVUgQYAVW?l5H2AU2%8WJgiiC z>yUdOyhHARFwf9EfM@?46OXwd?1OKn5dI+)2m=ucgo6kL!a}#FFfnum+T&7!=D2jAwdaqwU$4Pz+XZafH`%s(*|xvSm_Kk)7@H4( zXSWSLJK}mgu=@D_7nZY%WJ6b7hsjt}Lgt5|&q@f7TiF zhI-^fHJECRP(dpztMw#?lmoR$3m`a36DZ@-4g^<1ebUOUwU-+xDA7(g1@%TN=OWJ% zhewfs=dkr#b(3Qg;=`miV91;~XN-L0z65i@sWWFzpS^hg%;6uu*^%E6lsbC>=W4I9 zjfzd5JvUJ?aCP_h>p$I_M=~rX_{d3$8CMk;AL}bezGs8kiSS7z^fh0sr#}2}PmLpP z`t<3ja!@A?Pka6K*B?z%Zu;cI4|T#zdyie~^_j8~DV56i-g#|F+MN5Bzx2i%ufOuz zd&^&*2|K`g?Ac$a=S0+JAKq8wn2;7HD%|(Ow>QKIPoyJ&V>R;gb|Dk@1*BjiYvCb! zXPeg7T#0UcP-*z)EijwWRP7-6+H)Z#Eui%cp# zw123uEy#dH@EPjW;>@Ce!rxlo<#6#-{@y@)(8`5=(S5id0hgv=Bhbp7c32(B$;QDG zibcA~N-w3KzmJbE3I6~pphKn!@DK3yRs~b7E)CK)RU~WEOQP~e0;f{t9|YTZU_gMs zFEx^M$>nQ-)j_%Yd!23`YQ2&!Lv4M|T675$ZZBLII8{mjS3|mV^T?Ojvl7W0hyf3h zNN|!j5CjThQj{uReABnhD^0u!iF6ZOwIh#QxQklFUPeJRra~V)W?CSIU}g z&fb1)7;KF~k;K~7#}_LSQ>O&^Nhq6*uax`vB5EJtb~fDvr^q)rI7lOQKlRk2c_}H= z0^2KpI^%0AI+cCM6a+}PCUjL@+f{2H`}mVDPE}h^ZTx)QG5Bh|L?XFD!sW~5Egk(_ zmA_aQ21Wj{^QT1 z$F_t0)5YCmM$LU{<@3+4eC~w@AHX8akiGNP^6AO)-0!~Gd`X#-js^A;9B5~#tPI2v zF$$q~U?{htp{2zL$Ah5*2l$sg?{^fN_8q=h+SKI?8J(z6@TpXSAj*AXidPu{;l!J} zv)ykYK%;dNOn?N0GqAe40|k|8YfYRWABk8kv$eFeSL9u75=bouQv;xv$k)oNYJtSp z+}J_#6cs&)Q>rN_7(Xr{#7pWU*M{MA1h0b9S3b~hE1EUgR0v9eFFV*-4`1_HsiDlltSWteUB$3h6 z8ypbTHI1D;W`V@ho88vO_fm?4mJ=I)tpCs6SY4l3iAfGC@r@HJ@k`*ai9esdQCHVU zdGm}1_Fv4ucCDb`=#M*g|MKr$zwG|5T_WwRHdGg!JoL+fA3oc0rTh0& z3^U&0uymQ_aN+m&BOI*T+C{?6I?a|4(SW0_uH@nsePJF>kEedznwJmvZB=6%qC@I* z6YHA!!GWPu=1-KKx=?xjr*$7>es{5s9}vFd`_t9Zq~wXQ32`cO{pCGdw+suF^cU^l z`PEN(2Tu`WYq;CmCCBN=+k4WaNipu469`=Y?vfq|PL#K|&?N|vakL6}oW?AA`Q?}I zjCKI6&s(R6kr^)LUTNmgO^+>1O8rL~#B(O~5r0l%0H@1D3<4SQ=jSvDIfm2c&((Uz z&3GP$48i;PH#&VLzLko=bshhyDa#%MiYF6}xbxshUqsSXRaN!)#>_!COJ|Lb^9ELj zWT2xLzG9uctGxIZ+}A;)J18hfqMIn7ds|u?FaCV0nI}?HR;0ZM70RISps>ggWy#UQ zd4{T*HYe=`dv9p4+HC_;56y+OmCuaFYEt`H^mBPo5~MPJ z-@wTDgqWn6bBFuJj!7OndTfx(OHMtE2bB6Fla{=|!QI`+^XBt8{Y|~Tey~FHx0dDC zShNaYAt;o%A-Tf=u|1e^|JOQR1^5KI$^=XvE?=@psKfDW_{|?3vYOI+;LBCuOC9*~ z9q{EU@a5F~r%DjHWD=uPHu(U3?6F?ZF29V_*9$Y(Mj z!l1aQvHE&GIQ3ddMMYIzd!Iu$t*nuUP-I_kh5XE=vh2+uyf3d_6dVKBUWn9P`|IY- zn~&!oJ|jpR6BdR{TubYX6MJ^<-1*aQS4!b$ViIkw_4PeGmQgjem-m1nc3rWCC*AoE zu*||}K?7k;Z*Ly0l5%OSkS|wzM+^^KNe!DH6qGeCp>N_FB3ffJ^j`j*gCs2=>OzP7#T`{QdldeJt0i!aZCo zbF>+a=3a1VQ%idvVN+Cg=eBh8_G8QEND;w{Knsy>+T^5R8jU7u`a?^XFOh~$KJs%k4Q9zJ^f@Szh$r9YC=?;)T6`2DVf zS2h$~--FnxeZO70np0YO@qAv+iQU__e~p~yXMtvbnqCmBPDzNx+zEbd?zWjBT*1R4 zjl8#~AA*&H*wrI(zWiF=)*nxSZ7*N1$vIP4ryFair9;CuZTj|TaZl{X@!?#%QgZSO z?dLyyx9fD(Nr|hOj*L&zbQ{kfN*WQ^Uvu@BZ@=A&=1qS;YZr6qN4?-ujZ z_dfVCNlee8)@WXs^|aX%+II2Wi7xfjrHFtwdPPlnE)5RewJ1KcEG?aE-mzStxbd@= zg8HoCe!Y;a;8{&XumGU~5~N)P4b&VY^0c_TO9zL9sH3T0qRW_I?`Sl^Gv=nGO6Z=v zZiiJpbCWa~fG~S^T@0`29 zpQc%VZ6Rc`q-oH5L{E+>WqWQEho;olae8QTX|N z@$u%v7dv7SM#*{w)R9lYMG@<^ef9M(`9~;Atu;DfjM!9qxh8$pV8UT|EOVll6to1sgmzysOK>2b3IF5v=wC>d**vbK!MT zFK!*B4P7!dsyIKVQ#Aj9CFygXeE*%do|v)Zfrr)l6Gwk7wZ;A;6^!?h@Op8*BOq?l zGUCM3Q4II-dC2-j@4zb*&O{e;6XYzT`9=owtTvX>Y*mMFV|y#mJF=7zb0|a17g&vF z*4MYT&?*s9+7vvqv05`__Lz|H@L0;Y4SY&kqPI^<%6$)|$Bh~_ z`i>Q9-@s7|9)I%5Cl}ojpw_HfHZyhRq&x1KJ2S<{ckF_DC#TK7dmgfQhyaYIMN057 zIE}KlvYe(;X#xVIET?g38ScV6RH+OPS84gZRe2X{-Gg3(Z@_E2Au zn*tBGZM@I6Z`-+f9wMM-;XpJ8KG+3_FqndWDF8HD{(lvT^hW3qqZrkcDe)l@!YyBY znGIc}ydq}Al>eKYOarLxadPf>bLAUzfwF>AA7XGifLCyXG6Lh@LY4i0iBc{RrKq&P zW^OAo4|h=MQ(evQ23YN1VDEYJ|0HCSeJH0;wD>WJ6p5C@hWTlN{iy$y*cIQDyD}!( z&o6P5Z%EX{N7AtzLW7|e{O=^O@TMr{&lsMVJpC=?lO!)#FlWxak1Tm;=JYZDC;5zc z!&!_fHg?=_F6A?M>0L<9j)_tS$NqoF?7!B8ud$kNAFBz+vzl-ls|j;%YQlfr_Gh-O zm2I2IwvA%jhW}05HnuIY0Eo_QXWL$3+lKy4+jh3?bhd3R+qR5tOO6Y-^8(egZR7vY z@8I+PUB83(Jd15x!S-9u_8apz{Z_O6KFPK{z_yKH+eY1NOQOV|fUG0?Dv1(b0$G=Y zB#~OU(Hk4^0h3u%#f2lk9?Z_ZRMFU6kBYh#)kvy=|JFdR zD+2_-<#El~AFa`SeS>xuA32!^uRqtXsJdIHt~h!!$3A-cvb!dY9y|FT&%E#o%qa{7 zfUqptYvH+jzg&l~x%EF3_yGppZ0Tv%Uo9vsE-g9oQ3{Zww5tzx$BgQ?RqR9l*c;r! z=H>U!hSB6l*cyKTvieV`*?Cv&WSlU*#J2jc3~C8Xbw&04I)#&xIomI9-?)C`x7#-V zYfU=c6-|U@obvH5!2c2itIURduTyaLL&HTJ_aN zL`@*Uh2&y3n;MXV(M-@1XjH0!$=wYYtbqZE2B5qGt_wy-WEKu^7%^PuWE_QjPG*wY z-+%bHu|fVg7pUbtj*x4*bU546*unMpRjK^cA^tcB`1vTX4F~uJP?PZ#kx||{wMY}7 zmWz<*=AtA%K#J}m#N`Fe?cH7NT_m1cXtfObKqr732wu$saKkuo!#Hq5!1YQ~WpT;L z-;N(W`si}D`9(ON-ngL)*uNt-_Q!q4 z_Z_~JcO@^cAore!AB{x}-F`#~Q}^h^!-mO_;mdK!V;RB3`LH)Xd9RK*ymtYOWgZHJ ze~HQ20*gczKDXiLJ%~`*FSwp_vROU)xfE)122v*pglKnnb)%Nk-N9k#-`36q)F64k zLG9B85J;2827p|1I3gZ$6}vh+Fy@Vo#Q52#6L)tHAh6MntTRSn=|m_EAe{h{rBucZ zlWF27PE1XZN~Fj^Mns=XgBV4sODAr#isdkw^N?pMaQ6|^fY#eZvb14@AZqTyBjd6a zcZ`b~{o}#~$=gYdTDC1hX34h`*|ueD+xEX{dzNjhWZNdOZIx_W+26Evv2E{T+d9~` zN7=UGziI1c+djy)t!LYwXWOET)oq`*k8PXwhkgg2_wV{0yyqos+bXu-3v9otzv;Jv z?bpS&Jhoz6+(eb+U+S2b|Dl9zYq$9VF(4nF@yqPncGt!OhYITt|1hL?qO&g{{9|@o`UcW zp+J}iS2^JxTxEoPZhsGif$&Wf7how679tb~4-pE4iEd9}Xf6mJp+zTQBtn635}`m? z>Gl)|GvS*~!cBw%VJAX?@Drgx7;2EhU+YypAOJDrM0;F1(Hs{!TI2G&#P~0~M8q8F zoya-TI}voGcOvOX??lwyw)ercz+rnI%l0m2d+$PHR!;tx-iMx@v?jfi=A?Jhp7c%# z4E8<`yW3sZ-JZtoHV?ZSsW=x+f&~$$*wR#QfYZoW4|w>h=GK7tv>0>7Z3Ft475igSk!9y*j)f3VTEwk_vFnhYm*enyPBg;07yK-fm z&PQ&myqI&Nq|PZ1opSdBv*xBo$C$JvLi_{w+nfWv}bMYd!~`@f^LNpdgu=6n9 zN>f_R&BofIvWlj*()^qqzt#h428V6E7KD(SP{c68$s{_ZXJlYN)Q=y1kZUT~v;Es& zyQ!ZTzNjzjC_}xU!oDJAHhuZ&M>bZX6FlgaU!JoJG3~#(BIWj=Ik&?6_=5V3_d4|s z!Q1d(!O%-zOoyM2TpxlR(ZijmQ*O;_YiYB%qIaG8CWnoJN#lrlyd5dh_pNc)F?cj`i%xczifyxPIdt&`x7TC;A91VBP-%$J2_b!Cu`$mWq%{v&&fJD*bHY-=ojSbmeNPG)@+0d!&hW|DWBlf|Fg$ z$#!wFhdEj6-_$GQWRw0<@A%XHT}zGM^Ua*BiK~~W5^i1fH}$r2_4aVG*_`YJPImTn zWrY|+T?64M+i~v%S9yZ@NY#^n z_(X}->s-%>p&Gs zeD{uX*@dkmeXV5;{gUXp3w;7L%9=()*LNffI9zb|eb2tUW=%gis&aTn?Ar0gN8fyd z*;gXr_13<&I@v{3t#_-qRNg6&@w+NV(^oIY{`eqhs!QQb-H-j<`#2%`7QgvKzBJK$ zprO$_Ff49*;`|f@pCHNv9LKzo{G9|G_7%dJCG90xy_V`T^B-9R@AY~V_MTWt@-W2_ zsf5e=dHyFZF6)=rC-hO37TXYCC3EX>_a~nG{BgqH$6+q6yRYxk2>|k}B6N+T~lt~_HA7wAevbB#mtR4yA zzLt+?hLDIda$2?yefJGkd+s%s47t9t8VpaFUU{aVdVVPBba63kBE;8@q& zQ&-p8R$X0lIls29r^`@TSyfPw1IV;eVEklI^g0T7P9+`I;7@5OE3Zz+&Q^4nme3gDS3se@<6K4K|2Q$9k;)YDhqHg{6Y^7|io zhVgp*>6aD+Isg^CXwjlbYtvEUp4kTXjMujBe%yCp=WoYOojh^;Lglbn=BPgM^_I`U z!>ao5Y{7=_6a0FE?|43X;|(zQ=de1W_jIAU@{P*8w> zP}tng{m5e+uZpuJh#zM@^oyW3&w_rJI?c1H}V;I2siWXsZQb1RZdQSP;%Hp-M}sbJ^MFbV9pED)Ie16;XJv zD~}bQ{OZ&9zo`+suV*}*>o-H`M8v8J4JB5Ud*&BkefBY|Dk?M-D81A(I>BB1n&y&a zu&8#yeM1tBzH)JtZlvDlz0hJzMgGx#x@iP>k@Q*$D6Nc`m|Ok*3!h4d>xW$HfTjp@ zi|X&k+_o0(yS142*7|Hbmv#JnZvLi>jk|Vc9sQuXvdP%nbM_qYt~55!aJSyx$RMy%W8w7Z74^P-+ z6#PWM=pG)o%?Rc_!3li=?RhiW^Cq-s1metUO7gCpKXdxb<>E5SNOgX7ZQg~8KxDm| zUs+q4a|HkpKz_&wGu0NtJ9f3Ws-5JKwp0%e0tckCzNw`FP zH}0vk2i=M6py18NZfbbrs5l92+r6um+bLPwrcbC*%F(9nV(|2-wTx^ikX&D?H8FtDDOI)ZF4mg_>AqHeN zoSZ0^Q-F5EE@}kh&?Xc*`R=+1rA!qL4GoKljf?S>^bC82dize9mJky;DLl+a>lYdp z8WBUBn#xLhz$UoiAcajBwXK~ZbOg2+~9MF48C}3 z{M3b;#M=;x|6lfW4ks(*Wc@kWa86czO;!>@0lg5|>meMHp(F%eL>hJo%17O_$64A= z;iq;YPs{wEqWEd@HTheiYqvrEQRv#O&^5vn8?|@u-e2|>SAt%lp|bEwab@ZGqh}HM zQ(oNASW#YDRepKfwx71|*g^eBRsnsGqrau904LRF_HOw!`w|d>OG{f2>tiPKz73Hr zQK9~UezGn~8{qG&rn^VGix2LdK6Cz@Nn$#G^JgFI$}jFM-}}`!+knc=^M-Z?%M@ag zRm$-Sv-r%Q?0u*KBIYKPg^&ekO0+-1x7HY zXnl}vbd2Y)SS+d_%b~3mG%%riTiP9-Dl$`wgtR_t_MEv%iC%%e2-Nip^in7^VUvA* zRWb>9fs|T5jW+DjNAJ2}X$s&tG5}bjgAO}|!b~p_0z?BmoI@zDq1ox{kA!%)pu|{C z>qzh2g5G-uy+_zgSEBbKiz<&~GMeth=G;LUwlc6@1m0>MKZ>wN=+QS6;bdu?!9L^^ZFBkzKSFF_0>%>-Y1m zA}a68Pu|;cwYX9`Z_%=Ncafu~^75H8r_Wb{wTlr5`1aw^OuuKIdHLm4;bTQV;8=fK z$ynGuFTeckv$s$4@m7lUa(`zdEQor$-*b1}%bTAL8|@&Xeos=n`OhW}0io$lET^x? zH(cV${H85ldHdbB&!QeEE4gNp8AIFp1!Vr%F_eU)&gfwj}yQng!dSL@*;@@S$%S8ra#eH z%a~Z>g_J6O4OyTA28ypokb5?Rv;*?O| zAg_SvX^C;+L5Mwv(KtLz%VnAV2=nM}AK)pqC{U%~4Ix;wre@H?M>;w@!BI^xMG1cS zjp)TV^kN)(QQKfSXU^pcs)un?D0*tHTp?Z7(19%2Asb(zlb)>| z8&OLv8@6stMK;}2ypMm}ya+b!@7wp?WsD4Q!GFf9o(Gp<>oIwcM9Ef zqAAYSuDmu>y$j=J4_HP@scdQ+qUaCeH{bGJn1_|}XM)V6igPtDPP);^^x|g-<1)EG z2+otT4xM%w`Gi3HKsu&WHaaw_@=uBp!*CIf0LmRlKyFs6HYh-ZxOU#yuui*Rl32z^ z#Il>4uf{ii`n1I}ZdeL5J|Um42R6`DcV^rT>f)+sWt|Kz!lb zTJo)id<$Uy*PJY%c9QA$4oKmx5psC5LlSSAYZ<>2wUND@km7c-nM5*^lVgcoCMQK- zle+#YFwE6>J14n|t5L?uj{S{nCnu}sWM^}-g`BMUZ)9^hSuH0!my-n+6?sy@-^hwN z*}FMeDJT2u|Jn0OIN5tS*?vy;Bq!VVH}$fd?9#u~JN~?X*Usbj{0Jx8&eeO0t5^Cr z^%}W)2@oAx=Q1aIm6J{Q8`<%8mT|ITPFBRpdjCas!dvi%hhx-LFRalFPCA>DmU7a6 zWT0PPvvRn*4&r2+IN6cw)l58Ci1vi9V!|uq&&jTI$$~PKFn#0E4|CFg=+=Mnatvi~ zch07d-?_I-K7|H=7SXU2sx0F#&pzQpn;1<Ohb9WwiP4+>|7*F82Kf!Z9i06I)^J=`Is;aW2F#l?9VQGGDxxoyqvlccwT{~s~ zj%Z_ZV_8mNeNA=#<@`dh&s0-;VFTx0y?p7?rOQ>lJotK%mM>Ebz{2V6?CQ6W<${q6 zbyVjQ9_A8#*s3*;+%+#UL<7U#H!=Z9*?s{*A(O(@{U^Wr{G(r*#evhGUIYjF7s#H^ zl-!yWYHzG5MRq~Pl*QAO*v47MND>ad_(IPBmBaSL!zrGYx?c$|@QGf@lvU3^|NLW1 zqV)n_iCgsRHvkN1kWQdi;JOX3WLhf=8L6qPsA_S#-|;kATEBy@`2h79efO+IbR5lx zU9^t(#>+6$zsASCa6_NQ%4-HwO9Y;C>(4bXb=fhiT%mK9d5Aqc+?6{0j}`W`#gv}p zZ@8{^`O6hXcnn;5+W?hOu>yTOkPb{OCS&Iy7#a}gZN~9NZI^At-=EjfMpyv*hkB40 z)z)qXuxwWs04XwL2oS~tvtt8&7F__}+5)ydr-M(!E$>cjCTD+VTNm=EN9|61e7Mpl zA~AW!g0uy5QqyKljrMhOmrq`Lk3|^~6B!v57dJa~W@L19Vsc#ay!rFyQMaS`jagkN zo^LZ28Fc>bmCb!4L%od+-6Or`K~j82VR1z(il@b#4Sol7~3s;M-WF0@1b^7R$>?`Fp73Jl1wZPxI3PK`AK4AstLs1yW^mVXoQ~YVE@Wr1vk^GAkiDIKlC1*b!u`SzPiE)r#_sc9 zQs+p{=FdLdQ{35^ivN8wBZa*UHA&qBqc)hwfG$EN7o&ldwY%AE+QTrD*RBe+75})= zRa1b|Wga{Hmji%jgx1o*bPz5z=)|gl`_W4s5{L9QtLo7IFP!`t$;KdgJ*KGM8b|a5#)d`N`lA{M5uXqXyj}k zK_Bkz8lV&!5SG&V=#cOL@R|fggx;7AYO{M*q%RA<`Hs8qzVr5#E0zZNsMJW^2@VZf zdLytFi3w(LO2s;Vi5ptVZby+?xdW^-_-gHTDzqL{XD4SV5Vr7i^#1MW{h8?fndtq% zj=s*u+S=mM%8JtJ!tz=G4I@>dq^ha8wz!H|&uY8ra&~rMr=3g!_RhlW?8^pgCS6sf zca!@~3h|5Dw&~05xr1Q;5qz1~Y936-ka-ZE&}Z1!MeF7A_3OcVh{m**?qj6;vi6b9 z4c-{Rz9Nf2uwH02=AAvAlZJH1&!}&B|C*&rrCO+S{O6{GPFYN`2wfZjrV?Pg0TGxA zmuEh@>Jr27`pPdeyvxEz$!6Pi@g5bD-;{K}7g$?f)6&t@G0@XH&}HfCXzw=nT89UL z{|x|estCe@fZ2>RQ43lsFm7rS8Q$z*@q2-4xx&lOGYqgu3E?whCQpftn3+0%Mr?$? zKej(NToVhH6ES)4xESXfdIBx`5?b~nwCq!8*{9L6(>gj@u(hnMt#x5w5NJGOb3=J` zO;Z!tdJJ{dwIxNR6_r(uh+8r>)Ya7+JFqomB5XaTx&mxhu9P*k_Q7}B+f`N7)r$|H z7pZ_re5s_-)NKV`P`Am{W7GQytkNKVf8UVUZ99Mb^79Y(nzT2syeYRRuP^Y{#c;uW z4?A}Y4%t3Z@nsqxZ@<{tNrBpkWdG=dAYgw35zNaUY*izSRuSkUto!V<&rVmLJzISm zA8JH?!3xI1N=ZfjUg*NfH{N{n&Cvp#L<#oKiju;z+9fD?19DY&VUj&ey)Rf5X|AbB z!!MA&DtY?`W#OglqPp@Wa1(AuFA;~}7An*2iHDOz)7HH5+VhK`zl=~Q;k9?NjKEww zC{H6{*tqEVh&|%XAOA^^qOdfyGD5ny3t7II)J==1N_sjy9eAU+=#wrL)Bstfw!Esb z2H>Hc4aFCa96E9)>&$_jU%$U$!>3!m{PxGKpKsctz?mJzEHixHfSQY#N1=+WUJE3mhQ0b=LO6t>s2wpO?4rwKs7I8q(xsgAG){U3Crx;tT_%xNw4Lk1w@+o za0vVCTDZxiMErj}xhE(8Iw#-7)&AZ@?W5PN{f{RXbJA=6RC@+}?a5u0XH0OgOxUk~ zNOSj|LSq!d(Hv3+Q5{kSr#qw!qCD3rgYd|Y-v{XeQU=icKsPd z14${Q8?RH!pPq&&5h;ae5h;bJ5xEmik4PCrk^XlXM3qPxM3+bz+@1WnMgHwR2wOT~ zjr`Mna8EK*&beQsdnw%*fUeEG3num9+)O_f4M@Stt)=3>2MZ9;vssYmGB zy%Y}2E!1|zc>hf8#G3zs`0ZzIyy>jN@@e!5vEklDpFD#;A?L?4 z%gY;Ev3YLo8Wp$GX!O|Df?h{&V>u-^U5z?xB`N2kh_rl{<<;(miR8v zT&3>V59lU=q+Co30_V<6lmpgq2$q~3dqO173=Iz9gN{W@rEdep+Z9)9v4FFpVC)6YG<=CL)8-I*LKys-Jx556lH z4tj2pO9A#%n|Q0JxeSPoG*iRNzu#Lc^H9oojOOAuUv2v0^RJIrR~MW=efo5j#c8wS zybCnKf=9!w#h<0I&yq{9rGXdxKnnG~Zf0L!O9Rfl>TAjhiAx_}hU%iig8ZVgvf^e+ z#dp9)7#pN{AhI4C05S>UFWLv}fb3i+2FbS_Jl_cRVwn(t4)!C5Vi*~Z?!?RjmjZ&+ z-Q@(t6Vz8?fo^7EVz7r2A#)-Ybj9vSlhjU43RbxL#!gR+MGCqo5b1Ddl=i9da0kLhA zQKFQ4X37z4fq~eTy)7dnLl@rE*az&_;m&$vFHR9bW!W*%3W~I$;jXTBGeL70YR5dG zlVJiAc`C(-pl~osWam;s@H)tmAZ&H=@zA<(%>)^hmmdJ$|iaJ^>^tT5(&!RPJT`fz%vmH0h;@F#RL+hsmb*> zHru;mFSQoA4uGc2pj%qTXL>)>M!2j4QJXhvb8%UhSE9!!Y7}Lclw^UPJ<@~S`8 z+}Fp@s9pHsCpX>j*E^v7{BTG??!v7F2}OUp6C#7VakG!Ay#8`W%pZ3(v4*a)50_OlS-X;A@kn^@9U#w&v(F5^rk?wiZzp*?$}ql1Au$oN@pSi~oEa#`6%zsE znn6GV1GTWT6TU5Ak_>es2BaG$$Z=<5mae9{>}(qVhZ!+`&dI6kwCOxG6way>r^heZ z{pGuW=GX|zve(7_sZIO#e?k~7#2YqjK2ex+fZ=c0fD|b5^Xso4IIveIad-heUSw&# zv9>Py9SmH8zXFXiS{^cYJk4fP$ZWP)&O%+9W_=U$BrZ8VaX zo;SAEk-|R86W5w=gd65gHiWHVmpXG1`P7QE)u-79dFH-JDeMzi`F(|B>1}DK0MVeS z5et0-&^m{DdWrYX47_?K&f#zi2ni8F_j^0q%_hXao6YsrRe-c9IDh`a6-@TU)du)* z;3)>bj6;{eii4Aq_k4SQ=AAr`b!a9DjIx^lIKH+9KNi%1+cUOIa1tBcSY z8>siF_xP_Yrye3s#>j`(u6;8NQQWxP-?%wJ2GEAEu&}mEKcpe35SQlbW5=#^5xdGh zIARmIX%%Xqqv;Y_+KP_+o}E+I0_2l1TjRyj(sRwA#~o;IDk{2oA-BfZ z#EL)O%I-2TrC{O&g5L7HydhGB94Tqw zn07csIEx+`p*0#0{@BWb}%y6MzEh@q$ zgrRY=E6mQWtjxwTvOD4S`+km!0$M%t9QiT3LDz>lr;4hAyC;T?S(ifnDlFN)_mP~O zbeLadaB;d(zmh~eCe_*F?9rvtfpp+Hd1q5?88Cu7`appJ_7#S%uh-F`q3h%@aW$Z} z@l+v80{aUxLUgdi*KwYdiFuyv*T_82?WKzvxxJF&S=X2ygUX-fOLAg9vO+m+}uyURR=VE+8a3eCAX%+LAVn$%GXRLot(^2tqi?? zzl#;VtKHDm-_?ZW7Cf1maL&7?4X2Zk!;^5@fbalM1$a~w@t;0E40v~G{3q82Dd@8s z(PzX8OTiN?!V?5HHgv-23g2v7Pb)C&fMMNO=X$F8LXbe9c@*GMZvb8bkMHAWXQkpy znaKpMGxC&pM7rQ)3j3Hx#+GllY}ved>o=z>9bN$u-N=M>J^bDPf`su;J|6raqfW;Z z(A4j64)eojm7dLNaK@&myV`;@1i#fj{qCi}+xZefVc$_dV0X2f`d%94Jycs=E)BWX zOpS}E3rOYX8Yq)d?~jX%(+<}+w09NmQEJ6qHK+FPceNU6vf!51ntDSkA~EYK8UT~n zY-nm}Z|VS-c$)=;Cg^2F00ytO6wHflEdqMPnkfqipcX6$3tO-tAV5zmRagQ8URx21 z-IRS)qnI4&rB;W*5JO`t0$@y0QyDduI>fy?MLlKZ?b48#08dY(vXJKgqwUE~|3ByE zW~dCNCiZxNVF7TUu+xh_{!r#b(J9k1AMd|63$uw)04*Ix+ z=p&_P%~T_uOx4o$4=p0i4eK|P_Z$hOJaSDfMPLw)i#u>Yry`Bo+c`4QZRqOlKoko( zV8{@04H?Xc(5PWy0RcLdr>9>~fHE*5Q0WVFGJJu4DU(*Ih^m4V1o$NG?mA9iBk{~6 zUw8(dc_KqcH4Kt*cDS>rf2h5EkR9q9=CXvdv&VIEeB_BNo3@vg-3V;?bcF71c6Q+! zWq(5Y?kaW#mi$l#QAX5b6OT_6P}oK}TPiA+fHvk7^#R*KmCzAX0o??a^PSWoj7D-s z_Yd;=^Pp6Ta7sqgZgLq12ejFv}crWC2VS}14P{!66^~4TMee{Y(@ZV z%{I$A&2V!`hl65y{=0W4EZYC!`+EzEFt=rhHf-4N$+vs<>jbaA-q~eYCtSM5vQ7hFH7r6-C;Sgq zD_<(10Vrxh1^Dvt4GtiIY=J(qbsCX4z+dAg_)S>5hICFvb|sI zh+lN+_ifnvUA}zt&ES;9fDt^FHGO*42h@wHFo`}Eyq4g9>l!jTNM3y^4!#caD&gkD z-;NIy{*tq2mwW)C`k^5+5dO&?v9r@)z|yDVBZ{5(fzlm82VS0PUmxrj{r%NyY^pFU z1Vh5kPT>a`vnh!(O7Y1aJRG3-HkbZ!xlg0W1CH8?U|g+7pj3qDNu|Tn!Nj zk4}@J@x;*rLzAJiqpjgDjgrgAz#@m%$P@}O8#6UNK7QUTMid70&1-EpvFebhwm-%dF?vYA&ifhAIH*h%ZWwRG=D`~= zdAO0~g_c*M<^3S7gytx5Z&G*%>WXXNAvi+N+R*Q$MWv@srQLbxM|cTLn9Q+HCkD)B z`ovGfuHHdh1rITq+$LQLB|tDhQxgz9Gs*T`>gA=7X?@`?n2cY*UEu0D+^Iykh z4~7K^Zp3#)DPoAQ0QVW;|*GsxZ zz5)Jzo{k<8&~Ym}pPkFj!}ldvSe|6-*t^*W1Rowhd+BFl=e{m=SgwBi)hGMPy1OSL zI_~5mI&7|p4x3HjCS{agef8Cz8o`u19>v!kQv@}8zFIGt7u1Vr=ib1%x6wZIOnMFd zj_X=O&!T;)Tj^Nn_a#Jd{*38+FI7tO=!n}%8Yzjz|Bd>Y_x9YmsSxx*tk3P^5gjxl zI#`&D2<{k1HWaM%5)J-F;-X!b;I~1M*UA)%Ssy)Y!YRF>rKjI6*Q%7BNV6WMJc0fJ z_DFWf)Bu216QVh=Dh-m1V?=WZT+tk?z&Xak%L^bQg|lyvP-+bJITfDb_0baG-~zMQ zORCa%B7c|WE9G8FsgQO8Sxzhmr(i&UMk-aQupQ+>IV578k67XaKbD;!8ENe;W^8zd z%`J9qkoS6*mMlh(lhcD+(Bn(c8OkHVZm9e2JzpOMT|7rz* zvs+sc<0Vw7+ys4=Oi^oDK~+E7&d8gwP07w~rh1w?MrA6Uhq+Ir>g~%vpNqY&`RZ>+ z0CkC=(J|P%^fw(#5WI6c&J9+>wtWTb;-+&j;GSSSfCj4B^u^Y#Tleid)HLz3)D~XFuJ#maFYl%!YZqf{`VC?OGLVP% zF8_%*@H6UUZ@%^R+i%~N7_3&h!Iz;&8XM^8SWAEheEk5C7J9k+X+3#Dk<`;ytC1=+ z!9Kp>!C_u{lpa*3x@;g_;>;lk*|n3(iu0UxMy3kn1+P2|+%ST(&;PaV|> zrmdJ9>+?U?#3t$jnNpk-!JelNU}-o+eipcvUtT3h&;QpNZr~;)9w)sQ((rX>@Jw*f z;LRNm_*dzFpXH8_KU>S$3t5$(o_ST42seWSauSnVr*`P(W&>c@?#>Re*AvxZbLzhd9+r*Z3I z6#g9a+)lLp`=sGa2<5|@JB)U&?L~i@r$uN#5f)w86?Da6x`Co6*U-Zht-gnbd%688 z&7)|qDB6#!Hvp0$XnC@qzUD!wEv_-v6je7@V@C~LkrMAH_gW{6+V(Nu3lNBJ(ftB4 zxxS5sYbW;k8#a*Fddjtr&Z5UmshAee(QI2PCdf;44(|OU;%Y#~7x#U=96a*PhE8M) zbagdh|G>~_8obuaJ$-@bGXQFKQt$q{yN`mI;P`b3C_=&XO((ZLBF5MynA@D}ETjt|?J!o<5DV5%sQc zWs(vKuJdkgvkbL&bauA`!58s+CIFvTW8uzc#AG5Ku~9s|1X@K8bi*U1^Om-@7alxx zINxZ*_Sm<(^x&_1f4wS(o{BGCyjaxKiWn{O=c9%qgx8Mm@n)k{2x`W1vI((7ko4v~bPq-Wl^33stISo*StLU;a z!ZkJ*%k(?^(--r$q{1h>8vDT2#H^J(cD=?EwNav;88;(akOa>j2HVlc{QKrtRHR~q zm@By>zPrt+o*9b%)=ikZ|FtFkiQ~>GPl-ib-t)`(>S}BrJ&ad&?*MwY0~!V+Su%9JW_9V>M3xw_o+Kggc3sO7t3mAQx>VRL zI5eTYp=HQt1va0ZS%>$%uE9-oH5I*bCwhg%M=nIK%tx;T4-exg-e_!UHkFmaCDhP5 z+|g9g*kHu*z}T=EXYwATXR@oR{!8x&MV5V8RaMJfAX+!Ex4;WO7mH{L-j@JSay23; z)4<-ekiAi$%f3?dZ7P^7A4V5F%x%?m*VS-(2sc|yU8J+JQZcLU-~}e%N_@6IU@_ey zcr`7Y{Qhg|cj;WOvcjvjAPMy}gW*LT(*4)Y3Kofif)L8)?-MjNCO9N?Qn1>~Ds)czNOc!h>w@7eqxZxZW@bb_zrN5)$L%qQ%nQ(z5byKGuO*v*m-Og?WV?kw|sN z(m>Vdt~)4Pz4zZ+AGY&#eU;{`b}wxLTi)T)*{MVW@vX zLfrrV)}NT=|7=b0<#OsAvyLxGAt*xpkuqu--VhOnsh)2WU{>Ph_!NvAlG#OOGvZ;L zL&i<^S;CAwY8^U%-ZqGta@2vjz81_bP2?SZiVOuNctrRjT@~|Mzhx{F>mD`O)ip@W zi{Sj@=j?M*m-^IIU%GPX=&|FaWtjT6T{;*(bHxhmtg0Z78ULV3j(qf!Cr?(^G_T`N zKXq#QdiST^ndVthbj5;gK>X8b)HXO)H}h8oaBIL$;mRk%+{Zs>U|3{~PycQY%+umN zfZn?@!FwJ5((>h()(7*2gz+~&ub9WnEvc%k%+GOz(^Z&z;Mz@4_14AYSa9WB)|qoT#Z~$FRn)f(6DbdhPog=b$X4v>%m~vj%Da^P>E>SuxVLM= zM>RJ$HR(jTXRn%hQXDG3!@e1$`}WxB-q5Hxg|R;SyN@>g6fUYccP97fPv3t3!;Yij z$QOSeYy2@LS|SOVY&-Jxx!m88yRo}goJhj+@1DQt@tb5#9dJb0K}Vnf6!V1(I$@-+ zzNoe{4SVFzsEoj;R-`D4_U_(s*sKYgf9Dg=ojZ22OfWh9k!PQM_L0T!ksb6V-p5GS zmk0#fX{%pd`zkn1GqFp+CAbUR=?BsfIEqW0xnbi+A8s$`$&_-tyq+FUPu-NT>9fK` zIAYdJj`p9LG$kr>+U%$qvnJV%jr_RzNx{)c)2Ag(i}jy8*&ipCOr)`zf8&}+D@bR> zp7ODdR>b4ebj8*~Bf%;}&^W-eXG69DP3xwFM@0mvlwdVTiVg@+O9ix4s|`(@y)e~3 zbZTO3L||x4bW}*7RKS-YJ|@6!9|7Eb_o!F}^$7B^o9h7w-aRVScxe@)Od)u|K|{>I zBgLHo-;W+&g&u!(yvOfBk54t%)|#7|032{BAGz&CdFM|Y-uv^wyzzFXaI5xB#{u@r0vCo12*sr=_>8u7qstOKO?{FJZL~bQqgk8lVHU=FWDLRR@fU zy?YBzoGL1BXOSc-6Isl~NB80D(1nsZdth>MviD%S@%(o>o$f4k5$>xwIXQEf5Twu8 ztPXUq1Lu))F_PtfJl`vxazFb1iDiN8<&DIjo=OO4J??r=*s~%51UoGj{G z7vui=Blpf2&IYZ)snPhAi?I%=9+FINp^%WR zBQ?X>R%b%=es}kn+(+Z@F>VdcM6VN%KRNlIKnTt7l~M-8%~(cydHfy`Qo8BECM0?Y zMuEY`sF2BAU0H@`Ae`}(<0uZRAVW7d>-@eBNiw(Y+4)0$zG1KDzO!H?CMM?SQE;}jSoJjMXZ;W#5uy^u69?X}hM#`iccG{nG*t#u zd-r&}&1pubwY67O)*6sAfU~3gQhcZ`t7^4g3;mfiFQ#9dwDOL-7f(`H^Dss2$f~Y2 zG*nhqw^;e(!8sNtNDBAWZa~C zEAEywN{@)i{qzejFU9G?mpD|?BLrnT7N;Ks|4I-4Q*6&0jQna^TG|v*Px<~WTekde zVB9?jX^Pz0(~MJbit{H?cJ$ok#rQ@t`3o6BGtwjQBuL z{vfn$fTvK&K*4MWceSPTg zH-`#qYKsq$#bx#%&eY?K*4$olBB!IvSVg`n>&zVoKG{)n&BH3mDl-TsMNJ7T0}2!E z?8Jega?_rl_w3lZdDk`P>p4bqo2j69)GKOQLPEmn(^*xtW@J#c(B+cVye?=ux@-5& zlQpgPz06L&f9;DeJpba_|8Uf%G8*?g9=$m-fSm!~(#^up@2 zxajEkTh^{xwf3=Fm)&^7G|k{O_wPfDX4hzYtEsHUf+@z&&wr%GRCVCvjV`A!r|ZNH zxEC$_()g-yH?ae*W$l$zl@Z_y1emyz^$>+xR9cvmot4+sX|~Iw?L#PQeUl)=lZA#}O>7JtYLeVs5fOJBkFo5B8MS4EsifsXza% z-ZHxLvuR7QqksVbKfJ>QW6>m=fU$TuQCzzl2Ep&S=e|B-n;as+`aesj^>>UTS465r_Gv8%_Dx@QJYgB z%nTAKv|bwNXg6$tdb8at2sVLAJlfrb=wh6J@VIlYb?EI6(c72N+v_l%)}yx%kLU_T`qZ6C%*%+lmH z1EY362PQMv*V)}aV%Ph7dU^->`zoF7?C9mMKHd0z{!rMmTb3+Yo;X(VE6J24JKH%p z7PvQ<(eL>A#E~nfPFy(G?iSd6?!txaGg&82p3XW`(k)aIs*ZU|Z}Hd|j|X<{Xu9k0 zM<0Fkdy{}4e)lV{ymChfZ7<#N6-baafAu44$Gy}S6(d=Y4tzp2?y9P& z=<%BsdfyF*F((Do}vmLq6t5>fKD9<8^WnbFv-`fHw%cSDN7fU zLAxx72qxS7fMWEX@@zEdNdTZal&A%eVf&Lfz`67e3-WS*xbS|gqI@fo-nuM4W&y^j4 z4CJGPV0*m_x9K{(^lBl#00$zrkJ3m{9komNlpO!<=eG_oJvV>f59FMQSkKStZk4oUC(t?Nz>?){~FH^%&B?b~uS65AC zB|{JNF_U`A(FQsB0D~_mDypigttq*51(D%RV}fD8l-O)S@r+4&ESkJ6XOG{@E@PL% z1$G}B?>e@7fF;*G@IBpz8GE5`N_Dh{9Q1fcK>>=RiabPQFjeLx_af(mmi!L9UUDNf8*o-Oo!{X$aQUVe>8Sd zLdyIpFyiN~czP*3y3l!YQD%huuID`z$V&!_fgqyRciTvf#;CwnC(GTz9)U@b$? zt-hwBtP@vjYvbcX=%RgF2=)#x_xE?#I73#w`R1FqCohZBCxaDMB$CR+{t`ZLx@5@D zmT7SS=PAU5&leNbWig~==oe3~fP^`*GnXae2v!^u6RN=;Gnx@fWW1)b*32MdenmqM zcAlWPYin#QLN7F2sh1N`aM`8dc5M@XVAc^(hSt!>DHUpK?Gt}dZo+rc6GPOX*fY_E>haw;s zF+LK*AQuKXWk+*)8z7u33Q9^ZUCu42rp~xh6-Gvi^P!2@m0T^u%lM8D`~AFQveUkvIsr>dj-z*g!kWn=0dj|JtufEI{mq~Q>e=s)C2fy zl0!@%Yrrc1GP&sYcj=YYm8LpFeS@*HwPkRix3dFvX97#Oe<%|nIo6Ruy;3Cc5KCB0 zZrvR!xxlFeX=o-NDQMn@>Ym=HaL{Az)%9>zLM2zs1GObv%W{c7{K2XR6* zBc!PbTk)~hs;aBo;S2jd$IvdBGH>AxX_4Ll{p0a*;++8fesrc^ps*qD%z^K|KWpl+ zB7qyQnF%wsjk)zCJ!^Bot1Hq&iog8$y`OU~8O|R)dAev=JNv;W9^CY$NiqfI*>)_5 zzfn5gJx{Di!>P({>J!lmFF*g_Qjj@|$t_4E8lW1HGn6mki{9h9Hn`mwY%M*XwP(k% z+}Z&&|D~NfbrVz)Zz+z{2on}8T(KxZt_YJ-65TAza6Wud-NuUME`hVDB0o2;;LtBQ zSym8C^!K(@*LU?JUbvyL8#ypOLETlmzdl+~QrcujIK~Wt-3}P-jte_?9y6o0`4sEs z&UXlHz_cRSIig7^DU+cePLbTwK1>TdBjN%DQlV2fOX0I3UF{hX=^ZAEMf&-9 z$pvG*?JX@WHCK)vsH`=%!>usFlZt4mTCGN!lh>^`&588W#frse&X}zpm_Yr!MMG?V z*^&LpGiD`dM%ym_j6m`O<%Y3<=rGOa2WtBIVbSX%k+UG-IqU=nv(u=!_0g1hE@-+G zwY78?jQZcMp51!f9vLjr`9(#=#Du%G<0P-5-r|{f|4Yx^|LK>%l{-{P&!uCZ@-&G+isu#mWIQI$U+3q>EB2*nx(Ie!0KKbF9@kDDw3Y4%S|&8ftB|ht8Po z-BD!#h@dW#rbF(0?bTZX;9yk-4E*{{jerl&kjH|j);b!F#&~HV+Pw zYXtDI#xyjOidU-26ynIy(M0oz`Z^_@e{+6?ZR-+9bKpW`M2CJd_tBLNnw6uao2FaDC zep`1{w-}8k!~wJ(K7u>JRty7Mz@sz$+Ar+CYQoGa67a2~ z7R%@;T%6W1m}_<#DD$XV7b}(egN!!ZS4Qy%%CZ+fm@2ni`0@K4KVKd43e?a|l7yMl zPnEqok{SBCKSBR$&W7t&oD+EY;fPo#GS*qR*y&gOaqy6?jL_`Ud! z;M#V+1!%5|;Qsj$7vpx@o=O5^I%X>TeN!3^0CDj$c4sX{WXw8{)~>@Ad0WOJ*B}2# zeIR(|p@(iwV)#{2QB}HFqp`ULgsw<8sVc1N?*KPpOGj6y$yjvi(8XpWnhwxUE#^k} z!#?>KaiL;_DFMjY$!7X_&%13#09wojFTDgNj|2n+V)&%t-eve;schB7I%%rO(As7} zz@&#l#6va&h}c1(*w=(EwCKV~ZKQNo)>L1kbdU+a!t?t4h;!%4^wEc&l_wHX0#?qHz%cgyceDu(k-QVR3Gq$MiDPoiEe(+^Q}i#ta;+j2cMrm z+b?e6)bPj~Zo;Pi0+0ufQ#tsa4c*v7U8EXe#&#lDO~YgeHf~&=3<7tSNyk#U0iCX= z*72WQzHuYNGj!WBB|-@fIzIUDut6V*Eb;a>q$QhhE{%8`I6G_W8|%O_VQOh@Z8NnZ z!V(`~Bk%>lSk`$UHbo>9i;x8are^`m>ZGg_3FBkPpaFoS7kJr`FO5|Lc_jEkj*L5Y zgMfx4nbbI{(0F_Mc*6+I;9;wyL&g?NWmjn=C5vU(iI6Qr${M$?NLC;@N52hyN^piq zR-m5&uIg6!Vw((2pfhj8y|)^Xw`gj{DHXHsUgv=1RkqQv1$@H!P99&_0hn+j`_+eQ(_WvgM1U^HAe{B76Lf-za>?SXPJt|2;#Ux{T zJcA;Y)Z)d1!O#Y#Th^;7)B&28N6I>*^J{L#8VwCabZ%pV zm=nvC6DhHXa)>09QzRWDhzmAqN1YR zYEFN*2oR}HkV`KbZM^X9#xLtqv4vem)#(DwhUOMftRQW?!Q9c_&}3@s?1FkB%hb>e z8^)b@maqpx))>@joy2K(+Q*zDBTikQ25PQ=YAY2U8gE~Pr^ed{DO5gQBoZ6JvfI+N$cR8Uznv{=$FY zH(0(%LZAtpHyGaTEI_`nAB6dVJatCOf+QMIG1xL@lDP{@tVfksP$wugu>Y&7*SdpasK)qFMB&!#RS8K)1%Uw8qn+p{K3SzWPoYrY2m1TUAsK zPDv3m#=b3JHg1(lG6imOYS@xVjp-B%lH7eG6f*Dohuvak&sen>0kplCz3ku&-K2T( z#Ya*?SKe~>?Wqx3txr_io%h^Lr8AT_Bl4!qW-3^p4npAN2r;^ey;-0ycZR>d7}>PH z<8=`I`Yzo*oGBQ!IK`q&%0m}zG&Gu!$lhdVFf`TGH4s+X-o9=G5*dw62DFjEK-#L= zfS4)FE$(O=w2%#r0;B{+LIXcRg4vwak*NkIQ8Yqn)yNT5s+B6WN~!S;3<&i1Mi8n> zMScU@7ood2F;P5aS3~yO|=E;tR35>V?<=;wuyBt#H2l)8~L7?}4ty!{ySL zxJ2Qou+aS5C!cTn{DUtKrLe*LpMLrvJC*$hn}B8KpNvQMv(I)@DKL51m__%!`LBOv zN>njP@#**9e?N|i-+E2&c1ya#ug7PKyyM_Upk94jc4mN{BKMhd`->W(z2Lg~kI1jMNX&tl7}mSXJLtS6hYP z&yspjP-2!gG&LAnyINX}uv~CuS(WO>-DBvT!Rt8Ps1Q5duenA_CrmFii zt!rtmt23Eu>r4g`e^0W%5mkU>Vc;z9`FhH{gES8UR`T|j#>Hy6&JT+{rgacU!8OT8 zP^!ZY?20kW7d#&0jhSc6;ed~6%z=0Y%#s4AEdB@@LJ5VLGN+yFXKhv-j5DyHojULQ z(|b>x9E_ha&!fKf==S8~OW%IFZTnA~etN}Oi$(QjEDTCk32)(P=Mm>2XSTD-X>gu* zp24SFXR+(b!}m5;z)r?;63ZqzkKymGI?c{D=K#EjE0LvsH=D-#IBo1CHogRj?0D z{MeN*lC?ny_F;tZo!T>y zbIfn{4NI2IjZKX3;QxQDy$4`aRrWvr-kV-ClSwb6cR~W87fA?31zc>byx&Z^9XAk zW$t@s+`@i}oyZkKby;mnq8 zB?Br}EwyP=CKG@Ee`aG%w9@@z!z)Fk?Mm1BW@WDI+Zf?ZK{R2E@Ufx)#1^9X*#1|P6 z5fu{?4Nj7TgfZZZNgfj)7abKA5*i&H8y_1D402RdR77-m#HfJa;E=GG#N?1*Utb^8 zBmr3o9ssHP!Taw=&HUvqTo|pT0Z#W#xP~aw+np@1`C}L(4bp*|a8el8xCe}D*X(bD z8pV$?S&kbw0VngVH{qVy(lE27V`h64x+x9Kkp|rq+S}XF4Cn(fuTZV82hH8r+tu9E z)6?2YUeJK50Knx*i=i={G{;pk7g0*63_MicGMQ%Ah8_ElR?t6v(bXZII3dN?W=GAL zj|ING{ki`919{X|WtRWcCGP@t0{2VMj43G(y#N0D)H6=!Gp~z8ud5h|fL`$Wvgu9GX01{P_j{HOGSFc`;K#9EX z+}d=^o}~8Da~V9~bDK8fp$Z)u(!nDDc7Lr_r`4!I$2&NrF`3nB1Mnn3`WSF~@bLno zXhoOPPAgd{B4mLF<}{ls6wFNI6Iq6sQFMbGX9NPP2Y8vhKp6YS2XM^_B!4*-EuV&# zznRtQ)rjomhK8=L>s9p)b#<-qDOVvSnFUUHYZfXHW7bj9EN51llkXfwJ9^W=#qkcB ze-?6KgPd{nNwo3%^x>v?LE!4jW4-G&H2F6Kf z7&0Y(^E+3IkbD5L$|8(0H(5^nk%r&ZP~Y9t*xcON+|<(9*wEF{(EAePiKlW#kIn&tCdL_qP%gHrGzy}yEDfyDqv?h7)95?!eBz0_s5TdW%o3^#R1Fo> z^R$=_VeWeM^;D!7$4-YTovWM&{EqtxHZStba8WfV(`bL%UzO(A@7TZ0zo);cv8TVe zf#iU8^>(*_+YPta)P&I`#3F`AC@v_{UlNIm9v)ui9}p<^2n>=+uxOHj#7yQF=pmEB zn}}4F-`&C0rq7{GpGTX1iZ*=?tz!?5YpKb72etrHtN4TZ53vyufOJL<4ME9q(+g*s2c0`wzYTnbXJ%C zk+-d_b?w*PUEe{u-SoK8%pZH)cyO*TVgCMMs_da56KO$n@B)29>L+i z^14}_<)sc*)wN$dZ{#F@kmx!mmbzL6Lo;cvPVk`o2ViGt+#Vn%R1y420} z?L8e`EyLYIy){S#`;;dQ<5s0T8Tuuu8+n(om1sO((3B#%Aa& zl(1z`($6^JC80wl=NJuKF95O>^kG=ml8eZG=@*4T3Eo#ON40j*h0Z zl!5csyZ7F6=LdfWSI z4|F&eaY%CHOa%jJz!DYNi+0#r?sKW6xDXm%1QsgD@etZx_RIQ>FjU>Tev9gKdB%xx z31nsUHJFU$*Gj8PO1hi6J6d|%2>!Jm2JuRPddTcQNulj*EmH}kR*UvJi!TyOvjm=@ zp`KhnKkk?@a3hGP+_mU$_dW8&Ba0V**x!$$k>bPqcb_QDy--|QyaA*WJpL=M@Jq*y zE6pScWs?E*dXxV8>vY8CXToTUQKe(n^QGBTF8u}~#>W7;ilBY71U-G^+1GRUa4PkM zZ+TaLbIs+FtL2s7efZ(V8HsVz7CrIo3(r2CN_~}qg~iId=r*?ut-M`$E z*IhIy4Gc+E3A7xpG|>)vZ*5kTObS*ODRFrF`>?sbA<(`8A%M&@c!bS^p`n3(0l`!# z8c;rYR{GnhLg98zbV9%|}JDsHTzrk5`q*oaKJmKFzN}lWoBH5>)Y~B%O_Sqj? zI6rkqwf066?8tbPX=~J^vrT}$N_)%zMZSDQ{ zhk0W4n^e(!VsAx7Ef(~ZMdu3&E}T1;e{}cW6Xy#qS7QC#(GFs??CcY#&zw7#S9rY! zjH`()p`k5owrOY>1xqk!og8H4h#A7lsUIPZKWbZa;&6(jN zoy{EsdT_cnRMvvCuvhB@a>J8F4T2@AxeeembRxwlS$V0cc}k9cXjo^mX9a?I*J98i z;yW~`H4TG;OF-H6dO{C_VsNf?8JSbwglv2a*?1AM@eE{R9+iQ`=>lj)@;;wZX2j|c z+3X%NV~P-o#9{FePk06R7Q6|Y^po;cAu#W@0pW^puuaL)u;NMp-;^QjU4e6AE8!A z=0z&Hv)5!j`TD=#Or@ffpP+J~)ZbT}$uV_bK6dQ8InXhW-Vm3US?gPbcgz6`i%Vdr zZPaI+7v|4@VnHPQth2$uK@0-Czn|(%kGoiofLK#?@%jARoSc){2M-@PnuB^K2x@n9 z)n3d!dHl%!M|Uef$4{KqL@Q(QV5F z_jsOR;P%CUMbJYCRo=ENyaQRtf-HOkS$GGs@DB9I^tRrf-kz=wxaqqAGi=536ONpY z!Jz?YlTP^Z>k$klb%^>ArNEz{Vh}|qC=0}IE-K^g7}J<%9N@LG+u;WRAGO1d_yrF* zNIsWEAukON3*5%eEci#3GVl?rrk`W1!$T|Y%Hi{OMGMIUFYbt&cwT;NrzkJvMvOrmI)$>M%Hq zOKK?=9Sf3HM6BRMeI|~oTz^xyY^%W~MU3&!WFv1JAqS?nvRUQrTTpo7R9C`-kKT9e z*nN*b{=}?c52=6TEclS`b3V)d2wUueVCP2uqO*NIV;lO9C}hQO6JfF4Rqt2 zS8KZlh74?9FlzYDICkvt;lsnJk4(bj-FH8Hc-**@X^t~nNb%#xh^8h1t||n$WS60j3z;V5b;YxR47^Ae}|R zRt(&JsxfSIkCVplg-$1y6I?cyr!20?Xu-;r;R;;^VaZW!uD>u!krVq;3{Jtu?EJr- z)V(GpypYk5$uW@05XdCKY0H5@X(#HnvSMHm@s2Kp_Ln1EgZMR~HY$0;&`?9&z);iJ z;BDD^zl9&=yprECcWz5vYHHp4O8WixmDKwxaddQKcw~5VbWX*;m;5W0`jT~e3w588 zcLD4PCD!Fno|I*c}<~D@iOD-2)>uzt4(K5W_hY#-F zvf{ZHEpFYno~Ywz}rAlTaQ^;a8F_51tvTX(@V^@dUzqz5Y&Xpic#48d6R zh>Gg$*4iNjup~)AD$8(hPgIOorWCb-YRb#Y>-vlswN*t*R(r>gNg3WzR|3>`aZQ_= z6eI7bMvAu0Hqg}8*FuURV?C^~sf3Lq-ZBrS_58uL0ONiD0QAz;dwPSS0>Y@PJ9dCz ztFjSs{xtijf2+M#lLOq2f!N*-3QRyzMz`NYobnWkfq zC-Qf!7Z)j&fvx9`uLr_r)fa2O1N&HxJ!LT9{x?4S@WZ#?{1;al2w@e;INFpbKZJN? zd|;3ahVt|KR;^lfTB2mt=3Qt57mN-xt&CpdK#bdqPIT9AZnTFH;Nl`bd+YwsRG~8+ zwe?NLsCf^*{NBI+_0r?1o?!XtZqX#a^N_Hlph*762X_L_^BP`Pg6I@_ES}?F$2>^C zMZZfwPd`V$O22|KRu|bH-M{e8zuyT6?Kx@~W$YeDZ0H~KBXlIpU!hVd@QFyCw|Md5 z$6tCU9c~a;c8T!Gq^Qo?Q=3rxOht$+-(`CBsO)Pu4)sLInM^5*EB1f^Gb&~@GWAmC zE7PdW@cBL*p=8?IQNh`1?CY~&Evwi3Qgu3tZvElss?#-e0^*|^R5J@3Fn_dks7(Y_ z!0_mS`g&v$YEih(g!QkPn5lZCEj2a_GD2b(vz$gXQE?q6t9_)Ytu6NY`P_{gHto+V zDDFZOr=_UiyQLcs@7Z~=6x4V9ZRG_hCU*>FnW$<=yU52c9Lb|mL0%+RMN?lN=r1Kq z?@0foXcd<$^YyE*9mte&J%dp7ZcL1i05T9iNy!Qh^5kcR3Bk2(;Yk?13Bn$m0+IqO z(AbQPFbRO3(a9F8X7Ym+04_u(O`bV(+LY;O)8Yb=>K&InW%}d^r_Y`~b>^M-%uio9Cp~>u{DjFf#*YdQk5#Z7L0-Yh6Jq1WlBSJNm>i*r5s`@# zCZYK4gvp8V@uL$HQBOYo_wPqwLds+p!YH@i4_oZ%;{NoJBQPFx<9?L#YLP!~MCR}7 z$Aw3WFDA9OMN(>L06`OjmEm#RD?*Y5NN~d>i{vI7KvI2U8>+W;w6}M*wKUe%)pvK* z)q;h;wyw6J0fFNhe5yzMyc2!Z-qrvg9zky*bdPptIUOn%<{U>RFk*nRX2HZk(F%mY zxExi|cb|zBySK!R8JnmpJ9_W{_z<`Jv~kv<>eLNk;yi+wi;QPjKSukuba z+OncLd(aDnqhS}dg)S(#9}sfF*>e(;_ZGQ3j9YuHSQ8Zb zH?WCiU?x)$_VTJ9XF;#w$sFd6Q?Z_S;=ZU%_LvznrUhp*Ttk(TDcy~dD6U^nS5%|n zkHc(<7?9Rr#A*oD9Dt017GXd|NMeV?bH~txnnAAvW$6{11f1>Qm$q2!G@~NRCnl3j z*K8&Wg}k^9gv}98QY9f5*Owq;JpqpOLv2iCbs}L3k3fRKCdD%GF%X5sGbyQJTwc+4n36# zF6~*a)PiN4yMk@D;LxSPEy%mjY7qwRgF*sMWngF++yMO}MiZvJ8eAEQ7){W1tKzP*E0hQJPqzbT0uADApF5)BP35zm@ zULLqi;S(g$b(QV?V&x~mpV9S~_J6hFljY07va=aGc}#u=S&&O6dMNpF1%uGH(?h-- za6?{yS7X0?^vqXh0kQZ!wTatQTVsq(yLUEL&bu)@z#PqVbs^rEg@g3wt;xv9Sd-VQ zHn!g%!JGq6`mKJYw5x&i&bN4x=3;#?jWfYLe& z1o`{}g94>eNUe__Bp3lI8IUkHce2^ZPVi|u$;QoJ!a<86GUQ_B=b*(nXt7M{Yvs}m zri@yJH~Jo2ZJ}~$COq=kjKx=khlyyqe_+3V-lFLeux2J`f|l;ChDOiINw{q+tmvXJe;fKmFa7-Af$_+HAR>QVB(;%Cq^t8uH;VfP_e$^T9y-f zHFX=xA<=9#Dv~O`C~VT`wL{fsj&A)dYo))RsQcn(Hz4w5=X31kxz%rB+Eg+hP+OGX z?j*0w&=o5vFb_1fq5q+N0Wh(rJo50vXfMy5+WmI&1cS>=?NY^{RQG4ai^wYt4h!eQ?y3I(5T@w(A&|~JBTDx z8?w_z`iD±)A*UftV6K-K68$R~MahH?p`9pWkm#4Z3hKm$S`U~E|IBuNnuRRStB zC=}ja3Moj7ycJk!BV!RjVqYJ4th~JesPqM10_7htmBs#iU|-jPU44QByOgc#z?gb& z9@uSP2q6PP$N+HruDWSL$POz$0KU=;imhUGPQy^opi)(=2__sqgW9ZORpnyLWl%O1 z>tr*P==$V77Rk8zJC5}ljzuzV9>cK`YdXYKSLbk_3I0}@mq_FbEUv)P+gaDw#ukW# zw*LBh3iT%hq7iEYECyCXWMqR%A`sZvJg12^T7(L&1N9=!=0U!c6cES+)wP+#XjO?~ zaen^z@#ebxGp9}!l!8C#+_B@wPPNLu5ZY6wr%j3Q*|$DE?XI-+h5r1LsJH_DyG6=@nR{q+u= zdZ@CfuCMRniQ<8QzUIrlSk)Dn^ZhS~%2&4~C9IAPA0H)W%ox{u!k96CL1MnT zJz;Tchq;iJ*^rheAuV$uEwdplVY(4@Pgm!_h~8kujA7C0j39LP;F$D7t;lw38$#i# z5Sxv|qs*?n9iyFx!p{ba-A;4C{{d7CWi_b6YKEm)?TLnJ^0$2T)!HBPE>xO*g2H{Q z{ndxoty#71FkxS2BL-x(WqMo9F!-qzE4J6$BOd{S_Jg5x(~cD?!Ts@SphO2^@Be5v z<_r`LfpvOP_@CDn`sw;R)IO8{J_ljY!5a&yjR36}l8i!@M5xS1JKQ5zfxhrZU-+XhfONnM+}oEgS5-7tU1#IY zGFb+L$OvfEht3aqyujf3<07a7%1rtrz(UqjJYb#>7~oZ9XLnc`dR+xPF(D;_{@F&rHxw-H?YT| zhR>I%QuaCZ8T0YuGtx;RbZR^GX$G}G3BmY|yC9^mDElYn4fCI~bkDhSb^1)6W1#B# z<+H~R&q8hsW`wNp?k;Vw20*yJhL*V~qbl`Xv*I~yd->Z*%v7y(#fWsI`$QT&4o z!7v(GKEc7D53<>GYNM03^fscneOCC`BwwpT;U$vZ@%u1aS>jQK?Pcjz% zR;0Pi#+_mH*&9nKNhNCr#|&agPxV z5r&nl6TSj(xzu4}%-Z20qa%x*DD9{~uHj*r%IkbyUX;*#`uM?fBVmkJivLjApk8#1-Y z^;|A}XtHcOmZcT$1#KkQA}*~No!I%yhP)C`*|_3@LjvnF0Re`Mp@ zjk^je3NK&AYUbkE6Icu#J9+xdnG?sd&*dIFe(=!Av$=&u7Yokkoz1y;^*Vs@mr=H2 zJe%EjcI(!H5wNx)P!7y4=oiR3lyaJ(TzFB_rp1dPafT7wh@B11f^{U~a}#YXgYb{p zxeU?Z-Q8NS&DssR9weA_Yi%BWo}L1fLbN+Ho!#xj3~d1$a(llC952c+jv25X4OS=y zgAr~m*Xe9#I|ckL$0IB$X%s(GM2mbuw!jy`FJN8jmaqs&SUe;w84?x&35$S)$uAc< zOm

    Wp5w2-fb{UFDmINb^#iv&?cqb16a#&HJSe%>GI{b?bKjuW&$k_E z?CK%NQ3U(}TqR!&upMwIG}~Q|;2zxDy|}lRaBn0#^8wsjw4Br5&{S75K%*YI+%q67 zr2F)l+}#G_{^JL)c7b&O{v!`xESZ&_vbf2U1JR%qU+m!no3U;C7fNV`>S{X@s=Ykn z%+2ByREGQc>Wkp#*@7QzL70dXv;CHNi-H%P?K$C|d!8YvB0Tx8PWCfh*t&J= z8lYnU&n2|H&j?FP2jIH%50CcaXGIqkUaPLCscUIy9UReVy02fXIeWfd9T730R$r`c z(_!+p_g*DD+n&QUg@s*M3ey(jAW3J|vZVz0?WCIE9PLJBG<#NbeK$W)A(dIxT`f%w z^%bS%j<{q+gwZhbo_i*YWM8NU^sK28^HZy8HwsKp`sHWp^wCqfCu5T&k&?z zkZ>Hsh-BZ$6kLoxB%G3Opbr1B10u z(T$zW4)3w!#!r|q{*JkT7G^*zXWmX<3R?AJgG(B%UjD0LP=~C^sip%%qrDAX*Gevo ziQb!c;qyKmFKSbD_Jw!s`cE((9>Dks0xqz8qdgJ zzW|PTxDN}=rkVzR%GBUQ)P{ZNp_z{JrJXvBwx=21%nV-)o7?Yd*If4+dirDZ^jGNV z*KW*p6PojKEBia!YsxCyySrK&yOEgDQq$O6SzJ=p(AL_}P+wc$g6J@)2O3)OxuL$U ztqTkUDVXXKLM$4M74sSL4Oj$c;(*5;8CyWbTWsK=((6q&q!r=86>#iW?5V^jk5^N{ zell%UVcEHF0TKgtPbV@~oZb3)264T<07kl#Z@-CJ)4cU>6|6@_J z1sraZqWP^?HZNVS68$aS+Kjrv4SL^%`-n?rJ++pTSJ35?^2P#8X}hr)`VLdwHfYh~ zm>`MMY$vr^^x~s4c}+FfJ7r_i-<*xVAHiR9P)y|C)Z4fWX=p;MACJytcV5WO{wnf3hMoRV zL4wX64bWO;)vanP#FOTJ9U8Ir11Ib`W}gof`e5ztq2Jb z0V?7gDJg5WAwZ8g@Dd5ZeZXSbO%{eP^-x9fz+H+UzCDfMNre(URhCc^)mIXGK2ASMSrsd z@h`nNHaMqj6>;1!A0yZc4;o6163VUDSveKY%tuL(3~GuJg7N}Uwo2sbWeGx}V+RJN zO-UlO5{%9j)y)zFOJ$?NVxtqNJMksbNfGLGuM=0|YL??_w&H45;%X4npk_9=A^rj1 zY$wv9@eV*QDg1{FI>bim8j#2d>;&+F-JNZ9Rrsp84emwYD)E0(@uIPjX~J@MCaQnI zwFzt{LL~?cU{ql7h42{AI|$ojnP)Ttbq3Bh7T{Nini<^_DTtheYL(+HhFDh;U(P8RjaCwpr9Y~jhU)9B?68?{6Bfts zF~+`R{y}|&Yvc%cY&(z(dY#!~&m7|bKan~4K>VlG0GaIVQN=r%`26Tps($dna*UBsFh{L+#Oiq49YZKPzO^tO` z)m7I@$|~z>!OUIP)Y4FmxCinkD{5-0YMDkX7-j;>!e&b(a6s7++QIbzO5yScvxHKf z)eb^b37^H~5UMovAtR6wS0@`wY2gQOJ^!vF!LOn9)AVzY_nz(#SxEJv25oGX9`As>y$ zV>=8cBwn&I{mB9s$`syK!1;J^(&7Ic5f6jeZ5tEqPU_Q>EDxgHUxREK`AN-Wmfql>UvG=Ak#aG*CZ*QKyrMB+Up6|Z?;>SEviexN-ycaE7w(LyY2Oqrl zKw4Vb^rXlD9`0G&t@D^ZCyFX6YOqC4e(pc;c0M&NM$l5EVm~^;RMSN8p?j#W6-(xF z$^ieodg}1L?4nCo`n_WpemDnmw-L`Dq3c(QUz?po*PYEdR%q}Y|BrM`gx?ZkQRZ2! zlR@6+KP^4|#dJ_RlILe3bNB!gz?4vTHkmoNaZ_hib(g_rd-~~x3G~2qM9E$B(wucA zYT?YMRTHevk(SC94MiKo@rkivVqIr*dEw=vvK~jTP8J^t{vVyjBJ%e4_4MGHhMFoW zsz4X2C7Rsc-_~XV#Lp>6Ne-dGXROxhdj_p0qa@Tz+c#w8!;b<6GjLsj`4K}~uUAb7 zjR*u5SZGsl1wOtCmSW1hsi;UsLy+Kdl86MDccA*Q*dvoKR{%)2i+V5x0dPp4t)ru( z=xA>4#cJTahU|QU9!d!TK&wt?b%lGarePFRgp02JedhLAe`{fX1eZGncN-@?#uqtS0+=r>}4$~wwx&eu8Y zHd(NEunjd*k0>*Qt5w_E!S=y0rjcbbk8nR{ztx||LfrwoN=1qp2w(ZPg-vHB47$E74DCx$6lHkCqa4VpG<|Bt&%`lJ)*-g!?* zU*D8i;vwBEoj55ZM}Q{z9!gMCk1 zS?nzKG-3U9_*)KQQTeUMYxj*C$~k%<$27|#+`Q@JwNz>?>%+GeKa~zTpUqHH>scS( zOFa);Gbr_*dq9%5#^qGmM19VCK0R{a(xpqWzpQ><;I@UvrzD1nmqs2udf>;kTMp!1 zIDI7Na`Bbx1wZxmU8~rCU|_hry|)$2 z+OfB}zp1qrJl>6$5WP|Id1ZNL&Q@tzT=$BL-~b6 z;5U6A+?$>cPPF2kt_)bz#P1=IaU?(cJ$`n1u&(y~@C-q5?!4jf;4^6miJrLW@NoM& zdPgtwP!AWJR}YOF>mgSNJb(6d-0*X>v}AcCkL3iX&mw+~z8((erk}&LIAeh~(0v!g zMkZ`J#6~6&!o|3YIF5Y?kXZvq%wqNFM!Ngij1(mheLd_fMQB)jgN=)=8g~^Be^$vzw zwQ6_$h+xX2k3Kq?J6yYal}b2og6T?GdG+}%$KOe%vYGVsN9JJ}w;gSG1Z%d_0M&de zdj8Q_epZ9V88-Dn1et%pv(@QoEyulJ$LS04Lfr7KS{9#;Z>BhFD~n_v3G4FaE;fU{|VevTvSnm>Oa>?>zZp( zA-?!>VL=gyEXu18DJN_u|y5au-{UKFLyjlu{#W>K`q6|{2 z2O5ERtgLU=vEYPtYKMk85U%OeXh)m^_bQ6?^h4-rQd5O^LtH&=raeG(#W!iKRxgpz zS7EUa15wqjrftaIZR{GcP#*AJd2&flv)Qy|WEk*Jaym5uST^(x8Yni8=g^{PA}ot8 zt#-u0Q^gH8TrMcQ+#33n%1bw7;)$_V8iiV>w8(V+`>)shbfIQ2?9K&?QbVXF=mT05 z5c~HuNV`=Tbp3iah|D-LZxK@Uz&{ryHeTAcZCjy57PI&r6+LA>>DQysIQ!tfI!!fE zmt~?$Gwz5s)zy}qzfjl{5H%CaoTadUR#QsBlXFMY9nH1f!W*3|<_$DeY5kSrBS&&C zoIig)H?OG8Oc7iCP4TCvYRob4GRN8)&U^|ugetD08sz{h&hFe~am0_0HdWV^Ub%X; zq`a!={AKiW6(E&$C~sa~O8VCX`gD%E_~4Jqphq8>J1!w1AmYm9_f~VfsV(r45~-CDBdS>m~`zZrqJAvno$^9gM(b5 zmwRY=M__1;jtr9!dyAHM%7bEJVt`74L)P8DgrAAzmoGv8-mHeIj3OE2W(tgO975nL zpa2VD2|ay5ARuBWlcpc&cmripMu9FAB{_X$0+@F;%}{U8kO5&m%|LfgzY#$liKnlR zf^Sg87^I_NUrn75=gn1@=HiiG+M%Tal4hVLNQBVZa``mMvQ*=UFs$_hJ$)KWrawpK zAK)z^A-?ZUHn0?_@Zdi`FT&P;c7@81+j?T>&Ye4UpRCsTr!88v=-vf)gh)mTw<2JN zNSE_N(eZx%#E0Md&wu`t>N+UT(?7rm1!QFe&*TH#udA|Xag*g14&w#Y*)k*Z=4^nk4^@K8uF6bVa&6)-xW;{ZLUg)pO7 z7!o`bVM7~04GO$H1)2W99U72Sgz4U_8&>OKSb?+#7NO)u{Lc%$N2)eQqW63;I(?ys zRf67Lz0RiBM&Q<$xdVU_f8M4;YuAR3pZM(j>G(Q>{`_;*&rv|`&ht;tefw=xy93WN zdq?g?@(WiKRP}Q}5SAW?ob`JE^f z#f-zjvly*w+v?iJV!te=iERI1KM&V$?XK+=*Y+;gex$|zXukm0Z^yc}XIxvSYd_1M z?C0b9ZGvlCF*nhI$ZCvLgkS^j0C){n1K)OghE!eee=C&0Ctej|AIk%-G5-#y38g5%r zutYuKb8b&t1o3JOk$+8NxJ@4JUsf9lHVaR9+o~h3t(Bs0Q+*^bz*XF~3UM1lx35RS z(bIA0>8a@HDD-p;(IYZLM_=Ut7=6N%Mma}NN)`k6S0<4c(JIkXl6)A?C_Q1T*bhv%<|m7~IP zeQC1;WX(jde`PFr5hWy3@VzB8m!0g#XVZw`;Ya^vFu8pGzpIl{T;pbnYun-4f@B*y zsqIh3P@wC#ZjF}f`fPCRhXCFm>Lj-7w^G;Ez27+3e%^nwAM2L=+`7!YpU*G*vC)$^ zBmacT3AjWgMyj%?WEEAYqJB_Oq4*D9Ma7U$ZhcGkVY{9zY(dRO_L+yDN%7i-7Z zyj(5F$FBwW4Ii?CDG@J-=Dc)O2Gy;irLn3EKuc8gwl(-7OGR^OymzYTm@vHG$RHv| zxQSK#`3Q>^sOa&5Dq5bPqNFG>p`y2bp(3TUKUUE!hlyw&BDn&R5YJlhz$R4{-Znp2zUAnUgz=3$Llg)|F`T2FxIjVXJ@dL zA`p?qOafdsi|M62Zpy%8xZlUnV$Y++9z%;Zbp; z6Go3tfc1^P#>d5lg^f;#icuxhfvg;55Y)pOjX@6t>QHw}okpV@(G8>euO5y$vlc#X zR5utL)TK+`Y3^vnaO+uo&iI!)b|4%~I`oY@LRa73_l@8)m6opa{Q92NVU z?Hj&A_R~jES;q$CLK7VjHg#H(m$Bec$Y9tG7i+Rs2Et z0q$xKNErnS_H7UcuQ{ANE|@i_^k~1@9h`k&-(H2n(SEi3p0`|f6Kmi7_wU2~qSP4v z)d8a1)P#@K^@Pc|iMX(J;w6<2&R9#o`eMxiyfGS$ax7y4$1|EK2r8ij1 zXr%JldVAF-C+Z2YSR}s}2w$`!T2iYwnhY!&K|`Av42?#B;z6s1e@5a~cJeIhVLn&I zC-ee0j*S9sHm%M=^L{zD+3e(dNZFuqb{*R-C%@a@Rdw(tle1s=}9y(N$k9J+}XJ zLCrO4*6S~R@b3@aG79O=rov0tv?(t>`rIpTfX$Eh!Cju}M;o+n!C)U`1&>QUjOgTB z&bO8PQ7HlKcR>W)X0g{>_mp34cPJEl_Z`d*?kaAh_U^m?0ae22SP;Y^u^_5<-A3$q7gjYFU1$gD*^?z#-)H{LBaBp$t_?+U>D>|}u z@5lQcEuBhbsG!*XTV4IEr}@x2q`LDB3>d|T$jZP&#UPxbM5g2ojWzkdH5QpA8SIPo z9?|Z2VMbucsj_Z7ez}+1{oQY7n18!2{de1l==Phl*w5GQfp#I+{)esfyYqX9Yu@y7 zZHHakFxPym`DNZDXC$q4>ukbA;YMO?5F(;`!(INpUVdMrLTX} zCvGdk*R@BpYa8OazrjB_-~YN_sOubV+w9i+`?KqGw~KqfM%OXi67XmHxv$f`&U4>i zr0f3Fe{z5S>-pUG=k7oEb-LU07D@eGd9k?e$-Sb?c72X>U8mPyxX!v;+C_I;`@t6k zO|{0g9pyUTe_iLz5%u#J{$)HM)04@^o>Y&D`Q0(`n|*&76J%fYulD_~aq*jd-Tgt% z4qO3KL*Bb@p7+1T#-HpY z&GoCZ-a2BAkt0A3e|OCM=3alfUW%n+h}V)DRx$2&`m=E}3ct=`+}@~5uFny!aWnGE zcw2^#el5|!{wk75kCiSxVfJt^$qpH3KzvoD7z>u2+3qj z!jwS3vNHpCVu8$42uA}~B;_EO#gQUsYs{=rhQ-hFtFtIjbwOR#P%n%Q4UKiQmhJg` z)vDzyzl4<*8&KCldwEC&#aNR2kLH|i;Q0Eaz5VvvPtHsn4UCVH0C}^*pP3w_5F%&JD>^&i(VSP(!J)JnQQ39e$5Y}%kb{?wSk5j)1+q9(huTnXMJYu*Uhd5rZn;i# zjHUZNPbUm}cz($Fe&?Brx=N)_^Zrezu2ks|nHjl$E-&Zs!6T&>?`iMN0ZfiOHv&od zDg7>LT`cE3U@0$dwW(%yfrY%e%QvL|>gIjTot>x>)UDADvIK+uUENZ4dv6c;;!%^K zw>{@XzmDneLD;scqSr=iE#{_cJsky19zf21#J{fd8yjTlD?U^(I1Chq$;tFwD7C-` zpS!1)vJsTwP;1A9V-V``Uzf3rO`T4{()$fu z4C@F~6`iw;$3{JDDM?YyU`?8I#~oQTpQmK;c_=nWkfv^2b}Y`AfHRURnbA07G|ni? zMP|y`bA^`*a&xmmwr~pEBJU^xvlSl#N?tSA3%hYxXH&@&Q@8Bw5l$lyaxc7{*nHWLP2mP zrBjNL0~9gu$&WtzXi=<(UPTWMDJ5n-EK*X%1!z%t5U|V@0|CdAkXVC?Q1_~z9}28uD9NK{P>2;- z=u`$^UOCF8Y1A3|1M)u&(kYjMeUk91+^2MB6G!6+;W)x1v{e+^Dhh2SKUK}X$*z+zw^o1n^EX+gQ zH@@i18Buq)Nk5#)8b<1pJLV<89g;k2VJHjlG+qpqfzHnW>}n}#qV>w9u0}&My$!^q zhJ2C1>;gO!jYTr9m*5C5;|LjOqa`@P5*#5ZZhT_$n-! zi=Px9HF#xL=JHQ3P`OHl`2mmCo)Q1>fUyswmgGDIi-nRV9z3t$$@fnl6E*1p@cULA zS#wj6*Bq%}t5D}f=IpAfufAT=;W=(DTr=NO+d0D;R>af??uH)C2Se#;1kk>rwgDP; z0GV`$xDO|Lcn8jXoz(Kmagk0CPvCK-P#)_CyrrzK>_Em%h5AqS4;L?XJ2QQRTs10g z_3MPOa=l(Yk$^5AnJCw*l1?8wd2HXlb$bsUJb0#5jT~G%Woa+Day}>f`00YnMR}*r z=bb)r;^f)$mr9C@F95ojC@=2hdhj&;6o6Y+t0E;WEk!|)*G6NXnI{ow8sq@+iaf}W z67gwOlD==C#}Y^*!5xV!-l2g$V6wD=uN;L-0>&iBMuu%R7Kjb_vH#8cYJ|CONE_Q|=9?e4ZT(^0 z#vi^z9_IRuKknGReG|w*(a2pK31_&M6b?WmkH68#sOQ*c;)wa$205E8l*myBRxS~; zRY^a7x#?tn&N8eKzCBaz9DmRJQ~9O!ttOeTx10)&hz$0P?#N%id0V41^v?7>Fr#eh-)zzog8FG2?TuF zZb8`wJ84n%_yCK>ECmdl#DoWnZ>Ru(%6)leolzPISFBmD0mMwBH#=2H2@@wwj^+f1 zg@#5%j1p2@p|^KPM0i-jgp~1otzI@dIWjUZDmvOHBnGt;MpL26#A`kQGB!IZCcs;S zGZ;W_$MJxY@%9&Rzz=Ve23<26QNIv0SwL{#Km|MiiFpSSL$UxLfYg#arW73#=76}= z0(7at>T(#Fe&m4)6xa?UZ&imcbj78W71ys^DL9{ht`M_J25ZD%b(JFel5v|er$}>;D;`foO9yRBq-_Fuf z|KO-7AI?y7by<&J%G|fpuxi`GQ4dR|-22MikcP{ks=5FMma|kNIIT)Bp~;D1&~pJE zk`b+^XH4?=)a2xyJClF41V|ayju?UVhD3}2V64X$B^bycp{1A|X9% z*B-B~Dk$x!KJv}FZ_c3@Br(swkSK{i!K$vdcXgO(PD76G;7ql zS=h-*?U8(}nEcq^?@#m?yik12JMh#DPgQW_y>&w-vT%n{F+1*H=S+B%erMtYpe?%6d zuv{zd?HypLxZ8JN%6blkMKht|KY_GnaNl@t`}VCcVOUNMpQGe*MSKV4Ay?AFT9v1O zC-(9W5As&{_ymMT`pYG-24%h;BAJg5pDpzZiH-E}iHlKrMn{JS`Xo&pKRO|H?1Utr zz-Vfrl7(6$w35pb@)+YFh_9F%{(5r9vAE;0H|{u=+;M$HNpVRjiWFZfExK?%@8UJw z@drwow!5=mV`e$ET9e(ZZEwRR#ruh+a<8D6gnirAqM+tCdr@TsizuXmhR8++2CnzW zeYnGYDt_VNAHMl+!`9=4vyjTShFZ#e@E$Z;2DL5&%3r1(Kot_cA786=2wB?73LJPq z2f`+^eyCq>vct@$4HQQy^9hRx_VwThe0*eBBSl5~s60cX$0Q|IgrlAY0A_1kL`U70Z9%Foxwe`>xg>etuEO3lklz2!nBL4hRJ`18dg z1Io_2amA8plOr)P|9sUf6tCgkxOBIEB`q0%YY)7&C95jWo~y3PCCkNsDS2270BNSt zs5s&BUQtT83$~R(eUD~byY^)S^_(hB&2aAOb2UT`GBT(!$_!U2PSTdk1c=Wu1A`on zOX0&hw=NTS`$mO&dM71O_h1fCClsZHW4HxLC=U1mA#LE|^c6 zkaAtLy9Ego^v#;dgbDfyTy&*Ywl;(051~~&2;wThtDcEq}^q~WrH*7r8z2SmR=pl-XjEfE)IBOs;~T3D$Ip~lF% zAGqMbmevR&hQAh(PXv*KCtido%`ZACA~ZB45b5uJUSL>}q3D38e^_KxWO#g#+;en1 zw8H-qlfNkygj@b+sVFMBhO&WIuU6GmBNL9mvr9{=tE!McS5{tGSzcOpwY0dn^jc+U z`Lz<*n13xQpZHS+*Eb+|^q80l5!Nfm4jubp{Wt3)JUJfnu*h&p?XE)yjvf7e!|nph zp~6G!VG8`UxIB1k#7~Gj>*_A#Ud*p;3VVAF%$*I8jD0|_pA^(;^zBWTFV_p6oeKk< z@KvmZq->{tV4m=9gg1t^_<(XA0`a5re+t+qn)M_ZPOl#T%Zo;bMFgHOkH8C}15QCh zR&nw!TvtM z9PTKnx4#y&cX@D5Lra{6ZmWSjWJ61wf|i(B({BXFZI{_2z?1J8JvKHAQ!ec!N&FZWRx<*>{Y@}RiU zaibI-VlO|CZq9_F^73TNHa@b1{lxX#zuw;jFLy{h5XCUagnTPalEy)-2`C*4X%UX% z4dpBk;Hbg%CiIZ-{IS|cJ|x9*nvs))+874nnZbx7yNb4N-+r_#zPhle zs8tx*-B1zW%z5tl=hx1-Nz=UR>n|=0P$`Jw3`V7EhK-F5Nc24oyYk;Y)`JlL_$hbY zm4>`azq_XkMc#ZTLt*ig`24z^Cr+Fw+Q%N3z6O{twv$lkKR@2OBug0l+ux2` z>}_K~YOtxK^h$ki+KS&S1tZs^=usCcuN|lI)sH^;yQiOj_Sq+H zU$!7i6_YULmOJlybi>9c7Gn)OjlXa4mES+Q_V&Bi+_U!4`_?@4w1Qv2gCRxp!@Rb_vBoO!^z=gDKA|rZ*dI_t7qMH*If_X znP4m^(C01i`+`9yc3FpGC^GYA91_XIiOP(}9(!!j&@rQ{>d>J>XX@P2q+8aY7Z2S3 znA1>k>EVZOi|su3^_O3MdD_P9=FEDST_A8)Rsy!X^w_a0W^2`jvhr?)Iw0vRE-9{Z z>T4P;HoSSY{>o{9xtuDrc_?#9RY^r-!=-bVsu3*I&=TLx^LPfVwN*8)#?F>Ah3Bu7 zo<4TIwxRiI2?WCFN+oa59U+&}kl=g#6E;4zDj7EUHv8}}5(Hq^l!;KRCx{?^44WL< zgKzTFb4C8V7`{+Ixjm@dBNFq51C&T96f&aNF|m1!kEcnG6S50#iVAoTY#tnJw-2>f zoW69~PIGKN^RW}hFElsTmR+r=XsR!{Sbq8P`O=FehPtW?XNpfAIeqzxq0i&>`2)dt znlsq%C^wqBBWv`v#U-WZPMiP;R8?L16{yR)1)c%FC|(7KIcu{yYUSE#KCHuscOnbG zWHQ;^LL~>Qi7zGw7ME`1W{PfqbxMx~*d4Gl^bHZsGREz5BP7mC@pv48GC>X|92k}- zt9W6FRD}6WJa?Ea_#ej7q&Xo`l*Ev5A$3w}R)^grA4wwu9o2&ux9dh^_tCS7$B>*I zig6@o!)hKndmA|$QGeRdYvetX3*bWQ$p5e$LaxD#w1D}i7U72EqTCguboAxOVB1q>B9+PjRS@}VvACA@QlK=N@f=|$l?>w%TMTa zN=J!C=rbPeC0Up$xJC+vldu9qzNOdPjzk*nf#P#Ng52~y7}Kj;Q_P*UzmF3%2a?{UcNk?RWvj_`lyY2@4SY(jgQ*syXR1I@r7f{ zmK|fcCzdTc!77YE9d$IdB9jWa9`(%*pboKIU>o~K9#J$~qmD>SiIB?F8eV(~pUSxL z3*nL8fS$aHo@_!-NT&G)^kjzFjOacBj6l-KO^`TEX7DkB|4c^mrkT8f&rJr4#bC19 z@ENM2!~OLRaHdhJK#T_*rcC7NsE9Xv~K7FCA z_GEP+dD`q-CPg?(J|g9&o<{MZSHxe`CsBtb+QXlJYC1!dFOwS~oKU@XBH09WsS!KJ z$zzaEkF`RlefQmM1rAQu{m(!5{F*eXVduNI4Kz)a4Sae)cI1nJJEk}EvC~&BojV0f z;?#M|R^9#J!y7kkdief3*W9ye#gZjCIg6JsyZzq#)~>ntsa0!!f9LY&URj_Wvotks zF~P2VceH!jNt1vH#2GeKlvfyssEDLVT`~po!|qZP%+MAvZ8E<1zGxb zpvT$m7QAOOwt{8^gvAcxqJ{z_QIiAJ1Z)=S9LXW_2ar1-$YVHME@-6#-Y9tuxCu~I zr$leb+rc29Cqf68%g%rR3IJ*%ZOUb`Xf@oSsAh{`K?TAEWm2R|M&TQ%m9D}s3IpJM znL!o~40^8aVw6Y&Q~z$?TbAOx>4EzK5!GO&gD@^ht< zI;h){g9w0kxq3bIJG|*=Gm*k;TNsT75W-`0Tf$sNyjbK)k=I|TxzTC|YWWuCy>~o5 zxY}{0BI+!sx)#L$5N|u%4SGOxZ(-o|jRchT$hAlw5z*gCO^0Z-DjKa4TAN#J&3d~P zna=1*9w?SiIYNcNgAVZ3^PDyZChWaBB87bsTLap823myHTK$jH}*|>ecvorqY2*2EsQey%P*;) zbX(|VJu&jWnTrhJEsV)vvxUf{gi)CquM1-;N1|tuBUF-VX*>tL^{D?syp2Y?-N-Wg ziZT?WPV2=53of$E7^Tez^jB`|U;i4*ashI|;g}4CoMDs_26rg1wsBFKP!Jx?U%NUn zWs(rik*f~~g#&O-k6ayA3gW6By*h=-2lo;uh-5qR1WVYzxOyZ;MXH6Lk_V6s1dfE4 zDaP2ZVaTh(IdckGW=3)m34-A+I&oqV%kUXoFhV!7W+H@>%Tf14aCm_*jsM@aodMl{ zxr-4iZ$1LBu00@To|q@YdkXP8&%j7c$C!`$=oLqf0XpyW*$bC1oi8dXJbeslw=##r zjIuB&Rp{u1wY&pyJL0L&kr|WEymn_)&t*g&zZ`sxlegQ!hI|0L-^BG@1-Ner$)2~v zCY}6Sz(ccl1aAx8K_DdYLmt<`kN+@AXWfrV>GuN%a}|Kw5i`T{pi-VEkH<))VqU(Y ztn_@*`3vXI6r6&^v#9tAX;4f|Vp>K<642_Sq+&@_46B%YGo12SncDb-l(f{eF%zh4 zI4v|u&|;H9CFsP`Xib=d=Azekq1UU?>sv6!)4-pJp$hlsF5o`r&z(Pa;o|uVq<{)) zx)m0k#|`rkwT=mi#S6p}6VOsD-~d+i`w)hd^Ws8~Vhg3?-6^u3{WGD(f5>$QyuyL1 z@>087eDkE~i3t7;Q)c5$e8ZQcl=-aIm+}xOBc422Nw-5Bb9fY!+53WPLqFvIeW4%n znmikQd>AD*Q&4bFgL)>n2RCxw{p-hMa|uz83cf83QnTl%Y1{ep=Pd#C95&(L@l&Ty zojQB&T+!)*Q>Ts}LQKJl6UR@Xb7#+BL3s@{2f&Okn5WhA#aEud1pNO?U&^B?KVd8{sP@ptX~6 zrJHdjV#AEra;Ru*c#xM-oT>v*{^ZYWCbBfv(!X);eH2d7x3C=MMLvDk(&JZOdF05Y z6?68Vrf$(G$B!SQqT1?_c)WD!any9Kt{%@ulv_MNy&W`_=YZiKpi3_2fcYO_N=l|o zA#~&kgw)V5Dy5@D&YRKt)>dLQGawg(A$esKVBNlQoT3apX$Xgtgntd}W8l@=6A2CJ& zsvU9wl{)g-&-+rO7UQeEZuf>DV#NS%Aq#j;@F{4PP%CpGRdKirv8t>O?9XG2C^<&H z!^i;!4DycbyJVFlo_(TcFc@V9v*tNEuHo|n z;qyk9*l_!Q{XB#u>acBu&%=LmzmRTmEsO2f&-?G)kK_kjuaoFN2P6^b$ohc$;)MELj!yLIscP($w4n7-9({XqiY)P?5@v9Jl8U!8&a!S zCd$kB6c|iswf< z`lXjPXH(zO<3(XZ^cfw!ZQEuhL7p&;TCAfB3pX>h$8G(fJl)o|In>5cjpv5Az0p30 zX@AQo?LVj2hv*g;rd#VM?FpADelcSht3}@e3{Q%SUzS#i5evxqfx@hwh+n*sm_zNL zJ^`Z@=|~i30m~STb|-ZaMh#-uU?LSUYAS2wRhUmi+MhF+NE~K_HWY`6F(SP3N8RR} z;P}8kT@F=3zlX(%lMO2nSPX2k3@s)jLhMImgo&nzv@NV}7-7ztT%0~z_XeV0#|IwB z&GxsD=eWuTx!Iii@SMudrr`5ihNoO@HuYQlCs&i=YNL7(St-QiYV?`h>|i#;kYU}H z+=b+V{x@(*A$dhE$yuM9%}v28Dmw>)3%*Tl!7E(%VJh@A+*b*%O=x=kxHGLbYb$6< zbGe(bVADoPXxv#HJ#iw&{S!JWDVy4f3l{0%pi)Kkdr(QQ$qXy z7cECuDl+#*S6#+z4f7CBNAXz3r`=LWOmFw#mt`)TduSn$l%TUxKVgA2gn7rOL+uqt zNdTw8JE2!UuQ4Jmj}qYe*H)=fz6nVg0Yl&|u=hAv4Y4eQcxQxl!dDSBar=zqiXOKi4V>WBUufx{>(Wx}FA``7h0>>nQV5qzAI0%!@2?t5nK@|Aa*yw#a@eM&yzl@2#F}z87Lo77Ty&+I3#505^ z2?faPA(A**i!f&p1r2t8h)j-_uJ;rVEA&e`I)Te_gRx{(1E^ZS=F+av$kLoQu-^MHwlv3% zZwzB}>7m;2djG$DJwoqawvQ8G0|g$|5o7?ah!4w8I47Ue-iQW%j?E{Z)rx#CCo&WYau-UC$WeK;;D>@q6`!i%f)J(t<4v}r-;Ifkfh%VwV2#P0gjbje~;!P zmBo82VHO;@2g9RiqPIoIM2CXz^Pz1Mi`3v?4t0Ut#h1dD-^7QVaP%&47(wN_b{8VA zla)#cx{S)jdFaDt!rRa*0}Z-d653B^l6qz1$V2w%PWU^YM@3X~q4p#5kUFV!)S9ZRnLkSo~+xu1J zbWH_aWvBq(YBT8#dNfmSKs%*0?e%hj=Sa$&4hLAl?s2M*(pB4!E&|BqwBV%R1aAI6 z&}wmP>=$1Q>~cPN>2hUFc5o_nkd-}B+wn@}z_SmpyywnUuRe{Y^PJA-pTCq%ea9@b z&YVAGCbE90F{}(!u^JGPZnHQ6MU8GqgF%MzM{r@Z=WqoaR;H3G)H0<;rHMf%Qxv-M z%Ws6JKH8&k*Y^k=YBhjS%y79_T0%YYdVw-BjQ*fkZkCS@?fxp3S%gSmQXB4F-i)-5 z&?5#NiXAwJzVK@*FJF4nxr>#cLyxR8_5cx_Q*F&AvRAf_L7$wL#*hrAnN#M^v@S#c zSQ)xxw!w&EG?-AAFVrW%fcZHB^a%ZmU;#v+R;gqPcpah@%9v1}Zu~~5PilIcf)O32-+_L?U$iV3qkvZ;NfI_i`7=EH<}S< zh9bzQ>rvZ4rm4f}Xf=dZNVyMjFFYP#r|OllQdhTO()O_qtXq(4YTq zVemi65BFf>P%@kR!n#Ul?C_P^rfzBC(kCBzVCJ}}9;3BAYTR8fKKINL5J0`Qx_a%G z+bf46(xy(C>TABZ-Q!WtS^c|(h|zn8dYgOyjKpC!*-o-OG3()^v{>N9G}*E67_Bhl zw%g2vR&p37QLPUV`Iv5WaB#q<6Y+#nF=97@ZXeAiYho30u}BdqRm3H$Rq^o>p;#)5 zlp>icK0bzQGb2|R>Kj6lU0M>!!4>>N13p|qDi_luQcx18O>hDU2aL+{QnCtK%=%V~ zt%Yy_(x1FEL3%_T79T53mF+p5B>wJLLi)E~fo|euSFS7@^TQ8g8vXu8R!SDV#-7(U zH8;NW3dq4D>m8ryh`Bues+luau~L)KhHA4myHRf^OPv_2caY*E4V%kFaYC}tDdZ}( zQl=r?apR|94tWV}hAxHl8!q)SIOHX8$ZUa7ds9MM1TMv({v{`9Z)!Jpx(9+hfEN{> zYj)JPo;zJoaQ1ZJ*@ENSKltX;9S8THI8}7=B>E?(Wg@`11Dq6vhle~KS5ICVcNlhG zj)>+)N#VK$@DIrt^ZPscup$cqUC;0(Ty}O+iZ+TPwiF#IZn8yccYXQg(Td8xq-43r z{Ns1285tVB%z5GDmDV<8`VU|4I9^@ZlbROow;kF8vT?+%Uy|w_p`VOhg+TEC>Fd9r z7Xafav(E|qratC3Q~Qy7QyGkgY{egHk9lArAtRM5czx--6)RROyN{JhqQ|d%c*EkU z=@V~CjEKkqkL3wB<=i}R=HexfJo5@ErSWd!$#eRkQth@i8k$N^o}4#L-QC<`?j2Ml zj-5LXBP19|Ub$}FgO8(P)%%ElB0qWKUU}v26y%3^tj(wyU>PWIM5m{xPg=6({eR&CpVh1=Do(T*XD= zl1D(3+1Wvh143T+K%3Lj){eoy+3V~Eoh3ZfMC0-~9F+{NcsKaXZ>~XfOmwWe=ssusEF_e2fAH-{nMw!N)ptQX3U&D zW7d?Zv*+IP$es7!wP?xQnN#M>B~v5VGsuJAjvqvkZXwdiWqD~n0tFNp2>Ltw(I6o= z$m8(@T&_}roqvc=BZH-H6y5z6@ZT-qzgxh6#4~pb_%G3Bb+kF0MEJA7d0=R53e^p7 zG$Q?;NJjBo)ZOwS6$WWAU?t>0V=jTAa2F02zN-|;Ku^E@QsLE>HhJovua7oZJGrT2 z)L#8hyTB~*lLwFQ-1*I}DyzO#-&AtSJ8RYLs}?0nPaVyx4NsFcu774P=~MhUeYaPSxq1x3#ptm7n>%g-ia(UC+-Wzxt{ zpsq?Cmq4Qb;+4uMwF;z;C&k^!*?gP}cMBH>h8jy75`#w8AR_ONYQUqqf(|sx*P-h$ z(L?cq5aNKF<=tfH+o;7;hP6S7wLyTod5FRUN9V%m`G#QPtm$dHiNKi4ojtp_IA2h5-sF4j zx40ft{KF4RmK3vsit>xaC^Z0?398f^0)+rIp*7%q&Md!QN6iY`9*E^u3i=bDvK04{ z;$8@B67XSs&ZYVMe5TA4O;crf@yaXtoRza&I#R!qe)G+IhRZ>5Iu2fVJoy~J*kL~n zzX**G(MU(6t7=S`TCuPNH^XDBsR+%JiXrqQlKbcom zHIL=Iyz;(3sNuenob&SWly&Q;o4|v0nX#qcrr0L|Y|dL=lfcNfGVaY$mnrfb0Su!Y;`r1(S#YJ)bH0 z>1eq~oD^u_6TIkeJx>QBOiDmCK@3Qn5@1=DXp;&7M<&CdCt>YI}j}O$2TaeG>N7Bm2$_^m-n8JrBK}fnLu* zuVXFd8i(!jWhmMY?6BCufiWU?H%b|CJ$`w_;9!K@@8JeGbhlf?%EbP< zVQuF?dgGk^Tq>blnmDNW5TZ zd0=F&lIpP}_v%H^>_yP*InZo@@#h1!4;%>W#4qxN5Q2eMIPk`m@|LEC`kyMz5L8TM z`>oDe8&FD(S50;rp^;k=<3fyzv@QujPgIaIGz9sL!$BUs+Y9XvEX?#<5E8~=x8Bsv z9lLDZLmO9*lX>fo@7(dl{+c>dHlYef#$PB`zv% z$wLo4bmxpH%64KqfRBhA#LzrhLS{xr27>3sG8KRw7KX3=M;2*2ih-xavP~_8% z4^Ct>I8k?^7okvg17cNClM&AVlxyomE?h^4#pp0J3-sW!whl1;b`&%~(7HsB7YiAk z8X~%&XRzN*`JtKeMN&jE1wCwXWF*k1`kR|Op15~zDpmB!2Y=aF?}=LYfWF$qQ>zuy ztz0v20aSgK{4h`5lHfi2(L!Lc;D`R;EW0^6e(}0>PoV(g18}i_iwa&JeD&1{+usGb zxqmP6CCqz#M8V@fKA&ek_i-+(O@=dwFgHwYp-1unZJw*ah{5hp)5)n(ZfkS>M;~|0 zS^wy7?~ZIc{zEsa_@-@t=N-XAFTcEQVs4VPxfT3h{Mk_(C-K&e8)vJLot!D2DJXxp z*gf%q7Z(@|WO`W0Z2QI0Twh-gTX9`=11gHx+8nL+HY=XhSJxWqt7=I3cB|gjCUAiE z%?()OxATFjOAYo9qdoz{P(leG)DuaFe29@8>>Hv2G@ng~r?diYs3-sQz~javv|2u9 z!8|R~URm3(@H;Gh+}QDHGQb-|C*`YM#h>av#iZo(J?5exFOsAqkCzvg2S$11cOyLV zI9X#zU>uArXd9jMh9jGy1}(dwbPuAJxXonLvN@&wQd4XJlri#H(j*+IXAo=x3i@1W)Ku#dPpG zPQhwSBBZe_haSl4j3iIEDS*X{b^CvtQx?b(G$@lH=1w8Qe;FPKcaTFCNx*r-&qI&D z$}$3g`lzFoRFZCY?5}c800o-)VQKo0SYb9`g}DdZ`p1zSYOb~24%?+!@2IUCZfk3A zce>mDEyrA2RHAy(GL?*^?qOX-^YNq?!o+`LrHPt5cewdVd8I=zXqP1{xpmc~wCeN! zhI69Gwx`fem^W`8xI02Sd;R+LoUQ+IZHbcl7{tw+%z?Nh+r_WG+;*&fT+UWSl=~uh z;+?O{U4mIyK(Kt_mF<=?#f?z>+VyQ8p_ zuyZRbx`0m7+v`HLAE&Fmy-i;s3$HIybHI!eIfh^3mOf5#oym?MgmKOgo{$XmrIMfiHrQ^`C3mSZx&F=N0n)l{|aJ3n}#vZMXjF_-K3 zaf(+}#;3}vki(Ei?R|#}OpA0KmCn+Op2()&V>YBOe`e959N0TIOUI9Y<{2uGv&c;? znoT`}uP^vBws5MP)mUs0ydlh)a67XV>ojI*D`fBU_!9TekgS1kszD;PDwMFIR!2n< zhf=f>^NYA1e*RKOvVRzTdmMdx7JYjJeR~9bn`J?Q4Ma0+7om03=&)Mq%yuL0h;Uhw z1#Y$&4MsC`xmHX9*pN+_Ja9pr&}}Bw(BVnI+63igC{O4oTN(`891fz@X)d#c^1n?7 zIJ#-I)op`JidGvbz`LB_kau{0Q}(_0_`KrGC3mm6Z^_J=Icx5_|MrzDLyj+c)eG6w$HJ#4-1YLz8LzxTy`sxP zDL&MI$AYO6xSb%oXONgN!M1Hs);dwvS0YDgQfc4~YQfvHh$suApb6>-hQwgeo6MbN zJ(7|@F=8&XSd2*XGPNL=$!;;*iT(hQ(FU}d&9S&Dq$C_r%OZ$)3lWW^e37taG~2s{ z;=Vl#>!Vz$R4JumC^0fw6mFgr6^W!2P$DJ@f|*i|!B$A4fMNnHM{odKq$*;b+bqEi zAc>CLKqm^_0F(>g0PuO_1|g1^3XULI7{o6F2y*;!sCckByfFqq4?^Dq-?i#tplk(d zP%}jHJOmJsa)caGA`%HtdAMkPJedB>nF9xo5vJ!+Wi_SekDRKh`}*_0Y^Ap1l=Sq* zMqPLY@X}-Z`eO6w#>RYl(V`{b|6J;I)Yp9vaSF^LG6m+YfG_YZ{N$0z0bGM}q-*~O zeeg$Isl0sBq+b4O%-1Y3FeAF;#dV z0FA^Zyn-b&S^?Pu>$j536{v>*T2jZvs5OvdfAy5GG%^MBngDvu2fe0%UQz2i8LsB3lf>_ z4niM{pALVUVW1)Oc?TQ;|@Y4Jj z^Z|&{OJu7rY~Rl;7hN*{>0k57tekE506t%5Td^w zO9WkV3|OrVRh3l^N(~Ks+?X-&YL5FQ@nPSxf}aRanGl-;pE036M1SOif^_e4{2V4v zj%AgDg_RhlU|z#-{z{xA`QMBmvQlalP9{f4D}#DT{(}svK}%C(lVvatmX0{8v#67jkDo7>sSceg zYqa$F`YerQr&a34&p!QZPotVjabW8*8X)=U$spLBD9+MsFgZFqyE+^;6Ewskn0CQB z*ae~q6F~+*W-RfEg>V6AnoSXlB4tt0KzY+3*G3_eBqVA`0S@^8q)}>3LK0$S|IH;r zeE%e<{S^5ADe(Q1;QOb*_tOvz0H@nvPhU5ge!!`5b_{e>6+UY3f~rwYdT2P*L{z7THJ1Z~@}IVS^_eO$McRF_Vi4#x@na`wPJHt5 z-}X_3x+xHMdpoaOXwWy?U8+fI)~vaE$@F9)F7NJa3nmQ>Wo1PSZQFKYcPB&QrewdFHBhH-cF2xz$Bhv08z*lBPTLR{n_4lyz^ zQW_JNkd~Pel{IIIR-s7B)Mj7;1cwzuAzY6#9#=uUk1Bu=OmVroLbZ^(yLvj>T^Q%K zJ`WQY3HJ+6>aOlZ9aD!3n2GInxE$JDu5KXRjaZnLk{Jq~Sqh$63Z7XCo=IsjIq|JF zOKVegMMXt*Lo*<*fQ4FTaJIo}XSE`uI=Q!Z07i%a9Ggf^^b7(8)76DyZ%~zld{1Y4 z51Y~vRARwU1YM060giX6so4=dFK6la2!HRn&p!U!u5zkLmpnM=4FH=3W8K?39PkZp z-MYJRShE-yDT`u;8+UJIQxtR$uw=R^dCDXJ2~SQ<^j8&}I96!krQg2svB!Ti9|>W} zh!g+>A`l$$2Lun%z4y-1h=Yv<1qBTOv1ZP_tZ1;)j&0rU8t~G(Xqxs9(HwxjQNez@ z16b287iH5Wi^Y5*d}E-vu#xk{5=o?p>qUhdfQfTOG6|caQcDZWBTTnwrer2tyg6dm89Yd>#ao zEe&)}1Kky&?H{5V?7&bU5L;NLV-e|19Or&N+~z>O)@t|eb@38n6deth_Ut=Q3oQel zsC3HJfZo=1aMM{uvwqn!)DShBd9l-OUA8cpdK6zV8iyp>?3`?^)MEtDgAoEaOc&S` zOfV4IApEhwL&V@=Jo#90S)}$yMLTk==^n=KI6VQ+G=*M{U zLv1u7Vhq+2fI2{cL5Cp97^$~)6ePT`I;3MbxEYUT)d%)!wFeF!ICyB^-XHeu)n<;B z^fw*GvOtYzF`x`)gFvQZ#@+JZx(Da6>P5F13_00LmSitkzGTrdeQkA51o9>xBZ0{R z52_piy|_!qG+*AkXYYAdOf2C&!e@FW~IVgSnd@l914oLZjTe{TulQ2crOQ`K$&Eb7%MQ`^ZDkq&>(w#94~x-gQsd$h z6lE==62ECC<5``Vq<^nEgzY|X!(d6+L2exhC(At5nkdiIPNQ(@L)dMu3>j~>jNpmf)M`oY%swjaUbG0ILd0htLwD?MGeRb*aveM9TYMZc9>sC>?8)<`wUt*Y@iE@P<7NDv zIaYY)av7eLL|+P>M;@+_PsxEi8R+)?tyIEW65>08P=~S)G>^;j6scM|lBMX)(jX1h zN+6(o?jhjFe6#?oE1wD~@c{Xc957nL2h`VW2;{W1|`4Mv;^bdPbG9I!mnj@wI9*!GvVuLh;AVbn5 zPfw@A(gX3+|50cSIE1-Wfw^SDTsn-obcoC);L8Vx0A1%r7!$Tsf4i}%>_TDDnZnBz z)wRuam6c^Bh^4BuxcUMDkwuSXt_IF#M|)@QfS2-m2Q+OcF$g#0P)~>5Vl+0^*AY3Q z-VC{63S=GBDDc4g6b=W2dW8Tf9s?X^gr(t0NG{?b^3d<|Q7{1c5cn{@MnX~}-q#czjbZg*4R))jyWx)*>4 zd8nTK0{HWm;OgL${4JmFE$g7((g}+%{`kIb%ljXEke?lVS^6F1$lA`JIC;{XIdkqN zez5PTe{lZeDP-Rx(RlhZOY!zNnXlJSUhHctJ%0SSa7u=R6YRA~mM>pE4esR`_@B{L zW99e+<8O(q#y75Zu@oQ--=@9>vi5%JOKJ=6Pp__DvSvBn`T}T{dJI?%RSE=dUCh1N zH$_DVqQ=iz@bGhwuNoUY+*o616HkAbm_pu^zp#4V#H3_(u*>I9%)DjAL$4#CfH!vh zqKz-zM(I#q<|9NFoS-fM7N-ES>Jz-)!o2jm`(}@!P;^wsU$f@c`SVwzq+#xYKio5K z;eBiFn-@*@)|^5if)nR29D6_aokQm@oWQH%cAwP2yRMOF3H%a@LvxLRD=WNja!7$z`0(AU#t zclGqrGO^d)WooP{K3!mG8*ujxz@VsaY5;UcQBg^yrn$B5;`y?x)s6Lf?C~94cC%}s z*JTBw&jJ7lN)TMh$Lt#5@(_Ior3=d=SQ^5n$Lsft_yn69q34AC1blqQ3s5Ai;vfZutXJ~HF3)1X;a3frzC3; zq>MSY-`vkAg^YqOJ@U=Ae|+)D50@%Bc<@_bud9QI?sLxC*l`v zQzveGc@rxiGd?ONNXI12{>`S@8=qcz=aZxa4d9chcO`$gWBSA>Ux4Dul4n1-1O~!y zu&;bg0TVRhm3t-!^|j`@D&)d#(TU^Zr!7(z{`Iea{phbwmQjz*%#;N3=;0xqNER3W z;YY`fR%gWOg}5+&_{%zqRHD+4*r}~8unL5{gl1%6!88C<3~C!0TIy;W8(JZgS^%_* zln{tmO^wYhO--!`1e1`pAHg{Y1@;DMHe>~1HQ<5nVUlD73#5=NlzfzUp*cY>I~8I& z1SMjF)TrW+_>+*JiGf)X@4;Xrk5)sPih=l-6sL-kMFODqCp4f=Abm#Q74Ulrwx1(ND9~@Egj+#MoGXrpCp`YSl1yM-$}K>%T&W zGsRWtbu4;K5U62y!XL7W%obOBi{54>F!v*mF6-mBGbaOA};-TxQm+ zmtUSWE1&zs1N(R70589pGhsfK)o<|8{{29--=8nEn3|hOS`ajzTn3sZ=6Jp^Dq0kw z)+iOQ`&@f7G=mqSEep|>6==&sv?T{^Nv;QUOnFOrGfYs8jrFzFS1z4zDy^xiEGxsl zTt~I)5<5F>&CS4ygANS9XQ(FTb|kRr^(_tdwvJ9V+2ZifZ5>ytTN9ISnK_o%Tz+83 z$y1K5jw46@zU`YrKt?2jA%|HV^pEZPo?8(MB~c)F`H!!?_S#cV|8Tj(%f@J>`U*>X zv)xcvbNGZIap~G;U)!*5)4G=yQy&7n^a!@FH#xUb59<^hlzd} z`4%}2nO?%n>;U*7-vSXpI5-PH<17p@?M zAz1@V<>X{3b@x1Vp2UynDID(pf@*K{h<>gGrKf|^Q$gub?M`NDYj1(ekJ!pA1|v~Y zIV|G5bv!Q6*oAC%@G1ItWs8FggS!JB02`JU?X3AyyV86~!pkaD*u%Wyr8f`o-tr*}NJ(d;&du5Z zytEp;G}dmzTnQZ(YzrnBkjY`O!fJ%@p?3HwjCy@@ixHW=`Ft$*5PgGr8nT)|UPUAr zDh5#y<)FCpIF#4T#=4Z56l!kZ7Zo=m1pEk*P$U#OnR!@=-vR9G$5bBwjx0&-cUwTd zSGj*V2lZSmGW2SQBM6rd=8>Hl5-*h#d?ok_^WFhVF#XYmp)ABVnHN?iahkUQNr8=x zfaNCwwPZrYJW$Ys+(;x!O;CVN#v3if z9{v_%Sd!q#gin+B zH4!_D1OLP#kW(auDjX9Nt5H!}G#P#E^Rt65fO&JVFy~@v zf8k925&0V5SMDE|tiMql@Fm|vHJ zUx`;|Ir_33{JI?cn%dZ8?{qmD%Bp_cfBewCoqG=MKeF#w>6xQP4jn$<1joDy!K7Mv zm7hd=zXo|p@s#Zlo7xLwImMIk z2V8xf?H%ja+HFpod458-%PGiO^}@@qK8zq}t;f^b&m^T1XI5lnObiUOJuc7ksX{s5=clAm!juUK38`bpre$VKPKnYaC#NJQsss!#61%t-uJ=BlOav** z+}Id^85ZmVKb6~ujl_o_f&iP^Z#BExY^~P*VQX7gTa(_^;q2$O7~0z$_SVY!#-?%% zgtoie2R*a}Xu_!NMG6-T4*I*>+6MbwFd*C9fISM2z$%QuDvZD?jKC_4z$%PDMq#V5 z-ePyP+ngqo{&>NGZ@>L^=dMFn&m4eGaICnpv8AP@mimV-&0@JyQv(~UqrKgVkvMwf zTw!SgMxnm6ycEN5siwhbXEXNw^v#z$cORapNL7 z>JEMN)mP`>pi7fVqmq)8N@Cb?U_9(?!$W>P#^Qp5lZ?TbEFEq+#Ad92@`=YcZ5;6Y z?y=uK`PgsnSh;%j>IXJ%%82fTF?Kj^(w!SNY*<9C)}?t!`KkVXrBW)5#i(dgqcsX1 zMr3vzuMun+kw$>y1MKb9=Cg-e{_6))s|mej~I})`l4fp z4t)RJ?w?Mats)lR`jWzfyLTNaIDH1V6UjDPb9qI{xpU`E|Mc~R!c#ad*Vh{n4rDX5 za^3AVHpNPVw-g+g?egJ65s4RFnY4S}+>jk#kng zn6Y^A;u*0#E6mbYt{y4smPfHEW0hF7{KE8Ux88lb-RV}Q&wTL37hk;N=9s|X@Y=gp z&ZF+qB_m@*ERtaqWbT&Y`Z}0fng${?x7>2e)XCYCRK5W=h3V*}L_!|p=g2fM(fpu5 zrjFzd_3?a^Ix9_%)i6bu?DIt@#lRx$;kg@&;bEa^MC1yw$@effr_lXAHpScyUhE$v zR&WD6Nj8hkh&URPwR51;-0HLu+dI4yJzYZs*!bEy+|mIT3@NBKG=u^2_BA;BhX+UI z#Y)VJm6#W|VP34nyjY2Ok>1dN!R=@_R3G^9B#hhp5A8d)|9I)yqsVwHxKOFLSVA&l zD)L~>b+yoDi@*S6dR)Cq)~06QYGPJguBmIbv*|pJ5Wy~rNC{VotcFi_BqpcD@vfK| z2th-Q=XbpS{`)^-eM*%|l~UTStqH$SclCh62kyLlv3G`8eE;25YtrLHZmaw z1j)2W?`k3a%0+n#$CrayafGpfqDJ40=5t zu}mp39bnxd`EdTAAJZXypv?w14;&|MYgdQMWFjQ@>07M`2)DHAA=oxnQ&)AV-Q7dp zP(_8#kOLCjKQM&JKx#o@GN1s%@CZg5sn~W8#)QN*j#g|NbFtil2)mBv^QRC0_~Va7 z#(^PM)oJ7do;g=$2GvT6FCW=|=vWD+M5f(tYHD;g6&4m=xL9=R^tp3auU6I8lwCM; z=1g<9$%QH8>g4v@*)h%`5kkOad{0*`CQ!Mp4}s@#Q>RW%iWoeHl20Z2PG#2NZwrz4 zlo=hZ5b}fp+-W#Mrh>x=Au-*;#L3y&*^4HMojAR+v7slL9g~_QCazauTlh9!(H|s6w2jr`T*sR+De!gZbTg7W|W_aWFq1KSEh@^OTS-l z!bo*GdWM2J)$nki3z1G3EptO3ar=p4;}l+YOn2>NeJ>AS`+X?7)!~G*ySl}NyYvjU zw{T~;v?=YwwD@Es^r*#dF%qX&w zkr_qwoF$l1A+Kb7OjcHOOoWsN>!P#Zn;knpIehf!*(<1BV~9R{h~}cAb237&+=#a3 z^ZVK)j-yA9p1yecSk3uMMkg#zWi`EhTyOpN+uvuu*s*)x?tEPi z%*}sfK7HpO{%#6>8eMWyQgTu;D!N{(tequ+uxGRYP-e!A%;+j_?2eoBeywl%6^qTHh-4*q9q3drj)<>hlT|Ls&n0 z-Jbe}O32NwPD4u>7Ce|k3JSb-lwdQ~RuO}88AA5k*eQm7X>6=&|Ir5HAo6N&(o%dL znPN(rlrPt))SrI-WpTMJ0{ON>P3L#ODsu?1YLk+aV?@I}-L~rLYG*`xdJJS|m69JE zK>xbz2*#kK$)`_O_A)8A-g@iwXxe(3jY>-r5BGZRUzMF2-QV5SSZ^dtWBgEeZ~H(` zckd&=SvP;Cmcr@sn-`LG@jmK4-6SWXCwTG*G1SgzkHu<2)H?(vpBM{elvENE!=s$j zre#G2?8H9a?pID@r$`xy<;ZO55GSGfQT*5xK5Acr>jlEp2?^Sav14vZj*d-)HX5&1 zNnwhV5(5opAT3rX{n)YKK1<=@K6Yz&KgqNKHXl;+MP4+ef0!6EQ7*a7$xi92Dz^{! zwDq-jV=uG&7|~F>1G8!ng>y{^l5H~RhhT8R4uc>kJp`b7h<=ki9!5$#dxF^0201`{ z$IMZx#e6r$wZ|W5YvT@hL?W)wO(s1h5GqAsy@EvfO#y$9`q86Nele|fL_Sz9mt8n} zXy@nKc6|TUSBK7CA#PvQQL?cW6cl36i;IgKj-qoAG0w6vUw@UP{(Q&wFZO-EbLY3; zexEt^Wfdehy%S-0`2r=IY* z2O`tvK1^-WsUX2AQMCj3>M9h#?(c`lQ&iN##&D6-=MO|n8I+8HOBZ}0iGb%NN~a3t z-e3Vl*9Q9W;2DP1%g4qTP1x{lbui?6>g!DcQ~Lcx&B2aKhM4g*W)-fs zR)j0VlmR;=eDel)_hqO?=dP=3Xlz1=T3vm0-AKP?pyBF7(}0hQ5~?2jpzr@Z-PTuuw$>#S*z^ zXnZ1qJjTsh_TU3+=Ti^qG%+!f9vfI4yKVv?E#!$42{lrqd~BQ_R+2tbooCqT_IY3} z=OHE+)=g{{L7fIv3GHTJ{s9-F9Z&<80j#+qnvLr~?59U8wA(sNJ$>k&)nT?e5gTfy zDs-B*Hld6UP9Whmin_YS7DBNaIQ~Z{mJW)I1H}l&QaUJ>4vNX`wuVE8YMNaT9SWOj zPf>exkxdm91qBtAWO6tvuQ*uVfo<xT|0lpH@rib`X=p(F>IS~Jc3mtx+NXz#rsb z9pRWduA-_F71k<=q(%_m?*g^%0=4b}wK826JM4e8C1=ZuOU~~5^0W8edh^d)-+TA( zpMQVo(DBp9kDtBVNFbvK_+19(Q%lPguvl4VXWa$-zlbH~(glc97cMv)rNwxHMTgBw z((Kyt-8Xx7e}DAAiGzD~?fq{1_ODY?Go>9hd%xca0hZrh`Q6uFA^I=_b1#A+ep7FZ z+)rxz`us{|{M0+{xMNm|Qkh7eOixy_Sp&ZNHa@uFkw+f7>!H=_pL+VYPe1?sQ+}UY zJtq5+jhj$vTbViM;SHORlbV5fI^c=K9F!OxJcLz8M~M3S9hFdys%`!K)+#)yarLuV z{=hKYCIJqw$31{0JVGjn5W?HZ0d@o)VLAeSAuMeXx(v+e?yg?!D5J6WRfG>_1aF zx&xq#C$S_K6<;iLI0`R@cYvKcl4HN!x#Oz?`*wf#{r9`F#@;OHYuN?Msw!QHQQ!vr zXorU{QYw?CV+WWU+5vnIKJxTqYaU#;iR=I#H{2yffO;@h^rHm=pUs{g;G-RTXBh>Y5+v@_}th5V9hp|Ks8%!OXq;Q zjqCs>g9G9I2nHfMKwF!|jM%`=o*_`B*LmFz@T&+Zr7c8%Y`l1Q=gysbPF55hKpN4Z z!b*~t*#h%!yv^29Q+E|ST3S_$Y;9LVN$Ew9>S{}q!Nn$8DWdlUU1g~HP*`aiNJg5I zw99x10U(7~YLPY=5f~(6=H`gx@)IY{8{8@=-gC8#{REqsMe-atk*Ra>)}mQsRh?B; zRo>(&YhQTbh1;m5y75ESm(4o=JhHj#sfGr!xzizN0( zp$`ECTyCTgbq6@G+zE8?!Jywe1mGYK!JdLt$${||pAB^(2boQ5>xX#R?`|iGmDAdb z%2Zyj(_**49@K@@m3Y+K9q zs5RW>X!pXthV*JB)PrBzoBF*^{`QH-h@}aY)7Cxq_#@9g{@BKinW{dd$H3mS8oJwJ z*f`Q$F(LUBrOLGmwH#dN=^h+({nFk9+y1bxGZ+gvavUh0LHh(eO00-WN=lN8_+&UD zs0>)I0{@4-_W+NpxcbKL-QIV#Dy!a$dzEc$TqGNJY+ArTsHQ{Ug%BW+yrA_J5E38> zAr#YWW3Vx9;4WLPvL&nc-lbJrX{GIb@AsSC71&@qU zW={DXUt?nnDiH!UN(6r!4k9-2$DqCmCv9L7rVS6ZLqu2CL+9Jo)l^wgUswvwt%{6@{4AjNur(DBy~p=N z?~#y@k`jv{2?=UH%m{fl`1Kw{7dTr*^d6$yu?Nj&)zJ3KZr1RMM%dXGAy_W&a{TgmbWYCj;D zp`N>f-h=pKPJuKf(Rq_0O^H8d*f8+-n(K}nJhk_`14oYTE9!>mJfu})iIkKS;J}u5 z^1@j#nDZwFn6QMXt=qSKi81_Q^X~6z8wukLQ?+;B-aQ8kY8&B<)KGlp z;K8r9Z`<<8w|kEj0;;R4;r!u)+rQp+zDh5S5a4aHT1UadLC_@lUsH z+ph53_$!X~s@e0a>l_!F~ zZCFg&_19lNC5&h70!UTokVC9uX3t%k>?RhUdmgxHg%sXnLY0)YTa2QVjN6`h_@M_M zK*;{I+a7&z<3qP)ZFu~-=U><`hk9B&({8tNW0MnN(x%LuIaAIB*c&uxK7|H+pW}3U zT$T~Arx9AH0(59ZteUplnb~qaUj)D^KI`OB8K)`iP^!2#hczeST?y$O6oNpIC^{8fLbx zsA_m{Xs}-o^BErq4x_^eQ5YWWYBhAU4-5_s8T#x!4M>x0;JiNEqR%#~@F zE1_zcI@HG#C__y?Rv?ZD=fDMz2`woxm>`^n208~!W=nT_{?ViP4CPu1@VY;;FRT8v z<>R;hwCUs14MVK^-A}xu*o^)`8;T&ivP0PEoREh`MJl`lhU{zu6V7}7`IjENb^Tp; zK6qnloPy@vy^FhyUK*%EDtic5eA}+ZWr8A32M|dsugO zOHY1&H&8U-T}7;CBs`9Zh))8-&Zi%LaHL8PmGAalF)@kBG1$0;At#&LU}!CdhPAYd zSjoWaRE(6%g{||cr=PuZ*+iaJ^z4%ozJrxv+ZJZC2?QEV(rmEm={U++07NIYF;GIl zrwG0e%7$!3k<-&ctX_7z8~)o>r8u#`EZf%&47cw3rrOq# z{swH>!Ymg3FgeO#qBy}q9Egy$jz>QvgA&98IvJEm1|^a~31!K!uKZ$Y*@aX2N8Wqq z1eL3mlM}m%7a2)I@0dTpD3diGojFxh0i5DS&3pfVe}o+DarcRKr?c(&9(0*eTIlE2 zuU)!eN!F4q@oTSAuW039ifgkcN+cOsH=>h_QsSnG#X2M|A~GqB4mIh5zl;>9gl6O3oZP5WX93)OyQUS4-xe15?Oe z(5)Y~dVOCaWRe4N5S-oTd7o5Q=e?iJzQjI78w^?OQ@or5)z#l+V+lRQC37PiS!X}s zzx?YB8-D%!Z1^5-rqoROfaS3p7GFPWcILcAi>ceDXG}~9naBfgZc3qhn;WPv`T&BGl1Q(2YBnD+&h(WFZb! zMY@=DkHvruFW=7N*sMZcWPD6~Xqa3Pr%*tBE947kA7805;f$7w@#^je_X`yd_29N~ z6oFDY+&LWSDKwHUb^21$f-;Pp9kMZsSs2B|7)3w2qPetgbhrVo$qgk1d7o}AE7-Z~ zLK&0>Cok5z$=PAx8+U$wV;fH{w|6x{m-;QVY~TNN)22Uvhgr(yIwf8>>PH^jckoDe zYfXvb?_1vaI09DjoNn6X@lXq0*uHs-sq@zMz)%X6@*fv z7jCeO(FoLZ`laivyc%G)Xn3)_s4 z61Az=!iHbBVnuei(8a{H7u9z+G}gf!)_3mYk)sX8C(f0Yw{;#kUsKZqgK~`H;4Dzm zmpp6M47Yt~XvRK=qoH=od+!~pX}pA{1hI1Yr8E^cBq~R$&YY_l?k+2;sx3No;_%yV z9XNQV%O_C5V^F;|qP%3y*HpD*>(j-=iJGfx8#+c@iJA{i=3lJr z9cpU~c~_hJ&yzZn02$F^A}XdK? z{rlT)UNG3ruD;XZNW7`=6$3%|@mLNCbk)u65MiksFNPd{bNd`DZmXAAd~2DSf~RGje|Zet41&FhZr^rkj$( zzCD}`N1D61I}gKSNvdvMxpJk!Fb&ojH6s+O&YY#>H?7R3_ELMfx37Y(U1~`vDK6G% zdb5`t4DI{cUEHX+gLDEUFR_YqWS>5N8xnLJz~s@HC^t8s{^-4Tc3^Twsnr6H zWe7HWqlc$Z@893*iCwW`MVzN)KOS|0D{$^$*&^mjFS^64@2FQ6&^sWo#J-Q}li+|}tyAhel;YtU92 zp}H1*0Z(p2f`x>uQ@NUIgwP&h_l2O|fQ~l#k~ROsKB@L%aX|~zjhgVb zvhs530QO1SKRG$*^}<*M9~118l#yzMB1|KccpR2Lzejd3>J_VQ`qdqGU}hpP5Bns( zmxn-1pNy5oB){?k^$_++XU}~9^;chEho=q=#XgA#ZxB9oo2_ju90ws)<*u|zN>hJ7 zPQwtb*e5mAgNKzj)H*wAE*6&nPD1W58+icz0D~BY>N41eEc9UQd!?rQ+MIa$^647+Bj9!-+oK|MH@RX+zKEt0x#27bNX~GctL+lRY~m# zAIFgaCNfT8Ri>n;PY7}MoIaU3T|8Xy)y{8r9jel2vk$Qkar251SQ@L&tJDj_qB52& zS(2z$+fkKMDho@PlKK6?gK8$y%864t;oQoqQdg|*(or!>@B8(y?_NJyKtLZibMLu{ zTBeP)ESnU|AJa8LQ3#`>5KKW}HTAVN_pDyI(!#`BZOBh4k;;AMQEbzD3??zC7ojlL zSgr807}dtQyvC8?5j_b*=8MH#OusRs!#UPofrxJx6Yp{iG;s_i1DLN5V7}fD3O@h}Pirh| z9yJbhl~$KuyjWa`K!VmLL{e4P)Dl}mQ)ShKi$z5cMJ4&CPM)vAq@JSFIS^s%gtnxs z5Bj=(99wj~#7^B^U*FtIoJ2-PM}`bzJwTn-G1EE>!tk(v{&i1@J}NfJ*&?SahQg!c zsGf^Ew?{{Eduon+_Qe-FcJ2D)!;e1RQwkS>DPX6bv9UTFPipl7z~sPoW0EFkXJ;n^ z{cjY5V}qvM?QvS|v|P&a*kY86I=M<}8F(?)qb2EdRR zp;$uXA;dD4s@r7;dv|YALLX~4kD1Lj8z8+A2*OP3sc1DDo7;x_Ac=dMicI#h7NA@Y zm|X@e4FgWyb&d56Jw3J6mDP=%Si@5gjX#QnprG@JKWNmDFZ!f==fQ ziHPuGsE0-g_|*Wh8V!qQst)t$Jw`XT9z5UULH?Exl`*DKBB8Z5O^qsHo@y1g@?l!?2I0Y#Kz0~*qF3f zE)x?^8!wa8)zWV#Iw^mp|0P<~;zw0lrM8 z98ju2VMZ7&NiS}bz5jj|`zrOWmX8B5wjx^P%qpGk&O58>>Sn6L^@Kr(c=*H1XY-%` z?aR+8Ax8fA5>D(&J-FCcUnR!H=`A{V9$A;Lglm7U7(<2rNA#z;KF}>Z6z9gij#>Sy9a?%%nPux&# zK;$@FTy}KqL|#ict+zipf_R5ywOVK$ z9&W%+yTcxunVC6nagt7V!wmv~n^Zq$_DR)D^o`d=NtFNn>eF{Fjg1PQvu53fdzJDR z9$SC^gZKVnIa2t3fB>mY_-HvV+y~LQNtPF2jfhXnvt-f^!$)oeez^)uZ_5h zs^Af4fr}s?b_fAqA`QnGJ|RS57~xCi&h^^tL(&N|rT|GSfxr|dLmlWG9VI3r*z>@{ z1{>jU;&z`rSxgK>b{9YxnAD@EDv@*HOy1GMd3ne8e}AT-wYvUT9-z59_WtF~%~<~5 z9Uzpm*-WEAoP;KD%n0GC8Sy zAK{pK1GG#C!SGU87JL|13;>SnCAxgbDue_e+G~)RURFOU3|A`+rA0?Jzxg^>YHups zv-$mh{Oz5;N^KjNAiO8b8hRQrfvt&+!oXZ!)&RulVnT!X-z2)V#zbNK{R zw05f3Yc-gWj|Fnk?qOL#8R>dzh+`J~7{M_;ULs)#+67pK)yYh+tFCWBphZ(%U3Ga$ zDd40^PoKn&p?2?{edwWwCj+72=S-Xj*^zhg#FR!R? zYpz3Fcu940JMmZT!k#t-TT!dQV8w16<1>t&^ux=lAI3+lT+Bjp8D!GBMud^!YTwA* zq7$P-b;rNk@#WXw9IdSGq9USVUA?u3w|?>IaRiK{sMX;TYw!8*b+CpGBeJf&6@j(t zW5+Ib8-?&Xi>6Gi#~8`{Obx|@UzFv>TS-LYTNqYt#M4i|@ceTRu3otyS*?;L&0T)) z)6YJCFZG-@1-5XDC^S7ot&WY2O@NsyHA>{LWM|KgQxdC`o>r!2Gbvt<$850+B_oXm z`;OGRMSQ6gUXdDpl*|}xc@e8L;pSe(8Qe1T5LJ`@mSCp3*BdR3D>E!XO=FU=b>a^%V zzurER3#jgFw1pAJrDss=#ED4=IiX#&7HSe?n-ik} z7{|e0q(c-s;5ZBS`|)$ci{Kh0iuPs9l@~BqUIf>eUsT?X@kRi=aR3Xxxvc2?vAuhC z@7}s?=dPVQzW)AP(aF90_MIxK?(XiYxp)q|3W3j9y;am-wexhk?w)}$)96TFZ(IKm z;6wEz!~MV(M9>>xw7W?lF6<)c89@6GdNOSAKQTDU%VKM3-^EZ z^^R|M?b`Fr_U+pbR@4|%GbV9+3;zD@Uk*2z5|Y!XzLL*2f0z#koq4cL^0DANUKi_j zn24%E4}VwPI%yUnuQ5Wl8<#G zqlCk0vl(4Z2Rz#X$l$PoAl4KpN(dGs2wRww;2@|B>LIXvSW~p~U{FDHl%0rZ9?@+; zGXmrxhl{WYn#yr$x0y35jtPCuN)DIXRi_;2+P^aBw zu@56@kAtuxixFjw2W5!jtX_u%uws)-E)5EKyO$wktyHMF%6p9>i| zM;MLp&cdp8`d#Mn2OL1s${Nbg6r5`{6dn7!)x&f4HFb4$G*#6#z|PiETU%FKg@9>9 zXNTMEdcE7LO7%y=@g?v!6$4y{5kb0^v``C~yWD=wm`+V^loO`yYPx*{7d=_NgQy7Ne!4T|GdT?*m_S_*5z>hY2y6 zJ$&Iq4?gn9V;k?imvR~rnWL14CIWyUP9_eCk3>+ZhzSu1M2gT5sf+}SsxjlC)l<5l z1#mjYkPL#w!GyzOLID2XRNn|!wzkfW#)i&e#FiH#o{;R2w za3l{-DkPlWe--+dt}Zz1>pI(zig5@=PX#ODd%OfamIs7;z%r^5Q<4(n;x%7x-MVf2 zj&F7&I9{PqsKpMzg0>7&QcbLeQCY|O2lS)vjrZSs_njN=x`$e+RXF%QIwUPFE-gjP zcNrb<6|n*30yq+W#7zWriM0yTfZl8c#I_N>VxvYAqcRU$tQMS;NA!r^8qfnwPT?3e z<7nh`nh;rH`@vIjT#I_yzM-MvVqsHrQ4!IiLZ%pvSpAGRQL|;wzE3yr#Apa3J3BKo z3ko6`vB`1AnmZo<^-Z9@(3g_Z+PZwXkAYT4EENhQQldizo}LX3vtx`{uLr^yQi*}O zLRcVRbwpdQv4837udC7ZH8xN?(GIs8@ZVOnBSIvMik5%yId+%A$by2|vpYM{4z7}G z^obuMA*4dz^5w0qDJecC2WYT-juzrng?8w1ZZ(>=Ft#f#gSY`}fJ&XLDZ z2%gt+$y*s@Kg1;AX}O3|A7*%8@91d9i(b`o`AQ}K66%v2u>|Xk=+gbYw%ef1M1T_% zq(UN6a4fMhbgV2J=sz#fX%SiMa~vQoYlX?@&L!XFa@~b!|Ilmzg?vbR0LS$i^|wIp zxf?cct1~jHF-AP(W#GD90KO6;CJl>f*uaQ-I<{=<>Dju??;;|UbGh;mp)e%aGCo!t z$qU9u%lK#+AN|)NbS5{L4DMItE`wZ(wE@0!$Xsh<|lz9U~8JAeUc2tv{3J&XCI^x$}`r*)>amGQknK zpWLzJQgzJ|yU8P;k;`w%#X>H>pjG}ce2_fsf5-TU2(Cmc;xdm1^U38&auJaWI&v-g z`l)r6JoqTNsLAF0HGA?;@`#@RE6AOYT$-<0;v@2ifByPsV;cGOe|t6_B9(3m6}O;;K7-;rmE$mM2o(UA)V{_-&wT*iU?BSNWSOzzr$GA5U|>>Kh3VRytzj@vkL+49r3{NI$hyk-7jN9%)Q zA42Z-{iJ1=mpDKk!BA*n$;8FKw2of0#9zpBedMx?Tt>-d;F=};ts+bk#wJ{(*EDSFc&d|DHe@|9iOpGJgn`kz!qmu~Q*Ay`&V4 zZ>X_ioBrEdZo%z`+BX9;dpw?U z9_sobo{|`ISusxlBR&AiMNnExc&uJO>_Tc$j>jX`hQg%iLau1oG7!Iy5CfXxIhg21 zGZ2eMtG<8#pC5c(P}0p8%dBn1Uv2sHDC9#V^x#9CQ2TTbjXK%Ad#m+SYHF&WzjQAX zoh*Ypq&g$(?mO>Xn>jVg>#|E?XRf>Xwk%A0ZG=z}g&31CIec)JE=`iVppEk=lb15l zatXqAxKedea!Q<9EJIiZ0+4_KD8f=i%&9iaPShwqA$-UAfIWgD&PH+gO!Sxm2MRq< zZ~(!Lcyk=%%s_2`z7%Uq!LmLM@+{B-ocDm34wn?D!R#!@9XKCkf%aLT{SweC3$*w9 zZCsUJLLNYFgEZ4ziDt;T>}2AbEO37}S08!*gO9$sP}<3t$ZhRKUv2&T@agu(JS^W> zLOUI3*D`4Gu)BAc>1ntMaQdru+bk_@O#D^pH#0VIE}>!cRcI&{Ya*C2#U;vkSCZECz&TnyES z27nH^N?ImWjaaO;4kf%Tpa91Mi&wxV4t&;RqgJebBHI zg&=fufz~fU>z4)RIG6)D9=5p8yayUWd)Eec}dwrFbZKylM^i*=>-s$ z5jtw*Ozpz#WcBSeZTyMAcFc{8Q97hC35h9T979#v^vU6#_QS`IpDgb65O3+%xaX^p zC^IMe=(%RZ3kks>*s(Dvt!jHLV`eIP3euYA#`hX{?qTTlj4okfa&3LLH+t^8>lV(5 z@jdcL=0xlia-tVch>@}`GoG>0w2~}xp#UKG9a&43I)}tU_08hlRcjB%Q%~4WBq6|43;sHEHUMC`U}l2rq5o)G&6W zq;TQ{4QuK+U2wjlvhw`#6DON-QkV?otkY<$gkkhzivhY~L`!y9#4vIKqEy#RhQn$b z8`SrA_8D1bs(?4z(KCNRYB<%=+F%e2TNMuIh=(a@#-!G^L9Zxs_Uz2bDJj$E&6^XA z1G{ztPZb|O9R{mu2_k50z;Qw-3)Hp-w_6ZF1`GKqFcrhpqTHz?uFu8lDHgP zq1!m@<3gJ$;&!wdMu@u9O(T~!vJ6SULqa^Bh6XfedIuaF3h2+2 zgGhCAYeT~boUDXG4l>Rvf$XOh%ft%gJ85ts$$#GDp=VWR{DJc_Y&YVNdp7TaXv^=@8Tl&p6pX}uFoVLwdu@6-wRo2uj zSWsS5lSGO7YHD&r`i#2tNZSp|Zdu{QEV+3tpi~w8Y4P!;r5R~y{gg8tyf?&Vlcyv{ zQFK}w;>_YABES>?qO28RO9OaOz#+Z?L1-Kt0rE(D$CznE4;g^02YjJcq#~J^_>@Qx zz-0ZY^dOlFluqZ$yG3Hs6NNyj81IR{ADMHA^oW+PHXJzc$xa^MZr{2c{g9D<_rfg_qX1K2u%nm z6<-~pA*~-Auvmkw7x>#Q4z_)HaORKW7%oTKlP#lwVr**GjoF~%X5HH&u%6V&$jC4RoiNE29UiG@+ZW$o=t3@n z{)UR8GeZ8>Jr@oh$lv|(ho5}%*=k$4dF!ukT~000CepMtIw@%)LKIVw z;AMEYyWSih-_bET%7KT0CI%^~lEr)6#kOI-3bLLDbJoiW8#?BQ8DrOj^IZ({X6+(~QWp;Koj=&^|4XF_5qaFx2*q&)>OdAh;k>q$DJTxImFbFD8 zKLLG#`r?k&K%2;zmyF}GJ&l1)2I0TLjZDD^|I7S^|Q8@=q|!!_Mb7|c0? zO$CRUsVOmHhA~*REN%{`OnftXZ{qT6(m{X%ok!FI=-~#e8ax zmZUiZRynpo@!{vxb#UMI>Ne~mQz**$FR5cG&cPqk@uVYjawahzaHoAdrEQM7` z?9@hE%{G1L(;oT@A-weC+6V2alX@X&gqfVUNC}@X+^r&*EIGO%V8o zn)CP8)b#ekAG5Zseb`MMKHMms-YO-*+1CwWoq*Io!T)Sq~{M7Uq z4(pLcCC|=Ux;&H0)F#0s%n41HH76cG$Eoo1nv{&(ZS&_(ik3j|_+*-l`Amw7=L6-0 zw5jpjH{aXkeBj1%TpS_98=v-Ajb75Z^A<1^ANL(>M{GV7U8SF zGAUMITUaff7Y*F_j1;O1xxwJp1SF%*&c0!!c(B?mR4b`TEcUqj%Cs6T*9$kj5u*>_ zIyADuW9tG>3mY96i7<>rI7T8IBSGv}#Ao za`;J!iI6ThJ`@-0Ml<3T!?`wxkC^QE_|9B$M0|qV86OvcQ)dnbvJpr8T(JPLyFeHg zktaE@>T4+^we-)0|3V9%MGHt)*Jsg!|H53DYcyD~skNZ3aH4j&y$-mQJ8gCc{Mp9< zlml66vzrlPW3eN5n$=^s!sWzGm}BFUc>40~+rBv4 zG#nlogLF>&_UtZ(ra2RSD`p&=f#ubYOdEA|ZMyz31HzF8N5>rOsZ$L-Bzdp7x9U_` zS+_-qpnJZtwTuxYg!*85WZ`jVvzWNyiF21N&Ax8VoLS3mSUx`^Re`7{PuPST*Wa`# zQ93qipw(&F%P`euX|=Ni;0b_=hu=9MF5;sALM2nmm0?N=z_4d0sl_-ci6Rnb!|#H& z!e_+B3rS(-@`WtSsEW`qKFEY+h-4>xE5dFGa)^*aGUK>0**u_+Uo(Kz3W&?b=?e+Z zgvdfkA(00UxT;VdIKjSIoEZNvr;uP#;<{F0$7ONE~H>S}s zxY%Indx4i{qa0SV!)XBm3A}7DQQ@;eU=Q&qIS)`nS`mDG0KMQtqJjvH2;4*@6Cnp8 zO?hE(h8%Z$9Gifh;l)8g0xbrQCqe>+z&ySuC_U~AN{{<6i?k_lzaJSHwey6408gD7 z4?f`-8RV&iNL?b5_$Z_o5VKkiva)C)JW_Fb07N4Y?}mCxN*Xc6XsLijDn?uXF!Gt8 zvy{ilLS_jm?7{1FRA|_@U+pi~aV0WoVtN|TpS{h67mt7Q`PMJgQ8PDr;p!F3uS-t|a~sC2bZBZ;cGfHikI9H3L~?;C>(;HCha`WX z4D=FmO=9|#jBqZ_2s3A4PtA}TU@w6Oywb}BqxAyG$b zqU01$f^-E+g}`NkFD8*55SYNmt0lh9NObBskcF@=cTaAOjNwhhs33=+HwKANC$lOLNkt~Wy?rJ`F;7DFs-4?5(sDcfRjM9Yh4Cb!< zyhAum!XH#b%=U2LZyNSQrD-Do`++xc?fs3lv9Ywd3xE-A245_boRECo>Xj?z&ACM( zoH0de0rswu3R5S|Td;6?LI_|UxR4z}c(EtQIU@tw2yiYeUaLnDo6y^9=ZE5a86mXy zG6_5vFn5I(k1Tz`9c#Fwb~xF1cy8OMVa(z6K5XQ%dwL`qxamtI!)JA}7 z0@pDlgsgKg3^DdlIK+>S0FfKx`l<2gH#iEyRgfbTFvMHJ2B#heBaW4yEDIG}tf#uQ z!iZC+B0|oe4`IYZrkNARTvL*vO%s|@Q%xh2CXFzdsZN)c3r1no3d0f59imj?Su*q# z>v97Fog`zV;pa2nic1mCl!-Jnz`!IRvQjLSa=b2z;-9Sy7eeL6JsTN$mZA95Qb(Os zl$JZeV3>ekNfZw3xLo9aQ865!$IU^KI=@s3NxZy&ME?e9jH=>Ls9WN4k7cTVT;Mu|Ch~UgXk#Cb$D3eNMWVgaV z%i@A63j14+mVpA!hddwt_y5#;DGl!r>D6`AmKE2vwYD}?Tr92ahDWjzn_?^usew2CI(LL z<5AbL{ofrqa(Mr~-Eh3z4NJuBS}C*u7C`F)JjCWRiQWjs`rBHm-5xi8-6M}Yl9ed4Gqi}W71PKN9wbkE#ok*JyBNA?^M zB4UX<$>*CCct<8EGY`AF#gNOH*xk*=J0ee=I9CFn#Il;|a*`wQVr5ljLtRY)Abui% zHD)r8VYg=_yu?j1bpn9j!bFBp^%s!5u?Y!rU9zP37*}V}{%=3;K;x)?WU;q|z0uj& zf~*cE6dS6_+q`-6@jAmrMkozk`@r4zJ^VPVJ>u7Xb3=w)tyz4>V@s*G5p40gKa~4z zZA2TzPv(R!zU7t)7?FfIHzGmbJ=s%COr*s=M#2XAI*~*OzTZPV-90VsNK*61iN$XuA6=kCRxGU3>PPEN`l>IeT#T=Fhej`fX7WOERa$Mn>X* zp@FIo(0$n8+60>U0a~SkR>ZS!GH8_wT1^J6R8`2ph5SzKbqyU&4V9&^-zx~TrJkUj zLS*6xqmuIMHV^pjtdz55CBr@(lLQyHf3@rE08h)`vSp~bs+bX5>4=$2Z&J*~_9%@HJeRbVHZ*?<3l;BdNfNB#cx@dFY z2P1B8D&6<-Ki_$$?Br7R0VHAjBj=lXbn!Z^;!e1t&y7W7dQnl>qC4*lt`I)@CPLqc zjgF7L@zFOeR%b3p1Y;OAmBBuJxt8i?f_CFet-4n$xBv2TepjSxmxIL=?}Mqt|2`N; z$onM33VEqj|L5=1`^!}X%Vh-0`AE5upbhfM{cHD^GX={@1Lgb%(1>f6v-!(41k0rc z%9;Q5`_K#YukP}44pI&;^{@FWw^QgZmme&LL`#G&XrdN6vj0T64*&ZO2j90O@IDQB zU&S@wH|8&g6juK6n-eIfB<1?ASq?U4(4{h1E;CS$CFNSLSuQ}&mS8zWpq$^YL2=D; zfpNbWEH^7q&Ocv~8fhd%ab4<#@$=OH zrxp}!3l@wD6ts|n?rZiA1V_29%gdQaInOoA!9fJ&4g||VbwFlk5-FE_&2lIG<;J-L z&UmDp-$yJpSS}G%A#$7KdHz9e@7sU)@S%NszeTQ;Z?7%4Gg1xUar)cR*?v7LJJHkRDq#)>_EO#R9nUiin^NkrSznY+W)HHW&y( z<}&i05u`;M?rDXj>mX7anKmSdFXr%(bR#IW(PkpGRcJHu$R$#nfADzTVL0k=)1d!G zr0u+dIwG}2z=>mceU{f&1w{5yk?1JH7n{riBC@S1DOQFb)0Zb9B70Ft$f6*x76#{v z1aBZX!V*8P{s6vw)9B7BXMyizI(2$0dn1LFOcw11aKr6}4oXi|jhcH+N9^D1E zD^dwhOU-0xD=K*ht}_^x<60JSI(5TYG9G=Xw^Li;*LbOJXk z(U@GWdoK0g7~R|)PC0eE_VpajX?Ux>?&o*1Ahl${zTkvF5GpRC=8xA%Ja)(&zI;?{m_Rwe zCqj1{ve-M(-EdMN*PZ$rm4!sq{sx*KrT$2&8+n#`6hDby9qH@OYU7T6Mdb$D_G?rX z5~ynK^Qh|kAl;&<8UB{dhcDny=yqAlxPlW1q#|_t3UvE*pr%Eypr&I%s>MJ#7oeKr z(wctWD%);&A_Y45B3gwlh`)mmqOL8$4lc$VCLMefik(YZ_4B%f`S7+wUHMt;J*Z0^ zsB0R*C&D@-qZ#oa^?Op6?Z3zjxqPl&*2D1?aaWLHSP?#7kYZ#-j8p7(xHY0atcai0 zclnCw$YSq8n^u$h4rj61s1J;rtO%f?zmbzg7Cc!I8hDQ3w>_lRibtt+|FV^2ojvkn zE7{~rTKShiugj;x%Zs#fKKc0zdVMJ$P+d~rzF>W~Be*2c>+Gx5M|{q5D9qqz(Z2Ph zeVD=2VG8~E1yuayb7#qT zeG0Tr1wNnzmqH?KeEYd`Hv0s718BEM`zH8vHoKCQWBy>qOnrj`4vo~um@)=*{EZx5 z`R(^pFB2!f(;JphvA7|$+=qG(q28}h@5iWj7wS#;?!dR(zWVHo-G>f+cl6Au!h)mw zzT0!I==6pChYoG~bo&>F9zq^zoM71L$b>qHox;vXs>r4I+j4e2(uV#DIi{Dh3-S8` z_GV8Pc#jA zBa#ynX0q)DHaR zeeO%oKmY7XAn(0TeMz083h^nSj`3q-A9{#-O)LE3i*L`Am$#WW3CwNf<)?Oip`ENN zFF(9x%a#M>Tvb!FA^wGPPWoD+S_>VriHZwC8UYSvunQI??bJF!w(kDSV zl$bVi>FOI+tXQ>f)tq&!moLv+H#a49;>4*5FFn2yz~B$v^Wf93u0tzw{LmV?2;|N*LU3E_aXgOD~O4Sk5YyT8JUl(l!nK~#KvYQL*1KF z?Mi7%EOJUaN6q~()OEJ^jOwk>J$i(r<`(?kFskQz-b`3`%es}TSFgS4)|+m;?XC?Q zZePD<{puS5tiSrE4Y%ET>&-Xck8}I``2I807jFYp;5|_DJ>V05fXt+u@%ac@O5efv z+xXkx{hzn-f08x)pZNTP`UmgXXaDdAEODJyh(z$fXB7%I$pldP+H7bHewS>L$>D7P zquZ7L%k%#?;44dqP!tWI1Dc%aN#X;t7y6g0?6v>#Dhp#ZtOC?-<6UN4{ zsmNhG37L#zST%lEvFYq&HU(=l)c*t@T5KVnk6d=iGCgtiWxC`SEz@f(W8N=V$FKcw ztz%!Yug_=q+3;=hnSGL|6Fpj9HbaRJpunP8@hrzg=9 z@JXjbDJx~7y1{SG;B%6y!M%|Vp~LZsqJ=y^r^!Ot)fITnl7ENST!|M+|1-=;5M)O8 zQ^%-pz(sdbBv0;F)aT%#`>504p=Uvf0_p;I=>??iF2P^U;d2sq2dS;pHt^mZ$i4d+ zFUZbDuFB4q{33RC4R*)-h5YWd|0%zt@{p-H5B-C46=uW<>J+|@ff^V5v^Yy0hx|E? z{~g1Xd=BIP1^E9deDbOMAit!Q|F`_|-{7Xtft$VrZu$hprSAbZeeC}UZu*dK$T#fk z^|kogef7RZpU!8%XUJzniaZB#(ZbyTe(wY4A-fLNH|T$&1stdi&l@h`#_#{kQKi-+JFHU!pJBm*h+LrTH>^i+pQ*xA^|- z`#bpi`@V;K_xf)2#reX0(Y{dMvP-zV_#nmXU#Hii2NGOb*Fra3Uv*o`7EwWksbUEg$p<@VcgGAazvS5ACUz z3gTYiY(v2l%CBWtp?(>pF(pK<1m8itV1br1jf!M)IL=K_q^v|S19Cgg&!Tpsewmgt zK*<=I=g6YIzzg$1hk0w|7o7G~fYkiVE)_`fZ{)8-E8ad?U+OgI7Epdq*{#cYLko=&4ZrcYPMsi!SlC93JIjcy#f-)MWA`H=v=v z+B=^0HQ*hO{p20u%ieKj{2g~>QLT6fz$*jqs9p{y(I4NzM?VPO!*~?w50+ysblJIx zB7K`!f~JwnecGT;X&lNBJdrq1cHBFOyZxIzI{-XF=O9G&6?A!qR(SO2i4)|T;qH{D zzXz2#-PjDJCI0!CRu~$}vgDiLKJOcQ2HHD1X+jnS;vv)s5ZsZG+`AVqe{v~w8Aqw_q=F%1 z#EOUSoe>t3_3TpULEfacg%|gSOrIXofB6qe4csK7$p~amBq2v)MSy6xr~whhfjjvn zSH#{(-D4ITFD!NHpKa`0e&36QC}no9By zUhV!8pNF5%+$utH67iiy!pgPO-^k^!<1=@BwqHJKeq;=P5H#%ZewY$}@VkGVllkfS!=wz@A|8je+JE`6%+KBgAGBPKgM`>~ zWP$j1{6R}4UNUokP6IM?sn>DI3Lqc&CvL{I+XkDu7R-*6@mtbgA-}@_{A1gG{+^%G z8f3nN)@h*JQq1?PL@qTYk*4M)auAsWTPtz%!v)Sj9L4zy=g(iLtFNo8|L(|^-Jgk} z`wV_9WA7(0^6dRNA;{xOmWO}Z5*Ip}lf^zm|Lu*XYy!?bbA(gY-_Vw4E-hOMmGSfJ z8rAZ+-s8Q?1J~m_b{wbtHQ=peo#Bhrzb}60lbp-}w*v|$TxZFb!wEoJf@+JefkCk-F$_lj52=^3;Y;=x>Ls}Ap?1V- zptU4ogS<-xNLD1w90V#=|De+(_6@-a=*KUGL;_8{f3}f%^Yb~7Plg4UPPo$k$##wq z8YcQ5%KV%Ykum{}HB9c#lFQ@462ullte3<_MXanu`f{NrZD!!>N`WKKSbU^t{C>-> zyHXi2F)D_^GlP8$MH7k6RjHyPg7<;8kJn194Ww4GXMi@$?qs+PBtLKtN1pQuLtMRS zhIZ3g&^|oNaQh-7`|z)c(ZacKN!FB%Adi|~4x$Da>87`483|AQc z_iYT9h!cjM5}6G>%T|U<1PH@TUA=w+!zE&a;b!T0%i#nIVG)oNQJ_L3s1SqphM~P- zXzw!iHJBJ)VqfNN-}U7-Mi>-eM;ZD;9ns`Jz;(yJ^G`+?lwEk?L(gSXqf`TZ9bSBd zI>-ovlFPve1=FvSrmns@jS&Xr)(9iKYtoGSV|Byv3?Lf)QfoXui}Z5b2KmF7?A`a> zeujJK$dN+~%-G?pG=_@^ ziSf+b$*6ZuA`jnODjB^@rZ_4kg(cu$23Sp4RVonCOWE%ii&Ew zyFF11rQsL`jkb}#`l`47(KtrS5z85wmhzj25*bZlVRaWlY%D2}BhYJ7ES#Y;X3lgJ zGD>)8i~==Sk0>Yr;hM*kG6be6VBQD;vNqiBWy+MKB;>tB*hq3pa!N*eiV_KX$B+mX z&W%XG=J=u75^q^+%^v!kuEtGlz4K+r*%^p>Qsu(-n|T(frVjW;qmJhx!dlFUCL@>lF} ziOVmZDY(E$1R4p4C*ZlpcmhV!)6>=2)zjI5X^tvrga*GQfj77+BoyH%DmZxr{+7eT ztPJo3E`HMZCKkNYS_XTlJ8|{jLK@(m|MruY%D`Yn+b^y1M~?JU?h<^un>;=1(jqw= zto-q(bAZ`~2ky{%FAbMpJNjr%!eLrW_#v!DDh$ze}nxq^d z_ zZP!GKU754fH0^cg&Yf#$*G!nrfOvgCiSF<3Lf9S9T7jN{ul{~=N8~N}0jyrQ43q4b zLxUp-65B+J#bEaizv$A&foLQq11pL}JQAGJK}Te%z?YBq(#%X131bP-XhOom5z7%3 z>Hk88G28*w6>*)BQ4tyqkfkG|5F8p6M@{@U-{7C2Ld*;yW`+Awj$pjKm-&Km#*io%?@$154DT4k zI~vIE2LkN}^nLJ+m+p}f34rxUgbRmW*hNEmAZ?{@r0D!ex`3jqzMzd5j~==|u!6GD zSCXY}8R(RalG&hBdV4!i?>oCXJHa1;D`0W9<0tk5t^QvLAT|DsFa|9}Z$fM>3+QXy zk>FU0vx#EgPA>w^dQC$Ic32(FI6t+uojrHvG;)1a)|6j_yLVe_aZyPHwN3l_l}e}G zbI(1$TDxMw%vm$%EL^_!mgioDv3ldeh1sjtuD@gLs%487-g)P3>p_e6(E#q18vqw? z08t1s73oeB~4 zsu0Z95RYArpDIMy`hUg05Qh+g?fKOz2vC>gRlEn(B~g1nAhlre0H}M6l1YaK`*Z+7 z#(L`M5sN&-U0vNkh1?VcAAb=5V!{;&H1mS%c#L|F4cX9;hhh_TVvTPHOq~|^aXiAl z2=|CJaDPZ*BWRms4eY~n-~zFM-N?SoK8fv8D9jPf*o|$XewEEW%Dxo3wODUyt1l@i zC?Fe#j3h~D=6%_S75JFi%h1mu{lMqYgb}OJc39Tmg~fdn^(w4JZdyu9txI87)KS~% z|N7kuY~25fLf^vrbc|}j=IHl1P}M|D<55n#n>%^Zyk*OlAzfTyO^u7gMQ~F!6OX~@ z?&+>OpLe1ig9Gs6j*hP0-qWW`ntFP>z}PoM0XQ891dcHqNnS&`jx5)z5QKYin0Smt zSYe$zA#=v{%cdg00CA@i7R{NJnaI&~gi#5kbl!yy3+oBf$U#52e;|yTN2}5oFXsiN z#s5DV|Jl*V93PFuA4elJLKYZHi+<$t(TK#_h{xJU#@dJk=h1-kNV@vkstbDi^u6%? z6bUG|h|GTl!$ol1zATnc+htkoJkB=AY4YD+ojZ5^^(Pq4)6|RER3UWea@+qQ?>*q7 ztg^rH`%KSFdasa#gx;%2O9Ch;C}LULs;qtOt0Isr>Xh+@IE7Nl4p5Snxd z3F&Pny%*9m&;NVwok=5;uVnHOoeS@7XgE$7R$As226@EGnA57gAj`@EZ;M$Xg{E z_+j79ya>MnTQ>N%FssOl!RkRs`nhWn1V!rTfK*19CMdTkA1i-Vr*?Yf#3{?AdYw$q zUE}rZ4OZ(7!$1FgILCP*j;Ob?qwHh#99{j?IF=&$bYdj^!)&_Ka4vmgNXW+0RjW#6 zFG9Y=T2YjrZ>=oM$4K#@7Sy*QWK;)YuPCw?H6wIHsGp~oUnqo&;q*jQv`N+d1Yeg1 zZBa={VV14W`qL1dHk0L1JP4k|9oLFeti_P_fpv6 z#YitHG_eMsMobH}sy#LJAZAp|tFy+AQ@1rMnu(Zkll&+mdS!=BgTGG5H{(h6kTg;n~<+*IL(z2+^DCCLHd|F~mHVtk zkoCYt=3vd@`6In0O-;QvtYKb->vIp@&QbLwJiUO}-9wd+;z_=IPhDyhjWUWrszOPPwSKQo;;EoXd^G55^Dd@S2})E2$nNb|8lm1O*U zi@&#T%P)s~pas)!{+PbL;zozAlI9u83^%d&%vs%5QG%py#brqBmzPWPjJCO^2@{!f z=9F|RRj;8{N1d&@l0m;VP^(U5 z@>b;_Ka6d_R#jGN6;CK^Jm3Qy5KJ@ZK&_%Z(RJvn>(N&f50q#A{+++FE38>*#||F& z;@cxf_W!mc_4q-Ei7ANZ8-@@7P%)+K{PEP${mIGJvVHrKxBvLr$6L1H2O0kD&29hK_Y zm-S>uWeFR*^5*%y_M=yU_LQ-7j&t=o2*2S&Am^Er(EZgk96Gdb*UwuvZ#_saIB5{= z_io$r@n=77Pu{n$%!)9uM^F8@GX?zt+It>Oc=GYP|Fn4SyoJlIyX~`@1enOZ&o-zZ zy>sEs3G6#;;>T-pp-W7*SQbjlrRmtu|0q3q_af-c-v-h5u;1Osr9F249k<+c+ue_# zU)Hh?BGj|Wv$_}F`|OjGXTFWc!u5!>$|nIL#{}hm?MLr_@Xe;R3HWxQa-%8?7U~OO zvppBIU9DaB_6M)7e~~sru}Y{^wgmm4OZxio!MEPt;OL>X5w%#Bs~g+xMa9T7cqj$o zZ<<;>{Ip2CRa*-w*AgVDyuxFKn+(iu&&o#>%+^Mumco&)jX1LJ_k%k>fA_sFH~)~5 zlApb6_vV-W_4%g5dIZVZgBFI&pMUepv8<#r1;N5|4PvR zPY(KX-lrVgr%vtuZhP|nV`t7ZR3Xb!UeUp$siZjq?D6~XiHXRo^X>2eUmvf~@US2+Qv&Dr_3PKa^6p2UZhHTX z7ssV7!c?|ESqTgG$EA0kTL`mzll`du$0TV^SiEwh@`(PcAAdiw``hoIi&w5wmV33` z``ZCqN9(}@yS9Gw%{F3pXfzVA9l@XOiF@vyJ@cBUu`7^v1Si0dFiA>Z*{iqqLNBJUCy~(d=PVEB2_72+GuUt}xqH zZce%jsM9dYB%yd2;j5*6-c#7PDiKm-`=5k+qc=@&{z?CgP7m`qx~p+2bN3Ey|W0O1NWc@7AUja)5g8{#`~K- z{pj6S&`E1q)FO}>3Y;T{2Qt8=;o>_?Dvyo-8 zy-;>x1aRZa_3U}=Q}^8d(3-~**eB?U5-E5Gx<>c(H8W@5d(RV&PFrgZB8nqKmWLaZ zA*04ai_+0rlwE-{8qWWUy&m$$xi*A285xC$M2HpKK@phOMj$Lyn?7V@Y+xATNE8;U zr10tELWZB)@$tTW3i1Emy_>&HXCVQjCx@$4<+d)no3Cf|hya2W{Qm%Y<8$=JN9c_Q z&>Ih9_dK(|cA~hXydq^^Mr9=>X+!efoTJHmtUO;=Z>$nCv7?Wb`G?4xn%kqi=p`UnlZ6)`ho zyvI%It%wnOR^znz)-La+dXguq>WcY(wnsrT2HTJ6?R= zrEz0?HTv3W5;W}Rs%!Nc-!XBQR*Un7|9d&r)L)SNU*J^p|Gk`A)h)>W&v5GG|7}j4 zh&lCQ%&7^OQzv3horpO#pyFJGwan(!9hFoSW>pkphV~N@P(KZlq=TULu?})jFIAG1 zI&~r2t>i$;`Gn1t{e(!=Pp8Dz3&Cir=5@^HtGm@p(ws93m8EPdOHuA+hkL|{^Hog& zS*LLZUEowW<)&FH({c$@Au{z-I|Wo$=^$b_l}|c1zfhwdBi&C38vRsIV$Jz~iazC_ z4`LrUlyetEpQ+%V>m56!ncyG3Cxw8C!?x;5YkOlE4lZyaSCoD-J1spMAJ99A@U7I; z<>b7e%gHHg#d|pjhcbV8@7rw2*xFcGTE2hYs#~vFG&gembqVY_Xt0hmSOmfe^0KAU zENM7g6W_#{@cY;c?1VNl!*Us16_4Wm=lITyz0YpQV6oCHcA3m76QC4pW3Oo*y6^74 zKDroNhmXLJpR$kH5s!tT5i^!9`Rikk-FL@bGg+JjBS*8rycm9JS73Yffbt9khnHSW zzPweEAM+HPH~$Qo{t9KJ9D(I5I|q4A^G>GcAyzy>16!*b>YAIX zZB~9xpoiCm&j>^fehwjqqDO=vf|^~9@bdODr53asbrHiu{rm#FJUr}8EuN}U8IAj5e7*zUKZGM36MRp8XWQg> zX#`s<1w69|JIznfH=nTgv=2UT-(T-vxC|=FCOCX$V&8cH2RGy@`3Y2)U$6`_b;A;7 zR$9@MS?mY)4diB-y&^sOU;>^j!vTK+YsG!D^x&hH;7Dg3jDkLaR{95)#XF+d2G%R8 zt1O~kKwq3We)90q)SO&WLnj&#jjlE~4+dRp-EhgvEp5ROxf}sAih$*DBZETKYMqCN z$;Zzxdg2)008R1Xy&lvRXukWAY}14>lP?;9_^Bp?VZ=p~$Baixj3e$2Nnr`b!Ce>! z_n|MAU>q#LJQ`hWD<~`}L3fnYwNy7EC~R@D;E(L={NiJ&`6ckWHzS8K;;n?BD>^$P zBmFh#cf7+@;~yDGU&@|JRauAM>^<*&`1wz}vW{;~z`iX(nc$rOUGV$2T{YRerB0>u z2}HCPvugc%9Q%f&Tf*UaIU5{$J>TTbuq$vd{7v&E%KLJu{WpA2aP)RNe!LzJ%~nRq zIwkPIKMgN^C;@%-DSI(^^_puhzjXPZZW{66Z2zZMUUkj2S1*ek9ua}y5oS7oTaT`~ z+HwOtFBUQdN|58QN-spmv`9v{1Z3kJ?rLm7vvJ7bg}9!T_^krZ{Empm3+3p-qLSj& zf|AUPGsg}d+Pm)vvWX&CYgSG!Ok}dkZJDPL{=%$=PGhSjWb0Oh-+t}Y_r5~(HduIW z+rIs)_g;Ms-))tnM~)gdHa2$LxN#R>nlNTUq^>kGE@8BP=v}L>m^)?quwhZL>`GCN zWC=kO_$gzeV|^M=?u!VPLL&AZsq&1C9y29e5~GRYU2j4Uet{mm8$EcxkS;J~<>wcb zoysVJ(*dkO!@pPhKyQK0%f5^2SOWyV`4Tq=Q!u2 zx4}8^k}Q6k?zncEe@Bzf7hI)RDax>iE=flM|lEL+x>!ddGbJByXOPU}pU@H^Z%4?(r z(kNC0p>RD$6Dd&OE)FH?I6$sI32RW&Y$*9t?R(g_NuwsZJ+kD|rI#;>4}eaEZ{hN*LT+a_S>*0>gD4VNfuRHxag#lYec2p)HWH}93! zZdi;X2rt-7F*rqhEv6ZoN%Bmbnq$~kRoEJ;Y#8^>>9?fVN>gE8 z&ROTwY|TqMc^a0|IoLE{La-p*BLaoej36cT%*~126Lyjttlk{cMAFQWenEfv^X0QA zO$`r=jKM3wgoC)5S3kVQ%QSNKjLKHu$ndGtC&BrC>_v;>MvaS5S<~jlBZP(-v0zgU zS9-)mjh-AP2Na*o$j@mEm^lx6&~n?n30>Lw87GTnW7y=;Q8C^PCsL63m1}TUfg=;a zk?X;ct3ZHN;K<4SY&CAOEO3tJ1=mFaYe zzAzioXrQIu;FatX3Q1G(yW(W-TnT6&Jx)2G%Ap?I+X{t>mr<$lG#Og54j#$N`u+1)Ufl5Z zkNcLw;rk#CE=l(*V|1UEJf&#OpAl@d3$utgtc>ZU?wAy$Yt9D)fsJMc>zXrgdf8V; zvX;G-07cr<%9Fb1H-7Qq)*a?}L@W#M_;~UA&v1(>U}c>=lJ$H%=~57vAmkDL-b#C_h)NZX}aVD@y@!`>HQG5x`kW^hdvTqDOgL z^qxb|Nq$~8Uo*+Awy~_BnM{2sPmICG*WWKBECgn0;lslG{X0r(>T5h_E%Ng7Lx452 zZ=YjP;*<6cUVM88B1L0%=zmVC1I+^9 z0v&uYf_%YMdYi4LCO;bn>sm?O)@szt`p{o&ekt{c>#7}d_VbeyZ;wNEhTYjZj<%n;uB_19s}EUZKIpM)7~ZaB+mTsQK;AH z$>)|)TAB_Uqddw4uSX#DuGUVt+||W-{u&W)-Hn)~`WxDgPq-@P3^<@6PPt#T_YxeZ z-l|rv#c!xyWx8%VT>Mlgud7BV^1S@oQW(L&7+l|`H{x_tqsVUKr_7ojF)3~`a*XP` zS~?|lS*IiZqLY@+=!g{cMpIm9g_Z0&P1BBa_BA5K1wOop;V=W=6xfPz1hr1%jf5mI zmUTkQGhi7dc#@Zqj@Tts#-ayNMiP}#WD{lh`VUb?PC*VYV^>`=`vPU$5qt;ASdTJn z1)>ap@1e@T78co;)@dp#w!Zx97{rWf>uz`ynj+2I|J|D#ioG3l>Oq>#iN{eh&*@e7Po$eee_4N-o`S42V zv*N*_g?)KWxR>PO>(+>G)ZbXB6N838y| z=&yL?GWLn;HFO*H45N`+ZojNT_a(4H%H1pp^=`lq&~q4-*=TKnwd|}79h#^zvKF+~ z+b>w}kB$|eMa|^lJ0=p5%_G&!2d(vwR2lS4ujZa=dYCdax1MV$Fl%7rtSA=dUx&1m z1>1l4=18%9%*1Jap}+hzffZO1J?6{}Qf#|+?XoFBb2Bs7vP3VH&I5@7VHMckW-p40 zD3%kE&k#}1BM}iT$QKdv*TTuRwY9merOu4N`_LL$bXrkyA~KS~o{;+>a84lW@EBkj z4J>^lB{nQl1)ZOcF0gScPEIlz$yq^yFA@HX$Zmp^Ca}|yx{!0Ev#kXov)xoi*s;Tn z4}Zu5gDixq&SoginAxD#d#PAkeN($$ry-9ytwsl9GW;d4b3+;2@lUNa!06fA)63f@ z(hLu>L=++@xB|G%M&AtwZYw~+OTjOrN^2XcaiWZ2QktIwcY@NIhPslPMhXsZD=fp; zuv4q3M&DR;YAvFuy6Y1|kPeWwz#^!<9k$L4Nmz6`qX%$S+U3{PR?Pyq$IA4ly?=gq z$DTLI<^X@v8++u}^=9+iDV^bS?@54~3xB$M=4q_^-ZIN>1{e@%&}>BS+VsK;`de48 zzIl>imj2|6hgv7y_}HlYvu9uylwVR&mS0+d{3ZDrXVMEYQqwXsGmDDz@HJFSIhkbY zu0xI%*4)&V7=oNS%nb%aK+m0)O>8Th8(R=MLnn`d1mb*+UNAROTX{OY$)~H#)~F5{ zKf=p50B#L|m?(!?UI?i=^q%Ft!&NAz-J0_K!7>s%I_wOY3gLXjJ=+q#Ve1)lxF>E{+_+A zJRm)js4ghY#1p!9ruGhUi{$TknPks512tZRW;yLCBO$hNUjVw}m0bv$z$2tMKQB8w zD=QN#d=hv4SnGurkd>fTr_*YXH@IEK24#b`Lz?v9<4?q)R~BoP)Yo2kK2CWS`b;O# z`t=)9sv0Ifv3m6rae>~)H*g@$S8=@CRNhwEEvM_0_7g!7I-w9E#qZ0`0 zVp$@y({9xc)ELG-WZCw7Ni?-NnkP03`k*UZ%7~fhbg>w{M?g+Qh4ju;9w_l&f`?| zT?L0$3_;;|Xz0F{=FfK)az2IO%=lkX*l6^I$KwA0g*D(|4gbSD(r@Nzt*}VGF@0ws zT9y1GHCU0leXKB-L;BTXCsk08oMb@8F)XB5YV=Lb$cWU`T=AS<$F#JjXm!5c`16gS zZ5KU^U6;3aq>z6pJ9Id32*+sZ&JInFl@(^t$pbu<4HCSIsmuguIn>GuRVlprWmca6 zg|mlpy{v{cvu}d(7nHD96@~4F%sdiszEsbCklm=LjQp}fUX;{|6lq8vjb!3TLyD0u ztNB-aT$9BTt!{LlOBPGBF|MO90T$aTN(x-vsaRD9(`k)rTkJ>8oM zS7|pmKT{J_ud;`dR4=hdlJM@nB-JZ;rz28DZ|XwWbse=U=2Hc4J^8mAyPl=5Mtq8S zM>bPK;Scdj5-u;%1@G=-uO~%OlM8W7;_A+E#+h@Hs_!L=2;*b&3UGBNg7t&pAH}GB z3iKln%|}7Mr$E1_K)(fqgLq+P%!h|rKJ+%w?O+(^!Dt0W?Dz^MyYvjb7oWx4PVXfk zyCDF6XK>7fLI>PLVFlDGhHqpO`M2}`xb*w90n^k}Y~{+;v;T7Ey*JJbwIAI0?!P`g zGHKF@Q^nz7g-B><7&Rs{ZPKJu_#9ejY;HBuXW3cXyYFo;RfjLV^Nu?&k6^{y-;+%v z@^ho6oH{inHYz(`HpS*=^G_q9a*+}VliFGYqg7ERPq%h!+Non1)$O`r6DCd?>0epx z7hq~?G-3JatgZF&5ucmfkTTj{U+dvTRibSQn>Z=jOWl}@P$cJ>umSeOt00CMPq64F}4i3!Izz9I?2;t|LENuLF2iJlVa zwGl;xu)sDE-^eBhVX+lP44A&377%}k7uHc&Q^Q4z+{QhztRl@2Y#_y&inF9DKGCe0Dwf?0WDSoLQYO!DoVxT=NyN6^!se^`xP= zC>mTeG}kx_8;VIrI+$c=rhp*A(Hn#ZdNo_{0QKu&ea@TB37^!rPy2p?=dSZ5dU&WF zr4EI2B|3D_a(Vx)ME_lj{<{JFw-Wug68$#@y_ZkyOA|pJrUm5fL1^K!QmB6Oa&wD{ zt>ogJnVpRWB$CIUU6I3xMKRo1YbVpW84TEo8gMuR5dhiGWb;To17BpE-ZGpYkYBgL z>Mpi{F`-X+O-@ku5>6K4psC+P+Aa657GmsowmFop;pO}#`!Su?;v*Ghjzq%=F*n(T*nFYej zBQp7`QV$|a2FDZevkWUPYT5qLi{^iQ7pJT=eE5R-jc+_dXMsqj6v^> zLGJ}{>^bM*ofUnCZUm+l^0$>F5I{i=Qzj)Pf)sdR7YsEUPGFR^f8-HV#~q-!-*SE~ zNCc|!>M&cOA9WxL^5%L6UP9qqNCWBNSE`O>MmA56zgy4Uk1Hj`A` zPhumF$MKOG78NNx9)p<*a=VeY9s;V$2tSh2%94_NYeBNv4f(58DxC_+aba>(m4I`f zhonnY*UvhXV9$i_n7530-EsC7RpwoH#Ysz4ciokVPcqw}a@(64r#z|6stA5^#vSqo z&GZL+($iskuTD?*c|bPO32qid-lAeuf(SWCYHCMvbECJn2M(aU0(}D_dwJ(ZJ-01s*d@}*J?4;Bt{SNnd%LCsVhtz>cbEtWX;wVCgM>7C5B zB18d#YNxS!m2V4-g!C9$SKv753WRdPJK0cBQD0qav$9hbbt|Jhx~;OoJ2Wa{xW6Bp z((|3*yJ+A=)^@XiS2XyJ`)E4wf(=AQX4ctm^eCZfXD3c>dt)gbt-J(bwI6|z?o6DE z|C3_H^+#H}a^)9atXzq-CcV*@d|P17k7N2BntLvv`oQ^3(SG#NHP?Lf5h9<-UXDgp zRaTXimXvq5O%KZyLaJ5vax@Vk)q{fqL%Lhq^91h)vQWAS?OlQPPDOjCVw?r#ALP`LR<4A?zool} zZIa@ifDo2BY@>Ab<>Vr{MSb(-U#Dy?|9MuhOd)ya*}@k{a^pGNJO@q zag~E?R-#>v)mB?mSz20I3Hp^)Rf2+^FzEq*bVB1m33xj!L`O2e^7kWJ4)gOL4riR8 zurSawI6Mp#bp^pGn+3{NAv!Ugn`D8qS)l9!>`ZAAE5N){bCYfusnH99R? z6`etyRW~s#k1b_IS;r3?{AKjCX`@5D z4V^75u$KxuvCANxtF5c=H2H>&8b9{pW667tWoK4eS|Ml97+lykR%yg?v z`{BKJ-q^d#J9g==HI^emmSCR%e#yoUZ&*F-h- z!)f^q*oQO~rQ7Z4MNN2nt|;yB&-e)sESWWKxCgYC?!(5aRAYy^V~j1CF$M1>)3C9# zR|4Vy9{lORnIhU$oU^4L#K)aS(m7-Vwq9c|=GCCbiGRV**hA4an44mNU9DCaxqw~y zZ{TfguxPi1C-DL18DLc_$qmHIMc9kjY$YfGi!A+MwPHlzp`2{mq~Qf+5n_Z8ye%i& zit{zu17ZYN5Nt;rC?#@+MM7dF(GwS9bK-M~DThNrusNkOh6({@gh@DdM1Dukvi9fb zXNt~Qz%PKPK}$M>VJ=l*V}tz-qFMP-7FrLeM(`V=SK-1Hd|qDofNYk2 z{{DU>mv~PD1olM!{(&d~y8`;b&krDY$lU}PhBgbHcu#?$5QW0sz-Z9BHXNMfw__44sUTHBcJZ}^-@-o=rQRp04dY=W3xgv_fI zwbiIy*3OwVW7eE`vdPD8_t^s_#l0iQ*5G0ML-HuqefQoT&vr_XQcLVNFLI!BGthY$ zW+6z|(u0%<%!KWHUO{#)PKso$wY0p{S}2>SAF(aj7M%LyP*^2U@6Xp03Wt`_@ z-DkGeVw`dnkRhlBO>4Ky`cCQNqWDf&bE*uw@8gu)rR_-IJk{{v67N;)28)@EClkSK z_&;9OTMI$h5+tTGYd}?<3W4Ul1N?jfSZwzuujJ%Sf^1@1cLj|G_ws>HB?N7VX7>c+JW3z(PUw6Q()>c~j+izuM zCr%u}yX>^%d-m)}J$brRw#4jcxQN9Ji=8lbWN6^<@Uc@SPyOkanX~6ijVfZ}r_Ks) z&G}}_<`g`)&%BFycm#MWr8c-uR?yM4uWwXTpvO-?6?FwKUAi=kmHdR~B8_G2WNtxr zUQvZD_3){qzaN=9bN{}B`}TyjA3uDsSUdizYp$P<=fW=z$H_)RlXh5iKtKRuWCR5@ zAyaKt`;sN$nrb+sHmO3F$fL2F%*-wD3UpVgb$%gUqozkkkDfGX%=mG$MuiNEjU5#` zBFw{N6iw~~jgX(bvD3?llLBiN9BSL!F`_UXw03GSx>ZVxT^@~ZFw&ZetyL8z`Nd6b z#TB&`*?Cpfbdz6-UcKR#Z($ADm z_GabyarNre796iyaH@;U>lI~X%97wfWm^fv77O{v;?KhEc){Ixgl1b{l0Z4h>=qP|!YV80v7jIe@Wp-{_Qn{2-8d(CI0~i;2w$_KV5%U=12Urr zrba&si3EK6{K)a0iqUXcB;s&HcJE7rF=OCGat+-j9UVL3bWx=a%FoQT7UmQ}n5Cu^ zWWi3k6k=?$POq}VVnj!h7({P`8-T|ffDoN-!iiH;LKp%VGruJKilWzgu3!7o=HIQI z6R&@K%`=bQFiBQV%E>?guLPIjA6ZMYEZ|4_vl=44Prf7HXWDal>5|2ryl=|`jiXqcv=;4CC;Ki zK(=1@=oJx??T44B$Abz>t-1L&IyNO57U1Z|nhE7ZvaZdJ%CCfNS6?t*5g6z1)%jx(0V+0c|21EKql))Wum+Pc_c0> zEllpAuhBtYz1og4{hZ|RS-cf?0+655x^n2hm1xC8QZ#4?0gz}x_Lzo{4A$oEZRF2 zG)w>uV?o1MrtmA$*^$B*_`0s)7Wnu1|XT%hYP^v>P8=lcWml%8;M;T)q0P#bHwUuJ<_$maZjYHO0&9d3CvN z&fN(t2~6~o?jN^K3F_GYE^QD2*%|Sa{5&6;x|e?}k%p~+`!HuV)uUI0uSGC(DJDe* zV)?Kwh4o`$PF5DMECFKB@<5%U!9cU9+hDszyCPrz0L0SsamN{vw@+Z?`45TJY$WIr zi`6Waso6--gYwpsS1DwYlG41ha0tuJ$}1*cqa^qCb{*C(1w7m?>EtBo*T29gNU8vc|Ji|#m2Iodfj`vN1mGeY91bX=9$Dq z_Dn>gHR2g-BKKm&zA-DasJs9!XeCA2dAvTHkGC_Ogqc7GO8`I2oY;dI-F!p5P2K^4 zz9vsB1WE3FpBk!F*;tg(rUKMuBkf_5J2eJv!Yqvgc{xe{eM9RGwf$2#?L2`~s86Lu z8-Xx225m}*Q^@Moc<6!XbthkHy|X=mZBo*e#nQKr;~{Dc+Vspb&oCTK3n~^DW#y4u z9cYQ zKCh~#pt7W}q|jDbMq;FidWzb_JIZJJ^z`&Cb5@<2IR$=WQDe)I7hE<`KT(T#H)+25 z#$t(EsD#?wopSV_nx z>IZ5U?<^jsE!b!Xq*et`S7G1W_Yu)1in!>5@#ul^=*W|ZL8gMj4lI}qO{oRvffg3N z4uM56cjCH^sY!LRPF)=mQoT+*rbre9MfQ%wPS_&KjX}ATaefqN6$4tufL7i)h!2{R z4WE-7tU=IU?X~D}nQyp+SDrsCxNLKlbt_#NgL2XW!4>zkl{@ zi!}Rj3wvDlzK#83=CXTN9L~69WjVX$Qg$=)pRiRs7cbt42)8>a8Y=$F-Zn(kv=!U( z@@!ZfpxD}n+!9QS%DTENQdb+2#LI9I48%yqCd(gF85CVH=Rd>et#Gu*c`h1`_JpH7 z?nNaqCM$$w1Lf*-FxBR4|mzU7t(zYMiOK9UdZ-A6u$|oJFy%N zs&`{qh~bu>nMKEa2)L@&b|BF*#5_=4c0cmn3r6LIH;j$z-a+hJl)44r)T&EkXp;LJT4|H!U+z=07N`-Ag@A_$#k{s?z-P zWuS=^#nN(|xvrP|{BSknmgK&o?%`4MZ%W8ayWv(YcFmJEV%KuLR9s96e{oB4x8@f^ z0c0h90BPM?I#{a$_dKYm-Tk}*eEt0Whz&s5-amEsK8<|h=5D&eAfC2R&@L6Q9TwPl zX@M)pS$0p~{WjCQ{Jfv)_0I&tX3cU26ZUSTWzF&0WvnwEZZYdr*7gt!`!kq!o>{Yz z;Z&g;aZct!m~nA3AFvo(;G2wH7$MHd?1Ol&oXjzOf9v4~ikvjQH{O{!so6PcX$ARd z>BtjcR&{jHaKQdpCto66B>kP)q`$#)1-^VpXBW@fB@c5Pj>pyR+1YLu$qmV!|0a_& z>cw!!x^huGJiIcc3W@I9@L(lMeB_aZ7Pb(gFPgMQb}zM67uzZ;3koYM*irnf)zWas zZogf25Au%)@DB^~_6ZAxL2RGjx+zbR@R;5$NXtwu$jg8QRAv?!(ug@k+tH!r9Gw7f zs~42zZpLQwn{Qa;H({4(ypsL6V9%ZfbN26_1H`RtEq23o-iTra9Oq( zF^OzkwQA!pix>YQyH}Q07ZjDjda1Mo#>7U<935J1hv4t9pa^f@U|1&w1+r0nf9uAY zW((r)Zhk4IEh_e?`59TnFEkD@TnKa7eO`=WY&qv{Y)O0*@`3Tn9~ogwSYy}rto~!A z$>i2t=M%@?lvV@RMBuuL5uSvv?4C_~dTf0Q%SzZG;DfPe?k0B%9ga;%a3CAq z_cx9!&D0Zs>jdE1JyYjrrDOM%m79Au9if~vF>iQknHEMRi0BI~5p2z5GuwiE$=bBN zFK(==BK^kWnlmXceF)yY6Az!!&N^{o7Cn5?$&(k!(yl3S%;KT=UpAwv?6D_|!>^5F zcUxd#@d{>YslFapv#z?DZJj)MtLzP@By6$Z`c+$6r9b=&C1S4u~RG_)jlbJ&G?3 zZg?5pj0Qa%oncdp=BPDHWwB_f%_`|RanhJb(S4x^^#YmTGYB(8y+H@jqDvm@iXLqq zldrtz$!DHjbNk{5UE|3g-rWF+;92#$cXk%`AyuT4R7{p%{$zJivuWxz5B~k{e}DAW zOGf&4rGERmEKhMoLX%XLb#!m?u9Rar)y-1im|1c0@pGrdzznMoX`{MH%jV6SH7-=& z*;thNJ5*VR&sIpD0V5_|G*9*;K^=pIh>rjCf>DDIWQr04)zG3U!oTZz=JR_ z`9~aB^?@r!yCJ(u|2pDvGA|n*zIu9SfM?|%crkLzV#6z69n3AP@ScA+re&c=iU%i< zmFG-w0!0Y82%IonOE2_2e`&N@{6oJZgq)UmR7G=?1S3I+*YFUP$h8ltBGD?mB&D-U zeo;qlI~?|S0?>Gpt`H;<2dOdFaf)PtEs9NrZ4EN$b5L znyp6;?*8qB)etsocKqTg{h;Q}n>WI*Ivd@Sl3ndLVjfx1uH!htK?N^#3lUl^0S?kW z_S*`4EERH<(?~pcJJ~j9)PERG%o9-MfMOb^1^uv3!G9IZB2<>7q$H5Xf{Q{IyT#!b zpmIDU3aR(OoBT7jZ&w z?t;TIK-h4)w9uuFE=O>2%>fRY#^NE>Zh_@U(Z0;?x`=m(llyWVhg=J==01(9Be21^ zb+n-9Fsd;_fJTTKt$k`dpU)xDQeV^(WTb$M5|FFC9nAs`*S7*2JT z2}lQhdPay2^wC8^nTwID!^ky6V&Q{+EgXhejG0-8ycXMa3AqSH!!QTm;N*I6PIuG_ zz-|t3jLXU)ARTi-XfH^|TySOx$S6VGFoH}KEgdbWTRa3LETE`gnh6rxS-e+G6OcoV z%l~BN>YFVBry5k}Jk$wQD^?w<)zLH9PvH~s{J^6m5A|Trd^0!8fQ*v=A=G;iRzlGD^N!t#fJgl3+w9ZisD1*E^IvwR51dI3qDqBaAZ*_dbzUUuj2)s*>^=yRuX`T~$oT@&X9&m{0&))3=~$&5 zv>j@#b?hl<{Zj1^b=pLoO+(ZP_bcPGB(HVWUhcB?^PwGUX`lldbrZCs?g4fyf2{7iK=lWJIc4X%3#3|u9g}&wR?Gf? z?94UA7c4+eJUM#w$v+}Qt0$zUPUx+4xBy9d#;m=2XZ-@U)uf8+FZa5XtzFl0LGty0m0gIC-NW(ck2ub=Vyt#>o(I?3z_pG|Am2Llz@N|a zj(OsOMhxxj`|hoH?bzAFvf8n+8hEr2B<*>jp~QdiJ4O{ox`3o9Dw0pW$m})v`W7u_ zH5Gw4b{)gS)NsBGLQcp%_3?JDU7Luh5SYq+r=;!~jqv;%&x*XyhJ}mkYd++pAt{{^ zFNt&o!7kXz=MGp?#$zPy~b^;T+K=>$w|`8T!CM+srC!dWU^Nd99qQe3y47fRAM^QX&nx(q&|I9BuNxpj2m1@xawXl8fn zEzOLR)*)>Z2m`u-+G|i6Fg%gjq;I1KN(Q||bOQ#EONA92yG4veg^Gtf@Ll;tX zxvqn=Auo9^J#42-^?_|Ca_=q5Q9fR`~CnpUdPNBZzoMvl8R=}T_tu;7X#cUmm1d5Pc zSu2js!&nhAlC+DtM;F=iye_hPcISOY5-UPQLs+FcaK)G$0%{nZAgBxu6~&>jwdp(0 z=_7m3jl4COJ_*fCR?h1Z!@oDp`oi#)1Pnkh83cox&L3NSVFs^fDd@HYR9gVL#iP`C z?8rwb8V#~0;-%wPeqeXrnH{&|z?j1>+SR$8nuN@u4)3%iQ1&V3yaPNn$&W5h@w6l( z_5nZdBu@?ZSzVH=<2-I?Hu#a=e_g;3+=Fffmv*2_A+FJOsyEL$_f!8#uM&(?&gxl> z!3o_`R^QR{N1i`_#}-Y@sG`dmx?oJ0Stc${es-~o#8Y&r%K5Z=q&YF_yC>u=81Q*b zuklag*)jehT9VWSfpY|C3UGvUhR8`6^@gDRodY5YZy;qcEsJEs@vi%M(v|A*W8d>E zA696A09uPo?zn}+MjW?#_7UC6xr9n}^@lY^lHA+Hih99@F<<(mW*=CD#;REbUrT{2 z!MmaEz7lsUf|ai|OX)Cmxrg4w!Q$vgD#MN0=w?9fY(afz{Y1?kIk*ePko@U(+-MVP zR%uu&eWhnB`$6_?bt9tM;W1wpOSfniMK@2DNjHUs;5JaJ=`0(Ov|tF2#|qg2x{YE} z>E_0cSRjxS1qOl?dIJ8CSydn)r?cq<*@+O4xCOxOaWL2p_9Z=*#w>J;XOGiO2Sa1r zv}_`7uIuf9*%2NqMSMuyma+%wrW2nuu+U&(cjR@}q0m$U$Kn z*{Ae$J8PiZa`pt>JXjXpx)3gOuzK6!<$=da*;{m5?rbm5%Pz5HgTj`xe-Tg_T*Pp@ zoIORi2xg+2H{67K>OG$)$FN-dF6{zOUO{he0Z+a`fC0>lZbr5Qx4~%rZ+fhWCDAR8 zt)`m?)6)&!2)Gd}dDBeAc$f-)c6xaSBGo|=`3obIS#bsYovKRmVM<#d2Zo1+goIMe zhG4|)9}_(+I5-%7&!KYQ(gki6O-<*TyOe;aIX7IhB3jA)W#g3d*~rKu(;maTW9d}89gFgYACI!>eT2$fuGtFLWpXlg>ry}ZKG%KG~H znudm2IglI@f?T-4R zcT`dd0R&)W4JR+C$Z}!j`Bxdq5nvo{VNc1>Kh+~%qpb3w&y$4xjyp0tRrR~ zQ?-=vVx)>vS$#%_g5s6GviDFx7B5SBDoIv*X;GTwUo5MA+)$o0Dc^zDH7M~Glz2Z% zr09nfw`An*l>PhmA2{^e&TTt(rtIIpYgbCj!6S!%`(fKIc&=rhJ$?MxzJrI3qt@Fj zu~qPBE2}{lh|E`|B_$hLuj#?z4<0k+-psGSX$+FNP)Dz3Qe|wROkt{T(pl(;hCfm+gBOp|9xA>z_a6z(A-dBjZeSNRVeP zzF*BYv(1J_u9^blq$$@vMxLT^>^0rekch~Ue@0x_ee|+GP1W$;F~S%+Fc5xGfj(~FK`?f0%H9IS)4#B=vbq)yezoOgh4QG}%=EOhGl+j+ zD=J9OL%ekaK{%C~UtW@zvU}&xzaBVrF!kh#{fHwPJ7MC;;lo2ljT$v96h3%9Q>RUv zX3WcN2^urWYHet2gy~pgZLQVOv*eRtLeH9dttzUj&(Uj`D%hDgWgLPdoW|RBkOtul zFnZM!m$KV&3!TIlOd87rmQwMlqm@T>5Pw6h@V&HNS zaJdS&IPK0tl9TtO?Ay10|Ni9NyOV$UY1_7)yAK|KW9B{-{)8pC6}~J8V&ADKo#3hR zx^o!1ZHSc6CWn6T#mRPhPn<~k z^wUp|H~24Fw8+bvyk(ah&t792OgBy$J8ArgVXZ9ma^yC71t!=@hO1``uWxSCB=7;F zE7`ei;|JR}CBP;ATIE{J`kgy>zJ4i8b8l2G;(trvb^ny~MVcJi($d`2*x1m}fJikh zozNcJTkGl(Vzm)rqJt?3D=bA}2dD`R4Gr=(!Sh>>I2L+2G%YRl^yyP4Po7Lmx8-NS zTt748x1n1@^;9_E8Vnag^gbLG*r(VnfM9X;4a!GGmrljmiPTsu>BX!^Y zltagl9Xq`5x8HvK?bqF)<&y#xgp7n=7@-pAP07L?s4Tq5s{~&1#U(X$Zh_OTx$?50 zx+9-{^5H>$mS!>D(i{oY|ovOzJ-y6x`2 z-1Xq26d~hP!tOG`6>Ow0FP>RxksCbHdJ#n4zMsxrZ4} zpGK~jbYwR@eQM94WBU&rIDR5MEj{gQI{Tc%wKNMjxc29il%cN>g`=TO(30YXUJY6< z0xd7b>~}F{ztHa~!OV`G|K74?%a{M&w(Z~l-nwd2>xPgL3 zPd`wTeE-M&>H3Sq+;y_yx#WP0Mgg;R*MVlL{L0t;Wk+$WF z1I114a#%$vmhFOq3S_sfEvu}pswyo-{+p`irb?Dgko6T6#h@wXdXU!vxwim29U_YyP1D*BAqH~WjMRBuR?a2uLbJ*U7++HJ*$Z9I`M9fy^AvE@U zB~FR8*VqqUNFUpeTxcJw(Z~M*H{2k3u)XXJaKd|R6FbW)B{yl3G**gXEr?0pwv^hm#j3>F<7248ReAne*gwYg$f*S`$5 zf9vG4EA8J(F8V_))BReI_vSv~zqEUZ#ckgSfGP5ZeiQykdQzL3^>50q=dID^#< zGpqCT>?ey^nja{e)eS!E9gBs<;5^mC7dW292LryhyHS>2mV>uGcIJ+^iyUQ|1}M`4 z*|WRIeLY2b_bzfmPmyuXBKd4!j1)3@Z#kKtxCZ`&XiRiY{Eh`e$#{+)BnmxH!>t`? zIdpX{OE8EMj*Aj*a+VM%O4xS+C3uPwOriwQ9#rlrN~rHCflh&Spf#O7>p*L#Esp*t z2?-E<-oaR8z929;-MQ6DIZU#2=MT+r!Nu2g~iyN7yC3y&KIyerJr$%Of^TEaX-MVNQ&AtdE;;k>Q>cr*Op1ZR&U3pK&n*9WpVyj|qrY?ri{a)P(T zfZ~AtE)>NZ!3`I|5ze-_gdZc|7YKMl1@I07GPfRhSG0&eL9|$$L$ugDmS~|@h!%ly zPFlFEIeI(Cd;x2uDmRInffdjvJ*gKQQmA7Dp4W@vg&cm2;DW6}QC1@qWu?@oL%;{@ zqWbMDcu05;0e_Kz|3Sca3HWn8@UAGQ+ewt0Uq_Vt!ILQ0dWa~e z3LcVjeu8q1$Que=bGfof)hg6S?ri3em}>=mmw+JGFdr2DN>nQtdxSQ6H!>iIBPwXCFA}*mnl>4u*2uvAP#e?VbUD{0YNF%z(xcwh?b> zZ69C3tav-*3^-EE1LwpHc7jG85p*NnN>GHOEiP#hEZ}PeJRkXHRil9a?gHS43Hb2> zo|=M+D+PRR54+V30cA+`3m~eA`bZuhur3bblK)0 zfpfFK*(7kT5I9%&;Or9K{V?aB$lYv)MzX#Xh+M!(Za>1?5+vI4syIYAE)EeMr$dAxW&$4pKSIDC6!5JAKBNcU6&Ls( zAzH+6S{$8Dw5Z!gva9SWh16nHPv>2*f(Bdc|AZSrXNC;Yl6CxKt3!_*WB3KIb zFVB%y5vZYZ+I_Mvtv}(*W%Pk zg20bY9(2TTelKXT2+%nY-%~0AzD2-i6MUk8KHCHBst*j`P#-iNBfP&&CA>Sb3Gd)3 z&OV^*T>a1b64^JPDb^IM-5l%R1lC=m-lm>J~)pOEuJrM zzC%!Ko51;CPn(@J_m_Coy(FGMhs5K~C7znv{&k-x^Z9P$Ja!CWT_E#~g2cv89F1ZP zub^9BE<3LTqbNZmO7L)&;3G;X>?y$|{@aLh#uq_3=7IW&cJ4JqyPBFI@%Iz*;vs%y z3CD{#XaF!gyhvm+tExyG8UlZlC_xb=P*f9MLZB$YdI2RkXjs60_%CS~BuerIx zXb7p7)6j#{uy#Ni_8D=G9*kkf3IBRfU@grQeC1L|dJ+_L)qifh|0W-&x*GvY{dbP9 zUAnRT`|mu7XG{_2zo|mv`Ar<}7trxO&9&VUk2@r${^zJP-r0#bWipi@(QyTx!6K+f z9D9zy+Yp}xH-Co15YYmcrR;H*LZ@lGl$p*_T16>&1DDd9j_jbbln!Sp)uNO{QHH6f z3}QxT8{B%nd`tVUb-+IBckZog`puhJY;*^?HhWPYKvj0yeB0*2&-f zSMbUYXnvZ!3;iJ-i|4ab81=@Ssm|UYNT)u@&`YAJ{}75uXpRENgaY@ebT*IsXsTKY zm?MqtJF@!M-1s5Y9G*-yA5Nf}qeRVfoHY+j=W--o1xd$o(-+l#HlIHEbCVOOA?D!| z(b%HzpL?&;FeEF zYQ{>CCTHM+o>#0j~&nT@Sn~s&#%w+&-_Ks8;+0QBB47 zUfNJ6)%v$u*h(ovu80!KntWk-B+;kDA?%Ly0&)uQi3Ye|K}5iPFHvC~)=9N=e6;0!&T+0@WRJ!T#Ky3b#482V8pj4e#m3<)>i7O_ zq0)zapHsaWEws06P)5pn+=@07 z2YNM$eSAKO!6zMxz|9}vgU$Pu0=7oLdJ5P80h>1jY`uUTCtwMEROlgKP2I4rxUcz5 zqTZA=qTc>#)MthYqF(SNPU^XYpXsS~Cm87#w#4suqK^_*qo%7^M4vf=~@1>D4m4T_onI zfhllA*ndU}`_D#U|Jf<*KO;$K=QGB@u;~IeRKRW#u(bmAuS39A3)qnYHebMo2w2Z< zSXb0*_!m)c{4Yd3>r=!lJ|?1GP`7?#VCpqcwSs!%scJzzD^)M3=R+t6>ID%R-PAKi z_DF8d^@eL|F3triW@#H_r7J)!331uU2sTlezJC@r9!yGv&q5^1hX6k8pMW=e-@ z%}yJgLHfQy%&|rRIl0d!=}kTHD^IwO*p$paqKPX)sB4XRZaJ5?rd%EwU?$eZcly{#~8^+%z{=w37mIyx7St8P4CbMo62i$ zK#~Be*|U*qR(zcNMAn?WHK%U0`XcTJ1)0UH+QnCXEcpDB9a*X{JZ6Ep$=sV(;DO-_ zobb~HyiLGs1^mQb@U)9`uHbHvR}F}fP0AE25FqR?roDrdvj({14DJ+-Ynd%X{b)en zU1VR{>0n<7{4=;YKb-L?wktkIBT+eSReXvk{0FAIT|j#Y=n5zF7y)e^0=k}Cj9}Hz zMqS+gha2?=h%@+X=7z6b*Ih24#|h|m)Xky81@!4|Xji>n#e2Po_xjBaeyiiXZg+Fl z>wIKNR`lAvM4m7Np2jVgEvFl$9l*_m@XrG8-hI~V1Q;43Q!g!bsS*@xPrYxx=R)K93b8P8H%+xY5l8tH87 zzLckXactyb5d*EI50SA{ZO5(IwO|+2o)bFMQCoQz- zgWl^F$;*kbQ^i`vy=4^IdnMBTK9MY)`izghnv@2DBF7XCH&N`_6d@TL?uK=Rsg_Ij z1}+&$e9zX#B?Gr`C#KFBp>M5?qE?fr)mzlsDQYe4u61D8`av*0G6bxnwGN*mM{B(< zw6#Kyu|UW{ytP6OQgyf1NvHm+R_8$)$qQ1^qpaAmc1Wevr>e>QDb;u84Hd1uic>0% zZ8r;A{UmVyxx2kCdGWusx%zMIcVY^-?kNoEB*14|5jhCA5@GpdV!Gb@IAf%cJxON? z{Rbb4o?8IU%o5p#UuRhx&t8?@hr$E$9ty(!Ap6R}-!3UIGMi(dhfOA`?qK)P?U;~r zikZ!cflGK_VSf@N>`y)x_9tz^{$xy#{fSHX|JLSXd;`bdF+^?LZBCWPUZ!R!thqn_ zz4?ce(0?n3PesUfET9Xp1o9Qgz}H>)xxp*0XFopReCW4>mryS}HVZk^L8rj2&-A1c z6Yw6GmB@Rlw-1(Cwa?;mo6=96l#%?3XPF6;@_96UL4X_NxV|BFaOB8$8R&kVP=+@+&r$xlN z0P>RJ1myK6%?a*pq&mU9f^;Xi8%TMwuJ8V@?|j`MC{}`u06?LbOf6|A6F|2JHV3!f zJFvd6WYLVrn#7D%!>*@K4g)|#w?36HC*C2?J1ls#a|PyE9gywUfo)bu?||Z=^q##> z4lk0CFrTW2=`QYHpJiv9EueAOyWhH3b?FV_V(C{DL{oqSi&6FxA*22OSbGn^rpmT` z{G5~BjHYS2cj>0=y_=Q|A_yWbhKPt;UlkY9lT#ECMNv^u6p)SVA$yd)N-3o~ZPLA) z?j}ug{@0VV$m^@G-}nE1Nt3Re@jUna+~c~(Lxeyw<3}Kh|27L^YBTvZ)e7 zx|WgbK{f0DJKfywlKs^W<82Lc*|23!8x$bqC>t3(8?p5Hr*U=B|^{BQUDww&S4pn9n&!H0+k@+NSBZAl z>=7=bJHQaA63yTkA5tYseaX=kV2%AKICzm`;Vz<&4O}-=iJDz8RdpAKc_F^0aCun* zSdM7DaqY?x0($^?qRr+MX3&?0Wb8?o3>VQ)M_r}%f_wL~$vTvm{E&jK^(e4xb2LH; zGMUEx$+Q}Zj;x~|SPM~D3uFhUFsuc#gOmHUt5;IbpTBq!mpFX*1gSg2Rmi@n4Q9On zC2@@_)V`|D#+rtv`oSS5#Kx_JM7&aI}luCbw^*4)}^X9d<4FhMdZvxFXM zy+ABhyRT4Z<>X$!el<1y-hJ#Ho0U!NV@2w~z`&lqdJ*Q$5Dl;%=-4zrMDl>>{ zD!@Q!4@yA$lKV}dRwFc0<-8wE-M=_gqL6D+HvH6ylP6PUoNJbd#30&A1u77~N$#>3 zx=%>^6yjuvNjT4j9c0MaXx@wn3`I^w(wWNss2U+2O}Fe(5ZQyhG#u3^6`4omi{T{P z#!o=F{q)@A1V%~z)>w>uo==jg{Ruq&#^Z0U`y#udlKu+8pq^JKjU7lCN_zIj@jW-L z9Xk%Py~|=h8^J*e3?dsearc3Q*!2gHi-MEU_YmU%p;BlKqoWF`o3w9#9c84hU|I|; zJXQfhcS}>mK&r^b0iudZYkga(0G)rutju`RP?DZekcUct>Ty=^IMuJ83+0@F`*{NQ zLn@GGV4h~+exiE}?9+ef&_{<28Z;=zr>!8*P+3+(`}+5YkM9!`5)cv<7dU7@SPvLC z5qAdqcQ$u=_+ZBfJnC+rJ9oPd41f|!Z)s;shbrO@8Hp-(XuMXY9y?~VuAj2`<~kjA zCs^+7F72o^-8gpm=!vVpZQrr)@YVr=tA9ld)6+6bWvXoP3-<2y#G7xvIl)sPXl>KW zCEB3jkB{gZHF%b#Ya&!?tp`<}m7jg@cB!WS__uT@>f0_dH!6ordvP-LGfK72Qm3gc zNRQq|?V}E$tnVQ07NMgusNc{+bRv+MBblms%Wo2XiTGiy7TsU0q=fKtFTZZfCqg z1H9#O5q2fCpoZPQcTZ)r*-=%~+6>x945=x&`3J$;mSS5jjzwIn^Kf(cSf(UCR-yy=V6{Aze^u$^E z<19(widb+(PjH3jv2&;vh6p=#>iF@K$Bv%3d5uW3`K(?*if;8%a|3S5!vlLUYcFLx z1`P;Vv%MY_VflAXo_S=@HnK&GfR?tKolLA3n(yDfc2!5MrdHDq2dASpQELQ4JH`Gw zh@E{x-NO0LzMTll9RP?|MdM~cv7cGt{{8!lqn#3n6~zUKAd+<0cK%wb~JSl`^*Xe!Q0%P2thuh=D! z)Ox9JPl%|pvewqF*>GYSp?`QEh~^U0*(n2vw_%`865j)>pZw;ugJDVR$K=d~Mc}ko zF`vYKdARZ{8Bhu#3(c!JmwclbqF$VcAOtSenZUo+CE>794M$ADeJm}m>AEW+&iSXV zP82nsOF=iQ6lxb<61-yYa^At;embn{(~5^bO{8bHmDFu-(c>p^q&F7r83$7xl9=AT zme}l&n_?0EW8>)P9{B#S1peD|gbn@kITZ50IfolY`ET5e`yg{h?t=nne8JJXzUExB z_25ooB5+sTGexkW|LN>AJkjo${Z^viUHXM~SV18>nSHsrYxap2j>etEVTFWagGE+Mb8m-e0jzf(e%77Fn#iJ#{ztewY+CJZ}z@y9Q8ybM5C#yuUMgpRPpca{5H zyXHq;r*`Z(#nGmAWPmYmE_`1HL$sOhJ3t3%_6?@p#LDyL&(9+-COn+sl%<8mmE@(6 z_wb>zSZU$$Dl~sNXS?h3_TMQszzP_*}zbFb?Nmz-Ye8i&L=CX9Lnr@t%#BGRg#$p61u-Rslcbqtd#bn{_66?}~ z<8jP*%)ldY9v{46XH9vd!%?4mXiulbQr*;0UR-JM z>yZuFbB+y($?(VQqXXLdp@lRzbVy%g)E z_6^c1?HvbB@;)&G2lVbUX6j@es?%376X2kR6o!Wvl9y8H=@%Rv9IjRmNlHpeKuPcr zU1DMnB^UbBx(%B)ZTNZXH*3HCdhOS1)~s3mC4N}@4Zgnl=KFQufQT5HXI-5qd7U|P z_UyTHq`yHb9v4zCT)2SmXU=e8SFYKt_2!6hnN%W?N?-vnLi`|=;Y%h{DrBg?2+0Ty z%^)w02JJrr12Kc)y?XVEjfsqmjERpA)VOGdgt-U%)N3*STFk$Mtfkwx&Yt5QtRzlC z7SNE`ZHGBY_ln^}ruKK{$-{KiFW7!U(!D%530W4E#pSh}qzhJLvBY`YUjKlCdMGtjNtH*QSIVEw#cpqtMKST$ZD&c(PgG@0@^KjuRB-6cpnf z>kbAgtV$G}e!Iyfc^Qlh88dkO{VqhX?VS=z-F0<$e44T*@~=6H9kX#>JUD@(^3ogVI623-POT%S<`{BbhHot4u%jGgy9GPtJ;^cTl&*38z4i^yCG z3(pO2OUYXvc?;-Tx7|mO7>9`JUhBV;k8pTPfF+L~Z<4p3Y?m$!##MrGm1rD88u&ku z-GtY6?cDGqa>Bbf&LzJ&WjEJF`+fH-aIOc^+Q|%}+V*!|@^~rN-{S| zM4ZSq_=zMLgAs1TniaUZqv@y%^d45w-a%@b^aOcwQH% z1Xba%u}HTwfu)}M2tUCtgrP$UVQEU>D5u8ZZ}Ki$5gXypmL?-aP81T6k#_^%7&@4y z2nCSN0{{zM8Iai~2OX%fUFRT{6`}XPQI8l!AeX^OzWwCKM)HQAl8G%P!ZH6_>E3;> z1bOeaXK=6Ey6&}!+$+N%aO%3#Jm;OdA?W+yPLZ7La}!64ymi#C)LJ^XK}VgTc7RWQ zWl_pSS4ni#7kHx2jFiUSe~ef`Nm&$83;BT7)&NdMGcqYHa91KI>HMorDz&-zg79+; zW-%W37K(d|!o4Y4T3YHFn%eE14fRG7r?{MU`SR`T?Aw>moH%iYLnkG{dF}cQxI>|@ z)uKdqjSrd6s12+_TcXwasMS7N;o-wVZXDX8{6zgo52OcCH-&Qr%yVz(Xc5(hrU(hF zLK&e{dbG89D790kYB`ls>gK5l>Ok(2#v2=lvI#8{_eWy;zbQSo zxu$mQy2Gx9E(#mE$#j&Jb=XP+155vgFHq$V*5v>av$)9=mRuezAtXOydjGVpBPiFp z4#QE(mgchZ^g9<)Gc#}B!Ag;L>+8+G{IZ!-j_J4eRG+YodqR6{I)w^Cd2CKjEO}v% zN178AG`;bZIFPuNDMCwbk6~bD7R$Cvy?QW=2dAvBtZizlHMiAz`M05NRj#S6)sPpK z4A05RDgykJ?n$}<*g)_s^B95z@@{?T-mjm#i++SV|GWwib30E!ad09TB(96`7Lm8e zu5}R8MfJWIZ49W6UBD>K_3PJDFI~Nrk>1=?T6QPx`~{pv{mF_?KKXLx$}d0Z9y_P` z(a`VM@9PHnc#rP;+sWRcKkde4)O;GxV<%mP@16zDZOQ=k;uoj-v@~0>*X)WMe3U77C=qtu+?mV!K(IGqXAQJu#bQD+;E!V z{rmUnJ4kescE|JtS%VX?`}5sWFi^N!R>zOL?+l zA4vK8S$ERAIf8Hl;erRVq6``K+sST@AlyK>;K5wT9I82E9^weh0hoa>!Gqx|Yp&f! zTd}(4=DNmql>f^K6FeBSa>#wJ+1(65IDv4$gP}TQ+y9soj1O}{_ejGZ9_hWztosi} zx{Z}@*}8ShA0yp+#!K|5?#xXxTBug}VUg(YA0wSSLK*n*NTGM-k9c}M^2b2QCcwnz zDK0RULm|uUot^eS20EaB-+m7U8oA~}m$b{$rQ`x(YBIw4S zWj{?KNUu7NBMD(xEc|O^cHSayGrMI&D2^l^PSitlq&Rr!$l=SEj{UxQ(^_KkzRJqa zUAS8)G0wvrb|Z}}z(ZqVE4 zmGq{#A$P@(e#EtZjg^!3y**!ly(f8b%3(u?4vPtoijR+q>xROy_kCBD7ZZ22teUOhYlS+d>Hu2{sa4A+Tr_=BSaTC^+8xmgHF9St>&OnAI8zij5X3wM$R9XyAsxDa389(_lQ7%?CqK!ApWg?G^`F2iK%^c5XB zA|fv?NYGH-)Lvhaow4fM+(rfpLFN|~@#^zo*>x{JPWvQK-!aQkk{fd4#{JTUhK8!N zH1*)YgL{RbMPJ;&ft@$pXU=pdFD}g7P+#9rUt3#SRf8WI%=j{!Tbj*znK^}o?Xa13 zh(e)M!bbuG1ZND}Lj%6ScZH%YJ-v;*xG)X==Ap)C=?4*o7!mKxGDFbIll*qy74G&RWp7R?@aJQRFG3<|#Sb0Z>?)b7AfY#>jTZb* z9dUhK9dXOSnJ?l*K-9ZB;?}tWTz&r`!WiVLiT88HhpB81cU}ot8sGZ-Lq}XsS4Z4% z$UJ=7>I#uJxkBX8&hn(cj}JzMj#aqI(T%Q> zhf$;){V!*mLu1^D>oS-7jo>-2f4}Q*ls+KcrUK;PJ*2e zW+Y+%Y2=5&;7VstT#GB2xBox%#0`L7fXfJ}6!Oj2l}yzLBLKg}JM+^Cox*YLa7cfm zAqiR+4jQY__4Cf{+vozBaq+^1tG78-3>rTA`J+o8I?Cc3r-}{5j`2dVU~Hd#=gznK za;i5+jZU1h5J8E?#)V7Xdy5n171!B$RwB(?{d&B&?Ra1ON=c>lnG{#08Vr>cMm zgB+`CY($U0wgygBQc+RS)QJAHRaK4n#;M9Gstbz@x!6a&p%Jb8IMqFa!BAOn?b@~M z?5u)&_a3r|AJ%=uCWfwquY^>zV~{-rwuA&S;}2Q_ZaKM|cvyzY7MCEx~V1OO=^f(Xug(4jNzm+0p9K9zu5(UCrcz%)W}KeQDHtcN?!iO6Z!eDuJ1 zqd-&A=Qs>B4a0o6A3t&O?D_L&&Ky5^>-O!d7r<5Tu+EUA8X-w&2=xl}q0@JAi}G^{ zzTGn-I1HZ0Nu^wcdGsf!eKu=Mt%d)Mjd>aM6rMCJ!bK+h>CO2J55*Y7@-9 z18lNn%a$#>)~?vUUx!wOKJ?1lE7aI{9_F5q>g;SbqeG@M77FA9kr9E_YBsM>*Voo` zTJ20rV?$YKNkbh-lq`XlR?&gm(o^lOxqdhX-R-G%1q!eU2>ARM=-d^9J^=E+2s{Os zB0*>moDo52m!%Ym+3AJZ8G+e}=$@SusbH#W$4;C*d+OAg6C~XDHcMuO)=ThC@d|~4 zQ(wzxh7Ji|ySu5rrlK(8EVKsc*t>>WN3EbfW)lgGs4s;dFMN9T<3M&-3YUhK)YFK5 zQ0qjoQznjkX1TMWEtX{xnHoGNQ)^trq}Q%ULSxBy$dglxY>1Av+B+#BadsuJ`Gn{> zD|2Hd_~s@jP?p-<&{$bsZmu&s1Y)|e3AnAElIkTe2BZPxW?f_bTmJmm(S$2M#jC)d zU&CbtcSlg(1P*};&g$asL71t*n5oH_sX>@2?6yhymz5P73-Zts7pykg3$IKW zx)Vl%Ubb}UmMy>TO}&>>&&odgY}Hr$?ont9$_e-E9UlB*0<}SUKF57PzuxraT_lMm zb>~?ABjD{%^RxM7f*s3|$$~ij0)Bzyz4zXG@sSa+?jXf_@)UpY=%}FTsw&RkXsjr! zsxg(6m6w;57>zY1(iPNTFjm7f(Mv5Bbi@^+U#eavkw`?qb4DE^9Pm;)MIwj8LSwf$G_$o>IL)42X-#&UrR2m9 zV_ymPI5hf+y}u-(>*-_sWPY9?=TZ`?=_=3nH&!E8`a5MwyJTF-Ji zL>5@_qQ34L?U)asx&-B^MhmL%n9AGB##1|}Q`A|s0=)%+=z;ra zoRM}o=?wN#q(Dso?wmhD6B@PJ-9ziGQmEX$RZ1^kG`#ln^M=Lnw?{dt_zI-{)DwoS8wx+gD2n2yZAW-rm8nz2ZY)-pkvHG7fzAwQp#+ygqx+=PS7%uNo8hWu9L(7av}| zGG+CS^ZC-q$Zvkwc@eUNRoGkW>YMG*q$-WKLLbJ{0zv3(w6%Yxe;{MHapT6AF=OJz zHTQR*qwnq_n~kPZJVyGKr={P?YLH4AuU@&{5Hb3BA`&;C1^VSnCS_Q{Yf029%*c1t zC!z)87dWEzZxO`Ndq z_K6ceCm}!LRq|A&97wAV=-mFvsD6#&~jz#)^jyNZm`vcmYRHLeR)N@ zNG7Vw&Mhb`tt~O!$tZ8IiA2KACUaGlxuy}VqwSro%`t|G`5ZLe9mi1e%H+YSk``qIBBQy=izx!-$bfB=Q(cV^?ot{NNXK(9v7m)2>|4X|RiXP8>IGc$5+jCMy$q z#mt+`e8eu~;@2C;3@=36knb#5hT9(?t4#$bVPXQsJqlnLA;NtNeATT zUF8oqV92)%WMY|8i`-YRwrQJQumAo^RYz#jai6&4XD%s~q| zswfeW@#EANf(fBbzb5jp18jOu_*FwcQ;UwD#m^G_o-ut0iSB-_T>ji+vmc+(-`}IX zARF!#Gj801B&dxo?zU8cNUMIgs{ZY0{lo=t?Kv@A4|+T-_y~!3AK<5f85~=s!0UyG4fz zs7atdc9r+>rZH6PZ$-xH95Q|&=k(6Q)+Z$!@{t@q_H12zH{$e7oBaw@Vwo|9XG1 zJ!-Kth?DH3zYvK{4gq7%%PlG^FI>ce`rrYTVe;g}z{aA~AHW>jOd@odP8>T(+u3Be zc|P^r_7ysGLw{fJ>*=AvI`sJGgrTzI))iv05rJ<&w zxwW;esiCH#!XUylFrd21Vxgg-vC%n*DMAb^*x%3B%f~l3Ff7nFFf=?oB0SJns~z*7 zpBFhl@1oKIQ0Yai(iE)H1(3=MuuA(Fi}Le}N=iWxm~Oen#W1=H^PyG>a`Vb7OAPr1 zB>YVbaZ^c2aRopIHQi}}8bZn;ws^o2g-I)=g)%YVdNIOqA}MOroL&SzWD{`mrlL~W zz*)1N2(@OdA@1rEY>1`Wka>LT?tSMntKu>*@A+lxZB<+ddieY>XUt@j`mvHFix(C*7alV zrCo3HAN@W_Xxk=kYZr(1d*Xw!PSKtMH*ydC>Q4#$8}#`xNz_U_zoc0Ca|6741<(zq z+*MVvReE}9fw5|UwyNFXM#!kh|Lp~wtGU~^__taC`giyiPAn~lv?xku zdQ;DlZ#P`9*I??UF3+O-XcAi>NRzA1HWf#|Pd;xYAgtbn2Hrzb7f`3iF!7&s9Zl?w z?kIV;y`d+-p8)KHBxLFqCyz%2NP}?&`4)y0Tzt!Q zHT>{%R(=v0_8hL9g4MAO_x&91`#G$R0ol2^c?AX|dE`J;H&2r8y5|1s+vqtKL&Avh3PsDcS`Yx8tNo9cVi z6=HFPp+-+Dg7nxvgMOkY1=%drhZHAS`be~<$yi*F?GLYp8N=Y5PjfB*9v=AnzH3yE5)>0c|Dr8>vfI}$9n4flmS&0 zmF1=Ir17Y%tgfys1t4ImD2MhaE-oprsI01lt5#$x#!54rE6a;a)o7$6kfAtM?*~PL z*=K|>C`CdU-QI4)emYoT4xVK|l~0+LhR&1$e#Gncgmr^tcwc`%AFUQ^$H!acuGVNg zv1ySXQsmv;J&1cA8tUuq?%_}M{a;<-KM|6Ls)*^r-x1BHJuZ^>;>YM;&^5<{?nG^` z0`Jb1I08I513WngJUIe9IRZQxR8U@3wEyI(6Tf5Epi?K09^JG1;L%e^4?A`2&;jae zmX?Z~;SJz2lo7slE9BlPT+Mc9bL{!NXprj+!|Yo+h?DBw)tv)jW2>x9J8deb~5}a z6EtKR8Xg7Feki7FhtK^ZJ%4@@IAc1sm-DNCS~C65e^ zR;47lpcE=uJbQGc15gha)Y(|yf!%)sf)Kd&(1e7<#s&Czf$#l-!y~XMJ>Ajil!_v_ zQFMwM@neKA7YU>iq-bfU?QXU@;4rP;16o?#RD5%JQK6xrq`^Y9<4`#(f!emarUwxx zt<>6F4bQ*J7D~XJ&%&HPi8)WeoF`z;!wgvWL&VlOA2LV9C>u&z>=2 zNVre?o+I0~Unzh%6xi%;N{+gl$S>!U`Q?Hv>gBhRAeUC)rDv82g?iD*mno^(&WXl^ z&=zY`B2?XO>Zs_|3-A2WJ4TZdR)U2_)?8(Esi~;QP-rN^^c6wSl~$HlVD~S8 zAy`F#gOo^462pD$w8L$*SW>VP3B#-i^TaYDE6Uej>*4OMR4Luv@j?(#>q~GhFDmN4 z9`EG*Bv4@?sPG)9kOV3ufePV-0>(V-Zbwp=ydY^SZ4c}b zUtK{G+n-^ik=}_YOL&cc11sSJ6xrBpR=vATpq4YV)X(qdpMO4FrW%_-CDY57v%+SY zZa`jLeLcxp*}&Al^u~Dji+f=^?xyy@emw2=LiEVDCVO>SS`?4#s86Y%nEHHkBDD)O zN9(XIQdlq$<s8Oj02&B6+aT%p~&Y-{NlhlH&ksBI8*$bb~QFd`h{<5!lNfblhz zu}oQ?Hxh|K3x=5+$-<=P=vMOQ0nm`HKR?@p>uoYsRS`oGG6u?y5WA{^Sdq@Zfzinq zK_#om-${ELFM`*&=Z3T$`^kolQ>A|K4t4j?AjvML15`UfZpJ4e=#+|g^_#rr0kpT%e5 zE8_@8jORhxn|_`aBibSoSOPpFN9s5oewEVC%pHrOq#CN0xEQ~GhZ z!3C~Z3;%&?;|*hp-Da=8b?Umg-_%}Q4=i6JESSo2;&v4pVa=37T~$^iI0?E|*VI&j ztegjr`4RBIM{H#6TC51lNE|@wfRr*^4=i9c*;&!s6Xw234THrSD$>{Ahiua6=Z{U# zK+AtPiIbLbpyfEwawKRO2U^B~mOh5E(y~GW+!&Z(hWxyILjj>KbsCVC9p!~$g$GcD z6v_@0HCAvlH4}Y!mowZq>n1z5Hd_c}Y$pFE%#0`)8PoXbOnp5TqBkpcI8MYZ`*@iS zlJOe`__Lnr$UXY0V)2=4M?9WLH1E7&@SI2Rmamv)ufMIMZc(?G&`_+Rl`Jbvy2GV# zNxZk?dqfC6q;BdNagU@=eM<5R@{3F0zQR&R_@%6*l*})R3s{-J4iVDQ0B{LT@25VV za)n&u>8@~7DAk@`-d>(sGKo~ge|*w8FRL&wyD%?XF)ynyFRL&wW8l*j6Hgh62gWCH zp`aXSjqM4H?H2wE@|cDjz(j4+KthpN6Sl(B;k1H z)9rAfc=U-ACrf=IXCzRZ|3^}~Qywv5WN$o%ju{-`*4#?H_4b4q-;$G@=k#f>ytDZE z1@mSlP3RZXr+2T2{v*Z^EWMB_r|PM#J)W7PKKk3QZF48}syeW9({0KpVeb5S3txYB z@RZ&`p-2menfT^YW8ti4QUz2Qb%I_BcKC#qj2ky@NNjYh7pL&;6W?q2I3&kg`z~@h zI*R2Bdw1rcmSa|A<9WDP?73LG^3_B3+eWkFf%!CvjY%D2| zxXEQgE&|>Mg3_=w0cyd6mq>s}!A=zm5l**K0K$mDs#ie{5?G9~!b)dQ$~6Z5yNWpX zP{$q~@bf)AA-8>e{UNQ@3O6^U#={eP?@HZO?r>9(G^JI0sN_VhqfQ1mfUnTc%YzF4 ze{d(xH837)U=G&6Y^;IqthXM<{DLB&LEvhb)v&z?qZ{&J2V(^qOz??|U=OPa;0RTX zX@J)&gm_;e0oF8a6m2`y>5A8=~$SwU%(i~Q#dm^IzpqD#m;)d(&RsJ;>1*zO63&wkM}U1 zJ9GZzl`@ZBQ%Q~E&#I;ddB0KP7Jjhw?KhuJ7?i6+G3r)93*u(1Z0_87z09V@v|l*v zu8ZeV;ZN7Emv$md%QTwq-Gw$oUEy4=hq0^_J0-)W0r^31BjJLQGMK{|#a3+q6MaufTw?ZUGaLI+h-4FRcd&@DkF2R%${)Pwc7;>&w&A9sZ^VjN^Yg zi_7xz0%iO`nGjIM3(|?yGRpxzW#t&Mjiv&a!pT6+bp3o z=FEr}l$9->J9UUJJ;meq6)RTkuj~}{n7~b;3hCGBNpv4P2he?Cg=avcU8Ob%mrR-U z#N6rBdAb)ppB_es)8ptV_*n)gS`HVw1Hvv3!RvL*vUgq~u_gG;G{o}8*3xQuxRTdv z$}(y~Vjf-mWdDHgf4x~zbfPTcbpp-cDO#%Y%KT;(7gJu>P*;u3WC@SLqca(c%WGY6RsWhjXz{c~%arkpjH(^YNd&ylen@$chx%d8!khjT>+4>}bOSYLrz% z)Y_n3frY@`w{oJ?k@})rVD6>i8-$}hq9)9qIb(pteD~PFBfn?yR%O456DPzf?Rgi@ zpUuZ<6CBknB}#DAB_{$J8OHZY7BPaxJI8() z=TS45b?Vp-T!N#gX~)8F+)GUb(Z5i<6cN}Y*LF<^~UAo^Tg35y6@90TAeCAle8=x9udSs?e6 z2oA9u4aD9J52zj@4AhBU25xmBt-RjX!8b9rNI1 z#9)it;dqXO6ZwHy|HF;SZClo&5=XKjGcTvQqw?1TevbU^xlom<+?;#z*mkV@MMQLR z^elgUxL2=%ckroBk-Ju_RSH(F`sv4?ZZ}soE8`Or6YtJ?>+v zR6+hPOrp}LR+^%6k&m*I`VEY+iCQUoZ&A8F23?3ftgMU$D+2o$cbpvt45}co8Z^6g~C?Q4>D}tRky^UPJQ-tls z?Y0J2gqi3TWP{@Mhi+k!ex1U?n8urqyrC?SMaO;D5k#gC?3Cmn5Sc=HYs6fwZ_T}# zn_rfHH}guKy(9h3)$^&BuU*TzdFAHq^h>FCvk;-albug(X0ZlJ?l(8H!KtaaxviDR z+`E3SxP?IzgYxWRbJ^Xyce9I1Dw}Ow%(=T)_WgEjYfQ}6wJSHKSKPaKbnEt=2T$cz z$Yc8qige=}Z|vT_dG9r7J60g*{qoCP;CJ8MqcHwfvqLGTzW8Fd!4dYvJMX+R!=EqR z{{=T5&g#!#kYDh=o!UxYyEY!*e+5Xo86eU=>RXTZUi;T;FE4(*KXv?zPxas2-TitZXgxM&3C4wpf@b1iS5q_6L$NwvV z+Q`K?veRp=wbjPzmPVeG(X;`;r0n)OD{t*+r3EbwO&#rR;Ef_yWU~eJR4Uov(W9fI z<-S9Q4js}fSmCeVQ6d`ox9>`6`86jH0ey;jbo8pihKvV;S!qY}D5nX0joQ^dwftq?tRmDu*7iB)L4nd(a9DYhZuL*Sz7N1*C= zpz3F!>PMjJN1*B$f+)cffU^aqkPm@D+%57a=sf({`A)($fFlYF`H0-Wb}cO_g3E^g z8jvbS2~)%bD+3?ufy(IpXc3C45w1bh4N)7Q%aolc5k%rysQ}O?%ym1jr)X|)-S9qw z%JV<}xb?=JbC<7IDmsGZKEL4A_uhVbSX8*Wt>m%k!y|imi!JRZ*8RNc$Mgc-mYx`OmnD6}^qHQoXI|B{T|c=Gav zQyF!s=eiTgJnx?jquqzdHj*^H@Mw;oCw>DgJoBMI#;o&pTBK(O)*wS|?e zT>10)`}cF}^gcF8SdmQzPE&=%#?JcSgAZPcDGDk0X(dMmP8uvV=DhRvlIg?y`pOhi zH{bZ7W9Pj*_rfKs->79{fnXnit#X>$%Djf+DIL7`PlT^d9yQ936IpHYR)JVA6gFJH z4ygJA@)WJV+bZv~7=AiQdgvN5`To$Dy+QuMGGHTM5{$moP=b&{W$7ga@fL|vAj4*W zlvoMiC8oSUG8IxSL03>wtMo*K!B-n0@Ba9|f7)qhkQ~XcAuYB;T96#cH?VStz&$m= zlE6e4!F9;Zg?kTt5>`h>#(nr2Bv%aP6Fl2Igm>{pq(+emsmCm`c(LmT3DXnoR_{)9 z6~Sci00`-@vtk+%M_3yy^cVmh_;?0igkN-&I@n8@XFF2vB+(VTum zB0Z%n?LcJ1BW}ph0I5_WbCU;!2S<%!>5O$4GtA_5@Q*Do;GW!3oky2H|e+wP6H zFusV`Kv6{W|Ih0=`STI*=hNWN*TA2TfIlArf5sIODFDc|kbqE#GD43La0MnYK>UIOa@X#xI?5s_FIBlt{POG{|+o-LOP+ae~-m^y8AxV`G$ zhD0J9_(dAz1*(Fvcy}B^ zmq;#v8vBf51urF`9*Y%PYD?xkM{;NI6h0r#P^ZR%)8BN-4%uNk+|BLkRf(rcFGlJ$ zqxDm!#9`=0g6U8K^WEft-jg%ol)05>tfhVkI-MEbBQBEhn=i)+e@Vd@m*a;hCWc3!Me z_;_igmIkI#oCqJ`7R3dXUj^;DH`b|z6=b&@4o-hM<2cXDwl}x ziJ7`2{dkJ(-u-M#!UrTH{|a>n*&=3Wl26^AdSb!T&n43&vlDs<^)*u(dG^&82Q-YgN9|SKY2H+prk_*qSNU@9J_9IKr3ie zNKW|dvm?b^pDGetF#sZf$A}cHT9Qg)B2#E03#zabB_%cHCQC_kRc&2E1N94w97jGy zOJgO=N{9U=P2*upl@--cEbg zmmqmzLGm_p@%_6Q6`dL{#9>DL*V8jINBZcc4hEKPd%LBjD-U4-_+vHr;}`J90`SKI z!XJjrdl~nT$WL--Gw&DV69+jrD-%`7fJO2R#ykRx00<{oY+e?YvqmnmTL=OIYy>Bv z?Z9&MKA@6BrjRQXBy|Gt4v`yTsSN3mqA4&P#=t4=jc0%7Gl_o`*5OCe!&N3rN8Y~m z+*sHbvk~B$%P&*yICZzd{`0{sz#jLGYynQP1UBR%IvFaeFJO!>d}nO_3V0aDS?sv{ z?ahyl^2NM*4taDQv21^KTC=OfinTGgWBppi+#d9ng z?#!njp@-8wp}#Vz_aRN(y~cbvf%*}hJQ|_0B!JhlsSSenpXlNC2*L%cAYE2K@+2ce zkNmtErodXfRwXCUfLQn-pEXk!Em^W;@!~QfNP?$)y|)PWRg$~K(e)@UD@CfkskEZHwiXy$C2UVQxI zK(rR#s=>pW)=p%U16~2}0_%ns0F>1G*nwEIx3spjw2`0>0zi;f){gd0u8(J6P;gLi zNJwZ%aByIN)(fdfS|9(wKp<|uFlg1@1b{(O41tfm{ei*!FE8-lB$v~(P9ySa5P2&j zZ%JLAwH}=LEjV)%IP+srRS(XT-OWf(OH0eoJ)DfPxB;L1b{6dlQ`4`flEzinGw?>YUZUAc$~X-=iudwhY#&LdGO$g(}#C&+k0@=J~XOfhg)0QI_i!0vvad@OlBLI zi{h*2ZeO_s@B8ej)tgdNQ`0k2Q!}x%c*(6RMo7vt7lFb987?g0l&3G{)zs8z`iE6i zFeNA>TI75#57~X#=rM5MK#%%{@{>P@^;MOooj7{*=&1`AHlDb2{*uWw2|(UF;JzP2 z{k(=`DAI`XO?nkL_;Ge5+Pu`2TOD;7*Ww2y4E3x#wQk+IFMi0-4wwVZd1A&GKg5&T zX-|~XN+(R{8xR_O?a=Z2NEj`z7%+SS7ZmPpIDY(iL$8VPlAxq{ufP8KjNzfBzd^bF znoGyL4D(oi?R0@x+?09q=FNX{v<|%dgYezQXAYk`nYx2#4xTqrzj7FoFdx|QG04Jt zmh}<)L=79&J1`)(pjr?=bpf&P9trHo-q>YIDSu7fdkqk1i@B}?v?1vuNFY&e%*~1H z+gt)Dw59E<6hQLa2(5%vWi_{|2Ttv!gy$`UQdCPJ^B8+uaduh$bz{4m8nR#NrnXmA zciPoLJKt*V6v`O8h(&*MHaKWNTzGI$5Rk$!Jo-YCgbxXC=1Gt%`_Lt$-nXO85f<-9 ziA2GXGPO$BBLpHxfn+EVCk+h=>(Qe}VAM?FPy9S!`K)2_@$td%(R{*V2aWFM=NTI0 z6BQMd`0Ui)evUR-wA#C8&pr{OM-S>ZB58syJ|+;ovh~9xn!pfWm8{+7uryS(;RehZ z2e#jA$4x6dqQU@e)}`({a<>+DpqAA&bk=CJm;nK8wm{rDHSxpUFi6#>2y@1?A>@H* z5{pteu>K2d3M#vIxcXz%bghH#OjnX`O8R<{nrcB?z4O2n(p8%j;W8ySc!j{t9Y@on z$tK-ns2X;K>&kCqCB280v>Io4A7^+QXBgUPZ?A_VR1f&88O?1PP|SfJz*9}A&T*i( zro1Ynx~c$@J|`ERbzM7z77_$-Sz___hhV`UNPZhmN*qn}L*ogRJ6fBXNmd$t4{d5v zuUP~_+Sbx&Zn4o^WIN^CXY|OCgQGN6=g(!WF-7onpnHA#jjLxa=I8MP7c6nX zzLJdxN&l9owYyH+JFl%>wer(nE)*zYBEH&i@G8~`a3hgTENE>gUBuGw0}W5{sF^)` z_S8sg-BGa6ZzfbRiBseQg6h)lT;H(f+_~KslfaB0LT-F2m^W26gTzzLA=q+@`c3rK zi~&@BdLr^Rc2eIn3$gx?@kzTCU&|~haTRGSgaxx$q69NslgG^&3-iEL*Y}n1m1)xw z0#bS)SLr_bxul~C@#g)^%JRzc{It78Fdp;MPw(Eb``E!l7eVxAS+~T*(L)FJAD*b2 zVqv{3mWDCIh7Ic(J$PWklqpmC(#6H?5s6%c)!c&WSBget$||J&{oG_?3l?)b?`WTL zRk-y=hX?W`--q^mUH!%WbGg-3m3bF;Zn)GW6e&_CK@D$!a-9LS`U=utpNE0CT=2uM zqz~)s{On}xe=vZdrVbof{N8(}gR57sK3vw`UfBSSZpgWR=i0>!sdq}-91+mquq5*B zBL6{&Nuvfw3OJGfBV(Rxbd2R=F%!q(H*6NZyR?$TFVnN5u`)O9`t`JO0XN84Qc`Xz&N3onokXgPPLq!~tcLTy zoh)u^hF@yL2P%M4R94i~qqe84g5)QuZ7P+Wgb^j7aSTxC{T* zXe0#FWXRG^83csFL!lJ8d-(vgL~^>fr^a38Y_Jm;5#kXV2yj!54`{TWS}%>g!R8+u z>5XggWEm_w|#** zUIK1=9o(iodoiORzcep1^WwRUKYTJ4sT-tJ=3V|n;pb~sfAPf=V1?)SXOaB11d?_Z z(u&>$54?n*m%_^@&yT(<4sEB0SQccqMsds+AM@{C~in)VfLYg2h|_n0kTez|Iu4p&+%|7`nj2R5gC zwd>fC!|TT(Ec-I@1qT3J>&NWbsh^G{-{;Zxw+~|cQHbutiA5kV0IBf7&flKqUrbKm z$FeMK_%VThiH%*g>MJ}pU#k$t4;s|3Z@9l4?BMR}+3&emUw!q-0X~kKm(E{F&o|a} ztPrWq_io;D25WW$A6pHNEsff&c;m^(CeC|%@#3k&V#mBa7XHUpss@Sj9?X-^Et`&T z&Rwbl&Wn}~CeAXEllXGH!qU_1AmS@`NG12P8Z(cZ|Py#lWhDT1;L|ZGCg4P!)>Gz;Iu_2^qrY6JYxgI&l_c2Bw;BCpXA zD6a(@wA$Q+Dw(*`WNf4aU3L@6eMt-z;`Jc+MWu9VK`bH=2oGgRem>lSl2Yflvy^ZJ zzC6?jXF5MX)|&Hkr1JlJVg8-Fqk|IxQGJFC&fME~6VP|?GwLxlGkv=AQ_N$JF+x14 zzd8e0eTERdAvdHYLd8AHPZQGk~sRg0yrqV@Kn-%lMpeDLVmaHp0hw#Z&q0x_6XrkVL3g65KU-+gyT zd%Ikrm&H6bQzPmHkEepADmeM53BFl}e*68vEm2$&ne_E4hg}prXw+Q7Z$zOS6bxg?oCcP*JT^ z$RSUC(3g|o!oJ8F)u2#FLvpGq6xjXaX`EV{AiYn4Ci6g(ZlpH~QVfIuu#N2BiZWwi zem)6&;dTk+OQM&#xh6pBEDCc-ZOfl%lOn4aZg!iE>!(`-E;C|&n`^zb~{Vj z=FL;Jb(*U?GEbd8d-%}d8k@V@Nkd8V}om37U zoiNEi=g7VjhteeRemhCsZSj-rZ+q9Wv0YMrGEeJL zAP@MYDSp#t%zSiGl1?}Dhm9N7Z`{0n*REZMF5a$S3$hT>O8;Q_vSrIj zeE~})OP4KsWm2yIQAdjozpj+O`}o)>N7YMDc1S=pPfuyPxmg%F%H6$hxQ|ajkXGg< z(ktZ}H#cNT&7M6b49N;9(I|RCg=nd21j#AZ4J_G-jvF>mms5klkS8#-kcZ}g zI;T_A+wYv*_`|k?`?qdAkc|`qO9!cDc|x|Us>Ofw*hl9?W#|z7drZ(&51APf9gQ~j z1_^cUY$bG}*lM?5LYdK*>(5brN5)Xm(JAWD`Q}NpUR(CzD@*2#9obhS*64tj@05M` z^zim8=Wkyt4M00EYPF!a5(3k$q-4pGm)?3~zTDET7ur3B44K&#c9Fd@P(FW@s1#6I zN_erM&>5mfm>$i05S2%`tpwr-BDA2eyd28mGV2EU1F%?xs(yx3gO<<{VyFtC2(@55 zpAxQ7s+_TQ#M`ybAUk4co+`N<0#u>$_ExDNE&u5x|3-Hpa*Mb_5o9u_CeS@@5g>>VRZrv=z5hSbz5}qSs_p;Wo1N}9-FrdH z-czBAB?8J8#Q`EH;%2^%+$1e13cd;=3Ni!)L=;evB~Vs@Qo1+YO*h@UP5!^qhIagO4{ z+^6TAI*Wc#S5+-p^5QTn=UKD+>J&D}6rF|@3=TWxzh-F)Q!80nnSqTChb7eU5|TVg5{@tW>5wCpewQiuEx2eFKenHU?y@39O$ zG81ECCdNhx20W=0(9Z$8fg2Y6fkl@lq|)rW1trih3LsR)NExw}B0kPg9z+Ig2K9{- zptuqVkkTeX({#4Cb($em>iU8bx5uBD)n8&fKvAYQJrRlJ%98+BIT}B<^zx29`_ju( zN}WcBVg30@@EenK?zS{^(%Q9apB(_zNsi#88mOY+ekFHOOHxW1=muUhuaNAyU1_gZ z2M!x)m6Ms)85jl%i7(ksS$%`qYuJiNXdDIv2{v)d&qtZp;<|_ie4Bd7pvYl1RYe7t zK}&9T*mIy~Wy)EsScg);1%UcQlIpU`)B_1teLW%vvlZbm#$zPY+^rJyAy@*a8Ibh> z$O|MD{f0&=D#*#mF%}o(LKP{%pt=j|&$bKm5;GHsg&+lHy%R9DNcI7Eu?LW)r-^+dyrf?I}<&aJ@gNtZ>8B=&`Rtv!faU3KRx1qpDD}oIVq~KA4SD0 zj(?u(%{a{-xSqINdKSm0KcvAA($F8$;0Cke;5FS)6t=9q3G~*s9pr zl9If|uvF&J@8;aECbtBd82tK67T=IOGmz8|*483ktwtTUk+1+{)J?@7o|DvLJ!8wlUaw zc-Y(9xP>7D%F(lzFBW@)q^U_R2WF$&V7Y?1xPs-lf;Vvmb8!W8aRs4iX~^b5G6Ujh zNGKKa6Em@@)69=P6;%?VZpZ<$9;Sdyq7?kZ1>Hn=B)UPe0a-!R(6Ycu@n5}k<1Te; zPMCQn$F~4n%I%CHOotxJ`f>zcSHm1#=c0}I?z>axQ(EjaKD`1IgDSI$=Ob8j|vwddWt@`XWf*sxJXI1@Wt<~?A>%2li1dV_!_;jD(0@k*PKVeGKf z)b#SpuT6z-V>|3k@`oHj&cm;O@cfKVK5|_7!YC`I?ANQeJAiFfjjr<`=90w28_yCE zsLa6=EXjW4Ks&-dcLg$W<;rFdoS6DX^(s8Nb06J&L;C4MtE!MFgH(vpQiS5+vIelc z9J&B4BA6RQRTdzdf>OAqrAUeBZWH`OHGn5<9o-=H;50$NTGWP!;)~aZdU_(Piqr^% zxR8wssD(T57I1f9Rl!syhVJ3`uy404dlsqzvWg<(FQcjmclP&V0yHU7d*pEkt zMj%<>Df7RqKfhE`-5jecOz`n&EuI25$WrrgI3gyQ7a+#;EX>3e@{e|$xPFT=+i>i? zh?4Hj;fKOeEqibEYQg43nU$l10(r4$_||s4XsJHb-aE+{)_T+oUuz!A;qgY4Zs90L82IY2q#UhW{UaCHwrv?w4jFeuQ^ z$H(2>6O11J9n~0& z2ed;v>&a;mVNuM;3W3hL#Wty@kMweK)3^tZocL}esH?8xkuMu(>)EgOW7Dt>(I`I$ zNBCCu4EF-}972cR0NnK{XN#!!o5BPUbF&`b1M-GnKGRPpYX^K!Dn~L76lC!^SSFAmX4xBV4~gVu4_s zGFq$I9f2~Eag)h-uucKSXs}1XTPkmdxk5@Z2gIBXByVQJ0AR%(9g#}n;R2{8v=%~Q z!t8c+cZ4IEWnAxE5RV`0#+9D0fXer zbu#EXSl)GZBA8ta>l30xBG&^w%Wc^@5x&p9$U<-fu5TGUpu29|ijRcFPF~XY)!TlF zyLR!)jpSS3eL58~HXC1h#5@6+osYngIS6ye5^xr5`gr3+xH0=6L1`d9dd|FD{%vMK z`6ZC)1By6`U$^d^shZqf2sY=U>P-Xr?;ESSyhe`j;@9!K_sEf+UDZuM%yu`RCQi;# zd~f$%&E&V%=_&Sn2Jz-LPRgk{Th5WSK=MxV@4o%cM53U=GM9ig1v&1LH(!3`&F3It z2}qVcgWb-tEICYzHKB zCn~R!$VoprQ_v9EW1`3w_UhATZXcbZp}3&ZCBzLb;&p=DJ=CS5prlbJ$G`gYnuN@G z3ydtPvEbB3O4=WT1&DxxScyOw1D`x>85lJrs>o&rB!ku|{8!9=KspqB@q|n+f|CJN z674W9&2V@@W-+(J7YbJ94$d=gcqL ze0DJ@*LCKC+0TvPYtxY<^QxoS$*Cpp3JOu4{q<~0jnYX1sF8P8)^A@!ioUohYZytW z(~1C;rye{#Zs5n4Pie?3lWW5I~Md04rG~moNH1 znM9TV3^M7SAWP`cL346aQoykZ0ByGp78BTSFt5Ow!Xsdaq&%|uIGr2_X_CebRCc)J zhbfpfDEHoq^egeMF+VO9zD;W|O@{LE3?e1ZLj!#S^UX`v}VofmnM3y2kp=! zk7GceW_JPEdY1baw-WbPxsj{_EW8K!#lEd`piLeD%s3WHTPeUSzhSL92UX@4?zgey z9?U5Fu#Lq`a+F*0ayVzl*udaMwvRRu6ZScgGOXm4xp>@_iwl%mpIY??@pC%;(Q-xyi1m9=iJ2`?GoE|?QIP$NRt{f!->s<;#LV}#qB2oxw zdL{Q*0fArS20{!0wOAG$bzTLgKcNObN|-HC1ly8FXlvt0daxcU>g`X6xhui@%n z!_|*WONTL?4Ye&R6SY>L-+@%Ss4$*1^u#-NFnn$&#NUcfjlTu9QILuzB_$`J3JbWD zw6-!CCT!6k2fdVf711RD>#_m83g#jSC%{O<5E0}Ax`QJ{A8y)jI>X;Q@#QpAlj)wR z3VN&!L?OZSqv@0>+jQ6556btb(ruC+1)5j)* zX_;x6S%Xi)O;b%Dnl6~)Ot(xKrn9Dtcy$SnQd21sqDKg7a@ICCpFgP8Ld9H*!MB!c zs@L)LDd)p6P@m;i3;-jbY0bL#-+%vQI2wLtKih0gBHg+xCfZ;yY~QL62Qo5@fAQ^c zkFyexm^1=*+lh6%7dMWZ#0>#hGM4R-?Q8S?mO;B?jX09`m%O=43 zpt}w(lutN6jgEI6IYHnGn?PLNrqki%ekl}eeC=_v3gx@+;sT!V)sL*Igu_S-c$8OF zS3!#`E-uK+ZvstnO&yGj`ufHOfFY}!np+xC*chm%d$rXVpULRWPCzKcX%6UB19rFM6YZe(MUm2~)#Ee6}GO|v3|8_Ms`PQw3q-$p{ z-H6-2@0aZC-VI|P+P?q(jKH{p_g}hr?Zj{L$qOT4OYFsJE$~a{fopp+Y_M%m0KKo76# z>ERjZ;ThD!nFaYUFkx_JW&?8%f^`Z86~MH}g1wNRpPPdjypSesh^SzNnUSApRAGzzqR z8n77v#VR}c>!oiufzf5Q^z(!HH(>@C1#b92L8P)(UCwK2sHtF`U7TPqQ*_l-oR5x) zzTDDh$IYNLMbJvVetgT7j#r5v;autueyh&LfnePAaqWbPbCD0w;z= ztHSKFzS#Cf0^7RP&>Oivuo_`SmQ$sN(h?&@lL|n4RR+Dhu&|^Q3r#+)EwIZ4D=canHc`at6}g2tb@eo3fwQng5rf2UYH*i;=~(zkoU4D zhd97_t@kXW>_?rO@-yK?Z*;n>VfnaswvDDL3CJ^LfN1b&mc%LEn73DfZ@-G6y#= zMT5ybz|U7*k@5YGZ`@qWm1S3VZTsw-qd#w1|Ix=gPF^3?I~f&ipa)jv3B7*W2dvJ& z&y>ptj2JzlpTCGF@^l_N`&HnHB0X5r%`4Ya^2=KVwj6(YY2e+sJJ(-^wfsHC*B*ZBC27d@e4l)}(X1v-SMxy`OyY$+b$` zz9UD6Jv!W9jp=+PJ@mtr=6m(^o#rl?ZTPf#^T1~kJ}N9M{PCw3y)tvnJ1;I?w&=NO z)22W1;)_d{Eq!U(!pDqdFRx+YXmXE!At7M{P+9`jJUrd8;06YU3u+Y6GByc1+lm>9d9}STOa8 zr{_+aFmck%d2{E^m@(t&dEqV|v*v^6SKvJQ3B4>o73A**M3Z$#OJf_NEMAlStS}rq z+B!QpPG#@z72qVW8vON0V+^G6D@fzFkj5BDV+^EmJO#aJGRF;Z9YhO=85lkyayi01 z3SW1T5mObkVfq$AcKOAW@QjcpD7E25lH}w8%>fEDa*vv0LSa*(ss<0h4V_At85rR1 zZV`L~{SKZ}7<(v|B)O;wjp-!3W0JF}bphR_wJka6%Z zzuj!^e|qzal64ze)^ZR)~4EDCu0GA4)hQ&!J+vCyp@lc zSMd9wEh~_|aVNX1mD3NGc9rEO-X1-AjD6KbOfwrUmN|{Re7V}U|C8^(#+q9k2ZoOu z(7*qHu!#N+wUuqpuZZmDReAZcUNd9roM)eV=9#Cag-;lP{v0uK{HQTl8q#3!m9Z`U z&&&@#fBYaHKH%P|V~4IV=b_W*&Y3-T%AhGjL;4RGG-}rCK;nK0G0tT9?0bG0Bok~z zia~=0jT{#2Z>P7i4-6hMd?03!icsVcie4YKLA@-v{Myv169QUqe*xw)t-Z{~M$%e= zRimi9nNwQH89tS7xzXZ3Vd_*Qq~mj1Qp7D_C(u4Y#aCWI-b|o45a7wn#!?L@Abkrl z=EA$gCIGf4Ob+lFNlJ@YtN_YFa{O~Kp%Nh0&}V=VVLpSqxSNpNBBThqDq;d~7hze4 z2@XO_n2&^pkRUZ3&IDSCaB~u~$cTg(J|gmbs?{o_pxUafh~f)&6Mfa5>fMtw$q8)V z@FGc^9TC?0Kc0oSu5E;zr$f$fK+ZQp&Nq^rBeft0m=@eDFW@>S2A0D7yKqp5IG3W5 z!ouuqU^C%dNApk%p+)gbzS3NqTr?l)^>A@yA}${zhlYa)FH-nnW80EV4Y+p)aebn& zOf<2$Nbv+tY#V~H0Qzo8N|P9Zk9`0APZyKhJv3D}29FyhjX!(rr|C21h5Ne?sJ*#i zPj-7noyP=EZ}J_!t8gfXRe$%^7BG!--zG-DuZmEM^t0=k#)?RcjtTIh&gHZ*VdmEr z#}e!sPknWGZ#V*QGqotYr)+DeZ$ZvPG74v@P3O5Nc2_kWzAK5jp{?)INj;{{adsVy zF_XIv+l@NC0xCMNxe|m`^0+9|d2^7}2{1jj)U`F;+NRY`#`^T6c?`P_AINsg zF}Rod>StocCaB@Z$ZyyI6?!u~AKMY7iN+@+)?45k#a0efRJGfqFA`|<0H*VkY^VH# z#S`s}3UfGS_=AoPKHgql?oK`$wZ_NY$J5Hu!#fJulh;Le%EzkLCV883DkZ^TFNMQJ zdr2F{_G3nb87y}Q40Om$DAAEFsw(|nuhW0~`GWbc5J(7j{<+{G(fF56R&poPp^vu? zHkQBzKS$vvt@?wf7JR;ID|Y$5R6o8a#f=bbVy+vU|Db@&%P%a3t3^Zw<>lt*8;wZS zMtA^oAx(#vKJiQr*boEmaoez0=m&5cF@7cH?)_#lfSE47yLIZ2 zuw}EMUtR7QnUuu-4PaEC_KeK+(6DIC(#V9-+cPqkL}BWf zTN;fuTPG{L`N#0-^}INIY(H4I;PbEljp6e{IYY{J&sa<>)xpVV8H#&9U+@fQy2b0~ zp6{g}4@JUQX0(hTQRR4W1VI6SdO^@ADF0 zq!tX~9zob+2zrC~go7A|2hW_6Q!>(15}_-jWHv}D)8Q&ULA-rjheTlAIyC-<35bUd zMeunx)Y#eFzHJfEQiIKJDWXdgckBoVfFkDr%&Nd`SSYaVI&Rgf4O0PdC`1c?iTT70 z9ydM4Uny3=A322=0gCqVadGq!JlPxQfeDk1))BUB)dr_~H5HXLbx{3Kdj~|9Koqgt z@Jb7uyc-nQI%KeJaPskX_we?EV&?7T3EoQpi*UUUf5$T06MLl=*Ni=XXRqv^pO0v9 zAq;BrUI18v036T=fBh)}a>V{F(SZP-fvwmi@RC1$z$AvrFl)C#abE(1HyS>nMdnwy zZ;#AEhW-}_1P_L7Ou75hkYI!20Z6;q4hO?hn7!+vwmffsmD_)GGXDD|yx2qWU1AEJ zVSb<23Gfvq3+w>34m*H9177nZ**<}d)Ukc|Gl;JZuV9YckNE9vAPn^I5#_O5T=FK^ zr&~bf+X-_ijTun@f+p~lK$-04WQhMh_}x!`|Lz`qw+N)#zp}5mm9RqJ6+jyaW>Pjt z$A%c#Ac4P3A6&iv0lhZ#FDyi`HXo2Wa;KG)A^{drUJ&~H)u#ydf%gN_#d!P~@S5-b z(+8~E(BHuUu~_2q11}U9@WB{`ATHv4fAuNCet$Z`UmemS6Yh|S@qSV^)K9|PF%H}r zZ*2v$fD=)WaQ|6R*#ZI?WM&nd6;z0;BiMxhGKl&DS%(#Wb5nFA!gLD>CdO_0kW^aW zO(cS!Exxft5NuZT6&+#gz__t=MKJ2D;vP(+;4`c8Vx#+rDGL|QTlykcSqxAu1?k#6 zBU#1vg^k#)twpQT>3%7^v3c_iy%kjspwM`ES;197g`f!Hie{AGvZ8tb6u)+JwGV#q zAC_yR*+B`pS3&M&xJJsI7T86>z>y%zF*HBmQ-S#j2Fso<*fR)w24PPZ z?CF9%ZEoBwC`e1o&ySB!Pe&an2{02~8^aM(`HIVqBR|;$?$+%{({aWL9$@bpamOkT3b+;wW_eNsY&!> z0f5R&H`?^>WpD2ZsA*7;hX)%eSU&w@RKJdMe29}aC)E<6#j+xL0rGRxQ`1sYfFR3& zZbxuI(sQY<0cePLo{=`|iC#CG+FM#W%>Yo7bq@+}V0S@UDv|4n_sI*BUoaxJAsEMl zFv34jZasATT->c&CqJA9>9pn;>pr#haFiHZ5KwA17@0j#w7+#6t}4F->W5a-95 zF`VgsNc!#zudI0c33dwJ-KmHwEkX!)G0+?u_<*fBXM8aQ}TCOuTZVEICSlM zA}g&?=A(kUNMxm*^7iT(zT1Ax0Z5?Kd+~kO;;W;bGq!@2wzS$3fWeF=wTuJIo-AunWdrCfp#Zo(#M#Rddd2!C zn$r#%4=+zIaI#?H24qNtqXSL^sKq}$q_~dFhJ;d$gZm7UA}eirW;R>|WEy9KhE&Y` zfG&=4Tma1iDZ^=*xv9yiSwtVJb?A^aU>pG+ib@>Ntzg>+%Zi9i+&;dUUfsrYi24ecDY2ZxKCbgeenf z^)Z0VfYw3%@!=EaQ8FOu)QLS|=C=h&&CL^gqA+7D67TPD+fTsCHMI%+;p^2Hj+;DR ziaJpe_Q=TL!-qaH?$L4M!@?s2W$EWH$2AOma(N`%&5-VciTfo$HO^o%KQ37rh2maX z^rQK6ZkX2Q-S<|%v0}-h7ZxpAy5g<(1m5?F6;mJ!mso*#zl7yr1r+H6Sa6SXPuUtm zD^Z*Y3Md8rz)35nj6f45QxMuq6$n$xORDcxmz4q#taXrg+F}+Ixfc=AK`+Y%YoKjq z=zO(G0a`{N4fyC#(C^qmi=@z=NV`L?s15>Xoha1j<>{vJ^6_$Y1}n-xJ*L>_|1+Kj zZa)!E1GU?NrvWn)@L&()X%yrRl2LG{2Tnsa^nbR~*Mf;^d>6qk`%;52YC z{25OJbgzh~0VKeJr*Q)D+Mn?>e|;d!to%O4s79gL`-_LuR98H^zLJhO*=rf5{QKN zO*iefLkACjcl5%Y{Q3?Yz-3U>q>14^4JG>l1(bfgZ}0Apqwr0tHU0#GdY_*L+Sq%; z2K~n;jZ&YW;Qo(J3-jaowQHBmo&}Xv~!Pb6#4zmgfT^pBUda z$WKvp{3G24z0=Ho-KBsM>gp1!@9{bsCH)wwUr(d_o?sUlRE0EpiA-lx6F&(noi_oXIiFhw8 zDvP2>cSDecwf(?0H`J9-7+w0@Hy^uD(oA zl_h;cQ#Igy{&39uu-;9S={$?0RN52(2Jm%xDhxD{N=!BQj!|%V2#F|3}dj`d$?%G;`k>= zv-D;Jdh;dp=F{lS2=rzIdNY9P>6zgO==@8P)mVFUBJ_hLQ2s^umKk9@110i4W_{iY1wfrU z5-+4A>_I2K2n$Ld!BV)XtjuXPtrUpS%%AAvu>G21NLa*@_oJx6ao@{s>B9l&I*KtA z1rOF)xVyHo4cwY_lc2qCKonpTR&=T!r?p-&d$g(o>}D+?Q|5mV#tvzrLg4(j6($tw zG@MM$cb8@qzA!GtomU$V!mqXSus(q5?;v^)YXS-#1@_Th@btn1jA2cTo$MM8K}laa zL&FOM7Ov9E8%8;(-|#g z30Ya;S+8lAx%Ba~cXqV3S7QEBb$9jd#R9P#y!JK~@foP=R$5rWP*ujH*R(K&okFUH zv`e~M+FBYLt*x1?=eu0W)$F@>lUjI&q^W|Q2&a!9gUuynK4_c!_LmCuSGd76l_+zt zfF_kDmi!{m2ghbCL+cETymp+@xUOHHHjUK^lJmodpWi4McksYPJI3KUa(vz7&ru@%`fyx ziI2OLoL*)uOpiVG-VZ+vduIOhsL{iF4GDYdtyBJIuEbrw_`|VNzg@cp7l)vWM8wYr zp`Cw8zBzjI>qyL})6L$dE)2dF1TPOtSB&uWolXR}H^ANz0WZ}}wn_SgukYA5BCw`r zvlasyE0Xia^!f$UE8lub$nJ+`CuP z3XVLjEG`ZtXK@BOr%|rGn?dX(9%{-P{K?NVN)LNyoW{x-VyNXy={KQ;b3xY@MF`bJvEkZymE#~JK!w|ghJY6Y%zmM2xMX%XU>F# z3@-ZYv(J7hZZYnTMnPs+->Vw^fM@3DUg3>X)Lp-1k&6_uM(u_Gn1_wIX+F2>B z{cWrr9lhDx0%B`zjV7jX(DBXnT>?s-*Ye?bS*EKr>w`gS=>lM;J!0LYe<2Zw z-ar?_`iy`a-oQS<#P{D0BF^`6^aJ^-<>&_)`ay<%5aB8Yi;Fr8?Twf9^mItD^qnpC zal$^f*vA(8*kT_UWi|n_M6Ny_>+!$LtN2PPUegKX)0LL4N;|1V&0XUvBycy6tDR5s{m{SN0V^$FbM|hZT8!S(oJP{C_i2Gw) zQv;<`zl{=Or^1_o>XdEX_Q+_qUf@r$CpYjvu%|bK0dyuA7QwFJev#97ghPcq#&HVJ zlymK&5qS3<+r>9DvoQLRQs%uud~$||0BRmZh4b@E^MM8|%cGzM5R%*m@d+lQBUCpE z*4o)Q+X3L`YD>Y3KfWQh*)Uw=I9%gsT;niY<1k#KClDND|A97u(z_s+cpygvjylK# zMtQUh0U*$y=@dw|#7-b>kbgXO!S(YEb>Yyv<~Uxf96oMRsNI+AtCG&1Jo)p6&7k#vO026!M%1$inMG=II1*j6-a5mNaHwABP$ z_5`$504ZrSU*Sds>{*uVH+%?_nxfEIu$8q)9YT9yNMlfgHd%`E=VO?ReHOkn##JGDkQ?T)fJ><8s8+QpbhFZYoGS* zmNpNY5H*)_@CC_RE0MO~r=0r4yt(sytq@+cQd?OmZ3$>2_kKdDT>SjdAwvfa(g%<_ z4)6-y5Ira$W;QZEi-0wSdWttNHnr#xv^8l&oItslkPL}bfc;NX7>^>lR9^zEjT_yC=_*S3{!b19C?5|UI590Yb zkG=8w8;hRR%ZH7Zws8Yrp3DrOG5-`G>lhdptKmP`;5=)@*fC?nr_X-{3(v4VA&(9k zFnARBFC32Z{Pj+%a3{BNC@x29pAyej-33SQ)pOUcUr(>q2h+L%iXC9;VE*F|KBuB0 zj0sfMEX4~1LcuF>6$Ls3TLe5IeHgN$G@=qK0`E+!lY_~r8$JSkFfAV_J#C>$hCS~D z5Eaq@G!7_Cgcp?6<-~&j(Xqt7a>J+@>?dUd{Upo{jqWB!b!ZWX!aypURnLkK!OA_S z1umXO3Kl%3)sz(^T*#8xnLBxDr&&HQ_{$v<7!s_Dm=IShv|8bASetXNRk?!yKT1vXW~wvcp(X6p9!{TE(yXh7Dtlc)NCO z3_sd;8LCxDjvQfjq%pl0j@gY}q%!vhP02zP#m!`~JS~{qv>>*ZtoW&%6ADc;0q^w?u2XYOH7{ z=v109Ah9vi;VfrPtN(vS&`%2n0g5V~Z0RYEo?HcsPV(1#+@!CF3QQoqilwKZ|GGyV zeMQuEg6Y+#^wj=e_c%pgxlB(p>B)hftb6wO+m^`vS!{`V1liFNerSmfOB2-nzInJM z9@5X)FZ3eV;3dvbfO%=K>_Z;;g^{r3-u`%jWOJmwnc;bWy{7IpY~L53e!TDW&D%NCLEP{t!itZ> z(Xs|w_A0XhIbcTEQJ&^!%xhKA8+RT!c0S9d5A;B!uN@KOM>ZmW`W3zS3NJoBBJfA9 zR_b`0N9}Z6)m1%r=|}|9Pk`-{Eqiy>qzQvS0s?i+9X4VYCxdBt9RZRJ+)i#AlCoR@ zTu1?sXgAw}l5m5H&ag0UnCLQ_h&h#VrRfeES)li*`1bqzFotU4fjZva-BHH{KC2HZ zET9U~NNg@MBBiRXuCAu0x}~+XxxTixrmDQOqO7=(^j5h5cawPg;{_NGcs<9#GsR2w zK@RrdP@#fut}cH5{(gRb-rioldiM_Y_k}aj&Dq`6$&rot2L}?zFX`rT^dt4XoN;)- z?}Y}r8_OYqwOYbd7hFwJDH6s|4-k1mG9&_uxl{>>aF|gRn8(Z6}E`POQBC-VrAnNLilA+H*`~5nprnNB~QnJeS)7@Xi zY})tp@nhe9y5XxYU~0S~JwS6MC?5o6`t<4POu~gMASu9Ix+sLDqWBsW5qMv5Aj{m% zf(Cl%qzWZ#2iZhLTK@VlwwYfu|Mf7IC$&>~j6+t(21H}sfZbn=w7u~tv2&RnlDzS# zr|T^Enrt_NV%{)p^=B>GkU@DU8ii-Nq*?%jP%=l4ZH zsagmjf80Dx8l7PZToZ|u%j3B3mVGdDWD8iirJ!tAD0GOrN!Jggxa>XXL~yoN*CA7} zv9<=qG)hbFL8x#;J<-Zagm3XyM9?G%uH@li-JR|2NZvw1jzrAMl1V9553o*AW7Hv! z*}Q%r1!#SsD#7*YPW!`YFJ*?QF|n)D`LN*RKZ+ zq>r#MAUh+^n9}a!h13UG@tu>m;56J2!P#Qup5~@ZiC{+&Be&H`#$3BLMjs9r>t4yA zK^+!ykTGqUfn@HOv0J2VUlI#cIRRbsuCe1IFmO$p{m37F)NaXreEpp_K)_!Q|~ zLDt>WP|@Dq)`;Aps-~I-NqMCKM*!P(Mk91S&|IFqpqPvvJ!FVq zXxQWtBOe(&WW=c8kipjWlcPXEL*EAD&+koUks@M?nu|80Doww$MZsBCSqVNsr26H+ zxm{LSQ4N!_)R=d-vMLq!!>t@tD=LK+^$Sb@Fu{|>(A6mw6ug;oqs_cPG@LR)0?Img zMx3QoA38KN1b+0Uio%50*w~p*E*Q}*Pu}z8_D4s5|6|w-r>_(T4e$TaWfw`KoK@ne zrjFV&+)w~OP;?D_4!2=lf~2AXYPMv_n{O@@xHne`Y?WRfcjnxs+JR9~P|puzf%%4g zFV9N6+2Y@44!J1*&A#GGZULY8ks>p8e*ZxK$34REjhBZ_9TNx_hgNm;Trr(};b|m^ z#Q*RSZa>7sNwy4J$?S7M;{EyOo&x(Eo^i~7@90BeIcDdSlvaZw2t8A1gxU_i7|?hE zV$_Jb;a%00WoR)9+-OxG?IM#=D(fPScFY)U9O_rKx3f+`0(v_*#;UrYAnQZH2Ne+D z=)@`AHO^KNnJuU(HI5z}3khwqnh73_kQ|r3j&9a+sPZ_ft5_1r#REr8NJWBeTx!Cf zj+!g5+#bKkV)XA~^sfQ^yBK3)F~-DTq#`#pR)YZ!Hb@<+Bs4d-ApgT?gxBD1aYgR6i=WZm(k*$v!@PBl+tJv9t$B!RBz(s9t zE`sv1Y}uRBoxIC_{qe_P!}jgFc4FdFvu0enI(PM*?DjEJBll$Yb60`65B&nm{%)UW~#LfdZtUT!@#q*H^iPPkko>8U4rE8F2t& zAj)Pfofj#sA_UoCMO)FDX_4WtjtK{N{UhnZl~0Vq0NjEfKYF^5uCMqUNy7Ie3E#&X zq#yD8N9=FqVKD61bR@r4RKbk_(-nD9cR`<7Pyp|8Z2^qgqT=+lD&)yFmE`B-K&=xk z?=BftPr#@lxsZ@tNK7C*0BVgrC>#}{a18EfH`&;Bb=NdWOs3}Qdx(D2b$3eb?L9oK zoyQKTs&sV9YaWXg$>ECGskW)L5h^!?#KVPtLgPw8l1@UBo?uYk?CQGt@J~D-DTF*A zHuMLP2jg?W6(q*^a>>C|1Q!Al!|sX!K;*)}c+bj3P7%B+g$3Dn!EcKfNFM`{Xi7@P zUATZ@Ht1Y|3m|m}@Z)Z(U8cYi#NlFqqv)dI9bE?dwszcL)iT)5lvspG1HMzA#J1`? zXU?6wa{af8yFXRWU-Z&otK#CzKX3c;>mM&&x`(=I?_rW&P3o-1a3`n3&Ps7N<;N_i zp`#-l@6`SHi^x(vN4BvhI2b94I#(;t0MK-~y56`E*E;N}HLF)+8E@>ew$l4n+>Zb4 z@HfoVkbZ7EN;0hfKEKI1JS@zr0%?324XVw$OjJj%c>T3km(LyM+dr)1+Qn1J_5J<( zJ{<{cDAirs;IVw#m^sT|d-J*3&rh{2Dk{6z?LBFzeM^0%$yWIsy1 zZ`@!n2s#@_FZ-@4Nc+fQA2kSY% zz*t;b0D31RuoQtzrlzK%82)SEf_1I{6`(&5UIVHJHTT-F2%EdnXLh68*i(UI za7su~nY4=|w~>H{emsgH14n@HB`9Sy8erqCQC`$ZjBO(A1iH1W#u2Uv2b9-#adNhY zvkEyYj$oki_I7uI;~R+M|NmdNtY9+8?SERJC&|vbO;5w=Ni;9-S5N+1TW`i?(bn@t z5E!;zE+&R}GV=0RHf{2eSsZQo&HL4H9<-aaTp?6rc)GQGE?lTMfEy0*Bfa0nKBG-B ztH$O*d;g*RVd&@3*AOGa224a+CeYlkz z#8%`7Kz&0SQzB(7ou-VH(xwnd{fEz1hAWjZ4iCgjaGf~COnMqXPoi9m5G5D8L&tcyeUkxuLYK0iw9KD& zxCwF(Z0g_>z$S=|$L2w~czEw(WU%6EO@K#Y6T_y{rUN zM5*=^TZaGHa>_=w+p@d-{+Uxxsr@WAtkRETN&vIShP9=O$JPI2xq7UkxOVuoG;|kHg-t7*QrIX#l+d%k4X&ReQfkNxMWmGYbuE8Ijmrj0 z6L^L&BwrV0V3H7{l+CZ-A3lEkw^L_+-Wg)ph7`~>YS;LTj5~MEUi|HH8nEq3)8v&XFrRJE37BNG-3gZz(TtuCJF19E!B5I#6v$qd~>p4nwe& zZDv|SQxp6eFw9LI-R+Hy<*1_F-PGFAj^bi1jm=#gY)pBRwRIDdQgg}!h6e==3y{;n zsJZJ(%G%n>OY1v&<}R|0h#8tN(qtPw4TC~@h;Aeo#3Py#@TBY;i72txFbU_!C+soTQ#AK%e z@v7R{9#o5+2H$##H3qemZ?A^mrlz6jZdz7Zw~dXJ+FEk2=x$*F(T{$se9ED`w6?Am zMpkbqlU*j6EfxRcc)5*@qPw}dS>Yr%$)TjcuCjyGFU8ONX|4L-d-49-2cK`5aRypu z`4TpcW}ITnyKd%!cYi4y!#c^$O^E~3@twrvTUV|$G*%n2k_zsK=f=i%Qigt8Ctgq~&UU;2 z?3NO+ySlogZS$KQ`~yPv?D+SdZ-34+Rz-qQ;(4T+y(2YzwD-vI37|Lag^ZRlz?i-x zIhu0uTS{kKWPX`DfODTM2z+^Jt{~6Z4t|M)Ie2jhFTTzZ_}mnuj>Sd-y7~(4YuROk zY=T>HVtQO8CfJ|h@BCc4=Goyx{O7=vCgl|1Ji7%4=7;cYtm2;Jp68zC#v&e=jIVwU zU1lvRF@41ZZZXsm5B#Y8h`9o{1PM{VGP4-&ttEH|3Q^0ux0eX~+~IaQnJjef+)%l} z7s)6H!NS1_YyzxTklavl*YYx=&O)+mw;)SS$~6*sAR>Ph8Ky*ar3^2K8&n}f*Rj3P58}I(sPR{8ZptBRo2#Oslj8%u?yc5{O`mK0=A}KKl$xO z!v;I2_O$CiMZu*#A6ZK?%r982-+ii7VHIOo|25koNUn_;b1m9Of?AU>mUyY!-hS}H zl`B_1;lV0@2H^K_E;x}o43elv`wtIi``A9<;=%wnUN&Xy*s+t~xDepU&Ow&p4XN6( z*JDd3p;vQ}o@56)sxGXuTO{v4J$_gusH)lsc!%ZnXF=jpS?QveOHmII@o$~1@UX$K zQ5ON;6Wpb{^7&pbO%%bco8gAtKz+~8_mS%R!a4-O5zug@2)gH{r{xycb#ym^I+DCq zB_);js!Gb6JIw9Ch*ZNng=o9ppF#;7kK$@5=SOZJ5Z1MEyh(06#~C)NJRZp$sLqL4O8_CsDXT>EybNLU z%6rDb>{8;m<*x$}X_pd@VV{&(3ChG|uq>qrd~ED>gZIp)A%`xFm^j(jzFT63X3%Xf zFTHu=^ni(%p4tuFKpV!{>A6L(5)6EcymxOqf8hNhVB&uPZ|=*B^!T@dKcgBH#$d{<0uZ;Ot)6unyj3$I^)?MnxZy^XuKyQiyC9_Zx&fT5Q4(%ZRMOQjfnGCZIb*tzIgXwTY48YzvY zSJ9Re(Uud@mcb2G#d#H_Id`&93G&uGV=2I}C3zL7t%gkO($ebsisHNFn7S=(>KYr% zQj?NW*+XrckkHe%e6Kh;Fo-@|LPB=$ZZ0nGPl3SK`}jyVMQSPW9&OT ze(q6Le-z*E2KMm`KDo#(8b=<%pSS=Fp-i;YS23DcvpJSM)Im4X#10C+c_S$;wVNvSs4VwWt}k^%y~BKq259~g?cMkY12#`$zERF z=7e}oChfGbQ={Oblbwx+x0{EnLKf(0Z|SudO^1z52YaaNl=foRiSytb^xSLcxwp`B z_dWF9GHDmk(Cw|=sCkYt3U5s-7!>g{UA&j0NvF(B?)Gp`%Yb#HRhW`6!fKODg344{ z3I(XSwA3WnH#f89*x1_U=GxfU=G(EcxApS!;)3Hx_wAy|WR-cHR^G|+#lOaW83_m5 zi&*Ivb4|73l%vZzds24K)_prO&!I-iW^~e7_Oqg)t=4M%g2&&Cgg%*p`7Vo7 zw-Unc78h&|V_DL7=RGxF5e~5Nr`(7z5fO#nyoz7!<&Fu-&b^nQ>Q#Mw7j9>odkGQ{ zdF~P<4!d?a2;8pg0=ur)hL|~w4-+c(AoHoCO(L6*Yb-@Ya!WjG z1$sUjJ--z_f1fpOcxGnW%?qb5+)7Hgaq~`MR%U7nskrDqRr-UIn~Nzhug7OScBHz10!7_v$yl~)M#AD`Cne)=4NfBakg$UVFtGg>hI9p+9wpA z2d!0I9jfDYBD;y}{6n@n+ZqhYPOOQrbN(z~!;1=y9d;hpjm1^f#**^FJSa8AMWt2M z<<%4fczkMkWf1}pg%{7Hl$12stBk*0mhguAqsd3k?-pQ<}$V|8#3stJ2G3E04 ziGk)7U;ifE@<^vEIL}zM@dP<)q$1 zFc7!vcaqW*E?v3=ySTmy2K-0Lb7$fB|LwOc=k(6G^)~L#j;6dD$^pyYT>V&(hO;03 z&blzB?K`utpRKzckE|NCtD8#U>LP7vZR6}ca<1&$zV*b7+XZL4pG?mKnz z4O!QJ*|7?A51jT&|UqW-wd2CTvP*MGtnnK=REUY(u5 z8ES?Gv?IeuHZ*Ej^PxT4(=<{~&s3PqYt3&uoa`*_bPAfbbjkA(UUqg)P6HMb6R4hG z5&f+Dy&B0buxp4nZInIs*wE3VMuvlHG)cC!W7jwE7zrA?;K7g1T7=sWS;qeT`v(sm z%%>l)6t;gy=b~1%=Vycb2GYTI)KugX_(L)HnF#22xNDVe{%&rbMx&>jetdm7^011F z${SMCk;0vxns7TAr>7^nr@5`I19eo8d8k)WW_UqTTUv+iAQIU~RBl^pvUg3{NL_|Ikq^PZT7&-=t4nu|nS=*y_nw@o5 z*Mx!IO^u@hSMaZ~N6{9I^=2y-g{~+ZA>O*X%+W5bbuFE0m=<6$R)MvX%EBW{h0`nB z5zhktr=`6|XOuyDDH`ywkNOz-dbi*y%;KX)O(lU%7J77}zqZ~diVkLY-^t+RkF#fK z6KDCbo!pM%>U@9S2Ytp`z6(EtZ-+qJh}I$jQOlmE_fGH$VbhKf-GAG=*kbj1L*1vn zU({Nxe{bju*jt5m`eX0^w4%EKyo2$43G?lH^dgf{`UiG2feSoZhx`u9-hbUv+o@%8 z#FpAjFB|CzNI8qY;P1ToIoqjhsy*Pj(HNju*Lxr=m4G; zU%M*`$o|n@gtC123<^a5HGE~%WgHB606VaE37y!3{`LFq^>934C$&!kJ&AFqQhF6B zSmFuyW#d7QJjupRI&higBvY}U_%mYk!VCNUajyS)tyFmyCwch(wf4P#twEM+<^JPZ zAO3Aei(HyTxr7{wQrTsZ%Fh2mDg!N^op8~!gPduAq3O(ju=sXr9`fx_D^olD5n)nV z_O7G7b(THbEPJZ{W6y`r*@PnM_#M%8MPp|uy{m*ag^iL;#D;uAlutrjEff>`-Fq)F zwPR#-_QKPOc*ilAj$3oqBRDi{|Rn8dfS6uvmM-Y zxO&ud=0g+!h+}&s6LQ*f1ryK)Q*i}z(FRe>QZL27s5CRJ7$NyPDK}G7^X`_WXWmIo z%Q6;L6oU3UF+Uf@1~oZlWjQRXAU5_^A^2n43b81cmw^U3Kkmwvxcp{=qQ8%Jb7!@w zHO#!o`KSGd57k#1lgA%8w)eYn#>VE3V`r`vgqas>*tLr|=o>2GW6KXavlyLT^~?Pj|r>8Zb_gs}t4R}p#$4IMM2FcPZWK0pAEDD%96 zLk7&88U9!p+mEZ(333r-GXJ2Um^#T*F87!;Rp5>-5fs0it5>P&&;4TXudWE`3la^r zGmo5M7`}bf8dRf^2WqTkPU_AgN9H3Ss?sh)2DT#DsuB(Zt6=bxv{?*BIP~K!OPlF!a1pnDsCF>I?6Me>m~A>BECJ8)d}*1Awv?_56=pUV4sN#7EJQN zLFVid__G&Kj}dq=e)EO1ma!j!z91~xJoH5b`XUT-m2XaQbxBfg5y11=V9dX9?RsKz zW_B8IoT+Iyf4e}OK<&Z7?Sd<+PUolec&ly@PjgyUo)O+Ez3(Uc`#3f#QHR6BMbT0O zW`gN6M?1@F&wcUlO}nqbW`f=0?fr@1DpknL1j{0y)SzeJlbYe$RR*7wK8$U2Uj5S* zPrewnSc=SEQ&z58yzj`>8r5)eN1cGZ^&7x_FOFa{uvTv7Y;5rS|5$quz$nV?{eNal zb~l^eNl!?j_g+&dB1LRqSBe#}i(<|01Q1YcpopSeMHKKVwu=ZzlPUsILr5hB2!v1q zr0)JdXEp&W_g?S!_g_e|ZD!ti-}9dLyyraUIe42?N;aB2-lkcOW^dDnd9IfCN%hay zwYai^19?^OR*_e8^u*aZ)mVE@s;gCiqO5MHRjM5^{v{=Bw*=l%-{es`caCib^|Dkv zN?KfSsE;?5gGguyj|}nliHNqrKu4KlvQ1jHZ;O(!`NqWf5{ZBZPV*PnkxjNOoWKXy zk!QygPOpd4>n-W^aC$wQUWaqv+n#?kAmC^oi#H@7miTx|&Dy(l`?dIog*z=2VtvQ1xnF)*1NfN6$|R+EsuEG6FIOqWCemNHNBq`j!9q5j0N+M4nTf>Ta& zef^m;Cr)JB0|L6Xk8jnzOL7XPh0|PENV$Xa9%&%RjTg(r>@>n2r}AS^A__`NON#Q# za&t-3hCD4V?_fb$;labK`dOxgP}0R6;^DJiB5dWZQ!M0dRc_w3KPNvr17GxW?kQfN zss8c7CyUeIIoM=5nl9K^6vhJ%^CSCqZQiuip@vwU)>CWi6yvwbpdHy>>9^jLHfa3X z3<>6u5dXy&gE8(u2K4h1e-9fvY`QZY@A6Mfxo-&*-fk*)h4vipP?!JcOjCX`-%z2r z?m!idQ!Zpj9LI7jKY^3vB-@>nBtf4$QGD!7C9&KrQ{l-vgV}*k)M>SmJ!J#|?9Pq| zNN(M)XSjcKbc=w1l+<=9De>WflwoGkG4Ht{y>BZyqS9&L2#3*2scnwQ+(6IWO3zKE z=cM97*n=W7~#;I<%^hV7Ev0P(cV zZufFss-{e)uX4Vg2$NO4bRM-OV58a#ijTEHOEsOZ@oky-_SzxxP-PEqd z=~WBgo|hEiI(^R6cnq4Aibl@putcfB4G@~51IL}F z+8V`b$u=icZQGnvltE-_mGZvni6Pwrf07L~Z13i`a%yb7pqhGDS>m^2>#h<@LffHt z-ge8-j<`x3=A;pK4Ih60V8{wSMKQ|1X@(xkrolL8M(h7>b3b98|NZyd%PD?r_falf z%1)&S2jFlaGCuI@zdrqJ>GIFM-nJ(vH?Qz;&cVGqxBalRZI0wY#rlc7asEn|9WBtl_$>ILt~=CLJ0-f zyuBMQUZ_5IrcSxwa73Osbd&tkQlaPRLl)*I&F;?=;ys-$l<{Xtp8$7-hTW zj7o}X$XV7n_1k21tDc$HZ>acLj;fw(ega1HA=Vf%-8Y$((nn|kRB>Cr5wh^krjWWmh_~5~hN^!FHriWJ&LCK*4 zAheq&$T2^VS5fa3RtIH#mI+{CZLQPRSZ6u?{U@KSJY|^!o!iBgI%>5+ED|f@XUsisy z@S{bauEdyQ2E?b-{pG}OLvqVQyWTnF z`R8Rpo#~0im~Ly~b?(bdPn5>}yq9yv^j8<}It_mdBqe5(mYUeTSCUtq!4T2DOUrIO z+O|sV*g3vK*Mx?WLSx7DZgH)cIuy zU5X20U2RkeY-Xn>wMvc-3XF`1O>UjsxpV8J_*U)WlTuS#wTVh-*SURiOafI1;$yI1 zqZ=A7Q6ur#d4rw!OJo>V5O|7c2TQ|b|@Y8?R?-BJ~Qq&`FDx$1+xN@foP*m}uiNy?p4~h4h_`{#v-|e%-v0BJ-G@s`azSI_ z2Rl}L1Q+EI)(uBe!?}v$1G^c^^y2SftcQ++n zJ2=f=v19SlFH4Wr2jY;3ZehKsoiW(LR!DEjQset;M#qkbL`hU)-PDqkQ!&z z5-FGz7atQB9~T>&km7q10B=rVO;Fdno_h4Ix95L*$Q{xR8lXj}cKNr+GNDuWnSJY5 zznHFMQ!)F=ValzJU@3zm*h-dg!lm}m=ukUmRMp<4Z#tZV5Y*>9B4K{zixNxnL~(}8 zzZsSnr%ZY7p6;P0t^2vtB%z$SKw!>w@!Ww8yH7Ow*&C{k969YB5$Ib}T6D%37HT+? zv*p0q($d=4u02AI9VlRHX%A#|jZ02Wj*AKpbR@;yH09}0QOHL?)Y&iJKWsEG$bS7F zcxqgGL8R2ReR!n#a9MVDJ!IP=GKza{LB2Jv%czzKAy$ijbYfH_6r!P#(a`uq*&p5_ zHZc~^dKgSBiH>06s0!(n7H$)Q`+Aq^7YLlg;ujp4;8!Pc!)lk=8XSUz1^6_OVsq+3 zbByo7CYcfz$_J6D2a&0+MdhbKr{^8~b=RKUzpW+g7cQJCFQPtC5vTuqDkG@O+%_ft z>1R_U-!(%B59Gmbzs}&u?VszR`hvw=3 ztYNzPd7HhgzJWp~8T7Zcjs5J|OR##9#ZiCh?B7#8U85plHj=t0i3w4ETMf0kbctkV zxe&;vzo(KsHi7Waip8IZ{9Ly9OLO(O@4kJxlF)CVuFdw}e#^jBvp~1nzpyo7JR-QXoSlHw%OQB`$Xjyk!))L8XiDU<^+qOGr`$37+ z96XR;v~w4lFpDIz(h`T2FpSib-&m@pyNwic=&%U9q0i?MSlPGkyFnkk0vK7L-tuHa z-Qe}v2B|Htq-lfIhiGtX66uzz3ek_D3S>o?^ovN53n6K%7uNhST)KnIl$JM0jd|rq zE%5F!7psj`5~(Ax?wdGm^YB*Avr05t@;G|$Hhs6AuY;k^^IVfQvu|7 z!-Z^v)SFkJ<`BQjRPOR!?s6%2IhDJd%3b#NWc44$;VfzXMN}TAzA%8CJW37goMGfi z2*0GH|K}VA0?A);jVP>9d=$zM;N`+w0b?O>bSKoo)T~8YL#7ptN0{t-@`bSDBJ>`BX%=yW>f(vI#kDseKU6{A^+wWF}LD4 zuPaC0a(C;HOWPKa<_(ddVOq{gIrdKejkdn|&mWEjhQ*v?yYqy@eyQfvu`}mu$#p-u zBpoQoGwRYZXR8KduvZA4?m>fdccKW&+ui-_GgEF!%=`9W&0~*U->H4UwCU3q?K#u( zF->`FkSqx>?PkG~{=!3d_VKT;scy^|N`zngq9;NGHIGkeif zd0T6JospGg7mk4`IJ9q-O9Z#=e<39t$UY$pyBU)8eYRpRc|2%9kR-gCN<2+fYc;;7o(o?*DvYU4|zi!(^zDhAx1uArCb8!hmiPuCPujO^T5a$t^c{_cb6uZlD2Maw(? z+L~8ma778Y>>l@>wwa&rkBn;H#dP5PLA)M|B#w4ZwfuPK;IC`Rigoz>(4i;wqcZz; zNQn;ia+)Fse6#1g)A!8o?{2x}yN^iX{^vJ6uFv33or=S|mv3bKflZq>ZCtmSgwFYj z!>6}DbwV01HX41~-t^dDa$5e0B3XkPnP<6eU~9|y>Lct`vz6CuPYr49Re!R`-1){q zx-`y#9m8q4rE_OLwI57BJ~w>x*++{$TJ-+w(?9(FsMECb<5sOhDFu<#YSye)VRvP?ta;}_Tw!M! zRX<0I+>b>^1ACm{Lm)yxVC63889HD3ZE5sb&qO4Eu+gTAerp zy@EE;Fn6Gr32G4=8AxTKz%W?bph*N8D!3aYP2>yuR*I}hB8#vpBANJY{|}ea1f{RLnU-CKZL+@iib_FufKMuR93SqR^w{7x0u%$hG1D zQZfrW!088GORvZT!K+aKj7B${+~|8Z-VLXBLtTT>=xRVgLLOo;W~UQ03pBJaeLECd z=AQ0MXtsM(UvVs4Z}YeQx+jw=x>Hb|&s*kv@a?9Jo7R+E1NYldop*mArGah%2lBLW z=38&i5d>r#Fp%OId&X<)iudIwk7L7K=+s9$@ zwJUa;)87;rTUlAYh0nh{9g@(yjh3#?RcES=-oqeLilDlB7d1sq<+lMw?yn8cKYia2 zi^^0zcvhd0$&_PRM``}Q%{COD0G6{n33lc}nr{G8J(?wT>% zx2GVmdND5ZB=K{#mAxfJ|WmSw)T``*M$%33DyldycV;6lDeVB86+&eYgwkn%Pr$c-Aad&b7teF7t3 z0Efsmlsqb`uS4dFTm}RN2SWB435BGvz61yHluyXC;z4f9|K(~Pi%R%JeNNF=) zPwS!rXeW3`cp0sSvl6}@xNdn`0Au*!9HAVBml%6XV;4Zq7(2&liYi25unRZ3*$26q zoed4s)jj`8r>bWMpEWsqjZtCYLGGHv`(Xin6mjeVn#cr2{20@`^?P^JUF#l1HHXeF zx9fzfl7DC1W~zR-!O5vOG$m#TLG@2XoACp(!^_VHlF^p8318!W(;S1L^q=W~a=WXy zZB6Gy_gSXTm_9wb&TZ79qenlLQGKrBq_5dkV+rUpYRZe_2VXNfBiN_isB6-$!FXDL zU-Vs0IvuNUzB0q`!u2e)a~ztjq-LjJLl)tgHB}Xt)EFUq^`gb5SI-P7RC zZe4V=^r-X>EJl(Uc@#gd^d0L3zfoc2=ChQRmMA~dS#IbAcr;AIs{|z*B)>+L?V`#W zVlqu{9S{@}L?4%;jFcd+~<{nvBpI!I%5CChU$qpP8V zG<48F5<6iXDucL>s$6Vy^8qD-N~FMK`r*Qa4!jsq$xV*vwum{itEL%Ne0UHjQS&Tx%pcp)oze$kR;Yxd=r)LP*Cxm5V;_n&_B&b(J&UGT$BSKD5_hqkUR z`gOq|bk;<;sKP4VVM{g5b&Z zk)mc6yN}&!Q0Bb;&oaAD*NK$2nb^&z#VOe6eQ(Rze=f++P!Fd@?fA)Ql$Ln(n${g2 zO;cVCeCp?CMzV?9ASn+gi%PR^*Vp$WeRa%M~EBy!;OUkkF<=H!64)2u+`TI?gF2M z!5Kj^4x>kk3idV&sBUQk2RD0>`iWHf`q<$x4Db)pH$fp05n;h0h$IUo0M;NjMxnBD zN(HjuCVE>;Hw-w4FcjkY|Bp@Z=qo`9UyDSKLZbU4(fyI=7?w*aRAQ4I6*fv?M9+mU z{kOe`a}OMVX2_fDd#q6S@UYQ*MH?%s>O%nq_#QOn5SuJ-=c(#S^87E>pRWMyXq?+{ z_^mkt_m8QXREO=B|#JbW#S(I)T18vt}a zC;z-ZIdtq&r*V_xwr`KqVpvnC*@(RZdm4He0`YR_#j8#qD?fRX%uR20&zRf>N@=>C zz6=$B4|jE~lzmdQ7?ue9Hdruz!a5lQ|2ILB7ICrRVNp>~>HWupbnB3!!J=kBaM zj!hP*%6~ZavYiU-LM;6XC@Uq&+gbw6KFmPqq%^rS$3PE{>ld3%rr^?FL< zzi+e2bL>r?V+%dUHh7K=`O9N%%p(uFrqP38c$|_&T<{a+Vg3lDNYBtQ7%yWtS~3e$ zn1vmfg()1J!YmAIXsoKNJb9X!+$D-vUu{C*fV+|k&E4W_0h~-!Z2)J9?CjD4ce4B{7h$qGRydUhBDFN zP|XI1H6XrY$BrG^W8?TS)q4z8ta%n$${glHoseH7#yu-;!I7dO z9e>AaNgZ7`HURXKn2C)Jcn)F3G)vk9vk*nF8F2Ts+B+NF8k$NpNzpH zBTtiB^pf!6yC3!Xu)ZR%3Lesy5OjFeC>Luu-^FUBhR%BX?frF1(xgd~5|z3GZ+Gv$ zeY=+2vr{N$T0>BW?tN~WJbBWj8~YAS@71Mq+jgyzJNNFJHfUU(*JNTzj^8(~XRl5U zQ_eQ1svYL|US(x5FrjC{s6_tV8cA$P8UMx8n z?rJtWGNV*Pf~VAMVmA-BKr%FjQqK|>0E-5m0o-g*2?Pg-xeMDs5Kw_oCHcRfC`)F> zUytE`$_BSFhJV5XsVfovM`SAG0G#1Z2jvd=vjcP?e|qTiL$2q;(Sk>kY+`Rl(i@q{ zQdV*>)@!l@Iy(O1tqo`fL zcWJ7wJa;hedTWyTbM@W-KK`k5(+S{D$eIvgXz8xvrfa^ccl=d2F70~PmcIZ9)7wmt{SN%6VjCkEYX=T%993 z8}ZmDG6H>~r{9{t|9j1NM!s`aekIGVH{=(*hrg4=-ygAFzM_ktL7r`pU#r5zv7yX`0Ew4QRk$tiBJk0(~Le9I*OK6f4z*O3O>v;Q|wK1%}x)>F=4;R;`uMyd$z7z?poJ)&S1r%b9d( zHE<@8*9RGqM90i5GpC#T9p}D_7Y0t5PC6iU64YzuL(V&Qe$$6uoO|^3%Cqu5XZPmp z&HCjs-jTEO?dxPz<98^Z@|RQ6$VPd`w1efBziKO`ck_&r@oVNSG}A`UcrzM?YBs0Y zh9G6UbgVu+@mKY(+?Ox+CHPLkD>~pNThB?MBe+M+q7Bi!@JUyDs3F|TXO8J4i^>+S>qzyzchX6<9FdjgHalL)G99V^bTcP>l;;v2oeYc45zaY@ z*XHw=)^1Q{$g{wp-hE)z;Te4s%y|Q8q42%}4B>|{xYuq!$>A+i>+MvAKFxaK*RN291|G@7s8%%kpMtyu9- zuBl)B&s{eTkh{iV+B5x`@nX}kiL?oe!P);QD5bp zJ$RXkL3WRFM%hJQ-6fqLn|S;bu1TJqIUCVJNprmNpY(*j`sxfUkH~%L&ssIzS95Rt ze#H{W+yOu?+i({af3svp{Nb2C_h7X$h8_%|*-u;H<#7jluJb8kf1h+|{aL)&lw;=9V zL1&7ek8O9h5rQli{vS>GVURn*ZNaOGAJ+2EW9Q4Na=&^n1K>`qJHfQ>$0e_24dtVo z*@F%xA^Cb7vX7aj&s*{hH)e4}S5$1mr~3Myl)RJ&o_lU;W^8PP54(CxU0#yuqEo$8 zsVOfFR$Hkp)L81e>?XhXvB&QnKbST2OSYsX?1fvXk3KpQ+xlO4O)JPSwyJjCi$?ZN zqnCbG-cX-Z($pCaL#tMq&5qHVl2QtGlQbwB)~UWLaU4}~(ZgCW& zq-uir?eR)b=Lnw|PY5F6ohTMI{6J5CK-A#rAv{RgIFww5X^N_~)XSJl4LlwYz*F1T zUtD24!JB}0AyDY@|Klli&2T;ACTPMbjNA2$o8XXI<-w;awQIqy9;Jx1K631_DO9ez z;*Y>aemM8244&~4fkr2hUzfpT)PnG8=|#s)RODt%smfIEAbmF?u`2&i(b2PRYk=9$ zbo}7YUw!gX|B<1`4^g@BQT*i(6Y_Y;d-c8pCC1n0_i2w4!T4>ETJJR1SyOTie$HU8 zpy=p9&pi9=#Dv_GUtVQMoo}sjC>SqeGL&Nw=`QtouuFoa-gUOxJEhC0=hBJ4ylyVL zsKj?C(D)(Aa7&brmDfygNte#-zzebg_b;>ODzEr2l7?P5ERx_P1I(YDV zAq@e$Z2oX}UP=Nn9o^rpJdSr_vO}4ywJIg-U4~PMhy={+81)RG0NVJA>l^#N_~^0n z0vSW2jd-3@804w`tp(vj((7wC&|Qc+)x69O#jLdof&5ge&=O_w^QB0Y3?~`nRMpnK z+texx3J=o@ApE@(dUQ-8V=H{9Js8_W#x{|$^*@|@D6a?{z>(eia!6hN%Au0AX*S?& z$2YBBsMqA?$Gg+j_3)Ipceha2>>k7l(c8U-tj4=oCm7LBr8I^)TA4dn3C5VZQ~8IM z0TJyR%5L=zYdYma6V#WU9Hc(3rYb*BTGWa%oZ(QOOv4Fc&Q(T?P)blDmCBQv|FQBD zrA353%Fb4TZ6XyyHQOC#7oi%QzpCA=`G<$K2my~C7Mc(f5f=^P#UH=nxz~8^H9g*> z42w4^@!V^?VjvDmA~3!={Wse*zg*n({4b)UCo1Sii1cxCv60l>eg*}dF~s`a*B`z% zh|$ z*KFFfzdpWa&z|x1`!&Dv;-jVfFQSqrI$;aE_js<@3+gUqm*N+O-<$sdRK;Y%r~L7A zPoIR*CoznFJmY`4<`am5q9YQQSnaT=Mxf{QX0v8I=(<1{{6Xg8bE<3!S~@HzLoY1Gk`P2B-t$2q`I|K%&z686rW_mvm!tf<|w>cWN9tIi+YuyfaueLokS zUALm*_=*+f2Uc!bcVNfL14qAITvYV==lMIoSVexTB|7@}asS||O6nz71q0MBFZU0u zHiIO;Z6(_Hn zJ~PT^T4$UlXrH5y6_%7xL4+Wbye=s%Dk?1{=!VYbf05@kQdGho$s+8Ux);HYhXFnG zm#>&AxKlb4-{u(I^#!>XYoZlmzkw`%@ zQjmfaL?Q)|NP+$Eks=C_0V~{(O#{xQ`JqW=mZ@s>DvUV8lGRJoDI1@{T#Rr~C6)vR z{#(piI*{_$)pkz~hQnzu6Yn2Nk=g}m9q+J7mcQqr*^iYKQKk$^2g0LL+X9d!@(2&Jb5h(>)dRQn=m?VZyE@eF`BLH7&EE3?3m0W6@~=WWie^C z&~Ro_U}(6)Ji7d@-kvtJN3hjhXiqcWb+CXkpiTp;A~jribqI#PtN^@85{;7b)VRIi zeAoQ86~b(q^<6v9^d1+>xOrz| zMrJSX4%5`fvdD)81E#)0L&iIl5bY1U#3>y6C8PBtqcw$Nry%dW@yQ-IOkOgk;E^MT zat`L@9ie9G;X`m3><5BQInbPg2Y%bP7brT>8puX|-Mw=c5j?njdZO;cuHYBumlQ}f z0t6#&u3}mM@2~(n1ztp4(d}x$AmgD>EDSSEpbn2Qfy^j;r`zf}XZZf>*CmbOF3=BY zlwE(sFnz(Q2`qY#u{-QasXK{Qd%=o71VimP?0~zeS()Vab)9roxvDu^XP^-G!~q}% z4?Hk>=l2Ugc>lX!OY7?Eii6}k)qaV5Cq)~o4#5sjQ75ZWO0{}}`keawOOuuL@L@Cn zjA*B}#uA6yNO=|uy-1Bw`>S`TudB1v8CV|eRBxCwE-CeDZ}m2Hs=7@5O#PVh3xm~U zU0=YLIXH)a3K+Vy$;Y`SFv-$}3K>11c*! zal8VmL+TbEJ8=p!eNw?FYI?llWL4#9Dc*46#Hp&P^JlBiovu6yk!4TMuzJRqNUhYj z0lUI{!LSB3RSQZU)E_ZOYByj`@=!=H!pQ6$85R*09u^r*1(?v#77>vlA)$dGA>klU zqoX4u&~D)o;F)7$rP4!-7SVBWEfbOwV`HM=&HTS#)ibABA>)GZ>=u6|<`MA2=1t32 zuivz3;#W%fy?cwvu*8e79r&4 zv;3~~nJg9fxikECF8^Zh;d5bahK1*Jdyuu%@5SQ4j;Ya`59O`;bnc>+v-PY*Fb`i+ z5YpvmKi%fGO!jW``M1Dla+}k3e@I&Xx^opnpUkH+*~2Vi7xQMwlcNUptSaBNPwm{b zRqgJbC6(R!51o{$gBs@HB$(8X0(=QxUV(8*b*BLP!#R@iZ^8kd={h{%H`o@)>|KrM|7Z5ahndE+1d5kqKWeU$iH2ni7&Y5ZYs?~!L z=!KkKgPitY1bQH+kw@^run))sHo*hKnuq=dkj*TAL1a=o#q$ID#;M4EEpqFJ^FQAK zs_V?TQ-<1eLH=j=?Af_(>-G&>iOmF@89wBOapT54_{4KlpMCmK|8Uh|u7w)(1YVZg zfqR5cz4x};2X#z|@@wJiQ+}a!mu>@xjT|=eW|rycjx2Sa<66o}JcdI+>K;Ev(Bg6Z z@1urd_vP#BG+gkr0o>q$iOYlSQ)wwa)GU92Gs0l*`C-X&N*yl3gQ0jtgppftLd5*x z7z4a_zS$Ye@?qI|}- zV7B*&KAld&OK0xF%y#(rw1~P;uxHPn!m6lHYjz-|Cjilv6qX7c8Lttu8aNqOR%LS^v*L5}sCc0fy0FQ^k=9$^SlA68xl$XKf`r%dv%suAG#edo-Zn{K2?1DeATJ4V`W8E)g=D= z*Vh}3myEUs>!nL13fqiYV9S8Or0BqimT}RNOdXd52IJ4)x-!cot|;**GovjLf2SVr zSDQFEJ+Y^6)S(WihpgeWaFAR%S|ShTYYb7g9eMr$i_j;0h7d>*JUA=APkDn)>RarZ>T_Ayll@sXHy=3JX^7K?wXX? zokES|Wf5yKRw;ISwuv>PXhRkPg{ZPHBgz8Z&&jFik*2&6=yqS*w(93~%in$H-EX%38au=diP|i$ zjJo`TMb) zYt}1EPEn%GH2U2B4x`8z@34POWVf839EOVrJ7`MC$js3r2lndFyG@7wL%a1Ke#0H3 z@4Ag;VV2`vD@8iRj5)gT6)UAS#Zn1MyefwB*Ri^OEC< z0)mv}rdRqEpoWLx+@xf1g#irO0A@*y`1i|Uku1ekBWN|58mziSr6m;EUh9YggoAac z;ty_utf9zRM#S~cmi$PX`$_xTd7;rU$&D-Du0Kx9LkJ09V#odhdCXHWa;Kju-}lz1 z%z;N8QSZP1{(^OvEJGf;_uhLeD^J^6ST2>EXY>c`*fBtB(Xp$tWyP1vKUsO)Zi|m^ zlX=sPx7<81B{q7?0jc(Tw*c>=NU6>Yv0vD??A>?Yjfsf~_qt%OJq}@NPNz;eT8u<- za&s{r@X-mLP$ZYdxF|0_=MWti#XVqRx^ezDfePhLxF6ZlXfc7*Y4xT~s}Ja1VM!I) zWKjm^3^&S?_QA>I_k{-V7Iie}~`J|NNc89)QMW5j0aDnslYFu^g_OE?T z0bw+5+r(0>P_SySxu(X~w}$BKV8vInx9=Hy_as;RgE#fZ!*4sA*`>?bvt7Dmp4IFO zS4l|$J~pYyYjYzacq|NHB1tEu2CpC9F{!(I^(U9x7*892=iOpgfRP2LNl;B$R)Sni z9>9s3u~n2gI?a8H!QLhoZ~~}?dWS7`Z`C99{%wnFZ)w9Qsq!8^<>UN2iAnnw-t$_P zk{2Denojmzm6e+ndnZbQ7Gntsnn>V8Syp02Seqa^H9Ph_CJ*o+!3SOaiDza@kN&B( zG_>dlGx|uL-dZ>{U7V0gEOel<$J8U@`|~^w!otQnlCrEn87rT3ba6XF*VU ziH+F%nBTO#skN26?42q**_F+jorV^bF}KS~_11EB10A9?YIdPK31zB>hUl%m^2wFf z{%QPCJb-@*Qi*36&!K1h*=bovT&P?A>uKvxkg+}r;7G<_S=&7RWb-ioLCJldzB$Bu zNXDJ|_!oP|znJky^ORv~p_=vaXY6JC**MXqC|e2Ai2D;?;pI;*>zQzLWhAc$fU$F zP&IqwsYPpcpRWzswUt~2#o{oYv{+8^M6>%wkIv}bxpVZ0+m#1k_H-EIE?kHcicdRm zYBX6flb_ulp&C8+kSCfQl(VESZ=QT@l)Az*hc8&Xm>c&=)CWQ5wX7G#5Q>M0q6 zrvl;^JdPTg~PMuC18ENG%UYZF=RowxHdbabC?x4ujRiJw5GEJ4a|nyji#@ z4Qa|k&7fB5u1eK7L(9T1aTc1J6PnRgBhk6@aPaC9B{rh?EB^u~q=({5|FqUMAK`_b zG_w+U?9q$zy%KikbhQyDWvskbn|JG=(3M!OlE5*c923gwN@# z(+0CWd&<4QorojwF5~huVjsg2tNi3tZMJEHNjqAjtXD^Zg zs;Z0B-s<_Gb!l8_Fs3Lsy@5! z!={-OX|Y7=Gb!5MyZfluO1}WKur6|8F<_!viw(08$>o(diI0WT)^(UUT zNe+5*zqMerS|CMs*(~cnqM;q+SWuKiKiMzU7f)|CRG-@Vi{7-zg#7Y06{k;!Xm)gR z*sVbkVYiOdo8~I^Pr8`oZOzl{vi;S6L@$%whyUMmpR(uGe?%=`^+A^4pu{%IM7jqe zMVD=}3zzEgt=7WZ12+5gnd;MLD=RDO8fr*EItQg3Orpn5oIH*9@9de=Cn4ECb?QXL zsgoy9o~}54O7q@d8ITYY*|>krVpzarN<8cT_S*I9VN%`q+i%;pZQr0Qc0`REcYXJk z!57QR%P$7E?0)^Yn3#mbq&98Zq$I}2Xc*(V9P7VCIT?*!Pe1;^gomDcNf0G-lxe2t z9+~_!s0K|r8&Fr*XoNT0+t}!10ZZVe#Psgfs~0~#d-Ukhy+`++z0wEv=-GqnD1CbO z&H#%Wb>>XP(fmWZw{PFR`%wPTiZg^|%JO$Z0e`5lLW_wAYk}js1xdw0zCJ#|E#ks` z>~@PiG(I6A0d|$Bu3g)urluxP#{jPC)OKCFcW>V+5l>Zos}9|-(oAB9{~2Y}NA|gf zMI!SJe~gECF&527c_AZI>QDd=zsdmt(0yjB1t`U(y!x7@A=_|@>MPkE>9TO-AJMoib`#gA zEYhmMp9C+W&N@T~d%bYAzxYe>*%G=*N>llW^h&P*|3Mi z)xvWo+R|d<&fiL*7xqr>*RNlsNvVUQXaCxNP(|@`O^Is#_!E=vzWEl`*FJ+sUFTyD zh)GWB*neDhYTMzYGfD9pEM>SygH@|)4O=H zKO&>bswRkW6K-aIpsIe#$bE zwWuiSuJ!5eG46@_-#c`eGV6MOhuVLfLm8(jL-AuRBGI%Qb8ivprt2~E9&wHn|VIyG4JQdX$JJYS)DA!pOE zjJIy>IonrUCTs)QLy&I%{^mRnwlI7BGq2GNM8d_DwZpBu9_T?w7F%R6xI&CRVNhvOV@Il$C z6dyZUz7>42+ihm65*Qv7mfG}6pZ|f#li-qNCr@ccehiB3z=8by{RfH)(V^KShG*O6 z&Fk6oyRDQbUdY*Zr46BQ~hiX|`8qb!hj_(nyOW zag&>(yrQL}?&hoK&ZR5sDb2p|Ffdw8Qd#-YY!%R`>Q~>v*S&DCX|~A7pdkJFZJOWu zeGBByFSh`u!sa{IbivSUGhb=j_QP+J{JD3{_)BKyFSOu2S|E4-o$`_Khuz%0X3Si@ zI#aWqthjvlnm)XmEh_Tz{Wrb!zhD1@Z+30;f4Kgy`t{S-u1){{;rdrb{__3L z;Qqf(^xXf}fdjYd_wO0~@05kce|fH-xpuAG|CQ0deE(O*|9AJ_tmnh7nwz$QB8Vjb zAmOIA;J?D%A$q&Ib ziqdRHi%QE$>?+4~Yr&m|LqoKC5NX_GX@w~%SH0^Q2f^@&j*$4$Wekr*kL98J*x0tC za|Er|P>;)iB@sS7qQpQwwBW>0NV@iIrx?q=Bh^Zal-501c>}v|`+oJ7ojZ1H*_Jlw znqkBHcId1H?y4!Jq(A`ipOR?zK5DS>jFo zxf97tjur13u22w5dHI+#+$$@#SU18YgW~q47cp(vF1X!JsAGr?Xc%d%tVfE|R zw_oR&V2jgWwAvi%Z@+0?zG1CKJoX~=g--RELFu@!UJvpQ?|$u&q3M144I0p?&$5ik0L$V>2I?y6jL4xTl2>V6gztLa$d}q&yB?s!|fzp;#TtIj34zGmYBY zb&#bCfhoFPeZL;$yuxo0vCK6*q__o5Bt5d=iKr9eR~fVm#$m(>9pxX za7aInGdi1};X3K=vkpgmIX)8ij&A1f3_Z_zo^9i%m@BS!Ft^|+i`7~4qCGAS`S~b1WuRn(CKY#>H zL4w90L1Va7TWCzMq40U+eX9=qvhRHZsqY4ZvaeUKeaij!XSWEh+xoU6YsRca2fX?}{rGpE?mlrYddTp> zBYL(N40%qGGJ#}|Unr^M<&}AcR3J;ym)=V~W1kX19gc z1WS`$T}W7DR8$ZSXu*u|h)9f&0>~8>79STG1yf}d$?1RhtRCGVmRNrz@6PzkyFrVW z;s*)`&kB4GZH=>Mzkx*>l%<@=4i4gNw#yQalt22e+1~b=!5L-S_iQaKE^r8*>0wi_ z|A&jd_3a(jSh=Cpua)^!-im#V*^0Hjz3k*^?=IIoltC)SY-NTzY0yZ*8rzxbOO-E_ ztxA<@hOX#3br?Xs)`3?}dj5qpB`ffm7Y6lh-#V$=z@ax!dVcDow>rQ?-ty|}3*Y~6zBX+*pWjYS z!^_|ppL9QHxsxlnCgfK@~Y@X+JAbkJsDs*-h_$dAG0uzCc5 z>7e-!xcS}*_uMgd)Ue^5`u7{quba5>*n0a-x;@?3YBR)j=z7B)1KOr)Szd4xyQUTj!NI3a)dUSp8|Zh=Rk&f}hTo3ZYg9;_hA~J_fUcd$px!~I+eBW!V0ibPw}$fa z9{x?>XQG6G?XSJ{_PlrBa?E_?jX6#^xfcP4(`TJwfu@oYlgY21t>}f)Y{kW#t{X(LnqMqNK~PoKaSwgDhrZlH6c!*tB=bTChw4fN*&?N-Fi*=u{&9pmF$^#Y z(fqir#pZP|88rX99~ghf9XAgj*eiX=kde1ucW)f870& z>8*M1E}Y(Ux*8{zkIAkkCPX+))UhZ$?ocBWv?vU71VogijxQC{7Iy=Yz~bUk4wZ<5 z%gH_gTb+g9q03971NVUz1v@%Y;RlXGhn(v37hxfvBrxPZAF4}7Dm~kgp6y4^rqZ*i z^lXTPd?k^Rsfq?iKuDn|(q`bmLO>n(X;7jno5pM)SG^Wwb_%m^iHc%qCieAdw#;EfU4%cECtv6#M{QEM^9aInzbY z^`lq(1OD_ik4$B9=MuNLkvnhBu8o6Kt)%EE*-_{zPznMoVH?GD842kR(Nv5Bqah$2 z9U^WJXs^CdVQi>c+<-eCzHj_JcicUqb=y|Yzxcpi!wIhV_(XQ>Gi<`tpoS~6{fn5fsrM0tER7*9S4SOr8e4uKkS6+Q3D@!VEykMF>YxcXdWVN}Mx%Lu8 z5$7)d@|*8Jd2Ln(ef1RZi+S(NnFNVm$pWJ1SWt5@jKy6+C2#D5FKFOKb%ezNRT%jGd6}u@l@* zf5MWq%*Ec%K&Oi`Z6vYz#WW==J~{Q7(PPG3KYG-#8%Cs!x%;kLhYe2cFz(5z4~^{K z9`H;}tY$cFG6}kr}rdd#LLTIat0Tc zq;!t6K)2sxS1vHLvj(rIR4q}=ABKvDtP@8V3syu{cb4>~xnIZvob6fJRjLQ#@{{>b z-U;I&>!nfE68%{gWGxrb4+^j|9R5-P7Cpd3(A9wde?N<7MuadUy2QgJ9d86npz>p| zdy6G1u4p>h8?S<3tYk#8&CF2Q6!-)M+;?A6x9&rF_3GHAT?RYOY|1{oH17ITO))uG zEM1VI8f^(o(ry_U8F8R6-kK}AV7&E>`SVvSn7d&9g2B8LcD;$xp~`Hr2o={fwf?-r ze7-tAFTY&##t9&ZDCwANGqWD+(3`!z;GE<(o8QwVOz_1cxR3j|k0yLEs29&&7`YMJ zqYJib2;pW3ND-1^R!BrjZmw{;s1+izGz0Xpk`xj4@FQ(o_3C>~m&B;J$VZ-f_JMIX zw{J6Gz=*D`ViTjEn)=d%6UKJLIhpO{BGcYzigWn3=*qpuC@zW%84Ym`a|^lm7$Q4N z>X%3W8sUDOzmK>dQ>QuEavC(vIgBmZ4g6eBM?8phWih>hVRK%eJ6CkvaHLu=ubjWCd|zohz!hx5Lj0ZqLWpX2%a=vstaOWVA9?&o|)B%&yl+O$qeFh9BK;{4z|2f3D%QREnM%wgfX}S)OH9;Dbt+;2{X`HSE zplt|fe$&&+7+RUq)MqhBK@5@`q)V;{ZnNalc}=1_JE)=VhMRoNp<#(_Q!HMI$z55q0F4?qp-G-IxH*VRoC1>5b^-bctcFo!~C9{Hb*%i@k zmR-8ENqA>VTRj`EX5poCn?LllyEp9~OuKKQcYD)ry&}DsVLX_2d15U#y_7DOl~ZF6 z!8ZYoi5aiyyC_Xoj%@Q|ow^R4@InUG?LU<-LT(>>`|bBU(kbVNrR{)Q@4q)GAu=kq zgBG)YKaBULy0ZQIcWvIhdH??XHM~?S_wWBrUZR7hm=-L2Z}#*I4BV&PPnec2|KK$Y z{^#5e80IgSDenTEHN7%z$uj&oFY}d$wHUmk7!g)$U7bB7SnA%&OPj5(&QD%yF=EE? zV49bTDc|%`hFI1`?ntySpgK|3n!Zs2)kPXe3^FNmpKc7nkt++Iyc98zrSRV$=;__p z=-s*W?p%8JHTvu|q+%dur5Jd+DTH-IYYHI2mcB)p^~Hgf%8u2uG8dKb9cerEI=k4S zqn4wHlMIF|8q!J!P_StFUmo=UXWe)*c$-y7JQ@5kOgGl?4%-shmpHAGQwnK4)n*6Y zaQ&EJJzLgY7(A?ZQbbL|g&`wCC?tNt=pWPj&Km}H={I=vb=S4?i|aLf)W{LTGrG0& zfdsW}@||~09N4FA?Zujl7ChXk9Rt}Nv~RfD?@?ztCLRxWm3TaU%Wb^u@W4)QbXA|L zafW&6Tjoo$We)OenfD*D*U_fM3l}W-$JaZ5ef`Z(zgxX^+b@fk?D-|X;ArmV9Ur~D zWciO@eEG#!-~6y^^_L6h&6_)S&inuRVaLwxufIKS{yX!QEnBy3+iuI=?W@pN{<*=Zl~?6f08&tIZV41tW8 zZkS`MGf+p5m7yRQ7u~M%ctNeuycK;O%H z!ITo6tvgTnt;vF_%vSW8z#WVt@sk*g+Aj`xI<`*j}4iv-^qHGaKJ#C=Lqr&!` zm++AU`0^dG(folsgPicQ_@NI(hW~$PjBW+ZM9$wv&X*zQGm-O|BIk6LZeWUr5;Qc-`o+&twuudV_zjTJ9NpZE#pvi%hL`LtX3@?_QIX8>>ai_vRWK z&}6z>Pf^WY4q1BPx(r}Tzj5E0tM~Nk3_PqaLc*)P{pDBb_%@#Qf90JImw)lqj90!{ zv1!AmpO$_p`_t#WXU+Lw&YN$%J@13>W)0zhshD(mDObzBw6uokQLV#lRTC4V{T(K@ zb>c3FNzeu&_4?LG)-n-%9BL#%l#1M6CHTCMr9o_4`3j$)pZHZ+XcBZ}iDm{I#@Nv( z8ku&U2U}WpQ+D3IEcK`~jg`eLdk?n%S2-UBBJo7k$UY#dCPI&?5Q4`_WM1Esc-)8* zaKlR>HT&c%eBv($@TN(?n8E+=4e@C9aY+1hBz`s$KMsi>hs1Xl)DucV{^Kgu#a{ZQ z$sz3#ev$sG9n!c6FKcn}NxPeT()tEWH2GCNY16g0*87ajgvb3|me@CpFFu&jKLdav zgh-Z$?i!Fja6o#$K}^KCeEzyJ*LugOK?C~?AA8s2VQiJ&=4&&Q#|W3q)=%)R`ov9| z;Sih0KYOfenmyLW>H;MD_a18~$BeJ9T=Mo{wy5IPoWK@hsycUGCgfe9mfSBOg0H{* z+I)<^$@+(r-H%x2zq9nSWs6^bQ%XhNN2>JfX<38VbRftg)T(oVX1L(x<@mGj`h2tR z+F}j(lkd8-V8iH6`OliEi>T<6CI_}hP(@O)j?gPIDu~xrT5lRYY_-XUt#9*14PWKM z?kq4fQMS6i^@yIn{CgbP9sv{yyxNidzg)m0P1hq$w;)Y-BTdaLUMbL_;}FVY%@+iL zt~5oOkSBRhEOG9)AYYUHvcpS6g{I@wmPw_jXQK)g$;*SS15-dt7~ML(f9p0~+PClC zXV}==?znkKpQP|Mna|(WYDA{Du_2|?@Ru@ibUXPsTbZh*46z(qv#%<`R}4INP&fs! zVtNef>ObZ+v#1ygbWt~GH2qb5Xs$e2o8PL@Ym8O7oBw@>I9Ly1n^!x-Mr7&b?z z+WgHil(GEFaSRnr`TKE{HSd2KOViY_TgOFeZT{wXS`q>-{hcul`O}!bGi}-%e;C*3 ze><)v=j%>tZT{xi${7FUaD@JOd}TG!+WgHima+byPV5=$iH!AqjP=Wm^+cxKM6`b= z5knD9+y}Z&LNn=tipCeEFArucpBw#KvTV#*T|f!UNZ`!DXv@ys8DIdc!?Xo# z;xgzwN)FY~sC63NFQcDtRO__s9)04eC!=EGdk!Bta?IFUZ@X#a$U%dz9ZHB{Nc+@l z?|JB!P6N8JmvO~voq$95AKtZQ&6>OubuL$x!VbN@u?}}8{C+Nr-?7t|>VI0hB`3Z! zAoO6Rrf98Zo&DzA`AZkgc*7y}J)c6QKX3kY>9Q|Ant}Vs^xA8$y^0^{Wy6d)6nvb5 zRpLOWzigQH>3dRcYzpdqvSr%*yk)|LvP%rR?V)F5e3d4*lP+pn0 z@#!q@ubHnI(58beP9lgwu}9CkR7aT6*EgYm|Ned#?R5l5ekWXn0tzK&KGMoeZTgnk3Nv?^-B!?oH?H_d;e=sqeApX1)V*6ipHKx zIS~*aA0Ob2^7{YSd(XhAs;!UvOvy~zr1t_LgoGYK2L+N)r1vJGSG*SN_1aJ$CMTgN z3IZb71yn>pKv0lgRGOhffDl3|Nk}J!l$rOp&IC~5dhheRAKowTnPle7nVECW-h1t} zSN*RBof?5fnTCm0B)&&9A$%Fxd92PN5z4F(lT#zWZ^EKQxpB5D4aAmU%$udN+0_V) z$#7)RWW?==_JaAsH$W5}m^6p+ME>mwv_6-b>7n#_D1CmTf}%yU#YjE?v+-hviopn3 zbPmuO52oH?~}dC~czuXlXC?1!Qgd{pdQ?Qhi) zQf1MME)v8KMP_GlG6Y-Q9E-zNC<+;no<27YYxiX4o?N*nSMJHopqilr17nS2$?$-) zaf(XJc8X!){$CfXsCy{}dP_c{&G?kGGq`XD?+7FR2C^qK9A_hoJhq8qHs)>eH3bd2XU!Xu1YB%TlK3=DFdg z(M*4GH;zLZ1&M?*h5mhktKX1N+S4$c!HCBcc+z&^x^jdOgS5ry3!4)888_&86_zTZ zrY24$yEA3sPW&OGKvQCiQne&m6VV^zB7^#jdj4OpKhrnB^}z@G#Q>2S*|STparX=y zO%X(jeDM?*_^*FG{rpoeKBl_H#+H?pn?t(cqfACT3kYt}BD!6>jpDX>x$)HE_&~Mc0K04y=0F1#oDh|tzNhG zn{U1ugbn%);L-@b;PEO6jQPa&)m$ZxKQq~{zPk8oadBaBadv)s`pM(zl}@Gxo7JhV zt};D6j}RFs0EUKFQ=?>wj1d4wS*TzpV#|i*F-LIfT*}izLXkr?**n=?a-u~hVMEt4 zAf%G*%Gz$LvlAl29Dvc`>lEI>uDI5D5tY>fRcS^Y*q;S`BjA5=Vr{&8GTwq4)Q|D* z$$aX`d9(bWbQw8jaL>-M z6cp?^wD*MjUi{aqkByixWb%XxP!xg&5bTO&&1Ucp_VX;b>>k+ep27DFR|>ACD2ssb za+#Yv>$7jRZ29@XMyS{_f8K(3=FXeHXyvT=%h#+?gHx+3E0F%L7L+;#`1c5RD;K<+ z(@KxduE`;grx=6?Z^CnIXoz`!o=(m^YH?fJ2;-sVKw-_;QNM~tM2}3SJO~<95W7Hu7Hkl~d zmsMx_PuMbsIW@{q2Kw%RU|f zeLhWWR}Y~3I)mmXW}QL6V-JObUpJ)8NkVh(%;PRR^=~MjsWJ6>@6guWI<@ejsFjC{ zOIsYv?V1NvUOt~*V#h7qSg+StQO+<=jrnl#mn#>(w*&;04Zr-lfA)LJ)^6LmXWw^+ ze%rZY=iXJzKmYu*BdM#t`|?9IMpsrMWm2;*)YR705mhV5)}?^Wni?Bi@if)b>n<*? zE>3EU*zROlG`SasA&i0{%E^akz(m+&a$LNmI`bR;kZM)bvIdzD+J+ejN=p{2d!rfy zUa1IX;s|1_ct>0XBj*<=4mbwYAstve|GU#^;~Bwt-ta2kib{g6pnk|+Ryc?s7lv4# zL`^j4l0Eekr{0&?txH@~r%s(ZhXUkjHF{9m4h{3}KCB}xJXFs^OO}4PbnTCOx9tDr zm%TfFIiOlCdh)o4Y)Tis@2W&3G1(wms8!ZLXco7ESSf8N<&E}aQna)+kd~s1GTZ<1 zxntFkx(m%C99diBnu2Ff)|O063CuN1xgCg4BmfnrqgaAlb#LD)YlLil;G=Ai`!GqdI zCr)_s-hKmm`_)-}%pos@`S~Hp`v-+abV~Tg^V45O`>EGa=2*(Sx%#?YDX2i#`TmE| z!+Q1Z9qrm!Te!n!`*|B);a~PwiKXQ97bFeMpZm!dpRf3I;rv+xSz8c@nWnFy;Jm#G z1?OcZ>ObsHJHP+Y%AXD%J8^zny!|Ejk5+#5=@(yoymZl$ufP8=Q43XjN}U<2b9S?s z6)+vkv$Gp4?z)HweW|emoVKiLi+c;gHafMBX&nXSQp5>?~r=1L%qh$X7WXp~{?;brW&Zm8Rm;Nuk`o<&O?(NzpC^)=B>uzyQAgk3*fylVa0bw6(W{PXwvYo__9?T@=A zzqjz`-8<(kcsF^L8cQH~b!7t=bZ-XeWSh2a!s?4CCr+bEb;O&Oms82}-03r?&)BGg zS5i)+=o1($(@2;rv7L0^B~2q1NrYoSZF58f5sl?sQe$Kx6ah^_bNOQv*Ah!6sxhl1 zA0xi9ClPHTov5*deZqB#l60tFh#f@hiaIPp3CFxJz(X&5uCXo8|HCyj%9sNGsDYo^ zA`Hl*I{x(?+RIqVVyQP{w~!rP^o0MJ9S6)D;hm&7bF4k|0rm5QeH3dba|(bmUM0Xr zXp4=7N1oV259TT~QuOGi2SLn^@Cb=9WT3@zDt)CW10>cksox``dq=i>bo!Ku;|8{S z`l+#fqTSs`jvdgyZ-?k7pBUD?g^TO(QGNT8rvLo2WA2La@ESdCQ2#!0ecyWP!I9m< z!ykHNf?@oq_)!T8aFcGYOdB2-_peu;eEgqd#!FhqQn%M09@V4Aj5nWo`mxDl5|ky% zBG;FvjqK9xwHZ%7{_q5~pO25ew5(L`;}cA+$xuH(eQ8Ol-p>#4*Psx0_r}7)!bW#@ z@Z(t#ee^|zMFh%%66;5*qVDR|t2&=ppI~yjLcP3feAVXV1vs-`z|9}Zii)@J4?hf3 zl@ZYHbPV}VYew5QvFZp!E}}J$=i0n$O`11<;j)hwzW@Ha@4UTu{`?O=`fT~~&p!WX z>EcE2q|ADo&z3G+v~d2sd9O5`=NW$c8fSS^WE{dlUPgF%j$n{6#Hx-*xS2{=$TTg4 zUBcMMb9^X5&LEPHM)R$Ec}zm9l!_oDIW~^xk-Wc`P-*$y{rp~jH_1L;)$BczS*FRJtVldzX2z(DmW4HnSBY%WM0+Nf z@FR^U%`^$2a8j{eG(D`sp_1f;OebcO7@#?#w0ErREbrKxu`V+Z;J5KA@lZ0)*hEt0 ziM{{LnYEcD6>i2eliD+rJh_~wqPMs!X5<3q3Q;|JgS$3GsKqCf;nF-MX277XwK+vC zVwyF$yVTSuExTl2zSe!{m7Lm4Xa7qVyR>cABDP=5^@2VURg)AMot}9$B(m$VV_5-F z8R=KZb7^SkXsqtryJvLXiS7Z;naBK8CFanfV`q-#Uc6jZtCSX(+VV+2EIX7+e8;*q zTlehxVdcurKWzPe^R}((ckWsjQ!`IG^YT1*tzW2oCq3XjPivErHU-lrciLp6O-7g( zU8&2+v?}_q1$+NTWsSah^UqXsB%5#nRWTo0x9;mz`*s~t-_d1dT6I2NO_q%q&enr- z#IXv+aJCp^d4JfX@CDXcmK`GTSbyN6z^>xCF=#q23+b!`Wu-F9>y5|n9o)|&v!GoeOXi6W8-7J5U zc_ESjnZFW6CpHHULQwwvHLVW?Vf}`!s0qDDY(+3tB(O{1%0RbHm4gOs-o8O~>XUik zKna1HPHu|H)77H#8XXO)mH|QI@=RH34TuBY+TP8x`!PadiMn}qg+~&_8bzX*rR%Lg`QmD zW^^cD(^h<%;VP>iwkdvp2^)}*2RZDXshRD1_U$|J(FX=8Gx5&OQL@N1JfqAq4(Z_U z=`AD10JVvUifYxWecQGHwZEsP9zAhNRVHX5wr}wKG}_i9Fc=A|9|OagObp~WSkh#K zzK677eMU`YG-Tl3a~0tQ>lTf^Y$d^!d9S*Px`K5J@yYwfF-Mz5(x%6t#1trTB$PN3 zN^A#kmW)2(B3TZYO4Z`i|=r%XuzUSOm9{ZGB}?6Xfi^1$RNBl^WB5Roz8{f)=(>qfPaz}AVQ{z3Ti zET_KRM-S~QnMg=xzq%Ff9HO%~3LoW!BLJW(}}c~W$C!)ysS7tOPw!Ri8A zfs{lpn%xfYF)_7z`C2?2Ae+LdWv+p%1L`c?T8s&-q5((E(?%S&Rdb*okecE;C6z=b; zxMdwXaVn3xJusG8*3>kT|EsyHnN}hMvY{I@pa1{1LNUJA^`K(wua}qAQZ%5hp{}OdF|PX< z&8=G**G*e@GRBPSuO$YZy>RdEXOH}{cJ->C_v}ANZn!KME(#;cxV8!QWQ-Ztj-fE< z7TpKj)jTpfx^;)RxQR|hCxhTo5r;>v!fUE-W96v%2SlJ6Zes!lsdi|4P~~ zbqah8i*?Sy{qT0?^xcssFQ%}*;OI@?$4l0OlA zeSL~X@i%&=*B^W4b}IN}30HZiF zUdfT}`750`y61LUnQ%}1{h)QzJ%6Pat$T!KeE&={{$9EuC0e@YuXIEAI5>jzk4SVq zn);{nohfI>p0758>~tr}*|TTEow(q^LkTQo&p|hqJJOBa?t3Se*gh&=NzpwI>3n%d z(rMMI_D-bJx8Ky^)Jwk;=`=K4yA$bTU8u-Y@~m3!_MdrWy%g#Do8GyddNyoc_ALcJ z??gRX@7zv3$4?}%XunGD_)G8HMmt*X+)g_K?oJ?7O(9>fDCF*;3_2c_NZT&JY8jL6A~ii)aAc2K@cOTfA; zDp9JfZEoj8AAPiJ+0vy;mn~cN0o#%fmMmGq>qj4{p&@}j{^8ww-qkw7-z}i{!o^GF z6~(nqPF0y(H|njH^6S;b1*J+v+l+JBO0Kod?X1eh#ji#C_U)tG-LFxh?&#hvTej>w zapJ0r+B`nKfB(Tl2M-=PBChw~sjs~J(%3g2fB32Ar%xL*+~_gx$v1`|{LUtjOC{{@ z3o0n)gNg7lgOFxzb31qQ_6}}|VBFTX2x%rc(PVMZiwlf%kx+aae_ zmjY2!T2)*^Ew7xM!jowk*_6{Muh8o&3j~wzcQ)h$V2~&asDft{`cc-ug2L|t8;PHqr=z$PSXGVIRBNM z|NC(+EdrCXxRi35IVGhfxtA&`{(hYQI(h$ooVzlg2Syk{zAz|lkRk?0XjLPz8zR1l zCmQ*Fq>`)T@h{J((FRXz&&S!z0f}uLuwgg0{#wu?_#ZR9z4#$6MbT-+5Xrm2y1R7` z=*9!wO$kWlEoRjcL#+G2mlG&EYu7zH#cN=A&bFL^j^}OLzWGMcPQrblwaO+ZaXGqi zYh7I1E&<_`AozgX#qs68PmNb*di|E?6X%oXc-HG9BbAZ1I9n(FCs;Z|EYo5QjenVHn{-o`K&^wklcjRA_bRFL(2s#~6br8H2uz!DA8n za1tA&-O_UTmv`J49Vsya`Y_DMt}d>alr3cWVg%lG0o=C_46+3uvGbI~wIa_Hq_>cR2`wp2nZQ8VZ6B63Mri?WJ$JUz7 zE&8fT`xwv4>bmAVMm_!bqy21ohkid^?GZg7zE8lIK0V_H-rcQLL|Rr%ztL0gn>gvQ zmtIIvwkaF+&nk~wV^dR$6#p(=y7((aAWhxcw^)3Gn>A||>E+dCJP*_+PYHwtR4jtOGZq=$)>-O!#Pun^ zj5}8f3aV9|&YvsK)vlz|fjRE&+9E&+;7Zr8H#S{KX#U^ea@Pr+1FdZZ4vwehO%yMv z(qT}1XDD9k7j)AyKL0@RZdCkB{XO;6nbXIQ9d|o-_Vg+0|7p+oZE<7B!yMA*x0UTi zrfoJ4ZniaB{yi+r4S6tS+&TMp%W$Hsc+H@&zO7hNqY zFDs-(Q%SMgAL&F1G+rwxE^V}gM@L2Y)fED3S84DI4-Irv12=8jahOWpm(LyEvFVTd zP5mJ`xIVNlg4W&0!F5Zeh+{F&p&Po$}C)G|DC>LhTuG)u*>NOSb10Uv_TZy4Nc zOBRS(GTEgG!#lVnPD@l)Ac8z$-nQ*>pFV*N+uX7)ojZU2a?X|F;;Z0S1IUVL&tMQD zSKXYimsei1S%A}x3{u^8?AUei1Xnt7aMzBe`wQUy0-y|u?z@2$>YQD8B{%bYIj4MRKXCulM5RipR_5uO@88E& zTX5aHywD)k`RJh|r%zq_c`rvpyh1_(n>7mzQJpXR^26J6Q@0w7s`GU)r9gqJzJBXo zVi-5^1xUZNiqN|fIqX=!5Z}zM1l<}9nA2T$?c6SRwtdGg#p^9xvFmu>z1Lam4Q zw(Y;1RK4nnhRVx3M$pu@w7k6H8gvWyi9Ot_YX=XRGVPIPUVI@@*{|%yI{uRCb@5`M z-peaAA|j$0CNzD~Mb+hc6=8Mt4TOle1c!u$HK%N0XfVnB!vD2ZQ0H1%THjDvSyo(p zz1Ec|yld6fRn^yPt2Hho>&e};qc`oin|9odeivF?QID^tx+p*E+$m5tujJ%l6unwj zT3u6Hq=ue8ee{<-+c&LSw{Fw+J--}1ee~%5-9p`mgSs!%NnztDk>hu%vEN?A;5<=iqR0;o*Jt=TpJsVQ~a*0ZeaipCE5% zvq|q{5t&yF1-OG+lO@8GbaPtG-h$zP^P)r~+6^^=5Yu)iTlS zWAbc%-Pm@*^G{Eis8W5UCKnVrq;Nywg)G$qL|kRT)xyH7#buRnS4(lRV&+QH;}Q&I z@oMCLORH+Q1zSVCYH4h|Qo!;AT3XGuGHT3eqc6O*6L&7T=fYd1s<3OjcI`WKXy36@ zr%th|fJ8XKEcnx@L+g&#ovu4kcas0q`pEiz^-tG7SO2&=RCiq;rgzbs_3n(^dEF)6 z2kEY>12K)SBRBaAFj!X@ADc90+;{>3QxELfdoXqPo@4o{D=DRm7A;=5V8Md-RaYG5 z^#TKcpImi4b_{3F(cceKTJq1I{9o?7VOPU14SO5*@xQatr!lti{>BFz@BRPaz6BAw z4YN(q3vMDpJBS`XJI-~WTrOvttE;a!P!Q<4;&kHhW%s`05}EyLoYp&x(Rw_}BbuRTrxn}EtZqsA+#&ZABKoq{8}4ZJsjgtS!E zjK?)cvStCcG#ja)WH#d|1(Dg)>*f)yhoiXDwopc2C_}1sN_4PqNeOl5ON#S~OG@y3 zXBQR~p1TS=gk}v&<81vkA{TwL&;62GT5;{d?>RZEHZNPW?4wV%DPG}-EhjfH3D@MDAhrfSI z)PBD}zyzGoqd|*LF?tzI29G9NS9{vrn>M$n&4i=yk0&7`834x5+}+FSQ^SR%Y9VS{EdeX9LhO29Cy}er2o6^JvB$xB)m0!>@DoE z_D5A;SJ$%alB#;8NMJ9j*FSSG(2cXA~;#UV0C} za7^wYUe&7agMBUTuC|P!Pfi#+cIy4ppMQD!(~rw{uT@+=eOB@DHyh{(FQ2N0>L!k0 zq7QE9B!cr~q7QHYDbyV^;d>_O7h+QtzP<-Pu&RKLqS1@ssRe*2Qb8>4-e0G8{6x>YNvEv8Gx=lmMU zm{03Au2J1mi;7a`7ZuHySBiiDW{WEffiEXVbrWA*T`4KL*E^~@XA@dMXJx&rE2~yh*c`vO zk#XS1FK%QUm~T`LAajL4B?4yks(Hq17LVTTEMcm-p|G%FXkp<{)%?M-OBt(vxO7G} z>mwrcnGq40s+rhZf(j4-XPQ{zh$*$HXgCBfN+!mwV~(p!<=+7jMrCxw&C0*NKAO-C zE|X-*-$<(~x15IAGB&R_b$T#5L9-RYO#_H0@|E8hN^^0^vbRaL?A zUtlo0ntY?8qFTpx?b-z-=Pi)Ia!ypwEz{1xE-Vw;?vRV zB}Q8O)x;D?uryUxy?yWWJx9+?r{|=GI@CX%@s88FhLu7(i!e~>uS*hGA))c- z($A%xOOsu~)_|ZrcaCpmsc|(mwNmI+s=X3V%vMvaZB;durNkRwtCV+DEIG*Aby~En z8aJ}0tj;gGP21>ZLC(c_clYbs%qO%(bE@hkju;%Fr0@Oh_v*l&V;+96Pe{#$OIhyi zM?9{E{f+;Ft4tSq&61Q!eM zISGK{$!e=31di*gYs<3J(y}VAt@?Ju&I1RIU(Crq_xlGQpRElZanC^a{NIRff8Ax~ z_6%EGyO@$gUxQtuK0-;deXJ6jl;x})Ut<0K5M^T#0^|4Mm`DEDp{*pT)Nwegr1Q9- z>?R92T-Tc_=e>0iILw#>zbg;Z^Ai)1viqxX+F-I~%J32|DkDtziUiwU z%FNHnzmk7Nc8RV8yf#0dZ)K=)0`~)yuT%sk>w(RMrHjqY#X^`L@}~Ai7GDC!wFqc6 zP9p|ZVj&M=MukF%#0EH45` zDYes}fDYa%CcM>MgGY@Ubx(Y3Gw* z{Iqt>+RZ9Pi6>0oulxGLdGF2YPY9e8M1D;-ckVD2;eG^5Od>$yAH0}lZ0Ms3rveJc}&Dye8x0W7$oIi{qp zojb?1{@1I|jOg06YmeS!`VHxS&xq-woA5Bc_2wIIKBop}2@`-&h?{^y5WOYZO-Lt; zsCS8EOY#!I8aF;YY%o&B$Yc!m(dsS(B?1+YmJ>iY03}L=S^WDSX=_J2MzkB8q9>f9 z9V5~XPJxU>tczd*GID3mrJbP=a~km!fYPw$F~|~)slBSk@K&$CbWeW*R~9HwDhqTI zUZV!AS$|iMZJ4vIqiVR;w(T`CwTi;R+Ia%q<{1{|8KdeB?iyk@QObCX-Iv!sc6WkL z66_{(##gH|KR%PWS~VDt9Wz>;3i3~#DlV`(zD@HOos`FnU-%atxblv60(AUm<2R z5G9~0l*^vWit(wH%ZAAsyS@~rp>IgDi`Vv0Wji92Q zWW9*7dYF9_V(Vc05Y^JYL%UX?)K~Wni;V8lS+!iwfZM_MkZsshi)LtsLy#witzETv zdpNMfkZ;&-`&?^_4{rrQ3 z@OcRegcErE-S4#C6$wvJ;T6u|uah`6-BglC> zs=FAwRR03N(w|LZr+wC|6zCM6|7*{v*AQxa@D?Ium!ZIX9I zdyV4TQcEa`c1JPOJWWpa>Z&S2mTOCkOG>K%3&wQy+wPw?VanRHbMLQ5Pph6S{e5sv z`1+X&c7OlFPiM`lXJSP~Ng)O*Qnt@rHMC7sJxeP~$j`#;LZptf*?vv+bZy=|$QMIY z^XB0p{@(7cs^|53L2JV>rx3N{WN1{$d7$zK>>U23S)NqSib?{u3X03hYie+NSJkMV z7?&{8;+@5FP=uNJFFKj%1roES>160$ZB)gTykFD<1OF&y7}+bDej^C=EhE5ojJR1V zNNP_x1a7m0yz;mtariQg6_3$lM)ye=6&=&5eXFQp{ky0hX8+K#YkK|lvQU4g@OGi9 z;xX%kS?@1%UsRY}tpZ2g{tJI&8nA*#iH9!#BPx&c{o+XV# z5JK1-^f8!wTg$z@&Akn#J%bf}%Sc5+EV&t@IITn_;Jsr{KG3#Em;^V?CP^*Y4hf++ z`nAkZE&Cut@&N((;-}U?O|s%W0kRH(1{G6GS;{Mji>sLht1(V3$Ncb}vYApD8xa|I zV{3g)u_6L~%x5Q*0vwqc6wcVee0hg7#azV4v;1G;vm?rRLSA!~%e*VWth|~tY{$uY znOhH}+>|@lEhQv&oAQm?qqN!;8%AuexKJ`TtIP8*T)0pX(mip=;307?j?db6Yae{` zv&ztJeYMY2r9YW20>?eWtiA_e`3Hi|_lEstr+43-JA0OO078vC01(zE&3RWa1VFk9 znl*pPC!c)0?1T4Htcw@p-g*~RaNav_&;LN}k$n;Q?cAl@A_V`k;_MufL9ZgH5MO;E z%kf$I<-FpXpB3j`PUo|8nQA2LOcYxgWKwcvLj}y095i7K@}DJHhk)X-9$5o5HG=od zJvqRpMT;~>!rnv)7TO@$MK;t%f0hnHLfIFh`ESmn@jc1$_Gec7F~{4Sk$&MELA{x( zl@Yf>JF01Vvrx}YGl;{T!@&y|tU5x>aA+BlX8-nu*rp$`NAp2A5W(5msyQnsH=mdZ zIRQcvCsEBlSdj75dEGjsUH`^vF7c$dc%2fV%HI)HIixW$2i>;wS)atuT(Mlpb{Xn6yRkzO3A^uSV zA1J(>Uf`+vM@D+pknDBr;Nj!Pj~_m8EF&YQ&L>h;z8S!}9!PNTDEn0Xk3S&vK45>| zb$y8`Z|g>Pzb@TsRsYn~Tyxv*-S6reMZ|Jcm%F-mZ*9y;RsFNIGSMvimoB60Ai`c@ z%Tu-VH&%Yp6xi59{6Y5c_w{hYn2s3WiJdJ7c89$>DB$0I-_b9w5f0mt8~xIRwZMcG z7JzuF7|L`WjjCa}#j+e*Qe8e4M_q1iGM7z}(BNA;e>Wa(sxm;8ur1&t*c{jG!FUW} zJce-H9*jp1#v_E}Z1~B=3wRLEoIHK{#EBC)-p--_oWG!o+@9V*Ai)Cg9oQM-Ch~ z`1{@qwl;0rbm-EPEZ6QV!SS{-#@o&APo>~C_OX`ajj2b@ojo4a!rfl51+?&owbYy; z_Pf=PbX$NYioDJe92wct%T#(A_4CrHgZuXFJ*|e2v(0SG&ZQP>VKK#5vD{u|6(c}9 zSA!@>_K`t@6~dD>`a-a>t0AOpGx>b+m4t?bhlb)o4-X5$WkOi?-~7zco3Zq!7$w|mVS1)`21&y z7`{hfmRtJC0KzW1+XK)`CK%T`_36lB|MABke)8?6O`E>`WXZ=LfBjowlD^=Vm8w6^ zt-LGwFgC*1B|G49?iHN(swYw>HICSjKSi)k!tm6n`m=Zk1O>vsh)>s`DEx!K1O9hk zcl3Dr33>#}hD&e`VfWnzyEA+ZqgsHIb1u zHk-F4EX+cUhR!hwwoF@*dDL~&1LgNT;HtX0rKY->O!>gN7FVinWmI%1FD|JlF0UxT zm`b*Sq#X(;QQab%dwI7AZx$KeBC^?EK6Biy#M&2{F8AR4*J(FGwIi$vTCU3N-Gnjm8xEvsHcgl-qFAWX`rf?*6C@Ts&8srynT|Ymxle# zId#-pv=ylUF*~bzOW|)#=WIw&zSB)n_1FbCXKALYudJ+8b@i*`?f0sB%t%e=6#wiP z?sGWzdDB08{Meba%h+Vjvs7T1xpe%*(cj6VJ^Xv>@e}GyGn~oR(9qZ*SeWo-Abc7n z&zlV&xcSVZ!xEv}@bWr0RAZ8bQWMC)=C0|FLbcrISV8g-EE5 zwhG%3+gjTUw+)*rd^F`JFm5q@X}?4!yh2-7Q=z!JQ~`X^#0M%C+L;sgtE@ zXerSZmoS{I3-r&73{1da^`_3dZh)=aR!P@Su)nDPYQ-o0VTFnI2q@3p z+>GvybgiJfuNGL{3rrDod2@3)U0#BJyR5uI0s>HTO7rrIuVR+T%_o#8Hz(_o>TW|0sKk0EA8ICcBu~DOiQvhm2FVgeE}ARbRIP9u5PWvr~-zUQ0MF$)~vao35MUMU5|T44I4Ui z?9@jdoBq&5ib#*1aNh$DP8cwFU}v>6iVjf}y85beR7iqgas?md(z%P7N@E#$P}F_N zyHb1&8~U}%Mt2_wG1y0KzHrg1Z@xKn`1hm7j-NRG`@vri98Nt>EYG&@S1f*e?wr}` z0EG7$+T(vL>YO>VX3tx&aM3$Umwxhz+9@=YiaQbBfyCQ}HftWyEIg)Thjtx0Mx)&N zwQSjpIv`=8L4kqbz__CsGb$-`AS*w0HZXS~05FUq2WTmvm~1scUF^0bCh`T++#MY# zk_?Ct_*g`&5vIX7BiZP!F4_xPnSp)!SKuA#2f?71mEx?!q$ zMMK!WpdGuyRNaOn0}&qvBLlx>|Iq$V)!fH)#**%9!J(C#2T5TSRLurpj|o`9jfD}0 z>@;82l$*!2d$|9&A}S#79^sVBYD)2jdaYfl81p0-S2 zXI30Ps8VZGZ!#6APC1!~WksswzXDF==Y_O|eyzRJuH?+T2#7E$fe{A$e5800u%&?t zWTs^xg6fn6YBJ8zyO%wKiCv{!yx!Aux~e4DTiZM8PGpF9|A4AwSoH-3U{qyZ$rAID z-YdvQUbQR88h<0+uL*8*=H)cQg03(Lb_?+f|-%vo%Z zx9%$eM^SHG+Xv#6w{=7M7v3>$c+fI0EGxMrYCEKPgufOHu3XuQ!IEXc1=FM-#j#1T z|HIK+bI72PA0IGhJ*C!1dA)c?Nm z|D1uguJ>|HP(2$OK}DK9d+wSw->oGavO!e_vxtr%qC#wy9kGtQ1Q7Dfl%%BevmdKi z`p8po+<*UGy&{}cmu%b(x%gkM6GGkpq9MwC$W- zRCFZ|T|Hkw5Olg(ist|f?Xz`*l+pGv`jWE8F@%yRH3Bs5JjZERwPrb6FbTV&HM=w&&_-WkQTlTS@yr;p6-X?KZ&v#5 z-Mco<&8#|h;aQ@N!*ulvMyvH0wH8x& zvxTlT1bnwHUKgwj&_(LH>F&}^(hb#lW4&~Zii>U@>=!+J{8KN!_~Kh6py@kCxw0qvNUk^v)sIVQkrP_YCrLljN#060)-0eM7-!o_S{rJ=V)0u}iE&qhfoG+H2 zPCui1rTnyY`;N5pOP8-(zxLpl%a#Jx^8|DDc?oQ(b_$LRP_C8Q%25F8u4zFqyR*pu zZIO6p^WoIgefxe~yL!tHTjaZL9uaNZC?!{~+eoM^E0G`=t&aqcLG&t#IcdUUP~5cq z?hNDyL_)2Qw1l#tgo0P-hyVqTr3jjHrx^DAjHKs#)*t`*GQd_ZNKr zxjNrX%W)Rl7K_M;GZETuikWkF!(L$|ltIKJog1DR@KG)R z0|8EyD_R!_eh>^RYrOJqVCNvW@{;QwEZbqtE}T7o%_}0Vd)I_vgT*uzTwQ%=_r@Q7 zSo76aU#(lc=U6rVU(hT3f_#ZJjJH4H^Tq0oE0(YP_PbSIEMK{A$NCkYd^CR`CO@cO zA{Oi;)nHfG$S8gxH*Hj^G9~m_wyW8tsvG_rkT!C&G52v}5HYBEn&{-_%}@OO{P`W= zQ-A!#(dMzVc`Q9WmYyC9HI9WEqot2C9X*|aJb@^|5n^jNCdgCT3m>b|h$S|(GGS0! zPAlc95g|lUX2nGy;{%B|Rij5tdg7_)hV{QIzIW%&UHU!v+N+Na@6#tXEG(?!h>4@e zj~~}B*3Ub-_b}CvmX>|h8euF_$2@y;Rv7??IwdGe&p0eAoSLG87QBjzYrbaJY zv|!=;?|u0B=d0GOUOs2>+@$vw&0Y5Cr^{C?{d`Gs^1KD#efQml)nBhrqobmlHH-GJ z^iRC2ZD?IZc}0VlmaE#DB z`ACZpP@^FRib0ATWppw~djTs$SAY_j_DE5D;Do59Mhkff;15JbaFEnsW1m1Fj%hhu z^1{de{TUr&HjFVF#h6WI%!YC0VT@T6(let3`^By&A~qw%8{TT0c7zcF&sjE*4F__vXboEwto%3y_kL=g4pJ&CDLx+zXJ6qR)?2|GG3Vs$# z?Bn>^rrZDJoU&rawjFDiBqMb^Y=6c(nLLeoi$5maZ~0sxDd)aBj}s%Y%56536_wQ5 zmEiDDmx_8M^dxf#!M?bD?P?V5WrX1W*jYs;W_WlbF#vFGz9^= zRCF*ns_)2V#k~?GIC+9j?Cql&t7TkxBOzU~i6G3t{a2@QjLwjCf5D<10GBh)*tV%3Rh+=(VFk#ZP2k(Du z;(x4a7+^G3UAtzs1P9gG3`FxGf!6$Ol|xV`U4A6yXT!!%<}br&`4%HP!=55#g#NeX z4+Ctrs+x+*{EO+xHubd)b;^GzhZrm27D!l(HN-7j#(yY}2*Rp(CeRgSIc7~p0H%20 zzblp)E1?+@mqTp~)Eu_|R6Oy2c>xD)+|x9B1!x0{o@78AcgAa}B-=%Ha4jp>mqs|P_ z)jE}y`uVw)k&IUCVQfhur*`2rE=i;+7(LugI#su9X#(@?E%)RdKW*Ch-P+|J&q}h+ z1Uud{>AiXLXU$#s{wLotD>wbTUd~ogA5I;wN>hu-7A+N90SiHHfyFh+R8wVBygYPj z3mTL~fDQ?3g%Y^{j<>^j0D2(7ZU_?DG*A-d#JDUr13gkSD?LZ3#N%K`lsl&6H1!Ax zYYPpLKnAei9ku~4un(~qAandTr_toSHq43kj6*x-gyf&LVNUpJLI|8v>^Fed>`I&ybLa)^T_B zACi#JcgmD|$Byb37u&7-G>h9bwL?*@J0TK?Tb?dnPF1Y7lCWQOt-->SVuTrN_j;1~ zE3uJNhAhSH>r_mr2fkmjc&c z3N=(NM2wx`ylCzB-+q;{b=z#!E4jGQ4kNZ=4XeW*$k;EYMQs_I&0Wkp3fi@eq3 zL_j6=#ZqDggD6J}))c152I;~xnvTk%uN~Fo0eM2hf19)|I+%{Z=z%us*sM(eb8}(^ zh{#Wi%M@*ZO&Uq+rsJqA{@PJZE|91Hd`ia%Co;lA7~x@za3VL5$Ot!w!xP&8iwEvn zL}_^v;9SUCLd`CUbiuu{!czbnP(5Mp4ZH|ru_9Qiz7O2p5IJn(sKJB#_v{eWp=;M( z{qGvnAMfKk=&G~KlM>pNS60$L|C0k)3&WpbOa^=@w zuUs}yFgE|iI8k+C{>SS#|Mc^h+!MH&a;n-SFdK}{y2uv(R-+WAa#XdVRj_S9-K7w= zh|2OLK)p;F3CYXm3__)>yFdlnXd>*~$cu0jD`q(=6iebV;FwYmTSQ}d62w$ylhniZ z{f}pG^!dQ1J{OPgK>B>3tg2cMJ7jmr1Jkp}#1iAc9@cPfkcp&Ep{J$S9mRS@-xF&Z zS`FP!Qti1jaj}<7))1q~*x&*hq@4ovU=&9<10K&qi>SmFK z0{$i%SYkqysUc_Yp1oOCUE2gTQs;X1q^_(gH@)zxuDay9sj;G}zV_sqj2g4^#fv8o zY}>l#Fg88dtEsL|VznERd|QN{KX$n;?cDK0oue@qd)Vr7^G7dsR&_(HDqHieg= zF7|L^Kuu6TwRnk?k-O3AE)%HHRCgI_f?-5zy)4xr1Zm$3Yp;mB%tQDWM=eqx`ahmT zW0rVBc(}|_?Y-kDBKgc__fDSF zvRRlnCKMM>gVHeWdVPJz9=!*T96o$#|FL5ysiv?n5A6PSYk&qM-At*kue$DJ1W*G* z{J{fP3~KZJA1q$@!J17}tlhV3*H63lzQ2bR_WM=Kmw&Wm$%kJqU8GvW|H$+U_3$2` zhN^38gE8ENcvPv)iH^Ln0B9-&ove8)L?jS;q9NL1AhDV(20}O5d&g0{$2`4VLb?xpS~XVxnjNfFftTO8yHN%H z^uGQ3a#7PBwLha<`ku%jGjwa$CV)Nnnr;rT&dzEqO2AK0~$Y zi;AcRFK5x}!5dpfwG33bThYoy;1F4l7mZi5%iUrvbCP5U4Mvw>bqvNq8fSngMR_Kd zqDetx_s-vU@BzVe>p(wsr=Qxw2in31{2@mA2l-m0Pi^9G@l2+^#+HQNCTsi2t}`Bt zHuW8+gb#fd?{bpMtC1bX21E?L?~#eB5@}UdCMnI5ll z+@EfEk8XO@X~9=-zrA=4?q>V5%2J|)tV*XOFvFF!WZ7?@><>ZFW&~jo_ci9kxN$So z9pO%dO(tvm$^J}Ek!EE+3S^~&WC$a^?VB2fNbSg_w0u7;pG(UfwIc=MSX!NxNx@bj z5(!5VR>2=LlWveeatN7SVIO>kA|mfJ@-|9col7A73)mk_jUhryWSIqNL%1aV0 z-HWi;LBvl@z&tS>1I0Le5Cb^+iv3mRxeJ!8`gYaAxk-Z%VD2N_VLXrf?T_nHW)C17 zWfY#+XYBuSN=ccm&Ut45i3k%wVVq`v$myN8Kl=EyB_pukS@F0};33$(FF+tqVkmtY z;&_3Q>22J7Vd{aN#_M(0jb8rj2Y@6&ovBEz+G>Nbs-75AXJ@y%dW!e?OU+f2Ns9M* znT@_M97oYrN=pg=xTZX2QfFA^mPB>QGRGfNj^UN4e34@{riE#Q1TPs_8T8D{MwV=r zj3Cr5v}JA7Fl@4~(`qK#L?x4>KtJL}YciX7!CCm@Ycg#cvG=L{sFxzeePuy$7yN4x znkW}RW(}-wFG3CNMIpUDacq8l9pCz*pyr&Sf)>0|wafA9HdxyA&;{;>ppT)J9 zr=PrcV9(etJqApg{%rRyVa|=LD{f)!dJdZ~a`529o*i0=2siK!B3vsqq_JEhXd!N+ zT?qZEM%-?HC)V}H0RJ$G`3ytKq7Pt5Zz0S*Z+|Fs<;sme?>Tkq3-`FF5f_$d*gQvIK2iloWJ4IgP#ilB*)RnEJT#;cToXKy zMsQG?LtvyXkrXiflfc+TD93`f>EUTXF@b!3Yk$^Jj4t6EQ1e$5!Sz1PewSr6V z$JyA+9V@g0tF!$}FH9ZYuWR?7gCBU}m7YDKJTa}?+*@?+^}-9`VGlesW^}^n0h1?B zwCU^gt-AjMwk&i~P+@>hToqx0SFeF~?&GGd;@qoa70+N5FHjR%wVxsPG3nipmoB9! zxX{Pbo~ypwvFiZgsr$BV{Wdvi>y~)?tDZ^kFSgE_ot%`sapRUBKL6|;HArJrzU~H? zlgT3xn(+6~m=>$N2yfadZ()@WQ`<9@FrUC(=n6(XJBJVjqD;}wq#y=7CsRZ!VN&dp ziYp{Qm>_4t&&<9<4bo|asT`pCNCQ;(0S8!v)n^$LeH7mb!Hc3WXoWRkA({#E$5))*F-q6DhNsMSnf`{Khl=+N+La{05&ksyS@$f!c7_Bk#o9pVu$g`Rf42@*z@Kdf8_fPj`4^Qd<=S!XLIB z1cB?|77%Lp9$obaD%>0;%GUYew*i#YMvoSQ0V@#Q6ph z4K40pfHfpn(bL;M7}$}2`+Z0M^^Z`9^D**oP$op^lm{Y^x(KH*V0p(S3yF)IFT-o_ zYeC#r%ZL_{k*#peuon@5ooc49U3!MkwQuo#>9Dv15v`k{~pyo($(f_VIgdPS5{g^mBGb>+N%Hs!~yf4LqOE8;BoxAYuC9N&g0@u>~m0? z)zqGO`lv~JV7b#zP= z;!mp>-nNP1U8|YFe2cG(dK4xfw87R~FK2~_>)G9Gc zKWBeQzkGkHWPiS8n15*NE-;mY3kpo-k;aDf+VWGzKvO|Mut~+bs0Epci4Crujw=E8 z*i2L<5o^gLq&uWf?AR9t9v(j9;qLB^kr|9WG`Bl{)zMq6ntGr$J{=V)WV9>S@jneC1@ycvxZ}$TZ`}IRs&&iv?Au(BajCMPHZAS4>YbT>qOLgCRo~EHa?35P zJCmNtF=5C&A^71!!A(plaR`Y9CYz+q?1S)w5a#evVhIzEF=f90`X!AXq=us46gbnu zrZ^@g-aa0a^mEo^-AVSx@cIR>&s&q@l}~3X&&un(naXIZ1MSYl8UBh!p`WSjvL@@@ zmAyRcm4EWwU2*3*K#8y>JNnk?55L37syNZNR%JI|8l*kHN6=KVcEBn>((YEUStN5b zoV#tpW>H*DpSeJgZDuC0m;fv*b+ZS8AQDT;&uN7!*7oK&vq#)%q6OoGaZ&_CB-z`B@_V7AoN$IUDFZAy-PI#!8;3! zxQX}g1dJ3c&v@cOTINPwBaWQ}yN^9a_bD1=UwdCYXN|Xmvvx2c;oxj#psuH`B|_~i zvYdX@ySs89Q>=zQn>Y8FZ7j*Xl9{SAG$t9F`3HG*ls(5)@7U8D{?O9~s0jU^O#)AxNe;?dUUdjTn=+yu3XaC(4ZKN$u4qLyc7O%<)&kf zc1e_nL;y*|;SFRYi%gHp1TLkMy~Q2Oq`0q^f0AtR3uxV=Tj#G<|M24vKPdUunFhP7 zr!ff}Ft=n&byd-o^6Ez(7&md+BizqSL)pbViib7SCtGB?Um`FG)P1?FnZyU`l9_19 z!aPvu5nfn-xVc-OXni9Y1#)+i?Rg`kz#YDUk%ySs3+c)ZD{x9=Fu3H9Vj{ba6m+ej~X{i5f|SHpIf+0{-F< z@~}04l_HKLP4wgD)3bpUVSZXzSdjD@>+L2NRx|>u4Nhh;DX?7GXFFBbRb4)^LP|hM z0bO8OyuZ&gRi+*I{c>(YV^Mn6$$3dm31Vyl+9BRP+G_9*aMxMQEyg{b7xv^6Pb9f! zre1#T)t8@n=9&Kfvx5iqA2@vU@S#Hob?lrtsLmdFE&cMv+G^)cqn{A_!zV`1`|k5p zFgsYC-2%KJxv60pLjZxhzOS4mWAvMs` zcEya;ZWYnn6%RQ-r|;wK(~M`+RxbQ_F#Y>7MvMn}PR3|4)&7k6z5Uh2dgaoW12Kb4 z!9sG1__txYQ;fj~tE<0%0v-P5bC**ylgv*%@nm@3hwOdody<^Ikz5{0EY5B z-HT)=FCYl;pGuO~8y!1#=`dhm*EU_d4U2sK`A9~=YBjsLmC@JruJpU}%mWMo1Mw9j z;_4s9@EDqDW6+#xZ5N50$YHt@?v#_6heHKpnndS7&u|Ji1R#1_r%0x*RaCF)I+2_p zv)RqPr9c}V9vA~5QGQ+l9%81z&PIg0ub0x|Z-4FR`+vUzb=6j&HOdpWtw7F4fsVUe#Z4Z>L8vv#dP)Hulq7;l_OTG z-pz!!u5+hAL-ENIoEdC>NMEUTY986j-=yTQ6f|00NUAt?w4l+G@;=VuRToDzCzjNI zu#y#LDfc2?<%nCyG_ELpF4^RI+Inl%uahcQXT!>KwG81d3E%+RQ-L2dzi;P+^G87QD&ByamgKlJM)hv z;QYDsB=DRgPT>573mF$l{lxVlIX&mnG`sw4?h_v*!=FSaQ~XM#FzPhgC1MsSUys+| zDYC>akbnH6&Xu)(BhNL;3UZhCDaVyd2t3~_dpJ_4rI)e_}RGM5kX*w3fi z$Zg)MY~a_MM8|+0N;rB*S)`lsx@ssaY}qnJZ#0ULpT#yKBT3h*m({tdQQ&QJu2fmg zRhE`)wkq`*+fdtFGJpoy2H9TXF~~OCw%K;pw!!v2k4x-tw!Lp#XnU6c(v!CBwr^}7 z*$#1hxow4QrELe_*=E~k`-T7gw!O9#+Z_Jq+kUWpZTs1F-j=C?j#5;V!h&0Abt=CpqLoYTmR2H-la)$vCcAQBBVk|223@>3NK{xYufD~eL(2H7Dubvvc|0SN{YQ{F z*=$atLZ>yjV-9nCbJXP4IZSVYr9jq#f^X+#K-DW$&B^bxcTk6O=MYVyXm=kI!eRv3 zfuy7ZBH!8}`i9symIN@;7y(=O+1wq*XZ z6d?ibLbcu3j5T#R_x9V#vX8b1AIGsz`MX)Husj}C$jW4-g>SJbWiWk~b4q10;f0XH zAky*8Q4cCjkJ7w4OQWb$6~cq%%{Er&C`~nXtgmtl=DE&-{QOh_@thAUH^*@^x{E@< zZkv-5awZj}!V07?IKw_%&h_h;>?n)uQn$`N>54Z8!~`aRRanKP{t=Hby0PZC4x3=+ zE?-XOoLD?7W!KT~rNSy?DaTZ(#s|9{z+-%nB#Opv6z)lx@4Clshs;TVjbbY@-&<|X zmil@SLI{AW$l6+nsMX*m$Qv43LE@pyylUwvIluSkBd0QR_J5mzXeFd!a@W9E;DlvbL)j++1A~Hs+Hj_m)1_LFLP|A{LCs@ObIPDYTw*ckIp2)a8DMVB zq=+|!8D6+}@xpmz?>m*oz7V|0y#X75cB2W+D1C*?;hN~X@#w+*`}X{{?@UphM?iH$ zlhxMNRCspZCd_!=L43yzI4&;|7e!th?}H2)NE#a%Eb?zWiEY7T&t;EHjZoHHI+>A^ zdF;%!CjZ2RkG{;}1n+vFfFA<#JmDQ}b;2|c+U4;6Y^>l7+BdMO zxAS230YHSonp48IZBzf%T##S5cMtnS0Iv4PAt2!w_sKr zK5VlcKAf!X-+$ZsVTt?qL*t@ByOKM3GM;_S4scofq?D;<_HgHbT8S{N@{0Vt@^WlU z(~w$;h$SCC1K5!<@zbDMDV2R2{{bl}T*YoEuADlZs|TF=84eSPS31^J&aXdbaXdB6 zV3gmG*dMy1-8!{iGikl?urnUvtE({><_W9?|HK3+I3IA2n>j5bwbx85HPkTrEj>** z?nx^tc^AXc62x9>xQ6I9Y(0_eK^e=<1-HE(UQAws$H7b{m#N6=i4Ejl`~A+F5kucx z3iMcRUM`A$R^aOEVP`0}!Asg$PoOB7AP6h*Cou^cl!Y~b?PV)lCx88w$DVn45?0j> zP{sDJU6`I`33_+;P#Hoj+}$Tkm^fkLxKPnhgt)l^X$ypumEvOX=Iha9S`R`?CYl_qM+qk0krksmHw1*5cRdX}URpZ%W>06F zK7T$VGu8D-uN)8*d(V(Qn^Q<{@;~#=UF`XRk)Q9)b?&0jhn~bScjN#f$1*e}=@$(g zUOvpj8221GDpzS#6?NW|A6+z9ci+=6qunwqkgvhBqVpF8?=#ol#QN3Ve$Dx=z1j8Q z_k1tA6@`#YNis;< z$S2v`9zt@wa{?G{c_lLlEJCt{*v|q}h0aU4G3@j$BFBYmY&u z*%xv4`#HG92E?(ixK_sL?<1*q(_jR2%O>TQAKF_sujE1yu4bPH=QV;_E?p$qJaXhv zYRZ8F2U1cG9ohEfm)k@#xD=e!BTkCQoh#OCsw-UKL^%L{RVaRCs?olUEgeHFz#LG!$IFjD(m)WtEi`B`}Q(Nd6F% z1LXn2N3`Xwt?i;A+#7rIeuKs(Cd9>fLEk0$tEi~5LPj}et*VIh@QCbe+b;Qw@07pT z|5^UB&R@LnRUi4In%+_VKK)+aP}ePgQmy|d`O|jF-7#b&D}6}mSa-FL z^ieA?=6^@&!%hn`-dXgd=8^int96q8`_{Q|G4ss-Vsr+K-tou(a&$U%w$AVb3cY;s zzZjpDX_-y88=rS)WCMTJs)}X z4D0ZUU9j-}2OeDX_|iw&0zsLOnwnY{ID_&F&^rM$eB*MZ+S@;TR6>F%E3n%SQ*z-4 z?X%t2pK^ZeMb_?Q3=fVwqGD_}_i%Su4F1m66J$}I`-cHzx-2wY|Da^Qx_i!Ntn ziT!K<9M#w$M+4{}5+rRwcOZbFb ziG3wy|K30L?ROk85*-&hRs%&RDNCk>P<{*eJpP?{ESGmXp9MKfsBPmx`oWUWSy-=S zL8i?!1#fVDYjaJ7 zg%6I*&nX!^cIQc`FUWEdGHl$qaU+aI8E8(fpF45lL{>pTs}EIB^q19?*IBE}va{BF zaK*yA=$zEP;h|6eJ;-v-7zLM*F+KfaaZODPh?QMqVqyjdB6@t_$dRq-5)F>O1x$jq zH2~$8SKzaBwhwgASIKSV{U4O~@Dt>6?WXy~`xA`(c+K z!q+W&q#{Vd=+RphNlQyReDp-t*;J3->d49!D_5?2d)-PYzgLe+LN}1PcBv(suSM;cbjt@LU3vku_|U*TaFTit0M=p_3n7ylB+oso`(DxnwY6 z)5*hc>YYKV_h&wm=amM7k{7)^|6CEOi)VEu$d)V>H<2Zznzgno6|NQa4F#UP^_cFw zGA#H`t4XMzpRdnQdssM{+SFTv>HT?H9Cj=E_rGhJqrFL^A+M(r?X7?WDIpucphLv}O-a3LCD0}&*$p_r^($Z);L!DKVQ9TDNb7L4Lm2C{6pm-5 zr5-(Y^5lu5P{9rzI*fe>CwpwH9kkOtw;c1QLM1x*4B%Y=FiAab#|$PAr=3Hmhhr4wVZTm(HF!bM@?T&=VbIn_(2} z*~8`K)mGiWs&S*LmS76-s}X-x0;Gdt*T$MSFdHGiPb;TErW<8jV|6L^-zpky(Bp0* zDFs$l5Gh4O1o^n@;W^<|t{%pah@c>UZv#wP7kA$vQGLAFHPqzCF8r}M5dhT(0B3zf zXWn@rW#8UC`wksGdhGaN(Kvtpcvov-DuSzz);I+w&IdeINlP^XeU&08G*quWy>#i5 zk3F$$3QI;>5@KtAAB@ExQShN++4Il7`mcW_Vz91-{UnM;w7OEGlwyi3RrPiip&eVm zq|wyM+(b9bdy?`k%_x!y!Wl+Ij~X98ea4I#Q{&?%B~6N*FkaCPqeLzP9Z)MsC;`r7 zoDtz?Aaw*;6l^QKi`vPc)?#jS0|*Fgclz4ehIWO9zIiyy+Zt-o3mk`D0%KjW)Uw18 zLDn{Tth%l3Yo(>RIi+RS*d9UTT3S}9_l+F+_4}iz?}Ot<1S9zh*(2Y74~B)05=*d9EkOt0XMaY7+Q4NzKbG%E3d^B+dYWGcU#aA9z#j^^*Zt3#;qv zKK*!4##O7%b??5TS$o~v0wd}@H8-xO{qgfIG2l-Lt8PhM)Hi$8BX?|LfN)WCnvD7Lh<7gF#k#JtRcc0Ixl|)mm*y4rsPQc|!IA zF9u-s$L1&!OzR=j&RVrvsYI41Pfz5G@$vHX5gm81 zGbmXN77N{>;M|1^=Pq2yDJ(982TBaURy#xiu`KXwW1mm24-JhO9UUFLV8MbpQ^o?f zB=tAx@3gMPnjF@36j?WL^AMiOaipeqg{QOo-PBfMNp*#m+5G~k)5o`3H(YuJ0g)Cc zhTW5&ay>83K8suNi)!(hY(_tc_Bn*{u+Ks6Nx*;5#ZW8gNTtahi5&R=<*fT&HK?I2 zWZtrwu?ZMBcw*O6xK*;| zQGkmDI)nxEV2w@J@_brM4jw1+MYwtmAMWL7nXdW0@2uNQ)9h(FOr(Po9gyNrYD>DJ z3)iDyUHF_0IEB&+;)ulHIDIr7+W+#J^@Mf2?$!@_XAokN+<@9%-+*H^U<#cN`3lJHEUP!{g<; zc+l^@cq_x>Z^!gaw=z7A@iHd%Vt7;yjvP3M{pWh1s3KKgqIZ<~gZQmz*QNLVj(q12 z-)x5?4_GKrc4dnSo5!k+>|4R%Pvrt#;*mV$19Kc))Ur|^pHfi~!W4tqU~+;kkO2pH-Lbabwv-=!_~u)bvaqWZkDw2JWZSz+QE21XChj%dX-j7*X8C|28aAY> zlmb(1DI3_klp(0g1k{Cg7KWhDhoCjNYk7i!w^^=dRMpj6tYw$aZ~SCGBmjY_dabvU zffZND)G(S`9eJtN{m_}0Y92_?#(~y;6yj4J21Fwk-vaG}?T?y)J_6HDU11ecQ`MYI z*}iq_){RnXDupF|$M-wd#Mzg)+&B5LCzd{Z-?Xu#$3$9Kg@CBr{!#ti1+CYh@rlS! z6)WYZ@*Jh+?QP8wVLk?(zQHdvJbdoL8Fc%mCUx5l3(_$d2u)k-*HX zn`I?Ar&E9b{ZMAAu~ln#!qM&T=H{$Y^6l2Tnwq-04vj_urfNk` zD;Z8zImx-Eo4IF?A50Zju3o(=xhwGDC4TdN{+$S7?N`xmOZjck2mLh~OX;=D<44nx z$&twSTB?h(PVVS#HiLh2RaHfkHZ*p@<4-PmI%5!R;k}u_V~Pc^QZb_ ztl~bDd^5nuRo+wu?MCYrG?aR@cf%nA>7&v4^%&3@=BZm9wIJw?`(6GUg0WlMk zl3sY_DX8ZDXjhr5kB5e9m+M_>t`t}e0Y1E{yM=+8@p?EqW^~-Nd4K!c!r5`-$KN&Q z;is3q_`(}+CPBse5Ng+IvW?7a0^O?-%OujVTjhGavcs;_xw@!sZU4B=_Me9Kk3;*% zq5T7@E!ZJavKC}XDu)%+V6)4haH(6$OTO808qM#As4K0Tr>m;Hxy5c*I?1uS)T{Mf zjuhM5TI-6=rT+5mcfxWZ36Tf?0=Vb}K)jyf-~VuRq7E{fYq_JL_mCro$?2;IC z0PYkxVC>8}GvkMc_ZByCR ztPeikhdDrjO(>b%CTK4gwA$e`s=INi%t=(f`_sweC-W{-j+;rC0mdNp$AeJb=OXpT zSbKmy(*Adv0|eL7tA)t^S-OAse?I!?qjy&2mL_}Jb55T<^W%p5#y;}+KNiiKIxc3^ zKsypr3qa``^@2thIC2aaMPs5lTe9b9^3y4Yg@(Gh7#jUULnH3Jf2MR_PkF1|oq78^ z`-1Sx)ibnTvd5A*1c3#Qoc{8{dGj86_^B6PUHaTo2-A1Cb4RPy3MgW*!37ZGenz{} zvCa-hJCKKvthM2gyy0jE?*R8HsP)r_uMshu&>cikA&BBMZmytS=rRtiypamVmzkBiPE&_-E=YQ zY-V~v%8nlnU&+Zkc(uG;9yQDz8j@#IlM8F9;nMaVy4J=Ux<$u(h+jp;%^H>UMw9!X z{-HxchD{hhWW*>aQMkftEx#$dkp9R1JQe(3YByG#Z^H*emP3q}vAZRwAeG9o-uY4G)SZm+&^!_o?ZJbACYhMMh} z#Yio7=6K59eY3G{5bSReOuvO_zjaDave|) z_-7T(rRR_I%6I60ao3k$Z{K|~x2m~0ATV^`@G%o7#zse)Kp4thWd3xu-+&6x7=zx)iIcf8S8s$hvL>Q}KWlM*=*;;nctqEzG}JaK^v+`khPc`lTJ6{o z1{Q{MWbM_g_9o0CUWL(U_~cYwWBtZ$AFo`o_S^5beEb766u~)v-_Oj5WLpYnUDp_# zg4*44i+ph}KTEESxA73kF0Vbk_`Z9iM zqrR=Wy2HiIMcqfz_FQkU)RkX5eQ?(=l(y@B$l7D@6_b7XsC_a2_F8iyf|?#f^z8y} zJ=CmDK|?l{s?sZ&nT5#GeDln$g7)q28HJ6kBQ$K#uuji9Qchkftg3NR*~+hE96OQzeahJeHaHwqn+`#<6yz4yA*fi{R9$6h z?IUO&aMz#1JChZ{oQeS5;u7vLZa9Q0f(zt4I6ddgx!~>1sX@ya&5ecl4dOx|Xc&?t zXq7qVvaZ$1go)R?Ai@a7s*DU)1i*4Gh%2XEc*K)SyYb+p zBFYa&zwqDW;MfaUTBCmTr6u>@A3kv6lDFcqavyTJKRi4@t7>XIeEhoHc-Ot-Dm4o zZxk_O`^)7nzP`RLKn^!U^TB>npRb||Vx(!Cz6c-{`c39H2uoo4gK`a?Bv<_{HRKV+ zOo7Z4#4K&y7xumcZhpbhS~O3DqooIQu^>5m@0U;!^IT=QVGv0OMRat|&` z09iPaFl!%NxM0ronX?&hoP-%-kDynnI%-Qxvq4^;U0Pb(p#sprD=;M3=&v`p%fPXw z&@&G~A00i!iLRpcmN^(RfE3#F2Z&U{}l*qf|iP_VRt@b?vZMEM-fdr=D24P8R zZWW^3hHZVcW*p7nsq9h)HBRksSoZK7e%$v6$U5-zCdkBP2PQfgRZiSN`41 z`U#NeDm_%6 zs_fiMJ0ku$rro`tYrR}tdg(}-tCOg$HyZ2HjK(zjEh-S=T3Aqw7&2hM>?x*0q= zzxBvP5}siWo`K|If{AxCNP`IbfK(=45IF-lbn(i*iZr`OgOrnfDaU{~0&r}$V^7gW zI)b&e4IuytQ1LIYOt1SNxG!l~M8wa#3;QQN_2#S3B?Wa9msNO+?n{>r!Yx5pj-&tG zOPiW5?%$Xou`w-^eYEo|u#5}|+@D6^(^&0>e{bCU>E_Li^_O>kMmjkm)uuTm|CrU$ z+He3p0f=nm?u#CpjCJ8dEK$d#_jib0d*dOX5AxyUE4f)(f*?PD z^lz4z7!c+mSHSZG+69dUN{s>mF65k*f?$xd-535m5PSfX@9pc`&l4Tp4-J8rbQx`r zc3nx=I;4=~r%MV6T_oksiXr&{>H`BY2y*bCnLa@T?-FDgJ|!m;(w%e3gjDNrD6PO=gzfE96lg zWkuzp6qHln8g8PX93XkMX7jp@Yfoff%ee~ofW~+D=-7#)hR4PZ^A&O9f)$I^lo)w zW_ri*VU&JFqZF8#Isx$mJdgb%iXO0G%Lpk9;fOK{Y*MdrNNw1+71>=qjrf=2^H}Nk1Lr;e^!@YMQbC8!n3C-5Yzz}w zc}}5dc4;wq%ajl~mCoJU6VM^uEuLq+8e_1*+HQB!IAH^y1qr$m%~A^JV2# zqU%`Sfsm0qw|2ygnlOGsc!S6c%XjbIv+qRO z@w6j9fAQsJ5%Zd`+*u!F3JMICDAjnS=!70C#JeDmh7S}ipy@Q~?!tLB@!8>R(C5<$ zGCBcJQS2Sk+(%=U#(l5VbBPE=>V7kM=V>HD`RhJgg|pSY&R($sd(YU1UJ+*x=09fP z0yNU)xS~fRz0K-;uVZz7PFkI7`>f9YSJ6yVv)h!sKxBI{^*&{Wp~C(tmE2W3%5U$9 z9_)u6q^w9@CeUt5J8g1=aOUNMf&{ZWQ-wr7e0NavLo-CxS6*IUz0dRSi;Q3YK5XJQ z6`y^+b`A4Epo+=lIeo!`>7pFrqy_mlUc zQ*$>_KTHGB6CO0#(Fa{KHO=l_(O#H|n*=CJ(2R-%HeCdKmSrK`5NS=n(JF{F4+~y* zAKs>2wu@|!7{}f-V~$|2NbS-hp@uV}~4tdd5*evs^-g%&;XvcTo?f4Fg9j*X*L|Aaw>5d?6XdMUzbT4)8eP-VlyJd_3l5L|k(S_DT z8YMtfNG;r@&(is1T}k?ElE@-N7AVp>dWiZ3*!nP$f#?qzCb2zrOYhuL))bK`MSdlf z)!e5n8Z9KV0q7gbapi83lmq~ANMdC?!&p}hnDETP!LiRgh_qT$rd%p};DIucaqAK3 z`6j<&wTR?Z_SelMwEU$@eAyp=l!-EuSy=~|<1y`1gUBU;MW7t8qf#AdwxdY;K-7_9 z1(CUeAr~##PT=%Y5oH2qIILa^Ct(|SJM#bh%(ij!k(ot(ynL{JXnDFg*`5e((+!&w zo^VZ0v`3gRv#A36H&q2%n!`mE*kcIDjodFBkLi-wQq*96A?heS=9iwoI-V#DPc#%y zM0+SokwGEv=)ls&Vdx?@f++}EPFb8Nd%3MyT4|HHc0-(fu41J|nZM_QczC*A zR|wx_S`ZOzM{uNGaMqvOvu@qG-_BKI_qaynl-}`R0C-=nGfcYg*#ux4-es%h4^19S z03+y2lzwUYW3RpT+9T7WVRFWZW**KdKnvs?LTjMN3QQ$H8P&pZOQc@X8k_vzokTM> z`H4rM$L~TtiFIZS>gmzY&`?!VXKio2SqXS+d07#xm=e+BSmv=~r?az9A4^HuzweJd zd-f3MPjuk@XjP&W0(ieSWvL#YY=T2d?qjp~d=d|~s7=luMIN;Jd579QNMT9XH~W-r z=0=eI9#!pAyiA~UOSHg_{2F82_D`a^{gqmuOK7Oo;KSsChXN@g_?8w#s%R(%8a<4l4Us>xb8|el z2cB2#-da()2^&;zsLjhaY}<9{;QpQHo_hq3wx+hW=8}@;wxdTc-E0Rqp#A2hqn%@k z#$oRrTgfg!HeJF@Z6ZAwPns;Cczh(93Wppe;;OljBf?)KqJ0z4zCjdE@K=Z$K(Rz) zSeVJ%%|!y$=-vATg#`v0Jqc8!bMp=qx#FJ?p{7l~+!P!$e281=shtR`dKxYLlK#De zXU?9^NIwlYV)o?=>_Z_DuCO?~|6z=bbT2=b^4aRutGA^WHv2|`#Vzj|!ecIEU&#}> z4keb}fCq;J0MFo}(fRa`9tE{wTzG%XX8wVJLxx60Fq6RUkH-*v$M61Gv+ln8p1Tu) zOB_EoX8hz?3m49qoG^6g(20|#EEJ_G#*H5ZOay>6NfNRq5zrc7YvKW}89$M#r~yzK zJEdfB@PZ2vt~|K)h!Bt_R#A#+|0~1|9)>_S^#I)%pijAL|~=9z1gT;?+W!FU2_*GL9ZRbLQ;XtSeWpASCNd=cpQw@$oq3 zpC?U@Y=w`Tcz;;Jcvk$7&w~6H)IHrOWIu$Jo|k~OfY}npHY4+N+R3yt=gwxF%1HH- zfVKp)1;~~VwtcY4Eb2~BTSdrS@#D2sXLtVc`wg#=_syB<*HMsi8?l`iWSbS!zYlWQU8xYk}RVxF= zkB)Z1YCQB#P~E`5;juFpFJ3$|b~u94dOnMONK#oVVRs321K2M3;_?gfkwF#K!3jSJ zyNkIK8X}flXXn0X=qTiB^+USWe*L_C@xdP$T>y21g97R*tH99R0Q^^j)nxPw4Db)` z9~Kr8&O8Omt%7yPx1Ya%KjQMC&%`<6hbip_pu{z`V65r=xwD@YowH|O#7`>Cm=ey? zg+57uPEWz1_uPuJo``XKN1Qc9g|L)6@Y+m z)!!LsEyd2<5ocXdUS0tV-yLz*QcR8iop9Dz*<9cyxFg8ASF8Ic6HM!$#M?r_JqoU(r$3eT?E;rK(@HPIe zQo-H@(Pv?y2yY2%Tcw6&i-iM}JoyyejlY;lzaT4!qIBI*Dp^4k6D+%$laKg(2_(Z6 zTjR0Yv0s#3nK|={$bI)!JUI=`Oo@Q$f7D=nduC8)c?%M;{BHdvele3$M|-dDoy z7;yidXz7;vI)IlQk&X&i_9e&)1lf?RtRW&-Wy31RzXaka*_DWhE283bM%H=uxgZ0X z54eX;X9R};dFlApr-Z)o*eyYUQ0BTzD6~Ldca)&0vN_pUW6$jokBw-}uG=>>u|u8T z92&xHb#8=ILFWe93i+>OOtoOHDI&=Ep%+9JEXaf7^Yi0HL^(cXe_59D+s}I_v|^bV z;n9e=l&2rkXbwT=E-ua|M&EY$L~%3d+@K|)zi_v3uaV_PwtA*M(BDDw{X z{t~jdBwvP)z>Z6{(~ZvgijW4xPe%;Vgftk57WE?klp|nSB81B!iniSAS4A&KFyeL? z5G?TV!nAE~?T~{CRrH!YDQ?cZ`(`F4#z(o-0ePBttrFYDexjG*#OaF#WktEUSs7<9 z1smlpwJlC^S0kS06>sx@R;^gMcEi6v*qN4EAKL(AqgNGD&}*F|rp{UX z(Cpda5x#-Wt<~4hR*HBsj9(nELq+6yB}MSk6&IF@UZj1&(nBhB(#yzy1;RH_hk$ns zBOC|pP$826G8$p}_i*#Vc)#N{j<)O?qg^9NXM*h{2R$5X6=dx)S-YrETR-@keJ03a z($izWXrOvw3D|CA=Y|eFCt{;RuEf?U&TE&6PzOk98#08)<=x|xGE>ssbtdqMIqu$K z#hmQ8JEykB1Hv*NE|r-*Ma)TH(>jYlbx~z*{{dd!#yU&z^6FE9Cra={2{gw@jAh^z zsV5$XQ^yXU6ghuwoc$$k6|9J5LL5us?`F@5+_ie7x8k0KwkEeM?(Y&wNI+bXQq4Un<($>gwT=k)1nrJVR7?u`4x*(f~)CnX6nT za;5DwgZ^$txhUpQzUFV|!J(!`Ocjb58VxwIS?;GqU_XszamBRiD5WFee35=zsr%vfcj2JP{50V!V71ew~ z^sTL_M)(Z=?#Um}(p9Yw3ya6uBfqK_xxdp%Y^}h~xF31rH4hG=+qQhJ}U= z2tz(Z>1F_u*MZ>@k>n0p^T8M=Yk9Bm`wQ=O>;|g z8+LGP?T7|VME;8pJFIqRIp3cB%i08d{ippk`RB*0z^BwvBk22smer*R%v4Rz{b$XZ z1?LX;5_YlQVIMkMtv(@DB1PAP2wrtkWSL4`vh%+wy6V_h}TJ!o7#GhioF%h}{3EGd!Cp%8>jpx77Z)6ot z_xKZzKOinHpi^V@LyHVWUyny$6Jy>G6AxDh!b!4ML4! zHVXpM@m?*g>KhgDoi1!MI2GLf)EIXA3Wm1OHZ@P1{=nkLo_%RqPY8s-#omqJAhgEr z+q8yWFO!MR)_Uw(wY0ajSk7jZJF5}d0QxsIo)YR7m=>j{l`LJ3*IDcBfDhKTw>4OP zK3T5QQxF5pIK9af8|#n+TE`Zm-Y=rww2mz>@jl*I-mxlh@G(>EdkiygHLO3pkwk;! z90Y<0Wk4KKpm(a{wPF4Gb?erzU%U1lyw`5nz}Uc|^7>}$S6dHXD&~BQY3W(lZWhk!)z*R|Mg=#Wqm48Q1@kjX z4Jf%B@TsRYrP;_>)!1HBSy@qDS5t*}6~K-wt1Wf)b)eX-&oIJ|MVx*VNf9+-)VOiO zhsV#DGI`SU>EogtJ!eACnJ|V4^BIhu3&t3BDXu^=w47_jMMT^HnRiZ2-IJ2CCpGnu zC&z5NJG31@J}0FBzwF;p6(vitV$i%5W9po&#<^X4SNHSsk2 zot;KgofDkP%8Gq`i_6NcOT$u^o|3xjO4ivE>HCUr9>ZGr79e9XPyxbEsgtyQ-8H3Z z@0oYs`R&+;Ckr>@jK>6!LSpw1Bw1?z*jWG0v3F}tBKp)MFaOF_7INpBM9(^W_P?mf zB~}UmpSRTMcdSXu<}Jk7I`P(;ICa+~5p+i3aId{!j9I2{*VY|^VM+DFAF76v; z<=x%a$5Jlko;;P7b}A$5a_&{M)xQL(`6H-A(DD_6Q0xv|Y>$@blAox~Uafzwvk38tqv1Ox=wd5HKJn zK0ba-P>>pN;>FpgPoKVAR>lR0w=^%sK)<@|^*AP|UU`1;gB}5o{A(JP^pAnAq4>ow z2xtYmy=t}tZJjEZy4xDK{uXR_f&1G#BqYSt4>5&7!NIaBG5GYMBfsZfJfD6#9oksh zNoZk55O$iD2JH;UtjyEtY^7l8ZhmY%V*$#2RefZ Z45&OeSD3Au{DRD*>1T?{E}>lZ{{Y=nJdywa literal 0 HcmV?d00001 diff --git a/proxy/web/src/assets/fonts/Inter-VariableFont_opsz,wght.ttf b/proxy/web/src/assets/fonts/Inter-VariableFont_opsz,wght.ttf new file mode 100644 index 0000000000000000000000000000000000000000..e31b51e3e9388ae61767c692885e5d77ff7b5346 GIT binary patch literal 874708 zcmd?ScU)A*`aeEr&hFXW!@9Jk2#AP^*iaF%D>m$1V`A*RYizM3(HL8z(Zs~W7-Niy z#*(Nps3;axR4kxkLji?F5OC?R^qt>(b{9*MdvEUjeD3%2$B%hE@8_A`o|!XqcIH3? zLWm1MK?e2g-XnBm=r8XRq6;I$r*qFi1Bb5P{QDlFa9&C1hzUK14)1z*+lJYM9QPq) z?3caX=rSZawsRXo0*VM}e5^n03FAEsgoyAb`vwke7U=fQ^aH0{@_iv&YZt|LJ%6L~ud=lRlXOf6eO0j)?yr{C}D{e!)C& z2ZWyluAVw;>6C*_KXxTT*?K~2nx-QBJg0**i8SLWk+SoqO&&ifFnZ)~2>%q}gQvm4 zb+z{>*f+vnZ`$mIOR|n+JtSo0FN8D=pEY;lc)RvWg zIpb$fp84}OS2UK95KZ*FxeFHFoH?eH2+KkUSnW0l0|R)>xE?ktEXM8$v>O zG@DHuW4ET}b~o8?itpx@Et~&j@#S$8r0cjbYknJR-=Ua@)^rVv*L}v50ROJst!ftX zOM+D-)Rv1vSzGvwC<#%vRK&n%yOq-zvXOp|ULi^mu9PT}31v>?2L_wvZ^+R8LwSK1 zv(`tQM88BcAPx#LMi~d@gI3_>azK1a^h86nbLTBsMs#rFp&;)XzO-4MSrA>yG3PDzxz6rUNO+F1d*eY|)6ImX;T-%ZjEEDT|eGP1U(dD@u-IC-m==Qz8tIF zU%H^*1fBvAeNGhY0*}KItTA_wB3@+U?SS2V;4h$*yg&2=BZf2~l((mV(aGCTfdBM` z<7WvWNXM51kai@L3?XC446>N4BAdu|a)2Bs7f395MADHK>L;|dw6diIgm!$~S=@=x z-jajWy#s;1+T1%L?`pXdaepCqMk%>FVxaHPYc{z#L9f~57QEL`eqK+?dz+$HYH1+F~2^{*X0%kxlZVEs&5W|IXXOY>|pB|2$> zO%};0NwUcb;-NI!WF-k!9=FL7+;`Yy6&a@NWs}uJtqifr8sek$vB`FjE%ihhw6DlD z1Yhy7EsTx?i~DSH&6j0ECEUf&tnTPfEoy10i=Zd2;vAdoLcGKwHrbUl68&wm8wsHq zHrbujqfs`wCgfc<*#q)>HrbPSP+L0)UXX3|5^51A8f**WO&qA7O|A`hy-oHZO@!*a z>JWF~md)Ll_zH(?a$OQ2thCAXh!RJ(1P7b!51ljH0CrduzTy(C<%dax0{>$|eURuX#4PHPV@4 zliNVQ2iWAc2p?*b+d-}_e|yN)ZPfvCfGtc%$kpxE39`4%y)$H2n;e3&$u_wQB!ba8 zm&_*P$sE#~ECioLrjv=NeH)Z-A$X9DH@EN>Hun~!wap%E9?dMs_wh%50$sb*&Sn4nbxz9q% zO%Z<~;>;&Y5qdmAk4H)`Q|}4acP;j5kQTsi5yFOAYQGS%d0d|JLX=`Y(wj_%!#>Ya z=BAdiE`aQV6kbn<*J(PmGC@MRmtXB;tnP6AOvr7UNbJwMg1qEbzb-3Z8ZmN zu+S2EA@bqzUk$@+GSSw5T$`+Yy#T#19jQ;W^w2*F(;Z=``kMyl0V*yGSY7585J|ZVtO2y021fkxr#O8J2rSu^! zP%=(FuaxZdl5^eXr5TSEgjE~IqsREzdv!#Nhb}KWw2tw(ET4J{uGO zoQ!rJV(HV_&=+2+E~tgIMHax$>r@@SH)N|m`e01=wv@}NEwB1i*NwL)p96VXTx;fI zX7sW2i_eP`1CUB}noUuT$ri2U^XU9PwQCT{+Z?6^X>Zf2rg%nh6NH(1B77#myiCvM z59$5A`0WvEDCrMs|4LeaEZZOLub2E~8Td>)5A!IW*{pNB51u!?4#8*(uDQI=UxxP~ zgAjHu)|Qh|BR*SqL9g(cleEO_7UF%zAa7yP8AmcmxxP&%f;ou_r))PUx^0bQ?VRa zDOO>nNfZ)LtxyAHg$#61)C2k{{DF-XO@IN47Qi4yYhYUi*7AyeihjTWiXp&ZirK)q zin+jd6!U=#706xjuHpmWO2yZ}ZxjcChZMg7k0~w#uPIW1X^LmSOhpbbPmu>KP?(9J z6qG2B(oSgy)G2j92c;v>S?L0FQ+fgGE9(OrDH{QsD4PLWC{a#jure6fM%feF;Wchwsa4eASD2wNjXGR5f%1Vs$r^Oz}c#Gz>ihX9M$Kl&w*d4z5pIjT>@TF zMFVfDZUS$sZUZw^CSZxG1p8@G4Xsd5RUn+0s3mt+L{I$XoaSQ2JNhAr9nGu+H26xnogQdzz|IrU^h)SU{6g? zU|&sNV1EtTTr*HJ5I9&f82FY3ZLXQEnFn01Spob|vj(_MgF0$9YxV<=YM>{YYnmIt zo0>S_1I+_qf+iLCOorad1Lb#!C@++k12@Qzfdz5_uvErd4?AKqT&neIA?9BX1O zA_j7?KMOD)5+;(9!!5!F@*7-OfASOgaEe9P!-dx7Jfw4^D;F@W?rX}Ekv%m>y0dr|g@!mVh$Yj$gj8+CYX^(VszF3X@HS+ZH}pG2|uX0<~o_Uju_j-em zCi@FJvQEs0WGxM~@SLSx!CS)K$-;A&b!H)~2}yc4h;?U;NOD0v7RnlvxpW}<%1IDb z)MI_j$wKG%{aGJ2m~5LJWZ~b~c;w_J7W;V%e~+xHY6tsT^03T}^<#eI)^cAqfCZ6d zlV`Gl%%8+9scqr+mw2+laKE&;1{=z{l4I|Q7M`@Qf(`*p>`I5wGZk!8>arW#j=0P$)sA410tB)t^E!;5Kfpb9`t+jA@v>LoM>55@HYc|yQVTD8yPS<Rug59<$LQ&@=LH)aN#3e~;6 zq%E7vR*~O^k6?@8KBRvywuH4M7w5ZL`12V$@Q&oo_S>*bsbSRr6=svX%#ApcqVp(mr zgdE(e5z#qazAmZ7{Djc2OB~pnB<`DHH|9(dw`X)>?MdQ}0@R$`-BExBA@Ms39N27b z(}F$RQCNdbBB|femPoqYcV-8qneiRkl$(VhR}X zuu(tuw&3>YnTwUVvI)fWDBYU{lcxBHCChrbLyhs;etSvo` z?1CZWDQG2IiBhk`m|sagV(+u}$=7T;rpsfjFY8Orux_jyiDqpugD-R-@E%^QBuo_Ls4_l7Q z4apv0eX?##s?dO}x8RpwUQoM{PqzN-;VyLkZk1Ui*S`;|lu69)<7Fyx8z@4)TEsLX|Iy8gAtX!1$ymZGQR!`P%84fzU~O$u(_ zY@i`q@2nOxNYcFxRUYKt{VkOh9?=bx7YgXYwp;)Nc7)fi1iQMk?GGm?b3!e zs#f~lzbvk5wELH3RernAEw1w2eQvqA?oZ*A*sX?jo}c*nMB9epU%cmWH+)}s8~^arW7I+62~FgNCy#em zcQ~1rTD$kj%v8_br;;0#C{KACLpyL76mhjjWygrvf%W!AJP8)lB8*jQTAhA;)UV0u zba$_^(^)(H8gpoJI@`25=yXB5-0r7MfzSJ$Hnn@+jYId- z@N7yGWz)0iHMLG>wU4{@IrrVj+Ue&a+SE>u3?JpyB=Y>*a+An-Euf+qTA zd;OE>xGqJ_qaU{`bBRu`Q`tP)SX(ki7c?qp7A?tb+FVU&C>LF=s!`G4s$Affebp|c zM0G8xjWX?;(MOSe{Y20F`q$5ouB>(aMpvJh>*;Rhyz3ct#q8_3&Sl=$3!RH=ae(9k zC~?mBzV6Je{&k9K+%P=Jt9K(~AWOMn^vr8|!#J=anFFi?E1usl4u+IBuq5S1k-yO= z=3-0MASS7vu0hP>Mowul$-%jvF@}+4>X=kJ!7(P&v!qT;wr8Q9g9`^A4i>itG1;{s z<@7?}oIZej&s-M{bz({a@|`*8V=4l&^|43C>r!Hq#^=xzK7Q5xwx>+(NzwD+9q+2JONw;rB`_-*~E8a)#a4W-8_PqUV z>$-t)nWA5DTxJWu^ti(QMPgi0Ku(Rg(hfN_?u2&~Gwxhz6I1ify=JbtcOEvTPwymn zX}s?gx6jwzDejoBy;IUYzs8-i_SqKD-6`w9rH*Ox>NT zj+xp!tbJyUJ6iW_`#U;^eEU024u}DOVT8s)pLVushfbM+GLEV>sTtV_C=@~qJF-cvu7>%FvR@JP!9WIlWDdC#PW zZ$-Q3G6%bR7pI(p?{&?PqX(6zfrsJ-X8q)<*&9-j5QxJac)J(BqlYqbIGNyFN;71uFnjYYuJU zk=8oh<jy!kt#PRSEZ+rMV>B<1rq37P}-o+X|29OIoc2qZ?8&)=YEfP&2{k3CMOv(XtyMT&{hgTS8vVl$r_o&MCDM z3M;bfClpq3XqxSkP!gJEpHNYOa21^Zs#XBE;8QLM1IyDEBwiSi=8~A)P_5)3C8jh~ zD-sQjU`=oRRG*mT^wcLY%L`Jr)+i?yNKb2X@JTdD$v%lCp-*dY&?T12PdyS#>%=*6 za89fUP1Yq=Qbf}wX9Okc8p6#f`Dswn#Sy7`4lYSCeIccFOmR$l?vm=3lnAV;PiOFErZh&{?vW)q`gUe zlGZfI15oDjq()kbv% zT@#ZXlHE)+C)qz5Qs~BHhZLmSCdJ_Tq;*PVzhsSJU5HCx!`VSKn;N2a_r0s2;p-f9`DWs((hCWoLCASh(X+rbo{nGjL zm43TfhC_NnEeFnSB1s z)Vy`&N>)na%IxR&nMU{gLCdFw&!0bcsl%bc^E`K@z47uWIm#G4UfJ75q=zYWlz+`S5 zq&FC|@A(&3W+%40UyyAWl+qykx#mt0hvMv9&CTMR*hWr@oV)$9PUbuql2eqE7?Ksv z;bcy7a6(Z|a!C5goHSpxBKO>iE{?g8-LBQn&64jG=04ZlF5pm@n-QfYFXOU~QpW%N=$;gFnX>?I`URXua4pI4RXP&ZGglq&N)jA}*xGq)@=2bPag z<>%+ufMia#ua|E&*!vcQdnCIToc2g`FL>gaEEWhQIbIxU74n5wq0!Jdw=g$R=~{@) za|`o4lvzcWf>?P`iCR%mT%2oXS6o~Skd1ae#d40Fw<$^v^*6>B&9asncH(6 z2m6w&zVLYN_RPMdFi~(SQKj3dOHX$+t4nWpHa9PQ;-hvgEi6>kD6KHcg=P6ltyor3 z0jMfB)h-jt3%of%sst#@b846Km0)>Zres%M>|H69^JBpG8kk3@*tXxR+Q)LO2<#hz1 zveL)mCEyRSm8CjIs#cpzsds#W9}P6zb`sNSrBX$sj{$ZzNl1`SE!_vu{W}%a+BIDm6xh3C55W8OqJ8;dY-DF`}HCf zW12Fx;2oZ?VM0-%Mp?6@Sl(+0^p_70H@b7EDWB+!gjG5k!jzD7Zz?9KHiaBpn192j^t9?$SVqg}$=P{TBP zeU6=-V{x(eYD*zs+rfB6UE{W2p{B-d|3a0H&rLe1{Pzm`XH##)*yoQ1`0FY%9PW0` z%X3H`2jJ@~2Yn6WGsp0br5OM*!_mLobjyhe6; zJ!o@%JaiUX8x;>dKD9#+l{V-xf5hv(F_Y*qiC}W+us2p8XgYPoMP*EhbB!Jw{(~zL z*yZY!=;%~4rjL`ehq^{?l4o0&gaohjCQu=HH^h5;dm9Ws zF5XY>`o{FBDc5zclNjf3FvwB<`ar`Gf9~73UQ(s5sj2R@bAj9w z@E)1JGuUxt(vm$FecI1ytjPH5ZWOkqDPNN@q!Q)FB4wEC{qXd z>c-afVs8evzgg76^{!M^aPK4krVo-DHjV$$zg4`^UmveG#3nw{n-giWKKX{vu;gUf z$B_BdJGHb{t@JNFywa0BDluX9gri0G9^??Fe@K*&rA$51YKw@jJ|*@AG6`rH2dW@F zdPI=HgAbF@qkqI`E)`W(B~lIdY*qb}1+odXf?9Khmi7Jy9X7qry6kK{isF`;>E>2y zSLIf2SK*Oqa;s}^OoFq?-#dP9u&L|gc8xDz?qCQsbue{^@7ORt1=&Op4_m8R-o$K^ z+dny_4>Vq20=YoVB+A<;-$)LEf^_gs)tSrjDAXF`$r0W=M!igodVQ>=wz9WeRVJ1R zRjgQ1R#A>=83Pi%DnoA(%-b7n?2o?GN0C7P*m%5*xAvvwJ@YC?LpC2B)&Khg3!5G> zg-$y9&5~P@aJ#1U^NkHO$hYq|eR?n7PKf_Kv`uB_qU$SJwqD9-MOxbZt-Z9Dh$I+vI8a3*`8}UctjTl`=2I-Fs z8uZ)1fyevw3GdY_s)Hd`Z@3vR-?huadLbTd2hdQjaz;6Mo#7^tu$&Wvd{*jEv_=?DO zjvyFho$ixH@$vrg7-uG9dcJ*@nHCdW1yNRBzEN4FsCpzaq7yRtb3~9B%@Zj#Q_)OS z_z{axXS3+c6e>ZXs;5lBiP%avk{LkadJqiY2vP^R^8>7kAL-uML%%(aGsvlpQ`@DP z(!4WrGpW&3Ov@!R74WKGfr(w)-r4N(+|^tw)hD?w^Eb4*-Xmi0;URlQ|2XZG<$xtm zOI*3~)BHoJkx5B*sYzKnqp={hKvQH_%f41EztkQ*MvunfVV)~S1DYQVOU-y>l<19? zBbSzU&=Dxz1@i@Rp;Npmo)|DfiX<~ZIe1a=N=+(PLCN$+|M(tc@Zk}u_(cUjuHh8# zT+18O&&k(h8`(`xk?SM_zmD<4wmd(&EA({m9q+sPWSt=}$1BGtr=7{QTsAYwBnR0J ze=2_5^Gk#w%~-^!wyuBsjy-w~8a)Z`Y%T9me~frwC}y(GJD^2ikHMpH&dQ?HA8**W zKjPBmxO+*DQ`0jtjoBsTm1aq;vD4Z+>fLJj_|^?*)vibA;1Q#zEnI|>;GJLy%&ULZ zM`x{WR$q?Q?k`<K+sSHq3gBX6&i($)?I6__0`r@+pE2toV4BP`ailN`~roN zE}#qKg&hB(X-XGpLl9P_cGtRTwc6TRA9W422M_P$$bH~n^KTeYtI|qZq7}4MtI(=> z3i;kk-pAAiv{b@Wc`$tA6(6z4{ z$)5JMx%8oL;H=HRXWl>@t9dLnwU}notEQ0zBfH8g!2{(cQsAG=2q{q8MG92c)Yj0} zQF~hbxr_ASpN+zSw*Fr-&8gJ{(Eo|?qug|{pdW2OYt!2DR*tV3Yp4JoO4i!KLUxkX z)JdkR;nE1Eu?4yG@0+_$I!fsvI-F8%2ZbZOt8jGEY1>j?>MIZ7_=*`%$I@V%X+zu4 zUNjgdcc#5?tM&_~^XNRa9ml_6=2NTTakOeBoi9(5r`g=*tF^S}pUjI?52@j+26cF8f?ts>u4)ynL0xR7?hC2fmeLs!sjJo)WKKN?Lhiw&ht(ps@5Esu|Lh{wk|oK;R(LB0V+nLUk?WN|DN@^oI@im_GJ`JSa z)LYb0lc*Kj(pl7Zue+)p)D8~L z4h~umZ7r>z+EwkKttEBVhHC3OX{?-X)OJ-nshykzCl%)~8Y{{o49p@`<}K0#(tLv?g_?&XSte zrxU3cwen816YWSl=wzKnXZPRF!KsGWT5KkYqL)}#tcN46jbR)_cd;Fs*2ltw#WrFq z8cSo<3JHp=Q~mofw5_y_9qhHuwB58pPFig*xrVm4gQK>AgPk@&>+ev*L8}ecHqd%G zNlu~@bx=4E2hl-rplV6$sg~rPw2iEjme9r8j?(829#VVh18Iq@k;l@Q>oRwmX2J98~T)#eZohI6g=i~Ty+ z1kBHvx3Sl*-dodmMK|nExv}j_ZM#6e=ff`VRJ@LKL>-!<>kg&_K z?r`|t#=4)uPDQL;y>oqSe}Yv$o;MtAYkI8HeQi(f7wcckD!HdTo3DNOy4JFSeYw)) zYgRwodhx|7(Yp3~eGMSJitz}tVSyobfvGm#=XC9^2HMMP(_}bb|nnzoSD!M?d zDLdd9$6KhO)PSzj_QdYGk?d)CKD*%A%Ad4q^bn8D*AbRftbN4MNl@#hEfxuL{Y#xz zJIRZWZUQ!;RXJ z8?A=!mK?5uex+TCiTdeG@|X!}~MiW|w!VhyPk#=>wqO`1-Z(RZYo zY8O#s@#i|a7H?-9Xeb?E?N_*?N2Nw$9kGUW^<`ak$=;GTt&kSsH2)IXLh`|SsT4cf zkNFy2ena-eN(*{y(}KyCy_7{0q}sF_Esz7O@4s+Aqz`qq?SRiqb>yLRrCdwyD|Mk) zu@)xKTCOoJTxT5g^c>7S{tn5>PTO8;s%@`zc2a8FNsYAav`#eNGNPN&WVM}xMlI3@ z@(iu)q|mmOMrm7X9WiEVOOvFAa;Vf%_Qe_Ue)3yVU%46mmM-PmCry^U)Ec@}8Y+#1 zX+xLF?s5xxraY70mWN4$@wVikJc*u>dP}|0#-G#ca(}v6?L;4mILJq@;g-=^dRE#< z?>RU*IMJWw2~vNlzdV74NmJ$VbTgg%!t9_wP@}vYc??m%(Cq(xL2Sy6yi|Ti?j?Ij zpGcqJ3951MlGo$;n*P6^-+w1XZ71B>x+k}m8)1Ymk=kPh7{kXlAMaeo+F%T?jog#3 z9PkQVl9o%$iDWroDp|^8Cu?XM?k6_K#)#if(cwn6PTSFP0GNpU3k6uV!tevZcD5O1 zr9(|VgV1#ObLk6}8R^dw3IJ)kF%&}t`V`2*2Y45E{y zk8$T^9Sx$ju=|)NkCuDO?}{xYNN{Obm4QbS&gyEI#*4bo`&ZT`+%{~`C)zlHbtwFs&4G1>yI8bAEXRgCq|RuJ3; zu4?@D{A;&gDh9!oD2^zOm`AZMsf_y?4(89zO2JcTCbTt2n3v#YOtxTej$mzthqzAg zvH1b3BaFwLlF!X)=I3MuuCJt-Gg!DViallH%vtP|@B!||3^12h$)Yb=OV%{zGUG;9^$$~XA+3e zwXmfKBwg7XLI-vi2j9E1S%Mdf!zuY+*kke)8-!~KwJg7XSblZjy=ZRBdI(x_RB0Z_ zI^ZHsabxovxGa%Ga`om(>?0I^kHL`;E1^+$tIt^$J&{*k$_D_igpyU4PMC+q0PZsB5;Gkd=_>K{fXu@8(anXN=Gukt}l4Z$6xagij5EfeOj zA&De}MS?<-39h7Bh5`R^2)`<^2Dmi5=2!$y=yxR_?XCe%$+o;7(WEWOx|BoUOUP>D zZFIP69W2nx9~T*buh=VF62<^GVpFBKkqGbqWPInFTT6^ ziTRk|J7=uffU6dlQ%8b#6KXwaZ7yIsp_6MoQ?j|j%yr|LjB69Ale@8+xQHN*8EfJF z=e5H9sM93e^F8K?dnc)*x`1~h*C%-~FBU9}IU2(1uogm#Pu~XbEwudTZRQ6>dobA1 z!dY((^T+*~;<0Adi0R4m(QQ~0sLu7bJy^=~g*8VyOASWwsU+T^JMQ^}k(3pU zSs>isPR?RMxR3KhYQkDE2U60fDd$4%uJtW^`mhS9kRN%_%ih9S2riUBfs@8OVC||N z(>Ft>unxGNGWbRo3&9}SwrnKs`8bdRLqfrulgz$1I2RndMX?^Z*YaVHh0wbJg3q8) zxDU~dlz5M2{h3VWj~R#iIwILOu`}-JxR8{keK;31?_eMye80}^*xR_YHhXdxHrAX> z_RJs7#^M^-f-w`=WLBH(n*9du&&(ou8SZQvem#tN?1)EDwYz}S?&6z)h&BKtKH*XM|&jt$ey)L*Y7D}=oH04}qohh=V=8s8#RZk54 zx5??@!Qi7v%sqSX=_KjyS+>l4MA#&SvSq9%ZafTN%Tb?w!O(wvjp&Ox-S8{sYxi=@ z4+X!qjY3#0q2AgXPPl6FWNrKnwt^hxjvj*iS>;_8LY{4@EMh}R@fX?Cac$(mR;dpz z)@DH{n@*M<3YDFoWPp=%hM|J zM7sRfJ1ud^i2d5i6L+Xi{n|&3D#rX;$J0E4t~wMKXf_F7fw5tN$Dz0Y^O4} zrX7t9z}=`(oCOH;ISUnZoH>%T-(#AxY2^3cpW36G6T)sZ!L6ztVX@8GBBAH8>owVO zl6fpQmt=xnZ)`p*tUrFO5mO6GIMWL6apoaR;;f0#owF$OyM-6sc_Fc}Qwj_;5pFteF#JayR*m#yC8te!BAvqr+3oOKoSoJ|l`aW;u; z;%pJ=cFM~hm0r(n6Up?5OZ8Et;haq)+ag$f++}JHn>kK+?{ri>q&bDN31sH!@(lJK zvDnsNXnoD^>@V!v#RgQ6uIkwEle{V=c(--xB(x zqH&+poz4c5sy%1gvSDPyBd5!_1ozoyV+ZyY`C(r`V-z_3!cX0CJ#%yMkKND+JEAMk zfo;ES@6=XMd(Q$ph*HC9r_(-~aK0*5u>;-_+VgA`fi2A56jye&g;=b{{-{ zuPOQd;Jte_$>D`QMZ=d^V1#->1%~OIr019n=Qz(-*3k>CFNo3^YX~1u)Q~$kWa#l zH+;ywV;`2~lC8(qTyISFANv^i;Mg~qx;Sdd<>MQ3a>(lAyRJ1Nr;h&wynOtq#U%{# zq43qf!{O_rYhmKq80}AvhJOiM8@@Z*Ulwur0{2G{c zvh~HjBj`+%z~xF_Q4OZCad(|e=pk=3Vv0j}jJtU6r@ zdDm%@D3fo_e1EYnW|RFF>td$(>VhxXdv+V}%d@+IYtJ44eqzDZXAeP6JZr`TGXLxl z$hXdYQr3<{oeL{*B$p%CNBWQ_kw4(W8|xzvMCyQt;IcCEC~#BcsmNr(DKZZ7s>sA- zANFP>!4&e71>-o9k-!0h&f+pB@(uV-h#Uj=Bj+?RV97oEFwrjN{&Xg`kkaH!88V$*>{AmkA!Fo>$7W@_c{ktq;$Pkdszz8&xpnqhFWRJF&WGaaN!8mq(Qj z`}q76=Sv&YyXU5FKDDsw$Yw0>`+mM|$wR--zh0Ev_w$p>9a?TF_b?6k;^cB&i!bAQ z*xlOt^R&7*x1Jo<;KtUQ-BpQSt((&6#@DBY_{DC!AC%F3o9py!mu*2KQhI&!uziD` z+Ye9Dg>8@Y%!&N=yQPlfzP%FKDCygWt<$>faO-=#+jqyhxVHQ5Sa;XfJ5LU+6T9G-(Ua0zVogfi#-nSitO$l%%RP$sD<$%y9^zYLw02~f7*PP4EQ0ehgyaj#yR*zxDz<~Sbu`Pfj$+W@EAKOg5%zs|#&`@)CTirW|6 z&mmyn)3JBk?2jK**m!@T{hihaid^G@57d+5IviZJ_*~P25$|}mI(TJ7OwV6##3cs) zx^rQ{#b0+9918q3rD>Agq1gU$frlQ2#Q7fHxaeHhBdZrnU61Tqe5?79#9lX=A3YFy zx5d%3lYeS()G7X}Ccl68ZcNkPe_s&OH0*~Nss~{|yptUic63TX^RVzYZ#afsZW7ll zEOy3?CSeaI)}mp?AXtl?i?zqXCrRPQ&d#HIkDc#)z45Wz?*i^DKgf>Zbaue8%mvr$ zAIp6oZpFm_!TEaQ%mk1oCzef;zrkgmCXGZ~I-7d?gZa2tsJQWud=W*)M61Zh}0Lm*b`$eo-bg6#C z5AOhe1CKrvoD90?v;GTzngF>G`D@5tk-Iis=G)z=J&FzC!y;^-pgc6;Pt1Kd6WH0%Lr-Tz|Bna&~4JkIoOcCp#nZL==;o()?9I6V!JM=vxw z+bQIN@42I8zID%?d+S2|bLah3P0w9@>wKNa9|k|E6M1|Yrn1vxB7-6$r$M?f2@o{_ z5IqiXZ6x5vQ$Q@)rii>X0McD=z@rer<8~Z6M0!Ui2SLi>vtm{~fN3>{<&o;ha&p-x zvQiBz;l55z0B<~gBAbjoTYUb*chA+na6r0DFWd?|>vSQ}KW@l{g5tBv3*{tkNK}HZ zaAZ_rpVK!kJ)M4zUjAaNx%~3i3p{);hfRvky?lPGSr;7@7(Vvu*?JfJuIBdtz2>!@ z6O4PWotSg3-?jJ|$K$R!_=fq#oSHylV$O^sw_>>zcgEMb@J`(9)LM7qXGJ*NeKhNr zVRw_<%ZA1ucXlNYPfS1e?9mP8@0EDg>#oD&gR`$0pL{bczQz-S*MrLBUuNujnjGF) ze>3^M@2;%mc>m)~lM`LxmKd<>1|A1_BstG3XHFRrZuS)!_r@L&GcN~T zcgp%=siD5{$>>v!bMxwcZp=@5|HmZLFW&yDl2mMpN^gd!OYx^xm;Kh=^>O9FnND>o zWBVx-Rfbt7uT_ocf%^a(gK-Qx*}brerMMSV2q*jL?rW237MAONpZHagi=kU~Ov|sm zV=62sd-*96yw#D))um23?in8G-Ugp!zhwX9CdmQGfyqJ1ZIZ1E>ktxxN6pJWtQcNe z;1>cL$7=VNF7Qx>2)z_Xz}JwV|8x4COi+TPEHXhw!E-<+P;V=}Qa+7E=HXv0_uq`K zDkMR#lClE{`vd8vVWh(gw|KHl`LAi7CCp_KCOt-6@V}%KqL*8cFiwh_|4(Tx&rg|w zwrmJ$fi}k1AXRHX?|_z~Ek9zbx&NQabrR(~$(|`JoUBxqqCNfwNjicPWB;0z1t?bm z*=yrmevPDHlnri@gVkMSAYp1-7?mB$-VZd4Oi+IY+6Vd)v;%IrR+9Fz)jS+e<6nj4 zVW=PK+mCa04Rn(pC9dGbaJS=rkmEpkATuZ*Zusq&d}VX*4Q_ya7U&xqM0SFQiJi#= zxL*@jk_qB^v~d(D5flx&1*&T$YkElc5nFAIV=afcj*R-xh?Tz@Pl>+A7w0BO=z9r$ zkFUT@ka*wUuu^qcMSIpk0bO`u|C~%xaC%_lkd@=mAAdp8aip~uezUFqinVCZKM^y( zaJxVP6+aL?zC{YZW^qDvFwUilQUMssTu3E+z^+NkslOFX9!n_2#^!>{?O6ca_B5#CS z2MQqWN?)?@-=<#>w-NN>pHVXsqHG{UzZRwl@p3?%|2K)}@h4*CuRdQe?*#oLQZ6EG zU(k1uIdQi+c&>(#HWtq1e@1T-iTkTmXeZ7Gp|5`eZG#Rxx4Ln6NnuW}4ufw_F8fT#TX%?=qg*gEJe@ByO7RjKv?nr;a^XmxsIB>|)Eb!H!EYjJ+5k`t2 zdORzpal-S!!mI6n;_s0miVL6!v~L7i57JoZ6zm|?cNX`Z_mjg*wR}8 zdpaoWCGmXN5#;k8JwrNke|p(UkWbi%$4Td9QA~Q3Q2y8K#EaW)Whz1(p3g&W7f0a# z4K!ybsHHxZFuiEFg};w5sWv)eSzAm%-y8zXA$oBLs2=);uQ`Iqdb+JzW~s2(r9rI9 zl71rEnf_zq{qQO&%ekFhv9$Rg{9X;mW$6IfsdPg48_wUU9ooD3Ekkd1G>?j-IXR;>4dfI_fdc@M<*VA$)Z zvDZ;0SV{VUeeo~D@bqdUudzt;0@gg>D2HCz7<&kmMR6L>n@vdX5NWHrPueQ-$@@ya z2N@2+{y|x3UE3?KTplqy?&&VvNhb3EN;uN23#hcK$%L&N`(pe??yfm*uU+^^Ka1q8^Zo^r=?GS z!MxND^8}~0Hr|g+fjmvQ1-#ntuedYf{vF-^pM=4C0_*dLU9s`Q;44A%@H~nJ?R$ma zg?t}=L#>=Yx7gdDd!R7vIf51V|0&pmHhg^!#+p8Zy}TLeR%%7YNi;DHN`lK+cc>i+Ab7vjw3wAh~g~%zZ3S2mNoPg z;;l#}b3||O^B9*qNI#65qCy0BCY*S-jujKE_@3QAF-s5TUtnEU^;Vb^zEyU-J#$<`&9LDhz zj9(M>NbA50!69S)xeTXPKj7uJg!>upAZ2s136ugl1NsWI6m%5yHRw2Kn@z3`_de`A z%vny<`fdpK_eeF(5`72*%>xawa{TWqtNkzCmCc0#RwCzY>2l8Lu8qRLFTO(D&sHu$ zg8U!EBbQNTQccPR7;l)1X@9Z{y0?pStS@#c+{h4h02u;#2;?aW9k`B6q0h-9&^VC6 zM$5sMi_^(xpylZIWw5^*<~2Y3Hw9n3r&>Y{SWoR2B-)Tfa#=h?K2z{_+0Dr~e$bYa zVx>?`L6FDbxv>Sb1G4)c!~e6e)D`vOgmYCGGjZZtvdcna$Pyt3WpO2gU>_tzlPTgS zWQr1VuL^a2)h^_cP2vNxi4MTLHvnyvg*MuOG$FsB8CE}w9kS?5CV>XwnKB79NZFSh z0%e0@ZDauFbP*Ky3UU9RL3=^}Nt|SHFK!g(kogw6gubI>KHeK%R1PDP@J?c1HDO#1 zQN)rV$}d6bn46xFF?i;V5%-W);t!;om`DZ)TgjXFUmoKWabylCOtBH`hW9ZyIFa2- zXT*ypr>K#oD>aw{mLZL4(Enf(DtiWV0BoPr16C$D_IVvIw!g;G_x(WL4z0cjxKKDNNta;wlSP^;LC+ zUenmfb3QL)pX2b?5-O6n*_;#Kb$*!wbzmN}wmQowtBZ``JpKO);Nj!8{@QCe9kDQgDEB~KglpVGrhfgZNPcByXJgh_n2GD~0l_uwh|;p()!4CQ%u z%phn317ZCCku<)PTFwrsA$oI`oGz`O(@h2l!{&R{T~}EBc3ix+U)wpP5UDw=VwsTBZ*)|F5Yzs|J4GDD*A9Uw(qf|MNCV zapJYAn)b;!-QWB2 z_iM~py0yP1*qiy1zg}lq(%Y;TX|vwLx($tdCT)Ixf?uzG7FWjRG_PlOAXQJ$8 z-NEc(-MUY8=l)Z$r}Q7~4+VQj^l{AJL+oJRhJ6(~AhD-n^d9Vg2IpJ%FK&~AH9@l< za4f+c>-T0o-Cqx1e`722xanpt$9n7x)^oST@ny3n?XNqV`c13>Pcv)2!J4gEXZ5+c zzIt;#)2w?Mck+Wh7^WHiX&q6|XP-K;J{Zm`|7oso*4m6aYg4zbX+?tdt6SE+BEkAn zpo2Tkro4JF`N~7t{B>&P_=9e2#a^cyoBa>-VbQ*Z2}v^I0-aQdBu|llCCS`YjXm;QA2O(~_eZ=~o63ZYKR4{hpcE zJGE$+cd_5RM>@L?GpEVGTqHH|rDpEYNE#V;+G%p6sWh=amyR~!6YX!l{jqe5JjI-% z5o;@rrHPqmM6y%&&2cw2q0yUJn=st)&;3|~Xl5VB4jRZ*zpa|^@i_YU=iqx|8Scl2 z4V!f-I~Qfm%9`&e<~jJW@1gHK##pbfKB?lbpPDsPpBsOe{@CxUP5)~8cGHiV^xGzyBtRqDW68>o!Xx4tsx@T-2Ya(VHB?zDBL(TdqX(pTYY1U-@b=O&PRx8GA5u+w?NrnMnQZ#&-G6ys?s58}`>@O@D$v`j{Pl+KF;HtS>V5pJAZR zni>7!KiTPq9^XvV!<$knx=%_a{Z5_C!Ohgw94Q<9kbZEupHKfD?Q(y`9=clWRk9v7 z!093bu&YA$b}8gNE2U#~(PJIo?S9Ucy$Vv<>_6yc- z4od-p^~#Owl(mtuZ}NMDc~2jsD{QwJ?V-63GIqR(dtql?KuY*BX{vUtFFcOlB{G>X zL;UMMNH=Y$>c@YXHnZPA8f|nQ@_Gxz-N%~Yr&q&|^8|aQcXE`uS*077@Yja5uJn{+ zru`-Lds6x^2V*{F_-hE~op2a-!{&rsj%!s=U4exAb-q_Z#{C-8qmUKIhGbX*#PwG< zVEV>CNXWJAFcP@#yRGBgk?&u_WLPTlXGXXWxe;*fua>X~xdcoFp7N9eYBg(B3G_Uv zr#HfJI1kmBFM^qa8n{0VP$ki$nZGjce@ioU=d*Fn)V*u@Vr!U+#x+PX|@v zTy0J|aIJR2tqy({z=6aW`I^ShF|K>axL+fW*WL%xx>fQ0H$PF>40?_x-Zi=-e{5meTs3&Uqv+cMb5-=owEMb1}L}hcY(#a1^GV$$8hUm+8=Q(0i(as zas2D}`6~-!lmfvRg=a_#8OZO31V8G2TxSgg8vEx;w6j~FzuE5VC_nexI3wvF2U;^( zQv#zq)qy^rdn z$!2_YV+~=Os4kMpNk?D(pj0yVRh#`mA&>N9-A5}59B?%ncw7S351IgE=IW6 z7B~ohiX;sJeybye4K3h1;CsrHkQeA@Qa%ZtU?|YHrCblxMk)oFp*Yk8!l&vBjO$aq z0UyFQa8V?66mmgXcpRRGQLqr!!ag`Dl14#hC=U2bLwV9{g#&O#BrQ5mn+-|<&hHo_Nho;MDY*DT!MvXIv-PeE^(0q?>!Uz@XQjTg zQr}tW$FgDrS-*x0BH273KiSAnHu96L4GaYImks@8+YX1}vdBH?@16`$1gb$Z=mq0p z383qHz65lg9lzPLLunx1?CqcrP~Pm6B|BxwPPuc?&E?1n*liB%HV1Z_1G~*ZJ?D56 z&{vLc;a8EINpLSb2v0x<7z%UX9ry%(f~z9A?t;SbFgye3G8g*KH4o5HF7%iiJ?2JF zx$^^cn43DxO&#XOE^_|>e~8>$85+au@E%}y_hNT>u)93iT^{T%PXp)(!(cY7guNnp zNjER)=A{nvQipjdcivCoJD}|Os>8F;8zum0ggMMjAfrL09M zYf;Ks6ulIE3fjU;fUb&_1>!D7+{Jc_6vtkR*MR15Ql!Kr!0t+7cO|jAlGt6zI?x6N z!gN>zdqhgr1oCx%en6M^qs#lzWoZl0W$BWDE=#w9{xAjJfsaHUcoH_i5x62!CJhvT zO3(BVXmnS2^-kZUtbB9BA? zn|lPicm%t61Uq_U6f6Yl_K|&XQly#!{8htWHT+e39$tcJ@Fq~F)xL#aMXD#kz3?F5 zw>o~S4~AK=3W&2haaJdPHPAgjP zg_2MUT0wunUoHIA!e6bQ;17}7=&*KPcnGk$+MNI!syz?h1^m{=Z|$ohkKF}@;bCYD z=lA5?&bK>t&(B5W0TybTP5>97K}!(pHd^$Ax$J5cxamy0}64}KMC z&{yP1?kP`Fzfbmq$*>Hzz(M$h|4|kR3_`W$r|oJ5lCNbAfVnq8y!V$O^@wI&1^(lbwGP>5>HZ!h`TM zbcZo82e7HGMd5(R3w7X2I43N%0BLq>4}*ZdqdVp8L0joD5@y0m*a=5~dh3}63IOHl z*$DaodFxdYYQnS7Q=~U>_a^S%l&3do_NF|&DNpaofWCX9@7~1O`?5%%RFDVCL46=U zeaKIr>98E|+vgzsBGNYoxdFd@@!Pi>jD|(9j;TIn?)NHu2>alqNPh*H0loDnkNsOf zUqJ8uUl$p05Bv+T!2#%Qz}Ij=?`ot_IlCVCr@V z`WZrbhl~U2W(fX32h6z;32*f-gm8knb7fdj|QQLB3~@?-}HK z2KtzZoy^2eX7&K|I`f*ytfqhs&vv0FU^la0gZ1z^oMvgvf~-&yY65wgL%!#X0P-@2 zH0GuU>R~SRGM6~#lEz%>V(w8Ooq5>GywxJ}(eZqAJij751(az%@yve(eiM0xGQC21 zU-F;1Nd_nZ=w!){@TbV@wEx%1-|J)G9FMvb1I4Z2-}?%o>nO)M!mT5o^=SYdug4zNlg@h5Sw9je*ZTKiFPvbxO&~jzhbGVmXhR#4VLx0D z*_aCQKouaajXmHDzr2$dXv3SHfDSMiW&rx%v;(MvP1L~$DdAoy3y;I|fZq>h!77o> z=ymgEkq@!S56R<)lJ&lYUTLW?So(@tc0C#1g?neNCO3+5>VzHU11{NZ^vG^AhI(J z6oY!u6(+(`AfBDX^RYmC9(9U97nlI+;A@dj2=~cwm@l$B2aw0z_}Psee2Tt5%?BM} zHT){FCkfEuo{~@to`&wQM&vW}_SwJS31|hRy|*EB2GZC|8hek!4YqeP0_|oWcD9ea z?z<+kKRxsYYhBBk@&$GNL%g4ckP%ZUaX}zQGp0 z;rl__*+F!1@U+Oc*vhvjL=N2p_XGXdAg2#<9w<9LVqS(ohF}6giO_W{R97-IMRaw<4#eh@3`G zrW&a zLm$~X!7@?zgQsq&Gxd_MvCVa-$J z&kThDUFEL}EucHR43s55I?cZxK85e$qA0e_RDrvJaug^F*iwOKpbHF!`EXKH!Ky(1 z3zB}pv!V)hfW4v$li$KsfIQt-1hB39-WT;R>XNlfRiq|RhebvM8Fisk|GSClx4 zVnaobz%QbTMF3kVRshg>vD(lK2w!XnOa*jYjQES~0_wlmIZ<>Cs(3me-r}X91~h@; zun3Z2JCIHZ4frcj4cYw-Ogbl_Z{$rJy170qnEn1~@GK)>x_BoU1}-QTOAo z^kbqPSPS34AEL@+hO*EY&{rAsQHDH}Sq)o6mHih`KV=D57Co2k1TVrkmXP)C)p$I8XvOHr&#sw%|&a3xqEs;U6~AGr(Wi>ijbReJ!c18t*P zTfoMvjR0)5+UxKh?0~P}IQ%ZEx(k`1Fgyg`|k2Mrk2VK{B1+d#X*F@E&&g)Ktjc{F5 zJ>q|S8SEETpS;yC3h3bp>hOsNVUwr^glRzj8vG^d$wu%Rd?u=4I;al)U=^GY^;8ac z3dX|$QH>~TBkG`W1t1@d--ZtWoisi#stIW}Axx78pf0q9mtY3qx5*Cp9)1_~bZW>C z72zr90wdrRcn>~<<8VV%(~M9Qs>8F;8z#W(@Bw@U=S4l^LJoKU>Oxz131-0Cumiq_ z-$gY`4f&xWJSFPcqEKB_^Ym~ZVC&7BLJt@Vi(vzxhvw+NMFg@zDX0ytVE{~p<**eF z!6i{GQ$QXl4-KFr41>9l47=edToctQJ=_OXp(*r$v9K66zyUbJmUslRK`E#WtziI6 zh2^jn4#6c+&!vDoP#zjUM;HckMYW+$+Z2aaf&RG7=Wtq7Tl(U*S)nA*w%fLX{xAjJ zgpc4`_*GQ9B)AtIgvSBBw?psku+4V#A?@(n4m*B62awkD#P|Gr@EIHz)t)rk4+Q$r z4o?E%JJ8>=7O6UX2!!o$QB=n$YO;1PHRdcruMop-0N=}w(>r_Op%XFaI19@JTnC!hlig;_wHJw68F z>}dgM_j~~A0(H}qy6yQ2P=`IK!=A?94N<+Q-(L3v_1miz^oJ?%27CzLz(rBLQ$v2J z2(-Q4U0?#d4j;f*a8^_w((aQLibHjvJbfrnAIj6`Rag(7!)Z}{@!OYt_Qh{s{Puko zdIRzHCEmU};5eY8es@7(co?3B?l1<3v)@PXE&M8~e-h+^2cRxI2mN6ZP^SKrsXuw` ze_qr87byP#{0_kHfVS`w%z(Fn^aqgsfE%J-q>f&sj$S1F7aId*dXe;BTnNPZB6@le zJq;wzf%gD99rzfug1$id1Ig>awQyS0OBQ5>2LV4XanE~+{(De9psyZ8KR>7)^nr=6 z6uAEnqJJ37J!SC2@C3XIU&94aL%2r{DFpN_Ll|ESp?wbFJ}_iGd=8{L1YHhI3FL8T zQ6Qb6V_`mU4;p$@)UbPj{11B`(A}^%VIzD7*G0YD61oBR#g`YsIw0)Jr$i0ckOfLW z4QK}3Ux$;%@Wp`N;d@1mP>>c1LS=Xw$m@s&Kwd|X*O3pwlh6S+!2waD$kQnFIqEl2 zqtV-F@;Ca7s4<+!Oabyb<|Fu4)L3*k7Tt{{U*jHy^`gd;r|}y^O^5>ZHZdj>)}6O(_7xF_pTXIt$(qHLWnL0P-{)|I@LV>0FzE9nGMOGb_NyqGsg= z^e}6>sM(AaW_O1%fSs`3s^*}FIVVKTCBC`DHLp0J=lRb7Wq743ppyl}vEWrv3-5s* zq823q*I%74YVnJ(4}K8!S|RvG)RJs)AGClkM7{nL5bsjrUAjlq8|3ee5r7_+q5EaD zk!8C9+hQG4y_p)YjW2D}dM0e12h;Z{@! z{H&n7Z`TH{t*iv-dDU!K23z1Dp!3x+KxeFdD%L&~YoBU$Hy90z0NYu;A5MvSM?)6C z?>qGY_vE_J7G47Kll(U90P>RjyQp{D0Qz223#gy>>cACI@4o?SfqcJz9BznOn-TIu zMR*Fjzz87TwQs>Tz{b{6wskI0H|xp*d02-&*S!qH&3dR>KNx6N8<012g^@55mIC?O z_&i(_wP^xS#t&?GN7Uv=0Nehsy{IkdWeYaG1smV;CQ$!dz5&wzCT@r3u(un`hGf_ZUy9nt^?h95$Mt=qfH?Qx1$lw;?tcV`Yrk>-Nz~^a6a~uoIXe2B zcs@TV>Og9s9u7PUBY^T8!2Z6-3H<>1%Lq{3uWXVL+#cX`_eH&-c&3W>H6IA4iDiNL3gHpNe9wQ~f|* zfB0F{k52*p+K;=Hq;raVoazLBiaLEa!W#+Jb0)5fmEZ*+e;20#b#alhU;ItfFU8>_ zI05AGSN#6k7&^cXQI}Febs+qu*{}f)i~8+Bm?i3R9w0xLY0sA@!8`DSsNYiqb@BT^ zSO(`sUC9T1;jE}Xo(A&pXFb6F{v_?IBZ0bQO;TN3C+aWi;jf;eu9tvqqHffNX0TYa znD^IJf!XjqoQE5tRca^=&0rnu7Oib~7S@Zl(nB{m2xs8B=tvsK2_=9yBgDzGvYKBZ z(GlXcd3l%J3UIfV!zS1(+KIqZ@I3T~rSLxN0^(;aP`kwIHipTt6L5Dg32*m>Oi&0O z2jcg7!U*^dNSFU?*U`J6EVPH$;eu%MTJu<2plnHvpdD-h%AcYdj1ip@KPd^9vMX#5 zo$6kw0QG>lQjwoj9|7s4eiW7fc~AX0{46>Rc}r6O&_^2blQtJT1P!4JjDlsb3-F(g za;78Rbi?5d_z(_>z6*WdbszMGRq%!A^u=H-%!TDZ{?b#H^!QJI2Cj*|I|bYi^`SGo zBRWG2n!{^=4l~w-mjRt-LRXoxKuKr{gvm4=Ho|A3GiL*2}`a>}O1>ekLF?d#WlET?2(#rlTiCj{vUZ`TuB z%$pWHiSp*bBuODDC6%O>G?G@*$z76OGD;@NELkM0WRrU&yX26Zl1p;Sy^=@rNeN+#*{WBu z;(80m5_$#4l6p4BQhGSY`*k;trFAoo59miZmeHj-mesji^(tOWr{Y*#{mHQeU3ahI zCDne8rPQX@9iMNj*0k>2rIT9Ordx|vYI4Uetvaf){&Bc}9MrjIr*5iWmu{^)t8QKS zqB?dbZ>mlA=R3Di&AWFmR$MjYSVGn2SW;EtSW1=Qc)u#bv9!v=@d1^EV;PmEd(W2L zRjhl@uH99n2eI)lInQ`T>= zGWk+9q>R4w2vP=LGE0Pa`x5&+D!m_T8vn>%5plEYjO|vG%Ko>s{xQivrtps`Ii|#R z@5XjBW4HHUv$?R>yx3|%VNZ)V*kL1%`=zX(P9vNO;u8Fs5tB2rXJNJ(VvyEcCE z&&Su)t6eW&#r+jmR-9hGO!;i(b=llyV`XDyc9vOMX5<6A9_aW$hEj)0?J2dQ)WA~Z zA$zHTC9lIFcpoN1d#D8IOARcs_kj#0R+Ly!{8Y}RIZNcsl;d*trgz8EH@K^Gx@+l9 zrTa47*avo{E0A_{nwe?Zq)C_hLh3y!cBUwuG%$80);wCmlSsKp3hSV?)aqqb)z|cJ z-A!FryVXopKsmg`ll!%wbH;w+xQq(edoxEaSl`F{k&Ez-@?yHUE}={EuG#x}Yw`oSjCpUheo&X!59tcL z!Mu{Ltnc+^db7OQriRp2&0ed{th2~HI-AZWIlMYvJ<4!1y=?UCGi0XBVzeqFv9a>hyHRIZNEp zj0kR~@+fzOQ*v6)$XPkZc;R;}7{$uCf#Qmt2<{M(Ii`OGT8e z9OWucMZFpx_i*k>+xUp=kgtgUkerlsUPdpYn$#XLAFEgBrlpY-v{qZ| ztk0~EtYqs$>mBPo>l5o;YmfD@^{KVS`oLOmZL~I7Ypq?@25X=7zO~odZEd!;SX-^_ z)(&f@war`Xz2+_PUiX%IZ+OeRH$Cq1)cPcADz!e{S|t&0h&TE+9p{nM`nY~-95ln&#q=Sop zbmwiwi`~iYUOz;6yN*-Y>BbwWUv)>i+iwpMO&7f@nkJeynmU>)nm&3r8qoGm`(qJ% zx4n;VVUE-hIqTWHGuiVZp5;Zon3v?G@KSoIy)?YdJr!^DzRSDrRrhXqcY7JU^jJeC zFSC~gd#K^n^d9wUdGANX%NkW)Ht!xUyO$%Xy_{Yy?_RHfm(R=X<@X9kEw7MQ*t;(p z@rrmwy?=Sdyy9L7ucTMTE9I5;9`Nq>9`wq3<-GFVLtX{1qF33g$QtIrq;drQP8dk4L5yuIE&Z@2e}_nG&p_l38|+wbji7C39Y zW8MkxxOXU;(mU;K^3Hgld*6EBd5694y(8Wa-jCi--cj#o?-%ciciFq@UGjeO{_uYF ze)rCL=e+aY1@EGF&HIabKFe4$HFxB!w3#aMu+-uH+DKYS8+k$c$QYR{Q~ZAEHU1az zb=v(-*-zU)>Gvj=c>pA;(x|(5ll46+yUM5X^G53uijj+Is2ZzhRcqB(4N=2cj2x*Z zt0`)#nyzN3SJVRamU>sMQSYhu)q3@T+N`##UFtKnSM5`us{`tgI;@VUU(|2vhIVw4 zPQkPz6+Lp1u=lN|tLu8YseVSc)tz-;J&1mN0{!_6{ia^7-_omevVK=@*4y-FdY}GA zAEH&C)Ti|sdVtHO^;?z|u~J%Tth81}tFTqXDrJ?m>RQiOEv%MSE332B#p-IkV0E*) zTZ643)=+DB;*K-Xnrtnw7Fvs}SFI)1GV4ui6@A)zdMwj-eL#P;jb7^u>nrP!^__LZ z`oa3yI&Gbc=!hMe8(9;1&wj>kW}mG5_96Q_`+NI{{iFSpebkxnyyqNr zzIA?ZjylJkv(b-Y>0)=sio{;#o#Z28V|Xw5^4NQ^wXt=vqp{<$6S0%A)3GzLv$1or z3rQ+TCuK^?oRlT$9)3kCY{lG1YPieXH{Ip#TkZ3;0)azAl*yPvvy+|S&-?mlj&El}FvDDy!aVpcuoH|}GMZyZ5@qTvjZ?o_MNMcRRVA-7aod_XW3`+uiNq_H=u>z1==;U$>vz-yPt- z=niyWatFDC-68H!cbNOKJKP;XZ##&zFCWv)YhkHc(A+hHa{`-Nb&H(a$4{O&(TJ5_D(|Gy6tKDd+|L&+iKCk$XZ1|t%E&tuo@GX5_BRi;} zDq$UU4{K>mSWk#9!G8|UaAQd_f-JX&UBmE&X%mN`M@ zVx5y^9u_*CUnhFic~xG)R+sSWH%nQWU5L%TDT|!9owvn|wMWWgW5MzoHat#V$BrlR zTe{mBgTG-cT9#qcpU86VqkH5ne|ERRpF_Rv&kk2ILVa4^@n;{&%wR{$8h>uJh53Da z7OYtNV@?1O=LU113^qFpOxf<^7|fHI={gylj0WS6)#}RXs%2JjnB=fN7-Q6@q!gI- z789-#M||POT*-3h6|P++q(!g6niBO;8JRZi&+~#*%?ydY$f@MK;EZw>`F)a^y*y|? z=2UdLIwPHh^fauuhxr^N577hFvFrMMLw#znf&C;k*w}7qx3b&VZSB_fb9OuXdAp3VushpbZW%vywO_Ei+1>3Pc2B#P-P`VC_qF@k{p|tvK>H4*)Q9} z?Gg4!dz3xee$gIdkG03y<0((tzn6y|ucpN8IrcnBvR|=ZkyQ3Vdy%BJ7u#>(f0eyT zGBUE*$osJvxk+X-a+9o#-F}pN>{E`#4+KP=n3QqOI~Sy!bJe-Zy4dX{{-^OvO@AC1 z9|g{_=bAL_d8EJCe$8HLzhN)2Uk^t#XY8|#YR-q_noIU?_GKsPk8l34|747FEga?W z6HJcf#7u6;iK)TIXdi-x8&k?>&_dG9+~vJ4g`K=kJ}19Zz$xeyatb^5IsbBsI7OXe zPI0G%Q_?Br-0zfj9&pMyWu0=)gHCzpA*TYhTiJQo@7HV4&T2WeX=invdd}lcedkH% z38w)zXe`!H8u>m}Br>s{jcxjSAM7z9&0-eHPEW#({o91wG+&xzPhnhVT;nlm<|kIX z-@WO^s-)JVHG5>CFVt}wS`ndHQZyPh*?LMwihpHrXH8o9z$nE%rzDR(qSh z-QK~>#*DoGjb$?S&f(^CbGf-0gXeMcy7}DvZc+C>_g`)iw}4yFE#wwPk1^|e>wE0h zv`gi;%Lr}cRk3Lm$@DR6><#?1%lG#8au=h1eu0x#7?oU%@iS6Krr&!cnuV2$quwrW zCF=xDxzm+kokEzC>|y@1fU$c!w}D%M^iyzeDP(7`BausyA0vAr??)Cy#zzK3+C}QK zewmXM%*)ncR-;yOZyIQ|wrW_ZSX(-X_7=0E*`$&pqs9-_ObSSD}<{#$Nh%Z z6l;)hGb?)RU5aC7V2S&H_U{8WW0Betr7v$tkJ-p>${N2Vu5EZm3pN|c@bH&d`vzZW3|CG+fI&u{Cu3cTgG_7m%=NX)F!=)D?AZl&Ixy|mgv>E(37hraXGv)R%5l=?6&(-=-xvQ z!W|nn=Y(tgzj2BYOEc9>-(tbKKK?QhM>QjbzPTf74e>j($vY)dtoo{R@sBPL`ZiY% zu$q6seE2P~AQt1kffP6JxqO@J64G0uj@;%p8Q(k!66An&G^GaNZxGKCuyjqzo#P+w zJ4$tirM`QFl{7x=3n-3CP4rzYRY_Hnk$kTN{d_mg2#}t@j0060-$e;`1orrpV&982 zl&T6bON+;mfQX9i^40<37!;3rjrjTu)r%o5?%-(B?|^aE~K5dmL4C zYss(M=$9ov)^D%J>2cD5b-C62T6MC1kN2Rg)th80J>@o;&3)!$S-@T9GkKLYy2G-R zS;0A3!@kfp*`}{s4P?93&}yRYv6@=FRBrBu!_`A(R-kH|S%Ip@J@K;YXkCpsYEtA0 zJCmAcXR)*BR&K%fFB0^zS3N2$|D_rtC?qsd;9-N~SXRIVo#dw;Cmf)L8YZYN}qt zih8IE>QA-F`rZ0NeN2YxtKD{1yNu4nn6s%aZBMW#=titn&DM?W74{0<%E{tn(XE|z z&U*cvv(ee8+d3aOn{_*9yR%)lcXm3T=nl>v=L_A%x#8TX1~QG&Fbl0r-#arJG*Vgq_P3zwP=Xxq-;Rl{NmA8h!|6?h)aY zWd92HL_f@PGo^(oC$B=||hrQm_YKN2*FWo;t}uY$@2kEy#HOihn$vaN)ThT+Hz_@kCs9 zo)Qw{b|gy<%a^hXT`reJ?2C?P+F)RNQnQIJSPobULj2XwHZY8%g^PSx6 zMWk{g&Y#TGk2!~&{q*>ooMgtj3s`v?>kMJVwX@UOX~Mj^8e`HD%+a$u=^3Aiec3+4 z3dGl}LvOX$*(;fm&9x`9-Z7Ask@oB~HDp($ie1JoV&~xwnZ|Y^*CM|}PDBnz4n%fE zHnS_WJhCVxy;GI?B@!`{9xk{*x2_S10_hPWWG)@K3Mn`(K^#zdGT6Rl@(Ogn!za z?~hq<;v6q8>r`$|`U&(yh(OvDN*hBdIh0n1QV`Z?D<0P9E0BV)MrVOr5Z34|?q)O> zNI_Vmy}&I9Ycv?R1!0XA<8DTeffR%_x(wWcutuM8H>1%&3c?z#25v!EquIbM2y3(( zcQg78q#&%(ao`q&4fKqr5@C%L4{M}&SR=*58Yv#uNbz*hZ5TGtvvCW;26{GbLD)dg z_zJ@s8#VbeZb8_MVY-2y&D9{?K+mKQhBX!&k0sEvxf-M!=$V{^=^EROhY0j+t_I}| z^laRMbOSw8iZHCPYwbQar4Y;(9hx5H`>= zr47RddNyuB*g((5EeIRv8U2J|13eqJAZ(y#;}(P^4}Kk?(J*YFXX6%x4fJf>g0O*} zxfX^E^laRMuz{Y9TM#zTGqHtX13eqJAZ(y#;}#D~s);-sDXwQD#r15YcvvIF^=zad zY@lay9EJ__Y}|sdfu4<95H`>=WemdxdNyuB*g((5EeIRv8Lfn213eqJAZ(y#;}(P^ z|9%~z$1rT5XX6%x4fJf>g0O*}aS6i)dNyuB*g((5EgqJTiFAz=*Rzr0dNxu#tdZh+ zHc}8a&@-uqVFNuIw;*hwXX6%x4fISt!?1y#jav{l(6ez1!UlS#WMSAq&&DkX8|c}% z1z`g{ql++Xpl9P2gbnm;+=8%_-LE6G7lsY=Y}|sdfu4=qDrRL%)LfqP8p%^TJyd(u zoRzEEJPTGv72$cFEIdcy@Qmp%a)Mdj0of&+S?e7}cC*7nyv+5>15md)@ z8TBRm$JHDyzqIjl@O_oWaYFnSSc8-U(x6EQO4?f^>)31k@`|S zUr%M6K1lb{9T}xJ(sguI#^}X(LOPpH$73_sSrI2C9aXVP)_aeL^4B2bf80)@yjuZ;_s<$LnFtBD%8j*pxX$4W7j=#S9{+ z&cJhnT3ul+?x;G*vxVEq;cA{Fo6nxiXjWc(5qC?~h!wA@tYDPliL*Q^n@Z0LRs=ik zBi-CQ*x(hH{}^JfT}BVoJ(#I9*9~=T<|$=(8Zi&Elr-8=*LYU(1kWrUV8>@O&owSr zi+HAeyc(wZsjjLG`LC~Ps0yS}Naa)+NP+iBvJ!Tb9ihFlT{h7YR2?C%Wp zQ@__wJ;|g_PRv~9ihtgeIB!gxlN0CFiSsJXDf||5+CQ%*y!pP0b6)I#o)8~#$;1_x zOl)z<#21%LjB&}tX(W2WPzu7*6NYX?-rZdZ#?aXoJI`eokZ{00%pfTQO5~Jnc z`C0C#THtoS)^#z@I^X^oW{Iq24F<9GX{|z76@9%*4$Z-*^8FYpU}n!KfvyVVx8 zm=Q=vdfYN9C;Iw@r_#)S7A<0Be-LZQEg4>K)QJ?;$Mqjy(!g+)&UxNx?{siFI-OWG?!wb|-JBl&6SsYN)^ULIBF{Pwat5=i zJk%NP40B$l1clknt->1YlZ>QBusS=Rr-MG@*`y=v#oCMyYgv!;bW&UPVJETk`Xx`~ z9JQ|f+uWO{*W$Xlx!+*+$czO>qGn|E6O!UTPygY%J|0}xN5ZzwYsqPW`TTxCtIS*> zCiL#dhwyzM9-!4Bmd{d zdnyBanyYydXg<5gqv@5*4$K?u^>py1rJ?j%C@l`9S3~LbP+Ag7i$ZB(D7_L&^FwJ~ zD9sHe_AC=Q;QtpB(t=Rp>AZy7%ut#UO8iHB;_9?eni@(|LdpDELA;ccLTO?sO$a5P z&P@0l7fNG8iQgeeTpb-sqe5w9D2)iE;i2?$C=Cmxp`kP+lm>^=pip`#lm>p3 z%YaboA4>f~sc$It2_^osArZD$DD@1b9--7dl)8n|3!&6Cl=%IGM0`9Ch7`6eYYUd; zw>-C#f5lF;2+OnwYbotUc&>r=cxx#A_5bUwf_}j5=(mbvq4ZNI{TNC=gwoHUbTpKX zgc47$CF1)oln#Z`x1n?}l)ee2uS4l@D18-5Uxv~bp>!aWJ`bh+p~Qb$Ci1d3l=v@> zgxj7_`ZSbwhtemZv@4W84yB!;v?G+ZhtjrC+8Rn9h0>N#`Y@C>htdb3v?-J}hSG*m zS|3X5LTPO%y&p>Nh0>Z(dN-7kL+PDRS{+I&L+R~MS`kW&0TP;A9!hT}q=){;{*US{ z=4QA3*V%3VXJ-D#ENqkJiM>RdT;}KNwl>Ll)U-)|9O#!r(Ne>(skcg%R7X;MnQCjQ zWvTk4s+Mw4%3dkUr;Mc-o?<|X`YAFcT~A8(_rfQ#2VOpUEV`Wk4Nd01Q@irtuPOLX zy)ryqo6XhEUPe0cJ>)(1RGyvB#NP0s$n40(NUz9!{@!jw_H?!0tOv3#GnVVMVS;4n=5A{ z5qyn{&NN=5z0x3B(ny{IK2uZ_0l{gQHa@7cFAO&!OukNwDD=3!slu00>?Af zd0Zx0=S-Z|S&k>IlemoIIcaiX{mF5m^Ydw*0kKy3?pP`L!$Qd))=5|_ zlYHf(p$WPDnk{ZJkK7&>qqXSK)W#6QH)+E2HoA43a2 zhUOfvSkK~e%4&u@-g?H5t*IYF+z#scKI`}{x7J-*T+Ui$IG(W{z|SPBH1bL7e%~jy zZk6+;^#8TmlR{Qu@|WEzL|hlGf*j9VriF~P@^L(C<>h$A%0rx|tlY@cty~<(TSYle zvWnn-*2;`L%gV{|l9hww1S>o4r>%_0W2{WbXRLdW$648sPgq$wp5(4xm8Yi4Ft-oe zpmi6%u3PDRjitrcSt|`LW3ALCgp~?;ypJEuS5ctvj^^eO$JA8sqh zNj#H^HuXl_PiyW&&Kb>}CpJcJ;5g3wx|ef8uj6t6W3uDf%brfGL*gs~(V>Cf?u?uW}H-52?+rah8=7mm|) zXO3gFX_XT+?U6jTMINL5R;m3~sh`LF-pE~avZ1Y5aNWcg*;0?#&KF$4>kN4tNSrlGa(XIK`SALw4sD`(mG-jvETnv zUn_+)Z7nISPHAG)v=ej`Bd$|AHS&2)tB9S|rUi`G3ZIW^S`k`H<>wMhI~S&;dqDcc&;3Mso%JAR$cP_{>pK@`i0|I zb<{)}eY+hicz3jNMazv{SG3lo^lCjv z{}=jVSJYbKJEcrp91s8{OKKIziE1Us3F>XGp61OTCSPyiGDa=u zI8M=KDIaZ?JboaSbDArgIG@4weX(L=aDUI7Nrz{2`F=vZ>z_~G*)OoBZSuO%uh|8} zcviiF%UCs!<9Icf<1{rN_v6%ToqwoV`hlb@YN{V{iXU>4A9At@!~X}E@Dn(WQ{!oi=$EzkBC#lA`pXI+D=r5G%H7=<~IZjYDaX+o9AdgWG zBmXbn-UG~vqHEW#)YVfx3`mZ%_Y5%PoHIzyNrL1Y3?LZ;3L*-K0xDUN3?c}Kk`*w5 z!@uXB}Sk|{c-d}?}{(YTK~(P2-kmX7I747-$jc*%1RujJz)eHq#QlHQC1jF z25AuoF`WThKq-m+`0ICq?kU&tZ3W6o?8WqTfjbLQx*OB0u$jnJ_V+szEC^3qu`II9 zQml7FEW!M1Vlk$Z#Ue~!|K~iyyEh#T&sFRjR~6QK3-gy@hqNVKTT}3MPFv#4#iYp6 z>}a+5M~m(M`0g&AisZvUY`b(B%WBE|5?@2j|r zlBNEWS0?C5d>>v1ojb0d*mNjSN=thze+_R{l%l`+aROH#mNk$3ovy*xQT~*gR+yhH znqZ214!<+veT?=<*W3;GbhO9FQwDFQbVfBJ+fn`o_}f(YiKk&Nd&HNhC;b?GL>B*V zJ-5kjGh%!}+B@tP`I`~@y#_y;(wPm@H!#u>r63OTGw_t7JdKT{z+u}wxHr<#HKvo0 z7Ah4V^Yn&B=jCB~6;>a){!yz;7Wf^!QXNdep5saJ_dF53f^&%64=~ap=YEdJ@2m7( zWF+?`Ed81rYnM#n63_1W4a;Oi0PK#6HK=TJ22f9oW%5Wa2nII!C6eN2WaI9ehPlZH1c(A zaF3#31rK78j#wRwmb+LNis@pqc6=R^z_+>KKY-ZFU=bC(pijNI^6CtQ1wS| zpU&v((h@B~bS7R z_bjHu!&?!)iho*I`!ZU?XM6W;8*e0jjYAvvEbpFu#2bM%e)ooB*(+%Ip5y&y+j_&W z>{k!Z0emHo=K#Kz$8!K*^$bFXEd(Su|lrprA% zUGN2eZ|wIaZ<%fF^~2m)Z>eqN^+h^gL_0mc2eU1`UidWzE&OkLi){-JPayAn%t=g> zW4^`oT+9hfPsbd?^i<4or0oUt3YcMO#0d{Aj;qikqp8;x^P|vXV47{>wZZ(e=s$qb zSul2sXP?2>E^pX|UTe&c#ONwhY<-OE;-TGgDf)HP^;%+n82TN&X6tw@Fh2yn5MH$p zd(AOF2raG?Y)#zJ6xuuKxiZjJ$8>MpB>%*AG4J( z|C7Vh2Q8-+G5@2(69Mh06)^2<%VXNd;yYoqua?D_D$4)C#o$*jwDXq1-+J28Sa!?d zY2knG=xNczmcsl^N6(Dzwj|c=W=q8MLB4jyeV}*@J+ZskVwiTeMKSGUi(uN(7RI!L zErjW#wjieMZ2@fC4x@75`Q@iMJeB+#4);_4y2HK1|BlA;_OIcSROMfdd4TDa82Uyp zdbd`u%Gx&D|2TqUga15wV@&QpMsB2W8*TQ#j@-!OHp(~upN`wuBRXp1RT{C8$7}p! zw8noPtC2@))C2!9Qloy6M{4}PGfpE$QTca00~%33gvcG}|N00nF$ZEB#vH^*8-E=W z=iiOF@f1dk#8{F4WgMJIe~cj+^Bs?e^S?6^M`WzY|D%y6$?L+i?Z1yT`3~OK|I;{= zng1MPl17+(_-|uO@(7bt|DTR9`N#N@2P0!k^2m~Zj4S!iqe}LOj4Ao#U&fRi9UW7W z#xVZ>KB8o!|1ydajiA&QBPo%^&@tq&w+t+k{9KMRo;6LPN z^fURH{VaY~KbxODGAic($(WcT@@9&6tmdb2kEFL$^sJ@(C4OsoAM~3>D;Lda)C~Ny zlUXBEy00cM=#MBcB1Qy^T15T!B6z!@`512nG(Y3zh9)s~AG84DO@S6jiZGT&94N|o-$Q9!2H+u9uPDx-Kevc{fm;I0>D)>(-UrZ9pbVDNNbod1czMj% zgjN8RF<%E-g~7MoLeP4^I}feK;5%IbUnPQ<2Bi_^0eYkg_-_%s>Cjq?w+Q-h6nf{a z9fg+B@rXvCZWJ1$qF$74(E3p(LK{SR9g5NFBP@W@_QY49F(U(L6Gmo$Hf7KkNYI$; zk#X0lOqw&2^0EaZDQztoITDI7-wAxOD>L5_i9%=pN-@su7)H;`{bG3I<^+<6+i z1EXvaawimH))R6!^if70f_7l!2`EO1A|#b>C&t4V6{zcs;H`mnVKAbCz`Jz>8v~{M z0@j0eXDscr2ZOQO1nLVSC@P0ujG{d2%_vHHA4c_sMp5H2-;YsmLi;o7UFc&BJR^Y` zjR^341nM{Kl8I=qjz^MDsfsCd!KgH;5&_Rr*G91ikDu*GAru-htXiC>GMn4T5 z&geIwBN&XDB~TL*K_7$CdVofg!nj!x^hGF*q5|~q(B~MBY-|+ct%5$!cnhIq6TrI- zozGY*JK6?Nzi71WM)2}L7cpK4x|s1+LYFXJJaj4Jt%fdRENx5n1njra<%~@SeV@U* zp%5z=?+TRm3#@@sx`B;_(td$G3nhC3HY=3M4|qR8*Mg6*oUYH08B6)Lj?vSh>lvF1 zx`FX7KsPcr6}pMB4?{OIcyktF3*%jde!|#o&`%jlf8Wa3?NIs~@Mb~j_ve6Y=8GtF zj$cO6(66GzK);Ss9l9e5+0{<48*~SIz&=2~_eY_14=|3-@gO6ML&=_w;5w}jJ<6ai zS)evE0^V~3>Ng{JKR{0~vM%%_IE`(o+|Dp)a}oF+DuVH$WS2mXfF?6&8xkUgk-tOF zGiW~&;sS#fC4uirBA|^)2r2`hvOzB~Dh$2MD7qJttz5%8bRB=ksD9AvAPvi@+-@>x z5fvE0EP|o?-z`R4D4i#4N9TtA%%DA5pq?lKTBwD%!+1YJ?=oo77UCE18@8PZrE>%N zP3S!ay*3c|ol%XT4;b|9h`KrWjq{a~L5mxzX&8EvOV~sN>`x-U2&%i19;32AaSjAM zD7n4S=GI3A$$^YD)yP*HSE(2z39Z=O3Y- zg2Gk^x|gFBAc7hM&BLI+I}(QkZ3aT&dY2&vwc(LCCK7g}W?(*nL0!3!iHt%y%DfC} ze}v4(s9Dhb4BZQ55`#8yAqy~cZ<7TX^&YekL)WJ)%&7IyA`D%xvM7VrdjvLT=-Q=g zl|b9RkR=$pj%7(kZHJa(=$b~GO9ZtRT843S9%UJ|7g{a~m3eta(Y6($JOr)CppSx( zl^E9&S~&{keHBJsfmV$|(_8bO;e=$C?E z+)-$|77Ti=ARu=X+D9u!lO45=G6MPtqv`lJ5o1$~S`e=H#>t;A|T=>vKWl+r}30hA7)--Zs1@*(so zMpN1bMIk#M%xFs6kSHHPhcfzI=&&ezpu-vTeiM?)i8uvDGU#(B&?3y7hCUPJ7W7%h zNa%A>et?c*(EASoexv*drMv*>8;HQY3`Ua^lJX8HvTL#lK)wR%n zt`PKP#-Z%xD-6BYqwPBa`4Dmfqnbh|GV~5EUu7ci_meOWTu$g~j3Qg3^Z~@xLl9hs z-YFxt1^aUGm?wJzE;p2H2%zsDg5feAT_aT9K#)yNV@zQv-HU-*44uKy`-Pmzh;N{? zqGX58X3&otL2(&%9{M)pT0zMsfGY|mJ0L0mDtn+~q4T1UUCfVC54wQSxu6RfR{*+* zagRVLtpt_%5=J+JE@fO1=&~r3-uDj3pFfIn68bTtFG1HuxeHy-=zGu&j0-_GGA;qS32a8* z)`4zeyc*C?7_T<;Q^s|KZe?6!=r+bZ3Ed7p$9^dPzhK-D=vNH+g6LBdLG^;}U|c@v z&L}ib_5su}=q|?5x$llr3%ZBVS)h9vksi8_Q5w3RL2EvOyE5uC=)ovWp@$fq2&MH1 z%F81R`lKRQYZS^?N)ymzU&o_R9+T|?^k+rD)+m(U-!hu)?o<@Y`_l~i!Xj`hqh5xd zWl#etVGx9P(Vn zGp;!_#8BM>`GFDMhbAztIFz;_I)c26D*=W55e~MbC@%onh@$fc8fB{rFzA0FRKX~e ze}x#T6HC=prSMZP5YyC0D3o6B^gcoF2&G$qAJa3I!+me z?g6SSqpv~BMZvYI$}^hmq5`9zgI0{P16m1G#<_QelDz_|XH``hsvlIxGmg%wHsiWO>oB4ov@S#Ubw$5{qw}uMP(7Av5T!q~Aw#|> z`OFc%fi`C7eM(VUfu{Uy%Fw%vYR2ex(B=%?|H$tp^krzvC}ekZ{wOO)c}e>sD2-Ge zz_o?azS>856bc;WWd{ITbCge=Kxe$~4TF-M0W%Ux`3}qoXtyXkp_E6&Tc8KyC=Yr@ zc?a5yG3TMZ8AoN?C(06NU(gRM1eABgCP3u`%xvi6QT9Qfh(dYtWRz6sfG8KC0~tp) z{1h03d=t>YQK;O9Fgkz^WwZ~a^9P#l4Z|5t`AO#wG?gdi3vkF^^)%xskI4>zZV!Ey zaWqdh4K(Hb3yem-E3yNiJ3z;P7cozHGnUbm7cVicDs&v<$QE8<9NGSOMn4aoz-T)D zL`Khmz6vHG&6Jm9XFwl@zRu_q(8-Lx4V}X1-=I?&LwWcn<7z>v?18HdoyNF^(CLgL zTc%?J*AzOFah0I67#};O=QQESj^;3i%H-`RbD?BUz){)KZ{i?$H_BP)yeRje^BGMx zuz+z?c4TM3;o4J+qV$9=j?x>tlyRM*%NR#x_a381L6=8)1NuJW$WB&(mDoS+^8?1! zhpqyv0quJYgZ>AC>h1{#yP>)d!j^!3#MlbZj~Pd0x{h&AL)SA7wx~8Rmh5CBW2-|q zfz3jAC!t%I*y7Mn81EbCr%WuRXDbt14!VtrEeqWaJ`*CA()Ky{62Y|#Lcd~y0?@A+ zdm6ffv2>m8WMWH0>3F-b?K$WkCbm3uFB4k^x{rx11>MiY7KR>RVv9l#GO)*rZPbtC>6V!)Z zWrDiUYv4PqPsh0qZeX7BKaKGyzi%=D<>&X{4%VTvzRLtu*1v!USPr|=g0VP`mW;*m zwPMgyQ)sM2SR7Lu#v&b5Pfei5r=a^RfqtTb>aYm(_!RUWK%h6Lp!+g`{-A>D%Lw!$ z74+^)Se&2E%mi;jvogV3(A*6AH41unCoImN-fswtJfL?PdVfJ5S)q$D!AvO1Ls!D` zS`4(4&)>H452=I20rfu}Iv0y+!~$2=Wp1ViRg8HNO6Ly@ogbA4Ft|_a z4;j-2x)!X%@_|tL+Xl>&Eo=mvFi-innQ?oeTNp!W`2>6lTiOTR${0%5wkTDh+rekp z52f*Q@D=9im|rty2lNjvV16g~2I~O5i!oFtyBUM@={-@ZK`BkZP)b>mmDyw_o0gi?IFj$r9O~orv6{R&)Gj0?V zD@AAu^%$xPG#2>S_6jtHk?Elh1fU*>jZzbuj&ZODvSUJ4hSD~`je};0QUUr9BT7Lt zM!~tzwM~cy(9Dc`4w@xOacEXXybR42r35s46xwGFhT2k0PDWON=894pnmbB4XdZ^z zg-kp{ZO$gdP@T641KiKt7%1+WCO@bMP&S04^uSgk(76|k0^2r)Krv7Y&^eR@bdHo( zK)$!3G?fLgZ9{1yDF4ed^qy@f?})2_j!ozH9iZa@w+u?>Nu&WfHUV2R)uP;nR*!NQ zT7#iyg`u(}U`LU(BE7mL<{xIrH#4=P+=td-$i7Y8C=a0Z7=0c}`vo4^Ae}St$nF|~ zMws`ZjX@L4lWmbL0*`F18EAp|hoF>>R>-TdP_kn{@5lz%1K}1!DSf~r8)?hXv(B^w z?XmuD=%Wn1TbT}^Bi1K7?Zl}0(9Voo4(-CY_n=)Fx{ggZ#-r=0JL6t~_F&xm(4LGN z5ADTxanRn3n*i;@xE0X8jGGAU$GDZy{)~GS`WWLrfIiN+Nzf-4w+i|s<6eUfVBBix zK*qfeeTs2wpo17U8A`Se+=tL1QOE{|GMdVn@{~Y%nBj~^*VYKeO@WSN+*;_lovpfEz|iEHvpY8aEqXHp1?~7 zrE>$+rer96KsSX_nuwy{jVPC(Z!+!$=v$0i3Y`{((m$Q?GDBxXDG8k!lpk~q;68?~WE|xU9Rs*^&{d41d|J)8_0TnpqdfbN zaT}m(8F>^+`2su%rDG6P0PPoe*`c&Ag6?OuJ@7o}#wcVDn;6eRH%FoS+!jVrT0V)= z9QrBa<$!Kw+zjY8#?ijFgU@h|Goha|j*jz1l(Nt-quhjk#W*VSucQ0~CHn?~@^xpF z-=NMNRHAHn!`Nxucy8lx%ysq6v0-chvanD2P7{JjrP5T9IK9u$a9NFEkQHnu-i$dr9J0qz)?=hZ$-e=qb z=mW;VX1$UOr6YY!#;YtuJhma+0B9WJo`U9P+#qNj#wJ0L*MuzqZ3r4+9(EhwjIl+b zEf|Yq#-qFm3%iYP%~)Jt@$DE}4%!|(ighYMJ2AEjv?pV`LVGc`AG9~mNy(7L;rpfeQLZ2TcCr*(0C z#UsCiF3{slfa^H^1h|alJ)p2dBG?Os4HEd)QN+Ur3_^~f`ub472;_YzmQgt0 zP&!6D3WYro3g;V2&xj7t42(j#gdSoghw77=iK(WoA@&D4iz|PeQXYRNo!S z#)tvX>JX_Sle-bl!~^O?lOrp?-X! zCJfc*g_<(5EwmYCirm9t`cv$QjUnj2;5*&&Zk3#~3{n`Zz;< zi$YH@dMxxwM$U&0VDwASfeiII3Q_t1)merpO@R6zg(w|>>MuidY#_Hl>3D$Z-a^9| z`3ZD5Lv?VW5sds4I+CF}xe%2TkXxZtHh}8sLeDaC8}vDb>g__K80u#edY;iopf50T z7j!g3^-!TP4E41My~yZqp_CUu?t{|#1APig=M3b2D4i$J$Dw2&KpudSO#rIL3XvTE zc@Rov52!vX^eQ6{Lnkph8TuL{Db24lIt4nJp?)->DUALOI+c+>LEm7g9xC)EBY%dz z#ZbLeXc{ALL&=5!)lr4WZh%@1C0hYh_Z*tVXgcm}Mqh`{VWlu*%-N48P(2a~34Bf;~yK-nVBk0=N!caSNh^{3daE*mN zWvIP5w3QLVpxYQV3c8&U!=axs>Urqrj2Hp^f}!^6(3gxD3H^$pcI(jBjCdNlgHdCk zI~j2d`VB+vg(153fH)4_%~1PcXb&ShK)JhdW50A#i65&E(4|X0qSQQqBH?r7J7oAe#fDcj4lWLmXY0|rx;xxdYX|v zpl29e0eY5^J)!3qYF`Q^GqM*ng`qa5(0NAEwR3^dm7y0IxgDCyP`gCv5+kV$FEi9e z5xT-iD$lD7wO53$G1P}KME4FrZ5JWBo`Iw?y1`HzMktMuR7N)$YR?FL&qyl&TMV^t zgnnSCe_!ZFM$`U&VkDjS&y1#h-)1D`!5v1^aqcqI2Qc&tLv{b5Ul~Vb`Wxf=LVsr* zmFYdk^@HAL9F^$<#`PC;8H%uv`(8UJ?hD}zSdb6;5Xg$;aZ>`^0qp(A!vrYrQ@~)?XmtQXa`23+`_P9LSBJ( zVI;~e+>Mbaw{UlWbE0~SFzxFp%x8uUVyHefJeYB?&F~P$yZ{}_I7%Dt1q9Xig@-fl z40HrT^;_YQj5`aZya3b=8h(ax$xz&P2&%^kKgT$fV|Wxp^&jEq83$Vqj{)PcKAqD9 zFhvM~atTibGqHRUbQYM0<)1+3gJoEL6#5=mjrly#HQ-~+BQL`1!3NAf3Ec=bW80$8 zEsQ`pg=t?vRE2)Z1eC6=jKj4R-o^x@pxc>%@`uh11W0!n*HReQ2i0GOVfW!3n4baN z32=`SFF|*K-I%BHrE&rS+l2Qr!D#3{CU_CLp9yH&1K<$$xfFVsap$1Nz;P^J4m|-* zVV{&{Dl-s_fl@hvfb5gD0l_$EGQc%Nb!=fO1K{vGOxxo=LUoGaOW-o*agB$s0NfKK z2w!I;UJN=cR_M-4_47FK> ze`ly2D146zCPMEs!3#np2oQ_9g&(000c;n9pqUu&BWPyETMNwsa4mbVm4s|e5D(4H z1lVCh4v-V;pbQgo0i?@AnIz-^*tduBO9(UGK`3pLh%{JeUdCGw&Bu5s_XL~+5m364 z81F2!0OKLA5(+W_&LN>BV{buAfzsGNrJ)Qchcq05mIoCv4_ip6#02S~l|dCOM;Rtm zWdb_yYK-?Lv^wL>hSp#L%BPx4KzUP(2`FD5W`YdR+Ds4!t;2X5p>-LH>ms2Z=Snv|or{j!cEbSZDQo<`(j$Gru+i79rO*xc7(pk*xt~$7&`z;c?}$$>vYC;fl_(E7HoIu9LDy6z75{N z^8QdNKMC{-D;X~@^aI8#2weqM!)8iDKV-ZD(6x+L9QqOX80!>)u4AY!Hi7IIc=@55 z7>};w&5V}-{elAM8LuLA8{<`ho?-&p-)Sb;06hcF3gJHiy}|@^obMTo zo34?&T?gv|g&J`#qOBkvN6BK-;qJ0N^YR~bgYUK4TuA#5fn?1r!hp)DD^1KJ9- z#_~PTM?f3QBVQBSf{vK~4B82F#r#faH^w5b61#)Zu;Paa1B_4Qd5H0;ypAwFl_%N9Ijr9odJ&{z zzBiQe0{G3L*BBr7rMxI7!lKOcqMQhu4~p|AsP9Q$lrdpZ{&_PpHVj4i5#B9mF2?4C z(!PL=hr$*J3%kjSYnbp-pbZ)CXDH5}@NPnp*1UZ%e;tav%!_>V--XU(`~}cij6WZ* z;+pYWK@H=>Uh?5Q2){KHl1f_F#P2OMyO&-w29)Bm62t6wJ){rJ&gvzcjQRper-Y9(E{gLbeBAGf z;<_nH>(qkgVf@FT$VbBOFGR6XjKzJR*l0%LIK>|@7y(n1$iaAXgjhO{37!#R8P*}( z2qBi^9!vzWLL>{uuLo5C%fw75&NI0v=C47E0pyd)2Ss@hY9q8MqqaeD4utv|ihBy7 zc0pS(3iqmHTyM#pu^joEjBA6?ZK1ek2#xD58RtyskKidSWf$%!FaHjWICTkSbhge+XEXwmw=_1N1i1w1MguT_l@M`UwC<1KXY|jnBSea2f{Rex>xlr{q#(bD*!s|PAU&32A1N7_*apyy05%+4 zg=PoH*Wfxd5#+^u8Zd=D+cVC+|sQWR8#o#Wh7DgoT9e4JNG6;K`XOQAJDJIsFt zMcJfu#5~S91!a>0`|&Bwy%<{_`UT_dfF1%^*AGFFCq!&RDDvh!{vM;D$mjDXFnz2x82u|0`AO*UP@f5YfFfTBALW)t`vT%? zDDsw&i=lAi_ZFh&0bw%>dADT*;bWBGyR9P#A2Efqu0k2c zC`tq+;{~x2QK!F9ZPas!kUbTn``%P{w9x?~eP_^_byl5Q=hKCBFazGggnhs5WOuM^)WzFBL zyW$VUACJEre>MK+_y?i4Ll44!I72vVI6ho3TrylSTr*rR+$`Ka+#%d4+$G#C+%No8 z`1$bo@Rab(@VxM{@api#;V;97!^gv?!Z*V|h3_ZCCKOC)o6tU?Tf*Rk$qDZytWH>$ zusLB{!r6o?3EwBmM4f08U1H|M?1_033nUgzERk3yv1($o#BPbB6JJT3m^eLgcH;Yq zA11C#+>&@8@lfKC#B+(MiI)?v9eFANxPHwCtXUqi9xSzfocVM z6zEf6e!i~A2Az~b;PLChDcWMlai#&do_y@HX#mm)&@U2Xqn2?byY6?JWl z6WvP>)6eR0u!PBakv^@z*T3lB%@{NPZW+GinLj;&;a%jz1B9CH`9c z?;*6tg;B#DekhzHoDeP)E)}j6t`%+=ZuzGrbPx9r4+@V7PY6#9&kD~EzZYH;ULXEC zd@Ot-d?x%u_;vzn+Y(AfEMZW>u!N}z^AkQy*qHE1!l{TQz*kN5B9@RXF$7B}@~0)V zOzfWc5-eeA;*7-QiE9!+O57Z^gtLhkqn6;o5;FeH5_;#Io=?LPlK!-WeXxY?`Pah| z=ED-!Caq7}oU}dZ%cNa@T7vg4mM{jE@N?7>GW}aiP|4|%izPQnZkgN#mheRKi^&s` z-%OsFyf}Gh^6BIsVF@lJT}sB3>?w&6ODLLBDWyhA`;<-*OE{DQQ#il#{K+(#=F-wd zEFnIv04$+l+7MX6gtVz?bJFIfElWF`b|URu)DrLn`qzK-9*eupSbfSL?Z4nZqj%xg z6Mhdwh-+i<+@*i_aBu(nzXxC6{`LBR>picxy>ecNE4T+-eh>sqIp7r}j_jliD+7V(NgD#TV|KPe^_2{N(empMUNAq|_%b;$7+Dx#SfW ze@n@qTK?j&iyLUGi-+kw;o^5fT*7@DdwJo~Tcm&eclFTKwA3T1_wd)dS1Zw9&RV(uWsr^#>T+D+bWCSr69$dJ6 zVa0_HE=;+ArwEo{Zx>5mIC5eCg*`|?;R}V%Z#X|UWo~jN`s*M6Ql30_Fy+~l1}XJZ zg5=A|7nA2C$0z4HcjMfpt=@O@#pRRlI!qMl>R6IVm*S9@Jc0W_F zUc&T*vx#`}@dYLn zn3(@iev{O+aK8eH1(FIp#A(SgFU!uXPh@>9d(P~+gvj%G&Ov!LLpSDWhUu$9Jd{6{ zqALpwTK>8E)};5~p<}%3VHh`iB}%;-`KJH)N1p8;|NO>&bE+-yd-@an@BJVA`*_6s z>dO7`cT(zI=dMJ##(!~8cOOqtL%#zR8z~?D+kcwIe*5>o#9m5=N171na)2tJVLH4& zK)0sbp6;i(ba6%E%EVQOt47P{UmVuKv=S@L)4w=8U*lSXcG2G*S^qkXYl^>vzy6D> z6;~fi8~(8@uI?Xmq;XXcb+JZV-MISyyxm{dimX}cpO^gW?>L-cTzv4zkw!$=S4FBEU#3;VIy%k<=yU&~A?V?eutwXHr zQy4k_G@@w#yRog$MaIrg!|3@K{Z}aQuxKROh;Cw{cwH*fsxF#{ z?xKh2DSCcDP)!xU4T5;QgtNtnK|E>&o}#3Pd>gKz?g{JwiPse^*B3La3>1 zh{DlKJ*FO4PpE!~AH4<96h1bO;Mtm6=h5*xhtBT(sB`L>dX}E8-_%!BBb|y^5t&6g zc~8WtETXAuE}E$pqPc1*TBuf{o$BOrsCJ^adQ$XJ14LgnQ1nwziT-Mkn5w3WH`EOA zrkW|eCz1WZMs1K;E;-K0t4ym;+ui7hP)gk$iI_X-YB^1F3UufB1`CWvZRiarF42(QOC$eIwTwGuxz3erWMdWNf+~slA^)qsj>mZlN<$9V)a)sqdeM#Qc z_gu&&xH_(`SHr9AI=aU0NjKCDbHg!$QdiZ><&*i;F_$E3X~ZT~S{zoN$)-9{eyr!o zb$Y&BuNTM-dZFB?7pa#;29-`cq%w#aDnU$E)5J72TV_<>%4RyRY_9XUuzOe?#`rs3 zR1Yy-%@M0yF=&h&ZA?7f00>vW3nsTk0e^ zPIr?p>+W)sUMx53CGt%@P`;~QbcqF4o=Im12f zp3sN&3HTbn8R2TV+ODB%=9;=nzHvRwP&L;LL}Zk+>Yi@x8o4HjNz&VOc8}^&>J3-V z^>n>leZ0#qv&GyqwuG(Vo^?;Vk+!O>?w$0$jVtOd(=@cy_Z4Fcgx*-`Ug+D zSKRw3oF-jp%7WLEQo zSb?uQJ}|$UCSDfP%**E9bRWC7+&VYSt#{Mi26@zzriz!{RP#Jj9TAyps5Pd!m&46) z8{JGd%WX1c%}qDk{3PDTHz6y{?{1FUEDM=C>UU9E#fvg3Bx>Pnp9yN5Y2miGx7{c1 z9rvl5>$aMfUQSunZIjj9b~!{hki*<(a=824z3aZvcl3{lF7y+;y`SAY_oaK@EphYR zSEiV`ERT7@EpT7k0=AGXY)jhGww$eGDlj<&> z#5Vy0(EooR`uabmMv6h|X)#Gn6*JV^VhQ@;FGZjGW$0hOP|XwXq2K&2^pW4G*2zrj zl+3J7%Pi`Qj91^ukh(6z>V`~EX);MGSwL%9P#alBKP1cQjIx~0B+Ki}vaK#6+v%dR zy)Gsn)y3u0y19Htw~(*s9&)_yDJSS&a-!}n-_lRXX?l>Ht_RB*dWf8x9Fwv6TM7+s^61a^>VpQzc0VhN8~PjRPNQ^%6(z1NyW)sL#m5 zI$0jkDe|oTPX1(!{MmT&wy|openp?v*HuZLUz<8WhY%qcGjh37hOhn)n#QjT~2n_bxif78{SGB3E( z=2dsbEOBRb9hYoAb1CMCyI`I)1JHM>m?$nvimJi$?qV=1cr_Sr-ZIn7bTh-u6fcQ! zVw2b`z6?eOF9t7}rDmCV&z%d#1Y?77=5zCf`O=*?tIZnomHAqJC$GyJh*0%>@N)1< zFu^376gf~1GPzAglRubc6^ zChx3wCdg=S+5Pr_cgQ>J9r2EN$GsCldhe2VImqI@>CFx@2iby$f(${Nx6zLG-u32t zGrXCIsXo=_rdSw3_8@DJBgh#v4{`;$gFLpTEpF@EhPIJyVw>7#_7U60wzVy6OIrr9 zjE2A~8;%%A&mcb1D8x>B0THBLM#QKwh{n_oae{i=0d6oN2z5uKp`M6C)CYdtV?i^o zgjdR|Ad+$Y!(|><+uf?zNxVt@bnfx&6X^X}_{x+nx3syUXsjn*-lH z7Q_S&F*|Y~T1QUA=g94M_j~!>Y(C>`*t_Mw>(BES`1Ac%{v&>CzpdZN@9cN=yZB}O zihg;&a)dH|1-~3>9EjR}U7H`isE^nmSrG#ygP&b(j7Fq8pxf$Bh=IlNzIs@Ssg~2W zvaJy(=~2W@>V#-YeG%(vAR<7GwJ*W1eHF2yUPm0MHxOfL8sbmQK$NIOh#<8T(WTx; zfem`$Upi&`9J%&{M*5gv2FZ2v9%FX zu6t~c7}N{Lq9zyKQ{>C*!FgB4*Wi#1awva7FA$U`L#7?nGwvw&oBeIQbE8EHT@=@7Ac9flD zXW2z|mEB}_*+ce}y<~6MNA{KdWPkaXd|W;upOgb|#Xcnm$-#06-uj2h;i9k{0l)HT zQA9o?pOw#vyK4V7sOHyq53f%YldHr&xmvD~ABz3t zBO>12NAhF2PW&p@%MFNsw@Gf6TjVG5Q$)ktCb!GaBd+-{kM|p1dy~z`)KciDyA6rQxl5 zsQ2+HB%CM>pAwZZDxHc$G`|e$A(c^OQkfCkFDv}n>?()K3Ewt1#IOD}( zaR8oW2;OdjN>q7OK9yf3AtGQwRY(<9MO0B$OchrpR7q7zl}4<S0w|)lqd-Jyl;dPz_Zh)fn%-WmOZ^6uxwG)dHSyIe5b5RcpL; zwoz?SZw+5Wbx<8uCqyOeA}Xk^cvJXITvXjv4|oN=RBzQs^@RuCUsQ~G<4>vqYM^>b z4N`;E5H%EU6qWEsaZDT$N8wA4P$SjT>KXMcT1Q5y=TR>{T8&XJM&1|4sh3epKK?IP z`s-@4nj$Kr73B?41@-4|scC9Dq9x80Rn;stTg_2#t9L{-HCKG6-W6NaJT+e}i11X$xk|@4&$u}aU|6-bzIDc z*L)HY9#0`c)fxCu=MZNxMV*JQb`jp%C3RU{LEOb_iegvYP-$o#`d-~aRLCFIPwHpH zW4xmf&q~x&zpCHV@9G{RMm`W}qCWX};wf=oE4)kLn>_7dT&)ID!sm;D#~C2dW;z|G z)1!@v;$US$OH&q|RWuYsbv7|r3=%_hcGQaGL|js?AqnZQPSA-uug<6Q>m*%37euR6 zVO>NQMTTqo^%_@*6oC*4_h z5yQnWF#<6&yXo$_2U^8?>E02)wV&<}-}!O@8-o~@WA#gDPk9+{bmI}7b0S*VCZToZb+oTcLHw^b zP%rcrB6m*LGotlGb3{x1HllgX)$i(gdcJ5So<^(OLbS##)=TtKy-dHSmm}im3cXT) zpjYYDs7D!vZv__UHR4(EjCf9esMqR`^v8M~VuNnb8}%l=S#Lq4&`|uRGA4QbWTkt|`jkE`9zomS zS$z)iM^i)_eO_M>ZS_T+iio6_^%Z?pU(??qKIsjerf;I<=oX@t{s^DqXVDH`{vG%i zzlh&ODxRpn>EHD|#4~+hgpp#RQKG%k#u(37ZXRN`4 zdfWZeM;~H_M(S5apoZmXw75Pix|rw8DD%8|f&6!I8D9NZ^OCq>#+jGRD`vczU?!SZ z%_Q@hdEHDlQ_NKJhI#WJZ_4EHn>nZncn7Vw@0xjLzFA-vnnh-@S;BA4X1RIatS~Dh zbpiCo{NbN90_)8N)B|iXo6Q#UiTTuQHQUT~^BKQi)0?%~VRlC940fA6X0O>Nu9^Mj zfH`OmnZxFYIckoXgJ-rRsUe-pm( zE%O7s`k&0t@a*rHyXF`8$G@51%{_A;EzrV~@a{GId(X2GAKyj1{B&L%`Sa}Sdzrl~ zURE!gm)*P8m*6GhEjXW--%G+>gjvYBlbo7HBs*&|*?E}L7d7aP!L#}glmb>b`WC0ZTg(SjDX2{zH@ zwfSs*#Az)cGKh>~g)J!Jsis7P#22=REh@H(ZDP52UxZP&H%&|z31W&!FK&wO#YXX# zSSEVdV(Q>4i&TKhb-c~@ZVg3)Ov&3vMM>5+)% z*)Hlsb&PmXT~M#n&32D^m%VKt+t>CZACu~O;9(7j_*jGNV0fxSBfjbgc&krG{MF~8 z9_#3cw>lPn*tm%AIv)PmM0jMAB0kw5~XycThkKSqq=^@xSM(QdMv(Z=&h#An@RxBty= z-SMaAx(8nEKJw@7LHN9f?Gby_9<#^o342of6p2WATKp_-qyG0S>VlIawZRwcMYJPb zvX>Fb{3_awzP8uwcc|mNVbknQw11rwb?o z?9cYLy<_j%U+k~;Hw;JmyS-=c+XuezrLTPL8{hliiGB%RJExxuo^~ET{@+GzE#w#W zi}*$TVt#SIgkRDx<(Kx$z~e3lZ@U8g?n>~xtN2ynWmorW_%;1n{=@LQ>-cs3dVYPs zf#1+?gcjc>epA00+I?I2E#bYlhX39M{(C#Wz5l4+!S5LH=E`Mv!< zeqX|FRR0bCP5&)_nm^s2;m`DE`Lq2w{@eaL z{@h6Y!hF;)ER6W=OaAcO{rCM9{!0G?f0e)5UjrX~t^bk#vA@n=?{Dxo`kVaC{ucie z|5Jafzs=w7f98Mgf8l@Wf8~Ge@9=l}-}t-y-ToebufNaV?;r3F`iK0({t^GEf6PDb zpYTum-}w`j`5Eo2U)Ag*NjaBlQ8d{X70$ z|Cj&8+MB>xQ62l^x9{4wdlrV(B`$z4E|J`AHZ{V`+}nVnAd9G|ILn0@7-q&<*n&b_ z(8#c8+!OaiNnB!-m}nB@C7Kwsyu`%i8Izd2L{ZF(Pm?@^nLGdQsjAc6GYsnQ^Z9>1 zFx{v6be%eNs_N9KI;U^!K;nk%gp8^{4(6%p1%tW}ms$ z+-7b!cbGStH<@2HZ#Hi+Z#8c-Z#VBS?=-(+e$~9oyqkXg`9AtZ=dYPxHy<<~GIyHa zFu!R&Y(8Q>YCdLu%Y5AYw)uqlq`Axdj`@`NUGsb9)8;eg_st)eKg2H@KWpwW|HJ%| z`D61r^Lg_p=1X7)#wW!m$4|hoxx5pf8ZSp?^`!X8 z@l)cTjh~8K;pgHB#9>p2!Ddu$FJ2X|j@KZwcY6Gc_~%uA51F@G(ZXU9)^WzJU&s!8fKfXAALHt7GQkNhXcZtd`#xILEP&TePo=3cQ zdAudws&aeCwXTY{$2;Pk@yp{~l6PGlUxO^&I^^oU5Z{0t!CakdV5u-~cd7_wr>hs9ry|33ak{15Rz#{U$5GydoJ zU*i8A|0^c7_{$Bj=@&AecBmRE;f8!s-KaBq~{;&AISgqCyYo*m@t+Lv!4y)6;TxH38IkR=vdg}{_@n3;R{6@s^zlbRQ z)z&o>t-sE?9ufK*kT>nKwp!beL*0Q4>P^;{t(z%_dYg5-b%%AQ^%d)@)?L=!);-p} z)_vCf)&thptgl-SS`S$}t#4T0v>vt|u^zP^v%Y0LZhhN&!g|u$Wqrqb%K9#HR8L#a zxEU)iU-g{zy!8|7r`8MB&#V`%pHtTASJr=8zqWp3y=48?`knQ%^@{bX^_ulx*6Y^q ztv9ScSbyYPGUbE+YW)+P@S^KSz ztOM3TtKS;13f7<jy=uB z_IdVvdx5>sUSywdFSakRFSIYRm)IBEm)MtbT-$E6o9t#gZ!fc#+bwphigMd+_A0yG z?yx)U%k3_^+wQS@?bY@gd#zR5)!M$ip{chgZ_{nZ@{acW%2@5HhNiBL_GoQmSAKOq zRx9A%_SQtQJ{zNJHpkaog088AzNWaGs#Moxj^W8%Ej`DTp5vO~=e1I=wpz-ya;=vu zwUf-%$@_Kkex1BuR~e~o>uhPT=xMIlIRLlF-@_x0vS1s*VOZ(N*ezmk;t=q4U z&S_ZH*xaDVNhEWrSZ$7miprU+^gki|Na!;0Gm*-q`Mv5S*H5Oxb@?7aR$XP+H??*( z^{!ghmS1btH+S?jG&SYhdsJPF8Ig{zau<`;j5F;}R}#tU+Gw_^0TLqBxo5eA44H5; z$DB##lBr0xv7w6zNjfPryPn=&8q0~G&nKVArl0_a?T|1Rh z&k3^zIT^l#%qFG@!KrF{mPZ{?oJtDcQ^N0*rb)&zRU4kAyO*i2V$QJrNMtm}*p4KU zPGu<9(_rGiZ#YCkoRkblN`@mP!;umlOv!MhLmQJOmQ`*q66-YX1 z=~P-em6lGWrBhjXKP&HNg(+EKo~UgmQ5Bj!Eoslzt-HdK(*jge<-+PAU9*>WHLT97 z8?t|p&e@(QDKK#guL!RcNCXxgrYQ}gJKI$V-AuhBcVM7HB9o|OEVu(}?6bV?vnFL&0Mb<~ z5E&7OWLnHdj)x|f77LdajZ2fpB{DoYCo(L@5X0)o*$thY4VZzHb3lzG({<4~t_4Zv z7&opOw+J7L2__E`6F3a|Ig3j!BQ#`)1}Jhicup_1kah?KrqufAxxR)o7149uhN0<# zFeBz7Bj$pckj!OjBj>hsbgT^J+LsgF3|nO|k4eg~bxve3rz(q=$q5^HicX}fDud@T zNy+-U$lR6=*!;PohjU4?is!cU!gK2CUDejm+r#|H$^@6=UO<8fp5kc{+Q~YeVk4AD zvGKuG;CgT}tY^85FswotRw08{FVE}cIXfQ7TvncI&w%F)z=dC;kGZV8&mLSd#~vK6 zDSK{rTSIpXvwrTXR^Zd)LALyRSJ2PGVWE>AUIK9q@)JXl_xU1%t&OM`cQpKEAcW}m$onT@C$#XkJY*8h14FS z*Y1@gT@n>YL8ljq^C(x+t5PWpq0y-j{i+bXst|pukO^1#0`6BAP zb&)&wF?ls2t2IJ>4UG^?Q6_v5G%IS0}<-C+=Mxi3nV3 z5fN>hYN?F(nDm?$`dTJASIfRZO8nepmM5@OO7Pcen=0+pvM8n!=~x3xajd};)nu-w z)@tz9v~=r#oJd9*$b%=d&jvS<5yPF)a-IY7yk|x-*@{pDe2NAFsT=KF2uoy!2UFa; zqoTCEU?IZE9LXVo1uj}a^G!X^<@;ZJ#d){hl6W`j6m;YMxJlhrb46(S@U zJmMT%LM%7go}|QcNLE+dO&)iwreVU%3wlf^B)o}CCK+q?o4g7!Pom1HR5IG^&H`wS zwJ9g-{+z7$b4d}gBvT7FjWt|EE?Hk0XN77Z6XE3$3wyb5c$W`(A(2d_qsu*(B^~KPDj8lr zt-HxmnwzRj;Fg7mS%|K%CCRXtgeqitaTRH(sfe_wF52|BjhqFG?v>k+3b!b_T0FX# z;G{4%B@9hzYGj~fA(^Y;)-rWMkwRckYIvdypb>>Od$kqNh7bQUhsbpY$#rqZ5J!`Sl~*p9n&kZ)nKhV8ebrt8KTmPE2JVW$wd}}(BbO1aW%HKI z>4}G9*H937&J#aYNFsCW;G!)3l1N659bCZ0+_IMfhg4XbOWPg3l~aeToaE|XIkDg6 z$e1|121YQB32?-tb%eoAm8==6(%}xRwHc1PV1-l_>BwtI$W*ajWjK6{)qspqg}$=B zWqA3WNMXr;xhPPVUy(A|NEg{j`pZv>x|X$ewbv;eG&^$clOrjXj|NRMJkxJQpcvN9d*@pqF1 z6V2$W2uh_0T9uGjQx)prsZ_P5?<(PRnT$?AsiY1i0i~XMD_y6sQU{Q7RpQpcH-oDe z?Ip_?=_S2Ws+ACjJf*S_qKCbthml@d_Vo_c#v#)TFA9=6_RMhvOh2@2!Qcj-dFIB3Yt;X@QO|Dk!EKW z;pgIBPf|4hs>B&q6vJ)q)r&3nJsph8RcU`1eRZEoNDuv%U>qm}9@i-S4^n5RAuUt({be(@8Ekfc*vpRMp>8%XMYG4dWNU&aKgOhR6k#)MecuJ82 zJZ0x3gQWvpd6nx7uYuh)v5Vh0cDvqWq?+k)tN>bTqi5(#oDc@!Oq2svVI9FC(V9m%OWx~WP-nu@KP%+6Lnfv%IRz-N_`fli?z>icmeEYOOcVlP=*6T2s4YfoUB5axoKS&u`I($ z69j~bp@;(WY@gvU5&~y3#O#YFGMp~LRr}s7Wf_hgAcUjgGKoq8H^c5S-V?Z9K}cLS z!#+Kpvwh2mo1Nk83gEFb@(wdT>C{z)8=(S`0sU*jrCTi?LxDD&NOwNf8Mc_w7n97#DlvY2-y)a!`<;Yh03kp;I?l?ty1i6Q(q z@!!YvlrSsBQ@0~YN=H_LjwCA`SrIz2-@%c{l_NnC{(6dX%9Fx57M?sXyADQ)l7|LU%>}XUr|hI5^@zJCYW1 z#3nnO9zrBUXp@wWBP%FJ0=kZ5ARO_h9mzyEl7VotRalRytqj4nt)VVAYA@o(egNV* ztRLz6RHU2CWluwIte1shtXzlmt#lZ$NXouZl-!9UD*;DxD2}WM9LX*?;w3x0_(jk` ztBzPq$&onX@jIM3Lgs;N46aq7&X!ga|Cg&gYY0?FI+Rtn=&z3=NfS8Yu{x42aKv+U zByHe`2kS`sz!6W@kzltY`)M4GkiwRWGIO{JW#O)@1030B<46S7;pkb?;dMH$QePt0 zj#CvSy%p2WB9h3kh~O$ln#CHXUW~QQC`#rr!(xp3#86zt=(4+jvP=;y)|eUu4`vi031eL78a6$&2UZ(^~Bh7ZVY9iU%N*<+z4CA`<#P9J!zkle<%x`Y#zW5X`clb zWqDux{0v6~;qeIH*a=67i1>_aHk|A2qP!Uj^fIp1(awA~jEFJ|DAK#wO+d8ZS`%(- z?a~iv;8BU>1rLcqD5r{>IuHwN>h0>vw>Pa5RAf0&i=-1*nlo{wZsDq*Yx0O~fYWdR zE6=ex9xjxb9ZA`Im8`3`*5|uArqSx#ghQtSCCOM%3$zff^0JOzEGq#qyIa@NO}xET zSE3htmPOTDxb&>)z{TWOdzQuMU3G26-UB>W81TFsJL1~aO}=$`OAl^hgt2E?vEXA` zY{p7$T0#yg7YSB^+l-R#iv$Z5%%%>^n5Z7_%y+eRG^^%_xwv62PisPeR^jW;BiNw$ zt_ld==G)uy%M@9~ROk{SiyE;=&+A82(2q(~X=NTkOM0(Viu!Sxhg8}e?ZYgfG7r73 z5w*V#f$E|EKV5L1Cw^v!~iZRp8&sXi-E{_@_2HWW$)uFIyr zvD-U(^nI+etydv2SGBg&jlHV3t*5oKZ5@E|=GN7%&BR$%O;;(wW{9qiwHifn0|F5o z&&4}-LpNw??Ous{B1aF7CP$acJX&@4hY&2lVs1uCqg>JjP(<|Wx=f+2>-IzdCDI)! zmU(>=$QXae1UA~&I{}SyyC?v&yA$$+4pTfnQJaukYy+*vEh~SfHlAPGgqa^f7J-Pt zVJnIaeWK=UwNtrdHA#|4A}Tie=3`$dF7B|~MhbeKSn8ZKxWDa*9#0tr3qgDR5H zV+@r>t8%P?uhz2G<-J|`W^9V!4kRnAj?R30V{cnqzDN3$F6rrNZD?QKmLCEZOU4F0 zRZ&}O`-Is>m=;K>u~M%QD%<&0?lYTFay_qQDyz^mSeT~5n4vWIE7bc7NI0N>^eRZA zLhca||I`6(0aT%6s9u#F zqEwcrQN=^lDemf$gp3-5O13Mv>L6I5lO=3O56JL^pW!Q?6 z2%(R}QEad>(REa+bZ=^-t<&0jS9L|(=^_$WXXaOR_N>E<9c`G_4X-!a$d@L*H1h?M zp?bEAFUX#$QVU;@*HNVvd|AmCe9)V%lU#?RRO#Cg1m<+_3{M) zTvb}b7fidVgfNS`tfxzy=WGB<-8=sjjt9jwm*a*fL^U}p1TvMX+ftrY)h*SL$sWZL zp7y;5Y6e&J*bBf-F}VY}g2i(>;pbUh-HNAxyAt*pbakKgQT6=RN0osN$l!f{PkeT-2!IqDB=LHLAF%QN=}#DlTeNaZ#g+ ziyBp26gzQIql$|fRb14l;-W?s7d5K5s8Pj5jVdl`RB=(Gii;XmT-2yGcI#1XbVs!j znACm2^cK6fRL#3pqmV8gTCQT5QWlVTrtW+^Q$^w#!!^%TNqDAig=cOZ=9wxo z&wLLQ&s33krf!I5ZUy0)DhtolE%Qv>G0)uU%rmOZJfmCY8CCaqriy-^sj}u7y~T{7 zYTm7%XX=qE(*hAG4w+~;PLDFL+mgI+M44A!Nsf4+tlwjoEXfN~zG?bNR%`*H)C6RH*{u2 zzNsfd!4r70yjOi1wOx#;F?_?;CeA7Ahi;rDkxs#XPOzO7>o7Fbw((QT8w0RNU}UYWfU!rTN$X5sY8ikTy`D5t@qRwPJQk)K?G z^+}g-YS%xI0M>ZX#>LwKhVHFizVun9v-<|4$ zo!*PsyYDY>3sQ|~w3^$OH!f{z$v3SmqT9xXwziI*wMc%qHY^AK=-S&!16+YaQB>|? zIjP_PQ>uakWT^_?a)vSwX=QD~Rw+I#hOiB0rQEM+Y3;$8WN2)@xv^s{W9HBvRz_?Y z>Nr-Ps!OJenp+!Ib*S7tP(bs&!qT<+8KKnAsIYWxsJoX(zfu&lvN9QI$wT*8SUI+e z-D7NPD#iBHX&c0uXq-!`9n*=X^Bv1t+m+gO_aGq6nA4neNu>n`YzW~_b8;n>=6paZ z%}EnnMRO$pCy^`EV>s*-zt4FC?15yvovzQ7V1t1g5fGp=nb;Q~W}I^|D6{2BN;nRu z9|&ENgX*ayZ}P{L(bjR=B2~fpQZQSVsl2rXJ8n2YC&{x^Mus)RXXj96P0mPnu;Go` zljIhjvsuq@;0|SOH^XP^QW+7K45u|v=Kf?j{);jXO@`yVD6?2e{1#T1Sh{xfU%iKVut}%*yRo^RR?2h^%B^*3)-?+S28E-`Tq}9=w@f)#bhiGb$8!sab zY?f#g7rbZ`mvxwlOBN$lE-VdbN=$XCrn-b5=~Xd3u1#079qjGYZHuLXN!E~3WocoW zb(w3hRpnOMGKyIgkY}Crdt?1pN(1BAlBcFs%w*UqCwJIqCwJH}CwDpb&T;Q%>N^{U zAN75FmU}nby*tajJKMdR4aDf%o7uxXXW@M2kAafyS`Qmu&M-1;4oPI*UG$5JBa&TW*1mLK^Q zL-2TtApln~r0i1+{fes?x{9Y50?~@0-*6Q}0Di^LuSo1oimVt?a1}$!q8Pd=uj-`t zvasXQsAY(plSWG2QCdede{}p+w?q>?R7T1^mC-e{$_%MNDkBiT%4m>JWdt--Wdx|a z0PL?$(W;?D7a;#wc=5AzrN2O1P~}Oc!uO=YS1pyS_C4VZtr{C`drg;7OR@*nS2yFs zg5`(z1pc7Ez+@1;nD3OnYI-qwQUSq3l`m275-%a5t06`T&BwDc9ol4m0y0`d*aQ&* z0=YFxspviCH*{>2(2jGFNC=gnV+h)fqg8EcGyPBkT!>;jw!Km}sq*gC# zsL11!g)zf3Dx|pc`mKcf1V?inI+z_VBcGU(szW{dsy%9LfYSRmS(|^qr2Wa z89%o&{&zA=2J?U=R1n~-u0l{UcL+mdC_Ea}c}`zT^?sh_hP33b#u_CpRrpD*@KD34 z^JA;@^HQCN5XrzcRDU~_er%O~Y`k;5s$|ijJxXRtABCVQ4&-7sAha67ar0L>{j$M8Q&#PJSz20!}%Jk z;Xao6#Fi{g)hJIQ8K5kYIrdFENI;2@92?t)hZs7n673A~_6;yCr1Rc9H{Y0g?Dfb|T2qI5O zy5N5ykO1z!Yx+0|1ua4L+?R(;>H>!Dx zih^ICv3FX(lgwdfv{%5^W?zB%Q{wJY_C2fiQ;v<`-dojv4plS0l68j+8SI0f{fG%E z6s6uyUmr?*6-7N{v0j7$-=+Ym3}JfJ;Q${(b*8{ybjkNd+Ivo8?G=->JKlX8Pqm#R4^54@ zSDD7#EAozc_gxxy|5LT8ou1OHijUv;3*KPk#I8@qGJYA$_&p?i*TIJ%=YJ>XlS}Lh z_f?Zgf+=uV%Bg4c=?z~+|J0YsdIAUPJfXryEKkl+@I)I0|2rak(yS-T3GA_^Mu%+e zB*^eRkpw&v=|lBxA}#9_5qGJ^7e~CQ90iZ3*uv<4;S2NXCe9?Y<)fnD5ptd@x ztpij6i6hml3a4`0?#?MHa!?;TQ*=pHmQtBVra(sfkO_zeCMEJP`f%L!4JjDv^F5q; zpDd@bM@Ds&aHjD^7q8%R{Sb@ugcJo&bnqn+Mo_HjAPm@jAqNzwIL3V_i+;4|Kgmg~ z9(rPz-YeFL5Gs`+wB>HnAjky7V^vJFwVPy1-~?%FqI#p~ z+!TP*4gvIXjUzSXZURSF9pTCFcL=+`J@*~&$?$jF^w1&gcu(Y=qcm-N9LL|#NRRYe zTwA|OZ#A)!95uotVa$<~oEq}d;h#uLBvZ1+r^$*I{668sH-TsvxMJFqszlP~^(2rN z^s31kp9MDQbEpz>w6#i2MY`bG5uo&8ku^SJku|<9Fd1ZL-8H`Ghb$RKSWj2G3wpht z_8@uzUw!BjzZ&T{+E|4Bj!=T?VM}~@!@tC@9&w3JsXBZNkJ1-79Gk>P@wz%1DAxdR zota}(Op#`s_oK))9#BL&k(3?8m=je*6=lYkO4bKwpT(g1s_+~riWF*tb7zMa>XS;z z>Wq0lHrsY1`x?A(zPYNYp;M;`Kn#V=;e}G-5?3ZN#RpH}I8&G%6EJ0Waiuf>t~owA zR)f_|47-`IfvX4m8|kXB#~T|O*U`Rkv#GlWiHW9`I34Uz55q7EtBKf@{^Ub-rIqh) zY3u0dbRWr%XXJgjDj$fz;a}!UiVsGBAHpv=wo@U;4lA-*bGh_e5M)Z0BY)M?&<2at z3I3D~E#gS&FbHVzzcQ=>o&v(SlUaL(I$Z+82E;`xRqD094aY`Xo8o+=0pLiR`Yr^$ zkgUZ~V72d`52-Mcii|oK(1tH#wA*y577UeR+Bk>HBbDUM*621RFd!y0LU>LI2#g>l zC~&}C#Dq_;V&uACr&IO*dPL}+X^4?6d% z&IvcDb8^^b)D*_1m8LLMr%63aC2)X+r}AXDv$ZoHQ-8oNORgfXwN;K|%~w)_4Nmzk zLR*omu;^4Vfh=N;_cS9P;>d8u4M9BSL`DQABj44^@HZN89*bo?!zXr7X3k{fyIUDK z+9xTXgrt6OrIZ=2f?t2AiBH`DPP&0BD{4l*yOiP6FNuskq|M1}Y{_TA&hQAJEbnp3 z4o3#0%qcM(O<)5dsV|f%rKOx);u8E@xypO;U9k+G*Z^NxA~KFV=S(E_*2;Ul^8@u* z(lVl88Ihn2pTWja1l>=ehf`qax4fs%Q1dB7z*$l=eCiNDt8DifUML$&bO1p&UQqRzkR zX7sf(O6ZbdoP-jXeB{KDbK;I1p?2g5wIfHU9r;j!!-xH0<*BQ1HHj4{Q_DDc$D)vy zFKlJ&9J33bVQ7al$2ahxErO757^hp6qm(<$DO?Z97_LW2Aikvx5LZ;G89|tbZBtEP z!!EV}@qN!4Y(mNNTgzIP^{gwRDmZeE4L;4IxK*?zfMNpRYZSfRu&|29IQq;xZ<5Rz zlu4;^r5@ud`EEJql4PX>oP-uvQVv{c@Np%j!j<#JNgg|tiF3FT=Wx|#QQBd13`dBJ z1UBSQ{n{En1d7igaD7-EqC3H68cP-$VO$e(ssZzPk>975>fCICzeS=}Is(^dMkmnk{Ym6FfKrSt?ZpN~t) zcLlJhB77-1RGgAS#wq>%MEx}o{nN3Kc)1Cn#3NaMBh`IKJ5#M{-@+ra!QV@ zrR3aUO7uP@dY=+Ik;-QHqeEE9H?_+xw$*m$7{%% z3U4cS5_?@}hZt?`myI#*ZZr9mQCiN5rDdW@i(yX7gq0QpotB9!ErvQR6Ifadc3LK~ zv>5KROlWC2HlCHQ-DbtgWaUGWS@|qVR*rLK<%5!0`8-NiJ}jA)&!lAK1Cv?#TuN3x zG?|sprex(XWmXPTX5}ztR;*Z7z6Y3<4@6|;kY!dr6p_`2p1<7(LoSA0oX!lNV+NdE zsEmBRI>TFXYAe|lN~FbgO6#w$$XB!+cENBYl9UcdY{jZde;oO^og@8n{w0P8u=}8 zlhX1TjbC!%W?L!oWx9vBbk;Hj%kt8w47v2%SpzxoM24L3C6UXY)s2Z z#k8DMOv_2dw0v7E%_b_5)>15PNLo%VrsY#CY5Cq%T0YyBmJddyWu1_gbwXM`@Rb%f zB`t1CTD}00<~^B-H1F`iRpwUN;e%)jjYd{>8X=&f$|{y3aGGflzvR)%%E9Zb9K6oT zx74z7@H)#olTcs3Pg007&&gT&Olwx8Co58u6=})Jq3Ns~n$F6h>8yNwD68p`uR3Ps zvnW~l8fI3cEGtr$73s>dnM4{%`XL8>vvSZkD+hhE;&WtW>6aC$%!<@!WnqxeW>-Go zm68**DREO%OkX0Es~f9B75=zQ5rthODpZ8oPrhT))-+C69}-G&k$9mOR2?x$QcUp? zY7BuJ_eC$xGa|5e#D;sl_dV)+n?|_=Q_RL!%OkJ1smvops?jt7g`L*h6z32umZn97 z8S|~GR6+&XSfGmhOxT1Cp-nBQr@2O(oUtZV6>(Gz4i*YS9!deH6k3~?`<@mvKpvMi z!LRCSXl!k7?p3(T)Xpyc4FI=>+DQRV0F9^`qwojYg?ZVg00?~!kN&BEp+{^nF-C=7 zRP`b%3Gy=6dM(8>93bo&r673P*?>i-s;>cvEOd1!fUUk-AnzBo!QE8uje1b33&Kyg zER~B{WONB8lieL^#VyIpW7M~h6wNISZOh~pY~^iVPUGVP+6pKYu+^ktISv9!v515K zU#C+oa6wn}oGtYXu`-|Ur6A^7a9rJ#vV^z+Q=4QN{Dpjb_q|O;iVUiY2fhO2bEKHz z2*BQ^(M&J>(uZmrn1(jr)Q*!&;5W5HH8jMNeGMU8QfIs;RsL6fVE_B$yxK|_4#nX| zJqC|g;XAA`Npou=CEs#Q>2D-TI3*4G>@HNja!NH%YE9uPZ^Ak$7*)PRD z5%n!fRlxU2v2|4nII@|df;!!(;(<$*_YZfrq^+R~o0HX{S=D5zob{y*ph&|IRs?6J zsU~do4jdW$uW=1E8OEjUv?&e?5_n2ZZW;l(gsi!*gcO^vgm38jO33>8O8Bn7uY_!z zuY|8`QArtS%+-~UdbU`4sESEub;*=3Y}M+Ux%y_2zG=`md41ENZ`N7zI=)+0L??tr z&r2{OuoOr_Ems0>YI!NVE|UikKb9i(mg6_hD*~qnW(ESr#IlLTxQP{$t12drONYuPmc=Glqzzmrk4smi z$4#y%S5L=HQ6*JnTp6AdjGqWJw0~e;@bQ9a%y{+H!k*V&GiJ=#vu7}1%-DmQ87K_~ z_G~Kb*)(IuCS%5?J;sb3I|_R`3zwO1?hnkEv9WfBu?e+N$53wtHlfa@8E@XZX~w-X zW?W_*f7y&TXP_|SGW-h!1DQY|a$e*m>QEqUq>ahO)T!m^Xf$Sw0g;*DF>eGh+aAN+vEV|`%U7zpTg%|B?k{TU4}%!%9* z)Ah@_ejsKq4&CCt7n`Zyi|z~jm#-bK{q4r*8Lsd}Uefi4;qmHk^V4g({IK2*tnl#r z;BOhf^58KSPxReDqYwUoZr9px9P5Lxap8TMe*^EUfp+1Q#(mu1!jd8IKI2y(Ihuc7 zyVi}ycRl>(s~V4ef-z}C_>{xIrw)NX>EbEZa7B;XZaI%LW?nP2)4VNA3v3ELYN%sTe^s;tC!eFPw~8*Z(;X2n8~Ob%1WeowAex z!iJ@NE~AR){!yK3VN?S# z)T$>r18q_N=&JY6ruSu(@!UVkQ!R|LdOvu#`@Vm)@xFQjX_HY$?|k+l{#WC5JT){= zF+mKQ9uA!vhK>|ZF0oaZTuSj<-*|iCI82f9f3i$3zX&%DWP*1OGzMSo@4jTOna z2f{Cak}-i}An$>6*~HZN@#Dr~nj3d)P+ccanV71q$V{C&d13|b)a~SnyMm*>`tzo2 zN3FBvlI3qaUbxG+^sz6`*>YLoc4OAOtL9vHW8sO&ODmsTb=ehXjV=jaaDQ*-uGXEK z8_vFR&b`yS&uQ2U!l~hyGRL)waG0?^B5+q>i6Y1czm3f9uEG{S9MjJO1Ru*C(Dm(_ z!S@P341@z`K>6N8$87xI_ep_tATXu^2fg5JOp1;hH|EoI^Nc(1ym(3dj6Z&IZ^IXz zb^EOk)y`X2PT~mtjLT$A)PulIAQX%5NOAk zj2@p(of)i3mxMy4&lX;O_+jIeXD|QRy(`D=9(7K~=GmKHyK>`e*UsMDan7jSV^-e# zv$0Pbi{JaZvG|#~jo;}$r)S=Y*B3r|c6Z^U>rR^6JEv>crh3RD+QcuE-vW88fM}P2 zQK|70#$n9CtTN*hs0crE^R|gKrK655UU=@nL!W}8(13BrhN~mtTY|x4rm+0rKR$^< zaA|#Ef#M2=c{Yj5Ey3{MzZ6_?i{zZ((Y?VVNQ{QTFrQ9H!Bx=0II^OG=Ma1*C|0=JC$!CN03SYb@~6pY#(o3nq{H$RCxkeNfW~>E!<#S{kl={^#=<-Dp`N6VOtXQ4Swrz4f?e9aBZ`KM&^(REF{s}7I=;=mFh29Fowf^ zX$z+K01?yhP1=I_;jmzR0v|I3{W}J zEEMfzUW8{8FP5rSqVEL$ta&j6zSp>ZBslu;nh&lV4Z?#(5j)GfV9jUKbjJAi1#b1# zSJN5S-yS&NgDZ7rc%N>!FeltcW5DoRgWEJW{B&B|hS1~U$713*AAYr%U_AQ+zh`*i z(lEH@!f!EpJ^bW^Q+<13;5HAB2e-bb;e}a4@N5nINn~QVACAT0MuDSUm;W$Fs$Gnx z%MZ++e!BXME&Lww)$Jb^Eh4J~SBoy97wxQf>237rwND8A$%W7M!KWMsK6MCOEoPV> zdKRD1cLIwQUeAJ{@{mu zWc3kw04o>;zlh*hjDADvKTNBPr3T(b{VUy7#@h@RztUU93<~@ys_(60_6hucf_tl& z4+Z{Bf|Gwccnvx_00jpjy9X*dCCQ`FhGq=L*L#r9PaJy7gK3#FW&4V{!5$ z#gyy{T{}<{yn5gYH8{bY0~-Rt!M6*G!m47ankhmRV<5WEIsa^d9-*F+3>TZ~m~Qw`bUFA*F1j7>%l$l3%3MVKs4 z62g)GF9d%&a7FNHmV(00;5DeL+=jK7NZ|?}-uZ$PJOR?AX4%lVpB8?%bEk3Qv#Wo3 z|H`q?O_ zHG7sH^|_VbF)lc+uqOEV!w&c5zuCTJ$rCp%9&7Zz^x(jlA!B}Zm(0vLX%1Tg$i2_X0g62SEjl>n}Ps01*aC4ju1 z$eFMSwlY=ld+>iU{_ffxD%rie|AWX&0~>x&*q;yI!|qaHPc+~%HfEm@M&pVY@N<)|E9HIy zVw(mwZ|a}s4%NN)szrj=o|=gU-!m8D{bQ7RPn}u;?VT)L7)!H_OgGVJ$Cv+f1$qHw zduCm~^s=k7yT9=Jv7K++|AT=Qz}XhOx!-Kxp3QD=4_-BJ<=5V5*W*spC&mS3@IAU1 zjg<%P8}2AXZzo6LzTu8S^j@0C?;GwY#NJ@|a7Q8bPk|3}6k=}*e3+vU+beLNqoC-F zolfD-`-}mSV>jFhxC(b_I0e3xV-fj)9E?DP#CMgS~8OfaORZd~e`0)apxsFZAHn*13LY3&m}7j$r88>X`duYXw}Q$W z$-yHmR^MT$*(TH=!V@$+?9q1%Pg1LY0X!R1i#o=$f3SmEZ3F(DUVZDvfJyK>3QLTe zU20&U1IH29?jGD9#1bPI4gQ0znc|`i>ca#`D7sM(}r} z(9aJy7~xS2A8s(hCklL+!3d8P_%MSJ{*1tf8H`W|8I0$L8w|je!O(Cv7>60n!NZL{ z3`}q;8JOn}H83M_PSx^rcw|mO!La@bodV9Q=^eITIqUd4?qVZe_`l$P7WNzF&hS+` zcOJY_E&Ql5sn)wn<*Y4(%W2pNPO8Q5zp_|q_y zSBoXWg2mNT4?77P!}kV$IT9QZlVf~vrCMD7y}@UFaFqaH_`X4oPz-Aqi?r!nzc44% z?$K%8YIKsYa((5U5FQBm$n^up+5Y`9Yf;%|z`Te{5 zy1V*z|KHfXugsfw{r2tGhfh5C+ADj35pRZ{Rby;eXjgf-uhN()_*%y!B@D93y8z^DNFW>g@Jfsi?QccBT`B~oP=mFfJ7YU76=4P0Xt#_kS< zg1;O1+v?zE?O9oyH{`=@!_ z0{B7f0&Fb(8(IYh6KGE38aD@7N^}*6Lilu`E1y?=+;VEZ0 z+7aIX?QHSC2bNzgLov$_hk^V#!Ba38mnjw+M?#mP_R5qJVTf6HIz#=z^1`K&{%AMUKMikIeYE<**J;K?H{-2SDUxLz8=8R3NfkCSO)krD>MaKTPYbC8%eb;#Mv2i; zJaNjT;*(1z;jgj6DE+<>3K^rueeTo~OA1eZz3^Dc)Ke2jjt+)`!N^Pf3nw=9bSI}> z-n(p4=n-rgS}<+(Rhy^hwqM(RLjR+o1(R1^edFxvtFHY*vj0&YwdiIVh!c1qg7<3~ z1{@aKAUM{I0xv94#QWf17sl<0Wj(lkMuY>MkP50FRd{rJs5fF+WO4zR1AZ!5r_^Ln zh3~)-m2wJ#BX0&TJt8}I>d|pCxce^yf1PsnWiuy7Ohn=OXHMz4X~Fq-T{A6o6M8=H z^eY~|vf~GRC-y%k^0`2{j|Sp05QX;wgSw}Y1;hb{?+q4^yYUeR%fD68}<7#t2LN5y=Ss*0HiNx*s6&HO@u~ORw9`lghoMz z)Xsy+G%Z9HJR6CJC(pd>tf>Qk{mbs489%!G+}woVrEiAjpV;?I&;`IK{1vm9f2|ew~keaiqNrr}wu?xrMosGV+S7wOo@00Cl%2qB=ULnJgp?XMUZclVP z$)c)X&Uj$4&rp_Iw@=!G=R?YnEmbaD8oPo_kCHW*o@h+bY4t_bmKu&tehm@mQtVss z#h^KVTJ9!uc_Tn*6tc*VJc11X&l)Eqsr$;a*S@-Oz2f+068ewdyVot``AhK-}Yx$yk*`pfRR z;$!+1S+OB%Mn~^{YSE&iQAG!?()saE?xq@j?#??#mvl#U--GWBvVXvAapBP9R$qN3 zEsSR$oEPQY4zIu6*fJ74`l@l_NN`vv!H-Ro7=4Z3z97in`FaJX$x^jj&TutZj@&-- zmyi3}fj7?dVlyNy)R|tjsZ!u?2fpXk_u%$%#wZU?a}D(yme9v1ufFwGkOBmP|5X?N zyM8?8JsNHwGX(C|w-@^Pq}E_U=l3)mewvR@pocB+@Nu{?81d-$@L+Rh=ZNqrhk;KW z0!KcR>9NZ-T+!pok=mSz@l;xU2wiIJptg_r_($(C{7|_hxHY67G(&QIS1nch(fdIc z*Z10mx_Wepx(;g}@)lM7fgCqtBhX5f|3dkvpwvxz0{)0vjTyCuZ2jY;C*Zrdz5x%| zTa69A7ki)K!8rt%)ffV^rofvCE|EIa|EIv`5nNVdfcFdh0)lhWbCNP!YHOv)#q|ZO zN3|J3{SZ9n$>NcJg5`-6^#_9CkFhtQ>U;0)-}Py%%;C64Qt9I|OR1V$Seu3zU8>hX zqv5^hE0eKC8PY}XsW}6MYcR2^jrW0xy1$5dOp^U#6UQODK_L(o1_@1guJ97JjZJ&b zIKJ@aj@Q?(e|?8>{46h<{(WQaAOCF3eP-vj!oas5D-3Mk>CdcVW(;F5VqgL}$Fg0} z+Z~AQf$)e6kNtVosy81i+-6+1dPi;TjcW=oMqb+Z%C**TSzY%t~@e=V{rgDCkCkP5M?P0EQ8#gd_yS+on}niTzIJv7>Sb5?n2^x zIV%mFt95D%V!FhIaaPna-fa(Q`754CKs8txthU;4>|C-48s{U`7K81Xh< zdi}J*Q?FOw`H2#)2q`Rz)`AeVpQsXq;H1W6>=8wdMQv29qHSgTEW$(_5m+aizcgH(bMTHsn{bWhQPwu$>#g48QL-}nt@0z`!_}EdU*d}mb z+P*JbfBAnJpZVs4#`s@!Ufc7x125iX6hHWYQFPDkg?~TtRN>z{2A;lV!^00}lEL7u z;fTn|UXoE-rRYtqLkDgm*6E(4t~B~qZ?CJ{0Z9$sS-AZb15vUyzrOa`Uvz&Xw{h`> zSI<81a`2vk1{t-zB($4EXm4hr#mX3sLTDNOwsAeXd=PS9eQY}Vt1IL_I412=d~h{s z6CP`S;B6mVDFws(4BhS$f4fkYZ_9E7aB><{eYL^{TsaK^y-?iZe-H2<;yJ=Jg?sVd z5eN$$qYR1xA7e_@;S^0VG4#iQJzc>Q3(uWc=yA2+P<}>l-d}V??AA-$_Jd0!uvRq< z%0X6XNwrqS&KKHU zU0#kfC00V%csqW=WZHQfMVxN@gb9c+U{Msj@xH%rj{n$fykl|xzDv*BJa5;o7p~vC zpKcdnAwT{;<8BgWuK%8{Zy#^0Amt*sd9MpUGVp=W z2|VPnD*Q_Lfk$~{x*Z-1cs}a+A!Ada;yhJn!m80&s5Pk?8Vp4&R)S5~oJCga*5Hkg ze6VrRH8-AdOVu?E^L8{``&98Wk*ogl3#VNZf}bH*u3t&qq?43 z-?IIZ>QN_^RxCZ&Sv7sls>`2jY5C!mH}6iT$Del5thsCJDpoDL;`=QUXQ_s4PEzwM zwq#EnhgB7C&Yz3}aR3{?l)RE(=JysU8Q z?#RhIcOG~pas>+d5q!Kn-VEY<+hb1~(AT_tCxP=O- ztGP|T7P@fT&AVqWu#Yb-{e1s_6GID&uGwImTX-UKYA|?bI1+uC`3t%m2R|2nA^HH! z5HeO1ltq=se;qu~HxNBQum2|2v%fEL&>IvUI7qN!CZZ~Xdm}SI77i6*@0w7h$db(t z;o!l?3m@&+z4Y<*>%VpB?j41LN8IrK9idnIt5-gB*<}x{4872Q(jD*Lpk_~C`ZpHy zma@SHL`~N5I0-NU+VJLGYS}*Utzpq~l8_lpcS=nE!q4A7dv^ zDm@DKqxU^mX!*ln;K*s?PaJ>pF@q`~JNve;%%A_2+h&Ju^4}xe|M>;L}698P5H|^q}ytaGi#F zNc~ML8zNn0O1$1M%2Vf@H0PvqKW9w+{lFW+#!zSBxbwc;*>%e~#-ICl4BU-*0TJy; zUt})?AJZ%51#rnQ&J1!w60>XY{u2heUNgRCoR$3IO=qPlFD)<;!%niPCx;LPAD z^B2Y@-7(>)83XGE?g}=E$eujG1u=K89yc-j>cu6DqxYu17GpSWPrF`6%A%QHyV%l+Es>w+kG$a1Vt&L zpmRQ0&3vM)@KASIJnkt3vsvIL6WrYf5&N#%?qIwYyn^7o-C;P3 zwi_uQiw+r@0=8&Geyz4Upg-8x1-o>8f#vqFJ;SW(ADml_C3p{8_TdI?11I2%A@Trw zIROC)tmDQRNF|kHMKy8URlz$?U6vkq@iP|>{3~2E(3x0SG4A5;UK}jh9QsrJb#-S( zLp!hUPtM$yQ+Hr4p(MF{Uy%m6+)ABcxH2gOhkW?qN!?DHz$!`5V z;|7wm^>BB6aJaj-l3R#?9SV7gyMfNbaX?at4?Ik#DL#%L z9=&!;;0Gjx!g>g$>Nkc0r>^e7P+;4wF=<*@Ely}4j*TcURC&kYBhLBa>Yk0aT-*^l zuK)FLQUB`~-?Tb^@ud}?`P@lkjy$6Q*L_v{Oax3F8+4Wn1!2XG~Y1GK5OZPN7Y`QJ)`BQd3Uy7wl+7( zDlVCHQq*b4q|Pp{S=OC6dfr`E4;*)U`GV?u&b=c0mEv)u?2uW0_KefcJrR;aG*Vpp zph!*?acUSGyRqPE==!lHNCMTjCkC*;Rly5OLT|cwkY!Z$Y4WZiNg;f`_a^E06pwuH zvxdOWblaKihr8|IRAAsbk1ij4s|OF4`S70*;B$4u>81J~L+=#*-Y|Nn4uMa1=`Hue zUAm&tcYoz_F&dQGQESU@Jm|k%=-aP&(2&Lq9hMPN91pko;66@AHI|t5mT}rh9Kyc7 z9V5ZvWFm~LM>_gB7ytgi4IVu5TTNwjUyf=Q&JR!asIe~!!cB9reAUJBuv+_#0N)Y7 z4sQ*Q_38Td(HgGsM>U>ui|Qkb?Y1-U{t#|d8buzyh~`E#`rb(@XMOkJ2jLUKYgsc5 zic!E-LYTyjlT0WHsw^;8%Hz^-7$!w=2>;BG#8-k3iDou!{CuYMobxW9RR+!c)?%%h z*G+HQ!kSsv3C#?z)v8%kI`QOat}&CGQ(lwrA=M22`Sug%Lp3+#ST*BQ=S=shCRiF( zs-{`vijq1)=~Ky}!UWR>g^PX&FkNqS3S0@>7FI|4Ub$j^_-eh z7kzQ|{F^QdR?J^pd-~O{eC6zu7S33FA&i%?6uc9&HhJaBfeSvjVEPHnndpZ~B!lpU zG5zf`p3xkOeyBJm@V&uDJh{-O{!xJG%7ACq|8)dDS_~`ex1STfSo3%vsxx*k**E__h)1n|blEPaeGg(c>GU}8#X-Mvg-T2z0Y0_qN!0L4*EuOb|(=x431%j`=jeCOBSU1u)J?G9CPz57mu>M z%3V@%!!EMCgwOZhB>kT1v=4sP5I8Kav@_WcciT~xcaKMx55Cn0Kg);z1eKpv{TN0s z)&CfJr|9>F(K~er9F|w;E%(D+x?p)@E*F)uP;FRV%Y(v72=whz4+_i6n88sQA=LwA z>s+{x)5`J+{4JwXBBKSMcl){Mdt&m@5g0M)zGwwP6{zc+|im za;TH#2o_VYJZ${V!=fyyz;_sMqjjsw@-o~$TEhq4AJW@Om9f|Lm1U*+u&nTdm1W%< zz7iglR;8)slab2BwmS+(s}DUy$<6Vr($`m9<%}8o*shW>*-fkZUS7BM<%e3g&UCh} zSl%~t*486tUvk`|*dqF0$1a)GhxG0fh5l{#UH)uu&-dF~p5Cxw_e#)WajT%EPRYLA zFnDF~b6Cx2YC?gd0$3D`DTkA-@)(~;)4@wZxx$Y=maa(69d{h~)+Z4sM68ayNh|@Y zDUm`1sF?*;W<;h8KR_A$>Pp3=uYHr4wDpyBYhSs4<&N32wy$d4Hv6pYg^hP^n^;|1 zdR)=MbHT7<9|>Rj#4$@|Z7mEu{_VoSZFhG6XwB-~ovlB-^2+~M73?>fyrZ&rfqlfL z=npmfAn4x+sP9MJ4ME3X`r!AFNx2(tzYh+#|6y|b??%*p*x+N4{V4{&WY@?SI>}>2J56w`0xu6CN!}&Ahm( zbBE*X=&ZhUcB<%+@r&2qbi@uL{K&VA$c~fecF#_pGOOa4-rb$;KkPZSYR;+2vwIhu z0x54nN2fu`LjkxGV^mOKlAbh1ZudLE7jGD-G^T!H>)33Ept*6wry&B}UK=?c@sSEF zU&^PdRGfSRXTpR}A}F*YH+{`*lRj5G?$ol1sYlEzoc@WVg+B9xN4^>f-5d(lWq;ZK z?N6dmnFz%uGEHaElyr+)0V}u~Ffz>q$8xoOBsj7pyu#-C$`CW2zQfetrt7PAUHs~! z6KI#FSYJCd{{x?lAn6>2CVEKV*j~;i)Y;g$Mzu)6I1aFYGAx@@@nDHVRu@-E`9S>?sWedab z{EaJmaL}M@BhwdtY}-vw%vxA)BqfdoB$iA zX*}eMOju zP;TI(k7XKWg9GC=5g*4WY~=qCgLxcwqc|$+fT#e*j1RbErj}2Q;;2M<73I~DyZv+& zhF|E~GJV(lNv-*FzBzu|%V```aqstcE1`sH(s^9#4mI(tU))7 z2U0U#%=)RlMqIljVbO}cMpb&S4l7A+jq*jneyoj!Ubb5+;sLO zH!q&E^M=*NhJhuxISb#n{E75jZ20WKMT5r#j71v^DkkC z&I)#fPccXuZZA=vNFAZoZT?nWDzPLx=+#!s9mc*t@Lk4&S=@&mv-koJJ1s1Uc6(vB zi^YRl-(x7OgdfM&!1u+fEce5a8RiVI@>o1RD$fCJVsYwn0v@p+U!SpDTOHqfF)Zcg zd9>0ZiTV8A`G?XP!^f(=@6~qu5VNNo20nENTrGWxb9T9gD=J++V{^CSKP|C+)RMo0 z_YNJeVN#_Q%S64>hC^#JZ=kZx2&&bES5wGeRy7ntQ#(Vd^j9qWq3>X&(u}{{p;R{1 z+&`dI_Ri&^vfm4@-ccw#@^Im!VLE%oBc!v7HqJu3d!eGg!VENuw;hoi9FtE$?p<=z zc{3J{O_iKFb-|qQhYeTNjf>wIDOz;i!3ya0SiJcP-VD*THj4qxJjvo=W>0 zLvUPr=bZ=dKLpd=z@%nD7gOjEObZKl&ifRYKvU=j>J9GHHmV`?z}%1fA7D8;G3ID% ze93}2g|5(Qq9z=EAQ)8r3O#q{o#-1V3Y~UXJZihn-5Sq?;stXq4rUKUgTbbb#l8a( zxSmBQFxPlO!QqrDrVBh|8s8>S(u{}(H$SC>WjYTI-rtP(qxOpF3MyFfHfkxs6s$BL z)Ec7t1iz138;f1M$53ko51BzcCd2H6&!UxI1^ZIvRWv(RsC3F?%D52!*y+Ts#E|S> z#CJkzT^MZI@SU}jZ#rV;y2j0qmW|1Mv2F7!8&p{Di_Vy`Z(a3=t-hFE!I|Fj`Ad&K zw`$W3ZytX^{l(N1u?_Im&yXObnF8S8_HQ!mj>Z#7|$D5SHQE(zt$tA7`AYw9n zL9aZ&|5s{5w|Wk8;mA)<`7m$!zzd(gBN!~zkt#5GYVamCC^$U*rNOfq9zKh#48dV# zFqR|0w+A=_3_Pf>W)ZI6XNdJ7IFgitf0fZPBA#s{Lqp1D=I$CGzs1fk`?yU3 z6OLCgVQoa^0+(>d$TPXC&PMLcZC1LX=>5guNNR46bO*|aI1-EUCRXK5V<1+K>DqpN zxRRhYbVxr7+3BBA5}q9jEuO-&(=IL_Tl?2PV#rALq|PvAY=|exJA;rN@^xbq(gLZH zK@%2OUES63Le}C}BTubP^3R?`S)Jj5%h;Y1{(6yiq;O_MsXIl4ixwb!>BN3RAW$j#4Um)1 z2zgLO(q>A|`zp4DC9g)XWuL62Ktbw!C*3;LjI_PcXJTzZ6mYbBjvVDip{qW^IuKzF zMOJgdG!U$Bwb4?9{ChnzK@&h%>Kdh0Un3oVAvEd{!KP`P>d`{gr`|fbQ3SyfI246A zD6`&NMv!qq;R8+&3*6kNu!k~*x3j=Lep^8;{XFk*Onz!mj(2r+o8G%}5)VhY&g?gR zYxtS9zgb1!*d?zlcF&9ABk|?**s#~v%a1cx4&067d_y*GmS2%4-+*wI%Y{;BDMH~z zaH0jStKe!|za$?LA)fCt>|#1tJj%`7x}C;yu${~Nz=8Qm{#lbw$q~Ydo!O|Ly%Opo z2tt`g?B$ans@8X9Wm$r z5t09KCyk&Invy%@Er2yGb*A7DE~Q9{QODE1JqIsR;AZK(Jn3VSYMPvtv~T1MHcC5f zFwG^~x9%7(kN7WL`2tx<`&KTy)+@i>?5n@MfG2s6P3pl)KZ)DWI>m%tIsAok%R%K7 zK3|;V+NBVaK388XP>O(9@kDs#)l$T>Jya{~oe6+D2vk}loTLyBV0RpL!UVpe<8aup zJ2?Cl;2zx8R^UI#yE*(*z&mk^ox{~X%YShAe~^i)y8#;q#8#pey#1TgfWMU@Km+|( z3LG<{FuLJa(1?BO<^i|g(V;*-gDPL$uT%V^=nJbez|}Z>Nj*7>x<(qHG>(BBvmXF& z&F4L(mWOq)gi-!8z+W;czrGF=BH(`_SJfs1uEdf7=4Ushx&uQo6Fm7vR!tt!o=WlmXeor-fs7QHoK zOWX)J$a7bYU_lsfZ1Y<>TG&ANlp_W^Pq9g*3wn0<^#o%C&fDU@e_9Yw!YOuPB(Da$ zkbH-zcinD1oHtHz_#x^5r;an~NE2T*#+oSI1goKg5n?Qzpy$k?unx}A8)CA+24HNm zlrzzx78DjnUR*i@>1gfp)?0$h)vMdYzT39de!30KP|uSMD05U1;|r*0hR>B_L3+~{ zOE3L)K6f*=7LdKcf;F3-;~uysfc+}B8#Zp_K$n*fIZTQlg5J8PP6}E!VGkQOsNa~F zfhXM3S6b==`;f-{m@s5#T8>sg! zjZg!+VRnW3A!ofB(f_SMNZiy!x09foJUx#`clPx++GkdBX|>Qs4$c-VS?M?I7ws=v zTPdWksMECZT5)liz0g~;d1@8uanD>~)LYw4xHApsBT+E5T`-S={V(8~B7!TEO= zWKY4x2#1oHLNfuZSDw`n!%W-12D(vLE!&!zQEfk>bC*^<+q*Ygy-fP@S&)y5ByG`{ z13@c%NLjrdg#SX|!DCjGC6}Pv6lerNXXaD3Jc3SFpa|wvx0ru}fx`e5iNknw0j=ou zCnBwJNgTI5Icr#p4e?rrHK~puVFA zc}yJC&!<#d(GY*?xH5eI#c*hh!w2|FeE2xrFnsxtV@m&s{wl-Qkq;kAE5pZ28-@?& zo=IsMzKMq6dx;Mpm8anY+&Fw@b{IY%Ln}0_e*8NvhA+zq$M7lli&Sa05@o zgm298RbDOhmLh6B_bVDh9U~>uwmY^u=>P~V;Phu5Ar{7!i~g_jW3099tX$8uGMn~R z^U9{ptca}6pYFY2RQ2-6@k`vLh^I4BMsH3FPMZ*XC`a2`_+?p~%R&#WgHRWqI3{(P zcATO~c(ZLNlK1(A>x&_xnqXYTKE3(RSk+s^u~%OJNs@z~fn)^H2HKPke_$!4`(+dh zpdG;^c|oEsK#wO;r#yY>R zsp+n^=kqxI)9fdWYZS}6x5-PRr;TL;=M)&au^}03l<1AmVbRbh_1u6EcaR5-c4DM# zJ*P^Yh=Vz|&_NExw^@$!$w1BkzsJTkRvtl_SaIsP&ZM6EW*1H1kOZZEEOYer&oHfD zzqSW{1*UN0S{?gpCiuYji%bkkf7F9%sl)51JcWqFrGM}Qf^zHrR=*kQT0c=yr^3m08X zQi8wGHQo$45=5v4+8S;~gyv8#-W;m_If|e`bstM9Qh~l~i}4IrBrltz3WaaDo7rGc zqkhV<9xxL0fkus^KC6FlNJzgP2ZBSVy*Wh**LtoS;$O**48Lby^3_MT%5ZT3>&-yeG_n=dtA>O8tgeM{luQLt{%e2FsiiY4BjBg zH^D21zwgL-&6KB1;if!j0$2Khg*x>wNA;&ph&=!AVq}S99oXg2a6doaEp&#vSa=cP zdTj6E;$+#egODlu`S+SI(y2p-j;eGq=*5Lj+R~wWhoPOPCwJ`L(V<@iV&0Xb>;J4J#Rs*2 zvD_xKqp)n_M(v^(Qj5>gwh_&**$&)vf*{0-aB^bR5{Jp5AEr24?uw(!dXeD_D?`3C zgg=fp9eLmqE)Iu}uRF|T+Ks23eIaPmSIbiWvylbJ2g6o-PFf8YXImj%t3K%{^u!V8 z^i5BiRACF&1k?^hA2$QgUGRmt=Lh=y^uRS63GY!lq zKas9oeb6ObOjsYhKfyfNU<>ZojG61)uOt{>u+O``g$YWpdtnUvsVvTYF8s^3@zZ5u{ASiTS@ zVxe(cv)R8O^&E9y1ji6n7YKnqNZHgWLy2&V_SZiIDNCnK0;%WzV~}!_fz(X}DRbwC z@QJjL9G38JSSx$4x}bjKOe#Oxvg@uGE9bV&#`W~if&=z&$Q*&45}Hj+nyhtI7I^&o zHaa(AgLUQeHJZ9W`|3R3|+p!!$-3Z8}jha15)vWo%WN+a-Ix^F;#1;C}%!sBBcbN{TZ>bp+5v)iRs`go)OxbX)r9@A^Qo}A$(Zi z|EuP6EZnyy!=-oMBFI?y*-%<-UL?VusQZqF)&SM0AJjp=KtH7cew()T_>im3pYT(4 zaIl~>X(!$ye1b}gZ~>dNNIpdF0w&k6@Ci5GsXX}*0Vm`AGPO+(U=y}d9Kb{;Y&wkN zYy~=DduJ$RH&ZeslCza|FdSJqyYSU#pM51CWZvsDGK*`4Zt~4*Y=ji?){gBpWwBx5 zakVb&sjPm!J_wwWq6{I%O4TCOS5;e_nXz6z#imfbo!HR#<#*tYbcqWOi!H0!zT+(+ zhS$f;=GXlsehmH6PYI*w<*eKj3IDxmt~_06p%pUaDY?;vj!UYRTD{snHzuhdAn4qd zjq_hmS+aX}hrPD(^J8+SM<3i$y6Elp6IVs`2@8#J8|h=&zh_x;)P^9pWqy4}M~AtO z^|tNZvusK9)~V>ze8>&qU_sZYliXN1>w+Gft+Dw>Q~e8~#fpkH9t;Xa5wpkC*ftY_qicc;`4TmiWPo;0uZLS9}oRA0VD zdsb*{SJU4B?_gG-!xQyzfu{N)pi~cE&6)I%n;rC?nZ<(j-viqruFFyvFGPt4^?T3G z{5U<|9&yIm7G-Nhljd;gaO&&xH*Pr>7*x1)?h2bDmWfr#E8kwUe*3}b>A5lS^KHfO zlr2FUqLRyc_P6vI=^is}szc-|x2fA^F5Y6_+tz!md)Uk|Xdc=|N>kk>$ulTm%6w@3 zeD*=^_*S-NEUFh>&CYy#X;ys6$@y1T?c2AsVB5+SGd$hfdu zty^E+mV9nr<=XhgrH8zdCd^r%JMY_NMZe4e$-=dNQ6SlQM-Uf=4 z|3m3Jn|Jz$!%?SG`V8;mJ}1^a`b2VcWq-FG^QOk7y2s6R@BebY?67$WlKr#)NcP`; z=UtxR9qlopzmu=m)U@d_>%0bc+hH{%*n4E?O;`n+= zxlvJ{=#O&DKpP05+L{B^V>~RJZ0tLE*p*=R*tU(CKRf5`)=b)BZoYL5zO|Brg;!Av z?VDoNWvTdyd-Za(wV_IA6^4)tC74N9?*l~VB~G!jIbqXn_t|bQv+xGZ$l(ajn-Uf? ztLP1tTGg;gATIj~OLR>tYRjYnS{kum*Bq+$m~3GWygg-=;3fN3YyH|hqh6BDQeiTA=s$18Nm=#`~RBqF3Y2;<$Z#e)jxTZ*&gxBS_fe` zkGr<2`w2Hh#+i*Zn^4ai*)ZZ%F9dv-#^@18h6≷9WK0CApUWn4bB=meh;stxvU@ zINy8LhJb)gG2<7xIgOpM$=_+^=Ahs;{?&eiT#6@pm6lImvv$$>XVV3*_M4bg`6^Rw zoLVv8ahh}Nxs2=!alL(<`wxl^8xuRmt#F<+>e*XUCx*HEB#c=fA6*tK-saf%k^d6b zLXLSNC|aJ1x-B)Hmak6tb$Zp2Tj$I1sCdamLh$|u2zn%(+VCvY4s%B#K7y(g!*RSzEBxo5%JzGG*`4;T<1HMTdoakSIp zR*f0EGEUfEyUV%X_U!c8i`EBp?;fyzQLU^@Dk_RBBOi|51=kogRc6nu1n2tw5O%Fy zs|{+b4+gT5FOUdG+Yh=;DjGO$R4H)7CRlwqj4CWZ_4V-8gfb8a{MSQ`wP5?+F`%~8?F?7=1RfZ)Hu*ZLqID-CxowmC$4%_%Cu=IqfW)WV-73|w8jKR^yKr8#U8(_#9yJKW8f(jral{V@?D#n~5-j zvbY;jQ8(hWbEODSfVazW|45!47%qQ*mHQ=)pbSvKlDyrny{x%26sXyN};Yfh#E0>%9pbjM&N5K%?-V1V0lvxi*w~3iWUVYk}S-@IbC$l=_JimGZ>`bUTPK9$ZA` zAA(UGvF(J#tHMXlj+7o59y!h$n(^#4CP#i$Fx;E*{)YY{#_zUfZTLNpukCQ5*&K-BVr^S zLfEn;D6M%Tq$DX0F7ECy;^1n;W;D0l;POI(aqLpli3VUz@4VPYLCmRxgY)42{eu_! z_jy-->#6*G7yBVY`}Yrt_wWBM8+A!`gLf%GyKBgR7W=yd1+Y!pgTlK)kC8)K?(aT5 zL{1faYu_aqLBcQ+FIaPy&bBrXEoAIA;aa{TEgWd+Jl*r9Zd~Aoae@2oifr8@GShFc zyBxnm)wF681l;}ke0fZtPR8j zk51jVw#P)#$;sijE9_y9s0{F}>hTU$o7UjlXFH6lU!~IV?JzX(F$FeD&U|zNZM4w?$=2BQ!oTcp z)hqJ&FIcL>$Zjn03)w}m(|*O$h3~atE^de|?F#%2T55x8usf$BZ=^{o95{Ha#re)t zv=#a~*}V1T4@Y^-HPn10do~0o6T0gd>S#{WpAi{sI5_k&DlRBA+%WNL5 z!zNAjUY<2)O~S-sE(1Gu9ni_8Z@2uUWJfRc*6s&CIoDsF9YQb95?xOx(DS>tfX znkk$lR)H6ptuiTZgs;=X;g-k98Oj&3AzaonMYIHwv>Cz0AU2%KKlG_z>Hp`A(=jpjFR?enVG}3OCbU8WY&87RtE)7H? zRSbMJYQ+HximV5r5YZ~RXdP=*AJJVhAE8zBO-1iTx=n^oQQ3EAcEf}o*M+Xno{EyZ zvL#hq33k_Ap9;@6`Lw_Sq&sn`1jQNjbZp{sHv}aYvQhtVv77iZ?k%i(kz3ucc_HDf z5XY2DgAzYOF4ID(cWpm$W$jR@ePyMv6F)_jl}hJTWAO>PMuE=Mebv~S+DGU=PQ&!Y zy+I$LR30kU;xD*Wz}3jsZ4C>Ks_hO(elO(qW7Z6rf+;^&ck4V>*>CFPjvc+H43N_$ zZ|$M5(y?uO_=JsMW92u-#82Gjy0+I-Q%_Dr^y>#+pSXbU6;Jp;zIf&g8~?%T57_uK zXXJ|?tX{qQK~~m--K#4eW@SChKQrbS8+Pp)b2@rde)rln`Npw**$;M>-_OpzU%vA} zw)~Yk`0v9Qez1o+6n*c&)1hNA3`0Pd0~yYKT+2nxGccp7{A}PWf9}oO^XDr;XTxtE z%767{n6}{TSzL0Tb?{kbP*CNwgKUMo_QH>I=KOepB|jYq%3|YKbqoyChbY_K2~oe; z4B>~evFO%7Zo69yZwGb6GZ0u?wX?OzWbrAl1kXJA$A%4moQw|Ho5JF?KkYl4{OS7Q zPm|B=ubk9l@56Ol>(0f-o~zrs?&01ZlPX#N{;c`Cd+%pt+~0d${@(!k7K9iM8(evR z*Q}S+*6b!Uq5kH9e}dQ0)uB>be(k*4JVUuU6raAO)WhJ|Mm@$G2?PZ$1aY|T>QFPY zW*v3#cn&w-RG`+ur*OFL>QIz#t%FB$xbX&qnHn$!@S%m0+I(g7b){dLnRK&(0nTqW zsM~-dy(piU)D~WG+*448)~USQPHoz+Ud?|HNAdb}{lk0H{?Yr1{JpN9a=T`x{d6!# zKg~4Fe~_~D_1Eh!zL(1Q_qu-Ld((c?`<48?uK##%+JAaqz~AfWAh$z*jdZ|!@oWBG zM-TqqNDt$C<;I6JxgBvJ=4+gfCcFh$qQsfFnsp!ezBlaQVeN477R23nV;Zq!FH!*3 zJW;Cz&#HrvhKok83$~HFqL3@5;M26o@<_o}zFu_Tko?IT#g9(T6l!j{mKk2>y z?wYNCmzMs$bMZHA>V%AcV>kBs2&dL&aJx*w2*x52;k$dTK+oZ|YplG1Z`-6LK{<{0AyFBYkoMHpcgoitUh4>8#&c+!tyCAHd=HWG`c6N{Xl!j-l_AT zUJip_8fLHh=-KrAb6JCY7mmzM={zA|&Z=N(|INaLPwLF`OGI49@fy@HtsQP74JPB2c(EWUJYA)ciNHPlT@JkwA zQ~L%yA8z+pHqzo}-j&_hw_nPM+Kuue<+v>FDQYfU)6Nq%Yn^KU7B*_4Os*{1Fo19NP$}A!nq$>U)2lcS`xMKpVF+IMRm=G?CxFdC&I)Z<>p@? z`%8+jMmtbzCcL5zQ|O9#Kf)QpM3Vy1R*$m8J?EzV*V+_1xnm+o5!m-Vt4jzv1Jmykk$>V?nFv(}}_x3>zL z_B7M{P$hS}P|0wRJNmIu@k*im?DXL$x%=67H8t0@Q3}GfH?>iUkg4d1csrVVXd0uT z!@!BHMZ4(U?jeI)`iWa=v>#7sF(|ZPPUVQ5W#UB`1^sG2&wVp<42ah7UgK(9ZDdJj z8CziO1>%ksz+Gqv7L-KEC&41!sUg0yhSgv}}Q1|^mn+9#(l|S3P zoAr4&=LYMI5&8T^TH?;n7cc&NXX1`87BBuHxh8%G>vH7^>#}2qeCNs)`Oc1G@$YZi z^nSeZuUw?g`8yN+r~d9ozx4ebBP~ULMG^{4U(N+BhqUV0+1fv%RhfD1QY$ ziv3=n65xTRh5mG!nuq~q zL1Juy3k}KnZ>WqbbzN;LkVsmy{dQ7KQ0dInFJr>KlWS|_{?cJ_9aZbcjxSg}&d=-I zNFR3TX_d;Shu(Nq|B0$wkQyfHDnHdVf^A^7cVi*>fZiKeU_Zcns81&_{&52;5p$n@R&rgOd7hf zn^u3yrQKN5SwaB*ZCxiEm7`*6q_BTl*^R5H5K2i-o_75A^__s=8D;%TXDG}NkgL0t z1X&hnA*P(N$J&%RN{Kb{_2d%Vf}O+)<@h7G!~MIXOs+j}n=SlQ)z53?v?*C`2bbK) z&HW$+$v$=wuhvyc?-Q>!Pv^4XVRnma2*8_>_&P|K3ZXeXBrI9LcclBtWYUd{evM07 zo#&T_P1Ft>M_4O5;&K#>jMxU0z-{&YE-t=S5zj{`Z&L2(nVFyGPRgSZvcw2!7YJ2W zdQwctI<>@SDMm>C`_e`EDMl!1a$)P9qTLvmegk%uTb*N5e?GxFzCG`l{O&ke4q%#l`YZR`&}`^qgO{ z!b5WM@f_E7|9;9Wkd0xcRU%da96Jo8K5J=XGKh`hJD24DmPiY0Ek{Jw6s{#iR{G`Y z5V@Irk@Y5Qt&TA#YJJngynLpvxyUs6`qoMQPRMuPo(HBRt86(83bl$13gzy@g}??b zTwIpMw*`)Nlcv+8!zYel<``g%T#fQfB~+M??!)j`C{ zY`enB0z9Y?Y&zUEUOfPRb)krNx+k6K#R;>Ots~S9qJ!Htdi+#;Tf1+NFkPTGMJ!Ku%kn|N=a z1HCu&*NDG?9!XxXb=MH0Q^{7el-3s?8)HG1}`S0p+ z7j3TSTKjpXVQdUgeQVhCpGgx_IehT`&=H+Tqf)vmXj5gOtZ_1|)UnH{KiQ`wNYllg zQYu87^2OIyt+={q{u?V+f18)|US3dsgryDZ8n)W2C~aLpK+?Fvl(gar**MzuYueek z*(a7RD)DaSqzQbrXyv%}dW-sP&wbOvjLh12ts_AFBdY zz1>BeZLz@Swy)_vzR!tjD=E=rE5AriWQXKX)I=)Fl~fsRpYSj~>ZdiuQtS0168SRM!cXu4#uk(=py{&B`)*PR^_s3te2DtjjR$lG-w=h4MF4Hkgk9AT*|5bkIsr5<6zh`hUwHrU8B zAPgq5Qyj7lFfLn>lwY!J(HfU37XQU{j_>8H%-7OKSGlZ9Ua&GHWrYFXcpsmrxtCX; zlm9F-pgXzbW=>M|?0WdpSptdh6>yf|XMSy_fw1~C$%G4Si7U)4D*66+D8Hr``I3qG zTMI(_jH>xIC)zmA^3T{bCeb%<$BKP3dZyfr+K~rBE{4RHe^{RVSZ{8{Q~LO%fkmiiMu zP2h!7=G@Dz!z^(Bm-DJz)-Fsd&QDrVrbDZ2MM}!b1kH{UowmlylRNGQpHVFTJ6 zU{;T(zxReum-=p)lQV3FZ&$Hr=!~fj^6O$dd0j;MX#dh^ z$Mr?>cb9thwTL{CHhcg1mHQ_|dUPBqsa3-6HL~5Z?de`q7Yt2VaRmsF{-9HSvF~_m=%XHIBLy0JYor6!OiWC4(|=o!8r*83 zw`!sWAN$6>smB@IHsRl>f1YFb#sF2?fxR+^Rv&4Y$}?3Dpr?3}LvH8OA`qg1H6Xtj z5Xq(6B4k~#ux{Iqk|GGp%kO1m+$&f9tvGzR0zY4gX6>t)*B#e8%&WRuK7&8xGu69s zCAyTyB-TN`C)ei5wemf|dh=$I6H152BL~QUFdPGDEI*(aTl=;lKVaS4_^^*A3Pgjc z43B`f{FA~|R3RrO!Lgm#9$m*VgeSsy3VTvYBhfGxUm#;PFHNkbl+GAT(1Zri%x+Z2 zJN2L%^Q=1T-~oQ10X(}73p!nSXU@hPL-Q3T<-3}|b9CizQ9Yc66kJx`qFO!$w37*R z6~A{lh_^{Bqm<3>r!Q;lJ1$14Tf#Bf@U+vj>as;y!gmyMb*z2293dMQ^7rJQP%l8}0Aas+F{(S*ziR zGip73y<4}Gwrg7BfgO&&8pFdwfxuFz`$tgsjz)FgleWPet#$U2vo-m4bw?h#8OW_B zuJ_%ohxT9all<4Dv<;hGd=HdPPd*(fFAzKB-~D~p<{zFMJAZ8NbW;3p^z38t@!8pH zLkC(|TaH{hGhxMOwaQ{^akExwIrElR6s}0mBi)Z{2kV|lM>$Vh(mq*3zP2YL8`wzw zGMDS}U)s3U7F1&sW(Cqvrf$)U&KQbUw{C@jL&}~E3FXwEg$0-1(^Eewpvx(h{2;Cc@v%i-xOp-{{NOkf3JYZU#Bup)u`WMI!o`#&uP>bQ z+Vb>^v`fXCdNX)rtnyJXgFPqtId#+ ziL-EoDh#Pjc?wNpC%&E$B=9mhOJP^SeG8RBwxo%7*Fwm1TNRo8#)9qgovR&QlYiS7 zuzlg636aC1E);gydSkZpTVfGV-{CXvFts%c&LJiNaSF=R>i6-eufue&{+i}W~d;Ym(L6sRgO@9Lc@*xP9 zkNufl9kMUEI^~tn>g+#{S65pVvNo3mYwg3$S7*+=x|#mEjNZzL8uKTAahd&1-3&4` z#OO(Y&z@)siW(A(@K^>%QwIvxlv2vM_{j-Q*IiP5(-Yzs9*bG9ba_DP= zCij~BBu$0g)RTvo(zJ|u84F}&LU5$WKkRHe>YX?0=kEreU3_7d z<(n6@?)NR_wWZ||N0+4@4v#sxbirY|po>BRx`GC6%{t(2O^P*wt%jr#I?*=-8&SY5 z*%aN&*|pQ3CC7|U@tL-72@V<04vmfu#qX}O6PL`E??@4QS4SsWHTPIHB{bjnl|c8< zsIp*BcR!FDO^Ly1N(FK&@kha088?^guSN1B(t7k>wGc=lj)XzG4dJDPLx*@V)>i(F z7X?Q&mxHM)aLSJ)Nw9!25Y>$+rB38K>n&fDF7ZA=9vhER>K)c&XwQ!3_QHpUs%qa~ zDp<<7?b|xb7uh|#pr!kku+z1kh*26(0JtT7J1J|?f$>=9;Q9)5x8(kO zGOWe*b`gk*=>oEV4;9uVZzs)Crm;~SS%>5+>wRY(S>))wbi#~1bLDu{U2so(|6O*g ze0=IFvl7mxS#7WkO!f@P@kYRj(pIwEUx$7+ABSI+aS(m+TZzlrz|n?#gvUK#=qV`8 zf@0FktJy#{>LYmzTlkUuoqT-mu3hzeAi8R=pcX51{wLJJL&YfQgr;u;39MN0)Ou79 zZV!Xy$*G&`YOR-6UK(v$ zy^amO;5}3@A1(ZW9&qg?*`Qes9)Ks|D%s$AkW+-1c$Rb+(H8@R6$E^*N2pmR(+$@OEbA+X)LVFVlXbS^5O|C7@X*iw}Tu zH-$!qlvfToT4zTJ4F6jy(j;u}GTOGag{RHL{xAOnFiKTL3!zR$XKA5Ss)H~@hsIO5q7k)wSxcX7k`6(qj5skxxptEc62N^y* z1dzP0q@P7C8#8CYf;lh)#-F}*>-4jj>VCrPzSa8!Ci{i#Kf0-`iXROAv9eNzZxv^+ z1>FDp6ZgRwL)e^o2pi4UN(fso3Sr}6^lD2qFANAQO%1g@+;XT_#0X`X=p5!TwB-@o z(3Ii;F8|lH&jeNbenCmzLk3KC?gF<6mW-X9Jo^ptTH@DNuLIbVJ=8;FUJ8W|$PmCr zGZys+T2fiT>e_C&NnC!YH`~H?DGJ9lqxzx}cX#Cg^w)@cM5YjldxZ4O*2@qN0t^Bd zeFzeBmmJv*J?M^EC1wpKwj+;N6F;Pcf|*6kTRun$1=$7dfcM#mYvLP*go1S5o3wyj z!;kyDq|YQ{vg)@pI0z6at(cVTeg7FN-A5<-`!62d(Phn|`6uV4e3-|aU_r_?6@HuN zhp(70VMTc6yD3Yr=M;aJt*S6spYk2`6OK1cDZO;fV1R}z`<8m8uUa~6b?&Tv(eeXk zv;FtAeZw}&BWM~x*sxtieDpe>3by3ajU(2rjZ@9lPYmb>xylA&Ty%V)ABzT#ISQb& zRRZYro`itnlu+9vEr)u9IV;>TBEoBE%fmKPQcLB@U<>_t^z~cfHKd=XQ)eYte;4P; z1BQ4f1@!}4P)-1}ueLz@V`0S1rs~13n+G0iL4er*N$e*ai`fS9#}0!R8)|apr~dXFwlio zBORG45S`Z$chXW1d)AIdfxr}K5`CvjOicmu@HMgQQ)kL&E)dNA?f!c9$EE$z3EHS) z_OJKXXUM72a!foLigS}`rbbwYl#hQ1he;~(RWdmFOWM3Tb?HSNn zejVK_5Gvzm?eeKD0%I+$gBw_V7&M~T5KyKUxv3rb#y3|w_401okrGly-BuSZB7~%l z-u+8n^EaDCZ=EuGcSJy*e|7ac`9BqJxU*6^*Xnd`REhu0+~J!>m#hlOdi13x?@8LF zxYsj+7dTBw_4AxQ60W$KjhPqX;;I%eNO4)M5)Ricvihy6z^IwQY%+UB%;`3yK^G8mXRMX~Dn)T~UKWNhmV5S)^95+`b#3xCDs zO_-23M?O&_w5t3fKK_fN+P1>9N{>u`{|qlW%)9J*TH3oQYY^8hnhgW;|G{Vx8}tI| zn(ofZ56)>D;)O}#N#}9(h!eav(m>bRNlv2s#M*b9q(ra&vlGXqT~0{2oHl-ObRX}@ zHxjC3$~tdTwfMUDW=Vj~^byWJXnrKv2KT<= zFXb*hLWK2#5yBVr-JSJ%)(#VzrN-V_>g-tSKa@Sv{#RnnV%JaJ`HFns@rRQ?E{gf` zq>A0U<*2n8DlI;8+W0U<9VoVC8JdoS7=^={dHyMvBr7X#M># zVQU-hqj(`-j6ZNdm_u>)wSQRF;{5Vg2zr0Bxb_IJ?~kq|>uQB_h}Q{HSd!A^|ItN| zc1$zx$xY4BCpS+rr`A~AvzOa4$4m0uEAsi4^7*&qc<~#ULX{O2;^b$3(h2&(S@Y6o z3sJMwKr>**d2!u9GVeqWMZQf(kukKxQX|}V| zk)F<}Z0EE0BuDzu{`m2qqn&GB9<*|K@}_r_K32I5aP#aKdw8zjw#5_D2MrrAeMmsG z_UT@E;9jju^}z|*y*oP292pcfGd{aA0DT{bzCYFVU4KrWMjdQ~DacNL`4T^Gg8Q*m zPoEA`-FP}|%eO*Adsjb)mzp;dYVK%%u=kGguv0Zti6=paw@OR3I9*ihVZXFAD0Jtl zfnq7>;y)_m)yzfb3wzfdg+fcKb5N!7xfwHrGB@jW3vZV~x^=<+kbhechYq#~Z>RQs zTfTMR0PB4*`Mo{yR#$8%&t2uW^3%+$FV|#Viho&l6GoTIzwoK}t#fP&KYZYxQ#Ic+ zF|Z$t~7>D|!tu=+onP^aT4bg!eih~@|E$yW! zVeBjX-|*)Tba>@`o|*M|u9x9Jhq~~|IgR_1bCSmW?-;$>(;w`}m;b(4Bmcd+kDqd| zLr0)EP9WF@|4bkSBjK&oK`(%ZLYfys;Ad4Vc-M*)GgA&3t2LNz^w)DZEBofUQBurv z;|$S7P&FUZYM}k}OjqIL$(C(HlE(MUptQGnVY| z8;&4xw>Os>oR4voL+1yk>h1Isz zq1hgRX=#DQPj~re+}s?{+QDvH&wf_(ugHH^_MZ^@dL)@W~>5S&__Sr9fZ(X+a` zrA2V6$D9og=Iv(W1w?EOJMB7aP(r%O>}ifFQ+q)>QTRL~-ecu-ZHyFgZgAM>#E?N@ zqo-7@)%F&?4vp@+9;j4fU_QlipcT0{>2#vCf(izi3}|v$&>*Poss-26CoTkp$;&UW zlnZRt^ni;ePD>FdtKQb`kgwc-G!QuMKhjNf%^IOa^a(v!h1*5OeH% z2;O)qm1-}alpk-}%vu~d$y#o{F!xMK!r29L-`yZjRrga1JH+9&*Tvyj5bPh!j<7zj zU0~K_fV$WWB#hHbC=%X%Fz~1 z2A!t1H!(Qt?}bsWAcMUo1g2^q2yJ(~JIi&^c=v+vIk%6?J6Xih4U-F^uqWW@pEG%N ziF}eP#dE5U;;R-!WlU<b;_ts@0sq&knuR=m#6nM>mJdC8Bp<5YkGSs(HLO+9 zdijr<3kaIW3ONcc;dK~1o%*k;_4o1M%>mmPnuv16K?_Gr%JdJ(a6O&&QEu*gNh3>F$>)S)YlIHd^V8jY6UVq^1h0B$ z(W3K70b4H@%KKW9VN5OH#0lKSV~E*X#F_nGSAv#xIR*4PyrxTV9vE+QT-DpO)A`@i zOYh{WE~!HbLLy3n1GX(%wroo7r17Z%lXFAFGt#OrRu){FpPLxD+GEvStC*9?9&?<> zC;J4XyYF*J^qI79jB~74(#e>F)AKTq_h{KBdUHhDrA3hURN#6FRBKLKEtbezgNx45 zb%y&e94%6X!X-X(2Qd*@lTNWg+ofi|*2*m_ja7lz)QU8-u#kSgF$7TZyePWMfd#w1@YED(+ zgt$?`8zZ8&OzGdlMz~Wc`#21lwEFb?_1~@l*0j=`jaEgvNdfpGsnQFyja#s83+-N@ zeI&@JeA@Bfkidyo97ep0O>sxg+ltSE`@wAWUKIUQ00Y+OPF3QQguL)`Tf@B9U(d>T zE3@6X4uwk-_f2itJ7KL)_N}btceYF~4h|@q?wT}pNMNy*YXxg}^C}zi@q(C>Ni%cu zv{hrL4a)qcxZwNMmD9_@XKb446rM7EY8eIjkqv+>A|wdN;YcPUdj~l$B=JpcysB%< z$`en&0pSSyWn2sfnYk$V(>(9&RL`5Iu~vr7h`iz&vNVmYf<6|OmEBu0c~#JY?Btud zD{rQ(m>00hcjdj!vx@zKOT!{cgMCY5{L(yJ(*pw2UEP*jAsQ%R_xv`^I-N^SteOK# zC!g!otj+x05zEfU?;D@w7nJ4Yl@;WdG+wyqlIZQdaLky6-rk8W7{pFtS|U+waDDc<=<|;CExsbfpBqnc*=y(P2tnZ z!a#lA`a(&tNVzx#PqT?HMTrk9_*qi)g=0Xg{2c<^Dkn zx<*7v;DI^ejnmYNI)3W8^phIqfSI*l#UW{K%?-@QMtSar`T;zY)zIm5=LP4~|aE@Nj}Ru*W# z?`pB|-HhzF7n>*=4JGv>MWd)0GE-88fY-Gb#HI3zYOZ8z+v;=-tF5K54;u0T=o`P4 zj}WERA+1UckqsuorSPW^y!O+yuv8ZCp4_^fmCu~f`PUNCKUovH(%XAwXlSmtcdiwr zXWlO-*zejteck#jey?Ed=gap77R`uQ=kHe-7G4}k4MUO!U_^PtOgS~kj~AP8n!!V^ zXm25iH!L4}k3HfIufCp;{%L;5DsS&qA=L0H)m^1!xsC9d(sIFPyyfsBf4_Azl$MLo zlfm^c0WXx_xXm^VWo)tfihv6dGuvX(_f@*fv3$bYOu zC%;1{)3`2eK{F!hev0LAvj0xpDxXj$(25G<6p9J%0zOFuc52laJKDJ23~Y<}vr7kh zUWWIlQKecnj?wc;yieiJ678J@TD@)JALGlG_(i>SP7Fg-Mq*9k z#HvB(7jt5GyHe+2DC3!kJ()kxm$$M~vk$Omd9CioQyC?S>mK7iFNPAgf1@w>v(y*c z*?2~|+jYP3@S9?@NRvX#l>S`g+-ZlJX)+IBkY+&TZ*t^NIYo$%eS*+8D2sejF z7MuLToQ;tKL{D~_)zz^c1|sbory~-RIgxh#NzZk49w1@?CnBwe67bxeKksNu&wLBr` z^P*XQX^|9-Zys0>PBnO5$e$m6NY59|-0(gH?|=Ti{_`*V`4)Rg3KN&${jZPc{VlVB zQWzGBYSn-FbG2EFv1qs+-xI{*0^Z)zO+cCLZ9jI z)L!1l_oam#^04kh4!LOdAZNf5A_GQ*4sx72 zLOA+`_Fexhuj%_1ChVE&IBlf-X_&WrX5h3}awZ>fozl6d*MF=QytX8|C~d-Cs}a7# zJi-PB3?C7@(6#oGuz%*(&C_OW37t|h&2?#Xzpq@A+`JZz^~%4Ly0KS{D_|`#HLW?m~iQt3Zs8cRuN2Fw3otBQ4rggs2 zs;cE2XFpzzTt;i091-~rl#`_r1j7eF*rK4lXyT{^UMi0Eic3Iyysv^bC@Ab~k-s^#FVJ?2CA2e6gvXvB zw0nEbo!Wxk^aVwUvNV-|dIvc5?CI;k!;+hN%$d^vHc z_c5Qvqn#Fo9?M!OMDAs?nRoh5`LKMdG*|vj{_x}%>@vhI3UyfM>KNXR{4mI+rR22H zL`_@hI6A@in0L~I+zk2t88(Eq&Msx%EO=+Sd`jN7=NozKC#d5N>PSExSZP`!*%$f5 z@35jf0Bc;g{{1T*meUuZ01duU7j*@3!t0$-DlB}qWlL>4-3QDp9d$O>)#-%rS!@ZV z-$AYN{jKVDTc6OSxB4Q+I&}rvY55(I3M+N$J`l}{QRlCy^Z#h16#xHbq!7km7!i13 zBjt<#zl;=6Elp0+j8m1Hd6`YaNc8BbxGN|PHLXn*A@8N!L9CDGu|*n4*j0e>Bg=ZOzhQY2nSAbCaQ&MmDdJX`SR9sK#u|3llGz*Sjo z|HEfL&vVWJR16V8Qv?xkM3fnsWRiKFK~zLU98(a)AxE5W!g)3awDg*pO*W^QnOUKk zWw+E@U9;?Vy;+K!hxfbo^Bg$fvF`o+{%@ZIp0(FrYwfkCwbx#IZ+uVZ-@Ewt9DG~y z?>+o`9ln3z-_Nw9f62f1@$c&9Onn&cao212V`#k&-!azK(j7i`sqxItxR{CgIqMmI zi})>k@cWkEn%?U!6?AV3{cX!{Uv-!A5D?tx!FFukEW^u%O~XA~w`z2nDopcPu;1z6 zDOuY?6vq?I>$DOkPmQ(E-c!Te{p>~j)2hFPn{%)3FDTf5b?%%m4~!ah;LABxr)SPQ zT~+yhMaBCFtT|U*eQwr;hg;P@x{f*W-O{DsJvVyvbNK!JkNjRx*kcrp4as|4i+=Kyc&0&Hy zXK-D2{{h_m5ZYl@_4!WT~&BScFVgrALhu))1i z_}Q4p!X%G{=04AZwghUF0xKQ6F({XYbrBbcb!4NEJCdd^W2}Y{mQ$CNRhN^yJ}YZI z>tg&X7f_90l2M6OZ<7`Esn~H?%d{brn>1}&68>>=IqS2t*XMxT;;G35Po4^ayjioD zEF3fzN=!C*eE)Rfl&Jk8d}(BV0dfz_X++zMtDKGAZ8=|MPs^!l75oDq3vhI1yX->QMet* zR6cMB-H_O3XrXkWG5g4IOKH&f^wV{ z3#E?0d4nKmoO{I`>@08L8e0qidv~tP5qAiFh7Zl802_r&R=_2tXavUv%mjxR4dN1Gy6(A_e8zPA zrgXy7A{wbJZ!T`}lInBBybbFsJj<0wSQFe`%i$n-g_CBQHR47#RxJ){zC& zh~YiZM4A}O#rl%d{E{hGVl054rC#;jT!)6y9k1Gn7RQb63sPPDZm2 zRDr9D11|hJT%xZ9oST3|UrRfcdW8Yq)RGQ3r7QW^F=+tyqbMKwb~N0#vsd#mJEoNc zcMktbNs&a#@z@k9!FknFL?s|7j1-kNsC+wa)jjg{Z&tfarv8(K=+)wTYycFO)uizmp4hK|H*h+cUMYdj^)`A8` zASdMl%?;}bD)J9(Ggk$&OHmyDPY(Yl!9n&Iydi~B5uGc9Z`zLe*|{^jW8!hcFf58M z3KsL(*JtD-9eY~IN62`-1N2=AIzu!g{7LvJ?P^L9KGEw9L&f8Qh1~Z&mTA?qBg;G^ z_r``+H zn$z(6EJuUj4Co*TyWOLLmCg33e-TV=4wE( zxS~-*m!BmqCmud5eAbb$Pa;Hj#Ir|L=Cinmn)e3bxsEU1hT@^Hl1tHHl!(+;zA zYz|bX_=LKL#s~+*^koS#RjJQtXFw~&zsgp8WFgt>{np>BZ*jJ+~ub66J|MpWvIBG)F#pdH!vOiWiqinc+kEL=K<*kLaJw%0`4wK18tZ&Lg{}MDaSdB>Ab6S3KSwwr{cObL3oliK+5uz2tBRRrB zL%y6V%-^yF*=K-;F#`q+=hiILCXLyeen-|r(!htGBV)*xE$jni7Bp$N3wEYc{2a8$z2Os>TM14Tj&sK=;;3uz0Vr%6YDXiSD!|o3LtULu5 zTdqx#SNL~mX0O@ZU2!4s)8+T<*lY4gyL-z&4ZN`8t{pB&k~GCEPx=)@ys{!fv;|*T z|H*2|&)v%&2Bm6^Dp40?Uc1-`&sAO8o1ee;Qq}BBd-L-4UYcEfuCnr6_3RHTD?hxq zc+pbXi#^dN$uA()_4J^7UH~LGyy=TrB2MP)fd@)D; zE!?U)Q=YqZY}}ZURk1OZBS*)N-JV-<#!%HR8z*(lBqJwarc`h*lgkjpAC*Rl&_^(c zx?lU>luDacai(&@vy-ilKT(kxJ8R^~S+SW*t&dyJJU*fFOog2FUzG}&x!b8!l`*kZ zRI07Hr-XQrfNt|<90L}vw} zena`py2UYpfzgHq^6LLiWGy=WDQPJ>unzC4(*mv=#1_41p~0g9G3?N9n$pDdpNsY% z7<*g3IGRl#+ca&gY+wh)Q-Xs$n=Lck5*(C&^u8)PlaYbGmL|R|P1j!Go`+#{ln9Gl zeoTrlQ0WYjyBoJ&5vND99WKhoKPZeMiM$jkc#v9;F{L?P!NUXk4GT()7g)}u{Bb#h z1EOH|x%CT5O`m*r^q!xJ_U$jay?c!Ka(3!~etr8mx>{IuN(l@sOiS(GcYu9QC%sjt z^sv};Ns+TvW(=^>;?oqXXsIW4NVLT7}H- z>Zwzgu4R8Q`rtk*HgqMMS|-;qOL^U7Hia|VnRbPWzz)j!^AZx~8P%@vlISr7V^z98 zQkA4k<#Ja(f+66en_f>{j}@5KZNXUI9f1i6fxCAX)i4$^VZ@`gOl&g}LJC3>GUkmf z7%|=-)m@0{eo1>-<=^iS?k%eob?j0?f8mPOwPnvDD{JP@F0Hg0+aY{-TtZUPuyE_q zohCmjgtWY7`SPNCFW-WK{IMS1nd2~#EfRx87wtDn7({!g$|Ihx%cHgx6l@z+ur)t_ zYk^nBuwfZq-kHOOWs1Rh+X_Z)&(GgJs$g5*W&&q;d1ZJ51w0~zWnzC=E-sp3yiIfO z3+8s1UP)Cae*@EsC1BXdRSgxwh_C_m@wgudGlT`#i_gWNj~0_)6*vT_~4++l=l`c zK0SHz>BS3APhOk6I5~Mq3jM!089Z`?Tdb4b9>X_9{-G_I8qP(}B*Tz(>+9nq+!7s{ zZi(}J`?~n}xVU*^Rv#nW628@0AS537$m!4H`-~yTeRR!xw&{9Qq&~HFPu9qhqogZx z7tvusNL=01hJUl)5_9QYq^aZ%OEc?7#lp68; z4Ki2D4N@0a4Ix-f(kRw)8V(1r-CdP!Qa?ZL!9$;Fy^Xq?y;g~a)2o-?*y2@Sv=U@; zQ#Q=(1+N+V#&vt9`^2fEW(O2(88i*;c=!bf`z zpAas320IoVv+kW2)jMdgeA6i-#4*HEZirhoc0m55)MURnZ`%WIC9{HppGuIl0W*@< z9Nf31|JVsxu|DzMcJ=NPW(0;#i_z)(CglvHo33lSbWaQKA06ENfNjt85&cGn^+ffS zYQEFkN}UiY8KOx;ff3I|gEQY;Gdd{wbd7>wU$|wv&lg@)GDd)i6y;c`)|dZA>(_HC zG`+Lt_@^fg>NTXRja#qKmu63YdGh3!rzb6)GSFeLjg6=MfGJCoo_cM{l-H_QNLg6a zEbrb!?X10BJZ8s)m4#@NSV(DTLe-FdLps_Gb@Z+r8Cn`5bz}pg20ZEA-F{HdkZ~aP z)T>hl#|*IdvA6T@)<1Uelvkgc{>o%|3i5{za}4Tb8}2-KL~vTd2 z6MWhGpBk<#n|IcAnS5a(i~C*r8haQvhJXAf+R!T^BDP8{Y8|CD_{*MN60zr@l8@WR zP{L)8CP~v%4Ew4++nt-c`?IQ9pX|xY+w;jRTv=51!lblylY%F{I4nh)p78K!-tNyT zD?izjo4e14a+q=*qOS-iujjdf_ ziwI|*Csul}A@aFSHEfgo)JhC_x3?JnW;nWWqmUt_Z!vT;IDk~NSSh@VMWqwp@#hXa z>M(8Q4j4f#bBB))dX6%6_)Sf20FR+gB%rNwKza`3`4RL zAJW`i$l$S224*tCXyQ^PG}=cH3nzB9diIFE%Y@>TrK!nFS7qkrW~Sxmi`8+1riz2) z4+f=0)J~mPJ95c{)U=Y~jI~h z3uwAL)gxv}Nm@qngw)gtN-;zP9nr>Io0%2Mb$VV0Md9cBY2?FMU-stoec9Q&%0_R` zH*6HTr{-t)rw%I12}-fq+Hj1J))8`-5mQw<)Sc7;v%7| z?psW+bfg>)Pnb+5Hnw1Qu%>WuT-Sb`oVpGfyiXni^i`#0opp01UEf}YNy4Q0bTM3p zuukm2#r04GcK1`R`66qiW!l8h#`kJ<2mIXnNGheCb!Y4sGdhR8BxiBo_2j z{lmghpvmGQ)?fDz^nEjSN5zpWH-CROcYl9qW0GN$@TO;_}(J zu{%h%u3`Y&nz+s@jz-Q-?P2TVo#pPwDQWK*X2oUqUD(G-_@;%L$jhTgc@zU1;U-?L zUJTENVvU4HJ6UH(W^q=2XMp_O5$1SjCX0}UesV`X$K;FRpr*^LQ?)!>Q3`Vc5%Ow6 z4^V;bSkq;3kdh>=fOpA_D&Grl8&dsuvsV_u>TxY}8>9B@Y$|qju(p3X-aETUfX@Rr$GaQo4VJr)Q>L+GJFJg|J&30P1|Cf?`zTk&jVm&bU6= z)}#ejJZsgZL>Z&f3(LCdkEjC`h$L4T2dTKh^558)mXV73%x=-r$OC7|9_=a2gokK( zRBu;Spo}G>%WSQlm7i4y5;VB+n9_a=nsMeJcC_${i?;Vx8wE^D)W#< zx6;r~y*hOo)H$J;s_&`bU`g7n)yDYCdr@0y7)>vtdC&nCnwq(-g`rKojDwFA#(`B` zNb6Yb(+BK#bsQW%AS`~E|NKeC2bay+@Z^?`{rvm-CVB=gDIK4_UaTCI>D;@YtxHdb z-d5dgb0YG`r^oen=xy!Xt6LwdZZ>HnLJDZv<0Q^v7j$#f(TT!5$k4-xr`Ngh8!r|f z@o?(Y%fjhNKj+c2nXf!iH^=ZwZ+phH!W;`L#FJDaS(x+1w>%Mk*dvdzET6gK_WSjQ#wd187yV+SeTH1B% zSV~M9{g&lstO#su^jn#gzG8TzVNl&+08!tB(4Tx&M!}J^>{MM*~M)olD@Lo>~j}pU_s1 zCtSL?bYFi`e%C&F!pMH%!trXMqws{IT_0g8=`JR5b^vsjJ~UxKC$N};qMQc{IE=9Y z=7YJ=4jU_WQ`kItvk-tyzbbLjXtq#WQzoy}(u={P5I~Tkdlebr+(ip+*?%!LG}sp} zesJ!@i4V@cIJ%@{^ympCV%4=XXRm$z{+VlQYM0imS-W(}YK)zxv}i}?G1T;ACbp*- zJc7-NGDCJiFQK423N{YP{SQWH)lhcP`}nc{{LSLpCp1tC+ZJw}3#1|-eGepRdP+3E z3wdE}n4gtRZ0v*Ydt0DlV*%U3`5|==zd-yLxzUT3HBTu;5RtBEGW2iL}Y#kU8kbNsL8>q1pd&5 zYUsHuZd^qI<=t;)JQTz(6z>_0nAg(Sl%grcSwG3C!b17Pr25fg4v19&6G!$R7Ut&_ z_tXh(ZMEFwZXGZqK4DrAS`V5$TF)^oAIJ!3Id-V^A!p%uwzDc85px0ZP3=C(qDF9f zS3YX7qUQPGLO4|4n!C+eB4ggE2jv|?u>23*PM+296LKC4Vm7iCMVkZV_5)w%N0e(k ztf+6bnc2B9o3q$+LSaNo>6DayDLr=k;5x$sCtdf_5n0QU&xlnalOsI?lEcE2dv$kX z&(vOX_6V;?nzLK&9-gSh-gKdevMXUU0F;%q$-LLnW5{XB8lsj=4$FV8a@5)^%fDHr zjffmIMBi&wM#kcVcXub1mL~gTc!=5}v8rFe_NNxVH+jUA$N`=ss?r0Z!e_?K-LoV; zI5@4}hgT2FBwL9=6D5Y8bGOCmtGuYY}3{W^XAh+?+Eu)_+vM z*!qd76DK7nPby7|EF2XTH440fCxm%s1X}8?OTt6O1_TrZhnHCEEdn!!hD`{rj`H@7 zjP&-7Vr5}NhJ*xrdW_&&neW~j|ERb|u>N5u3#iUIWQ0T% zcdF{XNG#eLIC@ z4$WFd^_NgwoPhe%y%sV%cf{;axq(el>MwI1)n8CbN)WHV4Z<(%OWk+8LsH~5f{H;I zldzlHh5-q~cE=_54)@)io+*8v^Y)8Ti{+mxN3O4*I13Sda994qHmX@^Af#7$ky)ot z@(XuNjN9!MKOi$*_+{dubt5a8-QuVh-_EJNUxAFfknuD!QkzlZf_E{uMT>-A#NX!| zbYxu=17)R(qK5evFIvRhgkR?Z-no4ozS-UAn#@4O0(DWHMmr{07*j( zpIxr00jX8e5#c*+05{gL{1U!19MT4?p)1lhOZ|ka+BXym#wdx+Lb%~AAzbTOTPxqM zr7i1d`2*oy76Iq4t48xM1&|8)yf}c#fWXY>kPP75T!tUT{v3j>VU9BeIOjPeM1_n($asoi0bZexK*ChWIOzF5Tw^0t zND(04b4a)fnGOkiaXJwyWCAFAb4a8K#GmJiQX%7!D}_U%AAyWiA#*^dAIFJNAtbXW zrySb?`9RcioH!MdhLYalkoXpyb7C;h1@|4-#whtL=+I_HVhf}})C!h>B&m=MatQlU z@CGDVg(Ly8kwf4<=X7=fa+*U@RY)-)D>($tHjcBR*+clAjRKu?6|$=Nq41q%1t1wJ zWOH+sa21iLdR-<#G)$h3Fo^ElTgtCJl&4FYXPYz>6OCS*)cl9=p6H{Ppe_4R7zo>wr19PP9Qre<~%$Y&4AzA51Er(qL`Q_}mk$VTIN!7&E^W2t zz^pK5hk-#)1`U|;Zii)M@;{!m4xSz!T^fiMlMkhx2NP~~`f;;UgqBbxFgx>E2sEkUi>EKrw z)MsEqa7dy2_%R(^O2VScqYu=EW~PL>jToraXIu76-L-J@y9GfdBV32go9p8rS(QBN z;Nq;%pe)x=_rM}mfLSQ3Ev+1=v=D%so80nk-!F?3pN{hkJG@Vp`*tZBIAigyihziS zfWW9I%Mr8FLzWjYjXcYxUv>Gcx&<-8Bch{2LSxZPFenO7g9myOPWJL;>U@?h&k+3O zL&A;@HMNE@=*~1UyaHA9hAJ@4sIvUYqea$qgiEQbyA1D)+4-!B@VK3Ee(mD z6`i~)um7a+5qTT428H(%wV9D&=}A^W6T{uS{imn<1VznGN?#l&vK~phmTY~mfNdNY z>EbgsJ0zIi4aO)h%o2UoeFGBO;*NEwmcJ+HhB}W(`k2EjKM# z00K*!UtrIR$IThBOWmUe)rBSW3K?1#o6I(3pL;!Gnfz@QdeWn6F8~F5x}xHB7k6(GcnZNdV*mhpbQ`@j^Yr zWe&husX}4_ImjWaRLD4?URcL-)vAzU(0Q9fR+~U@$0+EmQ6c%rb)G}kJ_1>%Ldu05 z!cmU1UWJg%Z*j_XCJ>xO0ObuTWF#m*%^@32a9-iLHnl)@g3h?8=a z8esv;5p%e|&7_Wvtwva2+HBY=mL{f{HXA_DY*1hG*$!5U(=K}59#CEj$QpK>#ffCh z?*(KfAh1Aig<*rnU8zAFnO`Qr;8Rj`rYY`93NUKLD;xtOsXj-04S9JB!S7|_6B>ln z<3$>1fM_)Tq1$9ay_LplmBu*G$mTS7>)@2GOz(``0OW4(BrV~lIQr252qkk=t6$56%D3-9yw9y?i?qmr_Mw79cCaXvg644rL!(ovmp;{EKvc zJg~#)#SxK<3&$)Bk61W5AtWd%enhC$RRt~@(F$zp%$FBpX!^>uq_h?3=_~MSW%}ZY z5lP`?W#LH?6PuhLn}mLZRf(QTVfmp+DN;3~$yom@N&hOH+c1BkiT|ynqO+OeKN$Xh zDE{KVP>dxhrsJ-5ig`-&v<6N7uL}QH>3^x5|EJBwtn$nPf1mLGi$?!K*XWi`&A;g0 zL8o);0P~l3+kf(|<|p25sziE5bECcvJEU$pcO_j^x^;k5+ccl2-_y@DlkO!I@bu{B zf9fY{e?op&9Ehb!!I#r_mnxg4BR{1_nMwCk(+^9(GJ~!gZZNrpe1hgV=~uA`---** zg+giJrw-mwac=j z!YvZbN|`EdYMsS?+IuMD&s@~Y+9*NBgP}UzPyBnF^iVj&@iCPkfBWxh+>~FT!`1y( z%1}E%Iw<(-cH$exQ5$|RsCZEwq$J%Ryr}SDpwE~sDpq8o_?2g5q$mCv=Bt04?DKAk z)`7zauKs(C^bS@#G!}N+Td1AR*nJv;wtn_5&I8$JZJ#i87(o zE$TI~A{r9G@$n6!u8r^&;z15a5wtj3E9e-kH{f~WJ5}E(xS<0hi`88Qdxo@xdkPNF z{ih7>QaT~ZeiUW^XE~xVmqlC$HZ@|hoP&#C=wr(pY}{OZu) z0C5V7@Wo96PCh|s0Ow&#?bABY=D9Nh{pcl4M0YFC4I$dXQM9)gvkc`zZTTgy#f!Xq z4oFY$F9{Fj{^{uhdb*D)9_4;Xs6}dn*Yt_*PL|e|^_HD2oO;Ejr@J*Mkz~t)X|quz3hVyc)qz2#dYK- z%{nQWECqKoFFzdmP%;{>3r=#`L2akT{~VM`mF$L_hU=_XeLXAW0h*wKQvEknEL{J% zc2K{-3>BP2hU%}Xx^edl5&bB*7(N?{gF+~-ySpEg63Q=eCHEwSJd`DY^xu0S-xhH^xRfVkzae@oQ&TE*I)_v+jy>5o!Bsd~7p0-eRPDD_kR-LyYqAx5LoDA2&_>U(nTo2w~pkHwCQ6db1 zzf7BrgS=?%e%MjNyvAdT>=D@ph2y0$JDWyvyL=_)Xnoj#zLDX*ge$WQ1AF;+I&{!? zw9=gsx7S-eVcEfcsJEkG;4I;aV_3Ay0ME(=^L*tiXzhOod3vD}5LSOQM!q6$Kf?z2&RbCFIlv`4%+WAlmhicw_fY!|mQPsG zs`&3{uz}JjB9G%%N<%?-sXMfZ_Xlv%zK@MIu&IO%;NrVPjW8iW;)~mxO1S(Eo<6;V z&u1A1^a_vctH{p=fB}|SP!DH70fZO}+#xyomGT#j3XO{RZ7GoWY(P^9sZNowQ61RyD-S++@}MrP zwS!P9#>h5<9OnQ%P=|XQ2de|dUiZ73DF>@h58nIypk>XUbKrB2_9m7*XwSBc=_vpW zR}=GqOp4*M7WMk`kDXk^LZOyw-h0tvFJ*N6S?bC)uFOg_`QX{In=tc1-xBY_p^t!t za`Qj-f6&hSv^NbHE-`sMRqEa{iYk+rfP}WQUe|{|n^YQRY_w-BUB4lcN8gK#6c{X6 zgo8Gs+z<;0o+9x)uXQvywi%(@G{1T{`b3MS`8TwHuDvaTQ_NJar#$t3g z62{G)MoppmTQ%4!1Wn#%TRIA7I&GWujR#ch7S{dM-lk|3x!NdQ?QM$CBqi*)NjeRM zXSMC(d<#wq(ozDlT=?K?zJrV3=B~@gSeKiSu=3l*i@#mjmH$fZhgH$8(_gcg=Y~+yO!OFS=nfW|s5XY!0z)fgAyZa*@OI zIkcT`G=I~LmR`Vsqp^j3V9&m(VDrRovzwk0c2&tM#V(aiGjSi(8qMFdE2IPHb=d2m zYs7J1816urEiRfpyJ`MxapB))i}PnUE#f~wwxoHV{y2ImtZ-XrR-%t<%EKw3PbmfF z_i-KVmD79VCE@|BjXfTa*;>>bt=pulr4-nex=jy0)_I*0_lXCP_72kit)$u8uy=Gr z@i;{jo!V<%zceE6mmL5MAUfe6m75Q~^U_);qw=8*{X5`|i| zN&!5`A=;>-9DI0+OY`7stze*I5Unx+)858XgjkIgQpCeIxo`&f(}hLQSQt3}#%1Rh zSP^gI)-~2pSU|djiC>9dHMs>OCkNnuN|%>9Uewh-D1S5AFCZla5O#vmD-p(n`lw92 zhS~P6JRI^C&7G^aH+M`MvbWNLv&9^>_GkLdQg451-z>LBPv@gLVTIT6L7d)Vvw>Sk zjh|>uKs%!(S6q#NJa@vV59`bJPsv+o z0~^ooHOE78sB5bJf5)h!*1z%GPIHWeggQC;F^s5NIaug)r%*PHrD&mR@^0!UUKMUN zd5WKkbX=}AUA+y^va|N4oFk{<>ZA+!jc`+3K$<3}u;b#V3S8TfpzK(a2Vt@2fyHEdIp{53dYcVWYyI5y%1>8I zfj-n!zNFXR!&2PhqzLR z3PRv&Dw_hur3ELNGxT5S=EFzQ8}2&eC5L{ve<2j@gFGZ^OoO@=Kv5~QHO?OeqNBrf zcMyNfm=<8P&rC7nFkI*{Kq)$JQ*Q32y!?&1xf}C`r4JpNK8*jgTf+iNSWxi|c1k`i zpDAI1P4XH;Q85cFHmqQ$a^Jb3v%*V`CX8U{u0#i|C%+7lI64T6tx!8sCHvfA1UJhRHS|Xdojd5|AXWL?#a2O z-vYNVU5SD5+Re=uheoiL=Rdo3!~1}UM5438nEClnovxd`Ty!#yn%QEHHtq#4zb27z@Nt;d#q_m`UK- zW6YL0+hT6Dl{^ezePy^R9Qd?B*k}0aDw{8t3pKcKw#HB`&oES@Gq&c#+|inoX+>9O zQFPXd?P(akViu*$T7G@vc=m*1E9{fNmraPfKvz82#m?ZROlHpEk&gBM2;ik|9gVibd zJ(UsNCITNJLsl@vFH9A5uOt5!Dsh96Du9?`h!uJpZYV?zKQoDWuoEPdoMhO@-jc(Y zonWuAlR_86FDStM=5O>JaZA&11i)o$#%pG3D$%ag$R)T&O*=f?x+Z-eH}=qRIvNHz z7P>h8#9|(o6sW_2Enc>59UPny4$(SV*iwWC5?x>vbR!)K-r4IiGuLGwKCJx4g_^8q z*1DXWby@gXU$6YePx(7sv^gYU@ZrI6gYo}ikN6?{FVVT)BYyDUcn|zk|EeDxZ@A2Q z%HQ>2S?jVn#mx0N*=w`1)@Ik&E5D5tv*~6~g09Qj!UiP_MqZD>34_FK^&WA92E}=J zBn%pq;Gz8e@EyV+KEOa2YELwZ^P+=xmi9WCmhi^F+rxrp2w6hb47sQ1fWHjS&0sgt zD9*D3z@rr%+BJ`(;n<%@`yPH|%%B~%cW={-mQ2brbCz(N6Q@NV(@HZ8&k@P*gyZ;& z*wFoLC0LH$M}V~!haRzt)6y7h@5=WfXrTBnPDP6z2LB?VL?|t4y78CiqaB3vV+>n` z(lH(W>vPi7C(3#8J3vveH_(i^PgQzDc-0W|P!>iAyV%!;cMapT4fXYnm+Q5I)UHI? z_~m$52lFh*YIll#gK(5kxvyaydslcB*p!DNe6X*jJH-6!=nIFsl*|A-|d$GJeZx3#oYS^8ZyQe`r zu)?rt2K-@CeSP`I@?FBXP+oq4S#Pgwz@hlv4Zz%!52?8b`o5S-`#^v{<1E7TZuk?P z@p3$S31#~ee)3v&lF?;_ZMZtO+Rk=;lnblW2M5TAwjr+a-H*%5KW5fDsv2Ii-|WD?XFdLs6JYUAh9vWK$`?`V%tH&{%YCfuJ+2m869Yj{I`>nY)nr<%6XPa6I} zmabTkU@vtfvM`b1ds3ETjCh>VP8EJI^k{l>s_@6OX$H%w!u^K}rDZ~x;hl$g&xY;@ zU_L|*k5SW)W{nUoM1ml8QuK7W53TUX_tav2^UehpCw1RFcu%{vu~h0}&`lTa8!V;^ zjd(nmOWLGRdy30Rp>f~pim3q1PvlA&?mK5YPO^qf*a%yw$>CMs{Y^S)*TZp~lSbdOH5*-Sj69ULm6NcA+Rek4D2s^9lnn#d zKUxLUGECEoN+b_CL7Zr46b?1Ti7z`qJJKr;W2Np~$WypGqFCD1A|%O2dK3?m9u0N} zaG_xg^CC4iZ52zKwz8rFw5)HJ4^nE8izJn1vzHock?E}ih%hFN95^8FrmUzwG)T!s zXWd$Tww8jLkGP%!pOXeb*dUMJFOL^CNV)7KIo{A^>sEP_bmL)f;W%y)2bJaq$pte- zOCvyODLb*}jbNG_j}o`i@c>;L5ShSxZJvHffmW2;xo&GLSElwk%XjAHwdZ~zEO!alj+)-8Dy z5Sr)Tg(!k{nRQ7;KNeAK0j^w%QX`iMy&-=ruloqM zx5yvgoICd>>TLR7i+5!14MP0r0uiBXMC8IpMC8U?$R1|cXLZ5C1GbYR$xg0Jb>Xag zbmbh_xS-l%YxRN!)qkW~oON(?boe8%d+*+ceN7gdIkarf!oN8>Se)ZY{tk{XSyko0 zWSz)2UR8$pF5}-JP$LM}%)&>AFdIzh8S!I<#b)6d!avQzM~fIG&Cppv(IIBw zs|AD)n1Mg@82J0rTyyx$N*rVs-dXsMS$KCb*(|&-yK5FcNI)>jqjItl!UD5!oVqj% zpKo5DYysQ_=IE~$WV7%cLZeyuVL=JcXjkue{_%oY{Eygfv+xGtvRU|5;i_5quOh6e zNA;r}F2cw(0}m2kHw(vQZ)V}46rE&-PMC1fEPRA$Z5AFOb~Ou+G~XVyQDQf<_|cD{ zGg4FnBiiX-8zX*XmQJj2-z+>%xMCI_F9w-~Cp-qeUMe+ysm)QtB=|IY(_$UZ}7ZJ1%~UraTB zHqOUt!y4ZaGPZ7Q{QR%xqdy(+&FJCZyu6}Gd7q?p5%)>`k5ieXTO#gUV)*6%P|(_` zQsUJ6+x}k{@-8L=bCvxc;g&hL-a){j&PSD{@BSEg55er@q1Ul5`Q}W!#;5PV_M3%! z3YHpwGkk{%q9zn@5K={Kf{%YZ#S@;g&-)7;51;qB5k>{Ln+ARDdrcAo++5>A4ltI5e z{3;%xF-QMb5mxh~d@+eC#z8we+91(vy=a5Q9%ku;szyV5I$^@+X7P++rxi)4Ez^3cWDw;@%bdmA1H2}ME`W9q4nW& z=7X;07aT3q;Y018tUTHa`)}b}2!-ueb;%>*TC6_CneI=;5e+o4J$S~vG|HUy;r}4X zUEv$E>iiE$_^A5z4uYq-YQ%J89^OMRJ3M1L`jk&Q?KFewXuDasCwewxR@e|$?+ zfGs@9q3E;(?X5zko_HGm`~MW-!o&7ZdX#63RV#@*Rt#7N)J_&J7o>HSKkNLta+ zKitPRA}?jpUMh|J340HbA9_7J8V?;*2!9?&ff%31fiqwUaF64i6bB7_7|L6${GKR& z#M2AG&ve&{XcDu}W`<4-O^*&vit%zCXzLp1zfpY>NdBZ((b_S&#qmLbaUp#qt&}=G z!cqLU=|e$cgIK9GXx#L0`$0Mbdy(sNFJ4)y)ZPRf>q?TE;Lyfiv4Ncdz5_n+s2_!5 zx7;Iu-4)t*Vao>j$GO@LboGi!3XV<>ov1Yw^oksx3if?MVuuICkIxyqrijye1+;GR zD;jA#=FxMky1%z0CB9PN7@?Ai3k-@c&K=xM65(Ra#tWq&F~a&IRhc~ zD=`XSxX`#;t!2TiT%)(R!OlO32Ceq#tJ#xn)3FPas*hIl>n*Np#xsi8@+P>{B{ zO<6141Rs19ILSf%mdnw~|0EoJ=Q#cdYEh7D6~{j*XGnC|*G*%DZ{YIM(STBpAFJRC z_6k1aN#Jnm=|pFyk&ZTj(=o#5aympmmg@zRGxV}rb5$+(G9?{y#d3O-PUY@n#_8w!YdYa=?IjT6EkA@aE1REoLZ3u@yqw?Kh7Jfs}82M^L zI9~-HaN^tiE&QA#Ii2?vI`YeeufQ$7fSd~auKYTu^8$Kqg$e!*4cYz}c z?eJI4-Bvi~qqa}BRe^(^$t(_f+c}?dg16ukAHccXO1>gvKW95sxp#AVMaakdc`ICz z8yhNNoF1$Y(7CQr@J|w)+AmY!cwe2|2HFkJ37C&fm3}ebnYAB z?dabSM5BJSzw>&1Rw zD)mPNA8^hWww$zv)A>~`S1rd!e~{L2d?UO@!EYJ=xD|?iqKtn9IEXlq=xA;$@-$E2 zbU2)kTMGPc^W;{z3H}Yu1qCi=hy#hQN(XSxHxKaZIPU4j9)dpRLycN*%*Sl{Y7JR^ z7>|tU%|D3Od3h+m&f@xKWFzJgRCiCMs7bmaCn@GKZRRq1dri#GF5mIr&9_#iluFY=PPso zk0zZ6CBUb-5VJBb^SBm1(VP#Zqs%guj<~c!sTAWB?nI4B$Ek5Rr_vQO(iQy@z*UWE z_OcU1M~;@_IQ2S}>v*EmjK#Xti)vQD9s{*ZJorjIXmdOG4a{5wCoT2jbQJs>Y&_?S z_Qmo!9PSQCe*@>Fnwv4334>dpM13dc%HjHP;7Ztfi@P%BRZ!o8d9am=8&M$*X&CWn zzK7$ixWYos7Ye)|2?TZTFt)j0^D~r(bEC2-HT4CT z;s#6PWkJbs=QB=a3)eaPBQ{K^fOMN6T`uRLz;ECpKa%Soa)!`}Mt_d)Zo%Pv*6pn5 z3}WIH6r{tFr1A*Mjr?`R(hO6$k-vm}c;h+?S{QG{H(h@boa0lAE2fr3`yjk!y`}JB zQbvDarH_g6D)wHbWYGBMoU3X-YLt}_G;Xr8gfxoAsV0Db3DgPSsBxUwevi0J zZoVa6()U5?1kUAOOmP(|aFR)Z|EgKVxpO$@uE1}wp$Z(Mn8^wjx){pKHksfp_~1@( zUUr_Z`DXK7p+fgH_|OWMTK#Lx3UYOA2fv|lRNyF0>xvkq)GXwDb~oQu`kJ^NbWm@K zt_VtfM}^+iDw;*VlnPZScq)BH!)cgc|E6HKqw#wyl(z_l-+i-`Z?Fr+G zm__ksJW2`|IIn*%jz1sm;|`b62p`Px`OLs)02koXtaww+H`Q$VYRx-s(`m*begfr3 z8LdcaWgbF5R-yMhw?JEPsVvjDY+MFDZx9WFE4=y0OERk?vO4B!Xe?a9=M#)sgs(~B z^U3b!`|23^FAg`3kr-1=JV%5dN@e3? z;260%ocOf*i-?ahi|)qZb)_9B^U!r(Y7XVSK!LXJGs-LkTwV|Dv5kEOluc+D`wXDG z&nT4J_Zfw-^>;&AX)SAG5}{1AupRT|+_TOvB@>TNps z7IMBLRrnN+?+y7caX5M%$9Lm!v?=Ka4j-<<6>kUl>I(GH8aF8taCgo7l4_kYOP-Hg zrLpb$?(yQd-g1CxF{L3pA<;N*95F z=ilW-qS5>z;6wGjTj1X|qqdE-O{n~xs3;swpq#b>B^sQg-r`%$E>7bFUai=P(L`^7 zb(1&?{zgc|b|UChL=Yvex;t_E)-8g{|Kd=@f!MZQgomT6E+=|=c-pSMJ#~!yud&I2 z0bOp%51v{GIXE2&9EmbYL-gr2drpbNFSLcD!u-Kk!FRX#h~^Q>C&3bhmuxTOT8WzB zhxlyqOpP2qB}Vs_w(`AMi{36P8ylAFa``rUx5J7}+5P*?$&McHKeRM`{_#=c zPt2HobhvwLX>JEO^wupZl90f5;%Z`ENFf12sD(tNS{DdGE?+#%;^GINv=_x`da<27 z$4tGw+Sb#f=k43_zZO2l^tZYM1SX6BYmAE4Qaob;&z!+1gms&U zpm2=cpF;06k1It&(<8eBYp|M^6}Vz|Y{nWaubGMjJU&~iBMX#)!ndzU`i3>Op67tL zu+s&Nhx7%zt?$F@mM%L&aJ846$LR<1PutEQslWUMTXyCQ5R0Xsg&%OZm;%eO<>H6> zcM);RG3v=~-I^DFKJwGoXLOq9pSP6nu@08$R3J^UZjA5)Tzz^AxMJY^6j@(XbgiDk zp82_ima3)}+!xg?FUDO;CNta9l$#1JvTE+U^l#1?N zOX1KtNt;yJ;#9*GC!%q0!2-E^8wwLqu=_*-3VThrOLVrF1_*pTB$?(lZGX{O_t2E4 zyHPUS81*D*YF}BiW&x1|(q~$o7=T(*k4BomKB}c?kGn?7&`e}?ZM!K;RKzy&KqG@m z5233K+fBE9Ejy8usSO;y35~u5eiHa5+rEIKRRbSzQ%64FV~ucA2Rq=6M!2a18gRuR zC}f`ht!6x+sb~Y5y^wBZ^H|Jtre*<7j=AjZK475D z)0f6&8@A^}`*=O^*_{$R(YMa;gCFh;ya5LpPPO#W#P!Ot>6TTRvDxd zO$KpdziQ7a6^5x~f&zaMO{Bv_ELH_g75H!0o`gQY-y3%?{zVP|rO1~lM@cUOd2bw4Z_xoq@|rISBgwfM^=(ko)-(uCB? z@c1R!vu97;k(yCADsxqq_8sk#e{`MpPIcktqRt)r9G_qI^7P^p6|p6L!BgWBXM`S% zo|Bq5YlPo~k+V(|Lsas={~J2dNu7S9hDs~0`4F-n35nfmoX>FfBLxie?Sl7JaO4>m z7P*9oIB-G7EhORY((j%dEvF?SpX6sxfp?J{qt!tctmFOgj!VnA zNxV<%gUY}Z)vX(t(wk#_-HP7RPk*6w-b>@=URztves21^)X#A5pkZy+?rSyA$c=j< zinnG~UU+(K!^|D*e#axL3^%Oxd*!>&FZ=FL!NTuQa?Xu*_ADK#l~}D|cIa`q3LH{6 zC{noU=(HT|*>}Me?hwL`gfJfSB>DVcr#vG+e5U5w?yPl&1BQFjfTnBgr>E9_J#XC0 zbIM+rrjOYC{epr+-z|Hdb>4$p$a=3j(owe9F|%Rq(-$f;w-%!TE|L?ZI;gw{UF~A) z+`;iW!0YAWfCDBm7omM2GTTA5!Hca<&qant&ni_I>Hhtt+5< zA6M!7!Q0-Sw&B*kj4wvLu{wW?$Hj{Sw`Z?@bIcdn^}lQ>dwUaG`RT9BNo0mIkz>8b zzQTlOs-^k$KhN_)#I4ZL@G1MxFyE>lkItNVfQc`S_AOd+zgD*|_at3UO3T)BG>a(P ziXBREzj_zc5%pO*rzO&}7q;%WCbVUYa>nr`x{ZQXPR^h0YPy)mHf_H#r~1YY!}YgV z(fVf-Ysboy#Fw+#`-YQ~KUlu{;&dFRyR>(CW_1KLH;f9;KpmJ<_^1GC>LZ5Avll+f z8wPawk+5>h-PKo))vZ68p4e%ib&m+=6|Zglt?Sfl2gUnM7WF%K)(g)TB?;0Y3#(U; z7_tw1jT_={L*_Q^7%Uh?+@ayDSl#YM+4XvKD`kX*?6?pTr;6;Re)Sdm`Ox>3DO<_{ z

    s4ToVzzbdvnbS5gPVjeiI(+NtNy7mq)^rsP1mqy3Sn%D9xu@W#K(+eEpkqZQcH zqy6lHst|2Y4Vr=jU2OdzKGm}FMk7_Sud83TcCZjZ`^m-Ax`K?CHkLh;chN6v$WwDX z4O0tP)~g#Q@6G-c50H(SIo$RO>0zS>yDx0{WkuG9$E5BL|2cEM=Y)OP1js&rR0YIL zosD{%0rxy^goIcqJ3W-j`&Snn2v!DLy$z`sLm3?%lUUr<($lQxsXWiZRrxbl#yl~6 zf5i*Ta>EAsfxJiCaKBbx{$TdsoDToQte5y#JU7iJd3_OEF~om`{Nv}|r{!lcywHRq zkCAqOrwu>3PH)s=)YPHZ@-1mF>G z^wPA#y!Tep7c(hcisU+g`$Xmm-p1KOylquH^c^>+ToiDA=-ec5^Nn2Ih_#9JDCP|kQo z+fu+2Wm2U)+PhxpVb~~-zaw-%D1Cni_mY195Yc}$Dk0AaTO7PR#oMczX}HsE+M%eCOV~y9kH{ z5JW`!(xff0NUuwk-aAMKX#&zkR76EVEWwVbSYnBaMola)y&7X;G|^~!G3A+NqVApj zow;`xvAy@cpU?mE&wIFgXXnnGnK^U%nep=Ky?fTQ`B0TxbaQaR0-xw++IHWp4cC_T zeYBup_uIvDcB1KDZ81M+Si0?l&aRF_>A{_u)tfS5dMbg}Lbfh^n;@9Z^$hzDGNQ$> z5!GUdYPrW>EHsV!0zLmL+hT}cM(;Q*X8%C5^0sAXZ_6v)l9IDE*DpQYFEHI!PPx)U z_dh@x^i@TLoW6eFC~I3@?$(^b?WuWNv(LoG#KV7XVU{pj_5XX>o=orTpdT078cGoPZJSqckS7|!SqW_%H6 z^ryfO2TpCK5&|V*NeE@+Zx+{=!V#*jL=2$FZA(1aRh_#)jZ}2WbtfM?K1nyYv$FS- zmgXz_%GSUwx)mk8Vd1L{!*gaGVseg~WQFx&_UJLJShKVCd_%)a^Gi>+wVkUVDR8bjV z66)H&L^9JZd#+1ZsI$2j6W3|%ZR!|V02iiMnOH4a=Ne|>H@~taDs3g5)ajEoclEhC z_mu;ICG7{0y`e?*i52P9QitqLOMBlP8={w#SWdS+&|VU_c9pF5T<_dWSh^f6r|$v4 zgu0Q7LwR0G;-2|cv{0`W+4>=QtKgi?=GPiVu9lTw+uij_75$oMQDs%LlVdafxh^qp zPw|Gk1L*d^-y6%0*XFFKJ(P*3WgM!3iIL+TMi2TiC7cie*MI{B3CmXX!yS+h*UMYL zp?tJzpL3U)8}NIEUctJd)7qi&X2b6;_;l0e>-80HteB#$ZJ(8yXc6MoR+!)FJzuVC zV^p%lANTB>5)?UO1=8KGH^TOKB zDPU!SAf{hoK_mhaKE#nxD}A9#jTmDDB60*Ow=A%i3Bi+qaL0-LD{5sz*6d09knzMBQfH&tnnuQfhTcm)*)nO# z<^{!7&*#tEky+G!Fx`6Y9OvXk;baY;$M@DFuc+qG=^O1+mdEHYO%rti{C3>Lv=Q6B zFhU@}xPKf8o8WC^{v&IkamiBECOoHL7Em(2aP0-u61bsI?#9 zY}>d6QE^QnK1tR;H~-?2?(OIlU{^`F1I3HJGA{&v8-fW zRzZn1lhcWHx=`eU*X6MmDUlhw%M#Y*1(wA7>Q9w;&Grn~9l0b$UJ~i9r|%gE(*UMc z;bLH{P9PSLY2amQUPza~@la~ArQVKuuw7MR6*akOPx5G18oJQ1Dn9Yd;QGp!7B1LX zG<}yrMP=c#jDr1x1J!RXN#4`Ml8# z8b1AulGHq(;goT(x+Xb)Ptk_I);g{IYr_cpDJ6j+sQaLS4_NZUfHnP(q8%+~TW&n7 zGb9}z{_~mrfg}yymv6w8%lGEy$Z#uZ!l(ojYj@sCK&Ft^bx+JrbieGLkdxqEaemRd zw@W^tqpD@4HFoR)Wabj&;Tc_%2$$qt^^23a2INQOtxqi7lNp&}nMkK@+5{7-8XVXI zz?&!#Yvl{(v)>am2O#bNGVziuI9&)5t09o^S5)&fjcBdZ6c=Z1GWRaJP$hg-qph;K z)!Dg~xlG?0+4%QBc0b{*)mba*4y9#h9jZZp;ECuD(u->OW^$~Yn129G$M96m#`WRQ z9_}oX5EqvxnD|s#&6JCj#mI)g*Sf%>l>iH0UVAXj#d%Rv9Q!#XVLyd_y5UrjD?pkJ zh*%|a=XWM&YTy|K42+S1z^6Z1Amo4j{NOLGg?WJm9v931we-a+r||K&Dry_>V!Zs# z->X9}ZnA6NnVX*HnB-NtJ@v({D}Q?a?Na-5Yp?=elJ0bDJJMXxGmdRqb8HhPSGj=A z0*u-YYJwrq5OONiM8GJrDS(a;F&K_;N*JYE*kAr0LVa4eET5XjI&)rHNA&}AF7`TUmESwn3tC|uTxC~sqAf_Q)`Y!8?4jGup zQ|C-|oDyjp)1GgRDoo^Qo>Ltshs}#6?V;$ZXY^ z=uwxc2#<-<(Pp-Yb>IOVta!aJ@4ZsC3_u#iA+YBLUlO)^~%P6#N+Vlmd%556< zWzg^47ch4_5LynfRDlIx2Ea0dWY{Bpm2^&|An*L$t$i=QP`xF`KJF0GfBki&cPP$2XLI$DvuhA)N{q|0&i#_99BKV% zNy@JB4aZ+T^DD+#n@(mle|@5SWTgDW*UcFxH)SE}*E27_Fj&4TWywdaz#h0A4e$Y% z)K%LVpQ@a~9?W@F6gWS$Y@v%=b6LYejaP*g|5`SDt)l9aEgi4dqs%#(?zIOqJPMB2 z4b-+@s2pSmJz$3ylluk-aXqsPkYFl^K_DIKsyDNWb~sXC?jq6-Y(c(r9ZDA#ZLsC5 zEnB~AZvJv>{h6hqm#iYIBOd^|n`TY-m6zBXi49ifg{F&+$5j|W_Wrh*D^LPjwoZAoR@a1>$Q6szQN!9)n z+)(J+Iz5lW!i4(*(FH~nP#bBy_*$1Hyl)N2)hyD3LFog65zz-IE;+`Yh_)UoQ4?fM z3;Gc$S4@uV212Hm$f-N5$N9@rznOBGct&GL(LyOz`yP7jfN7xF1U(O<#Ch+%_u7G3 zfu zhf0tjNDg$AU_EEZgIinOQTJ#VbqFI9^QJki2QyO7Z4TSeKKCuuk-M;>x+)=8AAg9h z>QKo?e=xA5=(Gb(WzQGc#I+_&j?HS!%&GULypA4)o;^UkvE+#6bQtI}jFA>*uk`-< zS^)}NdNMcnN` z*#cpvc_9+;mt&E@wH~v8@kW7X8o;Dr0o!_i?ZCaQhFic8c5l>h^Gh#nHXJ;Au0MS+ zJ2$K5c=>aG4@lPjv$6a{Rc2n+U^-n>@cO#V?-drlyJ^Gg^S1V{h5y#|ui0x?Ra8-L zv6(42v3TS0n3&@myH4aYn=R@q3ajklBtqDJqO08gHwOyfC4g9a#ET7}I|FMa;{c-| zhl^tHY>=(QoleHZ;&238Uxy}x9J7nN^V+=VYQ^g-ayOI*YELdbxuoZnvdhS$sc_NC zX)~90&1-aHT`!$yP0C*FS#qLOM=PLWLtfA8<(J$W3l}dn)?2x#u*rjR)7!0F8`1kt zm+!$pyH+bN=v5ytKhS=??Lg_Vn+(41NSW##RIcv&^Pq3nJG~KWm8bNoj+Gv0yWaME z`EfEdoF}>p1XKtnUK=<=GYW<_MJ9zayVXR_;2EHktBQamJbMOc*_4C2iroJMy>RG1 z4j(M3vp>9a|1y2y*<+}F_V{5V^nxt*J`9hz-vzp3auHx07tzISXMF=;>%Em1+Rt8( zJ+{u%aY1%YRY^_5rAyS=l^2SxU95SbaPf+oy1~Ad``8zxOCspZ0v%Jtd;`)Y!>K6w zoQR-0I?#&H2ckkrEeLT1<}A!t{6K56236G073m7in9-4u5!zniw1IwmWCYc724W3D zeCS5uF<1*tpnduPvMJDLU^qxuDu&<=FdWeEm3$Lh&=UHorHEdcqq#c4Cs&;LJe8ci z{rRlCK9sydWAy38^snf(71*DO#6I2RAM_e~l6yd}F2DZ?y08rUQsFqDhx~&su&1~O z^y)I0TL70j57r7orUho4+xekcL`LRd$A_;ToE0)lOIK=?lKTa_m)U#toQ7zGVG477 zHgAThl!Il^9Hvgh)ZB!@3m~MKG2y0?k4Aw;h^L4sSB#0m4z-`8Y)%MWhlhvAlyTF> z+k26Sx4Qlh_Ffycz4vINSJ=Rpo5IWYHw6%TZ=Fx%Blh06O6Kf9Mt{5+J?Qb=oA(EU zIu3)qH>+xpu&4rX^j!eD%|N&d;TgKR5~Qo<4`4VjATmFPZeVeQD~hCWISjh}ZRRLm zdBHy|%|8$w<n;B4b&;~ZevyfSbq6dDG140vVEvkPsg zEc2WZAM3GaqJxu1PSN6w$aT#T5iRQ?G8Px*csMys+~W}!H^Z~6vLV#2FoRsi4%dOv zp8$MXL_D_w+pXFy1p+B3v*54+3$I##fYar{7&W`~T;;_%`M&=7G6x4^56kJ!E#3v` z(<{#p9?W@nH?@b&1RvDFgU%fbP-}>{rpAs*Gqcu2<0ZS_&4CV2fk!9o%1ASROG>f60OXtiAw1cX)maIIRKmUDv<3(y85)XQ}7NpdN&};5r7GfL` z)Bf7Z@KyKsIB)%OXQOmPvh2!`bIIYdm`;9^F&2Pms+$O;AeaHg<@6qM@~$;n^#vMu z_2bEs*)y!HeT?2Wq{4`5Mddqp3>U7u63y;FW*asD#uBP!z)2coTZ>R8vbE@5c8mQe z-8@RI1#0v@QOwAn%KEYjz5xva&!{e#gdtu)rb}KT#vrgf5xPVGGNJXSY)3juL(3Tk zS|*y)W>^|3(9{!%qL7|}otxF4+yAz5vo|nAG=%`kH+mIkrO&oc%_;Gw$^qQ)Id%1o zKI#2^>4e0j^{q{p)-^SIv!?(E5N7BR00Ml@TqFlXm7t}#pB}&i^x2&|6~y&g1-fu3 zgcT8M*?6U%3wlu5ZF%91AOpHkccidte`e#3IBGtQNk~i2TbM0RGjEh^MLqKl)tD|h zm0x0$lFB(*K(B_Be<08XDUpyQp&-Ep_@_X438t6RcTvHx@X~!} z;U%aFAx@CpbqS(*7Q6*EhzSb>*NBf@Ir^|1*aw&Oh4hGx0x`+yIp@!%HC7Q)zrtXEp{$b3ZtEEYPrO;mx)q%d_h6T(`H7s(O zI#Gg%LT(Xc=xQ7v>Fiq`swesK^lA?JrfdTJNvdA)1t15kG~*AP*RLgD}(ty1MT zi0l&RU+e+KQd(e3VibsSXa$$N0SW*QnhK&g&?)fwLYxta{ejbEO(=psLLY@#_{Z7F zbE~@}mR;-UxV9{!yL#^Av-%Z7Z`Pj2R}O&N?tJx|L-X&)pQFP^Mo!Vu4Y9V%@x2S@ z@%`mCaShRQ-DpS9X_Wf&FDUg)VEYM_tMc>1%#r+VfQSzpfZj+nh6T7=Il_$y;ihMu zVLzZ|-k>bs!mrJoE3@>7HNx-9DVB0#KO}!dyrJt8{yKNzUbLbY-Gsw}`7@uRxTeJ* zAChfM8h@e#P~r4Z-8rV{G<%UUxla0ceMYP&-v>4oTJW-yB71yS(f7|k>`F-Zg@^=7 zlWt64(@6*mAe)SfIDsxmFLzi$J2WT(2s?Xt4B`|$v(=@}U40fMfj_=}oP7b(7vPNJtMwow1t zy&H%=Y!i)DDnN)y)f%`F@di8_7yyBw;-f%OfALU#Sa}wpGF(2uqI0w9qm?i3Wq-Vh z{$)%+yT;yrI%s6RsFfolY~xdbqy-%~fc{e40Ftn~zz=qUIxOd*o?)g7!6}@|1gJ?| zOK=q-V5A3$_TcOa7P=&c8_>l4G_qg)SzFt!jqCch^>nYsS5~gOi}c$*>y<_ZdPW&1 z2Dita-e#BD60mD?-~7Wz*5lvG@SnoVIR_i+59f8PDz03_{`tngmY*;7cX11IoY-xg zxg@o#GpH%PtA~02;6ZJON1dY06ChCvM z2uXD0Nf*T>ERp_2no!T0fga#}AI6HSza3xTf;X%g5Obm2mw{9pT45Ex<|BYPd`BdIQgH$ae~4EQ>j@4#a%A>tW2%;-F5r^49JZd`Ttm71EF zO8uR$UuCaTf!FZ^J1^H+wkA?<2Yiousk@sEr2|*fCipdZURHpAR=ynhC~o&EPN5d! zL-tP3^vB)MKLnZzoxn_QNkU7lIO(4!tdGRK0$PI{phF44en(wwzPcxOH=22}^1`NK z-E+FNJ2N}pX=r?>J!_~|H!8Aw6wT2%xgzJda_AG}w%4bAq#~omf8lo{d%tb;Z%)e^ zZVg=lV>k_q@>D(mz9o|kcYh~KXZ%K$IES;{5-B)^rPTB-##p&7gzsRzF!1JRp9=R1tt z5!eDSNp!DxTiR%4R(8c`MP^p{Xxf(I`CAf^N8(n!-2GLQf)(b~*5*+U#y**<{ka7P z%F7Or>#>b8U(IL`1vtF@w_9(q%`?sx& zzS*?!LbJ{Z?cn**`Rfzn*Uc+jGwp=Vg0ocT@G!c`DFrJxAZG9LNVq<2sM0mvv-ot! zqI0FCnZ-MaN(`nqQj78~ofg$@n-i^;(!6u*ER1#2u+a$S%_ zlbJ;3;JYEzo>2X#b|>`ax;uS;$H_$W&o*_Rav%0NbaHDszQ*@SlpeHn%NE==uJ^Fe zhrQ=P?~c&>c#cR2IM8~1650wDJz$7rkWVn5yx%@Xp&$J=8q$)0UrJ~VMVZH6#TA!P zvA)j~l=CT`hn=W9!+*a0_MgMU^P63qo2!PYK=wR(mD<9tu(YFU*&_50+%h00Qy`84 zAPhMtrX>hHyp0#Ma$0KT@ia)D&JNpSo0n7=BHv%H9Ot2G3t{JvhS53Z09xf`RHW}S z8Rc99(447%_csj}yX!IZx1Wx|$4%Giks%!TR7fd7287e}gLz0M2mpLk+e60G4b>Nj zsZi-19II4W+<6u$g)|*?UK4twW>l2>xH=4VCC(UQC) z&5`N(McIza_*^5LBkxL2?~*4*&rKXkoI5vRD6%Izr@`GhuroKeH#*qQFL+3NN}Y>D zc0SG#Zh=$&M9kaV&L#-mLw2?h&{-f7-Tr_ug&Y{xAX5(X;U_2s<|g-7)LI5 z9jwEo9A$Flz_f_=3Q~IwDcABvoA}nDJ~?wh#8f}>Y)zh_UcR74L9_%tfGUBs3#oEk zyHqY$AMEEMp6>j}mG`03Tsbf^W<4N?wUFWaqd~HVb{+}rQ9p|)bzZXOs)u>Wq?FG~ z2A1?C_coW8%$1W-i!@h_%S(onF8I7;(NFcHCqPl4prj2#L7^_!0++2!#=)QHL9&&h zE>}wGa;2&qX3bR(bC}6*d=9hbs>|xu5m!pa2dzKMVHSZ+^3fb-=(`_kQT$m8Fe}_8 zv^>icBPn<(4J?37$vBn?(IlLFF0Q5P6ghCi6wbZ8$|Ke;$39H9<|58UCqCTmKG!za zR_-x^?sj)$O=`(Xs_4_Cfsl}aq)*v(%h;6<5>^KWu11FI3>P;4Y*oy^w*)|yQD2^fNT~Ki{;hqQ_7Q`SaL4-_!p1# zqH<4k_8z-b6YHI$X{vL>kL7Hn|%kzl%sI!?s^4D zpynhTkPta$$D4y-e?L9TGKDwLEvg#IRj|N@dk2fm#HL_T>3ey3@0U=|Js5`OxD0$)e)W{VwdpV=svuVo%p{!E zkPCjLW+x0Zr5GI4c8$pKtm&7_`)g!bVXoST3{n>M#}9tn+4-%ZePTnD%j{^GN&VTX znsW^^-NWr&A{NBj_2UOe4q`<=K{zm`0|f$GC|Cl3;1b>htVvHqk0Bn2h`eshX&`Fd zm@4p?JJ$pLq+DNv@%;-^3=V0#hGofkXgqTwT(u8QOKloJ2f2z+8T<4j=HL+|?6-?u z5aD7U?mn~OTus&4dJ|dnY?r8pM0)^9Jqb?%39#fpO^K-rB-{TwHE8b`NP+)#mOw58 z`u!$Q0@yaXW7}psZOg8r8rW5k&?4B0lXrr+0WuYi;UZduPh-o z$l3QrS5*4m;I6!a=aQ11E9lz@xZ4fT_QQ6T0;~qahG#hzz{J_mEynEZgp2bAH&sx* zAH^)6U>+W3{v!TqOZv9rEoZ7LrK9RX6y9`}j-DvII6DMo*DRxh( z-JbO7aaKO?ens8Il}L`yEZfxAN4;E{6z) zXc9A%Dg#AgJ=IFe$CN(;CAaZJZuhN6-wBm+4?C41`Yq8J8}thJmnx)Rw>{(+AK%IPImzi=+i z3~6dW@59)dse|69LEw-dES&|2q!2#^IA)+PBEj!85yPNlEETC___8uzyn4ZjE{XO`{RN+G-uziZ#akx}+Pu8l3JTi1z1y(0 z)XQAQpzZ?FKb1kJ@4UVuT5qy>Km$6GyDe*`^K3)Cy%@s3NAy>H(KV8}AyvQTSZV38 z8vWD_BYzBmE_j-T!!-{!ZtI5mI8 z-r6&=_U@o$@Q*bkCHv<|@()#@&vx(Tr3)eAbh9MF3N-&f-5lwMYz_K5uDn)N4h9bu zOW<|9PE~m?Uk;~{%;4&S>;^K&ABIp{`6iUwit|9uA*EuiDEtFgo0n-oZscSd((5u+ zE94v*cb#-PU=vZJPSCzvlJEWZa z&O~!_1@DO$j{Po%TZ!OzXx|Oq1Dh+Dkigl(Ka2h*^~f`MU&yZk)SCb&>8Q$~eNs+- z7eB9lPb>I|-=QA)oz$D~;TRkwUr7Z&T-3nBF$drnvyKBC@GSbCYmZpV;eBBP_db+Q zm^fAr?UQm2jxFl;xzu|Rn$A6|g&noE9_E2N(^y3EqWSsd&d3!ZOYRwT4OAhb;e?7~ ze-e!Cy-RAf!K|0?rO=zTW-kREDC!uxL;8kn&)>Osp_VZ98MP#QE$)<{FZj3Uuhege z<~;V67|sNTwxMS`5Wak2i;L*XoV*Ut!dOz9Jaesq>5srbf5z1Z-Anu(|2vcejLahP zsoBdc=AT8JmZaG$f*lO)L;VS#{MY*MKB-UI(*P}s^rw+O2K~e*pueSp-W{Ny!a;wF zgC2A*;T5j8TYPKq8`m0>$F&B(i(h}@cLF{9PI?z{226lPC5pmoB@pbTzTn5%Wx4rvFQpCur^cA^RK16|w0`uJWA=irs-bXG6+CT3fIM(&U-~j-`BmSV`Be=>2 z?CXHUy5JQh_h^#fVq^e`b;Q1fEI@bBqr#;-*fq5C2ecbIu*-!?py#knN2!Cf=>Eq- z2gM(3=21A-5($jZZ;T%uNMw9uL7YS&h;te6CKThh+b}3?L1cVGlDGa|)57kN{DQs9 z3(;@pA#*G&y+TZbnsQ|xxy{f@I9`ozie889DlmYXL1l3FV)%_4OVL#P>mN6Sou75# zU+>+IS+>;-g8lm=)YIPU!4^dZzxxzDdn##tp%! z4}VjDe?Y6ka4k*orS)T_y>cj}**7S!mdEO%nmqg^S|5Td7f_z;TlHuie5LIl7J9L7 z@P#<>Vc}R!z=9n1b6T$40c!y=G@>C-QgB-R1o1GD@O@SX`LlY6u&Q}Cn_fRc=>vJ`_A!2M}54! z{QSJUeAJ)l2t1Jzznk5(agX9V+NekI4T^IK4t8-248RReK|xMV!NK$O$o9`_Cw>JlFRF{0W{J)s+wzl3x%UH+Kn17pqft(*4vM zkR1oOp!gG@@q;F%qAAeiELb=XSS+!%rG+^OjPbD41x@0SPF&wWHt+l9l;E>>?%)rK zs-oXQM(;kQW6@+3w0mn%PiZ^y6r-ID64x6#b{*shp3hiIpqu$Y9! z(Xe591P%0c`mHf~AW-jjpk=U$0LiW^4$+A(u=|UtRD63KeH1>6(UpFjK;;#)JE;TY zXT`rusN7;WH%Wv|sSm`*!0L4v_=;F~7&&lQZ9~~oC*g`Hk?`ms!SY9*1(XuwJbEX* z!S6;AsRw`bPDM7oOOcJ0J9Z$nV+Z`2Cb&X}K@bGl>CiI79zyt$uI|H}X0V*8(q_5X zH;F{bXV{9+m67-GtwmjX92enRsMbHjMXaNpv&m)JtjSZS8B8`eoxL?(Yof>{lip70 zaB(HS<6mjh2e$U{b0(SE8eebqjCR%4H#E|nnVq=s%5;0HDRbiOVF~Ji6>k;#k&r_~ zL#7^t!dS_K+@BPlfdwFoaQyknBVXcMawK^R--<%!rw@LEtfOfgJb1A4AijXszy~Vd z*L48Y&GG{sFhgI^>(Oz-VTGS2K+BN$pM#Dn&%qoBge2WVk7 zuE@xt3lh+&EL4_kPLDSuCdL5dTGk<>5hZIb#TSt)(Oa&K;EU%dvpu;x3vCK_=8mlX zd96+VFRRhJ;OYj-N`=wBLQ@rGC5L|z;;M@_$qyaHpZ4LaN1qR;!q7DIDl4Hr#lHA9 zyp%^J(mp~d|B@~&r4XF(LSY_tgNx?0M26@!dQBU$J$?vxBD>az=O2FwSy@56s=Jky zKJ9~VV=w9kYl+@K)95v!hmPZ`IO6E@5v@;vM-%@JxN~T#_YjbL32TOITf?6R_+CT$ z9DF}vA9!n6@H{98GkIhq^FnytFxr8?-h$Tj^?i-%FvtXu!6(FZy_}lm;ri;yJ$zikOO66m{eo}o@S92 zm_Xn;fT~7eU>g)`f*QcS1HBL6TzZ`zpMk#0dGkiuXAzs5_6psjG5~fj4Z}JCw2V1kmysFhN8`R7G78Bqs_YuOEVX zXdYS@99$O~S{o8l8|oSw>gpOQXYcry`1_Uk`j+_lm-yBNIXMOeIXaPq$Gd{NR1{?l zboxg@o`R^Smzg{CN^ll|m&J!n_}H`NpEhJa7UOU@yfKS}ya)}rh$4=V>q|I*I7&{v zu-to|uQbGcj;*c73a=tRyFf30iLKXe?Wk(s8CEm=XN#ih0^kdgN1~Z_bVH<~fFxK% z0*D#?+;gZOeprIr|Lp5~ycRK+TR|-pxCPfdRu5aC8%lzTfN-QW3w3MoLpN^W8yjDV zGzJ7i-Pl+)8Ys^k+ETfVYfB4;4#+~}XfybuHsK=OP1M0H_{ENn@94(*IVEZRYB=Fw3Y?5%HUlwN%M{UlgO2 zg+;iv5Op)li_xk=T#NcjajgI;|3vesa9T{nR9f;d=N$w=Yl#MfY9>x|_0%-iG&2q~ zk4Sf$N=deaCRBTBQrjnLhg1<%EQZKN7vUMsqboq13PnshkT=T32N5B28ju%vDb*-W zi_D6PD6|09k(0ZtxL{`<{{7S`X02arO?LMDO(Av21CUH1PQx5L z8YBmZ6Ij6^06s?xcP^TnGw+bxFR4CNe(BtX=W^Qo8b8M0j4b!%8glfbBLNoc839T3dqH<4jJEsE%p`CZFVwN^5(gzW$B2)3+v`)vhm%s0|FN zqpjG3PN%6p_5qc|9;D{2Yq-?2apec~r(a%Jke{^aUuM_Aqo6-IQ2!8U(C18NZA>!H+ZMx$`-$#DRnDZS5W1I#7F2 zUaMGhxmZi5M(Sf{KRa1xa%yC9QP%>YBbVd8vYe+yfZk*qSBJhg=zKJk(?*VY90XUO~QwOj; zg0E!WARwRpAfry!v8<|z>7=9{ww}huGCL;^QKzt*8ID;Lp;xoJDSnpE>?lV)r@0PJ zrqQ(mT5$ndKQDd@z|`dyhX5)jR$XoeAoGfX86YPMs+I`P>8{^j+}2WBSw2A|&9Rrt zCuzz(y#xG(M$}=&?a4z;+U?uAT0-M%y{mkDYPx`kcq;1S-*v?D{7JV^)em54yGgKKP zDo6n=dJlTP$YBw&bP#bAv};`h_K0=S`q!IY@2@atj=h6&2tMOskO~zTW{80o2;&B+ zkk+-3h(WPqcjn~nJaBk>QSr7z`$eA}IDmh}w+|fxK^_jIV*}XFaE-DXrj>w6#wb&d zk(*2QwWUSxPBMi-dZ4TVo#{#e(A5#DeS$b_i>ks zromvr8>V{XLi5?GI#iq&TqNs6?)DZ^G^Yo(7X;?JS0f*LCmUf^VS!7!4-gbL)fY!t zcO7O}nFxz82`G0MbnqDYk~}0_>J(i--~WNo6xlcs($-O!+tvb9&EC#FT~mKrQ3N4t zs4}sUP@&dFoPn%e{e3;d0-Zvqhs~%Eo-A;1QW3m(M~>D>Z6e$v2(811LYrg8D3mlL zJ5et>VV0%bCRo65!yvdshT$d%`acYWcSb#xB#n(BuazHzUAorT7%CbDf*WJT5v>0V zI}o$vMe--88CigRN&{{e7zvyKZi0xC$5!D#F%cFoTmN-)ih8>9S~av*@SMcnj&PFn zaE_BSqkQ&S-JfH#`}M3~vvKUiGFZh<4g+z0Rs1gTaB>H%aVjstI5j5~dvFAc=Vbt= zH5*``zO5jdvIiU1uVwjt0ndA5eB zEIrUCE;GfoY#<9z{9&Jjfs!=8|K6*WP-69;dq$r4xDAT^uK+lmlbwf+FiKy;=SAc= zKyz*#9=huxWtJp5NL1VvC?I5}JWjpYW8RBw1I!a`!sI$l(JiULmHsOWLv!54V%Hq| z;DAY5;r_uve)Kgb*A!cGPhYgRh5a_M*zr>2qFC*=?SU<+GvZrAD}y|1yaRx)$3TLX zVg#1rf>=Xf3X}4XZ>SCN{3#4FccP8}gF3x%>z4Y3t>RWq4?jN$JVde-H~)#Ph|*od z?CUA-s907JXK&>gX&WDutf!q&`TGoC81!2}1m6)~1S&dqu7{8oOJp)#$OL0!y3zGt zS5C>@WOgj>p7^5T8`}6!{6}9)?NMN+R0iHHKx`dzjJ?Y-A0SRsEZDlGO9_Jk(nmqt z>%<~x8^lMo+=tiU>qgx|)2b@;9msY`07D+o)d^5f1eviqP?M*`7Cd6Jn0P?*6V9?ZklCEb)~4}+triD7C0p!-pH>j=Cx1A;XLM3m=MX97P7 z9@P_xMUX(QLs`gp?<+wbSVQ7}Wj}j)hqbet`I2rlp|mk3OiY1MVuIMsH6q@OnIP`b z5jhE0&3CqvSYjh8DL2t8QIFDr1ECly(KgheJwojB0t#2|pE6yjwPcPis zS86HI$MIil{S4QTD$W2NZh)NuUQh%fzZ@+fVgQecM8f4FJ9)muF(RzvA&+Qn%bwTW zE|N-Z>^8;xzbr|fps zu@!2TIE8I5U)(W2EPb{}?3yMC44pDDA~4wB|LvA(lQkLbsk04z+?jnF8#I>ei)f9S zmC_PWnHoMLGOr{>BRUamTXnN$GMU=Kqy!=}DE866;D3pSA=JYOz#uct`5vstol{|< z%a@L!8+h^o;ebZ7i=*lnuV{2D4RsQwMg;{&xck``b}ySZ+uqYN$ki^cJElpLTAwz* z(JC>-*gD$ZJKP6tEK1AHk975z?ID}Xx>N5a7dr_w?i=7&&?zuJ6Vcrk93`QGL_xj) zT(d|e20{o5h^`Qc!U=^I9AD5?|HeJh>L*oAUVP;7X6!v~U1^o5$+xK%Jo%bzV;!oS zms?lu=q8&@CDzsE7c8hlvt@1$FD*duP*;Z>WUh{H^0lfQ-QAto^L3~;uV6tf{Nm=o zp0BSfD5$GJvpw7#j@O{Nym<@i9O3&&Equ>kP(y0jGk?aHmBrim_~F+Q%gPdMy?xlP z;>t;h4|^$oenq0h-ygq}SW%H^;}^i(jJO1PM#6e8P*OO*r^5?th>;fZtA!a`AL7vDL*+`GDSg^#0sZq2GR2jO;0 zN6WJL)|1^`;0tW!0--+66lH*&00!?a6&qlUk@H{(*MGZh%>wv+7=GWZ`W+aLM2a_$ zB=|5Fgg4&~S+E9Ra2*c=NTa-MkU#-wXF}n(hK@*MV+z6@Q+qaa#a1VT3%7Se*G{s@ z)&=#JP8J{>u7`mV-G*=?Ywq|dc%3`Ymfzz9f7RrSi-=KaeO3^F!=5;9*o}T^SR1r( zu19Hb6XRn$+iQk@u!l3eB*`RLaNdQPIj{t$9xMm(g z^01ln>SmqB_B;j)+}y*D=}mPrPeMbQc?6oxTsw~d!?h!Txsi+0VQ#<< z_9QsEJpPI~!pNDniFx7txW%n`#&tg9fkxXOy)6Xbf27|h;eUF+X!*EBp8Cv=ACB^g zj%lPPG-P5#e}h`gh4`tu=5W@G$13hSx12rBAtBE2N13a>;Gbblg@#-q`9bw}n!6cE z)iNal;SNZYppCgl>%h%Oq$k1|$F)Abr>9@TLJhje0Zv^&-J4g$h8sMP;1&$S=Q4=ygIGdtr++0hqitaCi$p)eIN;0# zp*}qV`3z(n&m0Bq{5ZTq{l~$Az273#M-AYB0(n(;+|Ya(JXVjmzrZl!jto?n{7h3P)j-90 z%mZpKG)RnsoKb>T3b};0W${a5mZpb>rZ1hlgn1CPI5s3TH6*r^Tqb76?4dkGTj90; z=6P}nZwd=*3XfO_ie-d*M1;FrL8OnDWy9u`+IJ--^r4c!melikFf(Nq&4p5iVWfQ{?9wov#U&ufmKs7xoa)}I4JGpa7o*wWa4*X$4o9Rjgb>JZ9(f$`=sHeVc~3{ z;gRBEX72d6mMqE1+|1EZ5CHY5xO%qnG+$`J7_aPr$IG^m3h zc=j;qI9>QXM=eC0^rLUhZ5o`O(%PEh;N|7OWT48_)cPD0;Ara)Q`^m~#XpL6z$@UY zCK32#_IUMYe83p~ad&WJbkOdR-Zi^$s-LHqpO25HpK!Hza*9{e=FONTabB=;fW$`Z zulN@wA2S5++@TMAWU#LzV=dBe6 z&-y{(hq|&d7B9-m=`1&Pw{Z3McXbQ+8Fgjmc6McFc9omBnYji9xw-`5-_l!J($kun z(v02ALuWgNg*m3Tw5O;FL6MWl8V9jDzji4+I_ATxA|Qab)3Lg zj>9Ae;Zo`iiHL_Huhu2K0 zoAi7~HU0yCg@5=vJG`8^Y3x@Sq;F!PKgZvys9n#*M9+&G%r+RzMNZFqEOeH7W4Ccd z|AFl61O4SQciZ-y{Jdk)=O=rZZ8Q8l%*{RgX82SkySgTW0fmiWKc*AMwmWC?1GWKM zTqvf#yg#40K8jlBVe?UBSB$^T&c?r?>0Y;PxdDa^356PiB3%I&tRsYzScNy>8cFUG z0>Y+};2pR|l3eifvCx*nsw)k7w+{&B6x_{skz8d6ck>PM_ZX5UMUhWw5}G1Sl1_1y zC{mfucS`P?B%PA`CP@wWq!kpiRq4Y8lJKGm1joTqM7%x*#B)4JXIH;^4Bs6H@Pfck zPp<$f+RxL+$Ir{tPb7d#9c}+{4Gea6^z$2V^@mWIfB*sg;5%sC92(bB$wA{928#e1 z-#B)Lc1Z+j06zTcKX5BzzB;y4sNozO>}($xC?c0fZg2|ncXbSaqt#gr_G6r>+)H+t zkhi+1&TZq*Z{y5qO=26SEo)C9WZU_MY}mK2GreFz?B^y^g+)@PByHonjd> zbG4C?x0R{+6zvHUG$$Gv8XIX%m>|{`#)w@UXBcW~!2c#1OfxXipFCmW1cOOHH9uxf zD(8u2@Qj)^5K1I6q4@S(+!*s>3__8j8B4xeGG)p7C4l2O%vI$?m=j_wprr=D&pki; zItZ;KcViB~hESY>3I(bP?qG3J|V~Ut|=a8Ebbhf)Ngq0V8{u_k6qu$y-fiHtLzZ z^b+B(M4;Cd>Wjf8;%?fUCI@k-$roQ%jRWDo7a z zX~Ph|ZdhLrh7HBXc)D^g1YD8;FR7SAN1k+n5s8;U#1eL1?>yV^>DGQe=CjA=^-Ex@;Ub%*zekqre2zp#^3y{e>cc7tzCcvO2- z`@X=+WuX$^#c3%a0STc2GoAH}riTg#oQe|W)(7tDt=Ul#IJH7AWL0Z^QH+0hu!d&r zR6W=>@6ngI;Dkw>UqLG7H37*qV$7Gwh`Oa38gC@!6X{6bBwS&0vS^kxEk$H*Wn~fZ z%+Q6yGel-iECWf|kf-|&fh=g{;W&8To$bf8dF%yTu zQfJ0Z@zW3Z?zdHk?)_Rb2_$S;-tM04jAD;g80TIx>FGMs4YJkC4h;5eHd zDx4=`N|4arMKxnPIZu%WC(Xl*sX~dk;f#e6d2n-u{Ci3!Iu+KY6?fZuxXW}Iey))J zV{t`Z!7dRX<$8ik(-j(5ANEU}nvWgw3&;IR*eNCDLB(OR`02@;D4Xc+O{sD7W(Ajo z3)@i6H~3{P@Pzy`7KMUBVETcf5*PhmkQ@xaQ56i!Jx+Fzjr&fH%;6A{7o`bB1Y+^@ zSTt5-4~s}*xc;<(e{8I*ZCoBkr-?MXNlby8x1GI1iaI(?oU4lS0!nIUVk(ia-;N7c zn;BwmB>O$d#bkWA8Uw!mVf>N`cq=XmeH_5!fv5iy{@$^%f_r?dtZaU>v$C+Wv#_#r zcr1MEN#N#v@bzj3sMZyH$jNA!DZ9|<>se~)ms#&{jSG;(mz3s-y6}W7sue+(Ko3Dv)RkDjSh~<__ z@%}QeLQdtE%vs@EJEad0 z5fWM^BaWqUda422tnqQ;_3Me3MY~0p6F>bFzA)e2?d)Ax-`DleKTG=S7xZ`D1r_0A zVJL-#AA#leC(e@lGINKT0v1M*=m$L#$$l4}qqyfE^8}!sB*8x$>0VycB`1aJf>V~- zh-Jwa;ZShuJ4HSG2+{petwmi;9T`Ej;M&wn)Xb3)RL#``(>fTuM?l&6FQ#=!D4?G- z7bk*3#5`%qPEqMcDsvyK1aI!6lwUCSC{58{oI>)5F_~zlYJ>7E_7l=5Z&OB!&Q355 za$s^V{znf08R(N>qGNoEV3{9pa39z5|1bd%xc6ksF>saqvnBvmXgA#T?jg2^Z@h8` z&{>VMppW+`5d9MU{DeIow9VyDnB*zJS8yx54FIj049?*An7@a_xKPi!4<3I`RN2Sx zgO4EVrk-rol$Zi;KLcd$yh&oXe{x=Qc}xD2Q$v-jZTpaZrYD+Dk7!&40|cNxIVn_#TTie&~dz-Jud2;r_cb4DpWH>AZM<=I(9Z%$wES8@=9O7%21Ec5LsxYpC9<(1q;Ayb4BC?J@9ACJTyCcpxtRJ;+B?% zhL$dkTPbpi>q(Bx&W=p(i38HK0P9m%ltHjI|9}AruFq5S;t+gEnF~9TK+wriaw9Wfo$Os^wB2;R)K!%d12kGCCk!^i!+xMK2w@*%djP-B+0*@ zSLPUer*lAM_E%|odva#HF%1Qq=*n}Q#*QFW_gqV-?{XmdQ0bt{p@iW1zrLlOJq=Ajajdmr7=)d0sCK2-o$Z;*{=P z%WiWP;`P#n6|Q1;3i%b@&5&?>%)>KK=1P%Zb4~o?^UsIm{G{t`fyR_xzxwvqYgBa! zp4|0MCW32+tC-y9NyrC6>`Neg&WyEw!OuUx{gz}?VBesgS+3$YSA6@eqkptOm#8Xo z9DU)G>t6JyP#5a`7$y5UBh=A$Vf4I^p9J(&wUM_L49{_$36B;>%bForo^WB2{J1L# zf3v_|uh{whhl27ZDD1^cBLvPu8hFSeypl=6D;Xa5yL#45x}FtXZlq@2B$MX(gNoi= ztu%u|ft!hV{s`Uaeo}qJV{`mZcF^EH*77sym6o3=4`WxV_^V%`Ad_4_b!E9%gSq8G z#CPof$Dqo^A8M$xw* zi4T5#Yt}tx)D5AJOztOOL?gJsc|W3 zsmP~B_=9Vg_%d$d`1utr%NWJ=08A*!L-E|GYn!{;J>GMpIE&{Gy0q>m-K#t|ihnib z+gu*=Y`-0}WQog-k3HkTzx)?$@xAoH|Kp>Cr$&r$RX!k8$)AoAWKa)z{slGDa%H#Uq+Uq8h;7fJ{PZ_{&_C4SCpxdU7*1 z4MSnC*ouEOXWfQ^cHZhLd)KbL@Td0Q>>?A^NoYRnXeLLHNk%)pg(e!A>~FuvkjMt~ z+a(wf^uXpJHluHMrH21Y_KSuHSL)4`%Z3e?_U!gc#}JUSw=7r=ibemZKt86x5bG*q z3cl6uN~OrUu;;Q7#a9@-L9dh&aiEH}->X-W|77yZ=k|W+dNn062|q`YQX;6X?*wCf zdZs=;1Fx^J0N}6UYZ#31S9@ssFKYYo7dDdMzYO3NHX{9N+fo2w0w2?eWa$SXB2x^6U@fzlp2VRFox>Ro=yAxx(_7ssT?FRSn6` z9#U2GMDmqZgZ!=V@sjMj?#eECyij=Nl64>=87&`?oZUWY`GLa$`&Ke7S}Dt#(j-=>?s)SGr?}CZdfI z>ztgWb_k3W;i#GhUZH-x65CaKAJZqUBqU!!_go?%fozo*3Ki(N*+2|`sIqmfl@VOk zOv_yjVi)%wG@eIEx9>KgkFA!53a{eN+F0b<)V|fiPiF+n&VwjzHOlA-!I+JOVd-QK z9{$u*!7~u2W%;2);4o76+F<{_` zcV0Fl!8GYp;n$vV1pn_p&p2Y5exQAFybZ&M%;%pn6=&DqvmS+*gzcPt@JAJFt0TGD z^B|i)|KiK_eBabku{yZ#B)aPP$|bk2YwUa59mTy%XFEykIm~*eEV%-Z#CY& zq+-r*T>aOtr(fEOXN?|}`Pjhvf4(>Bp>^Y5yRg_b19CnCITvy{V@o!d^B5TmFVMR~ z&TLbXAk9~P;u0RNQiZH(@HGKXl;0M2<-qCjL+S3J2#XSzi>oC)S9b7=6061K)zt{7 z6`z;7N;mrgY7y}Kj2Q^15|#^nWKRl+qiPAZidA2zK*(Zj$^8de&fc8i8y@)1(Bk_t zgoqbFO<(Tni3nyuvo`|U!Y(~lC}5n~8eRoxBk4R(m}%8pEUd~^#gh&a_LF5?3V24C z=$X{mpz|2fF8ygzkHDU`pQPj$pI_`6q^kJ8^P;=PEIacWp2CnuVTG)U>v(I0w@lv0 z;@)NR5vV#rDM@;4VAdn0(e}GyZ+BMSmG#KIF{WX{ij;zh27@I%aqxo%LlQU*bblmgsqy?0Oib6I7fqJ& zrDhTD(=ocd-yP!~i=;zEGd`+;-5*{<=)Rub(ehUbn-EBdkT2X#U+4XzMe-lJ+ZHzp z<14zGx(3{$Ge^?bTi&0!R8bZPG^Gf-j}Lq)W8`u8JX>Ful`dZnb-xaN>F(F0^HMz) zKHx8V**n43^&?y^?2SfWq3WUeZ#T|cuXn&pI&0&;HUT3 z44k=i$;^R;xs#u*ot*2xYB2ACJBK_n|I?RWJ@MKvZqLj$w6Z1XU~3W1E97c8*NlIX6A6i;mx)r$EAz>nqTZeknP_p17xS@u;7f=WgA7Nr7B44MkT z`wZwpPcgE`JlZmC+Bu=zg^%u?&k_De!o`PK`^hFC;pcA?AI1ci^B*u$cAEQqAwKn3GW+b9iH)%@X(+B zTz;S;Ecll6{K)w9J{3KB^vo=b&S5(;%;Ey+J_*YeAr)k8mg2+(^X7R268!_HH-LyQ znrgmiLL#CjS0SYFJ>LLkHvaWD!6PYNC?=0%K` zhwcqUuOcv)SBf}6kb#gJGzcTM+^FGOiIB6qcqjaQ{jqH+=Sa?O_$P8uj+~vhABDer zDAwWR4!^4${;Y@2@;-!>pYg^Syghfru7q5B4vRfHSBLzt2{Q`Za3;H!d9LqoM5xG9 zs5#619TD<#NEY};=B~(d<~HV?$=jY=l{*utciwE~a%bn2<=5j{pIe_-lDjBxW!?zp za#wKInA_?Qx51kR`4#DTe)knj%x^9@k-s8;FWl<4Q{l?|P5GM&0t*5`IgGpy z^Q%4u#{8{?-C9#L=QD@0fiKI6=f-luHOfK6$_6H%$DCEm@bWwplIrjek0D!s?y8ER ztru4XQ}$jd*nMEifh&di=HteHa>Fj=R%3*i&6!wXGxO->CjP~NXb1DPdqqj$MD^t$2M{lD| zY4Kq(AarU>18^P~ddAQ%`c#8i_Su3kLUcwt3^WJg|>0RDoow~uf zlN)J&9W9%?s-rQ{@A|dr9Z&UR#Vp4Qo9q3R<9+N8`IU!WZ)*4Q%Tu~*E0?*4lt)}1 z;-R>U%JuFYQ;XWdEB$(zHY!8z`f{_of4Ti)>cv;?56V+7rg*O0&E;%#bj2Uj2lwC0 z3oegPvHD_K`2gQFPvciwpH8`($~XJs@N@6-!EU@;a+ea~`i4=;hq+6qT77Zc4V6qg{IDd3kl#y`X%AdwuyB|2XbP+OFJT<;UBuzz~h0ex@zB=?!} z3LlM~-2=<3{IA=^QRUU`(*Ng)d=)cvJDUwJx8zcilzwY#c( z&ZXl+cFhl4d8@^^j+QUHIGz7h8}{!@E-k0qYccHp-JiFm=_qplxqKOmd(l|p*7B9U z^!{{@jru|^KfAtnRNs~cZ6elHcz@c+3FmE*zn)qmetmaC`!&xKw(miF}-OwRu8 zrmrpSXi44n@@*Z7>*nom`!Ug`?rcBga;p2i^4*tHFQ)kG&cz{@U!mRI$EdKKgO}-e zL-~=*Q(R2Bl0ICCesSu5@71OE)6v{6>&KePPjxiywZwDpoO*&C9Zyq!w&QVscueS} z3+0nBf7AKbr~C7%L5%yg+DdzAz$@5SQumgrc*@=ObJtJp({^=rgq8%9*Q!> zyZqX8|1ncXx%l@T9ZmC}k9TeJM*n9;KVI3dQbuRnIP=FU&-K{Ak5#t+rhMFoD(AG7 z=gM;}7RTK*)!7#JMx)A$#(p=N`;F%J@0G=kEiIc*`v3lavJH3h)O2?5{?Do}lEtB^ z?aY4sV_b~?FUZb+%y9AD{olXxJzMhVL)-I}|HQb!eSGSWb|v0>>LoKkPb~M|c4Plr zi5K?||JJOoiMjiG-)MD&-PDm+i-qy^*sF!#=rH$zsS`TRb81<~<6eJEch%HKuRry_ zKc%~N>hynCF7CZkXa2i*|F`kI(eDEP+wSjr%X4vmKXulne4rz9FQs+#+TAd99?~!J zkAJo2QU38R##5JHeC@lMy2^+C7tYsr?;X9fscSkK^S>pY`(Ks+w)YP=(%vt&k^dIy zwO@`a%>&wxeQi{KIr!c~TpNeWV}IW>WB+lh|G3`2ESGcjym#z~EIJaWJ^sG@{max| zynhD`&m1>)l<&2?C+jHB8y9ctmK(?G+Bl(o*nXuHm{ae%QrQ0--8^;Ae{Y$m?*H#C zTgOXwX}|8jHi2<*Z~cB=?)l#z=f{lUKJdeHbG1x8l=;Rd#8o*=jj`r z&-L&_InEI*4fpTQu7{5=&87XO`zv1v^zC?s(sp&_XM_H6eAqpyqLU9_-?^gemD64S zdlHnZ_m%nH_z?G*iZIky%=K}usJk?syRV1B)=aRqNZ0ziY<APT^rMiGe_ls3a=%wm($ToSckbj#;YwI>6hB#cO3c8FD^}WW!tP2FN=H0T8&G?J9_Ptr*Azuw+rtW z+sDHri%N~}+IMyz^dacXGal^Qk9Muk^Ddny`{eKbds`k?L{XQAxTjTiyPSICDDH;J zmHyn^eyc#;zw-~dcI^JL{WR`YD`-EQQ-j;W+O6%TIX}{il z(59qbn)cFm(LWrvmF0hZHSBJxECBxjSBvxiTbO%i<(M0B&+apogKq>MLMn&d7(U;} zrEVNn)ZdlRXUv#jq zzhOCE10x}geMd*oy#$|lF#D(`D0Q5ofiLRr(A~+tOOqz!2m}RxXRUmxoKU~j-lUta z!#4nx&#`j{-;hehY%C4gr|EEDB%jtB2gdnn_<%E!m80y>aWw}ueDNRi4lop&8nii90O-oJ7OBwTh;8m-8PB%;Uyl%Gc1>J9SYjlme1G-b-K8Ssl&|3nMhG*Alc>9os{xoeQ zj-lQ&+P$=L?=cjveVs^HiHQ2o$k`H z_|q!o1=5X`T6voE22TMW@FTWfIInmAIc+@X_;{oBD5QaZP2*oB3Ltg(3ejTn2YgfM zeeyYZN6-n+ko`ifuv8c(yega!CJ85nbHYaHTj@LD16h>^3Oi^f%@jj4F0Cy7L>sKN zh@sj_?NTvSyG^@8d;tG5*5F={rm(!N_|B4&>t0~3P>S9EvQ`m^CZV2A(tg~}Wj>LV zb?!sfk;>cLZ)84bi*X*HtN@R}%Ioll5n0yaT%uaIpU8bD^NCC|+<#A9M04DyQkT;g zz;%^^cXFv~2vweTr=%`aR=68emm!}9Ybf$zek1c)?Yh^e*3kXj|BU(SS>|j0$o(DM z|B(6QCW;zN9ZB=JKO6p7r2Zha45^v_8S@nl=o1u_6h3M{bprT-4t~tv!Tk@J4^4`c zwX_dSrWv#!&7y^LHEr-{8~r=|LZM1erBHcVS**OOysoTM-c}k_{12w;R8!PcHBC)d zGt^<~S=Hsy%z!-7MW`X!DF=mTnNP#HpTKvCOmx*P}}%RXJ}wO@be5q=6?qcV;DZ@h{^EtnBg-Y|8xvG zK6k4GxW7>X@eW_4yIh~sk6ifg5*7k(3?U{`PHqkT9Qp$qt1U=730gAS-K4w#9(CZM zpd{<*HXNumaG0!~$N2*CQKXmfN6F8-C)3}!3Cf;C&$yTJGO37PkNEYVzM;rGzx7C0 zk97EUEWRkelzzzLF4YL`r5e$_R2zt+Gwxi4)*Xk=9j68EDsW#4?zP~Pqfpm+qQQMU zNV9M>;Mj&dces<7oq~H3xF-=Ueck;AG-xvJXEN?*GD=hrZJ7*RnG8*ttoac4u0`4^ z`T=6>!1+%&@8)+_rMBRF!Cj@g5G#Pm88Ls2n6-%cYs9QY%v$9526Fs0V%H+}uMxYJ z1ffh-h@C^XLt4;uq{yL%arX+OpyCK{H=|yfmCnGc-Ah@0fJ+WIa^3K4yAC7DsIXH50-8r-EQ$r~ua8;S-p(BkL> z+#Tnhko;}r4{8$bAsL6K6mOsu&?a)9^b+jnHX2UjsevYWttf})!jh`!6LdO#l2+51 z^cngreV)EZm%_T%(YI+MeTS~6zo(n&Hu?eGPX9=E&_B`Lt(JF~enF4ZFX;*T6+J~y z({Jc^^c+1;Tc{f!lf^gmV43(oXGNoE6`c~GbW#G9&PsRX7Gh=pfjQcE+F~j4s=-ek(u~j>0t`Hl@Vku9`BP*o`q{-wr zQn@shG)a$1v&jb8C>zNxd7wOyd?bG%e?gjQ2n``0(_3gSd{Zs5&RFs7wXT|pOfS2By|$`Qhi*lA}7>U>MC+lU9GMrU#n}>HRP1~uDX%@ zOFf~UAm6B`)idObCO{KFzS9J1x{!04Zkk}?(!^=Zf}k0s86>EhyEH=ujpkm>eL^Qq zh2{yNvu3eosnA37y5@DEr{=es-wL;A-qE}x^wO-?tQT(8%34|It<`Be2|v|FY9oa{ z+Ei_-&{sQM`=k)2eL?%85TSiV`-%{)eN&77jny`48wI^~y>^2Tr`@RCC>XTcwL1i( zc9*tUuxL+fPYVe;StkpLf;}ntQfDtu3U;oz9%rBKTw!mv*V^msi|i}yYwa8D+w9G) zoNAmGIs5*$Yx_z2Ifv{Bbc8tiIP{JLN4oup{iL(zQmV7YQQ%zDzHcDNiar^eaU*~=N>G~z#Yvz#T)Af|isC8E7|N6Q&8% z$Y;U|;eB#I*emQME@7YW6>$sa#oqYvXJ0WxaEL?1A;J*Wk_dN;zZKUAQ^mF7R-sb- zqqtL;CGHYG66Q$3Qm!ypxZFf^KT3zCGomV;lg^1fVG26&7TG4- z#9negxu1Be{Fb~`>`en`fM}#$Xcy7MS{u=<1S>s6ixRGci#FD(hzUxZ5+^1q7R4gk z6^G&w9ZHIlB0819%3v{xwJ>6`QmT}SDauG?q?oFVRmO^G%6MhGn9f=oF+-J9SQm}dVt;j|x>6j#S|Rav z^-c9n{5!=v>O0~c>Uwp(I7t1S`aAJX^*!}Haj-_xXvDi%TO{7A>7nT<-mmGSi4sR? zESe;7oF-F~B~I4lX>J$GS-T`o*9_N;6st9lXeNu#YNl&y#5tN5G%t$tHFcT=;zG?b z&1&&g&6}Dx#RkpWns>zCYTnhnE3VaS(rgmn(!8hHBCgZCulYc1)O@b_TwJg1qwOPZ z(Au;K;=8QX5`U+CSo^TJNjq2jlK6Y=FSWlE-_tJAE)ut}R!jT?Yqi9!+TUw8i`%q+ z(EdT(u05zdB>qwRPwhX&o!VpCW8#O}o3-C+zZE~$p3|Na zcWYa;E#jZGZmnC~qr+4{{3M`XKtFLWwC*V!82^$Ck_QbOM26wZmiM8@DngI-2Qr_$ z5B*ysbjBZLd?SQF3(qqx6ov{_LXB__<`XXqqghXe`NXTx$w#4)-w6#|BX>a~^Muo^ zZxeemy%fX6`{5$QpTR{k9TlU*r^RQ*Jn=bkwpc9A5toX$vz`y(E5tS8Na*hS;v-CV z#V48WBK98fu=tF4L~If3MVFK-z9AJ#L&Wc-d!&()%KAvD3+p4LQ1p>=r9RN?Lz0te zw)8a9Y-ygcE%Hw({z+YqX4WFd z&_Eg}$1V9$EI-U_L;g8zW4t_3ouE#ZC#h4^O1T17Q7u0XOZbiajQYCzru?k>mimr7 z2R5)p{-t_UJtHrM_Uq&ZO(#v1+^C7s43hs0EgwmV<^jzcG*GifvzFe)^q3B1dQ9(S zdQ3~19@G1n9@G1w$H(YM%oID((b^Dg2%W6$sqIP2pxNPc3hR$)CDU*EDAR8`O`EID zrH^ax*4|C4pzDv&C$vv$pQJOiPidc`PibG&zC^2;-qUBaFKb_>HQM>w`Se-sLhV9Y z3rko+pVO|@uBEe?J$RU~KcNe>pK3p)uV_EhenuC< zPX11R#q5MG)qbJryGOM_UydeaWm9T_qLRW4BQ@IVyfDP;w=5X!b1?|rhPD1a8i>cxW zxI3ZAPl8$LKGo%)#_cCRAFDuad0%$Yz9z(7E(0LuxcsWr1xja#hk|)WNva%)+^ zt>xD=2-dO^e>iZnyoz?G-R0L|Ilbi7+*;m*wHV~J+*%r8ElKh_G=-+g?_!RSD{rFt zG++KbEvCitX4;<)kl&+s(Yxe7(7Wl~@>XtF@58RrMv+^FjS@}TufOc2@ zr2L5nE1xQ#(jFKu%QS>pDZLd|`ZVpWKBLx9hx&s00!>n1R9~dYu+l%!6!m>|8@*fo zQ2mhJqkg1*M29geh1E&0$#JmB59m(Ke$9Txq&ccNrkI)4DK=(xN*1i{7A0HTTN|q6 zYQwZ)N&&MyrI6X4Qp9XeDbc2DGnD??e%gM@?aU69fy@q-J79+sltFmH^SClZ`-Ju> zZ%9rv0@tNxMe-mQn%R+p0`sqh{qfW^c-DW^c+|*xTQg zmza$y3z>~6zhX9~EP{=Ft1M>Lr7UOGrMw2~Qj`_Cv%0g&DrQ~E>jC)z`O4~mI|BwQ zZ-{@f%=@9vaM*CdaMmaqI~jwGp~e^=Kf^u`YA1Y;VEl!aKwS&n_bGq#u%Q;?~r zDa;gavYRql3R973Fi&~Va>JZyfoZsDtZ9;InrWtK4&!TD(8k@g9C+Ceai#`qulAkk zxV4w5$+X3^)3lexFdbqP%jzHIOlQ35@kaqa$TeCT&BOx#dYsj1U3hKIth1c9oVJ!Q zXYS@XU%!7f4v@-bk2Cl7p=&!^hggT0qnLkv&a#6!v)OZQ-!~_N4sEnXbSRH3bB@($ z9$+479%&wLE;mm%*P83hi_9y{Yt0+Y+sw`8{pKU)ljd{gb7(IPT4YP0CB)LlqPHYi z(k%s+L6%a>7|TS;5;nDS0`}WsnuO|8F1+Q&iEw@t# zTu%O4jI@`@+_DEP3gl!t2CXZxX0^qDK8>@utQyd}@^X!XG>y1o@mS0f9+$OgBXE>i ztH8eo$6V_|+K^cBL!Ciq=w|3`h%%TB$%Y)m03SaC@t}6YXC!iG7-|^Fkh70ryt7Z6 zvmdg@Uyn1C+v5$}4ATv@hC0I{!%D+i!$!k4AE(4w%o&>PW7~0)?3cO3`iq@mf7`X; zq~RPM-vt^&jD3uHV}dc=aKvyjvHntOV!h)WbH)PCxqaWh95Gho7-TFpjxkO&K5DEs z&Nj~DDVMPj$2oh^_d8>wakFuUagXt!@tENn-YJJGadG%caR_A z61OvFI^j9T^lHyJi0Q29Y~t?3-DZ*b*W(;@i3jY%&7JJSnKK95hbJCzbi3S{L(QR% zBOYgOus4`vd{=E}=a?(G#G{Ev{lm;w&y_jVKG;6kImo`i-jYHxobA93|k25bZ zFL4li6LaQO=2aeaiND2uMQ2`T-)Y}z-o*S%++O=-F0sYq%-enF+Rm(Q&AZ!b_*#)& zF(0ria6$GU^HJveoMWZO*?Tf)KIJ*L@0(jd54+4|TE^mmsbcAA3A4mo?3PSRk!7%D zxFzT^%2>xxd!}WQquepxGR-p6F%m7)w(D@sj%#yXd%3FAPCBm1S>`b3NQN`Fx|NPa zmIapOj#~Si=+hRAFf2{>v5p+e7S_{Sc3SpY4q1*zyDVp{#HzD)vktKKwnkaa)?{lA z@KEbW>v(Iqb-K0IT4!BkU1?ow-DurrZMN=@(O8dIPg>96p>~2T&=z9rW77kr+X`%h zY^Am_wu!b!ZPm8fwt2Q?w$-*q`?SOgd!}u(ZHH}-?V#gn^mJsj)JF7La1eG%({JUtKVQBXeb*x{nS1TA_@ zuhv$YwXt9wnagwi9iw18)N^w`q^X*@Kvkd2I^z``Eo{rnkKzhWX1CMTw z-i|0}Zn7f>63gK-JLeb*n?x?4Opod1s6{-M8_RE{V{J@V^nP6(8)31{pfQ=91Ya34 z55Wjl?@Vx}I}4nHVtS#?AEW#>=a6BAp_d`TV9eQ)bBH;^+?*!(PL8t-C59nh`Ut}~ zjd7gjdG=T%7FNEI&?w5um49`#aQo@Ws_FEA}PH5ATd&hw-I=e_JXLGIAp zp{6rtV%FsjHFv|&n^8Q@Y&IvGb1s?-Mw!isa|tzfXvti2tz``SIvn1)BIbhWxbn^z zOXk9zEt$(^hivxP-nwq$U{*GQpU0&gD4bKW##&aghPmAF%*QOP2IpODmR7RM=L#2C zS6J7;-(cNp-Bq~2=WL>NpY^Quu=RxXtW9Lp+(oucw$ZtZa2{rxkXvgDwRHjx#t~|Z zu~~UYD)ZZ%EzdTPU)9eZKr@*au*dYPf&0KCG<=PONcLAp1ToO_JqvBhJ>Po!SILU7+cuD96KjWDrzoj zPMC&cCPO&SNm$@Pxkqx3B%Db&p0GTjA(13BC2V07&~|#xKzkDo`LH+ac*2?7BZ&lu zu4G|iw~~c| zXp0h8Caz7~n79q0`xB28?axb3JehdTF53h1(p%kZdr00Gd!M{9d9&?$dxAYZZ=$^b z#~}9qdG=EKGWZCCKM}{H_G-`n>Un(oZ2LT*G4^Hl&GyyyM*HTXDEp3rQ2U;OP@seM zWA@VpF?Lr$j6+itRTSmu>gWYOf@33)(~-rn$2m&!mNDlT;uyis{Y8&+)HudD${baW z8g`wxnYp4U&)G58b8hvIIXgKPIz`8dyd91;&S1xeykm~7j$Mv@dB=Rt8O)I5u;YZ| zET`?rJLVLfot(kNjm}U!dWgZ1ha(mKz@nkdIfpq%J15{=;hf=|<*aws=lAls!luF| z=aT$h&Q;EJ&P_OP_t2Ay`o}nTCuPDvz;IHL^QiNbvn5GM3Sw7DJ(I$c;u*qW$8{!- z!AZlD#=@G*Hz{xmrk_RLY zEhz&U$^15#JU+RcT_N`LuhJ!hcCl+8Zu4cn2j2Xv$>sY>mdax8s$%IOpr zuqJ&$NmZLm?Zt3vR~$g8MjR384W4jzRZ^9|Ce@jmmA@u`S87S>kkk>W<5J5~tMYf{ z?<(n)T9Z1rq*sBMdIHD7)D@{~QV*wY0Nz@FmwBnXaO?wxg`VYWi;sdH%$#qPF~dc5 zxr@@4V7+l_+j`@)8ELc9>it(BdnrrOR;8^=+f-DSwmofk+U}xlX$Oi<<}T`QO*@L? zRM9!+(pu7$^dPvN>0#;dMRn=+^vsNr=|x4&B^~;u4^AKM7jZ}lT>9AbvFz;UMwDEe z8&?9CK8g7kyYy-4Gd+~_IUdI(e{oHD>IBr~zWOPtxi(ikh`IEp^rqtK^ex4u={w=} zrXOOiXgXZE&lS%u=~cX%x%A_3XEI2JE~8uVpp4$d^NNx)q6!-_%sGcLk_)G?u}x8P zMh+Y4WDLz1kTIZW8_pv$re}=LD2HF0QJ1kOV`awLjEx!FGMY2?XB^2mnQ<;t&J2VL z$?TJ<$DbjmXBK1*$}G(slQ}W-(ah@1*_rb)m%**hY|Px8xg&E==E2NknWr;d{WSf$ z_UqLz0?yda*)OYKNxvceM)VukudH8HznXq?`z`FZqTiZ+8{oF~+tqJhzr+1b^gEj+ zW_8L6&I--S%Nm#!lV#0H#hKxOS;MkMXHCeeD4dgXD0flTjI3E%^;t`@R%NZr+LX0D zdvMn7tOHp`vrc8TWGmT0**&wvvg5Pu*_qiz*@F#1*~4*+#W5*+TK3HBIoS)cmuEL* zH)U_h-kH5O`%uoL?Bm&Ia!8IYr&~_%ocJ7jPG(L~&fuKkIb(As8G>`B;h2eIPR;`S zuWSQGX*1c#1mlV2g=iU>Lp#S>lf8u)d15@f6C=>QIF9F>$tAhElC8Pj3P$Jl#(2=2 zo1B}IJ0N#x!Gw~zxg$&FGETYUbIVJ1Q2g+--$8axn(T z-G6bfo_nNZVeZM?b9r)JU|vXGpFBPK;{^1=1qHh?@*0F=Oy0!2N70+I9(Er3ztwq- z=+$=Q?a4ccUg~t7D_@h}6@7~_KcasOe52={3i9%u{e$zf3Y+pv3Kg`%Lp<$m{)qf> zy!}MmnP0_@{F?l^g?sZCUfjCjYEAxz{HKlqU2ve_Xu+w1 zmO`a4sIX^YSYdpjy)d(|h+8OcI|~OF4lf*AIO*#3Rd^ikOc5#46?H4>jnP$9k-50B zD7h%7XaM@>kwxQSZ_|rs7u6Nj;;1WHRJ4*=Q_)(Ck~S7?;~3+klDS1kicS`tE0&7` zi$jY06zhu&u77Z-4gE-fBYJhAxE;_Cil@$BMx#miu~jm4XbcNFg_K3IH=^^V1^ z5=}|hl3pbdu-AwZV~MlBSmG?nLK`;(Z5V5f%D7Z&FfLz+QQ8_dL%|q$U&-N;6D4Oc zhUtV}IkbOFe{28L{&`q)EL35E2ML9*A)Po5thEiapf$e_qX1D}xF;Hx~oLMX6?U_mZ?hE?tdkwjpDmn}%F0}c`n@;Dy;zu__2 zU0{hX{G0ea29hY?Ad!`yl5X&IB!N?AGoNEf1z1bCltj-xcIPo=_<B+Fb>OGLl zCrFIonALt~UP5RArxd~uAToUHOM)LHKF9qR;OqFk;|z>{4u_AwOXvPi;LE&J0_QJs zi3N#apa*a)@_G*v_Hqi#s}sLR!plZTG$^f@e+NB=xIHD zg~ZWc(O(e@T}+n|D_u^P6FXf&R}cq%ovtQM>{9qGNybiv^(2+POW!3~Y_9^z#%_fV zNiOy&>?ZxOSK%``~}Me$MtLkaFx! z=uRrIpXe4+siY|-vDo>yil&ydAf#Z*gPQ=V65lNHK~%3QKisZ)MQ zRw=J2zap!ZrOGn$TV=Jfn!Jg@xca_EjaL{!&m+ z>8PhP(u1a>wtkA*$|8w0n`VQ8-6DvYhdo^uwi*nS0$PALg|rZLP=tCl@p{$rde!oJ z)uKk9#15uvS`971{u1D4QO6Rm;}FzwJqe>P)0asIok!;Z&!_WASGs^MfR?-h4d}`> z;AUvRV&Elo3Fz1jgB@nepc7iI6I!kl*ds(=Cu!Ijwi@@+KpU{HXdQhAc{R}{5`%p# z>w%emNYIaMBoI4VKEQ4u>}r8_?w~tB`4jyUC_AA&I<7qdTzgcmJrvq=h){YMI@Ad| zbcFPw|D^vUed$qp6!>#!RbOb;SGdPh^b~Tzei-2IXbXHdbz={fporMvg&i>@nC*!n zU9c=t2YP@KK%%fa20JXFh3F@+OQtj8FkS2dUF?aawYNYct(NGS%T1bS@ddK?Zt#x60XOerJ1l_^R&@Kj|g zaD`F&bZ0w$umkQkSc1SULBlOU150ocjfx#W>$ zmZ<7?>N!HOGw3`C#O|OL5`Y~-7l2)`nE-AxI@rv1(uwUEBC6(3nw^AdK77i||3t?jDq3VgHnX5enHw9eepD7Q3;TN?TiQqL_-5=V)n zNM~+uR&H+r+}`52y>;gHCVAUGwl7HHHimtiu(9sk#zOsUEQ;G$2=+7fLui|&#j_Qc zpfF1d<(3x3EiJ^)(tgU@OMzRLNN3Qepkd6$D1Dkf4a_V}q&2h#nAsbpv*~QuCu>=| zajWait*#rly58L0x^PSD!fmVzWj3bzS(L;rDwh5hEv^8&Y64GYQF`9)3f!)!pIvq0 zc6F1VU3K!at4`dmG_b2f*o*jg%Jx7qySjRGZPPj0q#m+O%k^avG?N zhrS9lk%xu=jpvB*8_Po%0F7n{2eFOd=vyTiZ#El|??Rv*w$(%uhXW0>Ed%Xy`mt?@ zZ8>O^er-zzZ%MqDHroauXNjBzN=MG1DI!8=fcC8-*?QWlkoGljiMJJbQJ5_m=qb=b zZ37sh=WM~)p^g-Cscooj0cZ<>2HWPa63C_2eYTmPeGaq-cZ?DsG}ShVN|JsD6pwEYNK#jz80!Hq zim>iRS(=blx&_X73grP>0C2SeBrT?1NI9(prSAVcIUgz79KFyiC^7-EL$x*Kzm!7Zhh3U4Y|d;Pg%BDb^&z)+GN=Zw7~t1WwSL0DX>4?veB9h zgnP5#PI2eIAp%xt?(@<%G8+4CfhUyCM2dPV#!%?@h-EHMu>>h*TUK~!wLHa4gyKM1 zx+8Rkr3UhY)GXDEM!p4_&XD*x5N|ESIY1Sh76HUt3*il*iJS{70oNh%mq7D)JmhW} z&XDvV&@fIL3N(Zv;X5D?=j|wSfd!T)VL!hm#{!GTxMHJaoF&gAE6V^&7DJF^j>U=4 zIHb+v{6+y~F+^EO(_2w;E3z}BtV5`kqn$uTjv!Bqo}X5Lx|jX-A^P22%=nxox7 zCmE8#)qI>Il+ApUBjjm@h9VRtG9P3}dIV^HE85GDh&nQZ6A z#*oqlXbVTMY4awIAOrITj?fC2QAU(`0?^u4)WDGRSI|~5B-aA1U`Sad9yKrHXf4np zhSZZl3mC$V$fM?ZPCE@Wm!lS-*$gQU0f7@z%mBZc4AFGqym^MD7I{EU<|-bq2lc%|k48kPCFrJc!2|DMnf5nbD`n z{XqkFhJ+>NJPRyYgf5%27)|(xTxw1?SW(i!VieYek4J9N;>%{cWf9OU*>1L)nRfnO znr@zEWWCTga(A=dY)0Bp(Bh5gg`^(xTjm(E5l8_|Z)Ckt08o^1I7{nlHfI`P3*tBK zQKploGc2vE*>uW?UPwaEU^-zs&C-H)(#TrY0HEVW*0O#iJzzR&Isy5?R!zrPnblti z7Sj>aamHDy@Px`g6StX;8PKw7^oVza$Aev(4lzVe0G(t=6DA%-N`@kVw)1#@7S5Xv zo6tV!%G@SE88({)Z)Fn$hgdrLO#Pts|b7C66Lk9vC0pUw0+^=R-5w&3pY(=_Oq%b5$ zB2?rk4Lz5OBiy_797kw!tY;XKzze;p522R98Eu2{D5ot2I?NGdYdpw9M+5EWG&Vxm z%n=(QY-EU!5Slo$fwqn#HbPj#5gQ?JP2eL0t_e~-Xv77&-N^e!O=E?p@Ih)b6vE?w-GC$K2SSr5de zD?xr1Pm1wCJcSHNSa=E<^0e?2GVa5|Q^@OpjxjEHibyO+TG1iS8TVy5z>pLUw2#wX zLvGa^!8c52NDe_LxC4a(ZDfc-`@ACr$i#wtKpPIUh9hXb1*HJzPk?xBv2m4UIiuk# zA;b{R(c3^V9HD*;5geUIJYFC2%b+1|aQ+gg8%NL=1LOso6{r(KQZclg>zX`(ka#X3 zu?UDuh*@>~nRv7?Y5>q#mO{J-dGOqXJkYp=giN3#JTx1~BcW2cH2y#|T3+%yS7Q9W z_=BkP`_O)$LRbzl4{BYG9-E1(s|3E&JZj9jzo_N35h~hUB{t zx{4vpwn=;#LsBqk{C)-KPdu(5jrYWtJ*Ls zPj~}J6OFN;uv(gK8X1lLkDQaH$DNC2v#*fHuWp9L_m! zj>7mKU$U4Uw=rrHP%uza6nZWJy?ords8v9(OVf>eqFDV71Z_nW8~@J(ZClhVPrh;U zqGo``xYR{KUxYcX=D5%(rY}2zdPSjs5#9g_i5dp9+SP2F6g3bfI0u?8ij9IBE_{Jc zUUXum0!(1=Dt^jsq(tGG?SBC-nl_X=8pbxaS~j7ASz2ZD9$>wGi= z*B92K>t}M>e4rT|!DjR;IeHUZxP7ozO3y6-PxZVlA?rJD8QRK^TMru_8n}*Av}XEr zU#K@ND~rc^fD8k9=q#X;R#Xr-5+!9N$l)}!2l`Bo=0dYuIGRF8>~W5I0v+Y(X@nkT zNKOZBAE(U$+QSjb9NWxMHPB9uCL#qShuqMf#%|(hI%u9dN4%cXIE`^$z)=lQJx92X zMR|}Gp|Q9ZFRJyS_%e>3LJH)AP=>}s(h{DQlGw4FRs}SgA#pQ82Xn+qI)owNW+2{f zh-VR+%0tly#5)<1z6MI*G?X&d%uyUrJV&U7*cgsb=kaK5z^?$bUJQ}buK3uH*xo>B zSz~*~hN1+ezEBaPn>a30DF?JbmO?^_Vs)HfIFP~-T0K}V3zZ0oK^cL9Kx<@3K>HQ9 zmZM27Ys^802;#)-XGo$zdt1?NhQv)s!8MWfgE5>ln+wN4HVDlI+Q^WQ2-L)B_X4eB zh|Qy8)^J)h&}trUD$q)fAcL6YJQR8rvxK9EfEF?&$;daD(?)~F>r$)&;&mxbaOK9# zj6wS%OaiTj#|wi_9^+^z&=H1Mi>^P!X?2LlbzFQKXe18}1scvnPaxhVjpC}p3344LNjvNtd7!?_;ikHVq2_e3qh-i zJ!@rcAwyQHFEqx=+QQB1{MgXAO&m>$Jqn?KruihcQyiNeZr1FH72^^-DCTTzBhdTm z_}I0vO`Ntec3rH8b8gH&JT<}ipEgHxeFx{Vn5|ayAe@GxdJuXwAUTpYM?sf?(tu8L z6bp2cBMZ=Rh9uk<`aB=v@}===ZtMUv=8ix$vFVnB4AJJ;WEM|+Qk@cOk4QOZq}D@G%9Ah zehx#LPog)*ZsTZA^x9aA3&csZ*<+8=UubhAmy7f_ATAerD-f3pwE=nLavRV=R#F-b zOR;7KQKHgQ`Kr{FG=^^mi)1ZAF|9SB-Q#Gbxs0PJF`;_s8mHBTA5T;h1ytw9$vmI*xL&(?Le5&Z;$0(~Qf#Xp)KP)mfz7#3gxFEsGj%4n&IHv^j!n zle7xRqfK3axYXoVP~_TMfjehBs@?5nO6`qt+b3r6zv^w2R4rqa9q5 zy`b$`3<-AwrE_#MlO$%w=zW0j6$%eB9W)%}XjF6zT1^%@B{~BA9*<|*Y=rDt3R9yA zIteu2v^pjVclj_l%RINqKmfSQ-@+=m9Z4SmaXXUU0`lDXyFea&2?FAGE-}RKoaZsf zxRK|PYbr26ro#IO<@YA)f%v^4UYSX6gq;gpAp4DM?!ZwKpF4mS8Da?Lv{6y3qIYmK zC29qF7#=U;oB?{pQbe3IrgAht;s_fpOQUIXIC>?Z@j$Z}!nbXj!+FnuZ`3r0S92Qb zBYZkX>wwTlBc2whq7{`f#Pg^%9Oo!EVtPEY%W}j+e~diBfzZeB(6We;tQMqcxFZ>J z3m{mA+Ck1Q9>`;5FGCu84UFH*>XgXc z(U6)XtD_=!MXlwiCUS?t=|SNqFiT^hHIW-JD`jX(WK;A^FKwNnD@VEUz2h++Mrc`l zH})KlrHI!V6ozPXJPH65q}GJj#-H+{>6l9~T5fnbn;r1b@t7SlG(UVK8!ZD(i5wE$ zlOfF~;R7NEakM8qCvpG}t%)p(hfX5isK~s?GhSL2W_XyZ)8?>XmaiNN)RiGR42ZY) z6zx@*j?;z%DIARf5*d;{q0N0=90jRmVSD38AZ-(9Tu;SX$o@!t2xMQYPKh|g<{e@! z?Ck(gu?%P*Lvm-JJq(G@B1LoD5f6%sj~nYnVR5Vs;EZ!>2JXpM%zDwe| zaWubgeT3dai-^LIl!r!y={@{LMTADMzVeIshBqFd$D-(0K}WikT?TnnH|d{J_9AFjVSfdViv}hdC^=pwiM%);P6R3_A{i8 zh^Y==1Z1J-!bkU5#%T%RBf{%F@nXjGSiosNiz$upgpPw8S+w8$7e?{VVk$1#0+Fg=Z@Ux#<=!8B)(TpAwKV-t@j#>hROuJNJ)#&wnKy(T2O3Aug&hq($!Q5;$HESK;>9csKFVo7iR@IcERU7J&;y{zm?5mk5g$P5OM*9W^mW*r zVAQnmB5akn#o{96-LRUlB?uj*1c%HCW^x_{evbw-xiH$a;9(vVHa?i?K@L)k4VwUT zk6aoW#W~-Dd`AQqF&h0iba!wGL*OzfcmPMip__t(y=Yx<7)Ozzt76!_-9x_)Gj~75 zkovRG`f#QP_oyR6XN9%!c)?*kJ)w#iTG9QShZZ`aJ7gu)D?w3Jak!A77Q z(9Q-&0L=#NSa2^!Q{U=)q&wsy)B_#r4*S5H{ov@K-K)K5K=-*ENqx6>U*M%BcSo(V zczri?XZL$A{Wv zRuFMdDQ-j&sWpgu6mY>55w}zkkswkRE-G~)iik@Qky=o+R;^Xs5fLl3RLSrCd~Q&* zwqNb{|9kyj|JQlVJ5Ofj%$b?znVILDd+uDWae015i*!l8my$Zal!sQyx3TluJX&t~ zIzt5|H=^4v8o(%rcW zly76%g52GyH{~u;>ON)l_N2?Ky<$f05|h$SE}K?SCuu_2mF*_EoT?!cIJ#OIcUbDy?f=XYLU9+ zSPeRpFDGXcQU=$^%q^yrQAp{z38aFw))n!J97!2_?W*9qXsdPmFWXZxQby^b_FR{~ z_x3t5XBN`6L3R5rIWH-$V6Up2F(xtB_0Ku2C9YS_xE85PPPZ1RT~1#~`|MSq6$kyc zzc;5-Dz*KRoHCPQpS53rk+x=UZjm-;(=V3Vep>e1Ez-H! zuS(jd?X#NOt-RB+7ZcZq@)qT^LpqGOW3%adzcTK1oRf{c_yb6-&%v6AE9iJpHaa({ zeeY~EBdKRLnnCj0cgwD)l#!rci0fv?opx`vdyly3X+t``*K*V3O7kKMzU1yq9EVI8phEiMz-o zFNr@?)o}vS&BRT%IP(rW_EzeB#EqB4eRl`l*#uUXF&5`$B8^h&jYz{x@?S=(l*D~S z%G>14p$)~0l#e_AcHNYU@7P8y_qLK+r_?`k4YkNS4V~|uWwXF(^nKf`1xO`9b@`WB z+iV12CB<3T3sOPHMAk(nkuRP#jeN7ow<&84662%z$ShkCvB~1ftg$A=HWv5ILg#*e z=Fa&lnK!vQ+>&(@sa-jDL6%w5Ye@65%$g=6HCW01Fr*u@CKLAwXIzkV3et4quE;tb zsXKAUTgkyOL3PpVS(U_{OzPw;8)cSSmemvK)L7fZy7KXq$4p%$Kg>MZa`mY<1u501 zS0JfRz4Mu~2dIbe3d;L_Bk37{$CHHeUWy}Mc@LA^XJT)acP-~??kBOgicTzNOuIM7 zJ}K|gt{pY-+lcFBwUm|sh|EWAtjy=1Na&AsJ zd72BeipZs_m8Kcp?+3i;rmhG}0a&IjQ%4nPWu-h*yqm1?3y}-S-bVun|NK4&*g_&iWkh*xC zOShJ?HoA|x{Ytl4$?QqJt8`0QHPTw|fYJ|2w_3jt_fgp}>le3Q!K-DraK_`D@p{=D zq~8&@w(JU|&BU!Q!=BxhNNdZ^K^lVeY}rJlxk#&3@+SBB(v_u8x8z%8CA;@g%B->h z)-U9HpsY7>w-9$xSy!ax#4ReTK%$kU3(AU+UZGa=N|z%&gVa!ZH_{78HQORd-Hk!F`#uU&vNuhe>NE>c73BBbk)rj}aUu8M6a9WP5{UMoFO61H1< ztVyvW$v3u?w#6nQ9a(B^yMWXY%EwqJtuiU?d{U1nrEPxqxK~<9x*z9mF_jwUX-}!1 z_V|ay4k#T^%6Q{@4jb|{|MA}ThSGyeza-Ae)A)#uLDKl(9qtYIi&htAxY7zqw0i zB%Mv%Ic-0pysMEWD=r)93`tj#?*d8O+qadrVmBeBv}M;Ft5jyBwnv&2e;rBMik-u` zseTzqTz}=eoH(r?dJdjW9KZWiXRWa-rCN=C+7>AHEaLKZpVv0k)|sRRisOv72&r>( zw@V^5ck4ION0O*p$(xcWujDmJJOe4&Bnc}lN%cF=KuXqIoIQWfU5*8%q`OOAl~kF# zpzVMb>829KuE!Iw+p5;8_uXN8HE%ebt<79_Jk=nql8%tsWR6qVeYdhjX7Vc_VsC% zZF`iorQ~b4?yQm=NkdBNN_ryAA?~7LoBM7eU%O)FG@gM}=FC?r&o**yuu^zZQJFKR zq^w1{qL^9L|BTd&bd43HZY#0gHR-)#<_DfdRF-Ti-qIqiE2h_Yc2HS7shGLXo5AWo zK_$~0#aoMQ?t7YZhZWo0_cSFBP{}+AC?1eDmorT2oi@cJ&h45usYR+tV;=Fk1=Ynx zX={-d#OsUmii=H(6&0tq%|kjhwjnw)&GcJIs@LW#lVU|}zAV{+#65EMinIcgVjHrT zrCAS|v^Xu(q@X%`o?Vwa)ERBo#di{S5;b@vzKt0k4Yql@*w#RkRH}DN0)iirH-vZ*!gi@yPEjjKpkT%EB zKGMcEuf@%B_-vugrubyy3gWM~+0cgGkhG?a^#=88(}(XcAqCZKdc+?-yT>*pcJd7>d>5o`_U$S>;SEJQV%Bn#?0bJmcsp;$k44&- zxHtJ83KDTI#cgcyyra!IvDaFpiLpma;=0u_Yq?2dd5g_@ZAfB$5w*gndp+@V5$lPh z6-5ikcWGK>qN}CaSSapTG@rPw#8nm1SN<#X_!ULg8y6s5UQ|aMqoU}HqNzxXg{&P# zW+iJmqixXyBt~+ZE=5-7)kx!tMv?C{q%lR*+3YWCZBZXmkEMR+#%Q_UA8ASqUApKp z%?YgK*1HpeElAi^)?0zKoW5!^Hh2|jG}4j5dL*8_wHXmSjg(BQEPSo-Eo&Qb;{sZ4 zrDWY5ET-gsu?>Z5f`v$|6NOI)4P2M<3ReZQiTe}MW5Eoh%Cs4Uj|3Nx%JcWa<*Ctw zBwO=VQC^PX0;Eih9xJaw5^GGMMzUE%;hccpb?-r{56sp&BVENix%L!cV&RNJ`jnbZ z%<5H0pL)!(g{g7Qr&5K}RNiAq=cc5zS%qg5GCsV=iJPRjXZ+_2Cl*d7?sQ5%Bj|~A zDbi^Hw&tCWG|`^RdFRIq3ablGLHZSO;{&rbtKW$%0=_$#^0XFt9fRu3L4{*UJszpQ zPmlX~NPT?k@r$TI-@@@GamK-_!F|N_DjY!^Yd~QS#Z4!!YvC~B{)p76up1KoN1?n9 zkCm^mo$@`5WQ&hk*sQ{ig+)kHiOW+e>q06OOPtzzWueW9ucS>a_=f{zNY9BgfB!FvT}In13rNwije5icm%?A>irP+jnr&s<{nErpIZFC`Uh z@X(Ccm3-^H%aOVxt@WmH#y|K~gGt^?NN;fNW8PY>L65iY<*h>MPrl{eIL;WY)O(S5 zDpqidhplBvQ}yZ2dSj?qzKEY<9#i zD>%&ykwzm;aCaenf>iDP%~FHvj7NB5lh1Ey<&o+&^qXyBP^9$(ZCazxanBO>|$R_PJb* zi;;?sGLDzD0%@E{mYS~~_p^{TEA>pIw@mVXL%v=nIiJM+{0**I>;aU#w>uT715$-M zDK&mG*>hc;?FMil%yrsJ?g0~Sqj;S78}VkCuJ6t#oNC1^fCWxl zlN}zDan?BTT$t^Iir-H$%I|EF{DRTDMXAc~tdTrK90s!-SM(H9DfSb)imSzSBDZi{ z-D;lDcit23(I&gKM(-qXGW7NRq=Y+Hsh=zUE~C>O#`LZEgvVsgxvdp{us9p$IRnMM zFxPQJt6`y&Cb$azkG zm~gDjY{z;g+p+#HP&oxossz(}uIg5((sI?ex!S8CSKn95b#lm)=M;<94|z(>b2{r> z>+M{9n=M~GpR0S2ycX?SYJtirZ~|oJbkVGc`CBww%5RB}At$ttN4A*fbW@%-qFH91 za|p~;D{>u;S?4{)`4p1nU1Q_!LvYj48+v+^7ZTd8#k=Qzc`Z1h%% zw<|_7h9|4=hAMusxIomb;tfO2b~;*&^S9brZ7NJf{P4UW;XRm%KmBmPKW26y&$q zbEci;YD{@`k{=cy5*Hbrly%w2%4@L-J1bW@@|^&=K=#kK8)|>65?9j$F{)eTyeT~k!l1)^;ht~#-8n|@m*PO!s$~wbnIYk+ z-(0OzZWA%BHCH6ul(nB_d7SmiSs;0<(M|QkBZ~P-F;7UY6}u?r4)K1)TwwHWlYEVM zB+PIE$EtBv9rCtIv zHD_i>%bBTJ#k*9gk=u z&^%lq%PCOL7f5$`S`G8mKM5z*S0`{*fn)kAaI81-v>#NS*6YHSx@n$q?j$u|eV(iQ z30)=8vQF)#bIm66w8G|TXQyn{A-m=3HpcTb2j)qGc}}1h(|Ddr&TGlpj+lHWP<$aw zNKQCrQ3YE23MA*L6?u+nHn(N|>8AXjh&PKi2PT}WV1~|0IL|5ORHHXa@<{P5qdP|O z46%=RohUsz*P869PrUJx4-!X<(v)|(PdH|V$3QMUTaZx_EZPY=HZ0b2y|7wW8@F;a($NE&dv^&xgtKA`&Vm;n+Hoso5am zoT5~n>!d8D{B$l#bdfzD(>W4in2^Yx!)s(t_V|e`nU7;pD(<$9ghbb8Nz`LQc4g z#k0jTjLz%gU&M{#YvNMax~1Q&J{j_AqLlCCq_lINa{gX?Kpbjxw}_vKTNN)^Uqnwh zzf_Fs;2bS^jB?Hvr77=9$pgi8VydsQO?KWCH;8`{-w^*OzA7#gRf2XqOlY@{1e+aM z3G>ATqU_nZMe^;?myN~Lw+ZKGiXRTMTkb?sGmLCc_EVtVF3?$ddj627JG(@S->VtJ zy?_#OQ={IKjkyP^mfUh zDaO{gjMQxAOjXPw;v#XSD4lvq$#cXr#Z*6ZmYlN5k0hTi{sCsB>hH?Zyona?9cy$g zHBa`R*Rn=@t@u|I-wOIIW2CLcJ0FOeb5i$E&KHUqZS>9&JyG`Koh5mIxLPzzNO)sm zrc)`oMto8{LQKtIDN7!q7_;rxsa3$Uc_wU`!DNl@cI7!5W;#P89|bdXPmz`CRZl(R zEmi7Z@pn1r>zUtsiu^7kNp75@M zv6Kyamnr9qN^MfAddV$NyxCiJ>MG8~is=VqP9XU`qodw%drDT_+$ECDwlh}2lMlL%HKcI*H}!7 z-%tDA|CJF(foReXAL(ljdSLI>*x8%kYU;cTf9YGJ%5Y*h@!z-m-=v)TZ{mMYAK|h+bMxQp zI;<`!>3iJWo*(!3d-m}!w)H=yGb=93ofa-j9~bUSUz1f8?#v5p>?GFP>_DFr8?6?p zF><8^JHnm2+eH~$cFWNYTJ>XUVP&Z0;WX0=C5ZO7CA^-JT8-ga(qm z`CHB~4eoiEZd3E2=E2kqXtucfFukDT-CCwsm!;BdbnYIXj8$@w=R0Rf6FR>|vl^HG z*%~NynU&OH?K>mtEz1g1dgTf&)_l4E*ekSvI z_npn-M~>rcXe}MF%?WHzfsvEEPvAM;RyZvlgO@mga(7bhPSpRTk==C8Y9|P?nu|tu z4GwKij@(-{J2Bda8Xf5b)~*q+2Qg~fH;5apjU(RXI<1=LjqK*-Hs3L_msi|;_Q*b7 zQ}eSU2avMbaYk(OuY~JjA2laOY={TVO(QltF{Nx$$^%MyBz_(#e~V8uIx+W6{(kAc zCH_@>+nBbidD)0v_}|YtgGX$z7LJ%>EgW%^wQ$5d`iOWZuG)69+K!lQwH+~Awe7`K z>PI}yRUn_k?A2T|Vv%zoyxZvwmpKQ+hnz#;V@@Bq+KIdKIDd(Io00yh9xw&pcMeKF) z4G}#IKhg9sd~({C785=%t`q-YbTYX1;NhFOw()UsrMOPCn)k4p58q%lAHL3NK74~} z-j~{NRye`vwD#U?t{c9<|BLvR_*e05WALH)kuk;xu#iV?5ZfAdcX+MI;T&W1tdaZN zS&lPwr`y};{a#!n{z2RzGImLQNqpJpryJQD8HT4CqeG3TkLr<|@M5Y%Zy$I>WVdJeAA-yLh3qnh1C^O*P$z{u8_Jy>RPqZ>N<3V)ph6!tLxAe+(FNBMpw5*%m%7HG8?GcW;RfDyxG7|&un0*$5RPxv}&cz2vxnzQmabC1!}Njo}8PlkeT{3?A~L_kj4I z_^|khxI%0aSrf_mgt$s%eI({-@fq=1@pG7M~HH6`vPhFj5OhEsQ6Nr-<{6PJyl4gYQNc9V%Z$ z7si?5mEtV%e(`bf9dVoZnfST5L;PCYWkeT7Ukr@s!k8hpHlhniU#w}KS2@moPFyQK z-&|jLq`OXhL0m8XL3~mCqcP}g`GbSR(Z;Z^*w0eK{^DWA=vs@9J{Gr#pNLzH)Pi+$ z@Dy7&;fCfFgEzSw#h1jF#ZBTXMq674U&PvKGHdJL%WZ8PJT)9(G2uXQnK7CvHi+}Y zJH&0`XX19FQ|P|h+;qfJ_fO($;-AIW#WzH}4f6a&#OFZ9=YaSe5DyS;7T*=$6F(NW zh@Tomd^vcyc!V)xeuT`AM=Yf-`OP(l&vkDV=ZUw8^Tpf61>zmzLa|ZAKjd8eLwJ`l zz(0Y9iG##3#wk z8L5l$FwuIUe=x>KT_ANa_B2u#<8<+QBfrvec60UNV;C>Snc|h=Eb)HvagkMmJln+2 z#LvYY;@9FXBjd&Bi-CAvbJ3uO@ckfThg8Onv9*!01DztXmqC}9y+G^*VlNPTf!GVg zULf`Yu@{KFKW7l^$Ky2QNMK{L#123=xS1F@PxmzdQ+tY*+9 zPLZvr11H$pJ8-qFy#rU%UrmE|qSfUV`pfv4__?@4{90uFA9M@- zW%R|sNFNz9#MVao2+~LS&D8@oVLQfo;%(x5@pf^6c!#)9Y!vSl7mIfpZQKu(H$89( zc4RS!iG##3Ml8tK(})Ebr;FDcRip6(#!(~VJn=ShzIeO1K)gd-C^m}RJ8>5GPVg=x zwJ;tg4id*0v1UkJj6IFi#W-EOUgYlUux(BovylP4%{m~~0kMt&z0qC6fRE6fai(~s zI7_@=d|Z4-+$Me|elC73?lPh^qb~+Vv}Vi@TZ>rPfWGLC_*A_Pn{V}k)C*Ft!{$@3 zWdC*4%ZUE~@gE@m1H^xT_zw{O0pdSE{0E5t0P!3koMc}Q*8ZpSX}jrt6p){D{i)a*ebJih^<3x9b)ScTZh;>#MU9U4zYEJtwU@b zV(SoFKWr7XGh*uyTR&`-**e75A+`>!b{S^zhplo-%nR@TnvKMMZ~Az)$auC8&lcj@ z!mpZ}`u8`V8gd=jY&4%5x}qm?Wn#E8WL!hWHN-dW|C;&6{a-WR7~&g4X1#u!%{T7< zn)$|%8KQrG^Sk@^H$S=mYfcCAllx6IKdA4Q<|p@?YTgjU8-jR45N`p&&jK#6O34QvIfym)>ux(}5N^{g%-JoNda%o6zOQA> z5L=6dB2T7>Z(~gR!Wf<=>gu7c9_s25{nz*HXn;6SREwe$B~KC6Khf_@b`GYu$_`yY zZyC{apT*vzlAjaTitmbOsL#E2&(!Au_Gh>JpNZSW&yDn-ahk|4iXmShUMOB9UMyZB zUMkj!mx-5))5RI$Oz{dM{byV*zAx(P^dIr`pD{yhEf$JJVjFRe(d?bdI z)=5Qk5Zyy`4>QFqF(GD)IbyDuC+3RNpvav(Wga9REFL2E5xEa1 zwXfJu>@OZBV(|xW@;8b*jG_8JRR4$S|4{uOs{cdve{`1RjII{%H9BJ*uetu7c=8FYlp;#og5sSqVv8`AtmWkzJJF&f3 zA$AZuihGHBi~ES3#Li+DabK~kxS!Zf>@M~Y_ZNGL2Z+7I1I6CrLE^#UAz~l#P_eJb zoi07hoi5}~7Y;Dm>VME!f1~6bMq9fNT5fAMRBzi#ebDhdWv)4BDR&w$O*Ki=b<=dM zP`V4HyHL6drMpnN3#Gg0EGs{{TD;fjoaEiv+|+xuc_O{X`S=vQSNr%B@H%m>$Q>0i zcn1*g0Ny0tEH;S05pNN>|056gfABVOzR3L_F$=^y#D!v`xJXoe{Kb;F3nc#%@wejL z;yvQM;(g*$F)1z+@!>cZuZnizRYANeh*t&ist#OXUKPZvf_PO2t}w3((gP5$3gT5k zyef!hAew=ARlUcVSJiu*c~uav3X;FK?xlLqHUARM6!9;S@h_p(ZLay3@EsBV5*hyz z;$K4iOGphNHS9gt{7Y!%&o%$D_gwQYA^s)AzwA9Xyd16$r;B#=)uCN|b$Eqnbyyu< zC0;FBeO8+f*n6D$fDkPoxWaruh@aYfu6csJ$F&+{Ii2gglIE9tHF&$kW~1+kz8Hux zF)pTwp%{s+#0;^um?>t72{BvD5p%^nakw}_93}otJW@PL94j6zjuVd;PY_QOPZF!e zpNr$gUx*XLlf_fS2aLgFu}-{9yj+|v&JbscSBO`NSBY1Pv&3t}dhyrdZ1Gxgj(D9o zSG-=lLA+7CNxWHHDkjBc;{D?9#D~O(#YeUNRHt{oYhxm8#OYtA#PVt}OSK`;km@F+Oi;BsjVzQ`MmQrOi zFC!}BlTpoQSmPsow**K%l^GRJY?8x^zpDDD?DU{hph0B z6&|v}Lsoc*hXwJiAifpEw}SXq5Z?;oTS0s)h;Ie)tsuS?94?L!M~OcZj}(s*$BIXb zs^s&;Y2x{! zdL~pqglc=Jwuir0>T||OHHlP{NOl;h%xIBP?-cJ6mx#X=?-A9{k;;$MpV3Cezhrdo zveDLcg*_M8Z@oPifDel36B&I%Gzn=dq`r_+A*DjJ2dOpW3J{IK=fxL9Ml)AwHri;0 zz8HuxF)q^gq|#zYi(xA^i{nK`Gcgmylf_d+da~;Z8_lpzWHcjRE>0I`h%?12#4E+C z#H&R{GiO~R){DOuXN%X0bHwY!x#IQW4I-nN5^fT27MF@iahZtr$-`)d4~b|W8SO(x zGsFfUHUO~!$Y_R)W{5pN>;YmAaJ7gk1pqP!?sQ8Ce!(Po4H#Z)2JmS&ofqWZ8_3>8>kmOvBHO zA-djgy*+z^=og-;_(|efqI7Meoc!mBjB;c~Ih?1n)Ni4B$;LYI>KPmB$j=#VtV7k) z#yYY}wy}ofD&NLB`8SF$8F^0e0HLqrIAb^t<=*#1{=0xIM4Dt<2h%YGsXF%v(Y)%+2p+DoaemmyyINtyytx2%y2&9xYCVyE_#)l#m*sD zyOnOO^Mreedx`U@d#ihwv(0_peaZRS-Q<4irn&$0irw~JTd&L==ymYMxkq@%ddIob zyc4|f?gidy-b}a7yVASbUEp2g)w_+}9Pc{!PH(<{xNmyzdY`**dtb1d$!Fd!zD)HGHbRWKU-^-r&PFU5exB#~1%4ZTbF##*^0NG4 z{*hike~drIJHj99AL9-7kMmFQhWgX|^Sz_}3;hedWBiN#>%C+7o%f~Q+5R&B0q<&m zqrcIs_uup1_kQi~@OQAUY#4;zoFF|&_pS>vf-G-tuy4@SyD8`vbYoxM{ezy~Z-QRI z!QQRGp~0cvf}npez`G+D6b$he1;c~k-rd2N;3)5&;8($~y!(QCf_uEB!F|E~UNU$v zSmP}Z-VHwR)&?I3pLpwoPlHdrKL(!%&EAHX7t8Vf63dI_^P4Dzu_A9fzdm2;{Vi4= zEBAK9Dq-Ztf%)6``vkOXKY|>AbY(}j7{{uik%TV!}~fmDK^R5Wxpfu zHQO)9`wsiuUFqvrf6IRN-2ZF%Vfdl{cEsyv{yR}Tiv0JYR#ArkQB)ii`&*)Y zqJ8{NqW)2Te`|Djbh!U%G&&mNZ;OtLj`O!iXGN3!&!Z{P6#t9p!l=&Q5lxS#`(H&f zqbvQdqpPD?ffHRH-57Y$%~3-Ti*AkP1!>V8(H%h)HAYK<^yr@G-XIYzkA4^AL@T1l zg1qR7=$W7}`hE0VP!_!(eHgTlwnW>5j=aabBiOgqpjLx|uKeEm&|p93J$n&s$&S*$OM^<}ZXEY_FB`m$JG7VFDmeOat8i}ic!x(A5|i-(AP#6v|{tS^i8WwE|2 z)|bWlvRHqRavm-oAr2NR#UWyqI8>B{`?6kN)*Hxr16gk%>kYkVYRfvh)>^#-!uK-L?`dIMQ+AnOfey@9MZko5+#-aytH$a;eZb;aL{ zYs8PmW@9WN=8Lk5nDid1MWI?0szsq%6skp`S`?~9q1qB^bc7lmp+-lj(GmX2N)FEz zYsH(yo5cq4H{va#tTcR7^5f!4VIk_loz4OU0zPOyq4$`h>SJA#YaJ%_xp33+1^J}IsipAw%IpAnxG*NV@Je3H&t ze-t-}8^ulHE8?r-pTrNukBn}Xm=LqY95GkS6AQ#bu}Ewq7K>zd&_Y(IO_YpgZoy9KVzG7E#Ke3zGUF;$5FZL7<5POLSiU)}Yi-(AP#6!itVn4CJ zc$hdqJWre^o-bY?$|l{5BwsAniI<6&i_=Bftt-2AWw);E)|K75vRikSa>{z$ddasK zy+Ps;;$X2-93obULq(P5sT_Xg+|E@w-mfKJE6x{h7Z->cDeR|d`886!MUpilyt^ge zBi<`&M0gqzo<@YH5#h62#J`JQivJLIivJY9GWsLMsiHK_H#{s}z3NLxzWUZz-}>rX zUw!M(R!;S>KgZ-CO$@~hv5UB`C|hMuS<5L~4SGwKWd^dqK(-eQRQ%zhY#@;KgDS-g z6NejP8b>je6H_^{K8ly7;w2`>n~iC*^E6#0O?IB9Yo+O0X(uX=`Z-PgoTh$GQ$MGv zhtswx&u5}~IE+jV(?zv6Y%Muc%n}o#W|&YjOsKgf%vXGY*hVZCRp+p+?^DbJqI!(S zH1tE{ik|3;F)=R6HX`*>q&X_mxQ@d1n@*43u z@qO_F@k23HPw6p|?pjq@e5;{G_g3>h*pGRTh}|J$caSXNr@=v&6~b+2R!O zm*P3%uf%i3T5+n#=%+O1dw9NxPLOAaGez`3%$4F*;??3c;vFKELLMvyE)wq)7mIg^ zOT^!bcZ>Ik_lnpT=Q4s}QbZ5P_lwva@`K`X@ps}w;=|%2;-lgUu}OSPd|X^9J|V6W zpA=V%Pl->9&xp^8YsKfqb>bhz4dO;|llY4Gs`w}I0})FsTxQ+}WNv`W4KPQ{74yUb zu}~}$+lb5)Npm>mYuy}~rM?6&QEA|uni-(B=#Ph^y;`!nQqHNN<4_bY(SSMa4UM@}- zWw+*ikW+SR-UqVm*1QkoS)#1hyboe-F`D-Qj}QlomEsVwN*pSxH1j^luX4=$KxSrw z*NXGS+rSerNc>pbB7P!n6+adKCVnCA5dSWIDgHy; zDgIOZ%IG_yD~=SWin0XrO30($Hm?L(eQsU}vijV-66D#UdfvPeVy+YCir0%bh&PHi zi8qT4;%`J*f`6;zdE#xNwCPKmzO?B}o4&N^OPjv5=}VjbVx4uDD9!rPtS`;_(yTAd z`qHc~&HB=;FU|V*+qvdj!BET)yNLUW8j0pxkw+uZd@JNbM2#==t%w;Y9xlql&9@@H zN*pE*H^zK%FHxnLhedoJQS~$rt8kflScS{X!-Be&d05E0qIp=zCyF15AB(ad^RP&j z<(P+s{F%7JXdV_!7u9O>u!zYNv&4j$Eov4u4~sncVu9F3EEZK~^RUQMDwc`mVmqEKyb$&X;_< zD60!)b)l@z{4(k$`w1VA{E^Z8GU$q)D4j+z$#GG3Wquhs)j#H)ArBVSQuEG;xk$WJ ztP?L2RsTpeH}8y`_2REZjjU*m}#Z+x&1LmC-E^9SNtP+PBy)^UDGDn$@ z1`ijpUu0?uv0^wx+I8m$-&k)ZPCy8f?lf|>eDdI21bHrbX=ZdxBRFS!m`mid(^F=g+j5WcTBHBQ{ zQoKsMTD(TQLtH3gHRN0*-YG5??-G}YzZLHm?-B16u{F+R4uDCKxeEDy5gSB)P+Ttl zPJBpwSbRi$R9qo8iI0hoiz~$^#8u*x;%f0J@oDiH@mX=L_`JAI{G+%*+$e4mUlCsw z|0I4OVvm`l%twRFE|A#;=7_mso>(9jibY}@kH5|Nn(xl}9@%f)tLd$B_7Aa)e@ z689GO5j%;U#V+E$VpnlLv76Xk>>=(i_7o2gdx-~%2Z;xZhlqW|L&d&gKe4}fm^eT@ zPn;&6FJ2(ZCe25q)fbC(;$`CH;&f4VYd#t|Ww+*|AcBj%lvM`OgiGvpf-^BQcxI%0a&GHBFPLXkyXx2W6cZ!T=;e&Xm z$Y{1bh;I-XWy$98aV^=fd3?x!7GD?N5dR{+CH_@>TYN{{EWRhcFMc3?C~8D_A4}dM zej;uaKNbHbej)A<|1N$h{zKd;{!{!)+y$$6CkpaTl+hOhF($^vG%*w-v6Yx1wiYwR zEYU`86>m5iZRA$*hNIC&ZWZrD8ApgC;V6HU;_>1M;)&u(Vzu~l zalH5oae{cVc#1d`j`B5v%(q3Bt(tF(EQ>YY7P(&hwK!Y6R-7Z6T~(QHm^sRPLukD* zD(E8aD{2NX-;j9C0OlJaYrNZYH{=20K=E+#2vH-;d_(dK6RqDy1(RV_P$ym{UM@}- zXNWegtAZ;;YhzVlZLA8cja9s%X|y(01=hx@z}i?9SR1PXYhzVlZLA8e6X%N8i#Lcj ziZ_Wji%Z3%xJTq)Xktm2(6l>i61y8W%lKF$1L>|ajW>LxJ~>_{9HNzCVnCAP`r(!s=&rk z74Lc(Z5&kvHjb(S8%I^a*GAs;G6rlB+M74NOg5{i;*Bq3D>x{YX(jXCm&Ig@8W+6x zMNHa9aFjg*g;wWL=AlEY?I`omq1AR2?}l0a&qb@}DD&1~x|kuh7BfY=;;1ko+I2_S z^H6Bl9cA7+ED+m>#bSxrRxB0E#B#Bn*k0@`b`kd#yNYAPqeL60qe2^}qe2^}qj;;0 zKH;r4<0<0lik~Rfh-Zjr!m4nRc$PR>oGRK_t_r7#=Zo{;pzt<0%KUeDySPBSLtH2} ziuZ{Rh_XZ8k+U*oX}lw6^6$mxL}|)={LE42<3rQ_sK^&><{TBpMYDuayi-RByi;eC zcFfZ!ez16?;?-BYYiD^bRQyHarDC0UnP}sBlzIJdmRK+TTD0*vinsNwoIAz4#3kZy z#e2jjM75N+^(_AyI4ZI>j^fQdi+M?WUojtuABw3SkY(C^L*^)Zo{Q(1a5vZ4ub47H zv3G2`JFR$ncbO1D>LCe2($I8K?(0p#oJXfwS#h@+E}39Z*jCkaE?mOQB87GPL7j7{v2IB z$B82cY8~%kB8Te3K=F=#6Dd=_JQFz`N%fSWPk1L<~I*UDYk7{%oyyw4u+BT$~S&ixM0^_NxEOeCb{GJfe#1?U^5BZ_nPIby4}f`D@x0 z`Ppjne|tm?KjtW%(X&r9!;ZwOKPLZw%9Ea%I-=R>nbGWwoqO(i+G_Hi|Fy_wPl?RM ztvlr$iA5%`PmAf*x>M_iBqa()MG2)?njPd$rN9;6NDHHHkJ-rm$+;%n&Y7vR+S>K7 z`xcGlJVbnpyvN@})R5aYx9xvw;r~93p4me$ZPrDdbXaXt()XBN`Ar*kx8*A$s_R%6p&Yc&=+-#rwUnK8ru{<|YR(^4Ylo0LU;%Z}sNBmAPMqU@%qqRX(12{wAe z*IF&Mc_*sKTHosF@HNu}+}%4ie>7xF(EnA@Pd?I*4c`mjiWRnSMrk+kbat z42Z_i>n%0O9~O1`F}-Q|x#mwiTF=!Xf+h_jj-#WC7b_@va%!is4UE7^1S z+UBI?Z*EX3yAa!FiMAJU^RtR4HMpm_7P224yAMC)j3&ou{62hi+b`1bY`0zePC9!G zb49Lw4br8}4M78?T3j9H)*|gk+y~9|oWb|rIjc^46ermyD~a?cQaf8ICV79P?pE>` zO0H#R5w7OF?TpqH{oS=-H_g$hG>f-SFxa=4{qJ0MwxiS<$LH#mwkxt~z;6sYC(+^t z$|a3lb>yleriowreVmqYw?$vPzZrR z{z>?nx&{H?#^2-oq+@yN$Wuq2I`Y(MSL9@HUvq7+y!lXe{HkXMlCquWnA@pc%CLxRdN;AtPM-4e@NUb5YhSVBTYuGvY`z5FPrr!F-3EAhk5ltu2 zIX2zg#LmWz>}%W@t|R<`{t4B~XE)dH)}!@wjQu`hRn7J4@sMwiB>f`FF`H|o?(B0$ z+iSQ&oq8qWDm7fChO2zjS6r!vE7fqN8m?61MC=*S@SR?IhrVEM)p)S$i*NLm{vUJg z*{7{+r&`6-4J^h`J?9UvF-HdZtKhVj=lVt zzG(e}wdwuCHDd3u!KziJ`sO(_@B-mmi#LsY+mqkgcvBjCgs@**6TA9;Ux)17*fInB zi$2j@6Pjy6b4_TjiF)|`6wEUjmE$9nd5)SSS+j`h*?EhRTl zaswqdP;vt$H&Ai|B{xuV10^?5aswqdP;vt$H&Ai|CEIG&K*5UHlG2lu zo}~07r6(ypN$KpBYCEOcKB=}#s_l_#JEUrL{0o2gSRq}!2zN69JGT8$Z8ucg3)TG; zD`UqR+yB&dKefG2ZRb;`@W)sk`vN7|4=5S1-{emk9c{idQh6%+&0N{r8T>EvCA)!p zJkMphJ)F`a4@@V=3@&#(QbD8R^PWD z^zg$r^j}}a{I(x`m15fRzqZspeTjc-wI%=6?C{-r`Uh9A@6Y&}HQKuA%^7Yh0&Ah$ znvltwXe+_@XW{S7c>lHSm2<@;S4?unBv(vw#iaLBXTv`;*QY$5XDhF3Ad) zWQ9wrz2)+=&SqVlL6}KkuPNK@D#rd?w%?WQcJ&=khmmW$s@k4cw&RugownPR?R8~4 zUD-ZY_y|q$rOrvV&z0?RWqVw4_3wB_wnvrI{zvsJ>$&atW&3^EZeNV%%XMu-!B!kTD1X4!sOPQ`ad|G)E+?q_848IQdSU~dB0djRY#GMIfuQfoJ#hj@G% zqBWfL%I@zI;1j|0ZVvmKv&OSe?KYr{i_h)>7 z*9qne`~~|kf8ei#w+SCLn_p0iR>q|LgIRs2Y-0?zF(!NgdA)38Ot_K1FE=OC?cSlg z%N`Z(R|NK_aM=^ZWk(bq(R15JK=61O)Z@9l$5VPQPGIi}k53{zKJ4<+39Sejgw}*i z0()3^2?D!Vct3~!|%>Uf?_223H z8|dd73D492|49eE*v&u5Uj9jUEnx$5@J7N*gqI1M2(J)cWu4(poRzqNmAHYGxPg_p zft9#{eaahHi5s|AZE)Wv;NP7@!6fkKc8>`;X=Yigo_E65H2O~ zQIYL%>t9ZoPMATMNw|VfjqW2XC%jLfOxw*ihIO(Ns}sXAV_01btFs+!V{;sCrkmek z?^VZkv1Q*?$3C#K{cCOa+Gqe_Ab~pBuC?s2>O{21_N#UJutwIh-q+%9)#7i};&0XB zZ`I;&)#7i};&0XBZ`I;&)#A<7;&0WW-zN0ignpaQV=a2DMUS=Uu@*hnqQ_eFSc@KO z(PJ%otVNHt=&=?()}qH+^jM1?YtdsZdaT9wueIkh=&^|vz7~J37Jsf5oi^dq)#A_9 z;?LEhU-Q0d(Q7Swt;N5q#lNdXuTA)Pwdl4M-PWSdCiK~aKAX^IlXEC7{mGuCzk&Oh zmk2KtutFaz^sz!8EA-zd@T9>1knj-!tMsu-{}aMi0@mrX7rM_*=>B#Bd!hS(BYZ*F zN%$w$Yfnlg zcc*AOQQAI~A^w2vL1{ZshWG@w`($`E0pGy(l;nF>>@e=~_J{2*&Ns0JyR2X?Z%w$o zGvV^agu8%n2Z47b+(rWL6S#{ByiveAd86$OyKh$1<^0cYsp;w9zpk#MH7~3r{kyBI zJ@xzUD$9Q4_~oOu+OFfTt+?z(Zab0NKIFCwx$Qx2JCFx`oWa3igh2$}pU|qzZsch^ zGYR)6^d#5{{R?L_Pg6#Jvr@A&xvkdGKJx2FN7MOxJ*)R%4;^{v$a|E)81@*$-n#_+ zB@che+fKk|@{c1-Bb-mTfN&wIjz+(1(vceDvX?4s4;YQlSj4+uQp zXq5HS0$;>qj-~M>Xq5HS0$;>qj-~M>Xq5HS0$; z>qj-~M-A&o4eLjZ%acf#Cz0+81fHe3Jdt!?B>a)EfhQgt2`>>|CTt?SLU^^gk~OE2 zHK&p_r;;_Nk~OE2HK&p_r;;_Nk~OE2HK&p_r;;_Nk~OE2HK&p_r;;_Nk~OE&-Nw`8 z&j{NIpA-H@_=2#5@OJ{gHs}6>u#;!d_N2daa}Cd?oAkuL7k>}pe}4aiwW^x6swNmr z+-Uyt3n0Oz+)MBqAgo!{tXVZ-KSF;3zkY*%QjdR9&pKAgI#$U#Rv8X%t_v#(%k(6t znl-H&f2E#vt%h~2nsu#`b*&Pgr5>N99-pP2HLiv=u7)+PhBdARpQRq3r5>N9-l;`1 zKl#}bnyEuGb!esz%_PxG63ryhOcKo`(M%G}B+)<(8mK`7HE5s)4b-548Z=OY25Qhi z4H~FH12t%%1`X7pff_VWg9d8QKn)tGVSR04eQjcWZSpq}ULpL6!1EcO`(yug!W)D) z3GWa#6PT@7f16l;n^=FFSbv*Xf16l;n^=FFSbv*Xf16l;n^=FFSbv*Xf16l;n^=FF zSbv*Xf16l;o6u?vTCGEqb!hV2=W3(*pJ!{V!A-2eP4NPrh8Gfw2yO5%Tb{Y0**Y|v zM6*dWn?$2EXtV~6*05eTh4qAA6J`@waap^YSi758yX(=gX}1pT)}!4Tv|GbE-o!fI z#5&%@I^Kka>(Fo=8m>dbb__NE=uYTC;F+AmGdbq~LNCIBgx&;vcYen*$*))@`3=jY z!>@Td`0)-s%dX~0_B2mA1F*Uucna}fT6qIj-hh=iFb5==1Cq=EN#=kgb3l?gAjuq% zWDZC&2PByTlFR`~c2ZCJ#ItjH63eN_avHE4dv3a?<=C^+`q(nOw>A7-M_AwN@EMWA zGm7rq?RMvGw>!J6C(~#*JFMIDO1snQ&gXaae12EYXGsp9B{}T7o@Cec|HIz7z}Zyo z5B#_HUi+NAXU3Su`m;$ zXQXbqa)lIEa$Q&U|GU@BF=uAZJP1Ah&V0W6{MK*1erxT0_FliW_g?clYp%b}itDdC z>}OV6k39Nja^85I^M>!0HFMTNir0*LP{KVZ(MQ?M_%i;H_5k0HFgo;)wgt$HRynd} za%9cq$ePL8gBQ`u-Qi;B0hhp~a2fQ35cGn}VXCZ>hxmRN9)W4_C`^aP0AEp_fEn;4 zz2~2cTFEM-BXTFW06GKK8(42#2;Bf13~Vs4!N3Ls8w_kPu))9v0~-u%FtEYs4Oc)P zxDxupRnQNvhW;=BlHeK`2-m_OxDE!x^)Li(fT3_B41=3sIM92>2)G4C!mTh0{sg1p zHW&jF;4V1KxqRa#^1KYMz^lNXF_{2+MzIYYm(p8K_S1}e{#aMI|W6@QNMOQHvUB$UN ziF0)l=jtTR#YvotlQt9w|epyMZk2jkCsyA1)Ql8O%8q>cMGHA5Mn` z5I(P;h1?RT*J%a(QXT4cI72!7Vja#=Uvm*0v0B|gRXVs!7oUm*qPJARo-{4$gHWhTj1*2Q0n50`8* zel{8J;>#uD%OzVTGNU<5feir&f&(rv{cqk7Ea1ta!1*ixYLl3wVE`y$M zIrN4rpbzwetAR1JH2{*}8W;%I!XUT~2E(J+HXS(E;9Dl+TPEXMCgWQs<69=baphKz!xJzAo`eOk5MGBx@CLjMT*VbvafJ_~@L|*vSPIMF zZ}2|+9hSofumV1Wm9Pp{!#A)Q81X1ZJZc9px=}yCPS^$ihQ06~*a!awMmNeI-QX#x zpBce$obbORl3Cq;DQnv=wK+Ce*M6zZalo4POIgu=sofSj@I1o>d|wFNk!e43?6=w2 z%{45JYgioDu(n*o+HwtR%Z%+rW^5-iV>^)<+llyQbL?-u_4sFV>}`B+_qqmbu4}vT z&wkYN>us6Ioybh?M6PjhT;t+cJAbJ&9Bu~o7i;D(b#8~Ta0g7{ddT$)Uq2aNKN(*? z8DBpcUq2aNKN(*?*`ZHaO@AqC=`Up^{iUpFCYMt$ob1v!K7EJjoG z1FP#Vd`rfA3^c`p1 zZcLC@Skw0k677vy^+ma@)o_OZ)4WRSNXkHukw4pUgh@z^D*-=<3ZN?oo6Jo*6(uTF>?hg{w`p} z-yGvz^KN^03K!Sw>=cse_8o=k9b5%#x$$UX|R^7}O^-91CR7l;-8o;-xk*xSTO5J6yRXj6h zZcww-Eb|LBTg^7VRL`h+=0DVYRtWxv6@oXJJ6R$4TkB+22==VHwrK~gbL?2VoOPa^ zU?*A~>`Hbe>jJx~UDfJrSGTKMUHo-|t*-t$!B#iBf!)Bm$X_AY>Tb8S+gcafo$bz6 z54)>

    >Hp*`91&WTj>HKem$WwRVa%(9X0ot-xZ|kqlm8|l+oK=2ruvR%YvC3~MtNcF9s=bf0%I{9+Iac{SpjY|z zoHv~}mEo`Qt4x2DUu8KfoR5^_ukWkkoll(m0wkfRelXs*|l9;RdpTL zRn`2pepL-O-Yu_cxmDaM>SVW?TV2(0>$&w*U4Qjob*jJmFYE8J`mZ|8?dkSX_5Ia< zRYQOEU)9K8{Z}<%_20YH8SdTgJ*ow(|K6w0cJF8P-&XD;?jx#=JDoLv+xpK&s`K?4 zz#a4&z@1nF_+@p0UIVzhyVzZHG_PIs3Y6jLdtk{TS-ET*}-Uat*&gRzj+2tuqy5Mm{Q5ObIz=IuhPK@egEf{=8B z-f#uK5)hWlX(JOKU^3;tR0Id~rC!aR5Z=EIBd z61)trz^kwTUW0{z{~?Qj{}3sQf&UK43Xvh!hYYbgWQa3xh?OBjtP2?u{!gSV1^!2* z@Z2S7<1DI(az)br9 zX4(fZ(>{Qi_5sYa4`8N!05k0am}wutO#1+{6EohOp$l|{3!xia1k}M>_95P}5AlY5 zi1+J5yj>qM=fE@YEO1QmPJM_s>O;IwAJQ}T97nuKAL2dw5O2|kOpYVopbzo>e2BN_ z3(xHP&nx;f`_b>4a}4Qc7z@tq=RVIEdH+28#EpL+o#!0$X7;=I&&owUdBc14A(LZ@ zcj`mDQ6J)c`jGi0`~$v*jqp#{1mD1B*aF|eR@esH;XBv?-@^~E6ZXJQuowOV`{2K@ zAN~gi^qqw+tj-v+#sK@Dx5PudBOc-n@euEahj=?Y#Jk}kRy_)_=26I^zgh1nq=Mi; z95jN)!12fX-yz=q4)N}Hh&R9eyA}g@_dCR$ivfybjCFxR+`$;2F5sK}%&I^k-un*m z)^|u<483@AnCA^_j(yf!46znah?RgsHv5ZJfI@jsDTP=CD8w2-Ayxni1u2)+fBZWk z1A_herayx8N09yq(jP(kf;WmoyiXho(iglx9OCWakb~{4^Alo~pO8}?5`f<@?+b@` zTR6nK!Xe%i4ms7JI#56B`Gi=_C*;(Ilc5fr0u6zFVEvvDtM`PsBQt#mL*~Oc$;@(^S_SP$6K1oQ!0EaX7&n~>8)R%TYsLy>y_Ep8}vSh)i>&{TEqAX)-ej{4U`2^*m`7<xnQSr=L;g8?1hx73!rgbQszVNYF zI9?>@@uIpyngJ)Peq!%^IxQ{B-v}7QtI{7<7u+uKGEAyT|leOI_dRVVsi3rY>-pak0o7Q{k<`# zfo|jajQ)|ia^)Nqsq?mFaXrcVRLp5g8EwKr{arh!Wf5J01o;7PT}}tCO~dmwxspD1 zinO6{s@#3zeUpPoy1n#Y;m-gRv_Gsae0+rUzxnScfAbdpG6TD0 zMZcCGVX6Lk?g+CUm28p6nSXx!we2}7jq(dAqyOZO(aQcwnOgR=M=9l`^yl+SN|}%^ zAE}gb?~&&G<@uuYUze0=zo^_gDGR-?QqrYP%6h*XeXXsN@)g&C8HdWPlQR2|oG0k9 z`RggqOU0Ce^9YnzByD&+RwpHYOi(A~EuHS9lqLGROUiQpJNLS;HF~_DuRR=N__nLD z@he!H8=Jq?Y5X>n^3iQ0jW;t*UvEoEUV3z1pB+j#Aa4w#`|pr8Pe|E}?T6Z5-0y}l zR+MW#MZ}~Wh@5ZGW4qd^br`E08uR7XnDekN>oJ5sR(d@}pCiNV*XbxPKmFlIsivRW zo1f~0bLsP*KOUS2bD0yVofBB4l6q(inx8teLB4z#68%>+ zb!z=cUF7+_fWCzDo_Aep3vW?sd-f&IeP-nL*Da|{BHM8=Gc6{6d``&?Qu}*JX`8&& zIjO9A5%r_Y|gZM#Oulq4~{>~4}w$!2en@zZny}79)!aBbU${ekKmlIRR zX+38@U4O(r@#4_=eg^%S`$?S~mI{CSZ8*3!b$VgFA3kIQxwXG0X$ihw+cDFZ^3&vo zqHn|HgwyLjo1QucUutk<4v+o|hkbv~{}^`aE4dPN<@IZ>-$$77lK=5$r}Dg4t{mPL z4(oIga}MEjT3_gVu74+aZH-JBh(zd*L*5-2z~ zbR<eFi_ESmMr-E(B z9a}}(!<=oQ{_|rC&f|vXyb9*~V~2_x3;dB=9sSr&&5Ufb9_QVdx*<~M%ifZ-9))#< z!`{r?`93eg+wo?mZY-QGGF%5GM^>^gBZep(P#P)D{Ao~3Y=I+}^mg4QrF5)+MJF>e+rYW4KU|gY)Kc5`+ zQ=C0f6PHQpjZWQCri8~ZQNjMp)_za)>Pm`>o+DE0C1+KQ4F6)e%aNCWFSg^zbN@Oy zkEH*?&zKyJ#nG>0hf~7QO?xD5IJy=bUTH_tf3)$3+*&gCglqHmX6_Hmg}=R&EL2&6 z!uqm=y|1#EWr*_8=e_*;qogQNmEVRElO2`FV%u<7r5}F(g~tir($syCRfYF?I6gAY zm%URNwAD%F?H}YFX{vC%P7^U+h#EgJPrHyZ3e_JieQ~1I6gj_FN}m+S7bOtwxf4HG!MP9S#r!$hLh~L`ii@HudEO{WhTpxaaNqrs z21WWZ(aYhPfO%n|MRy@Dx@FjTB>QCfLvUBfBD*P3ux zBqDBgt~T=dyVCZz_j+32@ce_{PepyFyfP2tS=Gq;^X)8~RN^z!OEM2#Z6dE@k#gZ$ zyxn>Bug{zx8RzANy}UBLfoZ+`wukkRX$!~^*C$`^_g{hd-%FrDIwOg^Pn+zGVet?1 zU1&f3(sp_4vziwzBjOpnqLGI*-cQ+!4h0TZD8|o?ZqMNs{>MrmK6ezpCKtHhPLsW* zX-Qt&v?*S%w85qIK;iSPcUyMe86`4jb`w7ou0JxSuwjZ9PSt^x2Cj*kG#e|LeBj5A6ft9_W}1i z=H!Q>Oa46(@5Agv&t-&DmF4*Te(BD!LZ!y@TQPHOFXO#Wd|x_|NVUw?ONP*&NEFder+@k!^@fU6Ne7 zT}QZ|N~$z3?r{1qT%5lyfVU*Q74KEI&PxBE&5zc z_&HZUHvf~}JS-owwMBZpTpjhd4aZWa|9m;`godwA`7uSMlGZkeOzpS9tC!x$*X3Ue z(Z&zNc1F z5xMJDq_g5>#_rsAggMq`rVotF<3AT(c)o!&L;N(w9q&3%;rBIar{5bcw?IBUE~oys z;aJM$*Q(#Uj?m@D7L@!p96XRds-QME#9ADAVxZ`u2bOP{OHKIqNO?o)D_!;g&| z2Oaz>*Ph6IS6qp?Z743bpx#SLpIuOMq@mnCJF?u_2M;S>cwM0`=}Qjf8+u48UI?cy z@V+L=!{tTZr^rvcoVDhGj7)ySa%uPb!flA`v&hP#^`Ebsl)n5> zvT&dHib(lTf;^$5xWZ|pgrmKe6D8RRRWUtqy!x;3*smCKil_@*lZwQiP?V(yBG;@b zDZR(LnMb;xN-FpF5a)fIzSg(XTbr&ORmSD(kEPw-H|boZ!=EEtH%I4Y-JIM~%Jv+1 zN$ci>tttLHL(j{F<0F0Iu&$)<$T3Pu>B<%txo%GJIS%=VQIhlt#m|DTD7Bt@7}*4UutW8e2?^%u$RS zZ9H5|(&MEr+;_jEUhnmc_TI{j&c3f3?niH3M(gmt3V;8iI+zD2{X_i`^A~zug05V% z^WJSMDZk%_lHw!dyyRle0g;KyAm{d384~(QBG#cQxj!=sy+c;U@=qkA2XZ)Cj^vZk z^GI^#=E>;eeP3`59dA>{h{*EzEln>FUY{{2G8~pUo<@h|BmFL1m(Su+dkN(y`HGv-9b zd#f|%MTQUh&$pdjQ0K>&>G`|7eBt}?Wyup+ccknk=f0oduRU4#i;N4)rIis*7y0d9$Gxl!{^JyL$y+lPNv(``eEHC|qE?1}R>r$7V?|l6`K6^P zEk4pVU*=!j7ds~(nYOfR_TknWS$1K$@V$e=@ns1g>faI)N7~HqZB007$@5Mb>%!@d z=l8*gu|-Ms9*w>$sr=tkT-3F6F);ESHodM_8P;2l)-L}ZS5bZbddo%Q{wSKGyQa?J z&cz(-Ui9rLi%lhub&s``$ESR6QKkylRMI^;-tmbEr#qhC2buAVeAd6Xx+Bl!#ijW@ z>v`uYA#8u{n%(~Mg_$+OI^+hv%sb_E^L4*e3O~ye{w_;-hx^=PS<3wXm!{Nr>dMe1 zjMrO+la)4p3#W_x=2&VI87@iwe{rr{l6izadPzO^3*%a3cKC%Q9JlP8!+uf!9r`~$ zzmfkdR8q(N7Jd&$`hSH=YFBA--nPt#y@>x$QCj-r6<^ZWCensuyWcWrMb>rv<;-Vu z*ObeA_L$e*rfm0okGUPcMg^rk!}M#|bfV5EJy3LA+bD@jR7IXI%HmU&SlBU${4Lsa z44Qe|iaZ+Q^5bUg@4TWvx=;Um-aX#v%$44h%+zojyp@@s_~EdQ?*)EbI2=xQINycy z5?-8F=DTGr2fN-cF0J2&;^K~#zHpnu_4xf4mZNhrdfs{hNucpQYao{-yRrUq|iNTi5NEw&z6t-&+r4 zwL4;qgIV_;ao!>sv!)hN6;Wgt`wU}|q=gj6^8ZK`s_I0rOg)g*jWy&iEmP7HiH<`L zWc5A{m7J)eNDpN7E226AMR9Wtkwy7)fJgG|LSz{yCo;R%Xzw_7I2$ytZ-{CTkEO*&*9qu!?ztpDxcfW_GSp&~vYf zk5<`bd3P*YUEaW~QHAy1u&l9#b$^ubFa16JQTpNdv?FW6@u{a^E#9W}rOe3`O#lBr z^c(SW!|#fD{{QpNd*{pBR&?)#b;bQ}+3^3QD4O$^X~H)BvQmEkxxEQl)0o%O>xmsa zkTtuw7I}TLo-Z!$g#Os~fVU)TVS!rOWxZ7(Ts!Nf0^#3aphz2jO&bgM->+%QF|OO& zlC`A7DuP+dON>8)Nbl3E)kl!6Ko;-Q9R1vfRLokNACbkIL3y86VIlmT6>(=hPggPP zt8i>_-yD~li;FuR`oe2r{o3{%k2W9EI(lYpKc?mX(F^~b_h00^%n2Xyw|lTg?x)|b zmA_5PyiM7ek(PNkX6^DuXYGyDl~s;5uNX~N*7ANo>9P*|B3rv;J34=t>=uVrPIx`J zP_|hhU%0#i%Dg~Hb#%#&&(BiII#JKdf`2V0p zs0xbyJ1}K=6S;7E9?bo}a0Ls?3;AV6rVsa@A6?QrHzmaV-mVMbdnI{2@~eLxpZ+WE zU9#-PhU`I+;e7e< zpQrOa%^nevUyp8b!`b8V^;|19WlxL@`!fIkaI#d)PA;rJ((qCJ|Acu)M84x0eeJTM z@3|h0aYS*?b{tmU49K2&Sm~oAF6$VCeK{ve_5v!>9Jl}XS)k(Ib)ew?`}|!uCBk^` zpAms$AbKAbJ@@m!QddN;Dtk?I{cl^V*Saj}7v3&=-EX@EMXf75=TbC9&f`UOzn;dM zpS|JNtNldHg?r$rTsI3BdKAJ(XCBsH-#2^XQ84E>Rm740Kegl4h_bG4biC~Ob?V7c z-j1A@f_3L4NK8)UL%IezHGP?MdUn#=nA4!Fp)4zPf2n4*8h5vx2=@_l~5_ChqkU# zPOqb7CG$glqn1@ECn>7FBxR+Xk%yuy&HyVZOVDLsK3clh47|G%C5 zPt@c7^_*wJ>2gy=-_zl0rGzzf#S|v|4XpDaDsBCBBqs@B5rD3n%fmZQ|dta|-PGEhYu>M3iw+FmW*ThnM3->R+P=ynn8*dc5mB8t=m%diJFH`bI}=e=&KzmFt@yahbo7jDLhS{6@O+ zu*~qjJFMh??8L5^6GalF6jNz+j6?D7-^@|MdMQ_vC+ER(F(q$&<*m&{xOM>xF z9qZd!MaPQxbye1TfsqAroNh&B-7|5zuK2vVd}|YA3EQCVJzF1p98+i&@!u4U`$UW(Io#Y;ytqK__9@7Z{L49Dv<38r6{NE5d4`uMSR&tn}LC$}x~ zo{l|5Z`*6Ve`J=^_KR*yIeo;&<=OTiu`#+WiK39>r6yZ)uuafyNifrW+4!0*$2;3{ zx+mkcoFIQfQ$-)=O7GuzxeQIbbY~lz_shBz*%ie$UV5A*HNw;#+o$O-05t$60>oQWKp+b8C`$mMmP#OSkSj6Rm)CHEJth;_7u z@p*f>9vacHj_#!d;~!{TU0S?!(Xlq$px$4OK7L|zEi|*xCzzRR%jx}FKCeI5YyD$- zo540F?>IRhxx8*;Z0^xz`oEeOeax5Bd4sxFW3+8ST}z_wpBUYrqR&q8ayGf#yrcJV zWXbz|wL%VvVr%NYiqZWOlXoQeX8>2%Zs;C}k!EO4(!Y)b^GW0wsmC_K9K_bqeNtZA zvGELeVt%V^HUm|*p}0lxVgVj^I9}9a<10<``Xa0uyx)9b2OSb-G}kg8CmlB z+27ZuF2~e0np&Tz>k?^9Y$YwnNONS-`&8*;+s-?}{bR4Zt}9j)a)Q~At*K=ZU*??0 z*42Gw>phZSEhj=9FKZifo)I}TSK?5ir%{V z4DaZkPcT;!n_zt9Z_RpaMIXuWx`Y_<&tufdHePbCFa9~u(fv?f{PSMCuD4uXY44!n zY^1mTH6pm-)mlzqTQ2v^VEE@ZS6@|KU2lSsh9*AmI@?6Y`fZ8H>zQ2H zzYf~E1XCX~G5TB`qkkiD`g~<;+0HG~d|Q`LpKZK8t}5w0DEgX`D1N`W+Conpkw{LlVG;c+b6Wfzfy}nuGs&`w(ccc+mm3nLKCZd$&~5r=_g71oUD-< z@`-Gh_l+1MQ@%E`%`1#o%q#h8WM0KzWAkeBIlw$>8WUUjpU zs*6<*YqjdBZnf5^KdC=i+tn;J%lb~uRKQf9`d-afZ&*L7H`OM~Q=9EX)y%GF zS5--Nb-Sjz&OXgPO%1Wnu$!qH>=t$lHOy{hw^BFR=i2SmaQl3_gBodfwl7qp?2GIk zYK%S99;(LK!|Y+|PJ6gLT#dJHv2Rg-wkO&TstNWJ_7iHVJ=1<#J!n5;KcgPCpR?zw z>GphkzM5gbWWS`Iv|qCqs+sm8`wcbAe#c&_X4`+W|E8X|Kd@J*x%L`6MZI8W+L`J# z`wROYYN7qL{k3|--ehl9i|sA;Hubi>!``djv;P~g)XG305T{ZC@qzLxJ5VW5L#+?g z4>VR^2hIpISN{r}6=<)v1v&;gshAdDFviCTPoj2`$&U?;# z_I_uXv)ullv%>i(AkN25N+95*IT?X+&NgRzAi+8491J8f;EW4YaO2%dfjVw=w|by~ zTidN2Xy~5qo*roAwsczt8oTY?_JJnu1?~lbGu$q2mq1gur`s#g%)QFJD$v61@AeOz z<=*U$2()xZx+4Rv+&kTS0_V8*xl;oj+=twU0$trl+(!Zzx{ta~1iHCTx-$cpxKFvW z1DCnaxz7bc?p*ifKri=IcR}DPcd@%TaJBoE`);7WyTn}oJ6JnahG!NVr+bOnF;O^LNu@?mIpdJtatHFA{26kuJivA;e>LSn{tS7D->2gDxf=N+ex+6Tm99npOj424Bm+59 zvXHaobL21ht0J4_Uu?JXXUI1Gs_@I+E(X7CMyN)4qat!8qmq<2DjR1bw=!BuP2(J+ z19C@Wj40!FE=_UoMX;I^Md(;IOcrwMG2TMnTyc8VZJFRnQxiz$f@RE&3BRCGv6b2iMb5(s~Pj zBcHF%NA93HB6m`qkS|c3k-Mlal+#spMZQp7NZxLWU!3Z$E@s<9^*|F+AxiG0dXejL zb+edigc>21x<%b0SE-R|q_k7Fs#_&ajZ&kewttO59<3%I-=*%78`VVhB8NM--3 z!)LG!WtSO%(3 z)IX)1+N3r~jQU1>BbTYoY71$;Ro{|+tJ*4!)i$+VPFCNk@5r@7{hQd`YB#Yzs-KA6 ztA0jvP#r|$DNjsWY(q}AO`Fl7&7U~7vXz8v+YU(34%!@Pc8nb(0Xx=?m6Pl^8=t|B zx8tR;UEVHFY=WIYIVagCAt&025@%PiIVSB&c4hQc>}sU1ZdaEWyM|pudf7GYnsSzZ zeU(c7^;PQmS61Y6?KW)N+HIwc-Og?&UAfAhFBjPz><)6CeSv)e`p$M|8Dw{{yU6AC zh4zJLy4l@Gd69h)DZAU-S9QWdFtf3-T;`7V=Z} zQ_{+wZO@i-Y_4p`T-}kMv!6qL-hLi=u05AFzF@yV?JwFdvVGZpneA)#YjVB4&|XNZ z7uk#CQu_`24eEWzUP8S~?WNMw9|55Gz+Qo7jlD)%*dN;;%Nh13_NUU+UTd#K^O^k_ znss)HH1Nj<$czulZK{S75-wl|Z0i@ilU*x%aUN=JLEy%l+zy$yMXy^~h& zvj0uW-S%$k`qBOod5`@Q`n~pE0_|jI;Jmu;L5-)5*rv9xJ{}CZV!x=_JMJMameEX5Ii7f1b+_xjC?S7P|jckW=aFca+q0gT!(o&C)UBQbK;yt~PC>5gG(g|bX@Y!)(_AVzXF4sUymOY*R$Ql@(@ttR=Q-y~U8jT7 zQ7Sr}oX!&ObaA>$bw+z32|B%;%gNi@=_jUhwR5#Jar!&`kq0=}pdaWAl#`rmok4P{ zbDc97`FiJisp$-HhDgAVL8Ql#F-{1JC8b#p?TbSLTWfO9Ip7zY-cv*Kkdw=%-5XP*e-My($d$R*JE>2&E3joWEEoIZU@7m9gGtlY zZ7Vey4WBQj+rjNfUPi;{d%BDb+>jfR3*26AFEosdiDhKW_Gb5HvE30aeeREs(cI~d zC(T{%-E1ehld$t1_a17w&%IA7x|7|>=%=`hxZDTa2gp0sor;|7GVbz6(o)Nv=1wEm zbay)2$K1!r`?&jr#51;jk~A~jnUwRC`xLRW-Py>Dv)RsbU%-a>?tE-`(R~^DRrgi4 z3)}_N!pK`<7)v+XCe1tUU(qaem!bK)%bs#Sa6d#|<*q_r?XH%S-H+Ul(5!Jk zre4PAq|b0OiT%R;0{yox`@;Rs{Q;YIx{SfxUG8oyVeBr3KX#Wo{@7ir`D1r+7`u0q zO0gHkG8Sb-S%(pPJ4W#5F^&)FaeO7l@gb=#z4%j%=r2d^E&Y)PNRm{RYZ%`L8Qo7| z`v7D5mW=6Vu)52W{8f=xWdSGI*Z35+XEJ+i+@kL;^3vS&XTe_?!INssRHD!3uG;KhOk!0B^@!bI22LwI|R-djc+=KsK>C_yEvO<*83vZzw-ooXS(;J^5#=6qFk`nq_ebM;- zgQ@+880|kKX#b%S{=*&AIL;bJnmesKk;hx(7jZX3-(Ds$EU3(Yx@h-lh{LR+CB-YwuZNbiO@i$_%zfn*78?n{_ z%VTWKk4sKd2EIpS=87Cf%)al@JkR%tRk0Gx^F4g8qY++5BdMest0tuMeUJ0C?@?3x z9_{fxT1ZuO79L2YJm16jI^rU{j`Q(1IK!%rcpB$xPs3213wakw%t(d3i))yd>M8BD zkI@JpgR7(JjgL`Y`xuq9k8y+cF$QTLBSCu?hV~_DXkVg+_9bd)U!sQgC2X}oEs#4S zJPHF};w}7yd@sV#Uc@chi#SPp5wZ9WnX*V_C~sBZ=|R8L;j@wkS=yt{E%+i1L>kYke=EDxlMZ@H~I5| zjM14FWXrrDTi*jo)E-C$?SUj}52TLvKoYeFQbBtliP{6HqkWIA+V{9Z`yO4j?{S6p zJvwRM;{xq_bke@Z1-9>dbke@Z1={y$W6!haQSW?vK3nD*+4{c680~vBw0+;Bx%NF0 z@jc$dXIO#5)J&@Mg136cFAnmjV(pq~UXK4?lwe~=o`7@Mw z9h>k&nrc5JQTrk1YCoix_Cqe$en>Cvhg`1xkP6xlNz{Hw9qorCYCoic_Cpf2A5ua4 zA&J@#siXanMD2%Ezz?zTMtnb{v-U$eYCoio_Cxw=KctQJL;7kzq_g%zI%+?pt@cA2 zYCq&=Jdo4zPZ|Uo$fbdX_#tO&KV*pZLqd2UO&Q-e3pA4(1I_V3F2(m~NgChl2x+fl zr1m;SX|Lntc>>OVx@;a7t7Z7 zL;7exMa7W+{WZx&LroEAB+8e2+{gA=T+H&_J=mxnX z%G_-o82jb~DhQFy#_5?(|iUP?>trPS5_NiC;_Q$wmcHJw_F^}}9DLuu?Z!dI!H zeU*yZS1E_DaxNZ18>fw&>9oafakSskNc$}fwBJ%w`z>+WZ#h-_Ehc^oBMs+D{1!v| zE%mhDa*Fm_OzpS0+HYxs-*P>^P}pO+0nf+xSWG;YG331+k0nNXEC~@Fi={o5SnaW# zu059e+GB~;9!q8Iu~^z;aq(E5mU_-}_$>kLw*{)l_Wc><&+bbl?bVdiUQI=RcAs%Mv-_0k`!fmJpQ*0>8C&}^W`sYJ5aG|5+MkKR zpP3-#@MrGA^1JbB8f&knw)SeuX|JZHd%t@>wfJ65ZM>SP_#50Az!Kk|X~dm@N2R^? zXKHJIrkwU?;&2h!H=43mNU zCkLP_5I%Xm$SLoOoH`Atqux3m8X|9f^ru$@>^OZ2Q10optpWYifPQH}+J>!REW8W* zcxZ`mBf^acHzM4)C5#68xG{a&gg$RF3Kqi-_F*F!39rC*k*0*34gnAmhph?EguGKY1IiP0rj>f{#-|-?F`5k zX;%>_vmJfVE>+|_>N~He$oa!zzDTDLun>L_xu7Xv;|17w0XBBV#?I8)dA&%Ns?Zl6 zhjk)d;{ltxZWQT8zHXG)?In?m*caWG0JdC=T^CdD#T!I=Gza!ck1s_oLB0fgFUb_S zv;j}g>SRvAfa38{bD6h{#!2T;+!5H8;=u5qQhrlwCs}ceED(dYQ1KnXNd?j*q zOCav*`6B)61MTn6KJ34nNhWlHNw7lX9>Vt!zK8HVewecEC44X8dkNo1J@<8hi9mi#kjaE6 z6P~;r_KV!#1~BOUB|vxz;VH?mR^)*=!1f0SKY(phW1t<-H&ee9d9Xgvjt6OHGX0xO z8;Ed!FAU@lQn{u&2_|2h>2zf1Vtu0R{!T>|?>-fIK2<2~B( z-X4)9tzZni1G_|))q(9Ie`^FoME>3rsPFICx}4*0d0)WxPl%Ul7`)B!{J$xj70tuFW*@l57||r7jPV86ZTho z$)RmI*qXCOWW9pkFb$~dbL#%QGho-}%K$q!P~V0T@CvXmzo-iXVK%H6`LZHV=9kl9 zoyb38M7}2N*VMO>`ZrSk<^<>gQ(%e67LMyJ{`X#yZz=!VN$@`G7unhhMgi^Jx?N-& zeYb54EQTLMwl@Oy(RSLj{VS30sN*~8_>MMwM;ms~h8^tx9hA9)GIvns_mufPWqwbY zJJIi4A@Xn7gWY?`x91a)pXiIdv}fNek^SREehvVA@iTq#Gkx(hwjRXRgV=KLCD_2| zYBqey{2TLeHuoKZbHs>w7xsw}+XhAh&m_d|5F@TW3>71Or5NQC#7HDhBF|V<90-(C zc|4G}@*Xj&G>73ZA2x|mwKm|#RGk6oVpJo4wH`19R=@!^Ai&0&8^owpQH@Gz_sqciq)?h2EDI=UnP>ATRE zUDk`ymG*U|EnR6#S8TnI^cRx;LegLOKI|8xTX&!>7j**KaM4;Zx(9%8_X$A0i@O2s z??J!y*eJ#&HK8wH|0VR%rIlf;7(GY8JlHHos2(K2Ofh;56XWs@Fcnsd(c6UfVqDQ$ zj6Tnc(YFnZgC($AjH_Az{dd)Jz{Y+p#kjgIjD+{a=pQFW5^YQB1=E4PxF#O3^BUTI z4Sh9`z8FXw1`eZ#Q(CdwXuK#URe*{yxV_){b357_>v zOfg1N*61EE1y;a8F>b^5+bCnqGBL)|r(-7pw%s9O+=;z+V(Xm?fbz#v_xOQ;ZR4rq z&sAYCJPTinF@f-e-Y^Z;ig6cqa*a0bnh2DC*IqFu(kBz?--)ll7BTL|=DSD1Lf9_G zr21mq*AwXD`>=U(06GCSPNtocDdT?1xSular;Ph4V+v(FKwnLzzNyqVl{O?d1=2iB zcv^Qcrc>5qwEr>M{}^RGL77id-mH3JJk?5!+55zJy0sW{+KcgQoEXm~it#+(bBUXa zeRHX6E_KbLu6ao?3n=r21~6ER`PBI$X45`;3YBMd|r&VY4>0Iit+9^F_vKSQsVyBMvUb{#rWV}F;--Y@nL1?12bTQ7%S=X zm4sJ54(r8OwONdhW&!dV(yqA_9)>kyd~8BH7%#>ra{&84jTd7rWv#0tM#@-t3wDW- z+60EdeApyLT5aeDq)kf~Bb|Kd*qA;UR=`0qGFrnJSOh!7$ZQ1kO(w@j=2v25)db4m zKkym+XDcI{dUB{|J;%p-j*s>G#Q3}=jDS~QqZk{=vtc022HNyRMYt5E!fG-8fertd z0xQM%iuQfg0mj3-ut$uqTfj)brmwMSBYnPc5K!Jf`--uN@;71ACT!ZA1arjLG6d$s z7BRjh{kMd_od@*ow$3mKmILK~M>#ua$M;Re*x6HzU8}|Tw}MVUx_^`I-}KGyb}$y+ z0rLIW04Vpz`9QgQ>Wc9b>3>=w#$NL8rJlWn|3h2%#RFykm*e1nwD-VUK%GBR=g-4| zzWRBK7zgW#;dKJ;wn=rKV_YYu$|`^BunTP9UT z0r!Qg60WvG%<4^HpqMp?tFcSWT3yAg-3EpOvoUo{F;7Vsvo2-S#r{)Qi&+m{y$)iY z)=td&lVGoyr?(Wd!APL&hM$Ppm@=Cz7qcmL;4_;oVt}$+kiI3HGe^ueoy2U{90rMr zuWO!{Eav%?(QyV)XD4jx)E#J7rxkEe%nMq>XjlZ;-x>QmlczIvblxLomsT(qmcRiq zyOO>u>ASuIyT!b)C6N9?(s!>c=EbI%my8kfvbAFNWJa#n5SS@uZ~EuT5n}dx7by4Y z=0Km|N19h}60<*dV*B?KGpUJ~*YJJK48Z1r*gO!M2V(O;Y#xZs*R}!bxRyGvrK~}% z02>D_0c^akJxm1B59Xb!!5v@{ye~YyDdv!=V%|Xd8{UFlOes<4Q0yE!2R4X#V`bWw2jNd_407(!W6Z7fJI{ zU%-}EDvJ5)G*~OFFU&N?yQTQ24j%3nr#%h(5hTPf!9 z_F}H6E#`-H#az`w%+(9UToWhe$Jp~J`)}<6F+bz`v!P%rQiuo;V-Bwl1?TOG`%=LU^6Bq*Isk5OVr!E((9&J7?4lWg|K7Chz zlvoXB1NJwpBUWSjt;v^SHSI4}Gtx9~4aA>G-DgtInbgsedRtW%>)icfwV{kQLxDbQ zlPOkP_CZ^0YCA!!cJ0JE?+3BkQ%3tqut}`*vG089JbxkV5vv3FJJ9Y9gga7C$1P&v zGgzJ0i*>;?pq-t0E3k7b7!I=_Rje-bVHe8nLf!ZZRu^<#1%?6gg`~d_d+`x?nFZ+o zZZlw?SQlZ}MKfW8Slt`Iz3`=27uN>rxR^S7!~=OQA@3zUVLD`sb!kl)1aHA^u`X)? zXoX6>Add@0lppz3+;3ALZN+4+O-TO8E~S5bGi8eK=XHY1r{7_D>%v)??&( zoIZJ+zL-I}CrLAtx@L_Q>#03r&E7879QyYecozA&MPkkEFV;Njm`{JqUnbUzJH&b! z8(&Tr>y;T|y^2k*Vc+Yc#9Gt_kQY<#TeR_=20$6_RTXP#O|h1}CDz}t>HWd5T&%yN z`}=rcUoCG617SL>guP;YK>iQ<0eL>a<`3vA&V|;Boe{_Cqic!124e_u|A4}4nUheng{g98VB0IaF_$B zVlke#K5hv^VK$)qgf@Lj-n9>l^%>=SMq5(In|e^Jbn4BZ{!H>^%@iwVvRI!BOcLvh ztz!LyKL48Xzox#8l(TWPSpTG5o9e<6vA#jKnX>;?9q5C9k^f)IV7FLX8bA`ze_Pf7 z^?i$N-wuOkfikyZ&(=;rpKPW5TQ`ffjk2~;);7x8UK=LD`*1+4@2Km$X+XLit>9ju zobQ_eZTX(I{XqZyFbEclwUc)4>;vH2Pu2Ar~r3C1DC>NSOPzY;{Lq~ZW6`Qy2>GsGY9sH za!KcI5EVn7Sl+bY=|mN~2BZzh?JTIzNeNnY%iK;`{rzDH2 zN4WlSQKwU0!#Gim9v9W5j;J$8-@LD=7KB@3drR7NHuas|3ns!s_)=6W+SdvjTMYry zwxX<7J4BsR9ccGC_X4_e4v1<^y4H(Coy+&R;{kme1*B_({cX{=r9ax$g_W>b)Ok~Z z_MEq0)cM%lfpR-ybI1O$L{ul*e8B`!U8uk7`=Yv0?nQlpdM}zMs(S~ZEf>!h)nl2c zp3Q;sLfwHnF5fPy_h_Jx`qUJ4Wdi&lsxS6jHA7UtVepQqt4V+LYEhhnRsVER1Nw?`JX z)b+h#GQ11eHUyi7TnZCmF>DrfLuH`PZWsp(;44u>v3qDIpe;jb>(KS0Zj6C;Faogi zM(iGj&BI#3P?!bSJq#Of!p57h@g{7%2^(+PD{6QXKsS7bsGGNn8i6ghQ2s5i0CkRx zhxRZMD1T(C@TeTL0s7+BIe>1I2`ym=%!E(ifT%w;g(R2;D`1bP(e=X3}agPvwWFb)Iw5Bi=mH>4=+6IV!^exyUYI;i`Je~B@ zN>>AERxL(SMK8&yUvw;vatth<}3kCn)a;%6o$HW>DUYaX|bG;-4h`N%B9r0Jeyl ziS0AT1N(9&w*Q6p{be$&6dt97&M*P!lUWBuJw@L>H3rzHPwf(gU!-P_0K(kwQcqL< z(<6aCdwP$kIn9B-nzImo5cNz;z@BH8i+Wa|Cp-&Zih8ap^oHq>E{Zum^*sCJ`6;kM z)Z9e46zJQz*f%c#oq&BdkMi)d)C;YE`d*;E7Y_a(b#DS6MVb5$SI>0!%;Zi8ApsI5 z9N|7h+hARXJAs4yt z`#z`t->07$5+DP^?tb3S`^!+%J;PKzS3ULAQ&mqjiR&x?l(#OC#IHR7Xy4ap-`8m* zt`7pt1E9S1$4UIAFW>>dDnJs68}R!LX#WN)U^f8c_bupui~7L-CT;{jY@7yI0yskA zcc|}oXwP?V1MvTFgNU0Z1D*%`2tfP49|Cw3uojR-;^r{GgMg0#sM8PVgC9_*A5e!M zP=_CV0J8vV0Hq{ui2y7Dd=Jr`dov04Q@S@V7ny_y};8#BFH9HuT{({QtH? zByLBYw@(8s0sI80C2_}Cz%zg!0re#QJPz^Q~>BfhZA?(N!$|!cm;sZd#XuH7y)=35D&;E5xSkYcL886iTj=f>?H9Q^wlqS z15!!+bv>Yv!~?*^zE*@zCLY9h2UAEq)En?10JIL3lXw{4A9)P0hQy<}BqojnJOx+< zs3q|j>Us=)aqI}7oy6m4=W*2QIB1_31y}^Y|DQ-E@#GUEo|*_)Okz@hzyl9Qxqg4gktc0qqpfOIZg%z0QXL<^xdHd6bok zaZN>gQ!y?VaJ>)*KpoRiUK-k$J{IsSi5U+8J|-~}Q;a@7i=c6D1gM`N)k&(08oAj$}h<$u^eqE2fd0660063u_lPbI{daCpBoxUY+g@d zE9%sWa#~SNJKAV}oFrNaNFYhKiX=n`OES)s7_&(7cnz?VB(HrW`MeFl)&DmD#DMu9 zCn?ZEQcs+54PHc2?~^3;SpwKVQs_jI!giBnCctEp!a;k$e3AwY0pPb05hM+nM$%A} zF+7Q+QJ<28J+1_sp)|Ibq+39D0)9I&jikxIpPE6^ZL>*=dX}W=sN0NGlHh}qVAGTC zI7HH{6q4@r1H44iUHHvCfh5h@MACh8N&2mkqA_Z#9zr`FE+lEu`v8>vNMFDMfK>pL^(bgQih4b|7y!DDW|QB%=p`U~j)4R}u<0HCj)K|7xT{b#lTno0WmIKbn8cmU`;i>xHi z&I17FS@g|wXcyL9={eNpxnz=_$NxY7AW2IGlC(4kfc{*HIxI~i>BZgv;J%38zKGwx zyn&=wKPJhFHpHSXap;HTH8=&>Z9KhRvlO(;V1EB72z5+-9w3D|_3ckddUqH}?}7IFUL<`GL(+$+--n?8Ad zX%og^Qw$)Mr0*91(1y)B0Hq}TFa+={U>g9Re?%L8d(FmA9 z(y@soA!b23F&6L?U;`kZq?40LN{Rs-05p>XzmSx?lB6?Z0BF-$+@Hh!Ib2hQ0MHjH zNhF;Q0?Y)UFH!>mpn1Uyu%4te3rXqtKK(~PB}p0RgUr5wSU@gGSy2G|?qYAiDIrZq%`r{-utRblpWi;ZxX)#I72_&@v&%T&snn$v*jb!~~k|mtelM_fb z%mus;$RgSJILRh6;8T)4XOrw@C4{;I-XUUO@o-t{3ow zSCQO%D&SGTMi<%dwt!+l9!6hQK$kCXh^Sd#zr8UTIrIOsir`aXejp9HW zg0{z^KVv5X(DvAm0ibE~0-#A0}NPc%Q$?t*w`?Vy0fU;MC&W9UG{%0WILBP9!Qj%9c zPV&Fd&VS+m{*7_?7-RMEev&^yn?FJMpCps~sR)=w^5;uQ{^EH6>acbc0QFtFljJW4 z0)Y2r7Rm9cB!6|5nbEImf`!kjsgw1QSxEp%Rqi?p9qC+bsk zbJHT#Y4(#=i~5Qn%Xqi-s4Z#fXsE5PZ!WGVtD)jzgMq40uiCOURc$G#r50T9tJZ+t zMo(W)PhYR*c6~!ba4>mkzdCpR{2Bb}Tt&q>8*jabnFzVaaKJ&2D{`oz^*TY&37%Vp zX=K%%MRS%}g%RkU^SIHm@^EPd!DZK>pyw3QIEBAlK_T~Q3i6e87|SWx8HHF!1&;9w zTwv(h__=JsO&q(Lmp|HBejO)(qvqFbK?A37fK%{wQW(i8SUM>fz(ZH|q7|oEx3a$S z;pin!^er5{@lp$n7z3jj`54iDqgmrtHU#K>jRLM32`WJs2z?2kH4;LjmoS=-ff3~! zF^aGPry*hj>H+>kF~rbyqz7P@{T|<73WAe`_aSP$&FSW zYXc*9lV?rN;!pSHz)|4f(LJfzmtMLr%>BT z!8w*)C5sg&zqPT{rnD7j;~lg5SB~Dn(6w33B{!cF4hgRDB04dM1jE0CfkZxRK7iE& ztAq>^8oY#<>qw+*UiWrh_esveD|zAVoppD~1(lq_9!|mNq%e+Cxc&b&g`2Kwg%>0O zvZo+3gC1Apbk^ibgB^ot6{ZE*toj3tl@uCiv+8^~ODkl=_ahlwE0lp-3uA4CVy?I1 zoaB95aYf%cW>_yb`u6|-6#P3|(S6^Z>C(5goqY>m6`Lu%Z`IDe4Y<5-C5)GZ@nRBO z!gxs-F9m}Yr=U~g6h;EK&=?fdBd&|o2M$|(y3VxUZpD0xq$>4afcFeV65)M@f zDhX%31cPI)p+pa0vz5mKC3>Jlef4PiO0m*lo8Iq5`UZC;HeJh6v>*xh$87r6_o)PT zfi``>FBFD5KPPX18cxGrqQ|E4!XNFy5DlTQ_OVxe@u6iMt7Nm?oS%sFFD{_xZrqpIba${GFyp?^rhmAc=HM>Crk`V`1-NT+jDEKizhD&S z;a9hHq1a~AJ;x}{#`h_pQ;u3$$-|}}Wv0ovGdTIle6?PtEhBDgB>$~Wr0I6(rK@#2 znXQD8l!N)zOp)qB%&$sJ76bg5 zAXXrVwVq%UndEe?`)oe}icAnIn23U3gF_M{S(Z&7GKq^~1)}&8QPC#qkpI*Nn+&$g z_5(#fQ1ruS`GJcW#YpvTHO3*08QDm6uR~bjN9<>!iv?*QqD!zxXE?-K^63 zt!wydpirBQp0m)0T8(F;hAB32&SECO6$o%~)JP`5l}i%5`&~pM-$fK~RnZKN(2Ns( zndr14bCh!z5sKChL2E~$UZLQ+P_)+fa=FR0BZJef0}Ia;CLJqNoUW3QajpzG7}u)} zIIz!jHQQ(kn89hXiSxZ&<}t+#|HssqK#xoZ6Y2Agx+ZB(c`=>6ayY8am z2Z&mytIyH~N~O;M{}w!kMN<)(=pFj2Vo?_fb9YW3~0q=mZm9* znx)B~wzI)|1SRA0HSJ}WDw^%o=A3f!D*WjjKb_;}aQurLzfUK=Q@Tqizf(6(WV-Nn z_V!oywwJxlR(QYPi+*SMss9Fyx#)M~up%)#yEMN#Hr&Hor}OvK>-7!!IVpuXxS_hj zl$183hbW3w4JmX{v3YxVsU9YKqprElv!`rVOwG;BHC}@DaS-`g)TGV-PnzxenuGiF&V7LVq5$^}Kg5dRRdw6wM=Kd+{; zQR%UK`SPEuJV#8PJo(6xBL$|ObC)h%YU{CaWB)OCJ@n8+3ucWQcQ>vN-7)133&je# z|B6!o!d|-9cV$Jw^cbtlKDa7MeV)C9HQD%$mHMI>-#)zt^)`7Rv59wxuc$=P8us#D zbmuLUmX^oCYQBhueu{5m}H>oGmN=py! zI9Xm^e(_i-riRVCv0i9w7ELlq&7wF%HFC6-yQ=2oI`Ci9q;R?LSEnEZ9$;_s;552HVFq}uaa(pU9F_&8<*#4^P zUdjI=vq}ul6>v;wOJ8P_@Ho$G5*6)&!zSUhXIzOn>7p=?ggZ3M-)fjYo)>WN>3q8| zq00`-jOY2F%u9MsSSfWVR_d^6+U&?&A)EAbnrJb;n{3nPX3@{M)7$hXgDDYrm5%oR z865o@iL?&1o&ZPx6&&q{e$GqH&OV=(pP%nOckA|z>J=*)42Iq#_PNLD-oe4adFyxX z{Pwd|t5)saJy(56U8u&W7VUKpR=K%&%~S7kd~-V(^%Hf3VeRUpN00ve#p+0P1U~IJ znG8;UAM5Q1z!Qp2TrezFdf|l^o_tVrb9(42D=RS_b_&lv8$;jFSM(`+;nRIoO3la$ zKSX;C6auK7+L0fge;y+EBN3s$FpD;b58rajE%#0*8?6&&;fwY3mbhTrv}ubXX}vHD zYe2rDTNj@e8>2p~y`EH;(z+X*RktK3Ctobh%`GbiBj3Ob(^-;BVx^Xrmcp|Z_hy^E zwY7DtMV&py&#x|c?AWn4F!^+o+cRUOva+(u`pIriL``3iz=urJzFZbxWWqO>z4c{p znysQ;;j;L~G3KHSL;%rZ@H&&;_mDt?+beRhamuM^M`*FpI_DYh3}NKa2s&2JYy zJiYx)Mj81PtCT_Q?M5$ZXn{1R)`q&eCcy}4(pE>_a(#V$lU?@oH0jlL+kmPj&)&Uz zdp1=GLS;(QA0AqGNAIeP&F{SP&iCn6z3*7?&>xb{Rm8$hp`bu&ZizJ%pUcK-G3@tI zO#V`=_{dN?`|l|A74|a3pYQ1CFb@lk^Ol;kjvqgs(=PRkyl>u|NcCCu6)FA<`Hy`$ zing$q^vX@X_M|T()mPQ$<+%_3^^X&107_|sMaVN&YAQUsb?fTs>NDzmb&mFWPWLN~ zpN{Mto3+J@npG*Y4-%=cR9LXlPPJqit|`yB@2Y z&|F?#QBhTueKGBPR|G9(T7@e zpyzFpB`NoO3Fm-kFrF`9Jm153K7;Xm3gbD!-{0TI6TU%NzL;5q3|wxH?=5xq_NJz$ zb`n(dxN5I+bG&b!GiOe4;hA3#9^AiU$AP4zBzMQ(S3jn=R^Xrh_{bxV+&xBrgA5U+ zKE+;+DVu`s*Kky7fQenhD>3|NW0!f7-Wm z=gyqGQ`@(1cbkiD@z#kLE0I(IGeOXKyUA4(ii(O-(~I=Awf3H}P*qjsHkbLfWfj%b z)HGAd+0&;_A3oCN7TfXl7vUyY>$wHTWbX9N#X?G0$GP+Wj9c?z-sxqCIud^ki{w%haH=B|Jr+;@KAdUq%JvKtff z{!!T;PyA!jkf8wt|adGnN)ZdpqJruvQt>W~|(jH`TUb zB{%8VTGb(lw!uPcK}t$WK`SJRuDAKUcR%<#MF@<1_~D160)_KmuX^`Ab8mLei7nwR z&15vh8bU^dW6?iQ0**$C~;Hmh7+ zQd(MCR;PrjSR=i>1A6+1iwTU%S$&|cTl;xvC$HBeXnPL4l>!~+p`Z%)yKcfy& z33%xRQGlM|lb)HLlbf5HnUS8Ak&>NYm7AN}R6#N>r#>Zlc?~`mY=LKlnaaxAt1n`x zFXY6>j~mAZ)C2qHFVl5HAH=pfmeI0zV0#o&QquEe47Mi^Xs;JM_coQ4k=LN{Luizq7_q_aq-2Bq){Z`p%lmxY{ z8I=m{($K`GE6z6obA%{t6n5KEAYjgI<-M#d;$5VrC|KBNOFkq^G20XTxIJb;Ab34eKI8 z9~q7BG6l9^!|vVReeuypA4RKAt54$fvf-m6#l^)Z*S&YA`ggqI)FtYl)CaZKL+YQ| zW{jO?x2Q`l+k<16Ek*b2SGw+- zv>}iL!e})&zw6u|^>^%#E{$HFZbq-`j>z>$y3`2j6EuxLS$V zKeJ(Y4ZEHVqkjUkVT{! zjq6?2w=_XOl9t43A)JJhIzb{KV!v>hga?N?qoTSiRX8q2LT%tCQ6AS`p*>wQ7T7j$qA8dp3~6TvUC z!}Kd>ho%Ik1fI3(jRxOlMIN_5{%HKEOsjtB?(xeNVdGr3B;5(S-}GQ8R*Qt$+hEL9 ze+De2L=+xPc$V z@#k~=Xpa9g$8U54zmnreaQx3Weh0@N<_3OWjz5j#f64KEIlf0Hes`R2)lH$ZY@C*I zDCYVXPu`Nv6{oy{Rw7U}1z*ybF}6{?59`Iv4$H8o zLtZf}Gm9W)D~eU$w-TMEuth~bmhtRz$dMU3#}xvT_Q-?UinHZcgR_` zwoK9L@VQopfEKp4c;Dw-TU_>A8}Iiqyx))UelO?!9_U8DhjDx#j<0fj$Bb`v10NmF z;-G6d{u+)ymY4sB|Eu_;uc|+byk;IU}K z13-$$`90u`?&ys3WBz!vS)_0?_xSsfU^L()qV{LVcC1|rU%SYE#IPSXj~lmte|-GW zqw(>lPNA^nhNVk)?;byX`SLh}>_Bw2s(~P1))c<18T=gUn$mS$bFQX6x?4@_x7+vG z_hB_{ZmGe#_)VWlfs=f&nl6t?T&l>64}Emvqcd1Y;VSrP7M4>*4k`5&eVee58kkT|~b< ziQdF-2HW&U!VuQMV)Wqcr-yNe2)*EP^2c4EO$UFxg{-*m8l%^h8j}?I!%6gS{05Hx zePQ$o?hqF8a0)^vwD3F!H4$|*(?iO&;^>~$A{}OGv@GgW7U!UbX!Ix_!igH)o({T4HM%(tx>jAe)*q;ClNsZ4buTd680*bzQ>|fC1(QZqzoeX= z#vN!@nJLOalj*Kb9%8yH8*dA()GcOx&kmE!LJ_|QTC>d}_y|KlE&`t_ZF&!|2>VHt zZjHuq<}MuMqau+69Qyl2^zB5<_&Jch6H(Gca6q5)=Tq9L0!C710?ACz%+40YloT(o zloZ;gSY*vv=Pe*CT)g{4qlfyYN5xv(GjjJil?H!&oLpcI@ub zJ`$qwc)&Mc2UuZz;Tc*KQCr(W&NlZcDJea$?+oe>t5i{5K_0BV?WLs|8KtF^je6(_ zetuFFN!1)Xv2aq=)RdIO_372is}T?I_4A8}h!{K|SoZU~`R0g-=xCf(e@^jLLwbjX zh7K}USN9L`@$oUlN?BRHzIAmDOS4P8q~|tGFPMftfKL56zj&f1_+$iO$4>oPW1jM3 z_J(YXd2`MMMcNy2&yagC=F3O?s0g?J#LO_80T^@oRIR4vidD?VW5_V(M&DkF^v9@s z#@>T5?_cBX81vs^%ok$J{|fG!hcRc-D*Y>~VEo7~s%vdS(kNsT?O|+hDT2E*JuSDg zqQz)5iDIn2sS?#5+}>`06;E?`wl&n$)Z1m4Nn2~lTY}lNrM*>$GjDd2+-@6M(d-3_ zk#}2z&{2|n?4c+Act@}LuP%Mu8wm zqaJh){BMJ)bfd$5YFghgZ20gkTZZ-Xiu3kt$$;Op#IxssNw?pATO=$|FNhx>>F7H- z8fH?>@hQGE+P7kTq`FjHYFbfQIb+6*iG8tJDKNa*4Te~`t*)%7=-@jshH+;9_z~KvUd=?aDC7(IG zW5@R0yZ7zecQlzkR|Xr6ZFm-+=KIup`uchaP25*m*8$!#iL%`;in^wDg!D94H-X(7 zOY)0qn%mnlQ?p8HD%(wPe)@O{9Tizw6}h>Mjg6`f!d4NRn{P=wWX!Ck^4W} zD_a>&>S$;XVRUZiw$Ljze4J|3HdcPwfZyn)P@^mp*aSPu^mgDBy|pqiReiu$_GD-&we4(0+DbbQX$xXnA0?!5EP9yuoy z_U+rVWy{`U$Bx|qFZl{L_0dXw{IN$Lee|B2gd1kADD@BQ)%(-DmH`&a*kEJ-+itt< zKV~>x(L2Xtg_fEa^*Qw)j2qRZx)tvf{U>}lY1_7KKkwdk_?KUPDK1S(NJzK=zMSAI z;Bm)By=vzI+}CYBom5v>mtR!pYqxuZ2YACye*=80YpH0AH5q)F7|u>P-*O{->{6zR z*_uqv<+jWqF8S*@vJXct<;WQf`P#OX|FGqk>TcmJ_u<7S@#2}(LeDjK z*8R$!yM#ZUiSsV6xbD*P^0;2bpGCqIvc%`WfdjuDPR&hDuBw_b z15vx~&U?dihF!bTPMxn%#B{a}#r69o(e)y9A#v;0C`9q#<@x5uUw3Z(dDpJJdl5W7 z{F>9;*T>ttwnZ-)={~kB#f{1;Zti|b@|Ih4b?NDt#p$VO)kQ@)ISmbnEp*Q3-rcNA zzKC&ct48>#_#j)K9Ga5L)hSO~oocn!$xB!w|~PTeRM{%4C-cWl}1qV?ag>VLU%UB$tpuUx+VBaWVOLkroJ z96s&J#q35o-qnic{tR#_UFAH-x>n2N@n#4MTs$y|aT$ThcD*>Ce2(u2=?08V96y8O z+ugtq;P^v0{*N5LlH+G};&;EY1w&+`)OwEW!;y*S#EVbj z#W!}=-DTfT|j2Ev6TY+6q|gQpF)3x`3PEqg_!Hq+Jm=R z<>emdt#0Hc+pk~pi5<+h5?m>mNm%bA;E^JAM})l?gJ5AQFPYi0!^|QLHg{*sPD(nI zd?fkM=L+W!lhJAI&>7c-^<6o3?bwwE^v2IQyL9V?dJG#gwy87t>xtel}2E~MIZRfug! zO-)U|n0b*IeAtKFTv&QAMps#SgpLlIAW3nd)2H7)XYTE@X3auOXYHVgw=!gwu%9K=?^PPF@Z(_^=OIS+MjR>BiE7 zC{mly))p7q(o&gPPy}k&51FLaVrKGTA8Ko1>k010)+2a%AyBi_p}{!ipVM#1v||>J zxrxo55K6-Zrdcal*ymzOW)~y%$A$UKo!FP}3-Zw8_}R;xHP3O@%;c=u!B|tnW&9Nm z{uz>i0497Ja6@ z-jaVmDk*8y@Sbt~TAN{1%qYgDrns10i(ACj8raH=%;?oYt%__k`1?{@n+=So_%xdW zd-duSXll;QrNBUgtzTG}2`tE>xfu6Bj}7cMVDRt}BSuV}I%RM#oGz2w+uPcywA2Rn zQ+xspDgp*oL%^6Z)Y{T+<1@Q^>-Y}lDyn2}KJ4uXdxH;28z<($yV6FlgN@#zGA~Tb zCEpP{ao_1XI)P8zcf|ZE+;`-QJE!l6QSIvO(N6ipcVtojhF@{-5v<1!@6r9Z=`g6AnEwnp9lXpm z5O-a3IxzmZjAGXjcTj8t#T^>OL%>E(hrn{mz#Vcrl$sG4*2$4qdbfN?aCo<3I=x%$ zAY7++t1mur?-pb>yjuc0;XEH-I{jO$-T(W!BZ2DG-rsA@VdgEk(hINOzL8Hi?JWP1 z)G&B!*jaw@4pR!9w6=&dmu^O=KCyX9Dob+eS4>|(K>>U48b++aSMKr6% z&wF$)GuQ1gjdmn`K%Z*UPdmh~li2*bI!iz&^+(hPH0pgE)W;!f0P_p?*QiI*Inqv( zW1qs$zptXtw0WqJA3B0f#4Bp=cUkRURk_@l;GjQIovG1}aL_+WuWOvi^6#)L1Xg-e zn~@s*rdBp3|10`6^n*rU)#wxQ>Dxfxf=}rEDx^;}IO#h_ll8tcN}~&ZOt6WGX>5in ze1_^~vtD0=kA*gI&=@vH70gl5%a=`31(Q@;Il85K8IL%rYO_>_B>xsV1biIb5A4V0 zsdjX~w1Q1kc648(+Pyy1!1Ww<#+m zEqXguS)qq-QHRZ!uyItZq=oS&;(xU;>ha17zK&iQ$EoRb$S`YB7c0=qQn6RVWfY_~ zE{U{?UB=@a?R9pwM(+Kqqf~~FNu$`M8S%-uc+vRl?z7rBzMbQza{LC4Z|Z`NeqcFv zoRJ$^b|oC`XJ>j#glqTR3(Y$99ZjeJ8fd)(_;R zPv`iHIQ~XndbJz)NgUsw<0o+ZYK}j!6TkbJ)Peeg+wS4W!#VOkj-1AjwOQ!0?si`G zP>#Nzqw6@j*jctqd^daECH{=d`tBWG{xM#@;70jX96y5Be-Fn`=lH*M13#1F_u=@Z z9RDK6_vyrUHD@q^VXcVJ=FBc_&Pa*OTH!g~X|0ejcduUk*D{qop1r-w-ZWi)kis?i z?lHEp-=(s*QS41iSTWv#;aYF`htgH&pMg)EIzQ9YxX#ZsOs?~f#GO;;cR6NmNRV_hFsk7J;NioyK>Lq%rMuvXLuRE;(LbGxI_N7huOmP z4(|D`fzgA`>}{rK$85OPu7T|nSPU1BISbS%I(H3a8cn`yV0nB%6}yIs*QCmK4J>wz zQRTY^F9%g^*C4PQbWFczyM_ljRqPsY9zmPA-Pe`x8g6k=<+}zy2UWgnXmU_xyM|eu zDs~McyPw)UDjQmK1W?H*U&{K$I8=?H~kN?UlB7UH!-P z(drcSeZXk3*+|A_Mb}r~HyXJ^{tkFST)vc+A__e#H7got%QgX4L?UEr5waF7V#i(5 za?|R{>rRNqW@<)QuNWlWI^xzy$470Zc|kj2+Gba~ABzZ}0~U2Ov!@HcLeFA(8j>RD@kS zKtbq1Liq{i85k4k^bB;x?|NE&8^a$=tVu6G&!cFDj~~aP22B16x=1K{*J(+avn3VI zmZUpdlE7P1*Vz)6{ZZ~>N?T0B0& z2(P|ugxfo*u)g&$UzH!5vN;j(Nl#8C5|7I?Qk>C;B!mPwDdu64wCOOWU z6gz8D%iA-evnFiDb;q$_d3Cg6#lSjRGuKJuD5p`=NyBALb~8*$?fn zIH+oxl>Ivg)h!y;$quUTgX$kR)mzN;d^dIN)OnuN>bk~3Rnw&GKRBpZcFXYSEFrO{+dJo@zca_FQQ%{DW9$PU?X%`rztU1&WPmEj?y?h|Z; zYH$xd3f4cCKLdJHu$g{`yRLc^8}rLFv#w))i6YaaKGJF!3Vdi%0p&@JE~9E2^?V{K4C}Y$?O=~j!N$fPU-Bk zuDi%fxd$Omu%Tmu8t;&=aSEge-Xax;KhkAS-;iF?3oROoRcj(_X}!=(!_9)Hhk(op z$j`+7c@z|7X67>Y>cwqVgWcZNf>?38LOl$Fy{D|CONn-xV09CFJo=6O-D`{E_c z>#qpIdd7K_ASF+EVPSQ3Rfg4|(@6$N2m2xQ@DBV)2~*s5LaTgQy8E17WSi-cy>3U(C>F+(0#q`#gydGOc6iV$dw)s^pQRfL$tICONm}Fw1z2}5Qitz2uSPLZxb4&5%!^qbY zR9sw?Ur?M=US7uh_(i3qIVIHZ)c(A&?V8O%a!%xC(7?CaDQTw$BHAU6Zcf30CZ@4()i&+;==`#SS8 zuq-P>z+<<7$5@t?LEy1~)YPP;)YOwFPo6(^^w{}RCr=`1>McQ4*45+o6sM#&>zE@%tU|mWHT)#Gczsa43hJnOG?VXNAOgvXG@D-hg@a`*~5qlXEell zvxtT@3ulB@oo;LCFj$bFFXg4<=%dq0Y@~XtdaJ3YTGOch<(X%eo>GLdo^hC4{9sI+ zE>D6czr-dL7&W~wlo)0 zo7b4>B+M(!>=RC}pH<&65CvO>Aa*!EdIn0j(!e;Y@WBEbx-8DB*Kb56tdxOUsSy={ z_TrcwuIe&qKg;9tu(K|=pf2AgGeJ=`7lMM<#T9PFegO+pOREsBwc{<4gaOIIVG{12 ziv$IoxSnR5F~6b@iuYqBA$_h<^Z3JCr5|#VhM>X~HX$RK#Xsuj;@#>h%8KQ5Dd#y| zus*Y#E(>{1mz^x9i$KF2vzqCPOv17VtxHeI((T^|d9Z{fG-&M)>pDzH+1Q!jBc2od zF}u#a4`+b6e|jM(ECYpAps)~fft`OCot=F+Ir&^l%9*oCr;?ISr|;j718nT?-07s` zl$4`sNlB-ZlTK!3Wo98O6YQ3;o~32g_MlTcsMJSh>5gJi@In29~SJsdG&{@ zzx?56Wa|9>yYIe(S0Of9^-xX7r~ezh^7h+t(a5el9N&1@VuVnk7uC^0dQVuoG=@H; z(@+J&g;0h4Pk$^e=jmVC|GYsvsZ0nILYZ{32z@zW!^;6HA<6Kv!l@b?Bcus?g#}3N^p3D( zNer^@*9s2_`-C)Gw7tS!s}55CqTiC3xH%f<9md&Jdxe7URZ=IaOY}b_CT@&|v7E>n)X)Wu>JR<<&LSl~o0HJB}%`zp|?8%F@y%rdL*0*XHKt<)V_5ge z9$5C*BhcT+!=o3@kotIgdISXY2wo#R3KVK(tlNWN7 z3auc{`Pik_I%KUT!@{_OT+m$6$UvRNWQ-3=>Y{OU>rrjy_!BvP9>+Iw{Kigv=RCeL z3b$b`lSj8`QMlQQnWo{H#l%rdm{ZemO2NzH98Z9+QNxFMu@iW)b-Y+TD^{Dm66TMD z`NL#y=L(|v#57OXVzzW-v9~bgTKe+;7Gn}>H4nAAAGMl=?- z{HMpBc`+K#UeaDrOf2-Z*V}Ks^VOjrHm>{f%P(mVo1%BCw_)*qUik3CXc)6^Rm0W6 zwrC*)iKjbgC*nDy>09U(1B71M6r{KwuYdOK*Xuuz|Kx)YK6viA&z@R%-=j-jWlH($ zh-1Xdij7I^9l}$>9l{_;)C+i>6}*J|g{N$H*po27g4BL$FT6rklReLVG7hb*DXD0% zi%pn9RaGr5#g!Mca#Mf_f`-G~FECZtXF_in2iG|1eiPag_lh&N4~w$OXjgpmv}>q240xwQ2B+HSbtnZHb1 z!JEIh*X(f@9ypvCnJUG+2Z*{D4mU?m4&_ql$8Rbt#%H zB_>CO@9*+Mr$M@(` z{mwPmsl#>EL+ZX}^M3Xq0eZ-`a5nF~FR*#v5#pTpt|UgEubI~HgogQmhPm|uQyPjw zoVyEGn0a3_ok2H9fX=WvT#!f@d4Z`6_k=)Y(0Y45X68RIGvhEb=VNBh$IKjgbsmFj z&eW^p-Ca#xY%Q+Lbl~1pzB&oQjm&MMmJ?wwEG>@1@ls?*;cl$C?oD`?0rfW`2u5C= zT;ZC6;p%h?t|Z0CtJ5!BQ*vCLkinHGxr{?z?aCp0_Uz6rJag#Kq20T8@BQ_cU#}y} zZ-7Ide){h(z4G_xpMSovurTc2NB{1wu>bEk#DB$#Rp0IX8hJLJc;bnn1t;VGKV}ZS zUd&3XXl-qU1|)Q-9oJbkZh$!?sWrc*4v9Np?}K($f1PE;edgfugRSf=PxOMWD?7CH zj3`|RX{je}^ zQxAX5S}3B?*Z6>1pRG3NK_a#E}z5c`uwc@h1PvSIR5B>cW_=D;>68 zOl!fyyNjf9TD{T24c3t4>z-9-(!>cUyD{X6WN3+ePF zyN2|JE6Y*S>1gpiXz@Z&nSu2>h6M7cD#^v^^4yZD#*Ww?9gS5r?UE$5*Hl&JBM(hJ z%RSStz1`$Z_0143NZIXeYR3c4k&cKxal@pij;4BBcx8hL)+vvM%EILR`}ZdoRt8MH z>#n<|22>vV0*PlnJIaEPMWY8bwcz0=O=L14Djg5h5I2@k5Hf7XvX?k71W9MubJPPo z?Ah3$XWKo3#^JczxFCUmT$w8ge{T15cW$ZzP`e`8C}n zIuwJmPCUoV3%bKJi9MKlvn~AedGsqvY9xZj{%o1ps2Tiq9f zpB@1}h1S(I$U!}^o#uZ!)!uSF7AsW5auIe%+fcI^OeTsQb>-LPxp6f$wSr#{Q%p<@ z9yyfJRGaY6jn`$vT!rQ>rzwiz=DTnX^1YFB<|XUe&v71PbHnZF2iqA$o#sYm>GG59_H&YKi2?U za(^`E-hP|`*K!72&l&JwC-=IKNXbOS%1K3vzb{=@Z4)`8sO>vH_(U)KLt zUWM(v3h8dNU(fM}@Fr~H_=h?E=*#h$#jLBfO|z8vGW=MMeUxM8aBO#a--nmp$niZm zeksR4dwJ>IS{Va4{&0@Jh2vLp{46){wc4{>OPV)+Gsmyt_&ILi|F`wO7Jj}PC+I#{YldV9u-0r2V%D0P&zZF*C*NtUamlweyyl_2=IOlV z$9T<)-Ke?B@v}Jo29EE~@o)aWitlq({hdCy%kv@2zK|VOVOkJ;ZUu~a6&i@!9~cAk ze8?V*iFrQc5sZ;L^C7z&k*Gq==IPI#J!!@B>tom@3CH$uX=I0duD6nL{C*sNEyrKa z@ejIzU&HZ3Iet3FKgRKk-M|mz_-!1Y1wLtdq*nivo%k+!xlrR}?FpEhV>Dh~qw(@K zCogv|$5%&PlH=`c6^7j2%a&nWN3wMo*ZbK*%;k9Zd#G%-Y0~0zg4o^X><#W~ZS`O~ zqAO$P^y{=toD^fzy@5bs^)bb&MS*>xv}FmkE6mXfrnEu8T_O+ zVmYH(q@aRt5qIdICff87W^KHfPqq74&fjTgZdp7iJHPUPb|Q8hJ_(2v{ES5kB84?S zzmfnOyW{*yA2YI1^VPR|sxvjJ%(=^`&e5oja8TW@QEhNg-P;+<8D&O5+O?=Ys!@$` zQ2mWYb&P}Rw;I(F2i0#u)e+5!W6j6^W2$GgSV@*anw{YJ5>y@WoJKPpa8Si#e6wtT_N=lm>;F?)vPW7z-PiT%$^637R8 z%k1lIc(N7S^*7anrcPbA?z%gg3`Ktgj~BjyeN0e!`TY4euyF}e^br{u5jVJ98N~YG zx_gt)(Gl0(jSNDvRlny@@Vabvgeixp-9&t_D<9NJ!G^D`fbiO&{ z>8D4}ia|`=H`*)K5HsM(Cl4Nc^2td0H-h9JeN?;ZAAFE*LQR_**%Rj+&lVH^Sw=nn z8#(T@hCbPsMvhl^Ao@S^s~=C!vl{4^Afk%9Ol0` zRz;Hmz7DS*Lf^iP62UJ{Vm*TB_M^ats`|Eiqy)m3^-cA)4S3FST!^1vm3I*G{Dhk2 z+ixFr^8h@baKP9RNI4Y&jJk^2mNGmv9S;no4g~fT6%{%33s-Y=^ifS8?8%>wbpo}8 zK9$F#%m#qw_4GlN9N+ARd7Ixfr8;%8G8s zC{)~MUNmAv{+9RI-;a5lHkB7pcz8G!R(||3LaKzc7z?~5!QGEn$onL&+OcEmxY=-S z-7$1zP*DE1m5QRQ++G;T93WPlIidcMhzNQX%C^gKb5_0kv13*5ZtM`p!2Vo}pBY## z$B(Ve5262_LI2G{|J{ZD8`Sk_d@lR(sM7UWeBJZCH>};4p2@d+_iUtCiB|hxdPTuD zHw~|s)aS(a@Z87uqSfbN@5V#ulot7hg<4r zD39jAz^1-B{=wq!2r^3*EaHi$q~(eyln(n}*sh<8E-#AHYTb02kc@E}9KG0elv^ zqvfr*zLq!iv9y6=+GDd9yP@eR!kQzBV6MLbWlwi9$|sWIXM)dEFf?@NXDbmT{j&t~`x# zeH!Dc3`H)uUi!A?>O$1MsHUY&4GQ$|Xw0j|;?Z2*Sd$BdC9_O4;A5<*BqbG*RV@;> z3b9Z|q}XtsPHk(m3vgf9Rh8=NE2=sIdSK<1g$|*;9?$F$6qBGijqD_vY!lb69WrFp z;69ScKM2w!xTmjozlc#oh8XSs!-fs(sgv_JzWL^x-{d4zZ(4 zoKj4ue)#y~kJqebrJJ9Be)Z}#pX~eP5DSGpo^<-yFMHRlS-pB-@wa%&=ns|sXT-o; z_PzFs3yAJ}e)a0rh^v?%dGC`iys%&_g1uwCii%{^z{6W^t*dK^)wd$l5}PL4EIt$3 zei5k#XS0{METw(qs}|af=YmHpQcR2f{EvV9W9dsmORa6!6Qj&=`QSUtbLAmtCG;Zuj=~*4G~T6)^}<3My-;Br~%z83{dcT&F2FHoU2(N%HZQP+(t^ z2?h4-p|4;Mf^E?X^zjMlub3uI9zA;Wh+(S1ho4!S3|?bdK<@yrUCIKQo$cAuh6l3L zm9(d&;mJ@4u#m_%C8ew^Cx=R)n3#NwIvrFj_S6)~=wZUG!cKRDcG0+UbL%Ti(6~%Y z%7mpk+?1(_mQSYknAJv5~bLw#XkPE8r|F_zcl zl$M@2ckw*3h}i;~YMT+7(9(iVrnctBCfq8}C0SfzMytN5smUni_3t0j&j%gp+b<*{ zBG@v$4+Q|Trm(c3>f*(VSgD(vN-ySs0dVxFJTGeT z7RNu$@tfSh_vHBD9Df(b&*AuYx`E%D;}7Kc`#8Rl%UUxYosTmSB zeE*Cwy+@5I+=@V8CTJKK~p^zogt?53f<>u4gD6iKNeI zO+a#D!cRX<95eH=#~zzLXy~+Q*+0G>g?%huy4SblM&l7oZ>X`5>hm5%qF5=oIK!*5 z^4e4OQX8)^aakK(P22%o5x?g2H3Q$(^yT)l81(*J^#1SA`!VSK81#Ps?6kDh^9UME zNjVR%b7l_yva`~VO)lkpGBXjJr5}{o9#~QF4}1N#wsxG860l7+#6li2uo=eH?)mJi zufBQ>52I3lgH*@6c1ewRjMU9yPA&4+wid9pXrX$ikXL6>AA$;w@a`Tx2AX>sdfXHh z1v!~2gx~!%)}>gXSFczhB*bRi7%_bIlTSYR$X_h!{P1MNEKO2w z#L0>0D3!|S3q*E*L7S}>!BdI#>(QgJs;asqH5ItibY;1pMd1h-Ua>;Iehh5=vkME0 zOAyXkT3S+2P?QHzcQHG+5b5zsODdUBpqx&ja#+pq4^K>zBvFT!w?l%)LV_}|_4mWM z2>d~72K!d0uLP5kI1q!QFmotl{FdJ=@pt*@VZ z=+LaF`Ae28xqrfxIdf_c{2M_;|2|kBNgvb4(yDhKe+rMv(;qn!6LW2@ceLf2Jio#v z&xdoKzs7>aHlWw$_{HeqzoCa;Lk~ZJ9)1!%JgzGL-08GahZ9a!H8tU}2p+z5l!SAf z2M;Etrl+Q67vv=+9Y1{d@Y>H)O8X4+MRZ_c0lIChPSEGL1poW~ZQn8~$XN$;e3?4G))SAs?0=8*x9ze(;oTT;>Ez2((Nq(=3f#scm2iqG~8!~di1JHVSd zvT*NJm)zvuZMj##nC3tL8&d*=gj5nzNpG7@3d!#3S`NvkZW3UDG*TcWA@m-KacDLe z_udP*cgsz(^!~XcxqywumiNAAC6;Xb&&-`UbLPycljqL8ZGN!Q&R@>RG*dH68(Xim z_X*lFQ&QkJb8t^u5?m}hbh@^>%SatvT-w&wg)Qal*O0e$!yY2-6_dQ2lbTL%+jguD zGfI+3Dc|*aTPW;F&X3*yaYqKwVY4L8y$1o$BHw>D7#5XPk1Lmw}|beFF+9UIr?s z`8=nRv}NPqdnmq`;{T+0cZz>*2yd35hWm|@TJd}h14kYV!<(AISqgU}@as!aH`x1d ze+!b*lkdHj4n2tW-9`I$7{6~6G9Z}<{3!ekg%41;*U*!XB>_p#6A8%cGz$snEH8ER z9VQZx^CM;nh|C#@KPRJBs7%$HA#jpW$1-25X~!E#$GFD`*!08f%NS6^P~T?h(LC#< z-aY@P6?E*brip*oH1TEp=n;#k9%^HBISllD-YkcCktyKgRE1~BG*CGVJmxSyl1No& zG95#5m>AD>t>uGHdw!Z|W1`77KB4j>9F^b+KYRf*@!oKLSXfTRew_2Uowlhew@zFsj;fcs4X zmk@y)N}PjYVsD-mDXXtM_Z_(9)(Zs%wS5AOqgY#jAct*V?(4{}{X?VC{NAFR5{IMU zpEhTc)4N<|Jo56(FW)o&u}9*~)&}dhqO910eY;+GZDlYf52fN5xxbwZm3=T4TE z9E!)~o@aTYgl*f-wI*1#7pCm|{@3HlS$)2X7ola!EVqJjf_(~ZaReue9xICr_R{eg1q=X+cJ2cE6j}6_zx?d%kUk+; zV6ZB1Rz!eb@PtW`fgxal(YMckczIM*)SO%5d~Mu(r$o-W4b4!V1I=;EMt~TLC)&LMYk(;fKc>%$#thieVlAEfHCs+^dN+>FB`k z3Vg#p60;KWCbCCnfN#J3?(0pPHhqh)_21^@A-jx5BvXo^AYl3vU|B)gn5k1^=700e zV&fv?Vu+EeX{7|bKW1Fa{_#hwak1zJp-}h(J}C0?dg|wQ{RA_|57;|bIum4TTD97) z_O@nif*nZURGaFqyY6b4vu4ejfUHDc#10j^y!qyIkDHzZ-MlcRbNckotj|CHJgbvs zQ!0rAG#^W#CdnJw^H&m3M_T&Jp{D5Bo=i`|Jm#O|C0XX#1QEA9-uSTb3G!k$<|GC} zSZip&dG7BQ2(t`4R22&H6aN+A4=xznLApF3kA0}u1}WFGknkIYb#dvb}wd`}(=*~Hi4$vn;a zF>K1+te9bMQL^U{;aKRbJ8LXr4L`KG`15iI@WPJJM+O;k&xC07A4~8d5pv-#yVG$Hs zP--!6YwT=^F&IZCzJ!G61Snaul=+!;W3rH2N66^$%3>EHsC_N#MeHHme0n28<=QnM zzE%AH&L}PH8SLW|5(u*Q@rgh=5PyHhmE(;a67aq&IkC`SL5s!oX@uZ;Cu5EGV*d>I zBL|a*hl`7o6V!CmsXapiZrai{vz%kg%Q>dpB)6IO38`&QDq;-VCw0X|HNUBaQsNVE z<(ID`rPGjB7D4GAgw7kg^O;5bD&l~o04YGEJK$A}_Ag6tNlh2=~+rBPxroQ|0% z)rQO#L-*n`+^2E9PWpXMUVeUNenv)JU4A}vjE6ZTQAumHEYXkZ`=Jz;afA^(9XWO<-o5I3CsX$#$9~emA1BLL0bwI7*kdJ_4)@VfYjVO&e z57`&+61?@uEAtTeyph=`e&?RNyubY|j{I79;snUtil~BT&wlQeciwpiyg)60HW<7B zIZ|LUN4Ap3H12W#3_kh7bRH;&k#wV*X>2VlwJ<0 zS>2oJaY}$c1%^!Zb#Mq6P^lUkl*;~oY$hd9&USXj%!CLYH4)0$GL9`PLpZx>eVlQc z_^L#V3uez5d`!u(a%?Hk7WtBh88LYmSMW>?mlQBH#wJ+5C3V=`h#VUm9S7$_%)b0w zpPAPsnt2^sB0>XdKl0p;Bq-_SWZoZUJFRDGT`9>og-N;! z!7~t>{(L+`D4v1X^sSMSgya$9)3=DMa!Jad^n$XooSYrJZXD%{QI?x=y*ywK9du%1 z;@0fTy}`HKa?9bvhmT!?3l>Jp8V5L8OZqozwfdX%7EU-|WCa>=ZT|70pjhE5uiAa_ z#+PZljRdv4oqCYqL?n?2t-U!m>Yn+`=gfZo^*MV_6esky7D_b32gi!7T2WQ2TgxXM zE3uMDq>RkQRw2W-aP@RW?kP7MYkuCIZjSuf<>BoE8A;UNuOHBHGChnS7^p$d33UVd zeyVwo!F(*jd_0Qzn1cBr5`u$0SyGlya$}wq`=T83HEeG7CI}FTGJJykpLinZ(MKPx zsj2C89AmB=IOeeg1eeH2aLE|6wt%;a+)Iye1u|OXoEU!|3pi#Aiq~c5l39>|;Lr{{#GpoE=@)j^7lQ zVm&nT=FtydcYft!DVow9$wx?^KbJl~T*u6mV{T#i+VL&a(-wT`{C!06rnssVL-?^I zPHa4ASDG%efecA^g(F!|LZn>V=<%dnJa`DhHuK0@9w{p!byM`w=DI0k^?7**$$XJ} zk%g4|eio5(CzTK>_kn1$lso3rnr6G1K5Z?1S{a=!{m|2niT9?nHJjomQT!o_?;OI9 zHF6LeX3Lj<=1=%%w&Q0@J~&%r_MA_9cA`BWrajlwo|QvAkBJZ6%ky{{F&UE7+I;Dq z7G?wa$%l!&noh2vv@nWy;qm+#!MAXhw$OZS3)fIuK5fB?j^HpIK|RGQEx`9vd&&i!6~?i~oFeo{av)ouf!RC%PCQo|sR#6lh+_Ld09BQtP5+Tij$OhB`) z7$$`zB@piglY}NOD8RiVzY+ETq5~kWzuWwNGK+y!1i!uw+fe~iW_|Cy@DCkTUXw3e z*xw)Itb(@#G|JnNm)P|}L2;{8X&Ro53XMvR#iTUQJdFOt+F zBsU}qRVv4jDpZ{2`tpP~!yvb#h?#`>@@541pJnW-D)LH8N~-gUOG~PAvq{E9X@{0% zLeO@sW!-FJjc>}ES__l1k8b@X@FT=-sFC_IVIC4Nw4mzQ!GrURiwrFW{Ap_ceB2FR zM77h6%%`IJm)-M9bsSU5xu*)!u z%Q1@~6%{!MS1&9?jDJy4d45T8NpUgK-yzj-US4HMQ8E52EY3rCP=Xxc>bQvsd)wQ% zVV^ZprJJ<0)TmGbVw%bP%7G zex0@(F6o`R?w;=6zRoVKR@o*#ko7-t)mEGDF#@KIVZY*k9eEhN~+L-yXhK*}Nc52a&KXScs zRJ(5G&BLjTKZ8xxppIkWL1zlk8SiWoMbp9YZ{tYtrb=Qxv<02Hg-f)B6!JDs3#M9P z^|Xa>a|_+Hg?nzTd+2_;6z)f8ft;;H@S+&CXd)} zq^=Y^07w*9poWoSBKa-X!f&}2Y&kj$3xFq{4&>&!0DEo$_MBfuX=!Z)}9W z$=x;=jvPFGVZT14xNoPKC31%W$fw6P^Ua6eSgDu z`}S@5*ZcU)u_4Df`5TYkd)v&JOYeH*jYOd~DoVM4l_T6?EAu^)*KQKW&Y3kmIwPJ* zX7=G#AuX6Sd&vWjz4S&rw%vLp)#+5nGP!8|wr!kz&D;0I-G2ML``?D%SQQww8jT}S^od&F>nmtPE+H|!Ay&_*s{!tNLz-fD(;@tC_z zMq4;fTbN*O!H%}jG}MCG9yolTl6p|Tk)Fj2jxh3}n~7!ZN#AwQMB>eZZ^xWv_3C`q zYC#8LX8?-={x9T15b(m zahWM+?N~d(?AcgQTTXZ{zBuu{XBwIQ-gx8F#%GN87;j_#ds8z0UhDOaz>EY(&?B?m?<>BG!?F|p!iKG9q zn$XRdlyBngddv+7@899=rV}cIw(#$qP^QzNg0^s-(}D1Ind+K4L2Mu?NC0JYG-)A@ z*#V-%PbcW1AfPARb!WkJy1390T<3Hda~@5ntb(?1ol|y9d62Jr=(v}KJZNh=oZR@s z=?XcVuD^<=nu{{p`&95RUP@@Er(@I5={UMHOZGm}c0gOiQ^7N4yy<+-8p4NR1){Lm zW?`>|VXuXey#`)FzDDwsB(R^W_=#l0hOVyBc*+NyxH~qs`)a;IUDBPLuFqb?y?gie zUd>|oFzn`7d9WVlS93nfF9f@k!*} zr5LmQXhf|TzJ5HdCMQ@&BHkBb4UnPx6_U3tmL+vz5b>_FBd)fjlB6>#6XTOP;(bgy zw0JL(QC%J7E;vVG=KqR*5a)iyhuMfvh;w%&G3g}22C+7hCZ-FY5a%ADA_>`ex*R;ZQR@*y?^$kz=;c&2L~TM9EUnB zFNps6{f=*8)O$m?e?OB0?DkdnFNuF?HGEb%`F-zwx$BS5{;?L)FI!U+YkXC<_EhEN zyraLhv@B(QV35O`6)0>!5#{WD6>5d zfBZT7>|OIAeSbs3i4joj-q+XIp>URqdOEtg?Cm|B2DGfTU!=1$Bf)sObF2ErLgF>U z$rTa4(gFUt65XAgw*T_Yzkd69^ZMT2!-w6xqT?n+goP%N%EW5^{0r(qvt47Q3Q5qSu1xMOK5N)B|+=4%C zfiF8eP76}nLOE@r)!f1y+Jd_UW3ae|vCdu>n)N4##o612IUpB9UkSMwUQ9Kgy<=hL zR1lAhg#=5~96a`%g!p6}Of~yt+yiLU*z6bOi=I5tcxMT)g+Egl4KuA!? z_2^pyXJG{7u$+<=A@m47JEPOP_uRR0OKy!rV!i$Q_c!@Q$5}xClFz8`>n-WK@(u1DWnVSDy$`{y^`!EFV>YNw9x-Ey71CN;WypmtV} zic|Wdn1QX7x$fR0oRHME9C>5xCwWSE1<&LhQh@`xWlejpo6`OHA-Wf%g==Mw3LVN=EM#TcGePs^25SLL2NiCmmXOfmO?;1}>x$%@Z^*!jy>A1yHcoqzoeG1c#| z{f0#OqYFa4Tx@l9moG!uVcXgk!Rv7Y(}oM}5D{rl-TFr=jW!`RDAq z{{C9HbGEkDRWaK*SA{~*)6rauDjPL5Rh5?uGZE^TdHyK$Lhnkqz%^jc*5n?}#b7Xs z6>zW7=`SM=r3n!#&F%fHP&2DpXAQrQrj#6MyB;5$`g4;1+_ULRCsFy?b2Y{LVLY(6 zca=3&UCQakzbZVZ%<#MT`6r)z^4$pt4l02FNmEk}oGL0WMk47+v5&m^>Z_}tSso_W zffF@WisLw!B};)!N!0$N;!s_%|Z_Y&`Y3Y(Z{q$UU?~$aG z^sLORf-cwSd!E+JG8$Tu;p^>&|#m@DeRWX&<6NN+of?X zNHU^DG8S!T%RqlmPoGZ5utt2)(EV?gzlK}d)Pl*NDxB*=A{eR#EG8Fkebi`XgMO*p zJvugmk2h2B8wgbMnE~nMT1&P zzG#4^g)7fSBQ!0X*c}bgv~bP{g~G@A)PsmT#lJU_|9eQM!KWX6&b4&T^Xbf0(wUn= zXDSL{QcLl zGp?S4(feIY=kF#uf4k|rRZwil;QWn_wW8QsiZxKI4aE+OgEdfW2gUYLtZAq9kAr0? z)|+ChDb|(ts~yCerC_4}7;arLZsRTJ2U%Fqvu`2tv%3S4pM`VG^7H6VYZ|c!?X`mT z>Op($9qe^W1UV|RVN1KPE1z{9xa@(jH2U|k11@wudDOOMWbr=WRO0j+v`v=7y zq1e9v3YO?45o>_|hh@nZwvq>4s}}l^`NI1jeuZPQh$B!LArtkyDPn|q*p7^Kc=jWtd zJa_bHrAV9p+tyQ1mgeWDXPo)t^hxs1*(Y}FIC=CCzD}G$pEn&ooOCfGx468bysW$e zwXxbEcXa4vaSQ!Taz}D9vCbbpc{2Iz+0w$C>^!&w!68?3^R~q>1$k*2g!E4;L4^IlAM_{F# zv2<>*UyzrbP3Fn(At!#kX?K2)7&j?rU)@R0<>0}>Uc*Z-E$KV>%Wpe(?>%s$+$i8!rKG=?uAbC(;wYrp1$-I1 zkC@F+l>CeJxqAVi(6r$7Gs9Ue_4bry|MuH&DFc{)_ugKCmBLok-Po#aZRphctUzLu z2WCxQdRJtE(0j6bH4(_{ZF)CtO74~|ik&fe;-raT*7fPXZGu4N6o3dOUu%gy+tGwC zEhF%Vjt&j?X1b-5qU2Z-2+pp3@St~0U)87~1 z?TCwC#FFJ_R#vd{!J;G* zOT-en4Zh(^-;{qS9hR)JOYKuQ7nw|~|U zUCd88k#sgK2jPUdsY%JHh_p^VfA&;zN^bVK?c4U8I$K;pDp>T@RN(?uS#=;=0B8>S#*ClkVbOK0BzDr}IOR?)D#n26lP*2O{R_oUd%%T_G4*350{CwDJ>U90W1 z)lbiHWIN9xzDnwl2wLgzty65zclMO<+vB%@` zlyB%X&I&$=yIMoR6)VKLL|V0HAaHHJXvw6w5qG&uKkk~x%Am?TEqE$WppjQ)XX5gf zk$G7Vk34918y6ZMwcc`}d*-9~x=EX|Yi%4Xmk+ym2RhfC%5SMUvu*F6sppz)i6?sK zhRUw2B*HJUaO9p zE^q{a*WGchh{Ta~Wu1&=)Ds@`0qxp_hT6f?2sb({2GCJaQd&LYB02Gqht8#+zH~*e zk;|MNacP{e;;q%O%t4RIb8lHVZ_ac?y-c}z!GgK7=EmPVJ!0bQdGqdhjc6LzO?-9H zEI;=!tZk295U!4%9*%CvTM`g%x@yk8Z-M9O{aREOmsZT_)f9$+ufg-0f~~nLEH`9tW4X6I|vFaGB7o>_SjgLB^$@x1|=PrIA9j zMQ69}xRiPcd?796QfV2OPH72-WQRgsT{U?mGNZ1$urN89bL#BOtrqF^owZe{Op}>e z(^{2xS+5E0=(R#fnpJm8ahqqbxA(@4US7c-ofSEk#rUG9EMs+X+ej0hj7kB^b%b&J?_uaei zZrQT^$1gWW8cu<=Bn?EOa@Kb!Z%tm`3La+Wa}tH3uT$XVk|4BIw6j) zwFoAE{XFAe$V;@jj0t!?mia~#YHQPjdFWL-EO=mcKt#m!>5-8Eb63VXxcgyy26#FM z+YuqxDMm7I8<9xc#@NciF0)S8*K3??Y^H{atgQt)fy@BHY3~tOE3Gx56Fuy3z}dJ* zExy^!-rL*9$IHthXz@~KH+Q%&API_8YHx3kXz7WZO-Bb@nJ%N8jHq847N!xkx7*4) zIBTI;pzntAw!Tr|>Ug~q%7apj05b|QGhhPnhOjX5W{nQ6M{UO_Rz$HS6sx3Ir*W`{ zDAq`^`4sC+vF?M|(QnJg>Hwzt4G~-$@i7ppj*<*CLiHO;2Q)J-h*zf4;RyMS5c|+?~ zHneVYhSqJK1?y(nSVmvB|2lTw)pIarGV%rDjZ**)I#a^LU9mTp) ztbQD9EycD|Y&XT4_FLaL*anLAqSy+G^{4%I4PwXAa@6rPoFkK1XgM+^ucfrtAlhrsV6UUklqVgrl48>-wuEAf$HAshtS!aLDAq`^hHM0tb4g?fLvDOruH4gR+#pYA& zB#M<%to=CH0*X~p>|u&MO|iZIL#%0)0%*T`Xuqduzd_^l+eF9QM6owhtZ5(B3}Wvz z$vm$QmwD2)y27(?l0=G))Iwcf#-($xeV$Fr_%r#!g$t*WGjcOiQ&Tgtk+M@P5>@4P zc51u|3%lzf_@w65bird+e>tzJ?J^{u{IaUrPDnhNm1$cyZ{JRGo^Jjr{YqBK0hn|S zq%`%pA-$4oZ&T?p2r5U5Fjk~;pYZ(i8n5;1b4ZkScPC?`fW-4liNS9LB%V9m4b{6o zfpg_&sMEa{nt*Sa?aX%e{P~5vGT~e1du9)_lUb*H^Vt{i^^VKst?$48{!cY73+Kbb zoAFB)L#$9}+pVlL!q!%WU4q!fOVHV85`mr%V9LKTi$Lr1nTXej2oxmEKT!l< zW|Ezwz3^C;M5c;~iSbiNoPv>aCd64X0Fk~MG3#z*TEeH<*l4`!>rZDw4wBn4ZCZ#x zN^3!P8zdlu48jmZAZdMrjjxM~Qt9UA>($>R@d<;iDa0#a(j=1BD=NU(2Oa>ven>(f z?&wfB^z~`10|LZ-kb{uzTh2lRam)IkL5)(gbKt9&|!?;YNt+^_{+P-=KR^4q7M zl7tJ!i_H{?PD~yhD?t-0AqT5L6Qlz4{g8tri!0L7ET;?jBS?@m*Am)@tPcI+3(M() zd7i_677>YX%6X&lzd$Lm#?>&auD)S{3A;Ed&pX5=FDKLtwGpa~c(TbdEv9KsV9ozjr??RJ zp#pMODlRC*-6;F_Ur!L{@$HmHbainPlamVycrI}vzowHF355*>RV@`@C7C5v^}WKZ zz8)wudvr*Ihh)EfS;*|$n7<2^8AC&N?D%PCVOvSo@tr$&9?xpixgsZ#OJ7^{Y5bgA z0nKH@;jJ4sZd~`#hZ~@}L>?(ZATPY2iCnji=OR5_DA~!ZTeq{!;Cm<7!ctem<^AjY zBb@mTAA1y)3l>NCgR5+04lu{?;M>7l_JXS{BB_t@({E5e9R~YQi(Y|V@r_s}QSMRu zQ(|J`kC#1eB_?YyA-w(}S%TRRFL`t^{13=4HenOJ1ZC$o@>1ib$rF*awZ^)nq`Mb> z-2%J*7Q%LZVtx|6=-Zo(oS-YnOO$Y-Hx&K+BuM@2LIT^@J|9Vo-b6BAyaXS7ma2&{ zr0vgWEF)D7Nhtxe;r3X|~M>k4bd zboBMvx(J(^p<~cP17qk5ZthNyOYJUKW6Zhv!6th(k+_Iww@d~7ziP0e4=dfmIx*m8 zj)a@x{dU66@NP%=8LOQRk9D4!$CB8Zw-CFOH!rd6;UzXdHzKhar<*0Vv5?MNh+o$% z9(fm!OmQP#UG>w=UR`6Hssp8|NP+raK2=G^@&Cc83VQv&b*diAYe%Zo|K6!8Z)|Zu z&L5HsE67*hB2z_xKEr&1CQn8>t?1|=KNb8R{Q{U6N|){J8tbqy>3FB=a?l}({(2Hz zak)u56q1%rYTp!Rr{$HGW}Z2llAf8FR#Xg)P;p^e#>LYoPMo?}Se(Or!*Tut?N1fqtY4I=X*RS7uA^EF= z{lO}k#>sP_9BjCJz;n)Xe_QjHUv{3UvGRLxK3Lfi{`IZRgMNz2Q@{T7?CLeo%x2;^ zB!7eS!Wt7KEth`!c=I5xbsqU;IX}&9~J8Q1!-2+4I8w)BRw{rgdy*ic%N)Em~`#0y4P|4QX z2AvHlJk#43{Nxg0VRd4Nvlj}yDBZoBtrbcye02M1S$HN3K{tz8{my?wocgFGGW znVFnKukYd(YR{?3<9MYZLPi=f8|c zJk}%F0rHq~f}dqVB;+2LZ+rzIpl^_ueEp7({|z=V!~OB6AA0^4V`y=vd?-TP2DV%NI;ma&oPIIMl&rA678#<>A9b;e+3=KT*4T{Heq6>$fV-9t82EeYSmTpwFkJlXNpJLGFtqDw zLsb;5qg#8vA?6)E z1U8O*;tP(>G+BFY9ATE~M%afkv-vV7SsB~6p3O^5O-e#4*0WoG%ea_kUmMtM1; zfwEHQN?;c;+lT65AJP!(P}MN&(B(F29cu02twTL6MeSZlU$}9jn_IAFXIai=iI(B&tn007{AGHo0pw^+Z-5b9Aaoe}wzWeUiUvK;Vi%k=ae?ke9 zY?!!eKC{JS9r{l22*l_i>yVw|E2 zU2(~9b8{3+U>%Z5p|u!h9qJPY1ZYrc&_OZGI#ky%XdP0hXvU6fG$T1wGZITRBL!45 z@&wh4cnxVrUd8$HD$bXWL8*f`Ja!wZ z-dS!0_ui6n0Yc-2b7#*(!F4`m%Mhtu&nO-PLX#?7CaY8v;-^Jn<}Ejp3yAu}bx3_O zL}}yM%!$62R6PC*C~Z*RdnP+O$#PBxFEmTfx5(0o6E|AUu~--b2Av?T$D;!67c6-9 zhS^aZx!K#iUUGo{h1_MQqplIHRNSEy=NC$1^=yVaH zlj%IPe(cUz1no?Wh>Fmk{t9xc=weQ{YIQtR#^m*(>V}Du3+(f_@6siMPh4D_uVSFO zBroI8rmUN$1tKa*(c8uh@-YjK%()XIL+P10Y2pOJ((BKL)VW?BM2Z7@^Y-$DzRMTRKOQcQ z_Aah&9zK44em>*TzideQyZ(5FnBaAeTe>qyw7dj6gTyZoJ+n+jGLQ>!i>ZU~{$3pX z9UtW4U>DfV;de8MY>0PTA{-9tnTeiSp&F^DE;s3^L#Un_M915_fsN-i)vn`esylg2 zwJWc0F6Z^lGk8t4<29PgPSTj`^zK&%|`%;q^89`jMXh2+{yOJe;Pws1bOAF(UkSmfG#a=JO!rW|~T_$Zn0 z$E5a&n%%<08s((JB<4N5K@lH|>aO_NjUh>s*y*O9NhW~T&;qeieTwyZ2Au3!v>-lm zgyT-0jDw}*P4>qhi971kA>(l`GxNgAaGYyaYHD)w>9*cpW=KG~-gQk}Q6UuO<}N_~ zfJaF!U=oQMYAJJqL?ztu)~l~_!j&I<`e}ki=bZTU*Q=`j`qu?a1yjL(%zlhyLS;9^ zV%SG8i&>nSd2k*o9KUFMkz>})!<_q-=-~71fJMWx1bLC2B=R5BlkBgX@NC3wP zU}xeS3|4UOvy5d-4G37i+}Zh#JM8W6zkfXTVCD`;_aXG}MqIB$jZ7qBEMf-yUM9If zNX&^@BZ0jBjGEa3g%I=*a-w@kS_cYUs}{`n;Ykv|aa#czU8`oM$1?v^6S6gFCN`q2 zuh3Q!==5Kp)AeX;?!n&=oJq|^imjqc>Cl|R!4zM`$k&Ls+^nn%$;sD0bghPI77lY> zN^4S|S?*x#MsmIDJ+mTD7X7 zVej5~&|f}p{QH#kkjoF`qMVDPr=eFU5!1UO_?U&%gfqTW|4oPGPkDDjem;;uRg|zuB6$)K%g51_yb;DoAw zfdJ0368%7LM@P3lL5afmmhG##vlh$_adU8oLLnm1K`2a3_4oJiu(h>8IYJj7KQ{;e zNY!<>o7FH~j2owD>qZp^VQ*7oX+wfdSX(EG;UFQusIR4^Pop&SqF$TXllM9Y^57o4 zHtIL$JPsx`o7bdK3u6m#I&n-RJ{D+kLUBwyK0e`(Q;NF9dPU)+LjCYz_Mf}46Cc7( zybC*#&$V&lOfI%xS@yY-icEMpB_$=NrJXr-?ou`+;;eJ1rgSmobkf<=C(tbyXwcS` zW)4Pu)YdjNpyUFL4q<3?NKA-rVpx}mSn=GXoqBBz{{8S_uLmETF7CX12q}HP_%W^1 z{ie9s!$-1OP=c5Xl-o)WSY-~z$ja(=y3HKAkwQZ^lAA<{u8O`&C=@R<&a)nPbmhvG z1qH@`{cC@V`}_sWT2qCnUm1Mob zCNp&FSv~A`GD z<`)qeHPhgT)`Z1*t z7xK|6-eedu-t76Gh|F*wcrm%@5*seD0Imcto`5H>xB)77&q6Agpnt((n^WM_nKNg` z1UrH;T16b7=85(H1}?uuo${W}CfEP<9$ zgdLd_6IC>fM->{&$zsCHnN?LcKpMpQ_aD=SFO_8jDQ)~lY2zZLjkZDB7+ntbp;#A+ zEuz>iiuD@@TSc)A6nm6nCsOQ+aj;Gldn?60NU_H#Ry!&dv4D`MA*Voi=4E#`iakvJ zI)Ng?Mny^oku4M%MUls7uMQL|9|!vh#okY`ODJ{<#RiRo{ZD3U`iPnO2kp0*_A42u z-*SqbK*xNDVjC%T)p=DXQgNVt!NFs!bDg^#;q6p7@wts%7+ zj8W#=3nMIfDzhc8mJlQ) z{Od&_Zz1;D2J$V6e5)~iy3RWPCtTh#>0vm1-59sM7jg%YJRbxPxEI`(mprSAFA#C_ zV&Ul|dIv1ahBsMZRvLUxE)~{P7vY+V-dg4Sy)83Ci{&=rpMnOvqk~-Z!`|deT$hRCEb2~)N zrIS3hxx}h{-+hzg;7zj0_T71|O}|2*T>E>6Z_KK{qpG?%C%;aCCBv~T6!!P(6TF>* zgX~nQl9H@}B`BPEi?lVoXGcOp!sq+aGAq0Eny}t(J#MTLeWTvk2``J9n#Rt~#w(3| zyfb}mO^+>8ns@$EgApVUf*5~=h!G(Ww!NSLzWZXafwgjQaBz}oZN04F%WJ6Fk=%ru zabY%MC4^2d7pF!@7hG|2BE=xA5tlC)iYT>)VbdG)!6|3b2z?I15T^Dw6te6+tVfgLc+M=8^{sbe0y z`OicLjNwXMMSH)fqNw9?A#wC21F&&IRH~?_yQu+waec-_2jW&2K7Glp%kR5?)#B;6 zr7%;`rmLHEf=noAs7EM!);=0ZITcSzGHuSolTN~uPQjB}ZQZKVp}NHBUHdkGkFS>f zykqmu6NvL*NN!liWa!SAtDl)KJ|gSxCgShjZ3q#6%J`J%!?kNa|9l=%J{ey`Lr-vO z_JtR?YbGL&c@Jt9DWQjnt6^>#}St#Zj5}`ZUFzd=E%iJ#`lRrDItp`g6Xs+A<|GOfGzoL!f@&mZ_Z;|j_M zqhWS&YDY>qCz7)>`OU&W51Xp70n4j8efZFxy(bU-`s0uDP>c{Sw!1sdu-UL#O=>nM z>GtXEdj4g8`!FwX%nKSBG->5*_^efki2oG1H{XOo=AX1!A+-3-OtYF zdXL~cE+im-+5L|^`{G>>Jof^Mk<1p0LqbReV4i9eVkrt>lP$(HaST)#q^jr;cQ;n_)c_S&_`jXTCHIe=!Ix*O`NYuf8eb8|s6b#-xui-wD8=EPVN zWK9Vt;*Q7v_Qo?qgu}+hqGAqHs%D-Xe|j*?f|Eb~eDKIuU;niIFqlqDi&Dw_H0JbB z`nh@G(x{k)JomA;U$KG-8Dn~`C)F951P)jGY1kWhO9i##treNM@n&G2!W)@eVr3vT z6^Kn{Q1)ieVtt>-U$7tH!@Jyj#LR=#@N59a;YhPE&tdK-<{i?;aCuY=Di(u^K^*UJ ziGl`ZGrspSuX2ef7C^q;&Lv(u>Y?iJWz?K97hE6X)PbM#owHR*Y&|Y+4q(QnUd{HA z5w`}U10@~5Cv2F8p5GJprlM>9xrOA-fbK^=w;4^XEB!$K6@S69`0xZl6o7x?%^Lg= zzYgG;9BqMkN|`M?BdlTUX@H+4_OJ(y%dnGQ+@g&1%WUsJXKo?fB1j^ItaL;~@c9*fJez;+9E#1JJbz7cg>SiO3Wd|Gr(~@i z&@Q?-W@jHf0p0nP`jRY?ZlS(2BO^K4+g_o_&MrXsQ69;)&`^+?nyr~psVZf=yjM7(*jmQKA$94y)?a0IIV}hCx!1ERV>4(%H@3pvikcFMhx6YqHCl~)!y zcXk$6Lr7?<>vuC{ZQvA(X1X(L)~xjH&RS=RRupe8>~_6tjb?_ps_!NY712TxD*6g~Z#><&{g(emCby@x0A_A*vcQH$n+nN5pwYHbF8 z)v}?XoTGR^&&0>y6sEEl8jVIRqndR~yh*3K4pjCvsO&dT*_YUzUto961xfswbn4)- zV}GpwVjC#xMyXC}%IWbfH}`6h+%K?VyhVN@o@eqAm`rj+wS1mDX3w-B_~D; z7$n|unT6yn9DDMqR9zm%JsS6P{smUbzN*$-=$VW7P->+I>%r_bUr zPQ3`V!-cGh)*fPdsU~smm%HG!72G+X^z`(!(RUBDvKB_maU!u8R&NJ#&(FjWH;k{z5jk+J2o-c(9;G{yS+yz5KCkz7NzSqkVNk~#%yOv3exVddAkQW~Z>wFdL)^)b}f`D$*@oPTSPm7Kb8BzpaC#J%)Zq zEORs{)v_g?VdoVYA*o0^e)8n8y?c*k?#s4p&GR{1aiCrP@4x@P_n^0@ySqDlAHHV9 zmJot>qtr$&wGF)O`R5;spK|+SOXB}6%6OcU)umMQ<6>s$o{x~LcWFj>!H;EiH%>A$ zqNap6$n0!Ake_eX>o2GpwyaC!H`if}K-crzBG-xjxwC6^H zlTRg|ICk(4_6Da| z<;$15c9k6ZfobQM2Zm3}zypxGauIWVWf9H#t<;5wy4GCJ5SF1RQ((vv}M($m>mJr^by24I2#tkplN} z9d=k#OJjWl$_y~w8d-Zgsgu&**UL$?&DfnyjT-3wluCbpd(Vl}X9Ypr#NZ@0*7k0U zC&#pDWKB(^q?(nj40#y3piOLUxSqwi3)6x_&lS&#Hx7fvn>9Men+Poq-J`Cy4)D1+ z8n^PfIMOM+k;0uRd=`PzdnEDAei-ZY7S@UQW)q8bb(kg6aCKE{MaUIdYR#9As$YnfG4l%)xsddrG??zw-}V=J(dI1&V7tZ#mP9HN5X zQDg9Y>l-(J{q@&-(m5gOgp62sg=yUrKgv5OIxwZ3;4_myTSM@CJ1qP z`O{Bhq7YO5YE$l{WINg&pJkdb&$8D23IhB0D^NAi`|vAQIh?0c-PSwj+Y z_1?C8wMXo8N9=P;S3lzNeK>-BSVm$oSF;k!Np1vN5reIeNT6{^aEQa)W#TY*gTo-P2@{?{v{ZR{ zD3(%)HTxXNx$~*4 zUI&S~zu%CYY|!>3hF4Te5P}c{bLB3mr%B3^O`CRo`^`7-DgE|~tvO;}=-(fu@Gt2*w=JA2278>3a{UdwWS~VSWZt6hlqf(%jzBPJ}##MusaINqR1Y$Y$jTjir;b#!V=+ zvg+$Yfzr$6-NfMF?(S@Brx0Qx7^Oz$V-v!k;~lXwT_=w9pua+kgx zJK;9$gpm4rqfsnWkW-^y+1EcXK=LEkcJyj-hO{*`H`HVC%gc#DD4@NaxPU_5#8x?e z{N$-WPaHy}wZkVXjz(MLg(t3*O$8}G%s2iO`6Bq2;+v!b z1FnA`e~e4I+OM(_b4n)l?v==E{me7ZG9=rw%x~V^ra9id7L?*0F<0g3f($9n4*ovQ za!#n6_2^$0!A4rlbedi#nGK@1;(UF5;}|`oSI07^VehcBn~)&Fyzl;a0ZsW!zq1B? zf>(u8QKECh~{jix&k5#A2tJ(NRHeSQ%R@*w>K% z0_F;PPVImyRdnIHnn>DdtyUmPuxhF4^))S{(&Rt%0@jh(9lpdmzJPUn9_u)@`U(3Nm9wwjp1H_TRNl2vj zjnN0EFvoT4v>lqMj!I9TM?5%qco`j%dS8!b>OYcU zhqZ*5Sc$<(($^=-`W%`H7h79n)wqm_1T4JQjfwBr%2%u} zq4&Z(dN1^$_riOI?uGv|yn*)bPx~*R{R?RS(xLu`TSbRQesyjoS=FYrv+xnDe3SSH z+7z?!($jmIz4XQ$xs3MQM0@U}J@?X{?Z)r<&#ffp%)OmeF>LXhEFu&KirJG4>*LAZ zOl2MOn6HVF+e-4e-9g422SI`#^?_oN(d~+-Iis5xT!r`tiosO|t-v@c6FCeQ-`*tO zipe)m?s}QXO?7Tnq=utfP{zDS)dl=dxZ9bu21I{ODnpWy66>)-#R>%^9co41K_;1F z#6*5V6Yn$Ui9+SY1O{PfaQPv>5tD49Xz6{m*#b`V4zO0^Z6Cy1VfL<#92p=rY&a$aAtI_qG(bHR%Wgs=yoVwT*!*uV;1AeNG{!Mg=w3I| z4Y8!iu^L4C3zq@5?;sn(u#Ksp8}_CbLF2!X=hc-58^3;74O5qfzkv zqFjQote+o^9X^U&`QNf+G%9{H?bp*rO{6p91Q1pvjn2#o`cvKD%#3cAnLx1;`qz4j zy+VI-9|zk`u|@Q+ODNVv-M5c}4WQVYDfVv^yOaLZJPtOUVrBHNizv2({^U0f*0cwg z(Z9Z(Vo9xJ%-qF6tl5`&WZfwQR`KBTu6vHw=z^^nP0?)|>uyA;p%`pY9w7TSBo8^sglpTS14xG&tMhl z+_dk?(FpYn7D@TvmT1(-8jQvU>lh5*cjSzb(}--g{~({alizpcWZ$vURGcx`eWBQW z^G))+FSvjYI9~{Mtq;&X49nOehAOwLOIB5-YpJZnxJ1dx$BKGz(`#!h&C}RU+P?kg zU(dG*1f9K;CP4(^1Upk&5=0+-@WFRGHB8Dp{*t*>@p`BrC#SL`C51eLAe`+=n1{~s zV(*F8*p@e-zRbnLWo2iwa5caK3+4yQ6GXU*1cYmB?L9m^qvk}}+S)7Z?U``$uQS~f zM7Y{by~Q;0tDoD8RDgPQ@^fVeF4$>`p2HB}8bA(6#EN=fGj;UaN6~F%YV~?%+`wC_!+r&08B}@zd zYG&&3ITK14yx1@J*O%;(Sj57-aXvFr}cv#zcY*F56l4Nv*@UIXqoJuuuZkM-2o zy)TopV)*i6HKgSzZeLc>ko3MnF2)*(gkt2QkmD+fN?z7VHxtZUbSw+{yB-i=Sb-8_TUHY~Tv?gORu?4-zyJOVNu2RnmkJq=xOUyo|gIR~P#nj4p2p&!K44i~{XI^&oYK~1wNo4;uJyF@vVAy-0 zy_pEjmof@Up}-NiL`HI~uWusj9h?XW+TYQ|-Ia*awoY#D%mhwgu(jpb0od*b=pKv5 z6WxL*dIC=rizk|cCsIK}b(L-A>L+x+S}SjQVkSA6BMZ5yDrQRj!$KqRX}(GN)X<3Sz8+ z`K&PC%Mw6HV24GZ6TD5u@*{@pB>6VM#2*zPM>ERo+r*3X9lx=2K;0!2T1KEGT_?cA zy$Nv2o5#dYq<9&{8z{bp;$4UEW1aB2--u*mm%!p|&wYbPHsX`y_AH!WzCDkH4ET-Q zVJGm&H+kgmC&>-Ad4l-{8;F?-2faoP-+{Tg^(EycQ0C|6W+M0q6DKHWY2oCRl|%!a zTwa%~5mZ*T!KGE;t`x)>R~lEc6%|OvDmv~Fj)xRvJp3@owmy;l!#h#X+FB6HY#02Q zSQQ6@{!TU{BbNC;)V&8>R9E%~{@(N%%Fu?6QUnnJyJDx=YfLoVB&P0~x+$jYZsyGp zO=7wx(U{_@F-?>KHzKu#{;p*hz z=;5JKjdoEvkQFFE@F}8dq8K4vfj)Sm5y%wzZ7b~mvtotvmMLc4(z9QK5fd*AcK$75 z;yS^uvV}JXAPtMS=_k8BU4HN#gB)71 z60GT8>#6qPNI#7l2DY2R8t7mDL1D%8leiC-}lVk60wkPQ0xOI%p2?G7Kpbzb%dlQBq#5$2}vzAzIgK0o+4%)=-D6Kb z^Yr5R;Y>IAPT=N7AUG+i9Gtm)DXCV&X8#Zdm2!dN?Aago!_T+p+>Npi?HbC8i;Bzm z6q>S%ipq+HvT7y7&ZSE+F_)vyoxOB1`bvz3QFuA|dLbsu&9>`QB;rnQm@FeR3r~!&r}XUnqLTc)-0XBf4;d7oxThvUYGC=;vO}eaJd;@B3Dk7gJ`gs;ofFD1xV|%Zm&0ic3n%s}VfO zdp5B`hpMr*)z{I%mh!oiqY6}VK@g0Cjg^Ist(}82c!PIkVto&J6aMr{_BNtvn(VVu z<9Xlu0&9&#iw%Z(>`GW@zhduWV;Q#V0^0(@L5*Z8huf8p^SipySY#AirBw7d5z#~8 zZ`UQHxVeT(ty=oWbBNR;Mm`8ZL!Zndk=BFe$_UyCG~s|}kSUXbqFc}{v74#F_gCD}y|1Z356a15T7v6G#>q?GY^p8kd1PTSwwhEM10=c~b*A1ss znY>A-H`;f|iOLdBOKcPhJgovxD|G8-3P1#dR*=vJsl1VY&salmLiQ(PuO)xbhuOA$L>YZk7Y?CguOwpcNL^GoSJI64+QmCckgI*#xGC-pper^Em&`xQ2{( zBOFC)vGa5Da7Df%nVZl`!B>tmz}@&QyMzg4>Eu{j2Q5Fff?~$umbOZtot0(Xg*S-5Zf9!Bi_TtaV`31Up^$g9bZA(8?$ouFbiOLeYsdG-#8J`jSAPEpdD<@sxO2er z^W3QA%a=?aWyhCKa~sp#Y&ZA0=bl^aXFzMF<%6-2z{bT!BXSLyzwVuIQz!f4;TCTa}n$W@O2UMu>k+Fl=bJYajc591nV2{s$%poZ&-2`7cQjKcPPe(hK7oN z4M*%UB3qT&=?JB%>yZQn&%0obB?Ai@dw{5h!tB(`fBp3r-nJPK2GZSCAQT=?#~Z{$ zXU1gLHOYH!Fl|6#XO~na)<`6%$L{GQ*9f`{V(pzg90)M{bhOL7J>j^epAbWOTx@f}XK z*X<#)y}F3U?x%||j#K>-fR z=||Nr(WRy7SD~d|FE3BI939QX#>FtP*W#{SX3%Hqr~Ui(9sKL$>6DbL%+#AnnMug5 zffSe`X-_(GZbnW@PfuHEX?+J?bxl2-lnp@PjSVv$IBCOF zyn7%dI0^=>dr<@8KiI3jMo8Np<|k$!`g#Aw?1i#-in)OQFEcSrG`Ym6py565agyu) z8@UiOU0^$TNd%*&J0B4BA*Zp4k1uIQ6j96Xzh`&iwXsRV{1AyIH8|hndX@e5Fvolx zi2@V!i&xlv@lmcWx1mge!hS}KxfE7aH&v8ZS63#d_itd8e|kk8#( zA@XAcoV>fEhw1L@=+ragZEON;echZ~oK<6FJ`UD4WCOLaL9;o9g*C#T>>+Jz?Np>< z2jdB3Sz5|v2)VNl2ynB)tEH7SCq?~GnM|&*G`+L1rX2JZc^G6)kfZ$pu1lPh|H;@mA zy@Zp35#r*m#m1Q4#axb>9UOkos#W(s{K!3E1h7NTU2(~P2wMRnY)8yC7Dz3I?MCLp zf+r# zI}QlB6(HpH?)dY5qeOZY{7EWN5{cyQMweH=SVF`HFOx*rVBJ#l^2_pa@^h}ll8kw( z8fU?HmYth_R2|qPE=JdIZZFm}sms{7;tI<;x2eE?zW$>XeDjF7^&iZtf~~ zsxhA|Z*_-2f>=uFa0mf!@-oTVS|Wfo0r|>ysF^FWQi?dKAsvM%zmKWqq_**R_w%*n zs4&7$_JXQ*U42DKd38!fJ}T12Ugo1a`AA&JbISv|u!O>?9Fo5EK&S$I2o;pbpQ%&b zN{)Fo5)zw!@hSW0)Tr^bwI#(lW#uKsW!;_i zP1Uf1kRLEWWd-aAMWo+cRYiFzSOgrQ5h~ye(ui0~OA!=rRwETlL20QN&x3?Oyb}qc zAcNYJ-D0_1X3^{1YiVz-v?ekLBF9l>3;9E`@RZgNJg8GlWKc^>dk2)s`t z&772U%V1Dqd{CUsU599NIvkr&8z!6EsVqJNuwb`liW-V$!s` zj}p(qq*@kkfLdaUC%%PvqH*?mnlw&Rl=vMvfrDok5wSaHHW8l{IC%EZuvvj4N(mY^ zD{$~+pP{k>2T$f1X;vWdJP~UVshhu_kW)r0(S$N@5_?fL`Kppn-X>8M_dz=$SzVhT z8Aw*w8bJDAi$~QXBwhZ#>pSQO=lDjkgX5i=y%lVq+1nf){L7qHk%0Yc(SQAQ@$%*9 zWMX<8oG#Qr-WO0T8koh`m(?X&YlG4%%W1AA)X}$LMA^re;`$PC){xcEE|$VLJc6_? zYFY1#7w$Bh#77dk+FM{0z`54e*9UsCpv`Rp-Qy1LVnN)dkr@^-YewU{TZS|T(i zWlEsnLE$r#t2cG(wD$Hc92Y@ecV{af^Q4s;pb$AZd3$masBlQm9GtZD6iIrEsSDf7(YIxLV^4lq0ZQ>Rf@!QSw!bp4$Qe!8a!P!Lo%KUanfC!@m_}J!O z8z02ax`)?|ZHW1--QcxsA+sC#1mrq)cLISMHKem*D48G9*{-7k0_it|Ghjw+*EQDb zT3XvlMpCzVrW(pCp*6E%gvh1vwh`%}kjr4(wNzMGNWq&zkyvU$@>a}q+|a`f&VIZd z$|PA{zE!g1=T#IH6-yPQ_acs|ykB(iJ!8P}mCpe;vv>PjdQJ!mnly9P1bRrwHqT81 zPNsKce)sJe7JfFxSFWVi_Lkvru)wI$-{od8;EXv`#y8n4ic7Kz3i3}LzeW!Syn}Xd zg{SMvB%1asp;&dX9HjJ^c`?6yd%TAFa5XQ-*l&C?Cx_4{HV?`is~G& zb`W)=mTDa~eOiY}uats=OffHg`ilM*eT8T*yvEWo3~V(Wx0%8g({cSM?80GS$5Plp z3Y$V?c_KXQv20=|Epr{tPFk{`*h#CJS=dQ)-XwO?pllX)(iN^IJ1Ozw z5bvj{=L{wWeRBUkxG5eX(R9tOOf+5vZ$1F!znpxNoy+VaudZ%PBxj7Xn}NHKnwzUY zQB{1CSg;x@ zQCuP;qag~V*Fp_1T8tXA=#f?TtiAvK`;(JlWmTjnAO7~&pNI*wi_^%s540Lh)JI!6 zt@Pd3H@*G#rkCH1QevG)eYtP%d;HIuHA!J8%l00-X;UOrL*qrPvzoa~B1LGMK z`<)Su4faxIoz5dNGSa=d zuHqC544y1!(n%dW3nZFCap~$5S;*1I_s48e;Chr5e001h`^S&9cJBHD5UoJ(K6CB* z_3PKpbT8Nuj?_V1!W{+f_eVnSKFMFZ1oxvx6e=4d!I(8@#Aw-uw_zxg^r`4k4@m>O zO=x>)cwO{sy~N|uQ07yuS6?q~Ju#C=dUZl6c21c{S4`|G#idLoH(Dk`t2dDWmJb7J zCK-@@>tLmCs6pagO{3o0L83aseeBe!Q^&e{jw6ZC6UTYFkknL_D;O1} zp!Rg8TYxQyxB|WomOz53;gbXI zJ@5oQP|`J1*e(j|IRLh204(vQ5?JD!rKdUrg>3*V{195=Ya*r21a=r2C8F>J+jocO zz(ddA4*$j-NMEvN!9$Z16I1_uf6F2K{PN2$2YxviKP*#y&)LMq$6vn=b8^h7Q_+`5 z?)|WAHo3mOwywUu4hAfw)gMl{Sv@S@<#NI`0=RZ%<1;hef@aO0yL?zalzVtsD3sDh zs5;@KYZ&u|W2B9f(_L}rERH#`84bBD{kpKIPTU2jPHt+dcbFRrz1<19%uQLYau(rd%j_i4fRv`04 zbdskK(LQ`{3m9<)?B*TF%E3;(o(zrGFfukPsAL6*SrVY<;w`y3rsyUTt78SZpcZZ> z2&1_<>j~1Zore*u;Z;D;#%JY(qRaz99{<6-bPO4Q`_Cy3vM3JZ!{DHZ!j7f*J5OP& zDD0!dz}8V%1BK0|u)>kyzLvtaQdl<%Ye`{&ydfoeBZUp1u$L(;qQQt{wj2i5IA`PY zUZk-06jnJ5?0+-opZtkPhd@&@9jEs-43ENk4hJPLc7!cL*EPyGK0cH-@GCOK+% z<|mgDmZW)-rGzbMo@6v(O`0beNZ6BhP3D91z&r7Ib$kJ`}0?qX&Zhh@>s6qA=R z%sjl9mn2@a`sgSMdxXNqQCQtDu!$7box;{q*g6X9*ay24IQSY`(jU;0zJWB{0ckk@ zuC=6u8;Lh=Bqk;$rKF%)^$i6=r*0%9T)TE1a~MQ@nl`kS4#zzyPXj|Ss>S zvxuT4=;*M5kxWEtzQ~2b)^_duyH?zE%}veCO-+p`vZkyt>C8lcdgM` zT3V5UKh{=OO6YVpGCtnK)>dh$oqyM=UQmGF7;n!}Zf-WVuA{yE6#fC@gC=>3*V}ZtDAgUqMiw2?_;;@P1j8IP$M0{sQG(^%62;bqpeO3iA?sTW6 zY6lmE0-fa7RPw8m{EFo0NqX@8pn=NtMO3D5p)%dLV-L6MmQh$4g(ZD!Ajr}%Dqe=$ z{jLlKYeivQhS^IxDeP=IZWSH3jKW$C18Y2U220O;}r*fnsEsQTC zw!io`HMO=5dyuWl)r<^3G#-gHR^j1qRDJrb(>SDSO08^45 zeU%wE+MTy0I=gz6mrv+Cip2LXj&+VBp(!ljRk@^ zA05{vN&62Xs5thMPohNAV`HcPgJb@ob#paEp(4zc&X7!x{>a;NF|+i%j`edR5!K0O z%rl>gAD_E^>WrdLW~XTCQ zQhf@`8g%LDb@er{o>o`YG^D4eqLsoO95{c?~ z-KgosNHl2t4P;Uuh>6kzVrBS5{7HnU>C7R#u(M$DzDYh;(sEFd%nBo|iC>J+`gO<> z;&ERMS%UhnnVB(Zot;THVv!COlaScjn}>jkq)XALT6r-#DeWeVX-O$)tf%&kj?T(z ztAb7ymtKmV?S)ez`T% z*b2M^OoI;$Jz*>bZH;iY>Scm%fu$CeaUC6fJi2NnqsF03X`WMme{1cYFXE#fdv2{xQ7N8r|7(|jDf z67eIY`vnrM-PSfj;s+wsvUsEn;Ay<|$hqJ(Z~>{0{4==VRdB(};DQjyumKeoM#>Sp zx#_o8UKlBNgkbSkpZM{s9Xqyf{qQSzzD6z!?r79ALau>4LiOw=gB5T1UkDB}CtSHx|TA`EHyd6*J6 zMyE?FhKy!gO4Iq)YHe9r(b33FsVL5eU-)KOV{v*Jw!)=)M7AAPPlPn5Ua6JSTVXb?20Vuou>Tw<3CFDF85$Ryx&Wo z`^(2#&wqaZ{r>-avGdq9%qcx5;m@zO?cM+FxBm<_T*7`Etq*=85_0n_fBhhM3<%>8 zfYa0pDc`9RxT7bUQqj@XKO{JKZ1^MLw$>zI zrbB7B@S!zV=c!nc=K{q6^FL4*z{z2 zY=4by0&Im^f4@MCwqE!CUTGTg&`GeUo*))MCK*LUB3W})DPeO z3Gpy;nksLXdm|-s2sZS}<9oM%_0<<2yuW?-?%g9N`WZxP5%mz8oPxwDzF*`1xa@?4 z=;-)((#G+QmW_M3(WE>_Lj&6vr`#`9DtW2GIY8KypxuQ@xRf^V!X@NrIfTfL9Xr0* zyydg)+rRiva|bVF+Uj!}-TXW}cI_jA=1rk05ze1s*bSBbFsi3l4dDfIEg6a9rg zxIVcH^VBqxW z)0aQ~=#1&p=S;G;;w6=h-GYbb`ivhxeR^P^&-{l%?OeRjQOn28PDlinP7FV~2cjMg zN@aifk$%yY4AG@^u(F!!FY1?HXk$s_SBOFiRs(}gnidg+glxxPD#*~YagMXChsUT< z$OH0Svh0qwjlrA;DPz*q)E^h!FWI_?WD`N$$(y-80asN+1RUkGAV@gUEqNh0Z9F90 zcxZO2!ot+l__$ww{%zNnUvB$m=N~w+r8A#%YTIZW!bvpkh4XRO!HikCH=5u6 z`5TwM*t+$T%LNICNkG!qoJQK!)dTw_!-gSaagwz=Au@8++O>~Ac<(GPlE%0U ze`;eR#U9WjwjoFB>Vk43Mo6aoYG8z9ge?a~mIEWcx7WQfgCUv_;NHK`pxEAlG2h!u z_7sH8B|+Fyq!W zap=(5vsYp(tIE(a!qDDP9E$)x+!2w00`!s7D1<_ikJeUI-V&`1L;mhd_6-fK=qHdO zwzQO|oQNw!oqT>-NF<~c7h?z`ZR5onjpnNpH;}}4kU3}%-7medyQQPCQR3k&EJi## zzYYQd~Gjgt7z8rn+YJMkb1Mus>#YKtWF|pVN z6&vkFdpks_X3w59Y04CLsT|Gns&yT<0qBs;uY<}g9hQWlctA;fjnWJ?{XJ=yx!HuLJq1h3b6nL7Lc{QXok{_u9K;1 z#kK1Qc|-owl^Dk1Ff3G~D$Znql`WcL4_>->_%LF8If2KN4UxpUiA%#o$>cIIyk#!|Cvt zB^DzwzVhZ*0b*h=UA}NR8rdB2@dwlv4QTRC)>&I?3*!(8XY8a=c8a>THl3x1&9udl zk<5J@r%cF3m$3$2XK!Z<(ploRlRnV=>CxMWg;~IYuMrCh6sE;!Qj8~*L#pLwW}@O$ zs#+Fv}9;Tq!rYPdKB*;m1_oyLx8K0|i6Y)&n6758A)VwpUWi2??AOb9ki@rbg< zu^_`~!-i*G`R6}hdgjG9B1{RjPR900VP|I6d+uHD0^$m;zPB@lcks(VSU%&pDW#QA2Sc6p9$)W|1Y zh8GgUAqJLB-V#$LqJ~1&QpivOsiAN^eQ<-1nL{BvC}chza~g$xdKlOV6!swsdyv8w zQ&@*#U^6M~Yzmu0VVf!JbHl(^P}q4CwvfWQP*|rvSaZ=$-JcUry4Tk%$q0Cz&x~y# zo^-=9lPBF2v2$CUEh`MGW*kvw#fv;LEyV(#jGAbY#xh%%Fbi6W#UR8?NGTS&-X*z|ZGk1IaR|T4u^Jm8hSW;~fO%V{eqrr5EGj>ZkDYU)w1 z0IkjHYAQHkVRUq%7UYrYf{bTX3IC=|;%Mu7jcK$43~mlW8s6goPx z&cK(d%gUNZlExb8X-X?pnzx0pwy?rDp7>8oIgof+9s!=nnPWZhv>td0N=?6+QkZum zHu*Y=6`r`9N;<$@Kl|6=3(*IDIsfPJlkl8fJ$&TIAAkID^fI%JS#k1&DJKerB`TC`E@!1V}jeSQ^+{H5= zfBf;~3d`{`X3Pzp;bkGxssrmv$|`Fr+6AOwgp;JM2wk?T4T>@2$M}u&542zqt<|P< z^1wds=LjG5c!k2jSs65j9JoE)I=l6T!jh^cg1O4N#B>AzHP+Y4#1bi54LK{cL2eHA zDw}cReZ}af<>c*dX@@~Pe8)dY#E`olEDOq>P5w^ta6BNiR;j96DXQlGHVUqcWOJV%M%K7ZQ_dfje^Upu~>c_ie z8%&@qv8j=_0qxqkgHGTsIEk+e8xG9i5 z_&XSF)7$SbT`f&_!+G-?>!{>Yc(_Y(!s~2pVD63==kn$v8{S-GK!iGu1b4-q^AD6k z#X&}oeUHS|*TciV_2^yl>s+*lE6Iy-fh6$r_4Z;Umdd;5;5Wd*Bq#d^aPTH@@Y{rg zOG^>3(f5OrPo;Os!O)Tg>_?37uGluGq$o2b`^LrO{a;?pZ67^<_1#K4#m-N*ehJ6( z2Vdl8YlNjocckBq{F6Mka{k-}%h5aWGiEmv&-`xj<{ay+OP4eMC;TcMH+jk9FTecq z8=;Jvo$8};4p_6&`>r?^8*kZwpVGVJSfNxZ7JL*azdJVd@Ngl=mj0i>yJS;z1Q!ZE zKx3M~>vzSTbcZDxHi?^*fA2JAxMS$J4#SLFL}6ViY#N32qp;3> zuqI9V_N;~cZA7nkI>|z>Pk)E#_2aCGUhg!?q}LCA0=h^?b)chG(NQII)PTNGZ-?bO z%?Qp>SQ`ra5rviVu%XO*>ON@GiGtLC?XTRMPac?_D4fVc^Ct>nI(cwgu3; zFp@xLsYoPhDJ?6^&B;kmOSy@|M$8}DKt~sG_SIj${PN3vr(=IdNs_bK?Jn-$eRuF$ zPLmB{gzTF;WgZ?LJ=w{>?Z%v)RBlAmb@iS&ZRX5LvYNAS@f=TUZ0pr1TgwX4<748I z^iE;W0-useVh-v{`%T`Yt?$$fWmA1?8Xe6fy63_MMMhoEZTt#*K*mi`eC034$pIB{zl6g@pw~ z5xkiQ7j$x3W>yYtRe5A$1mFXzDOL39>{OOf>4$qIV~BB==JEy(`$=&Z~-`t{de zZwcq?wZCo!tFtC*OH9m`i*VW>2sJ#cjEUinbRmnAEjfSS0BFUj1wo?~8ViY?o3yF9 zr>UZ{rd{KJ=(iK)t}9or1mSY!GoHk0>cNRoj07N8*@^e8V0JRs`Ky?z!Bx)`^4B%! zY&-2YY>4oMjnRMEh7HRCy0Hz}&s-Z0-w-Z@M}CVD>sop=7Q*(TW}&T>gNUhV(1e43 za9Q3zQG`Pfj=&Cim%a}-JV7q@!+lXeNfGq*riLcCd+KXYey+5#vZSPjHG2&gTLkky*+x3tx(WIGFN-xJY%6WOCjJa4;%m?#nojeaMsl2Hr)oRTz1*z%jhfY;gv;u(bY5_hvq%i)o)oBNqUpZz1$IiCGk`)EI3YyA`OLK0- zTur@Msd094-~9y$`*Tu8br;)KR#Ic|o)bpw3R*_`=9?gGvH!EHK-vHB*D)rMDdDf1 zu&95F8bjoI-(#KCxG7WT&l*3v3WQ21>4Nv6ofI*W`>%hKS;#SU99!1{_fK+WJ%fed zI&t>=189x_VY+)!TET9f=mIQk3VVvcIP5YALsUWvk! z4Jd_QS%%_*6%}PDHCR#t9kaNsyebvJKL(Vh)^ns3s>Zfgj}kCaR2C$}*8`c17fKP> zIHC@|`)ChX1XV6hj!sT0m7T4A4HvB^`8eSB%wKazuo+p=J``&LN z44bexR~a^lzxwlLb2EZ}S8{6h`R6rON4`;iwd2cedWnU+KK-ILjD($XO!;DznkCn7 zD0clZVr{-)zLUHb7A!0;M~Lor4(-j}Fub8*7vR*yTRBLDY>AvSfVK(Ns#vzWy8_WO zMTOJQrlB$Xd#tI{LbZD$pNN^HK<$|u(b~{%Wt!mLCu~>yA<3Bfa<-Lnverm-; zm5Nz}Kd!CSSk3iODwWpO?F^K48&7(kB|g-rfT8Dsp%uW;3Sh_=1#+xy3r^iAO~0`F z&%B%pGZ=GXd0#-zQ9HXMS0SFngJDpXot%9!mJUZ-^TFvo7lJR4V23x zZDEmbM+))~^zc=w94gD4=J?xrPG7gs#Z763K6c%`L^60SuC87(72fN1W)OP$&Ye5a z!8?4z{mU1xTm8(viv!Kj&yEcYhRtlviv~kzFt8wJ%+WI#^qdEJk|=>mpyx!w@i$?o ztcNO+j%1OX<0s;}yW`_wjYHUZYbk7ZRgm>vZQWv{pkPNy>lf(f>M`B4N) z8gDe*V|Yriv#l+RtaG?8R-<5aYx-4Av1glh=dN$R5n0+j&oR$yMH6}|Dk>tFEBv*G z{r9^>VyKxf+2X!bSpHFl7X5G-#wiDM@{Y2tX&QNUV^rNp-#(^^iQ3aJE}I zz%~!PO5j>qOVC(>eXFdrw6eCLskWxNw5YzcJHEgc+lXEyFbGgU1ck1oA_Gc@2r>oH zaX8JKENbsZschTHd>8@Z_G~4)V}OBW(?^9GHp##L=9e8Ge)!=ooi2=o;ISrLGkZAP zvh?DGFvCK_Ld(>1yLRn5n=ctZW|R)@vIw^E^Du+b@R}4AA>#AvMLozt;3)ywvPHDk z3e;F&q{d!s(3~}l(l0&#%JXx@x%;hDGoFaV-atE=UuP>S!pNWY(J7VlG#ENu?oUYQ z^+3V>QSzSRoV3K_J8p(d^#MD|+Zzn%yl5~{&DB~#hz1d3Z);~~t&oZp7M6A@nO%2) z&{p4T5cKF-J!wea+uPa0vKq0T(K9HR6fl;MZhSz%grMNy06!luzp?(oll+3kkFRxw zF-!qxogK#V-Toxam$!hgLa9rkZpJ~Dj?;2i|a);v6D*9Q)W=HJJ%Vim- zf84X<*s)_5ZuYUnQH~pjeX|$pt3W8Xv6=EXB-kn!ebGVa+TR|JZ9_bigRHIw7R>g7 z^PKIu(@+~ezI5Rta&)i9rqsi?iM6_%NoG!9N%D#viVPjaXFVD*gs_3^##fgEpU0SF zpnD;^k-r{cXR>Z!mLmd$2rWcl!$~HSIS(!JJhNBw_o*W6^nJYc4S4N6cKx$U5RsqA zbTU1x6C1%_A*|HIV61@D+hGhMu}~u}`ZX$QtHxAh=Z6Bu{vqOt#%1%`42v3Ar%Npz ztnt7WjxL;y=Q=#`>LvHDTefW3nustTk%n4J@d4>=>{d}T6yhn__UMq3xcJcu{Dp%cdf zA^zS9T{JO9FO)tF;YZF^Ac8_6V#N|{tqLLT%uA*DkKDTi6^oWGi<~`Y;gXP<^A|2( zwiM2b@d}~;s>dFCZ2d#){cfDx}~BXUf|Ky zLUxDQSS?{#Ekqv*B)da(b!ll5ye6g9kZN*iZ)I^ z($db(50>u0pt1JSx-RDcUq3%y6_Uv`Ry8FhT|Hgxyz*61q2=tu!zaU`Hd-O+YB6M# zbvaF*%xgjp4jQXVxlpKd&Ag7)SV(sR;vZZB+^hy}rU5t8fEyR0`Iymum<~?XF_^}~ zt2Vs&9+~ba>3Hg?j=|I#s=$m;jS=1W=%X7$t1d1?RWYNkAX!Ez8%#xUAu5R(wM0^i zsE5H+k$y`a=^HyBPfgGT-a>lX30`l;n-6&<7#6y@ASGUAgG$t%M{HE2`U|m9k-j=I zPjb{2uoDUZ9j0HSuZCqRjCBPi!kAvhiXFosdQ990PpnOMjz58pA8i_cwP}1M9lsx5 z;dv)KKY^eE7|Ebhn{J^u-9k=pAtFxIm^3CvGyZ4Oq%)+SqI zvbC9mUQ+;nH>0A^VGy-E^cgGS@orvZ@_3trb}c|%J?0>R_F|YCGPamur&_V7t@qHP zzS?@`o_+>>`s1djcQHM^fj+%u|I?G0)gVxCzER%r=dYNU%1TmtGd>18>rcNNK7F~c zkoj})^OtJhzPdWnoG>mc>EI9V?T9azq8L>aWXvGPfH3HpYC&XVU(dIXEnj@;-Phh- z=VOC9n&_uF%t@=OD05h8gr5T$w)BPAey@>JREs_a4-6V%wDoGV&h|r zQ5T;H#~+*1%vS>!Bfq@eiEtv?+5>GM#-D=ysc2Tqtl&xj6NPz4;^ zJ#_iaGIZH_$*{_0d!x|K3<^hdvQi?c?@Idq_qToj?kk%n^^~0+7RPscoI-7hqI{ca zUKTDBR?W9_keMOE$c|GXA>)-Eb01x^DB_-FPppg>YlcW8I!^V=FcK#;P=;Lxzd}^i zgA?CVqa_w;myFNT!7cntI9^DUksNEt)a6d%i8>Tig28M2C(O_%U|#sQ!4tx(2SIB1 z643Y!U^+IjPn%+th$jS#l!wF+9sJ0G$#h&9f#d=CQHk8T6^_J3@p4Q5Cy`qMZwg#U zfiWvGX+!_GL~iuW8snO(XLJ*AHYkc6!>}oQzg#N`kypa-TjkYYkUoEs_?%@tX+ls=**@{v7e?wtOGNz$7PD;dN;`)GH6QC6+l|fO?8A-%m z34v9QQZIZpaGfqECk^S|A{fb~GIaQ{hwJ<5Hj!r&qZa%P!BfzH7!W_wRIA*Da8PCbwrY&7+hIo@FJ0ed%oY_q->H5Xy zi7BSU3`Mvb^S%ZVPr@5Nz5o6Yl=aEUfyIdam?Lrl_n{do`&3qrGyRw&xIfBcy(g>f zkjaLidm!kr+!Nj|Pa#eUpo1stsNyZ<(83V<3z@>gk}iVJLsgiLu%*;wbYqB(xq%8( zi3nm!Kck4&cj2_$sgLgNL`79{gzx1TUw)Mw%V|}H(7Nn76~)E%9yrM@STJ*pw5{yw z;r;vf?9pmuCocR3Bl@owPH>`Yzaz(6BPqO&5DHtFMkKREzDN{<$T8E(h?3kJ;?aOZ z`a-gTa3N)!(8j8x7A}mYCYgWal4Vc68^NDJNPu%Fk~cN1C$h%hN5DC`wybi!y+Inh|9t}3OKr33+fas?16kXTzgx+(^}2i*zgfD&O?5i77FW`Pp3 zup-=HZ|JtPCk3N`;np@C$~@^RD;pbSGHVq1b*L{duQ#^_mz0Jway1JenocqH_WIiWuAVfkh>ZIF{6+j@mA3 zsD`IF;f%?XXAHsa{F79dRAK%wp0JFNREkHN9Z7wCC7wj4>+aSKF3{Zn`b#W6eJmdC zLyTKfT1r9@i;7|s3sNsIXE>Q=AaR+j98Xec4&e@vIxq^<0j-N)2%mh1d&VEcHk#6q%U$Y{}`md?9BL*IUvaeV7 z;Lu|!bO41;GeKYG%`<}hVW86}v_FL=`wTyJ1%*aEqd{hVs}{=7oYIC3r_dS-ThRw= z!u9R8o32E{my>hAm&9`OHQ`K20pZM^zAod$(n?Mh#Pb}C9h+nuIHlx~dQv!5WM!r# zCh*6KE0?Z;0vemn`snDo7QW?TOI>tyedl{lohoCS?n`x@5_GB~eT%N3Y=87gmZ zvMcBIqAu3nb622HRvJWF@vPA`I8T^5-fx#YGuyTpqeK|vwLcWBMUDluMhD*jj-C57 zX)W+Ge{E-tfOr@GR7da9_XtZUNo$BORWcHmjVO6@qAy(O`mOD*l;SChWy zq!df&iNacDZEua4ArX}_zCTD0S5gFXjlcG@ zuYko)aWbL^4UK;ONtm`Kstcj|mx#C&a3-@+kS8Ik&|q|MBZ^R2PA*l1O7lyrN@3B5 z+FRFP(u3}v9s{)kV}_7XD>UYg(PR%l;BxdROt%JLh%Y zVnC(0$I#My;68VV&e*umwe`(d4DK81ENmcf#jq3?qwHYC#? zC->~L#_%U_ggt8ew>bmJd&0*Y?0UIcmi53kIh39|vhfF>`#_DtICrB)Vd$It;IJbb z*BpIy{7yH8-|1-PnHAmX$m6Qb*JmE6)u&Kw8@2kvVP-sVm$am?#$8g_2fF~gLF^X9 z!aEa^iP$Z?Z?RjL5&toytrUK!*D< z?^@V5vT3CZgifQ-{uJ8M1YJR)5#e*23?|UGYB~Ja5(*tkp*0k?>JHd}d-1@NnT~S2 z@nqI7n@qDF2s=>gFQBkSt$%6XYB9kMeB#^Au~8%|6qGWaW6OqtzDcSh1I{0TsX!_Kl`W*Eg@B^QF;7Rxlow@NO+&d`jt+cw;E*eT;jk`$Ippg9* zhk>V#*>qInsiUC}*2LMj@0QKvq{OnkG@Z@o&EniZj!7OkCh^h~E<(lwQf)kV%L7u) z15!1gUGQ&@?^4<01i zqx6G)88tN-`##vro=gvA{*pa(-|FS7mM)l|ABI4ZR5dy(ahfnhuYM|gX~BY*!VoyJ zbF-x`qpYseAgr!yWh~q|Mu+=IYyaB2_pe&%W~Ef7l-s&`Ro1r~dgYdkE&0c$n>jf* zo28rix%&?`_sfUQT}6EF*^3viXI7?R?#QKOuN@>|ZcpQSi#>P^Uul&G4~C1aEaWLS z6Vi7E2lJ1x>EUrI)wqW@Z4P77nUk{2X|v`>gf3XRI+QsiNI#7QMjinXaxV)%KYbc; zS+5wL-K@5Dx0ZD48s%a}SDB&XAE8r4Fs16;tW?O6$I{j~&f68fFERfJoi-=~IzidF zc_@k2jCo^SbYohOXe;=k@!1l$$4d!rM71*$-EWs@bvubf^XJbn^_Vx2Xf8T(hH0}k z?W3fw$djP_bD;bZP<|OGKPDmb`qk^Cg30BWnBRXtbnw8TKYlxSaQANq4j=vNaH_$PItXDt|CBv#OJ=(Fq+qwvis|;rd_oafU_g-o4?5mHM0dUpey4 z4Z~9Yo8|gM{jb^x_8B$|q3qk(r=E&Hlx;oYjh=~Oe>#CecCxjrB10yRA3wG>lzCsa zY|)bN`IEfeQo@)EOoF1+!p_4#c;WKE^bZhY}yc*l`XZ~cAa z#^>L`JM9w0hptDRqt%AVxK`oaB{`RyeLg3c=}FHJ5eFDoc2 zt*kDuBBv=->ht$1n2R`FBKLVxtt-rp*7}xb{mmfrndmlVR_O6rzaPDN^K@)!oz`Bm&BH;c zt6{sUG7=x;8G&8LPG?TCk0L4gFoMSibP=!>gB0 z4)RhdD|tRJydnN@=a*l8p)m%SiMM?9<(J#v!zb+$1S|aq$^!TA6>305okppNoE5#@t1ln?%O z3m*&)Z)1WVOW~6#d^Cl392Q<_f_I_tEQPP8@Qa3pFEGJRq3|UXUPj?3+yalo(Ez_T zL>_UAGr2$jk%kjVr*}4!?@?)S6S9z*g+*YJhJ!zSf@$;_boA47^k3=dQ*If3E=G^Q zDq4zFGzZc&4ARsiFF!voB?E2Dv$D#va!G_~URG&QQAWn~j1&?hpv*!|h3iGgN$)7S z9vxj-!AV;Su3o-;wV*Xh*o_*Pp@#LUbHD%gQ$uw_{_Oof|NPVZl=7yIKaQVD2sJ#V zM(nVfjhoC#cbzV4X(>Csi(_YHaMIV;cv)L}t$964m>v_G63XnCJ-&9$3V+{V-=YYZ zFHlpNIUvjN4fG$k;+`e{N`6)}2a6b^`zmrr!J64}Xx4Ju|7PNY(e)YqcAxVYNFmt#9RIqCH(8xIeg zs_Ri6vhF5ZPq~umvJ_V}q?a>_%9P$ND^+!!RPW6+V&H@8IjQfcg%3TnXcU0g{=f-N zpZ}DU<$6=_>G{)915SMd^~r7;sbLLFV4sA{!-Ig=12HAMl;Z9Ktr!EL#S}V|LK6!o z5A98%Yx|(Zpe?Zq5bcWSbYjq!18fur$S7fd6)w^0R?=-@JtcTodSc2=snkLodhpx5C$gI*meA&;(rw$f ze7bu#Vlh7WPeNW^!tM_^K}vbo7FmwiZ_*66mTN}kIU;oC^cmC2BAJU!0j@ac&G*dd z!J{C`z?Rf!Bt7Ln5WEyFMu5>y<`dy73l^YG=ckyTAZ-3<4dOxEbxN7Fuv@IlC~4^I z71y=2Ggj`FsJT;&wW|4p(YpFH_BK2AnQ2Cnr${xZZ*?4WeX*(rG)XPOGCjtv&xJ zSftier%lD3=~{gp6d^fFAA%wz3zyVj3BH(-nS)v)Nr{OG`}ZD;y%uw3C>mvOfzd5G z5@FHS)z*TShKjuCp{N!dw)T-H7rEKXMGfdq+t8G8>BLZU6oAKq=gvbSVBpvVILU0I z1yq>#759x|%u!fUembVod(E>E$YLNVe4h(8J+p}vC-@G<8sbMx|E-)_@Zq6y4cpPU z06~DS@MZd5L%-YiJ{&1y1Y6o!G#1wurY2WZx%t^B+`MgvVwDS=+F8_)j&ROYcsB5o zXK-cAF;8%7b!F9#7NUq1V{SY-&iFa9O2}8J54oz#*aF zkoDk@x!{mcaEK>K#T|+#c5>Pw7p~`zKZb=3Mf4mL&fTy&C3M1q9eU$3(65&62=xj1=Foz-nHNEi!8OI652izdK~ zp#8tD>d|ynE9t5x`ry}C#|Q)GPpYMTt9o#IPzi;m)+4sf1U;2PyWRn<@VvEZgo;8h zAY&6%FAPa5s5vHxrr;+C_)rq5fx?fYbMG*X?o7es?-+fc_K-)RgD7;83HlU;P8tT< zlR|q?=n4~bGKD_U2W{?zU&f#CGx-z#14b`pH-Ex!m78?h!Et_rj_gK9_B4&$N=Hub z8+mZ(CJNn2q3<(68>RS}KIp-9rkzAb%HegUugO~-`6V~$OoNYjk&b9fM{F^TD53MM z?Hkbq{lDdrk?vZi(FgL#B0Bdubna5q+=X=RH~Z$kz^H|+yG09Oe}Izzrpu?tlJhPR zn^scFxr^toWmT5ur(Qk%<2Kw`nwXY!@#xjsBX=0MBn1g)Q62WG*1f1#(ovX{@bke? zv4u>Lnv$5lD|mj0H(QmKh)GF1UwHnlSDttr!K9lWo8sd$<*`kiAjnm+C5$=4d?9@0 z*~i}rM=`7$%tcw@-0=JEU;o5o8$y}GlJpZN~G(&!}i=YL<;s0wq#u zcTYn{OFDFnPP_T55HQ}!35r?EE$%fcz0k?FzJu-RY01xWc2HT_%X+1TtdsFX!boj9 z8|yl1u2=)kYLc(-BqiX4o^C`5qU)cmBAKT#*Tl0w*Tm;2JC8xkv^(=Y_g)yhxoG30 z7Mv%VKVB32P5`Eu>szTyBn~SjH&6@W2)r8WE);iw__N?41vT_W#A|iy9mpAiMA`Cp zSWE9*1TjkfwpJlu`weaj&WFZTepFh8xW6o=p z=%`G8Jc}2eB@lao$zp9;Sop^u@$ox%o;&x!2RO6;jZ@2eGH>2QZd-LzQ!VGU4URN_aNehOpYO9+!-QBNTnK-ez8ObW0xRI!#36Ifft5il=+1MaA(;*^4 zp;)$zc^LobhgbzpSYVBEkCYb)ClFqgVg9#tuVmD0B|me*d9yJ=@>)Z>daU173=u>3EcF0ja5H3nh+AEwM128?=b_= zA{oEGjK}616H% zPtV}cesqrUMb9ZTE|cGgi9(y*@6A zBwU1y-Zu=JgztT}3oGIQBwKCYzS6MF5N=qEMD8G5^O65eN=ZCsScg3DxrV>1`?-#N zOrsIGcy#BMY6L(q!f_R!TueEG(lFaTDs74a5BB3ZoZy)`FMeY*tC;cP8e~&evsSqL zkz8HOBr$ss6YvfSfc%AOKK~DG?*SM^)wPe`nVsz=o8FU6NJ1J!AoP+#KtNG!6bmY1 z<+H$R!}ex&gIKVOpx8i)2!hgk4TKsJ0_kPbdvCkRmfv%CHVD25U-|!cU}ux;xifR` zx#ymH%5yq!t9QcX(?mxDocq^E(4bMI&iX(2R)R`@w=gIeoG*Y7^anB&khzazJK4hy zar1v%QBLZwDc*1uSvF}vzT$dMHp)EGR4L@GyqyxnGJ zmxcc|ej~;Wj~$nM&m#*xJ)d7ZZ}iCcaTCW+8Zk^48vEBYVahamP7|Q>km%JIS-0TG zXb-g1)5BsiP|ioh!?oxjs`*bu*GND49I@jceu%{DCFtxYU8%w2)4zt@DG&rCbPq^f z7T|t_laqnrhJf%s-Wd}Uvth%AAHVwQho67`aoY_uLl77O*16`dLK@^c0m9f1B*OxN zLp5*Syoq7)_dNP&g6H+a=$I~|dGjzZ0;ak0uc{c=4^*-+Wzqt}C@c&5hu9Gi%r$?Nw;PXOzKY?Vy6By$s<_x z9X&Qc`!`=4QBm62go&ZFNvN^mpuTFR8B%SJ3}i@AKkOJCa<{Q6jM1@J;TWw8c$LZ? z3RppP1(Ay}w}_VI1mYB;_)3fR@M`2-0Bj1Gzjju{5#k@cdUbSt<(ciM8uaJ+%KG|B zlP?OWc=c4)ll8p86V*a0dwaT7z)1c5_umg2``kw$h222Nfh3Ac!yWJsPFm~)gVHWe z3ikI8Run>$EK~&h!~AFP96e^t2%WMAR_yYpo?7_GC}qW=9Xoayh(9uwjMde(bx2sE zxD3_RRWus10x-AGEDWfM$x;bIp|OxlFX8YJ<~@T8k|k(qOn^VRX|-;i(YzYp1XdKj z#Q+AM{MENKS6Btf?#LA;3@1Xzu%d!o<7OD2MOW9ycao)gG8WcYW~)o0{Yky3w!xS~ z%zBY`gmyG3(>q4V9sNxG-9c!{9TLPHyeT(UT)vu9URX$W^9F<%%47ycHi|*s(MGIxR6haC zwE-B7CezJ+B|I0+g_?`Ic?NfL7w%>n?nVR4Z(D1z6?7jFo*SmbKbWkOV@sf(!PmaZ<7m9P{LO)etV-Y!G<`0;DkwsgyZn5O7%skw|O zq{}reZWCtCoH@a*<@{PC-~4ozmS4{pJ2r#oWD3%YJRWl|kxL29U3`z*T2H(lNX7z# zgPZM38h}yup22I&mPLycHX}?yMw`Mbe#VR$@m`9C?~?^AEbjXTMJ)CWJLw^MI~KKg zMBzn6;i#F~jamnKu~G$7HA$v$^yC9}EIFM5!|^P~v6RgqW0PRo{KF8A*w!o*8wD4k z&f2+?KrTLcF8fkCp!m`+WnV4^ixpo+j*lT&CNp;ul?0$5u(5WUWm>I5L~K3i#%?PS zDYb@hY6|Q$J4C;^v#A=`2GvcSEfr9aDq1>wDS9;9ORD%~TsIn0=sgu^r<-WC8W}Nn zpq+LTk-2CE{ZorE(_+m0F=kqfnU*k83FSq+Bu7sjId$|@n#4?`8tC{ESrgYPYwD6L zCx=ForG#WoA-~!S$oKYK@-ulok^Gd(-n|>_^<=8V)M4&0bu>t%M9uVj8vuhzHweQE5|mU{X@!oQoJvF zYiqCjFgu&o?Be6f&;sshj79^mtaRzD5qI1%;@&s#8Ou<4`7~QGoVjyg%X}YN<21Y4 zR?VC{H_cXKSKIG~Ciy=8GRN*^D;AZPQ{;H%nrJUmO`V^=x~1T0UIKu$*B3wfEOk}v zB}S&vE6E;gpC(QHdHrV}LA6R_E?r83Cib}aqvMw@9sh{#02YcaT^cto86Y#2`SoVX9%orB~7B_WW-yfVs|4~NmV*uvH2 z7oERcr-=dXk*50Mg`)gwS3)MJUBKs-X}P8_|!Y^UYMM``2LZxF`>bp@v*Vw0ove+Vw1+d zszWw784Uteq^!bUBk3+igWB4>{E$H*AtBDa3AIDa1}8iZPM88tm;z3S0s-bA-cbq~ zGY0Qvg~dih9~+Ac%Ze!)dO>k%X;Bd%D!=eVI5U9$7$gH)tpGs;gcvQTZHO8&I0#}W zXz-9-2h-uqNDf0!>Y= z{^px+_S6u+886^9!;jSL`Np-i)zuJGlzRz6=u&Rc>_-31Yma`dGq6J2 z4N>?JGZpVX-afv*zIYe;xKgK=#??opP(yI56@Uk8O!o=X@$$&TqD_o@{l6A0=0<5VwplN z;^iC)#n2z*-16YC@4ox)+wV8+^p6+@w?_PLzx|dY57=?4FDxu}-MUzk3F1pteb}*! z#iG8dOT0Y&3{BbmYWtb=;*qrf+Q zE&qQLpydHR9 zaunu*r?S7bGUx2clP7nt`RJpJ47q&xgAYD9QreRDD>gG*uUsJ|uwpDE)p?lW_Dpvii`he+Ba@I~vNbIRYF>qr$_Qh5MP-#^g499MTIE41B}(*_xA%~^Ko}J*R=phfos2Xv zZiu%xuh9khyUMM=lC>&a{eyIA!W>dz4yiES*R7?5-QI>3?Ky0nQw;3tY4}X5mM{kL zCQPwd0xsgGI)ljDg_Pox-5?t3Le~Z&bvZNC7?xw>>+J-AwHwcu{sj)w>IgR{MF`tjy4s}Hs?Fq9Pen825Zw+E1j+Wh*qB# zT75!CCf~Bv7agreI9pxdXcfT&cyg*V(N^=Et$u`7?-N=bs=F3@Iq>{9-s`Pe{ms#; z-Px)htxgwOjnG}Q)KU(j<3oG(KSz7CLDUt5;Gt40(k(me76`2mj3e#$ z0WA3IIAgSd>*}ZTfX6%#O>w7feup1w08l!P&IBGaft#E}rvi_ua074L(B=)iG0__fu#}W3 z@os&6`}Zf&gi(+j!w*6#=XIP!uRZpMLUY?OnTPRQEl)xmX{iFzdO`zS2u58R1$KB0v|E;l4mEvPe=$8h-8gGxHj<7j~zqjmGw_=K2~`x6f(ri=$iA z4@J{Cx5anE1Mm&dKKA1{2v6^NjM5kI1R!x3*ZGW3or-I}i9veHalFf>ab`W2ztOnr zs-D7BO$9Qf@TZsXwMA*6O(=TR(%g*n*havpba%J6w03tx_3G{I?g2P!PY+$I<>o$` zHr&?-sBq3k@nWIe631^$ede@Koh~FaG$cf)(*^57!=s|YLkC4g!9X7q6BP|xI1oJV z`dcGV_xf!&G~yUHYA+dBtp-+8QFa@7AFB{Fyfmue1>!RUVYHHJcuW|6f`-SqkPG+( z4X@UPxZ%^l`bWKYe@7jp6O=Knq@G~r2wHp*Gn`REetbq}JclR7iPk!HdNJ<7UA+u$ zdIQq!T18hQf~)dSU*c*(Q3>o*(5Op`D{=?6+{M^xmJ}D!O`N2qot9&^>!miU&DwA3 zXyG+Bhc%UA?^^o%`fUbsY??g`yfYt=+n0m&V9ce{kw)E!>+_4i-F z&+_H>U#&_<*xxJ_mQn6l8Gg!5hEp3BxnR| z>|xkf9Iiuz?fgo1qx0Xvg&93Fl>8tX9~a zAD(f~+(nPZ8zSI9tgEN2R$E<(^0L^ERaBKW3~bS9#XoFE=)O_n<|3nlRWF4&MWIlQ z8?S+Q|cf342N%ViOHDQkd zW+5YW@YL!=L{!3vR(%{M`(stM^{nr>apQd1^-xgt@})~re&qb+lBRxMh7mbh#3Uyt8(g*Y zDozw9x`V~_w%(8Botu5voik?RgH&}QH8u5wN(7eH#HNayv1604 zyjY_leNE)XeV2!GZCAmGf(T*y5*Hh(J)iX;fLVIDnA-GHnI%j0iS`$mM5kq+Sdw67 ze^M{5GE)0K4+B3y3aE*neY$%m&G$?eEac-)M5zjm`{}~ zU;e}4tn4#9JPc{l<;#~YJ+tN44F*|mE{q+PK2z^x$mWBz)&rwZ2xjworoKJ}$jlcU z$tih|)hLaV?65X|BYJh3mj!!vYIqdf5^z_u7T5XMbs7AcnjABww6wOipdd37m9k4p zfO%fu0KXboPbvZGwVr{A5oShIYmg{p20xvS8qo3T@4tBQ#EIx=Yk4_foUookofn6M z=sabhffZ325@IwG6t8yfKEg2$wC?06^kO*pKOBAIo{bznV|mBb?4rDb=g#JztH>dn z`7}g=Fjl~a;UpHPQN?tDq@zPVFX7WG(_ptqw|m$u_L=rp$TSmt>52B)py}k9++12I zl9@FtEeX)pW4MJ2yNh#XK+LRw!)_OIjJe2c0*FUAd6!`!V!j8J3(t%h!xL|w`R=8HHFkA3N5sOHL?WJkx4%El-Do^>renYHYQd$PjJExk^Rx2E7JR9f z2N)PMOvjfTD-P8N>H2VE6xtcY^OPxjKc=jq!~`c})QrBbgrpk^Umgd9#UP|zGsjl%#~2G};3 z0CUMs{zKSd((|$g{|vi*$){t>)~`prq#ht^Jc5pxgoHF^>sG3j>zRqbEj)zE zF-MrQLma;Whwa2y@4L_O)jfOCaDj#f1*3LRGji0Q=WqdrNrUMvBBH}Jur<)t91(%u z(=%EvPb548FT3!35xCoE-0cuNUj*(p0(a|Hke^>b?-*s2axRy_rc8G5qW*r4(=!$e zPpl56kSkZOMj4%r8*@>#eUAMh$?DN(H*O?rc~M^8gAeK%BuHU@!aVt8ntb~F`O~*< zednF9qsf>hr^sb)|LAw#!IGT8i=KXZ<3>I6@WVW|O3bQNX>MTuTKY6KC}Go54r4B~ zeduHsiym0wJgVMN7I5=MMN3-4*b7B#f?U1*{2(m;<6DmLn1CmofG3=ZC#1PR6YzwA zd3pJH7>V3G#Ifh)8*A(HjmEsZ;>!G7R9xM~yFzcL!UdKt;7PXBG)p9UIv*H`BrR~& zs%<+qeY<+~_EN^K)J4W|+v4LlZ6bGle-7`bdDrKl_AwfDaQvi6r!z{sMm_hOo@s66 zndhEMgT*IpyB$kupMQ;Mop zjqUlne*Re(Qg>*jBL@e}FcI20QajQD>+9?4>tUCzudb}DuQL|p)zR{-RV8%|_4UX@ zcZHfwJ-l$-T3FW2&%LitPbUb|*OwLugSW3Q?4G`!?(V)mTDcVUe|&vC)INSF%o=w~ zT1T%&qE}OSGp;VGu7}>gAypFH>QVN(-`b_CNe^TG~?T z7XLsz*S9zWOq@DJ@aJIex-w%84sboPiJhbATjoL4kVgFPK;(NDiV(CmHKIqTL`4pW zivf2eLsZN%4=A<%fanZ_Q{HRb_;GQA0)oi|hZGI57i1a4Wo1%@l)I%YFw)6iOy9id z+t2i^$|(zOz9TJ%=zx@Yms(*`*;T}yp32Bsdh#SAaiVf$6ndBlo+pE%|#tv8o?)qUT-o32dGb@iybjyY(O86G7$DpnU-dmYC% z@-q~CiZ!3cJ^tXo{`C+Y@{f3S)?o26zs}BXlf`1{uv<8`rxQg(yF0+Geel*J6HN?t zMJBcIGD(N=3UXbmBs|Np45eyrt88ooRZBDHYm&HorOx(8|A`-!%# zO>6Mfv|WKy;YyoE6Eh7^Rnuai9XFzs^&9jMZ*0_PhP|FlHqb-*`mhZf-gpC6E>~f8X3OKDcL}F?09L9j-*@DTmM9S1LvoXmKgS06rJ8oQf`9EQ|v02`Oen z^i`-UX)VdatE82Dw{`+(+TB=8HvR{4P5EKk*Fm=P2M?v|p;khY@(TI2?~3F> z+qOrpPKeX|x+#?`;mLBI{on&4${qUV;Y1YOS@+vYT3h=~@;(%%hhf$E3G?E8%#Ir_ z(bV14)7aC^kQO~kV_?PM6}lJ33>q6cc1+6T$*?c;B!p+ij>QCll!$T4;bFM@81Qlo zc-gC@l=^=lISb($MuDlaii-SvvX7?@PX_yPT0R9%9TB_StG&+y{knSf>a~Tv?*9JX z=1G&bZG+pFn=oN)xQdr6TaO<Nb<6sJHOlu)wXdTSfOg*o-Hdw8OGAOI&y{%F7==>w2+60 zAy(URub3cINP@N!5gMw~y0}>28}{)5I^2z4U3Ss-w=uub)|XGV(ZN;2LyFT1Au6rT zfysg`|8VDs4`gx3bvMNPG9~*M4PgdXFf76POuZ(d2Bz{1(?J;bNW2T&HqhfEukUdh z2$r7@J3&clG1V-fGZ~Gv$FF-*R(R6(x=e&{AxbAv)_5r+<3hvw_3`nv$J5g@3cGlD zw`)Q|LY&`sJJXko0|!lf;DHBd!4DkLg2Ik5FTecs2!{$Nf_EOhK62z8K?+`8x$Vav ze>~E_jvl$(i^hG`)nQ*zSy4q51gKEB7xemdqYrmG9eu0Z6RRJ754#xjfmrW|2q5dD z*Khdh+rwFBuaCoA8HY7#3Z8u|xQ*h$xw&F~U=BE6pk!HBR5+%+d z+y-1wkgv3pS6GA+4Y6t!VAUronyuLo2P9}rKO9i2+7xzt5 z9Xon^mJBv$^GtRbPs*J8FzTy9r(yOAo(3?T6tU-cF`UGhTY9x5XJ;yF#$K9>U+=Zt zOrwu~NW~Z|*ORSNh~A!0*4n!P{QE{KjakBfy~Nm1(OB7hk=nVXdWkkd7A_4OlFEjV z5Fyg;<}#A*^)OM+kPmU95%0p!&9G(Q*MVfTbH-3zWGwEO#<){mWGu!#AS)|BKd01K z3h=vYr!mm zN%MVth7Ju6uD~mChMs)LJp;SYVQ7Qq7>q^#(@Jy0(EmO~HFY)U@9J8FWI#!- z#Dmn9mlhFVfZVj}eNL)6F6`N}=X^U#zO7%cibSNxm|)dOlEq7J+T}`Lqjb`vk3Kq4 zVs!3A6~=?_zyJPE8Ae|S(|_(~pX$Q=LI|Lu0))FXG*pyeDJm$dsD_$z^Dg%Bjqs)> zLC>+X=_23-V_TmMwWnb0mEN>#9c$lU^vXQ+?29@P8H`?eP{+8t5m&Goot|_9=a5Pw zH{^kq6y+5b7hOG{lV4U=oSOr?9_=HnLb3tgBt(aONzka2sVNH=o;!IWJ+szspEgtm zUXc!&_R1?ppoi_gym-lyARiU?5B*5wc>a?7tekzRunHj& zgM98jKcM(@?~&>N*be=xj(qgd6HgrZb}~%B^mv!saHN+e_e>#7Hi5p&Wmem>=K#y9|lEOAID)- zDE7vE;8lvBo&a9eK@D)c{+VZ&-<)aPQ(Q(2R-E35+FS2yDj|^Ibm_}wzkqz8c_3}0>#1g52Jq7BAHStv6-OT zOEjdT57BNi2EjhveFRo`GXTJ(7S3wr45`s=?VO+Pi_gE_luO)`o_zAjBzJOk(^sE= zq4Q%~TZy?Eyj^WRdp0f(dp2I`9UTn5$=l$JZ{pTmC6Nn~p-$nDJ`#-(FXPoZz1(bq zZ5cxeBiHbP5ZA?h)LzH?G*{;dCm{q|A4o;pk zXU-H907`dlUrDje>FSv$c{h%{B!5gBS+vo z{DP0-IM!4v)`gTwlsazsC2Nd?SBxzE+}u~#yQT) zty|~c9}m$KfB*a!sQ=or*B~FrUzk6sZSUT_ZIk9t#jf=dBc%u2R;j7v1*A0|Idb>1 z|5-qL6zwpo8UHte+G%8>=)V!!9)&FomM^XLzY*Q;FfiOM#QiUcnuzhH)$*uBrCN8O z+}*vfz6ueJm34(KF3t8HJWfk{soaKIiZmX_4VEq%UrHqjKC-M#BTs|X+2H+UXlPd6 zp&x$u;ZXkNs;VLtGE!n=RfS(tWG0;sDFvxC)WQv++)qsim6gJS4*h#{RA`_&4W0-C za@W{D{+K%zG1-5@Yed`UM$}m>FGf&e-l@#0s;a!4nnqh!Qx5`ncsD52G|-f4 z$oOcpnL2vYs4f}EHZ_?$MJUP0b(!1R;#?FcO7G%5itX#Ycp0h#?*LE^r^DB;?jN~Le1vq}9`|exPo^~kCs2JtQ z$a2zVl5#XbX$K_uo7YQl3lFY*_CPNcVuE`|SWmL+M6zGuPgIar)SiY6LtqjwWoAvk;k4^V$EB*uN zCVv#S`AvWHiN~|9HuK_R#|96k5*$}2CbM2zchv}{kAB1{ySW9C8$f`4IGXhY>-3r(^qpSooau zV~6(d+xzFvUAqsRETZX7wM8cn?b*HakG=a296o;GLQak#zmb`Pbq$gI5(KKs?KiA` zsGQl|Yh|4ByUF6%0-GR*n_F9v^8zjcPK#2hRhgU0QKY`P`tq484Gm}u@|){H!*YFa ze{WBJe{UO&w_wzykApaO*FvYN$g96zet!!2y$16+9pnEg=JRsQ=Y-bQ_Ac1u5S7u{ z(bm!gf!om7+}_m=HN#j@-zwx9RWx@YAQ#I{XFI+F?xwG%*0$EBdJZLa;dV^G<3Y2v zAo9Rb4XBUCSNGFr7CEw$u+Fo@(u*c~di(lmyf3U0fV}Dj(l1_J78c(%LA)ymYDR@u%wTh0zk6taTF*h@={3U!2Hk*r$cI!q!<*jo z{c=}%f8dU=L0_=!nC?q~jc1A#@Dkeovo*oa4armR}%)+vgK?)p4F)6`03kBm1h^{_H zt|Hi^i)>-285?0{ZraeO*ulQ8fpM@>j2Pq=5)q)%bKPA?oq|}SX;N>pr^#@td(Z@` zKgieWg+tt`w!pOen^EPP^vYBu@_dSfV0!#W*0Z7>Q(8iH_N5EwE?hvUz{Sk0%U7U6 z#f_g%8YatpGatSPL*v%3+$*?Xcz{*M$M`JDw3E|=H>L9f?82DwO zVG)rS?C7Y$gLE=@jo2XGP>F(c!xB!-KLjDWP{ z-P%z)==z%xbF7}TTcI5B!EJ3FeWahm0tt0V%=ALu_rkaA0w)xtwts-r!n6c)<5SFy z-@va+FgISt+!)REw_!T8bab|Lp^lTXwA9_rP2Am9TZyS&-Ofqe0(E}K){t`j;On;H zQ{CN1tNQy%BOmF=%IoR2;5nO4|4E&B`>&YMAN?IfEykDoSQQf7#dZs?I14B=D^2hy z-)Ycn-hLdB1gH0HUb|`~^1x}_=X|j>G_?QN+NlV!y3an@aXe`Mw|Mgfn{SXuqW$08 z`#=1#t0x)ZSvV9Qtp0MneUPhQRQyVAbldN z+7lJ^orr}<0eg- zgq>HO#V}g!6@p}_LBT;Gfm(3ph!L>js{%rzhf*Cwr&ij#+Iz*ERH5z%Z!zkSfPlfU z4#9*H$#K1%M4?39mBnM+z0_lQXRH(>BnFsGKSoSq>uPWB?q@h&VQ%TcFk!IlE{4%f zop#t|IHV|eY4EF#4oD|6OI+aBba&;t&FwYDY7th+HZksCixF#F@?ok3J3_eFZ!^ z7d-k9cr?1dmsT-Us}T!Y(@kOa@G{p{6=a^zE(0iSZEYLM0W(%?e99?X4m@Lw^BKD= z*FPgVqr=e|hUkFsC|JcGfBfj0H7BeA$%~+tJvY&#?dbZSHcuL(Q_DOw7CM#09thXv z<=U8WCr^$aKaOl2*nn-RC%&_t%qE>b6dsl8;%2?L55XidE=_8;R6+{v+)2MdQ;uJX z=`Z{w86XKb#2;ogtCC+zLR{ESF6U34yjhv zAF~M@@<-orGR-rR&ra3WHg;O{aJ*87PFnQt{h8>dUw%Pw=;edEf8UHSw=H|mUdYKW z%sYGf^qI3~kL}*Ref!S6`w*@No*0<4SDlkq#a%yXF}qQI4YS)2&2;p^vE}LM?(6F! zk@j_BE$ijnT)>W@94l3O`+G|1?1qIF;m!e-2e%?k)qJIex~lqHEGiZw-wAjLz+ba^ zv5Pxv>h4Puv~Mv+`#&Q*I~gsf>`lU_E845vh}e$$$EHWFGCH_!lIVNgfi)U@W8UKzk(I)0?(|e)=#$I z<2YU-X@(?nlAJWSmXuV~Akd<=wyvqI!&cRWqNq&2RnLLVO??Jt+&F`472F5drNVC^f+n1do_bCvL|rGNr;-Vy z3#N8rhv|-Goc=$-n8(1)fcOPAxU8m<=KA4PR#90~TUA?C21_GSn<~nRVdmQ(%!ozU zWHYoOgrc%YZ^y~b^!3whcY8k}F6yE84O9B-pv{C}VZUopB1sK?^MyMn%?4J3H`3Bf zR)zn4z}1}LuIN9B^tdy2n8Aj|-Rsf}TRl`LDeNikmb^UI>2qc=%@r?R#Y^R_QuXB4Gpo;$SRr`?xP$r7); z;*)>uIB_vEy(r_vpKHE5fQiPd%}rHB+0cf%Bc~@Ml=GhDZb5y(@V#1G-P8@8^78LH z%TJsr7jQrR#e91?Elwn&-gkGdzpA*d&w3{J6M1BaytBEdcssPS-^)-;oeheRf0lc$ zr?co>5h9mRJl4+I5e4lT=Goe+XU4sqM1CfpE8u^h6(FkGjy)62H%YO-Ko03SrSig_ z+%%1QAN91Bb}7Bx?2UC{NtlkGNDqS>+d}VqF~tD|Ln9%+m%jW!R3Ad)Pf=XZ_0q7s zQhQ)HS&WPJnOV6M?FEgVGR44Z>(?U^h#uV6h0tf$jbDc3~YeP0mb0+lkELQ3q2tT3H~ z-a4BG^dUQ4yYGh+?d|O*yWJ5OaQAzU8zwt6)zf4n^MK3{CwHX`;1LiqbmnufEr~iF zJEpYSq87vZjxb!8zUnShOIgvF*ux=jzj1$r1U?vEtx(&#x?yyvOJ8j;Oyb?_M}Iv} zrLhH=7?_aJ_e`+!#7?0%maz+myE>)p&G*wS7GMoU*SDQ4sKHV?gU)yHnQ>OtYE0oN#R-peHdtzDq~S-9h~0_{Dwp*lP>*(H1Bl(H1Bp(H1Bt(H1BxIa|0n4U>Qubat5f-ka}pJ>x4gPjS~0@ahX_}<3s>*I z-PH%Umr~QKQ*wHBN>8s&8*pBI5qNJ2WY$;EAYRn5*DS6f|P*=B{Z#c@nsMiu$!qu)yU zqbE+Bh@h%+d2rI}ufIM)yXuP-9~q?hO_3;U+P{)v8^oc0@2R`)n)f0clCQk@`XkRy z#_vAF_Wf(OSV<;!OJ%a;jYK&Hg-Z5mi?<8bU+8?HhcMxS4ZST|my!YG9e z8+QJDUR@tWuL_0ZNg`30T6;LTCMI!Qpr4YM%@R*C%Y1 zZgh!^3^ARh8!tu$KHDDtv6r)*OJ-yEySm%6C-=}!%cReH}c^WWMS@d9IM$! z$Sgdo#ML(#;Sde=#{A;C$~Nq=7dwu(H7kGLb2_iK%^Gk=thcL#B!P4l_hy1`$FIpW zZr}bUx2eN7@l8PW8pc#smeWZ?(}>}BE-mfza+As2Tr3FSyL$3`om6YXr`qbOiZbkY z9nc*xgx1x--oR_RkSA1p>C(uNu|XP{t5P3|U65U5(|W5dy&YGNovy6uCQ@l%W4#jw zuD-EPY8d10fgl8}D_j%yUKT;PgTfeJUnY1kW|%H4#AojFQ|Mgt^l)>5Jq0^GFPbIb zu43p99M*sTC7F>K=+|ln=5X1MPV06?YcC}a;cN&UIEXI-g+@Do|2nk>N%h6(KOu{Ib+T1!SGqb?hgMel~Em zj`Mn*xlI-1@8;$Ie~n15k*HmUKeBl7;_0Eimo8m$ zGYpPUmP3?RSnaisJvQ>r=iYnoz0rQMj_Trnzw_d>T9|mTkW{)yCBH($1@x;#by&W zcvx~phS8A`e&8#g=sO<)a8mFn#IW8yCfZ%*GV_6Z#?OnAY2bMYjf@V7Nw{;6XXLO+ za3HwDtV`zM2^HSHUT$uP{>E*#w27u=BAjua;C z0P@n!^|?ZD2voO`7&e?%3trg4csTszjVlWJ-0R>-s#VY^idUUd@H&oMu_7ZA40-5} zpTGM2^X2bx&sX3}x$yv);M?@Jwua8X*itKk|D-K;!F7xo z=Orq0GG<%b$a}$;<3sJ4d-lBi(!60#zN}FBMNfb7<(K#DIb(^t_b;w&Ya>%|+3UGd z<6z2UNn2YzO!xy#sTGNE*|I;bW6Lp8YYW&i>(JR|_?!q03KjLs0|Udt-9=aT?aqWf zvdzZX#9m=xfdR4}tGT1A$0`aBM?TvqnM~{DVXmqW_|nZMkQyN5u-^7x&z5q(AOKg| zuVc(HV}>H)%E6eAKdz1jUrxItC=4h}PfoeR$(MuT#)d>pe&UJv_@R-};~bp%_~Yai zp7*+*GaDR?DOYK=QXVK;UGx2fyHj}_7`UCt}bKf4n^$-DPwXa9m2*Vh9-{Q2jfe^37^JA3nX zu0wL@4!WXe2M9qm0}N(jZ; zs$#HW=D9T+4nbg^&nZ8D`bwq2qpXSz)P4W+@8@cjBSwty>u-}t4j@tSosds9Y}jzP z;DAKZCJPG-B_-!~ZH@3aP4)f#3`fu=fJb*LoWoOr8rv_}xgx|Gj>6w!yim-7nBn#ll-1B!%rP4Braf=!`^+~ioEz+wK5)*RMGt{3$ z+?#?%hxC>1!d>mSQqW}8OTuOKa5U6_zyKQLo}j~4M3R`93M*yG`*0$V~dR#(=7 z8FMq+JA0^Qy}N;PQ$tz~(me{ia!75>#x85Y12t$Ou|Eb<=D3MU*fOHsP{L={wVes4(ZnfjQ4Z-4oB(VY!#ZRy`)R1X;C0;~D@$=kXu5P;WMZ9!~XTR$iI zi`lq1kEYyQy0!T%_>HN9_x3x)<9So=WCw}{*43`(-@&g<<#>#e-VTtWm0E!L( zYVBoZ-s7jmV-fI@$CZ^aQjOMCAyr#wCTt68!$>>=0|Q3gd$%A6`}?7Axq#&YecS|& ztEe)2cxkWaI6Gieg8byy@!Xg(iHMqKQ$C?h{Wu@!(`{SJ&4b zg=ic(bz1Vo30iIJjC)5X-hKc5_b+&E8U&#}3Vt#bjfrRb8~%yqG`9zbNj$^t0C3C1_qyB3~Y1%2a->EP4odVMEv2JH9xKTWQP4Ac={iJwfugd$S2zGw2y<=|KWd> zjk?>)#zgxayz97e(Sf1T@4o-`P(?*WL1Dy@asNAkIhkBRmCdchz`ng`5zVo0BKMS9;{*Ub`i z-5x>LjS+O+CPCL_uIairr~Tt*v|$2m$Zcq=1llO!x=!qQ1lCa<+w-Qh6g@#ehPFw#Dr)%AD>)JC3xw;2--h;#Ks#EXy(;vw z^J#sq(T;|!dqM}yQk{s5#?#K#F-R+hiA+K*W!GLb#0IIQoXD?~=4n<|Rc7z|efxp^ z=~pW%kvs)xf|7h-)UkR`RxEC*?h^MPmsTo8KCR8x2OLu}1c^JNbnbD(BA`2 zmcUZN697^zS1lQoz3V6VM8EiA(QcO{kB z^q#iM#M!*%3x36|+vSWIRTcMsnPR)ys(r zM|^0MeL?>56Gsm1If&qBfF4}Ad^InZY~T@Yp*V3OJNp!r)H7$#@sf*I5k;1pXYg6K zE;4fSue&!MIdUxh?Af!(+V<`$-n$O?DrA;H79317jBV;jUNZU~n1P?Z-;e~e>_%XL z%z&MD4FH+epnumRDsG#A6e~u3J@L?)GzV^DDtUtf}CM5)Bb(vjp1e+t8l78Ldp972k%|CeS7e*R=@OH43!m+tBt3v?B%D zVu7|wpw-@n_NLc$J})o|ZrN87;ku9AaNWbgb>+9AtrTeO0_{fOp3e)k<8MQIUZCX! z+A4uoD$owP4Q+)$J4>L=yP=;g*Jy9ls#}()BeI)MH$5DLHdUl zD2v>z1~rfDCVQ#`SShfmo1K|G-OB%+ae;IY+Nf1s%OQm2^0(sHnT;)tz#K5w1Q$xH=D4Jcy*h2cdG%PVY4<`)c>!8VEVPsvI>yt3Dkvm#o>a$wmR2z{ z8j;?OS5;LZO)tx>I*>}2Du5iasEZ*2*oReQwqYF(uWhHLP5k8TwWX)vy4iKAv=-r^ z6DGLU9)`c>%fnl?1O@_^VUSGLZ(?n7F}9{+xs5gT!#|Ths3daHqD7GQs7(N;mX!d6(dS5&55-Se=YKdRi~7dg4iccwa>4M~*yi5FQd)oOl%%*U*%(hT_vm z!nUzMC4{$)J|KNiq-B$EM6^3zh`6UXFEW`#Y(a_b9*bBFkBSFiOhKXGQM1g3EOG$K zAQjFPiC9)qFSLwakyS3O@$;(@)=TPhjDmJS<3tCrw_H1&&>h9qS?5o$+g604<}NZ> zOV5cDMb&7cyS>*YGq}1$!9WBzv$5BBWbaQq+sJxeBxyUemnR<~XjIC7{mmDj_7RzZ zdt3=sWICZ<-i^qjq?M`-i28=o#lA!SO@SW#?%|m`m!u$goE|F>!|S4E4X$erV(j5; zI=toR<=mF?oq)y@^=adjygXuzzf7XkNB}JDYNgI3s0(g#Prp!Ae>(}MkQZ)1qzLzi z^G_yk?*#s(!xC`wIai41!}RjR1K?DPTf%7+E{0P$TDz5YU=e5&1lnH&T9H5tgu7eP zN(I^`fwo_uH4C(|8)&H&_xflllt`wudVzA3KzT->Y`!VwUpt@6f2Y(7bm|-E{vMx| z%1VU09U)v(EL<~Rpk;1DYZquE1lnqWwpgI`xee`2uj_nP`E6)5!gcSt;kvtp>$={C zwo#y^Yz;oxF5L4;fp+q3XmbSGZh^L0ptTFMF}I;@5NHv@Nv~_Xp`TH&^OhWSNuX5- zw8a8#yKr6If6(5DRWwMTo#dcRB&n$a_3r|8(|=Ik$d5{aI#IZCkwDum(B5tiIk}@l zpq(JlI_E*#ZLWJjxUN{B^%iKS{{6ZOAmtvw9HQBd4`97{fLfjFu58(N#iTt@*Isnt z*skAx*}wJk6#F7%{rzTt$G*g6+3K~w?>l-SKey=gxii^CaId|?rv`2J?XK#ui>%%7 zAvKuQH8Ot)XpsWq$4lCI_BXmZJdI;1^ZVJf$f3jB+(>mnxmCq>mmb;l)1|D6ei^B{ zQq;tRj!k5c-H}bp@6y5yVceX#i(Y;D>Goc%Nmgz6uN%Jl-xKbG4ek30;uK^q4ePlp<}t5T$Ft<0~rSj;kow~9#Z{*j{wxoX`|Z>+mnY&Ex5Vu?=;Ql%-nWjbr{xkO`k3p_av^ewIWRwPd)BZ--`zJZry;-yx7G}3+9zhoJnafwM z)MWqi%P-Y~620LVBVrg!E*G~O+)ce#kDMy*vC7>>J@CNT(CE7!U9gDveCCBElfy`M zcJ}1SlVw$h1}r8c*REZ=Y10pzcN{u&==en=&=g4V0kpbiXI0gYKgPu=$3Oe*v#~Bi zhV+htt94k%Su-3Rh~Y_P8~Q~COKCdB=#DKzDb<@(djv5;AB|KmaNf1_uZD2fz-iqxd4a(6F#D zgBqW3^;>jWdH%9~pp5n}z!&E0c z^VX|0VB-V!m71C)SY);_y}b@(5zKTzU{IB+y**)33g$IEmXS1xi;5*c%cB)+@ZXSV ze^vSsP{5GlM_pYmAFu!A;O>tpfLL?$(WB&7biX!S+1eU0%ugy-s3i=_7q$|H2Q<2C zVHMm4P&w<$s%6UV#*0l&2%EE4R>IKdVYA5;wY5V65ZB|PZSS&?+FHQzl5RoYq)6v8 zF*~0_Z_Y$--i_W2hn82In{_@N&aP7zGTTkX=W=ti&SW4d?9}P(tAO%6mwg2gda0pR zg%_bpUdXR#rL(Onud@?*osFgC^+wEx+>-LDc8eh#TB(nJ5`zo81jI*427vU2b04e&s&GkQ!;L0Xgm6T!17Bn7hy$b@>8 zy||MBOel#< z2w)n+J>`rEecfv(K0E?dB;YHln(d}#9wSG9<&as+VL`$q?Bhf>PGvxiU9B#{KOh8= zMB#pl)^^uW#4Scgqr_)yENaMNRs`u#c`Ma5Pw9(Rw7%Xxh;ZzXsoHVRDh<=xgsIun z4-{^v4f5uCbDaa$kQJdbp{e-cv2kj`UE(KK znU2CKv8brgN$DB&F~&mhQyslFqxJ!h;-`pf^s~Ti_k*6IY|a9=%>uVYBG&HLUoYkq zq05VMF6Ne%WS=^H_EL8C#lj-YvZ8{En>PFcVM^9gsh<{PZLcZF#4hmYmepU-kXx(^ z)vYzfbyU#z0c|T1;wV7b%s2uFnPzim{?4Dnhm063Z>~K6;e4RBSzBD@fppiS;dkqxyrcU0vKb{TL3qDIwc9Dk+#`Q-Si%hBubJ^+4Mp- z2?-=26o(!0m!K z);zU3_>Pq~Ah=7Uju3bN4B-PvHb|y52ATYQy*OWWttTi1j$YM>lq7K{C!=>0M(+`f z-f$Jlg!B0o;=uj%Q^=JOJ9-c(wu48{pFg^LH%Q$MokC}GG48giODFIjaCV>x9!|nERNVO^=&;72yd9W+uABmAH{xpyaeRa z9LqAEzCm-?Bab}&^zE^pybXx=f0O5lz2oVpAANM`q|hLp+^RA8NG4Xb&-BC-Pn&Mb zgwI?Yt}mF|Ox2$fx?3EYPpeI)#EfO2ky|#MdWb{9I^{)rAjV7>F^t)!2CrcSVd})e z-eK@IY)T{lr}*KA&D)Qht3$eJLJziU;5M6jq|#mDu-WT+7{XPiV0(I4lq!X5z)N*= za1$V>PNOu36PX=w%K{95EF1JPHyi>WakaH0SxR9^h>9a9rO=3w@#7DLATp0fSk!2! z2`MRH>IyZ*B71qHiAv$H+4c3&8b5P5fEI1d-L!uo$wk!C-7RfxWo6ykU_WP{XUOJG z!YC5H#)TNgahQYSFb9prHf-r`cTW$NoyOW`N8^cO7oiKNwA$9P)2GRhRkb3^v)9FF zEs7&HqNw(-TQ^itoB4LoEIMUWYNOwUEioY3(k8(E^{ zy%u9XA+&G`8er7Kv*auH!7#oXfnxIxl-cpvlwSZ0j4080>`7# zgp8dyA!F)-dC&~N(x{7SLn2&T+pLl3Fs*igW`=r6ieCCptky=(%E6n4AJ38><>W^M zC(e#*ugR?FKU^w$@T$=ci?K)a;2F_YT$6{dMr=4K#)QN~&7`GSY{!#!X0a7`e*Wi5 z-X~shiP-Xc@rr(8ThAY~Z4uiV#I`fUwim^=^?%TI$mf4`+aaI7><`-V;&tbX*Hwzw ztroBA`h&KEV%r&F+a|GXt=Lxo2W_u;-J$Obmj3_Q&vNm)_g!^eFffn-_WOgjbzsShKeo2#XG)1yyM@ldN=IP9b69@&%E>g&5R+XV2y$FrQn}#>=eXK95G%ou6Odj^&#~ zqMcrMSA7wX$Ed1pjX}=BlG&r8g0MFP$EAc52YXmlLIP8>9eBKhuJDw(&!nSd6b|K6 z(?bdBFnO2^|{e*Qx_7HrBr;HS2zRXRn0UahxSzb^95o{Pvoaw3Go!pu`ue==9p z$@#gcXvA8Rky%oQS~pmWGQ*1`U;FbS;OI$Lc=A3?V_H8>hbB~wD|?bU)ksc+|#OPp#m{@2C8K- zFq^mXv-#=5F$bu%hxryfpI|>fcI?>aX+UZ|tZZ@o0zG%1qopMc|Mf6%n@eyhrHXaB zNL0h^WK~Me0M%+^8Jl=FbmM35PGrkAy)9hI^A*#F(wX5H!kXyoK3Ll(h9-Pvs@wC(vEv&x9qcQkXEiIyUFcYJnhS6Vw(I+^d zsTlo;s(J^c7_$lGxeJO2u^Y^0D1cQ}n6f`}T%fDwR8G#t29m4c9BjCllXI#D^*TyQ zta@!kf*9@jrAfPPYb885nM3pCasEZwI;|F7jE|rqvjdm5a>}j0kh%i@{PCBq9J}?> z0L!kGy6P{(mQEKk``1ezAL&X%p`wk4)nQ;rW+km;ca z9nVRoN6Oxa2n+B>kH^HsjEo5z8yf?wcDt3{=^&ND@txf5sa1HUR(+;@+ttGt7`0a zTX9KAaY-r4xK)&w@68H!)>Y@9%sE{MKWS@8>tN$~SS}~d*Eu=qU~65wlL}*;GE6g5 zY|@7B)+3hrt8Y9=NHLc3sgKPa@;!V4we_6<#vjhGpfS_ON9e*vX-%Vv#x7@BW0*`y zqujf8M5wG1CpcDm^l0fZ{8A}11}QoJ0k>2Z9)uB$TX@3_H;iH}3cV8eTyIw zS-GUOljc}QMXxqv2r*Bm0w^R&9mdaRXuAsSa3nQ1N)%EvwK#(+q9@anL9KS9b;9`q z!nRV3G=y>_a5h(7JhprH?!$)<|NPy@VCa7LqmR~q`57Q*AAj}z;nPQc*}wD0-7pNE z#)W-e%GK-Sl+M2XK`%@qp8Ssar~rc03l-k&@`3%|ENw1F{i`OcUabRcS%+G0q2ZAb zlAr*Uc?3!fMg$v(6sdIJD<){J&=F%t2dN_hK`k~KR~hK>qRh0}Ad#^+wSw-Ai0Gy9 zi;`8Mu(}k+uD53ZQwepfGWkr}Tzn1B3aEF7(1~8LI?LZNa?tKJpaWND(Z#|S!ru(8 z_@BO&-&*&GttW`BCyTAu54UdngVy=iYF#kgTJs03tqlpmmGb`f0Is z^}qEwd!_a7Q6r@GC8=GU#lVNUm*6v!iM-&ooe0dDEQww0JG`SIB6AJg%kR!Xgyvf3 z5aouPg9y(5J_i{zxPZ0Hou0}q-2}{)1)0Iz=~!cmhj(JmH*Nc zWI-f%KvqO@2V_YkcOb5bct=AP#i2X+A8Sjjc>S5;_0JDqzY~qc6_t3L|GgJ~Cs(9B zxjs1oxxRP?a(!}&q3dTru880AF33a%=2-^O5-KaE(T^gZFm8p+D}X=fc-+d>h);PkWCO4emt! ze13_qlh;G*zFs(9$x21oj*}LA`uR53&78XBr!}9vXSG`2`Q#sGx;W`Qfv3LzSD{t$ zY!XPvt6F_sbK#@>Cnz1^@}|B~y#DNcbLY;z{=TvEp;cd^Y}6oVt`)RG2qIT<9AlU? z4}5%|!6mA`;V+A3rQLYrjnii>e>nrx0SFs0FD*(5utwL`BGig(^2!DWVaUWc2LZXw zZbP{^?B31wM7fjl5*DT*r-oNe@T}p7oZ(od9E#NH=)geG5o)wr4XUp~3pNCj^jvJt zs2T@FC@hFd_@h$@I@`k-jVCc0t1%i6VKg4bXiP!f5NH#&y2iH7&PHM`+w6^JebeLg$OyUk83w4?XnIO|i0?>`yKJA%gq4=;E1z{?M4{u<&%?tu|3>P0MHA@#M45z4XGf&%W^N zqxk&t$-y1FaIx$L?bO#ZM8%F;Ey+OPIZiDxjEyb)YD>AFzur&61^&AJlMg@qU>g!N zk3-)o>G!!jFfHDs-em8OabA8uoqv=Z>}OSyNZ3kmT0pZ(2!m}ECHd%P0wuFmm0Y@b z?lSJZ0VzQqb0A1AyZX>&P9$-YY}}?6%EqmgqGmC)F$<#7k+f{Eva-AnnwT10!J;An z*u&MpwyEVPmMv#3Q&15F|Eu$3y(rbDBEOEB6s6L}jT5WUtxq0r$XB1TOZ zjl{Ot1aJW2GN@|c@|j15s0sUhSah^ef_nj9u?I}Gtarczg%OXwUblo5qn6i0h89AG zZjWN{D#4ScVr|GyKxne0-j2B(7Lm|4hZcsBe!TfCPYC>^)i=Wr)lwa@x?8%I$iWgEMut?#*$dPeq>CjpTHRGhfNTZSYluA@PLbc7` zP6V4u>E{o$jW{l(nm1Xk)tK8uRef2(2xH2MY_&`-T>ebShEc`T3ubCTDurZ*Imjh7 zGxewwOu5WVP>9Y*)r-}Mf}+IA;Ch_P5GBDm5&tAc91;)U-y?~%QrJB>uQVr*oLRcG zgq&N_iL*=ov0&)@bQm_3myl*T)C7=DTr7+3beW|d5RT}K68NXgBDP?7w^I$>ze>=^ zS}LeovdfGgiyBCHZR-XHBZ)<3F~)XMusDl}bd!=ORLzhTK`;ta&=0raJft>~fKech z2F$arZlnV@basM~969*_WKcz%23EGaztvWeljCyHG$+U3xw#cBE=#DvFd{Z(X6o$u zn>T;{d1T}}@BAY<`NF}2U%_iAm#Hdqk*&MTOog5kH`1^$yva`2XI9b1|uc@ry zlt{t~mJ+(HEnT1l-HZwyPo|@a!n@R$u<3UJ2{0Fbtpne|2cQt%Nxg)91Ao(f1SBB* z%9zbBJx^AWP~2@(Lw$WiJ%Za^jg37$_|Xmk37A6VJj%)V8KPnVI+dYJANkwmiZ#jr zv+#OR^W5!rksu6eV#x6WgUC=4d`H;(o!4`qYvThhvdY znPMDY#n1Ph$KGt`$CGy?4hP7Z@5eTcnoFcs@$uurll(e!CV5(^B0}PPi8>~k)!)Ae zAg=;+poa3$w^IYW55MCbNq_&$RKZhn9;Y#KsMjkz$*%_vkf$}+PwHp%SEIg5pav`& zYT#tlYX2ZA8VzVpsKiRj72{V@S=Q^d1}hjvpvlhS{0iCG-)ryyKVOxP!il!NC)iNZInn zBXjC&YJ;pWze19j@k9nZEab3Ao_Xd~g5$%%Jhd3vY;gM}-1F?SucQ;YE$Tzn%JFJ| z(y* zReXPKZD|Skk}hdz7DMPzJLHV5BG9+ zXJ<>h7d6cZKHrkefZ&?(*M+MIlg1$L@6Qh)0L}O#l26jJRB9DA0aOXsVi!=!DJ_@8 zvK>X|&h--A4hEA%g4xP=oY+)ogTWsap524}z0FSE)sHj)W@!I4VQr3pe8fXOLLna^ zQ8cBH!{tU?Y>eF2Tx}~lSGYS%)<)$N7w1r#A5&7MeNWKopO5(wb^Nfzlu#ugEFQy; zS)EF4N9tPHeYv@r7?@4)ovEnpB!P`f^R@fhEopQF9kE)2;-jU|44a#~;NC|G5W2F` zLPtlhmZN$g9yB8hGw4vsJuwSBTLc#+Xq&?;I&#~{mu#r!jb&T=C~gL?H8Emt zM5hgB#@|soDv=wq9wIRpV)0!QafS$d*DJNVOR5Vl71&56Y|tIba&oGwa&l@oM7XU| z@=Y(brXxQ*lfRxF_-?}n%(>@KYWv(d4&G=wGt-JZJ|c&k=w~XxS53f`pLAAH8juO8pz9pTLlgdA$TlcT8K@x!o|4BP&TO!zB@TSlKyG>~X*UI3_ z@8S%ZnD(|VuWvvaJ1zg{;c6;sR%WKqA*44z;hl8zYx7{md?y@Ru@PxAO zy3$wG8R#n&={NL}l{0J`H*TDn;4Z)!>_ zMU$tt4leiR=EkblUUcOdF2aO~8?5yC-1YS=JuryM*lLrBnms!PtmY<@l@18N&KL3A zP2*tVu-*`LbQG)}Qyef|=yt6MEOZF28IbuR@wt7n*1W z;lpe4Z}G`up{^fY0{M89tjT#;lP};Gs>ywm3461K^E&FnvUHggykm^YCyJH6C{n5I_W028b*cHT4hz zBLUxd62391sXCkq6F?vzZg8CRxH?)o++hF5byQI30VetV{Oj-k@WXAZ(if(tMme ze?K#EKRrEeH!9Ym(wCf&0RcPCVDp;&`zabAm3qAdej=6Wje02y-&-6<_2S=u&ebva z)}zQc<6B>;p^in^>%+S2?Ewo-Z%;Q&HK?6g3MlYB&9z_*<5)0?)%C76^!L~E`T(uY zw0ZOR@$27vXX}N_Borz`LCp8R-m=ZYzVJeGWAAECHrQB_hk7Gv)JAF}mZEfQeH*Fo z*vDqiryk?7G6aoq9X7v*qHS2FfsvB*m)5N|;7(v8A|AysgM$)=0nQq_aTfLznI z%b-*MN4-jZoFzXJx!=|e(mPjQuTZ>Rynsg#R*miC`Yq!1!mm_}Cp)zct;2A=kSGZi z9z~OteCVN&Q&RC)xxJ#a$W~b0EYERN3&<=*$44^`Z5Vaxh7YnN4@^E}O+&W%yL22t zaGUsCYByc{A|}$~^wD|N-<_qmuA$l1Qe)4eJrb)D8!j=^@IbWH5FDZl4~%1asTQgD z&1R$biItRrdmFZrA}Wz@XRDDZ`?2!Fjs}H)sZcn%u&}xso3eEs_MKVRfz z7aht&lP4c~OEo#NtDvCPt?oq?@pNi8IoOxt;#MGTO?GIq``|66xERCZ*WK-;UwP^I z=b!)UeVpvR`0jZ}kIs85q87Q3w(9+R&%mt){6T#ks(Ume#~#?t8jPpk0K6CyQg;m+ zw~Y4;075T$Dzbxs?0KLUAv~{3SDsF}9t_g2m1Tf#N3~1;|C4oy;eK z559UnwL_cwxClLVua=7Xj>_YBR00~@La$X^!2=Ue z#8N(upD7$^`2AtNO$0?8IdbGP5fstZaPZ@gKR$p!{xB$FnFxw-37`lN-2oJ#M-iaS z07WdlACIK~MQr|Lt>U^79nh&dMqGC*9ZFB5AEI9uj)(AjD0Lm4%`mdR7moc@Gd^=2 zass{}2?IF3kUVnTb!m7r6nL3sS3nUK8ImmsJ|Z296At#4AX(4P1Wzm+?9V;2pj(h; zuy*tn0I^S@BhFccug9m=1H&8O@9VKw!}npUBYOz!MdaK95y1+G2%Z%X5s0Viw7$+B z_<{JIPM=mMUprc%7N|khAQM%9l}e$~EA(m>JP#RI=>dcNk+d+uDd+(ufsnlnZq92-4sV$Omd!C&K zfBhH8Jbnj?5f0{ovB++*_`UKf5_^_Tj1CG^`Ey6nnh(rJoED7cfymGH2eB^jY5Yq52^F#%o!aCTzSUhV5hODMnu5p8aHOb z7(I21mD+2O#xBShof7sx_q77*1X((QDUy;)@5R0*@S^*H`xS5Jqe_#wq73ihWS9C) zyDg%Ql8=Q0yN}j8(YhMHP|ZZQ4WY*UTibtM%l44A&mxBfFNdgm$%_ptJ6^O@JYEO_ zBpI?a7P3UNM3T8aHj1W#N(nRp)3B6q5+mkOQB#ApdoLG=EE!0Q-G*351(q1Q9crUB zsLL%vX?C@hJ32dy&sI*KzH=uwRwgCIuaooZJP!!_!Q+$1C8exoAH;3|ZHd^aoBZy5 zFd0tNn2G5-canlg|I7mI4|Czdz&PMyE7HmHFtX0SpU2y3t1URzgP z4Y%KEqH-b^+>dy<5pnT)eZI9?4Fx+()WXT$sR<5*p+UHJ0TK^R@Pncyq)|mmWkWv8 z-}n4)+7oegqoV>4*Fn#3L(jiO{%#VLs$i(yV*km0w_?lCe==Mi%0FGPm#(7dQRQ?W zMeFWB{X4quG>ttsFaZu#q31)Q{(HsxA)6ddzM|^Fssh_CHphj7V@GWWdXFWgnm1)h z;AAvYxpSdT)KQ!HMt}w1$pX<9d74qXzNW?AdXA(y%TT*M#6QAd8cUK|!v0t7`U0rw z_?DH_`{Y~K;#&*y*9sRa-ZovyLm_^}WJt{{NKG=nbuw0<;ELLUix8Si1sE;V=5}DD z99VYtaB^(u9Vo=;G=s?pI=9p30SeI)ym{-_pL~kP*IS9bzqJW@JWX4XkHpE5l(+v} zUQw%60mVDJjKUnWFd@m;Bee6iaAYypCl495%47|q6q@7^>N!rnYSpUSQ%42R)*+9R zbjbuAWYrQ}=jiT%1a)@{=4*?M_}E(NFdO7#bsccgj8y^I;JaJl8?pokw6qpLYV%uL zh@Dz@3fv2)>H=VE$_YD>t)as$e7wKrGEk?YuXQ^5gk*v(LZ3`RpAdvvI95^jfUlzk z?%tLT&?9wq4I(qbOAQXdaUvLA*X|ppzkmJug<#3M1721DY=(~O`8()>f^@+yyMz4* zSv()5;UmkTRoqQXGQ|Ve>NySt4dZ8lZ~L90eexY<#Vo?q2+K?{D;#qxd{#Wh+B$5a z{nu_3z7J2q5+mjqTx3$I%nfXvTgIcXqve`?8II3N<+hq!aH{6k*hs_DgFpWGkQv zHq!q7ty@b<77JuhE5Pb`Gr!=mcy7F3z#n$tmC-~MlX&kB9dDgM9R_pH$OoZ|njz8~Jxeyzcqgd+tC_qp!XJ$sxhCsogCGKpZYYSUz;Gymw z1Wu0)BDJ$?&;}5$t>}y&)^{%!yOx%IC&%VctvxU-BhQ~NP|uq;PmNep0Z6tFUoIdk z{-HzT#@%=$3P6-_S-{(_%dYW9LiT|J2a3yVs~J+rel4Z`>t9X%SY!K5QmN^-p^yP` z&xRI%>53Ib7J?w~}ojIM@I+A}ow}gtVlc zs~>KDOdlW6EKF%{Z>gxC-+>;y8>`L}Sat3|58i=QC-KthoD-KSs;f&cpGOiOlpy?h z{@ev5=bt-z_|&DdIRLHaoQ1w#Onu3537Qef6KEel<)Lf3qtj4P=4O zLuuU2jaDe4;$3^b$T_Z+`R7XcmDC+g?6)2G|t-FbvjGp$SJcEW$oSG zu#^cSCrt*Hdh(=^<0nKjpkqi(oi;LQ@{}o4CMTH^5=>MOda1UQS@dAM6NN_QI*q%z z0r51NF8+MiKx6=d`(D~T=mO3jWzFMb;$uyL`iZE!JQ5*8d`W~5V^XHgm}v}8NE#Cp zHUj+9Bf_Pe1eH=alLyrvT3g!&pL&wiz8#sIX(B7>Gm~GvAD0|C}9XqyUA3A*&8{gSn zY8@PVIXM_kyPb;|PErtuSjbT5BpC`MN4LRg0Y}l6EhkQ#JXzVrk{YdDm8W)W*|KG4 zZe^`z(xgdhdu7pPj^lnTMEuag%*f51!9^e$L9n}BBo_fW;mAZ7OEMAeg3Z10ZHwTa zL8Vdcs#Q*BW*kYll*eVR8V=g2UR{+bKMG!?qw>txGEwQWMmUa3UZd`Wm3C<1!b5^_ zjZ|H!IFtefG>g=Oj?L*}>H$j<5AH(Zfn}&yV6-^_R_W|=GFo_SLV`3*U@*3(5ChZG z1)Zt8j~e7GBZ(#bo1M)Ii3dX30le2j)`8aGZy7D1n{_wdm@*{^DrC}>l(AEh?mBgB z%Jfw7n4Us1#pB`Bn71R-q(*reAT3mo2E)-4FTg1A-(RRBf@Dr2i z;$`^EE?;&yDuPM8q~a6!bJ&nzGij$q%1~+~4;ewQc5I}MY@0;p~sCIvrk+q z6#!IqjX<7&^Aq-k#pr4(DK3ZKuN)zm^ZEFtv;euyoCK>RaY~MyIa3lm=l0ugpB*e| z`*q8fAAUG)vm@vRX0bukpCax_)U6qzggfcy;zm9ZgzrhxuC8`Fypa`kja`&dNxksG z8rAaUNhS~Gu`glHy#(z~NG6_qashyJA7Fvv@XkizurN0yEm$D9E*Wf2FLRnR$ZEo} zRJiT4&pz8x!>Rp^a1LpF?e@GgXDGqDzh>moqc-3l0S&8!+s$S}p=7$VjuhmqEG+?K z2ChteS1ZFRlu}ggHAF^6>&=W(qhKw-s;n6q9-d+%RbV|*T>#2Mu2?ZWEGnE7NLGzW z1)T$w#PvZyFZkiApjMg@w4mUpRxK>EyoV)>^212!~ZTnzP^A zm~upW5?+ht>`JnH+Y{5 z9237_rpo1pvrVb=lSwt1kF1(yCVd>y{nR~^#wSm@`Kjg0pStVDn{K*k{se7MkX9Rt zEPvn<0)H5+Z7C@0$1(&(d7!MWB>UaB%{01Iu-D11zG7&NC%gLXWLLkEe|8Ppg-hp7 z96o&PTxog1$&;5a3%mO8;{I>k3nY;rGm)N3B!-r;dh+P`C^YNK1zWNeVWC1H4nqFlcgQX;Q6noBXo>I{rdc1 zWm{p+!M{HH(!;Yvw|#at_VeKcGqaq~F}(BkI`hy*&SrPYLYHTVTRhTaUcsJ!*F+A3oU~V`}?3X{=r2|X)T5AoVO%iP0ir( z@^V9TWa8w>lM|z(4JxIajgJct)K8c&bt?9Acvc4meufc*pTVFUY{N28>F@-N!1f*> zA8c!F?I!UBi5OS0V5Hx~NT0y`v0$WEW2EPl6(K-jE5Cg9Y)KjX=;zNBOVpiMvoD=L zec~7%ySMG#xP8~jBPWj^-M{ZhUTHqqUe2C5oqGz(>$&_g3i>rSlXVH)8^Uhh>4d7R z4Acy|N$nq(+bQ$&wBxtI0i+?~rM=tfCESEvU42fx=e34w{RL_VRAO=L173P-t!mnB zOQ2SdY&iD$T>kDMm;7A*kr5w#`pdx!r^^rQJ#w_kKc@Tm@#7~>96NjnDs8DdklN32 z0Xl=A6CfLLEZe>pHJg5};^L#1J@d>n%c5yd!KP0?gJb67uPG1iq~hY`K0vEr127Dw zGpenAcgK@Vs*dpK)1R3S=4z1=;v4uEKA|?r|8BzCLC=5o7HR|4MC-``jbR(gZGVN& z67T(YSZ6!%-Y3*rSvA?0#fqV5I=VhwIYDENfe4~&e@Oc_}0@HmGUGwko`%3k)`a^C@yn^2jNXCWA68V#4fEqocwI-I6gV zSip-}h4PYegnk--kZcA3bFP)JUgXTNa^E0m zr-tuA227Q(wizMu(NQclE{dT7A@P9}?WY_HZNlDD!g1y4>g;mVmoxC-VLx!W5DOFn zUQnfJMp@q0sIn0ZG{h&`TEtZpQcmq0|ap*m46Ze ztuC-vkZXd+muEG2y=sJRc ze-QI?ynv8&54uZjozNeh?e+n8D|Wl`a;Rsv3VTyKNh-A$m((<%vUyE)DRqF8x3*Td zww5(Ea5CLg5s7)? zhKb3MYSuE|4Wbjb+vk@15pG4l%$9tFuET5VIwt`gI;qBe+z9?ni;q&;;i%zQq@wLk#RP+QA0ojZ_sHkqyb5J zij)E{3xN`PUn%xqu=DayQ$JXBrNwoXHP!aUmWGC2FvkP6VMgqu%V`G7uCvFglPUb= zfR$_g+uDo@Mh{r7nUPBSeP#w-ZySnVJ%exbJiZZOVt5+g=xKbTX%{Yl>aMQva@oaG zhc9*v3;>Gi=iftJ0y6{r;3egN_&aJZT{;K16Q{b$SPnsX#i0B;giqfNBP+=Z-c&%V7i*yeWtxR2cgFeN8sO@5@oYRNhHn& z48cbDl)gGu)1(+X^Sab&aYh-Y7K{BP`IT2*d0?6;Qd)gJXV->rkJU8~N-PRE8xoV$ zC5KPKtzmk7MM?s*ABniz^Dngn5uH=uHqE~OiAOhV=3_E5u)Az02mAcum8q|%VPE@) z49e_^+0+D1PHq4N0vF6Dh*z*kY#iFs)~OaoI`eW4ZT@E4>C!GU+i_~sCJ-cvH3eSN z?yZoIpEKvi%y^xDj1m)IT3_G!^Oq}II_kTelCI0A3EX$j&*zVJB6Na!1C0(WYvq-l zoxK?1$mr&xjUWGvFi2TBiiR?*$HQ{TttU2ZI@pM=mMP?VolK&Z%NdGnhSHeS)Mzww z@l0n!4^OEg0!@ZIHaglPB%=Kc`(ov$n*Ag$1jq zo<_L!OMl(*Q-!U*8OuGdr21(eZ`ZF+Ox&|4 zFpxa4$Ye2%V-Sm=$86#hn|7$xJEC5CDGI)mq%~-_{55m#mIb=44@ke=*q==vKT`R zzzzEuj6p#fkd%v0D)yS675yW3p$~{p*or>53o^bEeUOAwRjtkTnmR{yo3qPS+uv4I zXDcqPvG=JAa2%oTv)vBpMh#-#KmSbq%E1P5c6wO3)Ew4$eA}4@tezS*1LFW3yQl%| zbh$CPec&I*ZCa85RhTv@HYP~Y)l!<9lQ!4bS+nnpueN9BOGb@17<$9NUU%ZeiE@_# zw4}fY#J#t6?UwUWIpHQ{jU#8=N*sh^B)ON*?fd3E3bc?v<-v@o;TVBSUR7IL(%xE; z-x;1T=PzmS@UCNz2Z2G705oo32=J2}62@3k3P1*2Xm$uhO;uTWS!sD`>HaKQ>gA9kJ8ni_uY8oEpryFfah_}oH1h(pI8k2@;rQ&C#g!R5Y#7!z*0j#-jbC{8!$!QetU78 zH4{U-4zmFV^YG%g-{$C=ZnHwFc>A5V-g;|yjn#;{rM0#i#OaX^Q*Ezz)PtF*;ba!= zX22~glQM45II_?`dCG{uOrTDW$R-%*`~!kuAsK^%o^BO{+F8pYIhH$@vkF2@!PYhyn=qWC=HfF zJiWO$m4CvLM*Jv}-Mjfr)SD}UvHU*8Ns>x-1D&RNa)M7T4+Mx#pQ4^|MEn>v-zK#Ieo5)_`u3jeRI#NG`@@2-GkFN0)@_9!vXZhA+fT^x@v(9<)D)$JLRYQC(w7Fc=mGW(giGGY z;2+>u^09m@y<^8h{5BpO*I~jDg5MwFACdrtl*-TM)PaFe0{1j1$KC~Ssa2-*SF)H_rsTK^DjPzcf{C&}<>nifAldYjmHx1#SJgk%y;h+sK|I1-R5 z!kAUsDv@SiUIOn^NjZvoRl~Usx*ZHC;C)yi2%3WDf_cy|QM`<-Y6=-m`P{^(H0Yu& zSXU^NE%jRbtNa4`e(etE>lqbmWWAR^8d?d0KDz8>^wFcbx{0up2SKV}#Qmz%)2k0h zM;~@R`g{hABLSwz-V#W)WfS^!a=gyG`8jBZS(S$B?Ke{{i+08&b#PpOI`}qq{QPB0 zUdY6%Fd!T!j4v!%k{PF7cJs0&Y4HF##-}a0V;MD<13*s}OgS7>q%{-vw3z&VJ!>uU zjeLGCXo|(-YibSM)C)4rexFQcrnHvPWUC~TwboXP#C%61yg7}I+Pc~rc;f47YcZ2+ zYHG2VP{%odo`cZ~=pz@|H3wZhB|+jl=_=mBkoPSVV;LO~0Qjd)rw`NxB2ORMV|aLo zG0+gGhpWZ_*FJdwUZDrb@Xwu5uo*Kjx+F?S&?v(^3Ne*XIY9E^S$@pE7pW1YQkpRa?34GWWv|yraEENY?R)1b9+Da zcTQTHon1?PZDyGE(;p5IL7NqRvJQKD&9+Svjy`qB8dD2pud1e|x(fPU86>v`{}3pv zYd~USL#@543S%P$+Z<1MEXZdkd$U=Nfg+?49y3Ygj0r^Yi&l>rq(aA5?oo za5yRj2L=G$0L172JcT&N{4ojv7==)bfr52jKT^n=fHz)hT%SsDCez5?)u{e_b?RyW!jZMj*Dvl|4evJn3e zU5IYf(4XR!hp~cwSQ&qZW=i9)_Z~^n{#qysEEEN@(vXL?wyLZexulM|QhT|*{1o^Pz-7)$;oE@SO;Mn> z?fePR$ju$^y;tv$!|GMkHwrtFQYPEFl{~3$IbRZ`+5HU zCse8@ECdfCWZ?@+3Q8Dy8jn`J66iUr#UJ z@9p>Y{M&zveM|DYNX#n@Ju?bDGYU$Ip$snj;^LZ`i+QL3S&X$4fcO0zLvo<3tVAxy zK+z0uO~aM~s%`AsfJo3%X*y5y0el$!!3UTu^uhD#e6a5fACIXshGX_7C-1e=i|$CJ zT4;@iqZe0NF$Co64|N8*gOpJUpf2)mq!^~73ocWIl80`1Hl3b9FQ9LyfBmbODof{| z;O~d&N*_>xuOF`Hrl#uZu6B~pjy0^k9Udr#q&-<#pVJ9hq`kD&5Cqc7&`^K>=n-My z?83?fBP z?uV3(|MOPd_U(52_U(V(l9T(SQeSmLL$zi6pSR`)2Mq?;gMZ$llV9K0cRl~(kG$po zX4R>PRTn8d|Npk^o3KKk<(o?HUAb988E^q;o!==^++ zLD`ExZRyGFZnqs}8;CA|l zTx*m+<>N=W+k5o#_|8~=ixvyrxuWW`W_hzMnlS94^Rv)P^XHD~6 z;HNRDnNgO;&-Pug8sKddA5BhgUw3csU}IYg3aobECc9hQFozu7*0uiKu7EIw${#j^ zdeDP#fbhSC#l`vfUvcIdND75NE(b_WAhLbNN?bb*&NtGJedD+ke?OAMgAXVHgoyox)F&e!l}Q)D5Zp4fL*E zY5WaWUSE-ZhR*rnt9g7lERj&*SkAv9*>b5o1N^X4u?+qVy92{1UVQPj=Wd9INC-BM zVnfD-$Rroe_)l5-!pfEP^;+#`pLtLRvA@@C9N}s^kd>8nfKVBDEw@kww1%EXFQ=E# zH^b+*9=m7>43ri+kd7w@l3se*#{gSc4uBofn_hcOSp4vsj*O@RK!sj=<+X*-j}L%8 zb~-&rICS`rjr4tg&7ddHadZN$$x5X^z;X;bM)r>*%^e*fVP?NR1Z6ymSVdjM52^fL zcRR| z1^Ae&sSgl{)GouK01%e zf^*?bz?=kFc@|KtZ!V;tq!&UuC_*F*DOA(9(N9_y`Y!pp`5-=$55^I}D}A+C3VzoM zT}=q1u>+kbe$?S~HiEIlUeqA+Mw4F9tGxEpZQ6+F6pOOkPZN;jm3Vu*t)z_0&_l21 zd97}svmc5n3{{z44GsSO?Hz=stDy(;onj=qpg@Jk*+3$|W%l+qC!sE7&CEbMn7I<8Y_=#6 z>8PkaC#)#S4U|Q%5Bc+IS2v}RLI-kskf;b}s#4~$Bt=FR7PbvS5W_U?KHA%qclO@D zJ-9Hq_uSX0ee|zWZ2@zZFCSk}0Pd{PCaRNT$k~x|p`e^Dv%Iy!LDSN4cc%|o&C5cl z<}K->)vWxXZ%kt1&Yfc-)mA+~5K~o7QlPU*WgIzu{`@&yFS-znvsFQ+er@g zm1O_69|S0ra&3D%&>z!Glz#Y{NGFGRD0BB968P)8nQ)KXEF8~Cer4wn9}!@lRJQWv zP}r7BmW(p8)}+$XpMTD~OxWW}&!7A?=jfrs*_*fSJ8`NMu2V;SRbKw7ef#zvJb3iz z(bIX<*Ic44D$0g(qkWW$R2%g&z!IqG0eJseE!E@2_$pZhvEfJ^@F*-v2M%;~^>=~; zL(<<=*U&ZSaaWd9HFdRnlt310RkXLQs;W&Wy~Il>hGUMVrXGbeazLWfm`zBJhqJA} zM>x5s-9=7LdHecU);Z`I+E-Q%&DUoj4J#oHBzBaHFC#oWZ7@4+HEkY>g_{?J%vG$% zL1>~&P;kUgrO`q37#^G1+Bj5+Qx8dltFNbLfL1^n+&z>=25A`Z$y6#O!+R`==gzr2 zNxf;^}OfH!V0x^&OIcP$L-sK|yqY%lK!TX^Ta_gpG$Gea0w6RHthof?Ee+gNCW zyKM}XRMCTS|7fbK%e^(5zOVpSUE0p>XCi0XHRzVtSJkrRtCChJ4Lz%nRq~GOJ z`U!yM3c?_LZM?tl(znETdro%i-H$&w6*ll2>_8u-R+*&(^{035{&X%lL{x>gTK$fE}V ztZwV*uvM28S2gk)$bpJ#ww0AvR<=3vato+de1m?RSZNM(qiB=cGbjoG<&@w|sADgc z+KZdSDEtbH{wk~m&tmjfVDtq{UQ0`0;GZ7((Iia$CkH=4p*VW<&kp?1D)9ekP@K_yM3h)D-Hv)4eS<^_4l>va=^l$j<&@ z=h4fRMW^%b!cq|0RY{hD?8>gNMR%{dJFl!qChZy&7L36zsjR0AMT|mgR^ADPZq=Qc zUww7)Vp&-gNTkk{QVgANWqG)76jgX-dDv6};_ukxF!Ru2B5BO8v`|LXg}h@YyT&Y7 zv}p6@MT_RmOV%?IO>8<+&hFx$XTL3I341!7+DZ=g>9Cgke-WtoGJbhrl-}Fj)>20< zT3FlC-iK_A4iB^gZS=LhMi-k8=bjE%| zandDI`+EmG!wy;a_$8K@S|ID{tLp3Fh6Mtvy0)dIwgLsO5t0);wKewI!$!vI;SoiX zu@VYu`rx2Xg38a}7Ke6-oDDO&Iz}pyU;=@dR4OC-JfVPrM~DRKa1~q$QLjI78c|-f zke39=%V@|;aFm4762BkC;t~>x%x1gPSX{Hu%;0KB%LUh(@E9>-)27cp8$B9fV1;eo zJX=|6YS~MuRrnG|y<`DGZpib66O0>|GA^YEDKQ_?hnGr`pKSBx@URSWGMz4}S?BUIGgT;gD)?+J;vQWTONV1Uy9k*+i`{IF(}5MazOM8l zh+A`yodQrBd-bIg$9_G1_T0JL6DN+JI9~(}yej|KL#NK>{(3sU4E}Cg)foUr;9^Dj zXa-}t?fVU1ehhN_PyY4w@iW`j|M0`NUwj62^OH|DY(h|d{kNNNw@-0gcps>p0PU5so0Ieg&251^^rcA^k4%nUfs$nlcwgF{Gn`2MY%;JUk?p9kxX9GU#> zpe1|~L8M9iXhey|^DoQReGl~Ocb55(ZsNGGnqzyuK;ZKq?|+Kk z*zcX#7PjO`=;u%W{RJ5phD}?glXg#x3jxi6R;W&%cYG@Xl?OE(Q+o7-9XaTKTzfHS z4hWKpT7su9y7#XSEt@x50;p7bE1dx>g8Y%ly2|r`D_&ZFIP2qh?w~4$j?Ma~9=`LrKwLqCpF28~Rak45R;Ctf2AmpJ_HNse=r9{39N zOF(1vBLAGEsShRjyaVlxae9ea!z#5P;2V)J7KsTH7juLG@E!V&k=*~oa*=aRfjZ4r zR?*M}o=cP&X(uw?(COjb1AXv0fH>OI(+tmmy^~h*fH@4b*CXgkeKiu~H_c>>)kd0%{KXqg`>ci|h2!0kVeSGr%{TUhQW4bTy+rNK*YU=*|yY}w;;@Orth{qO!EKtjjP73D)?7+CZN;u|%l%^daEsTky`;c>PK!?6Mt`jJ)$bG-iR; z2u(}b36t+nrM}Ig0&#Vah8BUf5(HnM_}U?#U}5gQ`_?!!J9gHr>mtk&*+3B|x$wj4 zp-)h`mhh&AItTnGHFb^6w&s?WCX`!h#(@xJ(^)XpP(GK-D?xn`AW<2KS?a?ZJ}Nvk z*kFVsL~jTQ(}jhHhlYlShv7gNGBokOwi5P~85p~z(2?)Oo-&*b6k)rB-Qj#uQ6ZR| zi*4mt%r9JmOCuNT;Ag=x#33n@#wHBn0GSymFU$?_ALE_XRD34FRZ+Z>91AS+h2so;hCWSa({EtWdWQoW zRF%VMbZi^yVza>1E6|Nf7e#nH5hGVe*gCOJAZH5Yz?zzD4n$YFJK?$j58GfLl8-qh z`L3vSjjw42_b$FP=%^vwt0Qz#Bk@2wP(%c>gpdn5A|?vJd;kMR#ZlA#M@w-(H=*yR zVBXBfyfI-vH&L`!gBb($RG21E3N#4)R+kqOPVu5*+ny{Xh&VAn@DbWfg|7=V7-`$Z z{a+xOYcLoo+r_<~u3NWuEg&FYbDY9|%6$NcKQPUoBNRgTN|au|7ICm_SyOY1WX#;# zNVNWI#8|$OAzjXuICps_9KXc+14v6!UyqMkahj&X?`36Wy??ZcQ`FYMGuY8wSG!h& zXl>2^kGl7OkE%@Dho5uiOwD9UGMV&b5>g?A&;b$|4q6-Ceuxs;|1NJ0}ys z!e3X#)wLqhMVf`)0wDwl2_d~sdheOZB$@KRo--LB%Bt+X-}n2znanwpGv_?L-uHE1 zcYQNul5c4STNV694nkcHi4AZtP{V5dU^e9w5EQ5xJ>oZa&c(>NhjmSioCBkTP45TL zW3svAO`Mp;h&+p54Glg$c=%M3@Z94lu~$%HNq8=aF#dY?G7BXFAuq-kVWdIP=_@J# z)+q*7$6QrXTwPUGf>Ecg{(v5R2FoY_z2OK4l3@`eh=BV4~;7EO=m* zU>_~6s1!|FaOXUdhG~8;K5ImGFL?ibs%tc%OxEyBR22QB3gAVf|AYw!Ijc{Eap>eR zQBi6$2EgX9(bN9mrkxh1Y4V0iUt3$>U~U4y$6VdmR8ZjE*2WoPvmS_znGSjbmU zc20*&kgpOwP_cSV0H8e@t$%=8>*tRlM(Gvz@9r1I5(R39@}4O$mJIWr5z*)Xh}4XP zGl}ZQQB!`BY!XeDf#AqUHkd?(C!kw2Lai@4U?yXQZ|)XC_wE$+`xk^oB{x)#Co3so z7-;h9x-idrv#eH)sn=3hUD!~KFQm7juy9DGfXJ@XQV1#}QF&BUxyf6hbSdOcD{C9_ z@{>4uIpk8^N*x|&u~HeZQ6P+ItP)84Nt5~tj)2xB6E3(fX5h>5dcRFUnmKdkj1Sao z+LW~5w&dhwg>4h5;n#X=0=0o07-Kr?YP&nEyziPdYYujLB}|_Vqw2Hua=*J@d+jxo zKXpyDb7y4I9gjZx=|JU3o{&o;`6HQ0KO$s+Q70d@0T@ZE49kaIp=>E$3UH+z~~Umq(ert$mEu#>rWU z9FDi7JS~(!)`|Y%OqGbFG75$PI6{5(M2)3o7agNPha2>Yz9szI1wlNyNw5 ziJ(D)0J0SUXm0ZE>0)}ihD2hu%+fjt2MY*OT3=9rEeT|38l9FJsk-%GD^TZuL7jVe zv8n!4S6pnfnawOrdEMvVUjHBbrmCNol+@6`afc3h&Nt8u$RwT;wTdPYFb{)$IPJGJ zi%|&)qenwgq4J#n%O>){jWv;JO-*Odn$5?KdCoV`Tt4_sEk#sUr`Pl1#6;mdCNc2_ zTFnQ)t--iQiFs%-^Uz|JU_MxYQDPoiOvJFHAEPRq2HN}DI(lK_EhoJl&E;T9C~qd^ zJY02(Wn|ESvBEJ(z?g5f4gq6`+r&U%T1^qfT~wg&)meOUBlehI9WQR7?91|&;xn6J zTel^h#3h+GN)w+#9(`uOMTM zAON{g+|<#>VFMwyc60-)A{y%MXsyP4Q{9S;g+m&`kpV0Et95{gr7%#?DoJlA7C0yd zrB-E%=?P{~of zoV&OBqU-}v_JJ6ohphsMBqJj)uPis0C!1kJWVL$r13vRt*I%WuHk>daN+3mk9b%AA zi8O5vX1Bdk=yk#ONFL&0=yb2^UdE(OgFRtKM{8Ht36R94iU2H)pqF}jxkDjlMd zAI}uep}?yQo`7nJ{6a&66zCHmTjRX?D`E1J7Lk~!w)RuZTwvVfRMKnEtY zP8&z{0iCtE7m7-2;T+Xm2@MO=k0}SRx&do1Dd8vC?S6BXFQ4v3(<<^-N>Lt|VTPP`^ z+9d8bP_1<;TvGU`u+Vb!cC=d@Y1~+~`CJaAH2Bj0^*bl2@<4u;=^O)Y4NW8VSq<88 zDX>lXkbEpTpIIy(Gyk=@$_ce>WgCJm)uU@!}^bf7_SBdyVS; zsk+MJ$B#oi8{BbmPa1HD!uxOB7Y7e!x0}3rD$Sj+b@SHwt3+v2is6;#RCb=ZzNEYw zmha6q*pU|&LE{3hiiWPb-0U-1<*m(q-T1AtprD5CmfFq0(@f&H*5jKto#}(ArOeUW zHvnlLMi#L`#SLIQk;KfH5wApRvpH4F0DO6ne%PwPm64^mStfFbq>ipO3Iph~!-qn@ zr41t0wgHiUuv#gSibWDxKh_j5OBig&xvdvu{d|OEpt7>Qo7Qw!Q#%zdB05&8lA(Qw z)sf@Ih00RBSf3bgi9+V(Y;U#z9!Xg&N1=UDcK0N-uTf}U5e9~6j~oG<4UB3s&}j7c z3y3;q+5}z}6%{vO!Zb2<(i*A3Fsr#)_t8fJ@=xlGPO)|^`G{x6{RmMOq-v5qKmIt5 zsPor|%geVdSz=ESiXZSacp0?Y1+=4){Do%@E(Ndx2ssIS933RB32U@%ZBCs|(bk5} z2h_a3KMCR)wVu)<1?%-{qSdW|{WL(Em^g5hCk?g)z<#cii-fRP32+3%xow>(BK&UT zjINR?BAjZt;vC%-mvTV@)$ZVA=-3)Rz4=(?g)Kcj+YjtJ-!4~BLnQ!VNF`k0YT|FY zDTZSETrxF;2SGu#TmHd&OQl6c4r)!V)2gIQTHM*6A{eOJ2arf8%#GLrvR|ml%i{%|^ykrx4!Y`sM@xk zo_=(In|(`;o|pssg>}jom)vaV0z{%O?v_Q*!IFW=**k|0hoXmGeDR4}up#{xCX!o# z`NKXH?|0Z0AS(x1?c~LaA9{7znX2sRn}Qu@Hf`F3ebVcgo99qVA+L)U+aS;K4~>lS zm!`$#UoI*uH&?=Scu6xrOYM~xDvzEhYLAF$Y46CoP+ZpwcV^Cxa$o{^->%}bXG{B~ zQYmBWZlH94-JFW|9s;7Ng2KleTYCmvX>qOnKH&krTB)t0p&HBC;`{hXaF=%hugSX(Yp!jpmtwfjQ9K8x<*ZO786hse|1; zyVCs+G=(Oom zCqODWVFLEeMp$j6{+35loFLEn?nk;`O;ta+aDu)8+!rljv*tejKN(x+ zKm@^a_dNOJBY&sJ&J7Uo{0r7y^qs~&Kz`2~g}O|=;rdSa{`gUx`Ja3!GUUG z?Epb!18`{4_;}rDr*q+)ITPFW?LA$IHil89k2eNj0>0t;e-`z>3iZDp_5bYfoH)7k z@DEuKeHWa+SOx`oA%Ij3MVE_xGXgmW0`0=HpcTy)~xy9+Y={FoW4{63Wc?6zs#(#8K6{*mYTD^Oj~R6S`g7q z*SZx0VdH-{7Yl<;@O6H!&@q1T9k2|eBp_R`1o|A(*YC(-$Veba!TSxbhv!E3fSIAw zF!R2@&%%Vq(IT zLlQsetNf()p4!r)(#qOSsB$ODWF4jT3SSK+aAJJJ!zI{06KhUR75EN-BawqhT&-3q z3u^XA#;zH2kU~7mEL*#J&m7ANDbq07+&>aJ4j)l8?)E#$(g&jV;4y!FA`}bF0+lO~TCGp4O=#*9P~izj*ktS19$@^C*t=sjXb zlZChlk`;PUn6K9&C99E=uaJ_pNXcrXWOi94g$_1b!AFL(xzgRfPaHUK@*>(f+`9`# zc~s1Yg_{@i>*mr~QeuI4wlxDnA4D6E=22hqB+mJJLEC>y|EmR3F0YMpg;e|(LMzVQO ztzH&I&V2NRq@>{9!b9tgM&tTJg}ot3rrCvgNY{m&{KCScP$XQ;D-<}x+v$LWN~wU$ z30g_{mr*fzUB;Qd;2Sm{x}c&$l+ri+Y&36Zf*^%*aVp}XhH5b9^ybiVS!^Z5GORi= zTtVy%J^_Z2fpwU`acn=v-F~Ob;S!r>yR0^J5WC3F&xX4!R+pCoD@A!r%MkU$#Yx?! zM6gwdqF)Smw7>-mHly%zj**ga!o&z=F<>KN#?H84=nahESZV|*b-@FN1gI?nwnuD@ z1@|#%EFxeK1tm2B|;+m#L z(9TdjXLVJvjkKdU;5rJPbE)IpG8khNVO%R39?gfKi|hjOj0I;lynP zRh$YcXt9_@33&r@ji12mBQ}L(SGvODK6z&^bfBn5wQ`_)NR5^(u1e3?{mxwXAny0! zvBIJz&ez*fmz|OE{U>0R`5L_8JU^a+g=;2coJZnbd+nLKArhTA9yCb~!gSYlsb(&S zCRtgfHpAqVt6&MV;^t^Yb(V>n8Bm*@M}^#<>i#klTiVlS^Dm_z{{9$RMuR5i4zNXf z{84@2{qmBDA>FxF`a66!i8-T3EB^y6JLIl-EdwW+?CpOdS zs32=wr(I^T4A_VIyE;J~)z#5$v06-%z12SO;wo3gg@9H$%sX`U?a85Ng)FBaf;&B_ zFk;iOUaF8vydVbjl3?JT$Z^nJ4>_?7I)}B&8H%uMACe5%T=pSWApir`(Vz5a5B~;gNzk%4{9Peo(0MYHdE{8z+dN{PhDVNm!tBC_Zzo1^|Wh{;B=RwrcKzalD& zwkU_ne}lM8WVgZT%5RaGMWCi4P*VvQ2O>~Yqfk?t>gwj4GWdN`1}WRB^7FweWK_Is z+EOd0^S{lq2F9k6o~jO|QU+swS6UkE|BQ;KUtG52cfZ5_qN;w45>n0c2hRXL1nyF7 zD9}rcWGQc)4-a`ayT-T@T+>|BIOq!MtZOt?RYl$P@WlN6mhbbc+uA@pXH>lKw`HOI z)RUpiYmLTcYqNC1tDD}@-qylkrwp2+z`)V)C`{KFzz`41dpxb~apYwY@^TyU64h04 zxwN(*`)omTH*m7OVC~tlqpogfC_n$$u>!1<8=Bi;x~8|=LvCBXJXwod#KGa?C&tCa zjnZOuA#aTzH##xFpyTYOsG1g0(5QFU{wuwTjfjtrx1Gt5O8@vrfB#*(-hA_|4`9vx z9W5(fE0x*pHQ!>#`9s@4)a`H#(E`0Cb|}2;aS)n0*+C9wauZ12Sx8*ICZB6o-9Ap0 z3yS$;?ClKmQFH-D7Y7)DP@G$>Q-R|V!8?Unx>rCw6yXtF;3o)T*S`m~?ntS@h$)aA?gVCV2p_Gv- zez@gDptjUUj!c?%{J2a8*>tJ-494*@(JY#OEDap}%A9%`<1t zoHJ`ms06zeumCAvTo5NM#Tu_f95oMtziU$DUAl<4+AQ0@`9IjJg}k{Z`+PwO)>ip> zm*Iu`(xv=@%egr?XJ3RlFU0dA=RoK!5Az|LHy@FUm&L6OYMi?Q_dG+_nO+-L1) zy-aa^ja?X&TB_@PCfv3#(NReGBg(4VTk8w4r!TB;?S%hfsl5~Bf>npF?~t|D%!hP# zwpAdi!U}Vr2s#!Q_QkbOSk!g(^tt52yAN8k)N9cnQM31YYIcBC(mvAF*Y5-BKkwc? z*l?QbItTj%7gc?OF3@!AmDJx7Vq=CZUGCUADWI^Tt;5QI&CrS1fHH;enVvqU)D+iQ zo`tGxo}B9MumIIB^$$Zu`;yMGJ-`p{DeDx`^?Cw}9PlDW89bhHtPSxYmoGQM$)H&4 zuf*K*(o6S8E3IcXZ{D0~)5PBYl8Kpm+XCS7+$-?iAm=!%kzcxX;W)O|4F9qh8UtqB z3mLA!*W~ruy)z6pKre0KLai!g?Io3gyz;y6j-5urj;Eii8H8~xI6{=`4j0>EAWx1R zEI+yqG)Ja5(8i$-_wO3C>7#>s@{4oM(FT>5dp;X@%be`o%lSCNy(p>_Ja-~#-GPmK z$b%0qoB)Y-xJD&|cUy}~2_o%?hzNhBLhiDd;+ie(D4t%s?ff?%e6-7q2T{ZJt{%{v z`+)tlr=uEc(Q2Ve?e_LIvBKSb{6?YsjEOXO(_Iq-pXZ_ANDtA#YNOx1Dp`OUrMkJL zs8On$OF>_ausf^})?=EobJ)U)2JMUrhFdDe-cPHxr60gos+*IJ)Xmue@#p~KbdH9z@CT4t=TIl6q@*OO9j)hn_~D0htq29n5XlJb+_p**NwsrJBT419 zH%vbE%ex^j{5H=XL#K?}qU|MEHwwCU?$Z*x;i0)yBM)~|4;g4^bob{1_W(q%EczU9 zVcata2gK zC7nh|`lynwvyUh>DpLRu!PtRt6w2&A%OD#DvYzAV^;86c^#%-AuF=scHKeNt8=;;q zqEQa(==@O!VWcAg$fVCHa7;|W-uCBWqw7TKQ+Zwl?&O0b*9~l25Bcm2AnuW_o z=ACy;upx9S>%6yZTd?587hx}U^k^piRVV|~6`dXYP}2Sm*q%&QjMS}h>+QUwqb(L6 z9~g0Ydpm^7S9N#S(H9XoX|eHXY>#lP7LIXvERC(C$A7D5BCJ}tH&i&D7mo45bF{dB z>A4``-tofmsBm-%&kg>{b9&+4(ZVrHI0gvMNq*(I2;ttz!tu0l^b($LegEwBDAe&c~__GH}rAO zuH%>cdGvArDdykM*FF1=U+nODYxAJ`hB`fvc24fM_xxy!$`@O-U+w*#y~i()03qTK zFSt1WwNb#c_4vi1z};`6QD5<>FUqqQjrxKjmQatg)W>dhJ7`nF62hF_P*u5D);

    Bd zVhl(~kj})&kdO#H{;wJiLwuc_Cle#GN(*g2J}2-wYjR7UX*to-;gItyzwrKO=v z)5tV@TJaH5w;cB_$6de2z02_wMJZybCA{LIvB;tu*ZA0&xQK}8IM4N%Sb8x6GVz!g z)0k3hL+YNa@?$V3*-gTFuoeH}PtMcx;YuYr^n9gIzzB{;NdN6d!Pd>5Y-oE;(hEA8u z){wdT);s6T^Ysml3RCUe@bSm<0h%|B`TX+_VV3spu>trJj|QG}T9P&|By!*wy$HBN zo;HbIj1KwybNCvUwPV$EYi-cPFeqF90>bNe_a#jX6Sec|+KY{$2fy99)7RHomz%fv z&%zEx^zA+Od~1R+q3|lEK1Zvtk}WDJEvDj4&-GHeMJb}LiSzPIGJgyUA_(9aUItca z&;`r2{=6TygkXHK+nbv^A+q40&Z0(|rZLzzp(xw2S+Lo#VFcu(zuVT|*F`Vlx9)xl zG<0&djt;Td1kulxu={P$*TC(t^lI49hhxXffwxfz``_1fL9si@R7wjM&-4W=hRWfr z!Lor~0aHamSy=(3 zu9RNN?HIdq9R3t?<&bbnr)mse2AN5INi-V4*^<#70Go89R22N$&4Y z)m?T-K>2XgxQi}(iAV%rO3wby=4Nad96+Brp|RvR(-@sbfqEB-9M}{<-inv6PA;d~ zbrHzGb@0Q5m)cvdv)gqhK$Mt)o*ugd+)-D zr|~(4&%eLh3%`0klkwf+xm)^gMkAs&U3Wa9v)RwaZK(;jr%}SbpN&bL@e7DBVi`tNt6srpCw~WEPa~Pm{|Hes0ZEozi!L{LTv8%&j{ZQLd>?SF<W8JYz|F7q6 zy#D?l%OCYuaP8WV71|3gRKWW`tp;HPqpjN0pN6rUWVl@ly^1lhv_Ft06T+ga=AZP4 zpFMlEN5IaIrG4V+vp?w-KYR9S>xRM|JAXXe=!y3yJ>$Q5mad@bs`5&H-SJ*+<)r!F z#QMMfTA$%-bFjwhIaHO=F&BoYiC#1QGG;adQF$38_Pn< z)Qa_O#_5tGbM^64u(SgxrLYj{xa!){(sC>nYuek}8o_c}RMaz2R#nfoP*}0v?v!>_ zwu;n&1|36L!hvq^GK8?agdHrRo2ydj{;`1vmpHoEC5nq!uGoBE!W@Tf-^4l039IdDn0(yKmYm9FF#pZv-pjDU?y08ant^jzYp|88LzOy{Hp(2AlMrwJ8Z^bT4(8SQ8mh<~E zJG-#3hXb+3(-#N%=)ogf3+*<#=R{pz=|A z0eBFf2;L+YlpvJGmRfx$_y=m#L!~UlfDnt=H?oaHbMLxdJs-V4$<#&qoP( z3dp$i96O+i^mBAKn%i8|3&0Q&S&<|L8hS1K2x-1gO zrDJBNYjAjdt3@lPoVYr)@?lO~B0Zaxn|n5kd<@F=tgQ5Om~*^MJ`%%*+#gm^6d<}r zbP=v}0S#lHB@a*-C#dDtNc(!*+AMvf8n+A#tdYjW7z}Z-X);dl+jr>L>C>l=9ooNl@8133qj-rYdwJzvSTpo?p&q(=t#*?fVhW0w#!EI~ z)`o92$rnDa0OE?h%)OWFTEXN4Io&gdvd&8$nS$b-0^rn}!$2t*3|<32T1IWP6iH%DDX-S2m&Kh>(w%%y>zZTKOaE3+_L=h=+EcLR-W9) zEANxb!-4|<#|;P$^XR#RnHIUDBMoKwRTvO5!_INN1S4o-#Bh9wUpACoVxO}D}1S-diM)_z@oj!c@#7UIT$rHzbqB%x*UY*HtnORwxr%$D) zr=L2VnKhzxW+r{}r6xK1^9qqYKHd%)iYV6h6yQAU8~`e_C65ZF^XNR7xeC5)$Xi0` zh%78?5!is)PmkB8vbD!QrJ@@xEHmKi865>apP@^(!ri-!3l8S``v<{`FM@4wze#&O z@BF2U#VDoXi>IM2JWW33WnM>*dNnrMEM2LhE{mBg+&qnD-ptz2!Tf#twabb5yChieG(^6S_qPa zaie^?ODk$Snwy4vbYZw285|G{!)-$pwxm&pQ0R?Ap`sp}Gbw1OcS!3G(m}l1vyUIo zZpXe9?CE;hP@hc|l{2FCsj#;VRBC5IN)kC}KJeBYu!+1Lrjpwl%~?O}-o5+4#?LoE z=Ts*F9fqVX9s0&}(0BU|+^ec?>I)3hk9`0xo)-GTrq!CNmg+*M-!~&EX3D)Fq09g7 zy}4kg#9Q>L6KY1Wp#(N9ke%nxi z@MTJNNgkX4mD_{|sbcvX;k_Dlx}R5QD`+kSw)eyl=;Dr?I1PmL zX|kOkTk=(|eW>o(WB_0v=q70qLXE91` z$ECYzYwLiu-EeE8Vf?JUA`GFm9=+OH_Y19cySue^jBKq>APs55t@RPwS^@8Ct8cgr zgimX~Y;^L?QxnEdFvui!T(^ON8W|j$t~e%m`po^_C%RXi%t5 zX)W&b^6EIZ<*m2g+H$VLOK45z@^(MWfV~Z6hc|8Axqshx8D&K^fa$d6r{`b-X>Lc+ zwl`DimpMNkZl%O2p{z@{a6Nr?NY+l7QR9_?to`Z0wz}E*BhdqJrRp(`8E4 zN%}6slW$9BcUy68aYa*YeRES|U#c!Y{frsW_O+s|xAxhA6tvUEnxzlYa9nh3ke?UE z7Ar{f#A>v4zd*Sd^xAfHNLGJ;O?@Nu`Hl58{lij=C<6hTo!e3_(ZG@@0|Q5jlnG0i zMHp7m(E-j>OHXTSPYn;yAY3tbTJ2;a5X^8*55k$7V%fSQ?;6*rWCJaA7pb{>N&3Ys z^jgJ~1BV*m*8L_~!)dgP(?A2M173OQ&iK*OCWMS${N$5QCVTS?MkbLmI1&czyyVHJ zAOF)U0j~=V8pW?;klOc;xvod)Tl9~8bdbU#Meprw!+MIDbk}1P|FDl75D!{a)9-%f zHnN$V6W*ER2-ro7!RdjwLZ2HoedS!Nbmp#{2mSkY;hlIBawErK>WO?>|>16#Or z9w0YDWUgnpf2OBnunJOp_x1Sr#7>y8C@E>%wj_%sXxh@h{q1i{rv+I|8nr~q04mRj zQBuNN4U1ArUQB&Cwe|M4Rj-jTGD;jP8!l7NjS7iG4kA*ZiIhMj$^C+2QE8Z?gz$qx}tqp z!JhyxyAo~!E%lZA(Wxc>{P5F{-e`g@4%{>1RJ%Q4@$+-Z2Kp8~zc|5eBDW6%YImSo z($P1~!uvRRn|uDD@hVvgIfMEE|Ag|{dxFs~2e`&2jHW5*$z7ON)-Vr)25K{XOJ9F@ z7N*Qu=%1*i-0y+;UnSn2UgaDOk53+C`3IsWX3YvT1$Fl+RUI8A*{8qH&0V@Q*UPIR zbDPOz+LqbiWm0r@!X(>4S6J1&%w@y6uFK{kXvh%64}eWpqDW6axO3~LpKjfG&|Utc zG4@jLVTl+!JH+4r(lnel{1{geXfn^C7+ zx2>fc5SJ9Jv^L=LDaSm$9vGcn_k%W#xqlpX9WRl$c}^584whoaA?3O7X=A3{VPx8U z-4>KSh{GRo_#uv;`uh-XJ~AASq_0DWDcSW3Q*|{q$>j4we4_hq2jv5+8y|I7Ft$z~i zl5EM=5^6X%4x295OQ~EN+Xi}bDJTqdk-Ee?7+;Xf3ZoOvoiDQC)y(rw-86fQl z()OSE;)`!K;UN!Y} ztwIZ}wyvhRxt{s}q^}xyG{TGe2|)oC@HeJC9Sav;^yTA+HD4@MT!#fhlHlj#zO42M z45Jrg;awyy4n#fwf&SFfDk!_ll~!>sJBQ*_FIu}r``oz; z^vcCc`R8(T&k3#KMl`5nHy6l4v8@aD8#T&)elPrmZTo8Ts8R3|93CDjE7-hwBfYfu zynXZ0qnm|RaU(iZpcj-Y6jgwlUMepssS15!K7|De?@eU8yriU@{#2z{u|QbEZ1()h zJ`q}lXBoL1N1;9ZNBY&V*4}P-T)>I~fXdDuY=>aN+15rEv3)(=y;iHh>(SHM1}+dQ z+QW@$SjUD1>(p3wD5Y4$`3FIO7#bR;(+QqZ)Vg5MPtz+w{<<)Z3qyPO4|J?YFiIT9 zC{c{|a2TV+5sVVEYpwQ9tQWz_ZR@ZHV5_dPc8UvYwYuOStrCV!0&Y}`Wu-S7CTu5z z#*Q5uBZxsZWag+6W71rJ|$p9lqp3zyANqhEdX)zPxxdeGCfs)4Sq4G6gn4mKAXe3^@Ap`}`4;A31a z->86rnzCk?vUQj2T=UM69>f+k^X|LhZ`Cnm1+W0cb9fFIz5kpfkrZvi7X8$ov?Hpy z%a<>o6<~L=4M)LV`sJaj`LS}>`|lsBYix>n{PD-1V4zx65nPO<%kK zK5Vimu^!%M$h-O{UijT(e}Db;RZDJ}6OWZ%NMiE52OoL#(G<8Kr0;k9pLy_}2OfCf zzWbK{Vfll?TulNJ>Wk<`#;9d z2DHAYp|ejr5d;bM-WE`C_)rNlNl*_|)M_?&%3voN8zY5@wKC$-M<1Qp^L<6rMaUiw zU+UuYH!TUowfpXQw6^s8sb$ON`!*f<4Avoss+$LrmYHVv6=vk3N#$k0a<{hxa)}y= zW*}@3`{L78McF{G*B9j!Rh>JQo{rzlJ+>;KunU@+OD^XX!J49|IJj}B$7*RT&Ocv# zuCf4&E4aHpdlACy^aEhoo9*MnS#2^k=;1AvZb!n(KiwXN-ClJ$M6D2}vYb{S0w*5K zypSUm@E-Q0r>Dv6xCEP1(25cbHkS%+z)2K<$%++$qk~i7*gh~eL~fcLr5u18K#+3D z#eQ5R_VUHuy~X0rLkIRBGuM_EV0bat6<&gUZdOiCPDS3u6Og!^$gQgGWzb5oOb7-D zI#Ac$+SVh>%mqR4*{pLHvM*svnQ;^=_1O{$84_s>K`aWnrrH0lWm6d_D!%*b;K4Gu z?f_e0fDHR15eRsD8vC%b)!hB}DVAmcR4n}2xg)1ObnQXByTp^dO~&JNRLFGkuE6Q~upkQ11WR=l-+U zZu}`K^YQ3eujC9fl#sEL$PS(8k*NZD80*Q|fBqCL52~&7$U}JyXJi27V1SI%gxHSp z#EO46@NnVCqjrg>4m>tz5qLTRbr3QAC8NkOe1z9fyvAu6eN`SPX%!9WQj>^2$p3R}QxY^sn*=+K#{AT?U5sl-83ThR@hX z%%3-J-uzn@En0%nbjhM;Z@c^DyKj5;<>Zi< zTVg_zZz1sAG8{uvQW(5Om?2F!Ll+;G)T!1&l!b#y zrhhf3q51V^sbeJMo|XYU44(=QQUMswVamT&{Px=zJy-Kj7$q5T?g4DWRR_@KEXCt z6ZTL@K07;;u+aly?0bQPt*$!3!W0l>mFI<`$hEPDQwWQJiNzk5 z)pfxU0?=ndKK`2&ut@#SuxTFn+VC7f^=PpMw;qk6*d#E_!MMhpFfzcGAU7i6%|x$E zh5_BmvM;D)=ZoR*peJF!h+a9zwbZ$X$0IyrNM-_tQyzklXCUO8(CUU+dEucL{-^MQ z)(HpYi4*A<8`6(M({|iw^n#}X_@xrD#APSmN(@P~MM6k{7m1=#u5gu5dh}PtC9M~I zlbm32Wum_n1anMzH2&kpx8?W|l{YH}0SMy2n1=Hm;Hh8gdSLi78?KgSw`>hx%8P6_yoZ`T?Zi*lL86O4OaJR$aQZYE@_LIY{2m z)eZ^5b9Ts`>_dkZEI_J@TZvy>0!_Dxkmu%*2GYR1^pbG)es1|K$;rvLEPqL*dWohR zFj-Kv!s8K_oUf@jr>{mir7HY_{DLSL)=v{DT)0|ODx{n0plRQ6*OZvn z2~y5N*Gg*&2RPdPXY-!U)fE<^5o{v_$c~pxoCxCw%P3}1gYu&yiUl%q4n+V_&>WC-lepnX zNgwD72yhcE!}|>ylj>(N?fH1)H8F{GhP3aliH45oL_rx@)1csFXf$w3cTF_sOGHv4B^*o&VlFv=t z+BKhlhD43xfleWpP2#*fNbPpwLK@80mO)!q39^ zf_V^O6oT;^VT7S(u1SXy^`b(Fd6>aj#RY_MGon}Q!8wQ&B8&o|!1WH0-wMEzV9&!?Nix{#NVth@BUX$N z=wN6s5$-TN+6AovE=duF5@B#BE*8_$d7ft$FJ6t{m@-AO>si_=F$AxM*%_S`tG;Krr&TM?Z>1 zlDV-+#3-~703YVK5-8U(4J|Ir}XhmFaiNc=~V=V02x-QdVyOT&GX2w&ZKrM-!kBOQZ= zM|$@%4dn>ef=KYoOesndBTyO`KF6)b!<(3}kYut2A)e;J6uO$0B60Av5OMndW*K_M zCQlg(V^b9z=E5;9;qYe?cN^C32f%^ci$9aN;PfL);1>}46SB`Fu7;adxMKx-J2?5p zT1rhCJN)0(Hci)y*X}G(M<~BnhX)d;+4&|0fzgoXU=T=#Y)2SR7J9}4&ko1acLmEr z83K3_2wNRcHMHk8p!hKw8ceJPOibvxfYa=u|GQIRKyB)giYU}(7;$?{R(kM(kPA|L z;ETK$&{-4#(bU-kY#Q#Waa#U(KNFF_)`9^>S)n3dr&5rGDeI{^5OK*I!L8OM$J1@ zQ3q)ApPEu>9Z>tuRAidkccvnHMvr|b9o?v9=Fh@lf>|L9!X%T=+<`FSH-$hLra=gV zu@w#1AEM!TfYr|ICZ7a$)bf9s4h6yIkB+@9*)Tza&C`b+T zCv(MS)FXo&A#NK>fL4Ndg4iKi{J8z(f-VFaV6jsN`V3fxzYC%+he{2%Ve z=>ijv=M9(2|9*bxUWJaW8uS>t$D(7a^g!`}T^Dy@yfli;kbS46&V^6eKe?VlWBWV& zYzhM;X`2hx%n=L@QUfuJR*WX2al0Wo*^owcLx3`3k^835s%RWqv`h81aTFgYzPRh+ z)i}f)5q3v9f1Lm%O~XQuZuO`?lrxCMC@~PlXvHdQ<(3M!kwA~>V+U`87;#7q1 zUJSXGIl)TFyhxIn80tA@G(9AH#-9J?*&xIlgAyEtc!N-aK`245rluNfPN_%@5>z=i zXy+hHa}Ba4QC!^m^(nA}r|hWDi`K8lYLJa7M$3CgG~i&w@TW(;p7ivrT9pExpAC3_ zB3iWyPP@n!Ok2|Bd;CMy{;%hmyv*<;gE6eBp#c(9xn07+0-j+cP80Y)sn=-gJDNm~ z_(iAt;fg&;E z7(jxJq}b$jXGX@hufN`wk#Q%|DBF_vy1zQqf6sEH)#OzVlh=w0*ud5cNpwnR3hkW4 zIM`uEon%9>xGi`hK5G$kcnIn>_FIT zQtLGu`iJZwLwaQAr*{aY?#WT?wK<}>L6%TJkSw@!G&ID3atZlCu52a=%?+|d{Q^*y zp!FOsAV*?kDA4jf@**k7JD>Gb^A3}m<_1}U1KmMWJ zOU-46Ijg-N{y3wykJ^#VrQdvJD&g{;YhF5sTU@!~9O0@O%=HjYsRFP+0?{hqLm<_s3)%=&Ah>Disa>vHy%n@bBz z&E~8Scg+BgxX=wx;0~NCovz~b3iCGaMd>2K`#`oFCi*9Lt{?ZL! zoo|%^Fez(2|JC3?Ov=h(#(2@nl$ZgNvJv`_+QwRXsLxP3IXhvg+!Sn0MClKWhp)dc zJ@`eB{LNkZRN6qb3chH!R1Tn_K4U!%3S(HQx#X~9h#4f*LrlQ;0pCY%&6Z>@+!g02 zB@zrvkL5lyhk2nnnHWWqsf&pOBT$36F+(DDVP%Kq9mX5I#y{HgzfjI8h?7S-zm9TF zL7XXsi7+r6##>=?ip4BMcpQqGmZ$rL?*?;a1w4P1m6n4yxdfvsRAE)s_`eLgF_49q z9+rY;bZ7t+;MD009)+zvOz!P21OuCTv51gq0g?W!eW)YrM4q`rAOFMGJ1>;h42H*g zTZ(sYbpO_Q_SmK2-_DoTIz|DdU$T26nj9<5TsIeE!z$NbV^(iJ@WT(^@7u7xKnL2# zNLv$7?2XJL;q{a{fzzo}-pYaYvCj? z-m6Ey^2*9Y0J;aeYA#;9SZX<11b&;SsSECg3EJY>v*WB#l%M!v`$r#rwCSkHV6Lf! zBf0v9I)JhpF-2g?uf~MeP!F|QE#{02DVS)*;QVD%!S?S2i!=sE*yhoa1UzHNK@1un zU#&*%r`P%T`TJuIfjSLJP(QsN^lQGDRHA=12zR@SK^vHjHZU7)AO>w9hA=)_)HSVn zdK6TmB9WHPIOZyIZ8c>PMm3inHflhe0geUmAfk?uI@D`uz^DLtFhRXuGIs{V|FCIS zc1b-GnV_~5@1k`VJihx!KtXq0#w=e|e(o5PeD~e=E--*RIK+31iG;Q_mAC8R^3T!J zV=gYZ?{_Qj8k008@#e*MKKLXp`QVeK;oypRK{jL;c@q@I=_@psG9Xvuq?^sLKy%t1oQU;7-s-%z># zujK2O+XdZ~Azz*UK?&YqTLM;t#?U2JYy<6okbhQj7KJ z0i#TWduGpb0z?okq_jp~V{>2Sp2?d*khq0c6E~bd8BL&e)99l_juj(Pi1q7Pqs&1Z zjM2a@A0zZ4}ZLig-hjwon4rMG8nUSCyBUD=N$A%8bsd z@bkt)D3vBYy>sVjm7Mw&TftR+cjR3NE}r9ICV0<1!YxadqpIemOITAiQ9*T)Y%Z^Vy6 zea9oc@ks9sq&FVvjYoQY;UgYvqe^o%+&}?CW3H%xCdnufVSNNjeqQ3h;?2p5(tO87 zHs}}V8zRT$lm#>$XbLFHffmdtnK`n@*_S=@%>-MWGZ@+oGOhWr7<&k$;_Bi5>t9A&9_QON{zXyw$=Q^J|VAsmb4|)VFjkNeG)~Qg?`^ zPiRpnni8jrI#&I0{LExxVpgnxg_U^P_&=^9Px1aawOQMvMpc&FH#Kzp_|U2Ml~j(3 z+MZRLV+yRU?y54|Y}GYgfcsQmuC6(MzP9dibu~G~`&(sdms4byh`MbGg{@m8v5TB8 zwajV?^kMuZK(%V+dd5c?D%Y$0{M3Q6P$dcH{hb;{Y?WD64jGt!yJQX({t+`8=dff7 zV+>hl;K;)U5xLXA;==?75kDyq7s3LBp75e;PoXacl~F#aL#`GInQFGU?LLLG*qzlRe>ts({; z-GB}MiBu43)nRp8XD-Jsq5K4|9He}+VhQAT_8}4+BrGzA^M|K|Z#V>?MEq4Md)cMR zWht?$@7J&YUKM-KlO5(-yNV=Jt&cHP8$56M@~2;WZP}>Gt!vh--?;H`1J4+RUjzJ6 zUP53PABRI_nLrw7WPr1^y8M$3&V?%%I!pKLXzS{CHmwVI(cfBMUWGPu_Ok<_w>|dQ zG`&7JCgw%_LbD(w-klL-!JT+_MhF$1DZLb4@Ii|uxX$pCpM?>Emee0mQg{F7C57GD zh}V^pa$nUWYoR^8y**r5r0(Ngr9HYko_+QXT~Fz;Es;sl9DW%twKU&`!y7k#ux8Dg ztz^_PI8l0P`SN*SQgoMFP#|`BLWoz2O(;DYeBgjwt6#b#hCQ_55F4}N4^iQM>_Af` zExW*&7*Md^^w?v!H5~Ztv&^QZI`q+c>;F>eO-Jd`eM$;S?>>wbD-29XFjDFMbDu^u zy5Op!^75kUoNiziYr1Pkwm{(d{D97f%$Xwpd&r3Pi2aYy2x%G6;Qf$q)r|6U=PDg- zx`=u6B6JzU<*0_==){|EhL}$_?cs-6ck%h@Jat3^pf8q|oR0Hur;=ADwEhgXK$#6U z(L&Q3k5Sawbut!oxx25gx4o(rj!mobMmEbJn9cR| z!!QwSg z&ppR8DJi@tC51ePFlWC*a(R+#3aG7btgET5uZ7#anwol0>0r*M6MhD661{`91z4d1 zeEs|>0gA7$UJrDWp9T{>{gCKuZW8jg0(tur^7af7krl|>3ao8z!nW89xd%N#5K>u( zQwK!}Q`lEac^PIY{DT|HVc1h%Rstaj$^fG1LpX4&rU18brJl43mm+ z_f6t>3*LhGNri>*I0{=w1&jHE75UoXcPBDJY-fs#ira1UKbu9VEGz4CU9to;^{7+; zfRys+#T*~m1$lAjAW>_5wQ3;j@si77B^nbf7j?kgr+biL){t-CVn&DX-2L}2NYbQ= zd?O>JsodCvoA1A$SNkMB^uPo6&z~}B7GxQK6AMI*%;LW!laJ)TPrUo(DN|l~1zUhv zxZ>uxAO!3q8ECGruI{r^QR_f=cMGx)uLyFbMk|p>oomSKw{F5l-3(a=Jwf zaDns z-ioGwh-8zTbBBsI|8qUhMwJHeTvTr$>QW`-ncn3NmZZ$ZN_P!OGqNcpWj?;qcP4DC z_ZgYoBomk0?n{z*Z|ZQ;Tnp!`4GqmrXJPuwrgCsZhCb(w_O93XYUn92Anw|qgw{R* zc}qlVpNZB!0ePE%)*eQ?t@~A3QHu5~s8!$xv$U+DgaVXPQ8UOQyBa$>K*5=}`M`>b zoSfR)oE*KnrBUao_wwrMnvmoq<0ms|sp9|3+k3!ARc8I;cY4pXBs1xgLMVpNK|rJz z6&ot*u4}=9uDb53>$~;$GP&5+eODLzx{8R1ND=7-LMI`RUMIaxZ<*f9|NBgctE~Ea z-{0r|`%U0xlH7aeKIb{lc~1GBb4p5FE`{P{&Ma!8KrrzQtzh1~<;&;KU%q_y(xpfh z{*Zs)#@{^h$RGYtejfr6%s*{hOiBYC*|Y?m2rD>EO^HN9!^@oW)R0s<^adUd@buJc zc*F>{NrhjA^=6}C#6;lH6>tJ!n2DER7ZQZ}8~J3NkG&MiOz=S}9HU5AXJ#l>1V{aE zSEMMj9F$oB%B%oouKG=>$uMjNh@ArhAu|=P9>_b$1-!8KdwUUs!m+1!Oz6Ey`Uoeq zDc7nND;=`n6|UJ=Z4vYuwVK982BWEoNWreIraqDj-PhFH+jQ>Sc~s@8X-bK&y>AL| z%OI02fum8&Nv1rlFLRlb)~bfMQ?#1p%Wbx@G9nspx#f;!kR639?zru?J2q^1^s&bt zf8Y-4aUv(*6rg~OI!2j+{l`6`=;qCMc{0qRAz~$goX7>m%K!7L?Y!G7u0#&g# z&7kzHp!5px;0o~Ilxry6o8m{1wg+1bbTmTf0Yd(+F;Z`lVXnzD0?CXQ4zbPOlDcq= z)OtP2$e{mGdpn;Owl;#wb^54Sh|YghC+M{IubF7Vf?Ft5stIs3XD7^v4vfIry!l&# z7mgA9j_c8Sa>@E9pL=Td!kgDW0p04cJ8r*m_R}vufhuF5_+PT%4`(Ccgc&dx`r#F} zQ3{w6QzuOlGUzGSP+p&6W{}!|O$-_|A^Z@T4$~Ovx5%)@;usV#<9Ok~-aO?R(x<2R z8??^A9tZ7|P+m^vLp4VGAG$}1_LD&SRgi6~G~ZT3P%1!0xHlk!##BS1peV5Or9!r_ z@38}d#rvy>~e2 zMBVYac*ZSjm(MO%@cD}3*~`}&f@Kf==Ap76?t)8GSEr6-VpFr55lc0F<`$vuK@m4_ zrH8AYy;3dBIDERX8|`?#`tHWl8JXRCKl*5|Et67U-x(r`A>07if56Mb=b0d+7W*SA z4v@}$QE&}Kwhy^N3BHwqn-ySKMez@vH?|u*4T?PjZhZ#a`ZT!p8F1_DQB-sZhds_w z2O_9Y6$T9kpSQE>k|DuxAbY*HcWBUMbC}={9}L8;0c70T03OK_=ff@0{BG*gqoIr`OXwMim%tW!LXp{-vKF$&d(G4>mZ@)a~PE@FcGRlmw0JLKcC**MR5V42` z6Y(I2p3UQNf$Jg%Au^ZbIr)WUMe>q)vx}6n9Kf*fXLd{}5)(Pbn)jORKA+oWvRceGyWQpS_^$duR*_W(o>>K+A)Rej zfoBT42M9{Th_<{?BJA;Eu2Ho- z5evNi_U`VmdIcIyEmwuRcfU;+rX^4^+cKI+D=0_*`tpJ_XD!kVE;-Z6*FE~^qwC74 zTx|hRHAvb-=O_FZ3Pr+u?pdG~CA*Q*(v=XY7u-V^`s_xN$z-&l&k|l#)R#c6P?$>k z0H_-swOS~nwg3er87vNIcgcbijw=$2(}b)jscsmFv4m1FU8v5IA`e!IdCtdVBrD*n(o|Ezsa;QE>@-v=`&- z9p#qLl)S%x<;t`KmU%2~0#?BUY6H$08j4Dbmo8m8K^h-|AA9^mc!LH=A%y#|@j`?# zD84KM;s(LHdM!Kko3V@*HJ-E_Pf9XF)Zh>`V7ekEiP92T3Slf7#A<^gMohuivHMnV zpx&{W@M-VetI?3c?uWkKx0k%vBn|a@vE->T8YSX66%4La%bBtW!OTT8m3h-mgM&+# zu3ouh_04EI+uz?usmgAqo*+mvW+W1?V8lkXoZicO_Ux&qMG#|nbXZh+gA&S11qTXk zEJ2B6*yy1}LHW+O_?_RUc9e9@SnWrW*_I8Or>C$P@OY(ES{{n$5=04;+tAY5(t-!& z0yX^{r?|MJxH!5r`K#n-)Jw1O7Y@``*Y*so@=z78Qi^)yD_^Q5*~ph_@xGw}Fgivg zvgKblE6hWAfufmIWDKoH5GWc@GZ)mnio-yhoq;Wi$>I*cJ=HU~w}RqUq_CVULS7a% zYXv$}|0iml;>g@N`?Jl|OBIyj=uwqsGQeb$H7YP6dK3`oUd3PVSll5vy9|T-DyT>W zR}P0zhHNHzKekJ}f+skD72gCZyn^}sJ68PSAzjU-eoNB;X}FzwrR+&`F!eUBh%BK#$bI-au>AC83$C97nHM&7o;;9W zoESOx&O7hC^_6aE^7`a+ncEK?I<({6x8K=a+tJa{W7*d)hZQI|jVxB-e8D9Y9k`si zo@&sdWI}{(*o!MK`>ZzUB0W#a5^B_uQHR+YbzzzNBvTeV`Q($67;#TWWu|X5;_B=5 zaV0P#qi9xYt#3(lM@6}df5|8)#b;TduG8_=Hh%!wolFH*sNR!PMEaXHV6rbCx{%%rnodpB9$cPjCHV&&l&A z(+gMrY~9T(mo22Rl_;k!T=wKL)Lp?B@AvmE%oQ*{LOG|GlK;V|*s=o2e|INuP2QAT zDcH2L4FcD!Yx-gtDV30Xp7-vaGu<}t!!O-L{f&AHRcrnTD`q>jliG_p|2yZ2pD)W| z{th@d?(bF?DVRRfGIT+B9Laop8rlc_e0uSfOILLLZ8awvF^bfyp&s9K1HJv^dqcyC zlhqx~^{I}?V}Hl-^pP{CuaFCBYwHx>{&xHg(igOB)M-uTye>|n5j7MX6mA8qqk>gf zz(nzV#>^1YPw*o$>@1?#3A>t-+3&0ex@dIsP#02%>cl-}+x9q9xO^mJ!uH|(u$ zU5~Dtm|Vm)GtjHk0j}6Th=n!KMbuOv7j=E4@xb5g3L|=j)VWeKp;j&L9NHHVck=Z^5k?DN6TX|;z@Fg z&C*8Qz=*lK_S~ucpX}LtbjK@X^6@p}l^sWSZ~Ne{A6K5cZ19)|yBY_X*tppvAH2Ww zY-3{DikUe|nPz6Tr}4s9A9y_j7mjXO1EyJv2;c9}o%omF#rerQxGx{QFyNtH*Rn3v z9DEb$ChUqg-}xJW*uNG31M*Slq0%NVSg_#UWz?%+$oHAQd*%h=$$5X~pYOu2C#O-K z&KkBN|M>ay$N#qJFUQVbJb|BT>{P}L>nZhOoUwf2 z^b9IAa-j;pzhI#(7x4YWMWdE4o3?Nz{=PU1^_o#G3LCN!xE#IFWVS-VwVF*9ixC#D z(QGgqiP4Ka8_`ijagB!I6yC%{><0rBwvqG^TxKwt&35Q`7Ms;%)Q=#4d@-uQAQA&O z50tfU*#I`)zF|UdDOP;P)130Tl!Cq9vhyjLH4PzXa5!ER- zCpR|>C80CZmB?}bzw)rD^}7-4_eHGVKVtoE#QNQc1v9H3I96gl!ka|=Nh$Lkz9hI! z2$^B&W09s7ZeOan*YHq3)Gjf+eK3k(S8`!lR!qY)2FmbiTMGbq3x>^k3Qi4kSV+j08!<;e|WU*gwK)LU{FE?g+> zsHQ&EavYBH@4D|_4(29bo0)&+i7E=^QkTqa$E5h29=*fmc>M9()dp|vhlr?bwa%LM z#D6?a%SuWm(uAaZ?rqQiVewPX-F7z${f#;4US+><`@HGt6>LUY{@Q0(qNdGQP1CQa z_k>R@&haAn+R-ZqtG}id<>bs+t~`eV=|^-Oi1#ezT!80{Sj)(9 zh93famf}5Ss`OWyR6&*g&p&2NYbpl!%+sLb9Q3wga8EI~N72>Q*N2u!UEST#lHjlE zAc;v46bPiaT;%S-r-l@I;%P`J%lN)Fd1rE3auKwaTT%r-vjM1&;D2tCBTOV-d5LNb zB;FeNwN@-jjh?93cykLwP90}Fz$k<|$Y$I|Z9;e1O^n+ZZ*GEQOTM&;k%z23Ia8}J znNT;%3OmP$4y52lL#5%^icMm~1{<%Q(3fwY(YqA26@~!}VImFTYv_P|l6i7tH;e_iNe1I_uy$ zCVEA~hs@886)|HcV@Tp5*>N}G?lIy8b=S4G zHp7H(YNak~bDT~iDv$?k$u8R?y5se1=d%H|lC!MD^N0pQ6wE~Ytq%UT)u!8 zclkZP{M}2>{qDIH*sHdUeK8+`EtTVTk2-$)+m~K?>6u4%^@7QiX|I#lq2d3vz)j}? zV%+Z<@uYC~2D1$Ytd$)b?)|M|$G`C~N>b6r~Xb~*TRBJRS_a>q4no%K&18jg-*jF`_ ze$}SbfU0vq)j6Q522|C6s?vVLNLPOk#24foqB7^Th;>1ifKDc0v5?cXXBG8o(t^gX zmy+`2yGZZo{N~)brAcv8ya{{VlH~O?H4#TGj?~9!iiabOlpluox>Zyd8*mRQ!fkeWqapmONyGUQd!a6Eq_Z?uZBD z@gOZ#h$JdS2G%MTH43ZbUGw)b&LLymfOU8?*5TDLD&&-qz!Wg4Fk{IJVNs%T;mcq) zAUe>g>+C=CiZH_DqShavJ|SSj%$ZXn#9_cA|Hy0nUHQ|?pgC`$-V#4u&bOa@S6i`Z zhn`YsS;AgVZUxL+im^F$M6 z$XE!jv`5#2V1mVL8OB(+i3l1m0KbHi{bZBHmxwPy=paj-rOqW?wUjalH0 zt2hbVctD4;4qXF1gWY|d9i4q7s=-7bQ!O(>@m^zz<@?SqN&ZrNVz+c;fj#`;2X6?|dcp1$+v zF&=oB@Bqg02*$Dje7*sE&aisu$wBi#HD0a#20tTV@9G+{cA~~vRn^(*5d-;4y}pO& z%695Y?Np0JKQPn_t-A$fW?{UYJh5%x$@6$!s5YDX`bI1^+mNmaO{m-Yd-XPY8cU3@ zb0nx>!yL^H_0;b9Os3$Eb~hh}XHzPbOIRIVtn3V)zY2cMlL$qj45f@8wAx2Uoi27B z3dWUBEX-oS+Z1Lp6pH-(Jn7}jvAh}AO`bGCp0Heo{t&B}l8pFBBMu9v@cO$Rh`Jrf z0J!&OKl>ScRlabU2nwMoyJXeIjT@I!cWI{vgZ>y>4h1ZY&$6Q4p26mEQgD8-l1iV? z<+224%@TOv@HJWj{G3_zG(Ji{Ck0Y|CLT0PAWS+U(Lk8VjSC@EAcrO>BxpgJm8ph* zgvF9z0Zv8v?EpN%p$NqahT!M7A{WC7#m3{qjshhI4o=kmjYlJaP}moxry0z6_TgcN z8JHihZ`AIy2U!BjZz177=%_>8CQ@?E++yp5zerEDXs7yoL9Y#vX?PJ#egvArEJT=* zp_+*H!@nHy`HaQ@s=u>&thmo<9K%bl!Thhm{I9|MufhDU!Tc9h_3L}|dd#Z6-)1;< z`tYt@yAD;=RieSh@j4y02_2-8Mz61HX@TltHWdMn?BW6T+JKLJio;`b|t;a+c z9X$HQ$De+hle2Z}Cp#+-96yBn95`m;VpHLmE^XVktp;4Jkw`L%iWCY1;!sY^sNS5E zr%MhWHc?q#nJ(adHdR&_%ga0D5~O0p-Y*$Wdk^{F9R| zkrfseDkK7ELn7!j8bpR963j~k;eH&2(zrXoOeDN9HrSmV3nb{GuDW(a8tiuDP?;J# zO{B!6+2{H9`$&~Yg3GSjmiG~ zt=G7^-H2<9?bYW_9Y3(=>mz4sI&yMet4?DVE-)I0yHJe`o9xMbpVw5M zJ$33-ZEGLu@fn8)nIXH8E;L5OaD0m+#=*-6zRE4kP8w>qY}ry{NMse}?KyP0A1YP> z01|vg!~@E^WH;P0do~JkG_zF6s>op}OXsb<=PtK9sh)J(3opEI z+oa5x(|O13YvyA06!`s+Izla&xzC&CbqOaoW5PcAmB)Z@u-_;W55XmnF@5LUY#;E% zrNR{7_mVk~NMvC9K#J{IF(23-%?Ln!y(vJM(2^OV39V<71JIJKBLf`}LY-r5A4__A zdO=5TJWkjjx+2)#>lp>x8$B`W82#5m?yQB}xdrrJ3%Rova;M~U6QRGc?ZVjuP&JLl zFZS#?Qr*-KfoE(ubMnC6eaH4A8@|YBG!3@ZSA+3SUuKU%ni6Qcn-7aL&ERTX}Xx9@}bIxp)S( zQd%MYqUq^3yuK|&m=4L)07N*i{XgqrKXm#Div!tHvDRb4aV^wqp z?97q64p2KA6u>FY{h56xD$wZjQ*N%~aH%_>;zq*i1EdIZhn42x)38!D4--qw8{`WiUsRJTV?QBe_aTy zdzBv_!`L6g*dN8%AH&!m!`P>Hbo5!hT*}#b30vRUBgeXQ!$dxG=nn5Wwtqi@Cr2(_ zId%N_@v19*kPf|obC*LU5vg5H5GoYb=^CMKIvu*RL|VWVP*oKjwT)V>hMty-7cc61 z2Cek;p=f%J>YK`r;Rx*O;%tV14YaPDO9@!A9JTtBPd@pkvE2n#!qZM9#}SC^@`3^- zBWN8#7||&zDN#e0Ql~Sl2JoNa*M?t7DzK;Sjhu=y&j5&$x7uG z5h){=t5iIu&oX;-+oB-j~geS}YDP5h8J~*+Nfu z_uCSFSJ2_HfZYQLM!;eO|Bo2`m{f~}kUBhwUBG4-B+{q@HFwIPStVUovNH!+&B`t(KV}i#fvi?$G!zjEIZS9r5jRsV99stjEj1IoFcZ8mj+RmpeaLKX>S#E3Y}cos zehS#<@r$i`_-HeZkxihgij)PdsX1?kGK8J@JgwTgH795L_HEk^?D`UCe^FeFSj(lY z2-#&65Os&e;$X&tOs+yvJO^#MXVI#so(hLIYo z5b!YpVm8<3v+D3^eW=w55uHdVMO=Xp(K>|cWFi5FNh!4%DU>&Z;ls!R0US`77wd;s z4Gr1tW^-4s#UHewYNyTKC%&7z9zZ8@(A3AjG#OZVA&YeDS>=2zweT5Rt;^u7iC~9_Aa-9lU z(-A)!)DV{pu`&@f6o#Lgp%G|iEV*yV2BFFqi%mz&RS}~Eao{D04$Wd}2`ni|OCyOy z1mHgjL??$Yk*A{*Tk1v#p(4S@hJc;XU@@8N1=bKe8#-NQCpBj4kQ@b)i+nximE?6? zk2Ric1$&28CDEuf0e27CYLLf?qX}7U>Z+=4D=8EMrm{H6s89`hY*aESo*mY4>X3?h zO1Eu0(jODB(CaRcarvvSs-#X~enA21)DMheyv=?pI|OUvrd;GOas{E#bPcT6g6WG< z(RDHPuvU#WG9D9FAtLj+!dOgENGLG9P!Xfm1Tx|s&_$y*ARo~99Iq5e5`b6G!sWs2 zg2O#_K%gbU6jfyDbhZ;C3-b9!qXVr9&BRM+Hba51kWE2twF=P398gogpCbpEKqo?} z&hh#yC7{(5tbr+@6`Ai6&`M@Bb{#*~(Mt@2)4jSA)Lv~`PjhosRddP?Ft^m3Y4(?2 zeklJ7tW)m$MDI_(h@b`YHK?m;%a0y)I@Kz0zCtmtUqQ1Uc;G3{AD2*n=cY9K=uLnE zGhd`$)TW(ZzFehpIKlBgUrVu%MvYCu@CX-ZKPJ=T3;Gb8O+(B(kq{Ae1uD8ELd%FQ zj=pC^Dc6s5wZS=)7J$#+>T(gb4*9JidW;K5MjffSLu#-HHL2QEC^G_*>Miw5lwM_S z&8cK63>5RJ@CK$&L1BvV=L90r%prR{GX#f6$_ua__qiMQxeNEX8x*=56q*QuHEb|< zHC$SMulX7zQe)h!|yLNtc@W{SnhxY8;^(Blae0mxu40Y3*JvfShienU^P+nqgVU7;U#j1GG(1n31hPnM$|$$^8=Y3wH8 zlZPPdOR<}W!^DL2Bt)VZQR!SBc$aswMhh_Y(FMdSE zJDoowkH~nH>PKXJB>p2Z9?IYk%lJ{tkH~l{B;F6p_+{YeW#H(g;OJ%G=w;yO zg3FyIYzU^#%T;@K?%cVz>hiID`}Q3;b-800BA=){c}C-)uC2Ki%v#&rrW-Wc?Om-+ z)u2{&b8DxaE-;N*p;1%YIc$vQ+DsfcaEkHc9vGvwRHHUeCX>KZMr=^h??)m#al)KT zD~BaoE=`(F(9B{PqauqTE1t6qzgsq^I03!dC@Nm~5Dd}P)Cz4Lb~x0UC-$ct?#oDE z@nSVEHiWSGP@I>!m@Z()qnP|?j4e(>S^YFITLf<83)l$?dUpZ(*<%Ed@?&3uJAK`B~t6DJ(4bzPBYDe)7qUk3BW+XL<0M zUv7NrscXG0b``R@K<}%57C2u3TKm4YB>_Mi+Z7r2v*c1E-}ScG?TE_iuZ;Uyy6U?0LqpekTd??1P7|Cu=4XNPB@n>x{>|GmJ_Fyg`(fxG z`=;t!>%a2A30VKv+WlrTnkhhbk6ZsECKH*H@7n!ab93`DIp13U?DV|s?5)?@{RIVu zg(}#7px_)!ur3~?uWb%guw0N z)_<6Rf|vhp_m@G<IRnLB^Cp#t<7bo%lI>btjLXKX~}?@ydg>WQRPYRehVkJZq5jx!tJF9=d67!PGR1w=rHlR+e0{j*TjN%8g zotuFF)i6A+b3A}iKY&p`gi$|$Q9p!Hm$h~eIQ{mX`b#HaNq%##h4?v|&Ye2FbNkmP z&R(d)efnySAK0;D$AM~y*(nBtE7;Lbkoy41VBI%1Tp=BN_kL7)EahvjscCF!Y3}Ii z8!^yj?VilconIb2UvGgiN%<0ryu7@4-?0z=$z=x3y{A6=?6afQhdzV+{rnuYQ#(Z_ zi`hCGt8u&Gh@iN1;)Gl|yn9ZjR8u+=;qyFx0BKG={bnRL(`7Smm?m|4l#>gjP&JhW z3O>jpE6r8RdwS#I!YRw(VqZ4@*88C9u3tnwsZ|Aor{9e!CIJ|)5+!_Y7q(|2IPOP5 zSGXn(aN_z{%FLXiN%=~CEJ&A07|^xCRGKOmudFmgGQwfDBsVX&pde2vL?6)@L!6<8 z&o)Pi`jk@`3|?B1T1X584v$1}JbqNHiTXT4j?{bwgK@ZA#C$e_wq=g$%FdtPTgFNt zWfvVNhQP=UhbQ+&^-H0S$LIAU8_w$TVqa2uy*E6Gj&;zjeWOkSAhBA_(9jW+ zx?etI27#P*rz7ZhIRLwZh#%7ni5F!$=6c+VQjWP!S!O^mq$xZ=#Ktm^7Y#28L#w=U z#pwb`tf(U3@%pU&DKAQT0-@FSc5i?8olWmmwGJf`fB!p4_e}m#tAxX|?nqVD$&(Ug zmO71P9`mCxe)F5(+<()`b+!iY52IJm);YWF1t5m77W-h*A!Qv$`zuoDMWTgBk zRIEZCg`?ar2-x+YW+((dibAVQB+TA;0BxF+VXrUY52pMm8GaBW$`y+wp){2Q4Y1j4 ztj%7n(&b8vcmpA?#}x#AfDt2W{UKdn4`idfIX!~K23 zRaL`iLSPzpz<9)KaF|wQ<>X?8Vs)SI55si+;tO?dVUdDZ&oUW5j&8rbm*JGD8*(}o z3XDQVtAG;)t6+`aIXS0g|pyUod!e8=VLOF@2(P}F#$0u zn0!*n9R_rECcp#4dPc58%HbBr9iv_-kmya!K#P#3dRQ1RG0p%0P;_uKO_R<$nYZY1c?|a=H zh4V#!LTP|a)E~4G$hWmCCJTiVm#)QFX{jX67K$ViRjw+#XrhWL(MllILP2YPe@ZEi z&{93J8~aC0R~mqttM5QJT?u5FbHo%1*{x2DWo%~e!YI~b6t`g%w_y~w5zZd)At|w| z7Y1bK$wMD~^ub5(z5Brj^pfQNlDzxjk;6wTkDsWlJXzHR_I+Mki~)LGE)K`y^x~bv z!{H1%J!H-=w^x6?fB(Ky^_R``1WnEdEzONBy6(<4**=?DZ?wBTqmw_|nu`@^Ht9M$ z+S@)u$Xg9!RQ%VGwzgw`ufV%EkGHi|zKwSZ#Zm--U*`P!{`>F0?^jEK^g%e7o?vr7 zK6}>8^78AZO-zhaV`kbBhc&9_vjFGkm!cWr?nH36^S;f;M^n_T@kpwc`OZfvl764w^ zX&F*!j$E3}WOI3Z7TPEYy)c`KF)PDzrB~-_ z3Fc}E=4u({Y6<3Q3Fa!_)!OLsbT?k9Zmz#{{`BWNYOd_qUensqU0q+>>ct+DYu*)w zEvKg>5|+J32Y2Oec(%U&dqu@Z-+&)Cxx;i0DHu6Efw@4f$CdUMZ!@z|MD zwVSE;w7H543!Kg~jXo~d+*EtU>Ad~|CSw{Ojc3Z2!sZkT#M~Edz4^{N%4W@*cfIn@ zzkU9Xin((pm9D#Q<(lX4h%ak%eK(y~spedlfElYPo1;=)yw&IHvuUPJpKQS{ou8SN zr&Oe=^Cx7@nO{D=ybO_y^65pn^3sel zO44}|I={cQ&d{T4sjV66Z@*l9uCk&1fTAWeYW z)4vXdI$J*9vgOSt-S}=!XiE?`%RyU$xVaIu%@ua!i%fN=&(^uE4VM}k&Yh|{`p(}D z9l0%3`>&%3E8B5j#ldv4*=lyM{gS+^qL&tDBni zCgX_wE&8>8obGqC0UpUwSJz&=aHU(H1_^LN=kxU(g9I3jDiCkZp_PfUiJH>#iHdOI z`IRf?%$_%U<{a5FN{ePX|Hb{&9VIzz3J(xobYuC%iABl~^0PBJE(ClgX>%CiKqQLP5?E59!05U23Z945_3H)R4}>?ioqO;{t51PMaYRaM>YOEPZCO zRgX!Rd{DJWGlu5R)i8nTM|B*VZSP*4Zu|D;=J(#C06R8^VMA6`?ML36nNAghB!Dm` ziz5;(&DghZN%C>VfhxYDd-ZB(`SIh_w`KQO;yU52OQ`LPTkho+Tvq9q`OpjoWr1)t zs(1#2a*tXJ(QzV21qjEoHpAG!^1YS0n=_YTZiea`ca@f zo%LVQG`*|dh@==}{XuAEho}#=fOv9aF6f2s7*DZ(e{U>z)v8r_v7Y_39EAm;mBM=p z=jqrNdj+1igkXa4#h$|FPkCrLSU`)eny<@z5tc|HVnx;#8cv)rXx36$X#%LI!Zana zZ_?9ba3aalBqRz2tbzzm*N6{)_Mt|hmX}0LQNQ08c{!^YO_l~bnlEF3SDM>l`L;J- zhSGQi;7aIuz&4QW66Fd^CSNE9_hu~QH;v8iI7;p2@2r`;AEQ^maO@)C`5&X1-fqK+ z`7ydx_6?r^?En$`VcKyRE-%enF@aEP#ba0L#eZxCD8`?%;vuqZ@WpxX=2;8YLnytj z&GDtEry%t6tzBEl0trtTSems-hEPQ3{U{+NYT1tyQ_G6NX+h`x7)h@Tcbx)RbAF7h z9(QzH9%X_TNzJP)(DrK0D@t(s^u>#Ht*ygQ?|D$EP`-n#nAcu|^I3>q)Zb`%@WbA8 z(;Ab3Cfj8NPoa=P8c@H|^5Ax@tURMo&}fG#m8PYqM=3etYzWJ91RPWVJ$n|3Y@MC$ z$j{*sKS_cK4@t@=T13_`o}>s*@~xiJ+S*aujp&Y2)>l_w4@ADi{P{ zW+IbNE93yr1vuH8ZvyWlQLI{X``zo;g9WkD6|_`FrE?q?arywUAcGxapd-n){c(e-q!YN1p7~(>5aFv?B0#$ zYq@57Z)aQAAnFud`d3IWzfMJH|+IdQWadk$JpCFa?|S7*gpdS zo=}25-wE%i5mgvQy+XKiGi8CEKme7&MzMd!-445h#2MnG5hb|K`Fb@Ql431o|vGilOnM0%#=ioxyhPWtqnaFgns&WI4R zCr)?+q@xyJ#0>f9X-Of&jerJ^jqF+x2Qcn9MSSdRh8IbrF%DahhG;N57!+Vjk#Q)o zHVYLzff03+osN`pC5WVGBnWvPt&l}Ay?&3&i>|jANd@mnqv?-)=DXchxo4knN@W=+RxfoX(v)@o_bnfzu68&k78N zusky}jyboElF~S4#4;h=+)j(l6AU;EbYYK!o2h#LofF+j%(A)G=bJuVq4@MuEVoZF zuirFcUJ-sJ^V){AkxrWk``^Iol`9Q~g)Ga6!4>91@D)zZ0@UE~CvPl8#^;b?kwk*E z7=G}Fn49y60n`-6iUy`>3*zxn?5Qs7yfKPo0E@#Av=N3tA{H=0ZhxFE z1PCu280xp1tq_=&LD=c2`QFtf67lSS01VmSDk}&A`KQ;DEEk6r-eY|iQn^AQ#udAH z*b+(t5j^IbP-Epy)Qm-ON&e|X4LYhpf5N|%8YS#)IN#ic+_c)d_k>a;R&&H=smzQ* z-*Wln`uf;MAHDo?EEWvD@(T5;Ryw2EV7TwT=Jxg(iVPbO(^v0BLb~X3mNYL_KKmlj zc)xo>s8Q_Lp|SZ40tF?EW?omwo?BR0s<}}ZgKEJ{M1cCjXT)L#)Z6XpIduyCbvxVI z5Y&;H6QNLMW-t;lhd2~?_v${Qxw_A2fWU>WNbG4ufyw8l#v$m9afYzj)YQKxkebaH zg-m8b*&O+;J)e9+h7bt+yI5cS-I_tFTOr?{QR+R_I!oc z2_R_-QTxT0OokxuY{-ZPoTGIwSpwM8!8TsFjg541O>a~p+4A>2H6viF?&=dq_DRy; zfB(R?Prv$P%Nu|B>tFx68y%?%k@oJl+dF|@83-zoOF8exLW5z&3NANFZtn836m<4& z3$pm4-~Y#kTjwEJf6=;o9^R-FKL7mvk3I3w?^goX{SK15HjyvJQ_z$uw1qxj0&eJ1 zh)9jdi;5U^7p)4#sZ)A=AO)JFD7&0!UEhp0B=p;>gsqH zv+^)z<#Ei)!yMf0?RZxWjcqNY$TQ1CWXau0J19=f7-s0Pz`u*?! z@cc3)w7x<8MfsmEzWCz$#qbshr!2YeH@|!FR}WDyY182zjBw#%#D{S_Hk%F}>XSp- z3<6C;!I7A6$&!454_knZBgt7pm#1Ya+1S095{X!uSEy2m%GQ<5wuKTeEm?gF)MZq!frwmA<_-)Bz-WILP(eNSy-58Zg~Ec zQ*MXu%#r=OkWPK5u8*{9_x4=`sHhsHQ~`y~5c>5O55V8=vX5Y=>m9Pi#ERp`d)?@B zKAG?7Jx&W3%*n>2iiEB;w-S40Gxb{LA2)9N<*$DAt4D5LI1!s~>B4o$ioTQjwYJFT zbNKkm(uoQM9O?)@6&4p}@k73)OG~5?+&&^LSxT2A*-6X=n>X5Bd+0=WOu!T3CVWN; zsV`sw+!c(Y&Qwt><{b@j)v8EjXvjhq69W230{}`9d9_I>7~u$A(%I04$!a*&V-tP7 z`-}Z&A=av^FC+N~3SDzk1JL!2NWv+CTc7Rs4%USb{`c8HID_8kiD1z?UBu(=b2&%H zW+r9dXxOlAVBbL6qoyya?^aftOotBjp<6i5jCBQan`thG01OyRPTE|>;s2`PC{`Gj zD}W)==@N-GYo4W6X?ec0=guhc!%L74FX1^}f_!)h&oRHIzK^VEh+-s+jda!3TsVrwvKt67g8DjeuBHm{ z@>B4C4-K`{oc;Riug}yFRA3|ZrgomeFgyY@BFf3=`%DP=VfVJ1kYnI*x<}0@*$Zs_ z)$Be4N^4l{h(X{C%P2j+Zj4S z$(g6cS`@Ba1nbgg*xuYNO2?iYFsTJ%~{piQ>YAAuoT@q)9yQ z&;^|?AS^&malX*6qvy{rQ-ZvF5sybuM(JhqS3msRbI(2b?6Xfj^ZfJAKe}dq9(L%0 z`S&0f?Ed-1P-cqe--Xb`I)HlSq0S9z1~HlF{K{eT(jZtQs4>StSOzaD?3*{QKoUkU zC@j&;D=6T32eHBiy}SZ?K1z&$HN#L=z1ZRsP-9Ub3#thxfewH?CdubUsVb<#cp+gO z9|1;Q}X3S~}a2_ysU(^UUv;lmUvX2_u z`i2p2Xl-n`bne`x>S3pyV7c^d=jyJU!#;GTuA#Q-Sk;9~%`o&0UD$}GB7+g=_-;t) zW}q=@@wj>zLpFoOX6PRT)C~rcndFpWD}wPvPiq{aGSnY^u)lE_3zI7lrIBiKnViA% z7Y}`eo#@ND4l5^(7aeFmfixYm6-`Y~=R)kYL5x+|q|$V#2pKX_%rc}m4v+ZL^7GT{ z>IP%{62LujIF34c+6_~rN$NMxt)DY74Wganwb@9GU#?V@ef`g|4gF&I+*0J?3G?UO z4%u}Z_MxdB52eh?ng)#Ani3#2h*kFjv$FP4=|)W7x={N2_yr9WGe!*^mOam zdpqGM=;$C;B(P0@Z;s-pL0G1Juv&a^7GNbo??|^EG0HG12w|rJ2S+0z&s826-;Hj+ zx*OdBSyR&0--+$#%rUF(%ArHGgF{2aNJA_N(M3eD3b+K3SwSc1p9^)clPx~BG8D4ZQ%2`10>N2NmjUk zyrWARyIQ~;I?I)l?}Xy)un|3%p+~fK=pbr3I&|2JIXVhV?eMutJrPFS z?`I`E))8|cU^de7oSbwa?;jsw@oe4t51uRs8G=$i(~dGRHef~~q)vpCmb=}71oIaU zKK9sSzqtPw%!nK5HB!DPdqQz>j);jxwTtivjo|tIHpcx@o!JM%+Ahf5->m$gm?s#^o6qum`EA*Xs^MfXf~vj=BiP*ZH*oe$o57rVH3CD36gTa~6@djd8W#kQk% zX)_xi;GxEN;V15`Z_&5!WJle8+#A&z{&ARnVb&n-&Ga)Lx{JNCx0oQ$_-u}xZ3}wH zy-{eaneFy^ue~?P85e`k(65t|O5b2L{oj3N+r0JSYKkyI?E;}3xBjNDkvHq<~l7D9V628RPvyY=-MYuo8(2~9qV<^}_$jS8k zDdww;SD82VQ_E2EMnQedv?0*NVsMxaJH^o(sTe*L9qrt8Tf=eFiXNi z_rR2InS+@meMC6;9ISu*M=%oEr<=8K<>GJ`IpmQ;i+1xSk{SQQSG-S7sU(MWsD_$q3oF#`R$U#XC7k}Ui=C|_!N?xVz2JPm}c#qyN|2}{JIzQDq zrixtj@Ywkn^uM3~ule3fPEAb++QO0!n>d{R$tOyELTbLReIo7;aTGyjyP_yE0%#%2$l&aK2Ht`a=_64|D`WY?wVqjKah7NSo%rSAVB&LaU#oSjDwpI-YZ|0macU;d`1{u^@h zUy*~H9QIxFWZ%EyFgYb9u^%JvxL1GtmSg^1#{ZR^VkC#uS{c3i<4;_%gPig;IXp@Z zVsgOe{=^lSw5wO7o+^#}h}G~DSCDCk3mJ!$Y`l@Y`)O(V-?`%ZGm;YTpW)8Lxr+Pe zlEap3XYu>=#UyKCoZ;{iIb@RqNbwURNzKS9a^A1Wfk_Tu{NxoMkyC2OVHG({A%|=E z@%vBpCOM^&98%&nPTuYR$i_VH6p8;wpYg9NQfunk zryA!IE=9B;CmF(PIG~ygdCMhllYhQuBtLKs;a^mtA=hyJyVnqTa`kiA-@As?IU>Ev zNM_a5I!6>6x$~!ifBL=l)lKZ!8~;VUp?y`$TD_WDt$mffYSq%Eh#S0`dVA-cc>BKg z)iHTHj;|s0cSx?@Y>YFD!Gv80XAdmEB!drcCZ7?7`wArxwP~@KkP5-f3{!%b*PGN5 zGm)W1LxHTm2rS!h-x(OTr~15bmwEeY;F_y}Dg+#O#M%Q7NuM>G+_|&f%0q1mzO{ZQ zomDD@d7oCg7_EaAl`7z@QIsx3?$ms0tyY^Mkt$&zE2Uy;(IO2>!43_@rG<;=EH2tr z2T<-7rksS!gBgh$!?0&K2=uAo>;h6Yif%bfwpc8nNDMYW@x^qO)e0Yj&5AUq0h<-> zVt}aNI<{EQ z1x}n~@CFr82kOvib7CLXzgDvs2w1jP(RxojVu*A>8{2f9wcS6JEQf>XrR zbq)^2^UWsxWne~f5Y@N#gIBCM)_8mvXF)d%T{$K02>R1hY(3wBVk5?wMaC z#h|1`b8o%%);W{X)iWTwwOL3h2Hi5Hd}`UUVks(Y^hTt`%LHOoCcuFlnD-o^3@8bh zw+I&rg#vgP1wjFdsAOt$;PInCz9=e9puQz3!v#y9;_^^mM~sS3PLLCTY>;i#ISLQ_ zs59kHH7CLYRKHe9BIO7TA{3hd8U!vv+**s+eKf>mk9nDu;NDDdFYz)f!Es7(oB);K zN9JgN4>6LZ(cqv zP^qL+F`sXT=3K!7Wo1?A!?FMb6a+|{`x=(j?AdLceNAD>=`PnW_E)u*R8 zoC(*loXAWZMiJqUw;Z%dBo&n96=_+-A_E!Opm7rA#kJ1;pOLkzH8U!+`c{rgM4C) zn-1q=PaqE!{O78jt5#iqJ=WkR_2OJ<5J(iKGs~K>poXe1o*QzJh*Ycx73MGvh?F6< z+<|a{KS`H|qa$_AC3)P09WKjG5*;(a6KU~zGO`-4wCLU83TM*1 zNyGxgX&G#^-VvwE;E0yVl;{dYaE+8yTgGBJ;Rv)qqOnn_6K&(!Jc>*F$>M?n6%W&i zyk15eX{$`6Y0%~J5T_tJGcQ{VFBU&9LoUxCjdas;W=$x}&d)_Dc?HBeTI)cnvn5oN zxlcX!QF#IvN{?&2V*DM&Tn!kHvCVu3I-1Go6t&c?yC%a^ z3P#m=s=UJ73{H$L>M;bl;*Z`wtcxSi;%T@vJUkhI-yJ*t@sF+B5NJHq1Sg#aT{=WN*1==c_^EhpFfg0XW^nUweaO4zD&Z52^AS)7T6{~mydvO zRv=JXnw_1Vg=tpuAl}p2Y^&qBg)Ne(BeILsk4r>J>W|D%#(i3IVnlw?SCOf+b zjU=$GO`4iLb7nM(Nf+OB*G&^3OcW|Q9ci2)o6U|1KxDQb&h8Fm{%QrxfmTp3#9&{g zeKA(t1gy9Tp#69SO8M}xgu_vEcT1%r2vRni3TPQ~%e4?Rg;S=?rDjimMJ|z)4Gjr) z?fUp*4u{D^=Gh)?TG8d!*2Rmf>*|USl!Vw7==CFIWxRRwR;|*)iFwma*bmao1^M~S z%{Z8;2;|OYc`_+N>jpl*tPE`jP+AiRYep!k37|Zz1Q3yUN03S(*hi|=Gua`=pfgs8@K9Z~BNG3N+C6Zsn zzxrxy9N0YeXvJh4S4UwsZ!N+c^*U<_h1hSUf`=mfJ_noM?gB$L-Z z|JA4WdN%`28fNF`>rvroXb3ek$DW>>dUnCpXCIGpti?Qh8*>MOy6(z_3++}9@%P8t zPG73osn_p6ak#b%PEW)Q91dzwt4Jn=g)?S=kpe(ALv%UMZqrKmARwu|Bozxes2W<* zHW*-WKK|%%+bALm=AP=)l|uIJ-Cuw9>HdQUzuJy8xUZ{WixwfhFB}MTAaL965KWjc zA)P(i+B#)QW1|jHn|`wujkV~K>(oJ`ebM~sQ}V&%rPp6qJ||*ay*dl)=9b?+{|IW5 ztzAeh))q0DveFqd%4W=%QIsu*4%B-Ri@{_%bH;$Iw;WBLlFOt^1VTP4cF3fB(&-|D zBMpc6sx3~1%nYmmd+@cJCVxi4>MK3wW7F?9@>sC0B9MW3qd4i zBp9&uwjv;Nsn(E4IO2VMs*DU#%V(dTf-=zF0hbJtq+F<&6~ZPG_aRx=oxTkB3Wc71 zb`d0J-0P3TQAj?;QNI93J&4uu3(U%c;HU?|QB&P*4TGSyq2J~AIxK#4sXz+KAWC(U z;FHblhVR;oOjVNwiWoJd6~|&3Q#5gh7lM$DV!mluvJ|4VSebxjxMO}lI-XRZoSMh( zr>C|w>M5Myu5YQ{{qCC)rlY-b-`<^{e6jPh58pwj$W{Y3v%V{p+y1$$s-F5vdmZLJ zEz(jRn7CG&W*!ENek!9pEGQY zWEJ0Z)5@85Kl0Ems76$z;z1u^DzmP;VacjV)CBBAf{eTz2~iKEQ3UQl)tZ3;J2nE5 zLJdkxRf`w|gB0T^rqWZ>3UUaLUXUddDGMg#D#fzQSdzn%7ix+iAoJ5ykk8o)36~K^ zyfIgM9h>bLv9aVr5F!Z4Q05}A;miqb~V&w>mtKzDHl z7kPGJi^a)lKVN4d%4`bGnTYd6>?BnA5Ft!77-glSVg`c)j^|?gTf1h>4Okv>c9@5y zEFfhx5+a3!FS$C~t1#O)VYXLcw!f`ECq}jhESZ=zL!6QB6HAe0qUOs0;*s&8ba|Lm zh8CM9iw_`xUyH~g`IA`4#V!hvHV7vX@B%IS^ZnO8GifAIZ+YMMzTcgb>^bMmu4}Ko z_TFo)y;ehM`iD3k7L=CO-hPgx*JUlrS6^jRwqYc;m7n@bhUT<^Ixx3>@c4$Nn5!>? zk?+Ydp_olVW5!ONJb5geWy6ub`W8z_FeH~#r;dlndE9U>=jhRK11*4qC4AUuIjR97 z`Nl@Ch=Q!tt-E225rD00X~jr_U>D1`238FV?-Go7n@7#J1spwEguZEOZ-t>ztVe(& zjCN3!F^pb!PcIR_3L$SJ1NEal*MBNrdgr*@+)0E>O>a!N|! z^~3~6CfMTDHo!43A7=r8d;lOH0LTYmoWZFQGs52q+2|^VpE#@NB}w4+r5HU&jIdaC z?tBXjCaZQ-TwGe(h!kVlkt3z0At9H*^`KRI;q2J~1J0(1p;1vb4Q4FNCsq;XpKPo- z#fa0ffPuvy_-7R>&YiQdMs@Y>)@XSGHR#Z2sUxkdM&=U{0p~>Z7}gE67&!yG^A%Wf z)L=3Ou$t<~(m^9X6Wv$`LkDNivl81x*zapsbVi-V(Nxc^>}w#ks%vtPwSA?%6+81* zCuwgePC8X7Y4uo;>9vsjO|EDq5)l-FBlPH3kLHT315ktB_T6{i6Pbpc*+gJ{eKkz}RFqV}G+ZXA0@lv-1l5M>`c`G*# z(l+kdQ_`RVV(A)6_Q;dN0u(f(3ED6WZB+ciVkTd9*=3WaPMwl8fByU_F<}x{xF3Uq zqm#nSph8f-pEhmU2sY7X8?W~?8B91DYl0K)5FmYszrU}^49PK+r}Qi~c3_|$fP_b^ z*pLuEE%)T9^$(FJL!S}v>tP723Xr62EV(8ydU%jKDpUn_eh#2^xmDSsV zu?v<2SVo7cuz^&zTcNb07+}=WhI7A?7MumBKjAGxPf0>vd=71sRMnRWXTs$japVr0ESAkUZX&Jrv)ctiMnW& zb}pNM@kobaG=i$2x{8w-y}aNX0*wOM_fqkk^jU5No|C@+ZNPJ`&%)2jmK2m%HZ<44 z+Jg?qVeqLtS7i>;(703TpoayK?|ESM**eT9Bih@c$=6ObYXCKb=Sw4`^d1@}X(~=+ z3;x~^9>P#i0N$Cf=8;F8Ds1-){_4}6X=P2IwH{hgZvlpBO49O=e}h@&`)qh(@nGk3 zGvH-?{Siznw&5-4lSAMW{nP+MXIMFVY1*nXPo|&RyW@CMSP0v@H?3NU1nzk>7w(lu z`AO$1s%K0ZVw7MXH>c+K?tNK+WJP7IX~=j?ESC&|?qq3EezQ4p{P^)Pm{~@&wV9)a z3>gWY$TbV6Pag%c>~GeaETK`c5niI;w4Q$6M!%?m zVZl1Mcma^wJ^Wgl%$Qw#btb>Cfss+c-a0^rrk4@ubL?M53?Y*#G}IG44nri3ZO*n< zzaaxL9Yc`?=l5y})=dsrnMH3ZKse?Hx0$})jWH8O5Z^|Nv#GEQF9=Nyuxs1kkhD(n z7UIAZgA!VCNW2YwRF?Mr_djE|UG325J+w|oyGE;=tsyI2E!EI1sBLU%cXr?Fbh9#R z@{ho@;vcGGCP9ia>d9n+(@fiBo!K`gCdQ8@8BuI&YH4csumx6D*5Is3ITTZ2`>Cm} z4%;CdKsU(Ib>$hDSPI(T(`(Akn_(LOy{xUvh5B!9odGKY1Ev{0_NV#h*qB>wfv34a z8<;uWshC%;y6egauZ0T+)HXMuGd%|m!*n%lAed3$?aZ?2 z2M!z-1F9Tj3xt_h@coL<)>>dwrCuS8HZQT^oE+U$Pc}>K$2|Sw#|>)*n-A@RZVJW~ zRKL)=CYoo^T{^Mu#1>oNm@xyeF!a)DHJCl&;v00zpoPw1bU}3~=9a?T3kSEp`OYux z7tSNs%8D9n?Q1GZp>|e6GY#x0ofjlsvki?70MH`CLN(5Y2F$`(cVHH7aLw|5k=^wm z{2~RHK7m#P$vzewn6E@d(+C|TrV0Al`(%OU$H09M?4KQ87aOdJsg0{ZYBy50Y89o3^vd_tr zY3Zl@gFqw(2Kb*kc@lgxZ%lp|OYpF4@PZEmCdtsD4vHK+I2r^X6yFiPU>kb3H#WoW zy0fKS<7LK#ZNM2%i`i%hivebZMTQx?ED*1HX|yH}qcPAI*aEUEsq=WxR&lMQ*7IaR z8pj0dDuAd}&D5&)GJBom1MNFzFiAD%pd%FSOXtCY!*^}}3N8C)7q!Y#U)g5#fO){Q za*fg8X_0+O3NwLmg(WeAp`|rsNDQ?~Fq+^%2g;Xl%Yp5%Ap5i#yoe&VwF|_*)wfft zq~L*G?RAZk#9BbO%xJqoN0wQcA&h%`3kpN4yp2#7(|G$Cjb?9yM$*ETK3^euxv(HA z5?s+p>}@&Lg7qRvUw!-O-xHK;HElIe`_X7(GPY!CgiWs$0Bz>RV^tA9_m*d$efAc} z6xP~2nwkPe%!NwT+z|md3L_YG$p#&ayg&yP{5cPp5WzWEKR+*%9*!N%g_T(ARl*=m zW@Zj7ox+HjUf{>35UoqS$fJV=IPL`;hjtFrZrJ+W*O1h*8Tga6p%eF}LzE^p)Ydl0 zn)5z8->FGBg=^sY3X$4`osw| z`q*Iz+P%ogVHH`oq^jWv5j-Mj$@z(H>{zJs!oaI&(;fWi<1coXYi!#0->=L%aY(jl z{l_FFCEZ|4!YK({;x%U8cHNJ_aBWbuP5Y-m4J`lhN7<5_o1KNRkp}bJ85#LSXsZ_; z&I<^j4I}@Ojk|Y$_d1Su+jj3xQtqHjeBpbvaJ0>9@suf3CWPax^v^&0PhY&))iNzw zrbo;4Xc=E+p=CBmxl%}fhmDYJkV3ISd~rE-Mi)8X#i?2-)kZbdY&BINy~^u1)k%s1 z2?v!0kp`8;wSVgRO;t##4ydW%bAnRo=@sh0$FsKAc9l|;CN-6pnyQRmK^)sps&b05 zLroQ=rt+p&t$m~t8NraNSZz^L!Pz0z5YR|*apHTgUn6C^UQIJVO~aKo(6oN)_m8Er zQ(4>8vSzAf`O~Xo{gzcgQNB=9#j2^esz7tUsRAffp_*!znyQ*!L1U_)c4bkNBWfzx zZ6^qFXrRD;Qk77ulWMAoYN~pARori?|H*bKRGk8~hQVqLYw1;A_@rfoPh7PmS4|b7 zros^Eu8DW?m~>JUd$+Q9DyT*D1H&IF)wgP@7PYKsdIhDce0Q)_R;qf;A6TiPNB@Q{2To&t1nr~M13Q98kU}FK zF)1;Nx~J(q_2h1h&zdz<%cWF)u(0Pj6D58CSCT?mXRfq&&OT@m}O~wX2So;a*a6jfX*CnOL~Q`rP} zZvEEp7FYeoSreQzT31*Z>bD9xe(b8>eCRMz{T5QrDiqy0Qm+scp;?VyDNj)DM=Nio z9Csur(~x61&ru9|lqImgs!;TXo)>ce0l2jAk(Y`o;((iw*PA>qaVTN-DvF~|U{));61Q4tfOqYSj&{07NCL%IM~J&G_y~E! z)wfGwOO4=m8(ZS8pF6*9BZS#u4-oa5Liue=P-dZTt9g6bOt^=&Ca^En-t|vlZ{cYx z<$CNcjOHE~W9m6Z;O$KsJAj0hN3)o1D>eqK((gPG)mrA zTVDGo_5kIU{V2P>x0e4au*VD7Z2{$?GaW`?w~pYzTY*7v z_%>ptRfvBKz_cdN!lZDeRdagjBhVU#D&67Ru7iiqRUFH|M!s8+??=e@E#&(J@*Vi~ zH=n_I#Jit<{qxQqtgw2|tKL2_nP9VLr>2_s;G7%0GV_nxsJWo)@Dbtio zUDpg{Jl>2|E^}R@@ia+^aa~imljJ9rCsEfDn;xc`T1{CMj$}_qMV1LC#EOfnOj(ZP z7)O?=s#vS_2#a$VLn9+2LyeBOFb}xg^@wxuXGffeW>x67N8nMZurUAdx38yQrzEd< zyf$VpwPRn0 zkhMwXZQIhB-3uBkUUrtgZPmbvjGbT~ZrGJs(H;Vh&(M&ziZcY{GZhZ-^a7kfdEIk& z;`HJTcU_L9#mnpy_7gkdx^}aV@g`Zi51&h6g6U=UA^X8~eX0!)zx{S}wf?>L*6%)g zG^Z|ESC@12XzG{m*@nYP#x^K%e0TI{36#TphmISUbmLuj-F0KqxN(Dhw0GSFFHQrn zyA2yR?((}<4I4j{1d)RW4I4je9?WOYzAS0{f?4=193M%noB_-3y&bk%@4n-{WeZsf zPW!BIU8`X@aT$97@6_u>k7c)FqkqpW*I&<7HNUj!!@|M?%|1E>4xn{D=0Ng38*BDy zN{(srF~>ros;#lQrj&bZF0H9|@VHSp@|uZEOm{#Dmrz2GW% zDPMfm6<05Sj^&jLuD$^#e6P9Y78scLZ~VXDx?X{Q)mP!@_*I~}dc6YENA%`p*G0d* z$zF3`nui~L>M0CzASe`bAaOG2lA+S$1cwxrsmF7&C;XJuLypq>uNwZVs*?)Op-jLT zF6d9e;3KpNX?^sougmooYRLpNyVi20`Jw!U-7^j12A8`ba?WD zCKJCD%d9Q~bw}R=b>?pxsQnCNP3W(Vka3Z zcymM%)XRAYPdjHj*SM}9x{!k9|2!!e_&-Q1?(Rt|n4SFs-`XF$uFY`UZD;j@w=mFk zSy&z39B12H*Ks`62%hT28Z?~NB#=1aVrnz<_fVVOsFBCNi5%%%Ps~JbJz6= zTaP#U0g?l*>oD7gH^=Z!y-wii!wyHyA81?z4N{rD2-?iwM9}&nc$(ix@tyH7P>aQ*9^aGv{TCmiNr)vSzFxvp|{ zjyVJa=4+$tGGmG95Pa}Xz5I}(gq?L=MXm`;81-+QuwwXx)z6Ifzhy$Zg_Q%^<*dMU z5!}mJ5&qA+t}<54-;^@-qTkN3O84cS5hwq9X2jnn%H4Y5C-8w>rGsrjuP0)1m`i7aEGe|sbw>qA@Kag$#-M-_N6=b ziUejw?yKCnZ%%?5Md=OJ|Nerm|y_E1r=T9i%`feq}bt@s)UBa9Mb_6Ay zcb9N-4)nacl%PWi7oFw?f05)opCL$4kLQo1=ix574_Wn$08}FYRk!_T!UrH@U6UrtaS5>90$DD!|QciW(Gq$u<+OHZ)s`_Y5Kpx6Yq7uC!p z1JA+1^e;n2BeS5QJoNyx*!0!a0(79EeQvBO^$Qs`8$d+tZ4fH_di2^qNs&pmFxfZq}vnNcRKHL-nUK;o}37sf)70pwT&us|1;0? z)it$M71igetIl7@_G}kKXyB>;sB8!vVYVAXhC#qK%+L(Yv}QvH)V)G{1SmHMK2cDP zje-yDL7O-4I(!B?7iSLd+T7Q@Iu;4i7hxLNPEk!V&>+U`UtEDI+i<7peztoH#koKM zmw6wjI~)n;kR}|ss6P{w2N4H!i-&Ztnu1tlOX*%tcPt2`h*TI6Fx*zUg3dHl5=rx* z(t)-S=0w_3#AdSeB6zX{!9a$1kIAfcMYz z3<+&#{eCaJ2SloP8iO_kM~W<@E1rr_z7cT{))P8qf~Y%i`oNLzcE2lRWXu9X`F32l zSZ5(ztHddSHfmn^kC{11$^zv%WxnT};vEt6zvI(ScQE0JMTzWFc1Y`UZq{A#aY@(` z!UntN0pBsD^e;w7i%?zT#YozgiiR3vp^ZIB7`)cnS$tb{V~wG;LRVPmWio-&&3m8l z<)Yq)ER}lU848652u6D-=qkm&cr@>zDEla=>bSelg<~X@aZ9sE;zsk9WY0gHB||kH zr+}o9)3+XS-@p0!jgc}7v#~H)vKPQwF4Sr;7fKeNkU-bHT6cG@ZVOC2*Nq#$ST9RN zOl8SG>ea7hiD;*cW5w`Z%AS-ZqL#AMo_)qCOGG1OsU&uGCEkECs^hNLe82wY^UvM+ zY6iUYt~=im{)u2!eBjP^$h?K|?tEvKYUgy#HwaJwcantxSNs75e?UQ40Z%;7XslmS z_IlSGuuT?rpGs5`l|M+$173Jp*1O0*nsNIBiC7+G3X@UnCib1IcM<<)S#KIO$!o~G ztA@&Y7wNB-^*0V5vox%vxIDQ%agJ z`nYiPec{+JxReov$&-bsfkNC+p`wc8O(@cjkJR8FHu}((;gNzT)bs>slEP#s2(?E} zZca{KK_1RlE#4J~dEngC3i_!}h&coDK161}@T+^eyq$T3kL?CSduId#i6BS}Hq)YI%s#*Z`svNu> z0&6HPXw%7cI!6)=Xm^}khelmKOv_;^z`8~O_OCQTOS(e1iWuy`#8!`41+~WG zhj6?TGklOpCc`dhbg~h`n1Uj^9S2sBUcks$2_we7K5);76SwYH%&GyNsiVA8Q_TwE zDr0G}7$Ls}6_)>jm|bTXS4JOo&9alm^rRQA*mjCNu4@u2Q3rQyD?3ELU2g*%-R}uw ztq_Nz{+Ro^&y?Nk(@beQ0|Px$Kyv5aRz3j8;}j}>#;I@{6ZxFbl*u4aTBJ0uO zeqx@a=sXR;t-@v|Fyp~U4gMvZ(iibB41{$7`f#0WU@5G+(a6M~L|=k9z@C+VCO<0^ z9-+zlA9o6Uh72szS;&lxt)!~wNum!N2p2GTMa5=v;$D()R2NF%apT6Fy!FN#Pn@{q z{V%?N+;Qu+pAOLH=RG?&e~nXL8^77~eG!&1~!dnvFednavG5&!E{%gF$eCi#U#jRgIde`>tQ>Xqo zG(P^?Yj3~(=EaL}yyA*$Zd`gB4AVaF;QhDXJPYUhC(c^>yr7s%(MaQoOQ3~ai1-*qY{x1NU)WpH7iohan?ab`-oOo|?|nYsgc&(OxrahV z_Y6vX*i^b^XLpUeX*!ALVUG!Ac)?K*%BVmYr|EeUc1HMWgkGImV%PN}t?Jw}$rQWb z%>*S!Sipn>fx=!Uq`fcH0AGrQG8Om3&{mp5CPqq_6Gi+B79_z(r_<9H%tULvVO|>! ztMVYE%rCI#=ly{7LJCuonUKte?BBoda9T!A0nDcqWS>eqb^xx9cJI#3h1JlLhttwe z=jI+dbodxd>BIWot31&YyZh`@-Y4kHr^<0erkIKEBbN zKdJ4U9Ld-XOv9EiiA_U0rvcM^eK4ugXck~jlfJMQ@qyjb9Qc`l_|>k_G{YlMs?8f# zAv`q!?NU`+BRn>>fs^>vJZGvi6Qac1l;z6J%2mpAWw;V)o3BLTdpe$P#{0XK$%^Dm zbN*Ouc@VJSy zX3c`{r2}bc4pCAZttaIOw5F)I=-93wFkw;C?4(EV!>$9Ti;9Xt4U>c0+S}?bV0I_x z8gxp^>}#_4cz~kF5#AOrzqCT%@na?>499^5e1<1X8Z$1UH7m^zb5?S&^rsIXVq z7xoK#_`L$K{nLv4dV{?`hke`khX8xOKpbPFeygvl?hbq6dV~V+qk#A1%On_hZ_2V~ zWx)7+dIpL&z(x>AFXkm#axlkd3CaMFsR8UWnSNQgXV28Bn*p6xTTtoEIEcz7Ca}#bT-7!WWm)fqguGXF>#b$7H`TqovK;HU zqH?uJY5+NG6<}dktH|Eeir~;-EbIJ&EfzGz+#|;Sm+f(n+Z(#mOqFE1X(m=gKKA0` zOn96kva)mM@#128suh;Tn(G_es-*S+ji*VYv02%*^CA5h!P?mYP`lOaBlb1>i9u7y zHjhD12=`y(-D>tOD-?zb3GA8`R`wNfW>@8Lfw{xm6o@1jqv=6xyrpzb-GiTkC+k4lPRE> z!6#c`C0lghWDsa2DCWet;ZjSZq8SNNni65p$<3{3k;J?_Y88F*@&iO@8PVOBKwS#2hv*H944 z6qAeWX7S8iWLK9$uE{JGjSgpLBjG_%rN%myGe|UYFlgk6t~8R!RVh%i5ag~8LAF(*utlPpCI#>shoQ*LQgN1#J@@z6Yl%OmC;a|WG z5p5T<7}eOS@1&AMma}LafGlU`QK3YZVJ5GGN}6;Av;hY^4RAi~#i?ZX7#&oy&lV8@ zE-&hue69Jn#|n(T4bgcnfq!HSr}Z5 zKKdj2XcF)S8!{FFGxzwwMO<#?i6cio`wD(t_WX$Z!(gXCAv+9fWZdo9zUjn~eW|HM z<$L$0Zr$|B-#_1iA7q%kYiyk7{M30+iB_IaUQq5+{s8^b@yZC(;i%*_FTM8W-~PH9 z^zB)?tf+ai=kn#tAG!OFH!Z#6zaBp|d^S7C>FFD6t#J3BW`Rr9AdC~PNU_br>z^1j z^)z??;elmvZ}b5>!-|E#EeR|||EERcCf;@DLr;^_BV1N&kVpoUr2LsK-Scm}e&Eo0 zp!A=G%gU3KP^iQ%)vjCp(mQXgOj4GC4o{Qi5J9QRDQbj)FmZ4cXq$&uQ!9f-FCC0; zCc__&!>KjFjfyDdXW^`|!DC=dxVI>z*uwVh`*3?*sDIqp2uZ3eg{3loOZ1>nZy4P{ z>u^{!n6{gFd9~-7G*Wnwv%TneW>qPRTXgdbm6t$$a25LCdh`KV*rWA*#1YuzKX!2U zuI!9shYnTO!eQG_yT18iJ2*T?j=1_@_mi4TseAX97p11|J91*v_C4qq^nuqQ zoNSyqY0U7!aib>8c&{Omy~5UNmfwE;C6^~*l4=%0G=~lyLhWp`ge!z`tbuJ3{&e?j ztlHn;GuuJW;in$|uRE6B^vAm&LFcUS3VZFf*WO#d<-6jtgCD$pr#1-+j{vRi^N%(5-|)gp9SsWX#nRk?aDXad1>{!BxNZ;9V(Px2GHwL)*m|X`Wu+zc&Dt0 zfXT!1igUG1_TtJ57k>E}7DgJHJ$$9})ldrt&}~72=p7z2(8GX@c2++8b~M+SV1>Zp zSP_v5r)C>JdFS14)^FLfJ1b+?t`A>)ZT%;|=ptHMQ_-%F88dEP6vs-dGLILvJA=X{ zP1CvBygb_YyXGku%on?0P9B~kIB<__|KginKOH=Jw6^-tp<@}@yLKNuhQ0vIlLL@k zF?z#ipTLnoGI@tbg?O%%0L(*4LI`Vysr$<>x&HR$#5;e*yWzbV6Gp`i9zJH$O!Nmw z_0_9azxd|+AAj)ntLuiJnhh|{gue4V$`itWubQP?p?E;(v?)ah3*#Wwz54d%A2xja zIROkH_txI|`UeNgioe^k{yp>up*Y)%upHPfwljpM-)##<;($TX zUcN?WdvS#*hL0W^5W+Gxe6-uWmvYe>vVsV21}dJDGtk3uxc1|o!^e+*_1TdlxUalW zz$p-5E(dsqM1^~sU~>W(0EZhj-bOOfxPmphNQlX{`mcX`^R<`OBu5>dr;GrhxeNs7 zJ`kQKlxXKc=cnYijH9}96SPG=lq-Z)|2-Q&-UG_!8out;w?Fv!{Wo7k=d8e9 zfm0Cu(+}Nw*Tiv){sihGYza()3c(rnf#%U=5B%ZoSzsWpB5DK~1IW@|T&vj{?VsR%bYE#FyB38$os04A{EH1TR3ApoK`(aYUd&Os7ac3{IbBxb%f%DyhfLao!zloz9fx}Nbkbx=vcX@k6>x>tzB02oh*=ec|5I`x^2Vxf}Srddn`IT zSR;IU9?+FF(z(n}4(hfN?c|t<^7x^aA@N;z?R@=@btp`4HP?4rqjqpo;<326q=E(c zx%TtKOVOBJ>r+2?pRc|&VVEyG&sEc&!P!uK9=@7}C0ttVq6`1Q%+>$FOmzeL{}^VD z#EdlsG;JG-u>nVZ*Ccf&2)koUL^x*jL-ONY2#dr+{m5cQwoSry~ zZBR~w3ABMtl>OAbO%FCu$9_PtGU8!%VF?Kn56)7Sz_rHlBEaAA@ z{CUDeVIa%GIVmf9n|%tEk0Vg_n$I%vy&cbQWeyy>PnU`wvSFRSn^3rnDH<7E@A2J0MUu(A-#sRxv&l}CgZ zUP@AKQi8xrNCGP%NV!RP>4mwfjFs5}#2n1I`jGIa9nU2gcBY#iCvfysENQPNS z!@@m=OolvujH?Cju+) z&@95z1AX>7TWP#`tdC(}?AR%+4c0POuZD0m+%``MSB607wE%tcICia}&Qs1^nlI4T zU(9pvavp=YcRX~h9>>oM5N(KT8@PCJdHKT+KYaW3H;jsn9zL4g7_TfIGI;o?k%JYb zzML(#K{*%lXmrjsw#C9V!Ys&*53-|BJI-V0p!jXZJ|UT1gRzz^m?7+RLNliTzaB*_ zMVKW=5pl@RhrLvL=EGrJzIaiBpWuwgMVIMO<9`jkOK{AvoO%ot+vfAH?BCJ!GO=pPot z7OJ@jwqQXBA2Td^uqJ(PM6eJNvG?#=U@#G|(<1Dt0d9Nmy&7{P8FIwXGds3A6OJ+{&1^|t6CqejOM8G0idu|yaXKWOmi z#W&mmJbCzz8y1fqJSct)2IJ3sSy ziEIoTJv1s-O4}12CWMdbLzXUXfX8At%EokVRx=xQ!4qJW7Hp>cMqLoupY6FnRpsk_$awt% zlw;p~^UX0Opsp@p=#-f=XHFR!(5KAZ3W=gBc3WXjXJU+hY&uneL`W`E)>dlodSG+D3l_5zUfp2_!>!Ywjy_A-|B!i0N-u) zl;{)g*nm6Fx^QPH;Z82J=uYk1zMTe0ZeCFt#&KB@6i4#%bI3nx9wa;XY~TJ1;^pQ7 zcdW3suCh65+6YrL8c|V_{eeE@Vg&x}+hOEUWr%222<=zShsskN_5gdaAX@>>^?Dr2 zJi&7Pjuy1R@Mv4X(UT{S?s{+S+O_ZPIy#T#;i7MLoc9T8s_~DnlbM474 zr15#<=e&j(ELA$#8W2dXOc_-KtT@}I(-st9ZB>|i26p?-f4<`0bk#OknE41Z9|_j0(hs)(%aAbs(C|o=WDBqn*IEIeT~WCP-KMV z@bFNwz8Mkf=hs|RReR2oaFw^8ACSiC+f9f`h~jAFxjR~&s!eNEs@Wl3(*kE;85vin z4kU5iU{(%FJkZs|tqn^aRqpkl*#8?~BKFdk=)_)o^7q93ci@N~_}5jdPt_iVX{6yC z_gEFapz!X2FPMJ!QC~KwYwSML`l-=K0U2o<)U@Z+mmdE_TG!gX&vN6`BD&YvSAE&2 zmg|f5`eL*Np^tnq@_a#~_4YG4_=9;&Em`o_=w-eCkLA8Q%3*s_J1p(f6GLy5^%)OD zXWko)H^O)Ga1c(Iw>d$1MAqBOit&dtMcQ^rFSWJEdP@*o2>C#{=O{6ZQH2#Jo0kXJTkVadCYzgd8AMtg~e(f zzW%?GN3NYFz?GUGYk!38)s@oi!M7ui=kcb{AgHGkq$p_MZr*nT6jt@C`r}E0soMchgpp!+wB}s`=GT2%e62mfU zS%MP8mdPUEl)&1>+0S!mC9~uNXelNGDT)iL3eTX?$)dx<0hI9f4f8dk@#?c$dn%cH z#t|lVmYkP!s*E}VF&!Q#Ib&O*5(N{o)G^7z>eUIX0_Dzrn96-#?WJVl>8D|r3dI%{ zSDm5$LbY`bDAw0M%t-yFKJ)&kbD;hxmHbA~IZ$Y|GCkm4SeQ=hR-w_9Lg1e;Awkht z0l?%$Wi&gQqCr;_7bgfYR_c%BM0QjeouWstUzWpDK^?B5~>v#T=DI^j#ABJ2)PHSBu3v?M0=h zOQ~Sd#2V33i(jBVkV@vyyrl%+VUZdZ6RE-%R?7`~rkA+(x*i6GFVvo|v0EXJ#PW*y z(~At)eb2aH9655NG4iMHMnDNG**tYxkWvh962(f;v@>T`Fxd-!R!kb!2H9VmePBe1 zoNV^+^o1>M*ry8e_4M#qfpd?|&2SNN!OGR*lXY6P)MU{qDEt++#prVlYN$aUXy}9s z3lQw7HvVskgi{V+B>DC8M++=y0W|ONFL;@mKk+bJf07v+`I6F-VtY<@R(4LR)zbkV zks7T|iwd^FfVn@cYXf#xE3*n#MmHKM0Pm#ob-|BIk0V45Jgu4!a!}K%`7pD^;^|@X z@U-{`1P1s=_V~4zR%kD31=&?Ip%o^yg6#6arlAIF9q?i7rCF&~(E-(LK`RQ&Rtu|_ z@w?=wg{PAxdsZoa)xiV3#S|FH?>JN@BER8GgMSHpY7zg?j_3O?HORB)7J-uipcKKn zGCtXuc~?R;+Z&6#)^>?DVp@&V-s+Io_&8a+G;Z0lWlxTi+K)g;`}`4^N2R+izdX;Q zUb^G;ls4<2%0o*zrVm8`d9944`V$ppZ7GGH7Fn8Y%{ z&lj!{xEeQ|&rts~zPx5G2$gX7K)|lZ9;gO~464MKbbQjJ9W++ z%P3VBV5ndc_U=tou2rt(2`>VdS|=BvvN@PgCqagHL>&hZ`i%K@1&ap>qxZ-772-vG-apOq=#U0}G-sH1=ra*hy*l7-evo+E|Zs zu&V^$nC^6k)>lM#0)b6*(gSNk&=YVpwEO@-Y6|G6BxrH`N7OgMM-2SYRIXz1OMs8X zb>S8h&nvD3P~@{%BGo+%t-wh4IuvzsufU9im*w_MVh^R@L_3Z_QthBoq|_@5M5<;A zjFN)*FP0@LCP}#s1a!A(K_MyJhynXX=DB1dAD&Ay3p%}$3}@gyRUELDvQwDO6d_qp{Diqch<`Uiu!x`cYXb>_0PMXQF~c@ zi9M7ezQ7(y!QK5S;)}S`abSI5Nno;$8s+=3FK)f~+xINbb-uEqm9qE%docyU3l!k) ze)dX=TIXzg39oZ6-@MMy3Q2+c->yLv0mkBCAy=2Zegj}~k2L&I9b@wWzoo#C2Y?~- z0l)c7h#ACu_^gDpVDwEN{?+Y%5Dpa<7J#dh2gVNPpwThI?)RxyBX;7HSb+~Jqo61P z46)(MX6n3W?5~00^Kd$5UN|e>`u5vzZ!I1*>cols*r;6DBPupOmp=`{lN2>5KUaRy z`^~qu6tgg%B#aerc}w=d`=~J|PK=3-%FdTPV)L{46Vl`ZO{hE_)&_JUD>+zUbveh6 zAIqs@p3$R6kB;`NuJ-dY*VUM1kD5BOnV#S>*ISklc%!h9qoU2ME*CBWa_g8mdX(&e z_jQO>U5Ds+GMimbh&~7`MS=&142K4toy1L-djJK@9{@xFU~EPX4-O5Rsq>xwRROY507-2ynd?^oPz} zEdpE@f&L5w)aWmtn8{#a74hfd;*!d9RaivWi|tvbGqcWQk@Qy9(2x+50|liW&T|9? z5tXCGA;EW*L8I3RHc@f5Vc#p*BmrkFlO^Ih)1&XT_x<_drE{b`>)E2oExSH@CFSKW zWOr_QcWxlFhV{E!$1k|=>HDuwV4oPSfAFb$7ENP&*Y}pE@Y!FW9N_utw+E$}i`i#C zI3{0f-&2 z$3-iD+JVJ^dPIVHxOTXD9m83Cl7?W9Tx!SqU)!pP&~IUDlEA@UCQJ}7Wm}V+KP#)D zIQ=|JNpLoaXYRTyLAXr3>#j3+lA#Xy&2ei18<+qhJ$8}&yk^`3J{cL%0+uo|d>)W> z;AUjmOOdlyfE`n&vv_)$EZ9M~p4C1Kh*bCev@Z>ag#W}y0os#PHQ27VXC6yS12g_> zn`B^+FOvzP*~+!f&z;%IG3VzAY(4f=>OVUPak?-b=FS2Hy=vp0?A$NJd2V|-oa2G% ze8MI*GpGz;Rl~3riHe8-uXs%7OAhxyz-l63H3_hC+2Jwage}d2blTcTRmx32lb!js z5mf@dw`mN_1A8foK~t<~o#(D0j{e*M4S==u?5-!irA41DS_FgI)^5odtO&Upzz(r5 zgg;DA#B$K1h(#c4qY0+47(9ajrM(UA z4ts#^k~AAU1h$C~L?xRA%p-UMDO6jVhRsT1R14}EsBHjG zG0*UN(*vHU?{LfUlVVYw={4Pf2|y`Dab|H9{B9S=K)YrLxKfZ_TKalMdNAGQ%sngnFk^_hJgW=2m z2!hBF6Mp{r4);Pjcw%62$SktAPjwqUPIH!-_-oe?k2sjzG?SS{+0SdA{3e)6kbrj z#TQb)OHdvb{8MI%g6Dv^ORrwEVZ-)}2G)?_HV6brsOA#DY2Fswd+I%sq5N3vM~9^nmy zY8p)&n~GVOHitg`{+Nm}7hAk=;bS&?diwkO;gmW=YyQ~jL&WB5_VmKfW}36TJi&PS z$IDg69PK)Z@7ay-pMy5piQB5h3?%9dvVH?|wk>o*dq=YfZ$K?SlJz&U_$AJGC{{qU z4QmPMq=YqJ))xazcH*{bG08X`FmvON23Tm@>wd#2JDs14M9Fm4*nNJ^gPfLilrC$h zP8>gR>U0?f!=28KDzJxb3=63p;{EsDpU8Fy*fuzwH{aZ)IuApF`f%4uc|bx=wNfi4 zYm3S%%8CkP586t0)K1psp=0xMTzlbOA%i(o6$-eUqF>nNWE(hSw%|b9mIUPvS?^pj zlHEL#Aj4L}*GxPqC2isCZ_Mrl88=wL09Pdl?l&AVcLOr_0W$XiGWUWG-ivh7YrR-4Z$kBt%&F9mgn=oR;sNqrdJ`*QSyd>5yd)u~cS(M0{%DlZThNjB1=g*(J zpoxI5=#fLBLIgNBZV?4v-vI+6ybm2}ix@L}*w9!XX9b+WjR}XoPkt6UJQuoFWX+^V z2^=%!%(24-N(A|OJ$?H0$i^i zGlOdYN%5<%zi^?V>5@x~wK?{Z(uzhy*d<(-AD}ZB;TGHIWe6k_Ax4u@4KJ)MqORQp zhpExg(Xkj>_#rUZOu}w62OivLH@37iH-p#O)C?4?%(CZHf+bj6&)~Zk>+9Cm3(X81 z)5hkO`c`>7jarMPglw1;7FL%-Go`Nn{Mq`mO_Gr{R1^`Lv!t{jtE>V)l%K3Rot>Rs z%p%cC^>AWZWhaXnMP=mLv_)z&Fq|B6w&tO9_!}-QZEOI!&`a(0=So$H9jzk6F2t-N8)7w);q_+tU zstKkEnntX;wlS4kVdX^&eZXZXkJ$w0R+}JRy2`3nAhEMDDLURD1`yuGuGh0MNd`SRs=&JK~KQ8}4Ixrd=;e>Iowd-D5PhnYM9`?8Rl$I3cXXWIk>RO!e>B=0KSHK$=+8kgn z2Kjnf{0Z>_1AM(rp61AlBdO3C2m{_l0-F-Z>-uOq zb^EQto*0?e#ntlsto&3o+vI9?OFJBdc4@P>$vd*g&uW`P(B?5{^E9+M1Z@ssuzIEe zCP)zWP#Zk3U(CV22>gYtRGWm&j>9JDof2bfy~=jJy#`&s3tc}4U7rGlqvP@#uVu^b zm>tZ@ckyoDS-ygrTc0RQ3Tn?8$1AWkUn(AT^kwrpA0>oth=^wY`7?CFT)qKK!9 zl4Z5HqO7cGdPhg;rhc^gcif8v& zztX1c{L+F_6oR6PvUnlX6toB=nd#6b{DUP8q_v6BRf7eK0AYm z%rZGef3JB>$r|&$C9~NYWe>da>|qp(nt~QVF2f+nQrlEgkd>bW5CV|7rMx(53R(mK z4CueBO#wb8Z<9}ecc58s4&-g>9*ehWkgK0YqMrt#p9Z0yU^}R=Fsr;gtJI!T2qsP; z4s?>hNyl5nJIQC(E(nVWMF}mBE_z zx?~-EdJP8lB`EQTO}#}ev^YThFGfT7V#d%A11MRu=9hGM|Ta(P~Y{1Pk-3Fwy!KAa7^CJ}SV*D%|)Myd`8sa~o6 zq=5YfY={J_&Zg!?yL^)QleR(kT|VDs7_;r!xdfz~oksBMA@?EyjPV#d*1k^s3tP9& zcm>a|tK^&>RVF7R9T5Qq{ov24SbA<^)3abzv z5gwzHXy^=t;Ez?2ltNpHEY(bTZ09Q&2%N(H1nsQW=`F=Z>rqc-$^iAu#z=cXR#D~r@#mV zM%bcaxVERfI-?s(Htw_wyQyhq(gXez?v8EA){>ekAj zi2$anFly9F$yKLKjOs)6g6Yf`UtF0;RRBa&Y$hQ0jyZF7yocJz`fM($q`GLK#(=E! z9+*b4K0;kwZ+KmZ>I?v8g#(zqQ5}2s>D;XR%-pQ4KyR&}7noDSxO>Ap#b4g2MxO}3 zya6wGMs2$G*hGOHpTvZdm%z82E@WZ#@{omhOE0n)Ye0N7)ok=;X0(@J4^U8ERC*A! zrd4TCnAiq3VL*sqpii)0;IJ-le^dRk?e6+n4e%-;!Y(Uh9-DwHJ?b~0x_Vo5ZOwK} zJC~g7Qa{431;=TGti@p%P)K+EtnmJ-fvgct28DFjughC2+066+{EX;F56}f;BxK<_ zD3NF&VZqcbaUAerEb^h5ac-pGW8wK= z6(tetk5-yCFXZWjsT*WWD|qB)Gow53h;c(ZF{i~lgvJqmqksDH4w!P|BhNf?nHmmfc$iS)s!UZ{_^W!B(r406MvW%$g1}KeXYx?f?@67_g1pNX@7VEXHQ)`hzLN3xZk4z zod`fDg6u8A03n}*&_-&!m}25k(WsIX(FVD!p&e2%R8nbYGJ)Nx&=Ct~P}#(1o_Xfk ziDf|tQ8D?TY-QpK7&P{l<>W&lWfSn@GZV@}4x#w9NapQxK8H93Ip=+1v7w(4+Yuj6 zv3}KA#7D@g_KTe^rb9zf)`6%7Ww-a6>>r3I+Tz1b3uM)_V#+il?KXjRG?TtqA%~VWcQt6ckc0 zk%Bk`eW#c}QQ9dmP~b}erR_U~mZCT*;CB|fE9y4|kD{S?0tJy2)c2c$HdH1-$Hlc6!c$4EJg9Bpn`%h3Q7?4-SSBkWgrDj6kMR-FoM2Q@TT&DexS(F z6l8ZvLD<$EOORp(A~jGN^ET ziUAb4i~^1_$NNpeqwo|Nl%kn}{zo8rzte3`mPlR&M=C+fdc;M z*>4JtdJc+A;|@-15Geho;3V(@#Y?9kih}%pQ^Zr0p%k2^po2p6-42oBas2z4?g-I2 z#qK)=uNCj}{S=uafBz$j_xWUs%*)WzUH{{p!-{tbwO1YNcHH${D_#cg!QGT%I0Zzp zewH#azmnkBSfIj%PsF*>@R@6fcT`zf-`85jDT> z6dY~&Sk0pp{IS-RLW6Y-X{UD4Po)KPB(^I7SX|ez5x6gefoF$0L+K(rYyVp3vP0v)uDqpQ$L`5TvT(sVEQF)pw_I&xU%eNwrw1VdsyZIlT^* zS5UEb>|51!z*$#&h)r@SFj@b_aH(LfspTCTen`YAmZ`dCCt7p?XE{b=PI z+GD-%H)X#=*K^J9r*j-0-*ep`7@)44v5@Yz_LtUQRd2Xx10Z=cT)DB^E+D1nEMszyG zi#mfzzl<31%Nv0l^3U-77CYOq^*Jj`Lr=6xYN;)s<5_m5WMT5En?R$eJQQF_JOY3tS zfvkhMAYi0cV2EH!Mp*S5#t~xDI4Zbk9MRaLm8-7X{syE2E7t_&Pt@6*j?lF0raZql z{>A&JXv&vQ2I+LiS7m2t_Yh`@4 z-R-JVpZzfyu-C%mhmJ*~CooPBR`%?V?k(YK$?4SWl@#zt3f$1W7+MC3XQF`C#OlsN z({BpCjHE>fE2e-iQTm^mc^B}RnPv>NjOKpJ;MqGVGEc_afr+4>c7W_#8C6_ev~s*U z)^7^l3A~^tN`_8SGw2&uJbNCbQDnX*AsW_qiWrK=0oFoy|AO7-#n^~NA)(h%Du5Hv z`u=eQ!`#Z|P>@N%2nyUYpL@3Kj#Yf?M;!(?kq9{1F2P;j?dHoI-tI$`g0Fx2Uxo5& zoTJDWDBwlb^;-t-Kfdbu{ku&JBiB%OuVH#`IbiE*G+*^xLXQN$zFW=_FNES1QNSPj zhwguNoB#jpHh-Z$=cI_1%6(%ACz>45Gdi#&x8D?;l1!q=G?%F(>U_T`I1+OFqnSqK z#PvU-c=nvC?WYu;6wnChyB(aCOs050Q^0o~{g3l86ps&GzT2cG{K`I?m+?>UHd|0o z-|gT{<#g>;st|_(rr&;2aKK6w`CSV5V?e(te*bQhH=5UlLzfsze3tIJ9eg(DG?DlD zNJ>G(x9=4HW4q00s>_EI#8IH@w^kgSG=!L)0{%$Dh1#LPs_y0`BZQxf?;Cj|x%>&+dYr|BoUC1uwX}J3H4rc8ydETZ*dIekB=ap~ zzd+W~F0wBGOH+34oYFgNwX|RC3$A?YxE8WRZ!W58Z$qs}z9MxyxrAw zS6lu^)A7;P=k5e#`9HfwH(~0;%Zu0Nh{G4?z3(nB-jPv!l?#HUW)IreXK!G#Vtwuv z^}zj})`6^|4-8|#(cXF0##h&}X*c3J!OqA<;ckj%#OQUw zNtEEU8n5*5^4vWo`AF`-`ZB;;&x&N`i~HVE5-XEx^)tmKO?rGqswrF8HhKzTxAdE~RR!rOmQ1lnK?xdP zTK1#*HR4y&ZlP#paB+&SiR?i&5r2aAUB7AD;e`f?&a;o`YZQBqz9iU=;$H;5zS~>J zKA~tW@NJ5(scboYnc;mMU+pXuUwx--WiL^*GWI%sO=VBemsa%%DX`1?O`FGFq1d_5 z8^hOV_7r^$V_x(Xf|GikCXKoP3oM`ZF_qnb@WH>aX74P_#NI z_uy-U+D~Q>UOZ`8k}K_4jKi@Q4`YFqV=)Jf1%3qP=48X%U1nx3%$#sju6Z~~aQciK z7#C!c)s>M<4=DGZwV@I!Lojy)x?OIT0x^#VR<WR$4f$KH~~wsSl$;m;|uoMebWhA)I=&O)m&if_yFEPe(mW~u!h#CBUSo;pZsH&|0`=)0m z$xJ4_GLS$*=!hU7m_kudRK&V#!?vz%MP0jQW`bZZpRNlQiinDZj`ZGpOK+3jd!5W= z-v4*rok_t&_WSlf5Ar6HIrp7=&bjBFcJGx1@7j9+`w1V~xq0*6y}Ph*<#KlR`7>DE zf{uhVH8s@MG}P5Ki3&wsbwfjOR{FVf>3PL$*uLf&Pd{|`a9IHk#m#P0hJ={#8W~l+ zR~EBi!Gai>4ZC*QfVuK~y`uK`*Gbsw6_@W9JMwVaM`d1K_PJBGv&)mjH`ofcLi5}$ zBODILh+CdZU@NSN>>RTw=H7JEO*m8Y6BHU}OV<|miRPhOZ+(eguP3sV?40V!o3Nnd zrYC3Mbj}UZ^==B$T6ww5KjAiP-GuTVX3^iDI20Wp|JY-aj~+Q<_|Rd)hhw+hb~_57?d>fUTvD{)tLHp=4fv`t0X>vK zr3kwqEfdU(6UDp55h#EB7AgVSJDdhF?tyHidldioElh#TUkBNm4cYS8qM>{C?js30 zcyQ02{rh)q-MV$h?jzWz^U!{v^{P3f0~-T4I-RKD>H;0B8nIP?(~hlD?bfi>tIi5! z^~oKMimW}KfByNwQtv6rx5lQk%jS^WvBvzbI@uTNUokQ%Uxk;k zt9`KbT-f+))R&K5z$+`O))A7Bkl`<{3?Qa+Q*G!AAR}Z$Nurc3vVZ}Z|G9L zaO()v&W(Q{3AJj=**+7b_do^)z!6vZe31$wI1uIL}ytw?F8ZK>g)i5 z-afuqPJx%Vx1h#RItm{mmcLO?bi&u$+a$4YLQChBS2Z;?VQ@8ea5_;YGZ}QE^&~Nz zPJY-MkH5N=n10XxLlCtiySHxLzkkocgL^?KgGQs0%T(BjA}}mWfdi9)$?EjjTa%}F zmmY+S?a8WeI7sO9!r4_I6?>e{MZ$*n>IcSyJ|k|$5+R9TUT?qf!ap8+_~l2(8bPZE z{IWqH?Vtn@D0xtQem?qxJ(kBj09w(JR!;{lJDT1O3Kbvw8PwWdU43cQyK7dxzr<>2 zZK|i!soJ`_nwnZ08d|CiSZvni>cnOd)n#Q)Xb+&M)6j_sa&Ijtq%#EQX!|J~_UxsV zh}F?pT~h%HRh46rY_1by$=TV~j>QpN=id!#EeEwe0JUa-T2GmT!KN#z|!>fV;F6!B$gq44YY>=kT``zD0$kK~PIcTe7vwKb?<_<*ye* zUDb4n0peKcRIvGUs+x2?j!ox>x;jD!W>5lj+JG$5_OR$b;$kB9)L$=u1sks2$bMvd zSUxImaj`mCd1?GYLa-QFr_*04 zB`VXEQgY?*j9p`j48vl|kimoE;^It0hnq~{^ooeW$#MgU(@68Uj)BB!z(2U<%(LUA z$HO2$0h;1fvUtoGOIU4(d!m`Wj$5@E$q+NdmyPbRb@Z4$u(qw+*73|lC$*bVIst3m4;EAge$n7_Q4jmvjB>mbxv>eJpqb71(>7aswRyOwO?~e+NA#ReaV;Y?Ndr7m~Ja}ib%oAZs|DrkrINizSZ1$=p+}FkMaRqQ1KSd@fKn|EqL=5 z>i>flWV{9H6r2d&A>crr!HqiN#sS(?vO+ZAAvR9Lf5{3F&_ECqbjt)xaDE^$R)`4t zdLfqyP2R!`jz7U_OnfC1!MZINA+P`h00oA!BFSRe&mzo=1!acNN}pC?H9YrVJkO@A zeD7xi9?u?Agl7xeBIwD0#gnU?o+o=K*IRBm?dv7lkK&<(-&iztw}tz1Wvs^2>jB|~ zy%PLf4*v<(VFBL74$@7<;yv&KVeZ;Xn1?hF<{2`=TsTOWd&PS&_ei%6eeRKNLnH>9 zTe>@LV0mUqy5+0BVXPei>7c>q8Z$b zXyOVf`~xi`IXs~Q2toxnLIXEK;R*@5l8XQbEpEsoT6`BmwD7SLEwp!dXfYt>LE9wE zZ(z;ztd;!c#iK+Ta>e_OpFd#kXOrNEb1rV;0i`-71(a+QP`V1dcDn>G@-V|`?zy+| z=!uKya20rg!%yV!8#%m-!?*Ro4 z9-RYV^gBM2`n`8F`VDvQq?zig1&y&+Y`Qfa_mOBe*USsBC`slRaaXG=9(61;jy&-Jt0hrL$)t z7Koz0)D+=gf3>1wIWDGpI zrB@kw07qXYOY&3vG*Lm7FUe1ohYACZy!IcYC*Q`Tz~l}dnAGsVWG4kCdwAgSU3*gz z;CS+FLy3wz2Gf%@S@h)2P|uS+H0-PYRQ?#;Zv9s}5@80e|2A%BL3m60Pg{fG%k|&L ze1vEJ1N|4lwLmMH1KrM|Dcnl97AQj#R~yB_9KM0WpWyJ796skifcNL{)EzvSx`G>Z z12^izRcHxt&~juh(QICmp_|BOPaPZP>G6P153iJ@ZzvCK z@8Nf!qo?2fOP`h&m%2wVc7Z#}N3eYF zLqLF!VfiK?!N;(p42b-34126VFOMd)9hdqNECt&W#5YoSZ}O#ZJ2pK}p@92U^nil5 zF@y)~kMI^8JYXNu13!=)=#CRTOoxdcCnS2bY$tjcAN+HA=oFZ$-1IQf!zGO-KS2m^ zdbAJ{oE}C(q=z2;O9p+JBqNIVO@;?1m zb9f{UJaikV-*&0r(Ne!Zk@~Hb`mO5j_kfU_h+@%3qSz;yL^18Zh+?XdSEX1a$9WpZ zc?{>(?;PjmKjM6)6(RQWdjj2lk5Te_it4X?wr9+%J!9T&eVt(+K+Xj6+m4dJY56=} zMYq0^_Jzh))DQB>aLDI_Zp8g zGd+D2o}Nbd>$%NeL(b#X@aHXvyal7Dg<#%7$$!v-n}#;FiE#KYXc){}_&?JycoWeO zzOO_>e~E_0SEpeBpFxMgp#%PnpujssLlaNIog!nH({LaThVgm-GCjQ+(8R$o6>;#y z$tyYd2hYd9SkiREdCGGW`T1mXNa^r=N{91jdqdA&nR_JrogGdP$50ExAt~689;nfS zCDckMJ43esgoCcu%I}_5&U#vrd0J`btx#nDYOUZ%8jee+rB}ScTX!c?`YT` zyaFaQ0>7XWQ7xiq4SFMK;4?%~??ewpb%3X+Q(wFEk{l^yGbWxCxiXq7IWA@4%^xd>ydCzWWbAOxCHrH zNxN?YKZNufi5me%a09dabh0F@J>$a@U84M$${m@~tP}zm;z$A5S_mQ}yOqt5?mpyL z3#}7{_#XKFe3ZtO^z^~|>FHY}>o_8WWWh-;X20}OLcf!l@(odU1mcjm9VXFBk@=E^ zY++N}l%^gN&RTevy>Yg=agO6SCy2ArPYLTp3G3jiV%;ub9WG&ABw;;X!rIV{b^j7q zR1f2wOK{aV92~$Uc;{|57H>&{M;l<)bI0m#FX>aa1otm-9o!e_L{sQ~1^{q#*UR`x zzYQ#LgbKb*Xy8UD;6{(fjUN5yQNenN0^_QPZ@b5m)EFe+-fOIfZ?v-#X*D(cbFyQ| z7Eciz&>nB1ZD==aGD*+MWPhidmQi_+5DlgBKHdzBcO(0jzJD!uYckj@`psJ^Ac1#- zOsxbvpcSS0-few05?nKTnQk2{hi-ZtXy(>~z2W=Va3Y6~W)gfR!CN@Ax(E8ovE~^q z?cbBMPiiEpmAp(;)8vq}+rvFn>z~gqKC}n(;7vZS*^%+zgsar-s5xvE|F)gOcJkl6 zt^(W1Vcj9877p9aVO8C*{mVM`kR=sO+Ilb{&SgEn2#s)Amr)0}tfR6c4sYGE-h1}Y zKypAPtpC&@Q^GBWG;U4+El42gt&YQt(8})`lT6ix| zFj^2RV(%Z1>Rkm(e;iQ2+KGj~*}w z&C9KI#h*zM{4FF2ng4_&K;P~I9QJLsuu*v9riG-#Fp93UEPeOG9!(S%iqnPJERp?7 zSRiacWM{u%N5m&eX!)bo{bOuz=t+9I?OFbGcWh6LgD7%nkAIHc6{U<4r-c|pwBQl8 zq0>P>$sc951eTH_(LkDMCFozREn>^^TyPu-)V|6X*WInb;HP2pA`=|Kc3E9kcXn%yMn?jN?5!`5@y zY!0jAuwGYzrEcI6)CJrKecT9j+z9Qyob%Wo9(~yOZ{pmDbfVtj2}C{L3ZkC(3=j1N zqI=`F5~LK5J3K{#ERrCndm#JoYYoS@mE#-E@%85T8oTlBAJ)ic%rG(=Qe>6Vw~@ku zp_3w8LjmYMzdfmV)7;U_QF_G3^At1s7NKQGBq2c-`eL27@Dg$^$WhJku zRPnq@e4mQS-cdU>M*bg|YT7persN3ybkJ%fVym~5@m7v2pbQ*__;ZF|f zhV4zo-ea}se~60VJBW%|uanHg_>#;7jPuBhhk||mjv{(2ryw>V+<~_-AZ8Xlo(JG! z2n9~R079diep*xio}Dm>lBy}fHW)$kAU8rNl-}ngMEB0*3*W| z(}tS2fkHA*8$^+Q{4qglf;oT$Xu?{615riqLsU`D_E1F$XlY#q^sZ+%wXpAL_LPhu zp_dUPk`eSLveQg(DKZ_0baL~niyKhRZrFi1sFv&}QL>-(lKo_t>?h$W_S1j%)c!|( zQ-2t{IL2Mw821nB;INK9y;XA9mTuUAdh3#UJ5lOwi_}};3Tx2=d8KvWI!BT|F~`l- zFNv!{0ddt%(|aHO^p1K_9f-uA+a-*p5h)qSTRF}+V$03lfu7vBjGjDFdU9g|`+X!Ywx13lZ2)AWK5Xk!Ew%1>nvm;o%|-9k@}ld2lzfAKuIe~rGo29)O3mtriY zb8da*Uc!{ltuKQQLJ8dZ3J62qyxaQfzdan2>uE2&r@j21_NMo=m(-^{k34w1WIoTe zNP-|Mp3iKmknaJao9)>+jte&Y7QI8VQn1N#9>>yVLg&? zr8QS0WxB^oQIShhRKzEUMt<064>tyabV+gY!ID2xME-~%J4ujT-P(%gpx!!z=!Eif z>f%Ih(w)Ti!1%3}+wm^;IDR84(KGv-`+R?Kdr%>659%?=gDR0csILAzsNS}OAoX?B z8j`%sw+UmH6q9xh_4Kv3_tazeU-qWb*Yz=IMm{e@JkU-|x+xSgGj^!^%EpU6n zRgL6-AA90TDoB+cnhh8Tq-RJ+;snco(yfC>xz)7Lq{o)g& z_CR0BJueflPLqIzaj8U$bn;~d4MoVopCm%v%Ac(SAO6gW5XmNXhf)L<>E>Sl6< z-pQ?Lw-w>)hV?SR`ZDo-W_nrE!0UV96-1tPBi_VV1<`F&=ipcPj6x)s*msufJ3jxj z=l73$UwguhyW5@+yK(Oywy!R>TdT~p_%bYd!qd) z>Dfsa!C_XCz&?^-B~Q)XZ997~?mI7DVNY1e=YVLs?TNB(oXN`dNM@g`=YLx_=gwgL zpKGup3Z-~qm^2T{c2Rbms!2bln+)5_xz7;4DShfVu82tRi8b z3SXNsQUjQhKxN0IY!KuS-A&;0pY!P6;UX!B3i?% zh}wA-QEX2Y(SY#(ZRO4&UInV=^?!xD3bc^>!>xa;0_`n7l5SFOk@|lUhpWK6nE?ZZ zR(QgNAe#y+!EpPdcIzWUUc3UZkt$$QWH!l&m9_HM4BCj$BZC#UBbQ+>71C?1FC;*tAo z9=Z2tCDgg*?#MmCUYS*-#hSn3Yr;CI7kCqoPDq)=7%mq=w}s#TvyD87Ud|5C)9lhZ zRS2_-e(RJR@!pn6-xazVo~&sgi`?P~yi4Uo8a4tA5G2`pp4nQ%x?{f--U5aFDZYh! z2L-rrub~JR?pg|Q^%MK`NCDv{<%>!<3KiV48O$x4FgD5k^#1#-;Ulkw`#^Q}LJECbEM=so*dn8y+XEnz-hjRI`kf5||K+T8;h ziC1z?8VMa=&`1#WqM>K-R6z&kNs36p*IkEDAnDKMEj73AYG8K&cL!g;Yv<(*8c9D8 z50v1+|F4<4jr)`HxCeZaYyVD8$=Gh~-#^`JIkr7o!nITFyN_Xn#kUaA+f1fi{&*T$_vYqRc6Y>^JsX z0!E&!wW|r%pzKoK1{8@&<1`wr3_~j;(aKCo4_OP4_kr6D3BVnm2ViQ^PP42B-M(5UOJ{Vh@t zuvHF~X<^`gV6_k)7|t)?BRmQF^jgqh3U>bHg_ju^5n@wI&ItEqZ1w9 z3i9>pD7T&1x_OE=R-{xRl@B!&s%f80q zxAk0b1)VOD;p`rW&hKWeUtF*s&cLcVzdj{dd_uZjRD6BpNV#b7(W>f;%ZjU;z2f}X z%P*(K;*7!*r+0sjb>7SU_LDg#Yt?zD zqh#-;PG@I(d%JP`v}xmw9eA}jXPhXpyRtWCF_kJc7HcFc9O1OiR_u}mMg{w+bY60m zx)Yy=jU4XV+S*!yC6G{%F_Wsw8oHd>*=5zOST?Q=4G(uTVhq=m;e;C>UuAR2Bw}dy zY)w`>`3s)8mg^#KNf)sMu8Pd0s z+vv4v<3_xf^oT`HtBEa|hI-)Q=Z8bv0uE<%;YirEYwNHFL2>5cwRlg%R4!=R8zix{ zC>qkv96y$bQ-wYdTrRAA{|$>zXIUi0rbxtTn~Rdz7sM)3m|)UxW(5SKC|1%hW@UV& z5=>7NUl3maC#?Z$R|n|S)g{B)QJG5Jh4)Yb7P;P_Qdu+xKM>dHl*z0CI9C-XI^y_M zP`3uhu;M+m1dbgfbz`t-LPkIVm6o=)y0r#*4;qY6kN2SEp0zaM$y>;!Dr0G^({D{= z>bZT2hcJ=ToK1R13tCdU9TumiJuJP-Ko!R~K+_x?7`Tpqi|gQGBOmX-D*!|9q< zNx=#~{e(Bruh?xEe{oucxH#NB`t?uS7(DpOHoUzDXoG0gThkLBL@L6A2{;SSN{Dbu z^~R)!QvWN8x6))xGX@SV#~nLL=o;J>Ui;Km(G{-}ahwl2}hh0oo z@yg%fFA3wvCb4}G^&-tf*$-vQh75saO{ZzXti+L$m9>?%?ajJ$#rFK|`7bBp*p&CN z!(TxGR#{)d8CwNyl_<8+eZF$#=k(uKtMzr`9u3?hfqUctxJ!rw`PNQ#18^tA=|7<^ zA#Y_8;>r6c@_s=9c?a^q{rE0mkK*#*aqR2=tQ;&=mYzQWWe8pJ}egr{en}) z=?Y-Ky$5~ZUX4CvryVbl(0AkS3;enMAv@QT2cT$}60&XvnyFA>chOJyi>YSJsra$i z%rzbYAZ)ft!RDIy9^eUd(hm_*qNH@g38#h$!wj5rrK@8-~&U z>C*0qVz9%tp1hza-wAa?w+STf)=~fnT?AXkijg8MeH!{Q!6y~St?A)}eQ4<;) z7c+F?4O0_YDqAeOktLW1Pyc;<{6nLnq6QlblarH^6QbZrfG zbE&Ano-Vbubxln+f73|*;&NeozMu~m+^{lWL%L4J20pomtSIvD^!Sc9zNv?_q$H6| z=K8Buj$Lo^tx(k4Kex(f<7G{O?JE3LrnSm3hi|~2no82Ga%_R%f=1eqC_V&bBgYA5 z*<2RM;_FS19*fT^lbRNCw_9aW!_qupl}W7%O(od4#_6OsVf9&M)F7wupS6gM6s##y zi}U8qvvMoaD#HdM_>@}2$-dn!;>>Axi!@%yFYLVwuo=}e7#>aZ;}!amO+Uiiqs0S3 z^IFdc+fH9e#SfZ{@Rmj#il*FiiUChC;32)G74KbODgk#OWEGbGpN2y3eqE{5D zNJ_w85|m`@YI8k<|ED5xc_^EyNMagW^Hn4?+w+-<~hQ@F8z|zzIU`JujN06E*g%GtwE0l;5wGg5vda)U9zR;72FMX&?o(mvzd&n&D zXhSc~{BL!i2g}~kDB{0_CHzca2^o-(%;rdjb?43KlscN_hJe2AQ|e&Jh#hvb99CzC znF(fW(t^zuQL*KhAkJ4>eQ{nfEWi0?6oxAFO{-QlS$n(?ZSV}XhJ7h!u^(`+G_y)N zzE;grG&tB1o0Djm2B&wK6=Ha6Yq(YVR1kc8JfAu>8Vnh-o1%c%P~bHJc$qM&hcSVv z>~{F5ZFDT9944Cgk+VmRoXN;Ib7aqs9eb=`sr-oFzy1ZMNfX%CrqeB10`7VT6bA$Z z8w|k#ip`r9RxNVcq#Nmggc#4+pxn)X=#VE zBqE~3EeT|g^_B#9ZP5SI!I$ME2SNgggo56p1^0N1FfmYppR*VR3JeAew+%?%)%NPN zWBD0b`_(jotyr;&`CND1mMw8{Yt{@Jxnd{eOE)|-b2$CSCYd-%xyN*o)jk!BWBTNw zj#>92p{*H`KP2BzhW!O$($Ky@EiHAy5gk}+sx#Kq80kOutrV?ZMMXhDTJ7}d>UrR) zY#2tE?xM+bk^bw)jni|!ku3J^sS;S5F(#$Xa-p|SXKnNYXY}$h8V0B^^p)(DF!VJH zrDmvV^l(Zh(`994T};ohQLp#w-dX(4M@yG}L_KG^ugk{&{^i8r>$b$L$<>KxMVZ4P z%N(9LoO#_gKJe*VLfX;YM3`6>oou8`<>#lO{xTh$GNvu9Lyuc*I-GJD92FH@W2`YU zohc+VaI6qr6r`PQ(#=VjGb=7`7EPF}Mq(F@Rpi*$?)P+2LwuC_=TmdyY`ce_#7p9#ho4p- z?V93v4?l^K#6S---7@=s8UYs!nWg#g%AP8>Z9lwiw`=cA2(!t_=x$c*7nOkUjC8GqsQ`{3CW6qWKEMSU#{YyDPZ*y z{S3L-LMgv6ANwffX3%WC->_x#=1rS6uV23b*N#0q_Q3LOtZ8Lpedgv3ILf302Y$9W zeMgKh%d^d&fA(xr(%!wu@C*tGi5@$3>eTV!;Yw^cn1A8ev16C8Rbhy7%N8a5x3WRr z9Sv=rjTPD1pM7?r!midRltF{SAAKzD=bwiUPfNSw4(p(kC(}ym>+37do;4y=FlrEv zavd>YLdQw{9e3#IztvDvT~kw4UWEu^O=GUjR&vzf?c?L?>*MEVz;(Xud>ivOqL&yz zlAfTG(W#?GZ=JWdR;|{0X?=dpvxzU;As z_7k5w&8exl9i{i(daA#fmX>PRxZNNnzBGIzjdV{kh-?a)(fWtIhGz&|u+(6BX!ugJ z%X}#sj{5Bd>+#S%@thP{2;Z5t6vFj4d{J78xS5)%=Q*bsY3WAqXZOwfMDjJ~AyVE> zI#UAM&RnDhqOYWy)4n@Wch;;=Rf!lcNqk#b%G!-jLBmhj?~(YzF+hNo)>qqP6e zxD?J;D73-I!;>b`3I!_akbW~!qVi8T9LX~tE+*x6M)Qn^w91uwKm;5ZXMb36c#?)o z#}r;U*TQSyL%VC>`>s6a4&R&gG^A20p`F9rCA5fAQfG!;WpCDSL}BnX8SHD^Mshy2@S>N&RP-Xz zYlw-lA<(`trzJ7no$M8>%@t}M~v18W(FWQhWEDrzTe1n1%op7QW z#*L$RB8=JRr%g|q`v?Nejg5~y`N|@zZ(d#{ZHnlVmlqow>s@U1Z3DO=0HNCn6T(g6Bwo_WYBl%lk$kgY9HJZ?V>6BkmsnQjk(yb5nqPfSwdiAlcYf~}9G zo8qjL1ksn`lAj3EDJ~g9=~N~Upf))5JU+Q6uBqzX2JPVHi&Rb@XiGP&51vncAo<&# zy<73Xw~zrr7!6sQ9uyMUM~H1AeE3O90#He$%~0WS6FvfV@#7c?4W5WwG+G{pmdB#y z!Qd%by!wqBH%TF9iZ~zI@5x;uqKkN~96?*dV#9_FhtH*_r(?Fk#;BEtK)Mvg3EH*SYHZV*$Esc%D{{|2!McvZUQragK{&LryR-(uiA$T&HdGI}%h=OYl zvf2_8h0hizW2e!V=&~BF2)~0;iq6hXFRfXPn^s4m6+>e~6R6bK*aC{NhDH#}!Za2& z+zm}QA30$aMRsKmB2MGgdA z7A4$D7ozE56DC}`w07-8V=YdJuMe2GcH+zjo__l2naEyrnk`;0zr1?&cN-{Fh%QPv(biuRGj@A zWj@(C`L_J*qQau=tjz447Om6iP`U(xIqZxX*Zp|B4wBMv>c{!>y}Z`Vpa0{sWO0FX zEs`()k^N}4sGDqACw@Z!G?BJX)(W9li!eymjvyIkJcCdz!g+Nv|po;`cHu&T1M zs@it>^5qO$rKnS~Hix|d_Sgic)1}sX0e2@8n`D$`Y{@usB)b#aE~}l0rGVs!rl`Gr zDN$CvUjn zhLNi3v>&ir?79M{QxH=9t`8|Yefo4-om$<3dqd3hN0ZR|)$Cj4h07horrbM&S*Wf6 zmsxr5^!x9h{?rWob+>eVsl4yn@ahwXldzZV3bs^sAIk8|I=OFffR9QctGDGhSrxTs zcI@~z8C$zPLzi#rhO>3y>eOK>%pi7!uT}|l1e?L$(%hv%0<-1lnl<~X71oJOEe`ypvlf|0+#3oC zONwkIIhm)=m()7J9cNQRWo1KkGk7I-wO80mt4azm{gy+7TPtf_I*nH4Gt4Nn4Aw-B z88bvrIe3k)8ZFVo+G{Hs+MwYYOY`&XDtEkv{2sDAC??;>??L(0I&-SDrOHzBd?Z)M z*KK*we!C9>Y7wCBfn?qZ$&9Y9t{~A=Dl;-Oi)*UeW%T%t^5Tp$r!Eu}6v8&Dvs>h? zRq&DGkc8E!U6hLLblNdhs(e_p1KMBdY(^m1!62RXRtCYe#w*muNa)hIxP8l)Z!A!Z zn2#jOZ86TwweK(abeKug(%@_@##kSR(+Rh_{^Cz#~I<=nYA*nu(&(!$jV9&eLWc(g{}`|Hz^h#6SJ~WJUsza>hxpbp zR1cgYzcEgsaCO*a_~}w9)F4o2GxJe`U=D?kUXRfY!BXoOWXc*Jpi^rhOZD|ZLx)BM z`Dr>Zr8<0rjUmzTL)A*HUszO_2JF{`GKbk5>FVsTD-@I;_rkbtsdV~K{ZC7W!)_Pl za*M*pAZzbPm2}t?6Jz(8P!j@;9uIEEW5JNWG?Oab@f;Z1I9*p}^n;ea;xoUar_QEb zJasPZ{K;b{Po6$|;rv?fu8s~SSBPqO!lKKJm@;GfsQ0YjWXK{Gy!z^^3;ZiXLlfCn zwpAV!WV1>8)%$#N4mAWVOs(|t31k6g$ac|Mi^cNgzTC#hq&ai)64-ab>#v)AAImQ* zEy&7lkWnUwDXfg8Cqn11RlGD(mGM!Ml#qQyvApYs#>PZa0+=1<%_A-IDYJM#f@Dk5 zm{5It_IZ>M%Vy4eI2ox{X-{O$TggMZ4z66eGBQcZjXntb{0SeItJN787o`V(T*e7j z|F*W~#=7#V#@h0lCTyzSP*+`(>nm5N5$^7C3U-FI-!e0XBlC}sj~zNBJZk9RaMQ5i z!-mBS8DyH)?>D(UG+^y-=~{ty&y|BFn8|UtOM&tZmt5A6 zot0l&mj9R;%mT}N^XAM+ifqi?_a%6_7H$~xo;dZI07p~q@!h+3|L|@iHsgL>zI*p| z*Co=Xf2JXsxSmZf&S8%fs za8N*SBnq8OLl8Kc_Q&sV3SSQjKLrXe28FK&g|BBqBy1m~zd3m+Iz=zK$;lFxMHE0S z7Ukt;+HASmnHgCqFUrf#&dDvn%wpRaAvl~O3gj@buyn;GBJ|fKLZmzKDYUk>wzf53 z(k(a7Rpjl&F%mfY;2!g!_OiV5+t#dEv*B=lWajBDDB#QX4XLdi_16b)yY03Su$m>hqzy{53Deej$)F)a0$!j9H*`BW>@hK61{ zTtMDZe+}T?L-Ev)DyKs;_?mm)ped8C@7Oxwxvf+qaBzxS)q20WaB^sj@;Me>52cWViASfg>*hlN*9~2ZA;D*XA2?G3o*-2K>8&sCoiu6Gng$SBNOT(C%>>TD+~OAC}-gW z9QsQ^L4Iy_*7@_Wi}KSE#*w!ROtglGNL@gT9qN<7 z=EdJKv^!0t>EpU1UG|6_5r!!&rX**th~U|VVCO8)ut8hj-@u&dN>!d;++-2F zBP|JRt#Efis_^EUa0@gq-4Yw z_^q<_qSlQB`4$UbAA2^tM58Q@+NA$D^_R;%%`XrP>QrNz1_IGN==x`cn;mCTMX ze{rgKt*`-IkVb|2ljE&S*4imrg*K-p(beHv#=cCQ0H3+Mw79sCs9RZCQC3o1T2xR_ zUJf0CVNnj3k^TZEl$4b>G*na+6;)vy6uFnk1ZxP03(CutptV}zCA7CYF<5B0;9z)B zTqB0rjQV$F$_x? z{ufW+W2v`i-+R4Y*>*w6gyDzjXY&GK1w*kHHX{7W^JK{5t&qnBkjKf8$H|b# zhzwh4&d=MnZ`-tK)7Ia&ZP>7O?Qfe=g8BQ#-+pGFnFWnfiaj`-I0-P-3)PYihZDIr z!75+o8}q?U;x$m63voolFzK2KpZpDB&6*?})es=Qq5SujEn9y4I1%5u1`?HE7GlfH zYE&xkIhtMLOoj9R?(WR0P~lE*uyAUONFjg-M^kNkbVyJLN)bbYL!zQ^ z#zA-hlGu@^XhaZ%4u_e}VNohDwWNB%*j6eP4E(f4=$cwZhsIu2Qc}_$eqB?OM$=J_ zbdRmJ!vP;(=+K#Ain_X-m|;@Az*yg|mdeu7Qg`en8FZcpI{yW9P6nNmLFd6R3m{*a znOV8Hc{J8(?ka@#EyyK}keQ7DF$t5)tas7ycDR%(#46OV@GJ_s67yH8@@!fo%36|8 zSc5C%tFxM0@mer@`i!YlCq?{#!*N}a6o`R4Ir9=^m?PwFMx(wNDzy? zS`%F6XMz_GgBOp27c;?&ndE5^A8ZJn(0OAS>Df3R>Lnpuv+TRIW`ba`vcKAOL^Yq{^ZN8Wz>ZL^|T5bB$)h~_536<;oE zc>1MT(4Qz@m#!V`sP47Nlb)U(4Ezk!pQZzwzLhm(wIrb?`*Zd=21cqGW+!u545G`K zbJ>Ff ziJd=hgma6k=idgmo(H!))$?Nt3NkWar4WY-_!!E}%Fe#bKLe8V%EU)GIj2ut%tz>i zb-fSLnANadFp`>@m!kAW!+)vFEwch^DR!vqtlPA6>uJ&Mgtwe5J}O-==+|cNIbGkd zW#{h~%TaW&_bm`J$eEL++ zUkpv%f1xS&uj2hygmDtVsr9;;A@T7+UV%C??339V>3UE9{6FT7hz^U34buAA8&Tw* zj4&Hrsj6q5nKdLhjKS(Rv)S(pZKY;e={S^kr_d$b5o{JvB`37@{rKKtsbdNYD=T3V z6JrZ${zJ04ysWH-e+GG{7d|R4Ep2J9E5fQTCVwA9GOJ-Iqj$bOOHpW~;lBjOgjvV< z`GtlW@dA^PL?-r!1P290@XwG`rWbt_7#I^9Wn{yz)}S;y%>wVA2Ja~*G7BSh7L8Pl zM;lx!&inkF?5y+**wJ};7`rfw)6>&3kWaK4h0YF#2v?VdUN=CWvz}ecdk362n|={1U^&8^x!P zkJ)RsYDfLHclVyc(Dv|K|1pyjo$U|?3y%s)l~v_z4-bi-o?uNBw74F6D4AvA+J;Ej ztB8xfumB;qQ>=}30U&@?u_9a-Sv7OO&JGtwOV=o2x^ROKfCSG{L}6PK;TEh?zA-ZU z#EB1PV2*v1n>UKZKc6nFwx}!5ojB1jcJ3%^G)AhejE`H4+|si0s!G&378hew!{x84 zs;)$O)oK(~8l~Xs!qHlGj0&31T^2PR`s;$@YK_KFH3ad2i=i7B6dV?cQUZT}3~LyP zFcU*jkax9K_$a1T4>Q3@%Cn3BCu!9~a9(L?S&1z-&juSTFDC~}28g#K`YI;Y-=uPdt%`bv>tLCKE={ zyJoX8SzL!0Y?A2f`cj61RJ7BQI zNeLCm0Y!e$C@v^jOgr?0UhX$Cd0YXsAQWC-PPAXvb-2hGV}R z_#VIk_f--F6~x!ID-Z10`+Ei=Xcs@5MROi5 zoS28}0pUIH=4G?`#EFw<(rmSzR_NNI%(Ew_PoM5pv@IF-pmb@s+SJo`?ATcv8jDIe zOXwRK<}$=4OdpNc^y#tM=2o`or5j^IN_JYA|LmC$KJmZ<4?KL&%-Q1+&5SdRm^pg} zqDuwR^}&$YGe!98;?0u>=l}A{FJ+;jH{AcoBab|{aPrO3A^3Xyjn6+ngMIJ*`cJ|m z5P`*JHA0csj~hES#HtG(GkWaxGof8NMnB%3_7k(ai)8DU?1s(fdE^UwtkTjOH?oRtU?afZrc3n@fV2sf{?vak@oxD6%<#dwQh zIu$`3;~jF~Wrv{CWaa=Vx&&3MW-g^tbaWE`1Pe|@2SZ^hib@ve2Xze1tJjBy`uV{@ z$LV0+dc7a&bo~8{n1l^@>mM2#6#Zu{>d5`~G|9(u93=K`NbH@E*f@;kX(l?O9fFFk zNK%R}e~4jNAK)$yEa`hdT?vdl`&0Lcny{nDkYh$%X0$o|LLOml-XO3 zwM8&I;devmtT$q|9*b+dI8nN;7a#KaXw7a!$xog2tn`V$-gHfnFxR+!@#4k5mN>ei zZ?(>4^}-@yzA!-;DP3}Q2~){2wnFhdV&3y^V?PU_!d=3(xEA1RV+Fl*VYgwpvj-60 zOOY*FgoNlymV*iY6|?G_*5Xp-ARnu-^iuVpxVa1O8Xx}U>SIMYJ4&OU<>B;Cl`l;X zO#A#jYg~CFfS>)gy&n%8bVSu2+`jn~@Edyx&^ zyZPsjFlS+D+!X(0c8INFv)KvXH=l`*9O-=-!I4Y4k;!i)qZpsACHi?IM+_Z0bi~L} zIPiJgDE0!PR4HcVOn>%BVWCA9b=xzuFg5LGHt9OWmKmP8eGq%&%~xJ{0@KM8FTDKv zo7SMR(%S0sTB!NbGP2+*tE)=E;WH@EQbJuSVS(%9YOPi)r%a%VA>I*{P?f;e!`QQ>VHfSZ`K_<=GSrZT=pSM z-x!k%NvBzbd4ZO}#`p>6GOO$kjS*2BA7SYS$cvnAEOQJ=N=mx0;CZCF5)tHnkG-6# z#+09~4G;hOO)L$wj1#u^X0}$ka8zyZ%L|9b-SzMjw+(}_5#U-W1WvGC!r7BxWij*SAURhC!01`auGV-lTO3P^wqbwAwLO?>KFi^5#mPiX% zULqjo7FcdB7izjdTnBNGl!2{ z&Pt>DiVQT2qeY9;nw!m02M=atwN)HHe*9#5NsEA`#ii**^(8oxI3vHPyxD1uJ8L_$ zcKMn$adF>$xBUC_WfxCug^+ALQPUYZX3Uu2_L{ss_!p`ox;@lYy#7P$EEH<}j4;Y7c+@ZlgyY9&!6y73zpcQhzp>8^ zFaP~nyj~9}TZ_7i?@K~%oB;_~DP30CEH=Ze)N0!ru~u4MU$6DGD1#zo&2}x%u00ja z@^L1)R2-rBB7=>10cCO4I8FNQjAm3o*s80Wvv-|WsG?A*JxHVQi^lo(Ljx3!W(4Ap zNDd;x#7*$=u|`x@9=-?@!^@X7v?{#44c@YLoHyU-(#RY#EclHrEcJ?j!G~yaWN=f7 zE69Z9j)MY1hYdp=Pi$OhU;q?DU@-WlY-;lIZEUo94W@GDI;{jURN?JC7QA^4u0qsn^P_d@w!FM)lLz~1T1!fH ztj1C4W!~PS=FFLMO(OKdJ8X&Sg_}nB)!+bG-?vPIf#R?^^G5G9|9ZaEtVvz`-SK@} z57{OVccs&s;X9h%nt{{<|LtSV3lFi~dylU{>AH96`ClzNN8WPNFf?Mava!ckuleQh zz5|!q3{jLD%Q$%Ow{O1v=Iiw*H!fSX?iI_OXyMu+BT;|BzZl5-Ki0W|4C!f9l{`+6L*8U5( zZ!4~k@*6yQ`fax%ie(Cu2~krfM_QQ63X&J0vI>FcqC#7730y%K-~}kiLx{Dws4yS& zr%#Fwr62$nEr(Gh!#Ytq_ZcyKXP3i)W&R480-T0%pu|r$AiK%d@5^q30rF=I3=A~- z8TG2t2v;IauUmv~ni7|ShXFINZHkUz)0#T0QUTBu?C=iKHPIeB&NU|Hr z4bIKZDK4@}5nD`GxonqN1y2YjxtJ=K++1HIv*Z+4z@$VOGi!LZICAn2%YQnR-AP52 zokH{F9m~J`eo|~h`k7CXagMZf$v-~JR+&{xm##=lJ6&W6?hwKY92)T5Cu;ccTj5GP z8CMXVxqPWLY(hxdnJ1t7+ilm63qe#~6Fhp#?1!Iw@=TiF*gI!pfhI4K+r&PV-NT+Y zlRPe#-#%?xs8wlq>RJ_QMD=S=ocK8j!DC#?Wfxj?W1hhAJJx98TxnS;I4IeAbSur3_w4HCfaL)6Y1O*{w;3Is1C1CRW}fS^E} zs4j(NF>M5}p;v8%^CSU0`2u?3JLrW3NJ#2d9EDV8HmDSa= z3}-*bq0n9*mEmPOx%n&0JWeoJa{t5cPApfkuu`0_^3fx>Lt@hgb(#?clhW-1_g`xJHC&L zn)NKW{q)SJ`0>GKj_upG@1kgB2|}>$si$sCy7$SyJpINSZ@hU2dlxyYLo62s4=VSi z5F&&b!kxk_VYYCcFb=LjoDeL#|NaN&Vfo$$d^uZC3&Y%(0V_JYSUF2)n`Li4fc7nB zbCP6mELPk(W>UDf8Oqn9H^fhx6sgsyy>zWCflsh#7&wqZQ8Ur zoi&Qbi1Lx;FDXepf(9)+{ah0oPFK)7eSkQ#f5w zdP?f+eUa%83=E0F@!%BP)nkc!aBzS@KkRDoTw>-xV#q#Nh*3NT5;F((!B9vrW&pT; zP>KkB;4RF|Y*_!8F7S?CIg|xcizps}D0i7vQv6vZw;+;>$Q4A)4Cg!5udt*}*!s)i za>HD_#_0;qd`?L*ocV3gXi($Tnt62N4|r`k182Z&GWhRYhsgY?h)%y79tBU;TlVk2 z_F8EbXJ=zWfXbEXH%iwmj2Ry3)l5h<2aOrCfFeM!`qa-pTUS>fasA^nz&w6gR{SoIuSchUgdcE`(7GMxO z0;plUqgNn{{SR77q1p;1IQ2){Pg<}3Em?dE=|>IySZn6C-+h4i8+iK)czYjsdoOr9 z3SN9>4prslW#R34`1D!u=Rr)A;~<2Rmz{CpC~dOOteQSY2X`Xl0%8-iq6AAkJu(P}?z z?Jy~(Ja^BpTelUI7H`~$rAfNOM=GF@n~Kh(1S%06@fW^m!Ip+$Uv&(L8>1~cuwuoE zgPH%B2%%D||M@D$@l(%OlMsxrW=GU-&Q-(biFlqeZadi#VI;!|V(fo>7e4U#LWC1< zzhMl5IzG3~!G=34*cNsWKHhfx+|Djg1MpnVnq~Kd*)2mGI<%;a9d_;Hi_3OiNY5?q z(hf>YOpJ2jps+Bzxg1WA5H=HKIs_Io4BJ9Q28Kl;P8$ZoVxidsBc;+0_Xz zUl6(w8H8ffs^Q{d(ajw3W?XZ{WO0Ug6aLQ-Z$ZlDRm~=HtTHx#y@YZhbEPjm;=SbS z&FR%&?}TZ&^J_~IR@>kbEYR|!;g5e9eEX_@BZ6bKci}OoR^2j*>f8J$-LjCXuDF@E zM7hw%_lBG9PQoX+@^ixv$TlJwe*@EG>8 z4*o6Hf%jj_-k2FZm#xGfP;Y{3Io^LOd7$63ZxQoZgo4M&ppWPN?Ww1pda|4fPNL?1 z{Bs^q%KQ1_SuhZ<7Y9k#H1RpbvSnX@6gHQZ zoegMp$W@3Gh@u?9NPBy0n;pTDfB#mB8MMG_;Qy=80#maN?>`4gh9_|8QjRU_;u$!6XR}I*k*v+NW#<+a z<(xuc?!nW$&Ry8Qt1@rbf#XN_Y+1j3*Ou)F`k7+_hlM*FOe3=u*CD=MQ`=>`a2he4 z69*4}zGg4V$j@e!)H^Kdnkt2tjAh#_a=W7obvNyGttz-X@$qLr`e;j8yYiZQ@R}Xj zesSZvb>qi{INHjvSKIlE#p{lyT}+RTcyIB?%f5Iw2}SIw;>)s>FCi_^=95C=BiidK z@4UskZp%k#hfU6InDXL_FW!E`T?s)hj0$_JQ?6i7JaI?T>;=^qGh3m2Rq8wLd&24! z-kNqEI^&*u#!;2?GT%3nf@{{jgV(Q3gRXn{ZLAT|UCyY`4!iEw`|i7M?wCY4o~smZ z-#zz{N9I7q9A@W`8Qcqd@dy03u`IR^TEu3KQYb>=hx?D2DYT~x^N`=VS=T(;u>s9~ z@%x3e(hf^7%AuW1@!4npI?%;#yfZ`z9cc{sZS`l?snt%g8IcDBm71HY%c|{Nh@-WY zm$dt`y!7K|tHh2rN_nfC_R``OdrrB%xu&k2DV=u2O*>O_a-}F&cf9h&pv5_2F{1w+r^X%+k6*Uo0MMZ7!;A@4I7yr%q4_nWS~c z>>##>pjCkEQTlP5Z=q1{p3gkhg;e85tMK^RT7?9cnj*UdNR;{&0v0ztpoQtvvGi_4 z&)K4jFcx0NSa=UDEJ6#5@Z3plZ4DKa#Hp_Xb?VA1>zkYEY7qaaZ*6Hu7Dw5Zce)tG zF(_Td1X)nof+%N4TPsX}Ye^|0`i*X*!pMm>yGSXclmlOBN5MI*VG=H#IC1dcal0J; zTvL5Rt5dN0WZ63WqNAgueC!v#|KSkwQpTMuBdak7!OQi#&YjcFnl&rxFs0>`3l|0k z(gMB5#mBYg@3x@aYtzO9*gzvz6uVl>vB(X!dPTwR%rRke5!=?L9sCxtkX41DQ|`qS zXn*_FSIxRd+q(pxz+iuc9ffv2*G-LRJa_I~P2`j@abu?>A_n)pA~a}7(ql=qF&+EB z@Zw+Z8`*@3y)8CL5Kf#ZDVZhJ`cvp-4T@H*DuifWe45HOSD9n5K|x8A!Y>k%GX37W z6bCd4*aEJ=34eL+x#wnw>g%>ITei%H!Yr`(%(@h~xH%V(AKG~^qtZHQ!>^lmBMY>5 z`}Xa>Z`=O=*!%7Pr^@W@_uky}DVg4Tqr=cUQ&B*gh@z+{f(nWi6Bbu8SRA zQPEYFVnd1`Fhd*4z%adMdhdNElYGy)85CCCpZoj%`TqEFA;~1Sc-S-+lkDPe1+iyK?uGl=J7$ox6B3HTCS#Z~pbKe|_`A4=Bio-;U)AK`J~1 zg#iBp0pNM1jF=b|7>*1cIa@nmFWNph$yCk&C}{42&zYFqmHI*PL!>{pcJ?;RL(q+zeY_*j53HwR-bycB)_I3p`4sF;&;?p%6$W|2(gE#c&pOY_KPP>qtauj1IH8tZjhzGLrE?x%V;-PD{z(953&ql{| zGY5eZ6?gBwN+t5dY>Bo+_x<~u8jiywvGe<^7T*yOZ+-C1dCU$`C6`+ivd-p;)grfX zBf?HD^>^QW_Z<;^jfXI9_LU;VLSfQI`_)5+fBJTE^6As@Y$rD0mSoG9dq0$bRq`ik zds}|@1h49Ir-`$07fk1eG3yZF=QL#(GRw>Dh;(9vdrawwF>paUY*_+kojvF1Z^Dxc z7R30O!Y*CBgbSsfIeGHZrL3&l+M=TCXHubx=3Y%X_}NznfBJsk5ylHnEdz)v0Pjh(G#PKn(D#mbSW!>R%#_)mm9vTtont=hOGC0`LQDf<$^_K5$ ztgLCQE$>o7KZa+fzp0@cmT#}8jgqnD02%2UIqNF6-4EqZRDKlQ-X^`g8*@Hiob+(W;gB>cW z$V#hgZf>ZqswOCi&f?_c+SiAnArRgO52GKVOl9?OsOmgmLE~#zS0#g+hJ?4(G(iWe z4N8L!_L+ok#EVYj1ShV}FFeF8#2;^*umcQl=;*QztN*YFFG7;&EL~kN1Z_92WEn8} z3u-WPAbtQivJ49{zV7VktuL?d9ssbzz8V@Z*0x}*nIIt^!B~3)W6j~%$#nc6Xe+0V zf4Gy9tY~`%NZr@pg4Sc43wYquz}jp;L#{%2^(A!kQ?_Si27^IATSDVJaf8;CfBhrO zqIsgoH(oy{Mi!Tnj)53QO=YRMv^e(~rr^(?Bpo~t@spmBm4%kaI@gq>`Fn~SXpGN3 z`|P8SVr*-LEg*S=hv#MawoefM|1c3laV<3du?QTE;y!Bs68bC~`%+a56D+`*uKC{&u8V7(Q;?=rN(bS_}YZZ@1``>(;G{^|D?(bL#B1 zg0l7`xvnK0QHMAP9&Fsk1+mb=c4_apBZB> zckDnWn?po&^rX0`0BzDJ2oLlHRD#wjUWYb`@*HSIAhOTdcl@;R6Q@j`?x%zFPy|Jd z1OR80r-)KXBJb$yFGdPFL8eVy`+HCznT24iMPRJCW30gv zbVTs}riTMSRp#Py`}5CCFfkzs7%bVPt26y$ArpUncPCq95*FbkJPf84G8gB6T#L0m zaMQcz9KIu8HUEuZ!NwebEHjrC*`Je{#l$R04vrokI#);gbEy3@`<=>hr@!@gNUjbL zV2W%uJ%QX$1pU6<-;szT?!<2+)O;eYn}~T7T3iHgEF?!+334{_%t);%E-FOqgS;KZ zgwBFPS_gp)|0t4`$e$yF2g{^Hf6)VOPn%Jz`}}GU+I`7LzJEVK5m!6)AwkuWd+d8W z_J6%!N5A_4I4OYXRzmuum$)u@87^Ywi!Z)7sk>XHF{p>$xx`gF_MwL#t`=FfQ9C`} zEeE)_pG*AW@2AAFFPv1oDr9o}eQVaNS$qGS`026iBs(dKh=5C41u0Yd^=F;MKA=T0w?c^N~?%Hd|m5><>KXLYq18CPY9cH#l|L>Xxs@J8z( z{%jVDO>#kFpa6lO1W1M+oQMR9P**n{p)}KJ(|#n;koS>eQ+4zWeS6|M={yy?gh5`*Ui!l`wk$>iQPS_#ObJweuu2knL7?1nIcw{Txys2cG4!Q9=1E2kp%-0?*Qs@i>wqWJ@5A zZe*8)7L}F#B7lmE5qHg_JXF{Y5K%zal$AjqsW{R~5&N?x>ELT6N!~v|vlgoyXVQIs z`SO9U_mk*3mTRG45@m+2R{zv58%aVcFfEo~w@7k^GzyA}+Q0umk&>M}RflClfxz2Y zq%drM`y`8=F@!}$nRM}H^W^yDYaw@*Cr%&b%Bx~2%&vYUKB$x=R9O&caX@&B1cPTq zO?f%?d+wQswEexv0CAl(3I2DbILp>EL1ZU`RW*PV69O-0;NngoEB~C~<==wQbTJdY~H0f?%GoysD!*jg`DMk$v6NY-r%nk7c<;UU z{`S`HW)8thzVZC|uLlrsAv9%vq*ij2z_%u`sQ);4XF`}AnAr+j8M82 zR)9Q$ol~k0jH!a+vPzg+c_cRJ>vDvj90dp`yA3X2YA3m3@PPOaxx-i!^<{foU$@wq7_+szbG1Yke=k?0Ufni`Xo{4S>o|=CAihNii z081+nKhTnd)cSaMM2x7ono*Eh$&exk`UKf$SE`+j8h}Cs+>kbU0Wj@I0FuLo^BVnl zJTpq$8{AVD65ze;v;VERXZUa*id#|T8Rxxzqy6eLk7DuOo7vpljj{_uV@-YLmucEz zE-}$;9rf;3*#aNc72^;H2TIGrLQzflc1rreI#yX$TmU1en$}JFx(F^QM+-!p-2r8R z@J@1t)EKZ>;owKxQa}-_zi|jc3Y4RTWdvS7KvNKC#>(a4>ZFCq;|1$S`r462{kikm zrQILN$;ykIfI7baK>ZG`ZzUH63E2R$5U%)3JWJys1mhqK<6tDlK>)@<0PfY9;7RG{ zva^eeinCJF*|+cwit6O#%*>{yWR_WO56?U!h97Nj7Fo zak0ovIAw~JOoj^=5;zBL8W^D7WnTfw6~|6+-*60eKn-@@yLZ6?kvo)cawgbMb5C6z zV)&gX{GN_49UR&~@8$1ka`yEFxGOZy*VWI}FDi(I;7dX1>JmAJ0lV(G7QbORegoNX z%kdkQ<2QI-{^(#?P3FIjWcKKe)wC8|KJ~-i&yVi=TLQ-6UADbI)<5T*^!}&&kDR=m zn_Y13;)V1A7-}zzVHG*0t?ZyrXGNP$Hh@|fHFdp?RVqIx-YV0;OY5m@Pu)x0E|B`R zjokiJ(+F?xefvg5ySlQ;$nb!#_V_yf9#ufEv1#k{YzzLsrgTxjKcb3^jfFb#lx*iu zKVh~#XM4;vqE6kFnbXPI2DCD5b9rvH#nP+NcsYQo!EeaKFc%%*+C80(@`2vwa?}hP zs&tU`p=Q8OzR^tbGXzN0ig4SZ8JguJkPS0MCX$u$5a_u#Zh`EFBRryTx69k9; zh|#m=#znch)pvDTTu#2T=Tb&S9TxNMuGTTbJRO~ZuN>N<>d0ZPKjyWzb@aL@p_?i+ zvY0TPySGa-fwua)26}ZPBO;<5d+pNy(hicGsk@O z+V$%>xf$7cxoKA~UAl55?Rs|h*Ln{riw}t*Hh3u@@$73PW3rf|M`Pj;fV%@ks`^jQ zAb9dEafmDhCuw72Rn=Sx(CK6Iu>}Bq9V>13+BOKLcmDMqM%oji2;l!$AHwywx1=}d zE-(TrUwP>zQMF3x=mBvU`S9n`3-h4&Uq_ zcGQ=b7hbs5-c3OqM<1hSt+}>#A$oT01-2;h3+hE7E&N>77Wg0L@4rgq>Y4 zVxB&_5TNUPy#su`yhB4GBErH#1ATp`{?Y!D?4e9+J^JS@^v__XwZETamq=(CfnucS zDS9UDkrVuiOAs-KI{@A$G9OEd2`*DuoDOH;w_wTZw*mkFNVL+@hYU9|w)*-+AgCA< zE@yYkWbts;JdD8TX6?6G&3$kObXb}$@25l?`ut&-pPl*FFT8deTqd+F0eEIV5z)yG zA3kyfJPL55XhqqUq}Sh$1z!Dl(Ej8qOqy=1OCw@EJntQ|{MeD_p+|3ebZDbX%fT&% zSj^JPF!Pu&b@KSAs08>=t^yLD%KoOAKPixW2f-5)fR|2Y1@x6XDr(I1sSnSG2k*$> zW?%{23T_2Fc;8D2{|<FcoFZaO_-q(0rn{S5Uxa zAPZfg5W%9v{9IiSAcwmIetNP(S%8A*qi;Qa=;S0~yP) zDFvi$pdSH%#0`5py98e&d^Kf001$i5q@%bHIjnkT0(c=h4n>fn z{1&SL+7Ip=naB!QaL`fkFgW20tCbS^5aWini=>EGvFU9KZGqSV0FHWDc1!zlV!s zKh_bXehi3{_)VME&-XHa3S3QIRYOA?IhZ@@Eq)Ux`tzH3-hbjmKTCZFQiTWF8XBsy zvD>F+ulegY8Rtp^rIpiiPS~6|IZADP&pzhYu3bBucw?|#LAJzu%p`kZ-IGr~hf#I9kaaLs_Zu;X}n zSK^>qA-^wj*s!~Y8IIy9c)@z~BI7A|bO9mOxpm5?Na| zsDXm$18ENtz!2Glj)iy-92c14Hd{ZN;z61eCDH=(qa!7X=p_iwM^HWwcMsx}z}NU5 zxm~`<2C(OCxFh3M?<`q&M1TW~CoaSrV0sK*3hoZ+S~i0dzL2h`kRkR5l%!pEBKwAX zUiV`(->`4ODKLRdT)R;Tt1BNa71E>&q(>3KO<~};!TlqdmXa2Yt5dp%2{O)R!ha)! zS^1c4z2m3p70pq|0$Vn$x#$dKD7p4Ix^x$xIdqgBmnsHaGyrb}3jp{k3JNj3YTKkRy$iGFi`NqoHFy;ty#nOrgIs!gtS#CWjdg{? z8r{=l;QD$+5ZD`dg+?i=s!x3M+O}=GPSl9X&}ADoy!2?I?^Z_W(aH0G&_BT%xHxX( z;1-8jjXH4r6C?S%pc&G35)=O#eF*gBF$fIW(%4~qa{}L7XQ$Xc?ujR2i9-Y;;4ip? z$)gP1sL3LC;ktn@IDSdwTikE>82Gz9T1Cz&b#NH)2kM=-`oOE++}hfV6pk`;O#xc)d&*_Hok!wTpM%qvj@66 z8w#%#7hS(zUT9{^fSTHZLle=EC>bYh?+Sl;!3)!Y^%@<_HHhulLCNL|Okt2A@{3$E zoi}*o?I>g2kY7HwkJnlFb>HIG9l)=fgb{R1DNNLtQDMX zE-1*)FUSUN2tk9)tjz1z;LAfy>GBmoc``E(s^G1qwXx4;klEnWG?J$Ws0kzA2QO}{ zb+2_Vf9%-D^R4~Z`k?AW+B~h*)>Lb{wZvM1h3!7uX8yyEKVD&7hE)GnS+`hU zwQjU-us&~n*1F#MB)+3>>G@vkcIz4IIqPL>uJxKV%UWp7!HGZSs3EUoXXlB% zdOdQ^HbGC>#C5hA_=dC-u{2+}bwf>x4V!@CSql^V2lfN^PB!?AEL)OHCewTG7+EY~ z%=q=|XWYR`v6-O~*P$|)D>UF}Y#xX>?Up^2IAg{N-k)2`J%Mc(_Y-%GtKcfR7Oss~ zARwaQ{pnkJ=KZ)j?lO0j+k@?WZYO8t{>p8}k!$ra{2WoQ*s+5-J10J#h#)F$27sZr zb3q;k-hF(c=vdy4so2+JG68P%W4Xu$xfoPR)ypsA2P_OS2G^M@k*QW$S6e}W7t|L3 zprNgew9fVx#Ge|{RnYQj*jML)#5E6JA9sq;`1|<>28RH$ zh(}m(aG<|`^nX0uKCTvETv3MUMkZtPFs>Ft--yqj1fK?yB*RcDwH!Cp)z&9Nn6|wFc}mIGd%F=Y@9wws!Itamy>4V}gUP}C z&Ru_xhiH2i9=OMZ55B+H)+V`Y6kT07;s?WAgiIfT?!6K{y%gHiLzsBjAo}is;CW{o zRENO7{e9f zOI4Lb@FL0)mJWM-^Bl;$bJz~ULf!$$*csYj4S$SF4Y{At5VV1fTIoQXrRp(Ex>vBO zy?M#N^|mG;!i&*K8^7(%i^d2w9sqNB1W$=}>f_bwu(1AqeB|V!^FuF9pEVr11q&5L z$FPV{2aQ_G_jh)?x;Y!2ipe8a3 z{rocI$0o>+QRwGU_MB1-Pg?G_f< z1L;5z25=B_h&+IhiaU375j5Vhwn@ki9u3BWMR*qiwK)LFvp=-r`)&8y*7G0y_+tY8 z^n{{(zipEYV1~B{Klm5hZNko-7ca6HYb9Jm!^K!~$*EI{I@(&BI=Sv1gHDk5HP)WQ zEIQFWZ1rQ0EkY0oyO>xEk_8@mejy1#)((hq59@$C$&Cx-BBafgHDZDLTDIxoWy=yc z73a*V@b_*WVT^d#P^Vc5>k*!R{=WNgrdLI-xlz=<^G;(Gz|#ew2JbZra%VrUSaP3W zQ_plMfGhV3d*>aKXN?(|0LVl{+G#0RU!cm=K&q!gIb{73<_vZQW+@^CwtmsmTL<;h z%hwl4r!Js$fy&|yVL$Zu_qNZ>Y3Q-}n3;<(Gbf?PCShj!R@Gch`Q*#@cEU2>`QDeG zfBwM-yT5|c@YU{5&<%i-)iot20~poZ(bdsw&?F~gcJ^8=m?K8n#Zh;jnFAws6&OGk z{<7U;Tg8=>#G!lcwXG6%!;sodA=HO4>(+=|_i0fHWY@9F*RvZ;DkzZOxOkR6rh5%Su79TjM_(c&vlCFA%~*|IdyTld zp0#bk{NF6-|Ni&C@3;*293B}6o*x2=H{MCryo#ST3KCD zUS4F!b)5KQ>%of>t|KQG*}@>uY8GV#B{K>q%aB+)m;vds=U-o3K&M_le1=dC**|^( z)a}#I{?`a48DD>4h?ivMO51uaX-5J)a#S&Mh3yIGlU-e(+AuC&g-oj}79+p;{4G3= zRx3tLoHB8Q9o6CF?Ge3NLUpk8i)YTK6;^eLAdF;B7=;8IdSHztCC|Zo_zx}?p8M0% zmX38bAG)GFIV*)VabC#Nd6V1Ay#v=p0jq~QY617UwB5%|;a*QG&aBHR$1g}wkJ@AJoH#5&J)DM5ni4a4Y>*l_h8O6+(J#^; zvJ$YM;9(D}dFGjCHa&6QB4Cvsd2YkY>(<3XG9-bgT-0uOan-6-YhL-whF4z&de|hq z{CI!=E5Nhs*=qEP--sbYVn%~?TpJYR=i?s`5EeRQ3?NqV4{uyNapcI6qfv%3JaWje z$S@aCHfiR}NyhQ9lOUP#)wDz(;7o>09O)l`myr{v&KMRsEq3(WxT#20m=!;3@`MQ! z65>b57W);Kyj^x1(R4kK?52JuK=pS2J5L3DvZ;qD@9#6LUo zh$1k%AhRi7Y%l^76$ue78~n{HMGtWRlY zW6H5BSVYeq=mx>fI$NByZ45l!I0?>&I5zfyM<0E3PIPoYEwq6ux%J>z7XY|FaXaP+ z`OxlyD}}bjwwHO-z8016saz|l>gJ3SxbB+5YgeXBnX0Qf30br4WR>Hzl$3^`5p(~t zmUZ>Gjfk6t#vc_EH^Qm0zUQHJ@goB2Q&Nn~Ju!CPop;@J*V6g1iIYcz^Jesv*u+=k z7%@lXhQ68@>x8U!=gC8=f#A*a@E9`>`z~KJdH%%V*mvThb?XwKEl4}FPx%$|A+AiK za?F@9lg5V!I~%pmA(7)JjfNnpihL~I?#p1?G>?T>o|`vsc34*`U{@RTI)#%1>mrH#maymumjk(~z`6^^G>e2+jlz6nFSvVqJ4rk>AgaNqL|%hb6>!FR zxP|`N^VwIN%@}WKkcj6X5jR63ZiYmhMNSDj_yDi;773#OtdtOm82oU^rbNChLVq_s z@MF;qb~3pl2k$?6B5L2|di z;@`i2|97Xa_k={j9pXD&xP1JZ{fieb8lx4w)4%@uvZqx(at*(oghTI(6lj^rWFkWtHIj?``4T4_YPS8tLkE)-D@iQPv zwbm0{0xMF_CoRXS>KXC)>_N@`AUkiHYFjLaW%5C0i&3fSAxAhrKb{qF3Dy%ft)n3W zC+X?$QDuIBon~W%Pi5yJIQ6t~kB>0kN%$;mJAlgl2vtx13g_BR{6&B7z`%N2`s_6Y z+OBu_od9Nj&QKAaeXMwCLA0%OU?7$_Gu%)*4D=PQfj%z2zV4oWURqb*fOvL6+P+f1 zbZ@k#sfi!G6zrb+*?DNRUqApMJ6hoy9`2HxR$-t7I8_yq(C*%O-@S46+v6XK9Gir1 z42p`hT$k{0SL|jLxyXkGzgOLR-_B2Q$iJ$Lvj`7k2XNq(-Xf_MXf25{4b-q@@X8V) z>`em#G6=E=!<-27${os)&JJ8I;59XH`n-k_ZnbT3B3FE$wO^ot0cYBF83Ysenj>Tp z{ucTr2d>RHi#$DcWC&jAEs}9jhE1YcgI~`Tg-D1jmfX)b9?n3MAjMCj(sOeF-rUK_ z8A*O#-Ynw3yt;j!h?pnWF;6aHo`{$yKxVO7=rl}-n||~yv*4yl@COsh@#YzYiGqCZ z-=7qNGsyAQ8Dx|N1$l0dg*N?);(s#{{>)6nTtTsKQLSPB<>1#mKH|TbV}dltjF5qW zaR>%r(Er≠1AU|K%)`OS25Z{|o%e%0D+5{>04U9*Q*1!q^8U;HJTG%Y?XTKK#Ml za=dwRVa|a2;rHjq;AC>Vbut-eVfX_?aAQK?^_Dqt)0Fsw>E(Fy{KB;PZ*M^I&_!V0 z#0Cp&A@rGF@osj1t7L{swk7XkCMw_jyi$UanDdeCepDcU1SfI>1EL_YoQcfC!MQ*0 z{C-47x9`id+49<89@nMrAuD~YJSr(E3dQDpw2l;YlY6_kKu)BjBsYgHgY$d7HYFvA z0S;pE9=S9Q9vf`j`ln-#nDlGvPaQ zp2!D}`t44386!tq{T5%lp8&O`^fZ#5is(rznxxhx?z13BrEo~4LGClHMl$Y6pWt~- zMoz}H-2BYKZ1eYec^}fDn8G+4*80Q9mb~4z8oaNsd;)^K zJ^kH%bzqGe{Luc(h`Ujb`$XnEuwN2)BO6&#LaGERf|52=SO7F|0U{*7|0z2z(elbb z-GP55Q1tRL;_1EV112+Z#+|ca<=zX0(}Y0#v$nO|r(eyrEw{Z3`m)imo@UtQBL8(0 zthvAZH~Yd*d7YBD*%Sl&68}ti&rf1|L^jF5_TZmMdT&}wuwnS@^59r9A@Y_7asv?# zrH`@9Hz4BnIikFU@K_u{!2N|kzc2R!JQXiXMZHrEY>a`8H6g2%PXWakJA2~;dT$yD zV-t&U1=2F0V^&lWw-O>SzyB!;aRDdJ1`J@IX1odS`GJ4?fDM>Ny138^>yG>jsG05_ z03n061Pmp=_bChiGpDqViD4KMaTpU5F(w8pyD$$drW7zDofFc49%eHZMiGXINdu(- z5ix{n(vUbuO?wE0Fl`7*rvb0 z0kmda1l0w3i$xe?*iJ26IlhzTu6X#V=U&-Fd{vZL1>KB_jxQBOmr^Qr)YGLGp#>c( zO?~stRHFwZI)(8D)gjD(d;sYzLXk3Qpv@qDgs8@!E;J9aUbG`fI=#UeFgJ*3(bFgDzaeFWIxgYE$N0MsoF83ANY@8 zkzKCxii_r~l1tfpVNsZ7Ti>>jS43*=XEW9~C4 zy3F;ZopsqTFHMR_ou5mD??ifUzt&=0i+snMajjq23G+cwf(Lld3heBJMY&m78R^%v zD1cu;469fM!A^Mi7j{BFQvIR$0}?1l3<74(Aags>Mk5e^A9x3cB7BZ87`BPFd(~S% zK6LsF9!K7v2VZiSwbR;f)7ZRip@Z8<+cMR*oj(F_b2JG~#m-yV34<0dTROwHxAB1` zb8nv=JJ0p#mMvQjRQ6jV78>thE!^|)Tnv@aG65hOPLpC{`&p6X&qCmi}GPiXJ= z(dc)oF!N%tjE%+^8~qDEphJDBbhD3~$-YtVy{@}n*c%cO+*6qR;fEillyv&S8u#faPW$G=9dGnC zFa5_~cYbyf3=#S`YXcW9}hoTgMy!f$WeM ztUaP^Z%XZe5t`l`+~T{C!RS>nA}TB_Z0LxQdm%atHKk*ifAr&5TX082T6 zMcM-$f8HbhV0U}}%trtGf7HSMi>QMQ=o!je{SXp!17^+!$n_amLf~<( zp!&S9o#}xTRk&K<7zd7tU=s9@_K++Hs+dlTBK9eQo10K=ShtZnfn)>#vJjO)sw7YN zMJ`E0FdMw553z$p6vY{M2w0;_PCgrnwDf5q2tS1n7tWpgFdj~Ain*-|)W7@n-hb^s zbUvr53-U$gF?`0%S(99=s=gAp!5I#Qp6Bh;N)QiZpgy1fwr%jsmAt`^SGf2^jh``R>0OU*+{p7GaSLWd1qZ7uzTK+dW^_sPZ!5*G zuIq9hN$~K|7f#Y1l1)dvo{}#FL}e`We|=raj<_9TbT!84Ll~pC zV~pO8F&c@{NWYLUc$A(;@Hz-|fO+t5kl!>X59A{NRX{yM3L4xsv$=ozgb5>bxQ|}* zWwSioYeMv}aZe^t!o0MZ0EBr4l35SnVsoWPWgzf}K9+o9{BzNj#A~|=A^sijeZtfR zxkLZ8%H}>(B>KM$jX};VO5h{XU`X?Ftk0Opc6QR*YQ)C@;UH(5j_^Q8CkDa=Q$tH6umZ- zY9ro=5&$5FUV%KoFmp4b!W-a}P^u;GEm+fYkBNdDcWOf&NvIJ6Scc>ROm$*|=CZS6 zQPJT6;p+>fnV-yqbxrxqAiR*amuvXmIeBzU+-jtuqnWXN#x5BV&jlU{%0nWS+f<3QU?(&28#is9 zjX5lFU*P!-_E(3^PbZFKG!tc0?_U#(4}X9)`i5u{-YW%E!QjEGG`zL3IJflSnUUVS zrs|&xTuLrV(Eit)6NP?)e~KFT+Tm62Cq73v9V)W{E(jl^Cm;iL&FZ@l+y)r-bhm%(>UP?>L&B@KmuBbS4hgoE_$$oHQv8 z#&{vvv1IcrD&`xxo>Q@?i&~<{U3+(8{A^^j>bdog#ehv|(IO({+lq$Cv7aN>`jc&l zun?hQKn>Q@c3U=9eKWu&b<= z2=lE*_b#S%)+jwa)G9|+cQ0tAVa3P?_vNo91Am*Ktv zaP9JyjDoWAq71Z$RJ#adXL{%4%(ua$6N^6e^4cwSlS_h8i+Nz4`=AMeq zPWYYaQC`(ZpH^3Z$hpQ|5s4Z=n=rF~^)gU1KQD^IE5j8E{s8p>;MO>j2Z&GyQ`y9i3-kgjiakV>){*{8WIF74vVx%}>q_|?FXfaaUf;nUm zLOAh)1?<61d}?y4$a7uWD5SKRe;$0~Tm?K6UtxTLV*3Tmn-@?@zF#I-c|fd1h)EnP z!y4x3=&F}h!VHw8F`;el3>MfO!M~twJW-2462tIWpt+%*VTQp}@}R76`~%r9J;CRJ zf2Ak%u%M7>i7+e`-K0B)S@ig{+>cVF26$hRWFRB|5igOU zY?GKIePn<`#?DkjS0@SRFnDz*$R@!m9kE52hCJDe5TEWHMr2D`*;9DG^LwMfBTMt5 zxJA$e22wqVSfDGs22m@aINvCoZxYTIieD3o^LgZ#mcY9L^$d``0=&=5D=S3`CXW%c z&HnVE-`O}R26{W!(L#aI=LIj1l-TMR+cWYv-$Z1FY}?p?ku1`4!1I!=lx zp5oknW1(*yf#b) zJ@V-MZPJs6r?U$@4^Vkf%+Ukyy}X>9S=3E?NG(m=1T*m)r{FgZauYDWVuX8OTb*?B3$%ifE)=@gq&&g{US;p@%$YVZVEX)Jk!|b}N4&T4`o5&*Cu> zJUt*%=(Oa6u|Lrta!gE2M3OB4uIJ?;c?L#J-cbR(XvkvMK=orb3+u-a40Ux?H5C@B ze42|3T4WBsC$2W@LdH*yj*i~5=Zj+*SAIGH+EHcbJG;Oa|96x{o@CqTQV|{=9@f!~ zj9y!1%8z}0U;i}$2~)J8D2&m$q5_Rj(AC-5)l!_EUHLTWCP|tXURV~;-QA5vb0NwU z2%FYE4L57BYTiN+1^8=SoM=tcs;FL}i!x}DO7+M?kT#>o7^RBeK)~Gk1KrH1+eDe{ zSa+(CF9JKP`n-TFPytECXb8p$3}TRlf*Xj~-tmy+4Z!8#<3%!v3pF_TWicG}DmoB{ za@-aLH6xA|Va7ma@ciIBjKmmS6>R5jh{QacA=UGd8Xm%hZcZuV5yp0v1}faw6db0g zMsd+kGJ(m^{iJ$wN|EA2(6y0Mq(X1>)@Gjl5{#9busz4=lj0~H)Ap+KJMZi}`1Y2h zq$CWGswRcHgfIl(5N>Mf54VrR^l9y@>8W9wgdVdBIIiPNV{nU0{B%Xd6~ zzE6j?Kv}W!8W=mrwqe|#{TVAzPK(jCoCY@{n-tun@IqPnS($|mU=or!VMC?@B6jX> z?B;fn180hcag&BQQSMSsvnv7#u5J;x9BiP2?XuSytvwp8Jt|nnoYC6Oj05ryj!zjJ zj!J^ilopm8mti;~Ge&glt;}w8b{-fIgkFmxBI3gjZR~xKp)mN)q+y9j2B}shp-QSS-SZ|PlfRQ7EzWX&(8e~~}#<#PL1~vr*{M9M3 zN%mG#;!bX?ST~N>^V!f0u%FC+@F(_z>+A|8)$9`@0q>%1IixUtrXsNQ4}Sra`u z(bENb@*Z5}X?6bX5vjC`#77iP?~>^$@;@E{HcOGU&{G&ay+u!;ZI;^P_pc${)fw9F zE_%|@lVb80Ab3jK!?sssJCJ>mXzid4vE=)08`WE1 z+xhAK6ZtOV;R_fdB9%t5Z#xX}59rMYc=OkNBERoUoq>0l;fOq`Gf1b|w=a$vXp`T$ zdIkLbcVeJp146r!HQBv^+%{=@nDfGCSxBqd#omywUmabvkHv7~(HMGhHXI7C54r@m z5B_b(^3$`P_Te4~Gw{wnC`=Rf08~~=N~&sU0V5rV*BTq@Q4GqQty3^04=U|X3OTe_ z11D#w!t^_>h`H?vZZIz&&*0#oApgk7$N+yoRDkl(P5kX?j5v+Gmo(_58}-zQFIB@h zCJW$LiKu4-Mp>#OxeVxCs1+ki&!!q>zO5`5?_@?j} zcM`(x8K-T#KG^cc9)wi(ys>51$Ji zru;G&nRQ_!eGGz9#d>-TeD(FBtrD#Yw`NTYE0+0qjafbyG1@&W6Qt6srEMg55B9L_ zvZX_uoR`3`zRFG^$74S}`v)Suc2DwKc)thvAK!s4_!^Bkejf%9J18oK?cP1idSXuk ztiJ~&YV11%Q;szpP&~UgdbeWp;z?b-Sm1=7E~`RefLQ0Zj;1Vtrb;BIB7>(5FmEu_ z68(53u;h)XWF%R|4H#JsNRUTMUl4tHiv_uY{nWrceK0hr$(3*xW1tz4K}s1uP!6_^ zri_6=q_6rQxhE9hN0El*q?b)ScW$aN7GA&4zy;EG zidZxh0Z)G(#N+rbLHix$QerO++ZJ-xKuB?|WYr$)_MRP!NY%2 zG~Q?$Kj@3UG>vS}p7G=DRTyP5n-fi&msM{6*mdyYk@U*a7~7-r>S{3UZx>&uW^!dm zey5o*Iw2W}gKn2>$dL9Uv5+6x^30WqV`qY&$iV$&MFLV>lCbHSULV7*iLX<0&6MT+ zTXXr*&=X*ZTRLPYSBpS#TX$!_#%u;>T(9`LLtACvO-<$(8L87vnKE{4@X(msCr*u- zI%USx$YIe=PP1pTsnXt=H#M8II^K;|yBn?cG+OO$$e_C+gN8ystg0x;zjieXj-Qf} z+=BdKNFAvXwK=O6m8J{fp+l?-qf4(ABtHlI1A?gJZ6t(jJjz2T;bN2_*$7Z7xgavZ zI253Jf1$gf^vdberx!0?jEtw0_uqei`t$<_Mm%)nyQ{j05wCs2^8jwt;;hy_P!?2I zH}_dZrDfa-JjPo@S#2%lFt2|0*#|`K*%w7%ofH7KpKl(WkN_v^J`}gYrU2$dGhrq` z?Vt~WQCv|Wi63QM^0FZl#@+54gI!*pH*e}x_($~W1INor9F!jisQvQ(?Rf0JERqa- zT+eojvVfgC{Y17CRUeVVu--^blaKl|Ev-oH0MHG|b!89~QjKYIeGY0!SJcB>U0KO4 z;oKIuduwx5WdZU6OC{G|6^IOB{nFY~R77+=b@k@Xs;Zo7;?V$^%T$KR)#uX}T%r2v4O2v>r5?**=(ITuOZ~wtaFbUfMYKEcr2jd|c@!ZVp5~w4vU7=r< z!A8eBA~uCP9cjGa{LHVsR#MVbOwlJWW1jhgabW^F*V+S`?SCH}(eR@x9V`P)t>|zx62fmtZP_yf2SyzYqI44|*90*J*RAds^ha<#81?eO8lCzFp?RvzyG zH-dlP)ygwRKltFo11TvjCyXNI-$oDpyXi$%phCNA&eR1>pD{yuuJXXKOR-3fyGOpQ zJ}e9p$>62+4f3PJz0}kTJ!6-@^wLYcy*;f~C#^B4;qsMJ`~JbKEg45Y0tR*KSA|`k z@Gxm>G3nghQPz%mCTil>qDIA&_e>58o7k3;l2W5d3>$_qMVK?_BEC<|pL+LWk3F^^ z=Fa)LQgnZvFv_zBGfF!mj70mY#5jq^BP2pU63DjUiaY0K>09otG+5 z#N&M>JST8cAvFc1H0#PsQuWS&WZ7ggOE2jn3?6QT1|f-+yoIa>5v?Brra2I1xI25e zdHVRkbpnhE9JsFTNJv3}OMk!+eYdYVX2QYoFWqTLevL%V8riey&> zBO(a9stEJ(G8t4wET3LVSm6x|Q;ji7w(-q2s-82$T-iu_5_Jqt;E5CLr@wbYg5#wE z*;%*R$G05MH?Ta~tyela#`ct+t)ajDp(02x)q_W?gPFuyNj_KeFCyXN-$_IVDpcwDFlvH>sO!fwEeBWzN<=wOxF z?_G}H{u?hkT-a#k@9-3Zk_~t(8QOTVUr&K!h-Z`T7$Bt^cg$7(2+tZ9`W*+S?Km9C zon%~d6W7hSf+nuv)_c8yX0%s^aAEayTO;AVMN2P3OP?n1c@=$V{>{DK>IsnUHJ9#k z6g^2}1EWM5O*A&t|JJkAxZEYlYr>_WkME{SJ_n=mBf6Rc6b!tSAPfjKimEwL>(V)> zmcrI!oh-DFU8QtODCKx|I_j6BhvA$tc;E@gGvrB-LR(cqC}iz@OVPN zQp81rz8j49KsP=Ep}$EaMh|_A?P+Nc+$L_(^Y>XW@V4Qd%u0qCUcDXI0&$D|x6H@6 z?!&pB#JTRksGrX`76FRbVg&0-GICQd=j9d_);1KCLEXtJEvl)=2QKUC<=jlH9*SPM z9F<@MQQe1#roX?Vi>^=PWEN4a=<4pRsX;(fVNy0gHmmFFdtz*NIbJ&YOG_*`-dbdxW5(zbIW_FvFW9F5rhXugnK^CZ*lWZoMx0@0 zW$v^YbC<7t`o(y-j^6=gR6E9amZ+%m_pf3Lz7*A)HZF@@xG?VCjhjU7$9W>ZW!n}4 zfGx-N3~$)7#U#2mHFfv(^$0d64b_=$s_*OML?^zhzomm}uPD?5G8h>OWq&`E`98Ej zOACk-JDOV*@DR(yh)G3Ykpunzp_{*+rHKF`2%IUL?ze*dK@a z^EFuX%%!O6Lnv~sQqJ{O5NbKMyq8z19L5BA_=h+v<#JXjDtmeW5V5p(8~iQZZSC!S zv=sKXSyT+QWV-C<$<&pgD5siU&sz z9X={z#XVQAvQE(l#E#6`)6xcQzA~pAKK7W68#gWw=}AKwJ}H@8^DT3< zUCqh84p@`fA`ebncHd$$vrQ)lT*kf@U%deu&#GE2*5ptJ3!62|G;Gh$ zxlqE)B`809`qYWzKOQ@A>U<#~lxvF4pEz>l(8vEe_|M(n9RrOnh$K}i&N3i4s0{wR z0_Zfs0@Y06B=mqI#aWGkp>cNx+gQITs8!}js4c#iYOmL~lon?f)G`qZV+TF(-oG5H3Pv0u z_*Y4OLr2i_zyp!6<-MyF`(%%_WzyXuw|YGw=d^L_SO42z{N0mW406TLyYC(%H+h5l3J;iAc2B<) zij-6UFCaGS>Z&T5n(FIozzEXO-p8@)`R41Q7K6-=P=tnOltNiz6u_tOB|w%zn@3a{ zSQ}8}Aw~+%V^C;lh_~M`!QI;l`j-J=I7CMY{-==t`UX)xlRW@CD24AqztURz%Agcp zN>UgI=OXf~T?f5&RaIF*W)}D`5Sm8DNH+R8=Xyp}HY#CWxdiX50RNjgnW8uWJq=Yw zBn!`;Jay_aN?~SXgKHTzrFtQCG=j+&wapI^1M&B%@GTW&ojdf|c0-bbrsmWSA15Fc zY>BizrP=jEmZQVgq^)qSiL!H3r=Cj+kSPR|(gYqzqtlIBfofWde5^G;00Md$7^FUv zEG8xz<>}C&>I)ZMeQeIOY10znHxL2zF9Fk3kq{U$bp*_p>$ZKXc0T5w%>0Xm*hlxZtHsOUDlupkN$U z0fZ^J@oEaQ3H@DC)!5Z<>uhdps79^Jii(=L>Y~=3fu7z@sMaz>U^!#33ItzP4wY}Z zdb=sefkzK7FgYY9^_Nm*9A>e&xsmr6wNQ~s?cwF3Q0d$`nWH!HNr8wh&|j$*fIoJF zj%7b9h=5UjfN=&-wP#?UqKzuEb@N)POwk5?sK;XW7!iX_EP8wfdi(~1jel)@MM)`^ z4>&ihDTHImb!lZN`}ZMC!aU@-+6xQ*QX~= zoG9z=9vDC!_wMexqeqX9ntgiYZnB$VIA@S--ZS}UWaOieDxfuJ`OEaEsb~F0aN<*%xl1=;3!L4ZjI7_} zuU2)rQDNi(tXWz|9h%r5hOwkIFCOA%vxCE)ELLw4^jLgfP>E^;niN>9IZ_UxrgR0M+eeH&75y&hAJT6YV) zTU=BWp7p=q&QIIhnaz%eu6djbgWS1rL2~tTGiKZgk$1>03_)r@@svYA5`70h>o?M0 z``9VkxRo>G;D0^Mg<_&mwClqyZpk)VGP|Wu3Zy;>zj!e{E7NRlXlQM%twp+59>z;< z`XvmT3s@r3uB7MZ5Vn)`Z*g-3ML4^qgIuK!3RfvR33l!8KQM6lGMrKt2L~mnG;|IQ zzUU~WJj_F9@2D+qeU6TO?3UhA|Jqx3p|_qyZ#|FRx(mHU{%?aTKVX5*N~O>F&0Mc%drYKxs~IU?A!l^oV+;^%_miYgSQhtpJ#^v$MS1D(X5rS!Z%`b7yCB za&qUD`Nn0{;!EcpkH?ubX8qf}UoLuC%&9G>Q!{D2V=-rSycwkQ_N z6$^zud$w;!cdo>>Dzf2@O3AJe)fK`p$gpWc`3g~1TFRnCSx8irAFPQNMD~J_hUDZ2 zkO#Gc$oAL|3alGGi`F7JT93ZVqm9#OVv!hii5qW=VR^#FM;~9Rj0Jf06D}r3@|L3; z&*74h-Viy?fKsa?>%aX3k0b4oq73wTpNKM-Pd;%GxlhiE?7Y$7>L3#a06v7K2a7@m zSh89z;)$^Y|6{4)G11=7(No5;De{rl$RD*Di9+uuVQHGiaGT7EyGuMl!@v(K@J+GcDcMx zl(n}5^dRePZwIKQy;5YAb{Y2sdfk9t--%vdgI-^QULTJI>B8|Jj-5|CfUv7nwHJKh zsd$hIQ&NIbff*RN?X_vB_4xf>@R97_4=oD6qNWBt-P%-t>mcyPH15RMV80y<0%-Yu z-a-(!m-$2A?TmxMj}2H&ffavu;;ne#^&g{6O$rY%J2}0*vB-;EuXGDCzWruKYH2Jq zox6muzIyXb42{P`y{0Qa`8bLho=tAOWptQSpyHXpemg{j88c?gTzngrz+!F~axgw- zrL+l97!qEdjSpVKY_8zQ_bZd60VzXyd27~8m=Fu-a~HQ__-%-3eF=}~PMl;MQlc+% zPd*uos4cP6x8s&N^M3sF{deARaT+I$p6>=nk4g23v-1b2Df90`Xgo|0pCMz0cq3ia z*B96>rQAgq5CCT{dDClay}TS8JUpB_teEz?@R4ra-NS~VR+V1U(%RnL+uv{FD{mQ6 zCRINTv)_)Z@c{UBJNoOoDjhK?OA+`et*k(W>4pZ7Gp?HE5&c{D9F5&EIrO&P0vm{efl&k-PVrYe(QGC@t=+#KlAg?XO0_Z6|^`( zA9QTa_n8G%X%?a{-ZR&)Uq92^(Gj{Krkm2sTdnr-R$)GI&OZ4m(7MkSl$87=U!0x& z3~7UTt~j4}An9`HhnmDikn+*4!Hq3#_HVS*CV$r%Y-zX?0aYTffn{OMd@SGrKg-rn96vsWU6DOu-L*i}I!DY{vU26hv0p@I ze~FdR;HpvgmgHCB(%>g7fqjy#yg!D$uJne2Tvg@AX0_I!z01mL+cHUI%g#(o%gBNf zk&|)xaz=3pz01rx3}7~NDIM(Fn#nG z0BF>+yQQmNgB3C;C`b(yNH^^Ou79HnupwIKx_a#rQjSskTW|BX_H)Yd%r4tg64k?5VB6dXtQ7oY1qln$iPC{3zh#e3S6%YX%NbfCxKziBq5C~}` zr0#ydGqa%x`uM#6Zw4~6vpX~Qo_p@Oryo~RGAB#UlhWOtkvIaEuVXp+r@Kp$7Zg7W zlmuX85=dt8^x2Pj(m@52_|3cTlzr=q%Sh>WaV8`#SX8x5G+- zEDLDndbIaBIysK^uBL|eCOtE$nvr3~8iLN>R9nBhE~h(NL(gx= zT{vV*nh1qEE1^&IH}n#Bhm}(9>-Qaf?b6ZLaJjGC9eur3vrnCr@UmROOJK2tl^!x) zwEP#W443sW36ef0j-^roS2TH(tdY5Ln?{D9OmMns&Ctqs*U;S-`OYT!PVF6c-rnYR z85`VLI=!xm^`^rW@J={5v0*sS=ydqO-V%4gl9lZ(xEWe@_iMNt$EE_@j^hUrE~T9X&__?HdEgKLp5= z1UCv#3MnapZv_0n&u5%Eb0#aRG%FYU;`zKRIGJ%C<@r2v%FAhWgsBD~lvU$&zAHJ=wtj-^}VdKVchn_F1ZTj(- zou?6mnatJ+>ge?JXm%n-(0sNFoNno^&jfBjhM<`}5pmxk6KAJzRaNoqEB2K#{m9Yt zacqNj%7YWfb&BW_QJB~vnQ3z(x^?P2?!Hm?$FX(jwM9@<^aFcjqo5fx!cVRC9Wh4W zetJgG{QO6)PFMTK&nY35m7O~~X|+z~7QKhBvzuGE6JlIFbkY6>-c6^hHlqqF>?6%8 zI7DkA^yDa_k3b5DWX&yY`|w(Hr(Rpa$P1OVbG$3-aoE1Y?na zv53c51Z8Fx7M@1r3}Trto;j0t`gD5w`7@_alDiV31mW$hY>tR%7Q9H|bk3o0Lfs)K zoN}$@b$mJ6oZ= zHw5{fiHsWz`0LOdDP+yjV)2INsDOn(6Hl5WF{r30E32vq%5^m0Aw6SX$f*Hyta z5&3~@s<4U#7>kQhQHm5q?G6Qzq#|m<@}M>v#UPVOqcM9Sy|l^8*Ino2?!~K}-Mx^Q zpF9%1KqZ_E0RaYC8KkfnNg1T6CuLBhLXRR1^7?N8+zo)c8E`iM?gqxGRp8RFsjPfD zpMD7DyWl86)%*1N)9Z7R6>~14=GUg7RCsDA{=2}UF_pQ(dZ)6sRkl^uCo5WOE(t1A zY-DWYV=&vg)C=-z))6;EetR8pL*AJv|C5thaN)v5626es`fOG~#@X|vvf)F_YFn!S z7Zi*3dK(-{T9}4QQRkexm|s-pp8yT;9Q$lFc^ZAYem&LDJ*!DiMJzOEz;lA?rSoZr z4;~db53NOeWp@ixZfC zC%U7`qQD)3QpcJd){==BA<{Fw2)Iqe2vH9BsQQ{Km&(ctib{YxPF~<6J)P<=Bk2JW z*;QPwu2)h@MS}u*w}`ejyHbZrpSJ7B-mEsT`d09fTJAawcP$Kp1=Nww+BPu?7D=UV z@Uo>Vzs=x05~fa_8t=iSf4gGovS43Dbv4&giWyMaf;lZ{yt@P=nkxepbtP9|{<90L zGnjJ%p+)m}7q|~up>OA#o0wLUq6)mS=jX%c5@5Xgh9&d&M|#w5NRSIQs5aEOg-v)r z9$)lR+`%o?tzM4di+AtO7xFh4iv;^|+$Lt@$Qe>r^-c{-5&G4tZN)4R9*^wUq< z_rR(hq1E#C>kSPpN@q-J?WJ z#Z*{ug?BN#8Jv;M1F1UeFT=6!QVk5`g72k_B9%(5gD%UYXl5=v9$4Q5<7ngcRtvAg z^BQGZ*TEA~)WEIY8l$Q?1R&~auxfGp!nmW1JFtwq#yHg#u0ozee|KkJ=}s5v&UMnA z1=5{^Zrpiy-c{oBQTGp|3kCfIFdO(2(!smZJqlpn?a>W?2#6=Z4H74pOE+}HQ}n{` zDf*H8mB}*~q960RfI!1CMD~{!WOLf<}BpGw&;Qq7JQR&`Ethk1#`74 z&ga!?A=f=YS)Dd%d-v!P**QGO=ht}XE4Gk>54=0c?juFvTG$cj<&ZmXiSp&7B zsUruLt^Q#zmb|pAvfB2PsAIbi=5ZcN=Zhb1K9<&sh`-jfW1By;%wK9&8ie3*X|n~6V|&Aih}m( z9o8!*M2U)oUE}(Nb?*ag!+|uf@+;ST2RvQ=7GtiI{+vo{xfM;X+#*G`(s}S*v%)(q% zbA9IaH4)uoqjXi}JHgKEEU(f=#dMEYyFIh62?O10{P#-yO#Z^#Spz4hXE zTMn1#Lr{9-O9#*MrD0NtQ{n#a*FQh|t>*?~_}$=?C`H3#sYaPaXoEQ;C&ARHg9JEmcL_|1SxK>#ex+zY`B|B8Y#p=n;F3)Bl-{#JU-0^H~ z;8t+!6!&x%5|qluRLFAV?Hw*CtLh4vOz|S|=ANK3QeUP+2%xsb?j{g7&}xR+688;6RBOZ#UW}a($?%ux=w6HjW+J+84!!-%W<)ll?*` zT++T2MO<2%z~aOtt~bmOo=hq%3=_l%v~mNktYT}RId1I{upmolIt;aff_=dI8QL%9u2cPb*OVUriN9Ay`wQ4<0{-Dvc%;Ts!=P(B=` zJo{J@wW4CpCvY@CWWN1%URb-&+G=s(Z1rU&C%0|=@ypfUE)yU)Pqj~SUa`Lwu{CC{ z__t4?>9EgHEIzt!7}#n!$YXzByY1kaCghgPg|zazpugU9^-?VmudS_YT|AO<&f(S< zS~}aD0-e2}T~0{KK8?v<82QMYIj=t&nZNzdrdhM@@71+w%iOth*PN*hoh7o@hanPu ziR|RUzcXXH4DM^5T+$0q1+g0fNh{G7#~shw2jNWVbp zb5sNk;5g)nkdngT8-=~82{~k?RPR{q+M2{6$V&x7mZyiDyVHoIkf@O-dFdc!7&bF9 zs<^vTUVQx0+uZ}<^>EFXT;z-<7WsdFvpj2O0+%V$bUtu-CUAKsaJdh>U{SwPs<{fe zD4U8SBG6REY(ixT#VMyVJhn1Xbp*K@^5CiN+{P1@VngJk!h*B_fQf}tnxeoSA#AHn zp`uV!B~Q5xX%qd{e!2a8`K6kASfcB#M|OUlif!fQJ?YI5Ivo)ax;1+%J9UbTxVjJd zisVAQ6SbDZh1QH?KSA;C{9BK{LlYAd2lng|;Gwbb0SVhKG*}E(C$>HE$hLI|PX1xr zfcp|;Vbf`-xsxgrw(2Lw`9d!{lVO?ekr!_$Kk8w zVNttAbXvHu6KXO@5zESFI%UJ6OISF`N!Y#asQZW!BNF0DqK^3D}OJ};x9 ziU>EAagmTuLP}(nqzbAPs4IfxAH{HPhCilcC2vKdvv!+>w}Fsx7D~U{(xRX;3o0ar zgkajz-UgpoX%qW+d3&J3Cgr9>5<;^lRbxZ|s*HVvZc++aN(4;=9yLeYOh$4P`oFwO z#;{j`VV?lQeguZS3JiM{7}h^64bgI7U9nw&4pUA&l{k$L8HI+BHFBfs#Uf5N1h zL3PXMEIor9zjAZn5txkS-eQD-5UFN?Q*9`8(}pU*yeTxoOC|8g#)RN*qpht?sdOM6 z!kz76JhEOj32e&<)I98lOn7qk;nDV2_<0*opFS;@0iJ1pUAtiQ=imRfd;d=BNQ6KS zmTG$UL&m)Okcj#vrLbrblA&~w_6hb`n$*ol4j+ct6=!Qi!(MyMV(i*4*yGA(2tG&3 zs;UCr)fV2|IU>wm;9;fFsx3yfdmxI1AHfecU-1a+(^-t?mSb0|N5h-n%ymKJbBMHS z;El1Htx}*$t6TxNiPfS^1(ZQwx~l! zKXm-~jdH)LnK9}%tx4=(SBDr-xG7+}LTFlA3MRYqQblcjWoaRDg;it5R3Yk~%Bj&= z%bng9Z+Ex%aNfRs`(=>CmX`X9XTBc}2i9J=OnZ0xM1IwA zKytKw6!y_}S6eeJELdQeU1N_z_IjHjvVh?j`t?{vX%|@viWgz}hd)jgW-jm_DikG1 zmHd>w5BGX$ynUGc84V)b=BA*8f#~ly>Fq)Af@ux#ba7I<^cy$({prXa7VoAG!%8f# zyPvXoidSBHPs+Zkc$ETtBA))22gnK1tfr$l_h`2l{I~a6S`c*#jAzB!vzXgY9XXlT!HyVBs{s%7&& z_MQ4jn{`}=i+CDj1;My0PtM%lsh~c1Go7k0ee`U*s z$g0;=tCCTCawV3QFoQmFOLL1w-B!zEj)0-Sq4 zjdB`K_jV4d{&9(@ot&H7?q}9fvXI%cSqW-m{uI&UCWlYi&NniryG``;>J@*ueZ3}H{D>{DQ@X4H7Lr~%RLYJP= zQQg9u(=&KJ?|d77JvS{azrmu`2X*sv)AI3PO%|wT^!MvlyUyVvr`p)m&2PXAq%RYP zLWHnlGU6|+BBf;*D~uSZO(|HmB8is+u3=4;Hd;7=j)aI%Fe2S+DFfyS?nvN^lEuV$ zx7m5*N=1GjJIc_)BNhy|%`Rg0!qr4@#fl1l6gw?Da4JHWiCP@$K^c8rk-!%?iIlzW zMrQ*M6}B;QvnGcmBk&Uot+df<3KXU*o>*OxCzb(Q|DPHlVRASyd3Z2tO9m@hI50UJ zP5~xLN(TQJ*=vSm36q4sNY;;Zv6)$U*%$ZFcZ@^G4T3;Hl#~d!HB}}jSJu{oh*#Iw z(fQFNxS)-9?C-0OFJF!n39ALCMT>ysMH$yrA-)#F>#xsweWdEz=F~+;2M<0fG9&7@ zO&r-1bUCo+2s$5w*|kH;Ku49WHX&0s60xIfy)K!}6r5gsk@)GVMkrD(jzM->pIP1* z`p}C($BzezCTS?GMWx92T2WqHcKHApZcso32SKL}3enCni~^vI?Dtd~-pk$7FZ9me z$Q;StZOK=qhz7-YwwA2NE0InB2b~lkitq}uiQnqsaJvOPn_3VSS`l_M#8L{V(1IZ7>MqrlD*-9lmDo(?Kb@r{Hj_SI3=^ zBiZu3X8GQ7`QCfvdn5kKz12VfylI5&F)&Z|7%)NV#zmuK!T$V!Hr zoaFwtA>+wE!I_uueL%jqK)$z4zSn%$dv%x%a(rsL>?`n!><+RH$r)3VxeQ3VuTle$ zW!drOFwCV$tO{MRDo`f4NURF3*PDQNpdpuQt~NF`L0!6A5RgJzYr?_uKPEalin-y1>91B-U-b-q^d}08ty`39}<7*fc@lR0=_B3PvU19^Dnn6xZzYn5I}}@(W8Y{(1adI?@dq-Q0Ap6%|nK6VLDM z;U>Crl+6w9weHLW7u$9X3}MP{-TEX7x?w}$34c%DE}gqM^cE`gOog72--rr5Q=w-9 zdXgfbt@tyI{v4z~VS;RTB3L{Cx)Kk%;?U{1xk6E)L=IAr3{V#iaUlv*g<7#8GGgF=8lN<+b9Y53lMT9 zA!H`pMsVoCCeh&p@=)M#4~V5aKPW1eveHphY)3>~GU>`@Cdb)B$!{fds}u*T=rIY4 z?m}EqoFMQ2$#izlOk7(hIJ{b@C3kpzNiCUyw{e-ci-;i{&*C^0HRA+ENNP<7h3V-1 z?RGyYs~I`FQQm4`m6VI^Mpmkm0Zg??qKjUT6TgfK}Z^as>h-VpiZ?d9Ver+|w zYh_B_5gU9)${ax>P&fDns}prv!wXG5s;H z{}Y7a_3$X*(My6b9s_95xw82n`{1t-?!XfTAbgVoNoo9A1TqpsNW`CsLW&Hxj^E!- zu_QeDpK|(1DFdZD6!d)Qg*#k<%7mG8mQQ~I9e3QFe`uF-2RC`+x2a0! zN-28)JVN&oKsnqi&s)7?uIT{*TAPsGhP>#0*^a@O3C!VxPgG*A1Do<+8VeN4rEC0& zH0c`8b?OYSp?MO;p%9C$JQQlH|XHMz+%x z{sfj19Jx-cf5(g>*ix2>+v&nS0Sa|Q@Tb6A@+kr=E(k367U)vH4Coip1@hLT>Sg=z zP=wV_;gZn?e|&I`=kDP=(3cIuTtR{~-v0d$6!kA-4`mr8zdwS|4I0UPvML^R%p6h|< zx}b%W13DScO{C}ARdxl4#V%oCvXY)n*$F5XqdzWC2%z)=ZUzS$!e!x_nqD$*5q@zW zeWDjleF`vLARxVA$4GREY zSV%xcMR&U|zw_sBmWm^gocKZeILH2weI_p~|CZ>7M)0q!JOr)fk~5LHk?m8o&0=xfJVDW^(=dl}QuRP+Wd7MT4x!NSTV`f%4PI)i&W-foMP?Gi z5HJaqI0^RT+y)6zRy0AN#8y^{R7mhUcZUT{hKJ3eH726KNA4cyHjNTlK-i<1Hc^A_qI|;NDR@6YugGtZ|RfVKA z%7oDn<&@!sMoHe4*PB{xq#(k&Y;lKdi=ufLNF~zfNj5(e7eZ*NcR<$Z5_|{lzD}2VXh84So|(%e#K@{`Bl}<=nYZQOM7$+-_2~TU0hfWcD?s#oYzTChtW- zohSXXBQsZFSd*9jE->&fs7aECk`)`AUm6&yBC$mk+-KZd6odOQoXVWLC%#Dr5Zc!w z%lD-5z05)JV3Lv*Sy`C;9|-YmFI&&O+S#H~nqWQD85Mor*74N_Gu`4=OMg!{}y(=BBu}B>l;2q$H;ETWhLc(J5y(RDJQRr!h@2yuh z&=dk5v~77vgATo?WY5OxVMd%5sp_F8b7(#xKOuT4&K{@S^6d&7-&Zd9^pjL#rKhU? zEGRy^aX}*TD#TG4$mBS?n_%wTxnKOy{{8#MB@XD^xx42zP_=q=MICCPQ;?Vk!?SK&mD)80?V5siQSPHDXx9|9s}~g7NPwQ7S6G;rhk^{y zaFe#)@woufd_e(Z`#d zt3-W8wL5&D%gT(dRrpj^<%&;^@09eN7&$H`W?VP(#OKZC88gh?o}V}=F>%u1h$(ZT zqTYNnV(^?PGw!=@#+cr(z*&Gp>Q*d03%TB7=+i5pR-Xu(^z=@hE)@p^iTdz>;!El2 zk&%~*gXmKbeR6!Kr0>LD&?X}>eNk~iL2)r^LO{JtW+2DsLTC^Q3!zUaF0o=*h}ns0B28L4AozaBl8Kqq{pQNI-k+3dOSHvn$jOuB41`hx@*V z2hlQUY5tez%M{EPSP%g$hy)h+0tg11)WBv(rEBcqY~rUE@U6;F((NM-V3mce~s|q-oVzU1RaIF%bTb# zFSVjZ7xD~U0lNdELdPcRz04*Qo+&%lP6m2f9rh8&V<$a`*}Y27nib7uB6}&0E!Wf>EA@EzeKti<>4TLS)4yg}` zy0Quwd!SYZ89)k370=l;yr}c^Hn|v~NXAsZc~ypAC%~^eS`&lTbOQW30e4jpWcR7r(_)gf75bVo5Xm`8z5v^1JMy?uP3 z{Jisb67(tRi*&~%HFRicutL2iFrz3yI%SEX5C^L~JA{vQDAj|*R)Lkvu_9{ROLVR0K6Wj$aXc-3vZ_E@}aYcS7t1)4y^{x}~6;hj9s?{+=h9tIm}h3&j;vuz9Z z^EkKL!@)v4CG_o`mKM`HCN0gS0o6tuT*9FL4DT1B(`Z8ajYpc@@uzlt{`u!SP8C6O zT;%voIQ>RsBM?2%!iAzxawv}DaeVLn7vZy;&8~6Lv?HuP8!lV#Bj3$s{4SAuiu**` zpTc=4%Gv$RbtFQ%pIgLj;!Xi-*tk;%k%LQ};DyD$4NxN~03%0sFIe=z0EGK`di!~M`udrS9!QMj zZUjB@3kX1P6b?S#zJ9@>fqpUn?pe~zAxaSnZ0rMU?64!4Htg85VdtJhhYs!8`RmrL zTYm*6PZqfGh(s?g&dW+i{)qIfykb!Yr3X+86m~!x7RE$GlU)R=` znX_I-WI|@_F}66iiSKJN^_`Ol_oOc&eSEIi9UP4Gr>ZY=IWG50oF%(nDJW`bZh&pC zp&6bRd^;i>jVDfMwPj^hwGB|DH`G>@iLQIsor3cse9Y4`v%p|vAtj{I$tgG(8UC&} zL*8hb4?azF{Qs5H1y0y799)+k8| zd**We`QJ7lKm~$l?9&vVgA}4H&-GT%Ok~VMop<~5s^yuU4RtC}H@L6fntiz@cJRbR z1ko*p^zoH)_6SfyBkI)UWik&ShZoDtt85TmOG_a1VI?jtECmfLr~2APX?lVLf^mV! zfaTT|6||}9y}Q}f3<~H)MYHexl{Av7yHPQ@42ex5Vp2?tq*M&;kl{&Ecod`mFh<`Y zDVU55wi9XCqywigT4hjmKx$vI3!x*yNd^;RT-wL+2wZ#GYwo^{Kc-zNJYov#t~zS( z66$iWp#12jwZA05PySK+!-}OK!UFGZs_FKsBfYuv%iU_64Dcbo8$cO10Y3E$*)N#b z8`w8)Qzs0wwNzC3jv6<@ucoq?b&GrE(90U79ZhsnxDQ;VmUFN|m&P z2bk1%z}{$;Vc!9I^oZyp0bYhXGn!<$cYrIcdE}@6@phSU~Pm5qC*WU|0tf zO*W}j$yf=buPjiZ$XZrEK>r>6 zmW<3<KXol-4iXv82h>XG8=1aEF{K|hGA2GCdR7`l-m8LGSeTI&@ckH-{Xy{xt`lHdX zbRn4!_F33pk@i{kms#O_6ytC%g-Zg26OkU{E&9Fz#djL5@OMAbg9=D;$dhlo!|;m&^%4~@Zemw5+v z{J8IYb;TLq9(|oL6$ReSrK;=WRK(#EN|(>i>|Eu9tXqSJ&rSqlti>!`rk^!(aIdP% z*?D{n=AvM5^7LycOGoCWylOuWZHikdB=}1tz?Y(m7l=YJ=AvK#!6K}ng_MJx(9I07 z+=rxcvllpA@V=Ofcl_#RKPd-U(ybKdwAFh0yfynYQhMcNThE_H48Ku@`fwJ?= z*QDdGw~KMMAGn=6$J%~?Th31VbGGkn?Kpb@L-Cw_C(ggaIyy~ICMKRg58JHrV(Os9 zi7b_++84w9X;A7#F;1c3UVnWo{(b<}+*KTxam&~hh^1HAYupD44c4`{1?5*?jTkZ7 zK9D7{U+v$sP4@ow{`fl(=f8qka;>*o>#M9~^>6|#EwfhDBME}BwN|K5(LqvsN5G-;+fU8J=<*=a9-NbE?WvXMINo*B2A2){!DaiH&Uj7B*&^nl{oA3R<-AOhO zs^CaA5Qm)&oHua@d;xG|Z!(C5z!y{$R2)~lrF%u-W5Z*^A5G?}^#WfAG~SOox*QxT zLA^tyQ;A|s!c*VCQ=h?8CjkzV0EZrV1z3%8ka8ZfEj;$34wcG%(Un@hvTpku5lhz@fFZ z&U*HIQSfxTeYDe=y?eK;TD5A^-u>5y+wHIQtb(t5eXH0bN%P_ZUQ@##Ft+sXV?r2F zBO>>wjT|!~K0dyG|9+^n)IG-aiHT3lMB<)D2gCXu2hT038Us!u5azGfYu&=5`i`F9 z=f~VS_3nEQ_^;@`G2U%{eh($}3=enH>ct+#$b5uML?u{8A>=}`foxMMIq#9NT-xvp z$*-!km>ot<>lCK8gi6aB7!o)Zfy79>HB>?hC1@{l`OwPGi9OtD?KLB_JcL|0^BJj7 zsO-x}R(5H_FC>>Kc=JX4FVB`Rf$VcVgXI_)hs>hlRXi2&gPUglo)l*OZOM*`{a^V8 z*wV_qS`HSn`SUjwAsaY9zV1S|(SpXK+l40&$ipF$!Pk4-vpD=c4UnJJ)F?iHH z1IJ8!Y{F#Os{Sz$vZ&}l3XsvwEi~ok%(%PNYY>c6Dd2Oa+&Z14Hh0*bsj_m1O-YrL zY0U>xl7ylVRo^W%H9@DK4EFU67TL2%R-tU^2J5x*Y z(8-^qHxNHM3Q(an(IL6IyU~EqMv@FR43)yIqPz&gW$&SMSTeSFsI=Zy&%J83e)Sd5e3sxxv82bY zYx+-l=9y=zs;)ZuXs(xC$6yaRabk$bd;){(_iX%O)0VRe|G+^1P6-b@@W6wKU4jGC z(&8!E8M%o0!MkI0fWG0_+K)f}7@okMe1pEF0=SqN9i1t}Wb@Rp1CpYP?z~HAW*@Kmq8RG0os-`A`p$T@&BxVrxJv)0mJd^W$ za`+JFy>+#TF)_8ZF)@j?q8_9P6&y&bgdx`1F%DAH50q>W(@~Xy^g&Sg-1?0)s?^3{ zwDC554r%M4uftzxLXx#M_$qQr5He928tUW}XMarL6dEcWAOPqB|Ezwg)!5alTP}_Q zZ@rwdj3f7HjdYG9$iGF!b!(-i;+DzWJ)6NdC(Ztcx)ZnpN0@rR)) z7iPHpeA0zNS0;}?$eMQa#pX>Xujuxm;`__V^7yme9peu-e2l+G=bkek_T+{Pe^MI% zLV5fPG5#>@mB5Gv<+L>Zi1?!MCnYPYn|i=71tqjJ{x`po#{X6i7)M3+jvd`qEDEd}0MVa-80)0h+zeqaD&-w=#=HzEtf9LsD zg5fuAKR9paBlqKx{Ntu~Yo6!7pN(horA6h|qkNl+U}$#pGDXSf^0UbYhvXRIuR5&v zum-&*aJ{@CwLpQUh+|)f7Sf?{%6Yg{Cvf~F9JeAbC9PC(W!Ov_THXh&M`m#9gIYHYN zi7=gNliGJRJ<5qAy`IzXa zMJ?kv=%+%$aqsriXVR|LHymB>z;R!V##h4eKtun*69PN;dVIVC$Fmz6vLzh1g7GZE z{4OqX;5ewIO~P@L5h{MnZxn`>alGT3e`*^_Rgm>9kLXV_te}`KXq!#c*VP?9eEe`- z-SNZV8FZ$yya3NG9#^ka5LJHg!Eq0oOyeG8&w?t48w+iXxmu@iqU2aYVGb)MvXcz{ z40wUINt7JxGt6G@M0RfdMsAzWEpYPzuJ`~~^i&&J`cG{W5a4KYJnZmQG_J1h>S4KQ zo&kAfT`I0z^$_(Bg8rgq9uLY*vlSY{8^fKnxuPDevP%D{X$W(KUPSIE6&jZQQ_F7o zLYgVTXjek8taTe4tYAZfIim|kPX*O1m{MYM_+$gGZfiyYZRG7ma7mjBw1~CUl@(Vm zQ%%Rq%-s#YqK#Z!!5K+6Y03;?VPS@{r3ojyVA+v#I-v!DqN1& z(yK^4A*rTuRJg^&#Ky+N^r9^~I;L0e-Z7KhYAW+G&Ky6Jfr4y#&>5!ZRl?+`*BLNi z2A$pj3!Oq^&}$*3X!XtrJTM^Hh+k}M*T|?&p@=OF?GzQ+HMUQmE>WQf{tJ!j(&uJo z$GdyD3@6!7@gZS$+h4Wrq~q_ni#*!Yzy!FhF2)Te6kSBQ`%86NHwe!@2Tolm ztFNzjiWoF#kRQ*QiV>;!$6C|?+$1u;(C1&6{`iFZ$Hv{$FLBf;g;MM0=M&nqZ||-l zY=+&~q@#DwGK*NKKOGU*pmwMP4TnJ!=5Dg`-fQknN z;@#Y3&NSlo@8llSIz@hGBqRKFt%oKHbPyG18QYhLcXnCY+T0wQ{1d% zs(gY}xZ=ng;XJVX2E$J}e zumceRn%zIt_EuOH+L^lQ*zw+$>K*F(uj_N_bC}xC<6)Pl1OB5nK5y)6?90^P{h>EK zq=qvwC%iPo18B z(fSJ%5(2-f-_Ma?b6*#rZMWC4AGkvgL7nruz*iPsww{0@N#K3m{7hZwOv20m_EZbk z!sMq~pnJRdR0}lk$?{Vz0H6F+I+LE-p-ZE-{zaE2w^gCUB$pejNIZGVbCFXqkvUqc zXa_dP&85@-{mFxm-uC3(@&CU)dFWrB{EuxW{3Txj&s*?Umv7C@24Tr2Ujan;LAY$Q zi@J3GvUQgOvo`>vH6b}$Yl7-FWr@1rx`bJSuLjOU)P<%tXi7|V%w#`NS7a?I1Ia8S zF9C#fLVV;oQRimz@C5PnWbhKWaaD$0lw%%sK|7*A9il+z-DRm<3MEB+FolkyMTifD z_d*IXVIj&@qvAw&-@vpJIptY}1%;9v$1KfA9XIhlU!jrl%c7)^pdwv9C@HgG)fWZQFOym3(uuTEweKV5hLi zvND9bAPf>+;ZdrD=TcBX3O3>9R=WtMillcbcoA{56s(B8-SIOCJ3`RIF~PDnDFi(X zp@>7`UnEVE#J_+GLEIjA)jX*FN%4wIc>9yFLM!YRNO)MRAb|1xdNvm*!VPaisRXv@0IBx!IQ{!*q|WV6?wcX+Z$1PT4GByOcL)pD+?1+B z;szUn;*l@|wbG$NPT@Yx6ct(P#9^om@rbm)?ELA4#>UH%NU(j~r-^Vfkaq5~4=L%( zR1o8L1R;p4nG%c#F#iDoPA)z^n>YV*-f2J*+&>?gQg?3U%9T*^r>OcP`C!fX5#Uib zvu3WFv_~O(|C<7vMD?maQs9QsOhlfYpV<+|zMZ9@;KvWiaS&StE^-1qTG)3Mm;8+j z*PXt~aUmfguDRbXa-^qGetclU`w7UMfjuR>wT}8+4Ju7XX=}O$NVK%J04#83133q0V`q_HRfmXvT+8=38zReC47McTMFd~a9j$ZwY}u#j7yfef~8 z-ma_`ziF4BA&poEPeN4mud7LRe&%n<$~KH6@3hc+^}1x`I`W*l`gCACTg~PsE61{I z##_sZDvrVNo)$FafrI?COO_E*&fl9u5Zwuu=G%3^9g{`TqMcES@T%r1-!VfxIVWD6UKxPRae$jo>jsqW4Gw-R5=(77t9SBLD8mc7JOh%ME(*_Kl1j4tmEw&>>K16hDfinF^Wvr8!kXG%dvn>3ngzu_%u zJ4+FI_9E|L`fBZVP5-L>E}e3>wx=+sL6Hh1a#KVOy8m@@@x{))e*e9!m6fvnG?lRL z$9XH~#`WRGNP8cgzwt&qxTd|}nfeMr-+i~YxY-noJPom?=Hk8Ity%L+c8#V>TwGii zO-=SMYv!rzp6JpgBI4d;=9Qv$u7|4&(kK4*WQyFkX!Pv~^i4qDqJfXmjPvq@VH`pe z+7H2rq)_?`uX9hRLO{1+3MfS20oeoc;ap)Yzz?A}GN7g~L@Kh9(SIm4Sl4&oeP{Z8 zgF=E{dG+ZD10gbpg$K`mJ?XaQQ z_=Z+^wrI8btJej$_5gR3F4U+I&~J9;-3@4(pwM7}@RVQybq*zdk}M5HRMNUoDfQlf zO4gE#39MZN9maV=!Gl!-#s-QjAd7>Wk-YBy?)UQejKKJe#riRYDcJ~&&j_p^A*6Ga zHW~}cc0kK1or05~{09YAF&XkC8E7~QEmoWW=9cJ*pmmtq;Q(mtKk|{ulP5p<(7pE$ zfn?jSpNxYaxIA-zA5c_HQ18KyKRu{xgqW;Gc1#;R1jeLee-0PlQ!QViktjLo96~TMoiQ>F zodYvUqk*xY!NA`_QB-3#&^F)b>pN@K^k*JP>WvD8y+=Iyj40}xtI@COW-7&`61g>N z5@AB7{S~cc-jcQJ)-G8fB4!moG5QNMV&%F z3B!PKw|pmKRA00kEIGu$o0Z%A2y-PQLZfLTaN~ep$vy z#)qc?fhIrm@JNuKks}{|B@qc4)KriD3$epY)o@T2j$3PFXLgblGqU%onlN0WRF9pN@hQKOIGRUxgjxIe4i^K|RV< zt3Fzm0Iz~M_UR(irZOdj##%vDs|RWI6}48VnGyX|RG0z+U{`3N>;~!sC*;p^>pP?M z326O9w7xT_OJ~NxmmFF%nGHdqN#GO~0nVL334AC3h{;(3$Wl-M8UrsWq~^i$fC&c= z8W5+zu5nOrjURt+Ulfj?@!XK!;qYX+-lPtW9y0YAceM}Hi`=y<*BW&e4}VwJSfE0z zd*ijd;~*txs;WvsN)!tgi4+Mz`@8&t1&dcLNdTMjg8g+}O6sRS{`lk8&*n;54rke) zRV?{<89lMJP2IvaLgp4&2fNU9MNXNItQtOq=o5O1YcCi zumzuRnod@c*-{uc*Jx8fkBw;r1G>a2NK-20?YG0iW{;cz2#%XLvhRZrKm1VA;HUx5 zzWnl(d%F68Rc-PT6%{;>I38Q04F)wg|H5l_+n&9-*3eEvhYk&Aww%2p`A|N?eeww< z&3g}Jf-TraLR0-7Xv<5$m^pAZ!#c^WSV@3>k!F}?$%@Z+VEar|1cPX*)Ec!~!CTz3 z^MgA3X*xyqxhE|b9_2L(wSSZtLKMQW20@essmq6>v}6o~%Q;Oq^iq++>~oC@8&O^u z6BNJ?Ac-{PLTI+ptdllpE{R2Wp#tT|r&6{w$ z!UxM`bV5QxSCMh65F(5Tr(L>w^^c#f1QU@YQLE{|$JxjRz67HQGf67fiiul)4W{;T z(h(Gu$Y_TARitQ!e!$H`#wW)Wxd)Nx!8r6_I(p#n3pRTIDX0;cT;wG~NBr|ZS@L{o z;(?lw!T?BIoS!N^USUzwTcGG?;T|3{XLgP3*Kc5tZcz5moH^s^`vyhQ=dNAx`K6a$ ze&*h|D3Pf}joepth+t^%ioSMI^T@%h(1Zyp>2nV_gG+k7vG7z{g0q+Gv$(l1SH7(P zGL4dQ0No4XJOfU1@7a@qSg&&Puv)#Vup}oZA&&F`(C$azxBDuyh^%P=r!ojC0|BTk zl~bc=0hJTYtrR_q;OHPtN)Ri|PIRm=uP9X-Fq=h9<7Trpd$_^rL7I^uVWf=!<79CA z>#wABnPQ8>0Ml^5)QB-RGES=j2tmtug@(WgTV8>M9I6H8+Sc~K14i(aMlWBrIyBTz zrvh+NRO3d*$H%(~%xx}n{qxT?YYO)5J9u(0)mb}qan~Q{kEpgC`~#Uw$^@pEtD!!? zrdMc@a)oH9u`uMh${(dxd7)KZ(W=g970URsD8$A(0Z-8`TDLi5JOcr>A>9Z%OqO&I zC8!!0w|lqXqDM}+9b3NLw(ZAVCu;P1QCSfYQ6V@V08f16C{Lk|)+0bwyfM*g)?s(_25Sb>Nsa)LOMnRDa4M)A_xQ7M>`V24 z*q5Jk8aN7^)o)@CmGR3}_NDzJyuGI<<=fhKUAlV3~vVwBr zX^K=(6P@G%Ao&7xT#{!I*{WgNg(uTZTeThw7N{-OEG&c69+2F5T+*9y>^nu=pafW< zxH(A)NJsV)+axMm4z#qOB8i2yh-2*WaMSE=PXKO@M0Utn1ZsrZ&Gtx<*M=f zoU|gmbcoPlUBL=Z?E$r$2L~OZURXn*6Y97qjX&`TH2$F&|6BM3@{Yr-(@JQGi7nx! z!ou8awuuu_#aHEG_Tl;PP(QsED?Mng-9{>#dU_|?1L<2x- zK+`cJ=~zw&B0B_F@&Oc6C0-juU47%MT~y8L)_v%s@H_vQA*+|iBacja^7)sd(=W2{ zz7I^B78()|9MnS$xNw1_va+zQa5gtKFqSeAO^Iw&|eE~M1 z2XxG$1J0#U1b0BaAz=xBT5g;v#Q=~Su+bnlbkxy-NDqxNxt!kpCK)Ri0-`Gc(UpMc zLO^sOAUYJU!$zy83`v{=wty!zz_=o9#Adrf2R zH1NKJ*of#MqeqYKZt&_B57vIv(Aa1fNWo!Yk3T-CPme(SyiTKW_v{*joNHotMH@vJ z-H_YsYY`=HlqB}_?DkekVsEakWypSbQ;4jqYd3@U^JlgKaDEqa^sQ(UTm!%O`sXue zmaSO3^>+}rwcngc18_199{F_HhK;{(`Uzn&+fVM^wg$TFkC%M*%g-pmx?;sgAFcXq z+t2vB)yBaa_Mc>x%FH-Yq?IdkSbk<`VYf8xmr!+OIt^p#h71p_dV{qKMF?bn{X z_n{{sagFUYWzKADb7oCR0^^e~e9Fs-(B9JivF7Ec#^bht@nWZc!fF*oOrf9|VRefD zpN_kWw*<;?fd$}cs0akPk~2j3=*4tLT*QKA~ryG8O3F!mj(@kMeM!297tEBMFD4pg$jK6k2nPv z3`BQ;-4O34f8U1Kz}cIKjWY*c(~3&s0sePfL3BGIIlfWg%m0V>OY0xiv!8?Yc_G&4 zfxza0j0=EPM?z>6-P;O?(xF=@CA-%?% z|9RpCXo)SfbI&Bf_yZc|T6sI^h0gm9m8nfaRY_$EUqj`RUpK5slFErA&IerA``pJL zCqjuj37q^}Y5}!imaSer3?E3#FvI?$=HpcxHf&h42$ch%c~{J{h)Gy`-vXA>{x17i zR5Ym7f@|pQ1Xo|xc!%zk7#3M~>TfNEFfKu_utR+cib|J&P{fqda)wYrgatwPkqIT{ zJvd^2lRLz7Ns|f%s;o(sRH-CSy8h#>GTkEI_s22PPhzBR_`cIyApDTBLkR~QNMM{B z!okS9PF93Lct@@lTqd75injz04FXMr8{uyQT&6=6<>6%P+tD^ur~{ zp8mr?h5-wN!Ye*(5^XHSr)e+6)lI{?0V!Ms(s z0^R>_fnHqSa#=Lr1@Kr79WW(!4#J=edz>OV<__?KC;gjWOVn&C#$YJ|!Cn67kowHNK6O`)Em)QkU^rf08SPx|c zC>RQgb!^bOw~NsehmRcU*LCp3DQ{y72@4-^??Vqye){RBr%j&n80wCWn>cRl&;gIX z_~N7ehQ^|zgL7{&x&`*ytW$sd@kf4Di_KPThj7^1aP`WS8YsG%&ZYcHL+hU1M>B&v zAsdYTN@d1wQ8RzZich}#)G~h<@SikSFCwgY?b@~LKb?!rGQ7AL8!0K~E}c6J8ZQ_j z@b@i+_}MbbEJ{CrRkPx=EqnIt*}YYaMl4jlJ^~uk9zl>C%z>U-HOx&~5_r`uwM_=I ztEOAGnBHkQP<>rfYlFLq(NO6EZ15zcjlf0(qGLtuxF&rot)e7@BQ(Gc{R63TEcnj} zWT{Ans)@m@W~b070F=NB`Xff0Txhj3NcpE^r8G|ERSzp3Hd-U4ZzcW@rl|kz1rmk~ z0)~)XiE{c30)`9%h6FphLBmhzP<5G9O`y-zM=bXRFaaY=Ma zOn}9CczjY!%m7q5b$(|;d|y{o9KG5jao9`oY#y7ZN=WF@!!<-qhLf2~QIo~X$zTW@ zI(Sg5XJdOg^eShqJ$k@<9L64}PnIs6KY!uk6)V2qngI3Ef9&t*R_r+V#~-_P?fV4L z;O`K7r26o~jT=ARN-dgea;vSqf<|8Rh`gi@4h}Xti3$Y-ieLwN(?HN*oxeybCiD#- zNdA*_3-)=Ka0LFEs|q{p%nFXASuwc7Py`N}V6KFb#@!u-Wbi@Kqr)@+iT|$mN)RTE zj~^iH2MCkK#|sda{O4#m6|!us`>^Qt!dzTuPYRTuD8mA280V!BN!lA|2aZy+F)Pk0#d#q%XkIU?7<5h$TL zE=z6F2H(uVs0~Iw#bD4{7_pTk+Kclz{xObc+_9CgRNxu}WN{u-lFTd4lkTSohK}s8 zI`jz{D`Dma%#;WMu|mrtDBW8c$0x%@Sh@fE% ziE=I%4?N2Q@Nt1;08En&3>YPmwJ*~So7vgpwa5IrbcqK&OIAMlqzXE^)5{p#S{|{_ z&@5lL_3+M}^XJFYzyV%4r=@K(@jkiCWs0v z^I6O`!Wl?&7=B>ewr$!(F?#Qaid^QmSZ~dz$wIe7#{ja#E!LY*y;M?rfa8`gNpJ6m zw@(4oW&vvbFn;~OoB0umgKm)ywBXZ$R4NbvIE5drEk&T7#33X17QI8FmvkLo$FPQo z6oh!=UMw?YiDEs5dwLGMAL*WFj8Us6Pae|`*zMk}+klZtV-f}pN78*Fh?Je{)mQPG z7oHNGdiJcTsnz-QiHnO%@icpQc!h*Ubne=%dyk%NN;hMh-VUZ17L;mzTLARjiup@F z9FE1|P07agD)_89%0q{iFNdp{xA^tfUw^v%qlMz4MJrY=p1*eM=FOY8{P5j(-^Js{ z@A85mSS%F80)u%|RW`bm;@Q$vgSf^km#eOnmz3w`UOb;$uTZwaq|(w>pPN}G`qAQ$ zkHr{t2r>kXmAw4Yb@**_2+niEii>~vV}tPoqu6G*AsYz4j(aAgp1#W7mUSBm5%8t*VD&}dM8FVZi0%PTtu4og0G0B~Y7o~+%L>K!(A zd{W=&ZWx8=gx-(bH{->ZAMev|;3JPTqW!)DJ%RVChGzzn?qzlTDjMpR~sMI-HUW)-1{JBkq@)A=B7|`$?!dev z%!ITc|8Sp77ZQQtRC{$IV3r6BPsD_bq{W{$NpXipNW2pGSb$ENb(lB6>canzy!Q@| zs@ndC_c_xiGnve!O%jq!LJGZv4gx83=}ov|!7HfOUa#d|&Ey0@KtNFt1qGxiND%}< z=^#xAJ&+K>r1#$2%7*~TL;m0J?dI$LXUr0)I_(Tg4wRX?& zzCArDe~V6|i5Nb7Sfrn4b4i-5ngA?Yns~krO_VJ}e6{+=Ez7V1SiASk*|SSN-@NxY z?zrRp*`r5~p4_(SyY=fYU);L=+tosRcxiD#VL{d%7y=qADvE%LnO|maY*IAab@-|Y zEeHL-mKP#iGL5JT098mDf$VG=!eaeJc6b-nq;_{8V+(TG&_=S20|*uiBV1C8sHQ?f zX;E5%EK6fm6lX|x1qX${zJ*A+ zkBo{}9*aUiL^MNTaoC!nG8E{d-2e7UsqdoEch92lXoieN-$gSnI7nnsG%?EYAh83f_h6f( zH;xNJDbZz6v|*$XFQG%~Nt)B~RS=2`h76Vm%?OULV*_&*5Ana}H2>hR@bIqv2ThnB z6Etv4^tAUs_+U=YY0**0ztXSEfB~`7(RVXreZfKCyEFa~L4oPRSEEaas7pO``oQ_~ z=Zlpl`e^}5ZQH}5-g@W-|5c5ht=FM^LG z02^Jru;Gi%P}3jyWy6Ng<6s#kY@64#pMJIO*s)_PR;>8=V?hCj&4!w$ga};_EOt`TES^-90=e zPmUWtq~{~iEME7~ys3`_2S4-7w5j8VP9c!qRc;^5o7|_*+wVL-@98PCVhFa!?V}fG z3>x&oe_na-YA*pPM&#vM}6aqCEC8lgSUx0s&rLJmA52FE4)p zA_sVQ$mo-Y2j!^r_41-@h%zrPF`=!O7yTyl>gg4L9|Ju+>`0htN1`@_5cnXUy>uad z^6@j7D}MQGV$N|^d^3phMawq zVQHQXJtx$dqP}wuKFxre!g$9h?Bj3-{Z8M_#J5x6;qWM~n+Ug3`u-{Bbx+`0U~D<2 z35y`TLv~~>3FHF8ZDw-uxJCSrRSwL!u!8eK5Y{1Cz%M4@1q!!pa(5sGL<5}u7SfFQ zDbSaKFy(jxmzbm#sC*Ct2p<9nFc^fS5iqu%D9#Mz{X+Rzx`7^@-Ysxz5XhfQ=*gv%I<;$5qVd-i4V7txDJ><2+1`mwLz1rVLZM*Cxu*mc0FDGBl zyOU8~YlrM>F97;p`S}aU$rpC~bnwK{y&E^~-FtAy-ro-j+YM4%z%vSCnySAXu%=X5 zz%XN!QazW8^^>E1KB%7#^^=3wVPT5V!gJ|1s{)^iTW*SqX1{P9!+{F{M#)s?ckkZv z!^xxPg(Y03&B~b&hfb*ZR$z)>c5d6Y^B1Uj-1K;u;5-)(l9A9CPkku%3ZdK#z}=tq}+k5nVOC!$1=~V1NX<7kh&WncmS(>>K!B4y3Lg2i$RWd!+jI;* zkH=?nKYbHCc?!<&@8}G)2TEQ$3*`MFA(;y@zO0NF-~`~QWeqJ5E^2|dNFIZ!*}PkK zNMv0t7Oe`-yK2EV4?-`vAlMe#U;)@UmYB$%6cVEy&kBjOwQ?B*QX<;CmCG<@ zfzk|c8+`>fUx0g%__-Y4iC1*NohdFh92C$6_w9l^YwDYt>X8f#FRQA)3MVBBL{!^`RAYEDz^9F$%BH%4RHmYXbw*|tI91CAfK7+k`==oPO=8S&XAn}(91MF zM!`)hiS(clj@P<-`gCv)g+9X)jYJDoMWLZOzOkGE0AHa-AQN6zmD-2{BcjHPm6e!( zODgNo4Iqa4%KS<}Q(se~hT5>6$C>qwIB2FK(%sSK#7(gQ){K+l|Ns9{OR@!aMqiSz zpwkuz9it{ep(;}k z&YY1t(no`9TF{Z5{S4?->PV?^AtK7;EI2_K7!@7XB`R85-pK1hLyL;4$a$hJK~cxl zYD~F6=$8Q?hI%o-vL5}1UaYGuL625qN^Yj#>+8#F6BM=OI8W?D>Nj!2c%gTx4+*z_ z5NbC7wIkeq#P+nKR5yuIFI>+)3m9(i2dVYvpN}7JN3jZtVks}nAHcDCKAy78ct1$I zo}N$%7~Jm>Fcpf|v>{@kgpcn}QtD^!_sF@vK0Le~NxMlT?ar!=%-!n!AX!&eHv{Xt z4Qabcq+L>QyAlZzJn6aZ$-Ak^$;o@6bq1n!`lEGvp><%VY}fZ1>ie^oZKr_KhPJuC z|HZZ;8ql?)fd`uAesXX&jaV=YJ9weukOx^PoMgd1=OzrGaKg{s?-7XDGHr=OLquYd z*LySMOL&k_#CB;*EE*yfnWF+TEVau01k>6oL^D7J5PhnNW}+TWGgodLJck4&52Kmm z$A4}|Gfpmd`h1LIu;XP=%;1MojKksiAjP=dqnOdrv1}s}$Zl*)FX7?!52Kg9y(h-9 zi4Uch=H}{$(Mz_iwgApC64*;ScGyHIRwT|(6hk`HzD|mv@zj=Eh!pe3xgCdrIr=b) zp_X}&Yb0HbfwSq`XqZ@PmL#!uw8;b1a<5HhS)};p=b4Pnu<(d=<`@)IG@ppFkXUZ|BpC(30R!q9 z>uT!o=cp?Me=Kc6$T@(?ny}T@28c5zCQRpi`SqGLUwyR(9)8$aSD@bd1oi3DC&XuLw8+32X#)gq zLI8q=TU*B2KCKPo50`5<4gNTdNux0VrcxneGKyQ)YW>9t9rHP+1I!9Ky&eiFS}Vcj z#UFveg;|8dT$q`e3JBxW^vgHWGV=rntJK%o;`ccUb=eq?|b{abE|cTcxQUGyQ!XoC!@A zVoFj|uinDJt*bdXf8W9PkJrEKV2Q5_fmM?GyWtwr z%|36Dyp*1`aEQE-sw_^)t;5pGCn0H@j+ew_{KOS`KOGuobs}>->)yPs?RSkE66Rj2ZSbHynHNp zOxD35dJP*kcIMoFUAyKdb#UwyL<+eW0U@M^(|m0pLB965UQ$cYoju~Vr~jP^S* zeX($m136i6W^!^Suh;XpE?&IVAeT4PRM*s0m6umhQkdGtMq6-Zk;-6D6=k~KO zzMhuu-8***kL(uAq~0dIffuONi?!+vn4+`_i6=^}vI7SW9TH$U;4U(N&01+$T7es3HVXo@78avyl@$KbKqzo}e=LRKgzK@9XAh%ACKL#bh|>U?w0JbI~0 zi_0sD3Q9^ixpP{0zSwPRYnfIsA+3U21zOtEWW{ zE+LgQkjhGWIl3QgV7a-rlxx>iDJjXsdkk_uB zJ9=1}nOq1z6(>LW+?A3C%jREonvgJvB}Ntvz|)Ud}Y6fns~ z39`qJ9qSki==w@l$<3ZEAoWF-*_@S|dk4OJM~>`Ey?pu189@!*x0hFd*&HgUZ_DKe zlNK-Dds0A9Yhy!0V_jYO-<74HATtwr!pjBqq2CW3I(6j8sY57>dU{IA!C8X_32H|Y z%%)9EJdy%EK6`d76@;zeJ03fRr-tCUxw!%GU_5{3%;n3e`;HtD)czj+{=P`IhydT) zC-*L1oCH8pLEXR_8fxq6Y8#}o&S1#CApWjZoQOS!rUHz#J#3wZ~gA1Ac2@@rrIbggSqPHCkW7K%H`Xq>$F!V9s`o_)!_ zU2A;W}XtXBZU@ud!S!>n-I13KIV6U*xac(gK`;C||E@o8Bu%W{ML+k7B3mgc* zB6+g_gsVbNZ*VtyBTJV7sY59D5;WLQ(3ymQl9K$q;_@;q+RMs|^YTlos!ECr;crt| zTv8?Vk_EN4VBA|ky%z8Z3xj_chujkyKu;)vla8L%A2*v6qKD``iEfi@_jIUKZsWMp}n$cvJh zClXU?4iYlvVh|A3I+_-;+J&B(nMpU{`*ZV5dOkwXKSv#E5U(@)-{xy!jWEl@kwquj@YlQbF) zmShX$9 ^q}(zjMGi)7yP~$;P+J(yndgmEC|BiOQ%=LqSzMlZ^UA8#zX)(9(d*0s zotdpzuGZ`I%@wu^lc4-`H%K)nWjsub6CESPozlU~g8_0PRP4Q=tM=4t%d<*rnpn}9 zqg&zVylO>mxmJibrXl!k&u5Pf!>V=WqtQ`426j!vsX<*j2f8s+m!YwvM~{vk<|fw) zoMUwe_xt**?tag%xbs;L<~Of}855*L%B1xU{>M+-QubPzCmKv^D%mjbWj zc=dF0j)!1tF&U^g9gu2R6A3bP@V{mXvKo!12AY!Zp!d=Br9w#94GrMZD*Ry!dq-I zf;N@mA$A!7n~Fzbn_6(c5f(X{Vv**ju1@r1Orw~cME1kEr=^LQx|BDCOk85f2KX_Y zQ}AS%qr4_gojf-Bk**$^XJ4E*b>u|gSg(s$?PQCi}?F;m#0Bvo08fGW=?CeZ9O=nWh z5;8!E@fIj9w6G{T&+B!V{H01#HFaJh5HGMt$^4`vCqwaEF^inoQp5$`MoXnt;PAP~x9&D-bSFg? z3=^4($k9T;uLo}`kOeKWH{vD+^^fw-heh=CZ&0c1+4EJby@wI>PA9!Y~OczEF8 znLzZLiToQsRaBPerl#FEcl5})n>Utz`q|pen|2&HdSu_u7#Ol&R&PF;gK(vslbZ-z zgud8#GF$v&10c?Xd4Lo?$xZ?FeS^5KgNA>lxbI@SVbbiSJ^F=Cho^LH5bAS z68B(E4KJX};$FthdLtIfHtfpRXT?Cghyhwej?{Z_Bu1*vjP%Uxyu1uA4RRZ*)EbRe zt2Y<`0H{)dM7@wpO6)tM$sGwijqWBdUtii=Y0Pm!F$9dfMz6Q=bMhL7SgCxB2O*_k^COPoy`YiYBylkiS_|1oh=b z#EJd6W9OM{bRzwREUZ=6j~zR<|EHgR`bEI<_i5$Ut>3R%wgOW>In#W=ee%hpV$MlI zN+TDR=ddb!0|*^+??3f@oI36yr#|nPD@69_7OsLaq6|UcKq{97Lt)T2Dk`ee+wZ4=s+5Kh+^aLh{J!fmFrZt&AanA+wyk{GWq4pa62!^yMDc- zz^ceAxPHB$MDQ#qu-VWSqBm$^UVdR-VPQVXWrrmYR&YBS$N4UJ`uT})_oAbyzpvTf z7i!vvpDMLO0NR0KP&%L;pp?f8$o!($DfnMj76K!z8gOYiAAn&a(ME?EGde^>4DxyO z(HVpAlb2UeW{F#7kS8)OpIEr?#Ia+Wzy0KsA2%c0vpp=8X}7JTx+3uPZg+kYnij!)Fj#S+4kU+i=Qu)`3|@*7ykTneXOWzeM-H4)=;Q& zhZ0--M%)1#rYc&#Y{lY*iGWn;0oanc{NlZZv8Y@h$G?eYj(&pyJ{2t}L>93M#ls(O(7u28wQ;@UADU_lH!#=a01#MqBGl%hE1l*c?! zf)1b)KhOzjVZtpyg@?Npc$ZA6ku_;#)s4y!S^ z%Ay?7U(iJ$E(j#01zDv~g($U9R6t$f`~i2Xt#7Jr;2lcjsr7<}f*Go-kyEgws=B(q zwz9Iawq7u}6c8?b#NSEpogt16$4iD6hgYRjdbglPp{Rc-#;QwW;V$;BEuGxV$z@AG zB`D~D9GmFKee#rX!$wXHi|iX=35^@nU2s=;29(z#a-lT9TM^LNPjFwfdeMq?pZ~OZ z+nyixeD~cqf;)=NLm^zn;~*n1LvXi=JrA#6(P1t_phyEm9EfhdI6}N6Q31SqMW;FA z{l81&HVmbI1^w|l`Xda~8RkT(=9QRuDDQ$y!7F7R)#8pi z!R05aPp{71LPJA4c8=;(SFaB0JOr-PLplf4Kk?+_Go}GKW#Wv-pA797HFm~xu?(?~ zY@zJA86!F(uRA!lzqdDq$mk7TatXqfR(}4C&^zlk z*15N{N-$fOWMvm&jh&a30UyQ;@rU%bKjdbmt=;xq&fEAD7~ zxc0)2n#_+FdgNLSQ{)5Zh~tn}eB{OHk5+Gu-XN_b^6#ak+_6#I7np+b#h?z}z5fdg z;Y5UG+`et)!ox|1c~q8=aC`$L0DjTY4Zj2iTDhz&L7tJFmxXJ=OdWEgm#?RXbliSU zA)l@`a|$B8ECSmju=C=emd+)cKIyV3E{eQB$foa|Mu2^kH!U6VIr6!u-MUG#f;SNg zWI8keZ8(CUhirV_Jtn>W{_FEAl^y%`?Gh5yef<2&ytHhM;2j$qEC~7zi@z8 zhxHlKzkj4WJ4x`)&HzyognK4LUnF_Y&MzQ&!JEoO<&!oXL99aZKV(;=z(MG0Ob~yO z?A^;uw%xxxUuqSt<(@@N3*M!*kkAVsIzZLL;|YnJL&xbia*kpyhtW{Sw+?u*w;r|f zV3%&-UOyzV5#T8ZSH&hhjdVt%K_lZpBcbSnP{zTN8TSH5BqJC8WSk+5Wb2fhH?B}F zcJzsXW+klHKuqWaa8JfQ90^{0NYoCkXP+6~n%P%dj=Zn?ez}|HId=YgF<45h<6e4I z@Q>}*p$6mP=9NFso;{m%;I8{PX1g6KMcVDCoQOOY2q4b-nNM_0!beZtN&r*2%k z5+3Qn6&+qN0=7-bPke;npJw-Q$DGaS{lE@P%CcmHRM~EvKY8+GvS7$20XMIpsEqQE zqpJ+!^pAD~38BAQ>;^9qXMOzwf+@Q=I_H5iq!uEtQ_8J97*y)?I%VFskjnQ~3euV0 zg|Go}&X$&Z?K+;SP$(60+VHM`RFZ(PrVzX<=a28=AJ}EkBVE;%^I!e<%M*I&3T*j; z0lNJ;v!_m-I(yFakt2sjcqH(~PQwKyzis!OmV7Mw-f%p@$8Q%&b}UHN?3gLvDI4C; zT`+v}&FZy30y6T)wX47RX3Lp^1is+R7Qs6|J0~wc4+z~E5GNoM+Oi9w`xcPdwy_BY zo#tk|H#OUVC}3}H61>gG^lSD-0#GC53kZrHUjBg{{Qv57sXfS}+k*BW-MP!7+wIB~ zo9)uItCy}_x^xZtB+!4WK+h=@F>DLlqT1)@vBPiQ4o|_YCM>MRZZD{U4Bu1WdiRd8 z7upL|lN#LTHcXo9Cb+pJCAle;c_on5s|4U=*=!Xhr4`_#B?4h$5Tdfk@dUTvU=NRw zz@U)8kdVOpev?W|h=e+n)`ch!+Gg>wj<@Xj_BqgNVH4u{v;$uVG6GH&;$zuPZVs;% zWT2vx_^C2G8EXz~@X^^F1O11GAVZkWd1(&!v`AQdzGw@Qx+urb>VEB7cO%am1r@0d z20im!9ALhzPUCv8>V4Ftryt^f-PT;6>5g2rRV^gYO?>>y__L?a`yh z)OTZGIyxo<_%w*E+ykxLqhC@|zv9bi>5F1ZD+O7HfXQ!-KnT=1xTBn7U+`~D4j?o2 zN1UOB_QhyQnh`U2i2 zMusK;GLR0jSs`q(-lHBJ(yueLP~N~a;j})`G5V0RS7gtABN00~b1nk#=g!2@WH`Ld znfOTT@a{ryi18&QwJr5k<(LQyv9v@~M5YaScvy2mZeC7CW?o@wReeK4RYi-t2?Xa4 zg~dTo!3Toksd<4P=RE@+E3)~$0%r)S@0&XuHy=OQLEwJ) zb0ny8G;DVNaeV1`K~N1SM+!EzuY7@5vV;rE)c`HeAg5tvubw>t2FCe)fZ!kPB z#87AncQsptU=~=ql`kkr7ZAM!V&dJKYEt^D)PBUCB|io0Ohu3cYzCDtuHy+DxJGV*dMJlG-g zF#7t4M`E1_IfSyL(u&xL9Ktk|+v!g7P2`|P^Ce=9BIAS+teo=zB-3JcPJZA^8h216f%Faqqo)-eK1CWbqGI0ha2F8i9E&XiVJLBY8K0XNOg&MGWGGH$dfAl#8m zP5>M!OtAib-ah7gU$&Mj_`!0e-iE6vv~>5%6~mgjAi4I@e;v(Az!A-WR4ovZ`5ob!GP>Q`1w{!%62Hc z7cE>UFxlDjC)SR`T6wCY1NJw-&1vO~ROG;^J1ZFAw+F3q$e=+nkV0a}{|0txT1w^Q zqX=NfkeOv_voDf)!NtRT?>n(g32lWk$&>cciME1xHLwQg4G^ye5ghXw)O}VZ(#|wP zTwvVOFOm-VTQb(jV`j{M@?|k zM&RjC=Y;`#m<+;zj^r+X1IHb&fI%qypb&)`vNfg`XzHxMW`_ELV`|9Hm^GlDYyCO1&6a|DU=sLP4XvQs4GLe4^9!lZyW#n9 zcz!gVPit(^N?w$MrPrY^^BN(mT)%v`u&hgGH^H;Cw5mEk=OzpGYOaR}CpTCOrNT6% ztULqiq zP1Zn2U?N-?w{YutdxpKyo+9p9_7Z!8L+Q|h-t>-vf@gB_$^E|~8|cOT-+#Mi&6@Qa zZ=@v)9>Tuge*5i8@~ZE4?%a9y+f}OpNANPt^skb6zEcc8aTFT(7-B?ZLdCwy@ z=tTU7T;GE^>(q&TySDH9c|V=&=5FcInH3k-H^cY6yp-(oVjs~OfmSrMMlhg_XpP`T zVWJ2am-MI0xlRLP;LfpBb;&kzz{H7Ae8tPc!oU>e>0#;V@Q~ZN<>O^1e_8YWiWT2| zCoFf%%!K?wiVRYjE_b6~cf=D*+S27x&%mb=-d1uJ1DX!TQ-c{Y(=O5*9At>D#P20`f=k@EaBd=+)1B2ZZ@Ln$^LGz3kDeDEn@uM@ZCw{;^}mMYClB zvQeEs0!8*un>TOXwe5IPwR6h!@vI^Z&p$oaZQ1_qx9hk3u;sgro6elvzjfpKFBcHF zTLQ*yf@7-Sr$y3tTtr$X1*=KF35^tOg;`~SyBI%+5u8K%kW3Vg29SgR00Gxsj29$- zEmD6V9=QF_V#`ON4W^+DrlAd-5ie$6+ypCUFeOsyJ9lafUr{f37djZy4`QAbB?WYm zFQlXvGzTY#Bn@~CJRBxJ$WAO`#+;X4dTH$N;Uk73bYaM2uf6%~ql5Z&g_~HvDN`m+ zo;-Qvpbol-fs=%YTdCP4EzEJ_+V%9JO1N1xm1^LO(NtA<^}54RSCtQGtsy@Hvr5Fu z`4oD-wVO6?`gHY@gk_&DT)h@vGa$@GAAkJuhVRyI-n9N(0THpm!5!W8aj`?X z1>2#7YGFvaVf65|bna19*yI}(7Z(@X$r2`@Ueu(}d(k@~n36aM;z4XH7&8WUGp0)! zMmhK$F+AQ^RQ(wZqy+?u3&;xEKnhs?%PYjz9*x$X3_5=dbUqpsHyVT#hNi>=(Ub(l zayu0&4II)2Ah2}GIYMuO73n0vlWElr&5sp^SZpGA3;~}x1mm;`)37p{HD%IZi^Vc{ z`kY6fra9v?4_LOJ7}pak#=eg}`pl8c4mmRhFN63N!tL|Q3 z3bV_Vn~lv7V-}3U+U@1mbPw;U7km%<>W-E1R$7F=XtV;);?q^%BFE8p3l@B`c=6Ju z0;GA!6@sBLFT1c70%BmGwyFivZ-NT_9RL%YLkL3yi?u~&5D;!I6eVbB+5lGub*luv zi`sG@V-lpv03!|gYG_NKN@IBh$t;Zekv1b@VZFS4pg_SPZE!V$Mpgoc{vU56@qs?* zm3bHgA7BjhL9g_2S-uJXR$Mz^FhCoJf1n35?0q8rV+>I5w9g15igFL6g$bD0IlL=` z^2i}Wr@*7{>4|Z(=3|@p+@t~BdII0p_|dF!gDoI;L*Rf>v)}t~=b-+Ak6d0;U9B;g z&5dx&fU1?bGROEBjlH6snPbkv4t@>m3dkkVj`uZt_kq{!{r<}}pNzoFPkSOE%=}-b zl<~3KYw9X%3ewUrlQ-5iH?qG^F=K{NRUA4PJ`d)Z5#`kTlg*fc_9KoRjGqTH&OG!I zaRd=6)dY;72^c|f=&Lx4pb)ZJh!_ue!*IcXazPA0Dxsf%)CBQZ0h~?;uSjrVXD5&w zAu_XI0qXtalXAIsP}~scA-awo)C0V#Pmh1hkE8et?2A?ZoE{epxDTXp&_YfE&Hz`N z(ZQZzQZh)dM~>dMvk(lRuu$90qk{M6pPxGQ+pc|we%rtQ@c!*TBS3cRp0Ae2L5>l3 z)zUAP?KyE`*Bac+ZnrR{DzIyC`mF>Q_LWK`n+aC(MQ{?hlGMLfq=3rNxR3@3MhC_Z zO>I<1NI>)x(6+FGKw3L!=9&^*7}E*Bq@v_H7>^-jt`PAh@P?6ru|v}#s14XYG+tmp z{2y-7%Dt%VN4Mc#AtLj-gH|TYw?aNuBKFAx)hCW^C?z6I@%)u!F5BJ~m2D&YSx zwu!3>x2V}8Vi!D~pFXBr$MKIpH7TYydZtU%5CI@gzZ;IQ9oGJ@#Ouhons^P%d2gI>ZHPQ@_ zIuxvm=dFaT4*`MW6R5&q8DiEo@F`5-SPwXv7EK=FY$38Npj7@ZZX@=9D;Hx|a4M%P zOZm1a{WqSj#&R73p^Yiv~VgSAwxXy#Jnd00TMcdba3|$-3kP36@@UkP!9C> z?O^HB?~#}xLk7KWPv9p zKM@0xkRPOjj4bLjq9rVEVND{C2Ty{EyLVtj#)h`1NbOA8#<3Wck7881w2i@{9hYbp z;tUv|$OCESl?EkrELQXb3|foi;1QxY0eZnG1uOJ?GS=Q9ZW7G6V<*S;fp1fvzHxKn zq?l0liRy`w0Me+e3>TEOSIhh;0RU4aFpbsF$elQz+cv$OCSmb`oh#!ZkS&OIe5hWz zboX@vKk7rW0X{?Q zBsqcD9_m8k8jhjuArS#CH;d$8=xD<>3GQH@Ab|j5(3LU*{0<#Nf*W`rv_gM|i&JkBtelq~AJz z{CFDU!()VyjvdR&0<=cZp3$pMI)HMJUlL7BQW86qx7lm-$w(C8={%{v(x3!prQtO&--vEbS%lP3jx8P$&3%G&0#+S)15OGozZ zA3I{oj0t0BPniM*cn5b?f|+BF41MI0M?#T#xrR~&$mQowpF4LhS0=FFQ>zho{nOrK zdrzG`eF|89t52Oeb^7OR-)~&~#phpsyLvSY^5BeSCxT)jkCBcTZs4H!QG#9f8o@wf zBAOB8K=a4ly9=QGhVYJQ&jI=)C~J&xv=5PHo8Kg=>w#A21F9Q|aqUEsXMWjl&Y^iNr_$+5xVtk`_R` z!C#_yoap`foKVx?Db3zvUY`8cJC@Fqy*x+Fer>wILal650t-rD{S#SVE8Cr@7FK+I zVCR|xJAm=`fnymOW4>a==EY0bEhf+R=NXiw=dG+)0=tmFZY9!jw?uC|8Px^@AlO9| z27iQa1p$-_fudj((Q%?Tm=dZC$&6qYB{v>IyL#|Usn$b55t}eBPGDRN#kd&CxS&9g zJRGE;u~2Yu;KUfgc$8rSJD1D&K=4KyC|MLEBeEF|Lz)d* zUy5r=D2SJ_WvUNfePZg=sZYEbgRtUdZqNR6=7`?m;o-eU%>1X6!Xwf6^6U|jk^g@G z#aXdkyLRo;t@D3B0tmYHqmM=o2HrsQq^IFPD#$;0^NE-KEhQaHO59~ zA*=3Bgj`pTtR7{Q2wC}%M934M+Zq4_y9sb2p+u_l@jd|>y8$%Is246>3B}9Gr3*)4 z!jUqn&W6YDYjE+EGA2I-@4z<{2qQ>bgdECGz%O_P+zsa{KKbNpF=O(79aizW_i+6i zxH)-!)BXy46g#)gORM(+EA`PU;ODHkKa+BOGqQnrQxYbnm`PdNHj{E7OdiB2?ofb{ z)JX*Oh=D`wID^)_;A$AukcZGM8tNeQ6?GTm0^K2OfP3gE7D;5kw3;HGD`r>z`->&2 z8UU(#161`NP!$xOcsVgvkuQ)aiewV}#~Ub%xP1U|vy2Rsn#w1xw}_rldelS|UZ9V# za-1X44TXfd0vBVdV3DuA_RlH9`}glZe9Avx8$D)FCtqlJd^-&q(<*FW(bf0i!iN7} z5sOOzapu=;CxEaxq>M#+Y{$Rl@vx4;>40Do=LT%pu;;)L7)OsE*t22XLS#%>vT)rl z$|$sJ9nvI#@nL^o_1U^DPiEijsXgRPnf`AY}?n+`BQ1AwlS7>z%;e|w2<6mDU^(0B$|3FX9 zLr*#-ZcmdEeTek|iFa6Zk^Pf1L4$XPU^xMIj?#f?X4-(A=fL){g1XF(NRGGPojk~`bG%J_vyW41V+(%QXf*s}Gv!>4a0-#UHxx2=hZ2M$C#-ZLb8_PO=r#qg8b zvuE$lHER+DbK5cI>=bdl-IxD2NH9T{mj&-%1Wgf4YB19!3DJ&;0-G+NjESP;;_{~>Po-9Wx?5>* zqf4Ox3DHw2)0TGY2Km!TTpAri{lZIb2(SWrr%vqet8p`R=%6c2o%qs_E0!V`kX8w+Np0 zHL$(wPc%20J&dF56%a7iz!d+}&tF|FNJ`p50lC=uElEi+IQUSu^1{Bu0y1P56eue~ z<;`jJ71!my%7OwvrQivvj^rrPkHaPeB>^}pN(A8$Lm4p~gv;eS}5v?ml>>43fqXi`4nsAF@GsYT>Ru_p%Y6ct&h~ncv z8uCsw+sl0zL!!|_wb6Hme^$_JU!!0&zcfA{I41{q{=k(%#@z8QnFZs%qep-EdCSq` z`wH&dsV%C%d#6A!+S0BzmgH#p=4Pc^PFZ7enoTf5Tt;mrp*fqFSeAfv*R*g*R8p03 zERd8;76(yQCj3~~njbhxs-Y4!d;lK?@F0NUh;~dC|I;xO|KYQAl>%3};sWl&@i;+e zKTH2aSPwvXh`f)kc;Gp73O`ExzYJ~-_g_O^3I?>k2U_3hrJxX1S-d>?_AT4~c-ELO z0M~Y z`zr3ehm*D7-kls90PtDr@Zn<4;oc))Vm;3++#GJ-&;)rYZk@!n$P(m1xOERYl3J&e z1yLXCX@Gf3wqO5w!gAvL+VfK1YFrW|u~f#<+4k?CB3Cc)jl`z=l9LN!4@ z0}p|c%eP|P@Vta?q9S|~zot>M5%;Kb!Pt<`BGb9BPD*_{)Z26}z!Oj`%<+CEpq$k5 zCg7ZO4@I^PK%J<8brR;Mng|aYK7v`S0l=PV00@mi)c_cpHphnEz2O=nwyFv}rb5ku zq=$DY*4PRhFa$j#U$m7)3f5@0$BNizEEcTnxb)_VfF`U9iV6usyM<1ypv(lIiefJi@Zh@uT_P@T zg;n8RkBerz)JKm7#Kh!yPGIC+AUy<=j@AXXl96#n>%v=UX#(1BTgVmCkh4gup{S^& zjsyo*S5nl?%d4BS9jGRhzo(P4CQj52>ThhQNo$YWwrE`)j+$s){e9f8QtRq%)Ivn_ z5c?VzJ9}D5J^fhcceSow7Hi_7E2@dB!z(IMwXW_)O+0~GhX!rC^E~x`JCqxfhrh`M zHspHug13U$&-YXK@V}vOS{k%c_+48Y3g7VEclRhfXyEn(|9c7#?DhT=e@)@V=aTB~ zr|`hPqHyh|tS&^}Sy{!6YG|g^jm2Q9_l`yK7Hdry85Z;^VaE`CO1(;ginM9=AK3QO z4_kNa+P*E?@rw5N(IdYdJVY;sq-(NkXDdI2NuN)VeM`QS6SciAhaLU0kzP{&I!Cii znY4b_vRxcfw$0-{zD0Ms*4^Igp8@nDT5WxvOpEyCz5@sK>D{mY8(Pkhk&~N6FX~|u z=-cn6E}it4@vuM%fZ8@H2HR9qO^|yBu^$7Zx-zGljRqRX z468@pfcY;iJ%gs+w41h++s6#Q?A;52eAlks*olJ${uM@D_rW{P0`v*kK^=Py88Rd) zAV2|E-1IAF&YZcHkx_3Z9w!9o0Os0Qk)N9S`R7;i8&qn!!Y4T3*?FDr9MuL20q4)( z$cEY;0o*1m9J>c20I+M{zV+utA8n%t3G_5xsI;^?;ysFsii;6LO5hU}*?LpyX^*yO zfg&q3iLk%U%fuBe$GeAh3=0blCfO}4yt8I9pt87579(;~2EjEo#L_X;5<+C+Y`aj< z3B`g90-c0{3y!srhSEtSBBMcx3@#RcaOJDXw{6D^EQH`X%^5DZbV5@9Hz0)I7LEM`A%?_ONTju+C8%STvd zYytjZwxGi^^e^)M(zURQ`Z$;@}@mK5#fwe)M*TYEujWLCFsv zJbK~}x0y5Zk8eM2R@xD*uKwut=U;f?h38-YXcRPitDGP*tDxii=%Y8@c;iF(z`Ub@Jw_a;utr5Y=kD-je%T}K~a*2w^N*V1N24+I4Vq90oE)VA;5yJNw^h4J9;y{xJO^gzPc zlWo>UAVJ!z+>O*|$5=f1IV0^~Rq0xtnB*S9hX z@{gTi9##MepfH5Krzy(y^y?h?*pqC86%cSJ)IAhFlHVT~S<#N!M5;fCq`QF*d(gkG z&N0H{8j{6b2r3HDR3hQOBbqhZF+Uz7keRUz6vv}D^O0_uZ2J>Yd(tfr#}&a?0?|s4 zG(_S%>9esa60JeBMiMP&=x3{>XB6Q5g-swnwICLuXmFg+{O8|u=gjF8+5fTEDHilc zcG!SVA#SR=x>KiaHW<2&e_RL#Py{YP$|CbX;P&)JhSf&tvqx`y9^i{Izb{+8YQ<;K zj{m9?6E|-7{FBcXqt)Lx;&{UnbU>q*iJiSIa|^_FP1qtzGch3AWAM6p8T42NDKgY<$B(RL3h0vaP)Y0?3$ z!^oCMZyqeu`Zt$Ld@lm69csBJ2S=c_-HHorx1cM+%mtet$_~0QY}oJvkni?EIZ1Cn zY!HCuh9QR?f0|#vsZQJdRYl(QrJ7TmCNNOLT|Xts!{R9lZq8d+pUUSxGh>DVFraFc zD);93^EY!z8o^{q8{V{fpCDhguBFzl^3wL}5n}G%-5k=RA2XghbNSY-3m2faJ9G#+ zjgGTLR)d$9p4HXKp$wE`V$pj^+@G)sT&n`YCQzY1R+K-QEdl#qYGp6evfzNUEbzj* z^L>21$O=`2)E=%iD$y$fz6sj^?(zZ_6>Ok@xK)sQJi3z5v@T8^(Y;vM{uWsd;;yM*i4Ey6iT&fExv_u z;oi3gT5OqrXcYlf0p|j$2mmP)07&pM0{0^TBokH?(kHQ;6oWkyPbFVVioqUi;Yfs` zKA?`^XyuE{vg8AP=l1QJDXF(b2!&J%z=C5;3r1HnaB>=&noW>@=t7ct=z5wffLo@S zHG5|F$e(Zc#Jm#2K4u@wpNTft`sliF%xva@Ccao%v2O*Xl*P^^RWQoh@*Kq&xFrgU zmVUZ?c|rb_t;VCP<>rV$3b_T*%tf3h85sR+L05AQb$;Y8!$ce;mke0((qgX9U(8syvBCiAso% z%5mL2J#I8xfp}ctBG4xG60HHqU!0;V3EvOaXQm}19)%1+3Kp~ubOiLG6cHh4-$#P6 z|NOV38tg>SslZa7cKCp(*w|ivNOwv+ z6;@K{*#uxY(qs=i0Q|LdQ2oP2h_tL;5SZHiCba>{7!T@N{vPv}8O0!h`OXn935>Gj|< zW^Zb0YH4Q8R{hPJ$u|(J2%YWCTPZgM#b$?y3aLuJdibZ!Kd|jqJ;xQAow$%1!iAi~ ziMWueh)J)$^7P~EMXMfSOpX&7vKYva3YA0e300zsSGbwHj8J6gNzsNuhoJ=&BMRvO z%4l7qRi)9pspU!*VnsINOPAr_j*h>RdgFQe98y0MWT2SD=`wnV4#abwA3iC$8{`i_S zfH6w{;g~Ei)0h`ue5seG%&oDJ(|T)=a1^NZF>EH?GTse-OB61J$<3?Yt<7mnqCLW$r5chCno~{!k}9z- zkyC02&<=UG;4zO#I;8eN?a-FGV+NRujvPAl>knUlnJBpzEc;ae$i+9P9x6E+fuE02 zOo6z+=6IRUD+J`GM_*rrs@v_3jxqgXMoyRz9V^HJIz)66YpCtkcM9tE(hGrp9Ry7_ ztUXAt1#ftSp8;AERpU4ff#o5^7M$SQd@I$2VzR0Vkd7{kvh!Erdk>B z1=>CFz8Z}Z*lz-I%9*U}9r1ak&iR^jxfESZ$jhY5^|V0WNA!lw+jng;yQ8^9)@M$i zIWnKk{7R<=nZe=qeaCXg`^+r2c+Y}+90_}1v2vM9$4mlTK7>2DjNL02we)?qK4e6S zc8rH#1`3rZ%@Pmirgc^}6gADVvPba-zx=Cs3$(I*cmoLyyvI$cz1z)TP>}-m36mBO zNCEuA$^`bO!1fC4uE5~+N}1e9YNU7-@{5WF|i?<(xfB}=4FjK=06|IiRq-lmoYIIWC6aujaRQ; zZNkzFo7va5S^9({UtfW_-`%?9!rf9IpS!Sg-%Z~d1Ge;vZq?Rb@8X9oTQ_fk%z#^j zAfz{xt`KiQA=LOJ-NKlYg>b$cpb*j=m_ycDO!*`gP-Tdf(3>X^U+=FUZ}Rp;&>x~& zylIm3CM#~H>A`EAed>}eiPl1R6)GdyJ|3Y25SSG!AtFC+qV}pJ!xa zk{{1=QSkDFxvc0CQ6B*D|qzT_uqei{-b@dOzbPf^Oyy! zYG_{IPzkdG`j>{X&LN`mK}An=`M*3%s$~z2p!j)>|Oaa<^&<^=fa2mBw#Rj~KVylc(S zM1R0-``XPb%L`FSzDK@j41?Mb3K+4V&j0ceKulZ1)^JhmGWSmw!`87+xYu4A0k__D z>?}LWMH}>CdSh*^F(9DVz>y;d_F|ND+hpocXVohphcxpF3dhngxz$Ti0imIxfrmG< zPA@vv>UCOyKe-)vu)tdM*uSB;c-z+iVfuR8A;c4ra(uGYtGTwhxjrYSzWLOt6%zazavHWZ1oE(e5^I>QLK~ zK?ocQ55a;FT{3A>47Dm=HBheC%fH)SZ9FCgY!H7a6N=VULVsIX8m1# zP&=S?A8!UJ23_g;lh`5eEdsC`+zEc&(;MLH9)MmqS-kUryiUODW#t6BUINhd!jdW` zwODTpH2!F@e=yJ?wHR6%bwQJna4M-9W$tzDl#Mob_jc+=1_cKykLd>2NIFLuR}U@N zlZvnJ=^hoL?lIou*X7H9ZL5G}9Krr2LlyDA)kJb1v1sTLf=|`fo&r>4^3{tc;XiTm z;?-nZK}ku<)${xJ?>~Pvr9_}+MWI|ONnY2#tx^)=j~+>u?kOf&2>DC@opuuIli+l0 zM~|Mma4jtxn$PUCYZp!(1-{O;>rm+<$HOIQUZx1&XVI>6L5YNbJ6Fu6f;Mv67|=}4 z&8w|7#bq8i=GE4u(rQt%1d9z#qqzOnY8?s|EiVtIe+nF(d7#Ivs&kFOlIi6E%S)?O zN=(Qt-M#&Dh`8Z1CJ(x2o@r;DsY|XKo3taz64Yv-X>Xqi9zJ(Cy}a7mV={ool8#96 z(_U?DGOaeP;2Hzr4 zfIu?@0zW@5v&~jsR#uMXne*K_3TW+2yap30-v8$eCE}>T`D7>SjZuRPB^WhQQAtRF zeuL-T+jeZa?U;MpF5R^8uZ?6B1Mqi7vb_q$-oG}InazcTe{Ce2;ORL5S>@Um74Gxy zZM&qP;Iw<&&Vcc_oedD)Sv+rOXB-Q)SPbp#Vy!k~vWfS`IG~jwt$oc5YhRI75>JM? z8X&+8a9?SLepd^`lX^@mnBW=wG+%ZsvzI#*RzCeVBn^Ixc7Ek=HfyxK+-m65yLTt| zYWQ!}x_6#5X;LSFaj(7~1O3x-t2(Gl9|RHi>0-fb4)tWW;NWf-u42V_{lXuI4<9}y zXpt%m5#>}3cK`?|2<{M_#fQeEtI#O&6u77t4&sfRK=o0HaY>lrEY)X{nA1P!t3X z+?>fqM1Kzm@tBQanBjRV>_<3_Mh_{nd7UfuNL|+v%{CZfOcak{3W|;nlIk1otnVPy zw=3$~1NBAM$;B*7G$6I!{yso!^9l?$TeM<@j~`Enk7wRFB~aCG-+m>hAvpNlxy$K4 z`}E!@*nh@6zknM#E1G>D@2qxFn`#%Q z1ALaJRL1BiQS=h)Q@0LtFY*gtysQ% z*|OzJ5Q)BY3F~ajtEj8ny!nEyIw0WOxl5@zwu0)atP5K=Ro68a1CF$?oDYsjy>cB* zgZ%RDN~^r^#EBDyR)3|tDLA+bC7l|C?0K+HIV#N1hvVqzK^22OhVl~-SU>Axe`$5`M0iR=%Q;pvz)GCRA{&EG#9 zzKr4i{%)1orKQ!4kem663V&F*Ltv9PckI}aD-}EpGNqh|q_w6n%9Yn5KTUN7fjQMS z*$m~C#C~>-MyjFZnyoP z&aMN#sdDYV=bWr&v`O1^PdX{P?A_9`EeeV{eQu&U7z2LYpSv|2b*W;QD>v&HGDEa&pf5p7(jz^FHJMz|ljG30#B2 zh>>F_j!B(*%Pq5JX3m&9rEk=s86b6q^#$6&gRts}XjEQ)wFFxTZC>7<0|)lx<-xQ_ zG@E@rv5k^O{rfQ1Y%P>Y2uSE~43kEg;1%UaSiJaogqQw=?U9J2wOT7fLxG^~8d|=d zphx?v>W(p!sKpMz=>*5EBeMasuP6M`r@vhPg8nDpJRo%IeIln$jqKxO-E|m@I*<%A ziJ>of>d@3&Qv)xPKdi&3d56yYc^wd`DTC#)3tS|2N@=V}{8cSD^hamgV8K{jr9%`_sHkCD2Yc1f_s>{o(D$z8?{-!ZQ z*`Jdmiek=svpLw)M*D(v$pbrrgFC8D>^+Ez0=LuMSRRqB5)xbQRzr<%VIQ&=(ZQd` zWbqH6lD~=1ok&~zCXK-UmNY2atE#Fn5nON13HH$9^wA4NNAr&z%RhDbmn&#G*j~Gv z)+6R6DQ~c+jdy?Y!PfV-e)7qe|JkwqbGoCyu_F=JDIx;i-XmiH@g&hH1$3GnBV%{O zxcre|4-yEJ9GZXnMo1s05Kv3;xoT_c8(>$~(g^e5`f5vnt4ANgg!1aEuWj782?>&L z57m5Rx{zRH%gOwZ@ECmvp!@?9Gv+utY#?Bahk#L01IMIeD-j#3X=-Y!yaLH|k=5#m zf>y+92m~!&{P3dN?p!i|)`SUdZ6jtaUj5*z`=5RBuhZBO=y5hk<08gGO@JuhfVhZj zX$i$_5fcV6&VT^}7*N|!AKFuL0S>Xp^Yf4WbPQNF`KMnBaM4tE(_Fy@cu0Ee=#isG zj~sznU*1pnMz3SXkO!z6dni?iza-i9PriV;S5Ob%DaB{u-=~+lFo;=4p~iQ$#(2ux80WGL&X7-p^A!>+3kSA%kiO7p~W*&$$jTseV6oIZrpNrJS!dP zyGkvPA(j+NPk^2b^*5eD7Zj8KKnb4Mv0=jo_LB&WoUQwMPhzwj z8aeob`$fiw%wGjW8g{R!j!d1DF=cG(l+59gX37&*02Kg%ucZ6HIYEX19fC_AM@s$x zK)?sGxSRiT(U7bsZoyN!tta4_2kQw?PQIWRNGm9^QM&VNc?==Zpi@8*lGt(T9Vuc@o6J$ibdnBiQec}M2EY?&{^{#aCMBGA#I2N|KBS8WBmjbHhYxI6;y3OJ9{X=q0X;|a8*^g`|6 zdW!1cbq#2@l|Ti!M%Cg3AZzekHx^+)SUP*VZ%D9@9G44U>^}csh>Z}%mo{=R)Ccx$ zAP@w6F`@m*%~F4u1WGRir5B<<^n1I5uH@2G0#P>(uIGd18jlJ<5IhplYC%_EvnUmO zHsB{XIi`QOW;=5sxR6~u^LKIMZ0MdStp=oEE))l(i+e$_n|$4iKc5c3#e1E%J1@b| zXbyKtqPW^<%mjh|_ITOZvTWs=HETnvA6k+ua(qy(JhP@is~ag$V_fc6%du< z;ObnEzoVa(mzPp4p{pd4^I*5K0W;w2Av2n6Msx?2$=``&hKU*)A{pe$<~}$UBKT8zA>y)g?Y#R^I3fgLsqaKqO1^&4^{~h z9+4h8Zt17gcYlA)=_1)?1${W_g958U-0jjbdDs0n6qG?1NE93eK9eZ8EdmZd-cZ>V!Q`a)mm-YkvrtpxMFWcdEt0&; zbMaj8{|DW&(XT|Mvx9?j9*>P+1UP{vHLqnmSP9Y&PK|g+-{7-{ZNmUK#LnUDdiFIt z$gZmiuZ zh;kn+iPx@WbK}R0%8s`By4qT62fq1&Pl+S>a^xb-a?Ef%jWh$_zjIsxi104Q z*N#%h5BT+l<1M5MNc$ZBcD(C2gYzFEZNn8`Ilgxsa2%G>ddGT44zBzjXAj|;QZqCI z_4Vs{fX9gPR>U>3ShBhLdQoZADQvWfDpgxQIez0g4m7v0!lKAy~>K%d-GuFr3!# zhKf5F)-!=Y!I(^Ap~J_EMmzl*{C~~X)nAC4rT9U^(O*)~UsBLt0(j0^+J-G*2kq$^ zdV8@o7fjFR~KR zx1{8gAL<;At^=QxoWFSSycxV^WE?Ct2*SwU5Tpg#1xpk$bbal1Ym-c+>S?yxopL$g zOfH{ksXA9zS5#74ckzO?6zeZoD&{M~gw5LKwAq?#t-c{iNr)swI#$BD^hMptBZ+V> zA?;?c(M-plD-Yc9m!i<6L3lp7Ws>YOfDa0>Wu?((*|Y(7JdJ04TNEtqQ7SVxd}z8e z)ERCfb%mgA@6?)QpJ%_m2kXWtV5AL|T{id6ZKT&GyEpIgZQ_3Ug;#VlQ#zcVGWiQb z@D0@3z;|eviGv?Uw*trNYF^#R`g3w?pETshb+<1ZL{l*s!Iz@ zM;H4}%;vhgbA@>lsbaQdzLrDlw#J8@#1#sK z`_~}XFR3J&+sS{-2>Q8kBWcJ8?H#P$RTLK+D+=%Yi@oD2hBF#r9n;M!(3susa%eUa zAUV^zviANtetiE-tdEhQZ+KdYi9$yw>kxAVOvD1qOLvZU+%R#4a0_SYtcqPj5mjQY zC_s{?=|8$?L}QHn4o4tA&lu~n3#_fJm3WReL<)w`2HIiBn*{p}3Q}wE7iI13c8A)m zJA1XAb=RCd27A%s{rjg$2*~|fC-=@&06+Tdt2m%Sc zm|9dr0IJpj$3WB8B+9f}nVdHVaG1j#N+*!e;5aTgd;-*1^VBMpk19GE(n~w8RH-ht zK}#;R8FBO@LCJA3uBcoiCC?%#lxVw*gTQOgoPmDl@}*0cul#)a6D<_V*WdvP#ELQt zV546@a^UQVW5=)5_eLhP*sKjr4aEg}He&@f#`1QsQI}wAceT{6AS5@yPb8RG24#MVsZlZ1O~8Di8vo zSq{LrGIS|Gr(p?z4g0W>cznSwliB-dt3m!WpU~_@b~2E@yNv-Xm&-2i<_~ut7G=pF zX6&mZUrLsr!9I2FWn0(^^wpQLWtO_qx&jkZWXtp+x}Zc@&boXl70B1k{gDj?^s{I! zo~zppHjYylaMyvu@F)kZb8rEzR58h1YyXU*!4qllL^sV#-~j=`4>W+4@V;DV;%=~M z>;l`uJ!C>K0pbKrL~w9|badsFh^H8Wya@HhZ7pCZDf*?Tg3Re8Rt~%IA4ET_k%WFn zR+@OhU6%Dw*vBWGPiWsSu7Ozr?CqL*dN5iZ6A@nZwI9*^t<9nd@+#A7=IJnfXk<-z zf5m=?FVgNW8{^lxaln}+;^SL+0TM9r~_A$mehv{~3 z8vBgvK?t@5M0{km7y%|Vc2 z##GK2zjW!+OqlgrrS!hA)Ohx+s0~e9v}DO$cinZ*y-V3$qGDQJUS4xV7Ad$XO%u6} z&TNIs7?A)$w%MSU1ohGfRiEAt9aifJMe<3>)OKD2f^Hl1AUHVIzY#5J{M z%e$Lv3(qsmcD!5!gDwb8L8fqa0o{~v=KkosRO1BHm@Mj3(Kiy%Lld!X8-6;ncYod? z@bZVqlXU+9CW918}V!H*VjHQJfcRet|{EN;~R6tZai8hi1&4I`{6y zi|>B!xz%gT;lELdc}sgGr&mzqrbqYh(~z|{bhC!$#^&n)V)O4)l({T?QA0?6BRX(l zi`|Zips5L3%C4?z$PUV&t!rzpt2al$^aTzc(3(MQMtDt#0pTSCc8xhuGYM@p0MrZu zHAAI!txxe!goN;cX`z_oco9xct&TiJUfz~>-`&FaAIl+TStO5{Wj)Y?z@v|GAAX~} zmt7f8?+oC%PeEE>;%*0zyq!;@G&xJPX>*yqj#cQp2!q(sQ^K~bLZDz z{FEmT3YxND0i37%id8hWm6wG&Vz4(2N==0wuk_r%zbm!THuNlgNVYdB`i66!JKU>5 zzwGoBE=hAfOrK!(ak(#jH2`Cj+DNtgT6hw!KOkk4M4a{_&`59#uG^wIgJ_k1PY zGfleZN4k6teV|gF`jADy&3=B!QF0hTG3V*4nm+crt&ijwk^jv-Zf>9Grequzl_oKV zvthFN4;ch<5XEteaS{fj5<%KT7Sp#gLH{DZ1mddQQes-%(V-~P(eAHhhYrHu+8ybr zuq_?ZtS8O=k&cuU>1gY&Udyk;9E~%Ed51YdES}_K-M_1cn3X`U?COtkME26ey?FcL z$-dsXeLEJ?#iD?q8L-Ch4$OTEerYZqg^N?C!|ZG`=E-fMP#qFdZI;I|`A`@Qg580< zKt8ovJ{`MA0k|FjilrUc@0PN2=k_nWO9_m9>+Akf)T%VLnJk4orI=&`hpPKasZN|; ze{(5AQJ2Z63-RA>bUovRLQK0YwS~t4ty9-zx%~56|JVh70$M(u)+jfyibn6~^_zB_ zI(4cH(d+L(MBxl6J%Zq9DuAy}YU5qmMqyPIKPt zd*{?;%a+|Sd&-C*V+J~=62 zQXu=fb_AR4Qf7hi6B9Z-5JoDXwb(Y!CxW~Z&* zYJohe;<~8swY9g@mYzSpecQHeZ@*5F1)?KtR;%^8JbXmPt+%dR`M{dTo__wXfBox| ztMAU5bH~gGwvuv)N=Y4?niQ%*=qf>N2#Xs$&Wn)I%O?*XK72?@Qd|rz^D zkqa~D%%1zq3s1wISKZku?`dspXSx8LsPDG*IOQsZpn@NRLK~(Bhz>Ic-8&Ai2@@v) zj%n)D>GPJ{|LD_CKfUh7>0k}sgk$Y1><#vc2^(4D)!tTH)nJq9)Y*Q#0)`{%-%vgc zl&5%6vq1SYP(BTmk1DMMpW4;dPz@0_MB3JdHqyp-HTLd^w!3Wvq0e$V8HG{(5NL+^uEQ2Y&sq4g9m|-HV!g|=S|SU zMX1#+6rHH5{(3j~LPw|d%5kqg%Z)ZMR;_(@2DVhMB7_vBP0XZx=7E)u ztQgH!Q9e;A6SEd%rKiS4Mh-|!P8l-Bs|BLQtI$X~|BxscgeN7%MTYo87SkRTAD3|J zUGpFSjp*#u^mKK>GuTh3RKf>G7vSsFxufO}!%)x#CqR9Zk(rr+7tAN-WGz~;YRU2? zYzp1q-d26Ju(+z(*3;9{VuR|>i|EoUL*suUXij>1ve_F7nj0_ug7M!_QIt>c0WFq` z=ic7@Ev9@q9FloOuc#@l=)n$GXsx+yRhbp9?!@@7!uVf^@jqQk6s%+=0$8pz>J9=2-f!*%`@N83VadnQ#Pz;9en>X&7x_tTaJLgUrHgwz|k=2W$5Ev8b zFRBA$r9k4NM|0gd#>raLR+*X-0|!&vfRvPhw=Z2F=GxHb$jM}sjwEGO{3 zG;k=}|&T6Cw+k+Z|z(JrckVhq^41cZk>6|S{!4CpWubQl9V zi~$|UHaXO&PEJlviVM?^oR&5ey6(u~1A+~gue7s~Vz9rzTB-H*vA2%PNFQwQ>TxQG z9+xb2we!%`7J(+Sy{K|OW=RiKHjBa3)vO)z0OR4(#!~NI7FxC+L18|9@6ApA5ktnO zrKOD>9IN*W)s|so;;yKa?6er%*$KWmcE*EGOo;%>t5;779T5#4y}7+trRpR(-RN^mld8pw*QA8X1O(g z{YP%Y%hO@9@d$KOlR;eac)!OrXp0Mh4bb6~$yD;Ls&j|E25r>EogaMg!S+2TuQhgc zMMlOX3>prU#gym(8LxF-q`^is9W!OZ&?rBsnp8AhdJ{IP;T$Xpe!fytQPMHP^SC^rD-=5q77ncNI+GPc?v88t&wb#;B44L-0SGBkO=ZVQqN#G@_9 zi-7!U<8c`u6Q&WhCFPY4Kdz-|*P%AHOY~F0V9%fF5kJAeI%y0W8roYrweM_P|NYf7 zKYq7q^G|G(C~9xCwzB+P7_6_*VCCil5O&>62{2(B!Wq3Si2^q9f18Vjr_1BB?qmZ} zAqN;k=L#Ac>ka4b2fgq z{e#UruuT)Smc9RB;VO(}tyW`?%uWn zlh;E8Zc2&Y^3T^Y5QOGAa5Ia!SFt?zbvA25OG`uLl{06`Z#Zw%ocVuuwsAAFZpoZ5 zEG1>+q{$Pd=LDmXZK*q^q;FRbI2y+Tfd@ z=xlCmv)|O&f;btZNR%pAFBid0;_mcxwjo?17mk2Qeo_kJ)LacY(;IyKEjZCHZzJiC9%Yd9M;GIgw_UY>X=b%) zERV}7l60vIu2;r{Yj#Sxg8V13O@8W#&O&_wEvcbtL$LJU=QYnJz)T^liWi41L%Un4N`ana7RK`s; zr2MFUa#sx>G_!a!Y&lFUQDny?-!Mzx4wYs&1{y4qNW^_* ze~+b|GWqOzON~uXvr77=bgY7p@C9R_d0M-cA9fB$qmlZmiJLqbV)wq5qn`i!`CPx= zmrs^6%azaExvqTn?|0>c{5<)P?Nao=C@lEP98m+Z-z1p-u_sSpNF_$ zA3u*hic>JkeaQs5I>Z&@`8~4rh>rwMFNG*{_OctEAJ=cVc?v3pv{^To0+@|1Q8;(< z*eTosN?brlvvT(7+&92CcUvL!P$a#EJa;6EM_60)}8oVOc}{)gvFg+tA!z*-&3q zUeg^Mb^gRDNJxT1L*qxl3wmTk1W-G%YlBVT#q#nS@RLjjVB==Zx~JAGoi$>__j|(T zu9=M;(!1GHJ$>3Rdi3bQQ4q7Ij2zi@!mRc7 zTgZb02rT;%%LBr61FW^ZDgo0iU2;-#a$;N}*x2NOLt=CY{n66Wj3^us^SQmDh!%u4 zaFb;j22e%Cm144Lf=Qv;QrgqK=zeM8xqV=ZcV zdxBYu9a~$M&mF7b>CPU29?mau!sLk)7Y$N7%vv%BsA_E>WHMMIsyn-?ip#2QI2t=U e%~}iuoh~$xLRo=ZP+Q8*9y@uqqUH)pW&aByuM@uj literal 0 HcmV?d00001 diff --git a/proxy/web/src/assets/netbird-full.svg b/proxy/web/src/assets/netbird-full.svg new file mode 100644 index 000000000..f925d5761 --- /dev/null +++ b/proxy/web/src/assets/netbird-full.svg @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + diff --git a/proxy/web/src/assets/netbird.svg b/proxy/web/src/assets/netbird.svg new file mode 100644 index 000000000..6254931c6 --- /dev/null +++ b/proxy/web/src/assets/netbird.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/proxy/web/src/components/Button.tsx b/proxy/web/src/components/Button.tsx new file mode 100644 index 000000000..aef8496b9 --- /dev/null +++ b/proxy/web/src/components/Button.tsx @@ -0,0 +1,156 @@ +import { cn } from "@/utils/helpers"; +import { forwardRef } from "react"; + +type Variant = + | "default" + | "primary" + | "secondary" + | "secondaryLighter" + | "input" + | "dropdown" + | "dotted" + | "tertiary" + | "white" + | "outline" + | "danger-outline" + | "danger-text" + | "default-outline" + | "danger"; + +type Size = "xs" | "xs2" | "sm" | "md" | "lg"; + +export interface ButtonProps + extends React.ButtonHTMLAttributes { + variant?: Variant; + size?: Size; + rounded?: boolean; + border?: 0 | 1 | 2; + disabled?: boolean; + stopPropagation?: boolean; +} + +const baseStyles = [ + "relative cursor-pointer", + "text-sm focus:z-10 focus:ring-2 font-medium focus:outline-none whitespace-nowrap shadow-sm", + "inline-flex gap-2 items-center justify-center transition-colors focus:ring-offset-1", + "disabled:opacity-40 disabled:cursor-not-allowed disabled:text-nb-gray-300 ring-offset-neutral-950/50", +]; + +const variantStyles: Record = { + default: [ + "bg-white hover:text-black focus:ring-zinc-200/50 hover:bg-gray-100 border-gray-200 text-gray-900", + "dark:focus:ring-zinc-800/50 dark:bg-nb-gray dark:text-gray-400 dark:border-gray-700/30 dark:hover:text-white dark:hover:bg-zinc-800/50", + ], + primary: [ + "dark:focus:ring-netbird-600/50 dark:ring-offset-neutral-950/50 enabled:dark:bg-netbird disabled:dark:bg-nb-gray-910 dark:text-gray-100 enabled:dark:hover:text-white enabled:dark:hover:bg-netbird-500/80", + "enabled:bg-netbird enabled:text-white enabled:focus:ring-netbird-400/50 enabled:hover:bg-netbird-500", + ], + secondary: [ + "bg-white hover:text-black focus:ring-zinc-200/50 hover:bg-gray-100 border-gray-200 text-gray-900", + "dark:ring-offset-neutral-950/50 dark:focus:ring-neutral-500/20", + "dark:bg-nb-gray-920 dark:text-gray-400 dark:border-gray-700/40 dark:hover:text-white dark:hover:bg-nb-gray-910", + ], + secondaryLighter: [ + "bg-white hover:text-black focus:ring-zinc-200/50 hover:bg-gray-100 border-gray-200 text-gray-900", + "dark:ring-offset-neutral-950/50 dark:focus:ring-neutral-500/20", + "dark:bg-nb-gray-900/70 dark:text-gray-400 dark:border-gray-700/70 dark:hover:text-white dark:hover:bg-nb-gray-800/60", + ], + input: [ + "bg-white hover:text-black focus:ring-zinc-200/50 hover:bg-gray-100 border-neutral-200 text-gray-900", + "dark:ring-offset-neutral-950/50 dark:focus:ring-neutral-500/20", + "dark:bg-nb-gray-900 dark:text-gray-400 dark:border-nb-gray-700 dark:hover:bg-nb-gray-900/80", + ], + dropdown: [ + "bg-white hover:text-black focus:ring-zinc-200/50 hover:bg-gray-100 border-neutral-200 text-gray-900", + "dark:ring-offset-neutral-950/50 dark:focus:ring-neutral-500/20", + "dark:bg-nb-gray-900/40 dark:text-gray-400 dark:border-nb-gray-900 dark:hover:bg-nb-gray-900/50", + ], + dotted: [ + "bg-white hover:text-black focus:ring-zinc-200/50 hover:bg-gray-100 border-gray-200 text-gray-900 border-dashed", + "dark:ring-offset-neutral-950/50 dark:focus:ring-neutral-500/20", + "dark:bg-nb-gray-900/30 dark:text-gray-400 dark:border-gray-500/40 dark:hover:text-white dark:hover:bg-zinc-800/50", + ], + tertiary: [ + "bg-white hover:text-black focus:ring-zinc-200/50 hover:bg-gray-100 border-gray-200 text-gray-900", + "dark:focus:ring-zinc-800/50 dark:bg-white dark:text-gray-800 dark:border-gray-700/40 dark:hover:bg-neutral-200 disabled:dark:bg-nb-gray-920 disabled:dark:text-nb-gray-300", + ], + white: [ + "focus:ring-white/50 bg-white text-gray-800 border-white outline-none hover:bg-neutral-200 disabled:dark:bg-nb-gray-920 disabled:dark:text-nb-gray-300", + "disabled:dark:bg-nb-gray-900 disabled:dark:text-nb-gray-300 disabled:dark:border-nb-gray-900", + ], + outline: [ + "bg-white hover:text-black focus:ring-zinc-200/50 hover:bg-gray-100 border-gray-200 text-gray-900", + "dark:focus:ring-zinc-800/50 dark:bg-transparent dark:text-netbird dark:border-netbird dark:hover:bg-nb-gray-900/30", + ], + "danger-outline": [ + "enabled:dark:focus:ring-red-800/20 enabled:dark:focus:bg-red-950/40 enabled:hover:dark:bg-red-950/50 enabled:dark:hover:border-red-800/50 dark:bg-transparent dark:text-red-500", + ], + "danger-text": [ + "dark:bg-transparent dark:text-red-500 dark:hover:text-red-600 dark:border-transparent !px-0 !shadow-none !py-0 focus:ring-red-500/30 dark:ring-offset-neutral-950/50", + ], + "default-outline": [ + "dark:ring-offset-nb-gray-950/50 dark:focus:ring-nb-gray-500/20", + "dark:bg-transparent dark:text-nb-gray-400 dark:border-transparent dark:hover:text-white dark:hover:bg-nb-gray-900/30 dark:hover:border-nb-gray-800/50", + "data-[state=open]:dark:text-white data-[state=open]:dark:bg-nb-gray-900/30 data-[state=open]:dark:border-nb-gray-800/50", + ], + danger: [ + "dark:focus:ring-red-700/20 dark:focus:bg-red-700 hover:dark:bg-red-700 dark:hover:border-red-800/50 dark:bg-red-600 dark:text-red-100", + ], +}; + +const sizeStyles: Record = { + xs: "text-xs py-2 px-4", + xs2: "text-[0.78rem] py-2 px-4", + sm: "text-sm py-2.5 px-4", + md: "text-sm py-2.5 px-4", + lg: "text-base py-2.5 px-4", +}; + +const borderStyles: Record<0 | 1 | 2, string> = { + 0: "border", + 1: "border border-transparent", + 2: "border border-t-0 border-b-0", +}; + +const Button = forwardRef( + ( + { + variant = "default", + rounded = true, + border = 1, + size = "md", + stopPropagation = true, + className, + onClick, + children, + ...props + }, + ref + ) => { + return ( + + ); + } +); + +Button.displayName = "Button"; + +export default Button; diff --git a/proxy/web/src/components/Card.tsx b/proxy/web/src/components/Card.tsx new file mode 100644 index 000000000..ba92274ac --- /dev/null +++ b/proxy/web/src/components/Card.tsx @@ -0,0 +1,23 @@ +import { cn } from "@/utils/helpers"; +import { GradientFadedBackground } from "@/components/GradientFadedBackground"; + +export const Card = ({ + children, + className, +}: { + children: React.ReactNode; + className?: string; +}) => { + return ( +

    + ); +}; diff --git a/proxy/web/src/components/ConnectionLine.tsx b/proxy/web/src/components/ConnectionLine.tsx new file mode 100644 index 000000000..39080ff6f --- /dev/null +++ b/proxy/web/src/components/ConnectionLine.tsx @@ -0,0 +1,26 @@ +import { X } from "lucide-react"; + +interface ConnectionLineProps { + success?: boolean; +} + +export function ConnectionLine({ success = true }: Readonly) { + if (success) { + return ( +
    +
    +
    + ); + } + + return ( +
    +
    +
    +
    + +
    +
    +
    + ); +} diff --git a/proxy/web/src/components/Description.tsx b/proxy/web/src/components/Description.tsx new file mode 100644 index 000000000..60e7ce1cc --- /dev/null +++ b/proxy/web/src/components/Description.tsx @@ -0,0 +1,14 @@ +import { cn } from "@/utils/helpers"; + +type Props = { + children: React.ReactNode; + className?: string; +}; + +export function Description({ children, className }: Readonly) { + return ( +
    + {children} +
    + ); +} \ No newline at end of file diff --git a/proxy/web/src/components/ErrorMessage.tsx b/proxy/web/src/components/ErrorMessage.tsx new file mode 100644 index 000000000..67a66c20f --- /dev/null +++ b/proxy/web/src/components/ErrorMessage.tsx @@ -0,0 +1,7 @@ +export const ErrorMessage = ({ error }: { error?: string }) => { + return ( +
    + {error} +
    + ); +}; diff --git a/proxy/web/src/components/GradientFadedBackground.tsx b/proxy/web/src/components/GradientFadedBackground.tsx new file mode 100644 index 000000000..fc0bdc831 --- /dev/null +++ b/proxy/web/src/components/GradientFadedBackground.tsx @@ -0,0 +1,22 @@ +import { cn } from "@/utils/helpers"; + +type Props = { + className?: string; +}; + +export const GradientFadedBackground = ({ className }: Props) => { + return ( +
    +
    +
    + ); +}; diff --git a/proxy/web/src/components/HelpText.tsx b/proxy/web/src/components/HelpText.tsx new file mode 100644 index 000000000..ce71bfa6d --- /dev/null +++ b/proxy/web/src/components/HelpText.tsx @@ -0,0 +1,19 @@ +import { cn } from "@/utils/helpers"; + +interface HelpTextProps { + children?: React.ReactNode; + className?: string; +} + +export default function HelpText({ children, className }: Readonly) { + return ( + + {children} + + ); +} diff --git a/proxy/web/src/components/Input.tsx b/proxy/web/src/components/Input.tsx new file mode 100644 index 000000000..7b880ed00 --- /dev/null +++ b/proxy/web/src/components/Input.tsx @@ -0,0 +1,137 @@ +import { cn } from "@/utils/helpers"; +import { Eye, EyeOff } from "lucide-react"; +import * as React from "react"; +import { useState } from "react"; + +export interface InputProps + extends React.InputHTMLAttributes { + customPrefix?: React.ReactNode; + customSuffix?: React.ReactNode; + maxWidthClass?: string; + icon?: React.ReactNode; + error?: string; + prefixClassName?: string; + showPasswordToggle?: boolean; + variant?: "default" | "darker"; +} + +const variantStyles = { + default: [ + "bg-nb-gray-900 placeholder:text-neutral-400/70 border-nb-gray-700", + "ring-offset-neutral-950/50 focus-visible:ring-neutral-500/20", + ], + darker: [ + "bg-nb-gray-920 placeholder:text-neutral-400/70 border-nb-gray-800", + "ring-offset-neutral-950/50 focus-visible:ring-neutral-500/20", + ], + error: [ + "bg-nb-gray-900 placeholder:text-neutral-400/70 border-red-500 text-red-500", + "ring-offset-red-500/10 focus-visible:ring-red-500/10", + ], +}; + +const prefixSuffixStyles = { + default: "bg-nb-gray-900 border-nb-gray-700 text-nb-gray-300", + error: "bg-nb-gray-900 border-red-500 text-nb-gray-300 text-red-500", +}; + +const Input = React.forwardRef( + ( + { + className, + type, + customSuffix, + customPrefix, + icon, + maxWidthClass = "", + error, + variant = "default", + prefixClassName, + showPasswordToggle = false, + ...props + }, + ref + ) => { + const [showPassword, setShowPassword] = useState(false); + const isPasswordType = type === "password"; + const inputType = isPasswordType && showPassword ? "text" : type; + + const passwordToggle = + isPasswordType && showPasswordToggle ? ( + + ) : null; + + const suffix = passwordToggle || customSuffix; + const activeVariant = error ? "error" : variant; + + return ( + <> +
    + {customPrefix && ( +
    + {customPrefix} +
    + )} + +
    + {icon} +
    + + + +
    + {suffix} +
    +
    + {error && ( +

    {error}

    + )} + + ); + } +); + +Input.displayName = "Input"; + +export { Input }; diff --git a/proxy/web/src/components/Label.tsx b/proxy/web/src/components/Label.tsx new file mode 100644 index 000000000..09e122f8e --- /dev/null +++ b/proxy/web/src/components/Label.tsx @@ -0,0 +1,19 @@ +import { cn } from "@/utils/helpers"; + +type LabelProps = React.LabelHTMLAttributes; + +export function Label({ className, htmlFor, ...props }: Readonly) { + return ( +
    + + Powered by + + + + ); +} \ No newline at end of file diff --git a/proxy/web/src/components/SegmentedTabs.tsx b/proxy/web/src/components/SegmentedTabs.tsx new file mode 100644 index 000000000..582b01f79 --- /dev/null +++ b/proxy/web/src/components/SegmentedTabs.tsx @@ -0,0 +1,145 @@ +import { cn } from "@/utils/helpers"; +import { useState, useMemo, useCallback } from "react"; +import { TabContext, useTabContext } from "./TabContext"; + +type TabsProps = { + value?: string; + defaultValue?: string; + onChange?: (value: string) => void; + children: + | React.ReactNode + | ((context: { value: string; onChange: (value: string) => void }) => React.ReactNode); +}; + +function SegmentedTabs({ value, defaultValue, onChange, children }: Readonly) { + const [internalValue, setInternalValue] = useState(defaultValue ?? ""); + const currentValue = value ?? internalValue; + + const handleChange = useCallback((newValue: string) => { + if (value === undefined) { + setInternalValue(newValue); + } + onChange?.(newValue); + }, [value, onChange]); + + const contextValue = useMemo( + () => ({ value: currentValue, onChange: handleChange }), + [currentValue, handleChange], + ); + + return ( + +
    + {typeof children === "function" + ? children({ value: currentValue, onChange: handleChange }) + : children} +
    +
    + ); +} + +function List({ + children, + className, +}: Readonly<{ + children: React.ReactNode; + className?: string; +}>) { + return ( +
    + {children} +
    + ); +} + +function Trigger({ + children, + value, + disabled = false, + className, + selected, + onClick, +}: Readonly<{ + children: React.ReactNode; + value: string; + disabled?: boolean; + className?: string; + selected?: boolean; + onClick?: () => void; +}>) { + const context = useTabContext(); + const isSelected = selected ?? value === context.value; + + let stateClassName = ""; + if (isSelected) { + stateClassName = "bg-nb-gray-900 text-white"; + } else if (!disabled) { + stateClassName = "text-nb-gray-400 hover:bg-nb-gray-900/50"; + } + + const handleClick = () => { + context.onChange(value); + onClick?.(); + }; + + return ( + + ); +} + +function Content({ + children, + value, + className, + visible, +}: Readonly<{ + children: React.ReactNode; + value: string; + className?: string; + visible?: boolean; +}>) { + const context = useTabContext(); + const isVisible = visible ?? value === context.value; + + if (!isVisible) return null; + + return ( +
    + {children} +
    + ); +} + +SegmentedTabs.List = List; +SegmentedTabs.Trigger = Trigger; +SegmentedTabs.Content = Content; + +export { SegmentedTabs }; diff --git a/proxy/web/src/components/Separator.tsx b/proxy/web/src/components/Separator.tsx new file mode 100644 index 000000000..877c605cd --- /dev/null +++ b/proxy/web/src/components/Separator.tsx @@ -0,0 +1,10 @@ +export const Separator = () => { + return ( +
    + + OR + + +
    + ); +}; diff --git a/proxy/web/src/components/StatusCard.tsx b/proxy/web/src/components/StatusCard.tsx new file mode 100644 index 000000000..44ed957ee --- /dev/null +++ b/proxy/web/src/components/StatusCard.tsx @@ -0,0 +1,38 @@ +import type { LucideIcon } from "lucide-react"; +import { ConnectionLine } from "./ConnectionLine"; + +interface StatusCardProps { + icon: LucideIcon; + label: string; + detail?: string; + success?: boolean; + line?: boolean; +} + +export function StatusCard({ + icon: Icon, + label, + detail, + success = true, + line = true, +}: Readonly) { + return ( + <> + {line && } +
    +
    + +
    + {label} + + {success ? "Connected" : "Unreachable"} + + {detail && ( + + {detail} + + )} +
    + + ); +} diff --git a/proxy/web/src/components/TabContext.tsx b/proxy/web/src/components/TabContext.tsx new file mode 100644 index 000000000..5a606ed49 --- /dev/null +++ b/proxy/web/src/components/TabContext.tsx @@ -0,0 +1,13 @@ +import { createContext, useContext } from "react"; + +type TabContextValue = { + value: string; + onChange: (value: string) => void; +}; + +export const TabContext = createContext({ + value: "", + onChange: () => {}, +}); + +export const useTabContext = () => useContext(TabContext); \ No newline at end of file diff --git a/proxy/web/src/components/Title.tsx b/proxy/web/src/components/Title.tsx new file mode 100644 index 000000000..1ed4a3b3b --- /dev/null +++ b/proxy/web/src/components/Title.tsx @@ -0,0 +1,14 @@ +import { cn } from "@/utils/helpers"; + +type Props = { + children: React.ReactNode; + className?: string; +}; + +export function Title({ children, className }: Readonly) { + return ( +

    + {children} +

    + ); +} diff --git a/proxy/web/src/data.ts b/proxy/web/src/data.ts new file mode 100644 index 000000000..8f7eac58d --- /dev/null +++ b/proxy/web/src/data.ts @@ -0,0 +1,54 @@ +// Auth method types matching Go +export type AuthMethod = 'pin' | 'password' | 'oidc' | "link" + +// Page types +export type PageType = 'auth' | 'error' + +// Error data structure +export interface ErrorData { + code: number + title: string + message: string + proxy?: boolean + destination?: boolean + requestId?: string + simple?: boolean + retryUrl?: string +} + +// Data injected by Go templates +export interface Data { + page?: PageType + methods?: Partial> + error?: ErrorData +} + +declare global { + // eslint-disable-next-line no-var + var __DATA__: Data | undefined +} + +export function getData(): Data { + const data = globalThis.__DATA__ ?? {} + + // Dev mode: allow ?page=error query param to preview error page + if (import.meta.env.DEV) { + const params = new URLSearchParams(globalThis.location.search) + const page = params.get('page') + if (page === 'error') { + return { + ...data, + page: 'error', + error: data.error ?? { + code: 503, + title: 'Service Unavailable', + message: 'The service you are trying to access is temporarily unavailable. Please try again later.', + proxy: true, + destination: false, + }, + } + } + } + + return data +} diff --git a/proxy/web/src/index.css b/proxy/web/src/index.css new file mode 100644 index 000000000..ad011f525 --- /dev/null +++ b/proxy/web/src/index.css @@ -0,0 +1,213 @@ +@import "tailwindcss"; + +@custom-variant dark (&:where(.dark, .dark *)); + +@font-face { + font-family: "Inter"; + font-style: normal; + font-weight: 100 900; + font-display: swap; + src: url("./assets/fonts/Inter-VariableFont_opsz,wght.ttf") format("truetype"); +} + +@font-face { + font-family: "Inter"; + font-style: italic; + font-weight: 100 900; + font-display: swap; + src: url("./assets/fonts/Inter-Italic-VariableFont_opsz,wght.ttf") format("truetype"); +} + +@theme { + /* Gray */ + --color-gray-50: #F9FAFB; + --color-gray-100: #F3F4F6; + --color-gray-200: #E5E7EB; + --color-gray-300: #D1D5DB; + --color-gray-400: #9CA3AF; + --color-gray-500: #6B7280; + --color-gray-600: #4B5563; + --color-gray-700: #374151; + --color-gray-800: #1F2937; + --color-gray-900: #111827; + + /* Red */ + --color-red-50: #FDF2F2; + --color-red-100: #FDE8E8; + --color-red-200: #FBD5D5; + --color-red-300: #F8B4B4; + --color-red-400: #F98080; + --color-red-500: #F05252; + --color-red-600: #E02424; + --color-red-700: #C81E1E; + --color-red-800: #9B1C1C; + --color-red-900: #771D1D; + + /* Yellow */ + --color-yellow-50: #FDFDEA; + --color-yellow-100: #FDF6B2; + --color-yellow-200: #FCE96A; + --color-yellow-300: #FACA15; + --color-yellow-400: #E3A008; + --color-yellow-500: #C27803; + --color-yellow-600: #9F580A; + --color-yellow-700: #8E4B10; + --color-yellow-800: #723B13; + --color-yellow-900: #633112; + + /* Green */ + --color-green-50: #F3FAF7; + --color-green-100: #DEF7EC; + --color-green-200: #BCF0DA; + --color-green-300: #84E1BC; + --color-green-400: #31C48D; + --color-green-500: #0E9F6E; + --color-green-600: #057A55; + --color-green-700: #046C4E; + --color-green-800: #03543F; + --color-green-900: #014737; + + /* Blue */ + --color-blue-50: #EBF5FF; + --color-blue-100: #E1EFFE; + --color-blue-200: #C3DDFD; + --color-blue-300: #A4CAFE; + --color-blue-400: #76A9FA; + --color-blue-500: #3F83F8; + --color-blue-600: #1C64F2; + --color-blue-700: #1A56DB; + --color-blue-800: #1E429F; + --color-blue-900: #233876; + + /* Indigo */ + --color-indigo-50: #F0F5FF; + --color-indigo-100: #E5EDFF; + --color-indigo-200: #CDDBFE; + --color-indigo-300: #B4C6FC; + --color-indigo-400: #8DA2FB; + --color-indigo-500: #6875F5; + --color-indigo-600: #5850EC; + --color-indigo-700: #5145CD; + --color-indigo-800: #42389D; + --color-indigo-900: #362F78; + + /* Purple */ + --color-purple-50: #F6F5FF; + --color-purple-100: #EDEBFE; + --color-purple-200: #DCD7FE; + --color-purple-300: #CABFFD; + --color-purple-400: #AC94FA; + --color-purple-500: #9061F9; + --color-purple-600: #7E3AF2; + --color-purple-700: #6C2BD9; + --color-purple-800: #5521B5; + --color-purple-900: #4A1D96; + + /* Pink */ + --color-pink-50: #FDF2F8; + --color-pink-100: #FCE8F3; + --color-pink-200: #FAD1E8; + --color-pink-300: #F8B4D9; + --color-pink-400: #F17EB8; + --color-pink-500: #E74694; + --color-pink-600: #D61F69; + --color-pink-700: #BF125D; + --color-pink-800: #99154B; + --color-pink-900: #751A3D; + + /* NetBird Gray */ + --color-nb-gray: #181A1D; + --color-nb-gray-50: #f4f6f7; + --color-nb-gray-100: #e4e7e9; + --color-nb-gray-200: #cbd2d6; + --color-nb-gray-250: #b7c0c6; + --color-nb-gray-300: #aab4bd; + --color-nb-gray-350: #8f9ca8; + --color-nb-gray-400: #7c8994; + --color-nb-gray-500: #616e79; + --color-nb-gray-600: #535d67; + --color-nb-gray-700: #474e57; + --color-nb-gray-800: #3f444b; + --color-nb-gray-850: #363b40; + --color-nb-gray-900: #32363D; + --color-nb-gray-910: #2b2f33; + --color-nb-gray-920: #25282d; + --color-nb-gray-925: #1e2123; + --color-nb-gray-930: #25282c; + --color-nb-gray-935: #1f2124; + --color-nb-gray-940: #1c1e21; + --color-nb-gray-950: #181a1d; + --color-nb-gray-960: #15171a; + + /* NetBird Orange */ + --color-netbird: #f68330; + --color-netbird-50: #fff6ed; + --color-netbird-100: #feecd6; + --color-netbird-150: #ffdfb8; + --color-netbird-200: #ffd4a6; + --color-netbird-300: #fab677; + --color-netbird-400: #f68330; + --color-netbird-500: #f46d1b; + --color-netbird-600: #e55311; + --color-netbird-700: #be3e10; + --color-netbird-800: #973215; + --color-netbird-900: #7a2b14; + --color-netbird-950: #421308; + + /* NetBird Blue */ + --color-nb-blue: #31e4f5; + --color-nb-blue-50: #ebffff; + --color-nb-blue-100: #cefdff; + --color-nb-blue-200: #a2f9ff; + --color-nb-blue-300: #63f2fd; + --color-nb-blue-400: #31e4f5; + --color-nb-blue-500: #00c4da; + --color-nb-blue-600: #039cb7; + --color-nb-blue-700: #0a7c94; + --color-nb-blue-800: #126478; + --color-nb-blue-900: #145365; + --color-nb-blue-950: #063746; +} + +:root { + --nb-bg: #18191d; + --nb-card-bg: #1b1f22; + --nb-border: rgba(50, 54, 61, 0.5); + --nb-text: #e4e7e9; + --nb-text-muted: rgba(167, 177, 185, 0.8); + --nb-primary: #f68330; + --nb-primary-hover: #e5722a; + --nb-input-bg: rgba(63, 68, 75, 0.5); + --nb-input-border: rgba(63, 68, 75, 0.8); + --nb-error-bg: rgba(153, 27, 27, 0.2); + --nb-error-border: rgba(153, 27, 27, 0.5); + --nb-error-text: #f87171; +} + +html { + color-scheme: dark; + @apply bg-nb-gray; +} + +html.dark, +:root { + color-scheme: dark; +} + +body { + font-family: "Inter", ui-sans-serif, system-ui, sans-serif, "Apple Color Emoji", "Segoe UI Emoji"; +} + +h1 { + @apply text-2xl font-medium text-gray-700 dark:text-nb-gray-100 my-1; +} +h2 { + @apply text-xl font-medium text-gray-700 dark:text-nb-gray-100 my-1; +} +p { + @apply font-light tracking-wide text-gray-700 dark:text-zinc-50 text-sm; +} + +[placeholder] { + text-overflow: ellipsis; +} diff --git a/proxy/web/src/main.tsx b/proxy/web/src/main.tsx new file mode 100644 index 000000000..e836cc12b --- /dev/null +++ b/proxy/web/src/main.tsx @@ -0,0 +1,18 @@ +import { StrictMode } from 'react' +import { createRoot } from 'react-dom/client' +import './index.css' +import App from './App.tsx' +import { ErrorPage } from './ErrorPage.tsx' +import { getData } from '@/data' + +const data = getData() + +createRoot(document.getElementById('root')!).render( + + {data.page === 'error' && data.error ? ( + + ) : ( + + )} + , +) diff --git a/proxy/web/src/utils/helpers.ts b/proxy/web/src/utils/helpers.ts new file mode 100644 index 000000000..a5ef19350 --- /dev/null +++ b/proxy/web/src/utils/helpers.ts @@ -0,0 +1,6 @@ +import { clsx, type ClassValue } from "clsx"; +import { twMerge } from "tailwind-merge"; + +export function cn(...inputs: ClassValue[]) { + return twMerge(clsx(inputs)); +} diff --git a/proxy/web/src/vite-env.d.ts b/proxy/web/src/vite-env.d.ts new file mode 100644 index 000000000..ddeb09246 --- /dev/null +++ b/proxy/web/src/vite-env.d.ts @@ -0,0 +1,6 @@ +/// + +declare module "*.svg" { + const content: string; + export default content; +} \ No newline at end of file diff --git a/proxy/web/tsconfig.json b/proxy/web/tsconfig.json new file mode 100644 index 000000000..5a060c775 --- /dev/null +++ b/proxy/web/tsconfig.json @@ -0,0 +1,22 @@ +{ + "compilerOptions": { + "target": "ES2022", + "lib": ["ES2022", "DOM", "DOM.Iterable"], + "module": "ESNext", + "skipLibCheck": true, + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "verbatimModuleSyntax": true, + "noEmit": true, + "jsx": "react-jsx", + "strict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noFallthroughCasesInSwitch": true, + "baseUrl": ".", + "paths": { + "@/*": ["src/*"] + } + }, + "include": ["src", "vite.config.ts"] +} diff --git a/proxy/web/vite.config.ts b/proxy/web/vite.config.ts new file mode 100644 index 000000000..a5f9ee2a8 --- /dev/null +++ b/proxy/web/vite.config.ts @@ -0,0 +1,32 @@ +import { defineConfig } from 'vite' +import react from '@vitejs/plugin-react' +import tailwindcss from '@tailwindcss/vite' +import path from 'node:path' + +export default defineConfig({ + plugins: [react(), tailwindcss()], + base: '/__netbird__/', + resolve: { + alias: { + '@': path.resolve(__dirname, './src'), + }, + }, + server: { + port: 3031, + }, + preview: { + port: 3031, + }, + build: { + outDir: 'dist', + assetsDir: 'assets', + cssCodeSplit: false, + rollupOptions: { + output: { + entryFileNames: 'assets/index.js', + chunkFileNames: 'assets/[name].js', + assetFileNames: 'assets/[name][extname]', + }, + }, + }, +}) diff --git a/proxy/web/web.go b/proxy/web/web.go new file mode 100644 index 000000000..6773a9c1a --- /dev/null +++ b/proxy/web/web.go @@ -0,0 +1,189 @@ +package web + +import ( + "bytes" + "embed" + "encoding/json" + "html/template" + "io/fs" + "net/http" + "net/url" + "path/filepath" + "strings" +) + +// PathPrefix is the unique URL prefix for serving the proxy's own web assets. +// Using a distinctive prefix prevents collisions with backend application routes. +const PathPrefix = "/__netbird__" + +//go:embed dist/* +var files embed.FS + +var ( + webFS fs.FS + tmpl *template.Template + initErr error +) + +func init() { + webFS, initErr = fs.Sub(files, "dist") + if initErr != nil { + return + } + + var indexHTML []byte + indexHTML, initErr = fs.ReadFile(webFS, "index.html") + if initErr != nil { + return + } + + tmpl, initErr = template.New("index").Parse(string(indexHTML)) +} + +// AssetHandler returns middleware that intercepts requests for the proxy's +// own web assets (under PathPrefix) and serves them from the embedded +// filesystem, preventing them from being forwarded to backend services. +func AssetHandler(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.URL.Path, PathPrefix+"/") { + serveAsset(w, r) + return + } + next.ServeHTTP(w, r) + }) +} + +// serveAsset serves a static file from the embedded filesystem. +func serveAsset(w http.ResponseWriter, r *http.Request) { + if initErr != nil { + http.Error(w, initErr.Error(), http.StatusInternalServerError) + return + } + + // Strip the prefix to get the embedded FS path (e.g. "assets/index.js"). + filePath := strings.TrimPrefix(r.URL.Path, PathPrefix+"/") + content, err := fs.ReadFile(webFS, filePath) + if err != nil { + http.Error(w, "not found", http.StatusNotFound) + return + } + + setContentType(w, filePath) + w.Write(content) //nolint:errcheck +} + +// ServeHTTP serves the web UI. For static assets it serves them directly, +// for other paths it renders the page with the provided data. +// Optional statusCode can be passed to set a custom HTTP status code (default 200). +func ServeHTTP(w http.ResponseWriter, r *http.Request, data any, statusCode ...int) { + if initErr != nil { + http.Error(w, initErr.Error(), http.StatusInternalServerError) + return + } + + path := r.URL.Path + + // Serve robots.txt + if path == "/robots.txt" { + content, err := fs.ReadFile(webFS, "robots.txt") + if err != nil { + http.Error(w, "not found", http.StatusNotFound) + return + } + w.Header().Set("Content-Type", "text/plain") + w.Write(content) //nolint:errcheck + return + } + + // Serve static assets directly (handles requests that reach here + // via auth middleware calling ServeHTTP for unauthenticated requests). + if strings.HasPrefix(path, PathPrefix+"/") { + serveAsset(w, r) + return + } + + // Render the page with data + dataJSON, _ := json.Marshal(data) //nolint:errcheck + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, struct { + Data template.JS + }{ + Data: template.JS(dataJSON), //nolint:gosec + }); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "text/html") + if len(statusCode) > 0 { + w.WriteHeader(statusCode[0]) + } + w.Write(buf.Bytes()) //nolint:errcheck +} + +func setContentType(w http.ResponseWriter, filePath string) { + switch filepath.Ext(filePath) { + case ".js": + w.Header().Set("Content-Type", "application/javascript") + case ".css": + w.Header().Set("Content-Type", "text/css") + case ".svg": + w.Header().Set("Content-Type", "image/svg+xml") + case ".ttf": + w.Header().Set("Content-Type", "font/ttf") + case ".woff": + w.Header().Set("Content-Type", "font/woff") + case ".woff2": + w.Header().Set("Content-Type", "font/woff2") + case ".ico": + w.Header().Set("Content-Type", "image/x-icon") + } +} + +// ErrorStatus represents the connection status for each component in the error page. +type ErrorStatus struct { + Proxy bool + Destination bool +} + +// ServeErrorPage renders a user-friendly error page with the given details. +func ServeErrorPage(w http.ResponseWriter, r *http.Request, code int, title, message, requestID string, status ErrorStatus) { + ServeHTTP(w, r, map[string]any{ + "page": "error", + "error": map[string]any{ + "code": code, + "title": title, + "message": message, + "proxy": status.Proxy, + "destination": status.Destination, + "requestId": requestID, + }, + }, code) +} + +// ServeAccessDeniedPage renders a simple access denied page without the connection status graph. +func ServeAccessDeniedPage(w http.ResponseWriter, r *http.Request, code int, title, message, requestID string) { + ServeHTTP(w, r, map[string]any{ + "page": "error", + "error": map[string]any{ + "code": code, + "title": title, + "message": message, + "requestId": requestID, + "simple": true, + "retryUrl": stripAuthParams(r.URL), + }, + }, code) +} + +// stripAuthParams returns the request URI with auth-related query parameters removed. +func stripAuthParams(u *url.URL) string { + q := u.Query() + q.Del("session_token") + q.Del("error") + q.Del("error_description") + clean := *u + clean.RawQuery = q.Encode() + return clean.RequestURI() +} diff --git a/shared/hash/argon2id/argon2id.go b/shared/hash/argon2id/argon2id.go new file mode 100644 index 000000000..8d493aaba --- /dev/null +++ b/shared/hash/argon2id/argon2id.go @@ -0,0 +1,136 @@ +package argon2id + +import ( + "crypto/rand" + "crypto/subtle" + "encoding/base64" + "errors" + "fmt" + "strings" + + "golang.org/x/crypto/argon2" +) + +const ( + argon2Memory = 19456 + argon2Iterations = 2 + argon2Parallelism = 1 + argon2SaltLength = 16 + argon2KeyLength = 32 +) + +var ( + // ErrInvalidHash is returned when the hash string format is invalid + ErrInvalidHash = errors.New("invalid hash format") + + // ErrIncompatibleVersion is returned when the Argon2 version is not supported + ErrIncompatibleVersion = errors.New("incompatible argon2 version") + + // ErrMismatchedHashAndPassword is returned when password verification fails + ErrMismatchedHashAndPassword = errors.New("password does not match hash") +) + +func Hash(secret string) (string, error) { + salt := make([]byte, argon2SaltLength) + if _, err := rand.Read(salt); err != nil { + return "", fmt.Errorf("failed to generate salt: %w", err) + } + + hash := argon2.IDKey( + []byte(secret), + salt, + argon2Iterations, + argon2Memory, + argon2Parallelism, + argon2KeyLength, + ) + + encodedSalt := base64.RawStdEncoding.EncodeToString(salt) + encodedHash := base64.RawStdEncoding.EncodeToString(hash) + + return fmt.Sprintf( + "$argon2id$v=%d$m=%d,t=%d,p=%d$%s$%s", + argon2.Version, + argon2Memory, + argon2Iterations, + argon2Parallelism, + encodedSalt, + encodedHash, + ), nil +} + +func Verify(secret, encodedHash string) error { + params, salt, hash, err := decodeHash(encodedHash) + if err != nil { + return err + } + + computedHash := argon2.IDKey( + []byte(secret), + salt, + params.iterations, + params.memory, + params.parallelism, + params.keyLength, + ) + + if subtle.ConstantTimeCompare(hash, computedHash) == 1 { + return nil + } + + return ErrMismatchedHashAndPassword +} + +type hashParams struct { + memory uint32 + iterations uint32 + parallelism uint8 + keyLength uint32 + version int +} + +func decodeHash(encodedHash string) (*hashParams, []byte, []byte, error) { + parts := strings.Split(encodedHash, "$") + + if len(parts) != 6 { + return nil, nil, nil, ErrInvalidHash + } + + if parts[1] != "argon2id" { + return nil, nil, nil, ErrInvalidHash + } + + var version int + if _, err := fmt.Sscanf(parts[2], "v=%d", &version); err != nil { + return nil, nil, nil, fmt.Errorf("%w: invalid version: %v", ErrInvalidHash, err) + } + if version != argon2.Version { + return nil, nil, nil, ErrIncompatibleVersion + } + + var memory, iterations uint32 + var parallelism uint8 + if _, err := fmt.Sscanf(parts[3], "m=%d,t=%d,p=%d", &memory, &iterations, ¶llelism); err != nil { + return nil, nil, nil, fmt.Errorf("%w: invalid parameters: %v", ErrInvalidHash, err) + } + + salt, err := base64.RawStdEncoding.DecodeString(parts[4]) + if err != nil { + return nil, nil, nil, fmt.Errorf("%w: invalid salt encoding: %v", ErrInvalidHash, err) + } + + hash, err := base64.RawStdEncoding.DecodeString(parts[5]) + if err != nil { + return nil, nil, nil, fmt.Errorf("%w: invalid hash encoding: %v", ErrInvalidHash, err) + } + + params := &hashParams{ + memory: memory, + iterations: iterations, + parallelism: parallelism, + keyLength: uint32(len(hash)), + version: version, + } + + return params, salt, hash, nil +} diff --git a/shared/hash/argon2id/argon2id_test.go b/shared/hash/argon2id/argon2id_test.go new file mode 100644 index 000000000..f907a1687 --- /dev/null +++ b/shared/hash/argon2id/argon2id_test.go @@ -0,0 +1,327 @@ +package argon2id + +import ( + "errors" + "strings" + "testing" + + "golang.org/x/crypto/argon2" +) + +func TestHash(t *testing.T) { + tests := []struct { + name string + secret string + }{ + { + name: "simple password", + secret: "password123", + }, + { + name: "complex password with special chars", + secret: "P@ssw0rd!#$%^&*()", + }, + { + name: "long password", + secret: strings.Repeat("a", 100), + }, + { + name: "empty password", + secret: "", + }, + { + name: "unicode password", + secret: "пароль密码🔐", + }, + { + name: "numeric PIN", + secret: "123456", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + hash, err := Hash(tt.secret) + if err != nil { + t.Fatalf("Hash() error = %v", err) + } + + // Verify hash format + if !strings.HasPrefix(hash, "$argon2id$") { + t.Errorf("Hash() = %v, want hash starting with $argon2id$", hash) + } + + // Verify hash has correct number of components + parts := strings.Split(hash, "$") + if len(parts) != 6 { + t.Errorf("Hash() has %d parts, want 6", len(parts)) + } + + // Verify version is present + if !strings.HasPrefix(hash, "$argon2id$v=") { + t.Errorf("Hash() missing version, got %v", hash) + } + + // Verify each hash is unique (different salt) + hash2, err := Hash(tt.secret) + if err != nil { + t.Fatalf("Hash() second call error = %v", err) + } + if hash == hash2 { + t.Error("Hash() produces identical hashes for same input (salt not random)") + } + }) + } +} + +func TestVerify(t *testing.T) { + tests := []struct { + name string + secret string + wantError error + }{ + { + name: "valid password", + secret: "correctPassword", + wantError: nil, + }, + { + name: "valid PIN", + secret: "1234", + wantError: nil, + }, + { + name: "empty secret", + secret: "", + wantError: nil, + }, + { + name: "unicode secret", + secret: "密码🔐", + wantError: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Generate hash + hash, err := Hash(tt.secret) + if err != nil { + t.Fatalf("Hash() error = %v", err) + } + + // Verify correct secret + err = Verify(tt.secret, hash) + if !errors.Is(err, tt.wantError) { + t.Errorf("Verify() error = %v, wantError %v", err, tt.wantError) + } + }) + } +} + +func TestVerifyIncorrectPassword(t *testing.T) { + secret := "correctPassword" + wrongSecret := "wrongPassword" + + hash, err := Hash(secret) + if err != nil { + t.Fatalf("Hash() error = %v", err) + } + + err = Verify(wrongSecret, hash) + if !errors.Is(err, ErrMismatchedHashAndPassword) { + t.Errorf("Verify() error = %v, want %v", err, ErrMismatchedHashAndPassword) + } +} + +func TestVerifyInvalidHashFormat(t *testing.T) { + tests := []struct { + name string + invalidHash string + expectedError error + }{ + { + name: "empty hash", + invalidHash: "", + expectedError: ErrInvalidHash, + }, + { + name: "wrong algorithm", + invalidHash: "$bcrypt$v=19$m=19456,t=2,p=1$c2FsdA$aGFzaA", + expectedError: ErrInvalidHash, + }, + { + name: "missing parts", + invalidHash: "$argon2id$v=19$m=19456", + expectedError: ErrInvalidHash, + }, + { + name: "too many parts", + invalidHash: "$argon2id$v=19$m=19456,t=2,p=1$salt$hash$extra", + expectedError: ErrInvalidHash, + }, + { + name: "invalid version format", + invalidHash: "$argon2id$vXX$m=19456,t=2,p=1$c2FsdA$aGFzaA", + expectedError: ErrInvalidHash, + }, + { + name: "invalid parameters format", + invalidHash: "$argon2id$v=19$mXX,tYY,pZZ$c2FsdA$aGFzaA", + expectedError: ErrInvalidHash, + }, + { + name: "invalid salt base64", + invalidHash: "$argon2id$v=19$m=19456,t=2,p=1$not-valid-base64!@#$aGFzaA", + expectedError: ErrInvalidHash, + }, + { + name: "invalid hash base64", + invalidHash: "$argon2id$v=19$m=19456,t=2,p=1$c2FsdA$not-valid-base64!@#", + expectedError: ErrInvalidHash, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := Verify("password", tt.invalidHash) + if err == nil { + t.Errorf("Verify() expected error, got nil") + return + } + + if !errors.Is(err, tt.expectedError) && !strings.Contains(err.Error(), tt.expectedError.Error()) { + t.Errorf("Verify() error = %v, want error containing %v", err, tt.expectedError) + } + }) + } +} + +func TestVerifyIncompatibleVersion(t *testing.T) { + // Manually craft a hash with wrong version + invalidVersionHash := "$argon2id$v=18$m=19456,t=2,p=1$c2FsdDEyMzQ1Njc4OTA$aGFzaDEyMzQ1Njc4OTBhYmNkZWZnaGlqa2xtbm9w" + + err := Verify("password", invalidVersionHash) + if !errors.Is(err, ErrIncompatibleVersion) { + t.Errorf("Verify() error = %v, want %v", err, ErrIncompatibleVersion) + } +} + +func TestHashDeterminism(t *testing.T) { + // Ensure different hashes for same password (random salt) + password := "testPassword" + hashes := make(map[string]bool) + + for i := 0; i < 10; i++ { + hash, err := Hash(password) + if err != nil { + t.Fatalf("Hash() error = %v", err) + } + if hashes[hash] { + t.Error("Hash() produced duplicate hash (salt generation may be broken)") + } + hashes[hash] = true + } + + if len(hashes) != 10 { + t.Errorf("Expected 10 unique hashes, got %d", len(hashes)) + } +} + +func TestOWASPCompliance(t *testing.T) { + // Test that generated hashes use OWASP-recommended parameters + secret := "testPassword" + hash, err := Hash(secret) + if err != nil { + t.Fatalf("Hash() error = %v", err) + } + + params, _, _, err := decodeHash(hash) + if err != nil { + t.Fatalf("decodeHash() error = %v", err) + } + + // Verify OWASP minimum baseline parameters + if params.memory != 19456 { + t.Errorf("memory = %d, want 19456 (OWASP baseline)", params.memory) + } + if params.iterations != 2 { + t.Errorf("iterations = %d, want 2 (OWASP baseline)", params.iterations) + } + if params.parallelism != 1 { + t.Errorf("parallelism = %d, want 1 (OWASP baseline)", params.parallelism) + } + if params.keyLength != 32 { + t.Errorf("keyLength = %d, want 32", params.keyLength) + } + if params.version != argon2.Version { + t.Errorf("version = %d, want %d", params.version, argon2.Version) + } +} + +func TestConstantTimeComparison(t *testing.T) { + // This test verifies that Verify() is using constant-time comparison + // by ensuring it doesn't fail differently for similar vs different hashes + secret := "password123" + wrongSecret := "password124" // One character different + + hash, err := Hash(secret) + if err != nil { + t.Fatalf("Hash() error = %v", err) + } + + // Both wrong passwords should return the same error + err1 := Verify(wrongSecret, hash) + err2 := Verify("completelydifferent", hash) + + if !errors.Is(err1, ErrMismatchedHashAndPassword) { + t.Errorf("Verify() error = %v, want %v", err1, ErrMismatchedHashAndPassword) + } + if !errors.Is(err2, ErrMismatchedHashAndPassword) { + t.Errorf("Verify() error = %v, want %v", err2, ErrMismatchedHashAndPassword) + } + + // Errors should be identical (same error type and message) + if err1.Error() != err2.Error() { + t.Error("Verify() returns different errors for different wrong passwords (potential timing attack)") + } +} + +func TestCaseSensitivity(t *testing.T) { + // Passwords should be case-sensitive + secret := "Password123" + wrongSecret := "password123" + + hash, err := Hash(secret) + if err != nil { + t.Fatalf("Hash() error = %v", err) + } + + // Correct password should verify + if err := Verify(secret, hash); err != nil { + t.Errorf("Verify() with correct password error = %v, want nil", err) + } + + // Wrong case should not verify + if err := Verify(wrongSecret, hash); !errors.Is(err, ErrMismatchedHashAndPassword) { + t.Errorf("Verify() with wrong case error = %v, want %v", err, ErrMismatchedHashAndPassword) + } +} + +// Benchmark tests +func BenchmarkHash(b *testing.B) { + secret := "benchmarkPassword123" + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = Hash(secret) + } +} + +func BenchmarkVerify(b *testing.B) { + secret := "benchmarkPassword123" + hash, _ := Hash(secret) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = Verify(secret, hash) + } +} diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 5a504c471..1f4a163e5 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -36,6 +36,8 @@ tags: x-cloud-only: true - name: Identity Providers description: Interact with and view information about identity providers. + - name: Services + description: Interact with and view information about reverse proxy services. - name: Instance description: Instance setup and status endpoints for initial configuration. - name: Jobs @@ -2244,7 +2246,53 @@ components: activity_code: description: The string code of the activity that occurred during the event type: string - enum: [ "peer.user.add", "peer.setupkey.add", "user.join", "user.invite", "account.create", "account.delete", "user.peer.delete", "rule.add", "rule.update", "rule.delete", "policy.add", "policy.update", "policy.delete", "setupkey.add", "setupkey.update", "setupkey.revoke", "setupkey.overuse", "setupkey.delete", "group.add", "group.update", "group.delete", "peer.group.add", "peer.group.delete", "user.group.add", "user.group.delete", "user.role.update", "setupkey.group.add", "setupkey.group.delete", "dns.setting.disabled.management.group.add", "dns.setting.disabled.management.group.delete", "route.add", "route.delete", "route.update", "peer.ssh.enable", "peer.ssh.disable", "peer.rename", "peer.login.expiration.enable", "peer.login.expiration.disable", "nameserver.group.add", "nameserver.group.delete", "nameserver.group.update", "account.setting.peer.login.expiration.update", "account.setting.peer.login.expiration.enable", "account.setting.peer.login.expiration.disable", "personal.access.token.create", "personal.access.token.delete", "service.user.create", "service.user.delete", "user.block", "user.unblock", "user.delete", "user.peer.login", "peer.login.expire", "dashboard.login", "integration.create", "integration.update", "integration.delete", "account.setting.peer.approval.enable", "account.setting.peer.approval.disable", "peer.approve", "peer.approval.revoke", "transferred.owner.role", "posture.check.create", "posture.check.update", "posture.check.delete", "peer.inactivity.expiration.enable", "peer.inactivity.expiration.disable", "account.peer.inactivity.expiration.enable", "account.peer.inactivity.expiration.disable", "account.peer.inactivity.expiration.update", "account.setting.group.propagation.enable", "account.setting.group.propagation.disable", "account.setting.routing.peer.dns.resolution.enable", "account.setting.routing.peer.dns.resolution.disable", "network.create", "network.update", "network.delete", "network.resource.create", "network.resource.update", "network.resource.delete", "network.router.create", "network.router.update", "network.router.delete", "resource.group.add", "resource.group.delete", "account.dns.domain.update", "account.setting.lazy.connection.enable", "account.setting.lazy.connection.disable", "account.network.range.update", "peer.ip.update", "user.approve", "user.reject", "user.create", "account.settings.auto.version.update", "identityprovider.create", "identityprovider.update", "identityprovider.delete", "dns.zone.create", "dns.zone.update", "dns.zone.delete", "dns.zone.record.create", "dns.zone.record.update", "dns.zone.record.delete", "peer.job.create", "user.password.change", "user.invite.link.create", "user.invite.link.accept", "user.invite.link.regenerate", "user.invite.link.delete" ] + enum: [ + "peer.user.add", "peer.setupkey.add", "user.join", "user.invite", "account.create", "account.delete", + "user.peer.delete", "rule.add", "rule.update", "rule.delete", + "policy.add", "policy.update", "policy.delete", + "setupkey.add", "setupkey.update", "setupkey.revoke", "setupkey.overuse", "setupkey.delete", + "group.add", "group.update", "group.delete", + "peer.group.add", "peer.group.delete", + "user.group.add", "user.group.delete", "user.role.update", + "setupkey.group.add", "setupkey.group.delete", + "dns.setting.disabled.management.group.add", "dns.setting.disabled.management.group.delete", + "route.add", "route.delete", "route.update", + "peer.ssh.enable", "peer.ssh.disable", "peer.rename", + "peer.login.expiration.enable", "peer.login.expiration.disable", + "nameserver.group.add", "nameserver.group.delete", "nameserver.group.update", + "account.setting.peer.login.expiration.update", "account.setting.peer.login.expiration.enable", "account.setting.peer.login.expiration.disable", + "personal.access.token.create", "personal.access.token.delete", + "service.user.create", "service.user.delete", + "user.block", "user.unblock", "user.delete", + "user.peer.login", "peer.login.expire", + "dashboard.login", + "integration.create", "integration.update", "integration.delete", + "account.setting.peer.approval.enable", "account.setting.peer.approval.disable", + "peer.approve", "peer.approval.revoke", + "transferred.owner.role", + "posture.check.create", "posture.check.update", "posture.check.delete", + "peer.inactivity.expiration.enable", "peer.inactivity.expiration.disable", + "account.peer.inactivity.expiration.enable", "account.peer.inactivity.expiration.disable", "account.peer.inactivity.expiration.update", + "account.setting.group.propagation.enable", "account.setting.group.propagation.disable", + "account.setting.routing.peer.dns.resolution.enable", "account.setting.routing.peer.dns.resolution.disable", + "network.create", "network.update", "network.delete", + "network.resource.create", "network.resource.update", "network.resource.delete", + "network.router.create", "network.router.update", "network.router.delete", + "resource.group.add", "resource.group.delete", + "account.dns.domain.update", + "account.setting.lazy.connection.enable", "account.setting.lazy.connection.disable", + "account.network.range.update", + "peer.ip.update", + "user.approve", "user.reject", "user.create", + "account.settings.auto.version.update", + "identityprovider.create", "identityprovider.update", "identityprovider.delete", + "dns.zone.create", "dns.zone.update", "dns.zone.delete", + "dns.zone.record.create", "dns.zone.record.update", "dns.zone.record.delete", + "peer.job.create", + "user.password.change", + "user.invite.link.create", "user.invite.link.accept", "user.invite.link.regenerate", "user.invite.link.delete", + "service.create", "service.update", "service.delete" + ] example: route.add initiator_id: description: The ID of the initiator of the event. E.g., an ID of a user that triggered the event. @@ -2702,6 +2750,105 @@ components: - page_size - total_records - total_pages + ProxyAccessLog: + type: object + properties: + id: + type: string + description: "Unique identifier for the access log entry" + example: "ch8i4ug6lnn4g9hqv7m0" + service_id: + type: string + description: "ID of the service that handled the request" + example: "ch8i4ug6lnn4g9hqv7m0" + timestamp: + type: string + format: date-time + description: "Timestamp when the request was made" + example: "2024-01-31T15:30:00Z" + method: + type: string + description: "HTTP method of the request" + example: "GET" + host: + type: string + description: "Host header of the request" + example: "example.com" + path: + type: string + description: "Path of the request" + example: "/api/users" + duration_ms: + type: integer + description: "Duration of the request in milliseconds" + example: 150 + status_code: + type: integer + description: "HTTP status code returned" + example: 200 + source_ip: + type: string + description: "Source IP address of the request" + example: "192.168.1.100" + reason: + type: string + description: "Reason for the request result (e.g., authentication failure)" + example: "Authentication failed" + user_id: + type: string + description: "ID of the authenticated user, if applicable" + example: "user-123" + auth_method_used: + type: string + description: "Authentication method used (e.g., password, pin, oidc)" + example: "oidc" + country_code: + type: string + description: "Country code from geolocation" + example: "US" + city_name: + type: string + description: "City name from geolocation" + example: "San Francisco" + required: + - id + - service_id + - timestamp + - method + - host + - path + - duration_ms + - status_code + ProxyAccessLogsResponse: + type: object + properties: + data: + type: array + description: List of proxy access log entries + items: + $ref: "#/components/schemas/ProxyAccessLog" + page: + type: integer + description: Current page number + example: 1 + page_size: + type: integer + description: Number of items per page + example: 50 + total_records: + type: integer + description: Total number of log records available + example: 523 + total_pages: + type: integer + description: Total number of pages available + example: 11 + required: + - data + - page + - page_size + - total_records + - total_pages IdentityProviderType: type: string description: Type of identity provider @@ -2767,6 +2914,251 @@ components: - issuer - client_id - client_secret + Service: + type: object + properties: + id: + type: string + description: Service ID + name: + type: string + description: Service name + domain: + type: string + description: Domain for the service + proxy_cluster: + type: string + description: The proxy cluster handling this service (derived from domain) + example: "eu.proxy.netbird.io" + targets: + type: array + items: + $ref: '#/components/schemas/ServiceTarget' + description: List of target backends for this service + enabled: + type: boolean + description: Whether the service is enabled + pass_host_header: + type: boolean + description: When true, the original client Host header is passed through to the backend instead of being rewritten to the backend's address + rewrite_redirects: + type: boolean + description: When true, Location headers in backend responses are rewritten to replace the backend address with the public-facing domain + auth: + $ref: '#/components/schemas/ServiceAuthConfig' + meta: + $ref: '#/components/schemas/ServiceMeta' + required: + - id + - name + - domain + - targets + - enabled + - auth + - meta + ServiceMeta: + type: object + properties: + created_at: + type: string + format: date-time + description: Timestamp when the service was created + example: "2024-02-03T10:30:00Z" + certificate_issued_at: + type: string + format: date-time + description: Timestamp when the certificate was issued (empty if not yet issued) + example: "2024-02-03T10:35:00Z" + status: + type: string + enum: + - pending + - active + - tunnel_not_created + - certificate_pending + - certificate_failed + - error + description: Current status of the service + example: "active" + required: + - created_at + - status + ServiceRequest: + type: object + properties: + name: + type: string + description: Service name + domain: + type: string + description: Domain for the service + targets: + type: array + items: + $ref: '#/components/schemas/ServiceTarget' + description: List of target backends for this service + enabled: + type: boolean + description: Whether the service is enabled + default: true + pass_host_header: + type: boolean + description: When true, the original client Host header is passed through to the backend instead of being rewritten to the backend's address + rewrite_redirects: + type: boolean + description: When true, Location headers in backend responses are rewritten to replace the backend address with the public-facing domain + auth: + $ref: '#/components/schemas/ServiceAuthConfig' + required: + - name + - domain + - targets + - auth + - enabled + ServiceTarget: + type: object + properties: + target_id: + type: string + description: Target ID + target_type: + type: string + description: Target type (e.g., "peer", "resource") + enum: [peer, resource] + path: + type: string + description: URL path prefix for this target + protocol: + type: string + description: Protocol to use when connecting to the backend + enum: [http, https] + host: + type: string + description: Backend ip or domain for this target + port: + type: integer + description: Backend port for this target. Use 0 or omit to use the scheme default (80 for http, 443 for https). + enabled: + type: boolean + description: Whether this target is enabled + required: + - target_id + - target_type + - protocol + - port + - enabled + ServiceAuthConfig: + type: object + properties: + password_auth: + $ref: '#/components/schemas/PasswordAuthConfig' + pin_auth: + $ref: '#/components/schemas/PINAuthConfig' + bearer_auth: + $ref: '#/components/schemas/BearerAuthConfig' + link_auth: + $ref: '#/components/schemas/LinkAuthConfig' + PasswordAuthConfig: + type: object + properties: + enabled: + type: boolean + description: Whether password auth is enabled + password: + type: string + description: Auth password + required: + - enabled + - password + PINAuthConfig: + type: object + properties: + enabled: + type: boolean + description: Whether PIN auth is enabled + pin: + type: string + description: PIN value + required: + - enabled + - pin + BearerAuthConfig: + type: object + properties: + enabled: + type: boolean + description: Whether bearer auth is enabled + distribution_groups: + type: array + items: + type: string + description: List of group IDs that can use bearer auth + required: + - enabled + LinkAuthConfig: + type: object + properties: + enabled: + type: boolean + description: Whether link auth is enabled + required: + - enabled + ProxyCluster: + type: object + description: A proxy cluster represents a group of proxy nodes serving the same address + properties: + address: + type: string + description: Cluster address used for CNAME targets + example: "eu.proxy.netbird.io" + connected_proxies: + type: integer + description: Number of proxy nodes connected in this cluster + example: 3 + required: + - address + - connected_proxies + ReverseProxyDomainType: + type: string + description: Type of Reverse Proxy Domain + enum: + - free + - custom + example: free + ReverseProxyDomain: + type: object + properties: + id: + type: string + description: Domain ID + domain: + type: string + description: Domain name + validated: + type: boolean + description: Whether the domain has been validated + type: + $ref: '#/components/schemas/ReverseProxyDomainType' + target_cluster: + type: string + description: The proxy cluster this domain is validated against (only for custom domains) + required: + - id + - domain + - validated + - type + ReverseProxyDomainRequest: + type: object + properties: + domain: + type: string + description: Domain name + target_cluster: + type: string + description: The proxy cluster this domain should be validated against + required: + - domain + - target_cluster InstanceStatus: type: object description: Instance status information @@ -6996,6 +7388,106 @@ paths: "$ref": "#/components/responses/forbidden" '500': "$ref": "#/components/responses/internal_error" + /api/events/proxy: + get: + summary: List all Reverse Proxy Access Logs + description: Returns a paginated list of all reverse proxy access log entries + tags: [ Events ] + parameters: + - in: query + name: page + schema: + type: integer + default: 1 + minimum: 1 + description: Page number for pagination (1-indexed) + - in: query + name: page_size + schema: + type: integer + default: 50 + minimum: 1 + maximum: 100 + description: Number of items per page (max 100) + - in: query + name: search + schema: + type: string + description: General search across request ID, host, path, source IP, user email, and user name + - in: query + name: source_ip + schema: + type: string + description: Filter by source IP address + - in: query + name: host + schema: + type: string + description: Filter by host header + - in: query + name: path + schema: + type: string + description: Filter by request path (supports partial matching) + - in: query + name: user_id + schema: + type: string + description: Filter by authenticated user ID + - in: query + name: user_email + schema: + type: string + description: Filter by user email (partial matching) + - in: query + name: user_name + schema: + type: string + description: Filter by user name (partial matching) + - in: query + name: method + schema: + type: string + enum: [GET, POST, PUT, PATCH, DELETE, HEAD, OPTIONS] + description: Filter by HTTP method + - in: query + name: status + schema: + type: string + enum: [success, failed] + description: Filter by status (success = 2xx/3xx, failed = 1xx/4xx/5xx) + - in: query + name: status_code + schema: + type: integer + minimum: 100 + maximum: 599 + description: Filter by HTTP status code + - in: query + name: start_date + schema: + type: string + format: date-time + description: Filter by timestamp >= start_date (RFC3339 format) + - in: query + name: end_date + schema: + type: string + format: date-time + description: Filter by timestamp <= end_date (RFC3339 format) + responses: + "200": + description: Paginated list of reverse proxy access logs + content: + application/json: + schema: + $ref: "#/components/schemas/ProxyAccessLogsResponse" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '500': + "$ref": "#/components/responses/internal_error" /api/posture-checks: get: summary: List all Posture Checks @@ -9063,3 +9555,286 @@ paths: application/json: schema: $ref: '#/components/schemas/ErrorResponse' + /api/reverse-proxies/services: + get: + summary: List all Services + description: Returns a list of all reverse proxy services + tags: [ Services ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + responses: + '200': + description: A JSON Array of services + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Service' + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '500': + "$ref": "#/components/responses/internal_error" + post: + summary: Create a Service + description: Creates a new reverse proxy service + tags: [ Services ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + requestBody: + description: New service request + content: + application/json: + schema: + $ref: '#/components/schemas/ServiceRequest' + responses: + '200': + description: Service created + content: + application/json: + schema: + $ref: '#/components/schemas/Service' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '500': + "$ref": "#/components/responses/internal_error" + /api/reverse-proxies/clusters: + get: + summary: List available proxy clusters + description: Returns a list of available proxy clusters with their connection status + tags: [ Services ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + responses: + '200': + description: A JSON Array of proxy clusters + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ProxyCluster' + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '500': + "$ref": "#/components/responses/internal_error" + /api/reverse-proxies/services/{serviceId}: + get: + summary: Retrieve a Service + description: Get information about a specific reverse proxy service + tags: [ Services ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: serviceId + required: true + schema: + type: string + description: The unique identifier of a service + responses: + '200': + description: A service object + content: + application/json: + schema: + $ref: '#/components/schemas/Service' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + put: + summary: Update a Service + description: Update an existing service + tags: [ Services ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: serviceId + required: true + schema: + type: string + description: The unique identifier of a service + requestBody: + description: Service update request + content: + application/json: + schema: + $ref: '#/components/schemas/ServiceRequest' + responses: + '200': + description: Service updated + content: + application/json: + schema: + $ref: '#/components/schemas/Service' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + delete: + summary: Delete a Service + description: Delete an existing service + tags: [ Services ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: serviceId + required: true + schema: + type: string + description: The unique identifier of a service + responses: + '200': + description: Service deleted + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + /api/reverse-proxies/domains: + get: + summary: Retrieve Service Domains + description: Get information about domains that can be used for service endpoints. + tags: [ Services ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + responses: + '200': + description: A JSON Array of ReverseProxyDomains + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ReverseProxyDomain' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + post: + summary: Create a Custom domain + description: Create a new Custom domain for use with service endpoints, this will trigger an initial validation check + tags: [ Services ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + requestBody: + description: Custom domain creation request + content: + application/json: + schema: + $ref: '#/components/schemas/ReverseProxyDomainRequest' + responses: + '200': + description: Service created + content: + application/json: + schema: + $ref: '#/components/schemas/Service' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + /api/reverse-proxies/domains/{domainId}: + delete: + summary: Delete a Custom domain + description: Delete an existing service custom domain + tags: [ Services ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: domainId + required: true + schema: + type: string + description: The custom domain ID + responses: + '204': + description: Service custom domain deleted + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + /api/reverse-proxies/domains/{domainId}/validate: + get: + summary: Validate a custom domain + description: Trigger domain ownership validation for a custom domain + tags: [ Services ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: domainId + required: true + schema: + type: string + description: The custom domain ID + responses: + '202': + description: Reverse proxy custom domain validation triggered + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index 3f16af46b..7a7e75855 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -114,6 +114,9 @@ const ( EventActivityCodeRuleAdd EventActivityCode = "rule.add" EventActivityCodeRuleDelete EventActivityCode = "rule.delete" EventActivityCodeRuleUpdate EventActivityCode = "rule.update" + EventActivityCodeServiceCreate EventActivityCode = "service.create" + EventActivityCodeServiceDelete EventActivityCode = "service.delete" + EventActivityCodeServiceUpdate EventActivityCode = "service.update" EventActivityCodeServiceUserCreate EventActivityCode = "service.user.create" EventActivityCodeServiceUserDelete EventActivityCode = "service.user.delete" EventActivityCodeSetupkeyAdd EventActivityCode = "setupkey.add" @@ -288,6 +291,12 @@ const ( ResourceTypeSubnet ResourceType = "subnet" ) +// Defines values for ReverseProxyDomainType. +const ( + ReverseProxyDomainTypeCustom ReverseProxyDomainType = "custom" + ReverseProxyDomainTypeFree ReverseProxyDomainType = "free" +) + // Defines values for SentinelOneMatchAttributesNetworkStatus. const ( SentinelOneMatchAttributesNetworkStatusConnected SentinelOneMatchAttributesNetworkStatus = "connected" @@ -295,6 +304,28 @@ const ( SentinelOneMatchAttributesNetworkStatusQuarantined SentinelOneMatchAttributesNetworkStatus = "quarantined" ) +// Defines values for ServiceMetaStatus. +const ( + ServiceMetaStatusActive ServiceMetaStatus = "active" + ServiceMetaStatusCertificateFailed ServiceMetaStatus = "certificate_failed" + ServiceMetaStatusCertificatePending ServiceMetaStatus = "certificate_pending" + ServiceMetaStatusError ServiceMetaStatus = "error" + ServiceMetaStatusPending ServiceMetaStatus = "pending" + ServiceMetaStatusTunnelNotCreated ServiceMetaStatus = "tunnel_not_created" +) + +// Defines values for ServiceTargetProtocol. +const ( + ServiceTargetProtocolHttp ServiceTargetProtocol = "http" + ServiceTargetProtocolHttps ServiceTargetProtocol = "https" +) + +// Defines values for ServiceTargetTargetType. +const ( + ServiceTargetTargetTypePeer ServiceTargetTargetType = "peer" + ServiceTargetTargetTypeResource ServiceTargetTargetType = "resource" +) + // Defines values for TenantResponseStatus. const ( TenantResponseStatusActive TenantResponseStatus = "active" @@ -336,6 +367,23 @@ const ( GetApiEventsNetworkTrafficParamsDirectionINGRESS GetApiEventsNetworkTrafficParamsDirection = "INGRESS" ) +// Defines values for GetApiEventsProxyParamsMethod. +const ( + GetApiEventsProxyParamsMethodDELETE GetApiEventsProxyParamsMethod = "DELETE" + GetApiEventsProxyParamsMethodGET GetApiEventsProxyParamsMethod = "GET" + GetApiEventsProxyParamsMethodHEAD GetApiEventsProxyParamsMethod = "HEAD" + GetApiEventsProxyParamsMethodOPTIONS GetApiEventsProxyParamsMethod = "OPTIONS" + GetApiEventsProxyParamsMethodPATCH GetApiEventsProxyParamsMethod = "PATCH" + GetApiEventsProxyParamsMethodPOST GetApiEventsProxyParamsMethod = "POST" + GetApiEventsProxyParamsMethodPUT GetApiEventsProxyParamsMethod = "PUT" +) + +// Defines values for GetApiEventsProxyParamsStatus. +const ( + GetApiEventsProxyParamsStatusFailed GetApiEventsProxyParamsStatus = "failed" + GetApiEventsProxyParamsStatusSuccess GetApiEventsProxyParamsStatus = "success" +) + // Defines values for PutApiIntegrationsMspTenantsIdInviteJSONBodyValue. const ( PutApiIntegrationsMspTenantsIdInviteJSONBodyValueAccept PutApiIntegrationsMspTenantsIdInviteJSONBodyValue = "accept" @@ -492,6 +540,15 @@ type AvailablePorts struct { Udp int `json:"udp"` } +// BearerAuthConfig defines model for BearerAuthConfig. +type BearerAuthConfig struct { + // DistributionGroups List of group IDs that can use bearer auth + DistributionGroups *[]string `json:"distribution_groups,omitempty"` + + // Enabled Whether bearer auth is enabled + Enabled bool `json:"enabled"` +} + // BundleParameters These parameters control what gets included in the bundle and how it is processed. type BundleParameters struct { // Anonymize Whether sensitive data should be anonymized in the bundle. @@ -1329,6 +1386,12 @@ type JobResponse struct { // JobResponseStatus defines model for JobResponse.Status. type JobResponseStatus string +// LinkAuthConfig defines model for LinkAuthConfig. +type LinkAuthConfig struct { + // Enabled Whether link auth is enabled + Enabled bool `json:"enabled"` +} + // Location Describe geographical location information type Location struct { // CityName Commonly used English name of the city @@ -1699,6 +1762,24 @@ type OSVersionCheck struct { Windows *MinKernelVersionCheck `json:"windows,omitempty"` } +// PINAuthConfig defines model for PINAuthConfig. +type PINAuthConfig struct { + // Enabled Whether PIN auth is enabled + Enabled bool `json:"enabled"` + + // Pin PIN value + Pin string `json:"pin"` +} + +// PasswordAuthConfig defines model for PasswordAuthConfig. +type PasswordAuthConfig struct { + // Enabled Whether password auth is enabled + Enabled bool `json:"enabled"` + + // Password Auth password + Password string `json:"password"` +} + // PasswordChangeRequest defines model for PasswordChangeRequest. type PasswordChangeRequest struct { // NewPassword The new password to set @@ -2301,6 +2382,78 @@ type Product struct { Prices []Price `json:"prices"` } +// ProxyAccessLog defines model for ProxyAccessLog. +type ProxyAccessLog struct { + // AuthMethodUsed Authentication method used (e.g., password, pin, oidc) + AuthMethodUsed *string `json:"auth_method_used,omitempty"` + + // CityName City name from geolocation + CityName *string `json:"city_name,omitempty"` + + // CountryCode Country code from geolocation + CountryCode *string `json:"country_code,omitempty"` + + // DurationMs Duration of the request in milliseconds + DurationMs int `json:"duration_ms"` + + // Host Host header of the request + Host string `json:"host"` + + // Id Unique identifier for the access log entry + Id string `json:"id"` + + // Method HTTP method of the request + Method string `json:"method"` + + // Path Path of the request + Path string `json:"path"` + + // Reason Reason for the request result (e.g., authentication failure) + Reason *string `json:"reason,omitempty"` + + // ServiceId ID of the service that handled the request + ServiceId string `json:"service_id"` + + // SourceIp Source IP address of the request + SourceIp *string `json:"source_ip,omitempty"` + + // StatusCode HTTP status code returned + StatusCode int `json:"status_code"` + + // Timestamp Timestamp when the request was made + Timestamp time.Time `json:"timestamp"` + + // UserId ID of the authenticated user, if applicable + UserId *string `json:"user_id,omitempty"` +} + +// ProxyAccessLogsResponse defines model for ProxyAccessLogsResponse. +type ProxyAccessLogsResponse struct { + // Data List of proxy access log entries + Data []ProxyAccessLog `json:"data"` + + // Page Current page number + Page int `json:"page"` + + // PageSize Number of items per page + PageSize int `json:"page_size"` + + // TotalPages Total number of pages available + TotalPages int `json:"total_pages"` + + // TotalRecords Total number of log records available + TotalRecords int `json:"total_records"` +} + +// ProxyCluster A proxy cluster represents a group of proxy nodes serving the same address +type ProxyCluster struct { + // Address Cluster address used for CNAME targets + Address string `json:"address"` + + // ConnectedProxies Number of proxy nodes connected in this cluster + ConnectedProxies int `json:"connected_proxies"` +} + // Resource defines model for Resource. type Resource struct { // Id ID of the resource @@ -2311,6 +2464,36 @@ type Resource struct { // ResourceType defines model for ResourceType. type ResourceType string +// ReverseProxyDomain defines model for ReverseProxyDomain. +type ReverseProxyDomain struct { + // Domain Domain name + Domain string `json:"domain"` + + // Id Domain ID + Id string `json:"id"` + + // TargetCluster The proxy cluster this domain is validated against (only for custom domains) + TargetCluster *string `json:"target_cluster,omitempty"` + + // Type Type of Reverse Proxy Domain + Type ReverseProxyDomainType `json:"type"` + + // Validated Whether the domain has been validated + Validated bool `json:"validated"` +} + +// ReverseProxyDomainRequest defines model for ReverseProxyDomainRequest. +type ReverseProxyDomainRequest struct { + // Domain Domain name + Domain string `json:"domain"` + + // TargetCluster The proxy cluster this domain should be validated against + TargetCluster string `json:"target_cluster"` +} + +// ReverseProxyDomainType Type of Reverse Proxy Domain +type ReverseProxyDomainType string + // Route defines model for Route. type Route struct { // AccessControlGroups Access control group identifier associated with route. @@ -2470,6 +2653,112 @@ type SentinelOneMatchAttributes struct { // SentinelOneMatchAttributesNetworkStatus The current network connectivity status of the device type SentinelOneMatchAttributesNetworkStatus string +// Service defines model for Service. +type Service struct { + Auth ServiceAuthConfig `json:"auth"` + + // Domain Domain for the service + Domain string `json:"domain"` + + // Enabled Whether the service is enabled + Enabled bool `json:"enabled"` + + // Id Service ID + Id string `json:"id"` + Meta ServiceMeta `json:"meta"` + + // Name Service name + Name string `json:"name"` + + // PassHostHeader When true, the original client Host header is passed through to the backend instead of being rewritten to the backend's address + PassHostHeader *bool `json:"pass_host_header,omitempty"` + + // ProxyCluster The proxy cluster handling this service (derived from domain) + ProxyCluster *string `json:"proxy_cluster,omitempty"` + + // RewriteRedirects When true, Location headers in backend responses are rewritten to replace the backend address with the public-facing domain + RewriteRedirects *bool `json:"rewrite_redirects,omitempty"` + + // Targets List of target backends for this service + Targets []ServiceTarget `json:"targets"` +} + +// ServiceAuthConfig defines model for ServiceAuthConfig. +type ServiceAuthConfig struct { + BearerAuth *BearerAuthConfig `json:"bearer_auth,omitempty"` + LinkAuth *LinkAuthConfig `json:"link_auth,omitempty"` + PasswordAuth *PasswordAuthConfig `json:"password_auth,omitempty"` + PinAuth *PINAuthConfig `json:"pin_auth,omitempty"` +} + +// ServiceMeta defines model for ServiceMeta. +type ServiceMeta struct { + // CertificateIssuedAt Timestamp when the certificate was issued (empty if not yet issued) + CertificateIssuedAt *time.Time `json:"certificate_issued_at,omitempty"` + + // CreatedAt Timestamp when the service was created + CreatedAt time.Time `json:"created_at"` + + // Status Current status of the service + Status ServiceMetaStatus `json:"status"` +} + +// ServiceMetaStatus Current status of the service +type ServiceMetaStatus string + +// ServiceRequest defines model for ServiceRequest. +type ServiceRequest struct { + Auth ServiceAuthConfig `json:"auth"` + + // Domain Domain for the service + Domain string `json:"domain"` + + // Enabled Whether the service is enabled + Enabled bool `json:"enabled"` + + // Name Service name + Name string `json:"name"` + + // PassHostHeader When true, the original client Host header is passed through to the backend instead of being rewritten to the backend's address + PassHostHeader *bool `json:"pass_host_header,omitempty"` + + // RewriteRedirects When true, Location headers in backend responses are rewritten to replace the backend address with the public-facing domain + RewriteRedirects *bool `json:"rewrite_redirects,omitempty"` + + // Targets List of target backends for this service + Targets []ServiceTarget `json:"targets"` +} + +// ServiceTarget defines model for ServiceTarget. +type ServiceTarget struct { + // Enabled Whether this target is enabled + Enabled bool `json:"enabled"` + + // Host Backend ip or domain for this target + Host *string `json:"host,omitempty"` + + // Path URL path prefix for this target + Path *string `json:"path,omitempty"` + + // Port Backend port for this target. Use 0 or omit to use the scheme default (80 for http, 443 for https). + Port int `json:"port"` + + // Protocol Protocol to use when connecting to the backend + Protocol ServiceTargetProtocol `json:"protocol"` + + // TargetId Target ID + TargetId string `json:"target_id"` + + // TargetType Target type (e.g., "peer", "resource") + TargetType ServiceTargetTargetType `json:"target_type"` +} + +// ServiceTargetProtocol Protocol to use when connecting to the backend +type ServiceTargetProtocol string + +// ServiceTargetTargetType Target type (e.g., "peer", "resource") +type ServiceTargetTargetType string + // SetupKey defines model for SetupKey. type SetupKey struct { // AllowExtraDnsLabels Allow extra DNS labels to be added to the peer @@ -3032,6 +3321,57 @@ type GetApiEventsNetworkTrafficParamsConnectionType string // GetApiEventsNetworkTrafficParamsDirection defines parameters for GetApiEventsNetworkTraffic. type GetApiEventsNetworkTrafficParamsDirection string +// GetApiEventsProxyParams defines parameters for GetApiEventsProxy. +type GetApiEventsProxyParams struct { + // Page Page number for pagination (1-indexed) + Page *int `form:"page,omitempty" json:"page,omitempty"` + + // PageSize Number of items per page (max 100) + PageSize *int `form:"page_size,omitempty" json:"page_size,omitempty"` + + // Search General search across request ID, host, path, source IP, user email, and user name + Search *string `form:"search,omitempty" json:"search,omitempty"` + + // SourceIp Filter by source IP address + SourceIp *string `form:"source_ip,omitempty" json:"source_ip,omitempty"` + + // Host Filter by host header + Host *string `form:"host,omitempty" json:"host,omitempty"` + + // Path Filter by request path (supports partial matching) + Path *string `form:"path,omitempty" json:"path,omitempty"` + + // UserId Filter by authenticated user ID + UserId *string `form:"user_id,omitempty" json:"user_id,omitempty"` + + // UserEmail Filter by user email (partial matching) + UserEmail *string `form:"user_email,omitempty" json:"user_email,omitempty"` + + // UserName Filter by user name (partial matching) + UserName *string `form:"user_name,omitempty" json:"user_name,omitempty"` + + // Method Filter by HTTP method + Method *GetApiEventsProxyParamsMethod `form:"method,omitempty" json:"method,omitempty"` + + // Status Filter by status (success = 2xx/3xx, failed = 1xx/4xx/5xx) + Status *GetApiEventsProxyParamsStatus `form:"status,omitempty" json:"status,omitempty"` + + // StatusCode Filter by HTTP status code + StatusCode *int `form:"status_code,omitempty" json:"status_code,omitempty"` + + // StartDate Filter by timestamp >= start_date (RFC3339 format) + StartDate *time.Time `form:"start_date,omitempty" json:"start_date,omitempty"` + + // EndDate Filter by timestamp <= end_date (RFC3339 format) + EndDate *time.Time `form:"end_date,omitempty" json:"end_date,omitempty"` +} + +// GetApiEventsProxyParamsMethod defines parameters for GetApiEventsProxy. +type GetApiEventsProxyParamsMethod string + +// GetApiEventsProxyParamsStatus defines parameters for GetApiEventsProxy. +type GetApiEventsProxyParamsStatus string + // GetApiGroupsParams defines parameters for GetApiGroups. type GetApiGroupsParams struct { // Name Filter groups by name (exact match) @@ -3269,6 +3609,15 @@ type PostApiPostureChecksJSONRequestBody = PostureCheckUpdate // PutApiPostureChecksPostureCheckIdJSONRequestBody defines body for PutApiPostureChecksPostureCheckId for application/json ContentType. type PutApiPostureChecksPostureCheckIdJSONRequestBody = PostureCheckUpdate +// PostApiReverseProxiesDomainsJSONRequestBody defines body for PostApiReverseProxiesDomains for application/json ContentType. +type PostApiReverseProxiesDomainsJSONRequestBody = ReverseProxyDomainRequest + +// PostApiReverseProxiesServicesJSONRequestBody defines body for PostApiReverseProxiesServices for application/json ContentType. +type PostApiReverseProxiesServicesJSONRequestBody = ServiceRequest + +// PutApiReverseProxiesServicesServiceIdJSONRequestBody defines body for PutApiReverseProxiesServicesServiceId for application/json ContentType. +type PutApiReverseProxiesServicesServiceIdJSONRequestBody = ServiceRequest + // PostApiRoutesJSONRequestBody defines body for PostApiRoutes for application/json ContentType. type PostApiRoutesJSONRequestBody = RouteRequest diff --git a/shared/management/proto/generate.sh b/shared/management/proto/generate.sh index 207630ae7..7cb0f75a5 100755 --- a/shared/management/proto/generate.sh +++ b/shared/management/proto/generate.sh @@ -14,4 +14,5 @@ cd "$script_path" go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.26 go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.1 protoc -I ./ ./management.proto --go_out=../ --go-grpc_out=../ +protoc -I ./ ./proxy_service.proto --go_out=../ --go-grpc_out=../ cd "$old_pwd" diff --git a/shared/management/proto/management.pb.go b/shared/management/proto/management.pb.go index dfa9adaf6..44838fc16 100644 --- a/shared/management/proto/management.pb.go +++ b/shared/management/proto/management.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v6.33.1 +// protoc v6.33.0 // source: management.proto package proto diff --git a/shared/management/proto/proxy_service.pb.go b/shared/management/proto/proxy_service.pb.go new file mode 100644 index 000000000..13fcb159e --- /dev/null +++ b/shared/management/proto/proxy_service.pb.go @@ -0,0 +1,2061 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v6.33.0 +// source: proxy_service.proto + +package proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ProxyMappingUpdateType int32 + +const ( + ProxyMappingUpdateType_UPDATE_TYPE_CREATED ProxyMappingUpdateType = 0 + ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED ProxyMappingUpdateType = 1 + ProxyMappingUpdateType_UPDATE_TYPE_REMOVED ProxyMappingUpdateType = 2 +) + +// Enum value maps for ProxyMappingUpdateType. +var ( + ProxyMappingUpdateType_name = map[int32]string{ + 0: "UPDATE_TYPE_CREATED", + 1: "UPDATE_TYPE_MODIFIED", + 2: "UPDATE_TYPE_REMOVED", + } + ProxyMappingUpdateType_value = map[string]int32{ + "UPDATE_TYPE_CREATED": 0, + "UPDATE_TYPE_MODIFIED": 1, + "UPDATE_TYPE_REMOVED": 2, + } +) + +func (x ProxyMappingUpdateType) Enum() *ProxyMappingUpdateType { + p := new(ProxyMappingUpdateType) + *p = x + return p +} + +func (x ProxyMappingUpdateType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ProxyMappingUpdateType) Descriptor() protoreflect.EnumDescriptor { + return file_proxy_service_proto_enumTypes[0].Descriptor() +} + +func (ProxyMappingUpdateType) Type() protoreflect.EnumType { + return &file_proxy_service_proto_enumTypes[0] +} + +func (x ProxyMappingUpdateType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ProxyMappingUpdateType.Descriptor instead. +func (ProxyMappingUpdateType) EnumDescriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{0} +} + +type ProxyStatus int32 + +const ( + ProxyStatus_PROXY_STATUS_PENDING ProxyStatus = 0 + ProxyStatus_PROXY_STATUS_ACTIVE ProxyStatus = 1 + ProxyStatus_PROXY_STATUS_TUNNEL_NOT_CREATED ProxyStatus = 2 + ProxyStatus_PROXY_STATUS_CERTIFICATE_PENDING ProxyStatus = 3 + ProxyStatus_PROXY_STATUS_CERTIFICATE_FAILED ProxyStatus = 4 + ProxyStatus_PROXY_STATUS_ERROR ProxyStatus = 5 +) + +// Enum value maps for ProxyStatus. +var ( + ProxyStatus_name = map[int32]string{ + 0: "PROXY_STATUS_PENDING", + 1: "PROXY_STATUS_ACTIVE", + 2: "PROXY_STATUS_TUNNEL_NOT_CREATED", + 3: "PROXY_STATUS_CERTIFICATE_PENDING", + 4: "PROXY_STATUS_CERTIFICATE_FAILED", + 5: "PROXY_STATUS_ERROR", + } + ProxyStatus_value = map[string]int32{ + "PROXY_STATUS_PENDING": 0, + "PROXY_STATUS_ACTIVE": 1, + "PROXY_STATUS_TUNNEL_NOT_CREATED": 2, + "PROXY_STATUS_CERTIFICATE_PENDING": 3, + "PROXY_STATUS_CERTIFICATE_FAILED": 4, + "PROXY_STATUS_ERROR": 5, + } +) + +func (x ProxyStatus) Enum() *ProxyStatus { + p := new(ProxyStatus) + *p = x + return p +} + +func (x ProxyStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ProxyStatus) Descriptor() protoreflect.EnumDescriptor { + return file_proxy_service_proto_enumTypes[1].Descriptor() +} + +func (ProxyStatus) Type() protoreflect.EnumType { + return &file_proxy_service_proto_enumTypes[1] +} + +func (x ProxyStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ProxyStatus.Descriptor instead. +func (ProxyStatus) EnumDescriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{1} +} + +// GetMappingUpdateRequest is sent to initialise a mapping stream. +type GetMappingUpdateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ProxyId string `protobuf:"bytes,1,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + StartedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + Address string `protobuf:"bytes,4,opt,name=address,proto3" json:"address,omitempty"` +} + +func (x *GetMappingUpdateRequest) Reset() { + *x = GetMappingUpdateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMappingUpdateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMappingUpdateRequest) ProtoMessage() {} + +func (x *GetMappingUpdateRequest) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMappingUpdateRequest.ProtoReflect.Descriptor instead. +func (*GetMappingUpdateRequest) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{0} +} + +func (x *GetMappingUpdateRequest) GetProxyId() string { + if x != nil { + return x.ProxyId + } + return "" +} + +func (x *GetMappingUpdateRequest) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *GetMappingUpdateRequest) GetStartedAt() *timestamppb.Timestamp { + if x != nil { + return x.StartedAt + } + return nil +} + +func (x *GetMappingUpdateRequest) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +// GetMappingUpdateResponse contains zero or more ProxyMappings. +// No mappings may be sent to test the liveness of the Proxy. +// Mappings that are sent should be interpreted by the Proxy appropriately. +type GetMappingUpdateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Mapping []*ProxyMapping `protobuf:"bytes,1,rep,name=mapping,proto3" json:"mapping,omitempty"` + // initial_sync_complete is set on the last message of the initial snapshot. + // The proxy uses this to signal that startup is complete. + InitialSyncComplete bool `protobuf:"varint,2,opt,name=initial_sync_complete,json=initialSyncComplete,proto3" json:"initial_sync_complete,omitempty"` +} + +func (x *GetMappingUpdateResponse) Reset() { + *x = GetMappingUpdateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMappingUpdateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMappingUpdateResponse) ProtoMessage() {} + +func (x *GetMappingUpdateResponse) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMappingUpdateResponse.ProtoReflect.Descriptor instead. +func (*GetMappingUpdateResponse) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{1} +} + +func (x *GetMappingUpdateResponse) GetMapping() []*ProxyMapping { + if x != nil { + return x.Mapping + } + return nil +} + +func (x *GetMappingUpdateResponse) GetInitialSyncComplete() bool { + if x != nil { + return x.InitialSyncComplete + } + return false +} + +type PathMapping struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Target string `protobuf:"bytes,2,opt,name=target,proto3" json:"target,omitempty"` +} + +func (x *PathMapping) Reset() { + *x = PathMapping{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathMapping) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathMapping) ProtoMessage() {} + +func (x *PathMapping) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathMapping.ProtoReflect.Descriptor instead. +func (*PathMapping) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{2} +} + +func (x *PathMapping) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *PathMapping) GetTarget() string { + if x != nil { + return x.Target + } + return "" +} + +type Authentication struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SessionKey string `protobuf:"bytes,1,opt,name=session_key,json=sessionKey,proto3" json:"session_key,omitempty"` + MaxSessionAgeSeconds int64 `protobuf:"varint,2,opt,name=max_session_age_seconds,json=maxSessionAgeSeconds,proto3" json:"max_session_age_seconds,omitempty"` + Password bool `protobuf:"varint,3,opt,name=password,proto3" json:"password,omitempty"` + Pin bool `protobuf:"varint,4,opt,name=pin,proto3" json:"pin,omitempty"` + Oidc bool `protobuf:"varint,5,opt,name=oidc,proto3" json:"oidc,omitempty"` +} + +func (x *Authentication) Reset() { + *x = Authentication{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Authentication) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Authentication) ProtoMessage() {} + +func (x *Authentication) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Authentication.ProtoReflect.Descriptor instead. +func (*Authentication) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{3} +} + +func (x *Authentication) GetSessionKey() string { + if x != nil { + return x.SessionKey + } + return "" +} + +func (x *Authentication) GetMaxSessionAgeSeconds() int64 { + if x != nil { + return x.MaxSessionAgeSeconds + } + return 0 +} + +func (x *Authentication) GetPassword() bool { + if x != nil { + return x.Password + } + return false +} + +func (x *Authentication) GetPin() bool { + if x != nil { + return x.Pin + } + return false +} + +func (x *Authentication) GetOidc() bool { + if x != nil { + return x.Oidc + } + return false +} + +type ProxyMapping struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type ProxyMappingUpdateType `protobuf:"varint,1,opt,name=type,proto3,enum=management.ProxyMappingUpdateType" json:"type,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + AccountId string `protobuf:"bytes,3,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + Domain string `protobuf:"bytes,4,opt,name=domain,proto3" json:"domain,omitempty"` + Path []*PathMapping `protobuf:"bytes,5,rep,name=path,proto3" json:"path,omitempty"` + AuthToken string `protobuf:"bytes,6,opt,name=auth_token,json=authToken,proto3" json:"auth_token,omitempty"` + Auth *Authentication `protobuf:"bytes,7,opt,name=auth,proto3" json:"auth,omitempty"` + // When true, the original Host header from the client request is passed + // through to the backend instead of being rewritten to the backend's address. + PassHostHeader bool `protobuf:"varint,8,opt,name=pass_host_header,json=passHostHeader,proto3" json:"pass_host_header,omitempty"` + // When true, Location headers in backend responses are rewritten to replace + // the backend address with the public-facing domain. + RewriteRedirects bool `protobuf:"varint,9,opt,name=rewrite_redirects,json=rewriteRedirects,proto3" json:"rewrite_redirects,omitempty"` +} + +func (x *ProxyMapping) Reset() { + *x = ProxyMapping{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProxyMapping) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProxyMapping) ProtoMessage() {} + +func (x *ProxyMapping) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProxyMapping.ProtoReflect.Descriptor instead. +func (*ProxyMapping) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{4} +} + +func (x *ProxyMapping) GetType() ProxyMappingUpdateType { + if x != nil { + return x.Type + } + return ProxyMappingUpdateType_UPDATE_TYPE_CREATED +} + +func (x *ProxyMapping) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *ProxyMapping) GetAccountId() string { + if x != nil { + return x.AccountId + } + return "" +} + +func (x *ProxyMapping) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *ProxyMapping) GetPath() []*PathMapping { + if x != nil { + return x.Path + } + return nil +} + +func (x *ProxyMapping) GetAuthToken() string { + if x != nil { + return x.AuthToken + } + return "" +} + +func (x *ProxyMapping) GetAuth() *Authentication { + if x != nil { + return x.Auth + } + return nil +} + +func (x *ProxyMapping) GetPassHostHeader() bool { + if x != nil { + return x.PassHostHeader + } + return false +} + +func (x *ProxyMapping) GetRewriteRedirects() bool { + if x != nil { + return x.RewriteRedirects + } + return false +} + +// SendAccessLogRequest consists of one or more AccessLogs from a Proxy. +type SendAccessLogRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Log *AccessLog `protobuf:"bytes,1,opt,name=log,proto3" json:"log,omitempty"` +} + +func (x *SendAccessLogRequest) Reset() { + *x = SendAccessLogRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendAccessLogRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendAccessLogRequest) ProtoMessage() {} + +func (x *SendAccessLogRequest) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendAccessLogRequest.ProtoReflect.Descriptor instead. +func (*SendAccessLogRequest) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{5} +} + +func (x *SendAccessLogRequest) GetLog() *AccessLog { + if x != nil { + return x.Log + } + return nil +} + +// SendAccessLogResponse is intentionally empty to allow for future expansion. +type SendAccessLogResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SendAccessLogResponse) Reset() { + *x = SendAccessLogResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendAccessLogResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendAccessLogResponse) ProtoMessage() {} + +func (x *SendAccessLogResponse) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendAccessLogResponse.ProtoReflect.Descriptor instead. +func (*SendAccessLogResponse) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{6} +} + +type AccessLog struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + LogId string `protobuf:"bytes,2,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"` + AccountId string `protobuf:"bytes,3,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + ServiceId string `protobuf:"bytes,4,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Host string `protobuf:"bytes,5,opt,name=host,proto3" json:"host,omitempty"` + Path string `protobuf:"bytes,6,opt,name=path,proto3" json:"path,omitempty"` + DurationMs int64 `protobuf:"varint,7,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` + Method string `protobuf:"bytes,8,opt,name=method,proto3" json:"method,omitempty"` + ResponseCode int32 `protobuf:"varint,9,opt,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` + SourceIp string `protobuf:"bytes,10,opt,name=source_ip,json=sourceIp,proto3" json:"source_ip,omitempty"` + AuthMechanism string `protobuf:"bytes,11,opt,name=auth_mechanism,json=authMechanism,proto3" json:"auth_mechanism,omitempty"` + UserId string `protobuf:"bytes,12,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + AuthSuccess bool `protobuf:"varint,13,opt,name=auth_success,json=authSuccess,proto3" json:"auth_success,omitempty"` +} + +func (x *AccessLog) Reset() { + *x = AccessLog{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AccessLog) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AccessLog) ProtoMessage() {} + +func (x *AccessLog) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AccessLog.ProtoReflect.Descriptor instead. +func (*AccessLog) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{7} +} + +func (x *AccessLog) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp + } + return nil +} + +func (x *AccessLog) GetLogId() string { + if x != nil { + return x.LogId + } + return "" +} + +func (x *AccessLog) GetAccountId() string { + if x != nil { + return x.AccountId + } + return "" +} + +func (x *AccessLog) GetServiceId() string { + if x != nil { + return x.ServiceId + } + return "" +} + +func (x *AccessLog) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *AccessLog) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *AccessLog) GetDurationMs() int64 { + if x != nil { + return x.DurationMs + } + return 0 +} + +func (x *AccessLog) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +func (x *AccessLog) GetResponseCode() int32 { + if x != nil { + return x.ResponseCode + } + return 0 +} + +func (x *AccessLog) GetSourceIp() string { + if x != nil { + return x.SourceIp + } + return "" +} + +func (x *AccessLog) GetAuthMechanism() string { + if x != nil { + return x.AuthMechanism + } + return "" +} + +func (x *AccessLog) GetUserId() string { + if x != nil { + return x.UserId + } + return "" +} + +func (x *AccessLog) GetAuthSuccess() bool { + if x != nil { + return x.AuthSuccess + } + return false +} + +type AuthenticateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + // Types that are assignable to Request: + // + // *AuthenticateRequest_Password + // *AuthenticateRequest_Pin + Request isAuthenticateRequest_Request `protobuf_oneof:"request"` +} + +func (x *AuthenticateRequest) Reset() { + *x = AuthenticateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuthenticateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthenticateRequest) ProtoMessage() {} + +func (x *AuthenticateRequest) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthenticateRequest.ProtoReflect.Descriptor instead. +func (*AuthenticateRequest) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{8} +} + +func (x *AuthenticateRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *AuthenticateRequest) GetAccountId() string { + if x != nil { + return x.AccountId + } + return "" +} + +func (m *AuthenticateRequest) GetRequest() isAuthenticateRequest_Request { + if m != nil { + return m.Request + } + return nil +} + +func (x *AuthenticateRequest) GetPassword() *PasswordRequest { + if x, ok := x.GetRequest().(*AuthenticateRequest_Password); ok { + return x.Password + } + return nil +} + +func (x *AuthenticateRequest) GetPin() *PinRequest { + if x, ok := x.GetRequest().(*AuthenticateRequest_Pin); ok { + return x.Pin + } + return nil +} + +type isAuthenticateRequest_Request interface { + isAuthenticateRequest_Request() +} + +type AuthenticateRequest_Password struct { + Password *PasswordRequest `protobuf:"bytes,3,opt,name=password,proto3,oneof"` +} + +type AuthenticateRequest_Pin struct { + Pin *PinRequest `protobuf:"bytes,4,opt,name=pin,proto3,oneof"` +} + +func (*AuthenticateRequest_Password) isAuthenticateRequest_Request() {} + +func (*AuthenticateRequest_Pin) isAuthenticateRequest_Request() {} + +type PasswordRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Password string `protobuf:"bytes,1,opt,name=password,proto3" json:"password,omitempty"` +} + +func (x *PasswordRequest) Reset() { + *x = PasswordRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PasswordRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PasswordRequest) ProtoMessage() {} + +func (x *PasswordRequest) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PasswordRequest.ProtoReflect.Descriptor instead. +func (*PasswordRequest) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{9} +} + +func (x *PasswordRequest) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +type PinRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pin string `protobuf:"bytes,1,opt,name=pin,proto3" json:"pin,omitempty"` +} + +func (x *PinRequest) Reset() { + *x = PinRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PinRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PinRequest) ProtoMessage() {} + +func (x *PinRequest) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PinRequest.ProtoReflect.Descriptor instead. +func (*PinRequest) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{10} +} + +func (x *PinRequest) GetPin() string { + if x != nil { + return x.Pin + } + return "" +} + +type AuthenticateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + SessionToken string `protobuf:"bytes,2,opt,name=session_token,json=sessionToken,proto3" json:"session_token,omitempty"` +} + +func (x *AuthenticateResponse) Reset() { + *x = AuthenticateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuthenticateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthenticateResponse) ProtoMessage() {} + +func (x *AuthenticateResponse) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthenticateResponse.ProtoReflect.Descriptor instead. +func (*AuthenticateResponse) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{11} +} + +func (x *AuthenticateResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *AuthenticateResponse) GetSessionToken() string { + if x != nil { + return x.SessionToken + } + return "" +} + +// SendStatusUpdateRequest is sent by the proxy to update its status +type SendStatusUpdateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + Status ProxyStatus `protobuf:"varint,3,opt,name=status,proto3,enum=management.ProxyStatus" json:"status,omitempty"` + CertificateIssued bool `protobuf:"varint,4,opt,name=certificate_issued,json=certificateIssued,proto3" json:"certificate_issued,omitempty"` + ErrorMessage *string `protobuf:"bytes,5,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"` +} + +func (x *SendStatusUpdateRequest) Reset() { + *x = SendStatusUpdateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendStatusUpdateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendStatusUpdateRequest) ProtoMessage() {} + +func (x *SendStatusUpdateRequest) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendStatusUpdateRequest.ProtoReflect.Descriptor instead. +func (*SendStatusUpdateRequest) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{12} +} + +func (x *SendStatusUpdateRequest) GetServiceId() string { + if x != nil { + return x.ServiceId + } + return "" +} + +func (x *SendStatusUpdateRequest) GetAccountId() string { + if x != nil { + return x.AccountId + } + return "" +} + +func (x *SendStatusUpdateRequest) GetStatus() ProxyStatus { + if x != nil { + return x.Status + } + return ProxyStatus_PROXY_STATUS_PENDING +} + +func (x *SendStatusUpdateRequest) GetCertificateIssued() bool { + if x != nil { + return x.CertificateIssued + } + return false +} + +func (x *SendStatusUpdateRequest) GetErrorMessage() string { + if x != nil && x.ErrorMessage != nil { + return *x.ErrorMessage + } + return "" +} + +// SendStatusUpdateResponse is intentionally empty to allow for future expansion +type SendStatusUpdateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SendStatusUpdateResponse) Reset() { + *x = SendStatusUpdateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendStatusUpdateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendStatusUpdateResponse) ProtoMessage() {} + +func (x *SendStatusUpdateResponse) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendStatusUpdateResponse.ProtoReflect.Descriptor instead. +func (*SendStatusUpdateResponse) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{13} +} + +// CreateProxyPeerRequest is sent by the proxy to create a peer connection +// The token is a one-time authentication token sent via ProxyMapping +type CreateProxyPeerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"` + WireguardPublicKey string `protobuf:"bytes,4,opt,name=wireguard_public_key,json=wireguardPublicKey,proto3" json:"wireguard_public_key,omitempty"` + Cluster string `protobuf:"bytes,5,opt,name=cluster,proto3" json:"cluster,omitempty"` +} + +func (x *CreateProxyPeerRequest) Reset() { + *x = CreateProxyPeerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateProxyPeerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateProxyPeerRequest) ProtoMessage() {} + +func (x *CreateProxyPeerRequest) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateProxyPeerRequest.ProtoReflect.Descriptor instead. +func (*CreateProxyPeerRequest) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{14} +} + +func (x *CreateProxyPeerRequest) GetServiceId() string { + if x != nil { + return x.ServiceId + } + return "" +} + +func (x *CreateProxyPeerRequest) GetAccountId() string { + if x != nil { + return x.AccountId + } + return "" +} + +func (x *CreateProxyPeerRequest) GetToken() string { + if x != nil { + return x.Token + } + return "" +} + +func (x *CreateProxyPeerRequest) GetWireguardPublicKey() string { + if x != nil { + return x.WireguardPublicKey + } + return "" +} + +func (x *CreateProxyPeerRequest) GetCluster() string { + if x != nil { + return x.Cluster + } + return "" +} + +// CreateProxyPeerResponse contains the result of peer creation +type CreateProxyPeerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + ErrorMessage *string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"` +} + +func (x *CreateProxyPeerResponse) Reset() { + *x = CreateProxyPeerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateProxyPeerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateProxyPeerResponse) ProtoMessage() {} + +func (x *CreateProxyPeerResponse) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateProxyPeerResponse.ProtoReflect.Descriptor instead. +func (*CreateProxyPeerResponse) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{15} +} + +func (x *CreateProxyPeerResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *CreateProxyPeerResponse) GetErrorMessage() string { + if x != nil && x.ErrorMessage != nil { + return *x.ErrorMessage + } + return "" +} + +type GetOIDCURLRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + RedirectUrl string `protobuf:"bytes,3,opt,name=redirect_url,json=redirectUrl,proto3" json:"redirect_url,omitempty"` +} + +func (x *GetOIDCURLRequest) Reset() { + *x = GetOIDCURLRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetOIDCURLRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetOIDCURLRequest) ProtoMessage() {} + +func (x *GetOIDCURLRequest) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetOIDCURLRequest.ProtoReflect.Descriptor instead. +func (*GetOIDCURLRequest) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{16} +} + +func (x *GetOIDCURLRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *GetOIDCURLRequest) GetAccountId() string { + if x != nil { + return x.AccountId + } + return "" +} + +func (x *GetOIDCURLRequest) GetRedirectUrl() string { + if x != nil { + return x.RedirectUrl + } + return "" +} + +type GetOIDCURLResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` +} + +func (x *GetOIDCURLResponse) Reset() { + *x = GetOIDCURLResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetOIDCURLResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetOIDCURLResponse) ProtoMessage() {} + +func (x *GetOIDCURLResponse) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetOIDCURLResponse.ProtoReflect.Descriptor instead. +func (*GetOIDCURLResponse) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{17} +} + +func (x *GetOIDCURLResponse) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +type ValidateSessionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` + SessionToken string `protobuf:"bytes,2,opt,name=session_token,json=sessionToken,proto3" json:"session_token,omitempty"` +} + +func (x *ValidateSessionRequest) Reset() { + *x = ValidateSessionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateSessionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateSessionRequest) ProtoMessage() {} + +func (x *ValidateSessionRequest) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateSessionRequest.ProtoReflect.Descriptor instead. +func (*ValidateSessionRequest) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{18} +} + +func (x *ValidateSessionRequest) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *ValidateSessionRequest) GetSessionToken() string { + if x != nil { + return x.SessionToken + } + return "" +} + +type ValidateSessionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Valid bool `protobuf:"varint,1,opt,name=valid,proto3" json:"valid,omitempty"` + UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + UserEmail string `protobuf:"bytes,3,opt,name=user_email,json=userEmail,proto3" json:"user_email,omitempty"` + DeniedReason string `protobuf:"bytes,4,opt,name=denied_reason,json=deniedReason,proto3" json:"denied_reason,omitempty"` +} + +func (x *ValidateSessionResponse) Reset() { + *x = ValidateSessionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateSessionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateSessionResponse) ProtoMessage() {} + +func (x *ValidateSessionResponse) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateSessionResponse.ProtoReflect.Descriptor instead. +func (*ValidateSessionResponse) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{19} +} + +func (x *ValidateSessionResponse) GetValid() bool { + if x != nil { + return x.Valid + } + return false +} + +func (x *ValidateSessionResponse) GetUserId() string { + if x != nil { + return x.UserId + } + return "" +} + +func (x *ValidateSessionResponse) GetUserEmail() string { + if x != nil { + return x.UserEmail + } + return "" +} + +func (x *ValidateSessionResponse) GetDeniedReason() string { + if x != nil { + return x.DeniedReason + } + return "" +} + +var File_proxy_service_proto protoreflect.FileDescriptor + +var file_proxy_service_proto_rawDesc = []byte{ + 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xa3, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, + 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, + 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x18, + 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, + 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, + 0x52, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x69, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, + 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x22, 0x39, 0x0a, + 0x0b, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0xaa, 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x74, + 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x35, 0x0a, 0x17, + 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x67, 0x65, 0x5f, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x6d, + 0x61, 0x78, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, + 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x70, 0x69, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x69, 0x64, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x04, 0x6f, 0x69, 0x64, 0x63, 0x22, 0xe0, 0x02, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, + 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, + 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, + 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x2b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x2e, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, + 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x61, 0x75, 0x74, + 0x68, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x61, 0x73, 0x73, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x61, 0x73, + 0x73, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x72, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x22, 0x3f, 0x0a, 0x14, 0x53, 0x65, 0x6e, 0x64, + 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x27, 0x0a, 0x03, 0x6c, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x03, 0x6c, 0x6f, 0x67, 0x22, 0x17, 0x0a, 0x15, 0x53, 0x65, 0x6e, + 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0xa0, 0x03, 0x0a, 0x09, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, + 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x15, 0x0a, 0x06, 0x6c, 0x6f, + 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, + 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x49, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, + 0x6e, 0x69, 0x73, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x68, + 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x53, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x13, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, + 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x08, + 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x2a, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x03, + 0x70, 0x69, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2d, + 0x0a, 0x0f, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x1e, 0x0a, + 0x0a, 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, + 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x22, 0x55, 0x0a, + 0x14, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, + 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xf3, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2f, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, + 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x2d, 0x0a, 0x12, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x69, + 0x73, 0x73, 0x75, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x49, 0x73, 0x73, 0x75, 0x65, 0x64, 0x12, 0x28, + 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x65, + 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, + 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, + 0x72, 0x64, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x12, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, 0x72, 0x64, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x22, 0x6f, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, + 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, + 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x22, 0x65, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x6c, 0x22, 0x26, 0x0a, 0x12, 0x47, 0x65, 0x74, + 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, + 0x6c, 0x22, 0x55, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8c, 0x01, 0x0a, 0x17, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, + 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x65, 0x6d, 0x61, 0x69, + 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x73, 0x65, 0x72, 0x45, 0x6d, 0x61, + 0x69, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, + 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x2a, 0x64, 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x78, 0x79, + 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x55, 0x50, + 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x10, 0x02, 0x2a, 0xc8, 0x01, + 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, + 0x14, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x45, + 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x50, 0x52, 0x4f, 0x58, 0x59, + 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, + 0x12, 0x23, 0x0a, 0x1f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, + 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, + 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x24, 0x0a, 0x20, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, + 0x45, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x23, 0x0a, 0x1f, 0x50, + 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, + 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, + 0x12, 0x16, 0x0a, 0x12, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, + 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x05, 0x32, 0xfc, 0x04, 0x0a, 0x0c, 0x50, 0x72, 0x6f, + 0x78, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5f, 0x0a, 0x10, 0x47, 0x65, 0x74, + 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, + 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x54, 0x0a, 0x0d, 0x53, 0x65, + 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x20, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x51, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x12, 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, + 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, + 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, + 0x79, 0x50, 0x65, 0x65, 0x72, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, + 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, + 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, + 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x12, 0x1d, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, + 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, + 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0f, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_proxy_service_proto_rawDescOnce sync.Once + file_proxy_service_proto_rawDescData = file_proxy_service_proto_rawDesc +) + +func file_proxy_service_proto_rawDescGZIP() []byte { + file_proxy_service_proto_rawDescOnce.Do(func() { + file_proxy_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_proxy_service_proto_rawDescData) + }) + return file_proxy_service_proto_rawDescData +} + +var file_proxy_service_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_proxy_service_proto_msgTypes = make([]protoimpl.MessageInfo, 20) +var file_proxy_service_proto_goTypes = []interface{}{ + (ProxyMappingUpdateType)(0), // 0: management.ProxyMappingUpdateType + (ProxyStatus)(0), // 1: management.ProxyStatus + (*GetMappingUpdateRequest)(nil), // 2: management.GetMappingUpdateRequest + (*GetMappingUpdateResponse)(nil), // 3: management.GetMappingUpdateResponse + (*PathMapping)(nil), // 4: management.PathMapping + (*Authentication)(nil), // 5: management.Authentication + (*ProxyMapping)(nil), // 6: management.ProxyMapping + (*SendAccessLogRequest)(nil), // 7: management.SendAccessLogRequest + (*SendAccessLogResponse)(nil), // 8: management.SendAccessLogResponse + (*AccessLog)(nil), // 9: management.AccessLog + (*AuthenticateRequest)(nil), // 10: management.AuthenticateRequest + (*PasswordRequest)(nil), // 11: management.PasswordRequest + (*PinRequest)(nil), // 12: management.PinRequest + (*AuthenticateResponse)(nil), // 13: management.AuthenticateResponse + (*SendStatusUpdateRequest)(nil), // 14: management.SendStatusUpdateRequest + (*SendStatusUpdateResponse)(nil), // 15: management.SendStatusUpdateResponse + (*CreateProxyPeerRequest)(nil), // 16: management.CreateProxyPeerRequest + (*CreateProxyPeerResponse)(nil), // 17: management.CreateProxyPeerResponse + (*GetOIDCURLRequest)(nil), // 18: management.GetOIDCURLRequest + (*GetOIDCURLResponse)(nil), // 19: management.GetOIDCURLResponse + (*ValidateSessionRequest)(nil), // 20: management.ValidateSessionRequest + (*ValidateSessionResponse)(nil), // 21: management.ValidateSessionResponse + (*timestamppb.Timestamp)(nil), // 22: google.protobuf.Timestamp +} +var file_proxy_service_proto_depIdxs = []int32{ + 22, // 0: management.GetMappingUpdateRequest.started_at:type_name -> google.protobuf.Timestamp + 6, // 1: management.GetMappingUpdateResponse.mapping:type_name -> management.ProxyMapping + 0, // 2: management.ProxyMapping.type:type_name -> management.ProxyMappingUpdateType + 4, // 3: management.ProxyMapping.path:type_name -> management.PathMapping + 5, // 4: management.ProxyMapping.auth:type_name -> management.Authentication + 9, // 5: management.SendAccessLogRequest.log:type_name -> management.AccessLog + 22, // 6: management.AccessLog.timestamp:type_name -> google.protobuf.Timestamp + 11, // 7: management.AuthenticateRequest.password:type_name -> management.PasswordRequest + 12, // 8: management.AuthenticateRequest.pin:type_name -> management.PinRequest + 1, // 9: management.SendStatusUpdateRequest.status:type_name -> management.ProxyStatus + 2, // 10: management.ProxyService.GetMappingUpdate:input_type -> management.GetMappingUpdateRequest + 7, // 11: management.ProxyService.SendAccessLog:input_type -> management.SendAccessLogRequest + 10, // 12: management.ProxyService.Authenticate:input_type -> management.AuthenticateRequest + 14, // 13: management.ProxyService.SendStatusUpdate:input_type -> management.SendStatusUpdateRequest + 16, // 14: management.ProxyService.CreateProxyPeer:input_type -> management.CreateProxyPeerRequest + 18, // 15: management.ProxyService.GetOIDCURL:input_type -> management.GetOIDCURLRequest + 20, // 16: management.ProxyService.ValidateSession:input_type -> management.ValidateSessionRequest + 3, // 17: management.ProxyService.GetMappingUpdate:output_type -> management.GetMappingUpdateResponse + 8, // 18: management.ProxyService.SendAccessLog:output_type -> management.SendAccessLogResponse + 13, // 19: management.ProxyService.Authenticate:output_type -> management.AuthenticateResponse + 15, // 20: management.ProxyService.SendStatusUpdate:output_type -> management.SendStatusUpdateResponse + 17, // 21: management.ProxyService.CreateProxyPeer:output_type -> management.CreateProxyPeerResponse + 19, // 22: management.ProxyService.GetOIDCURL:output_type -> management.GetOIDCURLResponse + 21, // 23: management.ProxyService.ValidateSession:output_type -> management.ValidateSessionResponse + 17, // [17:24] is the sub-list for method output_type + 10, // [10:17] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name +} + +func init() { file_proxy_service_proto_init() } +func file_proxy_service_proto_init() { + if File_proxy_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proxy_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMappingUpdateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMappingUpdateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathMapping); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Authentication); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProxyMapping); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendAccessLogRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendAccessLogResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AccessLog); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthenticateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PasswordRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PinRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthenticateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendStatusUpdateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendStatusUpdateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateProxyPeerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateProxyPeerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetOIDCURLRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetOIDCURLResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateSessionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateSessionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_proxy_service_proto_msgTypes[8].OneofWrappers = []interface{}{ + (*AuthenticateRequest_Password)(nil), + (*AuthenticateRequest_Pin)(nil), + } + file_proxy_service_proto_msgTypes[12].OneofWrappers = []interface{}{} + file_proxy_service_proto_msgTypes[15].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proxy_service_proto_rawDesc, + NumEnums: 2, + NumMessages: 20, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proxy_service_proto_goTypes, + DependencyIndexes: file_proxy_service_proto_depIdxs, + EnumInfos: file_proxy_service_proto_enumTypes, + MessageInfos: file_proxy_service_proto_msgTypes, + }.Build() + File_proxy_service_proto = out.File + file_proxy_service_proto_rawDesc = nil + file_proxy_service_proto_goTypes = nil + file_proxy_service_proto_depIdxs = nil +} diff --git a/shared/management/proto/proxy_service.proto b/shared/management/proto/proxy_service.proto new file mode 100644 index 000000000..b4e62a52a --- /dev/null +++ b/shared/management/proto/proxy_service.proto @@ -0,0 +1,185 @@ +syntax = "proto3"; + +package management; + +option go_package = "/proto"; + +import "google/protobuf/timestamp.proto"; + +// ProxyService - Management is the SERVER, Proxy is the CLIENT +// Proxy initiates connection to management +service ProxyService { + rpc GetMappingUpdate(GetMappingUpdateRequest) returns (stream GetMappingUpdateResponse); + + rpc SendAccessLog(SendAccessLogRequest) returns (SendAccessLogResponse); + + rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse); + + rpc SendStatusUpdate(SendStatusUpdateRequest) returns (SendStatusUpdateResponse); + + rpc CreateProxyPeer(CreateProxyPeerRequest) returns (CreateProxyPeerResponse); + + rpc GetOIDCURL(GetOIDCURLRequest) returns (GetOIDCURLResponse); + + // ValidateSession validates a session token and checks user access permissions. + // Called by the proxy after receiving a session token from OIDC callback. + rpc ValidateSession(ValidateSessionRequest) returns (ValidateSessionResponse); +} + +// GetMappingUpdateRequest is sent to initialise a mapping stream. +message GetMappingUpdateRequest { + string proxy_id = 1; + string version = 2; + google.protobuf.Timestamp started_at = 3; + string address = 4; +} + +// GetMappingUpdateResponse contains zero or more ProxyMappings. +// No mappings may be sent to test the liveness of the Proxy. +// Mappings that are sent should be interpreted by the Proxy appropriately. +message GetMappingUpdateResponse { + repeated ProxyMapping mapping = 1; + // initial_sync_complete is set on the last message of the initial snapshot. + // The proxy uses this to signal that startup is complete. + bool initial_sync_complete = 2; +} + +enum ProxyMappingUpdateType { + UPDATE_TYPE_CREATED = 0; + UPDATE_TYPE_MODIFIED = 1; + UPDATE_TYPE_REMOVED = 2; +} + +message PathMapping { + string path = 1; + string target = 2; +} + +message Authentication { + string session_key = 1; + int64 max_session_age_seconds = 2; + bool password = 3; + bool pin = 4; + bool oidc = 5; +} + +message ProxyMapping { + ProxyMappingUpdateType type = 1; + string id = 2; + string account_id = 3; + string domain = 4; + repeated PathMapping path = 5; + string auth_token = 6; + Authentication auth = 7; + // When true, the original Host header from the client request is passed + // through to the backend instead of being rewritten to the backend's address. + bool pass_host_header = 8; + // When true, Location headers in backend responses are rewritten to replace + // the backend address with the public-facing domain. + bool rewrite_redirects = 9; +} + +// SendAccessLogRequest consists of one or more AccessLogs from a Proxy. +message SendAccessLogRequest { + AccessLog log = 1; +} + +// SendAccessLogResponse is intentionally empty to allow for future expansion. +message SendAccessLogResponse {} + +message AccessLog { + google.protobuf.Timestamp timestamp = 1; + string log_id = 2; + string account_id = 3; + string service_id = 4; + string host = 5; + string path = 6; + int64 duration_ms = 7; + string method = 8; + int32 response_code = 9; + string source_ip = 10; + string auth_mechanism = 11; + string user_id = 12; + bool auth_success = 13; +} + +message AuthenticateRequest { + string id = 1; + string account_id = 2; + oneof request { + PasswordRequest password = 3; + PinRequest pin = 4; + } +} + +message PasswordRequest { + string password = 1; +} + +message PinRequest { + string pin = 1; +} + +message AuthenticateResponse { + bool success = 1; + string session_token = 2; +} + +enum ProxyStatus { + PROXY_STATUS_PENDING = 0; + PROXY_STATUS_ACTIVE = 1; + PROXY_STATUS_TUNNEL_NOT_CREATED = 2; + PROXY_STATUS_CERTIFICATE_PENDING = 3; + PROXY_STATUS_CERTIFICATE_FAILED = 4; + PROXY_STATUS_ERROR = 5; +} + +// SendStatusUpdateRequest is sent by the proxy to update its status +message SendStatusUpdateRequest { + string service_id = 1; + string account_id = 2; + ProxyStatus status = 3; + bool certificate_issued = 4; + optional string error_message = 5; +} + +// SendStatusUpdateResponse is intentionally empty to allow for future expansion +message SendStatusUpdateResponse {} + +// CreateProxyPeerRequest is sent by the proxy to create a peer connection +// The token is a one-time authentication token sent via ProxyMapping +message CreateProxyPeerRequest { + string service_id = 1; + string account_id = 2; + string token = 3; + string wireguard_public_key = 4; + string cluster = 5; +} + +// CreateProxyPeerResponse contains the result of peer creation +message CreateProxyPeerResponse { + bool success = 1; + optional string error_message = 2; +} + +message GetOIDCURLRequest { + string id = 1; + string account_id = 2; + string redirect_url = 3; +} + +message GetOIDCURLResponse { + string url = 1; +} + +message ValidateSessionRequest { + string domain = 1; + string session_token = 2; +} + +message ValidateSessionResponse { + bool valid = 1; + string user_id = 2; + string user_email = 3; + string denied_reason = 4; +} diff --git a/shared/management/proto/proxy_service_grpc.pb.go b/shared/management/proto/proxy_service_grpc.pb.go new file mode 100644 index 000000000..627b217d8 --- /dev/null +++ b/shared/management/proto/proxy_service_grpc.pb.go @@ -0,0 +1,349 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// ProxyServiceClient is the client API for ProxyService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ProxyServiceClient interface { + GetMappingUpdate(ctx context.Context, in *GetMappingUpdateRequest, opts ...grpc.CallOption) (ProxyService_GetMappingUpdateClient, error) + SendAccessLog(ctx context.Context, in *SendAccessLogRequest, opts ...grpc.CallOption) (*SendAccessLogResponse, error) + Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) + SendStatusUpdate(ctx context.Context, in *SendStatusUpdateRequest, opts ...grpc.CallOption) (*SendStatusUpdateResponse, error) + CreateProxyPeer(ctx context.Context, in *CreateProxyPeerRequest, opts ...grpc.CallOption) (*CreateProxyPeerResponse, error) + GetOIDCURL(ctx context.Context, in *GetOIDCURLRequest, opts ...grpc.CallOption) (*GetOIDCURLResponse, error) + // ValidateSession validates a session token and checks user access permissions. + // Called by the proxy after receiving a session token from OIDC callback. + ValidateSession(ctx context.Context, in *ValidateSessionRequest, opts ...grpc.CallOption) (*ValidateSessionResponse, error) +} + +type proxyServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewProxyServiceClient(cc grpc.ClientConnInterface) ProxyServiceClient { + return &proxyServiceClient{cc} +} + +func (c *proxyServiceClient) GetMappingUpdate(ctx context.Context, in *GetMappingUpdateRequest, opts ...grpc.CallOption) (ProxyService_GetMappingUpdateClient, error) { + stream, err := c.cc.NewStream(ctx, &ProxyService_ServiceDesc.Streams[0], "/management.ProxyService/GetMappingUpdate", opts...) + if err != nil { + return nil, err + } + x := &proxyServiceGetMappingUpdateClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type ProxyService_GetMappingUpdateClient interface { + Recv() (*GetMappingUpdateResponse, error) + grpc.ClientStream +} + +type proxyServiceGetMappingUpdateClient struct { + grpc.ClientStream +} + +func (x *proxyServiceGetMappingUpdateClient) Recv() (*GetMappingUpdateResponse, error) { + m := new(GetMappingUpdateResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *proxyServiceClient) SendAccessLog(ctx context.Context, in *SendAccessLogRequest, opts ...grpc.CallOption) (*SendAccessLogResponse, error) { + out := new(SendAccessLogResponse) + err := c.cc.Invoke(ctx, "/management.ProxyService/SendAccessLog", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *proxyServiceClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) { + out := new(AuthenticateResponse) + err := c.cc.Invoke(ctx, "/management.ProxyService/Authenticate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *proxyServiceClient) SendStatusUpdate(ctx context.Context, in *SendStatusUpdateRequest, opts ...grpc.CallOption) (*SendStatusUpdateResponse, error) { + out := new(SendStatusUpdateResponse) + err := c.cc.Invoke(ctx, "/management.ProxyService/SendStatusUpdate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *proxyServiceClient) CreateProxyPeer(ctx context.Context, in *CreateProxyPeerRequest, opts ...grpc.CallOption) (*CreateProxyPeerResponse, error) { + out := new(CreateProxyPeerResponse) + err := c.cc.Invoke(ctx, "/management.ProxyService/CreateProxyPeer", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *proxyServiceClient) GetOIDCURL(ctx context.Context, in *GetOIDCURLRequest, opts ...grpc.CallOption) (*GetOIDCURLResponse, error) { + out := new(GetOIDCURLResponse) + err := c.cc.Invoke(ctx, "/management.ProxyService/GetOIDCURL", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *proxyServiceClient) ValidateSession(ctx context.Context, in *ValidateSessionRequest, opts ...grpc.CallOption) (*ValidateSessionResponse, error) { + out := new(ValidateSessionResponse) + err := c.cc.Invoke(ctx, "/management.ProxyService/ValidateSession", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProxyServiceServer is the server API for ProxyService service. +// All implementations must embed UnimplementedProxyServiceServer +// for forward compatibility +type ProxyServiceServer interface { + GetMappingUpdate(*GetMappingUpdateRequest, ProxyService_GetMappingUpdateServer) error + SendAccessLog(context.Context, *SendAccessLogRequest) (*SendAccessLogResponse, error) + Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error) + SendStatusUpdate(context.Context, *SendStatusUpdateRequest) (*SendStatusUpdateResponse, error) + CreateProxyPeer(context.Context, *CreateProxyPeerRequest) (*CreateProxyPeerResponse, error) + GetOIDCURL(context.Context, *GetOIDCURLRequest) (*GetOIDCURLResponse, error) + // ValidateSession validates a session token and checks user access permissions. + // Called by the proxy after receiving a session token from OIDC callback. + ValidateSession(context.Context, *ValidateSessionRequest) (*ValidateSessionResponse, error) + mustEmbedUnimplementedProxyServiceServer() +} + +// UnimplementedProxyServiceServer must be embedded to have forward compatible implementations. +type UnimplementedProxyServiceServer struct { +} + +func (UnimplementedProxyServiceServer) GetMappingUpdate(*GetMappingUpdateRequest, ProxyService_GetMappingUpdateServer) error { + return status.Errorf(codes.Unimplemented, "method GetMappingUpdate not implemented") +} +func (UnimplementedProxyServiceServer) SendAccessLog(context.Context, *SendAccessLogRequest) (*SendAccessLogResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendAccessLog not implemented") +} +func (UnimplementedProxyServiceServer) Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Authenticate not implemented") +} +func (UnimplementedProxyServiceServer) SendStatusUpdate(context.Context, *SendStatusUpdateRequest) (*SendStatusUpdateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendStatusUpdate not implemented") +} +func (UnimplementedProxyServiceServer) CreateProxyPeer(context.Context, *CreateProxyPeerRequest) (*CreateProxyPeerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateProxyPeer not implemented") +} +func (UnimplementedProxyServiceServer) GetOIDCURL(context.Context, *GetOIDCURLRequest) (*GetOIDCURLResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetOIDCURL not implemented") +} +func (UnimplementedProxyServiceServer) ValidateSession(context.Context, *ValidateSessionRequest) (*ValidateSessionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateSession not implemented") +} +func (UnimplementedProxyServiceServer) mustEmbedUnimplementedProxyServiceServer() {} + +// UnsafeProxyServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ProxyServiceServer will +// result in compilation errors. +type UnsafeProxyServiceServer interface { + mustEmbedUnimplementedProxyServiceServer() +} + +func RegisterProxyServiceServer(s grpc.ServiceRegistrar, srv ProxyServiceServer) { + s.RegisterService(&ProxyService_ServiceDesc, srv) +} + +func _ProxyService_GetMappingUpdate_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(GetMappingUpdateRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ProxyServiceServer).GetMappingUpdate(m, &proxyServiceGetMappingUpdateServer{stream}) +} + +type ProxyService_GetMappingUpdateServer interface { + Send(*GetMappingUpdateResponse) error + grpc.ServerStream +} + +type proxyServiceGetMappingUpdateServer struct { + grpc.ServerStream +} + +func (x *proxyServiceGetMappingUpdateServer) Send(m *GetMappingUpdateResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _ProxyService_SendAccessLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendAccessLogRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProxyServiceServer).SendAccessLog(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.ProxyService/SendAccessLog", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProxyServiceServer).SendAccessLog(ctx, req.(*SendAccessLogRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ProxyService_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthenticateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProxyServiceServer).Authenticate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.ProxyService/Authenticate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProxyServiceServer).Authenticate(ctx, req.(*AuthenticateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ProxyService_SendStatusUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendStatusUpdateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProxyServiceServer).SendStatusUpdate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.ProxyService/SendStatusUpdate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProxyServiceServer).SendStatusUpdate(ctx, req.(*SendStatusUpdateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ProxyService_CreateProxyPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateProxyPeerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProxyServiceServer).CreateProxyPeer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.ProxyService/CreateProxyPeer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProxyServiceServer).CreateProxyPeer(ctx, req.(*CreateProxyPeerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ProxyService_GetOIDCURL_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetOIDCURLRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProxyServiceServer).GetOIDCURL(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.ProxyService/GetOIDCURL", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProxyServiceServer).GetOIDCURL(ctx, req.(*GetOIDCURLRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ProxyService_ValidateSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateSessionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProxyServiceServer).ValidateSession(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.ProxyService/ValidateSession", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProxyServiceServer).ValidateSession(ctx, req.(*ValidateSessionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// ProxyService_ServiceDesc is the grpc.ServiceDesc for ProxyService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ProxyService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "management.ProxyService", + HandlerType: (*ProxyServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SendAccessLog", + Handler: _ProxyService_SendAccessLog_Handler, + }, + { + MethodName: "Authenticate", + Handler: _ProxyService_Authenticate_Handler, + }, + { + MethodName: "SendStatusUpdate", + Handler: _ProxyService_SendStatusUpdate_Handler, + }, + { + MethodName: "CreateProxyPeer", + Handler: _ProxyService_CreateProxyPeer_Handler, + }, + { + MethodName: "GetOIDCURL", + Handler: _ProxyService_GetOIDCURL_Handler, + }, + { + MethodName: "ValidateSession", + Handler: _ProxyService_ValidateSession_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "GetMappingUpdate", + Handler: _ProxyService_GetMappingUpdate_Handler, + ServerStreams: true, + }, + }, + Metadata: "proxy_service.proto", +} diff --git a/shared/management/status/error.go b/shared/management/status/error.go index ea02173e9..78288aef3 100644 --- a/shared/management/status/error.go +++ b/shared/management/status/error.go @@ -262,3 +262,11 @@ func NewZoneNotFoundError(zoneID string) error { func NewDNSRecordNotFoundError(recordID string) error { return Errorf(NotFound, "dns record: %s not found", recordID) } + +func NewResourceInUseError(resourceID string, proxyID string) error { + return Errorf(PreconditionFailed, "resource %s is in use by proxy %s", resourceID, proxyID) +} + +func NewPeerInUseError(peerID string, proxyID string) error { + return Errorf(PreconditionFailed, "peer %s is in use by proxy %s", peerID, proxyID) +} diff --git a/util/log.go b/util/log.go index a951eab87..03547024a 100644 --- a/util/log.go +++ b/util/log.go @@ -30,9 +30,14 @@ var ( // InitLog parses and sets log-level input func InitLog(logLevel string, logs ...string) error { + return InitLogger(log.StandardLogger(), logLevel, logs...) +} + +// InitLogger parses and sets log-level input for a logrus logger +func InitLogger(logger *log.Logger, logLevel string, logs ...string) error { level, err := log.ParseLevel(logLevel) if err != nil { - log.Errorf("Failed parsing log-level %s: %s", logLevel, err) + logger.Errorf("Failed parsing log-level %s: %s", logLevel, err) return err } var writers []io.Writer @@ -41,34 +46,34 @@ func InitLog(logLevel string, logs ...string) error { for _, logPath := range logs { switch logPath { case LogSyslog: - AddSyslogHook() + AddSyslogHookToLogger(logger) logFmt = "syslog" case LogConsole: writers = append(writers, os.Stderr) case "": - log.Warnf("empty log path received: %#v", logPath) + logger.Warnf("empty log path received: %#v", logPath) default: writers = append(writers, newRotatedOutput(logPath)) } } if len(writers) > 1 { - log.SetOutput(io.MultiWriter(writers...)) + logger.SetOutput(io.MultiWriter(writers...)) } else if len(writers) == 1 { - log.SetOutput(writers[0]) + logger.SetOutput(writers[0]) } switch logFmt { case "json": - formatter.SetJSONFormatter(log.StandardLogger()) + formatter.SetJSONFormatter(logger) case "syslog": - formatter.SetSyslogFormatter(log.StandardLogger()) + formatter.SetSyslogFormatter(logger) default: - formatter.SetTextFormatter(log.StandardLogger()) + formatter.SetTextFormatter(logger) } - log.SetLevel(level) + logger.SetLevel(level) - setGRPCLibLogger() + setGRPCLibLogger(logger) return nil } @@ -96,8 +101,8 @@ func newRotatedOutput(logPath string) io.Writer { return lumberjackLogger } -func setGRPCLibLogger() { - logOut := log.StandardLogger().Writer() +func setGRPCLibLogger(logger *log.Logger) { + logOut := logger.Writer() if os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") != "info" { grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, logOut, logOut)) return diff --git a/util/syslog_nonwindows.go b/util/syslog_nonwindows.go index 328bb8b1c..4a33f21b1 100644 --- a/util/syslog_nonwindows.go +++ b/util/syslog_nonwindows.go @@ -10,10 +10,14 @@ import ( ) func AddSyslogHook() { + AddSyslogHookToLogger(log.StandardLogger()) +} + +func AddSyslogHookToLogger(logger *log.Logger) { hook, err := lSyslog.NewSyslogHook("", "", syslog.LOG_INFO, "") if err != nil { - log.Errorf("Failed creating syslog hook: %s", err) + logger.Errorf("Failed creating syslog hook: %s", err) } - log.AddHook(hook) + logger.AddHook(hook) } diff --git a/util/syslog_windows.go b/util/syslog_windows.go index 171c1a459..68fddfc5e 100644 --- a/util/syslog_windows.go +++ b/util/syslog_windows.go @@ -1,6 +1,13 @@ package util +import log "github.com/sirupsen/logrus" + func AddSyslogHook() { // The syslog package is not available for Windows. This adapter is needed // to handle windows build. } + +func AddSyslogHookToLogger(logger *log.Logger) { + // The syslog package is not available for Windows. This adapter is needed + // to handle windows build. +} From 01a9cd46514a24fcc13d037288bd7e9fe731dfb4 Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Sat, 14 Feb 2026 16:34:04 +0100 Subject: [PATCH 126/374] [misc] Fix reverse proxy getting started messaging (#5317) * Fix reverse proxy getting started messaging * Fix reverse proxy getting started messaging --- infrastructure_files/getting-started.sh | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh index b96598622..2d800eb11 100755 --- a/infrastructure_files/getting-started.sh +++ b/infrastructure_files/getting-started.sh @@ -169,7 +169,8 @@ read_proxy_docker_network() { read_enable_proxy() { echo "" > /dev/stderr echo "Do you want to enable the NetBird Proxy service?" > /dev/stderr - echo "The proxy exposes internal NetBird network resources to the internet." > /dev/stderr + echo "The proxy allows you to selectively expose internal NetBird network resources" > /dev/stderr + echo "to the internet. You control which resources are exposed through the dashboard." > /dev/stderr echo -n "Enable proxy? [y/N]: " > /dev/stderr read -r CHOICE < /dev/tty @@ -182,11 +183,16 @@ read_enable_proxy() { } read_proxy_domain() { + local suggested_proxy="proxy.${NETBIRD_DOMAIN}" + echo "" > /dev/stderr - echo "WARNING: The proxy domain MUST NOT be a subdomain of the NetBird management" > /dev/stderr - echo "domain ($NETBIRD_DOMAIN). Using a subdomain will cause TLS certificate conflicts." > /dev/stderr + echo "NOTE: The proxy domain must be different from the management domain ($NETBIRD_DOMAIN)" > /dev/stderr + echo "to avoid TLS certificate conflicts." > /dev/stderr echo "" > /dev/stderr - echo -n "Enter the domain for the NetBird Proxy (e.g. proxy.my-domain.com): " > /dev/stderr + echo "You also need to add a wildcard DNS record for the proxy domain," > /dev/stderr + echo "e.g. *.${suggested_proxy} pointing to the same server IP as $NETBIRD_DOMAIN." > /dev/stderr + echo "" > /dev/stderr + echo -n "Enter the domain for the NetBird Proxy (e.g. ${suggested_proxy}): " > /dev/stderr read -r READ_PROXY_DOMAIN < /dev/tty if [[ -z "$READ_PROXY_DOMAIN" ]]; then From 68c481fa44a0790583f80ae8fa1d34e425b8d83b Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Sat, 14 Feb 2026 20:27:15 +0100 Subject: [PATCH 127/374] [management] Move service reload outside transaction in account settings update (#5325) Bug Fixes Network and DNS updates now defer service and reverse-proxy reloads until after account updates complete, preventing inconsistent proxy state and race conditions. Chores Removed automatic peer/broadcast updates immediately following bulk service reloads. Tests Added a test ensuring network-range changes complete without deadlock. --- .../modules/reverseproxy/manager/manager.go | 2 -- management/server/account.go | 10 ++++-- management/server/account_test.go | 33 +++++++++++++++++++ 3 files changed, 40 insertions(+), 5 deletions(-) diff --git a/management/internals/modules/reverseproxy/manager/manager.go b/management/internals/modules/reverseproxy/manager/manager.go index 2a93fdff6..535705a37 100644 --- a/management/internals/modules/reverseproxy/manager/manager.go +++ b/management/internals/modules/reverseproxy/manager/manager.go @@ -473,8 +473,6 @@ func (m *managerImpl) ReloadAllServicesForAccount(ctx context.Context, accountID m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Update, "", m.proxyGRPCServer.GetOIDCValidationConfig()), service.ProxyCluster) } - m.accountManager.UpdateAccountPeers(ctx, accountID) - return nil } diff --git a/management/server/account.go b/management/server/account.go index 7b858c223..1e35d4ad1 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -297,6 +297,7 @@ func (am *DefaultAccountManager) UpdateAccountSettings(ctx context.Context, acco var oldSettings *types.Settings var updateAccountPeers bool var groupChangesAffectPeers bool + var reloadReverseProxy bool err = am.Store.ExecuteInTransaction(ctx, func(transaction store.Store) error { var groupsUpdated bool @@ -327,9 +328,7 @@ func (am *DefaultAccountManager) UpdateAccountSettings(ctx context.Context, acco if err = am.reallocateAccountPeerIPs(ctx, transaction, accountID, newSettings.NetworkRange); err != nil { return err } - if err = am.reverseProxyManager.ReloadAllServicesForAccount(ctx, accountID); err != nil { - log.WithContext(ctx).Warnf("failed to reload all services for account %s: %v", accountID, err) - } + reloadReverseProxy = true updateAccountPeers = true } @@ -394,6 +393,11 @@ func (am *DefaultAccountManager) UpdateAccountSettings(ctx context.Context, acco } am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountNetworkRangeUpdated, eventMeta) } + if reloadReverseProxy { + if err = am.reverseProxyManager.ReloadAllServicesForAccount(ctx, accountID); err != nil { + log.WithContext(ctx).Warnf("failed to reload all services for account %s: %v", accountID, err) + } + } if updateAccountPeers || extraSettingsChanged || groupChangesAffectPeers { go am.UpdateAccountPeers(ctx, accountID) diff --git a/management/server/account_test.go b/management/server/account_test.go index 44bb0fb1c..1cc0c9571 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -3918,3 +3918,36 @@ func TestAddNewUserToDomainAccountWithoutApproval(t *testing.T) { assert.False(t, user.PendingApproval, "User should not be pending approval") assert.Equal(t, existingAccountID, user.AccountID) } + +// TestDefaultAccountManager_UpdateAccountSettings_NetworkRangeChange verifies that +// changing NetworkRange via UpdateAccountSettings does not deadlock. +// The deadlock occurs because ReloadAllServicesForAccount is called inside a DB +// transaction but uses the main store connection, which blocks on the transaction lock. +func TestDefaultAccountManager_UpdateAccountSettings_NetworkRangeChange(t *testing.T) { + manager, _, err := createManager(t) + require.NoError(t, err) + + accountID, err := manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userID}) + require.NoError(t, err) + + ctx := context.Background() + + // Use a channel to detect if the call completes or hangs + done := make(chan error, 1) + go func() { + _, err := manager.UpdateAccountSettings(ctx, accountID, userID, &types.Settings{ + PeerLoginExpiration: time.Hour, + PeerLoginExpirationEnabled: true, + NetworkRange: netip.MustParsePrefix("10.100.0.0/16"), + Extra: &types.ExtraSettings{}, + }) + done <- err + }() + + select { + case err := <-done: + require.NoError(t, err, "UpdateAccountSettings should complete without error") + case <-time.After(10 * time.Second): + t.Fatal("UpdateAccountSettings deadlocked when changing NetworkRange") + } +} From cb9b39b950bd0fbb0d2bad29b74e44bdf7cfec95 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Sun, 15 Feb 2026 12:51:46 +0100 Subject: [PATCH 128/374] [misc] add extra proxy domain instructions (#5328) improve proxy domain instructions expose wireguard port --- infrastructure_files/getting-started.sh | 33 +++++++++++++++++-------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh index 2d800eb11..864e9af32 100755 --- a/infrastructure_files/getting-started.sh +++ b/infrastructure_files/getting-started.sh @@ -183,14 +183,14 @@ read_enable_proxy() { } read_proxy_domain() { - local suggested_proxy="proxy.${NETBIRD_DOMAIN}" + local suggested_proxy="proxy.${BASE_DOMAIN}" echo "" > /dev/stderr echo "NOTE: The proxy domain must be different from the management domain ($NETBIRD_DOMAIN)" > /dev/stderr echo "to avoid TLS certificate conflicts." > /dev/stderr echo "" > /dev/stderr echo "You also need to add a wildcard DNS record for the proxy domain," > /dev/stderr - echo "e.g. *.${suggested_proxy} pointing to the same server IP as $NETBIRD_DOMAIN." > /dev/stderr + echo "e.g. *.${suggested_proxy} pointing to the same server domain as $NETBIRD_DOMAIN with a CNAME record." > /dev/stderr echo "" > /dev/stderr echo -n "Enter the domain for the NetBird Proxy (e.g. ${suggested_proxy}): " > /dev/stderr read -r READ_PROXY_DOMAIN < /dev/tty @@ -202,13 +202,16 @@ read_proxy_domain() { fi if [[ "$READ_PROXY_DOMAIN" == "$NETBIRD_DOMAIN" ]]; then - echo "The proxy domain cannot be the same as the management domain ($NETBIRD_DOMAIN)." > /dev/stderr + echo "" > /dev/stderr + echo "WARNING: The proxy domain cannot be the same as the management domain ($NETBIRD_DOMAIN)." > /dev/stderr read_proxy_domain return fi - if [[ "$READ_PROXY_DOMAIN" == *".${NETBIRD_DOMAIN}" ]]; then - echo "The proxy domain cannot be a subdomain of the management domain ($NETBIRD_DOMAIN)." > /dev/stderr + echo ${READ_PROXY_DOMAIN} | grep ${NETBIRD_DOMAIN} > /dev/null + if [[ $? -eq 0 ]]; then + echo "" > /dev/stderr + echo "WARNING: The proxy domain cannot be a subdomain of the management domain ($NETBIRD_DOMAIN)." > /dev/stderr read_proxy_domain return fi @@ -340,10 +343,12 @@ configure_domain() { if [[ "$NETBIRD_DOMAIN" == "use-ip" ]]; then NETBIRD_DOMAIN=$(get_main_ip_address) + BASE_DOMAIN=$NETBIRD_DOMAIN else NETBIRD_PORT=443 NETBIRD_HTTP_PROTOCOL="https" NETBIRD_RELAY_PROTO="rels" + BASE_DOMAIN=$(echo $NETBIRD_DOMAIN | sed -E 's/^[^.]+\.//') fi return 0 } @@ -566,6 +571,8 @@ render_docker_compose_traefik_builtin() { # Hairpin NAT fix: route domain back to traefik's static IP within Docker extra_hosts: - \"$NETBIRD_DOMAIN:172.30.0.10\" + ports: + - 51820:51820/udp restart: unless-stopped networks: [netbird] depends_on: @@ -1150,23 +1157,29 @@ print_builtin_traefik_instructions() { echo " NETBIRD SETUP COMPLETE" echo "$MSG_SEPARATOR" echo "" - echo "You can access the NetBird dashboard at $NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN" + echo "You can access the NetBird dashboard at:" + echo " $NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN" + echo "" echo "Follow the onboarding steps to set up your NetBird instance." echo "" echo "Traefik is handling TLS certificates automatically via Let's Encrypt." echo "If you see certificate warnings, wait a moment for certificate issuance to complete." echo "" echo "Open ports:" - echo " - 443/tcp (HTTPS - all NetBird services)" - echo " - 80/tcp (HTTP - redirects to HTTPS)" - echo " - $NETBIRD_STUN_PORT/udp (STUN - required for NAT traversal)" + echo " - 443/tcp (HTTPS - all NetBird services)" + echo " - 80/tcp (HTTP - redirects to HTTPS)" + echo " - $NETBIRD_STUN_PORT/udp (STUN - required for NAT traversal)" if [[ "$ENABLE_PROXY" == "true" ]]; then + echo " - 51820/udp (WIREGUARD - (optional) for P2P proxy connections)" echo "" echo "NetBird Proxy:" echo " The proxy service is enabled and running." echo " Any domain NOT matching $NETBIRD_DOMAIN will be passed through to the proxy." echo " The proxy handles its own TLS certificates via ACME TLS-ALPN-01 challenge." - echo " Point your proxy domains (CNAMEs) to this server's IP address." + echo " Point your proxy domain to this server's domain address like in the example below:" + echo "" + echo " *.$PROXY_DOMAIN CNAME $NETBIRD_DOMAIN" + echo "" fi return 0 } From e5d4947d60247c0768c57823f1226dbe686b7167 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Sun, 15 Feb 2026 22:10:26 +0100 Subject: [PATCH 129/374] [client] Optimize Windows DNS performance with domain batching and batch mode (#5264) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Optimize Windows DNS performance with domain batching and batch mode Implement two-layer optimization to reduce Windows NRPT registry operations: 1. Domain Batching (host_windows.go): - Batch domains per NRPT - Reduces NRPT rules by ~97% (e.g., 184 domains: 184 rules → 4 rules) - Modified addDNSMatchPolicy() to create batched NRPT entries - Added comprehensive tests in host_windows_test.go 2. Batch Mode (server.go): - Added BeginBatch/EndBatch methods to defer DNS updates - Modified RegisterHandler/DeregisterHandler to skip applyHostConfig in batch mode - Protected all applyHostConfig() calls with batch mode checks - Updated route manager to wrap route operations with batch calls * Update tests * Fix log line * Fix NRPT rule index to ensure cleanup covers partially created rules * Ensure NRPT entry count updates even on errors to improve cleanup reliability * Switch DNS batch mode logging from Info to Debug level * Fix batch mode to not suppress critical DNS config updates Batch mode should only defer applyHostConfig() for RegisterHandler/ DeregisterHandler operations. Management updates and upstream nameserver failures (deactivate/reactivate callbacks) need immediate DNS config updates regardless of batch mode to ensure timely failover. Without this fix, if a nameserver goes down during a route update, the system DNS config won't be updated until EndBatch(), potentially delaying failover by several seconds. Or if you prefer a shorter version: Fix batch mode to allow immediate DNS updates for critical paths Batch mode now only affects RegisterHandler/DeregisterHandler. Management updates and nameserver failures always trigger immediate DNS config updates to ensure timely failover. * Add DNS batch cancellation to rollback partial changes on errors Introduces CancelBatch() method to the DNS server interface to handle error scenarios during batch operations. When route updates fail partway through, the DNS server can now discard accumulated changes instead of applying partial state. This prevents leaving the DNS configuration in an inconsistent state when route manager operations encounter errors. The changes add error-aware batch handling to prevent partial DNS configuration updates when route operations fail, which improves system reliability. --- client/internal/dns/host_windows.go | 37 +++-- client/internal/dns/host_windows_test.go | 166 ++++++++++++++++++---- client/internal/dns/mock_server.go | 15 ++ client/internal/dns/server.go | 43 +++++- client/internal/dns/server_export_test.go | 7 +- client/internal/routemanager/manager.go | 18 +++ 6 files changed, 244 insertions(+), 42 deletions(-) diff --git a/client/internal/dns/host_windows.go b/client/internal/dns/host_windows.go index 01b7edc48..9b7a7b52b 100644 --- a/client/internal/dns/host_windows.go +++ b/client/internal/dns/host_windows.go @@ -42,6 +42,8 @@ const ( dnsPolicyConfigConfigOptionsKey = "ConfigOptions" dnsPolicyConfigConfigOptionsValue = 0x8 + nrptMaxDomainsPerRule = 50 + interfaceConfigPath = `SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces` interfaceConfigNameServerKey = "NameServer" interfaceConfigSearchListKey = "SearchList" @@ -198,10 +200,11 @@ func (r *registryConfigurator) applyDNSConfig(config HostDNSConfig, stateManager if len(matchDomains) != 0 { count, err := r.addDNSMatchPolicy(matchDomains, config.ServerIP) + // Update count even on error to ensure cleanup covers partially created rules + r.nrptEntryCount = count if err != nil { return fmt.Errorf("add dns match policy: %w", err) } - r.nrptEntryCount = count } else { r.nrptEntryCount = 0 } @@ -239,23 +242,33 @@ func (r *registryConfigurator) addDNSSetupForAll(ip netip.Addr) error { func (r *registryConfigurator) addDNSMatchPolicy(domains []string, ip netip.Addr) (int, error) { // if the gpo key is present, we need to put our DNS settings there, otherwise our config might be ignored // see https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-gpnrpt/8cc31cb9-20cb-4140-9e85-3e08703b4745 - for i, domain := range domains { - localPath := fmt.Sprintf("%s-%d", dnsPolicyConfigMatchPath, i) - gpoPath := fmt.Sprintf("%s-%d", gpoDnsPolicyConfigMatchPath, i) - singleDomain := []string{domain} + // We need to batch domains into chunks and create one NRPT rule per batch. + ruleIndex := 0 + for i := 0; i < len(domains); i += nrptMaxDomainsPerRule { + end := i + nrptMaxDomainsPerRule + if end > len(domains) { + end = len(domains) + } + batchDomains := domains[i:end] - if err := r.configureDNSPolicy(localPath, singleDomain, ip); err != nil { - return i, fmt.Errorf("configure DNS Local policy for domain %s: %w", domain, err) + localPath := fmt.Sprintf("%s-%d", dnsPolicyConfigMatchPath, ruleIndex) + gpoPath := fmt.Sprintf("%s-%d", gpoDnsPolicyConfigMatchPath, ruleIndex) + + if err := r.configureDNSPolicy(localPath, batchDomains, ip); err != nil { + return ruleIndex, fmt.Errorf("configure DNS Local policy for rule %d: %w", ruleIndex, err) } + // Increment immediately so the caller's cleanup path knows about this rule + ruleIndex++ + if r.gpo { - if err := r.configureDNSPolicy(gpoPath, singleDomain, ip); err != nil { - return i, fmt.Errorf("configure gpo DNS policy: %w", err) + if err := r.configureDNSPolicy(gpoPath, batchDomains, ip); err != nil { + return ruleIndex, fmt.Errorf("configure gpo DNS policy for rule %d: %w", ruleIndex-1, err) } } - log.Debugf("added NRPT entry for domain: %s", domain) + log.Debugf("added NRPT rule %d with %d domains", ruleIndex-1, len(batchDomains)) } if r.gpo { @@ -264,8 +277,8 @@ func (r *registryConfigurator) addDNSMatchPolicy(domains []string, ip netip.Addr } } - log.Infof("added %d separate NRPT entries. Domain list: %s", len(domains), domains) - return len(domains), nil + log.Infof("added %d NRPT rules for %d domains. Domain list: %v", ruleIndex, len(domains), domains) + return ruleIndex, nil } func (r *registryConfigurator) configureDNSPolicy(policyPath string, domains []string, ip netip.Addr) error { diff --git a/client/internal/dns/host_windows_test.go b/client/internal/dns/host_windows_test.go index 19496bf5a..3cd2b1bd5 100644 --- a/client/internal/dns/host_windows_test.go +++ b/client/internal/dns/host_windows_test.go @@ -12,6 +12,7 @@ import ( // TestNRPTEntriesCleanupOnConfigChange tests that old NRPT entries are properly cleaned up // when the number of match domains decreases between configuration changes. +// With batching enabled (50 domains per rule), we need enough domains to create multiple rules. func TestNRPTEntriesCleanupOnConfigChange(t *testing.T) { if testing.Short() { t.Skip("skipping registry integration test in short mode") @@ -37,51 +38,60 @@ func TestNRPTEntriesCleanupOnConfigChange(t *testing.T) { gpo: false, } - config5 := HostDNSConfig{ - ServerIP: testIP, - Domains: []DomainConfig{ - {Domain: "domain1.com", MatchOnly: true}, - {Domain: "domain2.com", MatchOnly: true}, - {Domain: "domain3.com", MatchOnly: true}, - {Domain: "domain4.com", MatchOnly: true}, - {Domain: "domain5.com", MatchOnly: true}, - }, + // Create 125 domains which will result in 3 NRPT rules (50+50+25) + domains125 := make([]DomainConfig, 125) + for i := 0; i < 125; i++ { + domains125[i] = DomainConfig{ + Domain: fmt.Sprintf("domain%d.com", i+1), + MatchOnly: true, + } } - err = cfg.applyDNSConfig(config5, nil) + config125 := HostDNSConfig{ + ServerIP: testIP, + Domains: domains125, + } + + err = cfg.applyDNSConfig(config125, nil) require.NoError(t, err) - // Verify all 5 entries exist - for i := 0; i < 5; i++ { + // Verify 3 NRPT rules exist + assert.Equal(t, 3, cfg.nrptEntryCount, "Should create 3 NRPT rules for 125 domains") + for i := 0; i < 3; i++ { exists, err := registryKeyExists(fmt.Sprintf("%s-%d", dnsPolicyConfigMatchPath, i)) require.NoError(t, err) - assert.True(t, exists, "Entry %d should exist after first config", i) + assert.True(t, exists, "NRPT rule %d should exist after first config", i) } - config2 := HostDNSConfig{ + // Reduce to 75 domains which will result in 2 NRPT rules (50+25) + domains75 := make([]DomainConfig, 75) + for i := 0; i < 75; i++ { + domains75[i] = DomainConfig{ + Domain: fmt.Sprintf("domain%d.com", i+1), + MatchOnly: true, + } + } + + config75 := HostDNSConfig{ ServerIP: testIP, - Domains: []DomainConfig{ - {Domain: "domain1.com", MatchOnly: true}, - {Domain: "domain2.com", MatchOnly: true}, - }, + Domains: domains75, } - err = cfg.applyDNSConfig(config2, nil) + err = cfg.applyDNSConfig(config75, nil) require.NoError(t, err) - // Verify first 2 entries exist + // Verify first 2 NRPT rules exist + assert.Equal(t, 2, cfg.nrptEntryCount, "Should create 2 NRPT rules for 75 domains") for i := 0; i < 2; i++ { exists, err := registryKeyExists(fmt.Sprintf("%s-%d", dnsPolicyConfigMatchPath, i)) require.NoError(t, err) - assert.True(t, exists, "Entry %d should exist after second config", i) + assert.True(t, exists, "NRPT rule %d should exist after second config", i) } - // Verify entries 2-4 are cleaned up - for i := 2; i < 5; i++ { - exists, err := registryKeyExists(fmt.Sprintf("%s-%d", dnsPolicyConfigMatchPath, i)) - require.NoError(t, err) - assert.False(t, exists, "Entry %d should NOT exist after reducing to 2 domains", i) - } + // Verify rule 2 is cleaned up + exists, err := registryKeyExists(fmt.Sprintf("%s-%d", dnsPolicyConfigMatchPath, 2)) + require.NoError(t, err) + assert.False(t, exists, "NRPT rule 2 should NOT exist after reducing to 75 domains") } func registryKeyExists(path string) (bool, error) { @@ -97,6 +107,106 @@ func registryKeyExists(path string) (bool, error) { } func cleanupRegistryKeys(*testing.T) { - cfg := ®istryConfigurator{nrptEntryCount: 10} + // Clean up more entries to account for batching tests with many domains + cfg := ®istryConfigurator{nrptEntryCount: 20} _ = cfg.removeDNSMatchPolicies() } + +// TestNRPTDomainBatching verifies that domains are correctly batched into NRPT rules. +func TestNRPTDomainBatching(t *testing.T) { + if testing.Short() { + t.Skip("skipping registry integration test in short mode") + } + + defer cleanupRegistryKeys(t) + cleanupRegistryKeys(t) + + testIP := netip.MustParseAddr("100.64.0.1") + + // Create a test interface registry key so updateSearchDomains doesn't fail + testGUID := "{12345678-1234-1234-1234-123456789ABC}" + interfacePath := `SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces\` + testGUID + testKey, _, err := registry.CreateKey(registry.LOCAL_MACHINE, interfacePath, registry.SET_VALUE) + require.NoError(t, err, "Should create test interface registry key") + testKey.Close() + defer func() { + _ = registry.DeleteKey(registry.LOCAL_MACHINE, interfacePath) + }() + + cfg := ®istryConfigurator{ + guid: testGUID, + gpo: false, + } + + testCases := []struct { + name string + domainCount int + expectedRuleCount int + }{ + { + name: "Less than 50 domains (single rule)", + domainCount: 30, + expectedRuleCount: 1, + }, + { + name: "Exactly 50 domains (single rule)", + domainCount: 50, + expectedRuleCount: 1, + }, + { + name: "51 domains (two rules)", + domainCount: 51, + expectedRuleCount: 2, + }, + { + name: "100 domains (two rules)", + domainCount: 100, + expectedRuleCount: 2, + }, + { + name: "125 domains (three rules: 50+50+25)", + domainCount: 125, + expectedRuleCount: 3, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Clean up before each subtest + cleanupRegistryKeys(t) + + // Generate domains + domains := make([]DomainConfig, tc.domainCount) + for i := 0; i < tc.domainCount; i++ { + domains[i] = DomainConfig{ + Domain: fmt.Sprintf("domain%d.com", i+1), + MatchOnly: true, + } + } + + config := HostDNSConfig{ + ServerIP: testIP, + Domains: domains, + } + + err := cfg.applyDNSConfig(config, nil) + require.NoError(t, err) + + // Verify that exactly expectedRuleCount rules were created + assert.Equal(t, tc.expectedRuleCount, cfg.nrptEntryCount, + "Should create %d NRPT rules for %d domains", tc.expectedRuleCount, tc.domainCount) + + // Verify all expected rules exist + for i := 0; i < tc.expectedRuleCount; i++ { + exists, err := registryKeyExists(fmt.Sprintf("%s-%d", dnsPolicyConfigMatchPath, i)) + require.NoError(t, err) + assert.True(t, exists, "NRPT rule %d should exist", i) + } + + // Verify no extra rules were created + exists, err := registryKeyExists(fmt.Sprintf("%s-%d", dnsPolicyConfigMatchPath, tc.expectedRuleCount)) + require.NoError(t, err) + assert.False(t, exists, "No NRPT rule should exist at index %d", tc.expectedRuleCount) + }) + } +} diff --git a/client/internal/dns/mock_server.go b/client/internal/dns/mock_server.go index 0f89b9016..fe160e20a 100644 --- a/client/internal/dns/mock_server.go +++ b/client/internal/dns/mock_server.go @@ -84,3 +84,18 @@ func (m *MockServer) UpdateServerConfig(domains dnsconfig.ServerDomains) error { func (m *MockServer) PopulateManagementDomain(mgmtURL *url.URL) error { return nil } + +// BeginBatch mock implementation of BeginBatch from Server interface +func (m *MockServer) BeginBatch() { + // Mock implementation - no-op +} + +// EndBatch mock implementation of EndBatch from Server interface +func (m *MockServer) EndBatch() { + // Mock implementation - no-op +} + +// CancelBatch mock implementation of CancelBatch from Server interface +func (m *MockServer) CancelBatch() { + // Mock implementation - no-op +} diff --git a/client/internal/dns/server.go b/client/internal/dns/server.go index c2b01de62..179517bbd 100644 --- a/client/internal/dns/server.go +++ b/client/internal/dns/server.go @@ -45,6 +45,9 @@ type IosDnsManager interface { type Server interface { RegisterHandler(domains domain.List, handler dns.Handler, priority int) DeregisterHandler(domains domain.List, priority int) + BeginBatch() + EndBatch() + CancelBatch() Initialize() error Stop() DnsIP() netip.Addr @@ -87,6 +90,7 @@ type DefaultServer struct { currentConfigHash uint64 handlerChain *HandlerChain extraDomains map[domain.Domain]int + batchMode bool mgmtCacheResolver *mgmt.Resolver @@ -234,7 +238,9 @@ func (s *DefaultServer) RegisterHandler(domains domain.List, handler dns.Handler // convert to zone with simple ref counter s.extraDomains[toZone(domain)]++ } - s.applyHostConfig() + if !s.batchMode { + s.applyHostConfig() + } } func (s *DefaultServer) registerHandler(domains []string, handler dns.Handler, priority int) { @@ -263,9 +269,41 @@ func (s *DefaultServer) DeregisterHandler(domains domain.List, priority int) { delete(s.extraDomains, zone) } } + if !s.batchMode { + s.applyHostConfig() + } +} + +// BeginBatch starts batch mode for DNS handler registration/deregistration. +// In batch mode, applyHostConfig() is not called after each handler operation, +// allowing multiple handlers to be registered/deregistered efficiently. +// Must be followed by EndBatch() to apply the accumulated changes. +func (s *DefaultServer) BeginBatch() { + s.mux.Lock() + defer s.mux.Unlock() + log.Debugf("DNS batch mode enabled") + s.batchMode = true +} + +// EndBatch ends batch mode and applies all accumulated DNS configuration changes. +func (s *DefaultServer) EndBatch() { + s.mux.Lock() + defer s.mux.Unlock() + log.Debugf("DNS batch mode disabled, applying accumulated changes") + s.batchMode = false s.applyHostConfig() } +// CancelBatch cancels batch mode without applying accumulated changes. +// This is useful when operations fail partway through and you want to +// discard partial state rather than applying it. +func (s *DefaultServer) CancelBatch() { + s.mux.Lock() + defer s.mux.Unlock() + log.Debugf("DNS batch mode cancelled, discarding accumulated changes") + s.batchMode = false +} + func (s *DefaultServer) deregisterHandler(domains []string, priority int) { log.Debugf("deregistering handler with priority %d for %v", priority, domains) @@ -523,6 +561,7 @@ func (s *DefaultServer) applyConfiguration(update nbdns.Config) error { s.currentConfig.RouteAll = false } + // Always apply host config for management updates, regardless of batch mode s.applyHostConfig() s.shutdownWg.Add(1) @@ -887,6 +926,7 @@ func (s *DefaultServer) upstreamCallbacks( } } + // Always apply host config when nameserver goes down, regardless of batch mode s.applyHostConfig() go func() { @@ -922,6 +962,7 @@ func (s *DefaultServer) upstreamCallbacks( s.registerHandler([]string{nbdns.RootZone}, handler, priority) } + // Always apply host config when nameserver reactivates, regardless of batch mode s.applyHostConfig() s.updateNSState(nsGroup, nil, true) diff --git a/client/internal/dns/server_export_test.go b/client/internal/dns/server_export_test.go index 1fa343b52..25d08d698 100644 --- a/client/internal/dns/server_export_test.go +++ b/client/internal/dns/server_export_test.go @@ -18,7 +18,12 @@ func TestGetServerDns(t *testing.T) { t.Errorf("invalid dns server instance: %s", err) } - if srvB != srv { + mockSrvB, ok := srvB.(*MockServer) + if !ok { + t.Errorf("returned server is not a MockServer") + } + + if mockSrvB != srv { t.Errorf("mismatch dns instances") } } diff --git a/client/internal/routemanager/manager.go b/client/internal/routemanager/manager.go index 077b9521b..9afe2049d 100644 --- a/client/internal/routemanager/manager.go +++ b/client/internal/routemanager/manager.go @@ -346,6 +346,23 @@ func (m *DefaultManager) updateSystemRoutes(newRoutes route.HAMap) error { } var merr *multierror.Error + + // Begin batch mode to avoid calling applyHostConfig() after each DNS handler operation + batchStarted := false + if m.dnsServer != nil { + m.dnsServer.BeginBatch() + batchStarted = true + defer func() { + if merr != nil { + // On error, cancel batch to discard partial DNS state + m.dnsServer.CancelBatch() + } else { + // On success, apply accumulated DNS changes + m.dnsServer.EndBatch() + } + }() + } + for id, handler := range toRemove { if err := handler.RemoveRoute(); err != nil { merr = multierror.Append(merr, fmt.Errorf("remove route %s: %w", handler.String(), err)) @@ -376,6 +393,7 @@ func (m *DefaultManager) updateSystemRoutes(newRoutes route.HAMap) error { m.activeRoutes[id] = handler } + _ = batchStarted // Mark as used return nberrors.FormatErrorOrNil(merr) } From 1024d45698c06fc9c674dfb7132c26c3b4e4fb6e Mon Sep 17 00:00:00 2001 From: Diego Romar Date: Mon, 16 Feb 2026 09:04:45 -0300 Subject: [PATCH 130/374] [mobile] Export lazy connection environment variables for mobile clients (#5310) * [client] Export lazy connection env vars Both for Android and iOS * [client] Separate comments --- client/android/env_list.go | 13 +++++++++++-- client/ios/NetBirdSDK/env_list.go | 15 ++++++++++++++- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/client/android/env_list.go b/client/android/env_list.go index 04122300a..a0a4d7040 100644 --- a/client/android/env_list.go +++ b/client/android/env_list.go @@ -1,10 +1,19 @@ package android -import "github.com/netbirdio/netbird/client/internal/peer" +import ( + "github.com/netbirdio/netbird/client/internal/lazyconn" + "github.com/netbirdio/netbird/client/internal/peer" +) var ( - // EnvKeyNBForceRelay Exported for Android java client + // EnvKeyNBForceRelay Exported for Android java client to force relay connections EnvKeyNBForceRelay = peer.EnvKeyNBForceRelay + + // EnvKeyNBLazyConn Exported for Android java client to configure lazy connection + EnvKeyNBLazyConn = lazyconn.EnvEnableLazyConn + + // EnvKeyNBInactivityThreshold Exported for Android java client to configure connection inactivity threshold + EnvKeyNBInactivityThreshold = lazyconn.EnvInactivityThreshold ) // EnvList wraps a Go map for export to Java diff --git a/client/ios/NetBirdSDK/env_list.go b/client/ios/NetBirdSDK/env_list.go index 4800803d7..88ac97957 100644 --- a/client/ios/NetBirdSDK/env_list.go +++ b/client/ios/NetBirdSDK/env_list.go @@ -2,7 +2,10 @@ package NetBirdSDK -import "github.com/netbirdio/netbird/client/internal/peer" +import ( + "github.com/netbirdio/netbird/client/internal/lazyconn" + "github.com/netbirdio/netbird/client/internal/peer" +) // EnvList is an exported struct to be bound by gomobile type EnvList struct { @@ -32,3 +35,13 @@ func (el *EnvList) AllItems() map[string]string { func GetEnvKeyNBForceRelay() string { return peer.EnvKeyNBForceRelay } + +// GetEnvKeyNBLazyConn Exports the environment variable for the iOS client +func GetEnvKeyNBLazyConn() string { + return lazyconn.EnvEnableLazyConn +} + +// GetEnvKeyNBInactivityThreshold Exports the environment variable for the iOS client +func GetEnvKeyNBInactivityThreshold() string { + return lazyconn.EnvInactivityThreshold +} From 0d1ffba75fb32bb1150017cb3db1f66250b3193b Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Mon, 16 Feb 2026 13:30:58 +0100 Subject: [PATCH 131/374] [misc] add additional cname example (#5341) --- infrastructure_files/getting-started.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh index 864e9af32..d8a9e9ad6 100755 --- a/infrastructure_files/getting-started.sh +++ b/infrastructure_files/getting-started.sh @@ -1176,8 +1176,9 @@ print_builtin_traefik_instructions() { echo " The proxy service is enabled and running." echo " Any domain NOT matching $NETBIRD_DOMAIN will be passed through to the proxy." echo " The proxy handles its own TLS certificates via ACME TLS-ALPN-01 challenge." - echo " Point your proxy domain to this server's domain address like in the example below:" + echo " Point your proxy domain to this server's domain address like in the examples below:" echo "" + echo " $PROXY_DOMAIN CNAME $NETBIRD_DOMAIN" echo " *.$PROXY_DOMAIN CNAME $NETBIRD_DOMAIN" echo "" fi From baed6e46eca4008725a917d2795857791fd70a62 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Mon, 16 Feb 2026 20:59:29 +0100 Subject: [PATCH 132/374] Reset WireGuard endpoint on ICE session change during relay fallback (#5283) When an ICE connection disconnects and falls back to relay, reset the WireGuard endpoint and handshake watcher if the remote peer's ICE session has changed. This ensures the controller re-establishes a fresh WireGuard handshake rather than waiting on a stale endpoint from the previous session. --- client/internal/peer/conn.go | 17 ++++++++++++++++- client/internal/peer/endpoint.go | 4 ++++ client/internal/peer/wg_watcher.go | 18 ++++++++++++++++++ client/internal/peer/worker_ice.go | 18 ++++++++++++------ 4 files changed, 50 insertions(+), 7 deletions(-) diff --git a/client/internal/peer/conn.go b/client/internal/peer/conn.go index eb455431d..af6ab3f83 100644 --- a/client/internal/peer/conn.go +++ b/client/internal/peer/conn.go @@ -410,7 +410,7 @@ func (conn *Conn) onICEConnectionIsReady(priority conntype.ConnPriority, iceConn conn.doOnConnected(iceConnInfo.RosenpassPubKey, iceConnInfo.RosenpassAddr) } -func (conn *Conn) onICEStateDisconnected() { +func (conn *Conn) onICEStateDisconnected(sessionChanged bool) { conn.mu.Lock() defer conn.mu.Unlock() @@ -430,6 +430,10 @@ func (conn *Conn) onICEStateDisconnected() { if conn.isReadyToUpgrade() { conn.Log.Infof("ICE disconnected, set Relay to active connection") conn.dumpState.SwitchToRelay() + if sessionChanged { + conn.resetEndpoint() + } + conn.wgProxyRelay.Work() presharedKey := conn.presharedKey(conn.rosenpassRemoteKey) @@ -757,6 +761,17 @@ func (conn *Conn) newProxy(remoteConn net.Conn) (wgproxy.Proxy, error) { return wgProxy, nil } +func (conn *Conn) resetEndpoint() { + if !isController(conn.config) { + return + } + conn.Log.Infof("reset wg endpoint") + conn.wgWatcher.Reset() + if err := conn.endpointUpdater.RemoveEndpointAddress(); err != nil { + conn.Log.Warnf("failed to remove endpoint address before update: %v", err) + } +} + func (conn *Conn) isReadyToUpgrade() bool { return conn.wgProxyRelay != nil && conn.currentConnPriority != conntype.Relay } diff --git a/client/internal/peer/endpoint.go b/client/internal/peer/endpoint.go index 52d66159c..372f33ec6 100644 --- a/client/internal/peer/endpoint.go +++ b/client/internal/peer/endpoint.go @@ -66,6 +66,10 @@ func (e *EndpointUpdater) RemoveWgPeer() error { return e.wgConfig.WgInterface.RemovePeer(e.wgConfig.RemoteKey) } +func (e *EndpointUpdater) RemoveEndpointAddress() error { + return e.wgConfig.WgInterface.RemoveEndpointAddress(e.wgConfig.RemoteKey) +} + func (e *EndpointUpdater) waitForCloseTheDelayedUpdate() { if e.cancelFunc == nil { return diff --git a/client/internal/peer/wg_watcher.go b/client/internal/peer/wg_watcher.go index d40ec7a80..799a9375e 100644 --- a/client/internal/peer/wg_watcher.go +++ b/client/internal/peer/wg_watcher.go @@ -32,6 +32,8 @@ type WGWatcher struct { enabled bool muEnabled sync.RWMutex + + resetCh chan struct{} } func NewWGWatcher(log *log.Entry, wgIfaceStater WGInterfaceStater, peerKey string, stateDump *stateDump) *WGWatcher { @@ -40,6 +42,7 @@ func NewWGWatcher(log *log.Entry, wgIfaceStater WGInterfaceStater, peerKey strin wgIfaceStater: wgIfaceStater, peerKey: peerKey, stateDump: stateDump, + resetCh: make(chan struct{}, 1), } } @@ -76,6 +79,15 @@ func (w *WGWatcher) IsEnabled() bool { return w.enabled } +// Reset signals the watcher that the WireGuard peer has been reset and a new +// handshake is expected. This restarts the handshake timeout from scratch. +func (w *WGWatcher) Reset() { + select { + case w.resetCh <- struct{}{}: + default: + } +} + // wgStateCheck help to check the state of the WireGuard handshake and relay connection func (w *WGWatcher) periodicHandshakeCheck(ctx context.Context, onDisconnectedFn func(), enabledTime time.Time, initialHandshake time.Time) { w.log.Infof("WireGuard watcher started") @@ -105,6 +117,12 @@ func (w *WGWatcher) periodicHandshakeCheck(ctx context.Context, onDisconnectedFn w.stateDump.WGcheckSuccess() w.log.Debugf("WireGuard watcher reset timer: %v", resetTime) + case <-w.resetCh: + w.log.Infof("WireGuard watcher received peer reset, restarting handshake timeout") + lastHandshake = time.Time{} + enabledTime = time.Now() + timer.Stop() + timer.Reset(wgHandshakeOvertime) case <-ctx.Done(): w.log.Infof("WireGuard watcher stopped") return diff --git a/client/internal/peer/worker_ice.go b/client/internal/peer/worker_ice.go index 464f57bff..edd70fb20 100644 --- a/client/internal/peer/worker_ice.go +++ b/client/internal/peer/worker_ice.go @@ -52,8 +52,9 @@ type WorkerICE struct { // increase by one when disconnecting the agent // with it the remote peer can discard the already deprecated offer/answer // Without it the remote peer may recreate a workable ICE connection - sessionID ICESessionID - muxAgent sync.Mutex + sessionID ICESessionID + remoteSessionChanged bool + muxAgent sync.Mutex localUfrag string localPwd string @@ -106,6 +107,7 @@ func (w *WorkerICE) OnNewOffer(remoteOfferAnswer *OfferAnswer) { return } w.log.Debugf("agent already exists, recreate the connection") + w.remoteSessionChanged = true w.agentDialerCancel() if w.agent != nil { if err := w.agent.Close(); err != nil { @@ -306,13 +308,17 @@ func (w *WorkerICE) connect(ctx context.Context, agent *icemaker.ThreadSafeAgent w.conn.onICEConnectionIsReady(selectedPriority(pair), ci) } -func (w *WorkerICE) closeAgent(agent *icemaker.ThreadSafeAgent, cancel context.CancelFunc) { +func (w *WorkerICE) closeAgent(agent *icemaker.ThreadSafeAgent, cancel context.CancelFunc) bool { cancel() if err := agent.Close(); err != nil { w.log.Warnf("failed to close ICE agent: %s", err) } w.muxAgent.Lock() + defer w.muxAgent.Unlock() + + sessionChanged := w.remoteSessionChanged + w.remoteSessionChanged = false if w.agent == agent { // consider to remove from here and move to the OnNewOffer @@ -325,7 +331,7 @@ func (w *WorkerICE) closeAgent(agent *icemaker.ThreadSafeAgent, cancel context.C w.agentConnecting = false w.remoteSessionID = "" } - w.muxAgent.Unlock() + return sessionChanged } func (w *WorkerICE) punchRemoteWGPort(pair *ice.CandidatePair, remoteWgPort int) { @@ -426,11 +432,11 @@ func (w *WorkerICE) onConnectionStateChange(agent *icemaker.ThreadSafeAgent, dia // ice.ConnectionStateClosed happens when we recreate the agent. For the P2P to TURN switch important to // notify the conn.onICEStateDisconnected changes to update the current used priority - w.closeAgent(agent, dialerCancel) + sessionChanged := w.closeAgent(agent, dialerCancel) if w.lastKnownState == ice.ConnectionStateConnected { w.lastKnownState = ice.ConnectionStateDisconnected - w.conn.onICEStateDisconnected() + w.conn.onICEStateDisconnected(sessionChanged) } default: return From 0146e3971494230ef931ba8b310c6a5d646af2b0 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Tue, 17 Feb 2026 06:40:10 +0800 Subject: [PATCH 133/374] Add listener side proxy protocol support and enable it in traefik (#5332) Co-authored-by: mlsmaycon --- go.mod | 1 + go.sum | 2 + infrastructure_files/getting-started.sh | 39 ++++- proxy/cmd/proxy/cmd/root.go | 3 + proxy/proxyprotocol_test.go | 106 ++++++++++++++ proxy/server.go | 180 +++++++++++++++++------- 6 files changed, 276 insertions(+), 55 deletions(-) create mode 100644 proxy/proxyprotocol_test.go diff --git a/go.mod b/go.mod index ff9105761..4a8bc3f2b 100644 --- a/go.mod +++ b/go.mod @@ -83,6 +83,7 @@ require ( github.com/pion/stun/v3 v3.1.0 github.com/pion/transport/v3 v3.1.1 github.com/pion/turn/v3 v3.0.1 + github.com/pires/go-proxyproto v0.11.0 github.com/pkg/sftp v1.13.9 github.com/prometheus/client_golang v1.23.2 github.com/quic-go/quic-go v0.55.0 diff --git a/go.sum b/go.sum index 23a12ff68..2a9ad6d70 100644 --- a/go.sum +++ b/go.sum @@ -474,6 +474,8 @@ github.com/pion/turn/v3 v3.0.1 h1:wLi7BTQr6/Q20R0vt/lHbjv6y4GChFtC33nkYbasoT8= github.com/pion/turn/v3 v3.0.1/go.mod h1:MrJDKgqryDyWy1/4NT9TWfXWGMC7UHT6pJIv1+gMeNE= github.com/pion/turn/v4 v4.1.1 h1:9UnY2HB99tpDyz3cVVZguSxcqkJ1DsTSZ+8TGruh4fc= github.com/pion/turn/v4 v4.1.1/go.mod h1:2123tHk1O++vmjI5VSD0awT50NywDAq5A2NNNU4Jjs8= +github.com/pires/go-proxyproto v0.11.0 h1:gUQpS85X/VJMdUsYyEgyn59uLJvGqPhJV5YvG68wXH4= +github.com/pires/go-proxyproto v0.11.0/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh index d8a9e9ad6..dc5d53504 100755 --- a/infrastructure_files/getting-started.sh +++ b/infrastructure_files/getting-started.sh @@ -329,6 +329,9 @@ initialize_default_values() { BIND_LOCALHOST_ONLY="true" EXTERNAL_PROXY_NETWORK="" + # Traefik static IP within the internal bridge network + TRAEFIK_IP="172.30.0.10" + # NetBird Proxy configuration ENABLE_PROXY="false" PROXY_DOMAIN="" @@ -393,7 +396,7 @@ check_existing_installation() { echo "Generated files already exist, if you want to reinitialize the environment, please remove them first." echo "You can use the following commands:" echo " $DOCKER_COMPOSE_COMMAND down --volumes # to remove all containers and volumes" - echo " rm -f docker-compose.yml dashboard.env config.yaml proxy.env nginx-netbird.conf caddyfile-netbird.txt npm-advanced-config.txt" + echo " rm -f docker-compose.yml dashboard.env config.yaml proxy.env traefik-dynamic.yaml nginx-netbird.conf caddyfile-netbird.txt npm-advanced-config.txt" echo "Be aware that this will remove all data from the database, and you will have to reconfigure the dashboard." exit 1 fi @@ -412,6 +415,8 @@ generate_configuration_files() { # This will be overwritten with the actual token after netbird-server starts echo "# Placeholder - will be updated with token after netbird-server starts" > proxy.env echo "NB_PROXY_TOKEN=placeholder" >> proxy.env + # TCP ServersTransport for PROXY protocol v2 to the proxy backend + render_traefik_dynamic > traefik-dynamic.yaml fi ;; 1) @@ -559,10 +564,14 @@ init_environment() { ############################################ render_docker_compose_traefik_builtin() { - # Generate proxy service section if enabled + # Generate proxy service section and Traefik dynamic config if enabled local proxy_service="" local proxy_volumes="" + local traefik_file_provider="" + local traefik_dynamic_volume="" if [[ "$ENABLE_PROXY" == "true" ]]; then + traefik_file_provider=' - "--providers.file.filename=/etc/traefik/dynamic.yaml"' + traefik_dynamic_volume=" - ./traefik-dynamic.yaml:/etc/traefik/dynamic.yaml:ro" proxy_service=" # NetBird Proxy - exposes internal resources to the internet proxy: @@ -570,7 +579,7 @@ render_docker_compose_traefik_builtin() { container_name: netbird-proxy # Hairpin NAT fix: route domain back to traefik's static IP within Docker extra_hosts: - - \"$NETBIRD_DOMAIN:172.30.0.10\" + - \"$NETBIRD_DOMAIN:$TRAEFIK_IP\" ports: - 51820:51820/udp restart: unless-stopped @@ -590,6 +599,7 @@ render_docker_compose_traefik_builtin() { - traefik.tcp.routers.proxy-passthrough.service=proxy-tls - traefik.tcp.routers.proxy-passthrough.priority=1 - traefik.tcp.services.proxy-tls.loadbalancer.server.port=8443 + - traefik.tcp.services.proxy-tls.loadbalancer.serverstransport=pp-v2@file logging: driver: \"json-file\" options: @@ -609,7 +619,7 @@ services: restart: unless-stopped networks: netbird: - ipv4_address: 172.30.0.10 + ipv4_address: $TRAEFIK_IP command: # Logging - "--log.level=INFO" @@ -636,12 +646,14 @@ services: # gRPC transport settings - "--serverstransport.forwardingtimeouts.responseheadertimeout=0s" - "--serverstransport.forwardingtimeouts.idleconntimeout=0s" +$traefik_file_provider ports: - '443:443' - '80:80' volumes: - /var/run/docker.sock:/var/run/docker.sock:ro - netbird_traefik_letsencrypt:/letsencrypt +$traefik_dynamic_volume logging: driver: "json-file" options: @@ -751,6 +763,10 @@ server: cliRedirectURIs: - "http://localhost:53000/" + reverseProxy: + trustedHTTPProxies: + - "$TRAEFIK_IP/32" + store: engine: "sqlite" encryptionKey: "$DATASTORE_ENCRYPTION_KEY" @@ -780,6 +796,17 @@ EOF return 0 } +render_traefik_dynamic() { + cat <<'EOF' +tcp: + serversTransports: + pp-v2: + proxyProtocol: + version: 2 +EOF + return 0 +} + render_proxy_env() { cat < 0 { + ppListener.ConnPolicy = s.proxyProtocolPolicy + } else { + s.Logger.Warn("PROXY protocol enabled without trusted proxies; any source may send PROXY headers") + } + s.Logger.Info("PROXY protocol enabled on listener") + return ppListener +} + +// proxyProtocolPolicy returns whether to require, skip, or reject the PROXY +// header based on whether the connection source is in TrustedProxies. +func (s *Server) proxyProtocolPolicy(opts proxyproto.ConnPolicyOptions) (proxyproto.Policy, error) { + // No logging on reject to prevent abuse + tcpAddr, ok := opts.Upstream.(*net.TCPAddr) + if !ok { + return proxyproto.REJECT, nil + } + addr, ok := netip.AddrFromSlice(tcpAddr.IP) + if !ok { + return proxyproto.REJECT, nil + } + addr = addr.Unmap() + + // called per accept + for _, prefix := range s.TrustedProxies { + if prefix.Contains(addr) { + return proxyproto.REQUIRE, nil + } + } + return proxyproto.IGNORE, nil +} + const ( + defaultHealthAddr = "localhost:8080" + defaultDebugAddr = "localhost:8444" + + // proxyProtoHeaderTimeout is the deadline for reading the PROXY protocol + // header after accepting a connection. + proxyProtoHeaderTimeout = 5 * time.Second + // shutdownPreStopDelay is the time to wait after receiving a shutdown signal // before draining connections. This allows the load balancer to propagate // the endpoint removal. @@ -647,7 +725,7 @@ func (s *Server) protoToMapping(mapping *proto.ProxyMapping) proxy.Mapping { // If addr is empty, it defaults to localhost:8444 for security. func debugEndpointAddr(addr string) string { if addr == "" { - return "localhost:8444" + return defaultDebugAddr } return addr } From 1bd7190954deb550e67ff0faf0803eb276652935 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 17 Feb 2026 12:53:34 +0100 Subject: [PATCH 134/374] [proxy] Support WebSocket (#5312) * Fix WebSocket support by implementing Hijacker interface Add responsewriter.PassthroughWriter to preserve optional HTTP interfaces (Hijacker, Flusher, Pusher) when wrapping http.ResponseWriter in middleware. Without this delegation: - WebSocket connections fail (can't hijack the connection) - Streaming breaks (can't flush buffers) - HTTP/2 push doesn't work * Add HijackTracker to manage hijacked connections during graceful shutdown * Refactor HijackTracker to use middleware for tracking hijacked connections * Refactor server handler chain setup for improved readability and maintainability --- proxy/internal/accesslog/middleware.go | 5 +- proxy/internal/accesslog/statuswriter.go | 20 +++---- proxy/internal/conntrack/conn.go | 49 +++++++++++++++++ proxy/internal/conntrack/hijacked.go | 41 ++++++++++++++ proxy/internal/metrics/metrics.go | 12 +++-- .../internal/responsewriter/responsewriter.go | 53 +++++++++++++++++++ proxy/server.go | 23 +++++++- 7 files changed, 180 insertions(+), 23 deletions(-) create mode 100644 proxy/internal/conntrack/conn.go create mode 100644 proxy/internal/conntrack/hijacked.go create mode 100644 proxy/internal/responsewriter/responsewriter.go diff --git a/proxy/internal/accesslog/middleware.go b/proxy/internal/accesslog/middleware.go index ca7556bfd..dd4798975 100644 --- a/proxy/internal/accesslog/middleware.go +++ b/proxy/internal/accesslog/middleware.go @@ -9,6 +9,7 @@ import ( "github.com/rs/xid" "github.com/netbirdio/netbird/proxy/internal/proxy" + "github.com/netbirdio/netbird/proxy/internal/responsewriter" "github.com/netbirdio/netbird/proxy/web" ) @@ -27,8 +28,8 @@ func (l *Logger) Middleware(next http.Handler) http.Handler { // Use a response writer wrapper so we can access the status code later. sw := &statusWriter{ - w: w, - status: http.StatusOK, + PassthroughWriter: responsewriter.New(w), + status: http.StatusOK, } // Resolve the source IP using trusted proxy configuration before passing diff --git a/proxy/internal/accesslog/statuswriter.go b/proxy/internal/accesslog/statuswriter.go index 56ef90efa..43cda59f9 100644 --- a/proxy/internal/accesslog/statuswriter.go +++ b/proxy/internal/accesslog/statuswriter.go @@ -1,26 +1,18 @@ package accesslog import ( - "net/http" + "github.com/netbirdio/netbird/proxy/internal/responsewriter" ) -// statusWriter is a simple wrapper around an http.ResponseWriter -// that captures the setting of the status code via the WriteHeader -// function and stores it so that it can be retrieved later. +// statusWriter captures the HTTP status code from WriteHeader calls. +// It embeds responsewriter.PassthroughWriter which handles all the optional +// interfaces (Hijacker, Flusher, Pusher) automatically. type statusWriter struct { - w http.ResponseWriter + *responsewriter.PassthroughWriter status int } -func (w *statusWriter) Header() http.Header { - return w.w.Header() -} - -func (w *statusWriter) Write(data []byte) (int, error) { - return w.w.Write(data) -} - func (w *statusWriter) WriteHeader(status int) { w.status = status - w.w.WriteHeader(status) + w.PassthroughWriter.WriteHeader(status) } diff --git a/proxy/internal/conntrack/conn.go b/proxy/internal/conntrack/conn.go new file mode 100644 index 000000000..97055d992 --- /dev/null +++ b/proxy/internal/conntrack/conn.go @@ -0,0 +1,49 @@ +package conntrack + +import ( + "bufio" + "net" + "net/http" +) + +// trackedConn wraps a net.Conn and removes itself from the tracker on Close. +type trackedConn struct { + net.Conn + tracker *HijackTracker +} + +func (c *trackedConn) Close() error { + c.tracker.conns.Delete(c) + return c.Conn.Close() +} + +// trackingWriter wraps an http.ResponseWriter and intercepts Hijack calls +// to replace the raw connection with a trackedConn that auto-deregisters. +type trackingWriter struct { + http.ResponseWriter + tracker *HijackTracker +} + +func (w *trackingWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker, ok := w.ResponseWriter.(http.Hijacker) + if !ok { + return nil, nil, http.ErrNotSupported + } + conn, buf, err := hijacker.Hijack() + if err != nil { + return nil, nil, err + } + tc := &trackedConn{Conn: conn, tracker: w.tracker} + w.tracker.conns.Store(tc, struct{}{}) + return tc, buf, nil +} + +func (w *trackingWriter) Flush() { + if flusher, ok := w.ResponseWriter.(http.Flusher); ok { + flusher.Flush() + } +} + +func (w *trackingWriter) Unwrap() http.ResponseWriter { + return w.ResponseWriter +} diff --git a/proxy/internal/conntrack/hijacked.go b/proxy/internal/conntrack/hijacked.go new file mode 100644 index 000000000..d76cebc08 --- /dev/null +++ b/proxy/internal/conntrack/hijacked.go @@ -0,0 +1,41 @@ +package conntrack + +import ( + "net" + "net/http" + "sync" +) + +// HijackTracker tracks connections that have been hijacked (e.g. WebSocket +// upgrades). http.Server.Shutdown does not close hijacked connections, so +// they must be tracked and closed explicitly during graceful shutdown. +// +// Use Middleware as the outermost HTTP middleware to ensure hijacked +// connections are tracked and automatically deregistered when closed. +type HijackTracker struct { + conns sync.Map // net.Conn → struct{} +} + +// Middleware returns an HTTP middleware that wraps the ResponseWriter so that +// hijacked connections are tracked and automatically deregistered from the +// tracker when closed. This should be the outermost middleware in the chain. +func (t *HijackTracker) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(&trackingWriter{ResponseWriter: w, tracker: t}, r) + }) +} + +// CloseAll closes all tracked hijacked connections and returns the number +// of connections that were closed. +func (t *HijackTracker) CloseAll() int { + var count int + t.conns.Range(func(key, _ any) bool { + if conn, ok := key.(net.Conn); ok { + _ = conn.Close() + count++ + } + t.conns.Delete(key) + return true + }) + return count +} diff --git a/proxy/internal/metrics/metrics.go b/proxy/internal/metrics/metrics.go index 951ce73dd..954020f77 100644 --- a/proxy/internal/metrics/metrics.go +++ b/proxy/internal/metrics/metrics.go @@ -5,9 +5,11 @@ import ( "strconv" "time" - "github.com/netbirdio/netbird/proxy/internal/proxy" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/netbirdio/netbird/proxy/internal/proxy" + "github.com/netbirdio/netbird/proxy/internal/responsewriter" ) type Metrics struct { @@ -60,18 +62,18 @@ func New(reg prometheus.Registerer) *Metrics { } type responseInterceptor struct { - http.ResponseWriter + *responsewriter.PassthroughWriter status int size int } func (w *responseInterceptor) WriteHeader(status int) { w.status = status - w.ResponseWriter.WriteHeader(status) + w.PassthroughWriter.WriteHeader(status) } func (w *responseInterceptor) Write(b []byte) (int, error) { - size, err := w.ResponseWriter.Write(b) + size, err := w.PassthroughWriter.Write(b) w.size += size return size, err } @@ -81,7 +83,7 @@ func (m *Metrics) Middleware(next http.Handler) http.Handler { m.requestsTotal.Inc() m.activeRequests.Inc() - interceptor := &responseInterceptor{ResponseWriter: w} + interceptor := &responseInterceptor{PassthroughWriter: responsewriter.New(w)} start := time.Now() next.ServeHTTP(interceptor, r) diff --git a/proxy/internal/responsewriter/responsewriter.go b/proxy/internal/responsewriter/responsewriter.go new file mode 100644 index 000000000..b8fc95f2d --- /dev/null +++ b/proxy/internal/responsewriter/responsewriter.go @@ -0,0 +1,53 @@ +package responsewriter + +import ( + "bufio" + "net" + "net/http" +) + +// PassthroughWriter wraps an http.ResponseWriter and preserves optional +// interfaces like Hijacker, Flusher, and Pusher by delegating to the underlying +// ResponseWriter if it supports them. +// +// This is the standard pattern for Go middleware that needs to wrap ResponseWriter +// while maintaining support for protocol upgrades (WebSocket), streaming (Flusher), +// and HTTP/2 server push. +type PassthroughWriter struct { + http.ResponseWriter +} + +// New creates a new wrapper around the given ResponseWriter. +func New(w http.ResponseWriter) *PassthroughWriter { + return &PassthroughWriter{ResponseWriter: w} +} + +// Hijack implements http.Hijacker interface if the underlying ResponseWriter supports it. +// This is required for WebSocket connections and other protocol upgrades. +func (w *PassthroughWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + if hijacker, ok := w.ResponseWriter.(http.Hijacker); ok { + return hijacker.Hijack() + } + return nil, nil, http.ErrNotSupported +} + +// Flush implements http.Flusher interface if the underlying ResponseWriter supports it. +func (w *PassthroughWriter) Flush() { + if flusher, ok := w.ResponseWriter.(http.Flusher); ok { + flusher.Flush() + } +} + +// Push implements http.Pusher interface if the underlying ResponseWriter supports it. +func (w *PassthroughWriter) Push(target string, opts *http.PushOptions) error { + if pusher, ok := w.ResponseWriter.(http.Pusher); ok { + return pusher.Push(target, opts) + } + return http.ErrNotSupported +} + +// Unwrap returns the underlying ResponseWriter. +// This is required for http.ResponseController (Go 1.20+) to work correctly. +func (w *PassthroughWriter) Unwrap() http.ResponseWriter { + return w.ResponseWriter +} diff --git a/proxy/server.go b/proxy/server.go index b08837679..52b4972ec 100644 --- a/proxy/server.go +++ b/proxy/server.go @@ -37,6 +37,7 @@ import ( "github.com/netbirdio/netbird/proxy/internal/acme" "github.com/netbirdio/netbird/proxy/internal/auth" "github.com/netbirdio/netbird/proxy/internal/certwatch" + "github.com/netbirdio/netbird/proxy/internal/conntrack" "github.com/netbirdio/netbird/proxy/internal/debug" proxygrpc "github.com/netbirdio/netbird/proxy/internal/grpc" "github.com/netbirdio/netbird/proxy/internal/health" @@ -64,6 +65,11 @@ type Server struct { healthChecker *health.Checker meter *metrics.Metrics + // hijackTracker tracks hijacked connections (e.g. WebSocket upgrades) + // so they can be closed during graceful shutdown, since http.Server.Shutdown + // does not handle them. + hijackTracker conntrack.HijackTracker + // Mostly used for debugging on management. startTime time.Time @@ -185,10 +191,18 @@ func (s *Server) ListenAndServe(ctx context.Context, addr string) (err error) { return err } + // Build the handler chain from inside out. + handler := http.Handler(s.proxy) + handler = s.auth.Protect(handler) + handler = web.AssetHandler(handler) + handler = accessLog.Middleware(handler) + handler = s.meter.Middleware(handler) + handler = s.hijackTracker.Middleware(handler) + // Start the reverse proxy HTTPS server. s.https = &http.Server{ Addr: addr, - Handler: s.meter.Middleware(accessLog.Middleware(web.AssetHandler(s.auth.Protect(s.proxy)))), + Handler: handler, TLSConfig: tlsConfig, ErrorLog: newHTTPServerLogger(s.Logger, logtagValueHTTPS), } @@ -457,7 +471,12 @@ func (s *Server) gracefulShutdown() { s.Logger.Warnf("https server drain: %v", err) } - // Step 4: Stop all remaining background services. + // Step 4: Close hijacked connections (WebSocket) that Shutdown does not handle. + if n := s.hijackTracker.CloseAll(); n > 0 { + s.Logger.Infof("closed %d hijacked connection(s)", n) + } + + // Step 5: Stop all remaining background services. s.shutdownServices() s.Logger.Info("graceful shutdown complete") } From 4aff4a64245ca5650d54c52f045a95cb8fca37c8 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Tue, 17 Feb 2026 13:29:32 +0100 Subject: [PATCH 135/374] [management] fix utc difference on last seen status for a peer (#5348) --- management/internals/shared/grpc/server.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index ff9d7ea05..0167aca07 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -224,6 +224,7 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S s.syncSem.Add(1) reqStart := time.Now() + syncStart := reqStart.UTC() ctx := srv.Context() @@ -300,7 +301,7 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S metahash := metaHash(peerMeta, realIP.String()) s.loginFilter.addLogin(peerKey.String(), metahash) - peer, netMap, postureChecks, dnsFwdPort, err := s.accountManager.SyncAndMarkPeer(ctx, accountID, peerKey.String(), peerMeta, realIP, reqStart) + peer, netMap, postureChecks, dnsFwdPort, err := s.accountManager.SyncAndMarkPeer(ctx, accountID, peerKey.String(), peerMeta, realIP, syncStart) if err != nil { log.WithContext(ctx).Debugf("error while syncing peer %s: %v", peerKey.String(), err) s.syncSem.Add(-1) @@ -311,7 +312,7 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S if err != nil { log.WithContext(ctx).Debugf("error while sending initial sync for %s: %v", peerKey.String(), err) s.syncSem.Add(-1) - s.cancelPeerRoutinesWithoutLock(ctx, accountID, peer, reqStart) + s.cancelPeerRoutinesWithoutLock(ctx, accountID, peer, syncStart) return err } @@ -319,7 +320,7 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S if err != nil { log.WithContext(ctx).Debugf("error while notify peer connected for %s: %v", peerKey.String(), err) s.syncSem.Add(-1) - s.cancelPeerRoutinesWithoutLock(ctx, accountID, peer, reqStart) + s.cancelPeerRoutinesWithoutLock(ctx, accountID, peer, syncStart) return err } @@ -336,7 +337,7 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S s.syncSem.Add(-1) - return s.handleUpdates(ctx, accountID, peerKey, peer, updates, srv, reqStart) + return s.handleUpdates(ctx, accountID, peerKey, peer, updates, srv, syncStart) } func (s *Server) handleHandshake(ctx context.Context, srv proto.ManagementService_JobServer) (wgtypes.Key, error) { From 1c934cca6450e1cd813023a4a943cdfe6a05e70e Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 17 Feb 2026 16:07:35 +0100 Subject: [PATCH 136/374] Ignore false lint alert (#5370) --- client/firewall/uspfilter/nat.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/firewall/uspfilter/nat.go b/client/firewall/uspfilter/nat.go index 13567872e..597f892cf 100644 --- a/client/firewall/uspfilter/nat.go +++ b/client/firewall/uspfilter/nat.go @@ -358,9 +358,9 @@ func incrementalUpdate(oldChecksum uint16, oldBytes, newBytes []byte) uint16 { // Fast path for IPv4 addresses (4 bytes) - most common case if len(oldBytes) == 4 && len(newBytes) == 4 { sum += uint32(^binary.BigEndian.Uint16(oldBytes[0:2])) - sum += uint32(^binary.BigEndian.Uint16(oldBytes[2:4])) + sum += uint32(^binary.BigEndian.Uint16(oldBytes[2:4])) //nolint:gosec // length checked above sum += uint32(binary.BigEndian.Uint16(newBytes[0:2])) - sum += uint32(binary.BigEndian.Uint16(newBytes[2:4])) + sum += uint32(binary.BigEndian.Uint16(newBytes[2:4])) //nolint:gosec // length checked above } else { // Fallback for other lengths for i := 0; i < len(oldBytes)-1; i += 2 { From e7c84d0eada91c049e18cfbb8ff3ca7b72d875b7 Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Tue, 17 Feb 2026 16:08:41 +0100 Subject: [PATCH 137/374] Start Management if external IdP is down (#5367) Set ContinueOnConnectorFailure: true in the embedded Dex config so that the Management server starts successfully even when an external IdP connector is unreachable at boot time. --- idp/dex/provider.go | 20 +++++++------ idp/dex/provider_test.go | 62 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 9 deletions(-) diff --git a/idp/dex/provider.go b/idp/dex/provider.go index 6c608dbf5..68fe48486 100644 --- a/idp/dex/provider.go +++ b/idp/dex/provider.go @@ -99,15 +99,16 @@ func NewProvider(ctx context.Context, config *Config) (*Provider, error) { // Build Dex server config - use Dex's types directly dexConfig := server.Config{ - Issuer: issuer, - Storage: stor, - SkipApprovalScreen: true, - SupportedResponseTypes: []string{"code"}, - Logger: logger, - PrometheusRegistry: prometheus.NewRegistry(), - RotateKeysAfter: 6 * time.Hour, - IDTokensValidFor: 24 * time.Hour, - RefreshTokenPolicy: refreshPolicy, + Issuer: issuer, + Storage: stor, + SkipApprovalScreen: true, + SupportedResponseTypes: []string{"code"}, + ContinueOnConnectorFailure: true, + Logger: logger, + PrometheusRegistry: prometheus.NewRegistry(), + RotateKeysAfter: 6 * time.Hour, + IDTokensValidFor: 24 * time.Hour, + RefreshTokenPolicy: refreshPolicy, Web: server.WebConfig{ Issuer: "NetBird", }, @@ -260,6 +261,7 @@ func buildDexConfig(yamlConfig *YAMLConfig, stor storage.Storage, logger *slog.L if len(cfg.SupportedResponseTypes) == 0 { cfg.SupportedResponseTypes = []string{"code"} } + cfg.ContinueOnConnectorFailure = true return cfg } diff --git a/idp/dex/provider_test.go b/idp/dex/provider_test.go index bc34e592f..bd2f676fb 100644 --- a/idp/dex/provider_test.go +++ b/idp/dex/provider_test.go @@ -2,6 +2,7 @@ package dex import ( "context" + "log/slog" "os" "path/filepath" "testing" @@ -195,3 +196,64 @@ enablePasswordDB: true t.Logf("User lookup successful: rawID=%s, connectorID=%s", rawID, connID) } + +func TestNewProvider_ContinueOnConnectorFailure(t *testing.T) { + ctx := context.Background() + + tmpDir, err := os.MkdirTemp("", "dex-connector-failure-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + config := &Config{ + Issuer: "http://localhost:5556/dex", + Port: 5556, + DataDir: tmpDir, + } + + provider, err := NewProvider(ctx, config) + require.NoError(t, err) + defer func() { _ = provider.Stop(ctx) }() + + // The provider should have started successfully even though + // ContinueOnConnectorFailure is an internal Dex config field. + // We verify the provider is functional by performing a basic operation. + assert.NotNil(t, provider.dexServer) + assert.NotNil(t, provider.storage) +} + +func TestBuildDexConfig_ContinueOnConnectorFailure(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "dex-build-config-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + yamlContent := ` +issuer: http://localhost:5556/dex +storage: + type: sqlite3 + config: + file: ` + filepath.Join(tmpDir, "dex.db") + ` +web: + http: 127.0.0.1:5556 +enablePasswordDB: true +` + configPath := filepath.Join(tmpDir, "config.yaml") + err = os.WriteFile(configPath, []byte(yamlContent), 0644) + require.NoError(t, err) + + yamlConfig, err := LoadConfig(configPath) + require.NoError(t, err) + + ctx := context.Background() + stor, err := yamlConfig.Storage.OpenStorage(slog.New(slog.NewTextHandler(os.Stderr, nil))) + require.NoError(t, err) + defer stor.Close() + + err = initializeStorage(ctx, stor, yamlConfig) + require.NoError(t, err) + + logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) + cfg := buildDexConfig(yamlConfig, stor, logger) + + assert.True(t, cfg.ContinueOnConnectorFailure, + "buildDexConfig must set ContinueOnConnectorFailure to true so management starts even if an external IdP is down") +} From e49c0e88622eb4f8b63d389aac783ce8b72eb1f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Diego=20Nogu=C3=AAs?= <49420+diegocn@users.noreply.github.com> Date: Tue, 17 Feb 2026 17:37:44 +0100 Subject: [PATCH 138/374] [infrastructure] Proxy infra changes (#5365) * chore: remove docker extra_hosts settings * chore: remove unnecessary envc from proxy.env --- infrastructure_files/getting-started.sh | 6 ------ 1 file changed, 6 deletions(-) diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh index dc5d53504..7fd87ee8e 100755 --- a/infrastructure_files/getting-started.sh +++ b/infrastructure_files/getting-started.sh @@ -577,9 +577,6 @@ render_docker_compose_traefik_builtin() { proxy: image: $NETBIRD_PROXY_IMAGE container_name: netbird-proxy - # Hairpin NAT fix: route domain back to traefik's static IP within Docker - extra_hosts: - - \"$NETBIRD_DOMAIN:$TRAEFIK_IP\" ports: - 51820:51820/udp restart: unless-stopped @@ -822,9 +819,6 @@ NB_PROXY_TOKEN=$PROXY_TOKEN NB_PROXY_CERTIFICATE_DIRECTORY=/certs NB_PROXY_ACME_CERTIFICATES=true NB_PROXY_ACME_CHALLENGE_TYPE=tls-alpn-01 -NB_PROXY_OIDC_CLIENT_ID=netbird-proxy -NB_PROXY_OIDC_ENDPOINT=$NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN/oauth2 -NB_PROXY_OIDC_SCOPES=openid,profile,email NB_PROXY_FORWARDED_PROTO=https # Enable PROXY protocol to preserve client IPs through L4 proxies (Traefik TCP passthrough) NB_PROXY_PROXY_PROTOCOL=true From 2cdab6d7b7264da8a3b5c8b33cd4e9847b3bfced Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Tue, 17 Feb 2026 18:04:30 +0100 Subject: [PATCH 139/374] [proxy] remove unused oidc config flags (#5369) --- proxy/cmd/proxy/cmd/root.go | 16 ++-------------- proxy/server.go | 8 ++------ 2 files changed, 4 insertions(+), 20 deletions(-) diff --git a/proxy/cmd/proxy/cmd/root.go b/proxy/cmd/proxy/cmd/root.go index b8960b471..121621109 100644 --- a/proxy/cmd/proxy/cmd/root.go +++ b/proxy/cmd/proxy/cmd/root.go @@ -6,14 +6,14 @@ import ( "os" "os/signal" "strconv" - "strings" "syscall" - "github.com/netbirdio/netbird/shared/management/domain" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "golang.org/x/crypto/acme" + "github.com/netbirdio/netbird/shared/management/domain" + "github.com/netbirdio/netbird/proxy" nbacme "github.com/netbirdio/netbird/proxy/internal/acme" "github.com/netbirdio/netbird/util" @@ -46,10 +46,6 @@ var ( debugEndpoint bool debugEndpointAddr string healthAddr string - oidcClientID string - oidcClientSecret string - oidcEndpoint string - oidcScopes string forwardedProto string trustedProxies string certFile string @@ -81,10 +77,6 @@ func init() { rootCmd.Flags().BoolVar(&debugEndpoint, "debug-endpoint", envBoolOrDefault("NB_PROXY_DEBUG_ENDPOINT", false), "Enable debug HTTP endpoint") rootCmd.Flags().StringVar(&debugEndpointAddr, "debug-endpoint-addr", envStringOrDefault("NB_PROXY_DEBUG_ENDPOINT_ADDRESS", "localhost:8444"), "Address for the debug HTTP endpoint") rootCmd.Flags().StringVar(&healthAddr, "health-addr", envStringOrDefault("NB_PROXY_HEALTH_ADDRESS", "localhost:8080"), "Address for the health probe endpoint (liveness/readiness/startup)") - rootCmd.Flags().StringVar(&oidcClientID, "oidc-id", envStringOrDefault("NB_PROXY_OIDC_CLIENT_ID", "netbird-proxy"), "The OAuth2 Client ID for OIDC User Authentication") - rootCmd.Flags().StringVar(&oidcClientSecret, "oidc-secret", envStringOrDefault("NB_PROXY_OIDC_CLIENT_SECRET", ""), "The OAuth2 Client Secret for OIDC User Authentication") - rootCmd.Flags().StringVar(&oidcEndpoint, "oidc-endpoint", envStringOrDefault("NB_PROXY_OIDC_ENDPOINT", ""), "The OIDC Endpoint for OIDC User Authentication") - rootCmd.Flags().StringVar(&oidcScopes, "oidc-scopes", envStringOrDefault("NB_PROXY_OIDC_SCOPES", "openid,profile,email"), "The OAuth2 scopes for OIDC User Authentication, comma separated") rootCmd.Flags().StringVar(&forwardedProto, "forwarded-proto", envStringOrDefault("NB_PROXY_FORWARDED_PROTO", "auto"), "X-Forwarded-Proto value for backends: auto, http, or https") rootCmd.Flags().StringVar(&trustedProxies, "trusted-proxies", envStringOrDefault("NB_PROXY_TRUSTED_PROXIES", ""), "Comma-separated list of trusted upstream proxy CIDR ranges (e.g. '10.0.0.0/8,192.168.1.1')") rootCmd.Flags().StringVar(&certFile, "cert-file", envStringOrDefault("NB_PROXY_CERTIFICATE_FILE", "tls.crt"), "TLS certificate filename within the certificate directory") @@ -159,10 +151,6 @@ func runServer(cmd *cobra.Command, args []string) error { DebugEndpointEnabled: debugEndpoint, DebugEndpointAddress: debugEndpointAddr, HealthAddress: healthAddr, - OIDCClientId: oidcClientID, - OIDCClientSecret: oidcClientSecret, - OIDCEndpoint: oidcEndpoint, - OIDCScopes: strings.Split(oidcScopes, ","), ForwardedProto: forwardedProto, TrustedProxies: parsedTrustedProxies, CertLockMethod: nbacme.CertLockMethod(certLockMethod), diff --git a/proxy/server.go b/proxy/server.go index 52b4972ec..60811e53b 100644 --- a/proxy/server.go +++ b/proxy/server.go @@ -23,7 +23,7 @@ import ( "time" "github.com/cenkalti/backoff/v4" - proxyproto "github.com/pires/go-proxyproto" + "github.com/pires/go-proxyproto" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" log "github.com/sirupsen/logrus" @@ -89,11 +89,7 @@ type Server struct { ACMEChallengeType string // CertLockMethod controls how ACME certificate locks are coordinated // across replicas. Default: CertLockAuto (detect environment). - CertLockMethod acme.CertLockMethod - OIDCClientId string - OIDCClientSecret string - OIDCEndpoint string - OIDCScopes []string + CertLockMethod acme.CertLockMethod // DebugEndpointEnabled enables the debug HTTP endpoint. DebugEndpointEnabled bool From 2dbdb5c1a7628ac9f08fc9559154ebc0e7bc7683 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 17 Feb 2026 19:28:26 +0100 Subject: [PATCH 140/374] [client] Refactor WG endpoint setup with role-based proxy activation (#5277) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Refactor WG endpoint setup with role-based proxy activation For relay connections, the controller (initiator) now activates the wgProxy before configuring the WG endpoint, while the non-controller (responder) configures the endpoint first with a delayed update, then activates the proxy after. This prevents the responder from sending traffic through the proxy before WireGuard is ready to receive it, avoiding handshake congestion when both sides try to initiate simultaneously. For ICE connections, pass hasRelayBackup as the setEndpointNow flag so the responder sets the endpoint immediately when a relay fallback exists (avoiding the delayed update path since relay is already available as backup). On ICE disconnect with relay fallback, remove the duplicate wgProxyRelay.Work() calls — the relay proxy is already active from initial setup, so re-activating it is unnecessary. In EndpointUpdater, split ConfigureWGEndpoint into explicit configureAsInitiator and configureAsResponder paths, and add the setEndpointNow parameter to let the caller control whether the responder applies the endpoint immediately or defers it. Add unused SwitchWGEndpoint and RemoveEndpointAddress methods. Remove the wgConfigWorkaround sleep from the relay setup path. * Fix redundant wgProxyRelay.Work() call during relay fallback setup * Simplify WireGuard endpoint configuration by removing unused parameters and redundant logic --- client/internal/peer/conn.go | 24 ++++++-------- client/internal/peer/endpoint.go | 57 +++++++++++++++++++++++++------- go.mod | 2 +- go.sum | 4 +-- 4 files changed, 58 insertions(+), 29 deletions(-) diff --git a/client/internal/peer/conn.go b/client/internal/peer/conn.go index af6ab3f83..05a397f3d 100644 --- a/client/internal/peer/conn.go +++ b/client/internal/peer/conn.go @@ -434,14 +434,14 @@ func (conn *Conn) onICEStateDisconnected(sessionChanged bool) { conn.resetEndpoint() } + // todo consider to move after the ConfigureWGEndpoint conn.wgProxyRelay.Work() presharedKey := conn.presharedKey(conn.rosenpassRemoteKey) - if err := conn.endpointUpdater.ConfigureWGEndpoint(conn.wgProxyRelay.EndpointAddr(), presharedKey); err != nil { + if err := conn.endpointUpdater.SwitchWGEndpoint(conn.wgProxyRelay.EndpointAddr(), presharedKey); err != nil { conn.Log.Errorf("failed to switch to relay conn: %v", err) } - conn.wgProxyRelay.Work() conn.currentConnPriority = conntype.Relay } else { conn.Log.Infof("ICE disconnected, do not switch to Relay. Reset priority to: %s", conntype.None.String()) @@ -503,20 +503,22 @@ func (conn *Conn) onRelayConnectionIsReady(rci RelayConnInfo) { return } - wgProxy.Work() - presharedKey := conn.presharedKey(rci.rosenpassPubKey) + controller := isController(conn.config) + if controller { + wgProxy.Work() + } conn.enableWgWatcherIfNeeded() - - if err := conn.endpointUpdater.ConfigureWGEndpoint(wgProxy.EndpointAddr(), presharedKey); err != nil { + if err := conn.endpointUpdater.ConfigureWGEndpoint(wgProxy.EndpointAddr(), conn.presharedKey(rci.rosenpassPubKey)); err != nil { if err := wgProxy.CloseConn(); err != nil { conn.Log.Warnf("Failed to close relay connection: %v", err) } conn.Log.Errorf("Failed to update WireGuard peer configuration: %v", err) return } - - wgConfigWorkaround() + if !controller { + wgProxy.Work() + } conn.rosenpassRemoteKey = rci.rosenpassPubKey conn.currentConnPriority = conntype.Relay conn.statusRelay.SetConnected() @@ -877,9 +879,3 @@ func isController(config ConnConfig) bool { func isRosenpassEnabled(remoteRosenpassPubKey []byte) bool { return remoteRosenpassPubKey != nil } - -// wgConfigWorkaround is a workaround for the issue with WireGuard configuration update -// When update a peer configuration in near to each other time, the second update can be ignored by WireGuard -func wgConfigWorkaround() { - time.Sleep(100 * time.Millisecond) -} diff --git a/client/internal/peer/endpoint.go b/client/internal/peer/endpoint.go index 372f33ec6..9ba1efb6e 100644 --- a/client/internal/peer/endpoint.go +++ b/client/internal/peer/endpoint.go @@ -34,28 +34,27 @@ func NewEndpointUpdater(log *logrus.Entry, wgConfig WgConfig, initiator bool) *E } } -// ConfigureWGEndpoint sets up the WireGuard endpoint configuration. -// The initiator immediately configures the endpoint, while the non-initiator -// waits for a fallback period before configuring to avoid handshake congestion. func (e *EndpointUpdater) ConfigureWGEndpoint(addr *net.UDPAddr, presharedKey *wgtypes.Key) error { e.mu.Lock() defer e.mu.Unlock() if e.initiator { - e.log.Debugf("configure up WireGuard as initiatr") - return e.updateWireGuardPeer(addr, presharedKey) + e.log.Debugf("configure up WireGuard as initiator") + return e.configureAsInitiator(addr, presharedKey) } + e.log.Debugf("configure up WireGuard as responder") + return e.configureAsResponder(addr, presharedKey) +} + +func (e *EndpointUpdater) SwitchWGEndpoint(addr *net.UDPAddr, presharedKey *wgtypes.Key) error { + e.mu.Lock() + defer e.mu.Unlock() + // prevent to run new update while cancel the previous update e.waitForCloseTheDelayedUpdate() - var ctx context.Context - ctx, e.cancelFunc = context.WithCancel(context.Background()) - e.updateWg.Add(1) - go e.scheduleDelayedUpdate(ctx, addr, presharedKey) - - e.log.Debugf("configure up WireGuard and wait for handshake") - return e.updateWireGuardPeer(nil, presharedKey) + return e.updateWireGuardPeer(addr, presharedKey) } func (e *EndpointUpdater) RemoveWgPeer() error { @@ -67,9 +66,37 @@ func (e *EndpointUpdater) RemoveWgPeer() error { } func (e *EndpointUpdater) RemoveEndpointAddress() error { + e.mu.Lock() + defer e.mu.Unlock() + + e.waitForCloseTheDelayedUpdate() return e.wgConfig.WgInterface.RemoveEndpointAddress(e.wgConfig.RemoteKey) } +func (e *EndpointUpdater) configureAsInitiator(addr *net.UDPAddr, presharedKey *wgtypes.Key) error { + if err := e.updateWireGuardPeer(addr, presharedKey); err != nil { + return err + } + return nil +} + +func (e *EndpointUpdater) configureAsResponder(addr *net.UDPAddr, presharedKey *wgtypes.Key) error { + // prevent to run new update while cancel the previous update + e.waitForCloseTheDelayedUpdate() + + e.log.Debugf("configure up WireGuard and wait for handshake") + var ctx context.Context + ctx, e.cancelFunc = context.WithCancel(context.Background()) + e.updateWg.Add(1) + go e.scheduleDelayedUpdate(ctx, addr, presharedKey) + + if err := e.updateWireGuardPeer(nil, presharedKey); err != nil { + e.waitForCloseTheDelayedUpdate() + return err + } + return nil +} + func (e *EndpointUpdater) waitForCloseTheDelayedUpdate() { if e.cancelFunc == nil { return @@ -105,3 +132,9 @@ func (e *EndpointUpdater) updateWireGuardPeer(endpoint *net.UDPAddr, presharedKe presharedKey, ) } + +// wgConfigWorkaround is a workaround for the issue with WireGuard configuration update +// When update a peer configuration in near to each other time, the second update can be ignored by WireGuard +func wgConfigWorkaround() { + time.Sleep(100 * time.Millisecond) +} diff --git a/go.mod b/go.mod index 4a8bc3f2b..81765714a 100644 --- a/go.mod +++ b/go.mod @@ -40,7 +40,7 @@ require ( github.com/c-robinson/iplib v1.0.3 github.com/caddyserver/certmagic v0.21.3 github.com/cilium/ebpf v0.15.0 - github.com/coder/websocket v1.8.13 + github.com/coder/websocket v1.8.14 github.com/coreos/go-iptables v0.7.0 github.com/coreos/go-oidc/v3 v3.14.1 github.com/creack/pty v1.1.24 diff --git a/go.sum b/go.sum index 2a9ad6d70..16cc1af7c 100644 --- a/go.sum +++ b/go.sum @@ -107,8 +107,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk= github.com/cilium/ebpf v0.15.0/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso= -github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE= -github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= +github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g= +github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg= github.com/containerd/containerd v1.7.29 h1:90fWABQsaN9mJhGkoVnuzEY+o1XDPbg9BTC9QTAHnuE= github.com/containerd/containerd v1.7.29/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= From e9b2a6e80892ade6925e156690f86e758d42ceee Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Tue, 17 Feb 2026 19:53:14 +0100 Subject: [PATCH 141/374] [managment] add flag to disable the old legacy grpc endpoint (#5372) --- combined/cmd/root.go | 20 ++++++----- management/cmd/management.go | 18 +++++++--- management/cmd/root.go | 32 +++++++++-------- management/internals/server/server.go | 51 +++++++++++++++++---------- 4 files changed, 75 insertions(+), 46 deletions(-) diff --git a/combined/cmd/root.go b/combined/cmd/root.go index 0ec0e9480..b8ea7064c 100644 --- a/combined/cmd/root.go +++ b/combined/cmd/root.go @@ -488,15 +488,17 @@ func createManagementServer(cfg *CombinedConfig, mgmtConfig *nbconfig.Config) (* mgmtPort, _ := strconv.Atoi(portStr) mgmtSrv := mgmtServer.NewServer( - mgmtConfig, - dnsDomain, - singleAccModeDomain, - mgmtPort, - cfg.Server.MetricsPort, - mgmt.DisableAnonymousMetrics, - mgmt.DisableGeoliteUpdate, - // Always enable user deletion from IDP in combined server (embedded IdP is always enabled) - true, + &mgmtServer.Config{ + NbConfig: mgmtConfig, + DNSDomain: dnsDomain, + MgmtSingleAccModeDomain: singleAccModeDomain, + MgmtPort: mgmtPort, + MgmtMetricsPort: cfg.Server.MetricsPort, + DisableMetrics: mgmt.DisableAnonymousMetrics, + DisableGeoliteUpdate: mgmt.DisableGeoliteUpdate, + // Always enable user deletion from IDP in combined server (embedded IdP is always enabled) + UserDeleteFromIDPEnabled: true, + }, ) return mgmtSrv, nil diff --git a/management/cmd/management.go b/management/cmd/management.go index a4dc54550..27d8055e7 100644 --- a/management/cmd/management.go +++ b/management/cmd/management.go @@ -29,11 +29,11 @@ import ( "github.com/netbirdio/netbird/util/crypt" ) -var newServer = func(config *nbconfig.Config, dnsDomain, mgmtSingleAccModeDomain string, mgmtPort int, mgmtMetricsPort int, disableMetrics, disableGeoliteUpdate, userDeleteFromIDPEnabled bool) server.Server { - return server.NewServer(config, dnsDomain, mgmtSingleAccModeDomain, mgmtPort, mgmtMetricsPort, disableMetrics, disableGeoliteUpdate, userDeleteFromIDPEnabled) +var newServer = func(cfg *server.Config) server.Server { + return server.NewServer(cfg) } -func SetNewServer(fn func(config *nbconfig.Config, dnsDomain, mgmtSingleAccModeDomain string, mgmtPort int, mgmtMetricsPort int, disableMetrics, disableGeoliteUpdate, userDeleteFromIDPEnabled bool) server.Server) { +func SetNewServer(fn func(*server.Config) server.Server) { newServer = fn } @@ -110,7 +110,17 @@ var ( mgmtSingleAccModeDomain = "" } - srv := newServer(config, dnsDomain, mgmtSingleAccModeDomain, mgmtPort, mgmtMetricsPort, disableMetrics, disableGeoliteUpdate, userDeleteFromIDPEnabled) + srv := newServer(&server.Config{ + NbConfig: config, + DNSDomain: dnsDomain, + MgmtSingleAccModeDomain: mgmtSingleAccModeDomain, + MgmtPort: mgmtPort, + MgmtMetricsPort: mgmtMetricsPort, + DisableLegacyManagementPort: disableLegacyManagementPort, + DisableMetrics: disableMetrics, + DisableGeoliteUpdate: disableGeoliteUpdate, + UserDeleteFromIDPEnabled: userDeleteFromIDPEnabled, + }) go func() { if err := srv.Start(cmd.Context()); err != nil { log.Fatalf("Server error: %v", err) diff --git a/management/cmd/root.go b/management/cmd/root.go index 3cb2bceb6..fc43d315d 100644 --- a/management/cmd/root.go +++ b/management/cmd/root.go @@ -16,21 +16,22 @@ const ( ) var ( - dnsDomain string - mgmtDataDir string - logLevel string - logFile string - disableMetrics bool - disableSingleAccMode bool - disableGeoliteUpdate bool - idpSignKeyRefreshEnabled bool - userDeleteFromIDPEnabled bool - mgmtPort int - mgmtMetricsPort int - mgmtLetsencryptDomain string - mgmtSingleAccModeDomain string - certFile string - certKey string + dnsDomain string + mgmtDataDir string + logLevel string + logFile string + disableMetrics bool + disableSingleAccMode bool + disableGeoliteUpdate bool + idpSignKeyRefreshEnabled bool + userDeleteFromIDPEnabled bool + mgmtPort int + mgmtMetricsPort int + disableLegacyManagementPort bool + mgmtLetsencryptDomain string + mgmtSingleAccModeDomain string + certFile string + certKey string rootCmd = &cobra.Command{ Use: "netbird-mgmt", @@ -55,6 +56,7 @@ func Execute() error { func init() { mgmtCmd.Flags().IntVar(&mgmtPort, "port", 80, "server port to listen on (defaults to 443 if TLS is enabled, 80 otherwise") + mgmtCmd.Flags().BoolVar(&disableLegacyManagementPort, "disable-legacy-port", false, "disabling the old legacy port (33073)") mgmtCmd.Flags().IntVar(&mgmtMetricsPort, "metrics-port", 9090, "metrics endpoint http port. Metrics are accessible under host:metrics-port/metrics") mgmtCmd.Flags().StringVar(&mgmtDataDir, "datadir", defaultMgmtDataDir, "server data directory location") mgmtCmd.Flags().StringVar(&nbconfig.MgmtConfigPath, "config", defaultMgmtConfig, "Netbird config file location. Config params specified via command line (e.g. datadir) have a precedence over configuration from this file") diff --git a/management/internals/server/server.go b/management/internals/server/server.go index 55c7a271f..3f7f9c4c0 100644 --- a/management/internals/server/server.go +++ b/management/internals/server/server.go @@ -50,13 +50,14 @@ type BaseServer struct { // AfterInit is a function that will be called after the server is initialized afterInit []func(s *BaseServer) - disableMetrics bool - dnsDomain string - disableGeoliteUpdate bool - userDeleteFromIDPEnabled bool - mgmtSingleAccModeDomain string - mgmtMetricsPort int - mgmtPort int + disableMetrics bool + dnsDomain string + disableGeoliteUpdate bool + userDeleteFromIDPEnabled bool + mgmtSingleAccModeDomain string + mgmtMetricsPort int + mgmtPort int + disableLegacyManagementPort bool proxyAuthClose func() @@ -69,18 +70,32 @@ type BaseServer struct { cancel context.CancelFunc } +// Config holds the configuration parameters for creating a new server +type Config struct { + NbConfig *nbconfig.Config + DNSDomain string + MgmtSingleAccModeDomain string + MgmtPort int + MgmtMetricsPort int + DisableLegacyManagementPort bool + DisableMetrics bool + DisableGeoliteUpdate bool + UserDeleteFromIDPEnabled bool +} + // NewServer initializes and configures a new Server instance -func NewServer(config *nbconfig.Config, dnsDomain, mgmtSingleAccModeDomain string, mgmtPort, mgmtMetricsPort int, disableMetrics, disableGeoliteUpdate, userDeleteFromIDPEnabled bool) *BaseServer { +func NewServer(cfg *Config) *BaseServer { return &BaseServer{ - Config: config, - container: make(map[string]any), - dnsDomain: dnsDomain, - mgmtSingleAccModeDomain: mgmtSingleAccModeDomain, - disableMetrics: disableMetrics, - disableGeoliteUpdate: disableGeoliteUpdate, - userDeleteFromIDPEnabled: userDeleteFromIDPEnabled, - mgmtPort: mgmtPort, - mgmtMetricsPort: mgmtMetricsPort, + Config: cfg.NbConfig, + container: make(map[string]any), + dnsDomain: cfg.DNSDomain, + mgmtSingleAccModeDomain: cfg.MgmtSingleAccModeDomain, + disableMetrics: cfg.DisableMetrics, + disableGeoliteUpdate: cfg.DisableGeoliteUpdate, + userDeleteFromIDPEnabled: cfg.UserDeleteFromIDPEnabled, + mgmtPort: cfg.MgmtPort, + disableLegacyManagementPort: cfg.DisableLegacyManagementPort, + mgmtMetricsPort: cfg.MgmtMetricsPort, } } @@ -152,7 +167,7 @@ func (s *BaseServer) Start(ctx context.Context) error { } var compatListener net.Listener - if s.mgmtPort != ManagementLegacyPort { + if s.mgmtPort != ManagementLegacyPort && !s.disableLegacyManagementPort { // The Management gRPC server was running on port 33073 previously. Old agents that are already connected to it // are using port 33073. For compatibility purposes we keep running a 2nd gRPC server on port 33073. compatListener, err = s.serveGRPC(srvCtx, s.GRPCServer(), ManagementLegacyPort) From 318cf59d660ef6195f86b8982d38acb891c0beb6 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Wed, 18 Feb 2026 10:58:14 +0100 Subject: [PATCH 142/374] [relay] reduce QUIC initial packet size to 1280 (IPv6 min MTU) (#5374) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [relay] reduce QUIC initial packet size to 1280 (IPv6 min MTU) * adjust QUIC initial packet size to 1232 based on RFC 9000 §14 --- relay/server/listener/quic/listener.go | 3 ++- shared/relay/client/dialer/quic/quic.go | 3 ++- shared/relay/constants.go | 5 +++++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/relay/server/listener/quic/listener.go b/relay/server/listener/quic/listener.go index d3160a44e..797223e74 100644 --- a/relay/server/listener/quic/listener.go +++ b/relay/server/listener/quic/listener.go @@ -11,6 +11,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/relay/protocol" + nbRelay "github.com/netbirdio/netbird/shared/relay" ) const Proto protocol.Protocol = "quic" @@ -27,7 +28,7 @@ type Listener struct { func (l *Listener) Listen(acceptFn func(conn net.Conn)) error { quicCfg := &quic.Config{ EnableDatagrams: true, - InitialPacketSize: 1452, + InitialPacketSize: nbRelay.QUICInitialPacketSize, } listener, err := quic.ListenAddr(l.Address, l.TLSConfig, quicCfg) if err != nil { diff --git a/shared/relay/client/dialer/quic/quic.go b/shared/relay/client/dialer/quic/quic.go index c057ef089..78462837d 100644 --- a/shared/relay/client/dialer/quic/quic.go +++ b/shared/relay/client/dialer/quic/quic.go @@ -12,6 +12,7 @@ import ( log "github.com/sirupsen/logrus" nbnet "github.com/netbirdio/netbird/client/net" + nbRelay "github.com/netbirdio/netbird/shared/relay" quictls "github.com/netbirdio/netbird/shared/relay/tls" ) @@ -42,7 +43,7 @@ func (d Dialer) Dial(ctx context.Context, address string) (net.Conn, error) { KeepAlivePeriod: 30 * time.Second, MaxIdleTimeout: 4 * time.Minute, EnableDatagrams: true, - InitialPacketSize: 1452, + InitialPacketSize: nbRelay.QUICInitialPacketSize, } udpConn, err := nbnet.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) diff --git a/shared/relay/constants.go b/shared/relay/constants.go index 0f2a27610..fc0545dd5 100644 --- a/shared/relay/constants.go +++ b/shared/relay/constants.go @@ -3,4 +3,9 @@ package relay const ( // WebSocketURLPath is the path for the websocket relay connection WebSocketURLPath = "/relay" + + // QUICInitialPacketSize is the conservative initial QUIC packet size (bytes) + // for unknown-path PMTU, per RFC 9000 §14: 1280 (IPv6 min MTU) − 40 (IPv6 + // header) − 8 (UDP header) = 1232. DPLPMTUD may probe larger sizes later. + QUICInitialPacketSize = 1232 ) From bbca74476e9e5c6df662b706927a189f96c2139e Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Wed, 18 Feb 2026 16:11:17 +0100 Subject: [PATCH 143/374] [management] docker login on management tests (#5323) --- .github/workflows/golang-test-linux.yml | 37 +++++++++++++------ go.mod | 19 +++++----- go.sum | 45 +++++++++++------------ management/server/cache/store_test.go | 4 +- management/server/store/sql_store_test.go | 3 ++ management/server/testutil/store.go | 10 ++--- 6 files changed, 66 insertions(+), 52 deletions(-) diff --git a/.github/workflows/golang-test-linux.yml b/.github/workflows/golang-test-linux.yml index 3c4674fc6..450c44aea 100644 --- a/.github/workflows/golang-test-linux.yml +++ b/.github/workflows/golang-test-linux.yml @@ -409,12 +409,19 @@ jobs: run: git --no-pager diff --exit-code - name: Login to Docker hub - if: matrix.store == 'mysql' && (github.repository == github.head.repo.full_name || !github.head_ref) - uses: docker/login-action@v1 + if: github.event.pull_request && github.event.pull_request.head.repo && github.event.pull_request.head.repo.full_name == '' || github.repository == github.event.pull_request.head.repo.full_name || !github.head_ref + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKER_USER }} password: ${{ secrets.DOCKER_TOKEN }} + - name: docker login for root user + if: github.event.pull_request && github.event.pull_request.head.repo && github.event.pull_request.head.repo.full_name == '' || github.repository == github.event.pull_request.head.repo.full_name || !github.head_ref + env: + DOCKER_USER: ${{ secrets.DOCKER_USER }} + DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }} + run: echo "$DOCKER_TOKEN" | sudo docker login --username "$DOCKER_USER" --password-stdin + - name: download mysql image if: matrix.store == 'mysql' run: docker pull mlsmaycon/warmed-mysql:8 @@ -497,15 +504,18 @@ jobs: run: git --no-pager diff --exit-code - name: Login to Docker hub - if: matrix.store == 'mysql' && (github.repository == github.head.repo.full_name || !github.head_ref) - uses: docker/login-action@v1 + if: github.event.pull_request && github.event.pull_request.head.repo && github.event.pull_request.head.repo.full_name == '' || github.repository == github.event.pull_request.head.repo.full_name || !github.head_ref + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKER_USER }} password: ${{ secrets.DOCKER_TOKEN }} - - name: download mysql image - if: matrix.store == 'mysql' - run: docker pull mlsmaycon/warmed-mysql:8 + - name: docker login for root user + if: github.event.pull_request && github.event.pull_request.head.repo && github.event.pull_request.head.repo.full_name == '' || github.repository == github.event.pull_request.head.repo.full_name || !github.head_ref + env: + DOCKER_USER: ${{ secrets.DOCKER_USER }} + DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }} + run: echo "$DOCKER_TOKEN" | sudo docker login --username "$DOCKER_USER" --password-stdin - name: Test run: | @@ -586,15 +596,18 @@ jobs: run: git --no-pager diff --exit-code - name: Login to Docker hub - if: matrix.store == 'mysql' && (github.repository == github.head.repo.full_name || !github.head_ref) - uses: docker/login-action@v1 + if: github.event.pull_request && github.event.pull_request.head.repo && github.event.pull_request.head.repo.full_name == '' || github.repository == github.event.pull_request.head.repo.full_name || !github.head_ref + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKER_USER }} password: ${{ secrets.DOCKER_TOKEN }} - - name: download mysql image - if: matrix.store == 'mysql' - run: docker pull mlsmaycon/warmed-mysql:8 + - name: docker login for root user + if: github.event.pull_request && github.event.pull_request.head.repo && github.event.pull_request.head.repo.full_name == '' || github.repository == github.event.pull_request.head.repo.full_name || !github.head_ref + env: + DOCKER_USER: ${{ secrets.DOCKER_USER }} + DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }} + run: echo "$DOCKER_TOKEN" | sudo docker login --username "$DOCKER_USER" --password-stdin - name: Test run: | diff --git a/go.mod b/go.mod index 81765714a..4bcdbdc78 100644 --- a/go.mod +++ b/go.mod @@ -93,10 +93,10 @@ require ( github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 github.com/stretchr/testify v1.11.1 - github.com/testcontainers/testcontainers-go v0.31.0 - github.com/testcontainers/testcontainers-go/modules/mysql v0.31.0 - github.com/testcontainers/testcontainers-go/modules/postgres v0.31.0 - github.com/testcontainers/testcontainers-go/modules/redis v0.31.0 + github.com/testcontainers/testcontainers-go v0.37.0 + github.com/testcontainers/testcontainers-go/modules/mysql v0.37.0 + github.com/testcontainers/testcontainers-go/modules/postgres v0.37.0 + github.com/testcontainers/testcontainers-go/modules/redis v0.37.0 github.com/things-go/go-socks5 v0.0.4 github.com/ti-mo/conntrack v0.5.1 github.com/ti-mo/netfilter v0.5.2 @@ -142,7 +142,6 @@ require ( github.com/Masterminds/semver/v3 v3.3.0 // indirect github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/Microsoft/hcsshim v0.12.3 // indirect github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/awnumar/memcall v0.4.0 // indirect @@ -166,16 +165,16 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/caddyserver/zerossl v0.1.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/containerd/containerd v1.7.29 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect github.com/cpuguy83/dockercfg v0.3.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/docker v26.1.5+incompatible // indirect + github.com/docker/docker v28.0.1+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect + github.com/ebitengine/purego v0.8.2 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fredbi/uri v1.1.1 // indirect github.com/fyne-io/gl-js v0.2.0 // indirect @@ -221,9 +220,10 @@ require ( github.com/lib/pq v1.10.9 // indirect github.com/libdns/libdns v0.2.2 // indirect github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae // indirect - github.com/magiconair/properties v1.8.7 // indirect + github.com/magiconair/properties v1.8.10 // indirect github.com/mattermost/xml-roundtrip-validator v0.1.0 // indirect github.com/mattn/go-sqlite3 v1.14.32 // indirect + github.com/mdelapenya/tlscert v0.2.0 // indirect github.com/mdlayher/genetlink v1.3.2 // indirect github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 // indirect github.com/mholt/acmez/v2 v2.0.1 // indirect @@ -242,7 +242,7 @@ require ( github.com/nxadm/tail v1.4.8 // indirect github.com/onsi/ginkgo/v2 v2.9.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect github.com/pion/dtls/v2 v2.2.10 // indirect github.com/pion/dtls/v3 v3.0.9 // indirect github.com/pion/mdns/v2 v2.0.7 // indirect @@ -256,6 +256,7 @@ require ( github.com/prometheus/procfs v0.16.1 // indirect github.com/russellhaering/goxmldsig v1.5.0 // indirect github.com/rymdport/portal v0.4.2 // indirect + github.com/shirou/gopsutil/v4 v4.25.1 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/spf13/cast v1.7.0 // indirect diff --git a/go.sum b/go.sum index 16cc1af7c..1bd9396bb 100644 --- a/go.sum +++ b/go.sum @@ -33,8 +33,6 @@ github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/Microsoft/hcsshim v0.12.3 h1:LS9NXqXhMoqNCplK1ApmVSfB4UnVLRDWRapB6EIlxE0= -github.com/Microsoft/hcsshim v0.12.3/go.mod h1:Iyl1WVpZzr+UkzjekHZbV8o5Z9ZkxNGx6CtY2Qg/JVQ= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= github.com/TheJumpCloud/jcapi-go v3.0.0+incompatible h1:hqcTK6ZISdip65SR792lwYJTa/axESA0889D3UlZbLo= github.com/TheJumpCloud/jcapi-go v3.0.0+incompatible/go.mod h1:6B1nuc1MUs6c62ODZDl7hVE5Pv7O2XGSkgg2olnq34I= @@ -109,8 +107,6 @@ github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk= github.com/cilium/ebpf v0.15.0/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso= github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g= github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg= -github.com/containerd/containerd v1.7.29 h1:90fWABQsaN9mJhGkoVnuzEY+o1XDPbg9BTC9QTAHnuE= -github.com/containerd/containerd v1.7.29/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= @@ -135,12 +131,14 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.5+incompatible h1:NEAxTwEjxV6VbBMBoGG3zPqbiJosIApZjxlbrG9q3/g= -github.com/docker/docker v26.1.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.0.1+incompatible h1:FCHjSRdXhNRFjlHMTv4jUNlIBbTeRjrWfeFuJp7jpo0= +github.com/docker/docker v28.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I= +github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/eko/gocache/lib/v4 v4.2.0 h1:MNykyi5Xw+5Wu3+PUrvtOCaKSZM1nUSVftbzmeC7Yuw= github.com/eko/gocache/lib/v4 v4.2.0/go.mod h1:7ViVmbU+CzDHzRpmB4SXKyyzyuJ8A3UW3/cszpcqB4M= github.com/eko/gocache/store/go_cache/v4 v4.2.2 h1:tAI9nl6TLoJyKG1ujF0CS0n/IgTEMl+NivxtR5R3/hw= @@ -195,8 +193,6 @@ github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3yg github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= -github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= -github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= @@ -357,13 +353,15 @@ github.com/lrh3321/ipset-go v0.0.0-20250619021614-54a0a98ace81/go.mod h1:RD8ML/Y github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae h1:dIZY4ULFcto4tAFlj1FYZl8ztUZ13bdq+PLY+NOfbyI= github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= +github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mattermost/xml-roundtrip-validator v0.1.0 h1:RXbVD2UAl7A7nOTR4u7E3ILa4IbtvKBHw64LDsmu9hU= github.com/mattermost/xml-roundtrip-validator v0.1.0/go.mod h1:qccnGMcpgwcNaBnxqpJpWWUiPNr5H3O8eDgGV9gT5To= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI= +github.com/mdelapenya/tlscert v0.2.0/go.mod h1:O4njj3ELLnJjGdkN7M/vIVCpZ+Cf0L6muqOG4tLSl8o= github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= @@ -437,13 +435,12 @@ github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq50AS6wALUMYs= github.com/oschwald/maxminddb-golang v1.12.0/go.mod h1:q0Nob5lTCqyQ8WT6FYgS1L7PXKVVbgiymefNwIjPzgY= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0= github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/petermattis/goid v0.0.0-20250303134427-723919f7f203 h1:E7Kmf11E4K7B5hDti2K2NqPb1nlYlGYsu02S1JNd/Bs= @@ -513,6 +510,8 @@ github.com/rymdport/portal v0.4.2 h1:7jKRSemwlTyVHHrTGgQg7gmNPJs88xkbKcIL3NlcmSU github.com/rymdport/portal v0.4.2/go.mod h1:kFF4jslnJ8pD5uCi17brj/ODlfIidOxlgUDTO5ncnC4= github.com/shirou/gopsutil/v3 v3.24.4 h1:dEHgzZXt4LMNm+oYELpzl9YCqV65Yr/6SfrvgRBtXeU= github.com/shirou/gopsutil/v3 v3.24.4/go.mod h1:lTd2mdiOspcqLgAnr9/nGi71NkeMpWKdmhuxm9GusH8= +github.com/shirou/gopsutil/v4 v4.25.1 h1:QSWkTc+fu9LTAWfkZwZ6j8MSUk4A2LV7rbH0ZqmLjXs= +github.com/shirou/gopsutil/v4 v4.25.1/go.mod h1:RoUCUpndaJFtT+2zsZzzmhvbfGoDCJ7nFXKJf8GqJbI= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -554,14 +553,14 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/testcontainers/testcontainers-go v0.31.0 h1:W0VwIhcEVhRflwL9as3dhY6jXjVCA27AkmbnZ+UTh3U= -github.com/testcontainers/testcontainers-go v0.31.0/go.mod h1:D2lAoA0zUFiSY+eAflqK5mcUx/A5hrrORaEQrd0SefI= -github.com/testcontainers/testcontainers-go/modules/mysql v0.31.0 h1:790+S8ewZYCbG+o8IiFlZ8ZZ33XbNO6zV9qhU6xhlRk= -github.com/testcontainers/testcontainers-go/modules/mysql v0.31.0/go.mod h1:REFmO+lSG9S6uSBEwIMZCxeI36uhScjTwChYADeO3JA= -github.com/testcontainers/testcontainers-go/modules/postgres v0.31.0 h1:isAwFS3KNKRbJMbWv+wolWqOFUECmjYZ+sIRZCIBc/E= -github.com/testcontainers/testcontainers-go/modules/postgres v0.31.0/go.mod h1:ZNYY8vumNCEG9YI59A9d6/YaMY49uwRhmeU563EzFGw= -github.com/testcontainers/testcontainers-go/modules/redis v0.31.0 h1:5X6GhOdLwV86zcW8sxppJAMtsDC9u+r9tb3biBc9GKs= -github.com/testcontainers/testcontainers-go/modules/redis v0.31.0/go.mod h1:dKi5xBwy1k4u8yb3saQHu7hMEJwewHXxzbcMAuLiA6o= +github.com/testcontainers/testcontainers-go v0.37.0 h1:L2Qc0vkTw2EHWQ08djon0D2uw7Z/PtHS/QzZZ5Ra/hg= +github.com/testcontainers/testcontainers-go v0.37.0/go.mod h1:QPzbxZhQ6Bclip9igjLFj6z0hs01bU8lrl2dHQmgFGM= +github.com/testcontainers/testcontainers-go/modules/mysql v0.37.0 h1:LqUos1oR5iuuzorFnSvxsHNdYdCHB/DfI82CuT58wbI= +github.com/testcontainers/testcontainers-go/modules/mysql v0.37.0/go.mod h1:vHEEHx5Kf+uq5hveaVAMrTzPY8eeRZcKcl23MRw5Tkc= +github.com/testcontainers/testcontainers-go/modules/postgres v0.37.0 h1:hsVwFkS6s+79MbKEO+W7A1wNIw1fmkMtF4fg83m6kbc= +github.com/testcontainers/testcontainers-go/modules/postgres v0.37.0/go.mod h1:Qj/eGbRbO/rEYdcRLmN+bEojzatP/+NS1y8ojl2PQsc= +github.com/testcontainers/testcontainers-go/modules/redis v0.37.0 h1:9HIY28I9ME/Zmb+zey1p/I1mto5+5ch0wLX+nJdOsQ4= +github.com/testcontainers/testcontainers-go/modules/redis v0.37.0/go.mod h1:Abu9g/25Qv+FkYVx3U4Voaynou1c+7D0HIhaQJXvk6E= github.com/things-go/go-socks5 v0.0.4 h1:jMQjIc+qhD4z9cITOMnBiwo9dDmpGuXmBlkRFrl/qD0= github.com/things-go/go-socks5 v0.0.4/go.mod h1:sh4K6WHrmHZpjxLTCHyYtXYH8OUuD+yZun41NomR1IQ= github.com/ti-mo/conntrack v0.5.1 h1:opEwkFICnDbQc0BUXl73PHBK0h23jEIFVjXsqvF4GY0= @@ -851,7 +850,7 @@ gorm.io/driver/sqlite v1.5.7/go.mod h1:U+J8craQU6Fzkcvu8oLeAQmi50TkwPEhHDEjQZXDa gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8= gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ= -gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= -gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= gvisor.dev/gvisor v0.0.0-20251031020517-ecfcdd2f171c h1:pfzmXIkkDgydR4ZRP+e1hXywZfYR21FA0Fbk6ptMkiA= gvisor.dev/gvisor v0.0.0-20251031020517-ecfcdd2f171c/go.mod h1:/mc6CfwbOm5KKmqoV7Qx20Q+Ja8+vO4g7FuCdlVoAfQ= diff --git a/management/server/cache/store_test.go b/management/server/cache/store_test.go index 1b64fd70d..b869170f0 100644 --- a/management/server/cache/store_test.go +++ b/management/server/cache/store_test.go @@ -7,8 +7,6 @@ import ( "github.com/eko/gocache/lib/v4/store" "github.com/redis/go-redis/v9" - "github.com/testcontainers/testcontainers-go" - testcontainersredis "github.com/testcontainers/testcontainers-go/modules/redis" "github.com/netbirdio/netbird/management/server/cache" @@ -50,7 +48,7 @@ func TestRedisStoreConnectionFailure(t *testing.T) { func TestRedisStoreConnectionSuccess(t *testing.T) { ctx := context.Background() - redisContainer, err := testcontainersredis.RunContainer(ctx, testcontainers.WithImage("redis:7")) + redisContainer, err := testcontainersredis.Run(ctx, "redis:7") if err != nil { t.Fatalf("couldn't start redis container: %s", err) } diff --git a/management/server/store/sql_store_test.go b/management/server/store/sql_store_test.go index 7cf42c4e8..bafa63580 100644 --- a/management/server/store/sql_store_test.go +++ b/management/server/store/sql_store_test.go @@ -1360,6 +1360,9 @@ func TestSqlStore_GetGroupsByIDs(t *testing.T) { } func TestSqlStore_CreateGroup(t *testing.T) { + if os.Getenv("CI") == "true" { + t.Log("Skipping MySQL test on CI") + } t.Setenv("NETBIRD_STORE_ENGINE", string(types.MysqlStoreEngine)) store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir()) t.Cleanup(cleanup) diff --git a/management/server/testutil/store.go b/management/server/testutil/store.go index f92153399..07699e2c3 100644 --- a/management/server/testutil/store.go +++ b/management/server/testutil/store.go @@ -32,8 +32,8 @@ func CreateMysqlTestContainer() (func(), string, error) { } var err error - mysqlContainer, err = mysql.RunContainer(ctx, - testcontainers.WithImage("mlsmaycon/warmed-mysql:8"), + mysqlContainer, err = mysql.Run(ctx, + "mlsmaycon/warmed-mysql:8", mysql.WithDatabase("testing"), mysql.WithUsername("root"), mysql.WithPassword("testing"), @@ -78,8 +78,8 @@ func CreatePostgresTestContainer() (func(), string, error) { } var err error - pgContainer, err = postgres.RunContainer(ctx, - testcontainers.WithImage("postgres:16-alpine"), + pgContainer, err = postgres.Run(ctx, + "postgres:16-alpine", postgres.WithDatabase("netbird"), postgres.WithUsername("root"), postgres.WithPassword("netbird"), @@ -120,7 +120,7 @@ func noOpCleanup() { func CreateRedisTestContainer() (func(), string, error) { ctx := context.Background() - redisContainer, err := testcontainersredis.RunContainer(ctx, testcontainers.WithImage("redis:7")) + redisContainer, err := testcontainersredis.Run(ctx, "redis:7") if err != nil { return nil, "", err } From d1ead2265ba114e84ce9fec04e9abf01525cecc8 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Wed, 18 Feb 2026 19:14:09 +0100 Subject: [PATCH 144/374] [client] Batch macOS DNS domains to avoid truncation (#5368) * [client] Batch macOS DNS domains across multiple scutil keys to avoid truncation scutil has undocumented limits: 99-element cap on d.add arrays and ~2048 byte value buffer for SupplementalMatchDomains. Users with 60+ domains hit silent domain loss. This applies the same batching approach used on Windows (nrptMaxDomainsPerRule=50), splitting domains into indexed resolver keys (NetBird-Match-0, NetBird-Match-1, etc.) with 50-element and 1500-byte limits per key. * check for all keys on getRemovableKeysWithDefaults * use multi error --- client/internal/dns/host_darwin.go | 171 +++++++++++++---- client/internal/dns/host_darwin_test.go | 238 +++++++++++++++++++++++- 2 files changed, 360 insertions(+), 49 deletions(-) diff --git a/client/internal/dns/host_darwin.go b/client/internal/dns/host_darwin.go index af84c8a85..b3908f163 100644 --- a/client/internal/dns/host_darwin.go +++ b/client/internal/dns/host_darwin.go @@ -14,6 +14,8 @@ import ( "strings" "sync" + "github.com/hashicorp/go-multierror" + nberrors "github.com/netbirdio/netbird/client/errors" log "github.com/sirupsen/logrus" "golang.org/x/exp/maps" @@ -22,6 +24,7 @@ import ( const ( netbirdDNSStateKeyFormat = "State:/Network/Service/NetBird-%s/DNS" + netbirdDNSStateKeyIndexedFormat = "State:/Network/Service/NetBird-%s-%d/DNS" globalIPv4State = "State:/Network/Global/IPv4" primaryServiceStateKeyFormat = "State:/Network/Service/%s/DNS" keySupplementalMatchDomains = "SupplementalMatchDomains" @@ -35,6 +38,14 @@ const ( searchSuffix = "Search" matchSuffix = "Match" localSuffix = "Local" + + // maxDomainsPerResolverEntry is the max number of domains per scutil resolver key. + // scutil's d.add has maxArgs=101 (key + * + 99 values), so 99 is the hard cap. + maxDomainsPerResolverEntry = 50 + + // maxDomainBytesPerResolverEntry is the max total bytes of domain strings per key. + // scutil has an undocumented ~2048 byte value buffer; we stay well under it. + maxDomainBytesPerResolverEntry = 1500 ) type systemConfigurator struct { @@ -84,28 +95,23 @@ func (s *systemConfigurator) applyDNSConfig(config HostDNSConfig, stateManager * searchDomains = append(searchDomains, strings.TrimSuffix(""+dConf.Domain, ".")) } - matchKey := getKeyWithInput(netbirdDNSStateKeyFormat, matchSuffix) - var err error - if len(matchDomains) != 0 { - err = s.addMatchDomains(matchKey, strings.Join(matchDomains, " "), config.ServerIP, config.ServerPort) - } else { - log.Infof("removing match domains from the system") - err = s.removeKeyFromSystemConfig(matchKey) + if err := s.removeKeysContaining(matchSuffix); err != nil { + log.Warnf("failed to remove old match keys: %v", err) } - if err != nil { - return fmt.Errorf("add match domains: %w", err) + if len(matchDomains) != 0 { + if err := s.addBatchedDomains(matchSuffix, matchDomains, config.ServerIP, config.ServerPort, false); err != nil { + return fmt.Errorf("add match domains: %w", err) + } } s.updateState(stateManager) - searchKey := getKeyWithInput(netbirdDNSStateKeyFormat, searchSuffix) - if len(searchDomains) != 0 { - err = s.addSearchDomains(searchKey, strings.Join(searchDomains, " "), config.ServerIP, config.ServerPort) - } else { - log.Infof("removing search domains from the system") - err = s.removeKeyFromSystemConfig(searchKey) + if err := s.removeKeysContaining(searchSuffix); err != nil { + log.Warnf("failed to remove old search keys: %v", err) } - if err != nil { - return fmt.Errorf("add search domains: %w", err) + if len(searchDomains) != 0 { + if err := s.addBatchedDomains(searchSuffix, searchDomains, config.ServerIP, config.ServerPort, true); err != nil { + return fmt.Errorf("add search domains: %w", err) + } } s.updateState(stateManager) @@ -149,8 +155,7 @@ func (s *systemConfigurator) restoreHostDNS() error { func (s *systemConfigurator) getRemovableKeysWithDefaults() []string { if len(s.createdKeys) == 0 { - // return defaults for startup calls - return []string{getKeyWithInput(netbirdDNSStateKeyFormat, searchSuffix), getKeyWithInput(netbirdDNSStateKeyFormat, matchSuffix)} + return s.discoverExistingKeys() } keys := make([]string, 0, len(s.createdKeys)) @@ -160,6 +165,47 @@ func (s *systemConfigurator) getRemovableKeysWithDefaults() []string { return keys } +// discoverExistingKeys probes scutil for all NetBird DNS keys that may exist. +// This handles the case where createdKeys is empty (e.g., state file lost after unclean shutdown). +func (s *systemConfigurator) discoverExistingKeys() []string { + dnsKeys, err := getSystemDNSKeys() + if err != nil { + log.Errorf("failed to get system DNS keys: %v", err) + return nil + } + + var keys []string + + for _, suffix := range []string{searchSuffix, matchSuffix, localSuffix} { + key := getKeyWithInput(netbirdDNSStateKeyFormat, suffix) + if strings.Contains(dnsKeys, key) { + keys = append(keys, key) + } + } + + for _, suffix := range []string{searchSuffix, matchSuffix} { + for i := 0; ; i++ { + key := fmt.Sprintf(netbirdDNSStateKeyIndexedFormat, suffix, i) + if !strings.Contains(dnsKeys, key) { + break + } + keys = append(keys, key) + } + } + + return keys +} + +// getSystemDNSKeys gets all DNS keys +func getSystemDNSKeys() (string, error) { + command := "list .*DNS\nquit\n" + out, err := runSystemConfigCommand(command) + if err != nil { + return "", err + } + return string(out), nil +} + func (s *systemConfigurator) removeKeyFromSystemConfig(key string) error { line := buildRemoveKeyOperation(key) _, err := runSystemConfigCommand(wrapCommand(line)) @@ -184,12 +230,11 @@ func (s *systemConfigurator) addLocalDNS() error { return nil } - if err := s.addSearchDomains( - localKey, - strings.Join(s.systemDNSSettings.Domains, " "), s.systemDNSSettings.ServerIP, s.systemDNSSettings.ServerPort, - ); err != nil { - return fmt.Errorf("add search domains: %w", err) + domainsStr := strings.Join(s.systemDNSSettings.Domains, " ") + if err := s.addDNSState(localKey, domainsStr, s.systemDNSSettings.ServerIP, s.systemDNSSettings.ServerPort, true); err != nil { + return fmt.Errorf("add local dns state: %w", err) } + s.createdKeys[localKey] = struct{}{} return nil } @@ -280,28 +325,77 @@ func (s *systemConfigurator) getOriginalNameservers() []netip.Addr { return slices.Clone(s.origNameservers) } -func (s *systemConfigurator) addSearchDomains(key, domains string, ip netip.Addr, port int) error { - err := s.addDNSState(key, domains, ip, port, true) - if err != nil { - return fmt.Errorf("add dns state: %w", err) +// splitDomainsIntoBatches splits domains into batches respecting both element count and byte size limits. +func splitDomainsIntoBatches(domains []string) [][]string { + if len(domains) == 0 { + return nil } - log.Infof("added %d search domains to the state. Domain list: %s", len(strings.Split(domains, " ")), domains) + var batches [][]string + var current []string + currentBytes := 0 - s.createdKeys[key] = struct{}{} + for _, d := range domains { + domainLen := len(d) + newBytes := currentBytes + domainLen + if currentBytes > 0 { + newBytes++ // space separator + } - return nil + if len(current) > 0 && (len(current) >= maxDomainsPerResolverEntry || newBytes > maxDomainBytesPerResolverEntry) { + batches = append(batches, current) + current = nil + currentBytes = 0 + } + + current = append(current, d) + if currentBytes > 0 { + currentBytes += 1 + domainLen + } else { + currentBytes = domainLen + } + } + + if len(current) > 0 { + batches = append(batches, current) + } + + return batches } -func (s *systemConfigurator) addMatchDomains(key, domains string, dnsServer netip.Addr, port int) error { - err := s.addDNSState(key, domains, dnsServer, port, false) - if err != nil { - return fmt.Errorf("add dns state: %w", err) +// removeKeysContaining removes all created keys that contain the given substring. +func (s *systemConfigurator) removeKeysContaining(suffix string) error { + var toRemove []string + for key := range s.createdKeys { + if strings.Contains(key, suffix) { + toRemove = append(toRemove, key) + } + } + var multiErr *multierror.Error + for _, key := range toRemove { + if err := s.removeKeyFromSystemConfig(key); err != nil { + multiErr = multierror.Append(multiErr, fmt.Errorf("couldn't remove key %s: %w", key, err)) + } + } + return nberrors.FormatErrorOrNil(multiErr) +} + +// addBatchedDomains splits domains into batches and creates indexed scutil keys for each batch. +func (s *systemConfigurator) addBatchedDomains(suffix string, domains []string, ip netip.Addr, port int, enableSearch bool) error { + batches := splitDomainsIntoBatches(domains) + + for i, batch := range batches { + key := fmt.Sprintf(netbirdDNSStateKeyIndexedFormat, suffix, i) + domainsStr := strings.Join(batch, " ") + + if err := s.addDNSState(key, domainsStr, ip, port, enableSearch); err != nil { + return fmt.Errorf("add dns state for batch %d: %w", i, err) + } + + s.createdKeys[key] = struct{}{} } - log.Infof("added %d match domains to the state. Domain list: %s", len(strings.Split(domains, " ")), domains) - - s.createdKeys[key] = struct{}{} + log.Infof("added %d %s domains across %d resolver entries", len(domains), suffix, len(batches)) return nil } @@ -364,7 +458,6 @@ func (s *systemConfigurator) flushDNSCache() error { if out, err := cmd.CombinedOutput(); err != nil { return fmt.Errorf("restart mDNSResponder: %w, output: %s", err, out) } - log.Info("flushed DNS cache") return nil } diff --git a/client/internal/dns/host_darwin_test.go b/client/internal/dns/host_darwin_test.go index 28915de65..94d020c39 100644 --- a/client/internal/dns/host_darwin_test.go +++ b/client/internal/dns/host_darwin_test.go @@ -3,7 +3,10 @@ package dns import ( + "bufio" + "bytes" "context" + "fmt" "net/netip" "os/exec" "path/filepath" @@ -49,17 +52,22 @@ func TestDarwinDNSUncleanShutdownCleanup(t *testing.T) { require.NoError(t, sm.PersistState(context.Background())) - searchKey := getKeyWithInput(netbirdDNSStateKeyFormat, searchSuffix) - matchKey := getKeyWithInput(netbirdDNSStateKeyFormat, matchSuffix) localKey := getKeyWithInput(netbirdDNSStateKeyFormat, localSuffix) + // Collect all created keys for cleanup verification + createdKeys := make([]string, 0, len(configurator.createdKeys)) + for key := range configurator.createdKeys { + createdKeys = append(createdKeys, key) + } + defer func() { - for _, key := range []string{searchKey, matchKey, localKey} { + for _, key := range createdKeys { _ = removeTestDNSKey(key) } + _ = removeTestDNSKey(localKey) }() - for _, key := range []string{searchKey, matchKey, localKey} { + for _, key := range createdKeys { exists, err := checkDNSKeyExists(key) require.NoError(t, err) if exists { @@ -83,13 +91,223 @@ func TestDarwinDNSUncleanShutdownCleanup(t *testing.T) { err = shutdownState.Cleanup() require.NoError(t, err) - for _, key := range []string{searchKey, matchKey, localKey} { + for _, key := range createdKeys { exists, err := checkDNSKeyExists(key) require.NoError(t, err) assert.False(t, exists, "Key %s should NOT exist after cleanup", key) } } +// generateShortDomains generates domains like a.com, b.com, ..., aa.com, ab.com, etc. +func generateShortDomains(count int) []string { + domains := make([]string, 0, count) + for i := range count { + label := "" + n := i + for { + label = string(rune('a'+n%26)) + label + n = n/26 - 1 + if n < 0 { + break + } + } + domains = append(domains, label+".com") + } + return domains +} + +// generateLongDomains generates domains like subdomain-000.department.organization-name.example.com +func generateLongDomains(count int) []string { + domains := make([]string, 0, count) + for i := range count { + domains = append(domains, fmt.Sprintf("subdomain-%03d.department.organization-name.example.com", i)) + } + return domains +} + +// readDomainsFromKey reads the SupplementalMatchDomains array back from scutil for a given key. +func readDomainsFromKey(t *testing.T, key string) []string { + t.Helper() + + cmd := exec.Command(scutilPath) + cmd.Stdin = strings.NewReader(fmt.Sprintf("open\nshow %s\nquit\n", key)) + out, err := cmd.Output() + require.NoError(t, err, "scutil show should succeed") + + var domains []string + inArray := false + scanner := bufio.NewScanner(bytes.NewReader(out)) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, "SupplementalMatchDomains") && strings.Contains(line, "") { + inArray = true + continue + } + if inArray { + if line == "}" { + break + } + // lines look like: "0 : a.com" + parts := strings.SplitN(line, " : ", 2) + if len(parts) == 2 { + domains = append(domains, parts[1]) + } + } + } + require.NoError(t, scanner.Err()) + return domains +} + +func TestSplitDomainsIntoBatches(t *testing.T) { + tests := []struct { + name string + domains []string + expectedCount int + checkAllPresent bool + }{ + { + name: "empty", + domains: nil, + expectedCount: 0, + }, + { + name: "under_limit", + domains: generateShortDomains(10), + expectedCount: 1, + checkAllPresent: true, + }, + { + name: "at_element_limit", + domains: generateShortDomains(50), + expectedCount: 1, + checkAllPresent: true, + }, + { + name: "over_element_limit", + domains: generateShortDomains(51), + expectedCount: 2, + checkAllPresent: true, + }, + { + name: "triple_element_limit", + domains: generateShortDomains(150), + expectedCount: 3, + checkAllPresent: true, + }, + { + name: "long_domains_hit_byte_limit", + domains: generateLongDomains(50), + checkAllPresent: true, + }, + { + name: "500_short_domains", + domains: generateShortDomains(500), + expectedCount: 10, + checkAllPresent: true, + }, + { + name: "500_long_domains", + domains: generateLongDomains(500), + checkAllPresent: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + batches := splitDomainsIntoBatches(tc.domains) + + if tc.expectedCount > 0 { + assert.Len(t, batches, tc.expectedCount, "expected %d batches", tc.expectedCount) + } + + // Verify each batch respects limits + for i, batch := range batches { + assert.LessOrEqual(t, len(batch), maxDomainsPerResolverEntry, + "batch %d exceeds element limit", i) + + totalBytes := 0 + for j, d := range batch { + if j > 0 { + totalBytes++ + } + totalBytes += len(d) + } + assert.LessOrEqual(t, totalBytes, maxDomainBytesPerResolverEntry, + "batch %d exceeds byte limit (%d bytes)", i, totalBytes) + } + + if tc.checkAllPresent { + var all []string + for _, batch := range batches { + all = append(all, batch...) + } + assert.Equal(t, tc.domains, all, "all domains should be present in order") + } + }) + } +} + +// TestMatchDomainBatching writes increasing numbers of domains via the batching mechanism +// and verifies all domains are readable across multiple scutil keys. +func TestMatchDomainBatching(t *testing.T) { + if testing.Short() { + t.Skip("skipping scutil integration test in short mode") + } + + testCases := []struct { + name string + count int + generator func(int) []string + }{ + {"short_10", 10, generateShortDomains}, + {"short_50", 50, generateShortDomains}, + {"short_100", 100, generateShortDomains}, + {"short_200", 200, generateShortDomains}, + {"short_500", 500, generateShortDomains}, + {"long_10", 10, generateLongDomains}, + {"long_50", 50, generateLongDomains}, + {"long_100", 100, generateLongDomains}, + {"long_200", 200, generateLongDomains}, + {"long_500", 500, generateLongDomains}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + configurator := &systemConfigurator{ + createdKeys: make(map[string]struct{}), + } + + defer func() { + for key := range configurator.createdKeys { + _ = removeTestDNSKey(key) + } + }() + + domains := tc.generator(tc.count) + err := configurator.addBatchedDomains(matchSuffix, domains, netip.MustParseAddr("100.64.0.1"), 53, false) + require.NoError(t, err) + + batches := splitDomainsIntoBatches(domains) + t.Logf("wrote %d domains across %d batched keys", tc.count, len(batches)) + + // Read back all domains from all batched keys + var got []string + for i := range batches { + key := fmt.Sprintf(netbirdDNSStateKeyIndexedFormat, matchSuffix, i) + exists, err := checkDNSKeyExists(key) + require.NoError(t, err) + require.True(t, exists, "key %s should exist", key) + + got = append(got, readDomainsFromKey(t, key)...) + } + + t.Logf("read back %d/%d domains from %d keys", len(got), tc.count, len(batches)) + assert.Equal(t, tc.count, len(got), "all domains should be readable") + assert.Equal(t, domains, got, "domains should match in order") + }) + } +} + func checkDNSKeyExists(key string) (bool, error) { cmd := exec.Command(scutilPath) cmd.Stdin = strings.NewReader("show " + key + "\nquit\n") @@ -158,15 +376,15 @@ func setupTestConfigurator(t *testing.T) (*systemConfigurator, *statemanager.Man createdKeys: make(map[string]struct{}), } - searchKey := getKeyWithInput(netbirdDNSStateKeyFormat, searchSuffix) - matchKey := getKeyWithInput(netbirdDNSStateKeyFormat, matchSuffix) - localKey := getKeyWithInput(netbirdDNSStateKeyFormat, localSuffix) - cleanup := func() { _ = sm.Stop(context.Background()) - for _, key := range []string{searchKey, matchKey, localKey} { + for key := range configurator.createdKeys { _ = removeTestDNSKey(key) } + // Also clean up old-format keys and local key in case they exist + _ = removeTestDNSKey(getKeyWithInput(netbirdDNSStateKeyFormat, searchSuffix)) + _ = removeTestDNSKey(getKeyWithInput(netbirdDNSStateKeyFormat, matchSuffix)) + _ = removeTestDNSKey(getKeyWithInput(netbirdDNSStateKeyFormat, localSuffix)) } return configurator, sm, cleanup From a322dce42af6368bab14a27a3480599f76eab451 Mon Sep 17 00:00:00 2001 From: shuuri-labs <61762328+shuuri-labs@users.noreply.github.com> Date: Wed, 18 Feb 2026 20:59:55 +0100 Subject: [PATCH 145/374] [self-hosted] create migration script for pre v0.65.0 to post v0.65.0 (combined) (#5350) --- infrastructure_files/migrate.sh | 1286 +++++++++++++++++++++++++++++++ 1 file changed, 1286 insertions(+) create mode 100755 infrastructure_files/migrate.sh diff --git a/infrastructure_files/migrate.sh b/infrastructure_files/migrate.sh new file mode 100755 index 000000000..67895fab6 --- /dev/null +++ b/infrastructure_files/migrate.sh @@ -0,0 +1,1286 @@ +#!/bin/bash +# +# NetBird Migration Script: Pre-v0.65.0 → Combined Container Setup +# +# Migrates from the old 5-container deployment (dashboard, signal, relay, management, coturn) +# to the new 2-container setup (Traefik + combined netbird-server). +# +# Supported: Embedded IdP (Dex) setups with embedded Caddy or custom reverse proxy. +# Not supported: External IdP (Auth0, Keycloak, etc.) — use getting-started.sh for fresh setup. +# +# Usage: +# ./migrate.sh [--install-dir /path/to/netbird] [--non-interactive] + +set -euo pipefail + +############################################ +# Constants +############################################ + +readonly SCRIPT_VERSION="1.0.0" +readonly DASHBOARD_IMAGE="netbirdio/dashboard:latest" +readonly NETBIRD_SERVER_IMAGE="netbirdio/netbird-server:latest" +readonly SED_STRIP_PADDING='s/=//g' +readonly MSG_SEPARATOR="==========================================" +readonly PROXY_TYPE_CADDY="caddy_embedded" + +# Colors (disabled if not a terminal) +if [[ -t 1 ]]; then + readonly RED='\033[0;31m' + readonly GREEN='\033[0;32m' + readonly YELLOW='\033[1;33m' + readonly BLUE='\033[0;34m' + readonly NC='\033[0m' +else + readonly RED='' + readonly GREEN='' + readonly YELLOW='' + readonly BLUE='' + readonly NC='' +fi + +############################################ +# Global Variables (set during detection) +############################################ + +INSTALL_DIR="" +NON_INTERACTIVE=false +DOCKER_COMPOSE_CMD="" + +# Detection results +PROXY_TYPE="" # caddy_embedded | traefik | external +IDP_TYPE="" # embedded | external +MGMT_VOLUME="" # detected management volume name +DOMAIN="" +LETSENCRYPT_EMAIL="" +STORE_ENGINE="sqlite" +STORE_DSN="" +ENCRYPTION_KEY="" +RELAY_SECRET="" +SIGNKEY_REFRESH="true" +TRUSTED_PROXIES="" +TRUSTED_PROXIES_COUNT="" +TRUSTED_PEERS="" +MANAGEMENT_JSON_PATH="" +BACKUP_DIR="" + +############################################ +# Utility Functions +############################################ + +log_info() { + local msg="$1" + echo -e "${BLUE}[INFO]${NC} ${msg}" + return 0 +} + +log_warn() { + local msg="$1" + echo -e "${YELLOW}[WARN]${NC} ${msg}" + return 0 +} + +log_error() { + local msg="$1" + echo -e "${RED}[ERROR]${NC} ${msg}" >&2 + return 0 +} + +log_success() { + local msg="$1" + echo -e "${GREEN}[OK]${NC} ${msg}" + return 0 +} + +print_banner() { + echo "" + echo "$MSG_SEPARATOR" + echo " NetBird Migration Tool v${SCRIPT_VERSION}" + echo " Pre-v0.65.0 → Combined Container Setup" + echo "$MSG_SEPARATOR" + echo "" + return 0 +} + +confirm_action() { + local prompt="$1" + if [[ "$NON_INTERACTIVE" == "true" ]]; then + return 0 + fi + echo "" + echo -n "$prompt [y/N]: " + read -r response < /dev/tty + if [[ ! "$response" =~ ^[Yy]$ ]]; then + log_error "Aborted by user." + exit 1 + fi + return 0 +} + +############################################ +# Phase 0: Preflight & Detection +############################################ + +check_dependencies() { + log_info "Checking dependencies..." + + local missing=() + + if ! command -v docker &>/dev/null; then + missing+=("docker") + fi + + if command -v docker-compose &>/dev/null; then + DOCKER_COMPOSE_CMD="docker-compose" + elif docker compose --help &>/dev/null 2>&1; then + DOCKER_COMPOSE_CMD="docker compose" + else + missing+=("docker-compose") + fi + + if ! command -v jq &>/dev/null; then + missing+=("jq") + fi + + if ! command -v openssl &>/dev/null; then + missing+=("openssl") + fi + + if ! command -v curl &>/dev/null; then + missing+=("curl") + fi + + if [[ ${#missing[@]} -gt 0 ]]; then + log_error "Missing required dependencies: ${missing[*]}" + echo "Please install them and re-run the script." + exit 1 + fi + + log_success "All dependencies found (docker compose: '$DOCKER_COMPOSE_CMD')" + return 0 +} + +detect_install_dir() { + if [[ -n "$INSTALL_DIR" ]]; then + if [[ ! -d "$INSTALL_DIR" ]]; then + log_error "Specified install directory does not exist: $INSTALL_DIR" + exit 1 + fi + return 0 + fi + + log_info "Detecting installation directory..." + + local search_paths=("$PWD" "/opt/netbird" "/opt/wiretrustee") + for dir in "${search_paths[@]}"; do + if [[ -f "$dir/management.json" ]] || [[ -f "$dir/artifacts/management.json" ]]; then + INSTALL_DIR="$dir" + log_success "Found installation at: $INSTALL_DIR" + return 0 + fi + done + + if [[ "$NON_INTERACTIVE" == "true" ]]; then + log_error "Could not auto-detect installation directory. Use --install-dir to specify." + exit 1 + fi + + echo "" + echo -n "Enter the path to your NetBird installation directory: " + read -r INSTALL_DIR < /dev/tty + if [[ ! -d "$INSTALL_DIR" ]]; then + log_error "Directory does not exist: $INSTALL_DIR" + exit 1 + fi + return 0 +} + +validate_old_setup() { + log_info "Validating old setup..." + + # Find management.json — check both root and artifacts/ + if [[ -f "$INSTALL_DIR/management.json" ]]; then + MANAGEMENT_JSON_PATH="$INSTALL_DIR/management.json" + elif [[ -f "$INSTALL_DIR/artifacts/management.json" ]]; then + MANAGEMENT_JSON_PATH="$INSTALL_DIR/artifacts/management.json" + else + log_error "Cannot find management.json in $INSTALL_DIR or $INSTALL_DIR/artifacts/" + echo "This doesn't appear to be a valid NetBird installation." + exit 1 + fi + + # Check for docker-compose.yml (in root or artifacts/) + local compose_found=false + if [[ -f "$INSTALL_DIR/docker-compose.yml" ]]; then + compose_found=true + elif [[ -f "$INSTALL_DIR/artifacts/docker-compose.yml" ]]; then + compose_found=true + fi + + if [[ "$compose_found" != "true" ]]; then + log_error "Cannot find docker-compose.yml in $INSTALL_DIR or $INSTALL_DIR/artifacts/" + exit 1 + fi + + log_success "Found management.json at: $MANAGEMENT_JSON_PATH" + return 0 +} + +check_already_migrated() { + if [[ -f "$INSTALL_DIR/config.yaml" ]]; then + log_warn "config.yaml already exists in $INSTALL_DIR" + echo "It appears this installation has already been migrated." + echo "If you want to re-run the migration, remove config.yaml first." + exit 0 + fi + return 0 +} + +detect_reverse_proxy() { + log_info "Detecting reverse proxy type..." + + local compose_file="" + if [[ -f "$INSTALL_DIR/docker-compose.yml" ]]; then + compose_file="$INSTALL_DIR/docker-compose.yml" + elif [[ -f "$INSTALL_DIR/artifacts/docker-compose.yml" ]]; then + compose_file="$INSTALL_DIR/artifacts/docker-compose.yml" + fi + + # Check for Traefik service or labels + if grep -q 'traefik' "$compose_file" 2>/dev/null; then + PROXY_TYPE="traefik" + log_info "Detected: Traefik reverse proxy" + return 0 + fi + + # Check for embedded Caddy — two patterns: + # 1. Old configure.sh: dashboard container with LETSENCRYPT_DOMAIN env var + ports 80/443 + # 2. v0.62+ getting-started.sh: Caddy service in compose or standalone Caddyfile + if grep -q 'LETSENCRYPT_DOMAIN' "$compose_file" 2>/dev/null && { grep -q '443:443' "$compose_file" 2>/dev/null || grep -q '443:' "$compose_file" 2>/dev/null; }; then + PROXY_TYPE="$PROXY_TYPE_CADDY" + log_info "Detected: Embedded Caddy (dashboard container with Let's Encrypt)" + return 0 + fi + + # Check for Caddy service in docker-compose.yml (v0.62+ pattern) + if grep -qE '^\s+caddy:|^\s+image:.*caddy' "$compose_file" 2>/dev/null; then + PROXY_TYPE="$PROXY_TYPE_CADDY" + log_info "Detected: Caddy reverse proxy (in Docker Compose)" + return 0 + fi + + # Check for standalone Caddyfile in install directory (v0.62+ getting-started.sh) + if [[ -f "$INSTALL_DIR/Caddyfile" ]]; then + # Verify Caddy is referenced in docker-compose.yml or running as a container + if grep -q 'caddy' "$compose_file" 2>/dev/null || grep -q 'Caddyfile' "$compose_file" 2>/dev/null; then + PROXY_TYPE="$PROXY_TYPE_CADDY" + log_info "Detected: Caddy reverse proxy (Caddyfile + Docker Compose)" + return 0 + fi + # Caddyfile exists but not in compose — might be running on host + PROXY_TYPE="$PROXY_TYPE_CADDY" + log_info "Detected: Caddy reverse proxy (standalone Caddyfile)" + return 0 + fi + + # Check for disabled Let's Encrypt (external proxy) + if [[ -f "$INSTALL_DIR/setup.env" ]] && grep -q 'NETBIRD_DISABLE_LETSENCRYPT=true' "$INSTALL_DIR/setup.env" 2>/dev/null; then + PROXY_TYPE="external" + log_info "Detected: External reverse proxy (Let's Encrypt disabled)" + return 0 + fi + + # Default to external + PROXY_TYPE="external" + log_info "Detected: External/custom reverse proxy" + return 0 +} + +detect_idp_type() { + log_info "Detecting identity provider type..." + + # Check for embedded IdP (v0.62.0+ getting-started.sh format) + local embedded_enabled + embedded_enabled=$(jq -r '.EmbeddedIdP.Enabled // false' "$MANAGEMENT_JSON_PATH" 2>/dev/null || echo "false") + if [[ "$embedded_enabled" == "true" ]]; then + IDP_TYPE="embedded" + log_success "IdP type: embedded (suitable for migration)" + return 0 + fi + + # Check IdpManagerConfig.ManagerType (old configure.sh format) + local manager_type + manager_type=$(jq -r '.IdpManagerConfig.ManagerType // ""' "$MANAGEMENT_JSON_PATH" 2>/dev/null || echo "") + + if [[ -n "$manager_type" && "$manager_type" != "null" && "$manager_type" != "none" && "$manager_type" != "" ]]; then + IDP_TYPE="external" + log_error "External IdP detected: $manager_type" + echo "" + echo "This migration script only supports embedded IdP setups." + echo "External IdP providers (Auth0, Keycloak, Zitadel, etc.) require" + echo "a fresh installation using getting-started.sh." + echo "" + echo "Please refer to the NetBird documentation for upgrade instructions:" + echo " https://docs.netbird.io/selfhosted/getting-started" + exit 1 + fi + + # Check HttpConfig.AuthIssuer for well-known external providers + local auth_issuer + auth_issuer=$(jq -r '.HttpConfig.AuthIssuer // ""' "$MANAGEMENT_JSON_PATH" 2>/dev/null || echo "") + + if [[ -n "$auth_issuer" && "$auth_issuer" != "null" ]]; then + for provider in "auth0.com" "accounts.google.com" "login.microsoftonline.com" "keycloak" "zitadel" "authentik"; do + if echo "$auth_issuer" | grep -qi "$provider" 2>/dev/null; then + log_error "External OIDC provider detected: $auth_issuer" + echo "" + echo "This migration script only supports embedded IdP setups." + echo "Please use getting-started.sh for a fresh installation." + exit 1 + fi + done + fi + + # No embedded IdP and no external IdP detected — assume old setup without IdP manager + IDP_TYPE="embedded" + log_success "IdP type: embedded (suitable for migration)" + return 0 +} + +detect_volumes() { + log_info "Detecting Docker volumes..." + + local volumes_list + volumes_list=$(docker volume ls --format '{{.Name}}' 2>/dev/null || echo "") + + # Check for well-known volume name patterns (exact match) + local volume_patterns=( + "wiretrustee-mgmt" + "netbird-mgmt" + ) + for pattern in "${volume_patterns[@]}"; do + if echo "$volumes_list" | grep -q "^${pattern}$"; then + MGMT_VOLUME="$pattern" + log_success "Found management volume: $MGMT_VOLUME" + return 0 + fi + done + + # Check compose-prefixed patterns (e.g., netbird_netbird-mgmt, infrastructure_files_netbird-mgmt) + local compose_prefixed + compose_prefixed=$(echo "$volumes_list" | grep -E '(netbird|wiretrustee).*mgmt' | head -n1 || echo "") + if [[ -n "$compose_prefixed" ]]; then + MGMT_VOLUME="$compose_prefixed" + log_success "Found management volume (compose-prefixed): $MGMT_VOLUME" + return 0 + fi + + # Try to extract volume name from old docker-compose.yml + local compose_file="" + if [[ -f "$INSTALL_DIR/docker-compose.yml" ]]; then + compose_file="$INSTALL_DIR/docker-compose.yml" + elif [[ -f "$INSTALL_DIR/artifacts/docker-compose.yml" ]]; then + compose_file="$INSTALL_DIR/artifacts/docker-compose.yml" + fi + if [[ -n "$compose_file" ]]; then + # Look for volume mount on /var/lib/netbird in management or netbird-server service + local vol_name + vol_name=$(grep -E '^\s+-\s+\S+:/var/lib/netbird' "$compose_file" 2>/dev/null | head -1 | sed 's/.*- //' | sed 's/:.*//' | tr -d ' ' || echo "") + if [[ -n "$vol_name" && "$vol_name" != "." && "$vol_name" != "/" ]]; then + # Check if this volume exists in Docker + local full_vol + full_vol=$(echo "$volumes_list" | grep -F "$vol_name" | head -1 || echo "") + if [[ -n "$full_vol" ]]; then + MGMT_VOLUME="$full_vol" + log_success "Found management volume (from compose): $MGMT_VOLUME" + return 0 + fi + fi + fi + + log_warn "Could not detect management volume. A new volume will be created." + MGMT_VOLUME="" + return 0 +} + +detect_domain() { + log_info "Detecting domain..." + + # Try setup.env first + if [[ -z "$DOMAIN" && -f "$INSTALL_DIR/setup.env" ]]; then + DOMAIN=$(grep '^NETBIRD_DOMAIN=' "$INSTALL_DIR/setup.env" 2>/dev/null | cut -d'=' -f2 | tr -d '"' | tr -d "'" || echo "") + fi + + # Try EmbeddedIdP.Issuer (v0.62.0+ getting-started.sh format) + if [[ -z "$DOMAIN" ]]; then + local issuer + issuer=$(jq -r '.EmbeddedIdP.Issuer // ""' "$MANAGEMENT_JSON_PATH" 2>/dev/null || echo "") + if [[ -n "$issuer" && "$issuer" != "null" ]]; then + DOMAIN=$(echo "$issuer" | sed 's|https\?://||' | sed 's|/.*||' | sed 's|:.*||') + fi + fi + + # Try HttpConfig.AuthIssuer (old configure.sh format) + if [[ -z "$DOMAIN" ]]; then + local issuer + issuer=$(jq -r '.HttpConfig.AuthIssuer // ""' "$MANAGEMENT_JSON_PATH" 2>/dev/null || echo "") + if [[ -n "$issuer" && "$issuer" != "null" ]]; then + DOMAIN=$(echo "$issuer" | sed 's|https\?://||' | sed 's|/.*||' | sed 's|:.*||') + fi + fi + + # Try dashboard.env NETBIRD_MGMT_API_ENDPOINT + if [[ -z "$DOMAIN" && -f "$INSTALL_DIR/dashboard.env" ]]; then + local endpoint + endpoint=$(grep '^NETBIRD_MGMT_API_ENDPOINT=' "$INSTALL_DIR/dashboard.env" 2>/dev/null | cut -d'=' -f2 | tr -d '"' | tr -d "'" || echo "") + if [[ -n "$endpoint" ]]; then + DOMAIN=$(echo "$endpoint" | sed 's|https\?://||' | sed 's|/.*||' | sed 's|:.*||') + fi + fi + + if [[ -z "$DOMAIN" ]]; then + log_error "Could not detect domain from management.json, setup.env, or dashboard.env." + exit 1 + fi + + # Detect Let's Encrypt email from setup.env or dashboard.env LETSENCRYPT_DOMAIN + if [[ -f "$INSTALL_DIR/setup.env" ]]; then + LETSENCRYPT_EMAIL=$(grep '^NETBIRD_LETSENCRYPT_EMAIL=' "$INSTALL_DIR/setup.env" 2>/dev/null | cut -d'=' -f2 | tr -d '"' | tr -d "'" || echo "") + fi + + log_success "Domain: $DOMAIN" + if [[ -n "$LETSENCRYPT_EMAIL" ]]; then + log_success "Let's Encrypt email: $LETSENCRYPT_EMAIL" + fi + return 0 +} + +detect_store_config() { + log_info "Detecting store configuration..." + + # Engine from management.json + local engine + engine=$(jq -r '.StoreConfig.Engine // ""' "$MANAGEMENT_JSON_PATH" 2>/dev/null || echo "") + if [[ -n "$engine" && "$engine" != "null" && "$engine" != "" ]]; then + STORE_ENGINE="$engine" + fi + + # DSN from environment files + if [[ -f "$INSTALL_DIR/setup.env" ]]; then + local pg_dsn + pg_dsn=$(grep '^NETBIRD_STORE_ENGINE_POSTGRES_DSN=' "$INSTALL_DIR/setup.env" 2>/dev/null | sed 's/^NETBIRD_STORE_ENGINE_POSTGRES_DSN=//' | tr -d '"' || echo "") + if [[ -n "$pg_dsn" ]]; then + STORE_DSN="$pg_dsn" + fi + + local mysql_dsn + mysql_dsn=$(grep '^NETBIRD_STORE_ENGINE_MYSQL_DSN=' "$INSTALL_DIR/setup.env" 2>/dev/null | sed 's/^NETBIRD_STORE_ENGINE_MYSQL_DSN=//' | tr -d '"' || echo "") + if [[ -n "$mysql_dsn" ]]; then + STORE_DSN="$mysql_dsn" + fi + fi + + # Also check base.setup.env + if [[ -z "$STORE_DSN" && -f "$INSTALL_DIR/base.setup.env" ]]; then + local pg_dsn + pg_dsn=$(grep '^NETBIRD_STORE_ENGINE_POSTGRES_DSN=' "$INSTALL_DIR/base.setup.env" 2>/dev/null | sed 's/^NETBIRD_STORE_ENGINE_POSTGRES_DSN=//' | tr -d '"' || echo "") + if [[ -n "$pg_dsn" ]]; then + STORE_DSN="$pg_dsn" + fi + + local mysql_dsn + mysql_dsn=$(grep '^NETBIRD_STORE_ENGINE_MYSQL_DSN=' "$INSTALL_DIR/base.setup.env" 2>/dev/null | sed 's/^NETBIRD_STORE_ENGINE_MYSQL_DSN=//' | tr -d '"' || echo "") + if [[ -n "$mysql_dsn" ]]; then + STORE_DSN="$mysql_dsn" + fi + fi + + log_success "Store engine: $STORE_ENGINE" + if [[ -n "$STORE_DSN" ]]; then + log_success "Store DSN: [detected]" + fi + return 0 +} + +extract_config_values() { + log_info "Extracting configuration from management.json..." + + # DataStoreEncryptionKey + ENCRYPTION_KEY=$(jq -r '.DataStoreEncryptionKey // ""' "$MANAGEMENT_JSON_PATH" 2>/dev/null || echo "") + if [[ -z "$ENCRYPTION_KEY" || "$ENCRYPTION_KEY" == "null" ]]; then + ENCRYPTION_KEY=$(openssl rand -base64 32) + log_warn "No encryption key found in management.json — generated a new one." + log_warn "IMPORTANT: Save this key! Without it, existing encrypted data cannot be read." + echo " Encryption key: $ENCRYPTION_KEY" + fi + + # Relay secret from management.json + RELAY_SECRET=$(jq -r '.Relay.Secret // ""' "$MANAGEMENT_JSON_PATH" 2>/dev/null || echo "") + + # Fallback: relay secret from setup.env + if [[ (-z "$RELAY_SECRET" || "$RELAY_SECRET" == "null") && -f "$INSTALL_DIR/setup.env" ]]; then + RELAY_SECRET=$(grep '^NETBIRD_RELAY_AUTH_SECRET=' "$INSTALL_DIR/setup.env" 2>/dev/null | cut -d'=' -f2 | tr -d '"' | tr -d "'" || echo "") + fi + + # Fallback: relay secret from base.setup.env + if [[ (-z "$RELAY_SECRET" || "$RELAY_SECRET" == "null") && -f "$INSTALL_DIR/base.setup.env" ]]; then + RELAY_SECRET=$(grep '^NETBIRD_RELAY_AUTH_SECRET=' "$INSTALL_DIR/base.setup.env" 2>/dev/null | cut -d'=' -f2 | tr -d '"' | tr -d "'" || echo "") + fi + + # Generate if still empty + if [[ -z "$RELAY_SECRET" || "$RELAY_SECRET" == "null" ]]; then + RELAY_SECRET=$(openssl rand -base64 32 | sed "$SED_STRIP_PADDING") + log_warn "No relay secret found — generated a new one." + fi + + # IdpSignKeyRefreshEnabled — check both HttpConfig and EmbeddedIdP locations + local signkey_raw + signkey_raw=$(jq -r '(.HttpConfig.IdpSignKeyRefreshEnabled // .EmbeddedIdP.SignKeyRefreshEnabled) // "true"' "$MANAGEMENT_JSON_PATH" 2>/dev/null || echo "true") + if [[ "$signkey_raw" == "false" ]]; then + SIGNKEY_REFRESH="false" + else + SIGNKEY_REFRESH="true" + fi + + # ReverseProxy settings (may not exist in v0.62+ getting-started.sh format) + TRUSTED_PROXIES=$(jq -c '.ReverseProxy.TrustedHTTPProxies // []' "$MANAGEMENT_JSON_PATH" 2>/dev/null || echo "[]") + TRUSTED_PROXIES_COUNT=$(jq -r '.ReverseProxy.TrustedHTTPProxiesCount // 0' "$MANAGEMENT_JSON_PATH" 2>/dev/null || echo "0") + TRUSTED_PEERS=$(jq -c '.ReverseProxy.TrustedPeers // []' "$MANAGEMENT_JSON_PATH" 2>/dev/null || echo "[]") + + log_success "Configuration values extracted" + return 0 +} + +print_detection_summary() { + echo "" + echo "$MSG_SEPARATOR" + echo " Migration Summary" + echo "$MSG_SEPARATOR" + echo "" + echo " Install directory: $INSTALL_DIR" + echo " Domain: $DOMAIN" + echo " Reverse proxy: $PROXY_TYPE" + echo " Store engine: $STORE_ENGINE" + if [[ -n "$STORE_DSN" ]]; then + echo " Store DSN: [configured]" + fi + if [[ -n "$MGMT_VOLUME" ]]; then + echo " Management volume: $MGMT_VOLUME" + else + echo " Management volume: [new volume will be created]" + fi + echo " Encryption key: ${ENCRYPTION_KEY:0:8}..." + echo " Relay secret: ${RELAY_SECRET:0:8}..." + echo "" + + if [[ "$PROXY_TYPE" == "$PROXY_TYPE_CADDY" ]]; then + echo " Migration mode: AUTOMATIC" + echo " A Traefik-based docker-compose.yml will be generated and services" + echo " will be stopped and restarted automatically." + else + echo " Migration mode: MANUAL" + echo " New config files will be generated. You will need to stop old" + echo " containers, replace docker-compose.yml, and restart manually." + fi + echo "" + return 0 +} + +############################################ +# Phase 1: Backup +############################################ + +create_backup() { + BACKUP_DIR="$INSTALL_DIR/backup-$(date +%Y%m%d-%H%M%S)" + log_info "Creating backup at: $BACKUP_DIR" + mkdir -p "$BACKUP_DIR" + + # Copy config files + local files_to_backup=( + "docker-compose.yml" + "management.json" + "setup.env" + "base.setup.env" + "turnserver.conf" + "dashboard.env" + ) + + for f in "${files_to_backup[@]}"; do + if [[ -f "$INSTALL_DIR/$f" ]]; then + cp "$INSTALL_DIR/$f" "$BACKUP_DIR/$f" + fi + done + + # Back up artifacts/ if it exists + if [[ -d "$INSTALL_DIR/artifacts" ]]; then + cp -r "$INSTALL_DIR/artifacts" "$BACKUP_DIR/artifacts" + fi + + # Record state + { + echo "# NetBird migration backup state" + echo "# Created: $(date -u '+%Y-%m-%d %H:%M:%S UTC')" + echo "" + echo "## Docker volumes" + docker volume ls --format '{{.Name}}' 2>/dev/null | grep -E '(netbird|wiretrustee)' || echo "(none found)" + echo "" + echo "## Running containers" + docker ps --format '{{.Names}}\t{{.Image}}\t{{.Status}}' 2>/dev/null | grep -E '(netbird|wiretrustee|dashboard|signal|relay|management|coturn)' || echo "(none running)" + } > "$BACKUP_DIR/state.txt" + + # Generate rollback script + generate_rollback_script + + log_success "Backup created at: $BACKUP_DIR" + return 0 +} + +generate_rollback_script() { + cat > "$BACKUP_DIR/rollback.sh" <<'ROLLBACK_HEADER' +#!/bin/bash +set -euo pipefail + +# NetBird Migration Rollback Script +# Restores the pre-migration configuration and restarts old containers. + +ROLLBACK_HEADER + + cat >> "$BACKUP_DIR/rollback.sh" </dev/null; then + COMPOSE_CMD="docker-compose" +elif docker compose --help &>/dev/null 2>&1; then + COMPOSE_CMD="docker compose" +else + echo "ERROR: docker compose not found" >&2 + exit 1 +fi + +echo "Stopping current containers..." +\$COMPOSE_CMD down 2>/dev/null || true + +# Restore old config files +echo "Restoring configuration files..." +for f in docker-compose.yml management.json setup.env base.setup.env turnserver.conf dashboard.env; do + if [[ -f "\$BACKUP_DIR/\$f" ]]; then + cp "\$BACKUP_DIR/\$f" "\$INSTALL_DIR/\$f" + echo " Restored: \$f" + fi +done + +# Remove new config files +for f in config.yaml; do + if [[ -f "\$INSTALL_DIR/\$f" ]]; then + rm "\$INSTALL_DIR/\$f" + echo " Removed: \$f" + fi +done + +# Restart old containers +echo "Starting old containers..." +cd "\$INSTALL_DIR" +\$COMPOSE_CMD up -d + +echo "" +echo "Rollback complete. Old containers are running." +echo "Verify with: \$COMPOSE_CMD ps" +ROLLBACK_BODY + + chmod +x "$BACKUP_DIR/rollback.sh" + return 0 +} + +############################################ +# Phase 3: Generate New Configuration Files +############################################ + +generate_config_yaml() { + log_info "Generating config.yaml..." + + local dsn_line="" + if [[ -n "$STORE_DSN" ]]; then + dsn_line=" dsn: \"$STORE_DSN\"" + fi + + local reverse_proxy_section="" + # Only add reverseProxy if there are non-default values + local has_proxy_config=false + if [[ "$TRUSTED_PROXIES" != "[]" && -n "$TRUSTED_PROXIES" ]]; then + has_proxy_config=true + fi + if [[ "$TRUSTED_PROXIES_COUNT" != "0" && -n "$TRUSTED_PROXIES_COUNT" ]]; then + has_proxy_config=true + fi + if [[ "$TRUSTED_PEERS" != "[]" && -n "$TRUSTED_PEERS" ]]; then + # Check if it's only the default ["0.0.0.0/0"] + local default_peers='["0.0.0.0/0"]' + if [[ "$TRUSTED_PEERS" != "$default_peers" ]]; then + has_proxy_config=true + fi + fi + + if [[ "$has_proxy_config" == "true" ]]; then + reverse_proxy_section=" + reverseProxy:" + if [[ "$TRUSTED_PROXIES" != "[]" && -n "$TRUSTED_PROXIES" ]]; then + reverse_proxy_section+=" + trustedHTTPProxies:" + for proxy in $(echo "$TRUSTED_PROXIES" | jq -r '.[]' 2>/dev/null); do + reverse_proxy_section+=" + - \"$proxy\"" + done + fi + if [[ "$TRUSTED_PROXIES_COUNT" != "0" && -n "$TRUSTED_PROXIES_COUNT" ]]; then + reverse_proxy_section+=" + trustedHTTPProxiesCount: $TRUSTED_PROXIES_COUNT" + fi + if [[ "$TRUSTED_PEERS" != "[]" && -n "$TRUSTED_PEERS" ]]; then + reverse_proxy_section+=" + trustedPeers:" + for peer in $(echo "$TRUSTED_PEERS" | jq -r '.[]' 2>/dev/null); do + reverse_proxy_section+=" + - \"$peer\"" + done + fi + fi + + { + cat < "$INSTALL_DIR/config.yaml" + + log_success "Generated config.yaml" + return 0 +} + +generate_dashboard_env() { + log_info "Generating dashboard.env..." + + cat > "$INSTALL_DIR/dashboard.env" < "$INSTALL_DIR/docker-compose.yml" < "$INSTALL_DIR/docker-compose.yml" </dev/null) || true + + log_success "Old containers stopped" + return 0 +} + +start_new_services() { + log_info "Starting new containers..." + + (cd "$INSTALL_DIR" && $DOCKER_COMPOSE_CMD up -d) + + log_success "New containers started" + return 0 +} + +wait_for_health() { + log_info "Waiting for services to become healthy..." + + local max_attempts=60 + local attempt=0 + + set +e + echo -n " Checking" + while [[ $attempt -lt $max_attempts ]]; do + # Try OIDC endpoint through reverse proxy + if curl -sk -f -o /dev/null "https://${DOMAIN}/oauth2/.well-known/openid-configuration" 2>/dev/null; then + echo " done" + set -e + log_success "Services are healthy" + return 0 + fi + + # Also try health check endpoint directly + if curl -sk -f -o /dev/null "http://127.0.0.1:9000/" 2>/dev/null; then + echo " done" + set -e + log_success "Services are healthy (via healthcheck)" + return 0 + fi + + echo -n " ." + sleep 2 + attempt=$((attempt + 1)) + + if [[ $attempt -eq 30 ]]; then + echo "" + log_warn "Taking longer than expected. Checking container logs..." + (cd "$INSTALL_DIR" && $DOCKER_COMPOSE_CMD logs --tail=10 netbird-server 2>/dev/null) || true + echo -n " Still checking" + fi + done + echo "" + set -e + + log_warn "Health check timed out after $((max_attempts * 2)) seconds." + log_warn "Services may still be starting. Check with: cd $INSTALL_DIR && $DOCKER_COMPOSE_CMD logs" + return 0 +} + +############################################ +# Phase 5: Verification & Summary +############################################ + +verify_migration() { + log_info "Running verification checks..." + + local checks_passed=0 + local checks_total=3 + + # Check 1: Container health + local running + running=$(cd "$INSTALL_DIR" && $DOCKER_COMPOSE_CMD ps --format '{{.Name}}' 2>/dev/null | wc -l || echo "0") + if [[ "$running" -ge 2 ]]; then + log_success "Containers are running ($running services)" + checks_passed=$((checks_passed + 1)) + else + log_warn "Expected at least 2 running containers, found $running" + fi + + # Check 2: OIDC endpoint + local oidc_status + oidc_status=$(curl -sk -o /dev/null -w '%{http_code}' "https://${DOMAIN}/oauth2/.well-known/openid-configuration" 2>/dev/null || echo "000") + if [[ "$oidc_status" == "200" ]]; then + log_success "OIDC endpoint responding (HTTP $oidc_status)" + checks_passed=$((checks_passed + 1)) + else + log_warn "OIDC endpoint returned HTTP $oidc_status (expected 200)" + fi + + # Check 3: Management API (expect 401 = working but needs auth, not 502 = proxy error) + local api_status + api_status=$(curl -sk -o /dev/null -w '%{http_code}' "https://${DOMAIN}/api/accounts" 2>/dev/null || echo "000") + if [[ "$api_status" == "401" || "$api_status" == "200" || "$api_status" == "403" ]]; then + log_success "Management API responding (HTTP $api_status)" + checks_passed=$((checks_passed + 1)) + else + log_warn "Management API returned HTTP $api_status (expected 401/200/403)" + fi + + echo "" + echo " Verification: $checks_passed/$checks_total checks passed" + return 0 +} + +print_summary() { + echo "" + echo "$MSG_SEPARATOR" + echo " Migration Complete" + echo "$MSG_SEPARATOR" + echo "" + + if [[ "$PROXY_TYPE" == "$PROXY_TYPE_CADDY" ]]; then + echo " What was done:" + echo " - Old 5-container setup stopped" + echo " - New config.yaml generated (combined server config)" + echo " - New dashboard.env generated (embedded IdP)" + echo " - New docker-compose.yml generated (Traefik + combined server)" + echo " - New containers started" + else + echo " What was done:" + echo " - New config.yaml generated (combined server config)" + echo " - New dashboard.env generated (embedded IdP)" + echo " - New docker-compose.yml generated (exposed ports)" + echo "" + echo " What you need to do:" + echo " 1. Stop old containers:" + echo " cd $INSTALL_DIR && $DOCKER_COMPOSE_CMD down" + echo "" + echo " 2. Start new containers:" + echo " cd $INSTALL_DIR && $DOCKER_COMPOSE_CMD up -d" + echo "" + echo " 3. Update your reverse proxy to route:" + echo " - /signalexchange.SignalExchange/* -> 127.0.0.1:8081 (gRPC/h2c)" + echo " - /management.ManagementService/* -> 127.0.0.1:8081 (gRPC/h2c)" + echo " - /relay*, /ws-proxy/* -> 127.0.0.1:8081 (WebSocket)" + echo " - /api/*, /oauth2/* -> 127.0.0.1:8081 (HTTP)" + echo " - /* -> 127.0.0.1:8080 (dashboard)" + fi + + echo "" + echo " Backup location: $BACKUP_DIR" + echo " Rollback command: bash $BACKUP_DIR/rollback.sh" + echo "" + echo " IMPORTANT:" + echo " - Existing peers, routes, and policies are preserved in the database." + echo " - The embedded IdP data is preserved in the management volume." + echo " - Clients should reconnect automatically; if not: netbird down && netbird up" + echo "" + echo " Next steps:" + echo " - Access the dashboard: https://$DOMAIN" + echo " - Re-authenticate all clients: netbird down && netbird up" + echo " - Check logs: cd $INSTALL_DIR && $DOCKER_COMPOSE_CMD logs -f" + echo "" + return 0 +} + +############################################ +# Main +############################################ + +main() { + # Parse arguments + while [[ $# -gt 0 ]]; do + local arg="$1" + case "$arg" in + --install-dir) + local dir_value="$2" + INSTALL_DIR="$dir_value" + shift 2 + ;; + --non-interactive) + NON_INTERACTIVE=true + shift + ;; + --help|-h) + echo "Usage: $0 [--install-dir /path/to/netbird] [--non-interactive]" + echo "" + echo "Migrates a pre-v0.65.0 NetBird deployment to the combined container setup." + echo "" + echo "Options:" + echo " --install-dir DIR Path to existing NetBird installation" + echo " --non-interactive Skip confirmation prompts (for automation)" + echo " -h, --help Show this help message" + exit 0 + ;; + *) + log_error "Unknown option: $arg" + echo "Use --help for usage information." + exit 1 + ;; + esac + done + + print_banner + + # Phase 0: Preflight & Detection + check_dependencies + detect_install_dir + validate_old_setup + check_already_migrated + detect_reverse_proxy + detect_idp_type + detect_volumes + detect_domain + detect_store_config + extract_config_values + print_detection_summary + + confirm_action "Proceed with migration?" + + # Phase 1: Backup + create_backup + + # Phase 4: Apply migration + if [[ "$PROXY_TYPE" == "$PROXY_TYPE_CADDY" ]]; then + # Stop old containers BEFORE overwriting docker-compose.yml + stop_old_services + + # Phase 2 + 3: Generate new configuration files + generate_config_yaml + generate_dashboard_env + generate_docker_compose + + start_new_services + sleep 3 + wait_for_health + + # Phase 5: Verification + verify_migration + else + # For manual proxy setups, just generate files (don't stop/start) + generate_config_yaml + generate_dashboard_env + generate_docker_compose + fi + + print_summary + return 0 +} + +main "$@" From 4b5294e5968af6208404e89914cd99b8703cc029 Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Thu, 19 Feb 2026 08:14:11 +0100 Subject: [PATCH 146/374] [self-hosted] remove unused config example (#5383) --- combined/config-simple.yaml.example | 111 --------------------------- combined/config.yaml.example | 112 ++++++++++++++-------------- 2 files changed, 54 insertions(+), 169 deletions(-) delete mode 100644 combined/config-simple.yaml.example diff --git a/combined/config-simple.yaml.example b/combined/config-simple.yaml.example deleted file mode 100644 index 4a90adda8..000000000 --- a/combined/config-simple.yaml.example +++ /dev/null @@ -1,111 +0,0 @@ -# NetBird Combined Server Configuration -# Copy this file to config.yaml and customize for your deployment -# -# This is a Management server with optional embedded Signal, Relay, and STUN services. -# By default, all services run locally. You can use external services instead by -# setting the corresponding override fields. -# -# Architecture: -# - Management: Always runs locally (this IS the management server) -# - Signal: Local by default; set 'signalUri' to use external (disables local) -# - Relay: Local by default; set 'relays' to use external (disables local) -# - STUN: Local on port 3478 by default; set 'stuns' to use external instead - -server: - # Main HTTP/gRPC port for all services (Management, Signal, Relay) - listenAddress: ":443" - - # Public address that peers will use to connect to this server - # Used for relay connections and management DNS domain - # Format: protocol://hostname:port (e.g., https://server.mycompany.com:443) - exposedAddress: "https://server.mycompany.com:443" - - # STUN server ports (defaults to [3478] if not specified; set 'stuns' to use external) - # stunPorts: - # - 3478 - - # Metrics endpoint port - metricsPort: 9090 - - # Healthcheck endpoint address - healthcheckAddress: ":9000" - - # Logging configuration - logLevel: "info" # Default log level for all components: panic, fatal, error, warn, info, debug, trace - logFile: "console" # "console" or path to log file - - # TLS configuration (optional) - tls: - certFile: "" - keyFile: "" - letsencrypt: - enabled: false - dataDir: "" - domains: [] - email: "" - awsRoute53: false - - # Shared secret for relay authentication (required when running local relay) - authSecret: "your-secret-key-here" - - # Data directory for all services - dataDir: "/var/lib/netbird/" - - # ============================================================================ - # External Service Overrides (optional) - # Use these to point to external Signal, Relay, or STUN servers instead of - # running them locally. When set, the corresponding local service is disabled. - # ============================================================================ - - # External STUN servers - disables local STUN server - # stuns: - # - uri: "stun:stun.example.com:3478" - # - uri: "stun:stun.example.com:3479" - - # External relay servers - disables local relay server - # relays: - # addresses: - # - "rels://relay.example.com:443" - # credentialsTTL: "12h" - # secret: "relay-shared-secret" - - # External signal server - disables local signal server - # signalUri: "https://signal.example.com:443" - - # ============================================================================ - # Management Settings - # ============================================================================ - - # Metrics and updates - disableAnonymousMetrics: false - disableGeoliteUpdate: false - - # Embedded authentication/identity provider (Dex) configuration (always enabled) - auth: - # OIDC issuer URL - must be publicly accessible - issuer: "https://server.mycompany.com/oauth2" - localAuthDisabled: false - signKeyRefreshEnabled: false - # OAuth2 redirect URIs for dashboard - dashboardRedirectURIs: - - "https://app.netbird.io/nb-auth" - - "https://app.netbird.io/nb-silent-auth" - # OAuth2 redirect URIs for CLI - cliRedirectURIs: - - "http://localhost:53000/" - # Optional initial admin user - # owner: - # email: "admin@example.com" - # password: "initial-password" - - # Store configuration - store: - engine: "sqlite" # sqlite, postgres, or mysql - dsn: "" # Connection string for postgres or mysql - encryptionKey: "" - - # Reverse proxy settings (optional) - # reverseProxy: - # trustedHTTPProxies: [] - # trustedHTTPProxiesCount: 0 - # trustedPeers: [] \ No newline at end of file diff --git a/combined/config.yaml.example b/combined/config.yaml.example index 6cb10e04d..b3b38c5a9 100644 --- a/combined/config.yaml.example +++ b/combined/config.yaml.example @@ -1,11 +1,29 @@ -# Simplified Combined NetBird Server Configuration +# NetBird Combined Server Configuration # Copy this file to config.yaml and customize for your deployment +# +# This is a Management server with optional embedded Signal, Relay, and STUN services. +# By default, all services run locally. You can use external services instead by +# setting the corresponding override fields. +# +# Architecture: +# - Management: Always runs locally (this IS the management server) +# - Signal: Local by default; set 'signalUri' to use external (disables local) +# - Relay: Local by default; set 'relays' to use external (disables local) +# - STUN: Local on port 3478 by default; set 'stuns' to use external instead -# Server-wide settings server: # Main HTTP/gRPC port for all services (Management, Signal, Relay) listenAddress: ":443" + # Public address that peers will use to connect to this server + # Used for relay connections and management DNS domain + # Format: protocol://hostname:port (e.g., https://server.mycompany.com:443) + exposedAddress: "https://server.mycompany.com:443" + + # STUN server ports (defaults to [3478] if not specified; set 'stuns' to use external) + # stunPorts: + # - 3478 + # Metrics endpoint port metricsPort: 9090 @@ -13,7 +31,7 @@ server: healthcheckAddress: ":9000" # Logging configuration - logLevel: "info" # panic, fatal, error, warn, info, debug, trace + logLevel: "info" # Default log level for all components: panic, fatal, error, warn, info, debug, trace logFile: "console" # "console" or path to log file # TLS configuration (optional) @@ -27,53 +45,45 @@ server: email: "" awsRoute53: false -# Relay service configuration -relay: - # Enable/disable the relay service - enabled: true - - # Public address that peers will use to connect to this relay - # Format: hostname:port or ip:port - exposedAddress: "relay.example.com:443" - - # Shared secret for relay authentication (required when enabled) + # Shared secret for relay authentication (required when running local relay) authSecret: "your-secret-key-here" - # Log level for relay (reserved for future use, currently uses global log level) - logLevel: "info" - - # Embedded STUN server (optional) - stun: - enabled: false - ports: [3478] - logLevel: "info" - -# Signal service configuration -signal: - # Enable/disable the signal service - enabled: true - - # Log level for signal (reserved for future use, currently uses global log level) - logLevel: "info" - -# Management service configuration -management: - # Enable/disable the management service - enabled: true - - # Data directory for management service + # Data directory for all services dataDir: "/var/lib/netbird/" - # DNS domain for the management server - dnsDomain: "" + # ============================================================================ + # External Service Overrides (optional) + # Use these to point to external Signal, Relay, or STUN servers instead of + # running them locally. When set, the corresponding local service is disabled. + # ============================================================================ + + # External STUN servers - disables local STUN server + # stuns: + # - uri: "stun:stun.example.com:3478" + # - uri: "stun:stun.example.com:3479" + + # External relay servers - disables local relay server + # relays: + # addresses: + # - "rels://relay.example.com:443" + # credentialsTTL: "12h" + # secret: "relay-shared-secret" + + # External signal server - disables local signal server + # signalUri: "https://signal.example.com:443" + + # ============================================================================ + # Management Settings + # ============================================================================ # Metrics and updates disableAnonymousMetrics: false disableGeoliteUpdate: false + # Embedded authentication/identity provider (Dex) configuration (always enabled) auth: # OIDC issuer URL - must be publicly accessible - issuer: "https://management.example.com/oauth2" + issuer: "https://example.com/oauth2" localAuthDisabled: false signKeyRefreshEnabled: false # OAuth2 redirect URIs for dashboard @@ -88,28 +98,14 @@ management: # email: "admin@example.com" # password: "initial-password" - # External STUN servers (for client config) - stuns: [] - # - uri: "stun:stun.example.com:3478" - - # External relay servers (for client config) - relays: - addresses: [] - # - "rels://relay.example.com:443" - credentialsTTL: "12h" - secret: "" - - # External signal server URI (for client config) - signalUri: "" - # Store configuration store: engine: "sqlite" # sqlite, postgres, or mysql dsn: "" # Connection string for postgres or mysql encryptionKey: "" - # Reverse proxy settings - reverseProxy: - trustedHTTPProxies: [] - trustedHTTPProxiesCount: 0 - trustedPeers: [] + # Reverse proxy settings (optional) + # reverseProxy: + # trustedHTTPProxies: [] + # trustedHTTPProxiesCount: 0 + # trustedPeers: [] From a6db88fbd2b9aa4dd8ef77233d2cd3c937fcbca1 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Thu, 19 Feb 2026 11:23:42 +0100 Subject: [PATCH 147/374] [misc] Update timestamp format with milliseconds (#5387) * Update timestamp format with milliseconds * fix tests --- formatter/txt/formatter.go | 4 +--- formatter/txt/formatter_test.go | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/formatter/txt/formatter.go b/formatter/txt/formatter.go index 3b2a3fb4d..4f174a740 100644 --- a/formatter/txt/formatter.go +++ b/formatter/txt/formatter.go @@ -1,8 +1,6 @@ package txt import ( - "time" - "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/formatter/levels" @@ -18,7 +16,7 @@ type TextFormatter struct { func NewTextFormatter() *TextFormatter { return &TextFormatter{ levelDesc: levels.ValidLevelDesc, - timestampFormat: time.RFC3339, // or RFC3339 + timestampFormat: "2006-01-02T15:04:05.000Z07:00", } } diff --git a/formatter/txt/formatter_test.go b/formatter/txt/formatter_test.go index 590af5d50..1b20a3ebf 100644 --- a/formatter/txt/formatter_test.go +++ b/formatter/txt/formatter_test.go @@ -21,6 +21,6 @@ func TestLogTextFormat(t *testing.T) { result, _ := formatter.Format(someEntry) parsedString := string(result) - expectedString := "^2021-02-21T01:10:30Z WARN \\[(att1: 1, att2: 2|att2: 2, att1: 1)\\] some/fancy/path.go:46: Some Message\\s+$" + expectedString := "^2021-02-21T01:10:30.000Z WARN \\[(att1: 1, att2: 2|att2: 2, att1: 1)\\] some/fancy/path.go:46: Some Message\\s+$" assert.Regexp(t, expectedString, parsedString) } From 564fa4ab04dcd18831c67eb31c28fa62e141db30 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Thu, 19 Feb 2026 18:34:28 +0100 Subject: [PATCH 148/374] [management] fix possible race condition on user role change (#5395) --- management/server/user.go | 13 +++++- management/server/user_test.go | 84 ++++++++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+), 1 deletion(-) diff --git a/management/server/user.go b/management/server/user.go index 48005f325..924efc1e4 100644 --- a/management/server/user.go +++ b/management/server/user.go @@ -737,6 +737,14 @@ func (am *DefaultAccountManager) processUserUpdate(ctx context.Context, transact return false, nil, nil, nil, status.Errorf(status.InvalidArgument, "provided user update is nil") } + if initiatorUserId != activity.SystemInitiator { + freshInitiator, err := transaction.GetUserByUserID(ctx, store.LockingStrengthUpdate, initiatorUserId) + if err != nil { + return false, nil, nil, nil, fmt.Errorf("failed to re-read initiator user in transaction: %w", err) + } + initiatorUser = freshInitiator + } + oldUser, isNewUser, err := getUserOrCreateIfNotExists(ctx, transaction, accountID, update, addIfNotExists) if err != nil { return false, nil, nil, nil, err @@ -864,7 +872,10 @@ func validateUserUpdate(groupsMap map[string]*types.Group, initiatorUser, oldUse return nil } - // @todo double check these + if !initiatorUser.HasAdminPower() { + return status.Errorf(status.PermissionDenied, "only admins and owners can update users") + } + if initiatorUser.HasAdminPower() && initiatorUser.Id == update.Id && oldUser.Blocked != update.Blocked { return status.Errorf(status.PermissionDenied, "admins can't block or unblock themselves") } diff --git a/management/server/user_test.go b/management/server/user_test.go index 2dd1cea2e..72a19a9a5 100644 --- a/management/server/user_test.go +++ b/management/server/user_test.go @@ -2031,3 +2031,87 @@ func TestUser_Operations_WithEmbeddedIDP(t *testing.T) { t.Logf("Duplicate email error: %v", err) }) } + +func TestValidateUserUpdate_RejectsNonAdminInitiator(t *testing.T) { + groupsMap := map[string]*types.Group{} + + initiator := &types.User{ + Id: "initiator", + Role: types.UserRoleUser, + } + oldUser := &types.User{ + Id: "target", + Role: types.UserRoleUser, + } + update := &types.User{ + Id: "target", + Role: types.UserRoleOwner, + } + + err := validateUserUpdate(groupsMap, initiator, oldUser, update) + require.Error(t, err, "regular user should not be able to promote to owner") + assert.Contains(t, err.Error(), "only admins and owners can update users") +} + +func TestProcessUserUpdate_RejectsStaleInitiatorRole(t *testing.T) { + s, cleanup, err := store.NewTestStoreFromSQL(context.Background(), "", t.TempDir()) + require.NoError(t, err) + t.Cleanup(cleanup) + + account := newAccountWithId(context.Background(), "account1", "owner1", "", "", "", false) + + adminID := "admin1" + account.Users[adminID] = types.NewAdminUser(adminID) + + targetID := "target1" + account.Users[targetID] = types.NewRegularUser(targetID, "", "") + + require.NoError(t, s.SaveAccount(context.Background(), account)) + + demotedAdmin, err := s.GetUserByUserID(context.Background(), store.LockingStrengthNone, adminID) + require.NoError(t, err) + demotedAdmin.Role = types.UserRoleUser + require.NoError(t, s.SaveUser(context.Background(), demotedAdmin)) + + staleInitiator := &types.User{ + Id: adminID, + AccountID: account.Id, + Role: types.UserRoleAdmin, + } + + permissionsManager := permissions.NewManager(s) + am := DefaultAccountManager{ + Store: s, + eventStore: &activity.InMemoryEventStore{}, + permissionsManager: permissionsManager, + } + + settings, err := s.GetAccountSettings(context.Background(), store.LockingStrengthNone, account.Id) + require.NoError(t, err) + + groups, err := s.GetAccountGroups(context.Background(), store.LockingStrengthNone, account.Id) + require.NoError(t, err) + groupsMap := make(map[string]*types.Group, len(groups)) + for _, g := range groups { + groupsMap[g.ID] = g + } + + update := &types.User{ + Id: targetID, + Role: types.UserRoleAdmin, + } + + err = s.ExecuteInTransaction(context.Background(), func(tx store.Store) error { + _, _, _, _, txErr := am.processUserUpdate( + context.Background(), tx, groupsMap, account.Id, adminID, staleInitiator, update, false, settings, + ) + return txErr + }) + + require.Error(t, err, "processUserUpdate should reject stale initiator whose role was demoted") + assert.Contains(t, err.Error(), "only admins and owners can update users") + + targetUser, err := s.GetUserByUserID(context.Background(), store.LockingStrengthNone, targetID) + require.NoError(t, err) + assert.Equal(t, types.UserRoleUser, targetUser.Role) +} From fc6b93ae59a02e162337570a93b27a081007cf64 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Thu, 19 Feb 2026 18:53:10 +0100 Subject: [PATCH 149/374] [ios] Ensure route settlement on iOS before handling DNS responses (#5360) * Ensure route settlement on iOS before handling DNS responses to prevent bypassing the tunnel. * add more logs * rollback debug changes * rollback changes * [client] Improve logging and add comments for iOS route settlement logic - Switch iOS route settlement log level from Debug to Trace for finer control. - Add clarifying comments for `waitForRouteSettlement` on non-iOS platforms. --------- Co-authored-by: mlsmaycon --- client/internal/engine.go | 2 +- .../routemanager/dnsinterceptor/handler.go | 5 +++++ .../dnsinterceptor/handler_ios.go | 20 +++++++++++++++++++ .../dnsinterceptor/handler_nonios.go | 12 +++++++++++ 4 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 client/internal/routemanager/dnsinterceptor/handler_ios.go create mode 100644 client/internal/routemanager/dnsinterceptor/handler_nonios.go diff --git a/client/internal/engine.go b/client/internal/engine.go index 4f3cf0998..beb2a411c 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -28,8 +28,8 @@ import ( "github.com/netbirdio/netbird/client/firewall" firewallManager "github.com/netbirdio/netbird/client/firewall/manager" "github.com/netbirdio/netbird/client/iface" - nbnetstack "github.com/netbirdio/netbird/client/iface/netstack" "github.com/netbirdio/netbird/client/iface/device" + nbnetstack "github.com/netbirdio/netbird/client/iface/netstack" "github.com/netbirdio/netbird/client/iface/udpmux" "github.com/netbirdio/netbird/client/internal/acl" "github.com/netbirdio/netbird/client/internal/debug" diff --git a/client/internal/routemanager/dnsinterceptor/handler.go b/client/internal/routemanager/dnsinterceptor/handler.go index 12c9ff4af..4bf0d5476 100644 --- a/client/internal/routemanager/dnsinterceptor/handler.go +++ b/client/internal/routemanager/dnsinterceptor/handler.go @@ -351,6 +351,11 @@ func (d *DnsInterceptor) writeMsg(w dns.ResponseWriter, r *dns.Msg, logger *log. logger.Errorf("failed to update domain prefixes: %v", err) } + // Allow time for route changes to be applied before sending + // the DNS response (relevant on iOS where setTunnelNetworkSettings + // is asynchronous). + waitForRouteSettlement(logger) + d.replaceIPsInDNSResponse(r, newPrefixes, logger) } } diff --git a/client/internal/routemanager/dnsinterceptor/handler_ios.go b/client/internal/routemanager/dnsinterceptor/handler_ios.go new file mode 100644 index 000000000..4cf80eb16 --- /dev/null +++ b/client/internal/routemanager/dnsinterceptor/handler_ios.go @@ -0,0 +1,20 @@ +//go:build ios + +package dnsinterceptor + +import ( + "time" + + log "github.com/sirupsen/logrus" +) + +const routeSettleDelay = 500 * time.Millisecond + +// waitForRouteSettlement introduces a short delay on iOS to allow +// setTunnelNetworkSettings to apply route changes before the DNS +// response reaches the application. Without this, the first request +// to a newly resolved domain may bypass the tunnel. +func waitForRouteSettlement(logger *log.Entry) { + logger.Tracef("waiting %v for iOS route settlement", routeSettleDelay) + time.Sleep(routeSettleDelay) +} diff --git a/client/internal/routemanager/dnsinterceptor/handler_nonios.go b/client/internal/routemanager/dnsinterceptor/handler_nonios.go new file mode 100644 index 000000000..68cd7330b --- /dev/null +++ b/client/internal/routemanager/dnsinterceptor/handler_nonios.go @@ -0,0 +1,12 @@ +//go:build !ios + +package dnsinterceptor + +import log "github.com/sirupsen/logrus" + +func waitForRouteSettlement(_ *log.Entry) { + // No-op on non-iOS platforms: route changes are applied synchronously by + // the kernel, so no settlement delay is needed before the DNS response + // reaches the application. The delay is only required on iOS where + // setTunnelNetworkSettings applies routes asynchronously. +} From f117fc7509268944e307adaf05b6225d790f7600 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Thu, 19 Feb 2026 19:18:47 +0100 Subject: [PATCH 150/374] [client] Log lock acquisition time in receive message handling (#5393) * Log lock acquisition time in receive message handling * use offerAnswer.SessionID for session id --- client/internal/engine.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/client/internal/engine.go b/client/internal/engine.go index beb2a411c..f2d724aa4 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -1562,8 +1562,10 @@ func (e *Engine) receiveSignalEvents() { defer e.shutdownWg.Done() // connect to a stream of messages coming from the signal server err := e.signal.Receive(e.ctx, func(msg *sProto.Message) error { + start := time.Now() e.syncMsgMux.Lock() defer e.syncMsgMux.Unlock() + gotLock := time.Since(start) // Check context INSIDE lock to ensure atomicity with shutdown if e.ctx.Err() != nil { @@ -1587,6 +1589,8 @@ func (e *Engine) receiveSignalEvents() { return err } + log.Debugf("receiveMSG: took %s to get lock for peer %s with session id %s", gotLock, msg.Key, offerAnswer.SessionID) + if msg.Body.Type == sProto.Body_OFFER { conn.OnRemoteOffer(*offerAnswer) } else { From 36752a8cbb4b01b51eaaa2bbf4ca0cb97ad2b5cb Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Fri, 20 Feb 2026 00:11:28 +0100 Subject: [PATCH 151/374] [proxy] add access log cleanup (#5376) --- .../reverseproxy/accesslogs/interface.go | 3 + .../accesslogs/manager/manager.go | 70 +++++ .../accesslogs/manager/manager_test.go | 281 ++++++++++++++++++ management/internals/server/boot.go | 5 + management/internals/server/config/config.go | 9 + .../proxy/auth_callback_integration_test.go | 12 + management/server/store/sql_store.go | 14 + management/server/store/store.go | 1 + management/server/store/store_mock.go | 15 + proxy/management_integration_test.go | 12 + 10 files changed, 422 insertions(+) create mode 100644 management/internals/modules/reverseproxy/accesslogs/manager/manager_test.go diff --git a/management/internals/modules/reverseproxy/accesslogs/interface.go b/management/internals/modules/reverseproxy/accesslogs/interface.go index 1c51a8a7d..04f096bf1 100644 --- a/management/internals/modules/reverseproxy/accesslogs/interface.go +++ b/management/internals/modules/reverseproxy/accesslogs/interface.go @@ -7,4 +7,7 @@ import ( type Manager interface { SaveAccessLog(ctx context.Context, proxyLog *AccessLogEntry) error GetAllAccessLogs(ctx context.Context, accountID, userID string, filter *AccessLogFilter) ([]*AccessLogEntry, int64, error) + CleanupOldAccessLogs(ctx context.Context, retentionDays int) (int64, error) + StartPeriodicCleanup(ctx context.Context, retentionDays, cleanupIntervalHours int) + StopPeriodicCleanup() } diff --git a/management/internals/modules/reverseproxy/accesslogs/manager/manager.go b/management/internals/modules/reverseproxy/accesslogs/manager/manager.go index 7bcdecb1b..e7fba7bed 100644 --- a/management/internals/modules/reverseproxy/accesslogs/manager/manager.go +++ b/management/internals/modules/reverseproxy/accesslogs/manager/manager.go @@ -3,6 +3,7 @@ package manager import ( "context" "strings" + "time" log "github.com/sirupsen/logrus" @@ -19,6 +20,7 @@ type managerImpl struct { store store.Store permissionsManager permissions.Manager geo geolocation.Geolocation + cleanupCancel context.CancelFunc } func NewManager(store store.Store, permissionsManager permissions.Manager, geo geolocation.Geolocation) accesslogs.Manager { @@ -78,6 +80,74 @@ func (m *managerImpl) GetAllAccessLogs(ctx context.Context, accountID, userID st return logs, totalCount, nil } +// CleanupOldAccessLogs deletes access logs older than the specified retention period +func (m *managerImpl) CleanupOldAccessLogs(ctx context.Context, retentionDays int) (int64, error) { + if retentionDays <= 0 { + log.WithContext(ctx).Debug("access log cleanup skipped: retention days is 0 or negative") + return 0, nil + } + + cutoffTime := time.Now().AddDate(0, 0, -retentionDays) + deletedCount, err := m.store.DeleteOldAccessLogs(ctx, cutoffTime) + if err != nil { + log.WithContext(ctx).Errorf("failed to cleanup old access logs: %v", err) + return 0, err + } + + if deletedCount > 0 { + log.WithContext(ctx).Infof("cleaned up %d access logs older than %d days", deletedCount, retentionDays) + } + + return deletedCount, nil +} + +// StartPeriodicCleanup starts a background goroutine that periodically cleans up old access logs +func (m *managerImpl) StartPeriodicCleanup(ctx context.Context, retentionDays, cleanupIntervalHours int) { + if retentionDays <= 0 { + log.WithContext(ctx).Debug("periodic access log cleanup disabled: retention days is 0 or negative") + return + } + + if cleanupIntervalHours <= 0 { + cleanupIntervalHours = 24 + } + + cleanupCtx, cancel := context.WithCancel(ctx) + m.cleanupCancel = cancel + + cleanupInterval := time.Duration(cleanupIntervalHours) * time.Hour + ticker := time.NewTicker(cleanupInterval) + + go func() { + defer ticker.Stop() + + // Run cleanup immediately on startup + log.WithContext(cleanupCtx).Infof("starting access log cleanup routine (retention: %d days, interval: %d hours)", retentionDays, cleanupIntervalHours) + if _, err := m.CleanupOldAccessLogs(cleanupCtx, retentionDays); err != nil { + log.WithContext(cleanupCtx).Errorf("initial access log cleanup failed: %v", err) + } + + for { + select { + case <-cleanupCtx.Done(): + log.WithContext(cleanupCtx).Info("stopping access log cleanup routine") + return + case <-ticker.C: + if _, err := m.CleanupOldAccessLogs(cleanupCtx, retentionDays); err != nil { + log.WithContext(cleanupCtx).Errorf("periodic access log cleanup failed: %v", err) + } + } + } + }() +} + +// StopPeriodicCleanup stops the periodic cleanup routine +func (m *managerImpl) StopPeriodicCleanup() { + if m.cleanupCancel != nil { + m.cleanupCancel() + } +} + // resolveUserFilters converts user email/name filters to user ID filter func (m *managerImpl) resolveUserFilters(ctx context.Context, accountID string, filter *accesslogs.AccessLogFilter) error { if filter.UserEmail == nil && filter.UserName == nil { diff --git a/management/internals/modules/reverseproxy/accesslogs/manager/manager_test.go b/management/internals/modules/reverseproxy/accesslogs/manager/manager_test.go new file mode 100644 index 000000000..8fadef85f --- /dev/null +++ b/management/internals/modules/reverseproxy/accesslogs/manager/manager_test.go @@ -0,0 +1,281 @@ +package manager + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/server/store" +) + +func TestCleanupOldAccessLogs(t *testing.T) { + tests := []struct { + name string + retentionDays int + setupMock func(*store.MockStore) + expectedCount int64 + expectedError bool + }{ + { + name: "cleanup logs older than retention period", + retentionDays: 30, + setupMock: func(mockStore *store.MockStore) { + mockStore.EXPECT(). + DeleteOldAccessLogs(gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, olderThan time.Time) (int64, error) { + expectedCutoff := time.Now().AddDate(0, 0, -30) + timeDiff := olderThan.Sub(expectedCutoff) + if timeDiff.Abs() > time.Second { + t.Errorf("cutoff time not as expected: got %v, want ~%v", olderThan, expectedCutoff) + } + return 5, nil + }) + }, + expectedCount: 5, + expectedError: false, + }, + { + name: "no logs to cleanup", + retentionDays: 30, + setupMock: func(mockStore *store.MockStore) { + mockStore.EXPECT(). + DeleteOldAccessLogs(gomock.Any(), gomock.Any()). + Return(int64(0), nil) + }, + expectedCount: 0, + expectedError: false, + }, + { + name: "zero retention days skips cleanup", + retentionDays: 0, + setupMock: func(mockStore *store.MockStore) { + // No expectations - DeleteOldAccessLogs should not be called + }, + expectedCount: 0, + expectedError: false, + }, + { + name: "negative retention days skips cleanup", + retentionDays: -10, + setupMock: func(mockStore *store.MockStore) { + // No expectations - DeleteOldAccessLogs should not be called + }, + expectedCount: 0, + expectedError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockStore := store.NewMockStore(ctrl) + tt.setupMock(mockStore) + + manager := &managerImpl{ + store: mockStore, + } + + ctx := context.Background() + deletedCount, err := manager.CleanupOldAccessLogs(ctx, tt.retentionDays) + + if tt.expectedError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + assert.Equal(t, tt.expectedCount, deletedCount, "unexpected number of deleted logs") + }) + } +} + +func TestCleanupWithExactBoundary(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockStore := store.NewMockStore(ctrl) + + mockStore.EXPECT(). + DeleteOldAccessLogs(gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, olderThan time.Time) (int64, error) { + expectedCutoff := time.Now().AddDate(0, 0, -30) + timeDiff := olderThan.Sub(expectedCutoff) + assert.Less(t, timeDiff.Abs(), time.Second, "cutoff time should be close to expected value") + return 1, nil + }) + + manager := &managerImpl{ + store: mockStore, + } + + ctx := context.Background() + deletedCount, err := manager.CleanupOldAccessLogs(ctx, 30) + + require.NoError(t, err) + assert.Equal(t, int64(1), deletedCount) +} + +func TestStartPeriodicCleanup(t *testing.T) { + t.Run("periodic cleanup disabled with zero retention", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockStore := store.NewMockStore(ctrl) + // No expectations - cleanup should not run + + manager := &managerImpl{ + store: mockStore, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + manager.StartPeriodicCleanup(ctx, 0, 1) + + time.Sleep(100 * time.Millisecond) + + // If DeleteOldAccessLogs was called, the test will fail due to unexpected call + }) + + t.Run("periodic cleanup runs immediately on start", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockStore := store.NewMockStore(ctrl) + + mockStore.EXPECT(). + DeleteOldAccessLogs(gomock.Any(), gomock.Any()). + Return(int64(2), nil). + Times(1) + + manager := &managerImpl{ + store: mockStore, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + manager.StartPeriodicCleanup(ctx, 30, 24) + + time.Sleep(200 * time.Millisecond) + + // Expectations verified by gomock on defer ctrl.Finish() + }) + + t.Run("periodic cleanup stops on context cancel", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockStore := store.NewMockStore(ctrl) + + mockStore.EXPECT(). + DeleteOldAccessLogs(gomock.Any(), gomock.Any()). + Return(int64(1), nil). + Times(1) + + manager := &managerImpl{ + store: mockStore, + } + + ctx, cancel := context.WithCancel(context.Background()) + + manager.StartPeriodicCleanup(ctx, 30, 24) + + time.Sleep(100 * time.Millisecond) + + cancel() + + time.Sleep(200 * time.Millisecond) + + }) + + t.Run("cleanup interval defaults to 24 hours when invalid", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockStore := store.NewMockStore(ctrl) + + mockStore.EXPECT(). + DeleteOldAccessLogs(gomock.Any(), gomock.Any()). + Return(int64(0), nil). + Times(1) + + manager := &managerImpl{ + store: mockStore, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + manager.StartPeriodicCleanup(ctx, 30, 0) + + time.Sleep(100 * time.Millisecond) + + manager.StopPeriodicCleanup() + }) + + t.Run("cleanup interval uses configured hours", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockStore := store.NewMockStore(ctrl) + + mockStore.EXPECT(). + DeleteOldAccessLogs(gomock.Any(), gomock.Any()). + Return(int64(3), nil). + Times(1) + + manager := &managerImpl{ + store: mockStore, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + manager.StartPeriodicCleanup(ctx, 30, 12) + + time.Sleep(100 * time.Millisecond) + + manager.StopPeriodicCleanup() + }) +} + +func TestStopPeriodicCleanup(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockStore := store.NewMockStore(ctrl) + + mockStore.EXPECT(). + DeleteOldAccessLogs(gomock.Any(), gomock.Any()). + Return(int64(1), nil). + Times(1) + + manager := &managerImpl{ + store: mockStore, + } + + ctx := context.Background() + + manager.StartPeriodicCleanup(ctx, 30, 24) + + time.Sleep(100 * time.Millisecond) + + manager.StopPeriodicCleanup() + + time.Sleep(200 * time.Millisecond) + + // Expectations verified by gomock - would fail if more than 1 call happened +} + +func TestStopPeriodicCleanup_NotStarted(t *testing.T) { + manager := &managerImpl{} + + // Should not panic if cleanup was never started + manager.StopPeriodicCleanup() +} diff --git a/management/internals/server/boot.go b/management/internals/server/boot.go index 7da1e6898..e897a09f5 100644 --- a/management/internals/server/boot.go +++ b/management/internals/server/boot.go @@ -197,6 +197,11 @@ func (s *BaseServer) ProxyTokenStore() *nbgrpc.OneTimeTokenStore { func (s *BaseServer) AccessLogsManager() accesslogs.Manager { return Create(s, func() accesslogs.Manager { accessLogManager := accesslogsmanager.NewManager(s.Store(), s.PermissionsManager(), s.GeoLocationManager()) + accessLogManager.StartPeriodicCleanup( + context.Background(), + s.Config.ReverseProxy.AccessLogRetentionDays, + s.Config.ReverseProxy.AccessLogCleanupIntervalHours, + ) return accessLogManager }) } diff --git a/management/internals/server/config/config.go b/management/internals/server/config/config.go index 5ed1c3ede..0ba393263 100644 --- a/management/internals/server/config/config.go +++ b/management/internals/server/config/config.go @@ -200,4 +200,13 @@ type ReverseProxy struct { // request headers if the peer's address falls within one of these // trusted IP prefixes. TrustedPeers []netip.Prefix + + // AccessLogRetentionDays specifies the number of days to retain access logs. + // Logs older than this duration will be automatically deleted during cleanup. + // A value of 0 or negative means logs are kept indefinitely (no cleanup). + AccessLogRetentionDays int + + // AccessLogCleanupIntervalHours specifies how often (in hours) to run the cleanup routine. + // Defaults to 24 hours if not set or set to 0. + AccessLogCleanupIntervalHours int } diff --git a/management/server/http/handlers/proxy/auth_callback_integration_test.go b/management/server/http/handlers/proxy/auth_callback_integration_test.go index 0a9a560cd..6a1b144f6 100644 --- a/management/server/http/handlers/proxy/auth_callback_integration_test.go +++ b/management/server/http/handlers/proxy/auth_callback_integration_test.go @@ -157,6 +157,18 @@ type testSetup struct { // testAccessLogManager is a minimal mock for accesslogs.Manager. type testAccessLogManager struct{} +func (m *testAccessLogManager) CleanupOldAccessLogs(ctx context.Context, retentionDays int) (int64, error) { + return 0, nil +} + +func (m *testAccessLogManager) StartPeriodicCleanup(ctx context.Context, retentionDays, cleanupIntervalHours int) { + return +} + +func (m *testAccessLogManager) StopPeriodicCleanup() { + return +} + func (m *testAccessLogManager) SaveAccessLog(_ context.Context, _ *accesslogs.AccessLogEntry) error { return nil } diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index db7cfd32d..e528cb4fb 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -5100,6 +5100,20 @@ func (s *SqlStore) GetAccountAccessLogs(ctx context.Context, lockStrength Lockin return logs, totalCount, nil } +// DeleteOldAccessLogs deletes all access logs older than the specified time +func (s *SqlStore) DeleteOldAccessLogs(ctx context.Context, olderThan time.Time) (int64, error) { + result := s.db.WithContext(ctx). + Where("timestamp < ?", olderThan). + Delete(&accesslogs.AccessLogEntry{}) + + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to delete old access logs: %v", result.Error) + return 0, status.Errorf(status.Internal, "failed to delete old access logs") + } + + return result.RowsAffected, nil +} + // applyAccessLogFilters applies filter conditions to the query func (s *SqlStore) applyAccessLogFilters(query *gorm.DB, filter accesslogs.AccessLogFilter) *gorm.DB { if filter.Search != nil { diff --git a/management/server/store/store.go b/management/server/store/store.go index a8e44a438..2bc688a11 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -269,6 +269,7 @@ type Store interface { CreateAccessLog(ctx context.Context, log *accesslogs.AccessLogEntry) error GetAccountAccessLogs(ctx context.Context, lockStrength LockingStrength, accountID string, filter accesslogs.AccessLogFilter) ([]*accesslogs.AccessLogEntry, int64, error) + DeleteOldAccessLogs(ctx context.Context, olderThan time.Time) (int64, error) GetServiceTargetByTargetID(ctx context.Context, lockStrength LockingStrength, accountID string, targetID string) (*reverseproxy.Target, error) } diff --git a/management/server/store/store_mock.go b/management/server/store/store_mock.go index 2f451dc43..79d275298 100644 --- a/management/server/store/store_mock.go +++ b/management/server/store/store_mock.go @@ -460,6 +460,21 @@ func (mr *MockStoreMockRecorder) DeleteNetworkRouter(ctx, accountID, routerID in return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNetworkRouter", reflect.TypeOf((*MockStore)(nil).DeleteNetworkRouter), ctx, accountID, routerID) } +// DeleteOldAccessLogs mocks base method. +func (m *MockStore) DeleteOldAccessLogs(ctx context.Context, olderThan time.Time) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteOldAccessLogs", ctx, olderThan) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteOldAccessLogs indicates an expected call of DeleteOldAccessLogs. +func (mr *MockStoreMockRecorder) DeleteOldAccessLogs(ctx, olderThan interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOldAccessLogs", reflect.TypeOf((*MockStore)(nil).DeleteOldAccessLogs), ctx, olderThan) +} + // DeletePAT mocks base method. func (m *MockStore) DeletePAT(ctx context.Context, userID, patID string) error { m.ctrl.T.Helper() diff --git a/proxy/management_integration_test.go b/proxy/management_integration_test.go index 53d7019f7..1163c50f4 100644 --- a/proxy/management_integration_test.go +++ b/proxy/management_integration_test.go @@ -165,6 +165,18 @@ func setupIntegrationTest(t *testing.T) *integrationTestSetup { // testAccessLogManager provides access log storage for testing. type testAccessLogManager struct{} +func (m *testAccessLogManager) CleanupOldAccessLogs(ctx context.Context, retentionDays int) (int64, error) { + return 0, nil +} + +func (m *testAccessLogManager) StartPeriodicCleanup(ctx context.Context, retentionDays, cleanupIntervalHours int) { + // noop +} + +func (m *testAccessLogManager) StopPeriodicCleanup() { + // noop +} + func (m *testAccessLogManager) SaveAccessLog(_ context.Context, _ *accesslogs.AccessLogEntry) error { return nil } From 5ca1b64328e7dfe804a79198ba9e3c8bd1b2ba8b Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Fri, 20 Feb 2026 00:11:55 +0100 Subject: [PATCH 152/374] [management] access log sorting (#5378) --- .../modules/reverseproxy/accesslogs/filter.go | 73 ++++++- .../reverseproxy/accesslogs/filter_test.go | 199 ++++++++++++++++++ management/server/store/sql_store.go | 14 +- shared/management/http/api/openapi.yml | 14 ++ 4 files changed, 297 insertions(+), 3 deletions(-) diff --git a/management/internals/modules/reverseproxy/accesslogs/filter.go b/management/internals/modules/reverseproxy/accesslogs/filter.go index f4b0a2048..a1fa28312 100644 --- a/management/internals/modules/reverseproxy/accesslogs/filter.go +++ b/management/internals/modules/reverseproxy/accesslogs/filter.go @@ -3,6 +3,7 @@ package accesslogs import ( "net/http" "strconv" + "strings" "time" ) @@ -11,15 +12,39 @@ const ( DefaultPageSize = 50 // MaxPageSize is the maximum number of records allowed per page MaxPageSize = 100 + + // Default sorting + DefaultSortBy = "timestamp" + DefaultSortOrder = "desc" ) -// AccessLogFilter holds pagination and filtering parameters for access logs +// Valid sortable fields mapped to their database column names or expressions +// For multi-column sorts, columns are separated by comma (e.g., "host, path") +var validSortFields = map[string]string{ + "timestamp": "timestamp", + "url": "host, path", // Sort by host first, then path + "host": "host", + "path": "path", + "method": "method", + "status_code": "status_code", + "duration": "duration", + "source_ip": "location_connection_ip", + "user_id": "user_id", + "auth_method": "auth_method_used", + "reason": "reason", +} + +// AccessLogFilter holds pagination, filtering, and sorting parameters for access logs type AccessLogFilter struct { // Page is the current page number (1-indexed) Page int // PageSize is the number of records per page PageSize int + // Sorting parameters + SortBy string // Field to sort by: timestamp, url, host, path, method, status_code, duration, source_ip, user_id, auth_method, reason + SortOrder string // Sort order: asc or desc (default: desc) + // Filtering parameters Search *string // General search across log ID, host, path, source IP, and user fields SourceIP *string // Filter by source IP address @@ -35,13 +60,16 @@ type AccessLogFilter struct { EndDate *time.Time // Filter by timestamp <= end_date } -// ParseFromRequest parses pagination and filter parameters from HTTP request query parameters +// ParseFromRequest parses pagination, sorting, and filter parameters from HTTP request query parameters func (f *AccessLogFilter) ParseFromRequest(r *http.Request) { queryParams := r.URL.Query() f.Page = parsePositiveInt(queryParams.Get("page"), 1) f.PageSize = min(parsePositiveInt(queryParams.Get("page_size"), DefaultPageSize), MaxPageSize) + f.SortBy = parseSortField(queryParams.Get("sort_by")) + f.SortOrder = parseSortOrder(queryParams.Get("sort_order")) + f.Search = parseOptionalString(queryParams.Get("search")) f.SourceIP = parseOptionalString(queryParams.Get("source_ip")) f.Host = parseOptionalString(queryParams.Get("host")) @@ -107,3 +135,44 @@ func (f *AccessLogFilter) GetOffset() int { func (f *AccessLogFilter) GetLimit() int { return f.PageSize } + +// GetSortColumn returns the validated database column name for sorting +func (f *AccessLogFilter) GetSortColumn() string { + if column, ok := validSortFields[f.SortBy]; ok { + return column + } + return validSortFields[DefaultSortBy] +} + +// GetSortOrder returns the validated sort order (ASC or DESC) +func (f *AccessLogFilter) GetSortOrder() string { + if f.SortOrder == "asc" || f.SortOrder == "desc" { + return f.SortOrder + } + return DefaultSortOrder +} + +// parseSortField validates and returns the sort field, defaulting if invalid +func parseSortField(s string) string { + if s == "" { + return DefaultSortBy + } + // Check if the field is valid + if _, ok := validSortFields[s]; ok { + return s + } + return DefaultSortBy +} + +// parseSortOrder validates and returns the sort order, defaulting if invalid +func parseSortOrder(s string) string { + if s == "" { + return DefaultSortOrder + } + // Normalize to lowercase + s = strings.ToLower(s) + if s == "asc" || s == "desc" { + return s + } + return DefaultSortOrder +} diff --git a/management/internals/modules/reverseproxy/accesslogs/filter_test.go b/management/internals/modules/reverseproxy/accesslogs/filter_test.go index 5d48ea9d2..ea1fce54b 100644 --- a/management/internals/modules/reverseproxy/accesslogs/filter_test.go +++ b/management/internals/modules/reverseproxy/accesslogs/filter_test.go @@ -361,6 +361,205 @@ func TestParseOptionalRFC3339(t *testing.T) { } } +func TestAccessLogFilter_SortingDefaults(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/test", nil) + + filter := &AccessLogFilter{} + filter.ParseFromRequest(req) + + assert.Equal(t, DefaultSortBy, filter.SortBy, "SortBy should default to timestamp") + assert.Equal(t, DefaultSortOrder, filter.SortOrder, "SortOrder should default to desc") + assert.Equal(t, "timestamp", filter.GetSortColumn(), "GetSortColumn should return timestamp") + assert.Equal(t, "desc", filter.GetSortOrder(), "GetSortOrder should return desc") +} + +func TestAccessLogFilter_ValidSortFields(t *testing.T) { + tests := []struct { + name string + sortBy string + expectedColumn string + expectedSortByVal string + }{ + {"timestamp", "timestamp", "timestamp", "timestamp"}, + {"url", "url", "host, path", "url"}, + {"host", "host", "host", "host"}, + {"path", "path", "path", "path"}, + {"method", "method", "method", "method"}, + {"status_code", "status_code", "status_code", "status_code"}, + {"duration", "duration", "duration", "duration"}, + {"source_ip", "source_ip", "location_connection_ip", "source_ip"}, + {"user_id", "user_id", "user_id", "user_id"}, + {"auth_method", "auth_method", "auth_method_used", "auth_method"}, + {"reason", "reason", "reason", "reason"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/test?sort_by="+tt.sortBy, nil) + + filter := &AccessLogFilter{} + filter.ParseFromRequest(req) + + assert.Equal(t, tt.expectedSortByVal, filter.SortBy, "SortBy mismatch") + assert.Equal(t, tt.expectedColumn, filter.GetSortColumn(), "GetSortColumn mismatch") + }) + } +} + +func TestAccessLogFilter_InvalidSortField(t *testing.T) { + tests := []struct { + name string + sortBy string + expected string + }{ + {"invalid field", "invalid_field", DefaultSortBy}, + {"empty field", "", DefaultSortBy}, + {"malicious input", "timestamp--DROP", DefaultSortBy}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/test", nil) + q := req.URL.Query() + q.Set("sort_by", tt.sortBy) + req.URL.RawQuery = q.Encode() + + filter := &AccessLogFilter{} + filter.ParseFromRequest(req) + + assert.Equal(t, tt.expected, filter.SortBy, "Invalid sort field should default to timestamp") + assert.Equal(t, validSortFields[DefaultSortBy], filter.GetSortColumn()) + }) + } +} + +func TestAccessLogFilter_SortOrder(t *testing.T) { + tests := []struct { + name string + sortOrder string + expected string + }{ + {"ascending", "asc", "asc"}, + {"descending", "desc", "desc"}, + {"uppercase ASC", "ASC", "asc"}, + {"uppercase DESC", "DESC", "desc"}, + {"mixed case Asc", "Asc", "asc"}, + {"invalid order", "invalid", DefaultSortOrder}, + {"empty order", "", DefaultSortOrder}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/test?sort_order="+tt.sortOrder, nil) + + filter := &AccessLogFilter{} + filter.ParseFromRequest(req) + + assert.Equal(t, tt.expected, filter.GetSortOrder(), "GetSortOrder mismatch") + }) + } +} + +func TestAccessLogFilter_CompleteSortingScenarios(t *testing.T) { + tests := []struct { + name string + sortBy string + sortOrder string + expectedColumn string + expectedOrder string + }{ + { + name: "sort by host ascending", + sortBy: "host", + sortOrder: "asc", + expectedColumn: "host", + expectedOrder: "asc", + }, + { + name: "sort by duration descending", + sortBy: "duration", + sortOrder: "desc", + expectedColumn: "duration", + expectedOrder: "desc", + }, + { + name: "sort by status_code ascending", + sortBy: "status_code", + sortOrder: "asc", + expectedColumn: "status_code", + expectedOrder: "asc", + }, + { + name: "invalid sort with valid order", + sortBy: "invalid", + sortOrder: "asc", + expectedColumn: "timestamp", + expectedOrder: "asc", + }, + { + name: "valid sort with invalid order", + sortBy: "method", + sortOrder: "invalid", + expectedColumn: "method", + expectedOrder: DefaultSortOrder, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/test?sort_by="+tt.sortBy+"&sort_order="+tt.sortOrder, nil) + + filter := &AccessLogFilter{} + filter.ParseFromRequest(req) + + assert.Equal(t, tt.expectedColumn, filter.GetSortColumn()) + assert.Equal(t, tt.expectedOrder, filter.GetSortOrder()) + }) + } +} + +func TestParseSortField(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + {"valid field", "host", "host"}, + {"empty string", "", DefaultSortBy}, + {"invalid field", "invalid", DefaultSortBy}, + {"malicious input", "timestamp--DROP", DefaultSortBy}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := parseSortField(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestParseSortOrder(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + {"asc lowercase", "asc", "asc"}, + {"desc lowercase", "desc", "desc"}, + {"ASC uppercase", "ASC", "asc"}, + {"DESC uppercase", "DESC", "desc"}, + {"invalid", "invalid", DefaultSortOrder}, + {"empty", "", DefaultSortOrder}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := parseSortOrder(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} + // Helper functions for creating pointers func strPtr(s string) *string { return &s diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index e528cb4fb..018e54810 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -5082,8 +5082,20 @@ func (s *SqlStore) GetAccountAccessLogs(ctx context.Context, lockStrength Lockin query = s.applyAccessLogFilters(query, filter) + sortColumns := filter.GetSortColumn() + sortOrder := strings.ToUpper(filter.GetSortOrder()) + + var orderClauses []string + for _, col := range strings.Split(sortColumns, ",") { + col = strings.TrimSpace(col) + if col != "" { + orderClauses = append(orderClauses, col+" "+sortOrder) + } + } + orderClause := strings.Join(orderClauses, ", ") + query = query. - Order("timestamp DESC"). + Order(orderClause). Limit(filter.GetLimit()). Offset(filter.GetOffset()) diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 1f4a163e5..b0ce1b5cc 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -7409,6 +7409,20 @@ paths: minimum: 1 maximum: 100 description: Number of items per page (max 100) + - in: query + name: sort_by + schema: + type: string + enum: [timestamp, url, host, path, method, status_code, duration, source_ip, user_id, auth_method, reason] + default: timestamp + description: Field to sort by (url sorts by host then path) + - in: query + name: sort_order + schema: + type: string + enum: [asc, desc] + default: desc + description: Sort order (ascending or descending) - in: query name: search schema: From 2a26cb45671b4fe077ef4766f0d5ae007cd08e53 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Fri, 20 Feb 2026 14:44:14 +0100 Subject: [PATCH 153/374] [client] stop upstream retry loop immediately on context cancellation (#5403) stop upstream retry loop immediately on context cancellation --- client/internal/dns/upstream.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/client/internal/dns/upstream.go b/client/internal/dns/upstream.go index 0fbd32771..375f6df1c 100644 --- a/client/internal/dns/upstream.go +++ b/client/internal/dns/upstream.go @@ -351,9 +351,13 @@ func (u *upstreamResolverBase) waitUntilResponse() { return fmt.Errorf("upstream check call error") } - err := backoff.Retry(operation, exponentialBackOff) + err := backoff.Retry(operation, backoff.WithContext(exponentialBackOff, u.ctx)) if err != nil { - log.Warn(err) + if errors.Is(err, context.Canceled) { + log.Debugf("upstream retry loop exited for upstreams %s", u.upstreamServersString()) + } else { + log.Warnf("upstream retry loop exited for upstreams %s: %v", u.upstreamServersString(), err) + } return } From 2b98dc4e52597a88e1266ce371e5d3c7a69cf1af Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Sun, 22 Feb 2026 11:58:17 +0200 Subject: [PATCH 154/374] [self-hosted] Support activity store engine in the combined server (#5406) --- combined/cmd/config.go | 1 + combined/cmd/root.go | 16 +++++++++++++++- combined/config.yaml.example | 5 +++++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/combined/cmd/config.go b/combined/cmd/config.go index 04155f72e..d0ffa4ba4 100644 --- a/combined/cmd/config.go +++ b/combined/cmd/config.go @@ -70,6 +70,7 @@ type ServerConfig struct { DisableGeoliteUpdate bool `yaml:"disableGeoliteUpdate"` Auth AuthConfig `yaml:"auth"` Store StoreConfig `yaml:"store"` + ActivityStore StoreConfig `yaml:"activityStore"` ReverseProxy ReverseProxyConfig `yaml:"reverseProxy"` } diff --git a/combined/cmd/root.go b/combined/cmd/root.go index b8ea7064c..00edcb5d4 100644 --- a/combined/cmd/root.go +++ b/combined/cmd/root.go @@ -141,6 +141,17 @@ func initializeConfig() error { } } + if engine := config.Server.ActivityStore.Engine; engine != "" { + engineLower := strings.ToLower(engine) + if engineLower == "postgres" && config.Server.ActivityStore.DSN == "" { + return fmt.Errorf("activityStore.dsn is required when activityStore.engine is postgres") + } + os.Setenv("NB_ACTIVITY_EVENT_STORE_ENGINE", engineLower) + if dsn := config.Server.ActivityStore.DSN; dsn != "" { + os.Setenv("NB_ACTIVITY_EVENT_POSTGRES_DSN", dsn) + } + } + log.Infof("Starting combined NetBird server") logConfig(config) logEnvVars() @@ -668,8 +679,11 @@ func logEnvVars() { if strings.HasPrefix(env, "NB_") { key, _, _ := strings.Cut(env, "=") value := os.Getenv(key) - if strings.Contains(strings.ToLower(key), "secret") || strings.Contains(strings.ToLower(key), "key") || strings.Contains(strings.ToLower(key), "password") { + keyLower := strings.ToLower(key) + if strings.Contains(keyLower, "secret") || strings.Contains(keyLower, "key") || strings.Contains(keyLower, "password") { value = maskSecret(value) + } else if strings.Contains(keyLower, "dsn") { + value = maskDSNPassword(value) } log.Infof(" %s=%s", key, value) found = true diff --git a/combined/config.yaml.example b/combined/config.yaml.example index b3b38c5a9..ad033396d 100644 --- a/combined/config.yaml.example +++ b/combined/config.yaml.example @@ -104,6 +104,11 @@ server: dsn: "" # Connection string for postgres or mysql encryptionKey: "" + # Activity events store configuration (optional, defaults to sqlite in dataDir) + # activityStore: + # engine: "sqlite" # sqlite or postgres + # dsn: "" # Connection string for postgres + # Reverse proxy settings (optional) # reverseProxy: # trustedHTTPProxies: [] From 44ef1a18dd990734a79697a33b34fe7934fc8739 Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Sun, 22 Feb 2026 11:58:35 +0200 Subject: [PATCH 155/374] [self-hosted] add Embedded IdP metrics (#5407) --- management/server/metrics/selfhosted.go | 20 +++++++++++ management/server/metrics/selfhosted_test.go | 35 +++++++++++++++++++- 2 files changed, 54 insertions(+), 1 deletion(-) diff --git a/management/server/metrics/selfhosted.go b/management/server/metrics/selfhosted.go index f7a344fcd..f7d07f3a0 100644 --- a/management/server/metrics/selfhosted.go +++ b/management/server/metrics/selfhosted.go @@ -210,6 +210,7 @@ func (w *Worker) generateProperties(ctx context.Context) properties { rosenpassEnabled int localUsers int idpUsers int + embeddedIdpTypes map[string]int ) start := time.Now() metricsProperties := make(properties) @@ -218,6 +219,7 @@ func (w *Worker) generateProperties(ctx context.Context) properties { rulesProtocol = make(map[string]int) rulesDirection = make(map[string]int) activeUsersLastDay = make(map[string]struct{}) + embeddedIdpTypes = make(map[string]int) uptime = time.Since(w.startupTime).Seconds() connections := w.connManager.GetAllConnectedPeers() version = nbversion.NetbirdVersion() @@ -277,6 +279,8 @@ func (w *Worker) generateProperties(ctx context.Context) properties { localUsers++ } else { idpUsers++ + idpType := extractIdpType(idpID) + embeddedIdpTypes[idpType]++ } } } @@ -369,6 +373,11 @@ func (w *Worker) generateProperties(ctx context.Context) properties { metricsProperties["rosenpass_enabled"] = rosenpassEnabled metricsProperties["local_users_count"] = localUsers metricsProperties["idp_users_count"] = idpUsers + metricsProperties["embedded_idp_count"] = len(embeddedIdpTypes) + + for idpType, count := range embeddedIdpTypes { + metricsProperties["embedded_idp_users_"+idpType] = count + } for protocol, count := range rulesProtocol { metricsProperties["rules_protocol_"+protocol] = count @@ -456,6 +465,17 @@ func createPostRequest(ctx context.Context, endpoint string, payloadStr string) return req, cancel, nil } +// extractIdpType extracts the IdP type from a Dex connector ID. +// Connector IDs are formatted as "-" (e.g., "okta-abc123", "zitadel-xyz"). +// Returns the type prefix, or "oidc" if no known prefix is found. +func extractIdpType(connectorID string) string { + idx := strings.LastIndex(connectorID, "-") + if idx <= 0 { + return "oidc" + } + return strings.ToLower(connectorID[:idx]) +} + func getMinMaxVersion(inputList []string) (string, string) { versions := make([]*version.Version, 0) diff --git a/management/server/metrics/selfhosted_test.go b/management/server/metrics/selfhosted_test.go index d0ab45cd7..504d228f7 100644 --- a/management/server/metrics/selfhosted_test.go +++ b/management/server/metrics/selfhosted_test.go @@ -27,7 +27,7 @@ func (mockDatasource) GetAllConnectedPeers() map[string]struct{} { // GetAllAccounts returns a list of *server.Account for use in tests with predefined information func (mockDatasource) GetAllAccounts(_ context.Context) []*types.Account { localUserID := dex.EncodeDexUserID("10", "local") - idpUserID := dex.EncodeDexUserID("20", "zitadel") + idpUserID := dex.EncodeDexUserID("20", "zitadel-d5uv82dra0haedlf6kv0") return []*types.Account{ { Id: "1", @@ -341,4 +341,37 @@ func TestGenerateProperties(t *testing.T) { if properties["idp_users_count"] != 1 { t.Errorf("expected 1 idp_users_count, got %d", properties["idp_users_count"]) } + if properties["embedded_idp_users_zitadel"] != 1 { + t.Errorf("expected 1 embedded_idp_users_zitadel, got %v", properties["embedded_idp_users_zitadel"]) + } + if properties["embedded_idp_count"] != 1 { + t.Errorf("expected 1 embedded_idp_count, got %v", properties["embedded_idp_count"]) + } +} + +func TestExtractIdpType(t *testing.T) { + tests := []struct { + connectorID string + expected string + }{ + {"okta-abc123def", "okta"}, + {"zitadel-d5uv82dra0haedlf6kv0", "zitadel"}, + {"entra-xyz789", "entra"}, + {"google-abc123", "google"}, + {"pocketid-abc123", "pocketid"}, + {"microsoft-abc123", "microsoft"}, + {"authentik-abc123", "authentik"}, + {"keycloak-d5uv82dra0haedlf6kv0", "keycloak"}, + {"local", "oidc"}, + {"", "oidc"}, + } + + for _, tt := range tests { + t.Run(tt.connectorID, func(t *testing.T) { + result := extractIdpType(tt.connectorID) + if result != tt.expected { + t.Errorf("extractIdpType(%q) = %q, want %q", tt.connectorID, result, tt.expected) + } + }) + } } From 22f878b3b783f3f7fe13ce055ee1aaae3d07a798 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Mon, 23 Feb 2026 15:34:35 +0100 Subject: [PATCH 156/374] [management] network map components assembling (#5193) --- .../network_map/controller/controller.go | 38 +- management/server/types/account_components.go | 576 +++++++++++ .../types/networkmap_comparison_test.go | 592 +++++++++++ .../server/types/networkmap_components.go | 938 ++++++++++++++++++ .../types/networkmap_components_compact.go | 230 +++++ 5 files changed, 2368 insertions(+), 6 deletions(-) create mode 100644 management/server/types/account_components.go create mode 100644 management/server/types/networkmap_comparison_test.go create mode 100644 management/server/types/networkmap_components.go create mode 100644 management/server/types/networkmap_components_compact.go diff --git a/management/internals/controllers/network_map/controller/controller.go b/management/internals/controllers/network_map/controller/controller.go index b2b65f47a..121c55ac5 100644 --- a/management/internals/controllers/network_map/controller/controller.go +++ b/management/internals/controllers/network_map/controller/controller.go @@ -63,6 +63,8 @@ type Controller struct { expNewNetworkMap bool expNewNetworkMapAIDs map[string]struct{} + + compactedNetworkMap bool } type bufferUpdate struct { @@ -85,6 +87,12 @@ func NewController(ctx context.Context, store store.Store, metrics telemetry.App newNetworkMapBuilder = false } + compactedNetworkMap, err := strconv.ParseBool(os.Getenv(types.EnvNewNetworkMapCompacted)) + if err != nil { + log.WithContext(ctx).Warnf("failed to parse %s, using default value false: %v", types.EnvNewNetworkMapCompacted, err) + compactedNetworkMap = false + } + ids := strings.Split(os.Getenv(network_map.EnvNewNetworkMapAccounts), ",") expIDs := make(map[string]struct{}, len(ids)) for _, id := range ids { @@ -108,6 +116,8 @@ func NewController(ctx context.Context, store store.Store, metrics telemetry.App holder: types.NewHolder(), expNewNetworkMap: newNetworkMapBuilder, expNewNetworkMapAIDs: expIDs, + + compactedNetworkMap: compactedNetworkMap, } } @@ -230,9 +240,12 @@ func (c *Controller) sendUpdateAccountPeers(ctx context.Context, accountID strin var remotePeerNetworkMap *types.NetworkMap - if c.experimentalNetworkMap(accountID) { + switch { + case c.experimentalNetworkMap(accountID): remotePeerNetworkMap = c.getPeerNetworkMapExp(ctx, p.AccountID, p.ID, approvedPeersMap, peersCustomZone, accountZones, c.accountManagerMetrics) - } else { + case c.compactedNetworkMap: + remotePeerNetworkMap = account.GetPeerNetworkMapFromComponents(ctx, p.ID, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) + default: remotePeerNetworkMap = account.GetPeerNetworkMap(ctx, p.ID, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) } @@ -355,9 +368,12 @@ func (c *Controller) UpdateAccountPeer(ctx context.Context, accountId string, pe var remotePeerNetworkMap *types.NetworkMap - if c.experimentalNetworkMap(accountId) { + switch { + case c.experimentalNetworkMap(accountId): remotePeerNetworkMap = c.getPeerNetworkMapExp(ctx, peer.AccountID, peer.ID, approvedPeersMap, peersCustomZone, accountZones, c.accountManagerMetrics) - } else { + case c.compactedNetworkMap: + remotePeerNetworkMap = account.GetPeerNetworkMapFromComponents(ctx, peerId, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) + default: remotePeerNetworkMap = account.GetPeerNetworkMap(ctx, peerId, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) } @@ -479,7 +495,12 @@ func (c *Controller) GetValidatedPeerWithMap(ctx context.Context, isRequiresAppr } else { resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - networkMap = account.GetPeerNetworkMap(ctx, peer.ID, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, account.GetActiveGroupUsers()) + groupIDToUserIDs := account.GetActiveGroupUsers() + if c.compactedNetworkMap { + networkMap = account.GetPeerNetworkMapFromComponents(ctx, peer.ID, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) + } else { + networkMap = account.GetPeerNetworkMap(ctx, peer.ID, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) + } } proxyNetworkMap, ok := proxyNetworkMaps[peer.ID] @@ -854,7 +875,12 @@ func (c *Controller) GetNetworkMap(ctx context.Context, peerID string) (*types.N account.InjectProxyPolicies(ctx) resourcePolicies := account.GetResourcePoliciesMap() routers := account.GetResourceRoutersMap() - networkMap = account.GetPeerNetworkMap(ctx, peer.ID, peersCustomZone, accountZones, validatedPeers, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) + groupIDToUserIDs := account.GetActiveGroupUsers() + if c.compactedNetworkMap { + networkMap = account.GetPeerNetworkMapFromComponents(ctx, peer.ID, peersCustomZone, accountZones, validatedPeers, resourcePolicies, routers, nil, groupIDToUserIDs) + } else { + networkMap = account.GetPeerNetworkMap(ctx, peer.ID, peersCustomZone, accountZones, validatedPeers, resourcePolicies, routers, nil, groupIDToUserIDs) + } } proxyNetworkMap, ok := proxyNetworkMaps[peer.ID] diff --git a/management/server/types/account_components.go b/management/server/types/account_components.go new file mode 100644 index 000000000..1eb25cecc --- /dev/null +++ b/management/server/types/account_components.go @@ -0,0 +1,576 @@ +package types + +import ( + "context" + "slices" + "time" + + log "github.com/sirupsen/logrus" + + nbdns "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/management/internals/modules/zones" + resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" + routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/telemetry" + "github.com/netbirdio/netbird/route" +) + +func (a *Account) GetPeerNetworkMapFromComponents( + ctx context.Context, + peerID string, + peersCustomZone nbdns.CustomZone, + accountZones []*zones.Zone, + validatedPeersMap map[string]struct{}, + resourcePolicies map[string][]*Policy, + routers map[string]map[string]*routerTypes.NetworkRouter, + metrics *telemetry.AccountManagerMetrics, + groupIDToUserIDs map[string][]string, +) *NetworkMap { + start := time.Now() + + components := a.GetPeerNetworkMapComponents( + ctx, + peerID, + peersCustomZone, + accountZones, + validatedPeersMap, + resourcePolicies, + routers, + groupIDToUserIDs, + ) + + if components == nil { + return &NetworkMap{Network: a.Network.Copy()} + } + + nm := CalculateNetworkMapFromComponents(ctx, components) + + if metrics != nil { + objectCount := int64(len(nm.Peers) + len(nm.OfflinePeers) + len(nm.Routes) + len(nm.FirewallRules) + len(nm.RoutesFirewallRules)) + metrics.CountNetworkMapObjects(objectCount) + metrics.CountGetPeerNetworkMapDuration(time.Since(start)) + + if objectCount > 5000 { + log.WithContext(ctx).Tracef("account: %s has a total resource count of %d objects from components, "+ + "peers: %d, offline peers: %d, routes: %d, firewall rules: %d, route firewall rules: %d", + a.Id, objectCount, len(nm.Peers), len(nm.OfflinePeers), len(nm.Routes), len(nm.FirewallRules), len(nm.RoutesFirewallRules)) + } + } + + return nm +} + +func (a *Account) GetPeerNetworkMapComponents( + ctx context.Context, + peerID string, + peersCustomZone nbdns.CustomZone, + accountZones []*zones.Zone, + validatedPeersMap map[string]struct{}, + resourcePolicies map[string][]*Policy, + routers map[string]map[string]*routerTypes.NetworkRouter, + groupIDToUserIDs map[string][]string, +) *NetworkMapComponents { + + peer := a.Peers[peerID] + if peer == nil { + return nil + } + + if _, ok := validatedPeersMap[peerID]; !ok { + return nil + } + + components := &NetworkMapComponents{ + PeerID: peerID, + Network: a.Network.Copy(), + NameServerGroups: make([]*nbdns.NameServerGroup, 0), + CustomZoneDomain: peersCustomZone.Domain, + ResourcePoliciesMap: make(map[string][]*Policy), + RoutersMap: make(map[string]map[string]*routerTypes.NetworkRouter), + NetworkResources: make([]*resourceTypes.NetworkResource, 0), + PostureFailedPeers: make(map[string]map[string]struct{}, len(a.PostureChecks)), + RouterPeers: make(map[string]*nbpeer.Peer), + } + + components.AccountSettings = &AccountSettingsInfo{ + PeerLoginExpirationEnabled: a.Settings.PeerLoginExpirationEnabled, + PeerLoginExpiration: a.Settings.PeerLoginExpiration, + PeerInactivityExpirationEnabled: a.Settings.PeerInactivityExpirationEnabled, + PeerInactivityExpiration: a.Settings.PeerInactivityExpiration, + } + + components.DNSSettings = &a.DNSSettings + + relevantPeers, relevantGroups, relevantPolicies, relevantRoutes, sshReqs := a.getPeersGroupsPoliciesRoutes(ctx, peerID, peer.SSHEnabled, validatedPeersMap, &components.PostureFailedPeers) + + if len(sshReqs.neededGroupIDs) > 0 { + components.GroupIDToUserIDs = filterGroupIDToUserIDs(groupIDToUserIDs, sshReqs.neededGroupIDs) + } + if sshReqs.needAllowedUserIDs { + components.AllowedUserIDs = a.getAllowedUserIDs() + } + + components.Peers = relevantPeers + components.Groups = relevantGroups + components.Policies = relevantPolicies + components.Routes = relevantRoutes + components.AllDNSRecords = filterDNSRecordsByPeers(peersCustomZone.Records, relevantPeers) + + peerGroups := a.GetPeerGroups(peerID) + components.AccountZones = filterPeerAppliedZones(ctx, accountZones, peerGroups) + + for _, nsGroup := range a.NameServerGroups { + if nsGroup.Enabled { + for _, gID := range nsGroup.Groups { + if _, found := relevantGroups[gID]; found { + components.NameServerGroups = append(components.NameServerGroups, nsGroup) + break + } + } + } + } + + for _, resource := range a.NetworkResources { + if !resource.Enabled { + continue + } + + policies, exists := resourcePolicies[resource.ID] + if !exists { + continue + } + + addSourcePeers := false + + networkRoutingPeers, routerExists := routers[resource.NetworkID] + if routerExists { + if _, ok := networkRoutingPeers[peerID]; ok { + addSourcePeers = true + } + } + + for _, policy := range policies { + if addSourcePeers { + var peers []string + if policy.Rules[0].SourceResource.Type == ResourceTypePeer && policy.Rules[0].SourceResource.ID != "" { + peers = []string{policy.Rules[0].SourceResource.ID} + } else { + peers = a.getUniquePeerIDsFromGroupsIDs(ctx, policy.SourceGroups()) + } + for _, pID := range a.getPostureValidPeersSaveFailed(peers, policy.SourcePostureChecks, validatedPeersMap, &components.PostureFailedPeers) { + if _, exists := components.Peers[pID]; !exists { + components.Peers[pID] = a.GetPeer(pID) + } + } + } else { + peerInSources := false + if policy.Rules[0].SourceResource.Type == ResourceTypePeer && policy.Rules[0].SourceResource.ID != "" { + peerInSources = policy.Rules[0].SourceResource.ID == peerID + } else { + for _, groupID := range policy.SourceGroups() { + if group := a.GetGroup(groupID); group != nil && slices.Contains(group.Peers, peerID) { + peerInSources = true + break + } + } + } + if !peerInSources { + continue + } + isValid, pname := a.validatePostureChecksOnPeerGetFailed(ctx, policy.SourcePostureChecks, peerID) + if !isValid && len(pname) > 0 { + if _, ok := components.PostureFailedPeers[pname]; !ok { + components.PostureFailedPeers[pname] = make(map[string]struct{}) + } + components.PostureFailedPeers[pname][peer.ID] = struct{}{} + continue + } + addSourcePeers = true + } + + for _, rule := range policy.Rules { + for _, srcGroupID := range rule.Sources { + if g := a.Groups[srcGroupID]; g != nil { + if _, exists := components.Groups[srcGroupID]; !exists { + components.Groups[srcGroupID] = g + } + } + } + for _, dstGroupID := range rule.Destinations { + if g := a.Groups[dstGroupID]; g != nil { + if _, exists := components.Groups[dstGroupID]; !exists { + components.Groups[dstGroupID] = g + } + } + } + } + components.ResourcePoliciesMap[resource.ID] = policies + } + + components.RoutersMap[resource.NetworkID] = networkRoutingPeers + for peerIDKey := range networkRoutingPeers { + if p := a.Peers[peerIDKey]; p != nil { + if _, exists := components.RouterPeers[peerIDKey]; !exists { + components.RouterPeers[peerIDKey] = p + } + if _, exists := components.Peers[peerIDKey]; !exists { + if _, validated := validatedPeersMap[peerIDKey]; validated { + components.Peers[peerIDKey] = p + } + } + } + } + + if addSourcePeers { + components.NetworkResources = append(components.NetworkResources, resource) + } + } + + filterGroupPeers(&components.Groups, components.Peers) + filterPostureFailedPeers(&components.PostureFailedPeers, components.Policies, components.ResourcePoliciesMap, components.Peers) + + return components +} + +type sshRequirements struct { + neededGroupIDs map[string]struct{} + needAllowedUserIDs bool +} + +func (a *Account) getPeersGroupsPoliciesRoutes( + ctx context.Context, + peerID string, + peerSSHEnabled bool, + validatedPeersMap map[string]struct{}, + postureFailedPeers *map[string]map[string]struct{}, +) (map[string]*nbpeer.Peer, map[string]*Group, []*Policy, []*route.Route, sshRequirements) { + relevantPeerIDs := make(map[string]*nbpeer.Peer, len(a.Peers)/4) + relevantGroupIDs := make(map[string]*Group, len(a.Groups)/4) + relevantPolicies := make([]*Policy, 0, len(a.Policies)) + relevantRoutes := make([]*route.Route, 0, len(a.Routes)) + sshReqs := sshRequirements{neededGroupIDs: make(map[string]struct{})} + + relevantPeerIDs[peerID] = a.GetPeer(peerID) + + for groupID, group := range a.Groups { + if slices.Contains(group.Peers, peerID) { + relevantGroupIDs[groupID] = a.GetGroup(groupID) + } + } + + routeAccessControlGroups := make(map[string]struct{}) + for _, r := range a.Routes { + for _, groupID := range r.Groups { + relevantGroupIDs[groupID] = a.GetGroup(groupID) + } + for _, groupID := range r.PeerGroups { + relevantGroupIDs[groupID] = a.GetGroup(groupID) + } + if r.Enabled { + for _, groupID := range r.AccessControlGroups { + relevantGroupIDs[groupID] = a.GetGroup(groupID) + routeAccessControlGroups[groupID] = struct{}{} + } + } + relevantRoutes = append(relevantRoutes, r) + } + + for _, policy := range a.Policies { + if !policy.Enabled { + continue + } + + policyRelevant := false + for _, rule := range policy.Rules { + if !rule.Enabled { + continue + } + + if len(routeAccessControlGroups) > 0 { + for _, destGroupID := range rule.Destinations { + if _, needed := routeAccessControlGroups[destGroupID]; needed { + policyRelevant = true + for _, srcGroupID := range rule.Sources { + relevantGroupIDs[srcGroupID] = a.GetGroup(srcGroupID) + } + for _, dstGroupID := range rule.Destinations { + relevantGroupIDs[dstGroupID] = a.GetGroup(dstGroupID) + } + break + } + } + } + + var sourcePeers, destinationPeers []string + var peerInSources, peerInDestinations bool + + if rule.SourceResource.Type == ResourceTypePeer && rule.SourceResource.ID != "" { + sourcePeers = []string{rule.SourceResource.ID} + if rule.SourceResource.ID == peerID { + peerInSources = true + } + } else { + sourcePeers, peerInSources = a.getPeersFromGroups(ctx, rule.Sources, peerID, policy.SourcePostureChecks, validatedPeersMap, postureFailedPeers) + } + + if rule.DestinationResource.Type == ResourceTypePeer && rule.DestinationResource.ID != "" { + destinationPeers = []string{rule.DestinationResource.ID} + if rule.DestinationResource.ID == peerID { + peerInDestinations = true + } + } else { + destinationPeers, peerInDestinations = a.getPeersFromGroups(ctx, rule.Destinations, peerID, nil, validatedPeersMap, postureFailedPeers) + } + + if peerInSources { + policyRelevant = true + for _, pid := range destinationPeers { + relevantPeerIDs[pid] = a.GetPeer(pid) + } + for _, dstGroupID := range rule.Destinations { + relevantGroupIDs[dstGroupID] = a.GetGroup(dstGroupID) + } + } + + if peerInDestinations { + policyRelevant = true + for _, pid := range sourcePeers { + relevantPeerIDs[pid] = a.GetPeer(pid) + } + for _, srcGroupID := range rule.Sources { + relevantGroupIDs[srcGroupID] = a.GetGroup(srcGroupID) + } + + if rule.Protocol == PolicyRuleProtocolNetbirdSSH { + switch { + case len(rule.AuthorizedGroups) > 0: + for groupID := range rule.AuthorizedGroups { + sshReqs.neededGroupIDs[groupID] = struct{}{} + } + case rule.AuthorizedUser != "": + default: + sshReqs.needAllowedUserIDs = true + } + } else if policyRuleImpliesLegacySSH(rule) && peerSSHEnabled { + sshReqs.needAllowedUserIDs = true + } + } + } + if policyRelevant { + relevantPolicies = append(relevantPolicies, policy) + } + } + + return relevantPeerIDs, relevantGroupIDs, relevantPolicies, relevantRoutes, sshReqs +} + +func (a *Account) getPeersFromGroups(ctx context.Context, groups []string, peerID string, sourcePostureChecksIDs []string, + validatedPeersMap map[string]struct{}, postureFailedPeers *map[string]map[string]struct{}) ([]string, bool) { + peerInGroups := false + filteredPeerIDs := make([]string, 0, len(a.Peers)) + seenPeerIds := make(map[string]struct{}, len(groups)) + + for _, gid := range groups { + group := a.GetGroup(gid) + if group == nil { + continue + } + + if group.IsGroupAll() || len(groups) == 1 { + filteredPeerIDs = filteredPeerIDs[:0] + peerInGroups = false + for _, pid := range group.Peers { + peer, ok := a.Peers[pid] + if !ok || peer == nil { + continue + } + + if _, ok := validatedPeersMap[peer.ID]; !ok { + continue + } + + isValid, pname := a.validatePostureChecksOnPeerGetFailed(ctx, sourcePostureChecksIDs, peer.ID) + if !isValid && len(pname) > 0 { + if _, ok := (*postureFailedPeers)[pname]; !ok { + (*postureFailedPeers)[pname] = make(map[string]struct{}) + } + (*postureFailedPeers)[pname][peer.ID] = struct{}{} + continue + } + + if peer.ID == peerID { + peerInGroups = true + continue + } + + filteredPeerIDs = append(filteredPeerIDs, peer.ID) + } + return filteredPeerIDs, peerInGroups + } + + for _, pid := range group.Peers { + if _, seen := seenPeerIds[pid]; seen { + continue + } + seenPeerIds[pid] = struct{}{} + peer, ok := a.Peers[pid] + if !ok || peer == nil { + continue + } + + if _, ok := validatedPeersMap[peer.ID]; !ok { + continue + } + + isValid, pname := a.validatePostureChecksOnPeerGetFailed(ctx, sourcePostureChecksIDs, peer.ID) + if !isValid && len(pname) > 0 { + if _, ok := (*postureFailedPeers)[pname]; !ok { + (*postureFailedPeers)[pname] = make(map[string]struct{}) + } + (*postureFailedPeers)[pname][peer.ID] = struct{}{} + continue + } + + if peer.ID == peerID { + peerInGroups = true + continue + } + + filteredPeerIDs = append(filteredPeerIDs, peer.ID) + } + } + + return filteredPeerIDs, peerInGroups +} + +func (a *Account) validatePostureChecksOnPeerGetFailed(ctx context.Context, sourcePostureChecksID []string, peerID string) (bool, string) { + peer, ok := a.Peers[peerID] + if !ok || peer == nil { + return false, "" + } + + for _, postureChecksID := range sourcePostureChecksID { + postureChecks := a.GetPostureChecks(postureChecksID) + if postureChecks == nil { + continue + } + + for _, check := range postureChecks.GetChecks() { + isValid, _ := check.Check(ctx, *peer) + if !isValid { + return false, postureChecksID + } + } + } + return true, "" +} + +func (a *Account) getPostureValidPeersSaveFailed(inputPeers []string, postureChecksIDs []string, validatedPeersMap map[string]struct{}, postureFailedPeers *map[string]map[string]struct{}) []string { + var dest []string + for _, peerID := range inputPeers { + if _, validated := validatedPeersMap[peerID]; !validated { + continue + } + valid, pname := a.validatePostureChecksOnPeerGetFailed(context.Background(), postureChecksIDs, peerID) + if valid { + dest = append(dest, peerID) + continue + } + if _, ok := (*postureFailedPeers)[pname]; !ok { + (*postureFailedPeers)[pname] = make(map[string]struct{}) + } + (*postureFailedPeers)[pname][peerID] = struct{}{} + } + return dest +} + +func filterGroupPeers(groups *map[string]*Group, peers map[string]*nbpeer.Peer) { + for groupID, groupInfo := range *groups { + filteredPeers := make([]string, 0, len(groupInfo.Peers)) + for _, pid := range groupInfo.Peers { + if _, exists := peers[pid]; exists { + filteredPeers = append(filteredPeers, pid) + } + } + + if len(filteredPeers) == 0 { + delete(*groups, groupID) + } else if len(filteredPeers) != len(groupInfo.Peers) { + ng := groupInfo.Copy() + ng.Peers = filteredPeers + (*groups)[groupID] = ng + } + } +} + +func filterPostureFailedPeers(postureFailedPeers *map[string]map[string]struct{}, policies []*Policy, resourcePoliciesMap map[string][]*Policy, peers map[string]*nbpeer.Peer) { + if len(*postureFailedPeers) == 0 { + return + } + + referencedPostureChecks := make(map[string]struct{}) + for _, policy := range policies { + for _, checkID := range policy.SourcePostureChecks { + referencedPostureChecks[checkID] = struct{}{} + } + } + for _, resPolicies := range resourcePoliciesMap { + for _, policy := range resPolicies { + for _, checkID := range policy.SourcePostureChecks { + referencedPostureChecks[checkID] = struct{}{} + } + } + } + + for checkID, failedPeers := range *postureFailedPeers { + if _, referenced := referencedPostureChecks[checkID]; !referenced { + delete(*postureFailedPeers, checkID) + continue + } + for peerID := range failedPeers { + if _, exists := peers[peerID]; !exists { + delete(failedPeers, peerID) + } + } + if len(failedPeers) == 0 { + delete(*postureFailedPeers, checkID) + } + } +} + +func filterDNSRecordsByPeers(records []nbdns.SimpleRecord, peers map[string]*nbpeer.Peer) []nbdns.SimpleRecord { + if len(records) == 0 || len(peers) == 0 { + return nil + } + + peerIPs := make(map[string]struct{}, len(peers)) + for _, peer := range peers { + if peer != nil { + peerIPs[peer.IP.String()] = struct{}{} + } + } + + filteredRecords := make([]nbdns.SimpleRecord, 0, len(records)) + for _, record := range records { + if _, exists := peerIPs[record.RData]; exists { + filteredRecords = append(filteredRecords, record) + } + } + + return filteredRecords +} + +func filterGroupIDToUserIDs(fullMap map[string][]string, neededGroupIDs map[string]struct{}) map[string][]string { + if len(neededGroupIDs) == 0 { + return nil + } + + filtered := make(map[string][]string, len(neededGroupIDs)) + for groupID := range neededGroupIDs { + if users, ok := fullMap[groupID]; ok { + filtered[groupID] = users + } + } + return filtered +} diff --git a/management/server/types/networkmap_comparison_test.go b/management/server/types/networkmap_comparison_test.go new file mode 100644 index 000000000..c5844cca0 --- /dev/null +++ b/management/server/types/networkmap_comparison_test.go @@ -0,0 +1,592 @@ +package types + +import ( + "context" + "encoding/json" + "fmt" + "net" + "net/netip" + "os" + "path/filepath" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/require" + + nbdns "github.com/netbirdio/netbird/dns" + resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" + routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" + networkTypes "github.com/netbirdio/netbird/management/server/networks/types" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/posture" + "github.com/netbirdio/netbird/route" +) + +func TestNetworkMapComponents_CompareWithLegacy(t *testing.T) { + account := createTestAccount() + ctx := context.Background() + + peerID := testingPeerID + validatedPeersMap := make(map[string]struct{}) + for i := range numPeers { + pid := fmt.Sprintf("peer-%d", i) + if pid == offlinePeerID { + continue + } + validatedPeersMap[pid] = struct{}{} + } + + peersCustomZone := nbdns.CustomZone{} + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + + legacyNetworkMap := account.GetPeerNetworkMap( + ctx, + peerID, + peersCustomZone, + nil, + validatedPeersMap, + resourcePolicies, + routers, + nil, + groupIDToUserIDs, + ) + + components := account.GetPeerNetworkMapComponents( + ctx, + peerID, + peersCustomZone, + nil, + validatedPeersMap, + resourcePolicies, + routers, + groupIDToUserIDs, + ) + + if components == nil { + t.Fatal("GetPeerNetworkMapComponents returned nil") + } + + newNetworkMap := CalculateNetworkMapFromComponents(ctx, components) + + if newNetworkMap == nil { + t.Fatal("CalculateNetworkMapFromComponents returned nil") + } + + compareNetworkMaps(t, legacyNetworkMap, newNetworkMap) +} + +func TestNetworkMapComponents_GoldenFileComparison(t *testing.T) { + account := createTestAccount() + ctx := context.Background() + + peerID := testingPeerID + validatedPeersMap := make(map[string]struct{}) + for i := range numPeers { + pid := fmt.Sprintf("peer-%d", i) + if pid == offlinePeerID { + continue + } + validatedPeersMap[pid] = struct{}{} + } + + peersCustomZone := nbdns.CustomZone{} + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + + legacyNetworkMap := account.GetPeerNetworkMap( + ctx, + peerID, + peersCustomZone, + nil, + validatedPeersMap, + resourcePolicies, + routers, + nil, + groupIDToUserIDs, + ) + + components := account.GetPeerNetworkMapComponents( + ctx, + peerID, + peersCustomZone, + nil, + validatedPeersMap, + resourcePolicies, + routers, + groupIDToUserIDs, + ) + + require.NotNil(t, components, "GetPeerNetworkMapComponents returned nil") + + newNetworkMap := CalculateNetworkMapFromComponents(ctx, components) + require.NotNil(t, newNetworkMap, "CalculateNetworkMapFromComponents returned nil") + + normalizeAndSortNetworkMap(legacyNetworkMap) + normalizeAndSortNetworkMap(newNetworkMap) + + componentsJSON, err := json.MarshalIndent(components, "", " ") + require.NoError(t, err, "error marshaling components to JSON") + + legacyJSON, err := json.MarshalIndent(legacyNetworkMap, "", " ") + require.NoError(t, err, "error marshaling legacy network map to JSON") + + newJSON, err := json.MarshalIndent(newNetworkMap, "", " ") + require.NoError(t, err, "error marshaling new network map to JSON") + + goldenDir := filepath.Join("testdata", "comparison") + err = os.MkdirAll(goldenDir, 0755) + require.NoError(t, err) + + legacyGoldenPath := filepath.Join(goldenDir, "legacy_networkmap.json") + err = os.WriteFile(legacyGoldenPath, legacyJSON, 0644) + require.NoError(t, err, "error writing legacy golden file") + + newGoldenPath := filepath.Join(goldenDir, "components_networkmap.json") + err = os.WriteFile(newGoldenPath, newJSON, 0644) + require.NoError(t, err, "error writing components golden file") + + componentsPath := filepath.Join(goldenDir, "components.json") + err = os.WriteFile(componentsPath, componentsJSON, 0644) + require.NoError(t, err, "error writing components golden file") + + require.JSONEq(t, string(legacyJSON), string(newJSON), + "NetworkMaps from legacy and components approaches do not match.\n"+ + "Legacy JSON saved to: %s\n"+ + "Components JSON saved to: %s", + legacyGoldenPath, newGoldenPath) + + t.Logf("✅ NetworkMaps are identical") + t.Logf(" Legacy NetworkMap: %s", legacyGoldenPath) + t.Logf(" Components NetworkMap: %s", newGoldenPath) +} + +func normalizeAndSortNetworkMap(nm *NetworkMap) { + if nm == nil { + return + } + + sort.Slice(nm.Peers, func(i, j int) bool { + return nm.Peers[i].ID < nm.Peers[j].ID + }) + + sort.Slice(nm.OfflinePeers, func(i, j int) bool { + return nm.OfflinePeers[i].ID < nm.OfflinePeers[j].ID + }) + + sort.Slice(nm.Routes, func(i, j int) bool { + return string(nm.Routes[i].ID) < string(nm.Routes[j].ID) + }) + + sort.Slice(nm.FirewallRules, func(i, j int) bool { + if nm.FirewallRules[i].PeerIP != nm.FirewallRules[j].PeerIP { + return nm.FirewallRules[i].PeerIP < nm.FirewallRules[j].PeerIP + } + if nm.FirewallRules[i].Direction != nm.FirewallRules[j].Direction { + return nm.FirewallRules[i].Direction < nm.FirewallRules[j].Direction + } + if nm.FirewallRules[i].Protocol != nm.FirewallRules[j].Protocol { + return nm.FirewallRules[i].Protocol < nm.FirewallRules[j].Protocol + } + if nm.FirewallRules[i].Port != nm.FirewallRules[j].Port { + return nm.FirewallRules[i].Port < nm.FirewallRules[j].Port + } + return nm.FirewallRules[i].PolicyID < nm.FirewallRules[j].PolicyID + }) + + for i := range nm.RoutesFirewallRules { + sort.Strings(nm.RoutesFirewallRules[i].SourceRanges) + } + + sort.Slice(nm.RoutesFirewallRules, func(i, j int) bool { + if nm.RoutesFirewallRules[i].Destination != nm.RoutesFirewallRules[j].Destination { + return nm.RoutesFirewallRules[i].Destination < nm.RoutesFirewallRules[j].Destination + } + + minLen := len(nm.RoutesFirewallRules[i].SourceRanges) + if len(nm.RoutesFirewallRules[j].SourceRanges) < minLen { + minLen = len(nm.RoutesFirewallRules[j].SourceRanges) + } + for k := 0; k < minLen; k++ { + if nm.RoutesFirewallRules[i].SourceRanges[k] != nm.RoutesFirewallRules[j].SourceRanges[k] { + return nm.RoutesFirewallRules[i].SourceRanges[k] < nm.RoutesFirewallRules[j].SourceRanges[k] + } + } + if len(nm.RoutesFirewallRules[i].SourceRanges) != len(nm.RoutesFirewallRules[j].SourceRanges) { + return len(nm.RoutesFirewallRules[i].SourceRanges) < len(nm.RoutesFirewallRules[j].SourceRanges) + } + + if string(nm.RoutesFirewallRules[i].RouteID) != string(nm.RoutesFirewallRules[j].RouteID) { + return string(nm.RoutesFirewallRules[i].RouteID) < string(nm.RoutesFirewallRules[j].RouteID) + } + + if nm.RoutesFirewallRules[i].PolicyID != nm.RoutesFirewallRules[j].PolicyID { + return nm.RoutesFirewallRules[i].PolicyID < nm.RoutesFirewallRules[j].PolicyID + } + + if nm.RoutesFirewallRules[i].Port != nm.RoutesFirewallRules[j].Port { + return nm.RoutesFirewallRules[i].Port < nm.RoutesFirewallRules[j].Port + } + + return nm.RoutesFirewallRules[i].Protocol < nm.RoutesFirewallRules[j].Protocol + }) + + if nm.DNSConfig.CustomZones != nil { + for i := range nm.DNSConfig.CustomZones { + sort.Slice(nm.DNSConfig.CustomZones[i].Records, func(a, b int) bool { + return nm.DNSConfig.CustomZones[i].Records[a].Name < nm.DNSConfig.CustomZones[i].Records[b].Name + }) + } + } + + if len(nm.DNSConfig.NameServerGroups) != 0 { + sort.Slice(nm.DNSConfig.NameServerGroups, func(a, b int) bool { + return nm.DNSConfig.NameServerGroups[a].Name < nm.DNSConfig.NameServerGroups[b].Name + }) + } +} + +func compareNetworkMaps(t *testing.T, legacy, current *NetworkMap) { + t.Helper() + + if legacy.Network.Serial != current.Network.Serial { + t.Errorf("Network Serial mismatch: legacy=%d, current=%d", legacy.Network.Serial, current.Network.Serial) + } + + if len(legacy.Peers) != len(current.Peers) { + t.Errorf("Peers count mismatch: legacy=%d, current=%d", len(legacy.Peers), len(current.Peers)) + } + + legacyPeerIDs := make(map[string]bool) + for _, p := range legacy.Peers { + legacyPeerIDs[p.ID] = true + } + + for _, p := range current.Peers { + if !legacyPeerIDs[p.ID] { + t.Errorf("Current NetworkMap contains peer %s not in legacy", p.ID) + } + } + + if len(legacy.OfflinePeers) != len(current.OfflinePeers) { + t.Errorf("OfflinePeers count mismatch: legacy=%d, current=%d", len(legacy.OfflinePeers), len(current.OfflinePeers)) + } + + if len(legacy.FirewallRules) != len(current.FirewallRules) { + t.Logf("FirewallRules count mismatch: legacy=%d, current=%d", len(legacy.FirewallRules), len(current.FirewallRules)) + } + + if len(legacy.Routes) != len(current.Routes) { + t.Logf("Routes count mismatch: legacy=%d, current=%d", len(legacy.Routes), len(current.Routes)) + } + + if len(legacy.RoutesFirewallRules) != len(current.RoutesFirewallRules) { + t.Logf("RoutesFirewallRules count mismatch: legacy=%d, current=%d", len(legacy.RoutesFirewallRules), len(current.RoutesFirewallRules)) + } + + if legacy.DNSConfig.ServiceEnable != current.DNSConfig.ServiceEnable { + t.Errorf("DNSConfig.ServiceEnable mismatch: legacy=%v, current=%v", legacy.DNSConfig.ServiceEnable, current.DNSConfig.ServiceEnable) + } +} + +const ( + numPeers = 100 + devGroupID = "group-dev" + opsGroupID = "group-ops" + allGroupID = "group-all" + routeID = route.ID("route-main") + routeHA1ID = route.ID("route-ha-1") + routeHA2ID = route.ID("route-ha-2") + policyIDDevOps = "policy-dev-ops" + policyIDAll = "policy-all" + policyIDPosture = "policy-posture" + policyIDDrop = "policy-drop" + postureCheckID = "posture-check-ver" + networkResourceID = "res-database" + networkID = "net-database" + networkRouterID = "router-database" + nameserverGroupID = "ns-group-main" + testingPeerID = "peer-60" + expiredPeerID = "peer-98" + offlinePeerID = "peer-99" + routingPeerID = "peer-95" + testAccountID = "account-comparison-test" +) + +func createTestAccount() *Account { + peers := make(map[string]*nbpeer.Peer) + devGroupPeers, opsGroupPeers, allGroupPeers := []string{}, []string{}, []string{} + + for i := range numPeers { + peerID := fmt.Sprintf("peer-%d", i) + ip := net.IP{100, 64, 0, byte(i + 1)} + wtVersion := "0.25.0" + if i%2 == 0 { + wtVersion = "0.40.0" + } + + p := &nbpeer.Peer{ + ID: peerID, IP: ip, Key: fmt.Sprintf("key-%s", peerID), DNSLabel: fmt.Sprintf("peer%d", i+1), + Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, + UserID: "user-admin", Meta: nbpeer.PeerSystemMeta{WtVersion: wtVersion, GoOS: "linux"}, + } + + if peerID == expiredPeerID { + p.LoginExpirationEnabled = true + pastTimestamp := time.Now().Add(-2 * time.Hour) + p.LastLogin = &pastTimestamp + } + + peers[peerID] = p + allGroupPeers = append(allGroupPeers, peerID) + if i < numPeers/2 { + devGroupPeers = append(devGroupPeers, peerID) + } else { + opsGroupPeers = append(opsGroupPeers, peerID) + } + } + + groups := map[string]*Group{ + allGroupID: {ID: allGroupID, Name: "All", Peers: allGroupPeers}, + devGroupID: {ID: devGroupID, Name: "Developers", Peers: devGroupPeers}, + opsGroupID: {ID: opsGroupID, Name: "Operations", Peers: opsGroupPeers}, + } + + policies := []*Policy{ + { + ID: policyIDAll, Name: "Default-Allow", Enabled: true, + Rules: []*PolicyRule{{ + ID: policyIDAll, Name: "Allow All", Enabled: true, Action: PolicyTrafficActionAccept, + Protocol: PolicyRuleProtocolALL, Bidirectional: true, + Sources: []string{allGroupID}, Destinations: []string{allGroupID}, + }}, + }, + { + ID: policyIDDevOps, Name: "Dev to Ops Web Access", Enabled: true, + Rules: []*PolicyRule{{ + ID: policyIDDevOps, Name: "Dev -> Ops (HTTP Range)", Enabled: true, Action: PolicyTrafficActionAccept, + Protocol: PolicyRuleProtocolTCP, Bidirectional: false, + PortRanges: []RulePortRange{{Start: 8080, End: 8090}}, + Sources: []string{devGroupID}, Destinations: []string{opsGroupID}, + }}, + }, + { + ID: policyIDDrop, Name: "Drop DB traffic", Enabled: true, + Rules: []*PolicyRule{{ + ID: policyIDDrop, Name: "Drop DB", Enabled: true, Action: PolicyTrafficActionDrop, + Protocol: PolicyRuleProtocolTCP, Ports: []string{"5432"}, Bidirectional: true, + Sources: []string{devGroupID}, Destinations: []string{opsGroupID}, + }}, + }, + { + ID: policyIDPosture, Name: "Posture Check for DB Resource", Enabled: true, + SourcePostureChecks: []string{postureCheckID}, + Rules: []*PolicyRule{{ + ID: policyIDPosture, Name: "Allow DB Access", Enabled: true, Action: PolicyTrafficActionAccept, + Protocol: PolicyRuleProtocolALL, Bidirectional: true, + Sources: []string{opsGroupID}, DestinationResource: Resource{ID: networkResourceID}, + }}, + }, + } + + routes := map[route.ID]*route.Route{ + routeID: { + ID: routeID, Network: netip.MustParsePrefix("192.168.10.0/24"), + Peer: peers["peer-75"].Key, + PeerID: "peer-75", + Description: "Route to internal resource", Enabled: true, + PeerGroups: []string{devGroupID, opsGroupID}, + Groups: []string{devGroupID, opsGroupID}, + AccessControlGroups: []string{devGroupID}, + }, + routeHA1ID: { + ID: routeHA1ID, Network: netip.MustParsePrefix("10.10.0.0/16"), + Peer: peers["peer-80"].Key, + PeerID: "peer-80", + Description: "HA Route 1", Enabled: true, Metric: 1000, + PeerGroups: []string{allGroupID}, + Groups: []string{allGroupID}, + AccessControlGroups: []string{allGroupID}, + }, + routeHA2ID: { + ID: routeHA2ID, Network: netip.MustParsePrefix("10.10.0.0/16"), + Peer: peers["peer-90"].Key, + PeerID: "peer-90", + Description: "HA Route 2", Enabled: true, Metric: 900, + PeerGroups: []string{devGroupID, opsGroupID}, + Groups: []string{devGroupID, opsGroupID}, + AccessControlGroups: []string{allGroupID}, + }, + } + + account := &Account{ + Id: testAccountID, Peers: peers, Groups: groups, Policies: policies, Routes: routes, + Network: &Network{ + Identifier: "net-comparison-test", Net: net.IPNet{IP: net.IP{100, 64, 0, 0}, Mask: net.CIDRMask(16, 32)}, Serial: 1, + }, + DNSSettings: DNSSettings{DisabledManagementGroups: []string{opsGroupID}}, + NameServerGroups: map[string]*nbdns.NameServerGroup{ + nameserverGroupID: { + ID: nameserverGroupID, Name: "Main NS", Enabled: true, Groups: []string{devGroupID}, + NameServers: []nbdns.NameServer{{IP: netip.MustParseAddr("8.8.8.8"), NSType: nbdns.UDPNameServerType, Port: 53}}, + }, + }, + PostureChecks: []*posture.Checks{ + {ID: postureCheckID, Name: "Check version", Checks: posture.ChecksDefinition{ + NBVersionCheck: &posture.NBVersionCheck{MinVersion: "0.26.0"}, + }}, + }, + NetworkResources: []*resourceTypes.NetworkResource{ + {ID: networkResourceID, NetworkID: networkID, AccountID: testAccountID, Enabled: true, Address: "db.netbird.cloud"}, + }, + Networks: []*networkTypes.Network{{ID: networkID, Name: "DB Network", AccountID: testAccountID}}, + NetworkRouters: []*routerTypes.NetworkRouter{ + {ID: networkRouterID, NetworkID: networkID, Peer: routingPeerID, Enabled: true, AccountID: testAccountID}, + }, + Settings: &Settings{PeerLoginExpirationEnabled: true, PeerLoginExpiration: 1 * time.Hour}, + } + + for _, p := range account.Policies { + p.AccountID = account.Id + } + for _, r := range account.Routes { + r.AccountID = account.Id + } + + return account +} + +func BenchmarkLegacyNetworkMap(b *testing.B) { + account := createTestAccount() + ctx := context.Background() + peerID := testingPeerID + validatedPeersMap := make(map[string]struct{}) + for i := range numPeers { + pid := fmt.Sprintf("peer-%d", i) + if pid != offlinePeerID { + validatedPeersMap[pid] = struct{}{} + } + } + + peersCustomZone := nbdns.CustomZone{} + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = account.GetPeerNetworkMap( + ctx, + peerID, + peersCustomZone, + nil, + validatedPeersMap, + resourcePolicies, + routers, + nil, + groupIDToUserIDs, + ) + } +} + +func BenchmarkComponentsNetworkMap(b *testing.B) { + account := createTestAccount() + ctx := context.Background() + peerID := testingPeerID + validatedPeersMap := make(map[string]struct{}) + for i := range numPeers { + pid := fmt.Sprintf("peer-%d", i) + if pid != offlinePeerID { + validatedPeersMap[pid] = struct{}{} + } + } + + peersCustomZone := nbdns.CustomZone{} + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + components := account.GetPeerNetworkMapComponents( + ctx, + peerID, + peersCustomZone, + nil, + validatedPeersMap, + resourcePolicies, + routers, + groupIDToUserIDs, + ) + _ = CalculateNetworkMapFromComponents(ctx, components) + } +} + +func BenchmarkComponentsCreation(b *testing.B) { + account := createTestAccount() + ctx := context.Background() + peerID := testingPeerID + validatedPeersMap := make(map[string]struct{}) + for i := range numPeers { + pid := fmt.Sprintf("peer-%d", i) + if pid != offlinePeerID { + validatedPeersMap[pid] = struct{}{} + } + } + + peersCustomZone := nbdns.CustomZone{} + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = account.GetPeerNetworkMapComponents( + ctx, + peerID, + peersCustomZone, + nil, + validatedPeersMap, + resourcePolicies, + routers, + groupIDToUserIDs, + ) + } +} + +func BenchmarkCalculationFromComponents(b *testing.B) { + account := createTestAccount() + ctx := context.Background() + peerID := testingPeerID + validatedPeersMap := make(map[string]struct{}) + for i := range numPeers { + pid := fmt.Sprintf("peer-%d", i) + if pid != offlinePeerID { + validatedPeersMap[pid] = struct{}{} + } + } + + peersCustomZone := nbdns.CustomZone{} + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + + components := account.GetPeerNetworkMapComponents( + ctx, + peerID, + peersCustomZone, + nil, + validatedPeersMap, + resourcePolicies, + routers, + groupIDToUserIDs, + ) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = CalculateNetworkMapFromComponents(ctx, components) + } +} diff --git a/management/server/types/networkmap_components.go b/management/server/types/networkmap_components.go new file mode 100644 index 000000000..ab6b006e6 --- /dev/null +++ b/management/server/types/networkmap_components.go @@ -0,0 +1,938 @@ +package types + +import ( + "context" + "maps" + "net" + "net/netip" + "slices" + "strconv" + "strings" + "time" + + "github.com/netbirdio/netbird/client/ssh/auth" + nbdns "github.com/netbirdio/netbird/dns" + resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" + routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/route" + "github.com/netbirdio/netbird/shared/management/domain" +) + +const EnvNewNetworkMapCompacted = "NB_NETWORK_MAP_COMPACTED" + +type NetworkMapComponents struct { + PeerID string + + Network *Network + AccountSettings *AccountSettingsInfo + DNSSettings *DNSSettings + CustomZoneDomain string + + Peers map[string]*nbpeer.Peer + Groups map[string]*Group + Policies []*Policy + Routes []*route.Route + NameServerGroups []*nbdns.NameServerGroup + AllDNSRecords []nbdns.SimpleRecord + AccountZones []nbdns.CustomZone + ResourcePoliciesMap map[string][]*Policy + RoutersMap map[string]map[string]*routerTypes.NetworkRouter + NetworkResources []*resourceTypes.NetworkResource + + GroupIDToUserIDs map[string][]string + AllowedUserIDs map[string]struct{} + PostureFailedPeers map[string]map[string]struct{} + + RouterPeers map[string]*nbpeer.Peer +} + +type AccountSettingsInfo struct { + PeerLoginExpirationEnabled bool + PeerLoginExpiration time.Duration + PeerInactivityExpirationEnabled bool + PeerInactivityExpiration time.Duration +} + +func (c *NetworkMapComponents) GetPeerInfo(peerID string) *nbpeer.Peer { + return c.Peers[peerID] +} + +func (c *NetworkMapComponents) GetRouterPeerInfo(peerID string) *nbpeer.Peer { + return c.RouterPeers[peerID] +} + +func (c *NetworkMapComponents) GetGroupInfo(groupID string) *Group { + return c.Groups[groupID] +} + +func (c *NetworkMapComponents) IsPeerInGroup(peerID, groupID string) bool { + group := c.GetGroupInfo(groupID) + if group == nil { + return false + } + + return slices.Contains(group.Peers, peerID) +} + +func (c *NetworkMapComponents) GetPeerGroups(peerID string) map[string]struct{} { + groups := make(map[string]struct{}) + for groupID, group := range c.Groups { + if slices.Contains(group.Peers, peerID) { + groups[groupID] = struct{}{} + } + } + return groups +} + +func (c *NetworkMapComponents) ValidatePostureChecksOnPeer(peerID string, postureCheckIDs []string) bool { + _, exists := c.Peers[peerID] + if !exists { + return false + } + if len(postureCheckIDs) == 0 { + return true + } + for _, checkID := range postureCheckIDs { + if failedPeers, exists := c.PostureFailedPeers[checkID]; exists { + if _, failed := failedPeers[peerID]; failed { + return false + } + } + } + return true +} + +func CalculateNetworkMapFromComponents(ctx context.Context, components *NetworkMapComponents) *NetworkMap { + return components.Calculate(ctx) +} + +func (c *NetworkMapComponents) Calculate(ctx context.Context) *NetworkMap { + targetPeerID := c.PeerID + + peerGroups := c.GetPeerGroups(targetPeerID) + + aclPeers, firewallRules, authorizedUsers, sshEnabled := c.getPeerConnectionResources(targetPeerID) + + peersToConnect, expiredPeers := c.filterPeersByLoginExpiration(aclPeers) + + routesUpdate := c.getRoutesToSync(targetPeerID, peersToConnect, peerGroups) + routesFirewallRules := c.getPeerRoutesFirewallRules(ctx, targetPeerID) + + isRouter, networkResourcesRoutes, sourcePeers := c.getNetworkResourcesRoutesToSync(targetPeerID) + var networkResourcesFirewallRules []*RouteFirewallRule + if isRouter { + networkResourcesFirewallRules = c.getPeerNetworkResourceFirewallRules(ctx, targetPeerID, networkResourcesRoutes) + } + + peersToConnectIncludingRouters := c.addNetworksRoutingPeers( + networkResourcesRoutes, + targetPeerID, + peersToConnect, + expiredPeers, + isRouter, + sourcePeers, + ) + + dnsManagementStatus := c.getPeerDNSManagementStatus(targetPeerID) + dnsUpdate := nbdns.Config{ + ServiceEnable: dnsManagementStatus, + } + + if dnsManagementStatus { + var customZones []nbdns.CustomZone + + if c.CustomZoneDomain != "" && len(c.AllDNSRecords) > 0 { + customZones = append(customZones, nbdns.CustomZone{ + Domain: c.CustomZoneDomain, + Records: c.AllDNSRecords, + }) + } + + customZones = append(customZones, c.AccountZones...) + + dnsUpdate.CustomZones = customZones + dnsUpdate.NameServerGroups = c.getPeerNSGroups(targetPeerID) + } + + return &NetworkMap{ + Peers: peersToConnectIncludingRouters, + Network: c.Network.Copy(), + Routes: append(networkResourcesRoutes, routesUpdate...), + DNSConfig: dnsUpdate, + OfflinePeers: expiredPeers, + FirewallRules: firewallRules, + RoutesFirewallRules: append(networkResourcesFirewallRules, routesFirewallRules...), + AuthorizedUsers: authorizedUsers, + EnableSSH: sshEnabled, + } +} + +func (c *NetworkMapComponents) getPeerConnectionResources(targetPeerID string) ([]*nbpeer.Peer, []*FirewallRule, map[string]map[string]struct{}, bool) { + targetPeer := c.GetPeerInfo(targetPeerID) + if targetPeer == nil { + return nil, nil, nil, false + } + + generateResources, getAccumulatedResources := c.connResourcesGenerator(targetPeer) + authorizedUsers := make(map[string]map[string]struct{}) + sshEnabled := false + + for _, policy := range c.Policies { + if !policy.Enabled { + continue + } + + for _, rule := range policy.Rules { + if !rule.Enabled { + continue + } + + var sourcePeers, destinationPeers []*nbpeer.Peer + var peerInSources, peerInDestinations bool + + if rule.SourceResource.Type == ResourceTypePeer && rule.SourceResource.ID != "" { + sourcePeers, peerInSources = c.getPeerFromResource(rule.SourceResource, targetPeerID) + } else { + sourcePeers, peerInSources = c.getAllPeersFromGroups(rule.Sources, targetPeerID, policy.SourcePostureChecks) + } + + if rule.DestinationResource.Type == ResourceTypePeer && rule.DestinationResource.ID != "" { + destinationPeers, peerInDestinations = c.getPeerFromResource(rule.DestinationResource, targetPeerID) + } else { + destinationPeers, peerInDestinations = c.getAllPeersFromGroups(rule.Destinations, targetPeerID, nil) + } + + if rule.Bidirectional { + if peerInSources { + generateResources(rule, destinationPeers, FirewallRuleDirectionIN) + } + if peerInDestinations { + generateResources(rule, sourcePeers, FirewallRuleDirectionOUT) + } + } + + if peerInSources { + generateResources(rule, destinationPeers, FirewallRuleDirectionOUT) + } + + if peerInDestinations { + generateResources(rule, sourcePeers, FirewallRuleDirectionIN) + } + + if peerInDestinations && rule.Protocol == PolicyRuleProtocolNetbirdSSH { + sshEnabled = true + switch { + case len(rule.AuthorizedGroups) > 0: + for groupID, localUsers := range rule.AuthorizedGroups { + userIDs, ok := c.GroupIDToUserIDs[groupID] + if !ok { + continue + } + + if len(localUsers) == 0 { + localUsers = []string{auth.Wildcard} + } + + for _, localUser := range localUsers { + if authorizedUsers[localUser] == nil { + authorizedUsers[localUser] = make(map[string]struct{}) + } + for _, userID := range userIDs { + authorizedUsers[localUser][userID] = struct{}{} + } + } + } + case rule.AuthorizedUser != "": + if authorizedUsers[auth.Wildcard] == nil { + authorizedUsers[auth.Wildcard] = make(map[string]struct{}) + } + authorizedUsers[auth.Wildcard][rule.AuthorizedUser] = struct{}{} + default: + authorizedUsers[auth.Wildcard] = c.getAllowedUserIDs() + } + } else if peerInDestinations && policyRuleImpliesLegacySSH(rule) && targetPeer.SSHEnabled { + sshEnabled = true + authorizedUsers[auth.Wildcard] = c.getAllowedUserIDs() + } + } + } + + peers, fwRules := getAccumulatedResources() + return peers, fwRules, authorizedUsers, sshEnabled +} + +func (c *NetworkMapComponents) getAllowedUserIDs() map[string]struct{} { + if c.AllowedUserIDs != nil { + result := make(map[string]struct{}, len(c.AllowedUserIDs)) + maps.Copy(result, c.AllowedUserIDs) + return result + } + return make(map[string]struct{}) +} + +func (c *NetworkMapComponents) connResourcesGenerator(targetPeer *nbpeer.Peer) (func(*PolicyRule, []*nbpeer.Peer, int), func() ([]*nbpeer.Peer, []*FirewallRule)) { + rulesExists := make(map[string]struct{}) + peersExists := make(map[string]struct{}) + rules := make([]*FirewallRule, 0) + peers := make([]*nbpeer.Peer, 0) + + return func(rule *PolicyRule, groupPeers []*nbpeer.Peer, direction int) { + for _, peer := range groupPeers { + if peer == nil { + continue + } + + if _, ok := peersExists[peer.ID]; !ok { + peers = append(peers, peer) + peersExists[peer.ID] = struct{}{} + } + + protocol := rule.Protocol + if protocol == PolicyRuleProtocolNetbirdSSH { + protocol = PolicyRuleProtocolTCP + } + + fr := FirewallRule{ + PolicyID: rule.ID, + PeerIP: net.IP(peer.IP).String(), + Direction: direction, + Action: string(rule.Action), + Protocol: string(protocol), + } + + ruleID := rule.ID + fr.PeerIP + strconv.Itoa(direction) + + fr.Protocol + fr.Action + strings.Join(rule.Ports, ",") + if _, ok := rulesExists[ruleID]; ok { + continue + } + rulesExists[ruleID] = struct{}{} + + if len(rule.Ports) == 0 && len(rule.PortRanges) == 0 { + rules = append(rules, &fr) + continue + } + + rules = append(rules, expandPortsAndRanges(fr, &PolicyRule{ + ID: rule.ID, + Ports: rule.Ports, + PortRanges: rule.PortRanges, + Protocol: rule.Protocol, + Action: rule.Action, + }, targetPeer)...) + } + }, func() ([]*nbpeer.Peer, []*FirewallRule) { + return peers, rules + } +} + +func (c *NetworkMapComponents) getAllPeersFromGroups(groups []string, peerID string, sourcePostureChecksIDs []string) ([]*nbpeer.Peer, bool) { + peerInGroups := false + uniquePeerIDs := c.getUniquePeerIDsFromGroupsIDs(groups) + filteredPeers := make([]*nbpeer.Peer, 0, len(uniquePeerIDs)) + + for _, p := range uniquePeerIDs { + peerInfo := c.GetPeerInfo(p) + if peerInfo == nil { + continue + } + + if _, ok := c.Peers[p]; !ok { + continue + } + + if !c.ValidatePostureChecksOnPeer(p, sourcePostureChecksIDs) { + continue + } + + if p == peerID { + peerInGroups = true + continue + } + + filteredPeers = append(filteredPeers, peerInfo) + } + + return filteredPeers, peerInGroups +} + +func (c *NetworkMapComponents) getUniquePeerIDsFromGroupsIDs(groups []string) []string { + peerIDs := make(map[string]struct{}, len(groups)) + for _, groupID := range groups { + group := c.GetGroupInfo(groupID) + if group == nil { + continue + } + + if group.IsGroupAll() || len(groups) == 1 { + return group.Peers + } + + for _, peerID := range group.Peers { + peerIDs[peerID] = struct{}{} + } + } + + ids := make([]string, 0, len(peerIDs)) + for peerID := range peerIDs { + ids = append(ids, peerID) + } + + return ids +} + +func (c *NetworkMapComponents) getPeerFromResource(resource Resource, peerID string) ([]*nbpeer.Peer, bool) { + if resource.ID == peerID { + return []*nbpeer.Peer{}, true + } + + peerInfo := c.GetPeerInfo(resource.ID) + if peerInfo == nil { + return []*nbpeer.Peer{}, false + } + + return []*nbpeer.Peer{peerInfo}, false +} + +func (c *NetworkMapComponents) filterPeersByLoginExpiration(aclPeers []*nbpeer.Peer) ([]*nbpeer.Peer, []*nbpeer.Peer) { + var peersToConnect []*nbpeer.Peer + var expiredPeers []*nbpeer.Peer + + for _, p := range aclPeers { + expired, _ := p.LoginExpired(c.AccountSettings.PeerLoginExpiration) + if c.AccountSettings.PeerLoginExpirationEnabled && expired { + expiredPeers = append(expiredPeers, p) + continue + } + peersToConnect = append(peersToConnect, p) + } + + return peersToConnect, expiredPeers +} + +func (c *NetworkMapComponents) getPeerDNSManagementStatus(peerID string) bool { + peerGroups := c.GetPeerGroups(peerID) + enabled := true + for _, groupID := range c.DNSSettings.DisabledManagementGroups { + if _, found := peerGroups[groupID]; found { + enabled = false + break + } + } + return enabled +} + +func (c *NetworkMapComponents) getPeerNSGroups(peerID string) []*nbdns.NameServerGroup { + groupList := c.GetPeerGroups(peerID) + + var peerNSGroups []*nbdns.NameServerGroup + + for _, nsGroup := range c.NameServerGroups { + if !nsGroup.Enabled { + continue + } + for _, gID := range nsGroup.Groups { + _, found := groupList[gID] + if found { + targetPeerInfo := c.GetPeerInfo(peerID) + if targetPeerInfo != nil && !c.peerIsNameserver(targetPeerInfo, nsGroup) { + peerNSGroups = append(peerNSGroups, nsGroup.Copy()) + break + } + } + } + } + + return peerNSGroups +} + +func (c *NetworkMapComponents) peerIsNameserver(peerInfo *nbpeer.Peer, nsGroup *nbdns.NameServerGroup) bool { + for _, ns := range nsGroup.NameServers { + if peerInfo.IP.String() == ns.IP.String() { + return true + } + } + return false +} + +func (c *NetworkMapComponents) getRoutesToSync(peerID string, aclPeers []*nbpeer.Peer, peerGroups LookupMap) []*route.Route { + routes, peerDisabledRoutes := c.getRoutingPeerRoutes(peerID) + peerRoutesMembership := make(LookupMap) + for _, r := range append(routes, peerDisabledRoutes...) { + peerRoutesMembership[string(r.GetHAUniqueID())] = struct{}{} + } + + for _, peer := range aclPeers { + activeRoutes, _ := c.getRoutingPeerRoutes(peer.ID) + groupFilteredRoutes := c.filterRoutesByGroups(activeRoutes, peerGroups) + filteredRoutes := c.filterRoutesFromPeersOfSameHAGroup(groupFilteredRoutes, peerRoutesMembership) + routes = append(routes, filteredRoutes...) + } + + return routes +} + +func (c *NetworkMapComponents) getRoutingPeerRoutes(peerID string) (enabledRoutes []*route.Route, disabledRoutes []*route.Route) { + peerInfo := c.GetPeerInfo(peerID) + if peerInfo == nil { + peerInfo = c.GetRouterPeerInfo(peerID) + } + if peerInfo == nil { + return enabledRoutes, disabledRoutes + } + + seenRoute := make(map[route.ID]struct{}) + + takeRoute := func(r *route.Route) { + if _, ok := seenRoute[r.ID]; ok { + return + } + seenRoute[r.ID] = struct{}{} + + routeObj := c.copyRoute(r) + routeObj.Peer = peerInfo.Key + + if r.Enabled { + enabledRoutes = append(enabledRoutes, routeObj) + return + } + disabledRoutes = append(disabledRoutes, routeObj) + } + + for _, r := range c.Routes { + for _, groupID := range r.PeerGroups { + group := c.GetGroupInfo(groupID) + if group == nil { + continue + } + for _, id := range group.Peers { + if id != peerID { + continue + } + + newPeerRoute := c.copyRoute(r) + newPeerRoute.Peer = id + newPeerRoute.PeerGroups = nil + newPeerRoute.ID = route.ID(string(r.ID) + ":" + id) + takeRoute(newPeerRoute) + break + } + } + if r.Peer == peerID { + takeRoute(c.copyRoute(r)) + } + } + + return enabledRoutes, disabledRoutes +} + +func (c *NetworkMapComponents) copyRoute(r *route.Route) *route.Route { + var groups, accessControlGroups, peerGroups []string + var domains domain.List + + if r.Groups != nil { + groups = append([]string{}, r.Groups...) + } + if r.AccessControlGroups != nil { + accessControlGroups = append([]string{}, r.AccessControlGroups...) + } + if r.PeerGroups != nil { + peerGroups = append([]string{}, r.PeerGroups...) + } + if r.Domains != nil { + domains = append(domain.List{}, r.Domains...) + } + + return &route.Route{ + ID: r.ID, + AccountID: r.AccountID, + Network: r.Network, + NetworkType: r.NetworkType, + Description: r.Description, + Peer: r.Peer, + PeerID: r.PeerID, + Metric: r.Metric, + Masquerade: r.Masquerade, + NetID: r.NetID, + Enabled: r.Enabled, + Groups: groups, + AccessControlGroups: accessControlGroups, + PeerGroups: peerGroups, + Domains: domains, + KeepRoute: r.KeepRoute, + SkipAutoApply: r.SkipAutoApply, + } +} + +func (c *NetworkMapComponents) filterRoutesByGroups(routes []*route.Route, groupListMap LookupMap) []*route.Route { + var filteredRoutes []*route.Route + for _, r := range routes { + for _, groupID := range r.Groups { + _, found := groupListMap[groupID] + if found { + filteredRoutes = append(filteredRoutes, r) + break + } + } + } + return filteredRoutes +} + +func (c *NetworkMapComponents) filterRoutesFromPeersOfSameHAGroup(routes []*route.Route, peerMemberships LookupMap) []*route.Route { + var filteredRoutes []*route.Route + for _, r := range routes { + _, found := peerMemberships[string(r.GetHAUniqueID())] + if !found { + filteredRoutes = append(filteredRoutes, r) + } + } + return filteredRoutes +} + +func (c *NetworkMapComponents) getPeerRoutesFirewallRules(ctx context.Context, peerID string) []*RouteFirewallRule { + routesFirewallRules := make([]*RouteFirewallRule, 0) + + enabledRoutes, _ := c.getRoutingPeerRoutes(peerID) + for _, r := range enabledRoutes { + if len(r.AccessControlGroups) == 0 { + defaultPermit := c.getDefaultPermit(r) + routesFirewallRules = append(routesFirewallRules, defaultPermit...) + continue + } + + distributionPeers := c.getDistributionGroupsPeers(r) + + for _, accessGroup := range r.AccessControlGroups { + policies := c.getAllRoutePoliciesFromGroups([]string{accessGroup}) + rules := c.getRouteFirewallRules(ctx, peerID, policies, r, distributionPeers) + routesFirewallRules = append(routesFirewallRules, rules...) + } + } + + return routesFirewallRules +} + +func (c *NetworkMapComponents) getDefaultPermit(r *route.Route) []*RouteFirewallRule { + var rules []*RouteFirewallRule + + sources := []string{"0.0.0.0/0"} + if r.Network.Addr().Is6() { + sources = []string{"::/0"} + } + + rule := RouteFirewallRule{ + SourceRanges: sources, + Action: string(PolicyTrafficActionAccept), + Destination: r.Network.String(), + Protocol: string(PolicyRuleProtocolALL), + Domains: r.Domains, + IsDynamic: r.IsDynamic(), + RouteID: r.ID, + } + + rules = append(rules, &rule) + + if r.IsDynamic() { + ruleV6 := rule + ruleV6.SourceRanges = []string{"::/0"} + rules = append(rules, &ruleV6) + } + + return rules +} + +func (c *NetworkMapComponents) getDistributionGroupsPeers(r *route.Route) map[string]struct{} { + distPeers := make(map[string]struct{}) + for _, id := range r.Groups { + group := c.GetGroupInfo(id) + if group == nil { + continue + } + + for _, pID := range group.Peers { + distPeers[pID] = struct{}{} + } + } + return distPeers +} + +func (c *NetworkMapComponents) getAllRoutePoliciesFromGroups(accessControlGroups []string) []*Policy { + routePolicies := make([]*Policy, 0) + for _, groupID := range accessControlGroups { + for _, policy := range c.Policies { + for _, rule := range policy.Rules { + if slices.Contains(rule.Destinations, groupID) { + routePolicies = append(routePolicies, policy) + } + } + } + } + + return routePolicies +} + +func (c *NetworkMapComponents) getRouteFirewallRules(ctx context.Context, peerID string, policies []*Policy, route *route.Route, distributionPeers map[string]struct{}) []*RouteFirewallRule { + var fwRules []*RouteFirewallRule + for _, policy := range policies { + if !policy.Enabled { + continue + } + + for _, rule := range policy.Rules { + if !rule.Enabled { + continue + } + + rulePeers := c.getRulePeers(rule, policy.SourcePostureChecks, peerID, distributionPeers) + rules := generateRouteFirewallRules(ctx, route, rule, rulePeers, FirewallRuleDirectionIN) + fwRules = append(fwRules, rules...) + } + } + return fwRules +} + +func (c *NetworkMapComponents) getRulePeers(rule *PolicyRule, postureChecks []string, peerID string, distributionPeers map[string]struct{}) []*nbpeer.Peer { + distPeersWithPolicy := make(map[string]struct{}) + for _, id := range rule.Sources { + group := c.GetGroupInfo(id) + if group == nil { + continue + } + + for _, pID := range group.Peers { + if pID == peerID { + continue + } + _, distPeer := distributionPeers[pID] + _, valid := c.Peers[pID] + if distPeer && valid && c.ValidatePostureChecksOnPeer(pID, postureChecks) { + distPeersWithPolicy[pID] = struct{}{} + } + } + } + if rule.SourceResource.Type == ResourceTypePeer && rule.SourceResource.ID != "" { + _, distPeer := distributionPeers[rule.SourceResource.ID] + _, valid := c.Peers[rule.SourceResource.ID] + if distPeer && valid && c.ValidatePostureChecksOnPeer(rule.SourceResource.ID, postureChecks) { + distPeersWithPolicy[rule.SourceResource.ID] = struct{}{} + } + } + + distributionGroupPeers := make([]*nbpeer.Peer, 0, len(distPeersWithPolicy)) + for pID := range distPeersWithPolicy { + peerInfo := c.GetPeerInfo(pID) + if peerInfo == nil { + continue + } + distributionGroupPeers = append(distributionGroupPeers, peerInfo) + } + return distributionGroupPeers +} + +func (c *NetworkMapComponents) getNetworkResourcesRoutesToSync(peerID string) (bool, []*route.Route, map[string]struct{}) { + var isRoutingPeer bool + var routes []*route.Route + allSourcePeers := make(map[string]struct{}) + + for _, resource := range c.NetworkResources { + if !resource.Enabled { + continue + } + + var addSourcePeers bool + + networkRoutingPeers, exists := c.RoutersMap[resource.NetworkID] + if exists { + if router, ok := networkRoutingPeers[peerID]; ok { + isRoutingPeer, addSourcePeers = true, true + routes = append(routes, c.getNetworkResourcesRoutes(resource, peerID, router)...) + } + } + + addedResourceRoute := false + for _, policy := range c.ResourcePoliciesMap[resource.ID] { + var peers []string + if policy.Rules[0].SourceResource.Type == ResourceTypePeer && policy.Rules[0].SourceResource.ID != "" { + peers = []string{policy.Rules[0].SourceResource.ID} + } else { + peers = c.getUniquePeerIDsFromGroupsIDs(policy.SourceGroups()) + } + if addSourcePeers { + for _, pID := range c.getPostureValidPeers(peers, policy.SourcePostureChecks) { + allSourcePeers[pID] = struct{}{} + } + } else if slices.Contains(peers, peerID) && c.ValidatePostureChecksOnPeer(peerID, policy.SourcePostureChecks) { + for peerId, router := range networkRoutingPeers { + routes = append(routes, c.getNetworkResourcesRoutes(resource, peerId, router)...) + } + addedResourceRoute = true + } + if addedResourceRoute { + break + } + } + } + + return isRoutingPeer, routes, allSourcePeers +} + +func (c *NetworkMapComponents) getNetworkResourcesRoutes(resource *resourceTypes.NetworkResource, peerID string, router *routerTypes.NetworkRouter) []*route.Route { + resourceAppliedPolicies := c.ResourcePoliciesMap[resource.ID] + + var routes []*route.Route + if len(resourceAppliedPolicies) > 0 { + peerInfo := c.GetPeerInfo(peerID) + if peerInfo != nil { + routes = append(routes, c.networkResourceToRoute(resource, peerInfo, router)) + } + } + + return routes +} + +func (c *NetworkMapComponents) networkResourceToRoute(resource *resourceTypes.NetworkResource, peer *nbpeer.Peer, router *routerTypes.NetworkRouter) *route.Route { + r := &route.Route{ + ID: route.ID(resource.ID + ":" + peer.ID), + AccountID: resource.AccountID, + Peer: peer.Key, + PeerID: peer.ID, + Metric: router.Metric, + Masquerade: router.Masquerade, + Enabled: resource.Enabled, + KeepRoute: true, + NetID: route.NetID(resource.Name), + Description: resource.Description, + } + + if resource.Type == resourceTypes.Host || resource.Type == resourceTypes.Subnet { + r.Network = resource.Prefix + + r.NetworkType = route.IPv4Network + if resource.Prefix.Addr().Is6() { + r.NetworkType = route.IPv6Network + } + } + + if resource.Type == resourceTypes.Domain { + domainList, err := domain.FromStringList([]string{resource.Domain}) + if err == nil { + r.Domains = domainList + r.NetworkType = route.DomainNetwork + r.Network = netip.PrefixFrom(netip.AddrFrom4([4]byte{192, 0, 2, 0}), 32) + } + } + + return r +} + +func (c *NetworkMapComponents) getPostureValidPeers(inputPeers []string, postureChecksIDs []string) []string { + var dest []string + for _, peerID := range inputPeers { + if c.ValidatePostureChecksOnPeer(peerID, postureChecksIDs) { + dest = append(dest, peerID) + } + } + return dest +} + +func (c *NetworkMapComponents) getPeerNetworkResourceFirewallRules(ctx context.Context, peerID string, routes []*route.Route) []*RouteFirewallRule { + routesFirewallRules := make([]*RouteFirewallRule, 0) + + peerInfo := c.GetPeerInfo(peerID) + if peerInfo == nil { + return routesFirewallRules + } + + for _, r := range routes { + if r.Peer != peerInfo.Key { + continue + } + + resourceID := string(r.GetResourceID()) + resourcePolicies := c.ResourcePoliciesMap[resourceID] + distributionPeers := c.getPoliciesSourcePeers(resourcePolicies) + + rules := c.getRouteFirewallRules(ctx, peerID, resourcePolicies, r, distributionPeers) + for _, rule := range rules { + if len(rule.SourceRanges) > 0 { + routesFirewallRules = append(routesFirewallRules, rule) + } + } + } + + return routesFirewallRules +} + +func (c *NetworkMapComponents) getPoliciesSourcePeers(policies []*Policy) map[string]struct{} { + sourcePeers := make(map[string]struct{}) + + for _, policy := range policies { + for _, rule := range policy.Rules { + for _, sourceGroup := range rule.Sources { + group := c.GetGroupInfo(sourceGroup) + if group == nil { + continue + } + + for _, peer := range group.Peers { + sourcePeers[peer] = struct{}{} + } + } + + if rule.SourceResource.Type == ResourceTypePeer && rule.SourceResource.ID != "" { + sourcePeers[rule.SourceResource.ID] = struct{}{} + } + } + } + + return sourcePeers +} + +func (c *NetworkMapComponents) addNetworksRoutingPeers( + networkResourcesRoutes []*route.Route, + peerID string, + peersToConnect []*nbpeer.Peer, + expiredPeers []*nbpeer.Peer, + isRouter bool, + sourcePeers map[string]struct{}, +) []*nbpeer.Peer { + + networkRoutesPeers := make(map[string]struct{}, len(networkResourcesRoutes)) + for _, r := range networkResourcesRoutes { + networkRoutesPeers[r.PeerID] = struct{}{} + } + + delete(sourcePeers, peerID) + delete(networkRoutesPeers, peerID) + + for _, existingPeer := range peersToConnect { + delete(sourcePeers, existingPeer.ID) + delete(networkRoutesPeers, existingPeer.ID) + } + for _, expPeer := range expiredPeers { + delete(sourcePeers, expPeer.ID) + delete(networkRoutesPeers, expPeer.ID) + } + + missingPeers := make(map[string]struct{}, len(sourcePeers)+len(networkRoutesPeers)) + if isRouter { + for p := range sourcePeers { + missingPeers[p] = struct{}{} + } + } + for p := range networkRoutesPeers { + missingPeers[p] = struct{}{} + } + + for p := range missingPeers { + peerInfo := c.GetPeerInfo(p) + if peerInfo == nil { + peerInfo = c.GetRouterPeerInfo(p) + } + if peerInfo != nil { + peersToConnect = append(peersToConnect, peerInfo) + } + } + + return peersToConnect +} diff --git a/management/server/types/networkmap_components_compact.go b/management/server/types/networkmap_components_compact.go new file mode 100644 index 000000000..b60f8bdb1 --- /dev/null +++ b/management/server/types/networkmap_components_compact.go @@ -0,0 +1,230 @@ +package types + +import ( + nbdns "github.com/netbirdio/netbird/dns" + resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" + routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/route" +) + +type GroupCompact struct { + Name string + PeerIndexes []int +} + +type NetworkMapComponentsCompact struct { + PeerID string + + Network *Network + AccountSettings *AccountSettingsInfo + DNSSettings *DNSSettings + CustomZoneDomain string + + AllPeers []*nbpeer.Peer + PeerIndexes []int + RouterPeerIndexes []int + + Groups map[string]*GroupCompact + AllPolicies []*Policy + PolicyIndexes []int + ResourcePoliciesMap map[string][]int + Routes []*route.Route + NameServerGroups []*nbdns.NameServerGroup + AllDNSRecords []nbdns.SimpleRecord + AccountZones []nbdns.CustomZone + + RoutersMap map[string]map[string]*routerTypes.NetworkRouter + NetworkResources []*resourceTypes.NetworkResource + + GroupIDToUserIDs map[string][]string + AllowedUserIDs map[string]struct{} + PostureFailedPeers map[string]map[string]struct{} +} + +func (c *NetworkMapComponents) ToCompact() *NetworkMapComponentsCompact { + peerToIndex := make(map[string]int) + var allPeers []*nbpeer.Peer + + for id, peer := range c.Peers { + if _, exists := peerToIndex[id]; !exists { + peerToIndex[id] = len(allPeers) + allPeers = append(allPeers, peer) + } + } + + for id, peer := range c.RouterPeers { + if _, exists := peerToIndex[id]; !exists { + peerToIndex[id] = len(allPeers) + allPeers = append(allPeers, peer) + } + } + + peerIndexes := make([]int, 0, len(c.Peers)) + for id := range c.Peers { + peerIndexes = append(peerIndexes, peerToIndex[id]) + } + + routerPeerIndexes := make([]int, 0, len(c.RouterPeers)) + for id := range c.RouterPeers { + routerPeerIndexes = append(routerPeerIndexes, peerToIndex[id]) + } + + groups := make(map[string]*GroupCompact, len(c.Groups)) + for id, group := range c.Groups { + peerIdxs := make([]int, 0, len(group.Peers)) + for _, peerID := range group.Peers { + if idx, ok := peerToIndex[peerID]; ok { + peerIdxs = append(peerIdxs, idx) + } + } + groups[id] = &GroupCompact{ + Name: group.Name, + PeerIndexes: peerIdxs, + } + } + + policyToIndex := make(map[*Policy]int) + var allPolicies []*Policy + + for _, policy := range c.Policies { + if _, exists := policyToIndex[policy]; !exists { + policyToIndex[policy] = len(allPolicies) + allPolicies = append(allPolicies, policy) + } + } + + for _, policies := range c.ResourcePoliciesMap { + for _, policy := range policies { + if _, exists := policyToIndex[policy]; !exists { + policyToIndex[policy] = len(allPolicies) + allPolicies = append(allPolicies, policy) + } + } + } + + policyIndexes := make([]int, len(c.Policies)) + for i, policy := range c.Policies { + policyIndexes[i] = policyToIndex[policy] + } + + var resourcePoliciesMap map[string][]int + if len(c.ResourcePoliciesMap) > 0 { + resourcePoliciesMap = make(map[string][]int, len(c.ResourcePoliciesMap)) + for resID, policies := range c.ResourcePoliciesMap { + indexes := make([]int, len(policies)) + for i, policy := range policies { + indexes[i] = policyToIndex[policy] + } + resourcePoliciesMap[resID] = indexes + } + } + + return &NetworkMapComponentsCompact{ + PeerID: c.PeerID, + Network: c.Network, + AccountSettings: c.AccountSettings, + DNSSettings: c.DNSSettings, + CustomZoneDomain: c.CustomZoneDomain, + + AllPeers: allPeers, + PeerIndexes: peerIndexes, + RouterPeerIndexes: routerPeerIndexes, + + Groups: groups, + AllPolicies: allPolicies, + PolicyIndexes: policyIndexes, + ResourcePoliciesMap: resourcePoliciesMap, + Routes: c.Routes, + NameServerGroups: c.NameServerGroups, + AllDNSRecords: c.AllDNSRecords, + AccountZones: c.AccountZones, + + RoutersMap: c.RoutersMap, + NetworkResources: c.NetworkResources, + + GroupIDToUserIDs: c.GroupIDToUserIDs, + AllowedUserIDs: c.AllowedUserIDs, + PostureFailedPeers: c.PostureFailedPeers, + } +} + +func (c *NetworkMapComponentsCompact) ToFull() *NetworkMapComponents { + peers := make(map[string]*nbpeer.Peer, len(c.PeerIndexes)) + for _, idx := range c.PeerIndexes { + if idx >= 0 && idx < len(c.AllPeers) { + peer := c.AllPeers[idx] + peers[peer.ID] = peer + } + } + + routerPeers := make(map[string]*nbpeer.Peer, len(c.RouterPeerIndexes)) + for _, idx := range c.RouterPeerIndexes { + if idx >= 0 && idx < len(c.AllPeers) { + peer := c.AllPeers[idx] + routerPeers[peer.ID] = peer + } + } + + groups := make(map[string]*Group, len(c.Groups)) + for id, gc := range c.Groups { + peerIDs := make([]string, 0, len(gc.PeerIndexes)) + for _, idx := range gc.PeerIndexes { + if idx >= 0 && idx < len(c.AllPeers) { + peerIDs = append(peerIDs, c.AllPeers[idx].ID) + } + } + groups[id] = &Group{ + ID: id, + Name: gc.Name, + Peers: peerIDs, + } + } + + policies := make([]*Policy, len(c.PolicyIndexes)) + for i, idx := range c.PolicyIndexes { + if idx >= 0 && idx < len(c.AllPolicies) { + policies[i] = c.AllPolicies[idx] + } + } + + var resourcePoliciesMap map[string][]*Policy + if len(c.ResourcePoliciesMap) > 0 { + resourcePoliciesMap = make(map[string][]*Policy, len(c.ResourcePoliciesMap)) + for resID, indexes := range c.ResourcePoliciesMap { + pols := make([]*Policy, 0, len(indexes)) + for _, idx := range indexes { + if idx >= 0 && idx < len(c.AllPolicies) { + pols = append(pols, c.AllPolicies[idx]) + } + } + resourcePoliciesMap[resID] = pols + } + } + + return &NetworkMapComponents{ + PeerID: c.PeerID, + Network: c.Network, + AccountSettings: c.AccountSettings, + DNSSettings: c.DNSSettings, + CustomZoneDomain: c.CustomZoneDomain, + + Peers: peers, + RouterPeers: routerPeers, + + Groups: groups, + Policies: policies, + Routes: c.Routes, + NameServerGroups: c.NameServerGroups, + AllDNSRecords: c.AllDNSRecords, + AccountZones: c.AccountZones, + + ResourcePoliciesMap: resourcePoliciesMap, + RoutersMap: c.RoutersMap, + NetworkResources: c.NetworkResources, + + GroupIDToUserIDs: c.GroupIDToUserIDs, + AllowedUserIDs: c.AllowedUserIDs, + PostureFailedPeers: c.PostureFailedPeers, + } +} From 5d171f181acdec7be74b7ea562a91778d9e265f6 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Mon, 23 Feb 2026 16:08:28 +0100 Subject: [PATCH 157/374] [proxy] Send proxy updates on account delete (#5375) --- .../modules/reverseproxy/interface.go | 1 + .../modules/reverseproxy/interface_mock.go | 14 +++ .../modules/reverseproxy/manager/manager.go | 92 ++++++++++++++++--- management/internals/shared/grpc/proxy.go | 52 ++++++----- .../shared/grpc/proxy_group_access_test.go | 4 + .../internals/shared/grpc/proxy_test.go | 72 +++++++++------ management/server/account.go | 5 + management/server/account_test.go | 4 +- .../proxy/auth_callback_integration_test.go | 4 + management/server/store/sql_store.go | 22 +++++ management/server/store/store.go | 1 + management/server/store/store_mock.go | 15 +++ proxy/management_integration_test.go | 4 + 13 files changed, 227 insertions(+), 63 deletions(-) diff --git a/management/internals/modules/reverseproxy/interface.go b/management/internals/modules/reverseproxy/interface.go index 7614b3ce5..8a81ee307 100644 --- a/management/internals/modules/reverseproxy/interface.go +++ b/management/internals/modules/reverseproxy/interface.go @@ -12,6 +12,7 @@ type Manager interface { CreateService(ctx context.Context, accountID, userID string, service *Service) (*Service, error) UpdateService(ctx context.Context, accountID, userID string, service *Service) (*Service, error) DeleteService(ctx context.Context, accountID, userID, serviceID string) error + DeleteAllServices(ctx context.Context, accountID, userID string) error SetCertificateIssuedAt(ctx context.Context, accountID, serviceID string) error SetStatus(ctx context.Context, accountID, serviceID string, status ProxyStatus) error ReloadAllServicesForAccount(ctx context.Context, accountID string) error diff --git a/management/internals/modules/reverseproxy/interface_mock.go b/management/internals/modules/reverseproxy/interface_mock.go index d5f38c38a..6533d90bf 100644 --- a/management/internals/modules/reverseproxy/interface_mock.go +++ b/management/internals/modules/reverseproxy/interface_mock.go @@ -49,6 +49,20 @@ func (mr *MockManagerMockRecorder) CreateService(ctx, accountID, userID, service return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateService", reflect.TypeOf((*MockManager)(nil).CreateService), ctx, accountID, userID, service) } +// DeleteAllServices mocks base method. +func (m *MockManager) DeleteAllServices(ctx context.Context, accountID, userID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteAllServices", ctx, accountID, userID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteAllServices indicates an expected call of DeleteAllServices. +func (mr *MockManagerMockRecorder) DeleteAllServices(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllServices", reflect.TypeOf((*MockManager)(nil).DeleteAllServices), ctx, accountID, userID) +} + // DeleteService mocks base method. func (m *MockManager) DeleteService(ctx context.Context, accountID, userID, serviceID string) error { m.ctrl.T.Helper() diff --git a/management/internals/modules/reverseproxy/manager/manager.go b/management/internals/modules/reverseproxy/manager/manager.go index 535705a37..8068178a5 100644 --- a/management/internals/modules/reverseproxy/manager/manager.go +++ b/management/internals/modules/reverseproxy/manager/manager.go @@ -16,6 +16,7 @@ import ( "github.com/netbirdio/netbird/management/server/permissions/modules" "github.com/netbirdio/netbird/management/server/permissions/operations" "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/shared/management/proto" "github.com/netbirdio/netbird/shared/management/status" ) @@ -150,7 +151,7 @@ func (m *managerImpl) CreateService(ctx context.Context, accountID, userID strin return nil, fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err) } - m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Create, "", m.proxyGRPCServer.GetOIDCValidationConfig()), service.ProxyCluster) + m.sendServiceUpdate(service, reverseproxy.Create, service.ProxyCluster, "") m.accountManager.UpdateAccountPeers(ctx, accountID) @@ -330,21 +331,35 @@ func (m *managerImpl) preserveServiceMetadata(service, existingService *reversep } func (m *managerImpl) sendServiceUpdateNotifications(service *reverseproxy.Service, updateInfo *serviceUpdateInfo) { - oidcCfg := m.proxyGRPCServer.GetOIDCValidationConfig() - switch { case updateInfo.domainChanged && updateInfo.oldCluster != service.ProxyCluster: - m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Delete, "", oidcCfg), updateInfo.oldCluster) - m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Create, "", oidcCfg), service.ProxyCluster) + m.sendServiceUpdate(service, reverseproxy.Delete, updateInfo.oldCluster, "") + m.sendServiceUpdate(service, reverseproxy.Create, service.ProxyCluster, "") case !service.Enabled && updateInfo.serviceEnabledChanged: - m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Delete, "", oidcCfg), service.ProxyCluster) + m.sendServiceUpdate(service, reverseproxy.Delete, service.ProxyCluster, "") case service.Enabled && updateInfo.serviceEnabledChanged: - m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Create, "", oidcCfg), service.ProxyCluster) + m.sendServiceUpdate(service, reverseproxy.Create, service.ProxyCluster, "") default: - m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Update, "", oidcCfg), service.ProxyCluster) + m.sendServiceUpdate(service, reverseproxy.Update, service.ProxyCluster, "") } } +func (m *managerImpl) sendServiceUpdate(service *reverseproxy.Service, operation reverseproxy.Operation, cluster, oldService string) { + oidcCfg := m.proxyGRPCServer.GetOIDCValidationConfig() + mapping := service.ToProtoMapping(operation, oldService, oidcCfg) + m.sendMappingsToCluster([]*proto.ProxyMapping{mapping}, cluster) +} + +func (m *managerImpl) sendMappingsToCluster(mappings []*proto.ProxyMapping, cluster string) { + if len(mappings) == 0 { + return + } + update := &proto.GetMappingUpdateResponse{ + Mapping: mappings, + } + m.proxyGRPCServer.SendServiceUpdateToCluster(update, cluster) +} + // validateTargetReferences checks that all target IDs reference existing peers or resources in the account. func validateTargetReferences(ctx context.Context, transaction store.Store, accountID string, targets []*reverseproxy.Target) error { for _, target := range targets { @@ -397,7 +412,54 @@ func (m *managerImpl) DeleteService(ctx context.Context, accountID, userID, serv m.accountManager.StoreEvent(ctx, userID, serviceID, accountID, activity.ServiceDeleted, service.EventMeta()) - m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Delete, "", m.proxyGRPCServer.GetOIDCValidationConfig()), service.ProxyCluster) + m.sendServiceUpdate(service, reverseproxy.Delete, service.ProxyCluster, "") + + m.accountManager.UpdateAccountPeers(ctx, accountID) + + return nil +} + +func (m *managerImpl) DeleteAllServices(ctx context.Context, accountID, userID string) error { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Delete) + if err != nil { + return status.NewPermissionValidationError(err) + } + if !ok { + return status.NewPermissionDeniedError() + } + + var services []*reverseproxy.Service + err = m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + var err error + services, err = transaction.GetServicesByAccountID(ctx, store.LockingStrengthUpdate, accountID) + if err != nil { + return err + } + + for _, service := range services { + if err = transaction.DeleteService(ctx, accountID, service.ID); err != nil { + return fmt.Errorf("failed to delete service: %w", err) + } + } + + return nil + }) + if err != nil { + return err + } + + clusterMappings := make(map[string][]*proto.ProxyMapping) + oidcCfg := m.proxyGRPCServer.GetOIDCValidationConfig() + + for _, service := range services { + m.accountManager.StoreEvent(ctx, userID, service.ID, accountID, activity.ServiceDeleted, service.EventMeta()) + mapping := service.ToProtoMapping(reverseproxy.Delete, "", oidcCfg) + clusterMappings[service.ProxyCluster] = append(clusterMappings[service.ProxyCluster], mapping) + } + + for cluster, mappings := range clusterMappings { + m.sendMappingsToCluster(mappings, cluster) + } m.accountManager.UpdateAccountPeers(ctx, accountID) @@ -452,7 +514,7 @@ func (m *managerImpl) ReloadService(ctx context.Context, accountID, serviceID st return fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err) } - m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Update, "", m.proxyGRPCServer.GetOIDCValidationConfig()), service.ProxyCluster) + m.sendServiceUpdate(service, reverseproxy.Update, service.ProxyCluster, "") m.accountManager.UpdateAccountPeers(ctx, accountID) @@ -465,12 +527,20 @@ func (m *managerImpl) ReloadAllServicesForAccount(ctx context.Context, accountID return fmt.Errorf("failed to get services: %w", err) } + clusterMappings := make(map[string][]*proto.ProxyMapping) + oidcCfg := m.proxyGRPCServer.GetOIDCValidationConfig() + for _, service := range services { err = m.replaceHostByLookup(ctx, accountID, service) if err != nil { return fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err) } - m.proxyGRPCServer.SendServiceUpdateToCluster(service.ToProtoMapping(reverseproxy.Update, "", m.proxyGRPCServer.GetOIDCValidationConfig()), service.ProxyCluster) + mapping := service.ToProtoMapping(reverseproxy.Update, "", oidcCfg) + clusterMappings[service.ProxyCluster] = append(clusterMappings[service.ProxyCluster], mapping) + } + + for cluster, mappings := range clusterMappings { + m.sendMappingsToCluster(mappings, cluster) } return nil diff --git a/management/internals/shared/grpc/proxy.go b/management/internals/shared/grpc/proxy.go index 4771d35af..e47ea5315 100644 --- a/management/internals/shared/grpc/proxy.go +++ b/management/internals/shared/grpc/proxy.go @@ -61,9 +61,6 @@ type ProxyServiceServer struct { // Map of cluster address -> set of proxy IDs clusterProxies sync.Map - // Channel for broadcasting reverse proxy updates to all proxies - updatesChan chan *proto.ProxyMapping - // Manager for access logs accessLogManager accesslogs.Manager @@ -101,7 +98,7 @@ type proxyConnection struct { proxyID string address string stream proto.ProxyService_GetMappingUpdateServer - sendChan chan *proto.ProxyMapping + sendChan chan *proto.GetMappingUpdateResponse ctx context.Context cancel context.CancelFunc } @@ -110,7 +107,6 @@ type proxyConnection struct { func NewProxyServiceServer(accessLogMgr accesslogs.Manager, tokenStore *OneTimeTokenStore, oidcConfig ProxyOIDCConfig, peersManager peers.Manager, usersManager users.Manager) *ProxyServiceServer { ctx, cancel := context.WithCancel(context.Background()) s := &ProxyServiceServer{ - updatesChan: make(chan *proto.ProxyMapping, 100), accessLogManager: accessLogMgr, oidcConfig: oidcConfig, tokenStore: tokenStore, @@ -177,7 +173,7 @@ func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest proxyID: proxyID, address: proxyAddress, stream: stream, - sendChan: make(chan *proto.ProxyMapping, 100), + sendChan: make(chan *proto.GetMappingUpdateResponse, 100), ctx: connCtx, cancel: cancel, } @@ -288,7 +284,7 @@ func (s *ProxyServiceServer) sender(conn *proxyConnection, errChan chan<- error) for { select { case msg := <-conn.sendChan: - if err := conn.stream.Send(&proto.GetMappingUpdateResponse{Mapping: []*proto.ProxyMapping{msg}}); err != nil { + if err := conn.stream.Send(msg); err != nil { errChan <- err return } @@ -339,7 +335,7 @@ func (s *ProxyServiceServer) SendAccessLog(ctx context.Context, req *proto.SendA // Management should call this when services are created/updated/removed. // For create/update operations a unique one-time auth token is generated per // proxy so that every replica can independently authenticate with management. -func (s *ProxyServiceServer) SendServiceUpdate(update *proto.ProxyMapping) { +func (s *ProxyServiceServer) SendServiceUpdate(update *proto.GetMappingUpdateResponse) { log.Debugf("Broadcasting service update to all connected proxy servers") s.connectedProxies.Range(func(key, value interface{}) bool { conn := value.(*proxyConnection) @@ -349,7 +345,7 @@ func (s *ProxyServiceServer) SendServiceUpdate(update *proto.ProxyMapping) { } select { case conn.sendChan <- msg: - log.Debugf("Sent service update with id %s to proxy server %s", update.Id, conn.proxyID) + log.Debugf("Sent service update to proxy server %s", conn.proxyID) default: log.Warnf("Failed to send service update to proxy server %s (channel full)", conn.proxyID) } @@ -418,7 +414,7 @@ func (s *ProxyServiceServer) removeFromCluster(clusterAddr, proxyID string) { // If clusterAddr is empty, broadcasts to all connected proxy servers (backward compatibility). // For create/update operations a unique one-time auth token is generated per // proxy so that every replica can independently authenticate with management. -func (s *ProxyServiceServer) SendServiceUpdateToCluster(update *proto.ProxyMapping, clusterAddr string) { +func (s *ProxyServiceServer) SendServiceUpdateToCluster(update *proto.GetMappingUpdateResponse, clusterAddr string) { if clusterAddr == "" { s.SendServiceUpdate(update) return @@ -441,7 +437,7 @@ func (s *ProxyServiceServer) SendServiceUpdateToCluster(update *proto.ProxyMappi } select { case conn.sendChan <- msg: - log.Debugf("Sent service update with id %s to proxy %s in cluster %s", update.Id, proxyID, clusterAddr) + log.Debugf("Sent service update to proxy %s in cluster %s", proxyID, clusterAddr) default: log.Warnf("Failed to send service update to proxy %s in cluster %s (channel full)", proxyID, clusterAddr) } @@ -451,23 +447,31 @@ func (s *ProxyServiceServer) SendServiceUpdateToCluster(update *proto.ProxyMappi } // perProxyMessage returns a copy of update with a fresh one-time token for -// create/update operations. For delete operations the original message is -// returned unchanged because proxies do not need to authenticate for removal. +// create/update operations. For delete operations the original mapping is +// used unchanged because proxies do not need to authenticate for removal. // Returns nil if token generation fails (the proxy should be skipped). -func (s *ProxyServiceServer) perProxyMessage(update *proto.ProxyMapping, proxyID string) *proto.ProxyMapping { - if update.Type == proto.ProxyMappingUpdateType_UPDATE_TYPE_REMOVED || update.AccountId == "" { - return update +func (s *ProxyServiceServer) perProxyMessage(update *proto.GetMappingUpdateResponse, proxyID string) *proto.GetMappingUpdateResponse { + resp := make([]*proto.ProxyMapping, 0, len(update.Mapping)) + for _, mapping := range update.Mapping { + if mapping.Type == proto.ProxyMappingUpdateType_UPDATE_TYPE_REMOVED { + resp = append(resp, mapping) + continue + } + + token, err := s.tokenStore.GenerateToken(mapping.AccountId, mapping.Id, 5*time.Minute) + if err != nil { + log.Warnf("Failed to generate token for proxy %s: %v", proxyID, err) + return nil + } + + msg := shallowCloneMapping(mapping) + msg.AuthToken = token + resp = append(resp, msg) } - token, err := s.tokenStore.GenerateToken(update.AccountId, update.Id, 5*time.Minute) - if err != nil { - log.Warnf("Failed to generate token for proxy %s: %v", proxyID, err) - return nil + return &proto.GetMappingUpdateResponse{ + Mapping: resp, } - - msg := shallowCloneMapping(update) - msg.AuthToken = token - return msg } // shallowCloneMapping creates a shallow copy of a ProxyMapping, reusing the diff --git a/management/internals/shared/grpc/proxy_group_access_test.go b/management/internals/shared/grpc/proxy_group_access_test.go index 84fb54923..31b1df3b1 100644 --- a/management/internals/shared/grpc/proxy_group_access_test.go +++ b/management/internals/shared/grpc/proxy_group_access_test.go @@ -17,6 +17,10 @@ type mockReverseProxyManager struct { err error } +func (m *mockReverseProxyManager) DeleteAllServices(ctx context.Context, accountID, userID string) error { + return nil +} + func (m *mockReverseProxyManager) GetAccountServices(ctx context.Context, accountID string) ([]*reverseproxy.Service, error) { if m.err != nil { return nil, m.err diff --git a/management/internals/shared/grpc/proxy_test.go b/management/internals/shared/grpc/proxy_test.go index 4c84e6010..de8ca3c84 100644 --- a/management/internals/shared/grpc/proxy_test.go +++ b/management/internals/shared/grpc/proxy_test.go @@ -16,8 +16,8 @@ import ( // registerFakeProxy adds a fake proxy connection to the server's internal maps // and returns the channel where messages will be received. -func registerFakeProxy(s *ProxyServiceServer, proxyID, clusterAddr string) chan *proto.ProxyMapping { - ch := make(chan *proto.ProxyMapping, 10) +func registerFakeProxy(s *ProxyServiceServer, proxyID, clusterAddr string) chan *proto.GetMappingUpdateResponse { + ch := make(chan *proto.GetMappingUpdateResponse, 10) conn := &proxyConnection{ proxyID: proxyID, address: clusterAddr, @@ -31,7 +31,7 @@ func registerFakeProxy(s *ProxyServiceServer, proxyID, clusterAddr string) chan return ch } -func drainChannel(ch chan *proto.ProxyMapping) *proto.ProxyMapping { +func drainChannel(ch chan *proto.GetMappingUpdateResponse) *proto.GetMappingUpdateResponse { select { case msg := <-ch: return msg @@ -45,20 +45,19 @@ func TestSendServiceUpdateToCluster_UniqueTokensPerProxy(t *testing.T) { defer tokenStore.Close() s := &ProxyServiceServer{ - tokenStore: tokenStore, - updatesChan: make(chan *proto.ProxyMapping, 100), + tokenStore: tokenStore, } const cluster = "proxy.example.com" const numProxies = 3 - channels := make([]chan *proto.ProxyMapping, numProxies) + channels := make([]chan *proto.GetMappingUpdateResponse, numProxies) for i := range numProxies { id := "proxy-" + string(rune('a'+i)) channels[i] = registerFakeProxy(s, id, cluster) } - update := &proto.ProxyMapping{ + mapping := &proto.ProxyMapping{ Type: proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, Id: "service-1", AccountId: "account-1", @@ -68,14 +67,20 @@ func TestSendServiceUpdateToCluster_UniqueTokensPerProxy(t *testing.T) { }, } + update := &proto.GetMappingUpdateResponse{ + Mapping: []*proto.ProxyMapping{mapping}, + } + s.SendServiceUpdateToCluster(update, cluster) tokens := make([]string, numProxies) for i, ch := range channels { - msg := drainChannel(ch) - require.NotNil(t, msg, "proxy %d should receive a message", i) - assert.Equal(t, update.Domain, msg.Domain) - assert.Equal(t, update.Id, msg.Id) + resp := drainChannel(ch) + require.NotNil(t, resp, "proxy %d should receive a message", i) + require.Len(t, resp.Mapping, 1, "proxy %d should receive exactly one mapping", i) + msg := resp.Mapping[0] + assert.Equal(t, mapping.Domain, msg.Domain) + assert.Equal(t, mapping.Id, msg.Id) assert.NotEmpty(t, msg.AuthToken, "proxy %d should have a non-empty token", i) tokens[i] = msg.AuthToken } @@ -100,31 +105,36 @@ func TestSendServiceUpdateToCluster_DeleteNoToken(t *testing.T) { defer tokenStore.Close() s := &ProxyServiceServer{ - tokenStore: tokenStore, - updatesChan: make(chan *proto.ProxyMapping, 100), + tokenStore: tokenStore, } const cluster = "proxy.example.com" ch1 := registerFakeProxy(s, "proxy-a", cluster) ch2 := registerFakeProxy(s, "proxy-b", cluster) - update := &proto.ProxyMapping{ + mapping := &proto.ProxyMapping{ Type: proto.ProxyMappingUpdateType_UPDATE_TYPE_REMOVED, Id: "service-1", AccountId: "account-1", Domain: "test.example.com", } + update := &proto.GetMappingUpdateResponse{ + Mapping: []*proto.ProxyMapping{mapping}, + } + s.SendServiceUpdateToCluster(update, cluster) - msg1 := drainChannel(ch1) - msg2 := drainChannel(ch2) - require.NotNil(t, msg1) - require.NotNil(t, msg2) + resp1 := drainChannel(ch1) + resp2 := drainChannel(ch2) + require.NotNil(t, resp1) + require.NotNil(t, resp2) + require.Len(t, resp1.Mapping, 1) + require.Len(t, resp2.Mapping, 1) // Delete operations should not generate tokens - assert.Empty(t, msg1.AuthToken) - assert.Empty(t, msg2.AuthToken) + assert.Empty(t, resp1.Mapping[0].AuthToken) + assert.Empty(t, resp2.Mapping[0].AuthToken) // No tokens should have been created assert.Equal(t, 0, tokenStore.GetTokenCount()) @@ -135,27 +145,35 @@ func TestSendServiceUpdate_UniqueTokensPerProxy(t *testing.T) { defer tokenStore.Close() s := &ProxyServiceServer{ - tokenStore: tokenStore, - updatesChan: make(chan *proto.ProxyMapping, 100), + tokenStore: tokenStore, } // Register proxies in different clusters (SendServiceUpdate broadcasts to all) ch1 := registerFakeProxy(s, "proxy-a", "cluster-a") ch2 := registerFakeProxy(s, "proxy-b", "cluster-b") - update := &proto.ProxyMapping{ + mapping := &proto.ProxyMapping{ Type: proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, Id: "service-1", AccountId: "account-1", Domain: "test.example.com", } + update := &proto.GetMappingUpdateResponse{ + Mapping: []*proto.ProxyMapping{mapping}, + } + s.SendServiceUpdate(update) - msg1 := drainChannel(ch1) - msg2 := drainChannel(ch2) - require.NotNil(t, msg1) - require.NotNil(t, msg2) + resp1 := drainChannel(ch1) + resp2 := drainChannel(ch2) + require.NotNil(t, resp1) + require.NotNil(t, resp2) + require.Len(t, resp1.Mapping, 1) + require.Len(t, resp2.Mapping, 1) + + msg1 := resp1.Mapping[0] + msg2 := resp2.Mapping[0] assert.NotEmpty(t, msg1.AuthToken) assert.NotEmpty(t, msg2.AuthToken) diff --git a/management/server/account.go b/management/server/account.go index 1e35d4ad1..d436445e8 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -714,6 +714,11 @@ func (am *DefaultAccountManager) DeleteAccount(ctx context.Context, accountID, u return status.Errorf(status.Internal, "failed to build user infos for account %s: %v", accountID, err) } + err = am.reverseProxyManager.DeleteAllServices(ctx, accountID, userID) + if err != nil { + return status.Errorf(status.Internal, "failed to delete service %s: %v", accountID, err) + } + for _, otherUser := range account.Users { if otherUser.Id == userID { continue diff --git a/management/server/account_test.go b/management/server/account_test.go index 1cc0c9571..f9e9c162d 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -31,6 +31,7 @@ import ( reverseproxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/manager" "github.com/netbirdio/netbird/management/internals/modules/zones" "github.com/netbirdio/netbird/management/internals/server/config" + nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" nbAccount "github.com/netbirdio/netbird/management/server/account" "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/cache" @@ -3122,7 +3123,8 @@ func createManager(t testing.TB) (*DefaultAccountManager, *update_channel.PeersU return nil, nil, err } - manager.SetServiceManager(reverseproxymanager.NewManager(store, manager, permissionsManager, nil, nil)) + proxyGrpcServer := nbgrpc.NewProxyServiceServer(nil, nil, nbgrpc.ProxyOIDCConfig{}, peersManager, nil) + manager.SetServiceManager(reverseproxymanager.NewManager(store, manager, permissionsManager, proxyGrpcServer, nil)) return manager, updateManager, nil } diff --git a/management/server/http/handlers/proxy/auth_callback_integration_test.go b/management/server/http/handlers/proxy/auth_callback_integration_test.go index 6a1b144f6..732fd57e3 100644 --- a/management/server/http/handlers/proxy/auth_callback_integration_test.go +++ b/management/server/http/handlers/proxy/auth_callback_integration_test.go @@ -357,6 +357,10 @@ type testServiceManager struct { store store.Store } +func (m *testServiceManager) DeleteAllServices(ctx context.Context, accountID, userID string) error { + return nil +} + func (m *testServiceManager) GetAllServices(_ context.Context, _, _ string) ([]*reverseproxy.Service, error) { return nil, nil } diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 018e54810..70d501593 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -4906,6 +4906,28 @@ func (s *SqlStore) GetServiceByID(ctx context.Context, lockStrength LockingStren return service, nil } +func (s *SqlStore) GetServicesByAccountID(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) { + tx := s.db.Preload("Targets") + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var serviceList []*reverseproxy.Service + result := tx.Find(&serviceList, accountIDCondition, accountID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to get services from the store: %s", result.Error) + return nil, status.Errorf(status.Internal, "failed to get services from store") + } + + for _, service := range serviceList { + if err := service.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt service data: %w", err) + } + } + + return serviceList, nil +} + func (s *SqlStore) GetServiceByDomain(ctx context.Context, accountID, domain string) (*reverseproxy.Service, error) { var service *reverseproxy.Service result := s.db.Preload("Targets").Where("account_id = ? AND domain = ?", accountID, domain).First(&service) diff --git a/management/server/store/store.go b/management/server/store/store.go index 2bc688a11..a79c57f61 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -256,6 +256,7 @@ type Store interface { UpdateService(ctx context.Context, service *reverseproxy.Service) error DeleteService(ctx context.Context, accountID, serviceID string) error GetServiceByID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) (*reverseproxy.Service, error) + GetServicesByAccountID(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) GetServiceByDomain(ctx context.Context, accountID, domain string) (*reverseproxy.Service, error) GetServices(ctx context.Context, lockStrength LockingStrength) ([]*reverseproxy.Service, error) GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) diff --git a/management/server/store/store_mock.go b/management/server/store/store_mock.go index 79d275298..8baca36c0 100644 --- a/management/server/store/store_mock.go +++ b/management/server/store/store_mock.go @@ -1109,6 +1109,21 @@ func (mr *MockStoreMockRecorder) GetAccountServices(ctx, lockStrength, accountID return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountServices", reflect.TypeOf((*MockStore)(nil).GetAccountServices), ctx, lockStrength, accountID) } +// GetServicesByAccountID mocks base method. +func (m *MockStore) GetServicesByAccountID(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetServicesByAccountID", ctx, lockStrength, accountID) + ret0, _ := ret[0].([]*reverseproxy.Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetServicesByAccountID indicates an expected call of GetServicesByAccountID. +func (mr *MockStoreMockRecorder) GetServicesByAccountID(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServicesByAccountID", reflect.TypeOf((*MockStore)(nil).GetServicesByAccountID), ctx, lockStrength, accountID) +} + // GetAccountSettings mocks base method. func (m *MockStore) GetAccountSettings(ctx context.Context, lockStrength LockingStrength, accountID string) (*types2.Settings, error) { m.ctrl.T.Helper() diff --git a/proxy/management_integration_test.go b/proxy/management_integration_test.go index 1163c50f4..420194c58 100644 --- a/proxy/management_integration_test.go +++ b/proxy/management_integration_test.go @@ -191,6 +191,10 @@ type storeBackedServiceManager struct { tokenStore *nbgrpc.OneTimeTokenStore } +func (m *storeBackedServiceManager) DeleteAllServices(ctx context.Context, accountID, userID string) error { + return nil +} + func (m *storeBackedServiceManager) GetAllServices(ctx context.Context, accountID, userID string) ([]*reverseproxy.Service, error) { return m.store.GetAccountServices(ctx, store.LockingStrengthNone, accountID) } From 9d123ec059598122b4e30bcb4e341b1567cf1cab Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Mon, 23 Feb 2026 16:31:29 +0100 Subject: [PATCH 158/374] [proxy] add pre-shared key support (#5377) --- proxy/cmd/proxy/cmd/root.go | 3 +++ proxy/internal/roundtrip/netbird.go | 22 ++++++++++++++-------- proxy/internal/roundtrip/netbird_test.go | 18 +++++++++++++++--- proxy/server.go | 8 +++++++- 4 files changed, 39 insertions(+), 12 deletions(-) diff --git a/proxy/cmd/proxy/cmd/root.go b/proxy/cmd/proxy/cmd/root.go index 121621109..c594f9800 100644 --- a/proxy/cmd/proxy/cmd/root.go +++ b/proxy/cmd/proxy/cmd/root.go @@ -53,6 +53,7 @@ var ( certLockMethod string wgPort int proxyProtocol bool + preSharedKey string ) var rootCmd = &cobra.Command{ @@ -84,6 +85,7 @@ func init() { rootCmd.Flags().StringVar(&certLockMethod, "cert-lock-method", envStringOrDefault("NB_PROXY_CERT_LOCK_METHOD", "auto"), "Certificate lock method for cross-replica coordination: auto, flock, or k8s-lease") rootCmd.Flags().IntVar(&wgPort, "wg-port", envIntOrDefault("NB_PROXY_WG_PORT", 0), "WireGuard listen port (0 = random). Fixed port only works with single-account deployments") rootCmd.Flags().BoolVar(&proxyProtocol, "proxy-protocol", envBoolOrDefault("NB_PROXY_PROXY_PROTOCOL", false), "Enable PROXY protocol on TCP listeners to preserve client IPs behind L4 proxies") + rootCmd.Flags().StringVar(&preSharedKey, "preshared-key", envStringOrDefault("NB_PROXY_PRESHARED_KEY", ""), "Define a pre-shared key for the tunnel between proxy and peers") } // Execute runs the root command. @@ -156,6 +158,7 @@ func runServer(cmd *cobra.Command, args []string) error { CertLockMethod: nbacme.CertLockMethod(certLockMethod), WireguardPort: wgPort, ProxyProtocol: proxyProtocol, + PreSharedKey: preSharedKey, } ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM, syscall.SIGINT) diff --git a/proxy/internal/roundtrip/netbird.go b/proxy/internal/roundtrip/netbird.go index d7fd2746f..481b42d2b 100644 --- a/proxy/internal/roundtrip/netbird.go +++ b/proxy/internal/roundtrip/netbird.go @@ -86,6 +86,13 @@ func (e *clientEntry) acquireInflight(backend backendKey) (release func(), ok bo } } +// ClientConfig holds configuration for the embedded NetBird client. +type ClientConfig struct { + MgmtAddr string + WGPort int + PreSharedKey string +} + type statusNotifier interface { NotifyStatus(ctx context.Context, accountID, serviceID, domain string, connected bool) error } @@ -98,10 +105,9 @@ type managementClient interface { // backed by underlying NetBird connections. // Clients are keyed by AccountID, allowing multiple domains to share the same connection. type NetBird struct { - mgmtAddr string proxyID string proxyAddr string - wgPort int + clientCfg ClientConfig logger *log.Logger mgmtClient managementClient transportCfg transportConfig @@ -229,11 +235,12 @@ func (n *NetBird) createClientEntry(ctx context.Context, accountID types.Account // The peer has already been created via CreateProxyPeer RPC with the public key. client, err := embed.New(embed.Options{ DeviceName: deviceNamePrefix + n.proxyID, - ManagementURL: n.mgmtAddr, + ManagementURL: n.clientCfg.MgmtAddr, PrivateKey: privateKey.String(), LogLevel: log.WarnLevel.String(), BlockInbound: true, - WireguardPort: &n.wgPort, + WireguardPort: &n.clientCfg.WGPort, + PreSharedKey: n.clientCfg.PreSharedKey, }) if err != nil { return nil, fmt.Errorf("create netbird client: %w", err) @@ -536,18 +543,17 @@ func (n *NetBird) ListClientsForStartup() map[types.AccountID]*embed.Client { return result } -// NewNetBird creates a new NetBird transport. Set wgPort to 0 for a random +// NewNetBird creates a new NetBird transport. Set clientCfg.WGPort to 0 for a random // OS-assigned port. A fixed port only works with single-account deployments; // multiple accounts will fail to bind the same port. -func NewNetBird(mgmtAddr, proxyID, proxyAddr string, wgPort int, logger *log.Logger, notifier statusNotifier, mgmtClient managementClient) *NetBird { +func NewNetBird(proxyID, proxyAddr string, clientCfg ClientConfig, logger *log.Logger, notifier statusNotifier, mgmtClient managementClient) *NetBird { if logger == nil { logger = log.StandardLogger() } return &NetBird{ - mgmtAddr: mgmtAddr, proxyID: proxyID, proxyAddr: proxyAddr, - wgPort: wgPort, + clientCfg: clientCfg, logger: logger, clients: make(map[types.AccountID]*clientEntry), statusNotifier: notifier, diff --git a/proxy/internal/roundtrip/netbird_test.go b/proxy/internal/roundtrip/netbird_test.go index 3e76af9da..0a742c2fa 100644 --- a/proxy/internal/roundtrip/netbird_test.go +++ b/proxy/internal/roundtrip/netbird_test.go @@ -49,7 +49,11 @@ func (m *mockStatusNotifier) calls() []statusCall { // mockNetBird creates a NetBird instance for testing without actually connecting. // It uses an invalid management URL to prevent real connections. func mockNetBird() *NetBird { - return NewNetBird("http://invalid.test:9999", "test-proxy", "invalid.test", 0, nil, nil, &mockMgmtClient{}) + return NewNetBird("test-proxy", "invalid.test", ClientConfig{ + MgmtAddr: "http://invalid.test:9999", + WGPort: 0, + PreSharedKey: "", + }, nil, nil, &mockMgmtClient{}) } func TestNetBird_AddPeer_CreatesClientForNewAccount(t *testing.T) { @@ -282,7 +286,11 @@ func TestNetBird_RoundTrip_RequiresExistingClient(t *testing.T) { func TestNetBird_AddPeer_ExistingStartedClient_NotifiesStatus(t *testing.T) { notifier := &mockStatusNotifier{} - nb := NewNetBird("http://invalid.test:9999", "test-proxy", "invalid.test", 0, nil, notifier, &mockMgmtClient{}) + nb := NewNetBird("test-proxy", "invalid.test", ClientConfig{ + MgmtAddr: "http://invalid.test:9999", + WGPort: 0, + PreSharedKey: "", + }, nil, notifier, &mockMgmtClient{}) accountID := types.AccountID("account-1") // Add first domain — creates a new client entry. @@ -308,7 +316,11 @@ func TestNetBird_AddPeer_ExistingStartedClient_NotifiesStatus(t *testing.T) { func TestNetBird_RemovePeer_NotifiesDisconnection(t *testing.T) { notifier := &mockStatusNotifier{} - nb := NewNetBird("http://invalid.test:9999", "test-proxy", "invalid.test", 0, nil, notifier, &mockMgmtClient{}) + nb := NewNetBird("test-proxy", "invalid.test", ClientConfig{ + MgmtAddr: "http://invalid.test:9999", + WGPort: 0, + PreSharedKey: "", + }, nil, notifier, &mockMgmtClient{}) accountID := types.AccountID("account-1") err := nb.AddPeer(context.Background(), accountID, domain.Domain("domain1.test"), "key-1", "svc-1") diff --git a/proxy/server.go b/proxy/server.go index 60811e53b..48a876899 100644 --- a/proxy/server.go +++ b/proxy/server.go @@ -114,6 +114,8 @@ type Server struct { // When enabled, the real client IP is extracted from the PROXY header // sent by upstream L4 proxies that support PROXY protocol. ProxyProtocol bool + // PreSharedKey used for tunnel between proxy and peers (set globally not per account) + PreSharedKey string } // NotifyStatus sends a status update to management about tunnel connectivity @@ -163,7 +165,11 @@ func (s *Server) ListenAndServe(ctx context.Context, addr string) (err error) { // Initialize the netbird client, this is required to build peer connections // to proxy over. - s.netbird = roundtrip.NewNetBird(s.ManagementAddress, s.ID, s.ProxyURL, s.WireguardPort, s.Logger, s, s.mgmtClient) + s.netbird = roundtrip.NewNetBird(s.ID, s.ProxyURL, roundtrip.ClientConfig{ + MgmtAddr: s.ManagementAddress, + WGPort: s.WireguardPort, + PreSharedKey: s.PreSharedKey, + }, s.Logger, s, s.mgmtClient) tlsConfig, err := s.configureTLS(ctx) if err != nil { From 98890a29e3872b1faeabbecd7ee7ed84c6af91e5 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Mon, 23 Feb 2026 20:58:27 +0100 Subject: [PATCH 159/374] [client] fix busy-loop in network monitor routing socket on macOS/BSD (#5424) * [client] fix busy-loop in network monitor routing socket on macOS/BSD After system wakeup, the AF_ROUTE socket created by Go's unix.Socket() is non-blocking, causing unix.Read to return EAGAIN immediately and spin at 100% CPU filling the log with thousands of warnings per second. Replace the tight read loop with a unix.Select call that blocks until the fd is readable, checking ctx cancellation on each 1-second timeout. Fatal errors (EBADF, EINVAL) now return an error instead of looping. * [client] add fd range validation in waitReadable to prevent out-of-bound errors --- .../networkmonitor/check_change_common.go | 111 ++++++++++++------ 1 file changed, 73 insertions(+), 38 deletions(-) diff --git a/client/internal/networkmonitor/check_change_common.go b/client/internal/networkmonitor/check_change_common.go index c287236e8..a4a4f76ac 100644 --- a/client/internal/networkmonitor/check_change_common.go +++ b/client/internal/networkmonitor/check_change_common.go @@ -22,51 +22,56 @@ func prepareFd() (int, error) { func routeCheck(ctx context.Context, fd int, nexthopv4, nexthopv6 systemops.Nexthop) error { for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - buf := make([]byte, 2048) - n, err := unix.Read(fd, buf) + // Wait until fd is readable or context is cancelled, to avoid a busy-loop + // when the routing socket returns EAGAIN (e.g. immediately after wakeup). + if err := waitReadable(ctx, fd); err != nil { + return err + } + + buf := make([]byte, 2048) + n, err := unix.Read(fd, buf) + if err != nil { + if errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EINTR) { + continue + } + if errors.Is(err, unix.EBADF) || errors.Is(err, unix.EINVAL) { + return fmt.Errorf("routing socket closed: %w", err) + } + return fmt.Errorf("read routing socket: %w", err) + } + + if n < unix.SizeofRtMsghdr { + log.Debugf("Network monitor: read from routing socket returned less than expected: %d bytes", n) + continue + } + + msg := (*unix.RtMsghdr)(unsafe.Pointer(&buf[0])) + + switch msg.Type { + // handle route changes + case unix.RTM_ADD, syscall.RTM_DELETE: + route, err := parseRouteMessage(buf[:n]) if err != nil { - if !errors.Is(err, unix.EBADF) && !errors.Is(err, unix.EINVAL) { - log.Warnf("Network monitor: failed to read from routing socket: %v", err) - } - continue - } - if n < unix.SizeofRtMsghdr { - log.Debugf("Network monitor: read from routing socket returned less than expected: %d bytes", n) + log.Debugf("Network monitor: error parsing routing message: %v", err) continue } - msg := (*unix.RtMsghdr)(unsafe.Pointer(&buf[0])) + if route.Dst.Bits() != 0 { + continue + } + intf := "" + if route.Interface != nil { + intf = route.Interface.Name + } switch msg.Type { - // handle route changes - case unix.RTM_ADD, syscall.RTM_DELETE: - route, err := parseRouteMessage(buf[:n]) - if err != nil { - log.Debugf("Network monitor: error parsing routing message: %v", err) - continue - } - - if route.Dst.Bits() != 0 { - continue - } - - intf := "" - if route.Interface != nil { - intf = route.Interface.Name - } - switch msg.Type { - case unix.RTM_ADD: - log.Infof("Network monitor: default route changed: via %s, interface %s", route.Gw, intf) + case unix.RTM_ADD: + log.Infof("Network monitor: default route changed: via %s, interface %s", route.Gw, intf) + return nil + case unix.RTM_DELETE: + if nexthopv4.Intf != nil && route.Gw.Compare(nexthopv4.IP) == 0 || nexthopv6.Intf != nil && route.Gw.Compare(nexthopv6.IP) == 0 { + log.Infof("Network monitor: default route removed: via %s, interface %s", route.Gw, intf) return nil - case unix.RTM_DELETE: - if nexthopv4.Intf != nil && route.Gw.Compare(nexthopv4.IP) == 0 || nexthopv6.Intf != nil && route.Gw.Compare(nexthopv6.IP) == 0 { - log.Infof("Network monitor: default route removed: via %s, interface %s", route.Gw, intf) - return nil - } } } } @@ -90,3 +95,33 @@ func parseRouteMessage(buf []byte) (*systemops.Route, error) { return systemops.MsgToRoute(msg) } + +// waitReadable blocks until fd has data to read, or ctx is cancelled. +func waitReadable(ctx context.Context, fd int) error { + var fdset unix.FdSet + if fd < 0 || fd/unix.NFDBITS >= len(fdset.Bits) { + return fmt.Errorf("fd %d out of range for FdSet", fd) + } + + for { + if err := ctx.Err(); err != nil { + return err + } + + fdset = unix.FdSet{} + fdset.Set(fd) + // Use a 1-second timeout so we can re-check ctx periodically. + tv := unix.Timeval{Sec: 1} + n, err := unix.Select(fd+1, &fdset, nil, nil, &tv) + if err != nil { + if errors.Is(err, unix.EINTR) { + continue + } + return fmt.Errorf("select on routing socket: %w", err) + } + if n > 0 { + return nil + } + // timeout — loop back and re-check ctx + } +} From 4a54f0d67007566ac1c3f872086755997f880352 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Mon, 23 Feb 2026 20:58:53 +0100 Subject: [PATCH 160/374] [Client] Remove connection semaphore (#5419) * [Client] Remove connection semaphore Remove the semaphore and the initial random sleep time (300ms) from the connectivity logic to speed up the initial connection time. Note: Implement limiter logic that can prioritize router peers and keep the fast connection option for the first few peers. * Remove unused function --- client/internal/engine.go | 8 +------- client/internal/peer/conn.go | 32 ++----------------------------- client/internal/peer/conn_test.go | 4 ---- 3 files changed, 3 insertions(+), 41 deletions(-) diff --git a/client/internal/engine.go b/client/internal/engine.go index f2d724aa4..90fc041a9 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -53,13 +53,11 @@ import ( "github.com/netbirdio/netbird/client/internal/updatemanager" "github.com/netbirdio/netbird/client/jobexec" cProto "github.com/netbirdio/netbird/client/proto" - "github.com/netbirdio/netbird/shared/management/domain" - semaphoregroup "github.com/netbirdio/netbird/util/semaphore-group" - "github.com/netbirdio/netbird/client/system" nbdns "github.com/netbirdio/netbird/dns" "github.com/netbirdio/netbird/route" mgm "github.com/netbirdio/netbird/shared/management/client" + "github.com/netbirdio/netbird/shared/management/domain" mgmProto "github.com/netbirdio/netbird/shared/management/proto" auth "github.com/netbirdio/netbird/shared/relay/auth/hmac" relayClient "github.com/netbirdio/netbird/shared/relay/client" @@ -75,7 +73,6 @@ import ( const ( PeerConnectionTimeoutMax = 45000 // ms PeerConnectionTimeoutMin = 30000 // ms - connInitLimit = 200 disableAutoUpdate = "disabled" ) @@ -208,7 +205,6 @@ type Engine struct { syncRespMux sync.RWMutex persistSyncResponse bool latestSyncResponse *mgmProto.SyncResponse - connSemaphore *semaphoregroup.SemaphoreGroup flowManager nftypes.FlowManager // auto-update @@ -266,7 +262,6 @@ func NewEngine( statusRecorder: statusRecorder, stateManager: stateManager, checks: checks, - connSemaphore: semaphoregroup.NewSemaphoreGroup(connInitLimit), probeStunTurn: relay.NewStunTurnProbe(relay.DefaultCacheTTL), jobExecutor: jobexec.NewExecutor(), } @@ -1539,7 +1534,6 @@ func (e *Engine) createPeerConn(pubKey string, allowedIPs []netip.Prefix, agentV IFaceDiscover: e.mobileDep.IFaceDiscover, RelayManager: e.relayManager, SrWatcher: e.srWatcher, - Semaphore: e.connSemaphore, } peerConn, err := peer.NewConn(config, serviceDependencies) if err != nil { diff --git a/client/internal/peer/conn.go b/client/internal/peer/conn.go index 05a397f3d..b4f97016d 100644 --- a/client/internal/peer/conn.go +++ b/client/internal/peer/conn.go @@ -3,7 +3,6 @@ package peer import ( "context" "fmt" - "math/rand" "net" "net/netip" "runtime" @@ -25,7 +24,6 @@ import ( "github.com/netbirdio/netbird/client/internal/stdnet" "github.com/netbirdio/netbird/route" relayClient "github.com/netbirdio/netbird/shared/relay/client" - semaphoregroup "github.com/netbirdio/netbird/util/semaphore-group" ) type ServiceDependencies struct { @@ -34,7 +32,6 @@ type ServiceDependencies struct { IFaceDiscover stdnet.ExternalIFaceDiscover RelayManager *relayClient.Manager SrWatcher *guard.SRWatcher - Semaphore *semaphoregroup.SemaphoreGroup PeerConnDispatcher *dispatcher.ConnectionDispatcher } @@ -111,9 +108,8 @@ type Conn struct { wgProxyRelay wgproxy.Proxy handshaker *Handshaker - guard *guard.Guard - semaphore *semaphoregroup.SemaphoreGroup - wg sync.WaitGroup + guard *guard.Guard + wg sync.WaitGroup // debug purpose dumpState *stateDump @@ -139,7 +135,6 @@ func NewConn(config ConnConfig, services ServiceDependencies) (*Conn, error) { iFaceDiscover: services.IFaceDiscover, relayManager: services.RelayManager, srWatcher: services.SrWatcher, - semaphore: services.Semaphore, statusRelay: worker.NewAtomicStatus(), statusICE: worker.NewAtomicStatus(), dumpState: dumpState, @@ -154,15 +149,10 @@ func NewConn(config ConnConfig, services ServiceDependencies) (*Conn, error) { // It will try to establish a connection using ICE and in parallel with relay. The higher priority connection type will // be used. func (conn *Conn) Open(engineCtx context.Context) error { - if err := conn.semaphore.Add(engineCtx); err != nil { - return err - } - conn.mu.Lock() defer conn.mu.Unlock() if conn.opened { - conn.semaphore.Done() return nil } @@ -173,7 +163,6 @@ func (conn *Conn) Open(engineCtx context.Context) error { relayIsSupportedLocally := conn.workerRelay.RelayIsSupportedLocally() workerICE, err := NewWorkerICE(conn.ctx, conn.Log, conn.config, conn, conn.signaler, conn.iFaceDiscover, conn.statusRecorder, relayIsSupportedLocally) if err != nil { - conn.semaphore.Done() return err } conn.workerICE = workerICE @@ -207,10 +196,6 @@ func (conn *Conn) Open(engineCtx context.Context) error { conn.wg.Add(1) go func() { defer conn.wg.Done() - - conn.waitInitialRandomSleepTime(conn.ctx) - conn.semaphore.Done() - conn.guard.Start(conn.ctx, conn.onGuardEvent) }() conn.opened = true @@ -670,19 +655,6 @@ func (conn *Conn) doOnConnected(remoteRosenpassPubKey []byte, remoteRosenpassAdd } } -func (conn *Conn) waitInitialRandomSleepTime(ctx context.Context) { - maxWait := 300 - duration := time.Duration(rand.Intn(maxWait)) * time.Millisecond - - timeout := time.NewTimer(duration) - defer timeout.Stop() - - select { - case <-ctx.Done(): - case <-timeout.C: - } -} - func (conn *Conn) isRelayed() bool { switch conn.currentConnPriority { case conntype.Relay, conntype.ICETurn: diff --git a/client/internal/peer/conn_test.go b/client/internal/peer/conn_test.go index 32383b530..59216b647 100644 --- a/client/internal/peer/conn_test.go +++ b/client/internal/peer/conn_test.go @@ -15,7 +15,6 @@ import ( "github.com/netbirdio/netbird/client/internal/peer/ice" "github.com/netbirdio/netbird/client/internal/stdnet" "github.com/netbirdio/netbird/util" - semaphoregroup "github.com/netbirdio/netbird/util/semaphore-group" ) var testDispatcher = dispatcher.NewConnectionDispatcher() @@ -53,7 +52,6 @@ func TestConn_GetKey(t *testing.T) { sd := ServiceDependencies{ SrWatcher: swWatcher, - Semaphore: semaphoregroup.NewSemaphoreGroup(1), PeerConnDispatcher: testDispatcher, } conn, err := NewConn(connConf, sd) @@ -71,7 +69,6 @@ func TestConn_OnRemoteOffer(t *testing.T) { sd := ServiceDependencies{ StatusRecorder: NewRecorder("https://mgm"), SrWatcher: swWatcher, - Semaphore: semaphoregroup.NewSemaphoreGroup(1), PeerConnDispatcher: testDispatcher, } conn, err := NewConn(connConf, sd) @@ -110,7 +107,6 @@ func TestConn_OnRemoteAnswer(t *testing.T) { sd := ServiceDependencies{ StatusRecorder: NewRecorder("https://mgm"), SrWatcher: swWatcher, - Semaphore: semaphoregroup.NewSemaphoreGroup(1), PeerConnDispatcher: testDispatcher, } conn, err := NewConn(connConf, sd) From 37f025c966b0bad80f9cf6e691b9b37f419f03c7 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 24 Feb 2026 10:00:33 +0100 Subject: [PATCH 161/374] Fix a race condition where a concurrent user-issued Up or Down command (#5418) could interleave with a sleep/wake event causing out-of-order state transitions. The mutex now covers the full duration of each handler including the status check, the Up/Down call, and the flag update. Note: if Up or Down commands are triggered in parallel with sleep/wake events, the overall ordering of up/down/sleep/wake operations is still not guaranteed beyond what the mutex provides within the handler itself. --- client/internal/sleep/handler/handler.go | 80 +++++++ client/internal/sleep/handler/handler_test.go | 153 ++++++++++++ client/server/lifecycle.go | 77 ------ client/server/lifecycle_test.go | 219 ------------------ client/server/server.go | 10 +- client/server/sleep.go | 46 ++++ 6 files changed, 286 insertions(+), 299 deletions(-) create mode 100644 client/internal/sleep/handler/handler.go create mode 100644 client/internal/sleep/handler/handler_test.go delete mode 100644 client/server/lifecycle.go delete mode 100644 client/server/lifecycle_test.go create mode 100644 client/server/sleep.go diff --git a/client/internal/sleep/handler/handler.go b/client/internal/sleep/handler/handler.go new file mode 100644 index 000000000..9c2c5d4d5 --- /dev/null +++ b/client/internal/sleep/handler/handler.go @@ -0,0 +1,80 @@ +package handler + +import ( + "context" + "sync" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/internal" +) + +type Agent interface { + Up(ctx context.Context) error + Down(ctx context.Context) error + Status() (internal.StatusType, error) +} + +type SleepHandler struct { + agent Agent + + mu sync.Mutex + // sleepTriggeredDown indicates whether the sleep handler triggered the last client down, to avoid unnecessary up on wake + sleepTriggeredDown bool +} + +func New(agent Agent) *SleepHandler { + return &SleepHandler{ + agent: agent, + } +} + +func (s *SleepHandler) HandleWakeUp(ctx context.Context) error { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.sleepTriggeredDown { + log.Info("skipping up because wasn't sleep down") + return nil + } + + // avoid other wakeup runs if sleep didn't make the computer sleep + s.sleepTriggeredDown = false + + log.Info("running up after wake up") + err := s.agent.Up(ctx) + if err != nil { + log.Errorf("running up failed: %v", err) + return err + } + + log.Info("running up command executed successfully") + return nil +} + +func (s *SleepHandler) HandleSleep(ctx context.Context) error { + s.mu.Lock() + defer s.mu.Unlock() + + status, err := s.agent.Status() + if err != nil { + return err + } + + if status != internal.StatusConnecting && status != internal.StatusConnected { + log.Infof("skipping setting the agent down because status is %s", status) + return nil + } + + log.Info("running down after system started sleeping") + + if err = s.agent.Down(ctx); err != nil { + log.Errorf("running down failed: %v", err) + return err + } + + s.sleepTriggeredDown = true + + log.Info("running down executed successfully") + return nil +} diff --git a/client/internal/sleep/handler/handler_test.go b/client/internal/sleep/handler/handler_test.go new file mode 100644 index 000000000..9f79428fb --- /dev/null +++ b/client/internal/sleep/handler/handler_test.go @@ -0,0 +1,153 @@ +package handler + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/client/internal" +) + +type mockAgent struct { + upErr error + downErr error + statusErr error + status internal.StatusType + upCalls int +} + +func (m *mockAgent) Up(_ context.Context) error { + m.upCalls++ + return m.upErr +} + +func (m *mockAgent) Down(_ context.Context) error { + return m.downErr +} + +func (m *mockAgent) Status() (internal.StatusType, error) { + return m.status, m.statusErr +} + +func newHandler(status internal.StatusType) (*SleepHandler, *mockAgent) { + agent := &mockAgent{status: status} + return New(agent), agent +} + +func TestHandleWakeUp_SkipsWhenFlagFalse(t *testing.T) { + h, agent := newHandler(internal.StatusIdle) + + err := h.HandleWakeUp(context.Background()) + + require.NoError(t, err) + assert.Equal(t, 0, agent.upCalls, "Up should not be called when flag is false") +} + +func TestHandleWakeUp_ResetsFlagBeforeUp(t *testing.T) { + h, _ := newHandler(internal.StatusIdle) + h.sleepTriggeredDown = true + + // Even if Up fails, flag should be reset + _ = h.HandleWakeUp(context.Background()) + + assert.False(t, h.sleepTriggeredDown, "flag must be reset before calling Up") +} + +func TestHandleWakeUp_CallsUpWhenFlagSet(t *testing.T) { + h, agent := newHandler(internal.StatusIdle) + h.sleepTriggeredDown = true + + err := h.HandleWakeUp(context.Background()) + + require.NoError(t, err) + assert.Equal(t, 1, agent.upCalls) + assert.False(t, h.sleepTriggeredDown) +} + +func TestHandleWakeUp_ReturnsErrorFromUp(t *testing.T) { + h, agent := newHandler(internal.StatusIdle) + h.sleepTriggeredDown = true + agent.upErr = errors.New("up failed") + + err := h.HandleWakeUp(context.Background()) + + assert.ErrorIs(t, err, agent.upErr) + assert.False(t, h.sleepTriggeredDown, "flag should still be reset even when Up fails") +} + +func TestHandleWakeUp_SecondCallIsNoOp(t *testing.T) { + h, agent := newHandler(internal.StatusIdle) + h.sleepTriggeredDown = true + + _ = h.HandleWakeUp(context.Background()) + err := h.HandleWakeUp(context.Background()) + + require.NoError(t, err) + assert.Equal(t, 1, agent.upCalls, "second wakeup should be no-op") +} + +func TestHandleSleep_SkipsForNonActiveStates(t *testing.T) { + tests := []struct { + name string + status internal.StatusType + }{ + {"Idle", internal.StatusIdle}, + {"NeedsLogin", internal.StatusNeedsLogin}, + {"LoginFailed", internal.StatusLoginFailed}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + h, _ := newHandler(tt.status) + + err := h.HandleSleep(context.Background()) + + require.NoError(t, err) + assert.False(t, h.sleepTriggeredDown) + }) + } +} + +func TestHandleSleep_ProceedsForActiveStates(t *testing.T) { + tests := []struct { + name string + status internal.StatusType + }{ + {"Connecting", internal.StatusConnecting}, + {"Connected", internal.StatusConnected}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + h, _ := newHandler(tt.status) + + err := h.HandleSleep(context.Background()) + + require.NoError(t, err) + assert.True(t, h.sleepTriggeredDown) + }) + } +} + +func TestHandleSleep_ReturnsErrorFromStatus(t *testing.T) { + agent := &mockAgent{statusErr: errors.New("status error")} + h := New(agent) + + err := h.HandleSleep(context.Background()) + + assert.ErrorIs(t, err, agent.statusErr) + assert.False(t, h.sleepTriggeredDown) +} + +func TestHandleSleep_ReturnsErrorFromDown(t *testing.T) { + agent := &mockAgent{status: internal.StatusConnected, downErr: errors.New("down failed")} + h := New(agent) + + err := h.HandleSleep(context.Background()) + + assert.ErrorIs(t, err, agent.downErr) + assert.False(t, h.sleepTriggeredDown, "flag should not be set when Down fails") +} diff --git a/client/server/lifecycle.go b/client/server/lifecycle.go deleted file mode 100644 index 3722c027d..000000000 --- a/client/server/lifecycle.go +++ /dev/null @@ -1,77 +0,0 @@ -package server - -import ( - "context" - - log "github.com/sirupsen/logrus" - - "github.com/netbirdio/netbird/client/internal" - "github.com/netbirdio/netbird/client/proto" -) - -// NotifyOSLifecycle handles operating system lifecycle events by executing appropriate logic based on the request type. -func (s *Server) NotifyOSLifecycle(callerCtx context.Context, req *proto.OSLifecycleRequest) (*proto.OSLifecycleResponse, error) { - switch req.GetType() { - case proto.OSLifecycleRequest_WAKEUP: - return s.handleWakeUp(callerCtx) - case proto.OSLifecycleRequest_SLEEP: - return s.handleSleep(callerCtx) - default: - log.Errorf("unknown OSLifecycleRequest type: %v", req.GetType()) - } - return &proto.OSLifecycleResponse{}, nil -} - -// handleWakeUp processes a wake-up event by triggering the Up command if the system was previously put to sleep. -// It resets the sleep state and logs the process. Returns a response or an error if the Up command fails. -func (s *Server) handleWakeUp(callerCtx context.Context) (*proto.OSLifecycleResponse, error) { - if !s.sleepTriggeredDown.Load() { - log.Info("skipping up because wasn't sleep down") - return &proto.OSLifecycleResponse{}, nil - } - - // avoid other wakeup runs if sleep didn't make the computer sleep - s.sleepTriggeredDown.Store(false) - - log.Info("running up after wake up") - _, err := s.Up(callerCtx, &proto.UpRequest{}) - if err != nil { - log.Errorf("running up failed: %v", err) - return &proto.OSLifecycleResponse{}, err - } - - log.Info("running up command executed successfully") - return &proto.OSLifecycleResponse{}, nil -} - -// handleSleep handles the sleep event by initiating a "down" sequence if the system is in a connected or connecting state. -func (s *Server) handleSleep(callerCtx context.Context) (*proto.OSLifecycleResponse, error) { - s.mutex.Lock() - - state := internal.CtxGetState(s.rootCtx) - status, err := state.Status() - if err != nil { - s.mutex.Unlock() - return &proto.OSLifecycleResponse{}, err - } - - if status != internal.StatusConnecting && status != internal.StatusConnected { - log.Infof("skipping setting the agent down because status is %s", status) - s.mutex.Unlock() - return &proto.OSLifecycleResponse{}, nil - } - s.mutex.Unlock() - - log.Info("running down after system started sleeping") - - _, err = s.Down(callerCtx, &proto.DownRequest{}) - if err != nil { - log.Errorf("running down failed: %v", err) - return &proto.OSLifecycleResponse{}, err - } - - s.sleepTriggeredDown.Store(true) - - log.Info("running down executed successfully") - return &proto.OSLifecycleResponse{}, nil -} diff --git a/client/server/lifecycle_test.go b/client/server/lifecycle_test.go deleted file mode 100644 index a604c60af..000000000 --- a/client/server/lifecycle_test.go +++ /dev/null @@ -1,219 +0,0 @@ -package server - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/netbirdio/netbird/client/internal" - "github.com/netbirdio/netbird/client/internal/peer" - "github.com/netbirdio/netbird/client/proto" -) - -func newTestServer() *Server { - ctx := internal.CtxInitState(context.Background()) - return &Server{ - rootCtx: ctx, - statusRecorder: peer.NewRecorder(""), - } -} - -func TestNotifyOSLifecycle_WakeUp_SkipsWhenNotSleepTriggered(t *testing.T) { - s := newTestServer() - - // sleepTriggeredDown is false by default - assert.False(t, s.sleepTriggeredDown.Load()) - - resp, err := s.NotifyOSLifecycle(context.Background(), &proto.OSLifecycleRequest{ - Type: proto.OSLifecycleRequest_WAKEUP, - }) - - require.NoError(t, err) - require.NotNil(t, resp) - assert.False(t, s.sleepTriggeredDown.Load(), "flag should remain false") -} - -func TestNotifyOSLifecycle_Sleep_SkipsWhenStatusIdle(t *testing.T) { - s := newTestServer() - - state := internal.CtxGetState(s.rootCtx) - state.Set(internal.StatusIdle) - - resp, err := s.NotifyOSLifecycle(context.Background(), &proto.OSLifecycleRequest{ - Type: proto.OSLifecycleRequest_SLEEP, - }) - - require.NoError(t, err) - require.NotNil(t, resp) - assert.False(t, s.sleepTriggeredDown.Load(), "flag should remain false when status is Idle") -} - -func TestNotifyOSLifecycle_Sleep_SkipsWhenStatusNeedsLogin(t *testing.T) { - s := newTestServer() - - state := internal.CtxGetState(s.rootCtx) - state.Set(internal.StatusNeedsLogin) - - resp, err := s.NotifyOSLifecycle(context.Background(), &proto.OSLifecycleRequest{ - Type: proto.OSLifecycleRequest_SLEEP, - }) - - require.NoError(t, err) - require.NotNil(t, resp) - assert.False(t, s.sleepTriggeredDown.Load(), "flag should remain false when status is NeedsLogin") -} - -func TestNotifyOSLifecycle_Sleep_SetsFlag_WhenConnecting(t *testing.T) { - s := newTestServer() - - state := internal.CtxGetState(s.rootCtx) - state.Set(internal.StatusConnecting) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - s.actCancel = cancel - - resp, err := s.NotifyOSLifecycle(ctx, &proto.OSLifecycleRequest{ - Type: proto.OSLifecycleRequest_SLEEP, - }) - - require.NoError(t, err) - assert.NotNil(t, resp, "handleSleep returns not nil response on success") - assert.True(t, s.sleepTriggeredDown.Load(), "flag should be set after sleep when connecting") -} - -func TestNotifyOSLifecycle_Sleep_SetsFlag_WhenConnected(t *testing.T) { - s := newTestServer() - - state := internal.CtxGetState(s.rootCtx) - state.Set(internal.StatusConnected) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - s.actCancel = cancel - - resp, err := s.NotifyOSLifecycle(ctx, &proto.OSLifecycleRequest{ - Type: proto.OSLifecycleRequest_SLEEP, - }) - - require.NoError(t, err) - assert.NotNil(t, resp, "handleSleep returns not nil response on success") - assert.True(t, s.sleepTriggeredDown.Load(), "flag should be set after sleep when connected") -} - -func TestNotifyOSLifecycle_WakeUp_ResetsFlag(t *testing.T) { - s := newTestServer() - - // Manually set the flag to simulate prior sleep down - s.sleepTriggeredDown.Store(true) - - // WakeUp will try to call Up which fails without proper setup, but flag should reset first - _, _ = s.NotifyOSLifecycle(context.Background(), &proto.OSLifecycleRequest{ - Type: proto.OSLifecycleRequest_WAKEUP, - }) - - assert.False(t, s.sleepTriggeredDown.Load(), "flag should be reset after WakeUp attempt") -} - -func TestNotifyOSLifecycle_MultipleWakeUpCalls(t *testing.T) { - s := newTestServer() - - // First wakeup without prior sleep - should be no-op - resp, err := s.NotifyOSLifecycle(context.Background(), &proto.OSLifecycleRequest{ - Type: proto.OSLifecycleRequest_WAKEUP, - }) - require.NoError(t, err) - require.NotNil(t, resp) - assert.False(t, s.sleepTriggeredDown.Load()) - - // Simulate prior sleep - s.sleepTriggeredDown.Store(true) - - // First wakeup after sleep - should reset flag - _, _ = s.NotifyOSLifecycle(context.Background(), &proto.OSLifecycleRequest{ - Type: proto.OSLifecycleRequest_WAKEUP, - }) - assert.False(t, s.sleepTriggeredDown.Load()) - - // Second wakeup - should be no-op - resp, err = s.NotifyOSLifecycle(context.Background(), &proto.OSLifecycleRequest{ - Type: proto.OSLifecycleRequest_WAKEUP, - }) - require.NoError(t, err) - require.NotNil(t, resp) - assert.False(t, s.sleepTriggeredDown.Load()) -} - -func TestHandleWakeUp_SkipsWhenFlagFalse(t *testing.T) { - s := newTestServer() - - resp, err := s.handleWakeUp(context.Background()) - - require.NoError(t, err) - require.NotNil(t, resp) -} - -func TestHandleWakeUp_ResetsFlagBeforeUp(t *testing.T) { - s := newTestServer() - s.sleepTriggeredDown.Store(true) - - // Even if Up fails, flag should be reset - _, _ = s.handleWakeUp(context.Background()) - - assert.False(t, s.sleepTriggeredDown.Load(), "flag must be reset before calling Up") -} - -func TestHandleSleep_SkipsForNonActiveStates(t *testing.T) { - tests := []struct { - name string - status internal.StatusType - }{ - {"Idle", internal.StatusIdle}, - {"NeedsLogin", internal.StatusNeedsLogin}, - {"LoginFailed", internal.StatusLoginFailed}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := newTestServer() - state := internal.CtxGetState(s.rootCtx) - state.Set(tt.status) - - resp, err := s.handleSleep(context.Background()) - - require.NoError(t, err) - require.NotNil(t, resp) - assert.False(t, s.sleepTriggeredDown.Load()) - }) - } -} - -func TestHandleSleep_ProceedsForActiveStates(t *testing.T) { - tests := []struct { - name string - status internal.StatusType - }{ - {"Connecting", internal.StatusConnecting}, - {"Connected", internal.StatusConnected}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := newTestServer() - state := internal.CtxGetState(s.rootCtx) - state.Set(tt.status) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - s.actCancel = cancel - - resp, err := s.handleSleep(ctx) - - require.NoError(t, err) - assert.NotNil(t, resp) - assert.True(t, s.sleepTriggeredDown.Load()) - }) - } -} diff --git a/client/server/server.go b/client/server/server.go index 108eab9fe..8cd057852 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -22,6 +22,7 @@ import ( "github.com/netbirdio/netbird/client/internal/auth" "github.com/netbirdio/netbird/client/internal/profilemanager" + sleephandler "github.com/netbirdio/netbird/client/internal/sleep/handler" "github.com/netbirdio/netbird/client/system" mgm "github.com/netbirdio/netbird/shared/management/client" "github.com/netbirdio/netbird/shared/management/domain" @@ -85,8 +86,7 @@ type Server struct { profilesDisabled bool updateSettingsDisabled bool - // sleepTriggeredDown holds a state indicated if the sleep handler triggered the last client down - sleepTriggeredDown atomic.Bool + sleepHandler *sleephandler.SleepHandler jwtCache *jwtCache } @@ -100,7 +100,7 @@ type oauthAuthFlow struct { // New server instance constructor. func New(ctx context.Context, logFile string, configFile string, profilesDisabled bool, updateSettingsDisabled bool) *Server { - return &Server{ + s := &Server{ rootCtx: ctx, logFile: logFile, persistSyncResponse: true, @@ -110,6 +110,10 @@ func New(ctx context.Context, logFile string, configFile string, profilesDisable updateSettingsDisabled: updateSettingsDisabled, jwtCache: newJWTCache(), } + agent := &serverAgent{s} + s.sleepHandler = sleephandler.New(agent) + + return s } func (s *Server) Start() error { diff --git a/client/server/sleep.go b/client/server/sleep.go new file mode 100644 index 000000000..7a83c75a6 --- /dev/null +++ b/client/server/sleep.go @@ -0,0 +1,46 @@ +package server + +import ( + "context" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/internal" + "github.com/netbirdio/netbird/client/proto" +) + +// serverAgent adapts Server to the handler.Agent and handler.StatusChecker interfaces +type serverAgent struct { + s *Server +} + +func (a *serverAgent) Up(ctx context.Context) error { + _, err := a.s.Up(ctx, &proto.UpRequest{}) + return err +} + +func (a *serverAgent) Down(ctx context.Context) error { + _, err := a.s.Down(ctx, &proto.DownRequest{}) + return err +} + +func (a *serverAgent) Status() (internal.StatusType, error) { + return internal.CtxGetState(a.s.rootCtx).Status() +} + +// NotifyOSLifecycle handles operating system lifecycle events by executing appropriate logic based on the request type. +func (s *Server) NotifyOSLifecycle(callerCtx context.Context, req *proto.OSLifecycleRequest) (*proto.OSLifecycleResponse, error) { + switch req.GetType() { + case proto.OSLifecycleRequest_WAKEUP: + if err := s.sleepHandler.HandleWakeUp(callerCtx); err != nil { + return &proto.OSLifecycleResponse{}, err + } + case proto.OSLifecycleRequest_SLEEP: + if err := s.sleepHandler.HandleSleep(callerCtx); err != nil { + return &proto.OSLifecycleResponse{}, err + } + default: + log.Errorf("unknown OSLifecycleRequest type: %v", req.GetType()) + } + return &proto.OSLifecycleResponse{}, nil +} From 63c83aa8d219a2b28874b5c224d00569d83c7a17 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Tue, 24 Feb 2026 10:02:16 +0100 Subject: [PATCH 162/374] [client,management] Feature/client service expose (#5411) CLI: new expose command to publish a local port with flags for PIN, password, user groups, custom domain, name prefix and protocol (HTTP default). Management/API: create/renew/stop expose sessions (streamed status), automatic naming/domain, TTL renewals, background expiration, new management RPCs and client methods. UI/API: account settings now include peer_expose_enabled and peer_expose_groups; new activity codes for peer expose events. --- client/cmd/expose.go | 194 ++++ client/cmd/root.go | 1 + client/internal/engine.go | 15 +- client/internal/expose/manager.go | 95 ++ client/internal/expose/manager_test.go | 95 ++ client/internal/expose/request.go | 39 + client/proto/daemon.pb.go | 739 ++++++++++----- client/proto/daemon.proto | 32 + client/proto/daemon_grpc.pb.go | 65 ++ client/server/server.go | 55 ++ .../modules/reverseproxy/domain/interface.go | 1 + .../reverseproxy/domain/manager/manager.go | 4 + .../modules/reverseproxy/interface.go | 4 + .../modules/reverseproxy/interface_mock.go | 57 ++ .../modules/reverseproxy/manager/manager.go | 189 +++- .../reverseproxy/manager/manager_test.go | 525 +++++++++- .../modules/reverseproxy/reverseproxy.go | 120 ++- .../modules/reverseproxy/reverseproxy_test.go | 143 +++ management/internals/server/boot.go | 2 + management/internals/server/modules.go | 2 +- .../internals/shared/grpc/expose_service.go | 301 ++++++ .../shared/grpc/expose_service_test.go | 242 +++++ .../shared/grpc/proxy_group_access_test.go | 16 + management/internals/shared/grpc/server.go | 6 + .../shared/grpc/validate_session_test.go | 16 + management/server/account.go | 16 + management/server/account_test.go | 2 +- management/server/activity/codes.go | 19 + .../handlers/accounts/accounts_handler.go | 9 + .../proxy/auth_callback_integration_test.go | 16 + .../testing/testing_tools/channel/channel.go | 2 +- management/server/mock_server/account_mock.go | 2 +- management/server/store/sql_store.go | 3 +- management/server/types/settings.go | 7 + proxy/management_integration_test.go | 16 + shared/management/client/client.go | 4 + shared/management/client/grpc.go | 133 +++ shared/management/client/mock.go | 29 +- shared/management/http/api/openapi.yml | 12 + shared/management/http/api/types.gen.go | 6 + shared/management/proto/management.pb.go | 895 ++++++++++++++---- shared/management/proto/management.proto | 44 + shared/management/proto/management_grpc.pb.go | 114 +++ shared/management/proto/proxy_service.pb.go | 2 +- 44 files changed, 3867 insertions(+), 422 deletions(-) create mode 100644 client/cmd/expose.go create mode 100644 client/internal/expose/manager.go create mode 100644 client/internal/expose/manager_test.go create mode 100644 client/internal/expose/request.go create mode 100644 management/internals/shared/grpc/expose_service.go create mode 100644 management/internals/shared/grpc/expose_service_test.go diff --git a/client/cmd/expose.go b/client/cmd/expose.go new file mode 100644 index 000000000..991d3ab86 --- /dev/null +++ b/client/cmd/expose.go @@ -0,0 +1,194 @@ +package cmd + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "os/signal" + "regexp" + "strconv" + "strings" + "syscall" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/netbirdio/netbird/client/proto" + "github.com/netbirdio/netbird/util" +) + +var pinRegexp = regexp.MustCompile(`^\d{6}$`) + +var ( + exposePin string + exposePassword string + exposeUserGroups []string + exposeDomain string + exposeNamePrefix string + exposeProtocol string +) + +var exposeCmd = &cobra.Command{ + Use: "expose ", + Short: "Expose a local port via the NetBird reverse proxy", + Args: cobra.ExactArgs(1), + Example: "netbird expose --with-password safe-pass 8080", + RunE: exposeFn, +} + +func init() { + exposeCmd.Flags().StringVar(&exposePin, "with-pin", "", "Protect the exposed service with a 6-digit PIN (e.g. --with-pin 123456)") + exposeCmd.Flags().StringVar(&exposePassword, "with-password", "", "Protect the exposed service with a password (e.g. --with-password my-secret)") + exposeCmd.Flags().StringSliceVar(&exposeUserGroups, "with-user-groups", nil, "Restrict access to specific user groups with SSO (e.g. --with-user-groups devops,Backend)") + exposeCmd.Flags().StringVar(&exposeDomain, "with-custom-domain", "", "Custom domain for the exposed service, must be configured to your account (e.g. --with-custom-domain myapp.example.com)") + exposeCmd.Flags().StringVar(&exposeNamePrefix, "with-name-prefix", "", "Prefix for the generated service name (e.g. --with-name-prefix my-app)") + exposeCmd.Flags().StringVar(&exposeProtocol, "protocol", "http", "Protocol to use, http/https is supported (e.g. --protocol http)") +} + +func validateExposeFlags(cmd *cobra.Command, portStr string) (uint64, error) { + port, err := strconv.ParseUint(portStr, 10, 32) + if err != nil { + return 0, fmt.Errorf("invalid port number: %s", portStr) + } + if port == 0 || port > 65535 { + return 0, fmt.Errorf("invalid port number: must be between 1 and 65535") + } + + if !isProtocolValid(exposeProtocol) { + return 0, fmt.Errorf("unsupported protocol %q: only 'http' or 'https' are supported", exposeProtocol) + } + + if exposePin != "" && !pinRegexp.MatchString(exposePin) { + return 0, fmt.Errorf("invalid pin: must be exactly 6 digits") + } + + if cmd.Flags().Changed("with-password") && exposePassword == "" { + return 0, fmt.Errorf("password cannot be empty") + } + + if cmd.Flags().Changed("with-user-groups") && len(exposeUserGroups) == 0 { + return 0, fmt.Errorf("user groups cannot be empty") + } + + return port, nil +} + +func isProtocolValid(exposeProtocol string) bool { + return strings.ToLower(exposeProtocol) == "http" || strings.ToLower(exposeProtocol) == "https" +} + +func exposeFn(cmd *cobra.Command, args []string) error { + SetFlagsFromEnvVars(rootCmd) + + if err := util.InitLog(logLevel, util.LogConsole); err != nil { + log.Errorf("failed initializing log %v", err) + return err + } + + cmd.Root().SilenceUsage = false + + port, err := validateExposeFlags(cmd, args[0]) + if err != nil { + return err + } + + cmd.Root().SilenceUsage = true + + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-sigCh + cancel() + }() + + conn, err := DialClientGRPCServer(ctx, daemonAddr) + if err != nil { + return fmt.Errorf("connect to daemon: %w", err) + } + defer func() { + if err := conn.Close(); err != nil { + log.Debugf("failed to close daemon connection: %v", err) + } + }() + + client := proto.NewDaemonServiceClient(conn) + + protocol, err := toExposeProtocol(exposeProtocol) + if err != nil { + return err + } + + stream, err := client.ExposeService(ctx, &proto.ExposeServiceRequest{ + Port: uint32(port), + Protocol: protocol, + Pin: exposePin, + Password: exposePassword, + UserGroups: exposeUserGroups, + Domain: exposeDomain, + NamePrefix: exposeNamePrefix, + }) + if err != nil { + return fmt.Errorf("expose service: %w", err) + } + + if err := handleExposeReady(cmd, stream, port); err != nil { + return err + } + + return waitForExposeEvents(cmd, ctx, stream) +} + +func toExposeProtocol(exposeProtocol string) (proto.ExposeProtocol, error) { + switch strings.ToLower(exposeProtocol) { + case "http": + return proto.ExposeProtocol_EXPOSE_HTTP, nil + case "https": + return proto.ExposeProtocol_EXPOSE_HTTPS, nil + default: + return 0, fmt.Errorf("unsupported protocol %q: only 'http' or 'https' are supported", exposeProtocol) + } +} + +func handleExposeReady(cmd *cobra.Command, stream proto.DaemonService_ExposeServiceClient, port uint64) error { + event, err := stream.Recv() + if err != nil { + return fmt.Errorf("receive expose event: %w", err) + } + + switch e := event.Event.(type) { + case *proto.ExposeServiceEvent_Ready: + cmd.Println("Service exposed successfully!") + cmd.Printf(" Name: %s\n", e.Ready.ServiceName) + cmd.Printf(" URL: %s\n", e.Ready.ServiceUrl) + cmd.Printf(" Domain: %s\n", e.Ready.Domain) + cmd.Printf(" Protocol: %s\n", exposeProtocol) + cmd.Printf(" Port: %d\n", port) + cmd.Println() + cmd.Println("Press Ctrl+C to stop exposing.") + return nil + default: + return fmt.Errorf("unexpected expose event: %T", event.Event) + } +} + +func waitForExposeEvents(cmd *cobra.Command, ctx context.Context, stream proto.DaemonService_ExposeServiceClient) error { + for { + _, err := stream.Recv() + if err != nil { + if ctx.Err() != nil { + cmd.Println("\nService stopped.") + //nolint:nilerr + return nil + } + if errors.Is(err, io.EOF) { + return fmt.Errorf("connection to daemon closed unexpectedly") + } + return fmt.Errorf("stream error: %w", err) + } + } +} diff --git a/client/cmd/root.go b/client/cmd/root.go index f4f4f6052..961abd54e 100644 --- a/client/cmd/root.go +++ b/client/cmd/root.go @@ -144,6 +144,7 @@ func init() { rootCmd.AddCommand(forwardingRulesCmd) rootCmd.AddCommand(debugCmd) rootCmd.AddCommand(profileCmd) + rootCmd.AddCommand(exposeCmd) networksCMD.AddCommand(routesListCmd) networksCMD.AddCommand(routesSelectCmd, routesDeselectCmd) diff --git a/client/internal/engine.go b/client/internal/engine.go index 90fc041a9..b0ae841f8 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -36,6 +36,7 @@ import ( "github.com/netbirdio/netbird/client/internal/dns" dnsconfig "github.com/netbirdio/netbird/client/internal/dns/config" "github.com/netbirdio/netbird/client/internal/dnsfwd" + "github.com/netbirdio/netbird/client/internal/expose" "github.com/netbirdio/netbird/client/internal/ingressgw" "github.com/netbirdio/netbird/client/internal/netflow" nftypes "github.com/netbirdio/netbird/client/internal/netflow/types" @@ -220,6 +221,8 @@ type Engine struct { jobExecutor *jobexec.Executor jobExecutorWG sync.WaitGroup + + exposeManager *expose.Manager } // Peer is an instance of the Connection Peer @@ -414,6 +417,7 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) e.cancel() } e.ctx, e.cancel = context.WithCancel(e.clientCtx) + e.exposeManager = expose.NewManager(e.ctx, e.mgmClient) wgIface, err := e.newWgIface() if err != nil { @@ -796,7 +800,7 @@ func (e *Engine) handleAutoUpdateVersion(autoUpdateSettings *mgmProto.AutoUpdate disabled := autoUpdateSettings.Version == disableAutoUpdate - // Stop and cleanup if disabled + // stop and cleanup if disabled if e.updateManager != nil && disabled { log.Infof("auto-update is disabled, stopping update manager") e.updateManager.Stop() @@ -1818,11 +1822,18 @@ func (e *Engine) GetRouteManager() routemanager.Manager { return e.routeManager } -// GetFirewallManager returns the firewall manager +// GetFirewallManager returns the firewall manager. func (e *Engine) GetFirewallManager() firewallManager.Manager { return e.firewall } +// GetExposeManager returns the expose session manager. +func (e *Engine) GetExposeManager() *expose.Manager { + e.syncMsgMux.Lock() + defer e.syncMsgMux.Unlock() + return e.exposeManager +} + func findIPFromInterfaceName(ifaceName string) (net.IP, error) { iface, err := net.InterfaceByName(ifaceName) if err != nil { diff --git a/client/internal/expose/manager.go b/client/internal/expose/manager.go new file mode 100644 index 000000000..ba6aa6dc9 --- /dev/null +++ b/client/internal/expose/manager.go @@ -0,0 +1,95 @@ +package expose + +import ( + "context" + "time" + + mgm "github.com/netbirdio/netbird/shared/management/client" + log "github.com/sirupsen/logrus" +) + +const renewTimeout = 10 * time.Second + +// Response holds the response from exposing a service. +type Response struct { + ServiceName string + ServiceURL string + Domain string +} + +type Request struct { + NamePrefix string + Domain string + Port uint16 + Protocol int + Pin string + Password string + UserGroups []string +} + +type ManagementClient interface { + CreateExpose(ctx context.Context, req mgm.ExposeRequest) (*mgm.ExposeResponse, error) + RenewExpose(ctx context.Context, domain string) error + StopExpose(ctx context.Context, domain string) error +} + +// Manager handles expose session lifecycle via the management client. +type Manager struct { + mgmClient ManagementClient + ctx context.Context +} + +// NewManager creates a new expose Manager using the given management client. +func NewManager(ctx context.Context, mgmClient ManagementClient) *Manager { + return &Manager{mgmClient: mgmClient, ctx: ctx} +} + +// Expose creates a new expose session via the management server. +func (m *Manager) Expose(ctx context.Context, req Request) (*Response, error) { + log.Infof("exposing service on port %d", req.Port) + resp, err := m.mgmClient.CreateExpose(ctx, toClientExposeRequest(req)) + if err != nil { + return nil, err + } + + log.Infof("expose session created for %s", resp.Domain) + + return fromClientExposeResponse(resp), nil +} + +func (m *Manager) KeepAlive(ctx context.Context, domain string) error { + ticker := time.NewTicker(10 * time.Second) + defer ticker.Stop() + defer m.stop(domain) + + for { + select { + case <-ctx.Done(): + log.Infof("context canceled, stopping keep alive for %s", domain) + + return nil + case <-ticker.C: + if err := m.renew(ctx, domain); err != nil { + log.Errorf("renewing expose session for %s: %v", domain, err) + return err + } + } + } +} + +// renew extends the TTL of an active expose session. +func (m *Manager) renew(ctx context.Context, domain string) error { + renewCtx, cancel := context.WithTimeout(ctx, renewTimeout) + defer cancel() + return m.mgmClient.RenewExpose(renewCtx, domain) +} + +// stop terminates an active expose session. +func (m *Manager) stop(domain string) { + stopCtx, cancel := context.WithTimeout(m.ctx, renewTimeout) + defer cancel() + err := m.mgmClient.StopExpose(stopCtx, domain) + if err != nil { + log.Warnf("Failed stopping expose session for %s: %v", domain, err) + } +} diff --git a/client/internal/expose/manager_test.go b/client/internal/expose/manager_test.go new file mode 100644 index 000000000..87d43cdb0 --- /dev/null +++ b/client/internal/expose/manager_test.go @@ -0,0 +1,95 @@ +package expose + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + daemonProto "github.com/netbirdio/netbird/client/proto" + mgm "github.com/netbirdio/netbird/shared/management/client" +) + +func TestManager_Expose_Success(t *testing.T) { + mock := &mgm.MockClient{ + CreateExposeFunc: func(ctx context.Context, req mgm.ExposeRequest) (*mgm.ExposeResponse, error) { + return &mgm.ExposeResponse{ + ServiceName: "my-service", + ServiceURL: "https://my-service.example.com", + Domain: "my-service.example.com", + }, nil + }, + } + + m := NewManager(context.Background(), mock) + result, err := m.Expose(context.Background(), Request{Port: 8080}) + require.NoError(t, err) + assert.Equal(t, "my-service", result.ServiceName, "service name should match") + assert.Equal(t, "https://my-service.example.com", result.ServiceURL, "service URL should match") + assert.Equal(t, "my-service.example.com", result.Domain, "domain should match") +} + +func TestManager_Expose_Error(t *testing.T) { + mock := &mgm.MockClient{ + CreateExposeFunc: func(ctx context.Context, req mgm.ExposeRequest) (*mgm.ExposeResponse, error) { + return nil, errors.New("permission denied") + }, + } + + m := NewManager(context.Background(), mock) + _, err := m.Expose(context.Background(), Request{Port: 8080}) + require.Error(t, err) + assert.Contains(t, err.Error(), "permission denied", "error should propagate") +} + +func TestManager_Renew_Success(t *testing.T) { + mock := &mgm.MockClient{ + RenewExposeFunc: func(ctx context.Context, domain string) error { + assert.Equal(t, "my-service.example.com", domain, "domain should be passed through") + return nil + }, + } + + m := NewManager(context.Background(), mock) + err := m.renew(context.Background(), "my-service.example.com") + require.NoError(t, err) +} + +func TestManager_Renew_Timeout(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + mock := &mgm.MockClient{ + RenewExposeFunc: func(ctx context.Context, domain string) error { + return ctx.Err() + }, + } + + m := NewManager(ctx, mock) + err := m.renew(ctx, "my-service.example.com") + require.Error(t, err) +} + +func TestNewRequest(t *testing.T) { + req := &daemonProto.ExposeServiceRequest{ + Port: 8080, + Protocol: daemonProto.ExposeProtocol_EXPOSE_HTTPS, + Pin: "123456", + Password: "secret", + UserGroups: []string{"group1", "group2"}, + Domain: "custom.example.com", + NamePrefix: "my-prefix", + } + + exposeReq := NewRequest(req) + + assert.Equal(t, uint16(8080), exposeReq.Port, "port should match") + assert.Equal(t, int(daemonProto.ExposeProtocol_EXPOSE_HTTPS), exposeReq.Protocol, "protocol should match") + assert.Equal(t, "123456", exposeReq.Pin, "pin should match") + assert.Equal(t, "secret", exposeReq.Password, "password should match") + assert.Equal(t, []string{"group1", "group2"}, exposeReq.UserGroups, "user groups should match") + assert.Equal(t, "custom.example.com", exposeReq.Domain, "domain should match") + assert.Equal(t, "my-prefix", exposeReq.NamePrefix, "name prefix should match") +} diff --git a/client/internal/expose/request.go b/client/internal/expose/request.go new file mode 100644 index 000000000..7e12d0513 --- /dev/null +++ b/client/internal/expose/request.go @@ -0,0 +1,39 @@ +package expose + +import ( + daemonProto "github.com/netbirdio/netbird/client/proto" + mgm "github.com/netbirdio/netbird/shared/management/client" +) + +// NewRequest converts a daemon ExposeServiceRequest to a management ExposeServiceRequest. +func NewRequest(req *daemonProto.ExposeServiceRequest) *Request { + return &Request{ + Port: uint16(req.Port), + Protocol: int(req.Protocol), + Pin: req.Pin, + Password: req.Password, + UserGroups: req.UserGroups, + Domain: req.Domain, + NamePrefix: req.NamePrefix, + } +} + +func toClientExposeRequest(req Request) mgm.ExposeRequest { + return mgm.ExposeRequest{ + NamePrefix: req.NamePrefix, + Domain: req.Domain, + Port: req.Port, + Protocol: req.Protocol, + Pin: req.Pin, + Password: req.Password, + UserGroups: req.UserGroups, + } +} + +func fromClientExposeResponse(response *mgm.ExposeResponse) *Response { + return &Response{ + ServiceName: response.ServiceName, + Domain: response.Domain, + ServiceURL: response.ServiceURL, + } +} diff --git a/client/proto/daemon.pb.go b/client/proto/daemon.pb.go index 1d9d7233c..3879beba3 100644 --- a/client/proto/daemon.pb.go +++ b/client/proto/daemon.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.36.6 -// protoc v6.32.1 +// protoc v6.33.3 // source: daemon.proto package proto @@ -88,6 +88,58 @@ func (LogLevel) EnumDescriptor() ([]byte, []int) { return file_daemon_proto_rawDescGZIP(), []int{0} } +type ExposeProtocol int32 + +const ( + ExposeProtocol_EXPOSE_HTTP ExposeProtocol = 0 + ExposeProtocol_EXPOSE_HTTPS ExposeProtocol = 1 + ExposeProtocol_EXPOSE_TCP ExposeProtocol = 2 + ExposeProtocol_EXPOSE_UDP ExposeProtocol = 3 +) + +// Enum value maps for ExposeProtocol. +var ( + ExposeProtocol_name = map[int32]string{ + 0: "EXPOSE_HTTP", + 1: "EXPOSE_HTTPS", + 2: "EXPOSE_TCP", + 3: "EXPOSE_UDP", + } + ExposeProtocol_value = map[string]int32{ + "EXPOSE_HTTP": 0, + "EXPOSE_HTTPS": 1, + "EXPOSE_TCP": 2, + "EXPOSE_UDP": 3, + } +) + +func (x ExposeProtocol) Enum() *ExposeProtocol { + p := new(ExposeProtocol) + *p = x + return p +} + +func (x ExposeProtocol) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ExposeProtocol) Descriptor() protoreflect.EnumDescriptor { + return file_daemon_proto_enumTypes[1].Descriptor() +} + +func (ExposeProtocol) Type() protoreflect.EnumType { + return &file_daemon_proto_enumTypes[1] +} + +func (x ExposeProtocol) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ExposeProtocol.Descriptor instead. +func (ExposeProtocol) EnumDescriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{1} +} + // avoid collision with loglevel enum type OSLifecycleRequest_CycleType int32 @@ -122,11 +174,11 @@ func (x OSLifecycleRequest_CycleType) String() string { } func (OSLifecycleRequest_CycleType) Descriptor() protoreflect.EnumDescriptor { - return file_daemon_proto_enumTypes[1].Descriptor() + return file_daemon_proto_enumTypes[2].Descriptor() } func (OSLifecycleRequest_CycleType) Type() protoreflect.EnumType { - return &file_daemon_proto_enumTypes[1] + return &file_daemon_proto_enumTypes[2] } func (x OSLifecycleRequest_CycleType) Number() protoreflect.EnumNumber { @@ -174,11 +226,11 @@ func (x SystemEvent_Severity) String() string { } func (SystemEvent_Severity) Descriptor() protoreflect.EnumDescriptor { - return file_daemon_proto_enumTypes[2].Descriptor() + return file_daemon_proto_enumTypes[3].Descriptor() } func (SystemEvent_Severity) Type() protoreflect.EnumType { - return &file_daemon_proto_enumTypes[2] + return &file_daemon_proto_enumTypes[3] } func (x SystemEvent_Severity) Number() protoreflect.EnumNumber { @@ -229,11 +281,11 @@ func (x SystemEvent_Category) String() string { } func (SystemEvent_Category) Descriptor() protoreflect.EnumDescriptor { - return file_daemon_proto_enumTypes[3].Descriptor() + return file_daemon_proto_enumTypes[4].Descriptor() } func (SystemEvent_Category) Type() protoreflect.EnumType { - return &file_daemon_proto_enumTypes[3] + return &file_daemon_proto_enumTypes[4] } func (x SystemEvent_Category) Number() protoreflect.EnumNumber { @@ -5600,6 +5652,224 @@ func (x *InstallerResultResponse) GetErrorMsg() string { return "" } +type ExposeServiceRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Port uint32 `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"` + Protocol ExposeProtocol `protobuf:"varint,2,opt,name=protocol,proto3,enum=daemon.ExposeProtocol" json:"protocol,omitempty"` + Pin string `protobuf:"bytes,3,opt,name=pin,proto3" json:"pin,omitempty"` + Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password,omitempty"` + UserGroups []string `protobuf:"bytes,5,rep,name=user_groups,json=userGroups,proto3" json:"user_groups,omitempty"` + Domain string `protobuf:"bytes,6,opt,name=domain,proto3" json:"domain,omitempty"` + NamePrefix string `protobuf:"bytes,7,opt,name=name_prefix,json=namePrefix,proto3" json:"name_prefix,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExposeServiceRequest) Reset() { + *x = ExposeServiceRequest{} + mi := &file_daemon_proto_msgTypes[85] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExposeServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExposeServiceRequest) ProtoMessage() {} + +func (x *ExposeServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[85] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExposeServiceRequest.ProtoReflect.Descriptor instead. +func (*ExposeServiceRequest) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{85} +} + +func (x *ExposeServiceRequest) GetPort() uint32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *ExposeServiceRequest) GetProtocol() ExposeProtocol { + if x != nil { + return x.Protocol + } + return ExposeProtocol_EXPOSE_HTTP +} + +func (x *ExposeServiceRequest) GetPin() string { + if x != nil { + return x.Pin + } + return "" +} + +func (x *ExposeServiceRequest) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +func (x *ExposeServiceRequest) GetUserGroups() []string { + if x != nil { + return x.UserGroups + } + return nil +} + +func (x *ExposeServiceRequest) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *ExposeServiceRequest) GetNamePrefix() string { + if x != nil { + return x.NamePrefix + } + return "" +} + +type ExposeServiceEvent struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Event: + // + // *ExposeServiceEvent_Ready + Event isExposeServiceEvent_Event `protobuf_oneof:"event"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExposeServiceEvent) Reset() { + *x = ExposeServiceEvent{} + mi := &file_daemon_proto_msgTypes[86] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExposeServiceEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExposeServiceEvent) ProtoMessage() {} + +func (x *ExposeServiceEvent) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[86] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExposeServiceEvent.ProtoReflect.Descriptor instead. +func (*ExposeServiceEvent) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{86} +} + +func (x *ExposeServiceEvent) GetEvent() isExposeServiceEvent_Event { + if x != nil { + return x.Event + } + return nil +} + +func (x *ExposeServiceEvent) GetReady() *ExposeServiceReady { + if x != nil { + if x, ok := x.Event.(*ExposeServiceEvent_Ready); ok { + return x.Ready + } + } + return nil +} + +type isExposeServiceEvent_Event interface { + isExposeServiceEvent_Event() +} + +type ExposeServiceEvent_Ready struct { + Ready *ExposeServiceReady `protobuf:"bytes,1,opt,name=ready,proto3,oneof"` +} + +func (*ExposeServiceEvent_Ready) isExposeServiceEvent_Event() {} + +type ExposeServiceReady struct { + state protoimpl.MessageState `protogen:"open.v1"` + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + ServiceUrl string `protobuf:"bytes,2,opt,name=service_url,json=serviceUrl,proto3" json:"service_url,omitempty"` + Domain string `protobuf:"bytes,3,opt,name=domain,proto3" json:"domain,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExposeServiceReady) Reset() { + *x = ExposeServiceReady{} + mi := &file_daemon_proto_msgTypes[87] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExposeServiceReady) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExposeServiceReady) ProtoMessage() {} + +func (x *ExposeServiceReady) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[87] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExposeServiceReady.ProtoReflect.Descriptor instead. +func (*ExposeServiceReady) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{87} +} + +func (x *ExposeServiceReady) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *ExposeServiceReady) GetServiceUrl() string { + if x != nil { + return x.ServiceUrl + } + return "" +} + +func (x *ExposeServiceReady) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + type PortInfo_Range struct { state protoimpl.MessageState `protogen:"open.v1"` Start uint32 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` @@ -5610,7 +5880,7 @@ type PortInfo_Range struct { func (x *PortInfo_Range) Reset() { *x = PortInfo_Range{} - mi := &file_daemon_proto_msgTypes[86] + mi := &file_daemon_proto_msgTypes[89] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5622,7 +5892,7 @@ func (x *PortInfo_Range) String() string { func (*PortInfo_Range) ProtoMessage() {} func (x *PortInfo_Range) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[86] + mi := &file_daemon_proto_msgTypes[89] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6149,7 +6419,25 @@ const file_daemon_proto_rawDesc = "" + "\x16InstallerResultRequest\"O\n" + "\x17InstallerResultResponse\x12\x18\n" + "\asuccess\x18\x01 \x01(\bR\asuccess\x12\x1a\n" + - "\berrorMsg\x18\x02 \x01(\tR\berrorMsg*b\n" + + "\berrorMsg\x18\x02 \x01(\tR\berrorMsg\"\xe6\x01\n" + + "\x14ExposeServiceRequest\x12\x12\n" + + "\x04port\x18\x01 \x01(\rR\x04port\x122\n" + + "\bprotocol\x18\x02 \x01(\x0e2\x16.daemon.ExposeProtocolR\bprotocol\x12\x10\n" + + "\x03pin\x18\x03 \x01(\tR\x03pin\x12\x1a\n" + + "\bpassword\x18\x04 \x01(\tR\bpassword\x12\x1f\n" + + "\vuser_groups\x18\x05 \x03(\tR\n" + + "userGroups\x12\x16\n" + + "\x06domain\x18\x06 \x01(\tR\x06domain\x12\x1f\n" + + "\vname_prefix\x18\a \x01(\tR\n" + + "namePrefix\"Q\n" + + "\x12ExposeServiceEvent\x122\n" + + "\x05ready\x18\x01 \x01(\v2\x1a.daemon.ExposeServiceReadyH\x00R\x05readyB\a\n" + + "\x05event\"p\n" + + "\x12ExposeServiceReady\x12!\n" + + "\fservice_name\x18\x01 \x01(\tR\vserviceName\x12\x1f\n" + + "\vservice_url\x18\x02 \x01(\tR\n" + + "serviceUrl\x12\x16\n" + + "\x06domain\x18\x03 \x01(\tR\x06domain*b\n" + "\bLogLevel\x12\v\n" + "\aUNKNOWN\x10\x00\x12\t\n" + "\x05PANIC\x10\x01\x12\t\n" + @@ -6158,7 +6446,14 @@ const file_daemon_proto_rawDesc = "" + "\x04WARN\x10\x04\x12\b\n" + "\x04INFO\x10\x05\x12\t\n" + "\x05DEBUG\x10\x06\x12\t\n" + - "\x05TRACE\x10\a2\xdd\x14\n" + + "\x05TRACE\x10\a*S\n" + + "\x0eExposeProtocol\x12\x0f\n" + + "\vEXPOSE_HTTP\x10\x00\x12\x10\n" + + "\fEXPOSE_HTTPS\x10\x01\x12\x0e\n" + + "\n" + + "EXPOSE_TCP\x10\x02\x12\x0e\n" + + "\n" + + "EXPOSE_UDP\x10\x032\xac\x15\n" + "\rDaemonService\x126\n" + "\x05Login\x12\x14.daemon.LoginRequest\x1a\x15.daemon.LoginResponse\"\x00\x12K\n" + "\fWaitSSOLogin\x12\x1b.daemon.WaitSSOLoginRequest\x1a\x1c.daemon.WaitSSOLoginResponse\"\x00\x12-\n" + @@ -6197,7 +6492,8 @@ const file_daemon_proto_rawDesc = "" + "\x0fStartCPUProfile\x12\x1e.daemon.StartCPUProfileRequest\x1a\x1f.daemon.StartCPUProfileResponse\"\x00\x12Q\n" + "\x0eStopCPUProfile\x12\x1d.daemon.StopCPUProfileRequest\x1a\x1e.daemon.StopCPUProfileResponse\"\x00\x12N\n" + "\x11NotifyOSLifecycle\x12\x1a.daemon.OSLifecycleRequest\x1a\x1b.daemon.OSLifecycleResponse\"\x00\x12W\n" + - "\x12GetInstallerResult\x12\x1e.daemon.InstallerResultRequest\x1a\x1f.daemon.InstallerResultResponse\"\x00B\bZ\x06/protob\x06proto3" + "\x12GetInstallerResult\x12\x1e.daemon.InstallerResultRequest\x1a\x1f.daemon.InstallerResultResponse\"\x00\x12M\n" + + "\rExposeService\x12\x1c.daemon.ExposeServiceRequest\x1a\x1a.daemon.ExposeServiceEvent\"\x000\x01B\bZ\x06/protob\x06proto3" var ( file_daemon_proto_rawDescOnce sync.Once @@ -6211,214 +6507,222 @@ func file_daemon_proto_rawDescGZIP() []byte { return file_daemon_proto_rawDescData } -var file_daemon_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 88) +var file_daemon_proto_enumTypes = make([]protoimpl.EnumInfo, 5) +var file_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 91) var file_daemon_proto_goTypes = []any{ (LogLevel)(0), // 0: daemon.LogLevel - (OSLifecycleRequest_CycleType)(0), // 1: daemon.OSLifecycleRequest.CycleType - (SystemEvent_Severity)(0), // 2: daemon.SystemEvent.Severity - (SystemEvent_Category)(0), // 3: daemon.SystemEvent.Category - (*EmptyRequest)(nil), // 4: daemon.EmptyRequest - (*OSLifecycleRequest)(nil), // 5: daemon.OSLifecycleRequest - (*OSLifecycleResponse)(nil), // 6: daemon.OSLifecycleResponse - (*LoginRequest)(nil), // 7: daemon.LoginRequest - (*LoginResponse)(nil), // 8: daemon.LoginResponse - (*WaitSSOLoginRequest)(nil), // 9: daemon.WaitSSOLoginRequest - (*WaitSSOLoginResponse)(nil), // 10: daemon.WaitSSOLoginResponse - (*UpRequest)(nil), // 11: daemon.UpRequest - (*UpResponse)(nil), // 12: daemon.UpResponse - (*StatusRequest)(nil), // 13: daemon.StatusRequest - (*StatusResponse)(nil), // 14: daemon.StatusResponse - (*DownRequest)(nil), // 15: daemon.DownRequest - (*DownResponse)(nil), // 16: daemon.DownResponse - (*GetConfigRequest)(nil), // 17: daemon.GetConfigRequest - (*GetConfigResponse)(nil), // 18: daemon.GetConfigResponse - (*PeerState)(nil), // 19: daemon.PeerState - (*LocalPeerState)(nil), // 20: daemon.LocalPeerState - (*SignalState)(nil), // 21: daemon.SignalState - (*ManagementState)(nil), // 22: daemon.ManagementState - (*RelayState)(nil), // 23: daemon.RelayState - (*NSGroupState)(nil), // 24: daemon.NSGroupState - (*SSHSessionInfo)(nil), // 25: daemon.SSHSessionInfo - (*SSHServerState)(nil), // 26: daemon.SSHServerState - (*FullStatus)(nil), // 27: daemon.FullStatus - (*ListNetworksRequest)(nil), // 28: daemon.ListNetworksRequest - (*ListNetworksResponse)(nil), // 29: daemon.ListNetworksResponse - (*SelectNetworksRequest)(nil), // 30: daemon.SelectNetworksRequest - (*SelectNetworksResponse)(nil), // 31: daemon.SelectNetworksResponse - (*IPList)(nil), // 32: daemon.IPList - (*Network)(nil), // 33: daemon.Network - (*PortInfo)(nil), // 34: daemon.PortInfo - (*ForwardingRule)(nil), // 35: daemon.ForwardingRule - (*ForwardingRulesResponse)(nil), // 36: daemon.ForwardingRulesResponse - (*DebugBundleRequest)(nil), // 37: daemon.DebugBundleRequest - (*DebugBundleResponse)(nil), // 38: daemon.DebugBundleResponse - (*GetLogLevelRequest)(nil), // 39: daemon.GetLogLevelRequest - (*GetLogLevelResponse)(nil), // 40: daemon.GetLogLevelResponse - (*SetLogLevelRequest)(nil), // 41: daemon.SetLogLevelRequest - (*SetLogLevelResponse)(nil), // 42: daemon.SetLogLevelResponse - (*State)(nil), // 43: daemon.State - (*ListStatesRequest)(nil), // 44: daemon.ListStatesRequest - (*ListStatesResponse)(nil), // 45: daemon.ListStatesResponse - (*CleanStateRequest)(nil), // 46: daemon.CleanStateRequest - (*CleanStateResponse)(nil), // 47: daemon.CleanStateResponse - (*DeleteStateRequest)(nil), // 48: daemon.DeleteStateRequest - (*DeleteStateResponse)(nil), // 49: daemon.DeleteStateResponse - (*SetSyncResponsePersistenceRequest)(nil), // 50: daemon.SetSyncResponsePersistenceRequest - (*SetSyncResponsePersistenceResponse)(nil), // 51: daemon.SetSyncResponsePersistenceResponse - (*TCPFlags)(nil), // 52: daemon.TCPFlags - (*TracePacketRequest)(nil), // 53: daemon.TracePacketRequest - (*TraceStage)(nil), // 54: daemon.TraceStage - (*TracePacketResponse)(nil), // 55: daemon.TracePacketResponse - (*SubscribeRequest)(nil), // 56: daemon.SubscribeRequest - (*SystemEvent)(nil), // 57: daemon.SystemEvent - (*GetEventsRequest)(nil), // 58: daemon.GetEventsRequest - (*GetEventsResponse)(nil), // 59: daemon.GetEventsResponse - (*SwitchProfileRequest)(nil), // 60: daemon.SwitchProfileRequest - (*SwitchProfileResponse)(nil), // 61: daemon.SwitchProfileResponse - (*SetConfigRequest)(nil), // 62: daemon.SetConfigRequest - (*SetConfigResponse)(nil), // 63: daemon.SetConfigResponse - (*AddProfileRequest)(nil), // 64: daemon.AddProfileRequest - (*AddProfileResponse)(nil), // 65: daemon.AddProfileResponse - (*RemoveProfileRequest)(nil), // 66: daemon.RemoveProfileRequest - (*RemoveProfileResponse)(nil), // 67: daemon.RemoveProfileResponse - (*ListProfilesRequest)(nil), // 68: daemon.ListProfilesRequest - (*ListProfilesResponse)(nil), // 69: daemon.ListProfilesResponse - (*Profile)(nil), // 70: daemon.Profile - (*GetActiveProfileRequest)(nil), // 71: daemon.GetActiveProfileRequest - (*GetActiveProfileResponse)(nil), // 72: daemon.GetActiveProfileResponse - (*LogoutRequest)(nil), // 73: daemon.LogoutRequest - (*LogoutResponse)(nil), // 74: daemon.LogoutResponse - (*GetFeaturesRequest)(nil), // 75: daemon.GetFeaturesRequest - (*GetFeaturesResponse)(nil), // 76: daemon.GetFeaturesResponse - (*GetPeerSSHHostKeyRequest)(nil), // 77: daemon.GetPeerSSHHostKeyRequest - (*GetPeerSSHHostKeyResponse)(nil), // 78: daemon.GetPeerSSHHostKeyResponse - (*RequestJWTAuthRequest)(nil), // 79: daemon.RequestJWTAuthRequest - (*RequestJWTAuthResponse)(nil), // 80: daemon.RequestJWTAuthResponse - (*WaitJWTTokenRequest)(nil), // 81: daemon.WaitJWTTokenRequest - (*WaitJWTTokenResponse)(nil), // 82: daemon.WaitJWTTokenResponse - (*StartCPUProfileRequest)(nil), // 83: daemon.StartCPUProfileRequest - (*StartCPUProfileResponse)(nil), // 84: daemon.StartCPUProfileResponse - (*StopCPUProfileRequest)(nil), // 85: daemon.StopCPUProfileRequest - (*StopCPUProfileResponse)(nil), // 86: daemon.StopCPUProfileResponse - (*InstallerResultRequest)(nil), // 87: daemon.InstallerResultRequest - (*InstallerResultResponse)(nil), // 88: daemon.InstallerResultResponse - nil, // 89: daemon.Network.ResolvedIPsEntry - (*PortInfo_Range)(nil), // 90: daemon.PortInfo.Range - nil, // 91: daemon.SystemEvent.MetadataEntry - (*durationpb.Duration)(nil), // 92: google.protobuf.Duration - (*timestamppb.Timestamp)(nil), // 93: google.protobuf.Timestamp + (ExposeProtocol)(0), // 1: daemon.ExposeProtocol + (OSLifecycleRequest_CycleType)(0), // 2: daemon.OSLifecycleRequest.CycleType + (SystemEvent_Severity)(0), // 3: daemon.SystemEvent.Severity + (SystemEvent_Category)(0), // 4: daemon.SystemEvent.Category + (*EmptyRequest)(nil), // 5: daemon.EmptyRequest + (*OSLifecycleRequest)(nil), // 6: daemon.OSLifecycleRequest + (*OSLifecycleResponse)(nil), // 7: daemon.OSLifecycleResponse + (*LoginRequest)(nil), // 8: daemon.LoginRequest + (*LoginResponse)(nil), // 9: daemon.LoginResponse + (*WaitSSOLoginRequest)(nil), // 10: daemon.WaitSSOLoginRequest + (*WaitSSOLoginResponse)(nil), // 11: daemon.WaitSSOLoginResponse + (*UpRequest)(nil), // 12: daemon.UpRequest + (*UpResponse)(nil), // 13: daemon.UpResponse + (*StatusRequest)(nil), // 14: daemon.StatusRequest + (*StatusResponse)(nil), // 15: daemon.StatusResponse + (*DownRequest)(nil), // 16: daemon.DownRequest + (*DownResponse)(nil), // 17: daemon.DownResponse + (*GetConfigRequest)(nil), // 18: daemon.GetConfigRequest + (*GetConfigResponse)(nil), // 19: daemon.GetConfigResponse + (*PeerState)(nil), // 20: daemon.PeerState + (*LocalPeerState)(nil), // 21: daemon.LocalPeerState + (*SignalState)(nil), // 22: daemon.SignalState + (*ManagementState)(nil), // 23: daemon.ManagementState + (*RelayState)(nil), // 24: daemon.RelayState + (*NSGroupState)(nil), // 25: daemon.NSGroupState + (*SSHSessionInfo)(nil), // 26: daemon.SSHSessionInfo + (*SSHServerState)(nil), // 27: daemon.SSHServerState + (*FullStatus)(nil), // 28: daemon.FullStatus + (*ListNetworksRequest)(nil), // 29: daemon.ListNetworksRequest + (*ListNetworksResponse)(nil), // 30: daemon.ListNetworksResponse + (*SelectNetworksRequest)(nil), // 31: daemon.SelectNetworksRequest + (*SelectNetworksResponse)(nil), // 32: daemon.SelectNetworksResponse + (*IPList)(nil), // 33: daemon.IPList + (*Network)(nil), // 34: daemon.Network + (*PortInfo)(nil), // 35: daemon.PortInfo + (*ForwardingRule)(nil), // 36: daemon.ForwardingRule + (*ForwardingRulesResponse)(nil), // 37: daemon.ForwardingRulesResponse + (*DebugBundleRequest)(nil), // 38: daemon.DebugBundleRequest + (*DebugBundleResponse)(nil), // 39: daemon.DebugBundleResponse + (*GetLogLevelRequest)(nil), // 40: daemon.GetLogLevelRequest + (*GetLogLevelResponse)(nil), // 41: daemon.GetLogLevelResponse + (*SetLogLevelRequest)(nil), // 42: daemon.SetLogLevelRequest + (*SetLogLevelResponse)(nil), // 43: daemon.SetLogLevelResponse + (*State)(nil), // 44: daemon.State + (*ListStatesRequest)(nil), // 45: daemon.ListStatesRequest + (*ListStatesResponse)(nil), // 46: daemon.ListStatesResponse + (*CleanStateRequest)(nil), // 47: daemon.CleanStateRequest + (*CleanStateResponse)(nil), // 48: daemon.CleanStateResponse + (*DeleteStateRequest)(nil), // 49: daemon.DeleteStateRequest + (*DeleteStateResponse)(nil), // 50: daemon.DeleteStateResponse + (*SetSyncResponsePersistenceRequest)(nil), // 51: daemon.SetSyncResponsePersistenceRequest + (*SetSyncResponsePersistenceResponse)(nil), // 52: daemon.SetSyncResponsePersistenceResponse + (*TCPFlags)(nil), // 53: daemon.TCPFlags + (*TracePacketRequest)(nil), // 54: daemon.TracePacketRequest + (*TraceStage)(nil), // 55: daemon.TraceStage + (*TracePacketResponse)(nil), // 56: daemon.TracePacketResponse + (*SubscribeRequest)(nil), // 57: daemon.SubscribeRequest + (*SystemEvent)(nil), // 58: daemon.SystemEvent + (*GetEventsRequest)(nil), // 59: daemon.GetEventsRequest + (*GetEventsResponse)(nil), // 60: daemon.GetEventsResponse + (*SwitchProfileRequest)(nil), // 61: daemon.SwitchProfileRequest + (*SwitchProfileResponse)(nil), // 62: daemon.SwitchProfileResponse + (*SetConfigRequest)(nil), // 63: daemon.SetConfigRequest + (*SetConfigResponse)(nil), // 64: daemon.SetConfigResponse + (*AddProfileRequest)(nil), // 65: daemon.AddProfileRequest + (*AddProfileResponse)(nil), // 66: daemon.AddProfileResponse + (*RemoveProfileRequest)(nil), // 67: daemon.RemoveProfileRequest + (*RemoveProfileResponse)(nil), // 68: daemon.RemoveProfileResponse + (*ListProfilesRequest)(nil), // 69: daemon.ListProfilesRequest + (*ListProfilesResponse)(nil), // 70: daemon.ListProfilesResponse + (*Profile)(nil), // 71: daemon.Profile + (*GetActiveProfileRequest)(nil), // 72: daemon.GetActiveProfileRequest + (*GetActiveProfileResponse)(nil), // 73: daemon.GetActiveProfileResponse + (*LogoutRequest)(nil), // 74: daemon.LogoutRequest + (*LogoutResponse)(nil), // 75: daemon.LogoutResponse + (*GetFeaturesRequest)(nil), // 76: daemon.GetFeaturesRequest + (*GetFeaturesResponse)(nil), // 77: daemon.GetFeaturesResponse + (*GetPeerSSHHostKeyRequest)(nil), // 78: daemon.GetPeerSSHHostKeyRequest + (*GetPeerSSHHostKeyResponse)(nil), // 79: daemon.GetPeerSSHHostKeyResponse + (*RequestJWTAuthRequest)(nil), // 80: daemon.RequestJWTAuthRequest + (*RequestJWTAuthResponse)(nil), // 81: daemon.RequestJWTAuthResponse + (*WaitJWTTokenRequest)(nil), // 82: daemon.WaitJWTTokenRequest + (*WaitJWTTokenResponse)(nil), // 83: daemon.WaitJWTTokenResponse + (*StartCPUProfileRequest)(nil), // 84: daemon.StartCPUProfileRequest + (*StartCPUProfileResponse)(nil), // 85: daemon.StartCPUProfileResponse + (*StopCPUProfileRequest)(nil), // 86: daemon.StopCPUProfileRequest + (*StopCPUProfileResponse)(nil), // 87: daemon.StopCPUProfileResponse + (*InstallerResultRequest)(nil), // 88: daemon.InstallerResultRequest + (*InstallerResultResponse)(nil), // 89: daemon.InstallerResultResponse + (*ExposeServiceRequest)(nil), // 90: daemon.ExposeServiceRequest + (*ExposeServiceEvent)(nil), // 91: daemon.ExposeServiceEvent + (*ExposeServiceReady)(nil), // 92: daemon.ExposeServiceReady + nil, // 93: daemon.Network.ResolvedIPsEntry + (*PortInfo_Range)(nil), // 94: daemon.PortInfo.Range + nil, // 95: daemon.SystemEvent.MetadataEntry + (*durationpb.Duration)(nil), // 96: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 97: google.protobuf.Timestamp } var file_daemon_proto_depIdxs = []int32{ - 1, // 0: daemon.OSLifecycleRequest.type:type_name -> daemon.OSLifecycleRequest.CycleType - 92, // 1: daemon.LoginRequest.dnsRouteInterval:type_name -> google.protobuf.Duration - 27, // 2: daemon.StatusResponse.fullStatus:type_name -> daemon.FullStatus - 93, // 3: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp - 93, // 4: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp - 92, // 5: daemon.PeerState.latency:type_name -> google.protobuf.Duration - 25, // 6: daemon.SSHServerState.sessions:type_name -> daemon.SSHSessionInfo - 22, // 7: daemon.FullStatus.managementState:type_name -> daemon.ManagementState - 21, // 8: daemon.FullStatus.signalState:type_name -> daemon.SignalState - 20, // 9: daemon.FullStatus.localPeerState:type_name -> daemon.LocalPeerState - 19, // 10: daemon.FullStatus.peers:type_name -> daemon.PeerState - 23, // 11: daemon.FullStatus.relays:type_name -> daemon.RelayState - 24, // 12: daemon.FullStatus.dns_servers:type_name -> daemon.NSGroupState - 57, // 13: daemon.FullStatus.events:type_name -> daemon.SystemEvent - 26, // 14: daemon.FullStatus.sshServerState:type_name -> daemon.SSHServerState - 33, // 15: daemon.ListNetworksResponse.routes:type_name -> daemon.Network - 89, // 16: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry - 90, // 17: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range - 34, // 18: daemon.ForwardingRule.destinationPort:type_name -> daemon.PortInfo - 34, // 19: daemon.ForwardingRule.translatedPort:type_name -> daemon.PortInfo - 35, // 20: daemon.ForwardingRulesResponse.rules:type_name -> daemon.ForwardingRule + 2, // 0: daemon.OSLifecycleRequest.type:type_name -> daemon.OSLifecycleRequest.CycleType + 96, // 1: daemon.LoginRequest.dnsRouteInterval:type_name -> google.protobuf.Duration + 28, // 2: daemon.StatusResponse.fullStatus:type_name -> daemon.FullStatus + 97, // 3: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp + 97, // 4: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp + 96, // 5: daemon.PeerState.latency:type_name -> google.protobuf.Duration + 26, // 6: daemon.SSHServerState.sessions:type_name -> daemon.SSHSessionInfo + 23, // 7: daemon.FullStatus.managementState:type_name -> daemon.ManagementState + 22, // 8: daemon.FullStatus.signalState:type_name -> daemon.SignalState + 21, // 9: daemon.FullStatus.localPeerState:type_name -> daemon.LocalPeerState + 20, // 10: daemon.FullStatus.peers:type_name -> daemon.PeerState + 24, // 11: daemon.FullStatus.relays:type_name -> daemon.RelayState + 25, // 12: daemon.FullStatus.dns_servers:type_name -> daemon.NSGroupState + 58, // 13: daemon.FullStatus.events:type_name -> daemon.SystemEvent + 27, // 14: daemon.FullStatus.sshServerState:type_name -> daemon.SSHServerState + 34, // 15: daemon.ListNetworksResponse.routes:type_name -> daemon.Network + 93, // 16: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry + 94, // 17: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range + 35, // 18: daemon.ForwardingRule.destinationPort:type_name -> daemon.PortInfo + 35, // 19: daemon.ForwardingRule.translatedPort:type_name -> daemon.PortInfo + 36, // 20: daemon.ForwardingRulesResponse.rules:type_name -> daemon.ForwardingRule 0, // 21: daemon.GetLogLevelResponse.level:type_name -> daemon.LogLevel 0, // 22: daemon.SetLogLevelRequest.level:type_name -> daemon.LogLevel - 43, // 23: daemon.ListStatesResponse.states:type_name -> daemon.State - 52, // 24: daemon.TracePacketRequest.tcp_flags:type_name -> daemon.TCPFlags - 54, // 25: daemon.TracePacketResponse.stages:type_name -> daemon.TraceStage - 2, // 26: daemon.SystemEvent.severity:type_name -> daemon.SystemEvent.Severity - 3, // 27: daemon.SystemEvent.category:type_name -> daemon.SystemEvent.Category - 93, // 28: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp - 91, // 29: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry - 57, // 30: daemon.GetEventsResponse.events:type_name -> daemon.SystemEvent - 92, // 31: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration - 70, // 32: daemon.ListProfilesResponse.profiles:type_name -> daemon.Profile - 32, // 33: daemon.Network.ResolvedIPsEntry.value:type_name -> daemon.IPList - 7, // 34: daemon.DaemonService.Login:input_type -> daemon.LoginRequest - 9, // 35: daemon.DaemonService.WaitSSOLogin:input_type -> daemon.WaitSSOLoginRequest - 11, // 36: daemon.DaemonService.Up:input_type -> daemon.UpRequest - 13, // 37: daemon.DaemonService.Status:input_type -> daemon.StatusRequest - 15, // 38: daemon.DaemonService.Down:input_type -> daemon.DownRequest - 17, // 39: daemon.DaemonService.GetConfig:input_type -> daemon.GetConfigRequest - 28, // 40: daemon.DaemonService.ListNetworks:input_type -> daemon.ListNetworksRequest - 30, // 41: daemon.DaemonService.SelectNetworks:input_type -> daemon.SelectNetworksRequest - 30, // 42: daemon.DaemonService.DeselectNetworks:input_type -> daemon.SelectNetworksRequest - 4, // 43: daemon.DaemonService.ForwardingRules:input_type -> daemon.EmptyRequest - 37, // 44: daemon.DaemonService.DebugBundle:input_type -> daemon.DebugBundleRequest - 39, // 45: daemon.DaemonService.GetLogLevel:input_type -> daemon.GetLogLevelRequest - 41, // 46: daemon.DaemonService.SetLogLevel:input_type -> daemon.SetLogLevelRequest - 44, // 47: daemon.DaemonService.ListStates:input_type -> daemon.ListStatesRequest - 46, // 48: daemon.DaemonService.CleanState:input_type -> daemon.CleanStateRequest - 48, // 49: daemon.DaemonService.DeleteState:input_type -> daemon.DeleteStateRequest - 50, // 50: daemon.DaemonService.SetSyncResponsePersistence:input_type -> daemon.SetSyncResponsePersistenceRequest - 53, // 51: daemon.DaemonService.TracePacket:input_type -> daemon.TracePacketRequest - 56, // 52: daemon.DaemonService.SubscribeEvents:input_type -> daemon.SubscribeRequest - 58, // 53: daemon.DaemonService.GetEvents:input_type -> daemon.GetEventsRequest - 60, // 54: daemon.DaemonService.SwitchProfile:input_type -> daemon.SwitchProfileRequest - 62, // 55: daemon.DaemonService.SetConfig:input_type -> daemon.SetConfigRequest - 64, // 56: daemon.DaemonService.AddProfile:input_type -> daemon.AddProfileRequest - 66, // 57: daemon.DaemonService.RemoveProfile:input_type -> daemon.RemoveProfileRequest - 68, // 58: daemon.DaemonService.ListProfiles:input_type -> daemon.ListProfilesRequest - 71, // 59: daemon.DaemonService.GetActiveProfile:input_type -> daemon.GetActiveProfileRequest - 73, // 60: daemon.DaemonService.Logout:input_type -> daemon.LogoutRequest - 75, // 61: daemon.DaemonService.GetFeatures:input_type -> daemon.GetFeaturesRequest - 77, // 62: daemon.DaemonService.GetPeerSSHHostKey:input_type -> daemon.GetPeerSSHHostKeyRequest - 79, // 63: daemon.DaemonService.RequestJWTAuth:input_type -> daemon.RequestJWTAuthRequest - 81, // 64: daemon.DaemonService.WaitJWTToken:input_type -> daemon.WaitJWTTokenRequest - 83, // 65: daemon.DaemonService.StartCPUProfile:input_type -> daemon.StartCPUProfileRequest - 85, // 66: daemon.DaemonService.StopCPUProfile:input_type -> daemon.StopCPUProfileRequest - 5, // 67: daemon.DaemonService.NotifyOSLifecycle:input_type -> daemon.OSLifecycleRequest - 87, // 68: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest - 8, // 69: daemon.DaemonService.Login:output_type -> daemon.LoginResponse - 10, // 70: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse - 12, // 71: daemon.DaemonService.Up:output_type -> daemon.UpResponse - 14, // 72: daemon.DaemonService.Status:output_type -> daemon.StatusResponse - 16, // 73: daemon.DaemonService.Down:output_type -> daemon.DownResponse - 18, // 74: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse - 29, // 75: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse - 31, // 76: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse - 31, // 77: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse - 36, // 78: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse - 38, // 79: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse - 40, // 80: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse - 42, // 81: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse - 45, // 82: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse - 47, // 83: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse - 49, // 84: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse - 51, // 85: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse - 55, // 86: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse - 57, // 87: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent - 59, // 88: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse - 61, // 89: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse - 63, // 90: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse - 65, // 91: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse - 67, // 92: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse - 69, // 93: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse - 72, // 94: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse - 74, // 95: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse - 76, // 96: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse - 78, // 97: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse - 80, // 98: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse - 82, // 99: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse - 84, // 100: daemon.DaemonService.StartCPUProfile:output_type -> daemon.StartCPUProfileResponse - 86, // 101: daemon.DaemonService.StopCPUProfile:output_type -> daemon.StopCPUProfileResponse - 6, // 102: daemon.DaemonService.NotifyOSLifecycle:output_type -> daemon.OSLifecycleResponse - 88, // 103: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse - 69, // [69:104] is the sub-list for method output_type - 34, // [34:69] is the sub-list for method input_type - 34, // [34:34] is the sub-list for extension type_name - 34, // [34:34] is the sub-list for extension extendee - 0, // [0:34] is the sub-list for field type_name + 44, // 23: daemon.ListStatesResponse.states:type_name -> daemon.State + 53, // 24: daemon.TracePacketRequest.tcp_flags:type_name -> daemon.TCPFlags + 55, // 25: daemon.TracePacketResponse.stages:type_name -> daemon.TraceStage + 3, // 26: daemon.SystemEvent.severity:type_name -> daemon.SystemEvent.Severity + 4, // 27: daemon.SystemEvent.category:type_name -> daemon.SystemEvent.Category + 97, // 28: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp + 95, // 29: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry + 58, // 30: daemon.GetEventsResponse.events:type_name -> daemon.SystemEvent + 96, // 31: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration + 71, // 32: daemon.ListProfilesResponse.profiles:type_name -> daemon.Profile + 1, // 33: daemon.ExposeServiceRequest.protocol:type_name -> daemon.ExposeProtocol + 92, // 34: daemon.ExposeServiceEvent.ready:type_name -> daemon.ExposeServiceReady + 33, // 35: daemon.Network.ResolvedIPsEntry.value:type_name -> daemon.IPList + 8, // 36: daemon.DaemonService.Login:input_type -> daemon.LoginRequest + 10, // 37: daemon.DaemonService.WaitSSOLogin:input_type -> daemon.WaitSSOLoginRequest + 12, // 38: daemon.DaemonService.Up:input_type -> daemon.UpRequest + 14, // 39: daemon.DaemonService.Status:input_type -> daemon.StatusRequest + 16, // 40: daemon.DaemonService.Down:input_type -> daemon.DownRequest + 18, // 41: daemon.DaemonService.GetConfig:input_type -> daemon.GetConfigRequest + 29, // 42: daemon.DaemonService.ListNetworks:input_type -> daemon.ListNetworksRequest + 31, // 43: daemon.DaemonService.SelectNetworks:input_type -> daemon.SelectNetworksRequest + 31, // 44: daemon.DaemonService.DeselectNetworks:input_type -> daemon.SelectNetworksRequest + 5, // 45: daemon.DaemonService.ForwardingRules:input_type -> daemon.EmptyRequest + 38, // 46: daemon.DaemonService.DebugBundle:input_type -> daemon.DebugBundleRequest + 40, // 47: daemon.DaemonService.GetLogLevel:input_type -> daemon.GetLogLevelRequest + 42, // 48: daemon.DaemonService.SetLogLevel:input_type -> daemon.SetLogLevelRequest + 45, // 49: daemon.DaemonService.ListStates:input_type -> daemon.ListStatesRequest + 47, // 50: daemon.DaemonService.CleanState:input_type -> daemon.CleanStateRequest + 49, // 51: daemon.DaemonService.DeleteState:input_type -> daemon.DeleteStateRequest + 51, // 52: daemon.DaemonService.SetSyncResponsePersistence:input_type -> daemon.SetSyncResponsePersistenceRequest + 54, // 53: daemon.DaemonService.TracePacket:input_type -> daemon.TracePacketRequest + 57, // 54: daemon.DaemonService.SubscribeEvents:input_type -> daemon.SubscribeRequest + 59, // 55: daemon.DaemonService.GetEvents:input_type -> daemon.GetEventsRequest + 61, // 56: daemon.DaemonService.SwitchProfile:input_type -> daemon.SwitchProfileRequest + 63, // 57: daemon.DaemonService.SetConfig:input_type -> daemon.SetConfigRequest + 65, // 58: daemon.DaemonService.AddProfile:input_type -> daemon.AddProfileRequest + 67, // 59: daemon.DaemonService.RemoveProfile:input_type -> daemon.RemoveProfileRequest + 69, // 60: daemon.DaemonService.ListProfiles:input_type -> daemon.ListProfilesRequest + 72, // 61: daemon.DaemonService.GetActiveProfile:input_type -> daemon.GetActiveProfileRequest + 74, // 62: daemon.DaemonService.Logout:input_type -> daemon.LogoutRequest + 76, // 63: daemon.DaemonService.GetFeatures:input_type -> daemon.GetFeaturesRequest + 78, // 64: daemon.DaemonService.GetPeerSSHHostKey:input_type -> daemon.GetPeerSSHHostKeyRequest + 80, // 65: daemon.DaemonService.RequestJWTAuth:input_type -> daemon.RequestJWTAuthRequest + 82, // 66: daemon.DaemonService.WaitJWTToken:input_type -> daemon.WaitJWTTokenRequest + 84, // 67: daemon.DaemonService.StartCPUProfile:input_type -> daemon.StartCPUProfileRequest + 86, // 68: daemon.DaemonService.StopCPUProfile:input_type -> daemon.StopCPUProfileRequest + 6, // 69: daemon.DaemonService.NotifyOSLifecycle:input_type -> daemon.OSLifecycleRequest + 88, // 70: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest + 90, // 71: daemon.DaemonService.ExposeService:input_type -> daemon.ExposeServiceRequest + 9, // 72: daemon.DaemonService.Login:output_type -> daemon.LoginResponse + 11, // 73: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse + 13, // 74: daemon.DaemonService.Up:output_type -> daemon.UpResponse + 15, // 75: daemon.DaemonService.Status:output_type -> daemon.StatusResponse + 17, // 76: daemon.DaemonService.Down:output_type -> daemon.DownResponse + 19, // 77: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse + 30, // 78: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse + 32, // 79: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse + 32, // 80: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse + 37, // 81: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse + 39, // 82: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse + 41, // 83: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse + 43, // 84: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse + 46, // 85: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse + 48, // 86: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse + 50, // 87: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse + 52, // 88: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse + 56, // 89: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse + 58, // 90: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent + 60, // 91: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse + 62, // 92: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse + 64, // 93: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse + 66, // 94: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse + 68, // 95: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse + 70, // 96: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse + 73, // 97: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse + 75, // 98: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse + 77, // 99: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse + 79, // 100: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse + 81, // 101: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse + 83, // 102: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse + 85, // 103: daemon.DaemonService.StartCPUProfile:output_type -> daemon.StartCPUProfileResponse + 87, // 104: daemon.DaemonService.StopCPUProfile:output_type -> daemon.StopCPUProfileResponse + 7, // 105: daemon.DaemonService.NotifyOSLifecycle:output_type -> daemon.OSLifecycleResponse + 89, // 106: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse + 91, // 107: daemon.DaemonService.ExposeService:output_type -> daemon.ExposeServiceEvent + 72, // [72:108] is the sub-list for method output_type + 36, // [36:72] is the sub-list for method input_type + 36, // [36:36] is the sub-list for extension type_name + 36, // [36:36] is the sub-list for extension extendee + 0, // [0:36] is the sub-list for field type_name } func init() { file_daemon_proto_init() } @@ -6439,13 +6743,16 @@ func file_daemon_proto_init() { file_daemon_proto_msgTypes[58].OneofWrappers = []any{} file_daemon_proto_msgTypes[69].OneofWrappers = []any{} file_daemon_proto_msgTypes[75].OneofWrappers = []any{} + file_daemon_proto_msgTypes[86].OneofWrappers = []any{ + (*ExposeServiceEvent_Ready)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_daemon_proto_rawDesc), len(file_daemon_proto_rawDesc)), - NumEnums: 4, - NumMessages: 88, + NumEnums: 5, + NumMessages: 91, NumExtensions: 0, NumServices: 1, }, diff --git a/client/proto/daemon.proto b/client/proto/daemon.proto index 68b9a9348..4dc41d401 100644 --- a/client/proto/daemon.proto +++ b/client/proto/daemon.proto @@ -103,6 +103,9 @@ service DaemonService { rpc NotifyOSLifecycle(OSLifecycleRequest) returns(OSLifecycleResponse) {} rpc GetInstallerResult(InstallerResultRequest) returns (InstallerResultResponse) {} + + // ExposeService exposes a local port via the NetBird reverse proxy + rpc ExposeService(ExposeServiceRequest) returns (stream ExposeServiceEvent) {} } @@ -801,3 +804,32 @@ message InstallerResultResponse { bool success = 1; string errorMsg = 2; } + +enum ExposeProtocol { + EXPOSE_HTTP = 0; + EXPOSE_HTTPS = 1; + EXPOSE_TCP = 2; + EXPOSE_UDP = 3; +} + +message ExposeServiceRequest { + uint32 port = 1; + ExposeProtocol protocol = 2; + string pin = 3; + string password = 4; + repeated string user_groups = 5; + string domain = 6; + string name_prefix = 7; +} + +message ExposeServiceEvent { + oneof event { + ExposeServiceReady ready = 1; + } +} + +message ExposeServiceReady { + string service_name = 1; + string service_url = 2; + string domain = 3; +} diff --git a/client/proto/daemon_grpc.pb.go b/client/proto/daemon_grpc.pb.go index ea9b4df05..4154dce59 100644 --- a/client/proto/daemon_grpc.pb.go +++ b/client/proto/daemon_grpc.pb.go @@ -76,6 +76,8 @@ type DaemonServiceClient interface { StopCPUProfile(ctx context.Context, in *StopCPUProfileRequest, opts ...grpc.CallOption) (*StopCPUProfileResponse, error) NotifyOSLifecycle(ctx context.Context, in *OSLifecycleRequest, opts ...grpc.CallOption) (*OSLifecycleResponse, error) GetInstallerResult(ctx context.Context, in *InstallerResultRequest, opts ...grpc.CallOption) (*InstallerResultResponse, error) + // ExposeService exposes a local port via the NetBird reverse proxy + ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (DaemonService_ExposeServiceClient, error) } type daemonServiceClient struct { @@ -424,6 +426,38 @@ func (c *daemonServiceClient) GetInstallerResult(ctx context.Context, in *Instal return out, nil } +func (c *daemonServiceClient) ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (DaemonService_ExposeServiceClient, error) { + stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[1], "/daemon.DaemonService/ExposeService", opts...) + if err != nil { + return nil, err + } + x := &daemonServiceExposeServiceClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type DaemonService_ExposeServiceClient interface { + Recv() (*ExposeServiceEvent, error) + grpc.ClientStream +} + +type daemonServiceExposeServiceClient struct { + grpc.ClientStream +} + +func (x *daemonServiceExposeServiceClient) Recv() (*ExposeServiceEvent, error) { + m := new(ExposeServiceEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // DaemonServiceServer is the server API for DaemonService service. // All implementations must embed UnimplementedDaemonServiceServer // for forward compatibility @@ -486,6 +520,8 @@ type DaemonServiceServer interface { StopCPUProfile(context.Context, *StopCPUProfileRequest) (*StopCPUProfileResponse, error) NotifyOSLifecycle(context.Context, *OSLifecycleRequest) (*OSLifecycleResponse, error) GetInstallerResult(context.Context, *InstallerResultRequest) (*InstallerResultResponse, error) + // ExposeService exposes a local port via the NetBird reverse proxy + ExposeService(*ExposeServiceRequest, DaemonService_ExposeServiceServer) error mustEmbedUnimplementedDaemonServiceServer() } @@ -598,6 +634,9 @@ func (UnimplementedDaemonServiceServer) NotifyOSLifecycle(context.Context, *OSLi func (UnimplementedDaemonServiceServer) GetInstallerResult(context.Context, *InstallerResultRequest) (*InstallerResultResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetInstallerResult not implemented") } +func (UnimplementedDaemonServiceServer) ExposeService(*ExposeServiceRequest, DaemonService_ExposeServiceServer) error { + return status.Errorf(codes.Unimplemented, "method ExposeService not implemented") +} func (UnimplementedDaemonServiceServer) mustEmbedUnimplementedDaemonServiceServer() {} // UnsafeDaemonServiceServer may be embedded to opt out of forward compatibility for this service. @@ -1244,6 +1283,27 @@ func _DaemonService_GetInstallerResult_Handler(srv interface{}, ctx context.Cont return interceptor(ctx, in, info, handler) } +func _DaemonService_ExposeService_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ExposeServiceRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DaemonServiceServer).ExposeService(m, &daemonServiceExposeServiceServer{stream}) +} + +type DaemonService_ExposeServiceServer interface { + Send(*ExposeServiceEvent) error + grpc.ServerStream +} + +type daemonServiceExposeServiceServer struct { + grpc.ServerStream +} + +func (x *daemonServiceExposeServiceServer) Send(m *ExposeServiceEvent) error { + return x.ServerStream.SendMsg(m) +} + // DaemonService_ServiceDesc is the grpc.ServiceDesc for DaemonService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -1394,6 +1454,11 @@ var DaemonService_ServiceDesc = grpc.ServiceDesc{ Handler: _DaemonService_SubscribeEvents_Handler, ServerStreams: true, }, + { + StreamName: "ExposeService", + Handler: _DaemonService_ExposeService_Handler, + ServerStreams: true, + }, }, Metadata: "daemon.proto", } diff --git a/client/server/server.go b/client/server/server.go index 8cd057852..0466630c5 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -21,6 +21,7 @@ import ( gstatus "google.golang.org/grpc/status" "github.com/netbirdio/netbird/client/internal/auth" + "github.com/netbirdio/netbird/client/internal/expose" "github.com/netbirdio/netbird/client/internal/profilemanager" sleephandler "github.com/netbirdio/netbird/client/internal/sleep/handler" "github.com/netbirdio/netbird/client/system" @@ -1316,6 +1317,60 @@ func (s *Server) WaitJWTToken( }, nil } +// ExposeService exposes a local port via the NetBird reverse proxy. +func (s *Server) ExposeService(req *proto.ExposeServiceRequest, srv proto.DaemonService_ExposeServiceServer) error { + s.mutex.Lock() + if !s.clientRunning { + s.mutex.Unlock() + return gstatus.Errorf(codes.FailedPrecondition, "client is not running, run 'netbird up' first") + } + connectClient := s.connectClient + s.mutex.Unlock() + + if connectClient == nil { + return gstatus.Errorf(codes.FailedPrecondition, "client not initialized") + } + + engine := connectClient.Engine() + if engine == nil { + return gstatus.Errorf(codes.FailedPrecondition, "engine not initialized") + } + + mgr := engine.GetExposeManager() + if mgr == nil { + return gstatus.Errorf(codes.Internal, "expose manager not available") + } + + ctx := srv.Context() + + exposeCtx, exposeCancel := context.WithTimeout(ctx, 30*time.Second) + defer exposeCancel() + + mgmReq := expose.NewRequest(req) + result, err := mgr.Expose(exposeCtx, *mgmReq) + if err != nil { + return err + } + + if err := srv.Send(&proto.ExposeServiceEvent{ + Event: &proto.ExposeServiceEvent_Ready{ + Ready: &proto.ExposeServiceReady{ + ServiceName: result.ServiceName, + ServiceUrl: result.ServiceURL, + Domain: result.Domain, + }, + }, + }); err != nil { + return err + } + + err = mgr.KeepAlive(ctx, result.Domain) + if err != nil { + return err + } + return nil +} + func isUnixRunningDesktop() bool { if runtime.GOOS != "linux" && runtime.GOOS != "freebsd" { return false diff --git a/management/internals/modules/reverseproxy/domain/interface.go b/management/internals/modules/reverseproxy/domain/interface.go index d40e9b637..a4bba5841 100644 --- a/management/internals/modules/reverseproxy/domain/interface.go +++ b/management/internals/modules/reverseproxy/domain/interface.go @@ -9,4 +9,5 @@ type Manager interface { CreateDomain(ctx context.Context, accountID, userID, domainName, targetCluster string) (*Domain, error) DeleteDomain(ctx context.Context, accountID, userID, domainID string) error ValidateDomain(ctx context.Context, accountID, userID, domainID string) + GetClusterDomains() []string } diff --git a/management/internals/modules/reverseproxy/domain/manager/manager.go b/management/internals/modules/reverseproxy/domain/manager/manager.go index 1125f428f..55ca24ac2 100644 --- a/management/internals/modules/reverseproxy/domain/manager/manager.go +++ b/management/internals/modules/reverseproxy/domain/manager/manager.go @@ -221,6 +221,10 @@ func (m Manager) ValidateDomain(ctx context.Context, accountID, userID, domainID } } +func (m Manager) GetClusterDomains() []string { + return m.proxyURLAllowList() +} + // proxyURLAllowList retrieves a list of currently connected proxies and // their URLs func (m Manager) proxyURLAllowList() []string { diff --git a/management/internals/modules/reverseproxy/interface.go b/management/internals/modules/reverseproxy/interface.go index 8a81ee307..95402bdf7 100644 --- a/management/internals/modules/reverseproxy/interface.go +++ b/management/internals/modules/reverseproxy/interface.go @@ -21,4 +21,8 @@ type Manager interface { GetServiceByID(ctx context.Context, accountID, serviceID string) (*Service, error) GetAccountServices(ctx context.Context, accountID string) ([]*Service, error) GetServiceIDByTargetID(ctx context.Context, accountID string, resourceID string) (string, error) + ValidateExposePermission(ctx context.Context, accountID, peerID string) error + CreateServiceFromPeer(ctx context.Context, accountID, peerID string, service *Service) (*Service, error) + DeleteServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error + ExpireServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error } diff --git a/management/internals/modules/reverseproxy/interface_mock.go b/management/internals/modules/reverseproxy/interface_mock.go index 6533d90bf..19a4ecfe5 100644 --- a/management/internals/modules/reverseproxy/interface_mock.go +++ b/management/internals/modules/reverseproxy/interface_mock.go @@ -63,6 +63,21 @@ func (mr *MockManagerMockRecorder) DeleteAllServices(ctx, accountID, userID inte return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllServices", reflect.TypeOf((*MockManager)(nil).DeleteAllServices), ctx, accountID, userID) } +// CreateServiceFromPeer mocks base method. +func (m *MockManager) CreateServiceFromPeer(ctx context.Context, accountID, peerID string, service *Service) (*Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateServiceFromPeer", ctx, accountID, peerID, service) + ret0, _ := ret[0].(*Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateServiceFromPeer indicates an expected call of CreateServiceFromPeer. +func (mr *MockManagerMockRecorder) CreateServiceFromPeer(ctx, accountID, peerID, service interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateServiceFromPeer", reflect.TypeOf((*MockManager)(nil).CreateServiceFromPeer), ctx, accountID, peerID, service) +} + // DeleteService mocks base method. func (m *MockManager) DeleteService(ctx context.Context, accountID, userID, serviceID string) error { m.ctrl.T.Helper() @@ -77,6 +92,48 @@ func (mr *MockManagerMockRecorder) DeleteService(ctx, accountID, userID, service return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteService", reflect.TypeOf((*MockManager)(nil).DeleteService), ctx, accountID, userID, serviceID) } +// DeleteServiceFromPeer mocks base method. +func (m *MockManager) DeleteServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteServiceFromPeer", ctx, accountID, peerID, serviceID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteServiceFromPeer indicates an expected call of DeleteServiceFromPeer. +func (mr *MockManagerMockRecorder) DeleteServiceFromPeer(ctx, accountID, peerID, serviceID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteServiceFromPeer", reflect.TypeOf((*MockManager)(nil).DeleteServiceFromPeer), ctx, accountID, peerID, serviceID) +} + +// ExpireServiceFromPeer mocks base method. +func (m *MockManager) ExpireServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExpireServiceFromPeer", ctx, accountID, peerID, serviceID) + ret0, _ := ret[0].(error) + return ret0 +} + +// ExpireServiceFromPeer indicates an expected call of ExpireServiceFromPeer. +func (mr *MockManagerMockRecorder) ExpireServiceFromPeer(ctx, accountID, peerID, serviceID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExpireServiceFromPeer", reflect.TypeOf((*MockManager)(nil).ExpireServiceFromPeer), ctx, accountID, peerID, serviceID) +} + +// ValidateExposePermission mocks base method. +func (m *MockManager) ValidateExposePermission(ctx context.Context, accountID, peerID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidateExposePermission", ctx, accountID, peerID) + ret0, _ := ret[0].(error) + return ret0 +} + +// ValidateExposePermission indicates an expected call of ValidateExposePermission. +func (mr *MockManagerMockRecorder) ValidateExposePermission(ctx, accountID, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateExposePermission", reflect.TypeOf((*MockManager)(nil).ValidateExposePermission), ctx, accountID, peerID) +} + // GetAccountServices mocks base method. func (m *MockManager) GetAccountServices(ctx context.Context, accountID string) ([]*Service, error) { m.ctrl.T.Helper() diff --git a/management/internals/modules/reverseproxy/manager/manager.go b/management/internals/modules/reverseproxy/manager/manager.go index 8068178a5..ac839b8ea 100644 --- a/management/internals/modules/reverseproxy/manager/manager.go +++ b/management/internals/modules/reverseproxy/manager/manager.go @@ -3,10 +3,14 @@ package manager import ( "context" "fmt" + "math/rand/v2" "time" + nbpeer "github.com/netbirdio/netbird/management/server/peer" log "github.com/sirupsen/logrus" + "slices" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/sessionkey" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" @@ -15,6 +19,7 @@ import ( "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/permissions/modules" "github.com/netbirdio/netbird/management/server/permissions/operations" + "github.com/netbirdio/netbird/management/server/settings" "github.com/netbirdio/netbird/management/server/store" "github.com/netbirdio/netbird/shared/management/proto" "github.com/netbirdio/netbird/shared/management/status" @@ -25,22 +30,25 @@ const unknownHostPlaceholder = "unknown" // ClusterDeriver derives the proxy cluster from a domain. type ClusterDeriver interface { DeriveClusterFromDomain(ctx context.Context, accountID, domain string) (string, error) + GetClusterDomains() []string } type managerImpl struct { store store.Store accountManager account.Manager permissionsManager permissions.Manager + settingsManager settings.Manager proxyGRPCServer *nbgrpc.ProxyServiceServer clusterDeriver ClusterDeriver } // NewManager creates a new service manager. -func NewManager(store store.Store, accountManager account.Manager, permissionsManager permissions.Manager, proxyGRPCServer *nbgrpc.ProxyServiceServer, clusterDeriver ClusterDeriver) reverseproxy.Manager { +func NewManager(store store.Store, accountManager account.Manager, permissionsManager permissions.Manager, settingsManager settings.Manager, proxyGRPCServer *nbgrpc.ProxyServiceServer, clusterDeriver ClusterDeriver) reverseproxy.Manager { return &managerImpl{ store: store, accountManager: accountManager, permissionsManager: permissionsManager, + settingsManager: settingsManager, proxyGRPCServer: proxyGRPCServer, clusterDeriver: clusterDeriver, } @@ -475,7 +483,8 @@ func (m *managerImpl) SetCertificateIssuedAt(ctx context.Context, accountID, ser return fmt.Errorf("failed to get service: %w", err) } - service.Meta.CertificateIssuedAt = time.Now() + now := time.Now() + service.Meta.CertificateIssuedAt = &now if err = transaction.UpdateService(ctx, service); err != nil { return fmt.Errorf("failed to update service certificate timestamp: %w", err) @@ -607,3 +616,179 @@ func (m *managerImpl) GetServiceIDByTargetID(ctx context.Context, accountID stri return target.ServiceID, nil } + +// ValidateExposePermission checks whether the peer is allowed to use the expose feature. +// It verifies the account has peer expose enabled and that the peer belongs to an allowed group. +func (m *managerImpl) ValidateExposePermission(ctx context.Context, accountID, peerID string) error { + settings, err := m.store.GetAccountSettings(ctx, store.LockingStrengthNone, accountID) + if err != nil { + log.WithContext(ctx).Errorf("failed to get account settings: %v", err) + return status.Errorf(status.Internal, "get account settings: %v", err) + } + + if !settings.PeerExposeEnabled { + return status.Errorf(status.PermissionDenied, "peer expose is not enabled for this account") + } + + if len(settings.PeerExposeGroups) == 0 { + return status.Errorf(status.PermissionDenied, "no group is set for peer expose") + } + + peerGroupIDs, err := m.store.GetPeerGroupIDs(ctx, store.LockingStrengthNone, accountID, peerID) + if err != nil { + log.WithContext(ctx).Errorf("failed to get peer group IDs: %v", err) + return status.Errorf(status.Internal, "get peer groups: %v", err) + } + + for _, pg := range peerGroupIDs { + if slices.Contains(settings.PeerExposeGroups, pg) { + return nil + } + } + + return status.Errorf(status.PermissionDenied, "peer is not in an allowed expose group") +} + +// CreateServiceFromPeer creates a service initiated by a peer expose request. +// It skips user permission checks since authorization is done at the gRPC handler level. +func (m *managerImpl) CreateServiceFromPeer(ctx context.Context, accountID, peerID string, service *reverseproxy.Service) (*reverseproxy.Service, error) { + service.Source = reverseproxy.SourceEphemeral + + if service.Domain == "" { + domain, err := m.buildRandomDomain(service.Name) + if err != nil { + return nil, fmt.Errorf("build random domain for service %s: %w", service.Name, err) + } + service.Domain = domain + } + + if service.Auth.BearerAuth != nil && service.Auth.BearerAuth.Enabled { + groupIDs, err := m.getGroupIDsFromNames(ctx, accountID, service.Auth.BearerAuth.DistributionGroups) + if err != nil { + return nil, fmt.Errorf("get group ids for service %s: %w", service.ID, err) + } + service.Auth.BearerAuth.DistributionGroups = groupIDs + } + + if err := m.initializeServiceForCreate(ctx, accountID, service); err != nil { + return nil, err + } + + peer, err := m.store.GetPeerByID(ctx, store.LockingStrengthNone, accountID, peerID) + if err != nil { + return nil, err + } + + now := time.Now() + service.Meta.LastRenewedAt = &now + service.SourcePeer = peerID + + if err := m.persistNewService(ctx, accountID, service); err != nil { + return nil, err + } + + meta := addPeerInfoToEventMeta(service.EventMeta(), peer) + + m.accountManager.StoreEvent(ctx, peerID, service.ID, accountID, activity.PeerServiceExposed, meta) + + if err := m.replaceHostByLookup(ctx, accountID, service); err != nil { + return nil, fmt.Errorf("replace host by lookup for service %s: %w", service.ID, err) + } + + m.sendServiceUpdate(service, reverseproxy.Create, service.ProxyCluster, "") + + m.accountManager.UpdateAccountPeers(ctx, accountID) + + return service, nil +} + +func (m *managerImpl) getGroupIDsFromNames(ctx context.Context, accountID string, groupNames []string) ([]string, error) { + if len(groupNames) == 0 { + return []string{}, fmt.Errorf("no group names provided") + } + groupIDs := make([]string, 0, len(groupNames)) + for _, groupName := range groupNames { + g, err := m.accountManager.GetGroupByName(ctx, groupName, accountID) + if err != nil { + return nil, fmt.Errorf("failed to get group by name %s: %w", groupName, err) + } + groupIDs = append(groupIDs, g.ID) + } + return groupIDs, nil +} + +func (m *managerImpl) buildRandomDomain(name string) (string, error) { + clusterDomains := m.clusterDeriver.GetClusterDomains() + if len(clusterDomains) == 0 { + return "", fmt.Errorf("no cluster domains found for service %s", name) + } + index := rand.IntN(len(clusterDomains)) + domain := name + "." + clusterDomains[index] + return domain, nil +} + +// DeleteServiceFromPeer deletes a peer-initiated service. +// It validates that the service was created by a peer to prevent deleting API-created services. +func (m *managerImpl) DeleteServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error { + return m.deletePeerService(ctx, accountID, peerID, serviceID, activity.PeerServiceUnexposed) +} + +// ExpireServiceFromPeer deletes a peer-initiated service that was not renewed within the TTL. +func (m *managerImpl) ExpireServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error { + return m.deletePeerService(ctx, accountID, peerID, serviceID, activity.PeerServiceExposeExpired) +} + +func (m *managerImpl) deletePeerService(ctx context.Context, accountID, peerID, serviceID string, activityCode activity.Activity) error { + var service *reverseproxy.Service + err := m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + var err error + service, err = transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID) + if err != nil { + return err + } + + if service.Source != reverseproxy.SourceEphemeral { + return status.Errorf(status.PermissionDenied, "cannot delete API-created service via peer expose") + } + + if service.SourcePeer != peerID { + return status.Errorf(status.PermissionDenied, "cannot delete service exposed by another peer") + } + + if err = transaction.DeleteService(ctx, accountID, serviceID); err != nil { + return fmt.Errorf("delete service: %w", err) + } + + return nil + }) + if err != nil { + return err + } + + peer, err := m.store.GetPeerByID(ctx, store.LockingStrengthNone, accountID, peerID) + if err != nil { + log.WithContext(ctx).Debugf("failed to get peer %s for event metadata: %v", peerID, err) + peer = nil + } + + meta := addPeerInfoToEventMeta(service.EventMeta(), peer) + + m.accountManager.StoreEvent(ctx, peerID, serviceID, accountID, activityCode, meta) + + m.sendServiceUpdate(service, reverseproxy.Delete, service.ProxyCluster, "") + + m.accountManager.UpdateAccountPeers(ctx, accountID) + + return nil +} + +func addPeerInfoToEventMeta(meta map[string]any, peer *nbpeer.Peer) map[string]any { + if peer == nil { + return meta + } + meta["peer_name"] = peer.Name + if peer.IP != nil { + meta["peer_ip"] = peer.IP.String() + } + return meta +} diff --git a/management/internals/modules/reverseproxy/manager/manager_test.go b/management/internals/modules/reverseproxy/manager/manager_test.go index 266b0066f..eab853cf3 100644 --- a/management/internals/modules/reverseproxy/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/manager/manager_test.go @@ -3,6 +3,7 @@ package manager import ( "context" "errors" + "net" "testing" "time" @@ -11,7 +12,16 @@ import ( "github.com/stretchr/testify/require" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" + "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/management/server/integrations/extra_settings" + "github.com/netbirdio/netbird/management/server/mock_server" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/permissions" + "github.com/netbirdio/netbird/management/server/settings" "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/management/server/users" "github.com/netbirdio/netbird/shared/management/status" ) @@ -356,7 +366,7 @@ func TestPreserveServiceMetadata(t *testing.T) { existing := &reverseproxy.Service{ Meta: reverseproxy.ServiceMeta{ - CertificateIssuedAt: time.Now(), + CertificateIssuedAt: func() *time.Time { t := time.Now(); return &t }(), Status: "active", }, SessionPrivateKey: "private-key", @@ -373,3 +383,516 @@ func TestPreserveServiceMetadata(t *testing.T) { assert.Equal(t, existing.SessionPrivateKey, updated.SessionPrivateKey) assert.Equal(t, existing.SessionPublicKey, updated.SessionPublicKey) } + +func TestDeletePeerService_SourcePeerValidation(t *testing.T) { + ctx := context.Background() + accountID := "test-account" + ownerPeerID := "peer-owner" + otherPeerID := "peer-other" + serviceID := "service-123" + + testPeer := &nbpeer.Peer{ + ID: ownerPeerID, + Name: "test-peer", + IP: net.ParseIP("100.64.0.1"), + } + + newEphemeralService := func() *reverseproxy.Service { + return &reverseproxy.Service{ + ID: serviceID, + AccountID: accountID, + Name: "test-service", + Domain: "test.example.com", + Source: reverseproxy.SourceEphemeral, + SourcePeer: ownerPeerID, + } + } + + newPermanentService := func() *reverseproxy.Service { + return &reverseproxy.Service{ + ID: serviceID, + AccountID: accountID, + Name: "api-service", + Domain: "api.example.com", + Source: reverseproxy.SourcePermanent, + } + } + + newProxyServer := func(t *testing.T) *nbgrpc.ProxyServiceServer { + t.Helper() + tokenStore := nbgrpc.NewOneTimeTokenStore(1 * time.Hour) + srv := nbgrpc.NewProxyServiceServer(nil, tokenStore, nbgrpc.ProxyOIDCConfig{}, nil, nil) + t.Cleanup(srv.Close) + return srv + } + + t.Run("owner peer can delete own service", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + var storedActivity activity.Activity + mockStore := store.NewMockStore(ctrl) + mockAccountMgr := &mock_server.MockAccountManager{ + StoreEventFunc: func(_ context.Context, _, _, _ string, activityID activity.ActivityDescriber, _ map[string]any) { + storedActivity = activityID.(activity.Activity) + }, + UpdateAccountPeersFunc: func(_ context.Context, _ string) {}, + } + + mockStore.EXPECT(). + ExecuteInTransaction(ctx, gomock.Any()). + DoAndReturn(func(ctx context.Context, fn func(store.Store) error) error { + txMock := store.NewMockStore(ctrl) + txMock.EXPECT(). + GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID). + Return(newEphemeralService(), nil) + txMock.EXPECT(). + DeleteService(ctx, accountID, serviceID). + Return(nil) + return fn(txMock) + }) + mockStore.EXPECT(). + GetPeerByID(ctx, store.LockingStrengthNone, accountID, ownerPeerID). + Return(testPeer, nil) + + mgr := &managerImpl{ + store: mockStore, + accountManager: mockAccountMgr, + proxyGRPCServer: newProxyServer(t), + } + + err := mgr.deletePeerService(ctx, accountID, ownerPeerID, serviceID, activity.PeerServiceUnexposed) + require.NoError(t, err) + assert.Equal(t, activity.PeerServiceUnexposed, storedActivity, "should store unexposed activity") + }) + + t.Run("different peer cannot delete service", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockStore := store.NewMockStore(ctrl) + + mockStore.EXPECT(). + ExecuteInTransaction(ctx, gomock.Any()). + DoAndReturn(func(ctx context.Context, fn func(store.Store) error) error { + txMock := store.NewMockStore(ctrl) + txMock.EXPECT(). + GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID). + Return(newEphemeralService(), nil) + return fn(txMock) + }) + + mgr := &managerImpl{ + store: mockStore, + } + + err := mgr.deletePeerService(ctx, accountID, otherPeerID, serviceID, activity.PeerServiceUnexposed) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok, "should be a status error") + assert.Equal(t, status.PermissionDenied, sErr.Type(), "should be permission denied") + assert.Contains(t, err.Error(), "another peer") + }) + + t.Run("cannot delete API-created service", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockStore := store.NewMockStore(ctrl) + + mockStore.EXPECT(). + ExecuteInTransaction(ctx, gomock.Any()). + DoAndReturn(func(ctx context.Context, fn func(store.Store) error) error { + txMock := store.NewMockStore(ctrl) + txMock.EXPECT(). + GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID). + Return(newPermanentService(), nil) + return fn(txMock) + }) + + mgr := &managerImpl{ + store: mockStore, + } + + err := mgr.deletePeerService(ctx, accountID, ownerPeerID, serviceID, activity.PeerServiceUnexposed) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok, "should be a status error") + assert.Equal(t, status.PermissionDenied, sErr.Type(), "should be permission denied") + assert.Contains(t, err.Error(), "API-created") + }) + + t.Run("expire uses correct activity code", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + var storedActivity activity.Activity + mockStore := store.NewMockStore(ctrl) + mockAccountMgr := &mock_server.MockAccountManager{ + StoreEventFunc: func(_ context.Context, _, _, _ string, activityID activity.ActivityDescriber, _ map[string]any) { + storedActivity = activityID.(activity.Activity) + }, + UpdateAccountPeersFunc: func(_ context.Context, _ string) {}, + } + + mockStore.EXPECT(). + ExecuteInTransaction(ctx, gomock.Any()). + DoAndReturn(func(ctx context.Context, fn func(store.Store) error) error { + txMock := store.NewMockStore(ctrl) + txMock.EXPECT(). + GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID). + Return(newEphemeralService(), nil) + txMock.EXPECT(). + DeleteService(ctx, accountID, serviceID). + Return(nil) + return fn(txMock) + }) + mockStore.EXPECT(). + GetPeerByID(ctx, store.LockingStrengthNone, accountID, ownerPeerID). + Return(testPeer, nil) + + mgr := &managerImpl{ + store: mockStore, + accountManager: mockAccountMgr, + proxyGRPCServer: newProxyServer(t), + } + + err := mgr.deletePeerService(ctx, accountID, ownerPeerID, serviceID, activity.PeerServiceExposeExpired) + require.NoError(t, err) + assert.Equal(t, activity.PeerServiceExposeExpired, storedActivity, "should store expired activity") + }) + + t.Run("event meta includes peer info", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + var storedMeta map[string]any + mockStore := store.NewMockStore(ctrl) + mockAccountMgr := &mock_server.MockAccountManager{ + StoreEventFunc: func(_ context.Context, _, _, _ string, _ activity.ActivityDescriber, meta map[string]any) { + storedMeta = meta + }, + UpdateAccountPeersFunc: func(_ context.Context, _ string) {}, + } + + mockStore.EXPECT(). + ExecuteInTransaction(ctx, gomock.Any()). + DoAndReturn(func(ctx context.Context, fn func(store.Store) error) error { + txMock := store.NewMockStore(ctrl) + txMock.EXPECT(). + GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID). + Return(newEphemeralService(), nil) + txMock.EXPECT(). + DeleteService(ctx, accountID, serviceID). + Return(nil) + return fn(txMock) + }) + mockStore.EXPECT(). + GetPeerByID(ctx, store.LockingStrengthNone, accountID, ownerPeerID). + Return(testPeer, nil) + + mgr := &managerImpl{ + store: mockStore, + accountManager: mockAccountMgr, + proxyGRPCServer: newProxyServer(t), + } + + err := mgr.deletePeerService(ctx, accountID, ownerPeerID, serviceID, activity.PeerServiceUnexposed) + require.NoError(t, err) + require.NotNil(t, storedMeta) + assert.Equal(t, "test-peer", storedMeta["peer_name"], "meta should contain peer name") + assert.Equal(t, "100.64.0.1", storedMeta["peer_ip"], "meta should contain peer IP") + assert.Equal(t, "test-service", storedMeta["name"], "meta should contain service name") + assert.Equal(t, "test.example.com", storedMeta["domain"], "meta should contain service domain") + }) +} + +// noopExtraSettings is a minimal extra_settings.Manager for tests without external integrations. +type noopExtraSettings struct{} + +func (n *noopExtraSettings) GetExtraSettings(_ context.Context, _ string) (*types.ExtraSettings, error) { + return &types.ExtraSettings{}, nil +} + +func (n *noopExtraSettings) UpdateExtraSettings(_ context.Context, _, _ string, _ *types.ExtraSettings) (bool, error) { + return false, nil +} + +var _ extra_settings.Manager = (*noopExtraSettings)(nil) + +// testClusterDeriver is a minimal ClusterDeriver that returns a fixed domain list. +type testClusterDeriver struct { + domains []string +} + +func (d *testClusterDeriver) DeriveClusterFromDomain(_ context.Context, _, domain string) (string, error) { + return "test-cluster", nil +} + +func (d *testClusterDeriver) GetClusterDomains() []string { + return d.domains +} + +const ( + testAccountID = "test-account" + testPeerID = "test-peer-1" + testGroupID = "test-group-1" + testUserID = "test-user" +) + +// setupIntegrationTest creates a real SQLite store with seeded test data for integration tests. +func setupIntegrationTest(t *testing.T) (*managerImpl, store.Store) { + t.Helper() + + ctx := context.Background() + testStore, cleanup, err := store.NewTestStoreFromSQL(ctx, "", t.TempDir()) + require.NoError(t, err) + t.Cleanup(cleanup) + + err = testStore.SaveAccount(ctx, &types.Account{ + Id: testAccountID, + CreatedBy: testUserID, + Settings: &types.Settings{ + PeerExposeEnabled: true, + PeerExposeGroups: []string{testGroupID}, + }, + Peers: map[string]*nbpeer.Peer{ + testPeerID: { + ID: testPeerID, + AccountID: testAccountID, + Key: "test-key", + DNSLabel: "test-peer", + Name: "test-peer", + IP: net.ParseIP("100.64.0.1"), + Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, + Meta: nbpeer.PeerSystemMeta{Hostname: "test-peer"}, + }, + }, + Groups: map[string]*types.Group{ + testGroupID: { + ID: testGroupID, + AccountID: testAccountID, + Name: "Expose Group", + }, + }, + }) + require.NoError(t, err) + + err = testStore.AddPeerToGroup(ctx, testAccountID, testPeerID, testGroupID) + require.NoError(t, err) + + permsMgr := permissions.NewManager(testStore) + usersMgr := users.NewManager(testStore) + settingsMgr := settings.NewManager(testStore, usersMgr, &noopExtraSettings{}, permsMgr, settings.IdpConfig{}) + + var storedEvents []activity.Activity + accountMgr := &mock_server.MockAccountManager{ + StoreEventFunc: func(_ context.Context, _, _, _ string, activityID activity.ActivityDescriber, _ map[string]any) { + storedEvents = append(storedEvents, activityID.(activity.Activity)) + }, + UpdateAccountPeersFunc: func(_ context.Context, _ string) {}, + GetGroupByNameFunc: func(ctx context.Context, accountID, groupName string) (*types.Group, error) { + return testStore.GetGroupByName(ctx, store.LockingStrengthNone, groupName, accountID) + }, + } + + tokenStore := nbgrpc.NewOneTimeTokenStore(1 * time.Hour) + proxySrv := nbgrpc.NewProxyServiceServer(nil, tokenStore, nbgrpc.ProxyOIDCConfig{}, nil, nil) + t.Cleanup(proxySrv.Close) + + mgr := &managerImpl{ + store: testStore, + accountManager: accountMgr, + permissionsManager: permsMgr, + settingsManager: settingsMgr, + proxyGRPCServer: proxySrv, + clusterDeriver: &testClusterDeriver{ + domains: []string{"test.netbird.io"}, + }, + } + + return mgr, testStore +} + +func TestValidateExposePermission(t *testing.T) { + ctx := context.Background() + + t.Run("allowed when peer is in expose group", func(t *testing.T) { + mgr, _ := setupIntegrationTest(t) + err := mgr.ValidateExposePermission(ctx, testAccountID, testPeerID) + assert.NoError(t, err) + }) + + t.Run("denied when peer is not in expose group", func(t *testing.T) { + mgr, testStore := setupIntegrationTest(t) + + // Add a peer that is NOT in the expose group + otherPeerID := "other-peer" + err := testStore.AddPeerToAccount(ctx, &nbpeer.Peer{ + ID: otherPeerID, + AccountID: testAccountID, + Key: "other-key", + DNSLabel: "other-peer", + Name: "other-peer", + IP: net.ParseIP("100.64.0.2"), + Status: &nbpeer.PeerStatus{LastSeen: time.Now()}, + Meta: nbpeer.PeerSystemMeta{Hostname: "other-peer"}, + }) + require.NoError(t, err) + + err = mgr.ValidateExposePermission(ctx, testAccountID, otherPeerID) + require.Error(t, err) + assert.Contains(t, err.Error(), "not in an allowed expose group") + }) + + t.Run("denied when expose is disabled", func(t *testing.T) { + mgr, testStore := setupIntegrationTest(t) + + // Disable peer expose + s, err := testStore.GetAccountSettings(ctx, store.LockingStrengthNone, testAccountID) + require.NoError(t, err) + s.PeerExposeEnabled = false + err = testStore.SaveAccountSettings(ctx, testAccountID, s) + require.NoError(t, err) + + err = mgr.ValidateExposePermission(ctx, testAccountID, testPeerID) + require.Error(t, err) + assert.Contains(t, err.Error(), "not enabled") + }) + + t.Run("disallowed when no groups configured", func(t *testing.T) { + mgr, testStore := setupIntegrationTest(t) + + // Enable expose with empty groups — no groups configured means no peer is allowed + s, err := testStore.GetAccountSettings(ctx, store.LockingStrengthNone, testAccountID) + require.NoError(t, err) + s.PeerExposeGroups = []string{} + err = testStore.SaveAccountSettings(ctx, testAccountID, s) + require.NoError(t, err) + + err = mgr.ValidateExposePermission(ctx, testAccountID, testPeerID) + assert.Error(t, err) + }) + + t.Run("error when store returns error", func(t *testing.T) { + ctrl := gomock.NewController(t) + mockStore := store.NewMockStore(ctrl) + mockStore.EXPECT().GetAccountSettings(gomock.Any(), gomock.Any(), testAccountID).Return(nil, errors.New("store error")) + mgr := &managerImpl{store: mockStore} + err := mgr.ValidateExposePermission(ctx, testAccountID, testPeerID) + require.Error(t, err) + assert.Contains(t, err.Error(), "get account settings") + }) +} + +func TestCreateServiceFromPeer(t *testing.T) { + ctx := context.Background() + + t.Run("creates service with random domain", func(t *testing.T) { + mgr, testStore := setupIntegrationTest(t) + + service := &reverseproxy.Service{ + Name: "my-expose", + Enabled: true, + Targets: []*reverseproxy.Target{ + { + AccountID: testAccountID, + Port: 8080, + Protocol: "http", + TargetId: testPeerID, + TargetType: reverseproxy.TargetTypePeer, + Enabled: true, + }, + }, + } + + created, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, service) + require.NoError(t, err) + assert.NotEmpty(t, created.ID, "service should have an ID") + assert.Contains(t, created.Domain, "test.netbird.io", "domain should use cluster domain") + assert.Equal(t, reverseproxy.SourceEphemeral, created.Source, "source should be ephemeral") + assert.Equal(t, testPeerID, created.SourcePeer, "source peer should be set") + assert.NotNil(t, created.Meta.LastRenewedAt, "last renewed should be set") + + // Verify service is persisted in store + persisted, err := testStore.GetServiceByID(ctx, store.LockingStrengthNone, testAccountID, created.ID) + require.NoError(t, err) + assert.Equal(t, created.ID, persisted.ID) + assert.Equal(t, created.Domain, persisted.Domain) + }) + + t.Run("creates service with custom domain", func(t *testing.T) { + mgr, _ := setupIntegrationTest(t) + + service := &reverseproxy.Service{ + Name: "custom", + Domain: "custom.example.com", + Enabled: true, + Targets: []*reverseproxy.Target{ + { + AccountID: testAccountID, + Port: 80, + Protocol: "http", + TargetId: testPeerID, + TargetType: reverseproxy.TargetTypePeer, + Enabled: true, + }, + }, + } + + created, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, service) + require.NoError(t, err) + assert.Equal(t, "custom.example.com", created.Domain, "should keep the provided domain") + }) + + t.Run("replaces host by peer IP lookup", func(t *testing.T) { + mgr, _ := setupIntegrationTest(t) + + service := &reverseproxy.Service{ + Name: "lookup-test", + Enabled: true, + Targets: []*reverseproxy.Target{ + { + AccountID: testAccountID, + Port: 3000, + Protocol: "http", + TargetId: testPeerID, + TargetType: reverseproxy.TargetTypePeer, + Enabled: true, + }, + }, + } + + created, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, service) + require.NoError(t, err) + require.Len(t, created.Targets, 1) + assert.Equal(t, "100.64.0.1", created.Targets[0].Host, "host should be resolved to peer IP") + }) +} + +func TestGetGroupIDsFromNames(t *testing.T) { + ctx := context.Background() + + t.Run("resolves group names to IDs", func(t *testing.T) { + mgr, _ := setupIntegrationTest(t) + ids, err := mgr.getGroupIDsFromNames(ctx, testAccountID, []string{"Expose Group"}) + require.NoError(t, err) + require.Len(t, ids, 1, "should return exactly one group ID") + assert.Equal(t, testGroupID, ids[0]) + }) + + t.Run("returns error for unknown group", func(t *testing.T) { + mgr, _ := setupIntegrationTest(t) + _, err := mgr.getGroupIDsFromNames(ctx, testAccountID, []string{"nonexistent"}) + require.Error(t, err) + }) + + t.Run("returns error for empty group list", func(t *testing.T) { + mgr, _ := setupIntegrationTest(t) + _, err := mgr.getGroupIDsFromNames(ctx, testAccountID, []string{}) + require.Error(t, err) + assert.Contains(t, err.Error(), "no group names provided") + }) +} diff --git a/management/internals/modules/reverseproxy/reverseproxy.go b/management/internals/modules/reverseproxy/reverseproxy.go index 0cbbe450b..ebe9ace96 100644 --- a/management/internals/modules/reverseproxy/reverseproxy.go +++ b/management/internals/modules/reverseproxy/reverseproxy.go @@ -1,10 +1,13 @@ package reverseproxy import ( + "crypto/rand" "errors" "fmt" + "math/big" "net" "net/url" + "regexp" "strconv" "time" @@ -40,6 +43,9 @@ const ( TargetTypeHost = "host" TargetTypeDomain = "domain" TargetTypeSubnet = "subnet" + + SourcePermanent = "permanent" + SourceEphemeral = "ephemeral" ) type Target struct { @@ -114,8 +120,9 @@ type OIDCValidationConfig struct { type ServiceMeta struct { CreatedAt time.Time - CertificateIssuedAt time.Time + CertificateIssuedAt *time.Time Status string + LastRenewedAt *time.Time } type Service struct { @@ -132,6 +139,8 @@ type Service struct { Meta ServiceMeta `gorm:"embedded;embeddedPrefix:meta_"` SessionPrivateKey string `gorm:"column:session_private_key"` SessionPublicKey string `gorm:"column:session_public_key"` + Source string `gorm:"default:'permanent'"` + SourcePeer string } func NewService(accountID, name, domain, proxyCluster string, targets []*Target, enabled bool) *Service { @@ -207,8 +216,8 @@ func (s *Service) ToAPIResponse() *api.Service { Status: api.ServiceMetaStatus(s.Meta.Status), } - if !s.Meta.CertificateIssuedAt.IsZero() { - meta.CertificateIssuedAt = &s.Meta.CertificateIssuedAt + if s.Meta.CertificateIssuedAt != nil { + meta.CertificateIssuedAt = s.Meta.CertificateIssuedAt } resp := &api.Service{ @@ -309,6 +318,63 @@ func isDefaultPort(scheme string, port int) bool { return (scheme == "https" && port == 443) || (scheme == "http" && port == 80) } +// FromExposeRequest builds a Service from a peer expose gRPC request. +func FromExposeRequest(req *proto.ExposeServiceRequest, accountID, peerID, serviceName string) *Service { + service := &Service{ + AccountID: accountID, + Name: serviceName, + Enabled: true, + Targets: []*Target{ + { + AccountID: accountID, + Port: int(req.Port), + Protocol: exposeProtocolToString(req.Protocol), + TargetId: peerID, + TargetType: TargetTypePeer, + Enabled: true, + }, + }, + } + + if req.Domain != "" { + service.Domain = serviceName + "." + req.Domain + } + + if req.Pin != "" { + service.Auth.PinAuth = &PINAuthConfig{ + Enabled: true, + Pin: req.Pin, + } + } + + if req.Password != "" { + service.Auth.PasswordAuth = &PasswordAuthConfig{ + Enabled: true, + Password: req.Password, + } + } + + if len(req.UserGroups) > 0 { + service.Auth.BearerAuth = &BearerAuthConfig{ + Enabled: true, + DistributionGroups: req.UserGroups, + } + } + + return service +} + +func exposeProtocolToString(p proto.ExposeProtocol) string { + switch p { + case proto.ExposeProtocol_EXPOSE_HTTP: + return "http" + case proto.ExposeProtocol_EXPOSE_HTTPS: + return "https" + default: + return "http" + } +} + func (s *Service) FromAPIRequest(req *api.ServiceRequest, accountID string) { s.Name = req.Name s.Domain = req.Domain @@ -403,7 +469,11 @@ func (s *Service) Validate() error { } func (s *Service) EventMeta() map[string]any { - return map[string]any{"name": s.Name, "domain": s.Domain, "proxy_cluster": s.ProxyCluster} + return map[string]any{"name": s.Name, "domain": s.Domain, "proxy_cluster": s.ProxyCluster, "source": s.Source, "auth": s.isAuthEnabled()} +} + +func (s *Service) isAuthEnabled() bool { + return s.Auth.PasswordAuth != nil || s.Auth.PinAuth != nil || s.Auth.BearerAuth != nil } func (s *Service) Copy() *Service { @@ -427,6 +497,8 @@ func (s *Service) Copy() *Service { Meta: s.Meta, SessionPrivateKey: s.SessionPrivateKey, SessionPublicKey: s.SessionPublicKey, + Source: s.Source, + SourcePeer: s.SourcePeer, } } @@ -461,3 +533,43 @@ func (s *Service) DecryptSensitiveData(enc *crypt.FieldEncrypt) error { return nil } + +const alphanumCharset = "abcdefghijklmnopqrstuvwxyz0123456789" + +var validNamePrefix = regexp.MustCompile(`^[a-z0-9]([a-z0-9-]{0,30}[a-z0-9])?$`) + +// GenerateExposeName generates a random service name for peer-exposed services. +// The prefix, if provided, must be a valid DNS label component (lowercase alphanumeric and hyphens). +func GenerateExposeName(prefix string) (string, error) { + if prefix != "" && !validNamePrefix.MatchString(prefix) { + return "", fmt.Errorf("invalid name prefix %q: must be lowercase alphanumeric with optional hyphens, 1-32 characters", prefix) + } + + suffixLen := 12 + if prefix != "" { + suffixLen = 4 + } + + suffix, err := randomAlphanumeric(suffixLen) + if err != nil { + return "", fmt.Errorf("generate random name: %w", err) + } + + if prefix == "" { + return suffix, nil + } + return prefix + "-" + suffix, nil +} + +func randomAlphanumeric(n int) (string, error) { + result := make([]byte, n) + charsetLen := big.NewInt(int64(len(alphanumCharset))) + for i := range result { + idx, err := rand.Int(rand.Reader, charsetLen) + if err != nil { + return "", err + } + result[i] = alphanumCharset[idx.Int64()] + } + return string(result), nil +} diff --git a/management/internals/modules/reverseproxy/reverseproxy_test.go b/management/internals/modules/reverseproxy/reverseproxy_test.go index 546e80b31..c80d7e342 100644 --- a/management/internals/modules/reverseproxy/reverseproxy_test.go +++ b/management/internals/modules/reverseproxy/reverseproxy_test.go @@ -403,3 +403,146 @@ func TestAuthConfig_ClearSecrets(t *testing.T) { t.Errorf("PIN not cleared, got: %s", config.PinAuth.Pin) } } + +func TestGenerateExposeName(t *testing.T) { + t.Run("no prefix generates 12-char name", func(t *testing.T) { + name, err := GenerateExposeName("") + require.NoError(t, err) + assert.Len(t, name, 12) + assert.Regexp(t, `^[a-z0-9]+$`, name) + }) + + t.Run("with prefix generates prefix-XXXX", func(t *testing.T) { + name, err := GenerateExposeName("myapp") + require.NoError(t, err) + assert.True(t, strings.HasPrefix(name, "myapp-"), "name should start with prefix") + suffix := strings.TrimPrefix(name, "myapp-") + assert.Len(t, suffix, 4, "suffix should be 4 chars") + assert.Regexp(t, `^[a-z0-9]+$`, suffix) + }) + + t.Run("unique names", func(t *testing.T) { + names := make(map[string]bool) + for i := 0; i < 50; i++ { + name, err := GenerateExposeName("") + require.NoError(t, err) + names[name] = true + } + assert.Greater(t, len(names), 45, "should generate mostly unique names") + }) + + t.Run("valid prefixes", func(t *testing.T) { + validPrefixes := []string{"a", "ab", "a1", "my-app", "web-server-01", "a-b"} + for _, prefix := range validPrefixes { + name, err := GenerateExposeName(prefix) + assert.NoError(t, err, "prefix %q should be valid", prefix) + assert.True(t, strings.HasPrefix(name, prefix+"-"), "name should start with %q-", prefix) + } + }) + + t.Run("invalid prefixes", func(t *testing.T) { + invalidPrefixes := []string{ + "-starts-with-dash", + "ends-with-dash-", + "has.dots", + "HAS-UPPER", + "has spaces", + "has/slash", + "a--", + } + for _, prefix := range invalidPrefixes { + _, err := GenerateExposeName(prefix) + assert.Error(t, err, "prefix %q should be invalid", prefix) + assert.Contains(t, err.Error(), "invalid name prefix") + } + }) +} + +func TestFromExposeRequest(t *testing.T) { + t.Run("basic HTTP service", func(t *testing.T) { + req := &proto.ExposeServiceRequest{ + Port: 8080, + Protocol: proto.ExposeProtocol_EXPOSE_HTTP, + } + + service := FromExposeRequest(req, "account-1", "peer-1", "mysvc") + + assert.Equal(t, "account-1", service.AccountID) + assert.Equal(t, "mysvc", service.Name) + assert.True(t, service.Enabled) + assert.Empty(t, service.Domain, "domain should be empty when not specified") + require.Len(t, service.Targets, 1) + + target := service.Targets[0] + assert.Equal(t, 8080, target.Port) + assert.Equal(t, "http", target.Protocol) + assert.Equal(t, "peer-1", target.TargetId) + assert.Equal(t, TargetTypePeer, target.TargetType) + assert.True(t, target.Enabled) + assert.Equal(t, "account-1", target.AccountID) + }) + + t.Run("with custom domain", func(t *testing.T) { + req := &proto.ExposeServiceRequest{ + Port: 3000, + Domain: "example.com", + } + + service := FromExposeRequest(req, "acc", "peer", "web") + assert.Equal(t, "web.example.com", service.Domain) + }) + + t.Run("with PIN auth", func(t *testing.T) { + req := &proto.ExposeServiceRequest{ + Port: 80, + Pin: "1234", + } + + service := FromExposeRequest(req, "acc", "peer", "svc") + require.NotNil(t, service.Auth.PinAuth) + assert.True(t, service.Auth.PinAuth.Enabled) + assert.Equal(t, "1234", service.Auth.PinAuth.Pin) + assert.Nil(t, service.Auth.PasswordAuth) + assert.Nil(t, service.Auth.BearerAuth) + }) + + t.Run("with password auth", func(t *testing.T) { + req := &proto.ExposeServiceRequest{ + Port: 80, + Password: "secret", + } + + service := FromExposeRequest(req, "acc", "peer", "svc") + require.NotNil(t, service.Auth.PasswordAuth) + assert.True(t, service.Auth.PasswordAuth.Enabled) + assert.Equal(t, "secret", service.Auth.PasswordAuth.Password) + }) + + t.Run("with user groups (bearer auth)", func(t *testing.T) { + req := &proto.ExposeServiceRequest{ + Port: 80, + UserGroups: []string{"admins", "devs"}, + } + + service := FromExposeRequest(req, "acc", "peer", "svc") + require.NotNil(t, service.Auth.BearerAuth) + assert.True(t, service.Auth.BearerAuth.Enabled) + assert.Equal(t, []string{"admins", "devs"}, service.Auth.BearerAuth.DistributionGroups) + }) + + t.Run("with all auth types", func(t *testing.T) { + req := &proto.ExposeServiceRequest{ + Port: 443, + Domain: "myco.com", + Pin: "9999", + Password: "pass", + UserGroups: []string{"ops"}, + } + + service := FromExposeRequest(req, "acc", "peer", "full") + assert.Equal(t, "full.myco.com", service.Domain) + require.NotNil(t, service.Auth.PinAuth) + require.NotNil(t, service.Auth.PasswordAuth) + require.NotNil(t, service.Auth.BearerAuth) + }) +} diff --git a/management/internals/server/boot.go b/management/internals/server/boot.go index e897a09f5..216ea0857 100644 --- a/management/internals/server/boot.go +++ b/management/internals/server/boot.go @@ -152,6 +152,8 @@ func (s *BaseServer) GRPCServer() *grpc.Server { if err != nil { log.Fatalf("failed to create management server: %v", err) } + srv.SetReverseProxyManager(s.ReverseProxyManager()) + srv.StartExposeReaper(context.Background()) mgmtProto.RegisterManagementServiceServer(gRPCAPIHandler, srv) mgmtProto.RegisterProxyServiceServer(gRPCAPIHandler, s.ReverseProxyGRPCServer()) diff --git a/management/internals/server/modules.go b/management/internals/server/modules.go index 58125c0a3..faec5b99c 100644 --- a/management/internals/server/modules.go +++ b/management/internals/server/modules.go @@ -192,7 +192,7 @@ func (s *BaseServer) RecordsManager() records.Manager { func (s *BaseServer) ReverseProxyManager() reverseproxy.Manager { return Create(s, func() reverseproxy.Manager { - return nbreverseproxy.NewManager(s.Store(), s.AccountManager(), s.PermissionsManager(), s.ReverseProxyGRPCServer(), s.ReverseProxyDomainManager()) + return nbreverseproxy.NewManager(s.Store(), s.AccountManager(), s.PermissionsManager(), s.SettingsManager(), s.ReverseProxyGRPCServer(), s.ReverseProxyDomainManager()) }) } diff --git a/management/internals/shared/grpc/expose_service.go b/management/internals/shared/grpc/expose_service.go new file mode 100644 index 000000000..45b60ceec --- /dev/null +++ b/management/internals/shared/grpc/expose_service.go @@ -0,0 +1,301 @@ +package grpc + +import ( + "context" + "regexp" + "sync" + "time" + + pb "github.com/golang/protobuf/proto" // nolint + log "github.com/sirupsen/logrus" + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/netbirdio/netbird/encryption" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + nbContext "github.com/netbirdio/netbird/management/server/context" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/shared/management/proto" + internalStatus "github.com/netbirdio/netbird/shared/management/status" +) + +var pinRegexp = regexp.MustCompile(`^\d{6}$`) + +const ( + exposeTTL = 90 * time.Second + exposeReapInterval = 30 * time.Second + maxExposesPerPeer = 10 +) + +type activeExpose struct { + mu sync.Mutex + serviceID string + domain string + accountID string + peerID string + lastRenewed time.Time +} + +func exposeKey(peerID, domain string) string { + return peerID + ":" + domain +} + +// CreateExpose handles a peer request to create a new expose service. +func (s *Server) CreateExpose(ctx context.Context, req *proto.EncryptedMessage) (*proto.EncryptedMessage, error) { + exposeReq := &proto.ExposeServiceRequest{} + peerKey, err := s.parseRequest(ctx, req, exposeReq) + if err != nil { + return nil, err + } + + accountID, peer, err := s.authenticateExposePeer(ctx, peerKey) + if err != nil { + return nil, err + } + + // nolint:staticcheck + ctx = context.WithValue(ctx, nbContext.AccountIDKey, accountID) + + if exposeReq.Protocol != proto.ExposeProtocol_EXPOSE_HTTP && exposeReq.Protocol != proto.ExposeProtocol_EXPOSE_HTTPS { + return nil, status.Errorf(codes.InvalidArgument, "only HTTP or HTTPS protocol are supported") + } + + if exposeReq.Pin != "" && !pinRegexp.MatchString(exposeReq.Pin) { + return nil, status.Errorf(codes.InvalidArgument, "invalid pin: must be exactly 6 digits") + } + + for _, g := range exposeReq.UserGroups { + if g == "" { + return nil, status.Errorf(codes.InvalidArgument, "user group name cannot be empty") + } + } + + reverseProxyMgr := s.getReverseProxyManager() + if reverseProxyMgr == nil { + return nil, status.Errorf(codes.Internal, "reverse proxy manager not available") + } + + if err := reverseProxyMgr.ValidateExposePermission(ctx, accountID, peer.ID); err != nil { + log.WithContext(ctx).Debugf("expose permission denied for peer %s: %v", peer.ID, err) + return nil, status.Errorf(codes.PermissionDenied, "permission denied") + } + + serviceName, err := reverseproxy.GenerateExposeName(exposeReq.NamePrefix) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "generate service name: %v", err) + } + + service := reverseproxy.FromExposeRequest(exposeReq, accountID, peer.ID, serviceName) + + // Serialize the count check to prevent concurrent CreateExpose calls from + // exceeding maxExposesPerPeer. The lock is held only for the check; the + // actual service creation happens outside the lock. + s.exposeCreateMu.Lock() + if s.countPeerExposes(peer.ID) >= maxExposesPerPeer { + s.exposeCreateMu.Unlock() + return nil, status.Errorf(codes.ResourceExhausted, "peer has reached the maximum number of active expose sessions (%d)", maxExposesPerPeer) + } + s.exposeCreateMu.Unlock() + + created, err := reverseProxyMgr.CreateServiceFromPeer(ctx, accountID, peer.ID, service) + if err != nil { + log.WithContext(ctx).Errorf("failed to create service from peer: %v", err) + return nil, status.Errorf(codes.Internal, "create service: %v", err) + } + + key := exposeKey(peer.ID, created.Domain) + if _, loaded := s.activeExposes.LoadOrStore(key, &activeExpose{ + serviceID: created.ID, + domain: created.Domain, + accountID: accountID, + peerID: peer.ID, + lastRenewed: time.Now(), + }); loaded { + s.deleteExposeService(ctx, accountID, peer.ID, created) + return nil, status.Errorf(codes.AlreadyExists, "peer already has an active expose session for this domain") + } + + resp := &proto.ExposeServiceResponse{ + ServiceName: created.Name, + ServiceUrl: "https://" + created.Domain, + Domain: created.Domain, + } + + return s.encryptResponse(peerKey, resp) +} + +// RenewExpose extends the TTL of an active expose session. +func (s *Server) RenewExpose(ctx context.Context, req *proto.EncryptedMessage) (*proto.EncryptedMessage, error) { + renewReq := &proto.RenewExposeRequest{} + peerKey, err := s.parseRequest(ctx, req, renewReq) + if err != nil { + return nil, err + } + + _, peer, err := s.authenticateExposePeer(ctx, peerKey) + if err != nil { + return nil, err + } + + key := exposeKey(peer.ID, renewReq.Domain) + val, ok := s.activeExposes.Load(key) + if !ok { + return nil, status.Errorf(codes.NotFound, "no active expose session for domain %s", renewReq.Domain) + } + + expose := val.(*activeExpose) + expose.mu.Lock() + expose.lastRenewed = time.Now() + expose.mu.Unlock() + + return s.encryptResponse(peerKey, &proto.RenewExposeResponse{}) +} + +// StopExpose terminates an active expose session. +func (s *Server) StopExpose(ctx context.Context, req *proto.EncryptedMessage) (*proto.EncryptedMessage, error) { + stopReq := &proto.StopExposeRequest{} + peerKey, err := s.parseRequest(ctx, req, stopReq) + if err != nil { + return nil, err + } + + _, peer, err := s.authenticateExposePeer(ctx, peerKey) + if err != nil { + return nil, err + } + + key := exposeKey(peer.ID, stopReq.Domain) + val, ok := s.activeExposes.LoadAndDelete(key) + if !ok { + return nil, status.Errorf(codes.NotFound, "no active expose session for domain %s", stopReq.Domain) + } + + expose := val.(*activeExpose) + s.cleanupExpose(expose, false) + + return s.encryptResponse(peerKey, &proto.StopExposeResponse{}) +} + +// StartExposeReaper starts a background goroutine that reaps expired expose sessions. +func (s *Server) StartExposeReaper(ctx context.Context) { + go func() { + ticker := time.NewTicker(exposeReapInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + s.reapExpiredExposes() + } + } + }() +} + +func (s *Server) reapExpiredExposes() { + s.activeExposes.Range(func(key, val any) bool { + expose := val.(*activeExpose) + expose.mu.Lock() + expired := time.Since(expose.lastRenewed) > exposeTTL + expose.mu.Unlock() + + if expired { + if _, deleted := s.activeExposes.LoadAndDelete(key); deleted { + log.Infof("reaping expired expose session for peer %s, domain %s", expose.peerID, expose.domain) + s.cleanupExpose(expose, true) + } + } + return true + }) +} + +func (s *Server) encryptResponse(peerKey wgtypes.Key, msg pb.Message) (*proto.EncryptedMessage, error) { + wgKey, err := s.secretsManager.GetWGKey() + if err != nil { + return nil, status.Errorf(codes.Internal, "internal error") + } + + encryptedResp, err := encryption.EncryptMessage(peerKey, wgKey, msg) + if err != nil { + return nil, status.Errorf(codes.Internal, "encrypt response") + } + + return &proto.EncryptedMessage{ + WgPubKey: wgKey.PublicKey().String(), + Body: encryptedResp, + }, nil +} + +func (s *Server) authenticateExposePeer(ctx context.Context, peerKey wgtypes.Key) (string, *nbpeer.Peer, error) { + accountID, err := s.accountManager.GetAccountIDForPeerKey(ctx, peerKey.String()) + if err != nil { + if errStatus, ok := internalStatus.FromError(err); ok && errStatus.Type() == internalStatus.NotFound { + return "", nil, status.Errorf(codes.PermissionDenied, "peer is not registered") + } + return "", nil, status.Errorf(codes.Internal, "lookup account for peer") + } + + peer, err := s.accountManager.GetStore().GetPeerByPeerPubKey(ctx, store.LockingStrengthNone, peerKey.String()) + if err != nil { + return "", nil, status.Errorf(codes.PermissionDenied, "peer is not registered") + } + + return accountID, peer, nil +} + +func (s *Server) deleteExposeService(ctx context.Context, accountID, peerID string, service *reverseproxy.Service) { + reverseProxyMgr := s.getReverseProxyManager() + if reverseProxyMgr == nil { + return + } + if err := reverseProxyMgr.DeleteServiceFromPeer(ctx, accountID, peerID, service.ID); err != nil { + log.WithContext(ctx).Debugf("failed to delete expose service %s: %v", service.ID, err) + } +} + +func (s *Server) cleanupExpose(expose *activeExpose, expired bool) { + bgCtx := context.Background() + + reverseProxyMgr := s.getReverseProxyManager() + if reverseProxyMgr == nil { + log.Errorf("cannot cleanup exposed service %s: reverse proxy manager not available", expose.serviceID) + return + } + + var err error + if expired { + err = reverseProxyMgr.ExpireServiceFromPeer(bgCtx, expose.accountID, expose.peerID, expose.serviceID) + } else { + err = reverseProxyMgr.DeleteServiceFromPeer(bgCtx, expose.accountID, expose.peerID, expose.serviceID) + } + if err != nil { + log.Errorf("failed to delete peer-exposed service %s: %v", expose.serviceID, err) + } +} + +func (s *Server) countPeerExposes(peerID string) int { + count := 0 + s.activeExposes.Range(func(_, val any) bool { + if expose := val.(*activeExpose); expose.peerID == peerID { + count++ + } + return true + }) + return count +} + +func (s *Server) getReverseProxyManager() reverseproxy.Manager { + s.reverseProxyMu.RLock() + defer s.reverseProxyMu.RUnlock() + return s.reverseProxyManager +} + +// SetReverseProxyManager sets the reverse proxy manager on the server. +func (s *Server) SetReverseProxyManager(mgr reverseproxy.Manager) { + s.reverseProxyMu.Lock() + defer s.reverseProxyMu.Unlock() + s.reverseProxyManager = mgr +} diff --git a/management/internals/shared/grpc/expose_service_test.go b/management/internals/shared/grpc/expose_service_test.go new file mode 100644 index 000000000..75a16ae44 --- /dev/null +++ b/management/internals/shared/grpc/expose_service_test.go @@ -0,0 +1,242 @@ +package grpc + +import ( + "sync" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" +) + +func TestPinValidation(t *testing.T) { + tests := []struct { + pin string + valid bool + }{ + {"123456", true}, + {"000000", true}, + {"12345", false}, + {"1234567", false}, + {"abcdef", false}, + {"12345a", false}, + {"", false}, + {"12 345", false}, + } + + for _, tt := range tests { + assert.Equal(t, tt.valid, pinRegexp.MatchString(tt.pin), "pin %q", tt.pin) + } +} + +func TestExposeKey(t *testing.T) { + assert.Equal(t, "peer1:example.com", exposeKey("peer1", "example.com")) + assert.Equal(t, "peer2:other.com", exposeKey("peer2", "other.com")) + assert.NotEqual(t, exposeKey("peer1", "a.com"), exposeKey("peer1", "b.com")) +} + +func TestCountPeerExposes(t *testing.T) { + s := &Server{} + + // No exposes + assert.Equal(t, 0, s.countPeerExposes("peer1")) + + // Add some exposes for different peers + s.activeExposes.Store("peer1:a.com", &activeExpose{peerID: "peer1"}) + s.activeExposes.Store("peer1:b.com", &activeExpose{peerID: "peer1"}) + s.activeExposes.Store("peer2:a.com", &activeExpose{peerID: "peer2"}) + + assert.Equal(t, 2, s.countPeerExposes("peer1"), "peer1 should have 2 exposes") + assert.Equal(t, 1, s.countPeerExposes("peer2"), "peer2 should have 1 expose") + assert.Equal(t, 0, s.countPeerExposes("peer3"), "peer3 should have 0 exposes") +} + +func TestReapExpiredExposes(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockMgr := reverseproxy.NewMockManager(ctrl) + + s := &Server{} + s.SetReverseProxyManager(mockMgr) + + now := time.Now() + + // Add an expired expose and a still-active one + s.activeExposes.Store("peer1:expired.com", &activeExpose{ + serviceID: "svc-expired", + domain: "expired.com", + accountID: "acct1", + peerID: "peer1", + lastRenewed: now.Add(-2 * exposeTTL), + }) + s.activeExposes.Store("peer1:active.com", &activeExpose{ + serviceID: "svc-active", + domain: "active.com", + accountID: "acct1", + peerID: "peer1", + lastRenewed: now, + }) + + // Expect ExpireServiceFromPeer called only for the expired one + mockMgr.EXPECT(). + ExpireServiceFromPeer(gomock.Any(), "acct1", "peer1", "svc-expired"). + Return(nil) + + s.reapExpiredExposes() + + // Verify expired one is removed + _, exists := s.activeExposes.Load("peer1:expired.com") + assert.False(t, exists, "expired expose should be removed") + + // Verify active one remains + _, exists = s.activeExposes.Load("peer1:active.com") + assert.True(t, exists, "active expose should remain") +} + +func TestCleanupExpose_Delete(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockMgr := reverseproxy.NewMockManager(ctrl) + + s := &Server{} + s.SetReverseProxyManager(mockMgr) + + mockMgr.EXPECT(). + DeleteServiceFromPeer(gomock.Any(), "acct1", "peer1", "svc1"). + Return(nil) + + s.cleanupExpose(&activeExpose{ + serviceID: "svc1", + accountID: "acct1", + peerID: "peer1", + }, false) +} + +func TestCleanupExpose_Expire(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockMgr := reverseproxy.NewMockManager(ctrl) + + s := &Server{} + s.SetReverseProxyManager(mockMgr) + + mockMgr.EXPECT(). + ExpireServiceFromPeer(gomock.Any(), "acct1", "peer1", "svc1"). + Return(nil) + + s.cleanupExpose(&activeExpose{ + serviceID: "svc1", + accountID: "acct1", + peerID: "peer1", + }, true) +} + +func TestCleanupExpose_NilManager(t *testing.T) { + s := &Server{} + // Should not panic when reverse proxy manager is nil + s.cleanupExpose(&activeExpose{ + serviceID: "svc1", + accountID: "acct1", + peerID: "peer1", + }, false) +} + +func TestSetReverseProxyManager(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + s := &Server{} + + // Initially nil + assert.Nil(t, s.getReverseProxyManager()) + + mockMgr := reverseproxy.NewMockManager(ctrl) + s.SetReverseProxyManager(mockMgr) + assert.NotNil(t, s.getReverseProxyManager()) + + // Can set to nil + s.SetReverseProxyManager(nil) + assert.Nil(t, s.getReverseProxyManager()) +} + +func TestReapExpiredExposes_ConcurrentSafety(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockMgr := reverseproxy.NewMockManager(ctrl) + mockMgr.EXPECT(). + ExpireServiceFromPeer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + Return(nil). + AnyTimes() + + s := &Server{} + s.SetReverseProxyManager(mockMgr) + + // Pre-populate with expired sessions + for i := range 20 { + peerID := "peer1" + domain := "domain-" + string(rune('a'+i)) + s.activeExposes.Store(exposeKey(peerID, domain), &activeExpose{ + serviceID: "svc-" + domain, + domain: domain, + accountID: "acct1", + peerID: peerID, + lastRenewed: time.Now().Add(-2 * exposeTTL), + }) + } + + // Run reaper concurrently with count + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + s.reapExpiredExposes() + }() + go func() { + defer wg.Done() + s.countPeerExposes("peer1") + }() + wg.Wait() + + assert.Equal(t, 0, s.countPeerExposes("peer1"), "all expired exposes should be reaped") +} + +func TestActiveExposeMutexProtectsLastRenewed(t *testing.T) { + expose := &activeExpose{ + lastRenewed: time.Now().Add(-1 * time.Hour), + } + + // Simulate concurrent renew and read + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + for range 100 { + expose.mu.Lock() + expose.lastRenewed = time.Now() + expose.mu.Unlock() + } + }() + + go func() { + defer wg.Done() + for range 100 { + expose.mu.Lock() + _ = time.Since(expose.lastRenewed) + expose.mu.Unlock() + } + }() + + wg.Wait() + + expose.mu.Lock() + require.False(t, expose.lastRenewed.IsZero(), "lastRenewed should not be zero after concurrent access") + expose.mu.Unlock() +} diff --git a/management/internals/shared/grpc/proxy_group_access_test.go b/management/internals/shared/grpc/proxy_group_access_test.go index 31b1df3b1..611ee36b6 100644 --- a/management/internals/shared/grpc/proxy_group_access_test.go +++ b/management/internals/shared/grpc/proxy_group_access_test.go @@ -76,6 +76,22 @@ func (m *mockReverseProxyManager) GetServiceIDByTargetID(_ context.Context, _, _ return "", nil } +func (m *mockReverseProxyManager) ValidateExposePermission(_ context.Context, _, _ string) error { + return nil +} + +func (m *mockReverseProxyManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { + return &reverseproxy.Service{}, nil +} + +func (m *mockReverseProxyManager) DeleteServiceFromPeer(_ context.Context, _, _, _ string) error { + return nil +} + +func (m *mockReverseProxyManager) ExpireServiceFromPeer(_ context.Context, _, _, _ string) error { + return nil +} + type mockUsersManager struct { users map[string]*types.User err error diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index 0167aca07..3df9ce7ba 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -26,6 +26,7 @@ import ( "github.com/netbirdio/netbird/shared/management/client/common" "github.com/netbirdio/netbird/management/internals/controllers/network_map" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" nbconfig "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/idp" "github.com/netbirdio/netbird/management/server/job" @@ -80,6 +81,11 @@ type Server struct { syncSem atomic.Int32 syncLimEnabled bool syncLim int32 + + activeExposes sync.Map + exposeCreateMu sync.Mutex + reverseProxyManager reverseproxy.Manager + reverseProxyMu sync.RWMutex } // NewServer creates a new Management server diff --git a/management/internals/shared/grpc/validate_session_test.go b/management/internals/shared/grpc/validate_session_test.go index f76d3ada0..1e03a461a 100644 --- a/management/internals/shared/grpc/validate_session_test.go +++ b/management/internals/shared/grpc/validate_session_test.go @@ -295,6 +295,22 @@ func (m *testValidateSessionProxyManager) GetServiceIDByTargetID(_ context.Conte return "", nil } +func (m *testValidateSessionProxyManager) ValidateExposePermission(_ context.Context, _, _ string) error { + return nil +} + +func (m *testValidateSessionProxyManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { + return nil, nil +} + +func (m *testValidateSessionProxyManager) DeleteServiceFromPeer(_ context.Context, _, _, _ string) error { + return nil +} + +func (m *testValidateSessionProxyManager) ExpireServiceFromPeer(_ context.Context, _, _, _ string) error { + return nil +} + type testValidateSessionUsersManager struct { store store.Store } diff --git a/management/server/account.go b/management/server/account.go index d436445e8..fb8592164 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -376,6 +376,7 @@ func (am *DefaultAccountManager) UpdateAccountSettings(ctx context.Context, acco am.handlePeerLoginExpirationSettings(ctx, oldSettings, newSettings, userID, accountID) am.handleGroupsPropagationSettings(ctx, oldSettings, newSettings, userID, accountID) am.handleAutoUpdateVersionSettings(ctx, oldSettings, newSettings, userID, accountID) + am.handlePeerExposeSettings(ctx, oldSettings, newSettings, userID, accountID) if err = am.handleInactivityExpirationSettings(ctx, oldSettings, newSettings, userID, accountID); err != nil { return nil, err } @@ -492,6 +493,21 @@ func (am *DefaultAccountManager) handleAutoUpdateVersionSettings(ctx context.Con } } +func (am *DefaultAccountManager) handlePeerExposeSettings(ctx context.Context, oldSettings, newSettings *types.Settings, userID, accountID string) { + oldEnabled := oldSettings.PeerExposeEnabled + newEnabled := newSettings.PeerExposeEnabled + + if oldEnabled == newEnabled { + return + } + + event := activity.AccountPeerExposeEnabled + if !newEnabled { + event = activity.AccountPeerExposeDisabled + } + am.StoreEvent(ctx, userID, accountID, accountID, event, nil) +} + func (am *DefaultAccountManager) handleInactivityExpirationSettings(ctx context.Context, oldSettings, newSettings *types.Settings, userID, accountID string) error { if newSettings.PeerInactivityExpirationEnabled { if oldSettings.PeerInactivityExpiration != newSettings.PeerInactivityExpiration { diff --git a/management/server/account_test.go b/management/server/account_test.go index f9e9c162d..340e130d9 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -3124,7 +3124,7 @@ func createManager(t testing.TB) (*DefaultAccountManager, *update_channel.PeersU } proxyGrpcServer := nbgrpc.NewProxyServiceServer(nil, nil, nbgrpc.ProxyOIDCConfig{}, peersManager, nil) - manager.SetServiceManager(reverseproxymanager.NewManager(store, manager, permissionsManager, proxyGrpcServer, nil)) + manager.SetServiceManager(reverseproxymanager.NewManager(store, manager, permissionsManager, settingsMockManager, proxyGrpcServer, nil)) return manager, updateManager, nil } diff --git a/management/server/activity/codes.go b/management/server/activity/codes.go index e1b7e5300..53cf30d4c 100644 --- a/management/server/activity/codes.go +++ b/management/server/activity/codes.go @@ -208,6 +208,18 @@ const ( ServiceUpdated Activity = 109 ServiceDeleted Activity = 110 + // PeerServiceExposed indicates that a peer exposed a service via the reverse proxy + PeerServiceExposed Activity = 111 + // PeerServiceUnexposed indicates that a peer-exposed service was removed + PeerServiceUnexposed Activity = 112 + // PeerServiceExposeExpired indicates that a peer-exposed service was removed due to TTL expiration + PeerServiceExposeExpired Activity = 113 + + // AccountPeerExposeEnabled indicates that a user enabled peer expose for the account + AccountPeerExposeEnabled Activity = 114 + // AccountPeerExposeDisabled indicates that a user disabled peer expose for the account + AccountPeerExposeDisabled Activity = 115 + AccountDeleted Activity = 99999 ) @@ -345,6 +357,13 @@ var activityMap = map[Activity]Code{ ServiceCreated: {"Service created", "service.create"}, ServiceUpdated: {"Service updated", "service.update"}, ServiceDeleted: {"Service deleted", "service.delete"}, + + PeerServiceExposed: {"Peer exposed service", "service.peer.expose"}, + PeerServiceUnexposed: {"Peer unexposed service", "service.peer.unexpose"}, + PeerServiceExposeExpired: {"Peer exposed service expired", "service.peer.expose.expire"}, + + AccountPeerExposeEnabled: {"Account peer expose enabled", "account.setting.peer.expose.enable"}, + AccountPeerExposeDisabled: {"Account peer expose disabled", "account.setting.peer.expose.disable"}, } // StringCode returns a string code of the activity diff --git a/management/server/http/handlers/accounts/accounts_handler.go b/management/server/http/handlers/accounts/accounts_handler.go index 122c061ce..27a57c434 100644 --- a/management/server/http/handlers/accounts/accounts_handler.go +++ b/management/server/http/handlers/accounts/accounts_handler.go @@ -168,6 +168,10 @@ func (h *handler) getAllAccounts(w http.ResponseWriter, r *http.Request) { } func (h *handler) updateAccountRequestSettings(req api.PutApiAccountsAccountIdJSONRequestBody) (*types.Settings, error) { + if req.Settings.PeerExposeEnabled && len(req.Settings.PeerExposeGroups) == 0 { + return nil, status.Errorf(status.InvalidArgument, "peer expose requires at least one group") + } + returnSettings := &types.Settings{ PeerLoginExpirationEnabled: req.Settings.PeerLoginExpirationEnabled, PeerLoginExpiration: time.Duration(float64(time.Second.Nanoseconds()) * float64(req.Settings.PeerLoginExpiration)), @@ -175,6 +179,9 @@ func (h *handler) updateAccountRequestSettings(req api.PutApiAccountsAccountIdJS PeerInactivityExpirationEnabled: req.Settings.PeerInactivityExpirationEnabled, PeerInactivityExpiration: time.Duration(float64(time.Second.Nanoseconds()) * float64(req.Settings.PeerInactivityExpiration)), + + PeerExposeEnabled: req.Settings.PeerExposeEnabled, + PeerExposeGroups: req.Settings.PeerExposeGroups, } if req.Settings.Extra != nil { @@ -336,6 +343,8 @@ func toAccountResponse(accountID string, settings *types.Settings, meta *types.A JwtAllowGroups: &jwtAllowGroups, RegularUsersViewBlocked: settings.RegularUsersViewBlocked, RoutingPeerDnsResolutionEnabled: &settings.RoutingPeerDNSResolutionEnabled, + PeerExposeEnabled: settings.PeerExposeEnabled, + PeerExposeGroups: settings.PeerExposeGroups, LazyConnectionEnabled: &settings.LazyConnectionEnabled, DnsDomain: &settings.DNSDomain, AutoUpdateVersion: &settings.AutoUpdateVersion, diff --git a/management/server/http/handlers/proxy/auth_callback_integration_test.go b/management/server/http/handlers/proxy/auth_callback_integration_test.go index 732fd57e3..77d50d818 100644 --- a/management/server/http/handlers/proxy/auth_callback_integration_test.go +++ b/management/server/http/handlers/proxy/auth_callback_integration_test.go @@ -413,6 +413,22 @@ func (m *testServiceManager) GetServiceIDByTargetID(_ context.Context, _, _ stri return "", nil } +func (m *testServiceManager) ValidateExposePermission(_ context.Context, _, _ string) error { + return nil +} + +func (m *testServiceManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { + return nil, nil +} + +func (m *testServiceManager) DeleteServiceFromPeer(_ context.Context, _, _, _ string) error { + return nil +} + +func (m *testServiceManager) ExpireServiceFromPeer(_ context.Context, _, _, _ string) error { + return nil +} + func createTestState(t *testing.T, ps *nbgrpc.ProxyServiceServer, redirectURL string) string { t.Helper() diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index f5c2aafa6..fd2dc5848 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -94,7 +94,7 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee proxyTokenStore := nbgrpc.NewOneTimeTokenStore(1 * time.Minute) proxyServiceServer := nbgrpc.NewProxyServiceServer(accessLogsManager, proxyTokenStore, nbgrpc.ProxyOIDCConfig{}, peersManager, userManager) domainManager := manager.NewManager(store, proxyServiceServer, permissionsManager) - reverseProxyManager := reverseproxymanager.NewManager(store, am, permissionsManager, proxyServiceServer, domainManager) + reverseProxyManager := reverseproxymanager.NewManager(store, am, permissionsManager, settingsManager, proxyServiceServer, domainManager) proxyServiceServer.SetProxyManager(reverseProxyManager) am.SetServiceManager(reverseProxyManager) diff --git a/management/server/mock_server/account_mock.go b/management/server/mock_server/account_mock.go index 032b1150f..ea848328f 100644 --- a/management/server/mock_server/account_mock.go +++ b/management/server/mock_server/account_mock.go @@ -407,7 +407,7 @@ func (am *MockAccountManager) AddPeer( // GetGroupByName mock implementation of GetGroupByName from server.AccountManager interface func (am *MockAccountManager) GetGroupByName(ctx context.Context, accountID, groupName string) (*types.Group, error) { - if am.GetGroupFunc != nil { + if am.GetGroupByNameFunc != nil { return am.GetGroupByNameFunc(ctx, accountID, groupName) } return nil, status.Errorf(codes.Unimplemented, "method GetGroupByName is not implemented") diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 70d501593..e5edbae34 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -2114,7 +2114,8 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*revers s.Meta.CreatedAt = createdAt.Time } if certIssuedAt.Valid { - s.Meta.CertificateIssuedAt = certIssuedAt.Time + t := certIssuedAt.Time + s.Meta.CertificateIssuedAt = &t } if status.Valid { s.Meta.Status = status.String diff --git a/management/server/types/settings.go b/management/server/types/settings.go index a94e01b78..e165968fc 100644 --- a/management/server/types/settings.go +++ b/management/server/types/settings.go @@ -47,6 +47,11 @@ type Settings struct { // NetworkRange is the custom network range for that account NetworkRange netip.Prefix `gorm:"serializer:json"` + // PeerExposeEnabled enables or disables peer-initiated service expose + PeerExposeEnabled bool + // PeerExposeGroups list of peer group IDs allowed to expose services + PeerExposeGroups []string `gorm:"serializer:json"` + // Extra is a dictionary of Account settings Extra *ExtraSettings `gorm:"embedded;embeddedPrefix:extra_"` @@ -80,6 +85,8 @@ func (s *Settings) Copy() *Settings { PeerInactivityExpiration: s.PeerInactivityExpiration, RoutingPeerDNSResolutionEnabled: s.RoutingPeerDNSResolutionEnabled, + PeerExposeEnabled: s.PeerExposeEnabled, + PeerExposeGroups: slices.Clone(s.PeerExposeGroups), LazyConnectionEnabled: s.LazyConnectionEnabled, DNSDomain: s.DNSDomain, NetworkRange: s.NetworkRange, diff --git a/proxy/management_integration_test.go b/proxy/management_integration_test.go index 420194c58..12cec89ff 100644 --- a/proxy/management_integration_test.go +++ b/proxy/management_integration_test.go @@ -247,6 +247,22 @@ func (m *storeBackedServiceManager) GetServiceIDByTargetID(ctx context.Context, return "", nil } +func (m *storeBackedServiceManager) ValidateExposePermission(_ context.Context, _, _ string) error { + return nil +} + +func (m *storeBackedServiceManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { + return &reverseproxy.Service{}, nil +} + +func (m *storeBackedServiceManager) DeleteServiceFromPeer(_ context.Context, _, _, _ string) error { + return nil +} + +func (m *storeBackedServiceManager) ExpireServiceFromPeer(_ context.Context, _, _, _ string) error { + return nil +} + func strPtr(s string) *string { return &s } diff --git a/shared/management/client/client.go b/shared/management/client/client.go index b92c636c5..ba525602e 100644 --- a/shared/management/client/client.go +++ b/shared/management/client/client.go @@ -11,6 +11,7 @@ import ( "github.com/netbirdio/netbird/shared/management/proto" ) +// Client is the interface for the management service client. type Client interface { io.Closer Sync(ctx context.Context, sysInfo *system.Info, msgHandler func(msg *proto.SyncResponse) error) error @@ -24,4 +25,7 @@ type Client interface { IsHealthy() bool SyncMeta(sysInfo *system.Info) error Logout() error + CreateExpose(ctx context.Context, req ExposeRequest) (*ExposeResponse, error) + RenewExpose(ctx context.Context, domain string) error + StopExpose(ctx context.Context, domain string) error } diff --git a/shared/management/client/grpc.go b/shared/management/client/grpc.go index d54c8f870..9505b3fdf 100644 --- a/shared/management/client/grpc.go +++ b/shared/management/client/grpc.go @@ -48,6 +48,22 @@ type GrpcClient struct { connStateCallbackLock sync.RWMutex } +type ExposeRequest struct { + NamePrefix string + Domain string + Port uint16 + Protocol int + Pin string + Password string + UserGroups []string +} + +type ExposeResponse struct { + ServiceName string + Domain string + ServiceURL string +} + // NewClient creates a new client to Management service func NewClient(ctx context.Context, addr string, ourPrivateKey wgtypes.Key, tlsEnabled bool) (*GrpcClient, error) { var conn *grpc.ClientConn @@ -690,6 +706,123 @@ func (c *GrpcClient) Logout() error { return nil } +// CreateExpose calls the management server to create a new expose service. +func (c *GrpcClient) CreateExpose(ctx context.Context, req ExposeRequest) (*ExposeResponse, error) { + serverPubKey, err := c.GetServerPublicKey() + if err != nil { + return nil, err + } + + protoReq, err := toProtoExposeServiceRequest(req) + if err != nil { + return nil, err + } + + encReq, err := encryption.EncryptMessage(*serverPubKey, c.key, protoReq) + if err != nil { + return nil, fmt.Errorf("encrypt create expose request: %w", err) + } + + mgmCtx, cancel := context.WithTimeout(ctx, ConnectTimeout) + defer cancel() + + resp, err := c.realClient.CreateExpose(mgmCtx, &proto.EncryptedMessage{ + WgPubKey: c.key.PublicKey().String(), + Body: encReq, + }) + if err != nil { + return nil, err + } + + exposeResp := &proto.ExposeServiceResponse{} + if err := encryption.DecryptMessage(*serverPubKey, c.key, resp.Body, exposeResp); err != nil { + return nil, fmt.Errorf("decrypt create expose response: %w", err) + } + + return fromProtoExposeResponse(exposeResp), nil +} + +// RenewExpose extends the TTL of an active expose session on the management server. +func (c *GrpcClient) RenewExpose(ctx context.Context, domain string) error { + serverPubKey, err := c.GetServerPublicKey() + if err != nil { + return err + } + + req := &proto.RenewExposeRequest{Domain: domain} + encReq, err := encryption.EncryptMessage(*serverPubKey, c.key, req) + if err != nil { + return fmt.Errorf("encrypt renew expose request: %w", err) + } + + mgmCtx, cancel := context.WithTimeout(ctx, ConnectTimeout) + defer cancel() + + _, err = c.realClient.RenewExpose(mgmCtx, &proto.EncryptedMessage{ + WgPubKey: c.key.PublicKey().String(), + Body: encReq, + }) + return err +} + +// StopExpose terminates an active expose session on the management server. +func (c *GrpcClient) StopExpose(ctx context.Context, domain string) error { + serverPubKey, err := c.GetServerPublicKey() + if err != nil { + return err + } + + req := &proto.StopExposeRequest{Domain: domain} + encReq, err := encryption.EncryptMessage(*serverPubKey, c.key, req) + if err != nil { + return fmt.Errorf("encrypt stop expose request: %w", err) + } + + mgmCtx, cancel := context.WithTimeout(ctx, ConnectTimeout) + defer cancel() + + _, err = c.realClient.StopExpose(mgmCtx, &proto.EncryptedMessage{ + WgPubKey: c.key.PublicKey().String(), + Body: encReq, + }) + return err +} + +func fromProtoExposeResponse(resp *proto.ExposeServiceResponse) *ExposeResponse { + return &ExposeResponse{ + ServiceName: resp.ServiceName, + Domain: resp.Domain, + ServiceURL: resp.ServiceUrl, + } +} + +func toProtoExposeServiceRequest(req ExposeRequest) (*proto.ExposeServiceRequest, error) { + var protocol proto.ExposeProtocol + + switch req.Protocol { + case int(proto.ExposeProtocol_EXPOSE_HTTP): + protocol = proto.ExposeProtocol_EXPOSE_HTTP + case int(proto.ExposeProtocol_EXPOSE_HTTPS): + protocol = proto.ExposeProtocol_EXPOSE_HTTPS + case int(proto.ExposeProtocol_EXPOSE_TCP): + protocol = proto.ExposeProtocol_EXPOSE_TCP + case int(proto.ExposeProtocol_EXPOSE_UDP): + protocol = proto.ExposeProtocol_EXPOSE_UDP + default: + return nil, fmt.Errorf("invalid expose protocol: %d", req.Protocol) + } + + return &proto.ExposeServiceRequest{ + NamePrefix: req.NamePrefix, + Domain: req.Domain, + Port: uint32(req.Port), + Protocol: protocol, + Pin: req.Pin, + Password: req.Password, + UserGroups: req.UserGroups, + }, nil +} + func infoToMetaData(info *system.Info) *proto.PeerSystemMeta { if info == nil { return nil diff --git a/shared/management/client/mock.go b/shared/management/client/mock.go index ac96f7b36..57256d6d4 100644 --- a/shared/management/client/mock.go +++ b/shared/management/client/mock.go @@ -10,6 +10,7 @@ import ( "github.com/netbirdio/netbird/shared/management/proto" ) +// MockClient is a mock implementation of the Client interface for testing. type MockClient struct { CloseFunc func() error SyncFunc func(ctx context.Context, sysInfo *system.Info, msgHandler func(msg *proto.SyncResponse) error) error @@ -21,6 +22,9 @@ type MockClient struct { SyncMetaFunc func(sysInfo *system.Info) error LogoutFunc func() error JobFunc func(ctx context.Context, msgHandler func(msg *proto.JobRequest) *proto.JobResponse) error + CreateExposeFunc func(ctx context.Context, req ExposeRequest) (*ExposeResponse, error) + RenewExposeFunc func(ctx context.Context, domain string) error + StopExposeFunc func(ctx context.Context, domain string) error } func (m *MockClient) IsHealthy() bool { @@ -80,10 +84,10 @@ func (m *MockClient) GetPKCEAuthorizationFlow(serverKey wgtypes.Key) (*proto.PKC if m.GetPKCEAuthorizationFlowFunc == nil { return nil, nil } - return m.GetPKCEAuthorizationFlow(serverKey) + return m.GetPKCEAuthorizationFlowFunc(serverKey) } -// GetNetworkMap mock implementation of GetNetworkMap from mgm.Client interface +// GetNetworkMap mock implementation of GetNetworkMap from Client interface. func (m *MockClient) GetNetworkMap(_ *system.Info) (*proto.NetworkMap, error) { return nil, nil } @@ -101,3 +105,24 @@ func (m *MockClient) Logout() error { } return m.LogoutFunc() } + +func (m *MockClient) CreateExpose(ctx context.Context, req ExposeRequest) (*ExposeResponse, error) { + if m.CreateExposeFunc == nil { + return nil, nil + } + return m.CreateExposeFunc(ctx, req) +} + +func (m *MockClient) RenewExpose(ctx context.Context, domain string) error { + if m.RenewExposeFunc == nil { + return nil + } + return m.RenewExposeFunc(ctx, domain) +} + +func (m *MockClient) StopExpose(ctx context.Context, domain string) error { + if m.StopExposeFunc == nil { + return nil + } + return m.StopExposeFunc(ctx, domain) +} diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index b0ce1b5cc..2927d0319 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -326,6 +326,16 @@ components: type: string format: cidr example: 100.64.0.0/16 + peer_expose_enabled: + description: Enables or disables peer expose. If enabled, peers can expose local services through the reverse proxy using the CLI. + type: boolean + example: false + peer_expose_groups: + description: Limits which peer groups are allowed to expose services. If empty, all peers are allowed when peer expose is enabled. + type: array + items: + type: string + example: ch8i4ug6lnn4g9hqv7m0 extra: $ref: '#/components/schemas/AccountExtraSettings' lazy_connection_enabled: @@ -353,6 +363,8 @@ components: - peer_inactivity_expiration_enabled - peer_inactivity_expiration - regular_users_view_blocked + - peer_expose_enabled + - peer_expose_groups AccountExtraSettings: type: object properties: diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index 7a7e75855..e53b876c2 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -512,6 +512,12 @@ type AccountSettings struct { // NetworkRange Allows to define a custom network range for the account in CIDR format NetworkRange *string `json:"network_range,omitempty"` + // PeerExposeEnabled Enables or disables peer expose. If enabled, peers can expose local services through the reverse proxy using the CLI. + PeerExposeEnabled bool `json:"peer_expose_enabled"` + + // PeerExposeGroups Limits which peer groups are allowed to expose services. If empty, all peers are allowed when peer expose is enabled. + PeerExposeGroups []string `json:"peer_expose_groups"` + // PeerInactivityExpiration Period of time of inactivity after which peer session expires (seconds). PeerInactivityExpiration int `json:"peer_inactivity_expiration"` diff --git a/shared/management/proto/management.pb.go b/shared/management/proto/management.pb.go index 44838fc16..97a2a4d18 100644 --- a/shared/management/proto/management.pb.go +++ b/shared/management/proto/management.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v6.33.0 +// protoc v6.33.3 // source: management.proto package proto @@ -221,6 +221,58 @@ func (RuleAction) EnumDescriptor() ([]byte, []int) { return file_management_proto_rawDescGZIP(), []int{3} } +type ExposeProtocol int32 + +const ( + ExposeProtocol_EXPOSE_HTTP ExposeProtocol = 0 + ExposeProtocol_EXPOSE_HTTPS ExposeProtocol = 1 + ExposeProtocol_EXPOSE_TCP ExposeProtocol = 2 + ExposeProtocol_EXPOSE_UDP ExposeProtocol = 3 +) + +// Enum value maps for ExposeProtocol. +var ( + ExposeProtocol_name = map[int32]string{ + 0: "EXPOSE_HTTP", + 1: "EXPOSE_HTTPS", + 2: "EXPOSE_TCP", + 3: "EXPOSE_UDP", + } + ExposeProtocol_value = map[string]int32{ + "EXPOSE_HTTP": 0, + "EXPOSE_HTTPS": 1, + "EXPOSE_TCP": 2, + "EXPOSE_UDP": 3, + } +) + +func (x ExposeProtocol) Enum() *ExposeProtocol { + p := new(ExposeProtocol) + *p = x + return p +} + +func (x ExposeProtocol) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ExposeProtocol) Descriptor() protoreflect.EnumDescriptor { + return file_management_proto_enumTypes[4].Descriptor() +} + +func (ExposeProtocol) Type() protoreflect.EnumType { + return &file_management_proto_enumTypes[4] +} + +func (x ExposeProtocol) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ExposeProtocol.Descriptor instead. +func (ExposeProtocol) EnumDescriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{4} +} + type HostConfig_Protocol int32 const ( @@ -260,11 +312,11 @@ func (x HostConfig_Protocol) String() string { } func (HostConfig_Protocol) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[4].Descriptor() + return file_management_proto_enumTypes[5].Descriptor() } func (HostConfig_Protocol) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[4] + return &file_management_proto_enumTypes[5] } func (x HostConfig_Protocol) Number() protoreflect.EnumNumber { @@ -303,11 +355,11 @@ func (x DeviceAuthorizationFlowProvider) String() string { } func (DeviceAuthorizationFlowProvider) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[5].Descriptor() + return file_management_proto_enumTypes[6].Descriptor() } func (DeviceAuthorizationFlowProvider) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[5] + return &file_management_proto_enumTypes[6] } func (x DeviceAuthorizationFlowProvider) Number() protoreflect.EnumNumber { @@ -3983,6 +4035,334 @@ func (x *ForwardingRule) GetTranslatedPort() *PortInfo { return nil } +type ExposeServiceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Port uint32 `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"` + Protocol ExposeProtocol `protobuf:"varint,2,opt,name=protocol,proto3,enum=management.ExposeProtocol" json:"protocol,omitempty"` + Pin string `protobuf:"bytes,3,opt,name=pin,proto3" json:"pin,omitempty"` + Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password,omitempty"` + UserGroups []string `protobuf:"bytes,5,rep,name=user_groups,json=userGroups,proto3" json:"user_groups,omitempty"` + Domain string `protobuf:"bytes,6,opt,name=domain,proto3" json:"domain,omitempty"` + NamePrefix string `protobuf:"bytes,7,opt,name=name_prefix,json=namePrefix,proto3" json:"name_prefix,omitempty"` +} + +func (x *ExposeServiceRequest) Reset() { + *x = ExposeServiceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExposeServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExposeServiceRequest) ProtoMessage() {} + +func (x *ExposeServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExposeServiceRequest.ProtoReflect.Descriptor instead. +func (*ExposeServiceRequest) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{47} +} + +func (x *ExposeServiceRequest) GetPort() uint32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *ExposeServiceRequest) GetProtocol() ExposeProtocol { + if x != nil { + return x.Protocol + } + return ExposeProtocol_EXPOSE_HTTP +} + +func (x *ExposeServiceRequest) GetPin() string { + if x != nil { + return x.Pin + } + return "" +} + +func (x *ExposeServiceRequest) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +func (x *ExposeServiceRequest) GetUserGroups() []string { + if x != nil { + return x.UserGroups + } + return nil +} + +func (x *ExposeServiceRequest) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *ExposeServiceRequest) GetNamePrefix() string { + if x != nil { + return x.NamePrefix + } + return "" +} + +type ExposeServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + ServiceUrl string `protobuf:"bytes,2,opt,name=service_url,json=serviceUrl,proto3" json:"service_url,omitempty"` + Domain string `protobuf:"bytes,3,opt,name=domain,proto3" json:"domain,omitempty"` +} + +func (x *ExposeServiceResponse) Reset() { + *x = ExposeServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExposeServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExposeServiceResponse) ProtoMessage() {} + +func (x *ExposeServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExposeServiceResponse.ProtoReflect.Descriptor instead. +func (*ExposeServiceResponse) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{48} +} + +func (x *ExposeServiceResponse) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *ExposeServiceResponse) GetServiceUrl() string { + if x != nil { + return x.ServiceUrl + } + return "" +} + +func (x *ExposeServiceResponse) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +type RenewExposeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` +} + +func (x *RenewExposeRequest) Reset() { + *x = RenewExposeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RenewExposeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RenewExposeRequest) ProtoMessage() {} + +func (x *RenewExposeRequest) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RenewExposeRequest.ProtoReflect.Descriptor instead. +func (*RenewExposeRequest) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{49} +} + +func (x *RenewExposeRequest) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +type RenewExposeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RenewExposeResponse) Reset() { + *x = RenewExposeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RenewExposeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RenewExposeResponse) ProtoMessage() {} + +func (x *RenewExposeResponse) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RenewExposeResponse.ProtoReflect.Descriptor instead. +func (*RenewExposeResponse) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{50} +} + +type StopExposeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` +} + +func (x *StopExposeRequest) Reset() { + *x = StopExposeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopExposeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopExposeRequest) ProtoMessage() {} + +func (x *StopExposeRequest) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopExposeRequest.ProtoReflect.Descriptor instead. +func (*StopExposeRequest) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{51} +} + +func (x *StopExposeRequest) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +type StopExposeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StopExposeResponse) Reset() { + *x = StopExposeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopExposeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopExposeResponse) ProtoMessage() {} + +func (x *StopExposeResponse) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopExposeResponse.ProtoReflect.Descriptor instead. +func (*StopExposeResponse) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{52} +} + type PortInfo_Range struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3995,7 +4375,7 @@ type PortInfo_Range struct { func (x *PortInfo_Range) Reset() { *x = PortInfo_Range{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[48] + mi := &file_management_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4008,7 +4388,7 @@ func (x *PortInfo_Range) String() string { func (*PortInfo_Range) ProtoMessage() {} func (x *PortInfo_Range) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[48] + mi := &file_management_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4616,62 +4996,113 @@ var file_management_proto_rawDesc = []byte{ 0x64, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x72, - 0x74, 0x2a, 0x3a, 0x0a, 0x09, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, - 0x0a, 0x0e, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x10, - 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x02, 0x2a, 0x4c, 0x0a, - 0x0c, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0b, 0x0a, - 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, - 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, - 0x55, 0x44, 0x50, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x43, 0x4d, 0x50, 0x10, 0x04, 0x12, - 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x05, 0x2a, 0x20, 0x0a, 0x0d, 0x52, - 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, - 0x49, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x22, 0x0a, - 0x0a, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, - 0x43, 0x43, 0x45, 0x50, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, 0x10, - 0x01, 0x32, 0x96, 0x05, 0x0a, 0x11, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, - 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x46, - 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x74, 0x22, 0xea, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x36, + 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, + 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x47, + 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1f, 0x0a, + 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x73, + 0x0a, 0x15, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x22, 0x2c, 0x0a, 0x12, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, + 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x70, + 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x14, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x70, 0x45, 0x78, 0x70, + 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0x3a, 0x0a, 0x09, 0x4a, + 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x0e, 0x75, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, + 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x66, + 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x02, 0x2a, 0x4c, 0x0a, 0x0c, 0x52, 0x75, 0x6c, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, + 0x03, 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x03, 0x12, + 0x08, 0x0a, 0x04, 0x49, 0x43, 0x4d, 0x50, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, + 0x54, 0x4f, 0x4d, 0x10, 0x05, 0x2a, 0x20, 0x0a, 0x0d, 0x52, 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x22, 0x0a, 0x0a, 0x52, 0x75, 0x6c, 0x65, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x10, + 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, 0x10, 0x01, 0x2a, 0x53, 0x0a, 0x0e, 0x45, + 0x78, 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0f, 0x0a, + 0x0b, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x10, 0x00, 0x12, 0x10, + 0x0a, 0x0c, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x01, + 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, 0x43, 0x50, 0x10, 0x02, + 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x55, 0x44, 0x50, 0x10, 0x03, + 0x32, 0xfd, 0x06, 0x0a, 0x11, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, + 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, + 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x09, 0x69, 0x73, 0x48, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x5a, + 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, + 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x18, 0x47, 0x65, + 0x74, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x09, 0x69, 0x73, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, - 0x5a, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, + 0x67, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, 0x61, + 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x18, 0x47, - 0x65, 0x74, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, + 0x12, 0x47, 0x0a, 0x03, 0x4a, 0x6f, 0x62, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, - 0x61, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, - 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, 0x1c, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, - 0x00, 0x12, 0x47, 0x0a, 0x03, 0x4a, 0x6f, 0x62, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x0c, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0b, 0x52, 0x65, 0x6e, 0x65, 0x77, + 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0a, 0x53, 0x74, 0x6f, 0x70, 0x45, 0x78, 0x70, 0x6f, + 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, + 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -4686,152 +5117,166 @@ func file_management_proto_rawDescGZIP() []byte { return file_management_proto_rawDescData } -var file_management_proto_enumTypes = make([]protoimpl.EnumInfo, 6) -var file_management_proto_msgTypes = make([]protoimpl.MessageInfo, 49) +var file_management_proto_enumTypes = make([]protoimpl.EnumInfo, 7) +var file_management_proto_msgTypes = make([]protoimpl.MessageInfo, 55) var file_management_proto_goTypes = []interface{}{ (JobStatus)(0), // 0: management.JobStatus (RuleProtocol)(0), // 1: management.RuleProtocol (RuleDirection)(0), // 2: management.RuleDirection (RuleAction)(0), // 3: management.RuleAction - (HostConfig_Protocol)(0), // 4: management.HostConfig.Protocol - (DeviceAuthorizationFlowProvider)(0), // 5: management.DeviceAuthorizationFlow.provider - (*EncryptedMessage)(nil), // 6: management.EncryptedMessage - (*JobRequest)(nil), // 7: management.JobRequest - (*JobResponse)(nil), // 8: management.JobResponse - (*BundleParameters)(nil), // 9: management.BundleParameters - (*BundleResult)(nil), // 10: management.BundleResult - (*SyncRequest)(nil), // 11: management.SyncRequest - (*SyncResponse)(nil), // 12: management.SyncResponse - (*SyncMetaRequest)(nil), // 13: management.SyncMetaRequest - (*LoginRequest)(nil), // 14: management.LoginRequest - (*PeerKeys)(nil), // 15: management.PeerKeys - (*Environment)(nil), // 16: management.Environment - (*File)(nil), // 17: management.File - (*Flags)(nil), // 18: management.Flags - (*PeerSystemMeta)(nil), // 19: management.PeerSystemMeta - (*LoginResponse)(nil), // 20: management.LoginResponse - (*ServerKeyResponse)(nil), // 21: management.ServerKeyResponse - (*Empty)(nil), // 22: management.Empty - (*NetbirdConfig)(nil), // 23: management.NetbirdConfig - (*HostConfig)(nil), // 24: management.HostConfig - (*RelayConfig)(nil), // 25: management.RelayConfig - (*FlowConfig)(nil), // 26: management.FlowConfig - (*JWTConfig)(nil), // 27: management.JWTConfig - (*ProtectedHostConfig)(nil), // 28: management.ProtectedHostConfig - (*PeerConfig)(nil), // 29: management.PeerConfig - (*AutoUpdateSettings)(nil), // 30: management.AutoUpdateSettings - (*NetworkMap)(nil), // 31: management.NetworkMap - (*SSHAuth)(nil), // 32: management.SSHAuth - (*MachineUserIndexes)(nil), // 33: management.MachineUserIndexes - (*RemotePeerConfig)(nil), // 34: management.RemotePeerConfig - (*SSHConfig)(nil), // 35: management.SSHConfig - (*DeviceAuthorizationFlowRequest)(nil), // 36: management.DeviceAuthorizationFlowRequest - (*DeviceAuthorizationFlow)(nil), // 37: management.DeviceAuthorizationFlow - (*PKCEAuthorizationFlowRequest)(nil), // 38: management.PKCEAuthorizationFlowRequest - (*PKCEAuthorizationFlow)(nil), // 39: management.PKCEAuthorizationFlow - (*ProviderConfig)(nil), // 40: management.ProviderConfig - (*Route)(nil), // 41: management.Route - (*DNSConfig)(nil), // 42: management.DNSConfig - (*CustomZone)(nil), // 43: management.CustomZone - (*SimpleRecord)(nil), // 44: management.SimpleRecord - (*NameServerGroup)(nil), // 45: management.NameServerGroup - (*NameServer)(nil), // 46: management.NameServer - (*FirewallRule)(nil), // 47: management.FirewallRule - (*NetworkAddress)(nil), // 48: management.NetworkAddress - (*Checks)(nil), // 49: management.Checks - (*PortInfo)(nil), // 50: management.PortInfo - (*RouteFirewallRule)(nil), // 51: management.RouteFirewallRule - (*ForwardingRule)(nil), // 52: management.ForwardingRule - nil, // 53: management.SSHAuth.MachineUsersEntry - (*PortInfo_Range)(nil), // 54: management.PortInfo.Range - (*timestamppb.Timestamp)(nil), // 55: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 56: google.protobuf.Duration + (ExposeProtocol)(0), // 4: management.ExposeProtocol + (HostConfig_Protocol)(0), // 5: management.HostConfig.Protocol + (DeviceAuthorizationFlowProvider)(0), // 6: management.DeviceAuthorizationFlow.provider + (*EncryptedMessage)(nil), // 7: management.EncryptedMessage + (*JobRequest)(nil), // 8: management.JobRequest + (*JobResponse)(nil), // 9: management.JobResponse + (*BundleParameters)(nil), // 10: management.BundleParameters + (*BundleResult)(nil), // 11: management.BundleResult + (*SyncRequest)(nil), // 12: management.SyncRequest + (*SyncResponse)(nil), // 13: management.SyncResponse + (*SyncMetaRequest)(nil), // 14: management.SyncMetaRequest + (*LoginRequest)(nil), // 15: management.LoginRequest + (*PeerKeys)(nil), // 16: management.PeerKeys + (*Environment)(nil), // 17: management.Environment + (*File)(nil), // 18: management.File + (*Flags)(nil), // 19: management.Flags + (*PeerSystemMeta)(nil), // 20: management.PeerSystemMeta + (*LoginResponse)(nil), // 21: management.LoginResponse + (*ServerKeyResponse)(nil), // 22: management.ServerKeyResponse + (*Empty)(nil), // 23: management.Empty + (*NetbirdConfig)(nil), // 24: management.NetbirdConfig + (*HostConfig)(nil), // 25: management.HostConfig + (*RelayConfig)(nil), // 26: management.RelayConfig + (*FlowConfig)(nil), // 27: management.FlowConfig + (*JWTConfig)(nil), // 28: management.JWTConfig + (*ProtectedHostConfig)(nil), // 29: management.ProtectedHostConfig + (*PeerConfig)(nil), // 30: management.PeerConfig + (*AutoUpdateSettings)(nil), // 31: management.AutoUpdateSettings + (*NetworkMap)(nil), // 32: management.NetworkMap + (*SSHAuth)(nil), // 33: management.SSHAuth + (*MachineUserIndexes)(nil), // 34: management.MachineUserIndexes + (*RemotePeerConfig)(nil), // 35: management.RemotePeerConfig + (*SSHConfig)(nil), // 36: management.SSHConfig + (*DeviceAuthorizationFlowRequest)(nil), // 37: management.DeviceAuthorizationFlowRequest + (*DeviceAuthorizationFlow)(nil), // 38: management.DeviceAuthorizationFlow + (*PKCEAuthorizationFlowRequest)(nil), // 39: management.PKCEAuthorizationFlowRequest + (*PKCEAuthorizationFlow)(nil), // 40: management.PKCEAuthorizationFlow + (*ProviderConfig)(nil), // 41: management.ProviderConfig + (*Route)(nil), // 42: management.Route + (*DNSConfig)(nil), // 43: management.DNSConfig + (*CustomZone)(nil), // 44: management.CustomZone + (*SimpleRecord)(nil), // 45: management.SimpleRecord + (*NameServerGroup)(nil), // 46: management.NameServerGroup + (*NameServer)(nil), // 47: management.NameServer + (*FirewallRule)(nil), // 48: management.FirewallRule + (*NetworkAddress)(nil), // 49: management.NetworkAddress + (*Checks)(nil), // 50: management.Checks + (*PortInfo)(nil), // 51: management.PortInfo + (*RouteFirewallRule)(nil), // 52: management.RouteFirewallRule + (*ForwardingRule)(nil), // 53: management.ForwardingRule + (*ExposeServiceRequest)(nil), // 54: management.ExposeServiceRequest + (*ExposeServiceResponse)(nil), // 55: management.ExposeServiceResponse + (*RenewExposeRequest)(nil), // 56: management.RenewExposeRequest + (*RenewExposeResponse)(nil), // 57: management.RenewExposeResponse + (*StopExposeRequest)(nil), // 58: management.StopExposeRequest + (*StopExposeResponse)(nil), // 59: management.StopExposeResponse + nil, // 60: management.SSHAuth.MachineUsersEntry + (*PortInfo_Range)(nil), // 61: management.PortInfo.Range + (*timestamppb.Timestamp)(nil), // 62: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 63: google.protobuf.Duration } var file_management_proto_depIdxs = []int32{ - 9, // 0: management.JobRequest.bundle:type_name -> management.BundleParameters + 10, // 0: management.JobRequest.bundle:type_name -> management.BundleParameters 0, // 1: management.JobResponse.status:type_name -> management.JobStatus - 10, // 2: management.JobResponse.bundle:type_name -> management.BundleResult - 19, // 3: management.SyncRequest.meta:type_name -> management.PeerSystemMeta - 23, // 4: management.SyncResponse.netbirdConfig:type_name -> management.NetbirdConfig - 29, // 5: management.SyncResponse.peerConfig:type_name -> management.PeerConfig - 34, // 6: management.SyncResponse.remotePeers:type_name -> management.RemotePeerConfig - 31, // 7: management.SyncResponse.NetworkMap:type_name -> management.NetworkMap - 49, // 8: management.SyncResponse.Checks:type_name -> management.Checks - 19, // 9: management.SyncMetaRequest.meta:type_name -> management.PeerSystemMeta - 19, // 10: management.LoginRequest.meta:type_name -> management.PeerSystemMeta - 15, // 11: management.LoginRequest.peerKeys:type_name -> management.PeerKeys - 48, // 12: management.PeerSystemMeta.networkAddresses:type_name -> management.NetworkAddress - 16, // 13: management.PeerSystemMeta.environment:type_name -> management.Environment - 17, // 14: management.PeerSystemMeta.files:type_name -> management.File - 18, // 15: management.PeerSystemMeta.flags:type_name -> management.Flags - 23, // 16: management.LoginResponse.netbirdConfig:type_name -> management.NetbirdConfig - 29, // 17: management.LoginResponse.peerConfig:type_name -> management.PeerConfig - 49, // 18: management.LoginResponse.Checks:type_name -> management.Checks - 55, // 19: management.ServerKeyResponse.expiresAt:type_name -> google.protobuf.Timestamp - 24, // 20: management.NetbirdConfig.stuns:type_name -> management.HostConfig - 28, // 21: management.NetbirdConfig.turns:type_name -> management.ProtectedHostConfig - 24, // 22: management.NetbirdConfig.signal:type_name -> management.HostConfig - 25, // 23: management.NetbirdConfig.relay:type_name -> management.RelayConfig - 26, // 24: management.NetbirdConfig.flow:type_name -> management.FlowConfig - 4, // 25: management.HostConfig.protocol:type_name -> management.HostConfig.Protocol - 56, // 26: management.FlowConfig.interval:type_name -> google.protobuf.Duration - 24, // 27: management.ProtectedHostConfig.hostConfig:type_name -> management.HostConfig - 35, // 28: management.PeerConfig.sshConfig:type_name -> management.SSHConfig - 30, // 29: management.PeerConfig.autoUpdate:type_name -> management.AutoUpdateSettings - 29, // 30: management.NetworkMap.peerConfig:type_name -> management.PeerConfig - 34, // 31: management.NetworkMap.remotePeers:type_name -> management.RemotePeerConfig - 41, // 32: management.NetworkMap.Routes:type_name -> management.Route - 42, // 33: management.NetworkMap.DNSConfig:type_name -> management.DNSConfig - 34, // 34: management.NetworkMap.offlinePeers:type_name -> management.RemotePeerConfig - 47, // 35: management.NetworkMap.FirewallRules:type_name -> management.FirewallRule - 51, // 36: management.NetworkMap.routesFirewallRules:type_name -> management.RouteFirewallRule - 52, // 37: management.NetworkMap.forwardingRules:type_name -> management.ForwardingRule - 32, // 38: management.NetworkMap.sshAuth:type_name -> management.SSHAuth - 53, // 39: management.SSHAuth.machine_users:type_name -> management.SSHAuth.MachineUsersEntry - 35, // 40: management.RemotePeerConfig.sshConfig:type_name -> management.SSHConfig - 27, // 41: management.SSHConfig.jwtConfig:type_name -> management.JWTConfig - 5, // 42: management.DeviceAuthorizationFlow.Provider:type_name -> management.DeviceAuthorizationFlow.provider - 40, // 43: management.DeviceAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig - 40, // 44: management.PKCEAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig - 45, // 45: management.DNSConfig.NameServerGroups:type_name -> management.NameServerGroup - 43, // 46: management.DNSConfig.CustomZones:type_name -> management.CustomZone - 44, // 47: management.CustomZone.Records:type_name -> management.SimpleRecord - 46, // 48: management.NameServerGroup.NameServers:type_name -> management.NameServer + 11, // 2: management.JobResponse.bundle:type_name -> management.BundleResult + 20, // 3: management.SyncRequest.meta:type_name -> management.PeerSystemMeta + 24, // 4: management.SyncResponse.netbirdConfig:type_name -> management.NetbirdConfig + 30, // 5: management.SyncResponse.peerConfig:type_name -> management.PeerConfig + 35, // 6: management.SyncResponse.remotePeers:type_name -> management.RemotePeerConfig + 32, // 7: management.SyncResponse.NetworkMap:type_name -> management.NetworkMap + 50, // 8: management.SyncResponse.Checks:type_name -> management.Checks + 20, // 9: management.SyncMetaRequest.meta:type_name -> management.PeerSystemMeta + 20, // 10: management.LoginRequest.meta:type_name -> management.PeerSystemMeta + 16, // 11: management.LoginRequest.peerKeys:type_name -> management.PeerKeys + 49, // 12: management.PeerSystemMeta.networkAddresses:type_name -> management.NetworkAddress + 17, // 13: management.PeerSystemMeta.environment:type_name -> management.Environment + 18, // 14: management.PeerSystemMeta.files:type_name -> management.File + 19, // 15: management.PeerSystemMeta.flags:type_name -> management.Flags + 24, // 16: management.LoginResponse.netbirdConfig:type_name -> management.NetbirdConfig + 30, // 17: management.LoginResponse.peerConfig:type_name -> management.PeerConfig + 50, // 18: management.LoginResponse.Checks:type_name -> management.Checks + 62, // 19: management.ServerKeyResponse.expiresAt:type_name -> google.protobuf.Timestamp + 25, // 20: management.NetbirdConfig.stuns:type_name -> management.HostConfig + 29, // 21: management.NetbirdConfig.turns:type_name -> management.ProtectedHostConfig + 25, // 22: management.NetbirdConfig.signal:type_name -> management.HostConfig + 26, // 23: management.NetbirdConfig.relay:type_name -> management.RelayConfig + 27, // 24: management.NetbirdConfig.flow:type_name -> management.FlowConfig + 5, // 25: management.HostConfig.protocol:type_name -> management.HostConfig.Protocol + 63, // 26: management.FlowConfig.interval:type_name -> google.protobuf.Duration + 25, // 27: management.ProtectedHostConfig.hostConfig:type_name -> management.HostConfig + 36, // 28: management.PeerConfig.sshConfig:type_name -> management.SSHConfig + 31, // 29: management.PeerConfig.autoUpdate:type_name -> management.AutoUpdateSettings + 30, // 30: management.NetworkMap.peerConfig:type_name -> management.PeerConfig + 35, // 31: management.NetworkMap.remotePeers:type_name -> management.RemotePeerConfig + 42, // 32: management.NetworkMap.Routes:type_name -> management.Route + 43, // 33: management.NetworkMap.DNSConfig:type_name -> management.DNSConfig + 35, // 34: management.NetworkMap.offlinePeers:type_name -> management.RemotePeerConfig + 48, // 35: management.NetworkMap.FirewallRules:type_name -> management.FirewallRule + 52, // 36: management.NetworkMap.routesFirewallRules:type_name -> management.RouteFirewallRule + 53, // 37: management.NetworkMap.forwardingRules:type_name -> management.ForwardingRule + 33, // 38: management.NetworkMap.sshAuth:type_name -> management.SSHAuth + 60, // 39: management.SSHAuth.machine_users:type_name -> management.SSHAuth.MachineUsersEntry + 36, // 40: management.RemotePeerConfig.sshConfig:type_name -> management.SSHConfig + 28, // 41: management.SSHConfig.jwtConfig:type_name -> management.JWTConfig + 6, // 42: management.DeviceAuthorizationFlow.Provider:type_name -> management.DeviceAuthorizationFlow.provider + 41, // 43: management.DeviceAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig + 41, // 44: management.PKCEAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig + 46, // 45: management.DNSConfig.NameServerGroups:type_name -> management.NameServerGroup + 44, // 46: management.DNSConfig.CustomZones:type_name -> management.CustomZone + 45, // 47: management.CustomZone.Records:type_name -> management.SimpleRecord + 47, // 48: management.NameServerGroup.NameServers:type_name -> management.NameServer 2, // 49: management.FirewallRule.Direction:type_name -> management.RuleDirection 3, // 50: management.FirewallRule.Action:type_name -> management.RuleAction 1, // 51: management.FirewallRule.Protocol:type_name -> management.RuleProtocol - 50, // 52: management.FirewallRule.PortInfo:type_name -> management.PortInfo - 54, // 53: management.PortInfo.range:type_name -> management.PortInfo.Range + 51, // 52: management.FirewallRule.PortInfo:type_name -> management.PortInfo + 61, // 53: management.PortInfo.range:type_name -> management.PortInfo.Range 3, // 54: management.RouteFirewallRule.action:type_name -> management.RuleAction 1, // 55: management.RouteFirewallRule.protocol:type_name -> management.RuleProtocol - 50, // 56: management.RouteFirewallRule.portInfo:type_name -> management.PortInfo + 51, // 56: management.RouteFirewallRule.portInfo:type_name -> management.PortInfo 1, // 57: management.ForwardingRule.protocol:type_name -> management.RuleProtocol - 50, // 58: management.ForwardingRule.destinationPort:type_name -> management.PortInfo - 50, // 59: management.ForwardingRule.translatedPort:type_name -> management.PortInfo - 33, // 60: management.SSHAuth.MachineUsersEntry.value:type_name -> management.MachineUserIndexes - 6, // 61: management.ManagementService.Login:input_type -> management.EncryptedMessage - 6, // 62: management.ManagementService.Sync:input_type -> management.EncryptedMessage - 22, // 63: management.ManagementService.GetServerKey:input_type -> management.Empty - 22, // 64: management.ManagementService.isHealthy:input_type -> management.Empty - 6, // 65: management.ManagementService.GetDeviceAuthorizationFlow:input_type -> management.EncryptedMessage - 6, // 66: management.ManagementService.GetPKCEAuthorizationFlow:input_type -> management.EncryptedMessage - 6, // 67: management.ManagementService.SyncMeta:input_type -> management.EncryptedMessage - 6, // 68: management.ManagementService.Logout:input_type -> management.EncryptedMessage - 6, // 69: management.ManagementService.Job:input_type -> management.EncryptedMessage - 6, // 70: management.ManagementService.Login:output_type -> management.EncryptedMessage - 6, // 71: management.ManagementService.Sync:output_type -> management.EncryptedMessage - 21, // 72: management.ManagementService.GetServerKey:output_type -> management.ServerKeyResponse - 22, // 73: management.ManagementService.isHealthy:output_type -> management.Empty - 6, // 74: management.ManagementService.GetDeviceAuthorizationFlow:output_type -> management.EncryptedMessage - 6, // 75: management.ManagementService.GetPKCEAuthorizationFlow:output_type -> management.EncryptedMessage - 22, // 76: management.ManagementService.SyncMeta:output_type -> management.Empty - 22, // 77: management.ManagementService.Logout:output_type -> management.Empty - 6, // 78: management.ManagementService.Job:output_type -> management.EncryptedMessage - 70, // [70:79] is the sub-list for method output_type - 61, // [61:70] is the sub-list for method input_type - 61, // [61:61] is the sub-list for extension type_name - 61, // [61:61] is the sub-list for extension extendee - 0, // [0:61] is the sub-list for field type_name + 51, // 58: management.ForwardingRule.destinationPort:type_name -> management.PortInfo + 51, // 59: management.ForwardingRule.translatedPort:type_name -> management.PortInfo + 4, // 60: management.ExposeServiceRequest.protocol:type_name -> management.ExposeProtocol + 34, // 61: management.SSHAuth.MachineUsersEntry.value:type_name -> management.MachineUserIndexes + 7, // 62: management.ManagementService.Login:input_type -> management.EncryptedMessage + 7, // 63: management.ManagementService.Sync:input_type -> management.EncryptedMessage + 23, // 64: management.ManagementService.GetServerKey:input_type -> management.Empty + 23, // 65: management.ManagementService.isHealthy:input_type -> management.Empty + 7, // 66: management.ManagementService.GetDeviceAuthorizationFlow:input_type -> management.EncryptedMessage + 7, // 67: management.ManagementService.GetPKCEAuthorizationFlow:input_type -> management.EncryptedMessage + 7, // 68: management.ManagementService.SyncMeta:input_type -> management.EncryptedMessage + 7, // 69: management.ManagementService.Logout:input_type -> management.EncryptedMessage + 7, // 70: management.ManagementService.Job:input_type -> management.EncryptedMessage + 7, // 71: management.ManagementService.CreateExpose:input_type -> management.EncryptedMessage + 7, // 72: management.ManagementService.RenewExpose:input_type -> management.EncryptedMessage + 7, // 73: management.ManagementService.StopExpose:input_type -> management.EncryptedMessage + 7, // 74: management.ManagementService.Login:output_type -> management.EncryptedMessage + 7, // 75: management.ManagementService.Sync:output_type -> management.EncryptedMessage + 22, // 76: management.ManagementService.GetServerKey:output_type -> management.ServerKeyResponse + 23, // 77: management.ManagementService.isHealthy:output_type -> management.Empty + 7, // 78: management.ManagementService.GetDeviceAuthorizationFlow:output_type -> management.EncryptedMessage + 7, // 79: management.ManagementService.GetPKCEAuthorizationFlow:output_type -> management.EncryptedMessage + 23, // 80: management.ManagementService.SyncMeta:output_type -> management.Empty + 23, // 81: management.ManagementService.Logout:output_type -> management.Empty + 7, // 82: management.ManagementService.Job:output_type -> management.EncryptedMessage + 7, // 83: management.ManagementService.CreateExpose:output_type -> management.EncryptedMessage + 7, // 84: management.ManagementService.RenewExpose:output_type -> management.EncryptedMessage + 7, // 85: management.ManagementService.StopExpose:output_type -> management.EncryptedMessage + 74, // [74:86] is the sub-list for method output_type + 62, // [62:74] is the sub-list for method input_type + 62, // [62:62] is the sub-list for extension type_name + 62, // [62:62] is the sub-list for extension extendee + 0, // [0:62] is the sub-list for field type_name } func init() { file_management_proto_init() } @@ -5404,7 +5849,79 @@ func file_management_proto_init() { return nil } } + file_management_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExposeServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } file_management_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExposeServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_management_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RenewExposeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_management_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RenewExposeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_management_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopExposeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_management_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopExposeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_management_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PortInfo_Range); i { case 0: return &v.state @@ -5432,8 +5949,8 @@ func file_management_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_management_proto_rawDesc, - NumEnums: 6, - NumMessages: 49, + NumEnums: 7, + NumMessages: 55, NumExtensions: 0, NumServices: 1, }, diff --git a/shared/management/proto/management.proto b/shared/management/proto/management.proto index d97d66819..3667ae27f 100644 --- a/shared/management/proto/management.proto +++ b/shared/management/proto/management.proto @@ -51,6 +51,15 @@ service ManagementService { // Executes a job on a target peer (e.g., debug bundle) rpc Job(stream EncryptedMessage) returns (stream EncryptedMessage) {} + + // CreateExpose creates a temporary reverse proxy service for a peer + rpc CreateExpose(EncryptedMessage) returns (EncryptedMessage) {} + + // RenewExpose extends the TTL of an active expose session + rpc RenewExpose(EncryptedMessage) returns (EncryptedMessage) {} + + // StopExpose terminates an active expose session + rpc StopExpose(EncryptedMessage) returns (EncryptedMessage) {} } message EncryptedMessage { @@ -637,3 +646,38 @@ message ForwardingRule { // Translated port information, where the traffic should be forwarded to PortInfo translatedPort = 4; } + +enum ExposeProtocol { + EXPOSE_HTTP = 0; + EXPOSE_HTTPS = 1; + EXPOSE_TCP = 2; + EXPOSE_UDP = 3; +} + +message ExposeServiceRequest { + uint32 port = 1; + ExposeProtocol protocol = 2; + string pin = 3; + string password = 4; + repeated string user_groups = 5; + string domain = 6; + string name_prefix = 7; +} + +message ExposeServiceResponse { + string service_name = 1; + string service_url = 2; + string domain = 3; +} + +message RenewExposeRequest { + string domain = 1; +} + +message RenewExposeResponse {} + +message StopExposeRequest { + string domain = 1; +} + +message StopExposeResponse {} diff --git a/shared/management/proto/management_grpc.pb.go b/shared/management/proto/management_grpc.pb.go index b78e21aaa..39a342041 100644 --- a/shared/management/proto/management_grpc.pb.go +++ b/shared/management/proto/management_grpc.pb.go @@ -52,6 +52,12 @@ type ManagementServiceClient interface { Logout(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*Empty, error) // Executes a job on a target peer (e.g., debug bundle) Job(ctx context.Context, opts ...grpc.CallOption) (ManagementService_JobClient, error) + // CreateExpose creates a temporary reverse proxy service for a peer + CreateExpose(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*EncryptedMessage, error) + // RenewExpose extends the TTL of an active expose session + RenewExpose(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*EncryptedMessage, error) + // StopExpose terminates an active expose session + StopExpose(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*EncryptedMessage, error) } type managementServiceClient struct { @@ -188,6 +194,33 @@ func (x *managementServiceJobClient) Recv() (*EncryptedMessage, error) { return m, nil } +func (c *managementServiceClient) CreateExpose(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*EncryptedMessage, error) { + out := new(EncryptedMessage) + err := c.cc.Invoke(ctx, "/management.ManagementService/CreateExpose", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *managementServiceClient) RenewExpose(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*EncryptedMessage, error) { + out := new(EncryptedMessage) + err := c.cc.Invoke(ctx, "/management.ManagementService/RenewExpose", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *managementServiceClient) StopExpose(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*EncryptedMessage, error) { + out := new(EncryptedMessage) + err := c.cc.Invoke(ctx, "/management.ManagementService/StopExpose", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // ManagementServiceServer is the server API for ManagementService service. // All implementations must embed UnimplementedManagementServiceServer // for forward compatibility @@ -226,6 +259,12 @@ type ManagementServiceServer interface { Logout(context.Context, *EncryptedMessage) (*Empty, error) // Executes a job on a target peer (e.g., debug bundle) Job(ManagementService_JobServer) error + // CreateExpose creates a temporary reverse proxy service for a peer + CreateExpose(context.Context, *EncryptedMessage) (*EncryptedMessage, error) + // RenewExpose extends the TTL of an active expose session + RenewExpose(context.Context, *EncryptedMessage) (*EncryptedMessage, error) + // StopExpose terminates an active expose session + StopExpose(context.Context, *EncryptedMessage) (*EncryptedMessage, error) mustEmbedUnimplementedManagementServiceServer() } @@ -260,6 +299,15 @@ func (UnimplementedManagementServiceServer) Logout(context.Context, *EncryptedMe func (UnimplementedManagementServiceServer) Job(ManagementService_JobServer) error { return status.Errorf(codes.Unimplemented, "method Job not implemented") } +func (UnimplementedManagementServiceServer) CreateExpose(context.Context, *EncryptedMessage) (*EncryptedMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateExpose not implemented") +} +func (UnimplementedManagementServiceServer) RenewExpose(context.Context, *EncryptedMessage) (*EncryptedMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method RenewExpose not implemented") +} +func (UnimplementedManagementServiceServer) StopExpose(context.Context, *EncryptedMessage) (*EncryptedMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method StopExpose not implemented") +} func (UnimplementedManagementServiceServer) mustEmbedUnimplementedManagementServiceServer() {} // UnsafeManagementServiceServer may be embedded to opt out of forward compatibility for this service. @@ -446,6 +494,60 @@ func (x *managementServiceJobServer) Recv() (*EncryptedMessage, error) { return m, nil } +func _ManagementService_CreateExpose_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EncryptedMessage) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ManagementServiceServer).CreateExpose(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.ManagementService/CreateExpose", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ManagementServiceServer).CreateExpose(ctx, req.(*EncryptedMessage)) + } + return interceptor(ctx, in, info, handler) +} + +func _ManagementService_RenewExpose_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EncryptedMessage) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ManagementServiceServer).RenewExpose(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.ManagementService/RenewExpose", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ManagementServiceServer).RenewExpose(ctx, req.(*EncryptedMessage)) + } + return interceptor(ctx, in, info, handler) +} + +func _ManagementService_StopExpose_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EncryptedMessage) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ManagementServiceServer).StopExpose(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.ManagementService/StopExpose", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ManagementServiceServer).StopExpose(ctx, req.(*EncryptedMessage)) + } + return interceptor(ctx, in, info, handler) +} + // ManagementService_ServiceDesc is the grpc.ServiceDesc for ManagementService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -481,6 +583,18 @@ var ManagementService_ServiceDesc = grpc.ServiceDesc{ MethodName: "Logout", Handler: _ManagementService_Logout_Handler, }, + { + MethodName: "CreateExpose", + Handler: _ManagementService_CreateExpose_Handler, + }, + { + MethodName: "RenewExpose", + Handler: _ManagementService_RenewExpose_Handler, + }, + { + MethodName: "StopExpose", + Handler: _ManagementService_StopExpose_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/shared/management/proto/proxy_service.pb.go b/shared/management/proto/proxy_service.pb.go index 13fcb159e..c89157eb5 100644 --- a/shared/management/proto/proxy_service.pb.go +++ b/shared/management/proto/proxy_service.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v6.33.0 +// protoc v6.33.3 // source: proxy_service.proto package proto From 89115ff76a8cc99f12542e0d4ec9e9bc9ece69cf Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 24 Feb 2026 10:35:23 +0100 Subject: [PATCH 163/374] [client] skip UAPI listener in netstack mode (#5397) In netstack (proxy) mode, the process lacks permission to create /var/run/wireguard, making the UAPI listener unnecessary and causing a misleading error log. Introduce NewUSPConfigurerNoUAPI and use it for the netstack device to avoid attempting to open the UAPI socket entirely. Also consolidate UAPI error logging to a single call site. --- client/iface/configurer/uapi.go | 4 +--- client/iface/configurer/usp.go | 8 ++++++++ client/iface/device/device_netstack.go | 2 +- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/client/iface/configurer/uapi.go b/client/iface/configurer/uapi.go index f85c7852a..d9bd9bfab 100644 --- a/client/iface/configurer/uapi.go +++ b/client/iface/configurer/uapi.go @@ -5,20 +5,18 @@ package configurer import ( "net" - log "github.com/sirupsen/logrus" "golang.zx2c4.com/wireguard/ipc" ) func openUAPI(deviceName string) (net.Listener, error) { uapiSock, err := ipc.UAPIOpen(deviceName) if err != nil { - log.Errorf("failed to open uapi socket: %v", err) return nil, err } listener, err := ipc.UAPIListen(deviceName, uapiSock) if err != nil { - log.Errorf("failed to listen on uapi socket: %v", err) + _ = uapiSock.Close() return nil, err } diff --git a/client/iface/configurer/usp.go b/client/iface/configurer/usp.go index 1298c609d..e3a96590c 100644 --- a/client/iface/configurer/usp.go +++ b/client/iface/configurer/usp.go @@ -54,6 +54,14 @@ func NewUSPConfigurer(device *device.Device, deviceName string, activityRecorder return wgCfg } +func NewUSPConfigurerNoUAPI(device *device.Device, deviceName string, activityRecorder *bind.ActivityRecorder) *WGUSPConfigurer { + return &WGUSPConfigurer{ + device: device, + deviceName: deviceName, + activityRecorder: activityRecorder, + } +} + func (c *WGUSPConfigurer) ConfigureInterface(privateKey string, port int) error { log.Debugf("adding Wireguard private key") key, err := wgtypes.ParseKey(privateKey) diff --git a/client/iface/device/device_netstack.go b/client/iface/device/device_netstack.go index e457657f7..1a92b148f 100644 --- a/client/iface/device/device_netstack.go +++ b/client/iface/device/device_netstack.go @@ -79,7 +79,7 @@ func (t *TunNetstackDevice) create() (WGConfigurer, error) { device.NewLogger(wgLogLevel(), "[netbird] "), ) - t.configurer = configurer.NewUSPConfigurer(t.device, t.name, t.bind.ActivityRecorder()) + t.configurer = configurer.NewUSPConfigurerNoUAPI(t.device, t.name, t.bind.ActivityRecorder()) err = t.configurer.ConfigureInterface(t.key, t.port) if err != nil { if cErr := tunIface.Close(); cErr != nil { From f8c0321aeee024ffc73848e09988eb46468baff9 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 24 Feb 2026 10:35:45 +0100 Subject: [PATCH 164/374] [client] Simplify DNS logging by removing domain list from log output (#5396) --- client/internal/dns/host_windows.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/internal/dns/host_windows.go b/client/internal/dns/host_windows.go index 9b7a7b52b..4a8cf8cec 100644 --- a/client/internal/dns/host_windows.go +++ b/client/internal/dns/host_windows.go @@ -277,7 +277,7 @@ func (r *registryConfigurator) addDNSMatchPolicy(domains []string, ip netip.Addr } } - log.Infof("added %d NRPT rules for %d domains. Domain list: %v", ruleIndex, len(domains), domains) + log.Infof("added %d NRPT rules for %d domains", ruleIndex, len(domains)) return ruleIndex, nil } From 327142837c0efaacd2026f83a0749b2d16bb86f4 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Tue, 24 Feb 2026 15:09:30 +0100 Subject: [PATCH 165/374] [management] Refactor expose feature: move business logic from gRPC to manager (#5435) Consolidate all expose business logic (validation, permission checks, TTL tracking, reaping) into the manager layer, making the gRPC layer a pure transport adapter that only handles proto conversion and authentication. - Add ExposeServiceRequest/ExposeServiceResponse domain types with validation in the reverseproxy package - Move expose tracker (TTL tracking, reaping, per-peer limits) from gRPC server into manager/expose_tracker.go - Internalize tracking in CreateServiceFromPeer, RenewServiceFromPeer, and new StopServiceFromPeer so callers don't manage tracker state - Untrack ephemeral services in DeleteService/DeleteAllServices to keep tracker in sync when services are deleted via API - Simplify gRPC expose handlers to parse, auth, convert, delegate - Remove tracker methods from Manager interface (internal detail) --- client/internal/expose/manager.go | 2 +- .../modules/reverseproxy/interface.go | 8 +- .../modules/reverseproxy/interface_mock.go | 112 +++--- .../reverseproxy/manager/expose_tracker.go | 163 +++++++++ .../manager/expose_tracker_test.go | 256 +++++++++++++ .../modules/reverseproxy/manager/manager.go | 126 ++++++- .../reverseproxy/manager/manager_test.go | 340 ++++++++++++++---- .../modules/reverseproxy/reverseproxy.go | 154 +++++--- .../modules/reverseproxy/reverseproxy_test.go | 28 +- management/internals/server/boot.go | 7 +- .../internals/shared/grpc/expose_service.go | 229 ++++-------- .../shared/grpc/expose_service_test.go | 242 ------------- .../shared/grpc/proxy_group_access_test.go | 16 +- management/internals/shared/grpc/server.go | 2 - .../shared/grpc/validate_session_test.go | 18 +- .../proxy/auth_callback_integration_test.go | 12 +- proxy/management_integration_test.go | 16 +- 17 files changed, 1072 insertions(+), 659 deletions(-) create mode 100644 management/internals/modules/reverseproxy/manager/expose_tracker.go create mode 100644 management/internals/modules/reverseproxy/manager/expose_tracker_test.go delete mode 100644 management/internals/shared/grpc/expose_service_test.go diff --git a/client/internal/expose/manager.go b/client/internal/expose/manager.go index ba6aa6dc9..8cd93685e 100644 --- a/client/internal/expose/manager.go +++ b/client/internal/expose/manager.go @@ -58,7 +58,7 @@ func (m *Manager) Expose(ctx context.Context, req Request) (*Response, error) { } func (m *Manager) KeepAlive(ctx context.Context, domain string) error { - ticker := time.NewTicker(10 * time.Second) + ticker := time.NewTicker(30 * time.Second) defer ticker.Stop() defer m.stop(domain) diff --git a/management/internals/modules/reverseproxy/interface.go b/management/internals/modules/reverseproxy/interface.go index 95402bdf7..e7a21a24c 100644 --- a/management/internals/modules/reverseproxy/interface.go +++ b/management/internals/modules/reverseproxy/interface.go @@ -21,8 +21,8 @@ type Manager interface { GetServiceByID(ctx context.Context, accountID, serviceID string) (*Service, error) GetAccountServices(ctx context.Context, accountID string) ([]*Service, error) GetServiceIDByTargetID(ctx context.Context, accountID string, resourceID string) (string, error) - ValidateExposePermission(ctx context.Context, accountID, peerID string) error - CreateServiceFromPeer(ctx context.Context, accountID, peerID string, service *Service) (*Service, error) - DeleteServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error - ExpireServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error + CreateServiceFromPeer(ctx context.Context, accountID, peerID string, req *ExposeServiceRequest) (*ExposeServiceResponse, error) + RenewServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error + StopServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error + StartExposeReaper(ctx context.Context) } diff --git a/management/internals/modules/reverseproxy/interface_mock.go b/management/internals/modules/reverseproxy/interface_mock.go index 19a4ecfe5..893025195 100644 --- a/management/internals/modules/reverseproxy/interface_mock.go +++ b/management/internals/modules/reverseproxy/interface_mock.go @@ -49,6 +49,21 @@ func (mr *MockManagerMockRecorder) CreateService(ctx, accountID, userID, service return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateService", reflect.TypeOf((*MockManager)(nil).CreateService), ctx, accountID, userID, service) } +// CreateServiceFromPeer mocks base method. +func (m *MockManager) CreateServiceFromPeer(ctx context.Context, accountID, peerID string, req *ExposeServiceRequest) (*ExposeServiceResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateServiceFromPeer", ctx, accountID, peerID, req) + ret0, _ := ret[0].(*ExposeServiceResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateServiceFromPeer indicates an expected call of CreateServiceFromPeer. +func (mr *MockManagerMockRecorder) CreateServiceFromPeer(ctx, accountID, peerID, req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateServiceFromPeer", reflect.TypeOf((*MockManager)(nil).CreateServiceFromPeer), ctx, accountID, peerID, req) +} + // DeleteAllServices mocks base method. func (m *MockManager) DeleteAllServices(ctx context.Context, accountID, userID string) error { m.ctrl.T.Helper() @@ -63,21 +78,6 @@ func (mr *MockManagerMockRecorder) DeleteAllServices(ctx, accountID, userID inte return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllServices", reflect.TypeOf((*MockManager)(nil).DeleteAllServices), ctx, accountID, userID) } -// CreateServiceFromPeer mocks base method. -func (m *MockManager) CreateServiceFromPeer(ctx context.Context, accountID, peerID string, service *Service) (*Service, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateServiceFromPeer", ctx, accountID, peerID, service) - ret0, _ := ret[0].(*Service) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateServiceFromPeer indicates an expected call of CreateServiceFromPeer. -func (mr *MockManagerMockRecorder) CreateServiceFromPeer(ctx, accountID, peerID, service interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateServiceFromPeer", reflect.TypeOf((*MockManager)(nil).CreateServiceFromPeer), ctx, accountID, peerID, service) -} - // DeleteService mocks base method. func (m *MockManager) DeleteService(ctx context.Context, accountID, userID, serviceID string) error { m.ctrl.T.Helper() @@ -92,48 +92,6 @@ func (mr *MockManagerMockRecorder) DeleteService(ctx, accountID, userID, service return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteService", reflect.TypeOf((*MockManager)(nil).DeleteService), ctx, accountID, userID, serviceID) } -// DeleteServiceFromPeer mocks base method. -func (m *MockManager) DeleteServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteServiceFromPeer", ctx, accountID, peerID, serviceID) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteServiceFromPeer indicates an expected call of DeleteServiceFromPeer. -func (mr *MockManagerMockRecorder) DeleteServiceFromPeer(ctx, accountID, peerID, serviceID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteServiceFromPeer", reflect.TypeOf((*MockManager)(nil).DeleteServiceFromPeer), ctx, accountID, peerID, serviceID) -} - -// ExpireServiceFromPeer mocks base method. -func (m *MockManager) ExpireServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExpireServiceFromPeer", ctx, accountID, peerID, serviceID) - ret0, _ := ret[0].(error) - return ret0 -} - -// ExpireServiceFromPeer indicates an expected call of ExpireServiceFromPeer. -func (mr *MockManagerMockRecorder) ExpireServiceFromPeer(ctx, accountID, peerID, serviceID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExpireServiceFromPeer", reflect.TypeOf((*MockManager)(nil).ExpireServiceFromPeer), ctx, accountID, peerID, serviceID) -} - -// ValidateExposePermission mocks base method. -func (m *MockManager) ValidateExposePermission(ctx context.Context, accountID, peerID string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateExposePermission", ctx, accountID, peerID) - ret0, _ := ret[0].(error) - return ret0 -} - -// ValidateExposePermission indicates an expected call of ValidateExposePermission. -func (mr *MockManagerMockRecorder) ValidateExposePermission(ctx, accountID, peerID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateExposePermission", reflect.TypeOf((*MockManager)(nil).ValidateExposePermission), ctx, accountID, peerID) -} - // GetAccountServices mocks base method. func (m *MockManager) GetAccountServices(ctx context.Context, accountID string) ([]*Service, error) { m.ctrl.T.Helper() @@ -252,6 +210,20 @@ func (mr *MockManagerMockRecorder) ReloadService(ctx, accountID, serviceID inter return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReloadService", reflect.TypeOf((*MockManager)(nil).ReloadService), ctx, accountID, serviceID) } +// RenewServiceFromPeer mocks base method. +func (m *MockManager) RenewServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RenewServiceFromPeer", ctx, accountID, peerID, domain) + ret0, _ := ret[0].(error) + return ret0 +} + +// RenewServiceFromPeer indicates an expected call of RenewServiceFromPeer. +func (mr *MockManagerMockRecorder) RenewServiceFromPeer(ctx, accountID, peerID, domain interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RenewServiceFromPeer", reflect.TypeOf((*MockManager)(nil).RenewServiceFromPeer), ctx, accountID, peerID, domain) +} + // SetCertificateIssuedAt mocks base method. func (m *MockManager) SetCertificateIssuedAt(ctx context.Context, accountID, serviceID string) error { m.ctrl.T.Helper() @@ -280,6 +252,32 @@ func (mr *MockManagerMockRecorder) SetStatus(ctx, accountID, serviceID, status i return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetStatus", reflect.TypeOf((*MockManager)(nil).SetStatus), ctx, accountID, serviceID, status) } +// StartExposeReaper mocks base method. +func (m *MockManager) StartExposeReaper(ctx context.Context) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "StartExposeReaper", ctx) +} + +// StartExposeReaper indicates an expected call of StartExposeReaper. +func (mr *MockManagerMockRecorder) StartExposeReaper(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartExposeReaper", reflect.TypeOf((*MockManager)(nil).StartExposeReaper), ctx) +} + +// StopServiceFromPeer mocks base method. +func (m *MockManager) StopServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StopServiceFromPeer", ctx, accountID, peerID, domain) + ret0, _ := ret[0].(error) + return ret0 +} + +// StopServiceFromPeer indicates an expected call of StopServiceFromPeer. +func (mr *MockManagerMockRecorder) StopServiceFromPeer(ctx, accountID, peerID, domain interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopServiceFromPeer", reflect.TypeOf((*MockManager)(nil).StopServiceFromPeer), ctx, accountID, peerID, domain) +} + // UpdateService mocks base method. func (m *MockManager) UpdateService(ctx context.Context, accountID, userID string, service *Service) (*Service, error) { m.ctrl.T.Helper() diff --git a/management/internals/modules/reverseproxy/manager/expose_tracker.go b/management/internals/modules/reverseproxy/manager/expose_tracker.go new file mode 100644 index 000000000..ef285e923 --- /dev/null +++ b/management/internals/modules/reverseproxy/manager/expose_tracker.go @@ -0,0 +1,163 @@ +package manager + +import ( + "context" + "sync" + "time" + + "github.com/netbirdio/netbird/shared/management/status" + log "github.com/sirupsen/logrus" +) + +const ( + exposeTTL = 90 * time.Second + exposeReapInterval = 30 * time.Second + maxExposesPerPeer = 10 +) + +type trackedExpose struct { + mu sync.Mutex + domain string + accountID string + peerID string + lastRenewed time.Time + expiring bool +} + +type exposeTracker struct { + activeExposes sync.Map + exposeCreateMu sync.Mutex + manager *managerImpl +} + +func exposeKey(peerID, domain string) string { + return peerID + ":" + domain +} + +// TrackExposeIfAllowed atomically checks the per-peer limit and registers a new +// active expose session under the same lock. Returns (true, false) if the expose +// was already tracked (duplicate), (false, true) if tracking succeeded, and +// (false, false) if the peer has reached the limit. +func (t *exposeTracker) TrackExposeIfAllowed(peerID, domain, accountID string) (alreadyTracked, ok bool) { + t.exposeCreateMu.Lock() + defer t.exposeCreateMu.Unlock() + + key := exposeKey(peerID, domain) + _, loaded := t.activeExposes.LoadOrStore(key, &trackedExpose{ + domain: domain, + accountID: accountID, + peerID: peerID, + lastRenewed: time.Now(), + }) + if loaded { + return true, false + } + + if t.CountPeerExposes(peerID) > maxExposesPerPeer { + t.activeExposes.Delete(key) + return false, false + } + + return false, true +} + +// UntrackExpose removes an active expose session from tracking. +func (t *exposeTracker) UntrackExpose(peerID, domain string) { + t.activeExposes.Delete(exposeKey(peerID, domain)) +} + +// CountPeerExposes returns the number of active expose sessions for a peer. +func (t *exposeTracker) CountPeerExposes(peerID string) int { + count := 0 + t.activeExposes.Range(func(_, val any) bool { + if expose := val.(*trackedExpose); expose.peerID == peerID { + count++ + } + return true + }) + return count +} + +// MaxExposesPerPeer returns the maximum number of concurrent exposes allowed per peer. +func (t *exposeTracker) MaxExposesPerPeer() int { + return maxExposesPerPeer +} + +// RenewTrackedExpose updates the in-memory lastRenewed timestamp for a tracked expose. +// Returns false if the expose is not tracked or is being reaped. +func (t *exposeTracker) RenewTrackedExpose(peerID, domain string) bool { + key := exposeKey(peerID, domain) + val, ok := t.activeExposes.Load(key) + if !ok { + return false + } + + expose := val.(*trackedExpose) + expose.mu.Lock() + if expose.expiring { + expose.mu.Unlock() + return false + } + expose.lastRenewed = time.Now() + expose.mu.Unlock() + + return true +} + +// StopTrackedExpose removes an active expose session from tracking. +// Returns false if the expose was not tracked. +func (t *exposeTracker) StopTrackedExpose(peerID, domain string) bool { + key := exposeKey(peerID, domain) + _, ok := t.activeExposes.LoadAndDelete(key) + return ok +} + +// StartExposeReaper starts a background goroutine that reaps expired expose sessions. +func (t *exposeTracker) StartExposeReaper(ctx context.Context) { + go func() { + ticker := time.NewTicker(exposeReapInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + t.reapExpiredExposes() + } + } + }() +} + +func (t *exposeTracker) reapExpiredExposes() { + t.activeExposes.Range(func(key, val any) bool { + expose := val.(*trackedExpose) + expose.mu.Lock() + expired := time.Since(expose.lastRenewed) > exposeTTL + if expired { + expose.expiring = true + } + expose.mu.Unlock() + + if !expired { + return true + } + + log.Infof("reaping expired expose session for peer %s, domain %s", expose.peerID, expose.domain) + + err := t.manager.deleteServiceFromPeer(context.Background(), expose.accountID, expose.peerID, expose.domain, true) + + s, _ := status.FromError(err) + + switch { + case err == nil: + t.activeExposes.Delete(key) + case s.ErrorType == status.NotFound: + log.Debugf("service %s was already deleted", expose.domain) + default: + log.Errorf("failed to delete expired peer-exposed service for domain %s: %v", expose.domain, err) + } + + return true + }) +} diff --git a/management/internals/modules/reverseproxy/manager/expose_tracker_test.go b/management/internals/modules/reverseproxy/manager/expose_tracker_test.go new file mode 100644 index 000000000..2dc726590 --- /dev/null +++ b/management/internals/modules/reverseproxy/manager/expose_tracker_test.go @@ -0,0 +1,256 @@ +package manager + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" +) + +func TestExposeKey(t *testing.T) { + assert.Equal(t, "peer1:example.com", exposeKey("peer1", "example.com")) + assert.Equal(t, "peer2:other.com", exposeKey("peer2", "other.com")) + assert.NotEqual(t, exposeKey("peer1", "a.com"), exposeKey("peer1", "b.com")) +} + +func TestTrackExposeIfAllowed(t *testing.T) { + t.Run("first track succeeds", func(t *testing.T) { + tracker := &exposeTracker{} + alreadyTracked, ok := tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1") + assert.False(t, alreadyTracked, "first track should not be duplicate") + assert.True(t, ok, "first track should be allowed") + }) + + t.Run("duplicate track detected", func(t *testing.T) { + tracker := &exposeTracker{} + tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1") + + alreadyTracked, ok := tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1") + assert.True(t, alreadyTracked, "second track should be duplicate") + assert.False(t, ok) + }) + + t.Run("rejects when at limit", func(t *testing.T) { + tracker := &exposeTracker{} + for i := range maxExposesPerPeer { + _, ok := tracker.TrackExposeIfAllowed("peer1", "domain-"+string(rune('a'+i))+".com", "acct1") + assert.True(t, ok, "track %d should be allowed", i) + } + + alreadyTracked, ok := tracker.TrackExposeIfAllowed("peer1", "over-limit.com", "acct1") + assert.False(t, alreadyTracked) + assert.False(t, ok, "should reject when at limit") + }) + + t.Run("other peer unaffected by limit", func(t *testing.T) { + tracker := &exposeTracker{} + for i := range maxExposesPerPeer { + tracker.TrackExposeIfAllowed("peer1", "domain-"+string(rune('a'+i))+".com", "acct1") + } + + _, ok := tracker.TrackExposeIfAllowed("peer2", "a.com", "acct1") + assert.True(t, ok, "other peer should still be within limit") + }) +} + +func TestUntrackExpose(t *testing.T) { + tracker := &exposeTracker{} + + tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1") + assert.Equal(t, 1, tracker.CountPeerExposes("peer1")) + + tracker.UntrackExpose("peer1", "a.com") + assert.Equal(t, 0, tracker.CountPeerExposes("peer1")) +} + +func TestCountPeerExposes(t *testing.T) { + tracker := &exposeTracker{} + + assert.Equal(t, 0, tracker.CountPeerExposes("peer1")) + + tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1") + tracker.TrackExposeIfAllowed("peer1", "b.com", "acct1") + tracker.TrackExposeIfAllowed("peer2", "a.com", "acct1") + + assert.Equal(t, 2, tracker.CountPeerExposes("peer1"), "peer1 should have 2 exposes") + assert.Equal(t, 1, tracker.CountPeerExposes("peer2"), "peer2 should have 1 expose") + assert.Equal(t, 0, tracker.CountPeerExposes("peer3"), "peer3 should have 0 exposes") +} + +func TestMaxExposesPerPeer(t *testing.T) { + tracker := &exposeTracker{} + assert.Equal(t, maxExposesPerPeer, tracker.MaxExposesPerPeer()) +} + +func TestRenewTrackedExpose(t *testing.T) { + tracker := &exposeTracker{} + + found := tracker.RenewTrackedExpose("peer1", "a.com") + assert.False(t, found, "should not find untracked expose") + + tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1") + + found = tracker.RenewTrackedExpose("peer1", "a.com") + assert.True(t, found, "should find tracked expose") +} + +func TestRenewTrackedExpose_RejectsExpiring(t *testing.T) { + tracker := &exposeTracker{} + tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1") + + // Simulate reaper marking the expose as expiring + key := exposeKey("peer1", "a.com") + val, _ := tracker.activeExposes.Load(key) + expose := val.(*trackedExpose) + expose.mu.Lock() + expose.expiring = true + expose.mu.Unlock() + + found := tracker.RenewTrackedExpose("peer1", "a.com") + assert.False(t, found, "should reject renewal when expiring") +} + +func TestReapExpiredExposes(t *testing.T) { + mgr, _ := setupIntegrationTest(t) + tracker := mgr.exposeTracker + + ctx := context.Background() + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{ + Port: 8080, + Protocol: "http", + }) + require.NoError(t, err) + + // Manually expire the tracked entry + key := exposeKey(testPeerID, resp.Domain) + val, _ := tracker.activeExposes.Load(key) + expose := val.(*trackedExpose) + expose.mu.Lock() + expose.lastRenewed = time.Now().Add(-2 * exposeTTL) + expose.mu.Unlock() + + // Add an active (non-expired) tracking entry + tracker.activeExposes.Store(exposeKey("peer1", "active.com"), &trackedExpose{ + domain: "active.com", + accountID: testAccountID, + peerID: "peer1", + lastRenewed: time.Now(), + }) + + tracker.reapExpiredExposes() + + _, exists := tracker.activeExposes.Load(key) + assert.False(t, exists, "expired expose should be removed") + + _, exists = tracker.activeExposes.Load(exposeKey("peer1", "active.com")) + assert.True(t, exists, "active expose should remain") +} + +func TestReapExpiredExposes_SetsExpiringFlag(t *testing.T) { + mgr, _ := setupIntegrationTest(t) + tracker := mgr.exposeTracker + + ctx := context.Background() + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{ + Port: 8080, + Protocol: "http", + }) + require.NoError(t, err) + + key := exposeKey(testPeerID, resp.Domain) + val, _ := tracker.activeExposes.Load(key) + expose := val.(*trackedExpose) + + // Expire it + expose.mu.Lock() + expose.lastRenewed = time.Now().Add(-2 * exposeTTL) + expose.mu.Unlock() + + // Renew should succeed before reaping + assert.True(t, tracker.RenewTrackedExpose(testPeerID, resp.Domain), "renew should succeed before reaper runs") + + // Re-expire and reap + expose.mu.Lock() + expose.lastRenewed = time.Now().Add(-2 * exposeTTL) + expose.mu.Unlock() + + tracker.reapExpiredExposes() + + // Entry is deleted, renew returns false + assert.False(t, tracker.RenewTrackedExpose(testPeerID, resp.Domain), "renew should fail after reap") +} + +func TestConcurrentTrackAndCount(t *testing.T) { + mgr, _ := setupIntegrationTest(t) + tracker := mgr.exposeTracker + ctx := context.Background() + + for i := range 5 { + _, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{ + Port: 8080 + i, + Protocol: "http", + }) + require.NoError(t, err) + } + + // Manually expire all tracked entries + tracker.activeExposes.Range(func(_, val any) bool { + expose := val.(*trackedExpose) + expose.mu.Lock() + expose.lastRenewed = time.Now().Add(-2 * exposeTTL) + expose.mu.Unlock() + return true + }) + + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + tracker.reapExpiredExposes() + }() + go func() { + defer wg.Done() + tracker.CountPeerExposes(testPeerID) + }() + wg.Wait() + + assert.Equal(t, 0, tracker.CountPeerExposes(testPeerID), "all expired exposes should be reaped") +} + +func TestTrackedExposeMutexProtectsLastRenewed(t *testing.T) { + expose := &trackedExpose{ + lastRenewed: time.Now().Add(-1 * time.Hour), + } + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + for range 100 { + expose.mu.Lock() + expose.lastRenewed = time.Now() + expose.mu.Unlock() + } + }() + + go func() { + defer wg.Done() + for range 100 { + expose.mu.Lock() + _ = time.Since(expose.lastRenewed) + expose.mu.Unlock() + } + }() + + wg.Wait() + + expose.mu.Lock() + require.False(t, expose.lastRenewed.IsZero(), "lastRenewed should not be zero after concurrent access") + expose.mu.Unlock() +} diff --git a/management/internals/modules/reverseproxy/manager/manager.go b/management/internals/modules/reverseproxy/manager/manager.go index ac839b8ea..b2c67e0c1 100644 --- a/management/internals/modules/reverseproxy/manager/manager.go +++ b/management/internals/modules/reverseproxy/manager/manager.go @@ -40,11 +40,12 @@ type managerImpl struct { settingsManager settings.Manager proxyGRPCServer *nbgrpc.ProxyServiceServer clusterDeriver ClusterDeriver + exposeTracker *exposeTracker } // NewManager creates a new service manager. func NewManager(store store.Store, accountManager account.Manager, permissionsManager permissions.Manager, settingsManager settings.Manager, proxyGRPCServer *nbgrpc.ProxyServiceServer, clusterDeriver ClusterDeriver) reverseproxy.Manager { - return &managerImpl{ + mgr := &managerImpl{ store: store, accountManager: accountManager, permissionsManager: permissionsManager, @@ -52,6 +53,13 @@ func NewManager(store store.Store, accountManager account.Manager, permissionsMa proxyGRPCServer: proxyGRPCServer, clusterDeriver: clusterDeriver, } + mgr.exposeTracker = &exposeTracker{manager: mgr} + return mgr +} + +// StartExposeReaper delegates to the expose tracker. +func (m *managerImpl) StartExposeReaper(ctx context.Context) { + m.exposeTracker.StartExposeReaper(ctx) } func (m *managerImpl) GetAllServices(ctx context.Context, accountID, userID string) ([]*reverseproxy.Service, error) { @@ -418,6 +426,10 @@ func (m *managerImpl) DeleteService(ctx context.Context, accountID, userID, serv return err } + if service.Source == reverseproxy.SourceEphemeral { + m.exposeTracker.UntrackExpose(service.SourcePeer, service.Domain) + } + m.accountManager.StoreEvent(ctx, userID, serviceID, accountID, activity.ServiceDeleted, service.EventMeta()) m.sendServiceUpdate(service, reverseproxy.Delete, service.ProxyCluster, "") @@ -460,6 +472,9 @@ func (m *managerImpl) DeleteAllServices(ctx context.Context, accountID, userID s oidcCfg := m.proxyGRPCServer.GetOIDCValidationConfig() for _, service := range services { + if service.Source == reverseproxy.SourceEphemeral { + m.exposeTracker.UntrackExpose(service.SourcePeer, service.Domain) + } m.accountManager.StoreEvent(ctx, userID, service.ID, accountID, activity.ServiceDeleted, service.EventMeta()) mapping := service.ToProtoMapping(reverseproxy.Delete, "", oidcCfg) clusterMappings[service.ProxyCluster] = append(clusterMappings[service.ProxyCluster], mapping) @@ -617,9 +632,9 @@ func (m *managerImpl) GetServiceIDByTargetID(ctx context.Context, accountID stri return target.ServiceID, nil } -// ValidateExposePermission checks whether the peer is allowed to use the expose feature. +// validateExposePermission checks whether the peer is allowed to use the expose feature. // It verifies the account has peer expose enabled and that the peer belongs to an allowed group. -func (m *managerImpl) ValidateExposePermission(ctx context.Context, accountID, peerID string) error { +func (m *managerImpl) validateExposePermission(ctx context.Context, accountID, peerID string) error { settings, err := m.store.GetAccountSettings(ctx, store.LockingStrengthNone, accountID) if err != nil { log.WithContext(ctx).Errorf("failed to get account settings: %v", err) @@ -650,8 +665,23 @@ func (m *managerImpl) ValidateExposePermission(ctx context.Context, accountID, p } // CreateServiceFromPeer creates a service initiated by a peer expose request. -// It skips user permission checks since authorization is done at the gRPC handler level. -func (m *managerImpl) CreateServiceFromPeer(ctx context.Context, accountID, peerID string, service *reverseproxy.Service) (*reverseproxy.Service, error) { +// It validates the request, checks expose permissions, enforces the per-peer limit, +// creates the service, and tracks it for TTL-based reaping. +func (m *managerImpl) CreateServiceFromPeer(ctx context.Context, accountID, peerID string, req *reverseproxy.ExposeServiceRequest) (*reverseproxy.ExposeServiceResponse, error) { + if err := req.Validate(); err != nil { + return nil, status.Errorf(status.InvalidArgument, "validate expose request: %v", err) + } + + if err := m.validateExposePermission(ctx, accountID, peerID); err != nil { + return nil, err + } + + serviceName, err := reverseproxy.GenerateExposeName(req.NamePrefix) + if err != nil { + return nil, status.Errorf(status.InvalidArgument, "generate service name: %v", err) + } + + service := req.ToService(accountID, peerID, serviceName) service.Source = reverseproxy.SourceEphemeral if service.Domain == "" { @@ -665,7 +695,7 @@ func (m *managerImpl) CreateServiceFromPeer(ctx context.Context, accountID, peer if service.Auth.BearerAuth != nil && service.Auth.BearerAuth.Enabled { groupIDs, err := m.getGroupIDsFromNames(ctx, accountID, service.Auth.BearerAuth.DistributionGroups) if err != nil { - return nil, fmt.Errorf("get group ids for service %s: %w", service.ID, err) + return nil, fmt.Errorf("get group ids for service %s: %w", service.Name, err) } service.Auth.BearerAuth.DistributionGroups = groupIDs } @@ -687,8 +717,21 @@ func (m *managerImpl) CreateServiceFromPeer(ctx context.Context, accountID, peer return nil, err } - meta := addPeerInfoToEventMeta(service.EventMeta(), peer) + alreadyTracked, allowed := m.exposeTracker.TrackExposeIfAllowed(peerID, service.Domain, accountID) + if alreadyTracked { + if err := m.deleteServiceFromPeer(ctx, accountID, peerID, service.Domain, false); err != nil { + log.WithContext(ctx).Debugf("failed to delete duplicate expose service for domain %s: %v", service.Domain, err) + } + return nil, status.Errorf(status.AlreadyExists, "peer already has an active expose session for this domain") + } + if !allowed { + if err := m.deleteServiceFromPeer(ctx, accountID, peerID, service.Domain, false); err != nil { + log.WithContext(ctx).Debugf("failed to delete service after limit exceeded for domain %s: %v", service.Domain, err) + } + return nil, status.Errorf(status.PreconditionFailed, "peer has reached the maximum number of active expose sessions (%d)", maxExposesPerPeer) + } + meta := addPeerInfoToEventMeta(service.EventMeta(), peer) m.accountManager.StoreEvent(ctx, peerID, service.ID, accountID, activity.PeerServiceExposed, meta) if err := m.replaceHostByLookup(ctx, accountID, service); err != nil { @@ -696,10 +739,13 @@ func (m *managerImpl) CreateServiceFromPeer(ctx context.Context, accountID, peer } m.sendServiceUpdate(service, reverseproxy.Create, service.ProxyCluster, "") - m.accountManager.UpdateAccountPeers(ctx, accountID) - return service, nil + return &reverseproxy.ExposeServiceResponse{ + ServiceName: service.Name, + ServiceURL: "https://" + service.Domain, + Domain: service.Domain, + }, nil } func (m *managerImpl) getGroupIDsFromNames(ctx context.Context, accountID string, groupNames []string) ([]string, error) { @@ -718,6 +764,9 @@ func (m *managerImpl) getGroupIDsFromNames(ctx context.Context, accountID string } func (m *managerImpl) buildRandomDomain(name string) (string, error) { + if m.clusterDeriver == nil { + return "", fmt.Errorf("unable to get random domain") + } clusterDomains := m.clusterDeriver.GetClusterDomains() if len(clusterDomains) == 0 { return "", fmt.Errorf("no cluster domains found for service %s", name) @@ -727,15 +776,60 @@ func (m *managerImpl) buildRandomDomain(name string) (string, error) { return domain, nil } -// DeleteServiceFromPeer deletes a peer-initiated service. -// It validates that the service was created by a peer to prevent deleting API-created services. -func (m *managerImpl) DeleteServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error { - return m.deletePeerService(ctx, accountID, peerID, serviceID, activity.PeerServiceUnexposed) +// RenewServiceFromPeer renews the in-memory TTL tracker for the peer's expose session. +// Returns an error if the expose is not actively tracked. +func (m *managerImpl) RenewServiceFromPeer(_ context.Context, _, peerID, domain string) error { + if !m.exposeTracker.RenewTrackedExpose(peerID, domain) { + return status.Errorf(status.NotFound, "no active expose session for domain %s", domain) + } + return nil } -// ExpireServiceFromPeer deletes a peer-initiated service that was not renewed within the TTL. -func (m *managerImpl) ExpireServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error { - return m.deletePeerService(ctx, accountID, peerID, serviceID, activity.PeerServiceExposeExpired) +// StopServiceFromPeer stops a peer's active expose session by untracking and deleting the service. +func (m *managerImpl) StopServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error { + if err := m.deleteServiceFromPeer(ctx, accountID, peerID, domain, false); err != nil { + log.WithContext(ctx).Errorf("failed to delete peer-exposed service for domain %s: %v", domain, err) + return err + } + + if !m.exposeTracker.StopTrackedExpose(peerID, domain) { + log.WithContext(ctx).Warnf("expose tracker entry for domain %s already removed; service was deleted", domain) + } + + return nil +} + +// deleteServiceFromPeer deletes a peer-initiated service identified by domain. +// When expired is true, the activity is recorded as PeerServiceExposeExpired instead of PeerServiceUnexposed. +func (m *managerImpl) deleteServiceFromPeer(ctx context.Context, accountID, peerID, domain string, expired bool) error { + service, err := m.lookupPeerService(ctx, accountID, peerID, domain) + if err != nil { + return err + } + + activityCode := activity.PeerServiceUnexposed + if expired { + activityCode = activity.PeerServiceExposeExpired + } + return m.deletePeerService(ctx, accountID, peerID, service.ID, activityCode) +} + +// lookupPeerService finds a peer-initiated service by domain and validates ownership. +func (m *managerImpl) lookupPeerService(ctx context.Context, accountID, peerID, domain string) (*reverseproxy.Service, error) { + service, err := m.store.GetServiceByDomain(ctx, accountID, domain) + if err != nil { + return nil, err + } + + if service.Source != reverseproxy.SourceEphemeral { + return nil, status.Errorf(status.PermissionDenied, "cannot operate on API-created service via peer expose") + } + + if service.SourcePeer != peerID { + return nil, status.Errorf(status.PermissionDenied, "cannot operate on service exposed by another peer") + } + + return service, nil } func (m *managerImpl) deletePeerService(ctx context.Context, accountID, peerID, serviceID string, activityCode activity.Activity) error { diff --git a/management/internals/modules/reverseproxy/manager/manager_test.go b/management/internals/modules/reverseproxy/manager/manager_test.go index eab853cf3..17849f622 100644 --- a/management/internals/modules/reverseproxy/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/manager/manager_test.go @@ -658,6 +658,13 @@ func setupIntegrationTest(t *testing.T) (*managerImpl, store.Store) { PeerExposeEnabled: true, PeerExposeGroups: []string{testGroupID}, }, + Users: map[string]*types.User{ + testUserID: { + Id: testUserID, + AccountID: testAccountID, + Role: types.UserRoleAdmin, + }, + }, Peers: map[string]*nbpeer.Peer{ testPeerID: { ID: testPeerID, @@ -712,16 +719,17 @@ func setupIntegrationTest(t *testing.T) (*managerImpl, store.Store) { domains: []string{"test.netbird.io"}, }, } + mgr.exposeTracker = &exposeTracker{manager: mgr} return mgr, testStore } -func TestValidateExposePermission(t *testing.T) { +func Test_validateExposePermission(t *testing.T) { ctx := context.Background() t.Run("allowed when peer is in expose group", func(t *testing.T) { mgr, _ := setupIntegrationTest(t) - err := mgr.ValidateExposePermission(ctx, testAccountID, testPeerID) + err := mgr.validateExposePermission(ctx, testAccountID, testPeerID) assert.NoError(t, err) }) @@ -742,7 +750,7 @@ func TestValidateExposePermission(t *testing.T) { }) require.NoError(t, err) - err = mgr.ValidateExposePermission(ctx, testAccountID, otherPeerID) + err = mgr.validateExposePermission(ctx, testAccountID, otherPeerID) require.Error(t, err) assert.Contains(t, err.Error(), "not in an allowed expose group") }) @@ -757,7 +765,7 @@ func TestValidateExposePermission(t *testing.T) { err = testStore.SaveAccountSettings(ctx, testAccountID, s) require.NoError(t, err) - err = mgr.ValidateExposePermission(ctx, testAccountID, testPeerID) + err = mgr.validateExposePermission(ctx, testAccountID, testPeerID) require.Error(t, err) assert.Contains(t, err.Error(), "not enabled") }) @@ -772,7 +780,7 @@ func TestValidateExposePermission(t *testing.T) { err = testStore.SaveAccountSettings(ctx, testAccountID, s) require.NoError(t, err) - err = mgr.ValidateExposePermission(ctx, testAccountID, testPeerID) + err = mgr.validateExposePermission(ctx, testAccountID, testPeerID) assert.Error(t, err) }) @@ -781,7 +789,7 @@ func TestValidateExposePermission(t *testing.T) { mockStore := store.NewMockStore(ctrl) mockStore.EXPECT().GetAccountSettings(gomock.Any(), gomock.Any(), testAccountID).Return(nil, errors.New("store error")) mgr := &managerImpl{store: mockStore} - err := mgr.ValidateExposePermission(ctx, testAccountID, testPeerID) + err := mgr.validateExposePermission(ctx, testAccountID, testPeerID) require.Error(t, err) assert.Contains(t, err.Error(), "get account settings") }) @@ -793,82 +801,290 @@ func TestCreateServiceFromPeer(t *testing.T) { t.Run("creates service with random domain", func(t *testing.T) { mgr, testStore := setupIntegrationTest(t) - service := &reverseproxy.Service{ - Name: "my-expose", - Enabled: true, - Targets: []*reverseproxy.Target{ - { - AccountID: testAccountID, - Port: 8080, - Protocol: "http", - TargetId: testPeerID, - TargetType: reverseproxy.TargetTypePeer, - Enabled: true, - }, - }, + req := &reverseproxy.ExposeServiceRequest{ + Port: 8080, + Protocol: "http", } - created, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, service) + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, req) require.NoError(t, err) - assert.NotEmpty(t, created.ID, "service should have an ID") - assert.Contains(t, created.Domain, "test.netbird.io", "domain should use cluster domain") - assert.Equal(t, reverseproxy.SourceEphemeral, created.Source, "source should be ephemeral") - assert.Equal(t, testPeerID, created.SourcePeer, "source peer should be set") - assert.NotNil(t, created.Meta.LastRenewedAt, "last renewed should be set") + assert.NotEmpty(t, resp.ServiceName, "service name should be generated") + assert.Contains(t, resp.Domain, "test.netbird.io", "domain should use cluster domain") + assert.NotEmpty(t, resp.ServiceURL, "service URL should be set") // Verify service is persisted in store - persisted, err := testStore.GetServiceByID(ctx, store.LockingStrengthNone, testAccountID, created.ID) + persisted, err := testStore.GetServiceByDomain(ctx, testAccountID, resp.Domain) require.NoError(t, err) - assert.Equal(t, created.ID, persisted.ID) - assert.Equal(t, created.Domain, persisted.Domain) + assert.Equal(t, resp.Domain, persisted.Domain) + assert.Equal(t, reverseproxy.SourceEphemeral, persisted.Source, "source should be ephemeral") + assert.Equal(t, testPeerID, persisted.SourcePeer, "source peer should be set") + assert.NotNil(t, persisted.Meta.LastRenewedAt, "last renewed should be set") }) t.Run("creates service with custom domain", func(t *testing.T) { mgr, _ := setupIntegrationTest(t) - service := &reverseproxy.Service{ - Name: "custom", - Domain: "custom.example.com", - Enabled: true, - Targets: []*reverseproxy.Target{ - { - AccountID: testAccountID, - Port: 80, - Protocol: "http", - TargetId: testPeerID, - TargetType: reverseproxy.TargetTypePeer, - Enabled: true, - }, - }, + req := &reverseproxy.ExposeServiceRequest{ + Port: 80, + Protocol: "http", + Domain: "example.com", } - created, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, service) + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, req) require.NoError(t, err) - assert.Equal(t, "custom.example.com", created.Domain, "should keep the provided domain") + assert.Contains(t, resp.Domain, "example.com", "should use the provided domain") }) - t.Run("replaces host by peer IP lookup", func(t *testing.T) { - mgr, _ := setupIntegrationTest(t) + t.Run("validates expose permission internally", func(t *testing.T) { + mgr, testStore := setupIntegrationTest(t) - service := &reverseproxy.Service{ - Name: "lookup-test", - Enabled: true, - Targets: []*reverseproxy.Target{ - { - AccountID: testAccountID, - Port: 3000, - Protocol: "http", - TargetId: testPeerID, - TargetType: reverseproxy.TargetTypePeer, - Enabled: true, - }, - }, + // Disable peer expose + s, err := testStore.GetAccountSettings(ctx, store.LockingStrengthNone, testAccountID) + require.NoError(t, err) + s.PeerExposeEnabled = false + err = testStore.SaveAccountSettings(ctx, testAccountID, s) + require.NoError(t, err) + + req := &reverseproxy.ExposeServiceRequest{ + Port: 8080, + Protocol: "http", } - created, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, service) + _, err = mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, req) + require.Error(t, err) + assert.Contains(t, err.Error(), "not enabled") + }) + + t.Run("validates request fields", func(t *testing.T) { + mgr, _ := setupIntegrationTest(t) + + req := &reverseproxy.ExposeServiceRequest{ + Port: 0, + Protocol: "http", + } + + _, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, req) + require.Error(t, err) + assert.Contains(t, err.Error(), "port") + }) +} + +func TestExposeServiceRequestValidate(t *testing.T) { + tests := []struct { + name string + req reverseproxy.ExposeServiceRequest + wantErr string + }{ + { + name: "valid http request", + req: reverseproxy.ExposeServiceRequest{Port: 8080, Protocol: "http"}, + wantErr: "", + }, + { + name: "valid https request with pin", + req: reverseproxy.ExposeServiceRequest{Port: 443, Protocol: "https", Pin: "123456"}, + wantErr: "", + }, + { + name: "port zero rejected", + req: reverseproxy.ExposeServiceRequest{Port: 0, Protocol: "http"}, + wantErr: "port must be between 1 and 65535", + }, + { + name: "negative port rejected", + req: reverseproxy.ExposeServiceRequest{Port: -1, Protocol: "http"}, + wantErr: "port must be between 1 and 65535", + }, + { + name: "port above 65535 rejected", + req: reverseproxy.ExposeServiceRequest{Port: 65536, Protocol: "http"}, + wantErr: "port must be between 1 and 65535", + }, + { + name: "unsupported protocol", + req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "tcp"}, + wantErr: "unsupported protocol", + }, + { + name: "invalid pin format", + req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "http", Pin: "abc"}, + wantErr: "invalid pin", + }, + { + name: "pin too short", + req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "http", Pin: "12345"}, + wantErr: "invalid pin", + }, + { + name: "valid 6-digit pin", + req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "http", Pin: "000000"}, + wantErr: "", + }, + { + name: "empty user group name", + req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "http", UserGroups: []string{"valid", ""}}, + wantErr: "user group name cannot be empty", + }, + { + name: "invalid name prefix", + req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "http", NamePrefix: "INVALID"}, + wantErr: "invalid name prefix", + }, + { + name: "valid name prefix", + req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "http", NamePrefix: "my-service"}, + wantErr: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.req.Validate() + if tt.wantErr == "" { + assert.NoError(t, err) + } else { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.wantErr) + } + }) + } + + t.Run("nil receiver", func(t *testing.T) { + var req *reverseproxy.ExposeServiceRequest + err := req.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "request cannot be nil") + }) +} + +func TestDeleteServiceFromPeer_ByDomain(t *testing.T) { + ctx := context.Background() + + t.Run("deletes service by domain", func(t *testing.T) { + mgr, testStore := setupIntegrationTest(t) + + // First create a service + req := &reverseproxy.ExposeServiceRequest{ + Port: 8080, + Protocol: "http", + } + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, req) require.NoError(t, err) - require.Len(t, created.Targets, 1) - assert.Equal(t, "100.64.0.1", created.Targets[0].Host, "host should be resolved to peer IP") + + // Delete by domain using unexported method + err = mgr.deleteServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain, false) + require.NoError(t, err) + + // Verify service is deleted + _, err = testStore.GetServiceByDomain(ctx, testAccountID, resp.Domain) + require.Error(t, err, "service should be deleted") + }) + + t.Run("expire uses correct activity", func(t *testing.T) { + mgr, _ := setupIntegrationTest(t) + + req := &reverseproxy.ExposeServiceRequest{ + Port: 8080, + Protocol: "http", + } + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, req) + require.NoError(t, err) + + err = mgr.deleteServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain, true) + require.NoError(t, err) + }) +} + +func TestStopServiceFromPeer(t *testing.T) { + ctx := context.Background() + + t.Run("stops service by domain", func(t *testing.T) { + mgr, testStore := setupIntegrationTest(t) + + req := &reverseproxy.ExposeServiceRequest{ + Port: 8080, + Protocol: "http", + } + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, req) + require.NoError(t, err) + + err = mgr.StopServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain) + require.NoError(t, err) + + _, err = testStore.GetServiceByDomain(ctx, testAccountID, resp.Domain) + require.Error(t, err, "service should be deleted") + }) +} + +func TestDeleteService_UntracksEphemeralExpose(t *testing.T) { + ctx := context.Background() + mgr, _ := setupIntegrationTest(t) + + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{ + Port: 8080, + Protocol: "http", + }) + require.NoError(t, err) + assert.Equal(t, 1, mgr.exposeTracker.CountPeerExposes(testPeerID), "expose should be tracked after create") + + // Look up the service by domain to get its store ID + svc, err := mgr.store.GetServiceByDomain(ctx, testAccountID, resp.Domain) + require.NoError(t, err) + + // Delete via the API path (user-initiated) + err = mgr.DeleteService(ctx, testAccountID, testUserID, svc.ID) + require.NoError(t, err) + + assert.Equal(t, 0, mgr.exposeTracker.CountPeerExposes(testPeerID), "expose should be untracked after API delete") + + // A new expose should succeed (not blocked by stale tracking) + _, err = mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{ + Port: 9090, + Protocol: "http", + }) + assert.NoError(t, err, "new expose should succeed after API delete cleared tracking") +} + +func TestDeleteAllServices_UntracksEphemeralExposes(t *testing.T) { + ctx := context.Background() + mgr, _ := setupIntegrationTest(t) + + for i := range 3 { + _, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{ + Port: 8080 + i, + Protocol: "http", + }) + require.NoError(t, err) + } + + assert.Equal(t, 3, mgr.exposeTracker.CountPeerExposes(testPeerID), "all exposes should be tracked") + + err := mgr.DeleteAllServices(ctx, testAccountID, testUserID) + require.NoError(t, err) + + assert.Equal(t, 0, mgr.exposeTracker.CountPeerExposes(testPeerID), "all exposes should be untracked after DeleteAllServices") +} + +func TestRenewServiceFromPeer(t *testing.T) { + ctx := context.Background() + + t.Run("renews tracked expose", func(t *testing.T) { + mgr, _ := setupIntegrationTest(t) + + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{ + Port: 8080, + Protocol: "http", + }) + require.NoError(t, err) + + err = mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain) + require.NoError(t, err) + }) + + t.Run("fails for untracked domain", func(t *testing.T) { + mgr, _ := setupIntegrationTest(t) + err := mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, "nonexistent.com") + require.Error(t, err) }) } diff --git a/management/internals/modules/reverseproxy/reverseproxy.go b/management/internals/modules/reverseproxy/reverseproxy.go index ebe9ace96..10226710b 100644 --- a/management/internals/modules/reverseproxy/reverseproxy.go +++ b/management/internals/modules/reverseproxy/reverseproxy.go @@ -318,63 +318,6 @@ func isDefaultPort(scheme string, port int) bool { return (scheme == "https" && port == 443) || (scheme == "http" && port == 80) } -// FromExposeRequest builds a Service from a peer expose gRPC request. -func FromExposeRequest(req *proto.ExposeServiceRequest, accountID, peerID, serviceName string) *Service { - service := &Service{ - AccountID: accountID, - Name: serviceName, - Enabled: true, - Targets: []*Target{ - { - AccountID: accountID, - Port: int(req.Port), - Protocol: exposeProtocolToString(req.Protocol), - TargetId: peerID, - TargetType: TargetTypePeer, - Enabled: true, - }, - }, - } - - if req.Domain != "" { - service.Domain = serviceName + "." + req.Domain - } - - if req.Pin != "" { - service.Auth.PinAuth = &PINAuthConfig{ - Enabled: true, - Pin: req.Pin, - } - } - - if req.Password != "" { - service.Auth.PasswordAuth = &PasswordAuthConfig{ - Enabled: true, - Password: req.Password, - } - } - - if len(req.UserGroups) > 0 { - service.Auth.BearerAuth = &BearerAuthConfig{ - Enabled: true, - DistributionGroups: req.UserGroups, - } - } - - return service -} - -func exposeProtocolToString(p proto.ExposeProtocol) string { - switch p { - case proto.ExposeProtocol_EXPOSE_HTTP: - return "http" - case proto.ExposeProtocol_EXPOSE_HTTPS: - return "https" - default: - return "http" - } -} - func (s *Service) FromAPIRequest(req *api.ServiceRequest, accountID string) { s.Name = req.Name s.Domain = req.Domain @@ -534,10 +477,107 @@ func (s *Service) DecryptSensitiveData(enc *crypt.FieldEncrypt) error { return nil } +var pinRegexp = regexp.MustCompile(`^\d{6}$`) + const alphanumCharset = "abcdefghijklmnopqrstuvwxyz0123456789" var validNamePrefix = regexp.MustCompile(`^[a-z0-9]([a-z0-9-]{0,30}[a-z0-9])?$`) +// ExposeServiceRequest contains the parameters for creating a peer-initiated expose service. +type ExposeServiceRequest struct { + NamePrefix string + Port int + Protocol string + Domain string + Pin string + Password string + UserGroups []string +} + +// Validate checks all fields of the expose request. +func (r *ExposeServiceRequest) Validate() error { + if r == nil { + return errors.New("request cannot be nil") + } + + if r.Port < 1 || r.Port > 65535 { + return fmt.Errorf("port must be between 1 and 65535, got %d", r.Port) + } + + if r.Protocol != "http" && r.Protocol != "https" { + return fmt.Errorf("unsupported protocol %q: must be http or https", r.Protocol) + } + + if r.Pin != "" && !pinRegexp.MatchString(r.Pin) { + return errors.New("invalid pin: must be exactly 6 digits") + } + + for _, g := range r.UserGroups { + if g == "" { + return errors.New("user group name cannot be empty") + } + } + + if r.NamePrefix != "" && !validNamePrefix.MatchString(r.NamePrefix) { + return fmt.Errorf("invalid name prefix %q: must be lowercase alphanumeric with optional hyphens, 1-32 characters", r.NamePrefix) + } + + return nil +} + +// ToService builds a Service from the expose request. +func (r *ExposeServiceRequest) ToService(accountID, peerID, serviceName string) *Service { + service := &Service{ + AccountID: accountID, + Name: serviceName, + Enabled: true, + Targets: []*Target{ + { + AccountID: accountID, + Port: r.Port, + Protocol: r.Protocol, + TargetId: peerID, + TargetType: TargetTypePeer, + Enabled: true, + }, + }, + } + + if r.Domain != "" { + service.Domain = serviceName + "." + r.Domain + } + + if r.Pin != "" { + service.Auth.PinAuth = &PINAuthConfig{ + Enabled: true, + Pin: r.Pin, + } + } + + if r.Password != "" { + service.Auth.PasswordAuth = &PasswordAuthConfig{ + Enabled: true, + Password: r.Password, + } + } + + if len(r.UserGroups) > 0 { + service.Auth.BearerAuth = &BearerAuthConfig{ + Enabled: true, + DistributionGroups: r.UserGroups, + } + } + + return service +} + +// ExposeServiceResponse contains the result of a successful peer expose creation. +type ExposeServiceResponse struct { + ServiceName string + ServiceURL string + Domain string +} + // GenerateExposeName generates a random service name for peer-exposed services. // The prefix, if provided, must be a valid DNS label component (lowercase alphanumeric and hyphens). func GenerateExposeName(prefix string) (string, error) { diff --git a/management/internals/modules/reverseproxy/reverseproxy_test.go b/management/internals/modules/reverseproxy/reverseproxy_test.go index c80d7e342..cb75ee61f 100644 --- a/management/internals/modules/reverseproxy/reverseproxy_test.go +++ b/management/internals/modules/reverseproxy/reverseproxy_test.go @@ -458,14 +458,14 @@ func TestGenerateExposeName(t *testing.T) { }) } -func TestFromExposeRequest(t *testing.T) { +func TestExposeServiceRequest_ToService(t *testing.T) { t.Run("basic HTTP service", func(t *testing.T) { - req := &proto.ExposeServiceRequest{ + req := &ExposeServiceRequest{ Port: 8080, - Protocol: proto.ExposeProtocol_EXPOSE_HTTP, + Protocol: "http", } - service := FromExposeRequest(req, "account-1", "peer-1", "mysvc") + service := req.ToService("account-1", "peer-1", "mysvc") assert.Equal(t, "account-1", service.AccountID) assert.Equal(t, "mysvc", service.Name) @@ -483,22 +483,22 @@ func TestFromExposeRequest(t *testing.T) { }) t.Run("with custom domain", func(t *testing.T) { - req := &proto.ExposeServiceRequest{ + req := &ExposeServiceRequest{ Port: 3000, Domain: "example.com", } - service := FromExposeRequest(req, "acc", "peer", "web") + service := req.ToService("acc", "peer", "web") assert.Equal(t, "web.example.com", service.Domain) }) t.Run("with PIN auth", func(t *testing.T) { - req := &proto.ExposeServiceRequest{ + req := &ExposeServiceRequest{ Port: 80, Pin: "1234", } - service := FromExposeRequest(req, "acc", "peer", "svc") + service := req.ToService("acc", "peer", "svc") require.NotNil(t, service.Auth.PinAuth) assert.True(t, service.Auth.PinAuth.Enabled) assert.Equal(t, "1234", service.Auth.PinAuth.Pin) @@ -507,31 +507,31 @@ func TestFromExposeRequest(t *testing.T) { }) t.Run("with password auth", func(t *testing.T) { - req := &proto.ExposeServiceRequest{ + req := &ExposeServiceRequest{ Port: 80, Password: "secret", } - service := FromExposeRequest(req, "acc", "peer", "svc") + service := req.ToService("acc", "peer", "svc") require.NotNil(t, service.Auth.PasswordAuth) assert.True(t, service.Auth.PasswordAuth.Enabled) assert.Equal(t, "secret", service.Auth.PasswordAuth.Password) }) t.Run("with user groups (bearer auth)", func(t *testing.T) { - req := &proto.ExposeServiceRequest{ + req := &ExposeServiceRequest{ Port: 80, UserGroups: []string{"admins", "devs"}, } - service := FromExposeRequest(req, "acc", "peer", "svc") + service := req.ToService("acc", "peer", "svc") require.NotNil(t, service.Auth.BearerAuth) assert.True(t, service.Auth.BearerAuth.Enabled) assert.Equal(t, []string{"admins", "devs"}, service.Auth.BearerAuth.DistributionGroups) }) t.Run("with all auth types", func(t *testing.T) { - req := &proto.ExposeServiceRequest{ + req := &ExposeServiceRequest{ Port: 443, Domain: "myco.com", Pin: "9999", @@ -539,7 +539,7 @@ func TestFromExposeRequest(t *testing.T) { UserGroups: []string{"ops"}, } - service := FromExposeRequest(req, "acc", "peer", "full") + service := req.ToService("acc", "peer", "full") assert.Equal(t, "full.myco.com", service.Domain) require.NotNil(t, service.Auth.PinAuth) require.NotNil(t, service.Auth.PasswordAuth) diff --git a/management/internals/server/boot.go b/management/internals/server/boot.go index 216ea0857..45c1b763f 100644 --- a/management/internals/server/boot.go +++ b/management/internals/server/boot.go @@ -152,8 +152,11 @@ func (s *BaseServer) GRPCServer() *grpc.Server { if err != nil { log.Fatalf("failed to create management server: %v", err) } - srv.SetReverseProxyManager(s.ReverseProxyManager()) - srv.StartExposeReaper(context.Background()) + reverseProxyMgr := s.ReverseProxyManager() + srv.SetReverseProxyManager(reverseProxyMgr) + if reverseProxyMgr != nil { + reverseProxyMgr.StartExposeReaper(context.Background()) + } mgmtProto.RegisterManagementServiceServer(gRPCAPIHandler, srv) mgmtProto.RegisterProxyServiceServer(gRPCAPIHandler, s.ReverseProxyGRPCServer()) diff --git a/management/internals/shared/grpc/expose_service.go b/management/internals/shared/grpc/expose_service.go index 45b60ceec..ef00354af 100644 --- a/management/internals/shared/grpc/expose_service.go +++ b/management/internals/shared/grpc/expose_service.go @@ -2,9 +2,6 @@ package grpc import ( "context" - "regexp" - "sync" - "time" pb "github.com/golang/protobuf/proto" // nolint log "github.com/sirupsen/logrus" @@ -21,27 +18,6 @@ import ( internalStatus "github.com/netbirdio/netbird/shared/management/status" ) -var pinRegexp = regexp.MustCompile(`^\d{6}$`) - -const ( - exposeTTL = 90 * time.Second - exposeReapInterval = 30 * time.Second - maxExposesPerPeer = 10 -) - -type activeExpose struct { - mu sync.Mutex - serviceID string - domain string - accountID string - peerID string - lastRenewed time.Time -} - -func exposeKey(peerID, domain string) string { - return peerID + ":" + domain -} - // CreateExpose handles a peer request to create a new expose service. func (s *Server) CreateExpose(ctx context.Context, req *proto.EncryptedMessage) (*proto.EncryptedMessage, error) { exposeReq := &proto.ExposeServiceRequest{} @@ -58,72 +34,29 @@ func (s *Server) CreateExpose(ctx context.Context, req *proto.EncryptedMessage) // nolint:staticcheck ctx = context.WithValue(ctx, nbContext.AccountIDKey, accountID) - if exposeReq.Protocol != proto.ExposeProtocol_EXPOSE_HTTP && exposeReq.Protocol != proto.ExposeProtocol_EXPOSE_HTTPS { - return nil, status.Errorf(codes.InvalidArgument, "only HTTP or HTTPS protocol are supported") - } - - if exposeReq.Pin != "" && !pinRegexp.MatchString(exposeReq.Pin) { - return nil, status.Errorf(codes.InvalidArgument, "invalid pin: must be exactly 6 digits") - } - - for _, g := range exposeReq.UserGroups { - if g == "" { - return nil, status.Errorf(codes.InvalidArgument, "user group name cannot be empty") - } - } - reverseProxyMgr := s.getReverseProxyManager() if reverseProxyMgr == nil { return nil, status.Errorf(codes.Internal, "reverse proxy manager not available") } - if err := reverseProxyMgr.ValidateExposePermission(ctx, accountID, peer.ID); err != nil { - log.WithContext(ctx).Debugf("expose permission denied for peer %s: %v", peer.ID, err) - return nil, status.Errorf(codes.PermissionDenied, "permission denied") - } - - serviceName, err := reverseproxy.GenerateExposeName(exposeReq.NamePrefix) + created, err := reverseProxyMgr.CreateServiceFromPeer(ctx, accountID, peer.ID, &reverseproxy.ExposeServiceRequest{ + NamePrefix: exposeReq.NamePrefix, + Port: int(exposeReq.Port), + Protocol: exposeProtocolToString(exposeReq.Protocol), + Domain: exposeReq.Domain, + Pin: exposeReq.Pin, + Password: exposeReq.Password, + UserGroups: exposeReq.UserGroups, + }) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "generate service name: %v", err) + return nil, mapExposeError(ctx, err) } - service := reverseproxy.FromExposeRequest(exposeReq, accountID, peer.ID, serviceName) - - // Serialize the count check to prevent concurrent CreateExpose calls from - // exceeding maxExposesPerPeer. The lock is held only for the check; the - // actual service creation happens outside the lock. - s.exposeCreateMu.Lock() - if s.countPeerExposes(peer.ID) >= maxExposesPerPeer { - s.exposeCreateMu.Unlock() - return nil, status.Errorf(codes.ResourceExhausted, "peer has reached the maximum number of active expose sessions (%d)", maxExposesPerPeer) - } - s.exposeCreateMu.Unlock() - - created, err := reverseProxyMgr.CreateServiceFromPeer(ctx, accountID, peer.ID, service) - if err != nil { - log.WithContext(ctx).Errorf("failed to create service from peer: %v", err) - return nil, status.Errorf(codes.Internal, "create service: %v", err) - } - - key := exposeKey(peer.ID, created.Domain) - if _, loaded := s.activeExposes.LoadOrStore(key, &activeExpose{ - serviceID: created.ID, - domain: created.Domain, - accountID: accountID, - peerID: peer.ID, - lastRenewed: time.Now(), - }); loaded { - s.deleteExposeService(ctx, accountID, peer.ID, created) - return nil, status.Errorf(codes.AlreadyExists, "peer already has an active expose session for this domain") - } - - resp := &proto.ExposeServiceResponse{ - ServiceName: created.Name, - ServiceUrl: "https://" + created.Domain, + return s.encryptResponse(peerKey, &proto.ExposeServiceResponse{ + ServiceName: created.ServiceName, + ServiceUrl: created.ServiceURL, Domain: created.Domain, - } - - return s.encryptResponse(peerKey, resp) + }) } // RenewExpose extends the TTL of an active expose session. @@ -134,21 +67,19 @@ func (s *Server) RenewExpose(ctx context.Context, req *proto.EncryptedMessage) ( return nil, err } - _, peer, err := s.authenticateExposePeer(ctx, peerKey) + accountID, peer, err := s.authenticateExposePeer(ctx, peerKey) if err != nil { return nil, err } - key := exposeKey(peer.ID, renewReq.Domain) - val, ok := s.activeExposes.Load(key) - if !ok { - return nil, status.Errorf(codes.NotFound, "no active expose session for domain %s", renewReq.Domain) + reverseProxyMgr := s.getReverseProxyManager() + if reverseProxyMgr == nil { + return nil, status.Errorf(codes.Internal, "reverse proxy manager not available") } - expose := val.(*activeExpose) - expose.mu.Lock() - expose.lastRenewed = time.Now() - expose.mu.Unlock() + if err := reverseProxyMgr.RenewServiceFromPeer(ctx, accountID, peer.ID, renewReq.Domain); err != nil { + return nil, mapExposeError(ctx, err) + } return s.encryptResponse(peerKey, &proto.RenewExposeResponse{}) } @@ -161,55 +92,45 @@ func (s *Server) StopExpose(ctx context.Context, req *proto.EncryptedMessage) (* return nil, err } - _, peer, err := s.authenticateExposePeer(ctx, peerKey) + accountID, peer, err := s.authenticateExposePeer(ctx, peerKey) if err != nil { return nil, err } - key := exposeKey(peer.ID, stopReq.Domain) - val, ok := s.activeExposes.LoadAndDelete(key) - if !ok { - return nil, status.Errorf(codes.NotFound, "no active expose session for domain %s", stopReq.Domain) + reverseProxyMgr := s.getReverseProxyManager() + if reverseProxyMgr == nil { + return nil, status.Errorf(codes.Internal, "reverse proxy manager not available") } - expose := val.(*activeExpose) - s.cleanupExpose(expose, false) + if err := reverseProxyMgr.StopServiceFromPeer(ctx, accountID, peer.ID, stopReq.Domain); err != nil { + return nil, mapExposeError(ctx, err) + } return s.encryptResponse(peerKey, &proto.StopExposeResponse{}) } -// StartExposeReaper starts a background goroutine that reaps expired expose sessions. -func (s *Server) StartExposeReaper(ctx context.Context) { - go func() { - ticker := time.NewTicker(exposeReapInterval) - defer ticker.Stop() +func mapExposeError(ctx context.Context, err error) error { + s, ok := internalStatus.FromError(err) + if !ok { + log.WithContext(ctx).Errorf("expose service error: %v", err) + return status.Errorf(codes.Internal, "internal error") + } - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - s.reapExpiredExposes() - } - } - }() -} - -func (s *Server) reapExpiredExposes() { - s.activeExposes.Range(func(key, val any) bool { - expose := val.(*activeExpose) - expose.mu.Lock() - expired := time.Since(expose.lastRenewed) > exposeTTL - expose.mu.Unlock() - - if expired { - if _, deleted := s.activeExposes.LoadAndDelete(key); deleted { - log.Infof("reaping expired expose session for peer %s, domain %s", expose.peerID, expose.domain) - s.cleanupExpose(expose, true) - } - } - return true - }) + switch s.Type() { + case internalStatus.InvalidArgument: + return status.Errorf(codes.InvalidArgument, "%s", s.Message) + case internalStatus.PermissionDenied: + return status.Errorf(codes.PermissionDenied, "%s", s.Message) + case internalStatus.NotFound: + return status.Errorf(codes.NotFound, "%s", s.Message) + case internalStatus.AlreadyExists: + return status.Errorf(codes.AlreadyExists, "%s", s.Message) + case internalStatus.PreconditionFailed: + return status.Errorf(codes.ResourceExhausted, "%s", s.Message) + default: + log.WithContext(ctx).Errorf("expose service error: %v", err) + return status.Errorf(codes.Internal, "internal error") + } } func (s *Server) encryptResponse(peerKey wgtypes.Key, msg pb.Message) (*proto.EncryptedMessage, error) { @@ -246,47 +167,6 @@ func (s *Server) authenticateExposePeer(ctx context.Context, peerKey wgtypes.Key return accountID, peer, nil } -func (s *Server) deleteExposeService(ctx context.Context, accountID, peerID string, service *reverseproxy.Service) { - reverseProxyMgr := s.getReverseProxyManager() - if reverseProxyMgr == nil { - return - } - if err := reverseProxyMgr.DeleteServiceFromPeer(ctx, accountID, peerID, service.ID); err != nil { - log.WithContext(ctx).Debugf("failed to delete expose service %s: %v", service.ID, err) - } -} - -func (s *Server) cleanupExpose(expose *activeExpose, expired bool) { - bgCtx := context.Background() - - reverseProxyMgr := s.getReverseProxyManager() - if reverseProxyMgr == nil { - log.Errorf("cannot cleanup exposed service %s: reverse proxy manager not available", expose.serviceID) - return - } - - var err error - if expired { - err = reverseProxyMgr.ExpireServiceFromPeer(bgCtx, expose.accountID, expose.peerID, expose.serviceID) - } else { - err = reverseProxyMgr.DeleteServiceFromPeer(bgCtx, expose.accountID, expose.peerID, expose.serviceID) - } - if err != nil { - log.Errorf("failed to delete peer-exposed service %s: %v", expose.serviceID, err) - } -} - -func (s *Server) countPeerExposes(peerID string) int { - count := 0 - s.activeExposes.Range(func(_, val any) bool { - if expose := val.(*activeExpose); expose.peerID == peerID { - count++ - } - return true - }) - return count -} - func (s *Server) getReverseProxyManager() reverseproxy.Manager { s.reverseProxyMu.RLock() defer s.reverseProxyMu.RUnlock() @@ -299,3 +179,14 @@ func (s *Server) SetReverseProxyManager(mgr reverseproxy.Manager) { defer s.reverseProxyMu.Unlock() s.reverseProxyManager = mgr } + +func exposeProtocolToString(p proto.ExposeProtocol) string { + switch p { + case proto.ExposeProtocol_EXPOSE_HTTP: + return "http" + case proto.ExposeProtocol_EXPOSE_HTTPS: + return "https" + default: + return "http" + } +} diff --git a/management/internals/shared/grpc/expose_service_test.go b/management/internals/shared/grpc/expose_service_test.go deleted file mode 100644 index 75a16ae44..000000000 --- a/management/internals/shared/grpc/expose_service_test.go +++ /dev/null @@ -1,242 +0,0 @@ -package grpc - -import ( - "sync" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" -) - -func TestPinValidation(t *testing.T) { - tests := []struct { - pin string - valid bool - }{ - {"123456", true}, - {"000000", true}, - {"12345", false}, - {"1234567", false}, - {"abcdef", false}, - {"12345a", false}, - {"", false}, - {"12 345", false}, - } - - for _, tt := range tests { - assert.Equal(t, tt.valid, pinRegexp.MatchString(tt.pin), "pin %q", tt.pin) - } -} - -func TestExposeKey(t *testing.T) { - assert.Equal(t, "peer1:example.com", exposeKey("peer1", "example.com")) - assert.Equal(t, "peer2:other.com", exposeKey("peer2", "other.com")) - assert.NotEqual(t, exposeKey("peer1", "a.com"), exposeKey("peer1", "b.com")) -} - -func TestCountPeerExposes(t *testing.T) { - s := &Server{} - - // No exposes - assert.Equal(t, 0, s.countPeerExposes("peer1")) - - // Add some exposes for different peers - s.activeExposes.Store("peer1:a.com", &activeExpose{peerID: "peer1"}) - s.activeExposes.Store("peer1:b.com", &activeExpose{peerID: "peer1"}) - s.activeExposes.Store("peer2:a.com", &activeExpose{peerID: "peer2"}) - - assert.Equal(t, 2, s.countPeerExposes("peer1"), "peer1 should have 2 exposes") - assert.Equal(t, 1, s.countPeerExposes("peer2"), "peer2 should have 1 expose") - assert.Equal(t, 0, s.countPeerExposes("peer3"), "peer3 should have 0 exposes") -} - -func TestReapExpiredExposes(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockMgr := reverseproxy.NewMockManager(ctrl) - - s := &Server{} - s.SetReverseProxyManager(mockMgr) - - now := time.Now() - - // Add an expired expose and a still-active one - s.activeExposes.Store("peer1:expired.com", &activeExpose{ - serviceID: "svc-expired", - domain: "expired.com", - accountID: "acct1", - peerID: "peer1", - lastRenewed: now.Add(-2 * exposeTTL), - }) - s.activeExposes.Store("peer1:active.com", &activeExpose{ - serviceID: "svc-active", - domain: "active.com", - accountID: "acct1", - peerID: "peer1", - lastRenewed: now, - }) - - // Expect ExpireServiceFromPeer called only for the expired one - mockMgr.EXPECT(). - ExpireServiceFromPeer(gomock.Any(), "acct1", "peer1", "svc-expired"). - Return(nil) - - s.reapExpiredExposes() - - // Verify expired one is removed - _, exists := s.activeExposes.Load("peer1:expired.com") - assert.False(t, exists, "expired expose should be removed") - - // Verify active one remains - _, exists = s.activeExposes.Load("peer1:active.com") - assert.True(t, exists, "active expose should remain") -} - -func TestCleanupExpose_Delete(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockMgr := reverseproxy.NewMockManager(ctrl) - - s := &Server{} - s.SetReverseProxyManager(mockMgr) - - mockMgr.EXPECT(). - DeleteServiceFromPeer(gomock.Any(), "acct1", "peer1", "svc1"). - Return(nil) - - s.cleanupExpose(&activeExpose{ - serviceID: "svc1", - accountID: "acct1", - peerID: "peer1", - }, false) -} - -func TestCleanupExpose_Expire(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockMgr := reverseproxy.NewMockManager(ctrl) - - s := &Server{} - s.SetReverseProxyManager(mockMgr) - - mockMgr.EXPECT(). - ExpireServiceFromPeer(gomock.Any(), "acct1", "peer1", "svc1"). - Return(nil) - - s.cleanupExpose(&activeExpose{ - serviceID: "svc1", - accountID: "acct1", - peerID: "peer1", - }, true) -} - -func TestCleanupExpose_NilManager(t *testing.T) { - s := &Server{} - // Should not panic when reverse proxy manager is nil - s.cleanupExpose(&activeExpose{ - serviceID: "svc1", - accountID: "acct1", - peerID: "peer1", - }, false) -} - -func TestSetReverseProxyManager(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - s := &Server{} - - // Initially nil - assert.Nil(t, s.getReverseProxyManager()) - - mockMgr := reverseproxy.NewMockManager(ctrl) - s.SetReverseProxyManager(mockMgr) - assert.NotNil(t, s.getReverseProxyManager()) - - // Can set to nil - s.SetReverseProxyManager(nil) - assert.Nil(t, s.getReverseProxyManager()) -} - -func TestReapExpiredExposes_ConcurrentSafety(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockMgr := reverseproxy.NewMockManager(ctrl) - mockMgr.EXPECT(). - ExpireServiceFromPeer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Return(nil). - AnyTimes() - - s := &Server{} - s.SetReverseProxyManager(mockMgr) - - // Pre-populate with expired sessions - for i := range 20 { - peerID := "peer1" - domain := "domain-" + string(rune('a'+i)) - s.activeExposes.Store(exposeKey(peerID, domain), &activeExpose{ - serviceID: "svc-" + domain, - domain: domain, - accountID: "acct1", - peerID: peerID, - lastRenewed: time.Now().Add(-2 * exposeTTL), - }) - } - - // Run reaper concurrently with count - var wg sync.WaitGroup - wg.Add(2) - go func() { - defer wg.Done() - s.reapExpiredExposes() - }() - go func() { - defer wg.Done() - s.countPeerExposes("peer1") - }() - wg.Wait() - - assert.Equal(t, 0, s.countPeerExposes("peer1"), "all expired exposes should be reaped") -} - -func TestActiveExposeMutexProtectsLastRenewed(t *testing.T) { - expose := &activeExpose{ - lastRenewed: time.Now().Add(-1 * time.Hour), - } - - // Simulate concurrent renew and read - var wg sync.WaitGroup - wg.Add(2) - - go func() { - defer wg.Done() - for range 100 { - expose.mu.Lock() - expose.lastRenewed = time.Now() - expose.mu.Unlock() - } - }() - - go func() { - defer wg.Done() - for range 100 { - expose.mu.Lock() - _ = time.Since(expose.lastRenewed) - expose.mu.Unlock() - } - }() - - wg.Wait() - - expose.mu.Lock() - require.False(t, expose.lastRenewed.IsZero(), "lastRenewed should not be zero after concurrent access") - expose.mu.Unlock() -} diff --git a/management/internals/shared/grpc/proxy_group_access_test.go b/management/internals/shared/grpc/proxy_group_access_test.go index 611ee36b6..827897981 100644 --- a/management/internals/shared/grpc/proxy_group_access_test.go +++ b/management/internals/shared/grpc/proxy_group_access_test.go @@ -76,21 +76,19 @@ func (m *mockReverseProxyManager) GetServiceIDByTargetID(_ context.Context, _, _ return "", nil } -func (m *mockReverseProxyManager) ValidateExposePermission(_ context.Context, _, _ string) error { +func (m *mockReverseProxyManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.ExposeServiceRequest) (*reverseproxy.ExposeServiceResponse, error) { + return &reverseproxy.ExposeServiceResponse{}, nil +} + +func (m *mockReverseProxyManager) RenewServiceFromPeer(_ context.Context, _, _, _ string) error { return nil } -func (m *mockReverseProxyManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { - return &reverseproxy.Service{}, nil -} - -func (m *mockReverseProxyManager) DeleteServiceFromPeer(_ context.Context, _, _, _ string) error { +func (m *mockReverseProxyManager) StopServiceFromPeer(_ context.Context, _, _, _ string) error { return nil } -func (m *mockReverseProxyManager) ExpireServiceFromPeer(_ context.Context, _, _, _ string) error { - return nil -} +func (m *mockReverseProxyManager) StartExposeReaper(_ context.Context) {} type mockUsersManager struct { users map[string]*types.User diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index 3df9ce7ba..029d71e2e 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -82,8 +82,6 @@ type Server struct { syncLimEnabled bool syncLim int32 - activeExposes sync.Map - exposeCreateMu sync.Mutex reverseProxyManager reverseproxy.Manager reverseProxyMu sync.RWMutex } diff --git a/management/internals/shared/grpc/validate_session_test.go b/management/internals/shared/grpc/validate_session_test.go index 1e03a461a..640a27bb2 100644 --- a/management/internals/shared/grpc/validate_session_test.go +++ b/management/internals/shared/grpc/validate_session_test.go @@ -196,7 +196,7 @@ func TestValidateSession_ProxyNotFound(t *testing.T) { require.NoError(t, err) assert.False(t, resp.Valid, "Unknown proxy should be denied") - assert.Equal(t, "proxy_not_found", resp.DeniedReason) + assert.Equal(t, "service_not_found", resp.DeniedReason) } func TestValidateSession_InvalidToken(t *testing.T) { @@ -263,6 +263,10 @@ func (m *testValidateSessionProxyManager) DeleteService(_ context.Context, _, _, return nil } +func (m *testValidateSessionProxyManager) DeleteAllServices(_ context.Context, _, _ string) error { + return nil +} + func (m *testValidateSessionProxyManager) SetCertificateIssuedAt(_ context.Context, _, _ string) error { return nil } @@ -295,22 +299,20 @@ func (m *testValidateSessionProxyManager) GetServiceIDByTargetID(_ context.Conte return "", nil } -func (m *testValidateSessionProxyManager) ValidateExposePermission(_ context.Context, _, _ string) error { - return nil -} - -func (m *testValidateSessionProxyManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { +func (m *testValidateSessionProxyManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.ExposeServiceRequest) (*reverseproxy.ExposeServiceResponse, error) { return nil, nil } -func (m *testValidateSessionProxyManager) DeleteServiceFromPeer(_ context.Context, _, _, _ string) error { +func (m *testValidateSessionProxyManager) RenewServiceFromPeer(_ context.Context, _, _, _ string) error { return nil } -func (m *testValidateSessionProxyManager) ExpireServiceFromPeer(_ context.Context, _, _, _ string) error { +func (m *testValidateSessionProxyManager) StopServiceFromPeer(_ context.Context, _, _, _ string) error { return nil } +func (m *testValidateSessionProxyManager) StartExposeReaper(_ context.Context) {} + type testValidateSessionUsersManager struct { store store.Store } diff --git a/management/server/http/handlers/proxy/auth_callback_integration_test.go b/management/server/http/handlers/proxy/auth_callback_integration_test.go index 77d50d818..12634dda4 100644 --- a/management/server/http/handlers/proxy/auth_callback_integration_test.go +++ b/management/server/http/handlers/proxy/auth_callback_integration_test.go @@ -413,22 +413,20 @@ func (m *testServiceManager) GetServiceIDByTargetID(_ context.Context, _, _ stri return "", nil } -func (m *testServiceManager) ValidateExposePermission(_ context.Context, _, _ string) error { - return nil -} - -func (m *testServiceManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { +func (m *testServiceManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.ExposeServiceRequest) (*reverseproxy.ExposeServiceResponse, error) { return nil, nil } -func (m *testServiceManager) DeleteServiceFromPeer(_ context.Context, _, _, _ string) error { +func (m *testServiceManager) RenewServiceFromPeer(_ context.Context, _, _, _ string) error { return nil } -func (m *testServiceManager) ExpireServiceFromPeer(_ context.Context, _, _, _ string) error { +func (m *testServiceManager) StopServiceFromPeer(_ context.Context, _, _, _ string) error { return nil } +func (m *testServiceManager) StartExposeReaper(_ context.Context) {} + func createTestState(t *testing.T, ps *nbgrpc.ProxyServiceServer, redirectURL string) string { t.Helper() diff --git a/proxy/management_integration_test.go b/proxy/management_integration_test.go index 12cec89ff..e91335a81 100644 --- a/proxy/management_integration_test.go +++ b/proxy/management_integration_test.go @@ -247,21 +247,19 @@ func (m *storeBackedServiceManager) GetServiceIDByTargetID(ctx context.Context, return "", nil } -func (m *storeBackedServiceManager) ValidateExposePermission(_ context.Context, _, _ string) error { +func (m *storeBackedServiceManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.ExposeServiceRequest) (*reverseproxy.ExposeServiceResponse, error) { + return &reverseproxy.ExposeServiceResponse{}, nil +} + +func (m *storeBackedServiceManager) RenewServiceFromPeer(_ context.Context, _, _, _ string) error { return nil } -func (m *storeBackedServiceManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { - return &reverseproxy.Service{}, nil -} - -func (m *storeBackedServiceManager) DeleteServiceFromPeer(_ context.Context, _, _, _ string) error { +func (m *storeBackedServiceManager) StopServiceFromPeer(_ context.Context, _, _, _ string) error { return nil } -func (m *storeBackedServiceManager) ExpireServiceFromPeer(_ context.Context, _, _, _ string) error { - return nil -} +func (m *storeBackedServiceManager) StartExposeReaper(_ context.Context) {} func strPtr(s string) *string { return &s From f341d69314e6184d0a201e685953efdaeb9b32eb Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Tue, 24 Feb 2026 15:21:14 +0100 Subject: [PATCH 166/374] [management] Add custom domain counts and service metrics to self-hosted metrics (#5414) --- management/server/metrics/selfhosted.go | 62 ++++++++++++++++ management/server/metrics/selfhosted_test.go | 74 ++++++++++++++++++++ management/server/store/file_store.go | 5 ++ management/server/store/sql_store.go | 12 ++++ management/server/store/store.go | 3 + management/server/store/store_mock.go | 16 +++++ 6 files changed, 172 insertions(+) diff --git a/management/server/metrics/selfhosted.go b/management/server/metrics/selfhosted.go index f7d07f3a0..9b1383c6c 100644 --- a/management/server/metrics/selfhosted.go +++ b/management/server/metrics/selfhosted.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/go-version" "github.com/netbirdio/netbird/idp/dex" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/management/server/types" @@ -51,6 +52,7 @@ type properties map[string]interface{} type DataSource interface { GetAllAccounts(ctx context.Context) []*types.Account GetStoreEngine() types.Engine + GetCustomDomainsCounts(ctx context.Context) (total int64, validated int64, err error) } // ConnManager peer connection manager that holds state for current active connections @@ -211,6 +213,16 @@ func (w *Worker) generateProperties(ctx context.Context) properties { localUsers int idpUsers int embeddedIdpTypes map[string]int + services int + servicesEnabled int + servicesTargets int + servicesStatusActive int + servicesStatusPending int + servicesStatusError int + servicesTargetType map[string]int + servicesAuthPassword int + servicesAuthPin int + servicesAuthOIDC int ) start := time.Now() metricsProperties := make(properties) @@ -220,10 +232,13 @@ func (w *Worker) generateProperties(ctx context.Context) properties { rulesDirection = make(map[string]int) activeUsersLastDay = make(map[string]struct{}) embeddedIdpTypes = make(map[string]int) + servicesTargetType = make(map[string]int) uptime = time.Since(w.startupTime).Seconds() connections := w.connManager.GetAllConnectedPeers() version = nbversion.NetbirdVersion() + customDomains, customDomainsValidated, _ := w.dataSource.GetCustomDomainsCounts(ctx) + for _, account := range w.dataSource.GetAllAccounts(ctx) { accounts++ @@ -335,6 +350,37 @@ func (w *Worker) generateProperties(ctx context.Context) properties { peerActiveVersions = append(peerActiveVersions, peer.Meta.WtVersion) } } + + for _, service := range account.Services { + services++ + if service.Enabled { + servicesEnabled++ + } + servicesTargets += len(service.Targets) + + switch reverseproxy.ProxyStatus(service.Meta.Status) { + case reverseproxy.StatusActive: + servicesStatusActive++ + case reverseproxy.StatusPending: + servicesStatusPending++ + case reverseproxy.StatusError, reverseproxy.StatusCertificateFailed, reverseproxy.StatusTunnelNotCreated: + servicesStatusError++ + } + + for _, target := range service.Targets { + servicesTargetType[target.TargetType]++ + } + + if service.Auth.PasswordAuth != nil && service.Auth.PasswordAuth.Enabled { + servicesAuthPassword++ + } + if service.Auth.PinAuth != nil && service.Auth.PinAuth.Enabled { + servicesAuthPin++ + } + if service.Auth.BearerAuth != nil && service.Auth.BearerAuth.Enabled { + servicesAuthOIDC++ + } + } } minActivePeerVersion, maxActivePeerVersion := getMinMaxVersion(peerActiveVersions) @@ -375,6 +421,22 @@ func (w *Worker) generateProperties(ctx context.Context) properties { metricsProperties["idp_users_count"] = idpUsers metricsProperties["embedded_idp_count"] = len(embeddedIdpTypes) + metricsProperties["services"] = services + metricsProperties["services_enabled"] = servicesEnabled + metricsProperties["services_targets"] = servicesTargets + metricsProperties["services_status_active"] = servicesStatusActive + metricsProperties["services_status_pending"] = servicesStatusPending + metricsProperties["services_status_error"] = servicesStatusError + metricsProperties["services_auth_password"] = servicesAuthPassword + metricsProperties["services_auth_pin"] = servicesAuthPin + metricsProperties["services_auth_oidc"] = servicesAuthOIDC + metricsProperties["custom_domains"] = customDomains + metricsProperties["custom_domains_validated"] = customDomainsValidated + + for targetType, count := range servicesTargetType { + metricsProperties["services_target_type_"+targetType] = count + } + for idpType, count := range embeddedIdpTypes { metricsProperties["embedded_idp_users_"+idpType] = count } diff --git a/management/server/metrics/selfhosted_test.go b/management/server/metrics/selfhosted_test.go index 504d228f7..bc4d68178 100644 --- a/management/server/metrics/selfhosted_test.go +++ b/management/server/metrics/selfhosted_test.go @@ -6,6 +6,7 @@ import ( nbdns "github.com/netbirdio/netbird/dns" "github.com/netbirdio/netbird/idp/dex" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" networkTypes "github.com/netbirdio/netbird/management/server/networks/types" @@ -115,6 +116,31 @@ func (mockDatasource) GetAllAccounts(_ context.Context) []*types.Account { }, }, }, + Services: []*reverseproxy.Service{ + { + ID: "svc1", + Enabled: true, + Targets: []*reverseproxy.Target{ + {TargetType: "peer"}, + {TargetType: "host"}, + }, + Auth: reverseproxy.AuthConfig{ + PasswordAuth: &reverseproxy.PasswordAuthConfig{Enabled: true}, + }, + Meta: reverseproxy.ServiceMeta{Status: string(reverseproxy.StatusActive)}, + }, + { + ID: "svc2", + Enabled: false, + Targets: []*reverseproxy.Target{ + {TargetType: "domain"}, + }, + Auth: reverseproxy.AuthConfig{ + BearerAuth: &reverseproxy.BearerAuthConfig{Enabled: true}, + }, + Meta: reverseproxy.ServiceMeta{Status: string(reverseproxy.StatusPending)}, + }, + }, }, { Id: "2", @@ -215,6 +241,11 @@ func (mockDatasource) GetStoreEngine() types.Engine { return types.FileStoreEngine } +// GetCustomDomainsCounts returns test custom domain counts. +func (mockDatasource) GetCustomDomainsCounts(_ context.Context) (int64, int64, error) { + return 3, 2, nil +} + // TestGenerateProperties tests and validate the properties generation by using the mockDatasource for the Worker.generateProperties func TestGenerateProperties(t *testing.T) { ds := mockDatasource{} @@ -347,6 +378,49 @@ func TestGenerateProperties(t *testing.T) { if properties["embedded_idp_count"] != 1 { t.Errorf("expected 1 embedded_idp_count, got %v", properties["embedded_idp_count"]) } + + if properties["services"] != 2 { + t.Errorf("expected 2 services, got %v", properties["services"]) + } + if properties["services_enabled"] != 1 { + t.Errorf("expected 1 services_enabled, got %v", properties["services_enabled"]) + } + if properties["services_targets"] != 3 { + t.Errorf("expected 3 services_targets, got %v", properties["services_targets"]) + } + if properties["services_status_active"] != 1 { + t.Errorf("expected 1 services_status_active, got %v", properties["services_status_active"]) + } + if properties["services_status_pending"] != 1 { + t.Errorf("expected 1 services_status_pending, got %v", properties["services_status_pending"]) + } + if properties["services_status_error"] != 0 { + t.Errorf("expected 0 services_status_error, got %v", properties["services_status_error"]) + } + if properties["services_target_type_peer"] != 1 { + t.Errorf("expected 1 services_target_type_peer, got %v", properties["services_target_type_peer"]) + } + if properties["services_target_type_host"] != 1 { + t.Errorf("expected 1 services_target_type_host, got %v", properties["services_target_type_host"]) + } + if properties["services_target_type_domain"] != 1 { + t.Errorf("expected 1 services_target_type_domain, got %v", properties["services_target_type_domain"]) + } + if properties["services_auth_password"] != 1 { + t.Errorf("expected 1 services_auth_password, got %v", properties["services_auth_password"]) + } + if properties["services_auth_oidc"] != 1 { + t.Errorf("expected 1 services_auth_oidc, got %v", properties["services_auth_oidc"]) + } + if properties["services_auth_pin"] != 0 { + t.Errorf("expected 0 services_auth_pin, got %v", properties["services_auth_pin"]) + } + if properties["custom_domains"] != int64(3) { + t.Errorf("expected 3 custom_domains, got %v", properties["custom_domains"]) + } + if properties["custom_domains_validated"] != int64(2) { + t.Errorf("expected 2 custom_domains_validated, got %v", properties["custom_domains_validated"]) + } } func TestExtractIdpType(t *testing.T) { diff --git a/management/server/store/file_store.go b/management/server/store/file_store.go index 8db37ec30..81185b020 100644 --- a/management/server/store/file_store.go +++ b/management/server/store/file_store.go @@ -269,3 +269,8 @@ func (s *FileStore) GetStoreEngine() types.Engine { func (s *FileStore) SetFieldEncrypt(_ *crypt.FieldEncrypt) { // no-op: FileStore stores data in plaintext JSON; encryption is not supported } + +// GetCustomDomainsCounts is a no-op for FileStore as it doesn't support custom domains. +func (s *FileStore) GetCustomDomainsCounts(_ context.Context) (int64, int64, error) { + return 0, 0, nil +} diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index e5edbae34..92524e49a 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -1007,6 +1007,18 @@ func (s *SqlStore) GetAccountsCounter(ctx context.Context) (int64, error) { return count, nil } +// GetCustomDomainsCounts returns the total and validated custom domain counts. +func (s *SqlStore) GetCustomDomainsCounts(ctx context.Context) (int64, int64, error) { + var total, validated int64 + if err := s.db.WithContext(ctx).Model(&domain.Domain{}).Count(&total).Error; err != nil { + return 0, 0, err + } + if err := s.db.WithContext(ctx).Model(&domain.Domain{}).Where("validated = ?", true).Count(&validated).Error; err != nil { + return 0, 0, err + } + return total, validated, nil +} + func (s *SqlStore) GetAllAccounts(ctx context.Context) (all []*types.Account) { var accounts []types.Account result := s.db.Find(&accounts) diff --git a/management/server/store/store.go b/management/server/store/store.go index a79c57f61..d5de63c03 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -272,6 +272,9 @@ type Store interface { GetAccountAccessLogs(ctx context.Context, lockStrength LockingStrength, accountID string, filter accesslogs.AccessLogFilter) ([]*accesslogs.AccessLogEntry, int64, error) DeleteOldAccessLogs(ctx context.Context, olderThan time.Time) (int64, error) GetServiceTargetByTargetID(ctx context.Context, lockStrength LockingStrength, accountID string, targetID string) (*reverseproxy.Target, error) + + // GetCustomDomainsCounts returns the total and validated custom domain counts. + GetCustomDomainsCounts(ctx context.Context) (total int64, validated int64, err error) } const ( diff --git a/management/server/store/store_mock.go b/management/server/store/store_mock.go index 8baca36c0..d3de457e2 100644 --- a/management/server/store/store_mock.go +++ b/management/server/store/store_mock.go @@ -1872,6 +1872,22 @@ func (mr *MockStoreMockRecorder) GetServiceTargetByTargetID(ctx, lockStrength, a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceTargetByTargetID", reflect.TypeOf((*MockStore)(nil).GetServiceTargetByTargetID), ctx, lockStrength, accountID, targetID) } +// GetCustomDomainsCounts mocks base method. +func (m *MockStore) GetCustomDomainsCounts(ctx context.Context) (int64, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCustomDomainsCounts", ctx) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(int64) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetCustomDomainsCounts indicates an expected call of GetCustomDomainsCounts. +func (mr *MockStoreMockRecorder) GetCustomDomainsCounts(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCustomDomainsCounts", reflect.TypeOf((*MockStore)(nil).GetCustomDomainsCounts), ctx) +} + // GetServices mocks base method. func (m *MockStore) GetServices(ctx context.Context, lockStrength LockingStrength) ([]*reverseproxy.Service, error) { m.ctrl.T.Helper() From d18747e846f9b2c4c72077c95cafd0cba296311e Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 24 Feb 2026 16:48:38 +0100 Subject: [PATCH 167/374] [client] Exclude Flow domain from caching to prevent TLS failures (#5433) * Exclude Flow domain from caching to prevent TLS failures due to stale records. * Fix test --- client/internal/dns/mgmt/mgmt.go | 6 +++--- client/internal/dns/mgmt/mgmt_test.go | 9 +++++---- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/client/internal/dns/mgmt/mgmt.go b/client/internal/dns/mgmt/mgmt.go index d01be0c2c..314af51d9 100644 --- a/client/internal/dns/mgmt/mgmt.go +++ b/client/internal/dns/mgmt/mgmt.go @@ -376,9 +376,9 @@ func (m *Resolver) extractDomainsFromServerDomains(serverDomains dnsconfig.Serve } } - if serverDomains.Flow != "" { - domains = append(domains, serverDomains.Flow) - } + // Flow receiver domain is intentionally excluded from caching. + // Cloud providers may rotate the IP behind this domain; a stale cached record + // causes TLS certificate verification failures on reconnect. for _, stun := range serverDomains.Stuns { if stun != "" { diff --git a/client/internal/dns/mgmt/mgmt_test.go b/client/internal/dns/mgmt/mgmt_test.go index 99d289871..9e8a746f3 100644 --- a/client/internal/dns/mgmt/mgmt_test.go +++ b/client/internal/dns/mgmt/mgmt_test.go @@ -391,7 +391,8 @@ func TestResolver_PartialUpdateAddsNewTypePreservesExisting(t *testing.T) { } assert.Len(t, resolver.GetCachedDomains(), 3) - // Update with partial ServerDomains (only flow domain - new type, should preserve all existing) + // Update with partial ServerDomains (only flow domain - flow is intentionally excluded from + // caching to prevent TLS failures from stale records, so all existing domains are preserved) partialDomains := dnsconfig.ServerDomains{ Flow: "github.com", } @@ -400,10 +401,10 @@ func TestResolver_PartialUpdateAddsNewTypePreservesExisting(t *testing.T) { t.Skipf("Skipping test due to DNS resolution failure: %v", err) } - assert.Len(t, removedDomains, 0, "Should not remove any domains when adding new type") + assert.Len(t, removedDomains, 0, "Should not remove any domains when only flow domain is provided") finalDomains := resolver.GetCachedDomains() - assert.Len(t, finalDomains, 4, "Should have all original domains plus new flow domain") + assert.Len(t, finalDomains, 3, "Flow domain is not cached; all original domains should be preserved") domainStrings := make([]string, len(finalDomains)) for i, d := range finalDomains { @@ -412,5 +413,5 @@ func TestResolver_PartialUpdateAddsNewTypePreservesExisting(t *testing.T) { assert.Contains(t, domainStrings, "example.org") assert.Contains(t, domainStrings, "google.com") assert.Contains(t, domainStrings, "cloudflare.com") - assert.Contains(t, domainStrings, "github.com") + assert.NotContains(t, domainStrings, "github.com") } From ef82905526a5944e9aac96e04b8b7ee67d27c9b9 Mon Sep 17 00:00:00 2001 From: shuuri-labs <61762328+shuuri-labs@users.noreply.github.com> Date: Tue, 24 Feb 2026 17:02:06 +0100 Subject: [PATCH 168/374] [client] Add non default socket file discovery (#5425) - Automatic Unix daemon address discovery: if the default socket is missing, the client can find and use a single available socket. - Client startup now resolves daemon addresses more robustly while preserving non-Unix behavior. --- client/cmd/root.go | 21 +++- client/internal/daemonaddr/resolve.go | 60 ++++++++++ client/internal/daemonaddr/resolve_stub.go | 8 ++ client/internal/daemonaddr/resolve_test.go | 121 +++++++++++++++++++++ client/ssh/client/client.go | 3 +- 5 files changed, 211 insertions(+), 2 deletions(-) create mode 100644 client/internal/daemonaddr/resolve.go create mode 100644 client/internal/daemonaddr/resolve_stub.go create mode 100644 client/internal/daemonaddr/resolve_test.go diff --git a/client/cmd/root.go b/client/cmd/root.go index 961abd54e..aa5b98dfd 100644 --- a/client/cmd/root.go +++ b/client/cmd/root.go @@ -22,6 +22,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" + daddr "github.com/netbirdio/netbird/client/internal/daemonaddr" "github.com/netbirdio/netbird/client/internal/profilemanager" ) @@ -80,6 +81,15 @@ var ( Short: "", Long: "", SilenceUsage: true, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + SetFlagsFromEnvVars(cmd.Root()) + + // Don't resolve for service commands — they create the socket, not connect to it. + if !isServiceCmd(cmd) { + daemonAddr = daddr.ResolveUnixDaemonAddr(daemonAddr) + } + return nil + }, } ) @@ -386,7 +396,6 @@ func migrateToNetbird(oldPath, newPath string) bool { } func getClient(cmd *cobra.Command) (*grpc.ClientConn, error) { - SetFlagsFromEnvVars(rootCmd) cmd.SetOut(cmd.OutOrStdout()) conn, err := DialClientGRPCServer(cmd.Context(), daemonAddr) @@ -399,3 +408,13 @@ func getClient(cmd *cobra.Command) (*grpc.ClientConn, error) { return conn, nil } + +// isServiceCmd returns true if cmd is the "service" command or a child of it. +func isServiceCmd(cmd *cobra.Command) bool { + for c := cmd; c != nil; c = c.Parent() { + if c.Name() == "service" { + return true + } + } + return false +} diff --git a/client/internal/daemonaddr/resolve.go b/client/internal/daemonaddr/resolve.go new file mode 100644 index 000000000..b7877d8a9 --- /dev/null +++ b/client/internal/daemonaddr/resolve.go @@ -0,0 +1,60 @@ +//go:build !windows && !ios && !android + +package daemonaddr + +import ( + "os" + "path/filepath" + "strings" + + log "github.com/sirupsen/logrus" +) + +var scanDir = "/var/run/netbird" + +// setScanDir overrides the scan directory (used by tests). +func setScanDir(dir string) { + scanDir = dir +} + +// ResolveUnixDaemonAddr checks whether the default Unix socket exists and, if not, +// scans /var/run/netbird/ for a single .sock file to use instead. This handles the +// mismatch between the netbird@.service template (which places the socket under +// /var/run/netbird/.sock) and the CLI default (/var/run/netbird.sock). +func ResolveUnixDaemonAddr(addr string) string { + if !strings.HasPrefix(addr, "unix://") { + return addr + } + + sockPath := strings.TrimPrefix(addr, "unix://") + if _, err := os.Stat(sockPath); err == nil { + return addr + } + + entries, err := os.ReadDir(scanDir) + if err != nil { + return addr + } + + var found []string + for _, e := range entries { + if e.IsDir() { + continue + } + if strings.HasSuffix(e.Name(), ".sock") { + found = append(found, filepath.Join(scanDir, e.Name())) + } + } + + switch len(found) { + case 1: + resolved := "unix://" + found[0] + log.Infof("Default daemon socket not found, using discovered socket: %s", resolved) + return resolved + case 0: + return addr + default: + log.Warnf("Default daemon socket not found and multiple sockets discovered in %s; pass --daemon-addr explicitly", scanDir) + return addr + } +} diff --git a/client/internal/daemonaddr/resolve_stub.go b/client/internal/daemonaddr/resolve_stub.go new file mode 100644 index 000000000..080b7171a --- /dev/null +++ b/client/internal/daemonaddr/resolve_stub.go @@ -0,0 +1,8 @@ +//go:build windows || ios || android + +package daemonaddr + +// ResolveUnixDaemonAddr is a no-op on platforms that don't use Unix sockets. +func ResolveUnixDaemonAddr(addr string) string { + return addr +} diff --git a/client/internal/daemonaddr/resolve_test.go b/client/internal/daemonaddr/resolve_test.go new file mode 100644 index 000000000..3df67708a --- /dev/null +++ b/client/internal/daemonaddr/resolve_test.go @@ -0,0 +1,121 @@ +//go:build !windows && !ios && !android + +package daemonaddr + +import ( + "os" + "path/filepath" + "testing" +) + +// createSockFile creates a regular file with a .sock extension. +// ResolveUnixDaemonAddr uses os.Stat (not net.Dial), so a regular file is +// sufficient and avoids Unix socket path-length limits on macOS. +func createSockFile(t *testing.T, path string) { + t.Helper() + if err := os.WriteFile(path, nil, 0o600); err != nil { + t.Fatalf("failed to create test sock file at %s: %v", path, err) + } +} + +func TestResolveUnixDaemonAddr_DefaultExists(t *testing.T) { + tmp := t.TempDir() + sock := filepath.Join(tmp, "netbird.sock") + createSockFile(t, sock) + + addr := "unix://" + sock + got := ResolveUnixDaemonAddr(addr) + if got != addr { + t.Errorf("expected %s, got %s", addr, got) + } +} + +func TestResolveUnixDaemonAddr_SingleDiscovered(t *testing.T) { + tmp := t.TempDir() + + // Default socket does not exist + defaultAddr := "unix://" + filepath.Join(tmp, "netbird.sock") + + // Create a scan dir with one socket + sd := filepath.Join(tmp, "netbird") + if err := os.MkdirAll(sd, 0o755); err != nil { + t.Fatal(err) + } + instanceSock := filepath.Join(sd, "main.sock") + createSockFile(t, instanceSock) + + origScanDir := scanDir + setScanDir(sd) + t.Cleanup(func() { setScanDir(origScanDir) }) + + got := ResolveUnixDaemonAddr(defaultAddr) + expected := "unix://" + instanceSock + if got != expected { + t.Errorf("expected %s, got %s", expected, got) + } +} + +func TestResolveUnixDaemonAddr_MultipleDiscovered(t *testing.T) { + tmp := t.TempDir() + + defaultAddr := "unix://" + filepath.Join(tmp, "netbird.sock") + + sd := filepath.Join(tmp, "netbird") + if err := os.MkdirAll(sd, 0o755); err != nil { + t.Fatal(err) + } + createSockFile(t, filepath.Join(sd, "main.sock")) + createSockFile(t, filepath.Join(sd, "other.sock")) + + origScanDir := scanDir + setScanDir(sd) + t.Cleanup(func() { setScanDir(origScanDir) }) + + got := ResolveUnixDaemonAddr(defaultAddr) + if got != defaultAddr { + t.Errorf("expected original %s, got %s", defaultAddr, got) + } +} + +func TestResolveUnixDaemonAddr_NoSocketsFound(t *testing.T) { + tmp := t.TempDir() + + defaultAddr := "unix://" + filepath.Join(tmp, "netbird.sock") + + sd := filepath.Join(tmp, "netbird") + if err := os.MkdirAll(sd, 0o755); err != nil { + t.Fatal(err) + } + + origScanDir := scanDir + setScanDir(sd) + t.Cleanup(func() { setScanDir(origScanDir) }) + + got := ResolveUnixDaemonAddr(defaultAddr) + if got != defaultAddr { + t.Errorf("expected original %s, got %s", defaultAddr, got) + } +} + +func TestResolveUnixDaemonAddr_NonUnixAddr(t *testing.T) { + addr := "tcp://127.0.0.1:41731" + got := ResolveUnixDaemonAddr(addr) + if got != addr { + t.Errorf("expected %s, got %s", addr, got) + } +} + +func TestResolveUnixDaemonAddr_ScanDirMissing(t *testing.T) { + tmp := t.TempDir() + + defaultAddr := "unix://" + filepath.Join(tmp, "netbird.sock") + + origScanDir := scanDir + setScanDir(filepath.Join(tmp, "nonexistent")) + t.Cleanup(func() { setScanDir(origScanDir) }) + + got := ResolveUnixDaemonAddr(defaultAddr) + if got != defaultAddr { + t.Errorf("expected original %s, got %s", defaultAddr, got) + } +} diff --git a/client/ssh/client/client.go b/client/ssh/client/client.go index 342da7303..7f72a72cf 100644 --- a/client/ssh/client/client.go +++ b/client/ssh/client/client.go @@ -19,6 +19,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" + "github.com/netbirdio/netbird/client/internal/daemonaddr" "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/proto" nbssh "github.com/netbirdio/netbird/client/ssh" @@ -268,7 +269,7 @@ func getDefaultDaemonAddr() string { if runtime.GOOS == "windows" { return DefaultDaemonAddrWindows } - return DefaultDaemonAddr + return daemonaddr.ResolveUnixDaemonAddr(DefaultDaemonAddr) } // DialOptions contains options for SSH connections From afe6d9fca4904bb57e053e21065e77c75ca4e9e2 Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Tue, 24 Feb 2026 19:19:43 +0100 Subject: [PATCH 169/374] [management] Prevent deletion of groups linked to flow groups (#5439) --- management/server/group.go | 13 +++++-- management/server/group_test.go | 63 +++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+), 2 deletions(-) diff --git a/management/server/group.go b/management/server/group.go index 9fc8db120..326b167cf 100644 --- a/management/server/group.go +++ b/management/server/group.go @@ -425,6 +425,11 @@ func (am *DefaultAccountManager) DeleteGroups(ctx context.Context, accountID, us var groupIDsToDelete []string var deletedGroups []*types.Group + extraSettings, err := am.settingsManager.GetExtraSettings(ctx, accountID) + if err != nil { + return err + } + err = am.Store.ExecuteInTransaction(ctx, func(transaction store.Store) error { for _, groupID := range groupIDs { group, err := transaction.GetGroupByID(ctx, store.LockingStrengthNone, accountID, groupID) @@ -433,7 +438,7 @@ func (am *DefaultAccountManager) DeleteGroups(ctx context.Context, accountID, us continue } - if err := validateDeleteGroup(ctx, transaction, group, userID); err != nil { + if err = validateDeleteGroup(ctx, transaction, group, userID, extraSettings.FlowGroups); err != nil { allErrors = errors.Join(allErrors, err) continue } @@ -621,7 +626,7 @@ func validateNewGroup(ctx context.Context, transaction store.Store, accountID st return nil } -func validateDeleteGroup(ctx context.Context, transaction store.Store, group *types.Group, userID string) error { +func validateDeleteGroup(ctx context.Context, transaction store.Store, group *types.Group, userID string, flowGroups []string) error { // disable a deleting integration group if the initiator is not an admin service user if group.Issued == types.GroupIssuedIntegration { executingUser, err := transaction.GetUserByUserID(ctx, store.LockingStrengthNone, userID) @@ -641,6 +646,10 @@ func validateDeleteGroup(ctx context.Context, transaction store.Store, group *ty return &GroupLinkError{"network resource", group.Resources[0].ID} } + if slices.Contains(flowGroups, group.ID) { + return &GroupLinkError{"settings", "traffic event logging"} + } + if isLinked, linkedRoute := isGroupLinkedToRoute(ctx, transaction, group.AccountID, group.ID); isLinked { return &GroupLinkError{"route", string(linkedRoute.NetID)} } diff --git a/management/server/group_test.go b/management/server/group_test.go index dba917dbb..dd6869d50 100644 --- a/management/server/group_test.go +++ b/management/server/group_test.go @@ -12,6 +12,7 @@ import ( "testing" "time" + "github.com/golang/mock/gomock" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -26,6 +27,7 @@ import ( networkTypes "github.com/netbirdio/netbird/management/server/networks/types" peer2 "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/server/permissions" + "github.com/netbirdio/netbird/management/server/settings" "github.com/netbirdio/netbird/management/server/store" "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/route" @@ -284,6 +286,67 @@ func TestDefaultAccountManager_DeleteGroups(t *testing.T) { } } +func TestDefaultAccountManager_DeleteGroupLinkedToFlowGroup(t *testing.T) { + am, _, err := createManager(t) + require.NoError(t, err) + + ctrl := gomock.NewController(t) + settingsMock := settings.NewMockManager(ctrl) + settingsMock.EXPECT(). + GetExtraSettings(gomock.Any(), gomock.Any()). + Return(&types.ExtraSettings{FlowGroups: []string{"grp-for-flow"}}, nil). + AnyTimes() + settingsMock.EXPECT(). + UpdateExtraSettings(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + Return(false, nil). + AnyTimes() + am.settingsManager = settingsMock + + _, account, err := initTestGroupAccount(am) + require.NoError(t, err) + + grp := &types.Group{ + ID: "grp-for-flow", + AccountID: account.Id, + Name: "Group for flow", + Issued: types.GroupIssuedAPI, + Peers: make([]string, 0), + } + require.NoError(t, am.CreateGroup(context.Background(), account.Id, groupAdminUserID, grp)) + + err = am.DeleteGroup(context.Background(), account.Id, groupAdminUserID, "grp-for-flow") + require.Error(t, err) + + var gErr *GroupLinkError + require.ErrorAs(t, err, &gErr) + assert.Equal(t, "settings", gErr.Resource) + assert.Equal(t, "traffic event logging", gErr.Name) + + group, err := am.GetGroup(context.Background(), account.Id, "grp-for-flow", groupAdminUserID) + require.NoError(t, err) + assert.NotNil(t, group) + + regularGrp := &types.Group{ + ID: "grp-regular", + AccountID: account.Id, + Name: "Regular group", + Issued: types.GroupIssuedAPI, + Peers: make([]string, 0), + } + err = am.CreateGroup(context.Background(), account.Id, groupAdminUserID, regularGrp) + require.NoError(t, err) + + err = am.DeleteGroups(context.Background(), account.Id, groupAdminUserID, []string{"grp-for-flow", "grp-regular"}) + require.Error(t, err) + + group, err = am.GetGroup(context.Background(), account.Id, "grp-for-flow", groupAdminUserID) + require.NoError(t, err) + assert.NotNil(t, group) + + _, err = am.GetGroup(context.Background(), account.Id, "grp-regular", groupAdminUserID) + assert.Error(t, err) +} + func initTestGroupAccount(am *DefaultAccountManager) (*DefaultAccountManager, *types.Account, error) { accountID := "testingAcc" domain := "example.com" From 9a6a72e88ed35bbf227be79fe099ff3bde79a945 Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Tue, 24 Feb 2026 20:47:41 +0100 Subject: [PATCH 170/374] [management] Fix user update permission validation (#5441) --- management/server/user.go | 9 +++++---- management/server/user_test.go | 23 +---------------------- 2 files changed, 6 insertions(+), 26 deletions(-) diff --git a/management/server/user.go b/management/server/user.go index 924efc1e4..327aec2d0 100644 --- a/management/server/user.go +++ b/management/server/user.go @@ -742,6 +742,11 @@ func (am *DefaultAccountManager) processUserUpdate(ctx context.Context, transact if err != nil { return false, nil, nil, nil, fmt.Errorf("failed to re-read initiator user in transaction: %w", err) } + + // Ensure the initiator still has admin privileges + if initiatorUser.HasAdminPower() && !freshInitiator.HasAdminPower() { + return false, nil, nil, nil, status.Errorf(status.PermissionDenied, "initiator role was changed during request processing") + } initiatorUser = freshInitiator } @@ -872,10 +877,6 @@ func validateUserUpdate(groupsMap map[string]*types.Group, initiatorUser, oldUse return nil } - if !initiatorUser.HasAdminPower() { - return status.Errorf(status.PermissionDenied, "only admins and owners can update users") - } - if initiatorUser.HasAdminPower() && initiatorUser.Id == update.Id && oldUser.Blocked != update.Blocked { return status.Errorf(status.PermissionDenied, "admins can't block or unblock themselves") } diff --git a/management/server/user_test.go b/management/server/user_test.go index 72a19a9a5..800d2406c 100644 --- a/management/server/user_test.go +++ b/management/server/user_test.go @@ -2032,27 +2032,6 @@ func TestUser_Operations_WithEmbeddedIDP(t *testing.T) { }) } -func TestValidateUserUpdate_RejectsNonAdminInitiator(t *testing.T) { - groupsMap := map[string]*types.Group{} - - initiator := &types.User{ - Id: "initiator", - Role: types.UserRoleUser, - } - oldUser := &types.User{ - Id: "target", - Role: types.UserRoleUser, - } - update := &types.User{ - Id: "target", - Role: types.UserRoleOwner, - } - - err := validateUserUpdate(groupsMap, initiator, oldUser, update) - require.Error(t, err, "regular user should not be able to promote to owner") - assert.Contains(t, err.Error(), "only admins and owners can update users") -} - func TestProcessUserUpdate_RejectsStaleInitiatorRole(t *testing.T) { s, cleanup, err := store.NewTestStoreFromSQL(context.Background(), "", t.TempDir()) require.NoError(t, err) @@ -2109,7 +2088,7 @@ func TestProcessUserUpdate_RejectsStaleInitiatorRole(t *testing.T) { }) require.Error(t, err, "processUserUpdate should reject stale initiator whose role was demoted") - assert.Contains(t, err.Error(), "only admins and owners can update users") + assert.Contains(t, err.Error(), "initiator role was changed during request processing") targetUser, err := s.GetUserByUserID(context.Background(), store.LockingStrengthNone, targetID) require.NoError(t, err) From c2c4d9d336426b3b43dac7a507fe3b601399bb15 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Thu, 26 Feb 2026 16:47:02 +0100 Subject: [PATCH 171/374] [client] Fix Server mutex held across waitForUp in Up() (#5460) Up() acquired s.mutex with a deferred unlock, then called waitForUp() while still holding the lock. waitForUp() blocks for up to 50 seconds waiting on clientRunningChan/clientGiveUpChan, starving all concurrent gRPC calls that require the same mutex (Status, ListProfiles, etc.). Replace the deferred unlock with explicit s.mutex.Unlock() on every early-return path and immediately before waitForUp(), matching the pattern already used by the clientRunning==true branch. --- client/server/server.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/client/server/server.go b/client/server/server.go index 0466630c5..cab94238f 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -641,8 +641,6 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR return s.waitForUp(callerCtx) } - defer s.mutex.Unlock() - if err := restoreResidualState(callerCtx, s.profileManager.GetStatePath()); err != nil { log.Warnf(errRestoreResidualState, err) } @@ -654,10 +652,12 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR // not in the progress or already successfully established connection. status, err := state.Status() if err != nil { + s.mutex.Unlock() return nil, err } if status != internal.StatusIdle { + s.mutex.Unlock() return nil, fmt.Errorf("up already in progress: current status %s", status) } @@ -674,17 +674,20 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR s.actCancel = cancel if s.config == nil { + s.mutex.Unlock() return nil, fmt.Errorf("config is not defined, please call login command first") } activeProf, err := s.profileManager.GetActiveProfileState() if err != nil { + s.mutex.Unlock() log.Errorf("failed to get active profile state: %v", err) return nil, fmt.Errorf("failed to get active profile state: %w", err) } if msg != nil && msg.ProfileName != nil { if err := s.switchProfileIfNeeded(*msg.ProfileName, msg.Username, activeProf); err != nil { + s.mutex.Unlock() log.Errorf("failed to switch profile: %v", err) return nil, fmt.Errorf("failed to switch profile: %w", err) } @@ -692,6 +695,7 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR activeProf, err = s.profileManager.GetActiveProfileState() if err != nil { + s.mutex.Unlock() log.Errorf("failed to get active profile state: %v", err) return nil, fmt.Errorf("failed to get active profile state: %w", err) } @@ -700,6 +704,7 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR config, _, err := s.getConfig(activeProf) if err != nil { + s.mutex.Unlock() log.Errorf("failed to get active profile config: %v", err) return nil, fmt.Errorf("failed to get active profile config: %w", err) } @@ -718,6 +723,7 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR } go s.connectWithRetryRuns(ctx, s.config, s.statusRecorder, doAutoUpdate, s.clientRunningChan, s.clientGiveUpChan) + s.mutex.Unlock() return s.waitForUp(callerCtx) } From 333e0450993354323c5d181ee45730b9e7e361f1 Mon Sep 17 00:00:00 2001 From: shuuri-labs <61762328+shuuri-labs@users.noreply.github.com> Date: Thu, 26 Feb 2026 17:51:38 +0100 Subject: [PATCH 172/374] Lower socket auto-discovery log from Info to Debug (#5463) The discovery message was printing on every CLI invocation, which is noisy for users on distros using the systemd template. --- client/internal/daemonaddr/resolve.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/internal/daemonaddr/resolve.go b/client/internal/daemonaddr/resolve.go index b7877d8a9..b445696ab 100644 --- a/client/internal/daemonaddr/resolve.go +++ b/client/internal/daemonaddr/resolve.go @@ -49,7 +49,7 @@ func ResolveUnixDaemonAddr(addr string) string { switch len(found) { case 1: resolved := "unix://" + found[0] - log.Infof("Default daemon socket not found, using discovered socket: %s", resolved) + log.Debugf("Default daemon socket not found, using discovered socket: %s", resolved) return resolved case 0: return addr From 59c77d0658287fa376dd6da11943504b1e6479c0 Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Fri, 27 Feb 2026 15:52:54 +0200 Subject: [PATCH 173/374] [self-hosted] support embedded IDP postgres db (#5443) * Add postgres config for embedded idp Entire-Checkpoint: 9ace190c1067 * Rename idpStore to authStore Entire-Checkpoint: 73a896c79614 * Fix review notes Entire-Checkpoint: 6556783c0df3 * Don't accept pq port = 0 Entire-Checkpoint: 80d45e37782f * Optimize configs Entire-Checkpoint: 80d45e37782f * Fix lint issues Entire-Checkpoint: 3eec968003d1 * Fail fast on combined postgres config Entire-Checkpoint: b17839d3d8c6 * Simplify management config method Entire-Checkpoint: 0f083effa20e --- combined/cmd/config.go | 107 ++++++++++++------- combined/config.yaml.example | 5 + idp/dex/config.go | 167 ++++++++++++++++++++++++++++++ management/server/idp/embedded.go | 34 +++++- 4 files changed, 271 insertions(+), 42 deletions(-) diff --git a/combined/cmd/config.go b/combined/cmd/config.go index d0ffa4ba4..f52d38ccf 100644 --- a/combined/cmd/config.go +++ b/combined/cmd/config.go @@ -71,6 +71,7 @@ type ServerConfig struct { Auth AuthConfig `yaml:"auth"` Store StoreConfig `yaml:"store"` ActivityStore StoreConfig `yaml:"activityStore"` + AuthStore StoreConfig `yaml:"authStore"` ReverseProxy ReverseProxyConfig `yaml:"reverseProxy"` } @@ -533,6 +534,68 @@ func stripSignalProtocol(uri string) string { return uri } +func buildRelayConfig(relays RelaysConfig) (*nbconfig.Relay, error) { + var ttl time.Duration + if relays.CredentialsTTL != "" { + var err error + ttl, err = time.ParseDuration(relays.CredentialsTTL) + if err != nil { + return nil, fmt.Errorf("invalid relay credentials TTL %q: %w", relays.CredentialsTTL, err) + } + } + return &nbconfig.Relay{ + Addresses: relays.Addresses, + CredentialsTTL: util.Duration{Duration: ttl}, + Secret: relays.Secret, + }, nil +} + +// buildEmbeddedIdPConfig builds the embedded IdP configuration. +// authStore overrides auth.storage when set. +func (c *CombinedConfig) buildEmbeddedIdPConfig(mgmt ManagementConfig) (*idp.EmbeddedIdPConfig, error) { + authStorageType := mgmt.Auth.Storage.Type + authStorageDSN := c.Server.AuthStore.DSN + if c.Server.AuthStore.Engine != "" { + authStorageType = c.Server.AuthStore.Engine + } + if authStorageType == "" { + authStorageType = "sqlite3" + } + authStorageFile := "" + if authStorageType == "postgres" { + if authStorageDSN == "" { + return nil, fmt.Errorf("authStore.dsn is required when authStore.engine is postgres") + } + } else { + authStorageFile = path.Join(mgmt.DataDir, "idp.db") + } + + cfg := &idp.EmbeddedIdPConfig{ + Enabled: true, + Issuer: mgmt.Auth.Issuer, + LocalAuthDisabled: mgmt.Auth.LocalAuthDisabled, + SignKeyRefreshEnabled: mgmt.Auth.SignKeyRefreshEnabled, + Storage: idp.EmbeddedStorageConfig{ + Type: authStorageType, + Config: idp.EmbeddedStorageTypeConfig{ + File: authStorageFile, + DSN: authStorageDSN, + }, + }, + DashboardRedirectURIs: mgmt.Auth.DashboardRedirectURIs, + CLIRedirectURIs: mgmt.Auth.CLIRedirectURIs, + } + + if mgmt.Auth.Owner != nil && mgmt.Auth.Owner.Email != "" { + cfg.Owner = &idp.OwnerConfig{ + Email: mgmt.Auth.Owner.Email, + Hash: mgmt.Auth.Owner.Password, + } + } + + return cfg, nil +} + // ToManagementConfig converts CombinedConfig to management server config func (c *CombinedConfig) ToManagementConfig() (*nbconfig.Config, error) { mgmt := c.Management @@ -551,19 +614,11 @@ func (c *CombinedConfig) ToManagementConfig() (*nbconfig.Config, error) { // Build relay config var relayConfig *nbconfig.Relay if len(mgmt.Relays.Addresses) > 0 || mgmt.Relays.Secret != "" { - var ttl time.Duration - if mgmt.Relays.CredentialsTTL != "" { - var err error - ttl, err = time.ParseDuration(mgmt.Relays.CredentialsTTL) - if err != nil { - return nil, fmt.Errorf("invalid relay credentials TTL %q: %w", mgmt.Relays.CredentialsTTL, err) - } - } - relayConfig = &nbconfig.Relay{ - Addresses: mgmt.Relays.Addresses, - CredentialsTTL: util.Duration{Duration: ttl}, - Secret: mgmt.Relays.Secret, + relay, err := buildRelayConfig(mgmt.Relays) + if err != nil { + return nil, err } + relayConfig = relay } // Build signal config @@ -599,31 +654,9 @@ func (c *CombinedConfig) ToManagementConfig() (*nbconfig.Config, error) { httpConfig := &nbconfig.HttpServerConfig{} // Build embedded IDP config (always enabled in combined server) - storageFile := mgmt.Auth.Storage.File - if storageFile == "" { - storageFile = path.Join(mgmt.DataDir, "idp.db") - } - - embeddedIdP := &idp.EmbeddedIdPConfig{ - Enabled: true, - Issuer: mgmt.Auth.Issuer, - LocalAuthDisabled: mgmt.Auth.LocalAuthDisabled, - SignKeyRefreshEnabled: mgmt.Auth.SignKeyRefreshEnabled, - Storage: idp.EmbeddedStorageConfig{ - Type: mgmt.Auth.Storage.Type, - Config: idp.EmbeddedStorageTypeConfig{ - File: storageFile, - }, - }, - DashboardRedirectURIs: mgmt.Auth.DashboardRedirectURIs, - CLIRedirectURIs: mgmt.Auth.CLIRedirectURIs, - } - - if mgmt.Auth.Owner != nil && mgmt.Auth.Owner.Email != "" { - embeddedIdP.Owner = &idp.OwnerConfig{ - Email: mgmt.Auth.Owner.Email, - Hash: mgmt.Auth.Owner.Password, // Will be hashed if plain text - } + embeddedIdP, err := c.buildEmbeddedIdPConfig(mgmt) + if err != nil { + return nil, err } // Set HTTP config fields for embedded IDP diff --git a/combined/config.yaml.example b/combined/config.yaml.example index ad033396d..f81973c6b 100644 --- a/combined/config.yaml.example +++ b/combined/config.yaml.example @@ -109,6 +109,11 @@ server: # engine: "sqlite" # sqlite or postgres # dsn: "" # Connection string for postgres + # Auth (embedded IdP) store configuration (optional, defaults to sqlite3 in dataDir/idp.db) + # authStore: + # engine: "sqlite3" # sqlite3 or postgres + # dsn: "" # Connection string for postgres (e.g., "host=localhost port=5432 user=postgres password=postgres dbname=netbird_idp sslmode=disable") + # Reverse proxy settings (optional) # reverseProxy: # trustedHTTPProxies: [] diff --git a/idp/dex/config.go b/idp/dex/config.go index 57f832406..3db04a4cb 100644 --- a/idp/dex/config.go +++ b/idp/dex/config.go @@ -5,7 +5,10 @@ import ( "encoding/json" "fmt" "log/slog" + "net/url" "os" + "strconv" + "strings" "time" "golang.org/x/crypto/bcrypt" @@ -195,11 +198,175 @@ func (s *Storage) OpenStorage(logger *slog.Logger) (storage.Storage, error) { return nil, fmt.Errorf("sqlite3 storage requires 'file' config") } return (&sql.SQLite3{File: file}).Open(logger) + case "postgres": + dsn, _ := s.Config["dsn"].(string) + if dsn == "" { + return nil, fmt.Errorf("postgres storage requires 'dsn' config") + } + pg, err := parsePostgresDSN(dsn) + if err != nil { + return nil, fmt.Errorf("invalid postgres DSN: %w", err) + } + return pg.Open(logger) default: return nil, fmt.Errorf("unsupported storage type: %s", s.Type) } } +// parsePostgresDSN parses a DSN into a sql.Postgres config. +// It accepts both URI format (postgres://user:pass@host:port/dbname?sslmode=disable) +// and libpq key=value format (host=localhost port=5432 dbname=mydb), including quoted values. +func parsePostgresDSN(dsn string) (*sql.Postgres, error) { + var params map[string]string + var err error + + if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") { + params, err = parsePostgresURI(dsn) + } else { + params, err = parsePostgresKeyValue(dsn) + } + if err != nil { + return nil, err + } + + host := params["host"] + if host == "" { + host = "localhost" + } + + var port uint16 = 5432 + if p, ok := params["port"]; ok && p != "" { + v, err := strconv.ParseUint(p, 10, 16) + if err != nil { + return nil, fmt.Errorf("invalid port %q: %w", p, err) + } + if v == 0 { + return nil, fmt.Errorf("invalid port %q: must be non-zero", p) + } + port = uint16(v) + } + + dbname := params["dbname"] + if dbname == "" { + return nil, fmt.Errorf("dbname is required in DSN") + } + + pg := &sql.Postgres{ + NetworkDB: sql.NetworkDB{ + Host: host, + Port: port, + Database: dbname, + User: params["user"], + Password: params["password"], + }, + } + + if sslMode := params["sslmode"]; sslMode != "" { + switch sslMode { + case "disable", "allow", "prefer", "require", "verify-ca", "verify-full": + pg.SSL.Mode = sslMode + default: + return nil, fmt.Errorf("unsupported sslmode %q: valid values are disable, allow, prefer, require, verify-ca, verify-full", sslMode) + } + } + + return pg, nil +} + +// parsePostgresURI parses a postgres:// or postgresql:// URI into parameter key-value pairs. +func parsePostgresURI(dsn string) (map[string]string, error) { + u, err := url.Parse(dsn) + if err != nil { + return nil, fmt.Errorf("invalid postgres URI: %w", err) + } + + params := make(map[string]string) + + if u.User != nil { + params["user"] = u.User.Username() + if p, ok := u.User.Password(); ok { + params["password"] = p + } + } + if u.Hostname() != "" { + params["host"] = u.Hostname() + } + if u.Port() != "" { + params["port"] = u.Port() + } + + dbname := strings.TrimPrefix(u.Path, "/") + if dbname != "" { + params["dbname"] = dbname + } + + for k, v := range u.Query() { + if len(v) > 0 { + params[k] = v[0] + } + } + + return params, nil +} + +// parsePostgresKeyValue parses a libpq key=value DSN string, handling single-quoted values +// (e.g., password='my pass' host=localhost). +func parsePostgresKeyValue(dsn string) (map[string]string, error) { + params := make(map[string]string) + s := strings.TrimSpace(dsn) + + for s != "" { + eqIdx := strings.IndexByte(s, '=') + if eqIdx < 0 { + break + } + key := strings.TrimSpace(s[:eqIdx]) + + value, rest, err := parseDSNValue(s[eqIdx+1:]) + if err != nil { + return nil, fmt.Errorf("%w for key %q", err, key) + } + + params[key] = value + s = strings.TrimSpace(rest) + } + + return params, nil +} + +// parseDSNValue parses the next value from a libpq key=value string positioned after the '='. +// It returns the parsed value and the remaining unparsed string. +func parseDSNValue(s string) (value, rest string, err error) { + if len(s) > 0 && s[0] == '\'' { + return parseQuotedDSNValue(s[1:]) + } + // Unquoted value: read until whitespace. + idx := strings.IndexAny(s, " \t\n") + if idx < 0 { + return s, "", nil + } + return s[:idx], s[idx:], nil +} + +// parseQuotedDSNValue parses a single-quoted value starting after the opening quote. +// Libpq uses ” to represent a literal single quote inside quoted values. +func parseQuotedDSNValue(s string) (value, rest string, err error) { + var buf strings.Builder + for len(s) > 0 { + if s[0] == '\'' { + if len(s) > 1 && s[1] == '\'' { + buf.WriteByte('\'') + s = s[2:] + continue + } + return buf.String(), s[1:], nil + } + buf.WriteByte(s[0]) + s = s[1:] + } + return "", "", fmt.Errorf("unterminated quoted value") +} + // Validate validates the configuration func (c *YAMLConfig) Validate() error { if c.Issuer == "" { diff --git a/management/server/idp/embedded.go b/management/server/idp/embedded.go index 8ab4ce0dc..2cc7b9743 100644 --- a/management/server/idp/embedded.go +++ b/management/server/idp/embedded.go @@ -52,7 +52,7 @@ type EmbeddedIdPConfig struct { // EmbeddedStorageConfig holds storage configuration for the embedded IdP. type EmbeddedStorageConfig struct { - // Type is the storage type (currently only "sqlite3" is supported) + // Type is the storage type: "sqlite3" (default) or "postgres" Type string // Config contains type-specific configuration Config EmbeddedStorageTypeConfig @@ -62,6 +62,8 @@ type EmbeddedStorageConfig struct { type EmbeddedStorageTypeConfig struct { // File is the path to the SQLite database file (for sqlite3 type) File string + // DSN is the connection string for postgres + DSN string } // OwnerConfig represents the initial owner/admin user for the embedded IdP. @@ -74,6 +76,22 @@ type OwnerConfig struct { Username string } +// buildIdpStorageConfig builds the Dex storage config map based on the storage type. +func buildIdpStorageConfig(storageType string, cfg EmbeddedStorageTypeConfig) (map[string]interface{}, error) { + switch storageType { + case "sqlite3": + return map[string]interface{}{ + "file": cfg.File, + }, nil + case "postgres": + return map[string]interface{}{ + "dsn": cfg.DSN, + }, nil + default: + return nil, fmt.Errorf("unsupported IdP storage type: %s", storageType) + } +} + // ToYAMLConfig converts EmbeddedIdPConfig to dex.YAMLConfig. func (c *EmbeddedIdPConfig) ToYAMLConfig() (*dex.YAMLConfig, error) { if c.Issuer == "" { @@ -85,6 +103,14 @@ func (c *EmbeddedIdPConfig) ToYAMLConfig() (*dex.YAMLConfig, error) { if c.Storage.Type == "sqlite3" && c.Storage.Config.File == "" { return nil, fmt.Errorf("storage file is required for sqlite3") } + if c.Storage.Type == "postgres" && c.Storage.Config.DSN == "" { + return nil, fmt.Errorf("storage DSN is required for postgres") + } + + storageConfig, err := buildIdpStorageConfig(c.Storage.Type, c.Storage.Config) + if err != nil { + return nil, fmt.Errorf("invalid IdP storage config: %w", err) + } // Build CLI redirect URIs including the device callback (both relative and absolute) cliRedirectURIs := c.CLIRedirectURIs @@ -100,10 +126,8 @@ func (c *EmbeddedIdPConfig) ToYAMLConfig() (*dex.YAMLConfig, error) { cfg := &dex.YAMLConfig{ Issuer: c.Issuer, Storage: dex.Storage{ - Type: c.Storage.Type, - Config: map[string]interface{}{ - "file": c.Storage.Config.File, - }, + Type: c.Storage.Type, + Config: storageConfig, }, Web: dex.Web{ AllowedOrigins: []string{"*"}, From 0ca59535f10654f9c072501173f0c2cc69e744f8 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Sat, 28 Feb 2026 13:04:58 +0800 Subject: [PATCH 174/374] [management] Add reverse proxy services REST client (#5454) --- shared/management/client/rest/client.go | 38 +++++++- .../client/rest/reverse_proxy_clusters.go | 25 +++++ .../client/rest/reverse_proxy_domains.go | 72 ++++++++++++++ .../client/rest/reverse_proxy_services.go | 97 +++++++++++++++++++ 4 files changed, 230 insertions(+), 2 deletions(-) create mode 100644 shared/management/client/rest/reverse_proxy_clusters.go create mode 100644 shared/management/client/rest/reverse_proxy_domains.go create mode 100644 shared/management/client/rest/reverse_proxy_services.go diff --git a/shared/management/client/rest/client.go b/shared/management/client/rest/client.go index 99d8eb594..f308761fb 100644 --- a/shared/management/client/rest/client.go +++ b/shared/management/client/rest/client.go @@ -11,6 +11,26 @@ import ( "github.com/netbirdio/netbird/shared/management/http/util" ) +// APIError represents an error response from the management API. +type APIError struct { + StatusCode int + Message string +} + +// Error implements the error interface. +func (e *APIError) Error() string { + return e.Message +} + +// IsNotFound returns true if the error represents a 404 Not Found response. +func IsNotFound(err error) bool { + var apiErr *APIError + if ok := errors.As(err, &apiErr); ok { + return apiErr.StatusCode == http.StatusNotFound + } + return false +} + // Client Management service HTTP REST API Client type Client struct { managementURL string @@ -105,6 +125,15 @@ type Client struct { // Instance NetBird Instance API // see more: https://docs.netbird.io/api/resources/instance Instance *InstanceAPI + + // ReverseProxyServices NetBird reverse proxy services APIs + ReverseProxyServices *ReverseProxyServicesAPI + + // ReverseProxyClusters NetBird reverse proxy clusters APIs + ReverseProxyClusters *ReverseProxyClustersAPI + + // ReverseProxyDomains NetBird reverse proxy domains APIs + ReverseProxyDomains *ReverseProxyDomainsAPI } // New initialize new Client instance using PAT token @@ -160,6 +189,9 @@ func (c *Client) initialize() { c.IdentityProviders = &IdentityProvidersAPI{c} c.Ingress = &IngressAPI{c} c.Instance = &InstanceAPI{c} + c.ReverseProxyServices = &ReverseProxyServicesAPI{c} + c.ReverseProxyClusters = &ReverseProxyClustersAPI{c} + c.ReverseProxyDomains = &ReverseProxyDomainsAPI{c} } // NewRequest creates and executes new management API request @@ -194,10 +226,12 @@ func (c *Client) NewRequest(ctx context.Context, method, path string, body io.Re if resp.StatusCode > 299 { parsedErr, pErr := parseResponse[util.ErrorResponse](resp) if pErr != nil { - return nil, pErr } - return nil, errors.New(parsedErr.Message) + return nil, &APIError{ + StatusCode: resp.StatusCode, + Message: parsedErr.Message, + } } return resp, nil diff --git a/shared/management/client/rest/reverse_proxy_clusters.go b/shared/management/client/rest/reverse_proxy_clusters.go new file mode 100644 index 000000000..b55cd35a3 --- /dev/null +++ b/shared/management/client/rest/reverse_proxy_clusters.go @@ -0,0 +1,25 @@ +package rest + +import ( + "context" + + "github.com/netbirdio/netbird/shared/management/http/api" +) + +// ReverseProxyClustersAPI APIs for Reverse Proxy Clusters, do not use directly +type ReverseProxyClustersAPI struct { + c *Client +} + +// List lists all available proxy clusters +func (a *ReverseProxyClustersAPI) List(ctx context.Context) ([]api.ProxyCluster, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/reverse-proxies/clusters", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.ProxyCluster](resp) + return ret, err +} diff --git a/shared/management/client/rest/reverse_proxy_domains.go b/shared/management/client/rest/reverse_proxy_domains.go new file mode 100644 index 000000000..7066a0632 --- /dev/null +++ b/shared/management/client/rest/reverse_proxy_domains.go @@ -0,0 +1,72 @@ +package rest + +import ( + "bytes" + "context" + "encoding/json" + "net/url" + + "github.com/netbirdio/netbird/shared/management/http/api" +) + +// ReverseProxyDomainsAPI APIs for Reverse Proxy Domains, do not use directly +type ReverseProxyDomainsAPI struct { + c *Client +} + +// List lists all reverse proxy domains +func (a *ReverseProxyDomainsAPI) List(ctx context.Context) ([]api.ReverseProxyDomain, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/reverse-proxies/domains", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.ReverseProxyDomain](resp) + return ret, err +} + +// Create creates a new custom domain +func (a *ReverseProxyDomainsAPI) Create(ctx context.Context, request api.PostApiReverseProxiesDomainsJSONRequestBody) (*api.ReverseProxyDomain, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/reverse-proxies/domains", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.ReverseProxyDomain](resp) + if err != nil { + return nil, err + } + return &ret, nil +} + +// Delete deletes a custom domain +func (a *ReverseProxyDomainsAPI) Delete(ctx context.Context, domainID string) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/reverse-proxies/domains/"+url.PathEscape(domainID), nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} + +// Validate triggers domain ownership validation for a custom domain +func (a *ReverseProxyDomainsAPI) Validate(ctx context.Context, domainID string) error { + resp, err := a.c.NewRequest(ctx, "GET", "/api/reverse-proxies/domains/"+url.PathEscape(domainID)+"/validate", nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} diff --git a/shared/management/client/rest/reverse_proxy_services.go b/shared/management/client/rest/reverse_proxy_services.go new file mode 100644 index 000000000..2ecb382b2 --- /dev/null +++ b/shared/management/client/rest/reverse_proxy_services.go @@ -0,0 +1,97 @@ +package rest + +import ( + "bytes" + "context" + "encoding/json" + "net/url" + + "github.com/netbirdio/netbird/shared/management/http/api" +) + +// ReverseProxyServicesAPI APIs for Reverse Proxy Services, do not use directly +type ReverseProxyServicesAPI struct { + c *Client +} + +// List lists all reverse proxy services +func (a *ReverseProxyServicesAPI) List(ctx context.Context) ([]api.Service, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/reverse-proxies/services", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.Service](resp) + return ret, err +} + +// Get retrieves a reverse proxy service by ID +func (a *ReverseProxyServicesAPI) Get(ctx context.Context, serviceID string) (*api.Service, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/reverse-proxies/services/"+url.PathEscape(serviceID), nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.Service](resp) + if err != nil { + return nil, err + } + return &ret, nil +} + +// Create creates a new reverse proxy service +func (a *ReverseProxyServicesAPI) Create(ctx context.Context, request api.PostApiReverseProxiesServicesJSONRequestBody) (*api.Service, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/reverse-proxies/services", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.Service](resp) + if err != nil { + return nil, err + } + return &ret, nil +} + +// Update updates a reverse proxy service +func (a *ReverseProxyServicesAPI) Update(ctx context.Context, serviceID string, request api.PutApiReverseProxiesServicesServiceIdJSONRequestBody) (*api.Service, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/reverse-proxies/services/"+url.PathEscape(serviceID), bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.Service](resp) + if err != nil { + return nil, err + } + return &ret, nil +} + +// Delete deletes a reverse proxy service +func (a *ReverseProxyServicesAPI) Delete(ctx context.Context, serviceID string) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/reverse-proxies/services/"+url.PathEscape(serviceID), nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + + return nil +} From 0b21498b3983cc8852ecc2e1f70dfdee10814f71 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Mon, 2 Mar 2026 17:07:53 +0800 Subject: [PATCH 175/374] [client] Fix close of closed channel panic in ConnectClient retry loop (#5470) --- client/internal/connect.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/client/internal/connect.go b/client/internal/connect.go index 17fc20c42..68a0cb8da 100644 --- a/client/internal/connect.go +++ b/client/internal/connect.go @@ -331,8 +331,11 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan state.Set(StatusConnected) if runningChan != nil { - close(runningChan) - runningChan = nil + select { + case <-runningChan: + default: + close(runningChan) + } } <-engineCtx.Done() From bbe5ae214535f8ba14dddda9a6ba960744fe3b12 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Mon, 2 Mar 2026 22:17:08 +0800 Subject: [PATCH 176/374] [client] Flush buffer immediately to support gprc (#5469) --- proxy/internal/proxy/reverseproxy.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/proxy/internal/proxy/reverseproxy.go b/proxy/internal/proxy/reverseproxy.go index 16607689a..ee45ccfbb 100644 --- a/proxy/internal/proxy/reverseproxy.go +++ b/proxy/internal/proxy/reverseproxy.go @@ -81,9 +81,10 @@ func (p *ReverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { } rp := &httputil.ReverseProxy{ - Rewrite: p.rewriteFunc(result.url, result.matchedPath, result.passHostHeader), - Transport: p.transport, - ErrorHandler: proxyErrorHandler, + Rewrite: p.rewriteFunc(result.url, result.matchedPath, result.passHostHeader), + Transport: p.transport, + FlushInterval: -1, + ErrorHandler: proxyErrorHandler, } if result.rewriteRedirects { rp.ModifyResponse = p.rewriteLocationFunc(result.url, result.matchedPath, r) //nolint:bodyclose From 82da60688612a689c4ae4de5411a07d7ce8cd5d0 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Mon, 2 Mar 2026 18:25:44 +0100 Subject: [PATCH 177/374] [management] Add explicit target delete on service removal (#5420) --- .../modules/reverseproxy/manager/manager.go | 9 +- .../reverseproxy/manager/manager_test.go | 67 + management/server/account/manager.go | 6 +- management/server/account/manager_mock.go | 1738 +++++++++++++++++ management/server/store/sql_store.go | 40 + management/server/store/store.go | 3 + management/server/store/store_mock.go | 105 +- 7 files changed, 1932 insertions(+), 36 deletions(-) create mode 100644 management/server/account/manager_mock.go diff --git a/management/internals/modules/reverseproxy/manager/manager.go b/management/internals/modules/reverseproxy/manager/manager.go index b2c67e0c1..3c02e117b 100644 --- a/management/internals/modules/reverseproxy/manager/manager.go +++ b/management/internals/modules/reverseproxy/manager/manager.go @@ -4,12 +4,12 @@ import ( "context" "fmt" "math/rand/v2" + "slices" "time" - nbpeer "github.com/netbirdio/netbird/management/server/peer" log "github.com/sirupsen/logrus" - "slices" + nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/sessionkey" @@ -410,12 +410,15 @@ func (m *managerImpl) DeleteService(ctx context.Context, accountID, userID, serv var service *reverseproxy.Service err = m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { - var err error service, err = transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID) if err != nil { return err } + if err = transaction.DeleteServiceTargets(ctx, accountID, serviceID); err != nil { + return fmt.Errorf("failed to delete targets: %w", err) + } + if err = transaction.DeleteService(ctx, accountID, serviceID); err != nil { return fmt.Errorf("failed to delete service: %w", err) } diff --git a/management/internals/modules/reverseproxy/manager/manager_test.go b/management/internals/modules/reverseproxy/manager/manager_test.go index 17849f622..8e6b0e876 100644 --- a/management/internals/modules/reverseproxy/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/manager/manager_test.go @@ -13,11 +13,14 @@ import ( "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" + "github.com/netbirdio/netbird/management/server/account" "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/integrations/extra_settings" "github.com/netbirdio/netbird/management/server/mock_server" nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/server/permissions" + "github.com/netbirdio/netbird/management/server/permissions/modules" + "github.com/netbirdio/netbird/management/server/permissions/operations" "github.com/netbirdio/netbird/management/server/settings" "github.com/netbirdio/netbird/management/server/store" "github.com/netbirdio/netbird/management/server/types" @@ -1112,3 +1115,67 @@ func TestGetGroupIDsFromNames(t *testing.T) { assert.Contains(t, err.Error(), "no group names provided") }) } + +func TestDeleteService_DeletesTargets(t *testing.T) { + ctx := context.Background() + accountID := "test-account" + userID := "test-user" + + sqlStore, err := store.NewStore(ctx, types.SqliteStoreEngine, t.TempDir(), nil, false) + require.NoError(t, err) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockPerms := permissions.NewMockManager(ctrl) + mockAcct := account.NewMockManager(ctrl) + mockGRPC := &nbgrpc.ProxyServiceServer{} + + mgr := &managerImpl{ + store: sqlStore, + permissionsManager: mockPerms, + accountManager: mockAcct, + proxyGRPCServer: mockGRPC, + } + + service := &reverseproxy.Service{ + ID: "service-1", + AccountID: accountID, + Domain: "test.example.com", + ProxyCluster: "cluster1", + Enabled: true, + Targets: []*reverseproxy.Target{ + {AccountID: accountID, ServiceID: "service-1", TargetType: reverseproxy.TargetTypePeer, TargetId: "peer-1"}, + {AccountID: accountID, ServiceID: "service-1", TargetType: reverseproxy.TargetTypePeer, TargetId: "peer-2"}, + {AccountID: accountID, ServiceID: "service-1", TargetType: reverseproxy.TargetTypePeer, TargetId: "peer-3"}, + }, + } + + err = sqlStore.CreateService(ctx, service) + require.NoError(t, err) + + retrievedService, err := sqlStore.GetServiceByID(ctx, store.LockingStrengthNone, accountID, service.ID) + require.NoError(t, err) + require.Len(t, retrievedService.Targets, 3, "Service should have 3 targets before deletion") + + mockPerms.EXPECT(). + ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Delete). + Return(true, nil) + mockAcct.EXPECT(). + StoreEvent(ctx, userID, service.ID, accountID, activity.ServiceDeleted, gomock.Any()) + mockAcct.EXPECT(). + UpdateAccountPeers(ctx, accountID) + + err = mgr.DeleteService(ctx, accountID, userID, service.ID) + require.NoError(t, err) + + _, err = sqlStore.GetServiceByID(ctx, store.LockingStrengthNone, accountID, service.ID) + require.Error(t, err) + s, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.NotFound, s.Type()) + + targets, err := sqlStore.GetTargetsByServiceID(ctx, store.LockingStrengthNone, accountID, service.ID) + require.NoError(t, err) + assert.Len(t, targets, 0, "All targets should be deleted when service is deleted") +} diff --git a/management/server/account/manager.go b/management/server/account/manager.go index 207ab71d6..893e894e1 100644 --- a/management/server/account/manager.go +++ b/management/server/account/manager.go @@ -1,5 +1,7 @@ package account +//go:generate go run github.com/golang/mock/mockgen -package account -destination=manager_mock.go -source=./manager.go -build_flags=-mod=mod + import ( "context" "net" @@ -61,11 +63,11 @@ type Manager interface { GetPeers(ctx context.Context, accountID, userID, nameFilter, ipFilter string) ([]*nbpeer.Peer, error) MarkPeerConnected(ctx context.Context, peerKey string, connected bool, realIP net.IP, accountID string, syncTime time.Time) error DeletePeer(ctx context.Context, accountID, peerID, userID string) error - UpdatePeer(ctx context.Context, accountID, userID string, peer *nbpeer.Peer) (*nbpeer.Peer, error) + UpdatePeer(ctx context.Context, accountID, userID string, p *nbpeer.Peer) (*nbpeer.Peer, error) UpdatePeerIP(ctx context.Context, accountID, userID, peerID string, newIP netip.Addr) error GetNetworkMap(ctx context.Context, peerID string) (*types.NetworkMap, error) GetPeerNetwork(ctx context.Context, peerID string) (*types.Network, error) - AddPeer(ctx context.Context, accountID, setupKey, userID string, peer *nbpeer.Peer, temporary bool) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, error) + AddPeer(ctx context.Context, accountID, setupKey, userID string, p *nbpeer.Peer, temporary bool) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, error) CreatePAT(ctx context.Context, accountID string, initiatorUserID string, targetUserID string, tokenName string, expiresIn int) (*types.PersonalAccessTokenGenerated, error) DeletePAT(ctx context.Context, accountID string, initiatorUserID string, targetUserID string, tokenID string) error GetPAT(ctx context.Context, accountID string, initiatorUserID string, targetUserID string, tokenID string) (*types.PersonalAccessToken, error) diff --git a/management/server/account/manager_mock.go b/management/server/account/manager_mock.go new file mode 100644 index 000000000..ab6e8b1c9 --- /dev/null +++ b/management/server/account/manager_mock.go @@ -0,0 +1,1738 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./manager.go + +// Package account is a generated GoMock package. +package account + +import ( + context "context" + net "net" + netip "net/netip" + reflect "reflect" + time "time" + + gomock "github.com/golang/mock/gomock" + dns "github.com/netbirdio/netbird/dns" + reverseproxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + activity "github.com/netbirdio/netbird/management/server/activity" + idp "github.com/netbirdio/netbird/management/server/idp" + peer "github.com/netbirdio/netbird/management/server/peer" + posture "github.com/netbirdio/netbird/management/server/posture" + store "github.com/netbirdio/netbird/management/server/store" + types "github.com/netbirdio/netbird/management/server/types" + users "github.com/netbirdio/netbird/management/server/users" + route "github.com/netbirdio/netbird/route" + auth "github.com/netbirdio/netbird/shared/auth" + domain "github.com/netbirdio/netbird/shared/management/domain" +) + +// MockManager is a mock of Manager interface. +type MockManager struct { + ctrl *gomock.Controller + recorder *MockManagerMockRecorder +} + +// MockManagerMockRecorder is the mock recorder for MockManager. +type MockManagerMockRecorder struct { + mock *MockManager +} + +// NewMockManager creates a new mock instance. +func NewMockManager(ctrl *gomock.Controller) *MockManager { + mock := &MockManager{ctrl: ctrl} + mock.recorder = &MockManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockManager) EXPECT() *MockManagerMockRecorder { + return m.recorder +} + +// AcceptUserInvite mocks base method. +func (m *MockManager) AcceptUserInvite(ctx context.Context, token, password string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AcceptUserInvite", ctx, token, password) + ret0, _ := ret[0].(error) + return ret0 +} + +// AcceptUserInvite indicates an expected call of AcceptUserInvite. +func (mr *MockManagerMockRecorder) AcceptUserInvite(ctx, token, password interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcceptUserInvite", reflect.TypeOf((*MockManager)(nil).AcceptUserInvite), ctx, token, password) +} + +// AccountExists mocks base method. +func (m *MockManager) AccountExists(ctx context.Context, accountID string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AccountExists", ctx, accountID) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AccountExists indicates an expected call of AccountExists. +func (mr *MockManagerMockRecorder) AccountExists(ctx, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AccountExists", reflect.TypeOf((*MockManager)(nil).AccountExists), ctx, accountID) +} + +// AddPeer mocks base method. +func (m *MockManager) AddPeer(ctx context.Context, accountID, setupKey, userID string, p *peer.Peer, temporary bool) (*peer.Peer, *types.NetworkMap, []*posture.Checks, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddPeer", ctx, accountID, setupKey, userID, p, temporary) + ret0, _ := ret[0].(*peer.Peer) + ret1, _ := ret[1].(*types.NetworkMap) + ret2, _ := ret[2].([]*posture.Checks) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// AddPeer indicates an expected call of AddPeer. +func (mr *MockManagerMockRecorder) AddPeer(ctx, accountID, setupKey, userID, p, temporary interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPeer", reflect.TypeOf((*MockManager)(nil).AddPeer), ctx, accountID, setupKey, userID, p, temporary) +} + +// ApproveUser mocks base method. +func (m *MockManager) ApproveUser(ctx context.Context, accountID, initiatorUserID, targetUserID string) (*types.UserInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ApproveUser", ctx, accountID, initiatorUserID, targetUserID) + ret0, _ := ret[0].(*types.UserInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ApproveUser indicates an expected call of ApproveUser. +func (mr *MockManagerMockRecorder) ApproveUser(ctx, accountID, initiatorUserID, targetUserID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApproveUser", reflect.TypeOf((*MockManager)(nil).ApproveUser), ctx, accountID, initiatorUserID, targetUserID) +} + +// BufferUpdateAccountPeers mocks base method. +func (m *MockManager) BufferUpdateAccountPeers(ctx context.Context, accountID string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "BufferUpdateAccountPeers", ctx, accountID) +} + +// BufferUpdateAccountPeers indicates an expected call of BufferUpdateAccountPeers. +func (mr *MockManagerMockRecorder) BufferUpdateAccountPeers(ctx, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BufferUpdateAccountPeers", reflect.TypeOf((*MockManager)(nil).BufferUpdateAccountPeers), ctx, accountID) +} + +// BuildUserInfosForAccount mocks base method. +func (m *MockManager) BuildUserInfosForAccount(ctx context.Context, accountID, initiatorUserID string, accountUsers []*types.User) (map[string]*types.UserInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BuildUserInfosForAccount", ctx, accountID, initiatorUserID, accountUsers) + ret0, _ := ret[0].(map[string]*types.UserInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BuildUserInfosForAccount indicates an expected call of BuildUserInfosForAccount. +func (mr *MockManagerMockRecorder) BuildUserInfosForAccount(ctx, accountID, initiatorUserID, accountUsers interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildUserInfosForAccount", reflect.TypeOf((*MockManager)(nil).BuildUserInfosForAccount), ctx, accountID, initiatorUserID, accountUsers) +} + +// CreateGroup mocks base method. +func (m *MockManager) CreateGroup(ctx context.Context, accountID, userID string, group *types.Group) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateGroup", ctx, accountID, userID, group) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateGroup indicates an expected call of CreateGroup. +func (mr *MockManagerMockRecorder) CreateGroup(ctx, accountID, userID, group interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateGroup", reflect.TypeOf((*MockManager)(nil).CreateGroup), ctx, accountID, userID, group) +} + +// CreateGroups mocks base method. +func (m *MockManager) CreateGroups(ctx context.Context, accountID, userID string, newGroups []*types.Group) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateGroups", ctx, accountID, userID, newGroups) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateGroups indicates an expected call of CreateGroups. +func (mr *MockManagerMockRecorder) CreateGroups(ctx, accountID, userID, newGroups interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateGroups", reflect.TypeOf((*MockManager)(nil).CreateGroups), ctx, accountID, userID, newGroups) +} + +// CreateIdentityProvider mocks base method. +func (m *MockManager) CreateIdentityProvider(ctx context.Context, accountID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateIdentityProvider", ctx, accountID, userID, idp) + ret0, _ := ret[0].(*types.IdentityProvider) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateIdentityProvider indicates an expected call of CreateIdentityProvider. +func (mr *MockManagerMockRecorder) CreateIdentityProvider(ctx, accountID, userID, idp interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateIdentityProvider", reflect.TypeOf((*MockManager)(nil).CreateIdentityProvider), ctx, accountID, userID, idp) +} + +// CreateNameServerGroup mocks base method. +func (m *MockManager) CreateNameServerGroup(ctx context.Context, accountID, name, description string, nameServerList []dns.NameServer, groups []string, primary bool, domains []string, enabled bool, userID string, searchDomainsEnabled bool) (*dns.NameServerGroup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateNameServerGroup", ctx, accountID, name, description, nameServerList, groups, primary, domains, enabled, userID, searchDomainsEnabled) + ret0, _ := ret[0].(*dns.NameServerGroup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateNameServerGroup indicates an expected call of CreateNameServerGroup. +func (mr *MockManagerMockRecorder) CreateNameServerGroup(ctx, accountID, name, description, nameServerList, groups, primary, domains, enabled, userID, searchDomainsEnabled interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNameServerGroup", reflect.TypeOf((*MockManager)(nil).CreateNameServerGroup), ctx, accountID, name, description, nameServerList, groups, primary, domains, enabled, userID, searchDomainsEnabled) +} + +// CreatePAT mocks base method. +func (m *MockManager) CreatePAT(ctx context.Context, accountID, initiatorUserID, targetUserID, tokenName string, expiresIn int) (*types.PersonalAccessTokenGenerated, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreatePAT", ctx, accountID, initiatorUserID, targetUserID, tokenName, expiresIn) + ret0, _ := ret[0].(*types.PersonalAccessTokenGenerated) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreatePAT indicates an expected call of CreatePAT. +func (mr *MockManagerMockRecorder) CreatePAT(ctx, accountID, initiatorUserID, targetUserID, tokenName, expiresIn interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePAT", reflect.TypeOf((*MockManager)(nil).CreatePAT), ctx, accountID, initiatorUserID, targetUserID, tokenName, expiresIn) +} + +// CreatePeerJob mocks base method. +func (m *MockManager) CreatePeerJob(ctx context.Context, accountID, peerID, userID string, job *types.Job) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreatePeerJob", ctx, accountID, peerID, userID, job) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreatePeerJob indicates an expected call of CreatePeerJob. +func (mr *MockManagerMockRecorder) CreatePeerJob(ctx, accountID, peerID, userID, job interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePeerJob", reflect.TypeOf((*MockManager)(nil).CreatePeerJob), ctx, accountID, peerID, userID, job) +} + +// CreateRoute mocks base method. +func (m *MockManager) CreateRoute(ctx context.Context, accountID string, prefix netip.Prefix, networkType route.NetworkType, domains domain.List, peerID string, peerGroupIDs []string, description string, netID route.NetID, masquerade bool, metric int, groups, accessControlGroupIDs []string, enabled bool, userID string, keepRoute, skipAutoApply bool) (*route.Route, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateRoute", ctx, accountID, prefix, networkType, domains, peerID, peerGroupIDs, description, netID, masquerade, metric, groups, accessControlGroupIDs, enabled, userID, keepRoute, skipAutoApply) + ret0, _ := ret[0].(*route.Route) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateRoute indicates an expected call of CreateRoute. +func (mr *MockManagerMockRecorder) CreateRoute(ctx, accountID, prefix, networkType, domains, peerID, peerGroupIDs, description, netID, masquerade, metric, groups, accessControlGroupIDs, enabled, userID, keepRoute, skipAutoApply interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateRoute", reflect.TypeOf((*MockManager)(nil).CreateRoute), ctx, accountID, prefix, networkType, domains, peerID, peerGroupIDs, description, netID, masquerade, metric, groups, accessControlGroupIDs, enabled, userID, keepRoute, skipAutoApply) +} + +// CreateSetupKey mocks base method. +func (m *MockManager) CreateSetupKey(ctx context.Context, accountID, keyName string, keyType types.SetupKeyType, expiresIn time.Duration, autoGroups []string, usageLimit int, userID string, ephemeral, allowExtraDNSLabels bool) (*types.SetupKey, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateSetupKey", ctx, accountID, keyName, keyType, expiresIn, autoGroups, usageLimit, userID, ephemeral, allowExtraDNSLabels) + ret0, _ := ret[0].(*types.SetupKey) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateSetupKey indicates an expected call of CreateSetupKey. +func (mr *MockManagerMockRecorder) CreateSetupKey(ctx, accountID, keyName, keyType, expiresIn, autoGroups, usageLimit, userID, ephemeral, allowExtraDNSLabels interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSetupKey", reflect.TypeOf((*MockManager)(nil).CreateSetupKey), ctx, accountID, keyName, keyType, expiresIn, autoGroups, usageLimit, userID, ephemeral, allowExtraDNSLabels) +} + +// CreateUser mocks base method. +func (m *MockManager) CreateUser(ctx context.Context, accountID, initiatorUserID string, key *types.UserInfo) (*types.UserInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateUser", ctx, accountID, initiatorUserID, key) + ret0, _ := ret[0].(*types.UserInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateUser indicates an expected call of CreateUser. +func (mr *MockManagerMockRecorder) CreateUser(ctx, accountID, initiatorUserID, key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateUser", reflect.TypeOf((*MockManager)(nil).CreateUser), ctx, accountID, initiatorUserID, key) +} + +// CreateUserInvite mocks base method. +func (m *MockManager) CreateUserInvite(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateUserInvite", ctx, accountID, initiatorUserID, invite, expiresIn) + ret0, _ := ret[0].(*types.UserInvite) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateUserInvite indicates an expected call of CreateUserInvite. +func (mr *MockManagerMockRecorder) CreateUserInvite(ctx, accountID, initiatorUserID, invite, expiresIn interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateUserInvite", reflect.TypeOf((*MockManager)(nil).CreateUserInvite), ctx, accountID, initiatorUserID, invite, expiresIn) +} + +// DeleteAccount mocks base method. +func (m *MockManager) DeleteAccount(ctx context.Context, accountID, userID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteAccount", ctx, accountID, userID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteAccount indicates an expected call of DeleteAccount. +func (mr *MockManagerMockRecorder) DeleteAccount(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAccount", reflect.TypeOf((*MockManager)(nil).DeleteAccount), ctx, accountID, userID) +} + +// DeleteGroup mocks base method. +func (m *MockManager) DeleteGroup(ctx context.Context, accountId, userId, groupID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteGroup", ctx, accountId, userId, groupID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteGroup indicates an expected call of DeleteGroup. +func (mr *MockManagerMockRecorder) DeleteGroup(ctx, accountId, userId, groupID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteGroup", reflect.TypeOf((*MockManager)(nil).DeleteGroup), ctx, accountId, userId, groupID) +} + +// DeleteGroups mocks base method. +func (m *MockManager) DeleteGroups(ctx context.Context, accountId, userId string, groupIDs []string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteGroups", ctx, accountId, userId, groupIDs) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteGroups indicates an expected call of DeleteGroups. +func (mr *MockManagerMockRecorder) DeleteGroups(ctx, accountId, userId, groupIDs interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteGroups", reflect.TypeOf((*MockManager)(nil).DeleteGroups), ctx, accountId, userId, groupIDs) +} + +// DeleteIdentityProvider mocks base method. +func (m *MockManager) DeleteIdentityProvider(ctx context.Context, accountID, idpID, userID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteIdentityProvider", ctx, accountID, idpID, userID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteIdentityProvider indicates an expected call of DeleteIdentityProvider. +func (mr *MockManagerMockRecorder) DeleteIdentityProvider(ctx, accountID, idpID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteIdentityProvider", reflect.TypeOf((*MockManager)(nil).DeleteIdentityProvider), ctx, accountID, idpID, userID) +} + +// DeleteNameServerGroup mocks base method. +func (m *MockManager) DeleteNameServerGroup(ctx context.Context, accountID, nsGroupID, userID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteNameServerGroup", ctx, accountID, nsGroupID, userID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteNameServerGroup indicates an expected call of DeleteNameServerGroup. +func (mr *MockManagerMockRecorder) DeleteNameServerGroup(ctx, accountID, nsGroupID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNameServerGroup", reflect.TypeOf((*MockManager)(nil).DeleteNameServerGroup), ctx, accountID, nsGroupID, userID) +} + +// DeletePAT mocks base method. +func (m *MockManager) DeletePAT(ctx context.Context, accountID, initiatorUserID, targetUserID, tokenID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePAT", ctx, accountID, initiatorUserID, targetUserID, tokenID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeletePAT indicates an expected call of DeletePAT. +func (mr *MockManagerMockRecorder) DeletePAT(ctx, accountID, initiatorUserID, targetUserID, tokenID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePAT", reflect.TypeOf((*MockManager)(nil).DeletePAT), ctx, accountID, initiatorUserID, targetUserID, tokenID) +} + +// DeletePeer mocks base method. +func (m *MockManager) DeletePeer(ctx context.Context, accountID, peerID, userID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePeer", ctx, accountID, peerID, userID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeletePeer indicates an expected call of DeletePeer. +func (mr *MockManagerMockRecorder) DeletePeer(ctx, accountID, peerID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePeer", reflect.TypeOf((*MockManager)(nil).DeletePeer), ctx, accountID, peerID, userID) +} + +// DeletePolicy mocks base method. +func (m *MockManager) DeletePolicy(ctx context.Context, accountID, policyID, userID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePolicy", ctx, accountID, policyID, userID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeletePolicy indicates an expected call of DeletePolicy. +func (mr *MockManagerMockRecorder) DeletePolicy(ctx, accountID, policyID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePolicy", reflect.TypeOf((*MockManager)(nil).DeletePolicy), ctx, accountID, policyID, userID) +} + +// DeletePostureChecks mocks base method. +func (m *MockManager) DeletePostureChecks(ctx context.Context, accountID, postureChecksID, userID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePostureChecks", ctx, accountID, postureChecksID, userID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeletePostureChecks indicates an expected call of DeletePostureChecks. +func (mr *MockManagerMockRecorder) DeletePostureChecks(ctx, accountID, postureChecksID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePostureChecks", reflect.TypeOf((*MockManager)(nil).DeletePostureChecks), ctx, accountID, postureChecksID, userID) +} + +// DeleteRegularUsers mocks base method. +func (m *MockManager) DeleteRegularUsers(ctx context.Context, accountID, initiatorUserID string, targetUserIDs []string, userInfos map[string]*types.UserInfo) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteRegularUsers", ctx, accountID, initiatorUserID, targetUserIDs, userInfos) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteRegularUsers indicates an expected call of DeleteRegularUsers. +func (mr *MockManagerMockRecorder) DeleteRegularUsers(ctx, accountID, initiatorUserID, targetUserIDs, userInfos interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRegularUsers", reflect.TypeOf((*MockManager)(nil).DeleteRegularUsers), ctx, accountID, initiatorUserID, targetUserIDs, userInfos) +} + +// DeleteRoute mocks base method. +func (m *MockManager) DeleteRoute(ctx context.Context, accountID string, routeID route.ID, userID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteRoute", ctx, accountID, routeID, userID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteRoute indicates an expected call of DeleteRoute. +func (mr *MockManagerMockRecorder) DeleteRoute(ctx, accountID, routeID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRoute", reflect.TypeOf((*MockManager)(nil).DeleteRoute), ctx, accountID, routeID, userID) +} + +// DeleteSetupKey mocks base method. +func (m *MockManager) DeleteSetupKey(ctx context.Context, accountID, userID, keyID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteSetupKey", ctx, accountID, userID, keyID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteSetupKey indicates an expected call of DeleteSetupKey. +func (mr *MockManagerMockRecorder) DeleteSetupKey(ctx, accountID, userID, keyID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSetupKey", reflect.TypeOf((*MockManager)(nil).DeleteSetupKey), ctx, accountID, userID, keyID) +} + +// DeleteUser mocks base method. +func (m *MockManager) DeleteUser(ctx context.Context, accountID, initiatorUserID, targetUserID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteUser", ctx, accountID, initiatorUserID, targetUserID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteUser indicates an expected call of DeleteUser. +func (mr *MockManagerMockRecorder) DeleteUser(ctx, accountID, initiatorUserID, targetUserID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUser", reflect.TypeOf((*MockManager)(nil).DeleteUser), ctx, accountID, initiatorUserID, targetUserID) +} + +// DeleteUserInvite mocks base method. +func (m *MockManager) DeleteUserInvite(ctx context.Context, accountID, initiatorUserID, inviteID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteUserInvite", ctx, accountID, initiatorUserID, inviteID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteUserInvite indicates an expected call of DeleteUserInvite. +func (mr *MockManagerMockRecorder) DeleteUserInvite(ctx, accountID, initiatorUserID, inviteID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUserInvite", reflect.TypeOf((*MockManager)(nil).DeleteUserInvite), ctx, accountID, initiatorUserID, inviteID) +} + +// FindExistingPostureCheck mocks base method. +func (m *MockManager) FindExistingPostureCheck(accountID string, checks *posture.ChecksDefinition) (*posture.Checks, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindExistingPostureCheck", accountID, checks) + ret0, _ := ret[0].(*posture.Checks) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FindExistingPostureCheck indicates an expected call of FindExistingPostureCheck. +func (mr *MockManagerMockRecorder) FindExistingPostureCheck(accountID, checks interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindExistingPostureCheck", reflect.TypeOf((*MockManager)(nil).FindExistingPostureCheck), accountID, checks) +} + +// GetAccount mocks base method. +func (m *MockManager) GetAccount(ctx context.Context, accountID string) (*types.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccount", ctx, accountID) + ret0, _ := ret[0].(*types.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccount indicates an expected call of GetAccount. +func (mr *MockManagerMockRecorder) GetAccount(ctx, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccount", reflect.TypeOf((*MockManager)(nil).GetAccount), ctx, accountID) +} + +// GetAccountByID mocks base method. +func (m *MockManager) GetAccountByID(ctx context.Context, accountID, userID string) (*types.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountByID", ctx, accountID, userID) + ret0, _ := ret[0].(*types.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountByID indicates an expected call of GetAccountByID. +func (mr *MockManagerMockRecorder) GetAccountByID(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountByID", reflect.TypeOf((*MockManager)(nil).GetAccountByID), ctx, accountID, userID) +} + +// GetAccountIDByUserID mocks base method. +func (m *MockManager) GetAccountIDByUserID(ctx context.Context, userAuth auth.UserAuth) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountIDByUserID", ctx, userAuth) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountIDByUserID indicates an expected call of GetAccountIDByUserID. +func (mr *MockManagerMockRecorder) GetAccountIDByUserID(ctx, userAuth interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountIDByUserID", reflect.TypeOf((*MockManager)(nil).GetAccountIDByUserID), ctx, userAuth) +} + +// GetAccountIDForPeerKey mocks base method. +func (m *MockManager) GetAccountIDForPeerKey(ctx context.Context, peerKey string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountIDForPeerKey", ctx, peerKey) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountIDForPeerKey indicates an expected call of GetAccountIDForPeerKey. +func (mr *MockManagerMockRecorder) GetAccountIDForPeerKey(ctx, peerKey interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountIDForPeerKey", reflect.TypeOf((*MockManager)(nil).GetAccountIDForPeerKey), ctx, peerKey) +} + +// GetAccountIDFromUserAuth mocks base method. +func (m *MockManager) GetAccountIDFromUserAuth(ctx context.Context, userAuth auth.UserAuth) (string, string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountIDFromUserAuth", ctx, userAuth) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(string) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetAccountIDFromUserAuth indicates an expected call of GetAccountIDFromUserAuth. +func (mr *MockManagerMockRecorder) GetAccountIDFromUserAuth(ctx, userAuth interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountIDFromUserAuth", reflect.TypeOf((*MockManager)(nil).GetAccountIDFromUserAuth), ctx, userAuth) +} + +// GetAccountMeta mocks base method. +func (m *MockManager) GetAccountMeta(ctx context.Context, accountID, userID string) (*types.AccountMeta, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountMeta", ctx, accountID, userID) + ret0, _ := ret[0].(*types.AccountMeta) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountMeta indicates an expected call of GetAccountMeta. +func (mr *MockManagerMockRecorder) GetAccountMeta(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountMeta", reflect.TypeOf((*MockManager)(nil).GetAccountMeta), ctx, accountID, userID) +} + +// GetAccountOnboarding mocks base method. +func (m *MockManager) GetAccountOnboarding(ctx context.Context, accountID, userID string) (*types.AccountOnboarding, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountOnboarding", ctx, accountID, userID) + ret0, _ := ret[0].(*types.AccountOnboarding) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountOnboarding indicates an expected call of GetAccountOnboarding. +func (mr *MockManagerMockRecorder) GetAccountOnboarding(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountOnboarding", reflect.TypeOf((*MockManager)(nil).GetAccountOnboarding), ctx, accountID, userID) +} + +// GetAccountSettings mocks base method. +func (m *MockManager) GetAccountSettings(ctx context.Context, accountID, userID string) (*types.Settings, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountSettings", ctx, accountID, userID) + ret0, _ := ret[0].(*types.Settings) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountSettings indicates an expected call of GetAccountSettings. +func (mr *MockManagerMockRecorder) GetAccountSettings(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountSettings", reflect.TypeOf((*MockManager)(nil).GetAccountSettings), ctx, accountID, userID) +} + +// GetAllGroups mocks base method. +func (m *MockManager) GetAllGroups(ctx context.Context, accountID, userID string) ([]*types.Group, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAllGroups", ctx, accountID, userID) + ret0, _ := ret[0].([]*types.Group) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAllGroups indicates an expected call of GetAllGroups. +func (mr *MockManagerMockRecorder) GetAllGroups(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllGroups", reflect.TypeOf((*MockManager)(nil).GetAllGroups), ctx, accountID, userID) +} + +// GetAllPATs mocks base method. +func (m *MockManager) GetAllPATs(ctx context.Context, accountID, initiatorUserID, targetUserID string) ([]*types.PersonalAccessToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAllPATs", ctx, accountID, initiatorUserID, targetUserID) + ret0, _ := ret[0].([]*types.PersonalAccessToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAllPATs indicates an expected call of GetAllPATs. +func (mr *MockManagerMockRecorder) GetAllPATs(ctx, accountID, initiatorUserID, targetUserID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllPATs", reflect.TypeOf((*MockManager)(nil).GetAllPATs), ctx, accountID, initiatorUserID, targetUserID) +} + +// GetAllPeerJobs mocks base method. +func (m *MockManager) GetAllPeerJobs(ctx context.Context, accountID, userID, peerID string) ([]*types.Job, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAllPeerJobs", ctx, accountID, userID, peerID) + ret0, _ := ret[0].([]*types.Job) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAllPeerJobs indicates an expected call of GetAllPeerJobs. +func (mr *MockManagerMockRecorder) GetAllPeerJobs(ctx, accountID, userID, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllPeerJobs", reflect.TypeOf((*MockManager)(nil).GetAllPeerJobs), ctx, accountID, userID, peerID) +} + +// GetCurrentUserInfo mocks base method. +func (m *MockManager) GetCurrentUserInfo(ctx context.Context, userAuth auth.UserAuth) (*users.UserInfoWithPermissions, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentUserInfo", ctx, userAuth) + ret0, _ := ret[0].(*users.UserInfoWithPermissions) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentUserInfo indicates an expected call of GetCurrentUserInfo. +func (mr *MockManagerMockRecorder) GetCurrentUserInfo(ctx, userAuth interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentUserInfo", reflect.TypeOf((*MockManager)(nil).GetCurrentUserInfo), ctx, userAuth) +} + +// GetDNSSettings mocks base method. +func (m *MockManager) GetDNSSettings(ctx context.Context, accountID, userID string) (*types.DNSSettings, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDNSSettings", ctx, accountID, userID) + ret0, _ := ret[0].(*types.DNSSettings) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDNSSettings indicates an expected call of GetDNSSettings. +func (mr *MockManagerMockRecorder) GetDNSSettings(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDNSSettings", reflect.TypeOf((*MockManager)(nil).GetDNSSettings), ctx, accountID, userID) +} + +// GetEvents mocks base method. +func (m *MockManager) GetEvents(ctx context.Context, accountID, userID string) ([]*activity.Event, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEvents", ctx, accountID, userID) + ret0, _ := ret[0].([]*activity.Event) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEvents indicates an expected call of GetEvents. +func (mr *MockManagerMockRecorder) GetEvents(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEvents", reflect.TypeOf((*MockManager)(nil).GetEvents), ctx, accountID, userID) +} + +// GetExternalCacheManager mocks base method. +func (m *MockManager) GetExternalCacheManager() ExternalCacheManager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetExternalCacheManager") + ret0, _ := ret[0].(ExternalCacheManager) + return ret0 +} + +// GetExternalCacheManager indicates an expected call of GetExternalCacheManager. +func (mr *MockManagerMockRecorder) GetExternalCacheManager() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExternalCacheManager", reflect.TypeOf((*MockManager)(nil).GetExternalCacheManager)) +} + +// GetGroup mocks base method. +func (m *MockManager) GetGroup(ctx context.Context, accountId, groupID, userID string) (*types.Group, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGroup", ctx, accountId, groupID, userID) + ret0, _ := ret[0].(*types.Group) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetGroup indicates an expected call of GetGroup. +func (mr *MockManagerMockRecorder) GetGroup(ctx, accountId, groupID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroup", reflect.TypeOf((*MockManager)(nil).GetGroup), ctx, accountId, groupID, userID) +} + +// GetGroupByName mocks base method. +func (m *MockManager) GetGroupByName(ctx context.Context, groupName, accountID string) (*types.Group, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGroupByName", ctx, groupName, accountID) + ret0, _ := ret[0].(*types.Group) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetGroupByName indicates an expected call of GetGroupByName. +func (mr *MockManagerMockRecorder) GetGroupByName(ctx, groupName, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupByName", reflect.TypeOf((*MockManager)(nil).GetGroupByName), ctx, groupName, accountID) +} + +// GetIdentityProvider mocks base method. +func (m *MockManager) GetIdentityProvider(ctx context.Context, accountID, idpID, userID string) (*types.IdentityProvider, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetIdentityProvider", ctx, accountID, idpID, userID) + ret0, _ := ret[0].(*types.IdentityProvider) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetIdentityProvider indicates an expected call of GetIdentityProvider. +func (mr *MockManagerMockRecorder) GetIdentityProvider(ctx, accountID, idpID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIdentityProvider", reflect.TypeOf((*MockManager)(nil).GetIdentityProvider), ctx, accountID, idpID, userID) +} + +// GetIdentityProviders mocks base method. +func (m *MockManager) GetIdentityProviders(ctx context.Context, accountID, userID string) ([]*types.IdentityProvider, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetIdentityProviders", ctx, accountID, userID) + ret0, _ := ret[0].([]*types.IdentityProvider) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetIdentityProviders indicates an expected call of GetIdentityProviders. +func (mr *MockManagerMockRecorder) GetIdentityProviders(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIdentityProviders", reflect.TypeOf((*MockManager)(nil).GetIdentityProviders), ctx, accountID, userID) +} + +// GetIdpManager mocks base method. +func (m *MockManager) GetIdpManager() idp.Manager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetIdpManager") + ret0, _ := ret[0].(idp.Manager) + return ret0 +} + +// GetIdpManager indicates an expected call of GetIdpManager. +func (mr *MockManagerMockRecorder) GetIdpManager() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIdpManager", reflect.TypeOf((*MockManager)(nil).GetIdpManager)) +} + +// GetNameServerGroup mocks base method. +func (m *MockManager) GetNameServerGroup(ctx context.Context, accountID, userID, nsGroupID string) (*dns.NameServerGroup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNameServerGroup", ctx, accountID, userID, nsGroupID) + ret0, _ := ret[0].(*dns.NameServerGroup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNameServerGroup indicates an expected call of GetNameServerGroup. +func (mr *MockManagerMockRecorder) GetNameServerGroup(ctx, accountID, userID, nsGroupID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNameServerGroup", reflect.TypeOf((*MockManager)(nil).GetNameServerGroup), ctx, accountID, userID, nsGroupID) +} + +// GetNetworkMap mocks base method. +func (m *MockManager) GetNetworkMap(ctx context.Context, peerID string) (*types.NetworkMap, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNetworkMap", ctx, peerID) + ret0, _ := ret[0].(*types.NetworkMap) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNetworkMap indicates an expected call of GetNetworkMap. +func (mr *MockManagerMockRecorder) GetNetworkMap(ctx, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetworkMap", reflect.TypeOf((*MockManager)(nil).GetNetworkMap), ctx, peerID) +} + +// GetOrCreateAccountByPrivateDomain mocks base method. +func (m *MockManager) GetOrCreateAccountByPrivateDomain(ctx context.Context, initiatorId, domain string) (*types.Account, bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetOrCreateAccountByPrivateDomain", ctx, initiatorId, domain) + ret0, _ := ret[0].(*types.Account) + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetOrCreateAccountByPrivateDomain indicates an expected call of GetOrCreateAccountByPrivateDomain. +func (mr *MockManagerMockRecorder) GetOrCreateAccountByPrivateDomain(ctx, initiatorId, domain interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOrCreateAccountByPrivateDomain", reflect.TypeOf((*MockManager)(nil).GetOrCreateAccountByPrivateDomain), ctx, initiatorId, domain) +} + +// GetOrCreateAccountByUser mocks base method. +func (m *MockManager) GetOrCreateAccountByUser(ctx context.Context, userAuth auth.UserAuth) (*types.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetOrCreateAccountByUser", ctx, userAuth) + ret0, _ := ret[0].(*types.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetOrCreateAccountByUser indicates an expected call of GetOrCreateAccountByUser. +func (mr *MockManagerMockRecorder) GetOrCreateAccountByUser(ctx, userAuth interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOrCreateAccountByUser", reflect.TypeOf((*MockManager)(nil).GetOrCreateAccountByUser), ctx, userAuth) +} + +// GetOwnerInfo mocks base method. +func (m *MockManager) GetOwnerInfo(ctx context.Context, accountId string) (*types.UserInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetOwnerInfo", ctx, accountId) + ret0, _ := ret[0].(*types.UserInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetOwnerInfo indicates an expected call of GetOwnerInfo. +func (mr *MockManagerMockRecorder) GetOwnerInfo(ctx, accountId interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOwnerInfo", reflect.TypeOf((*MockManager)(nil).GetOwnerInfo), ctx, accountId) +} + +// GetPAT mocks base method. +func (m *MockManager) GetPAT(ctx context.Context, accountID, initiatorUserID, targetUserID, tokenID string) (*types.PersonalAccessToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPAT", ctx, accountID, initiatorUserID, targetUserID, tokenID) + ret0, _ := ret[0].(*types.PersonalAccessToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPAT indicates an expected call of GetPAT. +func (mr *MockManagerMockRecorder) GetPAT(ctx, accountID, initiatorUserID, targetUserID, tokenID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPAT", reflect.TypeOf((*MockManager)(nil).GetPAT), ctx, accountID, initiatorUserID, targetUserID, tokenID) +} + +// GetPeer mocks base method. +func (m *MockManager) GetPeer(ctx context.Context, accountID, peerID, userID string) (*peer.Peer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeer", ctx, accountID, peerID, userID) + ret0, _ := ret[0].(*peer.Peer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeer indicates an expected call of GetPeer. +func (mr *MockManagerMockRecorder) GetPeer(ctx, accountID, peerID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeer", reflect.TypeOf((*MockManager)(nil).GetPeer), ctx, accountID, peerID, userID) +} + +// GetPeerGroups mocks base method. +func (m *MockManager) GetPeerGroups(ctx context.Context, accountID, peerID string) ([]*types.Group, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerGroups", ctx, accountID, peerID) + ret0, _ := ret[0].([]*types.Group) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerGroups indicates an expected call of GetPeerGroups. +func (mr *MockManagerMockRecorder) GetPeerGroups(ctx, accountID, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerGroups", reflect.TypeOf((*MockManager)(nil).GetPeerGroups), ctx, accountID, peerID) +} + +// GetPeerJobByID mocks base method. +func (m *MockManager) GetPeerJobByID(ctx context.Context, accountID, userID, peerID, jobID string) (*types.Job, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerJobByID", ctx, accountID, userID, peerID, jobID) + ret0, _ := ret[0].(*types.Job) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerJobByID indicates an expected call of GetPeerJobByID. +func (mr *MockManagerMockRecorder) GetPeerJobByID(ctx, accountID, userID, peerID, jobID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerJobByID", reflect.TypeOf((*MockManager)(nil).GetPeerJobByID), ctx, accountID, userID, peerID, jobID) +} + +// GetPeerNetwork mocks base method. +func (m *MockManager) GetPeerNetwork(ctx context.Context, peerID string) (*types.Network, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerNetwork", ctx, peerID) + ret0, _ := ret[0].(*types.Network) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerNetwork indicates an expected call of GetPeerNetwork. +func (mr *MockManagerMockRecorder) GetPeerNetwork(ctx, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerNetwork", reflect.TypeOf((*MockManager)(nil).GetPeerNetwork), ctx, peerID) +} + +// GetPeers mocks base method. +func (m *MockManager) GetPeers(ctx context.Context, accountID, userID, nameFilter, ipFilter string) ([]*peer.Peer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeers", ctx, accountID, userID, nameFilter, ipFilter) + ret0, _ := ret[0].([]*peer.Peer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeers indicates an expected call of GetPeers. +func (mr *MockManagerMockRecorder) GetPeers(ctx, accountID, userID, nameFilter, ipFilter interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeers", reflect.TypeOf((*MockManager)(nil).GetPeers), ctx, accountID, userID, nameFilter, ipFilter) +} + +// GetPolicy mocks base method. +func (m *MockManager) GetPolicy(ctx context.Context, accountID, policyID, userID string) (*types.Policy, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPolicy", ctx, accountID, policyID, userID) + ret0, _ := ret[0].(*types.Policy) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPolicy indicates an expected call of GetPolicy. +func (mr *MockManagerMockRecorder) GetPolicy(ctx, accountID, policyID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPolicy", reflect.TypeOf((*MockManager)(nil).GetPolicy), ctx, accountID, policyID, userID) +} + +// GetPostureChecks mocks base method. +func (m *MockManager) GetPostureChecks(ctx context.Context, accountID, postureChecksID, userID string) (*posture.Checks, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPostureChecks", ctx, accountID, postureChecksID, userID) + ret0, _ := ret[0].(*posture.Checks) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPostureChecks indicates an expected call of GetPostureChecks. +func (mr *MockManagerMockRecorder) GetPostureChecks(ctx, accountID, postureChecksID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPostureChecks", reflect.TypeOf((*MockManager)(nil).GetPostureChecks), ctx, accountID, postureChecksID, userID) +} + +// GetRoute mocks base method. +func (m *MockManager) GetRoute(ctx context.Context, accountID string, routeID route.ID, userID string) (*route.Route, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRoute", ctx, accountID, routeID, userID) + ret0, _ := ret[0].(*route.Route) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRoute indicates an expected call of GetRoute. +func (mr *MockManagerMockRecorder) GetRoute(ctx, accountID, routeID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRoute", reflect.TypeOf((*MockManager)(nil).GetRoute), ctx, accountID, routeID, userID) +} + +// GetSetupKey mocks base method. +func (m *MockManager) GetSetupKey(ctx context.Context, accountID, userID, keyID string) (*types.SetupKey, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSetupKey", ctx, accountID, userID, keyID) + ret0, _ := ret[0].(*types.SetupKey) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSetupKey indicates an expected call of GetSetupKey. +func (mr *MockManagerMockRecorder) GetSetupKey(ctx, accountID, userID, keyID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSetupKey", reflect.TypeOf((*MockManager)(nil).GetSetupKey), ctx, accountID, userID, keyID) +} + +// GetStore mocks base method. +func (m *MockManager) GetStore() store.Store { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetStore") + ret0, _ := ret[0].(store.Store) + return ret0 +} + +// GetStore indicates an expected call of GetStore. +func (mr *MockManagerMockRecorder) GetStore() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStore", reflect.TypeOf((*MockManager)(nil).GetStore)) +} + +// GetUserByID mocks base method. +func (m *MockManager) GetUserByID(ctx context.Context, id string) (*types.User, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUserByID", ctx, id) + ret0, _ := ret[0].(*types.User) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUserByID indicates an expected call of GetUserByID. +func (mr *MockManagerMockRecorder) GetUserByID(ctx, id interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserByID", reflect.TypeOf((*MockManager)(nil).GetUserByID), ctx, id) +} + +// GetUserFromUserAuth mocks base method. +func (m *MockManager) GetUserFromUserAuth(ctx context.Context, userAuth auth.UserAuth) (*types.User, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUserFromUserAuth", ctx, userAuth) + ret0, _ := ret[0].(*types.User) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUserFromUserAuth indicates an expected call of GetUserFromUserAuth. +func (mr *MockManagerMockRecorder) GetUserFromUserAuth(ctx, userAuth interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserFromUserAuth", reflect.TypeOf((*MockManager)(nil).GetUserFromUserAuth), ctx, userAuth) +} + +// GetUserIDByPeerKey mocks base method. +func (m *MockManager) GetUserIDByPeerKey(ctx context.Context, peerKey string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUserIDByPeerKey", ctx, peerKey) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUserIDByPeerKey indicates an expected call of GetUserIDByPeerKey. +func (mr *MockManagerMockRecorder) GetUserIDByPeerKey(ctx, peerKey interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserIDByPeerKey", reflect.TypeOf((*MockManager)(nil).GetUserIDByPeerKey), ctx, peerKey) +} + +// GetUserInviteInfo mocks base method. +func (m *MockManager) GetUserInviteInfo(ctx context.Context, token string) (*types.UserInviteInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUserInviteInfo", ctx, token) + ret0, _ := ret[0].(*types.UserInviteInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUserInviteInfo indicates an expected call of GetUserInviteInfo. +func (mr *MockManagerMockRecorder) GetUserInviteInfo(ctx, token interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserInviteInfo", reflect.TypeOf((*MockManager)(nil).GetUserInviteInfo), ctx, token) +} + +// GetUsersFromAccount mocks base method. +func (m *MockManager) GetUsersFromAccount(ctx context.Context, accountID, userID string) (map[string]*types.UserInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUsersFromAccount", ctx, accountID, userID) + ret0, _ := ret[0].(map[string]*types.UserInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUsersFromAccount indicates an expected call of GetUsersFromAccount. +func (mr *MockManagerMockRecorder) GetUsersFromAccount(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUsersFromAccount", reflect.TypeOf((*MockManager)(nil).GetUsersFromAccount), ctx, accountID, userID) +} + +// GetValidatedPeers mocks base method. +func (m *MockManager) GetValidatedPeers(ctx context.Context, accountID string) (map[string]struct{}, map[string]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetValidatedPeers", ctx, accountID) + ret0, _ := ret[0].(map[string]struct{}) + ret1, _ := ret[1].(map[string]string) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetValidatedPeers indicates an expected call of GetValidatedPeers. +func (mr *MockManagerMockRecorder) GetValidatedPeers(ctx, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidatedPeers", reflect.TypeOf((*MockManager)(nil).GetValidatedPeers), ctx, accountID) +} + +// GroupAddPeer mocks base method. +func (m *MockManager) GroupAddPeer(ctx context.Context, accountId, groupID, peerID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GroupAddPeer", ctx, accountId, groupID, peerID) + ret0, _ := ret[0].(error) + return ret0 +} + +// GroupAddPeer indicates an expected call of GroupAddPeer. +func (mr *MockManagerMockRecorder) GroupAddPeer(ctx, accountId, groupID, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GroupAddPeer", reflect.TypeOf((*MockManager)(nil).GroupAddPeer), ctx, accountId, groupID, peerID) +} + +// GroupDeletePeer mocks base method. +func (m *MockManager) GroupDeletePeer(ctx context.Context, accountId, groupID, peerID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GroupDeletePeer", ctx, accountId, groupID, peerID) + ret0, _ := ret[0].(error) + return ret0 +} + +// GroupDeletePeer indicates an expected call of GroupDeletePeer. +func (mr *MockManagerMockRecorder) GroupDeletePeer(ctx, accountId, groupID, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GroupDeletePeer", reflect.TypeOf((*MockManager)(nil).GroupDeletePeer), ctx, accountId, groupID, peerID) +} + +// GroupValidation mocks base method. +func (m *MockManager) GroupValidation(ctx context.Context, accountId string, groups []string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GroupValidation", ctx, accountId, groups) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GroupValidation indicates an expected call of GroupValidation. +func (mr *MockManagerMockRecorder) GroupValidation(ctx, accountId, groups interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GroupValidation", reflect.TypeOf((*MockManager)(nil).GroupValidation), ctx, accountId, groups) +} + +// InviteUser mocks base method. +func (m *MockManager) InviteUser(ctx context.Context, accountID, initiatorUserID, targetUserID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InviteUser", ctx, accountID, initiatorUserID, targetUserID) + ret0, _ := ret[0].(error) + return ret0 +} + +// InviteUser indicates an expected call of InviteUser. +func (mr *MockManagerMockRecorder) InviteUser(ctx, accountID, initiatorUserID, targetUserID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InviteUser", reflect.TypeOf((*MockManager)(nil).InviteUser), ctx, accountID, initiatorUserID, targetUserID) +} + +// ListNameServerGroups mocks base method. +func (m *MockManager) ListNameServerGroups(ctx context.Context, accountID, userID string) ([]*dns.NameServerGroup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListNameServerGroups", ctx, accountID, userID) + ret0, _ := ret[0].([]*dns.NameServerGroup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListNameServerGroups indicates an expected call of ListNameServerGroups. +func (mr *MockManagerMockRecorder) ListNameServerGroups(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNameServerGroups", reflect.TypeOf((*MockManager)(nil).ListNameServerGroups), ctx, accountID, userID) +} + +// ListPolicies mocks base method. +func (m *MockManager) ListPolicies(ctx context.Context, accountID, userID string) ([]*types.Policy, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListPolicies", ctx, accountID, userID) + ret0, _ := ret[0].([]*types.Policy) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListPolicies indicates an expected call of ListPolicies. +func (mr *MockManagerMockRecorder) ListPolicies(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPolicies", reflect.TypeOf((*MockManager)(nil).ListPolicies), ctx, accountID, userID) +} + +// ListPostureChecks mocks base method. +func (m *MockManager) ListPostureChecks(ctx context.Context, accountID, userID string) ([]*posture.Checks, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListPostureChecks", ctx, accountID, userID) + ret0, _ := ret[0].([]*posture.Checks) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListPostureChecks indicates an expected call of ListPostureChecks. +func (mr *MockManagerMockRecorder) ListPostureChecks(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPostureChecks", reflect.TypeOf((*MockManager)(nil).ListPostureChecks), ctx, accountID, userID) +} + +// ListRoutes mocks base method. +func (m *MockManager) ListRoutes(ctx context.Context, accountID, userID string) ([]*route.Route, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListRoutes", ctx, accountID, userID) + ret0, _ := ret[0].([]*route.Route) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListRoutes indicates an expected call of ListRoutes. +func (mr *MockManagerMockRecorder) ListRoutes(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRoutes", reflect.TypeOf((*MockManager)(nil).ListRoutes), ctx, accountID, userID) +} + +// ListSetupKeys mocks base method. +func (m *MockManager) ListSetupKeys(ctx context.Context, accountID, userID string) ([]*types.SetupKey, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSetupKeys", ctx, accountID, userID) + ret0, _ := ret[0].([]*types.SetupKey) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSetupKeys indicates an expected call of ListSetupKeys. +func (mr *MockManagerMockRecorder) ListSetupKeys(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSetupKeys", reflect.TypeOf((*MockManager)(nil).ListSetupKeys), ctx, accountID, userID) +} + +// ListUserInvites mocks base method. +func (m *MockManager) ListUserInvites(ctx context.Context, accountID, initiatorUserID string) ([]*types.UserInvite, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListUserInvites", ctx, accountID, initiatorUserID) + ret0, _ := ret[0].([]*types.UserInvite) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListUserInvites indicates an expected call of ListUserInvites. +func (mr *MockManagerMockRecorder) ListUserInvites(ctx, accountID, initiatorUserID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUserInvites", reflect.TypeOf((*MockManager)(nil).ListUserInvites), ctx, accountID, initiatorUserID) +} + +// ListUsers mocks base method. +func (m *MockManager) ListUsers(ctx context.Context, accountID string) ([]*types.User, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListUsers", ctx, accountID) + ret0, _ := ret[0].([]*types.User) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListUsers indicates an expected call of ListUsers. +func (mr *MockManagerMockRecorder) ListUsers(ctx, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUsers", reflect.TypeOf((*MockManager)(nil).ListUsers), ctx, accountID) +} + +// LoginPeer mocks base method. +func (m *MockManager) LoginPeer(ctx context.Context, login types.PeerLogin) (*peer.Peer, *types.NetworkMap, []*posture.Checks, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoginPeer", ctx, login) + ret0, _ := ret[0].(*peer.Peer) + ret1, _ := ret[1].(*types.NetworkMap) + ret2, _ := ret[2].([]*posture.Checks) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// LoginPeer indicates an expected call of LoginPeer. +func (mr *MockManagerMockRecorder) LoginPeer(ctx, login interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoginPeer", reflect.TypeOf((*MockManager)(nil).LoginPeer), ctx, login) +} + +// MarkPeerConnected mocks base method. +func (m *MockManager) MarkPeerConnected(ctx context.Context, peerKey string, connected bool, realIP net.IP, accountID string, syncTime time.Time) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarkPeerConnected", ctx, peerKey, connected, realIP, accountID, syncTime) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarkPeerConnected indicates an expected call of MarkPeerConnected. +func (mr *MockManagerMockRecorder) MarkPeerConnected(ctx, peerKey, connected, realIP, accountID, syncTime interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkPeerConnected", reflect.TypeOf((*MockManager)(nil).MarkPeerConnected), ctx, peerKey, connected, realIP, accountID, syncTime) +} + +// OnPeerDisconnected mocks base method. +func (m *MockManager) OnPeerDisconnected(ctx context.Context, accountID, peerPubKey string, streamStartTime time.Time) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OnPeerDisconnected", ctx, accountID, peerPubKey, streamStartTime) + ret0, _ := ret[0].(error) + return ret0 +} + +// OnPeerDisconnected indicates an expected call of OnPeerDisconnected. +func (mr *MockManagerMockRecorder) OnPeerDisconnected(ctx, accountID, peerPubKey, streamStartTime interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnPeerDisconnected", reflect.TypeOf((*MockManager)(nil).OnPeerDisconnected), ctx, accountID, peerPubKey, streamStartTime) +} + +// RegenerateUserInvite mocks base method. +func (m *MockManager) RegenerateUserInvite(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RegenerateUserInvite", ctx, accountID, initiatorUserID, inviteID, expiresIn) + ret0, _ := ret[0].(*types.UserInvite) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RegenerateUserInvite indicates an expected call of RegenerateUserInvite. +func (mr *MockManagerMockRecorder) RegenerateUserInvite(ctx, accountID, initiatorUserID, inviteID, expiresIn interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegenerateUserInvite", reflect.TypeOf((*MockManager)(nil).RegenerateUserInvite), ctx, accountID, initiatorUserID, inviteID, expiresIn) +} + +// RejectUser mocks base method. +func (m *MockManager) RejectUser(ctx context.Context, accountID, initiatorUserID, targetUserID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RejectUser", ctx, accountID, initiatorUserID, targetUserID) + ret0, _ := ret[0].(error) + return ret0 +} + +// RejectUser indicates an expected call of RejectUser. +func (mr *MockManagerMockRecorder) RejectUser(ctx, accountID, initiatorUserID, targetUserID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RejectUser", reflect.TypeOf((*MockManager)(nil).RejectUser), ctx, accountID, initiatorUserID, targetUserID) +} + +// SaveDNSSettings mocks base method. +func (m *MockManager) SaveDNSSettings(ctx context.Context, accountID, userID string, dnsSettingsToSave *types.DNSSettings) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveDNSSettings", ctx, accountID, userID, dnsSettingsToSave) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveDNSSettings indicates an expected call of SaveDNSSettings. +func (mr *MockManagerMockRecorder) SaveDNSSettings(ctx, accountID, userID, dnsSettingsToSave interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveDNSSettings", reflect.TypeOf((*MockManager)(nil).SaveDNSSettings), ctx, accountID, userID, dnsSettingsToSave) +} + +// SaveNameServerGroup mocks base method. +func (m *MockManager) SaveNameServerGroup(ctx context.Context, accountID, userID string, nsGroupToSave *dns.NameServerGroup) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveNameServerGroup", ctx, accountID, userID, nsGroupToSave) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveNameServerGroup indicates an expected call of SaveNameServerGroup. +func (mr *MockManagerMockRecorder) SaveNameServerGroup(ctx, accountID, userID, nsGroupToSave interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveNameServerGroup", reflect.TypeOf((*MockManager)(nil).SaveNameServerGroup), ctx, accountID, userID, nsGroupToSave) +} + +// SaveOrAddUser mocks base method. +func (m *MockManager) SaveOrAddUser(ctx context.Context, accountID, initiatorUserID string, update *types.User, addIfNotExists bool) (*types.UserInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveOrAddUser", ctx, accountID, initiatorUserID, update, addIfNotExists) + ret0, _ := ret[0].(*types.UserInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SaveOrAddUser indicates an expected call of SaveOrAddUser. +func (mr *MockManagerMockRecorder) SaveOrAddUser(ctx, accountID, initiatorUserID, update, addIfNotExists interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveOrAddUser", reflect.TypeOf((*MockManager)(nil).SaveOrAddUser), ctx, accountID, initiatorUserID, update, addIfNotExists) +} + +// SaveOrAddUsers mocks base method. +func (m *MockManager) SaveOrAddUsers(ctx context.Context, accountID, initiatorUserID string, updates []*types.User, addIfNotExists bool) ([]*types.UserInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveOrAddUsers", ctx, accountID, initiatorUserID, updates, addIfNotExists) + ret0, _ := ret[0].([]*types.UserInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SaveOrAddUsers indicates an expected call of SaveOrAddUsers. +func (mr *MockManagerMockRecorder) SaveOrAddUsers(ctx, accountID, initiatorUserID, updates, addIfNotExists interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveOrAddUsers", reflect.TypeOf((*MockManager)(nil).SaveOrAddUsers), ctx, accountID, initiatorUserID, updates, addIfNotExists) +} + +// SavePolicy mocks base method. +func (m *MockManager) SavePolicy(ctx context.Context, accountID, userID string, policy *types.Policy, create bool) (*types.Policy, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SavePolicy", ctx, accountID, userID, policy, create) + ret0, _ := ret[0].(*types.Policy) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SavePolicy indicates an expected call of SavePolicy. +func (mr *MockManagerMockRecorder) SavePolicy(ctx, accountID, userID, policy, create interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SavePolicy", reflect.TypeOf((*MockManager)(nil).SavePolicy), ctx, accountID, userID, policy, create) +} + +// SavePostureChecks mocks base method. +func (m *MockManager) SavePostureChecks(ctx context.Context, accountID, userID string, postureChecks *posture.Checks, create bool) (*posture.Checks, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SavePostureChecks", ctx, accountID, userID, postureChecks, create) + ret0, _ := ret[0].(*posture.Checks) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SavePostureChecks indicates an expected call of SavePostureChecks. +func (mr *MockManagerMockRecorder) SavePostureChecks(ctx, accountID, userID, postureChecks, create interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SavePostureChecks", reflect.TypeOf((*MockManager)(nil).SavePostureChecks), ctx, accountID, userID, postureChecks, create) +} + +// SaveRoute mocks base method. +func (m *MockManager) SaveRoute(ctx context.Context, accountID, userID string, route *route.Route) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveRoute", ctx, accountID, userID, route) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveRoute indicates an expected call of SaveRoute. +func (mr *MockManagerMockRecorder) SaveRoute(ctx, accountID, userID, route interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveRoute", reflect.TypeOf((*MockManager)(nil).SaveRoute), ctx, accountID, userID, route) +} + +// SaveSetupKey mocks base method. +func (m *MockManager) SaveSetupKey(ctx context.Context, accountID string, key *types.SetupKey, userID string) (*types.SetupKey, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveSetupKey", ctx, accountID, key, userID) + ret0, _ := ret[0].(*types.SetupKey) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SaveSetupKey indicates an expected call of SaveSetupKey. +func (mr *MockManagerMockRecorder) SaveSetupKey(ctx, accountID, key, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveSetupKey", reflect.TypeOf((*MockManager)(nil).SaveSetupKey), ctx, accountID, key, userID) +} + +// SaveUser mocks base method. +func (m *MockManager) SaveUser(ctx context.Context, accountID, initiatorUserID string, update *types.User) (*types.UserInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveUser", ctx, accountID, initiatorUserID, update) + ret0, _ := ret[0].(*types.UserInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SaveUser indicates an expected call of SaveUser. +func (mr *MockManagerMockRecorder) SaveUser(ctx, accountID, initiatorUserID, update interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveUser", reflect.TypeOf((*MockManager)(nil).SaveUser), ctx, accountID, initiatorUserID, update) +} + +// SetServiceManager mocks base method. +func (m *MockManager) SetServiceManager(serviceManager reverseproxy.Manager) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetServiceManager", serviceManager) +} + +// SetServiceManager indicates an expected call of SetServiceManager. +func (mr *MockManagerMockRecorder) SetServiceManager(serviceManager interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetServiceManager", reflect.TypeOf((*MockManager)(nil).SetServiceManager), serviceManager) +} + +// StoreEvent mocks base method. +func (m *MockManager) StoreEvent(ctx context.Context, initiatorID, targetID, accountID string, activityID activity.ActivityDescriber, meta map[string]any) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "StoreEvent", ctx, initiatorID, targetID, accountID, activityID, meta) +} + +// StoreEvent indicates an expected call of StoreEvent. +func (mr *MockManagerMockRecorder) StoreEvent(ctx, initiatorID, targetID, accountID, activityID, meta interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreEvent", reflect.TypeOf((*MockManager)(nil).StoreEvent), ctx, initiatorID, targetID, accountID, activityID, meta) +} + +// SyncAndMarkPeer mocks base method. +func (m *MockManager) SyncAndMarkPeer(ctx context.Context, accountID, peerPubKey string, meta peer.PeerSystemMeta, realIP net.IP, syncTime time.Time) (*peer.Peer, *types.NetworkMap, []*posture.Checks, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncAndMarkPeer", ctx, accountID, peerPubKey, meta, realIP, syncTime) + ret0, _ := ret[0].(*peer.Peer) + ret1, _ := ret[1].(*types.NetworkMap) + ret2, _ := ret[2].([]*posture.Checks) + ret3, _ := ret[3].(int64) + ret4, _ := ret[4].(error) + return ret0, ret1, ret2, ret3, ret4 +} + +// SyncAndMarkPeer indicates an expected call of SyncAndMarkPeer. +func (mr *MockManagerMockRecorder) SyncAndMarkPeer(ctx, accountID, peerPubKey, meta, realIP, syncTime interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncAndMarkPeer", reflect.TypeOf((*MockManager)(nil).SyncAndMarkPeer), ctx, accountID, peerPubKey, meta, realIP, syncTime) +} + +// SyncPeer mocks base method. +func (m *MockManager) SyncPeer(ctx context.Context, sync types.PeerSync, accountID string) (*peer.Peer, *types.NetworkMap, []*posture.Checks, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncPeer", ctx, sync, accountID) + ret0, _ := ret[0].(*peer.Peer) + ret1, _ := ret[1].(*types.NetworkMap) + ret2, _ := ret[2].([]*posture.Checks) + ret3, _ := ret[3].(int64) + ret4, _ := ret[4].(error) + return ret0, ret1, ret2, ret3, ret4 +} + +// SyncPeer indicates an expected call of SyncPeer. +func (mr *MockManagerMockRecorder) SyncPeer(ctx, sync, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPeer", reflect.TypeOf((*MockManager)(nil).SyncPeer), ctx, sync, accountID) +} + +// SyncPeerMeta mocks base method. +func (m *MockManager) SyncPeerMeta(ctx context.Context, peerPubKey string, meta peer.PeerSystemMeta) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncPeerMeta", ctx, peerPubKey, meta) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncPeerMeta indicates an expected call of SyncPeerMeta. +func (mr *MockManagerMockRecorder) SyncPeerMeta(ctx, peerPubKey, meta interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPeerMeta", reflect.TypeOf((*MockManager)(nil).SyncPeerMeta), ctx, peerPubKey, meta) +} + +// SyncUserJWTGroups mocks base method. +func (m *MockManager) SyncUserJWTGroups(ctx context.Context, userAuth auth.UserAuth) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncUserJWTGroups", ctx, userAuth) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncUserJWTGroups indicates an expected call of SyncUserJWTGroups. +func (mr *MockManagerMockRecorder) SyncUserJWTGroups(ctx, userAuth interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncUserJWTGroups", reflect.TypeOf((*MockManager)(nil).SyncUserJWTGroups), ctx, userAuth) +} + +// UpdateAccountOnboarding mocks base method. +func (m *MockManager) UpdateAccountOnboarding(ctx context.Context, accountID, userID string, newOnboarding *types.AccountOnboarding) (*types.AccountOnboarding, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateAccountOnboarding", ctx, accountID, userID, newOnboarding) + ret0, _ := ret[0].(*types.AccountOnboarding) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateAccountOnboarding indicates an expected call of UpdateAccountOnboarding. +func (mr *MockManagerMockRecorder) UpdateAccountOnboarding(ctx, accountID, userID, newOnboarding interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccountOnboarding", reflect.TypeOf((*MockManager)(nil).UpdateAccountOnboarding), ctx, accountID, userID, newOnboarding) +} + +// UpdateAccountPeers mocks base method. +func (m *MockManager) UpdateAccountPeers(ctx context.Context, accountID string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "UpdateAccountPeers", ctx, accountID) +} + +// UpdateAccountPeers indicates an expected call of UpdateAccountPeers. +func (mr *MockManagerMockRecorder) UpdateAccountPeers(ctx, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccountPeers", reflect.TypeOf((*MockManager)(nil).UpdateAccountPeers), ctx, accountID) +} + +// UpdateAccountSettings mocks base method. +func (m *MockManager) UpdateAccountSettings(ctx context.Context, accountID, userID string, newSettings *types.Settings) (*types.Settings, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateAccountSettings", ctx, accountID, userID, newSettings) + ret0, _ := ret[0].(*types.Settings) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateAccountSettings indicates an expected call of UpdateAccountSettings. +func (mr *MockManagerMockRecorder) UpdateAccountSettings(ctx, accountID, userID, newSettings interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccountSettings", reflect.TypeOf((*MockManager)(nil).UpdateAccountSettings), ctx, accountID, userID, newSettings) +} + +// UpdateGroup mocks base method. +func (m *MockManager) UpdateGroup(ctx context.Context, accountID, userID string, group *types.Group) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateGroup", ctx, accountID, userID, group) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateGroup indicates an expected call of UpdateGroup. +func (mr *MockManagerMockRecorder) UpdateGroup(ctx, accountID, userID, group interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateGroup", reflect.TypeOf((*MockManager)(nil).UpdateGroup), ctx, accountID, userID, group) +} + +// UpdateGroups mocks base method. +func (m *MockManager) UpdateGroups(ctx context.Context, accountID, userID string, newGroups []*types.Group) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateGroups", ctx, accountID, userID, newGroups) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateGroups indicates an expected call of UpdateGroups. +func (mr *MockManagerMockRecorder) UpdateGroups(ctx, accountID, userID, newGroups interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateGroups", reflect.TypeOf((*MockManager)(nil).UpdateGroups), ctx, accountID, userID, newGroups) +} + +// UpdateIdentityProvider mocks base method. +func (m *MockManager) UpdateIdentityProvider(ctx context.Context, accountID, idpID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateIdentityProvider", ctx, accountID, idpID, userID, idp) + ret0, _ := ret[0].(*types.IdentityProvider) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateIdentityProvider indicates an expected call of UpdateIdentityProvider. +func (mr *MockManagerMockRecorder) UpdateIdentityProvider(ctx, accountID, idpID, userID, idp interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateIdentityProvider", reflect.TypeOf((*MockManager)(nil).UpdateIdentityProvider), ctx, accountID, idpID, userID, idp) +} + +// UpdateIntegratedValidator mocks base method. +func (m *MockManager) UpdateIntegratedValidator(ctx context.Context, accountID, userID, validator string, groups []string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateIntegratedValidator", ctx, accountID, userID, validator, groups) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateIntegratedValidator indicates an expected call of UpdateIntegratedValidator. +func (mr *MockManagerMockRecorder) UpdateIntegratedValidator(ctx, accountID, userID, validator, groups interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateIntegratedValidator", reflect.TypeOf((*MockManager)(nil).UpdateIntegratedValidator), ctx, accountID, userID, validator, groups) +} + +// UpdatePeer mocks base method. +func (m *MockManager) UpdatePeer(ctx context.Context, accountID, userID string, p *peer.Peer) (*peer.Peer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdatePeer", ctx, accountID, userID, p) + ret0, _ := ret[0].(*peer.Peer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdatePeer indicates an expected call of UpdatePeer. +func (mr *MockManagerMockRecorder) UpdatePeer(ctx, accountID, userID, p interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePeer", reflect.TypeOf((*MockManager)(nil).UpdatePeer), ctx, accountID, userID, p) +} + +// UpdatePeerIP mocks base method. +func (m *MockManager) UpdatePeerIP(ctx context.Context, accountID, userID, peerID string, newIP netip.Addr) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdatePeerIP", ctx, accountID, userID, peerID, newIP) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdatePeerIP indicates an expected call of UpdatePeerIP. +func (mr *MockManagerMockRecorder) UpdatePeerIP(ctx, accountID, userID, peerID, newIP interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePeerIP", reflect.TypeOf((*MockManager)(nil).UpdatePeerIP), ctx, accountID, userID, peerID, newIP) +} + +// UpdateToPrimaryAccount mocks base method. +func (m *MockManager) UpdateToPrimaryAccount(ctx context.Context, accountId string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateToPrimaryAccount", ctx, accountId) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateToPrimaryAccount indicates an expected call of UpdateToPrimaryAccount. +func (mr *MockManagerMockRecorder) UpdateToPrimaryAccount(ctx, accountId interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateToPrimaryAccount", reflect.TypeOf((*MockManager)(nil).UpdateToPrimaryAccount), ctx, accountId) +} + +// UpdateUserPassword mocks base method. +func (m *MockManager) UpdateUserPassword(ctx context.Context, accountID, currentUserID, targetUserID, oldPassword, newPassword string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateUserPassword", ctx, accountID, currentUserID, targetUserID, oldPassword, newPassword) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateUserPassword indicates an expected call of UpdateUserPassword. +func (mr *MockManagerMockRecorder) UpdateUserPassword(ctx, accountID, currentUserID, targetUserID, oldPassword, newPassword interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateUserPassword", reflect.TypeOf((*MockManager)(nil).UpdateUserPassword), ctx, accountID, currentUserID, targetUserID, oldPassword, newPassword) +} diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 92524e49a..89fe22cec 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -4895,6 +4895,46 @@ func (s *SqlStore) DeleteService(ctx context.Context, accountID, serviceID strin return nil } +func (s *SqlStore) DeleteTarget(ctx context.Context, accountID string, serviceID string, targetID uint) error { + result := s.db.Delete(&reverseproxy.Target{}, "account_id = ? AND service_id = ? AND id = ?", accountID, serviceID, targetID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to delete target from store: %v", result.Error) + return status.Errorf(status.Internal, "failed to delete target from store") + } + + if result.RowsAffected == 0 { + return status.Errorf(status.NotFound, "target not found for service %s", serviceID) + } + + return nil +} + +func (s *SqlStore) DeleteServiceTargets(ctx context.Context, accountID string, serviceID string) error { + result := s.db.Delete(&reverseproxy.Target{}, "account_id = ? AND service_id = ?", accountID, serviceID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to delete targets from store: %v", result.Error) + return status.Errorf(status.Internal, "failed to delete targets from store") + } + + return nil +} + +// GetTargetsByServiceID retrieves all targets for a given service +func (s *SqlStore) GetTargetsByServiceID(ctx context.Context, lockStrength LockingStrength, accountID string, serviceID string) ([]*reverseproxy.Target, error) { + var targets []*reverseproxy.Target + tx := s.db + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + result := tx.Where("account_id = ? AND service_id = ?", accountID, serviceID).Find(&targets) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to get targets from store: %v", result.Error) + return nil, status.Errorf(status.Internal, "failed to get targets from store") + } + + return targets, nil +} + func (s *SqlStore) GetServiceByID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) (*reverseproxy.Service, error) { tx := s.db.Preload("Targets") if lockStrength != LockingStrengthNone { diff --git a/management/server/store/store.go b/management/server/store/store.go index d5de63c03..9e982f70b 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -272,6 +272,9 @@ type Store interface { GetAccountAccessLogs(ctx context.Context, lockStrength LockingStrength, accountID string, filter accesslogs.AccessLogFilter) ([]*accesslogs.AccessLogEntry, int64, error) DeleteOldAccessLogs(ctx context.Context, olderThan time.Time) (int64, error) GetServiceTargetByTargetID(ctx context.Context, lockStrength LockingStrength, accountID string, targetID string) (*reverseproxy.Target, error) + GetTargetsByServiceID(ctx context.Context, lockStrength LockingStrength, accountID string, serviceID string) ([]*reverseproxy.Target, error) + DeleteTarget(ctx context.Context, accountID string, serviceID string, targetID uint) error + DeleteServiceTargets(ctx context.Context, accountID string, serviceID string) error // GetCustomDomainsCounts returns the total and validated custom domain counts. GetCustomDomainsCounts(ctx context.Context) (total int64, validated int64, err error) diff --git a/management/server/store/store_mock.go b/management/server/store/store_mock.go index d3de457e2..682ecc4d8 100644 --- a/management/server/store/store_mock.go +++ b/management/server/store/store_mock.go @@ -559,6 +559,20 @@ func (mr *MockStoreMockRecorder) DeleteService(ctx, accountID, serviceID interfa return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteService", reflect.TypeOf((*MockStore)(nil).DeleteService), ctx, accountID, serviceID) } +// DeleteServiceTargets mocks base method. +func (m *MockStore) DeleteServiceTargets(ctx context.Context, accountID, serviceID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteServiceTargets", ctx, accountID, serviceID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteServiceTargets indicates an expected call of DeleteServiceTargets. +func (mr *MockStoreMockRecorder) DeleteServiceTargets(ctx, accountID, serviceID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteServiceTargets", reflect.TypeOf((*MockStore)(nil).DeleteServiceTargets), ctx, accountID, serviceID) +} + // DeleteSetupKey mocks base method. func (m *MockStore) DeleteSetupKey(ctx context.Context, accountID, keyID string) error { m.ctrl.T.Helper() @@ -573,6 +587,20 @@ func (mr *MockStoreMockRecorder) DeleteSetupKey(ctx, accountID, keyID interface{ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSetupKey", reflect.TypeOf((*MockStore)(nil).DeleteSetupKey), ctx, accountID, keyID) } +// DeleteTarget mocks base method. +func (m *MockStore) DeleteTarget(ctx context.Context, accountID, serviceID string, targetID uint) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteTarget", ctx, accountID, serviceID, targetID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteTarget indicates an expected call of DeleteTarget. +func (mr *MockStoreMockRecorder) DeleteTarget(ctx, accountID, serviceID, targetID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTarget", reflect.TypeOf((*MockStore)(nil).DeleteTarget), ctx, accountID, serviceID, targetID) +} + // DeleteTokenID2UserIDIndex mocks base method. func (m *MockStore) DeleteTokenID2UserIDIndex(tokenID string) error { m.ctrl.T.Helper() @@ -1109,21 +1137,6 @@ func (mr *MockStoreMockRecorder) GetAccountServices(ctx, lockStrength, accountID return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountServices", reflect.TypeOf((*MockStore)(nil).GetAccountServices), ctx, lockStrength, accountID) } -// GetServicesByAccountID mocks base method. -func (m *MockStore) GetServicesByAccountID(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetServicesByAccountID", ctx, lockStrength, accountID) - ret0, _ := ret[0].([]*reverseproxy.Service) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetServicesByAccountID indicates an expected call of GetServicesByAccountID. -func (mr *MockStoreMockRecorder) GetServicesByAccountID(ctx, lockStrength, accountID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServicesByAccountID", reflect.TypeOf((*MockStore)(nil).GetServicesByAccountID), ctx, lockStrength, accountID) -} - // GetAccountSettings mocks base method. func (m *MockStore) GetAccountSettings(ctx context.Context, lockStrength LockingStrength, accountID string) (*types2.Settings, error) { m.ctrl.T.Helper() @@ -1288,6 +1301,22 @@ func (mr *MockStoreMockRecorder) GetCustomDomain(ctx, accountID, domainID interf return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCustomDomain", reflect.TypeOf((*MockStore)(nil).GetCustomDomain), ctx, accountID, domainID) } +// GetCustomDomainsCounts mocks base method. +func (m *MockStore) GetCustomDomainsCounts(ctx context.Context) (int64, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCustomDomainsCounts", ctx) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(int64) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetCustomDomainsCounts indicates an expected call of GetCustomDomainsCounts. +func (mr *MockStoreMockRecorder) GetCustomDomainsCounts(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCustomDomainsCounts", reflect.TypeOf((*MockStore)(nil).GetCustomDomainsCounts), ctx) +} + // GetDNSRecordByID mocks base method. func (m *MockStore) GetDNSRecordByID(ctx context.Context, lockStrength LockingStrength, accountID, zoneID, recordID string) (*records.Record, error) { m.ctrl.T.Helper() @@ -1872,22 +1901,6 @@ func (mr *MockStoreMockRecorder) GetServiceTargetByTargetID(ctx, lockStrength, a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceTargetByTargetID", reflect.TypeOf((*MockStore)(nil).GetServiceTargetByTargetID), ctx, lockStrength, accountID, targetID) } -// GetCustomDomainsCounts mocks base method. -func (m *MockStore) GetCustomDomainsCounts(ctx context.Context) (int64, int64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCustomDomainsCounts", ctx) - ret0, _ := ret[0].(int64) - ret1, _ := ret[1].(int64) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetCustomDomainsCounts indicates an expected call of GetCustomDomainsCounts. -func (mr *MockStoreMockRecorder) GetCustomDomainsCounts(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCustomDomainsCounts", reflect.TypeOf((*MockStore)(nil).GetCustomDomainsCounts), ctx) -} - // GetServices mocks base method. func (m *MockStore) GetServices(ctx context.Context, lockStrength LockingStrength) ([]*reverseproxy.Service, error) { m.ctrl.T.Helper() @@ -1903,6 +1916,21 @@ func (mr *MockStoreMockRecorder) GetServices(ctx, lockStrength interface{}) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServices", reflect.TypeOf((*MockStore)(nil).GetServices), ctx, lockStrength) } +// GetServicesByAccountID mocks base method. +func (m *MockStore) GetServicesByAccountID(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetServicesByAccountID", ctx, lockStrength, accountID) + ret0, _ := ret[0].([]*reverseproxy.Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetServicesByAccountID indicates an expected call of GetServicesByAccountID. +func (mr *MockStoreMockRecorder) GetServicesByAccountID(ctx, lockStrength, accountID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServicesByAccountID", reflect.TypeOf((*MockStore)(nil).GetServicesByAccountID), ctx, lockStrength, accountID) +} + // GetSetupKeyByID mocks base method. func (m *MockStore) GetSetupKeyByID(ctx context.Context, lockStrength LockingStrength, accountID, setupKeyID string) (*types2.SetupKey, error) { m.ctrl.T.Helper() @@ -1962,6 +1990,21 @@ func (mr *MockStoreMockRecorder) GetTakenIPs(ctx, lockStrength, accountId interf return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTakenIPs", reflect.TypeOf((*MockStore)(nil).GetTakenIPs), ctx, lockStrength, accountId) } +// GetTargetsByServiceID mocks base method. +func (m *MockStore) GetTargetsByServiceID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) ([]*reverseproxy.Target, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTargetsByServiceID", ctx, lockStrength, accountID, serviceID) + ret0, _ := ret[0].([]*reverseproxy.Target) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTargetsByServiceID indicates an expected call of GetTargetsByServiceID. +func (mr *MockStoreMockRecorder) GetTargetsByServiceID(ctx, lockStrength, accountID, serviceID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTargetsByServiceID", reflect.TypeOf((*MockStore)(nil).GetTargetsByServiceID), ctx, lockStrength, accountID, serviceID) +} + // GetTokenIDByHashedToken mocks base method. func (m *MockStore) GetTokenIDByHashedToken(ctx context.Context, secret string) (string, error) { m.ctrl.T.Helper() From 47133031e57074c62192481f465a00c9ef9436cd Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Tue, 3 Mar 2026 08:44:08 +0100 Subject: [PATCH 178/374] [client] fix: client/Dockerfile to reduce vulnerabilities (#5217) Co-authored-by: snyk-bot --- client/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/Dockerfile b/client/Dockerfile index 2ff0cca19..13e44096f 100644 --- a/client/Dockerfile +++ b/client/Dockerfile @@ -4,7 +4,7 @@ # sudo podman build -t localhost/netbird:latest -f client/Dockerfile --ignorefile .dockerignore-client . # sudo podman run --rm -it --cap-add={BPF,NET_ADMIN,NET_RAW} localhost/netbird:latest -FROM alpine:3.23.2 +FROM alpine:3.23.3 # iproute2: busybox doesn't display ip rules properly RUN apk add --no-cache \ bash \ From 403babd433cbf7feaee21be3cc4746cb12e887c2 Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Tue, 3 Mar 2026 12:53:16 +0200 Subject: [PATCH 179/374] [self-hosted] specify sql file location of auth, activity and main store (#5487) --- combined/cmd/config.go | 10 ++++++- combined/cmd/root.go | 6 +++++ combined/cmd/token.go | 3 +++ combined/config.yaml.example | 3 +++ management/server/activity/store/sql_store.go | 10 ++++++- management/server/store/sql_store.go | 26 ++++++++++++++----- 6 files changed, 50 insertions(+), 8 deletions(-) diff --git a/combined/cmd/config.go b/combined/cmd/config.go index f52d38ccf..85664d0d2 100644 --- a/combined/cmd/config.go +++ b/combined/cmd/config.go @@ -7,6 +7,7 @@ import ( "net/netip" "os" "path" + "path/filepath" "strings" "time" @@ -172,7 +173,8 @@ type RelaysConfig struct { type StoreConfig struct { Engine string `yaml:"engine"` EncryptionKey string `yaml:"encryptionKey"` - DSN string `yaml:"dsn"` // Connection string for postgres or mysql engines + DSN string `yaml:"dsn"` // Connection string for postgres or mysql engines + File string `yaml:"file"` // SQLite database file path (optional, defaults to dataDir) } // ReverseProxyConfig contains reverse proxy settings @@ -568,6 +570,12 @@ func (c *CombinedConfig) buildEmbeddedIdPConfig(mgmt ManagementConfig) (*idp.Emb } } else { authStorageFile = path.Join(mgmt.DataDir, "idp.db") + if c.Server.AuthStore.File != "" { + authStorageFile = c.Server.AuthStore.File + if !filepath.IsAbs(authStorageFile) { + authStorageFile = filepath.Join(mgmt.DataDir, authStorageFile) + } + } } cfg := &idp.EmbeddedIdPConfig{ diff --git a/combined/cmd/root.go b/combined/cmd/root.go index 00edcb5d4..153260341 100644 --- a/combined/cmd/root.go +++ b/combined/cmd/root.go @@ -140,6 +140,9 @@ func initializeConfig() error { os.Setenv("NB_STORE_ENGINE_MYSQL_DSN", dsn) } } + if file := config.Server.Store.File; file != "" { + os.Setenv("NB_STORE_ENGINE_SQLITE_FILE", file) + } if engine := config.Server.ActivityStore.Engine; engine != "" { engineLower := strings.ToLower(engine) @@ -151,6 +154,9 @@ func initializeConfig() error { os.Setenv("NB_ACTIVITY_EVENT_POSTGRES_DSN", dsn) } } + if file := config.Server.ActivityStore.File; file != "" { + os.Setenv("NB_ACTIVITY_EVENT_SQLITE_FILE", file) + } log.Infof("Starting combined NetBird server") logConfig(config) diff --git a/combined/cmd/token.go b/combined/cmd/token.go index 9393c6c46..550480062 100644 --- a/combined/cmd/token.go +++ b/combined/cmd/token.go @@ -42,6 +42,9 @@ func withTokenStore(cmd *cobra.Command, fn func(ctx context.Context, s store.Sto os.Setenv("NB_STORE_ENGINE_MYSQL_DSN", dsn) } } + if file := cfg.Server.Store.File; file != "" { + os.Setenv("NB_STORE_ENGINE_SQLITE_FILE", file) + } datadir := cfg.Management.DataDir engine := types.Engine(cfg.Management.Store.Engine) diff --git a/combined/config.yaml.example b/combined/config.yaml.example index f81973c6b..dce658d89 100644 --- a/combined/config.yaml.example +++ b/combined/config.yaml.example @@ -103,16 +103,19 @@ server: engine: "sqlite" # sqlite, postgres, or mysql dsn: "" # Connection string for postgres or mysql encryptionKey: "" + # file: "" # Custom SQLite file path (optional, defaults to {dataDir}/store.db) # Activity events store configuration (optional, defaults to sqlite in dataDir) # activityStore: # engine: "sqlite" # sqlite or postgres # dsn: "" # Connection string for postgres + # file: "" # Custom SQLite file path (optional, defaults to {dataDir}/events.db) # Auth (embedded IdP) store configuration (optional, defaults to sqlite3 in dataDir/idp.db) # authStore: # engine: "sqlite3" # sqlite3 or postgres # dsn: "" # Connection string for postgres (e.g., "host=localhost port=5432 user=postgres password=postgres dbname=netbird_idp sslmode=disable") + # file: "" # Custom SQLite file path (optional, defaults to {dataDir}/idp.db) # Reverse proxy settings (optional) # reverseProxy: diff --git a/management/server/activity/store/sql_store.go b/management/server/activity/store/sql_store.go index db614d0cd..73e8e295c 100644 --- a/management/server/activity/store/sql_store.go +++ b/management/server/activity/store/sql_store.go @@ -249,7 +249,15 @@ func initDatabase(ctx context.Context, dataDir string) (*gorm.DB, error) { switch storeEngine { case types.SqliteStoreEngine: - dialector = sqlite.Open(filepath.Join(dataDir, eventSinkDB)) + dbFile := eventSinkDB + if envFile, ok := os.LookupEnv("NB_ACTIVITY_EVENT_SQLITE_FILE"); ok && envFile != "" { + dbFile = envFile + } + connStr := dbFile + if !filepath.IsAbs(dbFile) { + connStr = filepath.Join(dataDir, dbFile) + } + dialector = sqlite.Open(connStr) case types.PostgresStoreEngine: dsn, ok := os.LookupEnv(postgresDsnEnv) if !ok { diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 89fe22cec..04045f226 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -2728,14 +2728,28 @@ func (s *SqlStore) GetStoreEngine() types.Engine { // NewSqliteStore creates a new SQLite store. func NewSqliteStore(ctx context.Context, dataDir string, metrics telemetry.AppMetrics, skipMigration bool) (*SqlStore, error) { - storeStr := fmt.Sprintf("%s?cache=shared", storeSqliteFileName) - if runtime.GOOS == "windows" { - // Vo avoid `The process cannot access the file because it is being used by another process` on Windows - storeStr = storeSqliteFileName + storeFile := storeSqliteFileName + if envFile, ok := os.LookupEnv("NB_STORE_ENGINE_SQLITE_FILE"); ok && envFile != "" { + storeFile = envFile } - file := filepath.Join(dataDir, storeStr) - db, err := gorm.Open(sqlite.Open(file), getGormConfig()) + // Separate file path from any SQLite URI query parameters (e.g., "store.db?mode=rwc") + filePath, query, hasQuery := strings.Cut(storeFile, "?") + + connStr := filePath + if !filepath.IsAbs(filePath) { + connStr = filepath.Join(dataDir, filePath) + } + + // Append query parameters: user-provided take precedence, otherwise default to cache=shared on non-Windows + if hasQuery { + connStr += "?" + query + } else if runtime.GOOS != "windows" { + // To avoid `The process cannot access the file because it is being used by another process` on Windows + connStr += "?cache=shared" + } + + db, err := gorm.Open(sqlite.Open(connStr), getGormConfig()) if err != nil { return nil, err } From 01ceedac898103e939fe02cb2762b7a49384bf7a Mon Sep 17 00:00:00 2001 From: Jeremie Deray Date: Tue, 3 Mar 2026 13:48:51 +0100 Subject: [PATCH 180/374] [client] Fix profile config directory permissions (#5457) * fix user profile dir perm * fix fileExists * revert return var change * fix anti-pattern --- client/internal/profilemanager/config.go | 39 ++++++++++++++++++----- client/internal/profilemanager/service.go | 12 +++++-- client/internal/profilemanager/state.go | 6 +++- 3 files changed, 46 insertions(+), 11 deletions(-) diff --git a/client/internal/profilemanager/config.go b/client/internal/profilemanager/config.go index 8f3ff8b11..b27f1932f 100644 --- a/client/internal/profilemanager/config.go +++ b/client/internal/profilemanager/config.go @@ -198,7 +198,7 @@ func getConfigDirForUser(username string) (string, error) { configDir := filepath.Join(DefaultConfigPathDir, username) if _, err := os.Stat(configDir); os.IsNotExist(err) { - if err := os.MkdirAll(configDir, 0600); err != nil { + if err := os.MkdirAll(configDir, 0700); err != nil { return "", err } } @@ -206,9 +206,15 @@ func getConfigDirForUser(username string) (string, error) { return configDir, nil } -func fileExists(path string) bool { +func fileExists(path string) (bool, error) { _, err := os.Stat(path) - return !os.IsNotExist(err) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err } // createNewConfig creates a new config generating a new Wireguard key and saving to file @@ -635,7 +641,11 @@ func isPreSharedKeyHidden(preSharedKey *string) bool { // UpdateConfig update existing configuration according to input configuration and return with the configuration func UpdateConfig(input ConfigInput) (*Config, error) { - if !fileExists(input.ConfigPath) { + configExists, err := fileExists(input.ConfigPath) + if err != nil { + return nil, fmt.Errorf("failed to check if config file exists: %w", err) + } + if !configExists { return nil, fmt.Errorf("config file %s does not exist", input.ConfigPath) } @@ -644,7 +654,11 @@ func UpdateConfig(input ConfigInput) (*Config, error) { // UpdateOrCreateConfig reads existing config or generates a new one func UpdateOrCreateConfig(input ConfigInput) (*Config, error) { - if !fileExists(input.ConfigPath) { + configExists, err := fileExists(input.ConfigPath) + if err != nil { + return nil, fmt.Errorf("failed to check if config file exists: %w", err) + } + if !configExists { log.Infof("generating new config %s", input.ConfigPath) cfg, err := createNewConfig(input) if err != nil { @@ -657,7 +671,7 @@ func UpdateOrCreateConfig(input ConfigInput) (*Config, error) { if isPreSharedKeyHidden(input.PreSharedKey) { input.PreSharedKey = nil } - err := util.EnforcePermission(input.ConfigPath) + err = util.EnforcePermission(input.ConfigPath) if err != nil { log.Errorf("failed to enforce permission on config dir: %v", err) } @@ -784,7 +798,12 @@ func ReadConfig(configPath string) (*Config, error) { // ReadConfig read config file and return with Config. If it is not exists create a new with default values func readConfig(configPath string, createIfMissing bool) (*Config, error) { - if fileExists(configPath) { + configExists, err := fileExists(configPath) + if err != nil { + return nil, fmt.Errorf("failed to check if config file exists: %w", err) + } + + if configExists { err := util.EnforcePermission(configPath) if err != nil { log.Errorf("failed to enforce permission on config dir: %v", err) @@ -831,7 +850,11 @@ func DirectWriteOutConfig(path string, config *Config) error { // DirectUpdateOrCreateConfig is like UpdateOrCreateConfig but uses direct (non-atomic) writes. // Use this on platforms where atomic writes are blocked (e.g., tvOS sandbox). func DirectUpdateOrCreateConfig(input ConfigInput) (*Config, error) { - if !fileExists(input.ConfigPath) { + configExists, err := fileExists(input.ConfigPath) + if err != nil { + return nil, fmt.Errorf("failed to check if config file exists: %w", err) + } + if !configExists { log.Infof("generating new config %s", input.ConfigPath) cfg, err := createNewConfig(input) if err != nil { diff --git a/client/internal/profilemanager/service.go b/client/internal/profilemanager/service.go index bdb722c67..ef3eb1114 100644 --- a/client/internal/profilemanager/service.go +++ b/client/internal/profilemanager/service.go @@ -256,7 +256,11 @@ func (s *ServiceManager) AddProfile(profileName, username string) error { } profPath := filepath.Join(configDir, profileName+".json") - if fileExists(profPath) { + profileExists, err := fileExists(profPath) + if err != nil { + return fmt.Errorf("failed to check if profile exists: %w", err) + } + if profileExists { return ErrProfileAlreadyExists } @@ -285,7 +289,11 @@ func (s *ServiceManager) RemoveProfile(profileName, username string) error { return fmt.Errorf("cannot remove profile with reserved name: %s", defaultProfileName) } profPath := filepath.Join(configDir, profileName+".json") - if !fileExists(profPath) { + profileExists, err := fileExists(profPath) + if err != nil { + return fmt.Errorf("failed to check if profile exists: %w", err) + } + if !profileExists { return ErrProfileNotFound } diff --git a/client/internal/profilemanager/state.go b/client/internal/profilemanager/state.go index f84cb1032..f09391ede 100644 --- a/client/internal/profilemanager/state.go +++ b/client/internal/profilemanager/state.go @@ -20,7 +20,11 @@ func (pm *ProfileManager) GetProfileState(profileName string) (*ProfileState, er } stateFile := filepath.Join(configDir, profileName+".state.json") - if !fileExists(stateFile) { + stateFileExists, err := fileExists(stateFile) + if err != nil { + return nil, fmt.Errorf("failed to check if profile state file exists: %w", err) + } + if !stateFileExists { return nil, errors.New("profile state file does not exist") } From 05b66e73bce0ff44b8cda440dbfcb8bb60b4c637 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 3 Mar 2026 13:50:46 +0100 Subject: [PATCH 181/374] [client] Fix deadlock in route peer status watcher (#5489) Wrap peerStateUpdate send in a nested select to prevent goroutine blocking when the consumer has exited, which could fill the subscription buffer and deadlock the Status mutex. --- client/internal/routemanager/client/client.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/client/internal/routemanager/client/client.go b/client/internal/routemanager/client/client.go index 0b8e161d2..bad616271 100644 --- a/client/internal/routemanager/client/client.go +++ b/client/internal/routemanager/client/client.go @@ -263,8 +263,14 @@ func (w *Watcher) watchPeerStatusChanges(ctx context.Context, peerKey string, pe case <-closer: return case routerStates := <-subscription.Events(): - peerStateUpdate <- routerStates - log.Debugf("triggered route state update for Peer: %s", peerKey) + select { + case peerStateUpdate <- routerStates: + log.Debugf("triggered route state update for Peer: %s", peerKey) + case <-ctx.Done(): + return + case <-closer: + return + } } } } From d7c8e37ff475e5f896ed89ac3ec936144bb57a74 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Tue, 3 Mar 2026 18:39:46 +0100 Subject: [PATCH 182/374] [management] Store connected proxies in DB (#5472) Co-authored-by: mlsmaycon --- .../reverseproxy/domain/manager/manager.go | 48 +-- .../modules/reverseproxy/proxy/manager.go | 36 +++ .../reverseproxy/proxy/manager/controller.go | 88 +++++ .../reverseproxy/proxy/manager/manager.go | 115 +++++++ .../reverseproxy/proxy/manager/metrics.go | 74 +++++ .../reverseproxy/proxy/manager_mock.go | 199 ++++++++++++ .../modules/reverseproxy/proxy/proxy.go | 20 ++ .../reverseproxy/{ => service}/interface.go | 6 +- .../{ => service}/interface_mock.go | 6 +- .../reverseproxy/{ => service}/manager/api.go | 10 +- .../{ => service}/manager/expose_tracker.go | 2 +- .../manager/expose_tracker_test.go | 8 +- .../{ => service}/manager/manager.go | 301 ++++++++---------- .../{ => service}/manager/manager_test.go | 265 +++++++-------- .../{reverseproxy.go => service/service.go} | 40 +-- .../service_test.go} | 9 +- management/internals/server/boot.go | 22 +- management/internals/server/controllers.go | 12 + management/internals/server/modules.go | 40 ++- management/internals/server/server.go | 2 +- .../internals/shared/grpc/expose_service.go | 8 +- .../internals/shared/grpc/onetime_token.go | 141 ++++---- management/internals/shared/grpc/proxy.go | 218 +++++++------ .../shared/grpc/proxy_group_access_test.go | 92 +++--- .../internals/shared/grpc/proxy_test.go | 89 ++++-- management/internals/shared/grpc/server.go | 4 +- .../shared/grpc/validate_session_test.go | 84 +++-- management/server/account.go | 16 +- management/server/account/manager.go | 4 +- management/server/account/manager_mock.go | 4 +- management/server/account_test.go | 25 +- management/server/group_test.go | 2 +- management/server/http/handler.go | 10 +- .../proxy/auth_callback_integration_test.go | 50 +-- .../testing/testing_tools/channel/channel.go | 31 +- management/server/metrics/selfhosted.go | 10 +- management/server/metrics/selfhosted_test.go | 20 +- management/server/mock_server/account_mock.go | 4 +- .../server/networks/resources/manager.go | 28 +- .../server/networks/resources/manager_test.go | 72 ++--- management/server/peer.go | 2 +- management/server/peer/peer.go | 7 +- management/server/store/sql_store.go | 146 ++++++--- .../server/store/sqlstore_bench_test.go | 4 +- management/server/store/store.go | 26 +- management/server/store/store_mock.go | 103 ++++-- management/server/types/account.go | 14 +- proxy/cmd/proxy/cmd/root.go | 6 + proxy/internal/acme/manager.go | 30 +- proxy/internal/acme/manager_test.go | 4 +- proxy/management_integration_test.go | 88 ++++- proxy/server.go | 6 +- 52 files changed, 1727 insertions(+), 924 deletions(-) create mode 100644 management/internals/modules/reverseproxy/proxy/manager.go create mode 100644 management/internals/modules/reverseproxy/proxy/manager/controller.go create mode 100644 management/internals/modules/reverseproxy/proxy/manager/manager.go create mode 100644 management/internals/modules/reverseproxy/proxy/manager/metrics.go create mode 100644 management/internals/modules/reverseproxy/proxy/manager_mock.go create mode 100644 management/internals/modules/reverseproxy/proxy/proxy.go rename management/internals/modules/reverseproxy/{ => service}/interface.go (88%) rename management/internals/modules/reverseproxy/{ => service}/interface_mock.go (99%) rename management/internals/modules/reverseproxy/{ => service}/manager/api.go (93%) rename management/internals/modules/reverseproxy/{ => service}/manager/expose_tracker.go (99%) rename management/internals/modules/reverseproxy/{ => service}/manager/expose_tracker_test.go (97%) rename management/internals/modules/reverseproxy/{ => service}/manager/manager.go (65%) rename management/internals/modules/reverseproxy/{ => service}/manager/manager_test.go (83%) rename management/internals/modules/reverseproxy/{reverseproxy.go => service/service.go} (94%) rename management/internals/modules/reverseproxy/{reverseproxy_test.go => service/service_test.go} (98%) diff --git a/management/internals/modules/reverseproxy/domain/manager/manager.go b/management/internals/modules/reverseproxy/domain/manager/manager.go index 55ca24ac2..12dd051fd 100644 --- a/management/internals/modules/reverseproxy/domain/manager/manager.go +++ b/management/internals/modules/reverseproxy/domain/manager/manager.go @@ -27,21 +27,21 @@ type store interface { DeleteCustomDomain(ctx context.Context, accountID string, domainID string) error } -type proxyURLProvider interface { - GetConnectedProxyURLs() []string +type proxyManager interface { + GetActiveClusterAddresses(ctx context.Context) ([]string, error) } type Manager struct { store store validator domain.Validator - proxyURLProvider proxyURLProvider + proxyManager proxyManager permissionsManager permissions.Manager } -func NewManager(store store, proxyURLProvider proxyURLProvider, permissionsManager permissions.Manager) Manager { +func NewManager(store store, proxyMgr proxyManager, permissionsManager permissions.Manager) Manager { return Manager{ - store: store, - proxyURLProvider: proxyURLProvider, + store: store, + proxyManager: proxyMgr, validator: domain.Validator{ Resolver: net.DefaultResolver, }, @@ -67,8 +67,12 @@ func (m Manager) GetDomains(ctx context.Context, accountID, userID string) ([]*d // Add connected proxy clusters as free domains. // The cluster address itself is the free domain base (e.g., "eu.proxy.netbird.io"). - allowList := m.proxyURLAllowList() - log.WithFields(log.Fields{ + allowList, err := m.proxyManager.GetActiveClusterAddresses(ctx) + if err != nil { + log.WithContext(ctx).Errorf("failed to get active proxy cluster addresses: %v", err) + return nil, err + } + log.WithContext(ctx).WithFields(log.Fields{ "accountID": accountID, "proxyAllowList": allowList, }).Debug("getting domains with proxy allow list") @@ -107,7 +111,10 @@ func (m Manager) CreateDomain(ctx context.Context, accountID, userID, domainName } // Verify the target cluster is in the available clusters - allowList := m.proxyURLAllowList() + allowList, err := m.proxyManager.GetActiveClusterAddresses(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get active proxy cluster addresses: %w", err) + } clusterValid := false for _, cluster := range allowList { if cluster == targetCluster { @@ -221,25 +228,26 @@ func (m Manager) ValidateDomain(ctx context.Context, accountID, userID, domainID } } +// GetClusterDomains returns a list of proxy cluster domains. func (m Manager) GetClusterDomains() []string { - return m.proxyURLAllowList() -} - -// proxyURLAllowList retrieves a list of currently connected proxies and -// their URLs -func (m Manager) proxyURLAllowList() []string { - var reverseProxyAddresses []string - if m.proxyURLProvider != nil { - reverseProxyAddresses = m.proxyURLProvider.GetConnectedProxyURLs() + if m.proxyManager == nil { + return nil } - return reverseProxyAddresses + addresses, err := m.proxyManager.GetActiveClusterAddresses(context.Background()) + if err != nil { + return nil + } + return addresses } // DeriveClusterFromDomain determines the proxy cluster for a given domain. // For free domains (those ending with a known cluster suffix), the cluster is extracted from the domain. // For custom domains, the cluster is determined by checking the registered custom domain's target cluster. func (m Manager) DeriveClusterFromDomain(ctx context.Context, accountID, domain string) (string, error) { - allowList := m.proxyURLAllowList() + allowList, err := m.proxyManager.GetActiveClusterAddresses(ctx) + if err != nil { + return "", fmt.Errorf("failed to get active proxy cluster addresses: %w", err) + } if len(allowList) == 0 { return "", fmt.Errorf("no proxy clusters available") } diff --git a/management/internals/modules/reverseproxy/proxy/manager.go b/management/internals/modules/reverseproxy/proxy/manager.go new file mode 100644 index 000000000..15f2f9f54 --- /dev/null +++ b/management/internals/modules/reverseproxy/proxy/manager.go @@ -0,0 +1,36 @@ +package proxy + +//go:generate go run github.com/golang/mock/mockgen -package proxy -destination=manager_mock.go -source=./manager.go -build_flags=-mod=mod + +import ( + "context" + "time" + + "github.com/netbirdio/netbird/shared/management/proto" +) + +// Manager defines the interface for proxy operations +type Manager interface { + Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string) error + Disconnect(ctx context.Context, proxyID string) error + Heartbeat(ctx context.Context, proxyID string) error + GetActiveClusterAddresses(ctx context.Context) ([]string, error) + CleanupStale(ctx context.Context, inactivityDuration time.Duration) error +} + +// OIDCValidationConfig contains the OIDC configuration needed for token validation. +type OIDCValidationConfig struct { + Issuer string + Audiences []string + KeysLocation string + MaxTokenAgeSeconds int64 +} + +// Controller is responsible for managing proxy clusters and routing service updates. +type Controller interface { + SendServiceUpdateToCluster(ctx context.Context, accountID string, update *proto.ProxyMapping, clusterAddr string) + GetOIDCValidationConfig() OIDCValidationConfig + RegisterProxyToCluster(ctx context.Context, clusterAddr, proxyID string) error + UnregisterProxyFromCluster(ctx context.Context, clusterAddr, proxyID string) error + GetProxiesForCluster(clusterAddr string) []string +} diff --git a/management/internals/modules/reverseproxy/proxy/manager/controller.go b/management/internals/modules/reverseproxy/proxy/manager/controller.go new file mode 100644 index 000000000..e5b3e9886 --- /dev/null +++ b/management/internals/modules/reverseproxy/proxy/manager/controller.go @@ -0,0 +1,88 @@ +package manager + +import ( + "context" + "sync" + + log "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/metric" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" + nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" + "github.com/netbirdio/netbird/shared/management/proto" +) + +// GRPCController is a concrete implementation that manages proxy clusters and sends updates directly via gRPC. +type GRPCController struct { + proxyGRPCServer *nbgrpc.ProxyServiceServer + // Map of cluster address -> set of proxy IDs + clusterProxies sync.Map + metrics *metrics +} + +// NewGRPCController creates a new GRPCController. +func NewGRPCController(proxyGRPCServer *nbgrpc.ProxyServiceServer, meter metric.Meter) (*GRPCController, error) { + m, err := newMetrics(meter) + if err != nil { + return nil, err + } + + return &GRPCController{ + proxyGRPCServer: proxyGRPCServer, + metrics: m, + }, nil +} + +// SendServiceUpdateToCluster sends a service update to a specific proxy cluster. +func (c *GRPCController) SendServiceUpdateToCluster(ctx context.Context, accountID string, update *proto.ProxyMapping, clusterAddr string) { + c.proxyGRPCServer.SendServiceUpdateToCluster(ctx, update, clusterAddr) + c.metrics.IncrementServiceUpdateSendCount(clusterAddr) +} + +// GetOIDCValidationConfig returns the OIDC validation configuration from the gRPC server. +func (c *GRPCController) GetOIDCValidationConfig() proxy.OIDCValidationConfig { + return c.proxyGRPCServer.GetOIDCValidationConfig() +} + +// RegisterProxyToCluster registers a proxy to a specific cluster for routing. +func (c *GRPCController) RegisterProxyToCluster(ctx context.Context, clusterAddr, proxyID string) error { + if clusterAddr == "" { + return nil + } + proxySet, _ := c.clusterProxies.LoadOrStore(clusterAddr, &sync.Map{}) + proxySet.(*sync.Map).Store(proxyID, struct{}{}) + log.WithContext(ctx).Debugf("Registered proxy %s to cluster %s", proxyID, clusterAddr) + + c.metrics.IncrementProxyConnectionCount(clusterAddr) + + return nil +} + +// UnregisterProxyFromCluster removes a proxy from a cluster. +func (c *GRPCController) UnregisterProxyFromCluster(ctx context.Context, clusterAddr, proxyID string) error { + if clusterAddr == "" { + return nil + } + if proxySet, ok := c.clusterProxies.Load(clusterAddr); ok { + proxySet.(*sync.Map).Delete(proxyID) + log.WithContext(ctx).Debugf("Unregistered proxy %s from cluster %s", proxyID, clusterAddr) + + c.metrics.DecrementProxyConnectionCount(clusterAddr) + } + return nil +} + +// GetProxiesForCluster returns all proxy IDs registered for a specific cluster. +func (c *GRPCController) GetProxiesForCluster(clusterAddr string) []string { + proxySet, ok := c.clusterProxies.Load(clusterAddr) + if !ok { + return nil + } + + var proxies []string + proxySet.(*sync.Map).Range(func(key, _ interface{}) bool { + proxies = append(proxies, key.(string)) + return true + }) + return proxies +} diff --git a/management/internals/modules/reverseproxy/proxy/manager/manager.go b/management/internals/modules/reverseproxy/proxy/manager/manager.go new file mode 100644 index 000000000..4c0964b5c --- /dev/null +++ b/management/internals/modules/reverseproxy/proxy/manager/manager.go @@ -0,0 +1,115 @@ +package manager + +import ( + "context" + "time" + + log "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/metric" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" +) + +// store defines the interface for proxy persistence operations +type store interface { + SaveProxy(ctx context.Context, p *proxy.Proxy) error + UpdateProxyHeartbeat(ctx context.Context, proxyID string) error + GetActiveProxyClusterAddresses(ctx context.Context) ([]string, error) + CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error +} + +// Manager handles all proxy operations +type Manager struct { + store store + metrics *metrics +} + +// NewManager creates a new proxy Manager +func NewManager(store store, meter metric.Meter) (*Manager, error) { + m, err := newMetrics(meter) + if err != nil { + return nil, err + } + + return &Manager{ + store: store, + metrics: m, + }, nil +} + +// Connect registers a new proxy connection in the database +func (m Manager) Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string) error { + now := time.Now() + p := &proxy.Proxy{ + ID: proxyID, + ClusterAddress: clusterAddress, + IPAddress: ipAddress, + LastSeen: now, + ConnectedAt: &now, + Status: "connected", + } + + if err := m.store.SaveProxy(ctx, p); err != nil { + log.WithContext(ctx).Errorf("failed to register proxy %s: %v", proxyID, err) + return err + } + + log.WithContext(ctx).WithFields(log.Fields{ + "proxyID": proxyID, + "clusterAddress": clusterAddress, + "ipAddress": ipAddress, + }).Info("proxy connected") + + return nil +} + +// Disconnect marks a proxy as disconnected in the database +func (m Manager) Disconnect(ctx context.Context, proxyID string) error { + now := time.Now() + p := &proxy.Proxy{ + ID: proxyID, + Status: "disconnected", + DisconnectedAt: &now, + LastSeen: now, + } + + if err := m.store.SaveProxy(ctx, p); err != nil { + log.WithContext(ctx).Errorf("failed to disconnect proxy %s: %v", proxyID, err) + return err + } + + log.WithContext(ctx).WithFields(log.Fields{ + "proxyID": proxyID, + }).Info("proxy disconnected") + + return nil +} + +// Heartbeat updates the proxy's last seen timestamp +func (m Manager) Heartbeat(ctx context.Context, proxyID string) error { + if err := m.store.UpdateProxyHeartbeat(ctx, proxyID); err != nil { + log.WithContext(ctx).Debugf("failed to update proxy %s heartbeat: %v", proxyID, err) + return err + } + m.metrics.IncrementProxyHeartbeatCount() + return nil +} + +// GetActiveClusterAddresses returns all unique cluster addresses for active proxies +func (m Manager) GetActiveClusterAddresses(ctx context.Context) ([]string, error) { + addresses, err := m.store.GetActiveProxyClusterAddresses(ctx) + if err != nil { + log.WithContext(ctx).Errorf("failed to get active proxy cluster addresses: %v", err) + return nil, err + } + return addresses, nil +} + +// CleanupStale removes proxies that haven't sent heartbeat in the specified duration +func (m Manager) CleanupStale(ctx context.Context, inactivityDuration time.Duration) error { + if err := m.store.CleanupStaleProxies(ctx, inactivityDuration); err != nil { + log.WithContext(ctx).Errorf("failed to cleanup stale proxies: %v", err) + return err + } + return nil +} diff --git a/management/internals/modules/reverseproxy/proxy/manager/metrics.go b/management/internals/modules/reverseproxy/proxy/manager/metrics.go new file mode 100644 index 000000000..2b402cead --- /dev/null +++ b/management/internals/modules/reverseproxy/proxy/manager/metrics.go @@ -0,0 +1,74 @@ +package manager + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" +) + +type metrics struct { + proxyConnectionCount metric.Int64UpDownCounter + serviceUpdateSendCount metric.Int64Counter + proxyHeartbeatCount metric.Int64Counter +} + +func newMetrics(meter metric.Meter) (*metrics, error) { + proxyConnectionCount, err := meter.Int64UpDownCounter( + "management_proxy_connection_count", + metric.WithDescription("Number of active proxy connections"), + metric.WithUnit("{connection}"), + ) + if err != nil { + return nil, err + } + + serviceUpdateSendCount, err := meter.Int64Counter( + "management_proxy_service_update_send_count", + metric.WithDescription("Total number of service updates sent to proxies"), + metric.WithUnit("{update}"), + ) + if err != nil { + return nil, err + } + + proxyHeartbeatCount, err := meter.Int64Counter( + "management_proxy_heartbeat_count", + metric.WithDescription("Total number of proxy heartbeats received"), + metric.WithUnit("{heartbeat}"), + ) + if err != nil { + return nil, err + } + + return &metrics{ + proxyConnectionCount: proxyConnectionCount, + serviceUpdateSendCount: serviceUpdateSendCount, + proxyHeartbeatCount: proxyHeartbeatCount, + }, nil +} + +func (m *metrics) IncrementProxyConnectionCount(clusterAddr string) { + m.proxyConnectionCount.Add(context.Background(), 1, + metric.WithAttributes( + attribute.String("cluster", clusterAddr), + )) +} + +func (m *metrics) DecrementProxyConnectionCount(clusterAddr string) { + m.proxyConnectionCount.Add(context.Background(), -1, + metric.WithAttributes( + attribute.String("cluster", clusterAddr), + )) +} + +func (m *metrics) IncrementServiceUpdateSendCount(clusterAddr string) { + m.serviceUpdateSendCount.Add(context.Background(), 1, + metric.WithAttributes( + attribute.String("cluster", clusterAddr), + )) +} + +func (m *metrics) IncrementProxyHeartbeatCount() { + m.proxyHeartbeatCount.Add(context.Background(), 1) +} diff --git a/management/internals/modules/reverseproxy/proxy/manager_mock.go b/management/internals/modules/reverseproxy/proxy/manager_mock.go new file mode 100644 index 000000000..d9645ba88 --- /dev/null +++ b/management/internals/modules/reverseproxy/proxy/manager_mock.go @@ -0,0 +1,199 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./manager.go + +// Package proxy is a generated GoMock package. +package proxy + +import ( + context "context" + reflect "reflect" + time "time" + + gomock "github.com/golang/mock/gomock" + proto "github.com/netbirdio/netbird/shared/management/proto" +) + +// MockManager is a mock of Manager interface. +type MockManager struct { + ctrl *gomock.Controller + recorder *MockManagerMockRecorder +} + +// MockManagerMockRecorder is the mock recorder for MockManager. +type MockManagerMockRecorder struct { + mock *MockManager +} + +// NewMockManager creates a new mock instance. +func NewMockManager(ctrl *gomock.Controller) *MockManager { + mock := &MockManager{ctrl: ctrl} + mock.recorder = &MockManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockManager) EXPECT() *MockManagerMockRecorder { + return m.recorder +} + +// CleanupStale mocks base method. +func (m *MockManager) CleanupStale(ctx context.Context, inactivityDuration time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CleanupStale", ctx, inactivityDuration) + ret0, _ := ret[0].(error) + return ret0 +} + +// CleanupStale indicates an expected call of CleanupStale. +func (mr *MockManagerMockRecorder) CleanupStale(ctx, inactivityDuration interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupStale", reflect.TypeOf((*MockManager)(nil).CleanupStale), ctx, inactivityDuration) +} + +// Connect mocks base method. +func (m *MockManager) Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Connect", ctx, proxyID, clusterAddress, ipAddress) + ret0, _ := ret[0].(error) + return ret0 +} + +// Connect indicates an expected call of Connect. +func (mr *MockManagerMockRecorder) Connect(ctx, proxyID, clusterAddress, ipAddress interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connect", reflect.TypeOf((*MockManager)(nil).Connect), ctx, proxyID, clusterAddress, ipAddress) +} + +// Disconnect mocks base method. +func (m *MockManager) Disconnect(ctx context.Context, proxyID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Disconnect", ctx, proxyID) + ret0, _ := ret[0].(error) + return ret0 +} + +// Disconnect indicates an expected call of Disconnect. +func (mr *MockManagerMockRecorder) Disconnect(ctx, proxyID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disconnect", reflect.TypeOf((*MockManager)(nil).Disconnect), ctx, proxyID) +} + +// GetActiveClusterAddresses mocks base method. +func (m *MockManager) GetActiveClusterAddresses(ctx context.Context) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActiveClusterAddresses", ctx) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetActiveClusterAddresses indicates an expected call of GetActiveClusterAddresses. +func (mr *MockManagerMockRecorder) GetActiveClusterAddresses(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActiveClusterAddresses", reflect.TypeOf((*MockManager)(nil).GetActiveClusterAddresses), ctx) +} + +// Heartbeat mocks base method. +func (m *MockManager) Heartbeat(ctx context.Context, proxyID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Heartbeat", ctx, proxyID) + ret0, _ := ret[0].(error) + return ret0 +} + +// Heartbeat indicates an expected call of Heartbeat. +func (mr *MockManagerMockRecorder) Heartbeat(ctx, proxyID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Heartbeat", reflect.TypeOf((*MockManager)(nil).Heartbeat), ctx, proxyID) +} + +// MockController is a mock of Controller interface. +type MockController struct { + ctrl *gomock.Controller + recorder *MockControllerMockRecorder +} + +// MockControllerMockRecorder is the mock recorder for MockController. +type MockControllerMockRecorder struct { + mock *MockController +} + +// NewMockController creates a new mock instance. +func NewMockController(ctrl *gomock.Controller) *MockController { + mock := &MockController{ctrl: ctrl} + mock.recorder = &MockControllerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockController) EXPECT() *MockControllerMockRecorder { + return m.recorder +} + +// GetOIDCValidationConfig mocks base method. +func (m *MockController) GetOIDCValidationConfig() OIDCValidationConfig { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetOIDCValidationConfig") + ret0, _ := ret[0].(OIDCValidationConfig) + return ret0 +} + +// GetOIDCValidationConfig indicates an expected call of GetOIDCValidationConfig. +func (mr *MockControllerMockRecorder) GetOIDCValidationConfig() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOIDCValidationConfig", reflect.TypeOf((*MockController)(nil).GetOIDCValidationConfig)) +} + +// GetProxiesForCluster mocks base method. +func (m *MockController) GetProxiesForCluster(clusterAddr string) []string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProxiesForCluster", clusterAddr) + ret0, _ := ret[0].([]string) + return ret0 +} + +// GetProxiesForCluster indicates an expected call of GetProxiesForCluster. +func (mr *MockControllerMockRecorder) GetProxiesForCluster(clusterAddr interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProxiesForCluster", reflect.TypeOf((*MockController)(nil).GetProxiesForCluster), clusterAddr) +} + +// RegisterProxyToCluster mocks base method. +func (m *MockController) RegisterProxyToCluster(ctx context.Context, clusterAddr, proxyID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RegisterProxyToCluster", ctx, clusterAddr, proxyID) + ret0, _ := ret[0].(error) + return ret0 +} + +// RegisterProxyToCluster indicates an expected call of RegisterProxyToCluster. +func (mr *MockControllerMockRecorder) RegisterProxyToCluster(ctx, clusterAddr, proxyID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterProxyToCluster", reflect.TypeOf((*MockController)(nil).RegisterProxyToCluster), ctx, clusterAddr, proxyID) +} + +// SendServiceUpdateToCluster mocks base method. +func (m *MockController) SendServiceUpdateToCluster(ctx context.Context, accountID string, update *proto.ProxyMapping, clusterAddr string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SendServiceUpdateToCluster", ctx, accountID, update, clusterAddr) +} + +// SendServiceUpdateToCluster indicates an expected call of SendServiceUpdateToCluster. +func (mr *MockControllerMockRecorder) SendServiceUpdateToCluster(ctx, accountID, update, clusterAddr interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendServiceUpdateToCluster", reflect.TypeOf((*MockController)(nil).SendServiceUpdateToCluster), ctx, accountID, update, clusterAddr) +} + +// UnregisterProxyFromCluster mocks base method. +func (m *MockController) UnregisterProxyFromCluster(ctx context.Context, clusterAddr, proxyID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnregisterProxyFromCluster", ctx, clusterAddr, proxyID) + ret0, _ := ret[0].(error) + return ret0 +} + +// UnregisterProxyFromCluster indicates an expected call of UnregisterProxyFromCluster. +func (mr *MockControllerMockRecorder) UnregisterProxyFromCluster(ctx, clusterAddr, proxyID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnregisterProxyFromCluster", reflect.TypeOf((*MockController)(nil).UnregisterProxyFromCluster), ctx, clusterAddr, proxyID) +} diff --git a/management/internals/modules/reverseproxy/proxy/proxy.go b/management/internals/modules/reverseproxy/proxy/proxy.go new file mode 100644 index 000000000..699e1ed02 --- /dev/null +++ b/management/internals/modules/reverseproxy/proxy/proxy.go @@ -0,0 +1,20 @@ +package proxy + +import "time" + +// Proxy represents a reverse proxy instance +type Proxy struct { + ID string `gorm:"primaryKey;type:varchar(255)"` + ClusterAddress string `gorm:"type:varchar(255);not null;index:idx_proxy_cluster_status"` + IPAddress string `gorm:"type:varchar(45)"` + LastSeen time.Time `gorm:"not null;index:idx_proxy_last_seen"` + ConnectedAt *time.Time + DisconnectedAt *time.Time + Status string `gorm:"type:varchar(20);not null;index:idx_proxy_cluster_status"` + CreatedAt time.Time + UpdatedAt time.Time +} + +func (Proxy) TableName() string { + return "proxies" +} diff --git a/management/internals/modules/reverseproxy/interface.go b/management/internals/modules/reverseproxy/service/interface.go similarity index 88% rename from management/internals/modules/reverseproxy/interface.go rename to management/internals/modules/reverseproxy/service/interface.go index e7a21a24c..b420f22a8 100644 --- a/management/internals/modules/reverseproxy/interface.go +++ b/management/internals/modules/reverseproxy/service/interface.go @@ -1,6 +1,6 @@ -package reverseproxy +package service -//go:generate go run github.com/golang/mock/mockgen -package reverseproxy -destination=interface_mock.go -source=./interface.go -build_flags=-mod=mod +//go:generate go run github.com/golang/mock/mockgen -package service -destination=interface_mock.go -source=./interface.go -build_flags=-mod=mod import ( "context" @@ -14,7 +14,7 @@ type Manager interface { DeleteService(ctx context.Context, accountID, userID, serviceID string) error DeleteAllServices(ctx context.Context, accountID, userID string) error SetCertificateIssuedAt(ctx context.Context, accountID, serviceID string) error - SetStatus(ctx context.Context, accountID, serviceID string, status ProxyStatus) error + SetStatus(ctx context.Context, accountID, serviceID string, status Status) error ReloadAllServicesForAccount(ctx context.Context, accountID string) error ReloadService(ctx context.Context, accountID, serviceID string) error GetGlobalServices(ctx context.Context) ([]*Service, error) diff --git a/management/internals/modules/reverseproxy/interface_mock.go b/management/internals/modules/reverseproxy/service/interface_mock.go similarity index 99% rename from management/internals/modules/reverseproxy/interface_mock.go rename to management/internals/modules/reverseproxy/service/interface_mock.go index 893025195..727b2c7de 100644 --- a/management/internals/modules/reverseproxy/interface_mock.go +++ b/management/internals/modules/reverseproxy/service/interface_mock.go @@ -1,8 +1,8 @@ // Code generated by MockGen. DO NOT EDIT. // Source: ./interface.go -// Package reverseproxy is a generated GoMock package. -package reverseproxy +// Package service is a generated GoMock package. +package service import ( context "context" @@ -239,7 +239,7 @@ func (mr *MockManagerMockRecorder) SetCertificateIssuedAt(ctx, accountID, servic } // SetStatus mocks base method. -func (m *MockManager) SetStatus(ctx context.Context, accountID, serviceID string, status ProxyStatus) error { +func (m *MockManager) SetStatus(ctx context.Context, accountID, serviceID string, status Status) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetStatus", ctx, accountID, serviceID, status) ret0, _ := ret[0].(error) diff --git a/management/internals/modules/reverseproxy/manager/api.go b/management/internals/modules/reverseproxy/service/manager/api.go similarity index 93% rename from management/internals/modules/reverseproxy/manager/api.go rename to management/internals/modules/reverseproxy/service/manager/api.go index 9117ecd38..70b09e603 100644 --- a/management/internals/modules/reverseproxy/manager/api.go +++ b/management/internals/modules/reverseproxy/service/manager/api.go @@ -6,10 +6,10 @@ import ( "github.com/gorilla/mux" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" accesslogsmanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs/manager" domainmanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain/manager" + rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" nbcontext "github.com/netbirdio/netbird/management/server/context" "github.com/netbirdio/netbird/shared/management/http/api" "github.com/netbirdio/netbird/shared/management/http/util" @@ -17,11 +17,11 @@ import ( ) type handler struct { - manager reverseproxy.Manager + manager rpservice.Manager } // RegisterEndpoints registers all service HTTP endpoints. -func RegisterEndpoints(manager reverseproxy.Manager, domainManager domainmanager.Manager, accessLogsManager accesslogs.Manager, router *mux.Router) { +func RegisterEndpoints(manager rpservice.Manager, domainManager domainmanager.Manager, accessLogsManager accesslogs.Manager, router *mux.Router) { h := &handler{ manager: manager, } @@ -72,7 +72,7 @@ func (h *handler) createService(w http.ResponseWriter, r *http.Request) { return } - service := new(reverseproxy.Service) + service := new(rpservice.Service) service.FromAPIRequest(&req, userAuth.AccountId) if err = service.Validate(); err != nil { @@ -130,7 +130,7 @@ func (h *handler) updateService(w http.ResponseWriter, r *http.Request) { return } - service := new(reverseproxy.Service) + service := new(rpservice.Service) service.ID = serviceID service.FromAPIRequest(&req, userAuth.AccountId) diff --git a/management/internals/modules/reverseproxy/manager/expose_tracker.go b/management/internals/modules/reverseproxy/service/manager/expose_tracker.go similarity index 99% rename from management/internals/modules/reverseproxy/manager/expose_tracker.go rename to management/internals/modules/reverseproxy/service/manager/expose_tracker.go index ef285e923..11e1f0110 100644 --- a/management/internals/modules/reverseproxy/manager/expose_tracker.go +++ b/management/internals/modules/reverseproxy/service/manager/expose_tracker.go @@ -27,7 +27,7 @@ type trackedExpose struct { type exposeTracker struct { activeExposes sync.Map exposeCreateMu sync.Mutex - manager *managerImpl + manager *Manager } func exposeKey(peerID, domain string) string { diff --git a/management/internals/modules/reverseproxy/manager/expose_tracker_test.go b/management/internals/modules/reverseproxy/service/manager/expose_tracker_test.go similarity index 97% rename from management/internals/modules/reverseproxy/manager/expose_tracker_test.go rename to management/internals/modules/reverseproxy/service/manager/expose_tracker_test.go index 2dc726590..154239fb1 100644 --- a/management/internals/modules/reverseproxy/manager/expose_tracker_test.go +++ b/management/internals/modules/reverseproxy/service/manager/expose_tracker_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" ) func TestExposeKey(t *testing.T) { @@ -120,7 +120,7 @@ func TestReapExpiredExposes(t *testing.T) { tracker := mgr.exposeTracker ctx := context.Background() - resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{ + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ Port: 8080, Protocol: "http", }) @@ -156,7 +156,7 @@ func TestReapExpiredExposes_SetsExpiringFlag(t *testing.T) { tracker := mgr.exposeTracker ctx := context.Background() - resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{ + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ Port: 8080, Protocol: "http", }) @@ -191,7 +191,7 @@ func TestConcurrentTrackAndCount(t *testing.T) { ctx := context.Background() for i := range 5 { - _, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{ + _, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ Port: 8080 + i, Protocol: "http", }) diff --git a/management/internals/modules/reverseproxy/manager/manager.go b/management/internals/modules/reverseproxy/service/manager/manager.go similarity index 65% rename from management/internals/modules/reverseproxy/manager/manager.go rename to management/internals/modules/reverseproxy/service/manager/manager.go index 3c02e117b..16a57abb6 100644 --- a/management/internals/modules/reverseproxy/manager/manager.go +++ b/management/internals/modules/reverseproxy/service/manager/manager.go @@ -11,17 +11,15 @@ import ( nbpeer "github.com/netbirdio/netbird/management/server/peer" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/sessionkey" - nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" "github.com/netbirdio/netbird/management/server/account" "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/permissions/modules" "github.com/netbirdio/netbird/management/server/permissions/operations" - "github.com/netbirdio/netbird/management/server/settings" "github.com/netbirdio/netbird/management/server/store" - "github.com/netbirdio/netbird/shared/management/proto" "github.com/netbirdio/netbird/shared/management/status" ) @@ -33,24 +31,22 @@ type ClusterDeriver interface { GetClusterDomains() []string } -type managerImpl struct { +type Manager struct { store store.Store accountManager account.Manager permissionsManager permissions.Manager - settingsManager settings.Manager - proxyGRPCServer *nbgrpc.ProxyServiceServer + proxyController proxy.Controller clusterDeriver ClusterDeriver exposeTracker *exposeTracker } // NewManager creates a new service manager. -func NewManager(store store.Store, accountManager account.Manager, permissionsManager permissions.Manager, settingsManager settings.Manager, proxyGRPCServer *nbgrpc.ProxyServiceServer, clusterDeriver ClusterDeriver) reverseproxy.Manager { - mgr := &managerImpl{ +func NewManager(store store.Store, accountManager account.Manager, permissionsManager permissions.Manager, proxyController proxy.Controller, clusterDeriver ClusterDeriver) *Manager { + mgr := &Manager{ store: store, accountManager: accountManager, permissionsManager: permissionsManager, - settingsManager: settingsManager, - proxyGRPCServer: proxyGRPCServer, + proxyController: proxyController, clusterDeriver: clusterDeriver, } mgr.exposeTracker = &exposeTracker{manager: mgr} @@ -58,11 +54,11 @@ func NewManager(store store.Store, accountManager account.Manager, permissionsMa } // StartExposeReaper delegates to the expose tracker. -func (m *managerImpl) StartExposeReaper(ctx context.Context) { +func (m *Manager) StartExposeReaper(ctx context.Context) { m.exposeTracker.StartExposeReaper(ctx) } -func (m *managerImpl) GetAllServices(ctx context.Context, accountID, userID string) ([]*reverseproxy.Service, error) { +func (m *Manager) GetAllServices(ctx context.Context, accountID, userID string) ([]*service.Service, error) { ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Read) if err != nil { return nil, status.NewPermissionValidationError(err) @@ -86,34 +82,34 @@ func (m *managerImpl) GetAllServices(ctx context.Context, accountID, userID stri return services, nil } -func (m *managerImpl) replaceHostByLookup(ctx context.Context, accountID string, service *reverseproxy.Service) error { - for _, target := range service.Targets { +func (m *Manager) replaceHostByLookup(ctx context.Context, accountID string, s *service.Service) error { + for _, target := range s.Targets { switch target.TargetType { - case reverseproxy.TargetTypePeer: + case service.TargetTypePeer: peer, err := m.store.GetPeerByID(ctx, store.LockingStrengthNone, accountID, target.TargetId) if err != nil { - log.WithContext(ctx).Warnf("failed to get peer by id %s for service %s: %v", target.TargetId, service.ID, err) + log.WithContext(ctx).Warnf("failed to get peer by id %s for service %s: %v", target.TargetId, s.ID, err) target.Host = unknownHostPlaceholder continue } target.Host = peer.IP.String() - case reverseproxy.TargetTypeHost: + case service.TargetTypeHost: resource, err := m.store.GetNetworkResourceByID(ctx, store.LockingStrengthNone, accountID, target.TargetId) if err != nil { - log.WithContext(ctx).Warnf("failed to get resource by id %s for service %s: %v", target.TargetId, service.ID, err) + log.WithContext(ctx).Warnf("failed to get resource by id %s for service %s: %v", target.TargetId, s.ID, err) target.Host = unknownHostPlaceholder continue } target.Host = resource.Prefix.Addr().String() - case reverseproxy.TargetTypeDomain: + case service.TargetTypeDomain: resource, err := m.store.GetNetworkResourceByID(ctx, store.LockingStrengthNone, accountID, target.TargetId) if err != nil { - log.WithContext(ctx).Warnf("failed to get resource by id %s for service %s: %v", target.TargetId, service.ID, err) + log.WithContext(ctx).Warnf("failed to get resource by id %s for service %s: %v", target.TargetId, s.ID, err) target.Host = unknownHostPlaceholder continue } target.Host = resource.Domain - case reverseproxy.TargetTypeSubnet: + case service.TargetTypeSubnet: // For subnets we do not do any lookups on the resource default: return fmt.Errorf("unknown target type: %s", target.TargetType) @@ -122,7 +118,7 @@ func (m *managerImpl) replaceHostByLookup(ctx context.Context, accountID string, return nil } -func (m *managerImpl) GetService(ctx context.Context, accountID, userID, serviceID string) (*reverseproxy.Service, error) { +func (m *Manager) GetService(ctx context.Context, accountID, userID, serviceID string) (*service.Service, error) { ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Read) if err != nil { return nil, status.NewPermissionValidationError(err) @@ -143,7 +139,7 @@ func (m *managerImpl) GetService(ctx context.Context, accountID, userID, service return service, nil } -func (m *managerImpl) CreateService(ctx context.Context, accountID, userID string, service *reverseproxy.Service) (*reverseproxy.Service, error) { +func (m *Manager) CreateService(ctx context.Context, accountID, userID string, s *service.Service) (*service.Service, error) { ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Create) if err != nil { return nil, status.NewPermissionValidationError(err) @@ -152,29 +148,29 @@ func (m *managerImpl) CreateService(ctx context.Context, accountID, userID strin return nil, status.NewPermissionDeniedError() } - if err := m.initializeServiceForCreate(ctx, accountID, service); err != nil { + if err := m.initializeServiceForCreate(ctx, accountID, s); err != nil { return nil, err } - if err := m.persistNewService(ctx, accountID, service); err != nil { + if err := m.persistNewService(ctx, accountID, s); err != nil { return nil, err } - m.accountManager.StoreEvent(ctx, userID, service.ID, accountID, activity.ServiceCreated, service.EventMeta()) + m.accountManager.StoreEvent(ctx, userID, s.ID, accountID, activity.ServiceCreated, s.EventMeta()) - err = m.replaceHostByLookup(ctx, accountID, service) + err = m.replaceHostByLookup(ctx, accountID, s) if err != nil { - return nil, fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err) + return nil, fmt.Errorf("failed to replace host by lookup for service %s: %w", s.ID, err) } - m.sendServiceUpdate(service, reverseproxy.Create, service.ProxyCluster, "") + m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Create, "", m.proxyController.GetOIDCValidationConfig()), s.ProxyCluster) m.accountManager.UpdateAccountPeers(ctx, accountID) - return service, nil + return s, nil } -func (m *managerImpl) initializeServiceForCreate(ctx context.Context, accountID string, service *reverseproxy.Service) error { +func (m *Manager) initializeServiceForCreate(ctx context.Context, accountID string, service *service.Service) error { if m.clusterDeriver != nil { proxyCluster, err := m.clusterDeriver.DeriveClusterFromDomain(ctx, accountID, service.Domain) if err != nil { @@ -201,7 +197,7 @@ func (m *managerImpl) initializeServiceForCreate(ctx context.Context, accountID return nil } -func (m *managerImpl) persistNewService(ctx context.Context, accountID string, service *reverseproxy.Service) error { +func (m *Manager) persistNewService(ctx context.Context, accountID string, service *service.Service) error { return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { if err := m.checkDomainAvailable(ctx, transaction, accountID, service.Domain, ""); err != nil { return err @@ -219,7 +215,7 @@ func (m *managerImpl) persistNewService(ctx context.Context, accountID string, s }) } -func (m *managerImpl) checkDomainAvailable(ctx context.Context, transaction store.Store, accountID, domain, excludeServiceID string) error { +func (m *Manager) checkDomainAvailable(ctx context.Context, transaction store.Store, accountID, domain, excludeServiceID string) error { existingService, err := transaction.GetServiceByDomain(ctx, accountID, domain) if err != nil { if sErr, ok := status.FromError(err); !ok || sErr.Type() != status.NotFound { @@ -235,7 +231,7 @@ func (m *managerImpl) checkDomainAvailable(ctx context.Context, transaction stor return nil } -func (m *managerImpl) UpdateService(ctx context.Context, accountID, userID string, service *reverseproxy.Service) (*reverseproxy.Service, error) { +func (m *Manager) UpdateService(ctx context.Context, accountID, userID string, service *service.Service) (*service.Service, error) { ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Update) if err != nil { return nil, status.NewPermissionValidationError(err) @@ -259,7 +255,7 @@ func (m *managerImpl) UpdateService(ctx context.Context, accountID, userID strin return nil, fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err) } - m.sendServiceUpdateNotifications(service, updateInfo) + m.sendServiceUpdateNotifications(ctx, accountID, service, updateInfo) m.accountManager.UpdateAccountPeers(ctx, accountID) return service, nil @@ -271,7 +267,7 @@ type serviceUpdateInfo struct { serviceEnabledChanged bool } -func (m *managerImpl) persistServiceUpdate(ctx context.Context, accountID string, service *reverseproxy.Service) (*serviceUpdateInfo, error) { +func (m *Manager) persistServiceUpdate(ctx context.Context, accountID string, service *service.Service) (*serviceUpdateInfo, error) { var updateInfo serviceUpdateInfo err := m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { @@ -309,7 +305,7 @@ func (m *managerImpl) persistServiceUpdate(ctx context.Context, accountID string return &updateInfo, err } -func (m *managerImpl) handleDomainChange(ctx context.Context, transaction store.Store, accountID string, service *reverseproxy.Service) error { +func (m *Manager) handleDomainChange(ctx context.Context, transaction store.Store, accountID string, service *service.Service) error { if err := m.checkDomainAvailable(ctx, transaction, accountID, service.Domain, service.ID); err != nil { return err } @@ -326,7 +322,7 @@ func (m *managerImpl) handleDomainChange(ctx context.Context, transaction store. return nil } -func (m *managerImpl) preserveExistingAuthSecrets(service, existingService *reverseproxy.Service) { +func (m *Manager) preserveExistingAuthSecrets(service, existingService *service.Service) { if service.Auth.PasswordAuth != nil && service.Auth.PasswordAuth.Enabled && existingService.Auth.PasswordAuth != nil && existingService.Auth.PasswordAuth.Enabled && service.Auth.PasswordAuth.Password == "" { @@ -340,54 +336,40 @@ func (m *managerImpl) preserveExistingAuthSecrets(service, existingService *reve } } -func (m *managerImpl) preserveServiceMetadata(service, existingService *reverseproxy.Service) { +func (m *Manager) preserveServiceMetadata(service, existingService *service.Service) { service.Meta = existingService.Meta service.SessionPrivateKey = existingService.SessionPrivateKey service.SessionPublicKey = existingService.SessionPublicKey } -func (m *managerImpl) sendServiceUpdateNotifications(service *reverseproxy.Service, updateInfo *serviceUpdateInfo) { +func (m *Manager) sendServiceUpdateNotifications(ctx context.Context, accountID string, s *service.Service, updateInfo *serviceUpdateInfo) { + oidcCfg := m.proxyController.GetOIDCValidationConfig() + switch { - case updateInfo.domainChanged && updateInfo.oldCluster != service.ProxyCluster: - m.sendServiceUpdate(service, reverseproxy.Delete, updateInfo.oldCluster, "") - m.sendServiceUpdate(service, reverseproxy.Create, service.ProxyCluster, "") - case !service.Enabled && updateInfo.serviceEnabledChanged: - m.sendServiceUpdate(service, reverseproxy.Delete, service.ProxyCluster, "") - case service.Enabled && updateInfo.serviceEnabledChanged: - m.sendServiceUpdate(service, reverseproxy.Create, service.ProxyCluster, "") + case updateInfo.domainChanged && updateInfo.oldCluster != s.ProxyCluster: + m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Delete, "", oidcCfg), updateInfo.oldCluster) + m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Create, "", oidcCfg), s.ProxyCluster) + case !s.Enabled && updateInfo.serviceEnabledChanged: + m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Delete, "", oidcCfg), s.ProxyCluster) + case s.Enabled && updateInfo.serviceEnabledChanged: + m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Create, "", oidcCfg), s.ProxyCluster) default: - m.sendServiceUpdate(service, reverseproxy.Update, service.ProxyCluster, "") + m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Update, "", oidcCfg), s.ProxyCluster) } } -func (m *managerImpl) sendServiceUpdate(service *reverseproxy.Service, operation reverseproxy.Operation, cluster, oldService string) { - oidcCfg := m.proxyGRPCServer.GetOIDCValidationConfig() - mapping := service.ToProtoMapping(operation, oldService, oidcCfg) - m.sendMappingsToCluster([]*proto.ProxyMapping{mapping}, cluster) -} - -func (m *managerImpl) sendMappingsToCluster(mappings []*proto.ProxyMapping, cluster string) { - if len(mappings) == 0 { - return - } - update := &proto.GetMappingUpdateResponse{ - Mapping: mappings, - } - m.proxyGRPCServer.SendServiceUpdateToCluster(update, cluster) -} - // validateTargetReferences checks that all target IDs reference existing peers or resources in the account. -func validateTargetReferences(ctx context.Context, transaction store.Store, accountID string, targets []*reverseproxy.Target) error { +func validateTargetReferences(ctx context.Context, transaction store.Store, accountID string, targets []*service.Target) error { for _, target := range targets { switch target.TargetType { - case reverseproxy.TargetTypePeer: + case service.TargetTypePeer: if _, err := transaction.GetPeerByID(ctx, store.LockingStrengthShare, accountID, target.TargetId); err != nil { if sErr, ok := status.FromError(err); ok && sErr.Type() == status.NotFound { return status.Errorf(status.InvalidArgument, "peer target %q not found in account", target.TargetId) } return fmt.Errorf("look up peer target %q: %w", target.TargetId, err) } - case reverseproxy.TargetTypeHost, reverseproxy.TargetTypeSubnet, reverseproxy.TargetTypeDomain: + case service.TargetTypeHost, service.TargetTypeSubnet, service.TargetTypeDomain: if _, err := transaction.GetNetworkResourceByID(ctx, store.LockingStrengthShare, accountID, target.TargetId); err != nil { if sErr, ok := status.FromError(err); ok && sErr.Type() == status.NotFound { return status.Errorf(status.InvalidArgument, "resource target %q not found in account", target.TargetId) @@ -399,7 +381,7 @@ func validateTargetReferences(ctx context.Context, transaction store.Store, acco return nil } -func (m *managerImpl) DeleteService(ctx context.Context, accountID, userID, serviceID string) error { +func (m *Manager) DeleteService(ctx context.Context, accountID, userID, serviceID string) error { ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Delete) if err != nil { return status.NewPermissionValidationError(err) @@ -408,9 +390,10 @@ func (m *managerImpl) DeleteService(ctx context.Context, accountID, userID, serv return status.NewPermissionDeniedError() } - var service *reverseproxy.Service + var s *service.Service err = m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { - service, err = transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID) + var err error + s, err = transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID) if err != nil { return err } @@ -429,20 +412,20 @@ func (m *managerImpl) DeleteService(ctx context.Context, accountID, userID, serv return err } - if service.Source == reverseproxy.SourceEphemeral { - m.exposeTracker.UntrackExpose(service.SourcePeer, service.Domain) + if s.Source == service.SourceEphemeral { + m.exposeTracker.UntrackExpose(s.SourcePeer, s.Domain) } - m.accountManager.StoreEvent(ctx, userID, serviceID, accountID, activity.ServiceDeleted, service.EventMeta()) + m.accountManager.StoreEvent(ctx, userID, serviceID, accountID, activity.ServiceDeleted, s.EventMeta()) - m.sendServiceUpdate(service, reverseproxy.Delete, service.ProxyCluster, "") + m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Delete, "", m.proxyController.GetOIDCValidationConfig()), s.ProxyCluster) m.accountManager.UpdateAccountPeers(ctx, accountID) return nil } -func (m *managerImpl) DeleteAllServices(ctx context.Context, accountID, userID string) error { +func (m *Manager) DeleteAllServices(ctx context.Context, accountID, userID string) error { ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Delete) if err != nil { return status.NewPermissionValidationError(err) @@ -451,16 +434,16 @@ func (m *managerImpl) DeleteAllServices(ctx context.Context, accountID, userID s return status.NewPermissionDeniedError() } - var services []*reverseproxy.Service + var services []*service.Service err = m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { var err error - services, err = transaction.GetServicesByAccountID(ctx, store.LockingStrengthUpdate, accountID) + services, err = transaction.GetAccountServices(ctx, store.LockingStrengthUpdate, accountID) if err != nil { return err } - for _, service := range services { - if err = transaction.DeleteService(ctx, accountID, service.ID); err != nil { + for _, svc := range services { + if err = transaction.DeleteService(ctx, accountID, svc.ID); err != nil { return fmt.Errorf("failed to delete service: %w", err) } } @@ -471,20 +454,14 @@ func (m *managerImpl) DeleteAllServices(ctx context.Context, accountID, userID s return err } - clusterMappings := make(map[string][]*proto.ProxyMapping) - oidcCfg := m.proxyGRPCServer.GetOIDCValidationConfig() + oidcCfg := m.proxyController.GetOIDCValidationConfig() - for _, service := range services { - if service.Source == reverseproxy.SourceEphemeral { - m.exposeTracker.UntrackExpose(service.SourcePeer, service.Domain) + for _, svc := range services { + if svc.Source == service.SourceEphemeral { + m.exposeTracker.UntrackExpose(svc.SourcePeer, svc.Domain) } - m.accountManager.StoreEvent(ctx, userID, service.ID, accountID, activity.ServiceDeleted, service.EventMeta()) - mapping := service.ToProtoMapping(reverseproxy.Delete, "", oidcCfg) - clusterMappings[service.ProxyCluster] = append(clusterMappings[service.ProxyCluster], mapping) - } - - for cluster, mappings := range clusterMappings { - m.sendMappingsToCluster(mappings, cluster) + m.accountManager.StoreEvent(ctx, userID, svc.ID, accountID, activity.ServiceDeleted, svc.EventMeta()) + m.proxyController.SendServiceUpdateToCluster(ctx, accountID, svc.ToProtoMapping(service.Delete, "", oidcCfg), svc.ProxyCluster) } m.accountManager.UpdateAccountPeers(ctx, accountID) @@ -494,7 +471,7 @@ func (m *managerImpl) DeleteAllServices(ctx context.Context, accountID, userID s // SetCertificateIssuedAt sets the certificate issued timestamp to the current time. // Call this when receiving a gRPC notification that the certificate was issued. -func (m *managerImpl) SetCertificateIssuedAt(ctx context.Context, accountID, serviceID string) error { +func (m *Manager) SetCertificateIssuedAt(ctx context.Context, accountID, serviceID string) error { return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { service, err := transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID) if err != nil { @@ -513,7 +490,7 @@ func (m *managerImpl) SetCertificateIssuedAt(ctx context.Context, accountID, ser } // SetStatus updates the status of the service (e.g., "active", "tunnel_not_created", etc.) -func (m *managerImpl) SetStatus(ctx context.Context, accountID, serviceID string, status reverseproxy.ProxyStatus) error { +func (m *Manager) SetStatus(ctx context.Context, accountID, serviceID string, status service.Status) error { return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { service, err := transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID) if err != nil { @@ -530,50 +507,42 @@ func (m *managerImpl) SetStatus(ctx context.Context, accountID, serviceID string }) } -func (m *managerImpl) ReloadService(ctx context.Context, accountID, serviceID string) error { - service, err := m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, serviceID) +func (m *Manager) ReloadService(ctx context.Context, accountID, serviceID string) error { + s, err := m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, serviceID) if err != nil { return fmt.Errorf("failed to get service: %w", err) } - err = m.replaceHostByLookup(ctx, accountID, service) + err = m.replaceHostByLookup(ctx, accountID, s) if err != nil { - return fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err) + return fmt.Errorf("failed to replace host by lookup for service %s: %w", s.ID, err) } - m.sendServiceUpdate(service, reverseproxy.Update, service.ProxyCluster, "") + m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Update, "", m.proxyController.GetOIDCValidationConfig()), s.ProxyCluster) m.accountManager.UpdateAccountPeers(ctx, accountID) return nil } -func (m *managerImpl) ReloadAllServicesForAccount(ctx context.Context, accountID string) error { +func (m *Manager) ReloadAllServicesForAccount(ctx context.Context, accountID string) error { services, err := m.store.GetAccountServices(ctx, store.LockingStrengthNone, accountID) if err != nil { return fmt.Errorf("failed to get services: %w", err) } - clusterMappings := make(map[string][]*proto.ProxyMapping) - oidcCfg := m.proxyGRPCServer.GetOIDCValidationConfig() - - for _, service := range services { - err = m.replaceHostByLookup(ctx, accountID, service) + for _, s := range services { + err = m.replaceHostByLookup(ctx, accountID, s) if err != nil { - return fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err) + return fmt.Errorf("failed to replace host by lookup for service %s: %w", s.ID, err) } - mapping := service.ToProtoMapping(reverseproxy.Update, "", oidcCfg) - clusterMappings[service.ProxyCluster] = append(clusterMappings[service.ProxyCluster], mapping) - } - - for cluster, mappings := range clusterMappings { - m.sendMappingsToCluster(mappings, cluster) + m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Update, "", m.proxyController.GetOIDCValidationConfig()), s.ProxyCluster) } return nil } -func (m *managerImpl) GetGlobalServices(ctx context.Context) ([]*reverseproxy.Service, error) { +func (m *Manager) GetGlobalServices(ctx context.Context) ([]*service.Service, error) { services, err := m.store.GetServices(ctx, store.LockingStrengthNone) if err != nil { return nil, fmt.Errorf("failed to get services: %w", err) @@ -589,7 +558,7 @@ func (m *managerImpl) GetGlobalServices(ctx context.Context) ([]*reverseproxy.Se return services, nil } -func (m *managerImpl) GetServiceByID(ctx context.Context, accountID, serviceID string) (*reverseproxy.Service, error) { +func (m *Manager) GetServiceByID(ctx context.Context, accountID, serviceID string) (*service.Service, error) { service, err := m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, serviceID) if err != nil { return nil, fmt.Errorf("failed to get service: %w", err) @@ -603,7 +572,7 @@ func (m *managerImpl) GetServiceByID(ctx context.Context, accountID, serviceID s return service, nil } -func (m *managerImpl) GetAccountServices(ctx context.Context, accountID string) ([]*reverseproxy.Service, error) { +func (m *Manager) GetAccountServices(ctx context.Context, accountID string) ([]*service.Service, error) { services, err := m.store.GetAccountServices(ctx, store.LockingStrengthNone, accountID) if err != nil { return nil, fmt.Errorf("failed to get services: %w", err) @@ -619,7 +588,7 @@ func (m *managerImpl) GetAccountServices(ctx context.Context, accountID string) return services, nil } -func (m *managerImpl) GetServiceIDByTargetID(ctx context.Context, accountID string, resourceID string) (string, error) { +func (m *Manager) GetServiceIDByTargetID(ctx context.Context, accountID string, resourceID string) (string, error) { target, err := m.store.GetServiceTargetByTargetID(ctx, store.LockingStrengthNone, accountID, resourceID) if err != nil { if s, ok := status.FromError(err); ok && s.Type() == status.NotFound { @@ -637,7 +606,7 @@ func (m *managerImpl) GetServiceIDByTargetID(ctx context.Context, accountID stri // validateExposePermission checks whether the peer is allowed to use the expose feature. // It verifies the account has peer expose enabled and that the peer belongs to an allowed group. -func (m *managerImpl) validateExposePermission(ctx context.Context, accountID, peerID string) error { +func (m *Manager) validateExposePermission(ctx context.Context, accountID, peerID string) error { settings, err := m.store.GetAccountSettings(ctx, store.LockingStrengthNone, accountID) if err != nil { log.WithContext(ctx).Errorf("failed to get account settings: %v", err) @@ -670,7 +639,7 @@ func (m *managerImpl) validateExposePermission(ctx context.Context, accountID, p // CreateServiceFromPeer creates a service initiated by a peer expose request. // It validates the request, checks expose permissions, enforces the per-peer limit, // creates the service, and tracks it for TTL-based reaping. -func (m *managerImpl) CreateServiceFromPeer(ctx context.Context, accountID, peerID string, req *reverseproxy.ExposeServiceRequest) (*reverseproxy.ExposeServiceResponse, error) { +func (m *Manager) CreateServiceFromPeer(ctx context.Context, accountID, peerID string, req *service.ExposeServiceRequest) (*service.ExposeServiceResponse, error) { if err := req.Validate(); err != nil { return nil, status.Errorf(status.InvalidArgument, "validate expose request: %v", err) } @@ -679,31 +648,31 @@ func (m *managerImpl) CreateServiceFromPeer(ctx context.Context, accountID, peer return nil, err } - serviceName, err := reverseproxy.GenerateExposeName(req.NamePrefix) + serviceName, err := service.GenerateExposeName(req.NamePrefix) if err != nil { return nil, status.Errorf(status.InvalidArgument, "generate service name: %v", err) } - service := req.ToService(accountID, peerID, serviceName) - service.Source = reverseproxy.SourceEphemeral + svc := req.ToService(accountID, peerID, serviceName) + svc.Source = service.SourceEphemeral - if service.Domain == "" { - domain, err := m.buildRandomDomain(service.Name) + if svc.Domain == "" { + domain, err := m.buildRandomDomain(svc.Name) if err != nil { - return nil, fmt.Errorf("build random domain for service %s: %w", service.Name, err) + return nil, fmt.Errorf("build random domain for service %s: %w", svc.Name, err) } - service.Domain = domain + svc.Domain = domain } - if service.Auth.BearerAuth != nil && service.Auth.BearerAuth.Enabled { - groupIDs, err := m.getGroupIDsFromNames(ctx, accountID, service.Auth.BearerAuth.DistributionGroups) + if svc.Auth.BearerAuth != nil && svc.Auth.BearerAuth.Enabled { + groupIDs, err := m.getGroupIDsFromNames(ctx, accountID, svc.Auth.BearerAuth.DistributionGroups) if err != nil { - return nil, fmt.Errorf("get group ids for service %s: %w", service.Name, err) + return nil, fmt.Errorf("get group ids for service %s: %w", svc.Name, err) } - service.Auth.BearerAuth.DistributionGroups = groupIDs + svc.Auth.BearerAuth.DistributionGroups = groupIDs } - if err := m.initializeServiceForCreate(ctx, accountID, service); err != nil { + if err := m.initializeServiceForCreate(ctx, accountID, svc); err != nil { return nil, err } @@ -713,45 +682,45 @@ func (m *managerImpl) CreateServiceFromPeer(ctx context.Context, accountID, peer } now := time.Now() - service.Meta.LastRenewedAt = &now - service.SourcePeer = peerID + svc.Meta.LastRenewedAt = &now + svc.SourcePeer = peerID - if err := m.persistNewService(ctx, accountID, service); err != nil { + if err := m.persistNewService(ctx, accountID, svc); err != nil { return nil, err } - alreadyTracked, allowed := m.exposeTracker.TrackExposeIfAllowed(peerID, service.Domain, accountID) + alreadyTracked, allowed := m.exposeTracker.TrackExposeIfAllowed(peerID, svc.Domain, accountID) if alreadyTracked { - if err := m.deleteServiceFromPeer(ctx, accountID, peerID, service.Domain, false); err != nil { - log.WithContext(ctx).Debugf("failed to delete duplicate expose service for domain %s: %v", service.Domain, err) + if err := m.deleteServiceFromPeer(ctx, accountID, peerID, svc.Domain, false); err != nil { + log.WithContext(ctx).Debugf("failed to delete duplicate expose service for domain %s: %v", svc.Domain, err) } return nil, status.Errorf(status.AlreadyExists, "peer already has an active expose session for this domain") } if !allowed { - if err := m.deleteServiceFromPeer(ctx, accountID, peerID, service.Domain, false); err != nil { - log.WithContext(ctx).Debugf("failed to delete service after limit exceeded for domain %s: %v", service.Domain, err) + if err := m.deleteServiceFromPeer(ctx, accountID, peerID, svc.Domain, false); err != nil { + log.WithContext(ctx).Debugf("failed to delete service after limit exceeded for domain %s: %v", svc.Domain, err) } return nil, status.Errorf(status.PreconditionFailed, "peer has reached the maximum number of active expose sessions (%d)", maxExposesPerPeer) } - meta := addPeerInfoToEventMeta(service.EventMeta(), peer) - m.accountManager.StoreEvent(ctx, peerID, service.ID, accountID, activity.PeerServiceExposed, meta) + meta := addPeerInfoToEventMeta(svc.EventMeta(), peer) + m.accountManager.StoreEvent(ctx, peerID, svc.ID, accountID, activity.PeerServiceExposed, meta) - if err := m.replaceHostByLookup(ctx, accountID, service); err != nil { - return nil, fmt.Errorf("replace host by lookup for service %s: %w", service.ID, err) + if err := m.replaceHostByLookup(ctx, accountID, svc); err != nil { + return nil, fmt.Errorf("replace host by lookup for service %s: %w", svc.ID, err) } - m.sendServiceUpdate(service, reverseproxy.Create, service.ProxyCluster, "") + m.proxyController.SendServiceUpdateToCluster(ctx, accountID, svc.ToProtoMapping(service.Create, "", m.proxyController.GetOIDCValidationConfig()), svc.ProxyCluster) m.accountManager.UpdateAccountPeers(ctx, accountID) - return &reverseproxy.ExposeServiceResponse{ - ServiceName: service.Name, - ServiceURL: "https://" + service.Domain, - Domain: service.Domain, + return &service.ExposeServiceResponse{ + ServiceName: svc.Name, + ServiceURL: "https://" + svc.Domain, + Domain: svc.Domain, }, nil } -func (m *managerImpl) getGroupIDsFromNames(ctx context.Context, accountID string, groupNames []string) ([]string, error) { +func (m *Manager) getGroupIDsFromNames(ctx context.Context, accountID string, groupNames []string) ([]string, error) { if len(groupNames) == 0 { return []string{}, fmt.Errorf("no group names provided") } @@ -766,7 +735,7 @@ func (m *managerImpl) getGroupIDsFromNames(ctx context.Context, accountID string return groupIDs, nil } -func (m *managerImpl) buildRandomDomain(name string) (string, error) { +func (m *Manager) buildRandomDomain(name string) (string, error) { if m.clusterDeriver == nil { return "", fmt.Errorf("unable to get random domain") } @@ -781,7 +750,7 @@ func (m *managerImpl) buildRandomDomain(name string) (string, error) { // RenewServiceFromPeer renews the in-memory TTL tracker for the peer's expose session. // Returns an error if the expose is not actively tracked. -func (m *managerImpl) RenewServiceFromPeer(_ context.Context, _, peerID, domain string) error { +func (m *Manager) RenewServiceFromPeer(_ context.Context, _, peerID, domain string) error { if !m.exposeTracker.RenewTrackedExpose(peerID, domain) { return status.Errorf(status.NotFound, "no active expose session for domain %s", domain) } @@ -789,7 +758,7 @@ func (m *managerImpl) RenewServiceFromPeer(_ context.Context, _, peerID, domain } // StopServiceFromPeer stops a peer's active expose session by untracking and deleting the service. -func (m *managerImpl) StopServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error { +func (m *Manager) StopServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error { if err := m.deleteServiceFromPeer(ctx, accountID, peerID, domain, false); err != nil { log.WithContext(ctx).Errorf("failed to delete peer-exposed service for domain %s: %v", domain, err) return err @@ -804,8 +773,8 @@ func (m *managerImpl) StopServiceFromPeer(ctx context.Context, accountID, peerID // deleteServiceFromPeer deletes a peer-initiated service identified by domain. // When expired is true, the activity is recorded as PeerServiceExposeExpired instead of PeerServiceUnexposed. -func (m *managerImpl) deleteServiceFromPeer(ctx context.Context, accountID, peerID, domain string, expired bool) error { - service, err := m.lookupPeerService(ctx, accountID, peerID, domain) +func (m *Manager) deleteServiceFromPeer(ctx context.Context, accountID, peerID, domain string, expired bool) error { + svc, err := m.lookupPeerService(ctx, accountID, peerID, domain) if err != nil { return err } @@ -814,41 +783,41 @@ func (m *managerImpl) deleteServiceFromPeer(ctx context.Context, accountID, peer if expired { activityCode = activity.PeerServiceExposeExpired } - return m.deletePeerService(ctx, accountID, peerID, service.ID, activityCode) + return m.deletePeerService(ctx, accountID, peerID, svc.ID, activityCode) } // lookupPeerService finds a peer-initiated service by domain and validates ownership. -func (m *managerImpl) lookupPeerService(ctx context.Context, accountID, peerID, domain string) (*reverseproxy.Service, error) { - service, err := m.store.GetServiceByDomain(ctx, accountID, domain) +func (m *Manager) lookupPeerService(ctx context.Context, accountID, peerID, domain string) (*service.Service, error) { + svc, err := m.store.GetServiceByDomain(ctx, accountID, domain) if err != nil { return nil, err } - if service.Source != reverseproxy.SourceEphemeral { + if svc.Source != service.SourceEphemeral { return nil, status.Errorf(status.PermissionDenied, "cannot operate on API-created service via peer expose") } - if service.SourcePeer != peerID { + if svc.SourcePeer != peerID { return nil, status.Errorf(status.PermissionDenied, "cannot operate on service exposed by another peer") } - return service, nil + return svc, nil } -func (m *managerImpl) deletePeerService(ctx context.Context, accountID, peerID, serviceID string, activityCode activity.Activity) error { - var service *reverseproxy.Service +func (m *Manager) deletePeerService(ctx context.Context, accountID, peerID, serviceID string, activityCode activity.Activity) error { + var svc *service.Service err := m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { var err error - service, err = transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID) + svc, err = transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID) if err != nil { return err } - if service.Source != reverseproxy.SourceEphemeral { + if svc.Source != service.SourceEphemeral { return status.Errorf(status.PermissionDenied, "cannot delete API-created service via peer expose") } - if service.SourcePeer != peerID { + if svc.SourcePeer != peerID { return status.Errorf(status.PermissionDenied, "cannot delete service exposed by another peer") } @@ -868,11 +837,11 @@ func (m *managerImpl) deletePeerService(ctx context.Context, accountID, peerID, peer = nil } - meta := addPeerInfoToEventMeta(service.EventMeta(), peer) + meta := addPeerInfoToEventMeta(svc.EventMeta(), peer) m.accountManager.StoreEvent(ctx, peerID, serviceID, accountID, activityCode, meta) - m.sendServiceUpdate(service, reverseproxy.Delete, service.ProxyCluster, "") + m.proxyController.SendServiceUpdateToCluster(ctx, accountID, svc.ToProtoMapping(service.Delete, "", m.proxyController.GetOIDCValidationConfig()), svc.ProxyCluster) m.accountManager.UpdateAccountPeers(ctx, accountID) diff --git a/management/internals/modules/reverseproxy/manager/manager_test.go b/management/internals/modules/reverseproxy/service/manager/manager_test.go similarity index 83% rename from management/internals/modules/reverseproxy/manager/manager_test.go rename to management/internals/modules/reverseproxy/service/manager/manager_test.go index 8e6b0e876..99409e235 100644 --- a/management/internals/modules/reverseproxy/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/service/manager/manager_test.go @@ -10,21 +10,21 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/metric/noop" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" + proxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy/manager" + rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" "github.com/netbirdio/netbird/management/server/account" "github.com/netbirdio/netbird/management/server/activity" - "github.com/netbirdio/netbird/management/server/integrations/extra_settings" "github.com/netbirdio/netbird/management/server/mock_server" nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/permissions/modules" "github.com/netbirdio/netbird/management/server/permissions/operations" - "github.com/netbirdio/netbird/management/server/settings" "github.com/netbirdio/netbird/management/server/store" "github.com/netbirdio/netbird/management/server/types" - "github.com/netbirdio/netbird/management/server/users" "github.com/netbirdio/netbird/shared/management/status" ) @@ -33,13 +33,13 @@ func TestInitializeServiceForCreate(t *testing.T) { accountID := "test-account" t.Run("successful initialization without cluster deriver", func(t *testing.T) { - mgr := &managerImpl{ + mgr := &Manager{ clusterDeriver: nil, } - service := &reverseproxy.Service{ + service := &rpservice.Service{ Domain: "example.com", - Auth: reverseproxy.AuthConfig{}, + Auth: rpservice.AuthConfig{}, } err := mgr.initializeServiceForCreate(ctx, accountID, service) @@ -53,12 +53,12 @@ func TestInitializeServiceForCreate(t *testing.T) { }) t.Run("verifies session keys are different", func(t *testing.T) { - mgr := &managerImpl{ + mgr := &Manager{ clusterDeriver: nil, } - service1 := &reverseproxy.Service{Domain: "test1.com", Auth: reverseproxy.AuthConfig{}} - service2 := &reverseproxy.Service{Domain: "test2.com", Auth: reverseproxy.AuthConfig{}} + service1 := &rpservice.Service{Domain: "test1.com", Auth: rpservice.AuthConfig{}} + service2 := &rpservice.Service{Domain: "test2.com", Auth: rpservice.AuthConfig{}} err1 := mgr.initializeServiceForCreate(ctx, accountID, service1) err2 := mgr.initializeServiceForCreate(ctx, accountID, service2) @@ -100,7 +100,7 @@ func TestCheckDomainAvailable(t *testing.T) { setupMock: func(ms *store.MockStore) { ms.EXPECT(). GetServiceByDomain(ctx, accountID, "exists.com"). - Return(&reverseproxy.Service{ID: "existing-id", Domain: "exists.com"}, nil) + Return(&rpservice.Service{ID: "existing-id", Domain: "exists.com"}, nil) }, expectedError: true, errorType: status.AlreadyExists, @@ -112,7 +112,7 @@ func TestCheckDomainAvailable(t *testing.T) { setupMock: func(ms *store.MockStore) { ms.EXPECT(). GetServiceByDomain(ctx, accountID, "exists.com"). - Return(&reverseproxy.Service{ID: "service-123", Domain: "exists.com"}, nil) + Return(&rpservice.Service{ID: "service-123", Domain: "exists.com"}, nil) }, expectedError: false, }, @@ -123,7 +123,7 @@ func TestCheckDomainAvailable(t *testing.T) { setupMock: func(ms *store.MockStore) { ms.EXPECT(). GetServiceByDomain(ctx, accountID, "exists.com"). - Return(&reverseproxy.Service{ID: "service-123", Domain: "exists.com"}, nil) + Return(&rpservice.Service{ID: "service-123", Domain: "exists.com"}, nil) }, expectedError: true, errorType: status.AlreadyExists, @@ -149,7 +149,7 @@ func TestCheckDomainAvailable(t *testing.T) { mockStore := store.NewMockStore(ctrl) tt.setupMock(mockStore) - mgr := &managerImpl{} + mgr := &Manager{} err := mgr.checkDomainAvailable(ctx, mockStore, accountID, tt.domain, tt.excludeServiceID) if tt.expectedError { @@ -179,7 +179,7 @@ func TestCheckDomainAvailable_EdgeCases(t *testing.T) { GetServiceByDomain(ctx, accountID, ""). Return(nil, status.Errorf(status.NotFound, "not found")) - mgr := &managerImpl{} + mgr := &Manager{} err := mgr.checkDomainAvailable(ctx, mockStore, accountID, "", "") assert.NoError(t, err) @@ -192,9 +192,9 @@ func TestCheckDomainAvailable_EdgeCases(t *testing.T) { mockStore := store.NewMockStore(ctrl) mockStore.EXPECT(). GetServiceByDomain(ctx, accountID, "test.com"). - Return(&reverseproxy.Service{ID: "some-id", Domain: "test.com"}, nil) + Return(&rpservice.Service{ID: "some-id", Domain: "test.com"}, nil) - mgr := &managerImpl{} + mgr := &Manager{} err := mgr.checkDomainAvailable(ctx, mockStore, accountID, "test.com", "") assert.Error(t, err) @@ -212,7 +212,7 @@ func TestCheckDomainAvailable_EdgeCases(t *testing.T) { GetServiceByDomain(ctx, accountID, "nil.com"). Return(nil, nil) - mgr := &managerImpl{} + mgr := &Manager{} err := mgr.checkDomainAvailable(ctx, mockStore, accountID, "nil.com", "") assert.NoError(t, err) @@ -228,10 +228,10 @@ func TestPersistNewService(t *testing.T) { defer ctrl.Finish() mockStore := store.NewMockStore(ctrl) - service := &reverseproxy.Service{ + service := &rpservice.Service{ ID: "service-123", Domain: "new.com", - Targets: []*reverseproxy.Target{}, + Targets: []*rpservice.Target{}, } // Mock ExecuteInTransaction to execute the function immediately @@ -250,7 +250,7 @@ func TestPersistNewService(t *testing.T) { return fn(txMock) }) - mgr := &managerImpl{store: mockStore} + mgr := &Manager{store: mockStore} err := mgr.persistNewService(ctx, accountID, service) assert.NoError(t, err) @@ -261,10 +261,10 @@ func TestPersistNewService(t *testing.T) { defer ctrl.Finish() mockStore := store.NewMockStore(ctrl) - service := &reverseproxy.Service{ + service := &rpservice.Service{ ID: "service-123", Domain: "existing.com", - Targets: []*reverseproxy.Target{}, + Targets: []*rpservice.Target{}, } mockStore.EXPECT(). @@ -273,12 +273,12 @@ func TestPersistNewService(t *testing.T) { txMock := store.NewMockStore(ctrl) txMock.EXPECT(). GetServiceByDomain(ctx, accountID, "existing.com"). - Return(&reverseproxy.Service{ID: "other-id", Domain: "existing.com"}, nil) + Return(&rpservice.Service{ID: "other-id", Domain: "existing.com"}, nil) return fn(txMock) }) - mgr := &managerImpl{store: mockStore} + mgr := &Manager{store: mockStore} err := mgr.persistNewService(ctx, accountID, service) require.Error(t, err) @@ -288,21 +288,21 @@ func TestPersistNewService(t *testing.T) { }) } func TestPreserveExistingAuthSecrets(t *testing.T) { - mgr := &managerImpl{} + mgr := &Manager{} t.Run("preserve password when empty", func(t *testing.T) { - existing := &reverseproxy.Service{ - Auth: reverseproxy.AuthConfig{ - PasswordAuth: &reverseproxy.PasswordAuthConfig{ + existing := &rpservice.Service{ + Auth: rpservice.AuthConfig{ + PasswordAuth: &rpservice.PasswordAuthConfig{ Enabled: true, Password: "hashed-password", }, }, } - updated := &reverseproxy.Service{ - Auth: reverseproxy.AuthConfig{ - PasswordAuth: &reverseproxy.PasswordAuthConfig{ + updated := &rpservice.Service{ + Auth: rpservice.AuthConfig{ + PasswordAuth: &rpservice.PasswordAuthConfig{ Enabled: true, Password: "", }, @@ -315,18 +315,18 @@ func TestPreserveExistingAuthSecrets(t *testing.T) { }) t.Run("preserve pin when empty", func(t *testing.T) { - existing := &reverseproxy.Service{ - Auth: reverseproxy.AuthConfig{ - PinAuth: &reverseproxy.PINAuthConfig{ + existing := &rpservice.Service{ + Auth: rpservice.AuthConfig{ + PinAuth: &rpservice.PINAuthConfig{ Enabled: true, Pin: "hashed-pin", }, }, } - updated := &reverseproxy.Service{ - Auth: reverseproxy.AuthConfig{ - PinAuth: &reverseproxy.PINAuthConfig{ + updated := &rpservice.Service{ + Auth: rpservice.AuthConfig{ + PinAuth: &rpservice.PINAuthConfig{ Enabled: true, Pin: "", }, @@ -339,18 +339,18 @@ func TestPreserveExistingAuthSecrets(t *testing.T) { }) t.Run("do not preserve when password is provided", func(t *testing.T) { - existing := &reverseproxy.Service{ - Auth: reverseproxy.AuthConfig{ - PasswordAuth: &reverseproxy.PasswordAuthConfig{ + existing := &rpservice.Service{ + Auth: rpservice.AuthConfig{ + PasswordAuth: &rpservice.PasswordAuthConfig{ Enabled: true, Password: "old-password", }, }, } - updated := &reverseproxy.Service{ - Auth: reverseproxy.AuthConfig{ - PasswordAuth: &reverseproxy.PasswordAuthConfig{ + updated := &rpservice.Service{ + Auth: rpservice.AuthConfig{ + PasswordAuth: &rpservice.PasswordAuthConfig{ Enabled: true, Password: "new-password", }, @@ -365,10 +365,10 @@ func TestPreserveExistingAuthSecrets(t *testing.T) { } func TestPreserveServiceMetadata(t *testing.T) { - mgr := &managerImpl{} + mgr := &Manager{} - existing := &reverseproxy.Service{ - Meta: reverseproxy.ServiceMeta{ + existing := &rpservice.Service{ + Meta: rpservice.Meta{ CertificateIssuedAt: func() *time.Time { t := time.Now(); return &t }(), Status: "active", }, @@ -376,7 +376,7 @@ func TestPreserveServiceMetadata(t *testing.T) { SessionPublicKey: "public-key", } - updated := &reverseproxy.Service{ + updated := &rpservice.Service{ Domain: "updated.com", } @@ -400,31 +400,32 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) { IP: net.ParseIP("100.64.0.1"), } - newEphemeralService := func() *reverseproxy.Service { - return &reverseproxy.Service{ + newEphemeralService := func() *rpservice.Service { + return &rpservice.Service{ ID: serviceID, AccountID: accountID, Name: "test-service", Domain: "test.example.com", - Source: reverseproxy.SourceEphemeral, + Source: rpservice.SourceEphemeral, SourcePeer: ownerPeerID, } } - newPermanentService := func() *reverseproxy.Service { - return &reverseproxy.Service{ + newPermanentService := func() *rpservice.Service { + return &rpservice.Service{ ID: serviceID, AccountID: accountID, Name: "api-service", Domain: "api.example.com", - Source: reverseproxy.SourcePermanent, + Source: rpservice.SourcePermanent, } } newProxyServer := func(t *testing.T) *nbgrpc.ProxyServiceServer { t.Helper() - tokenStore := nbgrpc.NewOneTimeTokenStore(1 * time.Hour) - srv := nbgrpc.NewProxyServiceServer(nil, tokenStore, nbgrpc.ProxyOIDCConfig{}, nil, nil) + tokenStore, err := nbgrpc.NewOneTimeTokenStore(context.Background(), 1*time.Hour, 10*time.Minute, 100) + require.NoError(t, err) + srv := nbgrpc.NewProxyServiceServer(nil, tokenStore, nbgrpc.ProxyOIDCConfig{}, nil, nil, nil) t.Cleanup(srv.Close) return srv } @@ -458,10 +459,14 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) { GetPeerByID(ctx, store.LockingStrengthNone, accountID, ownerPeerID). Return(testPeer, nil) - mgr := &managerImpl{ - store: mockStore, - accountManager: mockAccountMgr, - proxyGRPCServer: newProxyServer(t), + mgr := &Manager{ + store: mockStore, + accountManager: mockAccountMgr, + proxyController: func() proxy.Controller { + c, err := proxymanager.NewGRPCController(newProxyServer(t), noop.NewMeterProvider().Meter("")) + require.NoError(t, err) + return c + }(), } err := mgr.deletePeerService(ctx, accountID, ownerPeerID, serviceID, activity.PeerServiceUnexposed) @@ -485,7 +490,7 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) { return fn(txMock) }) - mgr := &managerImpl{ + mgr := &Manager{ store: mockStore, } @@ -514,7 +519,7 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) { return fn(txMock) }) - mgr := &managerImpl{ + mgr := &Manager{ store: mockStore, } @@ -556,10 +561,14 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) { GetPeerByID(ctx, store.LockingStrengthNone, accountID, ownerPeerID). Return(testPeer, nil) - mgr := &managerImpl{ - store: mockStore, - accountManager: mockAccountMgr, - proxyGRPCServer: newProxyServer(t), + mgr := &Manager{ + store: mockStore, + accountManager: mockAccountMgr, + proxyController: func() proxy.Controller { + c, err := proxymanager.NewGRPCController(newProxyServer(t), noop.NewMeterProvider().Meter("")) + require.NoError(t, err) + return c + }(), } err := mgr.deletePeerService(ctx, accountID, ownerPeerID, serviceID, activity.PeerServiceExposeExpired) @@ -596,10 +605,14 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) { GetPeerByID(ctx, store.LockingStrengthNone, accountID, ownerPeerID). Return(testPeer, nil) - mgr := &managerImpl{ - store: mockStore, - accountManager: mockAccountMgr, - proxyGRPCServer: newProxyServer(t), + mgr := &Manager{ + store: mockStore, + accountManager: mockAccountMgr, + proxyController: func() proxy.Controller { + c, err := proxymanager.NewGRPCController(newProxyServer(t), noop.NewMeterProvider().Meter("")) + require.NoError(t, err) + return c + }(), } err := mgr.deletePeerService(ctx, accountID, ownerPeerID, serviceID, activity.PeerServiceUnexposed) @@ -612,19 +625,6 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) { }) } -// noopExtraSettings is a minimal extra_settings.Manager for tests without external integrations. -type noopExtraSettings struct{} - -func (n *noopExtraSettings) GetExtraSettings(_ context.Context, _ string) (*types.ExtraSettings, error) { - return &types.ExtraSettings{}, nil -} - -func (n *noopExtraSettings) UpdateExtraSettings(_ context.Context, _, _ string, _ *types.ExtraSettings) (bool, error) { - return false, nil -} - -var _ extra_settings.Manager = (*noopExtraSettings)(nil) - // testClusterDeriver is a minimal ClusterDeriver that returns a fixed domain list. type testClusterDeriver struct { domains []string @@ -646,7 +646,7 @@ const ( ) // setupIntegrationTest creates a real SQLite store with seeded test data for integration tests. -func setupIntegrationTest(t *testing.T) (*managerImpl, store.Store) { +func setupIntegrationTest(t *testing.T) (*Manager, store.Store) { t.Helper() ctx := context.Background() @@ -694,30 +694,28 @@ func setupIntegrationTest(t *testing.T) (*managerImpl, store.Store) { require.NoError(t, err) permsMgr := permissions.NewManager(testStore) - usersMgr := users.NewManager(testStore) - settingsMgr := settings.NewManager(testStore, usersMgr, &noopExtraSettings{}, permsMgr, settings.IdpConfig{}) - var storedEvents []activity.Activity accountMgr := &mock_server.MockAccountManager{ - StoreEventFunc: func(_ context.Context, _, _, _ string, activityID activity.ActivityDescriber, _ map[string]any) { - storedEvents = append(storedEvents, activityID.(activity.Activity)) - }, + StoreEventFunc: func(_ context.Context, _, _, _ string, _ activity.ActivityDescriber, _ map[string]any) {}, UpdateAccountPeersFunc: func(_ context.Context, _ string) {}, GetGroupByNameFunc: func(ctx context.Context, accountID, groupName string) (*types.Group, error) { return testStore.GetGroupByName(ctx, store.LockingStrengthNone, groupName, accountID) }, } - tokenStore := nbgrpc.NewOneTimeTokenStore(1 * time.Hour) - proxySrv := nbgrpc.NewProxyServiceServer(nil, tokenStore, nbgrpc.ProxyOIDCConfig{}, nil, nil) + tokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, 1*time.Hour, 10*time.Minute, 100) + require.NoError(t, err) + proxySrv := nbgrpc.NewProxyServiceServer(nil, tokenStore, nbgrpc.ProxyOIDCConfig{}, nil, nil, nil) t.Cleanup(proxySrv.Close) - mgr := &managerImpl{ + proxyController, err := proxymanager.NewGRPCController(proxySrv, noop.NewMeterProvider().Meter("")) + require.NoError(t, err) + + mgr := &Manager{ store: testStore, accountManager: accountMgr, permissionsManager: permsMgr, - settingsManager: settingsMgr, - proxyGRPCServer: proxySrv, + proxyController: proxyController, clusterDeriver: &testClusterDeriver{ domains: []string{"test.netbird.io"}, }, @@ -791,7 +789,7 @@ func Test_validateExposePermission(t *testing.T) { ctrl := gomock.NewController(t) mockStore := store.NewMockStore(ctrl) mockStore.EXPECT().GetAccountSettings(gomock.Any(), gomock.Any(), testAccountID).Return(nil, errors.New("store error")) - mgr := &managerImpl{store: mockStore} + mgr := &Manager{store: mockStore} err := mgr.validateExposePermission(ctx, testAccountID, testPeerID) require.Error(t, err) assert.Contains(t, err.Error(), "get account settings") @@ -804,7 +802,7 @@ func TestCreateServiceFromPeer(t *testing.T) { t.Run("creates service with random domain", func(t *testing.T) { mgr, testStore := setupIntegrationTest(t) - req := &reverseproxy.ExposeServiceRequest{ + req := &rpservice.ExposeServiceRequest{ Port: 8080, Protocol: "http", } @@ -819,7 +817,7 @@ func TestCreateServiceFromPeer(t *testing.T) { persisted, err := testStore.GetServiceByDomain(ctx, testAccountID, resp.Domain) require.NoError(t, err) assert.Equal(t, resp.Domain, persisted.Domain) - assert.Equal(t, reverseproxy.SourceEphemeral, persisted.Source, "source should be ephemeral") + assert.Equal(t, rpservice.SourceEphemeral, persisted.Source, "source should be ephemeral") assert.Equal(t, testPeerID, persisted.SourcePeer, "source peer should be set") assert.NotNil(t, persisted.Meta.LastRenewedAt, "last renewed should be set") }) @@ -827,7 +825,7 @@ func TestCreateServiceFromPeer(t *testing.T) { t.Run("creates service with custom domain", func(t *testing.T) { mgr, _ := setupIntegrationTest(t) - req := &reverseproxy.ExposeServiceRequest{ + req := &rpservice.ExposeServiceRequest{ Port: 80, Protocol: "http", Domain: "example.com", @@ -848,7 +846,7 @@ func TestCreateServiceFromPeer(t *testing.T) { err = testStore.SaveAccountSettings(ctx, testAccountID, s) require.NoError(t, err) - req := &reverseproxy.ExposeServiceRequest{ + req := &rpservice.ExposeServiceRequest{ Port: 8080, Protocol: "http", } @@ -861,7 +859,7 @@ func TestCreateServiceFromPeer(t *testing.T) { t.Run("validates request fields", func(t *testing.T) { mgr, _ := setupIntegrationTest(t) - req := &reverseproxy.ExposeServiceRequest{ + req := &rpservice.ExposeServiceRequest{ Port: 0, Protocol: "http", } @@ -875,67 +873,67 @@ func TestCreateServiceFromPeer(t *testing.T) { func TestExposeServiceRequestValidate(t *testing.T) { tests := []struct { name string - req reverseproxy.ExposeServiceRequest + req rpservice.ExposeServiceRequest wantErr string }{ { name: "valid http request", - req: reverseproxy.ExposeServiceRequest{Port: 8080, Protocol: "http"}, + req: rpservice.ExposeServiceRequest{Port: 8080, Protocol: "http"}, wantErr: "", }, { name: "valid https request with pin", - req: reverseproxy.ExposeServiceRequest{Port: 443, Protocol: "https", Pin: "123456"}, + req: rpservice.ExposeServiceRequest{Port: 443, Protocol: "https", Pin: "123456"}, wantErr: "", }, { name: "port zero rejected", - req: reverseproxy.ExposeServiceRequest{Port: 0, Protocol: "http"}, + req: rpservice.ExposeServiceRequest{Port: 0, Protocol: "http"}, wantErr: "port must be between 1 and 65535", }, { name: "negative port rejected", - req: reverseproxy.ExposeServiceRequest{Port: -1, Protocol: "http"}, + req: rpservice.ExposeServiceRequest{Port: -1, Protocol: "http"}, wantErr: "port must be between 1 and 65535", }, { name: "port above 65535 rejected", - req: reverseproxy.ExposeServiceRequest{Port: 65536, Protocol: "http"}, + req: rpservice.ExposeServiceRequest{Port: 65536, Protocol: "http"}, wantErr: "port must be between 1 and 65535", }, { name: "unsupported protocol", - req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "tcp"}, + req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "tcp"}, wantErr: "unsupported protocol", }, { name: "invalid pin format", - req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "http", Pin: "abc"}, + req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "http", Pin: "abc"}, wantErr: "invalid pin", }, { name: "pin too short", - req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "http", Pin: "12345"}, + req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "http", Pin: "12345"}, wantErr: "invalid pin", }, { name: "valid 6-digit pin", - req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "http", Pin: "000000"}, + req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "http", Pin: "000000"}, wantErr: "", }, { name: "empty user group name", - req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "http", UserGroups: []string{"valid", ""}}, + req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "http", UserGroups: []string{"valid", ""}}, wantErr: "user group name cannot be empty", }, { name: "invalid name prefix", - req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "http", NamePrefix: "INVALID"}, + req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "http", NamePrefix: "INVALID"}, wantErr: "invalid name prefix", }, { name: "valid name prefix", - req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "http", NamePrefix: "my-service"}, + req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "http", NamePrefix: "my-service"}, wantErr: "", }, } @@ -953,7 +951,7 @@ func TestExposeServiceRequestValidate(t *testing.T) { } t.Run("nil receiver", func(t *testing.T) { - var req *reverseproxy.ExposeServiceRequest + var req *rpservice.ExposeServiceRequest err := req.Validate() require.Error(t, err) assert.Contains(t, err.Error(), "request cannot be nil") @@ -967,7 +965,7 @@ func TestDeleteServiceFromPeer_ByDomain(t *testing.T) { mgr, testStore := setupIntegrationTest(t) // First create a service - req := &reverseproxy.ExposeServiceRequest{ + req := &rpservice.ExposeServiceRequest{ Port: 8080, Protocol: "http", } @@ -986,7 +984,7 @@ func TestDeleteServiceFromPeer_ByDomain(t *testing.T) { t.Run("expire uses correct activity", func(t *testing.T) { mgr, _ := setupIntegrationTest(t) - req := &reverseproxy.ExposeServiceRequest{ + req := &rpservice.ExposeServiceRequest{ Port: 8080, Protocol: "http", } @@ -1004,7 +1002,7 @@ func TestStopServiceFromPeer(t *testing.T) { t.Run("stops service by domain", func(t *testing.T) { mgr, testStore := setupIntegrationTest(t) - req := &reverseproxy.ExposeServiceRequest{ + req := &rpservice.ExposeServiceRequest{ Port: 8080, Protocol: "http", } @@ -1023,7 +1021,7 @@ func TestDeleteService_UntracksEphemeralExpose(t *testing.T) { ctx := context.Background() mgr, _ := setupIntegrationTest(t) - resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{ + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ Port: 8080, Protocol: "http", }) @@ -1041,7 +1039,7 @@ func TestDeleteService_UntracksEphemeralExpose(t *testing.T) { assert.Equal(t, 0, mgr.exposeTracker.CountPeerExposes(testPeerID), "expose should be untracked after API delete") // A new expose should succeed (not blocked by stale tracking) - _, err = mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{ + _, err = mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ Port: 9090, Protocol: "http", }) @@ -1053,7 +1051,7 @@ func TestDeleteAllServices_UntracksEphemeralExposes(t *testing.T) { mgr, _ := setupIntegrationTest(t) for i := range 3 { - _, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{ + _, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ Port: 8080 + i, Protocol: "http", }) @@ -1074,7 +1072,7 @@ func TestRenewServiceFromPeer(t *testing.T) { t.Run("renews tracked expose", func(t *testing.T) { mgr, _ := setupIntegrationTest(t) - resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{ + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ Port: 8080, Protocol: "http", }) @@ -1129,25 +1127,32 @@ func TestDeleteService_DeletesTargets(t *testing.T) { mockPerms := permissions.NewMockManager(ctrl) mockAcct := account.NewMockManager(ctrl) - mockGRPC := &nbgrpc.ProxyServiceServer{} - mgr := &managerImpl{ + tokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, 1*time.Hour, 10*time.Minute, 100) + require.NoError(t, err) + proxySrv := nbgrpc.NewProxyServiceServer(nil, tokenStore, nbgrpc.ProxyOIDCConfig{}, nil, nil, nil) + t.Cleanup(proxySrv.Close) + + proxyController, err := proxymanager.NewGRPCController(proxySrv, noop.NewMeterProvider().Meter("")) + require.NoError(t, err) + + mgr := &Manager{ store: sqlStore, permissionsManager: mockPerms, accountManager: mockAcct, - proxyGRPCServer: mockGRPC, + proxyController: proxyController, } - service := &reverseproxy.Service{ + service := &rpservice.Service{ ID: "service-1", AccountID: accountID, Domain: "test.example.com", ProxyCluster: "cluster1", Enabled: true, - Targets: []*reverseproxy.Target{ - {AccountID: accountID, ServiceID: "service-1", TargetType: reverseproxy.TargetTypePeer, TargetId: "peer-1"}, - {AccountID: accountID, ServiceID: "service-1", TargetType: reverseproxy.TargetTypePeer, TargetId: "peer-2"}, - {AccountID: accountID, ServiceID: "service-1", TargetType: reverseproxy.TargetTypePeer, TargetId: "peer-3"}, + Targets: []*rpservice.Target{ + {AccountID: accountID, ServiceID: "service-1", TargetType: rpservice.TargetTypePeer, TargetId: "peer-1"}, + {AccountID: accountID, ServiceID: "service-1", TargetType: rpservice.TargetTypePeer, TargetId: "peer-2"}, + {AccountID: accountID, ServiceID: "service-1", TargetType: rpservice.TargetTypePeer, TargetId: "peer-3"}, }, } diff --git a/management/internals/modules/reverseproxy/reverseproxy.go b/management/internals/modules/reverseproxy/service/service.go similarity index 94% rename from management/internals/modules/reverseproxy/reverseproxy.go rename to management/internals/modules/reverseproxy/service/service.go index 10226710b..46ae185d6 100644 --- a/management/internals/modules/reverseproxy/reverseproxy.go +++ b/management/internals/modules/reverseproxy/service/service.go @@ -1,4 +1,4 @@ -package reverseproxy +package service import ( "crypto/rand" @@ -14,6 +14,7 @@ import ( "github.com/rs/xid" log "github.com/sirupsen/logrus" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" "github.com/netbirdio/netbird/shared/hash/argon2id" "github.com/netbirdio/netbird/util/crypt" @@ -29,15 +30,15 @@ const ( Delete Operation = "delete" ) -type ProxyStatus string +type Status string const ( - StatusPending ProxyStatus = "pending" - StatusActive ProxyStatus = "active" - StatusTunnelNotCreated ProxyStatus = "tunnel_not_created" - StatusCertificatePending ProxyStatus = "certificate_pending" - StatusCertificateFailed ProxyStatus = "certificate_failed" - StatusError ProxyStatus = "error" + StatusPending Status = "pending" + StatusActive Status = "active" + StatusTunnelNotCreated Status = "tunnel_not_created" + StatusCertificatePending Status = "certificate_pending" + StatusCertificateFailed Status = "certificate_failed" + StatusError Status = "error" TargetTypePeer = "peer" TargetTypeHost = "host" @@ -111,14 +112,7 @@ func (a *AuthConfig) ClearSecrets() { } } -type OIDCValidationConfig struct { - Issuer string - Audiences []string - KeysLocation string - MaxTokenAgeSeconds int64 -} - -type ServiceMeta struct { +type Meta struct { CreatedAt time.Time CertificateIssuedAt *time.Time Status string @@ -135,11 +129,11 @@ type Service struct { Enabled bool PassHostHeader bool RewriteRedirects bool - Auth AuthConfig `gorm:"serializer:json"` - Meta ServiceMeta `gorm:"embedded;embeddedPrefix:meta_"` - SessionPrivateKey string `gorm:"column:session_private_key"` - SessionPublicKey string `gorm:"column:session_public_key"` - Source string `gorm:"default:'permanent'"` + Auth AuthConfig `gorm:"serializer:json"` + Meta Meta `gorm:"embedded;embeddedPrefix:meta_"` + SessionPrivateKey string `gorm:"column:session_private_key"` + SessionPublicKey string `gorm:"column:session_public_key"` + Source string `gorm:"default:'permanent'"` SourcePeer string } @@ -165,7 +159,7 @@ func NewService(accountID, name, domain, proxyCluster string, targets []*Target, // only be called during initial creation, not for updates. func (s *Service) InitNewRecord() { s.ID = xid.New().String() - s.Meta = ServiceMeta{ + s.Meta = Meta{ CreatedAt: time.Now(), Status: string(StatusPending), } @@ -239,7 +233,7 @@ func (s *Service) ToAPIResponse() *api.Service { return resp } -func (s *Service) ToProtoMapping(operation Operation, authToken string, oidcConfig OIDCValidationConfig) *proto.ProxyMapping { +func (s *Service) ToProtoMapping(operation Operation, authToken string, oidcConfig proxy.OIDCValidationConfig) *proto.ProxyMapping { pathMappings := make([]*proto.PathMapping, 0, len(s.Targets)) for _, target := range s.Targets { if !target.Enabled { diff --git a/management/internals/modules/reverseproxy/reverseproxy_test.go b/management/internals/modules/reverseproxy/service/service_test.go similarity index 98% rename from management/internals/modules/reverseproxy/reverseproxy_test.go rename to management/internals/modules/reverseproxy/service/service_test.go index cb75ee61f..8b09ab827 100644 --- a/management/internals/modules/reverseproxy/reverseproxy_test.go +++ b/management/internals/modules/reverseproxy/service/service_test.go @@ -1,4 +1,4 @@ -package reverseproxy +package service import ( "errors" @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" "github.com/netbirdio/netbird/shared/hash/argon2id" "github.com/netbirdio/netbird/shared/management/proto" ) @@ -109,7 +110,7 @@ func TestIsDefaultPort(t *testing.T) { } func TestToProtoMapping_PortInTargetURL(t *testing.T) { - oidcConfig := OIDCValidationConfig{} + oidcConfig := proxy.OIDCValidationConfig{} tests := []struct { name string @@ -202,7 +203,7 @@ func TestToProtoMapping_DisabledTargetSkipped(t *testing.T) { {TargetId: "peer-2", TargetType: TargetTypePeer, Host: "10.0.0.2", Port: 9090, Protocol: "http", Enabled: true}, }, } - pm := rp.ToProtoMapping(Create, "token", OIDCValidationConfig{}) + pm := rp.ToProtoMapping(Create, "token", proxy.OIDCValidationConfig{}) require.Len(t, pm.Path, 1) assert.Equal(t, "http://10.0.0.2:9090/", pm.Path[0].Target) } @@ -219,7 +220,7 @@ func TestToProtoMapping_OperationTypes(t *testing.T) { } for _, tt := range tests { t.Run(string(tt.op), func(t *testing.T) { - pm := rp.ToProtoMapping(tt.op, "", OIDCValidationConfig{}) + pm := rp.ToProtoMapping(tt.op, "", proxy.OIDCValidationConfig{}) assert.Equal(t, tt.want, pm.Type) }) } diff --git a/management/internals/server/boot.go b/management/internals/server/boot.go index 45c1b763f..2049f0051 100644 --- a/management/internals/server/boot.go +++ b/management/internals/server/boot.go @@ -94,7 +94,7 @@ func (s *BaseServer) EventStore() activity.Store { func (s *BaseServer) APIHandler() http.Handler { return Create(s, func() http.Handler { - httpAPIHandler, err := nbhttp.NewAPIHandler(context.Background(), s.AccountManager(), s.NetworksManager(), s.ResourcesManager(), s.RoutesManager(), s.GroupsManager(), s.GeoLocationManager(), s.AuthManager(), s.Metrics(), s.IntegratedValidator(), s.ProxyController(), s.PermissionsManager(), s.PeersManager(), s.SettingsManager(), s.ZonesManager(), s.RecordsManager(), s.NetworkMapController(), s.IdpManager(), s.ReverseProxyManager(), s.ReverseProxyDomainManager(), s.AccessLogsManager(), s.ReverseProxyGRPCServer(), s.Config.ReverseProxy.TrustedHTTPProxies) + httpAPIHandler, err := nbhttp.NewAPIHandler(context.Background(), s.AccountManager(), s.NetworksManager(), s.ResourcesManager(), s.RoutesManager(), s.GroupsManager(), s.GeoLocationManager(), s.AuthManager(), s.Metrics(), s.IntegratedValidator(), s.ProxyController(), s.PermissionsManager(), s.PeersManager(), s.SettingsManager(), s.ZonesManager(), s.RecordsManager(), s.NetworkMapController(), s.IdpManager(), s.ServiceManager(), s.ReverseProxyDomainManager(), s.AccessLogsManager(), s.ReverseProxyGRPCServer(), s.Config.ReverseProxy.TrustedHTTPProxies) if err != nil { log.Fatalf("failed to create API handler: %v", err) } @@ -134,7 +134,7 @@ func (s *BaseServer) GRPCServer() *grpc.Server { if s.Config.HttpConfig.LetsEncryptDomain != "" { certManager, err := encryption.CreateCertManager(s.Config.Datadir, s.Config.HttpConfig.LetsEncryptDomain) if err != nil { - log.Fatalf("failed to create certificate manager: %v", err) + log.Fatalf("failed to create certificate service: %v", err) } transportCredentials := credentials.NewTLS(certManager.TLSConfig()) gRPCOpts = append(gRPCOpts, grpc.Creds(transportCredentials)) @@ -152,10 +152,10 @@ func (s *BaseServer) GRPCServer() *grpc.Server { if err != nil { log.Fatalf("failed to create management server: %v", err) } - reverseProxyMgr := s.ReverseProxyManager() - srv.SetReverseProxyManager(reverseProxyMgr) - if reverseProxyMgr != nil { - reverseProxyMgr.StartExposeReaper(context.Background()) + serviceMgr := s.ServiceManager() + srv.SetReverseProxyManager(serviceMgr) + if serviceMgr != nil { + serviceMgr.StartExposeReaper(context.Background()) } mgmtProto.RegisterManagementServiceServer(gRPCAPIHandler, srv) @@ -168,9 +168,10 @@ func (s *BaseServer) GRPCServer() *grpc.Server { func (s *BaseServer) ReverseProxyGRPCServer() *nbgrpc.ProxyServiceServer { return Create(s, func() *nbgrpc.ProxyServiceServer { - proxyService := nbgrpc.NewProxyServiceServer(s.AccessLogsManager(), s.ProxyTokenStore(), s.proxyOIDCConfig(), s.PeersManager(), s.UsersManager()) + proxyService := nbgrpc.NewProxyServiceServer(s.AccessLogsManager(), s.ProxyTokenStore(), s.proxyOIDCConfig(), s.PeersManager(), s.UsersManager(), s.ProxyManager()) s.AfterInit(func(s *BaseServer) { - proxyService.SetProxyManager(s.ReverseProxyManager()) + proxyService.SetServiceManager(s.ServiceManager()) + proxyService.SetProxyController(s.ServiceProxyController()) }) return proxyService }) @@ -193,7 +194,10 @@ func (s *BaseServer) proxyOIDCConfig() nbgrpc.ProxyOIDCConfig { func (s *BaseServer) ProxyTokenStore() *nbgrpc.OneTimeTokenStore { return Create(s, func() *nbgrpc.OneTimeTokenStore { - tokenStore := nbgrpc.NewOneTimeTokenStore(1 * time.Minute) + tokenStore, err := nbgrpc.NewOneTimeTokenStore(context.Background(), 5*time.Minute, 10*time.Minute, 100) + if err != nil { + log.Fatalf("failed to create proxy token store: %v", err) + } log.Info("One-time token store initialized for proxy authentication") return tokenStore }) diff --git a/management/internals/server/controllers.go b/management/internals/server/controllers.go index 4ea86900a..62ed659c0 100644 --- a/management/internals/server/controllers.go +++ b/management/internals/server/controllers.go @@ -6,6 +6,8 @@ import ( log "github.com/sirupsen/logrus" "github.com/netbirdio/management-integrations/integrations" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" + proxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy/manager" "github.com/netbirdio/netbird/management/internals/controllers/network_map" nmapcontroller "github.com/netbirdio/netbird/management/internals/controllers/network_map/controller" @@ -106,6 +108,16 @@ func (s *BaseServer) NetworkMapController() network_map.Controller { }) } +func (s *BaseServer) ServiceProxyController() proxy.Controller { + return Create(s, func() proxy.Controller { + controller, err := proxymanager.NewGRPCController(s.ReverseProxyGRPCServer(), s.Metrics().GetMeter()) + if err != nil { + log.Fatalf("failed to create service proxy controller: %v", err) + } + return controller + }) +} + func (s *BaseServer) AccountRequestBuffer() *server.AccountRequestBuffer { return Create(s, func() *server.AccountRequestBuffer { return server.NewAccountRequestBuffer(context.Background(), s.Store()) diff --git a/management/internals/server/modules.go b/management/internals/server/modules.go index faec5b99c..2383019e2 100644 --- a/management/internals/server/modules.go +++ b/management/internals/server/modules.go @@ -8,9 +8,11 @@ import ( "github.com/netbirdio/management-integrations/integrations" "github.com/netbirdio/netbird/management/internals/modules/peers" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain/manager" - nbreverseproxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/manager" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" + proxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy/manager" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" + nbreverseproxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service/manager" "github.com/netbirdio/netbird/management/internals/modules/zones" zonesManager "github.com/netbirdio/netbird/management/internals/modules/zones/manager" "github.com/netbirdio/netbird/management/internals/modules/zones/records" @@ -99,11 +101,11 @@ func (s *BaseServer) AccountManager() account.Manager { return Create(s, func() account.Manager { accountManager, err := server.BuildManager(context.Background(), s.Config, s.Store(), s.NetworkMapController(), s.JobManager(), s.IdpManager(), s.mgmtSingleAccModeDomain, s.EventStore(), s.GeoLocationManager(), s.userDeleteFromIDPEnabled, s.IntegratedValidator(), s.Metrics(), s.ProxyController(), s.SettingsManager(), s.PermissionsManager(), s.Config.DisableDefaultPolicy) if err != nil { - log.Fatalf("failed to create account manager: %v", err) + log.Fatalf("failed to create account service: %v", err) } s.AfterInit(func(s *BaseServer) { - accountManager.SetServiceManager(s.ReverseProxyManager()) + accountManager.SetServiceManager(s.ServiceManager()) }) return accountManager @@ -114,28 +116,28 @@ func (s *BaseServer) IdpManager() idp.Manager { return Create(s, func() idp.Manager { var idpManager idp.Manager var err error - // Use embedded IdP manager if embedded Dex is configured and enabled. + // Use embedded IdP service if embedded Dex is configured and enabled. // Legacy IdpManager won't be used anymore even if configured. if s.Config.EmbeddedIdP != nil && s.Config.EmbeddedIdP.Enabled { idpManager, err = idp.NewEmbeddedIdPManager(context.Background(), s.Config.EmbeddedIdP, s.Metrics()) if err != nil { - log.Fatalf("failed to create embedded IDP manager: %v", err) + log.Fatalf("failed to create embedded IDP service: %v", err) } return idpManager } - // Fall back to external IdP manager + // Fall back to external IdP service if s.Config.IdpManagerConfig != nil { idpManager, err = idp.NewManager(context.Background(), *s.Config.IdpManagerConfig, s.Metrics()) if err != nil { - log.Fatalf("failed to create IDP manager: %v", err) + log.Fatalf("failed to create IDP service: %v", err) } } return idpManager }) } -// OAuthConfigProvider is only relevant when we have an embedded IdP manager. Otherwise must be nil +// OAuthConfigProvider is only relevant when we have an embedded IdP service. Otherwise must be nil func (s *BaseServer) OAuthConfigProvider() idp.OAuthConfigProvider { if s.Config.EmbeddedIdP == nil || !s.Config.EmbeddedIdP.Enabled { return nil @@ -162,7 +164,7 @@ func (s *BaseServer) GroupsManager() groups.Manager { func (s *BaseServer) ResourcesManager() resources.Manager { return Create(s, func() resources.Manager { - return resources.NewManager(s.Store(), s.PermissionsManager(), s.GroupsManager(), s.AccountManager(), s.ReverseProxyManager()) + return resources.NewManager(s.Store(), s.PermissionsManager(), s.GroupsManager(), s.AccountManager(), s.ServiceManager()) }) } @@ -190,15 +192,25 @@ func (s *BaseServer) RecordsManager() records.Manager { }) } -func (s *BaseServer) ReverseProxyManager() reverseproxy.Manager { - return Create(s, func() reverseproxy.Manager { - return nbreverseproxy.NewManager(s.Store(), s.AccountManager(), s.PermissionsManager(), s.SettingsManager(), s.ReverseProxyGRPCServer(), s.ReverseProxyDomainManager()) +func (s *BaseServer) ServiceManager() service.Manager { + return Create(s, func() service.Manager { + return nbreverseproxy.NewManager(s.Store(), s.AccountManager(), s.PermissionsManager(), s.ServiceProxyController(), s.ReverseProxyDomainManager()) + }) +} + +func (s *BaseServer) ProxyManager() proxy.Manager { + return Create(s, func() proxy.Manager { + manager, err := proxymanager.NewManager(s.Store(), s.Metrics().GetMeter()) + if err != nil { + log.Fatalf("failed to create proxy manager: %v", err) + } + return manager }) } func (s *BaseServer) ReverseProxyDomainManager() *manager.Manager { return Create(s, func() *manager.Manager { - m := manager.NewManager(s.Store(), s.ReverseProxyGRPCServer(), s.PermissionsManager()) + m := manager.NewManager(s.Store(), s.ProxyManager(), s.PermissionsManager()) return &m }) } diff --git a/management/internals/server/server.go b/management/internals/server/server.go index 3f7f9c4c0..5149c338b 100644 --- a/management/internals/server/server.go +++ b/management/internals/server/server.go @@ -157,7 +157,7 @@ func (s *BaseServer) Start(ctx context.Context) error { // Eagerly create the gRPC server so that all AfterInit hooks are registered // before we iterate them. Lazy creation after the loop would miss hooks - // registered during GRPCServer() construction (e.g., SetProxyManager). + // registered during GRPCServer() construction (e.g., SetServiceManager). s.GRPCServer() for _, fn := range s.afterInit { diff --git a/management/internals/shared/grpc/expose_service.go b/management/internals/shared/grpc/expose_service.go index ef00354af..c444471b0 100644 --- a/management/internals/shared/grpc/expose_service.go +++ b/management/internals/shared/grpc/expose_service.go @@ -10,7 +10,7 @@ import ( "google.golang.org/grpc/status" "github.com/netbirdio/netbird/encryption" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" nbContext "github.com/netbirdio/netbird/management/server/context" nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/server/store" @@ -39,7 +39,7 @@ func (s *Server) CreateExpose(ctx context.Context, req *proto.EncryptedMessage) return nil, status.Errorf(codes.Internal, "reverse proxy manager not available") } - created, err := reverseProxyMgr.CreateServiceFromPeer(ctx, accountID, peer.ID, &reverseproxy.ExposeServiceRequest{ + created, err := reverseProxyMgr.CreateServiceFromPeer(ctx, accountID, peer.ID, &rpservice.ExposeServiceRequest{ NamePrefix: exposeReq.NamePrefix, Port: int(exposeReq.Port), Protocol: exposeProtocolToString(exposeReq.Protocol), @@ -167,14 +167,14 @@ func (s *Server) authenticateExposePeer(ctx context.Context, peerKey wgtypes.Key return accountID, peer, nil } -func (s *Server) getReverseProxyManager() reverseproxy.Manager { +func (s *Server) getReverseProxyManager() rpservice.Manager { s.reverseProxyMu.RLock() defer s.reverseProxyMu.RUnlock() return s.reverseProxyManager } // SetReverseProxyManager sets the reverse proxy manager on the server. -func (s *Server) SetReverseProxyManager(mgr reverseproxy.Manager) { +func (s *Server) SetReverseProxyManager(mgr rpservice.Manager) { s.reverseProxyMu.Lock() defer s.reverseProxyMu.Unlock() s.reverseProxyManager = mgr diff --git a/management/internals/shared/grpc/onetime_token.go b/management/internals/shared/grpc/onetime_token.go index dcc37c639..7999407db 100644 --- a/management/internals/shared/grpc/onetime_token.go +++ b/management/internals/shared/grpc/onetime_token.go @@ -1,28 +1,23 @@ package grpc import ( + "context" "crypto/rand" + "crypto/sha256" "crypto/subtle" "encoding/base64" + "encoding/hex" + "encoding/json" "fmt" - "sync" "time" + "github.com/eko/gocache/lib/v4/cache" + "github.com/eko/gocache/lib/v4/store" log "github.com/sirupsen/logrus" + + nbcache "github.com/netbirdio/netbird/management/server/cache" ) -// OneTimeTokenStore manages short-lived, single-use authentication tokens -// for proxy-to-management RPC authentication. Tokens are generated when -// a service is created and must be used exactly once by the proxy -// to authenticate a subsequent RPC call. -type OneTimeTokenStore struct { - tokens map[string]*tokenMetadata - mu sync.RWMutex - cleanup *time.Ticker - cleanupDone chan struct{} -} - -// tokenMetadata stores information about a one-time token type tokenMetadata struct { ServiceID string AccountID string @@ -30,20 +25,24 @@ type tokenMetadata struct { CreatedAt time.Time } -// NewOneTimeTokenStore creates a new token store with automatic cleanup -// of expired tokens. The cleanupInterval determines how often expired -// tokens are removed from memory. -func NewOneTimeTokenStore(cleanupInterval time.Duration) *OneTimeTokenStore { - store := &OneTimeTokenStore{ - tokens: make(map[string]*tokenMetadata), - cleanup: time.NewTicker(cleanupInterval), - cleanupDone: make(chan struct{}), +// OneTimeTokenStore manages single-use authentication tokens for proxy-to-management RPC. +// Supports both in-memory and Redis storage via NB_IDP_CACHE_REDIS_ADDRESS env var. +type OneTimeTokenStore struct { + cache *cache.Cache[string] + ctx context.Context +} + +// NewOneTimeTokenStore creates a token store with automatic backend selection +func NewOneTimeTokenStore(ctx context.Context, maxTimeout, cleanupInterval time.Duration, maxConn int) (*OneTimeTokenStore, error) { + cacheStore, err := nbcache.NewStore(ctx, maxTimeout, cleanupInterval, maxConn) + if err != nil { + return nil, fmt.Errorf("failed to create cache store: %w", err) } - // Start background cleanup goroutine - go store.cleanupExpired() - - return store + return &OneTimeTokenStore{ + cache: cache.New[string](cacheStore), + ctx: ctx, + }, nil } // GenerateToken creates a new cryptographically secure one-time token @@ -52,25 +51,30 @@ func NewOneTimeTokenStore(cleanupInterval time.Duration) *OneTimeTokenStore { // // Returns the generated token string or an error if random generation fails. func (s *OneTimeTokenStore) GenerateToken(accountID, serviceID string, ttl time.Duration) (string, error) { - // Generate 32 bytes (256 bits) of cryptographically secure random data randomBytes := make([]byte, 32) if _, err := rand.Read(randomBytes); err != nil { return "", fmt.Errorf("failed to generate random token: %w", err) } - // Encode as URL-safe base64 for easy transmission in gRPC token := base64.URLEncoding.EncodeToString(randomBytes) + hashedToken := hashToken(token) - s.mu.Lock() - defer s.mu.Unlock() - - s.tokens[token] = &tokenMetadata{ + metadata := &tokenMetadata{ ServiceID: serviceID, AccountID: accountID, ExpiresAt: time.Now().Add(ttl), CreatedAt: time.Now(), } + metadataJSON, err := json.Marshal(metadata) + if err != nil { + return "", fmt.Errorf("failed to serialize token metadata: %w", err) + } + + if err := s.cache.Set(s.ctx, hashedToken, string(metadataJSON), store.WithExpiration(ttl)); err != nil { + return "", fmt.Errorf("failed to store token: %w", err) + } + log.Debugf("Generated one-time token for proxy %s in account %s (expires in %s)", serviceID, accountID, ttl) @@ -88,80 +92,45 @@ func (s *OneTimeTokenStore) GenerateToken(accountID, serviceID string, ttl time. // - Account ID doesn't match // - Reverse proxy ID doesn't match func (s *OneTimeTokenStore) ValidateAndConsume(token, accountID, serviceID string) error { - s.mu.Lock() - defer s.mu.Unlock() + hashedToken := hashToken(token) - metadata, exists := s.tokens[token] - if !exists { - log.Warnf("Token validation failed: token not found (proxy: %s, account: %s)", - serviceID, accountID) + metadataJSON, err := s.cache.Get(s.ctx, hashedToken) + if err != nil { + log.Warnf("Token validation failed: token not found (proxy: %s, account: %s)", serviceID, accountID) return fmt.Errorf("invalid token") } - // Check expiration + metadata := &tokenMetadata{} + if err := json.Unmarshal([]byte(metadataJSON), metadata); err != nil { + log.Warnf("Token validation failed: failed to unmarshal metadata (proxy: %s, account: %s): %v", serviceID, accountID, err) + return fmt.Errorf("invalid token metadata") + } + if time.Now().After(metadata.ExpiresAt) { - delete(s.tokens, token) - log.Warnf("Token validation failed: token expired (proxy: %s, account: %s)", - serviceID, accountID) + log.Warnf("Token validation failed: token expired (proxy: %s, account: %s)", serviceID, accountID) return fmt.Errorf("token expired") } - // Validate account ID using constant-time comparison (prevents timing attacks) if subtle.ConstantTimeCompare([]byte(metadata.AccountID), []byte(accountID)) != 1 { - log.Warnf("Token validation failed: account ID mismatch (expected: %s, got: %s)", - metadata.AccountID, accountID) + log.Warnf("Token validation failed: account ID mismatch (expected: %s, got: %s)", metadata.AccountID, accountID) return fmt.Errorf("account ID mismatch") } - // Validate service ID using constant-time comparison if subtle.ConstantTimeCompare([]byte(metadata.ServiceID), []byte(serviceID)) != 1 { - log.Warnf("Token validation failed: service ID mismatch (expected: %s, got: %s)", - metadata.ServiceID, serviceID) + log.Warnf("Token validation failed: service ID mismatch (expected: %s, got: %s)", metadata.ServiceID, serviceID) return fmt.Errorf("service ID mismatch") } - // Delete token immediately to enforce single-use - delete(s.tokens, token) + if err := s.cache.Delete(s.ctx, hashedToken); err != nil { + log.Warnf("Token deletion warning (proxy: %s, account: %s): %v", serviceID, accountID, err) + } - log.Infof("Token validated and consumed for proxy %s in account %s", - serviceID, accountID) + log.Infof("Token validated and consumed for proxy %s in account %s", serviceID, accountID) return nil } -// cleanupExpired removes expired tokens in the background to prevent memory leaks -func (s *OneTimeTokenStore) cleanupExpired() { - for { - select { - case <-s.cleanup.C: - s.mu.Lock() - now := time.Now() - removed := 0 - for token, metadata := range s.tokens { - if now.After(metadata.ExpiresAt) { - delete(s.tokens, token) - removed++ - } - } - if removed > 0 { - log.Debugf("Cleaned up %d expired one-time tokens", removed) - } - s.mu.Unlock() - case <-s.cleanupDone: - return - } - } -} - -// Close stops the cleanup goroutine and releases resources -func (s *OneTimeTokenStore) Close() { - s.cleanup.Stop() - close(s.cleanupDone) -} - -// GetTokenCount returns the current number of tokens in the store (for debugging/metrics) -func (s *OneTimeTokenStore) GetTokenCount() int { - s.mu.RLock() - defer s.mu.RUnlock() - return len(s.tokens) +func hashToken(token string) string { + hash := sha256.Sum256([]byte(token)) + return hex.EncodeToString(hash[:]) } diff --git a/management/internals/shared/grpc/proxy.go b/management/internals/shared/grpc/proxy.go index e47ea5315..676757c1e 100644 --- a/management/internals/shared/grpc/proxy.go +++ b/management/internals/shared/grpc/proxy.go @@ -24,8 +24,9 @@ import ( "github.com/netbirdio/netbird/shared/management/domain" "github.com/netbirdio/netbird/management/internals/modules/peers" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" + rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/sessionkey" "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/management/server/users" @@ -58,14 +59,17 @@ type ProxyServiceServer struct { // Map of connected proxies: proxy_id -> proxy connection connectedProxies sync.Map - // Map of cluster address -> set of proxy IDs - clusterProxies sync.Map - // Manager for access logs accessLogManager accesslogs.Manager // Manager for reverse proxy operations - reverseProxyManager reverseproxy.Manager + serviceManager rpservice.Manager + + // ProxyController for service updates and cluster management + proxyController proxy.Controller + + // Manager for proxy connections + proxyManager proxy.Manager // Manager for peers peersManager peers.Manager @@ -104,7 +108,7 @@ type proxyConnection struct { } // NewProxyServiceServer creates a new proxy service server. -func NewProxyServiceServer(accessLogMgr accesslogs.Manager, tokenStore *OneTimeTokenStore, oidcConfig ProxyOIDCConfig, peersManager peers.Manager, usersManager users.Manager) *ProxyServiceServer { +func NewProxyServiceServer(accessLogMgr accesslogs.Manager, tokenStore *OneTimeTokenStore, oidcConfig ProxyOIDCConfig, peersManager peers.Manager, usersManager users.Manager, proxyMgr proxy.Manager) *ProxyServiceServer { ctx, cancel := context.WithCancel(context.Background()) s := &ProxyServiceServer{ accessLogManager: accessLogMgr, @@ -112,9 +116,11 @@ func NewProxyServiceServer(accessLogMgr accesslogs.Manager, tokenStore *OneTimeT tokenStore: tokenStore, peersManager: peersManager, usersManager: usersManager, + proxyManager: proxyMgr, pkceCleanupCancel: cancel, } go s.cleanupPKCEVerifiers(ctx) + go s.cleanupStaleProxies(ctx) return s } @@ -138,13 +144,33 @@ func (s *ProxyServiceServer) cleanupPKCEVerifiers(ctx context.Context) { } } +// cleanupStaleProxies periodically removes proxies that haven't sent heartbeat in 10 minutes +func (s *ProxyServiceServer) cleanupStaleProxies(ctx context.Context) { + ticker := time.NewTicker(5 * time.Minute) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if err := s.proxyManager.CleanupStale(ctx, 10*time.Minute); err != nil { + log.WithContext(ctx).Debugf("Failed to cleanup stale proxies: %v", err) + } + } + } +} + // Close stops background goroutines. func (s *ProxyServiceServer) Close() { s.pkceCleanupCancel() } -func (s *ProxyServiceServer) SetProxyManager(manager reverseproxy.Manager) { - s.reverseProxyManager = manager +func (s *ProxyServiceServer) SetServiceManager(manager rpservice.Manager) { + s.serviceManager = manager +} + +func (s *ProxyServiceServer) SetProxyController(proxyController proxy.Controller) { + s.proxyController = proxyController } // GetMappingUpdate handles the control stream with proxy clients @@ -179,7 +205,15 @@ func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest } s.connectedProxies.Store(proxyID, conn) - s.addToCluster(conn.address, proxyID) + if err := s.proxyController.RegisterProxyToCluster(ctx, conn.address, proxyID); err != nil { + log.WithContext(ctx).Warnf("Failed to register proxy %s in cluster: %v", proxyID, err) + } + + // Register proxy in database + if err := s.proxyManager.Connect(ctx, proxyID, proxyAddress, peerInfo); err != nil { + log.WithContext(ctx).Warnf("Failed to register proxy %s in database: %v", proxyID, err) + } + log.WithFields(log.Fields{ "proxy_id": proxyID, "address": proxyAddress, @@ -187,8 +221,15 @@ func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest "total_proxies": len(s.GetConnectedProxies()), }).Info("Proxy registered in cluster") defer func() { + if err := s.proxyManager.Disconnect(context.Background(), proxyID); err != nil { + log.Warnf("Failed to mark proxy %s as disconnected: %v", proxyID, err) + } + s.connectedProxies.Delete(proxyID) - s.removeFromCluster(conn.address, proxyID) + if err := s.proxyController.UnregisterProxyFromCluster(context.Background(), conn.address, proxyID); err != nil { + log.Warnf("Failed to unregister proxy %s from cluster: %v", proxyID, err) + } + cancel() log.Infof("Proxy %s disconnected", proxyID) }() @@ -200,6 +241,9 @@ func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest errChan := make(chan error, 2) go s.sender(conn, errChan) + // Start heartbeat goroutine + go s.heartbeat(connCtx, proxyID) + select { case err := <-errChan: return fmt.Errorf("send update to proxy %s: %w", proxyID, err) @@ -208,10 +252,27 @@ func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest } } +// heartbeat updates the proxy's last_seen timestamp every minute +func (s *ProxyServiceServer) heartbeat(ctx context.Context, proxyID string) { + ticker := time.NewTicker(1 * time.Minute) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if err := s.proxyManager.Heartbeat(ctx, proxyID); err != nil { + log.WithContext(ctx).Debugf("Failed to update proxy %s heartbeat: %v", proxyID, err) + } + case <-ctx.Done(): + return + } + } +} + // sendSnapshot sends the initial snapshot of services to the connecting proxy. // Only services matching the proxy's cluster address are sent. func (s *ProxyServiceServer) sendSnapshot(ctx context.Context, conn *proxyConnection) error { - services, err := s.reverseProxyManager.GetGlobalServices(ctx) + services, err := s.serviceManager.GetGlobalServices(ctx) if err != nil { return fmt.Errorf("get services from store: %w", err) } @@ -220,7 +281,7 @@ func (s *ProxyServiceServer) sendSnapshot(ctx context.Context, conn *proxyConnec return fmt.Errorf("proxy address is invalid") } - var filtered []*reverseproxy.Service + var filtered []*rpservice.Service for _, service := range services { if !service.Enabled { continue @@ -255,7 +316,7 @@ func (s *ProxyServiceServer) sendSnapshot(ctx context.Context, conn *proxyConnec if err := conn.stream.Send(&proto.GetMappingUpdateResponse{ Mapping: []*proto.ProxyMapping{ service.ToProtoMapping( - reverseproxy.Create, // Initial snapshot, all records are "new" for the proxy. + rpservice.Create, // Initial snapshot, all records are "new" for the proxy. token, s.GetOIDCValidationConfig(), ), @@ -389,61 +450,47 @@ func (s *ProxyServiceServer) GetConnectedProxyURLs() []string { return urls } -// addToCluster registers a proxy in a cluster. -func (s *ProxyServiceServer) addToCluster(clusterAddr, proxyID string) { - if clusterAddr == "" { - return - } - proxySet, _ := s.clusterProxies.LoadOrStore(clusterAddr, &sync.Map{}) - proxySet.(*sync.Map).Store(proxyID, struct{}{}) - log.Debugf("Added proxy %s to cluster %s", proxyID, clusterAddr) -} - -// removeFromCluster removes a proxy from a cluster. -func (s *ProxyServiceServer) removeFromCluster(clusterAddr, proxyID string) { - if clusterAddr == "" { - return - } - if proxySet, ok := s.clusterProxies.Load(clusterAddr); ok { - proxySet.(*sync.Map).Delete(proxyID) - log.Debugf("Removed proxy %s from cluster %s", proxyID, clusterAddr) - } -} - // SendServiceUpdateToCluster sends a service update to all proxy servers in a specific cluster. // If clusterAddr is empty, broadcasts to all connected proxy servers (backward compatibility). // For create/update operations a unique one-time auth token is generated per // proxy so that every replica can independently authenticate with management. -func (s *ProxyServiceServer) SendServiceUpdateToCluster(update *proto.GetMappingUpdateResponse, clusterAddr string) { +func (s *ProxyServiceServer) SendServiceUpdateToCluster(ctx context.Context, update *proto.ProxyMapping, clusterAddr string) { + updateResponse := &proto.GetMappingUpdateResponse{ + Mapping: []*proto.ProxyMapping{update}, + } + if clusterAddr == "" { - s.SendServiceUpdate(update) + s.SendServiceUpdate(updateResponse) return } - proxySet, ok := s.clusterProxies.Load(clusterAddr) - if !ok { - log.Debugf("No proxies connected for cluster %s", clusterAddr) + if s.proxyController == nil { + log.WithContext(ctx).Debugf("ProxyController not set, cannot send to cluster %s", clusterAddr) + return + } + + proxyIDs := s.proxyController.GetProxiesForCluster(clusterAddr) + if len(proxyIDs) == 0 { + log.WithContext(ctx).Debugf("No proxies connected for cluster %s", clusterAddr) return } log.Debugf("Sending service update to cluster %s", clusterAddr) - proxySet.(*sync.Map).Range(func(key, _ interface{}) bool { - proxyID := key.(string) + for _, proxyID := range proxyIDs { if connVal, ok := s.connectedProxies.Load(proxyID); ok { conn := connVal.(*proxyConnection) - msg := s.perProxyMessage(update, proxyID) + msg := s.perProxyMessage(updateResponse, proxyID) if msg == nil { - return true + continue } select { case conn.sendChan <- msg: - log.Debugf("Sent service update to proxy %s in cluster %s", proxyID, clusterAddr) + log.WithContext(ctx).Debugf("Sent service update with id %s to proxy %s in cluster %s", update.Id, proxyID, clusterAddr) default: - log.Warnf("Failed to send service update to proxy %s in cluster %s (channel full)", proxyID, clusterAddr) + log.WithContext(ctx).Warnf("Failed to send service update to proxy %s in cluster %s (channel full)", proxyID, clusterAddr) } } - return true - }) + } } // perProxyMessage returns a copy of update with a fresh one-time token for @@ -490,35 +537,8 @@ func shallowCloneMapping(m *proto.ProxyMapping) *proto.ProxyMapping { } } -// GetAvailableClusters returns information about all connected proxy clusters. -func (s *ProxyServiceServer) GetAvailableClusters() []ClusterInfo { - clusterCounts := make(map[string]int) - s.clusterProxies.Range(func(key, value interface{}) bool { - clusterAddr := key.(string) - proxySet := value.(*sync.Map) - count := 0 - proxySet.Range(func(_, _ interface{}) bool { - count++ - return true - }) - if count > 0 { - clusterCounts[clusterAddr] = count - } - return true - }) - - clusters := make([]ClusterInfo, 0, len(clusterCounts)) - for addr, count := range clusterCounts { - clusters = append(clusters, ClusterInfo{ - Address: addr, - ConnectedProxies: count, - }) - } - return clusters -} - func (s *ProxyServiceServer) Authenticate(ctx context.Context, req *proto.AuthenticateRequest) (*proto.AuthenticateResponse, error) { - service, err := s.reverseProxyManager.GetServiceByID(ctx, req.GetAccountId(), req.GetId()) + service, err := s.serviceManager.GetServiceByID(ctx, req.GetAccountId(), req.GetId()) if err != nil { log.WithContext(ctx).Debugf("failed to get service from store: %v", err) return nil, status.Errorf(codes.FailedPrecondition, "get service from store: %v", err) @@ -537,7 +557,7 @@ func (s *ProxyServiceServer) Authenticate(ctx context.Context, req *proto.Authen }, nil } -func (s *ProxyServiceServer) authenticateRequest(ctx context.Context, req *proto.AuthenticateRequest, service *reverseproxy.Service) (bool, string, proxyauth.Method) { +func (s *ProxyServiceServer) authenticateRequest(ctx context.Context, req *proto.AuthenticateRequest, service *rpservice.Service) (bool, string, proxyauth.Method) { switch v := req.GetRequest().(type) { case *proto.AuthenticateRequest_Pin: return s.authenticatePIN(ctx, req.GetId(), v, service.Auth.PinAuth) @@ -548,7 +568,7 @@ func (s *ProxyServiceServer) authenticateRequest(ctx context.Context, req *proto } } -func (s *ProxyServiceServer) authenticatePIN(ctx context.Context, serviceID string, req *proto.AuthenticateRequest_Pin, auth *reverseproxy.PINAuthConfig) (bool, string, proxyauth.Method) { +func (s *ProxyServiceServer) authenticatePIN(ctx context.Context, serviceID string, req *proto.AuthenticateRequest_Pin, auth *rpservice.PINAuthConfig) (bool, string, proxyauth.Method) { if auth == nil || !auth.Enabled { log.WithContext(ctx).Debugf("PIN authentication attempted but not enabled for service %s", serviceID) return false, "", "" @@ -562,7 +582,7 @@ func (s *ProxyServiceServer) authenticatePIN(ctx context.Context, serviceID stri return true, "pin-user", proxyauth.MethodPIN } -func (s *ProxyServiceServer) authenticatePassword(ctx context.Context, serviceID string, req *proto.AuthenticateRequest_Password, auth *reverseproxy.PasswordAuthConfig) (bool, string, proxyauth.Method) { +func (s *ProxyServiceServer) authenticatePassword(ctx context.Context, serviceID string, req *proto.AuthenticateRequest_Password, auth *rpservice.PasswordAuthConfig) (bool, string, proxyauth.Method) { if auth == nil || !auth.Enabled { log.WithContext(ctx).Debugf("password authentication attempted but not enabled for service %s", serviceID) return false, "", "" @@ -584,7 +604,7 @@ func (s *ProxyServiceServer) logAuthenticationError(ctx context.Context, err err } } -func (s *ProxyServiceServer) generateSessionToken(ctx context.Context, authenticated bool, service *reverseproxy.Service, userId string, method proxyauth.Method) (string, error) { +func (s *ProxyServiceServer) generateSessionToken(ctx context.Context, authenticated bool, service *rpservice.Service, userId string, method proxyauth.Method) (string, error) { if !authenticated || service.SessionPrivateKey == "" { return "", nil } @@ -624,7 +644,7 @@ func (s *ProxyServiceServer) SendStatusUpdate(ctx context.Context, req *proto.Se } if certificateIssued { - if err := s.reverseProxyManager.SetCertificateIssuedAt(ctx, accountID, serviceID); err != nil { + if err := s.serviceManager.SetCertificateIssuedAt(ctx, accountID, serviceID); err != nil { log.WithContext(ctx).WithError(err).Error("failed to set certificate issued timestamp") return nil, status.Errorf(codes.Internal, "update certificate timestamp: %v", err) } @@ -636,7 +656,7 @@ func (s *ProxyServiceServer) SendStatusUpdate(ctx context.Context, req *proto.Se internalStatus := protoStatusToInternal(protoStatus) - if err := s.reverseProxyManager.SetStatus(ctx, accountID, serviceID, internalStatus); err != nil { + if err := s.serviceManager.SetStatus(ctx, accountID, serviceID, internalStatus); err != nil { log.WithContext(ctx).WithError(err).Error("failed to update service status") return nil, status.Errorf(codes.Internal, "update service status: %v", err) } @@ -651,22 +671,22 @@ func (s *ProxyServiceServer) SendStatusUpdate(ctx context.Context, req *proto.Se } // protoStatusToInternal maps proto status to internal status -func protoStatusToInternal(protoStatus proto.ProxyStatus) reverseproxy.ProxyStatus { +func protoStatusToInternal(protoStatus proto.ProxyStatus) rpservice.Status { switch protoStatus { case proto.ProxyStatus_PROXY_STATUS_PENDING: - return reverseproxy.StatusPending + return rpservice.StatusPending case proto.ProxyStatus_PROXY_STATUS_ACTIVE: - return reverseproxy.StatusActive + return rpservice.StatusActive case proto.ProxyStatus_PROXY_STATUS_TUNNEL_NOT_CREATED: - return reverseproxy.StatusTunnelNotCreated + return rpservice.StatusTunnelNotCreated case proto.ProxyStatus_PROXY_STATUS_CERTIFICATE_PENDING: - return reverseproxy.StatusCertificatePending + return rpservice.StatusCertificatePending case proto.ProxyStatus_PROXY_STATUS_CERTIFICATE_FAILED: - return reverseproxy.StatusCertificateFailed + return rpservice.StatusCertificateFailed case proto.ProxyStatus_PROXY_STATUS_ERROR: - return reverseproxy.StatusError + return rpservice.StatusError default: - return reverseproxy.StatusError + return rpservice.StatusError } } @@ -731,7 +751,7 @@ func (s *ProxyServiceServer) GetOIDCURL(ctx context.Context, req *proto.GetOIDCU return nil, status.Errorf(codes.InvalidArgument, "parse redirect url: %v", err) } // Validate redirectURL against known service endpoints to avoid abuse of OIDC redirection. - services, err := s.reverseProxyManager.GetAccountServices(ctx, req.GetAccountId()) + services, err := s.serviceManager.GetAccountServices(ctx, req.GetAccountId()) if err != nil { log.WithContext(ctx).Errorf("failed to get account services: %v", err) return nil, status.Errorf(codes.FailedPrecondition, "get account services: %v", err) @@ -794,8 +814,8 @@ func (s *ProxyServiceServer) GetOIDCConfig() ProxyOIDCConfig { // GetOIDCValidationConfig returns the OIDC configuration for token validation // in the format needed by ToProtoMapping. -func (s *ProxyServiceServer) GetOIDCValidationConfig() reverseproxy.OIDCValidationConfig { - return reverseproxy.OIDCValidationConfig{ +func (s *ProxyServiceServer) GetOIDCValidationConfig() proxy.OIDCValidationConfig { + return proxy.OIDCValidationConfig{ Issuer: s.oidcConfig.Issuer, Audiences: []string{s.oidcConfig.Audience}, KeysLocation: s.oidcConfig.KeysLocation, @@ -854,12 +874,12 @@ func (s *ProxyServiceServer) ValidateState(state string) (verifier, redirectURL // GenerateSessionToken creates a signed session JWT for the given domain and user. func (s *ProxyServiceServer) GenerateSessionToken(ctx context.Context, domain, userID string, method proxyauth.Method) (string, error) { // Find the service by domain to get its signing key - services, err := s.reverseProxyManager.GetGlobalServices(ctx) + services, err := s.serviceManager.GetGlobalServices(ctx) if err != nil { return "", fmt.Errorf("get services: %w", err) } - var service *reverseproxy.Service + var service *rpservice.Service for _, svc := range services { if svc.Domain == domain { service = svc @@ -925,8 +945,8 @@ func (s *ProxyServiceServer) ValidateUserGroupAccess(ctx context.Context, domain return fmt.Errorf("user %s not in allowed groups for domain %s", user.Id, domain) } -func (s *ProxyServiceServer) getAccountServiceByDomain(ctx context.Context, accountID, domain string) (*reverseproxy.Service, error) { - services, err := s.reverseProxyManager.GetAccountServices(ctx, accountID) +func (s *ProxyServiceServer) getAccountServiceByDomain(ctx context.Context, accountID, domain string) (*rpservice.Service, error) { + services, err := s.serviceManager.GetAccountServices(ctx, accountID) if err != nil { return nil, fmt.Errorf("get account services: %w", err) } @@ -1047,8 +1067,8 @@ func (s *ProxyServiceServer) ValidateSession(ctx context.Context, req *proto.Val }, nil } -func (s *ProxyServiceServer) getServiceByDomain(ctx context.Context, domain string) (*reverseproxy.Service, error) { - services, err := s.reverseProxyManager.GetGlobalServices(ctx) +func (s *ProxyServiceServer) getServiceByDomain(ctx context.Context, domain string) (*rpservice.Service, error) { + services, err := s.serviceManager.GetGlobalServices(ctx) if err != nil { return nil, fmt.Errorf("get services: %w", err) } @@ -1062,7 +1082,7 @@ func (s *ProxyServiceServer) getServiceByDomain(ctx context.Context, domain stri return nil, fmt.Errorf("service not found for domain: %s", domain) } -func (s *ProxyServiceServer) checkGroupAccess(service *reverseproxy.Service, user *types.User) error { +func (s *ProxyServiceServer) checkGroupAccess(service *rpservice.Service, user *types.User) error { if service.Auth.BearerAuth == nil || !service.Auth.BearerAuth.Enabled { return nil } diff --git a/management/internals/shared/grpc/proxy_group_access_test.go b/management/internals/shared/grpc/proxy_group_access_test.go index 827897981..22fe4506b 100644 --- a/management/internals/shared/grpc/proxy_group_access_test.go +++ b/management/internals/shared/grpc/proxy_group_access_test.go @@ -8,12 +8,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/management/server/types" ) type mockReverseProxyManager struct { - proxiesByAccount map[string][]*reverseproxy.Service + proxiesByAccount map[string][]*service.Service err error } @@ -21,31 +21,31 @@ func (m *mockReverseProxyManager) DeleteAllServices(ctx context.Context, account return nil } -func (m *mockReverseProxyManager) GetAccountServices(ctx context.Context, accountID string) ([]*reverseproxy.Service, error) { +func (m *mockReverseProxyManager) GetAccountServices(ctx context.Context, accountID string) ([]*service.Service, error) { if m.err != nil { return nil, m.err } return m.proxiesByAccount[accountID], nil } -func (m *mockReverseProxyManager) GetGlobalServices(ctx context.Context) ([]*reverseproxy.Service, error) { +func (m *mockReverseProxyManager) GetGlobalServices(ctx context.Context) ([]*service.Service, error) { return nil, nil } -func (m *mockReverseProxyManager) GetAllServices(ctx context.Context, accountID, userID string) ([]*reverseproxy.Service, error) { - return []*reverseproxy.Service{}, nil +func (m *mockReverseProxyManager) GetAllServices(ctx context.Context, accountID, userID string) ([]*service.Service, error) { + return []*service.Service{}, nil } -func (m *mockReverseProxyManager) GetService(ctx context.Context, accountID, userID, reverseProxyID string) (*reverseproxy.Service, error) { - return &reverseproxy.Service{}, nil +func (m *mockReverseProxyManager) GetService(ctx context.Context, accountID, userID, reverseProxyID string) (*service.Service, error) { + return &service.Service{}, nil } -func (m *mockReverseProxyManager) CreateService(ctx context.Context, accountID, userID string, rp *reverseproxy.Service) (*reverseproxy.Service, error) { - return &reverseproxy.Service{}, nil +func (m *mockReverseProxyManager) CreateService(ctx context.Context, accountID, userID string, rp *service.Service) (*service.Service, error) { + return &service.Service{}, nil } -func (m *mockReverseProxyManager) UpdateService(ctx context.Context, accountID, userID string, rp *reverseproxy.Service) (*reverseproxy.Service, error) { - return &reverseproxy.Service{}, nil +func (m *mockReverseProxyManager) UpdateService(ctx context.Context, accountID, userID string, rp *service.Service) (*service.Service, error) { + return &service.Service{}, nil } func (m *mockReverseProxyManager) DeleteService(ctx context.Context, accountID, userID, reverseProxyID string) error { @@ -56,7 +56,7 @@ func (m *mockReverseProxyManager) SetCertificateIssuedAt(ctx context.Context, ac return nil } -func (m *mockReverseProxyManager) SetStatus(ctx context.Context, accountID, reverseProxyID string, status reverseproxy.ProxyStatus) error { +func (m *mockReverseProxyManager) SetStatus(ctx context.Context, accountID, reverseProxyID string, status service.Status) error { return nil } @@ -68,16 +68,16 @@ func (m *mockReverseProxyManager) ReloadService(ctx context.Context, accountID, return nil } -func (m *mockReverseProxyManager) GetServiceByID(ctx context.Context, accountID, reverseProxyID string) (*reverseproxy.Service, error) { - return &reverseproxy.Service{}, nil +func (m *mockReverseProxyManager) GetServiceByID(ctx context.Context, accountID, reverseProxyID string) (*service.Service, error) { + return &service.Service{}, nil } func (m *mockReverseProxyManager) GetServiceIDByTargetID(_ context.Context, _, _ string) (string, error) { return "", nil } -func (m *mockReverseProxyManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.ExposeServiceRequest) (*reverseproxy.ExposeServiceResponse, error) { - return &reverseproxy.ExposeServiceResponse{}, nil +func (m *mockReverseProxyManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *service.ExposeServiceRequest) (*service.ExposeServiceResponse, error) { + return &service.ExposeServiceResponse{}, nil } func (m *mockReverseProxyManager) RenewServiceFromPeer(_ context.Context, _, _, _ string) error { @@ -111,7 +111,7 @@ func TestValidateUserGroupAccess(t *testing.T) { name string domain string userID string - proxiesByAccount map[string][]*reverseproxy.Service + proxiesByAccount map[string][]*service.Service users map[string]*types.User proxyErr error userErr error @@ -122,7 +122,7 @@ func TestValidateUserGroupAccess(t *testing.T) { name: "user not found", domain: "app.example.com", userID: "unknown-user", - proxiesByAccount: map[string][]*reverseproxy.Service{ + proxiesByAccount: map[string][]*service.Service{ "account1": {{Domain: "app.example.com", AccountID: "account1"}}, }, users: map[string]*types.User{}, @@ -133,7 +133,7 @@ func TestValidateUserGroupAccess(t *testing.T) { name: "proxy not found in user's account", domain: "app.example.com", userID: "user1", - proxiesByAccount: map[string][]*reverseproxy.Service{}, + proxiesByAccount: map[string][]*service.Service{}, users: map[string]*types.User{ "user1": {Id: "user1", AccountID: "account1"}, }, @@ -144,7 +144,7 @@ func TestValidateUserGroupAccess(t *testing.T) { name: "proxy exists in different account - not accessible", domain: "app.example.com", userID: "user1", - proxiesByAccount: map[string][]*reverseproxy.Service{ + proxiesByAccount: map[string][]*service.Service{ "account2": {{Domain: "app.example.com", AccountID: "account2"}}, }, users: map[string]*types.User{ @@ -157,8 +157,8 @@ func TestValidateUserGroupAccess(t *testing.T) { name: "no bearer auth configured - same account allows access", domain: "app.example.com", userID: "user1", - proxiesByAccount: map[string][]*reverseproxy.Service{ - "account1": {{Domain: "app.example.com", AccountID: "account1", Auth: reverseproxy.AuthConfig{}}}, + proxiesByAccount: map[string][]*service.Service{ + "account1": {{Domain: "app.example.com", AccountID: "account1", Auth: service.AuthConfig{}}}, }, users: map[string]*types.User{ "user1": {Id: "user1", AccountID: "account1"}, @@ -169,12 +169,12 @@ func TestValidateUserGroupAccess(t *testing.T) { name: "bearer auth disabled - same account allows access", domain: "app.example.com", userID: "user1", - proxiesByAccount: map[string][]*reverseproxy.Service{ + proxiesByAccount: map[string][]*service.Service{ "account1": {{ Domain: "app.example.com", AccountID: "account1", - Auth: reverseproxy.AuthConfig{ - BearerAuth: &reverseproxy.BearerAuthConfig{Enabled: false}, + Auth: service.AuthConfig{ + BearerAuth: &service.BearerAuthConfig{Enabled: false}, }, }}, }, @@ -187,12 +187,12 @@ func TestValidateUserGroupAccess(t *testing.T) { name: "bearer auth enabled but no groups configured - same account allows access", domain: "app.example.com", userID: "user1", - proxiesByAccount: map[string][]*reverseproxy.Service{ + proxiesByAccount: map[string][]*service.Service{ "account1": {{ Domain: "app.example.com", AccountID: "account1", - Auth: reverseproxy.AuthConfig{ - BearerAuth: &reverseproxy.BearerAuthConfig{ + Auth: service.AuthConfig{ + BearerAuth: &service.BearerAuthConfig{ Enabled: true, DistributionGroups: []string{}, }, @@ -208,12 +208,12 @@ func TestValidateUserGroupAccess(t *testing.T) { name: "user not in allowed groups", domain: "app.example.com", userID: "user1", - proxiesByAccount: map[string][]*reverseproxy.Service{ + proxiesByAccount: map[string][]*service.Service{ "account1": {{ Domain: "app.example.com", AccountID: "account1", - Auth: reverseproxy.AuthConfig{ - BearerAuth: &reverseproxy.BearerAuthConfig{ + Auth: service.AuthConfig{ + BearerAuth: &service.BearerAuthConfig{ Enabled: true, DistributionGroups: []string{"group1", "group2"}, }, @@ -230,12 +230,12 @@ func TestValidateUserGroupAccess(t *testing.T) { name: "user in one of the allowed groups - allow access", domain: "app.example.com", userID: "user1", - proxiesByAccount: map[string][]*reverseproxy.Service{ + proxiesByAccount: map[string][]*service.Service{ "account1": {{ Domain: "app.example.com", AccountID: "account1", - Auth: reverseproxy.AuthConfig{ - BearerAuth: &reverseproxy.BearerAuthConfig{ + Auth: service.AuthConfig{ + BearerAuth: &service.BearerAuthConfig{ Enabled: true, DistributionGroups: []string{"group1", "group2"}, }, @@ -251,12 +251,12 @@ func TestValidateUserGroupAccess(t *testing.T) { name: "user in all allowed groups - allow access", domain: "app.example.com", userID: "user1", - proxiesByAccount: map[string][]*reverseproxy.Service{ + proxiesByAccount: map[string][]*service.Service{ "account1": {{ Domain: "app.example.com", AccountID: "account1", - Auth: reverseproxy.AuthConfig{ - BearerAuth: &reverseproxy.BearerAuthConfig{ + Auth: service.AuthConfig{ + BearerAuth: &service.BearerAuthConfig{ Enabled: true, DistributionGroups: []string{"group1", "group2"}, }, @@ -284,10 +284,10 @@ func TestValidateUserGroupAccess(t *testing.T) { name: "multiple proxies in account - finds correct one", domain: "app2.example.com", userID: "user1", - proxiesByAccount: map[string][]*reverseproxy.Service{ + proxiesByAccount: map[string][]*service.Service{ "account1": { {Domain: "app1.example.com", AccountID: "account1"}, - {Domain: "app2.example.com", AccountID: "account1", Auth: reverseproxy.AuthConfig{}}, + {Domain: "app2.example.com", AccountID: "account1", Auth: service.AuthConfig{}}, {Domain: "app3.example.com", AccountID: "account1"}, }, }, @@ -301,7 +301,7 @@ func TestValidateUserGroupAccess(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { server := &ProxyServiceServer{ - reverseProxyManager: &mockReverseProxyManager{ + serviceManager: &mockReverseProxyManager{ proxiesByAccount: tt.proxiesByAccount, err: tt.proxyErr, }, @@ -328,7 +328,7 @@ func TestGetAccountProxyByDomain(t *testing.T) { name string accountID string domain string - proxiesByAccount map[string][]*reverseproxy.Service + proxiesByAccount map[string][]*service.Service err error expectProxy bool expectErr bool @@ -337,7 +337,7 @@ func TestGetAccountProxyByDomain(t *testing.T) { name: "proxy found", accountID: "account1", domain: "app.example.com", - proxiesByAccount: map[string][]*reverseproxy.Service{ + proxiesByAccount: map[string][]*service.Service{ "account1": { {Domain: "other.example.com", AccountID: "account1"}, {Domain: "app.example.com", AccountID: "account1"}, @@ -350,7 +350,7 @@ func TestGetAccountProxyByDomain(t *testing.T) { name: "proxy not found in account", accountID: "account1", domain: "unknown.example.com", - proxiesByAccount: map[string][]*reverseproxy.Service{ + proxiesByAccount: map[string][]*service.Service{ "account1": {{Domain: "app.example.com", AccountID: "account1"}}, }, expectProxy: false, @@ -360,7 +360,7 @@ func TestGetAccountProxyByDomain(t *testing.T) { name: "empty proxy list for account", accountID: "account1", domain: "app.example.com", - proxiesByAccount: map[string][]*reverseproxy.Service{}, + proxiesByAccount: map[string][]*service.Service{}, expectProxy: false, expectErr: true, }, @@ -378,7 +378,7 @@ func TestGetAccountProxyByDomain(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { server := &ProxyServiceServer{ - reverseProxyManager: &mockReverseProxyManager{ + serviceManager: &mockReverseProxyManager{ proxiesByAccount: tt.proxiesByAccount, err: tt.err, }, diff --git a/management/internals/shared/grpc/proxy_test.go b/management/internals/shared/grpc/proxy_test.go index de8ca3c84..ddeadac5a 100644 --- a/management/internals/shared/grpc/proxy_test.go +++ b/management/internals/shared/grpc/proxy_test.go @@ -1,19 +1,73 @@ package grpc import ( + "context" "crypto/rand" "encoding/base64" "strings" - "sync" "testing" "time" + "sync" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" "github.com/netbirdio/netbird/shared/management/proto" ) +type testProxyController struct { + mu sync.Mutex + clusterProxies map[string]map[string]struct{} +} + +func newTestProxyController() *testProxyController { + return &testProxyController{ + clusterProxies: make(map[string]map[string]struct{}), + } +} + +func (c *testProxyController) SendServiceUpdateToCluster(_ context.Context, _ string, _ *proto.ProxyMapping, _ string) { +} + +func (c *testProxyController) GetOIDCValidationConfig() proxy.OIDCValidationConfig { + return proxy.OIDCValidationConfig{} +} + +func (c *testProxyController) RegisterProxyToCluster(_ context.Context, clusterAddr, proxyID string) error { + c.mu.Lock() + defer c.mu.Unlock() + if _, ok := c.clusterProxies[clusterAddr]; !ok { + c.clusterProxies[clusterAddr] = make(map[string]struct{}) + } + c.clusterProxies[clusterAddr][proxyID] = struct{}{} + return nil +} + +func (c *testProxyController) UnregisterProxyFromCluster(_ context.Context, clusterAddr, proxyID string) error { + c.mu.Lock() + defer c.mu.Unlock() + if proxies, ok := c.clusterProxies[clusterAddr]; ok { + delete(proxies, proxyID) + } + return nil +} + +func (c *testProxyController) GetProxiesForCluster(clusterAddr string) []string { + c.mu.Lock() + defer c.mu.Unlock() + proxies, ok := c.clusterProxies[clusterAddr] + if !ok { + return nil + } + result := make([]string, 0, len(proxies)) + for id := range proxies { + result = append(result, id) + } + return result +} + // registerFakeProxy adds a fake proxy connection to the server's internal maps // and returns the channel where messages will be received. func registerFakeProxy(s *ProxyServiceServer, proxyID, clusterAddr string) chan *proto.GetMappingUpdateResponse { @@ -25,8 +79,7 @@ func registerFakeProxy(s *ProxyServiceServer, proxyID, clusterAddr string) chan } s.connectedProxies.Store(proxyID, conn) - proxySet, _ := s.clusterProxies.LoadOrStore(clusterAddr, &sync.Map{}) - proxySet.(*sync.Map).Store(proxyID, struct{}{}) + _ = s.proxyController.RegisterProxyToCluster(context.Background(), clusterAddr, proxyID) return ch } @@ -41,12 +94,13 @@ func drainChannel(ch chan *proto.GetMappingUpdateResponse) *proto.GetMappingUpda } func TestSendServiceUpdateToCluster_UniqueTokensPerProxy(t *testing.T) { - tokenStore := NewOneTimeTokenStore(time.Hour) - defer tokenStore.Close() + tokenStore, err := NewOneTimeTokenStore(context.Background(), time.Hour, 10*time.Minute, 100) + require.NoError(t, err) s := &ProxyServiceServer{ tokenStore: tokenStore, } + s.SetProxyController(newTestProxyController()) const cluster = "proxy.example.com" const numProxies = 3 @@ -67,11 +121,7 @@ func TestSendServiceUpdateToCluster_UniqueTokensPerProxy(t *testing.T) { }, } - update := &proto.GetMappingUpdateResponse{ - Mapping: []*proto.ProxyMapping{mapping}, - } - - s.SendServiceUpdateToCluster(update, cluster) + s.SendServiceUpdateToCluster(context.Background(), mapping, cluster) tokens := make([]string, numProxies) for i, ch := range channels { @@ -101,12 +151,13 @@ func TestSendServiceUpdateToCluster_UniqueTokensPerProxy(t *testing.T) { } func TestSendServiceUpdateToCluster_DeleteNoToken(t *testing.T) { - tokenStore := NewOneTimeTokenStore(time.Hour) - defer tokenStore.Close() + tokenStore, err := NewOneTimeTokenStore(context.Background(), time.Hour, 10*time.Minute, 100) + require.NoError(t, err) s := &ProxyServiceServer{ tokenStore: tokenStore, } + s.SetProxyController(newTestProxyController()) const cluster = "proxy.example.com" ch1 := registerFakeProxy(s, "proxy-a", cluster) @@ -119,11 +170,7 @@ func TestSendServiceUpdateToCluster_DeleteNoToken(t *testing.T) { Domain: "test.example.com", } - update := &proto.GetMappingUpdateResponse{ - Mapping: []*proto.ProxyMapping{mapping}, - } - - s.SendServiceUpdateToCluster(update, cluster) + s.SendServiceUpdateToCluster(context.Background(), mapping, cluster) resp1 := drainChannel(ch1) resp2 := drainChannel(ch2) @@ -135,18 +182,16 @@ func TestSendServiceUpdateToCluster_DeleteNoToken(t *testing.T) { // Delete operations should not generate tokens assert.Empty(t, resp1.Mapping[0].AuthToken) assert.Empty(t, resp2.Mapping[0].AuthToken) - - // No tokens should have been created - assert.Equal(t, 0, tokenStore.GetTokenCount()) } func TestSendServiceUpdate_UniqueTokensPerProxy(t *testing.T) { - tokenStore := NewOneTimeTokenStore(time.Hour) - defer tokenStore.Close() + tokenStore, err := NewOneTimeTokenStore(context.Background(), time.Hour, 10*time.Minute, 100) + require.NoError(t, err) s := &ProxyServiceServer{ tokenStore: tokenStore, } + s.SetProxyController(newTestProxyController()) // Register proxies in different clusters (SendServiceUpdate broadcasts to all) ch1 := registerFakeProxy(s, "proxy-a", "cluster-a") diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index 029d71e2e..a07cafe90 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -26,7 +26,7 @@ import ( "github.com/netbirdio/netbird/shared/management/client/common" "github.com/netbirdio/netbird/management/internals/controllers/network_map" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" nbconfig "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/idp" "github.com/netbirdio/netbird/management/server/job" @@ -82,7 +82,7 @@ type Server struct { syncLimEnabled bool syncLim int32 - reverseProxyManager reverseproxy.Manager + reverseProxyManager rpservice.Manager reverseProxyMu sync.RWMutex } diff --git a/management/internals/shared/grpc/validate_session_test.go b/management/internals/shared/grpc/validate_session_test.go index 640a27bb2..124ddf620 100644 --- a/management/internals/shared/grpc/validate_session_test.go +++ b/management/internals/shared/grpc/validate_session_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/sessionkey" "github.com/netbirdio/netbird/management/server/store" "github.com/netbirdio/netbird/management/server/types" @@ -34,11 +34,15 @@ func setupValidateSessionTest(t *testing.T) *validateSessionTestSetup { testStore, storeCleanup, err := store.NewTestStoreFromSQL(ctx, "../../../server/testdata/auth_callback.sql", t.TempDir()) require.NoError(t, err) - proxyManager := &testValidateSessionProxyManager{store: testStore} + serviceManager := &testValidateSessionServiceManager{store: testStore} usersManager := &testValidateSessionUsersManager{store: testStore} + proxyManager := &testValidateSessionProxyManager{} - proxyService := NewProxyServiceServer(nil, NewOneTimeTokenStore(time.Minute), ProxyOIDCConfig{}, nil, usersManager) - proxyService.SetProxyManager(proxyManager) + tokenStore, err := NewOneTimeTokenStore(ctx, time.Minute, 10*time.Minute, 100) + require.NoError(t, err) + + proxyService := NewProxyServiceServer(nil, tokenStore, ProxyOIDCConfig{}, nil, usersManager, proxyManager) + proxyService.SetServiceManager(serviceManager) createTestProxies(t, ctx, testStore) @@ -54,7 +58,7 @@ func createTestProxies(t *testing.T, ctx context.Context, testStore store.Store) pubKey, privKey := generateSessionKeyPair(t) - testProxy := &reverseproxy.Service{ + testProxy := &service.Service{ ID: "testProxyId", AccountID: "testAccountId", Name: "Test Proxy", @@ -62,15 +66,15 @@ func createTestProxies(t *testing.T, ctx context.Context, testStore store.Store) Enabled: true, SessionPrivateKey: privKey, SessionPublicKey: pubKey, - Auth: reverseproxy.AuthConfig{ - BearerAuth: &reverseproxy.BearerAuthConfig{ + Auth: service.AuthConfig{ + BearerAuth: &service.BearerAuthConfig{ Enabled: true, }, }, } require.NoError(t, testStore.CreateService(ctx, testProxy)) - restrictedProxy := &reverseproxy.Service{ + restrictedProxy := &service.Service{ ID: "restrictedProxyId", AccountID: "testAccountId", Name: "Restricted Proxy", @@ -78,8 +82,8 @@ func createTestProxies(t *testing.T, ctx context.Context, testStore store.Store) Enabled: true, SessionPrivateKey: privKey, SessionPublicKey: pubKey, - Auth: reverseproxy.AuthConfig{ - BearerAuth: &reverseproxy.BearerAuthConfig{ + Auth: service.AuthConfig{ + BearerAuth: &service.BearerAuthConfig{ Enabled: true, DistributionGroups: []string{"allowedGroupId"}, }, @@ -239,79 +243,101 @@ func TestValidateSession_MissingToken(t *testing.T) { assert.Contains(t, resp.DeniedReason, "missing") } -type testValidateSessionProxyManager struct { +type testValidateSessionServiceManager struct { store store.Store } -func (m *testValidateSessionProxyManager) GetAllServices(_ context.Context, _, _ string) ([]*reverseproxy.Service, error) { +func (m *testValidateSessionServiceManager) GetAllServices(_ context.Context, _, _ string) ([]*service.Service, error) { return nil, nil } -func (m *testValidateSessionProxyManager) GetService(_ context.Context, _, _, _ string) (*reverseproxy.Service, error) { +func (m *testValidateSessionServiceManager) GetService(_ context.Context, _, _, _ string) (*service.Service, error) { return nil, nil } -func (m *testValidateSessionProxyManager) CreateService(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { +func (m *testValidateSessionServiceManager) CreateService(_ context.Context, _, _ string, _ *service.Service) (*service.Service, error) { return nil, nil } -func (m *testValidateSessionProxyManager) UpdateService(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { +func (m *testValidateSessionServiceManager) UpdateService(_ context.Context, _, _ string, _ *service.Service) (*service.Service, error) { return nil, nil } -func (m *testValidateSessionProxyManager) DeleteService(_ context.Context, _, _, _ string) error { +func (m *testValidateSessionServiceManager) DeleteService(_ context.Context, _, _, _ string) error { return nil } -func (m *testValidateSessionProxyManager) DeleteAllServices(_ context.Context, _, _ string) error { +func (m *testValidateSessionServiceManager) DeleteAllServices(_ context.Context, _, _ string) error { return nil } -func (m *testValidateSessionProxyManager) SetCertificateIssuedAt(_ context.Context, _, _ string) error { +func (m *testValidateSessionServiceManager) SetCertificateIssuedAt(_ context.Context, _, _ string) error { return nil } -func (m *testValidateSessionProxyManager) SetStatus(_ context.Context, _, _ string, _ reverseproxy.ProxyStatus) error { +func (m *testValidateSessionServiceManager) SetStatus(_ context.Context, _, _ string, _ service.Status) error { return nil } -func (m *testValidateSessionProxyManager) ReloadAllServicesForAccount(_ context.Context, _ string) error { +func (m *testValidateSessionServiceManager) ReloadAllServicesForAccount(_ context.Context, _ string) error { return nil } -func (m *testValidateSessionProxyManager) ReloadService(_ context.Context, _, _ string) error { +func (m *testValidateSessionServiceManager) ReloadService(_ context.Context, _, _ string) error { return nil } -func (m *testValidateSessionProxyManager) GetGlobalServices(ctx context.Context) ([]*reverseproxy.Service, error) { +func (m *testValidateSessionServiceManager) GetGlobalServices(ctx context.Context) ([]*service.Service, error) { return m.store.GetServices(ctx, store.LockingStrengthNone) } -func (m *testValidateSessionProxyManager) GetServiceByID(ctx context.Context, accountID, proxyID string) (*reverseproxy.Service, error) { +func (m *testValidateSessionServiceManager) GetServiceByID(ctx context.Context, accountID, proxyID string) (*service.Service, error) { return m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, proxyID) } -func (m *testValidateSessionProxyManager) GetAccountServices(ctx context.Context, accountID string) ([]*reverseproxy.Service, error) { +func (m *testValidateSessionServiceManager) GetAccountServices(ctx context.Context, accountID string) ([]*service.Service, error) { return m.store.GetAccountServices(ctx, store.LockingStrengthNone, accountID) } -func (m *testValidateSessionProxyManager) GetServiceIDByTargetID(_ context.Context, _, _ string) (string, error) { +func (m *testValidateSessionServiceManager) GetServiceIDByTargetID(_ context.Context, _, _ string) (string, error) { return "", nil } -func (m *testValidateSessionProxyManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.ExposeServiceRequest) (*reverseproxy.ExposeServiceResponse, error) { +func (m *testValidateSessionServiceManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *service.ExposeServiceRequest) (*service.ExposeServiceResponse, error) { return nil, nil } -func (m *testValidateSessionProxyManager) RenewServiceFromPeer(_ context.Context, _, _, _ string) error { +func (m *testValidateSessionServiceManager) RenewServiceFromPeer(_ context.Context, _, _, _ string) error { return nil } -func (m *testValidateSessionProxyManager) StopServiceFromPeer(_ context.Context, _, _, _ string) error { +func (m *testValidateSessionServiceManager) StopServiceFromPeer(_ context.Context, _, _, _ string) error { return nil } -func (m *testValidateSessionProxyManager) StartExposeReaper(_ context.Context) {} +func (m *testValidateSessionServiceManager) StartExposeReaper(_ context.Context) {} + +type testValidateSessionProxyManager struct{} + +func (m *testValidateSessionProxyManager) Connect(_ context.Context, _, _, _ string) error { + return nil +} + +func (m *testValidateSessionProxyManager) Disconnect(_ context.Context, _ string) error { + return nil +} + +func (m *testValidateSessionProxyManager) Heartbeat(_ context.Context, _ string) error { + return nil +} + +func (m *testValidateSessionProxyManager) GetActiveClusterAddresses(_ context.Context) ([]string, error) { + return nil, nil +} + +func (m *testValidateSessionProxyManager) CleanupStale(_ context.Context, _ time.Duration) error { + return nil +} type testValidateSessionUsersManager struct { store store.Store diff --git a/management/server/account.go b/management/server/account.go index fb8592164..550971337 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -15,7 +15,7 @@ import ( "sync" "time" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/shared/auth" @@ -83,9 +83,9 @@ type DefaultAccountManager struct { requestBuffer *AccountRequestBuffer - proxyController port_forwarding.Controller - settingsManager settings.Manager - reverseProxyManager reverseproxy.Manager + proxyController port_forwarding.Controller + settingsManager settings.Manager + serviceManager service.Manager // config contains the management server configuration config *nbconfig.Config @@ -115,8 +115,8 @@ type DefaultAccountManager struct { var _ account.Manager = (*DefaultAccountManager)(nil) -func (am *DefaultAccountManager) SetServiceManager(serviceManager reverseproxy.Manager) { - am.reverseProxyManager = serviceManager +func (am *DefaultAccountManager) SetServiceManager(serviceManager service.Manager) { + am.serviceManager = serviceManager } func isUniqueConstraintError(err error) bool { @@ -395,7 +395,7 @@ func (am *DefaultAccountManager) UpdateAccountSettings(ctx context.Context, acco am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountNetworkRangeUpdated, eventMeta) } if reloadReverseProxy { - if err = am.reverseProxyManager.ReloadAllServicesForAccount(ctx, accountID); err != nil { + if err = am.serviceManager.ReloadAllServicesForAccount(ctx, accountID); err != nil { log.WithContext(ctx).Warnf("failed to reload all services for account %s: %v", accountID, err) } } @@ -730,7 +730,7 @@ func (am *DefaultAccountManager) DeleteAccount(ctx context.Context, accountID, u return status.Errorf(status.Internal, "failed to build user infos for account %s: %v", accountID, err) } - err = am.reverseProxyManager.DeleteAllServices(ctx, accountID, userID) + err = am.serviceManager.DeleteAllServices(ctx, accountID, userID) if err != nil { return status.Errorf(status.Internal, "failed to delete service %s: %v", accountID, err) } diff --git a/management/server/account/manager.go b/management/server/account/manager.go index 893e894e1..45af63ae8 100644 --- a/management/server/account/manager.go +++ b/management/server/account/manager.go @@ -8,7 +8,7 @@ import ( "net/netip" "time" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/shared/auth" nbdns "github.com/netbirdio/netbird/dns" @@ -142,5 +142,5 @@ type Manager interface { CreatePeerJob(ctx context.Context, accountID, peerID, userID string, job *types.Job) error GetAllPeerJobs(ctx context.Context, accountID, userID, peerID string) ([]*types.Job, error) GetPeerJobByID(ctx context.Context, accountID, userID, peerID, jobID string) (*types.Job, error) - SetServiceManager(serviceManager reverseproxy.Manager) + SetServiceManager(serviceManager service.Manager) } diff --git a/management/server/account/manager_mock.go b/management/server/account/manager_mock.go index ab6e8b1c9..90700c795 100644 --- a/management/server/account/manager_mock.go +++ b/management/server/account/manager_mock.go @@ -13,7 +13,7 @@ import ( gomock "github.com/golang/mock/gomock" dns "github.com/netbirdio/netbird/dns" - reverseproxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + service "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" activity "github.com/netbirdio/netbird/management/server/activity" idp "github.com/netbirdio/netbird/management/server/idp" peer "github.com/netbirdio/netbird/management/server/peer" @@ -1494,7 +1494,7 @@ func (mr *MockManagerMockRecorder) SaveUser(ctx, accountID, initiatorUserID, upd } // SetServiceManager mocks base method. -func (m *MockManager) SetServiceManager(serviceManager reverseproxy.Manager) { +func (m *MockManager) SetServiceManager(serviceManager service.Manager) { m.ctrl.T.Helper() m.ctrl.Call(m, "SetServiceManager", serviceManager) } diff --git a/management/server/account_test.go b/management/server/account_test.go index 340e130d9..65bab6c18 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -19,6 +19,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/metric/noop" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" nbdns "github.com/netbirdio/netbird/dns" @@ -27,8 +28,10 @@ import ( "github.com/netbirdio/netbird/management/internals/controllers/network_map/update_channel" "github.com/netbirdio/netbird/management/internals/modules/peers" ephemeral_manager "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" - reverseproxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/manager" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" + proxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy/manager" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" + reverseproxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service/manager" "github.com/netbirdio/netbird/management/internals/modules/zones" "github.com/netbirdio/netbird/management/internals/server/config" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" @@ -1803,12 +1806,12 @@ func TestAccount_Copy(t *testing.T) { Address: "172.12.6.1/24", }, }, - Services: []*reverseproxy.Service{ + Services: []*service.Service{ { ID: "service1", Name: "test-service", AccountID: "account1", - Targets: []*reverseproxy.Target{}, + Targets: []*service.Target{}, }, }, NetworkMapCache: &types.NetworkMapBuilder{}, @@ -3113,6 +3116,12 @@ func createManager(t testing.TB) (*DefaultAccountManager, *update_channel.PeersU permissionsManager := permissions.NewManager(store) peersManager := peers.NewManager(store, permissionsManager) + proxyManager := proxy.NewMockManager(ctrl) + proxyManager.EXPECT(). + CleanupStale(gomock.Any(), gomock.Any()). + Return(nil). + AnyTimes() + ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) @@ -3123,8 +3132,12 @@ func createManager(t testing.TB) (*DefaultAccountManager, *update_channel.PeersU return nil, nil, err } - proxyGrpcServer := nbgrpc.NewProxyServiceServer(nil, nil, nbgrpc.ProxyOIDCConfig{}, peersManager, nil) - manager.SetServiceManager(reverseproxymanager.NewManager(store, manager, permissionsManager, settingsMockManager, proxyGrpcServer, nil)) + proxyGrpcServer := nbgrpc.NewProxyServiceServer(nil, nil, nbgrpc.ProxyOIDCConfig{}, peersManager, nil, proxyManager) + proxyController, err := proxymanager.NewGRPCController(proxyGrpcServer, noop.Meter{}) + if err != nil { + return nil, nil, err + } + manager.SetServiceManager(reverseproxymanager.NewManager(store, manager, permissionsManager, proxyController, nil)) return manager, updateManager, nil } diff --git a/management/server/group_test.go b/management/server/group_test.go index dd6869d50..fa818e532 100644 --- a/management/server/group_test.go +++ b/management/server/group_test.go @@ -766,7 +766,7 @@ func TestGroupAccountPeersUpdate(t *testing.T) { t.Run("saving group linked to network router", func(t *testing.T) { permissionsManager := permissions.NewManager(manager.Store) groupsManager := groups.NewManager(manager.Store, permissionsManager, manager) - resourcesManager := resources.NewManager(manager.Store, permissionsManager, groupsManager, manager, manager.reverseProxyManager) + resourcesManager := resources.NewManager(manager.Store, permissionsManager, groupsManager, manager, manager.serviceManager) routersManager := routers.NewManager(manager.Store, permissionsManager, manager) networksManager := networks.NewManager(manager.Store, permissionsManager, resourcesManager, routersManager, manager) diff --git a/management/server/http/handler.go b/management/server/http/handler.go index 9d2384cae..ddeda6d7f 100644 --- a/management/server/http/handler.go +++ b/management/server/http/handler.go @@ -17,9 +17,9 @@ import ( "github.com/netbirdio/netbird/management/server/types" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" - reverseproxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/manager" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" + reverseproxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service/manager" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" idpmanager "github.com/netbirdio/netbird/management/server/idp" @@ -73,7 +73,7 @@ const ( ) // NewAPIHandler creates the Management service HTTP API handler registering all the available endpoints. -func NewAPIHandler(ctx context.Context, accountManager account.Manager, networksManager nbnetworks.Manager, resourceManager resources.Manager, routerManager routers.Manager, groupsManager nbgroups.Manager, LocationManager geolocation.Geolocation, authManager auth.Manager, appMetrics telemetry.AppMetrics, integratedValidator integrated_validator.IntegratedValidator, proxyController port_forwarding.Controller, permissionsManager permissions.Manager, peersManager nbpeers.Manager, settingsManager settings.Manager, zManager zones.Manager, rManager records.Manager, networkMapController network_map.Controller, idpManager idpmanager.Manager, reverseProxyManager reverseproxy.Manager, reverseProxyDomainManager *manager.Manager, reverseProxyAccessLogsManager accesslogs.Manager, proxyGRPCServer *nbgrpc.ProxyServiceServer, trustedHTTPProxies []netip.Prefix) (http.Handler, error) { +func NewAPIHandler(ctx context.Context, accountManager account.Manager, networksManager nbnetworks.Manager, resourceManager resources.Manager, routerManager routers.Manager, groupsManager nbgroups.Manager, LocationManager geolocation.Geolocation, authManager auth.Manager, appMetrics telemetry.AppMetrics, integratedValidator integrated_validator.IntegratedValidator, proxyController port_forwarding.Controller, permissionsManager permissions.Manager, peersManager nbpeers.Manager, settingsManager settings.Manager, zManager zones.Manager, rManager records.Manager, networkMapController network_map.Controller, idpManager idpmanager.Manager, serviceManager service.Manager, reverseProxyDomainManager *manager.Manager, reverseProxyAccessLogsManager accesslogs.Manager, proxyGRPCServer *nbgrpc.ProxyServiceServer, trustedHTTPProxies []netip.Prefix) (http.Handler, error) { // Register bypass paths for unauthenticated endpoints if err := bypass.AddBypassPath("/api/instance"); err != nil { @@ -173,8 +173,8 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks idp.AddEndpoints(accountManager, router) instance.AddEndpoints(instanceManager, router) instance.AddVersionEndpoint(instanceManager, router) - if reverseProxyManager != nil && reverseProxyDomainManager != nil { - reverseproxymanager.RegisterEndpoints(reverseProxyManager, *reverseProxyDomainManager, reverseProxyAccessLogsManager, router) + if serviceManager != nil && reverseProxyDomainManager != nil { + reverseproxymanager.RegisterEndpoints(serviceManager, *reverseProxyDomainManager, reverseProxyAccessLogsManager, router) } // Register OAuth callback handler for proxy authentication diff --git a/management/server/http/handlers/proxy/auth_callback_integration_test.go b/management/server/http/handlers/proxy/auth_callback_integration_test.go index 12634dda4..c7fd08da8 100644 --- a/management/server/http/handlers/proxy/auth_callback_integration_test.go +++ b/management/server/http/handlers/proxy/auth_callback_integration_test.go @@ -18,8 +18,8 @@ import ( "github.com/gorilla/mux" "github.com/stretchr/testify/require" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" "github.com/netbirdio/netbird/management/server/store" "github.com/netbirdio/netbird/management/server/types" @@ -190,7 +190,8 @@ func setupAuthCallbackTest(t *testing.T) *testSetup { oidcServer := newFakeOIDCServer() - tokenStore := nbgrpc.NewOneTimeTokenStore(time.Minute) + tokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, time.Minute, 10*time.Minute, 100) + require.NoError(t, err) usersManager := users.NewManager(testStore) @@ -208,9 +209,10 @@ func setupAuthCallbackTest(t *testing.T) *testSetup { oidcConfig, nil, usersManager, + nil, ) - proxyService.SetProxyManager(&testServiceManager{store: testStore}) + proxyService.SetServiceManager(&testServiceManager{store: testStore}) handler := NewAuthCallbackHandler(proxyService, nil) @@ -239,12 +241,12 @@ func createTestReverseProxies(t *testing.T, ctx context.Context, testStore store pubKey := base64.StdEncoding.EncodeToString(pub) privKey := base64.StdEncoding.EncodeToString(priv) - testProxy := &reverseproxy.Service{ + testProxy := &service.Service{ ID: "testProxyId", AccountID: "testAccountId", Name: "Test Proxy", Domain: "test-proxy.example.com", - Targets: []*reverseproxy.Target{{ + Targets: []*service.Target{{ Path: strPtr("/"), Host: "localhost", Port: 8080, @@ -254,8 +256,8 @@ func createTestReverseProxies(t *testing.T, ctx context.Context, testStore store Enabled: true, }}, Enabled: true, - Auth: reverseproxy.AuthConfig{ - BearerAuth: &reverseproxy.BearerAuthConfig{ + Auth: service.AuthConfig{ + BearerAuth: &service.BearerAuthConfig{ Enabled: true, DistributionGroups: []string{"allowedGroupId"}, }, @@ -265,12 +267,12 @@ func createTestReverseProxies(t *testing.T, ctx context.Context, testStore store } require.NoError(t, testStore.CreateService(ctx, testProxy)) - restrictedProxy := &reverseproxy.Service{ + restrictedProxy := &service.Service{ ID: "restrictedProxyId", AccountID: "testAccountId", Name: "Restricted Proxy", Domain: "restricted-proxy.example.com", - Targets: []*reverseproxy.Target{{ + Targets: []*service.Target{{ Path: strPtr("/"), Host: "localhost", Port: 8080, @@ -280,8 +282,8 @@ func createTestReverseProxies(t *testing.T, ctx context.Context, testStore store Enabled: true, }}, Enabled: true, - Auth: reverseproxy.AuthConfig{ - BearerAuth: &reverseproxy.BearerAuthConfig{ + Auth: service.AuthConfig{ + BearerAuth: &service.BearerAuthConfig{ Enabled: true, DistributionGroups: []string{"restrictedGroupId"}, }, @@ -291,12 +293,12 @@ func createTestReverseProxies(t *testing.T, ctx context.Context, testStore store } require.NoError(t, testStore.CreateService(ctx, restrictedProxy)) - noAuthProxy := &reverseproxy.Service{ + noAuthProxy := &service.Service{ ID: "noAuthProxyId", AccountID: "testAccountId", Name: "No Auth Proxy", Domain: "no-auth-proxy.example.com", - Targets: []*reverseproxy.Target{{ + Targets: []*service.Target{{ Path: strPtr("/"), Host: "localhost", Port: 8080, @@ -306,8 +308,8 @@ func createTestReverseProxies(t *testing.T, ctx context.Context, testStore store Enabled: true, }}, Enabled: true, - Auth: reverseproxy.AuthConfig{ - BearerAuth: &reverseproxy.BearerAuthConfig{ + Auth: service.AuthConfig{ + BearerAuth: &service.BearerAuthConfig{ Enabled: false, }, }, @@ -361,19 +363,19 @@ func (m *testServiceManager) DeleteAllServices(ctx context.Context, accountID, u return nil } -func (m *testServiceManager) GetAllServices(_ context.Context, _, _ string) ([]*reverseproxy.Service, error) { +func (m *testServiceManager) GetAllServices(_ context.Context, _, _ string) ([]*service.Service, error) { return nil, nil } -func (m *testServiceManager) GetService(_ context.Context, _, _, _ string) (*reverseproxy.Service, error) { +func (m *testServiceManager) GetService(_ context.Context, _, _, _ string) (*service.Service, error) { return nil, nil } -func (m *testServiceManager) CreateService(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { +func (m *testServiceManager) CreateService(_ context.Context, _, _ string, _ *service.Service) (*service.Service, error) { return nil, nil } -func (m *testServiceManager) UpdateService(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { +func (m *testServiceManager) UpdateService(_ context.Context, _, _ string, _ *service.Service) (*service.Service, error) { return nil, nil } @@ -385,7 +387,7 @@ func (m *testServiceManager) SetCertificateIssuedAt(_ context.Context, _, _ stri return nil } -func (m *testServiceManager) SetStatus(_ context.Context, _, _ string, _ reverseproxy.ProxyStatus) error { +func (m *testServiceManager) SetStatus(_ context.Context, _, _ string, _ service.Status) error { return nil } @@ -397,15 +399,15 @@ func (m *testServiceManager) ReloadService(_ context.Context, _, _ string) error return nil } -func (m *testServiceManager) GetGlobalServices(ctx context.Context) ([]*reverseproxy.Service, error) { +func (m *testServiceManager) GetGlobalServices(ctx context.Context) ([]*service.Service, error) { return m.store.GetServices(ctx, store.LockingStrengthNone) } -func (m *testServiceManager) GetServiceByID(ctx context.Context, accountID, proxyID string) (*reverseproxy.Service, error) { +func (m *testServiceManager) GetServiceByID(ctx context.Context, accountID, proxyID string) (*service.Service, error) { return m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, proxyID) } -func (m *testServiceManager) GetAccountServices(ctx context.Context, accountID string) ([]*reverseproxy.Service, error) { +func (m *testServiceManager) GetAccountServices(ctx context.Context, accountID string) ([]*service.Service, error) { return m.store.GetAccountServices(ctx, store.LockingStrengthNone, accountID) } @@ -413,7 +415,7 @@ func (m *testServiceManager) GetServiceIDByTargetID(_ context.Context, _, _ stri return "", nil } -func (m *testServiceManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.ExposeServiceRequest) (*reverseproxy.ExposeServiceResponse, error) { +func (m *testServiceManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *service.ExposeServiceRequest) (*service.ExposeServiceResponse, error) { return nil, nil } diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index fd2dc5848..1d74f88d5 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -9,10 +9,13 @@ import ( "github.com/golang-jwt/jwt/v5" "github.com/stretchr/testify/assert" + "go.opentelemetry.io/otel/metric/noop" + "github.com/netbirdio/management-integrations/integrations" accesslogsmanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs/manager" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain/manager" - reverseproxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/manager" + proxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy/manager" + reverseproxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service/manager" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" zonesManager "github.com/netbirdio/netbird/management/internals/modules/zones/manager" @@ -91,12 +94,24 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee } accessLogsManager := accesslogsmanager.NewManager(store, permissionsManager, nil) - proxyTokenStore := nbgrpc.NewOneTimeTokenStore(1 * time.Minute) - proxyServiceServer := nbgrpc.NewProxyServiceServer(accessLogsManager, proxyTokenStore, nbgrpc.ProxyOIDCConfig{}, peersManager, userManager) - domainManager := manager.NewManager(store, proxyServiceServer, permissionsManager) - reverseProxyManager := reverseproxymanager.NewManager(store, am, permissionsManager, settingsManager, proxyServiceServer, domainManager) - proxyServiceServer.SetProxyManager(reverseProxyManager) - am.SetServiceManager(reverseProxyManager) + proxyTokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, 5*time.Minute, 10*time.Minute, 100) + if err != nil { + t.Fatalf("Failed to create proxy token store: %v", err) + } + noopMeter := noop.NewMeterProvider().Meter("") + proxyMgr, err := proxymanager.NewManager(store, noopMeter) + if err != nil { + t.Fatalf("Failed to create proxy manager: %v", err) + } + proxyServiceServer := nbgrpc.NewProxyServiceServer(accessLogsManager, proxyTokenStore, nbgrpc.ProxyOIDCConfig{}, peersManager, userManager, proxyMgr) + domainManager := manager.NewManager(store, proxyMgr, permissionsManager) + serviceProxyController, err := proxymanager.NewGRPCController(proxyServiceServer, noopMeter) + if err != nil { + t.Fatalf("Failed to create proxy controller: %v", err) + } + serviceManager := reverseproxymanager.NewManager(store, am, permissionsManager, serviceProxyController, domainManager) + proxyServiceServer.SetServiceManager(serviceManager) + am.SetServiceManager(serviceManager) // @note this is required so that PAT's validate from store, but JWT's are mocked authManager := serverauth.NewManager(store, "", "", "", "", []string{}, false) @@ -114,7 +129,7 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee customZonesManager := zonesManager.NewManager(store, am, permissionsManager, "") zoneRecordsManager := recordsManager.NewManager(store, am, permissionsManager) - apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManagerMock, resourcesManagerMock, routersManagerMock, groupsManagerMock, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, reverseProxyManager, nil, nil, nil, nil) + apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManagerMock, resourcesManagerMock, routersManagerMock, groupsManagerMock, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil) if err != nil { t.Fatalf("Failed to create API handler: %v", err) } diff --git a/management/server/metrics/selfhosted.go b/management/server/metrics/selfhosted.go index 9b1383c6c..f25a72181 100644 --- a/management/server/metrics/selfhosted.go +++ b/management/server/metrics/selfhosted.go @@ -14,7 +14,7 @@ import ( "github.com/hashicorp/go-version" "github.com/netbirdio/netbird/idp/dex" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/management/server/types" @@ -358,12 +358,12 @@ func (w *Worker) generateProperties(ctx context.Context) properties { } servicesTargets += len(service.Targets) - switch reverseproxy.ProxyStatus(service.Meta.Status) { - case reverseproxy.StatusActive: + switch rpservice.Status(service.Meta.Status) { + case rpservice.StatusActive: servicesStatusActive++ - case reverseproxy.StatusPending: + case rpservice.StatusPending: servicesStatusPending++ - case reverseproxy.StatusError, reverseproxy.StatusCertificateFailed, reverseproxy.StatusTunnelNotCreated: + case rpservice.StatusError, rpservice.StatusCertificateFailed, rpservice.StatusTunnelNotCreated: servicesStatusError++ } diff --git a/management/server/metrics/selfhosted_test.go b/management/server/metrics/selfhosted_test.go index bc4d68178..412559bff 100644 --- a/management/server/metrics/selfhosted_test.go +++ b/management/server/metrics/selfhosted_test.go @@ -6,7 +6,7 @@ import ( nbdns "github.com/netbirdio/netbird/dns" "github.com/netbirdio/netbird/idp/dex" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" networkTypes "github.com/netbirdio/netbird/management/server/networks/types" @@ -116,29 +116,29 @@ func (mockDatasource) GetAllAccounts(_ context.Context) []*types.Account { }, }, }, - Services: []*reverseproxy.Service{ + Services: []*rpservice.Service{ { ID: "svc1", Enabled: true, - Targets: []*reverseproxy.Target{ + Targets: []*rpservice.Target{ {TargetType: "peer"}, {TargetType: "host"}, }, - Auth: reverseproxy.AuthConfig{ - PasswordAuth: &reverseproxy.PasswordAuthConfig{Enabled: true}, + Auth: rpservice.AuthConfig{ + PasswordAuth: &rpservice.PasswordAuthConfig{Enabled: true}, }, - Meta: reverseproxy.ServiceMeta{Status: string(reverseproxy.StatusActive)}, + Meta: rpservice.Meta{Status: string(rpservice.StatusActive)}, }, { ID: "svc2", Enabled: false, - Targets: []*reverseproxy.Target{ + Targets: []*rpservice.Target{ {TargetType: "domain"}, }, - Auth: reverseproxy.AuthConfig{ - BearerAuth: &reverseproxy.BearerAuthConfig{Enabled: true}, + Auth: rpservice.AuthConfig{ + BearerAuth: &rpservice.BearerAuthConfig{Enabled: true}, }, - Meta: reverseproxy.ServiceMeta{Status: string(reverseproxy.StatusPending)}, + Meta: rpservice.Meta{Status: string(rpservice.StatusPending)}, }, }, }, diff --git a/management/server/mock_server/account_mock.go b/management/server/mock_server/account_mock.go index ea848328f..afd2021ac 100644 --- a/management/server/mock_server/account_mock.go +++ b/management/server/mock_server/account_mock.go @@ -12,7 +12,7 @@ import ( "google.golang.org/grpc/status" nbdns "github.com/netbirdio/netbird/dns" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/management/server/account" "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/idp" @@ -148,7 +148,7 @@ type MockAccountManager struct { DeleteUserInviteFunc func(ctx context.Context, accountID, initiatorUserID, inviteID string) error } -func (am *MockAccountManager) SetServiceManager(serviceManager reverseproxy.Manager) { +func (am *MockAccountManager) SetServiceManager(serviceManager service.Manager) { // Mock implementation - no-op } diff --git a/management/server/networks/resources/manager.go b/management/server/networks/resources/manager.go index 843ca93e5..86f9b6579 100644 --- a/management/server/networks/resources/manager.go +++ b/management/server/networks/resources/manager.go @@ -7,7 +7,7 @@ import ( log "github.com/sirupsen/logrus" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/management/server/account" "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/groups" @@ -33,23 +33,23 @@ type Manager interface { } type managerImpl struct { - store store.Store - permissionsManager permissions.Manager - groupsManager groups.Manager - accountManager account.Manager - reverseProxyManager reverseproxy.Manager + store store.Store + permissionsManager permissions.Manager + groupsManager groups.Manager + accountManager account.Manager + serviceManager service.Manager } type mockManager struct { } -func NewManager(store store.Store, permissionsManager permissions.Manager, groupsManager groups.Manager, accountManager account.Manager, reverseproxyManager reverseproxy.Manager) Manager { +func NewManager(store store.Store, permissionsManager permissions.Manager, groupsManager groups.Manager, accountManager account.Manager, reverseproxyManager service.Manager) Manager { return &managerImpl{ - store: store, - permissionsManager: permissionsManager, - groupsManager: groupsManager, - accountManager: accountManager, - reverseProxyManager: reverseproxyManager, + store: store, + permissionsManager: permissionsManager, + groupsManager: groupsManager, + accountManager: accountManager, + serviceManager: reverseproxyManager, } } @@ -264,7 +264,7 @@ func (m *managerImpl) UpdateResource(ctx context.Context, userID string, resourc // TODO: optimize to only reload reverse proxies that are affected by the resource update instead of all of them go func() { - err := m.reverseProxyManager.ReloadAllServicesForAccount(ctx, resource.AccountID) + err := m.serviceManager.ReloadAllServicesForAccount(ctx, resource.AccountID) if err != nil { log.WithContext(ctx).Warnf("failed to reload all proxies for account: %v", err) } @@ -322,7 +322,7 @@ func (m *managerImpl) DeleteResource(ctx context.Context, accountID, userID, net return status.NewPermissionDeniedError() } - serviceID, err := m.reverseProxyManager.GetServiceIDByTargetID(ctx, accountID, resourceID) + serviceID, err := m.serviceManager.GetServiceIDByTargetID(ctx, accountID, resourceID) if err != nil { return fmt.Errorf("failed to check if resource is used by service: %w", err) } diff --git a/management/server/networks/resources/manager_test.go b/management/server/networks/resources/manager_test.go index 99de484e5..c6d8e7bcc 100644 --- a/management/server/networks/resources/manager_test.go +++ b/management/server/networks/resources/manager_test.go @@ -7,7 +7,7 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + reverseproxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/management/server/groups" "github.com/netbirdio/netbird/management/server/mock_server" "github.com/netbirdio/netbird/management/server/networks/resources/types" @@ -31,8 +31,8 @@ func Test_GetAllResourcesInNetworkReturnsResources(t *testing.T) { am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() ctrl := gomock.NewController(t) - reverseProxyManager := reverseproxy.NewMockManager(ctrl) - manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) + serviceManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager) resources, err := manager.GetAllResourcesInNetwork(ctx, accountID, userID, networkID) require.NoError(t, err) @@ -54,8 +54,8 @@ func Test_GetAllResourcesInNetworkReturnsPermissionDenied(t *testing.T) { am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() ctrl := gomock.NewController(t) - reverseProxyManager := reverseproxy.NewMockManager(ctrl) - manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) + serviceManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager) resources, err := manager.GetAllResourcesInNetwork(ctx, accountID, userID, networkID) require.Error(t, err) @@ -76,8 +76,8 @@ func Test_GetAllResourcesInAccountReturnsResources(t *testing.T) { am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() ctrl := gomock.NewController(t) - reverseProxyManager := reverseproxy.NewMockManager(ctrl) - manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) + serviceManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager) resources, err := manager.GetAllResourcesInAccount(ctx, accountID, userID) require.NoError(t, err) @@ -98,8 +98,8 @@ func Test_GetAllResourcesInAccountReturnsPermissionDenied(t *testing.T) { am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() ctrl := gomock.NewController(t) - reverseProxyManager := reverseproxy.NewMockManager(ctrl) - manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) + serviceManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager) resources, err := manager.GetAllResourcesInAccount(ctx, accountID, userID) require.Error(t, err) @@ -123,8 +123,8 @@ func Test_GetResourceInNetworkReturnsResources(t *testing.T) { am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() ctrl := gomock.NewController(t) - reverseProxyManager := reverseproxy.NewMockManager(ctrl) - manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) + serviceManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager) resource, err := manager.GetResource(ctx, accountID, userID, networkID, resourceID) require.NoError(t, err) @@ -147,8 +147,8 @@ func Test_GetResourceInNetworkReturnsPermissionDenied(t *testing.T) { am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() ctrl := gomock.NewController(t) - reverseProxyManager := reverseproxy.NewMockManager(ctrl) - manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) + serviceManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager) resources, err := manager.GetResource(ctx, accountID, userID, networkID, resourceID) require.Error(t, err) @@ -176,9 +176,9 @@ func Test_CreateResourceSuccessfully(t *testing.T) { am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() ctrl := gomock.NewController(t) - reverseProxyManager := reverseproxy.NewMockManager(ctrl) - reverseProxyManager.EXPECT().ReloadAllServicesForAccount(gomock.Any(), resource.AccountID).Return(nil).AnyTimes() - manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) + serviceManager := reverseproxy.NewMockManager(ctrl) + serviceManager.EXPECT().ReloadAllServicesForAccount(gomock.Any(), resource.AccountID).Return(nil).AnyTimes() + manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager) createdResource, err := manager.CreateResource(ctx, userID, resource) require.NoError(t, err) @@ -205,8 +205,8 @@ func Test_CreateResourceFailsWithPermissionDenied(t *testing.T) { am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() ctrl := gomock.NewController(t) - reverseProxyManager := reverseproxy.NewMockManager(ctrl) - manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) + serviceManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager) createdResource, err := manager.CreateResource(ctx, userID, resource) require.Error(t, err) @@ -234,8 +234,8 @@ func Test_CreateResourceFailsWithInvalidAddress(t *testing.T) { am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() ctrl := gomock.NewController(t) - reverseProxyManager := reverseproxy.NewMockManager(ctrl) - manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) + serviceManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager) createdResource, err := manager.CreateResource(ctx, userID, resource) require.Error(t, err) @@ -262,8 +262,8 @@ func Test_CreateResourceFailsWithUsedName(t *testing.T) { am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() ctrl := gomock.NewController(t) - reverseProxyManager := reverseproxy.NewMockManager(ctrl) - manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) + serviceManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager) createdResource, err := manager.CreateResource(ctx, userID, resource) require.Error(t, err) @@ -294,9 +294,9 @@ func Test_UpdateResourceSuccessfully(t *testing.T) { am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() ctrl := gomock.NewController(t) - reverseProxyManager := reverseproxy.NewMockManager(ctrl) - reverseProxyManager.EXPECT().ReloadAllServicesForAccount(gomock.Any(), accountID).Return(nil).AnyTimes() - manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) + serviceManager := reverseproxy.NewMockManager(ctrl) + serviceManager.EXPECT().ReloadAllServicesForAccount(gomock.Any(), accountID).Return(nil).AnyTimes() + manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager) updatedResource, err := manager.UpdateResource(ctx, userID, resource) require.NoError(t, err) @@ -329,8 +329,8 @@ func Test_UpdateResourceFailsWithResourceNotFound(t *testing.T) { am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() ctrl := gomock.NewController(t) - reverseProxyManager := reverseproxy.NewMockManager(ctrl) - manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) + serviceManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager) updatedResource, err := manager.UpdateResource(ctx, userID, resource) require.Error(t, err) @@ -361,8 +361,8 @@ func Test_UpdateResourceFailsWithNameInUse(t *testing.T) { am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() ctrl := gomock.NewController(t) - reverseProxyManager := reverseproxy.NewMockManager(ctrl) - manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) + serviceManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager) updatedResource, err := manager.UpdateResource(ctx, userID, resource) require.Error(t, err) @@ -392,8 +392,8 @@ func Test_UpdateResourceFailsWithPermissionDenied(t *testing.T) { am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() ctrl := gomock.NewController(t) - reverseProxyManager := reverseproxy.NewMockManager(ctrl) - manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) + serviceManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager) updatedResource, err := manager.UpdateResource(ctx, userID, resource) require.Error(t, err) @@ -416,9 +416,9 @@ func Test_DeleteResourceSuccessfully(t *testing.T) { am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() ctrl := gomock.NewController(t) - reverseProxyManager := reverseproxy.NewMockManager(ctrl) - reverseProxyManager.EXPECT().GetServiceIDByTargetID(gomock.Any(), accountID, resourceID).Return("", nil).AnyTimes() - manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) + serviceManager := reverseproxy.NewMockManager(ctrl) + serviceManager.EXPECT().GetServiceIDByTargetID(gomock.Any(), accountID, resourceID).Return("", nil).AnyTimes() + manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager) err = manager.DeleteResource(ctx, accountID, userID, networkID, resourceID) require.NoError(t, err) @@ -440,8 +440,8 @@ func Test_DeleteResourceFailsWithPermissionDenied(t *testing.T) { am := mock_server.MockAccountManager{} groupsManager := groups.NewManagerMock() ctrl := gomock.NewController(t) - reverseProxyManager := reverseproxy.NewMockManager(ctrl) - manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager) + serviceManager := reverseproxy.NewMockManager(ctrl) + manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager) err = manager.DeleteResource(ctx, accountID, userID, networkID, resourceID) require.Error(t, err) diff --git a/management/server/peer.go b/management/server/peer.go index a2ca97208..78ecbfcae 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -493,7 +493,7 @@ func (am *DefaultAccountManager) DeletePeer(ctx context.Context, accountID, peer var settings *types.Settings var eventsToStore []func() - serviceID, err := am.reverseProxyManager.GetServiceIDByTargetID(ctx, accountID, peerID) + serviceID, err := am.serviceManager.GetServiceIDByTargetID(ctx, accountID, peerID) if err != nil { return fmt.Errorf("failed to check if resource is used by service: %w", err) } diff --git a/management/server/peer/peer.go b/management/server/peer/peer.go index 269b30822..db392ddda 100644 --- a/management/server/peer/peer.go +++ b/management/server/peer/peer.go @@ -352,9 +352,10 @@ func (p *Peer) FromAPITemporaryAccessRequest(a *api.PeerTemporaryAccessRequest) p.Name = a.Name p.Key = a.WgPubKey p.Meta = PeerSystemMeta{ - Hostname: a.Name, - GoOS: "js", - OS: "js", + Hostname: a.Name, + GoOS: "js", + OS: "js", + KernelVersion: "wasm", } } diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 04045f226..41c53980b 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -28,9 +28,10 @@ import ( "gorm.io/gorm/logger" nbdns "github.com/netbirdio/netbird/dns" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" + rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/management/internals/modules/zones" "github.com/netbirdio/netbird/management/internals/modules/zones/records" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" @@ -131,8 +132,8 @@ func NewSqlStore(ctx context.Context, db *gorm.DB, storeEngine types.Engine, met &types.Account{}, &types.Policy{}, &types.PolicyRule{}, &route.Route{}, &nbdns.NameServerGroup{}, &installation{}, &types.ExtraSettings{}, &posture.Checks{}, &nbpeer.NetworkAddress{}, &networkTypes.Network{}, &routerTypes.NetworkRouter{}, &resourceTypes.NetworkResource{}, &types.AccountOnboarding{}, - &types.Job{}, &zones.Zone{}, &records.Record{}, &types.UserInviteRecord{}, &reverseproxy.Service{}, &reverseproxy.Target{}, &domain.Domain{}, - &accesslogs.AccessLogEntry{}, + &types.Job{}, &zones.Zone{}, &records.Record{}, &types.UserInviteRecord{}, &rpservice.Service{}, &rpservice.Target{}, &domain.Domain{}, + &accesslogs.AccessLogEntry{}, &proxy.Proxy{}, ) if err != nil { return nil, fmt.Errorf("auto migratePreAuto: %w", err) @@ -2075,7 +2076,7 @@ func (s *SqlStore) getPostureChecks(ctx context.Context, accountID string) ([]*p return checks, nil } -func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*reverseproxy.Service, error) { +func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpservice.Service, error) { const serviceQuery = `SELECT id, account_id, name, domain, enabled, auth, meta_created_at, meta_certificate_issued_at, meta_status, proxy_cluster, pass_host_header, rewrite_redirects, session_private_key, session_public_key @@ -2090,8 +2091,8 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*revers return nil, err } - services, err := pgx.CollectRows(serviceRows, func(row pgx.CollectableRow) (*reverseproxy.Service, error) { - var s reverseproxy.Service + services, err := pgx.CollectRows(serviceRows, func(row pgx.CollectableRow) (*rpservice.Service, error) { + var s rpservice.Service var auth []byte var createdAt, certIssuedAt sql.NullTime var status, proxyCluster, sessionPrivateKey, sessionPublicKey sql.NullString @@ -2121,7 +2122,7 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*revers } } - s.Meta = reverseproxy.ServiceMeta{} + s.Meta = rpservice.Meta{} if createdAt.Valid { s.Meta.CreatedAt = createdAt.Time } @@ -2142,7 +2143,7 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*revers s.SessionPublicKey = sessionPublicKey.String } - s.Targets = []*reverseproxy.Target{} + s.Targets = []*rpservice.Target{} return &s, nil }) if err != nil { @@ -2154,7 +2155,7 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*revers } serviceIDs := make([]string, len(services)) - serviceMap := make(map[string]*reverseproxy.Service) + serviceMap := make(map[string]*rpservice.Service) for i, s := range services { serviceIDs[i] = s.ID serviceMap[s.ID] = s @@ -2165,8 +2166,8 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*revers return nil, err } - targets, err := pgx.CollectRows(targetRows, func(row pgx.CollectableRow) (*reverseproxy.Target, error) { - var t reverseproxy.Target + targets, err := pgx.CollectRows(targetRows, func(row pgx.CollectableRow) (*rpservice.Target, error) { + var t rpservice.Target var path sql.NullString err := row.Scan( &t.ID, @@ -4852,7 +4853,7 @@ func (s *SqlStore) GetPeerIDByKey(ctx context.Context, lockStrength LockingStren return peerID, nil } -func (s *SqlStore) CreateService(ctx context.Context, service *reverseproxy.Service) error { +func (s *SqlStore) CreateService(ctx context.Context, service *rpservice.Service) error { serviceCopy := service.Copy() if err := serviceCopy.EncryptSensitiveData(s.fieldEncrypt); err != nil { return fmt.Errorf("encrypt service data: %w", err) @@ -4866,16 +4867,19 @@ func (s *SqlStore) CreateService(ctx context.Context, service *reverseproxy.Serv return nil } -func (s *SqlStore) UpdateService(ctx context.Context, service *reverseproxy.Service) error { +func (s *SqlStore) UpdateService(ctx context.Context, service *rpservice.Service) error { serviceCopy := service.Copy() if err := serviceCopy.EncryptSensitiveData(s.fieldEncrypt); err != nil { return fmt.Errorf("encrypt service data: %w", err) } + // Create target type instance outside transaction to avoid variable shadowing + targetType := &rpservice.Target{} + // Use a transaction to ensure atomic updates of the service and its targets err := s.db.Transaction(func(tx *gorm.DB) error { // Delete existing targets - if err := tx.Where("service_id = ?", serviceCopy.ID).Delete(&reverseproxy.Target{}).Error; err != nil { + if err := tx.Where("service_id = ?", serviceCopy.ID).Delete(targetType).Error; err != nil { return err } @@ -4896,7 +4900,7 @@ func (s *SqlStore) UpdateService(ctx context.Context, service *reverseproxy.Serv } func (s *SqlStore) DeleteService(ctx context.Context, accountID, serviceID string) error { - result := s.db.Delete(&reverseproxy.Service{}, accountAndIDQueryCondition, accountID, serviceID) + result := s.db.Delete(&rpservice.Service{}, accountAndIDQueryCondition, accountID, serviceID) if result.Error != nil { log.WithContext(ctx).Errorf("failed to delete service from store: %v", result.Error) return status.Errorf(status.Internal, "failed to delete service from store") @@ -4910,7 +4914,7 @@ func (s *SqlStore) DeleteService(ctx context.Context, accountID, serviceID strin } func (s *SqlStore) DeleteTarget(ctx context.Context, accountID string, serviceID string, targetID uint) error { - result := s.db.Delete(&reverseproxy.Target{}, "account_id = ? AND service_id = ? AND id = ?", accountID, serviceID, targetID) + result := s.db.Delete(&rpservice.Target{}, "account_id = ? AND service_id = ? AND id = ?", accountID, serviceID, targetID) if result.Error != nil { log.WithContext(ctx).Errorf("failed to delete target from store: %v", result.Error) return status.Errorf(status.Internal, "failed to delete target from store") @@ -4924,7 +4928,7 @@ func (s *SqlStore) DeleteTarget(ctx context.Context, accountID string, serviceID } func (s *SqlStore) DeleteServiceTargets(ctx context.Context, accountID string, serviceID string) error { - result := s.db.Delete(&reverseproxy.Target{}, "account_id = ? AND service_id = ?", accountID, serviceID) + result := s.db.Delete(&rpservice.Target{}, "account_id = ? AND service_id = ?", accountID, serviceID) if result.Error != nil { log.WithContext(ctx).Errorf("failed to delete targets from store: %v", result.Error) return status.Errorf(status.Internal, "failed to delete targets from store") @@ -4934,8 +4938,8 @@ func (s *SqlStore) DeleteServiceTargets(ctx context.Context, accountID string, s } // GetTargetsByServiceID retrieves all targets for a given service -func (s *SqlStore) GetTargetsByServiceID(ctx context.Context, lockStrength LockingStrength, accountID string, serviceID string) ([]*reverseproxy.Target, error) { - var targets []*reverseproxy.Target +func (s *SqlStore) GetTargetsByServiceID(ctx context.Context, lockStrength LockingStrength, accountID string, serviceID string) ([]*rpservice.Target, error) { + var targets []*rpservice.Target tx := s.db if lockStrength != LockingStrengthNone { tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) @@ -4949,13 +4953,13 @@ func (s *SqlStore) GetTargetsByServiceID(ctx context.Context, lockStrength Locki return targets, nil } -func (s *SqlStore) GetServiceByID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) (*reverseproxy.Service, error) { +func (s *SqlStore) GetServiceByID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) (*rpservice.Service, error) { tx := s.db.Preload("Targets") if lockStrength != LockingStrengthNone { tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) } - var service *reverseproxy.Service + var service *rpservice.Service result := tx.Take(&service, accountAndIDQueryCondition, accountID, serviceID) if result.Error != nil { if errors.Is(result.Error, gorm.ErrRecordNotFound) { @@ -4973,30 +4977,8 @@ func (s *SqlStore) GetServiceByID(ctx context.Context, lockStrength LockingStren return service, nil } -func (s *SqlStore) GetServicesByAccountID(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) { - tx := s.db.Preload("Targets") - if lockStrength != LockingStrengthNone { - tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) - } - - var serviceList []*reverseproxy.Service - result := tx.Find(&serviceList, accountIDCondition, accountID) - if result.Error != nil { - log.WithContext(ctx).Errorf("failed to get services from the store: %s", result.Error) - return nil, status.Errorf(status.Internal, "failed to get services from store") - } - - for _, service := range serviceList { - if err := service.DecryptSensitiveData(s.fieldEncrypt); err != nil { - return nil, fmt.Errorf("decrypt service data: %w", err) - } - } - - return serviceList, nil -} - -func (s *SqlStore) GetServiceByDomain(ctx context.Context, accountID, domain string) (*reverseproxy.Service, error) { - var service *reverseproxy.Service +func (s *SqlStore) GetServiceByDomain(ctx context.Context, accountID, domain string) (*rpservice.Service, error) { + var service *rpservice.Service result := s.db.Preload("Targets").Where("account_id = ? AND domain = ?", accountID, domain).First(&service) if result.Error != nil { if errors.Is(result.Error, gorm.ErrRecordNotFound) { @@ -5014,13 +4996,13 @@ func (s *SqlStore) GetServiceByDomain(ctx context.Context, accountID, domain str return service, nil } -func (s *SqlStore) GetServices(ctx context.Context, lockStrength LockingStrength) ([]*reverseproxy.Service, error) { +func (s *SqlStore) GetServices(ctx context.Context, lockStrength LockingStrength) ([]*rpservice.Service, error) { tx := s.db.Preload("Targets") if lockStrength != LockingStrengthNone { tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) } - var serviceList []*reverseproxy.Service + var serviceList []*rpservice.Service result := tx.Find(&serviceList) if result.Error != nil { log.WithContext(ctx).Errorf("failed to get services from the store: %s", result.Error) @@ -5036,13 +5018,13 @@ func (s *SqlStore) GetServices(ctx context.Context, lockStrength LockingStrength return serviceList, nil } -func (s *SqlStore) GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) { +func (s *SqlStore) GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*rpservice.Service, error) { tx := s.db.Preload("Targets") if lockStrength != LockingStrengthNone { tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) } - var serviceList []*reverseproxy.Service + var serviceList []*rpservice.Service result := tx.Find(&serviceList, accountIDCondition, accountID) if result.Error != nil { log.WithContext(ctx).Errorf("failed to get services from the store: %s", result.Error) @@ -5270,13 +5252,13 @@ func (s *SqlStore) applyAccessLogFilters(query *gorm.DB, filter accesslogs.Acces return query } -func (s *SqlStore) GetServiceTargetByTargetID(ctx context.Context, lockStrength LockingStrength, accountID string, targetID string) (*reverseproxy.Target, error) { +func (s *SqlStore) GetServiceTargetByTargetID(ctx context.Context, lockStrength LockingStrength, accountID string, targetID string) (*rpservice.Target, error) { tx := s.db if lockStrength != LockingStrengthNone { tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) } - var target *reverseproxy.Target + var target *rpservice.Target result := tx.Take(&target, "account_id = ? AND target_id = ?", accountID, targetID) if result.Error != nil { if errors.Is(result.Error, gorm.ErrRecordNotFound) { @@ -5289,3 +5271,65 @@ func (s *SqlStore) GetServiceTargetByTargetID(ctx context.Context, lockStrength return target, nil } + +// SaveProxy saves or updates a proxy in the database +func (s *SqlStore) SaveProxy(ctx context.Context, p *proxy.Proxy) error { + result := s.db.WithContext(ctx).Save(p) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to save proxy: %v", result.Error) + return status.Errorf(status.Internal, "failed to save proxy") + } + return nil +} + +// UpdateProxyHeartbeat updates the last_seen timestamp for a proxy +func (s *SqlStore) UpdateProxyHeartbeat(ctx context.Context, proxyID string) error { + result := s.db.WithContext(ctx). + Model(&proxy.Proxy{}). + Where("id = ? AND status = ?", proxyID, "connected"). + Update("last_seen", time.Now()) + + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to update proxy heartbeat: %v", result.Error) + return status.Errorf(status.Internal, "failed to update proxy heartbeat") + } + return nil +} + +// GetActiveProxyClusterAddresses returns all unique cluster addresses for active proxies +func (s *SqlStore) GetActiveProxyClusterAddresses(ctx context.Context) ([]string, error) { + var addresses []string + + result := s.db.WithContext(ctx). + Model(&proxy.Proxy{}). + Where("status = ? AND last_seen > ?", "connected", time.Now().Add(-2*time.Minute)). + Distinct("cluster_address"). + Pluck("cluster_address", &addresses) + + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to get active proxy cluster addresses: %v", result.Error) + return nil, status.Errorf(status.Internal, "failed to get active proxy cluster addresses") + } + + return addresses, nil +} + +// CleanupStaleProxies deletes proxies that haven't sent heartbeat in the specified duration +func (s *SqlStore) CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error { + cutoffTime := time.Now().Add(-inactivityDuration) + + result := s.db.WithContext(ctx). + Where("last_seen < ?", cutoffTime). + Delete(&proxy.Proxy{}) + + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to cleanup stale proxies: %v", result.Error) + return status.Errorf(status.Internal, "failed to cleanup stale proxies") + } + + if result.RowsAffected > 0 { + log.WithContext(ctx).Infof("Cleaned up %d stale proxies", result.RowsAffected) + } + + return nil +} diff --git a/management/server/store/sqlstore_bench_test.go b/management/server/store/sqlstore_bench_test.go index fa9a9dbf5..f2abafceb 100644 --- a/management/server/store/sqlstore_bench_test.go +++ b/management/server/store/sqlstore_bench_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/assert" nbdns "github.com/netbirdio/netbird/dns" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" networkTypes "github.com/netbirdio/netbird/management/server/networks/types" @@ -264,7 +264,7 @@ func setupBenchmarkDB(b testing.TB) (*SqlStore, func(), string) { &types.Policy{}, &types.PolicyRule{}, &route.Route{}, &nbdns.NameServerGroup{}, &posture.Checks{}, &networkTypes.Network{}, &routerTypes.NetworkRouter{}, &resourceTypes.NetworkResource{}, - &types.AccountOnboarding{}, &reverseproxy.Service{}, &reverseproxy.Target{}, + &types.AccountOnboarding{}, &service.Service{}, &service.Target{}, } for i := len(models) - 1; i >= 0; i-- { diff --git a/management/server/store/store.go b/management/server/store/store.go index 9e982f70b..941aca08a 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -25,9 +25,10 @@ import ( "gorm.io/gorm" "github.com/netbirdio/netbird/dns" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" + rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/management/internals/modules/zones" "github.com/netbirdio/netbird/management/internals/modules/zones/records" "github.com/netbirdio/netbird/management/server/telemetry" @@ -252,14 +253,13 @@ type Store interface { MarkAllPendingJobsAsFailed(ctx context.Context, accountID, peerID, reason string) error GetPeerIDByKey(ctx context.Context, lockStrength LockingStrength, key string) (string, error) - CreateService(ctx context.Context, service *reverseproxy.Service) error - UpdateService(ctx context.Context, service *reverseproxy.Service) error + CreateService(ctx context.Context, service *rpservice.Service) error + UpdateService(ctx context.Context, service *rpservice.Service) error DeleteService(ctx context.Context, accountID, serviceID string) error - GetServiceByID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) (*reverseproxy.Service, error) - GetServicesByAccountID(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) - GetServiceByDomain(ctx context.Context, accountID, domain string) (*reverseproxy.Service, error) - GetServices(ctx context.Context, lockStrength LockingStrength) ([]*reverseproxy.Service, error) - GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) + GetServiceByID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) (*rpservice.Service, error) + GetServiceByDomain(ctx context.Context, accountID, domain string) (*rpservice.Service, error) + GetServices(ctx context.Context, lockStrength LockingStrength) ([]*rpservice.Service, error) + GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*rpservice.Service, error) GetCustomDomain(ctx context.Context, accountID string, domainID string) (*domain.Domain, error) ListFreeDomains(ctx context.Context, accountID string) ([]string, error) @@ -271,12 +271,16 @@ type Store interface { CreateAccessLog(ctx context.Context, log *accesslogs.AccessLogEntry) error GetAccountAccessLogs(ctx context.Context, lockStrength LockingStrength, accountID string, filter accesslogs.AccessLogFilter) ([]*accesslogs.AccessLogEntry, int64, error) DeleteOldAccessLogs(ctx context.Context, olderThan time.Time) (int64, error) - GetServiceTargetByTargetID(ctx context.Context, lockStrength LockingStrength, accountID string, targetID string) (*reverseproxy.Target, error) - GetTargetsByServiceID(ctx context.Context, lockStrength LockingStrength, accountID string, serviceID string) ([]*reverseproxy.Target, error) + GetServiceTargetByTargetID(ctx context.Context, lockStrength LockingStrength, accountID string, targetID string) (*rpservice.Target, error) + GetTargetsByServiceID(ctx context.Context, lockStrength LockingStrength, accountID string, serviceID string) ([]*rpservice.Target, error) DeleteTarget(ctx context.Context, accountID string, serviceID string, targetID uint) error DeleteServiceTargets(ctx context.Context, accountID string, serviceID string) error - // GetCustomDomainsCounts returns the total and validated custom domain counts. + SaveProxy(ctx context.Context, proxy *proxy.Proxy) error + UpdateProxyHeartbeat(ctx context.Context, proxyID string) error + GetActiveProxyClusterAddresses(ctx context.Context) ([]string, error) + CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error + GetCustomDomainsCounts(ctx context.Context) (total int64, validated int64, err error) } diff --git a/management/server/store/store_mock.go b/management/server/store/store_mock.go index 682ecc4d8..9e11f85fb 100644 --- a/management/server/store/store_mock.go +++ b/management/server/store/store_mock.go @@ -12,9 +12,10 @@ import ( gomock "github.com/golang/mock/gomock" dns "github.com/netbirdio/netbird/dns" - reverseproxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" accesslogs "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" domain "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" + proxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" + service "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" zones "github.com/netbirdio/netbird/management/internals/modules/zones" records "github.com/netbirdio/netbird/management/internals/modules/zones/records" types "github.com/netbirdio/netbird/management/server/networks/resources/types" @@ -150,6 +151,20 @@ func (mr *MockStoreMockRecorder) ApproveAccountPeers(ctx, accountID interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApproveAccountPeers", reflect.TypeOf((*MockStore)(nil).ApproveAccountPeers), ctx, accountID) } +// CleanupStaleProxies mocks base method. +func (m *MockStore) CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CleanupStaleProxies", ctx, inactivityDuration) + ret0, _ := ret[0].(error) + return ret0 +} + +// CleanupStaleProxies indicates an expected call of CleanupStaleProxies. +func (mr *MockStoreMockRecorder) CleanupStaleProxies(ctx, inactivityDuration interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupStaleProxies", reflect.TypeOf((*MockStore)(nil).CleanupStaleProxies), ctx, inactivityDuration) +} + // Close mocks base method. func (m *MockStore) Close(ctx context.Context) error { m.ctrl.T.Helper() @@ -293,7 +308,7 @@ func (mr *MockStoreMockRecorder) CreatePolicy(ctx, policy interface{}) *gomock.C } // CreateService mocks base method. -func (m *MockStore) CreateService(ctx context.Context, service *reverseproxy.Service) error { +func (m *MockStore) CreateService(ctx context.Context, service *service.Service) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateService", ctx, service) ret0, _ := ret[0].(error) @@ -1123,10 +1138,10 @@ func (mr *MockStoreMockRecorder) GetAccountRoutes(ctx, lockStrength, accountID i } // GetAccountServices mocks base method. -func (m *MockStore) GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) { +func (m *MockStore) GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*service.Service, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetAccountServices", ctx, lockStrength, accountID) - ret0, _ := ret[0].([]*reverseproxy.Service) + ret0, _ := ret[0].([]*service.Service) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -1227,6 +1242,21 @@ func (mr *MockStoreMockRecorder) GetAccountsCounter(ctx interface{}) *gomock.Cal return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountsCounter", reflect.TypeOf((*MockStore)(nil).GetAccountsCounter), ctx) } +// GetActiveProxyClusterAddresses mocks base method. +func (m *MockStore) GetActiveProxyClusterAddresses(ctx context.Context) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActiveProxyClusterAddresses", ctx) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetActiveProxyClusterAddresses indicates an expected call of GetActiveProxyClusterAddresses. +func (mr *MockStoreMockRecorder) GetActiveProxyClusterAddresses(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActiveProxyClusterAddresses", reflect.TypeOf((*MockStore)(nil).GetActiveProxyClusterAddresses), ctx) +} + // GetAllAccounts mocks base method. func (m *MockStore) GetAllAccounts(ctx context.Context) []*types2.Account { m.ctrl.T.Helper() @@ -1857,10 +1887,10 @@ func (mr *MockStoreMockRecorder) GetRouteByID(ctx, lockStrength, accountID, rout } // GetServiceByDomain mocks base method. -func (m *MockStore) GetServiceByDomain(ctx context.Context, accountID, domain string) (*reverseproxy.Service, error) { +func (m *MockStore) GetServiceByDomain(ctx context.Context, accountID, domain string) (*service.Service, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetServiceByDomain", ctx, accountID, domain) - ret0, _ := ret[0].(*reverseproxy.Service) + ret0, _ := ret[0].(*service.Service) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -1872,10 +1902,10 @@ func (mr *MockStoreMockRecorder) GetServiceByDomain(ctx, accountID, domain inter } // GetServiceByID mocks base method. -func (m *MockStore) GetServiceByID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) (*reverseproxy.Service, error) { +func (m *MockStore) GetServiceByID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) (*service.Service, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetServiceByID", ctx, lockStrength, accountID, serviceID) - ret0, _ := ret[0].(*reverseproxy.Service) + ret0, _ := ret[0].(*service.Service) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -1887,10 +1917,10 @@ func (mr *MockStoreMockRecorder) GetServiceByID(ctx, lockStrength, accountID, se } // GetServiceTargetByTargetID mocks base method. -func (m *MockStore) GetServiceTargetByTargetID(ctx context.Context, lockStrength LockingStrength, accountID, targetID string) (*reverseproxy.Target, error) { +func (m *MockStore) GetServiceTargetByTargetID(ctx context.Context, lockStrength LockingStrength, accountID, targetID string) (*service.Target, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetServiceTargetByTargetID", ctx, lockStrength, accountID, targetID) - ret0, _ := ret[0].(*reverseproxy.Target) + ret0, _ := ret[0].(*service.Target) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -1902,10 +1932,10 @@ func (mr *MockStoreMockRecorder) GetServiceTargetByTargetID(ctx, lockStrength, a } // GetServices mocks base method. -func (m *MockStore) GetServices(ctx context.Context, lockStrength LockingStrength) ([]*reverseproxy.Service, error) { +func (m *MockStore) GetServices(ctx context.Context, lockStrength LockingStrength) ([]*service.Service, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetServices", ctx, lockStrength) - ret0, _ := ret[0].([]*reverseproxy.Service) + ret0, _ := ret[0].([]*service.Service) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -1916,21 +1946,6 @@ func (mr *MockStoreMockRecorder) GetServices(ctx, lockStrength interface{}) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServices", reflect.TypeOf((*MockStore)(nil).GetServices), ctx, lockStrength) } -// GetServicesByAccountID mocks base method. -func (m *MockStore) GetServicesByAccountID(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetServicesByAccountID", ctx, lockStrength, accountID) - ret0, _ := ret[0].([]*reverseproxy.Service) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetServicesByAccountID indicates an expected call of GetServicesByAccountID. -func (mr *MockStoreMockRecorder) GetServicesByAccountID(ctx, lockStrength, accountID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServicesByAccountID", reflect.TypeOf((*MockStore)(nil).GetServicesByAccountID), ctx, lockStrength, accountID) -} - // GetSetupKeyByID mocks base method. func (m *MockStore) GetSetupKeyByID(ctx context.Context, lockStrength LockingStrength, accountID, setupKeyID string) (*types2.SetupKey, error) { m.ctrl.T.Helper() @@ -1991,10 +2006,10 @@ func (mr *MockStoreMockRecorder) GetTakenIPs(ctx, lockStrength, accountId interf } // GetTargetsByServiceID mocks base method. -func (m *MockStore) GetTargetsByServiceID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) ([]*reverseproxy.Target, error) { +func (m *MockStore) GetTargetsByServiceID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) ([]*service.Target, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetTargetsByServiceID", ctx, lockStrength, accountID, serviceID) - ret0, _ := ret[0].([]*reverseproxy.Target) + ret0, _ := ret[0].([]*service.Target) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -2610,6 +2625,20 @@ func (mr *MockStoreMockRecorder) SavePostureChecks(ctx, postureCheck interface{} return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SavePostureChecks", reflect.TypeOf((*MockStore)(nil).SavePostureChecks), ctx, postureCheck) } +// SaveProxy mocks base method. +func (m *MockStore) SaveProxy(ctx context.Context, proxy *proxy.Proxy) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveProxy", ctx, proxy) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveProxy indicates an expected call of SaveProxy. +func (mr *MockStoreMockRecorder) SaveProxy(ctx, proxy interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveProxy", reflect.TypeOf((*MockStore)(nil).SaveProxy), ctx, proxy) +} + // SaveProxyAccessToken mocks base method. func (m *MockStore) SaveProxyAccessToken(ctx context.Context, token *types2.ProxyAccessToken) error { m.ctrl.T.Helper() @@ -2805,8 +2834,22 @@ func (mr *MockStoreMockRecorder) UpdateGroups(ctx, accountID, groups interface{} return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateGroups", reflect.TypeOf((*MockStore)(nil).UpdateGroups), ctx, accountID, groups) } +// UpdateProxyHeartbeat mocks base method. +func (m *MockStore) UpdateProxyHeartbeat(ctx context.Context, proxyID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateProxyHeartbeat", ctx, proxyID) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateProxyHeartbeat indicates an expected call of UpdateProxyHeartbeat. +func (mr *MockStoreMockRecorder) UpdateProxyHeartbeat(ctx, proxyID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateProxyHeartbeat", reflect.TypeOf((*MockStore)(nil).UpdateProxyHeartbeat), ctx, proxyID) +} + // UpdateService mocks base method. -func (m *MockStore) UpdateService(ctx context.Context, service *reverseproxy.Service) error { +func (m *MockStore) UpdateService(ctx context.Context, service *service.Service) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "UpdateService", ctx, service) ret0, _ := ret[0].(error) diff --git a/management/server/types/account.go b/management/server/types/account.go index 3208cc89a..6145ceeb2 100644 --- a/management/server/types/account.go +++ b/management/server/types/account.go @@ -18,7 +18,7 @@ import ( "github.com/netbirdio/netbird/client/ssh/auth" nbdns "github.com/netbirdio/netbird/dns" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/management/internals/modules/zones" "github.com/netbirdio/netbird/management/internals/modules/zones/records" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" @@ -100,7 +100,7 @@ type Account struct { NameServerGroupsG []nbdns.NameServerGroup `json:"-" gorm:"foreignKey:AccountID;references:id"` DNSSettings DNSSettings `gorm:"embedded;embeddedPrefix:dns_settings_"` PostureChecks []*posture.Checks `gorm:"foreignKey:AccountID;references:id"` - Services []*reverseproxy.Service `gorm:"foreignKey:AccountID;references:id"` + Services []*service.Service `gorm:"foreignKey:AccountID;references:id"` // Settings is a dictionary of Account settings Settings *Settings `gorm:"embedded;embeddedPrefix:settings_"` Networks []*networkTypes.Network `gorm:"foreignKey:AccountID;references:id"` @@ -906,7 +906,7 @@ func (a *Account) Copy() *Account { networkResources = append(networkResources, resource.Copy()) } - services := []*reverseproxy.Service{} + services := []*service.Service{} for _, service := range a.Services { services = append(services, service.Copy()) } @@ -1814,7 +1814,7 @@ func (a *Account) InjectProxyPolicies(ctx context.Context) { } } -func (a *Account) injectServiceProxyPolicies(ctx context.Context, service *reverseproxy.Service, proxyPeersByCluster map[string][]*nbpeer.Peer) { +func (a *Account) injectServiceProxyPolicies(ctx context.Context, service *service.Service, proxyPeersByCluster map[string][]*nbpeer.Peer) { for _, target := range service.Targets { if !target.Enabled { continue @@ -1823,7 +1823,7 @@ func (a *Account) injectServiceProxyPolicies(ctx context.Context, service *rever } } -func (a *Account) injectTargetProxyPolicies(ctx context.Context, service *reverseproxy.Service, target *reverseproxy.Target, proxyPeers []*nbpeer.Peer) { +func (a *Account) injectTargetProxyPolicies(ctx context.Context, service *service.Service, target *service.Target, proxyPeers []*nbpeer.Peer) { port, ok := a.resolveTargetPort(ctx, target) if !ok { return @@ -1840,7 +1840,7 @@ func (a *Account) injectTargetProxyPolicies(ctx context.Context, service *revers } } -func (a *Account) resolveTargetPort(ctx context.Context, target *reverseproxy.Target) (int, bool) { +func (a *Account) resolveTargetPort(ctx context.Context, target *service.Target) (int, bool) { if target.Port != 0 { return target.Port, true } @@ -1856,7 +1856,7 @@ func (a *Account) resolveTargetPort(ctx context.Context, target *reverseproxy.Ta } } -func (a *Account) createProxyPolicy(service *reverseproxy.Service, target *reverseproxy.Target, proxyPeer *nbpeer.Peer, port int, path string) *Policy { +func (a *Account) createProxyPolicy(service *service.Service, target *service.Target, proxyPeer *nbpeer.Peer, port int, path string) *Policy { policyID := fmt.Sprintf("proxy-access-%s-%s-%s", service.ID, proxyPeer.ID, path) return &Policy{ ID: policyID, diff --git a/proxy/cmd/proxy/cmd/root.go b/proxy/cmd/proxy/cmd/root.go index c594f9800..50aa38b29 100644 --- a/proxy/cmd/proxy/cmd/root.go +++ b/proxy/cmd/proxy/cmd/root.go @@ -42,6 +42,8 @@ var ( acmeCerts bool acmeAddr string acmeDir string + acmeEABKID string + acmeEABHMACKey string acmeChallengeType string debugEndpoint bool debugEndpointAddr string @@ -74,6 +76,8 @@ func init() { rootCmd.Flags().BoolVar(&acmeCerts, "acme-certs", envBoolOrDefault("NB_PROXY_ACME_CERTIFICATES", false), "Generate ACME certificates automatically") rootCmd.Flags().StringVar(&acmeAddr, "acme-addr", envStringOrDefault("NB_PROXY_ACME_ADDRESS", ":80"), "HTTP address for ACME HTTP-01 challenges (only used when acme-challenge-type is http-01)") rootCmd.Flags().StringVar(&acmeDir, "acme-dir", envStringOrDefault("NB_PROXY_ACME_DIRECTORY", acme.LetsEncryptURL), "URL of ACME challenge directory") + rootCmd.Flags().StringVar(&acmeEABKID, "acme-eab-kid", envStringOrDefault("NB_PROXY_ACME_EAB_KID", ""), "ACME EAB KID for account registration") + rootCmd.Flags().StringVar(&acmeEABHMACKey, "acme-eab-hmac-key", envStringOrDefault("NB_PROXY_ACME_EAB_HMAC_KEY", ""), "ACME EAB HMAC key for account registration") rootCmd.Flags().StringVar(&acmeChallengeType, "acme-challenge-type", envStringOrDefault("NB_PROXY_ACME_CHALLENGE_TYPE", "tls-alpn-01"), "ACME challenge type: tls-alpn-01 (default, port 443 only) or http-01 (requires port 80)") rootCmd.Flags().BoolVar(&debugEndpoint, "debug-endpoint", envBoolOrDefault("NB_PROXY_DEBUG_ENDPOINT", false), "Enable debug HTTP endpoint") rootCmd.Flags().StringVar(&debugEndpointAddr, "debug-endpoint-addr", envStringOrDefault("NB_PROXY_DEBUG_ENDPOINT_ADDRESS", "localhost:8444"), "Address for the debug HTTP endpoint") @@ -149,6 +153,8 @@ func runServer(cmd *cobra.Command, args []string) error { GenerateACMECertificates: acmeCerts, ACMEChallengeAddress: acmeAddr, ACMEDirectory: acmeDir, + ACMEEABKID: acmeEABKID, + ACMEEABHMACKey: acmeEABHMACKey, ACMEChallengeType: acmeChallengeType, DebugEndpointEnabled: debugEndpoint, DebugEndpointAddress: debugEndpointAddr, diff --git a/proxy/internal/acme/manager.go b/proxy/internal/acme/manager.go index a663b8138..d491d65a3 100644 --- a/proxy/internal/acme/manager.go +++ b/proxy/internal/acme/manager.go @@ -5,6 +5,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/asn1" + "encoding/base64" "encoding/binary" "fmt" "net" @@ -59,7 +60,10 @@ type Manager struct { // NewManager creates a new ACME certificate manager. The certDir is used // for caching certificates. The lockMethod controls cross-replica // coordination strategy (see CertLockMethod constants). -func NewManager(certDir, acmeURL string, notifier certificateNotifier, logger *log.Logger, lockMethod CertLockMethod) *Manager { +// eabKID and eabHMACKey are optional External Account Binding credentials +// required for some CAs like ZeroSSL. The eabHMACKey should be the base64 +// URL-encoded string provided by the CA. +func NewManager(certDir, acmeURL, eabKID, eabHMACKey string, notifier certificateNotifier, logger *log.Logger, lockMethod CertLockMethod) *Manager { if logger == nil { logger = log.StandardLogger() } @@ -70,10 +74,26 @@ func NewManager(certDir, acmeURL string, notifier certificateNotifier, logger *l certNotifier: notifier, logger: logger, } + + var eab *acme.ExternalAccountBinding + if eabKID != "" && eabHMACKey != "" { + decodedKey, err := base64.RawURLEncoding.DecodeString(eabHMACKey) + if err != nil { + logger.Errorf("failed to decode EAB HMAC key: %v", err) + } else { + eab = &acme.ExternalAccountBinding{ + KID: eabKID, + Key: decodedKey, + } + logger.Infof("configured External Account Binding with KID: %s", eabKID) + } + } + mgr.Manager = &autocert.Manager{ - Prompt: autocert.AcceptTOS, - HostPolicy: mgr.hostPolicy, - Cache: autocert.DirCache(certDir), + Prompt: autocert.AcceptTOS, + HostPolicy: mgr.hostPolicy, + Cache: autocert.DirCache(certDir), + ExternalAccountBinding: eab, Client: &acme.Client{ DirectoryURL: acmeURL, }, @@ -136,7 +156,7 @@ func (mgr *Manager) prefetchCertificate(d domain.Domain) { cert, err := mgr.GetCertificate(hello) elapsed := time.Since(start) if err != nil { - mgr.logger.Warnf("prefetch certificate for domain %q: %v", name, err) + mgr.logger.Warnf("prefetch certificate for domain %q in %s: %v", name, elapsed.String(), err) mgr.setDomainState(d, domainFailed, err.Error()) return } diff --git a/proxy/internal/acme/manager_test.go b/proxy/internal/acme/manager_test.go index 3b554e360..f7efe5933 100644 --- a/proxy/internal/acme/manager_test.go +++ b/proxy/internal/acme/manager_test.go @@ -10,7 +10,7 @@ import ( ) func TestHostPolicy(t *testing.T) { - mgr := NewManager(t.TempDir(), "https://acme.example.com/directory", nil, nil, "") + mgr := NewManager(t.TempDir(), "https://acme.example.com/directory", "", "", nil, nil, "") mgr.AddDomain("example.com", "acc1", "rp1") // Wait for the background prefetch goroutine to finish so the temp dir @@ -70,7 +70,7 @@ func TestHostPolicy(t *testing.T) { } func TestDomainStates(t *testing.T) { - mgr := NewManager(t.TempDir(), "https://acme.example.com/directory", nil, nil, "") + mgr := NewManager(t.TempDir(), "https://acme.example.com/directory", "", "", nil, nil, "") assert.Equal(t, 0, mgr.PendingCerts(), "initially zero") assert.Equal(t, 0, mgr.TotalDomains(), "initially zero domains") diff --git a/proxy/management_integration_test.go b/proxy/management_integration_test.go index e91335a81..3e5a21400 100644 --- a/proxy/management_integration_test.go +++ b/proxy/management_integration_test.go @@ -18,8 +18,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - "github.com/netbirdio/netbird/management/internals/modules/reverseproxy" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" + nbproxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" "github.com/netbirdio/netbird/management/server/store" "github.com/netbirdio/netbird/management/server/types" @@ -37,7 +38,7 @@ type integrationTestSetup struct { grpcServer *grpc.Server grpcAddr string cleanup func() - services []*reverseproxy.Service + services []*service.Service } func setupIntegrationTest(t *testing.T) *integrationTestSetup { @@ -66,13 +67,13 @@ func setupIntegrationTest(t *testing.T) *integrationTestSetup { privKey := base64.StdEncoding.EncodeToString(priv) // Create test services in the store - services := []*reverseproxy.Service{ + services := []*service.Service{ { ID: "rp-1", AccountID: "test-account-1", Name: "Test App 1", Domain: "app1.test.proxy.io", - Targets: []*reverseproxy.Target{{ + Targets: []*service.Target{{ Path: strPtr("/"), Host: "10.0.0.1", Port: 8080, @@ -91,7 +92,7 @@ func setupIntegrationTest(t *testing.T) *integrationTestSetup { AccountID: "test-account-1", Name: "Test App 2", Domain: "app2.test.proxy.io", - Targets: []*reverseproxy.Target{{ + Targets: []*service.Target{{ Path: strPtr("/"), Host: "10.0.0.2", Port: 8080, @@ -112,7 +113,8 @@ func setupIntegrationTest(t *testing.T) *integrationTestSetup { } // Create real token store - tokenStore := nbgrpc.NewOneTimeTokenStore(5 * time.Minute) + tokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, 5*time.Minute, 10*time.Minute, 100) + require.NoError(t, err) // Create real users manager usersManager := users.NewManager(testStore) @@ -124,17 +126,23 @@ func setupIntegrationTest(t *testing.T) *integrationTestSetup { HMACKey: []byte("test-hmac-key"), } + proxyManager := &testProxyManager{} + proxyService := nbgrpc.NewProxyServiceServer( &testAccessLogManager{}, tokenStore, oidcConfig, nil, usersManager, + proxyManager, ) // Use store-backed service manager svcMgr := &storeBackedServiceManager{store: testStore, tokenStore: tokenStore} - proxyService.SetProxyManager(svcMgr) + proxyService.SetServiceManager(svcMgr) + + proxyController := &testProxyController{} + proxyService.SetProxyController(proxyController) // Start real gRPC server lis, err := net.Listen("tcp", "127.0.0.1:0") @@ -185,6 +193,52 @@ func (m *testAccessLogManager) GetAllAccessLogs(_ context.Context, _, _ string, return nil, 0, nil } +// testProxyManager is a mock implementation of proxy.Manager for testing. +type testProxyManager struct{} + +func (m *testProxyManager) Connect(_ context.Context, _, _, _ string) error { + return nil +} + +func (m *testProxyManager) Disconnect(_ context.Context, _ string) error { + return nil +} + +func (m *testProxyManager) Heartbeat(_ context.Context, _ string) error { + return nil +} + +func (m *testProxyManager) GetActiveClusterAddresses(_ context.Context) ([]string, error) { + return nil, nil +} + +func (m *testProxyManager) CleanupStale(_ context.Context, _ time.Duration) error { + return nil +} + +// testProxyController is a mock implementation of rpservice.ProxyController for testing. +type testProxyController struct{} + +func (c *testProxyController) SendServiceUpdateToCluster(_ context.Context, _ string, _ *proto.ProxyMapping, _ string) { + // noop +} + +func (c *testProxyController) GetOIDCValidationConfig() nbproxy.OIDCValidationConfig { + return nbproxy.OIDCValidationConfig{} +} + +func (c *testProxyController) RegisterProxyToCluster(_ context.Context, _, _ string) error { + return nil +} + +func (c *testProxyController) UnregisterProxyFromCluster(_ context.Context, _, _ string) error { + return nil +} + +func (c *testProxyController) GetProxiesForCluster(_ string) []string { + return nil +} + // storeBackedServiceManager reads directly from the real store. type storeBackedServiceManager struct { store store.Store @@ -195,19 +249,19 @@ func (m *storeBackedServiceManager) DeleteAllServices(ctx context.Context, accou return nil } -func (m *storeBackedServiceManager) GetAllServices(ctx context.Context, accountID, userID string) ([]*reverseproxy.Service, error) { +func (m *storeBackedServiceManager) GetAllServices(ctx context.Context, accountID, userID string) ([]*service.Service, error) { return m.store.GetAccountServices(ctx, store.LockingStrengthNone, accountID) } -func (m *storeBackedServiceManager) GetService(ctx context.Context, accountID, userID, serviceID string) (*reverseproxy.Service, error) { +func (m *storeBackedServiceManager) GetService(ctx context.Context, accountID, userID, serviceID string) (*service.Service, error) { return m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, serviceID) } -func (m *storeBackedServiceManager) CreateService(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { +func (m *storeBackedServiceManager) CreateService(_ context.Context, _, _ string, _ *service.Service) (*service.Service, error) { return nil, errors.New("not implemented") } -func (m *storeBackedServiceManager) UpdateService(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) { +func (m *storeBackedServiceManager) UpdateService(_ context.Context, _, _ string, _ *service.Service) (*service.Service, error) { return nil, errors.New("not implemented") } @@ -219,7 +273,7 @@ func (m *storeBackedServiceManager) SetCertificateIssuedAt(ctx context.Context, return nil } -func (m *storeBackedServiceManager) SetStatus(ctx context.Context, accountID, serviceID string, status reverseproxy.ProxyStatus) error { +func (m *storeBackedServiceManager) SetStatus(ctx context.Context, accountID, serviceID string, status service.Status) error { return nil } @@ -231,15 +285,15 @@ func (m *storeBackedServiceManager) ReloadService(ctx context.Context, accountID return nil } -func (m *storeBackedServiceManager) GetGlobalServices(ctx context.Context) ([]*reverseproxy.Service, error) { +func (m *storeBackedServiceManager) GetGlobalServices(ctx context.Context) ([]*service.Service, error) { return m.store.GetAccountServices(ctx, store.LockingStrengthNone, "test-account-1") } -func (m *storeBackedServiceManager) GetServiceByID(ctx context.Context, accountID, serviceID string) (*reverseproxy.Service, error) { +func (m *storeBackedServiceManager) GetServiceByID(ctx context.Context, accountID, serviceID string) (*service.Service, error) { return m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, serviceID) } -func (m *storeBackedServiceManager) GetAccountServices(ctx context.Context, accountID string) ([]*reverseproxy.Service, error) { +func (m *storeBackedServiceManager) GetAccountServices(ctx context.Context, accountID string) ([]*service.Service, error) { return m.store.GetAccountServices(ctx, store.LockingStrengthNone, accountID) } @@ -247,8 +301,8 @@ func (m *storeBackedServiceManager) GetServiceIDByTargetID(ctx context.Context, return "", nil } -func (m *storeBackedServiceManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.ExposeServiceRequest) (*reverseproxy.ExposeServiceResponse, error) { - return &reverseproxy.ExposeServiceResponse{}, nil +func (m *storeBackedServiceManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *service.ExposeServiceRequest) (*service.ExposeServiceResponse, error) { + return &service.ExposeServiceResponse{}, nil } func (m *storeBackedServiceManager) RenewServiceFromPeer(_ context.Context, _, _, _ string) error { diff --git a/proxy/server.go b/proxy/server.go index 48a876899..155610305 100644 --- a/proxy/server.go +++ b/proxy/server.go @@ -84,6 +84,10 @@ type Server struct { GenerateACMECertificates bool ACMEChallengeAddress string ACMEDirectory string + // ACMEEABKID is the External Account Binding Key ID for CAs that require EAB (e.g., ZeroSSL). + ACMEEABKID string + // ACMEEABHMACKey is the External Account Binding HMAC key (base64 URL-encoded) for CAs that require EAB. + ACMEEABHMACKey string // ACMEChallengeType specifies the ACME challenge type: "http-01" or "tls-alpn-01". // Defaults to "tls-alpn-01" if not specified. ACMEChallengeType string @@ -419,7 +423,7 @@ func (s *Server) configureTLS(ctx context.Context) (*tls.Config, error) { "acme_server": s.ACMEDirectory, "challenge_type": s.ACMEChallengeType, }).Debug("ACME certificates enabled, configuring certificate manager") - s.acme = acme.NewManager(s.CertificateDirectory, s.ACMEDirectory, s, s.Logger, s.CertLockMethod) + s.acme = acme.NewManager(s.CertificateDirectory, s.ACMEDirectory, s.ACMEEABKID, s.ACMEEABHMACKey, s, s.Logger, s.CertLockMethod) if s.ACMEChallengeType == "http-01" { s.http = &http.Server{ From b3bbc0e5c686b0b7d5e2cb0bf4b6a83135da2bd8 Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Wed, 4 Mar 2026 12:34:11 +0200 Subject: [PATCH 183/374] Fix embedded IdP metrics to count local and generic OIDC users (#5498) --- management/server/metrics/selfhosted.go | 7 +++-- management/server/metrics/selfhosted_test.go | 33 ++++++++++++++------ 2 files changed, 29 insertions(+), 11 deletions(-) diff --git a/management/server/metrics/selfhosted.go b/management/server/metrics/selfhosted.go index f25a72181..bfefce388 100644 --- a/management/server/metrics/selfhosted.go +++ b/management/server/metrics/selfhosted.go @@ -294,9 +294,9 @@ func (w *Worker) generateProperties(ctx context.Context) properties { localUsers++ } else { idpUsers++ - idpType := extractIdpType(idpID) - embeddedIdpTypes[idpType]++ } + idpType := extractIdpType(idpID) + embeddedIdpTypes[idpType]++ } } } @@ -531,6 +531,9 @@ func createPostRequest(ctx context.Context, endpoint string, payloadStr string) // Connector IDs are formatted as "-" (e.g., "okta-abc123", "zitadel-xyz"). // Returns the type prefix, or "oidc" if no known prefix is found. func extractIdpType(connectorID string) string { + if connectorID == "local" { + return "local" + } idx := strings.LastIndex(connectorID, "-") if idx <= 0 { return "oidc" diff --git a/management/server/metrics/selfhosted_test.go b/management/server/metrics/selfhosted_test.go index 412559bff..78f5c53be 100644 --- a/management/server/metrics/selfhosted_test.go +++ b/management/server/metrics/selfhosted_test.go @@ -29,6 +29,7 @@ func (mockDatasource) GetAllConnectedPeers() map[string]struct{} { func (mockDatasource) GetAllAccounts(_ context.Context) []*types.Account { localUserID := dex.EncodeDexUserID("10", "local") idpUserID := dex.EncodeDexUserID("20", "zitadel-d5uv82dra0haedlf6kv0") + oidcUserID := dex.EncodeDexUserID("30", "d6jvvp69kmnc73c9pl40") return []*types.Account{ { Id: "1", @@ -206,6 +207,13 @@ func (mockDatasource) GetAllAccounts(_ context.Context) []*types.Account { "1": {}, }, }, + oidcUserID: { + Id: oidcUserID, + IsServiceUser: false, + PATs: map[string]*types.PersonalAccessToken{ + "1": {}, + }, + }, }, Networks: []*networkTypes.Network{ { @@ -278,14 +286,14 @@ func TestGenerateProperties(t *testing.T) { if properties["rules"] != 4 { t.Errorf("expected 4 rules, got %d", properties["rules"]) } - if properties["users"] != 2 { - t.Errorf("expected 1 users, got %d", properties["users"]) + if properties["users"] != 3 { + t.Errorf("expected 3 users, got %d", properties["users"]) } if properties["setup_keys_usage"] != 2 { t.Errorf("expected 1 setup_keys_usage, got %d", properties["setup_keys_usage"]) } - if properties["pats"] != 4 { - t.Errorf("expected 4 personal_access_tokens, got %d", properties["pats"]) + if properties["pats"] != 5 { + t.Errorf("expected 5 personal_access_tokens, got %d", properties["pats"]) } if properties["peers_ssh_enabled"] != 2 { t.Errorf("expected 2 peers_ssh_enabled, got %d", properties["peers_ssh_enabled"]) @@ -369,14 +377,20 @@ func TestGenerateProperties(t *testing.T) { if properties["local_users_count"] != 1 { t.Errorf("expected 1 local_users_count, got %d", properties["local_users_count"]) } - if properties["idp_users_count"] != 1 { - t.Errorf("expected 1 idp_users_count, got %d", properties["idp_users_count"]) + if properties["idp_users_count"] != 2 { + t.Errorf("expected 2 idp_users_count, got %d", properties["idp_users_count"]) + } + if properties["embedded_idp_users_local"] != 1 { + t.Errorf("expected 1 embedded_idp_users_local, got %v", properties["embedded_idp_users_local"]) } if properties["embedded_idp_users_zitadel"] != 1 { t.Errorf("expected 1 embedded_idp_users_zitadel, got %v", properties["embedded_idp_users_zitadel"]) } - if properties["embedded_idp_count"] != 1 { - t.Errorf("expected 1 embedded_idp_count, got %v", properties["embedded_idp_count"]) + if properties["embedded_idp_users_oidc"] != 1 { + t.Errorf("expected 1 embedded_idp_users_oidc, got %v", properties["embedded_idp_users_oidc"]) + } + if properties["embedded_idp_count"] != 3 { + t.Errorf("expected 3 embedded_idp_count, got %v", properties["embedded_idp_count"]) } if properties["services"] != 2 { @@ -436,7 +450,8 @@ func TestExtractIdpType(t *testing.T) { {"microsoft-abc123", "microsoft"}, {"authentik-abc123", "authentik"}, {"keycloak-d5uv82dra0haedlf6kv0", "keycloak"}, - {"local", "oidc"}, + {"local", "local"}, + {"d6jvvp69kmnc73c9pl40", "oidc"}, {"", "oidc"}, } From cfc7ec8bb990e6ccce530335583ffeecca312973 Mon Sep 17 00:00:00 2001 From: hbzhost <145801687+hbzhost@users.noreply.github.com> Date: Wed, 4 Mar 2026 06:11:14 -0700 Subject: [PATCH 184/374] [client] Fix SSH JWT auth failure with Azure Entra ID iat backdating (#5471) Increase DefaultJWTMaxTokenAge from 5 to 10 minutes to accommodate identity providers like Azure Entra ID that backdate the iat claim by up to 5 minutes, causing tokens to be immediately rejected. Fixes #5449 Co-authored-by: Claude Opus 4.6 (1M context) --- client/ssh/server/server.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/client/ssh/server/server.go b/client/ssh/server/server.go index 1ddb60f8e..4431ae423 100644 --- a/client/ssh/server/server.go +++ b/client/ssh/server/server.go @@ -46,8 +46,10 @@ const ( cmdSFTP = "" cmdNonInteractive = "" - // DefaultJWTMaxTokenAge is the default maximum age for JWT tokens accepted by the SSH server - DefaultJWTMaxTokenAge = 5 * 60 + // DefaultJWTMaxTokenAge is the default maximum age for JWT tokens accepted by the SSH server. + // Set to 10 minutes to accommodate identity providers like Azure Entra ID + // that backdate the iat claim by up to 5 minutes. + DefaultJWTMaxTokenAge = 10 * 60 ) var ( From 9e01ea7aae2be6e314164cefd5e05d29123f8e8c Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Wed, 4 Mar 2026 14:30:54 +0100 Subject: [PATCH 185/374] [misc] Add ISSUE_TEMPLATE configuration file (#5500) Add issue template config file with support and troubleshooting links --- .github/ISSUE_TEMPLATE/config.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/config.yml diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000..e9ffaf8a3 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,14 @@ +blank_issues_enabled: true +contact_links: + - name: Community Support + url: https://forum.netbird.io/ + about: Community support forum + - name: Cloud Support + url: https://docs.netbird.io/help/report-bug-issues + about: Contact us for support + - name: Client/Connection Troubleshooting + url: https://docs.netbird.io/help/troubleshooting-client + about: See our client troubleshooting guide for help addressing common issues + - name: Self-host Troubleshooting + url: https://docs.netbird.io/selfhosted/troubleshooting + about: See our self-host troubleshooting guide for help addressing common issues From 8e7b016be2dc14fe1e64a8f7abc25ca7c848bbd9 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Wed, 4 Mar 2026 18:15:13 +0100 Subject: [PATCH 186/374] [management] Replace in-memory expose tracker with SQL-backed operations (#5494) The expose tracker used sync.Map for in-memory TTL tracking of active expose sessions, which broke and lost all sessions on restart. Replace with SQL-backed operations that reuse the existing meta_last_renewed_at column: - Add store methods: RenewEphemeralService, GetExpiredEphemeralServices, CountEphemeralServicesByPeer, EphemeralServiceExists - Move duplicate/limit checks inside a transaction with row-level locking (SELECT ... FOR UPDATE) to prevent concurrent bypass - Reaper re-checks expiry under row lock to avoid deleting a just-renewed service and prevent duplicate event emission - Add composite index on (source, source_peer) for efficient queries - Batch-limit and column-select the reaper query to avoid DB/GC spikes - Filter out malformed rows with empty source_peer --- .../service/manager/expose_tracker.go | 154 ++------ .../service/manager/expose_tracker_test.go | 338 ++++++++---------- .../reverseproxy/service/manager/manager.go | 150 +++++--- .../service/manager/manager_test.go | 34 +- .../modules/reverseproxy/service/service.go | 4 +- management/server/store/sql_store.go | 93 +++++ management/server/store/store.go | 5 + management/server/store/store_mock.go | 59 +++ 8 files changed, 461 insertions(+), 376 deletions(-) diff --git a/management/internals/modules/reverseproxy/service/manager/expose_tracker.go b/management/internals/modules/reverseproxy/service/manager/expose_tracker.go index 11e1f0110..911add3bb 100644 --- a/management/internals/modules/reverseproxy/service/manager/expose_tracker.go +++ b/management/internals/modules/reverseproxy/service/manager/expose_tracker.go @@ -2,7 +2,7 @@ package manager import ( "context" - "sync" + "math/rand/v2" "time" "github.com/netbirdio/netbird/shared/management/status" @@ -13,108 +13,20 @@ const ( exposeTTL = 90 * time.Second exposeReapInterval = 30 * time.Second maxExposesPerPeer = 10 + exposeReapBatch = 100 ) -type trackedExpose struct { - mu sync.Mutex - domain string - accountID string - peerID string - lastRenewed time.Time - expiring bool +type exposeReaper struct { + manager *Manager } -type exposeTracker struct { - activeExposes sync.Map - exposeCreateMu sync.Mutex - manager *Manager -} - -func exposeKey(peerID, domain string) string { - return peerID + ":" + domain -} - -// TrackExposeIfAllowed atomically checks the per-peer limit and registers a new -// active expose session under the same lock. Returns (true, false) if the expose -// was already tracked (duplicate), (false, true) if tracking succeeded, and -// (false, false) if the peer has reached the limit. -func (t *exposeTracker) TrackExposeIfAllowed(peerID, domain, accountID string) (alreadyTracked, ok bool) { - t.exposeCreateMu.Lock() - defer t.exposeCreateMu.Unlock() - - key := exposeKey(peerID, domain) - _, loaded := t.activeExposes.LoadOrStore(key, &trackedExpose{ - domain: domain, - accountID: accountID, - peerID: peerID, - lastRenewed: time.Now(), - }) - if loaded { - return true, false - } - - if t.CountPeerExposes(peerID) > maxExposesPerPeer { - t.activeExposes.Delete(key) - return false, false - } - - return false, true -} - -// UntrackExpose removes an active expose session from tracking. -func (t *exposeTracker) UntrackExpose(peerID, domain string) { - t.activeExposes.Delete(exposeKey(peerID, domain)) -} - -// CountPeerExposes returns the number of active expose sessions for a peer. -func (t *exposeTracker) CountPeerExposes(peerID string) int { - count := 0 - t.activeExposes.Range(func(_, val any) bool { - if expose := val.(*trackedExpose); expose.peerID == peerID { - count++ - } - return true - }) - return count -} - -// MaxExposesPerPeer returns the maximum number of concurrent exposes allowed per peer. -func (t *exposeTracker) MaxExposesPerPeer() int { - return maxExposesPerPeer -} - -// RenewTrackedExpose updates the in-memory lastRenewed timestamp for a tracked expose. -// Returns false if the expose is not tracked or is being reaped. -func (t *exposeTracker) RenewTrackedExpose(peerID, domain string) bool { - key := exposeKey(peerID, domain) - val, ok := t.activeExposes.Load(key) - if !ok { - return false - } - - expose := val.(*trackedExpose) - expose.mu.Lock() - if expose.expiring { - expose.mu.Unlock() - return false - } - expose.lastRenewed = time.Now() - expose.mu.Unlock() - - return true -} - -// StopTrackedExpose removes an active expose session from tracking. -// Returns false if the expose was not tracked. -func (t *exposeTracker) StopTrackedExpose(peerID, domain string) bool { - key := exposeKey(peerID, domain) - _, ok := t.activeExposes.LoadAndDelete(key) - return ok -} - -// StartExposeReaper starts a background goroutine that reaps expired expose sessions. -func (t *exposeTracker) StartExposeReaper(ctx context.Context) { +// StartExposeReaper starts a background goroutine that reaps expired ephemeral services from the DB. +func (r *exposeReaper) StartExposeReaper(ctx context.Context) { go func() { + // start with a random delay + rn := rand.IntN(10) + time.Sleep(time.Duration(rn) * time.Second) + ticker := time.NewTicker(exposeReapInterval) defer ticker.Stop() @@ -123,41 +35,31 @@ func (t *exposeTracker) StartExposeReaper(ctx context.Context) { case <-ctx.Done(): return case <-ticker.C: - t.reapExpiredExposes() + r.reapExpiredExposes(ctx) } } }() } -func (t *exposeTracker) reapExpiredExposes() { - t.activeExposes.Range(func(key, val any) bool { - expose := val.(*trackedExpose) - expose.mu.Lock() - expired := time.Since(expose.lastRenewed) > exposeTTL - if expired { - expose.expiring = true - } - expose.mu.Unlock() +func (r *exposeReaper) reapExpiredExposes(ctx context.Context) { + expired, err := r.manager.store.GetExpiredEphemeralServices(ctx, exposeTTL, exposeReapBatch) + if err != nil { + log.Errorf("failed to get expired ephemeral services: %v", err) + return + } - if !expired { - return true + for _, svc := range expired { + log.Infof("reaping expired expose session for peer %s, domain %s", svc.SourcePeer, svc.Domain) + + err := r.manager.deleteExpiredPeerService(ctx, svc.AccountID, svc.SourcePeer, svc.ID) + if err == nil { + continue } - log.Infof("reaping expired expose session for peer %s, domain %s", expose.peerID, expose.domain) - - err := t.manager.deleteServiceFromPeer(context.Background(), expose.accountID, expose.peerID, expose.domain, true) - - s, _ := status.FromError(err) - - switch { - case err == nil: - t.activeExposes.Delete(key) - case s.ErrorType == status.NotFound: - log.Debugf("service %s was already deleted", expose.domain) - default: - log.Errorf("failed to delete expired peer-exposed service for domain %s: %v", expose.domain, err) + if s, ok := status.FromError(err); ok && s.ErrorType == status.NotFound { + log.Debugf("service %s was already deleted by another instance", svc.Domain) + } else { + log.Errorf("failed to delete expired peer-exposed service for domain %s: %v", svc.Domain, err) } - - return true - }) + } } diff --git a/management/internals/modules/reverseproxy/service/manager/expose_tracker_test.go b/management/internals/modules/reverseproxy/service/manager/expose_tracker_test.go index 154239fb1..bd9f4b93b 100644 --- a/management/internals/modules/reverseproxy/service/manager/expose_tracker_test.go +++ b/management/internals/modules/reverseproxy/service/manager/expose_tracker_test.go @@ -10,184 +10,62 @@ import ( "github.com/stretchr/testify/require" rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" + "github.com/netbirdio/netbird/management/server/store" ) -func TestExposeKey(t *testing.T) { - assert.Equal(t, "peer1:example.com", exposeKey("peer1", "example.com")) - assert.Equal(t, "peer2:other.com", exposeKey("peer2", "other.com")) - assert.NotEqual(t, exposeKey("peer1", "a.com"), exposeKey("peer1", "b.com")) -} - -func TestTrackExposeIfAllowed(t *testing.T) { - t.Run("first track succeeds", func(t *testing.T) { - tracker := &exposeTracker{} - alreadyTracked, ok := tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1") - assert.False(t, alreadyTracked, "first track should not be duplicate") - assert.True(t, ok, "first track should be allowed") - }) - - t.Run("duplicate track detected", func(t *testing.T) { - tracker := &exposeTracker{} - tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1") - - alreadyTracked, ok := tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1") - assert.True(t, alreadyTracked, "second track should be duplicate") - assert.False(t, ok) - }) - - t.Run("rejects when at limit", func(t *testing.T) { - tracker := &exposeTracker{} - for i := range maxExposesPerPeer { - _, ok := tracker.TrackExposeIfAllowed("peer1", "domain-"+string(rune('a'+i))+".com", "acct1") - assert.True(t, ok, "track %d should be allowed", i) - } - - alreadyTracked, ok := tracker.TrackExposeIfAllowed("peer1", "over-limit.com", "acct1") - assert.False(t, alreadyTracked) - assert.False(t, ok, "should reject when at limit") - }) - - t.Run("other peer unaffected by limit", func(t *testing.T) { - tracker := &exposeTracker{} - for i := range maxExposesPerPeer { - tracker.TrackExposeIfAllowed("peer1", "domain-"+string(rune('a'+i))+".com", "acct1") - } - - _, ok := tracker.TrackExposeIfAllowed("peer2", "a.com", "acct1") - assert.True(t, ok, "other peer should still be within limit") - }) -} - -func TestUntrackExpose(t *testing.T) { - tracker := &exposeTracker{} - - tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1") - assert.Equal(t, 1, tracker.CountPeerExposes("peer1")) - - tracker.UntrackExpose("peer1", "a.com") - assert.Equal(t, 0, tracker.CountPeerExposes("peer1")) -} - -func TestCountPeerExposes(t *testing.T) { - tracker := &exposeTracker{} - - assert.Equal(t, 0, tracker.CountPeerExposes("peer1")) - - tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1") - tracker.TrackExposeIfAllowed("peer1", "b.com", "acct1") - tracker.TrackExposeIfAllowed("peer2", "a.com", "acct1") - - assert.Equal(t, 2, tracker.CountPeerExposes("peer1"), "peer1 should have 2 exposes") - assert.Equal(t, 1, tracker.CountPeerExposes("peer2"), "peer2 should have 1 expose") - assert.Equal(t, 0, tracker.CountPeerExposes("peer3"), "peer3 should have 0 exposes") -} - -func TestMaxExposesPerPeer(t *testing.T) { - tracker := &exposeTracker{} - assert.Equal(t, maxExposesPerPeer, tracker.MaxExposesPerPeer()) -} - -func TestRenewTrackedExpose(t *testing.T) { - tracker := &exposeTracker{} - - found := tracker.RenewTrackedExpose("peer1", "a.com") - assert.False(t, found, "should not find untracked expose") - - tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1") - - found = tracker.RenewTrackedExpose("peer1", "a.com") - assert.True(t, found, "should find tracked expose") -} - -func TestRenewTrackedExpose_RejectsExpiring(t *testing.T) { - tracker := &exposeTracker{} - tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1") - - // Simulate reaper marking the expose as expiring - key := exposeKey("peer1", "a.com") - val, _ := tracker.activeExposes.Load(key) - expose := val.(*trackedExpose) - expose.mu.Lock() - expose.expiring = true - expose.mu.Unlock() - - found := tracker.RenewTrackedExpose("peer1", "a.com") - assert.False(t, found, "should reject renewal when expiring") -} - func TestReapExpiredExposes(t *testing.T) { - mgr, _ := setupIntegrationTest(t) - tracker := mgr.exposeTracker - + mgr, testStore := setupIntegrationTest(t) ctx := context.Background() + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ Port: 8080, Protocol: "http", }) require.NoError(t, err) - // Manually expire the tracked entry - key := exposeKey(testPeerID, resp.Domain) - val, _ := tracker.activeExposes.Load(key) - expose := val.(*trackedExpose) - expose.mu.Lock() - expose.lastRenewed = time.Now().Add(-2 * exposeTTL) - expose.mu.Unlock() + // Manually expire the service by backdating meta_last_renewed_at + expireEphemeralService(t, testStore, testAccountID, resp.Domain) - // Add an active (non-expired) tracking entry - tracker.activeExposes.Store(exposeKey("peer1", "active.com"), &trackedExpose{ - domain: "active.com", - accountID: testAccountID, - peerID: "peer1", - lastRenewed: time.Now(), + // Create a non-expired service + resp2, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ + Port: 8081, + Protocol: "http", }) + require.NoError(t, err) - tracker.reapExpiredExposes() + mgr.exposeReaper.reapExpiredExposes(ctx) - _, exists := tracker.activeExposes.Load(key) - assert.False(t, exists, "expired expose should be removed") + // Expired service should be deleted + _, err = testStore.GetServiceByDomain(ctx, testAccountID, resp.Domain) + require.Error(t, err, "expired service should be deleted") - _, exists = tracker.activeExposes.Load(exposeKey("peer1", "active.com")) - assert.True(t, exists, "active expose should remain") + // Non-expired service should remain + _, err = testStore.GetServiceByDomain(ctx, testAccountID, resp2.Domain) + require.NoError(t, err, "active service should remain") } -func TestReapExpiredExposes_SetsExpiringFlag(t *testing.T) { - mgr, _ := setupIntegrationTest(t) - tracker := mgr.exposeTracker - +func TestReapAlreadyDeletedService(t *testing.T) { + mgr, testStore := setupIntegrationTest(t) ctx := context.Background() + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ Port: 8080, Protocol: "http", }) require.NoError(t, err) - key := exposeKey(testPeerID, resp.Domain) - val, _ := tracker.activeExposes.Load(key) - expose := val.(*trackedExpose) + expireEphemeralService(t, testStore, testAccountID, resp.Domain) - // Expire it - expose.mu.Lock() - expose.lastRenewed = time.Now().Add(-2 * exposeTTL) - expose.mu.Unlock() + // Delete the service before reaping + err = mgr.StopServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain) + require.NoError(t, err) - // Renew should succeed before reaping - assert.True(t, tracker.RenewTrackedExpose(testPeerID, resp.Domain), "renew should succeed before reaper runs") - - // Re-expire and reap - expose.mu.Lock() - expose.lastRenewed = time.Now().Add(-2 * exposeTTL) - expose.mu.Unlock() - - tracker.reapExpiredExposes() - - // Entry is deleted, renew returns false - assert.False(t, tracker.RenewTrackedExpose(testPeerID, resp.Domain), "renew should fail after reap") + // Reaping should handle the already-deleted service gracefully + mgr.exposeReaper.reapExpiredExposes(ctx) } -func TestConcurrentTrackAndCount(t *testing.T) { - mgr, _ := setupIntegrationTest(t) - tracker := mgr.exposeTracker +func TestConcurrentReapAndRenew(t *testing.T) { + mgr, testStore := setupIntegrationTest(t) ctx := context.Background() for i := range 5 { @@ -198,59 +76,133 @@ func TestConcurrentTrackAndCount(t *testing.T) { require.NoError(t, err) } - // Manually expire all tracked entries - tracker.activeExposes.Range(func(_, val any) bool { - expose := val.(*trackedExpose) - expose.mu.Lock() - expose.lastRenewed = time.Now().Add(-2 * exposeTTL) - expose.mu.Unlock() - return true - }) - - var wg sync.WaitGroup - wg.Add(2) - go func() { - defer wg.Done() - tracker.reapExpiredExposes() - }() - go func() { - defer wg.Done() - tracker.CountPeerExposes(testPeerID) - }() - wg.Wait() - - assert.Equal(t, 0, tracker.CountPeerExposes(testPeerID), "all expired exposes should be reaped") -} - -func TestTrackedExposeMutexProtectsLastRenewed(t *testing.T) { - expose := &trackedExpose{ - lastRenewed: time.Now().Add(-1 * time.Hour), + // Expire all services + services, err := testStore.GetAccountServices(ctx, store.LockingStrengthNone, testAccountID) + require.NoError(t, err) + for _, svc := range services { + if svc.Source == rpservice.SourceEphemeral { + expireEphemeralService(t, testStore, testAccountID, svc.Domain) + } } var wg sync.WaitGroup wg.Add(2) - go func() { defer wg.Done() - for range 100 { - expose.mu.Lock() - expose.lastRenewed = time.Now() - expose.mu.Unlock() - } + mgr.exposeReaper.reapExpiredExposes(ctx) }() - go func() { defer wg.Done() - for range 100 { - expose.mu.Lock() - _ = time.Since(expose.lastRenewed) - expose.mu.Unlock() - } + _, _ = mgr.store.CountEphemeralServicesByPeer(ctx, store.LockingStrengthNone, testAccountID, testPeerID) }() - wg.Wait() - expose.mu.Lock() - require.False(t, expose.lastRenewed.IsZero(), "lastRenewed should not be zero after concurrent access") - expose.mu.Unlock() + count, err := mgr.store.CountEphemeralServicesByPeer(ctx, store.LockingStrengthNone, testAccountID, testPeerID) + require.NoError(t, err) + assert.Equal(t, int64(0), count, "all expired services should be reaped") +} + +func TestRenewEphemeralService(t *testing.T) { + mgr, _ := setupIntegrationTest(t) + ctx := context.Background() + + t.Run("renew succeeds for active service", func(t *testing.T) { + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ + Port: 8082, + Protocol: "http", + }) + require.NoError(t, err) + + err = mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain) + require.NoError(t, err) + }) + + t.Run("renew fails for nonexistent domain", func(t *testing.T) { + err := mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, "nonexistent.com") + require.Error(t, err) + assert.Contains(t, err.Error(), "no active expose session") + }) +} + +func TestCountAndExistsEphemeralServices(t *testing.T) { + mgr, _ := setupIntegrationTest(t) + ctx := context.Background() + + count, err := mgr.store.CountEphemeralServicesByPeer(ctx, store.LockingStrengthNone, testAccountID, testPeerID) + require.NoError(t, err) + assert.Equal(t, int64(0), count) + + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ + Port: 8083, + Protocol: "http", + }) + require.NoError(t, err) + + count, err = mgr.store.CountEphemeralServicesByPeer(ctx, store.LockingStrengthNone, testAccountID, testPeerID) + require.NoError(t, err) + assert.Equal(t, int64(1), count) + + exists, err := mgr.store.EphemeralServiceExists(ctx, store.LockingStrengthNone, testAccountID, testPeerID, resp.Domain) + require.NoError(t, err) + assert.True(t, exists, "service should exist") + + exists, err = mgr.store.EphemeralServiceExists(ctx, store.LockingStrengthNone, testAccountID, testPeerID, "no-such.domain") + require.NoError(t, err) + assert.False(t, exists, "non-existent service should not exist") +} + +func TestMaxExposesPerPeerEnforced(t *testing.T) { + mgr, _ := setupIntegrationTest(t) + ctx := context.Background() + + for i := range maxExposesPerPeer { + _, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ + Port: 8090 + i, + Protocol: "http", + }) + require.NoError(t, err, "expose %d should succeed", i) + } + + _, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ + Port: 9999, + Protocol: "http", + }) + require.Error(t, err) + assert.Contains(t, err.Error(), "maximum number of active expose sessions") +} + +func TestReapSkipsRenewedService(t *testing.T) { + mgr, testStore := setupIntegrationTest(t) + ctx := context.Background() + + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ + Port: 8086, + Protocol: "http", + }) + require.NoError(t, err) + + // Expire the service + expireEphemeralService(t, testStore, testAccountID, resp.Domain) + + // Renew it before the reaper runs + err = mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain) + require.NoError(t, err) + + // Reaper should skip it because the re-check sees a fresh timestamp + mgr.exposeReaper.reapExpiredExposes(ctx) + + _, err = testStore.GetServiceByDomain(ctx, testAccountID, resp.Domain) + require.NoError(t, err, "renewed service should survive reaping") +} + +// expireEphemeralService backdates meta_last_renewed_at to force expiration. +func expireEphemeralService(t *testing.T, s store.Store, accountID, domain string) { + t.Helper() + svc, err := s.GetServiceByDomain(context.Background(), accountID, domain) + require.NoError(t, err) + + expired := time.Now().Add(-2 * exposeTTL) + svc.Meta.LastRenewedAt = &expired + err = s.UpdateService(context.Background(), svc) + require.NoError(t, err) } diff --git a/management/internals/modules/reverseproxy/service/manager/manager.go b/management/internals/modules/reverseproxy/service/manager/manager.go index 16a57abb6..b5e643799 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager.go +++ b/management/internals/modules/reverseproxy/service/manager/manager.go @@ -37,7 +37,7 @@ type Manager struct { permissionsManager permissions.Manager proxyController proxy.Controller clusterDeriver ClusterDeriver - exposeTracker *exposeTracker + exposeReaper *exposeReaper } // NewManager creates a new service manager. @@ -49,13 +49,13 @@ func NewManager(store store.Store, accountManager account.Manager, permissionsMa proxyController: proxyController, clusterDeriver: clusterDeriver, } - mgr.exposeTracker = &exposeTracker{manager: mgr} + mgr.exposeReaper = &exposeReaper{manager: mgr} return mgr } -// StartExposeReaper delegates to the expose tracker. +// StartExposeReaper starts the background goroutine that reaps expired ephemeral services. func (m *Manager) StartExposeReaper(ctx context.Context) { - m.exposeTracker.StartExposeReaper(ctx) + m.exposeReaper.StartExposeReaper(ctx) } func (m *Manager) GetAllServices(ctx context.Context, accountID, userID string) ([]*service.Service, error) { @@ -215,6 +215,52 @@ func (m *Manager) persistNewService(ctx context.Context, accountID string, servi }) } +// persistNewEphemeralService creates an ephemeral service inside a single transaction +// that also enforces the duplicate and per-peer limit checks atomically. +// The count and exists queries use FOR UPDATE locking to serialize concurrent creates +// for the same peer, preventing the per-peer limit from being bypassed. +func (m *Manager) persistNewEphemeralService(ctx context.Context, accountID, peerID string, svc *service.Service) error { + return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + // Lock the peer row to serialize concurrent creates for the same peer. + // Without this, when no ephemeral rows exist yet, FOR UPDATE on the services + // table returns no rows and acquires no locks, allowing concurrent inserts + // to bypass the per-peer limit. + if _, err := transaction.GetPeerByID(ctx, store.LockingStrengthUpdate, accountID, peerID); err != nil { + return fmt.Errorf("lock peer row: %w", err) + } + + exists, err := transaction.EphemeralServiceExists(ctx, store.LockingStrengthUpdate, accountID, peerID, svc.Domain) + if err != nil { + return fmt.Errorf("check existing expose: %w", err) + } + if exists { + return status.Errorf(status.AlreadyExists, "peer already has an active expose session for this domain") + } + + count, err := transaction.CountEphemeralServicesByPeer(ctx, store.LockingStrengthUpdate, accountID, peerID) + if err != nil { + return fmt.Errorf("count peer exposes: %w", err) + } + if count >= int64(maxExposesPerPeer) { + return status.Errorf(status.PreconditionFailed, "peer has reached the maximum number of active expose sessions (%d)", maxExposesPerPeer) + } + + if err := m.checkDomainAvailable(ctx, transaction, accountID, svc.Domain, ""); err != nil { + return err + } + + if err := validateTargetReferences(ctx, transaction, accountID, svc.Targets); err != nil { + return err + } + + if err := transaction.CreateService(ctx, svc); err != nil { + return fmt.Errorf("create service: %w", err) + } + + return nil + }) +} + func (m *Manager) checkDomainAvailable(ctx context.Context, transaction store.Store, accountID, domain, excludeServiceID string) error { existingService, err := transaction.GetServiceByDomain(ctx, accountID, domain) if err != nil { @@ -412,10 +458,6 @@ func (m *Manager) DeleteService(ctx context.Context, accountID, userID, serviceI return err } - if s.Source == service.SourceEphemeral { - m.exposeTracker.UntrackExpose(s.SourcePeer, s.Domain) - } - m.accountManager.StoreEvent(ctx, userID, serviceID, accountID, activity.ServiceDeleted, s.EventMeta()) m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Delete, "", m.proxyController.GetOIDCValidationConfig()), s.ProxyCluster) @@ -457,9 +499,6 @@ func (m *Manager) DeleteAllServices(ctx context.Context, accountID, userID strin oidcCfg := m.proxyController.GetOIDCValidationConfig() for _, svc := range services { - if svc.Source == service.SourceEphemeral { - m.exposeTracker.UntrackExpose(svc.SourcePeer, svc.Domain) - } m.accountManager.StoreEvent(ctx, userID, svc.ID, accountID, activity.ServiceDeleted, svc.EventMeta()) m.proxyController.SendServiceUpdateToCluster(ctx, accountID, svc.ToProtoMapping(service.Delete, "", oidcCfg), svc.ProxyCluster) } @@ -681,26 +720,13 @@ func (m *Manager) CreateServiceFromPeer(ctx context.Context, accountID, peerID s return nil, err } - now := time.Now() - svc.Meta.LastRenewedAt = &now svc.SourcePeer = peerID - if err := m.persistNewService(ctx, accountID, svc); err != nil { - return nil, err - } + now := time.Now() + svc.Meta.LastRenewedAt = &now - alreadyTracked, allowed := m.exposeTracker.TrackExposeIfAllowed(peerID, svc.Domain, accountID) - if alreadyTracked { - if err := m.deleteServiceFromPeer(ctx, accountID, peerID, svc.Domain, false); err != nil { - log.WithContext(ctx).Debugf("failed to delete duplicate expose service for domain %s: %v", svc.Domain, err) - } - return nil, status.Errorf(status.AlreadyExists, "peer already has an active expose session for this domain") - } - if !allowed { - if err := m.deleteServiceFromPeer(ctx, accountID, peerID, svc.Domain, false); err != nil { - log.WithContext(ctx).Debugf("failed to delete service after limit exceeded for domain %s: %v", svc.Domain, err) - } - return nil, status.Errorf(status.PreconditionFailed, "peer has reached the maximum number of active expose sessions (%d)", maxExposesPerPeer) + if err := m.persistNewEphemeralService(ctx, accountID, peerID, svc); err != nil { + return nil, err } meta := addPeerInfoToEventMeta(svc.EventMeta(), peer) @@ -748,26 +774,17 @@ func (m *Manager) buildRandomDomain(name string) (string, error) { return domain, nil } -// RenewServiceFromPeer renews the in-memory TTL tracker for the peer's expose session. -// Returns an error if the expose is not actively tracked. -func (m *Manager) RenewServiceFromPeer(_ context.Context, _, peerID, domain string) error { - if !m.exposeTracker.RenewTrackedExpose(peerID, domain) { - return status.Errorf(status.NotFound, "no active expose session for domain %s", domain) - } - return nil +// RenewServiceFromPeer updates the DB timestamp for the peer's ephemeral service. +func (m *Manager) RenewServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error { + return m.store.RenewEphemeralService(ctx, accountID, peerID, domain) } -// StopServiceFromPeer stops a peer's active expose session by untracking and deleting the service. +// StopServiceFromPeer stops a peer's active expose session by deleting the service from the DB. func (m *Manager) StopServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error { if err := m.deleteServiceFromPeer(ctx, accountID, peerID, domain, false); err != nil { log.WithContext(ctx).Errorf("failed to delete peer-exposed service for domain %s: %v", domain, err) return err } - - if !m.exposeTracker.StopTrackedExpose(peerID, domain) { - log.WithContext(ctx).Warnf("expose tracker entry for domain %s already removed; service was deleted", domain) - } - return nil } @@ -848,6 +865,57 @@ func (m *Manager) deletePeerService(ctx context.Context, accountID, peerID, serv return nil } +// deleteExpiredPeerService deletes an ephemeral service by ID after re-checking +// that it is still expired under a row lock. This prevents deleting a service +// that was renewed between the batch query and this delete, and ensures only one +// management instance processes the deletion +func (m *Manager) deleteExpiredPeerService(ctx context.Context, accountID, peerID, serviceID string) error { + var svc *service.Service + deleted := false + err := m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + var err error + svc, err = transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID) + if err != nil { + return err + } + + if svc.Source != service.SourceEphemeral || svc.SourcePeer != peerID { + return status.Errorf(status.PermissionDenied, "service does not match expected ephemeral owner") + } + + if svc.Meta.LastRenewedAt != nil && time.Since(*svc.Meta.LastRenewedAt) <= exposeTTL { + return nil + } + + if err = transaction.DeleteService(ctx, accountID, serviceID); err != nil { + return fmt.Errorf("delete service: %w", err) + } + deleted = true + + return nil + }) + if err != nil { + return err + } + + if !deleted { + return nil + } + + peer, err := m.store.GetPeerByID(ctx, store.LockingStrengthNone, accountID, peerID) + if err != nil { + log.WithContext(ctx).Debugf("failed to get peer %s for event metadata: %v", peerID, err) + peer = nil + } + + meta := addPeerInfoToEventMeta(svc.EventMeta(), peer) + m.accountManager.StoreEvent(ctx, peerID, serviceID, accountID, activity.PeerServiceExposeExpired, meta) + m.proxyController.SendServiceUpdateToCluster(ctx, accountID, svc.ToProtoMapping(service.Delete, "", m.proxyController.GetOIDCValidationConfig()), svc.ProxyCluster) + m.accountManager.UpdateAccountPeers(ctx, accountID) + + return nil +} + func addPeerInfoToEventMeta(meta map[string]any, peer *nbpeer.Peer) map[string]any { if peer == nil { return meta diff --git a/management/internals/modules/reverseproxy/service/manager/manager_test.go b/management/internals/modules/reverseproxy/service/manager/manager_test.go index 99409e235..196eead22 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/service/manager/manager_test.go @@ -720,7 +720,7 @@ func setupIntegrationTest(t *testing.T) (*Manager, store.Store) { domains: []string{"test.netbird.io"}, }, } - mgr.exposeTracker = &exposeTracker{manager: mgr} + mgr.exposeReaper = &exposeReaper{manager: mgr} return mgr, testStore } @@ -1017,36 +1017,38 @@ func TestStopServiceFromPeer(t *testing.T) { }) } -func TestDeleteService_UntracksEphemeralExpose(t *testing.T) { +func TestDeleteService_DeletesEphemeralExpose(t *testing.T) { ctx := context.Background() - mgr, _ := setupIntegrationTest(t) + mgr, testStore := setupIntegrationTest(t) resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ Port: 8080, Protocol: "http", }) require.NoError(t, err) - assert.Equal(t, 1, mgr.exposeTracker.CountPeerExposes(testPeerID), "expose should be tracked after create") - // Look up the service by domain to get its store ID - svc, err := mgr.store.GetServiceByDomain(ctx, testAccountID, resp.Domain) + count, err := mgr.store.CountEphemeralServicesByPeer(ctx, store.LockingStrengthNone, testAccountID, testPeerID) + require.NoError(t, err) + assert.Equal(t, int64(1), count, "one ephemeral service should exist after create") + + svc, err := testStore.GetServiceByDomain(ctx, testAccountID, resp.Domain) require.NoError(t, err) - // Delete via the API path (user-initiated) err = mgr.DeleteService(ctx, testAccountID, testUserID, svc.ID) require.NoError(t, err) - assert.Equal(t, 0, mgr.exposeTracker.CountPeerExposes(testPeerID), "expose should be untracked after API delete") + count, err = mgr.store.CountEphemeralServicesByPeer(ctx, store.LockingStrengthNone, testAccountID, testPeerID) + require.NoError(t, err) + assert.Equal(t, int64(0), count, "ephemeral service should be deleted after API delete") - // A new expose should succeed (not blocked by stale tracking) _, err = mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ Port: 9090, Protocol: "http", }) - assert.NoError(t, err, "new expose should succeed after API delete cleared tracking") + assert.NoError(t, err, "new expose should succeed after API delete") } -func TestDeleteAllServices_UntracksEphemeralExposes(t *testing.T) { +func TestDeleteAllServices_DeletesEphemeralExposes(t *testing.T) { ctx := context.Background() mgr, _ := setupIntegrationTest(t) @@ -1058,12 +1060,16 @@ func TestDeleteAllServices_UntracksEphemeralExposes(t *testing.T) { require.NoError(t, err) } - assert.Equal(t, 3, mgr.exposeTracker.CountPeerExposes(testPeerID), "all exposes should be tracked") + count, err := mgr.store.CountEphemeralServicesByPeer(ctx, store.LockingStrengthNone, testAccountID, testPeerID) + require.NoError(t, err) + assert.Equal(t, int64(3), count, "all ephemeral services should exist") - err := mgr.DeleteAllServices(ctx, testAccountID, testUserID) + err = mgr.DeleteAllServices(ctx, testAccountID, testUserID) require.NoError(t, err) - assert.Equal(t, 0, mgr.exposeTracker.CountPeerExposes(testPeerID), "all exposes should be untracked after DeleteAllServices") + count, err = mgr.store.CountEphemeralServicesByPeer(ctx, store.LockingStrengthNone, testAccountID, testPeerID) + require.NoError(t, err) + assert.Equal(t, int64(0), count, "all ephemeral services should be deleted after DeleteAllServices") } func TestRenewServiceFromPeer(t *testing.T) { diff --git a/management/internals/modules/reverseproxy/service/service.go b/management/internals/modules/reverseproxy/service/service.go index 46ae185d6..ee4a91e1f 100644 --- a/management/internals/modules/reverseproxy/service/service.go +++ b/management/internals/modules/reverseproxy/service/service.go @@ -133,8 +133,8 @@ type Service struct { Meta Meta `gorm:"embedded;embeddedPrefix:meta_"` SessionPrivateKey string `gorm:"column:session_private_key"` SessionPublicKey string `gorm:"column:session_public_key"` - Source string `gorm:"default:'permanent'"` - SourcePeer string + Source string `gorm:"default:'permanent';index:idx_service_source_peer"` + SourcePeer string `gorm:"index:idx_service_source_peer"` } func NewService(accountID, name, domain, proxyCluster string, targets []*Target, enabled bool) *Service { diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 41c53980b..8f147d915 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -5040,6 +5040,99 @@ func (s *SqlStore) GetAccountServices(ctx context.Context, lockStrength LockingS return serviceList, nil } +// RenewEphemeralService updates the last_renewed_at timestamp for an ephemeral service. +func (s *SqlStore) RenewEphemeralService(ctx context.Context, accountID, peerID, domain string) error { + result := s.db.Model(&rpservice.Service{}). + Where("account_id = ? AND source_peer = ? AND domain = ? AND source = ?", accountID, peerID, domain, rpservice.SourceEphemeral). + Update("meta_last_renewed_at", time.Now()) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to renew ephemeral service: %v", result.Error) + return status.Errorf(status.Internal, "renew ephemeral service") + } + if result.RowsAffected == 0 { + return status.Errorf(status.NotFound, "no active expose session for domain %s", domain) + } + return nil +} + +// GetExpiredEphemeralServices returns ephemeral services whose last renewal exceeds the given TTL. +// Only the fields needed for reaping are selected. The limit parameter caps the batch size to +// avoid loading too many rows in a single tick. Rows with empty source_peer are excluded to +// skip malformed legacy data. +func (s *SqlStore) GetExpiredEphemeralServices(ctx context.Context, ttl time.Duration, limit int) ([]*rpservice.Service, error) { + cutoff := time.Now().Add(-ttl) + var services []*rpservice.Service + result := s.db. + Select("id", "account_id", "source_peer", "domain"). + Where("source = ? AND source_peer <> '' AND meta_last_renewed_at < ?", rpservice.SourceEphemeral, cutoff). + Limit(limit). + Find(&services) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to get expired ephemeral services: %v", result.Error) + return nil, status.Errorf(status.Internal, "get expired ephemeral services") + } + return services, nil +} + +// CountEphemeralServicesByPeer returns the count of ephemeral services for a specific peer. +// Use LockingStrengthUpdate inside a transaction to serialize concurrent create operations. +// The locking is applied via a row-level SELECT ... FOR UPDATE (not on the aggregate) to +// stay compatible with Postgres, which disallows FOR UPDATE on COUNT(*). +func (s *SqlStore) CountEphemeralServicesByPeer(ctx context.Context, lockStrength LockingStrength, accountID, peerID string) (int64, error) { + if lockStrength == LockingStrengthNone { + var count int64 + result := s.db.Model(&rpservice.Service{}). + Where("account_id = ? AND source_peer = ? AND source = ?", accountID, peerID, rpservice.SourceEphemeral). + Count(&count) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to count ephemeral services: %v", result.Error) + return 0, status.Errorf(status.Internal, "count ephemeral services") + } + return count, nil + } + + var ids []string + result := s.db.Model(&rpservice.Service{}). + Clauses(clause.Locking{Strength: string(lockStrength)}). + Select("id"). + Where("account_id = ? AND source_peer = ? AND source = ?", accountID, peerID, rpservice.SourceEphemeral). + Pluck("id", &ids) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to count ephemeral services: %v", result.Error) + return 0, status.Errorf(status.Internal, "count ephemeral services") + } + return int64(len(ids)), nil +} + +// EphemeralServiceExists checks if an ephemeral service exists for the given peer and domain. +// Use LockingStrengthUpdate inside a transaction to serialize concurrent create operations. +func (s *SqlStore) EphemeralServiceExists(ctx context.Context, lockStrength LockingStrength, accountID, peerID, domain string) (bool, error) { + if lockStrength == LockingStrengthNone { + var count int64 + result := s.db.Model(&rpservice.Service{}). + Where("account_id = ? AND source_peer = ? AND domain = ? AND source = ?", accountID, peerID, domain, rpservice.SourceEphemeral). + Count(&count) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to check ephemeral service existence: %v", result.Error) + return false, status.Errorf(status.Internal, "check ephemeral service existence") + } + return count > 0, nil + } + + var id string + result := s.db.Model(&rpservice.Service{}). + Clauses(clause.Locking{Strength: string(lockStrength)}). + Select("id"). + Where("account_id = ? AND source_peer = ? AND domain = ? AND source = ?", accountID, peerID, domain, rpservice.SourceEphemeral). + Limit(1). + Pluck("id", &id) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to check ephemeral service existence: %v", result.Error) + return false, status.Errorf(status.Internal, "check ephemeral service existence") + } + return id != "", nil +} + func (s *SqlStore) GetCustomDomain(ctx context.Context, accountID string, domainID string) (*domain.Domain, error) { tx := s.db diff --git a/management/server/store/store.go b/management/server/store/store.go index 941aca08a..5123cde72 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -261,6 +261,11 @@ type Store interface { GetServices(ctx context.Context, lockStrength LockingStrength) ([]*rpservice.Service, error) GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*rpservice.Service, error) + RenewEphemeralService(ctx context.Context, accountID, peerID, domain string) error + GetExpiredEphemeralServices(ctx context.Context, ttl time.Duration, limit int) ([]*rpservice.Service, error) + CountEphemeralServicesByPeer(ctx context.Context, lockStrength LockingStrength, accountID, peerID string) (int64, error) + EphemeralServiceExists(ctx context.Context, lockStrength LockingStrength, accountID, peerID, domain string) (bool, error) + GetCustomDomain(ctx context.Context, accountID string, domainID string) (*domain.Domain, error) ListFreeDomains(ctx context.Context, accountID string) ([]string, error) ListCustomDomains(ctx context.Context, accountID string) ([]*domain.Domain, error) diff --git a/management/server/store/store_mock.go b/management/server/store/store_mock.go index 9e11f85fb..414872fbb 100644 --- a/management/server/store/store_mock.go +++ b/management/server/store/store_mock.go @@ -208,6 +208,21 @@ func (mr *MockStoreMockRecorder) CountAccountsByPrivateDomain(ctx, domain interf return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountAccountsByPrivateDomain", reflect.TypeOf((*MockStore)(nil).CountAccountsByPrivateDomain), ctx, domain) } +// CountEphemeralServicesByPeer mocks base method. +func (m *MockStore) CountEphemeralServicesByPeer(ctx context.Context, lockStrength LockingStrength, accountID, peerID string) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CountEphemeralServicesByPeer", ctx, lockStrength, accountID, peerID) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CountEphemeralServicesByPeer indicates an expected call of CountEphemeralServicesByPeer. +func (mr *MockStoreMockRecorder) CountEphemeralServicesByPeer(ctx, lockStrength, accountID, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountEphemeralServicesByPeer", reflect.TypeOf((*MockStore)(nil).CountEphemeralServicesByPeer), ctx, lockStrength, accountID, peerID) +} + // CreateAccessLog mocks base method. func (m *MockStore) CreateAccessLog(ctx context.Context, log *accesslogs.AccessLogEntry) error { m.ctrl.T.Helper() @@ -686,6 +701,21 @@ func (mr *MockStoreMockRecorder) DeleteZoneDNSRecords(ctx, accountID, zoneID int return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteZoneDNSRecords", reflect.TypeOf((*MockStore)(nil).DeleteZoneDNSRecords), ctx, accountID, zoneID) } +// EphemeralServiceExists mocks base method. +func (m *MockStore) EphemeralServiceExists(ctx context.Context, lockStrength LockingStrength, accountID, peerID, domain string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EphemeralServiceExists", ctx, lockStrength, accountID, peerID, domain) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EphemeralServiceExists indicates an expected call of EphemeralServiceExists. +func (mr *MockStoreMockRecorder) EphemeralServiceExists(ctx, lockStrength, accountID, peerID, domain interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EphemeralServiceExists", reflect.TypeOf((*MockStore)(nil).EphemeralServiceExists), ctx, lockStrength, accountID, peerID, domain) +} + // ExecuteInTransaction mocks base method. func (m *MockStore) ExecuteInTransaction(ctx context.Context, f func(Store) error) error { m.ctrl.T.Helper() @@ -1362,6 +1392,21 @@ func (mr *MockStoreMockRecorder) GetDNSRecordByID(ctx, lockStrength, accountID, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDNSRecordByID", reflect.TypeOf((*MockStore)(nil).GetDNSRecordByID), ctx, lockStrength, accountID, zoneID, recordID) } +// GetExpiredEphemeralServices mocks base method. +func (m *MockStore) GetExpiredEphemeralServices(ctx context.Context, ttl time.Duration, limit int) ([]*service.Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetExpiredEphemeralServices", ctx, ttl, limit) + ret0, _ := ret[0].([]*service.Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetExpiredEphemeralServices indicates an expected call of GetExpiredEphemeralServices. +func (mr *MockStoreMockRecorder) GetExpiredEphemeralServices(ctx, ttl, limit interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExpiredEphemeralServices", reflect.TypeOf((*MockStore)(nil).GetExpiredEphemeralServices), ctx, ttl, limit) +} + // GetGroupByID mocks base method. func (m *MockStore) GetGroupByID(ctx context.Context, lockStrength LockingStrength, accountID, groupID string) (*types2.Group, error) { m.ctrl.T.Helper() @@ -2401,6 +2446,20 @@ func (mr *MockStoreMockRecorder) RemoveResourceFromGroup(ctx, accountId, groupID return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveResourceFromGroup", reflect.TypeOf((*MockStore)(nil).RemoveResourceFromGroup), ctx, accountId, groupID, resourceID) } +// RenewEphemeralService mocks base method. +func (m *MockStore) RenewEphemeralService(ctx context.Context, accountID, peerID, domain string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RenewEphemeralService", ctx, accountID, peerID, domain) + ret0, _ := ret[0].(error) + return ret0 +} + +// RenewEphemeralService indicates an expected call of RenewEphemeralService. +func (mr *MockStoreMockRecorder) RenewEphemeralService(ctx, accountID, peerID, domain interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RenewEphemeralService", reflect.TypeOf((*MockStore)(nil).RenewEphemeralService), ctx, accountID, peerID, domain) +} + // RevokeProxyAccessToken mocks base method. func (m *MockStore) RevokeProxyAccessToken(ctx context.Context, tokenID string) error { m.ctrl.T.Helper() From e6012781176c4d0a6465dbf20264f292452cdbd3 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 5 Mar 2026 17:03:26 +0800 Subject: [PATCH 187/374] [management,proxy] Add per-target options to reverse proxy (#5501) --- .github/workflows/golangci-lint.yml | 2 +- .../reverseproxy/service/manager/api.go | 10 +- .../modules/reverseproxy/service/service.go | 240 ++++- .../reverseproxy/service/service_test.go | 183 ++++ proxy/internal/proxy/proxy_bench_test.go | 26 +- proxy/internal/proxy/reverseproxy.go | 46 +- proxy/internal/proxy/reverseproxy_test.go | 140 ++- proxy/internal/proxy/servicemapping.go | 34 +- proxy/internal/roundtrip/context_test.go | 32 + proxy/internal/roundtrip/netbird.go | 76 +- proxy/server.go | 23 +- .../rest/reverse_proxy_services_test.go | 271 ++++++ shared/management/http/api/openapi.yml | 24 + shared/management/http/api/types.gen.go | 59 +- shared/management/proto/proxy_service.pb.go | 864 +++++++++++------- shared/management/proto/proxy_service.proto | 14 + 16 files changed, 1599 insertions(+), 445 deletions(-) create mode 100644 proxy/internal/roundtrip/context_test.go create mode 100644 shared/management/client/rest/reverse_proxy_services_test.go diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 56450d45f..9e753ce73 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -19,7 +19,7 @@ jobs: - name: codespell uses: codespell-project/actions-codespell@v2 with: - ignore_words_list: erro,clienta,hastable,iif,groupd,testin,groupe,cros,ans,deriver + ignore_words_list: erro,clienta,hastable,iif,groupd,testin,groupe,cros,ans,deriver,te skip: go.mod,go.sum,**/proxy/web/** golangci: strategy: diff --git a/management/internals/modules/reverseproxy/service/manager/api.go b/management/internals/modules/reverseproxy/service/manager/api.go index 70b09e603..f28b633b8 100644 --- a/management/internals/modules/reverseproxy/service/manager/api.go +++ b/management/internals/modules/reverseproxy/service/manager/api.go @@ -73,7 +73,10 @@ func (h *handler) createService(w http.ResponseWriter, r *http.Request) { } service := new(rpservice.Service) - service.FromAPIRequest(&req, userAuth.AccountId) + if err = service.FromAPIRequest(&req, userAuth.AccountId); err != nil { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "%s", err.Error()), w) + return + } if err = service.Validate(); err != nil { util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "%s", err.Error()), w) @@ -132,7 +135,10 @@ func (h *handler) updateService(w http.ResponseWriter, r *http.Request) { service := new(rpservice.Service) service.ID = serviceID - service.FromAPIRequest(&req, userAuth.AccountId) + if err = service.FromAPIRequest(&req, userAuth.AccountId); err != nil { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "%s", err.Error()), w) + return + } if err = service.Validate(); err != nil { util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "%s", err.Error()), w) diff --git a/management/internals/modules/reverseproxy/service/service.go b/management/internals/modules/reverseproxy/service/service.go index ee4a91e1f..cd9311b44 100644 --- a/management/internals/modules/reverseproxy/service/service.go +++ b/management/internals/modules/reverseproxy/service/service.go @@ -6,13 +6,16 @@ import ( "fmt" "math/big" "net" + "net/http" "net/url" "regexp" "strconv" + "strings" "time" "github.com/rs/xid" log "github.com/sirupsen/logrus" + "google.golang.org/protobuf/types/known/durationpb" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" "github.com/netbirdio/netbird/shared/hash/argon2id" @@ -49,17 +52,25 @@ const ( SourceEphemeral = "ephemeral" ) +type TargetOptions struct { + SkipTLSVerify bool `json:"skip_tls_verify"` + RequestTimeout time.Duration `json:"request_timeout,omitempty"` + PathRewrite PathRewriteMode `json:"path_rewrite,omitempty"` + CustomHeaders map[string]string `gorm:"serializer:json" json:"custom_headers,omitempty"` +} + type Target struct { - ID uint `gorm:"primaryKey" json:"-"` - AccountID string `gorm:"index:idx_target_account;not null" json:"-"` - ServiceID string `gorm:"index:idx_service_targets;not null" json:"-"` - Path *string `json:"path,omitempty"` - Host string `json:"host"` // the Host field is only used for subnet targets, otherwise ignored - Port int `gorm:"index:idx_target_port" json:"port"` - Protocol string `gorm:"index:idx_target_protocol" json:"protocol"` - TargetId string `gorm:"index:idx_target_id" json:"target_id"` - TargetType string `gorm:"index:idx_target_type" json:"target_type"` - Enabled bool `gorm:"index:idx_target_enabled" json:"enabled"` + ID uint `gorm:"primaryKey" json:"-"` + AccountID string `gorm:"index:idx_target_account;not null" json:"-"` + ServiceID string `gorm:"index:idx_service_targets;not null" json:"-"` + Path *string `json:"path,omitempty"` + Host string `json:"host"` // the Host field is only used for subnet targets, otherwise ignored + Port int `gorm:"index:idx_target_port" json:"port"` + Protocol string `gorm:"index:idx_target_protocol" json:"protocol"` + TargetId string `gorm:"index:idx_target_id" json:"target_id"` + TargetType string `gorm:"index:idx_target_type" json:"target_type"` + Enabled bool `gorm:"index:idx_target_enabled" json:"enabled"` + Options TargetOptions `gorm:"embedded" json:"options"` } type PasswordAuthConfig struct { @@ -194,7 +205,7 @@ func (s *Service) ToAPIResponse() *api.Service { // Convert internal targets to API targets apiTargets := make([]api.ServiceTarget, 0, len(s.Targets)) for _, target := range s.Targets { - apiTargets = append(apiTargets, api.ServiceTarget{ + st := api.ServiceTarget{ Path: target.Path, Host: &target.Host, Port: target.Port, @@ -202,7 +213,9 @@ func (s *Service) ToAPIResponse() *api.Service { TargetId: target.TargetId, TargetType: api.ServiceTargetTargetType(target.TargetType), Enabled: target.Enabled, - }) + } + st.Options = targetOptionsToAPI(target.Options) + apiTargets = append(apiTargets, st) } meta := api.ServiceMeta{ @@ -256,10 +269,14 @@ func (s *Service) ToProtoMapping(operation Operation, authToken string, oidcConf if target.Path != nil { path = *target.Path } - pathMappings = append(pathMappings, &proto.PathMapping{ + + pm := &proto.PathMapping{ Path: path, Target: targetURL.String(), - }) + } + + pm.Options = targetOptionsToProto(target.Options) + pathMappings = append(pathMappings, pm) } auth := &proto.Authentication{ @@ -312,13 +329,87 @@ func isDefaultPort(scheme string, port int) bool { return (scheme == "https" && port == 443) || (scheme == "http" && port == 80) } -func (s *Service) FromAPIRequest(req *api.ServiceRequest, accountID string) { +// PathRewriteMode controls how the request path is rewritten before forwarding. +type PathRewriteMode string + +const ( + PathRewritePreserve PathRewriteMode = "preserve" +) + +func pathRewriteToProto(mode PathRewriteMode) proto.PathRewriteMode { + switch mode { + case PathRewritePreserve: + return proto.PathRewriteMode_PATH_REWRITE_PRESERVE + default: + return proto.PathRewriteMode_PATH_REWRITE_DEFAULT + } +} + +func targetOptionsToAPI(opts TargetOptions) *api.ServiceTargetOptions { + if !opts.SkipTLSVerify && opts.RequestTimeout == 0 && opts.PathRewrite == "" && len(opts.CustomHeaders) == 0 { + return nil + } + apiOpts := &api.ServiceTargetOptions{} + if opts.SkipTLSVerify { + apiOpts.SkipTlsVerify = &opts.SkipTLSVerify + } + if opts.RequestTimeout != 0 { + s := opts.RequestTimeout.String() + apiOpts.RequestTimeout = &s + } + if opts.PathRewrite != "" { + pr := api.ServiceTargetOptionsPathRewrite(opts.PathRewrite) + apiOpts.PathRewrite = &pr + } + if len(opts.CustomHeaders) > 0 { + apiOpts.CustomHeaders = &opts.CustomHeaders + } + return apiOpts +} + +func targetOptionsToProto(opts TargetOptions) *proto.PathTargetOptions { + if !opts.SkipTLSVerify && opts.PathRewrite == "" && opts.RequestTimeout == 0 && len(opts.CustomHeaders) == 0 { + return nil + } + popts := &proto.PathTargetOptions{ + SkipTlsVerify: opts.SkipTLSVerify, + PathRewrite: pathRewriteToProto(opts.PathRewrite), + CustomHeaders: opts.CustomHeaders, + } + if opts.RequestTimeout != 0 { + popts.RequestTimeout = durationpb.New(opts.RequestTimeout) + } + return popts +} + +func targetOptionsFromAPI(idx int, o *api.ServiceTargetOptions) (TargetOptions, error) { + var opts TargetOptions + if o.SkipTlsVerify != nil { + opts.SkipTLSVerify = *o.SkipTlsVerify + } + if o.RequestTimeout != nil { + d, err := time.ParseDuration(*o.RequestTimeout) + if err != nil { + return opts, fmt.Errorf("target %d: parse request_timeout %q: %w", idx, *o.RequestTimeout, err) + } + opts.RequestTimeout = d + } + if o.PathRewrite != nil { + opts.PathRewrite = PathRewriteMode(*o.PathRewrite) + } + if o.CustomHeaders != nil { + opts.CustomHeaders = *o.CustomHeaders + } + return opts, nil +} + +func (s *Service) FromAPIRequest(req *api.ServiceRequest, accountID string) error { s.Name = req.Name s.Domain = req.Domain s.AccountID = accountID targets := make([]*Target, 0, len(req.Targets)) - for _, apiTarget := range req.Targets { + for i, apiTarget := range req.Targets { target := &Target{ AccountID: accountID, Path: apiTarget.Path, @@ -331,6 +422,13 @@ func (s *Service) FromAPIRequest(req *api.ServiceRequest, accountID string) { if apiTarget.Host != nil { target.Host = *apiTarget.Host } + if apiTarget.Options != nil { + opts, err := targetOptionsFromAPI(i, apiTarget.Options) + if err != nil { + return err + } + target.Options = opts + } targets = append(targets, target) } s.Targets = targets @@ -368,6 +466,8 @@ func (s *Service) FromAPIRequest(req *api.ServiceRequest, accountID string) { } s.Auth.BearerAuth = bearerAuth } + + return nil } func (s *Service) Validate() error { @@ -400,11 +500,113 @@ func (s *Service) Validate() error { if target.TargetId == "" { return fmt.Errorf("target %d has empty target_id", i) } + if err := validateTargetOptions(i, &target.Options); err != nil { + return err + } } return nil } +const ( + maxRequestTimeout = 5 * time.Minute + maxCustomHeaders = 16 + maxHeaderKeyLen = 128 + maxHeaderValueLen = 4096 +) + +// httpHeaderNameRe matches valid HTTP header field names per RFC 7230 token definition. +var httpHeaderNameRe = regexp.MustCompile(`^[!#$%&'*+\-.^_` + "`" + `|~0-9A-Za-z]+$`) + +// hopByHopHeaders are headers that must not be set as custom headers +// because they are connection-level and stripped by the proxy. +var hopByHopHeaders = map[string]struct{}{ + "Connection": {}, + "Keep-Alive": {}, + "Proxy-Authenticate": {}, + "Proxy-Authorization": {}, + "Proxy-Connection": {}, + "Te": {}, + "Trailer": {}, + "Transfer-Encoding": {}, + "Upgrade": {}, +} + +// reservedHeaders are set authoritatively by the proxy or control HTTP framing +// and cannot be overridden. +var reservedHeaders = map[string]struct{}{ + "Content-Length": {}, + "Content-Type": {}, + "Cookie": {}, + "Forwarded": {}, + "X-Forwarded-For": {}, + "X-Forwarded-Host": {}, + "X-Forwarded-Port": {}, + "X-Forwarded-Proto": {}, + "X-Real-Ip": {}, +} + +func validateTargetOptions(idx int, opts *TargetOptions) error { + if opts.PathRewrite != "" && opts.PathRewrite != PathRewritePreserve { + return fmt.Errorf("target %d: unknown path_rewrite mode %q", idx, opts.PathRewrite) + } + + if opts.RequestTimeout != 0 { + if opts.RequestTimeout <= 0 { + return fmt.Errorf("target %d: request_timeout must be positive", idx) + } + if opts.RequestTimeout > maxRequestTimeout { + return fmt.Errorf("target %d: request_timeout exceeds maximum of %s", idx, maxRequestTimeout) + } + } + + if err := validateCustomHeaders(idx, opts.CustomHeaders); err != nil { + return err + } + + return nil +} + +func validateCustomHeaders(idx int, headers map[string]string) error { + if len(headers) > maxCustomHeaders { + return fmt.Errorf("target %d: custom_headers count %d exceeds maximum of %d", idx, len(headers), maxCustomHeaders) + } + seen := make(map[string]string, len(headers)) + for key, value := range headers { + if !httpHeaderNameRe.MatchString(key) { + return fmt.Errorf("target %d: custom header key %q is not a valid HTTP header name", idx, key) + } + if len(key) > maxHeaderKeyLen { + return fmt.Errorf("target %d: custom header key %q exceeds maximum length of %d", idx, key, maxHeaderKeyLen) + } + if len(value) > maxHeaderValueLen { + return fmt.Errorf("target %d: custom header %q value exceeds maximum length of %d", idx, key, maxHeaderValueLen) + } + if containsCRLF(key) || containsCRLF(value) { + return fmt.Errorf("target %d: custom header %q contains invalid characters", idx, key) + } + canonical := http.CanonicalHeaderKey(key) + if prev, ok := seen[canonical]; ok { + return fmt.Errorf("target %d: custom header keys %q and %q collide (both canonicalize to %q)", idx, prev, key, canonical) + } + seen[canonical] = key + if _, ok := hopByHopHeaders[canonical]; ok { + return fmt.Errorf("target %d: custom header %q is a hop-by-hop header and cannot be set", idx, key) + } + if _, ok := reservedHeaders[canonical]; ok { + return fmt.Errorf("target %d: custom header %q is managed by the proxy and cannot be overridden", idx, key) + } + if canonical == "Host" { + return fmt.Errorf("target %d: use pass_host_header instead of setting Host as a custom header", idx) + } + } + return nil +} + +func containsCRLF(s string) bool { + return strings.ContainsAny(s, "\r\n") +} + func (s *Service) EventMeta() map[string]any { return map[string]any{"name": s.Name, "domain": s.Domain, "proxy_cluster": s.ProxyCluster, "source": s.Source, "auth": s.isAuthEnabled()} } @@ -417,6 +619,12 @@ func (s *Service) Copy() *Service { targets := make([]*Target, len(s.Targets)) for i, target := range s.Targets { targetCopy := *target + if len(target.Options.CustomHeaders) > 0 { + targetCopy.Options.CustomHeaders = make(map[string]string, len(target.Options.CustomHeaders)) + for k, v := range target.Options.CustomHeaders { + targetCopy.Options.CustomHeaders[k] = v + } + } targets[i] = &targetCopy } diff --git a/management/internals/modules/reverseproxy/service/service_test.go b/management/internals/modules/reverseproxy/service/service_test.go index 8b09ab827..79c98fc14 100644 --- a/management/internals/modules/reverseproxy/service/service_test.go +++ b/management/internals/modules/reverseproxy/service/service_test.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -87,6 +88,188 @@ func TestValidate_MultipleTargetsOneInvalid(t *testing.T) { assert.Contains(t, err.Error(), "empty target_id") } +func TestValidateTargetOptions_PathRewrite(t *testing.T) { + tests := []struct { + name string + mode PathRewriteMode + wantErr string + }{ + {"empty is default", "", ""}, + {"preserve is valid", PathRewritePreserve, ""}, + {"unknown rejected", "regex", "unknown path_rewrite mode"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rp := validProxy() + rp.Targets[0].Options.PathRewrite = tt.mode + err := rp.Validate() + if tt.wantErr == "" { + assert.NoError(t, err) + } else { + assert.ErrorContains(t, err, tt.wantErr) + } + }) + } +} + +func TestValidateTargetOptions_RequestTimeout(t *testing.T) { + tests := []struct { + name string + timeout time.Duration + wantErr string + }{ + {"valid 30s", 30 * time.Second, ""}, + {"valid 2m", 2 * time.Minute, ""}, + {"zero is fine", 0, ""}, + {"negative", -1 * time.Second, "must be positive"}, + {"exceeds max", 10 * time.Minute, "exceeds maximum"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rp := validProxy() + rp.Targets[0].Options.RequestTimeout = tt.timeout + err := rp.Validate() + if tt.wantErr == "" { + assert.NoError(t, err) + } else { + assert.ErrorContains(t, err, tt.wantErr) + } + }) + } +} + +func TestValidateTargetOptions_CustomHeaders(t *testing.T) { + t.Run("valid headers", func(t *testing.T) { + rp := validProxy() + rp.Targets[0].Options.CustomHeaders = map[string]string{ + "X-Custom": "value", + "X-Trace": "abc123", + } + assert.NoError(t, rp.Validate()) + }) + + t.Run("CRLF in key", func(t *testing.T) { + rp := validProxy() + rp.Targets[0].Options.CustomHeaders = map[string]string{"X-Bad\r\nKey": "value"} + assert.ErrorContains(t, rp.Validate(), "not a valid HTTP header name") + }) + + t.Run("CRLF in value", func(t *testing.T) { + rp := validProxy() + rp.Targets[0].Options.CustomHeaders = map[string]string{"X-Good": "bad\nvalue"} + assert.ErrorContains(t, rp.Validate(), "invalid characters") + }) + + t.Run("hop-by-hop header rejected", func(t *testing.T) { + for _, h := range []string{"Connection", "Transfer-Encoding", "Keep-Alive", "Upgrade", "Proxy-Connection"} { + rp := validProxy() + rp.Targets[0].Options.CustomHeaders = map[string]string{h: "value"} + assert.ErrorContains(t, rp.Validate(), "hop-by-hop", "header %q should be rejected", h) + } + }) + + t.Run("reserved header rejected", func(t *testing.T) { + for _, h := range []string{"X-Forwarded-For", "X-Real-IP", "X-Forwarded-Proto", "X-Forwarded-Host", "X-Forwarded-Port", "Cookie", "Forwarded", "Content-Length", "Content-Type"} { + rp := validProxy() + rp.Targets[0].Options.CustomHeaders = map[string]string{h: "value"} + assert.ErrorContains(t, rp.Validate(), "managed by the proxy", "header %q should be rejected", h) + } + }) + + t.Run("Host header rejected", func(t *testing.T) { + rp := validProxy() + rp.Targets[0].Options.CustomHeaders = map[string]string{"Host": "evil.com"} + assert.ErrorContains(t, rp.Validate(), "pass_host_header") + }) + + t.Run("too many headers", func(t *testing.T) { + rp := validProxy() + headers := make(map[string]string, 17) + for i := range 17 { + headers[fmt.Sprintf("X-H%d", i)] = "v" + } + rp.Targets[0].Options.CustomHeaders = headers + assert.ErrorContains(t, rp.Validate(), "exceeds maximum of 16") + }) + + t.Run("key too long", func(t *testing.T) { + rp := validProxy() + rp.Targets[0].Options.CustomHeaders = map[string]string{strings.Repeat("X", 129): "v"} + assert.ErrorContains(t, rp.Validate(), "key") + assert.ErrorContains(t, rp.Validate(), "exceeds maximum length") + }) + + t.Run("value too long", func(t *testing.T) { + rp := validProxy() + rp.Targets[0].Options.CustomHeaders = map[string]string{"X-Ok": strings.Repeat("v", 4097)} + assert.ErrorContains(t, rp.Validate(), "value exceeds maximum length") + }) + + t.Run("duplicate canonical keys rejected", func(t *testing.T) { + rp := validProxy() + rp.Targets[0].Options.CustomHeaders = map[string]string{ + "x-custom": "a", + "X-Custom": "b", + } + assert.ErrorContains(t, rp.Validate(), "collide") + }) +} + +func TestToProtoMapping_TargetOptions(t *testing.T) { + rp := &Service{ + ID: "svc-1", + AccountID: "acc-1", + Domain: "example.com", + Targets: []*Target{ + { + TargetId: "peer-1", + TargetType: TargetTypePeer, + Host: "10.0.0.1", + Port: 8080, + Protocol: "http", + Enabled: true, + Options: TargetOptions{ + SkipTLSVerify: true, + RequestTimeout: 30 * time.Second, + PathRewrite: PathRewritePreserve, + CustomHeaders: map[string]string{"X-Custom": "val"}, + }, + }, + }, + } + pm := rp.ToProtoMapping(Create, "token", proxy.OIDCValidationConfig{}) + require.Len(t, pm.Path, 1) + + opts := pm.Path[0].Options + require.NotNil(t, opts, "options should be populated") + assert.True(t, opts.SkipTlsVerify) + assert.Equal(t, proto.PathRewriteMode_PATH_REWRITE_PRESERVE, opts.PathRewrite) + assert.Equal(t, map[string]string{"X-Custom": "val"}, opts.CustomHeaders) + require.NotNil(t, opts.RequestTimeout) + assert.Equal(t, int64(30), opts.RequestTimeout.Seconds) +} + +func TestToProtoMapping_NoOptionsWhenDefault(t *testing.T) { + rp := &Service{ + ID: "svc-1", + AccountID: "acc-1", + Domain: "example.com", + Targets: []*Target{ + { + TargetId: "peer-1", + TargetType: TargetTypePeer, + Host: "10.0.0.1", + Port: 8080, + Protocol: "http", + Enabled: true, + }, + }, + } + pm := rp.ToProtoMapping(Create, "token", proxy.OIDCValidationConfig{}) + require.Len(t, pm.Path, 1) + assert.Nil(t, pm.Path[0].Options, "options should be nil when all defaults") +} + func TestIsDefaultPort(t *testing.T) { tests := []struct { scheme string diff --git a/proxy/internal/proxy/proxy_bench_test.go b/proxy/internal/proxy/proxy_bench_test.go index b7526e26b..5af2167e6 100644 --- a/proxy/internal/proxy/proxy_bench_test.go +++ b/proxy/internal/proxy/proxy_bench_test.go @@ -28,10 +28,12 @@ func BenchmarkServeHTTP(b *testing.B) { ID: rand.Text(), AccountID: types.AccountID(rand.Text()), Host: "app.example.com", - Paths: map[string]*url.URL{ + Paths: map[string]*proxy.PathTarget{ "/": { - Scheme: "http", - Host: "10.0.0.1:8080", + URL: &url.URL{ + Scheme: "http", + Host: "10.0.0.1:8080", + }, }, }, }) @@ -67,10 +69,12 @@ func BenchmarkServeHTTPHostCount(b *testing.B) { ID: id, AccountID: types.AccountID(rand.Text()), Host: host, - Paths: map[string]*url.URL{ + Paths: map[string]*proxy.PathTarget{ "/": { - Scheme: "http", - Host: "10.0.0.1:8080", + URL: &url.URL{ + Scheme: "http", + Host: "10.0.0.1:8080", + }, }, }, }) @@ -100,15 +104,17 @@ func BenchmarkServeHTTPPathCount(b *testing.B) { b.Fatal(err) } - paths := make(map[string]*url.URL, pathCount) + paths := make(map[string]*proxy.PathTarget, pathCount) for i := range pathCount { path := "/" + rand.Text() if int64(i) == targetIndex.Int64() { target = path } - paths[path] = &url.URL{ - Scheme: "http", - Host: "10.0.0.1:" + fmt.Sprintf("%d", 8080+i), + paths[path] = &proxy.PathTarget{ + URL: &url.URL{ + Scheme: "http", + Host: "10.0.0.1:" + fmt.Sprintf("%d", 8080+i), + }, } } rp.AddMapping(proxy.Mapping{ diff --git a/proxy/internal/proxy/reverseproxy.go b/proxy/internal/proxy/reverseproxy.go index ee45ccfbb..b0001d5b9 100644 --- a/proxy/internal/proxy/reverseproxy.go +++ b/proxy/internal/proxy/reverseproxy.go @@ -80,14 +80,30 @@ func (p *ReverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { capturedData.SetAccountId(result.accountID) } + pt := result.target + + if pt.SkipTLSVerify { + ctx = roundtrip.WithSkipTLSVerify(ctx) + } + if pt.RequestTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, pt.RequestTimeout) + defer cancel() + } + + rewriteMatchedPath := result.matchedPath + if pt.PathRewrite == PathRewritePreserve { + rewriteMatchedPath = "" + } + rp := &httputil.ReverseProxy{ - Rewrite: p.rewriteFunc(result.url, result.matchedPath, result.passHostHeader), + Rewrite: p.rewriteFunc(pt.URL, rewriteMatchedPath, result.passHostHeader, pt.PathRewrite, pt.CustomHeaders), Transport: p.transport, FlushInterval: -1, ErrorHandler: proxyErrorHandler, } if result.rewriteRedirects { - rp.ModifyResponse = p.rewriteLocationFunc(result.url, result.matchedPath, r) //nolint:bodyclose + rp.ModifyResponse = p.rewriteLocationFunc(pt.URL, rewriteMatchedPath, r) //nolint:bodyclose } rp.ServeHTTP(w, r.WithContext(ctx)) } @@ -97,16 +113,22 @@ func (p *ReverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { // forwarding headers and stripping proxy authentication credentials. // When passHostHeader is true, the original client Host header is preserved // instead of being rewritten to the backend's address. -func (p *ReverseProxy) rewriteFunc(target *url.URL, matchedPath string, passHostHeader bool) func(r *httputil.ProxyRequest) { +// The pathRewrite parameter controls how the request path is transformed. +func (p *ReverseProxy) rewriteFunc(target *url.URL, matchedPath string, passHostHeader bool, pathRewrite PathRewriteMode, customHeaders map[string]string) func(r *httputil.ProxyRequest) { return func(r *httputil.ProxyRequest) { - // Strip the matched path prefix from the incoming request path before - // SetURL joins it with the target's base path, avoiding path duplication. - if matchedPath != "" && matchedPath != "/" { - r.Out.URL.Path = strings.TrimPrefix(r.Out.URL.Path, matchedPath) - if r.Out.URL.Path == "" { - r.Out.URL.Path = "/" + switch pathRewrite { + case PathRewritePreserve: + // Keep the full original request path as-is. + default: + if matchedPath != "" && matchedPath != "/" { + // Strip the matched path prefix from the incoming request path before + // SetURL joins it with the target's base path, avoiding path duplication. + r.Out.URL.Path = strings.TrimPrefix(r.Out.URL.Path, matchedPath) + if r.Out.URL.Path == "" { + r.Out.URL.Path = "/" + } + r.Out.URL.RawPath = "" } - r.Out.URL.RawPath = "" } r.SetURL(target) @@ -116,6 +138,10 @@ func (p *ReverseProxy) rewriteFunc(target *url.URL, matchedPath string, passHost r.Out.Host = target.Host } + for k, v := range customHeaders { + r.Out.Header.Set(k, v) + } + clientIP := extractClientIP(r.In.RemoteAddr) if IsTrustedProxy(clientIP, p.trustedProxies) { diff --git a/proxy/internal/proxy/reverseproxy_test.go b/proxy/internal/proxy/reverseproxy_test.go index f7f231db4..be2fb9105 100644 --- a/proxy/internal/proxy/reverseproxy_test.go +++ b/proxy/internal/proxy/reverseproxy_test.go @@ -28,7 +28,7 @@ func TestRewriteFunc_HostRewriting(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto"} t.Run("rewrites host to backend by default", func(t *testing.T) { - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "https://public.example.com/path", "203.0.113.1:12345") rewrite(pr) @@ -37,7 +37,7 @@ func TestRewriteFunc_HostRewriting(t *testing.T) { }) t.Run("preserves original host when passHostHeader is true", func(t *testing.T) { - rewrite := p.rewriteFunc(target, "", true) + rewrite := p.rewriteFunc(target, "", true, PathRewriteDefault, nil) pr := newProxyRequest(t, "https://public.example.com/path", "203.0.113.1:12345") rewrite(pr) @@ -52,7 +52,7 @@ func TestRewriteFunc_HostRewriting(t *testing.T) { func TestRewriteFunc_XForwardedForStripping(t *testing.T) { target, _ := url.Parse("http://backend.internal:8080") p := &ReverseProxy{forwardedProto: "auto"} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) t.Run("sets X-Forwarded-For from direct connection IP", func(t *testing.T) { pr := newProxyRequest(t, "http://example.com/", "203.0.113.50:9999") @@ -89,7 +89,7 @@ func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { t.Run("sets X-Forwarded-Host to original host", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto"} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://myapp.example.com:8443/path", "1.2.3.4:5000") rewrite(pr) @@ -99,7 +99,7 @@ func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { t.Run("sets X-Forwarded-Port from explicit host port", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto"} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://example.com:8443/path", "1.2.3.4:5000") rewrite(pr) @@ -109,7 +109,7 @@ func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { t.Run("defaults X-Forwarded-Port to 443 for https", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto"} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "https://example.com/", "1.2.3.4:5000") pr.In.TLS = &tls.ConnectionState{} @@ -120,7 +120,7 @@ func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { t.Run("defaults X-Forwarded-Port to 80 for http", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto"} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") rewrite(pr) @@ -130,7 +130,7 @@ func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { t.Run("auto detects https from TLS", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto"} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "https://example.com/", "1.2.3.4:5000") pr.In.TLS = &tls.ConnectionState{} @@ -141,7 +141,7 @@ func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { t.Run("auto detects http without TLS", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto"} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") rewrite(pr) @@ -151,7 +151,7 @@ func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { t.Run("forced proto overrides TLS detection", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "https"} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") // No TLS, but forced to https @@ -162,7 +162,7 @@ func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { t.Run("forced http proto", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "http"} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "https://example.com/", "1.2.3.4:5000") pr.In.TLS = &tls.ConnectionState{} @@ -175,7 +175,7 @@ func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { func TestRewriteFunc_SessionCookieStripping(t *testing.T) { target, _ := url.Parse("http://backend.internal:8080") p := &ReverseProxy{forwardedProto: "auto"} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) t.Run("strips nb_session cookie", func(t *testing.T) { pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") @@ -220,7 +220,7 @@ func TestRewriteFunc_SessionCookieStripping(t *testing.T) { func TestRewriteFunc_SessionTokenQueryStripping(t *testing.T) { target, _ := url.Parse("http://backend.internal:8080") p := &ReverseProxy{forwardedProto: "auto"} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) t.Run("strips session_token query parameter", func(t *testing.T) { pr := newProxyRequest(t, "http://example.com/callback?session_token=secret123&other=keep", "1.2.3.4:5000") @@ -248,7 +248,7 @@ func TestRewriteFunc_URLRewriting(t *testing.T) { t.Run("rewrites URL to target with path prefix", func(t *testing.T) { target, _ := url.Parse("http://backend.internal:8080/app") - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://example.com/somepath", "1.2.3.4:5000") rewrite(pr) @@ -261,7 +261,7 @@ func TestRewriteFunc_URLRewriting(t *testing.T) { t.Run("strips matched path prefix to avoid duplication", func(t *testing.T) { target, _ := url.Parse("https://backend.example.org:443/app") - rewrite := p.rewriteFunc(target, "/app", false) + rewrite := p.rewriteFunc(target, "/app", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://example.com/app", "1.2.3.4:5000") rewrite(pr) @@ -274,7 +274,7 @@ func TestRewriteFunc_URLRewriting(t *testing.T) { t.Run("strips matched prefix and preserves subpath", func(t *testing.T) { target, _ := url.Parse("https://backend.example.org:443/app") - rewrite := p.rewriteFunc(target, "/app", false) + rewrite := p.rewriteFunc(target, "/app", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://example.com/app/article/123", "1.2.3.4:5000") rewrite(pr) @@ -332,7 +332,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("appends to X-Forwarded-For", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") pr.In.Header.Set("X-Forwarded-For", "203.0.113.50") @@ -344,7 +344,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("preserves upstream X-Real-IP", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") pr.In.Header.Set("X-Forwarded-For", "203.0.113.50") @@ -357,7 +357,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("resolves X-Real-IP from XFF when not set by upstream", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") pr.In.Header.Set("X-Forwarded-For", "203.0.113.50, 10.0.0.2") @@ -370,7 +370,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("preserves upstream X-Forwarded-Host", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://proxy.internal/", "10.0.0.1:5000") pr.In.Header.Set("X-Forwarded-Host", "original.example.com") @@ -382,7 +382,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("preserves upstream X-Forwarded-Proto", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") pr.In.Header.Set("X-Forwarded-Proto", "https") @@ -394,7 +394,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("preserves upstream X-Forwarded-Port", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") pr.In.Header.Set("X-Forwarded-Port", "8443") @@ -406,7 +406,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("falls back to local proto when upstream does not set it", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "https", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") @@ -418,7 +418,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("sets X-Forwarded-Host from request when upstream does not set it", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") @@ -429,7 +429,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("untrusted RemoteAddr strips headers even with trusted list", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://example.com/", "203.0.113.50:9999") pr.In.Header.Set("X-Forwarded-For", "10.0.0.1, 172.16.0.1") @@ -454,7 +454,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("empty trusted list behaves as untrusted", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: nil} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") pr.In.Header.Set("X-Forwarded-For", "203.0.113.50") @@ -467,7 +467,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("XFF starts fresh when trusted proxy has no upstream XFF", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") @@ -490,7 +490,7 @@ func TestRewriteFunc_PathForwarding(t *testing.T) { t.Run("path prefix baked into target URL is a no-op", func(t *testing.T) { // Management builds: path="/heise", target="https://heise.de:443/heise" target, _ := url.Parse("https://heise.de:443/heise") - rewrite := p.rewriteFunc(target, "/heise", false) + rewrite := p.rewriteFunc(target, "/heise", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://external.test/heise", "1.2.3.4:5000") rewrite(pr) @@ -501,7 +501,7 @@ func TestRewriteFunc_PathForwarding(t *testing.T) { t.Run("subpath under prefix also preserved", func(t *testing.T) { target, _ := url.Parse("https://heise.de:443/heise") - rewrite := p.rewriteFunc(target, "/heise", false) + rewrite := p.rewriteFunc(target, "/heise", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://external.test/heise/article/123", "1.2.3.4:5000") rewrite(pr) @@ -513,7 +513,7 @@ func TestRewriteFunc_PathForwarding(t *testing.T) { // What the behavior WOULD be if target URL had no path (true stripping) t.Run("target without path prefix gives true stripping", func(t *testing.T) { target, _ := url.Parse("https://heise.de:443") - rewrite := p.rewriteFunc(target, "/heise", false) + rewrite := p.rewriteFunc(target, "/heise", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://external.test/heise", "1.2.3.4:5000") rewrite(pr) @@ -524,7 +524,7 @@ func TestRewriteFunc_PathForwarding(t *testing.T) { t.Run("target without path prefix strips and preserves subpath", func(t *testing.T) { target, _ := url.Parse("https://heise.de:443") - rewrite := p.rewriteFunc(target, "/heise", false) + rewrite := p.rewriteFunc(target, "/heise", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://external.test/heise/article/123", "1.2.3.4:5000") rewrite(pr) @@ -536,7 +536,7 @@ func TestRewriteFunc_PathForwarding(t *testing.T) { // Root path "/" — no stripping expected t.Run("root path forwards full request path unchanged", func(t *testing.T) { target, _ := url.Parse("https://backend.example.com:443/") - rewrite := p.rewriteFunc(target, "/", false) + rewrite := p.rewriteFunc(target, "/", false, PathRewriteDefault, nil) pr := newProxyRequest(t, "http://external.test/heise", "1.2.3.4:5000") rewrite(pr) @@ -546,6 +546,82 @@ func TestRewriteFunc_PathForwarding(t *testing.T) { }) } +func TestRewriteFunc_PreservePath(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto"} + target, _ := url.Parse("http://backend.internal:8080") + + t.Run("preserve keeps full request path", func(t *testing.T) { + rewrite := p.rewriteFunc(target, "/api", false, PathRewritePreserve, nil) + pr := newProxyRequest(t, "http://example.com/api/users/123", "1.2.3.4:5000") + + rewrite(pr) + + assert.Equal(t, "/api/users/123", pr.Out.URL.Path, + "preserve should keep the full original request path") + }) + + t.Run("preserve with root matchedPath", func(t *testing.T) { + rewrite := p.rewriteFunc(target, "/", false, PathRewritePreserve, nil) + pr := newProxyRequest(t, "http://example.com/anything", "1.2.3.4:5000") + + rewrite(pr) + + assert.Equal(t, "/anything", pr.Out.URL.Path) + }) +} + +func TestRewriteFunc_CustomHeaders(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto"} + target, _ := url.Parse("http://backend.internal:8080") + + t.Run("injects custom headers", func(t *testing.T) { + headers := map[string]string{ + "X-Custom-Auth": "token-abc", + "X-Env": "production", + } + rewrite := p.rewriteFunc(target, "/", false, PathRewriteDefault, headers) + pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") + + rewrite(pr) + + assert.Equal(t, "token-abc", pr.Out.Header.Get("X-Custom-Auth")) + assert.Equal(t, "production", pr.Out.Header.Get("X-Env")) + }) + + t.Run("nil customHeaders is fine", func(t *testing.T) { + rewrite := p.rewriteFunc(target, "/", false, PathRewriteDefault, nil) + pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") + + rewrite(pr) + + assert.Equal(t, "backend.internal:8080", pr.Out.Host) + }) + + t.Run("custom headers override existing request headers", func(t *testing.T) { + headers := map[string]string{"X-Override": "new-value"} + rewrite := p.rewriteFunc(target, "/", false, PathRewriteDefault, headers) + pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") + pr.In.Header.Set("X-Override", "old-value") + + rewrite(pr) + + assert.Equal(t, "new-value", pr.Out.Header.Get("X-Override")) + }) +} + +func TestRewriteFunc_PreservePathWithCustomHeaders(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto"} + target, _ := url.Parse("http://backend.internal:8080") + + rewrite := p.rewriteFunc(target, "/api", false, PathRewritePreserve, map[string]string{"X-Via": "proxy"}) + pr := newProxyRequest(t, "http://example.com/api/deep/path", "1.2.3.4:5000") + + rewrite(pr) + + assert.Equal(t, "/api/deep/path", pr.Out.URL.Path, "preserve should keep the full original path") + assert.Equal(t, "proxy", pr.Out.Header.Get("X-Via"), "custom header should be set") +} + func TestRewriteLocationFunc(t *testing.T) { target, _ := url.Parse("http://backend.internal:8080") newProxy := func(proto string) *ReverseProxy { return &ReverseProxy{forwardedProto: proto} } diff --git a/proxy/internal/proxy/servicemapping.go b/proxy/internal/proxy/servicemapping.go index 6f5829ebb..58b92ff9e 100644 --- a/proxy/internal/proxy/servicemapping.go +++ b/proxy/internal/proxy/servicemapping.go @@ -6,21 +6,41 @@ import ( "net/url" "sort" "strings" + "time" "github.com/netbirdio/netbird/proxy/internal/types" ) +// PathRewriteMode controls how the request path is rewritten before forwarding. +type PathRewriteMode int + +const ( + // PathRewriteDefault strips the matched prefix and joins with the target path. + PathRewriteDefault PathRewriteMode = iota + // PathRewritePreserve keeps the full original request path as-is. + PathRewritePreserve +) + +// PathTarget holds a backend URL and per-target behavioral options. +type PathTarget struct { + URL *url.URL + SkipTLSVerify bool + RequestTimeout time.Duration + PathRewrite PathRewriteMode + CustomHeaders map[string]string +} + type Mapping struct { ID string AccountID types.AccountID Host string - Paths map[string]*url.URL + Paths map[string]*PathTarget PassHostHeader bool RewriteRedirects bool } type targetResult struct { - url *url.URL + target *PathTarget matchedPath string serviceID string accountID types.AccountID @@ -55,10 +75,14 @@ func (p *ReverseProxy) findTargetForRequest(req *http.Request) (targetResult, bo for _, path := range paths { if strings.HasPrefix(req.URL.Path, path) { - target := m.Paths[path] - p.logger.Debugf("matched host: %s, path: %s -> %s", host, path, target) + pt := m.Paths[path] + if pt == nil || pt.URL == nil { + p.logger.Warnf("invalid mapping for host: %s, path: %s (nil target)", host, path) + continue + } + p.logger.Debugf("matched host: %s, path: %s -> %s", host, path, pt.URL) return targetResult{ - url: target, + target: pt, matchedPath: path, serviceID: m.ID, accountID: m.AccountID, diff --git a/proxy/internal/roundtrip/context_test.go b/proxy/internal/roundtrip/context_test.go new file mode 100644 index 000000000..c4e8267f8 --- /dev/null +++ b/proxy/internal/roundtrip/context_test.go @@ -0,0 +1,32 @@ +package roundtrip + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/proxy/internal/types" +) + +func TestAccountIDContext(t *testing.T) { + t.Run("returns empty when missing", func(t *testing.T) { + assert.Equal(t, types.AccountID(""), AccountIDFromContext(context.Background())) + }) + + t.Run("round-trips value", func(t *testing.T) { + ctx := WithAccountID(context.Background(), "acc-123") + assert.Equal(t, types.AccountID("acc-123"), AccountIDFromContext(ctx)) + }) +} + +func TestSkipTLSVerifyContext(t *testing.T) { + t.Run("false by default", func(t *testing.T) { + assert.False(t, skipTLSVerifyFromContext(context.Background())) + }) + + t.Run("true when set", func(t *testing.T) { + ctx := WithSkipTLSVerify(context.Background()) + assert.True(t, skipTLSVerifyFromContext(ctx)) + }) +} diff --git a/proxy/internal/roundtrip/netbird.go b/proxy/internal/roundtrip/netbird.go index 481b42d2b..57770f4a5 100644 --- a/proxy/internal/roundtrip/netbird.go +++ b/proxy/internal/roundtrip/netbird.go @@ -2,6 +2,7 @@ package roundtrip import ( "context" + "crypto/tls" "errors" "fmt" "net/http" @@ -52,9 +53,12 @@ type domainNotification struct { type clientEntry struct { client *embed.Client transport *http.Transport - domains map[domain.Domain]domainInfo - createdAt time.Time - started bool + // insecureTransport is a clone of transport with TLS verification disabled, + // used when per-target skip_tls_verify is set. + insecureTransport *http.Transport + domains map[domain.Domain]domainInfo + createdAt time.Time + started bool // Per-backend in-flight limiting keyed by target host:port. // TODO: clean up stale entries when backend targets change. inflightMu sync.Mutex @@ -130,6 +134,9 @@ type ClientDebugInfo struct { // accountIDContextKey is the context key for storing the account ID. type accountIDContextKey struct{} +// skipTLSVerifyContextKey is the context key for requesting insecure TLS. +type skipTLSVerifyContextKey struct{} + // AddPeer registers a domain for an account. If the account doesn't have a client yet, // one is created by authenticating with the management server using the provided token. // Multiple domains can share the same client. @@ -249,27 +256,33 @@ func (n *NetBird) createClientEntry(ctx context.Context, accountID types.Account // Create a transport using the client dialer. We do this instead of using // the client's HTTPClient to avoid issues with request validation that do // not work with reverse proxied requests. + transport := &http.Transport{ + DialContext: client.DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: n.transportCfg.maxIdleConns, + MaxIdleConnsPerHost: n.transportCfg.maxIdleConnsPerHost, + MaxConnsPerHost: n.transportCfg.maxConnsPerHost, + IdleConnTimeout: n.transportCfg.idleConnTimeout, + TLSHandshakeTimeout: n.transportCfg.tlsHandshakeTimeout, + ExpectContinueTimeout: n.transportCfg.expectContinueTimeout, + ResponseHeaderTimeout: n.transportCfg.responseHeaderTimeout, + WriteBufferSize: n.transportCfg.writeBufferSize, + ReadBufferSize: n.transportCfg.readBufferSize, + DisableCompression: n.transportCfg.disableCompression, + } + + insecureTransport := transport.Clone() + insecureTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} //nolint:gosec + return &clientEntry{ - client: client, - domains: map[domain.Domain]domainInfo{d: {serviceID: serviceID}}, - transport: &http.Transport{ - DialContext: client.DialContext, - ForceAttemptHTTP2: true, - MaxIdleConns: n.transportCfg.maxIdleConns, - MaxIdleConnsPerHost: n.transportCfg.maxIdleConnsPerHost, - MaxConnsPerHost: n.transportCfg.maxConnsPerHost, - IdleConnTimeout: n.transportCfg.idleConnTimeout, - TLSHandshakeTimeout: n.transportCfg.tlsHandshakeTimeout, - ExpectContinueTimeout: n.transportCfg.expectContinueTimeout, - ResponseHeaderTimeout: n.transportCfg.responseHeaderTimeout, - WriteBufferSize: n.transportCfg.writeBufferSize, - ReadBufferSize: n.transportCfg.readBufferSize, - DisableCompression: n.transportCfg.disableCompression, - }, - createdAt: time.Now(), - started: false, - inflightMap: make(map[backendKey]chan struct{}), - maxInflight: n.transportCfg.maxInflight, + client: client, + domains: map[domain.Domain]domainInfo{d: {serviceID: serviceID}}, + transport: transport, + insecureTransport: insecureTransport, + createdAt: time.Now(), + started: false, + inflightMap: make(map[backendKey]chan struct{}), + maxInflight: n.transportCfg.maxInflight, }, nil } @@ -373,6 +386,7 @@ func (n *NetBird) RemovePeer(ctx context.Context, accountID types.AccountID, d d client := entry.client transport := entry.transport + insecureTransport := entry.insecureTransport delete(n.clients, accountID) n.clientsMux.Unlock() @@ -387,6 +401,7 @@ func (n *NetBird) RemovePeer(ctx context.Context, accountID types.AccountID, d d } transport.CloseIdleConnections() + insecureTransport.CloseIdleConnections() if err := client.Stop(ctx); err != nil { n.logger.WithFields(log.Fields{ @@ -415,6 +430,9 @@ func (n *NetBird) RoundTrip(req *http.Request) (*http.Response, error) { } client := entry.client transport := entry.transport + if skipTLSVerifyFromContext(req.Context()) { + transport = entry.insecureTransport + } n.clientsMux.RUnlock() release, ok := entry.acquireInflight(req.URL.Host) @@ -457,6 +475,7 @@ func (n *NetBird) StopAll(ctx context.Context) error { var merr *multierror.Error for accountID, entry := range n.clients { entry.transport.CloseIdleConnections() + entry.insecureTransport.CloseIdleConnections() if err := entry.client.Stop(ctx); err != nil { n.logger.WithFields(log.Fields{ "account_id": accountID, @@ -579,3 +598,14 @@ func AccountIDFromContext(ctx context.Context) types.AccountID { } return accountID } + +// WithSkipTLSVerify marks the context to use an insecure transport that skips +// TLS certificate verification for the backend connection. +func WithSkipTLSVerify(ctx context.Context) context.Context { + return context.WithValue(ctx, skipTLSVerifyContextKey{}, true) +} + +func skipTLSVerifyFromContext(ctx context.Context) bool { + v, _ := ctx.Value(skipTLSVerifyContextKey{}).(bool) + return v +} diff --git a/proxy/server.go b/proxy/server.go index 155610305..0d1aa2f6c 100644 --- a/proxy/server.go +++ b/proxy/server.go @@ -720,7 +720,7 @@ func (s *Server) removeMapping(ctx context.Context, mapping *proto.ProxyMapping) } func (s *Server) protoToMapping(mapping *proto.ProxyMapping) proxy.Mapping { - paths := make(map[string]*url.URL) + paths := make(map[string]*proxy.PathTarget) for _, pathMapping := range mapping.GetPath() { targetURL, err := url.Parse(pathMapping.GetTarget()) if err != nil { @@ -734,7 +734,17 @@ func (s *Server) protoToMapping(mapping *proto.ProxyMapping) proxy.Mapping { }).WithError(err).Error("failed to parse target URL for path, skipping") continue } - paths[pathMapping.GetPath()] = targetURL + + pt := &proxy.PathTarget{URL: targetURL} + if opts := pathMapping.GetOptions(); opts != nil { + pt.SkipTLSVerify = opts.GetSkipTlsVerify() + pt.PathRewrite = protoToPathRewrite(opts.GetPathRewrite()) + pt.CustomHeaders = opts.GetCustomHeaders() + if d := opts.GetRequestTimeout(); d != nil { + pt.RequestTimeout = d.AsDuration() + } + } + paths[pathMapping.GetPath()] = pt } return proxy.Mapping{ ID: mapping.GetId(), @@ -746,6 +756,15 @@ func (s *Server) protoToMapping(mapping *proto.ProxyMapping) proxy.Mapping { } } +func protoToPathRewrite(mode proto.PathRewriteMode) proxy.PathRewriteMode { + switch mode { + case proto.PathRewriteMode_PATH_REWRITE_PRESERVE: + return proxy.PathRewritePreserve + default: + return proxy.PathRewriteDefault + } +} + // debugEndpointAddr returns the address for the debug endpoint. // If addr is empty, it defaults to localhost:8444 for security. func debugEndpointAddr(addr string) string { diff --git a/shared/management/client/rest/reverse_proxy_services_test.go b/shared/management/client/rest/reverse_proxy_services_test.go new file mode 100644 index 000000000..164563e97 --- /dev/null +++ b/shared/management/client/rest/reverse_proxy_services_test.go @@ -0,0 +1,271 @@ +//go:build integration + +package rest_test + +import ( + "context" + "encoding/json" + "io" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/shared/management/client/rest" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" +) + +var testServiceTarget = api.ServiceTarget{ + TargetId: "peer-123", + TargetType: "peer", + Protocol: "https", + Port: 8443, + Enabled: true, +} + +var testService = api.Service{ + Id: "svc-1", + Name: "test-service", + Domain: "test.example.com", + Enabled: true, + Auth: api.ServiceAuthConfig{}, + Meta: api.ServiceMeta{ + CreatedAt: time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC), + Status: "active", + }, + Targets: []api.ServiceTarget{testServiceTarget}, +} + +func TestReverseProxyServices_List_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/reverse-proxies/services", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal([]api.Service{testService}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.ReverseProxyServices.List(context.Background()) + require.NoError(t, err) + require.Len(t, ret, 1) + assert.Equal(t, testService.Id, ret[0].Id) + assert.Equal(t, testService.Name, ret[0].Name) + }) +} + +func TestReverseProxyServices_List_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/reverse-proxies/services", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.ReverseProxyServices.List(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestReverseProxyServices_Get_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/reverse-proxies/services/svc-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(testService) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.ReverseProxyServices.Get(context.Background(), "svc-1") + require.NoError(t, err) + assert.Equal(t, testService.Id, ret.Id) + assert.Equal(t, testService.Domain, ret.Domain) + }) +} + +func TestReverseProxyServices_Get_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/reverse-proxies/services/svc-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.ReverseProxyServices.Get(context.Background(), "svc-1") + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestReverseProxyServices_Create_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/reverse-proxies/services", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.ServiceRequest + require.NoError(t, json.Unmarshal(reqBytes, &req)) + assert.Equal(t, "test-service", req.Name) + assert.Equal(t, "test.example.com", req.Domain) + retBytes, _ := json.Marshal(testService) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.ReverseProxyServices.Create(context.Background(), api.PostApiReverseProxiesServicesJSONRequestBody{ + Name: "test-service", + Domain: "test.example.com", + Enabled: true, + Auth: api.ServiceAuthConfig{}, + Targets: []api.ServiceTarget{testServiceTarget}, + }) + require.NoError(t, err) + assert.Equal(t, testService.Id, ret.Id) + }) +} + +func TestReverseProxyServices_Create_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/reverse-proxies/services", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.ReverseProxyServices.Create(context.Background(), api.PostApiReverseProxiesServicesJSONRequestBody{ + Name: "test-service", + Domain: "test.example.com", + Enabled: true, + Auth: api.ServiceAuthConfig{}, + Targets: []api.ServiceTarget{testServiceTarget}, + }) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestReverseProxyServices_Create_WithPerTargetOptions(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/reverse-proxies/services", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.ServiceRequest + require.NoError(t, json.Unmarshal(reqBytes, &req)) + + require.Len(t, req.Targets, 1) + target := req.Targets[0] + require.NotNil(t, target.Options, "options should be present") + opts := target.Options + require.NotNil(t, opts.SkipTlsVerify, "skip_tls_verify should be present") + assert.True(t, *opts.SkipTlsVerify) + require.NotNil(t, opts.RequestTimeout, "request_timeout should be present") + assert.Equal(t, "30s", *opts.RequestTimeout) + require.NotNil(t, opts.PathRewrite, "path_rewrite should be present") + assert.Equal(t, api.ServiceTargetOptionsPathRewrite("preserve"), *opts.PathRewrite) + require.NotNil(t, opts.CustomHeaders, "custom_headers should be present") + assert.Equal(t, "bar", (*opts.CustomHeaders)["X-Foo"]) + + retBytes, _ := json.Marshal(testService) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + + pathRewrite := api.ServiceTargetOptionsPathRewrite("preserve") + ret, err := c.ReverseProxyServices.Create(context.Background(), api.PostApiReverseProxiesServicesJSONRequestBody{ + Name: "test-service", + Domain: "test.example.com", + Enabled: true, + Auth: api.ServiceAuthConfig{}, + Targets: []api.ServiceTarget{ + { + TargetId: "peer-123", + TargetType: "peer", + Protocol: "https", + Port: 8443, + Enabled: true, + Options: &api.ServiceTargetOptions{ + SkipTlsVerify: ptr(true), + RequestTimeout: ptr("30s"), + PathRewrite: &pathRewrite, + CustomHeaders: &map[string]string{"X-Foo": "bar"}, + }, + }, + }, + }) + require.NoError(t, err) + assert.Equal(t, testService.Id, ret.Id) + }) +} + +func TestReverseProxyServices_Update_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/reverse-proxies/services/svc-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "PUT", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.ServiceRequest + require.NoError(t, json.Unmarshal(reqBytes, &req)) + assert.Equal(t, "updated-service", req.Name) + retBytes, _ := json.Marshal(testService) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.ReverseProxyServices.Update(context.Background(), "svc-1", api.PutApiReverseProxiesServicesServiceIdJSONRequestBody{ + Name: "updated-service", + Domain: "test.example.com", + Enabled: true, + Auth: api.ServiceAuthConfig{}, + Targets: []api.ServiceTarget{testServiceTarget}, + }) + require.NoError(t, err) + assert.Equal(t, testService.Id, ret.Id) + }) +} + +func TestReverseProxyServices_Update_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/reverse-proxies/services/svc-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.ReverseProxyServices.Update(context.Background(), "svc-1", api.PutApiReverseProxiesServicesServiceIdJSONRequestBody{ + Name: "updated-service", + Domain: "test.example.com", + Enabled: true, + Auth: api.ServiceAuthConfig{}, + Targets: []api.ServiceTarget{testServiceTarget}, + }) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestReverseProxyServices_Delete_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/reverse-proxies/services/svc-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.ReverseProxyServices.Delete(context.Background(), "svc-1") + require.NoError(t, err) + }) +} + +func TestReverseProxyServices_Delete_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/reverse-proxies/services/svc-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.ReverseProxyServices.Delete(context.Background(), "svc-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 2927d0319..7f03d6986 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -3027,6 +3027,28 @@ components: - targets - auth - enabled + ServiceTargetOptions: + type: object + properties: + skip_tls_verify: + type: boolean + description: Skip TLS certificate verification for this backend + request_timeout: + type: string + description: Per-target response timeout as a Go duration string (e.g. "30s", "2m") + path_rewrite: + type: string + description: Controls how the request path is rewritten before forwarding to the backend. Default strips the matched prefix. "preserve" keeps the full original request path. + enum: [preserve] + custom_headers: + type: object + description: Extra headers sent to the backend. Hop-by-hop and proxy-managed headers (Host, Connection, Transfer-Encoding, etc.) are rejected. + propertyNames: + type: string + pattern: '^[!#$%&''*+.^_`|~0-9A-Za-z-]+$' + additionalProperties: + type: string + pattern: '^[^\r\n]*$' ServiceTarget: type: object properties: @@ -3053,6 +3075,8 @@ components: enabled: type: boolean description: Whether this target is enabled + options: + $ref: '#/components/schemas/ServiceTargetOptions' required: - target_id - target_type diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index e53b876c2..d4a07f806 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -326,6 +326,11 @@ const ( ServiceTargetTargetTypeResource ServiceTargetTargetType = "resource" ) +// Defines values for ServiceTargetOptionsPathRewrite. +const ( + ServiceTargetOptionsPathRewritePreserve ServiceTargetOptionsPathRewrite = "preserve" +) + // Defines values for TenantResponseStatus. const ( TenantResponseStatusActive TenantResponseStatus = "active" @@ -367,6 +372,27 @@ const ( GetApiEventsNetworkTrafficParamsDirectionINGRESS GetApiEventsNetworkTrafficParamsDirection = "INGRESS" ) +// Defines values for GetApiEventsProxyParamsSortBy. +const ( + GetApiEventsProxyParamsSortByAuthMethod GetApiEventsProxyParamsSortBy = "auth_method" + GetApiEventsProxyParamsSortByDuration GetApiEventsProxyParamsSortBy = "duration" + GetApiEventsProxyParamsSortByHost GetApiEventsProxyParamsSortBy = "host" + GetApiEventsProxyParamsSortByMethod GetApiEventsProxyParamsSortBy = "method" + GetApiEventsProxyParamsSortByPath GetApiEventsProxyParamsSortBy = "path" + GetApiEventsProxyParamsSortByReason GetApiEventsProxyParamsSortBy = "reason" + GetApiEventsProxyParamsSortBySourceIp GetApiEventsProxyParamsSortBy = "source_ip" + GetApiEventsProxyParamsSortByStatusCode GetApiEventsProxyParamsSortBy = "status_code" + GetApiEventsProxyParamsSortByTimestamp GetApiEventsProxyParamsSortBy = "timestamp" + GetApiEventsProxyParamsSortByUrl GetApiEventsProxyParamsSortBy = "url" + GetApiEventsProxyParamsSortByUserId GetApiEventsProxyParamsSortBy = "user_id" +) + +// Defines values for GetApiEventsProxyParamsSortOrder. +const ( + GetApiEventsProxyParamsSortOrderAsc GetApiEventsProxyParamsSortOrder = "asc" + GetApiEventsProxyParamsSortOrderDesc GetApiEventsProxyParamsSortOrder = "desc" +) + // Defines values for GetApiEventsProxyParamsMethod. const ( GetApiEventsProxyParamsMethodDELETE GetApiEventsProxyParamsMethod = "DELETE" @@ -2741,7 +2767,8 @@ type ServiceTarget struct { Enabled bool `json:"enabled"` // Host Backend ip or domain for this target - Host *string `json:"host,omitempty"` + Host *string `json:"host,omitempty"` + Options *ServiceTargetOptions `json:"options,omitempty"` // Path URL path prefix for this target Path *string `json:"path,omitempty"` @@ -2765,6 +2792,24 @@ type ServiceTargetProtocol string // ServiceTargetTargetType Target type (e.g., "peer", "resource") type ServiceTargetTargetType string +// ServiceTargetOptions defines model for ServiceTargetOptions. +type ServiceTargetOptions struct { + // CustomHeaders Extra headers sent to the backend. Hop-by-hop and proxy-managed headers (Host, Connection, Transfer-Encoding, etc.) are rejected. + CustomHeaders *map[string]string `json:"custom_headers,omitempty"` + + // PathRewrite Controls how the request path is rewritten before forwarding to the backend. Default strips the matched prefix. "preserve" keeps the full original request path. + PathRewrite *ServiceTargetOptionsPathRewrite `json:"path_rewrite,omitempty"` + + // RequestTimeout Per-target response timeout as a Go duration string (e.g. "30s", "2m") + RequestTimeout *string `json:"request_timeout,omitempty"` + + // SkipTlsVerify Skip TLS certificate verification for this backend + SkipTlsVerify *bool `json:"skip_tls_verify,omitempty"` +} + +// ServiceTargetOptionsPathRewrite Controls how the request path is rewritten before forwarding to the backend. Default strips the matched prefix. "preserve" keeps the full original request path. +type ServiceTargetOptionsPathRewrite string + // SetupKey defines model for SetupKey. type SetupKey struct { // AllowExtraDnsLabels Allow extra DNS labels to be added to the peer @@ -3335,6 +3380,12 @@ type GetApiEventsProxyParams struct { // PageSize Number of items per page (max 100) PageSize *int `form:"page_size,omitempty" json:"page_size,omitempty"` + // SortBy Field to sort by (url sorts by host then path) + SortBy *GetApiEventsProxyParamsSortBy `form:"sort_by,omitempty" json:"sort_by,omitempty"` + + // SortOrder Sort order (ascending or descending) + SortOrder *GetApiEventsProxyParamsSortOrder `form:"sort_order,omitempty" json:"sort_order,omitempty"` + // Search General search across request ID, host, path, source IP, user email, and user name Search *string `form:"search,omitempty" json:"search,omitempty"` @@ -3372,6 +3423,12 @@ type GetApiEventsProxyParams struct { EndDate *time.Time `form:"end_date,omitempty" json:"end_date,omitempty"` } +// GetApiEventsProxyParamsSortBy defines parameters for GetApiEventsProxy. +type GetApiEventsProxyParamsSortBy string + +// GetApiEventsProxyParamsSortOrder defines parameters for GetApiEventsProxy. +type GetApiEventsProxyParamsSortOrder string + // GetApiEventsProxyParamsMethod defines parameters for GetApiEventsProxy. type GetApiEventsProxyParamsMethod string diff --git a/shared/management/proto/proxy_service.pb.go b/shared/management/proto/proxy_service.pb.go index c89157eb5..77c8ea4f4 100644 --- a/shared/management/proto/proxy_service.pb.go +++ b/shared/management/proto/proxy_service.pb.go @@ -9,6 +9,7 @@ package proto import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" @@ -70,6 +71,52 @@ func (ProxyMappingUpdateType) EnumDescriptor() ([]byte, []int) { return file_proxy_service_proto_rawDescGZIP(), []int{0} } +type PathRewriteMode int32 + +const ( + PathRewriteMode_PATH_REWRITE_DEFAULT PathRewriteMode = 0 + PathRewriteMode_PATH_REWRITE_PRESERVE PathRewriteMode = 1 +) + +// Enum value maps for PathRewriteMode. +var ( + PathRewriteMode_name = map[int32]string{ + 0: "PATH_REWRITE_DEFAULT", + 1: "PATH_REWRITE_PRESERVE", + } + PathRewriteMode_value = map[string]int32{ + "PATH_REWRITE_DEFAULT": 0, + "PATH_REWRITE_PRESERVE": 1, + } +) + +func (x PathRewriteMode) Enum() *PathRewriteMode { + p := new(PathRewriteMode) + *p = x + return p +} + +func (x PathRewriteMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PathRewriteMode) Descriptor() protoreflect.EnumDescriptor { + return file_proxy_service_proto_enumTypes[1].Descriptor() +} + +func (PathRewriteMode) Type() protoreflect.EnumType { + return &file_proxy_service_proto_enumTypes[1] +} + +func (x PathRewriteMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PathRewriteMode.Descriptor instead. +func (PathRewriteMode) EnumDescriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{1} +} + type ProxyStatus int32 const ( @@ -112,11 +159,11 @@ func (x ProxyStatus) String() string { } func (ProxyStatus) Descriptor() protoreflect.EnumDescriptor { - return file_proxy_service_proto_enumTypes[1].Descriptor() + return file_proxy_service_proto_enumTypes[2].Descriptor() } func (ProxyStatus) Type() protoreflect.EnumType { - return &file_proxy_service_proto_enumTypes[1] + return &file_proxy_service_proto_enumTypes[2] } func (x ProxyStatus) Number() protoreflect.EnumNumber { @@ -125,7 +172,7 @@ func (x ProxyStatus) Number() protoreflect.EnumNumber { // Deprecated: Use ProxyStatus.Descriptor instead. func (ProxyStatus) EnumDescriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{1} + return file_proxy_service_proto_rawDescGZIP(), []int{2} } // GetMappingUpdateRequest is sent to initialise a mapping stream. @@ -260,19 +307,91 @@ func (x *GetMappingUpdateResponse) GetInitialSyncComplete() bool { return false } +type PathTargetOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SkipTlsVerify bool `protobuf:"varint,1,opt,name=skip_tls_verify,json=skipTlsVerify,proto3" json:"skip_tls_verify,omitempty"` + RequestTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"` + PathRewrite PathRewriteMode `protobuf:"varint,3,opt,name=path_rewrite,json=pathRewrite,proto3,enum=management.PathRewriteMode" json:"path_rewrite,omitempty"` + CustomHeaders map[string]string `protobuf:"bytes,4,rep,name=custom_headers,json=customHeaders,proto3" json:"custom_headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *PathTargetOptions) Reset() { + *x = PathTargetOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathTargetOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathTargetOptions) ProtoMessage() {} + +func (x *PathTargetOptions) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathTargetOptions.ProtoReflect.Descriptor instead. +func (*PathTargetOptions) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{2} +} + +func (x *PathTargetOptions) GetSkipTlsVerify() bool { + if x != nil { + return x.SkipTlsVerify + } + return false +} + +func (x *PathTargetOptions) GetRequestTimeout() *durationpb.Duration { + if x != nil { + return x.RequestTimeout + } + return nil +} + +func (x *PathTargetOptions) GetPathRewrite() PathRewriteMode { + if x != nil { + return x.PathRewrite + } + return PathRewriteMode_PATH_REWRITE_DEFAULT +} + +func (x *PathTargetOptions) GetCustomHeaders() map[string]string { + if x != nil { + return x.CustomHeaders + } + return nil +} + type PathMapping struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Target string `protobuf:"bytes,2,opt,name=target,proto3" json:"target,omitempty"` + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Target string `protobuf:"bytes,2,opt,name=target,proto3" json:"target,omitempty"` + Options *PathTargetOptions `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` } func (x *PathMapping) Reset() { *x = PathMapping{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[2] + mi := &file_proxy_service_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -285,7 +404,7 @@ func (x *PathMapping) String() string { func (*PathMapping) ProtoMessage() {} func (x *PathMapping) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[2] + mi := &file_proxy_service_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -298,7 +417,7 @@ func (x *PathMapping) ProtoReflect() protoreflect.Message { // Deprecated: Use PathMapping.ProtoReflect.Descriptor instead. func (*PathMapping) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{2} + return file_proxy_service_proto_rawDescGZIP(), []int{3} } func (x *PathMapping) GetPath() string { @@ -315,6 +434,13 @@ func (x *PathMapping) GetTarget() string { return "" } +func (x *PathMapping) GetOptions() *PathTargetOptions { + if x != nil { + return x.Options + } + return nil +} + type Authentication struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -330,7 +456,7 @@ type Authentication struct { func (x *Authentication) Reset() { *x = Authentication{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[3] + mi := &file_proxy_service_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -343,7 +469,7 @@ func (x *Authentication) String() string { func (*Authentication) ProtoMessage() {} func (x *Authentication) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[3] + mi := &file_proxy_service_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -356,7 +482,7 @@ func (x *Authentication) ProtoReflect() protoreflect.Message { // Deprecated: Use Authentication.ProtoReflect.Descriptor instead. func (*Authentication) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{3} + return file_proxy_service_proto_rawDescGZIP(), []int{4} } func (x *Authentication) GetSessionKey() string { @@ -417,7 +543,7 @@ type ProxyMapping struct { func (x *ProxyMapping) Reset() { *x = ProxyMapping{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[4] + mi := &file_proxy_service_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -430,7 +556,7 @@ func (x *ProxyMapping) String() string { func (*ProxyMapping) ProtoMessage() {} func (x *ProxyMapping) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[4] + mi := &file_proxy_service_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -443,7 +569,7 @@ func (x *ProxyMapping) ProtoReflect() protoreflect.Message { // Deprecated: Use ProxyMapping.ProtoReflect.Descriptor instead. func (*ProxyMapping) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{4} + return file_proxy_service_proto_rawDescGZIP(), []int{5} } func (x *ProxyMapping) GetType() ProxyMappingUpdateType { @@ -521,7 +647,7 @@ type SendAccessLogRequest struct { func (x *SendAccessLogRequest) Reset() { *x = SendAccessLogRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[5] + mi := &file_proxy_service_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -534,7 +660,7 @@ func (x *SendAccessLogRequest) String() string { func (*SendAccessLogRequest) ProtoMessage() {} func (x *SendAccessLogRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[5] + mi := &file_proxy_service_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -547,7 +673,7 @@ func (x *SendAccessLogRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SendAccessLogRequest.ProtoReflect.Descriptor instead. func (*SendAccessLogRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{5} + return file_proxy_service_proto_rawDescGZIP(), []int{6} } func (x *SendAccessLogRequest) GetLog() *AccessLog { @@ -567,7 +693,7 @@ type SendAccessLogResponse struct { func (x *SendAccessLogResponse) Reset() { *x = SendAccessLogResponse{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[6] + mi := &file_proxy_service_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -580,7 +706,7 @@ func (x *SendAccessLogResponse) String() string { func (*SendAccessLogResponse) ProtoMessage() {} func (x *SendAccessLogResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[6] + mi := &file_proxy_service_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -593,7 +719,7 @@ func (x *SendAccessLogResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SendAccessLogResponse.ProtoReflect.Descriptor instead. func (*SendAccessLogResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{6} + return file_proxy_service_proto_rawDescGZIP(), []int{7} } type AccessLog struct { @@ -619,7 +745,7 @@ type AccessLog struct { func (x *AccessLog) Reset() { *x = AccessLog{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[7] + mi := &file_proxy_service_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -632,7 +758,7 @@ func (x *AccessLog) String() string { func (*AccessLog) ProtoMessage() {} func (x *AccessLog) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[7] + mi := &file_proxy_service_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -645,7 +771,7 @@ func (x *AccessLog) ProtoReflect() protoreflect.Message { // Deprecated: Use AccessLog.ProtoReflect.Descriptor instead. func (*AccessLog) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{7} + return file_proxy_service_proto_rawDescGZIP(), []int{8} } func (x *AccessLog) GetTimestamp() *timestamppb.Timestamp { @@ -756,7 +882,7 @@ type AuthenticateRequest struct { func (x *AuthenticateRequest) Reset() { *x = AuthenticateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[8] + mi := &file_proxy_service_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -769,7 +895,7 @@ func (x *AuthenticateRequest) String() string { func (*AuthenticateRequest) ProtoMessage() {} func (x *AuthenticateRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[8] + mi := &file_proxy_service_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -782,7 +908,7 @@ func (x *AuthenticateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AuthenticateRequest.ProtoReflect.Descriptor instead. func (*AuthenticateRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{8} + return file_proxy_service_proto_rawDescGZIP(), []int{9} } func (x *AuthenticateRequest) GetId() string { @@ -847,7 +973,7 @@ type PasswordRequest struct { func (x *PasswordRequest) Reset() { *x = PasswordRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[9] + mi := &file_proxy_service_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -860,7 +986,7 @@ func (x *PasswordRequest) String() string { func (*PasswordRequest) ProtoMessage() {} func (x *PasswordRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[9] + mi := &file_proxy_service_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -873,7 +999,7 @@ func (x *PasswordRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PasswordRequest.ProtoReflect.Descriptor instead. func (*PasswordRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{9} + return file_proxy_service_proto_rawDescGZIP(), []int{10} } func (x *PasswordRequest) GetPassword() string { @@ -894,7 +1020,7 @@ type PinRequest struct { func (x *PinRequest) Reset() { *x = PinRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[10] + mi := &file_proxy_service_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -907,7 +1033,7 @@ func (x *PinRequest) String() string { func (*PinRequest) ProtoMessage() {} func (x *PinRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[10] + mi := &file_proxy_service_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -920,7 +1046,7 @@ func (x *PinRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PinRequest.ProtoReflect.Descriptor instead. func (*PinRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{10} + return file_proxy_service_proto_rawDescGZIP(), []int{11} } func (x *PinRequest) GetPin() string { @@ -942,7 +1068,7 @@ type AuthenticateResponse struct { func (x *AuthenticateResponse) Reset() { *x = AuthenticateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[11] + mi := &file_proxy_service_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -955,7 +1081,7 @@ func (x *AuthenticateResponse) String() string { func (*AuthenticateResponse) ProtoMessage() {} func (x *AuthenticateResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[11] + mi := &file_proxy_service_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -968,7 +1094,7 @@ func (x *AuthenticateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use AuthenticateResponse.ProtoReflect.Descriptor instead. func (*AuthenticateResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{11} + return file_proxy_service_proto_rawDescGZIP(), []int{12} } func (x *AuthenticateResponse) GetSuccess() bool { @@ -1001,7 +1127,7 @@ type SendStatusUpdateRequest struct { func (x *SendStatusUpdateRequest) Reset() { *x = SendStatusUpdateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[12] + mi := &file_proxy_service_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1014,7 +1140,7 @@ func (x *SendStatusUpdateRequest) String() string { func (*SendStatusUpdateRequest) ProtoMessage() {} func (x *SendStatusUpdateRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[12] + mi := &file_proxy_service_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1027,7 +1153,7 @@ func (x *SendStatusUpdateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SendStatusUpdateRequest.ProtoReflect.Descriptor instead. func (*SendStatusUpdateRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{12} + return file_proxy_service_proto_rawDescGZIP(), []int{13} } func (x *SendStatusUpdateRequest) GetServiceId() string { @@ -1075,7 +1201,7 @@ type SendStatusUpdateResponse struct { func (x *SendStatusUpdateResponse) Reset() { *x = SendStatusUpdateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[13] + mi := &file_proxy_service_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1088,7 +1214,7 @@ func (x *SendStatusUpdateResponse) String() string { func (*SendStatusUpdateResponse) ProtoMessage() {} func (x *SendStatusUpdateResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[13] + mi := &file_proxy_service_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1101,7 +1227,7 @@ func (x *SendStatusUpdateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SendStatusUpdateResponse.ProtoReflect.Descriptor instead. func (*SendStatusUpdateResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{13} + return file_proxy_service_proto_rawDescGZIP(), []int{14} } // CreateProxyPeerRequest is sent by the proxy to create a peer connection @@ -1121,7 +1247,7 @@ type CreateProxyPeerRequest struct { func (x *CreateProxyPeerRequest) Reset() { *x = CreateProxyPeerRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[14] + mi := &file_proxy_service_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1134,7 +1260,7 @@ func (x *CreateProxyPeerRequest) String() string { func (*CreateProxyPeerRequest) ProtoMessage() {} func (x *CreateProxyPeerRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[14] + mi := &file_proxy_service_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1147,7 +1273,7 @@ func (x *CreateProxyPeerRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateProxyPeerRequest.ProtoReflect.Descriptor instead. func (*CreateProxyPeerRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{14} + return file_proxy_service_proto_rawDescGZIP(), []int{15} } func (x *CreateProxyPeerRequest) GetServiceId() string { @@ -1198,7 +1324,7 @@ type CreateProxyPeerResponse struct { func (x *CreateProxyPeerResponse) Reset() { *x = CreateProxyPeerResponse{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[15] + mi := &file_proxy_service_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1211,7 +1337,7 @@ func (x *CreateProxyPeerResponse) String() string { func (*CreateProxyPeerResponse) ProtoMessage() {} func (x *CreateProxyPeerResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[15] + mi := &file_proxy_service_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1224,7 +1350,7 @@ func (x *CreateProxyPeerResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateProxyPeerResponse.ProtoReflect.Descriptor instead. func (*CreateProxyPeerResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{15} + return file_proxy_service_proto_rawDescGZIP(), []int{16} } func (x *CreateProxyPeerResponse) GetSuccess() bool { @@ -1254,7 +1380,7 @@ type GetOIDCURLRequest struct { func (x *GetOIDCURLRequest) Reset() { *x = GetOIDCURLRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[16] + mi := &file_proxy_service_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1267,7 +1393,7 @@ func (x *GetOIDCURLRequest) String() string { func (*GetOIDCURLRequest) ProtoMessage() {} func (x *GetOIDCURLRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[16] + mi := &file_proxy_service_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1280,7 +1406,7 @@ func (x *GetOIDCURLRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetOIDCURLRequest.ProtoReflect.Descriptor instead. func (*GetOIDCURLRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{16} + return file_proxy_service_proto_rawDescGZIP(), []int{17} } func (x *GetOIDCURLRequest) GetId() string { @@ -1315,7 +1441,7 @@ type GetOIDCURLResponse struct { func (x *GetOIDCURLResponse) Reset() { *x = GetOIDCURLResponse{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[17] + mi := &file_proxy_service_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1328,7 +1454,7 @@ func (x *GetOIDCURLResponse) String() string { func (*GetOIDCURLResponse) ProtoMessage() {} func (x *GetOIDCURLResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[17] + mi := &file_proxy_service_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1341,7 +1467,7 @@ func (x *GetOIDCURLResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetOIDCURLResponse.ProtoReflect.Descriptor instead. func (*GetOIDCURLResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{17} + return file_proxy_service_proto_rawDescGZIP(), []int{18} } func (x *GetOIDCURLResponse) GetUrl() string { @@ -1363,7 +1489,7 @@ type ValidateSessionRequest struct { func (x *ValidateSessionRequest) Reset() { *x = ValidateSessionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[18] + mi := &file_proxy_service_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1376,7 +1502,7 @@ func (x *ValidateSessionRequest) String() string { func (*ValidateSessionRequest) ProtoMessage() {} func (x *ValidateSessionRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[18] + mi := &file_proxy_service_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1389,7 +1515,7 @@ func (x *ValidateSessionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateSessionRequest.ProtoReflect.Descriptor instead. func (*ValidateSessionRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{18} + return file_proxy_service_proto_rawDescGZIP(), []int{19} } func (x *ValidateSessionRequest) GetDomain() string { @@ -1420,7 +1546,7 @@ type ValidateSessionResponse struct { func (x *ValidateSessionResponse) Reset() { *x = ValidateSessionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[19] + mi := &file_proxy_service_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1433,7 +1559,7 @@ func (x *ValidateSessionResponse) String() string { func (*ValidateSessionResponse) ProtoMessage() {} func (x *ValidateSessionResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[19] + mi := &file_proxy_service_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1446,7 +1572,7 @@ func (x *ValidateSessionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateSessionResponse.ProtoReflect.Descriptor instead. func (*ValidateSessionResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{19} + return file_proxy_service_proto_rawDescGZIP(), []int{20} } func (x *ValidateSessionResponse) GetValid() bool { @@ -1482,7 +1608,9 @@ var File_proxy_service_proto protoreflect.FileDescriptor var file_proxy_service_proto_rawDesc = []byte{ 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x74, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa3, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, @@ -1502,217 +1630,247 @@ var file_proxy_service_proto_rawDesc = []byte{ 0x52, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, - 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x22, 0x39, 0x0a, - 0x0b, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0xaa, 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x74, - 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x35, 0x0a, 0x17, - 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x67, 0x65, 0x5f, - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x6d, - 0x61, 0x78, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, - 0x6e, 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, - 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x70, 0x69, - 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x69, 0x64, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x04, 0x6f, 0x69, 0x64, 0x63, 0x22, 0xe0, 0x02, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, - 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, - 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, - 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, - 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, - 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x2b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x12, 0x2e, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, - 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x61, 0x75, 0x74, - 0x68, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x61, 0x73, 0x73, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x61, 0x73, - 0x73, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x72, - 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, - 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x22, 0x3f, 0x0a, 0x14, 0x53, 0x65, 0x6e, 0x64, - 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x27, 0x0a, 0x03, 0x6c, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x03, 0x6c, 0x6f, 0x67, 0x22, 0x17, 0x0a, 0x15, 0x53, 0x65, 0x6e, - 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0xa0, 0x03, 0x0a, 0x09, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, - 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x15, 0x0a, 0x06, 0x6c, 0x6f, - 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, - 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, - 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, - 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, - 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, - 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x49, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, - 0x6e, 0x69, 0x73, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x68, - 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, - 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x53, 0x75, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x13, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, - 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, + 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x22, 0xda, 0x02, + 0x0a, 0x11, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x74, 0x6c, 0x73, 0x5f, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, + 0x69, 0x70, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x42, 0x0a, 0x0f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, + 0x3e, 0x0a, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x6f, + 0x64, 0x65, 0x52, 0x0b, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, + 0x57, 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0x40, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x72, 0x0a, 0x0b, 0x50, 0x61, + 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, + 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xaa, + 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4b, + 0x65, 0x79, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x41, + 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, + 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x70, 0x61, 0x73, + 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x69, 0x64, 0x63, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x6f, 0x69, 0x64, 0x63, 0x22, 0xe0, 0x02, 0x0a, 0x0c, + 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, + 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x2b, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, + 0x6e, 0x67, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, + 0x74, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2e, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x61, 0x73, 0x73, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0e, 0x70, 0x61, 0x73, 0x73, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x22, 0x3f, + 0x0a, 0x14, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x03, 0x6c, 0x6f, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x03, 0x6c, 0x6f, 0x67, 0x22, + 0x17, 0x0a, 0x15, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa0, 0x03, 0x0a, 0x09, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x12, 0x15, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, + 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x16, + 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x68, + 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, + 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x68, + 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x61, 0x75, 0x74, 0x68, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x13, + 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x49, 0x64, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x2a, 0x0a, + 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x48, 0x00, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x22, 0x2d, 0x0a, 0x0f, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, + 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, + 0x6f, 0x72, 0x64, 0x22, 0x1e, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x70, 0x69, 0x6e, 0x22, 0x55, 0x0a, 0x14, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xf3, 0x01, 0x0a, 0x17, 0x53, + 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x49, 0x73, + 0x73, 0x75, 0x65, 0x64, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, + 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb8, 0x01, 0x0a, + 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x77, + 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, 0x72, 0x64, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x77, 0x69, 0x72, 0x65, 0x67, + 0x75, 0x61, 0x72, 0x64, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, + 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x0d, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x65, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4f, + 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x08, - 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, - 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x2a, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x03, - 0x70, 0x69, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2d, - 0x0a, 0x0f, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x1e, 0x0a, - 0x0a, 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, - 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x22, 0x55, 0x0a, - 0x14, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, - 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xf3, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, - 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2f, - 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, - 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x2d, 0x0a, 0x12, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x69, - 0x73, 0x73, 0x75, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x63, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x49, 0x73, 0x73, 0x75, 0x65, 0x64, 0x12, 0x28, - 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x65, - 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, - 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, - 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, - 0x72, 0x64, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x12, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, 0x72, 0x64, 0x50, 0x75, - 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x22, 0x6f, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, - 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, - 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, - 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x22, 0x65, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, - 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x6c, 0x22, 0x26, 0x0a, 0x12, 0x47, 0x65, 0x74, - 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, - 0x6c, 0x22, 0x55, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, - 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8c, 0x01, 0x0a, 0x17, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, - 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x65, 0x6d, 0x61, 0x69, - 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x73, 0x65, 0x72, 0x45, 0x6d, 0x61, - 0x69, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x61, - 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, - 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x2a, 0x64, 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x78, 0x79, - 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x55, 0x50, - 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x10, 0x02, 0x2a, 0xc8, 0x01, - 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, - 0x14, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x45, - 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x50, 0x52, 0x4f, 0x58, 0x59, - 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, - 0x12, 0x23, 0x0a, 0x1f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, - 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, - 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x24, 0x0a, 0x20, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, - 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, - 0x45, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x23, 0x0a, 0x1f, 0x50, - 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, - 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, - 0x12, 0x16, 0x0a, 0x12, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, - 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x05, 0x32, 0xfc, 0x04, 0x0a, 0x0c, 0x50, 0x72, 0x6f, - 0x78, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5f, 0x0a, 0x10, 0x47, 0x65, 0x74, - 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, - 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x54, 0x0a, 0x0d, 0x53, 0x65, - 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x20, 0x2e, 0x6d, 0x61, + 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, + 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x6c, 0x22, + 0x26, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x55, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8c, + 0x01, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, + 0x72, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, + 0x73, 0x65, 0x72, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, + 0x65, 0x64, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x2a, 0x64, 0x0a, + 0x16, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, + 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x18, 0x0a, 0x14, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, + 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, + 0x44, 0x10, 0x02, 0x2a, 0x46, 0x0a, 0x0f, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x52, + 0x45, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, + 0x12, 0x19, 0x0a, 0x15, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x57, 0x52, 0x49, 0x54, 0x45, + 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x10, 0x01, 0x2a, 0xc8, 0x01, 0x0a, 0x0b, + 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x14, 0x50, + 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, + 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x23, + 0x0a, 0x1f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x54, + 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, + 0x44, 0x10, 0x02, 0x12, 0x24, 0x0a, 0x20, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, + 0x54, 0x55, 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, + 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x23, 0x0a, 0x1f, 0x50, 0x52, 0x4f, + 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, + 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x16, + 0x0a, 0x12, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x45, + 0x52, 0x52, 0x4f, 0x52, 0x10, 0x05, 0x32, 0xfc, 0x04, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5f, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4d, 0x61, + 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, + 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, + 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x54, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, + 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x51, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x12, 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, - 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, - 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, - 0x79, 0x50, 0x65, 0x65, 0x72, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, - 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, - 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, - 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x12, 0x1d, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, - 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, - 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0f, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, + 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1f, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, + 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, + 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x5d, 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x5a, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, + 0x65, 0x65, 0x72, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, + 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0a, + 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x12, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, + 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, + 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0f, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1727,63 +1885,71 @@ func file_proxy_service_proto_rawDescGZIP() []byte { return file_proxy_service_proto_rawDescData } -var file_proxy_service_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_proxy_service_proto_msgTypes = make([]protoimpl.MessageInfo, 20) +var file_proxy_service_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_proxy_service_proto_msgTypes = make([]protoimpl.MessageInfo, 22) var file_proxy_service_proto_goTypes = []interface{}{ (ProxyMappingUpdateType)(0), // 0: management.ProxyMappingUpdateType - (ProxyStatus)(0), // 1: management.ProxyStatus - (*GetMappingUpdateRequest)(nil), // 2: management.GetMappingUpdateRequest - (*GetMappingUpdateResponse)(nil), // 3: management.GetMappingUpdateResponse - (*PathMapping)(nil), // 4: management.PathMapping - (*Authentication)(nil), // 5: management.Authentication - (*ProxyMapping)(nil), // 6: management.ProxyMapping - (*SendAccessLogRequest)(nil), // 7: management.SendAccessLogRequest - (*SendAccessLogResponse)(nil), // 8: management.SendAccessLogResponse - (*AccessLog)(nil), // 9: management.AccessLog - (*AuthenticateRequest)(nil), // 10: management.AuthenticateRequest - (*PasswordRequest)(nil), // 11: management.PasswordRequest - (*PinRequest)(nil), // 12: management.PinRequest - (*AuthenticateResponse)(nil), // 13: management.AuthenticateResponse - (*SendStatusUpdateRequest)(nil), // 14: management.SendStatusUpdateRequest - (*SendStatusUpdateResponse)(nil), // 15: management.SendStatusUpdateResponse - (*CreateProxyPeerRequest)(nil), // 16: management.CreateProxyPeerRequest - (*CreateProxyPeerResponse)(nil), // 17: management.CreateProxyPeerResponse - (*GetOIDCURLRequest)(nil), // 18: management.GetOIDCURLRequest - (*GetOIDCURLResponse)(nil), // 19: management.GetOIDCURLResponse - (*ValidateSessionRequest)(nil), // 20: management.ValidateSessionRequest - (*ValidateSessionResponse)(nil), // 21: management.ValidateSessionResponse - (*timestamppb.Timestamp)(nil), // 22: google.protobuf.Timestamp + (PathRewriteMode)(0), // 1: management.PathRewriteMode + (ProxyStatus)(0), // 2: management.ProxyStatus + (*GetMappingUpdateRequest)(nil), // 3: management.GetMappingUpdateRequest + (*GetMappingUpdateResponse)(nil), // 4: management.GetMappingUpdateResponse + (*PathTargetOptions)(nil), // 5: management.PathTargetOptions + (*PathMapping)(nil), // 6: management.PathMapping + (*Authentication)(nil), // 7: management.Authentication + (*ProxyMapping)(nil), // 8: management.ProxyMapping + (*SendAccessLogRequest)(nil), // 9: management.SendAccessLogRequest + (*SendAccessLogResponse)(nil), // 10: management.SendAccessLogResponse + (*AccessLog)(nil), // 11: management.AccessLog + (*AuthenticateRequest)(nil), // 12: management.AuthenticateRequest + (*PasswordRequest)(nil), // 13: management.PasswordRequest + (*PinRequest)(nil), // 14: management.PinRequest + (*AuthenticateResponse)(nil), // 15: management.AuthenticateResponse + (*SendStatusUpdateRequest)(nil), // 16: management.SendStatusUpdateRequest + (*SendStatusUpdateResponse)(nil), // 17: management.SendStatusUpdateResponse + (*CreateProxyPeerRequest)(nil), // 18: management.CreateProxyPeerRequest + (*CreateProxyPeerResponse)(nil), // 19: management.CreateProxyPeerResponse + (*GetOIDCURLRequest)(nil), // 20: management.GetOIDCURLRequest + (*GetOIDCURLResponse)(nil), // 21: management.GetOIDCURLResponse + (*ValidateSessionRequest)(nil), // 22: management.ValidateSessionRequest + (*ValidateSessionResponse)(nil), // 23: management.ValidateSessionResponse + nil, // 24: management.PathTargetOptions.CustomHeadersEntry + (*timestamppb.Timestamp)(nil), // 25: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 26: google.protobuf.Duration } var file_proxy_service_proto_depIdxs = []int32{ - 22, // 0: management.GetMappingUpdateRequest.started_at:type_name -> google.protobuf.Timestamp - 6, // 1: management.GetMappingUpdateResponse.mapping:type_name -> management.ProxyMapping - 0, // 2: management.ProxyMapping.type:type_name -> management.ProxyMappingUpdateType - 4, // 3: management.ProxyMapping.path:type_name -> management.PathMapping - 5, // 4: management.ProxyMapping.auth:type_name -> management.Authentication - 9, // 5: management.SendAccessLogRequest.log:type_name -> management.AccessLog - 22, // 6: management.AccessLog.timestamp:type_name -> google.protobuf.Timestamp - 11, // 7: management.AuthenticateRequest.password:type_name -> management.PasswordRequest - 12, // 8: management.AuthenticateRequest.pin:type_name -> management.PinRequest - 1, // 9: management.SendStatusUpdateRequest.status:type_name -> management.ProxyStatus - 2, // 10: management.ProxyService.GetMappingUpdate:input_type -> management.GetMappingUpdateRequest - 7, // 11: management.ProxyService.SendAccessLog:input_type -> management.SendAccessLogRequest - 10, // 12: management.ProxyService.Authenticate:input_type -> management.AuthenticateRequest - 14, // 13: management.ProxyService.SendStatusUpdate:input_type -> management.SendStatusUpdateRequest - 16, // 14: management.ProxyService.CreateProxyPeer:input_type -> management.CreateProxyPeerRequest - 18, // 15: management.ProxyService.GetOIDCURL:input_type -> management.GetOIDCURLRequest - 20, // 16: management.ProxyService.ValidateSession:input_type -> management.ValidateSessionRequest - 3, // 17: management.ProxyService.GetMappingUpdate:output_type -> management.GetMappingUpdateResponse - 8, // 18: management.ProxyService.SendAccessLog:output_type -> management.SendAccessLogResponse - 13, // 19: management.ProxyService.Authenticate:output_type -> management.AuthenticateResponse - 15, // 20: management.ProxyService.SendStatusUpdate:output_type -> management.SendStatusUpdateResponse - 17, // 21: management.ProxyService.CreateProxyPeer:output_type -> management.CreateProxyPeerResponse - 19, // 22: management.ProxyService.GetOIDCURL:output_type -> management.GetOIDCURLResponse - 21, // 23: management.ProxyService.ValidateSession:output_type -> management.ValidateSessionResponse - 17, // [17:24] is the sub-list for method output_type - 10, // [10:17] is the sub-list for method input_type - 10, // [10:10] is the sub-list for extension type_name - 10, // [10:10] is the sub-list for extension extendee - 0, // [0:10] is the sub-list for field type_name + 25, // 0: management.GetMappingUpdateRequest.started_at:type_name -> google.protobuf.Timestamp + 8, // 1: management.GetMappingUpdateResponse.mapping:type_name -> management.ProxyMapping + 26, // 2: management.PathTargetOptions.request_timeout:type_name -> google.protobuf.Duration + 1, // 3: management.PathTargetOptions.path_rewrite:type_name -> management.PathRewriteMode + 24, // 4: management.PathTargetOptions.custom_headers:type_name -> management.PathTargetOptions.CustomHeadersEntry + 5, // 5: management.PathMapping.options:type_name -> management.PathTargetOptions + 0, // 6: management.ProxyMapping.type:type_name -> management.ProxyMappingUpdateType + 6, // 7: management.ProxyMapping.path:type_name -> management.PathMapping + 7, // 8: management.ProxyMapping.auth:type_name -> management.Authentication + 11, // 9: management.SendAccessLogRequest.log:type_name -> management.AccessLog + 25, // 10: management.AccessLog.timestamp:type_name -> google.protobuf.Timestamp + 13, // 11: management.AuthenticateRequest.password:type_name -> management.PasswordRequest + 14, // 12: management.AuthenticateRequest.pin:type_name -> management.PinRequest + 2, // 13: management.SendStatusUpdateRequest.status:type_name -> management.ProxyStatus + 3, // 14: management.ProxyService.GetMappingUpdate:input_type -> management.GetMappingUpdateRequest + 9, // 15: management.ProxyService.SendAccessLog:input_type -> management.SendAccessLogRequest + 12, // 16: management.ProxyService.Authenticate:input_type -> management.AuthenticateRequest + 16, // 17: management.ProxyService.SendStatusUpdate:input_type -> management.SendStatusUpdateRequest + 18, // 18: management.ProxyService.CreateProxyPeer:input_type -> management.CreateProxyPeerRequest + 20, // 19: management.ProxyService.GetOIDCURL:input_type -> management.GetOIDCURLRequest + 22, // 20: management.ProxyService.ValidateSession:input_type -> management.ValidateSessionRequest + 4, // 21: management.ProxyService.GetMappingUpdate:output_type -> management.GetMappingUpdateResponse + 10, // 22: management.ProxyService.SendAccessLog:output_type -> management.SendAccessLogResponse + 15, // 23: management.ProxyService.Authenticate:output_type -> management.AuthenticateResponse + 17, // 24: management.ProxyService.SendStatusUpdate:output_type -> management.SendStatusUpdateResponse + 19, // 25: management.ProxyService.CreateProxyPeer:output_type -> management.CreateProxyPeerResponse + 21, // 26: management.ProxyService.GetOIDCURL:output_type -> management.GetOIDCURLResponse + 23, // 27: management.ProxyService.ValidateSession:output_type -> management.ValidateSessionResponse + 21, // [21:28] is the sub-list for method output_type + 14, // [14:21] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name } func init() { file_proxy_service_proto_init() } @@ -1817,7 +1983,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PathMapping); i { + switch v := v.(*PathTargetOptions); i { case 0: return &v.state case 1: @@ -1829,7 +1995,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Authentication); i { + switch v := v.(*PathMapping); i { case 0: return &v.state case 1: @@ -1841,7 +2007,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProxyMapping); i { + switch v := v.(*Authentication); i { case 0: return &v.state case 1: @@ -1853,7 +2019,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendAccessLogRequest); i { + switch v := v.(*ProxyMapping); i { case 0: return &v.state case 1: @@ -1865,7 +2031,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendAccessLogResponse); i { + switch v := v.(*SendAccessLogRequest); i { case 0: return &v.state case 1: @@ -1877,7 +2043,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AccessLog); i { + switch v := v.(*SendAccessLogResponse); i { case 0: return &v.state case 1: @@ -1889,7 +2055,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthenticateRequest); i { + switch v := v.(*AccessLog); i { case 0: return &v.state case 1: @@ -1901,7 +2067,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PasswordRequest); i { + switch v := v.(*AuthenticateRequest); i { case 0: return &v.state case 1: @@ -1913,7 +2079,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PinRequest); i { + switch v := v.(*PasswordRequest); i { case 0: return &v.state case 1: @@ -1925,7 +2091,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthenticateResponse); i { + switch v := v.(*PinRequest); i { case 0: return &v.state case 1: @@ -1937,7 +2103,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendStatusUpdateRequest); i { + switch v := v.(*AuthenticateResponse); i { case 0: return &v.state case 1: @@ -1949,7 +2115,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendStatusUpdateResponse); i { + switch v := v.(*SendStatusUpdateRequest); i { case 0: return &v.state case 1: @@ -1961,7 +2127,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateProxyPeerRequest); i { + switch v := v.(*SendStatusUpdateResponse); i { case 0: return &v.state case 1: @@ -1973,7 +2139,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateProxyPeerResponse); i { + switch v := v.(*CreateProxyPeerRequest); i { case 0: return &v.state case 1: @@ -1985,7 +2151,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetOIDCURLRequest); i { + switch v := v.(*CreateProxyPeerResponse); i { case 0: return &v.state case 1: @@ -1997,7 +2163,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetOIDCURLResponse); i { + switch v := v.(*GetOIDCURLRequest); i { case 0: return &v.state case 1: @@ -2009,7 +2175,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateSessionRequest); i { + switch v := v.(*GetOIDCURLResponse); i { case 0: return &v.state case 1: @@ -2021,6 +2187,18 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateSessionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateSessionResponse); i { case 0: return &v.state @@ -2033,19 +2211,19 @@ func file_proxy_service_proto_init() { } } } - file_proxy_service_proto_msgTypes[8].OneofWrappers = []interface{}{ + file_proxy_service_proto_msgTypes[9].OneofWrappers = []interface{}{ (*AuthenticateRequest_Password)(nil), (*AuthenticateRequest_Pin)(nil), } - file_proxy_service_proto_msgTypes[12].OneofWrappers = []interface{}{} - file_proxy_service_proto_msgTypes[15].OneofWrappers = []interface{}{} + file_proxy_service_proto_msgTypes[13].OneofWrappers = []interface{}{} + file_proxy_service_proto_msgTypes[16].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_proxy_service_proto_rawDesc, - NumEnums: 2, - NumMessages: 20, + NumEnums: 3, + NumMessages: 22, NumExtensions: 0, NumServices: 1, }, diff --git a/shared/management/proto/proxy_service.proto b/shared/management/proto/proxy_service.proto index b4e62a52a..be553095d 100644 --- a/shared/management/proto/proxy_service.proto +++ b/shared/management/proto/proxy_service.proto @@ -4,6 +4,7 @@ package management; option go_package = "/proto"; +import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; // ProxyService - Management is the SERVER, Proxy is the CLIENT @@ -50,9 +51,22 @@ enum ProxyMappingUpdateType { UPDATE_TYPE_REMOVED = 2; } +enum PathRewriteMode { + PATH_REWRITE_DEFAULT = 0; + PATH_REWRITE_PRESERVE = 1; +} + +message PathTargetOptions { + bool skip_tls_verify = 1; + google.protobuf.Duration request_timeout = 2; + PathRewriteMode path_rewrite = 3; + map custom_headers = 4; +} + message PathMapping { string path = 1; string target = 2; + PathTargetOptions options = 3; } message Authentication { From 44655ca9b5dda6e8a8a917bf5dc6bb0dc3f6a796 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Thu, 5 Mar 2026 11:43:18 +0100 Subject: [PATCH 188/374] [misc] add PR title validation workflow (#5503) --- .github/workflows/pr-title-check.yml | 51 ++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 .github/workflows/pr-title-check.yml diff --git a/.github/workflows/pr-title-check.yml b/.github/workflows/pr-title-check.yml new file mode 100644 index 000000000..a2e6ce219 --- /dev/null +++ b/.github/workflows/pr-title-check.yml @@ -0,0 +1,51 @@ +name: PR Title Check + +on: + pull_request: + types: [opened, edited, synchronize, reopened] + +jobs: + check-title: + runs-on: ubuntu-latest + steps: + - name: Validate PR title prefix + uses: actions/github-script@v7 + with: + script: | + const title = context.payload.pull_request.title; + const allowedTags = [ + 'management', + 'client', + 'signal', + 'proxy', + 'relay', + 'misc', + 'infrastructure', + 'self-hosted', + 'doc', + ]; + + const pattern = /^\[([^\]]+)\]\s+.+/; + const match = title.match(pattern); + + if (!match) { + core.setFailed( + `PR title must start with a tag in brackets.\n` + + `Example: [client] fix something\n` + + `Allowed tags: ${allowedTags.join(', ')}` + ); + return; + } + + const tags = match[1].split(',').map(t => t.trim().toLowerCase()); + + const invalid = tags.filter(t => !allowedTags.includes(t)); + if (invalid.length > 0) { + core.setFailed( + `Invalid tag(s): ${invalid.join(', ')}\n` + + `Allowed tags: ${allowedTags.join(', ')}` + ); + return; + } + + console.log(`Valid PR title tags: [${tags.join(', ')}]`); From 4f0a3a77ad3e3fcdeab09e1945ebab8a66b94e6f Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Thu, 5 Mar 2026 14:30:31 +0100 Subject: [PATCH 189/374] [management] Avoid breaking single acc mode when switching domains (#5511) * **Bug Fixes** * Fixed domain configuration handling in single account mode to properly retrieve and apply domain settings from account data. * Improved error handling when account data is unavailable with fallback to configured default domain. * **Tests** * Added comprehensive test coverage for single account mode domain configuration scenarios, including edge cases for missing or unavailable account data. --- management/server/account.go | 36 +++++++++- management/server/account_test.go | 114 ++++++++++++++++++++++++++++++ 2 files changed, 147 insertions(+), 3 deletions(-) diff --git a/management/server/account.go b/management/server/account.go index 550971337..01d0eebfa 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -1379,9 +1379,10 @@ func (am *DefaultAccountManager) GetAccountIDFromUserAuth(ctx context.Context, u if am.singleAccountMode && am.singleAccountModeDomain != "" { // This section is mostly related to self-hosted installations. // We override incoming domain claims to group users under a single account. - userAuth.Domain = am.singleAccountModeDomain - userAuth.DomainCategory = types.PrivateCategory - log.WithContext(ctx).Debugf("overriding JWT Domain and DomainCategory claims since single account mode is enabled") + err := am.updateUserAuthWithSingleMode(ctx, &userAuth) + if err != nil { + return "", "", err + } } accountID, err := am.getAccountIDWithAuthorizationClaims(ctx, userAuth) @@ -1414,6 +1415,35 @@ func (am *DefaultAccountManager) GetAccountIDFromUserAuth(ctx context.Context, u return accountID, user.Id, nil } +// updateUserAuthWithSingleMode modifies the userAuth with the single account domain, or if there is an existing account, with the domain of that account +func (am *DefaultAccountManager) updateUserAuthWithSingleMode(ctx context.Context, userAuth *auth.UserAuth) error { + userAuth.DomainCategory = types.PrivateCategory + userAuth.Domain = am.singleAccountModeDomain + + accountID, err := am.Store.GetAnyAccountID(ctx) + if err != nil { + if e, ok := status.FromError(err); !ok || e.Type() != status.NotFound { + return err + } + log.WithContext(ctx).Debugf("using singleAccountModeDomain to override JWT Domain and DomainCategory claims in single account mode") + return nil + } + + if accountID == "" { + log.WithContext(ctx).Debugf("using singleAccountModeDomain to override JWT Domain and DomainCategory claims in single account mode") + return nil + } + + domain, _, err := am.Store.GetAccountDomainAndCategory(ctx, store.LockingStrengthNone, accountID) + if err != nil { + return err + } + userAuth.Domain = domain + + log.WithContext(ctx).Debugf("overriding JWT Domain and DomainCategory claims since single account mode is enabled") + return nil +} + // syncJWTGroups processes the JWT groups for a user, updates the account based on the groups, // and propagates changes to peers if group propagation is enabled. // requires userAuth to have been ValidateAndParseToken and EnsureUserAccessByJWTGroups by the AuthManager diff --git a/management/server/account_test.go b/management/server/account_test.go index 65bab6c18..a073d4fca 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -15,6 +15,7 @@ import ( "time" "github.com/golang/mock/gomock" + "github.com/netbirdio/netbird/shared/management/status" "github.com/prometheus/client_golang/prometheus/push" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -3966,3 +3967,116 @@ func TestDefaultAccountManager_UpdateAccountSettings_NetworkRangeChange(t *testi t.Fatal("UpdateAccountSettings deadlocked when changing NetworkRange") } } + +func TestUpdateUserAuthWithSingleMode(t *testing.T) { + t.Run("sets defaults and overrides domain from store", func(t *testing.T) { + ctrl := gomock.NewController(t) + t.Cleanup(ctrl.Finish) + + mockStore := store.NewMockStore(ctrl) + mockStore.EXPECT(). + GetAnyAccountID(gomock.Any()). + Return("account-1", nil) + mockStore.EXPECT(). + GetAccountDomainAndCategory(gomock.Any(), store.LockingStrengthNone, "account-1"). + Return("real-domain.com", "private", nil) + + am := &DefaultAccountManager{ + Store: mockStore, + singleAccountModeDomain: "fallback.com", + } + + userAuth := &auth.UserAuth{} + err := am.updateUserAuthWithSingleMode(context.Background(), userAuth) + require.NoError(t, err) + assert.Equal(t, "real-domain.com", userAuth.Domain) + assert.Equal(t, types.PrivateCategory, userAuth.DomainCategory) + }) + + t.Run("falls back to singleAccountModeDomain when account ID is empty", func(t *testing.T) { + ctrl := gomock.NewController(t) + t.Cleanup(ctrl.Finish) + + mockStore := store.NewMockStore(ctrl) + mockStore.EXPECT(). + GetAnyAccountID(gomock.Any()). + Return("", nil) + + am := &DefaultAccountManager{ + Store: mockStore, + singleAccountModeDomain: "fallback.com", + } + + userAuth := &auth.UserAuth{} + err := am.updateUserAuthWithSingleMode(context.Background(), userAuth) + require.NoError(t, err) + assert.Equal(t, "fallback.com", userAuth.Domain) + assert.Equal(t, types.PrivateCategory, userAuth.DomainCategory) + }) + + t.Run("falls back to singleAccountModeDomain on NotFound error", func(t *testing.T) { + ctrl := gomock.NewController(t) + t.Cleanup(ctrl.Finish) + + mockStore := store.NewMockStore(ctrl) + mockStore.EXPECT(). + GetAnyAccountID(gomock.Any()). + Return("", status.Errorf(status.NotFound, "no accounts")) + + am := &DefaultAccountManager{ + Store: mockStore, + singleAccountModeDomain: "fallback.com", + } + + userAuth := &auth.UserAuth{} + err := am.updateUserAuthWithSingleMode(context.Background(), userAuth) + require.NoError(t, err) + assert.Equal(t, "fallback.com", userAuth.Domain) + assert.Equal(t, types.PrivateCategory, userAuth.DomainCategory) + }) + + t.Run("propagates non-NotFound error from GetAnyAccountID", func(t *testing.T) { + ctrl := gomock.NewController(t) + t.Cleanup(ctrl.Finish) + + mockStore := store.NewMockStore(ctrl) + mockStore.EXPECT(). + GetAnyAccountID(gomock.Any()). + Return("", status.Errorf(status.Internal, "db down")) + + am := &DefaultAccountManager{ + Store: mockStore, + singleAccountModeDomain: "fallback.com", + } + + userAuth := &auth.UserAuth{} + err := am.updateUserAuthWithSingleMode(context.Background(), userAuth) + require.Error(t, err) + assert.Contains(t, err.Error(), "db down") + // Defaults should still be set before error path + assert.Equal(t, types.PrivateCategory, userAuth.DomainCategory) + }) + + t.Run("propagates error from GetAccountDomainAndCategory", func(t *testing.T) { + ctrl := gomock.NewController(t) + t.Cleanup(ctrl.Finish) + + mockStore := store.NewMockStore(ctrl) + mockStore.EXPECT(). + GetAnyAccountID(gomock.Any()). + Return("account-1", nil) + mockStore.EXPECT(). + GetAccountDomainAndCategory(gomock.Any(), store.LockingStrengthNone, "account-1"). + Return("", "", status.Errorf(status.Internal, "query failed")) + + am := &DefaultAccountManager{ + Store: mockStore, + singleAccountModeDomain: "fallback.com", + } + + userAuth := &auth.UserAuth{} + err := am.updateUserAuthWithSingleMode(context.Background(), userAuth) + require.Error(t, err) + assert.Contains(t, err.Error(), "query failed") + }) +} From a7f3ba03eb6a284d9355192e1c3992a7ab6ef088 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Thu, 5 Mar 2026 22:10:45 +0100 Subject: [PATCH 190/374] [management] aggregate grpc metrics by accountID (#5486) --- .../server/telemetry/account_aggregator.go | 185 +++++++++++++++ .../telemetry/account_aggregator_test.go | 219 ++++++++++++++++++ management/server/telemetry/grpc_metrics.go | 127 +++++++--- 3 files changed, 505 insertions(+), 26 deletions(-) create mode 100644 management/server/telemetry/account_aggregator.go create mode 100644 management/server/telemetry/account_aggregator_test.go diff --git a/management/server/telemetry/account_aggregator.go b/management/server/telemetry/account_aggregator.go new file mode 100644 index 000000000..cd0863ed6 --- /dev/null +++ b/management/server/telemetry/account_aggregator.go @@ -0,0 +1,185 @@ +package telemetry + +import ( + "context" + "math" + "sync" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// AccountDurationAggregator uses OpenTelemetry histograms per account to calculate P95 +// without publishing individual account labels +type AccountDurationAggregator struct { + mu sync.RWMutex + accounts map[string]*accountHistogram + meterProvider *sdkmetric.MeterProvider + manualReader *sdkmetric.ManualReader + + FlushInterval time.Duration + MaxAge time.Duration + ctx context.Context +} + +type accountHistogram struct { + histogram metric.Int64Histogram + lastUpdate time.Time +} + +// NewAccountDurationAggregator creates aggregator using OTel histograms +func NewAccountDurationAggregator(ctx context.Context, flushInterval, maxAge time.Duration) *AccountDurationAggregator { + manualReader := sdkmetric.NewManualReader( + sdkmetric.WithTemporalitySelector(func(kind sdkmetric.InstrumentKind) metricdata.Temporality { + return metricdata.DeltaTemporality + }), + ) + + meterProvider := sdkmetric.NewMeterProvider( + sdkmetric.WithReader(manualReader), + ) + + return &AccountDurationAggregator{ + accounts: make(map[string]*accountHistogram), + meterProvider: meterProvider, + manualReader: manualReader, + FlushInterval: flushInterval, + MaxAge: maxAge, + ctx: ctx, + } +} + +// Record adds a duration for an account using OTel histogram +func (a *AccountDurationAggregator) Record(accountID string, duration time.Duration) { + a.mu.Lock() + defer a.mu.Unlock() + + accHist, exists := a.accounts[accountID] + if !exists { + meter := a.meterProvider.Meter("account-aggregator") + histogram, err := meter.Int64Histogram( + "sync_duration_per_account", + metric.WithUnit("milliseconds"), + ) + if err != nil { + return + } + + accHist = &accountHistogram{ + histogram: histogram, + } + a.accounts[accountID] = accHist + } + + accHist.histogram.Record(a.ctx, duration.Milliseconds(), + metric.WithAttributes(attribute.String("account_id", accountID))) + accHist.lastUpdate = time.Now() +} + +// FlushAndGetP95s extracts P95 from each account's histogram +func (a *AccountDurationAggregator) FlushAndGetP95s() []int64 { + a.mu.Lock() + defer a.mu.Unlock() + + var rm metricdata.ResourceMetrics + err := a.manualReader.Collect(a.ctx, &rm) + if err != nil { + return nil + } + + now := time.Now() + p95s := make([]int64, 0, len(a.accounts)) + + for _, scopeMetrics := range rm.ScopeMetrics { + for _, metric := range scopeMetrics.Metrics { + histogramData, ok := metric.Data.(metricdata.Histogram[int64]) + if !ok { + continue + } + + for _, dataPoint := range histogramData.DataPoints { + a.processDataPoint(dataPoint, now, &p95s) + } + } + } + + a.cleanupStaleAccounts(now) + + return p95s +} + +// processDataPoint extracts P95 from a single histogram data point +func (a *AccountDurationAggregator) processDataPoint(dataPoint metricdata.HistogramDataPoint[int64], now time.Time, p95s *[]int64) { + accountID := extractAccountID(dataPoint) + if accountID == "" { + return + } + + if p95 := calculateP95FromHistogram(dataPoint); p95 > 0 { + *p95s = append(*p95s, p95) + } +} + +// cleanupStaleAccounts removes accounts that haven't been updated recently +func (a *AccountDurationAggregator) cleanupStaleAccounts(now time.Time) { + for accountID := range a.accounts { + if a.isStaleAccount(accountID, now) { + delete(a.accounts, accountID) + } + } +} + +// extractAccountID retrieves the account_id from histogram data point attributes +func extractAccountID(dp metricdata.HistogramDataPoint[int64]) string { + for _, attr := range dp.Attributes.ToSlice() { + if attr.Key == "account_id" { + return attr.Value.AsString() + } + } + return "" +} + +// isStaleAccount checks if an account hasn't been updated recently +func (a *AccountDurationAggregator) isStaleAccount(accountID string, now time.Time) bool { + accHist, exists := a.accounts[accountID] + if !exists { + return false + } + return now.Sub(accHist.lastUpdate) > a.MaxAge +} + +// calculateP95FromHistogram computes P95 from OTel histogram data +func calculateP95FromHistogram(dp metricdata.HistogramDataPoint[int64]) int64 { + if dp.Count == 0 { + return 0 + } + + targetCount := uint64(math.Ceil(float64(dp.Count) * 0.95)) + if targetCount == 0 { + targetCount = 1 + } + var cumulativeCount uint64 + + for i, bucketCount := range dp.BucketCounts { + cumulativeCount += bucketCount + if cumulativeCount >= targetCount { + if i < len(dp.Bounds) { + return int64(dp.Bounds[i]) + } + if maxVal, defined := dp.Max.Value(); defined { + return maxVal + } + return dp.Sum / int64(dp.Count) + } + } + + return dp.Sum / int64(dp.Count) +} + +// Shutdown cleans up resources +func (a *AccountDurationAggregator) Shutdown() error { + return a.meterProvider.Shutdown(a.ctx) +} diff --git a/management/server/telemetry/account_aggregator_test.go b/management/server/telemetry/account_aggregator_test.go new file mode 100644 index 000000000..63b74b1db --- /dev/null +++ b/management/server/telemetry/account_aggregator_test.go @@ -0,0 +1,219 @@ +package telemetry + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDeltaTemporality_P95ReflectsCurrentWindow(t *testing.T) { + // Verify that with delta temporality, each flush window only reflects + // recordings since the last flush — not all-time data. + ctx := context.Background() + agg := NewAccountDurationAggregator(ctx, time.Minute, 5*time.Minute) + defer func(agg *AccountDurationAggregator) { + err := agg.Shutdown() + if err != nil { + t.Errorf("failed to shutdown aggregator: %v", err) + } + }(agg) + + // Window 1: Record 100 slow requests (500ms each) + for range 100 { + agg.Record("account-A", 500*time.Millisecond) + } + + p95sWindow1 := agg.FlushAndGetP95s() + require.Len(t, p95sWindow1, 1, "should have P95 for one account") + firstP95 := p95sWindow1[0] + assert.GreaterOrEqual(t, firstP95, int64(200), + "first window P95 should reflect the 500ms recordings") + + // Window 2: Record 100 FAST requests (10ms each) + for range 100 { + agg.Record("account-A", 10*time.Millisecond) + } + + p95sWindow2 := agg.FlushAndGetP95s() + require.Len(t, p95sWindow2, 1, "should have P95 for one account") + secondP95 := p95sWindow2[0] + + // With delta temporality the P95 should drop significantly because + // the first window's slow recordings are no longer included. + assert.Less(t, secondP95, firstP95, + "second window P95 should be lower than first — delta temporality "+ + "ensures each window only reflects recent recordings") +} + +func TestEqualWeightPerAccount(t *testing.T) { + // Verify that each account contributes exactly one P95 value, + // regardless of how many requests it made. + ctx := context.Background() + agg := NewAccountDurationAggregator(ctx, time.Minute, 5*time.Minute) + defer func(agg *AccountDurationAggregator) { + err := agg.Shutdown() + if err != nil { + t.Errorf("failed to shutdown aggregator: %v", err) + } + }(agg) + + // Account A: 10,000 requests at 500ms (noisy customer) + for range 10000 { + agg.Record("account-A", 500*time.Millisecond) + } + + // Accounts B, C, D: 10 requests each at 50ms (normal customers) + for _, id := range []string{"account-B", "account-C", "account-D"} { + for range 10 { + agg.Record(id, 50*time.Millisecond) + } + } + + p95s := agg.FlushAndGetP95s() + + // Should get exactly 4 P95 values — one per account + assert.Len(t, p95s, 4, "each account should contribute exactly one P95") +} + +func TestStaleAccountEviction(t *testing.T) { + ctx := context.Background() + // Use a very short MaxAge so we can test staleness + agg := NewAccountDurationAggregator(ctx, time.Minute, 50*time.Millisecond) + defer func(agg *AccountDurationAggregator) { + err := agg.Shutdown() + if err != nil { + t.Errorf("failed to shutdown aggregator: %v", err) + } + }(agg) + + agg.Record("account-A", 100*time.Millisecond) + agg.Record("account-B", 200*time.Millisecond) + + // Both accounts should appear + p95s := agg.FlushAndGetP95s() + assert.Len(t, p95s, 2, "both accounts should have P95 values") + + // Wait for account-A to become stale, then only update account-B + time.Sleep(60 * time.Millisecond) + agg.Record("account-B", 200*time.Millisecond) + + p95s = agg.FlushAndGetP95s() + assert.Len(t, p95s, 1, "both accounts should have P95 values") + + // account-A should have been evicted from the accounts map + agg.mu.RLock() + _, accountAExists := agg.accounts["account-A"] + _, accountBExists := agg.accounts["account-B"] + agg.mu.RUnlock() + + assert.False(t, accountAExists, "stale account-A should be evicted from map") + assert.True(t, accountBExists, "active account-B should remain in map") +} + +func TestStaleAccountEviction_DoesNotReappear(t *testing.T) { + // Verify that with delta temporality, an evicted stale account does not + // reappear in subsequent flushes. + ctx := context.Background() + agg := NewAccountDurationAggregator(ctx, time.Minute, 50*time.Millisecond) + defer func(agg *AccountDurationAggregator) { + err := agg.Shutdown() + if err != nil { + t.Errorf("failed to shutdown aggregator: %v", err) + } + }(agg) + + agg.Record("account-stale", 100*time.Millisecond) + + // Wait for it to become stale + time.Sleep(60 * time.Millisecond) + + // First flush: should detect staleness and evict + _ = agg.FlushAndGetP95s() + + agg.mu.RLock() + _, exists := agg.accounts["account-stale"] + agg.mu.RUnlock() + assert.False(t, exists, "account should be evicted after first flush") + + // Second flush: with delta temporality, the stale account should NOT reappear + p95sSecond := agg.FlushAndGetP95s() + assert.Empty(t, p95sSecond, + "evicted account should not reappear in subsequent flushes with delta temporality") +} + +func TestP95Calculation_SingleSample(t *testing.T) { + ctx := context.Background() + agg := NewAccountDurationAggregator(ctx, time.Minute, 5*time.Minute) + defer func(agg *AccountDurationAggregator) { + err := agg.Shutdown() + if err != nil { + t.Errorf("failed to shutdown aggregator: %v", err) + } + }(agg) + + agg.Record("account-A", 150*time.Millisecond) + + p95s := agg.FlushAndGetP95s() + require.Len(t, p95s, 1) + // With a single sample, P95 should be the bucket bound containing 150ms + assert.Greater(t, p95s[0], int64(0), "P95 of a single sample should be positive") +} + +func TestP95Calculation_AllSameValue(t *testing.T) { + ctx := context.Background() + agg := NewAccountDurationAggregator(ctx, time.Minute, 5*time.Minute) + defer func(agg *AccountDurationAggregator) { + err := agg.Shutdown() + if err != nil { + t.Errorf("failed to shutdown aggregator: %v", err) + } + }(agg) + + // All samples are 100ms — P95 should be the bucket bound containing 100ms + for range 100 { + agg.Record("account-A", 100*time.Millisecond) + } + + p95s := agg.FlushAndGetP95s() + require.Len(t, p95s, 1) + assert.Greater(t, p95s[0], int64(0)) +} + +func TestMultipleAccounts_IndependentP95s(t *testing.T) { + ctx := context.Background() + agg := NewAccountDurationAggregator(ctx, time.Minute, 5*time.Minute) + defer func(agg *AccountDurationAggregator) { + err := agg.Shutdown() + if err != nil { + t.Errorf("failed to shutdown aggregator: %v", err) + } + }(agg) + + // Account A: all fast (10ms) + for range 100 { + agg.Record("account-fast", 10*time.Millisecond) + } + + // Account B: all slow (5000ms) + for range 100 { + agg.Record("account-slow", 5000*time.Millisecond) + } + + p95s := agg.FlushAndGetP95s() + require.Len(t, p95s, 2, "should have two P95 values") + + // Find min and max — they should differ significantly + minP95 := p95s[0] + maxP95 := p95s[1] + if minP95 > maxP95 { + minP95, maxP95 = maxP95, minP95 + } + + assert.Less(t, minP95, int64(1000), + "fast account P95 should be well under 1000ms") + assert.Greater(t, maxP95, int64(1000), + "slow account P95 should be well over 1000ms") +} diff --git a/management/server/telemetry/grpc_metrics.go b/management/server/telemetry/grpc_metrics.go index bd7fbc235..d3239c57a 100644 --- a/management/server/telemetry/grpc_metrics.go +++ b/management/server/telemetry/grpc_metrics.go @@ -13,18 +13,24 @@ const HighLatencyThreshold = time.Second * 7 // GRPCMetrics are gRPC server metrics type GRPCMetrics struct { - meter metric.Meter - syncRequestsCounter metric.Int64Counter - syncRequestsBlockedCounter metric.Int64Counter - loginRequestsCounter metric.Int64Counter - loginRequestsBlockedCounter metric.Int64Counter - loginRequestHighLatencyCounter metric.Int64Counter - getKeyRequestsCounter metric.Int64Counter - activeStreamsGauge metric.Int64ObservableGauge - syncRequestDuration metric.Int64Histogram - loginRequestDuration metric.Int64Histogram - channelQueueLength metric.Int64Histogram - ctx context.Context + meter metric.Meter + syncRequestsCounter metric.Int64Counter + syncRequestsBlockedCounter metric.Int64Counter + loginRequestsCounter metric.Int64Counter + loginRequestsBlockedCounter metric.Int64Counter + loginRequestHighLatencyCounter metric.Int64Counter + getKeyRequestsCounter metric.Int64Counter + activeStreamsGauge metric.Int64ObservableGauge + syncRequestDuration metric.Int64Histogram + syncRequestDurationP95ByAccount metric.Int64Histogram + loginRequestDuration metric.Int64Histogram + loginRequestDurationP95ByAccount metric.Int64Histogram + channelQueueLength metric.Int64Histogram + ctx context.Context + + // Per-account aggregation + syncDurationAggregator *AccountDurationAggregator + loginDurationAggregator *AccountDurationAggregator } // NewGRPCMetrics creates new GRPCMetrics struct and registers common metrics of the gRPC server @@ -93,6 +99,14 @@ func NewGRPCMetrics(ctx context.Context, meter metric.Meter) (*GRPCMetrics, erro return nil, err } + syncRequestDurationP95ByAccount, err := meter.Int64Histogram("management.grpc.sync.request.duration.p95.by.account.ms", + metric.WithUnit("milliseconds"), + metric.WithDescription("P95 duration of sync requests aggregated per account - each data point represents one account's P95"), + ) + if err != nil { + return nil, err + } + loginRequestDuration, err := meter.Int64Histogram("management.grpc.login.request.duration.ms", metric.WithUnit("milliseconds"), metric.WithDescription("Duration of the login gRPC requests from the peers to authenticate and receive initial configuration and relay credentials"), @@ -101,6 +115,14 @@ func NewGRPCMetrics(ctx context.Context, meter metric.Meter) (*GRPCMetrics, erro return nil, err } + loginRequestDurationP95ByAccount, err := meter.Int64Histogram("management.grpc.login.request.duration.p95.by.account.ms", + metric.WithUnit("milliseconds"), + metric.WithDescription("P95 duration of login requests aggregated per account - each data point represents one account's P95"), + ) + if err != nil { + return nil, err + } + // We use histogram here as we have multiple channel at the same time and we want to see a slice at any given time // Then we should be able to extract min, manx, mean and the percentiles. // TODO(yury): This needs custom bucketing as we are interested in the values from 0 to server.channelBufferSize (100) @@ -113,20 +135,32 @@ func NewGRPCMetrics(ctx context.Context, meter metric.Meter) (*GRPCMetrics, erro return nil, err } - return &GRPCMetrics{ - meter: meter, - syncRequestsCounter: syncRequestsCounter, - syncRequestsBlockedCounter: syncRequestsBlockedCounter, - loginRequestsCounter: loginRequestsCounter, - loginRequestsBlockedCounter: loginRequestsBlockedCounter, - loginRequestHighLatencyCounter: loginRequestHighLatencyCounter, - getKeyRequestsCounter: getKeyRequestsCounter, - activeStreamsGauge: activeStreamsGauge, - syncRequestDuration: syncRequestDuration, - loginRequestDuration: loginRequestDuration, - channelQueueLength: channelQueue, - ctx: ctx, - }, err + syncDurationAggregator := NewAccountDurationAggregator(ctx, 60*time.Second, 5*time.Minute) + loginDurationAggregator := NewAccountDurationAggregator(ctx, 60*time.Second, 5*time.Minute) + + grpcMetrics := &GRPCMetrics{ + meter: meter, + syncRequestsCounter: syncRequestsCounter, + syncRequestsBlockedCounter: syncRequestsBlockedCounter, + loginRequestsCounter: loginRequestsCounter, + loginRequestsBlockedCounter: loginRequestsBlockedCounter, + loginRequestHighLatencyCounter: loginRequestHighLatencyCounter, + getKeyRequestsCounter: getKeyRequestsCounter, + activeStreamsGauge: activeStreamsGauge, + syncRequestDuration: syncRequestDuration, + syncRequestDurationP95ByAccount: syncRequestDurationP95ByAccount, + loginRequestDuration: loginRequestDuration, + loginRequestDurationP95ByAccount: loginRequestDurationP95ByAccount, + channelQueueLength: channelQueue, + ctx: ctx, + syncDurationAggregator: syncDurationAggregator, + loginDurationAggregator: loginDurationAggregator, + } + + go grpcMetrics.startSyncP95Flusher() + go grpcMetrics.startLoginP95Flusher() + + return grpcMetrics, err } // CountSyncRequest counts the number of gRPC sync requests coming to the gRPC API @@ -157,6 +191,9 @@ func (grpcMetrics *GRPCMetrics) CountLoginRequestBlocked() { // CountLoginRequestDuration counts the duration of the login gRPC requests func (grpcMetrics *GRPCMetrics) CountLoginRequestDuration(duration time.Duration, accountID string) { grpcMetrics.loginRequestDuration.Record(grpcMetrics.ctx, duration.Milliseconds()) + + grpcMetrics.loginDurationAggregator.Record(accountID, duration) + if duration > HighLatencyThreshold { grpcMetrics.loginRequestHighLatencyCounter.Add(grpcMetrics.ctx, 1, metric.WithAttributes(attribute.String(AccountIDLabel, accountID))) } @@ -165,6 +202,44 @@ func (grpcMetrics *GRPCMetrics) CountLoginRequestDuration(duration time.Duration // CountSyncRequestDuration counts the duration of the sync gRPC requests func (grpcMetrics *GRPCMetrics) CountSyncRequestDuration(duration time.Duration, accountID string) { grpcMetrics.syncRequestDuration.Record(grpcMetrics.ctx, duration.Milliseconds()) + + grpcMetrics.syncDurationAggregator.Record(accountID, duration) +} + +// startSyncP95Flusher periodically flushes per-account sync P95 values to the histogram +func (grpcMetrics *GRPCMetrics) startSyncP95Flusher() { + ticker := time.NewTicker(grpcMetrics.syncDurationAggregator.FlushInterval) + defer ticker.Stop() + + for { + select { + case <-grpcMetrics.ctx.Done(): + return + case <-ticker.C: + p95s := grpcMetrics.syncDurationAggregator.FlushAndGetP95s() + for _, p95 := range p95s { + grpcMetrics.syncRequestDurationP95ByAccount.Record(grpcMetrics.ctx, p95) + } + } + } +} + +// startLoginP95Flusher periodically flushes per-account login P95 values to the histogram +func (grpcMetrics *GRPCMetrics) startLoginP95Flusher() { + ticker := time.NewTicker(grpcMetrics.loginDurationAggregator.FlushInterval) + defer ticker.Stop() + + for { + select { + case <-grpcMetrics.ctx.Done(): + return + case <-ticker.C: + p95s := grpcMetrics.loginDurationAggregator.FlushAndGetP95s() + for _, p95 := range p95s { + grpcMetrics.loginRequestDurationP95ByAccount.Record(grpcMetrics.ctx, p95) + } + } + } } // RegisterConnectedStreams registers a function that collects number of active streams and feeds it to the metrics gauge. From 85451ab4cd48101ba2d68832db4902e3cca9bf1b Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Fri, 6 Mar 2026 08:43:46 +0100 Subject: [PATCH 191/374] [management] Add stable domain resolution for combined server (#5515) The combined server was using the hostname from exposedAddress for both singleAccountModeDomain and dnsDomain, causing fresh installs to get the wrong domain and existing installs to break if the config changed. Add resolveDomains() to BaseServer that reads domain from the store: - Fresh install (0 accounts): uses "netbird.selfhosted" default - Existing install: reads persisted domain from the account in DB - Store errors: falls back to default safely The combined server opts in via AutoResolveDomains flag, while the standalone management server is unaffected. --- combined/cmd/root.go | 8 +-- management/internals/server/server.go | 71 ++++++++++++++++++- .../server/server_resolve_domains_test.go | 63 ++++++++++++++++ 3 files changed, 134 insertions(+), 8 deletions(-) create mode 100644 management/internals/server/server_resolve_domains_test.go diff --git a/combined/cmd/root.go b/combined/cmd/root.go index 153260341..ea1ff908a 100644 --- a/combined/cmd/root.go +++ b/combined/cmd/root.go @@ -493,9 +493,6 @@ func handleTLSConfig(cfg *CombinedConfig) (*tls.Config, bool, error) { func createManagementServer(cfg *CombinedConfig, mgmtConfig *nbconfig.Config) (*mgmtServer.BaseServer, error) { mgmt := cfg.Management - dnsDomain := mgmt.DnsDomain - singleAccModeDomain := dnsDomain - // Extract port from listen address _, portStr, err := net.SplitHostPort(cfg.Server.ListenAddress) if err != nil { @@ -507,8 +504,9 @@ func createManagementServer(cfg *CombinedConfig, mgmtConfig *nbconfig.Config) (* mgmtSrv := mgmtServer.NewServer( &mgmtServer.Config{ NbConfig: mgmtConfig, - DNSDomain: dnsDomain, - MgmtSingleAccModeDomain: singleAccModeDomain, + DNSDomain: "", + MgmtSingleAccModeDomain: "", + AutoResolveDomains: true, MgmtPort: mgmtPort, MgmtMetricsPort: cfg.Server.MetricsPort, DisableMetrics: mgmt.DisableAnonymousMetrics, diff --git a/management/internals/server/server.go b/management/internals/server/server.go index 5149c338b..573983a79 100644 --- a/management/internals/server/server.go +++ b/management/internals/server/server.go @@ -28,9 +28,13 @@ import ( "github.com/netbirdio/netbird/version" ) -// ManagementLegacyPort is the port that was used before by the Management gRPC server. -// It is used for backward compatibility now. -const ManagementLegacyPort = 33073 +const ( + // ManagementLegacyPort is the port that was used before by the Management gRPC server. + // It is used for backward compatibility now. + ManagementLegacyPort = 33073 + // DefaultSelfHostedDomain is the default domain used for self-hosted fresh installs. + DefaultSelfHostedDomain = "netbird.selfhosted" +) type Server interface { Start(ctx context.Context) error @@ -58,6 +62,7 @@ type BaseServer struct { mgmtMetricsPort int mgmtPort int disableLegacyManagementPort bool + autoResolveDomains bool proxyAuthClose func() @@ -81,6 +86,7 @@ type Config struct { DisableMetrics bool DisableGeoliteUpdate bool UserDeleteFromIDPEnabled bool + AutoResolveDomains bool } // NewServer initializes and configures a new Server instance @@ -96,6 +102,7 @@ func NewServer(cfg *Config) *BaseServer { mgmtPort: cfg.MgmtPort, disableLegacyManagementPort: cfg.DisableLegacyManagementPort, mgmtMetricsPort: cfg.MgmtMetricsPort, + autoResolveDomains: cfg.AutoResolveDomains, } } @@ -109,6 +116,10 @@ func (s *BaseServer) Start(ctx context.Context) error { s.cancel = cancel s.errCh = make(chan error, 4) + if s.autoResolveDomains { + s.resolveDomains(srvCtx) + } + s.PeersManager() s.GeoLocationManager() @@ -381,6 +392,60 @@ func (s *BaseServer) serveGRPCWithHTTP(ctx context.Context, listener net.Listene }() } +// resolveDomains determines dnsDomain and mgmtSingleAccModeDomain based on store state. +// Fresh installs use the default self-hosted domain, while existing installs reuse the +// persisted account domain to keep addressing stable across config changes. +func (s *BaseServer) resolveDomains(ctx context.Context) { + st := s.Store() + + setDefault := func(logMsg string, args ...any) { + if logMsg != "" { + log.WithContext(ctx).Warnf(logMsg, args...) + } + s.dnsDomain = DefaultSelfHostedDomain + s.mgmtSingleAccModeDomain = DefaultSelfHostedDomain + } + + accountsCount, err := st.GetAccountsCounter(ctx) + if err != nil { + setDefault("resolve domains: failed to read accounts counter: %v; using default domain %q", err, DefaultSelfHostedDomain) + return + } + + if accountsCount == 0 { + s.dnsDomain = DefaultSelfHostedDomain + s.mgmtSingleAccModeDomain = DefaultSelfHostedDomain + log.WithContext(ctx).Infof("resolve domains: fresh install detected, using default domain %q", DefaultSelfHostedDomain) + return + } + + accountID, err := st.GetAnyAccountID(ctx) + if err != nil { + setDefault("resolve domains: failed to get existing account ID: %v; using default domain %q", err, DefaultSelfHostedDomain) + return + } + + if accountID == "" { + setDefault("resolve domains: empty account ID returned for existing accounts; using default domain %q", DefaultSelfHostedDomain) + return + } + + domain, _, err := st.GetAccountDomainAndCategory(ctx, store.LockingStrengthNone, accountID) + if err != nil { + setDefault("resolve domains: failed to get account domain for account %q: %v; using default domain %q", accountID, err, DefaultSelfHostedDomain) + return + } + + if domain == "" { + setDefault("resolve domains: account %q has empty domain; using default domain %q", accountID, DefaultSelfHostedDomain) + return + } + + s.dnsDomain = domain + s.mgmtSingleAccModeDomain = domain + log.WithContext(ctx).Infof("resolve domains: using persisted account domain %q", domain) +} + func getInstallationID(ctx context.Context, store store.Store) (string, error) { installationID := store.GetInstallationID() if installationID != "" { diff --git a/management/internals/server/server_resolve_domains_test.go b/management/internals/server/server_resolve_domains_test.go new file mode 100644 index 000000000..db1d7e8ca --- /dev/null +++ b/management/internals/server/server_resolve_domains_test.go @@ -0,0 +1,63 @@ +package server + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + nbconfig "github.com/netbirdio/netbird/management/internals/server/config" + "github.com/netbirdio/netbird/management/server/store" +) + +func TestResolveDomains_FreshInstallUsesDefault(t *testing.T) { + ctrl := gomock.NewController(t) + t.Cleanup(ctrl.Finish) + + mockStore := store.NewMockStore(ctrl) + mockStore.EXPECT().GetAccountsCounter(gomock.Any()).Return(int64(0), nil) + + srv := NewServer(&Config{NbConfig: &nbconfig.Config{}}) + Inject[store.Store](srv, mockStore) + + srv.resolveDomains(context.Background()) + + require.Equal(t, DefaultSelfHostedDomain, srv.dnsDomain) + require.Equal(t, DefaultSelfHostedDomain, srv.mgmtSingleAccModeDomain) +} + +func TestResolveDomains_ExistingInstallUsesPersistedDomain(t *testing.T) { + ctrl := gomock.NewController(t) + t.Cleanup(ctrl.Finish) + + mockStore := store.NewMockStore(ctrl) + mockStore.EXPECT().GetAccountsCounter(gomock.Any()).Return(int64(1), nil) + mockStore.EXPECT().GetAnyAccountID(gomock.Any()).Return("acc-1", nil) + mockStore.EXPECT().GetAccountDomainAndCategory(gomock.Any(), store.LockingStrengthNone, "acc-1").Return("vpn.mycompany.com", "", nil) + + srv := NewServer(&Config{NbConfig: &nbconfig.Config{}}) + Inject[store.Store](srv, mockStore) + + srv.resolveDomains(context.Background()) + + require.Equal(t, "vpn.mycompany.com", srv.dnsDomain) + require.Equal(t, "vpn.mycompany.com", srv.mgmtSingleAccModeDomain) +} + +func TestResolveDomains_StoreErrorFallsBackToDefault(t *testing.T) { + ctrl := gomock.NewController(t) + t.Cleanup(ctrl.Finish) + + mockStore := store.NewMockStore(ctrl) + mockStore.EXPECT().GetAccountsCounter(gomock.Any()).Return(int64(0), errors.New("db failed")) + + srv := NewServer(&Config{NbConfig: &nbconfig.Config{}}) + Inject[store.Store](srv, mockStore) + + srv.resolveDomains(context.Background()) + + require.Equal(t, DefaultSelfHostedDomain, srv.dnsDomain) + require.Equal(t, DefaultSelfHostedDomain, srv.mgmtSingleAccModeDomain) +} From e6587b071dac374ed2f4ca3d2219977d2e13ff02 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Fri, 6 Mar 2026 16:11:44 +0100 Subject: [PATCH 192/374] [management] use realip for proxy registration (#5525) --- management/internals/shared/grpc/proxy.go | 7 +------ management/internals/shared/grpc/proxy_auth.go | 2 +- management/internals/shared/grpc/proxy_auth_ratelimit.go | 4 ++-- 3 files changed, 4 insertions(+), 9 deletions(-) diff --git a/management/internals/shared/grpc/proxy.go b/management/internals/shared/grpc/proxy.go index 676757c1e..308da5e2f 100644 --- a/management/internals/shared/grpc/proxy.go +++ b/management/internals/shared/grpc/proxy.go @@ -18,7 +18,6 @@ import ( log "github.com/sirupsen/logrus" "golang.org/x/oauth2" "google.golang.org/grpc/codes" - "google.golang.org/grpc/peer" "google.golang.org/grpc/status" "github.com/netbirdio/netbird/shared/management/domain" @@ -177,11 +176,7 @@ func (s *ProxyServiceServer) SetProxyController(proxyController proxy.Controller func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest, stream proto.ProxyService_GetMappingUpdateServer) error { ctx := stream.Context() - peerInfo := "" - if p, ok := peer.FromContext(ctx); ok { - peerInfo = p.Addr.String() - } - + peerInfo := PeerIPFromContext(ctx) log.Infof("New proxy connection from %s", peerInfo) proxyID := req.GetProxyId() diff --git a/management/internals/shared/grpc/proxy_auth.go b/management/internals/shared/grpc/proxy_auth.go index 6daeab5f2..dd593dfa0 100644 --- a/management/internals/shared/grpc/proxy_auth.go +++ b/management/internals/shared/grpc/proxy_auth.go @@ -107,7 +107,7 @@ func NewProxyAuthInterceptors(tokenStore proxyTokenStore) (grpc.UnaryServerInter } func (i *proxyAuthInterceptor) validateProxyToken(ctx context.Context) (*types.ProxyAccessToken, error) { - clientIP := peerIPFromContext(ctx) + clientIP := PeerIPFromContext(ctx) if clientIP != "" && i.failureLimiter.isLimited(clientIP) { return nil, status.Errorf(codes.ResourceExhausted, "too many failed authentication attempts") diff --git a/management/internals/shared/grpc/proxy_auth_ratelimit.go b/management/internals/shared/grpc/proxy_auth_ratelimit.go index 447e531b0..78ab1bd20 100644 --- a/management/internals/shared/grpc/proxy_auth_ratelimit.go +++ b/management/internals/shared/grpc/proxy_auth_ratelimit.go @@ -115,9 +115,9 @@ func (l *authFailureLimiter) stop() { l.cancel() } -// peerIPFromContext extracts the client IP from the gRPC context. +// PeerIPFromContext extracts the client IP from the gRPC context. // Uses realip (from trusted proxy headers) first, falls back to the transport peer address. -func peerIPFromContext(ctx context.Context) clientIP { +func PeerIPFromContext(ctx context.Context) string { if addr, ok := realip.FromContext(ctx); ok { return addr.String() } From 5c20f13c48768a80d9e963f2d14565ff16f99563 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Sat, 7 Mar 2026 10:46:37 +0100 Subject: [PATCH 193/374] [management] fix domain uniqueness (#5529) --- .../service/manager/expose_tracker_test.go | 8 ++-- .../reverseproxy/service/manager/manager.go | 14 +++---- .../service/manager/manager_test.go | 38 +++++++++---------- .../modules/reverseproxy/service/service.go | 20 +++++----- management/server/store/sql_store.go | 4 +- management/server/store/store.go | 2 +- management/server/store/store_mock.go | 8 ++-- 7 files changed, 46 insertions(+), 48 deletions(-) diff --git a/management/internals/modules/reverseproxy/service/manager/expose_tracker_test.go b/management/internals/modules/reverseproxy/service/manager/expose_tracker_test.go index bd9f4b93b..c831b4a22 100644 --- a/management/internals/modules/reverseproxy/service/manager/expose_tracker_test.go +++ b/management/internals/modules/reverseproxy/service/manager/expose_tracker_test.go @@ -36,11 +36,11 @@ func TestReapExpiredExposes(t *testing.T) { mgr.exposeReaper.reapExpiredExposes(ctx) // Expired service should be deleted - _, err = testStore.GetServiceByDomain(ctx, testAccountID, resp.Domain) + _, err = testStore.GetServiceByDomain(ctx, resp.Domain) require.Error(t, err, "expired service should be deleted") // Non-expired service should remain - _, err = testStore.GetServiceByDomain(ctx, testAccountID, resp2.Domain) + _, err = testStore.GetServiceByDomain(ctx, resp2.Domain) require.NoError(t, err, "active service should remain") } @@ -191,14 +191,14 @@ func TestReapSkipsRenewedService(t *testing.T) { // Reaper should skip it because the re-check sees a fresh timestamp mgr.exposeReaper.reapExpiredExposes(ctx) - _, err = testStore.GetServiceByDomain(ctx, testAccountID, resp.Domain) + _, err = testStore.GetServiceByDomain(ctx, resp.Domain) require.NoError(t, err, "renewed service should survive reaping") } // expireEphemeralService backdates meta_last_renewed_at to force expiration. func expireEphemeralService(t *testing.T, s store.Store, accountID, domain string) { t.Helper() - svc, err := s.GetServiceByDomain(context.Background(), accountID, domain) + svc, err := s.GetServiceByDomain(context.Background(), domain) require.NoError(t, err) expired := time.Now().Add(-2 * exposeTTL) diff --git a/management/internals/modules/reverseproxy/service/manager/manager.go b/management/internals/modules/reverseproxy/service/manager/manager.go index b5e643799..56a1fc98a 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager.go +++ b/management/internals/modules/reverseproxy/service/manager/manager.go @@ -199,7 +199,7 @@ func (m *Manager) initializeServiceForCreate(ctx context.Context, accountID stri func (m *Manager) persistNewService(ctx context.Context, accountID string, service *service.Service) error { return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { - if err := m.checkDomainAvailable(ctx, transaction, accountID, service.Domain, ""); err != nil { + if err := m.checkDomainAvailable(ctx, transaction, service.Domain, ""); err != nil { return err } @@ -245,7 +245,7 @@ func (m *Manager) persistNewEphemeralService(ctx context.Context, accountID, pee return status.Errorf(status.PreconditionFailed, "peer has reached the maximum number of active expose sessions (%d)", maxExposesPerPeer) } - if err := m.checkDomainAvailable(ctx, transaction, accountID, svc.Domain, ""); err != nil { + if err := m.checkDomainAvailable(ctx, transaction, svc.Domain, ""); err != nil { return err } @@ -261,8 +261,8 @@ func (m *Manager) persistNewEphemeralService(ctx context.Context, accountID, pee }) } -func (m *Manager) checkDomainAvailable(ctx context.Context, transaction store.Store, accountID, domain, excludeServiceID string) error { - existingService, err := transaction.GetServiceByDomain(ctx, accountID, domain) +func (m *Manager) checkDomainAvailable(ctx context.Context, transaction store.Store, domain, excludeServiceID string) error { + existingService, err := transaction.GetServiceByDomain(ctx, domain) if err != nil { if sErr, ok := status.FromError(err); !ok || sErr.Type() != status.NotFound { return fmt.Errorf("failed to check existing service: %w", err) @@ -271,7 +271,7 @@ func (m *Manager) checkDomainAvailable(ctx context.Context, transaction store.St } if existingService != nil && existingService.ID != excludeServiceID { - return status.Errorf(status.AlreadyExists, "service with domain %s already exists", domain) + return status.Errorf(status.AlreadyExists, "domain already taken") } return nil @@ -352,7 +352,7 @@ func (m *Manager) persistServiceUpdate(ctx context.Context, accountID string, se } func (m *Manager) handleDomainChange(ctx context.Context, transaction store.Store, accountID string, service *service.Service) error { - if err := m.checkDomainAvailable(ctx, transaction, accountID, service.Domain, service.ID); err != nil { + if err := m.checkDomainAvailable(ctx, transaction, service.Domain, service.ID); err != nil { return err } @@ -805,7 +805,7 @@ func (m *Manager) deleteServiceFromPeer(ctx context.Context, accountID, peerID, // lookupPeerService finds a peer-initiated service by domain and validates ownership. func (m *Manager) lookupPeerService(ctx context.Context, accountID, peerID, domain string) (*service.Service, error) { - svc, err := m.store.GetServiceByDomain(ctx, accountID, domain) + svc, err := m.store.GetServiceByDomain(ctx, domain) if err != nil { return nil, err } diff --git a/management/internals/modules/reverseproxy/service/manager/manager_test.go b/management/internals/modules/reverseproxy/service/manager/manager_test.go index 196eead22..0cb8fa02a 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/service/manager/manager_test.go @@ -72,7 +72,6 @@ func TestInitializeServiceForCreate(t *testing.T) { func TestCheckDomainAvailable(t *testing.T) { ctx := context.Background() - accountID := "test-account" tests := []struct { name string @@ -88,7 +87,7 @@ func TestCheckDomainAvailable(t *testing.T) { excludeServiceID: "", setupMock: func(ms *store.MockStore) { ms.EXPECT(). - GetServiceByDomain(ctx, accountID, "available.com"). + GetServiceByDomain(ctx, "available.com"). Return(nil, status.Errorf(status.NotFound, "not found")) }, expectedError: false, @@ -99,7 +98,7 @@ func TestCheckDomainAvailable(t *testing.T) { excludeServiceID: "", setupMock: func(ms *store.MockStore) { ms.EXPECT(). - GetServiceByDomain(ctx, accountID, "exists.com"). + GetServiceByDomain(ctx, "exists.com"). Return(&rpservice.Service{ID: "existing-id", Domain: "exists.com"}, nil) }, expectedError: true, @@ -111,7 +110,7 @@ func TestCheckDomainAvailable(t *testing.T) { excludeServiceID: "service-123", setupMock: func(ms *store.MockStore) { ms.EXPECT(). - GetServiceByDomain(ctx, accountID, "exists.com"). + GetServiceByDomain(ctx, "exists.com"). Return(&rpservice.Service{ID: "service-123", Domain: "exists.com"}, nil) }, expectedError: false, @@ -122,7 +121,7 @@ func TestCheckDomainAvailable(t *testing.T) { excludeServiceID: "service-456", setupMock: func(ms *store.MockStore) { ms.EXPECT(). - GetServiceByDomain(ctx, accountID, "exists.com"). + GetServiceByDomain(ctx, "exists.com"). Return(&rpservice.Service{ID: "service-123", Domain: "exists.com"}, nil) }, expectedError: true, @@ -134,7 +133,7 @@ func TestCheckDomainAvailable(t *testing.T) { excludeServiceID: "", setupMock: func(ms *store.MockStore) { ms.EXPECT(). - GetServiceByDomain(ctx, accountID, "error.com"). + GetServiceByDomain(ctx, "error.com"). Return(nil, errors.New("database error")) }, expectedError: true, @@ -150,7 +149,7 @@ func TestCheckDomainAvailable(t *testing.T) { tt.setupMock(mockStore) mgr := &Manager{} - err := mgr.checkDomainAvailable(ctx, mockStore, accountID, tt.domain, tt.excludeServiceID) + err := mgr.checkDomainAvailable(ctx, mockStore, tt.domain, tt.excludeServiceID) if tt.expectedError { require.Error(t, err) @@ -168,7 +167,6 @@ func TestCheckDomainAvailable(t *testing.T) { func TestCheckDomainAvailable_EdgeCases(t *testing.T) { ctx := context.Background() - accountID := "test-account" t.Run("empty domain", func(t *testing.T) { ctrl := gomock.NewController(t) @@ -176,11 +174,11 @@ func TestCheckDomainAvailable_EdgeCases(t *testing.T) { mockStore := store.NewMockStore(ctrl) mockStore.EXPECT(). - GetServiceByDomain(ctx, accountID, ""). + GetServiceByDomain(ctx, ""). Return(nil, status.Errorf(status.NotFound, "not found")) mgr := &Manager{} - err := mgr.checkDomainAvailable(ctx, mockStore, accountID, "", "") + err := mgr.checkDomainAvailable(ctx, mockStore, "", "") assert.NoError(t, err) }) @@ -191,11 +189,11 @@ func TestCheckDomainAvailable_EdgeCases(t *testing.T) { mockStore := store.NewMockStore(ctrl) mockStore.EXPECT(). - GetServiceByDomain(ctx, accountID, "test.com"). + GetServiceByDomain(ctx, "test.com"). Return(&rpservice.Service{ID: "some-id", Domain: "test.com"}, nil) mgr := &Manager{} - err := mgr.checkDomainAvailable(ctx, mockStore, accountID, "test.com", "") + err := mgr.checkDomainAvailable(ctx, mockStore, "test.com", "") assert.Error(t, err) sErr, ok := status.FromError(err) @@ -209,11 +207,11 @@ func TestCheckDomainAvailable_EdgeCases(t *testing.T) { mockStore := store.NewMockStore(ctrl) mockStore.EXPECT(). - GetServiceByDomain(ctx, accountID, "nil.com"). + GetServiceByDomain(ctx, "nil.com"). Return(nil, nil) mgr := &Manager{} - err := mgr.checkDomainAvailable(ctx, mockStore, accountID, "nil.com", "") + err := mgr.checkDomainAvailable(ctx, mockStore, "nil.com", "") assert.NoError(t, err) }) @@ -241,7 +239,7 @@ func TestPersistNewService(t *testing.T) { // Create another mock for the transaction txMock := store.NewMockStore(ctrl) txMock.EXPECT(). - GetServiceByDomain(ctx, accountID, "new.com"). + GetServiceByDomain(ctx, "new.com"). Return(nil, status.Errorf(status.NotFound, "not found")) txMock.EXPECT(). CreateService(ctx, service). @@ -272,7 +270,7 @@ func TestPersistNewService(t *testing.T) { DoAndReturn(func(ctx context.Context, fn func(store.Store) error) error { txMock := store.NewMockStore(ctrl) txMock.EXPECT(). - GetServiceByDomain(ctx, accountID, "existing.com"). + GetServiceByDomain(ctx, "existing.com"). Return(&rpservice.Service{ID: "other-id", Domain: "existing.com"}, nil) return fn(txMock) @@ -814,7 +812,7 @@ func TestCreateServiceFromPeer(t *testing.T) { assert.NotEmpty(t, resp.ServiceURL, "service URL should be set") // Verify service is persisted in store - persisted, err := testStore.GetServiceByDomain(ctx, testAccountID, resp.Domain) + persisted, err := testStore.GetServiceByDomain(ctx, resp.Domain) require.NoError(t, err) assert.Equal(t, resp.Domain, persisted.Domain) assert.Equal(t, rpservice.SourceEphemeral, persisted.Source, "source should be ephemeral") @@ -977,7 +975,7 @@ func TestDeleteServiceFromPeer_ByDomain(t *testing.T) { require.NoError(t, err) // Verify service is deleted - _, err = testStore.GetServiceByDomain(ctx, testAccountID, resp.Domain) + _, err = testStore.GetServiceByDomain(ctx, resp.Domain) require.Error(t, err, "service should be deleted") }) @@ -1012,7 +1010,7 @@ func TestStopServiceFromPeer(t *testing.T) { err = mgr.StopServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain) require.NoError(t, err) - _, err = testStore.GetServiceByDomain(ctx, testAccountID, resp.Domain) + _, err = testStore.GetServiceByDomain(ctx, resp.Domain) require.Error(t, err, "service should be deleted") }) } @@ -1031,7 +1029,7 @@ func TestDeleteService_DeletesEphemeralExpose(t *testing.T) { require.NoError(t, err) assert.Equal(t, int64(1), count, "one ephemeral service should exist after create") - svc, err := testStore.GetServiceByDomain(ctx, testAccountID, resp.Domain) + svc, err := testStore.GetServiceByDomain(ctx, resp.Domain) require.NoError(t, err) err = mgr.DeleteService(ctx, testAccountID, testUserID, svc.ID) diff --git a/management/internals/modules/reverseproxy/service/service.go b/management/internals/modules/reverseproxy/service/service.go index cd9311b44..bfad7fe9a 100644 --- a/management/internals/modules/reverseproxy/service/service.go +++ b/management/internals/modules/reverseproxy/service/service.go @@ -134,7 +134,7 @@ type Service struct { ID string `gorm:"primaryKey"` AccountID string `gorm:"index"` Name string - Domain string `gorm:"index"` + Domain string `gorm:"type:varchar(255);uniqueIndex"` ProxyCluster string `gorm:"index"` Targets []*Target `gorm:"foreignKey:ServiceID;constraint:OnDelete:CASCADE"` Enabled bool @@ -535,15 +535,15 @@ var hopByHopHeaders = map[string]struct{}{ // reservedHeaders are set authoritatively by the proxy or control HTTP framing // and cannot be overridden. var reservedHeaders = map[string]struct{}{ - "Content-Length": {}, - "Content-Type": {}, - "Cookie": {}, - "Forwarded": {}, - "X-Forwarded-For": {}, - "X-Forwarded-Host": {}, - "X-Forwarded-Port": {}, - "X-Forwarded-Proto": {}, - "X-Real-Ip": {}, + "Content-Length": {}, + "Content-Type": {}, + "Cookie": {}, + "Forwarded": {}, + "X-Forwarded-For": {}, + "X-Forwarded-Host": {}, + "X-Forwarded-Port": {}, + "X-Forwarded-Proto": {}, + "X-Real-Ip": {}, } func validateTargetOptions(idx int, opts *TargetOptions) error { diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 8f147d915..5997c10e2 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -4977,9 +4977,9 @@ func (s *SqlStore) GetServiceByID(ctx context.Context, lockStrength LockingStren return service, nil } -func (s *SqlStore) GetServiceByDomain(ctx context.Context, accountID, domain string) (*rpservice.Service, error) { +func (s *SqlStore) GetServiceByDomain(ctx context.Context, domain string) (*rpservice.Service, error) { var service *rpservice.Service - result := s.db.Preload("Targets").Where("account_id = ? AND domain = ?", accountID, domain).First(&service) + result := s.db.Preload("Targets").Where("domain = ?", domain).First(&service) if result.Error != nil { if errors.Is(result.Error, gorm.ErrRecordNotFound) { return nil, status.Errorf(status.NotFound, "service with domain %s not found", domain) diff --git a/management/server/store/store.go b/management/server/store/store.go index 5123cde72..1fa99fd05 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -257,7 +257,7 @@ type Store interface { UpdateService(ctx context.Context, service *rpservice.Service) error DeleteService(ctx context.Context, accountID, serviceID string) error GetServiceByID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) (*rpservice.Service, error) - GetServiceByDomain(ctx context.Context, accountID, domain string) (*rpservice.Service, error) + GetServiceByDomain(ctx context.Context, domain string) (*rpservice.Service, error) GetServices(ctx context.Context, lockStrength LockingStrength) ([]*rpservice.Service, error) GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*rpservice.Service, error) diff --git a/management/server/store/store_mock.go b/management/server/store/store_mock.go index 414872fbb..130df4485 100644 --- a/management/server/store/store_mock.go +++ b/management/server/store/store_mock.go @@ -1932,18 +1932,18 @@ func (mr *MockStoreMockRecorder) GetRouteByID(ctx, lockStrength, accountID, rout } // GetServiceByDomain mocks base method. -func (m *MockStore) GetServiceByDomain(ctx context.Context, accountID, domain string) (*service.Service, error) { +func (m *MockStore) GetServiceByDomain(ctx context.Context, domain string) (*service.Service, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetServiceByDomain", ctx, accountID, domain) + ret := m.ctrl.Call(m, "GetServiceByDomain", ctx, domain) ret0, _ := ret[0].(*service.Service) ret1, _ := ret[1].(error) return ret0, ret1 } // GetServiceByDomain indicates an expected call of GetServiceByDomain. -func (mr *MockStoreMockRecorder) GetServiceByDomain(ctx, accountID, domain interface{}) *gomock.Call { +func (mr *MockStoreMockRecorder) GetServiceByDomain(ctx, domain interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceByDomain", reflect.TypeOf((*MockStore)(nil).GetServiceByDomain), ctx, accountID, domain) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceByDomain", reflect.TypeOf((*MockStore)(nil).GetServiceByDomain), ctx, domain) } // GetServiceByID mocks base method. From 3acd86e34686caaf43748f9d780cb2252c229110 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Mon, 9 Mar 2026 10:25:51 +0100 Subject: [PATCH 194/374] [client] "reset connection" error on wake from sleep (#5522) Capture engine reference before actCancel() in cleanupConnection(). After actCancel(), the connectWithRetryRuns goroutine sets engine to nil, causing connectClient.Stop() to skip shutdown. This allows the goroutine to set ErrResetConnection on the shared state after Down() clears it, causing the next Up() to fail. --- client/server/server.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/client/server/server.go b/client/server/server.go index cab94238f..2c7d5abc3 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -849,14 +849,26 @@ func (s *Server) cleanupConnection() error { if s.actCancel == nil { return ErrServiceNotUp } + + // Capture the engine reference before cancelling the context. + // After actCancel(), the connectWithRetryRuns goroutine wakes up + // and sets connectClient.engine = nil, causing connectClient.Stop() + // to skip the engine shutdown entirely. + var engine *internal.Engine + if s.connectClient != nil { + engine = s.connectClient.Engine() + } + s.actCancel() if s.connectClient == nil { return nil } - if err := s.connectClient.Stop(); err != nil { - return err + if engine != nil { + if err := engine.Stop(); err != nil { + return err + } } s.connectClient = nil From 30c02ab78c7bf05d610a636f7a2446144dd0ba01 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Mon, 9 Mar 2026 12:23:06 +0100 Subject: [PATCH 195/374] [management] use the cache for the pkce state (#5516) --- .../service/manager/manager_test.go | 15 +++-- management/internals/server/boot.go | 12 +++- management/internals/server/server.go | 1 - .../internals/shared/grpc/pkce_verifier.go | 61 +++++++++++++++++++ management/internals/shared/grpc/proxy.go | 59 +++--------------- .../internals/shared/grpc/proxy_test.go | 55 +++++++++++++---- .../shared/grpc/validate_session_test.go | 5 +- management/server/account_test.go | 2 +- .../proxy/auth_callback_integration_test.go | 4 ++ .../testing/testing_tools/channel/channel.go | 6 +- proxy/management_integration_test.go | 4 ++ 11 files changed, 152 insertions(+), 72 deletions(-) create mode 100644 management/internals/shared/grpc/pkce_verifier.go diff --git a/management/internals/modules/reverseproxy/service/manager/manager_test.go b/management/internals/modules/reverseproxy/service/manager/manager_test.go index 0cb8fa02a..ba4e1c805 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/service/manager/manager_test.go @@ -423,8 +423,9 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) { t.Helper() tokenStore, err := nbgrpc.NewOneTimeTokenStore(context.Background(), 1*time.Hour, 10*time.Minute, 100) require.NoError(t, err) - srv := nbgrpc.NewProxyServiceServer(nil, tokenStore, nbgrpc.ProxyOIDCConfig{}, nil, nil, nil) - t.Cleanup(srv.Close) + pkceStore, err := nbgrpc.NewPKCEVerifierStore(context.Background(), 10*time.Minute, 10*time.Minute, 100) + require.NoError(t, err) + srv := nbgrpc.NewProxyServiceServer(nil, tokenStore, pkceStore, nbgrpc.ProxyOIDCConfig{}, nil, nil, nil) return srv } @@ -703,8 +704,9 @@ func setupIntegrationTest(t *testing.T) (*Manager, store.Store) { tokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, 1*time.Hour, 10*time.Minute, 100) require.NoError(t, err) - proxySrv := nbgrpc.NewProxyServiceServer(nil, tokenStore, nbgrpc.ProxyOIDCConfig{}, nil, nil, nil) - t.Cleanup(proxySrv.Close) + pkceStore, err := nbgrpc.NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) + require.NoError(t, err) + proxySrv := nbgrpc.NewProxyServiceServer(nil, tokenStore, pkceStore, nbgrpc.ProxyOIDCConfig{}, nil, nil, nil) proxyController, err := proxymanager.NewGRPCController(proxySrv, noop.NewMeterProvider().Meter("")) require.NoError(t, err) @@ -1134,8 +1136,9 @@ func TestDeleteService_DeletesTargets(t *testing.T) { tokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, 1*time.Hour, 10*time.Minute, 100) require.NoError(t, err) - proxySrv := nbgrpc.NewProxyServiceServer(nil, tokenStore, nbgrpc.ProxyOIDCConfig{}, nil, nil, nil) - t.Cleanup(proxySrv.Close) + pkceStore, err := nbgrpc.NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) + require.NoError(t, err) + proxySrv := nbgrpc.NewProxyServiceServer(nil, tokenStore, pkceStore, nbgrpc.ProxyOIDCConfig{}, nil, nil, nil) proxyController, err := proxymanager.NewGRPCController(proxySrv, noop.NewMeterProvider().Meter("")) require.NoError(t, err) diff --git a/management/internals/server/boot.go b/management/internals/server/boot.go index 2049f0051..eb13a15e3 100644 --- a/management/internals/server/boot.go +++ b/management/internals/server/boot.go @@ -168,7 +168,7 @@ func (s *BaseServer) GRPCServer() *grpc.Server { func (s *BaseServer) ReverseProxyGRPCServer() *nbgrpc.ProxyServiceServer { return Create(s, func() *nbgrpc.ProxyServiceServer { - proxyService := nbgrpc.NewProxyServiceServer(s.AccessLogsManager(), s.ProxyTokenStore(), s.proxyOIDCConfig(), s.PeersManager(), s.UsersManager(), s.ProxyManager()) + proxyService := nbgrpc.NewProxyServiceServer(s.AccessLogsManager(), s.ProxyTokenStore(), s.PKCEVerifierStore(), s.proxyOIDCConfig(), s.PeersManager(), s.UsersManager(), s.ProxyManager()) s.AfterInit(func(s *BaseServer) { proxyService.SetServiceManager(s.ServiceManager()) proxyService.SetProxyController(s.ServiceProxyController()) @@ -203,6 +203,16 @@ func (s *BaseServer) ProxyTokenStore() *nbgrpc.OneTimeTokenStore { }) } +func (s *BaseServer) PKCEVerifierStore() *nbgrpc.PKCEVerifierStore { + return Create(s, func() *nbgrpc.PKCEVerifierStore { + pkceStore, err := nbgrpc.NewPKCEVerifierStore(context.Background(), 10*time.Minute, 10*time.Minute, 100) + if err != nil { + log.Fatalf("failed to create PKCE verifier store: %v", err) + } + return pkceStore + }) +} + func (s *BaseServer) AccessLogsManager() accesslogs.Manager { return Create(s, func() accesslogs.Manager { accessLogManager := accesslogsmanager.NewManager(s.Store(), s.PermissionsManager(), s.GeoLocationManager()) diff --git a/management/internals/server/server.go b/management/internals/server/server.go index 573983a79..9b8716da1 100644 --- a/management/internals/server/server.go +++ b/management/internals/server/server.go @@ -248,7 +248,6 @@ func (s *BaseServer) Stop() error { _ = s.certManager.Listener().Close() } s.GRPCServer().Stop() - s.ReverseProxyGRPCServer().Close() if s.proxyAuthClose != nil { s.proxyAuthClose() s.proxyAuthClose = nil diff --git a/management/internals/shared/grpc/pkce_verifier.go b/management/internals/shared/grpc/pkce_verifier.go new file mode 100644 index 000000000..441e8b051 --- /dev/null +++ b/management/internals/shared/grpc/pkce_verifier.go @@ -0,0 +1,61 @@ +package grpc + +import ( + "context" + "fmt" + "time" + + "github.com/eko/gocache/lib/v4/cache" + "github.com/eko/gocache/lib/v4/store" + log "github.com/sirupsen/logrus" + + nbcache "github.com/netbirdio/netbird/management/server/cache" +) + +// PKCEVerifierStore manages PKCE verifiers for OAuth flows. +// Supports both in-memory and Redis storage via NB_IDP_CACHE_REDIS_ADDRESS env var. +type PKCEVerifierStore struct { + cache *cache.Cache[string] + ctx context.Context +} + +// NewPKCEVerifierStore creates a PKCE verifier store with automatic backend selection +func NewPKCEVerifierStore(ctx context.Context, maxTimeout, cleanupInterval time.Duration, maxConn int) (*PKCEVerifierStore, error) { + cacheStore, err := nbcache.NewStore(ctx, maxTimeout, cleanupInterval, maxConn) + if err != nil { + return nil, fmt.Errorf("failed to create cache store: %w", err) + } + + return &PKCEVerifierStore{ + cache: cache.New[string](cacheStore), + ctx: ctx, + }, nil +} + +// Store saves a PKCE verifier associated with an OAuth state parameter. +// The verifier is stored with the specified TTL and will be automatically deleted after expiration. +func (s *PKCEVerifierStore) Store(state, verifier string, ttl time.Duration) error { + if err := s.cache.Set(s.ctx, state, verifier, store.WithExpiration(ttl)); err != nil { + return fmt.Errorf("failed to store PKCE verifier: %w", err) + } + + log.Debugf("Stored PKCE verifier for state (expires in %s)", ttl) + return nil +} + +// LoadAndDelete retrieves and removes a PKCE verifier for the given state. +// Returns the verifier and true if found, or empty string and false if not found. +// This enforces single-use semantics for PKCE verifiers. +func (s *PKCEVerifierStore) LoadAndDelete(state string) (string, bool) { + verifier, err := s.cache.Get(s.ctx, state) + if err != nil { + log.Debugf("PKCE verifier not found for state") + return "", false + } + + if err := s.cache.Delete(s.ctx, state); err != nil { + log.Warnf("Failed to delete PKCE verifier for state: %v", err) + } + + return verifier, true +} diff --git a/management/internals/shared/grpc/proxy.go b/management/internals/shared/grpc/proxy.go index 308da5e2f..e2d0f1abe 100644 --- a/management/internals/shared/grpc/proxy.go +++ b/management/internals/shared/grpc/proxy.go @@ -82,20 +82,12 @@ type ProxyServiceServer struct { // OIDC configuration for proxy authentication oidcConfig ProxyOIDCConfig - // TODO: use database to store these instead? - // pkceVerifiers stores PKCE code verifiers keyed by OAuth state. - // Entries expire after pkceVerifierTTL to prevent unbounded growth. - pkceVerifiers sync.Map - pkceCleanupCancel context.CancelFunc + // Store for PKCE verifiers + pkceVerifierStore *PKCEVerifierStore } const pkceVerifierTTL = 10 * time.Minute -type pkceEntry struct { - verifier string - createdAt time.Time -} - // proxyConnection represents a connected proxy type proxyConnection struct { proxyID string @@ -107,42 +99,21 @@ type proxyConnection struct { } // NewProxyServiceServer creates a new proxy service server. -func NewProxyServiceServer(accessLogMgr accesslogs.Manager, tokenStore *OneTimeTokenStore, oidcConfig ProxyOIDCConfig, peersManager peers.Manager, usersManager users.Manager, proxyMgr proxy.Manager) *ProxyServiceServer { - ctx, cancel := context.WithCancel(context.Background()) +func NewProxyServiceServer(accessLogMgr accesslogs.Manager, tokenStore *OneTimeTokenStore, pkceStore *PKCEVerifierStore, oidcConfig ProxyOIDCConfig, peersManager peers.Manager, usersManager users.Manager, proxyMgr proxy.Manager) *ProxyServiceServer { + ctx := context.Background() s := &ProxyServiceServer{ accessLogManager: accessLogMgr, oidcConfig: oidcConfig, tokenStore: tokenStore, + pkceVerifierStore: pkceStore, peersManager: peersManager, usersManager: usersManager, proxyManager: proxyMgr, - pkceCleanupCancel: cancel, } - go s.cleanupPKCEVerifiers(ctx) go s.cleanupStaleProxies(ctx) return s } -// cleanupPKCEVerifiers periodically removes expired PKCE verifiers. -func (s *ProxyServiceServer) cleanupPKCEVerifiers(ctx context.Context) { - ticker := time.NewTicker(pkceVerifierTTL) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - now := time.Now() - s.pkceVerifiers.Range(func(key, value any) bool { - if entry, ok := value.(pkceEntry); ok && now.Sub(entry.createdAt) > pkceVerifierTTL { - s.pkceVerifiers.Delete(key) - } - return true - }) - } - } -} - // cleanupStaleProxies periodically removes proxies that haven't sent heartbeat in 10 minutes func (s *ProxyServiceServer) cleanupStaleProxies(ctx context.Context) { ticker := time.NewTicker(5 * time.Minute) @@ -159,11 +130,6 @@ func (s *ProxyServiceServer) cleanupStaleProxies(ctx context.Context) { } } -// Close stops background goroutines. -func (s *ProxyServiceServer) Close() { - s.pkceCleanupCancel() -} - func (s *ProxyServiceServer) SetServiceManager(manager rpservice.Manager) { s.serviceManager = manager } @@ -790,7 +756,10 @@ func (s *ProxyServiceServer) GetOIDCURL(ctx context.Context, req *proto.GetOIDCU state := fmt.Sprintf("%s|%s|%s", base64.URLEncoding.EncodeToString([]byte(redirectURL.String())), nonceB64, hmacSum) codeVerifier := oauth2.GenerateVerifier() - s.pkceVerifiers.Store(state, pkceEntry{verifier: codeVerifier, createdAt: time.Now()}) + if err := s.pkceVerifierStore.Store(state, codeVerifier, pkceVerifierTTL); err != nil { + log.WithContext(ctx).Errorf("failed to store PKCE verifier: %v", err) + return nil, status.Errorf(codes.Internal, "store PKCE verifier: %v", err) + } return &proto.GetOIDCURLResponse{ Url: (&oauth2.Config{ @@ -827,18 +796,10 @@ func (s *ProxyServiceServer) generateHMAC(input string) string { // ValidateState validates the state parameter from an OAuth callback. // Returns the original redirect URL if valid, or an error if invalid. func (s *ProxyServiceServer) ValidateState(state string) (verifier, redirectURL string, err error) { - v, ok := s.pkceVerifiers.LoadAndDelete(state) + verifier, ok := s.pkceVerifierStore.LoadAndDelete(state) if !ok { return "", "", errors.New("no verifier for state") } - entry, ok := v.(pkceEntry) - if !ok { - return "", "", errors.New("invalid verifier for state") - } - if time.Since(entry.createdAt) > pkceVerifierTTL { - return "", "", errors.New("PKCE verifier expired") - } - verifier = entry.verifier // State format: base64(redirectURL)|nonce|hmac(redirectURL|nonce) parts := strings.Split(state, "|") diff --git a/management/internals/shared/grpc/proxy_test.go b/management/internals/shared/grpc/proxy_test.go index ddeadac5a..b7abb28b6 100644 --- a/management/internals/shared/grpc/proxy_test.go +++ b/management/internals/shared/grpc/proxy_test.go @@ -5,11 +5,10 @@ import ( "crypto/rand" "encoding/base64" "strings" + "sync" "testing" "time" - "sync" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -94,11 +93,16 @@ func drainChannel(ch chan *proto.GetMappingUpdateResponse) *proto.GetMappingUpda } func TestSendServiceUpdateToCluster_UniqueTokensPerProxy(t *testing.T) { - tokenStore, err := NewOneTimeTokenStore(context.Background(), time.Hour, 10*time.Minute, 100) + ctx := context.Background() + tokenStore, err := NewOneTimeTokenStore(ctx, time.Hour, 10*time.Minute, 100) + require.NoError(t, err) + + pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) require.NoError(t, err) s := &ProxyServiceServer{ - tokenStore: tokenStore, + tokenStore: tokenStore, + pkceVerifierStore: pkceStore, } s.SetProxyController(newTestProxyController()) @@ -151,11 +155,16 @@ func TestSendServiceUpdateToCluster_UniqueTokensPerProxy(t *testing.T) { } func TestSendServiceUpdateToCluster_DeleteNoToken(t *testing.T) { - tokenStore, err := NewOneTimeTokenStore(context.Background(), time.Hour, 10*time.Minute, 100) + ctx := context.Background() + tokenStore, err := NewOneTimeTokenStore(ctx, time.Hour, 10*time.Minute, 100) + require.NoError(t, err) + + pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) require.NoError(t, err) s := &ProxyServiceServer{ - tokenStore: tokenStore, + tokenStore: tokenStore, + pkceVerifierStore: pkceStore, } s.SetProxyController(newTestProxyController()) @@ -185,11 +194,16 @@ func TestSendServiceUpdateToCluster_DeleteNoToken(t *testing.T) { } func TestSendServiceUpdate_UniqueTokensPerProxy(t *testing.T) { - tokenStore, err := NewOneTimeTokenStore(context.Background(), time.Hour, 10*time.Minute, 100) + ctx := context.Background() + tokenStore, err := NewOneTimeTokenStore(ctx, time.Hour, 10*time.Minute, 100) + require.NoError(t, err) + + pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) require.NoError(t, err) s := &ProxyServiceServer{ - tokenStore: tokenStore, + tokenStore: tokenStore, + pkceVerifierStore: pkceStore, } s.SetProxyController(newTestProxyController()) @@ -241,10 +255,15 @@ func generateState(s *ProxyServiceServer, redirectURL string) string { } func TestOAuthState_NeverTheSame(t *testing.T) { + ctx := context.Background() + pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) + require.NoError(t, err) + s := &ProxyServiceServer{ oidcConfig: ProxyOIDCConfig{ HMACKey: []byte("test-hmac-key"), }, + pkceVerifierStore: pkceStore, } redirectURL := "https://app.example.com/callback" @@ -265,31 +284,43 @@ func TestOAuthState_NeverTheSame(t *testing.T) { } func TestValidateState_RejectsOldTwoPartFormat(t *testing.T) { + ctx := context.Background() + pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) + require.NoError(t, err) + s := &ProxyServiceServer{ oidcConfig: ProxyOIDCConfig{ HMACKey: []byte("test-hmac-key"), }, + pkceVerifierStore: pkceStore, } // Old format had only 2 parts: base64(url)|hmac - s.pkceVerifiers.Store("base64url|hmac", pkceEntry{verifier: "test", createdAt: time.Now()}) + err = s.pkceVerifierStore.Store("base64url|hmac", "test", 10*time.Minute) + require.NoError(t, err) - _, _, err := s.ValidateState("base64url|hmac") + _, _, err = s.ValidateState("base64url|hmac") require.Error(t, err) assert.Contains(t, err.Error(), "invalid state format") } func TestValidateState_RejectsInvalidHMAC(t *testing.T) { + ctx := context.Background() + pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) + require.NoError(t, err) + s := &ProxyServiceServer{ oidcConfig: ProxyOIDCConfig{ HMACKey: []byte("test-hmac-key"), }, + pkceVerifierStore: pkceStore, } // Store with tampered HMAC - s.pkceVerifiers.Store("dGVzdA==|nonce|wrong-hmac", pkceEntry{verifier: "test", createdAt: time.Now()}) + err = s.pkceVerifierStore.Store("dGVzdA==|nonce|wrong-hmac", "test", 10*time.Minute) + require.NoError(t, err) - _, _, err := s.ValidateState("dGVzdA==|nonce|wrong-hmac") + _, _, err = s.ValidateState("dGVzdA==|nonce|wrong-hmac") require.Error(t, err) assert.Contains(t, err.Error(), "invalid state signature") } diff --git a/management/internals/shared/grpc/validate_session_test.go b/management/internals/shared/grpc/validate_session_test.go index 124ddf620..647e8443b 100644 --- a/management/internals/shared/grpc/validate_session_test.go +++ b/management/internals/shared/grpc/validate_session_test.go @@ -41,7 +41,10 @@ func setupValidateSessionTest(t *testing.T) *validateSessionTestSetup { tokenStore, err := NewOneTimeTokenStore(ctx, time.Minute, 10*time.Minute, 100) require.NoError(t, err) - proxyService := NewProxyServiceServer(nil, tokenStore, ProxyOIDCConfig{}, nil, usersManager, proxyManager) + pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) + require.NoError(t, err) + + proxyService := NewProxyServiceServer(nil, tokenStore, pkceStore, ProxyOIDCConfig{}, nil, usersManager, proxyManager) proxyService.SetServiceManager(serviceManager) createTestProxies(t, ctx, testStore) diff --git a/management/server/account_test.go b/management/server/account_test.go index a073d4fca..fdec43617 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -3133,7 +3133,7 @@ func createManager(t testing.TB) (*DefaultAccountManager, *update_channel.PeersU return nil, nil, err } - proxyGrpcServer := nbgrpc.NewProxyServiceServer(nil, nil, nbgrpc.ProxyOIDCConfig{}, peersManager, nil, proxyManager) + proxyGrpcServer := nbgrpc.NewProxyServiceServer(nil, nil, nil, nbgrpc.ProxyOIDCConfig{}, peersManager, nil, proxyManager) proxyController, err := proxymanager.NewGRPCController(proxyGrpcServer, noop.Meter{}) if err != nil { return nil, nil, err diff --git a/management/server/http/handlers/proxy/auth_callback_integration_test.go b/management/server/http/handlers/proxy/auth_callback_integration_test.go index c7fd08da8..3bed54e80 100644 --- a/management/server/http/handlers/proxy/auth_callback_integration_test.go +++ b/management/server/http/handlers/proxy/auth_callback_integration_test.go @@ -193,6 +193,9 @@ func setupAuthCallbackTest(t *testing.T) *testSetup { tokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, time.Minute, 10*time.Minute, 100) require.NoError(t, err) + pkceStore, err := nbgrpc.NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) + require.NoError(t, err) + usersManager := users.NewManager(testStore) oidcConfig := nbgrpc.ProxyOIDCConfig{ @@ -206,6 +209,7 @@ func setupAuthCallbackTest(t *testing.T) *testSetup { proxyService := nbgrpc.NewProxyServiceServer( &testAccessLogManager{}, tokenStore, + pkceStore, oidcConfig, nil, usersManager, diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index 1d74f88d5..5e33ad652 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -98,12 +98,16 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee if err != nil { t.Fatalf("Failed to create proxy token store: %v", err) } + pkceverifierStore, err := nbgrpc.NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) + if err != nil { + t.Fatalf("Failed to create PKCE verifier store: %v", err) + } noopMeter := noop.NewMeterProvider().Meter("") proxyMgr, err := proxymanager.NewManager(store, noopMeter) if err != nil { t.Fatalf("Failed to create proxy manager: %v", err) } - proxyServiceServer := nbgrpc.NewProxyServiceServer(accessLogsManager, proxyTokenStore, nbgrpc.ProxyOIDCConfig{}, peersManager, userManager, proxyMgr) + proxyServiceServer := nbgrpc.NewProxyServiceServer(accessLogsManager, proxyTokenStore, pkceverifierStore, nbgrpc.ProxyOIDCConfig{}, peersManager, userManager, proxyMgr) domainManager := manager.NewManager(store, proxyMgr, permissionsManager) serviceProxyController, err := proxymanager.NewGRPCController(proxyServiceServer, noopMeter) if err != nil { diff --git a/proxy/management_integration_test.go b/proxy/management_integration_test.go index 3e5a21400..6a0ecce30 100644 --- a/proxy/management_integration_test.go +++ b/proxy/management_integration_test.go @@ -116,6 +116,9 @@ func setupIntegrationTest(t *testing.T) *integrationTestSetup { tokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, 5*time.Minute, 10*time.Minute, 100) require.NoError(t, err) + pkceStore, err := nbgrpc.NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) + require.NoError(t, err) + // Create real users manager usersManager := users.NewManager(testStore) @@ -131,6 +134,7 @@ func setupIntegrationTest(t *testing.T) *integrationTestSetup { proxyService := nbgrpc.NewProxyServiceServer( &testAccessLogManager{}, tokenStore, + pkceStore, oidcConfig, nil, usersManager, From 11eb725ac8c56fb2430f0b940903ccba9c15c781 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Mon, 9 Mar 2026 14:56:46 +0100 Subject: [PATCH 196/374] [management] only count login request duration for successful logins (#5545) --- management/internals/shared/grpc/server.go | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index a07cafe90..6e8358f02 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -330,13 +330,12 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S s.secretsManager.SetupRefresh(ctx, accountID, peer.ID) - if s.appMetrics != nil { - s.appMetrics.GRPCMetrics().CountSyncRequestDuration(time.Since(reqStart), accountID) - } - unlock() unlock = nil + if s.appMetrics != nil { + s.appMetrics.GRPCMetrics().CountSyncRequestDuration(time.Since(reqStart), accountID) + } log.WithContext(ctx).Debugf("Sync took %s", time.Since(reqStart)) s.syncSem.Add(-1) @@ -743,13 +742,6 @@ func (s *Server) Login(ctx context.Context, req *proto.EncryptedMessage) (*proto log.WithContext(ctx).Debugf("Login request from peer [%s] [%s]", req.WgPubKey, sRealIP) - defer func() { - if s.appMetrics != nil { - s.appMetrics.GRPCMetrics().CountLoginRequestDuration(time.Since(reqStart), accountID) - } - log.WithContext(ctx).Debugf("Login took %s", time.Since(reqStart)) - }() - if loginReq.GetMeta() == nil { msg := status.Errorf(codes.FailedPrecondition, "peer system meta has to be provided to log in. Peer %s, remote addr %s", peerKey.String(), realIP) @@ -799,6 +791,11 @@ func (s *Server) Login(ctx context.Context, req *proto.EncryptedMessage) (*proto return nil, status.Errorf(codes.Internal, "failed logging in peer") } + if s.appMetrics != nil { + s.appMetrics.GRPCMetrics().CountLoginRequestDuration(time.Since(reqStart), accountID) + } + log.WithContext(ctx).Debugf("Login took %s", time.Since(reqStart)) + return &proto.EncryptedMessage{ WgPubKey: key.PublicKey().String(), Body: encryptedResp, From 15aa6bae1b393459fe2300b815be1fe69331b20e Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Mon, 9 Mar 2026 18:39:11 +0100 Subject: [PATCH 197/374] [client] Fix exit node menu not refreshing on Windows (#5553) * [client] Fix exit node menu not refreshing on Windows TrayOpenedCh is not implemented in the systray library on Windows, so exit nodes were never refreshed after the initial connect. Combined with the management sync not having populated routes yet when the Connected status fires, this caused the exit node menu to remain empty permanently after disconnect/reconnect cycles. Add a background poller on Windows that refreshes exit nodes while connected, with fast initial polling to catch routes from management sync followed by a steady 10s interval. On macOS/Linux, TrayOpenedCh continues to handle refreshes on each tray open. Also fix a data race on connectClient assignment in the server's connect() method and add nil checks in CleanState/DeleteState to prevent panics when connectClient is nil. * Remove unused exitNodeIDs * Remove unused exitNodeState struct --- client/server/server.go | 11 +- client/server/server_connect_test.go | 187 +++++++++++++++++++++++++++ client/server/state.go | 4 +- client/ui/client_ui.go | 5 +- client/ui/event_handler.go | 3 +- client/ui/network.go | 95 +++++++++----- 6 files changed, 267 insertions(+), 38 deletions(-) create mode 100644 client/server/server_connect_test.go diff --git a/client/server/server.go b/client/server/server.go index 2c7d5abc3..69d79d9cd 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -1625,9 +1625,14 @@ func (s *Server) GetFeatures(ctx context.Context, msg *proto.GetFeaturesRequest) func (s *Server) connect(ctx context.Context, config *profilemanager.Config, statusRecorder *peer.Status, doInitialAutoUpdate bool, runningChan chan struct{}) error { log.Tracef("running client connection") - s.connectClient = internal.NewConnectClient(ctx, config, statusRecorder, doInitialAutoUpdate) - s.connectClient.SetSyncResponsePersistence(s.persistSyncResponse) - if err := s.connectClient.Run(runningChan, s.logFile); err != nil { + client := internal.NewConnectClient(ctx, config, statusRecorder, doInitialAutoUpdate) + client.SetSyncResponsePersistence(s.persistSyncResponse) + + s.mutex.Lock() + s.connectClient = client + s.mutex.Unlock() + + if err := client.Run(runningChan, s.logFile); err != nil { return err } return nil diff --git a/client/server/server_connect_test.go b/client/server/server_connect_test.go new file mode 100644 index 000000000..8d31c2ae6 --- /dev/null +++ b/client/server/server_connect_test.go @@ -0,0 +1,187 @@ +package server + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/client/internal" + "github.com/netbirdio/netbird/client/internal/peer" + "github.com/netbirdio/netbird/client/proto" +) + +func newTestServer() *Server { + return &Server{ + rootCtx: context.Background(), + statusRecorder: peer.NewRecorder(""), + } +} + +func newDummyConnectClient(ctx context.Context) *internal.ConnectClient { + return internal.NewConnectClient(ctx, nil, nil, false) +} + +// TestConnectSetsClientWithMutex validates that connect() sets s.connectClient +// under mutex protection so concurrent readers see a consistent value. +func TestConnectSetsClientWithMutex(t *testing.T) { + s := newTestServer() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Manually simulate what connect() does (without calling Run which panics without full setup) + client := newDummyConnectClient(ctx) + + s.mutex.Lock() + s.connectClient = client + s.mutex.Unlock() + + // Verify the assignment is visible under mutex + s.mutex.Lock() + assert.Equal(t, client, s.connectClient, "connectClient should be set") + s.mutex.Unlock() +} + +// TestConcurrentConnectClientAccess validates that concurrent reads of +// s.connectClient under mutex don't race with a write. +func TestConcurrentConnectClientAccess(t *testing.T) { + s := newTestServer() + ctx := context.Background() + client := newDummyConnectClient(ctx) + + var wg sync.WaitGroup + nilCount := 0 + setCount := 0 + var mu sync.Mutex + + // Start readers + for i := 0; i < 50; i++ { + wg.Add(1) + go func() { + defer wg.Done() + s.mutex.Lock() + c := s.connectClient + s.mutex.Unlock() + + mu.Lock() + defer mu.Unlock() + if c == nil { + nilCount++ + } else { + setCount++ + } + }() + } + + // Simulate connect() writing under mutex + time.Sleep(5 * time.Millisecond) + s.mutex.Lock() + s.connectClient = client + s.mutex.Unlock() + + wg.Wait() + + assert.Equal(t, 50, nilCount+setCount, "all goroutines should complete without panic") +} + +// TestCleanupConnection_ClearsConnectClient validates that cleanupConnection +// properly nils out connectClient. +func TestCleanupConnection_ClearsConnectClient(t *testing.T) { + s := newTestServer() + _, cancel := context.WithCancel(context.Background()) + s.actCancel = cancel + + s.connectClient = newDummyConnectClient(context.Background()) + s.clientRunning = true + + err := s.cleanupConnection() + require.NoError(t, err) + + assert.Nil(t, s.connectClient, "connectClient should be nil after cleanup") +} + +// TestCleanState_NilConnectClient validates that CleanState doesn't panic +// when connectClient is nil. +func TestCleanState_NilConnectClient(t *testing.T) { + s := newTestServer() + s.connectClient = nil + s.profileManager = nil // will cause error if it tries to proceed past the nil check + + // Should not panic — the nil check should prevent calling Status() on nil + assert.NotPanics(t, func() { + _, _ = s.CleanState(context.Background(), &proto.CleanStateRequest{All: true}) + }) +} + +// TestDeleteState_NilConnectClient validates that DeleteState doesn't panic +// when connectClient is nil. +func TestDeleteState_NilConnectClient(t *testing.T) { + s := newTestServer() + s.connectClient = nil + s.profileManager = nil + + assert.NotPanics(t, func() { + _, _ = s.DeleteState(context.Background(), &proto.DeleteStateRequest{All: true}) + }) +} + +// TestDownThenUp_StaleRunningChan documents the known state issue where +// clientRunningChan from a previous connection is already closed, causing +// waitForUp() to return immediately on reconnect. +func TestDownThenUp_StaleRunningChan(t *testing.T) { + s := newTestServer() + + // Simulate state after a successful connection + s.clientRunning = true + s.clientRunningChan = make(chan struct{}) + close(s.clientRunningChan) // closed when engine started + s.clientGiveUpChan = make(chan struct{}) + s.connectClient = newDummyConnectClient(context.Background()) + + _, cancel := context.WithCancel(context.Background()) + s.actCancel = cancel + + // Simulate Down(): cleanupConnection sets connectClient = nil + s.mutex.Lock() + err := s.cleanupConnection() + s.mutex.Unlock() + require.NoError(t, err) + + // After cleanup: connectClient is nil, clientRunning still true + // (goroutine hasn't exited yet) + s.mutex.Lock() + assert.Nil(t, s.connectClient, "connectClient should be nil after cleanup") + assert.True(t, s.clientRunning, "clientRunning still true until goroutine exits") + s.mutex.Unlock() + + // waitForUp() returns immediately due to stale closed clientRunningChan + ctx, ctxCancel := context.WithTimeout(context.Background(), 2*time.Second) + defer ctxCancel() + + waitDone := make(chan error, 1) + go func() { + _, err := s.waitForUp(ctx) + waitDone <- err + }() + + select { + case err := <-waitDone: + assert.NoError(t, err, "waitForUp returns success on stale channel") + // But connectClient is still nil — this is the stale state issue + s.mutex.Lock() + assert.Nil(t, s.connectClient, "connectClient is nil despite waitForUp success") + s.mutex.Unlock() + case <-time.After(1 * time.Second): + t.Fatal("waitForUp should have returned immediately due to stale closed channel") + } +} + +// TestConnectClient_EngineNilOnFreshClient validates that a newly created +// ConnectClient has nil Engine (before Run is called). +func TestConnectClient_EngineNilOnFreshClient(t *testing.T) { + client := newDummyConnectClient(context.Background()) + assert.Nil(t, client.Engine(), "engine should be nil on fresh ConnectClient") +} diff --git a/client/server/state.go b/client/server/state.go index 1cf85cd37..8dca6bde1 100644 --- a/client/server/state.go +++ b/client/server/state.go @@ -39,7 +39,7 @@ func (s *Server) ListStates(_ context.Context, _ *proto.ListStatesRequest) (*pro // CleanState handles cleaning of states (performing cleanup operations) func (s *Server) CleanState(ctx context.Context, req *proto.CleanStateRequest) (*proto.CleanStateResponse, error) { - if s.connectClient.Status() == internal.StatusConnected || s.connectClient.Status() == internal.StatusConnecting { + if s.connectClient != nil && (s.connectClient.Status() == internal.StatusConnected || s.connectClient.Status() == internal.StatusConnecting) { return nil, status.Errorf(codes.FailedPrecondition, "cannot clean state while connecting or connected, run 'netbird down' first.") } @@ -82,7 +82,7 @@ func (s *Server) CleanState(ctx context.Context, req *proto.CleanStateRequest) ( // DeleteState handles deletion of states without cleanup func (s *Server) DeleteState(ctx context.Context, req *proto.DeleteStateRequest) (*proto.DeleteStateResponse, error) { - if s.connectClient.Status() == internal.StatusConnected || s.connectClient.Status() == internal.StatusConnecting { + if s.connectClient != nil && (s.connectClient.Status() == internal.StatusConnected || s.connectClient.Status() == internal.StatusConnecting) { return nil, status.Errorf(codes.FailedPrecondition, "cannot clean state while connecting or connected, run 'netbird down' first.") } diff --git a/client/ui/client_ui.go b/client/ui/client_ui.go index 0290e17d5..7af00cd20 100644 --- a/client/ui/client_ui.go +++ b/client/ui/client_ui.go @@ -323,7 +323,7 @@ type serviceClient struct { exitNodeMu sync.Mutex mExitNodeItems []menuHandler - exitNodeStates []exitNodeState + exitNodeRetryCancel context.CancelFunc mExitNodeDeselectAll *systray.MenuItem logFile string wLoginURL fyne.Window @@ -924,7 +924,7 @@ func (s *serviceClient) updateStatus() error { s.mDown.Enable() s.mNetworks.Enable() s.mExitNode.Enable() - go s.updateExitNodes() + s.startExitNodeRefresh() systrayIconState = true case status.Status == string(internal.StatusConnecting): s.setConnectingStatus() @@ -985,6 +985,7 @@ func (s *serviceClient) setDisconnectedStatus() { s.mUp.Enable() s.mNetworks.Disable() s.mExitNode.Disable() + s.cancelExitNodeRetry() go s.updateExitNodes() } diff --git a/client/ui/event_handler.go b/client/ui/event_handler.go index 2216c8aeb..6adf8778c 100644 --- a/client/ui/event_handler.go +++ b/client/ui/event_handler.go @@ -100,8 +100,7 @@ func (h *eventHandler) handleConnectClick() { func (h *eventHandler) handleDisconnectClick() { h.client.mDown.Disable() - - h.client.exitNodeStates = []exitNodeState{} + h.client.cancelExitNodeRetry() if h.client.connectCancel != nil { log.Debugf("cancelling ongoing connect operation") diff --git a/client/ui/network.go b/client/ui/network.go index 9a5ad7662..ed03f5ada 100644 --- a/client/ui/network.go +++ b/client/ui/network.go @@ -6,7 +6,6 @@ import ( "context" "fmt" "runtime" - "slices" "sort" "strings" "time" @@ -34,11 +33,6 @@ const ( type filter string -type exitNodeState struct { - id string - selected bool -} - func (s *serviceClient) showNetworksUI() { s.wNetworks = s.app.NewWindow("Networks") s.wNetworks.SetOnClosed(s.cancel) @@ -335,16 +329,75 @@ func (s *serviceClient) updateNetworksBasedOnDisplayTab(tabs *container.AppTabs, s.updateNetworks(grid, f) } -func (s *serviceClient) updateExitNodes() { +// startExitNodeRefresh initiates exit node menu refresh after connecting. +// On Windows, TrayOpenedCh is not supported by the systray library, so we use +// a background poller to keep exit nodes in sync while connected. +// On macOS/Linux, TrayOpenedCh handles refreshes on each tray open. +func (s *serviceClient) startExitNodeRefresh() { + s.cancelExitNodeRetry() + + if runtime.GOOS == "windows" { + ctx, cancel := context.WithCancel(s.ctx) + s.exitNodeMu.Lock() + s.exitNodeRetryCancel = cancel + s.exitNodeMu.Unlock() + + go s.pollExitNodes(ctx) + } else { + go s.updateExitNodes() + } +} + +func (s *serviceClient) cancelExitNodeRetry() { + s.exitNodeMu.Lock() + if s.exitNodeRetryCancel != nil { + s.exitNodeRetryCancel() + s.exitNodeRetryCancel = nil + } + s.exitNodeMu.Unlock() +} + +// pollExitNodes periodically refreshes exit nodes while connected. +// Uses a short initial interval to catch routes from the management sync, +// then switches to a longer interval for ongoing updates. +func (s *serviceClient) pollExitNodes(ctx context.Context) { + // Initial fast polling to catch routes as they appear after connect. + for i := 0; i < 5; i++ { + if s.updateExitNodes() { + break + } + select { + case <-ctx.Done(): + return + case <-time.After(2 * time.Second): + } + } + + ticker := time.NewTicker(10 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + s.updateExitNodes() + } + } +} + +// updateExitNodes fetches exit nodes from the daemon and recreates the menu. +// Returns true if exit nodes were found. +func (s *serviceClient) updateExitNodes() bool { conn, err := s.getSrvClient(defaultFailTimeout) if err != nil { log.Errorf("get client: %v", err) - return + return false } exitNodes, err := s.getExitNodes(conn) if err != nil { log.Errorf("get exit nodes: %v", err) - return + return false } s.exitNodeMu.Lock() @@ -354,28 +407,14 @@ func (s *serviceClient) updateExitNodes() { if len(s.mExitNodeItems) > 0 { s.mExitNode.Enable() - } else { - s.mExitNode.Disable() + return true } + + s.mExitNode.Disable() + return false } func (s *serviceClient) recreateExitNodeMenu(exitNodes []*proto.Network) { - var exitNodeIDs []exitNodeState - for _, node := range exitNodes { - exitNodeIDs = append(exitNodeIDs, exitNodeState{ - id: node.ID, - selected: node.Selected, - }) - } - - sort.Slice(exitNodeIDs, func(i, j int) bool { - return exitNodeIDs[i].id < exitNodeIDs[j].id - }) - if slices.Equal(s.exitNodeStates, exitNodeIDs) { - log.Debug("Exit node menu already up to date") - return - } - for _, node := range s.mExitNodeItems { node.cancel() node.Hide() @@ -413,8 +452,6 @@ func (s *serviceClient) recreateExitNodeMenu(exitNodes []*proto.Network) { go s.handleChecked(ctx, node.ID, menuItem) } - s.exitNodeStates = exitNodeIDs - if showDeselectAll { s.mExitNode.AddSeparator() deselectAllItem := s.mExitNode.AddSubMenuItem("Deselect All", "Deselect All") From f88429982306156f9edecb3ae8046135c9f0398a Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Mon, 9 Mar 2026 18:45:45 +0100 Subject: [PATCH 198/374] [proxy] refactor metrics and add usage logs (#5533) * **New Features** * Access logs now include bytes_upload and bytes_download (API and schemas updated, fields required). * Certificate issuance duration is now recorded as a metric. * **Refactor** * Metrics switched from Prometheus client to OpenTelemetry-backed meters; health endpoint now exposes OpenMetrics via OTLP exporter. * **Tests** * Metric tests updated to use OpenTelemetry Prometheus exporter and MeterProvider. --- .../reverseproxy/accesslogs/accesslogentry.go | 6 + proxy/internal/accesslog/logger.go | 124 ++- proxy/internal/accesslog/middleware.go | 16 + proxy/internal/accesslog/statuswriter.go | 25 +- proxy/internal/acme/manager.go | 12 +- proxy/internal/acme/manager_test.go | 4 +- proxy/internal/metrics/metrics.go | 216 +++-- proxy/internal/metrics/metrics_test.go | 20 +- proxy/server.go | 32 +- shared/management/http/api/openapi.yml | 12 + shared/management/http/api/types.gen.go | 814 +++++++++++++++++- shared/management/proto/management.pb.go | 2 +- shared/management/proto/proxy_service.pb.go | 309 +++---- shared/management/proto/proxy_service.proto | 2 + 14 files changed, 1343 insertions(+), 251 deletions(-) diff --git a/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go b/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go index 019cb634a..0bcc59b68 100644 --- a/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go +++ b/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go @@ -24,6 +24,8 @@ type AccessLogEntry struct { Reason string UserId string `gorm:"index"` AuthMethodUsed string `gorm:"index"` + BytesUpload int64 `gorm:"index"` + BytesDownload int64 `gorm:"index"` } // FromProto creates an AccessLogEntry from a proto.AccessLog @@ -39,6 +41,8 @@ func (a *AccessLogEntry) FromProto(serviceLog *proto.AccessLog) { a.UserId = serviceLog.GetUserId() a.AuthMethodUsed = serviceLog.GetAuthMechanism() a.AccountID = serviceLog.GetAccountId() + a.BytesUpload = serviceLog.GetBytesUpload() + a.BytesDownload = serviceLog.GetBytesDownload() if sourceIP := serviceLog.GetSourceIp(); sourceIP != "" { if ip, err := netip.ParseAddr(sourceIP); err == nil { @@ -101,5 +105,7 @@ func (a *AccessLogEntry) ToAPIResponse() *api.ProxyAccessLog { AuthMethodUsed: authMethod, CountryCode: countryCode, CityName: cityName, + BytesUpload: a.BytesUpload, + BytesDownload: a.BytesDownload, } } diff --git a/proxy/internal/accesslog/logger.go b/proxy/internal/accesslog/logger.go index 9e204be65..4ba5a7755 100644 --- a/proxy/internal/accesslog/logger.go +++ b/proxy/internal/accesslog/logger.go @@ -3,6 +3,7 @@ package accesslog import ( "context" "net/netip" + "sync" "time" log "github.com/sirupsen/logrus" @@ -13,6 +14,23 @@ import ( "github.com/netbirdio/netbird/shared/management/proto" ) +const ( + requestThreshold = 10000 // Log every 10k requests + bytesThreshold = 1024 * 1024 * 1024 // Log every 1GB + usageCleanupPeriod = 1 * time.Hour // Clean up stale counters every hour + usageInactiveWindow = 24 * time.Hour // Consider domain inactive if no traffic for 24 hours +) + +type domainUsage struct { + requestCount int64 + requestStartTime time.Time + + bytesTransferred int64 + bytesStartTime time.Time + + lastActivity time.Time // Track last activity for cleanup +} + type gRPCClient interface { SendAccessLog(ctx context.Context, in *proto.SendAccessLogRequest, opts ...grpc.CallOption) (*proto.SendAccessLogResponse, error) } @@ -22,6 +40,11 @@ type Logger struct { client gRPCClient logger *log.Logger trustedProxies []netip.Prefix + + usageMux sync.Mutex + domainUsage map[string]*domainUsage + + cleanupCancel context.CancelFunc } // NewLogger creates a new access log Logger. The trustedProxies parameter @@ -31,10 +54,26 @@ func NewLogger(client gRPCClient, logger *log.Logger, trustedProxies []netip.Pre if logger == nil { logger = log.StandardLogger() } - return &Logger{ + + ctx, cancel := context.WithCancel(context.Background()) + l := &Logger{ client: client, logger: logger, trustedProxies: trustedProxies, + domainUsage: make(map[string]*domainUsage), + cleanupCancel: cancel, + } + + // Start background cleanup routine + go l.cleanupStaleUsage(ctx) + + return l +} + +// Close stops the cleanup routine. Should be called during graceful shutdown. +func (l *Logger) Close() { + if l.cleanupCancel != nil { + l.cleanupCancel() } } @@ -51,6 +90,8 @@ type logEntry struct { AuthMechanism string UserId string AuthSuccess bool + BytesUpload int64 + BytesDownload int64 } func (l *Logger) log(ctx context.Context, entry logEntry) { @@ -84,6 +125,8 @@ func (l *Logger) log(ctx context.Context, entry logEntry) { AuthMechanism: entry.AuthMechanism, UserId: entry.UserId, AuthSuccess: entry.AuthSuccess, + BytesUpload: entry.BytesUpload, + BytesDownload: entry.BytesDownload, }, }); err != nil { // If it fails to send on the gRPC connection, then at least log it to the error log. @@ -103,3 +146,82 @@ func (l *Logger) log(ctx context.Context, entry logEntry) { } }() } + +// trackUsage records request and byte counts per domain, logging when thresholds are hit. +func (l *Logger) trackUsage(domain string, bytesTransferred int64) { + if domain == "" { + return + } + + l.usageMux.Lock() + defer l.usageMux.Unlock() + + now := time.Now() + usage, exists := l.domainUsage[domain] + if !exists { + usage = &domainUsage{ + requestStartTime: now, + bytesStartTime: now, + lastActivity: now, + } + l.domainUsage[domain] = usage + } + + usage.lastActivity = now + + usage.requestCount++ + if usage.requestCount >= requestThreshold { + elapsed := time.Since(usage.requestStartTime) + l.logger.WithFields(log.Fields{ + "domain": domain, + "requests": usage.requestCount, + "duration": elapsed.String(), + }).Infof("domain %s had %d requests over %s", domain, usage.requestCount, elapsed) + + usage.requestCount = 0 + usage.requestStartTime = now + } + + usage.bytesTransferred += bytesTransferred + if usage.bytesTransferred >= bytesThreshold { + elapsed := time.Since(usage.bytesStartTime) + bytesInGB := float64(usage.bytesTransferred) / (1024 * 1024 * 1024) + l.logger.WithFields(log.Fields{ + "domain": domain, + "bytes": usage.bytesTransferred, + "bytes_gb": bytesInGB, + "duration": elapsed.String(), + }).Infof("domain %s transferred %.2f GB over %s", domain, bytesInGB, elapsed) + + usage.bytesTransferred = 0 + usage.bytesStartTime = now + } +} + +// cleanupStaleUsage removes usage entries for domains that have been inactive. +func (l *Logger) cleanupStaleUsage(ctx context.Context) { + ticker := time.NewTicker(usageCleanupPeriod) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + l.usageMux.Lock() + now := time.Now() + removed := 0 + for domain, usage := range l.domainUsage { + if now.Sub(usage.lastActivity) > usageInactiveWindow { + delete(l.domainUsage, domain) + removed++ + } + } + l.usageMux.Unlock() + + if removed > 0 { + l.logger.Debugf("cleaned up %d stale domain usage entries", removed) + } + } + } +} diff --git a/proxy/internal/accesslog/middleware.go b/proxy/internal/accesslog/middleware.go index dd4798975..7368185c0 100644 --- a/proxy/internal/accesslog/middleware.go +++ b/proxy/internal/accesslog/middleware.go @@ -32,6 +32,14 @@ func (l *Logger) Middleware(next http.Handler) http.Handler { status: http.StatusOK, } + var bytesRead int64 + if r.Body != nil { + r.Body = &bodyCounter{ + ReadCloser: r.Body, + bytesRead: &bytesRead, + } + } + // Resolve the source IP using trusted proxy configuration before passing // the request on, as the proxy will modify forwarding headers. sourceIp := extractSourceIP(r, l.trustedProxies) @@ -53,6 +61,9 @@ func (l *Logger) Middleware(next http.Handler) http.Handler { host = r.Host } + bytesUpload := bytesRead + bytesDownload := sw.bytesWritten + entry := logEntry{ ID: requestID, ServiceId: capturedData.GetServiceId(), @@ -66,10 +77,15 @@ func (l *Logger) Middleware(next http.Handler) http.Handler { AuthMechanism: capturedData.GetAuthMethod(), UserId: capturedData.GetUserID(), AuthSuccess: sw.status != http.StatusUnauthorized && sw.status != http.StatusForbidden, + BytesUpload: bytesUpload, + BytesDownload: bytesDownload, } l.logger.Debugf("response: request_id=%s method=%s host=%s path=%s status=%d duration=%dms source=%s origin=%s service=%s account=%s", requestID, r.Method, host, r.URL.Path, sw.status, duration.Milliseconds(), sourceIp, capturedData.GetOrigin(), capturedData.GetServiceId(), capturedData.GetAccountId()) l.log(r.Context(), entry) + + // Track usage for cost monitoring (upload + download) by domain + l.trackUsage(host, bytesUpload+bytesDownload) }) } diff --git a/proxy/internal/accesslog/statuswriter.go b/proxy/internal/accesslog/statuswriter.go index 43cda59f9..24f7b35e9 100644 --- a/proxy/internal/accesslog/statuswriter.go +++ b/proxy/internal/accesslog/statuswriter.go @@ -1,18 +1,39 @@ package accesslog import ( + "io" + "github.com/netbirdio/netbird/proxy/internal/responsewriter" ) -// statusWriter captures the HTTP status code from WriteHeader calls. +// statusWriter captures the HTTP status code and bytes written from responses. // It embeds responsewriter.PassthroughWriter which handles all the optional // interfaces (Hijacker, Flusher, Pusher) automatically. type statusWriter struct { *responsewriter.PassthroughWriter - status int + status int + bytesWritten int64 } func (w *statusWriter) WriteHeader(status int) { w.status = status w.PassthroughWriter.WriteHeader(status) } + +func (w *statusWriter) Write(b []byte) (int, error) { + n, err := w.PassthroughWriter.Write(b) + w.bytesWritten += int64(n) + return n, err +} + +// bodyCounter wraps an io.ReadCloser and counts bytes read from the request body. +type bodyCounter struct { + io.ReadCloser + bytesRead *int64 +} + +func (bc *bodyCounter) Read(p []byte) (int, error) { + n, err := bc.ReadCloser.Read(p) + *bc.bytesRead += int64(n) + return n, err +} diff --git a/proxy/internal/acme/manager.go b/proxy/internal/acme/manager.go index d491d65a3..ebc15314b 100644 --- a/proxy/internal/acme/manager.go +++ b/proxy/internal/acme/manager.go @@ -42,6 +42,10 @@ type domainInfo struct { err string } +type metricsRecorder interface { + RecordCertificateIssuance(duration time.Duration) +} + // Manager wraps autocert.Manager with domain tracking and cross-replica // coordination via a pluggable locking strategy. The locker prevents // duplicate ACME requests when multiple replicas share a certificate cache. @@ -55,6 +59,7 @@ type Manager struct { certNotifier certificateNotifier logger *log.Logger + metrics metricsRecorder } // NewManager creates a new ACME certificate manager. The certDir is used @@ -63,7 +68,7 @@ type Manager struct { // eabKID and eabHMACKey are optional External Account Binding credentials // required for some CAs like ZeroSSL. The eabHMACKey should be the base64 // URL-encoded string provided by the CA. -func NewManager(certDir, acmeURL, eabKID, eabHMACKey string, notifier certificateNotifier, logger *log.Logger, lockMethod CertLockMethod) *Manager { +func NewManager(certDir, acmeURL, eabKID, eabHMACKey string, notifier certificateNotifier, logger *log.Logger, lockMethod CertLockMethod, metrics metricsRecorder) *Manager { if logger == nil { logger = log.StandardLogger() } @@ -73,6 +78,7 @@ func NewManager(certDir, acmeURL, eabKID, eabHMACKey string, notifier certificat domains: make(map[domain.Domain]*domainInfo), certNotifier: notifier, logger: logger, + metrics: metrics, } var eab *acme.ExternalAccountBinding @@ -161,6 +167,10 @@ func (mgr *Manager) prefetchCertificate(d domain.Domain) { return } + if mgr.metrics != nil { + mgr.metrics.RecordCertificateIssuance(elapsed) + } + mgr.setDomainState(d, domainReady, "") now := time.Now() diff --git a/proxy/internal/acme/manager_test.go b/proxy/internal/acme/manager_test.go index f7efe5933..30a27c612 100644 --- a/proxy/internal/acme/manager_test.go +++ b/proxy/internal/acme/manager_test.go @@ -10,7 +10,7 @@ import ( ) func TestHostPolicy(t *testing.T) { - mgr := NewManager(t.TempDir(), "https://acme.example.com/directory", "", "", nil, nil, "") + mgr := NewManager(t.TempDir(), "https://acme.example.com/directory", "", "", nil, nil, "", nil) mgr.AddDomain("example.com", "acc1", "rp1") // Wait for the background prefetch goroutine to finish so the temp dir @@ -70,7 +70,7 @@ func TestHostPolicy(t *testing.T) { } func TestDomainStates(t *testing.T) { - mgr := NewManager(t.TempDir(), "https://acme.example.com/directory", "", "", nil, nil, "") + mgr := NewManager(t.TempDir(), "https://acme.example.com/directory", "", "", nil, nil, "", nil) assert.Equal(t, 0, mgr.PendingCerts(), "initially zero") assert.Equal(t, 0, mgr.TotalDomains(), "initially zero domains") diff --git a/proxy/internal/metrics/metrics.go b/proxy/internal/metrics/metrics.go index 954020f77..68ff55fe5 100644 --- a/proxy/internal/metrics/metrics.go +++ b/proxy/internal/metrics/metrics.go @@ -1,64 +1,106 @@ package metrics import ( + "context" "net/http" - "strconv" + "sync" "time" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" + "go.opentelemetry.io/otel/metric" "github.com/netbirdio/netbird/proxy/internal/proxy" "github.com/netbirdio/netbird/proxy/internal/responsewriter" ) type Metrics struct { - requestsTotal prometheus.Counter - activeRequests prometheus.Gauge - configuredDomains prometheus.Gauge - pathsPerDomain *prometheus.GaugeVec - requestDuration *prometheus.HistogramVec - backendDuration *prometheus.HistogramVec + ctx context.Context + requestsTotal metric.Int64Counter + activeRequests metric.Int64UpDownCounter + configuredDomains metric.Int64UpDownCounter + totalPaths metric.Int64UpDownCounter + requestDuration metric.Int64Histogram + backendDuration metric.Int64Histogram + certificateIssueDuration metric.Int64Histogram + + mappingsMux sync.Mutex + mappingPaths map[string]int } -func New(reg prometheus.Registerer) *Metrics { - promFactory := promauto.With(reg) - return &Metrics{ - requestsTotal: promFactory.NewCounter(prometheus.CounterOpts{ - Name: "netbird_proxy_requests_total", - Help: "Total number of requests made to the netbird proxy", - }), - activeRequests: promFactory.NewGauge(prometheus.GaugeOpts{ - Name: "netbird_proxy_active_requests_count", - Help: "Current in-flight requests handled by the netbird proxy", - }), - configuredDomains: promFactory.NewGauge(prometheus.GaugeOpts{ - Name: "netbird_proxy_domains_count", - Help: "Current number of domains configured on the netbird proxy", - }), - pathsPerDomain: promFactory.NewGaugeVec( - prometheus.GaugeOpts{ - Name: "netbird_proxy_paths_count", - Help: "Current number of paths configured on the netbird proxy labelled by domain", - }, - []string{"domain"}, - ), - requestDuration: promFactory.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "netbird_proxy_request_duration_seconds", - Help: "Duration of requests made to the netbird proxy", - Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10}, - }, - []string{"status", "size", "method", "host", "path"}, - ), - backendDuration: promFactory.NewHistogramVec(prometheus.HistogramOpts{ - Name: "netbird_proxy_backend_duration_seconds", - Help: "Duration of peer round trip time from the netbird proxy", - Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10}, - }, - []string{"status", "size", "method", "host", "path"}, - ), +func New(ctx context.Context, meter metric.Meter) (*Metrics, error) { + requestsTotal, err := meter.Int64Counter( + "proxy.http.request.counter", + metric.WithUnit("1"), + metric.WithDescription("Total number of requests made to the netbird proxy"), + ) + if err != nil { + return nil, err } + + activeRequests, err := meter.Int64UpDownCounter( + "proxy.http.active_requests", + metric.WithUnit("1"), + metric.WithDescription("Current in-flight requests handled by the netbird proxy"), + ) + if err != nil { + return nil, err + } + + configuredDomains, err := meter.Int64UpDownCounter( + "proxy.domains.count", + metric.WithUnit("1"), + metric.WithDescription("Current number of domains configured on the netbird proxy"), + ) + if err != nil { + return nil, err + } + + totalPaths, err := meter.Int64UpDownCounter( + "proxy.paths.count", + metric.WithUnit("1"), + metric.WithDescription("Total number of paths configured on the netbird proxy"), + ) + if err != nil { + return nil, err + } + + requestDuration, err := meter.Int64Histogram( + "proxy.http.request.duration.ms", + metric.WithUnit("milliseconds"), + metric.WithDescription("Duration of requests made to the netbird proxy"), + ) + if err != nil { + return nil, err + } + + backendDuration, err := meter.Int64Histogram( + "proxy.backend.duration.ms", + metric.WithUnit("milliseconds"), + metric.WithDescription("Duration of peer round trip time from the netbird proxy"), + ) + if err != nil { + return nil, err + } + + certificateIssueDuration, err := meter.Int64Histogram( + "proxy.certificate.issue.duration.ms", + metric.WithUnit("milliseconds"), + metric.WithDescription("Duration of ACME certificate issuance"), + ) + if err != nil { + return nil, err + } + + return &Metrics{ + ctx: ctx, + requestsTotal: requestsTotal, + activeRequests: activeRequests, + configuredDomains: configuredDomains, + totalPaths: totalPaths, + requestDuration: requestDuration, + backendDuration: backendDuration, + certificateIssueDuration: certificateIssueDuration, + mappingPaths: make(map[string]int), + }, nil } type responseInterceptor struct { @@ -80,23 +122,19 @@ func (w *responseInterceptor) Write(b []byte) (int, error) { func (m *Metrics) Middleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - m.requestsTotal.Inc() - m.activeRequests.Inc() + m.requestsTotal.Add(m.ctx, 1) + m.activeRequests.Add(m.ctx, 1) interceptor := &responseInterceptor{PassthroughWriter: responsewriter.New(w)} start := time.Now() - next.ServeHTTP(interceptor, r) - duration := time.Since(start) + defer func() { + duration := time.Since(start) + m.activeRequests.Add(m.ctx, -1) + m.requestDuration.Record(m.ctx, duration.Milliseconds()) + }() - m.activeRequests.Desc() - m.requestDuration.With(prometheus.Labels{ - "status": strconv.Itoa(interceptor.status), - "size": strconv.Itoa(interceptor.size), - "method": r.Method, - "host": r.Host, - "path": r.URL.Path, - }).Observe(duration.Seconds()) + next.ServeHTTP(interceptor, r) }) } @@ -108,44 +146,52 @@ func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { func (m *Metrics) RoundTripper(next http.RoundTripper) http.RoundTripper { return roundTripperFunc(func(req *http.Request) (*http.Response, error) { - labels := prometheus.Labels{ - "method": req.Method, - "host": req.Host, - // Fill potentially empty labels with default values to avoid cardinality issues. - "path": "/", - "status": "0", - "size": "0", - } - if req.URL != nil { - labels["path"] = req.URL.Path - } - start := time.Now() res, err := next.RoundTrip(req) duration := time.Since(start) - // Not all labels will be available if there was an error. - if res != nil { - labels["status"] = strconv.Itoa(res.StatusCode) - labels["size"] = strconv.Itoa(int(res.ContentLength)) - } - - m.backendDuration.With(labels).Observe(duration.Seconds()) + m.backendDuration.Record(m.ctx, duration.Milliseconds()) return res, err }) } func (m *Metrics) AddMapping(mapping proxy.Mapping) { - m.configuredDomains.Inc() - m.pathsPerDomain.With(prometheus.Labels{ - "domain": mapping.Host, - }).Set(float64(len(mapping.Paths))) + m.mappingsMux.Lock() + defer m.mappingsMux.Unlock() + + newPathCount := len(mapping.Paths) + oldPathCount, exists := m.mappingPaths[mapping.Host] + + if !exists { + m.configuredDomains.Add(m.ctx, 1) + } + + pathDelta := newPathCount - oldPathCount + if pathDelta != 0 { + m.totalPaths.Add(m.ctx, int64(pathDelta)) + } + + m.mappingPaths[mapping.Host] = newPathCount } func (m *Metrics) RemoveMapping(mapping proxy.Mapping) { - m.configuredDomains.Dec() - m.pathsPerDomain.With(prometheus.Labels{ - "domain": mapping.Host, - }).Set(0) + m.mappingsMux.Lock() + defer m.mappingsMux.Unlock() + + oldPathCount, exists := m.mappingPaths[mapping.Host] + if !exists { + // Nothing to remove + return + } + + m.configuredDomains.Add(m.ctx, -1) + m.totalPaths.Add(m.ctx, -int64(oldPathCount)) + + delete(m.mappingPaths, mapping.Host) +} + +// RecordCertificateIssuance records the duration of a certificate issuance. +func (m *Metrics) RecordCertificateIssuance(duration time.Duration) { + m.certificateIssueDuration.Record(m.ctx, duration.Milliseconds()) } diff --git a/proxy/internal/metrics/metrics_test.go b/proxy/internal/metrics/metrics_test.go index 31e00ae64..f81072eda 100644 --- a/proxy/internal/metrics/metrics_test.go +++ b/proxy/internal/metrics/metrics_test.go @@ -1,13 +1,17 @@ package metrics_test import ( + "context" "net/http" "net/url" + "reflect" "testing" "github.com/google/go-cmp/cmp" + "go.opentelemetry.io/otel/exporters/prometheus" + "go.opentelemetry.io/otel/sdk/metric" + "github.com/netbirdio/netbird/proxy/internal/metrics" - "github.com/prometheus/client_golang/prometheus" ) type testRoundTripper struct { @@ -47,7 +51,19 @@ func TestMetrics_RoundTripper(t *testing.T) { }, } - m := metrics.New(prometheus.NewRegistry()) + exporter, err := prometheus.New() + if err != nil { + t.Fatalf("create prometheus exporter: %v", err) + } + + provider := metric.NewMeterProvider(metric.WithReader(exporter)) + pkg := reflect.TypeOf(metrics.Metrics{}).PkgPath() + meter := provider.Meter(pkg) + + m, err := metrics.New(context.Background(), meter) + if err != nil { + t.Fatalf("create metrics: %v", err) + } for name, test := range tests { t.Run(name, func(t *testing.T) { diff --git a/proxy/server.go b/proxy/server.go index 0d1aa2f6c..123b14648 100644 --- a/proxy/server.go +++ b/proxy/server.go @@ -19,14 +19,17 @@ import ( "net/netip" "net/url" "path/filepath" + "reflect" "sync" "time" "github.com/cenkalti/backoff/v4" "github.com/pires/go-proxyproto" - "github.com/prometheus/client_golang/prometheus" + prometheus2 "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" log "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/exporters/prometheus" + "go.opentelemetry.io/otel/sdk/metric" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" @@ -42,7 +45,7 @@ import ( proxygrpc "github.com/netbirdio/netbird/proxy/internal/grpc" "github.com/netbirdio/netbird/proxy/internal/health" "github.com/netbirdio/netbird/proxy/internal/k8s" - "github.com/netbirdio/netbird/proxy/internal/metrics" + proxymetrics "github.com/netbirdio/netbird/proxy/internal/metrics" "github.com/netbirdio/netbird/proxy/internal/proxy" "github.com/netbirdio/netbird/proxy/internal/roundtrip" "github.com/netbirdio/netbird/proxy/internal/types" @@ -63,7 +66,7 @@ type Server struct { debug *http.Server healthServer *health.Server healthChecker *health.Checker - meter *metrics.Metrics + meter *proxymetrics.Metrics // hijackTracker tracks hijacked connections (e.g. WebSocket upgrades) // so they can be closed during graceful shutdown, since http.Server.Shutdown @@ -152,8 +155,19 @@ func (s *Server) NotifyCertificateIssued(ctx context.Context, accountID, service func (s *Server) ListenAndServe(ctx context.Context, addr string) (err error) { s.initDefaults() - reg := prometheus.NewRegistry() - s.meter = metrics.New(reg) + exporter, err := prometheus.New() + if err != nil { + return fmt.Errorf("create prometheus exporter: %w", err) + } + + provider := metric.NewMeterProvider(metric.WithReader(exporter)) + pkg := reflect.TypeOf(Server{}).PkgPath() + meter := provider.Meter(pkg) + + s.meter, err = proxymetrics.New(ctx, meter) + if err != nil { + return fmt.Errorf("create metrics: %w", err) + } mgmtConn, err := s.dialManagement() if err != nil { @@ -193,7 +207,7 @@ func (s *Server) ListenAndServe(ctx context.Context, addr string) (err error) { s.startDebugEndpoint() - if err := s.startHealthServer(reg); err != nil { + if err := s.startHealthServer(); err != nil { return err } @@ -284,12 +298,12 @@ func (s *Server) startDebugEndpoint() { } // startHealthServer launches the health probe and metrics server. -func (s *Server) startHealthServer(reg *prometheus.Registry) error { +func (s *Server) startHealthServer() error { healthAddr := s.HealthAddress if healthAddr == "" { healthAddr = defaultHealthAddr } - s.healthServer = health.NewServer(healthAddr, s.healthChecker, s.Logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{})) + s.healthServer = health.NewServer(healthAddr, s.healthChecker, s.Logger, promhttp.HandlerFor(prometheus2.DefaultGatherer, promhttp.HandlerOpts{EnableOpenMetrics: true})) healthListener, err := net.Listen("tcp", healthAddr) if err != nil { return fmt.Errorf("health probe server listen on %s: %w", healthAddr, err) @@ -423,7 +437,7 @@ func (s *Server) configureTLS(ctx context.Context) (*tls.Config, error) { "acme_server": s.ACMEDirectory, "challenge_type": s.ACMEChallengeType, }).Debug("ACME certificates enabled, configuring certificate manager") - s.acme = acme.NewManager(s.CertificateDirectory, s.ACMEDirectory, s.ACMEEABKID, s.ACMEEABHMACKey, s, s.Logger, s.CertLockMethod) + s.acme = acme.NewManager(s.CertificateDirectory, s.ACMEDirectory, s.ACMEEABKID, s.ACMEEABHMACKey, s, s.Logger, s.CertLockMethod, s.meter) if s.ACMEChallengeType == "http-01" { s.http = &http.Server{ diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 7f03d6986..c67231342 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -2822,6 +2822,16 @@ components: type: string description: "City name from geolocation" example: "San Francisco" + bytes_upload: + type: integer + format: int64 + description: "Bytes uploaded (request body size)" + example: 1024 + bytes_download: + type: integer + format: int64 + description: "Bytes downloaded (response body size)" + example: 8192 required: - id - service_id @@ -2831,6 +2841,8 @@ components: - path - duration_ms - status_code + - bytes_upload + - bytes_download ProxyAccessLogsResponse: type: object properties: diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index d4a07f806..f218679c0 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -1,6 +1,6 @@ // Package api provides primitives to interact with the openapi HTTP API. // -// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.5.1 DO NOT EDIT. +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.6.0 DO NOT EDIT. package api import ( @@ -24,6 +24,22 @@ const ( CreateIntegrationRequestPlatformS3 CreateIntegrationRequestPlatform = "s3" ) +// Valid indicates whether the value is a known member of the CreateIntegrationRequestPlatform enum. +func (e CreateIntegrationRequestPlatform) Valid() bool { + switch e { + case CreateIntegrationRequestPlatformDatadog: + return true + case CreateIntegrationRequestPlatformFirehose: + return true + case CreateIntegrationRequestPlatformGenericHttp: + return true + case CreateIntegrationRequestPlatformS3: + return true + default: + return false + } +} + // Defines values for DNSRecordType. const ( DNSRecordTypeA DNSRecordType = "A" @@ -31,6 +47,20 @@ const ( DNSRecordTypeCNAME DNSRecordType = "CNAME" ) +// Valid indicates whether the value is a known member of the DNSRecordType enum. +func (e DNSRecordType) Valid() bool { + switch e { + case DNSRecordTypeA: + return true + case DNSRecordTypeAAAA: + return true + case DNSRecordTypeCNAME: + return true + default: + return false + } +} + // Defines values for EventActivityCode. const ( EventActivityCodeAccountCreate EventActivityCode = "account.create" @@ -147,12 +177,256 @@ const ( EventActivityCodeUserUnblock EventActivityCode = "user.unblock" ) +// Valid indicates whether the value is a known member of the EventActivityCode enum. +func (e EventActivityCode) Valid() bool { + switch e { + case EventActivityCodeAccountCreate: + return true + case EventActivityCodeAccountDelete: + return true + case EventActivityCodeAccountDnsDomainUpdate: + return true + case EventActivityCodeAccountNetworkRangeUpdate: + return true + case EventActivityCodeAccountPeerInactivityExpirationDisable: + return true + case EventActivityCodeAccountPeerInactivityExpirationEnable: + return true + case EventActivityCodeAccountPeerInactivityExpirationUpdate: + return true + case EventActivityCodeAccountSettingGroupPropagationDisable: + return true + case EventActivityCodeAccountSettingGroupPropagationEnable: + return true + case EventActivityCodeAccountSettingLazyConnectionDisable: + return true + case EventActivityCodeAccountSettingLazyConnectionEnable: + return true + case EventActivityCodeAccountSettingPeerApprovalDisable: + return true + case EventActivityCodeAccountSettingPeerApprovalEnable: + return true + case EventActivityCodeAccountSettingPeerLoginExpirationDisable: + return true + case EventActivityCodeAccountSettingPeerLoginExpirationEnable: + return true + case EventActivityCodeAccountSettingPeerLoginExpirationUpdate: + return true + case EventActivityCodeAccountSettingRoutingPeerDnsResolutionDisable: + return true + case EventActivityCodeAccountSettingRoutingPeerDnsResolutionEnable: + return true + case EventActivityCodeAccountSettingsAutoVersionUpdate: + return true + case EventActivityCodeDashboardLogin: + return true + case EventActivityCodeDnsSettingDisabledManagementGroupAdd: + return true + case EventActivityCodeDnsSettingDisabledManagementGroupDelete: + return true + case EventActivityCodeDnsZoneCreate: + return true + case EventActivityCodeDnsZoneDelete: + return true + case EventActivityCodeDnsZoneRecordCreate: + return true + case EventActivityCodeDnsZoneRecordDelete: + return true + case EventActivityCodeDnsZoneRecordUpdate: + return true + case EventActivityCodeDnsZoneUpdate: + return true + case EventActivityCodeGroupAdd: + return true + case EventActivityCodeGroupDelete: + return true + case EventActivityCodeGroupUpdate: + return true + case EventActivityCodeIdentityproviderCreate: + return true + case EventActivityCodeIdentityproviderDelete: + return true + case EventActivityCodeIdentityproviderUpdate: + return true + case EventActivityCodeIntegrationCreate: + return true + case EventActivityCodeIntegrationDelete: + return true + case EventActivityCodeIntegrationUpdate: + return true + case EventActivityCodeNameserverGroupAdd: + return true + case EventActivityCodeNameserverGroupDelete: + return true + case EventActivityCodeNameserverGroupUpdate: + return true + case EventActivityCodeNetworkCreate: + return true + case EventActivityCodeNetworkDelete: + return true + case EventActivityCodeNetworkResourceCreate: + return true + case EventActivityCodeNetworkResourceDelete: + return true + case EventActivityCodeNetworkResourceUpdate: + return true + case EventActivityCodeNetworkRouterCreate: + return true + case EventActivityCodeNetworkRouterDelete: + return true + case EventActivityCodeNetworkRouterUpdate: + return true + case EventActivityCodeNetworkUpdate: + return true + case EventActivityCodePeerApprovalRevoke: + return true + case EventActivityCodePeerApprove: + return true + case EventActivityCodePeerGroupAdd: + return true + case EventActivityCodePeerGroupDelete: + return true + case EventActivityCodePeerInactivityExpirationDisable: + return true + case EventActivityCodePeerInactivityExpirationEnable: + return true + case EventActivityCodePeerIpUpdate: + return true + case EventActivityCodePeerJobCreate: + return true + case EventActivityCodePeerLoginExpirationDisable: + return true + case EventActivityCodePeerLoginExpirationEnable: + return true + case EventActivityCodePeerLoginExpire: + return true + case EventActivityCodePeerRename: + return true + case EventActivityCodePeerSetupkeyAdd: + return true + case EventActivityCodePeerSshDisable: + return true + case EventActivityCodePeerSshEnable: + return true + case EventActivityCodePeerUserAdd: + return true + case EventActivityCodePersonalAccessTokenCreate: + return true + case EventActivityCodePersonalAccessTokenDelete: + return true + case EventActivityCodePolicyAdd: + return true + case EventActivityCodePolicyDelete: + return true + case EventActivityCodePolicyUpdate: + return true + case EventActivityCodePostureCheckCreate: + return true + case EventActivityCodePostureCheckDelete: + return true + case EventActivityCodePostureCheckUpdate: + return true + case EventActivityCodeResourceGroupAdd: + return true + case EventActivityCodeResourceGroupDelete: + return true + case EventActivityCodeRouteAdd: + return true + case EventActivityCodeRouteDelete: + return true + case EventActivityCodeRouteUpdate: + return true + case EventActivityCodeRuleAdd: + return true + case EventActivityCodeRuleDelete: + return true + case EventActivityCodeRuleUpdate: + return true + case EventActivityCodeServiceCreate: + return true + case EventActivityCodeServiceDelete: + return true + case EventActivityCodeServiceUpdate: + return true + case EventActivityCodeServiceUserCreate: + return true + case EventActivityCodeServiceUserDelete: + return true + case EventActivityCodeSetupkeyAdd: + return true + case EventActivityCodeSetupkeyDelete: + return true + case EventActivityCodeSetupkeyGroupAdd: + return true + case EventActivityCodeSetupkeyGroupDelete: + return true + case EventActivityCodeSetupkeyOveruse: + return true + case EventActivityCodeSetupkeyRevoke: + return true + case EventActivityCodeSetupkeyUpdate: + return true + case EventActivityCodeTransferredOwnerRole: + return true + case EventActivityCodeUserApprove: + return true + case EventActivityCodeUserBlock: + return true + case EventActivityCodeUserCreate: + return true + case EventActivityCodeUserDelete: + return true + case EventActivityCodeUserGroupAdd: + return true + case EventActivityCodeUserGroupDelete: + return true + case EventActivityCodeUserInvite: + return true + case EventActivityCodeUserInviteLinkAccept: + return true + case EventActivityCodeUserInviteLinkCreate: + return true + case EventActivityCodeUserInviteLinkDelete: + return true + case EventActivityCodeUserInviteLinkRegenerate: + return true + case EventActivityCodeUserJoin: + return true + case EventActivityCodeUserPasswordChange: + return true + case EventActivityCodeUserPeerDelete: + return true + case EventActivityCodeUserPeerLogin: + return true + case EventActivityCodeUserReject: + return true + case EventActivityCodeUserRoleUpdate: + return true + case EventActivityCodeUserUnblock: + return true + default: + return false + } +} + // Defines values for GeoLocationCheckAction. const ( GeoLocationCheckActionAllow GeoLocationCheckAction = "allow" GeoLocationCheckActionDeny GeoLocationCheckAction = "deny" ) +// Valid indicates whether the value is a known member of the GeoLocationCheckAction enum. +func (e GeoLocationCheckAction) Valid() bool { + switch e { + case GeoLocationCheckActionAllow: + return true + case GeoLocationCheckActionDeny: + return true + default: + return false + } +} + // Defines values for GroupIssued. const ( GroupIssuedApi GroupIssued = "api" @@ -160,6 +434,20 @@ const ( GroupIssuedJwt GroupIssued = "jwt" ) +// Valid indicates whether the value is a known member of the GroupIssued enum. +func (e GroupIssued) Valid() bool { + switch e { + case GroupIssuedApi: + return true + case GroupIssuedIntegration: + return true + case GroupIssuedJwt: + return true + default: + return false + } +} + // Defines values for GroupMinimumIssued. const ( GroupMinimumIssuedApi GroupMinimumIssued = "api" @@ -167,6 +455,20 @@ const ( GroupMinimumIssuedJwt GroupMinimumIssued = "jwt" ) +// Valid indicates whether the value is a known member of the GroupMinimumIssued enum. +func (e GroupMinimumIssued) Valid() bool { + switch e { + case GroupMinimumIssuedApi: + return true + case GroupMinimumIssuedIntegration: + return true + case GroupMinimumIssuedJwt: + return true + default: + return false + } +} + // Defines values for IdentityProviderType. const ( IdentityProviderTypeEntra IdentityProviderType = "entra" @@ -178,6 +480,28 @@ const ( IdentityProviderTypeZitadel IdentityProviderType = "zitadel" ) +// Valid indicates whether the value is a known member of the IdentityProviderType enum. +func (e IdentityProviderType) Valid() bool { + switch e { + case IdentityProviderTypeEntra: + return true + case IdentityProviderTypeGoogle: + return true + case IdentityProviderTypeMicrosoft: + return true + case IdentityProviderTypeOidc: + return true + case IdentityProviderTypeOkta: + return true + case IdentityProviderTypePocketid: + return true + case IdentityProviderTypeZitadel: + return true + default: + return false + } +} + // Defines values for IngressPortAllocationPortMappingProtocol. const ( IngressPortAllocationPortMappingProtocolTcp IngressPortAllocationPortMappingProtocol = "tcp" @@ -185,6 +509,20 @@ const ( IngressPortAllocationPortMappingProtocolUdp IngressPortAllocationPortMappingProtocol = "udp" ) +// Valid indicates whether the value is a known member of the IngressPortAllocationPortMappingProtocol enum. +func (e IngressPortAllocationPortMappingProtocol) Valid() bool { + switch e { + case IngressPortAllocationPortMappingProtocolTcp: + return true + case IngressPortAllocationPortMappingProtocolTcpudp: + return true + case IngressPortAllocationPortMappingProtocolUdp: + return true + default: + return false + } +} + // Defines values for IngressPortAllocationRequestDirectPortProtocol. const ( IngressPortAllocationRequestDirectPortProtocolTcp IngressPortAllocationRequestDirectPortProtocol = "tcp" @@ -192,6 +530,20 @@ const ( IngressPortAllocationRequestDirectPortProtocolUdp IngressPortAllocationRequestDirectPortProtocol = "udp" ) +// Valid indicates whether the value is a known member of the IngressPortAllocationRequestDirectPortProtocol enum. +func (e IngressPortAllocationRequestDirectPortProtocol) Valid() bool { + switch e { + case IngressPortAllocationRequestDirectPortProtocolTcp: + return true + case IngressPortAllocationRequestDirectPortProtocolTcpudp: + return true + case IngressPortAllocationRequestDirectPortProtocolUdp: + return true + default: + return false + } +} + // Defines values for IngressPortAllocationRequestPortRangeProtocol. const ( IngressPortAllocationRequestPortRangeProtocolTcp IngressPortAllocationRequestPortRangeProtocol = "tcp" @@ -199,6 +551,20 @@ const ( IngressPortAllocationRequestPortRangeProtocolUdp IngressPortAllocationRequestPortRangeProtocol = "udp" ) +// Valid indicates whether the value is a known member of the IngressPortAllocationRequestPortRangeProtocol enum. +func (e IngressPortAllocationRequestPortRangeProtocol) Valid() bool { + switch e { + case IngressPortAllocationRequestPortRangeProtocolTcp: + return true + case IngressPortAllocationRequestPortRangeProtocolTcpudp: + return true + case IngressPortAllocationRequestPortRangeProtocolUdp: + return true + default: + return false + } +} + // Defines values for IntegrationResponsePlatform. const ( IntegrationResponsePlatformDatadog IntegrationResponsePlatform = "datadog" @@ -207,12 +573,40 @@ const ( IntegrationResponsePlatformS3 IntegrationResponsePlatform = "s3" ) +// Valid indicates whether the value is a known member of the IntegrationResponsePlatform enum. +func (e IntegrationResponsePlatform) Valid() bool { + switch e { + case IntegrationResponsePlatformDatadog: + return true + case IntegrationResponsePlatformFirehose: + return true + case IntegrationResponsePlatformGenericHttp: + return true + case IntegrationResponsePlatformS3: + return true + default: + return false + } +} + // Defines values for InvoiceResponseType. const ( InvoiceResponseTypeAccount InvoiceResponseType = "account" InvoiceResponseTypeTenants InvoiceResponseType = "tenants" ) +// Valid indicates whether the value is a known member of the InvoiceResponseType enum. +func (e InvoiceResponseType) Valid() bool { + switch e { + case InvoiceResponseTypeAccount: + return true + case InvoiceResponseTypeTenants: + return true + default: + return false + } +} + // Defines values for JobResponseStatus. const ( JobResponseStatusFailed JobResponseStatus = "failed" @@ -220,11 +614,35 @@ const ( JobResponseStatusSucceeded JobResponseStatus = "succeeded" ) +// Valid indicates whether the value is a known member of the JobResponseStatus enum. +func (e JobResponseStatus) Valid() bool { + switch e { + case JobResponseStatusFailed: + return true + case JobResponseStatusPending: + return true + case JobResponseStatusSucceeded: + return true + default: + return false + } +} + // Defines values for NameserverNsType. const ( NameserverNsTypeUdp NameserverNsType = "udp" ) +// Valid indicates whether the value is a known member of the NameserverNsType enum. +func (e NameserverNsType) Valid() bool { + switch e { + case NameserverNsTypeUdp: + return true + default: + return false + } +} + // Defines values for NetworkResourceType. const ( NetworkResourceTypeDomain NetworkResourceType = "domain" @@ -232,18 +650,56 @@ const ( NetworkResourceTypeSubnet NetworkResourceType = "subnet" ) +// Valid indicates whether the value is a known member of the NetworkResourceType enum. +func (e NetworkResourceType) Valid() bool { + switch e { + case NetworkResourceTypeDomain: + return true + case NetworkResourceTypeHost: + return true + case NetworkResourceTypeSubnet: + return true + default: + return false + } +} + // Defines values for PeerNetworkRangeCheckAction. const ( PeerNetworkRangeCheckActionAllow PeerNetworkRangeCheckAction = "allow" PeerNetworkRangeCheckActionDeny PeerNetworkRangeCheckAction = "deny" ) +// Valid indicates whether the value is a known member of the PeerNetworkRangeCheckAction enum. +func (e PeerNetworkRangeCheckAction) Valid() bool { + switch e { + case PeerNetworkRangeCheckActionAllow: + return true + case PeerNetworkRangeCheckActionDeny: + return true + default: + return false + } +} + // Defines values for PolicyRuleAction. const ( PolicyRuleActionAccept PolicyRuleAction = "accept" PolicyRuleActionDrop PolicyRuleAction = "drop" ) +// Valid indicates whether the value is a known member of the PolicyRuleAction enum. +func (e PolicyRuleAction) Valid() bool { + switch e { + case PolicyRuleActionAccept: + return true + case PolicyRuleActionDrop: + return true + default: + return false + } +} + // Defines values for PolicyRuleProtocol. const ( PolicyRuleProtocolAll PolicyRuleProtocol = "all" @@ -253,12 +709,42 @@ const ( PolicyRuleProtocolUdp PolicyRuleProtocol = "udp" ) +// Valid indicates whether the value is a known member of the PolicyRuleProtocol enum. +func (e PolicyRuleProtocol) Valid() bool { + switch e { + case PolicyRuleProtocolAll: + return true + case PolicyRuleProtocolIcmp: + return true + case PolicyRuleProtocolNetbirdSsh: + return true + case PolicyRuleProtocolTcp: + return true + case PolicyRuleProtocolUdp: + return true + default: + return false + } +} + // Defines values for PolicyRuleMinimumAction. const ( PolicyRuleMinimumActionAccept PolicyRuleMinimumAction = "accept" PolicyRuleMinimumActionDrop PolicyRuleMinimumAction = "drop" ) +// Valid indicates whether the value is a known member of the PolicyRuleMinimumAction enum. +func (e PolicyRuleMinimumAction) Valid() bool { + switch e { + case PolicyRuleMinimumActionAccept: + return true + case PolicyRuleMinimumActionDrop: + return true + default: + return false + } +} + // Defines values for PolicyRuleMinimumProtocol. const ( PolicyRuleMinimumProtocolAll PolicyRuleMinimumProtocol = "all" @@ -268,12 +754,42 @@ const ( PolicyRuleMinimumProtocolUdp PolicyRuleMinimumProtocol = "udp" ) +// Valid indicates whether the value is a known member of the PolicyRuleMinimumProtocol enum. +func (e PolicyRuleMinimumProtocol) Valid() bool { + switch e { + case PolicyRuleMinimumProtocolAll: + return true + case PolicyRuleMinimumProtocolIcmp: + return true + case PolicyRuleMinimumProtocolNetbirdSsh: + return true + case PolicyRuleMinimumProtocolTcp: + return true + case PolicyRuleMinimumProtocolUdp: + return true + default: + return false + } +} + // Defines values for PolicyRuleUpdateAction. const ( PolicyRuleUpdateActionAccept PolicyRuleUpdateAction = "accept" PolicyRuleUpdateActionDrop PolicyRuleUpdateAction = "drop" ) +// Valid indicates whether the value is a known member of the PolicyRuleUpdateAction enum. +func (e PolicyRuleUpdateAction) Valid() bool { + switch e { + case PolicyRuleUpdateActionAccept: + return true + case PolicyRuleUpdateActionDrop: + return true + default: + return false + } +} + // Defines values for PolicyRuleUpdateProtocol. const ( PolicyRuleUpdateProtocolAll PolicyRuleUpdateProtocol = "all" @@ -283,6 +799,24 @@ const ( PolicyRuleUpdateProtocolUdp PolicyRuleUpdateProtocol = "udp" ) +// Valid indicates whether the value is a known member of the PolicyRuleUpdateProtocol enum. +func (e PolicyRuleUpdateProtocol) Valid() bool { + switch e { + case PolicyRuleUpdateProtocolAll: + return true + case PolicyRuleUpdateProtocolIcmp: + return true + case PolicyRuleUpdateProtocolNetbirdSsh: + return true + case PolicyRuleUpdateProtocolTcp: + return true + case PolicyRuleUpdateProtocolUdp: + return true + default: + return false + } +} + // Defines values for ResourceType. const ( ResourceTypeDomain ResourceType = "domain" @@ -291,12 +825,40 @@ const ( ResourceTypeSubnet ResourceType = "subnet" ) +// Valid indicates whether the value is a known member of the ResourceType enum. +func (e ResourceType) Valid() bool { + switch e { + case ResourceTypeDomain: + return true + case ResourceTypeHost: + return true + case ResourceTypePeer: + return true + case ResourceTypeSubnet: + return true + default: + return false + } +} + // Defines values for ReverseProxyDomainType. const ( ReverseProxyDomainTypeCustom ReverseProxyDomainType = "custom" ReverseProxyDomainTypeFree ReverseProxyDomainType = "free" ) +// Valid indicates whether the value is a known member of the ReverseProxyDomainType enum. +func (e ReverseProxyDomainType) Valid() bool { + switch e { + case ReverseProxyDomainTypeCustom: + return true + case ReverseProxyDomainTypeFree: + return true + default: + return false + } +} + // Defines values for SentinelOneMatchAttributesNetworkStatus. const ( SentinelOneMatchAttributesNetworkStatusConnected SentinelOneMatchAttributesNetworkStatus = "connected" @@ -304,6 +866,20 @@ const ( SentinelOneMatchAttributesNetworkStatusQuarantined SentinelOneMatchAttributesNetworkStatus = "quarantined" ) +// Valid indicates whether the value is a known member of the SentinelOneMatchAttributesNetworkStatus enum. +func (e SentinelOneMatchAttributesNetworkStatus) Valid() bool { + switch e { + case SentinelOneMatchAttributesNetworkStatusConnected: + return true + case SentinelOneMatchAttributesNetworkStatusDisconnected: + return true + case SentinelOneMatchAttributesNetworkStatusQuarantined: + return true + default: + return false + } +} + // Defines values for ServiceMetaStatus. const ( ServiceMetaStatusActive ServiceMetaStatus = "active" @@ -314,23 +890,77 @@ const ( ServiceMetaStatusTunnelNotCreated ServiceMetaStatus = "tunnel_not_created" ) +// Valid indicates whether the value is a known member of the ServiceMetaStatus enum. +func (e ServiceMetaStatus) Valid() bool { + switch e { + case ServiceMetaStatusActive: + return true + case ServiceMetaStatusCertificateFailed: + return true + case ServiceMetaStatusCertificatePending: + return true + case ServiceMetaStatusError: + return true + case ServiceMetaStatusPending: + return true + case ServiceMetaStatusTunnelNotCreated: + return true + default: + return false + } +} + // Defines values for ServiceTargetProtocol. const ( ServiceTargetProtocolHttp ServiceTargetProtocol = "http" ServiceTargetProtocolHttps ServiceTargetProtocol = "https" ) +// Valid indicates whether the value is a known member of the ServiceTargetProtocol enum. +func (e ServiceTargetProtocol) Valid() bool { + switch e { + case ServiceTargetProtocolHttp: + return true + case ServiceTargetProtocolHttps: + return true + default: + return false + } +} + // Defines values for ServiceTargetTargetType. const ( ServiceTargetTargetTypePeer ServiceTargetTargetType = "peer" ServiceTargetTargetTypeResource ServiceTargetTargetType = "resource" ) +// Valid indicates whether the value is a known member of the ServiceTargetTargetType enum. +func (e ServiceTargetTargetType) Valid() bool { + switch e { + case ServiceTargetTargetTypePeer: + return true + case ServiceTargetTargetTypeResource: + return true + default: + return false + } +} + // Defines values for ServiceTargetOptionsPathRewrite. const ( ServiceTargetOptionsPathRewritePreserve ServiceTargetOptionsPathRewrite = "preserve" ) +// Valid indicates whether the value is a known member of the ServiceTargetOptionsPathRewrite enum. +func (e ServiceTargetOptionsPathRewrite) Valid() bool { + switch e { + case ServiceTargetOptionsPathRewritePreserve: + return true + default: + return false + } +} + // Defines values for TenantResponseStatus. const ( TenantResponseStatusActive TenantResponseStatus = "active" @@ -339,6 +969,22 @@ const ( TenantResponseStatusPending TenantResponseStatus = "pending" ) +// Valid indicates whether the value is a known member of the TenantResponseStatus enum. +func (e TenantResponseStatus) Valid() bool { + switch e { + case TenantResponseStatusActive: + return true + case TenantResponseStatusExisting: + return true + case TenantResponseStatusInvited: + return true + case TenantResponseStatusPending: + return true + default: + return false + } +} + // Defines values for UserStatus. const ( UserStatusActive UserStatus = "active" @@ -346,11 +992,35 @@ const ( UserStatusInvited UserStatus = "invited" ) +// Valid indicates whether the value is a known member of the UserStatus enum. +func (e UserStatus) Valid() bool { + switch e { + case UserStatusActive: + return true + case UserStatusBlocked: + return true + case UserStatusInvited: + return true + default: + return false + } +} + // Defines values for WorkloadType. const ( WorkloadTypeBundle WorkloadType = "bundle" ) +// Valid indicates whether the value is a known member of the WorkloadType enum. +func (e WorkloadType) Valid() bool { + switch e { + case WorkloadTypeBundle: + return true + default: + return false + } +} + // Defines values for GetApiEventsNetworkTrafficParamsType. const ( GetApiEventsNetworkTrafficParamsTypeTYPEDROP GetApiEventsNetworkTrafficParamsType = "TYPE_DROP" @@ -359,12 +1029,40 @@ const ( GetApiEventsNetworkTrafficParamsTypeTYPEUNKNOWN GetApiEventsNetworkTrafficParamsType = "TYPE_UNKNOWN" ) +// Valid indicates whether the value is a known member of the GetApiEventsNetworkTrafficParamsType enum. +func (e GetApiEventsNetworkTrafficParamsType) Valid() bool { + switch e { + case GetApiEventsNetworkTrafficParamsTypeTYPEDROP: + return true + case GetApiEventsNetworkTrafficParamsTypeTYPEEND: + return true + case GetApiEventsNetworkTrafficParamsTypeTYPESTART: + return true + case GetApiEventsNetworkTrafficParamsTypeTYPEUNKNOWN: + return true + default: + return false + } +} + // Defines values for GetApiEventsNetworkTrafficParamsConnectionType. const ( GetApiEventsNetworkTrafficParamsConnectionTypeP2P GetApiEventsNetworkTrafficParamsConnectionType = "P2P" GetApiEventsNetworkTrafficParamsConnectionTypeROUTED GetApiEventsNetworkTrafficParamsConnectionType = "ROUTED" ) +// Valid indicates whether the value is a known member of the GetApiEventsNetworkTrafficParamsConnectionType enum. +func (e GetApiEventsNetworkTrafficParamsConnectionType) Valid() bool { + switch e { + case GetApiEventsNetworkTrafficParamsConnectionTypeP2P: + return true + case GetApiEventsNetworkTrafficParamsConnectionTypeROUTED: + return true + default: + return false + } +} + // Defines values for GetApiEventsNetworkTrafficParamsDirection. const ( GetApiEventsNetworkTrafficParamsDirectionDIRECTIONUNKNOWN GetApiEventsNetworkTrafficParamsDirection = "DIRECTION_UNKNOWN" @@ -372,6 +1070,20 @@ const ( GetApiEventsNetworkTrafficParamsDirectionINGRESS GetApiEventsNetworkTrafficParamsDirection = "INGRESS" ) +// Valid indicates whether the value is a known member of the GetApiEventsNetworkTrafficParamsDirection enum. +func (e GetApiEventsNetworkTrafficParamsDirection) Valid() bool { + switch e { + case GetApiEventsNetworkTrafficParamsDirectionDIRECTIONUNKNOWN: + return true + case GetApiEventsNetworkTrafficParamsDirectionEGRESS: + return true + case GetApiEventsNetworkTrafficParamsDirectionINGRESS: + return true + default: + return false + } +} + // Defines values for GetApiEventsProxyParamsSortBy. const ( GetApiEventsProxyParamsSortByAuthMethod GetApiEventsProxyParamsSortBy = "auth_method" @@ -387,12 +1099,54 @@ const ( GetApiEventsProxyParamsSortByUserId GetApiEventsProxyParamsSortBy = "user_id" ) +// Valid indicates whether the value is a known member of the GetApiEventsProxyParamsSortBy enum. +func (e GetApiEventsProxyParamsSortBy) Valid() bool { + switch e { + case GetApiEventsProxyParamsSortByAuthMethod: + return true + case GetApiEventsProxyParamsSortByDuration: + return true + case GetApiEventsProxyParamsSortByHost: + return true + case GetApiEventsProxyParamsSortByMethod: + return true + case GetApiEventsProxyParamsSortByPath: + return true + case GetApiEventsProxyParamsSortByReason: + return true + case GetApiEventsProxyParamsSortBySourceIp: + return true + case GetApiEventsProxyParamsSortByStatusCode: + return true + case GetApiEventsProxyParamsSortByTimestamp: + return true + case GetApiEventsProxyParamsSortByUrl: + return true + case GetApiEventsProxyParamsSortByUserId: + return true + default: + return false + } +} + // Defines values for GetApiEventsProxyParamsSortOrder. const ( GetApiEventsProxyParamsSortOrderAsc GetApiEventsProxyParamsSortOrder = "asc" GetApiEventsProxyParamsSortOrderDesc GetApiEventsProxyParamsSortOrder = "desc" ) +// Valid indicates whether the value is a known member of the GetApiEventsProxyParamsSortOrder enum. +func (e GetApiEventsProxyParamsSortOrder) Valid() bool { + switch e { + case GetApiEventsProxyParamsSortOrderAsc: + return true + case GetApiEventsProxyParamsSortOrderDesc: + return true + default: + return false + } +} + // Defines values for GetApiEventsProxyParamsMethod. const ( GetApiEventsProxyParamsMethodDELETE GetApiEventsProxyParamsMethod = "DELETE" @@ -404,18 +1158,64 @@ const ( GetApiEventsProxyParamsMethodPUT GetApiEventsProxyParamsMethod = "PUT" ) +// Valid indicates whether the value is a known member of the GetApiEventsProxyParamsMethod enum. +func (e GetApiEventsProxyParamsMethod) Valid() bool { + switch e { + case GetApiEventsProxyParamsMethodDELETE: + return true + case GetApiEventsProxyParamsMethodGET: + return true + case GetApiEventsProxyParamsMethodHEAD: + return true + case GetApiEventsProxyParamsMethodOPTIONS: + return true + case GetApiEventsProxyParamsMethodPATCH: + return true + case GetApiEventsProxyParamsMethodPOST: + return true + case GetApiEventsProxyParamsMethodPUT: + return true + default: + return false + } +} + // Defines values for GetApiEventsProxyParamsStatus. const ( GetApiEventsProxyParamsStatusFailed GetApiEventsProxyParamsStatus = "failed" GetApiEventsProxyParamsStatusSuccess GetApiEventsProxyParamsStatus = "success" ) +// Valid indicates whether the value is a known member of the GetApiEventsProxyParamsStatus enum. +func (e GetApiEventsProxyParamsStatus) Valid() bool { + switch e { + case GetApiEventsProxyParamsStatusFailed: + return true + case GetApiEventsProxyParamsStatusSuccess: + return true + default: + return false + } +} + // Defines values for PutApiIntegrationsMspTenantsIdInviteJSONBodyValue. const ( PutApiIntegrationsMspTenantsIdInviteJSONBodyValueAccept PutApiIntegrationsMspTenantsIdInviteJSONBodyValue = "accept" PutApiIntegrationsMspTenantsIdInviteJSONBodyValueDecline PutApiIntegrationsMspTenantsIdInviteJSONBodyValue = "decline" ) +// Valid indicates whether the value is a known member of the PutApiIntegrationsMspTenantsIdInviteJSONBodyValue enum. +func (e PutApiIntegrationsMspTenantsIdInviteJSONBodyValue) Valid() bool { + switch e { + case PutApiIntegrationsMspTenantsIdInviteJSONBodyValueAccept: + return true + case PutApiIntegrationsMspTenantsIdInviteJSONBodyValueDecline: + return true + default: + return false + } +} + // AccessiblePeer defines model for AccessiblePeer. type AccessiblePeer struct { // CityName Commonly used English name of the city @@ -598,7 +1398,7 @@ type BundleParameters struct { // BundleResult defines model for BundleResult. type BundleResult struct { - UploadKey *string `json:"upload_key"` + UploadKey *string `json:"upload_key,omitempty"` } // BundleWorkloadRequest defines model for BundleWorkloadRequest. @@ -1406,9 +2206,9 @@ type JobRequest struct { // JobResponse defines model for JobResponse. type JobResponse struct { - CompletedAt *time.Time `json:"completed_at"` + CompletedAt *time.Time `json:"completed_at,omitempty"` CreatedAt time.Time `json:"created_at"` - FailedReason *string `json:"failed_reason"` + FailedReason *string `json:"failed_reason,omitempty"` Id string `json:"id"` Status JobResponseStatus `json:"status"` TriggeredBy string `json:"triggered_by"` @@ -2419,6 +3219,12 @@ type ProxyAccessLog struct { // AuthMethodUsed Authentication method used (e.g., password, pin, oidc) AuthMethodUsed *string `json:"auth_method_used,omitempty"` + // BytesDownload Bytes downloaded (response body size) + BytesDownload int64 `json:"bytes_download"` + + // BytesUpload Bytes uploaded (request body size) + BytesUpload int64 `json:"bytes_upload"` + // CityName City name from geolocation CityName *string `json:"city_name,omitempty"` diff --git a/shared/management/proto/management.pb.go b/shared/management/proto/management.pb.go index 97a2a4d18..2c66bb946 100644 --- a/shared/management/proto/management.pb.go +++ b/shared/management/proto/management.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v6.33.3 +// protoc v6.33.0 // source: management.proto package proto diff --git a/shared/management/proto/proxy_service.pb.go b/shared/management/proto/proxy_service.pb.go index 77c8ea4f4..275e8be37 100644 --- a/shared/management/proto/proxy_service.pb.go +++ b/shared/management/proto/proxy_service.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v6.33.3 +// protoc v6.33.0 // source: proxy_service.proto package proto @@ -740,6 +740,8 @@ type AccessLog struct { AuthMechanism string `protobuf:"bytes,11,opt,name=auth_mechanism,json=authMechanism,proto3" json:"auth_mechanism,omitempty"` UserId string `protobuf:"bytes,12,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` AuthSuccess bool `protobuf:"varint,13,opt,name=auth_success,json=authSuccess,proto3" json:"auth_success,omitempty"` + BytesUpload int64 `protobuf:"varint,14,opt,name=bytes_upload,json=bytesUpload,proto3" json:"bytes_upload,omitempty"` + BytesDownload int64 `protobuf:"varint,15,opt,name=bytes_download,json=bytesDownload,proto3" json:"bytes_download,omitempty"` } func (x *AccessLog) Reset() { @@ -865,6 +867,20 @@ func (x *AccessLog) GetAuthSuccess() bool { return false } +func (x *AccessLog) GetBytesUpload() int64 { + if x != nil { + return x.BytesUpload + } + return 0 +} + +func (x *AccessLog) GetBytesDownload() int64 { + if x != nil { + return x.BytesDownload + } + return 0 +} + type AuthenticateRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1698,7 +1714,7 @@ var file_proxy_service_proto_rawDesc = []byte{ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x03, 0x6c, 0x6f, 0x67, 0x22, 0x17, 0x0a, 0x15, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa0, 0x03, 0x0a, 0x09, 0x41, 0x63, 0x63, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xea, 0x03, 0x0a, 0x09, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, @@ -1724,153 +1740,158 @@ var file_proxy_service_proto_rawDesc = []byte{ 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, - 0x61, 0x75, 0x74, 0x68, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x13, - 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x49, 0x64, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x2a, 0x0a, - 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x48, 0x00, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x22, 0x2d, 0x0a, 0x0f, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, - 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, - 0x6f, 0x72, 0x64, 0x22, 0x1e, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x70, 0x69, 0x6e, 0x22, 0x55, 0x0a, 0x14, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, - 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xf3, 0x01, 0x0a, 0x17, 0x53, - 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x49, 0x73, - 0x73, 0x75, 0x65, 0x64, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, - 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb8, 0x01, 0x0a, - 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x77, - 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, 0x72, 0x64, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, - 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x77, 0x69, 0x72, 0x65, 0x67, - 0x75, 0x61, 0x72, 0x64, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, - 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x0d, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x65, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4f, - 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, + 0x61, 0x75, 0x74, 0x68, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x5f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x25, + 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x77, + 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xb6, 0x01, 0x0a, 0x13, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, - 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x6c, 0x22, - 0x26, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x55, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8c, - 0x01, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, - 0x72, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, - 0x73, 0x65, 0x72, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, - 0x65, 0x64, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x2a, 0x64, 0x0a, - 0x16, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, - 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x18, 0x0a, 0x14, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, - 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, - 0x44, 0x10, 0x02, 0x2a, 0x46, 0x0a, 0x0f, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x52, - 0x45, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, - 0x12, 0x19, 0x0a, 0x15, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x57, 0x52, 0x49, 0x54, 0x45, - 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x10, 0x01, 0x2a, 0xc8, 0x01, 0x0a, 0x0b, - 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x14, 0x50, - 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, - 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, - 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x23, - 0x0a, 0x1f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x54, - 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, - 0x44, 0x10, 0x02, 0x12, 0x24, 0x0a, 0x20, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, + 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x08, + 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x2a, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x03, + 0x70, 0x69, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2d, + 0x0a, 0x0f, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x1e, 0x0a, + 0x0a, 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, + 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x22, 0x55, 0x0a, + 0x14, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, + 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xf3, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2f, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, + 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x2d, 0x0a, 0x12, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x69, + 0x73, 0x73, 0x75, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x49, 0x73, 0x73, 0x75, 0x65, 0x64, 0x12, 0x28, + 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x65, + 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, + 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, + 0x72, 0x64, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x12, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, 0x72, 0x64, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x22, 0x6f, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, + 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, + 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x22, 0x65, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x6c, 0x22, 0x26, 0x0a, 0x12, 0x47, 0x65, 0x74, + 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, + 0x6c, 0x22, 0x55, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8c, 0x01, 0x0a, 0x17, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, + 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x65, 0x6d, 0x61, 0x69, + 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x73, 0x65, 0x72, 0x45, 0x6d, 0x61, + 0x69, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, + 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x2a, 0x64, 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x78, 0x79, + 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x55, 0x50, + 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x10, 0x02, 0x2a, 0x46, 0x0a, + 0x0f, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x65, + 0x12, 0x18, 0x0a, 0x14, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x57, 0x52, 0x49, 0x54, 0x45, + 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x15, 0x50, 0x41, + 0x54, 0x48, 0x5f, 0x52, 0x45, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, + 0x52, 0x56, 0x45, 0x10, 0x01, 0x2a, 0xc8, 0x01, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, + 0x17, 0x0a, 0x13, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, + 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x50, 0x52, 0x4f, 0x58, + 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, + 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x24, 0x0a, + 0x20, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x45, + 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, + 0x47, 0x10, 0x03, 0x12, 0x23, 0x0a, 0x1f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, - 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x23, 0x0a, 0x1f, 0x50, 0x52, 0x4f, - 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, - 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x16, - 0x0a, 0x12, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x45, - 0x52, 0x52, 0x4f, 0x52, 0x10, 0x05, 0x32, 0xfc, 0x04, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5f, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4d, 0x61, - 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, - 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, - 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x54, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, - 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, - 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1f, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, - 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, - 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x5d, 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, + 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x52, 0x4f, 0x58, + 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x05, + 0x32, 0xfc, 0x04, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x5f, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, + 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x5a, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, - 0x65, 0x65, 0x72, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, - 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0a, - 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x12, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, - 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, - 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0f, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, + 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x30, 0x01, 0x12, 0x54, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x4c, 0x6f, 0x67, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x68, + 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x10, 0x53, + 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, + 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, + 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0f, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x12, 0x22, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, + 0x43, 0x55, 0x52, 0x4c, 0x12, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, + 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( diff --git a/shared/management/proto/proxy_service.proto b/shared/management/proto/proxy_service.proto index be553095d..195b60f01 100644 --- a/shared/management/proto/proxy_service.proto +++ b/shared/management/proto/proxy_service.proto @@ -115,6 +115,8 @@ message AccessLog { string auth_mechanism = 11; string user_id = 12; bool auth_success = 13; + int64 bytes_upload = 14; + int64 bytes_download = 15; } message AuthenticateRequest { From 5585adce18cea6ba6e5673039b5d1f104006cedc Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Mon, 9 Mar 2026 19:04:04 +0100 Subject: [PATCH 199/374] [management] add activity events for domains (#5548) * add activity events for domains * fix test * update activity codes * update activity codes --- .../modules/reverseproxy/domain/domain.go | 9 +++++++ .../reverseproxy/domain/manager/manager.go | 27 ++++++++++++++----- management/internals/server/modules.go | 2 +- management/server/activity/codes.go | 11 ++++++++ .../testing/testing_tools/channel/channel.go | 2 +- 5 files changed, 43 insertions(+), 8 deletions(-) diff --git a/management/internals/modules/reverseproxy/domain/domain.go b/management/internals/modules/reverseproxy/domain/domain.go index da3432626..83fd669af 100644 --- a/management/internals/modules/reverseproxy/domain/domain.go +++ b/management/internals/modules/reverseproxy/domain/domain.go @@ -15,3 +15,12 @@ type Domain struct { Type Type `gorm:"-"` Validated bool } + +// EventMeta returns activity event metadata for a domain +func (d *Domain) EventMeta() map[string]any { + return map[string]any{ + "domain": d.Domain, + "target_cluster": d.TargetCluster, + "validated": d.Validated, + } +} diff --git a/management/internals/modules/reverseproxy/domain/manager/manager.go b/management/internals/modules/reverseproxy/domain/manager/manager.go index 12dd051fd..8bbc98726 100644 --- a/management/internals/modules/reverseproxy/domain/manager/manager.go +++ b/management/internals/modules/reverseproxy/domain/manager/manager.go @@ -9,6 +9,8 @@ import ( log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" + "github.com/netbirdio/netbird/management/server/account" + "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/permissions/modules" "github.com/netbirdio/netbird/management/server/permissions/operations" @@ -36,16 +38,16 @@ type Manager struct { validator domain.Validator proxyManager proxyManager permissionsManager permissions.Manager + accountManager account.Manager } -func NewManager(store store, proxyMgr proxyManager, permissionsManager permissions.Manager) Manager { +func NewManager(store store, proxyMgr proxyManager, permissionsManager permissions.Manager, accountManager account.Manager) Manager { return Manager{ - store: store, - proxyManager: proxyMgr, - validator: domain.Validator{ - Resolver: net.DefaultResolver, - }, + store: store, + proxyManager: proxyMgr, + validator: domain.Validator{Resolver: net.DefaultResolver}, permissionsManager: permissionsManager, + accountManager: accountManager, } } @@ -136,6 +138,9 @@ func (m Manager) CreateDomain(ctx context.Context, accountID, userID, domainName if err != nil { return d, fmt.Errorf("create domain in store: %w", err) } + + m.accountManager.StoreEvent(ctx, userID, d.ID, accountID, activity.DomainAdded, d.EventMeta()) + return d, nil } @@ -148,10 +153,18 @@ func (m Manager) DeleteDomain(ctx context.Context, accountID, userID, domainID s return status.NewPermissionDeniedError() } + d, err := m.store.GetCustomDomain(ctx, accountID, domainID) + if err != nil { + return fmt.Errorf("get domain from store: %w", err) + } + if err := m.store.DeleteCustomDomain(ctx, accountID, domainID); err != nil { // TODO: check for "no records" type error. Because that is a success condition. return fmt.Errorf("delete domain from store: %w", err) } + + m.accountManager.StoreEvent(ctx, userID, domainID, accountID, activity.DomainDeleted, d.EventMeta()) + return nil } @@ -218,6 +231,8 @@ func (m Manager) ValidateDomain(ctx context.Context, accountID, userID, domainID }).WithError(err).Error("update custom domain in store") return } + + m.accountManager.StoreEvent(context.Background(), userID, domainID, accountID, activity.DomainValidated, d.EventMeta()) } else { log.WithFields(log.Fields{ "accountID": accountID, diff --git a/management/internals/server/modules.go b/management/internals/server/modules.go index 2383019e2..29a8953ac 100644 --- a/management/internals/server/modules.go +++ b/management/internals/server/modules.go @@ -210,7 +210,7 @@ func (s *BaseServer) ProxyManager() proxy.Manager { func (s *BaseServer) ReverseProxyDomainManager() *manager.Manager { return Create(s, func() *manager.Manager { - m := manager.NewManager(s.Store(), s.ProxyManager(), s.PermissionsManager()) + m := manager.NewManager(s.Store(), s.ProxyManager(), s.PermissionsManager(), s.AccountManager()) return &m }) } diff --git a/management/server/activity/codes.go b/management/server/activity/codes.go index 53cf30d4c..948d599ba 100644 --- a/management/server/activity/codes.go +++ b/management/server/activity/codes.go @@ -220,6 +220,13 @@ const ( // AccountPeerExposeDisabled indicates that a user disabled peer expose for the account AccountPeerExposeDisabled Activity = 115 + // DomainAdded indicates that a user added a custom domain + DomainAdded Activity = 118 + // DomainDeleted indicates that a user deleted a custom domain + DomainDeleted Activity = 119 + // DomainValidated indicates that a custom domain was validated + DomainValidated Activity = 120 + AccountDeleted Activity = 99999 ) @@ -364,6 +371,10 @@ var activityMap = map[Activity]Code{ AccountPeerExposeEnabled: {"Account peer expose enabled", "account.setting.peer.expose.enable"}, AccountPeerExposeDisabled: {"Account peer expose disabled", "account.setting.peer.expose.disable"}, + + DomainAdded: {"Domain added", "domain.add"}, + DomainDeleted: {"Domain deleted", "domain.delete"}, + DomainValidated: {"Domain validated", "domain.validate"}, } // StringCode returns a string code of the activity diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index 5e33ad652..462013963 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -108,7 +108,7 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee t.Fatalf("Failed to create proxy manager: %v", err) } proxyServiceServer := nbgrpc.NewProxyServiceServer(accessLogsManager, proxyTokenStore, pkceverifierStore, nbgrpc.ProxyOIDCConfig{}, peersManager, userManager, proxyMgr) - domainManager := manager.NewManager(store, proxyMgr, permissionsManager) + domainManager := manager.NewManager(store, proxyMgr, permissionsManager, am) serviceProxyController, err := proxymanager.NewGRPCController(proxyServiceServer, noopMeter) if err != nil { t.Fatalf("Failed to create proxy controller: %v", err) From 11f891220e2ee5b1395e33a2e7a3672cd07f4bef Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Wed, 11 Mar 2026 13:01:13 +0100 Subject: [PATCH 200/374] [management] create a shallow copy of the account when buffering (#5572) --- management/server/account_request_buffer.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/management/server/account_request_buffer.go b/management/server/account_request_buffer.go index fa6c45856..e1672c2d0 100644 --- a/management/server/account_request_buffer.go +++ b/management/server/account_request_buffer.go @@ -86,7 +86,14 @@ func (ac *AccountRequestBuffer) processGetAccountBatch(ctx context.Context, acco result := &AccountResult{Account: account, Err: err} for _, req := range requests { - req.ResultChan <- result + if account != nil { + // Shallow copy the account so each goroutine gets its own struct value. + // This prevents data races when callers mutate fields like Policies. + accountCopy := *account + req.ResultChan <- &AccountResult{Account: &accountCopy, Err: err} + } else { + req.ResultChan <- result + } close(req.ResultChan) } } From 7a23c57cf8157f1c62619e68c6c104202c58a96b Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Wed, 11 Mar 2026 15:52:42 +0100 Subject: [PATCH 201/374] [self-hosted] Remove extra proxy domain from getting started (#5573) --- infrastructure_files/getting-started.sh | 47 ++----------------------- 1 file changed, 2 insertions(+), 45 deletions(-) diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh index 7fd87ee8e..70088d66a 100755 --- a/infrastructure_files/getting-started.sh +++ b/infrastructure_files/getting-started.sh @@ -182,44 +182,6 @@ read_enable_proxy() { return 0 } -read_proxy_domain() { - local suggested_proxy="proxy.${BASE_DOMAIN}" - - echo "" > /dev/stderr - echo "NOTE: The proxy domain must be different from the management domain ($NETBIRD_DOMAIN)" > /dev/stderr - echo "to avoid TLS certificate conflicts." > /dev/stderr - echo "" > /dev/stderr - echo "You also need to add a wildcard DNS record for the proxy domain," > /dev/stderr - echo "e.g. *.${suggested_proxy} pointing to the same server domain as $NETBIRD_DOMAIN with a CNAME record." > /dev/stderr - echo "" > /dev/stderr - echo -n "Enter the domain for the NetBird Proxy (e.g. ${suggested_proxy}): " > /dev/stderr - read -r READ_PROXY_DOMAIN < /dev/tty - - if [[ -z "$READ_PROXY_DOMAIN" ]]; then - echo "The proxy domain cannot be empty." > /dev/stderr - read_proxy_domain - return - fi - - if [[ "$READ_PROXY_DOMAIN" == "$NETBIRD_DOMAIN" ]]; then - echo "" > /dev/stderr - echo "WARNING: The proxy domain cannot be the same as the management domain ($NETBIRD_DOMAIN)." > /dev/stderr - read_proxy_domain - return - fi - - echo ${READ_PROXY_DOMAIN} | grep ${NETBIRD_DOMAIN} > /dev/null - if [[ $? -eq 0 ]]; then - echo "" > /dev/stderr - echo "WARNING: The proxy domain cannot be a subdomain of the management domain ($NETBIRD_DOMAIN)." > /dev/stderr - read_proxy_domain - return - fi - - echo "$READ_PROXY_DOMAIN" - return 0 -} - read_traefik_acme_email() { echo "" > /dev/stderr echo "Enter your email for Let's Encrypt certificate notifications." > /dev/stderr @@ -334,7 +296,6 @@ initialize_default_values() { # NetBird Proxy configuration ENABLE_PROXY="false" - PROXY_DOMAIN="" PROXY_TOKEN="" return 0 } @@ -364,9 +325,6 @@ configure_reverse_proxy() { if [[ "$REVERSE_PROXY_TYPE" == "0" ]]; then TRAEFIK_ACME_EMAIL=$(read_traefik_acme_email) ENABLE_PROXY=$(read_enable_proxy) - if [[ "$ENABLE_PROXY" == "true" ]]; then - PROXY_DOMAIN=$(read_proxy_domain) - fi fi # Handle external Traefik-specific prompts (option 1) @@ -813,7 +771,7 @@ NB_PROXY_MANAGEMENT_ADDRESS=http://netbird-server:80 # Allow insecure gRPC connection to management (required for internal Docker network) NB_PROXY_ALLOW_INSECURE=true # Public URL where this proxy is reachable (used for cluster registration) -NB_PROXY_DOMAIN=$PROXY_DOMAIN +NB_PROXY_DOMAIN=$NETBIRD_DOMAIN NB_PROXY_ADDRESS=:8443 NB_PROXY_TOKEN=$PROXY_TOKEN NB_PROXY_CERTIFICATE_DIRECTORY=/certs @@ -1203,8 +1161,7 @@ print_builtin_traefik_instructions() { echo " The proxy handles its own TLS certificates via ACME TLS-ALPN-01 challenge." echo " Point your proxy domain to this server's domain address like in the examples below:" echo "" - echo " $PROXY_DOMAIN CNAME $NETBIRD_DOMAIN" - echo " *.$PROXY_DOMAIN CNAME $NETBIRD_DOMAIN" + echo " *.$NETBIRD_DOMAIN CNAME $NETBIRD_DOMAIN" echo "" fi return 0 From b5489d4986e6131f663ba24e330cbb3af4b92580 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Wed, 11 Mar 2026 18:19:17 +0100 Subject: [PATCH 202/374] [management] set components network map by default and optimize memory usage (#5575) * Network map now defaults to compacted mode at startup; environment parsing issues yield clearer warnings and disabling compacted mode is logged. * **Bug Fixes** * DNS enablement and nameserver selection now correctly respect group membership, reducing incorrect DNS assignments. * **Refactor** * Internal routing and firewall rule generation streamlined for more consistent rule IDs and safer peer handling. * **Performance** * Minor memory and slice allocation improvements for peer/group processing. --- .../network_map/controller/controller.go | 11 +- management/server/types/account_components.go | 4 +- .../server/types/networkmap_components.go | 119 ++++++------------ 3 files changed, 51 insertions(+), 83 deletions(-) diff --git a/management/internals/controllers/network_map/controller/controller.go b/management/internals/controllers/network_map/controller/controller.go index 121c55ac5..4b414df6f 100644 --- a/management/internals/controllers/network_map/controller/controller.go +++ b/management/internals/controllers/network_map/controller/controller.go @@ -87,9 +87,14 @@ func NewController(ctx context.Context, store store.Store, metrics telemetry.App newNetworkMapBuilder = false } - compactedNetworkMap, err := strconv.ParseBool(os.Getenv(types.EnvNewNetworkMapCompacted)) - if err != nil { - log.WithContext(ctx).Warnf("failed to parse %s, using default value false: %v", types.EnvNewNetworkMapCompacted, err) + compactedNetworkMap := true + compactedEnv := os.Getenv(types.EnvNewNetworkMapCompacted) + parsedCompactedNmap, err := strconv.ParseBool(compactedEnv) + if err != nil && len(compactedEnv) > 0 { + log.WithContext(ctx).Warnf("failed to parse %s, using default value true: %v", types.EnvNewNetworkMapCompacted, err) + } + if err == nil && !parsedCompactedNmap { + log.WithContext(ctx).Info("disabling compacted mode") compactedNetworkMap = false } diff --git a/management/server/types/account_components.go b/management/server/types/account_components.go index 1eb25cecc..bd4244546 100644 --- a/management/server/types/account_components.go +++ b/management/server/types/account_components.go @@ -368,7 +368,7 @@ func (a *Account) getPeersGroupsPoliciesRoutes( func (a *Account) getPeersFromGroups(ctx context.Context, groups []string, peerID string, sourcePostureChecksIDs []string, validatedPeersMap map[string]struct{}, postureFailedPeers *map[string]map[string]struct{}) ([]string, bool) { peerInGroups := false - filteredPeerIDs := make([]string, 0, len(a.Peers)) + filteredPeerIDs := make([]string, 0, len(groups)) seenPeerIds := make(map[string]struct{}, len(groups)) for _, gid := range groups { @@ -378,7 +378,7 @@ func (a *Account) getPeersFromGroups(ctx context.Context, groups []string, peerI } if group.IsGroupAll() || len(groups) == 1 { - filteredPeerIDs = filteredPeerIDs[:0] + filteredPeerIDs = make([]string, 0, len(group.Peers)) peerInGroups = false for _, pid := range group.Peers { peer, ok := a.Peers[pid] diff --git a/management/server/types/networkmap_components.go b/management/server/types/networkmap_components.go index ab6b006e6..23d84a994 100644 --- a/management/server/types/networkmap_components.go +++ b/management/server/types/networkmap_components.go @@ -134,7 +134,7 @@ func (c *NetworkMapComponents) Calculate(ctx context.Context) *NetworkMap { sourcePeers, ) - dnsManagementStatus := c.getPeerDNSManagementStatus(targetPeerID) + dnsManagementStatus := c.getPeerDNSManagementStatusFromGroups(peerGroups) dnsUpdate := nbdns.Config{ ServiceEnable: dnsManagementStatus, } @@ -152,7 +152,7 @@ func (c *NetworkMapComponents) Calculate(ctx context.Context) *NetworkMap { customZones = append(customZones, c.AccountZones...) dnsUpdate.CustomZones = customZones - dnsUpdate.NameServerGroups = c.getPeerNSGroups(targetPeerID) + dnsUpdate.NameServerGroups = c.getPeerNSGroupsFromGroups(targetPeerID, peerGroups) } return &NetworkMap{ @@ -278,6 +278,16 @@ func (c *NetworkMapComponents) connResourcesGenerator(targetPeer *nbpeer.Peer) ( peers := make([]*nbpeer.Peer, 0) return func(rule *PolicyRule, groupPeers []*nbpeer.Peer, direction int) { + protocol := rule.Protocol + if protocol == PolicyRuleProtocolNetbirdSSH { + protocol = PolicyRuleProtocolTCP + } + + protocolStr := string(protocol) + actionStr := string(rule.Action) + dirStr := strconv.Itoa(direction) + portsJoined := strings.Join(rule.Ports, ",") + for _, peer := range groupPeers { if peer == nil { continue @@ -288,21 +298,18 @@ func (c *NetworkMapComponents) connResourcesGenerator(targetPeer *nbpeer.Peer) ( peersExists[peer.ID] = struct{}{} } - protocol := rule.Protocol - if protocol == PolicyRuleProtocolNetbirdSSH { - protocol = PolicyRuleProtocolTCP - } + peerIP := net.IP(peer.IP).String() fr := FirewallRule{ PolicyID: rule.ID, - PeerIP: net.IP(peer.IP).String(), + PeerIP: peerIP, Direction: direction, - Action: string(rule.Action), - Protocol: string(protocol), + Action: actionStr, + Protocol: protocolStr, } - ruleID := rule.ID + fr.PeerIP + strconv.Itoa(direction) + - fr.Protocol + fr.Action + strings.Join(rule.Ports, ",") + ruleID := rule.ID + peerIP + dirStr + + protocolStr + actionStr + portsJoined if _, ok := rulesExists[ruleID]; ok { continue } @@ -313,13 +320,7 @@ func (c *NetworkMapComponents) connResourcesGenerator(targetPeer *nbpeer.Peer) ( continue } - rules = append(rules, expandPortsAndRanges(fr, &PolicyRule{ - ID: rule.ID, - Ports: rule.Ports, - PortRanges: rule.PortRanges, - Protocol: rule.Protocol, - Action: rule.Action, - }, targetPeer)...) + rules = append(rules, expandPortsAndRanges(fr, rule, targetPeer)...) } }, func() ([]*nbpeer.Peer, []*FirewallRule) { return peers, rules @@ -395,7 +396,7 @@ func (c *NetworkMapComponents) getPeerFromResource(resource Resource, peerID str } func (c *NetworkMapComponents) filterPeersByLoginExpiration(aclPeers []*nbpeer.Peer) ([]*nbpeer.Peer, []*nbpeer.Peer) { - var peersToConnect []*nbpeer.Peer + peersToConnect := make([]*nbpeer.Peer, 0, len(aclPeers)) var expiredPeers []*nbpeer.Peer for _, p := range aclPeers { @@ -410,35 +411,35 @@ func (c *NetworkMapComponents) filterPeersByLoginExpiration(aclPeers []*nbpeer.P return peersToConnect, expiredPeers } -func (c *NetworkMapComponents) getPeerDNSManagementStatus(peerID string) bool { - peerGroups := c.GetPeerGroups(peerID) - enabled := true +func (c *NetworkMapComponents) getPeerDNSManagementStatusFromGroups(peerGroups map[string]struct{}) bool { for _, groupID := range c.DNSSettings.DisabledManagementGroups { if _, found := peerGroups[groupID]; found { - enabled = false - break + return false } } - return enabled + return true } -func (c *NetworkMapComponents) getPeerNSGroups(peerID string) []*nbdns.NameServerGroup { - groupList := c.GetPeerGroups(peerID) - +func (c *NetworkMapComponents) getPeerNSGroupsFromGroups(peerID string, groupList map[string]struct{}) []*nbdns.NameServerGroup { var peerNSGroups []*nbdns.NameServerGroup + targetPeerInfo := c.GetPeerInfo(peerID) + if targetPeerInfo == nil { + return peerNSGroups + } + + peerIPStr := targetPeerInfo.IP.String() + for _, nsGroup := range c.NameServerGroups { if !nsGroup.Enabled { continue } for _, gID := range nsGroup.Groups { - _, found := groupList[gID] - if found { - targetPeerInfo := c.GetPeerInfo(peerID) - if targetPeerInfo != nil && !c.peerIsNameserver(targetPeerInfo, nsGroup) { + if _, found := groupList[gID]; found { + if !c.peerIsNameserver(peerIPStr, nsGroup) { peerNSGroups = append(peerNSGroups, nsGroup.Copy()) - break } + break } } } @@ -446,9 +447,9 @@ func (c *NetworkMapComponents) getPeerNSGroups(peerID string) []*nbdns.NameServe return peerNSGroups } -func (c *NetworkMapComponents) peerIsNameserver(peerInfo *nbpeer.Peer, nsGroup *nbdns.NameServerGroup) bool { +func (c *NetworkMapComponents) peerIsNameserver(peerIPStr string, nsGroup *nbdns.NameServerGroup) bool { for _, ns := range nsGroup.NameServers { - if peerInfo.IP.String() == ns.IP.String() { + if peerIPStr == ns.IP.String() { return true } } @@ -489,14 +490,13 @@ func (c *NetworkMapComponents) getRoutingPeerRoutes(peerID string) (enabledRoute } seenRoute[r.ID] = struct{}{} - routeObj := c.copyRoute(r) - routeObj.Peer = peerInfo.Key + r.Peer = peerInfo.Key if r.Enabled { - enabledRoutes = append(enabledRoutes, routeObj) + enabledRoutes = append(enabledRoutes, r) return } - disabledRoutes = append(disabledRoutes, routeObj) + disabledRoutes = append(disabledRoutes, r) } for _, r := range c.Routes { @@ -510,7 +510,7 @@ func (c *NetworkMapComponents) getRoutingPeerRoutes(peerID string) (enabledRoute continue } - newPeerRoute := c.copyRoute(r) + newPeerRoute := r.Copy() newPeerRoute.Peer = id newPeerRoute.PeerGroups = nil newPeerRoute.ID = route.ID(string(r.ID) + ":" + id) @@ -519,50 +519,13 @@ func (c *NetworkMapComponents) getRoutingPeerRoutes(peerID string) (enabledRoute } } if r.Peer == peerID { - takeRoute(c.copyRoute(r)) + takeRoute(r.Copy()) } } return enabledRoutes, disabledRoutes } -func (c *NetworkMapComponents) copyRoute(r *route.Route) *route.Route { - var groups, accessControlGroups, peerGroups []string - var domains domain.List - - if r.Groups != nil { - groups = append([]string{}, r.Groups...) - } - if r.AccessControlGroups != nil { - accessControlGroups = append([]string{}, r.AccessControlGroups...) - } - if r.PeerGroups != nil { - peerGroups = append([]string{}, r.PeerGroups...) - } - if r.Domains != nil { - domains = append(domain.List{}, r.Domains...) - } - - return &route.Route{ - ID: r.ID, - AccountID: r.AccountID, - Network: r.Network, - NetworkType: r.NetworkType, - Description: r.Description, - Peer: r.Peer, - PeerID: r.PeerID, - Metric: r.Metric, - Masquerade: r.Masquerade, - NetID: r.NetID, - Enabled: r.Enabled, - Groups: groups, - AccessControlGroups: accessControlGroups, - PeerGroups: peerGroups, - Domains: domains, - KeepRoute: r.KeepRoute, - SkipAutoApply: r.SkipAutoApply, - } -} func (c *NetworkMapComponents) filterRoutesByGroups(routes []*route.Route, groupListMap LookupMap) []*route.Route { var filteredRoutes []*route.Route From d3d6a327e002245ff6cb12350c7c0e2253ee092c Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Wed, 11 Mar 2026 19:18:37 +0100 Subject: [PATCH 203/374] [proxy] read cert from disk if available instead of cert manager (#5574) * **New Features** * Asynchronous certificate prefetch that races live issuance with periodic on-disk cache checks to surface certificates faster. * Centralized recording and notification when certificates become available. * New on-disk certificate reading and validation to allow immediate use of cached certs. * **Bug Fixes & Performance** * Optimized retrieval by polling disk while fetching in background to reduce latency. * Added cancellation and timeout handling to fail stalled certificate operations reliably. --- proxy/internal/acme/manager.go | 117 ++++++++++++++++++++++++++++----- 1 file changed, 101 insertions(+), 16 deletions(-) diff --git a/proxy/internal/acme/manager.go b/proxy/internal/acme/manager.go index ebc15314b..b1e532e83 100644 --- a/proxy/internal/acme/manager.go +++ b/proxy/internal/acme/manager.go @@ -7,9 +7,12 @@ import ( "encoding/asn1" "encoding/base64" "encoding/binary" + "encoding/pem" "fmt" + "math/rand/v2" "net" "slices" + "strings" "sync" "time" @@ -137,7 +140,12 @@ func (mgr *Manager) AddDomain(d domain.Domain, accountID, serviceID string) { // It acquires a distributed lock to prevent multiple replicas from issuing // duplicate ACME requests. The second replica will block until the first // finishes, then find the certificate in the cache. +// ACME and periodic disk reads race; whichever produces a valid certificate +// first wins. This handles cases where locking is unreliable and another +// replica already wrote the cert to the shared cache. func (mgr *Manager) prefetchCertificate(d domain.Domain) { + time.Sleep(time.Duration(rand.IntN(200)) * time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() @@ -153,26 +161,105 @@ func (mgr *Manager) prefetchCertificate(d domain.Domain) { defer unlock() } - hello := &tls.ClientHelloInfo{ - ServerName: name, - Conn: &dummyConn{ctx: ctx}, - } - - start := time.Now() - cert, err := mgr.GetCertificate(hello) - elapsed := time.Since(start) - if err != nil { - mgr.logger.Warnf("prefetch certificate for domain %q in %s: %v", name, elapsed.String(), err) - mgr.setDomainState(d, domainFailed, err.Error()) + if cert, err := mgr.readCertFromDisk(ctx, name); err == nil { + mgr.logger.Infof("certificate for domain %q already on disk, skipping ACME", name) + mgr.recordAndNotify(ctx, d, name, cert, 0) return } - if mgr.metrics != nil { + // Run ACME in a goroutine so we can race it against periodic disk reads. + // autocert uses its own internal context and cannot be cancelled externally. + type acmeResult struct { + cert *tls.Certificate + err error + } + acmeCh := make(chan acmeResult, 1) + hello := &tls.ClientHelloInfo{ServerName: name, Conn: &dummyConn{ctx: ctx}} + go func() { + cert, err := mgr.GetCertificate(hello) + acmeCh <- acmeResult{cert, err} + }() + + start := time.Now() + diskTicker := time.NewTicker(5 * time.Second) + defer diskTicker.Stop() + + for { + select { + case res := <-acmeCh: + elapsed := time.Since(start) + if res.err != nil { + mgr.logger.Warnf("prefetch certificate for domain %q in %s: %v", name, elapsed.String(), res.err) + mgr.setDomainState(d, domainFailed, res.err.Error()) + return + } + mgr.recordAndNotify(ctx, d, name, res.cert, elapsed) + return + + case <-diskTicker.C: + cert, err := mgr.readCertFromDisk(context.Background(), name) + if err != nil { + continue + } + mgr.logger.Infof("certificate for domain %q appeared on disk after %s", name, time.Since(start).Round(time.Millisecond)) + // Drain the ACME goroutine before marking ready — autocert holds + // an internal write lock on certState while ACME is in flight. + go func() { + select { + case <-acmeCh: + default: + } + mgr.recordAndNotify(context.Background(), d, name, cert, 0) + }() + return + + case <-ctx.Done(): + mgr.logger.Warnf("prefetch certificate for domain %q timed out", name) + mgr.setDomainState(d, domainFailed, ctx.Err().Error()) + return + } + } +} + +// readCertFromDisk reads and parses a certificate directly from the autocert +// DirCache, bypassing autocert's internal certState mutex. Safe to call +// concurrently with an in-flight ACME request for the same domain. +func (mgr *Manager) readCertFromDisk(ctx context.Context, name string) (*tls.Certificate, error) { + if mgr.Cache == nil { + return nil, fmt.Errorf("no cache configured") + } + data, err := mgr.Cache.Get(ctx, name) + if err != nil { + return nil, err + } + privBlock, certsPEM := pem.Decode(data) + if privBlock == nil || !strings.Contains(privBlock.Type, "PRIVATE") { + return nil, fmt.Errorf("no private key in cache for %q", name) + } + cert, err := tls.X509KeyPair(certsPEM, pem.EncodeToMemory(privBlock)) + if err != nil { + return nil, fmt.Errorf("parse cached certificate for %q: %w", name, err) + } + if len(cert.Certificate) > 0 { + leaf, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return nil, fmt.Errorf("parse leaf for %q: %w", name, err) + } + if time.Now().After(leaf.NotAfter) { + return nil, fmt.Errorf("cached certificate for %q expired at %s", name, leaf.NotAfter) + } + cert.Leaf = leaf + } + return &cert, nil +} + +// recordAndNotify records metrics, marks the domain ready, logs cert details, +// and notifies the cert notifier. +func (mgr *Manager) recordAndNotify(ctx context.Context, d domain.Domain, name string, cert *tls.Certificate, elapsed time.Duration) { + if elapsed > 0 && mgr.metrics != nil { mgr.metrics.RecordCertificateIssuance(elapsed) } - mgr.setDomainState(d, domainReady, "") - now := time.Now() if cert != nil && cert.Leaf != nil { leaf := cert.Leaf @@ -188,11 +275,9 @@ func (mgr *Manager) prefetchCertificate(d domain.Domain) { } else { mgr.logger.Infof("certificate for domain %q ready in %s", name, elapsed.Round(time.Millisecond)) } - mgr.mu.RLock() info := mgr.domains[d] mgr.mu.RUnlock() - if info != nil && mgr.certNotifier != nil { if err := mgr.certNotifier.NotifyCertificateIssued(ctx, info.accountID, info.serviceID, name); err != nil { mgr.logger.Warnf("notify certificate ready for domain %q: %v", name, err) From 8f389fef19cf31fb74e219e03ca392b43cbe96d4 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Thu, 12 Mar 2026 15:57:36 +0100 Subject: [PATCH 204/374] [management] fix some concurrency potential issues (#5584) --- management/server/account_request_buffer.go | 17 ++++++++++---- .../server/http/middleware/bypass/bypass.go | 23 +++++++++++++------ management/server/job/channel.go | 8 ++++++- management/server/types/network.go | 2 ++ 4 files changed, 38 insertions(+), 12 deletions(-) diff --git a/management/server/account_request_buffer.go b/management/server/account_request_buffer.go index e1672c2d0..ac53a9fa8 100644 --- a/management/server/account_request_buffer.go +++ b/management/server/account_request_buffer.go @@ -63,11 +63,20 @@ func (ac *AccountRequestBuffer) GetAccountWithBackpressure(ctx context.Context, log.WithContext(ctx).Tracef("requesting account %s with backpressure", accountID) startTime := time.Now() - ac.getAccountRequestCh <- req - result := <-req.ResultChan - log.WithContext(ctx).Tracef("got account with backpressure after %s", time.Since(startTime)) - return result.Account, result.Err + select { + case <-ctx.Done(): + return nil, ctx.Err() + case ac.getAccountRequestCh <- req: + } + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case result := <-req.ResultChan: + log.WithContext(ctx).Tracef("got account with backpressure after %s", time.Since(startTime)) + return result.Account, result.Err + } } func (ac *AccountRequestBuffer) processGetAccountBatch(ctx context.Context, accountID string) { diff --git a/management/server/http/middleware/bypass/bypass.go b/management/server/http/middleware/bypass/bypass.go index 9447704cb..ddece7152 100644 --- a/management/server/http/middleware/bypass/bypass.go +++ b/management/server/http/middleware/bypass/bypass.go @@ -51,19 +51,28 @@ func GetList() []string { // This can be used to bypass authz/authn middlewares for certain paths, such as webhooks that implement their own authentication. func ShouldBypass(requestPath string, h http.Handler, w http.ResponseWriter, r *http.Request) bool { byPassMutex.RLock() - defer byPassMutex.RUnlock() - + var matched bool for bypassPath := range bypassPaths { - matched, err := path.Match(bypassPath, requestPath) + m, err := path.Match(bypassPath, requestPath) if err != nil { - log.WithContext(r.Context()).Errorf("Error matching path %s with %s from %s: %v", bypassPath, requestPath, GetList(), err) + list := make([]string, 0, len(bypassPaths)) + for k := range bypassPaths { + list = append(list, k) + } + log.WithContext(r.Context()).Errorf("Error matching path %s with %s from %v: %v", bypassPath, requestPath, list, err) continue } - if matched { - h.ServeHTTP(w, r) - return true + if m { + matched = true + break } } + byPassMutex.RUnlock() + + if matched { + h.ServeHTTP(w, r) + return true + } return false } diff --git a/management/server/job/channel.go b/management/server/job/channel.go index c4dc98a68..c4454c4c9 100644 --- a/management/server/job/channel.go +++ b/management/server/job/channel.go @@ -28,7 +28,13 @@ func NewChannel() *Channel { return jc } -func (jc *Channel) AddEvent(ctx context.Context, responseWait time.Duration, event *Event) error { +func (jc *Channel) AddEvent(ctx context.Context, responseWait time.Duration, event *Event) (err error) { + defer func() { + if r := recover(); r != nil { + err = ErrJobChannelClosed + } + }() + select { case <-ctx.Done(): return ctx.Err() diff --git a/management/server/types/network.go b/management/server/types/network.go index d3708d80a..0d13de10f 100644 --- a/management/server/types/network.go +++ b/management/server/types/network.go @@ -152,6 +152,8 @@ func (n *Network) CurrentSerial() uint64 { } func (n *Network) Copy() *Network { + n.Mu.Lock() + defer n.Mu.Unlock() return &Network{ Identifier: n.Identifier, Net: n.Net, From c545689448b0630502242a5935aeff2f73d52a87 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Thu, 12 Mar 2026 16:00:28 +0100 Subject: [PATCH 205/374] [proxy] Wildcard certificate support (#5583) --- proxy/cmd/proxy/cmd/root.go | 3 + proxy/internal/acme/manager.go | 227 +++++++++++++++++++++++++--- proxy/internal/acme/manager_test.go | 206 ++++++++++++++++++++++++- proxy/internal/certwatch/watcher.go | 7 + proxy/server.go | 34 ++++- 5 files changed, 455 insertions(+), 22 deletions(-) diff --git a/proxy/cmd/proxy/cmd/root.go b/proxy/cmd/proxy/cmd/root.go index 50aa38b29..61ed5871e 100644 --- a/proxy/cmd/proxy/cmd/root.go +++ b/proxy/cmd/proxy/cmd/root.go @@ -53,6 +53,7 @@ var ( certFile string certKeyFile string certLockMethod string + wildcardCertDir string wgPort int proxyProtocol bool preSharedKey string @@ -87,6 +88,7 @@ func init() { rootCmd.Flags().StringVar(&certFile, "cert-file", envStringOrDefault("NB_PROXY_CERTIFICATE_FILE", "tls.crt"), "TLS certificate filename within the certificate directory") rootCmd.Flags().StringVar(&certKeyFile, "cert-key-file", envStringOrDefault("NB_PROXY_CERTIFICATE_KEY_FILE", "tls.key"), "TLS certificate key filename within the certificate directory") rootCmd.Flags().StringVar(&certLockMethod, "cert-lock-method", envStringOrDefault("NB_PROXY_CERT_LOCK_METHOD", "auto"), "Certificate lock method for cross-replica coordination: auto, flock, or k8s-lease") + rootCmd.Flags().StringVar(&wildcardCertDir, "wildcard-cert-dir", envStringOrDefault("NB_PROXY_WILDCARD_CERT_DIR", ""), "Directory containing wildcard certificate pairs (.crt/.key). Wildcard patterns are extracted from SANs automatically") rootCmd.Flags().IntVar(&wgPort, "wg-port", envIntOrDefault("NB_PROXY_WG_PORT", 0), "WireGuard listen port (0 = random). Fixed port only works with single-account deployments") rootCmd.Flags().BoolVar(&proxyProtocol, "proxy-protocol", envBoolOrDefault("NB_PROXY_PROXY_PROTOCOL", false), "Enable PROXY protocol on TCP listeners to preserve client IPs behind L4 proxies") rootCmd.Flags().StringVar(&preSharedKey, "preshared-key", envStringOrDefault("NB_PROXY_PRESHARED_KEY", ""), "Define a pre-shared key for the tunnel between proxy and peers") @@ -162,6 +164,7 @@ func runServer(cmd *cobra.Command, args []string) error { ForwardedProto: forwardedProto, TrustedProxies: parsedTrustedProxies, CertLockMethod: nbacme.CertLockMethod(certLockMethod), + WildcardCertDir: wildcardCertDir, WireguardPort: wgPort, ProxyProtocol: proxyProtocol, PreSharedKey: preSharedKey, diff --git a/proxy/internal/acme/manager.go b/proxy/internal/acme/manager.go index b1e532e83..395da7d88 100644 --- a/proxy/internal/acme/manager.go +++ b/proxy/internal/acme/manager.go @@ -11,6 +11,8 @@ import ( "fmt" "math/rand/v2" "net" + "os" + "path/filepath" "slices" "strings" "sync" @@ -20,6 +22,7 @@ import ( "golang.org/x/crypto/acme" "golang.org/x/crypto/acme/autocert" + "github.com/netbirdio/netbird/proxy/internal/certwatch" "github.com/netbirdio/netbird/shared/management/domain" ) @@ -49,6 +52,34 @@ type metricsRecorder interface { RecordCertificateIssuance(duration time.Duration) } +// wildcardEntry maps a domain suffix (e.g. ".example.com") to a certwatch +// watcher that hot-reloads the corresponding wildcard certificate from disk. +type wildcardEntry struct { + suffix string // e.g. ".example.com" + pattern string // e.g. "*.example.com" + watcher *certwatch.Watcher +} + +// ManagerConfig holds the configuration values for the ACME certificate manager. +type ManagerConfig struct { + // CertDir is the directory used for caching ACME certificates. + CertDir string + // ACMEURL is the ACME directory URL (e.g. Let's Encrypt). + ACMEURL string + // EABKID and EABHMACKey are optional External Account Binding credentials + // required by some CAs (e.g. ZeroSSL). EABHMACKey is the base64 + // URL-encoded string provided by the CA. + EABKID string + EABHMACKey string + // LockMethod controls the cross-replica coordination strategy. + LockMethod CertLockMethod + // WildcardDir is an optional path to a directory containing wildcard + // certificate pairs (.crt / .key). Wildcard patterns are + // extracted from the certificates' SAN lists. Domains matching a + // wildcard are served from disk; all others go through ACME. + WildcardDir string +} + // Manager wraps autocert.Manager with domain tracking and cross-replica // coordination via a pluggable locking strategy. The locker prevents // duplicate ACME requests when multiple replicas share a certificate cache. @@ -60,54 +91,182 @@ type Manager struct { mu sync.RWMutex domains map[domain.Domain]*domainInfo + // wildcards holds all loaded wildcard certificates, keyed by suffix. + wildcards []wildcardEntry + certNotifier certificateNotifier logger *log.Logger metrics metricsRecorder } -// NewManager creates a new ACME certificate manager. The certDir is used -// for caching certificates. The lockMethod controls cross-replica -// coordination strategy (see CertLockMethod constants). -// eabKID and eabHMACKey are optional External Account Binding credentials -// required for some CAs like ZeroSSL. The eabHMACKey should be the base64 -// URL-encoded string provided by the CA. -func NewManager(certDir, acmeURL, eabKID, eabHMACKey string, notifier certificateNotifier, logger *log.Logger, lockMethod CertLockMethod, metrics metricsRecorder) *Manager { +// NewManager creates a new ACME certificate manager. +func NewManager(cfg ManagerConfig, notifier certificateNotifier, logger *log.Logger, metrics metricsRecorder) (*Manager, error) { if logger == nil { logger = log.StandardLogger() } mgr := &Manager{ - certDir: certDir, - locker: newCertLocker(lockMethod, certDir, logger), + certDir: cfg.CertDir, + locker: newCertLocker(cfg.LockMethod, cfg.CertDir, logger), domains: make(map[domain.Domain]*domainInfo), certNotifier: notifier, logger: logger, metrics: metrics, } + if cfg.WildcardDir != "" { + entries, err := loadWildcardDir(cfg.WildcardDir, logger) + if err != nil { + return nil, fmt.Errorf("load wildcard certificates from %q: %w", cfg.WildcardDir, err) + } + mgr.wildcards = entries + } + var eab *acme.ExternalAccountBinding - if eabKID != "" && eabHMACKey != "" { - decodedKey, err := base64.RawURLEncoding.DecodeString(eabHMACKey) + if cfg.EABKID != "" && cfg.EABHMACKey != "" { + decodedKey, err := base64.RawURLEncoding.DecodeString(cfg.EABHMACKey) if err != nil { logger.Errorf("failed to decode EAB HMAC key: %v", err) } else { eab = &acme.ExternalAccountBinding{ - KID: eabKID, + KID: cfg.EABKID, Key: decodedKey, } - logger.Infof("configured External Account Binding with KID: %s", eabKID) + logger.Infof("configured External Account Binding with KID: %s", cfg.EABKID) } } mgr.Manager = &autocert.Manager{ Prompt: autocert.AcceptTOS, HostPolicy: mgr.hostPolicy, - Cache: autocert.DirCache(certDir), + Cache: autocert.DirCache(cfg.CertDir), ExternalAccountBinding: eab, Client: &acme.Client{ - DirectoryURL: acmeURL, + DirectoryURL: cfg.ACMEURL, }, } - return mgr + return mgr, nil +} + +// WatchWildcards starts watching all wildcard certificate files for changes. +// It blocks until ctx is cancelled. It is a no-op if no wildcards are loaded. +func (mgr *Manager) WatchWildcards(ctx context.Context) { + if len(mgr.wildcards) == 0 { + return + } + seen := make(map[*certwatch.Watcher]struct{}) + var wg sync.WaitGroup + for i := range mgr.wildcards { + w := mgr.wildcards[i].watcher + if _, ok := seen[w]; ok { + continue + } + seen[w] = struct{}{} + wg.Add(1) + go func() { + defer wg.Done() + w.Watch(ctx) + }() + } + wg.Wait() +} + +// loadWildcardDir scans dir for .crt files, pairs each with a matching .key +// file, loads them, and extracts wildcard SANs (*.example.com) to build +// the suffix lookup entries. +func loadWildcardDir(dir string, logger *log.Logger) ([]wildcardEntry, error) { + crtFiles, err := filepath.Glob(filepath.Join(dir, "*.crt")) + if err != nil { + return nil, fmt.Errorf("glob certificate files: %w", err) + } + + if len(crtFiles) == 0 { + return nil, fmt.Errorf("no .crt files found in %s", dir) + } + + var entries []wildcardEntry + + for _, crtPath := range crtFiles { + base := strings.TrimSuffix(filepath.Base(crtPath), ".crt") + keyPath := filepath.Join(dir, base+".key") + if _, err := os.Stat(keyPath); err != nil { + logger.Warnf("skipping %s: no matching key file %s", crtPath, keyPath) + continue + } + + watcher, err := certwatch.NewWatcher(crtPath, keyPath, logger) + if err != nil { + logger.Warnf("skipping %s: %v", crtPath, err) + continue + } + + leaf := watcher.Leaf() + if leaf == nil { + logger.Warnf("skipping %s: no parsed leaf certificate", crtPath) + continue + } + + for _, san := range leaf.DNSNames { + suffix, ok := parseWildcard(san) + if !ok { + continue + } + entries = append(entries, wildcardEntry{ + suffix: suffix, + pattern: san, + watcher: watcher, + }) + logger.Infof("wildcard certificate loaded: %s (from %s)", san, filepath.Base(crtPath)) + } + } + + if len(entries) == 0 { + return nil, fmt.Errorf("no wildcard SANs (*.example.com) found in certificates in %s", dir) + } + + return entries, nil +} + +// parseWildcard validates a wildcard domain pattern like "*.example.com" +// and returns the suffix ".example.com" for matching. +func parseWildcard(pattern string) (suffix string, ok bool) { + if !strings.HasPrefix(pattern, "*.") { + return "", false + } + parent := pattern[1:] // ".example.com" + if strings.Count(parent, ".") < 1 { + return "", false + } + return strings.ToLower(parent), true +} + +// findWildcardEntry returns the wildcard entry that covers host, or nil. +func (mgr *Manager) findWildcardEntry(host string) *wildcardEntry { + if len(mgr.wildcards) == 0 { + return nil + } + host = strings.ToLower(host) + for i := range mgr.wildcards { + e := &mgr.wildcards[i] + if !strings.HasSuffix(host, e.suffix) { + continue + } + // Single-level match: prefix before suffix must have no dots. + prefix := strings.TrimSuffix(host, e.suffix) + if len(prefix) > 0 && !strings.Contains(prefix, ".") { + return e + } + } + return nil +} + +// WildcardPatterns returns the wildcard patterns that are currently loaded. +func (mgr *Manager) WildcardPatterns() []string { + patterns := make([]string, len(mgr.wildcards)) + for i, e := range mgr.wildcards { + patterns[i] = e.pattern + } + slices.Sort(patterns) + return patterns } func (mgr *Manager) hostPolicy(_ context.Context, host string) error { @@ -123,8 +282,39 @@ func (mgr *Manager) hostPolicy(_ context.Context, host string) error { return nil } -// AddDomain registers a domain for ACME certificate prefetching. -func (mgr *Manager) AddDomain(d domain.Domain, accountID, serviceID string) { +// GetCertificate returns the TLS certificate for the given ClientHello. +// If the requested domain matches a loaded wildcard, the static wildcard +// certificate is returned. Otherwise, the ACME autocert manager handles +// the request. +func (mgr *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { + if e := mgr.findWildcardEntry(hello.ServerName); e != nil { + return e.watcher.GetCertificate(hello) + } + return mgr.Manager.GetCertificate(hello) +} + +// AddDomain registers a domain for certificate management. Domains that +// match a loaded wildcard are marked ready immediately (they use the +// static wildcard certificate) and the method returns true. All other +// domains go through ACME prefetch and the method returns false. +// +// When AddDomain returns true the caller is responsible for sending any +// certificate-ready notifications after the surrounding operation (e.g. +// mapping update) has committed successfully. +func (mgr *Manager) AddDomain(d domain.Domain, accountID, serviceID string) (wildcardHit bool) { + name := d.PunycodeString() + if e := mgr.findWildcardEntry(name); e != nil { + mgr.mu.Lock() + mgr.domains[d] = &domainInfo{ + accountID: accountID, + serviceID: serviceID, + state: domainReady, + } + mgr.mu.Unlock() + mgr.logger.Debugf("domain %q matches wildcard %q, using static certificate", name, e.pattern) + return true + } + mgr.mu.Lock() mgr.domains[d] = &domainInfo{ accountID: accountID, @@ -134,6 +324,7 @@ func (mgr *Manager) AddDomain(d domain.Domain, accountID, serviceID string) { mgr.mu.Unlock() go mgr.prefetchCertificate(d) + return false } // prefetchCertificate proactively triggers certificate generation for a domain. diff --git a/proxy/internal/acme/manager_test.go b/proxy/internal/acme/manager_test.go index 30a27c612..9a3ed9efd 100644 --- a/proxy/internal/acme/manager_test.go +++ b/proxy/internal/acme/manager_test.go @@ -2,6 +2,16 @@ package acme import ( "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "os" + "path/filepath" "testing" "time" @@ -10,7 +20,8 @@ import ( ) func TestHostPolicy(t *testing.T) { - mgr := NewManager(t.TempDir(), "https://acme.example.com/directory", "", "", nil, nil, "", nil) + mgr, err := NewManager(ManagerConfig{CertDir: t.TempDir(), ACMEURL: "https://acme.example.com/directory"}, nil, nil, nil) + require.NoError(t, err) mgr.AddDomain("example.com", "acc1", "rp1") // Wait for the background prefetch goroutine to finish so the temp dir @@ -70,7 +81,8 @@ func TestHostPolicy(t *testing.T) { } func TestDomainStates(t *testing.T) { - mgr := NewManager(t.TempDir(), "https://acme.example.com/directory", "", "", nil, nil, "", nil) + mgr, err := NewManager(ManagerConfig{CertDir: t.TempDir(), ACMEURL: "https://acme.example.com/directory"}, nil, nil, nil) + require.NoError(t, err) assert.Equal(t, 0, mgr.PendingCerts(), "initially zero") assert.Equal(t, 0, mgr.TotalDomains(), "initially zero domains") @@ -100,3 +112,193 @@ func TestDomainStates(t *testing.T) { assert.Contains(t, failed, "b.example.com") assert.Empty(t, mgr.ReadyDomains()) } + +func TestParseWildcard(t *testing.T) { + tests := []struct { + pattern string + wantSuffix string + wantOK bool + }{ + {"*.example.com", ".example.com", true}, + {"*.foo.example.com", ".foo.example.com", true}, + {"*.COM", ".com", true}, // single-label TLD + {"example.com", "", false}, // no wildcard prefix + {"*example.com", "", false}, // missing dot + {"**.example.com", "", false}, // double star + {"", "", false}, + } + + for _, tc := range tests { + t.Run(tc.pattern, func(t *testing.T) { + suffix, ok := parseWildcard(tc.pattern) + assert.Equal(t, tc.wantOK, ok) + if ok { + assert.Equal(t, tc.wantSuffix, suffix) + } + }) + } +} + +func TestMatchesWildcard(t *testing.T) { + wcDir := t.TempDir() + generateSelfSignedCert(t, wcDir, "example", "*.example.com") + + acmeDir := t.TempDir() + mgr, err := NewManager(ManagerConfig{CertDir: acmeDir, ACMEURL: "https://acme.example.com/directory", WildcardDir: wcDir}, nil, nil, nil) + require.NoError(t, err) + + tests := []struct { + host string + match bool + }{ + {"foo.example.com", true}, + {"bar.example.com", true}, + {"FOO.Example.COM", true}, // case insensitive + {"example.com", false}, // bare parent + {"sub.foo.example.com", false}, // multi-level + {"notexample.com", false}, + {"", false}, + } + + for _, tc := range tests { + t.Run(tc.host, func(t *testing.T) { + assert.Equal(t, tc.match, mgr.findWildcardEntry(tc.host) != nil) + }) + } +} + +// generateSelfSignedCert creates a temporary self-signed certificate and key +// for testing purposes. The baseName controls the output filenames: +// .crt and .key. +func generateSelfSignedCert(t *testing.T, dir, baseName string, dnsNames ...string) { + t.Helper() + + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + + template := &x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{CommonName: dnsNames[0]}, + DNSNames: dnsNames, + NotBefore: time.Now().Add(-time.Hour), + NotAfter: time.Now().Add(24 * time.Hour), + } + + certDER, err := x509.CreateCertificate(rand.Reader, template, template, &key.PublicKey, key) + require.NoError(t, err) + + certFile, err := os.Create(filepath.Join(dir, baseName+".crt")) + require.NoError(t, err) + require.NoError(t, pem.Encode(certFile, &pem.Block{Type: "CERTIFICATE", Bytes: certDER})) + require.NoError(t, certFile.Close()) + + keyDER, err := x509.MarshalECPrivateKey(key) + require.NoError(t, err) + keyFile, err := os.Create(filepath.Join(dir, baseName+".key")) + require.NoError(t, err) + require.NoError(t, pem.Encode(keyFile, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyDER})) + require.NoError(t, keyFile.Close()) +} + +func TestWildcardAddDomainSkipsACME(t *testing.T) { + wcDir := t.TempDir() + generateSelfSignedCert(t, wcDir, "example", "*.example.com") + + acmeDir := t.TempDir() + mgr, err := NewManager(ManagerConfig{CertDir: acmeDir, ACMEURL: "https://acme.example.com/directory", WildcardDir: wcDir}, nil, nil, nil) + require.NoError(t, err) + + // Add a wildcard-matching domain — should be immediately ready. + mgr.AddDomain("foo.example.com", "acc1", "svc1") + assert.Equal(t, 0, mgr.PendingCerts(), "wildcard domain should not be pending") + assert.Equal(t, []string{"foo.example.com"}, mgr.ReadyDomains()) + + // Add a non-wildcard domain — should go through ACME (pending then failed). + mgr.AddDomain("other.net", "acc2", "svc2") + assert.Equal(t, 2, mgr.TotalDomains()) + + // Wait for the ACME prefetch to fail. + assert.Eventually(t, func() bool { + return mgr.PendingCerts() == 0 + }, 30*time.Second, 100*time.Millisecond) + + assert.Equal(t, []string{"foo.example.com"}, mgr.ReadyDomains()) + assert.Contains(t, mgr.FailedDomains(), "other.net") +} + +func TestWildcardGetCertificate(t *testing.T) { + wcDir := t.TempDir() + generateSelfSignedCert(t, wcDir, "example", "*.example.com") + + acmeDir := t.TempDir() + mgr, err := NewManager(ManagerConfig{CertDir: acmeDir, ACMEURL: "https://acme.example.com/directory", WildcardDir: wcDir}, nil, nil, nil) + require.NoError(t, err) + + mgr.AddDomain("foo.example.com", "acc1", "svc1") + + // GetCertificate for a wildcard-matching domain should return the static cert. + cert, err := mgr.GetCertificate(&tls.ClientHelloInfo{ServerName: "foo.example.com"}) + require.NoError(t, err) + require.NotNil(t, cert) + assert.Contains(t, cert.Leaf.DNSNames, "*.example.com") +} + +func TestMultipleWildcards(t *testing.T) { + wcDir := t.TempDir() + generateSelfSignedCert(t, wcDir, "example", "*.example.com") + generateSelfSignedCert(t, wcDir, "other", "*.other.org") + + acmeDir := t.TempDir() + mgr, err := NewManager(ManagerConfig{CertDir: acmeDir, ACMEURL: "https://acme.example.com/directory", WildcardDir: wcDir}, nil, nil, nil) + require.NoError(t, err) + + assert.ElementsMatch(t, []string{"*.example.com", "*.other.org"}, mgr.WildcardPatterns()) + + // Both wildcards should resolve. + mgr.AddDomain("foo.example.com", "acc1", "svc1") + mgr.AddDomain("bar.other.org", "acc2", "svc2") + + assert.Equal(t, 0, mgr.PendingCerts()) + assert.ElementsMatch(t, []string{"foo.example.com", "bar.other.org"}, mgr.ReadyDomains()) + + // GetCertificate routes to the correct cert. + cert1, err := mgr.GetCertificate(&tls.ClientHelloInfo{ServerName: "foo.example.com"}) + require.NoError(t, err) + assert.Contains(t, cert1.Leaf.DNSNames, "*.example.com") + + cert2, err := mgr.GetCertificate(&tls.ClientHelloInfo{ServerName: "bar.other.org"}) + require.NoError(t, err) + assert.Contains(t, cert2.Leaf.DNSNames, "*.other.org") + + // Non-matching domain falls through to ACME. + mgr.AddDomain("custom.net", "acc3", "svc3") + assert.Eventually(t, func() bool { + return mgr.PendingCerts() == 0 + }, 30*time.Second, 100*time.Millisecond) + assert.Contains(t, mgr.FailedDomains(), "custom.net") +} + +func TestWildcardDirEmpty(t *testing.T) { + wcDir := t.TempDir() + // Empty directory — no .crt files. + _, err := NewManager(ManagerConfig{CertDir: t.TempDir(), ACMEURL: "https://acme.example.com/directory", WildcardDir: wcDir}, nil, nil, nil) + require.Error(t, err) + assert.Contains(t, err.Error(), "no .crt files found") +} + +func TestWildcardDirNonWildcardCert(t *testing.T) { + wcDir := t.TempDir() + // Certificate without a wildcard SAN. + generateSelfSignedCert(t, wcDir, "plain", "plain.example.com") + + _, err := NewManager(ManagerConfig{CertDir: t.TempDir(), ACMEURL: "https://acme.example.com/directory", WildcardDir: wcDir}, nil, nil, nil) + require.Error(t, err) + assert.Contains(t, err.Error(), "no wildcard SANs") +} + +func TestNoWildcardDir(t *testing.T) { + // Empty string means no wildcard dir — pure ACME mode. + mgr, err := NewManager(ManagerConfig{CertDir: t.TempDir(), ACMEURL: "https://acme.example.com/directory"}, nil, nil, nil) + require.NoError(t, err) + assert.Empty(t, mgr.WildcardPatterns()) +} diff --git a/proxy/internal/certwatch/watcher.go b/proxy/internal/certwatch/watcher.go index 78ad1ab7c..6366a53c6 100644 --- a/proxy/internal/certwatch/watcher.go +++ b/proxy/internal/certwatch/watcher.go @@ -67,6 +67,13 @@ func (w *Watcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, erro return w.cert, nil } +// Leaf returns the parsed leaf certificate, or nil if not yet loaded. +func (w *Watcher) Leaf() *x509.Certificate { + w.mu.RLock() + defer w.mu.RUnlock() + return w.leaf +} + // Watch starts watching for certificate file changes. It blocks until // ctx is cancelled. It uses fsnotify for immediate detection and falls // back to polling if fsnotify is unavailable (e.g. on NFS). diff --git a/proxy/server.go b/proxy/server.go index 123b14648..62e8368e6 100644 --- a/proxy/server.go +++ b/proxy/server.go @@ -97,6 +97,11 @@ type Server struct { // CertLockMethod controls how ACME certificate locks are coordinated // across replicas. Default: CertLockAuto (detect environment). CertLockMethod acme.CertLockMethod + // WildcardCertDir is an optional directory containing wildcard certificate + // pairs (.crt / .key). Wildcard patterns are extracted from + // the certificates' SAN lists. Matching domains use these static certs + // instead of ACME. + WildcardCertDir string // DebugEndpointEnabled enables the debug HTTP endpoint. DebugEndpointEnabled bool @@ -437,7 +442,20 @@ func (s *Server) configureTLS(ctx context.Context) (*tls.Config, error) { "acme_server": s.ACMEDirectory, "challenge_type": s.ACMEChallengeType, }).Debug("ACME certificates enabled, configuring certificate manager") - s.acme = acme.NewManager(s.CertificateDirectory, s.ACMEDirectory, s.ACMEEABKID, s.ACMEEABHMACKey, s, s.Logger, s.CertLockMethod, s.meter) + var err error + s.acme, err = acme.NewManager(acme.ManagerConfig{ + CertDir: s.CertificateDirectory, + ACMEURL: s.ACMEDirectory, + EABKID: s.ACMEEABKID, + EABHMACKey: s.ACMEEABHMACKey, + LockMethod: s.CertLockMethod, + WildcardDir: s.WildcardCertDir, + }, s, s.Logger, s.meter) + if err != nil { + return nil, fmt.Errorf("create ACME manager: %w", err) + } + + go s.acme.WatchWildcards(ctx) if s.ACMEChallengeType == "http-01" { s.http = &http.Server{ @@ -453,6 +471,10 @@ func (s *Server) configureTLS(ctx context.Context) (*tls.Config, error) { } tlsConfig = s.acme.TLSConfig() + // autocert.Manager.TLSConfig() wires its own GetCertificate, which + // bypasses our override that checks wildcards first. + tlsConfig.GetCertificate = s.acme.GetCertificate + // ServerName needs to be set to allow for ACME to work correctly // when using CNAME URLs to access the proxy. tlsConfig.ServerName = s.ProxyURL @@ -675,8 +697,9 @@ func (s *Server) addMapping(ctx context.Context, mapping *proto.ProxyMapping) er if err := s.netbird.AddPeer(ctx, accountID, d, authToken, serviceID); err != nil { return fmt.Errorf("create peer for domain %q: %w", d, err) } + var wildcardHit bool if s.acme != nil { - s.acme.AddDomain(d, string(accountID), serviceID) + wildcardHit = s.acme.AddDomain(d, string(accountID), serviceID) } // Pass the mapping through to the update function to avoid duplicating the @@ -686,6 +709,13 @@ func (s *Server) addMapping(ctx context.Context, mapping *proto.ProxyMapping) er s.removeMapping(ctx, mapping) return fmt.Errorf("update mapping for domain %q: %w", d, err) } + + if wildcardHit { + if err := s.NotifyCertificateIssued(ctx, string(accountID), serviceID, string(d)); err != nil { + s.Logger.Warnf("notify certificate ready for domain %q: %v", d, err) + } + } + return nil } From e50e124e70b92510fb69c4928fc81ab9bc9470c3 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Thu, 12 Mar 2026 17:12:26 +0100 Subject: [PATCH 206/374] [proxy] Fix domain switching update (#5585) --- .../internals/modules/reverseproxy/service/manager/manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/management/internals/modules/reverseproxy/service/manager/manager.go b/management/internals/modules/reverseproxy/service/manager/manager.go index 56a1fc98a..cae3d3bda 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager.go +++ b/management/internals/modules/reverseproxy/service/manager/manager.go @@ -392,7 +392,7 @@ func (m *Manager) sendServiceUpdateNotifications(ctx context.Context, accountID oidcCfg := m.proxyController.GetOIDCValidationConfig() switch { - case updateInfo.domainChanged && updateInfo.oldCluster != s.ProxyCluster: + case updateInfo.domainChanged || updateInfo.oldCluster != s.ProxyCluster: m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Delete, "", oidcCfg), updateInfo.oldCluster) m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Create, "", oidcCfg), s.ProxyCluster) case !s.Enabled && updateInfo.serviceEnabledChanged: From 967c6f3cd34e36a2f4e0199ea5691a6a6bf8e24d Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Fri, 13 Mar 2026 09:47:00 +0100 Subject: [PATCH 207/374] [misc] Add GPG signing key support for rpm packages (#5581) * [misc] Add GPG signing key support for deb and rpm packages * [misc] Improve GPG key management for deb and rpm signing * [misc] Extract GPG key import logic into a reusable script * [misc] Add key fingerprint extraction and targeted export for GPG keys * [misc] Remove passphrase from GPG keys before exporting * [misc] Simplify GPG key management by removing import script * [misc] Bump GoReleaser version to v2.14.3 in release workflow * [misc] Replace GPG passphrase variables with NFPM-prefixed alternatives in workflows and configs * [misc] Update naming conventions for package IDs and passphrase variables in workflows and configs * [misc] Standardize NFPM variable naming in release workflow * [misc] Adjust NFPM variable names for consistency in release workflow * [misc] Remove Debian signing GPG key usage in workflows and configs --- .github/workflows/release.yml | 52 ++++++++++++++++++++++++++++++++++- .goreleaser.yaml | 13 +++++---- .goreleaser_ui.yaml | 11 +++++--- 3 files changed, 65 insertions(+), 11 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d1f085b47..7ac5103d9 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,7 +10,7 @@ on: env: SIGN_PIPE_VER: "v0.1.1" - GORELEASER_VER: "v2.3.2" + GORELEASER_VER: "v2.14.3" PRODUCT_NAME: "NetBird" COPYRIGHT: "NetBird GmbH" @@ -169,6 +169,13 @@ jobs: - name: Install OS build dependencies run: sudo apt update && sudo apt install -y -q gcc-arm-linux-gnueabihf gcc-aarch64-linux-gnu + - name: Decode GPG signing key + env: + GPG_RPM_PRIVATE_KEY: ${{ secrets.GPG_RPM_PRIVATE_KEY }} + run: | + echo "$GPG_RPM_PRIVATE_KEY" | base64 -d > /tmp/gpg-rpm-signing-key.asc + echo "GPG_RPM_KEY_FILE=/tmp/gpg-rpm-signing-key.asc" >> $GITHUB_ENV + - name: Install goversioninfo run: go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@233067e - name: Generate windows syso amd64 @@ -186,6 +193,24 @@ jobs: HOMEBREW_TAP_GITHUB_TOKEN: ${{ secrets.HOMEBREW_TAP_GITHUB_TOKEN }} UPLOAD_DEBIAN_SECRET: ${{ secrets.PKG_UPLOAD_SECRET }} UPLOAD_YUM_SECRET: ${{ secrets.PKG_UPLOAD_SECRET }} + GPG_RPM_KEY_FILE: ${{ env.GPG_RPM_KEY_FILE }} + NFPM_NETBIRD_RPM_PASSPHRASE: ${{ secrets.GPG_RPM_PASSPHRASE }} + - name: Verify RPM signatures + run: | + docker run --rm -v $(pwd)/dist:/dist fedora:41 bash -c ' + dnf install -y -q rpm-sign curl >/dev/null 2>&1 + curl -sSL https://pkgs.netbird.io/yum/repodata/repomd.xml.key -o /tmp/rpm-pub.key + rpm --import /tmp/rpm-pub.key + echo "=== Verifying RPM signatures ===" + for rpm_file in /dist/*amd64*.rpm; do + [ -f "$rpm_file" ] || continue + echo "--- $(basename $rpm_file) ---" + rpm -K "$rpm_file" + done + ' + - name: Clean up GPG key + if: always() + run: rm -f /tmp/gpg-rpm-signing-key.asc - name: Tag and push PR images (amd64 only) if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository run: | @@ -265,6 +290,13 @@ jobs: - name: Install dependencies run: sudo apt update && sudo apt install -y -q libappindicator3-dev gir1.2-appindicator3-0.1 libxxf86vm-dev gcc-mingw-w64-x86-64 + - name: Decode GPG signing key + env: + GPG_RPM_PRIVATE_KEY: ${{ secrets.GPG_RPM_PRIVATE_KEY }} + run: | + echo "$GPG_RPM_PRIVATE_KEY" | base64 -d > /tmp/gpg-rpm-signing-key.asc + echo "GPG_RPM_KEY_FILE=/tmp/gpg-rpm-signing-key.asc" >> $GITHUB_ENV + - name: Install LLVM-MinGW for ARM64 cross-compilation run: | cd /tmp @@ -289,6 +321,24 @@ jobs: HOMEBREW_TAP_GITHUB_TOKEN: ${{ secrets.HOMEBREW_TAP_GITHUB_TOKEN }} UPLOAD_DEBIAN_SECRET: ${{ secrets.PKG_UPLOAD_SECRET }} UPLOAD_YUM_SECRET: ${{ secrets.PKG_UPLOAD_SECRET }} + GPG_RPM_KEY_FILE: ${{ env.GPG_RPM_KEY_FILE }} + NFPM_NETBIRD_UI_RPM_PASSPHRASE: ${{ secrets.GPG_RPM_PASSPHRASE }} + - name: Verify RPM signatures + run: | + docker run --rm -v $(pwd)/dist:/dist fedora:41 bash -c ' + dnf install -y -q rpm-sign curl >/dev/null 2>&1 + curl -sSL https://pkgs.netbird.io/yum/repodata/repomd.xml.key -o /tmp/rpm-pub.key + rpm --import /tmp/rpm-pub.key + echo "=== Verifying RPM signatures ===" + for rpm_file in /dist/*.rpm; do + [ -f "$rpm_file" ] || continue + echo "--- $(basename $rpm_file) ---" + rpm -K "$rpm_file" + done + ' + - name: Clean up GPG key + if: always() + run: rm -f /tmp/gpg-rpm-signing-key.asc - name: upload non tags for debug purposes uses: actions/upload-artifact@v4 with: diff --git a/.goreleaser.yaml b/.goreleaser.yaml index c0a5efbbe..0f81229cd 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -171,13 +171,12 @@ nfpms: - maintainer: Netbird description: Netbird client. homepage: https://netbird.io/ - id: netbird-deb + id: netbird_deb bindir: /usr/bin builds: - netbird formats: - deb - scripts: postinstall: "release_files/post_install.sh" preremove: "release_files/pre_remove.sh" @@ -185,16 +184,18 @@ nfpms: - maintainer: Netbird description: Netbird client. homepage: https://netbird.io/ - id: netbird-rpm + id: netbird_rpm bindir: /usr/bin builds: - netbird formats: - rpm - scripts: postinstall: "release_files/post_install.sh" preremove: "release_files/pre_remove.sh" + rpm: + signature: + key_file: '{{ if index .Env "GPG_RPM_KEY_FILE" }}{{ .Env.GPG_RPM_KEY_FILE }}{{ end }}' dockers: - image_templates: - netbirdio/netbird:{{ .Version }}-amd64 @@ -876,7 +877,7 @@ brews: uploads: - name: debian ids: - - netbird-deb + - netbird_deb mode: archive target: https://pkgs.wiretrustee.com/debian/pool/{{ .ArtifactName }};deb.distribution=stable;deb.component=main;deb.architecture={{ if .Arm }}armhf{{ else }}{{ .Arch }}{{ end }};deb.package= username: dev@wiretrustee.com @@ -884,7 +885,7 @@ uploads: - name: yum ids: - - netbird-rpm + - netbird_rpm mode: archive target: https://pkgs.wiretrustee.com/yum/{{ .Arch }}{{ if .Arm }}{{ .Arm }}{{ end }} username: dev@wiretrustee.com diff --git a/.goreleaser_ui.yaml b/.goreleaser_ui.yaml index a243702ea..470f1deaa 100644 --- a/.goreleaser_ui.yaml +++ b/.goreleaser_ui.yaml @@ -61,7 +61,7 @@ nfpms: - maintainer: Netbird description: Netbird client UI. homepage: https://netbird.io/ - id: netbird-ui-deb + id: netbird_ui_deb package_name: netbird-ui builds: - netbird-ui @@ -80,7 +80,7 @@ nfpms: - maintainer: Netbird description: Netbird client UI. homepage: https://netbird.io/ - id: netbird-ui-rpm + id: netbird_ui_rpm package_name: netbird-ui builds: - netbird-ui @@ -95,11 +95,14 @@ nfpms: dst: /usr/share/pixmaps/netbird.png dependencies: - netbird + rpm: + signature: + key_file: '{{ if index .Env "GPG_RPM_KEY_FILE" }}{{ .Env.GPG_RPM_KEY_FILE }}{{ end }}' uploads: - name: debian ids: - - netbird-ui-deb + - netbird_ui_deb mode: archive target: https://pkgs.wiretrustee.com/debian/pool/{{ .ArtifactName }};deb.distribution=stable;deb.component=main;deb.architecture={{ if .Arm }}armhf{{ else }}{{ .Arch }}{{ end }};deb.package= username: dev@wiretrustee.com @@ -107,7 +110,7 @@ uploads: - name: yum ids: - - netbird-ui-rpm + - netbird_ui_rpm mode: archive target: https://pkgs.wiretrustee.com/yum/{{ .Arch }}{{ if .Arm }}{{ .Arm }}{{ end }} username: dev@wiretrustee.com From f80fe506d5c1f3847015db6223efdf262bb85d77 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Fri, 13 Mar 2026 13:22:43 +0100 Subject: [PATCH 208/374] [client] Fix DNS probe thread safety and avoid blocking engine sync (#5576) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix DNS probe thread safety and avoid blocking engine sync Refactor ProbeAvailability to prevent blocking the engine's sync mutex during slow DNS probes. The probe now derives its context from the server's own context (s.ctx) instead of accepting one from the caller, and uses a mutex to ensure only one probe runs at a time — new calls cancel the previous probe before starting. Also fixes a data race in Stop() when accessing probeCancel without the probe mutex. * Ensure DNS probe thread safety by locking critical sections Add proper locking to prevent data races when accessing shared resources during DNS probe execution and Stop(). Update handlers snapshot logic to avoid conflicts with concurrent writers. * Rename context and remove redundant cancellation * Cancel first and lock * Add locking to ensure thread safety when reactivating upstream servers --- client/internal/dns/local/local.go | 2 +- client/internal/dns/server.go | 66 ++++++++++++++++++++++++---- client/internal/dns/server_test.go | 2 +- client/internal/dns/upstream.go | 61 ++++++++++++++++--------- client/internal/dns/upstream_test.go | 2 +- client/internal/engine.go | 3 +- 6 files changed, 101 insertions(+), 35 deletions(-) diff --git a/client/internal/dns/local/local.go b/client/internal/dns/local/local.go index b374bcc6a..a67a23945 100644 --- a/client/internal/dns/local/local.go +++ b/client/internal/dns/local/local.go @@ -77,7 +77,7 @@ func (d *Resolver) ID() types.HandlerID { return "local-resolver" } -func (d *Resolver) ProbeAvailability() {} +func (d *Resolver) ProbeAvailability(context.Context) {} // ServeDNS handles a DNS request func (d *Resolver) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { diff --git a/client/internal/dns/server.go b/client/internal/dns/server.go index 179517bbd..6ca4f7957 100644 --- a/client/internal/dns/server.go +++ b/client/internal/dns/server.go @@ -104,12 +104,16 @@ type DefaultServer struct { statusRecorder *peer.Status stateManager *statemanager.Manager + + probeMu sync.Mutex + probeCancel context.CancelFunc + probeWg sync.WaitGroup } type handlerWithStop interface { dns.Handler Stop() - ProbeAvailability() + ProbeAvailability(context.Context) ID() types.HandlerID } @@ -362,7 +366,13 @@ func (s *DefaultServer) DnsIP() netip.Addr { // Stop stops the server func (s *DefaultServer) Stop() { + s.probeMu.Lock() + if s.probeCancel != nil { + s.probeCancel() + } s.ctxCancel() + s.probeMu.Unlock() + s.probeWg.Wait() s.shutdownWg.Wait() s.mux.Lock() @@ -479,7 +489,8 @@ func (s *DefaultServer) SearchDomains() []string { } // ProbeAvailability tests each upstream group's servers for availability -// and deactivates the group if no server responds +// and deactivates the group if no server responds. +// If a previous probe is still running, it will be cancelled before starting a new one. func (s *DefaultServer) ProbeAvailability() { if val := os.Getenv(envSkipDNSProbe); val != "" { skipProbe, err := strconv.ParseBool(val) @@ -492,15 +503,52 @@ func (s *DefaultServer) ProbeAvailability() { } } - var wg sync.WaitGroup - for _, mux := range s.dnsMuxMap { - wg.Add(1) - go func(mux handlerWithStop) { - defer wg.Done() - mux.ProbeAvailability() - }(mux.handler) + s.probeMu.Lock() + + // don't start probes on a stopped server + if s.ctx.Err() != nil { + s.probeMu.Unlock() + return } + + // cancel any running probe + if s.probeCancel != nil { + s.probeCancel() + s.probeCancel = nil + } + + // wait for the previous probe goroutines to finish while holding + // the mutex so no other caller can start a new probe concurrently + s.probeWg.Wait() + + // start a new probe + probeCtx, probeCancel := context.WithCancel(s.ctx) + s.probeCancel = probeCancel + + s.probeWg.Add(1) + defer s.probeWg.Done() + + // Snapshot handlers under s.mux to avoid racing with updateMux/dnsMuxMap writers. + s.mux.Lock() + handlers := make([]handlerWithStop, 0, len(s.dnsMuxMap)) + for _, mux := range s.dnsMuxMap { + handlers = append(handlers, mux.handler) + } + s.mux.Unlock() + + var wg sync.WaitGroup + for _, handler := range handlers { + wg.Add(1) + go func(h handlerWithStop) { + defer wg.Done() + h.ProbeAvailability(probeCtx) + }(handler) + } + + s.probeMu.Unlock() + wg.Wait() + probeCancel() } func (s *DefaultServer) UpdateServerConfig(domains dnsconfig.ServerDomains) error { diff --git a/client/internal/dns/server_test.go b/client/internal/dns/server_test.go index 3606d48b9..d3b0c250d 100644 --- a/client/internal/dns/server_test.go +++ b/client/internal/dns/server_test.go @@ -1065,7 +1065,7 @@ type mockHandler struct { func (m *mockHandler) ServeDNS(dns.ResponseWriter, *dns.Msg) {} func (m *mockHandler) Stop() {} -func (m *mockHandler) ProbeAvailability() {} +func (m *mockHandler) ProbeAvailability(context.Context) {} func (m *mockHandler) ID() types.HandlerID { return types.HandlerID(m.Id) } type mockService struct{} diff --git a/client/internal/dns/upstream.go b/client/internal/dns/upstream.go index 375f6df1c..18128a942 100644 --- a/client/internal/dns/upstream.go +++ b/client/internal/dns/upstream.go @@ -65,6 +65,7 @@ type upstreamResolverBase struct { mutex sync.Mutex reactivatePeriod time.Duration upstreamTimeout time.Duration + wg sync.WaitGroup deactivate func(error) reactivate func() @@ -115,6 +116,11 @@ func (u *upstreamResolverBase) MatchSubdomains() bool { func (u *upstreamResolverBase) Stop() { log.Debugf("stopping serving DNS for upstreams %s", u.upstreamServers) u.cancel() + + u.mutex.Lock() + u.wg.Wait() + u.mutex.Unlock() + } // ServeDNS handles a DNS request @@ -260,16 +266,10 @@ func formatFailures(failures []upstreamFailure) string { // ProbeAvailability tests all upstream servers simultaneously and // disables the resolver if none work -func (u *upstreamResolverBase) ProbeAvailability() { +func (u *upstreamResolverBase) ProbeAvailability(ctx context.Context) { u.mutex.Lock() defer u.mutex.Unlock() - select { - case <-u.ctx.Done(): - return - default: - } - // avoid probe if upstreams could resolve at least one query if u.successCount.Load() > 0 { return @@ -279,31 +279,39 @@ func (u *upstreamResolverBase) ProbeAvailability() { var mu sync.Mutex var wg sync.WaitGroup - var errors *multierror.Error + var errs *multierror.Error for _, upstream := range u.upstreamServers { - upstream := upstream - wg.Add(1) - go func() { + go func(upstream netip.AddrPort) { defer wg.Done() - err := u.testNameserver(upstream, 500*time.Millisecond) + err := u.testNameserver(u.ctx, ctx, upstream, 500*time.Millisecond) if err != nil { - errors = multierror.Append(errors, err) + mu.Lock() + errs = multierror.Append(errs, err) + mu.Unlock() log.Warnf("probing upstream nameserver %s: %s", upstream, err) return } mu.Lock() - defer mu.Unlock() success = true - }() + mu.Unlock() + }(upstream) } wg.Wait() + select { + case <-ctx.Done(): + return + case <-u.ctx.Done(): + return + default: + } + // didn't find a working upstream server, let's disable and try later if !success { - u.disable(errors.ErrorOrNil()) + u.disable(errs.ErrorOrNil()) if u.statusRecorder == nil { return @@ -339,7 +347,7 @@ func (u *upstreamResolverBase) waitUntilResponse() { } for _, upstream := range u.upstreamServers { - if err := u.testNameserver(upstream, probeTimeout); err != nil { + if err := u.testNameserver(u.ctx, nil, upstream, probeTimeout); err != nil { log.Tracef("upstream check for %s: %s", upstream, err) } else { // at least one upstream server is available, stop probing @@ -364,7 +372,9 @@ func (u *upstreamResolverBase) waitUntilResponse() { log.Infof("upstreams %s are responsive again. Adding them back to system", u.upstreamServersString()) u.successCount.Add(1) u.reactivate() + u.mutex.Lock() u.disabled = false + u.mutex.Unlock() } // isTimeout returns true if the given error is a network timeout error. @@ -387,7 +397,11 @@ func (u *upstreamResolverBase) disable(err error) { u.successCount.Store(0) u.deactivate(err) u.disabled = true - go u.waitUntilResponse() + u.wg.Add(1) + go func() { + defer u.wg.Done() + u.waitUntilResponse() + }() } func (u *upstreamResolverBase) upstreamServersString() string { @@ -398,13 +412,18 @@ func (u *upstreamResolverBase) upstreamServersString() string { return strings.Join(servers, ", ") } -func (u *upstreamResolverBase) testNameserver(server netip.AddrPort, timeout time.Duration) error { - ctx, cancel := context.WithTimeout(u.ctx, timeout) +func (u *upstreamResolverBase) testNameserver(baseCtx context.Context, externalCtx context.Context, server netip.AddrPort, timeout time.Duration) error { + mergedCtx, cancel := context.WithTimeout(baseCtx, timeout) defer cancel() + if externalCtx != nil { + stop2 := context.AfterFunc(externalCtx, cancel) + defer stop2() + } + r := new(dns.Msg).SetQuestion(testRecord, dns.TypeSOA) - _, _, err := u.upstreamClient.exchange(ctx, server.String(), r) + _, _, err := u.upstreamClient.exchange(mergedCtx, server.String(), r) return err } diff --git a/client/internal/dns/upstream_test.go b/client/internal/dns/upstream_test.go index 8b06e4475..ab164c30b 100644 --- a/client/internal/dns/upstream_test.go +++ b/client/internal/dns/upstream_test.go @@ -188,7 +188,7 @@ func TestUpstreamResolver_DeactivationReactivation(t *testing.T) { reactivated = true } - resolver.ProbeAvailability() + resolver.ProbeAvailability(context.TODO()) if !failed { t.Errorf("expected that resolving was deactivated") diff --git a/client/internal/engine.go b/client/internal/engine.go index b0ae841f8..858202155 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -1315,8 +1315,7 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error { // Test received (upstream) servers for availability right away instead of upon usage. // If no server of a server group responds this will disable the respective handler and retry later. - e.dnsServer.ProbeAvailability() - + go e.dnsServer.ProbeAvailability() return nil } From d86875aeac88d9a57021df5105cf9ccc2242b1e1 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Fri, 13 Mar 2026 15:01:59 +0100 Subject: [PATCH 209/374] [management] Exclude proxy from peer approval (#5588) --- management/internals/modules/peers/manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/management/internals/modules/peers/manager.go b/management/internals/modules/peers/manager.go index 2f796a5d1..7cb0f3908 100644 --- a/management/internals/modules/peers/manager.go +++ b/management/internals/modules/peers/manager.go @@ -210,7 +210,7 @@ func (m *managerImpl) CreateProxyPeer(ctx context.Context, accountID string, pee }, } - _, _, _, err = m.accountManager.AddPeer(ctx, accountID, "", "", peer, false) + _, _, _, err = m.accountManager.AddPeer(ctx, accountID, "", "", peer, true) if err != nil { return fmt.Errorf("failed to create proxy peer: %w", err) } From 529c0314f84b6344d086b4771f8a1086e61f5e14 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Fri, 13 Mar 2026 22:22:02 +0800 Subject: [PATCH 210/374] [client] Fall back to getent/id for SSH user lookup in static builds (#5510) --- client/ssh/server/getent_cgo_unix.go | 24 ++ client/ssh/server/getent_nocgo_unix.go | 74 +++++ client/ssh/server/getent_test.go | 172 ++++++++++ client/ssh/server/getent_unix.go | 122 +++++++ client/ssh/server/getent_unix_test.go | 410 ++++++++++++++++++++++++ client/ssh/server/getent_windows.go | 26 ++ client/ssh/server/shell.go | 10 +- client/ssh/server/user_utils.go | 4 +- client/ssh/server/userswitching_unix.go | 24 +- 9 files changed, 848 insertions(+), 18 deletions(-) create mode 100644 client/ssh/server/getent_cgo_unix.go create mode 100644 client/ssh/server/getent_nocgo_unix.go create mode 100644 client/ssh/server/getent_test.go create mode 100644 client/ssh/server/getent_unix.go create mode 100644 client/ssh/server/getent_unix_test.go create mode 100644 client/ssh/server/getent_windows.go diff --git a/client/ssh/server/getent_cgo_unix.go b/client/ssh/server/getent_cgo_unix.go new file mode 100644 index 000000000..4afbfc627 --- /dev/null +++ b/client/ssh/server/getent_cgo_unix.go @@ -0,0 +1,24 @@ +//go:build cgo && !osusergo && !windows + +package server + +import "os/user" + +// lookupWithGetent with CGO delegates directly to os/user.Lookup. +// When CGO is enabled, os/user uses libc (getpwnam_r) which goes through +// the NSS stack natively. If it fails, the user truly doesn't exist and +// getent would also fail. +func lookupWithGetent(username string) (*user.User, error) { + return user.Lookup(username) +} + +// currentUserWithGetent with CGO delegates directly to os/user.Current. +func currentUserWithGetent() (*user.User, error) { + return user.Current() +} + +// groupIdsWithFallback with CGO delegates directly to user.GroupIds. +// libc's getgrouplist handles NSS groups natively. +func groupIdsWithFallback(u *user.User) ([]string, error) { + return u.GroupIds() +} diff --git a/client/ssh/server/getent_nocgo_unix.go b/client/ssh/server/getent_nocgo_unix.go new file mode 100644 index 000000000..314daae4c --- /dev/null +++ b/client/ssh/server/getent_nocgo_unix.go @@ -0,0 +1,74 @@ +//go:build (!cgo || osusergo) && !windows + +package server + +import ( + "os" + "os/user" + "strconv" + + log "github.com/sirupsen/logrus" +) + +// lookupWithGetent looks up a user by name, falling back to getent if os/user fails. +// Without CGO, os/user only reads /etc/passwd and misses NSS-provided users. +// getent goes through the host's NSS stack. +func lookupWithGetent(username string) (*user.User, error) { + u, err := user.Lookup(username) + if err == nil { + return u, nil + } + + stdErr := err + log.Debugf("os/user.Lookup(%q) failed, trying getent: %v", username, err) + + u, _, getentErr := runGetent(username) + if getentErr != nil { + log.Debugf("getent fallback for %q also failed: %v", username, getentErr) + return nil, stdErr + } + + return u, nil +} + +// currentUserWithGetent gets the current user, falling back to getent if os/user fails. +func currentUserWithGetent() (*user.User, error) { + u, err := user.Current() + if err == nil { + return u, nil + } + + stdErr := err + uid := strconv.Itoa(os.Getuid()) + log.Debugf("os/user.Current() failed, trying getent with UID %s: %v", uid, err) + + u, _, getentErr := runGetent(uid) + if getentErr != nil { + return nil, stdErr + } + + return u, nil +} + +// groupIdsWithFallback gets group IDs for a user via the id command first, +// falling back to user.GroupIds(). +// NOTE: unlike lookupWithGetent/currentUserWithGetent which try stdlib first, +// this intentionally tries `id -G` first because without CGO, user.GroupIds() +// only reads /etc/group and silently returns incomplete results for NSS users +// (no error, just missing groups). The id command goes through NSS and returns +// the full set. +func groupIdsWithFallback(u *user.User) ([]string, error) { + ids, err := runIdGroups(u.Username) + if err == nil { + return ids, nil + } + + log.Debugf("id -G %q failed, falling back to user.GroupIds(): %v", u.Username, err) + + ids, stdErr := u.GroupIds() + if stdErr != nil { + return nil, stdErr + } + + return ids, nil +} diff --git a/client/ssh/server/getent_test.go b/client/ssh/server/getent_test.go new file mode 100644 index 000000000..5eac2fdbe --- /dev/null +++ b/client/ssh/server/getent_test.go @@ -0,0 +1,172 @@ +package server + +import ( + "os/user" + "runtime" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLookupWithGetent_CurrentUser(t *testing.T) { + // The current user should always be resolvable on any platform + current, err := user.Current() + require.NoError(t, err) + + u, err := lookupWithGetent(current.Username) + require.NoError(t, err) + assert.Equal(t, current.Username, u.Username) + assert.Equal(t, current.Uid, u.Uid) + assert.Equal(t, current.Gid, u.Gid) +} + +func TestLookupWithGetent_NonexistentUser(t *testing.T) { + _, err := lookupWithGetent("nonexistent_user_xyzzy_12345") + require.Error(t, err, "should fail for nonexistent user") +} + +func TestCurrentUserWithGetent(t *testing.T) { + stdUser, err := user.Current() + require.NoError(t, err) + + u, err := currentUserWithGetent() + require.NoError(t, err) + assert.Equal(t, stdUser.Uid, u.Uid) + assert.Equal(t, stdUser.Username, u.Username) +} + +func TestGroupIdsWithFallback_CurrentUser(t *testing.T) { + current, err := user.Current() + require.NoError(t, err) + + groups, err := groupIdsWithFallback(current) + require.NoError(t, err) + require.NotEmpty(t, groups, "current user should have at least one group") + + if runtime.GOOS != "windows" { + for _, gid := range groups { + _, err := strconv.ParseUint(gid, 10, 32) + assert.NoError(t, err, "group ID %q should be a valid uint32", gid) + } + } +} + +func TestGetShellFromGetent_CurrentUser(t *testing.T) { + if runtime.GOOS == "windows" { + // Windows stub always returns empty, which is correct + shell := getShellFromGetent("1000") + assert.Empty(t, shell, "Windows stub should return empty") + return + } + + current, err := user.Current() + require.NoError(t, err) + + // getent may not be available on all systems (e.g., macOS without Homebrew getent) + shell := getShellFromGetent(current.Uid) + if shell == "" { + t.Log("getShellFromGetent returned empty, getent may not be available") + return + } + assert.True(t, shell[0] == '/', "shell should be an absolute path, got %q", shell) +} + +func TestLookupWithGetent_RootUser(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("no root user on Windows") + } + + u, err := lookupWithGetent("root") + if err != nil { + t.Skip("root user not available on this system") + } + assert.Equal(t, "0", u.Uid, "root should have UID 0") +} + +// TestIntegration_FullLookupChain exercises the complete user lookup chain +// against the real system, testing that all wrappers (lookupWithGetent, +// currentUserWithGetent, groupIdsWithFallback, getShellFromGetent) produce +// consistent and correct results when composed together. +func TestIntegration_FullLookupChain(t *testing.T) { + // Step 1: currentUserWithGetent must resolve the running user. + current, err := currentUserWithGetent() + require.NoError(t, err, "currentUserWithGetent must resolve the running user") + require.NotEmpty(t, current.Uid) + require.NotEmpty(t, current.Username) + + // Step 2: lookupWithGetent by the same username must return matching identity. + byName, err := lookupWithGetent(current.Username) + require.NoError(t, err) + assert.Equal(t, current.Uid, byName.Uid, "lookup by name should return same UID") + assert.Equal(t, current.Gid, byName.Gid, "lookup by name should return same GID") + assert.Equal(t, current.HomeDir, byName.HomeDir, "lookup by name should return same home") + + // Step 3: groupIdsWithFallback must return at least the primary GID. + groups, err := groupIdsWithFallback(current) + require.NoError(t, err) + require.NotEmpty(t, groups, "user must have at least one group") + + foundPrimary := false + for _, gid := range groups { + if runtime.GOOS != "windows" { + _, err := strconv.ParseUint(gid, 10, 32) + require.NoError(t, err, "group ID %q must be a valid uint32", gid) + } + if gid == current.Gid { + foundPrimary = true + } + } + assert.True(t, foundPrimary, "primary GID %s should appear in supplementary groups", current.Gid) + + // Step 4: getShellFromGetent should either return a valid shell path or empty + // (empty is OK when getent is not available, e.g. macOS without Homebrew getent). + if runtime.GOOS != "windows" { + shell := getShellFromGetent(current.Uid) + if shell != "" { + assert.True(t, shell[0] == '/', "shell should be an absolute path, got %q", shell) + } + } +} + +// TestIntegration_LookupAndGroupsConsistency verifies that a user resolved via +// lookupWithGetent can have their groups resolved via groupIdsWithFallback, +// testing the handoff between the two functions as used by the SSH server. +func TestIntegration_LookupAndGroupsConsistency(t *testing.T) { + current, err := user.Current() + require.NoError(t, err) + + // Simulate the SSH server flow: lookup user, then get their groups. + resolved, err := lookupWithGetent(current.Username) + require.NoError(t, err) + + groups, err := groupIdsWithFallback(resolved) + require.NoError(t, err) + require.NotEmpty(t, groups, "resolved user must have groups") + + // On Unix, all returned GIDs must be valid numeric values. + // On Windows, group IDs are SIDs (e.g., "S-1-5-32-544"). + if runtime.GOOS != "windows" { + for _, gid := range groups { + _, err := strconv.ParseUint(gid, 10, 32) + assert.NoError(t, err, "group ID %q should be numeric", gid) + } + } +} + +// TestIntegration_ShellLookupChain tests the full shell resolution chain +// (getShellFromPasswd -> getShellFromGetent -> $SHELL -> default) on Unix. +func TestIntegration_ShellLookupChain(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Unix shell lookup not applicable on Windows") + } + + current, err := user.Current() + require.NoError(t, err) + + // getUserShell is the top-level function used by the SSH server. + shell := getUserShell(current.Uid) + require.NotEmpty(t, shell, "getUserShell must always return a shell") + assert.True(t, shell[0] == '/', "shell should be an absolute path, got %q", shell) +} diff --git a/client/ssh/server/getent_unix.go b/client/ssh/server/getent_unix.go new file mode 100644 index 000000000..18edb2fdf --- /dev/null +++ b/client/ssh/server/getent_unix.go @@ -0,0 +1,122 @@ +//go:build !windows + +package server + +import ( + "context" + "fmt" + "os/exec" + "os/user" + "runtime" + "strings" + "time" +) + +const getentTimeout = 5 * time.Second + +// getShellFromGetent gets a user's login shell via getent by UID. +// This is needed even with CGO because getShellFromPasswd reads /etc/passwd +// directly and won't find NSS-provided users there. +func getShellFromGetent(userID string) string { + _, shell, err := runGetent(userID) + if err != nil { + return "" + } + return shell +} + +// runGetent executes `getent passwd ` and returns the user and login shell. +func runGetent(query string) (*user.User, string, error) { + if !validateGetentInput(query) { + return nil, "", fmt.Errorf("invalid getent input: %q", query) + } + + ctx, cancel := context.WithTimeout(context.Background(), getentTimeout) + defer cancel() + + out, err := exec.CommandContext(ctx, "getent", "passwd", query).Output() + if err != nil { + return nil, "", fmt.Errorf("getent passwd %s: %w", query, err) + } + + return parseGetentPasswd(string(out)) +} + +// parseGetentPasswd parses getent passwd output: "name:x:uid:gid:gecos:home:shell" +func parseGetentPasswd(output string) (*user.User, string, error) { + fields := strings.SplitN(strings.TrimSpace(output), ":", 8) + if len(fields) < 6 { + return nil, "", fmt.Errorf("unexpected getent output (need 6+ fields): %q", output) + } + + if fields[0] == "" || fields[2] == "" || fields[3] == "" { + return nil, "", fmt.Errorf("missing required fields in getent output: %q", output) + } + + var shell string + if len(fields) >= 7 { + shell = fields[6] + } + + return &user.User{ + Username: fields[0], + Uid: fields[2], + Gid: fields[3], + Name: fields[4], + HomeDir: fields[5], + }, shell, nil +} + +// validateGetentInput checks that the input is safe to pass to getent or id. +// Allows POSIX usernames, numeric UIDs, and common NSS extensions +// (@ for Kerberos, $ for Samba, + for NIS compat). +func validateGetentInput(input string) bool { + maxLen := 32 + if runtime.GOOS == "linux" { + maxLen = 256 + } + + if len(input) == 0 || len(input) > maxLen { + return false + } + + for _, r := range input { + if isAllowedGetentChar(r) { + continue + } + return false + } + return true +} + +func isAllowedGetentChar(r rune) bool { + if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' { + return true + } + switch r { + case '.', '_', '-', '@', '+', '$': + return true + } + return false +} + +// runIdGroups runs `id -G ` and returns the space-separated group IDs. +func runIdGroups(username string) ([]string, error) { + if !validateGetentInput(username) { + return nil, fmt.Errorf("invalid username for id command: %q", username) + } + + ctx, cancel := context.WithTimeout(context.Background(), getentTimeout) + defer cancel() + + out, err := exec.CommandContext(ctx, "id", "-G", username).Output() + if err != nil { + return nil, fmt.Errorf("id -G %s: %w", username, err) + } + + trimmed := strings.TrimSpace(string(out)) + if trimmed == "" { + return nil, fmt.Errorf("id -G %s: empty output", username) + } + return strings.Fields(trimmed), nil +} diff --git a/client/ssh/server/getent_unix_test.go b/client/ssh/server/getent_unix_test.go new file mode 100644 index 000000000..e44563b79 --- /dev/null +++ b/client/ssh/server/getent_unix_test.go @@ -0,0 +1,410 @@ +//go:build !windows + +package server + +import ( + "os/exec" + "os/user" + "runtime" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParseGetentPasswd(t *testing.T) { + tests := []struct { + name string + input string + wantUser *user.User + wantShell string + wantErr bool + errContains string + }{ + { + name: "standard entry", + input: "alice:x:1001:1001:Alice Smith:/home/alice:/bin/bash\n", + wantUser: &user.User{ + Username: "alice", + Uid: "1001", + Gid: "1001", + Name: "Alice Smith", + HomeDir: "/home/alice", + }, + wantShell: "/bin/bash", + }, + { + name: "root entry", + input: "root:x:0:0:root:/root:/bin/bash", + wantUser: &user.User{ + Username: "root", + Uid: "0", + Gid: "0", + Name: "root", + HomeDir: "/root", + }, + wantShell: "/bin/bash", + }, + { + name: "empty gecos field", + input: "svc:x:999:999::/var/lib/svc:/usr/sbin/nologin", + wantUser: &user.User{ + Username: "svc", + Uid: "999", + Gid: "999", + Name: "", + HomeDir: "/var/lib/svc", + }, + wantShell: "/usr/sbin/nologin", + }, + { + name: "gecos with commas", + input: "john:x:1002:1002:John Doe,Room 101,555-1234,555-4321:/home/john:/bin/zsh", + wantUser: &user.User{ + Username: "john", + Uid: "1002", + Gid: "1002", + Name: "John Doe,Room 101,555-1234,555-4321", + HomeDir: "/home/john", + }, + wantShell: "/bin/zsh", + }, + { + name: "remote user with large UID", + input: "remoteuser:*:50001:50001:Remote User:/home/remoteuser:/bin/bash\n", + wantUser: &user.User{ + Username: "remoteuser", + Uid: "50001", + Gid: "50001", + Name: "Remote User", + HomeDir: "/home/remoteuser", + }, + wantShell: "/bin/bash", + }, + { + name: "no shell field (only 6 fields)", + input: "minimal:x:1000:1000::/home/minimal", + wantUser: &user.User{ + Username: "minimal", + Uid: "1000", + Gid: "1000", + Name: "", + HomeDir: "/home/minimal", + }, + wantShell: "", + }, + { + name: "too few fields", + input: "bad:x:1000", + wantErr: true, + errContains: "need 6+ fields", + }, + { + name: "empty username", + input: ":x:1000:1000::/home/test:/bin/bash", + wantErr: true, + errContains: "missing required fields", + }, + { + name: "empty UID", + input: "test:x::1000::/home/test:/bin/bash", + wantErr: true, + errContains: "missing required fields", + }, + { + name: "empty GID", + input: "test:x:1000:::/home/test:/bin/bash", + wantErr: true, + errContains: "missing required fields", + }, + { + name: "empty input", + input: "", + wantErr: true, + errContains: "need 6+ fields", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + u, shell, err := parseGetentPasswd(tt.input) + if tt.wantErr { + require.Error(t, err) + if tt.errContains != "" { + assert.Contains(t, err.Error(), tt.errContains) + } + return + } + require.NoError(t, err) + assert.Equal(t, tt.wantUser.Username, u.Username, "username") + assert.Equal(t, tt.wantUser.Uid, u.Uid, "UID") + assert.Equal(t, tt.wantUser.Gid, u.Gid, "GID") + assert.Equal(t, tt.wantUser.Name, u.Name, "name/gecos") + assert.Equal(t, tt.wantUser.HomeDir, u.HomeDir, "home directory") + assert.Equal(t, tt.wantShell, shell, "shell") + }) + } +} + +func TestValidateGetentInput(t *testing.T) { + tests := []struct { + name string + input string + want bool + }{ + {"normal username", "alice", true}, + {"numeric UID", "1001", true}, + {"dots and underscores", "alice.bob_test", true}, + {"hyphen", "alice-bob", true}, + {"kerberos principal", "user@REALM", true}, + {"samba machine account", "MACHINE$", true}, + {"NIS compat", "+user", true}, + {"empty", "", false}, + {"null byte", "alice\x00bob", false}, + {"newline", "alice\nbob", false}, + {"tab", "alice\tbob", false}, + {"control char", "alice\x01bob", false}, + {"DEL char", "alice\x7fbob", false}, + {"space rejected", "alice bob", false}, + {"semicolon rejected", "alice;bob", false}, + {"backtick rejected", "alice`bob", false}, + {"pipe rejected", "alice|bob", false}, + {"33 chars exceeds non-linux max", makeLongString(33), runtime.GOOS == "linux"}, + {"256 chars at linux max", makeLongString(256), runtime.GOOS == "linux"}, + {"257 chars exceeds all limits", makeLongString(257), false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.want, validateGetentInput(tt.input)) + }) + } +} + +func makeLongString(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = 'a' + } + return string(b) +} + +func TestRunGetent_RootUser(t *testing.T) { + if _, err := exec.LookPath("getent"); err != nil { + t.Skip("getent not available on this system") + } + + u, shell, err := runGetent("root") + require.NoError(t, err) + assert.Equal(t, "root", u.Username) + assert.Equal(t, "0", u.Uid) + assert.Equal(t, "0", u.Gid) + assert.NotEmpty(t, shell, "root should have a shell") +} + +func TestRunGetent_ByUID(t *testing.T) { + if _, err := exec.LookPath("getent"); err != nil { + t.Skip("getent not available on this system") + } + + u, _, err := runGetent("0") + require.NoError(t, err) + assert.Equal(t, "root", u.Username) + assert.Equal(t, "0", u.Uid) +} + +func TestRunGetent_NonexistentUser(t *testing.T) { + if _, err := exec.LookPath("getent"); err != nil { + t.Skip("getent not available on this system") + } + + _, _, err := runGetent("nonexistent_user_xyzzy_12345") + assert.Error(t, err) +} + +func TestRunGetent_InvalidInput(t *testing.T) { + _, _, err := runGetent("") + assert.Error(t, err) + + _, _, err = runGetent("user\x00name") + assert.Error(t, err) +} + +func TestRunGetent_NotAvailable(t *testing.T) { + if _, err := exec.LookPath("getent"); err == nil { + t.Skip("getent is available, can't test missing case") + } + + _, _, err := runGetent("root") + assert.Error(t, err, "should fail when getent is not installed") +} + +func TestRunIdGroups_CurrentUser(t *testing.T) { + if _, err := exec.LookPath("id"); err != nil { + t.Skip("id not available on this system") + } + + current, err := user.Current() + require.NoError(t, err) + + groups, err := runIdGroups(current.Username) + require.NoError(t, err) + require.NotEmpty(t, groups, "current user should have at least one group") + + for _, gid := range groups { + _, err := strconv.ParseUint(gid, 10, 32) + assert.NoError(t, err, "group ID %q should be a valid uint32", gid) + } +} + +func TestRunIdGroups_NonexistentUser(t *testing.T) { + if _, err := exec.LookPath("id"); err != nil { + t.Skip("id not available on this system") + } + + _, err := runIdGroups("nonexistent_user_xyzzy_12345") + assert.Error(t, err) +} + +func TestRunIdGroups_InvalidInput(t *testing.T) { + _, err := runIdGroups("") + assert.Error(t, err) + + _, err = runIdGroups("user\x00name") + assert.Error(t, err) +} + +func TestGetentResultsMatchStdlib(t *testing.T) { + if _, err := exec.LookPath("getent"); err != nil { + t.Skip("getent not available on this system") + } + + current, err := user.Current() + require.NoError(t, err) + + getentUser, _, err := runGetent(current.Username) + require.NoError(t, err) + + assert.Equal(t, current.Username, getentUser.Username, "username should match") + assert.Equal(t, current.Uid, getentUser.Uid, "UID should match") + assert.Equal(t, current.Gid, getentUser.Gid, "GID should match") + assert.Equal(t, current.HomeDir, getentUser.HomeDir, "home directory should match") +} + +func TestGetentResultsMatchStdlib_ByUID(t *testing.T) { + if _, err := exec.LookPath("getent"); err != nil { + t.Skip("getent not available on this system") + } + + current, err := user.Current() + require.NoError(t, err) + + getentUser, _, err := runGetent(current.Uid) + require.NoError(t, err) + + assert.Equal(t, current.Username, getentUser.Username, "username should match when looked up by UID") + assert.Equal(t, current.Uid, getentUser.Uid, "UID should match") +} + +func TestIdGroupsMatchStdlib(t *testing.T) { + if _, err := exec.LookPath("id"); err != nil { + t.Skip("id not available on this system") + } + + current, err := user.Current() + require.NoError(t, err) + + stdGroups, err := current.GroupIds() + if err != nil { + t.Skip("os/user.GroupIds() not working, likely CGO_ENABLED=0") + } + + idGroups, err := runIdGroups(current.Username) + require.NoError(t, err) + + // Deduplicate both lists: id -G can return duplicates (e.g., root in Docker) + // and ElementsMatch treats duplicates as distinct. + assert.ElementsMatch(t, uniqueStrings(stdGroups), uniqueStrings(idGroups), "id -G should return same groups as os/user") +} + +func uniqueStrings(ss []string) []string { + seen := make(map[string]struct{}, len(ss)) + out := make([]string, 0, len(ss)) + for _, s := range ss { + if _, ok := seen[s]; ok { + continue + } + seen[s] = struct{}{} + out = append(out, s) + } + return out +} + +// TestGetShellFromPasswd_CurrentUser verifies that getShellFromPasswd correctly +// reads the current user's shell from /etc/passwd by comparing it against what +// getent reports (which goes through NSS). +func TestGetShellFromPasswd_CurrentUser(t *testing.T) { + current, err := user.Current() + require.NoError(t, err) + + shell := getShellFromPasswd(current.Uid) + if shell == "" { + t.Skip("current user not found in /etc/passwd (may be an NSS-only user)") + } + + assert.True(t, shell[0] == '/', "shell should be an absolute path, got %q", shell) + + if _, err := exec.LookPath("getent"); err == nil { + _, getentShell, getentErr := runGetent(current.Uid) + if getentErr == nil && getentShell != "" { + assert.Equal(t, getentShell, shell, "shell from /etc/passwd should match getent") + } + } +} + +// TestGetShellFromPasswd_RootUser verifies that getShellFromPasswd can read +// root's shell from /etc/passwd. Root is guaranteed to be in /etc/passwd on +// any standard Unix system. +func TestGetShellFromPasswd_RootUser(t *testing.T) { + shell := getShellFromPasswd("0") + require.NotEmpty(t, shell, "root (UID 0) must be in /etc/passwd") + assert.True(t, shell[0] == '/', "root shell should be an absolute path, got %q", shell) +} + +// TestGetShellFromPasswd_NonexistentUID verifies that getShellFromPasswd +// returns empty for a UID that doesn't exist in /etc/passwd. +func TestGetShellFromPasswd_NonexistentUID(t *testing.T) { + shell := getShellFromPasswd("4294967294") + assert.Empty(t, shell, "nonexistent UID should return empty shell") +} + +// TestGetShellFromPasswd_MatchesGetentForKnownUsers reads /etc/passwd directly +// and cross-validates every entry against getent to ensure parseGetentPasswd +// and getShellFromPasswd agree on shell values. +func TestGetShellFromPasswd_MatchesGetentForKnownUsers(t *testing.T) { + if _, err := exec.LookPath("getent"); err != nil { + t.Skip("getent not available") + } + + // Pick a few well-known system UIDs that are virtually always in /etc/passwd. + uids := []string{"0"} // root + + current, err := user.Current() + require.NoError(t, err) + uids = append(uids, current.Uid) + + for _, uid := range uids { + passwdShell := getShellFromPasswd(uid) + if passwdShell == "" { + continue + } + + _, getentShell, err := runGetent(uid) + if err != nil { + continue + } + + assert.Equal(t, getentShell, passwdShell, "shell mismatch for UID %s", uid) + } +} diff --git a/client/ssh/server/getent_windows.go b/client/ssh/server/getent_windows.go new file mode 100644 index 000000000..3e76b3e8e --- /dev/null +++ b/client/ssh/server/getent_windows.go @@ -0,0 +1,26 @@ +//go:build windows + +package server + +import "os/user" + +// lookupWithGetent on Windows just delegates to os/user.Lookup. +// Windows does not use NSS/getent; its user lookup works without CGO. +func lookupWithGetent(username string) (*user.User, error) { + return user.Lookup(username) +} + +// currentUserWithGetent on Windows just delegates to os/user.Current. +func currentUserWithGetent() (*user.User, error) { + return user.Current() +} + +// getShellFromGetent is a no-op on Windows; shell resolution uses PowerShell detection. +func getShellFromGetent(_ string) string { + return "" +} + +// groupIdsWithFallback on Windows just delegates to u.GroupIds(). +func groupIdsWithFallback(u *user.User) ([]string, error) { + return u.GroupIds() +} diff --git a/client/ssh/server/shell.go b/client/ssh/server/shell.go index fea9d2910..1e8ff5e31 100644 --- a/client/ssh/server/shell.go +++ b/client/ssh/server/shell.go @@ -49,10 +49,14 @@ func getWindowsUserShell() string { return `C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe` } -// getUnixUserShell returns the shell for Unix-like systems +// getUnixUserShell returns the shell for Unix-like systems. +// Tries /etc/passwd first (fast, no subprocess), falls back to getent for NSS users. func getUnixUserShell(userID string) string { - shell := getShellFromPasswd(userID) - if shell != "" { + if shell := getShellFromPasswd(userID); shell != "" { + return shell + } + + if shell := getShellFromGetent(userID); shell != "" { return shell } diff --git a/client/ssh/server/user_utils.go b/client/ssh/server/user_utils.go index 799882cbb..bc2aa2d7d 100644 --- a/client/ssh/server/user_utils.go +++ b/client/ssh/server/user_utils.go @@ -23,8 +23,8 @@ func isPlatformUnix() bool { // Dependency injection variables for testing - allows mocking dynamic runtime checks var ( - getCurrentUser = user.Current - lookupUser = user.Lookup + getCurrentUser = currentUserWithGetent + lookupUser = lookupWithGetent getCurrentOS = func() string { return runtime.GOOS } getIsProcessPrivileged = isCurrentProcessPrivileged diff --git a/client/ssh/server/userswitching_unix.go b/client/ssh/server/userswitching_unix.go index d80b77042..220e2240f 100644 --- a/client/ssh/server/userswitching_unix.go +++ b/client/ssh/server/userswitching_unix.go @@ -146,32 +146,30 @@ func (s *Server) parseUserCredentials(localUser *user.User) (uint32, uint32, []u } gid := uint32(gid64) - groups, err := s.getSupplementaryGroups(localUser.Username) - if err != nil { - log.Warnf("failed to get supplementary groups for user %s: %v", localUser.Username, err) + groups, err := s.getSupplementaryGroups(localUser) + if err != nil || len(groups) == 0 { + if err != nil { + log.Warnf("failed to get supplementary groups for user %s: %v", localUser.Username, err) + } groups = []uint32{gid} } return uid, gid, groups, nil } -// getSupplementaryGroups retrieves supplementary group IDs for a user -func (s *Server) getSupplementaryGroups(username string) ([]uint32, error) { - u, err := user.Lookup(username) +// getSupplementaryGroups retrieves supplementary group IDs for a user. +// Uses id/getent fallback for NSS users in CGO_ENABLED=0 builds. +func (s *Server) getSupplementaryGroups(u *user.User) ([]uint32, error) { + groupIDStrings, err := groupIdsWithFallback(u) if err != nil { - return nil, fmt.Errorf("lookup user %s: %w", username, err) - } - - groupIDStrings, err := u.GroupIds() - if err != nil { - return nil, fmt.Errorf("get group IDs for user %s: %w", username, err) + return nil, fmt.Errorf("get group IDs for user %s: %w", u.Username, err) } groups := make([]uint32, len(groupIDStrings)) for i, gidStr := range groupIDStrings { gid64, err := strconv.ParseUint(gidStr, 10, 32) if err != nil { - return nil, fmt.Errorf("invalid group ID %s for user %s: %w", gidStr, username, err) + return nil, fmt.Errorf("invalid group ID %s for user %s: %w", gidStr, u.Username, err) } groups[i] = uint32(gid64) } From 2e1aa497d25c907e11d2f1a525f968daac0fb253 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Fri, 13 Mar 2026 15:28:25 +0100 Subject: [PATCH 211/374] [proxy] add log-level flag (#5594) --- proxy/cmd/proxy/cmd/root.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/proxy/cmd/proxy/cmd/root.go b/proxy/cmd/proxy/cmd/root.go index 61ed5871e..60e81feb5 100644 --- a/proxy/cmd/proxy/cmd/root.go +++ b/proxy/cmd/proxy/cmd/root.go @@ -34,6 +34,7 @@ var ( ) var ( + logLevel string debugLogs bool mgmtAddr string addr string @@ -69,7 +70,9 @@ var rootCmd = &cobra.Command{ } func init() { + rootCmd.PersistentFlags().StringVar(&logLevel, "log-level", envStringOrDefault("NB_PROXY_LOG_LEVEL", "info"), "Log level: panic, fatal, error, warn, info, debug, trace") rootCmd.PersistentFlags().BoolVar(&debugLogs, "debug", envBoolOrDefault("NB_PROXY_DEBUG_LOGS", false), "Enable debug logs") + _ = rootCmd.PersistentFlags().MarkDeprecated("debug", "use --log-level instead") rootCmd.Flags().StringVar(&mgmtAddr, "mgmt", envStringOrDefault("NB_PROXY_MANAGEMENT_ADDRESS", DefaultManagementURL), "Management address to connect to") rootCmd.Flags().StringVar(&addr, "addr", envStringOrDefault("NB_PROXY_ADDRESS", ":443"), "Reverse proxy address to listen on") rootCmd.Flags().StringVar(&proxyDomain, "domain", envStringOrDefault("NB_PROXY_DOMAIN", ""), "The Domain at which this proxy will be reached. e.g., netbird.example.com") @@ -117,7 +120,7 @@ func runServer(cmd *cobra.Command, args []string) error { return fmt.Errorf("proxy token is required: set %s environment variable", envProxyToken) } - level := "error" + level := logLevel if debugLogs { level = "debug" } From fe9b844511187b3d5433e84456bcf3719f83175c Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Fri, 13 Mar 2026 17:01:28 +0100 Subject: [PATCH 212/374] [client] refactor auto update workflow (#5448) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Auto-update logic moved out of the UI into a dedicated updatemanager.Manager service that runs in the connection layer. The UI no longer polls or checks for updates independently. The update manager supports three modes driven by the management server's auto-update policy: No policy set by mgm: checks GitHub for the latest version and notifies the user (previous behavior, now centralized) mgm enforces update: the "About" menu triggers installation directly instead of just downloading the file — user still initiates the action mgm forces update: installation proceeds automatically without user interaction updateManager lifecycle is now owned by daemon, giving the daemon server direct control via a new TriggerUpdate RPC Introduces EngineServices struct to group external service dependencies passed to NewEngine, reducing its argument count from 11 to 4 --- client/android/client.go | 4 +- client/cmd/signer/artifactkey.go | 2 +- client/cmd/signer/artifactsign.go | 2 +- client/cmd/signer/revocation.go | 2 +- client/cmd/signer/rootkey.go | 2 +- client/cmd/up.go | 2 +- client/cmd/update_supported.go | 2 +- client/embed/embed.go | 2 +- client/internal/connect.go | 62 +-- client/internal/debug/debug.go | 2 +- client/internal/engine.go | 84 ++-- client/internal/engine_test.go | 55 ++- client/internal/updatemanager/manager_test.go | 214 ---------- .../updatemanager/manager_unsupported.go | 39 -- .../{updatemanager => updater}/doc.go | 4 +- .../downloader/downloader.go | 0 .../downloader/downloader_test.go | 0 .../installer/binary_nowindows.go | 0 .../installer/binary_windows.go | 0 .../installer/doc.go | 0 .../installer/installer.go | 0 .../installer/installer_common.go | 4 +- .../installer/installer_log_darwin.go | 0 .../installer/installer_log_windows.go | 0 .../installer/installer_run_darwin.go | 0 .../installer/installer_run_windows.go | 0 .../installer/log.go | 0 .../installer/procattr_darwin.go | 0 .../installer/procattr_windows.go | 0 .../installer/repourl_dev.go | 0 .../installer/repourl_prod.go | 0 .../installer/result.go | 5 +- .../installer/types.go | 0 .../installer/types_darwin.go | 0 .../installer/types_windows.go | 0 .../{updatemanager => updater}/manager.go | 268 ++++++++++--- client/internal/updater/manager_linux_test.go | 111 ++++++ client/internal/updater/manager_test.go | 227 +++++++++++ .../updater/manager_test_helpers_test.go | 56 +++ .../reposign/artifact.go | 0 .../reposign/artifact_test.go | 0 .../reposign/certs/root-pub.pem | 0 .../reposign/certsdev/root-pub.pem | 0 .../reposign/doc.go | 0 .../reposign/embed_dev.go | 0 .../reposign/embed_prod.go | 0 .../reposign/key.go | 0 .../reposign/key_test.go | 0 .../reposign/revocation.go | 0 .../reposign/revocation_test.go | 0 .../reposign/root.go | 0 .../reposign/root_test.go | 0 .../reposign/signature.go | 0 .../reposign/signature_test.go | 0 .../reposign/verify.go | 2 +- .../reposign/verify_test.go | 0 client/internal/updater/supported_darwin.go | 22 + client/internal/updater/supported_other.go | 7 + client/internal/updater/supported_windows.go | 5 + .../{updatemanager => updater}/update.go | 2 +- client/ios/NetBirdSDK/client.go | 2 +- client/proto/daemon.pb.go | 377 +++++++++++------- client/proto/daemon.proto | 13 +- client/proto/daemon_grpc.pb.go | 40 ++ client/server/event.go | 1 + client/server/server.go | 38 +- client/server/server_connect_test.go | 2 +- client/server/server_test.go | 2 +- client/server/triggerupdate.go | 24 ++ client/server/updateresult.go | 2 +- client/ui/client_ui.go | 44 +- client/ui/event/event.go | 7 +- client/ui/event_handler.go | 39 +- client/ui/profile.go | 6 +- client/ui/quickactions.go | 2 +- .../internals/shared/grpc/conversion.go | 3 +- management/server/account.go | 14 +- management/server/activity/codes.go | 7 + .../handlers/accounts/accounts_handler.go | 4 + .../accounts/accounts_handler_test.go | 6 + management/server/types/settings.go | 5 + shared/management/http/api/openapi.yml | 4 + shared/management/http/api/types.gen.go | 3 + shared/management/proto/management.proto | 4 +- 84 files changed, 1210 insertions(+), 626 deletions(-) delete mode 100644 client/internal/updatemanager/manager_test.go delete mode 100644 client/internal/updatemanager/manager_unsupported.go rename client/internal/{updatemanager => updater}/doc.go (93%) rename client/internal/{updatemanager => updater}/downloader/downloader.go (100%) rename client/internal/{updatemanager => updater}/downloader/downloader_test.go (100%) rename client/internal/{updatemanager => updater}/installer/binary_nowindows.go (100%) rename client/internal/{updatemanager => updater}/installer/binary_windows.go (100%) rename client/internal/{updatemanager => updater}/installer/doc.go (100%) rename client/internal/{updatemanager => updater}/installer/installer.go (100%) rename client/internal/{updatemanager => updater}/installer/installer_common.go (97%) rename client/internal/{updatemanager => updater}/installer/installer_log_darwin.go (100%) rename client/internal/{updatemanager => updater}/installer/installer_log_windows.go (100%) rename client/internal/{updatemanager => updater}/installer/installer_run_darwin.go (100%) rename client/internal/{updatemanager => updater}/installer/installer_run_windows.go (100%) rename client/internal/{updatemanager => updater}/installer/log.go (100%) rename client/internal/{updatemanager => updater}/installer/procattr_darwin.go (100%) rename client/internal/{updatemanager => updater}/installer/procattr_windows.go (100%) rename client/internal/{updatemanager => updater}/installer/repourl_dev.go (100%) rename client/internal/{updatemanager => updater}/installer/repourl_prod.go (100%) rename client/internal/{updatemanager => updater}/installer/result.go (98%) rename client/internal/{updatemanager => updater}/installer/types.go (100%) rename client/internal/{updatemanager => updater}/installer/types_darwin.go (100%) rename client/internal/{updatemanager => updater}/installer/types_windows.go (100%) rename client/internal/{updatemanager => updater}/manager.go (52%) create mode 100644 client/internal/updater/manager_linux_test.go create mode 100644 client/internal/updater/manager_test.go create mode 100644 client/internal/updater/manager_test_helpers_test.go rename client/internal/{updatemanager => updater}/reposign/artifact.go (100%) rename client/internal/{updatemanager => updater}/reposign/artifact_test.go (100%) rename client/internal/{updatemanager => updater}/reposign/certs/root-pub.pem (100%) rename client/internal/{updatemanager => updater}/reposign/certsdev/root-pub.pem (100%) rename client/internal/{updatemanager => updater}/reposign/doc.go (100%) rename client/internal/{updatemanager => updater}/reposign/embed_dev.go (100%) rename client/internal/{updatemanager => updater}/reposign/embed_prod.go (100%) rename client/internal/{updatemanager => updater}/reposign/key.go (100%) rename client/internal/{updatemanager => updater}/reposign/key_test.go (100%) rename client/internal/{updatemanager => updater}/reposign/revocation.go (100%) rename client/internal/{updatemanager => updater}/reposign/revocation_test.go (100%) rename client/internal/{updatemanager => updater}/reposign/root.go (100%) rename client/internal/{updatemanager => updater}/reposign/root_test.go (100%) rename client/internal/{updatemanager => updater}/reposign/signature.go (100%) rename client/internal/{updatemanager => updater}/reposign/signature_test.go (100%) rename client/internal/{updatemanager => updater}/reposign/verify.go (98%) rename client/internal/{updatemanager => updater}/reposign/verify_test.go (100%) create mode 100644 client/internal/updater/supported_darwin.go create mode 100644 client/internal/updater/supported_other.go create mode 100644 client/internal/updater/supported_windows.go rename client/internal/{updatemanager => updater}/update.go (90%) create mode 100644 client/server/triggerupdate.go diff --git a/client/android/client.go b/client/android/client.go index ccf32a90c..3fc571559 100644 --- a/client/android/client.go +++ b/client/android/client.go @@ -124,7 +124,7 @@ func (c *Client) Run(platformFiles PlatformFiles, urlOpener URLOpener, isAndroid // todo do not throw error in case of cancelled context ctx = internal.CtxInitState(ctx) - c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder, false) + c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder) return c.connectClient.RunOnAndroid(c.tunAdapter, c.iFaceDiscover, c.networkChangeListener, slices.Clone(dns.items), dnsReadyListener, stateFile) } @@ -157,7 +157,7 @@ func (c *Client) RunWithoutLogin(platformFiles PlatformFiles, dns *DNSList, dnsR // todo do not throw error in case of cancelled context ctx = internal.CtxInitState(ctx) - c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder, false) + c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder) return c.connectClient.RunOnAndroid(c.tunAdapter, c.iFaceDiscover, c.networkChangeListener, slices.Clone(dns.items), dnsReadyListener, stateFile) } diff --git a/client/cmd/signer/artifactkey.go b/client/cmd/signer/artifactkey.go index 5e656650b..ee12326db 100644 --- a/client/cmd/signer/artifactkey.go +++ b/client/cmd/signer/artifactkey.go @@ -7,7 +7,7 @@ import ( "github.com/spf13/cobra" - "github.com/netbirdio/netbird/client/internal/updatemanager/reposign" + "github.com/netbirdio/netbird/client/internal/updater/reposign" ) var ( diff --git a/client/cmd/signer/artifactsign.go b/client/cmd/signer/artifactsign.go index 881be9367..7c02323dc 100644 --- a/client/cmd/signer/artifactsign.go +++ b/client/cmd/signer/artifactsign.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" - "github.com/netbirdio/netbird/client/internal/updatemanager/reposign" + "github.com/netbirdio/netbird/client/internal/updater/reposign" ) const ( diff --git a/client/cmd/signer/revocation.go b/client/cmd/signer/revocation.go index 1d84b65c3..5ff636dcb 100644 --- a/client/cmd/signer/revocation.go +++ b/client/cmd/signer/revocation.go @@ -7,7 +7,7 @@ import ( "github.com/spf13/cobra" - "github.com/netbirdio/netbird/client/internal/updatemanager/reposign" + "github.com/netbirdio/netbird/client/internal/updater/reposign" ) const ( diff --git a/client/cmd/signer/rootkey.go b/client/cmd/signer/rootkey.go index 78ac36b41..eae0da84d 100644 --- a/client/cmd/signer/rootkey.go +++ b/client/cmd/signer/rootkey.go @@ -7,7 +7,7 @@ import ( "github.com/spf13/cobra" - "github.com/netbirdio/netbird/client/internal/updatemanager/reposign" + "github.com/netbirdio/netbird/client/internal/updater/reposign" ) var ( diff --git a/client/cmd/up.go b/client/cmd/up.go index 9559287d5..f5766522a 100644 --- a/client/cmd/up.go +++ b/client/cmd/up.go @@ -197,7 +197,7 @@ func runInForegroundMode(ctx context.Context, cmd *cobra.Command, activeProf *pr r := peer.NewRecorder(config.ManagementURL.String()) r.GetFullStatus() - connectClient := internal.NewConnectClient(ctx, config, r, false) + connectClient := internal.NewConnectClient(ctx, config, r) SetupDebugHandler(ctx, config, r, connectClient, "") return connectClient.Run(nil, util.FindFirstLogPath(logFiles)) diff --git a/client/cmd/update_supported.go b/client/cmd/update_supported.go index 977875093..0b197f4c5 100644 --- a/client/cmd/update_supported.go +++ b/client/cmd/update_supported.go @@ -11,7 +11,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "github.com/netbirdio/netbird/client/internal/updatemanager/installer" + "github.com/netbirdio/netbird/client/internal/updater/installer" "github.com/netbirdio/netbird/util" ) diff --git a/client/embed/embed.go b/client/embed/embed.go index 4fbe0eada..21043cf96 100644 --- a/client/embed/embed.go +++ b/client/embed/embed.go @@ -202,7 +202,7 @@ func (c *Client) Start(startCtx context.Context) error { if err, _ := authClient.Login(ctx, c.setupKey, c.jwtToken); err != nil { return fmt.Errorf("login: %w", err) } - client := internal.NewConnectClient(ctx, c.config, c.recorder, false) + client := internal.NewConnectClient(ctx, c.config, c.recorder) client.SetSyncResponsePersistence(true) // either startup error (permanent backoff err) or nil err (successful engine up) diff --git a/client/internal/connect.go b/client/internal/connect.go index 68a0cb8da..ccd7b6c33 100644 --- a/client/internal/connect.go +++ b/client/internal/connect.go @@ -27,8 +27,8 @@ import ( "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/internal/statemanager" "github.com/netbirdio/netbird/client/internal/stdnet" - "github.com/netbirdio/netbird/client/internal/updatemanager" - "github.com/netbirdio/netbird/client/internal/updatemanager/installer" + "github.com/netbirdio/netbird/client/internal/updater" + "github.com/netbirdio/netbird/client/internal/updater/installer" nbnet "github.com/netbirdio/netbird/client/net" cProto "github.com/netbirdio/netbird/client/proto" "github.com/netbirdio/netbird/client/ssh" @@ -44,13 +44,13 @@ import ( ) type ConnectClient struct { - ctx context.Context - config *profilemanager.Config - statusRecorder *peer.Status - doInitialAutoUpdate bool + ctx context.Context + config *profilemanager.Config + statusRecorder *peer.Status - engine *Engine - engineMutex sync.Mutex + engine *Engine + engineMutex sync.Mutex + updateManager *updater.Manager persistSyncResponse bool } @@ -59,17 +59,19 @@ func NewConnectClient( ctx context.Context, config *profilemanager.Config, statusRecorder *peer.Status, - doInitalAutoUpdate bool, ) *ConnectClient { return &ConnectClient{ - ctx: ctx, - config: config, - statusRecorder: statusRecorder, - doInitialAutoUpdate: doInitalAutoUpdate, - engineMutex: sync.Mutex{}, + ctx: ctx, + config: config, + statusRecorder: statusRecorder, + engineMutex: sync.Mutex{}, } } +func (c *ConnectClient) SetUpdateManager(um *updater.Manager) { + c.updateManager = um +} + // Run with main logic. func (c *ConnectClient) Run(runningChan chan struct{}, logPath string) error { return c.run(MobileDependency{}, runningChan, logPath) @@ -187,14 +189,13 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan stateManager := statemanager.New(path) stateManager.RegisterState(&sshconfig.ShutdownState{}) - updateManager, err := updatemanager.NewManager(c.statusRecorder, stateManager) - if err == nil { - updateManager.CheckUpdateSuccess(c.ctx) + if c.updateManager != nil { + c.updateManager.CheckUpdateSuccess(c.ctx) + } - inst := installer.New() - if err := inst.CleanUpInstallerFiles(); err != nil { - log.Errorf("failed to clean up temporary installer file: %v", err) - } + inst := installer.New() + if err := inst.CleanUpInstallerFiles(); err != nil { + log.Errorf("failed to clean up temporary installer file: %v", err) } defer c.statusRecorder.ClientStop() @@ -308,7 +309,15 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan checks := loginResp.GetChecks() c.engineMutex.Lock() - engine := NewEngine(engineCtx, cancel, signalClient, mgmClient, relayManager, engineConfig, mobileDependency, c.statusRecorder, checks, stateManager) + engine := NewEngine(engineCtx, cancel, engineConfig, EngineServices{ + SignalClient: signalClient, + MgmClient: mgmClient, + RelayManager: relayManager, + StatusRecorder: c.statusRecorder, + Checks: checks, + StateManager: stateManager, + UpdateManager: c.updateManager, + }, mobileDependency) engine.SetSyncResponsePersistence(c.persistSyncResponse) c.engine = engine c.engineMutex.Unlock() @@ -318,15 +327,6 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan return wrapErr(err) } - if loginResp.PeerConfig != nil && loginResp.PeerConfig.AutoUpdate != nil { - // AutoUpdate will be true when the user click on "Connect" menu on the UI - if c.doInitialAutoUpdate { - log.Infof("start engine by ui, run auto-update check") - c.engine.InitialUpdateHandling(loginResp.PeerConfig.AutoUpdate) - c.doInitialAutoUpdate = false - } - } - log.Infof("Netbird engine started, the IP is: %s", peerConfig.GetAddress()) state.Set(StatusConnected) diff --git a/client/internal/debug/debug.go b/client/internal/debug/debug.go index 0f8243e7a..f0f399bef 100644 --- a/client/internal/debug/debug.go +++ b/client/internal/debug/debug.go @@ -27,7 +27,7 @@ import ( "github.com/netbirdio/netbird/client/anonymize" "github.com/netbirdio/netbird/client/internal/peer" "github.com/netbirdio/netbird/client/internal/profilemanager" - "github.com/netbirdio/netbird/client/internal/updatemanager/installer" + "github.com/netbirdio/netbird/client/internal/updater/installer" nbstatus "github.com/netbirdio/netbird/client/status" mgmProto "github.com/netbirdio/netbird/shared/management/proto" "github.com/netbirdio/netbird/util" diff --git a/client/internal/engine.go b/client/internal/engine.go index 858202155..fd3bdf7af 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -51,7 +51,7 @@ import ( "github.com/netbirdio/netbird/client/internal/routemanager" "github.com/netbirdio/netbird/client/internal/routemanager/systemops" "github.com/netbirdio/netbird/client/internal/statemanager" - "github.com/netbirdio/netbird/client/internal/updatemanager" + "github.com/netbirdio/netbird/client/internal/updater" "github.com/netbirdio/netbird/client/jobexec" cProto "github.com/netbirdio/netbird/client/proto" "github.com/netbirdio/netbird/client/system" @@ -79,7 +79,6 @@ const ( var ErrResetConnection = fmt.Errorf("reset connection") -// EngineConfig is a config for the Engine type EngineConfig struct { WgPort int WgIfaceName string @@ -141,6 +140,17 @@ type EngineConfig struct { LogPath string } +// EngineServices holds the external service dependencies required by the Engine. +type EngineServices struct { + SignalClient signal.Client + MgmClient mgm.Client + RelayManager *relayClient.Manager + StatusRecorder *peer.Status + Checks []*mgmProto.Checks + StateManager *statemanager.Manager + UpdateManager *updater.Manager +} + // Engine is a mechanism responsible for reacting on Signal and Management stream events and managing connections to the remote peers. type Engine struct { // signal is a Signal Service client @@ -209,7 +219,7 @@ type Engine struct { flowManager nftypes.FlowManager // auto-update - updateManager *updatemanager.Manager + updateManager *updater.Manager // WireGuard interface monitor wgIfaceMonitor *WGIfaceMonitor @@ -239,22 +249,17 @@ type localIpUpdater interface { func NewEngine( clientCtx context.Context, clientCancel context.CancelFunc, - signalClient signal.Client, - mgmClient mgm.Client, - relayManager *relayClient.Manager, config *EngineConfig, + services EngineServices, mobileDep MobileDependency, - statusRecorder *peer.Status, - checks []*mgmProto.Checks, - stateManager *statemanager.Manager, ) *Engine { engine := &Engine{ clientCtx: clientCtx, clientCancel: clientCancel, - signal: signalClient, - signaler: peer.NewSignaler(signalClient, config.WgPrivateKey), - mgmClient: mgmClient, - relayManager: relayManager, + signal: services.SignalClient, + signaler: peer.NewSignaler(services.SignalClient, config.WgPrivateKey), + mgmClient: services.MgmClient, + relayManager: services.RelayManager, peerStore: peerstore.NewConnStore(), syncMsgMux: &sync.Mutex{}, config: config, @@ -262,11 +267,12 @@ func NewEngine( STUNs: []*stun.URI{}, TURNs: []*stun.URI{}, networkSerial: 0, - statusRecorder: statusRecorder, - stateManager: stateManager, - checks: checks, + statusRecorder: services.StatusRecorder, + stateManager: services.StateManager, + checks: services.Checks, probeStunTurn: relay.NewStunTurnProbe(relay.DefaultCacheTTL), jobExecutor: jobexec.NewExecutor(), + updateManager: services.UpdateManager, } log.Infof("I am: %s", config.WgPrivateKey.PublicKey().String()) @@ -309,7 +315,7 @@ func (e *Engine) Stop() error { } if e.updateManager != nil { - e.updateManager.Stop() + e.updateManager.SetDownloadOnly() } log.Info("cleaning up status recorder states") @@ -559,13 +565,6 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) return nil } -func (e *Engine) InitialUpdateHandling(autoUpdateSettings *mgmProto.AutoUpdateSettings) { - e.syncMsgMux.Lock() - defer e.syncMsgMux.Unlock() - - e.handleAutoUpdateVersion(autoUpdateSettings, true) -} - func (e *Engine) createFirewall() error { if e.config.DisableFirewall { log.Infof("firewall is disabled") @@ -793,39 +792,22 @@ func (e *Engine) PopulateNetbirdConfig(netbirdConfig *mgmProto.NetbirdConfig, mg return nil } -func (e *Engine) handleAutoUpdateVersion(autoUpdateSettings *mgmProto.AutoUpdateSettings, initialCheck bool) { +func (e *Engine) handleAutoUpdateVersion(autoUpdateSettings *mgmProto.AutoUpdateSettings) { + if e.updateManager == nil { + return + } + if autoUpdateSettings == nil { return } - disabled := autoUpdateSettings.Version == disableAutoUpdate - - // stop and cleanup if disabled - if e.updateManager != nil && disabled { - log.Infof("auto-update is disabled, stopping update manager") - e.updateManager.Stop() - e.updateManager = nil + if autoUpdateSettings.Version == disableAutoUpdate { + log.Infof("auto-update is disabled") + e.updateManager.SetDownloadOnly() return } - // Skip check unless AlwaysUpdate is enabled or this is the initial check at startup - if !autoUpdateSettings.AlwaysUpdate && !initialCheck { - log.Debugf("skipping auto-update check, AlwaysUpdate is false and this is not the initial check") - return - } - - // Start manager if needed - if e.updateManager == nil { - log.Infof("starting auto-update manager") - updateManager, err := updatemanager.NewManager(e.statusRecorder, e.stateManager) - if err != nil { - return - } - e.updateManager = updateManager - e.updateManager.Start(e.ctx) - } - log.Infof("handling auto-update version: %s", autoUpdateSettings.Version) - e.updateManager.SetVersion(autoUpdateSettings.Version) + e.updateManager.SetVersion(autoUpdateSettings.Version, autoUpdateSettings.AlwaysUpdate) } func (e *Engine) handleSync(update *mgmProto.SyncResponse) error { @@ -842,7 +824,7 @@ func (e *Engine) handleSync(update *mgmProto.SyncResponse) error { } if update.NetworkMap != nil && update.NetworkMap.PeerConfig != nil { - e.handleAutoUpdateVersion(update.NetworkMap.PeerConfig.AutoUpdate, false) + e.handleAutoUpdateVersion(update.NetworkMap.PeerConfig.AutoUpdate) } if update.GetNetbirdConfig() != nil { diff --git a/client/internal/engine_test.go b/client/internal/engine_test.go index 012c8ad6e..f9e7f8fa0 100644 --- a/client/internal/engine_test.go +++ b/client/internal/engine_test.go @@ -251,9 +251,6 @@ func TestEngine_SSH(t *testing.T) { relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU) engine := NewEngine( ctx, cancel, - &signal.MockClient{}, - &mgmt.MockClient{}, - relayMgr, &EngineConfig{ WgIfaceName: "utun101", WgAddr: "100.64.0.1/24", @@ -263,10 +260,13 @@ func TestEngine_SSH(t *testing.T) { MTU: iface.DefaultMTU, SSHKey: sshKey, }, + EngineServices{ + SignalClient: &signal.MockClient{}, + MgmClient: &mgmt.MockClient{}, + RelayManager: relayMgr, + StatusRecorder: peer.NewRecorder("https://mgm"), + }, MobileDependency{}, - peer.NewRecorder("https://mgm"), - nil, - nil, ) engine.dnsServer = &dns.MockServer{ @@ -428,13 +428,18 @@ func TestEngine_UpdateNetworkMap(t *testing.T) { defer cancel() relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU) - engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{}, relayMgr, &EngineConfig{ + engine := NewEngine(ctx, cancel, &EngineConfig{ WgIfaceName: "utun102", WgAddr: "100.64.0.1/24", WgPrivateKey: key, WgPort: 33100, MTU: iface.DefaultMTU, - }, MobileDependency{}, peer.NewRecorder("https://mgm"), nil, nil) + }, EngineServices{ + SignalClient: &signal.MockClient{}, + MgmClient: &mgmt.MockClient{}, + RelayManager: relayMgr, + StatusRecorder: peer.NewRecorder("https://mgm"), + }, MobileDependency{}) wgIface := &MockWGIface{ NameFunc: func() string { return "utun102" }, @@ -647,13 +652,18 @@ func TestEngine_Sync(t *testing.T) { return nil } relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU) - engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{SyncFunc: syncFunc}, relayMgr, &EngineConfig{ + engine := NewEngine(ctx, cancel, &EngineConfig{ WgIfaceName: "utun103", WgAddr: "100.64.0.1/24", WgPrivateKey: key, WgPort: 33100, MTU: iface.DefaultMTU, - }, MobileDependency{}, peer.NewRecorder("https://mgm"), nil, nil) + }, EngineServices{ + SignalClient: &signal.MockClient{}, + MgmClient: &mgmt.MockClient{SyncFunc: syncFunc}, + RelayManager: relayMgr, + StatusRecorder: peer.NewRecorder("https://mgm"), + }, MobileDependency{}) engine.ctx = ctx engine.dnsServer = &dns.MockServer{ @@ -812,13 +822,18 @@ func TestEngine_UpdateNetworkMapWithRoutes(t *testing.T) { wgAddr := fmt.Sprintf("100.66.%d.1/24", n) relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU) - engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{}, relayMgr, &EngineConfig{ + engine := NewEngine(ctx, cancel, &EngineConfig{ WgIfaceName: wgIfaceName, WgAddr: wgAddr, WgPrivateKey: key, WgPort: 33100, MTU: iface.DefaultMTU, - }, MobileDependency{}, peer.NewRecorder("https://mgm"), nil, nil) + }, EngineServices{ + SignalClient: &signal.MockClient{}, + MgmClient: &mgmt.MockClient{}, + RelayManager: relayMgr, + StatusRecorder: peer.NewRecorder("https://mgm"), + }, MobileDependency{}) engine.ctx = ctx newNet, err := stdnet.NewNet(context.Background(), nil) if err != nil { @@ -1014,13 +1029,18 @@ func TestEngine_UpdateNetworkMapWithDNSUpdate(t *testing.T) { wgAddr := fmt.Sprintf("100.66.%d.1/24", n) relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU) - engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{}, relayMgr, &EngineConfig{ + engine := NewEngine(ctx, cancel, &EngineConfig{ WgIfaceName: wgIfaceName, WgAddr: wgAddr, WgPrivateKey: key, WgPort: 33100, MTU: iface.DefaultMTU, - }, MobileDependency{}, peer.NewRecorder("https://mgm"), nil, nil) + }, EngineServices{ + SignalClient: &signal.MockClient{}, + MgmClient: &mgmt.MockClient{}, + RelayManager: relayMgr, + StatusRecorder: peer.NewRecorder("https://mgm"), + }, MobileDependency{}) engine.ctx = ctx newNet, err := stdnet.NewNet(context.Background(), nil) @@ -1546,7 +1566,12 @@ func createEngine(ctx context.Context, cancel context.CancelFunc, setupKey strin } relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU) - e, err := NewEngine(ctx, cancel, signalClient, mgmtClient, relayMgr, conf, MobileDependency{}, peer.NewRecorder("https://mgm"), nil, nil), nil + e, err := NewEngine(ctx, cancel, conf, EngineServices{ + SignalClient: signalClient, + MgmClient: mgmtClient, + RelayManager: relayMgr, + StatusRecorder: peer.NewRecorder("https://mgm"), + }, MobileDependency{}), nil e.ctx = ctx return e, err } diff --git a/client/internal/updatemanager/manager_test.go b/client/internal/updatemanager/manager_test.go deleted file mode 100644 index 20ddec10d..000000000 --- a/client/internal/updatemanager/manager_test.go +++ /dev/null @@ -1,214 +0,0 @@ -//go:build windows || darwin - -package updatemanager - -import ( - "context" - "fmt" - "path" - "testing" - "time" - - v "github.com/hashicorp/go-version" - - "github.com/netbirdio/netbird/client/internal/peer" - "github.com/netbirdio/netbird/client/internal/statemanager" -) - -type versionUpdateMock struct { - latestVersion *v.Version - onUpdate func() -} - -func (v versionUpdateMock) StopWatch() {} - -func (v versionUpdateMock) SetDaemonVersion(newVersion string) bool { - return false -} - -func (v *versionUpdateMock) SetOnUpdateListener(updateFn func()) { - v.onUpdate = updateFn -} - -func (v versionUpdateMock) LatestVersion() *v.Version { - return v.latestVersion -} - -func (v versionUpdateMock) StartFetcher() {} - -func Test_LatestVersion(t *testing.T) { - testMatrix := []struct { - name string - daemonVersion string - initialLatestVersion *v.Version - latestVersion *v.Version - shouldUpdateInit bool - shouldUpdateLater bool - }{ - { - name: "Should only trigger update once due to time between triggers being < 5 Minutes", - daemonVersion: "1.0.0", - initialLatestVersion: v.Must(v.NewSemver("1.0.1")), - latestVersion: v.Must(v.NewSemver("1.0.2")), - shouldUpdateInit: true, - shouldUpdateLater: false, - }, - { - name: "Shouldn't update initially, but should update as soon as latest version is fetched", - daemonVersion: "1.0.0", - initialLatestVersion: nil, - latestVersion: v.Must(v.NewSemver("1.0.1")), - shouldUpdateInit: false, - shouldUpdateLater: true, - }, - } - - for idx, c := range testMatrix { - mockUpdate := &versionUpdateMock{latestVersion: c.initialLatestVersion} - tmpFile := path.Join(t.TempDir(), fmt.Sprintf("update-test-%d.json", idx)) - m, _ := newManager(peer.NewRecorder(""), statemanager.New(tmpFile)) - m.update = mockUpdate - - targetVersionChan := make(chan string, 1) - - m.triggerUpdateFn = func(ctx context.Context, targetVersion string) error { - targetVersionChan <- targetVersion - return nil - } - m.currentVersion = c.daemonVersion - m.Start(context.Background()) - m.SetVersion("latest") - var triggeredInit bool - select { - case targetVersion := <-targetVersionChan: - if targetVersion != c.initialLatestVersion.String() { - t.Errorf("%s: Initial update version mismatch, expected %v, got %v", c.name, c.initialLatestVersion.String(), targetVersion) - } - triggeredInit = true - case <-time.After(10 * time.Millisecond): - triggeredInit = false - } - if triggeredInit != c.shouldUpdateInit { - t.Errorf("%s: Initial update trigger mismatch, expected %v, got %v", c.name, c.shouldUpdateInit, triggeredInit) - } - - mockUpdate.latestVersion = c.latestVersion - mockUpdate.onUpdate() - - var triggeredLater bool - select { - case targetVersion := <-targetVersionChan: - if targetVersion != c.latestVersion.String() { - t.Errorf("%s: Update version mismatch, expected %v, got %v", c.name, c.latestVersion.String(), targetVersion) - } - triggeredLater = true - case <-time.After(10 * time.Millisecond): - triggeredLater = false - } - if triggeredLater != c.shouldUpdateLater { - t.Errorf("%s: Update trigger mismatch, expected %v, got %v", c.name, c.shouldUpdateLater, triggeredLater) - } - - m.Stop() - } -} - -func Test_HandleUpdate(t *testing.T) { - testMatrix := []struct { - name string - daemonVersion string - latestVersion *v.Version - expectedVersion string - shouldUpdate bool - }{ - { - name: "Update to a specific version should update regardless of if latestVersion is available yet", - daemonVersion: "0.55.0", - latestVersion: nil, - expectedVersion: "0.56.0", - shouldUpdate: true, - }, - { - name: "Update to specific version should not update if version matches", - daemonVersion: "0.55.0", - latestVersion: nil, - expectedVersion: "0.55.0", - shouldUpdate: false, - }, - { - name: "Update to specific version should not update if current version is newer", - daemonVersion: "0.55.0", - latestVersion: nil, - expectedVersion: "0.54.0", - shouldUpdate: false, - }, - { - name: "Update to latest version should update if latest is newer", - daemonVersion: "0.55.0", - latestVersion: v.Must(v.NewSemver("0.56.0")), - expectedVersion: "latest", - shouldUpdate: true, - }, - { - name: "Update to latest version should not update if latest == current", - daemonVersion: "0.56.0", - latestVersion: v.Must(v.NewSemver("0.56.0")), - expectedVersion: "latest", - shouldUpdate: false, - }, - { - name: "Should not update if daemon version is invalid", - daemonVersion: "development", - latestVersion: v.Must(v.NewSemver("1.0.0")), - expectedVersion: "latest", - shouldUpdate: false, - }, - { - name: "Should not update if expecting latest and latest version is unavailable", - daemonVersion: "0.55.0", - latestVersion: nil, - expectedVersion: "latest", - shouldUpdate: false, - }, - { - name: "Should not update if expected version is invalid", - daemonVersion: "0.55.0", - latestVersion: nil, - expectedVersion: "development", - shouldUpdate: false, - }, - } - for idx, c := range testMatrix { - tmpFile := path.Join(t.TempDir(), fmt.Sprintf("update-test-%d.json", idx)) - m, _ := newManager(peer.NewRecorder(""), statemanager.New(tmpFile)) - m.update = &versionUpdateMock{latestVersion: c.latestVersion} - targetVersionChan := make(chan string, 1) - - m.triggerUpdateFn = func(ctx context.Context, targetVersion string) error { - targetVersionChan <- targetVersion - return nil - } - - m.currentVersion = c.daemonVersion - m.Start(context.Background()) - m.SetVersion(c.expectedVersion) - - var updateTriggered bool - select { - case targetVersion := <-targetVersionChan: - if c.expectedVersion == "latest" && targetVersion != c.latestVersion.String() { - t.Errorf("%s: Update version mismatch, expected %v, got %v", c.name, c.latestVersion.String(), targetVersion) - } else if c.expectedVersion != "latest" && targetVersion != c.expectedVersion { - t.Errorf("%s: Update version mismatch, expected %v, got %v", c.name, c.expectedVersion, targetVersion) - } - updateTriggered = true - case <-time.After(10 * time.Millisecond): - updateTriggered = false - } - - if updateTriggered != c.shouldUpdate { - t.Errorf("%s: Update trigger mismatch, expected %v, got %v", c.name, c.shouldUpdate, updateTriggered) - } - m.Stop() - } -} diff --git a/client/internal/updatemanager/manager_unsupported.go b/client/internal/updatemanager/manager_unsupported.go deleted file mode 100644 index 4e87c2d77..000000000 --- a/client/internal/updatemanager/manager_unsupported.go +++ /dev/null @@ -1,39 +0,0 @@ -//go:build !windows && !darwin - -package updatemanager - -import ( - "context" - "fmt" - - "github.com/netbirdio/netbird/client/internal/peer" - "github.com/netbirdio/netbird/client/internal/statemanager" -) - -// Manager is a no-op stub for unsupported platforms -type Manager struct{} - -// NewManager returns a no-op manager for unsupported platforms -func NewManager(statusRecorder *peer.Status, stateManager *statemanager.Manager) (*Manager, error) { - return nil, fmt.Errorf("update manager is not supported on this platform") -} - -// CheckUpdateSuccess is a no-op on unsupported platforms -func (m *Manager) CheckUpdateSuccess(ctx context.Context) { - // no-op -} - -// Start is a no-op on unsupported platforms -func (m *Manager) Start(ctx context.Context) { - // no-op -} - -// SetVersion is a no-op on unsupported platforms -func (m *Manager) SetVersion(expectedVersion string) { - // no-op -} - -// Stop is a no-op on unsupported platforms -func (m *Manager) Stop() { - // no-op -} diff --git a/client/internal/updatemanager/doc.go b/client/internal/updater/doc.go similarity index 93% rename from client/internal/updatemanager/doc.go rename to client/internal/updater/doc.go index 54d1bdeab..e1924aa43 100644 --- a/client/internal/updatemanager/doc.go +++ b/client/internal/updater/doc.go @@ -1,4 +1,4 @@ -// Package updatemanager provides automatic update management for the NetBird client. +// Package updater provides automatic update management for the NetBird client. // It monitors for new versions, handles update triggers from management server directives, // and orchestrates the download and installation of client updates. // @@ -32,4 +32,4 @@ // // This enables verification of successful updates and appropriate user notification // after the client restarts with the new version. -package updatemanager +package updater diff --git a/client/internal/updatemanager/downloader/downloader.go b/client/internal/updater/downloader/downloader.go similarity index 100% rename from client/internal/updatemanager/downloader/downloader.go rename to client/internal/updater/downloader/downloader.go diff --git a/client/internal/updatemanager/downloader/downloader_test.go b/client/internal/updater/downloader/downloader_test.go similarity index 100% rename from client/internal/updatemanager/downloader/downloader_test.go rename to client/internal/updater/downloader/downloader_test.go diff --git a/client/internal/updatemanager/installer/binary_nowindows.go b/client/internal/updater/installer/binary_nowindows.go similarity index 100% rename from client/internal/updatemanager/installer/binary_nowindows.go rename to client/internal/updater/installer/binary_nowindows.go diff --git a/client/internal/updatemanager/installer/binary_windows.go b/client/internal/updater/installer/binary_windows.go similarity index 100% rename from client/internal/updatemanager/installer/binary_windows.go rename to client/internal/updater/installer/binary_windows.go diff --git a/client/internal/updatemanager/installer/doc.go b/client/internal/updater/installer/doc.go similarity index 100% rename from client/internal/updatemanager/installer/doc.go rename to client/internal/updater/installer/doc.go diff --git a/client/internal/updatemanager/installer/installer.go b/client/internal/updater/installer/installer.go similarity index 100% rename from client/internal/updatemanager/installer/installer.go rename to client/internal/updater/installer/installer.go diff --git a/client/internal/updatemanager/installer/installer_common.go b/client/internal/updater/installer/installer_common.go similarity index 97% rename from client/internal/updatemanager/installer/installer_common.go rename to client/internal/updater/installer/installer_common.go index 03378d55f..8e44bee82 100644 --- a/client/internal/updatemanager/installer/installer_common.go +++ b/client/internal/updater/installer/installer_common.go @@ -16,8 +16,8 @@ import ( goversion "github.com/hashicorp/go-version" log "github.com/sirupsen/logrus" - "github.com/netbirdio/netbird/client/internal/updatemanager/downloader" - "github.com/netbirdio/netbird/client/internal/updatemanager/reposign" + "github.com/netbirdio/netbird/client/internal/updater/downloader" + "github.com/netbirdio/netbird/client/internal/updater/reposign" ) type Installer struct { diff --git a/client/internal/updatemanager/installer/installer_log_darwin.go b/client/internal/updater/installer/installer_log_darwin.go similarity index 100% rename from client/internal/updatemanager/installer/installer_log_darwin.go rename to client/internal/updater/installer/installer_log_darwin.go diff --git a/client/internal/updatemanager/installer/installer_log_windows.go b/client/internal/updater/installer/installer_log_windows.go similarity index 100% rename from client/internal/updatemanager/installer/installer_log_windows.go rename to client/internal/updater/installer/installer_log_windows.go diff --git a/client/internal/updatemanager/installer/installer_run_darwin.go b/client/internal/updater/installer/installer_run_darwin.go similarity index 100% rename from client/internal/updatemanager/installer/installer_run_darwin.go rename to client/internal/updater/installer/installer_run_darwin.go diff --git a/client/internal/updatemanager/installer/installer_run_windows.go b/client/internal/updater/installer/installer_run_windows.go similarity index 100% rename from client/internal/updatemanager/installer/installer_run_windows.go rename to client/internal/updater/installer/installer_run_windows.go diff --git a/client/internal/updatemanager/installer/log.go b/client/internal/updater/installer/log.go similarity index 100% rename from client/internal/updatemanager/installer/log.go rename to client/internal/updater/installer/log.go diff --git a/client/internal/updatemanager/installer/procattr_darwin.go b/client/internal/updater/installer/procattr_darwin.go similarity index 100% rename from client/internal/updatemanager/installer/procattr_darwin.go rename to client/internal/updater/installer/procattr_darwin.go diff --git a/client/internal/updatemanager/installer/procattr_windows.go b/client/internal/updater/installer/procattr_windows.go similarity index 100% rename from client/internal/updatemanager/installer/procattr_windows.go rename to client/internal/updater/installer/procattr_windows.go diff --git a/client/internal/updatemanager/installer/repourl_dev.go b/client/internal/updater/installer/repourl_dev.go similarity index 100% rename from client/internal/updatemanager/installer/repourl_dev.go rename to client/internal/updater/installer/repourl_dev.go diff --git a/client/internal/updatemanager/installer/repourl_prod.go b/client/internal/updater/installer/repourl_prod.go similarity index 100% rename from client/internal/updatemanager/installer/repourl_prod.go rename to client/internal/updater/installer/repourl_prod.go diff --git a/client/internal/updatemanager/installer/result.go b/client/internal/updater/installer/result.go similarity index 98% rename from client/internal/updatemanager/installer/result.go rename to client/internal/updater/installer/result.go index 03d08d527..526c3eb53 100644 --- a/client/internal/updatemanager/installer/result.go +++ b/client/internal/updater/installer/result.go @@ -203,7 +203,10 @@ func (rh *ResultHandler) write(result Result) error { func (rh *ResultHandler) cleanup() error { err := os.Remove(rh.resultFile) - if err != nil && !os.IsNotExist(err) { + if err != nil { + if os.IsNotExist(err) { + return nil + } return err } log.Debugf("delete installer result file: %s", rh.resultFile) diff --git a/client/internal/updatemanager/installer/types.go b/client/internal/updater/installer/types.go similarity index 100% rename from client/internal/updatemanager/installer/types.go rename to client/internal/updater/installer/types.go diff --git a/client/internal/updatemanager/installer/types_darwin.go b/client/internal/updater/installer/types_darwin.go similarity index 100% rename from client/internal/updatemanager/installer/types_darwin.go rename to client/internal/updater/installer/types_darwin.go diff --git a/client/internal/updatemanager/installer/types_windows.go b/client/internal/updater/installer/types_windows.go similarity index 100% rename from client/internal/updatemanager/installer/types_windows.go rename to client/internal/updater/installer/types_windows.go diff --git a/client/internal/updatemanager/manager.go b/client/internal/updater/manager.go similarity index 52% rename from client/internal/updatemanager/manager.go rename to client/internal/updater/manager.go index eae11de56..dfcb93177 100644 --- a/client/internal/updatemanager/manager.go +++ b/client/internal/updater/manager.go @@ -1,12 +1,9 @@ -//go:build windows || darwin - -package updatemanager +package updater import ( "context" "errors" "fmt" - "runtime" "sync" "time" @@ -15,7 +12,7 @@ import ( "github.com/netbirdio/netbird/client/internal/peer" "github.com/netbirdio/netbird/client/internal/statemanager" - "github.com/netbirdio/netbird/client/internal/updatemanager/installer" + "github.com/netbirdio/netbird/client/internal/updater/installer" cProto "github.com/netbirdio/netbird/client/proto" "github.com/netbirdio/netbird/version" ) @@ -41,6 +38,9 @@ type Manager struct { statusRecorder *peer.Status stateManager *statemanager.Manager + downloadOnly bool // true when no enforcement from management; notifies UI to download latest + forceUpdate bool // true when management sets AlwaysUpdate; skips UI interaction and installs directly + lastTrigger time.Time mgmUpdateChan chan struct{} updateChannel chan struct{} @@ -53,37 +53,38 @@ type Manager struct { expectedVersion *v.Version updateToLatestVersion bool - // updateMutex protect update and expectedVersion fields + pendingVersion *v.Version + + // updateMutex protects update, expectedVersion, updateToLatestVersion, + // downloadOnly, forceUpdate, pendingVersion, and lastTrigger fields updateMutex sync.Mutex - triggerUpdateFn func(context.Context, string) error + // installMutex and installing guard against concurrent installation attempts + installMutex sync.Mutex + installing bool + + // protect to start the service multiple times + mu sync.Mutex + + autoUpdateSupported func() bool } -func NewManager(statusRecorder *peer.Status, stateManager *statemanager.Manager) (*Manager, error) { - if runtime.GOOS == "darwin" { - isBrew := !installer.TypeOfInstaller(context.Background()).Downloadable() - if isBrew { - log.Warnf("auto-update disabled on Home Brew installation") - return nil, fmt.Errorf("auto-update not supported on Home Brew installation yet") - } - } - return newManager(statusRecorder, stateManager) -} - -func newManager(statusRecorder *peer.Status, stateManager *statemanager.Manager) (*Manager, error) { +// NewManager creates a new update manager. The manager is single-use: once Stop() is called, it cannot be restarted. +func NewManager(statusRecorder *peer.Status, stateManager *statemanager.Manager) *Manager { manager := &Manager{ - statusRecorder: statusRecorder, - stateManager: stateManager, - mgmUpdateChan: make(chan struct{}, 1), - updateChannel: make(chan struct{}, 1), - currentVersion: version.NetbirdVersion(), - update: version.NewUpdate("nb/client"), + statusRecorder: statusRecorder, + stateManager: stateManager, + mgmUpdateChan: make(chan struct{}, 1), + updateChannel: make(chan struct{}, 1), + currentVersion: version.NetbirdVersion(), + update: version.NewUpdate("nb/client"), + downloadOnly: true, + autoUpdateSupported: isAutoUpdateSupported, } - manager.triggerUpdateFn = manager.triggerUpdate stateManager.RegisterState(&UpdateState{}) - return manager, nil + return manager } // CheckUpdateSuccess checks if the update was successful and send a notification. @@ -124,8 +125,10 @@ func (m *Manager) CheckUpdateSuccess(ctx context.Context) { } func (m *Manager) Start(ctx context.Context) { + log.Infof("starting update manager") + m.mu.Lock() + defer m.mu.Unlock() if m.cancel != nil { - log.Errorf("Manager already started") return } @@ -142,13 +145,32 @@ func (m *Manager) Start(ctx context.Context) { m.cancel = cancel m.wg.Add(1) - go m.updateLoop(ctx) + go func() { + defer m.wg.Done() + m.updateLoop(ctx) + }() } -func (m *Manager) SetVersion(expectedVersion string) { - log.Infof("set expected agent version for upgrade: %s", expectedVersion) - if m.cancel == nil { - log.Errorf("manager not started") +func (m *Manager) SetDownloadOnly() { + m.updateMutex.Lock() + m.downloadOnly = true + m.forceUpdate = false + m.expectedVersion = nil + m.updateToLatestVersion = false + m.lastTrigger = time.Time{} + m.updateMutex.Unlock() + + select { + case m.mgmUpdateChan <- struct{}{}: + default: + } +} + +func (m *Manager) SetVersion(expectedVersion string, forceUpdate bool) { + log.Infof("expected version changed to %s, force update: %t", expectedVersion, forceUpdate) + + if !m.autoUpdateSupported() { + log.Warnf("auto-update not supported on this platform") return } @@ -159,6 +181,7 @@ func (m *Manager) SetVersion(expectedVersion string) { log.Errorf("empty expected version provided") m.expectedVersion = nil m.updateToLatestVersion = false + m.downloadOnly = true return } @@ -178,12 +201,97 @@ func (m *Manager) SetVersion(expectedVersion string) { m.updateToLatestVersion = false } + m.lastTrigger = time.Time{} + m.downloadOnly = false + m.forceUpdate = forceUpdate + select { case m.mgmUpdateChan <- struct{}{}: default: } } +// Install triggers the installation of the pending version. It is called when the user clicks the install button in the UI. +func (m *Manager) Install(ctx context.Context) error { + if !m.autoUpdateSupported() { + return fmt.Errorf("auto-update not supported on this platform") + } + + m.updateMutex.Lock() + pending := m.pendingVersion + m.updateMutex.Unlock() + + if pending == nil { + return fmt.Errorf("no pending version to install") + } + + return m.tryInstall(ctx, pending) +} + +// tryInstall ensures only one installation runs at a time. Concurrent callers +// receive an error immediately rather than queuing behind a running install. +func (m *Manager) tryInstall(ctx context.Context, targetVersion *v.Version) error { + m.installMutex.Lock() + if m.installing { + m.installMutex.Unlock() + return fmt.Errorf("installation already in progress") + } + m.installing = true + m.installMutex.Unlock() + + defer func() { + m.installMutex.Lock() + m.installing = false + m.installMutex.Unlock() + }() + + return m.install(ctx, targetVersion) +} + +// NotifyUI re-publishes the current update state to a newly connected UI client. +// Only needed for download-only mode where the latest version is already cached +// NotifyUI re-publishes the current update state so a newly connected UI gets the info. +func (m *Manager) NotifyUI() { + m.updateMutex.Lock() + if m.update == nil { + m.updateMutex.Unlock() + return + } + downloadOnly := m.downloadOnly + pendingVersion := m.pendingVersion + latestVersion := m.update.LatestVersion() + m.updateMutex.Unlock() + + if downloadOnly { + if latestVersion == nil { + return + } + currentVersion, err := v.NewVersion(m.currentVersion) + if err != nil || currentVersion.GreaterThanOrEqual(latestVersion) { + return + } + m.statusRecorder.PublishEvent( + cProto.SystemEvent_INFO, + cProto.SystemEvent_SYSTEM, + "New version available", + "", + map[string]string{"new_version_available": latestVersion.String()}, + ) + return + } + + if pendingVersion != nil { + m.statusRecorder.PublishEvent( + cProto.SystemEvent_INFO, + cProto.SystemEvent_SYSTEM, + "New version available", + "", + map[string]string{"new_version_available": pendingVersion.String(), "enforced": "true"}, + ) + } +} + +// Stop is not used at the moment because it fully depends on the daemon. In a future refactor it may make sense to use it. func (m *Manager) Stop() { if m.cancel == nil { return @@ -214,8 +322,6 @@ func (m *Manager) onContextCancel() { } func (m *Manager) updateLoop(ctx context.Context) { - defer m.wg.Done() - for { select { case <-ctx.Done(): @@ -239,55 +345,89 @@ func (m *Manager) handleUpdate(ctx context.Context) { return } - expectedVersion := m.expectedVersion - useLatest := m.updateToLatestVersion + downloadOnly := m.downloadOnly + forceUpdate := m.forceUpdate curLatestVersion := m.update.LatestVersion() - m.updateMutex.Unlock() switch { - // Resolve "latest" to actual version - case useLatest: + // Download-only mode or resolve "latest" to actual version + case downloadOnly, m.updateToLatestVersion: if curLatestVersion == nil { log.Tracef("latest version not fetched yet") + m.updateMutex.Unlock() return } updateVersion = curLatestVersion - // Update to specific version - case expectedVersion != nil: - updateVersion = expectedVersion + // Install to specific version + case m.expectedVersion != nil: + updateVersion = m.expectedVersion default: log.Debugf("no expected version information set") + m.updateMutex.Unlock() return } log.Debugf("checking update option, current version: %s, target version: %s", m.currentVersion, updateVersion) - if !m.shouldUpdate(updateVersion) { + if !m.shouldUpdate(updateVersion, forceUpdate) { + m.updateMutex.Unlock() return } m.lastTrigger = time.Now() - log.Infof("Auto-update triggered, current version: %s, target version: %s", m.currentVersion, updateVersion) - m.statusRecorder.PublishEvent( - cProto.SystemEvent_CRITICAL, - cProto.SystemEvent_SYSTEM, - "Automatically updating client", - "Your client version is older than auto-update version set in Management, updating client now.", - nil, - ) + log.Infof("new version available: %s", updateVersion) + + if !downloadOnly && !forceUpdate { + m.pendingVersion = updateVersion + } + m.updateMutex.Unlock() + + if downloadOnly { + m.statusRecorder.PublishEvent( + cProto.SystemEvent_INFO, + cProto.SystemEvent_SYSTEM, + "New version available", + "", + map[string]string{"new_version_available": updateVersion.String()}, + ) + return + } + + if forceUpdate { + if err := m.tryInstall(ctx, updateVersion); err != nil { + log.Errorf("force update failed: %v", err) + } + return + } + m.statusRecorder.PublishEvent( + cProto.SystemEvent_INFO, + cProto.SystemEvent_SYSTEM, + "New version available", + "", + map[string]string{"new_version_available": updateVersion.String(), "enforced": "true"}, + ) +} + +func (m *Manager) install(ctx context.Context, pendingVersion *v.Version) error { + m.statusRecorder.PublishEvent( + cProto.SystemEvent_CRITICAL, + cProto.SystemEvent_SYSTEM, + "Updating client", + "Installing update now.", + nil, + ) m.statusRecorder.PublishEvent( cProto.SystemEvent_CRITICAL, cProto.SystemEvent_SYSTEM, "", "", - map[string]string{"progress_window": "show", "version": updateVersion.String()}, + map[string]string{"progress_window": "show", "version": pendingVersion.String()}, ) updateState := UpdateState{ PreUpdateVersion: m.currentVersion, - TargetVersion: updateVersion.String(), + TargetVersion: pendingVersion.String(), } - if err := m.stateManager.UpdateState(updateState); err != nil { log.Warnf("failed to update state: %v", err) } else { @@ -296,8 +436,9 @@ func (m *Manager) handleUpdate(ctx context.Context) { } } - if err := m.triggerUpdateFn(ctx, updateVersion.String()); err != nil { - log.Errorf("Error triggering auto-update: %v", err) + inst := installer.New() + if err := inst.RunInstallation(ctx, pendingVersion.String()); err != nil { + log.Errorf("error triggering update: %v", err) m.statusRecorder.PublishEvent( cProto.SystemEvent_ERROR, cProto.SystemEvent_SYSTEM, @@ -305,7 +446,9 @@ func (m *Manager) handleUpdate(ctx context.Context) { fmt.Sprintf("Auto-update failed: %v", err), nil, ) + return err } + return nil } // loadAndDeleteUpdateState loads the update state, deletes it from storage, and returns it. @@ -339,7 +482,7 @@ func (m *Manager) loadAndDeleteUpdateState(ctx context.Context) (*UpdateState, e return updateState, nil } -func (m *Manager) shouldUpdate(updateVersion *v.Version) bool { +func (m *Manager) shouldUpdate(updateVersion *v.Version, forceUpdate bool) bool { if m.currentVersion == developmentVersion { log.Debugf("skipping auto-update, running development version") return false @@ -354,8 +497,8 @@ func (m *Manager) shouldUpdate(updateVersion *v.Version) bool { return false } - if time.Since(m.lastTrigger) < 5*time.Minute { - log.Debugf("skipping auto-update, last update was %s ago", time.Since(m.lastTrigger)) + if forceUpdate && time.Since(m.lastTrigger) < 3*time.Minute { + log.Infof("skipping auto-update, last update was %s ago", time.Since(m.lastTrigger)) return false } @@ -367,8 +510,3 @@ func (m *Manager) lastResultErrReason() string { result := installer.NewResultHandler(inst.TempDir()) return result.GetErrorResultReason() } - -func (m *Manager) triggerUpdate(ctx context.Context, targetVersion string) error { - inst := installer.New() - return inst.RunInstallation(ctx, targetVersion) -} diff --git a/client/internal/updater/manager_linux_test.go b/client/internal/updater/manager_linux_test.go new file mode 100644 index 000000000..b05dd7e7d --- /dev/null +++ b/client/internal/updater/manager_linux_test.go @@ -0,0 +1,111 @@ +//go:build !windows && !darwin + +package updater + +import ( + "context" + "fmt" + "path" + "testing" + "time" + + v "github.com/hashicorp/go-version" + + "github.com/netbirdio/netbird/client/internal/peer" + "github.com/netbirdio/netbird/client/internal/statemanager" +) + +// On Linux, only Mode 1 (downloadOnly) is supported. +// SetVersion is a no-op because auto-update installation is not supported. + +func Test_LatestVersion_Linux(t *testing.T) { + testMatrix := []struct { + name string + daemonVersion string + initialLatestVersion *v.Version + latestVersion *v.Version + shouldUpdateInit bool + shouldUpdateLater bool + }{ + { + name: "Should notify again when a newer version arrives even within 5 minutes", + daemonVersion: "1.0.0", + initialLatestVersion: v.Must(v.NewSemver("1.0.1")), + latestVersion: v.Must(v.NewSemver("1.0.2")), + shouldUpdateInit: true, + shouldUpdateLater: true, + }, + { + name: "Shouldn't notify initially, but should notify as soon as latest version is fetched", + daemonVersion: "1.0.0", + initialLatestVersion: nil, + latestVersion: v.Must(v.NewSemver("1.0.1")), + shouldUpdateInit: false, + shouldUpdateLater: true, + }, + } + + for idx, c := range testMatrix { + mockUpdate := &versionUpdateMock{latestVersion: c.initialLatestVersion} + tmpFile := path.Join(t.TempDir(), fmt.Sprintf("update-test-%d.json", idx)) + recorder := peer.NewRecorder("") + sub := recorder.SubscribeToEvents() + defer recorder.UnsubscribeFromEvents(sub) + + m := NewManager(recorder, statemanager.New(tmpFile)) + m.update = mockUpdate + m.currentVersion = c.daemonVersion + m.Start(context.Background()) + m.SetDownloadOnly() + + ver, enforced := waitForUpdateEvent(sub, 500*time.Millisecond) + triggeredInit := ver != "" + if enforced { + t.Errorf("%s: Linux Mode 1 must never have enforced metadata", c.name) + } + if triggeredInit != c.shouldUpdateInit { + t.Errorf("%s: Initial notify mismatch, expected %v, got %v", c.name, c.shouldUpdateInit, triggeredInit) + } + if triggeredInit && c.initialLatestVersion != nil && ver != c.initialLatestVersion.String() { + t.Errorf("%s: Initial version mismatch, expected %v, got %v", c.name, c.initialLatestVersion.String(), ver) + } + + mockUpdate.latestVersion = c.latestVersion + mockUpdate.onUpdate() + + ver, enforced = waitForUpdateEvent(sub, 500*time.Millisecond) + triggeredLater := ver != "" + if enforced { + t.Errorf("%s: Linux Mode 1 must never have enforced metadata", c.name) + } + if triggeredLater != c.shouldUpdateLater { + t.Errorf("%s: Later notify mismatch, expected %v, got %v", c.name, c.shouldUpdateLater, triggeredLater) + } + if triggeredLater && c.latestVersion != nil && ver != c.latestVersion.String() { + t.Errorf("%s: Later version mismatch, expected %v, got %v", c.name, c.latestVersion.String(), ver) + } + + m.Stop() + } +} + +func Test_SetVersion_NoOp_Linux(t *testing.T) { + // On Linux, SetVersion should be a no-op — no events fired + tmpFile := path.Join(t.TempDir(), "update-test-noop.json") + recorder := peer.NewRecorder("") + sub := recorder.SubscribeToEvents() + defer recorder.UnsubscribeFromEvents(sub) + + m := NewManager(recorder, statemanager.New(tmpFile)) + m.update = &versionUpdateMock{latestVersion: v.Must(v.NewSemver("1.0.1"))} + m.currentVersion = "1.0.0" + m.Start(context.Background()) + m.SetVersion("1.0.1", false) + + ver, _ := waitForUpdateEvent(sub, 500*time.Millisecond) + if ver != "" { + t.Errorf("SetVersion should be a no-op on Linux, but got event with version %s", ver) + } + + m.Stop() +} diff --git a/client/internal/updater/manager_test.go b/client/internal/updater/manager_test.go new file mode 100644 index 000000000..107dca2b3 --- /dev/null +++ b/client/internal/updater/manager_test.go @@ -0,0 +1,227 @@ +//go:build windows || darwin + +package updater + +import ( + "context" + "fmt" + "path" + "testing" + "time" + + v "github.com/hashicorp/go-version" + + "github.com/netbirdio/netbird/client/internal/peer" + "github.com/netbirdio/netbird/client/internal/statemanager" + cProto "github.com/netbirdio/netbird/client/proto" +) + +func Test_LatestVersion(t *testing.T) { + testMatrix := []struct { + name string + daemonVersion string + initialLatestVersion *v.Version + latestVersion *v.Version + shouldUpdateInit bool + shouldUpdateLater bool + }{ + { + name: "Should notify again when a newer version arrives even within 5 minutes", + daemonVersion: "1.0.0", + initialLatestVersion: v.Must(v.NewSemver("1.0.1")), + latestVersion: v.Must(v.NewSemver("1.0.2")), + shouldUpdateInit: true, + shouldUpdateLater: true, + }, + { + name: "Shouldn't update initially, but should update as soon as latest version is fetched", + daemonVersion: "1.0.0", + initialLatestVersion: nil, + latestVersion: v.Must(v.NewSemver("1.0.1")), + shouldUpdateInit: false, + shouldUpdateLater: true, + }, + } + + for idx, c := range testMatrix { + mockUpdate := &versionUpdateMock{latestVersion: c.initialLatestVersion} + tmpFile := path.Join(t.TempDir(), fmt.Sprintf("update-test-%d.json", idx)) + recorder := peer.NewRecorder("") + sub := recorder.SubscribeToEvents() + defer recorder.UnsubscribeFromEvents(sub) + + m := NewManager(recorder, statemanager.New(tmpFile)) + m.update = mockUpdate + m.currentVersion = c.daemonVersion + m.autoUpdateSupported = func() bool { return true } + m.Start(context.Background()) + m.SetVersion("latest", false) + + ver, _ := waitForUpdateEvent(sub, 500*time.Millisecond) + triggeredInit := ver != "" + if triggeredInit != c.shouldUpdateInit { + t.Errorf("%s: Initial update trigger mismatch, expected %v, got %v", c.name, c.shouldUpdateInit, triggeredInit) + } + if triggeredInit && c.initialLatestVersion != nil && ver != c.initialLatestVersion.String() { + t.Errorf("%s: Initial update version mismatch, expected %v, got %v", c.name, c.initialLatestVersion.String(), ver) + } + + mockUpdate.latestVersion = c.latestVersion + mockUpdate.onUpdate() + + ver, _ = waitForUpdateEvent(sub, 500*time.Millisecond) + triggeredLater := ver != "" + if triggeredLater != c.shouldUpdateLater { + t.Errorf("%s: Later update trigger mismatch, expected %v, got %v", c.name, c.shouldUpdateLater, triggeredLater) + } + if triggeredLater && c.latestVersion != nil && ver != c.latestVersion.String() { + t.Errorf("%s: Later update version mismatch, expected %v, got %v", c.name, c.latestVersion.String(), ver) + } + + m.Stop() + } +} + +func Test_HandleUpdate(t *testing.T) { + testMatrix := []struct { + name string + daemonVersion string + latestVersion *v.Version + expectedVersion string + shouldUpdate bool + }{ + { + name: "Install to a specific version should update regardless of if latestVersion is available yet", + daemonVersion: "0.55.0", + latestVersion: nil, + expectedVersion: "0.56.0", + shouldUpdate: true, + }, + { + name: "Install to specific version should not update if version matches", + daemonVersion: "0.55.0", + latestVersion: nil, + expectedVersion: "0.55.0", + shouldUpdate: false, + }, + { + name: "Install to specific version should not update if current version is newer", + daemonVersion: "0.55.0", + latestVersion: nil, + expectedVersion: "0.54.0", + shouldUpdate: false, + }, + { + name: "Install to latest version should update if latest is newer", + daemonVersion: "0.55.0", + latestVersion: v.Must(v.NewSemver("0.56.0")), + expectedVersion: "latest", + shouldUpdate: true, + }, + { + name: "Install to latest version should not update if latest == current", + daemonVersion: "0.56.0", + latestVersion: v.Must(v.NewSemver("0.56.0")), + expectedVersion: "latest", + shouldUpdate: false, + }, + { + name: "Should not update if daemon version is invalid", + daemonVersion: "development", + latestVersion: v.Must(v.NewSemver("1.0.0")), + expectedVersion: "latest", + shouldUpdate: false, + }, + { + name: "Should not update if expecting latest and latest version is unavailable", + daemonVersion: "0.55.0", + latestVersion: nil, + expectedVersion: "latest", + shouldUpdate: false, + }, + { + name: "Should not update if expected version is invalid", + daemonVersion: "0.55.0", + latestVersion: nil, + expectedVersion: "development", + shouldUpdate: false, + }, + } + for idx, c := range testMatrix { + tmpFile := path.Join(t.TempDir(), fmt.Sprintf("update-test-%d.json", idx)) + recorder := peer.NewRecorder("") + sub := recorder.SubscribeToEvents() + defer recorder.UnsubscribeFromEvents(sub) + + m := NewManager(recorder, statemanager.New(tmpFile)) + m.update = &versionUpdateMock{latestVersion: c.latestVersion} + m.currentVersion = c.daemonVersion + m.autoUpdateSupported = func() bool { return true } + m.Start(context.Background()) + m.SetVersion(c.expectedVersion, false) + + ver, _ := waitForUpdateEvent(sub, 500*time.Millisecond) + updateTriggered := ver != "" + + if updateTriggered { + if c.expectedVersion == "latest" && c.latestVersion != nil && ver != c.latestVersion.String() { + t.Errorf("%s: Version mismatch, expected %v, got %v", c.name, c.latestVersion.String(), ver) + } else if c.expectedVersion != "latest" && c.expectedVersion != "development" && ver != c.expectedVersion { + t.Errorf("%s: Version mismatch, expected %v, got %v", c.name, c.expectedVersion, ver) + } + } + + if updateTriggered != c.shouldUpdate { + t.Errorf("%s: Update trigger mismatch, expected %v, got %v", c.name, c.shouldUpdate, updateTriggered) + } + m.Stop() + } +} + +func Test_EnforcedMetadata(t *testing.T) { + // Mode 1 (downloadOnly): no enforced metadata + tmpFile := path.Join(t.TempDir(), "update-test-mode1.json") + recorder := peer.NewRecorder("") + sub := recorder.SubscribeToEvents() + defer recorder.UnsubscribeFromEvents(sub) + + m := NewManager(recorder, statemanager.New(tmpFile)) + m.update = &versionUpdateMock{latestVersion: v.Must(v.NewSemver("1.0.1"))} + m.currentVersion = "1.0.0" + m.Start(context.Background()) + m.SetDownloadOnly() + + ver, enforced := waitForUpdateEvent(sub, 500*time.Millisecond) + if ver == "" { + t.Fatal("Mode 1: expected new_version_available event") + } + if enforced { + t.Error("Mode 1: expected no enforced metadata") + } + m.Stop() + + // Mode 2 (enforced, forceUpdate=false): enforced metadata present, no auto-install + tmpFile2 := path.Join(t.TempDir(), "update-test-mode2.json") + recorder2 := peer.NewRecorder("") + sub2 := recorder2.SubscribeToEvents() + defer recorder2.UnsubscribeFromEvents(sub2) + + m2 := NewManager(recorder2, statemanager.New(tmpFile2)) + m2.update = &versionUpdateMock{latestVersion: nil} + m2.currentVersion = "1.0.0" + m2.autoUpdateSupported = func() bool { return true } + m2.Start(context.Background()) + m2.SetVersion("1.0.1", false) + + ver, enforced2 := waitForUpdateEvent(sub2, 500*time.Millisecond) + if ver == "" { + t.Fatal("Mode 2: expected new_version_available event") + } + if !enforced2 { + t.Error("Mode 2: expected enforced metadata") + } + m2.Stop() +} + +// ensure the proto import is used +var _ = cProto.SystemEvent_INFO diff --git a/client/internal/updater/manager_test_helpers_test.go b/client/internal/updater/manager_test_helpers_test.go new file mode 100644 index 000000000..c7faee1f4 --- /dev/null +++ b/client/internal/updater/manager_test_helpers_test.go @@ -0,0 +1,56 @@ +package updater + +import ( + "strconv" + "time" + + v "github.com/hashicorp/go-version" + + "github.com/netbirdio/netbird/client/internal/peer" +) + +type versionUpdateMock struct { + latestVersion *v.Version + onUpdate func() +} + +func (m versionUpdateMock) StopWatch() {} + +func (m versionUpdateMock) SetDaemonVersion(newVersion string) bool { + return false +} + +func (m *versionUpdateMock) SetOnUpdateListener(updateFn func()) { + m.onUpdate = updateFn +} + +func (m versionUpdateMock) LatestVersion() *v.Version { + return m.latestVersion +} + +func (m versionUpdateMock) StartFetcher() {} + +// waitForUpdateEvent waits for a new_version_available event, returns the version string or "" on timeout. +func waitForUpdateEvent(sub *peer.EventSubscription, timeout time.Duration) (version string, enforced bool) { + timer := time.NewTimer(timeout) + defer timer.Stop() + for { + select { + case event, ok := <-sub.Events(): + if !ok { + return "", false + } + if val, ok := event.Metadata["new_version_available"]; ok { + enforced := false + if raw, ok := event.Metadata["enforced"]; ok { + if parsed, err := strconv.ParseBool(raw); err == nil { + enforced = parsed + } + } + return val, enforced + } + case <-timer.C: + return "", false + } + } +} diff --git a/client/internal/updatemanager/reposign/artifact.go b/client/internal/updater/reposign/artifact.go similarity index 100% rename from client/internal/updatemanager/reposign/artifact.go rename to client/internal/updater/reposign/artifact.go diff --git a/client/internal/updatemanager/reposign/artifact_test.go b/client/internal/updater/reposign/artifact_test.go similarity index 100% rename from client/internal/updatemanager/reposign/artifact_test.go rename to client/internal/updater/reposign/artifact_test.go diff --git a/client/internal/updatemanager/reposign/certs/root-pub.pem b/client/internal/updater/reposign/certs/root-pub.pem similarity index 100% rename from client/internal/updatemanager/reposign/certs/root-pub.pem rename to client/internal/updater/reposign/certs/root-pub.pem diff --git a/client/internal/updatemanager/reposign/certsdev/root-pub.pem b/client/internal/updater/reposign/certsdev/root-pub.pem similarity index 100% rename from client/internal/updatemanager/reposign/certsdev/root-pub.pem rename to client/internal/updater/reposign/certsdev/root-pub.pem diff --git a/client/internal/updatemanager/reposign/doc.go b/client/internal/updater/reposign/doc.go similarity index 100% rename from client/internal/updatemanager/reposign/doc.go rename to client/internal/updater/reposign/doc.go diff --git a/client/internal/updatemanager/reposign/embed_dev.go b/client/internal/updater/reposign/embed_dev.go similarity index 100% rename from client/internal/updatemanager/reposign/embed_dev.go rename to client/internal/updater/reposign/embed_dev.go diff --git a/client/internal/updatemanager/reposign/embed_prod.go b/client/internal/updater/reposign/embed_prod.go similarity index 100% rename from client/internal/updatemanager/reposign/embed_prod.go rename to client/internal/updater/reposign/embed_prod.go diff --git a/client/internal/updatemanager/reposign/key.go b/client/internal/updater/reposign/key.go similarity index 100% rename from client/internal/updatemanager/reposign/key.go rename to client/internal/updater/reposign/key.go diff --git a/client/internal/updatemanager/reposign/key_test.go b/client/internal/updater/reposign/key_test.go similarity index 100% rename from client/internal/updatemanager/reposign/key_test.go rename to client/internal/updater/reposign/key_test.go diff --git a/client/internal/updatemanager/reposign/revocation.go b/client/internal/updater/reposign/revocation.go similarity index 100% rename from client/internal/updatemanager/reposign/revocation.go rename to client/internal/updater/reposign/revocation.go diff --git a/client/internal/updatemanager/reposign/revocation_test.go b/client/internal/updater/reposign/revocation_test.go similarity index 100% rename from client/internal/updatemanager/reposign/revocation_test.go rename to client/internal/updater/reposign/revocation_test.go diff --git a/client/internal/updatemanager/reposign/root.go b/client/internal/updater/reposign/root.go similarity index 100% rename from client/internal/updatemanager/reposign/root.go rename to client/internal/updater/reposign/root.go diff --git a/client/internal/updatemanager/reposign/root_test.go b/client/internal/updater/reposign/root_test.go similarity index 100% rename from client/internal/updatemanager/reposign/root_test.go rename to client/internal/updater/reposign/root_test.go diff --git a/client/internal/updatemanager/reposign/signature.go b/client/internal/updater/reposign/signature.go similarity index 100% rename from client/internal/updatemanager/reposign/signature.go rename to client/internal/updater/reposign/signature.go diff --git a/client/internal/updatemanager/reposign/signature_test.go b/client/internal/updater/reposign/signature_test.go similarity index 100% rename from client/internal/updatemanager/reposign/signature_test.go rename to client/internal/updater/reposign/signature_test.go diff --git a/client/internal/updatemanager/reposign/verify.go b/client/internal/updater/reposign/verify.go similarity index 98% rename from client/internal/updatemanager/reposign/verify.go rename to client/internal/updater/reposign/verify.go index 0af2a8c9e..f64b26a30 100644 --- a/client/internal/updatemanager/reposign/verify.go +++ b/client/internal/updater/reposign/verify.go @@ -10,7 +10,7 @@ import ( log "github.com/sirupsen/logrus" - "github.com/netbirdio/netbird/client/internal/updatemanager/downloader" + "github.com/netbirdio/netbird/client/internal/updater/downloader" ) const ( diff --git a/client/internal/updatemanager/reposign/verify_test.go b/client/internal/updater/reposign/verify_test.go similarity index 100% rename from client/internal/updatemanager/reposign/verify_test.go rename to client/internal/updater/reposign/verify_test.go diff --git a/client/internal/updater/supported_darwin.go b/client/internal/updater/supported_darwin.go new file mode 100644 index 000000000..b27754366 --- /dev/null +++ b/client/internal/updater/supported_darwin.go @@ -0,0 +1,22 @@ +package updater + +import ( + "context" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/internal/updater/installer" +) + +func isAutoUpdateSupported() bool { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + isBrew := !installer.TypeOfInstaller(ctx).Downloadable() + if isBrew { + log.Warnf("auto-update disabled on Homebrew installation") + return false + } + return true +} diff --git a/client/internal/updater/supported_other.go b/client/internal/updater/supported_other.go new file mode 100644 index 000000000..e09e8c3a3 --- /dev/null +++ b/client/internal/updater/supported_other.go @@ -0,0 +1,7 @@ +//go:build !windows && !darwin + +package updater + +func isAutoUpdateSupported() bool { + return false +} diff --git a/client/internal/updater/supported_windows.go b/client/internal/updater/supported_windows.go new file mode 100644 index 000000000..0c28878c7 --- /dev/null +++ b/client/internal/updater/supported_windows.go @@ -0,0 +1,5 @@ +package updater + +func isAutoUpdateSupported() bool { + return true +} diff --git a/client/internal/updatemanager/update.go b/client/internal/updater/update.go similarity index 90% rename from client/internal/updatemanager/update.go rename to client/internal/updater/update.go index 875b50b49..3056c77e1 100644 --- a/client/internal/updatemanager/update.go +++ b/client/internal/updater/update.go @@ -1,4 +1,4 @@ -package updatemanager +package updater import v "github.com/hashicorp/go-version" diff --git a/client/ios/NetBirdSDK/client.go b/client/ios/NetBirdSDK/client.go index aafef41d3..3e2da7f4e 100644 --- a/client/ios/NetBirdSDK/client.go +++ b/client/ios/NetBirdSDK/client.go @@ -160,7 +160,7 @@ func (c *Client) Run(fd int32, interfaceName string, envList *EnvList) error { c.onHostDnsFn = func([]string) {} cfg.WgIface = interfaceName - c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder, false) + c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder) return c.connectClient.RunOniOS(fd, c.networkChangeListener, c.dnsManager, c.stateFile) } diff --git a/client/proto/daemon.pb.go b/client/proto/daemon.pb.go index 3879beba3..fd3c18f56 100644 --- a/client/proto/daemon.pb.go +++ b/client/proto/daemon.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.36.6 -// protoc v6.33.3 +// protoc v6.33.1 // source: daemon.proto package proto @@ -945,7 +945,6 @@ type UpRequest struct { state protoimpl.MessageState `protogen:"open.v1"` ProfileName *string `protobuf:"bytes,1,opt,name=profileName,proto3,oneof" json:"profileName,omitempty"` Username *string `protobuf:"bytes,2,opt,name=username,proto3,oneof" json:"username,omitempty"` - AutoUpdate *bool `protobuf:"varint,3,opt,name=autoUpdate,proto3,oneof" json:"autoUpdate,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -994,13 +993,6 @@ func (x *UpRequest) GetUsername() string { return "" } -func (x *UpRequest) GetAutoUpdate() bool { - if x != nil && x.AutoUpdate != nil { - return *x.AutoUpdate - } - return false -} - type UpResponse struct { state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields @@ -5032,6 +5024,94 @@ func (x *GetFeaturesResponse) GetDisableUpdateSettings() bool { return false } +type TriggerUpdateRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TriggerUpdateRequest) Reset() { + *x = TriggerUpdateRequest{} + mi := &file_daemon_proto_msgTypes[73] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TriggerUpdateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TriggerUpdateRequest) ProtoMessage() {} + +func (x *TriggerUpdateRequest) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[73] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TriggerUpdateRequest.ProtoReflect.Descriptor instead. +func (*TriggerUpdateRequest) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{73} +} + +type TriggerUpdateResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + ErrorMsg string `protobuf:"bytes,2,opt,name=errorMsg,proto3" json:"errorMsg,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TriggerUpdateResponse) Reset() { + *x = TriggerUpdateResponse{} + mi := &file_daemon_proto_msgTypes[74] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TriggerUpdateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TriggerUpdateResponse) ProtoMessage() {} + +func (x *TriggerUpdateResponse) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[74] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TriggerUpdateResponse.ProtoReflect.Descriptor instead. +func (*TriggerUpdateResponse) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{74} +} + +func (x *TriggerUpdateResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *TriggerUpdateResponse) GetErrorMsg() string { + if x != nil { + return x.ErrorMsg + } + return "" +} + // GetPeerSSHHostKeyRequest for retrieving SSH host key for a specific peer type GetPeerSSHHostKeyRequest struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -5043,7 +5123,7 @@ type GetPeerSSHHostKeyRequest struct { func (x *GetPeerSSHHostKeyRequest) Reset() { *x = GetPeerSSHHostKeyRequest{} - mi := &file_daemon_proto_msgTypes[73] + mi := &file_daemon_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5055,7 +5135,7 @@ func (x *GetPeerSSHHostKeyRequest) String() string { func (*GetPeerSSHHostKeyRequest) ProtoMessage() {} func (x *GetPeerSSHHostKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[73] + mi := &file_daemon_proto_msgTypes[75] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5068,7 +5148,7 @@ func (x *GetPeerSSHHostKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetPeerSSHHostKeyRequest.ProtoReflect.Descriptor instead. func (*GetPeerSSHHostKeyRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{73} + return file_daemon_proto_rawDescGZIP(), []int{75} } func (x *GetPeerSSHHostKeyRequest) GetPeerAddress() string { @@ -5095,7 +5175,7 @@ type GetPeerSSHHostKeyResponse struct { func (x *GetPeerSSHHostKeyResponse) Reset() { *x = GetPeerSSHHostKeyResponse{} - mi := &file_daemon_proto_msgTypes[74] + mi := &file_daemon_proto_msgTypes[76] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5107,7 +5187,7 @@ func (x *GetPeerSSHHostKeyResponse) String() string { func (*GetPeerSSHHostKeyResponse) ProtoMessage() {} func (x *GetPeerSSHHostKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[74] + mi := &file_daemon_proto_msgTypes[76] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5120,7 +5200,7 @@ func (x *GetPeerSSHHostKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetPeerSSHHostKeyResponse.ProtoReflect.Descriptor instead. func (*GetPeerSSHHostKeyResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{74} + return file_daemon_proto_rawDescGZIP(), []int{76} } func (x *GetPeerSSHHostKeyResponse) GetSshHostKey() []byte { @@ -5162,7 +5242,7 @@ type RequestJWTAuthRequest struct { func (x *RequestJWTAuthRequest) Reset() { *x = RequestJWTAuthRequest{} - mi := &file_daemon_proto_msgTypes[75] + mi := &file_daemon_proto_msgTypes[77] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5174,7 +5254,7 @@ func (x *RequestJWTAuthRequest) String() string { func (*RequestJWTAuthRequest) ProtoMessage() {} func (x *RequestJWTAuthRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[75] + mi := &file_daemon_proto_msgTypes[77] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5187,7 +5267,7 @@ func (x *RequestJWTAuthRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RequestJWTAuthRequest.ProtoReflect.Descriptor instead. func (*RequestJWTAuthRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{75} + return file_daemon_proto_rawDescGZIP(), []int{77} } func (x *RequestJWTAuthRequest) GetHint() string { @@ -5220,7 +5300,7 @@ type RequestJWTAuthResponse struct { func (x *RequestJWTAuthResponse) Reset() { *x = RequestJWTAuthResponse{} - mi := &file_daemon_proto_msgTypes[76] + mi := &file_daemon_proto_msgTypes[78] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5232,7 +5312,7 @@ func (x *RequestJWTAuthResponse) String() string { func (*RequestJWTAuthResponse) ProtoMessage() {} func (x *RequestJWTAuthResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[76] + mi := &file_daemon_proto_msgTypes[78] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5245,7 +5325,7 @@ func (x *RequestJWTAuthResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RequestJWTAuthResponse.ProtoReflect.Descriptor instead. func (*RequestJWTAuthResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{76} + return file_daemon_proto_rawDescGZIP(), []int{78} } func (x *RequestJWTAuthResponse) GetVerificationURI() string { @@ -5310,7 +5390,7 @@ type WaitJWTTokenRequest struct { func (x *WaitJWTTokenRequest) Reset() { *x = WaitJWTTokenRequest{} - mi := &file_daemon_proto_msgTypes[77] + mi := &file_daemon_proto_msgTypes[79] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5322,7 +5402,7 @@ func (x *WaitJWTTokenRequest) String() string { func (*WaitJWTTokenRequest) ProtoMessage() {} func (x *WaitJWTTokenRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[77] + mi := &file_daemon_proto_msgTypes[79] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5335,7 +5415,7 @@ func (x *WaitJWTTokenRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WaitJWTTokenRequest.ProtoReflect.Descriptor instead. func (*WaitJWTTokenRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{77} + return file_daemon_proto_rawDescGZIP(), []int{79} } func (x *WaitJWTTokenRequest) GetDeviceCode() string { @@ -5367,7 +5447,7 @@ type WaitJWTTokenResponse struct { func (x *WaitJWTTokenResponse) Reset() { *x = WaitJWTTokenResponse{} - mi := &file_daemon_proto_msgTypes[78] + mi := &file_daemon_proto_msgTypes[80] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5379,7 +5459,7 @@ func (x *WaitJWTTokenResponse) String() string { func (*WaitJWTTokenResponse) ProtoMessage() {} func (x *WaitJWTTokenResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[78] + mi := &file_daemon_proto_msgTypes[80] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5392,7 +5472,7 @@ func (x *WaitJWTTokenResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WaitJWTTokenResponse.ProtoReflect.Descriptor instead. func (*WaitJWTTokenResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{78} + return file_daemon_proto_rawDescGZIP(), []int{80} } func (x *WaitJWTTokenResponse) GetToken() string { @@ -5425,7 +5505,7 @@ type StartCPUProfileRequest struct { func (x *StartCPUProfileRequest) Reset() { *x = StartCPUProfileRequest{} - mi := &file_daemon_proto_msgTypes[79] + mi := &file_daemon_proto_msgTypes[81] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5437,7 +5517,7 @@ func (x *StartCPUProfileRequest) String() string { func (*StartCPUProfileRequest) ProtoMessage() {} func (x *StartCPUProfileRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[79] + mi := &file_daemon_proto_msgTypes[81] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5450,7 +5530,7 @@ func (x *StartCPUProfileRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StartCPUProfileRequest.ProtoReflect.Descriptor instead. func (*StartCPUProfileRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{79} + return file_daemon_proto_rawDescGZIP(), []int{81} } // StartCPUProfileResponse confirms CPU profiling has started @@ -5462,7 +5542,7 @@ type StartCPUProfileResponse struct { func (x *StartCPUProfileResponse) Reset() { *x = StartCPUProfileResponse{} - mi := &file_daemon_proto_msgTypes[80] + mi := &file_daemon_proto_msgTypes[82] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5474,7 +5554,7 @@ func (x *StartCPUProfileResponse) String() string { func (*StartCPUProfileResponse) ProtoMessage() {} func (x *StartCPUProfileResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[80] + mi := &file_daemon_proto_msgTypes[82] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5487,7 +5567,7 @@ func (x *StartCPUProfileResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StartCPUProfileResponse.ProtoReflect.Descriptor instead. func (*StartCPUProfileResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{80} + return file_daemon_proto_rawDescGZIP(), []int{82} } // StopCPUProfileRequest for stopping CPU profiling @@ -5499,7 +5579,7 @@ type StopCPUProfileRequest struct { func (x *StopCPUProfileRequest) Reset() { *x = StopCPUProfileRequest{} - mi := &file_daemon_proto_msgTypes[81] + mi := &file_daemon_proto_msgTypes[83] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5511,7 +5591,7 @@ func (x *StopCPUProfileRequest) String() string { func (*StopCPUProfileRequest) ProtoMessage() {} func (x *StopCPUProfileRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[81] + mi := &file_daemon_proto_msgTypes[83] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5524,7 +5604,7 @@ func (x *StopCPUProfileRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StopCPUProfileRequest.ProtoReflect.Descriptor instead. func (*StopCPUProfileRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{81} + return file_daemon_proto_rawDescGZIP(), []int{83} } // StopCPUProfileResponse confirms CPU profiling has stopped @@ -5536,7 +5616,7 @@ type StopCPUProfileResponse struct { func (x *StopCPUProfileResponse) Reset() { *x = StopCPUProfileResponse{} - mi := &file_daemon_proto_msgTypes[82] + mi := &file_daemon_proto_msgTypes[84] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5548,7 +5628,7 @@ func (x *StopCPUProfileResponse) String() string { func (*StopCPUProfileResponse) ProtoMessage() {} func (x *StopCPUProfileResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[82] + mi := &file_daemon_proto_msgTypes[84] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5561,7 +5641,7 @@ func (x *StopCPUProfileResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StopCPUProfileResponse.ProtoReflect.Descriptor instead. func (*StopCPUProfileResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{82} + return file_daemon_proto_rawDescGZIP(), []int{84} } type InstallerResultRequest struct { @@ -5572,7 +5652,7 @@ type InstallerResultRequest struct { func (x *InstallerResultRequest) Reset() { *x = InstallerResultRequest{} - mi := &file_daemon_proto_msgTypes[83] + mi := &file_daemon_proto_msgTypes[85] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5584,7 +5664,7 @@ func (x *InstallerResultRequest) String() string { func (*InstallerResultRequest) ProtoMessage() {} func (x *InstallerResultRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[83] + mi := &file_daemon_proto_msgTypes[85] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5597,7 +5677,7 @@ func (x *InstallerResultRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use InstallerResultRequest.ProtoReflect.Descriptor instead. func (*InstallerResultRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{83} + return file_daemon_proto_rawDescGZIP(), []int{85} } type InstallerResultResponse struct { @@ -5610,7 +5690,7 @@ type InstallerResultResponse struct { func (x *InstallerResultResponse) Reset() { *x = InstallerResultResponse{} - mi := &file_daemon_proto_msgTypes[84] + mi := &file_daemon_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5622,7 +5702,7 @@ func (x *InstallerResultResponse) String() string { func (*InstallerResultResponse) ProtoMessage() {} func (x *InstallerResultResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[84] + mi := &file_daemon_proto_msgTypes[86] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5635,7 +5715,7 @@ func (x *InstallerResultResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use InstallerResultResponse.ProtoReflect.Descriptor instead. func (*InstallerResultResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{84} + return file_daemon_proto_rawDescGZIP(), []int{86} } func (x *InstallerResultResponse) GetSuccess() bool { @@ -5667,7 +5747,7 @@ type ExposeServiceRequest struct { func (x *ExposeServiceRequest) Reset() { *x = ExposeServiceRequest{} - mi := &file_daemon_proto_msgTypes[85] + mi := &file_daemon_proto_msgTypes[87] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5679,7 +5759,7 @@ func (x *ExposeServiceRequest) String() string { func (*ExposeServiceRequest) ProtoMessage() {} func (x *ExposeServiceRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[85] + mi := &file_daemon_proto_msgTypes[87] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5692,7 +5772,7 @@ func (x *ExposeServiceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ExposeServiceRequest.ProtoReflect.Descriptor instead. func (*ExposeServiceRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{85} + return file_daemon_proto_rawDescGZIP(), []int{87} } func (x *ExposeServiceRequest) GetPort() uint32 { @@ -5756,7 +5836,7 @@ type ExposeServiceEvent struct { func (x *ExposeServiceEvent) Reset() { *x = ExposeServiceEvent{} - mi := &file_daemon_proto_msgTypes[86] + mi := &file_daemon_proto_msgTypes[88] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5768,7 +5848,7 @@ func (x *ExposeServiceEvent) String() string { func (*ExposeServiceEvent) ProtoMessage() {} func (x *ExposeServiceEvent) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[86] + mi := &file_daemon_proto_msgTypes[88] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5781,7 +5861,7 @@ func (x *ExposeServiceEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use ExposeServiceEvent.ProtoReflect.Descriptor instead. func (*ExposeServiceEvent) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{86} + return file_daemon_proto_rawDescGZIP(), []int{88} } func (x *ExposeServiceEvent) GetEvent() isExposeServiceEvent_Event { @@ -5821,7 +5901,7 @@ type ExposeServiceReady struct { func (x *ExposeServiceReady) Reset() { *x = ExposeServiceReady{} - mi := &file_daemon_proto_msgTypes[87] + mi := &file_daemon_proto_msgTypes[89] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5833,7 +5913,7 @@ func (x *ExposeServiceReady) String() string { func (*ExposeServiceReady) ProtoMessage() {} func (x *ExposeServiceReady) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[87] + mi := &file_daemon_proto_msgTypes[89] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5846,7 +5926,7 @@ func (x *ExposeServiceReady) ProtoReflect() protoreflect.Message { // Deprecated: Use ExposeServiceReady.ProtoReflect.Descriptor instead. func (*ExposeServiceReady) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{87} + return file_daemon_proto_rawDescGZIP(), []int{89} } func (x *ExposeServiceReady) GetServiceName() string { @@ -5880,7 +5960,7 @@ type PortInfo_Range struct { func (x *PortInfo_Range) Reset() { *x = PortInfo_Range{} - mi := &file_daemon_proto_msgTypes[89] + mi := &file_daemon_proto_msgTypes[91] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5892,7 +5972,7 @@ func (x *PortInfo_Range) String() string { func (*PortInfo_Range) ProtoMessage() {} func (x *PortInfo_Range) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[89] + mi := &file_daemon_proto_msgTypes[91] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6016,16 +6096,12 @@ const file_daemon_proto_rawDesc = "" + "\buserCode\x18\x01 \x01(\tR\buserCode\x12\x1a\n" + "\bhostname\x18\x02 \x01(\tR\bhostname\",\n" + "\x14WaitSSOLoginResponse\x12\x14\n" + - "\x05email\x18\x01 \x01(\tR\x05email\"\xa4\x01\n" + + "\x05email\x18\x01 \x01(\tR\x05email\"v\n" + "\tUpRequest\x12%\n" + "\vprofileName\x18\x01 \x01(\tH\x00R\vprofileName\x88\x01\x01\x12\x1f\n" + - "\busername\x18\x02 \x01(\tH\x01R\busername\x88\x01\x01\x12#\n" + - "\n" + - "autoUpdate\x18\x03 \x01(\bH\x02R\n" + - "autoUpdate\x88\x01\x01B\x0e\n" + + "\busername\x18\x02 \x01(\tH\x01R\busername\x88\x01\x01B\x0e\n" + "\f_profileNameB\v\n" + - "\t_usernameB\r\n" + - "\v_autoUpdate\"\f\n" + + "\t_usernameJ\x04\b\x03\x10\x04\"\f\n" + "\n" + "UpResponse\"\xa1\x01\n" + "\rStatusRequest\x12,\n" + @@ -6380,7 +6456,11 @@ const file_daemon_proto_rawDesc = "" + "\x12GetFeaturesRequest\"x\n" + "\x13GetFeaturesResponse\x12)\n" + "\x10disable_profiles\x18\x01 \x01(\bR\x0fdisableProfiles\x126\n" + - "\x17disable_update_settings\x18\x02 \x01(\bR\x15disableUpdateSettings\"<\n" + + "\x17disable_update_settings\x18\x02 \x01(\bR\x15disableUpdateSettings\"\x16\n" + + "\x14TriggerUpdateRequest\"M\n" + + "\x15TriggerUpdateResponse\x12\x18\n" + + "\asuccess\x18\x01 \x01(\bR\asuccess\x12\x1a\n" + + "\berrorMsg\x18\x02 \x01(\tR\berrorMsg\"<\n" + "\x18GetPeerSSHHostKeyRequest\x12 \n" + "\vpeerAddress\x18\x01 \x01(\tR\vpeerAddress\"\x85\x01\n" + "\x19GetPeerSSHHostKeyResponse\x12\x1e\n" + @@ -6453,7 +6533,7 @@ const file_daemon_proto_rawDesc = "" + "\n" + "EXPOSE_TCP\x10\x02\x12\x0e\n" + "\n" + - "EXPOSE_UDP\x10\x032\xac\x15\n" + + "EXPOSE_UDP\x10\x032\xfc\x15\n" + "\rDaemonService\x126\n" + "\x05Login\x12\x14.daemon.LoginRequest\x1a\x15.daemon.LoginResponse\"\x00\x12K\n" + "\fWaitSSOLogin\x12\x1b.daemon.WaitSSOLoginRequest\x1a\x1c.daemon.WaitSSOLoginResponse\"\x00\x12-\n" + @@ -6485,7 +6565,8 @@ const file_daemon_proto_rawDesc = "" + "\fListProfiles\x12\x1b.daemon.ListProfilesRequest\x1a\x1c.daemon.ListProfilesResponse\"\x00\x12W\n" + "\x10GetActiveProfile\x12\x1f.daemon.GetActiveProfileRequest\x1a .daemon.GetActiveProfileResponse\"\x00\x129\n" + "\x06Logout\x12\x15.daemon.LogoutRequest\x1a\x16.daemon.LogoutResponse\"\x00\x12H\n" + - "\vGetFeatures\x12\x1a.daemon.GetFeaturesRequest\x1a\x1b.daemon.GetFeaturesResponse\"\x00\x12Z\n" + + "\vGetFeatures\x12\x1a.daemon.GetFeaturesRequest\x1a\x1b.daemon.GetFeaturesResponse\"\x00\x12N\n" + + "\rTriggerUpdate\x12\x1c.daemon.TriggerUpdateRequest\x1a\x1d.daemon.TriggerUpdateResponse\"\x00\x12Z\n" + "\x11GetPeerSSHHostKey\x12 .daemon.GetPeerSSHHostKeyRequest\x1a!.daemon.GetPeerSSHHostKeyResponse\"\x00\x12Q\n" + "\x0eRequestJWTAuth\x12\x1d.daemon.RequestJWTAuthRequest\x1a\x1e.daemon.RequestJWTAuthResponse\"\x00\x12K\n" + "\fWaitJWTToken\x12\x1b.daemon.WaitJWTTokenRequest\x1a\x1c.daemon.WaitJWTTokenResponse\"\x00\x12T\n" + @@ -6508,7 +6589,7 @@ func file_daemon_proto_rawDescGZIP() []byte { } var file_daemon_proto_enumTypes = make([]protoimpl.EnumInfo, 5) -var file_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 91) +var file_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 93) var file_daemon_proto_goTypes = []any{ (LogLevel)(0), // 0: daemon.LogLevel (ExposeProtocol)(0), // 1: daemon.ExposeProtocol @@ -6588,34 +6669,36 @@ var file_daemon_proto_goTypes = []any{ (*LogoutResponse)(nil), // 75: daemon.LogoutResponse (*GetFeaturesRequest)(nil), // 76: daemon.GetFeaturesRequest (*GetFeaturesResponse)(nil), // 77: daemon.GetFeaturesResponse - (*GetPeerSSHHostKeyRequest)(nil), // 78: daemon.GetPeerSSHHostKeyRequest - (*GetPeerSSHHostKeyResponse)(nil), // 79: daemon.GetPeerSSHHostKeyResponse - (*RequestJWTAuthRequest)(nil), // 80: daemon.RequestJWTAuthRequest - (*RequestJWTAuthResponse)(nil), // 81: daemon.RequestJWTAuthResponse - (*WaitJWTTokenRequest)(nil), // 82: daemon.WaitJWTTokenRequest - (*WaitJWTTokenResponse)(nil), // 83: daemon.WaitJWTTokenResponse - (*StartCPUProfileRequest)(nil), // 84: daemon.StartCPUProfileRequest - (*StartCPUProfileResponse)(nil), // 85: daemon.StartCPUProfileResponse - (*StopCPUProfileRequest)(nil), // 86: daemon.StopCPUProfileRequest - (*StopCPUProfileResponse)(nil), // 87: daemon.StopCPUProfileResponse - (*InstallerResultRequest)(nil), // 88: daemon.InstallerResultRequest - (*InstallerResultResponse)(nil), // 89: daemon.InstallerResultResponse - (*ExposeServiceRequest)(nil), // 90: daemon.ExposeServiceRequest - (*ExposeServiceEvent)(nil), // 91: daemon.ExposeServiceEvent - (*ExposeServiceReady)(nil), // 92: daemon.ExposeServiceReady - nil, // 93: daemon.Network.ResolvedIPsEntry - (*PortInfo_Range)(nil), // 94: daemon.PortInfo.Range - nil, // 95: daemon.SystemEvent.MetadataEntry - (*durationpb.Duration)(nil), // 96: google.protobuf.Duration - (*timestamppb.Timestamp)(nil), // 97: google.protobuf.Timestamp + (*TriggerUpdateRequest)(nil), // 78: daemon.TriggerUpdateRequest + (*TriggerUpdateResponse)(nil), // 79: daemon.TriggerUpdateResponse + (*GetPeerSSHHostKeyRequest)(nil), // 80: daemon.GetPeerSSHHostKeyRequest + (*GetPeerSSHHostKeyResponse)(nil), // 81: daemon.GetPeerSSHHostKeyResponse + (*RequestJWTAuthRequest)(nil), // 82: daemon.RequestJWTAuthRequest + (*RequestJWTAuthResponse)(nil), // 83: daemon.RequestJWTAuthResponse + (*WaitJWTTokenRequest)(nil), // 84: daemon.WaitJWTTokenRequest + (*WaitJWTTokenResponse)(nil), // 85: daemon.WaitJWTTokenResponse + (*StartCPUProfileRequest)(nil), // 86: daemon.StartCPUProfileRequest + (*StartCPUProfileResponse)(nil), // 87: daemon.StartCPUProfileResponse + (*StopCPUProfileRequest)(nil), // 88: daemon.StopCPUProfileRequest + (*StopCPUProfileResponse)(nil), // 89: daemon.StopCPUProfileResponse + (*InstallerResultRequest)(nil), // 90: daemon.InstallerResultRequest + (*InstallerResultResponse)(nil), // 91: daemon.InstallerResultResponse + (*ExposeServiceRequest)(nil), // 92: daemon.ExposeServiceRequest + (*ExposeServiceEvent)(nil), // 93: daemon.ExposeServiceEvent + (*ExposeServiceReady)(nil), // 94: daemon.ExposeServiceReady + nil, // 95: daemon.Network.ResolvedIPsEntry + (*PortInfo_Range)(nil), // 96: daemon.PortInfo.Range + nil, // 97: daemon.SystemEvent.MetadataEntry + (*durationpb.Duration)(nil), // 98: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 99: google.protobuf.Timestamp } var file_daemon_proto_depIdxs = []int32{ 2, // 0: daemon.OSLifecycleRequest.type:type_name -> daemon.OSLifecycleRequest.CycleType - 96, // 1: daemon.LoginRequest.dnsRouteInterval:type_name -> google.protobuf.Duration + 98, // 1: daemon.LoginRequest.dnsRouteInterval:type_name -> google.protobuf.Duration 28, // 2: daemon.StatusResponse.fullStatus:type_name -> daemon.FullStatus - 97, // 3: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp - 97, // 4: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp - 96, // 5: daemon.PeerState.latency:type_name -> google.protobuf.Duration + 99, // 3: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp + 99, // 4: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp + 98, // 5: daemon.PeerState.latency:type_name -> google.protobuf.Duration 26, // 6: daemon.SSHServerState.sessions:type_name -> daemon.SSHSessionInfo 23, // 7: daemon.FullStatus.managementState:type_name -> daemon.ManagementState 22, // 8: daemon.FullStatus.signalState:type_name -> daemon.SignalState @@ -6626,8 +6709,8 @@ var file_daemon_proto_depIdxs = []int32{ 58, // 13: daemon.FullStatus.events:type_name -> daemon.SystemEvent 27, // 14: daemon.FullStatus.sshServerState:type_name -> daemon.SSHServerState 34, // 15: daemon.ListNetworksResponse.routes:type_name -> daemon.Network - 93, // 16: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry - 94, // 17: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range + 95, // 16: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry + 96, // 17: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range 35, // 18: daemon.ForwardingRule.destinationPort:type_name -> daemon.PortInfo 35, // 19: daemon.ForwardingRule.translatedPort:type_name -> daemon.PortInfo 36, // 20: daemon.ForwardingRulesResponse.rules:type_name -> daemon.ForwardingRule @@ -6638,13 +6721,13 @@ var file_daemon_proto_depIdxs = []int32{ 55, // 25: daemon.TracePacketResponse.stages:type_name -> daemon.TraceStage 3, // 26: daemon.SystemEvent.severity:type_name -> daemon.SystemEvent.Severity 4, // 27: daemon.SystemEvent.category:type_name -> daemon.SystemEvent.Category - 97, // 28: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp - 95, // 29: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry + 99, // 28: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp + 97, // 29: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry 58, // 30: daemon.GetEventsResponse.events:type_name -> daemon.SystemEvent - 96, // 31: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration + 98, // 31: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration 71, // 32: daemon.ListProfilesResponse.profiles:type_name -> daemon.Profile 1, // 33: daemon.ExposeServiceRequest.protocol:type_name -> daemon.ExposeProtocol - 92, // 34: daemon.ExposeServiceEvent.ready:type_name -> daemon.ExposeServiceReady + 94, // 34: daemon.ExposeServiceEvent.ready:type_name -> daemon.ExposeServiceReady 33, // 35: daemon.Network.ResolvedIPsEntry.value:type_name -> daemon.IPList 8, // 36: daemon.DaemonService.Login:input_type -> daemon.LoginRequest 10, // 37: daemon.DaemonService.WaitSSOLogin:input_type -> daemon.WaitSSOLoginRequest @@ -6674,52 +6757,54 @@ var file_daemon_proto_depIdxs = []int32{ 72, // 61: daemon.DaemonService.GetActiveProfile:input_type -> daemon.GetActiveProfileRequest 74, // 62: daemon.DaemonService.Logout:input_type -> daemon.LogoutRequest 76, // 63: daemon.DaemonService.GetFeatures:input_type -> daemon.GetFeaturesRequest - 78, // 64: daemon.DaemonService.GetPeerSSHHostKey:input_type -> daemon.GetPeerSSHHostKeyRequest - 80, // 65: daemon.DaemonService.RequestJWTAuth:input_type -> daemon.RequestJWTAuthRequest - 82, // 66: daemon.DaemonService.WaitJWTToken:input_type -> daemon.WaitJWTTokenRequest - 84, // 67: daemon.DaemonService.StartCPUProfile:input_type -> daemon.StartCPUProfileRequest - 86, // 68: daemon.DaemonService.StopCPUProfile:input_type -> daemon.StopCPUProfileRequest - 6, // 69: daemon.DaemonService.NotifyOSLifecycle:input_type -> daemon.OSLifecycleRequest - 88, // 70: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest - 90, // 71: daemon.DaemonService.ExposeService:input_type -> daemon.ExposeServiceRequest - 9, // 72: daemon.DaemonService.Login:output_type -> daemon.LoginResponse - 11, // 73: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse - 13, // 74: daemon.DaemonService.Up:output_type -> daemon.UpResponse - 15, // 75: daemon.DaemonService.Status:output_type -> daemon.StatusResponse - 17, // 76: daemon.DaemonService.Down:output_type -> daemon.DownResponse - 19, // 77: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse - 30, // 78: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse - 32, // 79: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse - 32, // 80: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse - 37, // 81: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse - 39, // 82: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse - 41, // 83: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse - 43, // 84: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse - 46, // 85: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse - 48, // 86: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse - 50, // 87: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse - 52, // 88: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse - 56, // 89: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse - 58, // 90: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent - 60, // 91: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse - 62, // 92: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse - 64, // 93: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse - 66, // 94: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse - 68, // 95: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse - 70, // 96: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse - 73, // 97: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse - 75, // 98: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse - 77, // 99: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse - 79, // 100: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse - 81, // 101: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse - 83, // 102: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse - 85, // 103: daemon.DaemonService.StartCPUProfile:output_type -> daemon.StartCPUProfileResponse - 87, // 104: daemon.DaemonService.StopCPUProfile:output_type -> daemon.StopCPUProfileResponse - 7, // 105: daemon.DaemonService.NotifyOSLifecycle:output_type -> daemon.OSLifecycleResponse - 89, // 106: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse - 91, // 107: daemon.DaemonService.ExposeService:output_type -> daemon.ExposeServiceEvent - 72, // [72:108] is the sub-list for method output_type - 36, // [36:72] is the sub-list for method input_type + 78, // 64: daemon.DaemonService.TriggerUpdate:input_type -> daemon.TriggerUpdateRequest + 80, // 65: daemon.DaemonService.GetPeerSSHHostKey:input_type -> daemon.GetPeerSSHHostKeyRequest + 82, // 66: daemon.DaemonService.RequestJWTAuth:input_type -> daemon.RequestJWTAuthRequest + 84, // 67: daemon.DaemonService.WaitJWTToken:input_type -> daemon.WaitJWTTokenRequest + 86, // 68: daemon.DaemonService.StartCPUProfile:input_type -> daemon.StartCPUProfileRequest + 88, // 69: daemon.DaemonService.StopCPUProfile:input_type -> daemon.StopCPUProfileRequest + 6, // 70: daemon.DaemonService.NotifyOSLifecycle:input_type -> daemon.OSLifecycleRequest + 90, // 71: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest + 92, // 72: daemon.DaemonService.ExposeService:input_type -> daemon.ExposeServiceRequest + 9, // 73: daemon.DaemonService.Login:output_type -> daemon.LoginResponse + 11, // 74: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse + 13, // 75: daemon.DaemonService.Up:output_type -> daemon.UpResponse + 15, // 76: daemon.DaemonService.Status:output_type -> daemon.StatusResponse + 17, // 77: daemon.DaemonService.Down:output_type -> daemon.DownResponse + 19, // 78: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse + 30, // 79: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse + 32, // 80: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse + 32, // 81: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse + 37, // 82: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse + 39, // 83: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse + 41, // 84: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse + 43, // 85: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse + 46, // 86: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse + 48, // 87: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse + 50, // 88: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse + 52, // 89: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse + 56, // 90: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse + 58, // 91: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent + 60, // 92: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse + 62, // 93: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse + 64, // 94: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse + 66, // 95: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse + 68, // 96: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse + 70, // 97: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse + 73, // 98: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse + 75, // 99: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse + 77, // 100: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse + 79, // 101: daemon.DaemonService.TriggerUpdate:output_type -> daemon.TriggerUpdateResponse + 81, // 102: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse + 83, // 103: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse + 85, // 104: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse + 87, // 105: daemon.DaemonService.StartCPUProfile:output_type -> daemon.StartCPUProfileResponse + 89, // 106: daemon.DaemonService.StopCPUProfile:output_type -> daemon.StopCPUProfileResponse + 7, // 107: daemon.DaemonService.NotifyOSLifecycle:output_type -> daemon.OSLifecycleResponse + 91, // 108: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse + 93, // 109: daemon.DaemonService.ExposeService:output_type -> daemon.ExposeServiceEvent + 73, // [73:110] is the sub-list for method output_type + 36, // [36:73] is the sub-list for method input_type 36, // [36:36] is the sub-list for extension type_name 36, // [36:36] is the sub-list for extension extendee 0, // [0:36] is the sub-list for field type_name @@ -6742,8 +6827,8 @@ func file_daemon_proto_init() { file_daemon_proto_msgTypes[56].OneofWrappers = []any{} file_daemon_proto_msgTypes[58].OneofWrappers = []any{} file_daemon_proto_msgTypes[69].OneofWrappers = []any{} - file_daemon_proto_msgTypes[75].OneofWrappers = []any{} - file_daemon_proto_msgTypes[86].OneofWrappers = []any{ + file_daemon_proto_msgTypes[77].OneofWrappers = []any{} + file_daemon_proto_msgTypes[88].OneofWrappers = []any{ (*ExposeServiceEvent_Ready)(nil), } type x struct{} @@ -6752,7 +6837,7 @@ func file_daemon_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_daemon_proto_rawDesc), len(file_daemon_proto_rawDesc)), NumEnums: 5, - NumMessages: 91, + NumMessages: 93, NumExtensions: 0, NumServices: 1, }, diff --git a/client/proto/daemon.proto b/client/proto/daemon.proto index 4dc41d401..efafe3af7 100644 --- a/client/proto/daemon.proto +++ b/client/proto/daemon.proto @@ -85,6 +85,10 @@ service DaemonService { rpc GetFeatures(GetFeaturesRequest) returns (GetFeaturesResponse) {} + // TriggerUpdate initiates installation of the pending enforced version. + // Called when the user clicks the install button in the UI (Mode 2 / enforced update). + rpc TriggerUpdate(TriggerUpdateRequest) returns (TriggerUpdateResponse) {} + // GetPeerSSHHostKey retrieves SSH host key for a specific peer rpc GetPeerSSHHostKey(GetPeerSSHHostKeyRequest) returns (GetPeerSSHHostKeyResponse) {} @@ -226,7 +230,7 @@ message WaitSSOLoginResponse { message UpRequest { optional string profileName = 1; optional string username = 2; - optional bool autoUpdate = 3; + reserved 3; } message UpResponse {} @@ -725,6 +729,13 @@ message GetFeaturesResponse{ bool disable_update_settings = 2; } +message TriggerUpdateRequest {} + +message TriggerUpdateResponse { + bool success = 1; + string errorMsg = 2; +} + // GetPeerSSHHostKeyRequest for retrieving SSH host key for a specific peer message GetPeerSSHHostKeyRequest { // peer IP address or FQDN to get SSH host key for diff --git a/client/proto/daemon_grpc.pb.go b/client/proto/daemon_grpc.pb.go index 4154dce59..e5bd89597 100644 --- a/client/proto/daemon_grpc.pb.go +++ b/client/proto/daemon_grpc.pb.go @@ -64,6 +64,9 @@ type DaemonServiceClient interface { // Logout disconnects from the network and deletes the peer from the management server Logout(ctx context.Context, in *LogoutRequest, opts ...grpc.CallOption) (*LogoutResponse, error) GetFeatures(ctx context.Context, in *GetFeaturesRequest, opts ...grpc.CallOption) (*GetFeaturesResponse, error) + // TriggerUpdate initiates installation of the pending enforced version. + // Called when the user clicks the install button in the UI (Mode 2 / enforced update). + TriggerUpdate(ctx context.Context, in *TriggerUpdateRequest, opts ...grpc.CallOption) (*TriggerUpdateResponse, error) // GetPeerSSHHostKey retrieves SSH host key for a specific peer GetPeerSSHHostKey(ctx context.Context, in *GetPeerSSHHostKeyRequest, opts ...grpc.CallOption) (*GetPeerSSHHostKeyResponse, error) // RequestJWTAuth initiates JWT authentication flow for SSH @@ -363,6 +366,15 @@ func (c *daemonServiceClient) GetFeatures(ctx context.Context, in *GetFeaturesRe return out, nil } +func (c *daemonServiceClient) TriggerUpdate(ctx context.Context, in *TriggerUpdateRequest, opts ...grpc.CallOption) (*TriggerUpdateResponse, error) { + out := new(TriggerUpdateResponse) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/TriggerUpdate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *daemonServiceClient) GetPeerSSHHostKey(ctx context.Context, in *GetPeerSSHHostKeyRequest, opts ...grpc.CallOption) (*GetPeerSSHHostKeyResponse, error) { out := new(GetPeerSSHHostKeyResponse) err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetPeerSSHHostKey", in, out, opts...) @@ -508,6 +520,9 @@ type DaemonServiceServer interface { // Logout disconnects from the network and deletes the peer from the management server Logout(context.Context, *LogoutRequest) (*LogoutResponse, error) GetFeatures(context.Context, *GetFeaturesRequest) (*GetFeaturesResponse, error) + // TriggerUpdate initiates installation of the pending enforced version. + // Called when the user clicks the install button in the UI (Mode 2 / enforced update). + TriggerUpdate(context.Context, *TriggerUpdateRequest) (*TriggerUpdateResponse, error) // GetPeerSSHHostKey retrieves SSH host key for a specific peer GetPeerSSHHostKey(context.Context, *GetPeerSSHHostKeyRequest) (*GetPeerSSHHostKeyResponse, error) // RequestJWTAuth initiates JWT authentication flow for SSH @@ -613,6 +628,9 @@ func (UnimplementedDaemonServiceServer) Logout(context.Context, *LogoutRequest) func (UnimplementedDaemonServiceServer) GetFeatures(context.Context, *GetFeaturesRequest) (*GetFeaturesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetFeatures not implemented") } +func (UnimplementedDaemonServiceServer) TriggerUpdate(context.Context, *TriggerUpdateRequest) (*TriggerUpdateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TriggerUpdate not implemented") +} func (UnimplementedDaemonServiceServer) GetPeerSSHHostKey(context.Context, *GetPeerSSHHostKeyRequest) (*GetPeerSSHHostKeyResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetPeerSSHHostKey not implemented") } @@ -1157,6 +1175,24 @@ func _DaemonService_GetFeatures_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } +func _DaemonService_TriggerUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TriggerUpdateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).TriggerUpdate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/daemon.DaemonService/TriggerUpdate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).TriggerUpdate(ctx, req.(*TriggerUpdateRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _DaemonService_GetPeerSSHHostKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetPeerSSHHostKeyRequest) if err := dec(in); err != nil { @@ -1419,6 +1455,10 @@ var DaemonService_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetFeatures", Handler: _DaemonService_GetFeatures_Handler, }, + { + MethodName: "TriggerUpdate", + Handler: _DaemonService_TriggerUpdate_Handler, + }, { MethodName: "GetPeerSSHHostKey", Handler: _DaemonService_GetPeerSSHHostKey_Handler, diff --git a/client/server/event.go b/client/server/event.go index b5c12a3a6..d93151c96 100644 --- a/client/server/event.go +++ b/client/server/event.go @@ -14,6 +14,7 @@ func (s *Server) SubscribeEvents(req *proto.SubscribeRequest, stream proto.Daemo }() log.Debug("client subscribed to events") + s.startUpdateManagerForGUI() for { select { diff --git a/client/server/server.go b/client/server/server.go index 69d79d9cd..1d83366ca 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -30,6 +30,8 @@ import ( "github.com/netbirdio/netbird/client/internal" "github.com/netbirdio/netbird/client/internal/peer" + "github.com/netbirdio/netbird/client/internal/statemanager" + "github.com/netbirdio/netbird/client/internal/updater" "github.com/netbirdio/netbird/client/proto" "github.com/netbirdio/netbird/version" ) @@ -89,6 +91,8 @@ type Server struct { sleepHandler *sleephandler.SleepHandler + updateManager *updater.Manager + jwtCache *jwtCache } @@ -135,6 +139,12 @@ func (s *Server) Start() error { log.Warnf(errRestoreResidualState, err) } + if s.updateManager == nil { + stateMgr := statemanager.New(s.profileManager.GetStatePath()) + s.updateManager = updater.NewManager(s.statusRecorder, stateMgr) + s.updateManager.CheckUpdateSuccess(s.rootCtx) + } + // if current state contains any error, return it // in all other cases we can continue execution only if status is idle and up command was // not in the progress or already successfully established connection. @@ -192,14 +202,14 @@ func (s *Server) Start() error { s.clientRunning = true s.clientRunningChan = make(chan struct{}) s.clientGiveUpChan = make(chan struct{}) - go s.connectWithRetryRuns(ctx, config, s.statusRecorder, false, s.clientRunningChan, s.clientGiveUpChan) + go s.connectWithRetryRuns(ctx, config, s.statusRecorder, s.clientRunningChan, s.clientGiveUpChan) return nil } // connectWithRetryRuns runs the client connection with a backoff strategy where we retry the operation as additional // mechanism to keep the client connected even when the connection is lost. // we cancel retry if the client receive a stop or down command, or if disable auto connect is configured. -func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profilemanager.Config, statusRecorder *peer.Status, doInitialAutoUpdate bool, runningChan chan struct{}, giveUpChan chan struct{}) { +func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profilemanager.Config, statusRecorder *peer.Status, runningChan chan struct{}, giveUpChan chan struct{}) { defer func() { s.mutex.Lock() s.clientRunning = false @@ -207,7 +217,7 @@ func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profil }() if s.config.DisableAutoConnect { - if err := s.connect(ctx, s.config, s.statusRecorder, doInitialAutoUpdate, runningChan); err != nil { + if err := s.connect(ctx, s.config, s.statusRecorder, runningChan); err != nil { log.Debugf("run client connection exited with error: %v", err) } log.Tracef("client connection exited") @@ -236,8 +246,7 @@ func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profil }() runOperation := func() error { - err := s.connect(ctx, profileConfig, statusRecorder, doInitialAutoUpdate, runningChan) - doInitialAutoUpdate = false + err := s.connect(ctx, profileConfig, statusRecorder, runningChan) if err != nil { log.Debugf("run client connection exited with error: %v. Will retry in the background", err) return err @@ -717,11 +726,7 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR s.clientRunningChan = make(chan struct{}) s.clientGiveUpChan = make(chan struct{}) - var doAutoUpdate bool - if msg != nil && msg.AutoUpdate != nil && *msg.AutoUpdate { - doAutoUpdate = true - } - go s.connectWithRetryRuns(ctx, s.config, s.statusRecorder, doAutoUpdate, s.clientRunningChan, s.clientGiveUpChan) + go s.connectWithRetryRuns(ctx, s.config, s.statusRecorder, s.clientRunningChan, s.clientGiveUpChan) s.mutex.Unlock() return s.waitForUp(callerCtx) @@ -1623,9 +1628,10 @@ func (s *Server) GetFeatures(ctx context.Context, msg *proto.GetFeaturesRequest) return features, nil } -func (s *Server) connect(ctx context.Context, config *profilemanager.Config, statusRecorder *peer.Status, doInitialAutoUpdate bool, runningChan chan struct{}) error { +func (s *Server) connect(ctx context.Context, config *profilemanager.Config, statusRecorder *peer.Status, runningChan chan struct{}) error { log.Tracef("running client connection") - client := internal.NewConnectClient(ctx, config, statusRecorder, doInitialAutoUpdate) + client := internal.NewConnectClient(ctx, config, statusRecorder) + client.SetUpdateManager(s.updateManager) client.SetSyncResponsePersistence(s.persistSyncResponse) s.mutex.Lock() @@ -1656,6 +1662,14 @@ func (s *Server) checkUpdateSettingsDisabled() bool { return false } +func (s *Server) startUpdateManagerForGUI() { + if s.updateManager == nil { + return + } + s.updateManager.Start(s.rootCtx) + s.updateManager.NotifyUI() +} + func (s *Server) onSessionExpire() { if runtime.GOOS != "windows" { isUIActive := internal.CheckUIApp() diff --git a/client/server/server_connect_test.go b/client/server/server_connect_test.go index 8d31c2ae6..faea7da39 100644 --- a/client/server/server_connect_test.go +++ b/client/server/server_connect_test.go @@ -22,7 +22,7 @@ func newTestServer() *Server { } func newDummyConnectClient(ctx context.Context) *internal.ConnectClient { - return internal.NewConnectClient(ctx, nil, nil, false) + return internal.NewConnectClient(ctx, nil, nil) } // TestConnectSetsClientWithMutex validates that connect() sets s.connectClient diff --git a/client/server/server_test.go b/client/server/server_test.go index 82079c531..6de23d501 100644 --- a/client/server/server_test.go +++ b/client/server/server_test.go @@ -113,7 +113,7 @@ func TestConnectWithRetryRuns(t *testing.T) { t.Setenv(maxRetryTimeVar, "5s") t.Setenv(retryMultiplierVar, "1") - s.connectWithRetryRuns(ctx, config, s.statusRecorder, false, nil, nil) + s.connectWithRetryRuns(ctx, config, s.statusRecorder, nil, nil) if counter < 3 { t.Fatalf("expected counter > 2, got %d", counter) } diff --git a/client/server/triggerupdate.go b/client/server/triggerupdate.go new file mode 100644 index 000000000..ffcb527e7 --- /dev/null +++ b/client/server/triggerupdate.go @@ -0,0 +1,24 @@ +package server + +import ( + "context" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/proto" +) + +// TriggerUpdate initiates installation of the pending enforced version. +// It is called when the user clicks the install button in the UI (Mode 2 / enforced update). +func (s *Server) TriggerUpdate(ctx context.Context, _ *proto.TriggerUpdateRequest) (*proto.TriggerUpdateResponse, error) { + if s.updateManager == nil { + return &proto.TriggerUpdateResponse{Success: false, ErrorMsg: "update manager not available"}, nil + } + + if err := s.updateManager.Install(ctx); err != nil { + log.Warnf("TriggerUpdate failed: %v", err) + return &proto.TriggerUpdateResponse{Success: false, ErrorMsg: err.Error()}, nil + } + + return &proto.TriggerUpdateResponse{Success: true}, nil +} diff --git a/client/server/updateresult.go b/client/server/updateresult.go index 8e00d5062..8d1ef0e5f 100644 --- a/client/server/updateresult.go +++ b/client/server/updateresult.go @@ -5,7 +5,7 @@ import ( log "github.com/sirupsen/logrus" - "github.com/netbirdio/netbird/client/internal/updatemanager/installer" + "github.com/netbirdio/netbird/client/internal/updater/installer" "github.com/netbirdio/netbird/client/proto" ) diff --git a/client/ui/client_ui.go b/client/ui/client_ui.go index 7af00cd20..0574e53d0 100644 --- a/client/ui/client_ui.go +++ b/client/ui/client_ui.go @@ -34,7 +34,6 @@ import ( "golang.zx2c4.com/wireguard/wgctrl/wgtypes" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - protobuf "google.golang.org/protobuf/proto" "github.com/netbirdio/netbird/client/iface" "github.com/netbirdio/netbird/client/internal" @@ -308,10 +307,11 @@ type serviceClient struct { sshJWTCacheTTL int connected bool - update *version.Update daemonVersion string updateIndicationLock sync.Mutex isUpdateIconActive bool + isEnforcedUpdate bool + lastNotifiedVersion string settingsEnabled bool profilesEnabled bool showNetworks bool @@ -367,7 +367,6 @@ func newServiceClient(args *newServiceClientArgs) *serviceClient { showAdvancedSettings: args.showSettings, showNetworks: args.showNetworks, - update: version.NewUpdateAndStart("nb/client-ui"), } s.eventHandler = newEventHandler(s) @@ -828,7 +827,7 @@ func (s *serviceClient) handleSSOLogin(ctx context.Context, loginResp *proto.Log return nil } -func (s *serviceClient) menuUpClick(ctx context.Context, wannaAutoUpdate bool) error { +func (s *serviceClient) menuUpClick(ctx context.Context) error { systray.SetTemplateIcon(iconConnectingMacOS, s.icConnecting) conn, err := s.getSrvClient(defaultFailTimeout) if err != nil { @@ -850,9 +849,7 @@ func (s *serviceClient) menuUpClick(ctx context.Context, wannaAutoUpdate bool) e return nil } - if _, err := s.conn.Up(s.ctx, &proto.UpRequest{ - AutoUpdate: protobuf.Bool(wannaAutoUpdate), - }); err != nil { + if _, err := s.conn.Up(s.ctx, &proto.UpRequest{}); err != nil { return fmt.Errorf("start connection: %w", err) } @@ -933,13 +930,13 @@ func (s *serviceClient) updateStatus() error { systrayIconState = false } - // the updater struct notify by the upgrades available only, but if meanwhile the daemon has successfully - // updated must reset the mUpdate visibility state + // if the daemon version changed (e.g. after a successful update), reset the update indication if s.daemonVersion != status.DaemonVersion { - s.mUpdate.Hide() + if s.daemonVersion != "" { + s.mUpdate.Hide() + s.isUpdateIconActive = false + } s.daemonVersion = status.DaemonVersion - - s.isUpdateIconActive = s.update.SetDaemonVersion(status.DaemonVersion) if !s.isUpdateIconActive { if systrayIconState { systray.SetTemplateIcon(iconConnectedMacOS, s.icConnected) @@ -1091,7 +1088,6 @@ func (s *serviceClient) onTrayReady() { // update exit node menu in case service is already connected go s.updateExitNodes() - s.update.SetOnUpdateListener(s.onUpdateAvailable) go func() { s.getSrvConfig() time.Sleep(100 * time.Millisecond) // To prevent race condition caused by systray not being fully initialized and ignoring setIcon @@ -1135,6 +1131,13 @@ func (s *serviceClient) onTrayReady() { } } }) + s.eventManager.AddHandler(func(event *proto.SystemEvent) { + if newVersion, ok := event.Metadata["new_version_available"]; ok { + _, enforced := event.Metadata["enforced"] + log.Infof("received new_version_available event: version=%s enforced=%v", newVersion, enforced) + s.onUpdateAvailable(newVersion, enforced) + } + }) go s.eventManager.Start(s.ctx) go s.eventHandler.listen(s.ctx) @@ -1507,10 +1510,18 @@ func protoConfigToConfig(cfg *proto.GetConfigResponse) *profilemanager.Config { return &config } -func (s *serviceClient) onUpdateAvailable() { +func (s *serviceClient) onUpdateAvailable(newVersion string, enforced bool) { s.updateIndicationLock.Lock() defer s.updateIndicationLock.Unlock() + s.isEnforcedUpdate = enforced + if enforced { + s.mUpdate.SetTitle("Install version " + newVersion) + } else { + s.lastNotifiedVersion = "" + s.mUpdate.SetTitle("Download latest version") + } + s.mUpdate.Show() s.isUpdateIconActive = true @@ -1519,6 +1530,11 @@ func (s *serviceClient) onUpdateAvailable() { } else { systray.SetTemplateIcon(iconUpdateDisconnectedMacOS, s.icUpdateDisconnected) } + + if enforced && s.lastNotifiedVersion != newVersion { + s.lastNotifiedVersion = newVersion + s.app.SendNotification(fyne.NewNotification("Update available", "A new version "+newVersion+" is ready to install")) + } } // onSessionExpire sends a notification to the user when the session expires. diff --git a/client/ui/event/event.go b/client/ui/event/event.go index 4d949416d..b8ed09a5c 100644 --- a/client/ui/event/event.go +++ b/client/ui/event/event.go @@ -107,12 +107,7 @@ func (e *Manager) handleEvent(event *proto.SystemEvent) { handlers := slices.Clone(e.handlers) e.mu.Unlock() - // critical events are always shown - if !enabled && event.Severity != proto.SystemEvent_CRITICAL { - return - } - - if event.UserMessage != "" { + if event.UserMessage != "" && (enabled || event.Severity == proto.SystemEvent_CRITICAL) { title := e.getEventTitle(event) body := event.UserMessage id := event.Metadata["id"] diff --git a/client/ui/event_handler.go b/client/ui/event_handler.go index 6adf8778c..60a580dae 100644 --- a/client/ui/event_handler.go +++ b/client/ui/event_handler.go @@ -82,7 +82,7 @@ func (h *eventHandler) handleConnectClick() { go func() { defer connectCancel() - if err := h.client.menuUpClick(connectCtx, true); err != nil { + if err := h.client.menuUpClick(connectCtx); err != nil { st, ok := status.FromError(err) if errors.Is(err, context.Canceled) || (ok && st.Code() == codes.Canceled) { log.Debugf("connect operation cancelled by user") @@ -211,9 +211,42 @@ func (h *eventHandler) handleGitHubClick() { } func (h *eventHandler) handleUpdateClick() { - if err := openURL(version.DownloadUrl()); err != nil { - log.Errorf("failed to open download URL: %v", err) + h.client.updateIndicationLock.Lock() + enforced := h.client.isEnforcedUpdate + h.client.updateIndicationLock.Unlock() + + if !enforced { + if err := openURL(version.DownloadUrl()); err != nil { + log.Errorf("failed to open download URL: %v", err) + } + return } + + // prevent blocking against a busy server + h.client.mUpdate.Disable() + go func() { + defer h.client.mUpdate.Enable() + conn, err := h.client.getSrvClient(defaultFailTimeout) + if err != nil { + log.Errorf("failed to get service client for update: %v", err) + _ = openURL(version.DownloadUrl()) + return + } + + resp, err := conn.TriggerUpdate(h.client.ctx, &proto.TriggerUpdateRequest{}) + if err != nil { + log.Errorf("TriggerUpdate failed: %v", err) + _ = openURL(version.DownloadUrl()) + return + } + if !resp.Success { + log.Errorf("TriggerUpdate failed: %s", resp.ErrorMsg) + _ = openURL(version.DownloadUrl()) + return + } + + log.Infof("update triggered via daemon") + }() } func (h *eventHandler) handleNetworksClick() { diff --git a/client/ui/profile.go b/client/ui/profile.go index a38d8918a..74189c9a0 100644 --- a/client/ui/profile.go +++ b/client/ui/profile.go @@ -397,7 +397,7 @@ type profileMenu struct { logoutSubItem *subItem profilesState []Profile downClickCallback func() error - upClickCallback func(context.Context, bool) error + upClickCallback func(context.Context) error getSrvClientCallback func(timeout time.Duration) (proto.DaemonServiceClient, error) loadSettingsCallback func() app fyne.App @@ -411,7 +411,7 @@ type newProfileMenuArgs struct { profileMenuItem *systray.MenuItem emailMenuItem *systray.MenuItem downClickCallback func() error - upClickCallback func(context.Context, bool) error + upClickCallback func(context.Context) error getSrvClientCallback func(timeout time.Duration) (proto.DaemonServiceClient, error) loadSettingsCallback func() app fyne.App @@ -579,7 +579,7 @@ func (p *profileMenu) refresh() { connectCtx, connectCancel := context.WithCancel(p.ctx) p.serviceClient.connectCancel = connectCancel - if err := p.upClickCallback(connectCtx, false); err != nil { + if err := p.upClickCallback(connectCtx); err != nil { log.Errorf("failed to handle up click after switching profile: %v", err) } diff --git a/client/ui/quickactions.go b/client/ui/quickactions.go index 76440d684..bf47ac434 100644 --- a/client/ui/quickactions.go +++ b/client/ui/quickactions.go @@ -267,7 +267,7 @@ func (s *serviceClient) showQuickActionsUI() { connCmd := connectCommand{ connectClient: func() error { - return s.menuUpClick(s.ctx, false) + return s.menuUpClick(s.ctx) }, } diff --git a/management/internals/shared/grpc/conversion.go b/management/internals/shared/grpc/conversion.go index c74fa2660..ef417d3cf 100644 --- a/management/internals/shared/grpc/conversion.go +++ b/management/internals/shared/grpc/conversion.go @@ -107,7 +107,8 @@ func toPeerConfig(peer *nbpeer.Peer, network *types.Network, dnsName string, set RoutingPeerDnsResolutionEnabled: settings.RoutingPeerDNSResolutionEnabled, LazyConnectionEnabled: settings.LazyConnectionEnabled, AutoUpdate: &proto.AutoUpdateSettings{ - Version: settings.AutoUpdateVersion, + Version: settings.AutoUpdateVersion, + AlwaysUpdate: settings.AutoUpdateAlways, }, } } diff --git a/management/server/account.go b/management/server/account.go index 01d0eebfa..75db36a5f 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -335,7 +335,8 @@ func (am *DefaultAccountManager) UpdateAccountSettings(ctx context.Context, acco if oldSettings.RoutingPeerDNSResolutionEnabled != newSettings.RoutingPeerDNSResolutionEnabled || oldSettings.LazyConnectionEnabled != newSettings.LazyConnectionEnabled || oldSettings.DNSDomain != newSettings.DNSDomain || - oldSettings.AutoUpdateVersion != newSettings.AutoUpdateVersion { + oldSettings.AutoUpdateVersion != newSettings.AutoUpdateVersion || + oldSettings.AutoUpdateAlways != newSettings.AutoUpdateAlways { updateAccountPeers = true } @@ -376,6 +377,7 @@ func (am *DefaultAccountManager) UpdateAccountSettings(ctx context.Context, acco am.handlePeerLoginExpirationSettings(ctx, oldSettings, newSettings, userID, accountID) am.handleGroupsPropagationSettings(ctx, oldSettings, newSettings, userID, accountID) am.handleAutoUpdateVersionSettings(ctx, oldSettings, newSettings, userID, accountID) + am.handleAutoUpdateAlwaysSettings(ctx, oldSettings, newSettings, userID, accountID) am.handlePeerExposeSettings(ctx, oldSettings, newSettings, userID, accountID) if err = am.handleInactivityExpirationSettings(ctx, oldSettings, newSettings, userID, accountID); err != nil { return nil, err @@ -493,6 +495,16 @@ func (am *DefaultAccountManager) handleAutoUpdateVersionSettings(ctx context.Con } } +func (am *DefaultAccountManager) handleAutoUpdateAlwaysSettings(ctx context.Context, oldSettings, newSettings *types.Settings, userID, accountID string) { + if oldSettings.AutoUpdateAlways != newSettings.AutoUpdateAlways { + if newSettings.AutoUpdateAlways { + am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountAutoUpdateAlwaysEnabled, nil) + } else { + am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountAutoUpdateAlwaysDisabled, nil) + } + } +} + func (am *DefaultAccountManager) handlePeerExposeSettings(ctx context.Context, oldSettings, newSettings *types.Settings, userID, accountID string) { oldEnabled := oldSettings.PeerExposeEnabled newEnabled := newSettings.PeerExposeEnabled diff --git a/management/server/activity/codes.go b/management/server/activity/codes.go index 948d599ba..ddc3e00c3 100644 --- a/management/server/activity/codes.go +++ b/management/server/activity/codes.go @@ -220,6 +220,11 @@ const ( // AccountPeerExposeDisabled indicates that a user disabled peer expose for the account AccountPeerExposeDisabled Activity = 115 + // AccountAutoUpdateAlwaysEnabled indicates that a user enabled always auto-update for the account + AccountAutoUpdateAlwaysEnabled Activity = 116 + // AccountAutoUpdateAlwaysDisabled indicates that a user disabled always auto-update for the account + AccountAutoUpdateAlwaysDisabled Activity = 117 + // DomainAdded indicates that a user added a custom domain DomainAdded Activity = 118 // DomainDeleted indicates that a user deleted a custom domain @@ -339,6 +344,8 @@ var activityMap = map[Activity]Code{ UserCreated: {"User created", "user.create"}, AccountAutoUpdateVersionUpdated: {"Account AutoUpdate Version updated", "account.settings.auto.version.update"}, + AccountAutoUpdateAlwaysEnabled: {"Account auto-update always enabled", "account.setting.auto.update.always.enable"}, + AccountAutoUpdateAlwaysDisabled: {"Account auto-update always disabled", "account.setting.auto.update.always.disable"}, IdentityProviderCreated: {"Identity provider created", "identityprovider.create"}, IdentityProviderUpdated: {"Identity provider updated", "identityprovider.update"}, diff --git a/management/server/http/handlers/accounts/accounts_handler.go b/management/server/http/handlers/accounts/accounts_handler.go index 27a57c434..cc5567e3d 100644 --- a/management/server/http/handlers/accounts/accounts_handler.go +++ b/management/server/http/handlers/accounts/accounts_handler.go @@ -225,6 +225,9 @@ func (h *handler) updateAccountRequestSettings(req api.PutApiAccountsAccountIdJS return nil, fmt.Errorf("invalid AutoUpdateVersion") } } + if req.Settings.AutoUpdateAlways != nil { + returnSettings.AutoUpdateAlways = *req.Settings.AutoUpdateAlways + } return returnSettings, nil } @@ -348,6 +351,7 @@ func toAccountResponse(accountID string, settings *types.Settings, meta *types.A LazyConnectionEnabled: &settings.LazyConnectionEnabled, DnsDomain: &settings.DNSDomain, AutoUpdateVersion: &settings.AutoUpdateVersion, + AutoUpdateAlways: &settings.AutoUpdateAlways, EmbeddedIdpEnabled: &settings.EmbeddedIdpEnabled, LocalAuthDisabled: &settings.LocalAuthDisabled, } diff --git a/management/server/http/handlers/accounts/accounts_handler_test.go b/management/server/http/handlers/accounts/accounts_handler_test.go index 6cbd5908d..739dfe2f6 100644 --- a/management/server/http/handlers/accounts/accounts_handler_test.go +++ b/management/server/http/handlers/accounts/accounts_handler_test.go @@ -121,6 +121,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { RoutingPeerDnsResolutionEnabled: br(false), LazyConnectionEnabled: br(false), DnsDomain: sr(""), + AutoUpdateAlways: br(false), AutoUpdateVersion: sr(""), EmbeddedIdpEnabled: br(false), LocalAuthDisabled: br(false), @@ -146,6 +147,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { RoutingPeerDnsResolutionEnabled: br(false), LazyConnectionEnabled: br(false), DnsDomain: sr(""), + AutoUpdateAlways: br(false), AutoUpdateVersion: sr(""), EmbeddedIdpEnabled: br(false), LocalAuthDisabled: br(false), @@ -171,6 +173,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { RoutingPeerDnsResolutionEnabled: br(false), LazyConnectionEnabled: br(false), DnsDomain: sr(""), + AutoUpdateAlways: br(false), AutoUpdateVersion: sr("latest"), EmbeddedIdpEnabled: br(false), LocalAuthDisabled: br(false), @@ -196,6 +199,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { RoutingPeerDnsResolutionEnabled: br(false), LazyConnectionEnabled: br(false), DnsDomain: sr(""), + AutoUpdateAlways: br(false), AutoUpdateVersion: sr(""), EmbeddedIdpEnabled: br(false), LocalAuthDisabled: br(false), @@ -221,6 +225,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { RoutingPeerDnsResolutionEnabled: br(false), LazyConnectionEnabled: br(false), DnsDomain: sr(""), + AutoUpdateAlways: br(false), AutoUpdateVersion: sr(""), EmbeddedIdpEnabled: br(false), LocalAuthDisabled: br(false), @@ -246,6 +251,7 @@ func TestAccounts_AccountsHandler(t *testing.T) { RoutingPeerDnsResolutionEnabled: br(false), LazyConnectionEnabled: br(false), DnsDomain: sr(""), + AutoUpdateAlways: br(false), AutoUpdateVersion: sr(""), EmbeddedIdpEnabled: br(false), LocalAuthDisabled: br(false), diff --git a/management/server/types/settings.go b/management/server/types/settings.go index e165968fc..4ea79ec72 100644 --- a/management/server/types/settings.go +++ b/management/server/types/settings.go @@ -61,6 +61,10 @@ type Settings struct { // AutoUpdateVersion client auto-update version AutoUpdateVersion string `gorm:"default:'disabled'"` + // AutoUpdateAlways when true, updates are installed automatically in the background; + // when false, updates require user interaction from the UI + AutoUpdateAlways bool `gorm:"default:false"` + // EmbeddedIdpEnabled indicates if the embedded identity provider is enabled. // This is a runtime-only field, not stored in the database. EmbeddedIdpEnabled bool `gorm:"-"` @@ -91,6 +95,7 @@ func (s *Settings) Copy() *Settings { DNSDomain: s.DNSDomain, NetworkRange: s.NetworkRange, AutoUpdateVersion: s.AutoUpdateVersion, + AutoUpdateAlways: s.AutoUpdateAlways, EmbeddedIdpEnabled: s.EmbeddedIdpEnabled, LocalAuthDisabled: s.LocalAuthDisabled, } diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index c67231342..6d2967aa9 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -347,6 +347,10 @@ components: description: Set Clients auto-update version. "latest", "disabled", or a specific version (e.g "0.50.1") type: string example: "0.51.2" + auto_update_always: + description: When true, updates are installed automatically in the background. When false, updates require user interaction from the UI. + type: boolean + example: false embedded_idp_enabled: description: Indicates whether the embedded identity provider (Dex) is enabled for this account. This is a read-only field. type: boolean diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index f218679c0..f5a2b7ced 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -1307,6 +1307,9 @@ type AccountRequest struct { // AccountSettings defines model for AccountSettings. type AccountSettings struct { + // AutoUpdateAlways When true, updates are installed automatically in the background. When false, updates require user interaction from the UI. + AutoUpdateAlways *bool `json:"auto_update_always,omitempty"` + // AutoUpdateVersion Set Clients auto-update version. "latest", "disabled", or a specific version (e.g "0.50.1") AutoUpdateVersion *string `json:"auto_update_version,omitempty"` diff --git a/shared/management/proto/management.proto b/shared/management/proto/management.proto index 3667ae27f..fdbe3a365 100644 --- a/shared/management/proto/management.proto +++ b/shared/management/proto/management.proto @@ -340,8 +340,8 @@ message PeerConfig { message AutoUpdateSettings { string version = 1; /* - alwaysUpdate = true → Updates happen automatically in the background - alwaysUpdate = false → Updates only happen when triggered by a peer connection + alwaysUpdate = true → Updates are installed automatically in the background + alwaysUpdate = false → Updates require user interaction from the UI */ bool alwaysUpdate = 2; } From 3e6baea4052a47f12a3029508ae5806cd87c2faa Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Sat, 14 Mar 2026 01:36:44 +0800 Subject: [PATCH 213/374] [management,proxy,client] Add L4 capabilities (TLS/TCP/UDP) (#5530) --- client/cmd/expose.go | 144 +- client/internal/expose/manager.go | 8 +- client/internal/expose/request.go | 9 +- client/proto/daemon.pb.go | 48 +- client/proto/daemon.proto | 3 + client/server/server.go | 7 +- .../reverseproxy/accesslogs/accesslogentry.go | 42 +- .../modules/reverseproxy/domain/domain.go | 3 + .../reverseproxy/domain/manager/api.go | 9 +- .../reverseproxy/domain/manager/manager.go | 36 +- .../modules/reverseproxy/proxy/manager.go | 1 + .../reverseproxy/proxy/manager/controller.go | 5 + .../reverseproxy/proxy/manager_mock.go | 14 + .../modules/reverseproxy/service/interface.go | 4 +- .../reverseproxy/service/interface_mock.go | 16 +- .../reverseproxy/service/manager/api.go | 9 +- .../service/manager/expose_tracker_test.go | 57 +- .../service/manager/l4_port_test.go | 582 ++++++ .../reverseproxy/service/manager/manager.go | 364 +++- .../service/manager/manager_test.go | 133 +- .../modules/reverseproxy/service/service.go | 580 ++++-- .../reverseproxy/service/service_test.go | 191 +- management/internals/server/boot.go | 1 + management/internals/server/modules.go | 4 + .../internals/shared/grpc/expose_service.go | 95 +- management/internals/shared/grpc/proxy.go | 185 +- .../internals/shared/grpc/proxy_test.go | 380 +++- management/server/http/handler.go | 3 +- .../testing/testing_tools/channel/channel.go | 2 + management/server/metrics/selfhosted.go | 6 +- management/server/store/sql_store.go | 39 +- management/server/store/store.go | 4 +- management/server/store/store_mock.go | 38 +- management/server/types/account.go | 37 +- proxy/cmd/proxy/cmd/root.go | 73 +- proxy/handle_mapping_stream_test.go | 11 +- proxy/internal/accesslog/logger.go | 70 +- proxy/internal/accesslog/middleware.go | 7 +- proxy/internal/accesslog/requestip.go | 2 +- proxy/internal/acme/manager.go | 9 +- proxy/internal/acme/manager_test.go | 20 +- proxy/internal/auth/middleware.go | 8 +- proxy/internal/auth/oidc.go | 11 +- proxy/internal/auth/password.go | 12 +- proxy/internal/auth/pin.go | 12 +- proxy/internal/conntrack/conn.go | 8 +- proxy/internal/conntrack/hijacked.go | 85 +- proxy/internal/conntrack/hijacked_test.go | 142 ++ proxy/internal/debug/client.go | 14 +- proxy/internal/debug/handler.go | 68 +- proxy/internal/debug/templates/clients.html | 4 +- proxy/internal/debug/templates/index.html | 6 +- proxy/internal/metrics/l4_metrics_test.go | 69 + proxy/internal/metrics/metrics.go | 244 ++- proxy/internal/netutil/errors.go | 40 + proxy/internal/netutil/errors_test.go | 92 + proxy/internal/proxy/context.go | 19 +- proxy/internal/proxy/proxy_bench_test.go | 6 +- proxy/internal/proxy/reverseproxy.go | 42 +- proxy/internal/proxy/reverseproxy_test.go | 20 +- proxy/internal/proxy/servicemapping.go | 12 +- proxy/internal/proxy/trustedproxy.go | 57 +- proxy/internal/proxy/trustedproxy_test.go | 22 +- proxy/internal/roundtrip/netbird.go | 256 +-- .../internal/roundtrip/netbird_bench_test.go | 33 +- proxy/internal/roundtrip/netbird_test.go | 145 +- proxy/internal/tcp/bench_test.go | 133 ++ proxy/internal/tcp/chanlistener.go | 76 + proxy/internal/tcp/peekedconn.go | 39 + proxy/internal/tcp/proxyprotocol.go | 29 + proxy/internal/tcp/proxyprotocol_test.go | 128 ++ proxy/internal/tcp/relay.go | 156 ++ proxy/internal/tcp/relay_test.go | 210 +++ proxy/internal/tcp/router.go | 570 ++++++ proxy/internal/tcp/router_test.go | 1670 +++++++++++++++++ proxy/internal/tcp/snipeek.go | 191 ++ proxy/internal/tcp/snipeek_test.go | 251 +++ proxy/internal/types/types.go | 51 + proxy/internal/types/types_test.go | 54 + proxy/internal/udp/relay.go | 496 +++++ proxy/internal/udp/relay_test.go | 493 +++++ proxy/management_integration_test.go | 10 +- proxy/server.go | 782 +++++++- shared/management/client/grpc.go | 18 +- shared/management/http/api/openapi.yml | 107 +- shared/management/http/api/types.gen.go | 118 +- shared/management/proto/management.pb.go | 53 +- shared/management/proto/management.proto | 3 + shared/management/proto/proxy_service.pb.go | 676 ++++--- shared/management/proto/proxy_service.proto | 16 + 90 files changed, 9611 insertions(+), 1397 deletions(-) create mode 100644 management/internals/modules/reverseproxy/service/manager/l4_port_test.go create mode 100644 proxy/internal/conntrack/hijacked_test.go create mode 100644 proxy/internal/metrics/l4_metrics_test.go create mode 100644 proxy/internal/netutil/errors.go create mode 100644 proxy/internal/netutil/errors_test.go create mode 100644 proxy/internal/tcp/bench_test.go create mode 100644 proxy/internal/tcp/chanlistener.go create mode 100644 proxy/internal/tcp/peekedconn.go create mode 100644 proxy/internal/tcp/proxyprotocol.go create mode 100644 proxy/internal/tcp/proxyprotocol_test.go create mode 100644 proxy/internal/tcp/relay.go create mode 100644 proxy/internal/tcp/relay_test.go create mode 100644 proxy/internal/tcp/router.go create mode 100644 proxy/internal/tcp/router_test.go create mode 100644 proxy/internal/tcp/snipeek.go create mode 100644 proxy/internal/tcp/snipeek_test.go create mode 100644 proxy/internal/types/types_test.go create mode 100644 proxy/internal/udp/relay.go create mode 100644 proxy/internal/udp/relay_test.go diff --git a/client/cmd/expose.go b/client/cmd/expose.go index 991d3ab86..1334617d8 100644 --- a/client/cmd/expose.go +++ b/client/cmd/expose.go @@ -22,20 +22,24 @@ import ( var pinRegexp = regexp.MustCompile(`^\d{6}$`) var ( - exposePin string - exposePassword string - exposeUserGroups []string - exposeDomain string - exposeNamePrefix string - exposeProtocol string + exposePin string + exposePassword string + exposeUserGroups []string + exposeDomain string + exposeNamePrefix string + exposeProtocol string + exposeExternalPort uint16 ) var exposeCmd = &cobra.Command{ - Use: "expose ", - Short: "Expose a local port via the NetBird reverse proxy", - Args: cobra.ExactArgs(1), - Example: "netbird expose --with-password safe-pass 8080", - RunE: exposeFn, + Use: "expose ", + Short: "Expose a local port via the NetBird reverse proxy", + Args: cobra.ExactArgs(1), + Example: ` netbird expose --with-password safe-pass 8080 + netbird expose --protocol tcp 5432 + netbird expose --protocol tcp --with-external-port 5433 5432 + netbird expose --protocol tls --with-custom-domain tls.example.com 4443`, + RunE: exposeFn, } func init() { @@ -44,7 +48,52 @@ func init() { exposeCmd.Flags().StringSliceVar(&exposeUserGroups, "with-user-groups", nil, "Restrict access to specific user groups with SSO (e.g. --with-user-groups devops,Backend)") exposeCmd.Flags().StringVar(&exposeDomain, "with-custom-domain", "", "Custom domain for the exposed service, must be configured to your account (e.g. --with-custom-domain myapp.example.com)") exposeCmd.Flags().StringVar(&exposeNamePrefix, "with-name-prefix", "", "Prefix for the generated service name (e.g. --with-name-prefix my-app)") - exposeCmd.Flags().StringVar(&exposeProtocol, "protocol", "http", "Protocol to use, http/https is supported (e.g. --protocol http)") + exposeCmd.Flags().StringVar(&exposeProtocol, "protocol", "http", "Protocol to use: http, https, tcp, udp, or tls (e.g. --protocol tcp)") + exposeCmd.Flags().Uint16Var(&exposeExternalPort, "with-external-port", 0, "Public-facing external port on the proxy cluster (defaults to the target port for L4)") +} + +// isClusterProtocol returns true for L4/TLS protocols that reject HTTP-style auth flags. +func isClusterProtocol(protocol string) bool { + switch strings.ToLower(protocol) { + case "tcp", "udp", "tls": + return true + default: + return false + } +} + +// isPortBasedProtocol returns true for pure port-based protocols (TCP/UDP) +// where domain display doesn't apply. TLS uses SNI so it has a domain. +func isPortBasedProtocol(protocol string) bool { + switch strings.ToLower(protocol) { + case "tcp", "udp": + return true + default: + return false + } +} + +// extractPort returns the port portion of a URL like "tcp://host:12345", or +// falls back to the given default formatted as a string. +func extractPort(serviceURL string, fallback uint16) string { + u := serviceURL + if idx := strings.Index(u, "://"); idx != -1 { + u = u[idx+3:] + } + if i := strings.LastIndex(u, ":"); i != -1 { + if p := u[i+1:]; p != "" { + return p + } + } + return strconv.FormatUint(uint64(fallback), 10) +} + +// resolveExternalPort returns the effective external port, defaulting to the target port. +func resolveExternalPort(targetPort uint64) uint16 { + if exposeExternalPort != 0 { + return exposeExternalPort + } + return uint16(targetPort) } func validateExposeFlags(cmd *cobra.Command, portStr string) (uint64, error) { @@ -57,7 +106,15 @@ func validateExposeFlags(cmd *cobra.Command, portStr string) (uint64, error) { } if !isProtocolValid(exposeProtocol) { - return 0, fmt.Errorf("unsupported protocol %q: only 'http' or 'https' are supported", exposeProtocol) + return 0, fmt.Errorf("unsupported protocol %q: must be http, https, tcp, udp, or tls", exposeProtocol) + } + + if isClusterProtocol(exposeProtocol) { + if exposePin != "" || exposePassword != "" || len(exposeUserGroups) > 0 { + return 0, fmt.Errorf("auth flags (--with-pin, --with-password, --with-user-groups) are not supported for %s protocol", exposeProtocol) + } + } else if cmd.Flags().Changed("with-external-port") { + return 0, fmt.Errorf("--with-external-port is not supported for %s protocol", exposeProtocol) } if exposePin != "" && !pinRegexp.MatchString(exposePin) { @@ -76,7 +133,12 @@ func validateExposeFlags(cmd *cobra.Command, portStr string) (uint64, error) { } func isProtocolValid(exposeProtocol string) bool { - return strings.ToLower(exposeProtocol) == "http" || strings.ToLower(exposeProtocol) == "https" + switch strings.ToLower(exposeProtocol) { + case "http", "https", "tcp", "udp", "tls": + return true + default: + return false + } } func exposeFn(cmd *cobra.Command, args []string) error { @@ -123,7 +185,7 @@ func exposeFn(cmd *cobra.Command, args []string) error { return err } - stream, err := client.ExposeService(ctx, &proto.ExposeServiceRequest{ + req := &proto.ExposeServiceRequest{ Port: uint32(port), Protocol: protocol, Pin: exposePin, @@ -131,7 +193,12 @@ func exposeFn(cmd *cobra.Command, args []string) error { UserGroups: exposeUserGroups, Domain: exposeDomain, NamePrefix: exposeNamePrefix, - }) + } + if isClusterProtocol(exposeProtocol) { + req.ListenPort = uint32(resolveExternalPort(port)) + } + + stream, err := client.ExposeService(ctx, req) if err != nil { return fmt.Errorf("expose service: %w", err) } @@ -149,8 +216,14 @@ func toExposeProtocol(exposeProtocol string) (proto.ExposeProtocol, error) { return proto.ExposeProtocol_EXPOSE_HTTP, nil case "https": return proto.ExposeProtocol_EXPOSE_HTTPS, nil + case "tcp": + return proto.ExposeProtocol_EXPOSE_TCP, nil + case "udp": + return proto.ExposeProtocol_EXPOSE_UDP, nil + case "tls": + return proto.ExposeProtocol_EXPOSE_TLS, nil default: - return 0, fmt.Errorf("unsupported protocol %q: only 'http' or 'https' are supported", exposeProtocol) + return 0, fmt.Errorf("unsupported protocol %q: must be http, https, tcp, udp, or tls", exposeProtocol) } } @@ -160,20 +233,33 @@ func handleExposeReady(cmd *cobra.Command, stream proto.DaemonService_ExposeServ return fmt.Errorf("receive expose event: %w", err) } - switch e := event.Event.(type) { - case *proto.ExposeServiceEvent_Ready: - cmd.Println("Service exposed successfully!") - cmd.Printf(" Name: %s\n", e.Ready.ServiceName) - cmd.Printf(" URL: %s\n", e.Ready.ServiceUrl) - cmd.Printf(" Domain: %s\n", e.Ready.Domain) - cmd.Printf(" Protocol: %s\n", exposeProtocol) - cmd.Printf(" Port: %d\n", port) - cmd.Println() - cmd.Println("Press Ctrl+C to stop exposing.") - return nil - default: + ready, ok := event.Event.(*proto.ExposeServiceEvent_Ready) + if !ok { return fmt.Errorf("unexpected expose event: %T", event.Event) } + printExposeReady(cmd, ready.Ready, port) + return nil +} + +func printExposeReady(cmd *cobra.Command, r *proto.ExposeServiceReady, port uint64) { + cmd.Println("Service exposed successfully!") + cmd.Printf(" Name: %s\n", r.ServiceName) + if r.ServiceUrl != "" { + cmd.Printf(" URL: %s\n", r.ServiceUrl) + } + if r.Domain != "" && !isPortBasedProtocol(exposeProtocol) { + cmd.Printf(" Domain: %s\n", r.Domain) + } + cmd.Printf(" Protocol: %s\n", exposeProtocol) + cmd.Printf(" Internal: %d\n", port) + if isClusterProtocol(exposeProtocol) { + cmd.Printf(" External: %s\n", extractPort(r.ServiceUrl, resolveExternalPort(port))) + } + if r.PortAutoAssigned && exposeExternalPort != 0 { + cmd.Printf("\n Note: requested port %d was reassigned\n", exposeExternalPort) + } + cmd.Println() + cmd.Println("Press Ctrl+C to stop exposing.") } func waitForExposeEvents(cmd *cobra.Command, ctx context.Context, stream proto.DaemonService_ExposeServiceClient) error { diff --git a/client/internal/expose/manager.go b/client/internal/expose/manager.go index 8cd93685e..c59a1a7bd 100644 --- a/client/internal/expose/manager.go +++ b/client/internal/expose/manager.go @@ -12,9 +12,10 @@ const renewTimeout = 10 * time.Second // Response holds the response from exposing a service. type Response struct { - ServiceName string - ServiceURL string - Domain string + ServiceName string + ServiceURL string + Domain string + PortAutoAssigned bool } type Request struct { @@ -25,6 +26,7 @@ type Request struct { Pin string Password string UserGroups []string + ListenPort uint16 } type ManagementClient interface { diff --git a/client/internal/expose/request.go b/client/internal/expose/request.go index 7e12d0513..bff4f2ce7 100644 --- a/client/internal/expose/request.go +++ b/client/internal/expose/request.go @@ -15,6 +15,7 @@ func NewRequest(req *daemonProto.ExposeServiceRequest) *Request { UserGroups: req.UserGroups, Domain: req.Domain, NamePrefix: req.NamePrefix, + ListenPort: uint16(req.ListenPort), } } @@ -27,13 +28,15 @@ func toClientExposeRequest(req Request) mgm.ExposeRequest { Pin: req.Pin, Password: req.Password, UserGroups: req.UserGroups, + ListenPort: req.ListenPort, } } func fromClientExposeResponse(response *mgm.ExposeResponse) *Response { return &Response{ - ServiceName: response.ServiceName, - Domain: response.Domain, - ServiceURL: response.ServiceURL, + ServiceName: response.ServiceName, + Domain: response.Domain, + ServiceURL: response.ServiceURL, + PortAutoAssigned: response.PortAutoAssigned, } } diff --git a/client/proto/daemon.pb.go b/client/proto/daemon.pb.go index fd3c18f56..fa0b2f93b 100644 --- a/client/proto/daemon.pb.go +++ b/client/proto/daemon.pb.go @@ -95,6 +95,7 @@ const ( ExposeProtocol_EXPOSE_HTTPS ExposeProtocol = 1 ExposeProtocol_EXPOSE_TCP ExposeProtocol = 2 ExposeProtocol_EXPOSE_UDP ExposeProtocol = 3 + ExposeProtocol_EXPOSE_TLS ExposeProtocol = 4 ) // Enum value maps for ExposeProtocol. @@ -104,12 +105,14 @@ var ( 1: "EXPOSE_HTTPS", 2: "EXPOSE_TCP", 3: "EXPOSE_UDP", + 4: "EXPOSE_TLS", } ExposeProtocol_value = map[string]int32{ "EXPOSE_HTTP": 0, "EXPOSE_HTTPS": 1, "EXPOSE_TCP": 2, "EXPOSE_UDP": 3, + "EXPOSE_TLS": 4, } ) @@ -5741,6 +5744,7 @@ type ExposeServiceRequest struct { UserGroups []string `protobuf:"bytes,5,rep,name=user_groups,json=userGroups,proto3" json:"user_groups,omitempty"` Domain string `protobuf:"bytes,6,opt,name=domain,proto3" json:"domain,omitempty"` NamePrefix string `protobuf:"bytes,7,opt,name=name_prefix,json=namePrefix,proto3" json:"name_prefix,omitempty"` + ListenPort uint32 `protobuf:"varint,8,opt,name=listen_port,json=listenPort,proto3" json:"listen_port,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -5824,6 +5828,13 @@ func (x *ExposeServiceRequest) GetNamePrefix() string { return "" } +func (x *ExposeServiceRequest) GetListenPort() uint32 { + if x != nil { + return x.ListenPort + } + return 0 +} + type ExposeServiceEvent struct { state protoimpl.MessageState `protogen:"open.v1"` // Types that are valid to be assigned to Event: @@ -5891,12 +5902,13 @@ type ExposeServiceEvent_Ready struct { func (*ExposeServiceEvent_Ready) isExposeServiceEvent_Event() {} type ExposeServiceReady struct { - state protoimpl.MessageState `protogen:"open.v1"` - ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` - ServiceUrl string `protobuf:"bytes,2,opt,name=service_url,json=serviceUrl,proto3" json:"service_url,omitempty"` - Domain string `protobuf:"bytes,3,opt,name=domain,proto3" json:"domain,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + ServiceUrl string `protobuf:"bytes,2,opt,name=service_url,json=serviceUrl,proto3" json:"service_url,omitempty"` + Domain string `protobuf:"bytes,3,opt,name=domain,proto3" json:"domain,omitempty"` + PortAutoAssigned bool `protobuf:"varint,4,opt,name=port_auto_assigned,json=portAutoAssigned,proto3" json:"port_auto_assigned,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExposeServiceReady) Reset() { @@ -5950,6 +5962,13 @@ func (x *ExposeServiceReady) GetDomain() string { return "" } +func (x *ExposeServiceReady) GetPortAutoAssigned() bool { + if x != nil { + return x.PortAutoAssigned + } + return false +} + type PortInfo_Range struct { state protoimpl.MessageState `protogen:"open.v1"` Start uint32 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` @@ -6499,7 +6518,7 @@ const file_daemon_proto_rawDesc = "" + "\x16InstallerResultRequest\"O\n" + "\x17InstallerResultResponse\x12\x18\n" + "\asuccess\x18\x01 \x01(\bR\asuccess\x12\x1a\n" + - "\berrorMsg\x18\x02 \x01(\tR\berrorMsg\"\xe6\x01\n" + + "\berrorMsg\x18\x02 \x01(\tR\berrorMsg\"\x87\x02\n" + "\x14ExposeServiceRequest\x12\x12\n" + "\x04port\x18\x01 \x01(\rR\x04port\x122\n" + "\bprotocol\x18\x02 \x01(\x0e2\x16.daemon.ExposeProtocolR\bprotocol\x12\x10\n" + @@ -6509,15 +6528,18 @@ const file_daemon_proto_rawDesc = "" + "userGroups\x12\x16\n" + "\x06domain\x18\x06 \x01(\tR\x06domain\x12\x1f\n" + "\vname_prefix\x18\a \x01(\tR\n" + - "namePrefix\"Q\n" + + "namePrefix\x12\x1f\n" + + "\vlisten_port\x18\b \x01(\rR\n" + + "listenPort\"Q\n" + "\x12ExposeServiceEvent\x122\n" + "\x05ready\x18\x01 \x01(\v2\x1a.daemon.ExposeServiceReadyH\x00R\x05readyB\a\n" + - "\x05event\"p\n" + + "\x05event\"\x9e\x01\n" + "\x12ExposeServiceReady\x12!\n" + "\fservice_name\x18\x01 \x01(\tR\vserviceName\x12\x1f\n" + "\vservice_url\x18\x02 \x01(\tR\n" + "serviceUrl\x12\x16\n" + - "\x06domain\x18\x03 \x01(\tR\x06domain*b\n" + + "\x06domain\x18\x03 \x01(\tR\x06domain\x12,\n" + + "\x12port_auto_assigned\x18\x04 \x01(\bR\x10portAutoAssigned*b\n" + "\bLogLevel\x12\v\n" + "\aUNKNOWN\x10\x00\x12\t\n" + "\x05PANIC\x10\x01\x12\t\n" + @@ -6526,14 +6548,16 @@ const file_daemon_proto_rawDesc = "" + "\x04WARN\x10\x04\x12\b\n" + "\x04INFO\x10\x05\x12\t\n" + "\x05DEBUG\x10\x06\x12\t\n" + - "\x05TRACE\x10\a*S\n" + + "\x05TRACE\x10\a*c\n" + "\x0eExposeProtocol\x12\x0f\n" + "\vEXPOSE_HTTP\x10\x00\x12\x10\n" + "\fEXPOSE_HTTPS\x10\x01\x12\x0e\n" + "\n" + "EXPOSE_TCP\x10\x02\x12\x0e\n" + "\n" + - "EXPOSE_UDP\x10\x032\xfc\x15\n" + + "EXPOSE_UDP\x10\x03\x12\x0e\n" + + "\n" + + "EXPOSE_TLS\x10\x042\xfc\x15\n" + "\rDaemonService\x126\n" + "\x05Login\x12\x14.daemon.LoginRequest\x1a\x15.daemon.LoginResponse\"\x00\x12K\n" + "\fWaitSSOLogin\x12\x1b.daemon.WaitSSOLoginRequest\x1a\x1c.daemon.WaitSSOLoginResponse\"\x00\x12-\n" + diff --git a/client/proto/daemon.proto b/client/proto/daemon.proto index efafe3af7..89302c8c3 100644 --- a/client/proto/daemon.proto +++ b/client/proto/daemon.proto @@ -821,6 +821,7 @@ enum ExposeProtocol { EXPOSE_HTTPS = 1; EXPOSE_TCP = 2; EXPOSE_UDP = 3; + EXPOSE_TLS = 4; } message ExposeServiceRequest { @@ -831,6 +832,7 @@ message ExposeServiceRequest { repeated string user_groups = 5; string domain = 6; string name_prefix = 7; + uint32 listen_port = 8; } message ExposeServiceEvent { @@ -843,4 +845,5 @@ message ExposeServiceReady { string service_name = 1; string service_url = 2; string domain = 3; + bool port_auto_assigned = 4; } diff --git a/client/server/server.go b/client/server/server.go index 1d83366ca..7c1e70692 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -1378,9 +1378,10 @@ func (s *Server) ExposeService(req *proto.ExposeServiceRequest, srv proto.Daemon if err := srv.Send(&proto.ExposeServiceEvent{ Event: &proto.ExposeServiceEvent_Ready{ Ready: &proto.ExposeServiceReady{ - ServiceName: result.ServiceName, - ServiceUrl: result.ServiceURL, - Domain: result.Domain, + ServiceName: result.ServiceName, + ServiceUrl: result.ServiceURL, + Domain: result.Domain, + PortAutoAssigned: result.PortAutoAssigned, }, }, }); err != nil { diff --git a/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go b/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go index 0bcc59b68..619a34684 100644 --- a/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go +++ b/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go @@ -10,6 +10,15 @@ import ( "github.com/netbirdio/netbird/shared/management/proto" ) +// AccessLogProtocol identifies the transport protocol of an access log entry. +type AccessLogProtocol string + +const ( + AccessLogProtocolHTTP AccessLogProtocol = "http" + AccessLogProtocolTCP AccessLogProtocol = "tcp" + AccessLogProtocolUDP AccessLogProtocol = "udp" +) + type AccessLogEntry struct { ID string `gorm:"primaryKey"` AccountID string `gorm:"index"` @@ -22,10 +31,11 @@ type AccessLogEntry struct { Duration time.Duration `gorm:"index"` StatusCode int `gorm:"index"` Reason string - UserId string `gorm:"index"` - AuthMethodUsed string `gorm:"index"` - BytesUpload int64 `gorm:"index"` - BytesDownload int64 `gorm:"index"` + UserId string `gorm:"index"` + AuthMethodUsed string `gorm:"index"` + BytesUpload int64 `gorm:"index"` + BytesDownload int64 `gorm:"index"` + Protocol AccessLogProtocol `gorm:"index"` } // FromProto creates an AccessLogEntry from a proto.AccessLog @@ -43,17 +53,22 @@ func (a *AccessLogEntry) FromProto(serviceLog *proto.AccessLog) { a.AccountID = serviceLog.GetAccountId() a.BytesUpload = serviceLog.GetBytesUpload() a.BytesDownload = serviceLog.GetBytesDownload() + a.Protocol = AccessLogProtocol(serviceLog.GetProtocol()) if sourceIP := serviceLog.GetSourceIp(); sourceIP != "" { - if ip, err := netip.ParseAddr(sourceIP); err == nil { - a.GeoLocation.ConnectionIP = net.IP(ip.AsSlice()) + if addr, err := netip.ParseAddr(sourceIP); err == nil { + addr = addr.Unmap() + a.GeoLocation.ConnectionIP = net.IP(addr.AsSlice()) } } - if !serviceLog.GetAuthSuccess() { - a.Reason = "Authentication failed" - } else if serviceLog.GetResponseCode() >= 400 { - a.Reason = "Request failed" + // Only set reason for HTTP entries. L4 entries have no auth or status code. + if a.Protocol == "" || a.Protocol == AccessLogProtocolHTTP { + if !serviceLog.GetAuthSuccess() { + a.Reason = "Authentication failed" + } else if serviceLog.GetResponseCode() >= 400 { + a.Reason = "Request failed" + } } } @@ -90,6 +105,12 @@ func (a *AccessLogEntry) ToAPIResponse() *api.ProxyAccessLog { cityName = &a.GeoLocation.CityName } + var protocol *string + if a.Protocol != "" { + p := string(a.Protocol) + protocol = &p + } + return &api.ProxyAccessLog{ Id: a.ID, ServiceId: a.ServiceID, @@ -107,5 +128,6 @@ func (a *AccessLogEntry) ToAPIResponse() *api.ProxyAccessLog { CityName: cityName, BytesUpload: a.BytesUpload, BytesDownload: a.BytesDownload, + Protocol: protocol, } } diff --git a/management/internals/modules/reverseproxy/domain/domain.go b/management/internals/modules/reverseproxy/domain/domain.go index 83fd669af..861d026a7 100644 --- a/management/internals/modules/reverseproxy/domain/domain.go +++ b/management/internals/modules/reverseproxy/domain/domain.go @@ -14,6 +14,9 @@ type Domain struct { TargetCluster string // The proxy cluster this domain should be validated against Type Type `gorm:"-"` Validated bool + // SupportsCustomPorts is populated at query time for free domains from the + // proxy cluster capabilities. Not persisted. + SupportsCustomPorts *bool `gorm:"-"` } // EventMeta returns activity event metadata for a domain diff --git a/management/internals/modules/reverseproxy/domain/manager/api.go b/management/internals/modules/reverseproxy/domain/manager/api.go index 2fbcdd5b8..d26a6a418 100644 --- a/management/internals/modules/reverseproxy/domain/manager/api.go +++ b/management/internals/modules/reverseproxy/domain/manager/api.go @@ -42,10 +42,11 @@ func domainTypeToApi(t domain.Type) api.ReverseProxyDomainType { func domainToApi(d *domain.Domain) api.ReverseProxyDomain { resp := api.ReverseProxyDomain{ - Domain: d.Domain, - Id: d.ID, - Type: domainTypeToApi(d.Type), - Validated: d.Validated, + Domain: d.Domain, + Id: d.ID, + Type: domainTypeToApi(d.Type), + Validated: d.Validated, + SupportsCustomPorts: d.SupportsCustomPorts, } if d.TargetCluster != "" { resp.TargetCluster = &d.TargetCluster diff --git a/management/internals/modules/reverseproxy/domain/manager/manager.go b/management/internals/modules/reverseproxy/domain/manager/manager.go index 8bbc98726..813027ea2 100644 --- a/management/internals/modules/reverseproxy/domain/manager/manager.go +++ b/management/internals/modules/reverseproxy/domain/manager/manager.go @@ -33,11 +33,16 @@ type proxyManager interface { GetActiveClusterAddresses(ctx context.Context) ([]string, error) } +type clusterCapabilities interface { + ClusterSupportsCustomPorts(clusterAddr string) *bool +} + type Manager struct { - store store - validator domain.Validator - proxyManager proxyManager - permissionsManager permissions.Manager + store store + validator domain.Validator + proxyManager proxyManager + clusterCapabilities clusterCapabilities + permissionsManager permissions.Manager accountManager account.Manager } @@ -51,6 +56,11 @@ func NewManager(store store, proxyMgr proxyManager, permissionsManager permissio } } +// SetClusterCapabilities sets the cluster capabilities provider for domain queries. +func (m *Manager) SetClusterCapabilities(caps clusterCapabilities) { + m.clusterCapabilities = caps +} + func (m Manager) GetDomains(ctx context.Context, accountID, userID string) ([]*domain.Domain, error) { ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Read) if err != nil { @@ -80,24 +90,32 @@ func (m Manager) GetDomains(ctx context.Context, accountID, userID string) ([]*d }).Debug("getting domains with proxy allow list") for _, cluster := range allowList { - ret = append(ret, &domain.Domain{ + d := &domain.Domain{ Domain: cluster, AccountID: accountID, Type: domain.TypeFree, Validated: true, - }) + } + if m.clusterCapabilities != nil { + d.SupportsCustomPorts = m.clusterCapabilities.ClusterSupportsCustomPorts(cluster) + } + ret = append(ret, d) } // Add custom domains. for _, d := range domains { - ret = append(ret, &domain.Domain{ + cd := &domain.Domain{ ID: d.ID, Domain: d.Domain, AccountID: accountID, TargetCluster: d.TargetCluster, Type: domain.TypeCustom, Validated: d.Validated, - }) + } + if m.clusterCapabilities != nil && d.TargetCluster != "" { + cd.SupportsCustomPorts = m.clusterCapabilities.ClusterSupportsCustomPorts(d.TargetCluster) + } + ret = append(ret, cd) } return ret, nil @@ -298,7 +316,7 @@ func extractClusterFromCustomDomains(domain string, customDomains []*domain.Doma // It matches the domain suffix against available clusters and returns the matching cluster. func ExtractClusterFromFreeDomain(domain string, availableClusters []string) (string, bool) { for _, cluster := range availableClusters { - if strings.HasSuffix(domain, "."+cluster) { + if domain == cluster || strings.HasSuffix(domain, "."+cluster) { return cluster, true } } diff --git a/management/internals/modules/reverseproxy/proxy/manager.go b/management/internals/modules/reverseproxy/proxy/manager.go index 15f2f9f54..67a8e74fa 100644 --- a/management/internals/modules/reverseproxy/proxy/manager.go +++ b/management/internals/modules/reverseproxy/proxy/manager.go @@ -33,4 +33,5 @@ type Controller interface { RegisterProxyToCluster(ctx context.Context, clusterAddr, proxyID string) error UnregisterProxyFromCluster(ctx context.Context, clusterAddr, proxyID string) error GetProxiesForCluster(clusterAddr string) []string + ClusterSupportsCustomPorts(clusterAddr string) *bool } diff --git a/management/internals/modules/reverseproxy/proxy/manager/controller.go b/management/internals/modules/reverseproxy/proxy/manager/controller.go index e5b3e9886..acb49c45b 100644 --- a/management/internals/modules/reverseproxy/proxy/manager/controller.go +++ b/management/internals/modules/reverseproxy/proxy/manager/controller.go @@ -72,6 +72,11 @@ func (c *GRPCController) UnregisterProxyFromCluster(ctx context.Context, cluster return nil } +// ClusterSupportsCustomPorts returns whether any proxy in the cluster supports custom ports. +func (c *GRPCController) ClusterSupportsCustomPorts(clusterAddr string) *bool { + return c.proxyGRPCServer.ClusterSupportsCustomPorts(clusterAddr) +} + // GetProxiesForCluster returns all proxy IDs registered for a specific cluster. func (c *GRPCController) GetProxiesForCluster(clusterAddr string) []string { proxySet, ok := c.clusterProxies.Load(clusterAddr) diff --git a/management/internals/modules/reverseproxy/proxy/manager_mock.go b/management/internals/modules/reverseproxy/proxy/manager_mock.go index d9645ba88..b07a21122 100644 --- a/management/internals/modules/reverseproxy/proxy/manager_mock.go +++ b/management/internals/modules/reverseproxy/proxy/manager_mock.go @@ -144,6 +144,20 @@ func (mr *MockControllerMockRecorder) GetOIDCValidationConfig() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOIDCValidationConfig", reflect.TypeOf((*MockController)(nil).GetOIDCValidationConfig)) } +// ClusterSupportsCustomPorts mocks base method. +func (m *MockController) ClusterSupportsCustomPorts(clusterAddr string) *bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClusterSupportsCustomPorts", clusterAddr) + ret0, _ := ret[0].(*bool) + return ret0 +} + +// ClusterSupportsCustomPorts indicates an expected call of ClusterSupportsCustomPorts. +func (mr *MockControllerMockRecorder) ClusterSupportsCustomPorts(clusterAddr interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterSupportsCustomPorts", reflect.TypeOf((*MockController)(nil).ClusterSupportsCustomPorts), clusterAddr) +} + // GetProxiesForCluster mocks base method. func (m *MockController) GetProxiesForCluster(clusterAddr string) []string { m.ctrl.T.Helper() diff --git a/management/internals/modules/reverseproxy/service/interface.go b/management/internals/modules/reverseproxy/service/interface.go index b420f22a8..39fd7e3ae 100644 --- a/management/internals/modules/reverseproxy/service/interface.go +++ b/management/internals/modules/reverseproxy/service/interface.go @@ -22,7 +22,7 @@ type Manager interface { GetAccountServices(ctx context.Context, accountID string) ([]*Service, error) GetServiceIDByTargetID(ctx context.Context, accountID string, resourceID string) (string, error) CreateServiceFromPeer(ctx context.Context, accountID, peerID string, req *ExposeServiceRequest) (*ExposeServiceResponse, error) - RenewServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error - StopServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error + RenewServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error + StopServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error StartExposeReaper(ctx context.Context) } diff --git a/management/internals/modules/reverseproxy/service/interface_mock.go b/management/internals/modules/reverseproxy/service/interface_mock.go index 727b2c7de..bdc1f3e65 100644 --- a/management/internals/modules/reverseproxy/service/interface_mock.go +++ b/management/internals/modules/reverseproxy/service/interface_mock.go @@ -211,17 +211,17 @@ func (mr *MockManagerMockRecorder) ReloadService(ctx, accountID, serviceID inter } // RenewServiceFromPeer mocks base method. -func (m *MockManager) RenewServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error { +func (m *MockManager) RenewServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RenewServiceFromPeer", ctx, accountID, peerID, domain) + ret := m.ctrl.Call(m, "RenewServiceFromPeer", ctx, accountID, peerID, serviceID) ret0, _ := ret[0].(error) return ret0 } // RenewServiceFromPeer indicates an expected call of RenewServiceFromPeer. -func (mr *MockManagerMockRecorder) RenewServiceFromPeer(ctx, accountID, peerID, domain interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RenewServiceFromPeer(ctx, accountID, peerID, serviceID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RenewServiceFromPeer", reflect.TypeOf((*MockManager)(nil).RenewServiceFromPeer), ctx, accountID, peerID, domain) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RenewServiceFromPeer", reflect.TypeOf((*MockManager)(nil).RenewServiceFromPeer), ctx, accountID, peerID, serviceID) } // SetCertificateIssuedAt mocks base method. @@ -265,17 +265,17 @@ func (mr *MockManagerMockRecorder) StartExposeReaper(ctx interface{}) *gomock.Ca } // StopServiceFromPeer mocks base method. -func (m *MockManager) StopServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error { +func (m *MockManager) StopServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StopServiceFromPeer", ctx, accountID, peerID, domain) + ret := m.ctrl.Call(m, "StopServiceFromPeer", ctx, accountID, peerID, serviceID) ret0, _ := ret[0].(error) return ret0 } // StopServiceFromPeer indicates an expected call of StopServiceFromPeer. -func (mr *MockManagerMockRecorder) StopServiceFromPeer(ctx, accountID, peerID, domain interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) StopServiceFromPeer(ctx, accountID, peerID, serviceID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopServiceFromPeer", reflect.TypeOf((*MockManager)(nil).StopServiceFromPeer), ctx, accountID, peerID, domain) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopServiceFromPeer", reflect.TypeOf((*MockManager)(nil).StopServiceFromPeer), ctx, accountID, peerID, serviceID) } // UpdateService mocks base method. diff --git a/management/internals/modules/reverseproxy/service/manager/api.go b/management/internals/modules/reverseproxy/service/manager/api.go index f28b633b8..c53219d2e 100644 --- a/management/internals/modules/reverseproxy/service/manager/api.go +++ b/management/internals/modules/reverseproxy/service/manager/api.go @@ -11,19 +11,22 @@ import ( domainmanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain/manager" rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" nbcontext "github.com/netbirdio/netbird/management/server/context" + "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/shared/management/http/api" "github.com/netbirdio/netbird/shared/management/http/util" "github.com/netbirdio/netbird/shared/management/status" ) type handler struct { - manager rpservice.Manager + manager rpservice.Manager + permissionsManager permissions.Manager } // RegisterEndpoints registers all service HTTP endpoints. -func RegisterEndpoints(manager rpservice.Manager, domainManager domainmanager.Manager, accessLogsManager accesslogs.Manager, router *mux.Router) { +func RegisterEndpoints(manager rpservice.Manager, domainManager domainmanager.Manager, accessLogsManager accesslogs.Manager, permissionsManager permissions.Manager, router *mux.Router) { h := &handler{ - manager: manager, + manager: manager, + permissionsManager: permissionsManager, } domainRouter := router.PathPrefix("/reverse-proxies").Subrouter() diff --git a/management/internals/modules/reverseproxy/service/manager/expose_tracker_test.go b/management/internals/modules/reverseproxy/service/manager/expose_tracker_test.go index c831b4a22..6ff8343b9 100644 --- a/management/internals/modules/reverseproxy/service/manager/expose_tracker_test.go +++ b/management/internals/modules/reverseproxy/service/manager/expose_tracker_test.go @@ -18,8 +18,8 @@ func TestReapExpiredExposes(t *testing.T) { ctx := context.Background() resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ - Port: 8080, - Protocol: "http", + Port: 8080, + Mode: "http", }) require.NoError(t, err) @@ -28,8 +28,8 @@ func TestReapExpiredExposes(t *testing.T) { // Create a non-expired service resp2, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ - Port: 8081, - Protocol: "http", + Port: 8081, + Mode: "http", }) require.NoError(t, err) @@ -49,15 +49,16 @@ func TestReapAlreadyDeletedService(t *testing.T) { ctx := context.Background() resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ - Port: 8080, - Protocol: "http", + Port: 8080, + Mode: "http", }) require.NoError(t, err) expireEphemeralService(t, testStore, testAccountID, resp.Domain) // Delete the service before reaping - err = mgr.StopServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain) + svcID := resolveServiceIDByDomain(t, testStore, resp.Domain) + err = mgr.StopServiceFromPeer(ctx, testAccountID, testPeerID, svcID) require.NoError(t, err) // Reaping should handle the already-deleted service gracefully @@ -70,8 +71,8 @@ func TestConcurrentReapAndRenew(t *testing.T) { for i := range 5 { _, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ - Port: 8080 + i, - Protocol: "http", + Port: uint16(8080 + i), + Mode: "http", }) require.NoError(t, err) } @@ -108,17 +109,19 @@ func TestRenewEphemeralService(t *testing.T) { t.Run("renew succeeds for active service", func(t *testing.T) { resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ - Port: 8082, - Protocol: "http", + Port: 8082, + Mode: "http", }) require.NoError(t, err) - err = mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain) + svc, lookupErr := mgr.store.GetServiceByDomain(ctx, resp.Domain) + require.NoError(t, lookupErr) + err = mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, svc.ID) require.NoError(t, err) }) t.Run("renew fails for nonexistent domain", func(t *testing.T) { - err := mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, "nonexistent.com") + err := mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, "nonexistent-service-id") require.Error(t, err) assert.Contains(t, err.Error(), "no active expose session") }) @@ -133,8 +136,8 @@ func TestCountAndExistsEphemeralServices(t *testing.T) { assert.Equal(t, int64(0), count) resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ - Port: 8083, - Protocol: "http", + Port: 8083, + Mode: "http", }) require.NoError(t, err) @@ -157,15 +160,15 @@ func TestMaxExposesPerPeerEnforced(t *testing.T) { for i := range maxExposesPerPeer { _, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ - Port: 8090 + i, - Protocol: "http", + Port: uint16(8090 + i), + Mode: "http", }) require.NoError(t, err, "expose %d should succeed", i) } _, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ - Port: 9999, - Protocol: "http", + Port: 9999, + Mode: "http", }) require.Error(t, err) assert.Contains(t, err.Error(), "maximum number of active expose sessions") @@ -176,8 +179,8 @@ func TestReapSkipsRenewedService(t *testing.T) { ctx := context.Background() resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ - Port: 8086, - Protocol: "http", + Port: 8086, + Mode: "http", }) require.NoError(t, err) @@ -185,7 +188,9 @@ func TestReapSkipsRenewedService(t *testing.T) { expireEphemeralService(t, testStore, testAccountID, resp.Domain) // Renew it before the reaper runs - err = mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain) + svc, err := testStore.GetServiceByDomain(ctx, resp.Domain) + require.NoError(t, err) + err = mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, svc.ID) require.NoError(t, err) // Reaper should skip it because the re-check sees a fresh timestamp @@ -195,6 +200,14 @@ func TestReapSkipsRenewedService(t *testing.T) { require.NoError(t, err, "renewed service should survive reaping") } +// resolveServiceIDByDomain looks up a service ID by domain in tests. +func resolveServiceIDByDomain(t *testing.T, s store.Store, domain string) string { + t.Helper() + svc, err := s.GetServiceByDomain(context.Background(), domain) + require.NoError(t, err) + return svc.ID +} + // expireEphemeralService backdates meta_last_renewed_at to force expiration. func expireEphemeralService(t *testing.T, s store.Store, accountID, domain string) { t.Helper() diff --git a/management/internals/modules/reverseproxy/service/manager/l4_port_test.go b/management/internals/modules/reverseproxy/service/manager/l4_port_test.go new file mode 100644 index 000000000..c7a61ddcf --- /dev/null +++ b/management/internals/modules/reverseproxy/service/manager/l4_port_test.go @@ -0,0 +1,582 @@ +package manager + +import ( + "context" + "net" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" + rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" + "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/management/server/mock_server" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/permissions" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" +) + +const testCluster = "test-cluster" + +func boolPtr(v bool) *bool { return &v } + +// setupL4Test creates a manager with a mock proxy controller for L4 port tests. +func setupL4Test(t *testing.T, customPortsSupported *bool) (*Manager, store.Store, *proxy.MockController) { + t.Helper() + + ctrl := gomock.NewController(t) + + ctx := context.Background() + testStore, cleanup, err := store.NewTestStoreFromSQL(ctx, "", t.TempDir()) + require.NoError(t, err) + t.Cleanup(cleanup) + + err = testStore.SaveAccount(ctx, &types.Account{ + Id: testAccountID, + CreatedBy: testUserID, + Settings: &types.Settings{ + PeerExposeEnabled: true, + PeerExposeGroups: []string{testGroupID}, + }, + Users: map[string]*types.User{ + testUserID: { + Id: testUserID, + AccountID: testAccountID, + Role: types.UserRoleAdmin, + }, + }, + Peers: map[string]*nbpeer.Peer{ + testPeerID: { + ID: testPeerID, + AccountID: testAccountID, + Key: "test-key", + DNSLabel: "test-peer", + Name: "test-peer", + IP: net.ParseIP("100.64.0.1"), + Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, + Meta: nbpeer.PeerSystemMeta{Hostname: "test-peer"}, + }, + }, + Groups: map[string]*types.Group{ + testGroupID: { + ID: testGroupID, + AccountID: testAccountID, + Name: "Expose Group", + }, + }, + }) + require.NoError(t, err) + + err = testStore.AddPeerToGroup(ctx, testAccountID, testPeerID, testGroupID) + require.NoError(t, err) + + mockCtrl := proxy.NewMockController(ctrl) + mockCtrl.EXPECT().ClusterSupportsCustomPorts(gomock.Any()).Return(customPortsSupported).AnyTimes() + mockCtrl.EXPECT().SendServiceUpdateToCluster(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + mockCtrl.EXPECT().GetOIDCValidationConfig().Return(proxy.OIDCValidationConfig{}).AnyTimes() + + accountMgr := &mock_server.MockAccountManager{ + StoreEventFunc: func(_ context.Context, _, _, _ string, _ activity.ActivityDescriber, _ map[string]any) {}, + UpdateAccountPeersFunc: func(_ context.Context, _ string) {}, + GetGroupByNameFunc: func(ctx context.Context, accountID, groupName string) (*types.Group, error) { + return testStore.GetGroupByName(ctx, store.LockingStrengthNone, groupName, accountID) + }, + } + + mgr := &Manager{ + store: testStore, + accountManager: accountMgr, + permissionsManager: permissions.NewManager(testStore), + proxyController: mockCtrl, + clusterDeriver: &testClusterDeriver{domains: []string{"test.netbird.io"}}, + } + mgr.exposeReaper = &exposeReaper{manager: mgr} + + return mgr, testStore, mockCtrl +} + +// seedService creates a service directly in the store for test setup. +func seedService(t *testing.T, s store.Store, name, protocol, domain, cluster string, port uint16) *rpservice.Service { + t.Helper() + + svc := &rpservice.Service{ + AccountID: testAccountID, + Name: name, + Mode: protocol, + Domain: domain, + ProxyCluster: cluster, + ListenPort: port, + Enabled: true, + Source: "permanent", + Targets: []*rpservice.Target{ + {AccountID: testAccountID, TargetId: testPeerID, TargetType: rpservice.TargetTypePeer, Protocol: protocol, Port: 8080, Enabled: true}, + }, + } + svc.InitNewRecord() + err := s.CreateService(context.Background(), svc) + require.NoError(t, err) + return svc +} + +func TestPortConflict_TCPSamePortCluster(t *testing.T) { + mgr, testStore, _ := setupL4Test(t, boolPtr(true)) + ctx := context.Background() + + seedService(t, testStore, "existing-tcp", "tcp", testCluster, testCluster, 5432) + + svc := &rpservice.Service{ + AccountID: testAccountID, + Name: "conflicting-tcp", + Mode: "tcp", + Domain: "conflicting-tcp." + testCluster, + ProxyCluster: testCluster, + ListenPort: 5432, + Enabled: true, + Source: "permanent", + Targets: []*rpservice.Target{ + {AccountID: testAccountID, TargetId: testPeerID, TargetType: rpservice.TargetTypePeer, Protocol: "tcp", Port: 9090, Enabled: true}, + }, + } + svc.InitNewRecord() + + err := mgr.persistNewService(ctx, testAccountID, svc) + require.Error(t, err, "TCP+TCP on same port/cluster should be rejected") + assert.Contains(t, err.Error(), "already in use") +} + +func TestPortConflict_UDPSamePortCluster(t *testing.T) { + mgr, testStore, _ := setupL4Test(t, boolPtr(true)) + ctx := context.Background() + + seedService(t, testStore, "existing-udp", "udp", testCluster, testCluster, 5432) + + svc := &rpservice.Service{ + AccountID: testAccountID, + Name: "conflicting-udp", + Mode: "udp", + Domain: "conflicting-udp." + testCluster, + ProxyCluster: testCluster, + ListenPort: 5432, + Enabled: true, + Source: "permanent", + Targets: []*rpservice.Target{ + {AccountID: testAccountID, TargetId: testPeerID, TargetType: rpservice.TargetTypePeer, Protocol: "udp", Port: 9090, Enabled: true}, + }, + } + svc.InitNewRecord() + + err := mgr.persistNewService(ctx, testAccountID, svc) + require.Error(t, err, "UDP+UDP on same port/cluster should be rejected") + assert.Contains(t, err.Error(), "already in use") +} + +func TestPortConflict_TLSSamePortDifferentDomain(t *testing.T) { + mgr, testStore, _ := setupL4Test(t, boolPtr(true)) + ctx := context.Background() + + seedService(t, testStore, "existing-tls", "tls", "app1.example.com", testCluster, 443) + + svc := &rpservice.Service{ + AccountID: testAccountID, + Name: "new-tls", + Mode: "tls", + Domain: "app2.example.com", + ProxyCluster: testCluster, + ListenPort: 443, + Enabled: true, + Source: "permanent", + Targets: []*rpservice.Target{ + {AccountID: testAccountID, TargetId: testPeerID, TargetType: rpservice.TargetTypePeer, Protocol: "tcp", Port: 8443, Enabled: true}, + }, + } + svc.InitNewRecord() + + err := mgr.persistNewService(ctx, testAccountID, svc) + assert.NoError(t, err, "TLS+TLS on same port with different domains should be allowed (SNI routing)") +} + +func TestPortConflict_TLSSamePortSameDomain(t *testing.T) { + mgr, testStore, _ := setupL4Test(t, boolPtr(true)) + ctx := context.Background() + + seedService(t, testStore, "existing-tls", "tls", "app.example.com", testCluster, 443) + + svc := &rpservice.Service{ + AccountID: testAccountID, + Name: "duplicate-tls", + Mode: "tls", + Domain: "app.example.com", + ProxyCluster: testCluster, + ListenPort: 443, + Enabled: true, + Source: "permanent", + Targets: []*rpservice.Target{ + {AccountID: testAccountID, TargetId: testPeerID, TargetType: rpservice.TargetTypePeer, Protocol: "tcp", Port: 8443, Enabled: true}, + }, + } + svc.InitNewRecord() + + err := mgr.persistNewService(ctx, testAccountID, svc) + require.Error(t, err, "TLS+TLS on same domain should be rejected") + assert.Contains(t, err.Error(), "domain already taken") +} + +func TestPortConflict_TLSAndTCPSamePort(t *testing.T) { + mgr, testStore, _ := setupL4Test(t, boolPtr(true)) + ctx := context.Background() + + seedService(t, testStore, "existing-tls", "tls", "app.example.com", testCluster, 443) + + svc := &rpservice.Service{ + AccountID: testAccountID, + Name: "new-tcp", + Mode: "tcp", + Domain: testCluster, + ProxyCluster: testCluster, + ListenPort: 443, + Enabled: true, + Source: "permanent", + Targets: []*rpservice.Target{ + {AccountID: testAccountID, TargetId: testPeerID, TargetType: rpservice.TargetTypePeer, Protocol: "tcp", Port: 8080, Enabled: true}, + }, + } + svc.InitNewRecord() + + err := mgr.persistNewService(ctx, testAccountID, svc) + assert.NoError(t, err, "TLS+TCP on same port should be allowed (multiplexed)") +} + +func TestAutoAssign_TCPNoListenPort(t *testing.T) { + mgr, _, _ := setupL4Test(t, boolPtr(false)) + ctx := context.Background() + + svc := &rpservice.Service{ + AccountID: testAccountID, + Name: "auto-tcp", + Mode: "tcp", + Domain: testCluster, + ProxyCluster: testCluster, + ListenPort: 0, + Enabled: true, + Source: "permanent", + Targets: []*rpservice.Target{ + {AccountID: testAccountID, TargetId: testPeerID, TargetType: rpservice.TargetTypePeer, Protocol: "tcp", Port: 8080, Enabled: true}, + }, + } + svc.InitNewRecord() + + err := mgr.persistNewService(ctx, testAccountID, svc) + require.NoError(t, err) + assert.True(t, svc.ListenPort >= autoAssignPortMin && svc.ListenPort <= autoAssignPortMax, + "auto-assigned port %d should be in range [%d, %d]", svc.ListenPort, autoAssignPortMin, autoAssignPortMax) + assert.True(t, svc.PortAutoAssigned, "PortAutoAssigned should be set") +} + +func TestAutoAssign_TCPCustomPortRejectedWhenNotSupported(t *testing.T) { + mgr, _, _ := setupL4Test(t, boolPtr(false)) + ctx := context.Background() + + svc := &rpservice.Service{ + AccountID: testAccountID, + Name: "custom-tcp", + Mode: "tcp", + Domain: testCluster, + ProxyCluster: testCluster, + ListenPort: 5555, + Enabled: true, + Source: "permanent", + Targets: []*rpservice.Target{ + {AccountID: testAccountID, TargetId: testPeerID, TargetType: rpservice.TargetTypePeer, Protocol: "tcp", Port: 8080, Enabled: true}, + }, + } + svc.InitNewRecord() + + err := mgr.persistNewService(ctx, testAccountID, svc) + require.Error(t, err, "TCP with custom port should be rejected when cluster doesn't support it") + assert.Contains(t, err.Error(), "custom ports") +} + +func TestAutoAssign_TLSCustomPortAlwaysAllowed(t *testing.T) { + mgr, _, _ := setupL4Test(t, boolPtr(false)) + ctx := context.Background() + + svc := &rpservice.Service{ + AccountID: testAccountID, + Name: "custom-tls", + Mode: "tls", + Domain: "app.example.com", + ProxyCluster: testCluster, + ListenPort: 9999, + Enabled: true, + Source: "permanent", + Targets: []*rpservice.Target{ + {AccountID: testAccountID, TargetId: testPeerID, TargetType: rpservice.TargetTypePeer, Protocol: "tcp", Port: 8443, Enabled: true}, + }, + } + svc.InitNewRecord() + + err := mgr.persistNewService(ctx, testAccountID, svc) + assert.NoError(t, err, "TLS with custom port should always be allowed regardless of cluster capability") + assert.Equal(t, uint16(9999), svc.ListenPort, "TLS listen port should not be overridden") + assert.False(t, svc.PortAutoAssigned, "PortAutoAssigned should not be set for TLS") +} + +func TestAutoAssign_EphemeralOverridesPortWhenNotSupported(t *testing.T) { + mgr, _, _ := setupL4Test(t, boolPtr(false)) + ctx := context.Background() + + svc := &rpservice.Service{ + AccountID: testAccountID, + Name: "ephemeral-tcp", + Mode: "tcp", + Domain: testCluster, + ProxyCluster: testCluster, + ListenPort: 5555, + Enabled: true, + Source: "ephemeral", + SourcePeer: testPeerID, + Targets: []*rpservice.Target{ + {AccountID: testAccountID, TargetId: testPeerID, TargetType: rpservice.TargetTypePeer, Protocol: "tcp", Port: 8080, Enabled: true}, + }, + } + svc.InitNewRecord() + + err := mgr.persistNewEphemeralService(ctx, testAccountID, testPeerID, svc) + require.NoError(t, err) + assert.NotEqual(t, uint16(5555), svc.ListenPort, "requested port should be overridden") + assert.True(t, svc.ListenPort >= autoAssignPortMin && svc.ListenPort <= autoAssignPortMax, + "auto-assigned port %d should be in range", svc.ListenPort) + assert.True(t, svc.PortAutoAssigned) +} + +func TestAutoAssign_EphemeralTLSKeepsCustomPort(t *testing.T) { + mgr, _, _ := setupL4Test(t, boolPtr(false)) + ctx := context.Background() + + svc := &rpservice.Service{ + AccountID: testAccountID, + Name: "ephemeral-tls", + Mode: "tls", + Domain: "app.example.com", + ProxyCluster: testCluster, + ListenPort: 9999, + Enabled: true, + Source: "ephemeral", + SourcePeer: testPeerID, + Targets: []*rpservice.Target{ + {AccountID: testAccountID, TargetId: testPeerID, TargetType: rpservice.TargetTypePeer, Protocol: "tcp", Port: 8443, Enabled: true}, + }, + } + svc.InitNewRecord() + + err := mgr.persistNewEphemeralService(ctx, testAccountID, testPeerID, svc) + require.NoError(t, err) + assert.Equal(t, uint16(9999), svc.ListenPort, "TLS listen port should not be overridden") + assert.False(t, svc.PortAutoAssigned) +} + +func TestAutoAssign_AvoidsExistingPorts(t *testing.T) { + mgr, testStore, _ := setupL4Test(t, boolPtr(true)) + ctx := context.Background() + + existingPort := uint16(20000) + seedService(t, testStore, "existing", "tcp", testCluster, testCluster, existingPort) + + svc := &rpservice.Service{ + AccountID: testAccountID, + Name: "auto-tcp", + Mode: "tcp", + Domain: "auto-tcp." + testCluster, + ProxyCluster: testCluster, + ListenPort: 0, + Enabled: true, + Source: "permanent", + Targets: []*rpservice.Target{ + {AccountID: testAccountID, TargetId: testPeerID, TargetType: rpservice.TargetTypePeer, Protocol: "tcp", Port: 8080, Enabled: true}, + }, + } + svc.InitNewRecord() + + err := mgr.persistNewService(ctx, testAccountID, svc) + require.NoError(t, err) + assert.NotEqual(t, existingPort, svc.ListenPort, "auto-assigned port should not collide with existing") + assert.True(t, svc.PortAutoAssigned) +} + +func TestAutoAssign_TCPCustomPortAllowedWhenSupported(t *testing.T) { + mgr, _, _ := setupL4Test(t, boolPtr(true)) + ctx := context.Background() + + svc := &rpservice.Service{ + AccountID: testAccountID, + Name: "custom-tcp", + Mode: "tcp", + Domain: testCluster, + ProxyCluster: testCluster, + ListenPort: 5555, + Enabled: true, + Source: "permanent", + Targets: []*rpservice.Target{ + {AccountID: testAccountID, TargetId: testPeerID, TargetType: rpservice.TargetTypePeer, Protocol: "tcp", Port: 8080, Enabled: true}, + }, + } + svc.InitNewRecord() + + err := mgr.persistNewService(ctx, testAccountID, svc) + require.NoError(t, err) + assert.Equal(t, uint16(5555), svc.ListenPort, "custom port should be preserved when supported") + assert.False(t, svc.PortAutoAssigned) +} + +func TestUpdate_PreservesExistingListenPort(t *testing.T) { + mgr, testStore, _ := setupL4Test(t, boolPtr(true)) + ctx := context.Background() + + existing := seedService(t, testStore, "tcp-svc", "tcp", testCluster, testCluster, 12345) + + updated := &rpservice.Service{ + ID: existing.ID, + AccountID: testAccountID, + Name: "tcp-svc-renamed", + Mode: "tcp", + Domain: testCluster, + ProxyCluster: testCluster, + ListenPort: 0, + Enabled: true, + Targets: []*rpservice.Target{ + {AccountID: testAccountID, TargetId: testPeerID, TargetType: rpservice.TargetTypePeer, Protocol: "tcp", Port: 9090, Enabled: true}, + }, + } + + _, err := mgr.persistServiceUpdate(ctx, testAccountID, updated) + require.NoError(t, err) + assert.Equal(t, uint16(12345), updated.ListenPort, "existing listen port should be preserved when update sends 0") +} + +func TestUpdate_AllowsPortChange(t *testing.T) { + mgr, testStore, _ := setupL4Test(t, boolPtr(true)) + ctx := context.Background() + + existing := seedService(t, testStore, "tcp-svc", "tcp", testCluster, testCluster, 12345) + + updated := &rpservice.Service{ + ID: existing.ID, + AccountID: testAccountID, + Name: "tcp-svc", + Mode: "tcp", + Domain: testCluster, + ProxyCluster: testCluster, + ListenPort: 54321, + Enabled: true, + Targets: []*rpservice.Target{ + {AccountID: testAccountID, TargetId: testPeerID, TargetType: rpservice.TargetTypePeer, Protocol: "tcp", Port: 9090, Enabled: true}, + }, + } + + _, err := mgr.persistServiceUpdate(ctx, testAccountID, updated) + require.NoError(t, err) + assert.Equal(t, uint16(54321), updated.ListenPort, "explicit port change should be applied") +} + +func TestCreateServiceFromPeer_TCP(t *testing.T) { + mgr, _, _ := setupL4Test(t, boolPtr(false)) + ctx := context.Background() + + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ + Port: 5432, + Mode: "tcp", + }) + require.NoError(t, err) + + assert.NotEmpty(t, resp.ServiceName) + assert.Contains(t, resp.Domain, ".test.netbird.io", "TCP uses unique subdomain") + assert.True(t, resp.PortAutoAssigned, "port should be auto-assigned when cluster doesn't support custom ports") + assert.Contains(t, resp.ServiceURL, "tcp://") +} + +func TestCreateServiceFromPeer_TCP_CustomPort(t *testing.T) { + mgr, _, _ := setupL4Test(t, boolPtr(true)) + ctx := context.Background() + + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ + Port: 5432, + Mode: "tcp", + ListenPort: 15432, + }) + require.NoError(t, err) + + assert.False(t, resp.PortAutoAssigned) + assert.Contains(t, resp.ServiceURL, ":15432") +} + +func TestCreateServiceFromPeer_TCP_DefaultListenPort(t *testing.T) { + mgr, _, _ := setupL4Test(t, boolPtr(true)) + ctx := context.Background() + + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ + Port: 5432, + Mode: "tcp", + }) + require.NoError(t, err) + + // When no explicit listen port, defaults to target port + assert.Contains(t, resp.ServiceURL, ":5432") + assert.False(t, resp.PortAutoAssigned) +} + +func TestCreateServiceFromPeer_TLS(t *testing.T) { + mgr, _, _ := setupL4Test(t, boolPtr(false)) + ctx := context.Background() + + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ + Port: 443, + Mode: "tls", + }) + require.NoError(t, err) + + assert.Contains(t, resp.Domain, ".test.netbird.io", "TLS uses subdomain") + assert.Contains(t, resp.ServiceURL, "tls://") + assert.Contains(t, resp.ServiceURL, ":443") + // TLS always keeps its port (not port-based protocol for auto-assign) + assert.False(t, resp.PortAutoAssigned) +} + +func TestCreateServiceFromPeer_TCP_StopAndRenew(t *testing.T) { + mgr, testStore, _ := setupL4Test(t, boolPtr(true)) + ctx := context.Background() + + resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ + Port: 8080, + Mode: "tcp", + }) + require.NoError(t, err) + + svcID := resolveServiceIDByDomain(t, testStore, resp.Domain) + + err = mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, svcID) + require.NoError(t, err) + + err = mgr.StopServiceFromPeer(ctx, testAccountID, testPeerID, svcID) + require.NoError(t, err) + + // Renew after stop should fail + err = mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, svcID) + require.Error(t, err) +} + +func TestCreateServiceFromPeer_L4_RejectsAuth(t *testing.T) { + mgr, _, _ := setupL4Test(t, boolPtr(true)) + ctx := context.Background() + + _, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ + Port: 8080, + Mode: "tcp", + Pin: "123456", + }) + require.Error(t, err) + assert.Contains(t, err.Error(), "authentication is not supported") +} diff --git a/management/internals/modules/reverseproxy/service/manager/manager.go b/management/internals/modules/reverseproxy/service/manager/manager.go index cae3d3bda..c40961fdc 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager.go +++ b/management/internals/modules/reverseproxy/service/manager/manager.go @@ -4,7 +4,9 @@ import ( "context" "fmt" "math/rand/v2" + "os" "slices" + "strconv" "time" log "github.com/sirupsen/logrus" @@ -23,6 +25,45 @@ import ( "github.com/netbirdio/netbird/shared/management/status" ) +const ( + defaultAutoAssignPortMin uint16 = 10000 + defaultAutoAssignPortMax uint16 = 49151 + + // EnvAutoAssignPortMin overrides the lower bound for auto-assigned L4 listen ports. + EnvAutoAssignPortMin = "NB_PROXY_PORT_MIN" + // EnvAutoAssignPortMax overrides the upper bound for auto-assigned L4 listen ports. + EnvAutoAssignPortMax = "NB_PROXY_PORT_MAX" +) + +var ( + autoAssignPortMin = defaultAutoAssignPortMin + autoAssignPortMax = defaultAutoAssignPortMax +) + +func init() { + autoAssignPortMin = portFromEnv(EnvAutoAssignPortMin, defaultAutoAssignPortMin) + autoAssignPortMax = portFromEnv(EnvAutoAssignPortMax, defaultAutoAssignPortMax) + if autoAssignPortMin > autoAssignPortMax { + log.Warnf("port range invalid: %s (%d) > %s (%d), using defaults", + EnvAutoAssignPortMin, autoAssignPortMin, EnvAutoAssignPortMax, autoAssignPortMax) + autoAssignPortMin = defaultAutoAssignPortMin + autoAssignPortMax = defaultAutoAssignPortMax + } +} + +func portFromEnv(key string, fallback uint16) uint16 { + val := os.Getenv(key) + if val == "" { + return fallback + } + n, err := strconv.ParseUint(val, 10, 16) + if err != nil { + log.Warnf("invalid %s value %q, using default %d: %v", key, val, fallback, err) + return fallback + } + return uint16(n) +} + const unknownHostPlaceholder = "unknown" // ClusterDeriver derives the proxy cluster from a domain. @@ -115,6 +156,7 @@ func (m *Manager) replaceHostByLookup(ctx context.Context, accountID string, s * return fmt.Errorf("unknown target type: %s", target.TargetType) } } + return nil } @@ -197,55 +239,19 @@ func (m *Manager) initializeServiceForCreate(ctx context.Context, accountID stri return nil } -func (m *Manager) persistNewService(ctx context.Context, accountID string, service *service.Service) error { +func (m *Manager) persistNewService(ctx context.Context, accountID string, svc *service.Service) error { return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { - if err := m.checkDomainAvailable(ctx, transaction, service.Domain, ""); err != nil { + if svc.Domain != "" { + if err := m.checkDomainAvailable(ctx, transaction, svc.Domain, ""); err != nil { + return err + } + } + + if err := m.ensureL4Port(ctx, transaction, svc); err != nil { return err } - if err := validateTargetReferences(ctx, transaction, accountID, service.Targets); err != nil { - return err - } - - if err := transaction.CreateService(ctx, service); err != nil { - return fmt.Errorf("failed to create service: %w", err) - } - - return nil - }) -} - -// persistNewEphemeralService creates an ephemeral service inside a single transaction -// that also enforces the duplicate and per-peer limit checks atomically. -// The count and exists queries use FOR UPDATE locking to serialize concurrent creates -// for the same peer, preventing the per-peer limit from being bypassed. -func (m *Manager) persistNewEphemeralService(ctx context.Context, accountID, peerID string, svc *service.Service) error { - return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { - // Lock the peer row to serialize concurrent creates for the same peer. - // Without this, when no ephemeral rows exist yet, FOR UPDATE on the services - // table returns no rows and acquires no locks, allowing concurrent inserts - // to bypass the per-peer limit. - if _, err := transaction.GetPeerByID(ctx, store.LockingStrengthUpdate, accountID, peerID); err != nil { - return fmt.Errorf("lock peer row: %w", err) - } - - exists, err := transaction.EphemeralServiceExists(ctx, store.LockingStrengthUpdate, accountID, peerID, svc.Domain) - if err != nil { - return fmt.Errorf("check existing expose: %w", err) - } - if exists { - return status.Errorf(status.AlreadyExists, "peer already has an active expose session for this domain") - } - - count, err := transaction.CountEphemeralServicesByPeer(ctx, store.LockingStrengthUpdate, accountID, peerID) - if err != nil { - return fmt.Errorf("count peer exposes: %w", err) - } - if count >= int64(maxExposesPerPeer) { - return status.Errorf(status.PreconditionFailed, "peer has reached the maximum number of active expose sessions (%d)", maxExposesPerPeer) - } - - if err := m.checkDomainAvailable(ctx, transaction, svc.Domain, ""); err != nil { + if err := m.checkPortConflict(ctx, transaction, svc); err != nil { return err } @@ -261,11 +267,155 @@ func (m *Manager) persistNewEphemeralService(ctx context.Context, accountID, pee }) } +// ensureL4Port auto-assigns a listen port when needed and validates cluster support. +func (m *Manager) ensureL4Port(ctx context.Context, tx store.Store, svc *service.Service) error { + if !service.IsL4Protocol(svc.Mode) { + return nil + } + customPorts := m.proxyController.ClusterSupportsCustomPorts(svc.ProxyCluster) + if service.IsPortBasedProtocol(svc.Mode) && svc.ListenPort > 0 && (customPorts == nil || !*customPorts) { + if svc.Source != service.SourceEphemeral { + return status.Errorf(status.InvalidArgument, "custom ports not supported on cluster %s", svc.ProxyCluster) + } + svc.ListenPort = 0 + } + if svc.ListenPort == 0 { + port, err := m.assignPort(ctx, tx, svc.ProxyCluster) + if err != nil { + return err + } + svc.ListenPort = port + svc.PortAutoAssigned = true + } + return nil +} + +// checkPortConflict rejects L4 services that would conflict on the same listener. +// For TCP/UDP: unique per cluster+protocol+port. +// For TLS: unique per cluster+port+domain (SNI routing allows sharing ports). +// Cross-protocol conflicts (TLS vs raw TCP) are intentionally not checked: +// the proxy router multiplexes TLS (via SNI) and raw TCP (via fallback) on the same listener. +func (m *Manager) checkPortConflict(ctx context.Context, transaction store.Store, svc *service.Service) error { + if !service.IsL4Protocol(svc.Mode) || svc.ListenPort == 0 { + return nil + } + + existing, err := transaction.GetServicesByClusterAndPort(ctx, store.LockingStrengthUpdate, svc.ProxyCluster, svc.Mode, svc.ListenPort) + if err != nil { + return fmt.Errorf("query port conflicts: %w", err) + } + for _, s := range existing { + if s.ID == svc.ID { + continue + } + // TLS services on the same port are allowed if they have different domains (SNI routing) + if svc.Mode == service.ModeTLS && s.Domain != svc.Domain { + continue + } + return status.Errorf(status.AlreadyExists, + "%s port %d is already in use by service %q on cluster %s", + svc.Mode, svc.ListenPort, s.Name, svc.ProxyCluster) + } + + return nil +} + +// assignPort picks a random available port on the cluster within the auto-assign range. +func (m *Manager) assignPort(ctx context.Context, tx store.Store, cluster string) (uint16, error) { + services, err := tx.GetServicesByCluster(ctx, store.LockingStrengthUpdate, cluster) + if err != nil { + return 0, fmt.Errorf("query cluster ports: %w", err) + } + + occupied := make(map[uint16]struct{}, len(services)) + for _, s := range services { + if s.ListenPort > 0 { + occupied[s.ListenPort] = struct{}{} + } + } + + portRange := int(autoAssignPortMax-autoAssignPortMin) + 1 + for range 100 { + port := autoAssignPortMin + uint16(rand.IntN(portRange)) + if _, taken := occupied[port]; !taken { + return port, nil + } + } + + for port := autoAssignPortMin; port <= autoAssignPortMax; port++ { + if _, taken := occupied[port]; !taken { + return port, nil + } + } + + return 0, status.Errorf(status.PreconditionFailed, "no available ports on cluster %s", cluster) +} + +// persistNewEphemeralService creates an ephemeral service inside a single transaction +// that also enforces the duplicate and per-peer limit checks atomically. +// The count and exists queries use FOR UPDATE locking to serialize concurrent creates +// for the same peer, preventing the per-peer limit from being bypassed. +func (m *Manager) persistNewEphemeralService(ctx context.Context, accountID, peerID string, svc *service.Service) error { + return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + if err := m.validateEphemeralPreconditions(ctx, transaction, accountID, peerID, svc); err != nil { + return err + } + + if err := m.ensureL4Port(ctx, transaction, svc); err != nil { + return err + } + + if err := m.checkPortConflict(ctx, transaction, svc); err != nil { + return err + } + + if err := validateTargetReferences(ctx, transaction, accountID, svc.Targets); err != nil { + return err + } + + if err := transaction.CreateService(ctx, svc); err != nil { + return fmt.Errorf("create service: %w", err) + } + + return nil + }) +} + +func (m *Manager) validateEphemeralPreconditions(ctx context.Context, transaction store.Store, accountID, peerID string, svc *service.Service) error { + // Lock the peer row to serialize concurrent creates for the same peer. + if _, err := transaction.GetPeerByID(ctx, store.LockingStrengthUpdate, accountID, peerID); err != nil { + return fmt.Errorf("lock peer row: %w", err) + } + + exists, err := transaction.EphemeralServiceExists(ctx, store.LockingStrengthUpdate, accountID, peerID, svc.Domain) + if err != nil { + return fmt.Errorf("check existing expose: %w", err) + } + if exists { + return status.Errorf(status.AlreadyExists, "peer already has an active expose session for this domain") + } + + if err := m.checkDomainAvailable(ctx, transaction, svc.Domain, ""); err != nil { + return err + } + + count, err := transaction.CountEphemeralServicesByPeer(ctx, store.LockingStrengthUpdate, accountID, peerID) + if err != nil { + return fmt.Errorf("count peer exposes: %w", err) + } + if count >= int64(maxExposesPerPeer) { + return status.Errorf(status.PreconditionFailed, "peer has reached the maximum number of active expose sessions (%d)", maxExposesPerPeer) + } + + return nil +} + +// checkDomainAvailable checks that no other service already uses this domain. func (m *Manager) checkDomainAvailable(ctx context.Context, transaction store.Store, domain, excludeServiceID string) error { existingService, err := transaction.GetServiceByDomain(ctx, domain) if err != nil { if sErr, ok := status.FromError(err); !ok || sErr.Type() != status.NotFound { - return fmt.Errorf("failed to check existing service: %w", err) + return fmt.Errorf("check existing service: %w", err) } return nil } @@ -322,6 +472,10 @@ func (m *Manager) persistServiceUpdate(ctx context.Context, accountID string, se return err } + if err := validateProtocolChange(existingService.Mode, service.Mode); err != nil { + return err + } + updateInfo.oldCluster = existingService.ProxyCluster updateInfo.domainChanged = existingService.Domain != service.Domain @@ -335,12 +489,18 @@ func (m *Manager) persistServiceUpdate(ctx context.Context, accountID string, se m.preserveExistingAuthSecrets(service, existingService) m.preserveServiceMetadata(service, existingService) + m.preserveListenPort(service, existingService) updateInfo.serviceEnabledChanged = existingService.Enabled != service.Enabled + if err := m.ensureL4Port(ctx, transaction, service); err != nil { + return err + } + if err := m.checkPortConflict(ctx, transaction, service); err != nil { + return err + } if err := validateTargetReferences(ctx, transaction, accountID, service.Targets); err != nil { return err } - if err := transaction.UpdateService(ctx, service); err != nil { return fmt.Errorf("update service: %w", err) } @@ -351,23 +511,39 @@ func (m *Manager) persistServiceUpdate(ctx context.Context, accountID string, se return &updateInfo, err } -func (m *Manager) handleDomainChange(ctx context.Context, transaction store.Store, accountID string, service *service.Service) error { - if err := m.checkDomainAvailable(ctx, transaction, service.Domain, service.ID); err != nil { +func (m *Manager) handleDomainChange(ctx context.Context, transaction store.Store, accountID string, svc *service.Service) error { + if err := m.checkDomainAvailable(ctx, transaction, svc.Domain, svc.ID); err != nil { return err } if m.clusterDeriver != nil { - newCluster, err := m.clusterDeriver.DeriveClusterFromDomain(ctx, accountID, service.Domain) + newCluster, err := m.clusterDeriver.DeriveClusterFromDomain(ctx, accountID, svc.Domain) if err != nil { - log.WithError(err).Warnf("could not derive cluster from domain %s", service.Domain) + log.WithError(err).Warnf("could not derive cluster from domain %s", svc.Domain) } else { - service.ProxyCluster = newCluster + svc.ProxyCluster = newCluster } } return nil } +// validateProtocolChange rejects mode changes on update. +// Only empty<->HTTP is allowed; all other transitions are rejected. +func validateProtocolChange(oldMode, newMode string) error { + if newMode == "" || newMode == oldMode { + return nil + } + if isHTTPFamily(oldMode) && isHTTPFamily(newMode) { + return nil + } + return status.Errorf(status.InvalidArgument, "cannot change mode from %q to %q", oldMode, newMode) +} + +func isHTTPFamily(mode string) bool { + return mode == "" || mode == "http" +} + func (m *Manager) preserveExistingAuthSecrets(service, existingService *service.Service) { if service.Auth.PasswordAuth != nil && service.Auth.PasswordAuth.Enabled && existingService.Auth.PasswordAuth != nil && existingService.Auth.PasswordAuth.Enabled && @@ -388,6 +564,13 @@ func (m *Manager) preserveServiceMetadata(service, existingService *service.Serv service.SessionPublicKey = existingService.SessionPublicKey } +func (m *Manager) preserveListenPort(svc, existing *service.Service) { + if existing.ListenPort > 0 && svc.ListenPort == 0 { + svc.ListenPort = existing.ListenPort + svc.PortAutoAssigned = existing.PortAutoAssigned + } +} + func (m *Manager) sendServiceUpdateNotifications(ctx context.Context, accountID string, s *service.Service, updateInfo *serviceUpdateInfo) { oidcCfg := m.proxyController.GetOIDCValidationConfig() @@ -675,6 +858,10 @@ func (m *Manager) validateExposePermission(ctx context.Context, accountID, peerI return status.Errorf(status.PermissionDenied, "peer is not in an allowed expose group") } +func (m *Manager) resolveDefaultDomain(serviceName string) (string, error) { + return m.buildRandomDomain(serviceName) +} + // CreateServiceFromPeer creates a service initiated by a peer expose request. // It validates the request, checks expose permissions, enforces the per-peer limit, // creates the service, and tracks it for TTL-based reaping. @@ -696,9 +883,9 @@ func (m *Manager) CreateServiceFromPeer(ctx context.Context, accountID, peerID s svc.Source = service.SourceEphemeral if svc.Domain == "" { - domain, err := m.buildRandomDomain(svc.Name) + domain, err := m.resolveDefaultDomain(svc.Name) if err != nil { - return nil, fmt.Errorf("build random domain for service %s: %w", svc.Name, err) + return nil, err } svc.Domain = domain } @@ -739,10 +926,16 @@ func (m *Manager) CreateServiceFromPeer(ctx context.Context, accountID, peerID s m.proxyController.SendServiceUpdateToCluster(ctx, accountID, svc.ToProtoMapping(service.Create, "", m.proxyController.GetOIDCValidationConfig()), svc.ProxyCluster) m.accountManager.UpdateAccountPeers(ctx, accountID) + serviceURL := "https://" + svc.Domain + if service.IsL4Protocol(svc.Mode) { + serviceURL = fmt.Sprintf("%s://%s:%d", svc.Mode, svc.Domain, svc.ListenPort) + } + return &service.ExposeServiceResponse{ - ServiceName: svc.Name, - ServiceURL: "https://" + svc.Domain, - Domain: svc.Domain, + ServiceName: svc.Name, + ServiceURL: serviceURL, + Domain: svc.Domain, + PortAutoAssigned: svc.PortAutoAssigned, }, nil } @@ -761,64 +954,47 @@ func (m *Manager) getGroupIDsFromNames(ctx context.Context, accountID string, gr return groupIDs, nil } -func (m *Manager) buildRandomDomain(name string) (string, error) { +func (m *Manager) getDefaultClusterDomain() (string, error) { if m.clusterDeriver == nil { - return "", fmt.Errorf("unable to get random domain") + return "", fmt.Errorf("unable to get cluster domain") } clusterDomains := m.clusterDeriver.GetClusterDomains() if len(clusterDomains) == 0 { - return "", fmt.Errorf("no cluster domains found for service %s", name) + return "", fmt.Errorf("no cluster domains available") } - index := rand.IntN(len(clusterDomains)) - domain := name + "." + clusterDomains[index] - return domain, nil + return clusterDomains[rand.IntN(len(clusterDomains))], nil +} + +func (m *Manager) buildRandomDomain(name string) (string, error) { + domain, err := m.getDefaultClusterDomain() + if err != nil { + return "", err + } + return name + "." + domain, nil } // RenewServiceFromPeer updates the DB timestamp for the peer's ephemeral service. -func (m *Manager) RenewServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error { - return m.store.RenewEphemeralService(ctx, accountID, peerID, domain) +func (m *Manager) RenewServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error { + return m.store.RenewEphemeralService(ctx, accountID, peerID, serviceID) } // StopServiceFromPeer stops a peer's active expose session by deleting the service from the DB. -func (m *Manager) StopServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error { - if err := m.deleteServiceFromPeer(ctx, accountID, peerID, domain, false); err != nil { - log.WithContext(ctx).Errorf("failed to delete peer-exposed service for domain %s: %v", domain, err) +func (m *Manager) StopServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string) error { + if err := m.deleteServiceFromPeer(ctx, accountID, peerID, serviceID, false); err != nil { + log.WithContext(ctx).Errorf("failed to delete peer-exposed service %s: %v", serviceID, err) return err } return nil } -// deleteServiceFromPeer deletes a peer-initiated service identified by domain. +// deleteServiceFromPeer deletes a peer-initiated service identified by service ID. // When expired is true, the activity is recorded as PeerServiceExposeExpired instead of PeerServiceUnexposed. -func (m *Manager) deleteServiceFromPeer(ctx context.Context, accountID, peerID, domain string, expired bool) error { - svc, err := m.lookupPeerService(ctx, accountID, peerID, domain) - if err != nil { - return err - } - +func (m *Manager) deleteServiceFromPeer(ctx context.Context, accountID, peerID, serviceID string, expired bool) error { activityCode := activity.PeerServiceUnexposed if expired { activityCode = activity.PeerServiceExposeExpired } - return m.deletePeerService(ctx, accountID, peerID, svc.ID, activityCode) -} - -// lookupPeerService finds a peer-initiated service by domain and validates ownership. -func (m *Manager) lookupPeerService(ctx context.Context, accountID, peerID, domain string) (*service.Service, error) { - svc, err := m.store.GetServiceByDomain(ctx, domain) - if err != nil { - return nil, err - } - - if svc.Source != service.SourceEphemeral { - return nil, status.Errorf(status.PermissionDenied, "cannot operate on API-created service via peer expose") - } - - if svc.SourcePeer != peerID { - return nil, status.Errorf(status.PermissionDenied, "cannot operate on service exposed by another peer") - } - - return svc, nil + return m.deletePeerService(ctx, accountID, peerID, serviceID, activityCode) } func (m *Manager) deletePeerService(ctx context.Context, accountID, peerID, serviceID string, activityCode activity.Activity) error { diff --git a/management/internals/modules/reverseproxy/service/manager/manager_test.go b/management/internals/modules/reverseproxy/service/manager/manager_test.go index ba4e1c805..d23c91017 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/service/manager/manager_test.go @@ -803,8 +803,8 @@ func TestCreateServiceFromPeer(t *testing.T) { mgr, testStore := setupIntegrationTest(t) req := &rpservice.ExposeServiceRequest{ - Port: 8080, - Protocol: "http", + Port: 8080, + Mode: "http", } resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, req) @@ -826,9 +826,9 @@ func TestCreateServiceFromPeer(t *testing.T) { mgr, _ := setupIntegrationTest(t) req := &rpservice.ExposeServiceRequest{ - Port: 80, - Protocol: "http", - Domain: "example.com", + Port: 80, + Mode: "http", + Domain: "example.com", } resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, req) @@ -847,8 +847,8 @@ func TestCreateServiceFromPeer(t *testing.T) { require.NoError(t, err) req := &rpservice.ExposeServiceRequest{ - Port: 8080, - Protocol: "http", + Port: 8080, + Mode: "http", } _, err = mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, req) @@ -860,8 +860,8 @@ func TestCreateServiceFromPeer(t *testing.T) { mgr, _ := setupIntegrationTest(t) req := &rpservice.ExposeServiceRequest{ - Port: 0, - Protocol: "http", + Port: 0, + Mode: "http", } _, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, req) @@ -878,62 +878,52 @@ func TestExposeServiceRequestValidate(t *testing.T) { }{ { name: "valid http request", - req: rpservice.ExposeServiceRequest{Port: 8080, Protocol: "http"}, + req: rpservice.ExposeServiceRequest{Port: 8080, Mode: "http"}, wantErr: "", }, { - name: "valid https request with pin", - req: rpservice.ExposeServiceRequest{Port: 443, Protocol: "https", Pin: "123456"}, - wantErr: "", + name: "https mode rejected", + req: rpservice.ExposeServiceRequest{Port: 443, Mode: "https", Pin: "123456"}, + wantErr: "unsupported mode", }, { name: "port zero rejected", - req: rpservice.ExposeServiceRequest{Port: 0, Protocol: "http"}, + req: rpservice.ExposeServiceRequest{Port: 0, Mode: "http"}, wantErr: "port must be between 1 and 65535", }, { - name: "negative port rejected", - req: rpservice.ExposeServiceRequest{Port: -1, Protocol: "http"}, - wantErr: "port must be between 1 and 65535", - }, - { - name: "port above 65535 rejected", - req: rpservice.ExposeServiceRequest{Port: 65536, Protocol: "http"}, - wantErr: "port must be between 1 and 65535", - }, - { - name: "unsupported protocol", - req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "tcp"}, - wantErr: "unsupported protocol", + name: "unsupported mode", + req: rpservice.ExposeServiceRequest{Port: 80, Mode: "ftp"}, + wantErr: "unsupported mode", }, { name: "invalid pin format", - req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "http", Pin: "abc"}, + req: rpservice.ExposeServiceRequest{Port: 80, Mode: "http", Pin: "abc"}, wantErr: "invalid pin", }, { name: "pin too short", - req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "http", Pin: "12345"}, + req: rpservice.ExposeServiceRequest{Port: 80, Mode: "http", Pin: "12345"}, wantErr: "invalid pin", }, { name: "valid 6-digit pin", - req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "http", Pin: "000000"}, + req: rpservice.ExposeServiceRequest{Port: 80, Mode: "http", Pin: "000000"}, wantErr: "", }, { name: "empty user group name", - req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "http", UserGroups: []string{"valid", ""}}, + req: rpservice.ExposeServiceRequest{Port: 80, Mode: "http", UserGroups: []string{"valid", ""}}, wantErr: "user group name cannot be empty", }, { name: "invalid name prefix", - req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "http", NamePrefix: "INVALID"}, + req: rpservice.ExposeServiceRequest{Port: 80, Mode: "http", NamePrefix: "INVALID"}, wantErr: "invalid name prefix", }, { name: "valid name prefix", - req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "http", NamePrefix: "my-service"}, + req: rpservice.ExposeServiceRequest{Port: 80, Mode: "http", NamePrefix: "my-service"}, wantErr: "", }, } @@ -966,14 +956,14 @@ func TestDeleteServiceFromPeer_ByDomain(t *testing.T) { // First create a service req := &rpservice.ExposeServiceRequest{ - Port: 8080, - Protocol: "http", + Port: 8080, + Mode: "http", } resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, req) require.NoError(t, err) - // Delete by domain using unexported method - err = mgr.deleteServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain, false) + svcID := resolveServiceIDByDomain(t, testStore, resp.Domain) + err = mgr.deleteServiceFromPeer(ctx, testAccountID, testPeerID, svcID, false) require.NoError(t, err) // Verify service is deleted @@ -982,16 +972,17 @@ func TestDeleteServiceFromPeer_ByDomain(t *testing.T) { }) t.Run("expire uses correct activity", func(t *testing.T) { - mgr, _ := setupIntegrationTest(t) + mgr, testStore := setupIntegrationTest(t) req := &rpservice.ExposeServiceRequest{ - Port: 8080, - Protocol: "http", + Port: 8080, + Mode: "http", } resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, req) require.NoError(t, err) - err = mgr.deleteServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain, true) + svcID := resolveServiceIDByDomain(t, testStore, resp.Domain) + err = mgr.deleteServiceFromPeer(ctx, testAccountID, testPeerID, svcID, true) require.NoError(t, err) }) } @@ -1003,13 +994,14 @@ func TestStopServiceFromPeer(t *testing.T) { mgr, testStore := setupIntegrationTest(t) req := &rpservice.ExposeServiceRequest{ - Port: 8080, - Protocol: "http", + Port: 8080, + Mode: "http", } resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, req) require.NoError(t, err) - err = mgr.StopServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain) + svcID := resolveServiceIDByDomain(t, testStore, resp.Domain) + err = mgr.StopServiceFromPeer(ctx, testAccountID, testPeerID, svcID) require.NoError(t, err) _, err = testStore.GetServiceByDomain(ctx, resp.Domain) @@ -1022,8 +1014,8 @@ func TestDeleteService_DeletesEphemeralExpose(t *testing.T) { mgr, testStore := setupIntegrationTest(t) resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ - Port: 8080, - Protocol: "http", + Port: 8080, + Mode: "http", }) require.NoError(t, err) @@ -1042,8 +1034,8 @@ func TestDeleteService_DeletesEphemeralExpose(t *testing.T) { assert.Equal(t, int64(0), count, "ephemeral service should be deleted after API delete") _, err = mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ - Port: 9090, - Protocol: "http", + Port: 9090, + Mode: "http", }) assert.NoError(t, err, "new expose should succeed after API delete") } @@ -1054,8 +1046,8 @@ func TestDeleteAllServices_DeletesEphemeralExposes(t *testing.T) { for i := range 3 { _, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ - Port: 8080 + i, - Protocol: "http", + Port: uint16(8080 + i), + Mode: "http", }) require.NoError(t, err) } @@ -1076,21 +1068,22 @@ func TestRenewServiceFromPeer(t *testing.T) { ctx := context.Background() t.Run("renews tracked expose", func(t *testing.T) { - mgr, _ := setupIntegrationTest(t) + mgr, testStore := setupIntegrationTest(t) resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{ - Port: 8080, - Protocol: "http", + Port: 8080, + Mode: "http", }) require.NoError(t, err) - err = mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain) + svcID := resolveServiceIDByDomain(t, testStore, resp.Domain) + err = mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, svcID) require.NoError(t, err) }) t.Run("fails for untracked domain", func(t *testing.T) { mgr, _ := setupIntegrationTest(t) - err := mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, "nonexistent.com") + err := mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, "nonexistent-service-id") require.Error(t, err) }) } @@ -1191,3 +1184,33 @@ func TestDeleteService_DeletesTargets(t *testing.T) { require.NoError(t, err) assert.Len(t, targets, 0, "All targets should be deleted when service is deleted") } + +func TestValidateProtocolChange(t *testing.T) { + tests := []struct { + name string + oldP string + newP string + wantErr bool + }{ + {"empty to http", "", "http", false}, + {"http to http", "http", "http", false}, + {"same protocol", "tcp", "tcp", false}, + {"empty new proto", "tcp", "", false}, + {"http to tcp", "http", "tcp", true}, + {"tcp to udp", "tcp", "udp", true}, + {"tls to http", "tls", "http", true}, + {"udp to tls", "udp", "tls", true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := validateProtocolChange(tt.oldP, tt.newP) + if tt.wantErr { + require.Error(t, err) + assert.Contains(t, err.Error(), "cannot change mode") + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/management/internals/modules/reverseproxy/service/service.go b/management/internals/modules/reverseproxy/service/service.go index bfad7fe9a..623284404 100644 --- a/management/internals/modules/reverseproxy/service/service.go +++ b/management/internals/modules/reverseproxy/service/service.go @@ -34,6 +34,7 @@ const ( ) type Status string +type TargetType string const ( StatusPending Status = "pending" @@ -43,34 +44,36 @@ const ( StatusCertificateFailed Status = "certificate_failed" StatusError Status = "error" - TargetTypePeer = "peer" - TargetTypeHost = "host" - TargetTypeDomain = "domain" - TargetTypeSubnet = "subnet" + TargetTypePeer TargetType = "peer" + TargetTypeHost TargetType = "host" + TargetTypeDomain TargetType = "domain" + TargetTypeSubnet TargetType = "subnet" SourcePermanent = "permanent" SourceEphemeral = "ephemeral" ) type TargetOptions struct { - SkipTLSVerify bool `json:"skip_tls_verify"` - RequestTimeout time.Duration `json:"request_timeout,omitempty"` - PathRewrite PathRewriteMode `json:"path_rewrite,omitempty"` - CustomHeaders map[string]string `gorm:"serializer:json" json:"custom_headers,omitempty"` + SkipTLSVerify bool `json:"skip_tls_verify"` + RequestTimeout time.Duration `json:"request_timeout,omitempty"` + SessionIdleTimeout time.Duration `json:"session_idle_timeout,omitempty"` + PathRewrite PathRewriteMode `json:"path_rewrite,omitempty"` + CustomHeaders map[string]string `gorm:"serializer:json" json:"custom_headers,omitempty"` } type Target struct { - ID uint `gorm:"primaryKey" json:"-"` - AccountID string `gorm:"index:idx_target_account;not null" json:"-"` - ServiceID string `gorm:"index:idx_service_targets;not null" json:"-"` - Path *string `json:"path,omitempty"` - Host string `json:"host"` // the Host field is only used for subnet targets, otherwise ignored - Port int `gorm:"index:idx_target_port" json:"port"` - Protocol string `gorm:"index:idx_target_protocol" json:"protocol"` - TargetId string `gorm:"index:idx_target_id" json:"target_id"` - TargetType string `gorm:"index:idx_target_type" json:"target_type"` - Enabled bool `gorm:"index:idx_target_enabled" json:"enabled"` - Options TargetOptions `gorm:"embedded" json:"options"` + ID uint `gorm:"primaryKey" json:"-"` + AccountID string `gorm:"index:idx_target_account;not null" json:"-"` + ServiceID string `gorm:"index:idx_service_targets;not null" json:"-"` + Path *string `json:"path,omitempty"` + Host string `json:"host"` // the Host field is only used for subnet targets, otherwise ignored + Port uint16 `gorm:"index:idx_target_port" json:"port"` + Protocol string `gorm:"index:idx_target_protocol" json:"protocol"` + TargetId string `gorm:"index:idx_target_id" json:"target_id"` + TargetType TargetType `gorm:"index:idx_target_type" json:"target_type"` + Enabled bool `gorm:"index:idx_target_enabled" json:"enabled"` + Options TargetOptions `gorm:"embedded" json:"options"` + ProxyProtocol bool `json:"proxy_protocol"` } type PasswordAuthConfig struct { @@ -146,23 +149,10 @@ type Service struct { SessionPublicKey string `gorm:"column:session_public_key"` Source string `gorm:"default:'permanent';index:idx_service_source_peer"` SourcePeer string `gorm:"index:idx_service_source_peer"` -} - -func NewService(accountID, name, domain, proxyCluster string, targets []*Target, enabled bool) *Service { - for _, target := range targets { - target.AccountID = accountID - } - - s := &Service{ - AccountID: accountID, - Name: name, - Domain: domain, - ProxyCluster: proxyCluster, - Targets: targets, - Enabled: enabled, - } - s.InitNewRecord() - return s + // Mode determines the service type: "http", "tcp", "udp", or "tls". + Mode string `gorm:"default:'http'"` + ListenPort uint16 + PortAutoAssigned bool } // InitNewRecord generates a new unique ID and resets metadata for a newly created @@ -177,21 +167,17 @@ func (s *Service) InitNewRecord() { } func (s *Service) ToAPIResponse() *api.Service { - s.Auth.ClearSecrets() - authConfig := api.ServiceAuthConfig{} if s.Auth.PasswordAuth != nil { authConfig.PasswordAuth = &api.PasswordAuthConfig{ - Enabled: s.Auth.PasswordAuth.Enabled, - Password: s.Auth.PasswordAuth.Password, + Enabled: s.Auth.PasswordAuth.Enabled, } } if s.Auth.PinAuth != nil { authConfig.PinAuth = &api.PINAuthConfig{ Enabled: s.Auth.PinAuth.Enabled, - Pin: s.Auth.PinAuth.Pin, } } @@ -208,13 +194,18 @@ func (s *Service) ToAPIResponse() *api.Service { st := api.ServiceTarget{ Path: target.Path, Host: &target.Host, - Port: target.Port, + Port: int(target.Port), Protocol: api.ServiceTargetProtocol(target.Protocol), TargetId: target.TargetId, TargetType: api.ServiceTargetTargetType(target.TargetType), Enabled: target.Enabled, } - st.Options = targetOptionsToAPI(target.Options) + opts := targetOptionsToAPI(target.Options) + if opts == nil { + opts = &api.ServiceTargetOptions{} + } + opts.ProxyProtocol = &target.ProxyProtocol + st.Options = opts apiTargets = append(apiTargets, st) } @@ -227,6 +218,9 @@ func (s *Service) ToAPIResponse() *api.Service { meta.CertificateIssuedAt = s.Meta.CertificateIssuedAt } + mode := api.ServiceMode(s.Mode) + listenPort := int(s.ListenPort) + resp := &api.Service{ Id: s.ID, Name: s.Name, @@ -237,6 +231,9 @@ func (s *Service) ToAPIResponse() *api.Service { RewriteRedirects: &s.RewriteRedirects, Auth: authConfig, Meta: meta, + Mode: &mode, + ListenPort: &listenPort, + PortAutoAssigned: &s.PortAutoAssigned, } if s.ProxyCluster != "" { @@ -247,37 +244,7 @@ func (s *Service) ToAPIResponse() *api.Service { } func (s *Service) ToProtoMapping(operation Operation, authToken string, oidcConfig proxy.OIDCValidationConfig) *proto.ProxyMapping { - pathMappings := make([]*proto.PathMapping, 0, len(s.Targets)) - for _, target := range s.Targets { - if !target.Enabled { - continue - } - - // TODO: Make path prefix stripping configurable per-target. - // Currently the matching prefix is baked into the target URL path, - // so the proxy strips-then-re-adds it (effectively a no-op). - targetURL := url.URL{ - Scheme: target.Protocol, - Host: target.Host, - Path: "/", // TODO: support service path - } - if target.Port > 0 && !isDefaultPort(target.Protocol, target.Port) { - targetURL.Host = net.JoinHostPort(targetURL.Host, strconv.Itoa(target.Port)) - } - - path := "/" - if target.Path != nil { - path = *target.Path - } - - pm := &proto.PathMapping{ - Path: path, - Target: targetURL.String(), - } - - pm.Options = targetOptionsToProto(target.Options) - pathMappings = append(pathMappings, pm) - } + pathMappings := s.buildPathMappings() auth := &proto.Authentication{ SessionKey: s.SessionPublicKey, @@ -306,9 +273,58 @@ func (s *Service) ToProtoMapping(operation Operation, authToken string, oidcConf AccountId: s.AccountID, PassHostHeader: s.PassHostHeader, RewriteRedirects: s.RewriteRedirects, + Mode: s.Mode, + ListenPort: int32(s.ListenPort), //nolint:gosec } } +// buildPathMappings constructs PathMapping entries from targets. +// For HTTP/HTTPS, each target becomes a path-based route with a full URL. +// For L4/TLS, a single target maps to a host:port address. +func (s *Service) buildPathMappings() []*proto.PathMapping { + pathMappings := make([]*proto.PathMapping, 0, len(s.Targets)) + for _, target := range s.Targets { + if !target.Enabled { + continue + } + + if IsL4Protocol(s.Mode) { + pm := &proto.PathMapping{ + Target: net.JoinHostPort(target.Host, strconv.FormatUint(uint64(target.Port), 10)), + } + opts := l4TargetOptionsToProto(target) + if opts != nil { + pm.Options = opts + } + pathMappings = append(pathMappings, pm) + continue + } + + // HTTP/HTTPS: build full URL + targetURL := url.URL{ + Scheme: target.Protocol, + Host: target.Host, + Path: "/", + } + if target.Port > 0 && !isDefaultPort(target.Protocol, target.Port) { + targetURL.Host = net.JoinHostPort(targetURL.Host, strconv.FormatUint(uint64(target.Port), 10)) + } + + path := "/" + if target.Path != nil { + path = *target.Path + } + + pm := &proto.PathMapping{ + Path: path, + Target: targetURL.String(), + } + pm.Options = targetOptionsToProto(target.Options) + pathMappings = append(pathMappings, pm) + } + return pathMappings +} + func operationToProtoType(op Operation) proto.ProxyMappingUpdateType { switch op { case Create: @@ -325,8 +341,8 @@ func operationToProtoType(op Operation) proto.ProxyMappingUpdateType { // isDefaultPort reports whether port is the standard default for the given scheme // (443 for https, 80 for http). -func isDefaultPort(scheme string, port int) bool { - return (scheme == "https" && port == 443) || (scheme == "http" && port == 80) +func isDefaultPort(scheme string, port uint16) bool { + return (scheme == TargetProtoHTTPS && port == 443) || (scheme == TargetProtoHTTP && port == 80) } // PathRewriteMode controls how the request path is rewritten before forwarding. @@ -346,7 +362,7 @@ func pathRewriteToProto(mode PathRewriteMode) proto.PathRewriteMode { } func targetOptionsToAPI(opts TargetOptions) *api.ServiceTargetOptions { - if !opts.SkipTLSVerify && opts.RequestTimeout == 0 && opts.PathRewrite == "" && len(opts.CustomHeaders) == 0 { + if !opts.SkipTLSVerify && opts.RequestTimeout == 0 && opts.SessionIdleTimeout == 0 && opts.PathRewrite == "" && len(opts.CustomHeaders) == 0 { return nil } apiOpts := &api.ServiceTargetOptions{} @@ -357,6 +373,10 @@ func targetOptionsToAPI(opts TargetOptions) *api.ServiceTargetOptions { s := opts.RequestTimeout.String() apiOpts.RequestTimeout = &s } + if opts.SessionIdleTimeout != 0 { + s := opts.SessionIdleTimeout.String() + apiOpts.SessionIdleTimeout = &s + } if opts.PathRewrite != "" { pr := api.ServiceTargetOptionsPathRewrite(opts.PathRewrite) apiOpts.PathRewrite = &pr @@ -382,6 +402,23 @@ func targetOptionsToProto(opts TargetOptions) *proto.PathTargetOptions { return popts } +// l4TargetOptionsToProto converts L4-relevant target options to proto. +func l4TargetOptionsToProto(target *Target) *proto.PathTargetOptions { + if !target.ProxyProtocol && target.Options.RequestTimeout == 0 && target.Options.SessionIdleTimeout == 0 { + return nil + } + opts := &proto.PathTargetOptions{ + ProxyProtocol: target.ProxyProtocol, + } + if target.Options.RequestTimeout > 0 { + opts.RequestTimeout = durationpb.New(target.Options.RequestTimeout) + } + if target.Options.SessionIdleTimeout > 0 { + opts.SessionIdleTimeout = durationpb.New(target.Options.SessionIdleTimeout) + } + return opts +} + func targetOptionsFromAPI(idx int, o *api.ServiceTargetOptions) (TargetOptions, error) { var opts TargetOptions if o.SkipTlsVerify != nil { @@ -394,6 +431,13 @@ func targetOptionsFromAPI(idx int, o *api.ServiceTargetOptions) (TargetOptions, } opts.RequestTimeout = d } + if o.SessionIdleTimeout != nil { + d, err := time.ParseDuration(*o.SessionIdleTimeout) + if err != nil { + return opts, fmt.Errorf("target %d: parse session_idle_timeout %q: %w", idx, *o.SessionIdleTimeout, err) + } + opts.SessionIdleTimeout = d + } if o.PathRewrite != nil { opts.PathRewrite = PathRewriteMode(*o.PathRewrite) } @@ -408,15 +452,49 @@ func (s *Service) FromAPIRequest(req *api.ServiceRequest, accountID string) erro s.Domain = req.Domain s.AccountID = accountID - targets := make([]*Target, 0, len(req.Targets)) - for i, apiTarget := range req.Targets { + if req.Mode != nil { + s.Mode = string(*req.Mode) + } + if req.ListenPort != nil { + s.ListenPort = uint16(*req.ListenPort) //nolint:gosec + } + + targets, err := targetsFromAPI(accountID, req.Targets) + if err != nil { + return err + } + s.Targets = targets + s.Enabled = req.Enabled + + if req.PassHostHeader != nil { + s.PassHostHeader = *req.PassHostHeader + } + if req.RewriteRedirects != nil { + s.RewriteRedirects = *req.RewriteRedirects + } + + if req.Auth != nil { + s.Auth = authFromAPI(req.Auth) + } + + return nil +} + +func targetsFromAPI(accountID string, apiTargetsPtr *[]api.ServiceTarget) ([]*Target, error) { + var apiTargets []api.ServiceTarget + if apiTargetsPtr != nil { + apiTargets = *apiTargetsPtr + } + + targets := make([]*Target, 0, len(apiTargets)) + for i, apiTarget := range apiTargets { target := &Target{ AccountID: accountID, Path: apiTarget.Path, - Port: apiTarget.Port, + Port: uint16(apiTarget.Port), //nolint:gosec // validated by API layer Protocol: string(apiTarget.Protocol), TargetId: apiTarget.TargetId, - TargetType: string(apiTarget.TargetType), + TargetType: TargetType(apiTarget.TargetType), Enabled: apiTarget.Enabled, } if apiTarget.Host != nil { @@ -425,49 +503,42 @@ func (s *Service) FromAPIRequest(req *api.ServiceRequest, accountID string) erro if apiTarget.Options != nil { opts, err := targetOptionsFromAPI(i, apiTarget.Options) if err != nil { - return err + return nil, err } target.Options = opts + if apiTarget.Options.ProxyProtocol != nil { + target.ProxyProtocol = *apiTarget.Options.ProxyProtocol + } } targets = append(targets, target) } - s.Targets = targets + return targets, nil +} - s.Enabled = req.Enabled - - if req.PassHostHeader != nil { - s.PassHostHeader = *req.PassHostHeader - } - - if req.RewriteRedirects != nil { - s.RewriteRedirects = *req.RewriteRedirects - } - - if req.Auth.PasswordAuth != nil { - s.Auth.PasswordAuth = &PasswordAuthConfig{ - Enabled: req.Auth.PasswordAuth.Enabled, - Password: req.Auth.PasswordAuth.Password, +func authFromAPI(reqAuth *api.ServiceAuthConfig) AuthConfig { + var auth AuthConfig + if reqAuth.PasswordAuth != nil { + auth.PasswordAuth = &PasswordAuthConfig{ + Enabled: reqAuth.PasswordAuth.Enabled, + Password: reqAuth.PasswordAuth.Password, } } - - if req.Auth.PinAuth != nil { - s.Auth.PinAuth = &PINAuthConfig{ - Enabled: req.Auth.PinAuth.Enabled, - Pin: req.Auth.PinAuth.Pin, + if reqAuth.PinAuth != nil { + auth.PinAuth = &PINAuthConfig{ + Enabled: reqAuth.PinAuth.Enabled, + Pin: reqAuth.PinAuth.Pin, } } - - if req.Auth.BearerAuth != nil { + if reqAuth.BearerAuth != nil { bearerAuth := &BearerAuthConfig{ - Enabled: req.Auth.BearerAuth.Enabled, + Enabled: reqAuth.BearerAuth.Enabled, } - if req.Auth.BearerAuth.DistributionGroups != nil { - bearerAuth.DistributionGroups = *req.Auth.BearerAuth.DistributionGroups + if reqAuth.BearerAuth.DistributionGroups != nil { + bearerAuth.DistributionGroups = *reqAuth.BearerAuth.DistributionGroups } - s.Auth.BearerAuth = bearerAuth + auth.BearerAuth = bearerAuth } - - return nil + return auth } func (s *Service) Validate() error { @@ -478,14 +549,69 @@ func (s *Service) Validate() error { return errors.New("service name exceeds maximum length of 255 characters") } - if s.Domain == "" { - return errors.New("service domain is required") - } - if len(s.Targets) == 0 { return errors.New("at least one target is required") } + if s.Mode == "" { + s.Mode = ModeHTTP + } + + switch s.Mode { + case ModeHTTP: + return s.validateHTTPMode() + case ModeTCP, ModeUDP: + return s.validateTCPUDPMode() + case ModeTLS: + return s.validateTLSMode() + default: + return fmt.Errorf("unsupported mode %q", s.Mode) + } +} + +func (s *Service) validateHTTPMode() error { + if s.Domain == "" { + return errors.New("service domain is required") + } + if s.ListenPort != 0 { + return errors.New("listen_port is not supported for HTTP services") + } + return s.validateHTTPTargets() +} + +func (s *Service) validateTCPUDPMode() error { + if s.Domain == "" { + return errors.New("domain is required for TCP/UDP services (used for cluster derivation)") + } + if s.isAuthEnabled() { + return errors.New("auth is not supported for TCP/UDP services") + } + if len(s.Targets) != 1 { + return errors.New("TCP/UDP services must have exactly one target") + } + if s.Mode == ModeUDP && s.Targets[0].ProxyProtocol { + return errors.New("proxy_protocol is not supported for UDP services") + } + return s.validateL4Target(s.Targets[0]) +} + +func (s *Service) validateTLSMode() error { + if s.Domain == "" { + return errors.New("domain is required for TLS services (used for SNI matching)") + } + if s.isAuthEnabled() { + return errors.New("auth is not supported for TLS services") + } + if s.ListenPort == 0 { + return errors.New("listen_port is required for TLS services") + } + if len(s.Targets) != 1 { + return errors.New("TLS services must have exactly one target") + } + return s.validateL4Target(s.Targets[0]) +} + +func (s *Service) validateHTTPTargets() error { for i, target := range s.Targets { switch target.TargetType { case TargetTypePeer, TargetTypeHost, TargetTypeDomain: @@ -500,6 +626,9 @@ func (s *Service) Validate() error { if target.TargetId == "" { return fmt.Errorf("target %d has empty target_id", i) } + if target.ProxyProtocol { + return fmt.Errorf("target %d: proxy_protocol is not supported for HTTP services", i) + } if err := validateTargetOptions(i, &target.Options); err != nil { return err } @@ -508,11 +637,62 @@ func (s *Service) Validate() error { return nil } +func (s *Service) validateL4Target(target *Target) error { + if target.Port == 0 { + return errors.New("target port is required for L4 services") + } + if target.TargetId == "" { + return errors.New("target_id is required for L4 services") + } + switch target.TargetType { + case TargetTypePeer, TargetTypeHost: + // OK + case TargetTypeSubnet: + if target.Host == "" { + return errors.New("target host is required for subnet targets") + } + default: + return fmt.Errorf("invalid target_type %q for L4 service", target.TargetType) + } + if target.Path != nil && *target.Path != "" && *target.Path != "/" { + return errors.New("path is not supported for L4 services") + } + return nil +} + +// Service mode constants. const ( - maxRequestTimeout = 5 * time.Minute - maxCustomHeaders = 16 - maxHeaderKeyLen = 128 - maxHeaderValueLen = 4096 + ModeHTTP = "http" + ModeTCP = "tcp" + ModeUDP = "udp" + ModeTLS = "tls" +) + +// Target protocol constants (URL scheme for backend connections). +const ( + TargetProtoHTTP = "http" + TargetProtoHTTPS = "https" + TargetProtoTCP = "tcp" + TargetProtoUDP = "udp" +) + +// IsL4Protocol returns true if the mode requires port-based routing (TCP, UDP, or TLS). +func IsL4Protocol(mode string) bool { + return mode == ModeTCP || mode == ModeUDP || mode == ModeTLS +} + +// IsPortBasedProtocol returns true if the mode relies on dedicated port allocation. +// TLS is excluded because it uses SNI routing and can share ports with other TLS services. +func IsPortBasedProtocol(mode string) bool { + return mode == ModeTCP || mode == ModeUDP +} + +const ( + maxRequestTimeout = 5 * time.Minute + maxSessionIdleTimeout = 10 * time.Minute + maxCustomHeaders = 16 + maxHeaderKeyLen = 128 + maxHeaderValueLen = 4096 ) // httpHeaderNameRe matches valid HTTP header field names per RFC 7230 token definition. @@ -560,6 +740,15 @@ func validateTargetOptions(idx int, opts *TargetOptions) error { } } + if opts.SessionIdleTimeout != 0 { + if opts.SessionIdleTimeout <= 0 { + return fmt.Errorf("target %d: session_idle_timeout must be positive", idx) + } + if opts.SessionIdleTimeout > maxSessionIdleTimeout { + return fmt.Errorf("target %d: session_idle_timeout exceeds maximum of %s", idx, maxSessionIdleTimeout) + } + } + if err := validateCustomHeaders(idx, opts.CustomHeaders); err != nil { return err } @@ -608,17 +797,49 @@ func containsCRLF(s string) bool { } func (s *Service) EventMeta() map[string]any { - return map[string]any{"name": s.Name, "domain": s.Domain, "proxy_cluster": s.ProxyCluster, "source": s.Source, "auth": s.isAuthEnabled()} + meta := map[string]any{ + "name": s.Name, + "domain": s.Domain, + "proxy_cluster": s.ProxyCluster, + "source": s.Source, + "auth": s.isAuthEnabled(), + "mode": s.Mode, + } + + if s.ListenPort != 0 { + meta["listen_port"] = s.ListenPort + } + + if len(s.Targets) > 0 { + t := s.Targets[0] + if t.ProxyProtocol { + meta["proxy_protocol"] = true + } + if t.Options.RequestTimeout != 0 { + meta["request_timeout"] = t.Options.RequestTimeout.String() + } + if t.Options.SessionIdleTimeout != 0 { + meta["session_idle_timeout"] = t.Options.SessionIdleTimeout.String() + } + } + + return meta } func (s *Service) isAuthEnabled() bool { - return s.Auth.PasswordAuth != nil || s.Auth.PinAuth != nil || s.Auth.BearerAuth != nil + return (s.Auth.PasswordAuth != nil && s.Auth.PasswordAuth.Enabled) || + (s.Auth.PinAuth != nil && s.Auth.PinAuth.Enabled) || + (s.Auth.BearerAuth != nil && s.Auth.BearerAuth.Enabled) } func (s *Service) Copy() *Service { targets := make([]*Target, len(s.Targets)) for i, target := range s.Targets { targetCopy := *target + if target.Path != nil { + p := *target.Path + targetCopy.Path = &p + } if len(target.Options.CustomHeaders) > 0 { targetCopy.Options.CustomHeaders = make(map[string]string, len(target.Options.CustomHeaders)) for k, v := range target.Options.CustomHeaders { @@ -628,6 +849,24 @@ func (s *Service) Copy() *Service { targets[i] = &targetCopy } + authCopy := s.Auth + if s.Auth.PasswordAuth != nil { + pa := *s.Auth.PasswordAuth + authCopy.PasswordAuth = &pa + } + if s.Auth.PinAuth != nil { + pa := *s.Auth.PinAuth + authCopy.PinAuth = &pa + } + if s.Auth.BearerAuth != nil { + ba := *s.Auth.BearerAuth + if len(s.Auth.BearerAuth.DistributionGroups) > 0 { + ba.DistributionGroups = make([]string, len(s.Auth.BearerAuth.DistributionGroups)) + copy(ba.DistributionGroups, s.Auth.BearerAuth.DistributionGroups) + } + authCopy.BearerAuth = &ba + } + return &Service{ ID: s.ID, AccountID: s.AccountID, @@ -638,12 +877,15 @@ func (s *Service) Copy() *Service { Enabled: s.Enabled, PassHostHeader: s.PassHostHeader, RewriteRedirects: s.RewriteRedirects, - Auth: s.Auth, + Auth: authCopy, Meta: s.Meta, SessionPrivateKey: s.SessionPrivateKey, SessionPublicKey: s.SessionPublicKey, Source: s.Source, SourcePeer: s.SourcePeer, + Mode: s.Mode, + ListenPort: s.ListenPort, + PortAutoAssigned: s.PortAutoAssigned, } } @@ -688,12 +930,16 @@ var validNamePrefix = regexp.MustCompile(`^[a-z0-9]([a-z0-9-]{0,30}[a-z0-9])?$`) // ExposeServiceRequest contains the parameters for creating a peer-initiated expose service. type ExposeServiceRequest struct { NamePrefix string - Port int - Protocol string - Domain string - Pin string - Password string - UserGroups []string + Port uint16 + Mode string + // TargetProtocol is the protocol used to connect to the peer backend. + // For HTTP mode: "http" (default) or "https". For L4 modes: "tcp" or "udp". + TargetProtocol string + Domain string + Pin string + Password string + UserGroups []string + ListenPort uint16 } // Validate checks all fields of the expose request. @@ -702,12 +948,20 @@ func (r *ExposeServiceRequest) Validate() error { return errors.New("request cannot be nil") } - if r.Port < 1 || r.Port > 65535 { + if r.Port == 0 { return fmt.Errorf("port must be between 1 and 65535, got %d", r.Port) } - if r.Protocol != "http" && r.Protocol != "https" { - return fmt.Errorf("unsupported protocol %q: must be http or https", r.Protocol) + switch r.Mode { + case ModeHTTP, ModeTCP, ModeUDP, ModeTLS: + default: + return fmt.Errorf("unsupported mode %q", r.Mode) + } + + if IsL4Protocol(r.Mode) { + if r.Pin != "" || r.Password != "" || len(r.UserGroups) > 0 { + return fmt.Errorf("authentication is not supported for %s mode", r.Mode) + } } if r.Pin != "" && !pinRegexp.MatchString(r.Pin) { @@ -729,55 +983,79 @@ func (r *ExposeServiceRequest) Validate() error { // ToService builds a Service from the expose request. func (r *ExposeServiceRequest) ToService(accountID, peerID, serviceName string) *Service { - service := &Service{ + svc := &Service{ AccountID: accountID, Name: serviceName, + Mode: r.Mode, Enabled: true, - Targets: []*Target{ - { - AccountID: accountID, - Port: r.Port, - Protocol: r.Protocol, - TargetId: peerID, - TargetType: TargetTypePeer, - Enabled: true, - }, + } + + // If domain is empty, CreateServiceFromPeer generates a unique subdomain. + // When explicitly provided, the service name is prepended as a subdomain. + if r.Domain != "" { + svc.Domain = serviceName + "." + r.Domain + } + + if IsL4Protocol(r.Mode) { + svc.ListenPort = r.Port + if r.ListenPort > 0 { + svc.ListenPort = r.ListenPort + } + } + + var targetProto string + switch { + case !IsL4Protocol(r.Mode): + targetProto = TargetProtoHTTP + if r.TargetProtocol != "" { + targetProto = r.TargetProtocol + } + case r.Mode == ModeUDP: + targetProto = TargetProtoUDP + default: + targetProto = TargetProtoTCP + } + svc.Targets = []*Target{ + { + AccountID: accountID, + Port: r.Port, + Protocol: targetProto, + TargetId: peerID, + TargetType: TargetTypePeer, + Enabled: true, }, } - if r.Domain != "" { - service.Domain = serviceName + "." + r.Domain - } - if r.Pin != "" { - service.Auth.PinAuth = &PINAuthConfig{ + svc.Auth.PinAuth = &PINAuthConfig{ Enabled: true, Pin: r.Pin, } } if r.Password != "" { - service.Auth.PasswordAuth = &PasswordAuthConfig{ + svc.Auth.PasswordAuth = &PasswordAuthConfig{ Enabled: true, Password: r.Password, } } if len(r.UserGroups) > 0 { - service.Auth.BearerAuth = &BearerAuthConfig{ + svc.Auth.BearerAuth = &BearerAuthConfig{ Enabled: true, DistributionGroups: r.UserGroups, } } - return service + return svc } // ExposeServiceResponse contains the result of a successful peer expose creation. type ExposeServiceResponse struct { - ServiceName string - ServiceURL string - Domain string + ServiceName string + ServiceURL string + Domain string + PortAutoAssigned bool } // GenerateExposeName generates a random service name for peer-exposed services. diff --git a/management/internals/modules/reverseproxy/service/service_test.go b/management/internals/modules/reverseproxy/service/service_test.go index 79c98fc14..a8a8ae5d6 100644 --- a/management/internals/modules/reverseproxy/service/service_test.go +++ b/management/internals/modules/reverseproxy/service/service_test.go @@ -44,7 +44,7 @@ func TestValidate_EmptyDomain(t *testing.T) { func TestValidate_NoTargets(t *testing.T) { rp := validProxy() rp.Targets = nil - assert.ErrorContains(t, rp.Validate(), "at least one target") + assert.ErrorContains(t, rp.Validate(), "at least one target is required") } func TestValidate_EmptyTargetId(t *testing.T) { @@ -273,7 +273,7 @@ func TestToProtoMapping_NoOptionsWhenDefault(t *testing.T) { func TestIsDefaultPort(t *testing.T) { tests := []struct { scheme string - port int + port uint16 want bool }{ {"http", 80, true}, @@ -299,7 +299,7 @@ func TestToProtoMapping_PortInTargetURL(t *testing.T) { name string protocol string host string - port int + port uint16 wantTarget string }{ { @@ -645,8 +645,8 @@ func TestGenerateExposeName(t *testing.T) { func TestExposeServiceRequest_ToService(t *testing.T) { t.Run("basic HTTP service", func(t *testing.T) { req := &ExposeServiceRequest{ - Port: 8080, - Protocol: "http", + Port: 8080, + Mode: "http", } service := req.ToService("account-1", "peer-1", "mysvc") @@ -658,7 +658,7 @@ func TestExposeServiceRequest_ToService(t *testing.T) { require.Len(t, service.Targets, 1) target := service.Targets[0] - assert.Equal(t, 8080, target.Port) + assert.Equal(t, uint16(8080), target.Port) assert.Equal(t, "http", target.Protocol) assert.Equal(t, "peer-1", target.TargetId) assert.Equal(t, TargetTypePeer, target.TargetType) @@ -730,3 +730,182 @@ func TestExposeServiceRequest_ToService(t *testing.T) { require.NotNil(t, service.Auth.BearerAuth) }) } + +func TestValidate_TLSOnly(t *testing.T) { + rp := &Service{ + Name: "tls-svc", + Mode: "tls", + Domain: "example.com", + ListenPort: 8443, + Targets: []*Target{ + {TargetId: "peer-1", TargetType: TargetTypePeer, Protocol: "tcp", Port: 443, Enabled: true}, + }, + } + require.NoError(t, rp.Validate()) +} + +func TestValidate_TLSMissingListenPort(t *testing.T) { + rp := &Service{ + Name: "tls-svc", + Mode: "tls", + Domain: "example.com", + ListenPort: 0, + Targets: []*Target{ + {TargetId: "peer-1", TargetType: TargetTypePeer, Protocol: "tcp", Port: 443, Enabled: true}, + }, + } + assert.ErrorContains(t, rp.Validate(), "listen_port is required") +} + +func TestValidate_TLSMissingDomain(t *testing.T) { + rp := &Service{ + Name: "tls-svc", + Mode: "tls", + ListenPort: 8443, + Targets: []*Target{ + {TargetId: "peer-1", TargetType: TargetTypePeer, Protocol: "tcp", Port: 443, Enabled: true}, + }, + } + assert.ErrorContains(t, rp.Validate(), "domain is required") +} + +func TestValidate_TCPValid(t *testing.T) { + rp := &Service{ + Name: "tcp-svc", + Mode: "tcp", + Domain: "cluster.test", + ListenPort: 5432, + Targets: []*Target{ + {TargetId: "peer-1", TargetType: TargetTypePeer, Protocol: "tcp", Port: 5432, Enabled: true}, + }, + } + require.NoError(t, rp.Validate()) +} + +func TestValidate_TCPMissingListenPort(t *testing.T) { + rp := &Service{ + Name: "tcp-svc", + Mode: "tcp", + Domain: "cluster.test", + Targets: []*Target{ + {TargetId: "peer-1", TargetType: TargetTypePeer, Protocol: "tcp", Port: 5432, Enabled: true}, + }, + } + require.NoError(t, rp.Validate(), "TCP with listen_port=0 is valid (auto-assigned by manager)") +} + +func TestValidate_L4MultipleTargets(t *testing.T) { + rp := &Service{ + Name: "tcp-svc", + Mode: "tcp", + Domain: "cluster.test", + ListenPort: 5432, + Targets: []*Target{ + {TargetId: "peer-1", TargetType: TargetTypePeer, Protocol: "tcp", Port: 5432, Enabled: true}, + {TargetId: "peer-2", TargetType: TargetTypePeer, Protocol: "tcp", Port: 5432, Enabled: true}, + }, + } + assert.ErrorContains(t, rp.Validate(), "exactly one target") +} + +func TestValidate_L4TargetMissingPort(t *testing.T) { + rp := &Service{ + Name: "tcp-svc", + Mode: "tcp", + Domain: "cluster.test", + ListenPort: 5432, + Targets: []*Target{ + {TargetId: "peer-1", TargetType: TargetTypePeer, Protocol: "tcp", Port: 0, Enabled: true}, + }, + } + assert.ErrorContains(t, rp.Validate(), "port is required") +} + +func TestValidate_TLSInvalidTargetType(t *testing.T) { + rp := &Service{ + Name: "tls-svc", + Mode: "tls", + Domain: "example.com", + ListenPort: 443, + Targets: []*Target{ + {TargetId: "peer-1", TargetType: "invalid", Protocol: "tcp", Port: 443, Enabled: true}, + }, + } + assert.Error(t, rp.Validate()) +} + +func TestValidate_TLSSubnetValid(t *testing.T) { + rp := &Service{ + Name: "tls-subnet", + Mode: "tls", + Domain: "example.com", + ListenPort: 8443, + Targets: []*Target{ + {TargetId: "subnet-1", TargetType: TargetTypeSubnet, Protocol: "tcp", Port: 443, Host: "10.0.0.5", Enabled: true}, + }, + } + require.NoError(t, rp.Validate()) +} + +func TestValidate_HTTPProxyProtocolRejected(t *testing.T) { + rp := validProxy() + rp.Targets[0].ProxyProtocol = true + assert.ErrorContains(t, rp.Validate(), "proxy_protocol is not supported for HTTP") +} + +func TestValidate_UDPProxyProtocolRejected(t *testing.T) { + rp := &Service{ + Name: "udp-svc", + Mode: "udp", + Domain: "cluster.test", + Targets: []*Target{ + {TargetId: "peer-1", TargetType: TargetTypePeer, Protocol: "udp", Port: 5432, Enabled: true, ProxyProtocol: true}, + }, + } + assert.ErrorContains(t, rp.Validate(), "proxy_protocol is not supported for UDP") +} + +func TestValidate_TCPProxyProtocolAllowed(t *testing.T) { + rp := &Service{ + Name: "tcp-svc", + Mode: "tcp", + Domain: "cluster.test", + ListenPort: 5432, + Targets: []*Target{ + {TargetId: "peer-1", TargetType: TargetTypePeer, Protocol: "tcp", Port: 5432, Enabled: true, ProxyProtocol: true}, + }, + } + require.NoError(t, rp.Validate()) +} + +func TestExposeServiceRequest_Validate_L4RejectsAuth(t *testing.T) { + tests := []struct { + name string + req ExposeServiceRequest + }{ + { + name: "tcp with pin", + req: ExposeServiceRequest{Port: 8080, Mode: "tcp", Pin: "123456"}, + }, + { + name: "udp with password", + req: ExposeServiceRequest{Port: 8080, Mode: "udp", Password: "secret"}, + }, + { + name: "tls with user groups", + req: ExposeServiceRequest{Port: 443, Mode: "tls", UserGroups: []string{"admins"}}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.req.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "authentication is not supported") + }) + } +} + +func TestExposeServiceRequest_Validate_HTTPAllowsAuth(t *testing.T) { + req := ExposeServiceRequest{Port: 8080, Mode: "http", Pin: "123456"} + require.NoError(t, req.Validate()) +} diff --git a/management/internals/server/boot.go b/management/internals/server/boot.go index eb13a15e3..88d37ca80 100644 --- a/management/internals/server/boot.go +++ b/management/internals/server/boot.go @@ -19,6 +19,7 @@ import ( "google.golang.org/grpc/keepalive" "github.com/netbirdio/management-integrations/integrations" + "github.com/netbirdio/netbird/encryption" "github.com/netbirdio/netbird/formatter/hook" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" diff --git a/management/internals/server/modules.go b/management/internals/server/modules.go index 29a8953ac..a32cf6046 100644 --- a/management/internals/server/modules.go +++ b/management/internals/server/modules.go @@ -7,6 +7,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/netbirdio/management-integrations/integrations" + "github.com/netbirdio/netbird/management/internals/modules/peers" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain/manager" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" @@ -211,6 +212,9 @@ func (s *BaseServer) ProxyManager() proxy.Manager { func (s *BaseServer) ReverseProxyDomainManager() *manager.Manager { return Create(s, func() *manager.Manager { m := manager.NewManager(s.Store(), s.ProxyManager(), s.PermissionsManager(), s.AccountManager()) + s.AfterInit(func(s *BaseServer) { + m.SetClusterCapabilities(s.ServiceProxyController()) + }) return &m }) } diff --git a/management/internals/shared/grpc/expose_service.go b/management/internals/shared/grpc/expose_service.go index c444471b0..1b87f7ede 100644 --- a/management/internals/shared/grpc/expose_service.go +++ b/management/internals/shared/grpc/expose_service.go @@ -2,6 +2,7 @@ package grpc import ( "context" + "fmt" pb "github.com/golang/protobuf/proto" // nolint log "github.com/sirupsen/logrus" @@ -39,23 +40,38 @@ func (s *Server) CreateExpose(ctx context.Context, req *proto.EncryptedMessage) return nil, status.Errorf(codes.Internal, "reverse proxy manager not available") } + if exposeReq.Port > 65535 { + return nil, status.Errorf(codes.InvalidArgument, "port out of range: %d", exposeReq.Port) + } + if exposeReq.ListenPort > 65535 { + return nil, status.Errorf(codes.InvalidArgument, "listen_port out of range: %d", exposeReq.ListenPort) + } + + mode, err := exposeProtocolToString(exposeReq.Protocol) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "%v", err) + } + created, err := reverseProxyMgr.CreateServiceFromPeer(ctx, accountID, peer.ID, &rpservice.ExposeServiceRequest{ - NamePrefix: exposeReq.NamePrefix, - Port: int(exposeReq.Port), - Protocol: exposeProtocolToString(exposeReq.Protocol), - Domain: exposeReq.Domain, - Pin: exposeReq.Pin, - Password: exposeReq.Password, - UserGroups: exposeReq.UserGroups, + NamePrefix: exposeReq.NamePrefix, + Port: uint16(exposeReq.Port), //nolint:gosec // validated above + Mode: mode, + TargetProtocol: exposeTargetProtocol(exposeReq.Protocol), + Domain: exposeReq.Domain, + Pin: exposeReq.Pin, + Password: exposeReq.Password, + UserGroups: exposeReq.UserGroups, + ListenPort: uint16(exposeReq.ListenPort), //nolint:gosec // validated above }) if err != nil { return nil, mapExposeError(ctx, err) } return s.encryptResponse(peerKey, &proto.ExposeServiceResponse{ - ServiceName: created.ServiceName, - ServiceUrl: created.ServiceURL, - Domain: created.Domain, + ServiceName: created.ServiceName, + ServiceUrl: created.ServiceURL, + Domain: created.Domain, + PortAutoAssigned: created.PortAutoAssigned, }) } @@ -77,7 +93,12 @@ func (s *Server) RenewExpose(ctx context.Context, req *proto.EncryptedMessage) ( return nil, status.Errorf(codes.Internal, "reverse proxy manager not available") } - if err := reverseProxyMgr.RenewServiceFromPeer(ctx, accountID, peer.ID, renewReq.Domain); err != nil { + serviceID, err := s.resolveServiceID(ctx, renewReq.Domain) + if err != nil { + return nil, mapExposeError(ctx, err) + } + + if err := reverseProxyMgr.RenewServiceFromPeer(ctx, accountID, peer.ID, serviceID); err != nil { return nil, mapExposeError(ctx, err) } @@ -102,7 +123,12 @@ func (s *Server) StopExpose(ctx context.Context, req *proto.EncryptedMessage) (* return nil, status.Errorf(codes.Internal, "reverse proxy manager not available") } - if err := reverseProxyMgr.StopServiceFromPeer(ctx, accountID, peer.ID, stopReq.Domain); err != nil { + serviceID, err := s.resolveServiceID(ctx, stopReq.Domain) + if err != nil { + return nil, mapExposeError(ctx, err) + } + + if err := reverseProxyMgr.StopServiceFromPeer(ctx, accountID, peer.ID, serviceID); err != nil { return nil, mapExposeError(ctx, err) } @@ -180,13 +206,46 @@ func (s *Server) SetReverseProxyManager(mgr rpservice.Manager) { s.reverseProxyManager = mgr } -func exposeProtocolToString(p proto.ExposeProtocol) string { +// resolveServiceID looks up the service by its globally unique domain. +func (s *Server) resolveServiceID(ctx context.Context, domain string) (string, error) { + if domain == "" { + return "", status.Errorf(codes.InvalidArgument, "domain is required") + } + + svc, err := s.accountManager.GetStore().GetServiceByDomain(ctx, domain) + if err != nil { + return "", err + } + return svc.ID, nil +} + +func exposeProtocolToString(p proto.ExposeProtocol) (string, error) { switch p { - case proto.ExposeProtocol_EXPOSE_HTTP: - return "http" - case proto.ExposeProtocol_EXPOSE_HTTPS: - return "https" + case proto.ExposeProtocol_EXPOSE_HTTP, proto.ExposeProtocol_EXPOSE_HTTPS: + return "http", nil + case proto.ExposeProtocol_EXPOSE_TCP: + return "tcp", nil + case proto.ExposeProtocol_EXPOSE_UDP: + return "udp", nil + case proto.ExposeProtocol_EXPOSE_TLS: + return "tls", nil default: - return "http" + return "", fmt.Errorf("unsupported expose protocol: %v", p) + } +} + +// exposeTargetProtocol returns the target protocol for the given expose protocol. +// For HTTP mode, this is http or https (the scheme used to connect to the backend). +// For L4 modes, this is tcp or udp (the transport used to connect to the backend). +func exposeTargetProtocol(p proto.ExposeProtocol) string { + switch p { + case proto.ExposeProtocol_EXPOSE_HTTPS: + return rpservice.TargetProtoHTTPS + case proto.ExposeProtocol_EXPOSE_TCP, proto.ExposeProtocol_EXPOSE_TLS: + return rpservice.TargetProtoTCP + case proto.ExposeProtocol_EXPOSE_UDP: + return rpservice.TargetProtoUDP + default: + return rpservice.TargetProtoHTTP } } diff --git a/management/internals/shared/grpc/proxy.go b/management/internals/shared/grpc/proxy.go index e2d0f1abe..31a0ba0db 100644 --- a/management/internals/shared/grpc/proxy.go +++ b/management/internals/shared/grpc/proxy.go @@ -32,6 +32,7 @@ import ( proxyauth "github.com/netbirdio/netbird/proxy/auth" "github.com/netbirdio/netbird/shared/hash/argon2id" "github.com/netbirdio/netbird/shared/management/proto" + nbstatus "github.com/netbirdio/netbird/shared/management/status" ) type ProxyOIDCConfig struct { @@ -45,12 +46,6 @@ type ProxyOIDCConfig struct { KeysLocation string } -// ClusterInfo contains information about a proxy cluster. -type ClusterInfo struct { - Address string - ConnectedProxies int -} - // ProxyServiceServer implements the ProxyService gRPC server type ProxyServiceServer struct { proto.UnimplementedProxyServiceServer @@ -61,9 +56,9 @@ type ProxyServiceServer struct { // Manager for access logs accessLogManager accesslogs.Manager + mu sync.RWMutex // Manager for reverse proxy operations serviceManager rpservice.Manager - // ProxyController for service updates and cluster management proxyController proxy.Controller @@ -84,23 +79,26 @@ type ProxyServiceServer struct { // Store for PKCE verifiers pkceVerifierStore *PKCEVerifierStore + + cancel context.CancelFunc } const pkceVerifierTTL = 10 * time.Minute // proxyConnection represents a connected proxy type proxyConnection struct { - proxyID string - address string - stream proto.ProxyService_GetMappingUpdateServer - sendChan chan *proto.GetMappingUpdateResponse - ctx context.Context - cancel context.CancelFunc + proxyID string + address string + capabilities *proto.ProxyCapabilities + stream proto.ProxyService_GetMappingUpdateServer + sendChan chan *proto.GetMappingUpdateResponse + ctx context.Context + cancel context.CancelFunc } // NewProxyServiceServer creates a new proxy service server. func NewProxyServiceServer(accessLogMgr accesslogs.Manager, tokenStore *OneTimeTokenStore, pkceStore *PKCEVerifierStore, oidcConfig ProxyOIDCConfig, peersManager peers.Manager, usersManager users.Manager, proxyMgr proxy.Manager) *ProxyServiceServer { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) s := &ProxyServiceServer{ accessLogManager: accessLogMgr, oidcConfig: oidcConfig, @@ -109,6 +107,7 @@ func NewProxyServiceServer(accessLogMgr accesslogs.Manager, tokenStore *OneTimeT peersManager: peersManager, usersManager: usersManager, proxyManager: proxyMgr, + cancel: cancel, } go s.cleanupStaleProxies(ctx) return s @@ -130,11 +129,22 @@ func (s *ProxyServiceServer) cleanupStaleProxies(ctx context.Context) { } } +// Close stops background goroutines. +func (s *ProxyServiceServer) Close() { + s.cancel() +} + +// SetServiceManager sets the service manager. Must be called before serving. func (s *ProxyServiceServer) SetServiceManager(manager rpservice.Manager) { + s.mu.Lock() + defer s.mu.Unlock() s.serviceManager = manager } +// SetProxyController sets the proxy controller. Must be called before serving. func (s *ProxyServiceServer) SetProxyController(proxyController proxy.Controller) { + s.mu.Lock() + defer s.mu.Unlock() s.proxyController = proxyController } @@ -157,12 +167,13 @@ func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest connCtx, cancel := context.WithCancel(ctx) conn := &proxyConnection{ - proxyID: proxyID, - address: proxyAddress, - stream: stream, - sendChan: make(chan *proto.GetMappingUpdateResponse, 100), - ctx: connCtx, - cancel: cancel, + proxyID: proxyID, + address: proxyAddress, + capabilities: req.GetCapabilities(), + stream: stream, + sendChan: make(chan *proto.GetMappingUpdateResponse, 100), + ctx: connCtx, + cancel: cancel, } s.connectedProxies.Store(proxyID, conn) @@ -231,29 +242,18 @@ func (s *ProxyServiceServer) heartbeat(ctx context.Context, proxyID string) { } // sendSnapshot sends the initial snapshot of services to the connecting proxy. -// Only services matching the proxy's cluster address are sent. +// Only entries matching the proxy's cluster address are sent. func (s *ProxyServiceServer) sendSnapshot(ctx context.Context, conn *proxyConnection) error { - services, err := s.serviceManager.GetGlobalServices(ctx) - if err != nil { - return fmt.Errorf("get services from store: %w", err) - } - if !isProxyAddressValid(conn.address) { return fmt.Errorf("proxy address is invalid") } - var filtered []*rpservice.Service - for _, service := range services { - if !service.Enabled { - continue - } - if service.ProxyCluster == "" || service.ProxyCluster != conn.address { - continue - } - filtered = append(filtered, service) + mappings, err := s.snapshotServiceMappings(ctx, conn) + if err != nil { + return err } - if len(filtered) == 0 { + if len(mappings) == 0 { if err := conn.stream.Send(&proto.GetMappingUpdateResponse{ InitialSyncComplete: true, }); err != nil { @@ -262,9 +262,30 @@ func (s *ProxyServiceServer) sendSnapshot(ctx context.Context, conn *proxyConnec return nil } - for i, service := range filtered { - // Generate one-time authentication token for each service in the snapshot - // Tokens are not persistent on the proxy, so we need to generate new ones on reconnection + for i, m := range mappings { + if err := conn.stream.Send(&proto.GetMappingUpdateResponse{ + Mapping: []*proto.ProxyMapping{m}, + InitialSyncComplete: i == len(mappings)-1, + }); err != nil { + return fmt.Errorf("send proxy mapping: %w", err) + } + } + + return nil +} + +func (s *ProxyServiceServer) snapshotServiceMappings(ctx context.Context, conn *proxyConnection) ([]*proto.ProxyMapping, error) { + services, err := s.serviceManager.GetGlobalServices(ctx) + if err != nil { + return nil, fmt.Errorf("get services from store: %w", err) + } + + var mappings []*proto.ProxyMapping + for _, service := range services { + if !service.Enabled || service.ProxyCluster == "" || service.ProxyCluster != conn.address { + continue + } + token, err := s.tokenStore.GenerateToken(service.AccountID, service.ID, 5*time.Minute) if err != nil { log.WithFields(log.Fields{ @@ -274,25 +295,10 @@ func (s *ProxyServiceServer) sendSnapshot(ctx context.Context, conn *proxyConnec continue } - if err := conn.stream.Send(&proto.GetMappingUpdateResponse{ - Mapping: []*proto.ProxyMapping{ - service.ToProtoMapping( - rpservice.Create, // Initial snapshot, all records are "new" for the proxy. - token, - s.GetOIDCValidationConfig(), - ), - }, - InitialSyncComplete: i == len(filtered)-1, - }); err != nil { - log.WithFields(log.Fields{ - "domain": service.Domain, - "account": service.AccountID, - }).WithError(err).Error("failed to send proxy mapping") - return fmt.Errorf("send proxy mapping: %w", err) - } + m := service.ToProtoMapping(rpservice.Create, token, s.GetOIDCValidationConfig()) + mappings = append(mappings, m) } - - return nil + return mappings, nil } // isProxyAddressValid validates a proxy address @@ -305,8 +311,8 @@ func isProxyAddressValid(addr string) bool { func (s *ProxyServiceServer) sender(conn *proxyConnection, errChan chan<- error) { for { select { - case msg := <-conn.sendChan: - if err := conn.stream.Send(msg); err != nil { + case resp := <-conn.sendChan: + if err := conn.stream.Send(resp); err != nil { errChan <- err return } @@ -361,12 +367,12 @@ func (s *ProxyServiceServer) SendServiceUpdate(update *proto.GetMappingUpdateRes log.Debugf("Broadcasting service update to all connected proxy servers") s.connectedProxies.Range(func(key, value interface{}) bool { conn := value.(*proxyConnection) - msg := s.perProxyMessage(update, conn.proxyID) - if msg == nil { + resp := s.perProxyMessage(update, conn.proxyID) + if resp == nil { return true } select { - case conn.sendChan <- msg: + case conn.sendChan <- resp: log.Debugf("Sent service update to proxy server %s", conn.proxyID) default: log.Warnf("Failed to send service update to proxy server %s (channel full)", conn.proxyID) @@ -495,9 +501,40 @@ func shallowCloneMapping(m *proto.ProxyMapping) *proto.ProxyMapping { Auth: m.Auth, PassHostHeader: m.PassHostHeader, RewriteRedirects: m.RewriteRedirects, + Mode: m.Mode, + ListenPort: m.ListenPort, } } +// ClusterSupportsCustomPorts returns whether any connected proxy in the given +// cluster reports custom port support. Returns nil if no proxy has reported +// capabilities (old proxies that predate the field). +func (s *ProxyServiceServer) ClusterSupportsCustomPorts(clusterAddr string) *bool { + if s.proxyController == nil { + return nil + } + + var hasCapabilities bool + for _, pid := range s.proxyController.GetProxiesForCluster(clusterAddr) { + connVal, ok := s.connectedProxies.Load(pid) + if !ok { + continue + } + conn := connVal.(*proxyConnection) + if conn.capabilities == nil || conn.capabilities.SupportsCustomPorts == nil { + continue + } + if *conn.capabilities.SupportsCustomPorts { + return ptr(true) + } + hasCapabilities = true + } + if hasCapabilities { + return ptr(false) + } + return nil +} + func (s *ProxyServiceServer) Authenticate(ctx context.Context, req *proto.AuthenticateRequest) (*proto.AuthenticateResponse, error) { service, err := s.serviceManager.GetServiceByID(ctx, req.GetAccountId(), req.GetId()) if err != nil { @@ -585,7 +622,7 @@ func (s *ProxyServiceServer) generateSessionToken(ctx context.Context, authentic return token, nil } -// SendStatusUpdate handles status updates from proxy clients +// SendStatusUpdate handles status updates from proxy clients. func (s *ProxyServiceServer) SendStatusUpdate(ctx context.Context, req *proto.SendStatusUpdateRequest) (*proto.SendStatusUpdateResponse, error) { accountID := req.GetAccountId() serviceID := req.GetServiceId() @@ -604,6 +641,17 @@ func (s *ProxyServiceServer) SendStatusUpdate(ctx context.Context, req *proto.Se return nil, status.Errorf(codes.InvalidArgument, "service_id and account_id are required") } + internalStatus := protoStatusToInternal(protoStatus) + + if err := s.serviceManager.SetStatus(ctx, accountID, serviceID, internalStatus); err != nil { + sErr, isNbErr := nbstatus.FromError(err) + if isNbErr && sErr.Type() == nbstatus.NotFound { + return nil, status.Errorf(codes.NotFound, "service %s not found", serviceID) + } + log.WithContext(ctx).WithError(err).Error("failed to update service status") + return nil, status.Errorf(codes.Internal, "update service status: %v", err) + } + if certificateIssued { if err := s.serviceManager.SetCertificateIssuedAt(ctx, accountID, serviceID); err != nil { log.WithContext(ctx).WithError(err).Error("failed to set certificate issued timestamp") @@ -615,13 +663,6 @@ func (s *ProxyServiceServer) SendStatusUpdate(ctx context.Context, req *proto.Se }).Info("Certificate issued timestamp updated") } - internalStatus := protoStatusToInternal(protoStatus) - - if err := s.serviceManager.SetStatus(ctx, accountID, serviceID, internalStatus); err != nil { - log.WithContext(ctx).WithError(err).Error("failed to update service status") - return nil, status.Errorf(codes.Internal, "update service status: %v", err) - } - log.WithFields(log.Fields{ "service_id": serviceID, "account_id": accountID, @@ -631,7 +672,7 @@ func (s *ProxyServiceServer) SendStatusUpdate(ctx context.Context, req *proto.Se return &proto.SendStatusUpdateResponse{}, nil } -// protoStatusToInternal maps proto status to internal status +// protoStatusToInternal maps proto status to internal service status. func protoStatusToInternal(protoStatus proto.ProxyStatus) rpservice.Status { switch protoStatus { case proto.ProxyStatus_PROXY_STATUS_PENDING: @@ -1061,3 +1102,5 @@ func (s *ProxyServiceServer) checkGroupAccess(service *rpservice.Service, user * return fmt.Errorf("user not in allowed groups") } + +func ptr[T any](v T) *T { return &v } diff --git a/management/internals/shared/grpc/proxy_test.go b/management/internals/shared/grpc/proxy_test.go index b7abb28b6..1a4ea3330 100644 --- a/management/internals/shared/grpc/proxy_test.go +++ b/management/internals/shared/grpc/proxy_test.go @@ -53,6 +53,10 @@ func (c *testProxyController) UnregisterProxyFromCluster(_ context.Context, clus return nil } +func (c *testProxyController) ClusterSupportsCustomPorts(_ string) *bool { + return ptr(true) +} + func (c *testProxyController) GetProxiesForCluster(clusterAddr string) []string { c.mu.Lock() defer c.mu.Unlock() @@ -70,11 +74,17 @@ func (c *testProxyController) GetProxiesForCluster(clusterAddr string) []string // registerFakeProxy adds a fake proxy connection to the server's internal maps // and returns the channel where messages will be received. func registerFakeProxy(s *ProxyServiceServer, proxyID, clusterAddr string) chan *proto.GetMappingUpdateResponse { + return registerFakeProxyWithCaps(s, proxyID, clusterAddr, nil) +} + +// registerFakeProxyWithCaps adds a fake proxy connection with explicit capabilities. +func registerFakeProxyWithCaps(s *ProxyServiceServer, proxyID, clusterAddr string, caps *proto.ProxyCapabilities) chan *proto.GetMappingUpdateResponse { ch := make(chan *proto.GetMappingUpdateResponse, 10) conn := &proxyConnection{ - proxyID: proxyID, - address: clusterAddr, - sendChan: ch, + proxyID: proxyID, + address: clusterAddr, + capabilities: caps, + sendChan: ch, } s.connectedProxies.Store(proxyID, conn) @@ -83,15 +93,29 @@ func registerFakeProxy(s *ProxyServiceServer, proxyID, clusterAddr string) chan return ch } -func drainChannel(ch chan *proto.GetMappingUpdateResponse) *proto.GetMappingUpdateResponse { +// drainMapping drains a single ProxyMapping from the channel. +func drainMapping(ch chan *proto.GetMappingUpdateResponse) *proto.ProxyMapping { select { - case msg := <-ch: - return msg + case resp := <-ch: + if len(resp.Mapping) > 0 { + return resp.Mapping[0] + } + return nil case <-time.After(time.Second): return nil } } +// drainEmpty checks if a channel has no message within timeout. +func drainEmpty(ch chan *proto.GetMappingUpdateResponse) bool { + select { + case <-ch: + return false + case <-time.After(100 * time.Millisecond): + return true + } +} + func TestSendServiceUpdateToCluster_UniqueTokensPerProxy(t *testing.T) { ctx := context.Background() tokenStore, err := NewOneTimeTokenStore(ctx, time.Hour, 10*time.Minute, 100) @@ -129,10 +153,8 @@ func TestSendServiceUpdateToCluster_UniqueTokensPerProxy(t *testing.T) { tokens := make([]string, numProxies) for i, ch := range channels { - resp := drainChannel(ch) - require.NotNil(t, resp, "proxy %d should receive a message", i) - require.Len(t, resp.Mapping, 1, "proxy %d should receive exactly one mapping", i) - msg := resp.Mapping[0] + msg := drainMapping(ch) + require.NotNil(t, msg, "proxy %d should receive a message", i) assert.Equal(t, mapping.Domain, msg.Domain) assert.Equal(t, mapping.Id, msg.Id) assert.NotEmpty(t, msg.AuthToken, "proxy %d should have a non-empty token", i) @@ -181,16 +203,14 @@ func TestSendServiceUpdateToCluster_DeleteNoToken(t *testing.T) { s.SendServiceUpdateToCluster(context.Background(), mapping, cluster) - resp1 := drainChannel(ch1) - resp2 := drainChannel(ch2) - require.NotNil(t, resp1) - require.NotNil(t, resp2) - require.Len(t, resp1.Mapping, 1) - require.Len(t, resp2.Mapping, 1) + msg1 := drainMapping(ch1) + msg2 := drainMapping(ch2) + require.NotNil(t, msg1) + require.NotNil(t, msg2) // Delete operations should not generate tokens - assert.Empty(t, resp1.Mapping[0].AuthToken) - assert.Empty(t, resp2.Mapping[0].AuthToken) + assert.Empty(t, msg1.AuthToken) + assert.Empty(t, msg2.AuthToken) } func TestSendServiceUpdate_UniqueTokensPerProxy(t *testing.T) { @@ -224,15 +244,10 @@ func TestSendServiceUpdate_UniqueTokensPerProxy(t *testing.T) { s.SendServiceUpdate(update) - resp1 := drainChannel(ch1) - resp2 := drainChannel(ch2) - require.NotNil(t, resp1) - require.NotNil(t, resp2) - require.Len(t, resp1.Mapping, 1) - require.Len(t, resp2.Mapping, 1) - - msg1 := resp1.Mapping[0] - msg2 := resp2.Mapping[0] + msg1 := drainMapping(ch1) + msg2 := drainMapping(ch2) + require.NotNil(t, msg1) + require.NotNil(t, msg2) assert.NotEmpty(t, msg1.AuthToken) assert.NotEmpty(t, msg2.AuthToken) @@ -324,3 +339,314 @@ func TestValidateState_RejectsInvalidHMAC(t *testing.T) { require.Error(t, err) assert.Contains(t, err.Error(), "invalid state signature") } + +func TestSendServiceUpdateToCluster_FiltersOnCapability(t *testing.T) { + tokenStore, err := NewOneTimeTokenStore(context.Background(), time.Hour, 10*time.Minute, 100) + require.NoError(t, err) + + s := &ProxyServiceServer{ + tokenStore: tokenStore, + } + s.SetProxyController(newTestProxyController()) + + const cluster = "proxy.example.com" + + // Proxy A supports custom ports. + chA := registerFakeProxyWithCaps(s, "proxy-a", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(true)}) + // Proxy B does NOT support custom ports (shared cloud proxy). + chB := registerFakeProxyWithCaps(s, "proxy-b", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(false)}) + + ctx := context.Background() + + // TLS passthrough works on all proxies regardless of custom port support. + tlsMapping := &proto.ProxyMapping{ + Type: proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, + Id: "service-tls", + AccountId: "account-1", + Domain: "db.example.com", + Mode: "tls", + ListenPort: 8443, + Path: []*proto.PathMapping{{Target: "10.0.0.5:5432"}}, + } + + s.SendServiceUpdateToCluster(ctx, tlsMapping, cluster) + + msgA := drainMapping(chA) + msgB := drainMapping(chB) + assert.NotNil(t, msgA, "proxy-a should receive TLS mapping") + assert.NotNil(t, msgB, "proxy-b should receive TLS mapping (passthrough works on all proxies)") + + // Send an HTTP mapping: both should receive it. + httpMapping := &proto.ProxyMapping{ + Type: proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, + Id: "service-http", + AccountId: "account-1", + Domain: "app.example.com", + Path: []*proto.PathMapping{{Path: "/", Target: "http://10.0.0.1:80"}}, + } + + s.SendServiceUpdateToCluster(ctx, httpMapping, cluster) + + msgA = drainMapping(chA) + msgB = drainMapping(chB) + assert.NotNil(t, msgA, "proxy-a should receive HTTP mapping") + assert.NotNil(t, msgB, "proxy-b should receive HTTP mapping") +} + +func TestSendServiceUpdateToCluster_TLSNotFiltered(t *testing.T) { + tokenStore, err := NewOneTimeTokenStore(context.Background(), time.Hour, 10*time.Minute, 100) + require.NoError(t, err) + + s := &ProxyServiceServer{ + tokenStore: tokenStore, + } + s.SetProxyController(newTestProxyController()) + + const cluster = "proxy.example.com" + + chShared := registerFakeProxyWithCaps(s, "proxy-shared", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(false)}) + + tlsMapping := &proto.ProxyMapping{ + Type: proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, + Id: "service-tls", + AccountId: "account-1", + Domain: "db.example.com", + Mode: "tls", + Path: []*proto.PathMapping{{Target: "10.0.0.5:5432"}}, + } + + s.SendServiceUpdateToCluster(context.Background(), tlsMapping, cluster) + + msg := drainMapping(chShared) + assert.NotNil(t, msg, "shared proxy should receive TLS mapping even without custom port support") +} + +// TestServiceModifyNotifications exercises every possible modification +// scenario for an existing service, verifying the correct update types +// reach the correct clusters. +func TestServiceModifyNotifications(t *testing.T) { + tokenStore, err := NewOneTimeTokenStore(context.Background(), time.Hour, 10*time.Minute, 100) + require.NoError(t, err) + + newServer := func() (*ProxyServiceServer, map[string]chan *proto.GetMappingUpdateResponse) { + s := &ProxyServiceServer{ + tokenStore: tokenStore, + } + s.SetProxyController(newTestProxyController()) + chs := map[string]chan *proto.GetMappingUpdateResponse{ + "cluster-a": registerFakeProxyWithCaps(s, "proxy-a", "cluster-a", &proto.ProxyCapabilities{SupportsCustomPorts: ptr(true)}), + "cluster-b": registerFakeProxyWithCaps(s, "proxy-b", "cluster-b", &proto.ProxyCapabilities{SupportsCustomPorts: ptr(true)}), + } + return s, chs + } + + httpMapping := func(updateType proto.ProxyMappingUpdateType) *proto.ProxyMapping { + return &proto.ProxyMapping{ + Type: updateType, + Id: "svc-1", + AccountId: "acct-1", + Domain: "app.example.com", + Path: []*proto.PathMapping{{Path: "/", Target: "http://10.0.0.1:8080"}}, + } + } + + tlsOnlyMapping := func(updateType proto.ProxyMappingUpdateType) *proto.ProxyMapping { + return &proto.ProxyMapping{ + Type: updateType, + Id: "svc-1", + AccountId: "acct-1", + Domain: "app.example.com", + Mode: "tls", + ListenPort: 8443, + Path: []*proto.PathMapping{{Target: "10.0.0.1:443"}}, + } + } + + ctx := context.Background() + + t.Run("targets changed sends MODIFIED to same cluster", func(t *testing.T) { + s, chs := newServer() + s.SendServiceUpdateToCluster(ctx, httpMapping(proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED), "cluster-a") + + msg := drainMapping(chs["cluster-a"]) + require.NotNil(t, msg, "cluster-a should receive update") + assert.Equal(t, proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED, msg.Type) + assert.NotEmpty(t, msg.AuthToken, "MODIFIED should include token") + assert.True(t, drainEmpty(chs["cluster-b"]), "cluster-b should not receive update") + }) + + t.Run("auth config changed sends MODIFIED", func(t *testing.T) { + s, chs := newServer() + mapping := httpMapping(proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED) + mapping.Auth = &proto.Authentication{Password: true, Pin: true} + s.SendServiceUpdateToCluster(ctx, mapping, "cluster-a") + + msg := drainMapping(chs["cluster-a"]) + require.NotNil(t, msg) + assert.Equal(t, proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED, msg.Type) + assert.True(t, msg.Auth.Password) + assert.True(t, msg.Auth.Pin) + }) + + t.Run("HTTP to TLS transition sends MODIFIED with TLS config", func(t *testing.T) { + s, chs := newServer() + s.SendServiceUpdateToCluster(ctx, tlsOnlyMapping(proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED), "cluster-a") + + msg := drainMapping(chs["cluster-a"]) + require.NotNil(t, msg) + assert.Equal(t, proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED, msg.Type) + assert.Equal(t, "tls", msg.Mode, "mode should be tls") + assert.Equal(t, int32(8443), msg.ListenPort) + assert.Len(t, msg.Path, 1, "should have one path entry with target address") + assert.Equal(t, "10.0.0.1:443", msg.Path[0].Target) + }) + + t.Run("TLS to HTTP transition sends MODIFIED without TLS", func(t *testing.T) { + s, chs := newServer() + s.SendServiceUpdateToCluster(ctx, httpMapping(proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED), "cluster-a") + + msg := drainMapping(chs["cluster-a"]) + require.NotNil(t, msg) + assert.Equal(t, proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED, msg.Type) + assert.Empty(t, msg.Mode, "mode should be empty for HTTP") + assert.True(t, len(msg.Path) > 0) + }) + + t.Run("TLS port changed sends MODIFIED with new port", func(t *testing.T) { + s, chs := newServer() + mapping := tlsOnlyMapping(proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED) + mapping.ListenPort = 9443 + s.SendServiceUpdateToCluster(ctx, mapping, "cluster-a") + + msg := drainMapping(chs["cluster-a"]) + require.NotNil(t, msg) + assert.Equal(t, int32(9443), msg.ListenPort) + }) + + t.Run("disable sends REMOVED to cluster", func(t *testing.T) { + s, chs := newServer() + // Manager sends Delete when service is disabled + s.SendServiceUpdateToCluster(ctx, httpMapping(proto.ProxyMappingUpdateType_UPDATE_TYPE_REMOVED), "cluster-a") + + msg := drainMapping(chs["cluster-a"]) + require.NotNil(t, msg) + assert.Equal(t, proto.ProxyMappingUpdateType_UPDATE_TYPE_REMOVED, msg.Type) + assert.Empty(t, msg.AuthToken, "DELETE should not have token") + }) + + t.Run("enable sends CREATED to cluster", func(t *testing.T) { + s, chs := newServer() + s.SendServiceUpdateToCluster(ctx, httpMapping(proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED), "cluster-a") + + msg := drainMapping(chs["cluster-a"]) + require.NotNil(t, msg) + assert.Equal(t, proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, msg.Type) + assert.NotEmpty(t, msg.AuthToken) + }) + + t.Run("domain change with cluster change sends DELETE to old CREATE to new", func(t *testing.T) { + s, chs := newServer() + // This is the pattern the manager produces: + // 1. DELETE on old cluster + s.SendServiceUpdateToCluster(ctx, httpMapping(proto.ProxyMappingUpdateType_UPDATE_TYPE_REMOVED), "cluster-a") + // 2. CREATE on new cluster + s.SendServiceUpdateToCluster(ctx, httpMapping(proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED), "cluster-b") + + msgA := drainMapping(chs["cluster-a"]) + require.NotNil(t, msgA, "old cluster should receive DELETE") + assert.Equal(t, proto.ProxyMappingUpdateType_UPDATE_TYPE_REMOVED, msgA.Type) + + msgB := drainMapping(chs["cluster-b"]) + require.NotNil(t, msgB, "new cluster should receive CREATE") + assert.Equal(t, proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, msgB.Type) + assert.NotEmpty(t, msgB.AuthToken) + }) + + t.Run("domain change same cluster sends DELETE then CREATE", func(t *testing.T) { + s, chs := newServer() + // Domain changes within same cluster: manager sends DELETE (old domain) + CREATE (new domain). + s.SendServiceUpdateToCluster(ctx, httpMapping(proto.ProxyMappingUpdateType_UPDATE_TYPE_REMOVED), "cluster-a") + s.SendServiceUpdateToCluster(ctx, httpMapping(proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED), "cluster-a") + + msgDel := drainMapping(chs["cluster-a"]) + require.NotNil(t, msgDel, "same cluster should receive DELETE") + assert.Equal(t, proto.ProxyMappingUpdateType_UPDATE_TYPE_REMOVED, msgDel.Type) + + msgCreate := drainMapping(chs["cluster-a"]) + require.NotNil(t, msgCreate, "same cluster should receive CREATE") + assert.Equal(t, proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, msgCreate.Type) + assert.NotEmpty(t, msgCreate.AuthToken) + }) + + t.Run("TLS passthrough sent to all proxies", func(t *testing.T) { + s := &ProxyServiceServer{ + tokenStore: tokenStore, + } + s.SetProxyController(newTestProxyController()) + const cluster = "proxy.example.com" + chModern := registerFakeProxyWithCaps(s, "modern", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(true)}) + chLegacy := registerFakeProxyWithCaps(s, "legacy", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(false)}) + + // TLS passthrough works on all proxies regardless of custom port support + s.SendServiceUpdateToCluster(ctx, tlsOnlyMapping(proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED), cluster) + + msgModern := drainMapping(chModern) + require.NotNil(t, msgModern, "modern proxy receives TLS update") + assert.Equal(t, "tls", msgModern.Mode) + + msgLegacy := drainMapping(chLegacy) + assert.NotNil(t, msgLegacy, "legacy proxy should also receive TLS passthrough") + }) + + t.Run("TLS on default port NOT filtered for legacy proxy", func(t *testing.T) { + s := &ProxyServiceServer{ + tokenStore: tokenStore, + } + s.SetProxyController(newTestProxyController()) + const cluster = "proxy.example.com" + chLegacy := registerFakeProxyWithCaps(s, "legacy", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(false)}) + + mapping := tlsOnlyMapping(proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED) + mapping.ListenPort = 0 // default port + s.SendServiceUpdateToCluster(ctx, mapping, cluster) + + msgLegacy := drainMapping(chLegacy) + assert.NotNil(t, msgLegacy, "legacy proxy should receive TLS on default port") + }) + + t.Run("passthrough and rewrite flags propagated", func(t *testing.T) { + s, chs := newServer() + mapping := httpMapping(proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED) + mapping.PassHostHeader = true + mapping.RewriteRedirects = true + s.SendServiceUpdateToCluster(ctx, mapping, "cluster-a") + + msg := drainMapping(chs["cluster-a"]) + require.NotNil(t, msg) + assert.True(t, msg.PassHostHeader) + assert.True(t, msg.RewriteRedirects) + }) + + t.Run("multiple paths propagated in MODIFIED", func(t *testing.T) { + s, chs := newServer() + mapping := &proto.ProxyMapping{ + Type: proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED, + Id: "svc-multi", + AccountId: "acct-1", + Domain: "multi.example.com", + Path: []*proto.PathMapping{ + {Path: "/", Target: "http://10.0.0.1:8080"}, + {Path: "/api", Target: "http://10.0.0.2:9090"}, + {Path: "/ws", Target: "http://10.0.0.3:3000"}, + }, + } + s.SendServiceUpdateToCluster(ctx, mapping, "cluster-a") + + msg := drainMapping(chs["cluster-a"]) + require.NotNil(t, msg) + require.Len(t, msg.Path, 3, "all paths should be present") + assert.Equal(t, "/", msg.Path[0].Path) + assert.Equal(t, "/api", msg.Path[1].Path) + assert.Equal(t, "/ws", msg.Path[2].Path) + }) +} diff --git a/management/server/http/handler.go b/management/server/http/handler.go index ddeda6d7f..ad36b9d46 100644 --- a/management/server/http/handler.go +++ b/management/server/http/handler.go @@ -174,9 +174,8 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks instance.AddEndpoints(instanceManager, router) instance.AddVersionEndpoint(instanceManager, router) if serviceManager != nil && reverseProxyDomainManager != nil { - reverseproxymanager.RegisterEndpoints(serviceManager, *reverseProxyDomainManager, reverseProxyAccessLogsManager, router) + reverseproxymanager.RegisterEndpoints(serviceManager, *reverseProxyDomainManager, reverseProxyAccessLogsManager, permissionsManager, router) } - // Register OAuth callback handler for proxy authentication if proxyGRPCServer != nil { oauthHandler := proxy.NewAuthCallbackHandler(proxyGRPCServer, trustedHTTPProxies) diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index 462013963..6bd269a2c 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/otel/metric/noop" "github.com/netbirdio/management-integrations/integrations" + accesslogsmanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs/manager" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain/manager" proxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy/manager" @@ -113,6 +114,7 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee if err != nil { t.Fatalf("Failed to create proxy controller: %v", err) } + domainManager.SetClusterCapabilities(serviceProxyController) serviceManager := reverseproxymanager.NewManager(store, am, permissionsManager, serviceProxyController, domainManager) proxyServiceServer.SetServiceManager(serviceManager) am.SetServiceManager(serviceManager) diff --git a/management/server/metrics/selfhosted.go b/management/server/metrics/selfhosted.go index bfefce388..8732cf89f 100644 --- a/management/server/metrics/selfhosted.go +++ b/management/server/metrics/selfhosted.go @@ -219,7 +219,7 @@ func (w *Worker) generateProperties(ctx context.Context) properties { servicesStatusActive int servicesStatusPending int servicesStatusError int - servicesTargetType map[string]int + servicesTargetType map[rpservice.TargetType]int servicesAuthPassword int servicesAuthPin int servicesAuthOIDC int @@ -232,7 +232,7 @@ func (w *Worker) generateProperties(ctx context.Context) properties { rulesDirection = make(map[string]int) activeUsersLastDay = make(map[string]struct{}) embeddedIdpTypes = make(map[string]int) - servicesTargetType = make(map[string]int) + servicesTargetType = make(map[rpservice.TargetType]int) uptime = time.Since(w.startupTime).Seconds() connections := w.connManager.GetAllConnectedPeers() version = nbversion.NetbirdVersion() @@ -434,7 +434,7 @@ func (w *Worker) generateProperties(ctx context.Context) properties { metricsProperties["custom_domains_validated"] = customDomainsValidated for targetType, count := range servicesTargetType { - metricsProperties["services_target_type_"+targetType] = count + metricsProperties["services_target_type_"+string(targetType)] = count } for idpType, count := range embeddedIdpTypes { diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 5997c10e2..b3fbfe141 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -30,6 +30,7 @@ import ( nbdns "github.com/netbirdio/netbird/dns" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/management/internals/modules/zones" @@ -4996,6 +4997,7 @@ func (s *SqlStore) GetServiceByDomain(ctx context.Context, domain string) (*rpse return service, nil } + func (s *SqlStore) GetServices(ctx context.Context, lockStrength LockingStrength) ([]*rpservice.Service, error) { tx := s.db.Preload("Targets") if lockStrength != LockingStrengthNone { @@ -5041,16 +5043,16 @@ func (s *SqlStore) GetAccountServices(ctx context.Context, lockStrength LockingS } // RenewEphemeralService updates the last_renewed_at timestamp for an ephemeral service. -func (s *SqlStore) RenewEphemeralService(ctx context.Context, accountID, peerID, domain string) error { +func (s *SqlStore) RenewEphemeralService(ctx context.Context, accountID, peerID, serviceID string) error { result := s.db.Model(&rpservice.Service{}). - Where("account_id = ? AND source_peer = ? AND domain = ? AND source = ?", accountID, peerID, domain, rpservice.SourceEphemeral). + Where("id = ? AND account_id = ? AND source_peer = ? AND source = ?", serviceID, accountID, peerID, rpservice.SourceEphemeral). Update("meta_last_renewed_at", time.Now()) if result.Error != nil { log.WithContext(ctx).Errorf("failed to renew ephemeral service: %v", result.Error) return status.Errorf(status.Internal, "renew ephemeral service") } if result.RowsAffected == 0 { - return status.Errorf(status.NotFound, "no active expose session for domain %s", domain) + return status.Errorf(status.NotFound, "no active expose session for service %s", serviceID) } return nil } @@ -5133,6 +5135,37 @@ func (s *SqlStore) EphemeralServiceExists(ctx context.Context, lockStrength Lock return id != "", nil } +// GetServicesByClusterAndPort returns services matching the given proxy cluster, mode, and listen port. +func (s *SqlStore) GetServicesByClusterAndPort(ctx context.Context, lockStrength LockingStrength, proxyCluster string, mode string, listenPort uint16) ([]*rpservice.Service, error) { + tx := s.db.WithContext(ctx) + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var services []*rpservice.Service + result := tx.Where("proxy_cluster = ? AND mode = ? AND listen_port = ?", proxyCluster, mode, listenPort).Find(&services) + if result.Error != nil { + return nil, status.Errorf(status.Internal, "query services by cluster and port") + } + + return services, nil +} + +// GetServicesByCluster returns all services for the given proxy cluster. +func (s *SqlStore) GetServicesByCluster(ctx context.Context, lockStrength LockingStrength, proxyCluster string) ([]*rpservice.Service, error) { + tx := s.db.WithContext(ctx) + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var services []*rpservice.Service + result := tx.Where("proxy_cluster = ?", proxyCluster).Find(&services) + if result.Error != nil { + return nil, status.Errorf(status.Internal, "query services by cluster") + } + return services, nil +} + func (s *SqlStore) GetCustomDomain(ctx context.Context, accountID string, domainID string) (*domain.Domain, error) { tx := s.db diff --git a/management/server/store/store.go b/management/server/store/store.go index 1fa99fd05..8bb52f38a 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -261,10 +261,12 @@ type Store interface { GetServices(ctx context.Context, lockStrength LockingStrength) ([]*rpservice.Service, error) GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*rpservice.Service, error) - RenewEphemeralService(ctx context.Context, accountID, peerID, domain string) error + RenewEphemeralService(ctx context.Context, accountID, peerID, serviceID string) error GetExpiredEphemeralServices(ctx context.Context, ttl time.Duration, limit int) ([]*rpservice.Service, error) CountEphemeralServicesByPeer(ctx context.Context, lockStrength LockingStrength, accountID, peerID string) (int64, error) EphemeralServiceExists(ctx context.Context, lockStrength LockingStrength, accountID, peerID, domain string) (bool, error) + GetServicesByClusterAndPort(ctx context.Context, lockStrength LockingStrength, proxyCluster string, mode string, listenPort uint16) ([]*rpservice.Service, error) + GetServicesByCluster(ctx context.Context, lockStrength LockingStrength, proxyCluster string) ([]*rpservice.Service, error) GetCustomDomain(ctx context.Context, accountID string, domainID string) (*domain.Domain, error) ListFreeDomains(ctx context.Context, accountID string) ([]string, error) diff --git a/management/server/store/store_mock.go b/management/server/store/store_mock.go index 130df4485..e75e35b94 100644 --- a/management/server/store/store_mock.go +++ b/management/server/store/store_mock.go @@ -1991,6 +1991,36 @@ func (mr *MockStoreMockRecorder) GetServices(ctx, lockStrength interface{}) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServices", reflect.TypeOf((*MockStore)(nil).GetServices), ctx, lockStrength) } +// GetServicesByCluster mocks base method. +func (m *MockStore) GetServicesByCluster(ctx context.Context, lockStrength LockingStrength, proxyCluster string) ([]*service.Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetServicesByCluster", ctx, lockStrength, proxyCluster) + ret0, _ := ret[0].([]*service.Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetServicesByCluster indicates an expected call of GetServicesByCluster. +func (mr *MockStoreMockRecorder) GetServicesByCluster(ctx, lockStrength, proxyCluster interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServicesByCluster", reflect.TypeOf((*MockStore)(nil).GetServicesByCluster), ctx, lockStrength, proxyCluster) +} + +// GetServicesByClusterAndPort mocks base method. +func (m *MockStore) GetServicesByClusterAndPort(ctx context.Context, lockStrength LockingStrength, proxyCluster, mode string, listenPort uint16) ([]*service.Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetServicesByClusterAndPort", ctx, lockStrength, proxyCluster, mode, listenPort) + ret0, _ := ret[0].([]*service.Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetServicesByClusterAndPort indicates an expected call of GetServicesByClusterAndPort. +func (mr *MockStoreMockRecorder) GetServicesByClusterAndPort(ctx, lockStrength, proxyCluster, mode, listenPort interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServicesByClusterAndPort", reflect.TypeOf((*MockStore)(nil).GetServicesByClusterAndPort), ctx, lockStrength, proxyCluster, mode, listenPort) +} + // GetSetupKeyByID mocks base method. func (m *MockStore) GetSetupKeyByID(ctx context.Context, lockStrength LockingStrength, accountID, setupKeyID string) (*types2.SetupKey, error) { m.ctrl.T.Helper() @@ -2447,17 +2477,17 @@ func (mr *MockStoreMockRecorder) RemoveResourceFromGroup(ctx, accountId, groupID } // RenewEphemeralService mocks base method. -func (m *MockStore) RenewEphemeralService(ctx context.Context, accountID, peerID, domain string) error { +func (m *MockStore) RenewEphemeralService(ctx context.Context, accountID, peerID, serviceID string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RenewEphemeralService", ctx, accountID, peerID, domain) + ret := m.ctrl.Call(m, "RenewEphemeralService", ctx, accountID, peerID, serviceID) ret0, _ := ret[0].(error) return ret0 } // RenewEphemeralService indicates an expected call of RenewEphemeralService. -func (mr *MockStoreMockRecorder) RenewEphemeralService(ctx, accountID, peerID, domain interface{}) *gomock.Call { +func (mr *MockStoreMockRecorder) RenewEphemeralService(ctx, accountID, peerID, serviceID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RenewEphemeralService", reflect.TypeOf((*MockStore)(nil).RenewEphemeralService), ctx, accountID, peerID, domain) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RenewEphemeralService", reflect.TypeOf((*MockStore)(nil).RenewEphemeralService), ctx, accountID, peerID, serviceID) } // RevokeProxyAccessToken mocks base method. diff --git a/management/server/types/account.go b/management/server/types/account.go index 6145ceeb2..269fc7a88 100644 --- a/management/server/types/account.go +++ b/management/server/types/account.go @@ -907,8 +907,8 @@ func (a *Account) Copy() *Account { } services := []*service.Service{} - for _, service := range a.Services { - services = append(services, service.Copy()) + for _, svc := range a.Services { + services = append(services, svc.Copy()) } return &Account{ @@ -1605,12 +1605,12 @@ func (a *Account) GetPoliciesForNetworkResource(resourceId string) []*Policy { networkResourceGroups := a.getNetworkResourceGroups(resourceId) for _, policy := range a.Policies { - if !policy.Enabled { + if policy == nil || !policy.Enabled { continue } for _, rule := range policy.Rules { - if !rule.Enabled { + if rule == nil || !rule.Enabled { continue } @@ -1812,15 +1812,18 @@ func (a *Account) InjectProxyPolicies(ctx context.Context) { } a.injectServiceProxyPolicies(ctx, service, proxyPeersByCluster) } + } func (a *Account) injectServiceProxyPolicies(ctx context.Context, service *service.Service, proxyPeersByCluster map[string][]*nbpeer.Peer) { + proxyPeers := proxyPeersByCluster[service.ProxyCluster] for _, target := range service.Targets { if !target.Enabled { continue } - a.injectTargetProxyPolicies(ctx, service, target, proxyPeersByCluster[service.ProxyCluster]) + a.injectTargetProxyPolicies(ctx, service, target, proxyPeers) } + } func (a *Account) injectTargetProxyPolicies(ctx context.Context, service *service.Service, target *service.Target, proxyPeers []*nbpeer.Peer) { @@ -1840,13 +1843,13 @@ func (a *Account) injectTargetProxyPolicies(ctx context.Context, service *servic } } -func (a *Account) resolveTargetPort(ctx context.Context, target *service.Target) (int, bool) { +func (a *Account) resolveTargetPort(ctx context.Context, target *service.Target) (uint16, bool) { if target.Port != 0 { return target.Port, true } switch target.Protocol { - case "https": + case "https", "tls": return 443, true case "http": return 80, true @@ -1856,17 +1859,23 @@ func (a *Account) resolveTargetPort(ctx context.Context, target *service.Target) } } -func (a *Account) createProxyPolicy(service *service.Service, target *service.Target, proxyPeer *nbpeer.Peer, port int, path string) *Policy { - policyID := fmt.Sprintf("proxy-access-%s-%s-%s", service.ID, proxyPeer.ID, path) +func (a *Account) createProxyPolicy(svc *service.Service, target *service.Target, proxyPeer *nbpeer.Peer, port uint16, path string) *Policy { + policyID := fmt.Sprintf("proxy-access-%s-%s-%s", svc.ID, proxyPeer.ID, path) + + protocol := PolicyRuleProtocolTCP + if svc.Mode == service.ModeUDP { + protocol = PolicyRuleProtocolUDP + } + return &Policy{ ID: policyID, - Name: fmt.Sprintf("Proxy Access to %s", service.Name), + Name: fmt.Sprintf("Proxy Access to %s", svc.Name), Enabled: true, Rules: []*PolicyRule{ { ID: policyID, PolicyID: policyID, - Name: fmt.Sprintf("Allow access to %s", service.Name), + Name: fmt.Sprintf("Allow access to %s", svc.Name), Enabled: true, SourceResource: Resource{ ID: proxyPeer.ID, @@ -1877,12 +1886,12 @@ func (a *Account) createProxyPolicy(service *service.Service, target *service.Ta Type: ResourceType(target.TargetType), }, Bidirectional: false, - Protocol: PolicyRuleProtocolTCP, + Protocol: protocol, Action: PolicyTrafficActionAccept, PortRanges: []RulePortRange{ { - Start: uint16(port), - End: uint16(port), + Start: port, + End: port, }, }, }, diff --git a/proxy/cmd/proxy/cmd/root.go b/proxy/cmd/proxy/cmd/root.go index 60e81feb5..d82f5b7fc 100644 --- a/proxy/cmd/proxy/cmd/root.go +++ b/proxy/cmd/proxy/cmd/root.go @@ -7,6 +7,7 @@ import ( "os/signal" "strconv" "syscall" + "time" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -34,30 +35,32 @@ var ( ) var ( - logLevel string - debugLogs bool - mgmtAddr string - addr string - proxyDomain string - certDir string - acmeCerts bool - acmeAddr string - acmeDir string - acmeEABKID string - acmeEABHMACKey string - acmeChallengeType string - debugEndpoint bool - debugEndpointAddr string - healthAddr string - forwardedProto string - trustedProxies string - certFile string - certKeyFile string - certLockMethod string - wildcardCertDir string - wgPort int - proxyProtocol bool - preSharedKey string + logLevel string + debugLogs bool + mgmtAddr string + addr string + proxyDomain string + defaultDialTimeout time.Duration + certDir string + acmeCerts bool + acmeAddr string + acmeDir string + acmeEABKID string + acmeEABHMACKey string + acmeChallengeType string + debugEndpoint bool + debugEndpointAddr string + healthAddr string + forwardedProto string + trustedProxies string + certFile string + certKeyFile string + certLockMethod string + wildcardCertDir string + wgPort uint16 + proxyProtocol bool + preSharedKey string + supportsCustomPorts bool ) var rootCmd = &cobra.Command{ @@ -92,9 +95,11 @@ func init() { rootCmd.Flags().StringVar(&certKeyFile, "cert-key-file", envStringOrDefault("NB_PROXY_CERTIFICATE_KEY_FILE", "tls.key"), "TLS certificate key filename within the certificate directory") rootCmd.Flags().StringVar(&certLockMethod, "cert-lock-method", envStringOrDefault("NB_PROXY_CERT_LOCK_METHOD", "auto"), "Certificate lock method for cross-replica coordination: auto, flock, or k8s-lease") rootCmd.Flags().StringVar(&wildcardCertDir, "wildcard-cert-dir", envStringOrDefault("NB_PROXY_WILDCARD_CERT_DIR", ""), "Directory containing wildcard certificate pairs (.crt/.key). Wildcard patterns are extracted from SANs automatically") - rootCmd.Flags().IntVar(&wgPort, "wg-port", envIntOrDefault("NB_PROXY_WG_PORT", 0), "WireGuard listen port (0 = random). Fixed port only works with single-account deployments") + rootCmd.Flags().Uint16Var(&wgPort, "wg-port", envUint16OrDefault("NB_PROXY_WG_PORT", 0), "WireGuard listen port (0 = random). Fixed port only works with single-account deployments") rootCmd.Flags().BoolVar(&proxyProtocol, "proxy-protocol", envBoolOrDefault("NB_PROXY_PROXY_PROTOCOL", false), "Enable PROXY protocol on TCP listeners to preserve client IPs behind L4 proxies") rootCmd.Flags().StringVar(&preSharedKey, "preshared-key", envStringOrDefault("NB_PROXY_PRESHARED_KEY", ""), "Define a pre-shared key for the tunnel between proxy and peers") + rootCmd.Flags().BoolVar(&supportsCustomPorts, "supports-custom-ports", envBoolOrDefault("NB_PROXY_SUPPORTS_CUSTOM_PORTS", true), "Whether the proxy can bind arbitrary ports for UDP/TCP passthrough") + rootCmd.Flags().DurationVar(&defaultDialTimeout, "default-dial-timeout", envDurationOrDefault("NB_PROXY_DEFAULT_DIAL_TIMEOUT", 0), "Default backend dial timeout when no per-service timeout is set (e.g. 30s)") } // Execute runs the root command. @@ -171,6 +176,8 @@ func runServer(cmd *cobra.Command, args []string) error { WireguardPort: wgPort, ProxyProtocol: proxyProtocol, PreSharedKey: preSharedKey, + SupportsCustomPorts: supportsCustomPorts, + DefaultDialTimeout: defaultDialTimeout, } ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM, syscall.SIGINT) @@ -203,12 +210,24 @@ func envStringOrDefault(key string, def string) string { return v } -func envIntOrDefault(key string, def int) int { +func envUint16OrDefault(key string, def uint16) uint16 { v, exists := os.LookupEnv(key) if !exists { return def } - parsed, err := strconv.Atoi(v) + parsed, err := strconv.ParseUint(v, 10, 16) + if err != nil { + return def + } + return uint16(parsed) +} + +func envDurationOrDefault(key string, def time.Duration) time.Duration { + v, exists := os.LookupEnv(key) + if !exists { + return def + } + parsed, err := time.ParseDuration(v) if err != nil { return def } diff --git a/proxy/handle_mapping_stream_test.go b/proxy/handle_mapping_stream_test.go index d2ad3f67e..cb16c0814 100644 --- a/proxy/handle_mapping_stream_test.go +++ b/proxy/handle_mapping_stream_test.go @@ -38,11 +38,18 @@ func (m *mockMappingStream) Context() context.Context { return context.Backgroun func (m *mockMappingStream) SendMsg(any) error { return nil } func (m *mockMappingStream) RecvMsg(any) error { return nil } +func closedChan() chan struct{} { + ch := make(chan struct{}) + close(ch) + return ch +} + func TestHandleMappingStream_SyncCompleteFlag(t *testing.T) { checker := health.NewChecker(nil, nil) s := &Server{ Logger: log.StandardLogger(), healthChecker: checker, + routerReady: closedChan(), } stream := &mockMappingStream{ @@ -62,6 +69,7 @@ func TestHandleMappingStream_NoSyncFlagDoesNotMarkDone(t *testing.T) { s := &Server{ Logger: log.StandardLogger(), healthChecker: checker, + routerReady: closedChan(), } stream := &mockMappingStream{ @@ -78,7 +86,8 @@ func TestHandleMappingStream_NoSyncFlagDoesNotMarkDone(t *testing.T) { func TestHandleMappingStream_NilHealthChecker(t *testing.T) { s := &Server{ - Logger: log.StandardLogger(), + Logger: log.StandardLogger(), + routerReady: closedChan(), } stream := &mockMappingStream{ diff --git a/proxy/internal/accesslog/logger.go b/proxy/internal/accesslog/logger.go index 4ba5a7755..5b05ab195 100644 --- a/proxy/internal/accesslog/logger.go +++ b/proxy/internal/accesslog/logger.go @@ -6,11 +6,13 @@ import ( "sync" "time" + "github.com/rs/xid" log "github.com/sirupsen/logrus" "google.golang.org/grpc" "google.golang.org/protobuf/types/known/timestamppb" "github.com/netbirdio/netbird/proxy/auth" + "github.com/netbirdio/netbird/proxy/internal/types" "github.com/netbirdio/netbird/shared/management/proto" ) @@ -19,6 +21,7 @@ const ( bytesThreshold = 1024 * 1024 * 1024 // Log every 1GB usageCleanupPeriod = 1 * time.Hour // Clean up stale counters every hour usageInactiveWindow = 24 * time.Hour // Consider domain inactive if no traffic for 24 hours + logSendTimeout = 10 * time.Second ) type domainUsage struct { @@ -79,22 +82,63 @@ func (l *Logger) Close() { type logEntry struct { ID string - AccountID string - ServiceId string + AccountID types.AccountID + ServiceId types.ServiceID Host string Path string DurationMs int64 Method string ResponseCode int32 - SourceIp string + SourceIP netip.Addr AuthMechanism string UserId string AuthSuccess bool BytesUpload int64 BytesDownload int64 + Protocol Protocol } -func (l *Logger) log(ctx context.Context, entry logEntry) { +// Protocol identifies the transport protocol of an access log entry. +type Protocol string + +const ( + ProtocolHTTP Protocol = "http" + ProtocolTCP Protocol = "tcp" + ProtocolUDP Protocol = "udp" + ProtocolTLS Protocol = "tls" +) + +// L4Entry holds the data for a layer-4 (TCP/UDP) access log entry. +type L4Entry struct { + AccountID types.AccountID + ServiceID types.ServiceID + Protocol Protocol + Host string // SNI hostname or listen address + SourceIP netip.Addr + DurationMs int64 + BytesUpload int64 + BytesDownload int64 +} + +// LogL4 sends an access log entry for a layer-4 connection (TCP or UDP). +// The call is non-blocking: the gRPC send happens in a background goroutine. +func (l *Logger) LogL4(entry L4Entry) { + le := logEntry{ + ID: xid.New().String(), + AccountID: entry.AccountID, + ServiceId: entry.ServiceID, + Protocol: entry.Protocol, + Host: entry.Host, + SourceIP: entry.SourceIP, + DurationMs: entry.DurationMs, + BytesUpload: entry.BytesUpload, + BytesDownload: entry.BytesDownload, + } + l.log(le) + l.trackUsage(entry.Host, entry.BytesUpload+entry.BytesDownload) +} + +func (l *Logger) log(entry logEntry) { // Fire off the log request in a separate routine. // This increases the possibility of losing a log message // (although it should still get logged in the event of an error), @@ -105,31 +149,37 @@ func (l *Logger) log(ctx context.Context, entry logEntry) { // allow for resolving that on the server. now := timestamppb.Now() // Grab the timestamp before launching the goroutine to try to prevent weird timing issues. This is probably unnecessary. go func() { - logCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + logCtx, cancel := context.WithTimeout(context.Background(), logSendTimeout) defer cancel() if entry.AuthMechanism != auth.MethodOIDC.String() { entry.UserId = "" } + + var sourceIP string + if entry.SourceIP.IsValid() { + sourceIP = entry.SourceIP.String() + } + if _, err := l.client.SendAccessLog(logCtx, &proto.SendAccessLogRequest{ Log: &proto.AccessLog{ LogId: entry.ID, - AccountId: entry.AccountID, + AccountId: string(entry.AccountID), Timestamp: now, - ServiceId: entry.ServiceId, + ServiceId: string(entry.ServiceId), Host: entry.Host, Path: entry.Path, DurationMs: entry.DurationMs, Method: entry.Method, ResponseCode: entry.ResponseCode, - SourceIp: entry.SourceIp, + SourceIp: sourceIP, AuthMechanism: entry.AuthMechanism, UserId: entry.UserId, AuthSuccess: entry.AuthSuccess, BytesUpload: entry.BytesUpload, BytesDownload: entry.BytesDownload, + Protocol: string(entry.Protocol), }, }); err != nil { - // If it fails to send on the gRPC connection, then at least log it to the error log. l.logger.WithFields(log.Fields{ "service_id": entry.ServiceId, "host": entry.Host, @@ -137,7 +187,7 @@ func (l *Logger) log(ctx context.Context, entry logEntry) { "duration": entry.DurationMs, "method": entry.Method, "response_code": entry.ResponseCode, - "source_ip": entry.SourceIp, + "source_ip": sourceIP, "auth_mechanism": entry.AuthMechanism, "user_id": entry.UserId, "auth_success": entry.AuthSuccess, diff --git a/proxy/internal/accesslog/middleware.go b/proxy/internal/accesslog/middleware.go index 7368185c0..593a77ef2 100644 --- a/proxy/internal/accesslog/middleware.go +++ b/proxy/internal/accesslog/middleware.go @@ -67,23 +67,24 @@ func (l *Logger) Middleware(next http.Handler) http.Handler { entry := logEntry{ ID: requestID, ServiceId: capturedData.GetServiceId(), - AccountID: string(capturedData.GetAccountId()), + AccountID: capturedData.GetAccountId(), Host: host, Path: r.URL.Path, DurationMs: duration.Milliseconds(), Method: r.Method, ResponseCode: int32(sw.status), - SourceIp: sourceIp, + SourceIP: sourceIp, AuthMechanism: capturedData.GetAuthMethod(), UserId: capturedData.GetUserID(), AuthSuccess: sw.status != http.StatusUnauthorized && sw.status != http.StatusForbidden, BytesUpload: bytesUpload, BytesDownload: bytesDownload, + Protocol: ProtocolHTTP, } l.logger.Debugf("response: request_id=%s method=%s host=%s path=%s status=%d duration=%dms source=%s origin=%s service=%s account=%s", requestID, r.Method, host, r.URL.Path, sw.status, duration.Milliseconds(), sourceIp, capturedData.GetOrigin(), capturedData.GetServiceId(), capturedData.GetAccountId()) - l.log(r.Context(), entry) + l.log(entry) // Track usage for cost monitoring (upload + download) by domain l.trackUsage(host, bytesUpload+bytesDownload) diff --git a/proxy/internal/accesslog/requestip.go b/proxy/internal/accesslog/requestip.go index f111c1322..30c483fd9 100644 --- a/proxy/internal/accesslog/requestip.go +++ b/proxy/internal/accesslog/requestip.go @@ -11,6 +11,6 @@ import ( // proxy configuration. When trustedProxies is non-empty and the direct // connection is from a trusted source, it walks X-Forwarded-For right-to-left // skipping trusted IPs. Otherwise it returns RemoteAddr directly. -func extractSourceIP(r *http.Request, trustedProxies []netip.Prefix) string { +func extractSourceIP(r *http.Request, trustedProxies []netip.Prefix) netip.Addr { return proxy.ResolveClientIP(r.RemoteAddr, r.Header.Get("X-Forwarded-For"), trustedProxies) } diff --git a/proxy/internal/acme/manager.go b/proxy/internal/acme/manager.go index 395da7d88..a4a220ed7 100644 --- a/proxy/internal/acme/manager.go +++ b/proxy/internal/acme/manager.go @@ -23,6 +23,7 @@ import ( "golang.org/x/crypto/acme/autocert" "github.com/netbirdio/netbird/proxy/internal/certwatch" + "github.com/netbirdio/netbird/proxy/internal/types" "github.com/netbirdio/netbird/shared/management/domain" ) @@ -30,7 +31,7 @@ import ( var oidSCTList = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2} type certificateNotifier interface { - NotifyCertificateIssued(ctx context.Context, accountID, serviceID, domain string) error + NotifyCertificateIssued(ctx context.Context, accountID types.AccountID, serviceID types.ServiceID, domain string) error } type domainState int @@ -42,8 +43,8 @@ const ( ) type domainInfo struct { - accountID string - serviceID string + accountID types.AccountID + serviceID types.ServiceID state domainState err string } @@ -301,7 +302,7 @@ func (mgr *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate // When AddDomain returns true the caller is responsible for sending any // certificate-ready notifications after the surrounding operation (e.g. // mapping update) has committed successfully. -func (mgr *Manager) AddDomain(d domain.Domain, accountID, serviceID string) (wildcardHit bool) { +func (mgr *Manager) AddDomain(d domain.Domain, accountID types.AccountID, serviceID types.ServiceID) (wildcardHit bool) { name := d.PunycodeString() if e := mgr.findWildcardEntry(name); e != nil { mgr.mu.Lock() diff --git a/proxy/internal/acme/manager_test.go b/proxy/internal/acme/manager_test.go index 9a3ed9efd..ceb9ca13a 100644 --- a/proxy/internal/acme/manager_test.go +++ b/proxy/internal/acme/manager_test.go @@ -17,12 +17,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/proxy/internal/types" ) func TestHostPolicy(t *testing.T) { mgr, err := NewManager(ManagerConfig{CertDir: t.TempDir(), ACMEURL: "https://acme.example.com/directory"}, nil, nil, nil) require.NoError(t, err) - mgr.AddDomain("example.com", "acc1", "rp1") + mgr.AddDomain("example.com", types.AccountID("acc1"), types.ServiceID("rp1")) // Wait for the background prefetch goroutine to finish so the temp dir // can be cleaned up without a race. @@ -92,8 +94,8 @@ func TestDomainStates(t *testing.T) { // AddDomain starts as pending, then the prefetch goroutine will fail // (no real ACME server) and transition to failed. - mgr.AddDomain("a.example.com", "acc1", "rp1") - mgr.AddDomain("b.example.com", "acc1", "rp1") + mgr.AddDomain("a.example.com", types.AccountID("acc1"), types.ServiceID("rp1")) + mgr.AddDomain("b.example.com", types.AccountID("acc1"), types.ServiceID("rp1")) assert.Equal(t, 2, mgr.TotalDomains(), "two domains registered") @@ -209,12 +211,12 @@ func TestWildcardAddDomainSkipsACME(t *testing.T) { require.NoError(t, err) // Add a wildcard-matching domain — should be immediately ready. - mgr.AddDomain("foo.example.com", "acc1", "svc1") + mgr.AddDomain("foo.example.com", types.AccountID("acc1"), types.ServiceID("svc1")) assert.Equal(t, 0, mgr.PendingCerts(), "wildcard domain should not be pending") assert.Equal(t, []string{"foo.example.com"}, mgr.ReadyDomains()) // Add a non-wildcard domain — should go through ACME (pending then failed). - mgr.AddDomain("other.net", "acc2", "svc2") + mgr.AddDomain("other.net", types.AccountID("acc2"), types.ServiceID("svc2")) assert.Equal(t, 2, mgr.TotalDomains()) // Wait for the ACME prefetch to fail. @@ -234,7 +236,7 @@ func TestWildcardGetCertificate(t *testing.T) { mgr, err := NewManager(ManagerConfig{CertDir: acmeDir, ACMEURL: "https://acme.example.com/directory", WildcardDir: wcDir}, nil, nil, nil) require.NoError(t, err) - mgr.AddDomain("foo.example.com", "acc1", "svc1") + mgr.AddDomain("foo.example.com", types.AccountID("acc1"), types.ServiceID("svc1")) // GetCertificate for a wildcard-matching domain should return the static cert. cert, err := mgr.GetCertificate(&tls.ClientHelloInfo{ServerName: "foo.example.com"}) @@ -255,8 +257,8 @@ func TestMultipleWildcards(t *testing.T) { assert.ElementsMatch(t, []string{"*.example.com", "*.other.org"}, mgr.WildcardPatterns()) // Both wildcards should resolve. - mgr.AddDomain("foo.example.com", "acc1", "svc1") - mgr.AddDomain("bar.other.org", "acc2", "svc2") + mgr.AddDomain("foo.example.com", types.AccountID("acc1"), types.ServiceID("svc1")) + mgr.AddDomain("bar.other.org", types.AccountID("acc2"), types.ServiceID("svc2")) assert.Equal(t, 0, mgr.PendingCerts()) assert.ElementsMatch(t, []string{"foo.example.com", "bar.other.org"}, mgr.ReadyDomains()) @@ -271,7 +273,7 @@ func TestMultipleWildcards(t *testing.T) { assert.Contains(t, cert2.Leaf.DNSNames, "*.other.org") // Non-matching domain falls through to ACME. - mgr.AddDomain("custom.net", "acc3", "svc3") + mgr.AddDomain("custom.net", types.AccountID("acc3"), types.ServiceID("svc3")) assert.Eventually(t, func() bool { return mgr.PendingCerts() == 0 }, 30*time.Second, 100*time.Millisecond) diff --git a/proxy/internal/auth/middleware.go b/proxy/internal/auth/middleware.go index 8a966faa3..3cf86e4b3 100644 --- a/proxy/internal/auth/middleware.go +++ b/proxy/internal/auth/middleware.go @@ -44,8 +44,8 @@ type DomainConfig struct { Schemes []Scheme SessionPublicKey ed25519.PublicKey SessionExpiration time.Duration - AccountID string - ServiceID string + AccountID types.AccountID + ServiceID types.ServiceID } type validationResult struct { @@ -124,7 +124,7 @@ func (mw *Middleware) getDomainConfig(host string) (DomainConfig, bool) { func setCapturedIDs(r *http.Request, config DomainConfig) { if cd := proxy.CapturedDataFromContext(r.Context()); cd != nil { - cd.SetAccountId(types.AccountID(config.AccountID)) + cd.SetAccountId(config.AccountID) cd.SetServiceId(config.ServiceID) } } @@ -275,7 +275,7 @@ func wasCredentialSubmitted(r *http.Request, method auth.Method) bool { // session JWTs. Returns an error if the key is missing or invalid. // Callers must not serve the domain if this returns an error, to avoid // exposing an unauthenticated service. -func (mw *Middleware) AddDomain(domain string, schemes []Scheme, publicKeyB64 string, expiration time.Duration, accountID, serviceID string) error { +func (mw *Middleware) AddDomain(domain string, schemes []Scheme, publicKeyB64 string, expiration time.Duration, accountID types.AccountID, serviceID types.ServiceID) error { if len(schemes) == 0 { mw.domainsMux.Lock() defer mw.domainsMux.Unlock() diff --git a/proxy/internal/auth/oidc.go b/proxy/internal/auth/oidc.go index bf178d432..a60e6437a 100644 --- a/proxy/internal/auth/oidc.go +++ b/proxy/internal/auth/oidc.go @@ -9,6 +9,7 @@ import ( "google.golang.org/grpc" "github.com/netbirdio/netbird/proxy/auth" + "github.com/netbirdio/netbird/proxy/internal/types" "github.com/netbirdio/netbird/shared/management/proto" ) @@ -17,14 +18,14 @@ type urlGenerator interface { } type OIDC struct { - id string - accountId string + id types.ServiceID + accountId types.AccountID forwardedProto string client urlGenerator } // NewOIDC creates a new OIDC authentication scheme -func NewOIDC(client urlGenerator, id, accountId, forwardedProto string) OIDC { +func NewOIDC(client urlGenerator, id types.ServiceID, accountId types.AccountID, forwardedProto string) OIDC { return OIDC{ id: id, accountId: accountId, @@ -53,8 +54,8 @@ func (o OIDC) Authenticate(r *http.Request) (string, string, error) { } res, err := o.client.GetOIDCURL(r.Context(), &proto.GetOIDCURLRequest{ - Id: o.id, - AccountId: o.accountId, + Id: string(o.id), + AccountId: string(o.accountId), RedirectUrl: redirectURL.String(), }) if err != nil { diff --git a/proxy/internal/auth/password.go b/proxy/internal/auth/password.go index 208423465..6a7eda3e1 100644 --- a/proxy/internal/auth/password.go +++ b/proxy/internal/auth/password.go @@ -5,17 +5,19 @@ import ( "net/http" "github.com/netbirdio/netbird/proxy/auth" + "github.com/netbirdio/netbird/proxy/internal/types" "github.com/netbirdio/netbird/shared/management/proto" ) const passwordFormId = "password" type Password struct { - id, accountId string - client authenticator + id types.ServiceID + accountId types.AccountID + client authenticator } -func NewPassword(client authenticator, id, accountId string) Password { +func NewPassword(client authenticator, id types.ServiceID, accountId types.AccountID) Password { return Password{ id: id, accountId: accountId, @@ -41,8 +43,8 @@ func (p Password) Authenticate(r *http.Request) (string, string, error) { } res, err := p.client.Authenticate(r.Context(), &proto.AuthenticateRequest{ - Id: p.id, - AccountId: p.accountId, + Id: string(p.id), + AccountId: string(p.accountId), Request: &proto.AuthenticateRequest_Password{ Password: &proto.PasswordRequest{ Password: password, diff --git a/proxy/internal/auth/pin.go b/proxy/internal/auth/pin.go index c1eb56071..4d08f3dc6 100644 --- a/proxy/internal/auth/pin.go +++ b/proxy/internal/auth/pin.go @@ -5,17 +5,19 @@ import ( "net/http" "github.com/netbirdio/netbird/proxy/auth" + "github.com/netbirdio/netbird/proxy/internal/types" "github.com/netbirdio/netbird/shared/management/proto" ) const pinFormId = "pin" type Pin struct { - id, accountId string - client authenticator + id types.ServiceID + accountId types.AccountID + client authenticator } -func NewPin(client authenticator, id, accountId string) Pin { +func NewPin(client authenticator, id types.ServiceID, accountId types.AccountID) Pin { return Pin{ id: id, accountId: accountId, @@ -41,8 +43,8 @@ func (p Pin) Authenticate(r *http.Request) (string, string, error) { } res, err := p.client.Authenticate(r.Context(), &proto.AuthenticateRequest{ - Id: p.id, - AccountId: p.accountId, + Id: string(p.id), + AccountId: string(p.accountId), Request: &proto.AuthenticateRequest_Pin{ Pin: &proto.PinRequest{ Pin: pin, diff --git a/proxy/internal/conntrack/conn.go b/proxy/internal/conntrack/conn.go index 97055d992..8446d638f 100644 --- a/proxy/internal/conntrack/conn.go +++ b/proxy/internal/conntrack/conn.go @@ -10,10 +10,11 @@ import ( type trackedConn struct { net.Conn tracker *HijackTracker + host string } func (c *trackedConn) Close() error { - c.tracker.conns.Delete(c) + c.tracker.remove(c) return c.Conn.Close() } @@ -22,6 +23,7 @@ func (c *trackedConn) Close() error { type trackingWriter struct { http.ResponseWriter tracker *HijackTracker + host string } func (w *trackingWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { @@ -33,8 +35,8 @@ func (w *trackingWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { if err != nil { return nil, nil, err } - tc := &trackedConn{Conn: conn, tracker: w.tracker} - w.tracker.conns.Store(tc, struct{}{}) + tc := &trackedConn{Conn: conn, tracker: w.tracker, host: w.host} + w.tracker.add(tc) return tc, buf, nil } diff --git a/proxy/internal/conntrack/hijacked.go b/proxy/internal/conntrack/hijacked.go index d76cebc08..911f93f3d 100644 --- a/proxy/internal/conntrack/hijacked.go +++ b/proxy/internal/conntrack/hijacked.go @@ -1,7 +1,6 @@ package conntrack import ( - "net" "net/http" "sync" ) @@ -10,10 +9,14 @@ import ( // upgrades). http.Server.Shutdown does not close hijacked connections, so // they must be tracked and closed explicitly during graceful shutdown. // +// Connections are indexed by the request Host so they can be closed +// per-domain when a service mapping is removed. +// // Use Middleware as the outermost HTTP middleware to ensure hijacked // connections are tracked and automatically deregistered when closed. type HijackTracker struct { - conns sync.Map // net.Conn → struct{} + mu sync.Mutex + conns map[*trackedConn]struct{} } // Middleware returns an HTTP middleware that wraps the ResponseWriter so that @@ -21,21 +24,73 @@ type HijackTracker struct { // tracker when closed. This should be the outermost middleware in the chain. func (t *HijackTracker) Middleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - next.ServeHTTP(&trackingWriter{ResponseWriter: w, tracker: t}, r) + next.ServeHTTP(&trackingWriter{ + ResponseWriter: w, + tracker: t, + host: hostOnly(r.Host), + }, r) }) } -// CloseAll closes all tracked hijacked connections and returns the number -// of connections that were closed. +// CloseAll closes all tracked hijacked connections and returns the count. func (t *HijackTracker) CloseAll() int { - var count int - t.conns.Range(func(key, _ any) bool { - if conn, ok := key.(net.Conn); ok { - _ = conn.Close() - count++ - } - t.conns.Delete(key) - return true - }) - return count + t.mu.Lock() + conns := t.conns + t.conns = nil + t.mu.Unlock() + + for tc := range conns { + _ = tc.Conn.Close() + } + return len(conns) +} + +// CloseByHost closes all tracked hijacked connections for the given host +// and returns the number of connections closed. +func (t *HijackTracker) CloseByHost(host string) int { + host = hostOnly(host) + t.mu.Lock() + var toClose []*trackedConn + for tc := range t.conns { + if tc.host == host { + toClose = append(toClose, tc) + } + } + for _, tc := range toClose { + delete(t.conns, tc) + } + t.mu.Unlock() + + for _, tc := range toClose { + _ = tc.Conn.Close() + } + return len(toClose) +} + +func (t *HijackTracker) add(tc *trackedConn) { + t.mu.Lock() + if t.conns == nil { + t.conns = make(map[*trackedConn]struct{}) + } + t.conns[tc] = struct{}{} + t.mu.Unlock() +} + +func (t *HijackTracker) remove(tc *trackedConn) { + t.mu.Lock() + delete(t.conns, tc) + t.mu.Unlock() +} + +// hostOnly strips the port from a host:port string. +func hostOnly(hostport string) string { + for i := len(hostport) - 1; i >= 0; i-- { + if hostport[i] == ':' { + return hostport[:i] + } + if hostport[i] < '0' || hostport[i] > '9' { + return hostport + } + } + return hostport } diff --git a/proxy/internal/conntrack/hijacked_test.go b/proxy/internal/conntrack/hijacked_test.go new file mode 100644 index 000000000..9ceefff78 --- /dev/null +++ b/proxy/internal/conntrack/hijacked_test.go @@ -0,0 +1,142 @@ +package conntrack + +import ( + "bufio" + "net" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// fakeHijackWriter implements http.ResponseWriter and http.Hijacker for testing. +type fakeHijackWriter struct { + http.ResponseWriter + conn net.Conn +} + +func (f *fakeHijackWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + rw := bufio.NewReadWriter(bufio.NewReader(f.conn), bufio.NewWriter(f.conn)) + return f.conn, rw, nil +} + +func TestCloseByHost(t *testing.T) { + var tracker HijackTracker + + // Simulate hijacking two connections for different hosts. + connA1, connA2 := net.Pipe() + defer connA2.Close() + connB1, connB2 := net.Pipe() + defer connB2.Close() + + twA := &trackingWriter{ + ResponseWriter: httptest.NewRecorder(), + tracker: &tracker, + host: "a.example.com", + } + twB := &trackingWriter{ + ResponseWriter: httptest.NewRecorder(), + tracker: &tracker, + host: "b.example.com", + } + + // Use fakeHijackWriter to provide the Hijack method. + twA.ResponseWriter = &fakeHijackWriter{ResponseWriter: twA.ResponseWriter, conn: connA1} + twB.ResponseWriter = &fakeHijackWriter{ResponseWriter: twB.ResponseWriter, conn: connB1} + + _, _, err := twA.Hijack() + require.NoError(t, err) + _, _, err = twB.Hijack() + require.NoError(t, err) + + tracker.mu.Lock() + assert.Equal(t, 2, len(tracker.conns), "should track 2 connections") + tracker.mu.Unlock() + + // Close only host A. + n := tracker.CloseByHost("a.example.com") + assert.Equal(t, 1, n, "should close 1 connection for host A") + + tracker.mu.Lock() + assert.Equal(t, 1, len(tracker.conns), "should have 1 remaining connection") + tracker.mu.Unlock() + + // Verify host A's conn is actually closed. + buf := make([]byte, 1) + _, err = connA2.Read(buf) + assert.Error(t, err, "host A pipe should be closed") + + // Host B should still be alive. + go func() { _, _ = connB1.Write([]byte("x")) }() + + // Close all remaining. + n = tracker.CloseAll() + assert.Equal(t, 1, n, "should close remaining 1 connection") + + tracker.mu.Lock() + assert.Equal(t, 0, len(tracker.conns), "should have 0 connections after CloseAll") + tracker.mu.Unlock() +} + +func TestCloseAll(t *testing.T) { + var tracker HijackTracker + + for range 5 { + c1, c2 := net.Pipe() + defer c2.Close() + tc := &trackedConn{Conn: c1, tracker: &tracker, host: "test.com"} + tracker.add(tc) + } + + tracker.mu.Lock() + assert.Equal(t, 5, len(tracker.conns)) + tracker.mu.Unlock() + + n := tracker.CloseAll() + assert.Equal(t, 5, n) + + // Double CloseAll is safe. + n = tracker.CloseAll() + assert.Equal(t, 0, n) +} + +func TestTrackedConn_AutoDeregister(t *testing.T) { + var tracker HijackTracker + + c1, c2 := net.Pipe() + defer c2.Close() + + tc := &trackedConn{Conn: c1, tracker: &tracker, host: "auto.com"} + tracker.add(tc) + + tracker.mu.Lock() + assert.Equal(t, 1, len(tracker.conns)) + tracker.mu.Unlock() + + // Close the tracked conn: should auto-deregister. + require.NoError(t, tc.Close()) + + tracker.mu.Lock() + assert.Equal(t, 0, len(tracker.conns), "should auto-deregister on close") + tracker.mu.Unlock() +} + +func TestHostOnly(t *testing.T) { + tests := []struct { + input string + want string + }{ + {"example.com:443", "example.com"}, + {"example.com", "example.com"}, + {"127.0.0.1:8080", "127.0.0.1"}, + {"[::1]:443", "[::1]"}, + {"", ""}, + } + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + assert.Equal(t, tt.want, hostOnly(tt.input)) + }) + } +} diff --git a/proxy/internal/debug/client.go b/proxy/internal/debug/client.go index 885c574bc..01b0bc8e6 100644 --- a/proxy/internal/debug/client.go +++ b/proxy/internal/debug/client.go @@ -152,7 +152,7 @@ func (c *Client) printClients(data map[string]any) { return } - _, _ = fmt.Fprintf(c.out, "%-38s %-12s %-40s %s\n", "ACCOUNT ID", "AGE", "DOMAINS", "HAS CLIENT") + _, _ = fmt.Fprintf(c.out, "%-38s %-12s %-40s %s\n", "ACCOUNT ID", "AGE", "SERVICES", "HAS CLIENT") _, _ = fmt.Fprintln(c.out, strings.Repeat("-", 110)) for _, item := range clients { @@ -166,7 +166,7 @@ func (c *Client) printClientRow(item any) { return } - domains := c.extractDomains(client) + services := c.extractServiceKeys(client) hasClient := "no" if hc, ok := client["has_client"].(bool); ok && hc { hasClient = "yes" @@ -175,20 +175,20 @@ func (c *Client) printClientRow(item any) { _, _ = fmt.Fprintf(c.out, "%-38s %-12v %s %s\n", client["account_id"], client["age"], - domains, + services, hasClient, ) } -func (c *Client) extractDomains(client map[string]any) string { - d, ok := client["domains"].([]any) +func (c *Client) extractServiceKeys(client map[string]any) string { + d, ok := client["service_keys"].([]any) if !ok || len(d) == 0 { return "-" } parts := make([]string, len(d)) - for i, domain := range d { - parts[i] = fmt.Sprint(domain) + for i, key := range d { + parts[i] = fmt.Sprint(key) } return strings.Join(parts, ", ") } diff --git a/proxy/internal/debug/handler.go b/proxy/internal/debug/handler.go index ab75c8b72..237010922 100644 --- a/proxy/internal/debug/handler.go +++ b/proxy/internal/debug/handler.go @@ -189,7 +189,7 @@ type indexData struct { Version string Uptime string ClientCount int - TotalDomains int + TotalServices int CertsTotal int CertsReady int CertsPending int @@ -202,7 +202,7 @@ type indexData struct { type clientData struct { AccountID string - Domains string + Services string Age string Status string } @@ -211,9 +211,9 @@ func (h *Handler) handleIndex(w http.ResponseWriter, _ *http.Request, wantJSON b clients := h.provider.ListClientsForDebug() sortedIDs := sortedAccountIDs(clients) - totalDomains := 0 + totalServices := 0 for _, info := range clients { - totalDomains += info.DomainCount + totalServices += info.ServiceCount } var certsTotal, certsReady, certsPending, certsFailed int @@ -234,24 +234,24 @@ func (h *Handler) handleIndex(w http.ResponseWriter, _ *http.Request, wantJSON b for _, id := range sortedIDs { info := clients[id] clientsJSON = append(clientsJSON, map[string]interface{}{ - "account_id": info.AccountID, - "domain_count": info.DomainCount, - "domains": info.Domains, - "has_client": info.HasClient, - "created_at": info.CreatedAt, - "age": time.Since(info.CreatedAt).Round(time.Second).String(), + "account_id": info.AccountID, + "service_count": info.ServiceCount, + "service_keys": info.ServiceKeys, + "has_client": info.HasClient, + "created_at": info.CreatedAt, + "age": time.Since(info.CreatedAt).Round(time.Second).String(), }) } resp := map[string]interface{}{ - "version": version.NetbirdVersion(), - "uptime": time.Since(h.startTime).Round(time.Second).String(), - "client_count": len(clients), - "total_domains": totalDomains, - "certs_total": certsTotal, - "certs_ready": certsReady, - "certs_pending": certsPending, - "certs_failed": certsFailed, - "clients": clientsJSON, + "version": version.NetbirdVersion(), + "uptime": time.Since(h.startTime).Round(time.Second).String(), + "client_count": len(clients), + "total_services": totalServices, + "certs_total": certsTotal, + "certs_ready": certsReady, + "certs_pending": certsPending, + "certs_failed": certsFailed, + "clients": clientsJSON, } if len(certsPendingDomains) > 0 { resp["certs_pending_domains"] = certsPendingDomains @@ -278,7 +278,7 @@ func (h *Handler) handleIndex(w http.ResponseWriter, _ *http.Request, wantJSON b Version: version.NetbirdVersion(), Uptime: time.Since(h.startTime).Round(time.Second).String(), ClientCount: len(clients), - TotalDomains: totalDomains, + TotalServices: totalServices, CertsTotal: certsTotal, CertsReady: certsReady, CertsPending: certsPending, @@ -291,9 +291,9 @@ func (h *Handler) handleIndex(w http.ResponseWriter, _ *http.Request, wantJSON b for _, id := range sortedIDs { info := clients[id] - domains := info.Domains.SafeString() - if domains == "" { - domains = "-" + services := strings.Join(info.ServiceKeys, ", ") + if services == "" { + services = "-" } status := "No client" if info.HasClient { @@ -301,7 +301,7 @@ func (h *Handler) handleIndex(w http.ResponseWriter, _ *http.Request, wantJSON b } data.Clients = append(data.Clients, clientData{ AccountID: string(info.AccountID), - Domains: domains, + Services: services, Age: time.Since(info.CreatedAt).Round(time.Second).String(), Status: status, }) @@ -324,12 +324,12 @@ func (h *Handler) handleListClients(w http.ResponseWriter, _ *http.Request, want for _, id := range sortedIDs { info := clients[id] clientsJSON = append(clientsJSON, map[string]interface{}{ - "account_id": info.AccountID, - "domain_count": info.DomainCount, - "domains": info.Domains, - "has_client": info.HasClient, - "created_at": info.CreatedAt, - "age": time.Since(info.CreatedAt).Round(time.Second).String(), + "account_id": info.AccountID, + "service_count": info.ServiceCount, + "service_keys": info.ServiceKeys, + "has_client": info.HasClient, + "created_at": info.CreatedAt, + "age": time.Since(info.CreatedAt).Round(time.Second).String(), }) } h.writeJSON(w, map[string]interface{}{ @@ -347,9 +347,9 @@ func (h *Handler) handleListClients(w http.ResponseWriter, _ *http.Request, want for _, id := range sortedIDs { info := clients[id] - domains := info.Domains.SafeString() - if domains == "" { - domains = "-" + services := strings.Join(info.ServiceKeys, ", ") + if services == "" { + services = "-" } status := "No client" if info.HasClient { @@ -357,7 +357,7 @@ func (h *Handler) handleListClients(w http.ResponseWriter, _ *http.Request, want } data.Clients = append(data.Clients, clientData{ AccountID: string(info.AccountID), - Domains: domains, + Services: services, Age: time.Since(info.CreatedAt).Round(time.Second).String(), Status: status, }) diff --git a/proxy/internal/debug/templates/clients.html b/proxy/internal/debug/templates/clients.html index 4d455b2bb..bfc25f95a 100644 --- a/proxy/internal/debug/templates/clients.html +++ b/proxy/internal/debug/templates/clients.html @@ -12,14 +12,14 @@ - + {{range .Clients}} - + diff --git a/proxy/internal/debug/templates/index.html b/proxy/internal/debug/templates/index.html index 16ab3d979..5bd25adfc 100644 --- a/proxy/internal/debug/templates/index.html +++ b/proxy/internal/debug/templates/index.html @@ -27,19 +27,19 @@
      {{range .CertsFailedDomains}}
    • {{.Domain}}: {{.Error}}
    • {{end}}
    {{end}} -

    Clients ({{.ClientCount}}) | Domains ({{.TotalDomains}})

    +

    Clients ({{.ClientCount}}) | Services ({{.TotalServices}})

    {{if .Clients}}
    Account IDDomainsServices Age Status
    {{.AccountID}}{{.Domains}}{{.Services}} {{.Age}} {{.Status}}
    - + {{range .Clients}} - + diff --git a/proxy/internal/metrics/l4_metrics_test.go b/proxy/internal/metrics/l4_metrics_test.go new file mode 100644 index 000000000..055158828 --- /dev/null +++ b/proxy/internal/metrics/l4_metrics_test.go @@ -0,0 +1,69 @@ +package metrics_test + +import ( + "context" + "reflect" + "testing" + "time" + + promexporter "go.opentelemetry.io/otel/exporters/prometheus" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + + "github.com/netbirdio/netbird/proxy/internal/metrics" + "github.com/netbirdio/netbird/proxy/internal/types" +) + +func newTestMetrics(t *testing.T) *metrics.Metrics { + t.Helper() + + exporter, err := promexporter.New() + if err != nil { + t.Fatalf("create prometheus exporter: %v", err) + } + + provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter)) + pkg := reflect.TypeOf(metrics.Metrics{}).PkgPath() + meter := provider.Meter(pkg) + + m, err := metrics.New(context.Background(), meter) + if err != nil { + t.Fatalf("create metrics: %v", err) + } + return m +} + +func TestL4ServiceGauge(t *testing.T) { + m := newTestMetrics(t) + + m.L4ServiceAdded(types.ServiceModeTCP) + m.L4ServiceAdded(types.ServiceModeTCP) + m.L4ServiceAdded(types.ServiceModeUDP) + m.L4ServiceRemoved(types.ServiceModeTCP) +} + +func TestTCPRelayMetrics(t *testing.T) { + m := newTestMetrics(t) + + acct := types.AccountID("acct-1") + + m.TCPRelayStarted(acct) + m.TCPRelayStarted(acct) + m.TCPRelayEnded(acct, 10*time.Second, 1000, 500) + m.TCPRelayDialError(acct) + m.TCPRelayRejected(acct) +} + +func TestUDPSessionMetrics(t *testing.T) { + m := newTestMetrics(t) + + acct := types.AccountID("acct-2") + + m.UDPSessionStarted(acct) + m.UDPSessionStarted(acct) + m.UDPSessionEnded(acct) + m.UDPSessionDialError(acct) + m.UDPSessionRejected(acct) + m.UDPPacketRelayed(types.RelayDirectionClientToBackend, 100) + m.UDPPacketRelayed(types.RelayDirectionClientToBackend, 200) + m.UDPPacketRelayed(types.RelayDirectionBackendToClient, 150) +} diff --git a/proxy/internal/metrics/metrics.go b/proxy/internal/metrics/metrics.go index 68ff55fe5..573485625 100644 --- a/proxy/internal/metrics/metrics.go +++ b/proxy/internal/metrics/metrics.go @@ -6,12 +6,15 @@ import ( "sync" "time" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "github.com/netbirdio/netbird/proxy/internal/proxy" "github.com/netbirdio/netbird/proxy/internal/responsewriter" + "github.com/netbirdio/netbird/proxy/internal/types" ) +// Metrics collects OpenTelemetry metrics for the proxy. type Metrics struct { ctx context.Context requestsTotal metric.Int64Counter @@ -22,85 +25,188 @@ type Metrics struct { backendDuration metric.Int64Histogram certificateIssueDuration metric.Int64Histogram + // L4 service-level metrics. + l4Services metric.Int64UpDownCounter + + // L4 TCP connection-level metrics. + tcpActiveConns metric.Int64UpDownCounter + tcpConnsTotal metric.Int64Counter + tcpConnDuration metric.Int64Histogram + tcpBytesTotal metric.Int64Counter + + // L4 UDP session-level metrics. + udpActiveSess metric.Int64UpDownCounter + udpSessionsTotal metric.Int64Counter + udpPacketsTotal metric.Int64Counter + udpBytesTotal metric.Int64Counter + mappingsMux sync.Mutex mappingPaths map[string]int } +// New creates a Metrics instance using the given OpenTelemetry meter. func New(ctx context.Context, meter metric.Meter) (*Metrics, error) { - requestsTotal, err := meter.Int64Counter( + m := &Metrics{ + ctx: ctx, + mappingPaths: make(map[string]int), + } + + if err := m.initHTTPMetrics(meter); err != nil { + return nil, err + } + if err := m.initL4Metrics(meter); err != nil { + return nil, err + } + + return m, nil +} + +func (m *Metrics) initHTTPMetrics(meter metric.Meter) error { + var err error + + m.requestsTotal, err = meter.Int64Counter( "proxy.http.request.counter", metric.WithUnit("1"), metric.WithDescription("Total number of requests made to the netbird proxy"), ) if err != nil { - return nil, err + return err } - activeRequests, err := meter.Int64UpDownCounter( + m.activeRequests, err = meter.Int64UpDownCounter( "proxy.http.active_requests", metric.WithUnit("1"), metric.WithDescription("Current in-flight requests handled by the netbird proxy"), ) if err != nil { - return nil, err + return err } - configuredDomains, err := meter.Int64UpDownCounter( + m.configuredDomains, err = meter.Int64UpDownCounter( "proxy.domains.count", metric.WithUnit("1"), metric.WithDescription("Current number of domains configured on the netbird proxy"), ) if err != nil { - return nil, err + return err } - totalPaths, err := meter.Int64UpDownCounter( + m.totalPaths, err = meter.Int64UpDownCounter( "proxy.paths.count", metric.WithUnit("1"), metric.WithDescription("Total number of paths configured on the netbird proxy"), ) if err != nil { - return nil, err + return err } - requestDuration, err := meter.Int64Histogram( + m.requestDuration, err = meter.Int64Histogram( "proxy.http.request.duration.ms", metric.WithUnit("milliseconds"), metric.WithDescription("Duration of requests made to the netbird proxy"), ) if err != nil { - return nil, err + return err } - backendDuration, err := meter.Int64Histogram( + m.backendDuration, err = meter.Int64Histogram( "proxy.backend.duration.ms", metric.WithUnit("milliseconds"), metric.WithDescription("Duration of peer round trip time from the netbird proxy"), ) if err != nil { - return nil, err + return err } - certificateIssueDuration, err := meter.Int64Histogram( + m.certificateIssueDuration, err = meter.Int64Histogram( "proxy.certificate.issue.duration.ms", metric.WithUnit("milliseconds"), metric.WithDescription("Duration of ACME certificate issuance"), ) + return err +} + +func (m *Metrics) initL4Metrics(meter metric.Meter) error { + var err error + + m.l4Services, err = meter.Int64UpDownCounter( + "proxy.l4.services.count", + metric.WithUnit("1"), + metric.WithDescription("Current number of configured L4 services (TCP/TLS/UDP) by mode"), + ) if err != nil { - return nil, err + return err } - return &Metrics{ - ctx: ctx, - requestsTotal: requestsTotal, - activeRequests: activeRequests, - configuredDomains: configuredDomains, - totalPaths: totalPaths, - requestDuration: requestDuration, - backendDuration: backendDuration, - certificateIssueDuration: certificateIssueDuration, - mappingPaths: make(map[string]int), - }, nil + m.tcpActiveConns, err = meter.Int64UpDownCounter( + "proxy.tcp.active_connections", + metric.WithUnit("1"), + metric.WithDescription("Current number of active TCP/TLS relay connections"), + ) + if err != nil { + return err + } + + m.tcpConnsTotal, err = meter.Int64Counter( + "proxy.tcp.connections.total", + metric.WithUnit("1"), + metric.WithDescription("Total TCP/TLS relay connections by result and account"), + ) + if err != nil { + return err + } + + m.tcpConnDuration, err = meter.Int64Histogram( + "proxy.tcp.connection.duration.ms", + metric.WithUnit("milliseconds"), + metric.WithDescription("Duration of TCP/TLS relay connections"), + ) + if err != nil { + return err + } + + m.tcpBytesTotal, err = meter.Int64Counter( + "proxy.tcp.bytes.total", + metric.WithUnit("bytes"), + metric.WithDescription("Total bytes transferred through TCP/TLS relay by direction"), + ) + if err != nil { + return err + } + + m.udpActiveSess, err = meter.Int64UpDownCounter( + "proxy.udp.active_sessions", + metric.WithUnit("1"), + metric.WithDescription("Current number of active UDP relay sessions"), + ) + if err != nil { + return err + } + + m.udpSessionsTotal, err = meter.Int64Counter( + "proxy.udp.sessions.total", + metric.WithUnit("1"), + metric.WithDescription("Total UDP relay sessions by result and account"), + ) + if err != nil { + return err + } + + m.udpPacketsTotal, err = meter.Int64Counter( + "proxy.udp.packets.total", + metric.WithUnit("1"), + metric.WithDescription("Total UDP packets relayed by direction"), + ) + if err != nil { + return err + } + + m.udpBytesTotal, err = meter.Int64Counter( + "proxy.udp.bytes.total", + metric.WithUnit("bytes"), + metric.WithDescription("Total bytes transferred through UDP relay by direction"), + ) + return err } type responseInterceptor struct { @@ -120,6 +226,13 @@ func (w *responseInterceptor) Write(b []byte) (int, error) { return size, err } +// Unwrap returns the underlying ResponseWriter so http.ResponseController +// can reach through to the original writer for Hijack/Flush operations. +func (w *responseInterceptor) Unwrap() http.ResponseWriter { + return w.PassthroughWriter +} + +// Middleware wraps an HTTP handler with request metrics. func (m *Metrics) Middleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { m.requestsTotal.Add(m.ctx, 1) @@ -144,6 +257,7 @@ func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { return f(r) } +// RoundTripper wraps an http.RoundTripper with backend duration metrics. func (m *Metrics) RoundTripper(next http.RoundTripper) http.RoundTripper { return roundTripperFunc(func(req *http.Request) (*http.Response, error) { start := time.Now() @@ -156,6 +270,7 @@ func (m *Metrics) RoundTripper(next http.RoundTripper) http.RoundTripper { }) } +// AddMapping records that a domain mapping was added. func (m *Metrics) AddMapping(mapping proxy.Mapping) { m.mappingsMux.Lock() defer m.mappingsMux.Unlock() @@ -175,13 +290,13 @@ func (m *Metrics) AddMapping(mapping proxy.Mapping) { m.mappingPaths[mapping.Host] = newPathCount } +// RemoveMapping records that a domain mapping was removed. func (m *Metrics) RemoveMapping(mapping proxy.Mapping) { m.mappingsMux.Lock() defer m.mappingsMux.Unlock() oldPathCount, exists := m.mappingPaths[mapping.Host] if !exists { - // Nothing to remove return } @@ -195,3 +310,80 @@ func (m *Metrics) RemoveMapping(mapping proxy.Mapping) { func (m *Metrics) RecordCertificateIssuance(duration time.Duration) { m.certificateIssueDuration.Record(m.ctx, duration.Milliseconds()) } + +// L4ServiceAdded increments the L4 service gauge for the given mode. +func (m *Metrics) L4ServiceAdded(mode types.ServiceMode) { + m.l4Services.Add(m.ctx, 1, metric.WithAttributes(attribute.String("mode", string(mode)))) +} + +// L4ServiceRemoved decrements the L4 service gauge for the given mode. +func (m *Metrics) L4ServiceRemoved(mode types.ServiceMode) { + m.l4Services.Add(m.ctx, -1, metric.WithAttributes(attribute.String("mode", string(mode)))) +} + +// TCPRelayStarted records a new TCP relay connection starting. +func (m *Metrics) TCPRelayStarted(accountID types.AccountID) { + acct := attribute.String("account_id", string(accountID)) + m.tcpActiveConns.Add(m.ctx, 1, metric.WithAttributes(acct)) + m.tcpConnsTotal.Add(m.ctx, 1, metric.WithAttributes(acct, attribute.String("result", "success"))) +} + +// TCPRelayEnded records a TCP relay connection ending and accumulates bytes and duration. +func (m *Metrics) TCPRelayEnded(accountID types.AccountID, duration time.Duration, srcToDst, dstToSrc int64) { + acct := attribute.String("account_id", string(accountID)) + m.tcpActiveConns.Add(m.ctx, -1, metric.WithAttributes(acct)) + m.tcpConnDuration.Record(m.ctx, duration.Milliseconds(), metric.WithAttributes(acct)) + m.tcpBytesTotal.Add(m.ctx, srcToDst, metric.WithAttributes(attribute.String("direction", "client_to_backend"))) + m.tcpBytesTotal.Add(m.ctx, dstToSrc, metric.WithAttributes(attribute.String("direction", "backend_to_client"))) +} + +// TCPRelayDialError records a dial failure for a TCP relay. +func (m *Metrics) TCPRelayDialError(accountID types.AccountID) { + m.tcpConnsTotal.Add(m.ctx, 1, metric.WithAttributes( + attribute.String("account_id", string(accountID)), + attribute.String("result", "dial_error"), + )) +} + +// TCPRelayRejected records a rejected TCP relay (semaphore full). +func (m *Metrics) TCPRelayRejected(accountID types.AccountID) { + m.tcpConnsTotal.Add(m.ctx, 1, metric.WithAttributes( + attribute.String("account_id", string(accountID)), + attribute.String("result", "rejected"), + )) +} + +// UDPSessionStarted records a new UDP session starting. +func (m *Metrics) UDPSessionStarted(accountID types.AccountID) { + acct := attribute.String("account_id", string(accountID)) + m.udpActiveSess.Add(m.ctx, 1, metric.WithAttributes(acct)) + m.udpSessionsTotal.Add(m.ctx, 1, metric.WithAttributes(acct, attribute.String("result", "success"))) +} + +// UDPSessionEnded records a UDP session ending. +func (m *Metrics) UDPSessionEnded(accountID types.AccountID) { + m.udpActiveSess.Add(m.ctx, -1, metric.WithAttributes(attribute.String("account_id", string(accountID)))) +} + +// UDPSessionDialError records a dial failure for a UDP session. +func (m *Metrics) UDPSessionDialError(accountID types.AccountID) { + m.udpSessionsTotal.Add(m.ctx, 1, metric.WithAttributes( + attribute.String("account_id", string(accountID)), + attribute.String("result", "dial_error"), + )) +} + +// UDPSessionRejected records a rejected UDP session (limit or rate limited). +func (m *Metrics) UDPSessionRejected(accountID types.AccountID) { + m.udpSessionsTotal.Add(m.ctx, 1, metric.WithAttributes( + attribute.String("account_id", string(accountID)), + attribute.String("result", "rejected"), + )) +} + +// UDPPacketRelayed records a packet relayed in the given direction with its size in bytes. +func (m *Metrics) UDPPacketRelayed(direction types.RelayDirection, bytes int) { + dir := attribute.String("direction", string(direction)) + m.udpPacketsTotal.Add(m.ctx, 1, metric.WithAttributes(dir)) + m.udpBytesTotal.Add(m.ctx, int64(bytes), metric.WithAttributes(dir)) +} diff --git a/proxy/internal/netutil/errors.go b/proxy/internal/netutil/errors.go new file mode 100644 index 000000000..ff24e33d4 --- /dev/null +++ b/proxy/internal/netutil/errors.go @@ -0,0 +1,40 @@ +package netutil + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "net" + "syscall" +) + +// ValidatePort converts an int32 proto port to uint16, returning an error +// if the value is out of the valid 1–65535 range. +func ValidatePort(port int32) (uint16, error) { + if port <= 0 || port > math.MaxUint16 { + return 0, fmt.Errorf("invalid port %d: must be 1–65535", port) + } + return uint16(port), nil +} + +// IsExpectedError returns true for errors that are normal during +// connection teardown and should not be logged as warnings. +func IsExpectedError(err error) bool { + return errors.Is(err, net.ErrClosed) || + errors.Is(err, context.Canceled) || + errors.Is(err, io.EOF) || + errors.Is(err, syscall.ECONNRESET) || + errors.Is(err, syscall.EPIPE) || + errors.Is(err, syscall.ECONNABORTED) +} + +// IsTimeout checks whether the error is a network timeout. +func IsTimeout(err error) bool { + var netErr net.Error + if errors.As(err, &netErr) { + return netErr.Timeout() + } + return false +} diff --git a/proxy/internal/netutil/errors_test.go b/proxy/internal/netutil/errors_test.go new file mode 100644 index 000000000..7d6be10ff --- /dev/null +++ b/proxy/internal/netutil/errors_test.go @@ -0,0 +1,92 @@ +package netutil + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "syscall" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestValidatePort(t *testing.T) { + tests := []struct { + name string + port int32 + want uint16 + wantErr bool + }{ + {"valid min", 1, 1, false}, + {"valid mid", 8080, 8080, false}, + {"valid max", 65535, 65535, false}, + {"zero", 0, 0, true}, + {"negative", -1, 0, true}, + {"too large", 65536, 0, true}, + {"way too large", 100000, 0, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ValidatePort(tt.port) + if tt.wantErr { + assert.Error(t, err) + assert.Zero(t, got) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.want, got) + } + }) + } +} + +func TestIsExpectedError(t *testing.T) { + tests := []struct { + name string + err error + want bool + }{ + {"net.ErrClosed", net.ErrClosed, true}, + {"context.Canceled", context.Canceled, true}, + {"io.EOF", io.EOF, true}, + {"ECONNRESET", syscall.ECONNRESET, true}, + {"EPIPE", syscall.EPIPE, true}, + {"ECONNABORTED", syscall.ECONNABORTED, true}, + {"wrapped expected", fmt.Errorf("wrap: %w", net.ErrClosed), true}, + {"unexpected EOF", io.ErrUnexpectedEOF, false}, + {"generic error", errors.New("something"), false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.want, IsExpectedError(tt.err)) + }) + } +} + +type timeoutErr struct{ timeout bool } + +func (e *timeoutErr) Error() string { return "timeout" } +func (e *timeoutErr) Timeout() bool { return e.timeout } +func (e *timeoutErr) Temporary() bool { return false } + +func TestIsTimeout(t *testing.T) { + tests := []struct { + name string + err error + want bool + }{ + {"net timeout", &timeoutErr{timeout: true}, true}, + {"net non-timeout", &timeoutErr{timeout: false}, false}, + {"wrapped timeout", fmt.Errorf("wrap: %w", &timeoutErr{timeout: true}), true}, + {"generic error", errors.New("not a timeout"), false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.want, IsTimeout(tt.err)) + }) + } +} diff --git a/proxy/internal/proxy/context.go b/proxy/internal/proxy/context.go index 22ebbf371..4a61f6bcf 100644 --- a/proxy/internal/proxy/context.go +++ b/proxy/internal/proxy/context.go @@ -2,6 +2,7 @@ package proxy import ( "context" + "net/netip" "sync" "github.com/netbirdio/netbird/proxy/internal/types" @@ -47,10 +48,10 @@ func (o ResponseOrigin) String() string { type CapturedData struct { mu sync.RWMutex RequestID string - ServiceId string + ServiceId types.ServiceID AccountId types.AccountID Origin ResponseOrigin - ClientIP string + ClientIP netip.Addr UserID string AuthMethod string } @@ -63,14 +64,14 @@ func (c *CapturedData) GetRequestID() string { } // SetServiceId safely sets the service ID -func (c *CapturedData) SetServiceId(serviceId string) { +func (c *CapturedData) SetServiceId(serviceId types.ServiceID) { c.mu.Lock() defer c.mu.Unlock() c.ServiceId = serviceId } // GetServiceId safely gets the service ID -func (c *CapturedData) GetServiceId() string { +func (c *CapturedData) GetServiceId() types.ServiceID { c.mu.RLock() defer c.mu.RUnlock() return c.ServiceId @@ -105,14 +106,14 @@ func (c *CapturedData) GetOrigin() ResponseOrigin { } // SetClientIP safely sets the resolved client IP. -func (c *CapturedData) SetClientIP(ip string) { +func (c *CapturedData) SetClientIP(ip netip.Addr) { c.mu.Lock() defer c.mu.Unlock() c.ClientIP = ip } // GetClientIP safely gets the resolved client IP. -func (c *CapturedData) GetClientIP() string { +func (c *CapturedData) GetClientIP() netip.Addr { c.mu.RLock() defer c.mu.RUnlock() return c.ClientIP @@ -161,13 +162,13 @@ func CapturedDataFromContext(ctx context.Context) *CapturedData { return data } -func withServiceId(ctx context.Context, serviceId string) context.Context { +func withServiceId(ctx context.Context, serviceId types.ServiceID) context.Context { return context.WithValue(ctx, serviceIdKey, serviceId) } -func ServiceIdFromContext(ctx context.Context) string { +func ServiceIdFromContext(ctx context.Context) types.ServiceID { v := ctx.Value(serviceIdKey) - serviceId, ok := v.(string) + serviceId, ok := v.(types.ServiceID) if !ok { return "" } diff --git a/proxy/internal/proxy/proxy_bench_test.go b/proxy/internal/proxy/proxy_bench_test.go index 5af2167e6..b59ef75c0 100644 --- a/proxy/internal/proxy/proxy_bench_test.go +++ b/proxy/internal/proxy/proxy_bench_test.go @@ -25,7 +25,7 @@ func (nopTransport) RoundTrip(*http.Request) (*http.Response, error) { func BenchmarkServeHTTP(b *testing.B) { rp := proxy.NewReverseProxy(nopTransport{}, "http", nil, nil) rp.AddMapping(proxy.Mapping{ - ID: rand.Text(), + ID: types.ServiceID(rand.Text()), AccountID: types.AccountID(rand.Text()), Host: "app.example.com", Paths: map[string]*proxy.PathTarget{ @@ -66,7 +66,7 @@ func BenchmarkServeHTTPHostCount(b *testing.B) { target = id } rp.AddMapping(proxy.Mapping{ - ID: id, + ID: types.ServiceID(id), AccountID: types.AccountID(rand.Text()), Host: host, Paths: map[string]*proxy.PathTarget{ @@ -118,7 +118,7 @@ func BenchmarkServeHTTPPathCount(b *testing.B) { } } rp.AddMapping(proxy.Mapping{ - ID: rand.Text(), + ID: types.ServiceID(rand.Text()), AccountID: types.AccountID(rand.Text()), Host: "app.example.com", Paths: paths, diff --git a/proxy/internal/proxy/reverseproxy.go b/proxy/internal/proxy/reverseproxy.go index b0001d5b9..1ee9b2a42 100644 --- a/proxy/internal/proxy/reverseproxy.go +++ b/proxy/internal/proxy/reverseproxy.go @@ -16,6 +16,7 @@ import ( "github.com/netbirdio/netbird/proxy/auth" "github.com/netbirdio/netbird/proxy/internal/roundtrip" + "github.com/netbirdio/netbird/proxy/internal/types" "github.com/netbirdio/netbird/proxy/web" ) @@ -86,9 +87,7 @@ func (p *ReverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { ctx = roundtrip.WithSkipTLSVerify(ctx) } if pt.RequestTimeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, pt.RequestTimeout) - defer cancel() + ctx = types.WithDialTimeout(ctx, pt.RequestTimeout) } rewriteMatchedPath := result.matchedPath @@ -142,9 +141,9 @@ func (p *ReverseProxy) rewriteFunc(target *url.URL, matchedPath string, passHost r.Out.Header.Set(k, v) } - clientIP := extractClientIP(r.In.RemoteAddr) + clientIP := extractHostIP(r.In.RemoteAddr) - if IsTrustedProxy(clientIP, p.trustedProxies) { + if isTrustedAddr(clientIP, p.trustedProxies) { p.setTrustedForwardingHeaders(r, clientIP) } else { p.setUntrustedForwardingHeaders(r, clientIP) @@ -214,12 +213,14 @@ func normalizeHost(u *url.URL) string { // setTrustedForwardingHeaders appends to the existing forwarding header chain // and preserves upstream-provided headers when the direct connection is from // a trusted proxy. -func (p *ReverseProxy) setTrustedForwardingHeaders(r *httputil.ProxyRequest, clientIP string) { +func (p *ReverseProxy) setTrustedForwardingHeaders(r *httputil.ProxyRequest, clientIP netip.Addr) { + ipStr := clientIP.String() + // Append the direct connection IP to the existing X-Forwarded-For chain. if existing := r.In.Header.Get("X-Forwarded-For"); existing != "" { - r.Out.Header.Set("X-Forwarded-For", existing+", "+clientIP) + r.Out.Header.Set("X-Forwarded-For", existing+", "+ipStr) } else { - r.Out.Header.Set("X-Forwarded-For", clientIP) + r.Out.Header.Set("X-Forwarded-For", ipStr) } // Preserve upstream X-Real-IP if present; otherwise resolve through the chain. @@ -227,7 +228,7 @@ func (p *ReverseProxy) setTrustedForwardingHeaders(r *httputil.ProxyRequest, cli r.Out.Header.Set("X-Real-IP", realIP) } else { resolved := ResolveClientIP(r.In.RemoteAddr, r.In.Header.Get("X-Forwarded-For"), p.trustedProxies) - r.Out.Header.Set("X-Real-IP", resolved) + r.Out.Header.Set("X-Real-IP", resolved.String()) } // Preserve upstream X-Forwarded-Host if present. @@ -257,10 +258,11 @@ func (p *ReverseProxy) setTrustedForwardingHeaders(r *httputil.ProxyRequest, cli // sets them fresh based on the direct connection. This is the default // behavior when no trusted proxies are configured or the direct connection // is from an untrusted source. -func (p *ReverseProxy) setUntrustedForwardingHeaders(r *httputil.ProxyRequest, clientIP string) { +func (p *ReverseProxy) setUntrustedForwardingHeaders(r *httputil.ProxyRequest, clientIP netip.Addr) { + ipStr := clientIP.String() proto := auth.ResolveProto(p.forwardedProto, r.In.TLS) - r.Out.Header.Set("X-Forwarded-For", clientIP) - r.Out.Header.Set("X-Real-IP", clientIP) + r.Out.Header.Set("X-Forwarded-For", ipStr) + r.Out.Header.Set("X-Real-IP", ipStr) r.Out.Header.Set("X-Forwarded-Host", r.In.Host) r.Out.Header.Set("X-Forwarded-Proto", proto) r.Out.Header.Set("X-Forwarded-Port", extractForwardedPort(r.In.Host, proto)) @@ -288,16 +290,6 @@ func stripSessionTokenQuery(r *httputil.ProxyRequest) { } } -// extractClientIP extracts the IP address from an http.Request.RemoteAddr -// which is always in host:port format. -func extractClientIP(remoteAddr string) string { - ip, _, err := net.SplitHostPort(remoteAddr) - if err != nil { - return remoteAddr - } - return ip -} - // extractForwardedPort returns the port from the Host header if present, // otherwise defaults to the standard port for the resolved protocol. func extractForwardedPort(host, resolvedProto string) string { @@ -327,10 +319,12 @@ func proxyErrorHandler(w http.ResponseWriter, r *http.Request, err error) { web.ServeErrorPage(w, r, code, title, message, requestID, status) } -// getClientIP retrieves the resolved client IP from context. +// getClientIP retrieves the resolved client IP string from context. func getClientIP(r *http.Request) string { if capturedData := CapturedDataFromContext(r.Context()); capturedData != nil { - return capturedData.GetClientIP() + if ip := capturedData.GetClientIP(); ip.IsValid() { + return ip.String() + } } return "" } diff --git a/proxy/internal/proxy/reverseproxy_test.go b/proxy/internal/proxy/reverseproxy_test.go index be2fb9105..b05ead198 100644 --- a/proxy/internal/proxy/reverseproxy_test.go +++ b/proxy/internal/proxy/reverseproxy_test.go @@ -284,23 +284,23 @@ func TestRewriteFunc_URLRewriting(t *testing.T) { }) } -func TestExtractClientIP(t *testing.T) { +func TestExtractHostIP(t *testing.T) { tests := []struct { name string remoteAddr string - expected string + expected netip.Addr }{ - {"IPv4 with port", "192.168.1.1:12345", "192.168.1.1"}, - {"IPv6 with port", "[::1]:12345", "::1"}, - {"IPv6 full with port", "[2001:db8::1]:443", "2001:db8::1"}, - {"IPv4 without port fallback", "192.168.1.1", "192.168.1.1"}, - {"IPv6 without brackets fallback", "::1", "::1"}, - {"empty string fallback", "", ""}, - {"public IP", "203.0.113.50:9999", "203.0.113.50"}, + {"IPv4 with port", "192.168.1.1:12345", netip.MustParseAddr("192.168.1.1")}, + {"IPv6 with port", "[::1]:12345", netip.MustParseAddr("::1")}, + {"IPv6 full with port", "[2001:db8::1]:443", netip.MustParseAddr("2001:db8::1")}, + {"IPv4 without port fallback", "192.168.1.1", netip.MustParseAddr("192.168.1.1")}, + {"IPv6 without brackets fallback", "::1", netip.MustParseAddr("::1")}, + {"empty string fallback", "", netip.Addr{}}, + {"public IP", "203.0.113.50:9999", netip.MustParseAddr("203.0.113.50")}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, tt.expected, extractClientIP(tt.remoteAddr)) + assert.Equal(t, tt.expected, extractHostIP(tt.remoteAddr)) }) } } diff --git a/proxy/internal/proxy/servicemapping.go b/proxy/internal/proxy/servicemapping.go index 58b92ff9e..1513fbe45 100644 --- a/proxy/internal/proxy/servicemapping.go +++ b/proxy/internal/proxy/servicemapping.go @@ -30,8 +30,9 @@ type PathTarget struct { CustomHeaders map[string]string } +// Mapping describes how a domain is routed by the HTTP reverse proxy. type Mapping struct { - ID string + ID types.ServiceID AccountID types.AccountID Host string Paths map[string]*PathTarget @@ -42,7 +43,7 @@ type Mapping struct { type targetResult struct { target *PathTarget matchedPath string - serviceID string + serviceID types.ServiceID accountID types.AccountID passHostHeader bool rewriteRedirects bool @@ -101,8 +102,13 @@ func (p *ReverseProxy) AddMapping(m Mapping) { p.mappings[m.Host] = m } -func (p *ReverseProxy) RemoveMapping(m Mapping) { +// RemoveMapping removes the mapping for the given host and reports whether it existed. +func (p *ReverseProxy) RemoveMapping(m Mapping) bool { p.mappingsMux.Lock() defer p.mappingsMux.Unlock() + if _, ok := p.mappings[m.Host]; !ok { + return false + } delete(p.mappings, m.Host) + return true } diff --git a/proxy/internal/proxy/trustedproxy.go b/proxy/internal/proxy/trustedproxy.go index ad9a5b6c0..0fe693f90 100644 --- a/proxy/internal/proxy/trustedproxy.go +++ b/proxy/internal/proxy/trustedproxy.go @@ -7,21 +7,11 @@ import ( // IsTrustedProxy checks if the given IP string falls within any of the trusted prefixes. func IsTrustedProxy(ipStr string, trusted []netip.Prefix) bool { - if len(trusted) == 0 { - return false - } - addr, err := netip.ParseAddr(ipStr) - if err != nil { + if err != nil || len(trusted) == 0 { return false } - - for _, prefix := range trusted { - if prefix.Contains(addr) { - return true - } - } - return false + return isTrustedAddr(addr.Unmap(), trusted) } // ResolveClientIP extracts the real client IP from X-Forwarded-For using the trusted proxy list. @@ -30,10 +20,10 @@ func IsTrustedProxy(ipStr string, trusted []netip.Prefix) bool { // // If the trusted list is empty or remoteAddr is not trusted, it returns the // remoteAddr IP directly (ignoring any forwarding headers). -func ResolveClientIP(remoteAddr, xff string, trusted []netip.Prefix) string { - remoteIP := extractClientIP(remoteAddr) +func ResolveClientIP(remoteAddr, xff string, trusted []netip.Prefix) netip.Addr { + remoteIP := extractHostIP(remoteAddr) - if len(trusted) == 0 || !IsTrustedProxy(remoteIP, trusted) { + if len(trusted) == 0 || !isTrustedAddr(remoteIP, trusted) { return remoteIP } @@ -47,14 +37,45 @@ func ResolveClientIP(remoteAddr, xff string, trusted []netip.Prefix) string { if ip == "" { continue } - if !IsTrustedProxy(ip, trusted) { - return ip + addr, err := netip.ParseAddr(ip) + if err != nil { + continue + } + addr = addr.Unmap() + if !isTrustedAddr(addr, trusted) { + return addr } } // All IPs in XFF are trusted; return the leftmost as best guess. if first := strings.TrimSpace(parts[0]); first != "" { - return first + if addr, err := netip.ParseAddr(first); err == nil { + return addr.Unmap() + } } return remoteIP } + +// extractHostIP parses the IP from a host:port string and returns it unmapped. +func extractHostIP(hostPort string) netip.Addr { + if ap, err := netip.ParseAddrPort(hostPort); err == nil { + return ap.Addr().Unmap() + } + if addr, err := netip.ParseAddr(hostPort); err == nil { + return addr.Unmap() + } + return netip.Addr{} +} + +// isTrustedAddr checks if the given address falls within any of the trusted prefixes. +func isTrustedAddr(addr netip.Addr, trusted []netip.Prefix) bool { + if !addr.IsValid() { + return false + } + for _, prefix := range trusted { + if prefix.Contains(addr) { + return true + } + } + return false +} diff --git a/proxy/internal/proxy/trustedproxy_test.go b/proxy/internal/proxy/trustedproxy_test.go index 827b7babf..35ed1f5c2 100644 --- a/proxy/internal/proxy/trustedproxy_test.go +++ b/proxy/internal/proxy/trustedproxy_test.go @@ -48,77 +48,77 @@ func TestResolveClientIP(t *testing.T) { remoteAddr string xff string trusted []netip.Prefix - want string + want netip.Addr }{ { name: "empty trusted list returns RemoteAddr", remoteAddr: "203.0.113.50:9999", xff: "1.2.3.4", trusted: nil, - want: "203.0.113.50", + want: netip.MustParseAddr("203.0.113.50"), }, { name: "untrusted RemoteAddr ignores XFF", remoteAddr: "203.0.113.50:9999", xff: "1.2.3.4, 10.0.0.1", trusted: trusted, - want: "203.0.113.50", + want: netip.MustParseAddr("203.0.113.50"), }, { name: "trusted RemoteAddr with single client in XFF", remoteAddr: "10.0.0.1:5000", xff: "203.0.113.50", trusted: trusted, - want: "203.0.113.50", + want: netip.MustParseAddr("203.0.113.50"), }, { name: "trusted RemoteAddr walks past trusted entries in XFF", remoteAddr: "10.0.0.1:5000", xff: "203.0.113.50, 10.0.0.2, 172.16.0.5", trusted: trusted, - want: "203.0.113.50", + want: netip.MustParseAddr("203.0.113.50"), }, { name: "trusted RemoteAddr with empty XFF falls back to RemoteAddr", remoteAddr: "10.0.0.1:5000", xff: "", trusted: trusted, - want: "10.0.0.1", + want: netip.MustParseAddr("10.0.0.1"), }, { name: "all XFF IPs trusted returns leftmost", remoteAddr: "10.0.0.1:5000", xff: "10.0.0.2, 172.16.0.1, 10.0.0.3", trusted: trusted, - want: "10.0.0.2", + want: netip.MustParseAddr("10.0.0.2"), }, { name: "XFF with whitespace", remoteAddr: "10.0.0.1:5000", xff: " 203.0.113.50 , 10.0.0.2 ", trusted: trusted, - want: "203.0.113.50", + want: netip.MustParseAddr("203.0.113.50"), }, { name: "XFF with empty segments", remoteAddr: "10.0.0.1:5000", xff: "203.0.113.50,,10.0.0.2", trusted: trusted, - want: "203.0.113.50", + want: netip.MustParseAddr("203.0.113.50"), }, { name: "multi-hop with mixed trust", remoteAddr: "10.0.0.1:5000", xff: "8.8.8.8, 203.0.113.50, 172.16.0.1", trusted: trusted, - want: "203.0.113.50", + want: netip.MustParseAddr("203.0.113.50"), }, { name: "RemoteAddr without port", remoteAddr: "10.0.0.1", xff: "203.0.113.50", trusted: trusted, - want: "203.0.113.50", + want: netip.MustParseAddr("203.0.113.50"), }, } for _, tt := range tests { diff --git a/proxy/internal/roundtrip/netbird.go b/proxy/internal/roundtrip/netbird.go index 57770f4a5..e38e3dc4e 100644 --- a/proxy/internal/roundtrip/netbird.go +++ b/proxy/internal/roundtrip/netbird.go @@ -5,6 +5,7 @@ import ( "crypto/tls" "errors" "fmt" + "net" "net/http" "sync" "time" @@ -14,11 +15,12 @@ import ( "golang.org/x/exp/maps" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + grpcstatus "google.golang.org/grpc/status" "github.com/netbirdio/netbird/client/embed" nberrors "github.com/netbirdio/netbird/client/errors" "github.com/netbirdio/netbird/proxy/internal/types" - "github.com/netbirdio/netbird/shared/management/domain" "github.com/netbirdio/netbird/shared/management/proto" "github.com/netbirdio/netbird/util" ) @@ -26,7 +28,22 @@ import ( const deviceNamePrefix = "ingress-proxy-" // backendKey identifies a backend by its host:port from the target URL. -type backendKey = string +type backendKey string + +// ServiceKey uniquely identifies a service (HTTP reverse proxy or L4 service) +// that holds a reference to an embedded NetBird client. Callers should use the +// DomainServiceKey and L4ServiceKey constructors to avoid namespace collisions. +type ServiceKey string + +// DomainServiceKey returns a ServiceKey for an HTTP/TLS domain-based service. +func DomainServiceKey(domain string) ServiceKey { + return ServiceKey("domain:" + domain) +} + +// L4ServiceKey returns a ServiceKey for an L4 service (TCP/UDP). +func L4ServiceKey(id types.ServiceID) ServiceKey { + return ServiceKey("l4:" + id) +} var ( // ErrNoAccountID is returned when a request context is missing the account ID. @@ -39,24 +56,24 @@ var ( ErrTooManyInflight = errors.New("too many in-flight requests") ) -// domainInfo holds metadata about a registered domain. -type domainInfo struct { - serviceID string +// serviceInfo holds metadata about a registered service. +type serviceInfo struct { + serviceID types.ServiceID } -type domainNotification struct { - domain domain.Domain - serviceID string +type serviceNotification struct { + key ServiceKey + serviceID types.ServiceID } -// clientEntry holds an embedded NetBird client and tracks which domains use it. +// clientEntry holds an embedded NetBird client and tracks which services use it. type clientEntry struct { client *embed.Client transport *http.Transport // insecureTransport is a clone of transport with TLS verification disabled, // used when per-target skip_tls_verify is set. insecureTransport *http.Transport - domains map[domain.Domain]domainInfo + services map[ServiceKey]serviceInfo createdAt time.Time started bool // Per-backend in-flight limiting keyed by target host:port. @@ -93,12 +110,12 @@ func (e *clientEntry) acquireInflight(backend backendKey) (release func(), ok bo // ClientConfig holds configuration for the embedded NetBird client. type ClientConfig struct { MgmtAddr string - WGPort int + WGPort uint16 PreSharedKey string } type statusNotifier interface { - NotifyStatus(ctx context.Context, accountID, serviceID, domain string, connected bool) error + NotifyStatus(ctx context.Context, accountID types.AccountID, serviceID types.ServiceID, connected bool) error } type managementClient interface { @@ -107,7 +124,7 @@ type managementClient interface { // NetBird provides an http.RoundTripper implementation // backed by underlying NetBird connections. -// Clients are keyed by AccountID, allowing multiple domains to share the same connection. +// Clients are keyed by AccountID, allowing multiple services to share the same connection. type NetBird struct { proxyID string proxyAddr string @@ -124,11 +141,11 @@ type NetBird struct { // ClientDebugInfo contains debug information about a client. type ClientDebugInfo struct { - AccountID types.AccountID - DomainCount int - Domains domain.List - HasClient bool - CreatedAt time.Time + AccountID types.AccountID + ServiceCount int + ServiceKeys []string + HasClient bool + CreatedAt time.Time } // accountIDContextKey is the context key for storing the account ID. @@ -137,37 +154,37 @@ type accountIDContextKey struct{} // skipTLSVerifyContextKey is the context key for requesting insecure TLS. type skipTLSVerifyContextKey struct{} -// AddPeer registers a domain for an account. If the account doesn't have a client yet, +// AddPeer registers a service for an account. If the account doesn't have a client yet, // one is created by authenticating with the management server using the provided token. -// Multiple domains can share the same client. -func (n *NetBird) AddPeer(ctx context.Context, accountID types.AccountID, d domain.Domain, authToken, serviceID string) error { +// Multiple services can share the same client. +func (n *NetBird) AddPeer(ctx context.Context, accountID types.AccountID, key ServiceKey, authToken string, serviceID types.ServiceID) error { + si := serviceInfo{serviceID: serviceID} + n.clientsMux.Lock() entry, exists := n.clients[accountID] if exists { - // Client already exists for this account, just register the domain - entry.domains[d] = domainInfo{serviceID: serviceID} + entry.services[key] = si started := entry.started n.clientsMux.Unlock() n.logger.WithFields(log.Fields{ - "account_id": accountID, - "domain": d, - }).Debug("registered domain with existing client") + "account_id": accountID, + "service_key": key, + }).Debug("registered service with existing client") - // If client is already started, notify this domain as connected immediately if started && n.statusNotifier != nil { - if err := n.statusNotifier.NotifyStatus(ctx, string(accountID), serviceID, string(d), true); err != nil { + if err := n.statusNotifier.NotifyStatus(ctx, accountID, serviceID, true); err != nil { n.logger.WithFields(log.Fields{ - "account_id": accountID, - "domain": d, + "account_id": accountID, + "service_key": key, }).WithError(err).Warn("failed to notify status for existing client") } } return nil } - entry, err := n.createClientEntry(ctx, accountID, d, authToken, serviceID) + entry, err := n.createClientEntry(ctx, accountID, key, authToken, si) if err != nil { n.clientsMux.Unlock() return err @@ -177,8 +194,8 @@ func (n *NetBird) AddPeer(ctx context.Context, accountID types.AccountID, d doma n.clientsMux.Unlock() n.logger.WithFields(log.Fields{ - "account_id": accountID, - "domain": d, + "account_id": accountID, + "service_key": key, }).Info("created new client for account") // Attempt to start the client in the background; if this fails we will @@ -190,7 +207,8 @@ func (n *NetBird) AddPeer(ctx context.Context, accountID types.AccountID, d doma // createClientEntry generates a WireGuard keypair, authenticates with management, // and creates an embedded NetBird client. Must be called with clientsMux held. -func (n *NetBird) createClientEntry(ctx context.Context, accountID types.AccountID, d domain.Domain, authToken, serviceID string) (*clientEntry, error) { +func (n *NetBird) createClientEntry(ctx context.Context, accountID types.AccountID, key ServiceKey, authToken string, si serviceInfo) (*clientEntry, error) { + serviceID := si.serviceID n.logger.WithFields(log.Fields{ "account_id": accountID, "service_id": serviceID, @@ -209,7 +227,7 @@ func (n *NetBird) createClientEntry(ctx context.Context, accountID types.Account }).Debug("authenticating new proxy peer with management") resp, err := n.mgmtClient.CreateProxyPeer(ctx, &proto.CreateProxyPeerRequest{ - ServiceId: serviceID, + ServiceId: string(serviceID), AccountId: string(accountID), Token: authToken, WireguardPublicKey: publicKey.String(), @@ -240,13 +258,14 @@ func (n *NetBird) createClientEntry(ctx context.Context, accountID types.Account // Create embedded NetBird client with the generated private key. // The peer has already been created via CreateProxyPeer RPC with the public key. + wgPort := int(n.clientCfg.WGPort) client, err := embed.New(embed.Options{ DeviceName: deviceNamePrefix + n.proxyID, ManagementURL: n.clientCfg.MgmtAddr, PrivateKey: privateKey.String(), LogLevel: log.WarnLevel.String(), BlockInbound: true, - WireguardPort: &n.clientCfg.WGPort, + WireguardPort: &wgPort, PreSharedKey: n.clientCfg.PreSharedKey, }) if err != nil { @@ -257,7 +276,7 @@ func (n *NetBird) createClientEntry(ctx context.Context, accountID types.Account // the client's HTTPClient to avoid issues with request validation that do // not work with reverse proxied requests. transport := &http.Transport{ - DialContext: client.DialContext, + DialContext: dialWithTimeout(client.DialContext), ForceAttemptHTTP2: true, MaxIdleConns: n.transportCfg.maxIdleConns, MaxIdleConnsPerHost: n.transportCfg.maxIdleConnsPerHost, @@ -276,7 +295,7 @@ func (n *NetBird) createClientEntry(ctx context.Context, accountID types.Account return &clientEntry{ client: client, - domains: map[domain.Domain]domainInfo{d: {serviceID: serviceID}}, + services: map[ServiceKey]serviceInfo{key: si}, transport: transport, insecureTransport: insecureTransport, createdAt: time.Now(), @@ -286,7 +305,7 @@ func (n *NetBird) createClientEntry(ctx context.Context, accountID types.Account }, nil } -// runClientStartup starts the client and notifies registered domains on success. +// runClientStartup starts the client and notifies registered services on success. func (n *NetBird) runClientStartup(ctx context.Context, accountID types.AccountID, client *embed.Client) { startCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -300,16 +319,16 @@ func (n *NetBird) runClientStartup(ctx context.Context, accountID types.AccountI return } - // Mark client as started and collect domains to notify outside the lock. + // Mark client as started and collect services to notify outside the lock. n.clientsMux.Lock() entry, exists := n.clients[accountID] if exists { entry.started = true } - var domainsToNotify []domainNotification + var toNotify []serviceNotification if exists { - for dom, info := range entry.domains { - domainsToNotify = append(domainsToNotify, domainNotification{domain: dom, serviceID: info.serviceID}) + for key, info := range entry.services { + toNotify = append(toNotify, serviceNotification{key: key, serviceID: info.serviceID}) } } n.clientsMux.Unlock() @@ -317,24 +336,24 @@ func (n *NetBird) runClientStartup(ctx context.Context, accountID types.AccountI if n.statusNotifier == nil { return } - for _, dn := range domainsToNotify { - if err := n.statusNotifier.NotifyStatus(ctx, string(accountID), dn.serviceID, string(dn.domain), true); err != nil { + for _, sn := range toNotify { + if err := n.statusNotifier.NotifyStatus(ctx, accountID, sn.serviceID, true); err != nil { n.logger.WithFields(log.Fields{ - "account_id": accountID, - "domain": dn.domain, + "account_id": accountID, + "service_key": sn.key, }).WithError(err).Warn("failed to notify tunnel connection status") } else { n.logger.WithFields(log.Fields{ - "account_id": accountID, - "domain": dn.domain, + "account_id": accountID, + "service_key": sn.key, }).Info("notified management about tunnel connection") } } } -// RemovePeer unregisters a domain from an account. The client is only stopped -// when no domains are using it anymore. -func (n *NetBird) RemovePeer(ctx context.Context, accountID types.AccountID, d domain.Domain) error { +// RemovePeer unregisters a service from an account. The client is only stopped +// when no services are using it anymore. +func (n *NetBird) RemovePeer(ctx context.Context, accountID types.AccountID, key ServiceKey) error { n.clientsMux.Lock() entry, exists := n.clients[accountID] @@ -344,74 +363,65 @@ func (n *NetBird) RemovePeer(ctx context.Context, accountID types.AccountID, d d return nil } - // Get domain info before deleting - domInfo, domainExists := entry.domains[d] - if !domainExists { + si, svcExists := entry.services[key] + if !svcExists { n.clientsMux.Unlock() n.logger.WithFields(log.Fields{ - "account_id": accountID, - "domain": d, - }).Debug("remove peer: domain not registered") + "account_id": accountID, + "service_key": key, + }).Debug("remove peer: service not registered") return nil } - delete(entry.domains, d) - - // If there are still domains using this client, keep it running - if len(entry.domains) > 0 { - n.clientsMux.Unlock() + delete(entry.services, key) + stopClient := len(entry.services) == 0 + var client *embed.Client + var transport, insecureTransport *http.Transport + if stopClient { + n.logger.WithField("account_id", accountID).Info("stopping client, no more services") + client = entry.client + transport = entry.transport + insecureTransport = entry.insecureTransport + delete(n.clients, accountID) + } else { n.logger.WithFields(log.Fields{ - "account_id": accountID, - "domain": d, - "remaining_domains": len(entry.domains), - }).Debug("unregistered domain, client still in use") - - // Notify this domain as disconnected - if n.statusNotifier != nil { - if err := n.statusNotifier.NotifyStatus(ctx, string(accountID), domInfo.serviceID, string(d), false); err != nil { - n.logger.WithFields(log.Fields{ - "account_id": accountID, - "domain": d, - }).WithError(err).Warn("failed to notify tunnel disconnection status") - } - } - return nil + "account_id": accountID, + "service_key": key, + "remaining_services": len(entry.services), + }).Debug("unregistered service, client still in use") } - - // No more domains using this client, stop it - n.logger.WithFields(log.Fields{ - "account_id": accountID, - }).Info("stopping client, no more domains") - - client := entry.client - transport := entry.transport - insecureTransport := entry.insecureTransport - delete(n.clients, accountID) n.clientsMux.Unlock() - // Notify disconnection before stopping - if n.statusNotifier != nil { - if err := n.statusNotifier.NotifyStatus(ctx, string(accountID), domInfo.serviceID, string(d), false); err != nil { - n.logger.WithFields(log.Fields{ - "account_id": accountID, - "domain": d, - }).WithError(err).Warn("failed to notify tunnel disconnection status") + n.notifyDisconnect(ctx, accountID, key, si.serviceID) + + if stopClient { + transport.CloseIdleConnections() + insecureTransport.CloseIdleConnections() + if err := client.Stop(ctx); err != nil { + n.logger.WithField("account_id", accountID).WithError(err).Warn("failed to stop netbird client") } } - transport.CloseIdleConnections() - insecureTransport.CloseIdleConnections() - - if err := client.Stop(ctx); err != nil { - n.logger.WithFields(log.Fields{ - "account_id": accountID, - }).WithError(err).Warn("failed to stop netbird client") - } - return nil } +func (n *NetBird) notifyDisconnect(ctx context.Context, accountID types.AccountID, key ServiceKey, serviceID types.ServiceID) { + if n.statusNotifier == nil { + return + } + if err := n.statusNotifier.NotifyStatus(ctx, accountID, serviceID, false); err != nil { + if s, ok := grpcstatus.FromError(err); ok && s.Code() == codes.NotFound { + n.logger.WithField("service_key", key).Debug("service already removed, skipping disconnect notification") + } else { + n.logger.WithFields(log.Fields{ + "account_id": accountID, + "service_key": key, + }).WithError(err).Warn("failed to notify tunnel disconnection status") + } + } +} + // RoundTrip implements http.RoundTripper. It looks up the client for the account // specified in the request context and uses it to dial the backend. func (n *NetBird) RoundTrip(req *http.Request) (*http.Response, error) { @@ -435,7 +445,7 @@ func (n *NetBird) RoundTrip(req *http.Request) (*http.Response, error) { } n.clientsMux.RUnlock() - release, ok := entry.acquireInflight(req.URL.Host) + release, ok := entry.acquireInflight(backendKey(req.URL.Host)) defer release() if !ok { return nil, ErrTooManyInflight @@ -496,16 +506,16 @@ func (n *NetBird) HasClient(accountID types.AccountID) bool { return exists } -// DomainCount returns the number of domains registered for the given account. +// ServiceCount returns the number of services registered for the given account. // Returns 0 if the account has no client. -func (n *NetBird) DomainCount(accountID types.AccountID) int { +func (n *NetBird) ServiceCount(accountID types.AccountID) int { n.clientsMux.RLock() defer n.clientsMux.RUnlock() entry, exists := n.clients[accountID] if !exists { return 0 } - return len(entry.domains) + return len(entry.services) } // ClientCount returns the total number of active clients. @@ -533,16 +543,16 @@ func (n *NetBird) ListClientsForDebug() map[types.AccountID]ClientDebugInfo { result := make(map[types.AccountID]ClientDebugInfo) for accountID, entry := range n.clients { - domains := make(domain.List, 0, len(entry.domains)) - for d := range entry.domains { - domains = append(domains, d) + keys := make([]string, 0, len(entry.services)) + for k := range entry.services { + keys = append(keys, string(k)) } result[accountID] = ClientDebugInfo{ - AccountID: accountID, - DomainCount: len(entry.domains), - Domains: domains, - HasClient: entry.client != nil, - CreatedAt: entry.createdAt, + AccountID: accountID, + ServiceCount: len(entry.services), + ServiceKeys: keys, + HasClient: entry.client != nil, + CreatedAt: entry.createdAt, } } return result @@ -581,6 +591,20 @@ func NewNetBird(proxyID, proxyAddr string, clientCfg ClientConfig, logger *log.L } } +// dialWithTimeout wraps a DialContext function so that any dial timeout +// stored in the context (via types.WithDialTimeout) is applied only to +// the connection establishment phase, not the full request lifetime. +func dialWithTimeout(dial func(ctx context.Context, network, addr string) (net.Conn, error)) func(ctx context.Context, network, addr string) (net.Conn, error) { + return func(ctx context.Context, network, addr string) (net.Conn, error) { + if d, ok := types.DialTimeoutFromContext(ctx); ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, d) + defer cancel() + } + return dial(ctx, network, addr) + } +} + // WithAccountID adds the account ID to the context. func WithAccountID(ctx context.Context, accountID types.AccountID) context.Context { return context.WithValue(ctx, accountIDContextKey{}, accountID) diff --git a/proxy/internal/roundtrip/netbird_bench_test.go b/proxy/internal/roundtrip/netbird_bench_test.go index e89213c33..330ea0332 100644 --- a/proxy/internal/roundtrip/netbird_bench_test.go +++ b/proxy/internal/roundtrip/netbird_bench_test.go @@ -1,6 +1,7 @@ package roundtrip import ( + "context" "crypto/rand" "math/big" "sync" @@ -8,7 +9,6 @@ import ( "time" "github.com/netbirdio/netbird/proxy/internal/types" - "github.com/netbirdio/netbird/shared/management/domain" ) // Simple benchmark for comparison with AddPeer contention. @@ -29,9 +29,9 @@ func BenchmarkHasClient(b *testing.B) { target = id } nb.clients[id] = &clientEntry{ - domains: map[domain.Domain]domainInfo{ - domain.Domain(rand.Text()): { - serviceID: rand.Text(), + services: map[ServiceKey]serviceInfo{ + ServiceKey(rand.Text()): { + serviceID: types.ServiceID(rand.Text()), }, }, createdAt: time.Now(), @@ -70,9 +70,9 @@ func BenchmarkHasClientDuringAddPeer(b *testing.B) { target = id } nb.clients[id] = &clientEntry{ - domains: map[domain.Domain]domainInfo{ - domain.Domain(rand.Text()): { - serviceID: rand.Text(), + services: map[ServiceKey]serviceInfo{ + ServiceKey(rand.Text()): { + serviceID: types.ServiceID(rand.Text()), }, }, createdAt: time.Now(), @@ -81,19 +81,22 @@ func BenchmarkHasClientDuringAddPeer(b *testing.B) { } // Launch workers that continuously call AddPeer with new random accountIDs. + ctx, cancel := context.WithCancel(b.Context()) var wg sync.WaitGroup for range addPeerWorkers { - wg.Go(func() { - for { - if err := nb.AddPeer(b.Context(), + wg.Add(1) + go func() { + defer wg.Done() + for ctx.Err() == nil { + if err := nb.AddPeer(ctx, types.AccountID(rand.Text()), - domain.Domain(rand.Text()), + ServiceKey(rand.Text()), rand.Text(), - rand.Text()); err != nil { - b.Log(err) + types.ServiceID(rand.Text())); err != nil { + return } } - }) + }() } // Benchmark calling HasClient during AddPeer contention. @@ -104,4 +107,6 @@ func BenchmarkHasClientDuringAddPeer(b *testing.B) { } }) b.StopTimer() + cancel() + wg.Wait() } diff --git a/proxy/internal/roundtrip/netbird_test.go b/proxy/internal/roundtrip/netbird_test.go index 0a742c2fa..5444f6c11 100644 --- a/proxy/internal/roundtrip/netbird_test.go +++ b/proxy/internal/roundtrip/netbird_test.go @@ -11,7 +11,6 @@ import ( "google.golang.org/grpc" "github.com/netbirdio/netbird/proxy/internal/types" - "github.com/netbirdio/netbird/shared/management/domain" "github.com/netbirdio/netbird/shared/management/proto" ) @@ -27,16 +26,15 @@ type mockStatusNotifier struct { } type statusCall struct { - accountID string - serviceID string - domain string + accountID types.AccountID + serviceID types.ServiceID connected bool } -func (m *mockStatusNotifier) NotifyStatus(_ context.Context, accountID, serviceID, domain string, connected bool) error { +func (m *mockStatusNotifier) NotifyStatus(_ context.Context, accountID types.AccountID, serviceID types.ServiceID, connected bool) error { m.mu.Lock() defer m.mu.Unlock() - m.statuses = append(m.statuses, statusCall{accountID, serviceID, domain, connected}) + m.statuses = append(m.statuses, statusCall{accountID, serviceID, connected}) return nil } @@ -62,36 +60,34 @@ func TestNetBird_AddPeer_CreatesClientForNewAccount(t *testing.T) { // Initially no client exists. assert.False(t, nb.HasClient(accountID), "should not have client before AddPeer") - assert.Equal(t, 0, nb.DomainCount(accountID), "domain count should be 0") + assert.Equal(t, 0, nb.ServiceCount(accountID), "service count should be 0") - // Add first domain - this should create a new client. - // Note: This will fail to actually connect since we use an invalid URL, - // but the client entry should still be created. - err := nb.AddPeer(context.Background(), accountID, domain.Domain("domain1.test"), "setup-key-1", "proxy-1") + // Add first service - this should create a new client. + err := nb.AddPeer(context.Background(), accountID, "domain1.test", "setup-key-1", types.ServiceID("proxy-1")) require.NoError(t, err) assert.True(t, nb.HasClient(accountID), "should have client after AddPeer") - assert.Equal(t, 1, nb.DomainCount(accountID), "domain count should be 1") + assert.Equal(t, 1, nb.ServiceCount(accountID), "service count should be 1") } func TestNetBird_AddPeer_ReuseClientForSameAccount(t *testing.T) { nb := mockNetBird() accountID := types.AccountID("account-1") - // Add first domain. - err := nb.AddPeer(context.Background(), accountID, domain.Domain("domain1.test"), "setup-key-1", "proxy-1") + // Add first service. + err := nb.AddPeer(context.Background(), accountID, "domain1.test", "setup-key-1", types.ServiceID("proxy-1")) require.NoError(t, err) - assert.Equal(t, 1, nb.DomainCount(accountID)) + assert.Equal(t, 1, nb.ServiceCount(accountID)) - // Add second domain for the same account - should reuse existing client. - err = nb.AddPeer(context.Background(), accountID, domain.Domain("domain2.test"), "setup-key-1", "proxy-2") + // Add second service for the same account - should reuse existing client. + err = nb.AddPeer(context.Background(), accountID, "domain2.test", "setup-key-1", types.ServiceID("proxy-2")) require.NoError(t, err) - assert.Equal(t, 2, nb.DomainCount(accountID), "domain count should be 2 after adding second domain") + assert.Equal(t, 2, nb.ServiceCount(accountID), "service count should be 2 after adding second service") - // Add third domain. - err = nb.AddPeer(context.Background(), accountID, domain.Domain("domain3.test"), "setup-key-1", "proxy-3") + // Add third service. + err = nb.AddPeer(context.Background(), accountID, "domain3.test", "setup-key-1", types.ServiceID("proxy-3")) require.NoError(t, err) - assert.Equal(t, 3, nb.DomainCount(accountID), "domain count should be 3 after adding third domain") + assert.Equal(t, 3, nb.ServiceCount(accountID), "service count should be 3 after adding third service") // Still only one client. assert.True(t, nb.HasClient(accountID)) @@ -102,64 +98,62 @@ func TestNetBird_AddPeer_SeparateClientsForDifferentAccounts(t *testing.T) { account1 := types.AccountID("account-1") account2 := types.AccountID("account-2") - // Add domain for account 1. - err := nb.AddPeer(context.Background(), account1, domain.Domain("domain1.test"), "setup-key-1", "proxy-1") + // Add service for account 1. + err := nb.AddPeer(context.Background(), account1, "domain1.test", "setup-key-1", types.ServiceID("proxy-1")) require.NoError(t, err) - // Add domain for account 2. - err = nb.AddPeer(context.Background(), account2, domain.Domain("domain2.test"), "setup-key-2", "proxy-2") + // Add service for account 2. + err = nb.AddPeer(context.Background(), account2, "domain2.test", "setup-key-2", types.ServiceID("proxy-2")) require.NoError(t, err) // Both accounts should have their own clients. assert.True(t, nb.HasClient(account1), "account1 should have client") assert.True(t, nb.HasClient(account2), "account2 should have client") - assert.Equal(t, 1, nb.DomainCount(account1), "account1 domain count should be 1") - assert.Equal(t, 1, nb.DomainCount(account2), "account2 domain count should be 1") + assert.Equal(t, 1, nb.ServiceCount(account1), "account1 service count should be 1") + assert.Equal(t, 1, nb.ServiceCount(account2), "account2 service count should be 1") } -func TestNetBird_RemovePeer_KeepsClientWhenDomainsRemain(t *testing.T) { +func TestNetBird_RemovePeer_KeepsClientWhenServicesRemain(t *testing.T) { nb := mockNetBird() accountID := types.AccountID("account-1") - // Add multiple domains. - err := nb.AddPeer(context.Background(), accountID, domain.Domain("domain1.test"), "setup-key-1", "proxy-1") + // Add multiple services. + err := nb.AddPeer(context.Background(), accountID, "domain1.test", "setup-key-1", types.ServiceID("proxy-1")) require.NoError(t, err) - err = nb.AddPeer(context.Background(), accountID, domain.Domain("domain2.test"), "setup-key-1", "proxy-2") + err = nb.AddPeer(context.Background(), accountID, "domain2.test", "setup-key-1", types.ServiceID("proxy-2")) require.NoError(t, err) - err = nb.AddPeer(context.Background(), accountID, domain.Domain("domain3.test"), "setup-key-1", "proxy-3") + err = nb.AddPeer(context.Background(), accountID, "domain3.test", "setup-key-1", types.ServiceID("proxy-3")) require.NoError(t, err) - assert.Equal(t, 3, nb.DomainCount(accountID)) + assert.Equal(t, 3, nb.ServiceCount(accountID)) - // Remove one domain - client should remain. + // Remove one service - client should remain. err = nb.RemovePeer(context.Background(), accountID, "domain1.test") require.NoError(t, err) - assert.True(t, nb.HasClient(accountID), "client should remain after removing one domain") - assert.Equal(t, 2, nb.DomainCount(accountID), "domain count should be 2") + assert.True(t, nb.HasClient(accountID), "client should remain after removing one service") + assert.Equal(t, 2, nb.ServiceCount(accountID), "service count should be 2") - // Remove another domain - client should still remain. + // Remove another service - client should still remain. err = nb.RemovePeer(context.Background(), accountID, "domain2.test") require.NoError(t, err) - assert.True(t, nb.HasClient(accountID), "client should remain after removing second domain") - assert.Equal(t, 1, nb.DomainCount(accountID), "domain count should be 1") + assert.True(t, nb.HasClient(accountID), "client should remain after removing second service") + assert.Equal(t, 1, nb.ServiceCount(accountID), "service count should be 1") } -func TestNetBird_RemovePeer_RemovesClientWhenLastDomainRemoved(t *testing.T) { +func TestNetBird_RemovePeer_RemovesClientWhenLastServiceRemoved(t *testing.T) { nb := mockNetBird() accountID := types.AccountID("account-1") - // Add single domain. - err := nb.AddPeer(context.Background(), accountID, domain.Domain("domain1.test"), "setup-key-1", "proxy-1") + // Add single service. + err := nb.AddPeer(context.Background(), accountID, "domain1.test", "setup-key-1", types.ServiceID("proxy-1")) require.NoError(t, err) assert.True(t, nb.HasClient(accountID)) - // Remove the only domain - client should be removed. - // Note: Stop() may fail since the client never actually connected, - // but the entry should still be removed from the map. + // Remove the only service - client should be removed. _ = nb.RemovePeer(context.Background(), accountID, "domain1.test") - // After removing all domains, client should be gone. - assert.False(t, nb.HasClient(accountID), "client should be removed after removing last domain") - assert.Equal(t, 0, nb.DomainCount(accountID), "domain count should be 0") + // After removing all services, client should be gone. + assert.False(t, nb.HasClient(accountID), "client should be removed after removing last service") + assert.Equal(t, 0, nb.ServiceCount(accountID), "service count should be 0") } func TestNetBird_RemovePeer_NonExistentAccountIsNoop(t *testing.T) { @@ -171,21 +165,21 @@ func TestNetBird_RemovePeer_NonExistentAccountIsNoop(t *testing.T) { assert.NoError(t, err, "removing from non-existent account should not error") } -func TestNetBird_RemovePeer_NonExistentDomainIsNoop(t *testing.T) { +func TestNetBird_RemovePeer_NonExistentServiceIsNoop(t *testing.T) { nb := mockNetBird() accountID := types.AccountID("account-1") - // Add one domain. - err := nb.AddPeer(context.Background(), accountID, domain.Domain("domain1.test"), "setup-key-1", "proxy-1") + // Add one service. + err := nb.AddPeer(context.Background(), accountID, "domain1.test", "setup-key-1", types.ServiceID("proxy-1")) require.NoError(t, err) - // Remove non-existent domain - should not affect existing domain. - err = nb.RemovePeer(context.Background(), accountID, domain.Domain("nonexistent.test")) + // Remove non-existent service - should not affect existing service. + err = nb.RemovePeer(context.Background(), accountID, "nonexistent.test") require.NoError(t, err) - // Original domain should still be registered. + // Original service should still be registered. assert.True(t, nb.HasClient(accountID)) - assert.Equal(t, 1, nb.DomainCount(accountID), "original domain should remain") + assert.Equal(t, 1, nb.ServiceCount(accountID), "original service should remain") } func TestWithAccountID_AndAccountIDFromContext(t *testing.T) { @@ -216,19 +210,17 @@ func TestNetBird_StopAll_StopsAllClients(t *testing.T) { account2 := types.AccountID("account-2") account3 := types.AccountID("account-3") - // Add domains for multiple accounts. - err := nb.AddPeer(context.Background(), account1, domain.Domain("domain1.test"), "key-1", "proxy-1") + // Add services for multiple accounts. + err := nb.AddPeer(context.Background(), account1, "domain1.test", "key-1", types.ServiceID("proxy-1")) require.NoError(t, err) - err = nb.AddPeer(context.Background(), account2, domain.Domain("domain2.test"), "key-2", "proxy-2") + err = nb.AddPeer(context.Background(), account2, "domain2.test", "key-2", types.ServiceID("proxy-2")) require.NoError(t, err) - err = nb.AddPeer(context.Background(), account3, domain.Domain("domain3.test"), "key-3", "proxy-3") + err = nb.AddPeer(context.Background(), account3, "domain3.test", "key-3", types.ServiceID("proxy-3")) require.NoError(t, err) assert.Equal(t, 3, nb.ClientCount(), "should have 3 clients") // Stop all clients. - // Note: StopAll may return errors since clients never actually connected, - // but the clients should still be removed from the map. _ = nb.StopAll(context.Background()) assert.Equal(t, 0, nb.ClientCount(), "should have 0 clients after StopAll") @@ -243,18 +235,18 @@ func TestNetBird_ClientCount(t *testing.T) { assert.Equal(t, 0, nb.ClientCount(), "should start with 0 clients") // Add clients for different accounts. - err := nb.AddPeer(context.Background(), types.AccountID("account-1"), domain.Domain("domain1.test"), "key-1", "proxy-1") + err := nb.AddPeer(context.Background(), types.AccountID("account-1"), "domain1.test", "key-1", types.ServiceID("proxy-1")) require.NoError(t, err) assert.Equal(t, 1, nb.ClientCount()) - err = nb.AddPeer(context.Background(), types.AccountID("account-2"), domain.Domain("domain2.test"), "key-2", "proxy-2") + err = nb.AddPeer(context.Background(), types.AccountID("account-2"), "domain2.test", "key-2", types.ServiceID("proxy-2")) require.NoError(t, err) assert.Equal(t, 2, nb.ClientCount()) - // Adding domain to existing account should not increase count. - err = nb.AddPeer(context.Background(), types.AccountID("account-1"), domain.Domain("domain1b.test"), "key-1", "proxy-1b") + // Adding service to existing account should not increase count. + err = nb.AddPeer(context.Background(), types.AccountID("account-1"), "domain1b.test", "key-1", types.ServiceID("proxy-1b")) require.NoError(t, err) - assert.Equal(t, 2, nb.ClientCount(), "adding domain to existing account should not increase client count") + assert.Equal(t, 2, nb.ClientCount(), "adding service to existing account should not increase client count") } func TestNetBird_RoundTrip_RequiresAccountIDInContext(t *testing.T) { @@ -293,8 +285,8 @@ func TestNetBird_AddPeer_ExistingStartedClient_NotifiesStatus(t *testing.T) { }, nil, notifier, &mockMgmtClient{}) accountID := types.AccountID("account-1") - // Add first domain — creates a new client entry. - err := nb.AddPeer(context.Background(), accountID, domain.Domain("domain1.test"), "key-1", "svc-1") + // Add first service — creates a new client entry. + err := nb.AddPeer(context.Background(), accountID, "domain1.test", "key-1", types.ServiceID("svc-1")) require.NoError(t, err) // Manually mark client as started to simulate background startup completing. @@ -302,15 +294,14 @@ func TestNetBird_AddPeer_ExistingStartedClient_NotifiesStatus(t *testing.T) { nb.clients[accountID].started = true nb.clientsMux.Unlock() - // Add second domain — should notify immediately since client is already started. - err = nb.AddPeer(context.Background(), accountID, domain.Domain("domain2.test"), "key-1", "svc-2") + // Add second service — should notify immediately since client is already started. + err = nb.AddPeer(context.Background(), accountID, "domain2.test", "key-1", types.ServiceID("svc-2")) require.NoError(t, err) calls := notifier.calls() require.Len(t, calls, 1) - assert.Equal(t, string(accountID), calls[0].accountID) - assert.Equal(t, "svc-2", calls[0].serviceID) - assert.Equal(t, "domain2.test", calls[0].domain) + assert.Equal(t, accountID, calls[0].accountID) + assert.Equal(t, types.ServiceID("svc-2"), calls[0].serviceID) assert.True(t, calls[0].connected) } @@ -323,18 +314,18 @@ func TestNetBird_RemovePeer_NotifiesDisconnection(t *testing.T) { }, nil, notifier, &mockMgmtClient{}) accountID := types.AccountID("account-1") - err := nb.AddPeer(context.Background(), accountID, domain.Domain("domain1.test"), "key-1", "svc-1") + err := nb.AddPeer(context.Background(), accountID, "domain1.test", "key-1", types.ServiceID("svc-1")) require.NoError(t, err) - err = nb.AddPeer(context.Background(), accountID, domain.Domain("domain2.test"), "key-1", "svc-2") + err = nb.AddPeer(context.Background(), accountID, "domain2.test", "key-1", types.ServiceID("svc-2")) require.NoError(t, err) - // Remove one domain — client stays, but disconnection notification fires. + // Remove one service — client stays, but disconnection notification fires. err = nb.RemovePeer(context.Background(), accountID, "domain1.test") require.NoError(t, err) assert.True(t, nb.HasClient(accountID)) calls := notifier.calls() require.Len(t, calls, 1) - assert.Equal(t, "domain1.test", calls[0].domain) + assert.Equal(t, types.ServiceID("svc-1"), calls[0].serviceID) assert.False(t, calls[0].connected) } diff --git a/proxy/internal/tcp/bench_test.go b/proxy/internal/tcp/bench_test.go new file mode 100644 index 000000000..049f8395d --- /dev/null +++ b/proxy/internal/tcp/bench_test.go @@ -0,0 +1,133 @@ +package tcp + +import ( + "bytes" + "crypto/tls" + "io" + "net" + "testing" +) + +// BenchmarkPeekClientHello_TLS measures the overhead of peeking at a real +// TLS ClientHello and extracting the SNI. This is the per-connection cost +// added to every TLS connection on the main listener. +func BenchmarkPeekClientHello_TLS(b *testing.B) { + // Pre-generate a ClientHello by capturing what crypto/tls sends. + clientConn, serverConn := net.Pipe() + go func() { + tlsConn := tls.Client(clientConn, &tls.Config{ + ServerName: "app.example.com", + InsecureSkipVerify: true, //nolint:gosec + }) + _ = tlsConn.Handshake() + }() + + var hello []byte + buf := make([]byte, 16384) + n, _ := serverConn.Read(buf) + hello = make([]byte, n) + copy(hello, buf[:n]) + clientConn.Close() + serverConn.Close() + + b.ResetTimer() + b.ReportAllocs() + + for b.Loop() { + r := bytes.NewReader(hello) + conn := &readerConn{Reader: r} + sni, wrapped, err := PeekClientHello(conn) + if err != nil { + b.Fatal(err) + } + if sni != "app.example.com" { + b.Fatalf("unexpected SNI: %q", sni) + } + // Simulate draining the peeked bytes (what the HTTP server would do). + _, _ = io.Copy(io.Discard, wrapped) + } +} + +// BenchmarkPeekClientHello_NonTLS measures peek overhead for non-TLS +// connections that hit the fast non-handshake exit path. +func BenchmarkPeekClientHello_NonTLS(b *testing.B) { + httpReq := []byte("GET / HTTP/1.1\r\nHost: example.com\r\n\r\n") + + b.ResetTimer() + b.ReportAllocs() + + for b.Loop() { + r := bytes.NewReader(httpReq) + conn := &readerConn{Reader: r} + _, wrapped, err := PeekClientHello(conn) + if err != nil { + b.Fatal(err) + } + _, _ = io.Copy(io.Discard, wrapped) + } +} + +// BenchmarkPeekedConn_Read measures the read overhead of the peekedConn +// wrapper compared to a plain connection read. The peeked bytes use +// io.MultiReader which adds one indirection per Read call. +func BenchmarkPeekedConn_Read(b *testing.B) { + data := make([]byte, 4096) + peeked := make([]byte, 512) + buf := make([]byte, 1024) + + b.ResetTimer() + b.ReportAllocs() + + for b.Loop() { + r := bytes.NewReader(data) + conn := &readerConn{Reader: r} + pc := newPeekedConn(conn, peeked) + for { + _, err := pc.Read(buf) + if err != nil { + break + } + } + } +} + +// BenchmarkExtractSNI measures just the in-memory SNI parsing cost, +// excluding I/O. +func BenchmarkExtractSNI(b *testing.B) { + clientConn, serverConn := net.Pipe() + go func() { + tlsConn := tls.Client(clientConn, &tls.Config{ + ServerName: "app.example.com", + InsecureSkipVerify: true, //nolint:gosec + }) + _ = tlsConn.Handshake() + }() + + buf := make([]byte, 16384) + n, _ := serverConn.Read(buf) + payload := make([]byte, n-tlsRecordHeaderLen) + copy(payload, buf[tlsRecordHeaderLen:n]) + clientConn.Close() + serverConn.Close() + + b.ResetTimer() + b.ReportAllocs() + + for b.Loop() { + sni := extractSNI(payload) + if sni != "app.example.com" { + b.Fatalf("unexpected SNI: %q", sni) + } + } +} + +// readerConn wraps an io.Reader as a net.Conn for benchmarking. +// Only Read is functional; all other methods are no-ops. +type readerConn struct { + io.Reader + net.Conn +} + +func (c *readerConn) Read(b []byte) (int, error) { + return c.Reader.Read(b) +} diff --git a/proxy/internal/tcp/chanlistener.go b/proxy/internal/tcp/chanlistener.go new file mode 100644 index 000000000..ee64bc0a2 --- /dev/null +++ b/proxy/internal/tcp/chanlistener.go @@ -0,0 +1,76 @@ +package tcp + +import ( + "net" + "sync" +) + +// chanListener implements net.Listener by reading connections from a channel. +// It allows the SNI router to feed HTTP connections to http.Server.ServeTLS. +type chanListener struct { + ch chan net.Conn + addr net.Addr + once sync.Once + closed chan struct{} +} + +func newChanListener(ch chan net.Conn, addr net.Addr) *chanListener { + return &chanListener{ + ch: ch, + addr: addr, + closed: make(chan struct{}), + } +} + +// Accept waits for and returns the next connection from the channel. +func (l *chanListener) Accept() (net.Conn, error) { + for { + select { + case conn, ok := <-l.ch: + if !ok { + return nil, net.ErrClosed + } + return conn, nil + case <-l.closed: + // Drain buffered connections before returning. + for { + select { + case conn, ok := <-l.ch: + if !ok { + return nil, net.ErrClosed + } + _ = conn.Close() + default: + return nil, net.ErrClosed + } + } + } + } +} + +// Close signals the listener to stop accepting connections and drains +// any buffered connections that have not yet been accepted. +func (l *chanListener) Close() error { + l.once.Do(func() { + close(l.closed) + for { + select { + case conn, ok := <-l.ch: + if !ok { + return + } + _ = conn.Close() + default: + return + } + } + }) + return nil +} + +// Addr returns the listener's network address. +func (l *chanListener) Addr() net.Addr { + return l.addr +} + +var _ net.Listener = (*chanListener)(nil) diff --git a/proxy/internal/tcp/peekedconn.go b/proxy/internal/tcp/peekedconn.go new file mode 100644 index 000000000..26f3e5c7c --- /dev/null +++ b/proxy/internal/tcp/peekedconn.go @@ -0,0 +1,39 @@ +package tcp + +import ( + "bytes" + "io" + "net" +) + +// peekedConn wraps a net.Conn and prepends previously peeked bytes +// so that readers see the full original stream transparently. +type peekedConn struct { + net.Conn + reader io.Reader +} + +func newPeekedConn(conn net.Conn, peeked []byte) *peekedConn { + return &peekedConn{ + Conn: conn, + reader: io.MultiReader(bytes.NewReader(peeked), conn), + } +} + +// Read replays the peeked bytes first, then reads from the underlying conn. +func (c *peekedConn) Read(b []byte) (int, error) { + return c.reader.Read(b) +} + +// CloseWrite delegates to the underlying connection if it supports +// half-close (e.g. *net.TCPConn). Without this, embedding net.Conn +// as an interface hides the concrete type's CloseWrite method, making +// half-close a silent no-op for all SNI-routed connections. +func (c *peekedConn) CloseWrite() error { + if hc, ok := c.Conn.(halfCloser); ok { + return hc.CloseWrite() + } + return nil +} + +var _ halfCloser = (*peekedConn)(nil) diff --git a/proxy/internal/tcp/proxyprotocol.go b/proxy/internal/tcp/proxyprotocol.go new file mode 100644 index 000000000..699b75a5d --- /dev/null +++ b/proxy/internal/tcp/proxyprotocol.go @@ -0,0 +1,29 @@ +package tcp + +import ( + "fmt" + "net" + + "github.com/pires/go-proxyproto" +) + +// writeProxyProtoV2 sends a PROXY protocol v2 header to the backend connection, +// conveying the real client address. +func writeProxyProtoV2(client, backend net.Conn) error { + tp := proxyproto.TCPv4 + if addr, ok := client.RemoteAddr().(*net.TCPAddr); ok && addr.IP.To4() == nil { + tp = proxyproto.TCPv6 + } + + header := &proxyproto.Header{ + Version: 2, + Command: proxyproto.PROXY, + TransportProtocol: tp, + SourceAddr: client.RemoteAddr(), + DestinationAddr: client.LocalAddr(), + } + if _, err := header.WriteTo(backend); err != nil { + return fmt.Errorf("write PROXY protocol v2 header: %w", err) + } + return nil +} diff --git a/proxy/internal/tcp/proxyprotocol_test.go b/proxy/internal/tcp/proxyprotocol_test.go new file mode 100644 index 000000000..f8c48b2ab --- /dev/null +++ b/proxy/internal/tcp/proxyprotocol_test.go @@ -0,0 +1,128 @@ +package tcp + +import ( + "bufio" + "net" + "testing" + + "github.com/pires/go-proxyproto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestWriteProxyProtoV2_IPv4(t *testing.T) { + // Set up a real TCP listener and dial to get connections with real addresses. + ln, err := net.Listen("tcp4", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + var serverConn net.Conn + accepted := make(chan struct{}) + go func() { + var err error + serverConn, err = ln.Accept() + if err != nil { + t.Error("accept failed:", err) + } + close(accepted) + }() + + clientConn, err := net.Dial("tcp4", ln.Addr().String()) + require.NoError(t, err) + defer clientConn.Close() + + <-accepted + defer serverConn.Close() + + // Use a pipe as the backend: write the header to one end, read from the other. + backendRead, backendWrite := net.Pipe() + defer backendRead.Close() + defer backendWrite.Close() + + // serverConn is the "client" arg: RemoteAddr is the source, LocalAddr is the destination. + writeDone := make(chan error, 1) + go func() { + writeDone <- writeProxyProtoV2(serverConn, backendWrite) + }() + + // Read the PROXY protocol header from the backend read side. + header, err := proxyproto.Read(bufio.NewReader(backendRead)) + require.NoError(t, err) + require.NotNil(t, header, "should have received a proxy protocol header") + + writeErr := <-writeDone + require.NoError(t, writeErr) + + assert.Equal(t, byte(2), header.Version, "version should be 2") + assert.Equal(t, proxyproto.PROXY, header.Command, "command should be PROXY") + assert.Equal(t, proxyproto.TCPv4, header.TransportProtocol, "transport should be TCPv4") + + // serverConn.RemoteAddr() is the client's address (source in the header). + expectedSrc := serverConn.RemoteAddr().(*net.TCPAddr) + actualSrc := header.SourceAddr.(*net.TCPAddr) + assert.Equal(t, expectedSrc.IP.String(), actualSrc.IP.String(), "source IP should match client remote addr") + assert.Equal(t, expectedSrc.Port, actualSrc.Port, "source port should match client remote addr") + + // serverConn.LocalAddr() is the server's address (destination in the header). + expectedDst := serverConn.LocalAddr().(*net.TCPAddr) + actualDst := header.DestinationAddr.(*net.TCPAddr) + assert.Equal(t, expectedDst.IP.String(), actualDst.IP.String(), "destination IP should match server local addr") + assert.Equal(t, expectedDst.Port, actualDst.Port, "destination port should match server local addr") +} + +func TestWriteProxyProtoV2_IPv6(t *testing.T) { + // Set up a real TCP6 listener on loopback. + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Skip("IPv6 not available:", err) + } + defer ln.Close() + + var serverConn net.Conn + accepted := make(chan struct{}) + go func() { + var err error + serverConn, err = ln.Accept() + if err != nil { + t.Error("accept failed:", err) + } + close(accepted) + }() + + clientConn, err := net.Dial("tcp6", ln.Addr().String()) + require.NoError(t, err) + defer clientConn.Close() + + <-accepted + defer serverConn.Close() + + backendRead, backendWrite := net.Pipe() + defer backendRead.Close() + defer backendWrite.Close() + + writeDone := make(chan error, 1) + go func() { + writeDone <- writeProxyProtoV2(serverConn, backendWrite) + }() + + header, err := proxyproto.Read(bufio.NewReader(backendRead)) + require.NoError(t, err) + require.NotNil(t, header, "should have received a proxy protocol header") + + writeErr := <-writeDone + require.NoError(t, writeErr) + + assert.Equal(t, byte(2), header.Version, "version should be 2") + assert.Equal(t, proxyproto.PROXY, header.Command, "command should be PROXY") + assert.Equal(t, proxyproto.TCPv6, header.TransportProtocol, "transport should be TCPv6") + + expectedSrc := serverConn.RemoteAddr().(*net.TCPAddr) + actualSrc := header.SourceAddr.(*net.TCPAddr) + assert.Equal(t, expectedSrc.IP.String(), actualSrc.IP.String(), "source IP should match client remote addr") + assert.Equal(t, expectedSrc.Port, actualSrc.Port, "source port should match client remote addr") + + expectedDst := serverConn.LocalAddr().(*net.TCPAddr) + actualDst := header.DestinationAddr.(*net.TCPAddr) + assert.Equal(t, expectedDst.IP.String(), actualDst.IP.String(), "destination IP should match server local addr") + assert.Equal(t, expectedDst.Port, actualDst.Port, "destination port should match server local addr") +} diff --git a/proxy/internal/tcp/relay.go b/proxy/internal/tcp/relay.go new file mode 100644 index 000000000..39949818d --- /dev/null +++ b/proxy/internal/tcp/relay.go @@ -0,0 +1,156 @@ +package tcp + +import ( + "context" + "errors" + "io" + "net" + "sync" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/proxy/internal/netutil" +) + +// errIdleTimeout is returned when a relay connection is closed due to inactivity. +var errIdleTimeout = errors.New("idle timeout") + +// DefaultIdleTimeout is the default idle timeout for TCP relay connections. +// A zero value disables idle timeout checking. +const DefaultIdleTimeout = 5 * time.Minute + +// halfCloser is implemented by connections that support half-close +// (e.g. *net.TCPConn). When one copy direction finishes, we signal +// EOF to the remote by closing the write side while keeping the read +// side open so the other direction can drain. +type halfCloser interface { + CloseWrite() error +} + +// copyBufPool avoids allocating a new 32KB buffer per io.Copy call. +var copyBufPool = sync.Pool{ + New: func() any { + buf := make([]byte, 32*1024) + return &buf + }, +} + +// Relay copies data bidirectionally between src and dst until both +// sides are done or the context is canceled. When idleTimeout is +// non-zero, each direction's read is deadline-guarded; if no data +// flows within the timeout the connection is torn down. When one +// direction finishes, it half-closes the write side of the +// destination (if supported) to signal EOF, allowing the other +// direction to drain gracefully before the full connection teardown. +func Relay(ctx context.Context, logger *log.Entry, src, dst net.Conn, idleTimeout time.Duration) (srcToDst, dstToSrc int64) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + go func() { + <-ctx.Done() + _ = src.Close() + _ = dst.Close() + }() + + var wg sync.WaitGroup + wg.Add(2) + + var errSrcToDst, errDstToSrc error + + go func() { + defer wg.Done() + srcToDst, errSrcToDst = copyWithIdleTimeout(dst, src, idleTimeout) + halfClose(dst) + cancel() + }() + + go func() { + defer wg.Done() + dstToSrc, errDstToSrc = copyWithIdleTimeout(src, dst, idleTimeout) + halfClose(src) + cancel() + }() + + wg.Wait() + + if errors.Is(errSrcToDst, errIdleTimeout) || errors.Is(errDstToSrc, errIdleTimeout) { + logger.Debug("relay closed due to idle timeout") + } + if errSrcToDst != nil && !isExpectedCopyError(errSrcToDst) { + logger.Debugf("relay copy error (src→dst): %v", errSrcToDst) + } + if errDstToSrc != nil && !isExpectedCopyError(errDstToSrc) { + logger.Debugf("relay copy error (dst→src): %v", errDstToSrc) + } + + return srcToDst, dstToSrc +} + +// copyWithIdleTimeout copies from src to dst using a pooled buffer. +// When idleTimeout > 0 it sets a read deadline on src before each +// read and treats a timeout as an idle-triggered close. +func copyWithIdleTimeout(dst io.Writer, src io.Reader, idleTimeout time.Duration) (int64, error) { + bufp := copyBufPool.Get().(*[]byte) + defer copyBufPool.Put(bufp) + + if idleTimeout <= 0 { + return io.CopyBuffer(dst, src, *bufp) + } + + conn, ok := src.(net.Conn) + if !ok { + return io.CopyBuffer(dst, src, *bufp) + } + + buf := *bufp + var total int64 + for { + if err := conn.SetReadDeadline(time.Now().Add(idleTimeout)); err != nil { + return total, err + } + nr, readErr := src.Read(buf) + if nr > 0 { + n, err := checkedWrite(dst, buf[:nr]) + total += n + if err != nil { + return total, err + } + } + if readErr != nil { + if netutil.IsTimeout(readErr) { + return total, errIdleTimeout + } + return total, readErr + } + } +} + +// checkedWrite writes buf to dst and returns the number of bytes written. +// It guards against short writes and negative counts per io.Copy convention. +func checkedWrite(dst io.Writer, buf []byte) (int64, error) { + nw, err := dst.Write(buf) + if nw < 0 || nw > len(buf) { + nw = 0 + } + if err != nil { + return int64(nw), err + } + if nw != len(buf) { + return int64(nw), io.ErrShortWrite + } + return int64(nw), nil +} + +func isExpectedCopyError(err error) bool { + return errors.Is(err, errIdleTimeout) || netutil.IsExpectedError(err) +} + +// halfClose attempts to half-close the write side of the connection. +// If the connection does not support half-close, this is a no-op. +func halfClose(conn net.Conn) { + if hc, ok := conn.(halfCloser); ok { + // Best-effort; the full close will follow shortly. + _ = hc.CloseWrite() + } +} diff --git a/proxy/internal/tcp/relay_test.go b/proxy/internal/tcp/relay_test.go new file mode 100644 index 000000000..e42d65b9d --- /dev/null +++ b/proxy/internal/tcp/relay_test.go @@ -0,0 +1,210 @@ +package tcp + +import ( + "context" + "fmt" + "io" + "net" + "testing" + "time" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/proxy/internal/netutil" +) + +func TestRelay_BidirectionalCopy(t *testing.T) { + srcClient, srcServer := net.Pipe() + dstClient, dstServer := net.Pipe() + + logger := log.NewEntry(log.StandardLogger()) + ctx := context.Background() + + srcData := []byte("hello from src") + dstData := []byte("hello from dst") + + // dst side: write response first, then read + close. + go func() { + _, _ = dstClient.Write(dstData) + buf := make([]byte, 256) + _, _ = dstClient.Read(buf) + dstClient.Close() + }() + + // src side: read the response, then send data + close. + go func() { + buf := make([]byte, 256) + _, _ = srcClient.Read(buf) + _, _ = srcClient.Write(srcData) + srcClient.Close() + }() + + s2d, d2s := Relay(ctx, logger, srcServer, dstServer, 0) + + assert.Equal(t, int64(len(srcData)), s2d, "bytes src→dst") + assert.Equal(t, int64(len(dstData)), d2s, "bytes dst→src") +} + +func TestRelay_ContextCancellation(t *testing.T) { + srcClient, srcServer := net.Pipe() + dstClient, dstServer := net.Pipe() + defer srcClient.Close() + defer dstClient.Close() + + logger := log.NewEntry(log.StandardLogger()) + ctx, cancel := context.WithCancel(context.Background()) + + done := make(chan struct{}) + go func() { + Relay(ctx, logger, srcServer, dstServer, 0) + close(done) + }() + + // Cancel should cause Relay to return. + cancel() + + select { + case <-done: + case <-time.After(5 * time.Second): + t.Fatal("Relay did not return after context cancellation") + } +} + +func TestRelay_OneSideClosed(t *testing.T) { + srcClient, srcServer := net.Pipe() + dstClient, dstServer := net.Pipe() + defer dstClient.Close() + + logger := log.NewEntry(log.StandardLogger()) + ctx := context.Background() + + // Close src immediately. Relay should complete without hanging. + srcClient.Close() + + done := make(chan struct{}) + go func() { + Relay(ctx, logger, srcServer, dstServer, 0) + close(done) + }() + + select { + case <-done: + case <-time.After(5 * time.Second): + t.Fatal("Relay did not return after one side closed") + } +} + +func TestRelay_LargeTransfer(t *testing.T) { + srcClient, srcServer := net.Pipe() + dstClient, dstServer := net.Pipe() + + logger := log.NewEntry(log.StandardLogger()) + ctx := context.Background() + + // 1MB of data. + data := make([]byte, 1<<20) + for i := range data { + data[i] = byte(i % 256) + } + + go func() { + _, _ = srcClient.Write(data) + srcClient.Close() + }() + + errCh := make(chan error, 1) + go func() { + received, err := io.ReadAll(dstClient) + if err != nil { + errCh <- err + return + } + if len(received) != len(data) { + errCh <- fmt.Errorf("expected %d bytes, got %d", len(data), len(received)) + return + } + errCh <- nil + dstClient.Close() + }() + + s2d, _ := Relay(ctx, logger, srcServer, dstServer, 0) + assert.Equal(t, int64(len(data)), s2d, "should transfer all bytes") + require.NoError(t, <-errCh) +} + +func TestRelay_IdleTimeout(t *testing.T) { + // Use real TCP connections so SetReadDeadline works (net.Pipe + // does not support deadlines). + srcLn, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer srcLn.Close() + + dstLn, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer dstLn.Close() + + srcClient, err := net.Dial("tcp", srcLn.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer srcClient.Close() + + srcServer, err := srcLn.Accept() + if err != nil { + t.Fatal(err) + } + + dstClient, err := net.Dial("tcp", dstLn.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer dstClient.Close() + + dstServer, err := dstLn.Accept() + if err != nil { + t.Fatal(err) + } + + logger := log.NewEntry(log.StandardLogger()) + ctx := context.Background() + + // Send initial data to prove the relay works. + go func() { + _, _ = srcClient.Write([]byte("ping")) + }() + + done := make(chan struct{}) + var s2d, d2s int64 + go func() { + s2d, d2s = Relay(ctx, logger, srcServer, dstServer, 200*time.Millisecond) + close(done) + }() + + // Read the forwarded data on the dst side. + buf := make([]byte, 64) + n, err := dstClient.Read(buf) + assert.NoError(t, err) + assert.Equal(t, "ping", string(buf[:n])) + + // Now stop sending. The relay should close after the idle timeout. + select { + case <-done: + assert.Greater(t, s2d, int64(0), "should have transferred initial data") + _ = d2s + case <-time.After(5 * time.Second): + t.Fatal("Relay did not exit after idle timeout") + } +} + +func TestIsExpectedError(t *testing.T) { + assert.True(t, netutil.IsExpectedError(net.ErrClosed)) + assert.True(t, netutil.IsExpectedError(context.Canceled)) + assert.True(t, netutil.IsExpectedError(io.EOF)) + assert.False(t, netutil.IsExpectedError(io.ErrUnexpectedEOF)) +} diff --git a/proxy/internal/tcp/router.go b/proxy/internal/tcp/router.go new file mode 100644 index 000000000..84fde0731 --- /dev/null +++ b/proxy/internal/tcp/router.go @@ -0,0 +1,570 @@ +package tcp + +import ( + "context" + "errors" + "fmt" + "net" + "net/netip" + "slices" + "sync" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/proxy/internal/accesslog" + "github.com/netbirdio/netbird/proxy/internal/types" +) + +// defaultDialTimeout is the fallback dial timeout when no per-route +// timeout is configured. +const defaultDialTimeout = 30 * time.Second + +// SNIHost is a typed key for SNI hostname lookups. +type SNIHost string + +// RouteType specifies how a connection should be handled. +type RouteType int + +const ( + // RouteHTTP routes the connection through the HTTP reverse proxy. + RouteHTTP RouteType = iota + // RouteTCP relays the connection directly to the backend (TLS passthrough). + RouteTCP +) + +const ( + // sniPeekTimeout is the deadline for reading the TLS ClientHello. + sniPeekTimeout = 5 * time.Second + // DefaultDrainTimeout is the default grace period for in-flight relay + // connections to finish during shutdown. + DefaultDrainTimeout = 30 * time.Second + // DefaultMaxRelayConns is the default cap on concurrent TCP relay connections per router. + DefaultMaxRelayConns = 4096 + // httpChannelBuffer is the capacity of the channel feeding HTTP connections. + httpChannelBuffer = 4096 +) + +// DialResolver returns a DialContextFunc for the given account. +type DialResolver func(accountID types.AccountID) (types.DialContextFunc, error) + +// Route describes where a connection for a given SNI should be sent. +type Route struct { + Type RouteType + AccountID types.AccountID + ServiceID types.ServiceID + // Domain is the service's configured domain, used for access log entries. + Domain string + // Protocol is the frontend protocol (tcp, tls), used for access log entries. + Protocol accesslog.Protocol + // Target is the backend address for TCP relay (e.g. "10.0.0.5:5432"). + Target string + // ProxyProtocol enables sending a PROXY protocol v2 header to the backend. + ProxyProtocol bool + // DialTimeout overrides the default dial timeout for this route. + // Zero uses defaultDialTimeout. + DialTimeout time.Duration +} + +// l4Logger sends layer-4 access log entries to the management server. +type l4Logger interface { + LogL4(entry accesslog.L4Entry) +} + +// RelayObserver receives callbacks for TCP relay lifecycle events. +// All methods must be safe for concurrent use. +type RelayObserver interface { + TCPRelayStarted(accountID types.AccountID) + TCPRelayEnded(accountID types.AccountID, duration time.Duration, srcToDst, dstToSrc int64) + TCPRelayDialError(accountID types.AccountID) + TCPRelayRejected(accountID types.AccountID) +} + +// Router accepts raw TCP connections on a shared listener, peeks at +// the TLS ClientHello to extract the SNI, and routes the connection +// to either the HTTP reverse proxy or a direct TCP relay. +type Router struct { + logger *log.Logger + // httpCh is immutable after construction: set only in NewRouter, nil in NewPortRouter. + httpCh chan net.Conn + httpListener *chanListener + mu sync.RWMutex + routes map[SNIHost][]Route + fallback *Route + draining bool + dialResolve DialResolver + activeConns sync.WaitGroup + activeRelays sync.WaitGroup + relaySem chan struct{} + drainDone chan struct{} + observer RelayObserver + accessLog l4Logger + // svcCtxs tracks a context per service ID. All relay goroutines for a + // service derive from its context; canceling it kills them immediately. + svcCtxs map[types.ServiceID]context.Context + svcCancels map[types.ServiceID]context.CancelFunc +} + +// NewRouter creates a new SNI-based connection router. +func NewRouter(logger *log.Logger, dialResolve DialResolver, addr net.Addr) *Router { + httpCh := make(chan net.Conn, httpChannelBuffer) + return &Router{ + logger: logger, + httpCh: httpCh, + httpListener: newChanListener(httpCh, addr), + routes: make(map[SNIHost][]Route), + dialResolve: dialResolve, + relaySem: make(chan struct{}, DefaultMaxRelayConns), + svcCtxs: make(map[types.ServiceID]context.Context), + svcCancels: make(map[types.ServiceID]context.CancelFunc), + } +} + +// NewPortRouter creates a Router for a dedicated port without an HTTP +// channel. Connections that don't match any SNI route fall through to +// the fallback relay (if set) or are closed. +func NewPortRouter(logger *log.Logger, dialResolve DialResolver) *Router { + return &Router{ + logger: logger, + routes: make(map[SNIHost][]Route), + dialResolve: dialResolve, + relaySem: make(chan struct{}, DefaultMaxRelayConns), + svcCtxs: make(map[types.ServiceID]context.Context), + svcCancels: make(map[types.ServiceID]context.CancelFunc), + } +} + +// HTTPListener returns a net.Listener that yields connections routed +// to the HTTP handler. Use this with http.Server.ServeTLS. +func (r *Router) HTTPListener() net.Listener { + return r.httpListener +} + +// AddRoute registers an SNI route. Multiple routes for the same host are +// stored and resolved by priority at lookup time (HTTP > TCP). +// Empty host is ignored to prevent conflicts with ECH/ESNI fallback. +func (r *Router) AddRoute(host SNIHost, route Route) { + if host == "" { + return + } + + r.mu.Lock() + defer r.mu.Unlock() + + routes := r.routes[host] + for i, existing := range routes { + if existing.ServiceID == route.ServiceID { + r.cancelServiceLocked(route.ServiceID) + routes[i] = route + return + } + } + r.routes[host] = append(routes, route) +} + +// RemoveRoute removes the route for the given host and service ID. +// Active relay connections for the service are closed immediately. +// If other routes remain for the host, they are preserved. +func (r *Router) RemoveRoute(host SNIHost, svcID types.ServiceID) { + r.mu.Lock() + defer r.mu.Unlock() + + r.routes[host] = slices.DeleteFunc(r.routes[host], func(route Route) bool { + return route.ServiceID == svcID + }) + if len(r.routes[host]) == 0 { + delete(r.routes, host) + } + r.cancelServiceLocked(svcID) +} + +// SetFallback registers a catch-all route for connections that don't +// match any SNI route. On a port router this handles plain TCP relay; +// on the main router it takes priority over the HTTP channel. +func (r *Router) SetFallback(route Route) { + r.mu.Lock() + defer r.mu.Unlock() + r.fallback = &route +} + +// RemoveFallback clears the catch-all fallback route and closes any +// active relay connections for the given service. +func (r *Router) RemoveFallback(svcID types.ServiceID) { + r.mu.Lock() + defer r.mu.Unlock() + r.fallback = nil + r.cancelServiceLocked(svcID) +} + +// SetObserver sets the relay lifecycle observer. Must be called before Serve. +func (r *Router) SetObserver(obs RelayObserver) { + r.mu.Lock() + defer r.mu.Unlock() + r.observer = obs +} + +// SetAccessLogger sets the L4 access logger. Must be called before Serve. +func (r *Router) SetAccessLogger(l l4Logger) { + r.mu.Lock() + defer r.mu.Unlock() + r.accessLog = l +} + +// getObserver returns the current relay observer under the read lock. +func (r *Router) getObserver() RelayObserver { + r.mu.RLock() + defer r.mu.RUnlock() + return r.observer +} + +// IsEmpty returns true when the router has no SNI routes and no fallback. +func (r *Router) IsEmpty() bool { + r.mu.RLock() + defer r.mu.RUnlock() + return len(r.routes) == 0 && r.fallback == nil +} + +// Serve accepts connections from ln and routes them based on SNI. +// It blocks until ctx is canceled or ln is closed, then drains +// active relay connections up to DefaultDrainTimeout. +func (r *Router) Serve(ctx context.Context, ln net.Listener) error { + done := make(chan struct{}) + defer close(done) + + go func() { + select { + case <-ctx.Done(): + _ = ln.Close() + if r.httpListener != nil { + r.httpListener.Close() + } + case <-done: + } + }() + + for { + conn, err := ln.Accept() + if err != nil { + if ctx.Err() != nil || errors.Is(err, net.ErrClosed) { + if ok := r.Drain(DefaultDrainTimeout); !ok { + r.logger.Warn("timed out waiting for connections to drain") + } + return nil + } + r.logger.Debugf("SNI router accept: %v", err) + continue + } + r.activeConns.Add(1) + go func() { + defer r.activeConns.Done() + r.handleConn(ctx, conn) + }() + } +} + +// handleConn peeks at the TLS ClientHello and routes the connection. +func (r *Router) handleConn(ctx context.Context, conn net.Conn) { + // Fast path: when no SNI routes and no HTTP channel exist (pure TCP + // fallback port), skip the TLS peek entirely to avoid read errors on + // non-TLS connections and reduce latency. + if r.isFallbackOnly() { + r.handleUnmatched(ctx, conn) + return + } + + if err := conn.SetReadDeadline(time.Now().Add(sniPeekTimeout)); err != nil { + r.logger.Debugf("set SNI peek deadline: %v", err) + _ = conn.Close() + return + } + + sni, wrapped, err := PeekClientHello(conn) + if err != nil { + r.logger.Debugf("SNI peek: %v", err) + if wrapped != nil { + r.handleUnmatched(ctx, wrapped) + } else { + _ = conn.Close() + } + return + } + + if err := wrapped.SetReadDeadline(time.Time{}); err != nil { + r.logger.Debugf("clear SNI peek deadline: %v", err) + _ = wrapped.Close() + return + } + + host := SNIHost(sni) + route, ok := r.lookupRoute(host) + if !ok { + r.handleUnmatched(ctx, wrapped) + return + } + + if route.Type == RouteHTTP { + r.sendToHTTP(wrapped) + return + } + + if err := r.relayTCP(ctx, wrapped, host, route); err != nil { + r.logger.WithFields(log.Fields{ + "sni": host, + "service_id": route.ServiceID, + "target": route.Target, + }).Warnf("TCP relay: %v", err) + _ = wrapped.Close() + } +} + +// isFallbackOnly returns true when the router has no SNI routes and no HTTP +// channel, meaning all connections should go directly to the fallback relay. +func (r *Router) isFallbackOnly() bool { + r.mu.RLock() + defer r.mu.RUnlock() + return len(r.routes) == 0 && r.httpCh == nil +} + +// handleUnmatched routes a connection that didn't match any SNI route. +// This includes ECH/ESNI connections where the cleartext SNI is empty. +// It tries the fallback relay first, then the HTTP channel, and closes +// the connection if neither is available. +func (r *Router) handleUnmatched(ctx context.Context, conn net.Conn) { + r.mu.RLock() + fb := r.fallback + r.mu.RUnlock() + + if fb != nil { + if err := r.relayTCP(ctx, conn, SNIHost("fallback"), *fb); err != nil { + r.logger.WithFields(log.Fields{ + "service_id": fb.ServiceID, + "target": fb.Target, + }).Warnf("TCP relay (fallback): %v", err) + _ = conn.Close() + } + return + } + r.sendToHTTP(conn) +} + +// lookupRoute returns the highest-priority route for the given SNI host. +// HTTP routes take precedence over TCP routes. +func (r *Router) lookupRoute(host SNIHost) (Route, bool) { + r.mu.RLock() + defer r.mu.RUnlock() + routes, ok := r.routes[host] + if !ok || len(routes) == 0 { + return Route{}, false + } + best := routes[0] + for _, route := range routes[1:] { + if route.Type < best.Type { + best = route + } + } + return best, true +} + +// sendToHTTP feeds the connection to the HTTP handler via the channel. +// If no HTTP channel is configured (port router), the router is +// draining, or the channel is full, the connection is closed. +func (r *Router) sendToHTTP(conn net.Conn) { + if r.httpCh == nil { + _ = conn.Close() + return + } + + r.mu.RLock() + draining := r.draining + r.mu.RUnlock() + + if draining { + _ = conn.Close() + return + } + + select { + case r.httpCh <- conn: + default: + r.logger.Warnf("HTTP channel full, dropping connection from %s", conn.RemoteAddr()) + _ = conn.Close() + } +} + +// Drain prevents new relay connections from starting and waits for all +// in-flight connection handlers and active relays to finish, up to the +// given timeout. Returns true if all completed, false on timeout. +func (r *Router) Drain(timeout time.Duration) bool { + r.mu.Lock() + r.draining = true + if r.drainDone == nil { + done := make(chan struct{}) + go func() { + r.activeConns.Wait() + r.activeRelays.Wait() + close(done) + }() + r.drainDone = done + } + done := r.drainDone + r.mu.Unlock() + + select { + case <-done: + return true + case <-time.After(timeout): + return false + } +} + +// cancelServiceLocked cancels and removes the context for the given service, +// closing all its active relay connections. Must be called with mu held. +func (r *Router) cancelServiceLocked(svcID types.ServiceID) { + if cancel, ok := r.svcCancels[svcID]; ok { + cancel() + delete(r.svcCtxs, svcID) + delete(r.svcCancels, svcID) + } +} + +// relayTCP sets up and runs a bidirectional TCP relay. +// The caller owns conn and must close it if this method returns an error. +// On success (nil error), both conn and backend are closed by the relay. +func (r *Router) relayTCP(ctx context.Context, conn net.Conn, sni SNIHost, route Route) error { + svcCtx, err := r.acquireRelay(ctx, route) + if err != nil { + return err + } + defer func() { + <-r.relaySem + r.activeRelays.Done() + }() + + backend, err := r.dialBackend(svcCtx, route) + if err != nil { + obs := r.getObserver() + if obs != nil { + obs.TCPRelayDialError(route.AccountID) + } + return err + } + + if route.ProxyProtocol { + if err := writeProxyProtoV2(conn, backend); err != nil { + _ = backend.Close() + return fmt.Errorf("write PROXY protocol header: %w", err) + } + } + + obs := r.getObserver() + if obs != nil { + obs.TCPRelayStarted(route.AccountID) + } + + entry := r.logger.WithFields(log.Fields{ + "sni": sni, + "service_id": route.ServiceID, + "target": route.Target, + }) + entry.Debug("TCP relay started") + + start := time.Now() + s2d, d2s := Relay(svcCtx, entry, conn, backend, DefaultIdleTimeout) + elapsed := time.Since(start) + + if obs != nil { + obs.TCPRelayEnded(route.AccountID, elapsed, s2d, d2s) + } + entry.Debugf("TCP relay ended (client→backend: %d bytes, backend→client: %d bytes)", s2d, d2s) + + r.logL4Entry(route, conn, elapsed, s2d, d2s) + return nil +} + +// acquireRelay checks draining state, increments activeRelays, and acquires +// a semaphore slot. Returns the per-service context on success. +// The caller must release the semaphore and call activeRelays.Done() when done. +func (r *Router) acquireRelay(ctx context.Context, route Route) (context.Context, error) { + r.mu.Lock() + if r.draining { + r.mu.Unlock() + return nil, errors.New("router is draining") + } + r.activeRelays.Add(1) + svcCtx := r.getOrCreateServiceCtxLocked(ctx, route.ServiceID) + r.mu.Unlock() + + select { + case r.relaySem <- struct{}{}: + return svcCtx, nil + default: + r.activeRelays.Done() + obs := r.getObserver() + if obs != nil { + obs.TCPRelayRejected(route.AccountID) + } + return nil, errors.New("TCP relay connection limit reached") + } +} + +// dialBackend resolves the dialer for the route's account and dials the backend. +func (r *Router) dialBackend(svcCtx context.Context, route Route) (net.Conn, error) { + dialFn, err := r.dialResolve(route.AccountID) + if err != nil { + return nil, fmt.Errorf("resolve dialer: %w", err) + } + + dialTimeout := route.DialTimeout + if dialTimeout <= 0 { + dialTimeout = defaultDialTimeout + } + dialCtx, dialCancel := context.WithTimeout(svcCtx, dialTimeout) + backend, err := dialFn(dialCtx, "tcp", route.Target) + dialCancel() + if err != nil { + return nil, fmt.Errorf("dial backend %s: %w", route.Target, err) + } + return backend, nil +} + +// logL4Entry sends a TCP relay access log entry if an access logger is configured. +func (r *Router) logL4Entry(route Route, conn net.Conn, duration time.Duration, bytesUp, bytesDown int64) { + r.mu.RLock() + al := r.accessLog + r.mu.RUnlock() + + if al == nil { + return + } + + var sourceIP netip.Addr + if remote := conn.RemoteAddr(); remote != nil { + if ap, err := netip.ParseAddrPort(remote.String()); err == nil { + sourceIP = ap.Addr().Unmap() + } + } + + al.LogL4(accesslog.L4Entry{ + AccountID: route.AccountID, + ServiceID: route.ServiceID, + Protocol: route.Protocol, + Host: route.Domain, + SourceIP: sourceIP, + DurationMs: duration.Milliseconds(), + BytesUpload: bytesUp, + BytesDownload: bytesDown, + }) +} + +// getOrCreateServiceCtxLocked returns the context for a service, creating one +// if it doesn't exist yet. The context is a child of the server context. +// Must be called with mu held. +func (r *Router) getOrCreateServiceCtxLocked(parent context.Context, svcID types.ServiceID) context.Context { + if ctx, ok := r.svcCtxs[svcID]; ok { + return ctx + } + ctx, cancel := context.WithCancel(parent) + r.svcCtxs[svcID] = ctx + r.svcCancels[svcID] = cancel + return ctx +} diff --git a/proxy/internal/tcp/router_test.go b/proxy/internal/tcp/router_test.go new file mode 100644 index 000000000..0e2cfe3e1 --- /dev/null +++ b/proxy/internal/tcp/router_test.go @@ -0,0 +1,1670 @@ +package tcp + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "math/big" + "net" + "testing" + "time" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/proxy/internal/types" +) + +func TestRouter_HTTPRouting(t *testing.T) { + logger := log.StandardLogger() + addr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 443} + + router := NewRouter(logger, nil, addr) + router.AddRoute("example.com", Route{Type: RouteHTTP}) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + _ = router.Serve(ctx, ln) + }() + + // Dial in a goroutine. The TLS handshake will block since nothing + // completes it on the HTTP side, but we only care about routing. + go func() { + conn, err := net.DialTimeout("tcp", ln.Addr().String(), 2*time.Second) + if err != nil { + return + } + // Send a TLS ClientHello manually. + tlsConn := tls.Client(conn, &tls.Config{ + ServerName: "example.com", + InsecureSkipVerify: true, //nolint:gosec + }) + _ = tlsConn.Handshake() + tlsConn.Close() + }() + + // Verify the connection was routed to the HTTP channel. + select { + case conn := <-router.httpCh: + assert.NotNil(t, conn) + conn.Close() + case <-time.After(5 * time.Second): + t.Fatal("no connection received on HTTP channel") + } +} + +func TestRouter_TCPRouting(t *testing.T) { + logger := log.StandardLogger() + addr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 443} + + // Set up a TLS backend that the relay will connect to. + backendCert := generateSelfSignedCert(t) + backendLn, err := tls.Listen("tcp", "127.0.0.1:0", &tls.Config{ + Certificates: []tls.Certificate{backendCert}, + }) + require.NoError(t, err) + defer backendLn.Close() + + backendAddr := backendLn.Addr().String() + + // Accept one connection on the backend, echo data back. + backendReady := make(chan struct{}) + go func() { + close(backendReady) + conn, err := backendLn.Accept() + if err != nil { + return + } + defer conn.Close() + buf := make([]byte, 1024) + n, _ := conn.Read(buf) + _, _ = conn.Write(buf[:n]) + }() + <-backendReady + + dialResolve := func(accountID types.AccountID) (types.DialContextFunc, error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + }, nil + } + + router := NewRouter(logger, dialResolve, addr) + router.AddRoute("tcp.example.com", Route{ + Type: RouteTCP, + AccountID: "test-account", + ServiceID: "test-service", + Target: backendAddr, + }) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + _ = router.Serve(ctx, ln) + }() + + // Connect as a TLS client; the proxy should passthrough to the backend. + clientConn, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{ + ServerName: "tcp.example.com", + InsecureSkipVerify: true, //nolint:gosec + }) + require.NoError(t, err) + defer clientConn.Close() + + testData := []byte("hello through TCP passthrough") + _, err = clientConn.Write(testData) + require.NoError(t, err) + + buf := make([]byte, 1024) + n, err := clientConn.Read(buf) + require.NoError(t, err) + assert.Equal(t, testData, buf[:n], "should receive echoed data through TCP passthrough") +} + +func TestRouter_UnknownSNIGoesToHTTP(t *testing.T) { + logger := log.StandardLogger() + addr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 443} + + router := NewRouter(logger, nil, addr) + // No routes registered. + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + _ = router.Serve(ctx, ln) + }() + + go func() { + conn, err := net.DialTimeout("tcp", ln.Addr().String(), 2*time.Second) + if err != nil { + return + } + tlsConn := tls.Client(conn, &tls.Config{ + ServerName: "unknown.example.com", + InsecureSkipVerify: true, //nolint:gosec + }) + _ = tlsConn.Handshake() + tlsConn.Close() + }() + + select { + case conn := <-router.httpCh: + assert.NotNil(t, conn) + conn.Close() + case <-time.After(5 * time.Second): + t.Fatal("unknown SNI should be routed to HTTP") + } +} + +// TestRouter_NonTLSConnectionDropped verifies that a non-TLS connection +// on the shared port is closed by the router (SNI peek fails to find a +// valid ClientHello, so there is no route match). +func TestRouter_NonTLSConnectionDropped(t *testing.T) { + logger := log.StandardLogger() + addr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 443} + + // Register a TLS passthrough route. Non-TLS should NOT match. + dialResolve := func(accountID types.AccountID) (types.DialContextFunc, error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + }, nil + } + + router := NewRouter(logger, dialResolve, addr) + router.AddRoute("tcp.example.com", Route{ + Type: RouteTCP, + AccountID: "test-account", + ServiceID: "test-service", + Target: "127.0.0.1:9999", + }) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + _ = router.Serve(ctx, ln) + }() + + // Send plain HTTP (non-TLS) data. + conn, err := net.DialTimeout("tcp", ln.Addr().String(), 2*time.Second) + require.NoError(t, err) + defer conn.Close() + + _, _ = conn.Write([]byte("GET / HTTP/1.1\r\nHost: tcp.example.com\r\n\r\n")) + + // Non-TLS traffic on a port with RouteTCP goes to the HTTP channel + // because there's no valid SNI to match. Verify it reaches HTTP. + select { + case httpConn := <-router.httpCh: + assert.NotNil(t, httpConn, "non-TLS connection should fall through to HTTP") + httpConn.Close() + case <-time.After(5 * time.Second): + t.Fatal("non-TLS connection was not routed to HTTP") + } +} + +// TestRouter_TLSAndHTTPCoexist verifies that a shared port with both HTTP +// and TLS passthrough routes correctly demuxes based on the SNI hostname. +func TestRouter_TLSAndHTTPCoexist(t *testing.T) { + logger := log.StandardLogger() + addr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 443} + + backendCert := generateSelfSignedCert(t) + backendLn, err := tls.Listen("tcp", "127.0.0.1:0", &tls.Config{ + Certificates: []tls.Certificate{backendCert}, + }) + require.NoError(t, err) + defer backendLn.Close() + + // Backend echoes data. + go func() { + conn, err := backendLn.Accept() + if err != nil { + return + } + defer conn.Close() + buf := make([]byte, 1024) + n, _ := conn.Read(buf) + _, _ = conn.Write(buf[:n]) + }() + + dialResolve := func(accountID types.AccountID) (types.DialContextFunc, error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + }, nil + } + + router := NewRouter(logger, dialResolve, addr) + // HTTP route. + router.AddRoute("app.example.com", Route{Type: RouteHTTP}) + // TLS passthrough route. + router.AddRoute("tcp.example.com", Route{ + Type: RouteTCP, + AccountID: "test-account", + ServiceID: "test-service", + Target: backendLn.Addr().String(), + }) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + _ = router.Serve(ctx, ln) + }() + + // 1. TLS connection with SNI "tcp.example.com" → TLS passthrough. + tlsConn, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{ + ServerName: "tcp.example.com", + InsecureSkipVerify: true, //nolint:gosec + }) + require.NoError(t, err) + + testData := []byte("passthrough data") + _, err = tlsConn.Write(testData) + require.NoError(t, err) + buf := make([]byte, 1024) + n, err := tlsConn.Read(buf) + require.NoError(t, err) + assert.Equal(t, testData, buf[:n], "TLS passthrough should relay data") + tlsConn.Close() + + // 2. TLS connection with SNI "app.example.com" → HTTP handler. + go func() { + conn, err := net.DialTimeout("tcp", ln.Addr().String(), 2*time.Second) + if err != nil { + return + } + c := tls.Client(conn, &tls.Config{ + ServerName: "app.example.com", + InsecureSkipVerify: true, //nolint:gosec + }) + _ = c.Handshake() + c.Close() + }() + + select { + case httpConn := <-router.httpCh: + assert.NotNil(t, httpConn, "HTTP SNI should go to HTTP handler") + httpConn.Close() + case <-time.After(5 * time.Second): + t.Fatal("HTTP-route connection was not delivered to HTTP handler") + } +} + +func TestRouter_AddRemoveRoute(t *testing.T) { + logger := log.StandardLogger() + addr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 443} + router := NewRouter(logger, nil, addr) + + router.AddRoute("a.example.com", Route{Type: RouteHTTP, ServiceID: "svc-a"}) + router.AddRoute("b.example.com", Route{Type: RouteTCP, ServiceID: "svc-b", Target: "10.0.0.1:5432"}) + + route, ok := router.lookupRoute("a.example.com") + assert.True(t, ok) + assert.Equal(t, RouteHTTP, route.Type) + + route, ok = router.lookupRoute("b.example.com") + assert.True(t, ok) + assert.Equal(t, RouteTCP, route.Type) + + router.RemoveRoute("a.example.com", "svc-a") + _, ok = router.lookupRoute("a.example.com") + assert.False(t, ok) +} + +func TestChanListener_AcceptAndClose(t *testing.T) { + ch := make(chan net.Conn, 1) + addr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 443} + ln := newChanListener(ch, addr) + + assert.Equal(t, addr, ln.Addr()) + + // Send a connection. + clientConn, serverConn := net.Pipe() + defer clientConn.Close() + defer serverConn.Close() + + ch <- serverConn + + conn, err := ln.Accept() + require.NoError(t, err) + assert.Equal(t, serverConn, conn) + + // Close should cause Accept to return error. + require.NoError(t, ln.Close()) + // Double close should be safe. + require.NoError(t, ln.Close()) + + _, err = ln.Accept() + assert.ErrorIs(t, err, net.ErrClosed) +} + +func TestRouter_HTTPPrecedenceGuard(t *testing.T) { + logger := log.StandardLogger() + addr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 443} + router := NewRouter(logger, nil, addr) + + host := SNIHost("app.example.com") + + t.Run("http takes precedence over tcp at lookup", func(t *testing.T) { + router.AddRoute(host, Route{Type: RouteHTTP, ServiceID: "svc-http"}) + router.AddRoute(host, Route{Type: RouteTCP, ServiceID: "svc-tcp", Target: "10.0.0.1:443"}) + + route, ok := router.lookupRoute(host) + require.True(t, ok) + assert.Equal(t, RouteHTTP, route.Type, "HTTP route must take precedence over TCP") + assert.Equal(t, types.ServiceID("svc-http"), route.ServiceID) + + router.RemoveRoute(host, "svc-http") + router.RemoveRoute(host, "svc-tcp") + }) + + t.Run("tcp becomes active when http is removed", func(t *testing.T) { + router.AddRoute(host, Route{Type: RouteHTTP, ServiceID: "svc-http"}) + router.AddRoute(host, Route{Type: RouteTCP, ServiceID: "svc-tcp", Target: "10.0.0.1:443"}) + + router.RemoveRoute(host, "svc-http") + + route, ok := router.lookupRoute(host) + require.True(t, ok) + assert.Equal(t, RouteTCP, route.Type, "TCP should take over after HTTP removal") + assert.Equal(t, types.ServiceID("svc-tcp"), route.ServiceID) + + router.RemoveRoute(host, "svc-tcp") + }) + + t.Run("order of add does not matter", func(t *testing.T) { + router.AddRoute(host, Route{Type: RouteTCP, ServiceID: "svc-tcp", Target: "10.0.0.1:443"}) + router.AddRoute(host, Route{Type: RouteHTTP, ServiceID: "svc-http"}) + + route, ok := router.lookupRoute(host) + require.True(t, ok) + assert.Equal(t, RouteHTTP, route.Type, "HTTP takes precedence regardless of add order") + + router.RemoveRoute(host, "svc-http") + router.RemoveRoute(host, "svc-tcp") + }) + + t.Run("same service id updates in place", func(t *testing.T) { + router.AddRoute(host, Route{Type: RouteTCP, ServiceID: "svc-1", Target: "10.0.0.1:443"}) + router.AddRoute(host, Route{Type: RouteTCP, ServiceID: "svc-1", Target: "10.0.0.2:443"}) + + route, ok := router.lookupRoute(host) + require.True(t, ok) + assert.Equal(t, "10.0.0.2:443", route.Target, "route should be updated in place") + + router.RemoveRoute(host, "svc-1") + _, ok = router.lookupRoute(host) + assert.False(t, ok) + }) + + t.Run("double remove is safe", func(t *testing.T) { + router.AddRoute(host, Route{Type: RouteHTTP, ServiceID: "svc-1"}) + router.RemoveRoute(host, "svc-1") + router.RemoveRoute(host, "svc-1") + + _, ok := router.lookupRoute(host) + assert.False(t, ok, "route should be gone after removal") + }) + + t.Run("remove does not affect other hosts", func(t *testing.T) { + router.AddRoute("a.example.com", Route{Type: RouteHTTP, ServiceID: "svc-a"}) + router.AddRoute("b.example.com", Route{Type: RouteTCP, ServiceID: "svc-b", Target: "10.0.0.2:22"}) + + router.RemoveRoute("a.example.com", "svc-a") + + _, ok := router.lookupRoute(SNIHost("a.example.com")) + assert.False(t, ok) + + route, ok := router.lookupRoute(SNIHost("b.example.com")) + require.True(t, ok) + assert.Equal(t, RouteTCP, route.Type, "removing one host must not affect another") + + router.RemoveRoute("b.example.com", "svc-b") + }) +} + +func TestRouter_SetRemoveFallback(t *testing.T) { + logger := log.StandardLogger() + router := NewPortRouter(logger, nil) + + assert.True(t, router.IsEmpty(), "new port router should be empty") + + router.SetFallback(Route{Type: RouteTCP, ServiceID: "svc-fb", Target: "10.0.0.1:5432"}) + assert.False(t, router.IsEmpty(), "router with fallback should not be empty") + + router.AddRoute("a.example.com", Route{Type: RouteTCP, ServiceID: "svc-a", Target: "10.0.0.2:443"}) + assert.False(t, router.IsEmpty()) + + router.RemoveFallback("svc-fb") + assert.False(t, router.IsEmpty(), "router with SNI route should not be empty") + + router.RemoveRoute("a.example.com", "svc-a") + assert.True(t, router.IsEmpty(), "router with no routes and no fallback should be empty") +} + +func TestPortRouter_FallbackRelaysData(t *testing.T) { + // Backend echo server. + backendLn, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer backendLn.Close() + + go func() { + conn, err := backendLn.Accept() + if err != nil { + return + } + defer conn.Close() + buf := make([]byte, 1024) + n, _ := conn.Read(buf) + _, _ = conn.Write(buf[:n]) + }() + + dialResolve := func(_ types.AccountID) (types.DialContextFunc, error) { + return func(_ context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + }, nil + } + + logger := log.StandardLogger() + router := NewPortRouter(logger, dialResolve) + router.SetFallback(Route{ + Type: RouteTCP, + AccountID: "test-account", + ServiceID: "test-service", + Target: backendLn.Addr().String(), + }) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { _ = router.Serve(ctx, ln) }() + + // Plain TCP (non-TLS) connection should be relayed via fallback. + // Use exactly 5 bytes. PeekClientHello reads 5 bytes as the TLS + // header, so a single 5-byte write lands as one chunk at the backend. + conn, err := net.DialTimeout("tcp", ln.Addr().String(), 2*time.Second) + require.NoError(t, err) + defer conn.Close() + + testData := []byte("hello") + _, err = conn.Write(testData) + require.NoError(t, err) + + buf := make([]byte, 1024) + n, err := conn.Read(buf) + require.NoError(t, err) + assert.Equal(t, testData, buf[:n], "should receive echoed data through fallback relay") +} + +func TestPortRouter_FallbackOnUnknownSNI(t *testing.T) { + // Backend TLS echo server. + backendCert := generateSelfSignedCert(t) + backendLn, err := tls.Listen("tcp", "127.0.0.1:0", &tls.Config{ + Certificates: []tls.Certificate{backendCert}, + }) + require.NoError(t, err) + defer backendLn.Close() + + go func() { + conn, err := backendLn.Accept() + if err != nil { + return + } + defer conn.Close() + buf := make([]byte, 1024) + n, _ := conn.Read(buf) + _, _ = conn.Write(buf[:n]) + }() + + dialResolve := func(_ types.AccountID) (types.DialContextFunc, error) { + return func(_ context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + }, nil + } + + logger := log.StandardLogger() + router := NewPortRouter(logger, dialResolve) + // Only a fallback, no SNI route for "unknown.example.com". + router.SetFallback(Route{ + Type: RouteTCP, + AccountID: "test-account", + ServiceID: "test-service", + Target: backendLn.Addr().String(), + }) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { _ = router.Serve(ctx, ln) }() + + // TLS with unknown SNI → fallback relay to TLS backend. + tlsConn, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{ + ServerName: "tcp.example.com", + InsecureSkipVerify: true, //nolint:gosec + }) + require.NoError(t, err) + defer tlsConn.Close() + + testData := []byte("hello through fallback TLS") + _, err = tlsConn.Write(testData) + require.NoError(t, err) + + buf := make([]byte, 1024) + n, err := tlsConn.Read(buf) + require.NoError(t, err) + assert.Equal(t, testData, buf[:n], "unknown SNI should relay through fallback") +} + +func TestPortRouter_SNIWinsOverFallback(t *testing.T) { + // Two backend echo servers: one for SNI match, one for fallback. + sniBacked := startEchoTLS(t) + fbBacked := startEchoTLS(t) + + dialResolve := func(_ types.AccountID) (types.DialContextFunc, error) { + return func(_ context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + }, nil + } + + logger := log.StandardLogger() + router := NewPortRouter(logger, dialResolve) + router.AddRoute("tcp.example.com", Route{ + Type: RouteTCP, + AccountID: "test-account", + ServiceID: "sni-service", + Target: sniBacked.Addr().String(), + }) + router.SetFallback(Route{ + Type: RouteTCP, + AccountID: "test-account", + ServiceID: "fb-service", + Target: fbBacked.Addr().String(), + }) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { _ = router.Serve(ctx, ln) }() + + // TLS with matching SNI should go to SNI backend, not fallback. + tlsConn, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{ + ServerName: "tcp.example.com", + InsecureSkipVerify: true, //nolint:gosec + }) + require.NoError(t, err) + defer tlsConn.Close() + + testData := []byte("SNI route data") + _, err = tlsConn.Write(testData) + require.NoError(t, err) + + buf := make([]byte, 1024) + n, err := tlsConn.Read(buf) + require.NoError(t, err) + assert.Equal(t, testData, buf[:n], "SNI match should use SNI route, not fallback") +} + +func TestPortRouter_NoFallbackNoHTTP_Closes(t *testing.T) { + logger := log.StandardLogger() + router := NewPortRouter(logger, nil) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { _ = router.Serve(ctx, ln) }() + + conn, err := net.DialTimeout("tcp", ln.Addr().String(), 2*time.Second) + require.NoError(t, err) + defer conn.Close() + + _, _ = conn.Write([]byte("hello")) + + // Connection should be closed by the router (no fallback, no HTTP). + buf := make([]byte, 1) + _ = conn.SetReadDeadline(time.Now().Add(3 * time.Second)) + _, err = conn.Read(buf) + assert.Error(t, err, "connection should be closed when no fallback and no HTTP channel") +} + +func TestRouter_FallbackAndHTTPCoexist(t *testing.T) { + // Fallback backend echo server (plain TCP). + fbBackend, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer fbBackend.Close() + + go func() { + conn, err := fbBackend.Accept() + if err != nil { + return + } + defer conn.Close() + buf := make([]byte, 1024) + n, _ := conn.Read(buf) + _, _ = conn.Write(buf[:n]) + }() + + dialResolve := func(_ types.AccountID) (types.DialContextFunc, error) { + return func(_ context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + }, nil + } + + logger := log.StandardLogger() + addr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 443} + router := NewRouter(logger, dialResolve, addr) + + // HTTP route for known SNI. + router.AddRoute("app.example.com", Route{Type: RouteHTTP}) + // Fallback for non-TLS / unknown SNI. + router.SetFallback(Route{ + Type: RouteTCP, + AccountID: "test-account", + ServiceID: "fb-service", + Target: fbBackend.Addr().String(), + }) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { _ = router.Serve(ctx, ln) }() + + // 1. TLS with known HTTP SNI → should go to HTTP channel. + go func() { + conn, err := net.DialTimeout("tcp", ln.Addr().String(), 2*time.Second) + if err != nil { + return + } + c := tls.Client(conn, &tls.Config{ + ServerName: "app.example.com", + InsecureSkipVerify: true, //nolint:gosec + }) + _ = c.Handshake() + c.Close() + }() + + select { + case httpConn := <-router.httpCh: + assert.NotNil(t, httpConn, "known HTTP SNI should go to HTTP channel") + httpConn.Close() + case <-time.After(5 * time.Second): + t.Fatal("HTTP-route connection was not delivered to HTTP handler") + } + + // 2. Plain TCP (non-TLS) → should go to fallback, not HTTP. + // Use exactly 5 bytes to match PeekClientHello header size. + conn, err := net.DialTimeout("tcp", ln.Addr().String(), 2*time.Second) + require.NoError(t, err) + defer conn.Close() + + testData := []byte("plain") + _, err = conn.Write(testData) + require.NoError(t, err) + + buf := make([]byte, 1024) + n, err := conn.Read(buf) + require.NoError(t, err) + assert.Equal(t, testData, buf[:n], "non-TLS should be relayed via fallback, not HTTP") +} + +// startEchoTLS starts a TLS echo server and returns the listener. +func startEchoTLS(t *testing.T) net.Listener { + t.Helper() + + cert := generateSelfSignedCert(t) + ln, err := tls.Listen("tcp", "127.0.0.1:0", &tls.Config{ + Certificates: []tls.Certificate{cert}, + }) + require.NoError(t, err) + t.Cleanup(func() { ln.Close() }) + + go func() { + conn, err := ln.Accept() + if err != nil { + return + } + defer conn.Close() + buf := make([]byte, 1024) + for { + n, err := conn.Read(buf) + if err != nil { + return + } + if _, err := conn.Write(buf[:n]); err != nil { + return + } + } + }() + + return ln +} + +func generateSelfSignedCert(t *testing.T) tls.Certificate { + t.Helper() + + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + + template := &x509.Certificate{ + SerialNumber: big.NewInt(1), + DNSNames: []string{"tcp.example.com"}, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour), + } + + certDER, err := x509.CreateCertificate(rand.Reader, template, template, &key.PublicKey, key) + require.NoError(t, err) + + return tls.Certificate{ + Certificate: [][]byte{certDER}, + PrivateKey: key, + } +} + +func TestRouter_DrainWaitsForRelays(t *testing.T) { + logger := log.StandardLogger() + backendLn, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer backendLn.Close() + + // Accept connections: echo first message, then hold open until told to close. + closeBackend := make(chan struct{}) + go func() { + for { + conn, err := backendLn.Accept() + if err != nil { + return + } + go func(c net.Conn) { + defer c.Close() + buf := make([]byte, 1024) + n, _ := c.Read(buf) + _, _ = c.Write(buf[:n]) + <-closeBackend + }(conn) + } + }() + + dialResolve := func(_ types.AccountID) (types.DialContextFunc, error) { + return (&net.Dialer{}).DialContext, nil + } + + router := NewPortRouter(logger, dialResolve) + router.SetFallback(Route{ + Type: RouteTCP, + Target: backendLn.Addr().String(), + }) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + serveDone := make(chan struct{}) + go func() { + _ = router.Serve(ctx, ln) + close(serveDone) + }() + + // Open a relay connection (non-TLS, hits fallback). + conn, err := net.Dial("tcp", ln.Addr().String()) + require.NoError(t, err) + _, _ = conn.Write([]byte("hello")) + + // Wait for the echo to confirm the relay is fully established. + buf := make([]byte, 16) + _ = conn.SetReadDeadline(time.Now().Add(2 * time.Second)) + n, err := conn.Read(buf) + require.NoError(t, err) + assert.Equal(t, "hello", string(buf[:n])) + _ = conn.SetReadDeadline(time.Time{}) + + // Drain with a short timeout should fail because the relay is still active. + assert.False(t, router.Drain(50*time.Millisecond), "drain should timeout with active relay") + + // Close backend connections so relays finish. + close(closeBackend) + _ = conn.Close() + + // Drain should now complete quickly. + assert.True(t, router.Drain(2*time.Second), "drain should succeed after relays end") + + cancel() + <-serveDone +} + +func TestRouter_DrainEmptyReturnsImmediately(t *testing.T) { + logger := log.StandardLogger() + router := NewPortRouter(logger, nil) + + start := time.Now() + ok := router.Drain(5 * time.Second) + elapsed := time.Since(start) + + assert.True(t, ok) + assert.Less(t, elapsed, 100*time.Millisecond, "drain with no relays should return immediately") +} + +// TestRemoveRoute_KillsActiveRelays verifies that removing a route +// immediately kills active relay connections for that service. +func TestRemoveRoute_KillsActiveRelays(t *testing.T) { + backendLn, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer backendLn.Close() + + // Backend echoes first message, then holds connection open. + go func() { + for { + conn, err := backendLn.Accept() + if err != nil { + return + } + go func(c net.Conn) { + defer c.Close() + buf := make([]byte, 1024) + n, _ := c.Read(buf) + _, _ = c.Write(buf[:n]) + // Hold the connection open. + for { + if _, err := c.Read(buf); err != nil { + return + } + } + }(conn) + } + }() + + dialResolve := func(_ types.AccountID) (types.DialContextFunc, error) { + return (&net.Dialer{}).DialContext, nil + } + + logger := log.StandardLogger() + router := NewPortRouter(logger, dialResolve) + router.SetFallback(Route{ + Type: RouteTCP, + ServiceID: "svc-1", + Target: backendLn.Addr().String(), + }) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { _ = router.Serve(ctx, ln) }() + + // Establish a relay connection. + conn, err := net.Dial("tcp", ln.Addr().String()) + require.NoError(t, err) + defer conn.Close() + _, err = conn.Write([]byte("hello")) + require.NoError(t, err) + + // Wait for echo to confirm relay is established. + buf := make([]byte, 16) + _ = conn.SetReadDeadline(time.Now().Add(2 * time.Second)) + n, err := conn.Read(buf) + require.NoError(t, err) + assert.Equal(t, "hello", string(buf[:n])) + _ = conn.SetReadDeadline(time.Time{}) + + // Remove the fallback: should kill the active relay. + router.RemoveFallback("svc-1") + + // The client connection should see an error (server closed). + _ = conn.SetReadDeadline(time.Now().Add(2 * time.Second)) + _, err = conn.Read(buf) + assert.Error(t, err, "connection should be killed after service removal") +} + +// TestRemoveRoute_KillsSNIRelays verifies that removing an SNI route +// kills its active relays without affecting other services. +func TestRemoveRoute_KillsSNIRelays(t *testing.T) { + backend := startEchoTLS(t) + defer backend.Close() + + dialResolve := func(_ types.AccountID) (types.DialContextFunc, error) { + return (&net.Dialer{}).DialContext, nil + } + + logger := log.StandardLogger() + addr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 443} + router := NewRouter(logger, dialResolve, addr) + router.AddRoute("tls.example.com", Route{ + Type: RouteTCP, + ServiceID: "svc-tls", + Target: backend.Addr().String(), + }) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { _ = router.Serve(ctx, ln) }() + + // Establish a TLS relay. + tlsConn, err := tls.DialWithDialer( + &net.Dialer{Timeout: 2 * time.Second}, + "tcp", ln.Addr().String(), + &tls.Config{ServerName: "tls.example.com", InsecureSkipVerify: true}, + ) + require.NoError(t, err) + defer tlsConn.Close() + + _, err = tlsConn.Write([]byte("ping")) + require.NoError(t, err) + buf := make([]byte, 1024) + n, err := tlsConn.Read(buf) + require.NoError(t, err) + assert.Equal(t, "ping", string(buf[:n])) + + // Remove the route: active relay should die. + router.RemoveRoute("tls.example.com", "svc-tls") + + _ = tlsConn.SetReadDeadline(time.Now().Add(2 * time.Second)) + _, err = tlsConn.Read(buf) + assert.Error(t, err, "TLS relay should be killed after route removal") +} + +// TestPortRouter_SNIAndTCPFallbackCoexist verifies that a single port can +// serve both SNI-routed TLS passthrough and plain TCP fallback simultaneously. +func TestPortRouter_SNIAndTCPFallbackCoexist(t *testing.T) { + sniBackend := startEchoTLS(t) + fbBackend := startEchoPlain(t) + + dialResolve := func(_ types.AccountID) (types.DialContextFunc, error) { + return func(_ context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + }, nil + } + + logger := log.StandardLogger() + router := NewPortRouter(logger, dialResolve) + + // SNI route for a specific domain. + router.AddRoute("tcp.example.com", Route{ + Type: RouteTCP, + AccountID: "acct-1", + ServiceID: "svc-sni", + Target: sniBackend.Addr().String(), + }) + // TCP fallback for everything else. + router.SetFallback(Route{ + Type: RouteTCP, + AccountID: "acct-2", + ServiceID: "svc-fb", + Target: fbBackend.Addr().String(), + }) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { _ = router.Serve(ctx, ln) }() + + // 1. TLS with matching SNI → goes to SNI backend. + tlsConn, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{ + ServerName: "tcp.example.com", + InsecureSkipVerify: true, //nolint:gosec + }) + require.NoError(t, err) + + _, err = tlsConn.Write([]byte("sni-data")) + require.NoError(t, err) + buf := make([]byte, 1024) + n, err := tlsConn.Read(buf) + require.NoError(t, err) + assert.Equal(t, "sni-data", string(buf[:n]), "SNI match → SNI backend") + tlsConn.Close() + + // 2. Plain TCP (no TLS) → goes to fallback. + tcpConn, err := net.DialTimeout("tcp", ln.Addr().String(), 2*time.Second) + require.NoError(t, err) + + _, err = tcpConn.Write([]byte("plain")) + require.NoError(t, err) + n, err = tcpConn.Read(buf) + require.NoError(t, err) + assert.Equal(t, "plain", string(buf[:n]), "plain TCP → fallback backend") + tcpConn.Close() + + // 3. TLS with unknown SNI → also goes to fallback. + unknownBackend := startEchoTLS(t) + router.SetFallback(Route{ + Type: RouteTCP, + AccountID: "acct-2", + ServiceID: "svc-fb", + Target: unknownBackend.Addr().String(), + }) + + unknownTLS, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{ + ServerName: "unknown.example.com", + InsecureSkipVerify: true, //nolint:gosec + }) + require.NoError(t, err) + + _, err = unknownTLS.Write([]byte("unknown-sni")) + require.NoError(t, err) + n, err = unknownTLS.Read(buf) + require.NoError(t, err) + assert.Equal(t, "unknown-sni", string(buf[:n]), "unknown SNI → fallback backend") + unknownTLS.Close() +} + +// TestPortRouter_UpdateRouteSwapsSNI verifies that updating a route +// (remove + add with different target) correctly routes to the new backend. +func TestPortRouter_UpdateRouteSwapsSNI(t *testing.T) { + backend1 := startEchoTLS(t) + backend2 := startEchoTLS(t) + + dialResolve := func(_ types.AccountID) (types.DialContextFunc, error) { + return func(_ context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + }, nil + } + + logger := log.StandardLogger() + router := NewPortRouter(logger, dialResolve) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { _ = router.Serve(ctx, ln) }() + + // Initial route → backend1. + router.AddRoute("db.example.com", Route{ + Type: RouteTCP, + ServiceID: "svc-db", + Target: backend1.Addr().String(), + }) + + conn1, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{ + ServerName: "db.example.com", + InsecureSkipVerify: true, //nolint:gosec + }) + require.NoError(t, err) + _, err = conn1.Write([]byte("v1")) + require.NoError(t, err) + buf := make([]byte, 1024) + n, err := conn1.Read(buf) + require.NoError(t, err) + assert.Equal(t, "v1", string(buf[:n])) + conn1.Close() + + // Update: remove old route, add new → backend2. + router.RemoveRoute("db.example.com", "svc-db") + router.AddRoute("db.example.com", Route{ + Type: RouteTCP, + ServiceID: "svc-db", + Target: backend2.Addr().String(), + }) + + conn2, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{ + ServerName: "db.example.com", + InsecureSkipVerify: true, //nolint:gosec + }) + require.NoError(t, err) + _, err = conn2.Write([]byte("v2")) + require.NoError(t, err) + n, err = conn2.Read(buf) + require.NoError(t, err) + assert.Equal(t, "v2", string(buf[:n])) + conn2.Close() +} + +// TestPortRouter_RemoveSNIFallsThrough verifies that after removing an +// SNI route, connections for that domain fall through to the fallback. +func TestPortRouter_RemoveSNIFallsThrough(t *testing.T) { + sniBackend := startEchoTLS(t) + fbBackend := startEchoTLS(t) + + dialResolve := func(_ types.AccountID) (types.DialContextFunc, error) { + return func(_ context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + }, nil + } + + logger := log.StandardLogger() + router := NewPortRouter(logger, dialResolve) + router.AddRoute("db.example.com", Route{ + Type: RouteTCP, + ServiceID: "svc-db", + Target: sniBackend.Addr().String(), + }) + router.SetFallback(Route{ + Type: RouteTCP, + Target: fbBackend.Addr().String(), + }) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { _ = router.Serve(ctx, ln) }() + + // Before removal: SNI matches → sniBackend. + conn1, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{ + ServerName: "db.example.com", + InsecureSkipVerify: true, //nolint:gosec + }) + require.NoError(t, err) + _, err = conn1.Write([]byte("before")) + require.NoError(t, err) + buf := make([]byte, 1024) + n, err := conn1.Read(buf) + require.NoError(t, err) + assert.Equal(t, "before", string(buf[:n])) + conn1.Close() + + // Remove SNI route. Should fall through to fallback. + router.RemoveRoute("db.example.com", "svc-db") + + conn2, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{ + ServerName: "db.example.com", + InsecureSkipVerify: true, //nolint:gosec + }) + require.NoError(t, err) + _, err = conn2.Write([]byte("after")) + require.NoError(t, err) + n, err = conn2.Read(buf) + require.NoError(t, err) + assert.Equal(t, "after", string(buf[:n]), "after removal, should reach fallback") + conn2.Close() +} + +// TestPortRouter_RemoveFallbackCloses verifies that after removing the +// fallback, non-matching connections are closed. +func TestPortRouter_RemoveFallbackCloses(t *testing.T) { + fbBackend := startEchoPlain(t) + + dialResolve := func(_ types.AccountID) (types.DialContextFunc, error) { + return func(_ context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + }, nil + } + + logger := log.StandardLogger() + router := NewPortRouter(logger, dialResolve) + router.SetFallback(Route{ + Type: RouteTCP, + ServiceID: "svc-fb", + Target: fbBackend.Addr().String(), + }) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { _ = router.Serve(ctx, ln) }() + + // With fallback: plain TCP works. + conn1, err := net.DialTimeout("tcp", ln.Addr().String(), 2*time.Second) + require.NoError(t, err) + _, err = conn1.Write([]byte("hello")) + require.NoError(t, err) + buf := make([]byte, 1024) + n, err := conn1.Read(buf) + require.NoError(t, err) + assert.Equal(t, "hello", string(buf[:n])) + conn1.Close() + + // Remove fallback. + router.RemoveFallback("svc-fb") + + // Without fallback on a port router (no HTTP channel): connection should be closed. + conn2, err := net.DialTimeout("tcp", ln.Addr().String(), 2*time.Second) + require.NoError(t, err) + defer conn2.Close() + _, _ = conn2.Write([]byte("bye")) + _ = conn2.SetReadDeadline(time.Now().Add(3 * time.Second)) + _, err = conn2.Read(buf) + assert.Error(t, err, "without fallback, connection should be closed") +} + +// TestPortRouter_HTTPToTLSTransition verifies that switching a service from +// HTTP-only to TLS-only via remove+add doesn't orphan the old HTTP route. +func TestPortRouter_HTTPToTLSTransition(t *testing.T) { + logger := log.StandardLogger() + addr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 443} + tlsBackend := startEchoTLS(t) + + dialResolve := func(_ types.AccountID) (types.DialContextFunc, error) { + return (&net.Dialer{}).DialContext, nil + } + + router := NewRouter(logger, dialResolve, addr) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { _ = router.Serve(ctx, ln) }() + + // Phase 1: HTTP-only. SNI connections go to HTTP channel. + router.AddRoute("app.example.com", Route{Type: RouteHTTP, AccountID: "acct-1", ServiceID: "svc-1"}) + + httpConn := router.HTTPListener() + connDone := make(chan struct{}) + go func() { + defer close(connDone) + c, err := httpConn.Accept() + if err == nil { + c.Close() + } + }() + tlsConn, err := tls.DialWithDialer( + &net.Dialer{Timeout: 2 * time.Second}, + "tcp", ln.Addr().String(), + &tls.Config{ServerName: "app.example.com", InsecureSkipVerify: true}, + ) + if err == nil { + tlsConn.Close() + } + select { + case <-connDone: + case <-time.After(2 * time.Second): + t.Fatal("HTTP listener did not receive connection for HTTP-only route") + } + + // Phase 2: Simulate update to TLS-only (removeMapping + addMapping). + router.RemoveRoute("app.example.com", "svc-1") + router.AddRoute("app.example.com", Route{ + Type: RouteTCP, + AccountID: "acct-1", + ServiceID: "svc-1", + Target: tlsBackend.Addr().String(), + }) + + tlsConn2, err := tls.DialWithDialer( + &net.Dialer{Timeout: 2 * time.Second}, + "tcp", ln.Addr().String(), + &tls.Config{ServerName: "app.example.com", InsecureSkipVerify: true}, + ) + require.NoError(t, err, "TLS connection should succeed after HTTP→TLS transition") + defer tlsConn2.Close() + + _, err = tlsConn2.Write([]byte("hello-tls")) + require.NoError(t, err) + buf := make([]byte, 1024) + n, err := tlsConn2.Read(buf) + require.NoError(t, err) + assert.Equal(t, "hello-tls", string(buf[:n]), "data should relay to TLS backend") +} + +// TestPortRouter_TLSToHTTPTransition verifies that switching a service from +// TLS-only to HTTP-only via remove+add doesn't orphan the old TLS route. +func TestPortRouter_TLSToHTTPTransition(t *testing.T) { + logger := log.StandardLogger() + addr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 443} + tlsBackend := startEchoTLS(t) + + dialResolve := func(_ types.AccountID) (types.DialContextFunc, error) { + return (&net.Dialer{}).DialContext, nil + } + + router := NewRouter(logger, dialResolve, addr) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { _ = router.Serve(ctx, ln) }() + + // Phase 1: TLS-only. Route relays to backend. + router.AddRoute("app.example.com", Route{ + Type: RouteTCP, + AccountID: "acct-1", + ServiceID: "svc-1", + Target: tlsBackend.Addr().String(), + }) + + tlsConn, err := tls.DialWithDialer( + &net.Dialer{Timeout: 2 * time.Second}, + "tcp", ln.Addr().String(), + &tls.Config{ServerName: "app.example.com", InsecureSkipVerify: true}, + ) + require.NoError(t, err, "TLS relay should work before transition") + _, err = tlsConn.Write([]byte("tls-data")) + require.NoError(t, err) + buf := make([]byte, 1024) + n, err := tlsConn.Read(buf) + require.NoError(t, err) + assert.Equal(t, "tls-data", string(buf[:n])) + tlsConn.Close() + + // Phase 2: Simulate update to HTTP-only (removeMapping + addMapping). + router.RemoveRoute("app.example.com", "svc-1") + router.AddRoute("app.example.com", Route{Type: RouteHTTP, AccountID: "acct-1", ServiceID: "svc-1"}) + + // TLS connection should now go to the HTTP listener, NOT to the old TLS backend. + httpConn := router.HTTPListener() + connDone := make(chan struct{}) + go func() { + defer close(connDone) + c, err := httpConn.Accept() + if err == nil { + c.Close() + } + }() + tlsConn2, err := tls.DialWithDialer( + &net.Dialer{Timeout: 2 * time.Second}, + "tcp", ln.Addr().String(), + &tls.Config{ServerName: "app.example.com", InsecureSkipVerify: true}, + ) + if err == nil { + tlsConn2.Close() + } + select { + case <-connDone: + case <-time.After(2 * time.Second): + t.Fatal("HTTP listener should receive connection after TLS→HTTP transition") + } +} + +// TestPortRouter_MultiDomainSamePort verifies that two TLS services sharing +// the same port router are independently routable and removable. +func TestPortRouter_MultiDomainSamePort(t *testing.T) { + logger := log.StandardLogger() + backend1 := startEchoTLSMulti(t) + backend2 := startEchoTLSMulti(t) + + dialResolve := func(_ types.AccountID) (types.DialContextFunc, error) { + return (&net.Dialer{}).DialContext, nil + } + + router := NewPortRouter(logger, dialResolve) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { _ = router.Serve(ctx, ln) }() + + router.AddRoute("svc1.example.com", Route{Type: RouteTCP, AccountID: "acct-1", ServiceID: "svc-1", Target: backend1.Addr().String()}) + router.AddRoute("svc2.example.com", Route{Type: RouteTCP, AccountID: "acct-1", ServiceID: "svc-2", Target: backend2.Addr().String()}) + assert.False(t, router.IsEmpty()) + + // Both domains route independently. + for _, tc := range []struct { + sni string + data string + }{ + {"svc1.example.com", "hello-svc1"}, + {"svc2.example.com", "hello-svc2"}, + } { + conn, err := tls.DialWithDialer( + &net.Dialer{Timeout: 2 * time.Second}, + "tcp", ln.Addr().String(), + &tls.Config{ServerName: tc.sni, InsecureSkipVerify: true}, + ) + require.NoError(t, err, "dial %s", tc.sni) + _, err = conn.Write([]byte(tc.data)) + require.NoError(t, err) + buf := make([]byte, 1024) + n, err := conn.Read(buf) + require.NoError(t, err) + assert.Equal(t, tc.data, string(buf[:n])) + conn.Close() + } + + // Remove svc1. Router should NOT be empty (svc2 still present). + router.RemoveRoute("svc1.example.com", "svc-1") + assert.False(t, router.IsEmpty(), "router should not be empty with one route remaining") + + // svc2 still works. + conn2, err := tls.DialWithDialer( + &net.Dialer{Timeout: 2 * time.Second}, + "tcp", ln.Addr().String(), + &tls.Config{ServerName: "svc2.example.com", InsecureSkipVerify: true}, + ) + require.NoError(t, err) + _, err = conn2.Write([]byte("still-alive")) + require.NoError(t, err) + buf := make([]byte, 1024) + n, err := conn2.Read(buf) + require.NoError(t, err) + assert.Equal(t, "still-alive", string(buf[:n])) + conn2.Close() + + // Remove svc2. Router is now empty. + router.RemoveRoute("svc2.example.com", "svc-2") + assert.True(t, router.IsEmpty(), "router should be empty after removing all routes") +} + +// TestPortRouter_SNIAndFallbackLifecycle verifies the full lifecycle of SNI +// routes and TCP fallback coexisting on the same port router, including the +// ordering of add/remove operations. +func TestPortRouter_SNIAndFallbackLifecycle(t *testing.T) { + logger := log.StandardLogger() + sniBackend := startEchoTLS(t) + fallbackBackend := startEchoPlain(t) + + dialResolve := func(_ types.AccountID) (types.DialContextFunc, error) { + return (&net.Dialer{}).DialContext, nil + } + + router := NewPortRouter(logger, dialResolve) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { _ = router.Serve(ctx, ln) }() + + // Step 1: Add fallback first (port mapping), then SNI route (TLS service). + router.SetFallback(Route{Type: RouteTCP, AccountID: "acct-1", ServiceID: "pm-1", Target: fallbackBackend.Addr().String()}) + router.AddRoute("tls.example.com", Route{Type: RouteTCP, AccountID: "acct-1", ServiceID: "svc-1", Target: sniBackend.Addr().String()}) + assert.False(t, router.IsEmpty()) + + // SNI traffic goes to TLS backend. + tlsConn, err := tls.DialWithDialer( + &net.Dialer{Timeout: 2 * time.Second}, + "tcp", ln.Addr().String(), + &tls.Config{ServerName: "tls.example.com", InsecureSkipVerify: true}, + ) + require.NoError(t, err) + _, err = tlsConn.Write([]byte("sni-traffic")) + require.NoError(t, err) + buf := make([]byte, 1024) + n, err := tlsConn.Read(buf) + require.NoError(t, err) + assert.Equal(t, "sni-traffic", string(buf[:n])) + tlsConn.Close() + + // Plain TCP goes to fallback. + plainConn, err := net.DialTimeout("tcp", ln.Addr().String(), 2*time.Second) + require.NoError(t, err) + _, err = plainConn.Write([]byte("plain")) + require.NoError(t, err) + n, err = plainConn.Read(buf) + require.NoError(t, err) + assert.Equal(t, "plain", string(buf[:n])) + plainConn.Close() + + // Step 2: Remove SNI route. Fallback still works, router not empty. + router.RemoveRoute("tls.example.com", "svc-1") + assert.False(t, router.IsEmpty(), "fallback still present") + + plainConn2, err := net.DialTimeout("tcp", ln.Addr().String(), 2*time.Second) + require.NoError(t, err) + // Must send >= 5 bytes so the SNI peek completes immediately + // without waiting for the 5-second peek timeout. + _, err = plainConn2.Write([]byte("after")) + require.NoError(t, err) + n, err = plainConn2.Read(buf) + require.NoError(t, err) + assert.Equal(t, "after", string(buf[:n])) + plainConn2.Close() + + // Step 3: Remove fallback. Router is now empty. + router.RemoveFallback("pm-1") + assert.True(t, router.IsEmpty()) +} + +// TestPortRouter_IsEmptyTransitions verifies IsEmpty reflects correct state +// through all add/remove operations. +func TestPortRouter_IsEmptyTransitions(t *testing.T) { + logger := log.StandardLogger() + router := NewPortRouter(logger, nil) + + assert.True(t, router.IsEmpty(), "new router") + + router.AddRoute("a.com", Route{Type: RouteTCP, ServiceID: "svc-a"}) + assert.False(t, router.IsEmpty(), "after adding route") + + router.SetFallback(Route{Type: RouteTCP, ServiceID: "svc-fb1"}) + assert.False(t, router.IsEmpty(), "route + fallback") + + router.RemoveRoute("a.com", "svc-a") + assert.False(t, router.IsEmpty(), "fallback only") + + router.RemoveFallback("svc-fb1") + assert.True(t, router.IsEmpty(), "all removed") + + // Reverse order: fallback first, then route. + router.SetFallback(Route{Type: RouteTCP, ServiceID: "svc-fb2"}) + assert.False(t, router.IsEmpty()) + + router.AddRoute("b.com", Route{Type: RouteTCP, ServiceID: "svc-b"}) + assert.False(t, router.IsEmpty()) + + router.RemoveFallback("svc-fb2") + assert.False(t, router.IsEmpty(), "route still present") + + router.RemoveRoute("b.com", "svc-b") + assert.True(t, router.IsEmpty(), "fully empty again") +} + +// startEchoTLSMulti starts a TLS echo server that accepts multiple connections. +func startEchoTLSMulti(t *testing.T) net.Listener { + t.Helper() + + cert := generateSelfSignedCert(t) + ln, err := tls.Listen("tcp", "127.0.0.1:0", &tls.Config{ + Certificates: []tls.Certificate{cert}, + }) + require.NoError(t, err) + t.Cleanup(func() { ln.Close() }) + + go func() { + for { + conn, err := ln.Accept() + if err != nil { + return + } + go func(c net.Conn) { + defer c.Close() + buf := make([]byte, 1024) + n, _ := c.Read(buf) + _, _ = c.Write(buf[:n]) + }(conn) + } + }() + + return ln +} + +// startEchoPlain starts a plain TCP echo server that reads until newline +// or connection close, then echoes the received data. +func startEchoPlain(t *testing.T) net.Listener { + t.Helper() + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + t.Cleanup(func() { ln.Close() }) + + go func() { + for { + conn, err := ln.Accept() + if err != nil { + return + } + go func(c net.Conn) { + defer c.Close() + // Set a read deadline so we don't block forever waiting for more data. + _ = c.SetReadDeadline(time.Now().Add(2 * time.Second)) + buf := make([]byte, 1024) + n, _ := c.Read(buf) + _, _ = c.Write(buf[:n]) + }(conn) + } + }() + + return ln +} diff --git a/proxy/internal/tcp/snipeek.go b/proxy/internal/tcp/snipeek.go new file mode 100644 index 000000000..25ab8e5ef --- /dev/null +++ b/proxy/internal/tcp/snipeek.go @@ -0,0 +1,191 @@ +package tcp + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "net" +) + +const ( + // TLS record header is 5 bytes: ContentType(1) + Version(2) + Length(2). + tlsRecordHeaderLen = 5 + // TLS handshake type for ClientHello. + handshakeTypeClientHello = 1 + // TLS ContentType for handshake messages. + contentTypeHandshake = 22 + // SNI extension type (RFC 6066). + extensionServerName = 0 + // SNI host name type. + sniHostNameType = 0 + // maxClientHelloLen caps the ClientHello size we're willing to buffer. + maxClientHelloLen = 16384 + // maxSNILen is the maximum valid DNS hostname length per RFC 1035. + maxSNILen = 253 +) + +// PeekClientHello reads the TLS ClientHello from conn, extracts the SNI +// server name, and returns a wrapped connection that replays the peeked +// bytes transparently. If the data is not a valid TLS ClientHello or +// contains no SNI extension, sni is empty and err is nil. +// +// ECH/ESNI: When the client uses Encrypted Client Hello (TLS 1.3), the +// real server name is encrypted inside the encrypted_client_hello +// extension. This parser only reads the cleartext server_name extension +// (type 0x0000), so ECH connections return sni="" and are routed through +// the fallback path (or HTTP channel), which is the correct behavior +// for a transparent proxy that does not terminate TLS. +func PeekClientHello(conn net.Conn) (sni string, wrapped net.Conn, err error) { + // Read the 5-byte TLS record header into a small stack-friendly buffer. + var header [tlsRecordHeaderLen]byte + if _, err := io.ReadFull(conn, header[:]); err != nil { + return "", nil, fmt.Errorf("read TLS record header: %w", err) + } + + if header[0] != contentTypeHandshake { + return "", newPeekedConn(conn, header[:]), nil + } + + recordLen := int(binary.BigEndian.Uint16(header[3:5])) + if recordLen == 0 || recordLen > maxClientHelloLen { + return "", newPeekedConn(conn, header[:]), nil + } + + // Single allocation for header + payload. The peekedConn takes + // ownership of this buffer, so no further copies are needed. + buf := make([]byte, tlsRecordHeaderLen+recordLen) + copy(buf, header[:]) + + n, err := io.ReadFull(conn, buf[tlsRecordHeaderLen:]) + if err != nil { + return "", newPeekedConn(conn, buf[:tlsRecordHeaderLen+n]), fmt.Errorf("read TLS handshake payload: %w", err) + } + + sni = extractSNI(buf[tlsRecordHeaderLen:]) + return sni, newPeekedConn(conn, buf), nil +} + +// extractSNI parses a TLS handshake payload to find the SNI extension. +// Returns empty string if the payload is not a ClientHello or has no SNI. +func extractSNI(payload []byte) string { + if len(payload) < 4 { + return "" + } + + if payload[0] != handshakeTypeClientHello { + return "" + } + + // Handshake length (3 bytes, big-endian). + handshakeLen := int(payload[1])<<16 | int(payload[2])<<8 | int(payload[3]) + if handshakeLen > len(payload)-4 { + return "" + } + + return parseSNIFromClientHello(payload[4 : 4+handshakeLen]) +} + +// parseSNIFromClientHello walks the ClientHello message fields to reach +// the extensions block and extract the server_name extension value. +func parseSNIFromClientHello(msg []byte) string { + // ClientHello layout: + // ProtocolVersion(2) + Random(32) = 34 bytes minimum before session_id + if len(msg) < 34 { + return "" + } + + pos := 34 + + // Session ID (variable, 1 byte length prefix). + if pos >= len(msg) { + return "" + } + sessionIDLen := int(msg[pos]) + pos++ + pos += sessionIDLen + + // Cipher suites (variable, 2 byte length prefix). + if pos+2 > len(msg) { + return "" + } + cipherSuitesLen := int(binary.BigEndian.Uint16(msg[pos : pos+2])) + pos += 2 + cipherSuitesLen + + // Compression methods (variable, 1 byte length prefix). + if pos >= len(msg) { + return "" + } + compMethodsLen := int(msg[pos]) + pos++ + pos += compMethodsLen + + // Extensions (variable, 2 byte length prefix). + if pos+2 > len(msg) { + return "" + } + extensionsLen := int(binary.BigEndian.Uint16(msg[pos : pos+2])) + pos += 2 + + extensionsEnd := pos + extensionsLen + if extensionsEnd > len(msg) { + return "" + } + + return findSNIExtension(msg[pos:extensionsEnd]) +} + +// findSNIExtension iterates over TLS extensions and returns the host +// name from the server_name extension, if present. +func findSNIExtension(extensions []byte) string { + pos := 0 + for pos+4 <= len(extensions) { + extType := binary.BigEndian.Uint16(extensions[pos : pos+2]) + extLen := int(binary.BigEndian.Uint16(extensions[pos+2 : pos+4])) + pos += 4 + + if pos+extLen > len(extensions) { + return "" + } + + if extType == extensionServerName { + return parseSNIExtensionData(extensions[pos : pos+extLen]) + } + pos += extLen + } + return "" +} + +// parseSNIExtensionData parses the ServerNameList structure inside an +// SNI extension to extract the host name. +func parseSNIExtensionData(data []byte) string { + if len(data) < 2 { + return "" + } + listLen := int(binary.BigEndian.Uint16(data[0:2])) + if listLen > len(data)-2 { + return "" + } + + list := data[2 : 2+listLen] + pos := 0 + for pos+3 <= len(list) { + nameType := list[pos] + nameLen := int(binary.BigEndian.Uint16(list[pos+1 : pos+3])) + pos += 3 + + if pos+nameLen > len(list) { + return "" + } + + if nameType == sniHostNameType { + name := list[pos : pos+nameLen] + if nameLen > maxSNILen || bytes.ContainsRune(name, 0) { + return "" + } + return string(name) + } + pos += nameLen + } + return "" +} diff --git a/proxy/internal/tcp/snipeek_test.go b/proxy/internal/tcp/snipeek_test.go new file mode 100644 index 000000000..9afe6261d --- /dev/null +++ b/proxy/internal/tcp/snipeek_test.go @@ -0,0 +1,251 @@ +package tcp + +import ( + "crypto/tls" + "io" + "net" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPeekClientHello_ValidSNI(t *testing.T) { + clientConn, serverConn := net.Pipe() + defer clientConn.Close() + defer serverConn.Close() + + const expectedSNI = "example.com" + trailingData := []byte("trailing data after handshake") + + go func() { + tlsConn := tls.Client(clientConn, &tls.Config{ + ServerName: expectedSNI, + InsecureSkipVerify: true, //nolint:gosec + }) + // The Handshake will send the ClientHello. It will fail because + // our server side isn't doing a real TLS handshake, but that's + // fine: we only need the ClientHello to be sent. + _ = tlsConn.Handshake() + }() + + sni, wrapped, err := PeekClientHello(serverConn) + require.NoError(t, err) + assert.Equal(t, expectedSNI, sni, "should extract SNI from ClientHello") + assert.NotNil(t, wrapped, "wrapped connection should not be nil") + + // Verify the wrapped connection replays the peeked bytes. + // Read the first 5 bytes (TLS record header) to confirm replay. + buf := make([]byte, 5) + n, err := wrapped.Read(buf) + require.NoError(t, err) + assert.Equal(t, 5, n) + assert.Equal(t, byte(contentTypeHandshake), buf[0], "first byte should be TLS handshake content type") + + // Write trailing data from the client side and verify it arrives + // through the wrapped connection after the peeked bytes. + go func() { + _, _ = clientConn.Write(trailingData) + }() + + // Drain the rest of the peeked ClientHello first. + peekedRest := make([]byte, 16384) + _, _ = wrapped.Read(peekedRest) + + got := make([]byte, len(trailingData)) + n, err = io.ReadFull(wrapped, got) + require.NoError(t, err) + assert.Equal(t, trailingData, got[:n]) +} + +func TestPeekClientHello_MultipleSNIs(t *testing.T) { + tests := []struct { + name string + serverName string + expectedSNI string + }{ + {"simple domain", "example.com", "example.com"}, + {"subdomain", "sub.example.com", "sub.example.com"}, + {"deep subdomain", "a.b.c.example.com", "a.b.c.example.com"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + clientConn, serverConn := net.Pipe() + defer clientConn.Close() + defer serverConn.Close() + + go func() { + tlsConn := tls.Client(clientConn, &tls.Config{ + ServerName: tt.serverName, + InsecureSkipVerify: true, //nolint:gosec + }) + _ = tlsConn.Handshake() + }() + + sni, wrapped, err := PeekClientHello(serverConn) + require.NoError(t, err) + assert.Equal(t, tt.expectedSNI, sni) + assert.NotNil(t, wrapped) + }) + } +} + +func TestPeekClientHello_NonTLSData(t *testing.T) { + clientConn, serverConn := net.Pipe() + defer clientConn.Close() + defer serverConn.Close() + + // Send plain HTTP data (not TLS). + httpData := []byte("GET / HTTP/1.1\r\nHost: example.com\r\n\r\n") + go func() { + _, _ = clientConn.Write(httpData) + }() + + sni, wrapped, err := PeekClientHello(serverConn) + require.NoError(t, err) + assert.Empty(t, sni, "should return empty SNI for non-TLS data") + assert.NotNil(t, wrapped) + + // Verify the wrapped connection still provides the original data. + buf := make([]byte, len(httpData)) + n, err := io.ReadFull(wrapped, buf) + require.NoError(t, err) + assert.Equal(t, httpData, buf[:n], "wrapped connection should replay original data") +} + +func TestPeekClientHello_TruncatedHeader(t *testing.T) { + clientConn, serverConn := net.Pipe() + defer serverConn.Close() + + // Write only 3 bytes then close, fewer than the 5-byte TLS header. + go func() { + _, _ = clientConn.Write([]byte{0x16, 0x03, 0x01}) + clientConn.Close() + }() + + _, _, err := PeekClientHello(serverConn) + assert.Error(t, err, "should error on truncated header") +} + +func TestPeekClientHello_TruncatedPayload(t *testing.T) { + clientConn, serverConn := net.Pipe() + defer serverConn.Close() + + // Write a valid TLS header claiming 100 bytes, but only send 10. + go func() { + header := []byte{0x16, 0x03, 0x01, 0x00, 0x64} // 100 bytes claimed + _, _ = clientConn.Write(header) + _, _ = clientConn.Write(make([]byte, 10)) + clientConn.Close() + }() + + _, _, err := PeekClientHello(serverConn) + assert.Error(t, err, "should error on truncated payload") +} + +func TestPeekClientHello_ZeroLengthRecord(t *testing.T) { + clientConn, serverConn := net.Pipe() + defer clientConn.Close() + defer serverConn.Close() + + // TLS handshake header with zero-length payload. + go func() { + _, _ = clientConn.Write([]byte{0x16, 0x03, 0x01, 0x00, 0x00}) + }() + + sni, wrapped, err := PeekClientHello(serverConn) + require.NoError(t, err) + assert.Empty(t, sni) + assert.NotNil(t, wrapped) +} + +func TestExtractSNI_InvalidPayload(t *testing.T) { + tests := []struct { + name string + payload []byte + }{ + {"nil", nil}, + {"empty", []byte{}}, + {"too short", []byte{0x01, 0x00}}, + {"wrong handshake type", []byte{0x02, 0x00, 0x00, 0x05, 0x03, 0x03, 0x00, 0x00, 0x00}}, + {"truncated client hello", []byte{0x01, 0x00, 0x00, 0x20}}, // claims 32 bytes but has none + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Empty(t, extractSNI(tt.payload)) + }) + } +} + +func TestPeekedConn_CloseWrite(t *testing.T) { + t.Run("delegates to underlying TCPConn", func(t *testing.T) { + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + accepted := make(chan net.Conn, 1) + go func() { + c, err := ln.Accept() + if err == nil { + accepted <- c + } + }() + + client, err := net.Dial("tcp", ln.Addr().String()) + require.NoError(t, err) + defer client.Close() + + server := <-accepted + defer server.Close() + + wrapped := newPeekedConn(server, []byte("peeked")) + + // CloseWrite should succeed on a real TCP connection. + err = wrapped.CloseWrite() + assert.NoError(t, err) + + // The client should see EOF on reads after CloseWrite. + buf := make([]byte, 1) + _, err = client.Read(buf) + assert.Equal(t, io.EOF, err, "client should see EOF after half-close") + }) + + t.Run("no-op on non-halfcloser", func(t *testing.T) { + // net.Pipe does not implement CloseWrite. + _, server := net.Pipe() + defer server.Close() + + wrapped := newPeekedConn(server, []byte("peeked")) + err := wrapped.CloseWrite() + assert.NoError(t, err, "should be no-op on non-halfcloser") + }) +} + +func TestPeekedConn_ReplayAndPassthrough(t *testing.T) { + clientConn, serverConn := net.Pipe() + defer clientConn.Close() + defer serverConn.Close() + + peeked := []byte("peeked-data") + subsequent := []byte("subsequent-data") + + wrapped := newPeekedConn(serverConn, peeked) + + go func() { + _, _ = clientConn.Write(subsequent) + }() + + // Read should return peeked data first. + buf := make([]byte, len(peeked)) + n, err := io.ReadFull(wrapped, buf) + require.NoError(t, err) + assert.Equal(t, peeked, buf[:n]) + + // Then subsequent data from the real connection. + buf = make([]byte, len(subsequent)) + n, err = io.ReadFull(wrapped, buf) + require.NoError(t, err) + assert.Equal(t, subsequent, buf[:n]) +} diff --git a/proxy/internal/types/types.go b/proxy/internal/types/types.go index 41acfef40..bf3731803 100644 --- a/proxy/internal/types/types.go +++ b/proxy/internal/types/types.go @@ -1,5 +1,56 @@ // Package types defines common types used across the proxy package. package types +import ( + "context" + "net" + "time" +) + // AccountID represents a unique identifier for a NetBird account. type AccountID string + +// ServiceID represents a unique identifier for a proxy service. +type ServiceID string + +// ServiceMode describes how a reverse proxy service is exposed. +type ServiceMode string + +const ( + ServiceModeHTTP ServiceMode = "http" + ServiceModeTCP ServiceMode = "tcp" + ServiceModeUDP ServiceMode = "udp" + ServiceModeTLS ServiceMode = "tls" +) + +// IsL4 returns true for TCP, UDP, and TLS modes. +func (m ServiceMode) IsL4() bool { + return m == ServiceModeTCP || m == ServiceModeUDP || m == ServiceModeTLS +} + +// RelayDirection indicates the direction of a relayed packet. +type RelayDirection string + +const ( + RelayDirectionClientToBackend RelayDirection = "client_to_backend" + RelayDirectionBackendToClient RelayDirection = "backend_to_client" +) + +// DialContextFunc dials a backend through the WireGuard tunnel. +type DialContextFunc func(ctx context.Context, network, address string) (net.Conn, error) + +// dialTimeoutKey is the context key for a per-request dial timeout. +type dialTimeoutKey struct{} + +// WithDialTimeout returns a context carrying a dial timeout that +// DialContext wrappers can use to scope the timeout to just the +// connection establishment phase. +func WithDialTimeout(ctx context.Context, d time.Duration) context.Context { + return context.WithValue(ctx, dialTimeoutKey{}, d) +} + +// DialTimeoutFromContext returns the dial timeout from the context, if set. +func DialTimeoutFromContext(ctx context.Context) (time.Duration, bool) { + d, ok := ctx.Value(dialTimeoutKey{}).(time.Duration) + return d, ok && d > 0 +} diff --git a/proxy/internal/types/types_test.go b/proxy/internal/types/types_test.go new file mode 100644 index 000000000..dd9738442 --- /dev/null +++ b/proxy/internal/types/types_test.go @@ -0,0 +1,54 @@ +package types + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestServiceMode_IsL4(t *testing.T) { + tests := []struct { + mode ServiceMode + want bool + }{ + {ServiceModeHTTP, false}, + {ServiceModeTCP, true}, + {ServiceModeUDP, true}, + {ServiceModeTLS, true}, + {ServiceMode("unknown"), false}, + } + + for _, tt := range tests { + t.Run(string(tt.mode), func(t *testing.T) { + assert.Equal(t, tt.want, tt.mode.IsL4()) + }) + } +} + +func TestDialTimeoutContext(t *testing.T) { + t.Run("round trip", func(t *testing.T) { + ctx := WithDialTimeout(context.Background(), 5*time.Second) + d, ok := DialTimeoutFromContext(ctx) + assert.True(t, ok) + assert.Equal(t, 5*time.Second, d) + }) + + t.Run("missing", func(t *testing.T) { + _, ok := DialTimeoutFromContext(context.Background()) + assert.False(t, ok) + }) + + t.Run("zero returns false", func(t *testing.T) { + ctx := WithDialTimeout(context.Background(), 0) + _, ok := DialTimeoutFromContext(ctx) + assert.False(t, ok, "zero duration should return ok=false") + }) + + t.Run("negative returns false", func(t *testing.T) { + ctx := WithDialTimeout(context.Background(), -1*time.Second) + _, ok := DialTimeoutFromContext(ctx) + assert.False(t, ok, "negative duration should return ok=false") + }) +} diff --git a/proxy/internal/udp/relay.go b/proxy/internal/udp/relay.go new file mode 100644 index 000000000..f2f58e858 --- /dev/null +++ b/proxy/internal/udp/relay.go @@ -0,0 +1,496 @@ +package udp + +import ( + "context" + "errors" + "fmt" + "net" + "net/netip" + "sync" + "sync/atomic" + "time" + + log "github.com/sirupsen/logrus" + "golang.org/x/time/rate" + + "github.com/netbirdio/netbird/proxy/internal/accesslog" + "github.com/netbirdio/netbird/proxy/internal/netutil" + "github.com/netbirdio/netbird/proxy/internal/types" +) + +const ( + // DefaultSessionTTL is the default idle timeout for UDP sessions before cleanup. + DefaultSessionTTL = 30 * time.Second + // cleanupInterval is how often the cleaner goroutine runs. + cleanupInterval = time.Minute + // maxPacketSize is the maximum UDP packet size we'll handle. + maxPacketSize = 65535 + // DefaultMaxSessions is the default cap on concurrent UDP sessions per relay. + DefaultMaxSessions = 1024 + // sessionCreateRate limits new session creation per second. + sessionCreateRate = 50 + // sessionCreateBurst is the burst allowance for session creation. + sessionCreateBurst = 100 + // defaultDialTimeout is the fallback dial timeout for backend connections. + defaultDialTimeout = 30 * time.Second +) + +// l4Logger sends layer-4 access log entries to the management server. +type l4Logger interface { + LogL4(entry accesslog.L4Entry) +} + +// SessionObserver receives callbacks for UDP session lifecycle events. +// All methods must be safe for concurrent use. +type SessionObserver interface { + UDPSessionStarted(accountID types.AccountID) + UDPSessionEnded(accountID types.AccountID) + UDPSessionDialError(accountID types.AccountID) + UDPSessionRejected(accountID types.AccountID) + UDPPacketRelayed(direction types.RelayDirection, bytes int) +} + +// clientAddr is a typed key for UDP session lookups. +type clientAddr string + +// Relay listens for incoming UDP packets on a dedicated port and +// maintains per-client sessions that relay packets to a backend +// through the WireGuard tunnel. +type Relay struct { + logger *log.Entry + listener net.PacketConn + target string + domain string + accountID types.AccountID + serviceID types.ServiceID + dialFunc types.DialContextFunc + dialTimeout time.Duration + sessionTTL time.Duration + maxSessions int + + mu sync.RWMutex + sessions map[clientAddr]*session + + bufPool sync.Pool + sessLimiter *rate.Limiter + sessWg sync.WaitGroup + ctx context.Context + cancel context.CancelFunc + observer SessionObserver + accessLog l4Logger +} + +type session struct { + backend net.Conn + addr net.Addr + createdAt time.Time + // lastSeen stores the last activity timestamp as unix nanoseconds. + lastSeen atomic.Int64 + cancel context.CancelFunc + // bytesIn tracks total bytes received from the client. + bytesIn atomic.Int64 + // bytesOut tracks total bytes sent back to the client. + bytesOut atomic.Int64 +} + +func (s *session) updateLastSeen() { + s.lastSeen.Store(time.Now().UnixNano()) +} + +func (s *session) idleDuration() time.Duration { + return time.Since(time.Unix(0, s.lastSeen.Load())) +} + +// RelayConfig holds the configuration for a UDP relay. +type RelayConfig struct { + Logger *log.Entry + Listener net.PacketConn + Target string + Domain string + AccountID types.AccountID + ServiceID types.ServiceID + DialFunc types.DialContextFunc + DialTimeout time.Duration + SessionTTL time.Duration + MaxSessions int + AccessLog l4Logger +} + +// New creates a UDP relay for the given listener and backend target. +// MaxSessions caps the number of concurrent sessions; use 0 for DefaultMaxSessions. +// DialTimeout controls how long to wait for backend connections; use 0 for default. +// SessionTTL is the idle timeout before a session is reaped; use 0 for DefaultSessionTTL. +func New(parentCtx context.Context, cfg RelayConfig) *Relay { + maxSessions := cfg.MaxSessions + dialTimeout := cfg.DialTimeout + sessionTTL := cfg.SessionTTL + if maxSessions <= 0 { + maxSessions = DefaultMaxSessions + } + if dialTimeout <= 0 { + dialTimeout = defaultDialTimeout + } + if sessionTTL <= 0 { + sessionTTL = DefaultSessionTTL + } + ctx, cancel := context.WithCancel(parentCtx) + return &Relay{ + logger: cfg.Logger, + listener: cfg.Listener, + target: cfg.Target, + domain: cfg.Domain, + accountID: cfg.AccountID, + serviceID: cfg.ServiceID, + accessLog: cfg.AccessLog, + dialFunc: cfg.DialFunc, + dialTimeout: dialTimeout, + sessionTTL: sessionTTL, + maxSessions: maxSessions, + sessions: make(map[clientAddr]*session), + bufPool: sync.Pool{ + New: func() any { + buf := make([]byte, maxPacketSize) + return &buf + }, + }, + sessLimiter: rate.NewLimiter(sessionCreateRate, sessionCreateBurst), + ctx: ctx, + cancel: cancel, + } +} + +// ServiceID returns the service ID associated with this relay. +func (r *Relay) ServiceID() types.ServiceID { + return r.serviceID +} + +// SetObserver sets the session lifecycle observer. Must be called before Serve. +func (r *Relay) SetObserver(obs SessionObserver) { + r.observer = obs +} + +// Serve starts the relay loop. It blocks until the context is canceled +// or the listener is closed. +func (r *Relay) Serve() { + go r.cleanupLoop() + + for { + bufp := r.bufPool.Get().(*[]byte) + buf := *bufp + + n, addr, err := r.listener.ReadFrom(buf) + if err != nil { + r.bufPool.Put(bufp) + if r.ctx.Err() != nil || errors.Is(err, net.ErrClosed) { + return + } + r.logger.Debugf("UDP read: %v", err) + continue + } + + data := buf[:n] + sess, err := r.getOrCreateSession(addr) + if err != nil { + r.bufPool.Put(bufp) + r.logger.Debugf("create UDP session for %s: %v", addr, err) + continue + } + + sess.updateLastSeen() + + nw, err := sess.backend.Write(data) + if err != nil { + r.bufPool.Put(bufp) + if !netutil.IsExpectedError(err) { + r.logger.Debugf("UDP write to backend for %s: %v", addr, err) + } + r.removeSession(sess) + continue + } + sess.bytesIn.Add(int64(nw)) + + if r.observer != nil { + r.observer.UDPPacketRelayed(types.RelayDirectionClientToBackend, nw) + } + r.bufPool.Put(bufp) + } +} + +// getOrCreateSession returns an existing session or creates a new one. +func (r *Relay) getOrCreateSession(addr net.Addr) (*session, error) { + key := clientAddr(addr.String()) + + r.mu.RLock() + sess, ok := r.sessions[key] + r.mu.RUnlock() + if ok && sess != nil { + return sess, nil + } + + // Check before taking the write lock: if the relay is shutting down, + // don't create new sessions. This prevents orphaned goroutines when + // Serve() processes a packet that was already read before Close(). + if r.ctx.Err() != nil { + return nil, r.ctx.Err() + } + + r.mu.Lock() + + if sess, ok = r.sessions[key]; ok && sess != nil { + r.mu.Unlock() + return sess, nil + } + if ok { + // Another goroutine is dialing for this key, skip. + r.mu.Unlock() + return nil, fmt.Errorf("session dial in progress for %s", key) + } + + if len(r.sessions) >= r.maxSessions { + r.mu.Unlock() + if r.observer != nil { + r.observer.UDPSessionRejected(r.accountID) + } + return nil, fmt.Errorf("session limit reached (%d)", r.maxSessions) + } + + if !r.sessLimiter.Allow() { + r.mu.Unlock() + if r.observer != nil { + r.observer.UDPSessionRejected(r.accountID) + } + return nil, fmt.Errorf("session creation rate limited") + } + + // Reserve the slot with a nil session so concurrent callers for the same + // key see it exists and wait. Release the lock before dialing. + r.sessions[key] = nil + r.mu.Unlock() + + dialCtx, dialCancel := context.WithTimeout(r.ctx, r.dialTimeout) + backend, err := r.dialFunc(dialCtx, "udp", r.target) + dialCancel() + if err != nil { + r.mu.Lock() + delete(r.sessions, key) + r.mu.Unlock() + if r.observer != nil { + r.observer.UDPSessionDialError(r.accountID) + } + return nil, fmt.Errorf("dial backend %s: %w", r.target, err) + } + + sessCtx, sessCancel := context.WithCancel(r.ctx) + sess = &session{ + backend: backend, + addr: addr, + createdAt: time.Now(), + cancel: sessCancel, + } + sess.updateLastSeen() + + r.mu.Lock() + r.sessions[key] = sess + r.mu.Unlock() + + if r.observer != nil { + r.observer.UDPSessionStarted(r.accountID) + } + + r.sessWg.Go(func() { + r.relayBackendToClient(sessCtx, sess) + }) + + r.logger.Debugf("UDP session created for %s", addr) + return sess, nil +} + +// relayBackendToClient reads packets from the backend and writes them +// back to the client through the public-facing listener. +func (r *Relay) relayBackendToClient(ctx context.Context, sess *session) { + bufp := r.bufPool.Get().(*[]byte) + defer r.bufPool.Put(bufp) + defer r.removeSession(sess) + + for ctx.Err() == nil { + data, ok := r.readBackendPacket(sess, *bufp) + if !ok { + return + } + if data == nil { + continue + } + + sess.updateLastSeen() + + nw, err := r.listener.WriteTo(data, sess.addr) + if err != nil { + if !netutil.IsExpectedError(err) { + r.logger.Debugf("UDP write to client %s: %v", sess.addr, err) + } + return + } + sess.bytesOut.Add(int64(nw)) + + if r.observer != nil { + r.observer.UDPPacketRelayed(types.RelayDirectionBackendToClient, nw) + } + } +} + +// readBackendPacket reads one packet from the backend with an idle deadline. +// Returns (data, true) on success, (nil, true) on idle timeout that should +// retry, or (nil, false) when the session should be torn down. +func (r *Relay) readBackendPacket(sess *session, buf []byte) ([]byte, bool) { + if err := sess.backend.SetReadDeadline(time.Now().Add(r.sessionTTL)); err != nil { + r.logger.Debugf("set backend read deadline for %s: %v", sess.addr, err) + return nil, false + } + + n, err := sess.backend.Read(buf) + if err != nil { + if netutil.IsTimeout(err) { + if sess.idleDuration() > r.sessionTTL { + return nil, false + } + return nil, true + } + if !netutil.IsExpectedError(err) { + r.logger.Debugf("UDP read from backend for %s: %v", sess.addr, err) + } + return nil, false + } + + return buf[:n], true +} + +// cleanupLoop periodically removes idle sessions. +func (r *Relay) cleanupLoop() { + ticker := time.NewTicker(cleanupInterval) + defer ticker.Stop() + + for { + select { + case <-r.ctx.Done(): + return + case <-ticker.C: + r.cleanupIdleSessions() + } + } +} + +// cleanupIdleSessions closes sessions that have been idle for too long. +func (r *Relay) cleanupIdleSessions() { + var expired []*session + + r.mu.Lock() + for key, sess := range r.sessions { + if sess == nil { + continue + } + idle := sess.idleDuration() + if idle > r.sessionTTL { + r.logger.Debugf("UDP session %s idle for %s, closing (client→backend: %d bytes, backend→client: %d bytes)", + sess.addr, idle, sess.bytesIn.Load(), sess.bytesOut.Load()) + delete(r.sessions, key) + sess.cancel() + if err := sess.backend.Close(); err != nil { + r.logger.Debugf("close idle session %s backend: %v", sess.addr, err) + } + expired = append(expired, sess) + } + } + r.mu.Unlock() + + for _, sess := range expired { + if r.observer != nil { + r.observer.UDPSessionEnded(r.accountID) + } + r.logSessionEnd(sess) + } +} + +// removeSession removes a session from the map if it still matches the +// given pointer. This is safe to call concurrently with cleanupIdleSessions +// because the identity check prevents double-close when both paths race. +func (r *Relay) removeSession(sess *session) { + r.mu.Lock() + key := clientAddr(sess.addr.String()) + removed := r.sessions[key] == sess + if removed { + delete(r.sessions, key) + sess.cancel() + if err := sess.backend.Close(); err != nil { + r.logger.Debugf("close session %s backend: %v", sess.addr, err) + } + } + r.mu.Unlock() + + if removed { + r.logger.Debugf("UDP session %s ended (client→backend: %d bytes, backend→client: %d bytes)", + sess.addr, sess.bytesIn.Load(), sess.bytesOut.Load()) + if r.observer != nil { + r.observer.UDPSessionEnded(r.accountID) + } + r.logSessionEnd(sess) + } +} + +// logSessionEnd sends an access log entry for a completed UDP session. +func (r *Relay) logSessionEnd(sess *session) { + if r.accessLog == nil { + return + } + + var sourceIP netip.Addr + if ap, err := netip.ParseAddrPort(sess.addr.String()); err == nil { + sourceIP = ap.Addr().Unmap() + } + + r.accessLog.LogL4(accesslog.L4Entry{ + AccountID: r.accountID, + ServiceID: r.serviceID, + Protocol: accesslog.ProtocolUDP, + Host: r.domain, + SourceIP: sourceIP, + DurationMs: time.Unix(0, sess.lastSeen.Load()).Sub(sess.createdAt).Milliseconds(), + BytesUpload: sess.bytesIn.Load(), + BytesDownload: sess.bytesOut.Load(), + }) +} + +// Close stops the relay, waits for all session goroutines to exit, +// and cleans up remaining sessions. +func (r *Relay) Close() { + r.cancel() + if err := r.listener.Close(); err != nil { + r.logger.Debugf("close UDP listener: %v", err) + } + + var closedSessions []*session + r.mu.Lock() + for key, sess := range r.sessions { + if sess == nil { + delete(r.sessions, key) + continue + } + r.logger.Debugf("UDP session %s closed (client→backend: %d bytes, backend→client: %d bytes)", + sess.addr, sess.bytesIn.Load(), sess.bytesOut.Load()) + sess.cancel() + if err := sess.backend.Close(); err != nil { + r.logger.Debugf("close session %s backend: %v", sess.addr, err) + } + delete(r.sessions, key) + closedSessions = append(closedSessions, sess) + } + r.mu.Unlock() + + for _, sess := range closedSessions { + if r.observer != nil { + r.observer.UDPSessionEnded(r.accountID) + } + r.logSessionEnd(sess) + } + + r.sessWg.Wait() +} diff --git a/proxy/internal/udp/relay_test.go b/proxy/internal/udp/relay_test.go new file mode 100644 index 000000000..a1e91b290 --- /dev/null +++ b/proxy/internal/udp/relay_test.go @@ -0,0 +1,493 @@ +package udp + +import ( + "context" + "fmt" + "net" + "sync" + "testing" + "time" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/proxy/internal/types" +) + +func TestRelay_BasicPacketExchange(t *testing.T) { + // Set up a UDP backend that echoes packets. + backend, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + defer backend.Close() + + go func() { + buf := make([]byte, 65535) + for { + n, addr, err := backend.ReadFrom(buf) + if err != nil { + return + } + _, _ = backend.WriteTo(buf[:n], addr) + } + }() + + // Set up the relay's public-facing listener. + listener, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + defer listener.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewEntry(log.StandardLogger()) + backendAddr := backend.LocalAddr().String() + + dialFunc := func(ctx context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + } + + relay := New(ctx, RelayConfig{Logger: logger, Listener: listener, Target: backendAddr, DialFunc: dialFunc}) + go relay.Serve() + defer relay.Close() + + // Create a client and send a packet to the relay. + client, err := net.Dial("udp", listener.LocalAddr().String()) + require.NoError(t, err) + defer client.Close() + + testData := []byte("hello UDP relay") + _, err = client.Write(testData) + require.NoError(t, err) + + // Read the echoed response. + if err := client.SetReadDeadline(time.Now().Add(2 * time.Second)); err != nil { + t.Fatal(err) + } + buf := make([]byte, 1024) + n, err := client.Read(buf) + require.NoError(t, err) + assert.Equal(t, testData, buf[:n], "should receive echoed packet") +} + +func TestRelay_MultipleClients(t *testing.T) { + backend, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + defer backend.Close() + + go func() { + buf := make([]byte, 65535) + for { + n, addr, err := backend.ReadFrom(buf) + if err != nil { + return + } + _, _ = backend.WriteTo(buf[:n], addr) + } + }() + + listener, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + defer listener.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewEntry(log.StandardLogger()) + dialFunc := func(ctx context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + } + + relay := New(ctx, RelayConfig{Logger: logger, Listener: listener, Target: backend.LocalAddr().String(), DialFunc: dialFunc}) + go relay.Serve() + defer relay.Close() + + // Two clients, each should get their own session. + for i, msg := range []string{"client-1", "client-2"} { + client, err := net.Dial("udp", listener.LocalAddr().String()) + require.NoError(t, err, "client %d", i) + defer client.Close() + + _, err = client.Write([]byte(msg)) + require.NoError(t, err) + + if err := client.SetReadDeadline(time.Now().Add(2 * time.Second)); err != nil { + t.Fatal(err) + } + buf := make([]byte, 1024) + n, err := client.Read(buf) + require.NoError(t, err, "client %d read", i) + assert.Equal(t, msg, string(buf[:n]), "client %d should get own echo", i) + } + + // Verify two sessions were created. + relay.mu.RLock() + sessionCount := len(relay.sessions) + relay.mu.RUnlock() + assert.Equal(t, 2, sessionCount, "should have two sessions") +} + +func TestRelay_Close(t *testing.T) { + listener, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewEntry(log.StandardLogger()) + dialFunc := func(ctx context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + } + + relay := New(ctx, RelayConfig{Logger: logger, Listener: listener, Target: "127.0.0.1:9999", DialFunc: dialFunc}) + + done := make(chan struct{}) + go func() { + relay.Serve() + close(done) + }() + + relay.Close() + + select { + case <-done: + case <-time.After(5 * time.Second): + t.Fatal("Serve did not return after Close") + } +} + +func TestRelay_SessionCleanup(t *testing.T) { + backend, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + defer backend.Close() + + go func() { + buf := make([]byte, 65535) + for { + n, addr, err := backend.ReadFrom(buf) + if err != nil { + return + } + _, _ = backend.WriteTo(buf[:n], addr) + } + }() + + listener, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + defer listener.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewEntry(log.StandardLogger()) + dialFunc := func(ctx context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + } + + relay := New(ctx, RelayConfig{Logger: logger, Listener: listener, Target: backend.LocalAddr().String(), DialFunc: dialFunc}) + go relay.Serve() + defer relay.Close() + + // Create a session. + client, err := net.Dial("udp", listener.LocalAddr().String()) + require.NoError(t, err) + _, err = client.Write([]byte("hello")) + require.NoError(t, err) + + if err := client.SetReadDeadline(time.Now().Add(2 * time.Second)); err != nil { + t.Fatal(err) + } + buf := make([]byte, 1024) + _, err = client.Read(buf) + require.NoError(t, err) + client.Close() + + // Verify session exists. + relay.mu.RLock() + assert.Equal(t, 1, len(relay.sessions)) + relay.mu.RUnlock() + + // Make session appear idle by setting lastSeen to the past. + relay.mu.Lock() + for _, sess := range relay.sessions { + sess.lastSeen.Store(time.Now().Add(-2 * DefaultSessionTTL).UnixNano()) + } + relay.mu.Unlock() + + // Trigger cleanup manually. + relay.cleanupIdleSessions() + + relay.mu.RLock() + assert.Equal(t, 0, len(relay.sessions), "idle sessions should be cleaned up") + relay.mu.RUnlock() +} + +// TestRelay_CloseAndRecreate verifies that closing a relay and creating a new +// one on the same port works cleanly (simulates port mapping modify cycle). +func TestRelay_CloseAndRecreate(t *testing.T) { + backend, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + defer backend.Close() + + go func() { + buf := make([]byte, 65535) + for { + n, addr, err := backend.ReadFrom(buf) + if err != nil { + return + } + _, _ = backend.WriteTo(buf[:n], addr) + } + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewEntry(log.StandardLogger()) + dialFunc := func(ctx context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + } + + // First relay. + ln1, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + + relay1 := New(ctx, RelayConfig{Logger: logger, Listener: ln1, Target: backend.LocalAddr().String(), DialFunc: dialFunc}) + go relay1.Serve() + + client1, err := net.Dial("udp", ln1.LocalAddr().String()) + require.NoError(t, err) + _, err = client1.Write([]byte("relay1")) + require.NoError(t, err) + require.NoError(t, client1.SetReadDeadline(time.Now().Add(2*time.Second))) + buf := make([]byte, 1024) + n, err := client1.Read(buf) + require.NoError(t, err) + assert.Equal(t, "relay1", string(buf[:n])) + client1.Close() + + // Close first relay. + relay1.Close() + + // Second relay on same port. + port := ln1.LocalAddr().(*net.UDPAddr).Port + ln2, err := net.ListenPacket("udp", fmt.Sprintf("127.0.0.1:%d", port)) + require.NoError(t, err) + + relay2 := New(ctx, RelayConfig{Logger: logger, Listener: ln2, Target: backend.LocalAddr().String(), DialFunc: dialFunc}) + go relay2.Serve() + defer relay2.Close() + + client2, err := net.Dial("udp", ln2.LocalAddr().String()) + require.NoError(t, err) + defer client2.Close() + _, err = client2.Write([]byte("relay2")) + require.NoError(t, err) + require.NoError(t, client2.SetReadDeadline(time.Now().Add(2*time.Second))) + n, err = client2.Read(buf) + require.NoError(t, err) + assert.Equal(t, "relay2", string(buf[:n]), "second relay should work on same port") +} + +func TestRelay_SessionLimit(t *testing.T) { + backend, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + defer backend.Close() + + go func() { + buf := make([]byte, 65535) + for { + n, addr, err := backend.ReadFrom(buf) + if err != nil { + return + } + _, _ = backend.WriteTo(buf[:n], addr) + } + }() + + listener, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + defer listener.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewEntry(log.StandardLogger()) + dialFunc := func(ctx context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + } + + // Create a relay with a max of 2 sessions. + relay := New(ctx, RelayConfig{Logger: logger, Listener: listener, Target: backend.LocalAddr().String(), DialFunc: dialFunc, MaxSessions: 2}) + go relay.Serve() + defer relay.Close() + + // Create 2 clients to fill up the session limit. + for i := range 2 { + client, err := net.Dial("udp", listener.LocalAddr().String()) + require.NoError(t, err, "client %d", i) + defer client.Close() + + _, err = client.Write([]byte("hello")) + require.NoError(t, err) + + require.NoError(t, client.SetReadDeadline(time.Now().Add(2*time.Second))) + buf := make([]byte, 1024) + _, err = client.Read(buf) + require.NoError(t, err, "client %d should get response", i) + } + + relay.mu.RLock() + assert.Equal(t, 2, len(relay.sessions), "should have exactly 2 sessions") + relay.mu.RUnlock() + + // Third client should get its packet dropped (session creation fails). + client3, err := net.Dial("udp", listener.LocalAddr().String()) + require.NoError(t, err) + defer client3.Close() + + _, err = client3.Write([]byte("should be dropped")) + require.NoError(t, err) + + require.NoError(t, client3.SetReadDeadline(time.Now().Add(500*time.Millisecond))) + buf := make([]byte, 1024) + _, err = client3.Read(buf) + assert.Error(t, err, "third client should time out because session was rejected") + + relay.mu.RLock() + assert.Equal(t, 2, len(relay.sessions), "session count should not exceed limit") + relay.mu.RUnlock() +} + +// testObserver records UDP session lifecycle events for test assertions. +type testObserver struct { + mu sync.Mutex + started int + ended int + rejected int + dialErr int + packets int + bytes int +} + +func (o *testObserver) UDPSessionStarted(types.AccountID) { o.mu.Lock(); o.started++; o.mu.Unlock() } +func (o *testObserver) UDPSessionEnded(types.AccountID) { o.mu.Lock(); o.ended++; o.mu.Unlock() } +func (o *testObserver) UDPSessionDialError(types.AccountID) { o.mu.Lock(); o.dialErr++; o.mu.Unlock() } +func (o *testObserver) UDPSessionRejected(types.AccountID) { o.mu.Lock(); o.rejected++; o.mu.Unlock() } +func (o *testObserver) UDPPacketRelayed(_ types.RelayDirection, b int) { + o.mu.Lock() + o.packets++ + o.bytes += b + o.mu.Unlock() +} + +func TestRelay_CloseFiresObserverEnded(t *testing.T) { + backend, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + defer backend.Close() + + go func() { + buf := make([]byte, 65535) + for { + n, addr, err := backend.ReadFrom(buf) + if err != nil { + return + } + _, _ = backend.WriteTo(buf[:n], addr) + } + }() + + listener, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + defer listener.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewEntry(log.StandardLogger()) + dialFunc := func(ctx context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + } + + obs := &testObserver{} + relay := New(ctx, RelayConfig{Logger: logger, Listener: listener, Target: backend.LocalAddr().String(), AccountID: "test-acct", DialFunc: dialFunc}) + relay.SetObserver(obs) + go relay.Serve() + + // Create two sessions. + for i := range 2 { + client, err := net.Dial("udp", listener.LocalAddr().String()) + require.NoError(t, err, "client %d", i) + + _, err = client.Write([]byte("hello")) + require.NoError(t, err) + + require.NoError(t, client.SetReadDeadline(time.Now().Add(2*time.Second))) + buf := make([]byte, 1024) + _, err = client.Read(buf) + require.NoError(t, err) + client.Close() + } + + obs.mu.Lock() + assert.Equal(t, 2, obs.started, "should have 2 started events") + obs.mu.Unlock() + + // Close should fire UDPSessionEnded for all remaining sessions. + relay.Close() + + obs.mu.Lock() + assert.Equal(t, 2, obs.ended, "Close should fire UDPSessionEnded for each session") + obs.mu.Unlock() +} + +func TestRelay_SessionRateLimit(t *testing.T) { + backend, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + defer backend.Close() + + go func() { + buf := make([]byte, 65535) + for { + n, addr, err := backend.ReadFrom(buf) + if err != nil { + return + } + _, _ = backend.WriteTo(buf[:n], addr) + } + }() + + listener, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + defer listener.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewEntry(log.StandardLogger()) + dialFunc := func(ctx context.Context, network, address string) (net.Conn, error) { + return net.Dial(network, address) + } + + obs := &testObserver{} + // High max sessions (1000) but the relay uses a rate limiter internally + // (default: 50/s burst 100). We exhaust the burst by creating sessions + // rapidly, then verify that subsequent creates are rejected. + relay := New(ctx, RelayConfig{Logger: logger, Listener: listener, Target: backend.LocalAddr().String(), AccountID: "test-acct", DialFunc: dialFunc, MaxSessions: 1000}) + relay.SetObserver(obs) + go relay.Serve() + defer relay.Close() + + // Exhaust the burst by calling getOrCreateSession directly with + // synthetic addresses. This is faster than real UDP round-trips. + for i := range sessionCreateBurst + 20 { + addr := &net.UDPAddr{IP: net.IPv4(10, 0, byte(i/256), byte(i%256)), Port: 10000 + i} + _, _ = relay.getOrCreateSession(addr) + } + + obs.mu.Lock() + rejected := obs.rejected + obs.mu.Unlock() + + assert.Greater(t, rejected, 0, "some sessions should be rate-limited") +} diff --git a/proxy/management_integration_test.go b/proxy/management_integration_test.go index 6a0ecce30..ebecfc6f6 100644 --- a/proxy/management_integration_test.go +++ b/proxy/management_integration_test.go @@ -243,6 +243,10 @@ func (c *testProxyController) GetProxiesForCluster(_ string) []string { return nil } +func (c *testProxyController) ClusterSupportsCustomPorts(_ string) *bool { + return nil +} + // storeBackedServiceManager reads directly from the real store. type storeBackedServiceManager struct { store store.Store @@ -505,15 +509,15 @@ func TestIntegration_ProxyConnection_ReconnectDoesNotDuplicateState(t *testing.T nil, "", 0, - mapping.GetAccountId(), - mapping.GetId(), + proxytypes.AccountID(mapping.GetAccountId()), + proxytypes.ServiceID(mapping.GetId()), ) require.NoError(t, err) // Apply to real proxy (idempotent) proxyHandler.AddMapping(proxy.Mapping{ Host: mapping.GetDomain(), - ID: mapping.GetId(), + ID: proxytypes.ServiceID(mapping.GetId()), AccountID: proxytypes.AccountID(mapping.GetAccountId()), }) } diff --git a/proxy/server.go b/proxy/server.go index 62e8368e6..649d49c9a 100644 --- a/proxy/server.go +++ b/proxy/server.go @@ -30,6 +30,7 @@ import ( log "github.com/sirupsen/logrus" "go.opentelemetry.io/otel/exporters/prometheus" "go.opentelemetry.io/otel/sdk/metric" + "golang.org/x/exp/maps" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" @@ -46,15 +47,26 @@ import ( "github.com/netbirdio/netbird/proxy/internal/health" "github.com/netbirdio/netbird/proxy/internal/k8s" proxymetrics "github.com/netbirdio/netbird/proxy/internal/metrics" + "github.com/netbirdio/netbird/proxy/internal/netutil" "github.com/netbirdio/netbird/proxy/internal/proxy" "github.com/netbirdio/netbird/proxy/internal/roundtrip" + nbtcp "github.com/netbirdio/netbird/proxy/internal/tcp" "github.com/netbirdio/netbird/proxy/internal/types" + udprelay "github.com/netbirdio/netbird/proxy/internal/udp" "github.com/netbirdio/netbird/proxy/web" "github.com/netbirdio/netbird/shared/management/domain" "github.com/netbirdio/netbird/shared/management/proto" "github.com/netbirdio/netbird/util/embeddedroots" ) + +// portRouter bundles a per-port Router with its listener and cancel func. +type portRouter struct { + router *nbtcp.Router + listener net.Listener + cancel context.CancelFunc +} + type Server struct { mgmtClient proto.ProxyServiceClient proxy *proxy.ReverseProxy @@ -67,12 +79,27 @@ type Server struct { healthServer *health.Server healthChecker *health.Checker meter *proxymetrics.Metrics + accessLog *accesslog.Logger + mainRouter *nbtcp.Router + mainPort uint16 + udpMu sync.Mutex + udpRelays map[types.ServiceID]*udprelay.Relay + udpRelayWg sync.WaitGroup + portMu sync.RWMutex + portRouters map[uint16]*portRouter + svcPorts map[types.ServiceID][]uint16 + lastMappings map[types.ServiceID]*proto.ProxyMapping + portRouterWg sync.WaitGroup // hijackTracker tracks hijacked connections (e.g. WebSocket upgrades) // so they can be closed during graceful shutdown, since http.Server.Shutdown // does not handle them. hijackTracker conntrack.HijackTracker + // routerReady is closed once mainRouter is fully initialized. + // The mapping worker waits on this before processing updates. + routerReady chan struct{} + // Mostly used for debugging on management. startTime time.Time @@ -118,28 +145,36 @@ type Server struct { // When set, forwarding headers from these sources are preserved and // appended to instead of being stripped. TrustedProxies []netip.Prefix - // WireguardPort is the port for the WireGuard interface. Use 0 for a - // random OS-assigned port. A fixed port only works with single-account - // deployments; multiple accounts will fail to bind the same port. - WireguardPort int + // WireguardPort is the port for the NetBird tunnel interface. Use 0 + // for a random OS-assigned port. A fixed port only works with + // single-account deployments; multiple accounts will fail to bind + // the same port. + WireguardPort uint16 // ProxyProtocol enables PROXY protocol (v1/v2) on TCP listeners. // When enabled, the real client IP is extracted from the PROXY header // sent by upstream L4 proxies that support PROXY protocol. ProxyProtocol bool // PreSharedKey used for tunnel between proxy and peers (set globally not per account) PreSharedKey string + // SupportsCustomPorts indicates whether the proxy can bind arbitrary + // ports for TCP/UDP/TLS services. + SupportsCustomPorts bool + // DefaultDialTimeout is the default timeout for establishing backend + // connections when no per-service timeout is configured. Zero means + // each transport uses its own hardcoded default (typically 30s). + DefaultDialTimeout time.Duration } -// NotifyStatus sends a status update to management about tunnel connectivity -func (s *Server) NotifyStatus(ctx context.Context, accountID, serviceID, domain string, connected bool) error { +// NotifyStatus sends a status update to management about tunnel connectivity. +func (s *Server) NotifyStatus(ctx context.Context, accountID types.AccountID, serviceID types.ServiceID, connected bool) error { status := proto.ProxyStatus_PROXY_STATUS_TUNNEL_NOT_CREATED if connected { status = proto.ProxyStatus_PROXY_STATUS_ACTIVE } _, err := s.mgmtClient.SendStatusUpdate(ctx, &proto.SendStatusUpdateRequest{ - ServiceId: serviceID, - AccountId: accountID, + ServiceId: string(serviceID), + AccountId: string(accountID), Status: status, CertificateIssued: false, }) @@ -147,10 +182,10 @@ func (s *Server) NotifyStatus(ctx context.Context, accountID, serviceID, domain } // NotifyCertificateIssued sends a notification to management that a certificate was issued -func (s *Server) NotifyCertificateIssued(ctx context.Context, accountID, serviceID, domain string) error { +func (s *Server) NotifyCertificateIssued(ctx context.Context, accountID types.AccountID, serviceID types.ServiceID, domain string) error { _, err := s.mgmtClient.SendStatusUpdate(ctx, &proto.SendStatusUpdateRequest{ - ServiceId: serviceID, - AccountId: accountID, + ServiceId: string(serviceID), + AccountId: string(accountID), Status: proto.ProxyStatus_PROXY_STATUS_ACTIVE, CertificateIssued: true, }) @@ -159,6 +194,11 @@ func (s *Server) NotifyCertificateIssued(ctx context.Context, accountID, service func (s *Server) ListenAndServe(ctx context.Context, addr string) (err error) { s.initDefaults() + s.routerReady = make(chan struct{}) + s.udpRelays = make(map[types.ServiceID]*udprelay.Relay) + s.portRouters = make(map[uint16]*portRouter) + s.svcPorts = make(map[types.ServiceID][]uint16) + s.lastMappings = make(map[types.ServiceID]*proto.ProxyMapping) exporter, err := prometheus.New() if err != nil { @@ -184,7 +224,9 @@ func (s *Server) ListenAndServe(ctx context.Context, addr string) (err error) { } }() s.mgmtClient = proto.NewProxyServiceClient(mgmtConn) - go s.newManagementMappingWorker(ctx, s.mgmtClient) + runCtx, runCancel := context.WithCancel(ctx) + defer runCancel() + go s.newManagementMappingWorker(runCtx, s.mgmtClient) // Initialize the netbird client, this is required to build peer connections // to proxy over. @@ -206,7 +248,7 @@ func (s *Server) ListenAndServe(ctx context.Context, addr string) (err error) { s.auth = auth.NewMiddleware(s.Logger, s.mgmtClient) // Configure Access logs to management server. - accessLog := accesslog.NewLogger(s.mgmtClient, s.Logger, s.TrustedProxies) + s.accessLog = accesslog.NewLogger(s.mgmtClient, s.Logger, s.TrustedProxies) s.healthChecker = health.NewChecker(s.Logger, s.netbird) @@ -220,18 +262,12 @@ func (s *Server) ListenAndServe(ctx context.Context, addr string) (err error) { handler := http.Handler(s.proxy) handler = s.auth.Protect(handler) handler = web.AssetHandler(handler) - handler = accessLog.Middleware(handler) + handler = s.accessLog.Middleware(handler) handler = s.meter.Middleware(handler) handler = s.hijackTracker.Middleware(handler) - // Start the reverse proxy HTTPS server. - s.https = &http.Server{ - Addr: addr, - Handler: handler, - TLSConfig: tlsConfig, - ErrorLog: newHTTPServerLogger(s.Logger, logtagValueHTTPS), - } - + // Start a raw TCP listener; the SNI router peeks at ClientHello + // and routes to either the HTTP handler or a TCP relay. lc := net.ListenConfig{} ln, err := lc.Listen(ctx, "tcp", addr) if err != nil { @@ -240,11 +276,34 @@ func (s *Server) ListenAndServe(ctx context.Context, addr string) (err error) { if s.ProxyProtocol { ln = s.wrapProxyProtocol(ln) } + s.mainPort = uint16(ln.Addr().(*net.TCPAddr).Port) //nolint:gosec // port from OS is always valid + + // Set up the SNI router for TCP/HTTP multiplexing on the main port. + s.mainRouter = nbtcp.NewRouter(s.Logger, s.resolveDialFunc, ln.Addr()) + s.mainRouter.SetObserver(s.meter) + s.mainRouter.SetAccessLogger(s.accessLog) + close(s.routerReady) + + // The HTTP server uses the chanListener fed by the SNI router. + s.https = &http.Server{ + Addr: addr, + Handler: handler, + TLSConfig: tlsConfig, + ReadHeaderTimeout: httpReadHeaderTimeout, + IdleTimeout: httpIdleTimeout, + ErrorLog: newHTTPServerLogger(s.Logger, logtagValueHTTPS), + } httpsErr := make(chan error, 1) go func() { - s.Logger.Debugf("starting reverse proxy server on %s", addr) - httpsErr <- s.https.ServeTLS(ln, "", "") + s.Logger.Debug("starting HTTPS server on SNI router HTTP channel") + httpsErr <- s.https.ServeTLS(s.mainRouter.HTTPListener(), "", "") + }() + + routerErr := make(chan error, 1) + go func() { + s.Logger.Debugf("starting SNI router on %s", addr) + routerErr <- s.mainRouter.Serve(runCtx, ln) }() select { @@ -254,6 +313,12 @@ func (s *Server) ListenAndServe(ctx context.Context, addr string) (err error) { return fmt.Errorf("https server: %w", err) } return nil + case err := <-routerErr: + s.shutdownServices() + if err != nil { + return fmt.Errorf("SNI router: %w", err) + } + return nil case <-ctx.Done(): s.gracefulShutdown() return nil @@ -381,6 +446,13 @@ const ( // shutdownServiceTimeout is the maximum time to wait for auxiliary // services (health probe, debug endpoint, ACME) to shut down. shutdownServiceTimeout = 5 * time.Second + + // httpReadHeaderTimeout limits how long the server waits to read + // request headers after accepting a connection. Prevents slowloris. + httpReadHeaderTimeout = 10 * time.Second + // httpIdleTimeout limits how long an idle keep-alive connection + // stays open before the server closes it. + httpIdleTimeout = 120 * time.Second ) func (s *Server) dialManagement() (*grpc.ClientConn, error) { @@ -518,6 +590,9 @@ func (s *Server) gracefulShutdown() { s.Logger.Infof("closed %d hijacked connection(s)", n) } + // Drain all router relay connections (main + per-port) in parallel. + s.drainAllRouters(shutdownDrainTimeout) + // Step 5: Stop all remaining background services. s.shutdownServices() s.Logger.Info("graceful shutdown complete") @@ -525,6 +600,34 @@ func (s *Server) gracefulShutdown() { // shutdownServices stops all background services concurrently and waits for // them to finish. +// drainAllRouters drains active relay connections on the main router and +// all per-port routers in parallel, up to the given timeout. +func (s *Server) drainAllRouters(timeout time.Duration) { + var wg sync.WaitGroup + + drain := func(name string, router *nbtcp.Router) { + wg.Add(1) + go func() { + defer wg.Done() + if ok := router.Drain(timeout); !ok { + s.Logger.Warnf("timed out draining %s relay connections", name) + } + }() + } + + if s.mainRouter != nil { + drain("main router", s.mainRouter) + } + + s.portMu.RLock() + for port, pr := range s.portRouters { + drain(fmt.Sprintf("port %d", port), pr.router) + } + s.portMu.RUnlock() + + wg.Wait() +} + func (s *Server) shutdownServices() { var wg sync.WaitGroup @@ -562,9 +665,165 @@ func (s *Server) shutdownServices() { }() } + // Close all UDP relays and wait for their goroutines to exit. + s.udpMu.Lock() + for id, relay := range s.udpRelays { + relay.Close() + delete(s.udpRelays, id) + } + s.udpMu.Unlock() + s.udpRelayWg.Wait() + + // Close all per-port routers. + s.portMu.Lock() + for port, pr := range s.portRouters { + pr.cancel() + if err := pr.listener.Close(); err != nil { + s.Logger.Debugf("close listener on port %d: %v", port, err) + } + delete(s.portRouters, port) + } + maps.Clear(s.svcPorts) + maps.Clear(s.lastMappings) + s.portMu.Unlock() + + // Wait for per-port router serve goroutines to exit. + s.portRouterWg.Wait() + wg.Wait() } +// resolveDialFunc returns a DialContextFunc that dials through the +// NetBird tunnel for the given account. +func (s *Server) resolveDialFunc(accountID types.AccountID) (types.DialContextFunc, error) { + client, ok := s.netbird.GetClient(accountID) + if !ok { + return nil, fmt.Errorf("no client for account %s", accountID) + } + return client.DialContext, nil +} + +// notifyError reports a resource error back to management so it can be +// surfaced to the user (e.g. port bind failure, dialer resolution error). +func (s *Server) notifyError(ctx context.Context, mapping *proto.ProxyMapping, err error) { + s.sendStatusUpdate(ctx, types.AccountID(mapping.GetAccountId()), types.ServiceID(mapping.GetId()), proto.ProxyStatus_PROXY_STATUS_ERROR, err) +} + +// sendStatusUpdate sends a status update for a service to management. +func (s *Server) sendStatusUpdate(ctx context.Context, accountID types.AccountID, serviceID types.ServiceID, st proto.ProxyStatus, err error) { + req := &proto.SendStatusUpdateRequest{ + ServiceId: string(serviceID), + AccountId: string(accountID), + Status: st, + } + if err != nil { + msg := err.Error() + req.ErrorMessage = &msg + } + if _, sendErr := s.mgmtClient.SendStatusUpdate(ctx, req); sendErr != nil { + s.Logger.Debugf("failed to send status update for %s: %v", serviceID, sendErr) + } +} + +// routerForPort returns the router that handles the given listen port. If port +// is 0 or matches the main listener port, the main router is returned. +// Otherwise a new per-port router is created and started. +func (s *Server) routerForPort(ctx context.Context, port uint16) (*nbtcp.Router, error) { + if port == 0 || port == s.mainPort { + return s.mainRouter, nil + } + return s.getOrCreatePortRouter(ctx, port) +} + +// routerForPortExisting returns the router for the given port without creating +// one. Returns the main router for port 0 / mainPort, or nil if no per-port +// router exists. +func (s *Server) routerForPortExisting(port uint16) *nbtcp.Router { + if port == 0 || port == s.mainPort { + return s.mainRouter + } + s.portMu.RLock() + pr := s.portRouters[port] + s.portMu.RUnlock() + if pr != nil { + return pr.router + } + return nil +} + +// getOrCreatePortRouter returns an existing per-port router or creates one +// with a new TCP listener and starts serving. +func (s *Server) getOrCreatePortRouter(ctx context.Context, port uint16) (*nbtcp.Router, error) { + s.portMu.Lock() + defer s.portMu.Unlock() + + if pr, ok := s.portRouters[port]; ok { + return pr.router, nil + } + + listenAddr := fmt.Sprintf(":%d", port) + ln, err := net.Listen("tcp", listenAddr) + if err != nil { + return nil, fmt.Errorf("listen TCP on %s: %w", listenAddr, err) + } + if s.ProxyProtocol { + ln = s.wrapProxyProtocol(ln) + } + + router := nbtcp.NewPortRouter(s.Logger, s.resolveDialFunc) + router.SetObserver(s.meter) + router.SetAccessLogger(s.accessLog) + portCtx, cancel := context.WithCancel(ctx) + + s.portRouters[port] = &portRouter{ + router: router, + listener: ln, + cancel: cancel, + } + + s.portRouterWg.Add(1) + go func() { + defer s.portRouterWg.Done() + if err := router.Serve(portCtx, ln); err != nil { + s.Logger.Debugf("port %d router stopped: %v", port, err) + } + }() + + s.Logger.Debugf("started per-port router on %s", listenAddr) + return router, nil +} + +// cleanupPortIfEmpty tears down a per-port router if it has no remaining +// routes or fallback. The main port is never cleaned up. Active relay +// connections are drained before the listener is closed. +func (s *Server) cleanupPortIfEmpty(port uint16) { + if port == 0 || port == s.mainPort { + return + } + + s.portMu.Lock() + pr, ok := s.portRouters[port] + if !ok || !pr.router.IsEmpty() { + s.portMu.Unlock() + return + } + + // Cancel and close the listener while holding the lock so that + // getOrCreatePortRouter sees the entry is gone before we drain. + pr.cancel() + if err := pr.listener.Close(); err != nil { + s.Logger.Debugf("close listener on port %d: %v", port, err) + } + delete(s.portRouters, port) + s.portMu.Unlock() + + // Drain active relay connections outside the lock. + if ok := pr.router.Drain(nbtcp.DefaultDrainTimeout); !ok { + s.Logger.Warnf("timed out draining relay connections on port %d", port) + } + s.Logger.Debugf("cleaned up empty per-port router on port %d", port) +} + func (s *Server) newManagementMappingWorker(ctx context.Context, client proto.ProxyServiceClient) { bo := &backoff.ExponentialBackOff{ InitialInterval: 800 * time.Millisecond, @@ -590,6 +849,9 @@ func (s *Server) newManagementMappingWorker(ctx context.Context, client proto.Pr Version: s.Version, StartedAt: timestamppb.New(s.startTime), Address: s.ProxyURL, + Capabilities: &proto.ProxyCapabilities{ + SupportsCustomPorts: &s.SupportsCustomPorts, + }, }) if err != nil { return fmt.Errorf("create mapping stream: %w", err) @@ -626,6 +888,12 @@ func (s *Server) newManagementMappingWorker(ctx context.Context, client proto.Pr } func (s *Server) handleMappingStream(ctx context.Context, mappingClient proto.ProxyService_GetMappingUpdateClient, initialSyncDone *bool) error { + select { + case <-s.routerReady: + case <-ctx.Done(): + return ctx.Err() + } + for { // Check for context completion to gracefully shutdown. select { @@ -662,25 +930,28 @@ func (s *Server) processMappings(ctx context.Context, mappings []*proto.ProxyMap s.Logger.WithFields(log.Fields{ "type": mapping.GetType(), "domain": mapping.GetDomain(), - "path": mapping.GetPath(), + "mode": mapping.GetMode(), + "port": mapping.GetListenPort(), "id": mapping.GetId(), }).Debug("Processing mapping update") switch mapping.GetType() { case proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED: if err := s.addMapping(ctx, mapping); err != nil { - // TODO: Retry this? Or maybe notify the management server that this mapping has failed? s.Logger.WithFields(log.Fields{ "service_id": mapping.GetId(), "domain": mapping.GetDomain(), "error": err, }).Error("Error adding new mapping, ignoring this mapping and continuing processing") + s.notifyError(ctx, mapping, err) } case proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED: - if err := s.updateMapping(ctx, mapping); err != nil { + if err := s.modifyMapping(ctx, mapping); err != nil { s.Logger.WithFields(log.Fields{ "service_id": mapping.GetId(), "domain": mapping.GetDomain(), - }).Errorf("failed to update mapping: %v", err) + "error": err, + }).Error("failed to modify mapping") + s.notifyError(ctx, mapping, err) } case proto.ProxyMappingUpdateType_UPDATE_TYPE_REMOVED: s.removeMapping(ctx, mapping) @@ -688,30 +959,89 @@ func (s *Server) processMappings(ctx context.Context, mappings []*proto.ProxyMap } } +// addMapping registers a service mapping and starts the appropriate relay or routes. func (s *Server) addMapping(ctx context.Context, mapping *proto.ProxyMapping) error { - d := domain.Domain(mapping.GetDomain()) accountID := types.AccountID(mapping.GetAccountId()) - serviceID := mapping.GetId() + svcID := types.ServiceID(mapping.GetId()) authToken := mapping.GetAuthToken() - if err := s.netbird.AddPeer(ctx, accountID, d, authToken, serviceID); err != nil { - return fmt.Errorf("create peer for domain %q: %w", d, err) - } - var wildcardHit bool - if s.acme != nil { - wildcardHit = s.acme.AddDomain(d, string(accountID), serviceID) + svcKey := s.serviceKeyForMapping(mapping) + if err := s.netbird.AddPeer(ctx, accountID, svcKey, authToken, svcID); err != nil { + return fmt.Errorf("create peer for service %s: %w", svcID, err) } - // Pass the mapping through to the update function to avoid duplicating the - // setup, currently update is simply a subset of this function, so this - // separation makes sense...to me at least. + if err := s.setupMappingRoutes(ctx, mapping); err != nil { + s.cleanupMappingRoutes(mapping) + if peerErr := s.netbird.RemovePeer(ctx, accountID, svcKey); peerErr != nil { + s.Logger.WithError(peerErr).WithField("service_id", svcID).Warn("failed to remove peer after setup failure") + } + return err + } + s.storeMapping(mapping) + return nil +} + +// modifyMapping updates a service mapping in place without tearing down the +// NetBird peer. It cleans up old routes using the previously stored mapping +// state and re-applies them from the new mapping. +func (s *Server) modifyMapping(ctx context.Context, mapping *proto.ProxyMapping) error { + if old := s.loadMapping(types.ServiceID(mapping.GetId())); old != nil { + s.cleanupMappingRoutes(old) + if mode := types.ServiceMode(old.GetMode()); mode.IsL4() { + s.meter.L4ServiceRemoved(mode) + } + } else { + s.cleanupMappingRoutes(mapping) + } + if err := s.setupMappingRoutes(ctx, mapping); err != nil { + s.cleanupMappingRoutes(mapping) + return err + } + s.storeMapping(mapping) + return nil +} + +// setupMappingRoutes configures the appropriate routes or relays for the given +// service mapping based on its mode. The NetBird peer must already exist. +func (s *Server) setupMappingRoutes(ctx context.Context, mapping *proto.ProxyMapping) error { + switch types.ServiceMode(mapping.GetMode()) { + case types.ServiceModeTCP: + return s.setupTCPMapping(ctx, mapping) + case types.ServiceModeUDP: + return s.setupUDPMapping(ctx, mapping) + case types.ServiceModeTLS: + return s.setupTLSMapping(ctx, mapping) + default: + return s.setupHTTPMapping(ctx, mapping) + } +} + +// setupHTTPMapping configures HTTP reverse proxy, auth, and ACME routes. +func (s *Server) setupHTTPMapping(ctx context.Context, mapping *proto.ProxyMapping) error { + d := domain.Domain(mapping.GetDomain()) + accountID := types.AccountID(mapping.GetAccountId()) + svcID := types.ServiceID(mapping.GetId()) + + if len(mapping.GetPath()) == 0 { + return nil + } + + var wildcardHit bool + if s.acme != nil { + wildcardHit = s.acme.AddDomain(d, accountID, svcID) + } + s.mainRouter.AddRoute(nbtcp.SNIHost(mapping.GetDomain()), nbtcp.Route{ + Type: nbtcp.RouteHTTP, + AccountID: accountID, + ServiceID: svcID, + Domain: mapping.GetDomain(), + }) if err := s.updateMapping(ctx, mapping); err != nil { - s.removeMapping(ctx, mapping) return fmt.Errorf("update mapping for domain %q: %w", d, err) } if wildcardHit { - if err := s.NotifyCertificateIssued(ctx, string(accountID), serviceID, string(d)); err != nil { + if err := s.NotifyCertificateIssued(ctx, accountID, svcID, string(d)); err != nil { s.Logger.Warnf("notify certificate ready for domain %q: %v", d, err) } } @@ -719,56 +1049,386 @@ func (s *Server) addMapping(ctx context.Context, mapping *proto.ProxyMapping) er return nil } +// setupTCPMapping sets up a TCP port-forwarding fallback route on the listen port. +func (s *Server) setupTCPMapping(ctx context.Context, mapping *proto.ProxyMapping) error { + svcID := types.ServiceID(mapping.GetId()) + accountID := types.AccountID(mapping.GetAccountId()) + + port, err := netutil.ValidatePort(mapping.GetListenPort()) + if err != nil { + return fmt.Errorf("TCP service %s: %w", svcID, err) + } + + targetAddr := s.l4TargetAddress(mapping) + if targetAddr == "" { + return fmt.Errorf("empty target address for TCP service %s", svcID) + } + + if s.WireguardPort != 0 && port == s.WireguardPort { + return fmt.Errorf("port %d conflicts with tunnel port", port) + } + + router, err := s.routerForPort(ctx, port) + if err != nil { + return fmt.Errorf("router for TCP port %d: %w", port, err) + } + + router.SetFallback(nbtcp.Route{ + Type: nbtcp.RouteTCP, + AccountID: accountID, + ServiceID: svcID, + Domain: mapping.GetDomain(), + Protocol: accesslog.ProtocolTCP, + Target: targetAddr, + ProxyProtocol: s.l4ProxyProtocol(mapping), + DialTimeout: s.l4DialTimeout(mapping), + }) + + s.portMu.Lock() + s.svcPorts[svcID] = []uint16{port} + s.portMu.Unlock() + + s.meter.L4ServiceAdded(types.ServiceModeTCP) + s.sendStatusUpdate(ctx, accountID, svcID, proto.ProxyStatus_PROXY_STATUS_ACTIVE, nil) + return nil +} + +// setupUDPMapping starts a UDP relay on the listen port. +func (s *Server) setupUDPMapping(ctx context.Context, mapping *proto.ProxyMapping) error { + svcID := types.ServiceID(mapping.GetId()) + accountID := types.AccountID(mapping.GetAccountId()) + + port, err := netutil.ValidatePort(mapping.GetListenPort()) + if err != nil { + return fmt.Errorf("UDP service %s: %w", svcID, err) + } + + targetAddr := s.l4TargetAddress(mapping) + if targetAddr == "" { + return fmt.Errorf("empty target address for UDP service %s", svcID) + } + + if err := s.addUDPRelay(ctx, mapping, targetAddr, port); err != nil { + return fmt.Errorf("UDP relay for service %s: %w", svcID, err) + } + + s.meter.L4ServiceAdded(types.ServiceModeUDP) + s.sendStatusUpdate(ctx, accountID, svcID, proto.ProxyStatus_PROXY_STATUS_ACTIVE, nil) + return nil +} + +// setupTLSMapping configures a TLS SNI-routed passthrough on the listen port. +func (s *Server) setupTLSMapping(ctx context.Context, mapping *proto.ProxyMapping) error { + svcID := types.ServiceID(mapping.GetId()) + accountID := types.AccountID(mapping.GetAccountId()) + + tlsPort, err := netutil.ValidatePort(mapping.GetListenPort()) + if err != nil { + return fmt.Errorf("TLS service %s: %w", svcID, err) + } + + targetAddr := s.l4TargetAddress(mapping) + if targetAddr == "" { + return fmt.Errorf("empty target address for TLS service %s", svcID) + } + + if s.WireguardPort != 0 && tlsPort == s.WireguardPort { + return fmt.Errorf("port %d conflicts with tunnel port", tlsPort) + } + + router, err := s.routerForPort(ctx, tlsPort) + if err != nil { + return fmt.Errorf("router for TLS port %d: %w", tlsPort, err) + } + + router.AddRoute(nbtcp.SNIHost(mapping.GetDomain()), nbtcp.Route{ + Type: nbtcp.RouteTCP, + AccountID: accountID, + ServiceID: svcID, + Domain: mapping.GetDomain(), + Protocol: accesslog.ProtocolTLS, + Target: targetAddr, + ProxyProtocol: s.l4ProxyProtocol(mapping), + DialTimeout: s.l4DialTimeout(mapping), + }) + + if tlsPort != s.mainPort { + s.portMu.Lock() + s.svcPorts[svcID] = []uint16{tlsPort} + s.portMu.Unlock() + } + + s.Logger.WithFields(log.Fields{ + "domain": mapping.GetDomain(), + "target": targetAddr, + "port": tlsPort, + "service": svcID, + }).Info("TLS passthrough mapping added") + + s.meter.L4ServiceAdded(types.ServiceModeTLS) + s.sendStatusUpdate(ctx, accountID, svcID, proto.ProxyStatus_PROXY_STATUS_ACTIVE, nil) + return nil +} + +// serviceKeyForMapping returns the appropriate ServiceKey for a mapping. +// TCP/UDP use an ID-based key; HTTP/TLS use a domain-based key. +func (s *Server) serviceKeyForMapping(mapping *proto.ProxyMapping) roundtrip.ServiceKey { + switch types.ServiceMode(mapping.GetMode()) { + case types.ServiceModeTCP, types.ServiceModeUDP: + return roundtrip.L4ServiceKey(types.ServiceID(mapping.GetId())) + default: + return roundtrip.DomainServiceKey(mapping.GetDomain()) + } +} + +// l4TargetAddress extracts and validates the target address from a mapping's +// first path entry. Returns empty string if no paths exist or the address is +// not a valid host:port. +func (s *Server) l4TargetAddress(mapping *proto.ProxyMapping) string { + paths := mapping.GetPath() + if len(paths) == 0 { + return "" + } + target := paths[0].GetTarget() + if _, _, err := net.SplitHostPort(target); err != nil { + s.Logger.WithFields(log.Fields{ + "service_id": mapping.GetId(), + "target": target, + }).Warnf("invalid L4 target address: %v", err) + return "" + } + return target +} + +// l4ProxyProtocol returns whether the first target has PROXY protocol enabled. +func (s *Server) l4ProxyProtocol(mapping *proto.ProxyMapping) bool { + paths := mapping.GetPath() + if len(paths) == 0 { + return false + } + return paths[0].GetOptions().GetProxyProtocol() +} + +// l4DialTimeout returns the dial timeout from the first target's options, +// falling back to the server's DefaultDialTimeout. +func (s *Server) l4DialTimeout(mapping *proto.ProxyMapping) time.Duration { + paths := mapping.GetPath() + if len(paths) > 0 { + if d := paths[0].GetOptions().GetRequestTimeout(); d != nil { + return d.AsDuration() + } + } + return s.DefaultDialTimeout +} + +// l4SessionIdleTimeout returns the configured session idle timeout from the +// mapping options, or 0 to use the relay's default. +func l4SessionIdleTimeout(mapping *proto.ProxyMapping) time.Duration { + paths := mapping.GetPath() + if len(paths) > 0 { + if d := paths[0].GetOptions().GetSessionIdleTimeout(); d != nil { + return d.AsDuration() + } + } + return 0 +} + +// addUDPRelay starts a UDP relay on the specified listen port. +func (s *Server) addUDPRelay(ctx context.Context, mapping *proto.ProxyMapping, targetAddress string, listenPort uint16) error { + svcID := types.ServiceID(mapping.GetId()) + accountID := types.AccountID(mapping.GetAccountId()) + + if s.WireguardPort != 0 && listenPort == s.WireguardPort { + return fmt.Errorf("UDP port %d conflicts with tunnel port", listenPort) + } + + // Close existing relay if present (idempotent re-add). + s.removeUDPRelay(svcID) + + listenAddr := fmt.Sprintf(":%d", listenPort) + + listener, err := net.ListenPacket("udp", listenAddr) + if err != nil { + return fmt.Errorf("listen UDP on %s: %w", listenAddr, err) + } + + dialFn, err := s.resolveDialFunc(accountID) + if err != nil { + _ = listener.Close() + return fmt.Errorf("resolve dialer for UDP: %w", err) + } + + entry := s.Logger.WithFields(log.Fields{ + "target": targetAddress, + "listen_port": listenPort, + "service_id": svcID, + }) + + relay := udprelay.New(ctx, udprelay.RelayConfig{ + Logger: entry, + Listener: listener, + Target: targetAddress, + Domain: mapping.GetDomain(), + AccountID: accountID, + ServiceID: svcID, + DialFunc: dialFn, + DialTimeout: s.l4DialTimeout(mapping), + SessionTTL: l4SessionIdleTimeout(mapping), + AccessLog: s.accessLog, + }) + relay.SetObserver(s.meter) + + s.udpMu.Lock() + s.udpRelays[svcID] = relay + s.udpMu.Unlock() + + s.udpRelayWg.Go(relay.Serve) + entry.Info("UDP relay added") + return nil +} + func (s *Server) updateMapping(ctx context.Context, mapping *proto.ProxyMapping) error { // Very simple implementation here, we don't touch the existing peer // connection or any existing TLS configuration, we simply overwrite // the auth and proxy mappings. // Note: this does require the management server to always send a // full mapping rather than deltas during a modification. + accountID := types.AccountID(mapping.GetAccountId()) + svcID := types.ServiceID(mapping.GetId()) + var schemes []auth.Scheme if mapping.GetAuth().GetPassword() { - schemes = append(schemes, auth.NewPassword(s.mgmtClient, mapping.GetId(), mapping.GetAccountId())) + schemes = append(schemes, auth.NewPassword(s.mgmtClient, svcID, accountID)) } if mapping.GetAuth().GetPin() { - schemes = append(schemes, auth.NewPin(s.mgmtClient, mapping.GetId(), mapping.GetAccountId())) + schemes = append(schemes, auth.NewPin(s.mgmtClient, svcID, accountID)) } if mapping.GetAuth().GetOidc() { - schemes = append(schemes, auth.NewOIDC(s.mgmtClient, mapping.GetId(), mapping.GetAccountId(), s.ForwardedProto)) + schemes = append(schemes, auth.NewOIDC(s.mgmtClient, svcID, accountID, s.ForwardedProto)) } maxSessionAge := time.Duration(mapping.GetAuth().GetMaxSessionAgeSeconds()) * time.Second - if err := s.auth.AddDomain(mapping.GetDomain(), schemes, mapping.GetAuth().GetSessionKey(), maxSessionAge, mapping.GetAccountId(), mapping.GetId()); err != nil { + if err := s.auth.AddDomain(mapping.GetDomain(), schemes, mapping.GetAuth().GetSessionKey(), maxSessionAge, accountID, svcID); err != nil { return fmt.Errorf("auth setup for domain %s: %w", mapping.GetDomain(), err) } - s.proxy.AddMapping(s.protoToMapping(mapping)) - s.meter.AddMapping(s.protoToMapping(mapping)) + m := s.protoToMapping(ctx, mapping) + s.proxy.AddMapping(m) + s.meter.AddMapping(m) return nil } +// removeMapping tears down routes/relays and the NetBird peer for a service. +// Uses the stored mapping state when available to ensure all previously +// configured routes are cleaned up. func (s *Server) removeMapping(ctx context.Context, mapping *proto.ProxyMapping) { - d := domain.Domain(mapping.GetDomain()) accountID := types.AccountID(mapping.GetAccountId()) - if err := s.netbird.RemovePeer(ctx, accountID, d); err != nil { + svcKey := s.serviceKeyForMapping(mapping) + if err := s.netbird.RemovePeer(ctx, accountID, svcKey); err != nil { s.Logger.WithFields(log.Fields{ "account_id": accountID, - "domain": d, + "service_id": mapping.GetId(), "error": err, - }).Error("Error removing NetBird peer connection for domain, continuing additional domain cleanup but peer connection may still exist") + }).Error("failed to remove NetBird peer, continuing cleanup") } - if s.acme != nil { - s.acme.RemoveDomain(d) + + if old := s.deleteMapping(types.ServiceID(mapping.GetId())); old != nil { + s.cleanupMappingRoutes(old) + if mode := types.ServiceMode(old.GetMode()); mode.IsL4() { + s.meter.L4ServiceRemoved(mode) + } + } else { + s.cleanupMappingRoutes(mapping) } - s.auth.RemoveDomain(mapping.GetDomain()) - s.proxy.RemoveMapping(s.protoToMapping(mapping)) - s.meter.RemoveMapping(s.protoToMapping(mapping)) } -func (s *Server) protoToMapping(mapping *proto.ProxyMapping) proxy.Mapping { +// cleanupMappingRoutes removes HTTP/TLS/L4 routes and custom port state for a +// service without touching the NetBird peer. This is used for both full +// removal and in-place modification of mappings. +func (s *Server) cleanupMappingRoutes(mapping *proto.ProxyMapping) { + svcID := types.ServiceID(mapping.GetId()) + host := mapping.GetDomain() + + // HTTP/TLS cleanup (only relevant when a domain is set). + if host != "" { + d := domain.Domain(host) + if s.acme != nil { + s.acme.RemoveDomain(d) + } + s.auth.RemoveDomain(host) + if s.proxy.RemoveMapping(proxy.Mapping{Host: host}) { + s.meter.RemoveMapping(proxy.Mapping{Host: host}) + } + // Close hijacked connections (WebSocket) for this domain. + if n := s.hijackTracker.CloseByHost(host); n > 0 { + s.Logger.Debugf("closed %d hijacked connection(s) for %s", n, host) + } + // Remove SNI route from the main router (covers both HTTP and main-port TLS). + s.mainRouter.RemoveRoute(nbtcp.SNIHost(host), svcID) + } + + // Extract and delete tracked custom-port entries atomically. + s.portMu.Lock() + entries := s.svcPorts[svcID] + delete(s.svcPorts, svcID) + s.portMu.Unlock() + + for _, entry := range entries { + if router := s.routerForPortExisting(entry); router != nil { + if host != "" { + router.RemoveRoute(nbtcp.SNIHost(host), svcID) + } else { + router.RemoveFallback(svcID) + } + } + s.cleanupPortIfEmpty(entry) + } + + // UDP relay cleanup (idempotent). + s.removeUDPRelay(svcID) + +} + +// removeUDPRelay stops and removes a UDP relay by service ID. +func (s *Server) removeUDPRelay(svcID types.ServiceID) { + s.udpMu.Lock() + relay, ok := s.udpRelays[svcID] + if ok { + delete(s.udpRelays, svcID) + } + s.udpMu.Unlock() + + if ok { + relay.Close() + s.Logger.WithField("service_id", svcID).Info("UDP relay removed") + } +} + +func (s *Server) storeMapping(mapping *proto.ProxyMapping) { + s.portMu.Lock() + s.lastMappings[types.ServiceID(mapping.GetId())] = mapping + s.portMu.Unlock() +} + +func (s *Server) loadMapping(svcID types.ServiceID) *proto.ProxyMapping { + s.portMu.RLock() + m := s.lastMappings[svcID] + s.portMu.RUnlock() + return m +} + +func (s *Server) deleteMapping(svcID types.ServiceID) *proto.ProxyMapping { + s.portMu.Lock() + m := s.lastMappings[svcID] + delete(s.lastMappings, svcID) + s.portMu.Unlock() + return m +} + +func (s *Server) protoToMapping(ctx context.Context, mapping *proto.ProxyMapping) proxy.Mapping { paths := make(map[string]*proxy.PathTarget) for _, pathMapping := range mapping.GetPath() { targetURL, err := url.Parse(pathMapping.GetTarget()) if err != nil { - // TODO: Should we warn management about this so it can be bubbled up to a user to reconfigure? s.Logger.WithFields(log.Fields{ "service_id": mapping.GetId(), "account_id": mapping.GetAccountId(), @@ -776,6 +1436,7 @@ func (s *Server) protoToMapping(mapping *proto.ProxyMapping) proxy.Mapping { "path": pathMapping.GetPath(), "target": pathMapping.GetTarget(), }).WithError(err).Error("failed to parse target URL for path, skipping") + s.notifyError(ctx, mapping, fmt.Errorf("invalid target URL %q for path %q: %w", pathMapping.GetTarget(), pathMapping.GetPath(), err)) continue } @@ -788,10 +1449,13 @@ func (s *Server) protoToMapping(mapping *proto.ProxyMapping) proxy.Mapping { pt.RequestTimeout = d.AsDuration() } } + if pt.RequestTimeout == 0 && s.DefaultDialTimeout > 0 { + pt.RequestTimeout = s.DefaultDialTimeout + } paths[pathMapping.GetPath()] = pt } return proxy.Mapping{ - ID: mapping.GetId(), + ID: types.ServiceID(mapping.GetId()), AccountID: types.AccountID(mapping.GetAccountId()), Host: mapping.GetDomain(), Paths: paths, diff --git a/shared/management/client/grpc.go b/shared/management/client/grpc.go index 9505b3fdf..333f0bf00 100644 --- a/shared/management/client/grpc.go +++ b/shared/management/client/grpc.go @@ -56,12 +56,14 @@ type ExposeRequest struct { Pin string Password string UserGroups []string + ListenPort uint16 } type ExposeResponse struct { - ServiceName string - Domain string - ServiceURL string + ServiceName string + Domain string + ServiceURL string + PortAutoAssigned bool } // NewClient creates a new client to Management service @@ -790,9 +792,10 @@ func (c *GrpcClient) StopExpose(ctx context.Context, domain string) error { func fromProtoExposeResponse(resp *proto.ExposeServiceResponse) *ExposeResponse { return &ExposeResponse{ - ServiceName: resp.ServiceName, - Domain: resp.Domain, - ServiceURL: resp.ServiceUrl, + ServiceName: resp.ServiceName, + Domain: resp.Domain, + ServiceURL: resp.ServiceUrl, + PortAutoAssigned: resp.PortAutoAssigned, } } @@ -808,6 +811,8 @@ func toProtoExposeServiceRequest(req ExposeRequest) (*proto.ExposeServiceRequest protocol = proto.ExposeProtocol_EXPOSE_TCP case int(proto.ExposeProtocol_EXPOSE_UDP): protocol = proto.ExposeProtocol_EXPOSE_UDP + case int(proto.ExposeProtocol_EXPOSE_TLS): + protocol = proto.ExposeProtocol_EXPOSE_TLS default: return nil, fmt.Errorf("invalid expose protocol: %d", req.Protocol) } @@ -820,6 +825,7 @@ func toProtoExposeServiceRequest(req ExposeRequest) (*proto.ExposeServiceRequest Pin: req.Pin, Password: req.Password, UserGroups: req.UserGroups, + ListenPort: uint32(req.ListenPort), }, nil } diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 6d2967aa9..4b851bf19 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -2836,6 +2836,10 @@ components: format: int64 description: "Bytes downloaded (response body size)" example: 8192 + protocol: + type: string + description: "Protocol type: http, tcp, or udp" + example: "http" required: - id - service_id @@ -2954,6 +2958,20 @@ components: domain: type: string description: Domain for the service + mode: + type: string + description: Service mode. "http" for L7 reverse proxy, "tcp"/"udp"/"tls" for L4 passthrough. + enum: [http, tcp, udp, tls] + default: http + listen_port: + type: integer + minimum: 0 + maximum: 65535 + description: Port the proxy listens on (L4/TLS only) + port_auto_assigned: + type: boolean + description: Whether the listen port was auto-assigned + readOnly: true proxy_cluster: type: string description: The proxy cluster handling this service (derived from domain) @@ -3020,6 +3038,16 @@ components: domain: type: string description: Domain for the service + mode: + type: string + description: Service mode. "http" for L7 reverse proxy, "tcp"/"udp"/"tls" for L4 passthrough. + enum: [http, tcp, udp, tls] + default: http + listen_port: + type: integer + minimum: 0 + maximum: 65535 + description: Port the proxy listens on (L4/TLS only). Set to 0 for auto-assignment. targets: type: array items: @@ -3040,8 +3068,6 @@ components: required: - name - domain - - targets - - auth - enabled ServiceTargetOptions: type: object @@ -3065,6 +3091,12 @@ components: additionalProperties: type: string pattern: '^[^\r\n]*$' + proxy_protocol: + type: boolean + description: Send PROXY Protocol v2 header to this backend (TCP/TLS only) + session_idle_timeout: + type: string + description: Idle timeout before a UDP session is reaped, as a Go duration string (e.g. "30s", "2m"). Maximum 10m. ServiceTarget: type: object properties: @@ -3073,21 +3105,23 @@ components: description: Target ID target_type: type: string - description: Target type (e.g., "peer", "resource") - enum: [peer, resource] + description: Target type + enum: [peer, host, domain, subnet] path: type: string - description: URL path prefix for this target + description: URL path prefix for this target (HTTP only) protocol: type: string description: Protocol to use when connecting to the backend - enum: [http, https] + enum: [http, https, tcp, udp] host: type: string description: Backend ip or domain for this target port: type: integer - description: Backend port for this target. Use 0 or omit to use the scheme default (80 for http, 443 for https). + minimum: 1 + maximum: 65535 + description: Backend port for this target enabled: type: boolean description: Whether this target is enabled @@ -3194,6 +3228,9 @@ components: target_cluster: type: string description: The proxy cluster this domain is validated against (only for custom domains) + supports_custom_ports: + type: boolean + description: Whether the cluster supports binding arbitrary TCP/UDP ports required: - id - domain @@ -4277,6 +4314,12 @@ components: requires_authentication: description: Requires authentication content: { } + conflict: + description: Conflict + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' securitySchemes: BearerAuth: type: http @@ -9621,6 +9664,29 @@ paths: application/json: schema: $ref: '#/components/schemas/ErrorResponse' + /api/reverse-proxies/clusters: + get: + summary: List available proxy clusters + description: Returns a list of available proxy clusters with their connection status + tags: [ Services ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + responses: + '200': + description: A JSON Array of proxy clusters + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ProxyCluster' + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '500': + "$ref": "#/components/responses/internal_error" /api/reverse-proxies/services: get: summary: List all Services @@ -9670,29 +9736,8 @@ paths: "$ref": "#/components/responses/requires_authentication" '403': "$ref": "#/components/responses/forbidden" - '500': - "$ref": "#/components/responses/internal_error" - /api/reverse-proxies/clusters: - get: - summary: List available proxy clusters - description: Returns a list of available proxy clusters with their connection status - tags: [ Services ] - security: - - BearerAuth: [ ] - - TokenAuth: [ ] - responses: - '200': - description: A JSON Array of proxy clusters - content: - application/json: - schema: - type: array - items: - $ref: '#/components/schemas/ProxyCluster' - '401': - "$ref": "#/components/responses/requires_authentication" - '403': - "$ref": "#/components/responses/forbidden" + '409': + "$ref": "#/components/responses/conflict" '500': "$ref": "#/components/responses/internal_error" /api/reverse-proxies/services/{serviceId}: @@ -9762,6 +9807,8 @@ paths: "$ref": "#/components/responses/forbidden" '404': "$ref": "#/components/responses/not_found" + '409': + "$ref": "#/components/responses/conflict" '500': "$ref": "#/components/responses/internal_error" delete: diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index f5a2b7ced..4ec3b871a 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -880,6 +880,30 @@ func (e SentinelOneMatchAttributesNetworkStatus) Valid() bool { } } +// Defines values for ServiceMode. +const ( + ServiceModeHttp ServiceMode = "http" + ServiceModeTcp ServiceMode = "tcp" + ServiceModeTls ServiceMode = "tls" + ServiceModeUdp ServiceMode = "udp" +) + +// Valid indicates whether the value is a known member of the ServiceMode enum. +func (e ServiceMode) Valid() bool { + switch e { + case ServiceModeHttp: + return true + case ServiceModeTcp: + return true + case ServiceModeTls: + return true + case ServiceModeUdp: + return true + default: + return false + } +} + // Defines values for ServiceMetaStatus. const ( ServiceMetaStatusActive ServiceMetaStatus = "active" @@ -910,10 +934,36 @@ func (e ServiceMetaStatus) Valid() bool { } } +// Defines values for ServiceRequestMode. +const ( + ServiceRequestModeHttp ServiceRequestMode = "http" + ServiceRequestModeTcp ServiceRequestMode = "tcp" + ServiceRequestModeTls ServiceRequestMode = "tls" + ServiceRequestModeUdp ServiceRequestMode = "udp" +) + +// Valid indicates whether the value is a known member of the ServiceRequestMode enum. +func (e ServiceRequestMode) Valid() bool { + switch e { + case ServiceRequestModeHttp: + return true + case ServiceRequestModeTcp: + return true + case ServiceRequestModeTls: + return true + case ServiceRequestModeUdp: + return true + default: + return false + } +} + // Defines values for ServiceTargetProtocol. const ( ServiceTargetProtocolHttp ServiceTargetProtocol = "http" ServiceTargetProtocolHttps ServiceTargetProtocol = "https" + ServiceTargetProtocolTcp ServiceTargetProtocol = "tcp" + ServiceTargetProtocolUdp ServiceTargetProtocol = "udp" ) // Valid indicates whether the value is a known member of the ServiceTargetProtocol enum. @@ -923,6 +973,10 @@ func (e ServiceTargetProtocol) Valid() bool { return true case ServiceTargetProtocolHttps: return true + case ServiceTargetProtocolTcp: + return true + case ServiceTargetProtocolUdp: + return true default: return false } @@ -930,16 +984,22 @@ func (e ServiceTargetProtocol) Valid() bool { // Defines values for ServiceTargetTargetType. const ( - ServiceTargetTargetTypePeer ServiceTargetTargetType = "peer" - ServiceTargetTargetTypeResource ServiceTargetTargetType = "resource" + ServiceTargetTargetTypeDomain ServiceTargetTargetType = "domain" + ServiceTargetTargetTypeHost ServiceTargetTargetType = "host" + ServiceTargetTargetTypePeer ServiceTargetTargetType = "peer" + ServiceTargetTargetTypeSubnet ServiceTargetTargetType = "subnet" ) // Valid indicates whether the value is a known member of the ServiceTargetTargetType enum. func (e ServiceTargetTargetType) Valid() bool { switch e { + case ServiceTargetTargetTypeDomain: + return true + case ServiceTargetTargetTypeHost: + return true case ServiceTargetTargetTypePeer: return true - case ServiceTargetTargetTypeResource: + case ServiceTargetTargetTypeSubnet: return true default: return false @@ -3249,6 +3309,9 @@ type ProxyAccessLog struct { // Path Path of the request Path string `json:"path"` + // Protocol Protocol type: http, tcp, or udp + Protocol *string `json:"protocol,omitempty"` + // Reason Reason for the request result (e.g., authentication failure) Reason *string `json:"reason,omitempty"` @@ -3313,6 +3376,9 @@ type ReverseProxyDomain struct { // Id Domain ID Id string `json:"id"` + // SupportsCustomPorts Whether the cluster supports binding arbitrary TCP/UDP ports + SupportsCustomPorts *bool `json:"supports_custom_ports,omitempty"` + // TargetCluster The proxy cluster this domain is validated against (only for custom domains) TargetCluster *string `json:"target_cluster,omitempty"` @@ -3505,8 +3571,14 @@ type Service struct { Enabled bool `json:"enabled"` // Id Service ID - Id string `json:"id"` - Meta ServiceMeta `json:"meta"` + Id string `json:"id"` + + // ListenPort Port the proxy listens on (L4/TLS only) + ListenPort *int `json:"listen_port,omitempty"` + Meta ServiceMeta `json:"meta"` + + // Mode Service mode. "http" for L7 reverse proxy, "tcp"/"udp"/"tls" for L4 passthrough. + Mode *ServiceMode `json:"mode,omitempty"` // Name Service name Name string `json:"name"` @@ -3514,6 +3586,9 @@ type Service struct { // PassHostHeader When true, the original client Host header is passed through to the backend instead of being rewritten to the backend's address PassHostHeader *bool `json:"pass_host_header,omitempty"` + // PortAutoAssigned Whether the listen port was auto-assigned + PortAutoAssigned *bool `json:"port_auto_assigned,omitempty"` + // ProxyCluster The proxy cluster handling this service (derived from domain) ProxyCluster *string `json:"proxy_cluster,omitempty"` @@ -3524,6 +3599,9 @@ type Service struct { Targets []ServiceTarget `json:"targets"` } +// ServiceMode Service mode. "http" for L7 reverse proxy, "tcp"/"udp"/"tls" for L4 passthrough. +type ServiceMode string + // ServiceAuthConfig defines model for ServiceAuthConfig. type ServiceAuthConfig struct { BearerAuth *BearerAuthConfig `json:"bearer_auth,omitempty"` @@ -3549,7 +3627,7 @@ type ServiceMetaStatus string // ServiceRequest defines model for ServiceRequest. type ServiceRequest struct { - Auth ServiceAuthConfig `json:"auth"` + Auth *ServiceAuthConfig `json:"auth,omitempty"` // Domain Domain for the service Domain string `json:"domain"` @@ -3557,6 +3635,12 @@ type ServiceRequest struct { // Enabled Whether the service is enabled Enabled bool `json:"enabled"` + // ListenPort Port the proxy listens on (L4/TLS only). Set to 0 for auto-assignment. + ListenPort *int `json:"listen_port,omitempty"` + + // Mode Service mode. "http" for L7 reverse proxy, "tcp"/"udp"/"tls" for L4 passthrough. + Mode *ServiceRequestMode `json:"mode,omitempty"` + // Name Service name Name string `json:"name"` @@ -3567,9 +3651,12 @@ type ServiceRequest struct { RewriteRedirects *bool `json:"rewrite_redirects,omitempty"` // Targets List of target backends for this service - Targets []ServiceTarget `json:"targets"` + Targets *[]ServiceTarget `json:"targets,omitempty"` } +// ServiceRequestMode Service mode. "http" for L7 reverse proxy, "tcp"/"udp"/"tls" for L4 passthrough. +type ServiceRequestMode string + // ServiceTarget defines model for ServiceTarget. type ServiceTarget struct { // Enabled Whether this target is enabled @@ -3579,10 +3666,10 @@ type ServiceTarget struct { Host *string `json:"host,omitempty"` Options *ServiceTargetOptions `json:"options,omitempty"` - // Path URL path prefix for this target + // Path URL path prefix for this target (HTTP only) Path *string `json:"path,omitempty"` - // Port Backend port for this target. Use 0 or omit to use the scheme default (80 for http, 443 for https). + // Port Backend port for this target Port int `json:"port"` // Protocol Protocol to use when connecting to the backend @@ -3591,14 +3678,14 @@ type ServiceTarget struct { // TargetId Target ID TargetId string `json:"target_id"` - // TargetType Target type (e.g., "peer", "resource") + // TargetType Target type TargetType ServiceTargetTargetType `json:"target_type"` } // ServiceTargetProtocol Protocol to use when connecting to the backend type ServiceTargetProtocol string -// ServiceTargetTargetType Target type (e.g., "peer", "resource") +// ServiceTargetTargetType Target type type ServiceTargetTargetType string // ServiceTargetOptions defines model for ServiceTargetOptions. @@ -3609,9 +3696,15 @@ type ServiceTargetOptions struct { // PathRewrite Controls how the request path is rewritten before forwarding to the backend. Default strips the matched prefix. "preserve" keeps the full original request path. PathRewrite *ServiceTargetOptionsPathRewrite `json:"path_rewrite,omitempty"` + // ProxyProtocol Send PROXY Protocol v2 header to this backend (TCP/TLS only) + ProxyProtocol *bool `json:"proxy_protocol,omitempty"` + // RequestTimeout Per-target response timeout as a Go duration string (e.g. "30s", "2m") RequestTimeout *string `json:"request_timeout,omitempty"` + // SessionIdleTimeout Idle timeout before a UDP session is reaped, as a Go duration string (e.g. "30s", "2m"). Maximum 10m. + SessionIdleTimeout *string `json:"session_idle_timeout,omitempty"` + // SkipTlsVerify Skip TLS certificate verification for this backend SkipTlsVerify *bool `json:"skip_tls_verify,omitempty"` } @@ -4136,6 +4229,9 @@ type ZoneRequest struct { Name string `json:"name"` } +// Conflict Standard error response. Note: The exact structure of this error response is inferred from `util.WriteErrorResponse` and `util.WriteError` usage in the provided Go code, as a specific Go struct for errors was not provided. +type Conflict = ErrorResponse + // GetApiEventsNetworkTrafficParams defines parameters for GetApiEventsNetworkTraffic. type GetApiEventsNetworkTrafficParams struct { // Page Page number diff --git a/shared/management/proto/management.pb.go b/shared/management/proto/management.pb.go index 2c66bb946..c5581296c 100644 --- a/shared/management/proto/management.pb.go +++ b/shared/management/proto/management.pb.go @@ -228,6 +228,7 @@ const ( ExposeProtocol_EXPOSE_HTTPS ExposeProtocol = 1 ExposeProtocol_EXPOSE_TCP ExposeProtocol = 2 ExposeProtocol_EXPOSE_UDP ExposeProtocol = 3 + ExposeProtocol_EXPOSE_TLS ExposeProtocol = 4 ) // Enum value maps for ExposeProtocol. @@ -237,12 +238,14 @@ var ( 1: "EXPOSE_HTTPS", 2: "EXPOSE_TCP", 3: "EXPOSE_UDP", + 4: "EXPOSE_TLS", } ExposeProtocol_value = map[string]int32{ "EXPOSE_HTTP": 0, "EXPOSE_HTTPS": 1, "EXPOSE_TCP": 2, "EXPOSE_UDP": 3, + "EXPOSE_TLS": 4, } ) @@ -4047,6 +4050,7 @@ type ExposeServiceRequest struct { UserGroups []string `protobuf:"bytes,5,rep,name=user_groups,json=userGroups,proto3" json:"user_groups,omitempty"` Domain string `protobuf:"bytes,6,opt,name=domain,proto3" json:"domain,omitempty"` NamePrefix string `protobuf:"bytes,7,opt,name=name_prefix,json=namePrefix,proto3" json:"name_prefix,omitempty"` + ListenPort uint32 `protobuf:"varint,8,opt,name=listen_port,json=listenPort,proto3" json:"listen_port,omitempty"` } func (x *ExposeServiceRequest) Reset() { @@ -4130,14 +4134,22 @@ func (x *ExposeServiceRequest) GetNamePrefix() string { return "" } +func (x *ExposeServiceRequest) GetListenPort() uint32 { + if x != nil { + return x.ListenPort + } + return 0 +} + type ExposeServiceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` - ServiceUrl string `protobuf:"bytes,2,opt,name=service_url,json=serviceUrl,proto3" json:"service_url,omitempty"` - Domain string `protobuf:"bytes,3,opt,name=domain,proto3" json:"domain,omitempty"` + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + ServiceUrl string `protobuf:"bytes,2,opt,name=service_url,json=serviceUrl,proto3" json:"service_url,omitempty"` + Domain string `protobuf:"bytes,3,opt,name=domain,proto3" json:"domain,omitempty"` + PortAutoAssigned bool `protobuf:"varint,4,opt,name=port_auto_assigned,json=portAutoAssigned,proto3" json:"port_auto_assigned,omitempty"` } func (x *ExposeServiceResponse) Reset() { @@ -4193,6 +4205,13 @@ func (x *ExposeServiceResponse) GetDomain() string { return "" } +func (x *ExposeServiceResponse) GetPortAutoAssigned() bool { + if x != nil { + return x.PortAutoAssigned + } + return false +} + type RenewExposeRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -4996,7 +5015,7 @@ var file_management_proto_rawDesc = []byte{ 0x64, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x72, - 0x74, 0x22, 0xea, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x74, 0x22, 0x8b, 0x02, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, @@ -5010,15 +5029,20 @@ var file_management_proto_rawDesc = []byte{ 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x73, - 0x0a, 0x15, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, - 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x22, 0x2c, 0x0a, 0x12, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, + 0x28, 0x09, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1f, + 0x0a, 0x0b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x22, + 0xa1, 0x01, 0x0a, 0x15, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x16, 0x0a, + 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x61, 0x75, + 0x74, 0x6f, 0x5f, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x10, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x73, 0x73, 0x69, 0x67, + 0x6e, 0x65, 0x64, 0x22, 0x2c, 0x0a, 0x12, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, @@ -5039,12 +5063,13 @@ var file_management_proto_rawDesc = []byte{ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x22, 0x0a, 0x0a, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x10, - 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, 0x10, 0x01, 0x2a, 0x53, 0x0a, 0x0e, 0x45, + 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, 0x10, 0x01, 0x2a, 0x63, 0x0a, 0x0e, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x55, 0x44, 0x50, 0x10, 0x03, + 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, 0x4c, 0x53, 0x10, 0x04, 0x32, 0xfd, 0x06, 0x0a, 0x11, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, diff --git a/shared/management/proto/management.proto b/shared/management/proto/management.proto index fdbe3a365..9acf7e2b3 100644 --- a/shared/management/proto/management.proto +++ b/shared/management/proto/management.proto @@ -652,6 +652,7 @@ enum ExposeProtocol { EXPOSE_HTTPS = 1; EXPOSE_TCP = 2; EXPOSE_UDP = 3; + EXPOSE_TLS = 4; } message ExposeServiceRequest { @@ -662,12 +663,14 @@ message ExposeServiceRequest { repeated string user_groups = 5; string domain = 6; string name_prefix = 7; + uint32 listen_port = 8; } message ExposeServiceResponse { string service_name = 1; string service_url = 2; string domain = 3; + bool port_auto_assigned = 4; } message RenewExposeRequest { diff --git a/shared/management/proto/proxy_service.pb.go b/shared/management/proto/proxy_service.pb.go index 275e8be37..115ac5101 100644 --- a/shared/management/proto/proxy_service.pb.go +++ b/shared/management/proto/proxy_service.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v6.33.0 +// protoc v6.33.3 // source: proxy_service.proto package proto @@ -175,22 +175,72 @@ func (ProxyStatus) EnumDescriptor() ([]byte, []int) { return file_proxy_service_proto_rawDescGZIP(), []int{2} } +// ProxyCapabilities describes what a proxy can handle. +type ProxyCapabilities struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether the proxy can bind arbitrary ports for TCP/UDP/TLS services. + SupportsCustomPorts *bool `protobuf:"varint,1,opt,name=supports_custom_ports,json=supportsCustomPorts,proto3,oneof" json:"supports_custom_ports,omitempty"` +} + +func (x *ProxyCapabilities) Reset() { + *x = ProxyCapabilities{} + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProxyCapabilities) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProxyCapabilities) ProtoMessage() {} + +func (x *ProxyCapabilities) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProxyCapabilities.ProtoReflect.Descriptor instead. +func (*ProxyCapabilities) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ProxyCapabilities) GetSupportsCustomPorts() bool { + if x != nil && x.SupportsCustomPorts != nil { + return *x.SupportsCustomPorts + } + return false +} + // GetMappingUpdateRequest is sent to initialise a mapping stream. type GetMappingUpdateRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ProxyId string `protobuf:"bytes,1,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - StartedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` - Address string `protobuf:"bytes,4,opt,name=address,proto3" json:"address,omitempty"` + ProxyId string `protobuf:"bytes,1,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + StartedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + Address string `protobuf:"bytes,4,opt,name=address,proto3" json:"address,omitempty"` + Capabilities *ProxyCapabilities `protobuf:"bytes,5,opt,name=capabilities,proto3" json:"capabilities,omitempty"` } func (x *GetMappingUpdateRequest) Reset() { *x = GetMappingUpdateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[0] + mi := &file_proxy_service_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -203,7 +253,7 @@ func (x *GetMappingUpdateRequest) String() string { func (*GetMappingUpdateRequest) ProtoMessage() {} func (x *GetMappingUpdateRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[0] + mi := &file_proxy_service_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -216,7 +266,7 @@ func (x *GetMappingUpdateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMappingUpdateRequest.ProtoReflect.Descriptor instead. func (*GetMappingUpdateRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{0} + return file_proxy_service_proto_rawDescGZIP(), []int{1} } func (x *GetMappingUpdateRequest) GetProxyId() string { @@ -247,6 +297,13 @@ func (x *GetMappingUpdateRequest) GetAddress() string { return "" } +func (x *GetMappingUpdateRequest) GetCapabilities() *ProxyCapabilities { + if x != nil { + return x.Capabilities + } + return nil +} + // GetMappingUpdateResponse contains zero or more ProxyMappings. // No mappings may be sent to test the liveness of the Proxy. // Mappings that are sent should be interpreted by the Proxy appropriately. @@ -264,7 +321,7 @@ type GetMappingUpdateResponse struct { func (x *GetMappingUpdateResponse) Reset() { *x = GetMappingUpdateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[1] + mi := &file_proxy_service_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -277,7 +334,7 @@ func (x *GetMappingUpdateResponse) String() string { func (*GetMappingUpdateResponse) ProtoMessage() {} func (x *GetMappingUpdateResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[1] + mi := &file_proxy_service_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -290,7 +347,7 @@ func (x *GetMappingUpdateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMappingUpdateResponse.ProtoReflect.Descriptor instead. func (*GetMappingUpdateResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{1} + return file_proxy_service_proto_rawDescGZIP(), []int{2} } func (x *GetMappingUpdateResponse) GetMapping() []*ProxyMapping { @@ -316,12 +373,16 @@ type PathTargetOptions struct { RequestTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"` PathRewrite PathRewriteMode `protobuf:"varint,3,opt,name=path_rewrite,json=pathRewrite,proto3,enum=management.PathRewriteMode" json:"path_rewrite,omitempty"` CustomHeaders map[string]string `protobuf:"bytes,4,rep,name=custom_headers,json=customHeaders,proto3" json:"custom_headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Send PROXY protocol v2 header to this backend. + ProxyProtocol bool `protobuf:"varint,5,opt,name=proxy_protocol,json=proxyProtocol,proto3" json:"proxy_protocol,omitempty"` + // Idle timeout before a UDP session is reaped. + SessionIdleTimeout *durationpb.Duration `protobuf:"bytes,6,opt,name=session_idle_timeout,json=sessionIdleTimeout,proto3" json:"session_idle_timeout,omitempty"` } func (x *PathTargetOptions) Reset() { *x = PathTargetOptions{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[2] + mi := &file_proxy_service_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -334,7 +395,7 @@ func (x *PathTargetOptions) String() string { func (*PathTargetOptions) ProtoMessage() {} func (x *PathTargetOptions) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[2] + mi := &file_proxy_service_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -347,7 +408,7 @@ func (x *PathTargetOptions) ProtoReflect() protoreflect.Message { // Deprecated: Use PathTargetOptions.ProtoReflect.Descriptor instead. func (*PathTargetOptions) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{2} + return file_proxy_service_proto_rawDescGZIP(), []int{3} } func (x *PathTargetOptions) GetSkipTlsVerify() bool { @@ -378,6 +439,20 @@ func (x *PathTargetOptions) GetCustomHeaders() map[string]string { return nil } +func (x *PathTargetOptions) GetProxyProtocol() bool { + if x != nil { + return x.ProxyProtocol + } + return false +} + +func (x *PathTargetOptions) GetSessionIdleTimeout() *durationpb.Duration { + if x != nil { + return x.SessionIdleTimeout + } + return nil +} + type PathMapping struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -391,7 +466,7 @@ type PathMapping struct { func (x *PathMapping) Reset() { *x = PathMapping{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[3] + mi := &file_proxy_service_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -404,7 +479,7 @@ func (x *PathMapping) String() string { func (*PathMapping) ProtoMessage() {} func (x *PathMapping) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[3] + mi := &file_proxy_service_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -417,7 +492,7 @@ func (x *PathMapping) ProtoReflect() protoreflect.Message { // Deprecated: Use PathMapping.ProtoReflect.Descriptor instead. func (*PathMapping) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{3} + return file_proxy_service_proto_rawDescGZIP(), []int{4} } func (x *PathMapping) GetPath() string { @@ -456,7 +531,7 @@ type Authentication struct { func (x *Authentication) Reset() { *x = Authentication{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[4] + mi := &file_proxy_service_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -469,7 +544,7 @@ func (x *Authentication) String() string { func (*Authentication) ProtoMessage() {} func (x *Authentication) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[4] + mi := &file_proxy_service_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -482,7 +557,7 @@ func (x *Authentication) ProtoReflect() protoreflect.Message { // Deprecated: Use Authentication.ProtoReflect.Descriptor instead. func (*Authentication) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{4} + return file_proxy_service_proto_rawDescGZIP(), []int{5} } func (x *Authentication) GetSessionKey() string { @@ -538,12 +613,16 @@ type ProxyMapping struct { // When true, Location headers in backend responses are rewritten to replace // the backend address with the public-facing domain. RewriteRedirects bool `protobuf:"varint,9,opt,name=rewrite_redirects,json=rewriteRedirects,proto3" json:"rewrite_redirects,omitempty"` + // Service mode: "http", "tcp", "udp", or "tls". + Mode string `protobuf:"bytes,10,opt,name=mode,proto3" json:"mode,omitempty"` + // For L4/TLS: the port the proxy listens on. + ListenPort int32 `protobuf:"varint,11,opt,name=listen_port,json=listenPort,proto3" json:"listen_port,omitempty"` } func (x *ProxyMapping) Reset() { *x = ProxyMapping{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[5] + mi := &file_proxy_service_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -556,7 +635,7 @@ func (x *ProxyMapping) String() string { func (*ProxyMapping) ProtoMessage() {} func (x *ProxyMapping) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[5] + mi := &file_proxy_service_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -569,7 +648,7 @@ func (x *ProxyMapping) ProtoReflect() protoreflect.Message { // Deprecated: Use ProxyMapping.ProtoReflect.Descriptor instead. func (*ProxyMapping) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{5} + return file_proxy_service_proto_rawDescGZIP(), []int{6} } func (x *ProxyMapping) GetType() ProxyMappingUpdateType { @@ -635,6 +714,20 @@ func (x *ProxyMapping) GetRewriteRedirects() bool { return false } +func (x *ProxyMapping) GetMode() string { + if x != nil { + return x.Mode + } + return "" +} + +func (x *ProxyMapping) GetListenPort() int32 { + if x != nil { + return x.ListenPort + } + return 0 +} + // SendAccessLogRequest consists of one or more AccessLogs from a Proxy. type SendAccessLogRequest struct { state protoimpl.MessageState @@ -647,7 +740,7 @@ type SendAccessLogRequest struct { func (x *SendAccessLogRequest) Reset() { *x = SendAccessLogRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[6] + mi := &file_proxy_service_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -660,7 +753,7 @@ func (x *SendAccessLogRequest) String() string { func (*SendAccessLogRequest) ProtoMessage() {} func (x *SendAccessLogRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[6] + mi := &file_proxy_service_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -673,7 +766,7 @@ func (x *SendAccessLogRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SendAccessLogRequest.ProtoReflect.Descriptor instead. func (*SendAccessLogRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{6} + return file_proxy_service_proto_rawDescGZIP(), []int{7} } func (x *SendAccessLogRequest) GetLog() *AccessLog { @@ -693,7 +786,7 @@ type SendAccessLogResponse struct { func (x *SendAccessLogResponse) Reset() { *x = SendAccessLogResponse{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[7] + mi := &file_proxy_service_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -706,7 +799,7 @@ func (x *SendAccessLogResponse) String() string { func (*SendAccessLogResponse) ProtoMessage() {} func (x *SendAccessLogResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[7] + mi := &file_proxy_service_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -719,7 +812,7 @@ func (x *SendAccessLogResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SendAccessLogResponse.ProtoReflect.Descriptor instead. func (*SendAccessLogResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{7} + return file_proxy_service_proto_rawDescGZIP(), []int{8} } type AccessLog struct { @@ -742,12 +835,13 @@ type AccessLog struct { AuthSuccess bool `protobuf:"varint,13,opt,name=auth_success,json=authSuccess,proto3" json:"auth_success,omitempty"` BytesUpload int64 `protobuf:"varint,14,opt,name=bytes_upload,json=bytesUpload,proto3" json:"bytes_upload,omitempty"` BytesDownload int64 `protobuf:"varint,15,opt,name=bytes_download,json=bytesDownload,proto3" json:"bytes_download,omitempty"` + Protocol string `protobuf:"bytes,16,opt,name=protocol,proto3" json:"protocol,omitempty"` } func (x *AccessLog) Reset() { *x = AccessLog{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[8] + mi := &file_proxy_service_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -760,7 +854,7 @@ func (x *AccessLog) String() string { func (*AccessLog) ProtoMessage() {} func (x *AccessLog) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[8] + mi := &file_proxy_service_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -773,7 +867,7 @@ func (x *AccessLog) ProtoReflect() protoreflect.Message { // Deprecated: Use AccessLog.ProtoReflect.Descriptor instead. func (*AccessLog) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{8} + return file_proxy_service_proto_rawDescGZIP(), []int{9} } func (x *AccessLog) GetTimestamp() *timestamppb.Timestamp { @@ -881,6 +975,13 @@ func (x *AccessLog) GetBytesDownload() int64 { return 0 } +func (x *AccessLog) GetProtocol() string { + if x != nil { + return x.Protocol + } + return "" +} + type AuthenticateRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -898,7 +999,7 @@ type AuthenticateRequest struct { func (x *AuthenticateRequest) Reset() { *x = AuthenticateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[9] + mi := &file_proxy_service_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -911,7 +1012,7 @@ func (x *AuthenticateRequest) String() string { func (*AuthenticateRequest) ProtoMessage() {} func (x *AuthenticateRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[9] + mi := &file_proxy_service_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -924,7 +1025,7 @@ func (x *AuthenticateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AuthenticateRequest.ProtoReflect.Descriptor instead. func (*AuthenticateRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{9} + return file_proxy_service_proto_rawDescGZIP(), []int{10} } func (x *AuthenticateRequest) GetId() string { @@ -989,7 +1090,7 @@ type PasswordRequest struct { func (x *PasswordRequest) Reset() { *x = PasswordRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[10] + mi := &file_proxy_service_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1002,7 +1103,7 @@ func (x *PasswordRequest) String() string { func (*PasswordRequest) ProtoMessage() {} func (x *PasswordRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[10] + mi := &file_proxy_service_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1015,7 +1116,7 @@ func (x *PasswordRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PasswordRequest.ProtoReflect.Descriptor instead. func (*PasswordRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{10} + return file_proxy_service_proto_rawDescGZIP(), []int{11} } func (x *PasswordRequest) GetPassword() string { @@ -1036,7 +1137,7 @@ type PinRequest struct { func (x *PinRequest) Reset() { *x = PinRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[11] + mi := &file_proxy_service_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1049,7 +1150,7 @@ func (x *PinRequest) String() string { func (*PinRequest) ProtoMessage() {} func (x *PinRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[11] + mi := &file_proxy_service_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1062,7 +1163,7 @@ func (x *PinRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PinRequest.ProtoReflect.Descriptor instead. func (*PinRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{11} + return file_proxy_service_proto_rawDescGZIP(), []int{12} } func (x *PinRequest) GetPin() string { @@ -1084,7 +1185,7 @@ type AuthenticateResponse struct { func (x *AuthenticateResponse) Reset() { *x = AuthenticateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[12] + mi := &file_proxy_service_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1097,7 +1198,7 @@ func (x *AuthenticateResponse) String() string { func (*AuthenticateResponse) ProtoMessage() {} func (x *AuthenticateResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[12] + mi := &file_proxy_service_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1110,7 +1211,7 @@ func (x *AuthenticateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use AuthenticateResponse.ProtoReflect.Descriptor instead. func (*AuthenticateResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{12} + return file_proxy_service_proto_rawDescGZIP(), []int{13} } func (x *AuthenticateResponse) GetSuccess() bool { @@ -1143,7 +1244,7 @@ type SendStatusUpdateRequest struct { func (x *SendStatusUpdateRequest) Reset() { *x = SendStatusUpdateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[13] + mi := &file_proxy_service_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1156,7 +1257,7 @@ func (x *SendStatusUpdateRequest) String() string { func (*SendStatusUpdateRequest) ProtoMessage() {} func (x *SendStatusUpdateRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[13] + mi := &file_proxy_service_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1169,7 +1270,7 @@ func (x *SendStatusUpdateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SendStatusUpdateRequest.ProtoReflect.Descriptor instead. func (*SendStatusUpdateRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{13} + return file_proxy_service_proto_rawDescGZIP(), []int{14} } func (x *SendStatusUpdateRequest) GetServiceId() string { @@ -1217,7 +1318,7 @@ type SendStatusUpdateResponse struct { func (x *SendStatusUpdateResponse) Reset() { *x = SendStatusUpdateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[14] + mi := &file_proxy_service_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1230,7 +1331,7 @@ func (x *SendStatusUpdateResponse) String() string { func (*SendStatusUpdateResponse) ProtoMessage() {} func (x *SendStatusUpdateResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[14] + mi := &file_proxy_service_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1243,7 +1344,7 @@ func (x *SendStatusUpdateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SendStatusUpdateResponse.ProtoReflect.Descriptor instead. func (*SendStatusUpdateResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{14} + return file_proxy_service_proto_rawDescGZIP(), []int{15} } // CreateProxyPeerRequest is sent by the proxy to create a peer connection @@ -1263,7 +1364,7 @@ type CreateProxyPeerRequest struct { func (x *CreateProxyPeerRequest) Reset() { *x = CreateProxyPeerRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[15] + mi := &file_proxy_service_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1276,7 +1377,7 @@ func (x *CreateProxyPeerRequest) String() string { func (*CreateProxyPeerRequest) ProtoMessage() {} func (x *CreateProxyPeerRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[15] + mi := &file_proxy_service_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1289,7 +1390,7 @@ func (x *CreateProxyPeerRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateProxyPeerRequest.ProtoReflect.Descriptor instead. func (*CreateProxyPeerRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{15} + return file_proxy_service_proto_rawDescGZIP(), []int{16} } func (x *CreateProxyPeerRequest) GetServiceId() string { @@ -1340,7 +1441,7 @@ type CreateProxyPeerResponse struct { func (x *CreateProxyPeerResponse) Reset() { *x = CreateProxyPeerResponse{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[16] + mi := &file_proxy_service_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1353,7 +1454,7 @@ func (x *CreateProxyPeerResponse) String() string { func (*CreateProxyPeerResponse) ProtoMessage() {} func (x *CreateProxyPeerResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[16] + mi := &file_proxy_service_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1366,7 +1467,7 @@ func (x *CreateProxyPeerResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateProxyPeerResponse.ProtoReflect.Descriptor instead. func (*CreateProxyPeerResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{16} + return file_proxy_service_proto_rawDescGZIP(), []int{17} } func (x *CreateProxyPeerResponse) GetSuccess() bool { @@ -1396,7 +1497,7 @@ type GetOIDCURLRequest struct { func (x *GetOIDCURLRequest) Reset() { *x = GetOIDCURLRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[17] + mi := &file_proxy_service_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1409,7 +1510,7 @@ func (x *GetOIDCURLRequest) String() string { func (*GetOIDCURLRequest) ProtoMessage() {} func (x *GetOIDCURLRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[17] + mi := &file_proxy_service_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1422,7 +1523,7 @@ func (x *GetOIDCURLRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetOIDCURLRequest.ProtoReflect.Descriptor instead. func (*GetOIDCURLRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{17} + return file_proxy_service_proto_rawDescGZIP(), []int{18} } func (x *GetOIDCURLRequest) GetId() string { @@ -1457,7 +1558,7 @@ type GetOIDCURLResponse struct { func (x *GetOIDCURLResponse) Reset() { *x = GetOIDCURLResponse{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[18] + mi := &file_proxy_service_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1470,7 +1571,7 @@ func (x *GetOIDCURLResponse) String() string { func (*GetOIDCURLResponse) ProtoMessage() {} func (x *GetOIDCURLResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[18] + mi := &file_proxy_service_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1483,7 +1584,7 @@ func (x *GetOIDCURLResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetOIDCURLResponse.ProtoReflect.Descriptor instead. func (*GetOIDCURLResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{18} + return file_proxy_service_proto_rawDescGZIP(), []int{19} } func (x *GetOIDCURLResponse) GetUrl() string { @@ -1505,7 +1606,7 @@ type ValidateSessionRequest struct { func (x *ValidateSessionRequest) Reset() { *x = ValidateSessionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[19] + mi := &file_proxy_service_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1518,7 +1619,7 @@ func (x *ValidateSessionRequest) String() string { func (*ValidateSessionRequest) ProtoMessage() {} func (x *ValidateSessionRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[19] + mi := &file_proxy_service_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1531,7 +1632,7 @@ func (x *ValidateSessionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateSessionRequest.ProtoReflect.Descriptor instead. func (*ValidateSessionRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{19} + return file_proxy_service_proto_rawDescGZIP(), []int{20} } func (x *ValidateSessionRequest) GetDomain() string { @@ -1562,7 +1663,7 @@ type ValidateSessionResponse struct { func (x *ValidateSessionResponse) Reset() { *x = ValidateSessionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[20] + mi := &file_proxy_service_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1575,7 +1676,7 @@ func (x *ValidateSessionResponse) String() string { func (*ValidateSessionResponse) ProtoMessage() {} func (x *ValidateSessionResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[20] + mi := &file_proxy_service_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1588,7 +1689,7 @@ func (x *ValidateSessionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateSessionResponse.ProtoReflect.Descriptor instead. func (*ValidateSessionResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{20} + return file_proxy_service_proto_rawDescGZIP(), []int{21} } func (x *ValidateSessionResponse) GetValid() bool { @@ -1628,124 +1729,147 @@ var file_proxy_service_proto_rawDesc = []byte{ 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0xa3, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, - 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, - 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x18, - 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, - 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, - 0x52, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x69, 0x6e, 0x69, - 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, - 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, - 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x22, 0xda, 0x02, - 0x0a, 0x11, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x74, 0x6c, 0x73, 0x5f, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, - 0x69, 0x70, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x42, 0x0a, 0x0f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, - 0x3e, 0x0a, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x6f, - 0x64, 0x65, 0x52, 0x0b, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, - 0x57, 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0x40, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x72, 0x0a, 0x0b, 0x50, 0x61, - 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, - 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xaa, - 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4b, - 0x65, 0x79, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x41, - 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, - 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x70, 0x61, 0x73, - 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x69, 0x64, 0x63, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x6f, 0x69, 0x64, 0x63, 0x22, 0xe0, 0x02, 0x0a, 0x0c, - 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, - 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x2b, 0x0a, 0x04, 0x70, - 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, - 0x6e, 0x67, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, - 0x74, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2e, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x61, 0x73, 0x73, 0x5f, - 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0e, 0x70, 0x61, 0x73, 0x73, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x64, - 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, - 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x22, 0x3f, - 0x0a, 0x14, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x03, 0x6c, 0x6f, 0x67, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x03, 0x6c, 0x6f, 0x67, 0x22, - 0x17, 0x0a, 0x15, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xea, 0x03, 0x0a, 0x09, 0x41, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x12, 0x15, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, - 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x16, - 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x72, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x68, - 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, - 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x68, - 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, - 0x61, 0x75, 0x74, 0x68, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x5f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x25, - 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, - 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x77, - 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xb6, 0x01, 0x0a, 0x13, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x6f, 0x22, 0x66, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x61, 0x70, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x73, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x13, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x73, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x88, 0x01, 0x01, + 0x42, 0x18, 0x0a, 0x16, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x5f, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x22, 0xe6, 0x01, 0x0a, 0x17, 0x47, + 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x49, + 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x41, 0x0a, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, + 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6d, 0x61, 0x70, + 0x70, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, + 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x13, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x79, 0x6e, 0x63, + 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x22, 0xce, 0x03, 0x0a, 0x11, 0x50, 0x61, 0x74, + 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, + 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x54, 0x6c, 0x73, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x42, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3e, 0x0a, 0x0c, 0x70, 0x61, + 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, + 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0b, 0x70, + 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x4b, 0x0a, 0x14, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x6c, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x1a, 0x40, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x72, 0x0a, 0x0b, 0x50, 0x61, 0x74, + 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xaa, 0x01, + 0x0a, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, + 0x79, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x67, + 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x69, 0x64, 0x63, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x6f, 0x69, 0x64, 0x63, 0x22, 0x95, 0x03, 0x0a, 0x0c, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, + 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x2b, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, + 0x67, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x74, + 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2e, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x61, 0x73, 0x73, 0x5f, 0x68, + 0x6f, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0e, 0x70, 0x61, 0x73, 0x73, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x12, 0x0a, + 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6d, 0x6f, 0x64, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x50, 0x6f, + 0x72, 0x74, 0x22, 0x3f, 0x0a, 0x14, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x03, 0x6c, 0x6f, + 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x03, + 0x6c, 0x6f, 0x67, 0x22, 0x17, 0x0a, 0x15, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x86, 0x04, 0x0a, + 0x09, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x12, 0x15, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x12, 0x25, 0x0a, 0x0e, + 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, + 0x69, 0x73, 0x6d, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, + 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, + 0x21, 0x0a, 0x0c, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x55, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x64, 0x6f, 0x77, 0x6e, + 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0xb6, 0x01, 0x0a, 0x13, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, @@ -1907,70 +2031,73 @@ func file_proxy_service_proto_rawDescGZIP() []byte { } var file_proxy_service_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_proxy_service_proto_msgTypes = make([]protoimpl.MessageInfo, 22) +var file_proxy_service_proto_msgTypes = make([]protoimpl.MessageInfo, 23) var file_proxy_service_proto_goTypes = []interface{}{ (ProxyMappingUpdateType)(0), // 0: management.ProxyMappingUpdateType (PathRewriteMode)(0), // 1: management.PathRewriteMode (ProxyStatus)(0), // 2: management.ProxyStatus - (*GetMappingUpdateRequest)(nil), // 3: management.GetMappingUpdateRequest - (*GetMappingUpdateResponse)(nil), // 4: management.GetMappingUpdateResponse - (*PathTargetOptions)(nil), // 5: management.PathTargetOptions - (*PathMapping)(nil), // 6: management.PathMapping - (*Authentication)(nil), // 7: management.Authentication - (*ProxyMapping)(nil), // 8: management.ProxyMapping - (*SendAccessLogRequest)(nil), // 9: management.SendAccessLogRequest - (*SendAccessLogResponse)(nil), // 10: management.SendAccessLogResponse - (*AccessLog)(nil), // 11: management.AccessLog - (*AuthenticateRequest)(nil), // 12: management.AuthenticateRequest - (*PasswordRequest)(nil), // 13: management.PasswordRequest - (*PinRequest)(nil), // 14: management.PinRequest - (*AuthenticateResponse)(nil), // 15: management.AuthenticateResponse - (*SendStatusUpdateRequest)(nil), // 16: management.SendStatusUpdateRequest - (*SendStatusUpdateResponse)(nil), // 17: management.SendStatusUpdateResponse - (*CreateProxyPeerRequest)(nil), // 18: management.CreateProxyPeerRequest - (*CreateProxyPeerResponse)(nil), // 19: management.CreateProxyPeerResponse - (*GetOIDCURLRequest)(nil), // 20: management.GetOIDCURLRequest - (*GetOIDCURLResponse)(nil), // 21: management.GetOIDCURLResponse - (*ValidateSessionRequest)(nil), // 22: management.ValidateSessionRequest - (*ValidateSessionResponse)(nil), // 23: management.ValidateSessionResponse - nil, // 24: management.PathTargetOptions.CustomHeadersEntry - (*timestamppb.Timestamp)(nil), // 25: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 26: google.protobuf.Duration + (*ProxyCapabilities)(nil), // 3: management.ProxyCapabilities + (*GetMappingUpdateRequest)(nil), // 4: management.GetMappingUpdateRequest + (*GetMappingUpdateResponse)(nil), // 5: management.GetMappingUpdateResponse + (*PathTargetOptions)(nil), // 6: management.PathTargetOptions + (*PathMapping)(nil), // 7: management.PathMapping + (*Authentication)(nil), // 8: management.Authentication + (*ProxyMapping)(nil), // 9: management.ProxyMapping + (*SendAccessLogRequest)(nil), // 10: management.SendAccessLogRequest + (*SendAccessLogResponse)(nil), // 11: management.SendAccessLogResponse + (*AccessLog)(nil), // 12: management.AccessLog + (*AuthenticateRequest)(nil), // 13: management.AuthenticateRequest + (*PasswordRequest)(nil), // 14: management.PasswordRequest + (*PinRequest)(nil), // 15: management.PinRequest + (*AuthenticateResponse)(nil), // 16: management.AuthenticateResponse + (*SendStatusUpdateRequest)(nil), // 17: management.SendStatusUpdateRequest + (*SendStatusUpdateResponse)(nil), // 18: management.SendStatusUpdateResponse + (*CreateProxyPeerRequest)(nil), // 19: management.CreateProxyPeerRequest + (*CreateProxyPeerResponse)(nil), // 20: management.CreateProxyPeerResponse + (*GetOIDCURLRequest)(nil), // 21: management.GetOIDCURLRequest + (*GetOIDCURLResponse)(nil), // 22: management.GetOIDCURLResponse + (*ValidateSessionRequest)(nil), // 23: management.ValidateSessionRequest + (*ValidateSessionResponse)(nil), // 24: management.ValidateSessionResponse + nil, // 25: management.PathTargetOptions.CustomHeadersEntry + (*timestamppb.Timestamp)(nil), // 26: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 27: google.protobuf.Duration } var file_proxy_service_proto_depIdxs = []int32{ - 25, // 0: management.GetMappingUpdateRequest.started_at:type_name -> google.protobuf.Timestamp - 8, // 1: management.GetMappingUpdateResponse.mapping:type_name -> management.ProxyMapping - 26, // 2: management.PathTargetOptions.request_timeout:type_name -> google.protobuf.Duration - 1, // 3: management.PathTargetOptions.path_rewrite:type_name -> management.PathRewriteMode - 24, // 4: management.PathTargetOptions.custom_headers:type_name -> management.PathTargetOptions.CustomHeadersEntry - 5, // 5: management.PathMapping.options:type_name -> management.PathTargetOptions - 0, // 6: management.ProxyMapping.type:type_name -> management.ProxyMappingUpdateType - 6, // 7: management.ProxyMapping.path:type_name -> management.PathMapping - 7, // 8: management.ProxyMapping.auth:type_name -> management.Authentication - 11, // 9: management.SendAccessLogRequest.log:type_name -> management.AccessLog - 25, // 10: management.AccessLog.timestamp:type_name -> google.protobuf.Timestamp - 13, // 11: management.AuthenticateRequest.password:type_name -> management.PasswordRequest - 14, // 12: management.AuthenticateRequest.pin:type_name -> management.PinRequest - 2, // 13: management.SendStatusUpdateRequest.status:type_name -> management.ProxyStatus - 3, // 14: management.ProxyService.GetMappingUpdate:input_type -> management.GetMappingUpdateRequest - 9, // 15: management.ProxyService.SendAccessLog:input_type -> management.SendAccessLogRequest - 12, // 16: management.ProxyService.Authenticate:input_type -> management.AuthenticateRequest - 16, // 17: management.ProxyService.SendStatusUpdate:input_type -> management.SendStatusUpdateRequest - 18, // 18: management.ProxyService.CreateProxyPeer:input_type -> management.CreateProxyPeerRequest - 20, // 19: management.ProxyService.GetOIDCURL:input_type -> management.GetOIDCURLRequest - 22, // 20: management.ProxyService.ValidateSession:input_type -> management.ValidateSessionRequest - 4, // 21: management.ProxyService.GetMappingUpdate:output_type -> management.GetMappingUpdateResponse - 10, // 22: management.ProxyService.SendAccessLog:output_type -> management.SendAccessLogResponse - 15, // 23: management.ProxyService.Authenticate:output_type -> management.AuthenticateResponse - 17, // 24: management.ProxyService.SendStatusUpdate:output_type -> management.SendStatusUpdateResponse - 19, // 25: management.ProxyService.CreateProxyPeer:output_type -> management.CreateProxyPeerResponse - 21, // 26: management.ProxyService.GetOIDCURL:output_type -> management.GetOIDCURLResponse - 23, // 27: management.ProxyService.ValidateSession:output_type -> management.ValidateSessionResponse - 21, // [21:28] is the sub-list for method output_type - 14, // [14:21] is the sub-list for method input_type - 14, // [14:14] is the sub-list for extension type_name - 14, // [14:14] is the sub-list for extension extendee - 0, // [0:14] is the sub-list for field type_name + 26, // 0: management.GetMappingUpdateRequest.started_at:type_name -> google.protobuf.Timestamp + 3, // 1: management.GetMappingUpdateRequest.capabilities:type_name -> management.ProxyCapabilities + 9, // 2: management.GetMappingUpdateResponse.mapping:type_name -> management.ProxyMapping + 27, // 3: management.PathTargetOptions.request_timeout:type_name -> google.protobuf.Duration + 1, // 4: management.PathTargetOptions.path_rewrite:type_name -> management.PathRewriteMode + 25, // 5: management.PathTargetOptions.custom_headers:type_name -> management.PathTargetOptions.CustomHeadersEntry + 27, // 6: management.PathTargetOptions.session_idle_timeout:type_name -> google.protobuf.Duration + 6, // 7: management.PathMapping.options:type_name -> management.PathTargetOptions + 0, // 8: management.ProxyMapping.type:type_name -> management.ProxyMappingUpdateType + 7, // 9: management.ProxyMapping.path:type_name -> management.PathMapping + 8, // 10: management.ProxyMapping.auth:type_name -> management.Authentication + 12, // 11: management.SendAccessLogRequest.log:type_name -> management.AccessLog + 26, // 12: management.AccessLog.timestamp:type_name -> google.protobuf.Timestamp + 14, // 13: management.AuthenticateRequest.password:type_name -> management.PasswordRequest + 15, // 14: management.AuthenticateRequest.pin:type_name -> management.PinRequest + 2, // 15: management.SendStatusUpdateRequest.status:type_name -> management.ProxyStatus + 4, // 16: management.ProxyService.GetMappingUpdate:input_type -> management.GetMappingUpdateRequest + 10, // 17: management.ProxyService.SendAccessLog:input_type -> management.SendAccessLogRequest + 13, // 18: management.ProxyService.Authenticate:input_type -> management.AuthenticateRequest + 17, // 19: management.ProxyService.SendStatusUpdate:input_type -> management.SendStatusUpdateRequest + 19, // 20: management.ProxyService.CreateProxyPeer:input_type -> management.CreateProxyPeerRequest + 21, // 21: management.ProxyService.GetOIDCURL:input_type -> management.GetOIDCURLRequest + 23, // 22: management.ProxyService.ValidateSession:input_type -> management.ValidateSessionRequest + 5, // 23: management.ProxyService.GetMappingUpdate:output_type -> management.GetMappingUpdateResponse + 11, // 24: management.ProxyService.SendAccessLog:output_type -> management.SendAccessLogResponse + 16, // 25: management.ProxyService.Authenticate:output_type -> management.AuthenticateResponse + 18, // 26: management.ProxyService.SendStatusUpdate:output_type -> management.SendStatusUpdateResponse + 20, // 27: management.ProxyService.CreateProxyPeer:output_type -> management.CreateProxyPeerResponse + 22, // 28: management.ProxyService.GetOIDCURL:output_type -> management.GetOIDCURLResponse + 24, // 29: management.ProxyService.ValidateSession:output_type -> management.ValidateSessionResponse + 23, // [23:30] is the sub-list for method output_type + 16, // [16:23] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name } func init() { file_proxy_service_proto_init() } @@ -1980,7 +2107,7 @@ func file_proxy_service_proto_init() { } if !protoimpl.UnsafeEnabled { file_proxy_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMappingUpdateRequest); i { + switch v := v.(*ProxyCapabilities); i { case 0: return &v.state case 1: @@ -1992,7 +2119,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMappingUpdateResponse); i { + switch v := v.(*GetMappingUpdateRequest); i { case 0: return &v.state case 1: @@ -2004,7 +2131,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PathTargetOptions); i { + switch v := v.(*GetMappingUpdateResponse); i { case 0: return &v.state case 1: @@ -2016,7 +2143,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PathMapping); i { + switch v := v.(*PathTargetOptions); i { case 0: return &v.state case 1: @@ -2028,7 +2155,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Authentication); i { + switch v := v.(*PathMapping); i { case 0: return &v.state case 1: @@ -2040,7 +2167,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProxyMapping); i { + switch v := v.(*Authentication); i { case 0: return &v.state case 1: @@ -2052,7 +2179,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendAccessLogRequest); i { + switch v := v.(*ProxyMapping); i { case 0: return &v.state case 1: @@ -2064,7 +2191,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendAccessLogResponse); i { + switch v := v.(*SendAccessLogRequest); i { case 0: return &v.state case 1: @@ -2076,7 +2203,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AccessLog); i { + switch v := v.(*SendAccessLogResponse); i { case 0: return &v.state case 1: @@ -2088,7 +2215,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthenticateRequest); i { + switch v := v.(*AccessLog); i { case 0: return &v.state case 1: @@ -2100,7 +2227,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PasswordRequest); i { + switch v := v.(*AuthenticateRequest); i { case 0: return &v.state case 1: @@ -2112,7 +2239,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PinRequest); i { + switch v := v.(*PasswordRequest); i { case 0: return &v.state case 1: @@ -2124,7 +2251,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthenticateResponse); i { + switch v := v.(*PinRequest); i { case 0: return &v.state case 1: @@ -2136,7 +2263,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendStatusUpdateRequest); i { + switch v := v.(*AuthenticateResponse); i { case 0: return &v.state case 1: @@ -2148,7 +2275,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendStatusUpdateResponse); i { + switch v := v.(*SendStatusUpdateRequest); i { case 0: return &v.state case 1: @@ -2160,7 +2287,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateProxyPeerRequest); i { + switch v := v.(*SendStatusUpdateResponse); i { case 0: return &v.state case 1: @@ -2172,7 +2299,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateProxyPeerResponse); i { + switch v := v.(*CreateProxyPeerRequest); i { case 0: return &v.state case 1: @@ -2184,7 +2311,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetOIDCURLRequest); i { + switch v := v.(*CreateProxyPeerResponse); i { case 0: return &v.state case 1: @@ -2196,7 +2323,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetOIDCURLResponse); i { + switch v := v.(*GetOIDCURLRequest); i { case 0: return &v.state case 1: @@ -2208,7 +2335,7 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateSessionRequest); i { + switch v := v.(*GetOIDCURLResponse); i { case 0: return &v.state case 1: @@ -2220,6 +2347,18 @@ func file_proxy_service_proto_init() { } } file_proxy_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateSessionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateSessionResponse); i { case 0: return &v.state @@ -2232,19 +2371,20 @@ func file_proxy_service_proto_init() { } } } - file_proxy_service_proto_msgTypes[9].OneofWrappers = []interface{}{ + file_proxy_service_proto_msgTypes[0].OneofWrappers = []interface{}{} + file_proxy_service_proto_msgTypes[10].OneofWrappers = []interface{}{ (*AuthenticateRequest_Password)(nil), (*AuthenticateRequest_Pin)(nil), } - file_proxy_service_proto_msgTypes[13].OneofWrappers = []interface{}{} - file_proxy_service_proto_msgTypes[16].OneofWrappers = []interface{}{} + file_proxy_service_proto_msgTypes[14].OneofWrappers = []interface{}{} + file_proxy_service_proto_msgTypes[17].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_proxy_service_proto_rawDesc, NumEnums: 3, - NumMessages: 22, + NumMessages: 23, NumExtensions: 0, NumServices: 1, }, diff --git a/shared/management/proto/proxy_service.proto b/shared/management/proto/proxy_service.proto index 195b60f01..457d12e85 100644 --- a/shared/management/proto/proxy_service.proto +++ b/shared/management/proto/proxy_service.proto @@ -27,12 +27,19 @@ service ProxyService { rpc ValidateSession(ValidateSessionRequest) returns (ValidateSessionResponse); } +// ProxyCapabilities describes what a proxy can handle. +message ProxyCapabilities { + // Whether the proxy can bind arbitrary ports for TCP/UDP/TLS services. + optional bool supports_custom_ports = 1; +} + // GetMappingUpdateRequest is sent to initialise a mapping stream. message GetMappingUpdateRequest { string proxy_id = 1; string version = 2; google.protobuf.Timestamp started_at = 3; string address = 4; + ProxyCapabilities capabilities = 5; } // GetMappingUpdateResponse contains zero or more ProxyMappings. @@ -61,6 +68,10 @@ message PathTargetOptions { google.protobuf.Duration request_timeout = 2; PathRewriteMode path_rewrite = 3; map custom_headers = 4; + // Send PROXY protocol v2 header to this backend. + bool proxy_protocol = 5; + // Idle timeout before a UDP session is reaped. + google.protobuf.Duration session_idle_timeout = 6; } message PathMapping { @@ -91,6 +102,10 @@ message ProxyMapping { // When true, Location headers in backend responses are rewritten to replace // the backend address with the public-facing domain. bool rewrite_redirects = 9; + // Service mode: "http", "tcp", "udp", or "tls". + string mode = 10; + // For L4/TLS: the port the proxy listens on. + int32 listen_port = 11; } // SendAccessLogRequest consists of one or more AccessLogs from a Proxy. @@ -117,6 +132,7 @@ message AccessLog { bool auth_success = 13; int64 bytes_upload = 14; int64 bytes_download = 15; + string protocol = 16; } message AuthenticateRequest { From 387e374e4b3250c88c263f994c582872922ceb03 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Mon, 16 Mar 2026 22:22:00 +0800 Subject: [PATCH 214/374] [proxy, management] Add header auth, access restrictions, and session idle timeout (#5587) --- .../reverseproxy/accesslogs/accesslogentry.go | 73 +- .../accesslogs/manager/manager.go | 3 + .../reverseproxy/service/manager/manager.go | 60 +- .../modules/reverseproxy/service/service.go | 340 +++- .../reverseproxy/service/service_test.go | 2 +- management/internals/shared/grpc/proxy.go | 69 +- management/server/geolocation/geolocation.go | 6 + proxy/Dockerfile | 2 +- proxy/Dockerfile.multistage | 2 +- proxy/auth/auth.go | 3 +- proxy/cmd/proxy/cmd/root.go | 69 +- proxy/internal/accesslog/logger.go | 147 +- proxy/internal/accesslog/middleware.go | 12 +- proxy/internal/auth/header.go | 69 + proxy/internal/auth/middleware.go | 241 ++- proxy/internal/auth/middleware_test.go | 380 +++- proxy/internal/geolocation/download.go | 264 +++ proxy/internal/geolocation/geolocation.go | 152 ++ proxy/internal/proxy/context.go | 110 +- proxy/internal/proxy/reverseproxy.go | 27 +- proxy/internal/proxy/reverseproxy_test.go | 103 +- proxy/internal/proxy/servicemapping.go | 29 +- proxy/internal/restrict/restrict.go | 183 ++ proxy/internal/restrict/restrict_test.go | 278 +++ proxy/internal/tcp/router.go | 122 +- proxy/internal/tcp/router_test.go | 71 + proxy/internal/udp/relay.go | 100 +- proxy/management_integration_test.go | 3 +- proxy/server.go | 188 +- shared/management/http/api/openapi.yml | 109 +- shared/management/http/api/types.gen.go | 41 +- shared/management/proto/proxy_service.pb.go | 1605 +++++++---------- shared/management/proto/proxy_service.proto | 22 + shared/relay/client/early_msg_buffer.go | 4 +- 34 files changed, 3509 insertions(+), 1380 deletions(-) create mode 100644 proxy/internal/auth/header.go create mode 100644 proxy/internal/geolocation/download.go create mode 100644 proxy/internal/geolocation/geolocation.go create mode 100644 proxy/internal/restrict/restrict.go create mode 100644 proxy/internal/restrict/restrict_test.go diff --git a/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go b/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go index 619a34684..a7f692569 100644 --- a/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go +++ b/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go @@ -20,22 +20,23 @@ const ( ) type AccessLogEntry struct { - ID string `gorm:"primaryKey"` - AccountID string `gorm:"index"` - ServiceID string `gorm:"index"` - Timestamp time.Time `gorm:"index"` - GeoLocation peer.Location `gorm:"embedded;embeddedPrefix:location_"` - Method string `gorm:"index"` - Host string `gorm:"index"` - Path string `gorm:"index"` - Duration time.Duration `gorm:"index"` - StatusCode int `gorm:"index"` - Reason string - UserId string `gorm:"index"` - AuthMethodUsed string `gorm:"index"` - BytesUpload int64 `gorm:"index"` - BytesDownload int64 `gorm:"index"` - Protocol AccessLogProtocol `gorm:"index"` + ID string `gorm:"primaryKey"` + AccountID string `gorm:"index"` + ServiceID string `gorm:"index"` + Timestamp time.Time `gorm:"index"` + GeoLocation peer.Location `gorm:"embedded;embeddedPrefix:location_"` + SubdivisionCode string + Method string `gorm:"index"` + Host string `gorm:"index"` + Path string `gorm:"index"` + Duration time.Duration `gorm:"index"` + StatusCode int `gorm:"index"` + Reason string + UserId string `gorm:"index"` + AuthMethodUsed string `gorm:"index"` + BytesUpload int64 `gorm:"index"` + BytesDownload int64 `gorm:"index"` + Protocol AccessLogProtocol `gorm:"index"` } // FromProto creates an AccessLogEntry from a proto.AccessLog @@ -105,6 +106,11 @@ func (a *AccessLogEntry) ToAPIResponse() *api.ProxyAccessLog { cityName = &a.GeoLocation.CityName } + var subdivisionCode *string + if a.SubdivisionCode != "" { + subdivisionCode = &a.SubdivisionCode + } + var protocol *string if a.Protocol != "" { p := string(a.Protocol) @@ -112,22 +118,23 @@ func (a *AccessLogEntry) ToAPIResponse() *api.ProxyAccessLog { } return &api.ProxyAccessLog{ - Id: a.ID, - ServiceId: a.ServiceID, - Timestamp: a.Timestamp, - Method: a.Method, - Host: a.Host, - Path: a.Path, - DurationMs: int(a.Duration.Milliseconds()), - StatusCode: a.StatusCode, - SourceIp: sourceIP, - Reason: reason, - UserId: userID, - AuthMethodUsed: authMethod, - CountryCode: countryCode, - CityName: cityName, - BytesUpload: a.BytesUpload, - BytesDownload: a.BytesDownload, - Protocol: protocol, + Id: a.ID, + ServiceId: a.ServiceID, + Timestamp: a.Timestamp, + Method: a.Method, + Host: a.Host, + Path: a.Path, + DurationMs: int(a.Duration.Milliseconds()), + StatusCode: a.StatusCode, + SourceIp: sourceIP, + Reason: reason, + UserId: userID, + AuthMethodUsed: authMethod, + CountryCode: countryCode, + CityName: cityName, + SubdivisionCode: subdivisionCode, + BytesUpload: a.BytesUpload, + BytesDownload: a.BytesDownload, + Protocol: protocol, } } diff --git a/management/internals/modules/reverseproxy/accesslogs/manager/manager.go b/management/internals/modules/reverseproxy/accesslogs/manager/manager.go index e7fba7bed..e8d0ce763 100644 --- a/management/internals/modules/reverseproxy/accesslogs/manager/manager.go +++ b/management/internals/modules/reverseproxy/accesslogs/manager/manager.go @@ -41,6 +41,9 @@ func (m *managerImpl) SaveAccessLog(ctx context.Context, logEntry *accesslogs.Ac logEntry.GeoLocation.CountryCode = location.Country.ISOCode logEntry.GeoLocation.CityName = location.City.Names.En logEntry.GeoLocation.GeoNameID = location.City.GeonameID + if len(location.Subdivisions) > 0 { + logEntry.SubdivisionCode = location.Subdivisions[0].ISOCode + } } } diff --git a/management/internals/modules/reverseproxy/service/manager/manager.go b/management/internals/modules/reverseproxy/service/manager/manager.go index c40961fdc..65177bf5d 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager.go +++ b/management/internals/modules/reverseproxy/service/manager/manager.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "math/rand/v2" + "net/http" "os" "slices" "strconv" @@ -229,6 +230,12 @@ func (m *Manager) initializeServiceForCreate(ctx context.Context, accountID stri return fmt.Errorf("hash secrets: %w", err) } + for i, h := range service.Auth.HeaderAuths { + if h != nil && h.Enabled && h.Value == "" { + return status.Errorf(status.InvalidArgument, "header_auths[%d]: value is required", i) + } + } + keyPair, err := sessionkey.GenerateKeyPair() if err != nil { return fmt.Errorf("generate session keys: %w", err) @@ -488,6 +495,9 @@ func (m *Manager) persistServiceUpdate(ctx context.Context, accountID string, se } m.preserveExistingAuthSecrets(service, existingService) + if err := validateHeaderAuthValues(service.Auth.HeaderAuths); err != nil { + return err + } m.preserveServiceMetadata(service, existingService) m.preserveListenPort(service, existingService) updateInfo.serviceEnabledChanged = existingService.Enabled != service.Enabled @@ -544,18 +554,52 @@ func isHTTPFamily(mode string) bool { return mode == "" || mode == "http" } -func (m *Manager) preserveExistingAuthSecrets(service, existingService *service.Service) { - if service.Auth.PasswordAuth != nil && service.Auth.PasswordAuth.Enabled && +func (m *Manager) preserveExistingAuthSecrets(svc, existingService *service.Service) { + if svc.Auth.PasswordAuth != nil && svc.Auth.PasswordAuth.Enabled && existingService.Auth.PasswordAuth != nil && existingService.Auth.PasswordAuth.Enabled && - service.Auth.PasswordAuth.Password == "" { - service.Auth.PasswordAuth = existingService.Auth.PasswordAuth + svc.Auth.PasswordAuth.Password == "" { + svc.Auth.PasswordAuth = existingService.Auth.PasswordAuth } - if service.Auth.PinAuth != nil && service.Auth.PinAuth.Enabled && + if svc.Auth.PinAuth != nil && svc.Auth.PinAuth.Enabled && existingService.Auth.PinAuth != nil && existingService.Auth.PinAuth.Enabled && - service.Auth.PinAuth.Pin == "" { - service.Auth.PinAuth = existingService.Auth.PinAuth + svc.Auth.PinAuth.Pin == "" { + svc.Auth.PinAuth = existingService.Auth.PinAuth } + + preserveHeaderAuthHashes(svc.Auth.HeaderAuths, existingService.Auth.HeaderAuths) +} + +// preserveHeaderAuthHashes fills in empty header auth values from the existing +// service so that unchanged secrets are not lost on update. +func preserveHeaderAuthHashes(headers, existing []*service.HeaderAuthConfig) { + if len(headers) == 0 || len(existing) == 0 { + return + } + existingByHeader := make(map[string]string, len(existing)) + for _, h := range existing { + if h != nil && h.Value != "" { + existingByHeader[http.CanonicalHeaderKey(h.Header)] = h.Value + } + } + for _, h := range headers { + if h != nil && h.Enabled && h.Value == "" { + if hash, ok := existingByHeader[http.CanonicalHeaderKey(h.Header)]; ok { + h.Value = hash + } + } + } +} + +// validateHeaderAuthValues checks that all enabled header auths have a value +// (either freshly provided or preserved from the existing service). +func validateHeaderAuthValues(headers []*service.HeaderAuthConfig) error { + for i, h := range headers { + if h != nil && h.Enabled && h.Value == "" { + return status.Errorf(status.InvalidArgument, "header_auths[%d]: value is required", i) + } + } + return nil } func (m *Manager) preserveServiceMetadata(service, existingService *service.Service) { @@ -605,6 +649,8 @@ func validateTargetReferences(ctx context.Context, transaction store.Store, acco } return fmt.Errorf("look up resource target %q: %w", target.TargetId, err) } + default: + return status.Errorf(status.InvalidArgument, "unknown target type %q for target %q", target.TargetType, target.TargetId) } } return nil diff --git a/management/internals/modules/reverseproxy/service/service.go b/management/internals/modules/reverseproxy/service/service.go index 623284404..6c7c80806 100644 --- a/management/internals/modules/reverseproxy/service/service.go +++ b/management/internals/modules/reverseproxy/service/service.go @@ -7,14 +7,15 @@ import ( "math/big" "net" "net/http" + "net/netip" "net/url" "regexp" + "slices" "strconv" "strings" "time" "github.com/rs/xid" - log "github.com/sirupsen/logrus" "google.golang.org/protobuf/types/known/durationpb" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" @@ -91,10 +92,37 @@ type BearerAuthConfig struct { DistributionGroups []string `json:"distribution_groups,omitempty" gorm:"serializer:json"` } +// HeaderAuthConfig defines a static header-value auth check. +// The proxy compares the incoming header value against the stored hash. +type HeaderAuthConfig struct { + Enabled bool `json:"enabled"` + Header string `json:"header"` + Value string `json:"value"` +} + type AuthConfig struct { PasswordAuth *PasswordAuthConfig `json:"password_auth,omitempty" gorm:"serializer:json"` PinAuth *PINAuthConfig `json:"pin_auth,omitempty" gorm:"serializer:json"` BearerAuth *BearerAuthConfig `json:"bearer_auth,omitempty" gorm:"serializer:json"` + HeaderAuths []*HeaderAuthConfig `json:"header_auths,omitempty" gorm:"serializer:json"` +} + +// AccessRestrictions controls who can connect to the service based on IP or geography. +type AccessRestrictions struct { + AllowedCIDRs []string `json:"allowed_cidrs,omitempty" gorm:"serializer:json"` + BlockedCIDRs []string `json:"blocked_cidrs,omitempty" gorm:"serializer:json"` + AllowedCountries []string `json:"allowed_countries,omitempty" gorm:"serializer:json"` + BlockedCountries []string `json:"blocked_countries,omitempty" gorm:"serializer:json"` +} + +// Copy returns a deep copy of the AccessRestrictions. +func (r AccessRestrictions) Copy() AccessRestrictions { + return AccessRestrictions{ + AllowedCIDRs: slices.Clone(r.AllowedCIDRs), + BlockedCIDRs: slices.Clone(r.BlockedCIDRs), + AllowedCountries: slices.Clone(r.AllowedCountries), + BlockedCountries: slices.Clone(r.BlockedCountries), + } } func (a *AuthConfig) HashSecrets() error { @@ -114,6 +142,16 @@ func (a *AuthConfig) HashSecrets() error { a.PinAuth.Pin = hashedPin } + for i, h := range a.HeaderAuths { + if h != nil && h.Enabled && h.Value != "" { + hashedValue, err := argon2id.Hash(h.Value) + if err != nil { + return fmt.Errorf("hash header auth[%d] value: %w", i, err) + } + h.Value = hashedValue + } + } + return nil } @@ -124,6 +162,11 @@ func (a *AuthConfig) ClearSecrets() { if a.PinAuth != nil { a.PinAuth.Pin = "" } + for _, h := range a.HeaderAuths { + if h != nil { + h.Value = "" + } + } } type Meta struct { @@ -143,12 +186,13 @@ type Service struct { Enabled bool PassHostHeader bool RewriteRedirects bool - Auth AuthConfig `gorm:"serializer:json"` - Meta Meta `gorm:"embedded;embeddedPrefix:meta_"` - SessionPrivateKey string `gorm:"column:session_private_key"` - SessionPublicKey string `gorm:"column:session_public_key"` - Source string `gorm:"default:'permanent';index:idx_service_source_peer"` - SourcePeer string `gorm:"index:idx_service_source_peer"` + Auth AuthConfig `gorm:"serializer:json"` + Restrictions AccessRestrictions `gorm:"serializer:json"` + Meta Meta `gorm:"embedded;embeddedPrefix:meta_"` + SessionPrivateKey string `gorm:"column:session_private_key"` + SessionPublicKey string `gorm:"column:session_public_key"` + Source string `gorm:"default:'permanent';index:idx_service_source_peer"` + SourcePeer string `gorm:"index:idx_service_source_peer"` // Mode determines the service type: "http", "tcp", "udp", or "tls". Mode string `gorm:"default:'http'"` ListenPort uint16 @@ -188,6 +232,20 @@ func (s *Service) ToAPIResponse() *api.Service { } } + if len(s.Auth.HeaderAuths) > 0 { + apiHeaders := make([]api.HeaderAuthConfig, 0, len(s.Auth.HeaderAuths)) + for _, h := range s.Auth.HeaderAuths { + if h == nil { + continue + } + apiHeaders = append(apiHeaders, api.HeaderAuthConfig{ + Enabled: h.Enabled, + Header: h.Header, + }) + } + authConfig.HeaderAuths = &apiHeaders + } + // Convert internal targets to API targets apiTargets := make([]api.ServiceTarget, 0, len(s.Targets)) for _, target := range s.Targets { @@ -222,18 +280,19 @@ func (s *Service) ToAPIResponse() *api.Service { listenPort := int(s.ListenPort) resp := &api.Service{ - Id: s.ID, - Name: s.Name, - Domain: s.Domain, - Targets: apiTargets, - Enabled: s.Enabled, - PassHostHeader: &s.PassHostHeader, - RewriteRedirects: &s.RewriteRedirects, - Auth: authConfig, - Meta: meta, - Mode: &mode, - ListenPort: &listenPort, - PortAutoAssigned: &s.PortAutoAssigned, + Id: s.ID, + Name: s.Name, + Domain: s.Domain, + Targets: apiTargets, + Enabled: s.Enabled, + PassHostHeader: &s.PassHostHeader, + RewriteRedirects: &s.RewriteRedirects, + Auth: authConfig, + AccessRestrictions: restrictionsToAPI(s.Restrictions), + Meta: meta, + Mode: &mode, + ListenPort: &listenPort, + PortAutoAssigned: &s.PortAutoAssigned, } if s.ProxyCluster != "" { @@ -263,7 +322,16 @@ func (s *Service) ToProtoMapping(operation Operation, authToken string, oidcConf auth.Oidc = true } - return &proto.ProxyMapping{ + for _, h := range s.Auth.HeaderAuths { + if h != nil && h.Enabled { + auth.HeaderAuths = append(auth.HeaderAuths, &proto.HeaderAuth{ + Header: h.Header, + HashedValue: h.Value, + }) + } + } + + mapping := &proto.ProxyMapping{ Type: operationToProtoType(operation), Id: s.ID, Domain: s.Domain, @@ -276,6 +344,12 @@ func (s *Service) ToProtoMapping(operation Operation, authToken string, oidcConf Mode: s.Mode, ListenPort: int32(s.ListenPort), //nolint:gosec } + + if r := restrictionsToProto(s.Restrictions); r != nil { + mapping.AccessRestrictions = r + } + + return mapping } // buildPathMappings constructs PathMapping entries from targets. @@ -334,8 +408,7 @@ func operationToProtoType(op Operation) proto.ProxyMappingUpdateType { case Delete: return proto.ProxyMappingUpdateType_UPDATE_TYPE_REMOVED default: - log.Fatalf("unknown operation type: %v", op) - return proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED + panic(fmt.Sprintf("unknown operation type: %v", op)) } } @@ -477,6 +550,10 @@ func (s *Service) FromAPIRequest(req *api.ServiceRequest, accountID string) erro s.Auth = authFromAPI(req.Auth) } + if req.AccessRestrictions != nil { + s.Restrictions = restrictionsFromAPI(req.AccessRestrictions) + } + return nil } @@ -538,9 +615,70 @@ func authFromAPI(reqAuth *api.ServiceAuthConfig) AuthConfig { } auth.BearerAuth = bearerAuth } + if reqAuth.HeaderAuths != nil { + for _, h := range *reqAuth.HeaderAuths { + auth.HeaderAuths = append(auth.HeaderAuths, &HeaderAuthConfig{ + Enabled: h.Enabled, + Header: h.Header, + Value: h.Value, + }) + } + } return auth } +func restrictionsFromAPI(r *api.AccessRestrictions) AccessRestrictions { + if r == nil { + return AccessRestrictions{} + } + var res AccessRestrictions + if r.AllowedCidrs != nil { + res.AllowedCIDRs = *r.AllowedCidrs + } + if r.BlockedCidrs != nil { + res.BlockedCIDRs = *r.BlockedCidrs + } + if r.AllowedCountries != nil { + res.AllowedCountries = *r.AllowedCountries + } + if r.BlockedCountries != nil { + res.BlockedCountries = *r.BlockedCountries + } + return res +} + +func restrictionsToAPI(r AccessRestrictions) *api.AccessRestrictions { + if len(r.AllowedCIDRs) == 0 && len(r.BlockedCIDRs) == 0 && len(r.AllowedCountries) == 0 && len(r.BlockedCountries) == 0 { + return nil + } + res := &api.AccessRestrictions{} + if len(r.AllowedCIDRs) > 0 { + res.AllowedCidrs = &r.AllowedCIDRs + } + if len(r.BlockedCIDRs) > 0 { + res.BlockedCidrs = &r.BlockedCIDRs + } + if len(r.AllowedCountries) > 0 { + res.AllowedCountries = &r.AllowedCountries + } + if len(r.BlockedCountries) > 0 { + res.BlockedCountries = &r.BlockedCountries + } + return res +} + +func restrictionsToProto(r AccessRestrictions) *proto.AccessRestrictions { + if len(r.AllowedCIDRs) == 0 && len(r.BlockedCIDRs) == 0 && len(r.AllowedCountries) == 0 && len(r.BlockedCountries) == 0 { + return nil + } + return &proto.AccessRestrictions{ + AllowedCidrs: r.AllowedCIDRs, + BlockedCidrs: r.BlockedCIDRs, + AllowedCountries: r.AllowedCountries, + BlockedCountries: r.BlockedCountries, + } +} + func (s *Service) Validate() error { if s.Name == "" { return errors.New("service name is required") @@ -557,6 +695,13 @@ func (s *Service) Validate() error { s.Mode = ModeHTTP } + if err := validateHeaderAuths(s.Auth.HeaderAuths); err != nil { + return err + } + if err := validateAccessRestrictions(&s.Restrictions); err != nil { + return err + } + switch s.Mode { case ModeHTTP: return s.validateHTTPMode() @@ -657,6 +802,21 @@ func (s *Service) validateL4Target(target *Target) error { if target.Path != nil && *target.Path != "" && *target.Path != "/" { return errors.New("path is not supported for L4 services") } + if target.Options.SessionIdleTimeout < 0 { + return errors.New("session_idle_timeout must be positive for L4 services") + } + if target.Options.RequestTimeout < 0 { + return errors.New("request_timeout must be positive for L4 services") + } + if target.Options.SkipTLSVerify { + return errors.New("skip_tls_verify is not supported for L4 services") + } + if target.Options.PathRewrite != "" { + return errors.New("path_rewrite is not supported for L4 services") + } + if len(target.Options.CustomHeaders) > 0 { + return errors.New("custom_headers is not supported for L4 services") + } return nil } @@ -688,11 +848,9 @@ func IsPortBasedProtocol(mode string) bool { } const ( - maxRequestTimeout = 5 * time.Minute - maxSessionIdleTimeout = 10 * time.Minute - maxCustomHeaders = 16 - maxHeaderKeyLen = 128 - maxHeaderValueLen = 4096 + maxCustomHeaders = 16 + maxHeaderKeyLen = 128 + maxHeaderValueLen = 4096 ) // httpHeaderNameRe matches valid HTTP header field names per RFC 7230 token definition. @@ -731,22 +889,12 @@ func validateTargetOptions(idx int, opts *TargetOptions) error { return fmt.Errorf("target %d: unknown path_rewrite mode %q", idx, opts.PathRewrite) } - if opts.RequestTimeout != 0 { - if opts.RequestTimeout <= 0 { - return fmt.Errorf("target %d: request_timeout must be positive", idx) - } - if opts.RequestTimeout > maxRequestTimeout { - return fmt.Errorf("target %d: request_timeout exceeds maximum of %s", idx, maxRequestTimeout) - } + if opts.RequestTimeout < 0 { + return fmt.Errorf("target %d: request_timeout must be positive", idx) } - if opts.SessionIdleTimeout != 0 { - if opts.SessionIdleTimeout <= 0 { - return fmt.Errorf("target %d: session_idle_timeout must be positive", idx) - } - if opts.SessionIdleTimeout > maxSessionIdleTimeout { - return fmt.Errorf("target %d: session_idle_timeout exceeds maximum of %s", idx, maxSessionIdleTimeout) - } + if opts.SessionIdleTimeout < 0 { + return fmt.Errorf("target %d: session_idle_timeout must be positive", idx) } if err := validateCustomHeaders(idx, opts.CustomHeaders); err != nil { @@ -796,6 +944,93 @@ func containsCRLF(s string) bool { return strings.ContainsAny(s, "\r\n") } +func validateHeaderAuths(headers []*HeaderAuthConfig) error { + seen := make(map[string]struct{}) + for i, h := range headers { + if h == nil || !h.Enabled { + continue + } + if h.Header == "" { + return fmt.Errorf("header_auths[%d]: header name is required", i) + } + if !httpHeaderNameRe.MatchString(h.Header) { + return fmt.Errorf("header_auths[%d]: header name %q is not a valid HTTP header name", i, h.Header) + } + canonical := http.CanonicalHeaderKey(h.Header) + if _, ok := hopByHopHeaders[canonical]; ok { + return fmt.Errorf("header_auths[%d]: header %q is a hop-by-hop header and cannot be used for auth", i, h.Header) + } + if _, ok := reservedHeaders[canonical]; ok { + return fmt.Errorf("header_auths[%d]: header %q is managed by the proxy and cannot be used for auth", i, h.Header) + } + if canonical == "Host" { + return fmt.Errorf("header_auths[%d]: Host header cannot be used for auth", i) + } + if _, dup := seen[canonical]; dup { + return fmt.Errorf("header_auths[%d]: duplicate header %q (same canonical form already configured)", i, h.Header) + } + seen[canonical] = struct{}{} + if len(h.Value) > maxHeaderValueLen { + return fmt.Errorf("header_auths[%d]: value exceeds maximum length of %d", i, maxHeaderValueLen) + } + } + return nil +} + +const ( + maxCIDREntries = 200 + maxCountryEntries = 50 +) + +// validateAccessRestrictions validates and normalizes access restriction +// entries. Country codes are uppercased in place. +func validateAccessRestrictions(r *AccessRestrictions) error { + if len(r.AllowedCIDRs) > maxCIDREntries { + return fmt.Errorf("allowed_cidrs: exceeds maximum of %d entries", maxCIDREntries) + } + if len(r.BlockedCIDRs) > maxCIDREntries { + return fmt.Errorf("blocked_cidrs: exceeds maximum of %d entries", maxCIDREntries) + } + if len(r.AllowedCountries) > maxCountryEntries { + return fmt.Errorf("allowed_countries: exceeds maximum of %d entries", maxCountryEntries) + } + if len(r.BlockedCountries) > maxCountryEntries { + return fmt.Errorf("blocked_countries: exceeds maximum of %d entries", maxCountryEntries) + } + + for i, raw := range r.AllowedCIDRs { + prefix, err := netip.ParsePrefix(raw) + if err != nil { + return fmt.Errorf("allowed_cidrs[%d]: %w", i, err) + } + if prefix != prefix.Masked() { + return fmt.Errorf("allowed_cidrs[%d]: %q has host bits set, use %s instead", i, raw, prefix.Masked()) + } + } + for i, raw := range r.BlockedCIDRs { + prefix, err := netip.ParsePrefix(raw) + if err != nil { + return fmt.Errorf("blocked_cidrs[%d]: %w", i, err) + } + if prefix != prefix.Masked() { + return fmt.Errorf("blocked_cidrs[%d]: %q has host bits set, use %s instead", i, raw, prefix.Masked()) + } + } + for i, code := range r.AllowedCountries { + if len(code) != 2 { + return fmt.Errorf("allowed_countries[%d]: %q must be a 2-letter ISO 3166-1 alpha-2 code", i, code) + } + r.AllowedCountries[i] = strings.ToUpper(code) + } + for i, code := range r.BlockedCountries { + if len(code) != 2 { + return fmt.Errorf("blocked_countries[%d]: %q must be a 2-letter ISO 3166-1 alpha-2 code", i, code) + } + r.BlockedCountries[i] = strings.ToUpper(code) + } + return nil +} + func (s *Service) EventMeta() map[string]any { meta := map[string]any{ "name": s.Name, @@ -827,9 +1062,17 @@ func (s *Service) EventMeta() map[string]any { } func (s *Service) isAuthEnabled() bool { - return (s.Auth.PasswordAuth != nil && s.Auth.PasswordAuth.Enabled) || + if (s.Auth.PasswordAuth != nil && s.Auth.PasswordAuth.Enabled) || (s.Auth.PinAuth != nil && s.Auth.PinAuth.Enabled) || - (s.Auth.BearerAuth != nil && s.Auth.BearerAuth.Enabled) + (s.Auth.BearerAuth != nil && s.Auth.BearerAuth.Enabled) { + return true + } + for _, h := range s.Auth.HeaderAuths { + if h != nil && h.Enabled { + return true + } + } + return false } func (s *Service) Copy() *Service { @@ -866,6 +1109,16 @@ func (s *Service) Copy() *Service { } authCopy.BearerAuth = &ba } + if len(s.Auth.HeaderAuths) > 0 { + authCopy.HeaderAuths = make([]*HeaderAuthConfig, len(s.Auth.HeaderAuths)) + for i, h := range s.Auth.HeaderAuths { + if h == nil { + continue + } + hCopy := *h + authCopy.HeaderAuths[i] = &hCopy + } + } return &Service{ ID: s.ID, @@ -878,6 +1131,7 @@ func (s *Service) Copy() *Service { PassHostHeader: s.PassHostHeader, RewriteRedirects: s.RewriteRedirects, Auth: authCopy, + Restrictions: s.Restrictions.Copy(), Meta: s.Meta, SessionPrivateKey: s.SessionPrivateKey, SessionPublicKey: s.SessionPublicKey, diff --git a/management/internals/modules/reverseproxy/service/service_test.go b/management/internals/modules/reverseproxy/service/service_test.go index a8a8ae5d6..9daf729fe 100644 --- a/management/internals/modules/reverseproxy/service/service_test.go +++ b/management/internals/modules/reverseproxy/service/service_test.go @@ -120,9 +120,9 @@ func TestValidateTargetOptions_RequestTimeout(t *testing.T) { }{ {"valid 30s", 30 * time.Second, ""}, {"valid 2m", 2 * time.Minute, ""}, + {"valid 10m", 10 * time.Minute, ""}, {"zero is fine", 0, ""}, {"negative", -1 * time.Second, "must be positive"}, - {"exceeds max", 10 * time.Minute, "exceeds maximum"}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/management/internals/shared/grpc/proxy.go b/management/internals/shared/grpc/proxy.go index 31a0ba0db..fd993fb40 100644 --- a/management/internals/shared/grpc/proxy.go +++ b/management/internals/shared/grpc/proxy.go @@ -9,6 +9,7 @@ import ( "encoding/hex" "errors" "fmt" + "net/http" "net/url" "strings" "sync" @@ -493,16 +494,17 @@ func (s *ProxyServiceServer) perProxyMessage(update *proto.GetMappingUpdateRespo // should be set on the copy. func shallowCloneMapping(m *proto.ProxyMapping) *proto.ProxyMapping { return &proto.ProxyMapping{ - Type: m.Type, - Id: m.Id, - AccountId: m.AccountId, - Domain: m.Domain, - Path: m.Path, - Auth: m.Auth, - PassHostHeader: m.PassHostHeader, - RewriteRedirects: m.RewriteRedirects, - Mode: m.Mode, - ListenPort: m.ListenPort, + Type: m.Type, + Id: m.Id, + AccountId: m.AccountId, + Domain: m.Domain, + Path: m.Path, + Auth: m.Auth, + PassHostHeader: m.PassHostHeader, + RewriteRedirects: m.RewriteRedirects, + Mode: m.Mode, + ListenPort: m.ListenPort, + AccessRestrictions: m.AccessRestrictions, } } @@ -561,6 +563,8 @@ func (s *ProxyServiceServer) authenticateRequest(ctx context.Context, req *proto return s.authenticatePIN(ctx, req.GetId(), v, service.Auth.PinAuth) case *proto.AuthenticateRequest_Password: return s.authenticatePassword(ctx, req.GetId(), v, service.Auth.PasswordAuth) + case *proto.AuthenticateRequest_HeaderAuth: + return s.authenticateHeader(ctx, req.GetId(), v, service.Auth.HeaderAuths) default: return false, "", "" } @@ -594,6 +598,35 @@ func (s *ProxyServiceServer) authenticatePassword(ctx context.Context, serviceID return true, "password-user", proxyauth.MethodPassword } +func (s *ProxyServiceServer) authenticateHeader(ctx context.Context, serviceID string, req *proto.AuthenticateRequest_HeaderAuth, auths []*rpservice.HeaderAuthConfig) (bool, string, proxyauth.Method) { + if len(auths) == 0 { + log.WithContext(ctx).Debugf("header authentication attempted but no header auths configured for service %s", serviceID) + return false, "", "" + } + + headerName := http.CanonicalHeaderKey(req.HeaderAuth.GetHeaderName()) + + var lastErr error + for _, auth := range auths { + if auth == nil || !auth.Enabled { + continue + } + if headerName != "" && http.CanonicalHeaderKey(auth.Header) != headerName { + continue + } + if err := argon2id.Verify(req.HeaderAuth.GetHeaderValue(), auth.Value); err != nil { + lastErr = err + continue + } + return true, "header-user", proxyauth.MethodHeader + } + + if lastErr != nil { + s.logAuthenticationError(ctx, lastErr, "Header") + } + return false, "", "" +} + func (s *ProxyServiceServer) logAuthenticationError(ctx context.Context, err error, authType string) { if errors.Is(err, argon2id.ErrMismatchedHashAndPassword) { log.WithContext(ctx).Tracef("%s authentication failed: invalid credentials", authType) @@ -752,6 +785,9 @@ func (s *ProxyServiceServer) GetOIDCURL(ctx context.Context, req *proto.GetOIDCU if err != nil { return nil, status.Errorf(codes.InvalidArgument, "parse redirect url: %v", err) } + if redirectURL.Scheme != "https" && redirectURL.Scheme != "http" { + return nil, status.Errorf(codes.InvalidArgument, "redirect URL must use http or https scheme") + } // Validate redirectURL against known service endpoints to avoid abuse of OIDC redirection. services, err := s.serviceManager.GetAccountServices(ctx, req.GetAccountId()) if err != nil { @@ -836,12 +872,9 @@ func (s *ProxyServiceServer) generateHMAC(input string) string { // ValidateState validates the state parameter from an OAuth callback. // Returns the original redirect URL if valid, or an error if invalid. +// The HMAC is verified before consuming the PKCE verifier to prevent +// an attacker from invalidating a legitimate user's auth flow. func (s *ProxyServiceServer) ValidateState(state string) (verifier, redirectURL string, err error) { - verifier, ok := s.pkceVerifierStore.LoadAndDelete(state) - if !ok { - return "", "", errors.New("no verifier for state") - } - // State format: base64(redirectURL)|nonce|hmac(redirectURL|nonce) parts := strings.Split(state, "|") if len(parts) != 3 { @@ -865,6 +898,12 @@ func (s *ProxyServiceServer) ValidateState(state string) (verifier, redirectURL return "", "", errors.New("invalid state signature") } + // Consume the PKCE verifier only after HMAC validation passes. + verifier, ok := s.pkceVerifierStore.LoadAndDelete(state) + if !ok { + return "", "", errors.New("no verifier for state") + } + return verifier, redirectURL, nil } diff --git a/management/server/geolocation/geolocation.go b/management/server/geolocation/geolocation.go index c0179a1c4..30fd493e8 100644 --- a/management/server/geolocation/geolocation.go +++ b/management/server/geolocation/geolocation.go @@ -44,6 +44,12 @@ type Record struct { GeonameID uint `maxminddb:"geoname_id"` ISOCode string `maxminddb:"iso_code"` } `maxminddb:"country"` + Subdivisions []struct { + ISOCode string `maxminddb:"iso_code"` + Names struct { + En string `maxminddb:"en"` + } `maxminddb:"names"` + } `maxminddb:"subdivisions"` } type City struct { diff --git a/proxy/Dockerfile b/proxy/Dockerfile index 096c71f21..e64680fd6 100644 --- a/proxy/Dockerfile +++ b/proxy/Dockerfile @@ -10,7 +10,7 @@ FROM gcr.io/distroless/base:debug COPY netbird-proxy /go/bin/netbird-proxy COPY --from=builder /tmp/passwd /etc/passwd COPY --from=builder /tmp/group /etc/group -COPY --from=builder /tmp/var/lib/netbird /var/lib/netbird +COPY --from=builder --chown=1000:1000 /tmp/var/lib/netbird /var/lib/netbird COPY --from=builder --chown=1000:1000 --chmod=755 /tmp/certs /certs USER netbird:netbird ENV HOME=/var/lib/netbird diff --git a/proxy/Dockerfile.multistage b/proxy/Dockerfile.multistage index 2e3ac3561..01e342c0e 100644 --- a/proxy/Dockerfile.multistage +++ b/proxy/Dockerfile.multistage @@ -28,7 +28,7 @@ FROM gcr.io/distroless/base:debug COPY --from=builder /app/netbird-proxy /usr/bin/netbird-proxy COPY --from=builder /tmp/passwd /etc/passwd COPY --from=builder /tmp/group /etc/group -COPY --from=builder /tmp/var/lib/netbird /var/lib/netbird +COPY --from=builder --chown=1000:1000 /tmp/var/lib/netbird /var/lib/netbird COPY --from=builder --chown=1000:1000 --chmod=755 /tmp/certs /certs USER netbird:netbird ENV HOME=/var/lib/netbird diff --git a/proxy/auth/auth.go b/proxy/auth/auth.go index 14caa03b3..ca9c260b7 100644 --- a/proxy/auth/auth.go +++ b/proxy/auth/auth.go @@ -13,10 +13,11 @@ import ( type Method string -var ( +const ( MethodPassword Method = "password" MethodPIN Method = "pin" MethodOIDC Method = "oidc" + MethodHeader Method = "header" ) func (m Method) String() string { diff --git a/proxy/cmd/proxy/cmd/root.go b/proxy/cmd/proxy/cmd/root.go index d82f5b7fc..a2252cc20 100644 --- a/proxy/cmd/proxy/cmd/root.go +++ b/proxy/cmd/proxy/cmd/root.go @@ -36,31 +36,33 @@ var ( var ( logLevel string - debugLogs bool - mgmtAddr string - addr string - proxyDomain string - defaultDialTimeout time.Duration - certDir string - acmeCerts bool - acmeAddr string - acmeDir string - acmeEABKID string - acmeEABHMACKey string - acmeChallengeType string - debugEndpoint bool - debugEndpointAddr string - healthAddr string - forwardedProto string - trustedProxies string - certFile string - certKeyFile string - certLockMethod string - wildcardCertDir string - wgPort uint16 - proxyProtocol bool - preSharedKey string - supportsCustomPorts bool + debugLogs bool + mgmtAddr string + addr string + proxyDomain string + maxDialTimeout time.Duration + maxSessionIdleTimeout time.Duration + certDir string + acmeCerts bool + acmeAddr string + acmeDir string + acmeEABKID string + acmeEABHMACKey string + acmeChallengeType string + debugEndpoint bool + debugEndpointAddr string + healthAddr string + forwardedProto string + trustedProxies string + certFile string + certKeyFile string + certLockMethod string + wildcardCertDir string + wgPort uint16 + proxyProtocol bool + preSharedKey string + supportsCustomPorts bool + geoDataDir string ) var rootCmd = &cobra.Command{ @@ -99,7 +101,9 @@ func init() { rootCmd.Flags().BoolVar(&proxyProtocol, "proxy-protocol", envBoolOrDefault("NB_PROXY_PROXY_PROTOCOL", false), "Enable PROXY protocol on TCP listeners to preserve client IPs behind L4 proxies") rootCmd.Flags().StringVar(&preSharedKey, "preshared-key", envStringOrDefault("NB_PROXY_PRESHARED_KEY", ""), "Define a pre-shared key for the tunnel between proxy and peers") rootCmd.Flags().BoolVar(&supportsCustomPorts, "supports-custom-ports", envBoolOrDefault("NB_PROXY_SUPPORTS_CUSTOM_PORTS", true), "Whether the proxy can bind arbitrary ports for UDP/TCP passthrough") - rootCmd.Flags().DurationVar(&defaultDialTimeout, "default-dial-timeout", envDurationOrDefault("NB_PROXY_DEFAULT_DIAL_TIMEOUT", 0), "Default backend dial timeout when no per-service timeout is set (e.g. 30s)") + rootCmd.Flags().DurationVar(&maxDialTimeout, "max-dial-timeout", envDurationOrDefault("NB_PROXY_MAX_DIAL_TIMEOUT", 0), "Cap per-service backend dial timeout (0 = no cap)") + rootCmd.Flags().DurationVar(&maxSessionIdleTimeout, "max-session-idle-timeout", envDurationOrDefault("NB_PROXY_MAX_SESSION_IDLE_TIMEOUT", 0), "Cap per-service session idle timeout (0 = no cap)") + rootCmd.Flags().StringVar(&geoDataDir, "geo-data-dir", envStringOrDefault("NB_PROXY_GEO_DATA_DIR", "/var/lib/netbird/geolocation"), "Directory for the GeoLite2 MMDB file (auto-downloaded if missing)") } // Execute runs the root command. @@ -177,17 +181,15 @@ func runServer(cmd *cobra.Command, args []string) error { ProxyProtocol: proxyProtocol, PreSharedKey: preSharedKey, SupportsCustomPorts: supportsCustomPorts, - DefaultDialTimeout: defaultDialTimeout, + MaxDialTimeout: maxDialTimeout, + MaxSessionIdleTimeout: maxSessionIdleTimeout, + GeoDataDir: geoDataDir, } ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM, syscall.SIGINT) defer stop() - if err := srv.ListenAndServe(ctx, addr); err != nil { - logger.Error(err) - return err - } - return nil + return srv.ListenAndServe(ctx, addr) } func envBoolOrDefault(key string, def bool) bool { @@ -197,6 +199,7 @@ func envBoolOrDefault(key string, def bool) bool { } parsed, err := strconv.ParseBool(v) if err != nil { + log.Warnf("parse %s=%q: %v, using default %v", key, v, err, def) return def } return parsed @@ -217,6 +220,7 @@ func envUint16OrDefault(key string, def uint16) uint16 { } parsed, err := strconv.ParseUint(v, 10, 16) if err != nil { + log.Warnf("parse %s=%q: %v, using default %d", key, v, err, def) return def } return uint16(parsed) @@ -229,6 +233,7 @@ func envDurationOrDefault(key string, def time.Duration) time.Duration { } parsed, err := time.ParseDuration(v) if err != nil { + log.Warnf("parse %s=%q: %v, using default %s", key, v, err, def) return def } return parsed diff --git a/proxy/internal/accesslog/logger.go b/proxy/internal/accesslog/logger.go index 5b05ab195..3ed3275b5 100644 --- a/proxy/internal/accesslog/logger.go +++ b/proxy/internal/accesslog/logger.go @@ -4,6 +4,7 @@ import ( "context" "net/netip" "sync" + "sync/atomic" "time" "github.com/rs/xid" @@ -22,6 +23,16 @@ const ( usageCleanupPeriod = 1 * time.Hour // Clean up stale counters every hour usageInactiveWindow = 24 * time.Hour // Consider domain inactive if no traffic for 24 hours logSendTimeout = 10 * time.Second + + // denyCooldown is the min interval between deny log entries per service+reason + // to prevent flooding from denied connections (e.g. UDP packets from blocked IPs). + denyCooldown = 10 * time.Second + + // maxDenyBuckets caps tracked deny rate-limit entries to bound memory under DDoS. + maxDenyBuckets = 10000 + + // maxLogWorkers caps concurrent gRPC send goroutines. + maxLogWorkers = 4096 ) type domainUsage struct { @@ -38,6 +49,18 @@ type gRPCClient interface { SendAccessLog(ctx context.Context, in *proto.SendAccessLogRequest, opts ...grpc.CallOption) (*proto.SendAccessLogResponse, error) } +// denyBucketKey identifies a rate-limited deny log stream. +type denyBucketKey struct { + ServiceID types.ServiceID + Reason string +} + +// denyBucket tracks rate-limited deny log entries. +type denyBucket struct { + lastLogged time.Time + suppressed int64 +} + // Logger sends access log entries to the management server via gRPC. type Logger struct { client gRPCClient @@ -47,7 +70,12 @@ type Logger struct { usageMux sync.Mutex domainUsage map[string]*domainUsage + denyMu sync.Mutex + denyBuckets map[denyBucketKey]*denyBucket + + logSem chan struct{} cleanupCancel context.CancelFunc + dropped atomic.Int64 } // NewLogger creates a new access log Logger. The trustedProxies parameter @@ -64,6 +92,8 @@ func NewLogger(client gRPCClient, logger *log.Logger, trustedProxies []netip.Pre logger: logger, trustedProxies: trustedProxies, domainUsage: make(map[string]*domainUsage), + denyBuckets: make(map[denyBucketKey]*denyBucket), + logSem: make(chan struct{}, maxLogWorkers), cleanupCancel: cancel, } @@ -83,7 +113,7 @@ func (l *Logger) Close() { type logEntry struct { ID string AccountID types.AccountID - ServiceId types.ServiceID + ServiceID types.ServiceID Host string Path string DurationMs int64 @@ -91,7 +121,7 @@ type logEntry struct { ResponseCode int32 SourceIP netip.Addr AuthMechanism string - UserId string + UserID string AuthSuccess bool BytesUpload int64 BytesDownload int64 @@ -118,6 +148,10 @@ type L4Entry struct { DurationMs int64 BytesUpload int64 BytesDownload int64 + // DenyReason, when non-empty, indicates the connection was denied. + // Values match the HTTP auth mechanism strings: "ip_restricted", + // "country_restricted", "geo_unavailable". + DenyReason string } // LogL4 sends an access log entry for a layer-4 connection (TCP or UDP). @@ -126,7 +160,7 @@ func (l *Logger) LogL4(entry L4Entry) { le := logEntry{ ID: xid.New().String(), AccountID: entry.AccountID, - ServiceId: entry.ServiceID, + ServiceID: entry.ServiceID, Protocol: entry.Protocol, Host: entry.Host, SourceIP: entry.SourceIP, @@ -134,10 +168,47 @@ func (l *Logger) LogL4(entry L4Entry) { BytesUpload: entry.BytesUpload, BytesDownload: entry.BytesDownload, } + if entry.DenyReason != "" { + if !l.allowDenyLog(entry.ServiceID, entry.DenyReason) { + return + } + le.AuthMechanism = entry.DenyReason + le.AuthSuccess = false + } l.log(le) l.trackUsage(entry.Host, entry.BytesUpload+entry.BytesDownload) } +// allowDenyLog rate-limits deny log entries per service+reason combination. +func (l *Logger) allowDenyLog(serviceID types.ServiceID, reason string) bool { + key := denyBucketKey{ServiceID: serviceID, Reason: reason} + now := time.Now() + + l.denyMu.Lock() + defer l.denyMu.Unlock() + + b, ok := l.denyBuckets[key] + if !ok { + if len(l.denyBuckets) >= maxDenyBuckets { + return false + } + l.denyBuckets[key] = &denyBucket{lastLogged: now} + return true + } + + if now.Sub(b.lastLogged) >= denyCooldown { + if b.suppressed > 0 { + l.logger.Debugf("access restriction: suppressed %d deny log entries for %s (%s)", b.suppressed, serviceID, reason) + } + b.lastLogged = now + b.suppressed = 0 + return true + } + + b.suppressed++ + return false +} + func (l *Logger) log(entry logEntry) { // Fire off the log request in a separate routine. // This increases the possibility of losing a log message @@ -147,12 +218,21 @@ func (l *Logger) log(entry logEntry) { // There is also a chance that log messages will arrive at // the server out of order; however, the timestamp should // allow for resolving that on the server. - now := timestamppb.Now() // Grab the timestamp before launching the goroutine to try to prevent weird timing issues. This is probably unnecessary. + now := timestamppb.Now() + select { + case l.logSem <- struct{}{}: + default: + total := l.dropped.Add(1) + l.logger.Debugf("access log send dropped: worker limit reached (total dropped: %d)", total) + return + } go func() { + defer func() { <-l.logSem }() logCtx, cancel := context.WithTimeout(context.Background(), logSendTimeout) defer cancel() + // Only OIDC sessions have a meaningful user identity. if entry.AuthMechanism != auth.MethodOIDC.String() { - entry.UserId = "" + entry.UserID = "" } var sourceIP string @@ -165,7 +245,7 @@ func (l *Logger) log(entry logEntry) { LogId: entry.ID, AccountId: string(entry.AccountID), Timestamp: now, - ServiceId: string(entry.ServiceId), + ServiceId: string(entry.ServiceID), Host: entry.Host, Path: entry.Path, DurationMs: entry.DurationMs, @@ -173,7 +253,7 @@ func (l *Logger) log(entry logEntry) { ResponseCode: entry.ResponseCode, SourceIp: sourceIP, AuthMechanism: entry.AuthMechanism, - UserId: entry.UserId, + UserId: entry.UserID, AuthSuccess: entry.AuthSuccess, BytesUpload: entry.BytesUpload, BytesDownload: entry.BytesDownload, @@ -181,7 +261,7 @@ func (l *Logger) log(entry logEntry) { }, }); err != nil { l.logger.WithFields(log.Fields{ - "service_id": entry.ServiceId, + "service_id": entry.ServiceID, "host": entry.Host, "path": entry.Path, "duration": entry.DurationMs, @@ -189,7 +269,7 @@ func (l *Logger) log(entry logEntry) { "response_code": entry.ResponseCode, "source_ip": sourceIP, "auth_mechanism": entry.AuthMechanism, - "user_id": entry.UserId, + "user_id": entry.UserID, "auth_success": entry.AuthSuccess, "error": err, }).Error("Error sending access log on gRPC connection") @@ -248,7 +328,7 @@ func (l *Logger) trackUsage(domain string, bytesTransferred int64) { } } -// cleanupStaleUsage removes usage entries for domains that have been inactive. +// cleanupStaleUsage removes usage and deny-rate-limit entries that have been inactive. func (l *Logger) cleanupStaleUsage(ctx context.Context) { ticker := time.NewTicker(usageCleanupPeriod) defer ticker.Stop() @@ -258,20 +338,41 @@ func (l *Logger) cleanupStaleUsage(ctx context.Context) { case <-ctx.Done(): return case <-ticker.C: - l.usageMux.Lock() now := time.Now() - removed := 0 - for domain, usage := range l.domainUsage { - if now.Sub(usage.lastActivity) > usageInactiveWindow { - delete(l.domainUsage, domain) - removed++ - } - } - l.usageMux.Unlock() - - if removed > 0 { - l.logger.Debugf("cleaned up %d stale domain usage entries", removed) - } + l.cleanupDomainUsage(now) + l.cleanupDenyBuckets(now) } } } + +func (l *Logger) cleanupDomainUsage(now time.Time) { + l.usageMux.Lock() + defer l.usageMux.Unlock() + + removed := 0 + for domain, usage := range l.domainUsage { + if now.Sub(usage.lastActivity) > usageInactiveWindow { + delete(l.domainUsage, domain) + removed++ + } + } + if removed > 0 { + l.logger.Debugf("cleaned up %d stale domain usage entries", removed) + } +} + +func (l *Logger) cleanupDenyBuckets(now time.Time) { + l.denyMu.Lock() + defer l.denyMu.Unlock() + + removed := 0 + for key, bucket := range l.denyBuckets { + if now.Sub(bucket.lastLogged) > usageInactiveWindow { + delete(l.denyBuckets, key) + removed++ + } + } + if removed > 0 { + l.logger.Debugf("cleaned up %d stale deny rate-limit entries", removed) + } +} diff --git a/proxy/internal/accesslog/middleware.go b/proxy/internal/accesslog/middleware.go index 593a77ef2..81c790b17 100644 --- a/proxy/internal/accesslog/middleware.go +++ b/proxy/internal/accesslog/middleware.go @@ -13,6 +13,7 @@ import ( "github.com/netbirdio/netbird/proxy/web" ) +// Middleware wraps an HTTP handler to log access entries and resolve client IPs. func (l *Logger) Middleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Skip logging for internal proxy assets (CSS, JS, etc.) @@ -47,8 +48,9 @@ func (l *Logger) Middleware(next http.Handler) http.Handler { // Create a mutable struct to capture data from downstream handlers. // We pass a pointer in the context - the pointer itself flows down immutably, // but the struct it points to can be mutated by inner handlers. - capturedData := &proxy.CapturedData{RequestID: requestID} + capturedData := proxy.NewCapturedData(requestID) capturedData.SetClientIP(sourceIp) + ctx := proxy.WithCapturedData(r.Context(), capturedData) start := time.Now() @@ -66,8 +68,8 @@ func (l *Logger) Middleware(next http.Handler) http.Handler { entry := logEntry{ ID: requestID, - ServiceId: capturedData.GetServiceId(), - AccountID: capturedData.GetAccountId(), + ServiceID: capturedData.GetServiceID(), + AccountID: capturedData.GetAccountID(), Host: host, Path: r.URL.Path, DurationMs: duration.Milliseconds(), @@ -75,14 +77,14 @@ func (l *Logger) Middleware(next http.Handler) http.Handler { ResponseCode: int32(sw.status), SourceIP: sourceIp, AuthMechanism: capturedData.GetAuthMethod(), - UserId: capturedData.GetUserID(), + UserID: capturedData.GetUserID(), AuthSuccess: sw.status != http.StatusUnauthorized && sw.status != http.StatusForbidden, BytesUpload: bytesUpload, BytesDownload: bytesDownload, Protocol: ProtocolHTTP, } l.logger.Debugf("response: request_id=%s method=%s host=%s path=%s status=%d duration=%dms source=%s origin=%s service=%s account=%s", - requestID, r.Method, host, r.URL.Path, sw.status, duration.Milliseconds(), sourceIp, capturedData.GetOrigin(), capturedData.GetServiceId(), capturedData.GetAccountId()) + requestID, r.Method, host, r.URL.Path, sw.status, duration.Milliseconds(), sourceIp, capturedData.GetOrigin(), capturedData.GetServiceID(), capturedData.GetAccountID()) l.log(entry) diff --git a/proxy/internal/auth/header.go b/proxy/internal/auth/header.go new file mode 100644 index 000000000..194800a49 --- /dev/null +++ b/proxy/internal/auth/header.go @@ -0,0 +1,69 @@ +package auth + +import ( + "errors" + "fmt" + "net/http" + + "github.com/netbirdio/netbird/proxy/auth" + "github.com/netbirdio/netbird/proxy/internal/types" + "github.com/netbirdio/netbird/shared/management/proto" +) + +// ErrHeaderAuthFailed indicates that the header was present but the +// credential did not validate. Callers should return 401 instead of +// falling through to other auth schemes. +var ErrHeaderAuthFailed = errors.New("header authentication failed") + +// Header implements header-based authentication. The proxy checks for the +// configured header in each request and validates its value via gRPC. +type Header struct { + id types.ServiceID + accountId types.AccountID + headerName string + client authenticator +} + +// NewHeader creates a Header authentication scheme for the given header name. +func NewHeader(client authenticator, id types.ServiceID, accountId types.AccountID, headerName string) Header { + return Header{ + id: id, + accountId: accountId, + headerName: headerName, + client: client, + } +} + +// Type returns auth.MethodHeader. +func (Header) Type() auth.Method { + return auth.MethodHeader +} + +// Authenticate checks for the configured header in the request. If absent, +// returns empty (unauthenticated). If present, validates via gRPC. +func (h Header) Authenticate(r *http.Request) (string, string, error) { + value := r.Header.Get(h.headerName) + if value == "" { + return "", "", nil + } + + res, err := h.client.Authenticate(r.Context(), &proto.AuthenticateRequest{ + Id: string(h.id), + AccountId: string(h.accountId), + Request: &proto.AuthenticateRequest_HeaderAuth{ + HeaderAuth: &proto.HeaderAuthRequest{ + HeaderValue: value, + HeaderName: h.headerName, + }, + }, + }) + if err != nil { + return "", "", fmt.Errorf("authenticate header: %w", err) + } + + if res.GetSuccess() { + return res.GetSessionToken(), "", nil + } + + return "", "", ErrHeaderAuthFailed +} diff --git a/proxy/internal/auth/middleware.go b/proxy/internal/auth/middleware.go index 3cf86e4b3..670cafb68 100644 --- a/proxy/internal/auth/middleware.go +++ b/proxy/internal/auth/middleware.go @@ -4,9 +4,12 @@ import ( "context" "crypto/ed25519" "encoding/base64" + "errors" "fmt" + "html" "net" "net/http" + "net/netip" "net/url" "sync" "time" @@ -16,11 +19,16 @@ import ( "github.com/netbirdio/netbird/proxy/auth" "github.com/netbirdio/netbird/proxy/internal/proxy" + "github.com/netbirdio/netbird/proxy/internal/restrict" "github.com/netbirdio/netbird/proxy/internal/types" "github.com/netbirdio/netbird/proxy/web" "github.com/netbirdio/netbird/shared/management/proto" ) +// errValidationUnavailable indicates that session validation failed due to +// an infrastructure error (e.g. gRPC unavailable), not an invalid token. +var errValidationUnavailable = errors.New("session validation unavailable") + type authenticator interface { Authenticate(ctx context.Context, in *proto.AuthenticateRequest, opts ...grpc.CallOption) (*proto.AuthenticateResponse, error) } @@ -40,12 +48,14 @@ type Scheme interface { Authenticate(*http.Request) (token string, promptData string, err error) } +// DomainConfig holds the authentication and restriction settings for a protected domain. type DomainConfig struct { Schemes []Scheme SessionPublicKey ed25519.PublicKey SessionExpiration time.Duration AccountID types.AccountID ServiceID types.ServiceID + IPRestrictions *restrict.Filter } type validationResult struct { @@ -54,17 +64,18 @@ type validationResult struct { DeniedReason string } +// Middleware applies per-domain authentication and IP restriction checks. type Middleware struct { domainsMux sync.RWMutex domains map[string]DomainConfig logger *log.Logger sessionValidator SessionValidator + geo restrict.GeoResolver } -// NewMiddleware creates a new authentication middleware. -// The sessionValidator is optional; if nil, OIDC session tokens will be validated -// locally without group access checks. -func NewMiddleware(logger *log.Logger, sessionValidator SessionValidator) *Middleware { +// NewMiddleware creates a new authentication middleware. The sessionValidator is +// optional; if nil, OIDC session tokens are validated locally without group access checks. +func NewMiddleware(logger *log.Logger, sessionValidator SessionValidator, geo restrict.GeoResolver) *Middleware { if logger == nil { logger = log.StandardLogger() } @@ -72,18 +83,12 @@ func NewMiddleware(logger *log.Logger, sessionValidator SessionValidator) *Middl domains: make(map[string]DomainConfig), logger: logger, sessionValidator: sessionValidator, + geo: geo, } } -// Protect applies authentication middleware to the passed handler. -// For each incoming request it will be checked against the middleware's -// internal list of protected domains. -// If the Host domain in the inbound request is not present, then it will -// simply be passed through. -// However, if the Host domain is present, then the specified authentication -// schemes for that domain will be applied to the request. -// In the event that no authentication schemes are defined for the domain, -// then the request will also be simply passed through. +// Protect wraps next with per-domain authentication and IP restriction checks. +// Requests whose Host is not registered pass through unchanged. func (mw *Middleware) Protect(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { host, _, err := net.SplitHostPort(r.Host) @@ -94,8 +99,7 @@ func (mw *Middleware) Protect(next http.Handler) http.Handler { config, exists := mw.getDomainConfig(host) mw.logger.Debugf("checking authentication for host: %s, exists: %t", host, exists) - // Domains that are not configured here or have no authentication schemes applied should simply pass through. - if !exists || len(config.Schemes) == 0 { + if !exists { next.ServeHTTP(w, r) return } @@ -103,6 +107,16 @@ func (mw *Middleware) Protect(next http.Handler) http.Handler { // Set account and service IDs in captured data for access logging. setCapturedIDs(r, config) + if !mw.checkIPRestrictions(w, r, config) { + return + } + + // Domains with no authentication schemes pass through after IP checks. + if len(config.Schemes) == 0 { + next.ServeHTTP(w, r) + return + } + if mw.handleOAuthCallbackError(w, r) { return } @@ -111,6 +125,10 @@ func (mw *Middleware) Protect(next http.Handler) http.Handler { return } + if mw.forwardWithHeaderAuth(w, r, host, config, next) { + return + } + mw.authenticateWithSchemes(w, r, host, config) }) } @@ -124,11 +142,65 @@ func (mw *Middleware) getDomainConfig(host string) (DomainConfig, bool) { func setCapturedIDs(r *http.Request, config DomainConfig) { if cd := proxy.CapturedDataFromContext(r.Context()); cd != nil { - cd.SetAccountId(config.AccountID) - cd.SetServiceId(config.ServiceID) + cd.SetAccountID(config.AccountID) + cd.SetServiceID(config.ServiceID) } } +// checkIPRestrictions validates the client IP against the domain's IP restrictions. +// Uses the resolved client IP from CapturedData (which accounts for trusted proxies) +// rather than r.RemoteAddr directly. +func (mw *Middleware) checkIPRestrictions(w http.ResponseWriter, r *http.Request, config DomainConfig) bool { + if config.IPRestrictions == nil { + return true + } + + clientIP := mw.resolveClientIP(r) + if !clientIP.IsValid() { + mw.logger.Debugf("IP restriction: cannot resolve client address for %q, denying", r.RemoteAddr) + http.Error(w, "Forbidden", http.StatusForbidden) + return false + } + + verdict := config.IPRestrictions.Check(clientIP, mw.geo) + if verdict == restrict.Allow { + return true + } + + reason := verdict.String() + mw.blockIPRestriction(r, reason) + http.Error(w, "Forbidden", http.StatusForbidden) + return false +} + +// resolveClientIP extracts the real client IP from CapturedData, falling back to r.RemoteAddr. +func (mw *Middleware) resolveClientIP(r *http.Request) netip.Addr { + if cd := proxy.CapturedDataFromContext(r.Context()); cd != nil { + if ip := cd.GetClientIP(); ip.IsValid() { + return ip + } + } + + clientIPStr, _, _ := net.SplitHostPort(r.RemoteAddr) + if clientIPStr == "" { + clientIPStr = r.RemoteAddr + } + addr, err := netip.ParseAddr(clientIPStr) + if err != nil { + return netip.Addr{} + } + return addr.Unmap() +} + +// blockIPRestriction sets captured data fields for an IP-restriction block event. +func (mw *Middleware) blockIPRestriction(r *http.Request, reason string) { + if cd := proxy.CapturedDataFromContext(r.Context()); cd != nil { + cd.SetOrigin(proxy.OriginAuth) + cd.SetAuthMethod(reason) + } + mw.logger.Debugf("IP restriction: %s for %s", reason, r.RemoteAddr) +} + // handleOAuthCallbackError checks for error query parameters from an OAuth // callback and renders the access denied page if present. func (mw *Middleware) handleOAuthCallbackError(w http.ResponseWriter, r *http.Request) bool { @@ -146,6 +218,8 @@ func (mw *Middleware) handleOAuthCallbackError(w http.ResponseWriter, r *http.Re errDesc := r.URL.Query().Get("error_description") if errDesc == "" { errDesc = "An error occurred during authentication" + } else { + errDesc = html.EscapeString(errDesc) } web.ServeAccessDeniedPage(w, r, http.StatusForbidden, "Access Denied", errDesc, requestID) return true @@ -170,6 +244,85 @@ func (mw *Middleware) forwardWithSessionCookie(w http.ResponseWriter, r *http.Re return true } +// forwardWithHeaderAuth checks for a Header auth scheme. If the header validates, +// the request is forwarded directly (no redirect), which is important for API clients. +func (mw *Middleware) forwardWithHeaderAuth(w http.ResponseWriter, r *http.Request, host string, config DomainConfig, next http.Handler) bool { + for _, scheme := range config.Schemes { + hdr, ok := scheme.(Header) + if !ok { + continue + } + + handled := mw.tryHeaderScheme(w, r, host, config, hdr, next) + if handled { + return true + } + } + return false +} + +func (mw *Middleware) tryHeaderScheme(w http.ResponseWriter, r *http.Request, host string, config DomainConfig, hdr Header, next http.Handler) bool { + token, _, err := hdr.Authenticate(r) + if err != nil { + return mw.handleHeaderAuthError(w, r, err) + } + if token == "" { + return false + } + + result, err := mw.validateSessionToken(r.Context(), host, token, config.SessionPublicKey, auth.MethodHeader) + if err != nil { + setHeaderCapturedData(r.Context(), "") + status := http.StatusBadRequest + msg := "invalid session token" + if errors.Is(err, errValidationUnavailable) { + status = http.StatusBadGateway + msg = "authentication service unavailable" + } + http.Error(w, msg, status) + return true + } + + if !result.Valid { + setHeaderCapturedData(r.Context(), result.UserID) + http.Error(w, "Unauthorized", http.StatusUnauthorized) + return true + } + + setSessionCookie(w, token, config.SessionExpiration) + if cd := proxy.CapturedDataFromContext(r.Context()); cd != nil { + cd.SetUserID(result.UserID) + cd.SetAuthMethod(auth.MethodHeader.String()) + } + + next.ServeHTTP(w, r) + return true +} + +func (mw *Middleware) handleHeaderAuthError(w http.ResponseWriter, r *http.Request, err error) bool { + if errors.Is(err, ErrHeaderAuthFailed) { + setHeaderCapturedData(r.Context(), "") + http.Error(w, "Unauthorized", http.StatusUnauthorized) + return true + } + mw.logger.WithField("scheme", "header").Warnf("header auth infrastructure error: %v", err) + if cd := proxy.CapturedDataFromContext(r.Context()); cd != nil { + cd.SetOrigin(proxy.OriginAuth) + } + http.Error(w, "authentication service unavailable", http.StatusBadGateway) + return true +} + +func setHeaderCapturedData(ctx context.Context, userID string) { + cd := proxy.CapturedDataFromContext(ctx) + if cd == nil { + return + } + cd.SetOrigin(proxy.OriginAuth) + cd.SetAuthMethod(auth.MethodHeader.String()) + cd.SetUserID(userID) +} + // authenticateWithSchemes tries each configured auth scheme in order. // On success it sets a session cookie and redirects; on failure it renders the login page. func (mw *Middleware) authenticateWithSchemes(w http.ResponseWriter, r *http.Request, host string, config DomainConfig) { @@ -217,7 +370,13 @@ func (mw *Middleware) handleAuthenticatedToken(w http.ResponseWriter, r *http.Re cd.SetOrigin(proxy.OriginAuth) cd.SetAuthMethod(scheme.Type().String()) } - http.Error(w, err.Error(), http.StatusBadRequest) + status := http.StatusBadRequest + msg := "invalid session token" + if errors.Is(err, errValidationUnavailable) { + status = http.StatusBadGateway + msg = "authentication service unavailable" + } + http.Error(w, msg, status) return } @@ -233,7 +392,21 @@ func (mw *Middleware) handleAuthenticatedToken(w http.ResponseWriter, r *http.Re return } - expiration := config.SessionExpiration + setSessionCookie(w, token, config.SessionExpiration) + + // Redirect instead of forwarding the auth POST to the backend. + // The browser will follow with a GET carrying the new session cookie. + if cd := proxy.CapturedDataFromContext(r.Context()); cd != nil { + cd.SetOrigin(proxy.OriginAuth) + cd.SetUserID(result.UserID) + cd.SetAuthMethod(scheme.Type().String()) + } + redirectURL := stripSessionTokenParam(r.URL) + http.Redirect(w, r, redirectURL, http.StatusSeeOther) +} + +// setSessionCookie writes a session cookie with secure defaults. +func setSessionCookie(w http.ResponseWriter, token string, expiration time.Duration) { if expiration == 0 { expiration = auth.DefaultSessionExpiry } @@ -245,16 +418,6 @@ func (mw *Middleware) handleAuthenticatedToken(w http.ResponseWriter, r *http.Re SameSite: http.SameSiteLaxMode, MaxAge: int(expiration.Seconds()), }) - - // Redirect instead of forwarding the auth POST to the backend. - // The browser will follow with a GET carrying the new session cookie. - if cd := proxy.CapturedDataFromContext(r.Context()); cd != nil { - cd.SetOrigin(proxy.OriginAuth) - cd.SetUserID(result.UserID) - cd.SetAuthMethod(scheme.Type().String()) - } - redirectURL := stripSessionTokenParam(r.URL) - http.Redirect(w, r, redirectURL, http.StatusSeeOther) } // wasCredentialSubmitted checks if credentials were submitted for the given auth method. @@ -275,13 +438,14 @@ func wasCredentialSubmitted(r *http.Request, method auth.Method) bool { // session JWTs. Returns an error if the key is missing or invalid. // Callers must not serve the domain if this returns an error, to avoid // exposing an unauthenticated service. -func (mw *Middleware) AddDomain(domain string, schemes []Scheme, publicKeyB64 string, expiration time.Duration, accountID types.AccountID, serviceID types.ServiceID) error { +func (mw *Middleware) AddDomain(domain string, schemes []Scheme, publicKeyB64 string, expiration time.Duration, accountID types.AccountID, serviceID types.ServiceID, ipRestrictions *restrict.Filter) error { if len(schemes) == 0 { mw.domainsMux.Lock() defer mw.domainsMux.Unlock() mw.domains[domain] = DomainConfig{ - AccountID: accountID, - ServiceID: serviceID, + AccountID: accountID, + ServiceID: serviceID, + IPRestrictions: ipRestrictions, } return nil } @@ -302,30 +466,28 @@ func (mw *Middleware) AddDomain(domain string, schemes []Scheme, publicKeyB64 st SessionExpiration: expiration, AccountID: accountID, ServiceID: serviceID, + IPRestrictions: ipRestrictions, } return nil } +// RemoveDomain unregisters authentication for the given domain. func (mw *Middleware) RemoveDomain(domain string) { mw.domainsMux.Lock() defer mw.domainsMux.Unlock() delete(mw.domains, domain) } -// validateSessionToken validates a session token, optionally checking group access via gRPC. -// For OIDC tokens with a configured validator, it calls ValidateSession to check group access. -// For other auth methods (PIN, password), it validates the JWT locally. -// Returns a validationResult with user ID and validity status, or error for invalid tokens. +// validateSessionToken validates a session token. OIDC tokens with a configured +// validator go through gRPC for group access checks; other methods validate locally. func (mw *Middleware) validateSessionToken(ctx context.Context, host, token string, publicKey ed25519.PublicKey, method auth.Method) (*validationResult, error) { - // For OIDC with a session validator, call the gRPC service to check group access if method == auth.MethodOIDC && mw.sessionValidator != nil { resp, err := mw.sessionValidator.ValidateSession(ctx, &proto.ValidateSessionRequest{ Domain: host, SessionToken: token, }) if err != nil { - mw.logger.WithError(err).Error("ValidateSession gRPC call failed") - return nil, fmt.Errorf("session validation failed") + return nil, fmt.Errorf("%w: %w", errValidationUnavailable, err) } if !resp.Valid { mw.logger.WithFields(log.Fields{ @@ -342,7 +504,6 @@ func (mw *Middleware) validateSessionToken(ctx context.Context, host, token stri return &validationResult{UserID: resp.UserId, Valid: true}, nil } - // For non-OIDC methods or when no validator is configured, validate JWT locally userID, _, err := auth.ValidateSessionJWT(token, host, publicKey) if err != nil { return nil, err diff --git a/proxy/internal/auth/middleware_test.go b/proxy/internal/auth/middleware_test.go index 7d9ac1bd5..a4924d380 100644 --- a/proxy/internal/auth/middleware_test.go +++ b/proxy/internal/auth/middleware_test.go @@ -1,11 +1,14 @@ package auth import ( + "context" "crypto/ed25519" "crypto/rand" "encoding/base64" + "errors" "net/http" "net/http/httptest" + "net/netip" "net/url" "strings" "testing" @@ -14,10 +17,13 @@ import ( log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/grpc" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/sessionkey" "github.com/netbirdio/netbird/proxy/auth" "github.com/netbirdio/netbird/proxy/internal/proxy" + "github.com/netbirdio/netbird/proxy/internal/restrict" + "github.com/netbirdio/netbird/shared/management/proto" ) func generateTestKeyPair(t *testing.T) *sessionkey.KeyPair { @@ -52,11 +58,11 @@ func newPassthroughHandler() http.Handler { } func TestAddDomain_ValidKey(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) kp := generateTestKeyPair(t) scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} - err := mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "") + err := mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "", nil) require.NoError(t, err) mw.domainsMux.RLock() @@ -70,10 +76,10 @@ func TestAddDomain_ValidKey(t *testing.T) { } func TestAddDomain_EmptyKey(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} - err := mw.AddDomain("example.com", []Scheme{scheme}, "", time.Hour, "", "") + err := mw.AddDomain("example.com", []Scheme{scheme}, "", time.Hour, "", "", nil) require.Error(t, err) assert.Contains(t, err.Error(), "invalid session public key size") @@ -84,10 +90,10 @@ func TestAddDomain_EmptyKey(t *testing.T) { } func TestAddDomain_InvalidBase64(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} - err := mw.AddDomain("example.com", []Scheme{scheme}, "not-valid-base64!!!", time.Hour, "", "") + err := mw.AddDomain("example.com", []Scheme{scheme}, "not-valid-base64!!!", time.Hour, "", "", nil) require.Error(t, err) assert.Contains(t, err.Error(), "decode session public key") @@ -98,11 +104,11 @@ func TestAddDomain_InvalidBase64(t *testing.T) { } func TestAddDomain_WrongKeySize(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) shortKey := base64.StdEncoding.EncodeToString([]byte("tooshort")) scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} - err := mw.AddDomain("example.com", []Scheme{scheme}, shortKey, time.Hour, "", "") + err := mw.AddDomain("example.com", []Scheme{scheme}, shortKey, time.Hour, "", "", nil) require.Error(t, err) assert.Contains(t, err.Error(), "invalid session public key size") @@ -113,9 +119,9 @@ func TestAddDomain_WrongKeySize(t *testing.T) { } func TestAddDomain_NoSchemes_NoKeyRequired(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) - err := mw.AddDomain("example.com", nil, "", time.Hour, "", "") + err := mw.AddDomain("example.com", nil, "", time.Hour, "", "", nil) require.NoError(t, err, "domains with no auth schemes should not require a key") mw.domainsMux.RLock() @@ -125,14 +131,14 @@ func TestAddDomain_NoSchemes_NoKeyRequired(t *testing.T) { } func TestAddDomain_OverwritesPreviousConfig(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) kp1 := generateTestKeyPair(t) kp2 := generateTestKeyPair(t) scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} - require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp1.PublicKey, time.Hour, "", "")) - require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp2.PublicKey, 2*time.Hour, "", "")) + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp1.PublicKey, time.Hour, "", "", nil)) + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp2.PublicKey, 2*time.Hour, "", "", nil)) mw.domainsMux.RLock() config := mw.domains["example.com"] @@ -144,11 +150,11 @@ func TestAddDomain_OverwritesPreviousConfig(t *testing.T) { } func TestRemoveDomain(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) kp := generateTestKeyPair(t) scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} - require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "", nil)) mw.RemoveDomain("example.com") @@ -159,7 +165,7 @@ func TestRemoveDomain(t *testing.T) { } func TestProtect_UnknownDomainPassesThrough(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) handler := mw.Protect(newPassthroughHandler()) req := httptest.NewRequest(http.MethodGet, "http://unknown.com/", nil) @@ -171,8 +177,8 @@ func TestProtect_UnknownDomainPassesThrough(t *testing.T) { } func TestProtect_DomainWithNoSchemesPassesThrough(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) - require.NoError(t, mw.AddDomain("example.com", nil, "", time.Hour, "", "")) + mw := NewMiddleware(log.StandardLogger(), nil, nil) + require.NoError(t, mw.AddDomain("example.com", nil, "", time.Hour, "", "", nil)) handler := mw.Protect(newPassthroughHandler()) @@ -185,11 +191,11 @@ func TestProtect_DomainWithNoSchemesPassesThrough(t *testing.T) { } func TestProtect_UnauthenticatedRequestIsBlocked(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) kp := generateTestKeyPair(t) scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} - require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "", nil)) var backendCalled bool backend := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { @@ -206,11 +212,11 @@ func TestProtect_UnauthenticatedRequestIsBlocked(t *testing.T) { } func TestProtect_HostWithPortIsMatched(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) kp := generateTestKeyPair(t) scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} - require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "", nil)) var backendCalled bool backend := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { @@ -227,16 +233,16 @@ func TestProtect_HostWithPortIsMatched(t *testing.T) { } func TestProtect_ValidSessionCookiePassesThrough(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) kp := generateTestKeyPair(t) scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} - require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "", nil)) token, err := sessionkey.SignToken(kp.PrivateKey, "test-user", "example.com", auth.MethodPIN, time.Hour) require.NoError(t, err) - capturedData := &proxy.CapturedData{} + capturedData := proxy.NewCapturedData("") handler := mw.Protect(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { cd := proxy.CapturedDataFromContext(r.Context()) require.NotNil(t, cd) @@ -257,11 +263,11 @@ func TestProtect_ValidSessionCookiePassesThrough(t *testing.T) { } func TestProtect_ExpiredSessionCookieIsRejected(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) kp := generateTestKeyPair(t) scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} - require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "", nil)) // Sign a token that expired 1 second ago. token, err := sessionkey.SignToken(kp.PrivateKey, "test-user", "example.com", auth.MethodPIN, -time.Second) @@ -283,11 +289,11 @@ func TestProtect_ExpiredSessionCookieIsRejected(t *testing.T) { } func TestProtect_WrongDomainCookieIsRejected(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) kp := generateTestKeyPair(t) scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} - require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "", nil)) // Token signed for a different domain audience. token, err := sessionkey.SignToken(kp.PrivateKey, "test-user", "other.com", auth.MethodPIN, time.Hour) @@ -309,12 +315,12 @@ func TestProtect_WrongDomainCookieIsRejected(t *testing.T) { } func TestProtect_WrongKeyCookieIsRejected(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) kp1 := generateTestKeyPair(t) kp2 := generateTestKeyPair(t) scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} - require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp1.PublicKey, time.Hour, "", "")) + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp1.PublicKey, time.Hour, "", "", nil)) // Token signed with a different private key. token, err := sessionkey.SignToken(kp2.PrivateKey, "test-user", "example.com", auth.MethodPIN, time.Hour) @@ -336,7 +342,7 @@ func TestProtect_WrongKeyCookieIsRejected(t *testing.T) { } func TestProtect_SchemeAuthRedirectsWithCookie(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) kp := generateTestKeyPair(t) token, err := sessionkey.SignToken(kp.PrivateKey, "pin-user", "example.com", auth.MethodPIN, time.Hour) @@ -351,7 +357,7 @@ func TestProtect_SchemeAuthRedirectsWithCookie(t *testing.T) { return "", "pin", nil }, } - require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "", nil)) var backendCalled bool backend := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { @@ -386,7 +392,7 @@ func TestProtect_SchemeAuthRedirectsWithCookie(t *testing.T) { } func TestProtect_FailedAuthDoesNotSetCookie(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) kp := generateTestKeyPair(t) scheme := &stubScheme{ @@ -395,7 +401,7 @@ func TestProtect_FailedAuthDoesNotSetCookie(t *testing.T) { return "", "pin", nil }, } - require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "", nil)) handler := mw.Protect(newPassthroughHandler()) @@ -409,7 +415,7 @@ func TestProtect_FailedAuthDoesNotSetCookie(t *testing.T) { } func TestProtect_MultipleSchemes(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) kp := generateTestKeyPair(t) token, err := sessionkey.SignToken(kp.PrivateKey, "password-user", "example.com", auth.MethodPassword, time.Hour) @@ -431,7 +437,7 @@ func TestProtect_MultipleSchemes(t *testing.T) { return "", "password", nil }, } - require.NoError(t, mw.AddDomain("example.com", []Scheme{pinScheme, passwordScheme}, kp.PublicKey, time.Hour, "", "")) + require.NoError(t, mw.AddDomain("example.com", []Scheme{pinScheme, passwordScheme}, kp.PublicKey, time.Hour, "", "", nil)) var backendCalled bool backend := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { @@ -451,7 +457,7 @@ func TestProtect_MultipleSchemes(t *testing.T) { } func TestProtect_InvalidTokenFromSchemeReturns400(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) kp := generateTestKeyPair(t) // Return a garbage token that won't validate. @@ -461,7 +467,7 @@ func TestProtect_InvalidTokenFromSchemeReturns400(t *testing.T) { return "invalid-jwt-token", "", nil }, } - require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "", nil)) handler := mw.Protect(newPassthroughHandler()) @@ -473,7 +479,7 @@ func TestProtect_InvalidTokenFromSchemeReturns400(t *testing.T) { } func TestAddDomain_RandomBytes32NotEd25519(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) // 32 random bytes that happen to be valid base64 and correct size // but are actually a valid ed25519 public key length-wise. @@ -485,19 +491,19 @@ func TestAddDomain_RandomBytes32NotEd25519(t *testing.T) { key := base64.StdEncoding.EncodeToString(randomBytes) scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} - err = mw.AddDomain("example.com", []Scheme{scheme}, key, time.Hour, "", "") + err = mw.AddDomain("example.com", []Scheme{scheme}, key, time.Hour, "", "", nil) require.NoError(t, err, "any 32-byte key should be accepted at registration time") } func TestAddDomain_InvalidKeyDoesNotCorruptExistingConfig(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) kp := generateTestKeyPair(t) scheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} - require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "", nil)) // Attempt to overwrite with an invalid key. - err := mw.AddDomain("example.com", []Scheme{scheme}, "bad", time.Hour, "", "") + err := mw.AddDomain("example.com", []Scheme{scheme}, "bad", time.Hour, "", "", nil) require.Error(t, err) // The original valid config should still be intact. @@ -511,7 +517,7 @@ func TestAddDomain_InvalidKeyDoesNotCorruptExistingConfig(t *testing.T) { } func TestProtect_FailedPinAuthCapturesAuthMethod(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) kp := generateTestKeyPair(t) // Scheme that always fails authentication (returns empty token) @@ -521,9 +527,9 @@ func TestProtect_FailedPinAuthCapturesAuthMethod(t *testing.T) { return "", "pin", nil }, } - require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "", nil)) - capturedData := &proxy.CapturedData{} + capturedData := proxy.NewCapturedData("") handler := mw.Protect(newPassthroughHandler()) // Submit wrong PIN - should capture auth method @@ -539,7 +545,7 @@ func TestProtect_FailedPinAuthCapturesAuthMethod(t *testing.T) { } func TestProtect_FailedPasswordAuthCapturesAuthMethod(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) kp := generateTestKeyPair(t) scheme := &stubScheme{ @@ -548,9 +554,9 @@ func TestProtect_FailedPasswordAuthCapturesAuthMethod(t *testing.T) { return "", "password", nil }, } - require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "", nil)) - capturedData := &proxy.CapturedData{} + capturedData := proxy.NewCapturedData("") handler := mw.Protect(newPassthroughHandler()) // Submit wrong password - should capture auth method @@ -566,7 +572,7 @@ func TestProtect_FailedPasswordAuthCapturesAuthMethod(t *testing.T) { } func TestProtect_NoCredentialsDoesNotCaptureAuthMethod(t *testing.T) { - mw := NewMiddleware(log.StandardLogger(), nil) + mw := NewMiddleware(log.StandardLogger(), nil, nil) kp := generateTestKeyPair(t) scheme := &stubScheme{ @@ -575,9 +581,9 @@ func TestProtect_NoCredentialsDoesNotCaptureAuthMethod(t *testing.T) { return "", "pin", nil }, } - require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "")) + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "", nil)) - capturedData := &proxy.CapturedData{} + capturedData := proxy.NewCapturedData("") handler := mw.Protect(newPassthroughHandler()) // No credentials submitted - should not capture auth method @@ -658,3 +664,271 @@ func TestWasCredentialSubmitted(t *testing.T) { }) } } + +func TestCheckIPRestrictions_UnparseableAddress(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil, nil) + + err := mw.AddDomain("example.com", nil, "", 0, "acc1", "svc1", + restrict.ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil)) + require.NoError(t, err) + + handler := mw.Protect(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + tests := []struct { + name string + remoteAddr string + wantCode int + }{ + {"unparsable address denies", "not-an-ip:1234", http.StatusForbidden}, + {"empty address denies", "", http.StatusForbidden}, + {"allowed address passes", "10.1.2.3:5678", http.StatusOK}, + {"denied address blocked", "192.168.1.1:5678", http.StatusForbidden}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + req.RemoteAddr = tt.remoteAddr + req.Host = "example.com" + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + assert.Equal(t, tt.wantCode, rr.Code) + }) + } +} + +func TestCheckIPRestrictions_UsesCapturedDataClientIP(t *testing.T) { + // When CapturedData is set (by the access log middleware, which resolves + // trusted proxies), checkIPRestrictions should use that IP, not RemoteAddr. + mw := NewMiddleware(log.StandardLogger(), nil, nil) + + err := mw.AddDomain("example.com", nil, "", 0, "acc1", "svc1", + restrict.ParseFilter([]string{"203.0.113.0/24"}, nil, nil, nil)) + require.NoError(t, err) + + handler := mw.Protect(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + // RemoteAddr is a trusted proxy, but CapturedData has the real client IP. + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + req.RemoteAddr = "10.0.0.1:5000" + req.Host = "example.com" + + cd := proxy.NewCapturedData("") + cd.SetClientIP(netip.MustParseAddr("203.0.113.50")) + ctx := proxy.WithCapturedData(req.Context(), cd) + req = req.WithContext(ctx) + + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + assert.Equal(t, http.StatusOK, rr.Code, "should use CapturedData IP (203.0.113.50), not RemoteAddr (10.0.0.1)") + + // Same request but CapturedData has a blocked IP. + req2 := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + req2.RemoteAddr = "203.0.113.50:5000" + req2.Host = "example.com" + + cd2 := proxy.NewCapturedData("") + cd2.SetClientIP(netip.MustParseAddr("10.0.0.1")) + ctx2 := proxy.WithCapturedData(req2.Context(), cd2) + req2 = req2.WithContext(ctx2) + + rr2 := httptest.NewRecorder() + handler.ServeHTTP(rr2, req2) + assert.Equal(t, http.StatusForbidden, rr2.Code, "should use CapturedData IP (10.0.0.1), not RemoteAddr (203.0.113.50)") +} + +func TestCheckIPRestrictions_NilGeoWithCountryRules(t *testing.T) { + // Geo is nil, country restrictions are configured: must deny (fail-close). + mw := NewMiddleware(log.StandardLogger(), nil, nil) + + err := mw.AddDomain("example.com", nil, "", 0, "acc1", "svc1", + restrict.ParseFilter(nil, nil, []string{"US"}, nil)) + require.NoError(t, err) + + handler := mw.Protect(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + req.RemoteAddr = "1.2.3.4:5678" + req.Host = "example.com" + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + assert.Equal(t, http.StatusForbidden, rr.Code, "country restrictions with nil geo must deny") +} + +// mockAuthenticator is a minimal mock for the authenticator gRPC interface +// used by the Header scheme. +type mockAuthenticator struct { + fn func(ctx context.Context, req *proto.AuthenticateRequest) (*proto.AuthenticateResponse, error) +} + +func (m *mockAuthenticator) Authenticate(ctx context.Context, in *proto.AuthenticateRequest, _ ...grpc.CallOption) (*proto.AuthenticateResponse, error) { + return m.fn(ctx, in) +} + +// newHeaderSchemeWithToken creates a Header scheme backed by a mock that +// returns a signed session token when the expected header value is provided. +func newHeaderSchemeWithToken(t *testing.T, kp *sessionkey.KeyPair, headerName, expectedValue string) Header { + t.Helper() + token, err := sessionkey.SignToken(kp.PrivateKey, "header-user", "example.com", auth.MethodHeader, time.Hour) + require.NoError(t, err) + + mock := &mockAuthenticator{fn: func(_ context.Context, req *proto.AuthenticateRequest) (*proto.AuthenticateResponse, error) { + ha := req.GetHeaderAuth() + if ha != nil && ha.GetHeaderValue() == expectedValue { + return &proto.AuthenticateResponse{Success: true, SessionToken: token}, nil + } + return &proto.AuthenticateResponse{Success: false}, nil + }} + return NewHeader(mock, "svc1", "acc1", headerName) +} + +func TestProtect_HeaderAuth_ForwardsOnSuccess(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil, nil) + kp := generateTestKeyPair(t) + + hdr := newHeaderSchemeWithToken(t, kp, "X-API-Key", "secret-key") + require.NoError(t, mw.AddDomain("example.com", []Scheme{hdr}, kp.PublicKey, time.Hour, "acc1", "svc1", nil)) + + var backendCalled bool + capturedData := proxy.NewCapturedData("") + handler := mw.Protect(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + backendCalled = true + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("ok")) + })) + + req := httptest.NewRequest(http.MethodGet, "http://example.com/path", nil) + req.Header.Set("X-API-Key", "secret-key") + req = req.WithContext(proxy.WithCapturedData(req.Context(), capturedData)) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.True(t, backendCalled, "backend should be called directly for header auth (no redirect)") + assert.Equal(t, http.StatusOK, rec.Code) + assert.Equal(t, "ok", rec.Body.String()) + + // Session cookie should be set. + var sessionCookie *http.Cookie + for _, c := range rec.Result().Cookies() { + if c.Name == auth.SessionCookieName { + sessionCookie = c + break + } + } + require.NotNil(t, sessionCookie, "session cookie should be set after successful header auth") + assert.True(t, sessionCookie.HttpOnly) + assert.True(t, sessionCookie.Secure) + + assert.Equal(t, "header-user", capturedData.GetUserID()) + assert.Equal(t, "header", capturedData.GetAuthMethod()) +} + +func TestProtect_HeaderAuth_MissingHeaderFallsThrough(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil, nil) + kp := generateTestKeyPair(t) + + hdr := newHeaderSchemeWithToken(t, kp, "X-API-Key", "secret-key") + // Also add a PIN scheme so we can verify fallthrough behavior. + pinScheme := &stubScheme{method: auth.MethodPIN, promptID: "pin"} + require.NoError(t, mw.AddDomain("example.com", []Scheme{hdr, pinScheme}, kp.PublicKey, time.Hour, "acc1", "svc1", nil)) + + handler := mw.Protect(newPassthroughHandler()) + + // No X-API-Key header: should fall through to PIN login page (401). + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusUnauthorized, rec.Code, "missing header should fall through to login page") +} + +func TestProtect_HeaderAuth_WrongValueReturns401(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil, nil) + kp := generateTestKeyPair(t) + + mock := &mockAuthenticator{fn: func(_ context.Context, _ *proto.AuthenticateRequest) (*proto.AuthenticateResponse, error) { + return &proto.AuthenticateResponse{Success: false}, nil + }} + hdr := NewHeader(mock, "svc1", "acc1", "X-API-Key") + require.NoError(t, mw.AddDomain("example.com", []Scheme{hdr}, kp.PublicKey, time.Hour, "acc1", "svc1", nil)) + + capturedData := proxy.NewCapturedData("") + handler := mw.Protect(newPassthroughHandler()) + + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + req.Header.Set("X-API-Key", "wrong-key") + req = req.WithContext(proxy.WithCapturedData(req.Context(), capturedData)) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusUnauthorized, rec.Code) + assert.Equal(t, "header", capturedData.GetAuthMethod()) +} + +func TestProtect_HeaderAuth_InfraErrorReturns502(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil, nil) + kp := generateTestKeyPair(t) + + mock := &mockAuthenticator{fn: func(_ context.Context, _ *proto.AuthenticateRequest) (*proto.AuthenticateResponse, error) { + return nil, errors.New("gRPC unavailable") + }} + hdr := NewHeader(mock, "svc1", "acc1", "X-API-Key") + require.NoError(t, mw.AddDomain("example.com", []Scheme{hdr}, kp.PublicKey, time.Hour, "acc1", "svc1", nil)) + + handler := mw.Protect(newPassthroughHandler()) + + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + req.Header.Set("X-API-Key", "some-key") + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusBadGateway, rec.Code) +} + +func TestProtect_HeaderAuth_SubsequentRequestUsesSessionCookie(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil, nil) + kp := generateTestKeyPair(t) + + hdr := newHeaderSchemeWithToken(t, kp, "X-API-Key", "secret-key") + require.NoError(t, mw.AddDomain("example.com", []Scheme{hdr}, kp.PublicKey, time.Hour, "acc1", "svc1", nil)) + + handler := mw.Protect(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + // First request with header auth. + req1 := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + req1.Header.Set("X-API-Key", "secret-key") + req1 = req1.WithContext(proxy.WithCapturedData(req1.Context(), proxy.NewCapturedData(""))) + rec1 := httptest.NewRecorder() + handler.ServeHTTP(rec1, req1) + require.Equal(t, http.StatusOK, rec1.Code) + + // Extract session cookie. + var sessionCookie *http.Cookie + for _, c := range rec1.Result().Cookies() { + if c.Name == auth.SessionCookieName { + sessionCookie = c + break + } + } + require.NotNil(t, sessionCookie) + + // Second request with only the session cookie (no header). + capturedData2 := proxy.NewCapturedData("") + req2 := httptest.NewRequest(http.MethodGet, "http://example.com/other", nil) + req2.AddCookie(sessionCookie) + req2 = req2.WithContext(proxy.WithCapturedData(req2.Context(), capturedData2)) + rec2 := httptest.NewRecorder() + handler.ServeHTTP(rec2, req2) + + assert.Equal(t, http.StatusOK, rec2.Code) + assert.Equal(t, "header-user", capturedData2.GetUserID()) + assert.Equal(t, "header", capturedData2.GetAuthMethod()) +} diff --git a/proxy/internal/geolocation/download.go b/proxy/internal/geolocation/download.go new file mode 100644 index 000000000..64d515275 --- /dev/null +++ b/proxy/internal/geolocation/download.go @@ -0,0 +1,264 @@ +package geolocation + +import ( + "archive/tar" + "bufio" + "compress/gzip" + "crypto/sha256" + "errors" + "fmt" + "io" + "mime" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + log "github.com/sirupsen/logrus" +) + +const ( + mmdbTarGZURL = "https://pkgs.netbird.io/geolocation-dbs/GeoLite2-City/download?suffix=tar.gz" + mmdbSha256URL = "https://pkgs.netbird.io/geolocation-dbs/GeoLite2-City/download?suffix=tar.gz.sha256" + mmdbInnerName = "GeoLite2-City.mmdb" + + downloadTimeout = 2 * time.Minute + maxMMDBSize = 256 << 20 // 256 MB +) + +// ensureMMDB checks for an existing MMDB file in dataDir. If none is found, +// it downloads from pkgs.netbird.io with SHA256 verification. +func ensureMMDB(logger *log.Logger, dataDir string) (string, error) { + if err := os.MkdirAll(dataDir, 0o755); err != nil { + return "", fmt.Errorf("create geo data directory %s: %w", dataDir, err) + } + + pattern := filepath.Join(dataDir, mmdbGlob) + if files, _ := filepath.Glob(pattern); len(files) > 0 { + mmdbPath := files[len(files)-1] + logger.Debugf("using existing geolocation database: %s", mmdbPath) + return mmdbPath, nil + } + + logger.Info("geolocation database not found, downloading from pkgs.netbird.io") + return downloadMMDB(logger, dataDir) +} + +func downloadMMDB(logger *log.Logger, dataDir string) (string, error) { + client := &http.Client{Timeout: downloadTimeout} + + datedName, err := fetchRemoteFilename(client, mmdbTarGZURL) + if err != nil { + return "", fmt.Errorf("get remote filename: %w", err) + } + + mmdbFilename := deriveMMDBFilename(datedName) + mmdbPath := filepath.Join(dataDir, mmdbFilename) + + tmp, err := os.MkdirTemp("", "geolite-proxy-*") + if err != nil { + return "", fmt.Errorf("create temp directory: %w", err) + } + defer os.RemoveAll(tmp) + + checksumFile := filepath.Join(tmp, "checksum.sha256") + if err := downloadToFile(client, mmdbSha256URL, checksumFile); err != nil { + return "", fmt.Errorf("download checksum: %w", err) + } + + expectedHash, err := readChecksumFile(checksumFile) + if err != nil { + return "", fmt.Errorf("read checksum: %w", err) + } + + tarFile := filepath.Join(tmp, datedName) + logger.Debugf("downloading geolocation database (%s)", datedName) + if err := downloadToFile(client, mmdbTarGZURL, tarFile); err != nil { + return "", fmt.Errorf("download database: %w", err) + } + + if err := verifySHA256(tarFile, expectedHash); err != nil { + return "", fmt.Errorf("verify database checksum: %w", err) + } + + if err := extractMMDBFromTarGZ(tarFile, mmdbPath); err != nil { + return "", fmt.Errorf("extract database: %w", err) + } + + logger.Infof("geolocation database downloaded: %s", mmdbPath) + return mmdbPath, nil +} + +// deriveMMDBFilename converts a tar.gz filename to an MMDB filename. +// Example: GeoLite2-City_20240101.tar.gz -> GeoLite2-City_20240101.mmdb +func deriveMMDBFilename(tarName string) string { + base, _, _ := strings.Cut(tarName, ".") + if !strings.Contains(base, "_") { + return "GeoLite2-City.mmdb" + } + return base + ".mmdb" +} + +func fetchRemoteFilename(client *http.Client, url string) (string, error) { + resp, err := client.Head(url) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("HEAD request: HTTP %d", resp.StatusCode) + } + + cd := resp.Header.Get("Content-Disposition") + if cd == "" { + return "", errors.New("no Content-Disposition header") + } + + _, params, err := mime.ParseMediaType(cd) + if err != nil { + return "", fmt.Errorf("parse Content-Disposition: %w", err) + } + + name := filepath.Base(params["filename"]) + if name == "" || name == "." { + return "", errors.New("no filename in Content-Disposition") + } + return name, nil +} + +func downloadToFile(client *http.Client, url, dest string) error { + resp, err := client.Get(url) //nolint:gosec + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024)) + return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body)) + } + + f, err := os.Create(dest) //nolint:gosec + if err != nil { + return err + } + defer f.Close() + + // Cap download at 256 MB to prevent unbounded reads from a compromised server. + if _, err := io.Copy(f, io.LimitReader(resp.Body, maxMMDBSize)); err != nil { + return err + } + return nil +} + +func readChecksumFile(path string) (string, error) { + f, err := os.Open(path) //nolint:gosec + if err != nil { + return "", err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + if scanner.Scan() { + parts := strings.Fields(scanner.Text()) + if len(parts) > 0 { + return parts[0], nil + } + } + if err := scanner.Err(); err != nil { + return "", err + } + return "", errors.New("empty checksum file") +} + +func verifySHA256(path, expected string) error { + f, err := os.Open(path) //nolint:gosec + if err != nil { + return err + } + defer f.Close() + + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + return err + } + + actual := fmt.Sprintf("%x", h.Sum(nil)) + if actual != expected { + return fmt.Errorf("SHA256 mismatch: expected %s, got %s", expected, actual) + } + return nil +} + +func extractMMDBFromTarGZ(tarGZPath, destPath string) error { + f, err := os.Open(tarGZPath) //nolint:gosec + if err != nil { + return err + } + defer f.Close() + + gz, err := gzip.NewReader(f) + if err != nil { + return err + } + defer gz.Close() + + tr := tar.NewReader(gz) + for { + hdr, err := tr.Next() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + if hdr.Typeflag == tar.TypeReg && filepath.Base(hdr.Name) == mmdbInnerName { + if hdr.Size < 0 || hdr.Size > maxMMDBSize { + return fmt.Errorf("mmdb entry size %d exceeds limit %d", hdr.Size, maxMMDBSize) + } + if err := extractToFileAtomic(io.LimitReader(tr, hdr.Size), destPath); err != nil { + return err + } + return nil + } + } + + return fmt.Errorf("%s not found in archive", mmdbInnerName) +} + +// extractToFileAtomic writes r to a temporary file in the same directory as +// destPath, then renames it into place so a crash never leaves a truncated file. +func extractToFileAtomic(r io.Reader, destPath string) error { + dir := filepath.Dir(destPath) + tmp, err := os.CreateTemp(dir, ".mmdb-*.tmp") + if err != nil { + return fmt.Errorf("create temp file: %w", err) + } + tmpPath := tmp.Name() + + if _, err := io.Copy(tmp, r); err != nil { //nolint:gosec // G110: caller bounds with LimitReader + if closeErr := tmp.Close(); closeErr != nil { + log.Debugf("failed to close temp file %s: %v", tmpPath, closeErr) + } + if removeErr := os.Remove(tmpPath); removeErr != nil { + log.Debugf("failed to remove temp file %s: %v", tmpPath, removeErr) + } + return fmt.Errorf("write mmdb: %w", err) + } + if err := tmp.Close(); err != nil { + if removeErr := os.Remove(tmpPath); removeErr != nil { + log.Debugf("failed to remove temp file %s: %v", tmpPath, removeErr) + } + return fmt.Errorf("close temp file: %w", err) + } + if err := os.Rename(tmpPath, destPath); err != nil { + if removeErr := os.Remove(tmpPath); removeErr != nil { + log.Debugf("failed to remove temp file %s: %v", tmpPath, removeErr) + } + return fmt.Errorf("rename to %s: %w", destPath, err) + } + return nil +} diff --git a/proxy/internal/geolocation/geolocation.go b/proxy/internal/geolocation/geolocation.go new file mode 100644 index 000000000..81b02efb3 --- /dev/null +++ b/proxy/internal/geolocation/geolocation.go @@ -0,0 +1,152 @@ +// Package geolocation provides IP-to-country lookups using MaxMind GeoLite2 databases. +package geolocation + +import ( + "fmt" + "net/netip" + "os" + "strconv" + "sync" + + "github.com/oschwald/maxminddb-golang" + log "github.com/sirupsen/logrus" +) + +const ( + // EnvDisable disables geolocation lookups entirely when set to a truthy value. + EnvDisable = "NB_PROXY_DISABLE_GEOLOCATION" + + mmdbGlob = "GeoLite2-City_*.mmdb" +) + +type record struct { + Country struct { + ISOCode string `maxminddb:"iso_code"` + } `maxminddb:"country"` + City struct { + Names struct { + En string `maxminddb:"en"` + } `maxminddb:"names"` + } `maxminddb:"city"` + Subdivisions []struct { + ISOCode string `maxminddb:"iso_code"` + Names struct { + En string `maxminddb:"en"` + } `maxminddb:"names"` + } `maxminddb:"subdivisions"` +} + +// Result holds the outcome of a geo lookup. +type Result struct { + CountryCode string + CityName string + SubdivisionCode string + SubdivisionName string +} + +// Lookup provides IP geolocation lookups. +type Lookup struct { + mu sync.RWMutex + db *maxminddb.Reader + logger *log.Logger +} + +// NewLookup opens or downloads the GeoLite2-City MMDB in dataDir. +// Returns nil without error if geolocation is disabled via environment +// variable, no data directory is configured, or the download fails +// (graceful degradation: country restrictions will deny all requests). +func NewLookup(logger *log.Logger, dataDir string) (*Lookup, error) { + if isDisabledByEnv(logger) { + logger.Info("geolocation disabled via environment variable") + return nil, nil //nolint:nilnil + } + + if dataDir == "" { + return nil, nil //nolint:nilnil + } + + mmdbPath, err := ensureMMDB(logger, dataDir) + if err != nil { + logger.Warnf("geolocation database unavailable: %v", err) + logger.Warn("country-based access restrictions will deny all requests until a database is available") + return nil, nil //nolint:nilnil + } + + db, err := maxminddb.Open(mmdbPath) + if err != nil { + return nil, fmt.Errorf("open GeoLite2 database %s: %w", mmdbPath, err) + } + + logger.Infof("geolocation database loaded from %s", mmdbPath) + return &Lookup{db: db, logger: logger}, nil +} + +// LookupAddr returns the country ISO code and city name for the given IP. +// Returns an empty Result if the database is nil or the lookup fails. +func (l *Lookup) LookupAddr(addr netip.Addr) Result { + if l == nil { + return Result{} + } + + l.mu.RLock() + defer l.mu.RUnlock() + + if l.db == nil { + return Result{} + } + + addr = addr.Unmap() + + var rec record + if err := l.db.Lookup(addr.AsSlice(), &rec); err != nil { + l.logger.Debugf("geolocation lookup %s: %v", addr, err) + return Result{} + } + r := Result{ + CountryCode: rec.Country.ISOCode, + CityName: rec.City.Names.En, + } + if len(rec.Subdivisions) > 0 { + r.SubdivisionCode = rec.Subdivisions[0].ISOCode + r.SubdivisionName = rec.Subdivisions[0].Names.En + } + return r +} + +// Available reports whether the lookup has a loaded database. +func (l *Lookup) Available() bool { + if l == nil { + return false + } + l.mu.RLock() + defer l.mu.RUnlock() + return l.db != nil +} + +// Close releases the database resources. +func (l *Lookup) Close() error { + if l == nil { + return nil + } + l.mu.Lock() + defer l.mu.Unlock() + if l.db != nil { + err := l.db.Close() + l.db = nil + return err + } + return nil +} + +func isDisabledByEnv(logger *log.Logger) bool { + val := os.Getenv(EnvDisable) + if val == "" { + return false + } + disabled, err := strconv.ParseBool(val) + if err != nil { + logger.Warnf("parse %s=%q: %v", EnvDisable, val, err) + return false + } + return disabled +} diff --git a/proxy/internal/proxy/context.go b/proxy/internal/proxy/context.go index 4a61f6bcf..d3f67dc57 100644 --- a/proxy/internal/proxy/context.go +++ b/proxy/internal/proxy/context.go @@ -11,8 +11,6 @@ import ( type requestContextKey string const ( - serviceIdKey requestContextKey = "serviceId" - accountIdKey requestContextKey = "accountId" capturedDataKey requestContextKey = "capturedData" ) @@ -47,112 +45,117 @@ func (o ResponseOrigin) String() string { // to pass data back up the middleware chain. type CapturedData struct { mu sync.RWMutex - RequestID string - ServiceId types.ServiceID - AccountId types.AccountID - Origin ResponseOrigin - ClientIP netip.Addr - UserID string - AuthMethod string + requestID string + serviceID types.ServiceID + accountID types.AccountID + origin ResponseOrigin + clientIP netip.Addr + userID string + authMethod string } -// GetRequestID safely gets the request ID +// NewCapturedData creates a CapturedData with the given request ID. +func NewCapturedData(requestID string) *CapturedData { + return &CapturedData{requestID: requestID} +} + +// GetRequestID returns the request ID. func (c *CapturedData) GetRequestID() string { c.mu.RLock() defer c.mu.RUnlock() - return c.RequestID + return c.requestID } -// SetServiceId safely sets the service ID -func (c *CapturedData) SetServiceId(serviceId types.ServiceID) { +// SetServiceID sets the service ID. +func (c *CapturedData) SetServiceID(serviceID types.ServiceID) { c.mu.Lock() defer c.mu.Unlock() - c.ServiceId = serviceId + c.serviceID = serviceID } -// GetServiceId safely gets the service ID -func (c *CapturedData) GetServiceId() types.ServiceID { +// GetServiceID returns the service ID. +func (c *CapturedData) GetServiceID() types.ServiceID { c.mu.RLock() defer c.mu.RUnlock() - return c.ServiceId + return c.serviceID } -// SetAccountId safely sets the account ID -func (c *CapturedData) SetAccountId(accountId types.AccountID) { +// SetAccountID sets the account ID. +func (c *CapturedData) SetAccountID(accountID types.AccountID) { c.mu.Lock() defer c.mu.Unlock() - c.AccountId = accountId + c.accountID = accountID } -// GetAccountId safely gets the account ID -func (c *CapturedData) GetAccountId() types.AccountID { +// GetAccountID returns the account ID. +func (c *CapturedData) GetAccountID() types.AccountID { c.mu.RLock() defer c.mu.RUnlock() - return c.AccountId + return c.accountID } -// SetOrigin safely sets the response origin +// SetOrigin sets the response origin. func (c *CapturedData) SetOrigin(origin ResponseOrigin) { c.mu.Lock() defer c.mu.Unlock() - c.Origin = origin + c.origin = origin } -// GetOrigin safely gets the response origin +// GetOrigin returns the response origin. func (c *CapturedData) GetOrigin() ResponseOrigin { c.mu.RLock() defer c.mu.RUnlock() - return c.Origin + return c.origin } -// SetClientIP safely sets the resolved client IP. +// SetClientIP sets the resolved client IP. func (c *CapturedData) SetClientIP(ip netip.Addr) { c.mu.Lock() defer c.mu.Unlock() - c.ClientIP = ip + c.clientIP = ip } -// GetClientIP safely gets the resolved client IP. +// GetClientIP returns the resolved client IP. func (c *CapturedData) GetClientIP() netip.Addr { c.mu.RLock() defer c.mu.RUnlock() - return c.ClientIP + return c.clientIP } -// SetUserID safely sets the authenticated user ID. +// SetUserID sets the authenticated user ID. func (c *CapturedData) SetUserID(userID string) { c.mu.Lock() defer c.mu.Unlock() - c.UserID = userID + c.userID = userID } -// GetUserID safely gets the authenticated user ID. +// GetUserID returns the authenticated user ID. func (c *CapturedData) GetUserID() string { c.mu.RLock() defer c.mu.RUnlock() - return c.UserID + return c.userID } -// SetAuthMethod safely sets the authentication method used. +// SetAuthMethod sets the authentication method used. func (c *CapturedData) SetAuthMethod(method string) { c.mu.Lock() defer c.mu.Unlock() - c.AuthMethod = method + c.authMethod = method } -// GetAuthMethod safely gets the authentication method used. +// GetAuthMethod returns the authentication method used. func (c *CapturedData) GetAuthMethod() string { c.mu.RLock() defer c.mu.RUnlock() - return c.AuthMethod + return c.authMethod } -// WithCapturedData adds a CapturedData struct to the context +// WithCapturedData adds a CapturedData struct to the context. func WithCapturedData(ctx context.Context, data *CapturedData) context.Context { return context.WithValue(ctx, capturedDataKey, data) } -// CapturedDataFromContext retrieves the CapturedData from context +// CapturedDataFromContext retrieves the CapturedData from context. func CapturedDataFromContext(ctx context.Context) *CapturedData { v := ctx.Value(capturedDataKey) data, ok := v.(*CapturedData) @@ -161,28 +164,3 @@ func CapturedDataFromContext(ctx context.Context) *CapturedData { } return data } - -func withServiceId(ctx context.Context, serviceId types.ServiceID) context.Context { - return context.WithValue(ctx, serviceIdKey, serviceId) -} - -func ServiceIdFromContext(ctx context.Context) types.ServiceID { - v := ctx.Value(serviceIdKey) - serviceId, ok := v.(types.ServiceID) - if !ok { - return "" - } - return serviceId -} -func withAccountId(ctx context.Context, accountId types.AccountID) context.Context { - return context.WithValue(ctx, accountIdKey, accountId) -} - -func AccountIdFromContext(ctx context.Context) types.AccountID { - v := ctx.Value(accountIdKey) - accountId, ok := v.(types.AccountID) - if !ok { - return "" - } - return accountId -} diff --git a/proxy/internal/proxy/reverseproxy.go b/proxy/internal/proxy/reverseproxy.go index 1ee9b2a42..246851d24 100644 --- a/proxy/internal/proxy/reverseproxy.go +++ b/proxy/internal/proxy/reverseproxy.go @@ -66,19 +66,16 @@ func (p *ReverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } - // Set the serviceId in the context for later retrieval. - ctx := withServiceId(r.Context(), result.serviceID) - // Set the accountId in the context for later retrieval (for middleware). - ctx = withAccountId(ctx, result.accountID) - // Set the accountId in the context for the roundtripper to use. + ctx := r.Context() + // Set the account ID in the context for the roundtripper to use. ctx = roundtrip.WithAccountID(ctx, result.accountID) - // Also populate captured data if it exists (allows middleware to read after handler completes). + // Populate captured data if it exists (allows middleware to read after handler completes). // This solves the problem of passing data UP the middleware chain: we put a mutable struct // pointer in the context, and mutate the struct here so outer middleware can read it. if capturedData := CapturedDataFromContext(ctx); capturedData != nil { - capturedData.SetServiceId(result.serviceID) - capturedData.SetAccountId(result.accountID) + capturedData.SetServiceID(result.serviceID) + capturedData.SetAccountID(result.accountID) } pt := result.target @@ -96,10 +93,10 @@ func (p *ReverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { } rp := &httputil.ReverseProxy{ - Rewrite: p.rewriteFunc(pt.URL, rewriteMatchedPath, result.passHostHeader, pt.PathRewrite, pt.CustomHeaders), + Rewrite: p.rewriteFunc(pt.URL, rewriteMatchedPath, result.passHostHeader, pt.PathRewrite, pt.CustomHeaders, result.stripAuthHeaders), Transport: p.transport, FlushInterval: -1, - ErrorHandler: proxyErrorHandler, + ErrorHandler: p.proxyErrorHandler, } if result.rewriteRedirects { rp.ModifyResponse = p.rewriteLocationFunc(pt.URL, rewriteMatchedPath, r) //nolint:bodyclose @@ -113,7 +110,7 @@ func (p *ReverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { // When passHostHeader is true, the original client Host header is preserved // instead of being rewritten to the backend's address. // The pathRewrite parameter controls how the request path is transformed. -func (p *ReverseProxy) rewriteFunc(target *url.URL, matchedPath string, passHostHeader bool, pathRewrite PathRewriteMode, customHeaders map[string]string) func(r *httputil.ProxyRequest) { +func (p *ReverseProxy) rewriteFunc(target *url.URL, matchedPath string, passHostHeader bool, pathRewrite PathRewriteMode, customHeaders map[string]string, stripAuthHeaders []string) func(r *httputil.ProxyRequest) { return func(r *httputil.ProxyRequest) { switch pathRewrite { case PathRewritePreserve: @@ -137,6 +134,10 @@ func (p *ReverseProxy) rewriteFunc(target *url.URL, matchedPath string, passHost r.Out.Host = target.Host } + for _, h := range stripAuthHeaders { + r.Out.Header.Del(h) + } + for k, v := range customHeaders { r.Out.Header.Set(k, v) } @@ -305,7 +306,7 @@ func extractForwardedPort(host, resolvedProto string) string { // proxyErrorHandler handles errors from the reverse proxy and serves // user-friendly error pages instead of raw error responses. -func proxyErrorHandler(w http.ResponseWriter, r *http.Request, err error) { +func (p *ReverseProxy) proxyErrorHandler(w http.ResponseWriter, r *http.Request, err error) { if cd := CapturedDataFromContext(r.Context()); cd != nil { cd.SetOrigin(OriginProxyError) } @@ -313,7 +314,7 @@ func proxyErrorHandler(w http.ResponseWriter, r *http.Request, err error) { clientIP := getClientIP(r) title, message, code, status := classifyProxyError(err) - log.Warnf("proxy error: request_id=%s client_ip=%s method=%s host=%s path=%s status=%d title=%q err=%v", + p.logger.Warnf("proxy error: request_id=%s client_ip=%s method=%s host=%s path=%s status=%d title=%q err=%v", requestID, clientIP, r.Method, r.Host, r.URL.Path, code, title, err) web.ServeErrorPage(w, r, code, title, message, requestID, status) diff --git a/proxy/internal/proxy/reverseproxy_test.go b/proxy/internal/proxy/reverseproxy_test.go index b05ead198..c53307837 100644 --- a/proxy/internal/proxy/reverseproxy_test.go +++ b/proxy/internal/proxy/reverseproxy_test.go @@ -28,7 +28,7 @@ func TestRewriteFunc_HostRewriting(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto"} t.Run("rewrites host to backend by default", func(t *testing.T) { - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "https://public.example.com/path", "203.0.113.1:12345") rewrite(pr) @@ -37,7 +37,7 @@ func TestRewriteFunc_HostRewriting(t *testing.T) { }) t.Run("preserves original host when passHostHeader is true", func(t *testing.T) { - rewrite := p.rewriteFunc(target, "", true, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", true, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "https://public.example.com/path", "203.0.113.1:12345") rewrite(pr) @@ -52,7 +52,7 @@ func TestRewriteFunc_HostRewriting(t *testing.T) { func TestRewriteFunc_XForwardedForStripping(t *testing.T) { target, _ := url.Parse("http://backend.internal:8080") p := &ReverseProxy{forwardedProto: "auto"} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) t.Run("sets X-Forwarded-For from direct connection IP", func(t *testing.T) { pr := newProxyRequest(t, "http://example.com/", "203.0.113.50:9999") @@ -89,7 +89,7 @@ func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { t.Run("sets X-Forwarded-Host to original host", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto"} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://myapp.example.com:8443/path", "1.2.3.4:5000") rewrite(pr) @@ -99,7 +99,7 @@ func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { t.Run("sets X-Forwarded-Port from explicit host port", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto"} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://example.com:8443/path", "1.2.3.4:5000") rewrite(pr) @@ -109,7 +109,7 @@ func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { t.Run("defaults X-Forwarded-Port to 443 for https", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto"} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "https://example.com/", "1.2.3.4:5000") pr.In.TLS = &tls.ConnectionState{} @@ -120,7 +120,7 @@ func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { t.Run("defaults X-Forwarded-Port to 80 for http", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto"} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") rewrite(pr) @@ -130,7 +130,7 @@ func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { t.Run("auto detects https from TLS", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto"} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "https://example.com/", "1.2.3.4:5000") pr.In.TLS = &tls.ConnectionState{} @@ -141,7 +141,7 @@ func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { t.Run("auto detects http without TLS", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto"} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") rewrite(pr) @@ -151,7 +151,7 @@ func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { t.Run("forced proto overrides TLS detection", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "https"} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") // No TLS, but forced to https @@ -162,7 +162,7 @@ func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { t.Run("forced http proto", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "http"} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "https://example.com/", "1.2.3.4:5000") pr.In.TLS = &tls.ConnectionState{} @@ -175,7 +175,7 @@ func TestRewriteFunc_ForwardedHostAndProto(t *testing.T) { func TestRewriteFunc_SessionCookieStripping(t *testing.T) { target, _ := url.Parse("http://backend.internal:8080") p := &ReverseProxy{forwardedProto: "auto"} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) t.Run("strips nb_session cookie", func(t *testing.T) { pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") @@ -220,7 +220,7 @@ func TestRewriteFunc_SessionCookieStripping(t *testing.T) { func TestRewriteFunc_SessionTokenQueryStripping(t *testing.T) { target, _ := url.Parse("http://backend.internal:8080") p := &ReverseProxy{forwardedProto: "auto"} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) t.Run("strips session_token query parameter", func(t *testing.T) { pr := newProxyRequest(t, "http://example.com/callback?session_token=secret123&other=keep", "1.2.3.4:5000") @@ -248,7 +248,7 @@ func TestRewriteFunc_URLRewriting(t *testing.T) { t.Run("rewrites URL to target with path prefix", func(t *testing.T) { target, _ := url.Parse("http://backend.internal:8080/app") - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://example.com/somepath", "1.2.3.4:5000") rewrite(pr) @@ -261,7 +261,7 @@ func TestRewriteFunc_URLRewriting(t *testing.T) { t.Run("strips matched path prefix to avoid duplication", func(t *testing.T) { target, _ := url.Parse("https://backend.example.org:443/app") - rewrite := p.rewriteFunc(target, "/app", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "/app", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://example.com/app", "1.2.3.4:5000") rewrite(pr) @@ -274,7 +274,7 @@ func TestRewriteFunc_URLRewriting(t *testing.T) { t.Run("strips matched prefix and preserves subpath", func(t *testing.T) { target, _ := url.Parse("https://backend.example.org:443/app") - rewrite := p.rewriteFunc(target, "/app", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "/app", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://example.com/app/article/123", "1.2.3.4:5000") rewrite(pr) @@ -332,7 +332,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("appends to X-Forwarded-For", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") pr.In.Header.Set("X-Forwarded-For", "203.0.113.50") @@ -344,7 +344,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("preserves upstream X-Real-IP", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") pr.In.Header.Set("X-Forwarded-For", "203.0.113.50") @@ -357,7 +357,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("resolves X-Real-IP from XFF when not set by upstream", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") pr.In.Header.Set("X-Forwarded-For", "203.0.113.50, 10.0.0.2") @@ -370,7 +370,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("preserves upstream X-Forwarded-Host", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://proxy.internal/", "10.0.0.1:5000") pr.In.Header.Set("X-Forwarded-Host", "original.example.com") @@ -382,7 +382,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("preserves upstream X-Forwarded-Proto", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") pr.In.Header.Set("X-Forwarded-Proto", "https") @@ -394,7 +394,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("preserves upstream X-Forwarded-Port", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") pr.In.Header.Set("X-Forwarded-Port", "8443") @@ -406,7 +406,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("falls back to local proto when upstream does not set it", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "https", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") @@ -418,7 +418,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("sets X-Forwarded-Host from request when upstream does not set it", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") @@ -429,7 +429,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("untrusted RemoteAddr strips headers even with trusted list", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://example.com/", "203.0.113.50:9999") pr.In.Header.Set("X-Forwarded-For", "10.0.0.1, 172.16.0.1") @@ -454,7 +454,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("empty trusted list behaves as untrusted", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: nil} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") pr.In.Header.Set("X-Forwarded-For", "203.0.113.50") @@ -467,7 +467,7 @@ func TestRewriteFunc_TrustedProxy(t *testing.T) { t.Run("XFF starts fresh when trusted proxy has no upstream XFF", func(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto", trustedProxies: trusted} - rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://example.com/", "10.0.0.1:5000") @@ -490,7 +490,7 @@ func TestRewriteFunc_PathForwarding(t *testing.T) { t.Run("path prefix baked into target URL is a no-op", func(t *testing.T) { // Management builds: path="/heise", target="https://heise.de:443/heise" target, _ := url.Parse("https://heise.de:443/heise") - rewrite := p.rewriteFunc(target, "/heise", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "/heise", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://external.test/heise", "1.2.3.4:5000") rewrite(pr) @@ -501,7 +501,7 @@ func TestRewriteFunc_PathForwarding(t *testing.T) { t.Run("subpath under prefix also preserved", func(t *testing.T) { target, _ := url.Parse("https://heise.de:443/heise") - rewrite := p.rewriteFunc(target, "/heise", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "/heise", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://external.test/heise/article/123", "1.2.3.4:5000") rewrite(pr) @@ -513,7 +513,7 @@ func TestRewriteFunc_PathForwarding(t *testing.T) { // What the behavior WOULD be if target URL had no path (true stripping) t.Run("target without path prefix gives true stripping", func(t *testing.T) { target, _ := url.Parse("https://heise.de:443") - rewrite := p.rewriteFunc(target, "/heise", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "/heise", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://external.test/heise", "1.2.3.4:5000") rewrite(pr) @@ -524,7 +524,7 @@ func TestRewriteFunc_PathForwarding(t *testing.T) { t.Run("target without path prefix strips and preserves subpath", func(t *testing.T) { target, _ := url.Parse("https://heise.de:443") - rewrite := p.rewriteFunc(target, "/heise", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "/heise", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://external.test/heise/article/123", "1.2.3.4:5000") rewrite(pr) @@ -536,7 +536,7 @@ func TestRewriteFunc_PathForwarding(t *testing.T) { // Root path "/" — no stripping expected t.Run("root path forwards full request path unchanged", func(t *testing.T) { target, _ := url.Parse("https://backend.example.com:443/") - rewrite := p.rewriteFunc(target, "/", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "/", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://external.test/heise", "1.2.3.4:5000") rewrite(pr) @@ -551,7 +551,7 @@ func TestRewriteFunc_PreservePath(t *testing.T) { target, _ := url.Parse("http://backend.internal:8080") t.Run("preserve keeps full request path", func(t *testing.T) { - rewrite := p.rewriteFunc(target, "/api", false, PathRewritePreserve, nil) + rewrite := p.rewriteFunc(target, "/api", false, PathRewritePreserve, nil, nil) pr := newProxyRequest(t, "http://example.com/api/users/123", "1.2.3.4:5000") rewrite(pr) @@ -561,7 +561,7 @@ func TestRewriteFunc_PreservePath(t *testing.T) { }) t.Run("preserve with root matchedPath", func(t *testing.T) { - rewrite := p.rewriteFunc(target, "/", false, PathRewritePreserve, nil) + rewrite := p.rewriteFunc(target, "/", false, PathRewritePreserve, nil, nil) pr := newProxyRequest(t, "http://example.com/anything", "1.2.3.4:5000") rewrite(pr) @@ -579,7 +579,7 @@ func TestRewriteFunc_CustomHeaders(t *testing.T) { "X-Custom-Auth": "token-abc", "X-Env": "production", } - rewrite := p.rewriteFunc(target, "/", false, PathRewriteDefault, headers) + rewrite := p.rewriteFunc(target, "/", false, PathRewriteDefault, headers, nil) pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") rewrite(pr) @@ -589,7 +589,7 @@ func TestRewriteFunc_CustomHeaders(t *testing.T) { }) t.Run("nil customHeaders is fine", func(t *testing.T) { - rewrite := p.rewriteFunc(target, "/", false, PathRewriteDefault, nil) + rewrite := p.rewriteFunc(target, "/", false, PathRewriteDefault, nil, nil) pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") rewrite(pr) @@ -599,7 +599,7 @@ func TestRewriteFunc_CustomHeaders(t *testing.T) { t.Run("custom headers override existing request headers", func(t *testing.T) { headers := map[string]string{"X-Override": "new-value"} - rewrite := p.rewriteFunc(target, "/", false, PathRewriteDefault, headers) + rewrite := p.rewriteFunc(target, "/", false, PathRewriteDefault, headers, nil) pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") pr.In.Header.Set("X-Override", "old-value") @@ -609,11 +609,38 @@ func TestRewriteFunc_CustomHeaders(t *testing.T) { }) } +func TestRewriteFunc_StripsAuthorizationHeader(t *testing.T) { + p := &ReverseProxy{forwardedProto: "auto"} + target, _ := url.Parse("http://backend.internal:8080") + + t.Run("strips incoming Authorization when no custom Authorization set", func(t *testing.T) { + rewrite := p.rewriteFunc(target, "/", false, PathRewriteDefault, nil, []string{"Authorization"}) + pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") + pr.In.Header.Set("Authorization", "Bearer proxy-token") + + rewrite(pr) + + assert.Empty(t, pr.Out.Header.Get("Authorization"), "Authorization should be stripped") + }) + + t.Run("custom Authorization replaces incoming", func(t *testing.T) { + headers := map[string]string{"Authorization": "Basic YmFja2VuZDpzZWNyZXQ="} + rewrite := p.rewriteFunc(target, "/", false, PathRewriteDefault, headers, []string{"Authorization"}) + pr := newProxyRequest(t, "http://example.com/", "1.2.3.4:5000") + pr.In.Header.Set("Authorization", "Bearer proxy-token") + + rewrite(pr) + + assert.Equal(t, "Basic YmFja2VuZDpzZWNyZXQ=", pr.Out.Header.Get("Authorization"), + "backend Authorization from custom headers should be set") + }) +} + func TestRewriteFunc_PreservePathWithCustomHeaders(t *testing.T) { p := &ReverseProxy{forwardedProto: "auto"} target, _ := url.Parse("http://backend.internal:8080") - rewrite := p.rewriteFunc(target, "/api", false, PathRewritePreserve, map[string]string{"X-Via": "proxy"}) + rewrite := p.rewriteFunc(target, "/api", false, PathRewritePreserve, map[string]string{"X-Via": "proxy"}, nil) pr := newProxyRequest(t, "http://example.com/api/deep/path", "1.2.3.4:5000") rewrite(pr) diff --git a/proxy/internal/proxy/servicemapping.go b/proxy/internal/proxy/servicemapping.go index 1513fbe45..fe470cf01 100644 --- a/proxy/internal/proxy/servicemapping.go +++ b/proxy/internal/proxy/servicemapping.go @@ -38,6 +38,11 @@ type Mapping struct { Paths map[string]*PathTarget PassHostHeader bool RewriteRedirects bool + // StripAuthHeaders are header names used for header-based auth. + // These headers are stripped from requests before forwarding. + StripAuthHeaders []string + // sortedPaths caches the paths sorted by length (longest first). + sortedPaths []string } type targetResult struct { @@ -47,6 +52,7 @@ type targetResult struct { accountID types.AccountID passHostHeader bool rewriteRedirects bool + stripAuthHeaders []string } func (p *ReverseProxy) findTargetForRequest(req *http.Request) (targetResult, bool) { @@ -65,16 +71,7 @@ func (p *ReverseProxy) findTargetForRequest(req *http.Request) (targetResult, bo return targetResult{}, false } - // Sort paths by length (longest first) in a naive attempt to match the most specific route first. - paths := make([]string, 0, len(m.Paths)) - for path := range m.Paths { - paths = append(paths, path) - } - sort.Slice(paths, func(i, j int) bool { - return len(paths[i]) > len(paths[j]) - }) - - for _, path := range paths { + for _, path := range m.sortedPaths { if strings.HasPrefix(req.URL.Path, path) { pt := m.Paths[path] if pt == nil || pt.URL == nil { @@ -89,6 +86,7 @@ func (p *ReverseProxy) findTargetForRequest(req *http.Request) (targetResult, bo accountID: m.AccountID, passHostHeader: m.PassHostHeader, rewriteRedirects: m.RewriteRedirects, + stripAuthHeaders: m.StripAuthHeaders, }, true } } @@ -96,7 +94,18 @@ func (p *ReverseProxy) findTargetForRequest(req *http.Request) (targetResult, bo return targetResult{}, false } +// AddMapping registers a host-to-backend mapping for the reverse proxy. func (p *ReverseProxy) AddMapping(m Mapping) { + // Sort paths longest-first to match the most specific route first. + paths := make([]string, 0, len(m.Paths)) + for path := range m.Paths { + paths = append(paths, path) + } + sort.Slice(paths, func(i, j int) bool { + return len(paths[i]) > len(paths[j]) + }) + m.sortedPaths = paths + p.mappingsMux.Lock() defer p.mappingsMux.Unlock() p.mappings[m.Host] = m diff --git a/proxy/internal/restrict/restrict.go b/proxy/internal/restrict/restrict.go new file mode 100644 index 000000000..a0d99ce93 --- /dev/null +++ b/proxy/internal/restrict/restrict.go @@ -0,0 +1,183 @@ +// Package restrict provides connection-level access control based on +// IP CIDR ranges and geolocation (country codes). +package restrict + +import ( + "net/netip" + "slices" + "strings" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/proxy/internal/geolocation" +) + +// GeoResolver resolves an IP address to geographic information. +type GeoResolver interface { + LookupAddr(addr netip.Addr) geolocation.Result + Available() bool +} + +// Filter evaluates IP restrictions. CIDR checks are performed first +// (cheap), followed by country lookups (more expensive) only when needed. +type Filter struct { + AllowedCIDRs []netip.Prefix + BlockedCIDRs []netip.Prefix + AllowedCountries []string + BlockedCountries []string +} + +// ParseFilter builds a Filter from the raw string slices. Returns nil +// if all slices are empty. +func ParseFilter(allowedCIDRs, blockedCIDRs, allowedCountries, blockedCountries []string) *Filter { + if len(allowedCIDRs) == 0 && len(blockedCIDRs) == 0 && + len(allowedCountries) == 0 && len(blockedCountries) == 0 { + return nil + } + + f := &Filter{ + AllowedCountries: normalizeCountryCodes(allowedCountries), + BlockedCountries: normalizeCountryCodes(blockedCountries), + } + for _, cidr := range allowedCIDRs { + prefix, err := netip.ParsePrefix(cidr) + if err != nil { + log.Warnf("skip invalid allowed CIDR %q: %v", cidr, err) + continue + } + f.AllowedCIDRs = append(f.AllowedCIDRs, prefix.Masked()) + } + for _, cidr := range blockedCIDRs { + prefix, err := netip.ParsePrefix(cidr) + if err != nil { + log.Warnf("skip invalid blocked CIDR %q: %v", cidr, err) + continue + } + f.BlockedCIDRs = append(f.BlockedCIDRs, prefix.Masked()) + } + return f +} + +func normalizeCountryCodes(codes []string) []string { + if len(codes) == 0 { + return nil + } + out := make([]string, len(codes)) + for i, c := range codes { + out[i] = strings.ToUpper(c) + } + return out +} + +// Verdict is the result of an access check. +type Verdict int + +const ( + // Allow indicates the address passed all checks. + Allow Verdict = iota + // DenyCIDR indicates the address was blocked by a CIDR rule. + DenyCIDR + // DenyCountry indicates the address was blocked by a country rule. + DenyCountry + // DenyGeoUnavailable indicates that country restrictions are configured + // but the geo lookup is unavailable. + DenyGeoUnavailable +) + +// String returns the deny reason string matching the HTTP auth mechanism names. +func (v Verdict) String() string { + switch v { + case Allow: + return "allow" + case DenyCIDR: + return "ip_restricted" + case DenyCountry: + return "country_restricted" + case DenyGeoUnavailable: + return "geo_unavailable" + default: + return "unknown" + } +} + +// Check evaluates whether addr is permitted. CIDR rules are evaluated +// first because they are O(n) prefix comparisons. Country rules run +// only when CIDR checks pass and require a geo lookup. +func (f *Filter) Check(addr netip.Addr, geo GeoResolver) Verdict { + if f == nil { + return Allow + } + + // Normalize v4-mapped-v6 (e.g. ::ffff:10.1.2.3) to plain v4 so that + // IPv4 CIDR rules match regardless of how the address was received. + addr = addr.Unmap() + + if v := f.checkCIDR(addr); v != Allow { + return v + } + return f.checkCountry(addr, geo) +} + +func (f *Filter) checkCIDR(addr netip.Addr) Verdict { + if len(f.AllowedCIDRs) > 0 { + allowed := false + for _, prefix := range f.AllowedCIDRs { + if prefix.Contains(addr) { + allowed = true + break + } + } + if !allowed { + return DenyCIDR + } + } + + for _, prefix := range f.BlockedCIDRs { + if prefix.Contains(addr) { + return DenyCIDR + } + } + return Allow +} + +func (f *Filter) checkCountry(addr netip.Addr, geo GeoResolver) Verdict { + if len(f.AllowedCountries) == 0 && len(f.BlockedCountries) == 0 { + return Allow + } + + if geo == nil || !geo.Available() { + return DenyGeoUnavailable + } + + result := geo.LookupAddr(addr) + if result.CountryCode == "" { + // Unknown country: deny if an allowlist is active, allow otherwise. + // Blocklists are best-effort: unknown countries pass through since + // the default policy is allow. + if len(f.AllowedCountries) > 0 { + return DenyCountry + } + return Allow + } + + if len(f.AllowedCountries) > 0 { + if !slices.Contains(f.AllowedCountries, result.CountryCode) { + return DenyCountry + } + } + + if slices.Contains(f.BlockedCountries, result.CountryCode) { + return DenyCountry + } + + return Allow +} + +// HasRestrictions returns true if any restriction rules are configured. +func (f *Filter) HasRestrictions() bool { + if f == nil { + return false + } + return len(f.AllowedCIDRs) > 0 || len(f.BlockedCIDRs) > 0 || + len(f.AllowedCountries) > 0 || len(f.BlockedCountries) > 0 +} diff --git a/proxy/internal/restrict/restrict_test.go b/proxy/internal/restrict/restrict_test.go new file mode 100644 index 000000000..17a5848d8 --- /dev/null +++ b/proxy/internal/restrict/restrict_test.go @@ -0,0 +1,278 @@ +package restrict + +import ( + "net/netip" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/proxy/internal/geolocation" +) + +type mockGeo struct { + countries map[string]string +} + +func (m *mockGeo) LookupAddr(addr netip.Addr) geolocation.Result { + return geolocation.Result{CountryCode: m.countries[addr.String()]} +} + +func (m *mockGeo) Available() bool { return true } + +func newMockGeo(entries map[string]string) *mockGeo { + return &mockGeo{countries: entries} +} + +func TestFilter_Check_NilFilter(t *testing.T) { + var f *Filter + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("1.2.3.4"), nil)) +} + +func TestFilter_Check_AllowedCIDR(t *testing.T) { + f := ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil) + + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("10.1.2.3"), nil)) + assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("192.168.1.1"), nil)) +} + +func TestFilter_Check_BlockedCIDR(t *testing.T) { + f := ParseFilter(nil, []string{"10.0.0.0/8"}, nil, nil) + + assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("10.1.2.3"), nil)) + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("192.168.1.1"), nil)) +} + +func TestFilter_Check_AllowedAndBlockedCIDR(t *testing.T) { + f := ParseFilter([]string{"10.0.0.0/8"}, []string{"10.1.0.0/16"}, nil, nil) + + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("10.2.3.4"), nil), "allowed by allowlist, not in blocklist") + assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("10.1.2.3"), nil), "allowed by allowlist but in blocklist") + assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("192.168.1.1"), nil), "not in allowlist") +} + +func TestFilter_Check_AllowedCountry(t *testing.T) { + geo := newMockGeo(map[string]string{ + "1.1.1.1": "US", + "2.2.2.2": "DE", + "3.3.3.3": "CN", + }) + f := ParseFilter(nil, nil, []string{"US", "DE"}, nil) + + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("1.1.1.1"), geo), "US in allowlist") + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("2.2.2.2"), geo), "DE in allowlist") + assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("3.3.3.3"), geo), "CN not in allowlist") +} + +func TestFilter_Check_BlockedCountry(t *testing.T) { + geo := newMockGeo(map[string]string{ + "1.1.1.1": "CN", + "2.2.2.2": "RU", + "3.3.3.3": "US", + }) + f := ParseFilter(nil, nil, nil, []string{"CN", "RU"}) + + assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("1.1.1.1"), geo), "CN in blocklist") + assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("2.2.2.2"), geo), "RU in blocklist") + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("3.3.3.3"), geo), "US not in blocklist") +} + +func TestFilter_Check_AllowedAndBlockedCountry(t *testing.T) { + geo := newMockGeo(map[string]string{ + "1.1.1.1": "US", + "2.2.2.2": "DE", + "3.3.3.3": "CN", + }) + // Allow US and DE, but block DE explicitly. + f := ParseFilter(nil, nil, []string{"US", "DE"}, []string{"DE"}) + + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("1.1.1.1"), geo), "US allowed and not blocked") + assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("2.2.2.2"), geo), "DE allowed but also blocked, block wins") + assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("3.3.3.3"), geo), "CN not in allowlist") +} + +func TestFilter_Check_UnknownCountryWithAllowlist(t *testing.T) { + geo := newMockGeo(map[string]string{ + "1.1.1.1": "US", + }) + f := ParseFilter(nil, nil, []string{"US"}, nil) + + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("1.1.1.1"), geo), "known US in allowlist") + assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("9.9.9.9"), geo), "unknown country denied when allowlist is active") +} + +func TestFilter_Check_UnknownCountryWithBlocklistOnly(t *testing.T) { + geo := newMockGeo(map[string]string{ + "1.1.1.1": "CN", + }) + f := ParseFilter(nil, nil, nil, []string{"CN"}) + + assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("1.1.1.1"), geo), "known CN in blocklist") + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("9.9.9.9"), geo), "unknown country allowed when only blocklist is active") +} + +func TestFilter_Check_CountryWithoutGeo(t *testing.T) { + f := ParseFilter(nil, nil, []string{"US"}, nil) + assert.Equal(t, DenyGeoUnavailable, f.Check(netip.MustParseAddr("1.2.3.4"), nil), "nil geo with country allowlist") +} + +func TestFilter_Check_CountryBlocklistWithoutGeo(t *testing.T) { + f := ParseFilter(nil, nil, nil, []string{"CN"}) + assert.Equal(t, DenyGeoUnavailable, f.Check(netip.MustParseAddr("1.2.3.4"), nil), "nil geo with country blocklist") +} + +func TestFilter_Check_GeoUnavailable(t *testing.T) { + geo := &unavailableGeo{} + + f := ParseFilter(nil, nil, []string{"US"}, nil) + assert.Equal(t, DenyGeoUnavailable, f.Check(netip.MustParseAddr("1.2.3.4"), geo), "unavailable geo with country allowlist") + + f2 := ParseFilter(nil, nil, nil, []string{"CN"}) + assert.Equal(t, DenyGeoUnavailable, f2.Check(netip.MustParseAddr("1.2.3.4"), geo), "unavailable geo with country blocklist") +} + +func TestFilter_Check_CIDROnlySkipsGeo(t *testing.T) { + f := ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil) + + // CIDR-only filter should never touch geo, so nil geo is fine. + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("10.1.2.3"), nil)) + assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("192.168.1.1"), nil)) +} + +func TestFilter_Check_CIDRAllowThenCountryBlock(t *testing.T) { + geo := newMockGeo(map[string]string{ + "10.1.2.3": "CN", + "10.2.3.4": "US", + }) + f := ParseFilter([]string{"10.0.0.0/8"}, nil, nil, []string{"CN"}) + + assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("10.1.2.3"), geo), "CIDR allowed but country blocked") + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("10.2.3.4"), geo), "CIDR allowed and country not blocked") + assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("192.168.1.1"), geo), "CIDR denied before country check") +} + +func TestParseFilter_Empty(t *testing.T) { + f := ParseFilter(nil, nil, nil, nil) + assert.Nil(t, f) +} + +func TestParseFilter_InvalidCIDR(t *testing.T) { + f := ParseFilter([]string{"invalid", "10.0.0.0/8"}, nil, nil, nil) + + assert.NotNil(t, f) + assert.Len(t, f.AllowedCIDRs, 1, "invalid CIDR should be skipped") + assert.Equal(t, netip.MustParsePrefix("10.0.0.0/8"), f.AllowedCIDRs[0]) +} + +func TestFilter_HasRestrictions(t *testing.T) { + assert.False(t, (*Filter)(nil).HasRestrictions()) + assert.False(t, (&Filter{}).HasRestrictions()) + assert.True(t, ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil).HasRestrictions()) + assert.True(t, ParseFilter(nil, nil, []string{"US"}, nil).HasRestrictions()) +} + +func TestFilter_Check_IPv6CIDR(t *testing.T) { + f := ParseFilter([]string{"2001:db8::/32"}, nil, nil, nil) + + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("2001:db8::1"), nil), "v6 addr in v6 allowlist") + assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("2001:db9::1"), nil), "v6 addr not in v6 allowlist") + assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("10.1.2.3"), nil), "v4 addr not in v6 allowlist") +} + +func TestFilter_Check_IPv4MappedIPv6(t *testing.T) { + f := ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil) + + // A v4-mapped-v6 address like ::ffff:10.1.2.3 must match a v4 CIDR. + v4mapped := netip.MustParseAddr("::ffff:10.1.2.3") + assert.True(t, v4mapped.Is4In6(), "precondition: address is v4-in-v6") + assert.Equal(t, Allow, f.Check(v4mapped, nil), "v4-mapped-v6 must match v4 CIDR after Unmap") + + v4mappedOutside := netip.MustParseAddr("::ffff:192.168.1.1") + assert.Equal(t, DenyCIDR, f.Check(v4mappedOutside, nil), "v4-mapped-v6 outside v4 CIDR") +} + +func TestFilter_Check_MixedV4V6CIDRs(t *testing.T) { + f := ParseFilter([]string{"10.0.0.0/8", "2001:db8::/32"}, nil, nil, nil) + + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("10.1.2.3"), nil), "v4 in v4 CIDR") + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("2001:db8::1"), nil), "v6 in v6 CIDR") + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("::ffff:10.1.2.3"), nil), "v4-mapped matches v4 CIDR") + assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("192.168.1.1"), nil), "v4 not in either CIDR") + assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("fe80::1"), nil), "v6 not in either CIDR") +} + +func TestParseFilter_CanonicalizesNonMaskedCIDR(t *testing.T) { + // 1.1.1.1/24 has host bits set; ParseFilter should canonicalize to 1.1.1.0/24. + f := ParseFilter([]string{"1.1.1.1/24"}, nil, nil, nil) + assert.Equal(t, netip.MustParsePrefix("1.1.1.0/24"), f.AllowedCIDRs[0]) + + // Verify it still matches correctly. + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("1.1.1.100"), nil)) + assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("1.1.2.1"), nil)) +} + +func TestFilter_Check_CountryCodeCaseInsensitive(t *testing.T) { + geo := newMockGeo(map[string]string{ + "1.1.1.1": "US", + "2.2.2.2": "DE", + "3.3.3.3": "CN", + }) + + tests := []struct { + name string + allowedCountries []string + blockedCountries []string + addr string + want Verdict + }{ + { + name: "lowercase allowlist matches uppercase MaxMind code", + allowedCountries: []string{"us", "de"}, + addr: "1.1.1.1", + want: Allow, + }, + { + name: "mixed-case allowlist matches", + allowedCountries: []string{"Us", "dE"}, + addr: "2.2.2.2", + want: Allow, + }, + { + name: "lowercase allowlist rejects non-matching country", + allowedCountries: []string{"us", "de"}, + addr: "3.3.3.3", + want: DenyCountry, + }, + { + name: "lowercase blocklist blocks matching country", + blockedCountries: []string{"cn"}, + addr: "3.3.3.3", + want: DenyCountry, + }, + { + name: "mixed-case blocklist blocks matching country", + blockedCountries: []string{"Cn"}, + addr: "3.3.3.3", + want: DenyCountry, + }, + { + name: "lowercase blocklist does not block non-matching country", + blockedCountries: []string{"cn"}, + addr: "1.1.1.1", + want: Allow, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + f := ParseFilter(nil, nil, tc.allowedCountries, tc.blockedCountries) + got := f.Check(netip.MustParseAddr(tc.addr), geo) + assert.Equal(t, tc.want, got) + }) + } +} + +// unavailableGeo simulates a GeoResolver whose database is not loaded. +type unavailableGeo struct{} + +func (u *unavailableGeo) LookupAddr(_ netip.Addr) geolocation.Result { return geolocation.Result{} } +func (u *unavailableGeo) Available() bool { return false } diff --git a/proxy/internal/tcp/router.go b/proxy/internal/tcp/router.go index 84fde0731..8255c36d3 100644 --- a/proxy/internal/tcp/router.go +++ b/proxy/internal/tcp/router.go @@ -7,12 +7,14 @@ import ( "net" "net/netip" "slices" + "strings" "sync" "time" log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/proxy/internal/accesslog" + "github.com/netbirdio/netbird/proxy/internal/restrict" "github.com/netbirdio/netbird/proxy/internal/types" ) @@ -20,6 +22,10 @@ import ( // timeout is configured. const defaultDialTimeout = 30 * time.Second +// errAccessRestricted is returned by relayTCP for access restriction +// denials so callers can skip warn-level logging (already logged at debug). +var errAccessRestricted = errors.New("rejected by access restrictions") + // SNIHost is a typed key for SNI hostname lookups. type SNIHost string @@ -64,6 +70,11 @@ type Route struct { // DialTimeout overrides the default dial timeout for this route. // Zero uses defaultDialTimeout. DialTimeout time.Duration + // SessionIdleTimeout overrides the default idle timeout for relay connections. + // Zero uses DefaultIdleTimeout. + SessionIdleTimeout time.Duration + // Filter holds connection-level IP/geo restrictions. Nil means no restrictions. + Filter *restrict.Filter } // l4Logger sends layer-4 access log entries to the management server. @@ -99,6 +110,7 @@ type Router struct { drainDone chan struct{} observer RelayObserver accessLog l4Logger + geo restrict.GeoResolver // svcCtxs tracks a context per service ID. All relay goroutines for a // service derive from its context; canceling it kills them immediately. svcCtxs map[types.ServiceID]context.Context @@ -144,6 +156,7 @@ func (r *Router) HTTPListener() net.Listener { // stored and resolved by priority at lookup time (HTTP > TCP). // Empty host is ignored to prevent conflicts with ECH/ESNI fallback. func (r *Router) AddRoute(host SNIHost, route Route) { + host = SNIHost(strings.ToLower(string(host))) if host == "" { return } @@ -166,6 +179,8 @@ func (r *Router) AddRoute(host SNIHost, route Route) { // Active relay connections for the service are closed immediately. // If other routes remain for the host, they are preserved. func (r *Router) RemoveRoute(host SNIHost, svcID types.ServiceID) { + host = SNIHost(strings.ToLower(string(host))) + r.mu.Lock() defer r.mu.Unlock() @@ -295,7 +310,7 @@ func (r *Router) handleConn(ctx context.Context, conn net.Conn) { return } - host := SNIHost(sni) + host := SNIHost(strings.ToLower(sni)) route, ok := r.lookupRoute(host) if !ok { r.handleUnmatched(ctx, wrapped) @@ -308,11 +323,13 @@ func (r *Router) handleConn(ctx context.Context, conn net.Conn) { } if err := r.relayTCP(ctx, wrapped, host, route); err != nil { - r.logger.WithFields(log.Fields{ - "sni": host, - "service_id": route.ServiceID, - "target": route.Target, - }).Warnf("TCP relay: %v", err) + if !errors.Is(err, errAccessRestricted) { + r.logger.WithFields(log.Fields{ + "sni": host, + "service_id": route.ServiceID, + "target": route.Target, + }).Warnf("TCP relay: %v", err) + } _ = wrapped.Close() } } @@ -336,10 +353,12 @@ func (r *Router) handleUnmatched(ctx context.Context, conn net.Conn) { if fb != nil { if err := r.relayTCP(ctx, conn, SNIHost("fallback"), *fb); err != nil { - r.logger.WithFields(log.Fields{ - "service_id": fb.ServiceID, - "target": fb.Target, - }).Warnf("TCP relay (fallback): %v", err) + if !errors.Is(err, errAccessRestricted) { + r.logger.WithFields(log.Fields{ + "service_id": fb.ServiceID, + "target": fb.Target, + }).Warnf("TCP relay (fallback): %v", err) + } _ = conn.Close() } return @@ -427,10 +446,44 @@ func (r *Router) cancelServiceLocked(svcID types.ServiceID) { } } +// SetGeo sets the geolocation lookup used for country-based restrictions. +func (r *Router) SetGeo(geo restrict.GeoResolver) { + r.mu.Lock() + defer r.mu.Unlock() + r.geo = geo +} + +// checkRestrictions evaluates the route's access filter against the +// connection's remote address. Returns Allow if the connection is +// permitted, or a deny verdict indicating the reason. +func (r *Router) checkRestrictions(conn net.Conn, route Route) restrict.Verdict { + if route.Filter == nil { + return restrict.Allow + } + + addr, err := addrFromConn(conn) + if err != nil { + r.logger.Debugf("cannot parse client address %s for restriction check, denying", conn.RemoteAddr()) + return restrict.DenyCIDR + } + + r.mu.RLock() + geo := r.geo + r.mu.RUnlock() + + return route.Filter.Check(addr, geo) +} + // relayTCP sets up and runs a bidirectional TCP relay. // The caller owns conn and must close it if this method returns an error. // On success (nil error), both conn and backend are closed by the relay. func (r *Router) relayTCP(ctx context.Context, conn net.Conn, sni SNIHost, route Route) error { + if verdict := r.checkRestrictions(conn, route); verdict != restrict.Allow { + r.logger.Debugf("connection from %s rejected by access restrictions: %s", conn.RemoteAddr(), verdict) + r.logL4Deny(route, conn, verdict) + return errAccessRestricted + } + svcCtx, err := r.acquireRelay(ctx, route) if err != nil { return err @@ -468,8 +521,13 @@ func (r *Router) relayTCP(ctx context.Context, conn net.Conn, sni SNIHost, route }) entry.Debug("TCP relay started") + idleTimeout := route.SessionIdleTimeout + if idleTimeout <= 0 { + idleTimeout = DefaultIdleTimeout + } + start := time.Now() - s2d, d2s := Relay(svcCtx, entry, conn, backend, DefaultIdleTimeout) + s2d, d2s := Relay(svcCtx, entry, conn, backend, idleTimeout) elapsed := time.Since(start) if obs != nil { @@ -537,12 +595,7 @@ func (r *Router) logL4Entry(route Route, conn net.Conn, duration time.Duration, return } - var sourceIP netip.Addr - if remote := conn.RemoteAddr(); remote != nil { - if ap, err := netip.ParseAddrPort(remote.String()); err == nil { - sourceIP = ap.Addr().Unmap() - } - } + sourceIP, _ := addrFromConn(conn) al.LogL4(accesslog.L4Entry{ AccountID: route.AccountID, @@ -556,6 +609,28 @@ func (r *Router) logL4Entry(route Route, conn net.Conn, duration time.Duration, }) } +// logL4Deny sends an access log entry for a denied connection. +func (r *Router) logL4Deny(route Route, conn net.Conn, verdict restrict.Verdict) { + r.mu.RLock() + al := r.accessLog + r.mu.RUnlock() + + if al == nil { + return + } + + sourceIP, _ := addrFromConn(conn) + + al.LogL4(accesslog.L4Entry{ + AccountID: route.AccountID, + ServiceID: route.ServiceID, + Protocol: route.Protocol, + Host: route.Domain, + SourceIP: sourceIP, + DenyReason: verdict.String(), + }) +} + // getOrCreateServiceCtxLocked returns the context for a service, creating one // if it doesn't exist yet. The context is a child of the server context. // Must be called with mu held. @@ -568,3 +643,16 @@ func (r *Router) getOrCreateServiceCtxLocked(parent context.Context, svcID types r.svcCancels[svcID] = cancel return ctx } + +// addrFromConn extracts a netip.Addr from a connection's remote address. +func addrFromConn(conn net.Conn) (netip.Addr, error) { + remote := conn.RemoteAddr() + if remote == nil { + return netip.Addr{}, errors.New("no remote address") + } + ap, err := netip.ParseAddrPort(remote.String()) + if err != nil { + return netip.Addr{}, err + } + return ap.Addr().Unmap(), nil +} diff --git a/proxy/internal/tcp/router_test.go b/proxy/internal/tcp/router_test.go index 0e2cfe3e1..189cdc622 100644 --- a/proxy/internal/tcp/router_test.go +++ b/proxy/internal/tcp/router_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/netbirdio/netbird/proxy/internal/restrict" "github.com/netbirdio/netbird/proxy/internal/types" ) @@ -1668,3 +1669,73 @@ func startEchoPlain(t *testing.T) net.Listener { return ln } + +// fakeAddr implements net.Addr with a custom string representation. +type fakeAddr string + +func (f fakeAddr) Network() string { return "tcp" } +func (f fakeAddr) String() string { return string(f) } + +// fakeConn is a minimal net.Conn with a controllable RemoteAddr. +type fakeConn struct { + net.Conn + remote net.Addr +} + +func (f *fakeConn) RemoteAddr() net.Addr { return f.remote } + +func TestCheckRestrictions_UnparseableAddress(t *testing.T) { + router := NewPortRouter(log.StandardLogger(), nil) + filter := restrict.ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil) + route := Route{Filter: filter} + + conn := &fakeConn{remote: fakeAddr("not-an-ip")} + assert.NotEqual(t, restrict.Allow, router.checkRestrictions(conn, route), "unparsable address must be denied") +} + +func TestCheckRestrictions_NilRemoteAddr(t *testing.T) { + router := NewPortRouter(log.StandardLogger(), nil) + filter := restrict.ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil) + route := Route{Filter: filter} + + conn := &fakeConn{remote: nil} + assert.NotEqual(t, restrict.Allow, router.checkRestrictions(conn, route), "nil remote address must be denied") +} + +func TestCheckRestrictions_AllowedAndDenied(t *testing.T) { + router := NewPortRouter(log.StandardLogger(), nil) + filter := restrict.ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil) + route := Route{Filter: filter} + + allowed := &fakeConn{remote: &net.TCPAddr{IP: net.IPv4(10, 1, 2, 3), Port: 1234}} + assert.Equal(t, restrict.Allow, router.checkRestrictions(allowed, route), "10.1.2.3 in allowlist") + + denied := &fakeConn{remote: &net.TCPAddr{IP: net.IPv4(192, 168, 1, 1), Port: 1234}} + assert.NotEqual(t, restrict.Allow, router.checkRestrictions(denied, route), "192.168.1.1 not in allowlist") +} + +func TestCheckRestrictions_NilFilter(t *testing.T) { + router := NewPortRouter(log.StandardLogger(), nil) + route := Route{Filter: nil} + + conn := &fakeConn{remote: fakeAddr("not-an-ip")} + assert.Equal(t, restrict.Allow, router.checkRestrictions(conn, route), "nil filter should allow everything") +} + +func TestCheckRestrictions_IPv4MappedIPv6(t *testing.T) { + router := NewPortRouter(log.StandardLogger(), nil) + filter := restrict.ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil) + route := Route{Filter: filter} + + // net.IPv4() returns a 16-byte v4-in-v6 representation internally. + // The restriction check must Unmap it to match the v4 CIDR. + conn := &fakeConn{remote: &net.TCPAddr{IP: net.IPv4(10, 1, 2, 3), Port: 5678}} + assert.Equal(t, restrict.Allow, router.checkRestrictions(conn, route), "v4-in-v6 TCPAddr must match v4 CIDR") + + // Explicitly v4-mapped-v6 address string. + conn6 := &fakeConn{remote: fakeAddr("[::ffff:10.1.2.3]:5678")} + assert.Equal(t, restrict.Allow, router.checkRestrictions(conn6, route), "::ffff:10.1.2.3 must match v4 CIDR") + + connOutside := &fakeConn{remote: fakeAddr("[::ffff:192.168.1.1]:5678")} + assert.NotEqual(t, restrict.Allow, router.checkRestrictions(connOutside, route), "::ffff:192.168.1.1 not in v4 CIDR") +} diff --git a/proxy/internal/udp/relay.go b/proxy/internal/udp/relay.go index f2f58e858..d20ecf48b 100644 --- a/proxy/internal/udp/relay.go +++ b/proxy/internal/udp/relay.go @@ -15,6 +15,7 @@ import ( "github.com/netbirdio/netbird/proxy/internal/accesslog" "github.com/netbirdio/netbird/proxy/internal/netutil" + "github.com/netbirdio/netbird/proxy/internal/restrict" "github.com/netbirdio/netbird/proxy/internal/types" ) @@ -67,6 +68,8 @@ type Relay struct { dialTimeout time.Duration sessionTTL time.Duration maxSessions int + filter *restrict.Filter + geo restrict.GeoResolver mu sync.RWMutex sessions map[clientAddr]*session @@ -114,6 +117,10 @@ type RelayConfig struct { SessionTTL time.Duration MaxSessions int AccessLog l4Logger + // Filter holds connection-level IP/geo restrictions. Nil means no restrictions. + Filter *restrict.Filter + // Geo is the geolocation lookup used for country-based restrictions. + Geo restrict.GeoResolver } // New creates a UDP relay for the given listener and backend target. @@ -146,6 +153,8 @@ func New(parentCtx context.Context, cfg RelayConfig) *Relay { dialTimeout: dialTimeout, sessionTTL: sessionTTL, maxSessions: maxSessions, + filter: cfg.Filter, + geo: cfg.Geo, sessions: make(map[clientAddr]*session), bufPool: sync.Pool{ New: func() any { @@ -166,9 +175,18 @@ func (r *Relay) ServiceID() types.ServiceID { // SetObserver sets the session lifecycle observer. Must be called before Serve. func (r *Relay) SetObserver(obs SessionObserver) { + r.mu.Lock() + defer r.mu.Unlock() r.observer = obs } +// getObserver returns the current session lifecycle observer. +func (r *Relay) getObserver() SessionObserver { + r.mu.RLock() + defer r.mu.RUnlock() + return r.observer +} + // Serve starts the relay loop. It blocks until the context is canceled // or the listener is closed. func (r *Relay) Serve() { @@ -209,8 +227,8 @@ func (r *Relay) Serve() { } sess.bytesIn.Add(int64(nw)) - if r.observer != nil { - r.observer.UDPPacketRelayed(types.RelayDirectionClientToBackend, nw) + if obs := r.getObserver(); obs != nil { + obs.UDPPacketRelayed(types.RelayDirectionClientToBackend, nw) } r.bufPool.Put(bufp) } @@ -234,6 +252,10 @@ func (r *Relay) getOrCreateSession(addr net.Addr) (*session, error) { return nil, r.ctx.Err() } + if err := r.checkAccessRestrictions(addr); err != nil { + return nil, err + } + r.mu.Lock() if sess, ok = r.sessions[key]; ok && sess != nil { @@ -248,16 +270,16 @@ func (r *Relay) getOrCreateSession(addr net.Addr) (*session, error) { if len(r.sessions) >= r.maxSessions { r.mu.Unlock() - if r.observer != nil { - r.observer.UDPSessionRejected(r.accountID) + if obs := r.getObserver(); obs != nil { + obs.UDPSessionRejected(r.accountID) } return nil, fmt.Errorf("session limit reached (%d)", r.maxSessions) } if !r.sessLimiter.Allow() { r.mu.Unlock() - if r.observer != nil { - r.observer.UDPSessionRejected(r.accountID) + if obs := r.getObserver(); obs != nil { + obs.UDPSessionRejected(r.accountID) } return nil, fmt.Errorf("session creation rate limited") } @@ -274,8 +296,8 @@ func (r *Relay) getOrCreateSession(addr net.Addr) (*session, error) { r.mu.Lock() delete(r.sessions, key) r.mu.Unlock() - if r.observer != nil { - r.observer.UDPSessionDialError(r.accountID) + if obs := r.getObserver(); obs != nil { + obs.UDPSessionDialError(r.accountID) } return nil, fmt.Errorf("dial backend %s: %w", r.target, err) } @@ -293,8 +315,8 @@ func (r *Relay) getOrCreateSession(addr net.Addr) (*session, error) { r.sessions[key] = sess r.mu.Unlock() - if r.observer != nil { - r.observer.UDPSessionStarted(r.accountID) + if obs := r.getObserver(); obs != nil { + obs.UDPSessionStarted(r.accountID) } r.sessWg.Go(func() { @@ -305,6 +327,21 @@ func (r *Relay) getOrCreateSession(addr net.Addr) (*session, error) { return sess, nil } +func (r *Relay) checkAccessRestrictions(addr net.Addr) error { + if r.filter == nil { + return nil + } + clientIP, err := addrFromUDPAddr(addr) + if err != nil { + return fmt.Errorf("parse client address %s for restriction check: %w", addr, err) + } + if v := r.filter.Check(clientIP, r.geo); v != restrict.Allow { + r.logDeny(clientIP, v) + return fmt.Errorf("access restricted for %s", addr) + } + return nil +} + // relayBackendToClient reads packets from the backend and writes them // back to the client through the public-facing listener. func (r *Relay) relayBackendToClient(ctx context.Context, sess *session) { @@ -332,8 +369,8 @@ func (r *Relay) relayBackendToClient(ctx context.Context, sess *session) { } sess.bytesOut.Add(int64(nw)) - if r.observer != nil { - r.observer.UDPPacketRelayed(types.RelayDirectionBackendToClient, nw) + if obs := r.getObserver(); obs != nil { + obs.UDPPacketRelayed(types.RelayDirectionBackendToClient, nw) } } } @@ -402,9 +439,10 @@ func (r *Relay) cleanupIdleSessions() { } r.mu.Unlock() + obs := r.getObserver() for _, sess := range expired { - if r.observer != nil { - r.observer.UDPSessionEnded(r.accountID) + if obs != nil { + obs.UDPSessionEnded(r.accountID) } r.logSessionEnd(sess) } @@ -429,8 +467,8 @@ func (r *Relay) removeSession(sess *session) { if removed { r.logger.Debugf("UDP session %s ended (client→backend: %d bytes, backend→client: %d bytes)", sess.addr, sess.bytesIn.Load(), sess.bytesOut.Load()) - if r.observer != nil { - r.observer.UDPSessionEnded(r.accountID) + if obs := r.getObserver(); obs != nil { + obs.UDPSessionEnded(r.accountID) } r.logSessionEnd(sess) } @@ -459,6 +497,22 @@ func (r *Relay) logSessionEnd(sess *session) { }) } +// logDeny sends an access log entry for a denied UDP packet. +func (r *Relay) logDeny(clientIP netip.Addr, verdict restrict.Verdict) { + if r.accessLog == nil { + return + } + + r.accessLog.LogL4(accesslog.L4Entry{ + AccountID: r.accountID, + ServiceID: r.serviceID, + Protocol: accesslog.ProtocolUDP, + Host: r.domain, + SourceIP: clientIP, + DenyReason: verdict.String(), + }) +} + // Close stops the relay, waits for all session goroutines to exit, // and cleans up remaining sessions. func (r *Relay) Close() { @@ -485,12 +539,22 @@ func (r *Relay) Close() { } r.mu.Unlock() + obs := r.getObserver() for _, sess := range closedSessions { - if r.observer != nil { - r.observer.UDPSessionEnded(r.accountID) + if obs != nil { + obs.UDPSessionEnded(r.accountID) } r.logSessionEnd(sess) } r.sessWg.Wait() } + +// addrFromUDPAddr extracts a netip.Addr from a net.Addr. +func addrFromUDPAddr(addr net.Addr) (netip.Addr, error) { + ap, err := netip.ParseAddrPort(addr.String()) + if err != nil { + return netip.Addr{}, err + } + return ap.Addr().Unmap(), nil +} diff --git a/proxy/management_integration_test.go b/proxy/management_integration_test.go index ebecfc6f6..8af151446 100644 --- a/proxy/management_integration_test.go +++ b/proxy/management_integration_test.go @@ -490,7 +490,7 @@ func TestIntegration_ProxyConnection_ReconnectDoesNotDuplicateState(t *testing.T logger := log.New() logger.SetLevel(log.WarnLevel) - authMw := auth.NewMiddleware(logger, nil) + authMw := auth.NewMiddleware(logger, nil, nil) proxyHandler := proxy.NewReverseProxy(nil, "auto", nil, logger) clusterAddress := "test.proxy.io" @@ -511,6 +511,7 @@ func TestIntegration_ProxyConnection_ReconnectDoesNotDuplicateState(t *testing.T 0, proxytypes.AccountID(mapping.GetAccountId()), proxytypes.ServiceID(mapping.GetId()), + nil, ) require.NoError(t, err) diff --git a/proxy/server.go b/proxy/server.go index 649d49c9a..c4d12859b 100644 --- a/proxy/server.go +++ b/proxy/server.go @@ -43,12 +43,14 @@ import ( "github.com/netbirdio/netbird/proxy/internal/certwatch" "github.com/netbirdio/netbird/proxy/internal/conntrack" "github.com/netbirdio/netbird/proxy/internal/debug" + "github.com/netbirdio/netbird/proxy/internal/geolocation" proxygrpc "github.com/netbirdio/netbird/proxy/internal/grpc" "github.com/netbirdio/netbird/proxy/internal/health" "github.com/netbirdio/netbird/proxy/internal/k8s" proxymetrics "github.com/netbirdio/netbird/proxy/internal/metrics" "github.com/netbirdio/netbird/proxy/internal/netutil" "github.com/netbirdio/netbird/proxy/internal/proxy" + "github.com/netbirdio/netbird/proxy/internal/restrict" "github.com/netbirdio/netbird/proxy/internal/roundtrip" nbtcp "github.com/netbirdio/netbird/proxy/internal/tcp" "github.com/netbirdio/netbird/proxy/internal/types" @@ -59,7 +61,6 @@ import ( "github.com/netbirdio/netbird/util/embeddedroots" ) - // portRouter bundles a per-port Router with its listener and cancel func. type portRouter struct { router *nbtcp.Router @@ -95,6 +96,9 @@ type Server struct { // so they can be closed during graceful shutdown, since http.Server.Shutdown // does not handle them. hijackTracker conntrack.HijackTracker + // geo resolves IP addresses to country/city for access restrictions and access logs. + geo restrict.GeoResolver + geoRaw *geolocation.Lookup // routerReady is closed once mainRouter is fully initialized. // The mapping worker waits on this before processing updates. @@ -159,10 +163,38 @@ type Server struct { // SupportsCustomPorts indicates whether the proxy can bind arbitrary // ports for TCP/UDP/TLS services. SupportsCustomPorts bool - // DefaultDialTimeout is the default timeout for establishing backend - // connections when no per-service timeout is configured. Zero means - // each transport uses its own hardcoded default (typically 30s). - DefaultDialTimeout time.Duration + // MaxDialTimeout caps the per-service backend dial timeout. + // When the API sends a timeout, it is clamped to this value. + // When the API sends no timeout, this value is used as the default. + // Zero means no cap (the proxy honors whatever management sends). + MaxDialTimeout time.Duration + // GeoDataDir is the directory containing GeoLite2 MMDB files for + // country-based access restrictions. Empty disables geo lookups. + GeoDataDir string + // MaxSessionIdleTimeout caps the per-service session idle timeout. + // Zero means no cap (the proxy honors whatever management sends). + // Set via NB_PROXY_MAX_SESSION_IDLE_TIMEOUT for shared deployments. + MaxSessionIdleTimeout time.Duration +} + +// clampIdleTimeout returns d capped to MaxSessionIdleTimeout when configured. +func (s *Server) clampIdleTimeout(d time.Duration) time.Duration { + if s.MaxSessionIdleTimeout > 0 && d > s.MaxSessionIdleTimeout { + return s.MaxSessionIdleTimeout + } + return d +} + +// clampDialTimeout returns d capped to MaxDialTimeout when configured. +// If d is zero, MaxDialTimeout is used as the default. +func (s *Server) clampDialTimeout(d time.Duration) time.Duration { + if s.MaxDialTimeout <= 0 { + return d + } + if d <= 0 || d > s.MaxDialTimeout { + return s.MaxDialTimeout + } + return d } // NotifyStatus sends a status update to management about tunnel connectivity. @@ -226,7 +258,6 @@ func (s *Server) ListenAndServe(ctx context.Context, addr string) (err error) { s.mgmtClient = proto.NewProxyServiceClient(mgmtConn) runCtx, runCancel := context.WithCancel(ctx) defer runCancel() - go s.newManagementMappingWorker(runCtx, s.mgmtClient) // Initialize the netbird client, this is required to build peer connections // to proxy over. @@ -236,6 +267,12 @@ func (s *Server) ListenAndServe(ctx context.Context, addr string) (err error) { PreSharedKey: s.PreSharedKey, }, s.Logger, s, s.mgmtClient) + // Create health checker before the mapping worker so it can track + // management connectivity from the first stream connection. + s.healthChecker = health.NewChecker(s.Logger, s.netbird) + + go s.newManagementMappingWorker(runCtx, s.mgmtClient) + tlsConfig, err := s.configureTLS(ctx) if err != nil { return err @@ -244,14 +281,33 @@ func (s *Server) ListenAndServe(ctx context.Context, addr string) (err error) { // Configure the reverse proxy using NetBird's HTTP Client Transport for proxying. s.proxy = proxy.NewReverseProxy(s.meter.RoundTripper(s.netbird), s.ForwardedProto, s.TrustedProxies, s.Logger) + geoLookup, err := geolocation.NewLookup(s.Logger, s.GeoDataDir) + if err != nil { + return fmt.Errorf("initialize geolocation: %w", err) + } + s.geoRaw = geoLookup + if geoLookup != nil { + s.geo = geoLookup + } + + var startupOK bool + defer func() { + if startupOK { + return + } + if s.geoRaw != nil { + if err := s.geoRaw.Close(); err != nil { + s.Logger.Debugf("close geolocation on startup failure: %v", err) + } + } + }() + // Configure the authentication middleware with session validator for OIDC group checks. - s.auth = auth.NewMiddleware(s.Logger, s.mgmtClient) + s.auth = auth.NewMiddleware(s.Logger, s.mgmtClient, s.geo) // Configure Access logs to management server. s.accessLog = accesslog.NewLogger(s.mgmtClient, s.Logger, s.TrustedProxies) - s.healthChecker = health.NewChecker(s.Logger, s.netbird) - s.startDebugEndpoint() if err := s.startHealthServer(); err != nil { @@ -294,6 +350,8 @@ func (s *Server) ListenAndServe(ctx context.Context, addr string) (err error) { ErrorLog: newHTTPServerLogger(s.Logger, logtagValueHTTPS), } + startupOK = true + httpsErr := make(chan error, 1) go func() { s.Logger.Debug("starting HTTPS server on SNI router HTTP channel") @@ -691,6 +749,16 @@ func (s *Server) shutdownServices() { s.portRouterWg.Wait() wg.Wait() + + if s.accessLog != nil { + s.accessLog.Close() + } + + if s.geoRaw != nil { + if err := s.geoRaw.Close(); err != nil { + s.Logger.Debugf("close geolocation: %v", err) + } + } } // resolveDialFunc returns a DialContextFunc that dials through the @@ -1073,15 +1141,20 @@ func (s *Server) setupTCPMapping(ctx context.Context, mapping *proto.ProxyMappin return fmt.Errorf("router for TCP port %d: %w", port, err) } + s.warnIfGeoUnavailable(mapping.GetDomain(), mapping.GetAccessRestrictions()) + + router.SetGeo(s.geo) router.SetFallback(nbtcp.Route{ - Type: nbtcp.RouteTCP, - AccountID: accountID, - ServiceID: svcID, - Domain: mapping.GetDomain(), - Protocol: accesslog.ProtocolTCP, - Target: targetAddr, - ProxyProtocol: s.l4ProxyProtocol(mapping), - DialTimeout: s.l4DialTimeout(mapping), + Type: nbtcp.RouteTCP, + AccountID: accountID, + ServiceID: svcID, + Domain: mapping.GetDomain(), + Protocol: accesslog.ProtocolTCP, + Target: targetAddr, + ProxyProtocol: s.l4ProxyProtocol(mapping), + DialTimeout: s.l4DialTimeout(mapping), + SessionIdleTimeout: s.clampIdleTimeout(l4SessionIdleTimeout(mapping)), + Filter: parseRestrictions(mapping), }) s.portMu.Lock() @@ -1108,6 +1181,8 @@ func (s *Server) setupUDPMapping(ctx context.Context, mapping *proto.ProxyMappin return fmt.Errorf("empty target address for UDP service %s", svcID) } + s.warnIfGeoUnavailable(mapping.GetDomain(), mapping.GetAccessRestrictions()) + if err := s.addUDPRelay(ctx, mapping, targetAddr, port); err != nil { return fmt.Errorf("UDP relay for service %s: %w", svcID, err) } @@ -1141,15 +1216,20 @@ func (s *Server) setupTLSMapping(ctx context.Context, mapping *proto.ProxyMappin return fmt.Errorf("router for TLS port %d: %w", tlsPort, err) } + s.warnIfGeoUnavailable(mapping.GetDomain(), mapping.GetAccessRestrictions()) + + router.SetGeo(s.geo) router.AddRoute(nbtcp.SNIHost(mapping.GetDomain()), nbtcp.Route{ - Type: nbtcp.RouteTCP, - AccountID: accountID, - ServiceID: svcID, - Domain: mapping.GetDomain(), - Protocol: accesslog.ProtocolTLS, - Target: targetAddr, - ProxyProtocol: s.l4ProxyProtocol(mapping), - DialTimeout: s.l4DialTimeout(mapping), + Type: nbtcp.RouteTCP, + AccountID: accountID, + ServiceID: svcID, + Domain: mapping.GetDomain(), + Protocol: accesslog.ProtocolTLS, + Target: targetAddr, + ProxyProtocol: s.l4ProxyProtocol(mapping), + DialTimeout: s.l4DialTimeout(mapping), + SessionIdleTimeout: s.clampIdleTimeout(l4SessionIdleTimeout(mapping)), + Filter: parseRestrictions(mapping), }) if tlsPort != s.mainPort { @@ -1181,6 +1261,32 @@ func (s *Server) serviceKeyForMapping(mapping *proto.ProxyMapping) roundtrip.Ser } } +// parseRestrictions converts a proto mapping's access restrictions into +// a restrict.Filter. Returns nil if the mapping has no restrictions. +func parseRestrictions(mapping *proto.ProxyMapping) *restrict.Filter { + r := mapping.GetAccessRestrictions() + if r == nil { + return nil + } + return restrict.ParseFilter(r.GetAllowedCidrs(), r.GetBlockedCidrs(), r.GetAllowedCountries(), r.GetBlockedCountries()) +} + +// warnIfGeoUnavailable logs a warning if the mapping has country restrictions +// but the proxy has no geolocation database loaded. All requests to this +// service will be denied at runtime (fail-close). +func (s *Server) warnIfGeoUnavailable(domain string, r *proto.AccessRestrictions) { + if r == nil { + return + } + if len(r.GetAllowedCountries()) == 0 && len(r.GetBlockedCountries()) == 0 { + return + } + if s.geo != nil && s.geo.Available() { + return + } + s.Logger.Warnf("service %s has country restrictions but no geolocation database is loaded: all requests will be denied", domain) +} + // l4TargetAddress extracts and validates the target address from a mapping's // first path entry. Returns empty string if no paths exist or the address is // not a valid host:port. @@ -1210,15 +1316,15 @@ func (s *Server) l4ProxyProtocol(mapping *proto.ProxyMapping) bool { } // l4DialTimeout returns the dial timeout from the first target's options, -// falling back to the server's DefaultDialTimeout. +// clamped to MaxDialTimeout. func (s *Server) l4DialTimeout(mapping *proto.ProxyMapping) time.Duration { paths := mapping.GetPath() if len(paths) > 0 { if d := paths[0].GetOptions().GetRequestTimeout(); d != nil { - return d.AsDuration() + return s.clampDialTimeout(d.AsDuration()) } } - return s.DefaultDialTimeout + return s.clampDialTimeout(0) } // l4SessionIdleTimeout returns the configured session idle timeout from the @@ -1254,7 +1360,9 @@ func (s *Server) addUDPRelay(ctx context.Context, mapping *proto.ProxyMapping, t dialFn, err := s.resolveDialFunc(accountID) if err != nil { - _ = listener.Close() + if err := listener.Close(); err != nil { + s.Logger.Debugf("close UDP listener on %s: %v", listenAddr, err) + } return fmt.Errorf("resolve dialer for UDP: %w", err) } @@ -1273,8 +1381,10 @@ func (s *Server) addUDPRelay(ctx context.Context, mapping *proto.ProxyMapping, t ServiceID: svcID, DialFunc: dialFn, DialTimeout: s.l4DialTimeout(mapping), - SessionTTL: l4SessionIdleTimeout(mapping), + SessionTTL: s.clampIdleTimeout(l4SessionIdleTimeout(mapping)), AccessLog: s.accessLog, + Filter: parseRestrictions(mapping), + Geo: s.geo, }) relay.SetObserver(s.meter) @@ -1306,9 +1416,15 @@ func (s *Server) updateMapping(ctx context.Context, mapping *proto.ProxyMapping) if mapping.GetAuth().GetOidc() { schemes = append(schemes, auth.NewOIDC(s.mgmtClient, svcID, accountID, s.ForwardedProto)) } + for _, ha := range mapping.GetAuth().GetHeaderAuths() { + schemes = append(schemes, auth.NewHeader(s.mgmtClient, svcID, accountID, ha.GetHeader())) + } + + ipRestrictions := parseRestrictions(mapping) + s.warnIfGeoUnavailable(mapping.GetDomain(), mapping.GetAccessRestrictions()) maxSessionAge := time.Duration(mapping.GetAuth().GetMaxSessionAgeSeconds()) * time.Second - if err := s.auth.AddDomain(mapping.GetDomain(), schemes, mapping.GetAuth().GetSessionKey(), maxSessionAge, accountID, svcID); err != nil { + if err := s.auth.AddDomain(mapping.GetDomain(), schemes, mapping.GetAuth().GetSessionKey(), maxSessionAge, accountID, svcID, ipRestrictions); err != nil { return fmt.Errorf("auth setup for domain %s: %w", mapping.GetDomain(), err) } m := s.protoToMapping(ctx, mapping) @@ -1449,12 +1565,10 @@ func (s *Server) protoToMapping(ctx context.Context, mapping *proto.ProxyMapping pt.RequestTimeout = d.AsDuration() } } - if pt.RequestTimeout == 0 && s.DefaultDialTimeout > 0 { - pt.RequestTimeout = s.DefaultDialTimeout - } + pt.RequestTimeout = s.clampDialTimeout(pt.RequestTimeout) paths[pathMapping.GetPath()] = pt } - return proxy.Mapping{ + m := proxy.Mapping{ ID: types.ServiceID(mapping.GetId()), AccountID: types.AccountID(mapping.GetAccountId()), Host: mapping.GetDomain(), @@ -1462,6 +1576,10 @@ func (s *Server) protoToMapping(ctx context.Context, mapping *proto.ProxyMapping PassHostHeader: mapping.GetPassHostHeader(), RewriteRedirects: mapping.GetRewriteRedirects(), } + for _, ha := range mapping.GetAuth().GetHeaderAuths() { + m.StripAuthHeaders = append(m.StripAuthHeaders, ha.GetHeader()) + } + return m } func protoToPathRewrite(mode proto.PathRewriteMode) proxy.PathRewriteMode { diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 4b851bf19..66f39b92f 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -2826,6 +2826,10 @@ components: type: string description: "City name from geolocation" example: "San Francisco" + subdivision_code: + type: string + description: "First-level administrative subdivision ISO code (e.g. state/province)" + example: "CA" bytes_upload: type: integer format: int64 @@ -2952,26 +2956,32 @@ components: id: type: string description: Service ID + example: "cs8i4ug6lnn4g9hqv7mg" name: type: string description: Service name + example: "myapp.example.netbird.app" domain: type: string description: Domain for the service + example: "myapp.example.netbird.app" mode: type: string description: Service mode. "http" for L7 reverse proxy, "tcp"/"udp"/"tls" for L4 passthrough. enum: [http, tcp, udp, tls] default: http + example: "http" listen_port: type: integer minimum: 0 maximum: 65535 description: Port the proxy listens on (L4/TLS only) + example: 8443 port_auto_assigned: type: boolean description: Whether the listen port was auto-assigned readOnly: true + example: false proxy_cluster: type: string description: The proxy cluster handling this service (derived from domain) @@ -2984,14 +2994,19 @@ components: enabled: type: boolean description: Whether the service is enabled + example: true pass_host_header: type: boolean description: When true, the original client Host header is passed through to the backend instead of being rewritten to the backend's address + example: false rewrite_redirects: type: boolean description: When true, Location headers in backend responses are rewritten to replace the backend address with the public-facing domain + example: false auth: $ref: '#/components/schemas/ServiceAuthConfig' + access_restrictions: + $ref: '#/components/schemas/AccessRestrictions' meta: $ref: '#/components/schemas/ServiceMeta' required: @@ -3035,19 +3050,23 @@ components: name: type: string description: Service name + example: "myapp.example.netbird.app" domain: type: string description: Domain for the service + example: "myapp.example.netbird.app" mode: type: string description: Service mode. "http" for L7 reverse proxy, "tcp"/"udp"/"tls" for L4 passthrough. enum: [http, tcp, udp, tls] default: http + example: "http" listen_port: type: integer minimum: 0 maximum: 65535 description: Port the proxy listens on (L4/TLS only). Set to 0 for auto-assignment. + example: 5432 targets: type: array items: @@ -3057,14 +3076,19 @@ components: type: boolean description: Whether the service is enabled default: true + example: true pass_host_header: type: boolean description: When true, the original client Host header is passed through to the backend instead of being rewritten to the backend's address + example: false rewrite_redirects: type: boolean description: When true, Location headers in backend responses are rewritten to replace the backend address with the public-facing domain + example: false auth: $ref: '#/components/schemas/ServiceAuthConfig' + access_restrictions: + $ref: '#/components/schemas/AccessRestrictions' required: - name - domain @@ -3075,13 +3099,16 @@ components: skip_tls_verify: type: boolean description: Skip TLS certificate verification for this backend + example: false request_timeout: type: string description: Per-target response timeout as a Go duration string (e.g. "30s", "2m") + example: "30s" path_rewrite: type: string description: Controls how the request path is rewritten before forwarding to the backend. Default strips the matched prefix. "preserve" keeps the full original request path. enum: [preserve] + example: "preserve" custom_headers: type: object description: Extra headers sent to the backend. Hop-by-hop and proxy-managed headers (Host, Connection, Transfer-Encoding, etc.) are rejected. @@ -3091,40 +3118,50 @@ components: additionalProperties: type: string pattern: '^[^\r\n]*$' + example: {"X-Custom-Header": "value"} proxy_protocol: type: boolean description: Send PROXY Protocol v2 header to this backend (TCP/TLS only) + example: false session_idle_timeout: type: string - description: Idle timeout before a UDP session is reaped, as a Go duration string (e.g. "30s", "2m"). Maximum 10m. + description: Idle timeout before a UDP session is reaped, as a Go duration string (e.g. "30s", "2m"). + example: "2m" ServiceTarget: type: object properties: target_id: type: string description: Target ID + example: "cs8i4ug6lnn4g9hqv7mg" target_type: type: string description: Target type enum: [peer, host, domain, subnet] + example: "subnet" path: type: string description: URL path prefix for this target (HTTP only) + example: "/" protocol: type: string description: Protocol to use when connecting to the backend enum: [http, https, tcp, udp] + example: "http" host: type: string description: Backend ip or domain for this target + example: "10.10.0.1" port: type: integer minimum: 1 maximum: 65535 description: Backend port for this target + example: 8080 enabled: type: boolean description: Whether this target is enabled + example: true options: $ref: '#/components/schemas/ServiceTargetOptions' required: @@ -3144,15 +3181,73 @@ components: $ref: '#/components/schemas/BearerAuthConfig' link_auth: $ref: '#/components/schemas/LinkAuthConfig' + header_auths: + type: array + items: + $ref: '#/components/schemas/HeaderAuthConfig' + HeaderAuthConfig: + type: object + description: Static header-value authentication. The proxy checks that the named header matches the configured value. + properties: + enabled: + type: boolean + description: Whether header auth is enabled + example: true + header: + type: string + description: HTTP header name to check (e.g. "Authorization", "X-API-Key") + example: "X-API-Key" + value: + type: string + description: Expected header value. For Basic auth use "Basic base64(user:pass)". For Bearer use "Bearer token". Cleared in responses. + example: "my-secret-api-key" + required: + - enabled + - header + - value + AccessRestrictions: + type: object + description: Connection-level access restrictions based on IP address or geography. Applies to both HTTP and L4 services. + properties: + allowed_cidrs: + type: array + items: + type: string + format: cidr + example: "192.168.1.0/24" + description: CIDR allowlist. If non-empty, only IPs matching these CIDRs are allowed. + blocked_cidrs: + type: array + items: + type: string + format: cidr + example: "10.0.0.0/8" + description: CIDR blocklist. Connections from these CIDRs are rejected. Evaluated after allowed_cidrs. + allowed_countries: + type: array + items: + type: string + pattern: '^[a-zA-Z]{2}$' + example: "US" + description: ISO 3166-1 alpha-2 country codes to allow. If non-empty, only these countries are permitted. + blocked_countries: + type: array + items: + type: string + pattern: '^[a-zA-Z]{2}$' + example: "DE" + description: ISO 3166-1 alpha-2 country codes to block. PasswordAuthConfig: type: object properties: enabled: type: boolean description: Whether password auth is enabled + example: true password: type: string description: Auth password + example: "s3cret" required: - enabled - password @@ -3162,9 +3257,11 @@ components: enabled: type: boolean description: Whether PIN auth is enabled + example: false pin: type: string description: PIN value + example: "1234" required: - enabled - pin @@ -3174,10 +3271,12 @@ components: enabled: type: boolean description: Whether bearer auth is enabled + example: true distribution_groups: type: array items: type: string + example: "ch8i4ug6lnn4g9hqv7mg" description: List of group IDs that can use bearer auth required: - enabled @@ -3187,6 +3286,7 @@ components: enabled: type: boolean description: Whether link auth is enabled + example: false required: - enabled ProxyCluster: @@ -3217,20 +3317,25 @@ components: id: type: string description: Domain ID + example: "ds8i4ug6lnn4g9hqv7mg" domain: type: string description: Domain name + example: "example.netbird.app" validated: type: boolean description: Whether the domain has been validated + example: true type: $ref: '#/components/schemas/ReverseProxyDomainType' target_cluster: type: string description: The proxy cluster this domain is validated against (only for custom domains) + example: "eu.proxy.netbird.io" supports_custom_ports: type: boolean description: Whether the cluster supports binding arbitrary TCP/UDP ports + example: true required: - id - domain @@ -3242,9 +3347,11 @@ components: domain: type: string description: Domain name + example: "myapp.example.com" target_cluster: type: string description: The proxy cluster this domain should be validated against + example: "eu.proxy.netbird.io" required: - domain - target_cluster diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index 4ec3b871a..693449d14 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -1276,6 +1276,21 @@ func (e PutApiIntegrationsMspTenantsIdInviteJSONBodyValue) Valid() bool { } } +// AccessRestrictions Connection-level access restrictions based on IP address or geography. Applies to both HTTP and L4 services. +type AccessRestrictions struct { + // AllowedCidrs CIDR allowlist. If non-empty, only IPs matching these CIDRs are allowed. + AllowedCidrs *[]string `json:"allowed_cidrs,omitempty"` + + // AllowedCountries ISO 3166-1 alpha-2 country codes to allow. If non-empty, only these countries are permitted. + AllowedCountries *[]string `json:"allowed_countries,omitempty"` + + // BlockedCidrs CIDR blocklist. Connections from these CIDRs are rejected. Evaluated after allowed_cidrs. + BlockedCidrs *[]string `json:"blocked_cidrs,omitempty"` + + // BlockedCountries ISO 3166-1 alpha-2 country codes to block. + BlockedCountries *[]string `json:"blocked_countries,omitempty"` +} + // AccessiblePeer defines model for AccessiblePeer. type AccessiblePeer struct { // CityName Commonly used English name of the city @@ -1988,6 +2003,18 @@ type GroupRequest struct { Resources *[]Resource `json:"resources,omitempty"` } +// HeaderAuthConfig Static header-value authentication. The proxy checks that the named header matches the configured value. +type HeaderAuthConfig struct { + // Enabled Whether header auth is enabled + Enabled bool `json:"enabled"` + + // Header HTTP header name to check (e.g. "Authorization", "X-API-Key") + Header string `json:"header"` + + // Value Expected header value. For Basic auth use "Basic base64(user:pass)". For Bearer use "Bearer token". Cleared in responses. + Value string `json:"value"` +} + // HuntressMatchAttributes Attribute conditions to match when approving agents type HuntressMatchAttributes struct { // DefenderPolicyStatus Policy status of Defender AV for Managed Antivirus. @@ -3324,6 +3351,9 @@ type ProxyAccessLog struct { // StatusCode HTTP status code returned StatusCode int `json:"status_code"` + // SubdivisionCode First-level administrative subdivision ISO code (e.g. state/province) + SubdivisionCode *string `json:"subdivision_code,omitempty"` + // Timestamp Timestamp when the request was made Timestamp time.Time `json:"timestamp"` @@ -3562,7 +3592,9 @@ type SentinelOneMatchAttributesNetworkStatus string // Service defines model for Service. type Service struct { - Auth ServiceAuthConfig `json:"auth"` + // AccessRestrictions Connection-level access restrictions based on IP address or geography. Applies to both HTTP and L4 services. + AccessRestrictions *AccessRestrictions `json:"access_restrictions,omitempty"` + Auth ServiceAuthConfig `json:"auth"` // Domain Domain for the service Domain string `json:"domain"` @@ -3605,6 +3637,7 @@ type ServiceMode string // ServiceAuthConfig defines model for ServiceAuthConfig. type ServiceAuthConfig struct { BearerAuth *BearerAuthConfig `json:"bearer_auth,omitempty"` + HeaderAuths *[]HeaderAuthConfig `json:"header_auths,omitempty"` LinkAuth *LinkAuthConfig `json:"link_auth,omitempty"` PasswordAuth *PasswordAuthConfig `json:"password_auth,omitempty"` PinAuth *PINAuthConfig `json:"pin_auth,omitempty"` @@ -3627,7 +3660,9 @@ type ServiceMetaStatus string // ServiceRequest defines model for ServiceRequest. type ServiceRequest struct { - Auth *ServiceAuthConfig `json:"auth,omitempty"` + // AccessRestrictions Connection-level access restrictions based on IP address or geography. Applies to both HTTP and L4 services. + AccessRestrictions *AccessRestrictions `json:"access_restrictions,omitempty"` + Auth *ServiceAuthConfig `json:"auth,omitempty"` // Domain Domain for the service Domain string `json:"domain"` @@ -3702,7 +3737,7 @@ type ServiceTargetOptions struct { // RequestTimeout Per-target response timeout as a Go duration string (e.g. "30s", "2m") RequestTimeout *string `json:"request_timeout,omitempty"` - // SessionIdleTimeout Idle timeout before a UDP session is reaped, as a Go duration string (e.g. "30s", "2m"). Maximum 10m. + // SessionIdleTimeout Idle timeout before a UDP session is reaped, as a Go duration string (e.g. "30s", "2m"). SessionIdleTimeout *string `json:"session_idle_timeout,omitempty"` // SkipTlsVerify Skip TLS certificate verification for this backend diff --git a/shared/management/proto/proxy_service.pb.go b/shared/management/proto/proxy_service.pb.go index 115ac5101..e5a2d6a98 100644 --- a/shared/management/proto/proxy_service.pb.go +++ b/shared/management/proto/proxy_service.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.36.6 // protoc v6.33.3 // source: proxy_service.proto @@ -13,6 +13,7 @@ import ( timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -177,21 +178,18 @@ func (ProxyStatus) EnumDescriptor() ([]byte, []int) { // ProxyCapabilities describes what a proxy can handle. type ProxyCapabilities struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Whether the proxy can bind arbitrary ports for TCP/UDP/TLS services. SupportsCustomPorts *bool `protobuf:"varint,1,opt,name=supports_custom_ports,json=supportsCustomPorts,proto3,oneof" json:"supports_custom_ports,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ProxyCapabilities) Reset() { *x = ProxyCapabilities{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ProxyCapabilities) String() string { @@ -202,7 +200,7 @@ func (*ProxyCapabilities) ProtoMessage() {} func (x *ProxyCapabilities) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -226,24 +224,21 @@ func (x *ProxyCapabilities) GetSupportsCustomPorts() bool { // GetMappingUpdateRequest is sent to initialise a mapping stream. type GetMappingUpdateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ProxyId string `protobuf:"bytes,1,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + StartedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + Address string `protobuf:"bytes,4,opt,name=address,proto3" json:"address,omitempty"` + Capabilities *ProxyCapabilities `protobuf:"bytes,5,opt,name=capabilities,proto3" json:"capabilities,omitempty"` unknownFields protoimpl.UnknownFields - - ProxyId string `protobuf:"bytes,1,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - StartedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` - Address string `protobuf:"bytes,4,opt,name=address,proto3" json:"address,omitempty"` - Capabilities *ProxyCapabilities `protobuf:"bytes,5,opt,name=capabilities,proto3" json:"capabilities,omitempty"` + sizeCache protoimpl.SizeCache } func (x *GetMappingUpdateRequest) Reset() { *x = GetMappingUpdateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetMappingUpdateRequest) String() string { @@ -254,7 +249,7 @@ func (*GetMappingUpdateRequest) ProtoMessage() {} func (x *GetMappingUpdateRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -308,23 +303,20 @@ func (x *GetMappingUpdateRequest) GetCapabilities() *ProxyCapabilities { // No mappings may be sent to test the liveness of the Proxy. // Mappings that are sent should be interpreted by the Proxy appropriately. type GetMappingUpdateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Mapping []*ProxyMapping `protobuf:"bytes,1,rep,name=mapping,proto3" json:"mapping,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Mapping []*ProxyMapping `protobuf:"bytes,1,rep,name=mapping,proto3" json:"mapping,omitempty"` // initial_sync_complete is set on the last message of the initial snapshot. // The proxy uses this to signal that startup is complete. InitialSyncComplete bool `protobuf:"varint,2,opt,name=initial_sync_complete,json=initialSyncComplete,proto3" json:"initial_sync_complete,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetMappingUpdateResponse) Reset() { *x = GetMappingUpdateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetMappingUpdateResponse) String() string { @@ -335,7 +327,7 @@ func (*GetMappingUpdateResponse) ProtoMessage() {} func (x *GetMappingUpdateResponse) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -365,27 +357,24 @@ func (x *GetMappingUpdateResponse) GetInitialSyncComplete() bool { } type PathTargetOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SkipTlsVerify bool `protobuf:"varint,1,opt,name=skip_tls_verify,json=skipTlsVerify,proto3" json:"skip_tls_verify,omitempty"` - RequestTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"` - PathRewrite PathRewriteMode `protobuf:"varint,3,opt,name=path_rewrite,json=pathRewrite,proto3,enum=management.PathRewriteMode" json:"path_rewrite,omitempty"` - CustomHeaders map[string]string `protobuf:"bytes,4,rep,name=custom_headers,json=customHeaders,proto3" json:"custom_headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + state protoimpl.MessageState `protogen:"open.v1"` + SkipTlsVerify bool `protobuf:"varint,1,opt,name=skip_tls_verify,json=skipTlsVerify,proto3" json:"skip_tls_verify,omitempty"` + RequestTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"` + PathRewrite PathRewriteMode `protobuf:"varint,3,opt,name=path_rewrite,json=pathRewrite,proto3,enum=management.PathRewriteMode" json:"path_rewrite,omitempty"` + CustomHeaders map[string]string `protobuf:"bytes,4,rep,name=custom_headers,json=customHeaders,proto3" json:"custom_headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Send PROXY protocol v2 header to this backend. ProxyProtocol bool `protobuf:"varint,5,opt,name=proxy_protocol,json=proxyProtocol,proto3" json:"proxy_protocol,omitempty"` // Idle timeout before a UDP session is reaped. SessionIdleTimeout *durationpb.Duration `protobuf:"bytes,6,opt,name=session_idle_timeout,json=sessionIdleTimeout,proto3" json:"session_idle_timeout,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PathTargetOptions) Reset() { *x = PathTargetOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PathTargetOptions) String() string { @@ -396,7 +385,7 @@ func (*PathTargetOptions) ProtoMessage() {} func (x *PathTargetOptions) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -454,22 +443,19 @@ func (x *PathTargetOptions) GetSessionIdleTimeout() *durationpb.Duration { } type PathMapping struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Target string `protobuf:"bytes,2,opt,name=target,proto3" json:"target,omitempty"` + Options *PathTargetOptions `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Target string `protobuf:"bytes,2,opt,name=target,proto3" json:"target,omitempty"` - Options *PathTargetOptions `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *PathMapping) Reset() { *x = PathMapping{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PathMapping) String() string { @@ -480,7 +466,7 @@ func (*PathMapping) ProtoMessage() {} func (x *PathMapping) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -516,25 +502,77 @@ func (x *PathMapping) GetOptions() *PathTargetOptions { return nil } -type Authentication struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type HeaderAuth struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Header name to check, e.g. "Authorization", "X-API-Key". + Header string `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // argon2id hash of the expected full header value. + HashedValue string `protobuf:"bytes,2,opt,name=hashed_value,json=hashedValue,proto3" json:"hashed_value,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} - SessionKey string `protobuf:"bytes,1,opt,name=session_key,json=sessionKey,proto3" json:"session_key,omitempty"` - MaxSessionAgeSeconds int64 `protobuf:"varint,2,opt,name=max_session_age_seconds,json=maxSessionAgeSeconds,proto3" json:"max_session_age_seconds,omitempty"` - Password bool `protobuf:"varint,3,opt,name=password,proto3" json:"password,omitempty"` - Pin bool `protobuf:"varint,4,opt,name=pin,proto3" json:"pin,omitempty"` - Oidc bool `protobuf:"varint,5,opt,name=oidc,proto3" json:"oidc,omitempty"` +func (x *HeaderAuth) Reset() { + *x = HeaderAuth{} + mi := &file_proxy_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HeaderAuth) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderAuth) ProtoMessage() {} + +func (x *HeaderAuth) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderAuth.ProtoReflect.Descriptor instead. +func (*HeaderAuth) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{5} +} + +func (x *HeaderAuth) GetHeader() string { + if x != nil { + return x.Header + } + return "" +} + +func (x *HeaderAuth) GetHashedValue() string { + if x != nil { + return x.HashedValue + } + return "" +} + +type Authentication struct { + state protoimpl.MessageState `protogen:"open.v1"` + SessionKey string `protobuf:"bytes,1,opt,name=session_key,json=sessionKey,proto3" json:"session_key,omitempty"` + MaxSessionAgeSeconds int64 `protobuf:"varint,2,opt,name=max_session_age_seconds,json=maxSessionAgeSeconds,proto3" json:"max_session_age_seconds,omitempty"` + Password bool `protobuf:"varint,3,opt,name=password,proto3" json:"password,omitempty"` + Pin bool `protobuf:"varint,4,opt,name=pin,proto3" json:"pin,omitempty"` + Oidc bool `protobuf:"varint,5,opt,name=oidc,proto3" json:"oidc,omitempty"` + HeaderAuths []*HeaderAuth `protobuf:"bytes,6,rep,name=header_auths,json=headerAuths,proto3" json:"header_auths,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Authentication) Reset() { *x = Authentication{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Authentication) String() string { @@ -544,8 +582,8 @@ func (x *Authentication) String() string { func (*Authentication) ProtoMessage() {} func (x *Authentication) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_proxy_service_proto_msgTypes[6] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -557,7 +595,7 @@ func (x *Authentication) ProtoReflect() protoreflect.Message { // Deprecated: Use Authentication.ProtoReflect.Descriptor instead. func (*Authentication) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{5} + return file_proxy_service_proto_rawDescGZIP(), []int{6} } func (x *Authentication) GetSessionKey() string { @@ -595,11 +633,83 @@ func (x *Authentication) GetOidc() bool { return false } -type ProxyMapping struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *Authentication) GetHeaderAuths() []*HeaderAuth { + if x != nil { + return x.HeaderAuths + } + return nil +} +type AccessRestrictions struct { + state protoimpl.MessageState `protogen:"open.v1"` + AllowedCidrs []string `protobuf:"bytes,1,rep,name=allowed_cidrs,json=allowedCidrs,proto3" json:"allowed_cidrs,omitempty"` + BlockedCidrs []string `protobuf:"bytes,2,rep,name=blocked_cidrs,json=blockedCidrs,proto3" json:"blocked_cidrs,omitempty"` + AllowedCountries []string `protobuf:"bytes,3,rep,name=allowed_countries,json=allowedCountries,proto3" json:"allowed_countries,omitempty"` + BlockedCountries []string `protobuf:"bytes,4,rep,name=blocked_countries,json=blockedCountries,proto3" json:"blocked_countries,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AccessRestrictions) Reset() { + *x = AccessRestrictions{} + mi := &file_proxy_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AccessRestrictions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AccessRestrictions) ProtoMessage() {} + +func (x *AccessRestrictions) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AccessRestrictions.ProtoReflect.Descriptor instead. +func (*AccessRestrictions) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{7} +} + +func (x *AccessRestrictions) GetAllowedCidrs() []string { + if x != nil { + return x.AllowedCidrs + } + return nil +} + +func (x *AccessRestrictions) GetBlockedCidrs() []string { + if x != nil { + return x.BlockedCidrs + } + return nil +} + +func (x *AccessRestrictions) GetAllowedCountries() []string { + if x != nil { + return x.AllowedCountries + } + return nil +} + +func (x *AccessRestrictions) GetBlockedCountries() []string { + if x != nil { + return x.BlockedCountries + } + return nil +} + +type ProxyMapping struct { + state protoimpl.MessageState `protogen:"open.v1"` Type ProxyMappingUpdateType `protobuf:"varint,1,opt,name=type,proto3,enum=management.ProxyMappingUpdateType" json:"type,omitempty"` Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` AccountId string `protobuf:"bytes,3,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` @@ -616,16 +726,17 @@ type ProxyMapping struct { // Service mode: "http", "tcp", "udp", or "tls". Mode string `protobuf:"bytes,10,opt,name=mode,proto3" json:"mode,omitempty"` // For L4/TLS: the port the proxy listens on. - ListenPort int32 `protobuf:"varint,11,opt,name=listen_port,json=listenPort,proto3" json:"listen_port,omitempty"` + ListenPort int32 `protobuf:"varint,11,opt,name=listen_port,json=listenPort,proto3" json:"listen_port,omitempty"` + AccessRestrictions *AccessRestrictions `protobuf:"bytes,12,opt,name=access_restrictions,json=accessRestrictions,proto3" json:"access_restrictions,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ProxyMapping) Reset() { *x = ProxyMapping{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ProxyMapping) String() string { @@ -635,8 +746,8 @@ func (x *ProxyMapping) String() string { func (*ProxyMapping) ProtoMessage() {} func (x *ProxyMapping) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_proxy_service_proto_msgTypes[8] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -648,7 +759,7 @@ func (x *ProxyMapping) ProtoReflect() protoreflect.Message { // Deprecated: Use ProxyMapping.ProtoReflect.Descriptor instead. func (*ProxyMapping) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{6} + return file_proxy_service_proto_rawDescGZIP(), []int{8} } func (x *ProxyMapping) GetType() ProxyMappingUpdateType { @@ -728,22 +839,26 @@ func (x *ProxyMapping) GetListenPort() int32 { return 0 } +func (x *ProxyMapping) GetAccessRestrictions() *AccessRestrictions { + if x != nil { + return x.AccessRestrictions + } + return nil +} + // SendAccessLogRequest consists of one or more AccessLogs from a Proxy. type SendAccessLogRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Log *AccessLog `protobuf:"bytes,1,opt,name=log,proto3" json:"log,omitempty"` unknownFields protoimpl.UnknownFields - - Log *AccessLog `protobuf:"bytes,1,opt,name=log,proto3" json:"log,omitempty"` + sizeCache protoimpl.SizeCache } func (x *SendAccessLogRequest) Reset() { *x = SendAccessLogRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SendAccessLogRequest) String() string { @@ -753,8 +868,8 @@ func (x *SendAccessLogRequest) String() string { func (*SendAccessLogRequest) ProtoMessage() {} func (x *SendAccessLogRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_proxy_service_proto_msgTypes[9] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -766,7 +881,7 @@ func (x *SendAccessLogRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SendAccessLogRequest.ProtoReflect.Descriptor instead. func (*SendAccessLogRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{7} + return file_proxy_service_proto_rawDescGZIP(), []int{9} } func (x *SendAccessLogRequest) GetLog() *AccessLog { @@ -778,18 +893,16 @@ func (x *SendAccessLogRequest) GetLog() *AccessLog { // SendAccessLogResponse is intentionally empty to allow for future expansion. type SendAccessLogResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SendAccessLogResponse) Reset() { *x = SendAccessLogResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SendAccessLogResponse) String() string { @@ -799,8 +912,8 @@ func (x *SendAccessLogResponse) String() string { func (*SendAccessLogResponse) ProtoMessage() {} func (x *SendAccessLogResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_proxy_service_proto_msgTypes[10] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -812,14 +925,11 @@ func (x *SendAccessLogResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SendAccessLogResponse.ProtoReflect.Descriptor instead. func (*SendAccessLogResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{8} + return file_proxy_service_proto_rawDescGZIP(), []int{10} } type AccessLog struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` LogId string `protobuf:"bytes,2,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"` AccountId string `protobuf:"bytes,3,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` @@ -836,15 +946,15 @@ type AccessLog struct { BytesUpload int64 `protobuf:"varint,14,opt,name=bytes_upload,json=bytesUpload,proto3" json:"bytes_upload,omitempty"` BytesDownload int64 `protobuf:"varint,15,opt,name=bytes_download,json=bytesDownload,proto3" json:"bytes_download,omitempty"` Protocol string `protobuf:"bytes,16,opt,name=protocol,proto3" json:"protocol,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *AccessLog) Reset() { *x = AccessLog{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AccessLog) String() string { @@ -854,8 +964,8 @@ func (x *AccessLog) String() string { func (*AccessLog) ProtoMessage() {} func (x *AccessLog) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_proxy_service_proto_msgTypes[11] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -867,7 +977,7 @@ func (x *AccessLog) ProtoReflect() protoreflect.Message { // Deprecated: Use AccessLog.ProtoReflect.Descriptor instead. func (*AccessLog) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{9} + return file_proxy_service_proto_rawDescGZIP(), []int{11} } func (x *AccessLog) GetTimestamp() *timestamppb.Timestamp { @@ -983,26 +1093,24 @@ func (x *AccessLog) GetProtocol() string { } type AuthenticateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` - // Types that are assignable to Request: + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + // Types that are valid to be assigned to Request: // // *AuthenticateRequest_Password // *AuthenticateRequest_Pin - Request isAuthenticateRequest_Request `protobuf_oneof:"request"` + // *AuthenticateRequest_HeaderAuth + Request isAuthenticateRequest_Request `protobuf_oneof:"request"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *AuthenticateRequest) Reset() { *x = AuthenticateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AuthenticateRequest) String() string { @@ -1012,8 +1120,8 @@ func (x *AuthenticateRequest) String() string { func (*AuthenticateRequest) ProtoMessage() {} func (x *AuthenticateRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_proxy_service_proto_msgTypes[12] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1025,7 +1133,7 @@ func (x *AuthenticateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AuthenticateRequest.ProtoReflect.Descriptor instead. func (*AuthenticateRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{10} + return file_proxy_service_proto_rawDescGZIP(), []int{12} } func (x *AuthenticateRequest) GetId() string { @@ -1042,23 +1150,36 @@ func (x *AuthenticateRequest) GetAccountId() string { return "" } -func (m *AuthenticateRequest) GetRequest() isAuthenticateRequest_Request { - if m != nil { - return m.Request +func (x *AuthenticateRequest) GetRequest() isAuthenticateRequest_Request { + if x != nil { + return x.Request } return nil } func (x *AuthenticateRequest) GetPassword() *PasswordRequest { - if x, ok := x.GetRequest().(*AuthenticateRequest_Password); ok { - return x.Password + if x != nil { + if x, ok := x.Request.(*AuthenticateRequest_Password); ok { + return x.Password + } } return nil } func (x *AuthenticateRequest) GetPin() *PinRequest { - if x, ok := x.GetRequest().(*AuthenticateRequest_Pin); ok { - return x.Pin + if x != nil { + if x, ok := x.Request.(*AuthenticateRequest_Pin); ok { + return x.Pin + } + } + return nil +} + +func (x *AuthenticateRequest) GetHeaderAuth() *HeaderAuthRequest { + if x != nil { + if x, ok := x.Request.(*AuthenticateRequest_HeaderAuth); ok { + return x.HeaderAuth + } } return nil } @@ -1075,25 +1196,80 @@ type AuthenticateRequest_Pin struct { Pin *PinRequest `protobuf:"bytes,4,opt,name=pin,proto3,oneof"` } +type AuthenticateRequest_HeaderAuth struct { + HeaderAuth *HeaderAuthRequest `protobuf:"bytes,5,opt,name=header_auth,json=headerAuth,proto3,oneof"` +} + func (*AuthenticateRequest_Password) isAuthenticateRequest_Request() {} func (*AuthenticateRequest_Pin) isAuthenticateRequest_Request() {} -type PasswordRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (*AuthenticateRequest_HeaderAuth) isAuthenticateRequest_Request() {} - Password string `protobuf:"bytes,1,opt,name=password,proto3" json:"password,omitempty"` +type HeaderAuthRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + HeaderValue string `protobuf:"bytes,1,opt,name=header_value,json=headerValue,proto3" json:"header_value,omitempty"` + HeaderName string `protobuf:"bytes,2,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HeaderAuthRequest) Reset() { + *x = HeaderAuthRequest{} + mi := &file_proxy_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HeaderAuthRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderAuthRequest) ProtoMessage() {} + +func (x *HeaderAuthRequest) ProtoReflect() protoreflect.Message { + mi := &file_proxy_service_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderAuthRequest.ProtoReflect.Descriptor instead. +func (*HeaderAuthRequest) Descriptor() ([]byte, []int) { + return file_proxy_service_proto_rawDescGZIP(), []int{13} +} + +func (x *HeaderAuthRequest) GetHeaderValue() string { + if x != nil { + return x.HeaderValue + } + return "" +} + +func (x *HeaderAuthRequest) GetHeaderName() string { + if x != nil { + return x.HeaderName + } + return "" +} + +type PasswordRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Password string `protobuf:"bytes,1,opt,name=password,proto3" json:"password,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PasswordRequest) Reset() { *x = PasswordRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PasswordRequest) String() string { @@ -1103,8 +1279,8 @@ func (x *PasswordRequest) String() string { func (*PasswordRequest) ProtoMessage() {} func (x *PasswordRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_proxy_service_proto_msgTypes[14] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1116,7 +1292,7 @@ func (x *PasswordRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PasswordRequest.ProtoReflect.Descriptor instead. func (*PasswordRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{11} + return file_proxy_service_proto_rawDescGZIP(), []int{14} } func (x *PasswordRequest) GetPassword() string { @@ -1127,20 +1303,17 @@ func (x *PasswordRequest) GetPassword() string { } type PinRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Pin string `protobuf:"bytes,1,opt,name=pin,proto3" json:"pin,omitempty"` unknownFields protoimpl.UnknownFields - - Pin string `protobuf:"bytes,1,opt,name=pin,proto3" json:"pin,omitempty"` + sizeCache protoimpl.SizeCache } func (x *PinRequest) Reset() { *x = PinRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PinRequest) String() string { @@ -1150,8 +1323,8 @@ func (x *PinRequest) String() string { func (*PinRequest) ProtoMessage() {} func (x *PinRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_proxy_service_proto_msgTypes[15] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1163,7 +1336,7 @@ func (x *PinRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PinRequest.ProtoReflect.Descriptor instead. func (*PinRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{12} + return file_proxy_service_proto_rawDescGZIP(), []int{15} } func (x *PinRequest) GetPin() string { @@ -1174,21 +1347,18 @@ func (x *PinRequest) GetPin() string { } type AuthenticateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + SessionToken string `protobuf:"bytes,2,opt,name=session_token,json=sessionToken,proto3" json:"session_token,omitempty"` unknownFields protoimpl.UnknownFields - - Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` - SessionToken string `protobuf:"bytes,2,opt,name=session_token,json=sessionToken,proto3" json:"session_token,omitempty"` + sizeCache protoimpl.SizeCache } func (x *AuthenticateResponse) Reset() { *x = AuthenticateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AuthenticateResponse) String() string { @@ -1198,8 +1368,8 @@ func (x *AuthenticateResponse) String() string { func (*AuthenticateResponse) ProtoMessage() {} func (x *AuthenticateResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_proxy_service_proto_msgTypes[16] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1211,7 +1381,7 @@ func (x *AuthenticateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use AuthenticateResponse.ProtoReflect.Descriptor instead. func (*AuthenticateResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{13} + return file_proxy_service_proto_rawDescGZIP(), []int{16} } func (x *AuthenticateResponse) GetSuccess() bool { @@ -1230,24 +1400,21 @@ func (x *AuthenticateResponse) GetSessionToken() string { // SendStatusUpdateRequest is sent by the proxy to update its status type SendStatusUpdateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` - AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` - Status ProxyStatus `protobuf:"varint,3,opt,name=status,proto3,enum=management.ProxyStatus" json:"status,omitempty"` - CertificateIssued bool `protobuf:"varint,4,opt,name=certificate_issued,json=certificateIssued,proto3" json:"certificate_issued,omitempty"` - ErrorMessage *string `protobuf:"bytes,5,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + Status ProxyStatus `protobuf:"varint,3,opt,name=status,proto3,enum=management.ProxyStatus" json:"status,omitempty"` + CertificateIssued bool `protobuf:"varint,4,opt,name=certificate_issued,json=certificateIssued,proto3" json:"certificate_issued,omitempty"` + ErrorMessage *string `protobuf:"bytes,5,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SendStatusUpdateRequest) Reset() { *x = SendStatusUpdateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SendStatusUpdateRequest) String() string { @@ -1257,8 +1424,8 @@ func (x *SendStatusUpdateRequest) String() string { func (*SendStatusUpdateRequest) ProtoMessage() {} func (x *SendStatusUpdateRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_proxy_service_proto_msgTypes[17] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1270,7 +1437,7 @@ func (x *SendStatusUpdateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SendStatusUpdateRequest.ProtoReflect.Descriptor instead. func (*SendStatusUpdateRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{14} + return file_proxy_service_proto_rawDescGZIP(), []int{17} } func (x *SendStatusUpdateRequest) GetServiceId() string { @@ -1310,18 +1477,16 @@ func (x *SendStatusUpdateRequest) GetErrorMessage() string { // SendStatusUpdateResponse is intentionally empty to allow for future expansion type SendStatusUpdateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SendStatusUpdateResponse) Reset() { *x = SendStatusUpdateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SendStatusUpdateResponse) String() string { @@ -1331,8 +1496,8 @@ func (x *SendStatusUpdateResponse) String() string { func (*SendStatusUpdateResponse) ProtoMessage() {} func (x *SendStatusUpdateResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_proxy_service_proto_msgTypes[18] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1344,30 +1509,27 @@ func (x *SendStatusUpdateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SendStatusUpdateResponse.ProtoReflect.Descriptor instead. func (*SendStatusUpdateResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{15} + return file_proxy_service_proto_rawDescGZIP(), []int{18} } // CreateProxyPeerRequest is sent by the proxy to create a peer connection // The token is a one-time authentication token sent via ProxyMapping type CreateProxyPeerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` - AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` - Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"` - WireguardPublicKey string `protobuf:"bytes,4,opt,name=wireguard_public_key,json=wireguardPublicKey,proto3" json:"wireguard_public_key,omitempty"` - Cluster string `protobuf:"bytes,5,opt,name=cluster,proto3" json:"cluster,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"` + WireguardPublicKey string `protobuf:"bytes,4,opt,name=wireguard_public_key,json=wireguardPublicKey,proto3" json:"wireguard_public_key,omitempty"` + Cluster string `protobuf:"bytes,5,opt,name=cluster,proto3" json:"cluster,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CreateProxyPeerRequest) Reset() { *x = CreateProxyPeerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CreateProxyPeerRequest) String() string { @@ -1377,8 +1539,8 @@ func (x *CreateProxyPeerRequest) String() string { func (*CreateProxyPeerRequest) ProtoMessage() {} func (x *CreateProxyPeerRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_proxy_service_proto_msgTypes[19] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1390,7 +1552,7 @@ func (x *CreateProxyPeerRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateProxyPeerRequest.ProtoReflect.Descriptor instead. func (*CreateProxyPeerRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{16} + return file_proxy_service_proto_rawDescGZIP(), []int{19} } func (x *CreateProxyPeerRequest) GetServiceId() string { @@ -1430,21 +1592,18 @@ func (x *CreateProxyPeerRequest) GetCluster() string { // CreateProxyPeerResponse contains the result of peer creation type CreateProxyPeerResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + ErrorMessage *string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"` unknownFields protoimpl.UnknownFields - - Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` - ErrorMessage *string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"` + sizeCache protoimpl.SizeCache } func (x *CreateProxyPeerResponse) Reset() { *x = CreateProxyPeerResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CreateProxyPeerResponse) String() string { @@ -1454,8 +1613,8 @@ func (x *CreateProxyPeerResponse) String() string { func (*CreateProxyPeerResponse) ProtoMessage() {} func (x *CreateProxyPeerResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_proxy_service_proto_msgTypes[20] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1467,7 +1626,7 @@ func (x *CreateProxyPeerResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateProxyPeerResponse.ProtoReflect.Descriptor instead. func (*CreateProxyPeerResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{17} + return file_proxy_service_proto_rawDescGZIP(), []int{20} } func (x *CreateProxyPeerResponse) GetSuccess() bool { @@ -1485,22 +1644,19 @@ func (x *CreateProxyPeerResponse) GetErrorMessage() string { } type GetOIDCURLRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + RedirectUrl string `protobuf:"bytes,3,opt,name=redirect_url,json=redirectUrl,proto3" json:"redirect_url,omitempty"` unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` - RedirectUrl string `protobuf:"bytes,3,opt,name=redirect_url,json=redirectUrl,proto3" json:"redirect_url,omitempty"` + sizeCache protoimpl.SizeCache } func (x *GetOIDCURLRequest) Reset() { *x = GetOIDCURLRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetOIDCURLRequest) String() string { @@ -1510,8 +1666,8 @@ func (x *GetOIDCURLRequest) String() string { func (*GetOIDCURLRequest) ProtoMessage() {} func (x *GetOIDCURLRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_proxy_service_proto_msgTypes[21] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1523,7 +1679,7 @@ func (x *GetOIDCURLRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetOIDCURLRequest.ProtoReflect.Descriptor instead. func (*GetOIDCURLRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{18} + return file_proxy_service_proto_rawDescGZIP(), []int{21} } func (x *GetOIDCURLRequest) GetId() string { @@ -1548,20 +1704,17 @@ func (x *GetOIDCURLRequest) GetRedirectUrl() string { } type GetOIDCURLResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` unknownFields protoimpl.UnknownFields - - Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + sizeCache protoimpl.SizeCache } func (x *GetOIDCURLResponse) Reset() { *x = GetOIDCURLResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetOIDCURLResponse) String() string { @@ -1571,8 +1724,8 @@ func (x *GetOIDCURLResponse) String() string { func (*GetOIDCURLResponse) ProtoMessage() {} func (x *GetOIDCURLResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_proxy_service_proto_msgTypes[22] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1584,7 +1737,7 @@ func (x *GetOIDCURLResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetOIDCURLResponse.ProtoReflect.Descriptor instead. func (*GetOIDCURLResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{19} + return file_proxy_service_proto_rawDescGZIP(), []int{22} } func (x *GetOIDCURLResponse) GetUrl() string { @@ -1595,21 +1748,18 @@ func (x *GetOIDCURLResponse) GetUrl() string { } type ValidateSessionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` + SessionToken string `protobuf:"bytes,2,opt,name=session_token,json=sessionToken,proto3" json:"session_token,omitempty"` unknownFields protoimpl.UnknownFields - - Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` - SessionToken string `protobuf:"bytes,2,opt,name=session_token,json=sessionToken,proto3" json:"session_token,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ValidateSessionRequest) Reset() { *x = ValidateSessionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ValidateSessionRequest) String() string { @@ -1619,8 +1769,8 @@ func (x *ValidateSessionRequest) String() string { func (*ValidateSessionRequest) ProtoMessage() {} func (x *ValidateSessionRequest) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_proxy_service_proto_msgTypes[23] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1632,7 +1782,7 @@ func (x *ValidateSessionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateSessionRequest.ProtoReflect.Descriptor instead. func (*ValidateSessionRequest) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{20} + return file_proxy_service_proto_rawDescGZIP(), []int{23} } func (x *ValidateSessionRequest) GetDomain() string { @@ -1650,23 +1800,20 @@ func (x *ValidateSessionRequest) GetSessionToken() string { } type ValidateSessionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Valid bool `protobuf:"varint,1,opt,name=valid,proto3" json:"valid,omitempty"` + UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + UserEmail string `protobuf:"bytes,3,opt,name=user_email,json=userEmail,proto3" json:"user_email,omitempty"` + DeniedReason string `protobuf:"bytes,4,opt,name=denied_reason,json=deniedReason,proto3" json:"denied_reason,omitempty"` unknownFields protoimpl.UnknownFields - - Valid bool `protobuf:"varint,1,opt,name=valid,proto3" json:"valid,omitempty"` - UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - UserEmail string `protobuf:"bytes,3,opt,name=user_email,json=userEmail,proto3" json:"user_email,omitempty"` - DeniedReason string `protobuf:"bytes,4,opt,name=denied_reason,json=deniedReason,proto3" json:"denied_reason,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ValidateSessionResponse) Reset() { *x = ValidateSessionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proxy_service_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_proxy_service_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ValidateSessionResponse) String() string { @@ -1676,8 +1823,8 @@ func (x *ValidateSessionResponse) String() string { func (*ValidateSessionResponse) ProtoMessage() {} func (x *ValidateSessionResponse) ProtoReflect() protoreflect.Message { - mi := &file_proxy_service_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_proxy_service_proto_msgTypes[24] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1689,7 +1836,7 @@ func (x *ValidateSessionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateSessionResponse.ProtoReflect.Descriptor instead. func (*ValidateSessionResponse) Descriptor() ([]byte, []int) { - return file_proxy_service_proto_rawDescGZIP(), []int{21} + return file_proxy_service_proto_rawDescGZIP(), []int{24} } func (x *ValidateSessionResponse) GetValid() bool { @@ -1722,317 +1869,193 @@ func (x *ValidateSessionResponse) GetDeniedReason() string { var File_proxy_service_proto protoreflect.FileDescriptor -var file_proxy_service_proto_rawDesc = []byte{ - 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0x66, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x61, 0x70, 0x61, 0x62, - 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x75, 0x70, 0x70, 0x6f, - 0x72, 0x74, 0x73, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x13, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x73, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x88, 0x01, 0x01, - 0x42, 0x18, 0x0a, 0x16, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x5f, 0x63, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x22, 0xe6, 0x01, 0x0a, 0x17, 0x47, - 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x49, - 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x12, 0x41, 0x0a, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, - 0x69, 0x65, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, - 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, - 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6d, 0x61, 0x70, - 0x70, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, - 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x13, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x79, 0x6e, 0x63, - 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x22, 0xce, 0x03, 0x0a, 0x11, 0x50, 0x61, 0x74, - 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, - 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x54, 0x6c, 0x73, - 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x42, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3e, 0x0a, 0x0c, 0x70, 0x61, - 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, - 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0b, 0x70, - 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x63, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x4b, 0x0a, 0x14, 0x73, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x6c, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x1a, 0x40, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x72, 0x0a, 0x0b, 0x50, 0x61, 0x74, - 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xaa, 0x01, - 0x0a, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, - 0x79, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x67, - 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x69, 0x64, 0x63, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x6f, 0x69, 0x64, 0x63, 0x22, 0x95, 0x03, 0x0a, 0x0c, 0x50, - 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, - 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x2b, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, - 0x67, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x74, - 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2e, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x61, 0x73, 0x73, 0x5f, 0x68, - 0x6f, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0e, 0x70, 0x61, 0x73, 0x73, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x77, - 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x12, 0x0a, - 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6d, 0x6f, 0x64, - 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x50, 0x6f, - 0x72, 0x74, 0x22, 0x3f, 0x0a, 0x14, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x03, 0x6c, 0x6f, - 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x03, - 0x6c, 0x6f, 0x67, 0x22, 0x17, 0x0a, 0x15, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x86, 0x04, 0x0a, - 0x09, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x12, 0x15, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, - 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, - 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, - 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x12, 0x25, 0x0a, 0x0e, - 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, - 0x69, 0x73, 0x6d, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0c, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, - 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, - 0x21, 0x0a, 0x0c, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x18, - 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x55, 0x70, 0x6c, 0x6f, - 0x61, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x64, 0x6f, 0x77, 0x6e, - 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0xb6, 0x01, 0x0a, 0x13, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, - 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, - 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, - 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x08, - 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, - 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x2a, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x03, - 0x70, 0x69, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2d, - 0x0a, 0x0f, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x1e, 0x0a, - 0x0a, 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, - 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x22, 0x55, 0x0a, - 0x14, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, - 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xf3, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, - 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2f, - 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, - 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x2d, 0x0a, 0x12, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x69, - 0x73, 0x73, 0x75, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x63, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x49, 0x73, 0x73, 0x75, 0x65, 0x64, 0x12, 0x28, - 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x65, - 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, - 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, - 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, - 0x72, 0x64, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x12, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, 0x72, 0x64, 0x50, 0x75, - 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x22, 0x6f, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, - 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, - 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, - 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x22, 0x65, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, - 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x6c, 0x22, 0x26, 0x0a, 0x12, 0x47, 0x65, 0x74, - 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, - 0x6c, 0x22, 0x55, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, - 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8c, 0x01, 0x0a, 0x17, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, - 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x65, 0x6d, 0x61, 0x69, - 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x73, 0x65, 0x72, 0x45, 0x6d, 0x61, - 0x69, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x61, - 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, - 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x2a, 0x64, 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x78, 0x79, - 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x55, 0x50, - 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x10, 0x02, 0x2a, 0x46, 0x0a, - 0x0f, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x65, - 0x12, 0x18, 0x0a, 0x14, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x57, 0x52, 0x49, 0x54, 0x45, - 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x15, 0x50, 0x41, - 0x54, 0x48, 0x5f, 0x52, 0x45, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, - 0x52, 0x56, 0x45, 0x10, 0x01, 0x2a, 0xc8, 0x01, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, - 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, - 0x17, 0x0a, 0x13, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, - 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x50, 0x52, 0x4f, 0x58, - 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, - 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x24, 0x0a, - 0x20, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x45, - 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, - 0x47, 0x10, 0x03, 0x12, 0x23, 0x0a, 0x1f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, - 0x54, 0x55, 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, - 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x52, 0x4f, 0x58, - 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x05, - 0x32, 0xfc, 0x04, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x12, 0x5f, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, - 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x30, 0x01, 0x12, 0x54, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x4c, 0x6f, 0x67, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x68, - 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x10, 0x53, - 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, - 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, - 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0f, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x12, 0x22, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, - 0x43, 0x55, 0x52, 0x4c, 0x12, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, - 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} +const file_proxy_service_proto_rawDesc = "" + + "\n" + + "\x13proxy_service.proto\x12\n" + + "management\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"f\n" + + "\x11ProxyCapabilities\x127\n" + + "\x15supports_custom_ports\x18\x01 \x01(\bH\x00R\x13supportsCustomPorts\x88\x01\x01B\x18\n" + + "\x16_supports_custom_ports\"\xe6\x01\n" + + "\x17GetMappingUpdateRequest\x12\x19\n" + + "\bproxy_id\x18\x01 \x01(\tR\aproxyId\x12\x18\n" + + "\aversion\x18\x02 \x01(\tR\aversion\x129\n" + + "\n" + + "started_at\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\tstartedAt\x12\x18\n" + + "\aaddress\x18\x04 \x01(\tR\aaddress\x12A\n" + + "\fcapabilities\x18\x05 \x01(\v2\x1d.management.ProxyCapabilitiesR\fcapabilities\"\x82\x01\n" + + "\x18GetMappingUpdateResponse\x122\n" + + "\amapping\x18\x01 \x03(\v2\x18.management.ProxyMappingR\amapping\x122\n" + + "\x15initial_sync_complete\x18\x02 \x01(\bR\x13initialSyncComplete\"\xce\x03\n" + + "\x11PathTargetOptions\x12&\n" + + "\x0fskip_tls_verify\x18\x01 \x01(\bR\rskipTlsVerify\x12B\n" + + "\x0frequest_timeout\x18\x02 \x01(\v2\x19.google.protobuf.DurationR\x0erequestTimeout\x12>\n" + + "\fpath_rewrite\x18\x03 \x01(\x0e2\x1b.management.PathRewriteModeR\vpathRewrite\x12W\n" + + "\x0ecustom_headers\x18\x04 \x03(\v20.management.PathTargetOptions.CustomHeadersEntryR\rcustomHeaders\x12%\n" + + "\x0eproxy_protocol\x18\x05 \x01(\bR\rproxyProtocol\x12K\n" + + "\x14session_idle_timeout\x18\x06 \x01(\v2\x19.google.protobuf.DurationR\x12sessionIdleTimeout\x1a@\n" + + "\x12CustomHeadersEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"r\n" + + "\vPathMapping\x12\x12\n" + + "\x04path\x18\x01 \x01(\tR\x04path\x12\x16\n" + + "\x06target\x18\x02 \x01(\tR\x06target\x127\n" + + "\aoptions\x18\x03 \x01(\v2\x1d.management.PathTargetOptionsR\aoptions\"G\n" + + "\n" + + "HeaderAuth\x12\x16\n" + + "\x06header\x18\x01 \x01(\tR\x06header\x12!\n" + + "\fhashed_value\x18\x02 \x01(\tR\vhashedValue\"\xe5\x01\n" + + "\x0eAuthentication\x12\x1f\n" + + "\vsession_key\x18\x01 \x01(\tR\n" + + "sessionKey\x125\n" + + "\x17max_session_age_seconds\x18\x02 \x01(\x03R\x14maxSessionAgeSeconds\x12\x1a\n" + + "\bpassword\x18\x03 \x01(\bR\bpassword\x12\x10\n" + + "\x03pin\x18\x04 \x01(\bR\x03pin\x12\x12\n" + + "\x04oidc\x18\x05 \x01(\bR\x04oidc\x129\n" + + "\fheader_auths\x18\x06 \x03(\v2\x16.management.HeaderAuthR\vheaderAuths\"\xb8\x01\n" + + "\x12AccessRestrictions\x12#\n" + + "\rallowed_cidrs\x18\x01 \x03(\tR\fallowedCidrs\x12#\n" + + "\rblocked_cidrs\x18\x02 \x03(\tR\fblockedCidrs\x12+\n" + + "\x11allowed_countries\x18\x03 \x03(\tR\x10allowedCountries\x12+\n" + + "\x11blocked_countries\x18\x04 \x03(\tR\x10blockedCountries\"\xe6\x03\n" + + "\fProxyMapping\x126\n" + + "\x04type\x18\x01 \x01(\x0e2\".management.ProxyMappingUpdateTypeR\x04type\x12\x0e\n" + + "\x02id\x18\x02 \x01(\tR\x02id\x12\x1d\n" + + "\n" + + "account_id\x18\x03 \x01(\tR\taccountId\x12\x16\n" + + "\x06domain\x18\x04 \x01(\tR\x06domain\x12+\n" + + "\x04path\x18\x05 \x03(\v2\x17.management.PathMappingR\x04path\x12\x1d\n" + + "\n" + + "auth_token\x18\x06 \x01(\tR\tauthToken\x12.\n" + + "\x04auth\x18\a \x01(\v2\x1a.management.AuthenticationR\x04auth\x12(\n" + + "\x10pass_host_header\x18\b \x01(\bR\x0epassHostHeader\x12+\n" + + "\x11rewrite_redirects\x18\t \x01(\bR\x10rewriteRedirects\x12\x12\n" + + "\x04mode\x18\n" + + " \x01(\tR\x04mode\x12\x1f\n" + + "\vlisten_port\x18\v \x01(\x05R\n" + + "listenPort\x12O\n" + + "\x13access_restrictions\x18\f \x01(\v2\x1e.management.AccessRestrictionsR\x12accessRestrictions\"?\n" + + "\x14SendAccessLogRequest\x12'\n" + + "\x03log\x18\x01 \x01(\v2\x15.management.AccessLogR\x03log\"\x17\n" + + "\x15SendAccessLogResponse\"\x86\x04\n" + + "\tAccessLog\x128\n" + + "\ttimestamp\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12\x15\n" + + "\x06log_id\x18\x02 \x01(\tR\x05logId\x12\x1d\n" + + "\n" + + "account_id\x18\x03 \x01(\tR\taccountId\x12\x1d\n" + + "\n" + + "service_id\x18\x04 \x01(\tR\tserviceId\x12\x12\n" + + "\x04host\x18\x05 \x01(\tR\x04host\x12\x12\n" + + "\x04path\x18\x06 \x01(\tR\x04path\x12\x1f\n" + + "\vduration_ms\x18\a \x01(\x03R\n" + + "durationMs\x12\x16\n" + + "\x06method\x18\b \x01(\tR\x06method\x12#\n" + + "\rresponse_code\x18\t \x01(\x05R\fresponseCode\x12\x1b\n" + + "\tsource_ip\x18\n" + + " \x01(\tR\bsourceIp\x12%\n" + + "\x0eauth_mechanism\x18\v \x01(\tR\rauthMechanism\x12\x17\n" + + "\auser_id\x18\f \x01(\tR\x06userId\x12!\n" + + "\fauth_success\x18\r \x01(\bR\vauthSuccess\x12!\n" + + "\fbytes_upload\x18\x0e \x01(\x03R\vbytesUpload\x12%\n" + + "\x0ebytes_download\x18\x0f \x01(\x03R\rbytesDownload\x12\x1a\n" + + "\bprotocol\x18\x10 \x01(\tR\bprotocol\"\xf8\x01\n" + + "\x13AuthenticateRequest\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12\x1d\n" + + "\n" + + "account_id\x18\x02 \x01(\tR\taccountId\x129\n" + + "\bpassword\x18\x03 \x01(\v2\x1b.management.PasswordRequestH\x00R\bpassword\x12*\n" + + "\x03pin\x18\x04 \x01(\v2\x16.management.PinRequestH\x00R\x03pin\x12@\n" + + "\vheader_auth\x18\x05 \x01(\v2\x1d.management.HeaderAuthRequestH\x00R\n" + + "headerAuthB\t\n" + + "\arequest\"W\n" + + "\x11HeaderAuthRequest\x12!\n" + + "\fheader_value\x18\x01 \x01(\tR\vheaderValue\x12\x1f\n" + + "\vheader_name\x18\x02 \x01(\tR\n" + + "headerName\"-\n" + + "\x0fPasswordRequest\x12\x1a\n" + + "\bpassword\x18\x01 \x01(\tR\bpassword\"\x1e\n" + + "\n" + + "PinRequest\x12\x10\n" + + "\x03pin\x18\x01 \x01(\tR\x03pin\"U\n" + + "\x14AuthenticateResponse\x12\x18\n" + + "\asuccess\x18\x01 \x01(\bR\asuccess\x12#\n" + + "\rsession_token\x18\x02 \x01(\tR\fsessionToken\"\xf3\x01\n" + + "\x17SendStatusUpdateRequest\x12\x1d\n" + + "\n" + + "service_id\x18\x01 \x01(\tR\tserviceId\x12\x1d\n" + + "\n" + + "account_id\x18\x02 \x01(\tR\taccountId\x12/\n" + + "\x06status\x18\x03 \x01(\x0e2\x17.management.ProxyStatusR\x06status\x12-\n" + + "\x12certificate_issued\x18\x04 \x01(\bR\x11certificateIssued\x12(\n" + + "\rerror_message\x18\x05 \x01(\tH\x00R\ferrorMessage\x88\x01\x01B\x10\n" + + "\x0e_error_message\"\x1a\n" + + "\x18SendStatusUpdateResponse\"\xb8\x01\n" + + "\x16CreateProxyPeerRequest\x12\x1d\n" + + "\n" + + "service_id\x18\x01 \x01(\tR\tserviceId\x12\x1d\n" + + "\n" + + "account_id\x18\x02 \x01(\tR\taccountId\x12\x14\n" + + "\x05token\x18\x03 \x01(\tR\x05token\x120\n" + + "\x14wireguard_public_key\x18\x04 \x01(\tR\x12wireguardPublicKey\x12\x18\n" + + "\acluster\x18\x05 \x01(\tR\acluster\"o\n" + + "\x17CreateProxyPeerResponse\x12\x18\n" + + "\asuccess\x18\x01 \x01(\bR\asuccess\x12(\n" + + "\rerror_message\x18\x02 \x01(\tH\x00R\ferrorMessage\x88\x01\x01B\x10\n" + + "\x0e_error_message\"e\n" + + "\x11GetOIDCURLRequest\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12\x1d\n" + + "\n" + + "account_id\x18\x02 \x01(\tR\taccountId\x12!\n" + + "\fredirect_url\x18\x03 \x01(\tR\vredirectUrl\"&\n" + + "\x12GetOIDCURLResponse\x12\x10\n" + + "\x03url\x18\x01 \x01(\tR\x03url\"U\n" + + "\x16ValidateSessionRequest\x12\x16\n" + + "\x06domain\x18\x01 \x01(\tR\x06domain\x12#\n" + + "\rsession_token\x18\x02 \x01(\tR\fsessionToken\"\x8c\x01\n" + + "\x17ValidateSessionResponse\x12\x14\n" + + "\x05valid\x18\x01 \x01(\bR\x05valid\x12\x17\n" + + "\auser_id\x18\x02 \x01(\tR\x06userId\x12\x1d\n" + + "\n" + + "user_email\x18\x03 \x01(\tR\tuserEmail\x12#\n" + + "\rdenied_reason\x18\x04 \x01(\tR\fdeniedReason*d\n" + + "\x16ProxyMappingUpdateType\x12\x17\n" + + "\x13UPDATE_TYPE_CREATED\x10\x00\x12\x18\n" + + "\x14UPDATE_TYPE_MODIFIED\x10\x01\x12\x17\n" + + "\x13UPDATE_TYPE_REMOVED\x10\x02*F\n" + + "\x0fPathRewriteMode\x12\x18\n" + + "\x14PATH_REWRITE_DEFAULT\x10\x00\x12\x19\n" + + "\x15PATH_REWRITE_PRESERVE\x10\x01*\xc8\x01\n" + + "\vProxyStatus\x12\x18\n" + + "\x14PROXY_STATUS_PENDING\x10\x00\x12\x17\n" + + "\x13PROXY_STATUS_ACTIVE\x10\x01\x12#\n" + + "\x1fPROXY_STATUS_TUNNEL_NOT_CREATED\x10\x02\x12$\n" + + " PROXY_STATUS_CERTIFICATE_PENDING\x10\x03\x12#\n" + + "\x1fPROXY_STATUS_CERTIFICATE_FAILED\x10\x04\x12\x16\n" + + "\x12PROXY_STATUS_ERROR\x10\x052\xfc\x04\n" + + "\fProxyService\x12_\n" + + "\x10GetMappingUpdate\x12#.management.GetMappingUpdateRequest\x1a$.management.GetMappingUpdateResponse0\x01\x12T\n" + + "\rSendAccessLog\x12 .management.SendAccessLogRequest\x1a!.management.SendAccessLogResponse\x12Q\n" + + "\fAuthenticate\x12\x1f.management.AuthenticateRequest\x1a .management.AuthenticateResponse\x12]\n" + + "\x10SendStatusUpdate\x12#.management.SendStatusUpdateRequest\x1a$.management.SendStatusUpdateResponse\x12Z\n" + + "\x0fCreateProxyPeer\x12\".management.CreateProxyPeerRequest\x1a#.management.CreateProxyPeerResponse\x12K\n" + + "\n" + + "GetOIDCURL\x12\x1d.management.GetOIDCURLRequest\x1a\x1e.management.GetOIDCURLResponse\x12Z\n" + + "\x0fValidateSession\x12\".management.ValidateSessionRequest\x1a#.management.ValidateSessionResponseB\bZ\x06/protob\x06proto3" var ( file_proxy_service_proto_rawDescOnce sync.Once - file_proxy_service_proto_rawDescData = file_proxy_service_proto_rawDesc + file_proxy_service_proto_rawDescData []byte ) func file_proxy_service_proto_rawDescGZIP() []byte { file_proxy_service_proto_rawDescOnce.Do(func() { - file_proxy_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_proxy_service_proto_rawDescData) + file_proxy_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proxy_service_proto_rawDesc), len(file_proxy_service_proto_rawDesc))) }) return file_proxy_service_proto_rawDescData } var file_proxy_service_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_proxy_service_proto_msgTypes = make([]protoimpl.MessageInfo, 23) -var file_proxy_service_proto_goTypes = []interface{}{ +var file_proxy_service_proto_msgTypes = make([]protoimpl.MessageInfo, 26) +var file_proxy_service_proto_goTypes = []any{ (ProxyMappingUpdateType)(0), // 0: management.ProxyMappingUpdateType (PathRewriteMode)(0), // 1: management.PathRewriteMode (ProxyStatus)(0), // 2: management.ProxyStatus @@ -2041,63 +2064,69 @@ var file_proxy_service_proto_goTypes = []interface{}{ (*GetMappingUpdateResponse)(nil), // 5: management.GetMappingUpdateResponse (*PathTargetOptions)(nil), // 6: management.PathTargetOptions (*PathMapping)(nil), // 7: management.PathMapping - (*Authentication)(nil), // 8: management.Authentication - (*ProxyMapping)(nil), // 9: management.ProxyMapping - (*SendAccessLogRequest)(nil), // 10: management.SendAccessLogRequest - (*SendAccessLogResponse)(nil), // 11: management.SendAccessLogResponse - (*AccessLog)(nil), // 12: management.AccessLog - (*AuthenticateRequest)(nil), // 13: management.AuthenticateRequest - (*PasswordRequest)(nil), // 14: management.PasswordRequest - (*PinRequest)(nil), // 15: management.PinRequest - (*AuthenticateResponse)(nil), // 16: management.AuthenticateResponse - (*SendStatusUpdateRequest)(nil), // 17: management.SendStatusUpdateRequest - (*SendStatusUpdateResponse)(nil), // 18: management.SendStatusUpdateResponse - (*CreateProxyPeerRequest)(nil), // 19: management.CreateProxyPeerRequest - (*CreateProxyPeerResponse)(nil), // 20: management.CreateProxyPeerResponse - (*GetOIDCURLRequest)(nil), // 21: management.GetOIDCURLRequest - (*GetOIDCURLResponse)(nil), // 22: management.GetOIDCURLResponse - (*ValidateSessionRequest)(nil), // 23: management.ValidateSessionRequest - (*ValidateSessionResponse)(nil), // 24: management.ValidateSessionResponse - nil, // 25: management.PathTargetOptions.CustomHeadersEntry - (*timestamppb.Timestamp)(nil), // 26: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 27: google.protobuf.Duration + (*HeaderAuth)(nil), // 8: management.HeaderAuth + (*Authentication)(nil), // 9: management.Authentication + (*AccessRestrictions)(nil), // 10: management.AccessRestrictions + (*ProxyMapping)(nil), // 11: management.ProxyMapping + (*SendAccessLogRequest)(nil), // 12: management.SendAccessLogRequest + (*SendAccessLogResponse)(nil), // 13: management.SendAccessLogResponse + (*AccessLog)(nil), // 14: management.AccessLog + (*AuthenticateRequest)(nil), // 15: management.AuthenticateRequest + (*HeaderAuthRequest)(nil), // 16: management.HeaderAuthRequest + (*PasswordRequest)(nil), // 17: management.PasswordRequest + (*PinRequest)(nil), // 18: management.PinRequest + (*AuthenticateResponse)(nil), // 19: management.AuthenticateResponse + (*SendStatusUpdateRequest)(nil), // 20: management.SendStatusUpdateRequest + (*SendStatusUpdateResponse)(nil), // 21: management.SendStatusUpdateResponse + (*CreateProxyPeerRequest)(nil), // 22: management.CreateProxyPeerRequest + (*CreateProxyPeerResponse)(nil), // 23: management.CreateProxyPeerResponse + (*GetOIDCURLRequest)(nil), // 24: management.GetOIDCURLRequest + (*GetOIDCURLResponse)(nil), // 25: management.GetOIDCURLResponse + (*ValidateSessionRequest)(nil), // 26: management.ValidateSessionRequest + (*ValidateSessionResponse)(nil), // 27: management.ValidateSessionResponse + nil, // 28: management.PathTargetOptions.CustomHeadersEntry + (*timestamppb.Timestamp)(nil), // 29: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 30: google.protobuf.Duration } var file_proxy_service_proto_depIdxs = []int32{ - 26, // 0: management.GetMappingUpdateRequest.started_at:type_name -> google.protobuf.Timestamp + 29, // 0: management.GetMappingUpdateRequest.started_at:type_name -> google.protobuf.Timestamp 3, // 1: management.GetMappingUpdateRequest.capabilities:type_name -> management.ProxyCapabilities - 9, // 2: management.GetMappingUpdateResponse.mapping:type_name -> management.ProxyMapping - 27, // 3: management.PathTargetOptions.request_timeout:type_name -> google.protobuf.Duration + 11, // 2: management.GetMappingUpdateResponse.mapping:type_name -> management.ProxyMapping + 30, // 3: management.PathTargetOptions.request_timeout:type_name -> google.protobuf.Duration 1, // 4: management.PathTargetOptions.path_rewrite:type_name -> management.PathRewriteMode - 25, // 5: management.PathTargetOptions.custom_headers:type_name -> management.PathTargetOptions.CustomHeadersEntry - 27, // 6: management.PathTargetOptions.session_idle_timeout:type_name -> google.protobuf.Duration + 28, // 5: management.PathTargetOptions.custom_headers:type_name -> management.PathTargetOptions.CustomHeadersEntry + 30, // 6: management.PathTargetOptions.session_idle_timeout:type_name -> google.protobuf.Duration 6, // 7: management.PathMapping.options:type_name -> management.PathTargetOptions - 0, // 8: management.ProxyMapping.type:type_name -> management.ProxyMappingUpdateType - 7, // 9: management.ProxyMapping.path:type_name -> management.PathMapping - 8, // 10: management.ProxyMapping.auth:type_name -> management.Authentication - 12, // 11: management.SendAccessLogRequest.log:type_name -> management.AccessLog - 26, // 12: management.AccessLog.timestamp:type_name -> google.protobuf.Timestamp - 14, // 13: management.AuthenticateRequest.password:type_name -> management.PasswordRequest - 15, // 14: management.AuthenticateRequest.pin:type_name -> management.PinRequest - 2, // 15: management.SendStatusUpdateRequest.status:type_name -> management.ProxyStatus - 4, // 16: management.ProxyService.GetMappingUpdate:input_type -> management.GetMappingUpdateRequest - 10, // 17: management.ProxyService.SendAccessLog:input_type -> management.SendAccessLogRequest - 13, // 18: management.ProxyService.Authenticate:input_type -> management.AuthenticateRequest - 17, // 19: management.ProxyService.SendStatusUpdate:input_type -> management.SendStatusUpdateRequest - 19, // 20: management.ProxyService.CreateProxyPeer:input_type -> management.CreateProxyPeerRequest - 21, // 21: management.ProxyService.GetOIDCURL:input_type -> management.GetOIDCURLRequest - 23, // 22: management.ProxyService.ValidateSession:input_type -> management.ValidateSessionRequest - 5, // 23: management.ProxyService.GetMappingUpdate:output_type -> management.GetMappingUpdateResponse - 11, // 24: management.ProxyService.SendAccessLog:output_type -> management.SendAccessLogResponse - 16, // 25: management.ProxyService.Authenticate:output_type -> management.AuthenticateResponse - 18, // 26: management.ProxyService.SendStatusUpdate:output_type -> management.SendStatusUpdateResponse - 20, // 27: management.ProxyService.CreateProxyPeer:output_type -> management.CreateProxyPeerResponse - 22, // 28: management.ProxyService.GetOIDCURL:output_type -> management.GetOIDCURLResponse - 24, // 29: management.ProxyService.ValidateSession:output_type -> management.ValidateSessionResponse - 23, // [23:30] is the sub-list for method output_type - 16, // [16:23] is the sub-list for method input_type - 16, // [16:16] is the sub-list for extension type_name - 16, // [16:16] is the sub-list for extension extendee - 0, // [0:16] is the sub-list for field type_name + 8, // 8: management.Authentication.header_auths:type_name -> management.HeaderAuth + 0, // 9: management.ProxyMapping.type:type_name -> management.ProxyMappingUpdateType + 7, // 10: management.ProxyMapping.path:type_name -> management.PathMapping + 9, // 11: management.ProxyMapping.auth:type_name -> management.Authentication + 10, // 12: management.ProxyMapping.access_restrictions:type_name -> management.AccessRestrictions + 14, // 13: management.SendAccessLogRequest.log:type_name -> management.AccessLog + 29, // 14: management.AccessLog.timestamp:type_name -> google.protobuf.Timestamp + 17, // 15: management.AuthenticateRequest.password:type_name -> management.PasswordRequest + 18, // 16: management.AuthenticateRequest.pin:type_name -> management.PinRequest + 16, // 17: management.AuthenticateRequest.header_auth:type_name -> management.HeaderAuthRequest + 2, // 18: management.SendStatusUpdateRequest.status:type_name -> management.ProxyStatus + 4, // 19: management.ProxyService.GetMappingUpdate:input_type -> management.GetMappingUpdateRequest + 12, // 20: management.ProxyService.SendAccessLog:input_type -> management.SendAccessLogRequest + 15, // 21: management.ProxyService.Authenticate:input_type -> management.AuthenticateRequest + 20, // 22: management.ProxyService.SendStatusUpdate:input_type -> management.SendStatusUpdateRequest + 22, // 23: management.ProxyService.CreateProxyPeer:input_type -> management.CreateProxyPeerRequest + 24, // 24: management.ProxyService.GetOIDCURL:input_type -> management.GetOIDCURLRequest + 26, // 25: management.ProxyService.ValidateSession:input_type -> management.ValidateSessionRequest + 5, // 26: management.ProxyService.GetMappingUpdate:output_type -> management.GetMappingUpdateResponse + 13, // 27: management.ProxyService.SendAccessLog:output_type -> management.SendAccessLogResponse + 19, // 28: management.ProxyService.Authenticate:output_type -> management.AuthenticateResponse + 21, // 29: management.ProxyService.SendStatusUpdate:output_type -> management.SendStatusUpdateResponse + 23, // 30: management.ProxyService.CreateProxyPeer:output_type -> management.CreateProxyPeerResponse + 25, // 31: management.ProxyService.GetOIDCURL:output_type -> management.GetOIDCURLResponse + 27, // 32: management.ProxyService.ValidateSession:output_type -> management.ValidateSessionResponse + 26, // [26:33] is the sub-list for method output_type + 19, // [19:26] is the sub-list for method input_type + 19, // [19:19] is the sub-list for extension type_name + 19, // [19:19] is the sub-list for extension extendee + 0, // [0:19] is the sub-list for field type_name } func init() { file_proxy_service_proto_init() } @@ -2105,286 +2134,21 @@ func file_proxy_service_proto_init() { if File_proxy_service_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_proxy_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProxyCapabilities); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMappingUpdateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMappingUpdateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PathTargetOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PathMapping); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Authentication); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProxyMapping); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendAccessLogRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendAccessLogResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AccessLog); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthenticateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PasswordRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PinRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthenticateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendStatusUpdateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendStatusUpdateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateProxyPeerRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateProxyPeerResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetOIDCURLRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetOIDCURLResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateSessionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proxy_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateSessionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_proxy_service_proto_msgTypes[0].OneofWrappers = []interface{}{} - file_proxy_service_proto_msgTypes[10].OneofWrappers = []interface{}{ + file_proxy_service_proto_msgTypes[0].OneofWrappers = []any{} + file_proxy_service_proto_msgTypes[12].OneofWrappers = []any{ (*AuthenticateRequest_Password)(nil), (*AuthenticateRequest_Pin)(nil), + (*AuthenticateRequest_HeaderAuth)(nil), } - file_proxy_service_proto_msgTypes[14].OneofWrappers = []interface{}{} - file_proxy_service_proto_msgTypes[17].OneofWrappers = []interface{}{} + file_proxy_service_proto_msgTypes[17].OneofWrappers = []any{} + file_proxy_service_proto_msgTypes[20].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_proxy_service_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_proxy_service_proto_rawDesc), len(file_proxy_service_proto_rawDesc)), NumEnums: 3, - NumMessages: 23, + NumMessages: 26, NumExtensions: 0, NumServices: 1, }, @@ -2394,7 +2158,6 @@ func file_proxy_service_proto_init() { MessageInfos: file_proxy_service_proto_msgTypes, }.Build() File_proxy_service_proto = out.File - file_proxy_service_proto_rawDesc = nil file_proxy_service_proto_goTypes = nil file_proxy_service_proto_depIdxs = nil } diff --git a/shared/management/proto/proxy_service.proto b/shared/management/proto/proxy_service.proto index 457d12e85..2d7bed548 100644 --- a/shared/management/proto/proxy_service.proto +++ b/shared/management/proto/proxy_service.proto @@ -80,12 +80,27 @@ message PathMapping { PathTargetOptions options = 3; } +message HeaderAuth { + // Header name to check, e.g. "Authorization", "X-API-Key". + string header = 1; + // argon2id hash of the expected full header value. + string hashed_value = 2; +} + message Authentication { string session_key = 1; int64 max_session_age_seconds = 2; bool password = 3; bool pin = 4; bool oidc = 5; + repeated HeaderAuth header_auths = 6; +} + +message AccessRestrictions { + repeated string allowed_cidrs = 1; + repeated string blocked_cidrs = 2; + repeated string allowed_countries = 3; + repeated string blocked_countries = 4; } message ProxyMapping { @@ -106,6 +121,7 @@ message ProxyMapping { string mode = 10; // For L4/TLS: the port the proxy listens on. int32 listen_port = 11; + AccessRestrictions access_restrictions = 12; } // SendAccessLogRequest consists of one or more AccessLogs from a Proxy. @@ -141,9 +157,15 @@ message AuthenticateRequest { oneof request { PasswordRequest password = 3; PinRequest pin = 4; + HeaderAuthRequest header_auth = 5; } } +message HeaderAuthRequest { + string header_value = 1; + string header_name = 2; +} + message PasswordRequest { string password = 1; } diff --git a/shared/relay/client/early_msg_buffer.go b/shared/relay/client/early_msg_buffer.go index 3ead94de1..52ff4d42e 100644 --- a/shared/relay/client/early_msg_buffer.go +++ b/shared/relay/client/early_msg_buffer.go @@ -65,8 +65,8 @@ func (b *earlyMsgBuffer) put(peerID messages.PeerID, msg Msg) bool { } entry := earlyMsg{ - peerID: peerID, - msg: msg, + peerID: peerID, + msg: msg, createdAt: time.Now(), } elem := b.order.PushBack(entry) From 80a8816b1dbb46d9dd3525f54abdb948ce04da66 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Mon, 16 Mar 2026 18:00:23 +0100 Subject: [PATCH 215/374] [misc] Add image build after merge to main (#5605) --- .github/workflows/release.yml | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7ac5103d9..1a4676625 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -211,18 +211,36 @@ jobs: - name: Clean up GPG key if: always() run: rm -f /tmp/gpg-rpm-signing-key.asc - - name: Tag and push PR images (amd64 only) - if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository + - name: Tag and push images (amd64 only) + if: | + (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository) || + (github.event_name == 'push' && github.ref == 'refs/heads/main') run: | - PR_TAG="pr-${{ github.event.pull_request.number }}" + resolve_tags() { + if [[ "${{ github.event_name }}" == "pull_request" ]]; then + echo "pr-${{ github.event.pull_request.number }}" + else + echo "main sha-$(git rev-parse --short HEAD)" + fi + } + + tag_and_push() { + local src="$1" img_name tag dst + img_name="${src%%:*}" + for tag in $(resolve_tags); do + dst="${img_name}:${tag}" + echo "Tagging ${src} -> ${dst}" + docker tag "$src" "$dst" + docker push "$dst" + done + } + + export -f tag_and_push resolve_tags + echo '${{ steps.goreleaser.outputs.artifacts }}' | \ jq -r '.[] | select(.type == "Docker Image") | select(.goarch == "amd64") | .name' | \ grep '^ghcr.io/' | while read -r SRC; do - IMG_NAME="${SRC%%:*}" - DST="${IMG_NAME}:${PR_TAG}" - echo "Tagging ${SRC} -> ${DST}" - docker tag "$SRC" "$DST" - docker push "$DST" + tag_and_push "$SRC" done - name: upload non tags for debug purposes uses: actions/upload-artifact@v4 From dff06d089874bc58b0bcdea1f54454083406f69a Mon Sep 17 00:00:00 2001 From: n0pashkov Date: Tue, 17 Mar 2026 07:33:13 +0300 Subject: [PATCH 216/374] [misc] Add netbird-tui to community projects (#5568) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index bca81c20b..dc84af2fd 100644 --- a/README.md +++ b/README.md @@ -126,6 +126,7 @@ See a complete [architecture overview](https://docs.netbird.io/about-netbird/how ### Community projects - [NetBird installer script](https://github.com/physk/netbird-installer) - [NetBird ansible collection by Dominion Solutions](https://galaxy.ansible.com/ui/repo/published/dominion_solutions/netbird/) +- [netbird-tui](https://github.com/n0pashkov/netbird-tui) — terminal UI for managing NetBird peers, routes, and settings **Note**: The `main` branch may be in an *unstable or even broken state* during development. For stable versions, see [releases](https://github.com/netbirdio/netbird/releases). From 59f5b34280c2adef6b836239115ffd9d7d293dbb Mon Sep 17 00:00:00 2001 From: tham-le <45093611+tham-le@users.noreply.github.com> Date: Tue, 17 Mar 2026 06:03:10 +0100 Subject: [PATCH 217/374] [client] add MTU option to embed.Options (#5550) Expose MTU configuration in the embed package so embedded clients can set the WireGuard tunnel MTU without the config file workaround. This is needed for protocols like QUIC that require larger datagrams than the default MTU of 1280. Validates MTU range via iface.ValidateMTU() at construction time to prevent invalid values from being persisted to config. Closes #5549 --- client/embed/embed.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/client/embed/embed.go b/client/embed/embed.go index 21043cf96..70013989a 100644 --- a/client/embed/embed.go +++ b/client/embed/embed.go @@ -14,6 +14,7 @@ import ( "github.com/sirupsen/logrus" wgnetstack "golang.zx2c4.com/wireguard/tun/netstack" + "github.com/netbirdio/netbird/client/iface" "github.com/netbirdio/netbird/client/iface/netstack" "github.com/netbirdio/netbird/client/internal" "github.com/netbirdio/netbird/client/internal/auth" @@ -81,6 +82,12 @@ type Options struct { BlockInbound bool // WireguardPort is the port for the WireGuard interface. Use 0 for a random port. WireguardPort *int + // MTU is the MTU for the WireGuard interface. + // Valid values are in the range 576..8192 bytes. + // If non-nil, this value overrides any value stored in the config file. + // If nil, the existing config MTU (if non-zero) is preserved; otherwise it defaults to 1280. + // Set to a higher value (e.g. 1400) if carrying QUIC or other protocols that require larger datagrams. + MTU *uint16 } // validateCredentials checks that exactly one credential type is provided @@ -112,6 +119,12 @@ func New(opts Options) (*Client, error) { return nil, err } + if opts.MTU != nil { + if err := iface.ValidateMTU(*opts.MTU); err != nil { + return nil, fmt.Errorf("invalid MTU: %w", err) + } + } + if opts.LogOutput != nil { logrus.SetOutput(opts.LogOutput) } @@ -151,6 +164,7 @@ func New(opts Options) (*Client, error) { DisableClientRoutes: &opts.DisableClientRoutes, BlockInbound: &opts.BlockInbound, WireguardPort: opts.WireguardPort, + MTU: opts.MTU, } if opts.ConfigPath != "" { config, err = profilemanager.UpdateOrCreateConfig(input) From 4e149c9222aaefccac1fe7cbe841e946c3b76ea4 Mon Sep 17 00:00:00 2001 From: Wesley Gimenes Date: Tue, 17 Mar 2026 02:09:12 -0300 Subject: [PATCH 218/374] [client] update gvisor to build with Go 1.26.x (#5447) Building the client with Go 1.26.x fails with errors: ``` [...] /builder/dl/go-mod-cache/gvisor.dev/gvisor@v0.0.0-20251031020517-ecfcdd2f171c/pkg/sync/runtime_constants_go126.go:22:2: WaitReasonSelect redeclared in this block /builder/dl/go-mod-cache/gvisor.dev/gvisor@v0.0.0-20251031020517-ecfcdd2f171c/pkg/sync/runtime_constants_go125.go:22:2: other declaration of WaitReasonSelect /builder/dl/go-mod-cache/gvisor.dev/gvisor@v0.0.0-20251031020517-ecfcdd2f171c/pkg/sync/runtime_constants_go126.go:23:2: WaitReasonChanReceive redeclared in this block /builder/dl/go-mod-cache/gvisor.dev/gvisor@v0.0.0-20251031020517-ecfcdd2f171c/pkg/sync/runtime_constants_go125.go:23:2: other declaration of WaitReasonChanReceive /builder/dl/go-mod-cache/gvisor.dev/gvisor@v0.0.0-20251031020517-ecfcdd2f171c/pkg/sync/runtime_constants_go126.go:24:2: WaitReasonSemacquire redeclared in this block /builder/dl/go-mod-cache/gvisor.dev/gvisor@v0.0.0-20251031020517-ecfcdd2f171c/pkg/sync/runtime_constants_go125.go:24:2: other declaration of WaitReasonSemacquire [...] ``` Fixes: https://github.com/netbirdio/netbird/issues/5290 ("Does not build with Go 1.26rc3") Signed-off-by: Wesley Gimenes --- go.mod | 6 ++---- go.sum | 4 ++-- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 4bcdbdc78..f8b27aca0 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,6 @@ module github.com/netbirdio/netbird -go 1.25 - -toolchain go1.25.5 +go 1.25.5 require ( cunicu.li/go-rosenpass v0.4.0 @@ -125,7 +123,7 @@ require ( gorm.io/driver/postgres v1.5.7 gorm.io/driver/sqlite v1.5.7 gorm.io/gorm v1.25.12 - gvisor.dev/gvisor v0.0.0-20251031020517-ecfcdd2f171c + gvisor.dev/gvisor v0.0.0-20260219192049-0f2374377e89 ) require ( diff --git a/go.sum b/go.sum index 1bd9396bb..d4b42eab4 100644 --- a/go.sum +++ b/go.sum @@ -852,5 +852,5 @@ gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8= gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ= gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= -gvisor.dev/gvisor v0.0.0-20251031020517-ecfcdd2f171c h1:pfzmXIkkDgydR4ZRP+e1hXywZfYR21FA0Fbk6ptMkiA= -gvisor.dev/gvisor v0.0.0-20251031020517-ecfcdd2f171c/go.mod h1:/mc6CfwbOm5KKmqoV7Qx20Q+Ja8+vO4g7FuCdlVoAfQ= +gvisor.dev/gvisor v0.0.0-20260219192049-0f2374377e89 h1:mGJaeA61P8dEHTqdvAgc70ZIV3QoUoJcXCRyyjO26OA= +gvisor.dev/gvisor v0.0.0-20260219192049-0f2374377e89/go.mod h1:QkHjoMIBaYtpVufgwv3keYAbln78mBoCuShZrPrer1Q= From a590c38d8b1f4429535f49ff5cac9655a29483a7 Mon Sep 17 00:00:00 2001 From: eason <85663565+mango766@users.noreply.github.com> Date: Tue, 17 Mar 2026 13:27:47 +0800 Subject: [PATCH 219/374] [client] Fix IPv6 address formatting in DNS address construction (#5603) Replace fmt.Sprintf("%s:%d", ip, port) with net.JoinHostPort() to properly handle IPv6 addresses that need bracket wrapping (e.g., [2606:4700:4700::1111]:53 instead of 2606:4700:4700::1111:53). Without this fix, configuring IPv6 nameservers causes "too many colons in address" errors because Go's net.Dial cannot parse the malformed address string. Fixes #5601 Related to #4074 Co-authored-by: easonysliu --- client/internal/dns/service_listener.go | 5 +++-- client/internal/routemanager/client/client.go | 4 +++- client/internal/routemanager/dnsinterceptor/handler.go | 4 +++- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/client/internal/dns/service_listener.go b/client/internal/dns/service_listener.go index 806559444..f7ddfd40f 100644 --- a/client/internal/dns/service_listener.go +++ b/client/internal/dns/service_listener.go @@ -6,6 +6,7 @@ import ( "net" "net/netip" "runtime" + "strconv" "sync" "time" @@ -69,7 +70,7 @@ func (s *serviceViaListener) Listen() error { return fmt.Errorf("eval listen address: %w", err) } s.listenIP = s.listenIP.Unmap() - s.server.Addr = fmt.Sprintf("%s:%d", s.listenIP, s.listenPort) + s.server.Addr = net.JoinHostPort(s.listenIP.String(), strconv.Itoa(int(s.listenPort))) log.Debugf("starting dns on %s", s.server.Addr) go func() { s.setListenerStatus(true) @@ -186,7 +187,7 @@ func (s *serviceViaListener) testFreePort(port int) (netip.Addr, bool) { } func (s *serviceViaListener) tryToBind(ip netip.Addr, port int) bool { - addrString := fmt.Sprintf("%s:%d", ip, port) + addrString := net.JoinHostPort(ip.String(), strconv.Itoa(port)) udpAddr := net.UDPAddrFromAddrPort(netip.MustParseAddrPort(addrString)) probeListener, err := net.ListenUDP("udp", udpAddr) if err != nil { diff --git a/client/internal/routemanager/client/client.go b/client/internal/routemanager/client/client.go index bad616271..e6ef8b876 100644 --- a/client/internal/routemanager/client/client.go +++ b/client/internal/routemanager/client/client.go @@ -3,7 +3,9 @@ package client import ( "context" "fmt" + "net" "reflect" + "strconv" "time" log "github.com/sirupsen/logrus" @@ -564,7 +566,7 @@ func HandlerFromRoute(params common.HandlerParams) RouteHandler { return dnsinterceptor.New(params) case handlerTypeDynamic: dns := nbdns.NewServiceViaMemory(params.WgInterface) - dnsAddr := fmt.Sprintf("%s:%d", dns.RuntimeIP(), dns.RuntimePort()) + dnsAddr := net.JoinHostPort(dns.RuntimeIP().String(), strconv.Itoa(dns.RuntimePort())) return dynamic.NewRoute(params, dnsAddr) default: return static.NewRoute(params) diff --git a/client/internal/routemanager/dnsinterceptor/handler.go b/client/internal/routemanager/dnsinterceptor/handler.go index 4bf0d5476..64f2a8789 100644 --- a/client/internal/routemanager/dnsinterceptor/handler.go +++ b/client/internal/routemanager/dnsinterceptor/handler.go @@ -4,8 +4,10 @@ import ( "context" "errors" "fmt" + "net" "net/netip" "runtime" + "strconv" "strings" "sync" "sync/atomic" @@ -249,7 +251,7 @@ func (d *DnsInterceptor) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { r.MsgHdr.AuthenticatedData = true } - upstream := fmt.Sprintf("%s:%d", upstreamIP.String(), uint16(d.forwarderPort.Load())) + upstream := net.JoinHostPort(upstreamIP.String(), strconv.FormatUint(uint64(d.forwarderPort.Load()), 10)) ctx, cancel := context.WithTimeout(context.Background(), dnsTimeout) defer cancel() From 628eb5607398d61b833bbd3259fa6335acc89621 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Tue, 17 Mar 2026 23:10:38 +0800 Subject: [PATCH 220/374] [client] Update go-m1cpu to v0.2.0 to fix SIGSEGV on macOS Tahoe (#5613) --- go.mod | 2 +- go.sum | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f8b27aca0..2c911aa4f 100644 --- a/go.mod +++ b/go.mod @@ -255,7 +255,7 @@ require ( github.com/russellhaering/goxmldsig v1.5.0 // indirect github.com/rymdport/portal v0.4.2 // indirect github.com/shirou/gopsutil/v4 v4.25.1 // indirect - github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/shoenig/go-m1cpu v0.2.0 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/spf13/cast v1.7.0 // indirect github.com/srwiley/oksvg v0.0.0-20221011165216-be6e8873101c // indirect diff --git a/go.sum b/go.sum index d4b42eab4..e26b4edaf 100644 --- a/go.sum +++ b/go.sum @@ -512,10 +512,12 @@ github.com/shirou/gopsutil/v3 v3.24.4 h1:dEHgzZXt4LMNm+oYELpzl9YCqV65Yr/6SfrvgRB github.com/shirou/gopsutil/v3 v3.24.4/go.mod h1:lTd2mdiOspcqLgAnr9/nGi71NkeMpWKdmhuxm9GusH8= github.com/shirou/gopsutil/v4 v4.25.1 h1:QSWkTc+fu9LTAWfkZwZ6j8MSUk4A2LV7rbH0ZqmLjXs= github.com/shirou/gopsutil/v4 v4.25.1/go.mod h1:RoUCUpndaJFtT+2zsZzzmhvbfGoDCJ7nFXKJf8GqJbI= -github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/go-m1cpu v0.2.0 h1:t4GNqvPZ84Vjtpboo/kT3pIkbaK3vc+JIlD/Wz1zSFY= +github.com/shoenig/go-m1cpu v0.2.0/go.mod h1:KkDOw6m3ZJQAPHbrzkZki4hnx+pDRR1Lo+ldA56wD5w= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/shoenig/test v1.7.0 h1:eWcHtTXa6QLnBvm0jgEabMRN/uJ4DMV3M8xUGgRkZmk= +github.com/shoenig/test v1.7.0/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= From 19d94c6158203e62584b71a09f7f9fc90c61f60a Mon Sep 17 00:00:00 2001 From: Wouter van Os Date: Tue, 17 Mar 2026 16:12:37 +0100 Subject: [PATCH 221/374] [client] Allow setting DNSLabels on client embed (#5493) --- client/embed/embed.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/client/embed/embed.go b/client/embed/embed.go index 70013989a..9fa797f18 100644 --- a/client/embed/embed.go +++ b/client/embed/embed.go @@ -22,6 +22,7 @@ import ( "github.com/netbirdio/netbird/client/internal/profilemanager" sshcommon "github.com/netbirdio/netbird/client/ssh" "github.com/netbirdio/netbird/client/system" + "github.com/netbirdio/netbird/shared/management/domain" mgmProto "github.com/netbirdio/netbird/shared/management/proto" ) @@ -88,6 +89,8 @@ type Options struct { // If nil, the existing config MTU (if non-zero) is preserved; otherwise it defaults to 1280. // Set to a higher value (e.g. 1400) if carrying QUIC or other protocols that require larger datagrams. MTU *uint16 + // DNSLabels defines additional DNS labels configured in the peer. + DNSLabels []string } // validateCredentials checks that exactly one credential type is provided @@ -153,9 +156,14 @@ func New(opts Options) (*Client, error) { } } + var err error + var parsedLabels domain.List + if parsedLabels, err = domain.FromStringList(opts.DNSLabels); err != nil { + return nil, fmt.Errorf("invalid dns labels: %w", err) + } + t := true var config *profilemanager.Config - var err error input := profilemanager.ConfigInput{ ConfigPath: opts.ConfigPath, ManagementURL: opts.ManagementURL, @@ -165,6 +173,7 @@ func New(opts Options) (*Client, error) { BlockInbound: &opts.BlockInbound, WireguardPort: opts.WireguardPort, MTU: opts.MTU, + DNSLabels: parsedLabels, } if opts.ConfigPath != "" { config, err = profilemanager.UpdateOrCreateConfig(input) From f0eed506780fac7c19a276d23fc57eca7901aa44 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Tue, 17 Mar 2026 23:29:03 +0800 Subject: [PATCH 222/374] [management] Accept domain target type for L4 reverse proxy services (#5612) --- .../reverseproxy/service/manager/manager.go | 49 ++++++++++++---- .../service/manager/manager_test.go | 58 +++++++++++++++++++ .../modules/reverseproxy/service/service.go | 2 +- .../reverseproxy/service/service_test.go | 26 +++++++++ 4 files changed, 124 insertions(+), 11 deletions(-) diff --git a/management/internals/modules/reverseproxy/service/manager/manager.go b/management/internals/modules/reverseproxy/service/manager/manager.go index 65177bf5d..2251f5084 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager.go +++ b/management/internals/modules/reverseproxy/service/manager/manager.go @@ -14,6 +14,8 @@ import ( nbpeer "github.com/netbirdio/netbird/management/server/peer" + resourcetypes "github.com/netbirdio/netbird/management/server/networks/resources/types" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/sessionkey" @@ -636,18 +638,12 @@ func validateTargetReferences(ctx context.Context, transaction store.Store, acco for _, target := range targets { switch target.TargetType { case service.TargetTypePeer: - if _, err := transaction.GetPeerByID(ctx, store.LockingStrengthShare, accountID, target.TargetId); err != nil { - if sErr, ok := status.FromError(err); ok && sErr.Type() == status.NotFound { - return status.Errorf(status.InvalidArgument, "peer target %q not found in account", target.TargetId) - } - return fmt.Errorf("look up peer target %q: %w", target.TargetId, err) + if err := validatePeerTarget(ctx, transaction, accountID, target); err != nil { + return err } case service.TargetTypeHost, service.TargetTypeSubnet, service.TargetTypeDomain: - if _, err := transaction.GetNetworkResourceByID(ctx, store.LockingStrengthShare, accountID, target.TargetId); err != nil { - if sErr, ok := status.FromError(err); ok && sErr.Type() == status.NotFound { - return status.Errorf(status.InvalidArgument, "resource target %q not found in account", target.TargetId) - } - return fmt.Errorf("look up resource target %q: %w", target.TargetId, err) + if err := validateResourceTarget(ctx, transaction, accountID, target); err != nil { + return err } default: return status.Errorf(status.InvalidArgument, "unknown target type %q for target %q", target.TargetType, target.TargetId) @@ -656,6 +652,39 @@ func validateTargetReferences(ctx context.Context, transaction store.Store, acco return nil } +func validatePeerTarget(ctx context.Context, transaction store.Store, accountID string, target *service.Target) error { + if _, err := transaction.GetPeerByID(ctx, store.LockingStrengthShare, accountID, target.TargetId); err != nil { + if sErr, ok := status.FromError(err); ok && sErr.Type() == status.NotFound { + return status.Errorf(status.InvalidArgument, "peer target %q not found in account", target.TargetId) + } + return fmt.Errorf("look up peer target %q: %w", target.TargetId, err) + } + return nil +} + +func validateResourceTarget(ctx context.Context, transaction store.Store, accountID string, target *service.Target) error { + resource, err := transaction.GetNetworkResourceByID(ctx, store.LockingStrengthShare, accountID, target.TargetId) + if err != nil { + if sErr, ok := status.FromError(err); ok && sErr.Type() == status.NotFound { + return status.Errorf(status.InvalidArgument, "resource target %q not found in account", target.TargetId) + } + return fmt.Errorf("look up resource target %q: %w", target.TargetId, err) + } + return validateResourceTargetType(target, resource) +} + +// validateResourceTargetType checks that target_type matches the actual network resource type. +func validateResourceTargetType(target *service.Target, resource *resourcetypes.NetworkResource) error { + expected := resourcetypes.NetworkResourceType(target.TargetType) + if resource.Type != expected { + return status.Errorf(status.InvalidArgument, + "target %q has target_type %q but resource is of type %q", + target.TargetId, target.TargetType, resource.Type, + ) + } + return nil +} + func (m *Manager) DeleteService(ctx context.Context, accountID, userID, serviceID string) error { ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Delete) if err != nil { diff --git a/management/internals/modules/reverseproxy/service/manager/manager_test.go b/management/internals/modules/reverseproxy/service/manager/manager_test.go index d23c91017..0c34f81a2 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/service/manager/manager_test.go @@ -19,6 +19,7 @@ import ( "github.com/netbirdio/netbird/management/server/account" "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/mock_server" + resourcetypes "github.com/netbirdio/netbird/management/server/networks/resources/types" nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/permissions/modules" @@ -1214,3 +1215,60 @@ func TestValidateProtocolChange(t *testing.T) { }) } } + +func TestValidateTargetReferences_ResourceTypeMismatch(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + mockStore := store.NewMockStore(ctrl) + accountID := "test-account" + + tests := []struct { + name string + targetType rpservice.TargetType + resourceType resourcetypes.NetworkResourceType + wantErr bool + }{ + {"host matches host", rpservice.TargetTypeHost, resourcetypes.Host, false}, + {"domain matches domain", rpservice.TargetTypeDomain, resourcetypes.Domain, false}, + {"subnet matches subnet", rpservice.TargetTypeSubnet, resourcetypes.Subnet, false}, + {"host but resource is domain", rpservice.TargetTypeHost, resourcetypes.Domain, true}, + {"domain but resource is host", rpservice.TargetTypeDomain, resourcetypes.Host, true}, + {"host but resource is subnet", rpservice.TargetTypeHost, resourcetypes.Subnet, true}, + {"subnet but resource is domain", rpservice.TargetTypeSubnet, resourcetypes.Domain, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockStore.EXPECT(). + GetNetworkResourceByID(gomock.Any(), store.LockingStrengthShare, accountID, "resource-1"). + Return(&resourcetypes.NetworkResource{Type: tt.resourceType}, nil) + + targets := []*rpservice.Target{ + {TargetId: "resource-1", TargetType: tt.targetType, Host: "10.0.0.1"}, + } + err := validateTargetReferences(ctx, mockStore, accountID, targets) + if tt.wantErr { + require.Error(t, err) + assert.Contains(t, err.Error(), "target_type") + } else { + require.NoError(t, err) + } + }) + } +} + +func TestValidateTargetReferences_PeerValid(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + mockStore := store.NewMockStore(ctrl) + accountID := "test-account" + + mockStore.EXPECT(). + GetPeerByID(gomock.Any(), store.LockingStrengthShare, accountID, "peer-1"). + Return(&nbpeer.Peer{}, nil) + + targets := []*rpservice.Target{ + {TargetId: "peer-1", TargetType: rpservice.TargetTypePeer}, + } + require.NoError(t, validateTargetReferences(ctx, mockStore, accountID, targets)) +} diff --git a/management/internals/modules/reverseproxy/service/service.go b/management/internals/modules/reverseproxy/service/service.go index 6c7c80806..c00d49421 100644 --- a/management/internals/modules/reverseproxy/service/service.go +++ b/management/internals/modules/reverseproxy/service/service.go @@ -790,7 +790,7 @@ func (s *Service) validateL4Target(target *Target) error { return errors.New("target_id is required for L4 services") } switch target.TargetType { - case TargetTypePeer, TargetTypeHost: + case TargetTypePeer, TargetTypeHost, TargetTypeDomain: // OK case TargetTypeSubnet: if target.Host == "" { diff --git a/management/internals/modules/reverseproxy/service/service_test.go b/management/internals/modules/reverseproxy/service/service_test.go index 9daf729fe..3fe07b1d0 100644 --- a/management/internals/modules/reverseproxy/service/service_test.go +++ b/management/internals/modules/reverseproxy/service/service_test.go @@ -847,6 +847,32 @@ func TestValidate_TLSSubnetValid(t *testing.T) { require.NoError(t, rp.Validate()) } +func TestValidate_L4DomainTargetValid(t *testing.T) { + modes := []struct { + mode string + port uint16 + proto string + }{ + {"tcp", 5432, "tcp"}, + {"tls", 443, "tcp"}, + {"udp", 5432, "udp"}, + } + for _, m := range modes { + t.Run(m.mode, func(t *testing.T) { + rp := &Service{ + Name: m.mode + "-domain", + Mode: m.mode, + Domain: "cluster.test", + ListenPort: m.port, + Targets: []*Target{ + {TargetId: "resource-1", TargetType: TargetTypeDomain, Protocol: m.proto, Port: m.port, Enabled: true}, + }, + } + require.NoError(t, rp.Validate()) + }) + } +} + func TestValidate_HTTPProxyProtocolRejected(t *testing.T) { rp := validProxy() rp.Targets[0].ProxyProtocol = true From af8eaa23e234caa77172c1ed231411adb1b21865 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 18 Mar 2026 00:00:24 +0800 Subject: [PATCH 223/374] [client] Restart engine when peer IP address changes (#5614) --- client/internal/engine.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/client/internal/engine.go b/client/internal/engine.go index fd3bdf7af..e9c92471c 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -989,10 +989,11 @@ func (e *Engine) updateConfig(conf *mgmProto.PeerConfig) error { return errors.New("wireguard interface is not initialized") } - // Cannot update the IP address without restarting the engine because - // the firewall, route manager, and other components cache the old address if e.wgInterface.Address().String() != conf.Address { - log.Infof("peer IP address has changed from %s to %s", e.wgInterface.Address().String(), conf.Address) + log.Infof("peer IP address changed from %s to %s, restarting client", e.wgInterface.Address().String(), conf.Address) + _ = CtxGetState(e.ctx).Wrap(ErrResetConnection) + e.clientCancel() + return ErrResetConnection } if conf.GetSshConfig() != nil { From 212b34f639d8d711fd7a614cb1605d2b3ec70ffc Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 18 Mar 2026 11:15:56 +0800 Subject: [PATCH 224/374] [management] Add GET /reverse-proxies/clusters endpoint (#5611) --- .../modules/reverseproxy/proxy/manager.go | 1 + .../reverseproxy/proxy/manager/manager.go | 11 +++++ .../reverseproxy/proxy/manager_mock.go | 43 +++++++++++++------ .../modules/reverseproxy/proxy/proxy.go | 6 +++ .../modules/reverseproxy/service/interface.go | 3 ++ .../reverseproxy/service/interface_mock.go | 16 +++++++ .../reverseproxy/service/manager/api.go | 25 +++++++++++ .../reverseproxy/service/manager/manager.go | 13 ++++++ .../shared/grpc/proxy_group_access_test.go | 5 +++ .../shared/grpc/validate_session_test.go | 9 ++++ .../proxy/auth_callback_integration_test.go | 5 +++ management/server/store/sql_store.go | 18 ++++++++ management/server/store/store.go | 1 + management/server/store/store_mock.go | 15 +++++++ proxy/management_integration_test.go | 8 ++++ 15 files changed, 165 insertions(+), 14 deletions(-) diff --git a/management/internals/modules/reverseproxy/proxy/manager.go b/management/internals/modules/reverseproxy/proxy/manager.go index 67a8e74fa..262c2af9b 100644 --- a/management/internals/modules/reverseproxy/proxy/manager.go +++ b/management/internals/modules/reverseproxy/proxy/manager.go @@ -15,6 +15,7 @@ type Manager interface { Disconnect(ctx context.Context, proxyID string) error Heartbeat(ctx context.Context, proxyID string) error GetActiveClusterAddresses(ctx context.Context) ([]string, error) + GetActiveClusters(ctx context.Context) ([]Cluster, error) CleanupStale(ctx context.Context, inactivityDuration time.Duration) error } diff --git a/management/internals/modules/reverseproxy/proxy/manager/manager.go b/management/internals/modules/reverseproxy/proxy/manager/manager.go index 4c0964b5c..6350b36bd 100644 --- a/management/internals/modules/reverseproxy/proxy/manager/manager.go +++ b/management/internals/modules/reverseproxy/proxy/manager/manager.go @@ -15,6 +15,7 @@ type store interface { SaveProxy(ctx context.Context, p *proxy.Proxy) error UpdateProxyHeartbeat(ctx context.Context, proxyID string) error GetActiveProxyClusterAddresses(ctx context.Context) ([]string, error) + GetActiveProxyClusters(ctx context.Context) ([]proxy.Cluster, error) CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error } @@ -105,6 +106,16 @@ func (m Manager) GetActiveClusterAddresses(ctx context.Context) ([]string, error return addresses, nil } +// GetActiveClusters returns all active proxy clusters with their connected proxy count. +func (m Manager) GetActiveClusters(ctx context.Context) ([]proxy.Cluster, error) { + clusters, err := m.store.GetActiveProxyClusters(ctx) + if err != nil { + log.WithContext(ctx).Errorf("failed to get active proxy clusters: %v", err) + return nil, err + } + return clusters, nil +} + // CleanupStale removes proxies that haven't sent heartbeat in the specified duration func (m Manager) CleanupStale(ctx context.Context, inactivityDuration time.Duration) error { if err := m.store.CleanupStaleProxies(ctx, inactivityDuration); err != nil { diff --git a/management/internals/modules/reverseproxy/proxy/manager_mock.go b/management/internals/modules/reverseproxy/proxy/manager_mock.go index b07a21122..e2dc4c2b6 100644 --- a/management/internals/modules/reverseproxy/proxy/manager_mock.go +++ b/management/internals/modules/reverseproxy/proxy/manager_mock.go @@ -93,6 +93,21 @@ func (mr *MockManagerMockRecorder) GetActiveClusterAddresses(ctx interface{}) *g return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActiveClusterAddresses", reflect.TypeOf((*MockManager)(nil).GetActiveClusterAddresses), ctx) } +// GetActiveClusters mocks base method. +func (m *MockManager) GetActiveClusters(ctx context.Context) ([]Cluster, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActiveClusters", ctx) + ret0, _ := ret[0].([]Cluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetActiveClusters indicates an expected call of GetActiveClusters. +func (mr *MockManagerMockRecorder) GetActiveClusters(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActiveClusters", reflect.TypeOf((*MockManager)(nil).GetActiveClusters), ctx) +} + // Heartbeat mocks base method. func (m *MockManager) Heartbeat(ctx context.Context, proxyID string) error { m.ctrl.T.Helper() @@ -130,20 +145,6 @@ func (m *MockController) EXPECT() *MockControllerMockRecorder { return m.recorder } -// GetOIDCValidationConfig mocks base method. -func (m *MockController) GetOIDCValidationConfig() OIDCValidationConfig { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetOIDCValidationConfig") - ret0, _ := ret[0].(OIDCValidationConfig) - return ret0 -} - -// GetOIDCValidationConfig indicates an expected call of GetOIDCValidationConfig. -func (mr *MockControllerMockRecorder) GetOIDCValidationConfig() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOIDCValidationConfig", reflect.TypeOf((*MockController)(nil).GetOIDCValidationConfig)) -} - // ClusterSupportsCustomPorts mocks base method. func (m *MockController) ClusterSupportsCustomPorts(clusterAddr string) *bool { m.ctrl.T.Helper() @@ -158,6 +159,20 @@ func (mr *MockControllerMockRecorder) ClusterSupportsCustomPorts(clusterAddr int return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterSupportsCustomPorts", reflect.TypeOf((*MockController)(nil).ClusterSupportsCustomPorts), clusterAddr) } +// GetOIDCValidationConfig mocks base method. +func (m *MockController) GetOIDCValidationConfig() OIDCValidationConfig { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetOIDCValidationConfig") + ret0, _ := ret[0].(OIDCValidationConfig) + return ret0 +} + +// GetOIDCValidationConfig indicates an expected call of GetOIDCValidationConfig. +func (mr *MockControllerMockRecorder) GetOIDCValidationConfig() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOIDCValidationConfig", reflect.TypeOf((*MockController)(nil).GetOIDCValidationConfig)) +} + // GetProxiesForCluster mocks base method. func (m *MockController) GetProxiesForCluster(clusterAddr string) []string { m.ctrl.T.Helper() diff --git a/management/internals/modules/reverseproxy/proxy/proxy.go b/management/internals/modules/reverseproxy/proxy/proxy.go index 699e1ed02..671eb109f 100644 --- a/management/internals/modules/reverseproxy/proxy/proxy.go +++ b/management/internals/modules/reverseproxy/proxy/proxy.go @@ -18,3 +18,9 @@ type Proxy struct { func (Proxy) TableName() string { return "proxies" } + +// Cluster represents a group of proxy nodes serving the same address. +type Cluster struct { + Address string + ConnectedProxies int +} diff --git a/management/internals/modules/reverseproxy/service/interface.go b/management/internals/modules/reverseproxy/service/interface.go index 39fd7e3ae..a49cbea35 100644 --- a/management/internals/modules/reverseproxy/service/interface.go +++ b/management/internals/modules/reverseproxy/service/interface.go @@ -4,9 +4,12 @@ package service import ( "context" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" ) type Manager interface { + GetActiveClusters(ctx context.Context, accountID, userID string) ([]proxy.Cluster, error) GetAllServices(ctx context.Context, accountID, userID string) ([]*Service, error) GetService(ctx context.Context, accountID, userID, serviceID string) (*Service, error) CreateService(ctx context.Context, accountID, userID string, service *Service) (*Service, error) diff --git a/management/internals/modules/reverseproxy/service/interface_mock.go b/management/internals/modules/reverseproxy/service/interface_mock.go index bdc1f3e65..cc5ccbb8e 100644 --- a/management/internals/modules/reverseproxy/service/interface_mock.go +++ b/management/internals/modules/reverseproxy/service/interface_mock.go @@ -9,6 +9,7 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" + proxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" ) // MockManager is a mock of Manager interface. @@ -107,6 +108,21 @@ func (mr *MockManagerMockRecorder) GetAccountServices(ctx, accountID interface{} return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountServices", reflect.TypeOf((*MockManager)(nil).GetAccountServices), ctx, accountID) } +// GetActiveClusters mocks base method. +func (m *MockManager) GetActiveClusters(ctx context.Context, accountID, userID string) ([]proxy.Cluster, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActiveClusters", ctx, accountID, userID) + ret0, _ := ret[0].([]proxy.Cluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetActiveClusters indicates an expected call of GetActiveClusters. +func (mr *MockManagerMockRecorder) GetActiveClusters(ctx, accountID, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActiveClusters", reflect.TypeOf((*MockManager)(nil).GetActiveClusters), ctx, accountID, userID) +} + // GetAllServices mocks base method. func (m *MockManager) GetAllServices(ctx context.Context, accountID, userID string) ([]*Service, error) { m.ctrl.T.Helper() diff --git a/management/internals/modules/reverseproxy/service/manager/api.go b/management/internals/modules/reverseproxy/service/manager/api.go index c53219d2e..cd81efa88 100644 --- a/management/internals/modules/reverseproxy/service/manager/api.go +++ b/management/internals/modules/reverseproxy/service/manager/api.go @@ -34,6 +34,7 @@ func RegisterEndpoints(manager rpservice.Manager, domainManager domainmanager.Ma accesslogsmanager.RegisterEndpoints(router, accessLogsManager) + router.HandleFunc("/reverse-proxies/clusters", h.getClusters).Methods("GET", "OPTIONS") router.HandleFunc("/reverse-proxies/services", h.getAllServices).Methods("GET", "OPTIONS") router.HandleFunc("/reverse-proxies/services", h.createService).Methods("POST", "OPTIONS") router.HandleFunc("/reverse-proxies/services/{serviceId}", h.getService).Methods("GET", "OPTIONS") @@ -177,3 +178,27 @@ func (h *handler) deleteService(w http.ResponseWriter, r *http.Request) { util.WriteJSONObject(r.Context(), w, util.EmptyObject{}) } + +func (h *handler) getClusters(w http.ResponseWriter, r *http.Request) { + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + clusters, err := h.manager.GetActiveClusters(r.Context(), userAuth.AccountId, userAuth.UserId) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + apiClusters := make([]api.ProxyCluster, 0, len(clusters)) + for _, c := range clusters { + apiClusters = append(apiClusters, api.ProxyCluster{ + Address: c.Address, + ConnectedProxies: c.ConnectedProxies, + }) + } + + util.WriteJSONObject(r.Context(), w, apiClusters) +} diff --git a/management/internals/modules/reverseproxy/service/manager/manager.go b/management/internals/modules/reverseproxy/service/manager/manager.go index 2251f5084..a7173a131 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager.go +++ b/management/internals/modules/reverseproxy/service/manager/manager.go @@ -102,6 +102,19 @@ func (m *Manager) StartExposeReaper(ctx context.Context) { m.exposeReaper.StartExposeReaper(ctx) } +// GetActiveClusters returns all active proxy clusters with their connected proxy count. +func (m *Manager) GetActiveClusters(ctx context.Context, accountID, userID string) ([]proxy.Cluster, error) { + ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Read) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !ok { + return nil, status.NewPermissionDeniedError() + } + + return m.store.GetActiveProxyClusters(ctx) +} + func (m *Manager) GetAllServices(ctx context.Context, accountID, userID string) ([]*service.Service, error) { ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Read) if err != nil { diff --git a/management/internals/shared/grpc/proxy_group_access_test.go b/management/internals/shared/grpc/proxy_group_access_test.go index 22fe4506b..0fa9a0dc1 100644 --- a/management/internals/shared/grpc/proxy_group_access_test.go +++ b/management/internals/shared/grpc/proxy_group_access_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/management/server/types" ) @@ -90,6 +91,10 @@ func (m *mockReverseProxyManager) StopServiceFromPeer(_ context.Context, _, _, _ func (m *mockReverseProxyManager) StartExposeReaper(_ context.Context) {} +func (m *mockReverseProxyManager) GetActiveClusters(_ context.Context, _, _ string) ([]proxy.Cluster, error) { + return nil, nil +} + type mockUsersManager struct { users map[string]*types.User err error diff --git a/management/internals/shared/grpc/validate_session_test.go b/management/internals/shared/grpc/validate_session_test.go index 647e8443b..2f77de86e 100644 --- a/management/internals/shared/grpc/validate_session_test.go +++ b/management/internals/shared/grpc/validate_session_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/sessionkey" "github.com/netbirdio/netbird/management/server/store" @@ -320,6 +321,10 @@ func (m *testValidateSessionServiceManager) StopServiceFromPeer(_ context.Contex func (m *testValidateSessionServiceManager) StartExposeReaper(_ context.Context) {} +func (m *testValidateSessionServiceManager) GetActiveClusters(_ context.Context, _, _ string) ([]proxy.Cluster, error) { + return nil, nil +} + type testValidateSessionProxyManager struct{} func (m *testValidateSessionProxyManager) Connect(_ context.Context, _, _, _ string) error { @@ -338,6 +343,10 @@ func (m *testValidateSessionProxyManager) GetActiveClusterAddresses(_ context.Co return nil, nil } +func (m *testValidateSessionProxyManager) GetActiveClusters(_ context.Context) ([]proxy.Cluster, error) { + return nil, nil +} + func (m *testValidateSessionProxyManager) CleanupStale(_ context.Context, _ time.Duration) error { return nil } diff --git a/management/server/http/handlers/proxy/auth_callback_integration_test.go b/management/server/http/handlers/proxy/auth_callback_integration_test.go index 3bed54e80..922bf4352 100644 --- a/management/server/http/handlers/proxy/auth_callback_integration_test.go +++ b/management/server/http/handlers/proxy/auth_callback_integration_test.go @@ -19,6 +19,7 @@ import ( "github.com/stretchr/testify/require" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs" + nbproxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" "github.com/netbirdio/netbird/management/server/store" @@ -433,6 +434,10 @@ func (m *testServiceManager) StopServiceFromPeer(_ context.Context, _, _, _ stri func (m *testServiceManager) StartExposeReaper(_ context.Context) {} +func (m *testServiceManager) GetActiveClusters(_ context.Context, _, _ string) ([]nbproxy.Cluster, error) { + return nil, nil +} + func createTestState(t *testing.T, ps *nbgrpc.ProxyServiceServer, redirectURL string) string { t.Helper() diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index b3fbfe141..32f2f8540 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -5440,6 +5440,24 @@ func (s *SqlStore) GetActiveProxyClusterAddresses(ctx context.Context) ([]string return addresses, nil } +// GetActiveProxyClusters returns all active proxy clusters with their connected proxy count. +func (s *SqlStore) GetActiveProxyClusters(ctx context.Context) ([]proxy.Cluster, error) { + var clusters []proxy.Cluster + + result := s.db.Model(&proxy.Proxy{}). + Select("cluster_address as address, COUNT(*) as connected_proxies"). + Where("status = ? AND last_seen > ?", "connected", time.Now().Add(-2*time.Minute)). + Group("cluster_address"). + Scan(&clusters) + + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to get active proxy clusters: %v", result.Error) + return nil, status.Errorf(status.Internal, "get active proxy clusters") + } + + return clusters, nil +} + // CleanupStaleProxies deletes proxies that haven't sent heartbeat in the specified duration func (s *SqlStore) CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error { cutoffTime := time.Now().Add(-inactivityDuration) diff --git a/management/server/store/store.go b/management/server/store/store.go index 8bb52f38a..5dbfbd177 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -286,6 +286,7 @@ type Store interface { SaveProxy(ctx context.Context, proxy *proxy.Proxy) error UpdateProxyHeartbeat(ctx context.Context, proxyID string) error GetActiveProxyClusterAddresses(ctx context.Context) ([]string, error) + GetActiveProxyClusters(ctx context.Context) ([]proxy.Cluster, error) CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error GetCustomDomainsCounts(ctx context.Context) (total int64, validated int64, err error) diff --git a/management/server/store/store_mock.go b/management/server/store/store_mock.go index e75e35b94..05a6fe39f 100644 --- a/management/server/store/store_mock.go +++ b/management/server/store/store_mock.go @@ -1287,6 +1287,21 @@ func (mr *MockStoreMockRecorder) GetActiveProxyClusterAddresses(ctx interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActiveProxyClusterAddresses", reflect.TypeOf((*MockStore)(nil).GetActiveProxyClusterAddresses), ctx) } +// GetActiveProxyClusters mocks base method. +func (m *MockStore) GetActiveProxyClusters(ctx context.Context) ([]proxy.Cluster, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActiveProxyClusters", ctx) + ret0, _ := ret[0].([]proxy.Cluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetActiveProxyClusters indicates an expected call of GetActiveProxyClusters. +func (mr *MockStoreMockRecorder) GetActiveProxyClusters(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActiveProxyClusters", reflect.TypeOf((*MockStore)(nil).GetActiveProxyClusters), ctx) +} + // GetAllAccounts mocks base method. func (m *MockStore) GetAllAccounts(ctx context.Context) []*types2.Account { m.ctrl.T.Helper() diff --git a/proxy/management_integration_test.go b/proxy/management_integration_test.go index 8af151446..2fcbfe3cf 100644 --- a/proxy/management_integration_test.go +++ b/proxy/management_integration_test.go @@ -216,6 +216,10 @@ func (m *testProxyManager) GetActiveClusterAddresses(_ context.Context) ([]strin return nil, nil } +func (m *testProxyManager) GetActiveClusters(_ context.Context) ([]nbproxy.Cluster, error) { + return nil, nil +} + func (m *testProxyManager) CleanupStale(_ context.Context, _ time.Duration) error { return nil } @@ -323,6 +327,10 @@ func (m *storeBackedServiceManager) StopServiceFromPeer(_ context.Context, _, _, func (m *storeBackedServiceManager) StartExposeReaper(_ context.Context) {} +func (m *storeBackedServiceManager) GetActiveClusters(_ context.Context, _, _ string) ([]nbproxy.Cluster, error) { + return nil, nil +} + func strPtr(s string) *string { return &s } From a1858a9cb7fca7a18047b54f32e9b978342b29a6 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Wed, 18 Mar 2026 11:48:38 +0100 Subject: [PATCH 225/374] [management] recover proxies after cleanup if heartbeat is still running (#5617) --- .../modules/reverseproxy/proxy/manager.go | 2 +- .../reverseproxy/proxy/manager/manager.go | 8 +++--- .../reverseproxy/proxy/manager_mock.go | 8 +++--- management/internals/shared/grpc/proxy.go | 8 +++--- management/server/store/sql_store.go | 25 ++++++++++++++++--- management/server/store/store.go | 2 +- management/server/store/store_mock.go | 8 +++--- proxy/management_integration_test.go | 2 +- 8 files changed, 41 insertions(+), 22 deletions(-) diff --git a/management/internals/modules/reverseproxy/proxy/manager.go b/management/internals/modules/reverseproxy/proxy/manager.go index 262c2af9b..5b13cb0a2 100644 --- a/management/internals/modules/reverseproxy/proxy/manager.go +++ b/management/internals/modules/reverseproxy/proxy/manager.go @@ -13,7 +13,7 @@ import ( type Manager interface { Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string) error Disconnect(ctx context.Context, proxyID string) error - Heartbeat(ctx context.Context, proxyID string) error + Heartbeat(ctx context.Context, proxyID, clusterAddress, ipAddress string) error GetActiveClusterAddresses(ctx context.Context) ([]string, error) GetActiveClusters(ctx context.Context) ([]Cluster, error) CleanupStale(ctx context.Context, inactivityDuration time.Duration) error diff --git a/management/internals/modules/reverseproxy/proxy/manager/manager.go b/management/internals/modules/reverseproxy/proxy/manager/manager.go index 6350b36bd..dac6d3ce3 100644 --- a/management/internals/modules/reverseproxy/proxy/manager/manager.go +++ b/management/internals/modules/reverseproxy/proxy/manager/manager.go @@ -13,7 +13,7 @@ import ( // store defines the interface for proxy persistence operations type store interface { SaveProxy(ctx context.Context, p *proxy.Proxy) error - UpdateProxyHeartbeat(ctx context.Context, proxyID string) error + UpdateProxyHeartbeat(ctx context.Context, proxyID, clusterAddress, ipAddress string) error GetActiveProxyClusterAddresses(ctx context.Context) ([]string, error) GetActiveProxyClusters(ctx context.Context) ([]proxy.Cluster, error) CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error @@ -87,11 +87,13 @@ func (m Manager) Disconnect(ctx context.Context, proxyID string) error { } // Heartbeat updates the proxy's last seen timestamp -func (m Manager) Heartbeat(ctx context.Context, proxyID string) error { - if err := m.store.UpdateProxyHeartbeat(ctx, proxyID); err != nil { +func (m Manager) Heartbeat(ctx context.Context, proxyID, clusterAddress, ipAddress string) error { + if err := m.store.UpdateProxyHeartbeat(ctx, proxyID, clusterAddress, ipAddress); err != nil { log.WithContext(ctx).Debugf("failed to update proxy %s heartbeat: %v", proxyID, err) return err } + + log.WithContext(ctx).Tracef("updated heartbeat for proxy %s", proxyID) m.metrics.IncrementProxyHeartbeatCount() return nil } diff --git a/management/internals/modules/reverseproxy/proxy/manager_mock.go b/management/internals/modules/reverseproxy/proxy/manager_mock.go index e2dc4c2b6..ec67aaedc 100644 --- a/management/internals/modules/reverseproxy/proxy/manager_mock.go +++ b/management/internals/modules/reverseproxy/proxy/manager_mock.go @@ -109,17 +109,17 @@ func (mr *MockManagerMockRecorder) GetActiveClusters(ctx interface{}) *gomock.Ca } // Heartbeat mocks base method. -func (m *MockManager) Heartbeat(ctx context.Context, proxyID string) error { +func (m *MockManager) Heartbeat(ctx context.Context, proxyID, clusterAddress, ipAddress string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Heartbeat", ctx, proxyID) + ret := m.ctrl.Call(m, "Heartbeat", ctx, proxyID, clusterAddress, ipAddress) ret0, _ := ret[0].(error) return ret0 } // Heartbeat indicates an expected call of Heartbeat. -func (mr *MockManagerMockRecorder) Heartbeat(ctx, proxyID interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Heartbeat(ctx, proxyID, clusterAddress, ipAddress interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Heartbeat", reflect.TypeOf((*MockManager)(nil).Heartbeat), ctx, proxyID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Heartbeat", reflect.TypeOf((*MockManager)(nil).Heartbeat), ctx, proxyID, clusterAddress, ipAddress) } // MockController is a mock of Controller interface. diff --git a/management/internals/shared/grpc/proxy.go b/management/internals/shared/grpc/proxy.go index fd993fb40..01c52b138 100644 --- a/management/internals/shared/grpc/proxy.go +++ b/management/internals/shared/grpc/proxy.go @@ -123,7 +123,7 @@ func (s *ProxyServiceServer) cleanupStaleProxies(ctx context.Context) { case <-ctx.Done(): return case <-ticker.C: - if err := s.proxyManager.CleanupStale(ctx, 10*time.Minute); err != nil { + if err := s.proxyManager.CleanupStale(ctx, 1*time.Hour); err != nil { log.WithContext(ctx).Debugf("Failed to cleanup stale proxies: %v", err) } } @@ -215,7 +215,7 @@ func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest go s.sender(conn, errChan) // Start heartbeat goroutine - go s.heartbeat(connCtx, proxyID) + go s.heartbeat(connCtx, proxyID, proxyAddress, peerInfo) select { case err := <-errChan: @@ -226,14 +226,14 @@ func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest } // heartbeat updates the proxy's last_seen timestamp every minute -func (s *ProxyServiceServer) heartbeat(ctx context.Context, proxyID string) { +func (s *ProxyServiceServer) heartbeat(ctx context.Context, proxyID, clusterAddress, ipAddress string) { ticker := time.NewTicker(1 * time.Minute) defer ticker.Stop() for { select { case <-ticker.C: - if err := s.proxyManager.Heartbeat(ctx, proxyID); err != nil { + if err := s.proxyManager.Heartbeat(ctx, proxyID, clusterAddress, ipAddress); err != nil { log.WithContext(ctx).Debugf("Failed to update proxy %s heartbeat: %v", proxyID, err) } case <-ctx.Done(): diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 32f2f8540..2e499dc74 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -4997,7 +4997,6 @@ func (s *SqlStore) GetServiceByDomain(ctx context.Context, domain string) (*rpse return service, nil } - func (s *SqlStore) GetServices(ctx context.Context, lockStrength LockingStrength) ([]*rpservice.Service, error) { tx := s.db.Preload("Targets") if lockStrength != LockingStrengthNone { @@ -5408,17 +5407,35 @@ func (s *SqlStore) SaveProxy(ctx context.Context, p *proxy.Proxy) error { return nil } -// UpdateProxyHeartbeat updates the last_seen timestamp for a proxy -func (s *SqlStore) UpdateProxyHeartbeat(ctx context.Context, proxyID string) error { +// UpdateProxyHeartbeat updates the last_seen timestamp for a proxy or creates a new entry if it doesn't exist +func (s *SqlStore) UpdateProxyHeartbeat(ctx context.Context, proxyID, clusterAddress, ipAddress string) error { + now := time.Now() + result := s.db.WithContext(ctx). Model(&proxy.Proxy{}). Where("id = ? AND status = ?", proxyID, "connected"). - Update("last_seen", time.Now()) + Update("last_seen", now) if result.Error != nil { log.WithContext(ctx).Errorf("failed to update proxy heartbeat: %v", result.Error) return status.Errorf(status.Internal, "failed to update proxy heartbeat") } + + if result.RowsAffected == 0 { + p := &proxy.Proxy{ + ID: proxyID, + ClusterAddress: clusterAddress, + IPAddress: ipAddress, + LastSeen: now, + ConnectedAt: &now, + Status: "connected", + } + if err := s.db.WithContext(ctx).Save(p).Error; err != nil { + log.WithContext(ctx).Errorf("failed to create proxy on heartbeat: %v", err) + return status.Errorf(status.Internal, "failed to create proxy on heartbeat") + } + } + return nil } diff --git a/management/server/store/store.go b/management/server/store/store.go index 5dbfbd177..816dff4fa 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -284,7 +284,7 @@ type Store interface { DeleteServiceTargets(ctx context.Context, accountID string, serviceID string) error SaveProxy(ctx context.Context, proxy *proxy.Proxy) error - UpdateProxyHeartbeat(ctx context.Context, proxyID string) error + UpdateProxyHeartbeat(ctx context.Context, proxyID, clusterAddress, ipAddress string) error GetActiveProxyClusterAddresses(ctx context.Context) ([]string, error) GetActiveProxyClusters(ctx context.Context) ([]proxy.Cluster, error) CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error diff --git a/management/server/store/store_mock.go b/management/server/store/store_mock.go index 05a6fe39f..d779a7bcd 100644 --- a/management/server/store/store_mock.go +++ b/management/server/store/store_mock.go @@ -2939,17 +2939,17 @@ func (mr *MockStoreMockRecorder) UpdateGroups(ctx, accountID, groups interface{} } // UpdateProxyHeartbeat mocks base method. -func (m *MockStore) UpdateProxyHeartbeat(ctx context.Context, proxyID string) error { +func (m *MockStore) UpdateProxyHeartbeat(ctx context.Context, proxyID, clusterAddress, ipAddress string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateProxyHeartbeat", ctx, proxyID) + ret := m.ctrl.Call(m, "UpdateProxyHeartbeat", ctx, proxyID, clusterAddress, ipAddress) ret0, _ := ret[0].(error) return ret0 } // UpdateProxyHeartbeat indicates an expected call of UpdateProxyHeartbeat. -func (mr *MockStoreMockRecorder) UpdateProxyHeartbeat(ctx, proxyID interface{}) *gomock.Call { +func (mr *MockStoreMockRecorder) UpdateProxyHeartbeat(ctx, proxyID, clusterAddress, ipAddress interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateProxyHeartbeat", reflect.TypeOf((*MockStore)(nil).UpdateProxyHeartbeat), ctx, proxyID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateProxyHeartbeat", reflect.TypeOf((*MockStore)(nil).UpdateProxyHeartbeat), ctx, proxyID, clusterAddress, ipAddress) } // UpdateService mocks base method. diff --git a/proxy/management_integration_test.go b/proxy/management_integration_test.go index 2fcbfe3cf..b3f0b2989 100644 --- a/proxy/management_integration_test.go +++ b/proxy/management_integration_test.go @@ -208,7 +208,7 @@ func (m *testProxyManager) Disconnect(_ context.Context, _ string) error { return nil } -func (m *testProxyManager) Heartbeat(_ context.Context, _ string) error { +func (m *testProxyManager) Heartbeat(_ context.Context, _, _, _ string) error { return nil } From 5ffaa5cdd622c8def65134d3f593e4d7d23e3bc5 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 19 Mar 2026 22:53:05 +0800 Subject: [PATCH 226/374] [client] Fix duplicate log lines in containers (#5609) --- client/cmd/service_controller.go | 2 +- util/log.go | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/client/cmd/service_controller.go b/client/cmd/service_controller.go index 0545ce6b7..5fe318ddf 100644 --- a/client/cmd/service_controller.go +++ b/client/cmd/service_controller.go @@ -103,7 +103,7 @@ func (p *program) Stop(srv service.Service) error { // Common setup for service control commands func setupServiceControlCommand(cmd *cobra.Command, ctx context.Context, cancel context.CancelFunc) (service.Service, error) { - SetFlagsFromEnvVars(rootCmd) + // rootCmd env vars are already applied by PersistentPreRunE. SetFlagsFromEnvVars(serviceCmd) cmd.SetOut(cmd.OutOrStdout()) diff --git a/util/log.go b/util/log.go index 03547024a..b1de2d999 100644 --- a/util/log.go +++ b/util/log.go @@ -43,7 +43,13 @@ func InitLogger(logger *log.Logger, logLevel string, logs ...string) error { var writers []io.Writer logFmt := os.Getenv("NB_LOG_FORMAT") + seen := make(map[string]bool, len(logs)) for _, logPath := range logs { + if seen[logPath] { + continue + } + seen[logPath] = true + switch logPath { case LogSyslog: AddSyslogHookToLogger(logger) From b9462f5c6bd7a19165b263d7aa70e92fb7c191b5 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Fri, 20 Mar 2026 00:33:38 +0800 Subject: [PATCH 227/374] [client] Make raw table initialization non-fatal in firewall managers (#5621) --- client/firewall/iptables/manager_linux.go | 18 ++++++++++++++---- client/firewall/nftables/manager_linux.go | 2 +- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/client/firewall/iptables/manager_linux.go b/client/firewall/iptables/manager_linux.go index 716385705..04c338375 100644 --- a/client/firewall/iptables/manager_linux.go +++ b/client/firewall/iptables/manager_linux.go @@ -23,9 +23,10 @@ type Manager struct { wgIface iFaceMapper - ipv4Client *iptables.IPTables - aclMgr *aclManager - router *router + ipv4Client *iptables.IPTables + aclMgr *aclManager + router *router + rawSupported bool } // iFaceMapper defines subset methods of interface required for manager @@ -84,7 +85,7 @@ func (m *Manager) Init(stateManager *statemanager.Manager) error { } if err := m.initNoTrackChain(); err != nil { - return fmt.Errorf("init notrack chain: %w", err) + log.Warnf("raw table not available, notrack rules will be disabled: %v", err) } // persist early to ensure cleanup of chains @@ -318,6 +319,10 @@ func (m *Manager) SetupEBPFProxyNoTrack(proxyPort, wgPort uint16) error { m.mutex.Lock() defer m.mutex.Unlock() + if !m.rawSupported { + return fmt.Errorf("raw table not available") + } + wgPortStr := fmt.Sprintf("%d", wgPort) proxyPortStr := fmt.Sprintf("%d", proxyPort) @@ -375,12 +380,16 @@ func (m *Manager) initNoTrackChain() error { return fmt.Errorf("add prerouting jump rule: %w", err) } + m.rawSupported = true return nil } func (m *Manager) cleanupNoTrackChain() error { exists, err := m.ipv4Client.ChainExists(tableRaw, chainNameRaw) if err != nil { + if !m.rawSupported { + return nil + } return fmt.Errorf("check chain exists: %w", err) } if !exists { @@ -401,6 +410,7 @@ func (m *Manager) cleanupNoTrackChain() error { return fmt.Errorf("clear and delete chain: %w", err) } + m.rawSupported = false return nil } diff --git a/client/firewall/nftables/manager_linux.go b/client/firewall/nftables/manager_linux.go index acf482f86..f57b28abc 100644 --- a/client/firewall/nftables/manager_linux.go +++ b/client/firewall/nftables/manager_linux.go @@ -95,7 +95,7 @@ func (m *Manager) Init(stateManager *statemanager.Manager) error { } if err := m.initNoTrackChains(workTable); err != nil { - return fmt.Errorf("init notrack chains: %w", err) + log.Warnf("raw priority chains not available, notrack rules will be disabled: %v", err) } stateManager.RegisterState(&ShutdownState{}) From ab775089504d5254a5236c2a15d2a749476ad91f Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Fri, 20 Mar 2026 00:33:50 +0800 Subject: [PATCH 228/374] [client] Add env var for management gRPC max receive message size (#5622) --- client/grpc/dialer.go | 11 ++-- shared/management/client/grpc.go | 36 +++++++++- shared/management/client/grpc_test.go | 95 +++++++++++++++++++++++++++ 3 files changed, 136 insertions(+), 6 deletions(-) create mode 100644 shared/management/client/grpc_test.go diff --git a/client/grpc/dialer.go b/client/grpc/dialer.go index 54966b50e..9a6bc0670 100644 --- a/client/grpc/dialer.go +++ b/client/grpc/dialer.go @@ -28,7 +28,7 @@ func Backoff(ctx context.Context) backoff.BackOff { // CreateConnection creates a gRPC client connection with the appropriate transport options. // The component parameter specifies the WebSocket proxy component path (e.g., "/management", "/signal"). -func CreateConnection(ctx context.Context, addr string, tlsEnabled bool, component string) (*grpc.ClientConn, error) { +func CreateConnection(ctx context.Context, addr string, tlsEnabled bool, component string, extraOpts ...grpc.DialOption) (*grpc.ClientConn, error) { transportOption := grpc.WithTransportCredentials(insecure.NewCredentials()) // for js, the outer websocket layer takes care of tls if tlsEnabled && runtime.GOOS != "js" { @@ -46,9 +46,7 @@ func CreateConnection(ctx context.Context, addr string, tlsEnabled bool, compone connCtx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() - conn, err := grpc.DialContext( - connCtx, - addr, + opts := []grpc.DialOption{ transportOption, WithCustomDialer(tlsEnabled, component), grpc.WithBlock(), @@ -56,7 +54,10 @@ func CreateConnection(ctx context.Context, addr string, tlsEnabled bool, compone Time: 30 * time.Second, Timeout: 10 * time.Second, }), - ) + } + opts = append(opts, extraOpts...) + + conn, err := grpc.DialContext(connCtx, addr, opts...) if err != nil { return nil, fmt.Errorf("dial context: %w", err) } diff --git a/shared/management/client/grpc.go b/shared/management/client/grpc.go index 333f0bf00..e95db0089 100644 --- a/shared/management/client/grpc.go +++ b/shared/management/client/grpc.go @@ -5,6 +5,8 @@ import ( "errors" "fmt" "io" + "os" + "strconv" "sync" "time" @@ -29,6 +31,10 @@ import ( const ConnectTimeout = 10 * time.Second const ( + // EnvMaxRecvMsgSize overrides the default gRPC max receive message size (4 MB) + // for the management client connection. Value is in bytes. + EnvMaxRecvMsgSize = "NB_MANAGEMENT_GRPC_MAX_MSG_SIZE" + errMsgMgmtPublicKey = "failed getting Management Service public key: %s" errMsgNoMgmtConnection = "no connection to management" ) @@ -66,13 +72,41 @@ type ExposeResponse struct { PortAutoAssigned bool } +// MaxRecvMsgSize returns the configured max gRPC receive message size from +// the environment, or 0 if unset (which uses the gRPC default of 4 MB). +func MaxRecvMsgSize() int { + val := os.Getenv(EnvMaxRecvMsgSize) + if val == "" { + return 0 + } + + size, err := strconv.Atoi(val) + if err != nil { + log.Warnf("invalid %s value %q, using default: %v", EnvMaxRecvMsgSize, val, err) + return 0 + } + + if size <= 0 { + log.Warnf("invalid %s value %d, must be positive, using default", EnvMaxRecvMsgSize, size) + return 0 + } + + return size +} + // NewClient creates a new client to Management service func NewClient(ctx context.Context, addr string, ourPrivateKey wgtypes.Key, tlsEnabled bool) (*GrpcClient, error) { var conn *grpc.ClientConn + var extraOpts []grpc.DialOption + if maxSize := MaxRecvMsgSize(); maxSize > 0 { + extraOpts = append(extraOpts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxSize))) + log.Infof("management gRPC max receive message size set to %d bytes", maxSize) + } + operation := func() error { var err error - conn, err = nbgrpc.CreateConnection(ctx, addr, tlsEnabled, wsproxy.ManagementComponent) + conn, err = nbgrpc.CreateConnection(ctx, addr, tlsEnabled, wsproxy.ManagementComponent, extraOpts...) if err != nil { return fmt.Errorf("create connection: %w", err) } diff --git a/shared/management/client/grpc_test.go b/shared/management/client/grpc_test.go new file mode 100644 index 000000000..462cc43af --- /dev/null +++ b/shared/management/client/grpc_test.go @@ -0,0 +1,95 @@ +package client + +import ( + "context" + "net" + "os" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + mgmtProto "github.com/netbirdio/netbird/shared/management/proto" +) + +func TestMaxRecvMsgSize(t *testing.T) { + tests := []struct { + name string + envValue string + expected int + }{ + {name: "unset returns 0", envValue: "", expected: 0}, + {name: "valid value", envValue: "10485760", expected: 10485760}, + {name: "non-numeric returns 0", envValue: "abc", expected: 0}, + {name: "negative returns 0", envValue: "-1", expected: 0}, + {name: "zero returns 0", envValue: "0", expected: 0}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Setenv(EnvMaxRecvMsgSize, tt.envValue) + if tt.envValue == "" { + os.Unsetenv(EnvMaxRecvMsgSize) + } + assert.Equal(t, tt.expected, MaxRecvMsgSize()) + }) + } +} + +// largeSyncServer implements just the Sync RPC, returning a response larger than the default 4MB limit. +type largeSyncServer struct { + mgmtProto.UnimplementedManagementServiceServer + responseSize int +} + +func (s *largeSyncServer) GetServerKey(_ context.Context, _ *mgmtProto.Empty) (*mgmtProto.ServerKeyResponse, error) { + // Return a response with a large WiretrusteeConfig to exceed the default limit. + padding := strings.Repeat("x", s.responseSize) + return &mgmtProto.ServerKeyResponse{ + Key: padding, + }, nil +} + +func TestMaxRecvMsgSizeIntegration(t *testing.T) { + const payloadSize = 5 * 1024 * 1024 // 5MB, exceeds 4MB default + + lis, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + srv := grpc.NewServer() + mgmtProto.RegisterManagementServiceServer(srv, &largeSyncServer{responseSize: payloadSize}) + go func() { _ = srv.Serve(lis) }() + t.Cleanup(srv.Stop) + + t.Run("default limit rejects large message", func(t *testing.T) { + conn, err := grpc.NewClient( + lis.Addr().String(), + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + require.NoError(t, err) + defer conn.Close() + + client := mgmtProto.NewManagementServiceClient(conn) + _, err = client.GetServerKey(context.Background(), &mgmtProto.Empty{}) + require.Error(t, err) + assert.Contains(t, err.Error(), "received message larger than max") + }) + + t.Run("increased limit accepts large message", func(t *testing.T) { + conn, err := grpc.NewClient( + lis.Addr().String(), + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(10*1024*1024)), + ) + require.NoError(t, err) + defer conn.Close() + + client := mgmtProto.NewManagementServiceClient(conn) + resp, err := client.GetServerKey(context.Background(), &mgmtProto.Empty{}) + require.NoError(t, err) + assert.Len(t, resp.Key, payloadSize) + }) +} From b550a2faced7e9fa8e70778eb861eb8afd48cdf6 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Fri, 20 Mar 2026 18:29:50 +0800 Subject: [PATCH 229/374] [management, proxy] Add require_subdomain capability for proxy clusters (#5628) --- .../modules/reverseproxy/domain/domain.go | 3 + .../reverseproxy/domain/manager/api.go | 1 + .../domain/manager/domain_test.go | 172 ++++++++++++++++++ .../reverseproxy/domain/manager/manager.go | 20 +- .../modules/reverseproxy/proxy/manager.go | 1 + .../reverseproxy/proxy/manager/controller.go | 6 + .../reverseproxy/proxy/manager_mock.go | 14 ++ .../service/manager/l4_port_test.go | 1 + .../reverseproxy/service/manager/manager.go | 110 ++++++----- .../service/manager/manager_test.go | 66 +++++++ management/internals/shared/grpc/proxy.go | 29 +++ .../internals/shared/grpc/proxy_test.go | 4 + proxy/cmd/proxy/cmd/root.go | 3 + proxy/management_integration_test.go | 4 + proxy/server.go | 5 + shared/management/http/api/openapi.yml | 4 + shared/management/http/api/types.gen.go | 3 + shared/management/proto/proxy_service.pb.go | 22 ++- shared/management/proto/proxy_service.proto | 3 + 19 files changed, 419 insertions(+), 52 deletions(-) create mode 100644 management/internals/modules/reverseproxy/domain/manager/domain_test.go diff --git a/management/internals/modules/reverseproxy/domain/domain.go b/management/internals/modules/reverseproxy/domain/domain.go index 861d026a7..859f1c5b2 100644 --- a/management/internals/modules/reverseproxy/domain/domain.go +++ b/management/internals/modules/reverseproxy/domain/domain.go @@ -17,6 +17,9 @@ type Domain struct { // SupportsCustomPorts is populated at query time for free domains from the // proxy cluster capabilities. Not persisted. SupportsCustomPorts *bool `gorm:"-"` + // RequireSubdomain is populated at query time. When true, the domain + // cannot be used bare and a subdomain label must be prepended. Not persisted. + RequireSubdomain *bool `gorm:"-"` } // EventMeta returns activity event metadata for a domain diff --git a/management/internals/modules/reverseproxy/domain/manager/api.go b/management/internals/modules/reverseproxy/domain/manager/api.go index d26a6a418..640ab28a5 100644 --- a/management/internals/modules/reverseproxy/domain/manager/api.go +++ b/management/internals/modules/reverseproxy/domain/manager/api.go @@ -47,6 +47,7 @@ func domainToApi(d *domain.Domain) api.ReverseProxyDomain { Type: domainTypeToApi(d.Type), Validated: d.Validated, SupportsCustomPorts: d.SupportsCustomPorts, + RequireSubdomain: d.RequireSubdomain, } if d.TargetCluster != "" { resp.TargetCluster = &d.TargetCluster diff --git a/management/internals/modules/reverseproxy/domain/manager/domain_test.go b/management/internals/modules/reverseproxy/domain/manager/domain_test.go new file mode 100644 index 000000000..523920a99 --- /dev/null +++ b/management/internals/modules/reverseproxy/domain/manager/domain_test.go @@ -0,0 +1,172 @@ +package manager + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" +) + +func TestExtractClusterFromFreeDomain(t *testing.T) { + clusters := []string{"eu1.proxy.netbird.io", "us1.proxy.netbird.io"} + + tests := []struct { + name string + domain string + wantOK bool + wantVal string + }{ + { + name: "subdomain of cluster matches", + domain: "myapp.eu1.proxy.netbird.io", + wantOK: true, + wantVal: "eu1.proxy.netbird.io", + }, + { + name: "deep subdomain of cluster matches", + domain: "foo.bar.eu1.proxy.netbird.io", + wantOK: true, + wantVal: "eu1.proxy.netbird.io", + }, + { + name: "bare cluster domain matches", + domain: "eu1.proxy.netbird.io", + wantOK: true, + wantVal: "eu1.proxy.netbird.io", + }, + { + name: "unrelated domain does not match", + domain: "example.com", + wantOK: false, + }, + { + name: "partial suffix does not match", + domain: "fakeu1.proxy.netbird.io", + wantOK: false, + }, + { + name: "second cluster matches", + domain: "app.us1.proxy.netbird.io", + wantOK: true, + wantVal: "us1.proxy.netbird.io", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + cluster, ok := ExtractClusterFromFreeDomain(tc.domain, clusters) + assert.Equal(t, tc.wantOK, ok) + if ok { + assert.Equal(t, tc.wantVal, cluster) + } + }) + } +} + +func TestExtractClusterFromCustomDomains(t *testing.T) { + customDomains := []*domain.Domain{ + {Domain: "example.com", TargetCluster: "eu1.proxy.netbird.io"}, + {Domain: "proxy.corp.io", TargetCluster: "us1.proxy.netbird.io"}, + } + + tests := []struct { + name string + domain string + wantOK bool + wantVal string + }{ + { + name: "subdomain of custom domain matches", + domain: "app.example.com", + wantOK: true, + wantVal: "eu1.proxy.netbird.io", + }, + { + name: "bare custom domain matches", + domain: "example.com", + wantOK: true, + wantVal: "eu1.proxy.netbird.io", + }, + { + name: "deep subdomain of custom domain matches", + domain: "a.b.example.com", + wantOK: true, + wantVal: "eu1.proxy.netbird.io", + }, + { + name: "subdomain of multi-level custom domain matches", + domain: "app.proxy.corp.io", + wantOK: true, + wantVal: "us1.proxy.netbird.io", + }, + { + name: "bare multi-level custom domain matches", + domain: "proxy.corp.io", + wantOK: true, + wantVal: "us1.proxy.netbird.io", + }, + { + name: "unrelated domain does not match", + domain: "other.com", + wantOK: false, + }, + { + name: "partial suffix does not match custom domain", + domain: "fakeexample.com", + wantOK: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + cluster, ok := extractClusterFromCustomDomains(tc.domain, customDomains) + assert.Equal(t, tc.wantOK, ok) + if ok { + assert.Equal(t, tc.wantVal, cluster) + } + }) + } +} + +func TestExtractClusterFromCustomDomains_OverlappingDomains(t *testing.T) { + customDomains := []*domain.Domain{ + {Domain: "example.com", TargetCluster: "cluster-generic"}, + {Domain: "app.example.com", TargetCluster: "cluster-app"}, + } + + tests := []struct { + name string + domain string + wantVal string + }{ + { + name: "exact match on more specific domain", + domain: "app.example.com", + wantVal: "cluster-app", + }, + { + name: "subdomain of more specific domain", + domain: "api.app.example.com", + wantVal: "cluster-app", + }, + { + name: "subdomain of generic domain", + domain: "other.example.com", + wantVal: "cluster-generic", + }, + { + name: "bare generic domain", + domain: "example.com", + wantVal: "cluster-generic", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + cluster, ok := extractClusterFromCustomDomains(tc.domain, customDomains) + assert.True(t, ok) + assert.Equal(t, tc.wantVal, cluster) + }) + } +} diff --git a/management/internals/modules/reverseproxy/domain/manager/manager.go b/management/internals/modules/reverseproxy/domain/manager/manager.go index 813027ea2..901cdf0e3 100644 --- a/management/internals/modules/reverseproxy/domain/manager/manager.go +++ b/management/internals/modules/reverseproxy/domain/manager/manager.go @@ -35,6 +35,7 @@ type proxyManager interface { type clusterCapabilities interface { ClusterSupportsCustomPorts(clusterAddr string) *bool + ClusterRequireSubdomain(clusterAddr string) *bool } type Manager struct { @@ -98,6 +99,7 @@ func (m Manager) GetDomains(ctx context.Context, accountID, userID string) ([]*d } if m.clusterCapabilities != nil { d.SupportsCustomPorts = m.clusterCapabilities.ClusterSupportsCustomPorts(cluster) + d.RequireSubdomain = m.clusterCapabilities.ClusterRequireSubdomain(cluster) } ret = append(ret, d) } @@ -115,6 +117,8 @@ func (m Manager) GetDomains(ctx context.Context, accountID, userID string) ([]*d if m.clusterCapabilities != nil && d.TargetCluster != "" { cd.SupportsCustomPorts = m.clusterCapabilities.ClusterSupportsCustomPorts(d.TargetCluster) } + // Custom domains never require a subdomain by default since + // the account owns them and should be able to use the bare domain. ret = append(ret, cd) } @@ -302,13 +306,19 @@ func (m Manager) DeriveClusterFromDomain(ctx context.Context, accountID, domain return "", fmt.Errorf("domain %s does not match any available proxy cluster", domain) } -func extractClusterFromCustomDomains(domain string, customDomains []*domain.Domain) (string, bool) { - for _, customDomain := range customDomains { - if strings.HasSuffix(domain, "."+customDomain.Domain) { - return customDomain.TargetCluster, true +func extractClusterFromCustomDomains(serviceDomain string, customDomains []*domain.Domain) (string, bool) { + bestCluster := "" + bestLen := -1 + for _, cd := range customDomains { + if serviceDomain != cd.Domain && !strings.HasSuffix(serviceDomain, "."+cd.Domain) { + continue + } + if l := len(cd.Domain); l > bestLen { + bestLen = l + bestCluster = cd.TargetCluster } } - return "", false + return bestCluster, bestLen >= 0 } // ExtractClusterFromFreeDomain extracts the cluster address from a free domain. diff --git a/management/internals/modules/reverseproxy/proxy/manager.go b/management/internals/modules/reverseproxy/proxy/manager.go index 5b13cb0a2..9b0de53b4 100644 --- a/management/internals/modules/reverseproxy/proxy/manager.go +++ b/management/internals/modules/reverseproxy/proxy/manager.go @@ -35,4 +35,5 @@ type Controller interface { UnregisterProxyFromCluster(ctx context.Context, clusterAddr, proxyID string) error GetProxiesForCluster(clusterAddr string) []string ClusterSupportsCustomPorts(clusterAddr string) *bool + ClusterRequireSubdomain(clusterAddr string) *bool } diff --git a/management/internals/modules/reverseproxy/proxy/manager/controller.go b/management/internals/modules/reverseproxy/proxy/manager/controller.go index acb49c45b..05a0c9048 100644 --- a/management/internals/modules/reverseproxy/proxy/manager/controller.go +++ b/management/internals/modules/reverseproxy/proxy/manager/controller.go @@ -77,6 +77,12 @@ func (c *GRPCController) ClusterSupportsCustomPorts(clusterAddr string) *bool { return c.proxyGRPCServer.ClusterSupportsCustomPorts(clusterAddr) } +// ClusterRequireSubdomain returns whether the cluster requires a subdomain label. +// Returns nil when no proxy has reported the capability (defaults to false). +func (c *GRPCController) ClusterRequireSubdomain(clusterAddr string) *bool { + return c.proxyGRPCServer.ClusterRequireSubdomain(clusterAddr) +} + // GetProxiesForCluster returns all proxy IDs registered for a specific cluster. func (c *GRPCController) GetProxiesForCluster(clusterAddr string) []string { proxySet, ok := c.clusterProxies.Load(clusterAddr) diff --git a/management/internals/modules/reverseproxy/proxy/manager_mock.go b/management/internals/modules/reverseproxy/proxy/manager_mock.go index ec67aaedc..da3df12a2 100644 --- a/management/internals/modules/reverseproxy/proxy/manager_mock.go +++ b/management/internals/modules/reverseproxy/proxy/manager_mock.go @@ -159,6 +159,20 @@ func (mr *MockControllerMockRecorder) ClusterSupportsCustomPorts(clusterAddr int return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterSupportsCustomPorts", reflect.TypeOf((*MockController)(nil).ClusterSupportsCustomPorts), clusterAddr) } +// ClusterRequireSubdomain mocks base method. +func (m *MockController) ClusterRequireSubdomain(clusterAddr string) *bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClusterRequireSubdomain", clusterAddr) + ret0, _ := ret[0].(*bool) + return ret0 +} + +// ClusterRequireSubdomain indicates an expected call of ClusterRequireSubdomain. +func (mr *MockControllerMockRecorder) ClusterRequireSubdomain(clusterAddr interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterRequireSubdomain", reflect.TypeOf((*MockController)(nil).ClusterRequireSubdomain), clusterAddr) +} + // GetOIDCValidationConfig mocks base method. func (m *MockController) GetOIDCValidationConfig() OIDCValidationConfig { m.ctrl.T.Helper() diff --git a/management/internals/modules/reverseproxy/service/manager/l4_port_test.go b/management/internals/modules/reverseproxy/service/manager/l4_port_test.go index c7a61ddcf..8b652c7e1 100644 --- a/management/internals/modules/reverseproxy/service/manager/l4_port_test.go +++ b/management/internals/modules/reverseproxy/service/manager/l4_port_test.go @@ -76,6 +76,7 @@ func setupL4Test(t *testing.T, customPortsSupported *bool) (*Manager, store.Stor mockCtrl := proxy.NewMockController(ctrl) mockCtrl.EXPECT().ClusterSupportsCustomPorts(gomock.Any()).Return(customPortsSupported).AnyTimes() + mockCtrl.EXPECT().ClusterRequireSubdomain(gomock.Any()).Return((*bool)(nil)).AnyTimes() mockCtrl.EXPECT().SendServiceUpdateToCluster(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() mockCtrl.EXPECT().GetOIDCValidationConfig().Return(proxy.OIDCValidationConfig{}).AnyTimes() diff --git a/management/internals/modules/reverseproxy/service/manager/manager.go b/management/internals/modules/reverseproxy/service/manager/manager.go index a7173a131..808fdaf60 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager.go +++ b/management/internals/modules/reverseproxy/service/manager/manager.go @@ -236,6 +236,10 @@ func (m *Manager) initializeServiceForCreate(ctx context.Context, accountID stri return status.Errorf(status.PreconditionFailed, "could not derive cluster from domain %s: %v", service.Domain, err) } service.ProxyCluster = proxyCluster + + if err := m.validateSubdomainRequirement(service.Domain, proxyCluster); err != nil { + return err + } } service.AccountID = accountID @@ -261,6 +265,20 @@ func (m *Manager) initializeServiceForCreate(ctx context.Context, accountID stri return nil } +// validateSubdomainRequirement checks whether the domain can be used bare +// (without a subdomain label) on the given cluster. If the cluster reports +// require_subdomain=true and the domain equals the cluster domain, it rejects. +func (m *Manager) validateSubdomainRequirement(domain, cluster string) error { + if domain != cluster { + return nil + } + requireSub := m.proxyController.ClusterRequireSubdomain(cluster) + if requireSub != nil && *requireSub { + return status.Errorf(status.InvalidArgument, "domain %s requires a subdomain label", domain) + } + return nil +} + func (m *Manager) persistNewService(ctx context.Context, accountID string, svc *service.Service) error { return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { if svc.Domain != "" { @@ -489,53 +507,61 @@ func (m *Manager) persistServiceUpdate(ctx context.Context, accountID string, se var updateInfo serviceUpdateInfo err := m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { - existingService, err := transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, service.ID) - if err != nil { - return err - } - - if err := validateProtocolChange(existingService.Mode, service.Mode); err != nil { - return err - } - - updateInfo.oldCluster = existingService.ProxyCluster - updateInfo.domainChanged = existingService.Domain != service.Domain - - if updateInfo.domainChanged { - if err := m.handleDomainChange(ctx, transaction, accountID, service); err != nil { - return err - } - } else { - service.ProxyCluster = existingService.ProxyCluster - } - - m.preserveExistingAuthSecrets(service, existingService) - if err := validateHeaderAuthValues(service.Auth.HeaderAuths); err != nil { - return err - } - m.preserveServiceMetadata(service, existingService) - m.preserveListenPort(service, existingService) - updateInfo.serviceEnabledChanged = existingService.Enabled != service.Enabled - - if err := m.ensureL4Port(ctx, transaction, service); err != nil { - return err - } - if err := m.checkPortConflict(ctx, transaction, service); err != nil { - return err - } - if err := validateTargetReferences(ctx, transaction, accountID, service.Targets); err != nil { - return err - } - if err := transaction.UpdateService(ctx, service); err != nil { - return fmt.Errorf("update service: %w", err) - } - - return nil + return m.executeServiceUpdate(ctx, transaction, accountID, service, &updateInfo) }) return &updateInfo, err } +func (m *Manager) executeServiceUpdate(ctx context.Context, transaction store.Store, accountID string, service *service.Service, updateInfo *serviceUpdateInfo) error { + existingService, err := transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, service.ID) + if err != nil { + return err + } + + if err := validateProtocolChange(existingService.Mode, service.Mode); err != nil { + return err + } + + updateInfo.oldCluster = existingService.ProxyCluster + updateInfo.domainChanged = existingService.Domain != service.Domain + + if updateInfo.domainChanged { + if err := m.handleDomainChange(ctx, transaction, accountID, service); err != nil { + return err + } + } else { + service.ProxyCluster = existingService.ProxyCluster + } + + if err := m.validateSubdomainRequirement(service.Domain, service.ProxyCluster); err != nil { + return err + } + + m.preserveExistingAuthSecrets(service, existingService) + if err := validateHeaderAuthValues(service.Auth.HeaderAuths); err != nil { + return err + } + m.preserveServiceMetadata(service, existingService) + m.preserveListenPort(service, existingService) + updateInfo.serviceEnabledChanged = existingService.Enabled != service.Enabled + + if err := m.ensureL4Port(ctx, transaction, service); err != nil { + return err + } + if err := m.checkPortConflict(ctx, transaction, service); err != nil { + return err + } + if err := validateTargetReferences(ctx, transaction, accountID, service.Targets); err != nil { + return err + } + if err := transaction.UpdateService(ctx, service); err != nil { + return fmt.Errorf("update service: %w", err) + } + + return nil +} + func (m *Manager) handleDomainChange(ctx context.Context, transaction store.Store, accountID string, svc *service.Service) error { if err := m.checkDomainAvailable(ctx, transaction, svc.Domain, svc.ID); err != nil { return err diff --git a/management/internals/modules/reverseproxy/service/manager/manager_test.go b/management/internals/modules/reverseproxy/service/manager/manager_test.go index 0c34f81a2..18e1be26e 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/service/manager/manager_test.go @@ -1272,3 +1272,69 @@ func TestValidateTargetReferences_PeerValid(t *testing.T) { } require.NoError(t, validateTargetReferences(ctx, mockStore, accountID, targets)) } + +func TestValidateSubdomainRequirement(t *testing.T) { + ptrBool := func(b bool) *bool { return &b } + + tests := []struct { + name string + domain string + cluster string + requireSubdomain *bool + wantErr bool + }{ + { + name: "subdomain present, require_subdomain true", + domain: "app.eu1.proxy.netbird.io", + cluster: "eu1.proxy.netbird.io", + requireSubdomain: ptrBool(true), + wantErr: false, + }, + { + name: "bare cluster domain, require_subdomain true", + domain: "eu1.proxy.netbird.io", + cluster: "eu1.proxy.netbird.io", + requireSubdomain: ptrBool(true), + wantErr: true, + }, + { + name: "bare cluster domain, require_subdomain false", + domain: "eu1.proxy.netbird.io", + cluster: "eu1.proxy.netbird.io", + requireSubdomain: ptrBool(false), + wantErr: false, + }, + { + name: "bare cluster domain, require_subdomain nil (default)", + domain: "eu1.proxy.netbird.io", + cluster: "eu1.proxy.netbird.io", + requireSubdomain: nil, + wantErr: false, + }, + { + name: "custom domain apex is not the cluster", + domain: "example.com", + cluster: "eu1.proxy.netbird.io", + requireSubdomain: ptrBool(true), + wantErr: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + + mockCtrl := proxy.NewMockController(ctrl) + mockCtrl.EXPECT().ClusterRequireSubdomain(tc.cluster).Return(tc.requireSubdomain).AnyTimes() + + mgr := &Manager{proxyController: mockCtrl} + err := mgr.validateSubdomainRequirement(tc.domain, tc.cluster) + if tc.wantErr { + require.Error(t, err) + assert.Contains(t, err.Error(), "requires a subdomain label") + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/management/internals/shared/grpc/proxy.go b/management/internals/shared/grpc/proxy.go index 01c52b138..5fa382af0 100644 --- a/management/internals/shared/grpc/proxy.go +++ b/management/internals/shared/grpc/proxy.go @@ -537,6 +537,35 @@ func (s *ProxyServiceServer) ClusterSupportsCustomPorts(clusterAddr string) *boo return nil } +// ClusterRequireSubdomain returns whether any connected proxy in the given +// cluster reports that a subdomain is required. Returns nil if no proxy has +// reported the capability (defaults to not required). +func (s *ProxyServiceServer) ClusterRequireSubdomain(clusterAddr string) *bool { + if s.proxyController == nil { + return nil + } + + var hasCapabilities bool + for _, pid := range s.proxyController.GetProxiesForCluster(clusterAddr) { + connVal, ok := s.connectedProxies.Load(pid) + if !ok { + continue + } + conn := connVal.(*proxyConnection) + if conn.capabilities == nil || conn.capabilities.RequireSubdomain == nil { + continue + } + if *conn.capabilities.RequireSubdomain { + return ptr(true) + } + hasCapabilities = true + } + if hasCapabilities { + return ptr(false) + } + return nil +} + func (s *ProxyServiceServer) Authenticate(ctx context.Context, req *proto.AuthenticateRequest) (*proto.AuthenticateResponse, error) { service, err := s.serviceManager.GetServiceByID(ctx, req.GetAccountId(), req.GetId()) if err != nil { diff --git a/management/internals/shared/grpc/proxy_test.go b/management/internals/shared/grpc/proxy_test.go index 1a4ea3330..83c99020d 100644 --- a/management/internals/shared/grpc/proxy_test.go +++ b/management/internals/shared/grpc/proxy_test.go @@ -57,6 +57,10 @@ func (c *testProxyController) ClusterSupportsCustomPorts(_ string) *bool { return ptr(true) } +func (c *testProxyController) ClusterRequireSubdomain(_ string) *bool { + return nil +} + func (c *testProxyController) GetProxiesForCluster(clusterAddr string) []string { c.mu.Lock() defer c.mu.Unlock() diff --git a/proxy/cmd/proxy/cmd/root.go b/proxy/cmd/proxy/cmd/root.go index a2252cc20..1c36ee334 100644 --- a/proxy/cmd/proxy/cmd/root.go +++ b/proxy/cmd/proxy/cmd/root.go @@ -62,6 +62,7 @@ var ( proxyProtocol bool preSharedKey string supportsCustomPorts bool + requireSubdomain bool geoDataDir string ) @@ -101,6 +102,7 @@ func init() { rootCmd.Flags().BoolVar(&proxyProtocol, "proxy-protocol", envBoolOrDefault("NB_PROXY_PROXY_PROTOCOL", false), "Enable PROXY protocol on TCP listeners to preserve client IPs behind L4 proxies") rootCmd.Flags().StringVar(&preSharedKey, "preshared-key", envStringOrDefault("NB_PROXY_PRESHARED_KEY", ""), "Define a pre-shared key for the tunnel between proxy and peers") rootCmd.Flags().BoolVar(&supportsCustomPorts, "supports-custom-ports", envBoolOrDefault("NB_PROXY_SUPPORTS_CUSTOM_PORTS", true), "Whether the proxy can bind arbitrary ports for UDP/TCP passthrough") + rootCmd.Flags().BoolVar(&requireSubdomain, "require-subdomain", envBoolOrDefault("NB_PROXY_REQUIRE_SUBDOMAIN", false), "Require a subdomain label in front of the cluster domain") rootCmd.Flags().DurationVar(&maxDialTimeout, "max-dial-timeout", envDurationOrDefault("NB_PROXY_MAX_DIAL_TIMEOUT", 0), "Cap per-service backend dial timeout (0 = no cap)") rootCmd.Flags().DurationVar(&maxSessionIdleTimeout, "max-session-idle-timeout", envDurationOrDefault("NB_PROXY_MAX_SESSION_IDLE_TIMEOUT", 0), "Cap per-service session idle timeout (0 = no cap)") rootCmd.Flags().StringVar(&geoDataDir, "geo-data-dir", envStringOrDefault("NB_PROXY_GEO_DATA_DIR", "/var/lib/netbird/geolocation"), "Directory for the GeoLite2 MMDB file (auto-downloaded if missing)") @@ -181,6 +183,7 @@ func runServer(cmd *cobra.Command, args []string) error { ProxyProtocol: proxyProtocol, PreSharedKey: preSharedKey, SupportsCustomPorts: supportsCustomPorts, + RequireSubdomain: requireSubdomain, MaxDialTimeout: maxDialTimeout, MaxSessionIdleTimeout: maxSessionIdleTimeout, GeoDataDir: geoDataDir, diff --git a/proxy/management_integration_test.go b/proxy/management_integration_test.go index b3f0b2989..c30234b5a 100644 --- a/proxy/management_integration_test.go +++ b/proxy/management_integration_test.go @@ -251,6 +251,10 @@ func (c *testProxyController) ClusterSupportsCustomPorts(_ string) *bool { return nil } +func (c *testProxyController) ClusterRequireSubdomain(_ string) *bool { + return nil +} + // storeBackedServiceManager reads directly from the real store. type storeBackedServiceManager struct { store store.Store diff --git a/proxy/server.go b/proxy/server.go index c4d12859b..acfe3c12d 100644 --- a/proxy/server.go +++ b/proxy/server.go @@ -163,6 +163,10 @@ type Server struct { // SupportsCustomPorts indicates whether the proxy can bind arbitrary // ports for TCP/UDP/TLS services. SupportsCustomPorts bool + // RequireSubdomain indicates whether a subdomain label is required + // in front of this proxy's cluster domain. When true, accounts cannot + // create services on the bare cluster domain. + RequireSubdomain bool // MaxDialTimeout caps the per-service backend dial timeout. // When the API sends a timeout, it is clamped to this value. // When the API sends no timeout, this value is used as the default. @@ -919,6 +923,7 @@ func (s *Server) newManagementMappingWorker(ctx context.Context, client proto.Pr Address: s.ProxyURL, Capabilities: &proto.ProxyCapabilities{ SupportsCustomPorts: &s.SupportsCustomPorts, + RequireSubdomain: &s.RequireSubdomain, }, }) if err != nil { diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 66f39b92f..6b766731c 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -3336,6 +3336,10 @@ components: type: boolean description: Whether the cluster supports binding arbitrary TCP/UDP ports example: true + require_subdomain: + type: boolean + description: Whether a subdomain label is required in front of this domain. When true, the domain cannot be used bare. + example: false required: - id - domain diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index 693449d14..067cc4093 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -3406,6 +3406,9 @@ type ReverseProxyDomain struct { // Id Domain ID Id string `json:"id"` + // RequireSubdomain Whether a subdomain label is required in front of this domain. When true, the domain cannot be used bare. + RequireSubdomain *bool `json:"require_subdomain,omitempty"` + // SupportsCustomPorts Whether the cluster supports binding arbitrary TCP/UDP ports SupportsCustomPorts *bool `json:"supports_custom_ports,omitempty"` diff --git a/shared/management/proto/proxy_service.pb.go b/shared/management/proto/proxy_service.pb.go index e5a2d6a98..93295e857 100644 --- a/shared/management/proto/proxy_service.pb.go +++ b/shared/management/proto/proxy_service.pb.go @@ -181,8 +181,11 @@ type ProxyCapabilities struct { state protoimpl.MessageState `protogen:"open.v1"` // Whether the proxy can bind arbitrary ports for TCP/UDP/TLS services. SupportsCustomPorts *bool `protobuf:"varint,1,opt,name=supports_custom_ports,json=supportsCustomPorts,proto3,oneof" json:"supports_custom_ports,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Whether the proxy requires a subdomain label in front of its cluster domain. + // When true, tenants cannot use the cluster domain bare. + RequireSubdomain *bool `protobuf:"varint,2,opt,name=require_subdomain,json=requireSubdomain,proto3,oneof" json:"require_subdomain,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ProxyCapabilities) Reset() { @@ -222,6 +225,13 @@ func (x *ProxyCapabilities) GetSupportsCustomPorts() bool { return false } +func (x *ProxyCapabilities) GetRequireSubdomain() bool { + if x != nil && x.RequireSubdomain != nil { + return *x.RequireSubdomain + } + return false +} + // GetMappingUpdateRequest is sent to initialise a mapping stream. type GetMappingUpdateRequest struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -1872,10 +1882,12 @@ var File_proxy_service_proto protoreflect.FileDescriptor const file_proxy_service_proto_rawDesc = "" + "\n" + "\x13proxy_service.proto\x12\n" + - "management\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"f\n" + + "management\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xae\x01\n" + "\x11ProxyCapabilities\x127\n" + - "\x15supports_custom_ports\x18\x01 \x01(\bH\x00R\x13supportsCustomPorts\x88\x01\x01B\x18\n" + - "\x16_supports_custom_ports\"\xe6\x01\n" + + "\x15supports_custom_ports\x18\x01 \x01(\bH\x00R\x13supportsCustomPorts\x88\x01\x01\x120\n" + + "\x11require_subdomain\x18\x02 \x01(\bH\x01R\x10requireSubdomain\x88\x01\x01B\x18\n" + + "\x16_supports_custom_portsB\x14\n" + + "\x12_require_subdomain\"\xe6\x01\n" + "\x17GetMappingUpdateRequest\x12\x19\n" + "\bproxy_id\x18\x01 \x01(\tR\aproxyId\x12\x18\n" + "\aversion\x18\x02 \x01(\tR\aversion\x129\n" + diff --git a/shared/management/proto/proxy_service.proto b/shared/management/proto/proxy_service.proto index 2d7bed548..f77071eb0 100644 --- a/shared/management/proto/proxy_service.proto +++ b/shared/management/proto/proxy_service.proto @@ -31,6 +31,9 @@ service ProxyService { message ProxyCapabilities { // Whether the proxy can bind arbitrary ports for TCP/UDP/TLS services. optional bool supports_custom_ports = 1; + // Whether the proxy requires a subdomain label in front of its cluster domain. + // When true, accounts cannot use the cluster domain bare. + optional bool require_subdomain = 2; } // GetMappingUpdateRequest is sent to initialise a mapping stream. From 82762280eec3e33c738cafb0d874e96a3dd9139c Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Sun, 22 Mar 2026 19:39:40 +0800 Subject: [PATCH 230/374] [client] Add health check flag to status command and expose daemon status in output (#5650) --- client/Dockerfile | 4 +- client/Dockerfile-rootless | 4 +- client/cmd/status.go | 107 ++++++++++++++++++++++++++++++-- client/internal/debug/debug.go | 6 +- client/netbird-entrypoint.sh | 80 ++++++++++-------------- client/status/status.go | 45 ++++++++++++-- client/status/status_test.go | 8 ++- client/wasm/cmd/main.go | 3 +- proxy/internal/debug/handler.go | 18 +++--- 9 files changed, 198 insertions(+), 77 deletions(-) diff --git a/client/Dockerfile b/client/Dockerfile index 13e44096f..66a418828 100644 --- a/client/Dockerfile +++ b/client/Dockerfile @@ -17,8 +17,8 @@ ENV \ NETBIRD_BIN="/usr/local/bin/netbird" \ NB_LOG_FILE="console,/var/log/netbird/client.log" \ NB_DAEMON_ADDR="unix:///var/run/netbird.sock" \ - NB_ENTRYPOINT_SERVICE_TIMEOUT="5" \ - NB_ENTRYPOINT_LOGIN_TIMEOUT="5" + NB_ENTRYPOINT_SERVICE_TIMEOUT="30" \ + NB_ENTRYPOINT_LOGIN_TIMEOUT="30" ENTRYPOINT [ "/usr/local/bin/netbird-entrypoint.sh" ] diff --git a/client/Dockerfile-rootless b/client/Dockerfile-rootless index 5fa8de0a5..b0d59fdf8 100644 --- a/client/Dockerfile-rootless +++ b/client/Dockerfile-rootless @@ -23,8 +23,8 @@ ENV \ NB_DAEMON_ADDR="unix:///var/lib/netbird/netbird.sock" \ NB_LOG_FILE="console,/var/lib/netbird/client.log" \ NB_DISABLE_DNS="true" \ - NB_ENTRYPOINT_SERVICE_TIMEOUT="5" \ - NB_ENTRYPOINT_LOGIN_TIMEOUT="1" + NB_ENTRYPOINT_SERVICE_TIMEOUT="30" \ + NB_ENTRYPOINT_LOGIN_TIMEOUT="30" ENTRYPOINT [ "/usr/local/bin/netbird-entrypoint.sh" ] diff --git a/client/cmd/status.go b/client/cmd/status.go index f09c35c2c..07dbf9101 100644 --- a/client/cmd/status.go +++ b/client/cmd/status.go @@ -28,6 +28,7 @@ var ( ipsFilterMap map[string]struct{} prefixNamesFilterMap map[string]struct{} connectionTypeFilter string + checkFlag string ) var statusCmd = &cobra.Command{ @@ -49,6 +50,7 @@ func init() { statusCmd.PersistentFlags().StringSliceVar(&prefixNamesFilter, "filter-by-names", []string{}, "filters the detailed output by a list of one or more peer FQDN or hostnames, e.g., --filter-by-names peer-a,peer-b.netbird.cloud") statusCmd.PersistentFlags().StringVar(&statusFilter, "filter-by-status", "", "filters the detailed output by connection status(idle|connecting|connected), e.g., --filter-by-status connected") statusCmd.PersistentFlags().StringVar(&connectionTypeFilter, "filter-by-connection-type", "", "filters the detailed output by connection type (P2P|Relayed), e.g., --filter-by-connection-type P2P") + statusCmd.PersistentFlags().StringVar(&checkFlag, "check", "", "run a health check and exit with code 0 on success, 1 on failure (live|ready|startup)") } func statusFunc(cmd *cobra.Command, args []string) error { @@ -56,6 +58,10 @@ func statusFunc(cmd *cobra.Command, args []string) error { cmd.SetOut(cmd.OutOrStdout()) + if checkFlag != "" { + return runHealthCheck(cmd) + } + err := parseFilters() if err != nil { return err @@ -68,15 +74,17 @@ func statusFunc(cmd *cobra.Command, args []string) error { ctx := internal.CtxInitState(cmd.Context()) - resp, err := getStatus(ctx, false) + resp, err := getStatus(ctx, true, false) if err != nil { return err } status := resp.GetStatus() - if status == string(internal.StatusNeedsLogin) || status == string(internal.StatusLoginFailed) || - status == string(internal.StatusSessionExpired) { + needsAuth := status == string(internal.StatusNeedsLogin) || status == string(internal.StatusLoginFailed) || + status == string(internal.StatusSessionExpired) + + if needsAuth && !jsonFlag && !yamlFlag { cmd.Printf("Daemon status: %s\n\n"+ "Run UP command to log in with SSO (interactive login):\n\n"+ " netbird up \n\n"+ @@ -99,7 +107,17 @@ func statusFunc(cmd *cobra.Command, args []string) error { profName = activeProf.Name } - var outputInformationHolder = nbstatus.ConvertToStatusOutputOverview(resp.GetFullStatus(), anonymizeFlag, resp.GetDaemonVersion(), statusFilter, prefixNamesFilter, prefixNamesFilterMap, ipsFilterMap, connectionTypeFilter, profName) + var outputInformationHolder = nbstatus.ConvertToStatusOutputOverview(resp.GetFullStatus(), nbstatus.ConvertOptions{ + Anonymize: anonymizeFlag, + DaemonVersion: resp.GetDaemonVersion(), + DaemonStatus: nbstatus.ParseDaemonStatus(status), + StatusFilter: statusFilter, + PrefixNamesFilter: prefixNamesFilter, + PrefixNamesFilterMap: prefixNamesFilterMap, + IPsFilter: ipsFilterMap, + ConnectionTypeFilter: connectionTypeFilter, + ProfileName: profName, + }) var statusOutputString string switch { case detailFlag: @@ -121,7 +139,7 @@ func statusFunc(cmd *cobra.Command, args []string) error { return nil } -func getStatus(ctx context.Context, shouldRunProbes bool) (*proto.StatusResponse, error) { +func getStatus(ctx context.Context, fullPeerStatus bool, shouldRunProbes bool) (*proto.StatusResponse, error) { conn, err := DialClientGRPCServer(ctx, daemonAddr) if err != nil { //nolint @@ -131,7 +149,7 @@ func getStatus(ctx context.Context, shouldRunProbes bool) (*proto.StatusResponse } defer conn.Close() - resp, err := proto.NewDaemonServiceClient(conn).Status(ctx, &proto.StatusRequest{GetFullPeerStatus: true, ShouldRunProbes: shouldRunProbes}) + resp, err := proto.NewDaemonServiceClient(conn).Status(ctx, &proto.StatusRequest{GetFullPeerStatus: fullPeerStatus, ShouldRunProbes: shouldRunProbes}) if err != nil { return nil, fmt.Errorf("status failed: %v", status.Convert(err).Message()) } @@ -185,6 +203,83 @@ func enableDetailFlagWhenFilterFlag() { } } +func runHealthCheck(cmd *cobra.Command) error { + check := strings.ToLower(checkFlag) + switch check { + case "live", "ready", "startup": + default: + return fmt.Errorf("unknown check %q, must be one of: live, ready, startup", checkFlag) + } + + if err := util.InitLog(logLevel, util.LogConsole); err != nil { + return fmt.Errorf("init log: %w", err) + } + + ctx := internal.CtxInitState(cmd.Context()) + + isStartup := check == "startup" + resp, err := getStatus(ctx, isStartup, isStartup) + if err != nil { + return err + } + + switch check { + case "live": + return nil + case "ready": + return checkReadiness(resp) + case "startup": + return checkStartup(resp) + default: + return nil + } +} + +func checkReadiness(resp *proto.StatusResponse) error { + daemonStatus := internal.StatusType(resp.GetStatus()) + switch daemonStatus { + case internal.StatusIdle, internal.StatusConnecting, internal.StatusConnected: + return nil + case internal.StatusNeedsLogin, internal.StatusLoginFailed, internal.StatusSessionExpired: + return fmt.Errorf("readiness check: daemon status is %s", daemonStatus) + default: + return fmt.Errorf("readiness check: unexpected daemon status %q", daemonStatus) + } +} + +func checkStartup(resp *proto.StatusResponse) error { + fullStatus := resp.GetFullStatus() + if fullStatus == nil { + return fmt.Errorf("startup check: no full status available") + } + + if !fullStatus.GetManagementState().GetConnected() { + return fmt.Errorf("startup check: management not connected") + } + + if !fullStatus.GetSignalState().GetConnected() { + return fmt.Errorf("startup check: signal not connected") + } + + var relayCount, relaysConnected int + for _, r := range fullStatus.GetRelays() { + uri := r.GetURI() + if !strings.HasPrefix(uri, "rel://") && !strings.HasPrefix(uri, "rels://") { + continue + } + relayCount++ + if r.GetAvailable() { + relaysConnected++ + } + } + + if relayCount > 0 && relaysConnected == 0 { + return fmt.Errorf("startup check: no relay servers available (0/%d connected)", relayCount) + } + + return nil +} + func parseInterfaceIP(interfaceIP string) string { ip, _, err := net.ParseCIDR(interfaceIP) if err != nil { diff --git a/client/internal/debug/debug.go b/client/internal/debug/debug.go index f0f399bef..00a6e8160 100644 --- a/client/internal/debug/debug.go +++ b/client/internal/debug/debug.go @@ -31,7 +31,6 @@ import ( nbstatus "github.com/netbirdio/netbird/client/status" mgmProto "github.com/netbirdio/netbird/shared/management/proto" "github.com/netbirdio/netbird/util" - "github.com/netbirdio/netbird/version" ) const readmeContent = `Netbird debug bundle @@ -418,7 +417,10 @@ func (g *BundleGenerator) addStatus() error { fullStatus := g.statusRecorder.GetFullStatus() protoFullStatus := nbstatus.ToProtoFullStatus(fullStatus) protoFullStatus.Events = g.statusRecorder.GetEventHistory() - overview := nbstatus.ConvertToStatusOutputOverview(protoFullStatus, g.anonymize, version.NetbirdVersion(), "", nil, nil, nil, "", profName) + overview := nbstatus.ConvertToStatusOutputOverview(protoFullStatus, nbstatus.ConvertOptions{ + Anonymize: g.anonymize, + ProfileName: profName, + }) statusOutput := overview.FullDetailSummary() statusReader := strings.NewReader(statusOutput) diff --git a/client/netbird-entrypoint.sh b/client/netbird-entrypoint.sh index 7c9fa021a..f8267d6ee 100755 --- a/client/netbird-entrypoint.sh +++ b/client/netbird-entrypoint.sh @@ -1,12 +1,11 @@ #!/usr/bin/env bash set -eEuo pipefail -: ${NB_ENTRYPOINT_SERVICE_TIMEOUT:="5"} -: ${NB_ENTRYPOINT_LOGIN_TIMEOUT:="5"} +: ${NB_ENTRYPOINT_SERVICE_TIMEOUT:="30"} +: ${NB_ENTRYPOINT_LOGIN_TIMEOUT:="30"} NETBIRD_BIN="${NETBIRD_BIN:-"netbird"}" export NB_LOG_FILE="${NB_LOG_FILE:-"console,/var/log/netbird/client.log"}" service_pids=() -log_file_path="" _log() { # mimic Go logger's output for easier parsing @@ -33,60 +32,50 @@ on_exit() { fi } -wait_for_message() { - local timeout="${1}" message="${2}" - if test "${timeout}" -eq 0; then - info "not waiting for log line ${message@Q} due to zero timeout." - elif test -n "${log_file_path}"; then - info "waiting for log line ${message@Q} for ${timeout} seconds..." - grep -E -q "${message}" <(timeout "${timeout}" tail -F "${log_file_path}" 2>/dev/null) - else - info "log file unsupported, sleeping for ${timeout} seconds..." - sleep "${timeout}" - fi -} - -locate_log_file() { - local log_files_string="${1}" - - while read -r log_file; do - case "${log_file}" in - console | syslog) ;; - *) - log_file_path="${log_file}" - return - ;; - esac - done < <(sed 's#,#\n#g' <<<"${log_files_string}") - - warn "log files parsing for ${log_files_string@Q} is not supported by debug bundles" - warn "please consider removing the \$NB_LOG_FILE or setting it to real file, before gathering debug bundles." -} - wait_for_daemon_startup() { local timeout="${1}" - - if test -n "${log_file_path}"; then - if ! wait_for_message "${timeout}" "started daemon server"; then - warn "log line containing 'started daemon server' not found after ${timeout} seconds" - warn "daemon failed to start, exiting..." - exit 1 - fi - else - warn "daemon service startup not discovered, sleeping ${timeout} instead" - sleep "${timeout}" + if [[ "${timeout}" -eq 0 ]]; then + info "not waiting for daemon startup due to zero timeout." + return fi + + local deadline=$((SECONDS + timeout)) + while [[ "${SECONDS}" -lt "${deadline}" ]]; do + if "${NETBIRD_BIN}" status --check live 2>/dev/null; then + return + fi + sleep 1 + done + + warn "daemon did not become responsive after ${timeout} seconds, exiting..." + exit 1 } login_if_needed() { local timeout="${1}" - if test -n "${log_file_path}" && wait_for_message "${timeout}" 'peer has been successfully registered|management connection state READY'; then + if "${NETBIRD_BIN}" status --check ready 2>/dev/null; then info "already logged in, skipping 'netbird up'..." - else + return + fi + + if [[ "${timeout}" -eq 0 ]]; then info "logging in..." "${NETBIRD_BIN}" up + return fi + + local deadline=$((SECONDS + timeout)) + while [[ "${SECONDS}" -lt "${deadline}" ]]; do + if "${NETBIRD_BIN}" status --check ready 2>/dev/null; then + info "already logged in, skipping 'netbird up'..." + return + fi + sleep 1 + done + + info "logging in..." + "${NETBIRD_BIN}" up } main() { @@ -95,7 +84,6 @@ main() { service_pids+=("$!") info "registered new service process 'netbird service run', currently running: ${service_pids[@]@Q}" - locate_log_file "${NB_LOG_FILE}" wait_for_daemon_startup "${NB_ENTRYPOINT_SERVICE_TIMEOUT}" login_if_needed "${NB_ENTRYPOINT_LOGIN_TIMEOUT}" diff --git a/client/status/status.go b/client/status/status.go index f13163a41..8c932bbab 100644 --- a/client/status/status.go +++ b/client/status/status.go @@ -25,6 +25,38 @@ import ( "github.com/netbirdio/netbird/version" ) +// DaemonStatus represents the current state of the NetBird daemon. +// These values mirror internal.StatusType but are defined here to avoid an import cycle. +type DaemonStatus string + +const ( + DaemonStatusIdle DaemonStatus = "Idle" + DaemonStatusConnecting DaemonStatus = "Connecting" + DaemonStatusConnected DaemonStatus = "Connected" + DaemonStatusNeedsLogin DaemonStatus = "NeedsLogin" + DaemonStatusLoginFailed DaemonStatus = "LoginFailed" + DaemonStatusSessionExpired DaemonStatus = "SessionExpired" +) + +// ParseDaemonStatus converts a raw status string to DaemonStatus. +// Unrecognized values are preserved as-is to remain visible during version skew. +func ParseDaemonStatus(s string) DaemonStatus { + return DaemonStatus(s) +} + +// ConvertOptions holds parameters for ConvertToStatusOutputOverview. +type ConvertOptions struct { + Anonymize bool + DaemonVersion string + DaemonStatus DaemonStatus + StatusFilter string + PrefixNamesFilter []string + PrefixNamesFilterMap map[string]struct{} + IPsFilter map[string]struct{} + ConnectionTypeFilter string + ProfileName string +} + type PeerStateDetailOutput struct { FQDN string `json:"fqdn" yaml:"fqdn"` IP string `json:"netbirdIp" yaml:"netbirdIp"` @@ -102,6 +134,7 @@ type OutputOverview struct { Peers PeersStateOutput `json:"peers" yaml:"peers"` CliVersion string `json:"cliVersion" yaml:"cliVersion"` DaemonVersion string `json:"daemonVersion" yaml:"daemonVersion"` + DaemonStatus DaemonStatus `json:"daemonStatus" yaml:"daemonStatus"` ManagementState ManagementStateOutput `json:"management" yaml:"management"` SignalState SignalStateOutput `json:"signal" yaml:"signal"` Relays RelayStateOutput `json:"relays" yaml:"relays"` @@ -120,7 +153,8 @@ type OutputOverview struct { SSHServerState SSHServerStateOutput `json:"sshServer" yaml:"sshServer"` } -func ConvertToStatusOutputOverview(pbFullStatus *proto.FullStatus, anon bool, daemonVersion string, statusFilter string, prefixNamesFilter []string, prefixNamesFilterMap map[string]struct{}, ipsFilter map[string]struct{}, connectionTypeFilter string, profName string) OutputOverview { +// ConvertToStatusOutputOverview converts protobuf status to the output overview. +func ConvertToStatusOutputOverview(pbFullStatus *proto.FullStatus, opts ConvertOptions) OutputOverview { managementState := pbFullStatus.GetManagementState() managementOverview := ManagementStateOutput{ URL: managementState.GetURL(), @@ -137,12 +171,13 @@ func ConvertToStatusOutputOverview(pbFullStatus *proto.FullStatus, anon bool, da relayOverview := mapRelays(pbFullStatus.GetRelays()) sshServerOverview := mapSSHServer(pbFullStatus.GetSshServerState()) - peersOverview := mapPeers(pbFullStatus.GetPeers(), statusFilter, prefixNamesFilter, prefixNamesFilterMap, ipsFilter, connectionTypeFilter) + peersOverview := mapPeers(pbFullStatus.GetPeers(), opts.StatusFilter, opts.PrefixNamesFilter, opts.PrefixNamesFilterMap, opts.IPsFilter, opts.ConnectionTypeFilter) overview := OutputOverview{ Peers: peersOverview, CliVersion: version.NetbirdVersion(), - DaemonVersion: daemonVersion, + DaemonVersion: opts.DaemonVersion, + DaemonStatus: opts.DaemonStatus, ManagementState: managementOverview, SignalState: signalOverview, Relays: relayOverview, @@ -157,11 +192,11 @@ func ConvertToStatusOutputOverview(pbFullStatus *proto.FullStatus, anon bool, da NSServerGroups: mapNSGroups(pbFullStatus.GetDnsServers()), Events: mapEvents(pbFullStatus.GetEvents()), LazyConnectionEnabled: pbFullStatus.GetLazyConnectionEnabled(), - ProfileName: profName, + ProfileName: opts.ProfileName, SSHServerState: sshServerOverview, } - if anon { + if opts.Anonymize { anonymizer := anonymize.NewAnonymizer(anonymize.DefaultAddresses()) anonymizeOverview(anonymizer, &overview) } diff --git a/client/status/status_test.go b/client/status/status_test.go index b02d78d64..7754eebae 100644 --- a/client/status/status_test.go +++ b/client/status/status_test.go @@ -176,6 +176,7 @@ var overview = OutputOverview{ Events: []SystemEventOutput{}, CliVersion: version.NetbirdVersion(), DaemonVersion: "0.14.1", + DaemonStatus: DaemonStatusConnected, ManagementState: ManagementStateOutput{ URL: "my-awesome-management.com:443", Connected: true, @@ -238,7 +239,10 @@ var overview = OutputOverview{ } func TestConversionFromFullStatusToOutputOverview(t *testing.T) { - convertedResult := ConvertToStatusOutputOverview(resp.GetFullStatus(), false, resp.GetDaemonVersion(), "", nil, nil, nil, "", "") + convertedResult := ConvertToStatusOutputOverview(resp.GetFullStatus(), ConvertOptions{ + DaemonVersion: resp.GetDaemonVersion(), + DaemonStatus: ParseDaemonStatus(resp.GetStatus()), + }) assert.Equal(t, overview, convertedResult) } @@ -329,6 +333,7 @@ func TestParsingToJSON(t *testing.T) { }, "cliVersion": "development", "daemonVersion": "0.14.1", + "daemonStatus": "Connected", "management": { "url": "my-awesome-management.com:443", "connected": true, @@ -452,6 +457,7 @@ func TestParsingToYAML(t *testing.T) { networks: [] cliVersion: development daemonVersion: 0.14.1 +daemonStatus: Connected management: url: my-awesome-management.com:443 connected: true diff --git a/client/wasm/cmd/main.go b/client/wasm/cmd/main.go index 26022ffc7..d8e50ab6d 100644 --- a/client/wasm/cmd/main.go +++ b/client/wasm/cmd/main.go @@ -18,7 +18,6 @@ import ( "github.com/netbirdio/netbird/client/wasm/internal/rdp" "github.com/netbirdio/netbird/client/wasm/internal/ssh" "github.com/netbirdio/netbird/util" - "github.com/netbirdio/netbird/version" ) const ( @@ -350,7 +349,7 @@ func getStatusOverview(client *netbird.Client) (nbstatus.OutputOverview, error) pbFullStatus := fullStatus.ToProto() - return nbstatus.ConvertToStatusOutputOverview(pbFullStatus, false, version.NetbirdVersion(), "", nil, nil, nil, "", ""), nil + return nbstatus.ConvertToStatusOutputOverview(pbFullStatus, nbstatus.ConvertOptions{}), nil } // createStatusMethod creates the status method that returns JSON diff --git a/proxy/internal/debug/handler.go b/proxy/internal/debug/handler.go index 237010922..c507cfad9 100644 --- a/proxy/internal/debug/handler.go +++ b/proxy/internal/debug/handler.go @@ -409,17 +409,13 @@ func (h *Handler) handleClientStatus(w http.ResponseWriter, r *http.Request, acc } pbStatus := nbstatus.ToProtoFullStatus(fullStatus) - overview := nbstatus.ConvertToStatusOutputOverview( - pbStatus, - false, - version.NetbirdVersion(), - statusFilter, - prefixNamesFilter, - prefixNamesFilterMap, - ipsFilterMap, - connectionTypeFilter, - "", - ) + overview := nbstatus.ConvertToStatusOutputOverview(pbStatus, nbstatus.ConvertOptions{ + StatusFilter: statusFilter, + PrefixNamesFilter: prefixNamesFilter, + PrefixNamesFilterMap: prefixNamesFilterMap, + IPsFilter: ipsFilterMap, + ConnectionTypeFilter: connectionTypeFilter, + }) if wantJSON { h.writeJSON(w, map[string]interface{}{ From 91f0d5cefd59b38a65fccc99abae0d749df0d61b Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Sun, 22 Mar 2026 12:45:41 +0100 Subject: [PATCH 231/374] [client] Feature/client metrics (#5512) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add client metrics * Add client metrics system with OpenTelemetry and VictoriaMetrics support Implements a comprehensive client metrics system to track peer connection stages and performance. The system supports multiple backend implementations (OpenTelemetry, VictoriaMetrics, and no-op) and tracks detailed connection stage durations from creation through WireGuard handshake. Key changes: - Add metrics package with pluggable backend implementations - Implement OpenTelemetry metrics backend - Implement VictoriaMetrics metrics backend - Add no-op metrics implementation for disabled state - Track connection stages: creation, semaphore, signaling, connection ready, and WireGuard handshake - Move WireGuard watcher functionality to conn.go - Refactor engine to integrate metrics tracking - Add metrics export endpoint in debug server * Add signaling metrics tracking for initial and reconnection attempts * Reset connection stage timestamps during reconnections to exclude unnecessary metrics tracking * Delete otel lib from client * Update unit tests * Invoke callback on handshake success in WireGuard watcher * Add Netbird version tracking to client metrics Integrate Netbird version into VictoriaMetrics backend and metrics labels. Update `ClientMetrics` constructor and metric name formatting to include version information. * Add sync duration tracking to client metrics Introduce `RecordSyncDuration` for measuring sync message processing time. Update all metrics implementations (VictoriaMetrics, no-op) to support the new method. Refactor `ClientMetrics` to use `AgentInfo` for static agent data. * Remove no-op metrics implementation and simplify ClientMetrics constructor Eliminate unused `noopMetrics` and refactor `ClientMetrics` to always use the VictoriaMetrics implementation. Update associated logic to reflect these changes. * Add total duration tracking for connection attempts Calculate total duration for both initial connections and reconnections, accounting for different timestamp scenarios. Update `Export` method to include Prometheus HELP comments. * Add metrics push support to VictoriaMetrics integration * [client] anchor connection metrics to first signal received * Remove creation_to_semaphore connection stage metric The semaphore queuing stage (Created → SemaphoreAcquired) is no longer tracked. Connection metrics now start from SignalingReceived. Updated docs and Grafana dashboard accordingly. * [client] Add remote push config for metrics with version-based eligibility Introduce remoteconfig.Manager that fetches a remote JSON config to control metrics push interval and restrict pushing to a specific agent version range. When NB_METRICS_INTERVAL is set, remote config is bypassed entirely for local override. * [client] Add WASM-compatible NewClientMetrics implementation Replace NewClientMetrics in metrics.go with a WASM-specific stub in metrics_js.go, returning nil for compatibility with JS builds. Simplify method usage for WASM targets. * Add missing file * Update default case in DeploymentType.String to return "unknown" instead of "selfhosted" * [client] Rework metrics to use timestamped samples instead of histograms Replace cumulative Prometheus histograms with timestamped point-in-time samples that are pushed once and cleared. This fixes metrics for sparse events (connections/syncs that happen once at startup) where rate() and increase() produced incorrect or empty results. Changes: - Switch from VictoriaMetrics histogram library to raw Prometheus text format with explicit millisecond timestamps - Reset samples after successful push (no resending stale data) - Rename connection_to_handshake → connection_to_wg_handshake - Add netbird_peer_connection_count metric for ICE vs Relay tracking - Simplify dashboard: point-based scatter plots, donut pie chart - Add maxStalenessInterval=1m to VictoriaMetrics to prevent forward-fill - Fix deployment_type Unknown returning "selfhosted" instead of "unknown" - Fix inverted shouldPush condition in push.go * [client] Add InfluxDB metrics backend alongside VictoriaMetrics Add influxdb.go with timestamped line protocol export for sparse one-shot events. Restore victoria.go to use proper Prometheus histograms. Update Grafana dashboards, add InfluxDB datasource, and update docs. Co-Authored-By: Claude Opus 4.6 * [client] Fix metrics issues and update dev docker setup - Fix StopPush not clearing push state, preventing restart - Fix race condition reading currentConnPriority without lock in recordConnectionMetrics - Fix stale comment referencing old metrics server URL - Update docker-compose for InfluxDB: add scoped tokens, .env config, init scripts - Rename docker-compose.victoria.yml to docker-compose.yml * [client] Add anonymised peer tracking to pushed metrics Introduce peer_id and connection_pair_id tags to InfluxDB metrics. Public keys are hashed (truncated SHA-256) for anonymisation. The connection pair ID is deterministic regardless of which side computes it, enabling deduplication of reconnections in the ICE vs Relay dashboard. Also pin Grafana to v11.6.0 for file-based provisioning and fix datasource UID references. * Remove unused dependencies from go.mod and go.sum * Refactor InfluxDB ingest pipeline: extract validation logic - Move line validation logic to `validateLine` and `validateField` helper functions. - Improve error handling with structured validation and clearer separation of concerns. - Add stderr redirection for error messages in `create-tokens.sh`. * Set non-root user in Dockerfile for Ingest service * Fix Windows CI: command line too long * Remove Victoria metrics * Add hashed peer ID as Authorization header in metrics push * Revert influxdb in docker compose * Enable gzip compression and authorization validation for metrics push and ingest * Reducate code of complexity * Update debug documentation to include metrics.txt description * Increase `maxBodySize` limit to 50 MB and update gzip reader wrapping logic * Refactor deployment type detection to use URL parsing for improved accuracy * Update readme * Throttle remote config retries on fetch failure * Preserve first WG handshake timestamp, ignore rekeys * Skip adding empty metrics.txt to debug bundle in debug mode * Update default metrics server URL to https://ingest.netbird.io * Atomic metrics export-and-reset to prevent sample loss between Export and Reset calls * Fix doc * Refactor Push configuration to improve clarity and enforce minimum push interval * Remove `minPushInterval` and update push interval validation logic * Revert ExportAndReset, it is acceptable data loss * Fix metrics review issues: rename env var, remove stale infra, add tests - Rename NB_METRICS_ENABLED to NB_METRICS_PUSH_ENABLED to clarify that collection is always active (for debug bundles) and only push is opt-in - Change default config URL from staging to production (ingest.netbird.io) - Delete broken Prometheus dashboard (used non-existent metric names) - Delete unused VictoriaMetrics datasource config - Replace committed .env with .env.example containing placeholder values - Wire Grafana admin credentials through env vars in docker-compose - Make metricsStages a pointer to prevent reset-vs-write race on reconnect - Fix typed-nil interface in debug bundle path (GetClientMetrics) - Use deterministic field order in InfluxDB Export (sorted keys) - Replace Authorization header with X-Peer-ID for metrics push - Fix ingest server timeout to use time.Second instead of float - Fix gzip double-close, stale comments, trim log levels - Add tests for influxdb.go and MetricsStages * Add login duration metric, ingest tag validation, and duration bounds - Add netbird_login measurement recording login/auth duration to management server, with success/failure result tag - Validate InfluxDB tags against per-measurement allowlists in ingest server to prevent arbitrary tag injection - Cap all duration fields (*_seconds) at 300s instead of only total_seconds - Add ingest server tests for tag/field validation, bounds, and auth * Add arch tag to all metrics * Fix Grafana dashboard: add arch to drop columns, add login panels * Validate NB_METRICS_SERVER_URL is an absolute HTTP(S) URL * Address review comments: fix README wording, update stale comments * Clarify env var precedence does not bypass remote config eligibility * Remove accidentally committed pprof files --------- Co-authored-by: Viktor Liu --- .github/workflows/golang-test-windows.yml | 9 +- client/internal/connect.go | 40 ++ client/internal/debug/debug.go | 37 ++ client/internal/engine.go | 27 +- client/internal/engine_test.go | 6 +- client/internal/metrics/connection_type.go | 17 + client/internal/metrics/deployment_type.go | 51 +++ client/internal/metrics/env.go | 93 +++++ client/internal/metrics/influxdb.go | 219 +++++++++++ client/internal/metrics/influxdb_test.go | 229 +++++++++++ client/internal/metrics/infra/.env.example | 16 + client/internal/metrics/infra/.gitignore | 1 + client/internal/metrics/infra/README.md | 194 ++++++++++ .../internal/metrics/infra/docker-compose.yml | 69 ++++ .../provisioning/dashboards/dashboard.yml | 12 + .../json/netbird-influxdb-metrics.json | 280 ++++++++++++++ .../provisioning/datasources/influxdb.yml | 15 + .../infra/influxdb/scripts/create-tokens.sh | 25 ++ .../internal/metrics/infra/ingest/Dockerfile | 10 + client/internal/metrics/infra/ingest/go.mod | 11 + client/internal/metrics/infra/ingest/go.sum | 10 + client/internal/metrics/infra/ingest/main.go | 355 ++++++++++++++++++ .../metrics/infra/ingest/main_test.go | 124 ++++++ client/internal/metrics/metrics.go | 224 +++++++++++ client/internal/metrics/metrics_default.go | 11 + client/internal/metrics/metrics_js.go | 8 + client/internal/metrics/push.go | 289 ++++++++++++++ client/internal/metrics/push_test.go | 343 +++++++++++++++++ .../internal/metrics/remoteconfig/manager.go | 149 ++++++++ .../metrics/remoteconfig/manager_test.go | 197 ++++++++++ client/internal/peer/conn.go | 103 ++++- client/internal/peer/handshaker.go | 28 +- client/internal/peer/metrics_saver.go | 73 ++++ client/internal/peer/metrics_saver_test.go | 125 ++++++ client/internal/peer/wg_watcher.go | 10 +- client/internal/peer/wg_watcher_test.go | 10 +- client/server/debug.go | 10 + shared/management/client/client.go | 1 + shared/management/client/grpc.go | 7 + shared/management/client/mock.go | 9 + 40 files changed, 3405 insertions(+), 42 deletions(-) create mode 100644 client/internal/metrics/connection_type.go create mode 100644 client/internal/metrics/deployment_type.go create mode 100644 client/internal/metrics/env.go create mode 100644 client/internal/metrics/influxdb.go create mode 100644 client/internal/metrics/influxdb_test.go create mode 100644 client/internal/metrics/infra/.env.example create mode 100644 client/internal/metrics/infra/.gitignore create mode 100644 client/internal/metrics/infra/README.md create mode 100644 client/internal/metrics/infra/docker-compose.yml create mode 100644 client/internal/metrics/infra/grafana/provisioning/dashboards/dashboard.yml create mode 100644 client/internal/metrics/infra/grafana/provisioning/dashboards/json/netbird-influxdb-metrics.json create mode 100644 client/internal/metrics/infra/grafana/provisioning/datasources/influxdb.yml create mode 100755 client/internal/metrics/infra/influxdb/scripts/create-tokens.sh create mode 100644 client/internal/metrics/infra/ingest/Dockerfile create mode 100644 client/internal/metrics/infra/ingest/go.mod create mode 100644 client/internal/metrics/infra/ingest/go.sum create mode 100644 client/internal/metrics/infra/ingest/main.go create mode 100644 client/internal/metrics/infra/ingest/main_test.go create mode 100644 client/internal/metrics/metrics.go create mode 100644 client/internal/metrics/metrics_default.go create mode 100644 client/internal/metrics/metrics_js.go create mode 100644 client/internal/metrics/push.go create mode 100644 client/internal/metrics/push_test.go create mode 100644 client/internal/metrics/remoteconfig/manager.go create mode 100644 client/internal/metrics/remoteconfig/manager_test.go create mode 100644 client/internal/peer/metrics_saver.go create mode 100644 client/internal/peer/metrics_saver_test.go diff --git a/.github/workflows/golang-test-windows.yml b/.github/workflows/golang-test-windows.yml index 8af4046a7..8e672043d 100644 --- a/.github/workflows/golang-test-windows.yml +++ b/.github/workflows/golang-test-windows.yml @@ -63,10 +63,15 @@ jobs: - run: PsExec64 -s -w ${{ github.workspace }} C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe env -w GOMODCACHE=${{ env.cache }} - run: PsExec64 -s -w ${{ github.workspace }} C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe env -w GOCACHE=${{ env.modcache }} - run: PsExec64 -s -w ${{ github.workspace }} C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe mod tidy - - run: echo "files=$(go list ./... | ForEach-Object { $_ } | Where-Object { $_ -notmatch '/management' } | Where-Object { $_ -notmatch '/relay' } | Where-Object { $_ -notmatch '/signal' } | Where-Object { $_ -notmatch '/proxy' } | Where-Object { $_ -notmatch '/combined' })" >> $env:GITHUB_ENV + - name: Generate test script + run: | + $packages = go list ./... | Where-Object { $_ -notmatch '/management' } | Where-Object { $_ -notmatch '/relay' } | Where-Object { $_ -notmatch '/signal' } | Where-Object { $_ -notmatch '/proxy' } | Where-Object { $_ -notmatch '/combined' } + $goExe = "C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe" + $cmd = "$goExe test -tags=devcert -timeout 10m -p 1 $($packages -join ' ') > test-out.txt 2>&1" + Set-Content -Path "${{ github.workspace }}\run-tests.cmd" -Value $cmd - name: test - run: PsExec64 -s -w ${{ github.workspace }} cmd.exe /c "C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe test -tags=devcert -timeout 10m -p 1 ${{ env.files }} > test-out.txt 2>&1" + run: PsExec64 -s -w ${{ github.workspace }} cmd.exe /c "${{ github.workspace }}\run-tests.cmd" - name: test output if: ${{ always() }} run: Get-Content test-out.txt diff --git a/client/internal/connect.go b/client/internal/connect.go index ccd7b6c33..242b25b44 100644 --- a/client/internal/connect.go +++ b/client/internal/connect.go @@ -23,6 +23,7 @@ import ( "github.com/netbirdio/netbird/client/iface/netstack" "github.com/netbirdio/netbird/client/internal/dns" "github.com/netbirdio/netbird/client/internal/listener" + "github.com/netbirdio/netbird/client/internal/metrics" "github.com/netbirdio/netbird/client/internal/peer" "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/internal/statemanager" @@ -50,6 +51,7 @@ type ConnectClient struct { engine *Engine engineMutex sync.Mutex + clientMetrics *metrics.ClientMetrics updateManager *updater.Manager persistSyncResponse bool @@ -133,10 +135,34 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan } }() + // Stop metrics push on exit + defer func() { + if c.clientMetrics != nil { + c.clientMetrics.StopPush() + } + }() + log.Infof("starting NetBird client version %s on %s/%s", version.NetbirdVersion(), runtime.GOOS, runtime.GOARCH) nbnet.Init() + // Initialize metrics once at startup (always active for debug bundles) + if c.clientMetrics == nil { + agentInfo := metrics.AgentInfo{ + DeploymentType: metrics.DeploymentTypeUnknown, + Version: version.NetbirdVersion(), + OS: runtime.GOOS, + Arch: runtime.GOARCH, + } + c.clientMetrics = metrics.NewClientMetrics(agentInfo) + log.Debugf("initialized client metrics") + + // Start metrics push if enabled (uses daemon context, persists across engine restarts) + if metrics.IsMetricsPushEnabled() { + c.clientMetrics.StartPush(c.ctx, metrics.PushConfigFromEnv()) + } + } + backOff := &backoff.ExponentialBackOff{ InitialInterval: time.Second, RandomizationFactor: 1, @@ -223,6 +249,16 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan mgmNotifier := statusRecorderToMgmConnStateNotifier(c.statusRecorder) mgmClient.SetConnStateListener(mgmNotifier) + // Update metrics with actual deployment type after connection + deploymentType := metrics.DetermineDeploymentType(mgmClient.GetServerURL()) + agentInfo := metrics.AgentInfo{ + DeploymentType: deploymentType, + Version: version.NetbirdVersion(), + OS: runtime.GOOS, + Arch: runtime.GOARCH, + } + c.clientMetrics.UpdateAgentInfo(agentInfo, myPrivateKey.PublicKey().String()) + log.Debugf("connected to the Management service %s", c.config.ManagementURL.Host) defer func() { if err = mgmClient.Close(); err != nil { @@ -231,8 +267,10 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan }() // connect (just a connection, no stream yet) and login to Management Service to get an initial global Netbird config + loginStarted := time.Now() loginResp, err := loginToManagement(engineCtx, mgmClient, publicSSHKey, c.config) if err != nil { + c.clientMetrics.RecordLoginDuration(engineCtx, time.Since(loginStarted), false) log.Debug(err) if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.PermissionDenied) { state.Set(StatusNeedsLogin) @@ -241,6 +279,7 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan } return wrapErr(err) } + c.clientMetrics.RecordLoginDuration(engineCtx, time.Since(loginStarted), true) c.statusRecorder.MarkManagementConnected() localPeerState := peer.LocalPeerState{ @@ -317,6 +356,7 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan Checks: checks, StateManager: stateManager, UpdateManager: c.updateManager, + ClientMetrics: c.clientMetrics, }, mobileDependency) engine.SetSyncResponsePersistence(c.persistSyncResponse) c.engine = engine diff --git a/client/internal/debug/debug.go b/client/internal/debug/debug.go index 00a6e8160..c9ebf25e5 100644 --- a/client/internal/debug/debug.go +++ b/client/internal/debug/debug.go @@ -52,6 +52,7 @@ resolved_domains.txt: Anonymized resolved domain IP addresses from the status re config.txt: Anonymized configuration information of the NetBird client. network_map.json: Anonymized sync response containing peer configurations, routes, DNS settings, and firewall rules. state.json: Anonymized client state dump containing netbird states for the active profile. +metrics.txt: Buffered client metrics in InfluxDB line protocol format. Only present when metrics collection is enabled. Peer identifiers are anonymized. mutex.prof: Mutex profiling information. goroutine.prof: Goroutine profiling information. block.prof: Block profiling information. @@ -218,6 +219,11 @@ const ( darwinStdoutLogPath = "/var/log/netbird.err.log" ) +// MetricsExporter is an interface for exporting metrics +type MetricsExporter interface { + Export(w io.Writer) error +} + type BundleGenerator struct { anonymizer *anonymize.Anonymizer @@ -228,6 +234,7 @@ type BundleGenerator struct { logPath string cpuProfile []byte refreshStatus func() // Optional callback to refresh status before bundle generation + clientMetrics MetricsExporter anonymize bool includeSystemInfo bool @@ -249,6 +256,7 @@ type GeneratorDependencies struct { LogPath string CPUProfile []byte RefreshStatus func() // Optional callback to refresh status before bundle generation + ClientMetrics MetricsExporter } func NewBundleGenerator(deps GeneratorDependencies, cfg BundleConfig) *BundleGenerator { @@ -267,6 +275,7 @@ func NewBundleGenerator(deps GeneratorDependencies, cfg BundleConfig) *BundleGen logPath: deps.LogPath, cpuProfile: deps.CPUProfile, refreshStatus: deps.RefreshStatus, + clientMetrics: deps.ClientMetrics, anonymize: cfg.Anonymize, includeSystemInfo: cfg.IncludeSystemInfo, @@ -350,6 +359,10 @@ func (g *BundleGenerator) createArchive() error { log.Errorf("failed to add corrupted state files to debug bundle: %v", err) } + if err := g.addMetrics(); err != nil { + log.Errorf("failed to add metrics to debug bundle: %v", err) + } + if err := g.addWgShow(); err != nil { log.Errorf("failed to add wg show output: %v", err) } @@ -746,6 +759,30 @@ func (g *BundleGenerator) addCorruptedStateFiles() error { return nil } +func (g *BundleGenerator) addMetrics() error { + if g.clientMetrics == nil { + log.Debugf("skipping metrics in debug bundle: no metrics collector") + return nil + } + + var buf bytes.Buffer + if err := g.clientMetrics.Export(&buf); err != nil { + return fmt.Errorf("export metrics: %w", err) + } + + if buf.Len() == 0 { + log.Debugf("skipping metrics.txt in debug bundle: no metrics data") + return nil + } + + if err := g.addFileToZip(&buf, "metrics.txt"); err != nil { + return fmt.Errorf("add metrics file to zip: %w", err) + } + + log.Debugf("added metrics to debug bundle") + return nil +} + func (g *BundleGenerator) addLogfile() error { if g.logPath == "" { log.Debugf("skipping empty log file in debug bundle") diff --git a/client/internal/engine.go b/client/internal/engine.go index e9c92471c..ea1d3bec9 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -38,6 +38,7 @@ import ( "github.com/netbirdio/netbird/client/internal/dnsfwd" "github.com/netbirdio/netbird/client/internal/expose" "github.com/netbirdio/netbird/client/internal/ingressgw" + "github.com/netbirdio/netbird/client/internal/metrics" "github.com/netbirdio/netbird/client/internal/netflow" nftypes "github.com/netbirdio/netbird/client/internal/netflow/types" "github.com/netbirdio/netbird/client/internal/networkmonitor" @@ -149,6 +150,7 @@ type EngineServices struct { Checks []*mgmProto.Checks StateManager *statemanager.Manager UpdateManager *updater.Manager + ClientMetrics *metrics.ClientMetrics } // Engine is a mechanism responsible for reacting on Signal and Management stream events and managing connections to the remote peers. @@ -229,6 +231,9 @@ type Engine struct { probeStunTurn *relay.StunTurnProbe + // clientMetrics collects and pushes metrics + clientMetrics *metrics.ClientMetrics + jobExecutor *jobexec.Executor jobExecutorWG sync.WaitGroup @@ -272,6 +277,7 @@ func NewEngine( checks: services.Checks, probeStunTurn: relay.NewStunTurnProbe(relay.DefaultCacheTTL), jobExecutor: jobexec.NewExecutor(), + clientMetrics: services.ClientMetrics, updateManager: services.UpdateManager, } @@ -813,7 +819,9 @@ func (e *Engine) handleAutoUpdateVersion(autoUpdateSettings *mgmProto.AutoUpdate func (e *Engine) handleSync(update *mgmProto.SyncResponse) error { started := time.Now() defer func() { - log.Infof("sync finished in %s", time.Since(started)) + duration := time.Since(started) + log.Infof("sync finished in %s", duration) + e.clientMetrics.RecordSyncDuration(e.ctx, duration) }() e.syncMsgMux.Lock() defer e.syncMsgMux.Unlock() @@ -1061,6 +1069,7 @@ func (e *Engine) handleBundle(params *mgmProto.BundleParameters) (*mgmProto.JobR StatusRecorder: e.statusRecorder, SyncResponse: syncResponse, LogPath: e.config.LogPath, + ClientMetrics: e.clientMetrics, RefreshStatus: func() { e.RunHealthProbes(true) }, @@ -1515,11 +1524,12 @@ func (e *Engine) createPeerConn(pubKey string, allowedIPs []netip.Prefix, agentV } serviceDependencies := peer.ServiceDependencies{ - StatusRecorder: e.statusRecorder, - Signaler: e.signaler, - IFaceDiscover: e.mobileDep.IFaceDiscover, - RelayManager: e.relayManager, - SrWatcher: e.srWatcher, + StatusRecorder: e.statusRecorder, + Signaler: e.signaler, + IFaceDiscover: e.mobileDep.IFaceDiscover, + RelayManager: e.relayManager, + SrWatcher: e.srWatcher, + MetricsRecorder: e.clientMetrics, } peerConn, err := peer.NewConn(config, serviceDependencies) if err != nil { @@ -1816,6 +1826,11 @@ func (e *Engine) GetExposeManager() *expose.Manager { return e.exposeManager } +// GetClientMetrics returns the client metrics +func (e *Engine) GetClientMetrics() *metrics.ClientMetrics { + return e.clientMetrics +} + func findIPFromInterfaceName(ifaceName string) (net.IP, error) { iface, err := net.InterfaceByName(ifaceName) if err != nil { diff --git a/client/internal/engine_test.go b/client/internal/engine_test.go index f9e7f8fa0..77fe9049b 100644 --- a/client/internal/engine_test.go +++ b/client/internal/engine_test.go @@ -828,7 +828,7 @@ func TestEngine_UpdateNetworkMapWithRoutes(t *testing.T) { WgPrivateKey: key, WgPort: 33100, MTU: iface.DefaultMTU, - }, EngineServices{ + }, EngineServices{ SignalClient: &signal.MockClient{}, MgmClient: &mgmt.MockClient{}, RelayManager: relayMgr, @@ -1035,7 +1035,7 @@ func TestEngine_UpdateNetworkMapWithDNSUpdate(t *testing.T) { WgPrivateKey: key, WgPort: 33100, MTU: iface.DefaultMTU, - }, EngineServices{ + }, EngineServices{ SignalClient: &signal.MockClient{}, MgmClient: &mgmt.MockClient{}, RelayManager: relayMgr, @@ -1566,7 +1566,7 @@ func createEngine(ctx context.Context, cancel context.CancelFunc, setupKey strin } relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU) - e, err := NewEngine(ctx, cancel, conf, EngineServices{ +e, err := NewEngine(ctx, cancel, conf, EngineServices{ SignalClient: signalClient, MgmClient: mgmtClient, RelayManager: relayMgr, diff --git a/client/internal/metrics/connection_type.go b/client/internal/metrics/connection_type.go new file mode 100644 index 000000000..a3406a6b8 --- /dev/null +++ b/client/internal/metrics/connection_type.go @@ -0,0 +1,17 @@ +package metrics + +// ConnectionType represents the type of peer connection +type ConnectionType string + +const ( + // ConnectionTypeICE represents a direct peer-to-peer connection using ICE + ConnectionTypeICE ConnectionType = "ice" + + // ConnectionTypeRelay represents a relayed connection + ConnectionTypeRelay ConnectionType = "relay" +) + +// String returns the string representation of the connection type +func (c ConnectionType) String() string { + return string(c) +} diff --git a/client/internal/metrics/deployment_type.go b/client/internal/metrics/deployment_type.go new file mode 100644 index 000000000..141173cb8 --- /dev/null +++ b/client/internal/metrics/deployment_type.go @@ -0,0 +1,51 @@ +package metrics + +import ( + "net/url" + "strings" +) + +// DeploymentType represents the type of NetBird deployment +type DeploymentType int + +const ( + // DeploymentTypeUnknown represents an unknown or uninitialized deployment type + DeploymentTypeUnknown DeploymentType = iota + + // DeploymentTypeCloud represents a cloud-hosted NetBird deployment + DeploymentTypeCloud + + // DeploymentTypeSelfHosted represents a self-hosted NetBird deployment + DeploymentTypeSelfHosted +) + +// String returns the string representation of the deployment type +func (d DeploymentType) String() string { + switch d { + case DeploymentTypeCloud: + return "cloud" + case DeploymentTypeSelfHosted: + return "selfhosted" + default: + return "unknown" + } +} + +// DetermineDeploymentType determines if the deployment is cloud or self-hosted +// based on the management URL string +func DetermineDeploymentType(managementURL string) DeploymentType { + if managementURL == "" { + return DeploymentTypeUnknown + } + + u, err := url.Parse(managementURL) + if err != nil { + return DeploymentTypeSelfHosted + } + + if strings.ToLower(u.Hostname()) == "api.netbird.io" { + return DeploymentTypeCloud + } + + return DeploymentTypeSelfHosted +} diff --git a/client/internal/metrics/env.go b/client/internal/metrics/env.go new file mode 100644 index 000000000..1f06ce484 --- /dev/null +++ b/client/internal/metrics/env.go @@ -0,0 +1,93 @@ +package metrics + +import ( + "net/url" + "os" + "strconv" + "time" + + log "github.com/sirupsen/logrus" +) + +const ( + // EnvMetricsPushEnabled controls whether collected metrics are pushed to the backend. + // Metrics collection itself is always active (for debug bundles). + // Disabled by default. Set NB_METRICS_PUSH_ENABLED=true to enable push. + EnvMetricsPushEnabled = "NB_METRICS_PUSH_ENABLED" + + // EnvMetricsForceSending if set to true, skips remote configuration fetch and forces metric sending + EnvMetricsForceSending = "NB_METRICS_FORCE_SENDING" + + // EnvMetricsConfigURL is the environment variable to override the metrics push config ServerAddress + EnvMetricsConfigURL = "NB_METRICS_CONFIG_URL" + + // EnvMetricsServerURL is the environment variable to override the metrics server address. + // When set, this takes precedence over the server_url from remote push config. + EnvMetricsServerURL = "NB_METRICS_SERVER_URL" + + // EnvMetricsInterval overrides the push interval from the remote config. + // Only affects how often metrics are pushed; remote config availability + // and version range checks are still respected. + // Format: duration string like "1h", "30m", "4h" + EnvMetricsInterval = "NB_METRICS_INTERVAL" + + defaultMetricsConfigURL = "https://ingest.netbird.io/config" +) + +// IsMetricsPushEnabled returns true if metrics push is enabled via NB_METRICS_PUSH_ENABLED env var. +// Disabled by default. Metrics collection is always active for debug bundles. +func IsMetricsPushEnabled() bool { + enabled, _ := strconv.ParseBool(os.Getenv(EnvMetricsPushEnabled)) + return enabled +} + +// getMetricsInterval returns the metrics push interval from NB_METRICS_INTERVAL env var. +// Returns 0 if not set or invalid. +func getMetricsInterval() time.Duration { + intervalStr := os.Getenv(EnvMetricsInterval) + if intervalStr == "" { + return 0 + } + interval, err := time.ParseDuration(intervalStr) + if err != nil { + log.Warnf("invalid metrics interval from env %q: %v", intervalStr, err) + return 0 + } + if interval <= 0 { + log.Warnf("invalid metrics interval from env %q: must be positive", intervalStr) + return 0 + } + return interval +} + +func isForceSending() bool { + force, _ := strconv.ParseBool(os.Getenv(EnvMetricsForceSending)) + return force +} + +// getMetricsConfigURL returns the URL to fetch push configuration from +func getMetricsConfigURL() string { + if envURL := os.Getenv(EnvMetricsConfigURL); envURL != "" { + return envURL + } + return defaultMetricsConfigURL +} + +// getMetricsServerURL returns the metrics server URL from NB_METRICS_SERVER_URL env var. +// Returns nil if not set or invalid. +func getMetricsServerURL() *url.URL { + envURL := os.Getenv(EnvMetricsServerURL) + if envURL == "" { + return nil + } + parsed, err := url.ParseRequestURI(envURL) + if err != nil || parsed.Host == "" { + log.Warnf("invalid metrics server URL %q: must be an absolute HTTP(S) URL", envURL) + return nil + } + if parsed.Scheme != "http" && parsed.Scheme != "https" { + log.Warnf("invalid metrics server URL %q: unsupported scheme %q", envURL, parsed.Scheme) + return nil + } + return parsed +} diff --git a/client/internal/metrics/influxdb.go b/client/internal/metrics/influxdb.go new file mode 100644 index 000000000..531f6a986 --- /dev/null +++ b/client/internal/metrics/influxdb.go @@ -0,0 +1,219 @@ +package metrics + +import ( + "context" + "fmt" + "io" + "maps" + "slices" + "sync" + "time" + + log "github.com/sirupsen/logrus" +) + +const ( + maxSampleAge = 5 * 24 * time.Hour // drop samples older than 5 days + maxBufferSize = 5 * 1024 * 1024 // drop oldest samples when estimated size exceeds 5 MB + // estimatedSampleSize is a rough per-sample memory estimate (measurement + tags + fields + timestamp) + estimatedSampleSize = 256 +) + +// influxSample is a single InfluxDB line protocol entry. +type influxSample struct { + measurement string + tags string + fields map[string]float64 + timestamp time.Time +} + +// influxDBMetrics collects metric events as timestamped samples. +// Each event is recorded with its exact timestamp, pushed once, then cleared. +type influxDBMetrics struct { + mu sync.Mutex + samples []influxSample +} + +func newInfluxDBMetrics() metricsImplementation { + return &influxDBMetrics{} +} +func (m *influxDBMetrics) RecordConnectionStages( + _ context.Context, + agentInfo AgentInfo, + connectionPairID string, + connectionType ConnectionType, + isReconnection bool, + timestamps ConnectionStageTimestamps, +) { + var signalingReceivedToConnection, connectionToWgHandshake, totalDuration float64 + + if !timestamps.SignalingReceived.IsZero() && !timestamps.ConnectionReady.IsZero() { + signalingReceivedToConnection = timestamps.ConnectionReady.Sub(timestamps.SignalingReceived).Seconds() + } + + if !timestamps.ConnectionReady.IsZero() && !timestamps.WgHandshakeSuccess.IsZero() { + connectionToWgHandshake = timestamps.WgHandshakeSuccess.Sub(timestamps.ConnectionReady).Seconds() + } + + if !timestamps.SignalingReceived.IsZero() && !timestamps.WgHandshakeSuccess.IsZero() { + totalDuration = timestamps.WgHandshakeSuccess.Sub(timestamps.SignalingReceived).Seconds() + } + + attemptType := "initial" + if isReconnection { + attemptType = "reconnection" + } + + connTypeStr := connectionType.String() + tags := fmt.Sprintf("deployment_type=%s,connection_type=%s,attempt_type=%s,version=%s,os=%s,arch=%s,peer_id=%s,connection_pair_id=%s", + agentInfo.DeploymentType.String(), + connTypeStr, + attemptType, + agentInfo.Version, + agentInfo.OS, + agentInfo.Arch, + agentInfo.peerID, + connectionPairID, + ) + + now := time.Now() + + m.mu.Lock() + defer m.mu.Unlock() + + m.samples = append(m.samples, influxSample{ + measurement: "netbird_peer_connection", + tags: tags, + fields: map[string]float64{ + "signaling_to_connection_seconds": signalingReceivedToConnection, + "connection_to_wg_handshake_seconds": connectionToWgHandshake, + "total_seconds": totalDuration, + }, + timestamp: now, + }) + m.trimLocked() + + log.Tracef("peer connection metrics [%s, %s, %s]: signalingReceived→connection: %.3fs, connection→wg_handshake: %.3fs, total: %.3fs", + agentInfo.DeploymentType.String(), connTypeStr, attemptType, signalingReceivedToConnection, connectionToWgHandshake, totalDuration) +} + +func (m *influxDBMetrics) RecordSyncDuration(_ context.Context, agentInfo AgentInfo, duration time.Duration) { + tags := fmt.Sprintf("deployment_type=%s,version=%s,os=%s,arch=%s,peer_id=%s", + agentInfo.DeploymentType.String(), + agentInfo.Version, + agentInfo.OS, + agentInfo.Arch, + agentInfo.peerID, + ) + + m.mu.Lock() + defer m.mu.Unlock() + + m.samples = append(m.samples, influxSample{ + measurement: "netbird_sync", + tags: tags, + fields: map[string]float64{ + "duration_seconds": duration.Seconds(), + }, + timestamp: time.Now(), + }) + m.trimLocked() +} + +func (m *influxDBMetrics) RecordLoginDuration(_ context.Context, agentInfo AgentInfo, duration time.Duration, success bool) { + result := "success" + if !success { + result = "failure" + } + + tags := fmt.Sprintf("deployment_type=%s,result=%s,version=%s,os=%s,arch=%s,peer_id=%s", + agentInfo.DeploymentType.String(), + result, + agentInfo.Version, + agentInfo.OS, + agentInfo.Arch, + agentInfo.peerID, + ) + + m.mu.Lock() + defer m.mu.Unlock() + + m.samples = append(m.samples, influxSample{ + measurement: "netbird_login", + tags: tags, + fields: map[string]float64{ + "duration_seconds": duration.Seconds(), + }, + timestamp: time.Now(), + }) + m.trimLocked() + + log.Tracef("login metrics [%s, %s]: duration=%.3fs", agentInfo.DeploymentType.String(), result, duration.Seconds()) +} + +// Export writes pending samples in InfluxDB line protocol format. +// Format: measurement,tag=val,tag=val field=val,field=val timestamp_ns +func (m *influxDBMetrics) Export(w io.Writer) error { + m.mu.Lock() + samples := make([]influxSample, len(m.samples)) + copy(samples, m.samples) + m.mu.Unlock() + + for _, s := range samples { + if _, err := fmt.Fprintf(w, "%s,%s ", s.measurement, s.tags); err != nil { + return err + } + + sortedKeys := slices.Sorted(maps.Keys(s.fields)) + first := true + for _, k := range sortedKeys { + if !first { + if _, err := fmt.Fprint(w, ","); err != nil { + return err + } + } + if _, err := fmt.Fprintf(w, "%s=%g", k, s.fields[k]); err != nil { + return err + } + first = false + } + + if _, err := fmt.Fprintf(w, " %d\n", s.timestamp.UnixNano()); err != nil { + return err + } + } + return nil +} + +// Reset clears pending samples after a successful push +func (m *influxDBMetrics) Reset() { + m.mu.Lock() + defer m.mu.Unlock() + m.samples = m.samples[:0] +} + +// trimLocked removes samples that exceed age or size limits. +// Must be called with m.mu held. +func (m *influxDBMetrics) trimLocked() { + now := time.Now() + + // drop samples older than maxSampleAge + cutoff := 0 + for cutoff < len(m.samples) && now.Sub(m.samples[cutoff].timestamp) > maxSampleAge { + cutoff++ + } + if cutoff > 0 { + copy(m.samples, m.samples[cutoff:]) + m.samples = m.samples[:len(m.samples)-cutoff] + log.Debugf("influxdb metrics: dropped %d samples older than %s", cutoff, maxSampleAge) + } + + // drop oldest samples if estimated size exceeds maxBufferSize + maxSamples := maxBufferSize / estimatedSampleSize + if len(m.samples) > maxSamples { + drop := len(m.samples) - maxSamples + copy(m.samples, m.samples[drop:]) + m.samples = m.samples[:maxSamples] + log.Debugf("influxdb metrics: dropped %d oldest samples to stay under %d MB size limit", drop, maxBufferSize/(1024*1024)) + } +} diff --git a/client/internal/metrics/influxdb_test.go b/client/internal/metrics/influxdb_test.go new file mode 100644 index 000000000..b964e31a3 --- /dev/null +++ b/client/internal/metrics/influxdb_test.go @@ -0,0 +1,229 @@ +package metrics + +import ( + "bytes" + "context" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestInfluxDBMetrics_RecordAndExport(t *testing.T) { + m := newInfluxDBMetrics().(*influxDBMetrics) + + agentInfo := AgentInfo{ + DeploymentType: DeploymentTypeCloud, + Version: "1.0.0", + OS: "linux", + Arch: "amd64", + peerID: "abc123", + } + + ts := ConnectionStageTimestamps{ + SignalingReceived: time.Now().Add(-3 * time.Second), + ConnectionReady: time.Now().Add(-2 * time.Second), + WgHandshakeSuccess: time.Now().Add(-1 * time.Second), + } + + m.RecordConnectionStages(context.Background(), agentInfo, "pair123", ConnectionTypeICE, false, ts) + + var buf bytes.Buffer + err := m.Export(&buf) + require.NoError(t, err) + + output := buf.String() + assert.Contains(t, output, "netbird_peer_connection,") + assert.Contains(t, output, "connection_to_wg_handshake_seconds=") + assert.Contains(t, output, "signaling_to_connection_seconds=") + assert.Contains(t, output, "total_seconds=") +} + +func TestInfluxDBMetrics_ExportDeterministicFieldOrder(t *testing.T) { + m := newInfluxDBMetrics().(*influxDBMetrics) + + agentInfo := AgentInfo{ + DeploymentType: DeploymentTypeCloud, + Version: "1.0.0", + OS: "linux", + Arch: "amd64", + peerID: "abc123", + } + + ts := ConnectionStageTimestamps{ + SignalingReceived: time.Now().Add(-3 * time.Second), + ConnectionReady: time.Now().Add(-2 * time.Second), + WgHandshakeSuccess: time.Now().Add(-1 * time.Second), + } + + // Record multiple times and verify consistent field order + for i := 0; i < 10; i++ { + m.RecordConnectionStages(context.Background(), agentInfo, "pair123", ConnectionTypeICE, false, ts) + } + + var buf bytes.Buffer + err := m.Export(&buf) + require.NoError(t, err) + + lines := strings.Split(strings.TrimSpace(buf.String()), "\n") + require.Len(t, lines, 10) + + // Extract field portion from each line and verify they're all identical + var fieldSections []string + for _, line := range lines { + parts := strings.SplitN(line, " ", 3) + require.Len(t, parts, 3, "each line should have measurement, fields, timestamp") + fieldSections = append(fieldSections, parts[1]) + } + + for i := 1; i < len(fieldSections); i++ { + assert.Equal(t, fieldSections[0], fieldSections[i], "field order should be deterministic across samples") + } + + // Fields should be alphabetically sorted + assert.True(t, strings.HasPrefix(fieldSections[0], "connection_to_wg_handshake_seconds="), + "fields should be sorted: connection_to_wg < signaling_to < total") +} + +func TestInfluxDBMetrics_RecordSyncDuration(t *testing.T) { + m := newInfluxDBMetrics().(*influxDBMetrics) + + agentInfo := AgentInfo{ + DeploymentType: DeploymentTypeSelfHosted, + Version: "2.0.0", + OS: "darwin", + Arch: "arm64", + peerID: "def456", + } + + m.RecordSyncDuration(context.Background(), agentInfo, 1500*time.Millisecond) + + var buf bytes.Buffer + err := m.Export(&buf) + require.NoError(t, err) + + output := buf.String() + assert.Contains(t, output, "netbird_sync,") + assert.Contains(t, output, "duration_seconds=1.5") + assert.Contains(t, output, "deployment_type=selfhosted") +} + +func TestInfluxDBMetrics_Reset(t *testing.T) { + m := newInfluxDBMetrics().(*influxDBMetrics) + + agentInfo := AgentInfo{ + DeploymentType: DeploymentTypeCloud, + Version: "1.0.0", + OS: "linux", + Arch: "amd64", + peerID: "abc123", + } + + m.RecordSyncDuration(context.Background(), agentInfo, time.Second) + + var buf bytes.Buffer + err := m.Export(&buf) + require.NoError(t, err) + assert.NotEmpty(t, buf.String()) + + m.Reset() + + buf.Reset() + err = m.Export(&buf) + require.NoError(t, err) + assert.Empty(t, buf.String(), "should be empty after reset") +} + +func TestInfluxDBMetrics_ExportEmpty(t *testing.T) { + m := newInfluxDBMetrics().(*influxDBMetrics) + + var buf bytes.Buffer + err := m.Export(&buf) + require.NoError(t, err) + assert.Empty(t, buf.String()) +} + +func TestInfluxDBMetrics_TrimByAge(t *testing.T) { + m := newInfluxDBMetrics().(*influxDBMetrics) + + m.mu.Lock() + m.samples = append(m.samples, influxSample{ + measurement: "old", + tags: "t=1", + fields: map[string]float64{"v": 1}, + timestamp: time.Now().Add(-maxSampleAge - time.Hour), + }) + m.trimLocked() + remaining := len(m.samples) + m.mu.Unlock() + + assert.Equal(t, 0, remaining, "old samples should be trimmed") +} + +func TestInfluxDBMetrics_RecordLoginDuration(t *testing.T) { + m := newInfluxDBMetrics().(*influxDBMetrics) + + agentInfo := AgentInfo{ + DeploymentType: DeploymentTypeCloud, + Version: "1.0.0", + OS: "linux", + Arch: "amd64", + peerID: "abc123", + } + + m.RecordLoginDuration(context.Background(), agentInfo, 2500*time.Millisecond, true) + + var buf bytes.Buffer + err := m.Export(&buf) + require.NoError(t, err) + + output := buf.String() + assert.Contains(t, output, "netbird_login,") + assert.Contains(t, output, "duration_seconds=2.5") + assert.Contains(t, output, "result=success") +} + +func TestInfluxDBMetrics_RecordLoginDurationFailure(t *testing.T) { + m := newInfluxDBMetrics().(*influxDBMetrics) + + agentInfo := AgentInfo{ + DeploymentType: DeploymentTypeSelfHosted, + Version: "1.0.0", + OS: "darwin", + Arch: "arm64", + peerID: "xyz789", + } + + m.RecordLoginDuration(context.Background(), agentInfo, 5*time.Second, false) + + var buf bytes.Buffer + err := m.Export(&buf) + require.NoError(t, err) + + output := buf.String() + assert.Contains(t, output, "netbird_login,") + assert.Contains(t, output, "result=failure") + assert.Contains(t, output, "deployment_type=selfhosted") +} + +func TestInfluxDBMetrics_TrimBySize(t *testing.T) { + m := newInfluxDBMetrics().(*influxDBMetrics) + + maxSamples := maxBufferSize / estimatedSampleSize + m.mu.Lock() + for i := 0; i < maxSamples+100; i++ { + m.samples = append(m.samples, influxSample{ + measurement: "test", + tags: "t=1", + fields: map[string]float64{"v": float64(i)}, + timestamp: time.Now(), + }) + } + m.trimLocked() + remaining := len(m.samples) + m.mu.Unlock() + + assert.Equal(t, maxSamples, remaining, "should trim to max samples") +} diff --git a/client/internal/metrics/infra/.env.example b/client/internal/metrics/infra/.env.example new file mode 100644 index 000000000..9c5c1a258 --- /dev/null +++ b/client/internal/metrics/infra/.env.example @@ -0,0 +1,16 @@ +# Copy to .env and adjust values before running docker compose + +# InfluxDB admin (server-side only, never exposed to clients) +INFLUXDB_ADMIN_PASSWORD=changeme +INFLUXDB_ADMIN_TOKEN=changeme + +# Grafana admin credentials +GRAFANA_ADMIN_USER=admin +GRAFANA_ADMIN_PASSWORD=changeme + +# Remote config served by ingest at /config +# Set CONFIG_METRICS_SERVER_URL to the ingest server's public address to enable +CONFIG_METRICS_SERVER_URL= +CONFIG_VERSION_SINCE=0.0.0 +CONFIG_VERSION_UNTIL=99.99.99 +CONFIG_PERIOD_MINUTES=5 diff --git a/client/internal/metrics/infra/.gitignore b/client/internal/metrics/infra/.gitignore new file mode 100644 index 000000000..4c49bd78f --- /dev/null +++ b/client/internal/metrics/infra/.gitignore @@ -0,0 +1 @@ +.env diff --git a/client/internal/metrics/infra/README.md b/client/internal/metrics/infra/README.md new file mode 100644 index 000000000..5a93dbd87 --- /dev/null +++ b/client/internal/metrics/infra/README.md @@ -0,0 +1,194 @@ +# Client Metrics + +Internal documentation for the NetBird client metrics system. + +## Overview + +Client metrics track connection performance and sync durations using InfluxDB line protocol (`influxdb.go`). Each event is pushed once then cleared. + +Metrics collection is always active (for debug bundles). Push to backend is: +- Disabled by default (opt-in via `NB_METRICS_PUSH_ENABLED=true`) +- Managed at daemon layer (survives engine restarts) + +## Architecture + +### Layer Separation + +```text +Daemon Layer (connect.go) + ├─ Creates ClientMetrics instance once + ├─ Starts/stops push lifecycle + └─ Updates AgentInfo on profile switch + │ + ▼ +Engine Layer (engine.go) + └─ Records metrics via ClientMetrics methods +``` + +### Ingest Server + +Clients do not talk to InfluxDB directly. An ingest server sits between clients and InfluxDB: + +```text +Client ──POST──▶ Ingest Server (:8087) ──▶ InfluxDB (internal) + │ + ├─ Validates line protocol + ├─ Allowlists measurements, fields, and tags + ├─ Rejects out-of-bound values + └─ Serves remote config at /config +``` + +- **No secret/token-based client auth** — the ingest server holds the InfluxDB token server-side. Clients must send a hashed peer ID via `X-Peer-ID` header. +- **InfluxDB is not exposed** — only accessible within the docker network +- Source: `ingest/main.go` + +## Metrics Collected + +### Connection Stage Timing + +Measurement: `netbird_peer_connection` + +| Field | Timestamps | Description | +|-------|-----------|-------------| +| `signaling_to_connection_seconds` | `SignalingReceived → ConnectionReady` | ICE/relay negotiation time after the first signal is received from the remote peer | +| `connection_to_wg_handshake_seconds` | `ConnectionReady → WgHandshakeSuccess` | WireGuard cryptographic handshake latency once the transport layer is ready | +| `total_seconds` | `SignalingReceived → WgHandshakeSuccess` | End-to-end connection time anchored at the first received signal | + +Tags: +- `deployment_type`: "cloud" | "selfhosted" | "unknown" +- `connection_type`: "ice" | "relay" +- `attempt_type`: "initial" | "reconnection" +- `version`: NetBird version string +- `os`: Operating system (linux, darwin, windows, android, ios, etc.) +- `arch`: CPU architecture (amd64, arm64, etc.) + +**Note:** `SignalingReceived` is set when the first offer or answer arrives from the remote peer (in both initial and reconnection paths). It excludes the potentially unbounded wait for the remote peer to come online. + +### Sync Duration + +Measurement: `netbird_sync` + +| Field | Description | +|-------|-------------| +| `duration_seconds` | Time to process a sync message from management server | + +Tags: +- `deployment_type`: "cloud" | "selfhosted" | "unknown" +- `version`: NetBird version string +- `os`: Operating system (linux, darwin, windows, android, ios, etc.) +- `arch`: CPU architecture (amd64, arm64, etc.) + +### Login Duration + +Measurement: `netbird_login` + +| Field | Description | +|-------|-------------| +| `duration_seconds` | Time to complete the login/auth exchange with management server | + +Tags: +- `deployment_type`: "cloud" | "selfhosted" | "unknown" +- `result`: "success" | "failure" +- `version`: NetBird version string +- `os`: Operating system (linux, darwin, windows, android, ios, etc.) +- `arch`: CPU architecture (amd64, arm64, etc.) + +## Buffer Limits + +The InfluxDB backend limits in-memory sample storage to prevent unbounded growth when pushes fail: +- **Max age:** Samples older than 5 days are dropped +- **Max size:** Estimated buffer size capped at 5 MB (~20k samples) + +## Configuration + +### Client Environment Variables + +| Variable | Default | Description | +|----------|---------|-------------| +| `NB_METRICS_PUSH_ENABLED` | `false` | Enable metrics push to backend | +| `NB_METRICS_SERVER_URL` | *(from remote config)* | Ingest server URL (e.g., `https://ingest.netbird.io`) | +| `NB_METRICS_INTERVAL` | *(from remote config)* | Push interval (e.g., "1m", "30m", "4h") | +| `NB_METRICS_FORCE_SENDING` | `false` | Skip remote config, push unconditionally | +| `NB_METRICS_CONFIG_URL` | `https://ingest.netbird.io/config` | Remote push config URL | + +`NB_METRICS_SERVER_URL` and `NB_METRICS_INTERVAL` override their respective values but do not bypass remote config eligibility checks (version range). Use `NB_METRICS_FORCE_SENDING=true` to skip all remote config gating. + +### Ingest Server Environment Variables + +| Variable | Default | Description | +|----------|---------|-------------| +| `INGEST_LISTEN_ADDR` | `:8087` | Listen address | +| `INFLUXDB_URL` | `http://influxdb:8086/api/v2/write?org=netbird&bucket=metrics&precision=ns` | InfluxDB write endpoint | +| `INFLUXDB_TOKEN` | *(required)* | InfluxDB auth token (server-side only) | +| `CONFIG_METRICS_SERVER_URL` | *(empty — disables /config)* | `server_url` in the remote config JSON (the URL clients push metrics to) | +| `CONFIG_VERSION_SINCE` | `0.0.0` | Minimum client version to push metrics | +| `CONFIG_VERSION_UNTIL` | `99.99.99` | Maximum client version to push metrics | +| `CONFIG_PERIOD_MINUTES` | `5` | Push interval in minutes | + +The ingest server serves a remote config JSON at `GET /config` when `CONFIG_METRICS_SERVER_URL` is set. Clients can use `NB_METRICS_CONFIG_URL=http:///config` to fetch it. + +### Configuration Precedence + +For URL and Interval, the precedence is: +1. **Environment variable** - `NB_METRICS_SERVER_URL` / `NB_METRICS_INTERVAL` +2. **Remote config** - fetched from `NB_METRICS_CONFIG_URL` +3. **Default** - 5 minute interval, URL from remote config + +## Push Behavior + +1. `StartPush()` spawns background goroutine with timer +2. First push happens immediately on startup +3. Periodically: `push()` → `Export()` → HTTP POST to ingest server +4. On failure: log error, continue (non-blocking) +5. On success: `Reset()` clears pushed samples +6. `StopPush()` cancels context and waits for goroutine + +Samples are collected with exact timestamps, pushed once, then cleared. No data is resent. + +## Local Development Setup + +### 1. Configure and Start Services + +```bash +# From this directory (client/internal/metrics/infra) +cp .env.example .env +# Edit .env to set INFLUXDB_ADMIN_PASSWORD, INFLUXDB_ADMIN_TOKEN, and GRAFANA_ADMIN_PASSWORD +docker compose up -d +``` + +This starts: +- **Ingest server** on http://localhost:8087 — accepts client metrics (requires `X-Peer-ID` header, no secret/token auth) +- **InfluxDB** — internal only, not exposed to host +- **Grafana** on http://localhost:3001 + +### 2. Configure Client + +```bash +export NB_METRICS_PUSH_ENABLED=true +export NB_METRICS_FORCE_SENDING=true +export NB_METRICS_SERVER_URL=http://localhost:8087 +export NB_METRICS_INTERVAL=1m +``` + +### 3. Run Client + +```bash +cd ../../../.. +go run ./client/ up +``` + +### 4. View in Grafana + +- **InfluxDB dashboard:** http://localhost:3001/d/netbird-influxdb-metrics + +### 5. Verify Data + +```bash +# Query via InfluxDB (using admin token from .env) +docker compose exec influxdb influx query \ + 'from(bucket: "metrics") |> range(start: -1h)' \ + --org netbird + +# Check ingest server health +curl http://localhost:8087/health +``` \ No newline at end of file diff --git a/client/internal/metrics/infra/docker-compose.yml b/client/internal/metrics/infra/docker-compose.yml new file mode 100644 index 000000000..0f2b6b889 --- /dev/null +++ b/client/internal/metrics/infra/docker-compose.yml @@ -0,0 +1,69 @@ +version: '3.8' + +services: + ingest: + container_name: ingest + build: + context: ./ingest + ports: + - "8087:8087" + environment: + - INGEST_LISTEN_ADDR=:8087 + - INFLUXDB_URL=http://influxdb:8086/api/v2/write?org=netbird&bucket=metrics&precision=ns + - INFLUXDB_TOKEN=${INFLUXDB_ADMIN_TOKEN:?required} + - CONFIG_METRICS_SERVER_URL=${CONFIG_METRICS_SERVER_URL:-} + - CONFIG_VERSION_SINCE=${CONFIG_VERSION_SINCE:-0.0.0} + - CONFIG_VERSION_UNTIL=${CONFIG_VERSION_UNTIL:-99.99.99} + - CONFIG_PERIOD_MINUTES=${CONFIG_PERIOD_MINUTES:-5} + depends_on: + - influxdb + restart: unless-stopped + networks: + - metrics + + influxdb: + container_name: influxdb + image: influxdb:2 + # No ports exposed — only accessible within the metrics network + volumes: + - influxdb-data:/var/lib/influxdb2 + - ./influxdb/scripts:/docker-entrypoint-initdb.d + environment: + - DOCKER_INFLUXDB_INIT_MODE=setup + - DOCKER_INFLUXDB_INIT_USERNAME=admin + - DOCKER_INFLUXDB_INIT_PASSWORD=${INFLUXDB_ADMIN_PASSWORD:?required} + - DOCKER_INFLUXDB_INIT_ORG=netbird + - DOCKER_INFLUXDB_INIT_BUCKET=metrics + - DOCKER_INFLUXDB_INIT_RETENTION=365d + - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=${INFLUXDB_ADMIN_TOKEN:-} + restart: unless-stopped + networks: + - metrics + + grafana: + container_name: grafana + image: grafana/grafana:11.6.0 + ports: + - "3001:3000" + environment: + - GF_SECURITY_ADMIN_USER=${GRAFANA_ADMIN_USER:-admin} + - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD:?required} + - GF_USERS_ALLOW_SIGN_UP=false + - GF_INSTALL_PLUGINS= + - INFLUXDB_ADMIN_TOKEN=${INFLUXDB_ADMIN_TOKEN:-} + volumes: + - grafana-data:/var/lib/grafana + - ./grafana/provisioning:/etc/grafana/provisioning + depends_on: + - influxdb + restart: unless-stopped + networks: + - metrics + +volumes: + influxdb-data: + grafana-data: + +networks: + metrics: + driver: bridge diff --git a/client/internal/metrics/infra/grafana/provisioning/dashboards/dashboard.yml b/client/internal/metrics/infra/grafana/provisioning/dashboards/dashboard.yml new file mode 100644 index 000000000..a7e8d3989 --- /dev/null +++ b/client/internal/metrics/infra/grafana/provisioning/dashboards/dashboard.yml @@ -0,0 +1,12 @@ +apiVersion: 1 + +providers: + - name: 'NetBird Dashboards' + orgId: 1 + folder: '' + type: file + disableDeletion: false + updateIntervalSeconds: 10 + allowUiUpdates: true + options: + path: /etc/grafana/provisioning/dashboards/json \ No newline at end of file diff --git a/client/internal/metrics/infra/grafana/provisioning/dashboards/json/netbird-influxdb-metrics.json b/client/internal/metrics/infra/grafana/provisioning/dashboards/json/netbird-influxdb-metrics.json new file mode 100644 index 000000000..2bcc9cbab --- /dev/null +++ b/client/internal/metrics/infra/grafana/provisioning/dashboards/json/netbird-influxdb-metrics.json @@ -0,0 +1,280 @@ +{ + "uid": "netbird-influxdb-metrics", + "title": "NetBird Client Metrics (InfluxDB)", + "tags": ["netbird", "connections", "influxdb"], + "timezone": "browser", + "panels": [ + { + "id": 5, + "title": "Sync Duration Extremes", + "type": "stat", + "datasource": { + "type": "influxdb", + "uid": "influxdb" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "targets": [ + { + "query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_sync\" and r._field == \"duration_seconds\")\n |> map(fn: (r) => ({r with _value: r._value * 1000.0}))\n |> drop(columns: [\"deployment_type\", \"version\", \"os\", \"arch\", \"peer_id\"])\n |> min()\n |> set(key: \"_field\", value: \"Min\")", + "refId": "A" + }, + { + "query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_sync\" and r._field == \"duration_seconds\")\n |> map(fn: (r) => ({r with _value: r._value * 1000.0}))\n |> drop(columns: [\"deployment_type\", \"version\", \"os\", \"arch\", \"peer_id\"])\n |> max()\n |> set(key: \"_field\", value: \"Max\")", + "refId": "B" + } + ], + "fieldConfig": { + "defaults": { + "unit": "ms", + "min": 0 + } + }, + "options": { + "reduceOptions": { + "calcs": ["lastNotNull"] + }, + "colorMode": "value", + "graphMode": "none", + "textMode": "auto" + } + }, + { + "id": 6, + "title": "Total Connection Time Extremes", + "type": "stat", + "datasource": { + "type": "influxdb", + "uid": "influxdb" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "targets": [ + { + "query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_peer_connection\" and r._field == \"total_seconds\")\n |> map(fn: (r) => ({r with _value: r._value * 1000.0}))\n |> drop(columns: [\"deployment_type\", \"connection_type\", \"attempt_type\", \"version\", \"os\", \"arch\", \"peer_id\", \"connection_pair_id\"])\n |> min()\n |> set(key: \"_field\", value: \"Min\")", + "refId": "A" + }, + { + "query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_peer_connection\" and r._field == \"total_seconds\")\n |> map(fn: (r) => ({r with _value: r._value * 1000.0}))\n |> drop(columns: [\"deployment_type\", \"connection_type\", \"attempt_type\", \"version\", \"os\", \"arch\", \"peer_id\", \"connection_pair_id\"])\n |> max()\n |> set(key: \"_field\", value: \"Max\")", + "refId": "B" + } + ], + "fieldConfig": { + "defaults": { + "unit": "ms", + "min": 0 + } + }, + "options": { + "reduceOptions": { + "calcs": ["lastNotNull"] + }, + "colorMode": "value", + "graphMode": "none", + "textMode": "auto" + } + }, + { + "id": 1, + "title": "Sync Duration", + "type": "timeseries", + "datasource": { + "type": "influxdb", + "uid": "influxdb" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "targets": [ + { + "query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_sync\" and r._field == \"duration_seconds\")\n |> map(fn: (r) => ({r with _value: r._value * 1000.0}))\n |> drop(columns: [\"deployment_type\", \"version\", \"os\", \"arch\", \"peer_id\"])\n |> set(key: \"_field\", value: \"Sync Duration\")", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "unit": "ms", + "min": 0, + "custom": { + "drawStyle": "points", + "pointSize": 5 + } + } + } + }, + { + "id": 4, + "title": "ICE vs Relay", + "type": "piechart", + "datasource": { + "type": "influxdb", + "uid": "influxdb" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "targets": [ + { + "query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_peer_connection\" and r._field == \"total_seconds\")\n |> drop(columns: [\"deployment_type\", \"attempt_type\", \"version\", \"os\", \"arch\", \"peer_id\"])\n |> group(columns: [\"connection_pair_id\"])\n |> last()\n |> group(columns: [\"connection_type\"])\n |> count()", + "refId": "A" + } + ], + "options": { + "reduceOptions": { + "calcs": ["lastNotNull"] + }, + "pieType": "donut", + "tooltip": { + "mode": "multi" + } + } + }, + { + "id": 2, + "title": "Connection Stage Durations (avg)", + "type": "bargauge", + "datasource": { + "type": "influxdb", + "uid": "influxdb" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 16 + }, + "targets": [ + { + "query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_peer_connection\" and r._field == \"signaling_to_connection_seconds\")\n |> map(fn: (r) => ({r with _value: r._value * 1000.0}))\n |> drop(columns: [\"deployment_type\", \"connection_type\", \"attempt_type\", \"version\", \"os\", \"arch\", \"peer_id\", \"connection_pair_id\"])\n |> mean()\n |> drop(columns: [\"_start\", \"_stop\", \"_measurement\", \"_time\", \"_field\"])\n |> rename(columns: {_value: \"Avg Signaling to Connection\"})", + "refId": "A" + }, + { + "query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_peer_connection\" and r._field == \"connection_to_wg_handshake_seconds\")\n |> map(fn: (r) => ({r with _value: r._value * 1000.0}))\n |> drop(columns: [\"deployment_type\", \"connection_type\", \"attempt_type\", \"version\", \"os\", \"arch\", \"peer_id\", \"connection_pair_id\"])\n |> mean()\n |> drop(columns: [\"_start\", \"_stop\", \"_measurement\", \"_time\", \"_field\"])\n |> rename(columns: {_value: \"Avg Connection to WG Handshake\"})", + "refId": "B" + } + ], + "fieldConfig": { + "defaults": { + "unit": "ms", + "min": 0 + } + }, + "options": { + "reduceOptions": { + "calcs": ["lastNotNull"] + }, + "orientation": "horizontal", + "displayMode": "gradient" + } + }, + { + "id": 3, + "title": "Total Connection Time", + "type": "timeseries", + "datasource": { + "type": "influxdb", + "uid": "influxdb" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "targets": [ + { + "query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_peer_connection\" and r._field == \"total_seconds\")\n |> map(fn: (r) => ({r with _value: r._value * 1000.0}))\n |> drop(columns: [\"deployment_type\", \"connection_type\", \"attempt_type\", \"version\", \"os\", \"arch\", \"peer_id\", \"connection_pair_id\"])\n |> set(key: \"_field\", value: \"Total Connection Time\")", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "unit": "ms", + "min": 0, + "custom": { + "drawStyle": "points", + "pointSize": 5 + } + } + } + }, + { + "id": 7, + "title": "Login Duration", + "type": "timeseries", + "datasource": { + "type": "influxdb", + "uid": "influxdb" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 24 + }, + "targets": [ + { + "query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_login\" and r._field == \"duration_seconds\")\n |> map(fn: (r) => ({r with _value: r._value * 1000.0}))\n |> drop(columns: [\"deployment_type\", \"version\", \"os\", \"arch\", \"peer_id\"])\n |> set(key: \"_field\", value: \"Login Duration\")", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "unit": "ms", + "min": 0, + "custom": { + "drawStyle": "points", + "pointSize": 5 + } + } + } + }, + { + "id": 8, + "title": "Login Success vs Failure", + "type": "piechart", + "datasource": { + "type": "influxdb", + "uid": "influxdb" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 24 + }, + "targets": [ + { + "query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_login\" and r._field == \"duration_seconds\")\n |> drop(columns: [\"deployment_type\", \"version\", \"os\", \"arch\", \"peer_id\"])\n |> group(columns: [\"result\"])\n |> count()", + "refId": "A" + } + ], + "options": { + "reduceOptions": { + "calcs": ["lastNotNull"] + }, + "pieType": "donut", + "tooltip": { + "mode": "multi" + } + } + } + ], + "schemaVersion": 27, + "version": 2, + "refresh": "30s" +} diff --git a/client/internal/metrics/infra/grafana/provisioning/datasources/influxdb.yml b/client/internal/metrics/infra/grafana/provisioning/datasources/influxdb.yml new file mode 100644 index 000000000..69b96a93a --- /dev/null +++ b/client/internal/metrics/infra/grafana/provisioning/datasources/influxdb.yml @@ -0,0 +1,15 @@ +apiVersion: 1 + +datasources: + - name: InfluxDB + uid: influxdb + type: influxdb + access: proxy + url: http://influxdb:8086 + editable: true + jsonData: + version: Flux + organization: netbird + defaultBucket: metrics + secureJsonData: + token: ${INFLUXDB_ADMIN_TOKEN} \ No newline at end of file diff --git a/client/internal/metrics/infra/influxdb/scripts/create-tokens.sh b/client/internal/metrics/infra/influxdb/scripts/create-tokens.sh new file mode 100755 index 000000000..2464803e8 --- /dev/null +++ b/client/internal/metrics/infra/influxdb/scripts/create-tokens.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# Creates a scoped InfluxDB read-only token for Grafana. +# Clients do not need a token — they push via the ingest server. + +BUCKET_ID=$(influx bucket list --org netbird --name metrics --json | grep -oP '"id"\s*:\s*"\K[^"]+' | head -1) +ORG_ID=$(influx org list --name netbird --json | grep -oP '"id"\s*:\s*"\K[^"]+' | head -1) + +if [[ -z "$BUCKET_ID" ]] || [[ -z "$ORG_ID" ]]; then + echo "ERROR: Could not determine bucket or org ID" >&2 + echo "BUCKET_ID=$BUCKET_ID ORG_ID=$ORG_ID" >&2 + exit 1 +fi + +# Create read-only token for Grafana +READ_TOKEN=$(influx auth create \ + --org netbird \ + --read-bucket "$BUCKET_ID" \ + --description "Grafana read-only token" \ + --json | grep -oP '"token"\s*:\s*"\K[^"]+' | head -1) + +echo "" +echo "============================================" +echo "GRAFANA READ-ONLY TOKEN:" +echo "$READ_TOKEN" +echo "============================================" \ No newline at end of file diff --git a/client/internal/metrics/infra/ingest/Dockerfile b/client/internal/metrics/infra/ingest/Dockerfile new file mode 100644 index 000000000..3620c524b --- /dev/null +++ b/client/internal/metrics/infra/ingest/Dockerfile @@ -0,0 +1,10 @@ +FROM golang:1.25-alpine AS build +WORKDIR /app +COPY go.mod main.go ./ +RUN CGO_ENABLED=0 go build -o ingest . + +FROM alpine:3.20 +RUN adduser -D -H ingest +COPY --from=build /app/ingest /usr/local/bin/ingest +USER ingest +ENTRYPOINT ["ingest"] \ No newline at end of file diff --git a/client/internal/metrics/infra/ingest/go.mod b/client/internal/metrics/infra/ingest/go.mod new file mode 100644 index 000000000..aaf1ea9da --- /dev/null +++ b/client/internal/metrics/infra/ingest/go.mod @@ -0,0 +1,11 @@ +module github.com/netbirdio/netbird/client/internal/metrics/infra/ingest + +go 1.25 + +require github.com/stretchr/testify v1.11.1 + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/client/internal/metrics/infra/ingest/go.sum b/client/internal/metrics/infra/ingest/go.sum new file mode 100644 index 000000000..c4c1710c4 --- /dev/null +++ b/client/internal/metrics/infra/ingest/go.sum @@ -0,0 +1,10 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/client/internal/metrics/infra/ingest/main.go b/client/internal/metrics/infra/ingest/main.go new file mode 100644 index 000000000..a5031a873 --- /dev/null +++ b/client/internal/metrics/infra/ingest/main.go @@ -0,0 +1,355 @@ +package main + +import ( + "bytes" + "compress/gzip" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "os" + "strconv" + "strings" + "time" +) + +const ( + defaultListenAddr = ":8087" + defaultInfluxDBURL = "http://influxdb:8086/api/v2/write?org=netbird&bucket=metrics&precision=ns" + maxBodySize = 50 * 1024 * 1024 // 50 MB max request body + maxDurationSeconds = 300.0 // reject any duration field > 5 minutes + peerIDLength = 16 // truncated SHA-256: 8 bytes = 16 hex chars + maxTagValueLength = 64 // reject tag values longer than this +) + +type measurementSpec struct { + allowedFields map[string]bool + allowedTags map[string]bool +} + +var allowedMeasurements = map[string]measurementSpec{ + "netbird_peer_connection": { + allowedFields: map[string]bool{ + "signaling_to_connection_seconds": true, + "connection_to_wg_handshake_seconds": true, + "total_seconds": true, + }, + allowedTags: map[string]bool{ + "deployment_type": true, + "connection_type": true, + "attempt_type": true, + "version": true, + "os": true, + "arch": true, + "peer_id": true, + "connection_pair_id": true, + }, + }, + "netbird_sync": { + allowedFields: map[string]bool{ + "duration_seconds": true, + }, + allowedTags: map[string]bool{ + "deployment_type": true, + "version": true, + "os": true, + "arch": true, + "peer_id": true, + }, + }, + "netbird_login": { + allowedFields: map[string]bool{ + "duration_seconds": true, + }, + allowedTags: map[string]bool{ + "deployment_type": true, + "result": true, + "version": true, + "os": true, + "arch": true, + "peer_id": true, + }, + }, +} + +func main() { + listenAddr := envOr("INGEST_LISTEN_ADDR", defaultListenAddr) + influxURL := envOr("INFLUXDB_URL", defaultInfluxDBURL) + influxToken := os.Getenv("INFLUXDB_TOKEN") + + if influxToken == "" { + log.Fatal("INFLUXDB_TOKEN is required") + } + + client := &http.Client{Timeout: 10 * time.Second} + + http.HandleFunc("/", handleIngest(client, influxURL, influxToken)) + + // Build config JSON once at startup from env vars + configJSON := buildConfigJSON() + if configJSON != nil { + log.Printf("serving remote config at /config") + } + + http.HandleFunc("/config", func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + if configJSON == nil { + http.Error(w, "config not configured", http.StatusNotFound) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(configJSON) //nolint:errcheck + }) + + http.HandleFunc("/health", func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, "ok") //nolint:errcheck + }) + + log.Printf("ingest server listening on %s, forwarding to %s", listenAddr, influxURL) + if err := http.ListenAndServe(listenAddr, nil); err != nil { //nolint:gosec + log.Fatal(err) + } +} + +func handleIngest(client *http.Client, influxURL, influxToken string) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + + if err := validateAuth(r); err != nil { + http.Error(w, err.Error(), http.StatusUnauthorized) + return + } + + body, err := readBody(r) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if len(body) > maxBodySize { + http.Error(w, "body too large", http.StatusRequestEntityTooLarge) + return + } + + validated, err := validateLineProtocol(body) + if err != nil { + log.Printf("WARN validation failed from %s: %v", r.RemoteAddr, err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + forwardToInflux(w, r, client, influxURL, influxToken, validated) + } +} + +func forwardToInflux(w http.ResponseWriter, r *http.Request, client *http.Client, influxURL, influxToken string, body []byte) { + req, err := http.NewRequestWithContext(r.Context(), http.MethodPost, influxURL, bytes.NewReader(body)) + if err != nil { + log.Printf("ERROR create request: %v", err) + http.Error(w, "internal error", http.StatusInternalServerError) + return + } + req.Header.Set("Content-Type", "text/plain; charset=utf-8") + req.Header.Set("Authorization", "Token "+influxToken) + + resp, err := client.Do(req) + if err != nil { + log.Printf("ERROR forward to influxdb: %v", err) + http.Error(w, "upstream error", http.StatusBadGateway) + return + } + defer func(Body io.ReadCloser) { + _ = Body.Close() + }(resp.Body) + + w.WriteHeader(resp.StatusCode) + io.Copy(w, resp.Body) //nolint:errcheck +} + +// validateAuth checks that the X-Peer-ID header contains a valid hashed peer ID. +func validateAuth(r *http.Request) error { + peerID := r.Header.Get("X-Peer-ID") + if peerID == "" { + return fmt.Errorf("missing X-Peer-ID header") + } + if len(peerID) != peerIDLength { + return fmt.Errorf("invalid X-Peer-ID header length") + } + if _, err := hex.DecodeString(peerID); err != nil { + return fmt.Errorf("invalid X-Peer-ID header format") + } + return nil +} + +// readBody reads the request body, decompressing gzip if Content-Encoding indicates it. +func readBody(r *http.Request) ([]byte, error) { + reader := io.LimitReader(r.Body, maxBodySize+1) + + if r.Header.Get("Content-Encoding") == "gzip" { + gz, err := gzip.NewReader(reader) + if err != nil { + return nil, fmt.Errorf("invalid gzip: %w", err) + } + defer gz.Close() + reader = io.LimitReader(gz, maxBodySize+1) + } + + return io.ReadAll(reader) +} + +// validateLineProtocol parses InfluxDB line protocol lines, +// whitelists measurements and fields, and checks value bounds. +func validateLineProtocol(body []byte) ([]byte, error) { + lines := strings.Split(strings.TrimSpace(string(body)), "\n") + var valid []string + + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + + if err := validateLine(line); err != nil { + return nil, err + } + + valid = append(valid, line) + } + + if len(valid) == 0 { + return nil, fmt.Errorf("no valid lines") + } + + return []byte(strings.Join(valid, "\n") + "\n"), nil +} + +func validateLine(line string) error { + // line protocol: measurement,tag=val,tag=val field=val,field=val timestamp + parts := strings.SplitN(line, " ", 3) + if len(parts) < 2 { + return fmt.Errorf("invalid line protocol: %q", truncate(line, 100)) + } + + // parts[0] is "measurement,tag=val,tag=val" + measurementAndTags := strings.Split(parts[0], ",") + measurement := measurementAndTags[0] + + spec, ok := allowedMeasurements[measurement] + if !ok { + return fmt.Errorf("unknown measurement: %q", measurement) + } + + // Validate tags (everything after measurement name in parts[0]) + for _, tagPair := range measurementAndTags[1:] { + if err := validateTag(tagPair, measurement, spec.allowedTags); err != nil { + return err + } + } + + // Validate fields + for _, pair := range strings.Split(parts[1], ",") { + if err := validateField(pair, measurement, spec.allowedFields); err != nil { + return err + } + } + + return nil +} + +func validateTag(pair, measurement string, allowedTags map[string]bool) error { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("invalid tag: %q", pair) + } + + tagName := kv[0] + if !allowedTags[tagName] { + return fmt.Errorf("unknown tag %q in measurement %q", tagName, measurement) + } + + if len(kv[1]) > maxTagValueLength { + return fmt.Errorf("tag value too long for %q: %d > %d", tagName, len(kv[1]), maxTagValueLength) + } + + return nil +} + +func validateField(pair, measurement string, allowedFields map[string]bool) error { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("invalid field: %q", pair) + } + + fieldName := kv[0] + if !allowedFields[fieldName] { + return fmt.Errorf("unknown field %q in measurement %q", fieldName, measurement) + } + + val, err := strconv.ParseFloat(kv[1], 64) + if err != nil { + return fmt.Errorf("invalid field value %q for %q", kv[1], fieldName) + } + if val < 0 { + return fmt.Errorf("negative value for %q: %g", fieldName, val) + } + if strings.HasSuffix(fieldName, "_seconds") && val > maxDurationSeconds { + return fmt.Errorf("%q too large: %g > %g", fieldName, val, maxDurationSeconds) + } + + return nil +} + +// buildConfigJSON builds the remote config JSON from env vars. +// Returns nil if required vars are not set. +func buildConfigJSON() []byte { + serverURL := os.Getenv("CONFIG_METRICS_SERVER_URL") + versionSince := envOr("CONFIG_VERSION_SINCE", "0.0.0") + versionUntil := envOr("CONFIG_VERSION_UNTIL", "99.99.99") + periodMinutes := envOr("CONFIG_PERIOD_MINUTES", "5") + + if serverURL == "" { + return nil + } + + period, err := strconv.Atoi(periodMinutes) + if err != nil || period <= 0 { + log.Printf("WARN invalid CONFIG_PERIOD_MINUTES: %q, using 5", periodMinutes) + period = 5 + } + + cfg := map[string]any{ + "server_url": serverURL, + "version-since": versionSince, + "version-until": versionUntil, + "period_minutes": period, + } + + data, err := json.Marshal(cfg) + if err != nil { + log.Printf("ERROR failed to marshal config: %v", err) + return nil + } + return data +} + +func envOr(key, defaultVal string) string { + if v := os.Getenv(key); v != "" { + return v + } + return defaultVal +} + +func truncate(s string, n int) string { + if len(s) <= n { + return s + } + return s[:n] + "..." +} diff --git a/client/internal/metrics/infra/ingest/main_test.go b/client/internal/metrics/infra/ingest/main_test.go new file mode 100644 index 000000000..bacaa4588 --- /dev/null +++ b/client/internal/metrics/infra/ingest/main_test.go @@ -0,0 +1,124 @@ +package main + +import ( + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestValidateLine_ValidPeerConnection(t *testing.T) { + line := `netbird_peer_connection,deployment_type=cloud,connection_type=ice,attempt_type=initial,version=1.0.0,os=linux,arch=amd64,peer_id=abcdef0123456789,connection_pair_id=pair1234 signaling_to_connection_seconds=1.5,connection_to_wg_handshake_seconds=0.5,total_seconds=2 1234567890` + assert.NoError(t, validateLine(line)) +} + +func TestValidateLine_ValidSync(t *testing.T) { + line := `netbird_sync,deployment_type=selfhosted,version=2.0.0,os=darwin,arch=arm64,peer_id=abcdef0123456789 duration_seconds=1.5 1234567890` + assert.NoError(t, validateLine(line)) +} + +func TestValidateLine_ValidLogin(t *testing.T) { + line := `netbird_login,deployment_type=cloud,result=success,version=1.0.0,os=linux,arch=amd64,peer_id=abcdef0123456789 duration_seconds=3.2 1234567890` + assert.NoError(t, validateLine(line)) +} + +func TestValidateLine_UnknownMeasurement(t *testing.T) { + line := `unknown_metric,foo=bar value=1 1234567890` + err := validateLine(line) + require.Error(t, err) + assert.Contains(t, err.Error(), "unknown measurement") +} + +func TestValidateLine_UnknownTag(t *testing.T) { + line := `netbird_sync,deployment_type=cloud,evil_tag=injected,version=1.0.0,os=linux,arch=amd64,peer_id=abc duration_seconds=1.5 1234567890` + err := validateLine(line) + require.Error(t, err) + assert.Contains(t, err.Error(), "unknown tag") +} + +func TestValidateLine_UnknownField(t *testing.T) { + line := `netbird_sync,deployment_type=cloud,version=1.0.0,os=linux,arch=amd64,peer_id=abc injected_field=1 1234567890` + err := validateLine(line) + require.Error(t, err) + assert.Contains(t, err.Error(), "unknown field") +} + +func TestValidateLine_NegativeValue(t *testing.T) { + line := `netbird_sync,deployment_type=cloud,version=1.0.0,os=linux,arch=amd64,peer_id=abc duration_seconds=-1.5 1234567890` + err := validateLine(line) + require.Error(t, err) + assert.Contains(t, err.Error(), "negative") +} + +func TestValidateLine_DurationTooLarge(t *testing.T) { + line := `netbird_sync,deployment_type=cloud,version=1.0.0,os=linux,arch=amd64,peer_id=abc duration_seconds=999 1234567890` + err := validateLine(line) + require.Error(t, err) + assert.Contains(t, err.Error(), "too large") +} + +func TestValidateLine_TotalSecondsTooLarge(t *testing.T) { + line := `netbird_peer_connection,deployment_type=cloud,connection_type=ice,attempt_type=initial,version=1.0.0,os=linux,arch=amd64,peer_id=abc,connection_pair_id=pair total_seconds=500 1234567890` + err := validateLine(line) + require.Error(t, err) + assert.Contains(t, err.Error(), "too large") +} + +func TestValidateLine_TagValueTooLong(t *testing.T) { + longTag := strings.Repeat("a", maxTagValueLength+1) + line := `netbird_sync,deployment_type=` + longTag + `,version=1.0.0,os=linux,arch=amd64,peer_id=abc duration_seconds=1.5 1234567890` + err := validateLine(line) + require.Error(t, err) + assert.Contains(t, err.Error(), "tag value too long") +} + +func TestValidateLineProtocol_MultipleLines(t *testing.T) { + body := []byte( + "netbird_sync,deployment_type=cloud,version=1.0.0,os=linux,arch=amd64,peer_id=abc duration_seconds=1.5 1234567890\n" + + "netbird_login,deployment_type=cloud,result=success,version=1.0.0,os=linux,arch=amd64,peer_id=abc duration_seconds=2.0 1234567890\n", + ) + validated, err := validateLineProtocol(body) + require.NoError(t, err) + assert.Contains(t, string(validated), "netbird_sync") + assert.Contains(t, string(validated), "netbird_login") +} + +func TestValidateLineProtocol_RejectsOnBadLine(t *testing.T) { + body := []byte( + "netbird_sync,deployment_type=cloud,version=1.0.0,os=linux,arch=amd64,peer_id=abc duration_seconds=1.5 1234567890\n" + + "evil_metric,foo=bar value=1 1234567890\n", + ) + _, err := validateLineProtocol(body) + require.Error(t, err) +} + +func TestValidateAuth(t *testing.T) { + tests := []struct { + name string + peerID string + wantErr bool + }{ + {"valid hex", "abcdef0123456789", false}, + {"empty", "", true}, + {"too short", "abcdef01234567", true}, + {"too long", "abcdef01234567890", true}, + {"invalid hex", "ghijklmnopqrstuv", true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r, _ := http.NewRequest(http.MethodPost, "/", nil) + if tt.peerID != "" { + r.Header.Set("X-Peer-ID", tt.peerID) + } + err := validateAuth(r) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/client/internal/metrics/metrics.go b/client/internal/metrics/metrics.go new file mode 100644 index 000000000..4ebb43496 --- /dev/null +++ b/client/internal/metrics/metrics.go @@ -0,0 +1,224 @@ +package metrics + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "sync" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/internal/metrics/remoteconfig" +) + +// AgentInfo holds static information about the agent +type AgentInfo struct { + DeploymentType DeploymentType + Version string + OS string // runtime.GOOS (linux, darwin, windows, etc.) + Arch string // runtime.GOARCH (amd64, arm64, etc.) + peerID string // anonymised peer identifier (SHA-256 of WireGuard public key) +} + +// peerIDFromPublicKey returns a truncated SHA-256 hash (8 bytes / 16 hex chars) of the given WireGuard public key. +func peerIDFromPublicKey(pubKey string) string { + hash := sha256.Sum256([]byte(pubKey)) + return hex.EncodeToString(hash[:8]) +} + +// connectionPairID returns a deterministic identifier for a connection between two peers. +// It sorts the two peer IDs before hashing so the same pair always produces the same ID +// regardless of which side computes it. +func connectionPairID(peerID1, peerID2 string) string { + a, b := peerID1, peerID2 + if a > b { + a, b = b, a + } + hash := sha256.Sum256([]byte(a + b)) + return hex.EncodeToString(hash[:8]) +} + +// metricsImplementation defines the internal interface for metrics implementations +type metricsImplementation interface { + // RecordConnectionStages records connection stage metrics from timestamps + RecordConnectionStages( + ctx context.Context, + agentInfo AgentInfo, + connectionPairID string, + connectionType ConnectionType, + isReconnection bool, + timestamps ConnectionStageTimestamps, + ) + + // RecordSyncDuration records how long it took to process a sync message + RecordSyncDuration(ctx context.Context, agentInfo AgentInfo, duration time.Duration) + + // RecordLoginDuration records how long the login to management took + RecordLoginDuration(ctx context.Context, agentInfo AgentInfo, duration time.Duration, success bool) + + // Export exports metrics in InfluxDB line protocol format + Export(w io.Writer) error + + // Reset clears all collected metrics + Reset() +} + +type ClientMetrics struct { + impl metricsImplementation + + agentInfo AgentInfo + mu sync.RWMutex + + push *Push + pushMu sync.Mutex + wg sync.WaitGroup + pushCancel context.CancelFunc +} + +// ConnectionStageTimestamps holds timestamps for each connection stage +type ConnectionStageTimestamps struct { + SignalingReceived time.Time // First signal received from remote peer (both initial and reconnection) + ConnectionReady time.Time + WgHandshakeSuccess time.Time +} + +// String returns a human-readable representation of the connection stage timestamps +func (c ConnectionStageTimestamps) String() string { + return fmt.Sprintf("ConnectionStageTimestamps{SignalingReceived=%v, ConnectionReady=%v, WgHandshakeSuccess=%v}", + c.SignalingReceived.Format(time.RFC3339Nano), + c.ConnectionReady.Format(time.RFC3339Nano), + c.WgHandshakeSuccess.Format(time.RFC3339Nano), + ) +} + +// RecordConnectionStages calculates stage durations from timestamps and records them. +// remotePubKey is the remote peer's WireGuard public key; it will be hashed for anonymisation. +func (c *ClientMetrics) RecordConnectionStages( + ctx context.Context, + remotePubKey string, + connectionType ConnectionType, + isReconnection bool, + timestamps ConnectionStageTimestamps, +) { + if c == nil { + return + } + c.mu.RLock() + agentInfo := c.agentInfo + c.mu.RUnlock() + + remotePeerID := peerIDFromPublicKey(remotePubKey) + pairID := connectionPairID(agentInfo.peerID, remotePeerID) + c.impl.RecordConnectionStages(ctx, agentInfo, pairID, connectionType, isReconnection, timestamps) +} + +// RecordSyncDuration records the duration of sync message processing +func (c *ClientMetrics) RecordSyncDuration(ctx context.Context, duration time.Duration) { + if c == nil { + return + } + c.mu.RLock() + agentInfo := c.agentInfo + c.mu.RUnlock() + + c.impl.RecordSyncDuration(ctx, agentInfo, duration) +} + +// RecordLoginDuration records how long the login to management server took +func (c *ClientMetrics) RecordLoginDuration(ctx context.Context, duration time.Duration, success bool) { + if c == nil { + return + } + c.mu.RLock() + agentInfo := c.agentInfo + c.mu.RUnlock() + + c.impl.RecordLoginDuration(ctx, agentInfo, duration, success) +} + +// UpdateAgentInfo updates the agent information (e.g., when switching profiles). +// publicKey is the WireGuard public key; it will be hashed for anonymisation. +func (c *ClientMetrics) UpdateAgentInfo(agentInfo AgentInfo, publicKey string) { + if c == nil { + return + } + + agentInfo.peerID = peerIDFromPublicKey(publicKey) + + c.mu.Lock() + c.agentInfo = agentInfo + c.mu.Unlock() + + c.pushMu.Lock() + push := c.push + c.pushMu.Unlock() + if push != nil { + push.SetPeerID(agentInfo.peerID) + } +} + +// Export exports metrics to the writer +func (c *ClientMetrics) Export(w io.Writer) error { + if c == nil { + return nil + } + + return c.impl.Export(w) +} + +// StartPush starts periodic pushing of metrics with the given configuration +// Precedence: PushConfig.ServerAddress > remote config server_url +func (c *ClientMetrics) StartPush(ctx context.Context, config PushConfig) { + if c == nil { + return + } + + c.pushMu.Lock() + defer c.pushMu.Unlock() + + if c.push != nil { + log.Warnf("metrics push already running") + return + } + + c.mu.RLock() + agentVersion := c.agentInfo.Version + peerID := c.agentInfo.peerID + c.mu.RUnlock() + + configManager := remoteconfig.NewManager(getMetricsConfigURL(), remoteconfig.DefaultMinRefreshInterval) + push, err := NewPush(c.impl, configManager, config, agentVersion) + if err != nil { + log.Errorf("failed to create metrics push: %v", err) + return + } + push.SetPeerID(peerID) + + ctx, cancel := context.WithCancel(ctx) + c.pushCancel = cancel + + c.wg.Add(1) + go func() { + defer c.wg.Done() + push.Start(ctx) + }() + c.push = push +} + +func (c *ClientMetrics) StopPush() { + if c == nil { + return + } + c.pushMu.Lock() + defer c.pushMu.Unlock() + if c.push == nil { + return + } + + c.pushCancel() + c.wg.Wait() + c.push = nil +} diff --git a/client/internal/metrics/metrics_default.go b/client/internal/metrics/metrics_default.go new file mode 100644 index 000000000..927ab51d1 --- /dev/null +++ b/client/internal/metrics/metrics_default.go @@ -0,0 +1,11 @@ +//go:build !js + +package metrics + +// NewClientMetrics creates a new ClientMetrics instance +func NewClientMetrics(agentInfo AgentInfo) *ClientMetrics { + return &ClientMetrics{ + impl: newInfluxDBMetrics(), + agentInfo: agentInfo, + } +} diff --git a/client/internal/metrics/metrics_js.go b/client/internal/metrics/metrics_js.go new file mode 100644 index 000000000..dfa6d8243 --- /dev/null +++ b/client/internal/metrics/metrics_js.go @@ -0,0 +1,8 @@ +//go:build js + +package metrics + +// NewClientMetrics returns nil on WASM builds — all ClientMetrics methods are nil-safe. +func NewClientMetrics(AgentInfo) *ClientMetrics { + return nil +} diff --git a/client/internal/metrics/push.go b/client/internal/metrics/push.go new file mode 100644 index 000000000..ee0508f36 --- /dev/null +++ b/client/internal/metrics/push.go @@ -0,0 +1,289 @@ +package metrics + +import ( + "bytes" + "compress/gzip" + "context" + "fmt" + "net/http" + "net/url" + "sync" + "time" + + goversion "github.com/hashicorp/go-version" + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/internal/metrics/remoteconfig" +) + +const ( + // defaultPushInterval is the default interval for pushing metrics + defaultPushInterval = 5 * time.Minute +) + +// defaultMetricsServerURL is used as fallback when NB_METRICS_FORCE_SENDING is true +var defaultMetricsServerURL *url.URL + +func init() { + defaultMetricsServerURL, _ = url.Parse("https://ingest.netbird.io") +} + +// PushConfig holds configuration for metrics push +type PushConfig struct { + // ServerAddress is the metrics server URL. If nil, uses remote config server_url. + ServerAddress *url.URL + // Interval is how often to push metrics. If 0, uses remote config interval or defaultPushInterval. + Interval time.Duration + // ForceSending skips remote configuration fetch and version checks, pushing unconditionally. + ForceSending bool +} + +// PushConfigFromEnv builds a PushConfig from environment variables. +func PushConfigFromEnv() PushConfig { + config := PushConfig{} + + config.ForceSending = isForceSending() + config.ServerAddress = getMetricsServerURL() + config.Interval = getMetricsInterval() + + return config +} + +// remoteConfigProvider abstracts remote push config fetching for testability +type remoteConfigProvider interface { + RefreshIfNeeded(ctx context.Context) *remoteconfig.Config +} + +// Push handles periodic pushing of metrics +type Push struct { + metrics metricsImplementation + configManager remoteConfigProvider + agentVersion *goversion.Version + + peerID string + peerMu sync.RWMutex + + client *http.Client + cfgForceSending bool + cfgInterval time.Duration + cfgAddress *url.URL +} + +// NewPush creates a new Push instance with configuration resolution +func NewPush(metrics metricsImplementation, configManager remoteConfigProvider, config PushConfig, agentVersion string) (*Push, error) { + var cfgInterval time.Duration + var cfgAddress *url.URL + + if config.ForceSending { + cfgInterval = config.Interval + if config.Interval <= 0 { + cfgInterval = defaultPushInterval + } + + cfgAddress = config.ServerAddress + if cfgAddress == nil { + cfgAddress = defaultMetricsServerURL + } + } else { + cfgAddress = config.ServerAddress + + if config.Interval < 0 { + log.Warnf("negative metrics push interval %s", config.Interval) + } else { + cfgInterval = config.Interval + } + } + + parsedVersion, err := goversion.NewVersion(agentVersion) + if err != nil { + if !config.ForceSending { + return nil, fmt.Errorf("parse agent version %q: %w", agentVersion, err) + } + } + + return &Push{ + metrics: metrics, + configManager: configManager, + agentVersion: parsedVersion, + cfgForceSending: config.ForceSending, + cfgInterval: cfgInterval, + cfgAddress: cfgAddress, + client: &http.Client{ + Timeout: 10 * time.Second, + }, + }, nil +} + +// SetPeerID updates the hashed peer ID used for the Authorization header. +func (p *Push) SetPeerID(peerID string) { + p.peerMu.Lock() + p.peerID = peerID + p.peerMu.Unlock() +} + +// Start starts the periodic push loop. +// The env interval override controls tick frequency but does not bypass remote config +// version gating. Use ForceSending to skip remote config entirely. +func (p *Push) Start(ctx context.Context) { + // Log initial state + switch { + case p.cfgForceSending: + log.Infof("started metrics push with force sending to %s, interval %s", p.cfgAddress, p.cfgInterval) + case p.cfgAddress != nil: + log.Infof("started metrics push with server URL override: %s", p.cfgAddress.String()) + default: + log.Infof("started metrics push, server URL will be resolved from remote config") + } + + timer := time.NewTimer(0) // fire immediately on first iteration + defer timer.Stop() + + for { + select { + case <-ctx.Done(): + log.Debug("stopping metrics push") + return + case <-timer.C: + } + + pushURL, interval := p.resolve(ctx) + if pushURL != "" { + if err := p.push(ctx, pushURL); err != nil { + log.Errorf("failed to push metrics: %v", err) + } + } + + if interval <= 0 { + interval = defaultPushInterval + } + timer.Reset(interval) + } +} + +// resolve returns the push URL and interval for the next cycle. +// Returns empty pushURL to skip this cycle. +func (p *Push) resolve(ctx context.Context) (pushURL string, interval time.Duration) { + if p.cfgForceSending { + return p.resolveServerURL(nil), p.cfgInterval + } + + config := p.configManager.RefreshIfNeeded(ctx) + if config == nil { + log.Debug("no metrics push config available, waiting to retry") + return "", defaultPushInterval + } + + // prefer env variables instead of remote config + if p.cfgInterval > 0 { + interval = p.cfgInterval + } else { + interval = config.Interval + } + + if !isVersionInRange(p.agentVersion, config.VersionSince, config.VersionUntil) { + log.Debugf("agent version %s not in range [%s, %s), skipping metrics push", + p.agentVersion, config.VersionSince, config.VersionUntil) + return "", interval + } + + pushURL = p.resolveServerURL(&config.ServerURL) + if pushURL == "" { + log.Warn("no metrics server URL available, skipping push") + } + return pushURL, interval +} + +// push exports metrics and sends them to the metrics server +func (p *Push) push(ctx context.Context, pushURL string) error { + // Export metrics without clearing + var buf bytes.Buffer + if err := p.metrics.Export(&buf); err != nil { + return fmt.Errorf("export metrics: %w", err) + } + + // Don't push if there are no metrics + if buf.Len() == 0 { + log.Tracef("no metrics to push") + return nil + } + + // Gzip compress the body + compressed, err := gzipCompress(buf.Bytes()) + if err != nil { + return fmt.Errorf("gzip compress: %w", err) + } + + // Create HTTP request + req, err := http.NewRequestWithContext(ctx, "POST", pushURL, compressed) + if err != nil { + return fmt.Errorf("create request: %w", err) + } + req.Header.Set("Content-Type", "text/plain; charset=utf-8") + req.Header.Set("Content-Encoding", "gzip") + + p.peerMu.RLock() + peerID := p.peerID + p.peerMu.RUnlock() + if peerID != "" { + req.Header.Set("X-Peer-ID", peerID) + } + + // Send request + resp, err := p.client.Do(req) + if err != nil { + return fmt.Errorf("send request: %w", err) + } + defer func() { + if resp.Body == nil { + return + } + if err := resp.Body.Close(); err != nil { + log.Warnf("failed to close response body: %v", err) + } + }() + + // Check response status + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return fmt.Errorf("push failed with status %d", resp.StatusCode) + } + + log.Debugf("successfully pushed metrics to %s", pushURL) + p.metrics.Reset() + return nil +} + +// resolveServerURL determines the push URL. +// Precedence: envAddress (env var) > remote config server_url +func (p *Push) resolveServerURL(remoteServerURL *url.URL) string { + var baseURL *url.URL + if p.cfgAddress != nil { + baseURL = p.cfgAddress + } else { + baseURL = remoteServerURL + } + + if baseURL == nil { + return "" + } + + return baseURL.String() +} + +// gzipCompress compresses data using gzip and returns the compressed buffer. +func gzipCompress(data []byte) (*bytes.Buffer, error) { + var buf bytes.Buffer + gz := gzip.NewWriter(&buf) + if _, err := gz.Write(data); err != nil { + _ = gz.Close() + return nil, err + } + if err := gz.Close(); err != nil { + return nil, err + } + return &buf, nil +} + +// isVersionInRange checks if current falls within [since, until) +func isVersionInRange(current, since, until *goversion.Version) bool { + return !current.LessThan(since) && current.LessThan(until) +} diff --git a/client/internal/metrics/push_test.go b/client/internal/metrics/push_test.go new file mode 100644 index 000000000..20a509da1 --- /dev/null +++ b/client/internal/metrics/push_test.go @@ -0,0 +1,343 @@ +package metrics + +import ( + "context" + "io" + "net/http" + "net/http/httptest" + "net/url" + "sync/atomic" + "testing" + "time" + + goversion "github.com/hashicorp/go-version" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/client/internal/metrics/remoteconfig" +) + +func mustVersion(s string) *goversion.Version { + v, err := goversion.NewVersion(s) + if err != nil { + panic(err) + } + return v +} + +func mustURL(s string) url.URL { + u, err := url.Parse(s) + if err != nil { + panic(err) + } + return *u +} + +func parseURL(s string) *url.URL { + u, err := url.Parse(s) + if err != nil { + panic(err) + } + return u +} + +func testConfig(serverURL, since, until string, period time.Duration) *remoteconfig.Config { + return &remoteconfig.Config{ + ServerURL: mustURL(serverURL), + VersionSince: mustVersion(since), + VersionUntil: mustVersion(until), + Interval: period, + } +} + +// mockConfigProvider implements remoteConfigProvider for testing +type mockConfigProvider struct { + config *remoteconfig.Config +} + +func (m *mockConfigProvider) RefreshIfNeeded(_ context.Context) *remoteconfig.Config { + return m.config +} + +// mockMetrics implements metricsImplementation for testing +type mockMetrics struct { + exportData string +} + +func (m *mockMetrics) RecordConnectionStages(_ context.Context, _ AgentInfo, _ string, _ ConnectionType, _ bool, _ ConnectionStageTimestamps) { +} + +func (m *mockMetrics) RecordSyncDuration(_ context.Context, _ AgentInfo, _ time.Duration) { +} + +func (m *mockMetrics) RecordLoginDuration(_ context.Context, _ AgentInfo, _ time.Duration, _ bool) { +} + +func (m *mockMetrics) Export(w io.Writer) error { + if m.exportData != "" { + _, err := w.Write([]byte(m.exportData)) + return err + } + return nil +} + +func (m *mockMetrics) Reset() { +} + +func TestPush_OverrideIntervalPushes(t *testing.T) { + var pushCount atomic.Int32 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + pushCount.Add(1) + w.WriteHeader(http.StatusNoContent) + })) + defer server.Close() + + metrics := &mockMetrics{exportData: "test_metric 1\n"} + configProvider := &mockConfigProvider{config: testConfig(server.URL, "1.0.0", "2.0.0", 60*time.Minute)} + + push, err := NewPush(metrics, configProvider, PushConfig{ + Interval: 50 * time.Millisecond, + ServerAddress: parseURL(server.URL), + }, "1.0.0") + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) + go func() { + push.Start(ctx) + close(done) + }() + + require.Eventually(t, func() bool { + return pushCount.Load() >= 3 + }, 2*time.Second, 10*time.Millisecond) + + cancel() + <-done +} + +func TestPush_RemoteConfigVersionInRange(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + })) + defer server.Close() + + metrics := &mockMetrics{exportData: "test_metric 1\n"} + configProvider := &mockConfigProvider{config: testConfig(server.URL, "1.0.0", "2.0.0", 1*time.Minute)} + + push, err := NewPush(metrics, configProvider, PushConfig{}, "1.5.0") + require.NoError(t, err) + + pushURL, interval := push.resolve(context.Background()) + assert.NotEmpty(t, pushURL) + assert.Equal(t, 1*time.Minute, interval) +} + +func TestPush_RemoteConfigVersionOutOfRange(t *testing.T) { + metrics := &mockMetrics{exportData: "test_metric 1\n"} + configProvider := &mockConfigProvider{config: testConfig("http://localhost", "1.0.0", "1.5.0", 1*time.Minute)} + + push, err := NewPush(metrics, configProvider, PushConfig{}, "2.0.0") + require.NoError(t, err) + + pushURL, interval := push.resolve(context.Background()) + assert.Empty(t, pushURL) + assert.Equal(t, 1*time.Minute, interval) +} + +func TestPush_NoConfigReturnsDefault(t *testing.T) { + metrics := &mockMetrics{} + configProvider := &mockConfigProvider{config: nil} + + push, err := NewPush(metrics, configProvider, PushConfig{}, "1.0.0") + require.NoError(t, err) + + pushURL, interval := push.resolve(context.Background()) + assert.Empty(t, pushURL) + assert.Equal(t, defaultPushInterval, interval) +} + +func TestPush_OverrideIntervalRespectsVersionCheck(t *testing.T) { + metrics := &mockMetrics{} + configProvider := &mockConfigProvider{config: testConfig("http://localhost", "3.0.0", "4.0.0", 60*time.Minute)} + + push, err := NewPush(metrics, configProvider, PushConfig{ + Interval: 30 * time.Second, + ServerAddress: parseURL("http://localhost"), + }, "1.0.0") + require.NoError(t, err) + + pushURL, interval := push.resolve(context.Background()) + assert.Empty(t, pushURL) // version out of range + assert.Equal(t, 30*time.Second, interval) // but uses override interval +} + +func TestPush_OverrideIntervalUsedWhenVersionInRange(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + })) + defer server.Close() + + metrics := &mockMetrics{} + configProvider := &mockConfigProvider{config: testConfig(server.URL, "1.0.0", "2.0.0", 60*time.Minute)} + + push, err := NewPush(metrics, configProvider, PushConfig{ + Interval: 30 * time.Second, + }, "1.5.0") + require.NoError(t, err) + + pushURL, interval := push.resolve(context.Background()) + assert.NotEmpty(t, pushURL) + assert.Equal(t, 30*time.Second, interval) +} + +func TestPush_NoMetricsSkipsPush(t *testing.T) { + var pushCount atomic.Int32 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + pushCount.Add(1) + w.WriteHeader(http.StatusNoContent) + })) + defer server.Close() + + metrics := &mockMetrics{exportData: ""} // no metrics to export + configProvider := &mockConfigProvider{config: nil} + + push, err := NewPush(metrics, configProvider, PushConfig{}, "1.0.0") + require.NoError(t, err) + + err = push.push(context.Background(), server.URL) + assert.NoError(t, err) + assert.Equal(t, int32(0), pushCount.Load()) +} + +func TestPush_ServerURLFromRemoteConfig(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + })) + defer server.Close() + + metrics := &mockMetrics{exportData: "test_metric 1\n"} + configProvider := &mockConfigProvider{config: testConfig(server.URL, "1.0.0", "2.0.0", 1*time.Minute)} + + push, err := NewPush(metrics, configProvider, PushConfig{}, "1.5.0") + require.NoError(t, err) + + pushURL, interval := push.resolve(context.Background()) + assert.Contains(t, pushURL, server.URL) + assert.Equal(t, 1*time.Minute, interval) +} + +func TestPush_ServerAddressOverridesTakePrecedenceOverRemoteConfig(t *testing.T) { + overrideServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + })) + defer overrideServer.Close() + + metrics := &mockMetrics{exportData: "test_metric 1\n"} + configProvider := &mockConfigProvider{config: testConfig("http://remote-config-server", "1.0.0", "2.0.0", 1*time.Minute)} + + push, err := NewPush(metrics, configProvider, PushConfig{ + ServerAddress: parseURL(overrideServer.URL), + }, "1.5.0") + require.NoError(t, err) + + pushURL, _ := push.resolve(context.Background()) + assert.Contains(t, pushURL, overrideServer.URL) + assert.NotContains(t, pushURL, "remote-config-server") +} + +func TestPush_OverrideIntervalWithoutOverrideURL_UsesRemoteConfigURL(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + })) + defer server.Close() + + metrics := &mockMetrics{exportData: "test_metric 1\n"} + configProvider := &mockConfigProvider{config: testConfig(server.URL, "1.0.0", "2.0.0", 60*time.Minute)} + + push, err := NewPush(metrics, configProvider, PushConfig{ + Interval: 30 * time.Second, + }, "1.0.0") + require.NoError(t, err) + + pushURL, interval := push.resolve(context.Background()) + assert.Contains(t, pushURL, server.URL) + assert.Equal(t, 30*time.Second, interval) +} + +func TestPush_NoConfigSkipsPush(t *testing.T) { + metrics := &mockMetrics{exportData: "test_metric 1\n"} + configProvider := &mockConfigProvider{config: nil} + + push, err := NewPush(metrics, configProvider, PushConfig{ + Interval: 30 * time.Second, + }, "1.0.0") + require.NoError(t, err) + + pushURL, interval := push.resolve(context.Background()) + assert.Empty(t, pushURL) + assert.Equal(t, defaultPushInterval, interval) // no config available, use default retry interval +} + +func TestPush_ForceSendingSkipsRemoteConfig(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + })) + defer server.Close() + + metrics := &mockMetrics{exportData: "test_metric 1\n"} + configProvider := &mockConfigProvider{config: nil} + + push, err := NewPush(metrics, configProvider, PushConfig{ + ForceSending: true, + Interval: 1 * time.Minute, + ServerAddress: parseURL(server.URL), + }, "1.0.0") + require.NoError(t, err) + + pushURL, interval := push.resolve(context.Background()) + assert.NotEmpty(t, pushURL) + assert.Equal(t, 1*time.Minute, interval) +} + +func TestPush_ForceSendingUsesDefaultInterval(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + })) + defer server.Close() + + metrics := &mockMetrics{exportData: "test_metric 1\n"} + configProvider := &mockConfigProvider{config: nil} + + push, err := NewPush(metrics, configProvider, PushConfig{ + ForceSending: true, + ServerAddress: parseURL(server.URL), + }, "1.0.0") + require.NoError(t, err) + + pushURL, interval := push.resolve(context.Background()) + assert.NotEmpty(t, pushURL) + assert.Equal(t, defaultPushInterval, interval) +} + +func TestIsVersionInRange(t *testing.T) { + tests := []struct { + name string + current string + since string + until string + expected bool + }{ + {"at lower bound inclusive", "1.2.2", "1.2.2", "1.2.3", true}, + {"in range", "1.2.2", "1.2.0", "1.3.0", true}, + {"at upper bound exclusive", "1.2.3", "1.2.2", "1.2.3", false}, + {"below range", "1.2.1", "1.2.2", "1.2.3", false}, + {"above range", "1.3.0", "1.2.2", "1.2.3", false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.expected, isVersionInRange(mustVersion(tt.current), mustVersion(tt.since), mustVersion(tt.until))) + }) + } +} diff --git a/client/internal/metrics/remoteconfig/manager.go b/client/internal/metrics/remoteconfig/manager.go new file mode 100644 index 000000000..01c37891f --- /dev/null +++ b/client/internal/metrics/remoteconfig/manager.go @@ -0,0 +1,149 @@ +package remoteconfig + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "sync" + "time" + + goversion "github.com/hashicorp/go-version" + log "github.com/sirupsen/logrus" +) + +const ( + DefaultMinRefreshInterval = 30 * time.Minute +) + +// Config holds the parsed remote push configuration +type Config struct { + ServerURL url.URL + VersionSince *goversion.Version + VersionUntil *goversion.Version + Interval time.Duration +} + +// rawConfig is the JSON wire format fetched from the remote server +type rawConfig struct { + ServerURL string `json:"server_url"` + VersionSince string `json:"version-since"` + VersionUntil string `json:"version-until"` + PeriodMinutes int `json:"period_minutes"` +} + +// Manager handles fetching and caching remote push configuration +type Manager struct { + configURL string + minRefreshInterval time.Duration + client *http.Client + + mu sync.Mutex + lastConfig *Config + lastFetched time.Time +} + +func NewManager(configURL string, minRefreshInterval time.Duration) *Manager { + return &Manager{ + configURL: configURL, + minRefreshInterval: minRefreshInterval, + client: &http.Client{ + Timeout: 10 * time.Second, + }, + } +} + +// RefreshIfNeeded fetches new config if the cached one is stale. +// Returns the current config (possibly just fetched) or nil if unavailable. +func (m *Manager) RefreshIfNeeded(ctx context.Context) *Config { + m.mu.Lock() + defer m.mu.Unlock() + + if m.isConfigFresh() { + return m.lastConfig + } + + fetchedConfig, err := m.fetch(ctx) + m.lastFetched = time.Now() + if err != nil { + log.Warnf("failed to fetch metrics remote config: %v", err) + return m.lastConfig // return cached (may be nil) + } + + m.lastConfig = fetchedConfig + + log.Tracef("fetched metrics remote config: version-since=%s version-until=%s period=%s", + fetchedConfig.VersionSince, fetchedConfig.VersionUntil, fetchedConfig.Interval) + + return fetchedConfig +} + +func (m *Manager) isConfigFresh() bool { + if m.lastConfig == nil { + return false + } + return time.Since(m.lastFetched) < m.minRefreshInterval +} + +func (m *Manager) fetch(ctx context.Context) (*Config, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, m.configURL, nil) + if err != nil { + return nil, fmt.Errorf("create request: %w", err) + } + + resp, err := m.client.Do(req) + if err != nil { + return nil, fmt.Errorf("send request: %w", err) + } + defer func() { + if resp.Body != nil { + _ = resp.Body.Close() + } + }() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + + body, err := io.ReadAll(io.LimitReader(resp.Body, 4096)) + if err != nil { + return nil, fmt.Errorf("read body: %w", err) + } + + var raw rawConfig + if err := json.Unmarshal(body, &raw); err != nil { + return nil, fmt.Errorf("parse config: %w", err) + } + + if raw.PeriodMinutes <= 0 { + return nil, fmt.Errorf("invalid period_minutes: %d", raw.PeriodMinutes) + } + + if raw.ServerURL == "" { + return nil, fmt.Errorf("server_url is required") + } + + serverURL, err := url.Parse(raw.ServerURL) + if err != nil { + return nil, fmt.Errorf("parse server_url %q: %w", raw.ServerURL, err) + } + + since, err := goversion.NewVersion(raw.VersionSince) + if err != nil { + return nil, fmt.Errorf("parse version-since %q: %w", raw.VersionSince, err) + } + + until, err := goversion.NewVersion(raw.VersionUntil) + if err != nil { + return nil, fmt.Errorf("parse version-until %q: %w", raw.VersionUntil, err) + } + + return &Config{ + ServerURL: *serverURL, + VersionSince: since, + VersionUntil: until, + Interval: time.Duration(raw.PeriodMinutes) * time.Minute, + }, nil +} diff --git a/client/internal/metrics/remoteconfig/manager_test.go b/client/internal/metrics/remoteconfig/manager_test.go new file mode 100644 index 000000000..68ca3b4c4 --- /dev/null +++ b/client/internal/metrics/remoteconfig/manager_test.go @@ -0,0 +1,197 @@ +package remoteconfig + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const testMinRefresh = 100 * time.Millisecond + +func TestManager_FetchSuccess(t *testing.T) { + server := newConfigServer(t, rawConfig{ + ServerURL: "https://ingest.example.com", + VersionSince: "1.0.0", + VersionUntil: "2.0.0", + PeriodMinutes: 60, + }) + defer server.Close() + + mgr := NewManager(server.URL, testMinRefresh) + config := mgr.RefreshIfNeeded(context.Background()) + + require.NotNil(t, config) + assert.Equal(t, "https://ingest.example.com", config.ServerURL.String()) + assert.Equal(t, "1.0.0", config.VersionSince.String()) + assert.Equal(t, "2.0.0", config.VersionUntil.String()) + assert.Equal(t, 60*time.Minute, config.Interval) +} + +func TestManager_CachesConfig(t *testing.T) { + var fetchCount atomic.Int32 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fetchCount.Add(1) + err := json.NewEncoder(w).Encode(rawConfig{ + ServerURL: "https://ingest.example.com", + VersionSince: "1.0.0", + VersionUntil: "2.0.0", + PeriodMinutes: 60, + }) + require.NoError(t, err) + })) + defer server.Close() + + mgr := NewManager(server.URL, testMinRefresh) + + // First call fetches + config1 := mgr.RefreshIfNeeded(context.Background()) + require.NotNil(t, config1) + assert.Equal(t, int32(1), fetchCount.Load()) + + // Second call uses cache (within minRefreshInterval) + config2 := mgr.RefreshIfNeeded(context.Background()) + require.NotNil(t, config2) + assert.Equal(t, int32(1), fetchCount.Load()) + assert.Equal(t, config1, config2) +} + +func TestManager_RefetchesWhenStale(t *testing.T) { + var fetchCount atomic.Int32 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fetchCount.Add(1) + err := json.NewEncoder(w).Encode(rawConfig{ + ServerURL: "https://ingest.example.com", + VersionSince: "1.0.0", + VersionUntil: "2.0.0", + PeriodMinutes: 60, + }) + require.NoError(t, err) + })) + defer server.Close() + + mgr := NewManager(server.URL, testMinRefresh) + + // First fetch + mgr.RefreshIfNeeded(context.Background()) + assert.Equal(t, int32(1), fetchCount.Load()) + + // Wait for config to become stale + time.Sleep(testMinRefresh + 10*time.Millisecond) + + // Should refetch + mgr.RefreshIfNeeded(context.Background()) + assert.Equal(t, int32(2), fetchCount.Load()) +} + +func TestManager_FetchFailureReturnsNil(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + defer server.Close() + + mgr := NewManager(server.URL, testMinRefresh) + config := mgr.RefreshIfNeeded(context.Background()) + + assert.Nil(t, config) +} + +func TestManager_FetchFailureReturnsCached(t *testing.T) { + var fetchCount atomic.Int32 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fetchCount.Add(1) + if fetchCount.Load() > 1 { + w.WriteHeader(http.StatusInternalServerError) + return + } + err := json.NewEncoder(w).Encode(rawConfig{ + ServerURL: "https://ingest.example.com", + VersionSince: "1.0.0", + VersionUntil: "2.0.0", + PeriodMinutes: 60, + }) + require.NoError(t, err) + })) + defer server.Close() + + mgr := NewManager(server.URL, testMinRefresh) + + // First call succeeds + config1 := mgr.RefreshIfNeeded(context.Background()) + require.NotNil(t, config1) + + // Wait for config to become stale + time.Sleep(testMinRefresh + 10*time.Millisecond) + + // Second call fails but returns cached + config2 := mgr.RefreshIfNeeded(context.Background()) + require.NotNil(t, config2) + assert.Equal(t, config1, config2) +} + +func TestManager_RejectsInvalidPeriod(t *testing.T) { + tests := []struct { + name string + period int + }{ + {"zero", 0}, + {"negative", -5}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + server := newConfigServer(t, rawConfig{ + ServerURL: "https://ingest.example.com", + VersionSince: "1.0.0", + VersionUntil: "2.0.0", + PeriodMinutes: tt.period, + }) + defer server.Close() + + mgr := NewManager(server.URL, testMinRefresh) + config := mgr.RefreshIfNeeded(context.Background()) + assert.Nil(t, config) + }) + } +} + +func TestManager_RejectsEmptyServerURL(t *testing.T) { + server := newConfigServer(t, rawConfig{ + ServerURL: "", + VersionSince: "1.0.0", + VersionUntil: "2.0.0", + PeriodMinutes: 60, + }) + defer server.Close() + + mgr := NewManager(server.URL, testMinRefresh) + config := mgr.RefreshIfNeeded(context.Background()) + assert.Nil(t, config) +} + +func TestManager_RejectsInvalidJSON(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := w.Write([]byte("not json")) + require.NoError(t, err) + })) + defer server.Close() + + mgr := NewManager(server.URL, testMinRefresh) + config := mgr.RefreshIfNeeded(context.Background()) + assert.Nil(t, config) +} + +func newConfigServer(t *testing.T, config rawConfig) *httptest.Server { + t.Helper() + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + err := json.NewEncoder(w).Encode(config) + require.NoError(t, err) + })) +} diff --git a/client/internal/peer/conn.go b/client/internal/peer/conn.go index b4f97016d..bea0725f2 100644 --- a/client/internal/peer/conn.go +++ b/client/internal/peer/conn.go @@ -15,6 +15,7 @@ import ( "github.com/netbirdio/netbird/client/iface/configurer" "github.com/netbirdio/netbird/client/iface/wgproxy" + "github.com/netbirdio/netbird/client/internal/metrics" "github.com/netbirdio/netbird/client/internal/peer/conntype" "github.com/netbirdio/netbird/client/internal/peer/dispatcher" "github.com/netbirdio/netbird/client/internal/peer/guard" @@ -26,6 +27,17 @@ import ( relayClient "github.com/netbirdio/netbird/shared/relay/client" ) +// MetricsRecorder is an interface for recording peer connection metrics +type MetricsRecorder interface { + RecordConnectionStages( + ctx context.Context, + remotePubKey string, + connectionType metrics.ConnectionType, + isReconnection bool, + timestamps metrics.ConnectionStageTimestamps, + ) +} + type ServiceDependencies struct { StatusRecorder *Status Signaler *Signaler @@ -33,6 +45,7 @@ type ServiceDependencies struct { RelayManager *relayClient.Manager SrWatcher *guard.SRWatcher PeerConnDispatcher *dispatcher.ConnectionDispatcher + MetricsRecorder MetricsRecorder } type WgConfig struct { @@ -115,6 +128,10 @@ type Conn struct { dumpState *stateDump endpointUpdater *EndpointUpdater + + // Connection stage timestamps for metrics + metricsRecorder MetricsRecorder + metricsStages *MetricsStages } // NewConn creates a new not opened Conn to the remote peer. @@ -140,6 +157,7 @@ func NewConn(config ConnConfig, services ServiceDependencies) (*Conn, error) { dumpState: dumpState, endpointUpdater: NewEndpointUpdater(connLog, config.WgConfig, isController(config)), wgWatcher: NewWGWatcher(connLog, config.WgConfig.WgInterface, config.Key, dumpState), + metricsRecorder: services.MetricsRecorder, } return conn, nil @@ -156,6 +174,9 @@ func (conn *Conn) Open(engineCtx context.Context) error { return nil } + // Allocate new metrics stages so old goroutines don't corrupt new state + conn.metricsStages = &MetricsStages{} + conn.ctx, conn.ctxCancel = context.WithCancel(engineCtx) conn.workerRelay = NewWorkerRelay(conn.ctx, conn.Log, isController(conn.config), conn.config, conn, conn.relayManager) @@ -167,7 +188,7 @@ func (conn *Conn) Open(engineCtx context.Context) error { } conn.workerICE = workerICE - conn.handshaker = NewHandshaker(conn.Log, conn.config, conn.signaler, conn.workerICE, conn.workerRelay) + conn.handshaker = NewHandshaker(conn.Log, conn.config, conn.signaler, conn.workerICE, conn.workerRelay, conn.metricsStages) conn.handshaker.AddRelayListener(conn.workerRelay.OnNewOffer) if !isForceRelayed() { @@ -335,7 +356,7 @@ func (conn *Conn) onICEConnectionIsReady(priority conntype.ConnPriority, iceConn if conn.currentConnPriority > priority { conn.Log.Infof("current connection priority (%s) is higher than the new one (%s), do not upgrade connection", conn.currentConnPriority, priority) conn.statusICE.SetConnected() - conn.updateIceState(iceConnInfo) + conn.updateIceState(iceConnInfo, time.Now()) return } @@ -375,7 +396,8 @@ func (conn *Conn) onICEConnectionIsReady(priority conntype.ConnPriority, iceConn } conn.Log.Infof("configure WireGuard endpoint to: %s", ep.String()) - conn.enableWgWatcherIfNeeded() + updateTime := time.Now() + conn.enableWgWatcherIfNeeded(updateTime) presharedKey := conn.presharedKey(iceConnInfo.RosenpassPubKey) if err = conn.endpointUpdater.ConfigureWGEndpoint(ep, presharedKey); err != nil { @@ -391,8 +413,8 @@ func (conn *Conn) onICEConnectionIsReady(priority conntype.ConnPriority, iceConn conn.currentConnPriority = priority conn.statusICE.SetConnected() - conn.updateIceState(iceConnInfo) - conn.doOnConnected(iceConnInfo.RosenpassPubKey, iceConnInfo.RosenpassAddr) + conn.updateIceState(iceConnInfo, updateTime) + conn.doOnConnected(iceConnInfo.RosenpassPubKey, iceConnInfo.RosenpassAddr, updateTime) } func (conn *Conn) onICEStateDisconnected(sessionChanged bool) { @@ -444,6 +466,10 @@ func (conn *Conn) onICEStateDisconnected(sessionChanged bool) { conn.disableWgWatcherIfNeeded() + if conn.currentConnPriority == conntype.None { + conn.metricsStages.Disconnected() + } + peerState := State{ PubKey: conn.config.Key, ConnStatus: conn.evalStatus(), @@ -484,7 +510,7 @@ func (conn *Conn) onRelayConnectionIsReady(rci RelayConnInfo) { conn.Log.Debugf("do not switch to relay because current priority is: %s", conn.currentConnPriority.String()) conn.setRelayedProxy(wgProxy) conn.statusRelay.SetConnected() - conn.updateRelayStatus(rci.relayedConn.RemoteAddr().String(), rci.rosenpassPubKey) + conn.updateRelayStatus(rci.relayedConn.RemoteAddr().String(), rci.rosenpassPubKey, time.Now()) return } @@ -493,7 +519,8 @@ func (conn *Conn) onRelayConnectionIsReady(rci RelayConnInfo) { if controller { wgProxy.Work() } - conn.enableWgWatcherIfNeeded() + updateTime := time.Now() + conn.enableWgWatcherIfNeeded(updateTime) if err := conn.endpointUpdater.ConfigureWGEndpoint(wgProxy.EndpointAddr(), conn.presharedKey(rci.rosenpassPubKey)); err != nil { if err := wgProxy.CloseConn(); err != nil { conn.Log.Warnf("Failed to close relay connection: %v", err) @@ -504,13 +531,16 @@ func (conn *Conn) onRelayConnectionIsReady(rci RelayConnInfo) { if !controller { wgProxy.Work() } + + wgConfigWorkaround() + conn.rosenpassRemoteKey = rci.rosenpassPubKey conn.currentConnPriority = conntype.Relay conn.statusRelay.SetConnected() conn.setRelayedProxy(wgProxy) - conn.updateRelayStatus(rci.relayedConn.RemoteAddr().String(), rci.rosenpassPubKey) + conn.updateRelayStatus(rci.relayedConn.RemoteAddr().String(), rci.rosenpassPubKey, updateTime) conn.Log.Infof("start to communicate with peer via relay") - conn.doOnConnected(rci.rosenpassPubKey, rci.rosenpassAddr) + conn.doOnConnected(rci.rosenpassPubKey, rci.rosenpassAddr, updateTime) } func (conn *Conn) onRelayDisconnected() { @@ -548,6 +578,10 @@ func (conn *Conn) handleRelayDisconnectedLocked() { conn.disableWgWatcherIfNeeded() + if conn.currentConnPriority == conntype.None { + conn.metricsStages.Disconnected() + } + peerState := State{ PubKey: conn.config.Key, ConnStatus: conn.evalStatus(), @@ -588,10 +622,10 @@ func (conn *Conn) onWGDisconnected() { } } -func (conn *Conn) updateRelayStatus(relayServerAddr string, rosenpassPubKey []byte) { +func (conn *Conn) updateRelayStatus(relayServerAddr string, rosenpassPubKey []byte, updateTime time.Time) { peerState := State{ PubKey: conn.config.Key, - ConnStatusUpdate: time.Now(), + ConnStatusUpdate: updateTime, ConnStatus: conn.evalStatus(), Relayed: conn.isRelayed(), RelayServerAddress: relayServerAddr, @@ -604,10 +638,10 @@ func (conn *Conn) updateRelayStatus(relayServerAddr string, rosenpassPubKey []by } } -func (conn *Conn) updateIceState(iceConnInfo ICEConnInfo) { +func (conn *Conn) updateIceState(iceConnInfo ICEConnInfo, updateTime time.Time) { peerState := State{ PubKey: conn.config.Key, - ConnStatusUpdate: time.Now(), + ConnStatusUpdate: updateTime, ConnStatus: conn.evalStatus(), Relayed: iceConnInfo.Relayed, LocalIceCandidateType: iceConnInfo.LocalIceCandidateType, @@ -645,11 +679,13 @@ func (conn *Conn) setStatusToDisconnected() { } } -func (conn *Conn) doOnConnected(remoteRosenpassPubKey []byte, remoteRosenpassAddr string) { +func (conn *Conn) doOnConnected(remoteRosenpassPubKey []byte, remoteRosenpassAddr string, updateTime time.Time) { if runtime.GOOS == "ios" { runtime.GC() } + conn.metricsStages.RecordConnectionReady(updateTime) + if conn.onConnected != nil { conn.onConnected(conn.config.Key, remoteRosenpassPubKey, conn.config.WgConfig.AllowedIps[0].Addr().String(), remoteRosenpassAddr) } @@ -701,14 +737,14 @@ func (conn *Conn) isConnectedOnAllWay() (connected bool) { return true } -func (conn *Conn) enableWgWatcherIfNeeded() { +func (conn *Conn) enableWgWatcherIfNeeded(enabledTime time.Time) { if !conn.wgWatcher.IsEnabled() { wgWatcherCtx, wgWatcherCancel := context.WithCancel(conn.ctx) conn.wgWatcherCancel = wgWatcherCancel conn.wgWatcherWg.Add(1) go func() { defer conn.wgWatcherWg.Done() - conn.wgWatcher.EnableWgWatcher(wgWatcherCtx, conn.onWGDisconnected) + conn.wgWatcher.EnableWgWatcher(wgWatcherCtx, enabledTime, conn.onWGDisconnected, conn.onWGHandshakeSuccess) }() } } @@ -783,6 +819,41 @@ func (conn *Conn) setRelayedProxy(proxy wgproxy.Proxy) { conn.wgProxyRelay = proxy } +// onWGHandshakeSuccess is called when the first WireGuard handshake is detected +func (conn *Conn) onWGHandshakeSuccess(when time.Time) { + conn.metricsStages.RecordWGHandshakeSuccess(when) + conn.recordConnectionMetrics() +} + +// recordConnectionMetrics records connection stage timestamps as metrics +func (conn *Conn) recordConnectionMetrics() { + if conn.metricsRecorder == nil { + return + } + + // Determine connection type based on current priority + conn.mu.Lock() + priority := conn.currentConnPriority + conn.mu.Unlock() + + var connType metrics.ConnectionType + switch priority { + case conntype.Relay: + connType = metrics.ConnectionTypeRelay + default: + connType = metrics.ConnectionTypeICE + } + + // Record metrics with timestamps - duration calculation happens in metrics package + conn.metricsRecorder.RecordConnectionStages( + context.Background(), + conn.config.Key, + connType, + conn.metricsStages.IsReconnection(), + conn.metricsStages.GetTimestamps(), + ) +} + // AllowedIP returns the allowed IP of the remote peer func (conn *Conn) AllowedIP() netip.Addr { return conn.config.WgConfig.AllowedIps[0].Addr() diff --git a/client/internal/peer/handshaker.go b/client/internal/peer/handshaker.go index aff26f847..9b50cecd1 100644 --- a/client/internal/peer/handshaker.go +++ b/client/internal/peer/handshaker.go @@ -44,12 +44,13 @@ type OfferAnswer struct { } type Handshaker struct { - mu sync.Mutex - log *log.Entry - config ConnConfig - signaler *Signaler - ice *WorkerICE - relay *WorkerRelay + mu sync.Mutex + log *log.Entry + config ConnConfig + signaler *Signaler + ice *WorkerICE + relay *WorkerRelay + metricsStages *MetricsStages // relayListener is not blocking because the listener is using a goroutine to process the messages // and it will only keep the latest message if multiple offers are received in a short time // this is to avoid blocking the handshaker if the listener is doing some heavy processing @@ -64,13 +65,14 @@ type Handshaker struct { remoteAnswerCh chan OfferAnswer } -func NewHandshaker(log *log.Entry, config ConnConfig, signaler *Signaler, ice *WorkerICE, relay *WorkerRelay) *Handshaker { +func NewHandshaker(log *log.Entry, config ConnConfig, signaler *Signaler, ice *WorkerICE, relay *WorkerRelay, metricsStages *MetricsStages) *Handshaker { return &Handshaker{ log: log, config: config, signaler: signaler, ice: ice, relay: relay, + metricsStages: metricsStages, remoteOffersCh: make(chan OfferAnswer), remoteAnswerCh: make(chan OfferAnswer), } @@ -89,6 +91,12 @@ func (h *Handshaker) Listen(ctx context.Context) { select { case remoteOfferAnswer := <-h.remoteOffersCh: h.log.Infof("received offer, running version %s, remote WireGuard listen port %d, session id: %s", remoteOfferAnswer.Version, remoteOfferAnswer.WgListenPort, remoteOfferAnswer.SessionIDString()) + + // Record signaling received for reconnection attempts + if h.metricsStages != nil { + h.metricsStages.RecordSignalingReceived() + } + if h.relayListener != nil { h.relayListener.Notify(&remoteOfferAnswer) } @@ -103,6 +111,12 @@ func (h *Handshaker) Listen(ctx context.Context) { } case remoteOfferAnswer := <-h.remoteAnswerCh: h.log.Infof("received answer, running version %s, remote WireGuard listen port %d, session id: %s", remoteOfferAnswer.Version, remoteOfferAnswer.WgListenPort, remoteOfferAnswer.SessionIDString()) + + // Record signaling received for reconnection attempts + if h.metricsStages != nil { + h.metricsStages.RecordSignalingReceived() + } + if h.relayListener != nil { h.relayListener.Notify(&remoteOfferAnswer) } diff --git a/client/internal/peer/metrics_saver.go b/client/internal/peer/metrics_saver.go new file mode 100644 index 000000000..e32afbfe5 --- /dev/null +++ b/client/internal/peer/metrics_saver.go @@ -0,0 +1,73 @@ +package peer + +import ( + "sync" + "time" + + "github.com/netbirdio/netbird/client/internal/metrics" +) + +type MetricsStages struct { + isReconnectionAttempt bool // Track if current attempt is a reconnection + stageTimestamps metrics.ConnectionStageTimestamps + mu sync.Mutex +} + +// RecordSignalingReceived records when the first signal is received from the remote peer. +// Used as the base for all subsequent stage durations to avoid inflating metrics when +// the remote peer was offline. +func (s *MetricsStages) RecordSignalingReceived() { + s.mu.Lock() + defer s.mu.Unlock() + + if s.stageTimestamps.SignalingReceived.IsZero() { + s.stageTimestamps.SignalingReceived = time.Now() + } +} + +func (s *MetricsStages) RecordConnectionReady(when time.Time) { + s.mu.Lock() + defer s.mu.Unlock() + if s.stageTimestamps.ConnectionReady.IsZero() { + s.stageTimestamps.ConnectionReady = when + } +} + +func (s *MetricsStages) RecordWGHandshakeSuccess(handshakeTime time.Time) { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.stageTimestamps.ConnectionReady.IsZero() && s.stageTimestamps.WgHandshakeSuccess.IsZero() { + // WireGuard only reports handshake times with second precision, but ConnectionReady + // is captured with microsecond precision. If handshake appears before ConnectionReady + // due to truncation (e.g., handshake at 6.042s truncated to 6.000s), normalize to + // ConnectionReady to avoid negative duration metrics. + if handshakeTime.Before(s.stageTimestamps.ConnectionReady) { + s.stageTimestamps.WgHandshakeSuccess = s.stageTimestamps.ConnectionReady + } else { + s.stageTimestamps.WgHandshakeSuccess = handshakeTime + } + } +} + +// Disconnected sets the mode to reconnection. It is called only when both ICE and Relay have been disconnected at the same time. +func (s *MetricsStages) Disconnected() { + s.mu.Lock() + defer s.mu.Unlock() + + // Reset all timestamps for reconnection + s.stageTimestamps = metrics.ConnectionStageTimestamps{} + s.isReconnectionAttempt = true +} + +func (s *MetricsStages) IsReconnection() bool { + s.mu.Lock() + defer s.mu.Unlock() + return s.isReconnectionAttempt +} + +func (s *MetricsStages) GetTimestamps() metrics.ConnectionStageTimestamps { + s.mu.Lock() + defer s.mu.Unlock() + return s.stageTimestamps +} diff --git a/client/internal/peer/metrics_saver_test.go b/client/internal/peer/metrics_saver_test.go new file mode 100644 index 000000000..01c0aa9ac --- /dev/null +++ b/client/internal/peer/metrics_saver_test.go @@ -0,0 +1,125 @@ +package peer + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/client/internal/metrics" +) + +func TestMetricsStages_RecordSignalingReceived(t *testing.T) { + s := &MetricsStages{} + + s.RecordSignalingReceived() + ts := s.GetTimestamps() + require.False(t, ts.SignalingReceived.IsZero()) + + // Second call should not overwrite + first := ts.SignalingReceived + time.Sleep(time.Millisecond) + s.RecordSignalingReceived() + ts = s.GetTimestamps() + assert.Equal(t, first, ts.SignalingReceived, "should keep the first signaling timestamp") +} + +func TestMetricsStages_RecordConnectionReady(t *testing.T) { + s := &MetricsStages{} + + now := time.Now() + s.RecordConnectionReady(now) + ts := s.GetTimestamps() + assert.Equal(t, now, ts.ConnectionReady) + + // Second call should not overwrite + later := now.Add(time.Second) + s.RecordConnectionReady(later) + ts = s.GetTimestamps() + assert.Equal(t, now, ts.ConnectionReady, "should keep the first connection ready timestamp") +} + +func TestMetricsStages_RecordWGHandshakeSuccess(t *testing.T) { + s := &MetricsStages{} + + connReady := time.Now() + s.RecordConnectionReady(connReady) + + handshake := connReady.Add(500 * time.Millisecond) + s.RecordWGHandshakeSuccess(handshake) + + ts := s.GetTimestamps() + assert.Equal(t, handshake, ts.WgHandshakeSuccess) +} + +func TestMetricsStages_HandshakeBeforeConnectionReady_Normalizes(t *testing.T) { + s := &MetricsStages{} + + connReady := time.Now() + s.RecordConnectionReady(connReady) + + // WG handshake appears before ConnectionReady due to second-precision truncation + handshake := connReady.Add(-100 * time.Millisecond) + s.RecordWGHandshakeSuccess(handshake) + + ts := s.GetTimestamps() + assert.Equal(t, connReady, ts.WgHandshakeSuccess, "should normalize to ConnectionReady when handshake appears earlier") +} + +func TestMetricsStages_HandshakeIgnoredWithoutConnectionReady(t *testing.T) { + s := &MetricsStages{} + + s.RecordWGHandshakeSuccess(time.Now()) + ts := s.GetTimestamps() + assert.True(t, ts.WgHandshakeSuccess.IsZero(), "should not record handshake without connection ready") +} + +func TestMetricsStages_HandshakeRecordedOnce(t *testing.T) { + s := &MetricsStages{} + + connReady := time.Now() + s.RecordConnectionReady(connReady) + + first := connReady.Add(time.Second) + s.RecordWGHandshakeSuccess(first) + + // Second call (rekey) should be ignored + second := connReady.Add(2 * time.Second) + s.RecordWGHandshakeSuccess(second) + + ts := s.GetTimestamps() + assert.Equal(t, first, ts.WgHandshakeSuccess, "should preserve first handshake, ignore rekeys") +} + +func TestMetricsStages_Disconnected(t *testing.T) { + s := &MetricsStages{} + + s.RecordSignalingReceived() + s.RecordConnectionReady(time.Now()) + assert.False(t, s.IsReconnection()) + + s.Disconnected() + + assert.True(t, s.IsReconnection()) + ts := s.GetTimestamps() + assert.True(t, ts.SignalingReceived.IsZero(), "timestamps should be reset after disconnect") + assert.True(t, ts.ConnectionReady.IsZero(), "timestamps should be reset after disconnect") + assert.True(t, ts.WgHandshakeSuccess.IsZero(), "timestamps should be reset after disconnect") +} + +func TestMetricsStages_GetTimestamps(t *testing.T) { + s := &MetricsStages{} + + ts := s.GetTimestamps() + assert.Equal(t, metrics.ConnectionStageTimestamps{}, ts) + + now := time.Now() + s.RecordSignalingReceived() + s.RecordConnectionReady(now) + + ts = s.GetTimestamps() + assert.False(t, ts.SignalingReceived.IsZero()) + assert.Equal(t, now, ts.ConnectionReady) + assert.True(t, ts.WgHandshakeSuccess.IsZero()) +} diff --git a/client/internal/peer/wg_watcher.go b/client/internal/peer/wg_watcher.go index 799a9375e..805a6f24a 100644 --- a/client/internal/peer/wg_watcher.go +++ b/client/internal/peer/wg_watcher.go @@ -48,7 +48,7 @@ func NewWGWatcher(log *log.Entry, wgIfaceStater WGInterfaceStater, peerKey strin // EnableWgWatcher starts the WireGuard watcher. If it is already enabled, it will return immediately and do nothing. // The watcher runs until ctx is cancelled. Caller is responsible for context lifecycle management. -func (w *WGWatcher) EnableWgWatcher(ctx context.Context, onDisconnectedFn func()) { +func (w *WGWatcher) EnableWgWatcher(ctx context.Context, enabledTime time.Time, onDisconnectedFn func(), onHandshakeSuccessFn func(when time.Time)) { w.muEnabled.Lock() if w.enabled { w.muEnabled.Unlock() @@ -56,7 +56,6 @@ func (w *WGWatcher) EnableWgWatcher(ctx context.Context, onDisconnectedFn func() } w.log.Debugf("enable WireGuard watcher") - enabledTime := time.Now() w.enabled = true w.muEnabled.Unlock() @@ -65,7 +64,7 @@ func (w *WGWatcher) EnableWgWatcher(ctx context.Context, onDisconnectedFn func() w.log.Warnf("failed to read initial wg stats: %v", err) } - w.periodicHandshakeCheck(ctx, onDisconnectedFn, enabledTime, initialHandshake) + w.periodicHandshakeCheck(ctx, onDisconnectedFn, onHandshakeSuccessFn, enabledTime, initialHandshake) w.muEnabled.Lock() w.enabled = false @@ -89,7 +88,7 @@ func (w *WGWatcher) Reset() { } // wgStateCheck help to check the state of the WireGuard handshake and relay connection -func (w *WGWatcher) periodicHandshakeCheck(ctx context.Context, onDisconnectedFn func(), enabledTime time.Time, initialHandshake time.Time) { +func (w *WGWatcher) periodicHandshakeCheck(ctx context.Context, onDisconnectedFn func(), onHandshakeSuccessFn func(when time.Time), enabledTime time.Time, initialHandshake time.Time) { w.log.Infof("WireGuard watcher started") timer := time.NewTimer(wgHandshakeOvertime) @@ -108,6 +107,9 @@ func (w *WGWatcher) periodicHandshakeCheck(ctx context.Context, onDisconnectedFn if lastHandshake.IsZero() { elapsed := calcElapsed(enabledTime, *handshake) w.log.Infof("first wg handshake detected within: %.2fsec, (%s)", elapsed, handshake) + if onHandshakeSuccessFn != nil { + onHandshakeSuccessFn(*handshake) + } } lastHandshake = *handshake diff --git a/client/internal/peer/wg_watcher_test.go b/client/internal/peer/wg_watcher_test.go index f79405a01..3ce91cd46 100644 --- a/client/internal/peer/wg_watcher_test.go +++ b/client/internal/peer/wg_watcher_test.go @@ -35,9 +35,11 @@ func TestWGWatcher_EnableWgWatcher(t *testing.T) { defer cancel() onDisconnected := make(chan struct{}, 1) - go watcher.EnableWgWatcher(ctx, func() { + go watcher.EnableWgWatcher(ctx, time.Now(), func() { mlog.Infof("onDisconnectedFn") onDisconnected <- struct{}{} + }, func(when time.Time) { + mlog.Infof("onHandshakeSuccess: %v", when) }) // wait for initial reading @@ -64,7 +66,7 @@ func TestWGWatcher_ReEnable(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - watcher.EnableWgWatcher(ctx, func() {}) + watcher.EnableWgWatcher(ctx, time.Now(), func() {}, func(when time.Time) {}) }() cancel() @@ -75,9 +77,9 @@ func TestWGWatcher_ReEnable(t *testing.T) { defer cancel() onDisconnected := make(chan struct{}, 1) - go watcher.EnableWgWatcher(ctx, func() { + go watcher.EnableWgWatcher(ctx, time.Now(), func() { onDisconnected <- struct{}{} - }) + }, func(when time.Time) {}) time.Sleep(2 * time.Second) mocWgIface.disconnect() diff --git a/client/server/debug.go b/client/server/debug.go index 4c531efba..81708e576 100644 --- a/client/server/debug.go +++ b/client/server/debug.go @@ -26,6 +26,15 @@ func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) ( log.Warnf("failed to get latest sync response: %v", err) } + var clientMetrics debug.MetricsExporter + if s.connectClient != nil { + if engine := s.connectClient.Engine(); engine != nil { + if cm := engine.GetClientMetrics(); cm != nil { + clientMetrics = cm + } + } + } + var cpuProfileData []byte if s.cpuProfileBuf != nil && !s.cpuProfiling { cpuProfileData = s.cpuProfileBuf.Bytes() @@ -54,6 +63,7 @@ func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) ( LogPath: s.logFile, CPUProfile: cpuProfileData, RefreshStatus: refreshStatus, + ClientMetrics: clientMetrics, }, debug.BundleConfig{ Anonymize: req.GetAnonymize(), diff --git a/shared/management/client/client.go b/shared/management/client/client.go index ba525602e..a15301223 100644 --- a/shared/management/client/client.go +++ b/shared/management/client/client.go @@ -22,6 +22,7 @@ type Client interface { GetDeviceAuthorizationFlow(serverKey wgtypes.Key) (*proto.DeviceAuthorizationFlow, error) GetPKCEAuthorizationFlow(serverKey wgtypes.Key) (*proto.PKCEAuthorizationFlow, error) GetNetworkMap(sysInfo *system.Info) (*proto.NetworkMap, error) + GetServerURL() string IsHealthy() bool SyncMeta(sysInfo *system.Info) error Logout() error diff --git a/shared/management/client/grpc.go b/shared/management/client/grpc.go index e95db0089..252199498 100644 --- a/shared/management/client/grpc.go +++ b/shared/management/client/grpc.go @@ -52,6 +52,7 @@ type GrpcClient struct { conn *grpc.ClientConn connStateCallback ConnStateNotifier connStateCallbackLock sync.RWMutex + serverURL string } type ExposeRequest struct { @@ -127,9 +128,15 @@ func NewClient(ctx context.Context, addr string, ourPrivateKey wgtypes.Key, tlsE ctx: ctx, conn: conn, connStateCallbackLock: sync.RWMutex{}, + serverURL: addr, }, nil } +// GetServerURL returns the management server URL +func (c *GrpcClient) GetServerURL() string { + return c.serverURL +} + // Close closes connection to the Management Service func (c *GrpcClient) Close() error { return c.conn.Close() diff --git a/shared/management/client/mock.go b/shared/management/client/mock.go index 57256d6d4..548e379e8 100644 --- a/shared/management/client/mock.go +++ b/shared/management/client/mock.go @@ -19,6 +19,7 @@ type MockClient struct { LoginFunc func(serverKey wgtypes.Key, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) GetDeviceAuthorizationFlowFunc func(serverKey wgtypes.Key) (*proto.DeviceAuthorizationFlow, error) GetPKCEAuthorizationFlowFunc func(serverKey wgtypes.Key) (*proto.PKCEAuthorizationFlow, error) + GetServerURLFunc func() string SyncMetaFunc func(sysInfo *system.Info) error LogoutFunc func() error JobFunc func(ctx context.Context, msgHandler func(msg *proto.JobRequest) *proto.JobResponse) error @@ -92,6 +93,14 @@ func (m *MockClient) GetNetworkMap(_ *system.Info) (*proto.NetworkMap, error) { return nil, nil } +// GetServerURL mock implementation of GetServerURL from mgm.Client interface +func (m *MockClient) GetServerURL() string { + if m.GetServerURLFunc == nil { + return "" + } + return m.GetServerURLFunc() +} + func (m *MockClient) SyncMeta(sysInfo *system.Info) error { if m.SyncMetaFunc == nil { return nil From fd9d43033448d4fc0d0a98a4dc520f45614daa6f Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Mon, 23 Mar 2026 16:39:32 +0800 Subject: [PATCH 232/374] [client] Simplify entrypoint by running netbird up unconditionally (#5652) --- client/Dockerfile | 3 +-- client/Dockerfile-rootless | 3 +-- client/cmd/status.go | 2 +- client/netbird-entrypoint.sh | 30 ++++-------------------------- 4 files changed, 7 insertions(+), 31 deletions(-) diff --git a/client/Dockerfile b/client/Dockerfile index 66a418828..64d5ba04f 100644 --- a/client/Dockerfile +++ b/client/Dockerfile @@ -17,8 +17,7 @@ ENV \ NETBIRD_BIN="/usr/local/bin/netbird" \ NB_LOG_FILE="console,/var/log/netbird/client.log" \ NB_DAEMON_ADDR="unix:///var/run/netbird.sock" \ - NB_ENTRYPOINT_SERVICE_TIMEOUT="30" \ - NB_ENTRYPOINT_LOGIN_TIMEOUT="30" + NB_ENTRYPOINT_SERVICE_TIMEOUT="30" ENTRYPOINT [ "/usr/local/bin/netbird-entrypoint.sh" ] diff --git a/client/Dockerfile-rootless b/client/Dockerfile-rootless index b0d59fdf8..69d00aaf2 100644 --- a/client/Dockerfile-rootless +++ b/client/Dockerfile-rootless @@ -23,8 +23,7 @@ ENV \ NB_DAEMON_ADDR="unix:///var/lib/netbird/netbird.sock" \ NB_LOG_FILE="console,/var/lib/netbird/client.log" \ NB_DISABLE_DNS="true" \ - NB_ENTRYPOINT_SERVICE_TIMEOUT="30" \ - NB_ENTRYPOINT_LOGIN_TIMEOUT="30" + NB_ENTRYPOINT_SERVICE_TIMEOUT="30" ENTRYPOINT [ "/usr/local/bin/netbird-entrypoint.sh" ] diff --git a/client/cmd/status.go b/client/cmd/status.go index 07dbf9101..c35a06eb3 100644 --- a/client/cmd/status.go +++ b/client/cmd/status.go @@ -218,7 +218,7 @@ func runHealthCheck(cmd *cobra.Command) error { ctx := internal.CtxInitState(cmd.Context()) isStartup := check == "startup" - resp, err := getStatus(ctx, isStartup, isStartup) + resp, err := getStatus(ctx, isStartup, false) if err != nil { return err } diff --git a/client/netbird-entrypoint.sh b/client/netbird-entrypoint.sh index f8267d6ee..0e330bdac 100755 --- a/client/netbird-entrypoint.sh +++ b/client/netbird-entrypoint.sh @@ -2,7 +2,6 @@ set -eEuo pipefail : ${NB_ENTRYPOINT_SERVICE_TIMEOUT:="30"} -: ${NB_ENTRYPOINT_LOGIN_TIMEOUT:="30"} NETBIRD_BIN="${NETBIRD_BIN:-"netbird"}" export NB_LOG_FILE="${NB_LOG_FILE:-"console,/var/log/netbird/client.log"}" service_pids=() @@ -51,31 +50,10 @@ wait_for_daemon_startup() { exit 1 } -login_if_needed() { - local timeout="${1}" - - if "${NETBIRD_BIN}" status --check ready 2>/dev/null; then - info "already logged in, skipping 'netbird up'..." - return - fi - - if [[ "${timeout}" -eq 0 ]]; then - info "logging in..." - "${NETBIRD_BIN}" up - return - fi - - local deadline=$((SECONDS + timeout)) - while [[ "${SECONDS}" -lt "${deadline}" ]]; do - if "${NETBIRD_BIN}" status --check ready 2>/dev/null; then - info "already logged in, skipping 'netbird up'..." - return - fi - sleep 1 - done - - info "logging in..." +connect() { + info "running 'netbird up'..." "${NETBIRD_BIN}" up + return $? } main() { @@ -85,7 +63,7 @@ main() { info "registered new service process 'netbird service run', currently running: ${service_pids[@]@Q}" wait_for_daemon_startup "${NB_ENTRYPOINT_SERVICE_TIMEOUT}" - login_if_needed "${NB_ENTRYPOINT_LOGIN_TIMEOUT}" + connect wait "${service_pids[@]}" } From 2313494e0e62dc1159d8525e576c5446dd2cf19b Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Mon, 23 Mar 2026 21:04:03 +0800 Subject: [PATCH 233/374] [client] Don't abort debug for command when up/down fails (#5657) --- client/cmd/debug.go | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/client/cmd/debug.go b/client/cmd/debug.go index e480df4d7..0e2717756 100644 --- a/client/cmd/debug.go +++ b/client/cmd/debug.go @@ -181,10 +181,11 @@ func runForDuration(cmd *cobra.Command, args []string) error { if stateWasDown { if _, err := client.Up(cmd.Context(), &proto.UpRequest{}); err != nil { - return fmt.Errorf("failed to up: %v", status.Convert(err).Message()) + cmd.PrintErrf("Failed to bring service up: %v\n", status.Convert(err).Message()) + } else { + cmd.Println("netbird up") + time.Sleep(time.Second * 10) } - cmd.Println("netbird up") - time.Sleep(time.Second * 10) } initialLevelTrace := initialLogLevel.GetLevel() >= proto.LogLevel_TRACE @@ -199,9 +200,10 @@ func runForDuration(cmd *cobra.Command, args []string) error { } if _, err := client.Down(cmd.Context(), &proto.DownRequest{}); err != nil { - return fmt.Errorf("failed to down: %v", status.Convert(err).Message()) + cmd.PrintErrf("Failed to bring service down: %v\n", status.Convert(err).Message()) + } else { + cmd.Println("netbird down") } - cmd.Println("netbird down") time.Sleep(1 * time.Second) @@ -209,13 +211,14 @@ func runForDuration(cmd *cobra.Command, args []string) error { if _, err := client.SetSyncResponsePersistence(cmd.Context(), &proto.SetSyncResponsePersistenceRequest{ Enabled: true, }); err != nil { - return fmt.Errorf("failed to enable sync response persistence: %v", status.Convert(err).Message()) + cmd.PrintErrf("Failed to enable sync response persistence: %v\n", status.Convert(err).Message()) } if _, err := client.Up(cmd.Context(), &proto.UpRequest{}); err != nil { - return fmt.Errorf("failed to up: %v", status.Convert(err).Message()) + cmd.PrintErrf("Failed to bring service up: %v\n", status.Convert(err).Message()) + } else { + cmd.Println("netbird up") } - cmd.Println("netbird up") time.Sleep(3 * time.Second) @@ -263,16 +266,18 @@ func runForDuration(cmd *cobra.Command, args []string) error { if stateWasDown { if _, err := client.Down(cmd.Context(), &proto.DownRequest{}); err != nil { - return fmt.Errorf("failed to down: %v", status.Convert(err).Message()) + cmd.PrintErrf("Failed to restore service down state: %v\n", status.Convert(err).Message()) + } else { + cmd.Println("netbird down") } - cmd.Println("netbird down") } if !initialLevelTrace { if _, err := client.SetLogLevel(cmd.Context(), &proto.SetLogLevelRequest{Level: initialLogLevel.GetLevel()}); err != nil { - return fmt.Errorf("failed to restore log level: %v", status.Convert(err).Message()) + cmd.PrintErrf("Failed to restore log level: %v\n", status.Convert(err).Message()) + } else { + cmd.Println("Log level restored to", initialLogLevel.GetLevel()) } - cmd.Println("Log level restored to", initialLogLevel.GetLevel()) } cmd.Printf("Local file:\n%s\n", resp.GetPath()) From 17cfa5fe1edb8ffe8fdd46b5f22637ca0911bf03 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Mon, 23 Mar 2026 17:16:23 +0100 Subject: [PATCH 234/374] [misc] Set signing env only if not fork and set license (#5659) * Add condition to GPG key decoding to handle pull requests * Add license field to deb and rpm package configurations * Add condition to GPG key decoding for external pull requests --- .github/workflows/release.yml | 2 ++ .goreleaser.yaml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 1a4676625..83444b541 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -170,6 +170,7 @@ jobs: run: sudo apt update && sudo apt install -y -q gcc-arm-linux-gnueabihf gcc-aarch64-linux-gnu - name: Decode GPG signing key + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository env: GPG_RPM_PRIVATE_KEY: ${{ secrets.GPG_RPM_PRIVATE_KEY }} run: | @@ -309,6 +310,7 @@ jobs: run: sudo apt update && sudo apt install -y -q libappindicator3-dev gir1.2-appindicator3-0.1 libxxf86vm-dev gcc-mingw-w64-x86-64 - name: Decode GPG signing key + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository env: GPG_RPM_PRIVATE_KEY: ${{ secrets.GPG_RPM_PRIVATE_KEY }} run: | diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 0f81229cd..65e63dfa8 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -171,6 +171,7 @@ nfpms: - maintainer: Netbird description: Netbird client. homepage: https://netbird.io/ + license: BSD-3-Clause id: netbird_deb bindir: /usr/bin builds: @@ -184,6 +185,7 @@ nfpms: - maintainer: Netbird description: Netbird client. homepage: https://netbird.io/ + license: BSD-3-Clause id: netbird_rpm bindir: /usr/bin builds: From 5b85edb753d5edb94ec8a2521cfe85f2a4784fec Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Tue, 24 Mar 2026 00:53:17 +0800 Subject: [PATCH 235/374] [management] Omit proxy_protocol from API response when false (#5656) The internal Target model uses a plain bool for ProxyProtocol, which was always serialized to the API response as false even when not configured. Only set the API field when true so it gets omitted via omitempty when unset. --- management/internals/modules/reverseproxy/service/service.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/management/internals/modules/reverseproxy/service/service.go b/management/internals/modules/reverseproxy/service/service.go index c00d49421..be04777a1 100644 --- a/management/internals/modules/reverseproxy/service/service.go +++ b/management/internals/modules/reverseproxy/service/service.go @@ -262,7 +262,9 @@ func (s *Service) ToAPIResponse() *api.Service { if opts == nil { opts = &api.ServiceTargetOptions{} } - opts.ProxyProtocol = &target.ProxyProtocol + if target.ProxyProtocol { + opts.ProxyProtocol = &target.ProxyProtocol + } st.Options = opts apiTargets = append(apiTargets, st) } From 0b329f7881d093fb399abc61b05bb252df7f9834 Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Tue, 24 Mar 2026 13:21:42 +0300 Subject: [PATCH 236/374] [management] Replace JumpCloud SDK with direct HTTP calls (#5591) --- go.mod | 1 - go.sum | 2 - management/server/idp/idp.go | 1 + management/server/idp/jumpcloud.go | 198 ++++++++++++++-------- management/server/idp/jumpcloud_test.go | 216 ++++++++++++++++++++++++ 5 files changed, 343 insertions(+), 75 deletions(-) diff --git a/go.mod b/go.mod index 2c911aa4f..3661d6fe0 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,6 @@ require ( require ( fyne.io/fyne/v2 v2.7.0 fyne.io/systray v1.12.1-0.20260116214250-81f8e1a496f9 - github.com/TheJumpCloud/jcapi-go v3.0.0+incompatible github.com/awnumar/memguard v0.23.0 github.com/aws/aws-sdk-go-v2 v1.36.3 github.com/aws/aws-sdk-go-v2/config v1.29.14 diff --git a/go.sum b/go.sum index e26b4edaf..57e07dd7b 100644 --- a/go.sum +++ b/go.sum @@ -34,8 +34,6 @@ github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSC github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= -github.com/TheJumpCloud/jcapi-go v3.0.0+incompatible h1:hqcTK6ZISdip65SR792lwYJTa/axESA0889D3UlZbLo= -github.com/TheJumpCloud/jcapi-go v3.0.0+incompatible/go.mod h1:6B1nuc1MUs6c62ODZDl7hVE5Pv7O2XGSkgg2olnq34I= github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktpoUAgOJK3OTFc/xug0PCXYCqU0FgDKI= github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= diff --git a/management/server/idp/idp.go b/management/server/idp/idp.go index 28e3d81f9..20d6cacd5 100644 --- a/management/server/idp/idp.go +++ b/management/server/idp/idp.go @@ -197,6 +197,7 @@ func NewManager(ctx context.Context, config Config, appMetrics telemetry.AppMetr case "jumpcloud": return NewJumpCloudManager(JumpCloudClientConfig{ APIToken: config.ExtraConfig["ApiToken"], + ApiUrl: config.ExtraConfig["ApiUrl"], }, appMetrics) case "pocketid": return NewPocketIdManager(PocketIdClientConfig{ diff --git a/management/server/idp/jumpcloud.go b/management/server/idp/jumpcloud.go index 8c4a9d089..f0dec3a9b 100644 --- a/management/server/idp/jumpcloud.go +++ b/management/server/idp/jumpcloud.go @@ -1,24 +1,40 @@ package idp import ( + "bytes" "context" + "encoding/json" "fmt" + "io" "net/http" "strings" - v1 "github.com/TheJumpCloud/jcapi-go/v1" - "github.com/netbirdio/netbird/management/server/telemetry" ) const ( - contentType = "application/json" - accept = "application/json" + jumpCloudDefaultApiUrl = "https://console.jumpcloud.com" + jumpCloudSearchPageSize = 100 ) +// jumpCloudUser represents a JumpCloud V1 API system user. +type jumpCloudUser struct { + ID string `json:"_id"` + Email string `json:"email"` + Firstname string `json:"firstname"` + Middlename string `json:"middlename"` + Lastname string `json:"lastname"` +} + +// jumpCloudUserList represents the response from the JumpCloud search endpoint. +type jumpCloudUserList struct { + Results []jumpCloudUser `json:"results"` + TotalCount int `json:"totalCount"` +} + // JumpCloudManager JumpCloud manager client instance. type JumpCloudManager struct { - client *v1.APIClient + apiBase string apiToken string httpClient ManagerHTTPClient credentials ManagerCredentials @@ -29,6 +45,7 @@ type JumpCloudManager struct { // JumpCloudClientConfig JumpCloud manager client configurations. type JumpCloudClientConfig struct { APIToken string + ApiUrl string } // JumpCloudCredentials JumpCloud authentication information. @@ -55,7 +72,15 @@ func NewJumpCloudManager(config JumpCloudClientConfig, appMetrics telemetry.AppM return nil, fmt.Errorf("jumpCloud IdP configuration is incomplete, ApiToken is missing") } - client := v1.NewAPIClient(v1.NewConfiguration()) + apiBase := config.ApiUrl + if apiBase == "" { + apiBase = jumpCloudDefaultApiUrl + } + apiBase = strings.TrimSuffix(apiBase, "/") + if !strings.HasSuffix(apiBase, "/api") { + apiBase += "/api" + } + credentials := &JumpCloudCredentials{ clientConfig: config, httpClient: httpClient, @@ -64,7 +89,7 @@ func NewJumpCloudManager(config JumpCloudClientConfig, appMetrics telemetry.AppM } return &JumpCloudManager{ - client: client, + apiBase: apiBase, apiToken: config.APIToken, httpClient: httpClient, credentials: credentials, @@ -78,37 +103,58 @@ func (jc *JumpCloudCredentials) Authenticate(_ context.Context) (JWTToken, error return JWTToken{}, nil } -func (jm *JumpCloudManager) authenticationContext() context.Context { - return context.WithValue(context.Background(), v1.ContextAPIKey, v1.APIKey{ - Key: jm.apiToken, - }) -} - -// UpdateUserAppMetadata updates user app metadata based on userID and metadata map. -func (jm *JumpCloudManager) UpdateUserAppMetadata(_ context.Context, _ string, _ AppMetadata) error { - return nil -} - -// GetUserDataByID requests user data from JumpCloud via ID. -func (jm *JumpCloudManager) GetUserDataByID(_ context.Context, userID string, appMetadata AppMetadata) (*UserData, error) { - authCtx := jm.authenticationContext() - user, resp, err := jm.client.SystemusersApi.SystemusersGet(authCtx, userID, contentType, accept, nil) +// doRequest executes an HTTP request against the JumpCloud V1 API. +func (jm *JumpCloudManager) doRequest(ctx context.Context, method, path string, body io.Reader) ([]byte, error) { + reqURL := jm.apiBase + path + req, err := http.NewRequestWithContext(ctx, method, reqURL, body) if err != nil { return nil, err } + + req.Header.Set("x-api-key", jm.apiToken) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + + resp, err := jm.httpClient.Do(req) + if err != nil { + if jm.appMetrics != nil { + jm.appMetrics.IDPMetrics().CountRequestError() + } + return nil, err + } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { if jm.appMetrics != nil { jm.appMetrics.IDPMetrics().CountRequestStatusError() } - return nil, fmt.Errorf("unable to get user %s, statusCode %d", userID, resp.StatusCode) + return nil, fmt.Errorf("JumpCloud API request %s %s failed with status %d", method, path, resp.StatusCode) + } + + return io.ReadAll(resp.Body) +} + +// UpdateUserAppMetadata updates user app metadata based on userID and metadata map. +func (jm *JumpCloudManager) UpdateUserAppMetadata(_ context.Context, _ string, _ AppMetadata) error { + return nil +} + +// GetUserDataByID requests user data from JumpCloud via ID. +func (jm *JumpCloudManager) GetUserDataByID(ctx context.Context, userID string, appMetadata AppMetadata) (*UserData, error) { + body, err := jm.doRequest(ctx, http.MethodGet, "/systemusers/"+userID, nil) + if err != nil { + return nil, err } if jm.appMetrics != nil { jm.appMetrics.IDPMetrics().CountGetUserDataByID() } + var user jumpCloudUser + if err = jm.helper.Unmarshal(body, &user); err != nil { + return nil, err + } + userData := parseJumpCloudUser(user) userData.AppMetadata = appMetadata @@ -116,30 +162,20 @@ func (jm *JumpCloudManager) GetUserDataByID(_ context.Context, userID string, ap } // GetAccount returns all the users for a given profile. -func (jm *JumpCloudManager) GetAccount(_ context.Context, accountID string) ([]*UserData, error) { - authCtx := jm.authenticationContext() - userList, resp, err := jm.client.SearchApi.SearchSystemusersPost(authCtx, contentType, accept, nil) +func (jm *JumpCloudManager) GetAccount(ctx context.Context, accountID string) ([]*UserData, error) { + allUsers, err := jm.searchAllUsers(ctx) if err != nil { return nil, err } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - if jm.appMetrics != nil { - jm.appMetrics.IDPMetrics().CountRequestStatusError() - } - return nil, fmt.Errorf("unable to get account %s users, statusCode %d", accountID, resp.StatusCode) - } if jm.appMetrics != nil { jm.appMetrics.IDPMetrics().CountGetAccount() } - users := make([]*UserData, 0) - for _, user := range userList.Results { + users := make([]*UserData, 0, len(allUsers)) + for _, user := range allUsers { userData := parseJumpCloudUser(user) userData.AppMetadata.WTAccountID = accountID - users = append(users, userData) } @@ -148,27 +184,18 @@ func (jm *JumpCloudManager) GetAccount(_ context.Context, accountID string) ([]* // GetAllAccounts gets all registered accounts with corresponding user data. // It returns a list of users indexed by accountID. -func (jm *JumpCloudManager) GetAllAccounts(_ context.Context) (map[string][]*UserData, error) { - authCtx := jm.authenticationContext() - userList, resp, err := jm.client.SearchApi.SearchSystemusersPost(authCtx, contentType, accept, nil) +func (jm *JumpCloudManager) GetAllAccounts(ctx context.Context) (map[string][]*UserData, error) { + allUsers, err := jm.searchAllUsers(ctx) if err != nil { return nil, err } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - if jm.appMetrics != nil { - jm.appMetrics.IDPMetrics().CountRequestStatusError() - } - return nil, fmt.Errorf("unable to get all accounts, statusCode %d", resp.StatusCode) - } if jm.appMetrics != nil { jm.appMetrics.IDPMetrics().CountGetAllAccounts() } indexedUsers := make(map[string][]*UserData) - for _, user := range userList.Results { + for _, user := range allUsers { userData := parseJumpCloudUser(user) indexedUsers[UnsetAccountID] = append(indexedUsers[UnsetAccountID], userData) } @@ -176,6 +203,41 @@ func (jm *JumpCloudManager) GetAllAccounts(_ context.Context) (map[string][]*Use return indexedUsers, nil } +// searchAllUsers paginates through all system users using limit/skip. +func (jm *JumpCloudManager) searchAllUsers(ctx context.Context) ([]jumpCloudUser, error) { + var allUsers []jumpCloudUser + + for skip := 0; ; skip += jumpCloudSearchPageSize { + searchReq := map[string]int{ + "limit": jumpCloudSearchPageSize, + "skip": skip, + } + + payload, err := json.Marshal(searchReq) + if err != nil { + return nil, err + } + + body, err := jm.doRequest(ctx, http.MethodPost, "/search/systemusers", bytes.NewReader(payload)) + if err != nil { + return nil, err + } + + var userList jumpCloudUserList + if err = jm.helper.Unmarshal(body, &userList); err != nil { + return nil, err + } + + allUsers = append(allUsers, userList.Results...) + + if skip+len(userList.Results) >= userList.TotalCount { + break + } + } + + return allUsers, nil +} + // CreateUser creates a new user in JumpCloud Idp and sends an invitation. func (jm *JumpCloudManager) CreateUser(_ context.Context, _, _, _, _ string) (*UserData, error) { return nil, fmt.Errorf("method CreateUser not implemented") @@ -183,7 +245,7 @@ func (jm *JumpCloudManager) CreateUser(_ context.Context, _, _, _, _ string) (*U // GetUserByEmail searches users with a given email. // If no users have been found, this function returns an empty list. -func (jm *JumpCloudManager) GetUserByEmail(_ context.Context, email string) ([]*UserData, error) { +func (jm *JumpCloudManager) GetUserByEmail(ctx context.Context, email string) ([]*UserData, error) { searchFilter := map[string]interface{}{ "searchFilter": map[string]interface{}{ "filter": []string{email}, @@ -191,25 +253,26 @@ func (jm *JumpCloudManager) GetUserByEmail(_ context.Context, email string) ([]* }, } - authCtx := jm.authenticationContext() - userList, resp, err := jm.client.SearchApi.SearchSystemusersPost(authCtx, contentType, accept, searchFilter) + payload, err := json.Marshal(searchFilter) if err != nil { return nil, err } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - if jm.appMetrics != nil { - jm.appMetrics.IDPMetrics().CountRequestStatusError() - } - return nil, fmt.Errorf("unable to get user %s, statusCode %d", email, resp.StatusCode) + body, err := jm.doRequest(ctx, http.MethodPost, "/search/systemusers", bytes.NewReader(payload)) + if err != nil { + return nil, err } if jm.appMetrics != nil { jm.appMetrics.IDPMetrics().CountGetUserByEmail() } - usersData := make([]*UserData, 0) + var userList jumpCloudUserList + if err = jm.helper.Unmarshal(body, &userList); err != nil { + return nil, err + } + + usersData := make([]*UserData, 0, len(userList.Results)) for _, user := range userList.Results { usersData = append(usersData, parseJumpCloudUser(user)) } @@ -224,20 +287,11 @@ func (jm *JumpCloudManager) InviteUserByID(_ context.Context, _ string) error { } // DeleteUser from jumpCloud directory -func (jm *JumpCloudManager) DeleteUser(_ context.Context, userID string) error { - authCtx := jm.authenticationContext() - _, resp, err := jm.client.SystemusersApi.SystemusersDelete(authCtx, userID, contentType, accept, nil) +func (jm *JumpCloudManager) DeleteUser(ctx context.Context, userID string) error { + _, err := jm.doRequest(ctx, http.MethodDelete, "/systemusers/"+userID, nil) if err != nil { return err } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - if jm.appMetrics != nil { - jm.appMetrics.IDPMetrics().CountRequestStatusError() - } - return fmt.Errorf("unable to delete user, statusCode %d", resp.StatusCode) - } if jm.appMetrics != nil { jm.appMetrics.IDPMetrics().CountDeleteUser() @@ -247,11 +301,11 @@ func (jm *JumpCloudManager) DeleteUser(_ context.Context, userID string) error { } // parseJumpCloudUser parse JumpCloud system user returned from API V1 to UserData. -func parseJumpCloudUser(user v1.Systemuserreturn) *UserData { +func parseJumpCloudUser(user jumpCloudUser) *UserData { names := []string{user.Firstname, user.Middlename, user.Lastname} return &UserData{ Email: user.Email, Name: strings.Join(names, " "), - ID: user.Id, + ID: user.ID, } } diff --git a/management/server/idp/jumpcloud_test.go b/management/server/idp/jumpcloud_test.go index 1bfdcefcc..dc7a9cb6c 100644 --- a/management/server/idp/jumpcloud_test.go +++ b/management/server/idp/jumpcloud_test.go @@ -1,8 +1,15 @@ package idp import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/netbirdio/netbird/management/server/telemetry" @@ -44,3 +51,212 @@ func TestNewJumpCloudManager(t *testing.T) { }) } } + +func TestJumpCloudGetUserDataByID(t *testing.T) { + userResponse := jumpCloudUser{ + ID: "user123", + Email: "test@example.com", + Firstname: "John", + Middlename: "", + Lastname: "Doe", + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "/systemusers/user123", r.URL.Path) + assert.Equal(t, http.MethodGet, r.Method) + assert.Equal(t, "test-api-key", r.Header.Get("x-api-key")) + + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(userResponse) + })) + defer server.Close() + + manager := newTestJumpCloudManager(t, server.URL) + + userData, err := manager.GetUserDataByID(context.Background(), "user123", AppMetadata{WTAccountID: "acc1"}) + require.NoError(t, err) + + assert.Equal(t, "user123", userData.ID) + assert.Equal(t, "test@example.com", userData.Email) + assert.Equal(t, "John Doe", userData.Name) + assert.Equal(t, "acc1", userData.AppMetadata.WTAccountID) +} + +func TestJumpCloudGetAccount(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "/search/systemusers", r.URL.Path) + assert.Equal(t, http.MethodPost, r.Method) + + var reqBody map[string]any + assert.NoError(t, json.NewDecoder(r.Body).Decode(&reqBody)) + assert.Contains(t, reqBody, "limit") + assert.Contains(t, reqBody, "skip") + + resp := jumpCloudUserList{ + Results: []jumpCloudUser{ + {ID: "u1", Email: "a@test.com", Firstname: "Alice", Lastname: "Smith"}, + {ID: "u2", Email: "b@test.com", Firstname: "Bob", Lastname: "Jones"}, + }, + TotalCount: 2, + } + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(resp) + })) + defer server.Close() + + manager := newTestJumpCloudManager(t, server.URL) + + users, err := manager.GetAccount(context.Background(), "testAccount") + require.NoError(t, err) + assert.Len(t, users, 2) + assert.Equal(t, "testAccount", users[0].AppMetadata.WTAccountID) + assert.Equal(t, "testAccount", users[1].AppMetadata.WTAccountID) +} + +func TestJumpCloudGetAllAccounts(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + resp := jumpCloudUserList{ + Results: []jumpCloudUser{ + {ID: "u1", Email: "a@test.com", Firstname: "Alice"}, + {ID: "u2", Email: "b@test.com", Firstname: "Bob"}, + }, + TotalCount: 2, + } + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(resp) + })) + defer server.Close() + + manager := newTestJumpCloudManager(t, server.URL) + + indexedUsers, err := manager.GetAllAccounts(context.Background()) + require.NoError(t, err) + assert.Len(t, indexedUsers[UnsetAccountID], 2) +} + +func TestJumpCloudGetAllAccountsPagination(t *testing.T) { + totalUsers := 250 + allUsers := make([]jumpCloudUser, totalUsers) + for i := range allUsers { + allUsers[i] = jumpCloudUser{ + ID: fmt.Sprintf("u%d", i), + Email: fmt.Sprintf("user%d@test.com", i), + Firstname: fmt.Sprintf("User%d", i), + } + } + + requestCount := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var reqBody map[string]int + assert.NoError(t, json.NewDecoder(r.Body).Decode(&reqBody)) + + limit := reqBody["limit"] + skip := reqBody["skip"] + requestCount++ + + end := skip + limit + if end > totalUsers { + end = totalUsers + } + + resp := jumpCloudUserList{ + Results: allUsers[skip:end], + TotalCount: totalUsers, + } + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(resp) + })) + defer server.Close() + + manager := newTestJumpCloudManager(t, server.URL) + + indexedUsers, err := manager.GetAllAccounts(context.Background()) + require.NoError(t, err) + assert.Len(t, indexedUsers[UnsetAccountID], totalUsers) + assert.Equal(t, 3, requestCount, "should require 3 pages for 250 users at page size 100") +} + +func TestJumpCloudGetUserByEmail(t *testing.T) { + searchResponse := jumpCloudUserList{ + Results: []jumpCloudUser{ + {ID: "u1", Email: "alice@test.com", Firstname: "Alice", Lastname: "Smith"}, + }, + TotalCount: 1, + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "/search/systemusers", r.URL.Path) + assert.Equal(t, http.MethodPost, r.Method) + + body, err := io.ReadAll(r.Body) + assert.NoError(t, err) + assert.Contains(t, string(body), "alice@test.com") + + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(searchResponse) + })) + defer server.Close() + + manager := newTestJumpCloudManager(t, server.URL) + + users, err := manager.GetUserByEmail(context.Background(), "alice@test.com") + require.NoError(t, err) + assert.Len(t, users, 1) + assert.Equal(t, "alice@test.com", users[0].Email) +} + +func TestJumpCloudDeleteUser(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "/systemusers/user123", r.URL.Path) + assert.Equal(t, http.MethodDelete, r.Method) + assert.Equal(t, "test-api-key", r.Header.Get("x-api-key")) + + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(map[string]string{"_id": "user123"}) + })) + defer server.Close() + + manager := newTestJumpCloudManager(t, server.URL) + + err := manager.DeleteUser(context.Background(), "user123") + require.NoError(t, err) +} + +func TestJumpCloudAPIError(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + })) + defer server.Close() + + manager := newTestJumpCloudManager(t, server.URL) + + _, err := manager.GetUserDataByID(context.Background(), "user123", AppMetadata{}) + require.Error(t, err) + assert.Contains(t, err.Error(), "401") +} + +func TestParseJumpCloudUser(t *testing.T) { + user := jumpCloudUser{ + ID: "abc123", + Email: "test@example.com", + Firstname: "John", + Middlename: "M", + Lastname: "Doe", + } + + userData := parseJumpCloudUser(user) + assert.Equal(t, "abc123", userData.ID) + assert.Equal(t, "test@example.com", userData.Email) + assert.Equal(t, "John M Doe", userData.Name) +} + +func newTestJumpCloudManager(t *testing.T, apiBase string) *JumpCloudManager { + t.Helper() + return &JumpCloudManager{ + apiBase: apiBase, + apiToken: "test-api-key", + httpClient: http.DefaultClient, + helper: JsonParser{}, + appMetrics: nil, + } +} From 0fc63ea0baf9fc71bab876ce250e6b5f85e84731 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Tue, 24 Mar 2026 23:18:21 +0800 Subject: [PATCH 237/374] [management] Allow multiple header auths with same header name (#5678) --- .../modules/reverseproxy/service/service.go | 7 +- .../reverseproxy/service/service_test.go | 104 ++++++++++++++++++ proxy/internal/auth/middleware_test.go | 68 ++++++++++++ 3 files changed, 173 insertions(+), 6 deletions(-) diff --git a/management/internals/modules/reverseproxy/service/service.go b/management/internals/modules/reverseproxy/service/service.go index be04777a1..7ca2c3043 100644 --- a/management/internals/modules/reverseproxy/service/service.go +++ b/management/internals/modules/reverseproxy/service/service.go @@ -850,7 +850,7 @@ func IsPortBasedProtocol(mode string) bool { } const ( - maxCustomHeaders = 16 + maxCustomHeaders = 16 maxHeaderKeyLen = 128 maxHeaderValueLen = 4096 ) @@ -947,7 +947,6 @@ func containsCRLF(s string) bool { } func validateHeaderAuths(headers []*HeaderAuthConfig) error { - seen := make(map[string]struct{}) for i, h := range headers { if h == nil || !h.Enabled { continue @@ -968,10 +967,6 @@ func validateHeaderAuths(headers []*HeaderAuthConfig) error { if canonical == "Host" { return fmt.Errorf("header_auths[%d]: Host header cannot be used for auth", i) } - if _, dup := seen[canonical]; dup { - return fmt.Errorf("header_auths[%d]: duplicate header %q (same canonical form already configured)", i, h.Header) - } - seen[canonical] = struct{}{} if len(h.Value) > maxHeaderValueLen { return fmt.Errorf("header_auths[%d]: value exceeds maximum length of %d", i, maxHeaderValueLen) } diff --git a/management/internals/modules/reverseproxy/service/service_test.go b/management/internals/modules/reverseproxy/service/service_test.go index 3fe07b1d0..ff54cb79f 100644 --- a/management/internals/modules/reverseproxy/service/service_test.go +++ b/management/internals/modules/reverseproxy/service/service_test.go @@ -935,3 +935,107 @@ func TestExposeServiceRequest_Validate_HTTPAllowsAuth(t *testing.T) { req := ExposeServiceRequest{Port: 8080, Mode: "http", Pin: "123456"} require.NoError(t, req.Validate()) } + +func TestValidate_HeaderAuths(t *testing.T) { + t.Run("single valid header", func(t *testing.T) { + rp := validProxy() + rp.Auth = AuthConfig{ + HeaderAuths: []*HeaderAuthConfig{ + {Enabled: true, Header: "X-API-Key", Value: "secret"}, + }, + } + require.NoError(t, rp.Validate()) + }) + + t.Run("multiple headers same canonical name allowed", func(t *testing.T) { + rp := validProxy() + rp.Auth = AuthConfig{ + HeaderAuths: []*HeaderAuthConfig{ + {Enabled: true, Header: "Authorization", Value: "Bearer token-1"}, + {Enabled: true, Header: "Authorization", Value: "Bearer token-2"}, + }, + } + require.NoError(t, rp.Validate()) + }) + + t.Run("multiple headers different case same canonical allowed", func(t *testing.T) { + rp := validProxy() + rp.Auth = AuthConfig{ + HeaderAuths: []*HeaderAuthConfig{ + {Enabled: true, Header: "x-api-key", Value: "key-1"}, + {Enabled: true, Header: "X-Api-Key", Value: "key-2"}, + }, + } + require.NoError(t, rp.Validate()) + }) + + t.Run("multiple different headers allowed", func(t *testing.T) { + rp := validProxy() + rp.Auth = AuthConfig{ + HeaderAuths: []*HeaderAuthConfig{ + {Enabled: true, Header: "Authorization", Value: "Bearer tok"}, + {Enabled: true, Header: "X-API-Key", Value: "key"}, + }, + } + require.NoError(t, rp.Validate()) + }) + + t.Run("empty header name rejected", func(t *testing.T) { + rp := validProxy() + rp.Auth = AuthConfig{ + HeaderAuths: []*HeaderAuthConfig{ + {Enabled: true, Header: "", Value: "val"}, + }, + } + err := rp.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "header name is required") + }) + + t.Run("hop-by-hop header rejected", func(t *testing.T) { + rp := validProxy() + rp.Auth = AuthConfig{ + HeaderAuths: []*HeaderAuthConfig{ + {Enabled: true, Header: "Connection", Value: "val"}, + }, + } + err := rp.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "hop-by-hop") + }) + + t.Run("host header rejected", func(t *testing.T) { + rp := validProxy() + rp.Auth = AuthConfig{ + HeaderAuths: []*HeaderAuthConfig{ + {Enabled: true, Header: "Host", Value: "val"}, + }, + } + err := rp.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "Host header cannot be used") + }) + + t.Run("disabled entries skipped", func(t *testing.T) { + rp := validProxy() + rp.Auth = AuthConfig{ + HeaderAuths: []*HeaderAuthConfig{ + {Enabled: false, Header: "", Value: ""}, + {Enabled: true, Header: "X-Key", Value: "val"}, + }, + } + require.NoError(t, rp.Validate()) + }) + + t.Run("value too long rejected", func(t *testing.T) { + rp := validProxy() + rp.Auth = AuthConfig{ + HeaderAuths: []*HeaderAuthConfig{ + {Enabled: true, Header: "X-Key", Value: strings.Repeat("a", maxHeaderValueLen+1)}, + }, + } + err := rp.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "exceeds maximum length") + }) +} diff --git a/proxy/internal/auth/middleware_test.go b/proxy/internal/auth/middleware_test.go index a4924d380..6063f070e 100644 --- a/proxy/internal/auth/middleware_test.go +++ b/proxy/internal/auth/middleware_test.go @@ -932,3 +932,71 @@ func TestProtect_HeaderAuth_SubsequentRequestUsesSessionCookie(t *testing.T) { assert.Equal(t, "header-user", capturedData2.GetUserID()) assert.Equal(t, "header", capturedData2.GetAuthMethod()) } + +// TestProtect_HeaderAuth_MultipleValuesSameHeader verifies that the proxy +// correctly handles multiple valid credentials for the same header name. +// In production, the mgmt gRPC authenticateHeader iterates all configured +// header auths and accepts if any hash matches (OR semantics). The proxy +// creates one Header scheme per entry, but a single gRPC call checks all. +func TestProtect_HeaderAuth_MultipleValuesSameHeader(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil, nil) + kp := generateTestKeyPair(t) + + // Mock simulates mgmt behavior: accepts either token-a or token-b. + accepted := map[string]bool{"Bearer token-a": true, "Bearer token-b": true} + mock := &mockAuthenticator{fn: func(_ context.Context, req *proto.AuthenticateRequest) (*proto.AuthenticateResponse, error) { + ha := req.GetHeaderAuth() + if ha != nil && accepted[ha.GetHeaderValue()] { + token, err := sessionkey.SignToken(kp.PrivateKey, "header-user", "example.com", auth.MethodHeader, time.Hour) + require.NoError(t, err) + return &proto.AuthenticateResponse{Success: true, SessionToken: token}, nil + } + return &proto.AuthenticateResponse{Success: false}, nil + }} + + // Single Header scheme (as if one entry existed), but the mock checks both values. + hdr := NewHeader(mock, "svc1", "acc1", "Authorization") + require.NoError(t, mw.AddDomain("example.com", []Scheme{hdr}, kp.PublicKey, time.Hour, "acc1", "svc1", nil)) + + var backendCalled bool + handler := mw.Protect(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + backendCalled = true + w.WriteHeader(http.StatusOK) + })) + + t.Run("first value accepted", func(t *testing.T) { + backendCalled = false + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + req.Header.Set("Authorization", "Bearer token-a") + req = req.WithContext(proxy.WithCapturedData(req.Context(), proxy.NewCapturedData(""))) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + assert.True(t, backendCalled, "first token should be accepted") + }) + + t.Run("second value accepted", func(t *testing.T) { + backendCalled = false + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + req.Header.Set("Authorization", "Bearer token-b") + req = req.WithContext(proxy.WithCapturedData(req.Context(), proxy.NewCapturedData(""))) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + assert.True(t, backendCalled, "second token should be accepted") + }) + + t.Run("unknown value rejected", func(t *testing.T) { + backendCalled = false + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + req.Header.Set("Authorization", "Bearer token-c") + req = req.WithContext(proxy.WithCapturedData(req.Context(), proxy.NewCapturedData(""))) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusUnauthorized, rec.Code) + assert.False(t, backendCalled, "unknown token should be rejected") + }) +} From 0af5a0441fd4e80f289c3246440c07f8a9dcc129 Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Tue, 24 Mar 2026 20:25:29 +0300 Subject: [PATCH 238/374] [management] Fix DNS label uniqueness check on peer rename (#5679) --- management/server/peer.go | 2 +- management/server/peer_test.go | 68 ++++++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+), 1 deletion(-) diff --git a/management/server/peer.go b/management/server/peer.go index 78ecbfcae..f7cb6a0f1 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -249,7 +249,7 @@ func (am *DefaultAccountManager) UpdatePeer(ctx context.Context, accountID, user if err != nil { newLabel = "" } else { - _, err := transaction.GetPeerIdByLabel(ctx, store.LockingStrengthNone, accountID, update.Name) + _, err := transaction.GetPeerIdByLabel(ctx, store.LockingStrengthNone, accountID, newLabel) if err == nil { newLabel = "" } diff --git a/management/server/peer_test.go b/management/server/peer_test.go index b17757ffd..51c16d730 100644 --- a/management/server/peer_test.go +++ b/management/server/peer_test.go @@ -37,6 +37,7 @@ import ( "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/settings" + "github.com/netbirdio/netbird/shared/auth" "github.com/netbirdio/netbird/shared/management/status" "github.com/netbirdio/netbird/management/server/util" @@ -2738,3 +2739,70 @@ func TestProcessPeerAddAuth(t *testing.T) { assert.Empty(t, config.GroupsToAdd) }) } + +func TestUpdatePeer_DnsLabelCollisionWithFQDN(t *testing.T) { + manager, _, err := createManager(t) + require.NoError(t, err, "unable to create account manager") + + accountID, err := manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userID}) + require.NoError(t, err, "unable to create an account") + + // Add first peer with hostname that produces DNS label "netbird1" + key1, err := wgtypes.GenerateKey() + require.NoError(t, err) + peer1, _, _, err := manager.AddPeer(context.Background(), "", "", userID, &nbpeer.Peer{ + Key: key1.PublicKey().String(), + Meta: nbpeer.PeerSystemMeta{Hostname: "netbird1.netbird.cloud"}, + }, false) + require.NoError(t, err, "unable to add first peer") + assert.Equal(t, "netbird1", peer1.DNSLabel) + + // Add second peer with a different hostname + key2, err := wgtypes.GenerateKey() + require.NoError(t, err) + peer2, _, _, err := manager.AddPeer(context.Background(), "", "", userID, &nbpeer.Peer{ + Key: key2.PublicKey().String(), + Meta: nbpeer.PeerSystemMeta{Hostname: "ip-10-29-5-130"}, + }, false) + require.NoError(t, err) + + update := peer2.Copy() + update.Name = "netbird1.demo.netbird.cloud" + updated, err := manager.UpdatePeer(context.Background(), accountID, userID, update) + require.NoError(t, err, "renaming peer should not fail with duplicate DNS label error") + assert.Equal(t, "netbird1.demo.netbird.cloud", updated.Name) + assert.NotEqual(t, "netbird1", updated.DNSLabel, "DNS label should not collide with existing peer") + assert.Contains(t, updated.DNSLabel, "netbird1-", "DNS label should be IP-based fallback") +} + +func TestUpdatePeer_DnsLabelUniqueName(t *testing.T) { + manager, _, err := createManager(t) + require.NoError(t, err, "unable to create account manager") + + accountID, err := manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userID}) + require.NoError(t, err, "unable to create an account") + + key1, err := wgtypes.GenerateKey() + require.NoError(t, err) + peer1, _, _, err := manager.AddPeer(context.Background(), "", "", userID, &nbpeer.Peer{ + Key: key1.PublicKey().String(), + Meta: nbpeer.PeerSystemMeta{Hostname: "web-server"}, + }, false) + require.NoError(t, err) + assert.Equal(t, "web-server", peer1.DNSLabel) + + // Add second peer and rename it to a unique FQDN whose first label doesn't collide + key2, err := wgtypes.GenerateKey() + require.NoError(t, err) + peer2, _, _, err := manager.AddPeer(context.Background(), "", "", userID, &nbpeer.Peer{ + Key: key2.PublicKey().String(), + Meta: nbpeer.PeerSystemMeta{Hostname: "old-name"}, + }, false) + require.NoError(t, err) + + update := peer2.Copy() + update.Name = "api-server.example.com" + updated, err := manager.UpdatePeer(context.Background(), accountID, userID, update) + require.NoError(t, err, "renaming to unique FQDN should succeed") + assert.Equal(t, "api-server", updated.DNSLabel, "DNS label should be first label of FQDN") +} From 9aaa05e8eab86df349d73cb2ba408648f600dcc2 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 25 Mar 2026 15:51:29 +0800 Subject: [PATCH 239/374] Replace discontinued LocalStack image with MinIO in S3 test (#5680) --- go.mod | 2 +- upload-server/server/s3_test.go | 78 ++++++++++++++++++--------------- 2 files changed, 43 insertions(+), 37 deletions(-) diff --git a/go.mod b/go.mod index 3661d6fe0..50e0ae3b3 100644 --- a/go.mod +++ b/go.mod @@ -33,6 +33,7 @@ require ( github.com/awnumar/memguard v0.23.0 github.com/aws/aws-sdk-go-v2 v1.36.3 github.com/aws/aws-sdk-go-v2/config v1.29.14 + github.com/aws/aws-sdk-go-v2/credentials v1.17.67 github.com/aws/aws-sdk-go-v2/service/s3 v1.79.2 github.com/c-robinson/iplib v1.0.3 github.com/caddyserver/certmagic v0.21.3 @@ -143,7 +144,6 @@ require ( github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/awnumar/memcall v0.4.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect diff --git a/upload-server/server/s3_test.go b/upload-server/server/s3_test.go index 26b0ecd09..7ab1bb379 100644 --- a/upload-server/server/s3_test.go +++ b/upload-server/server/s3_test.go @@ -5,13 +5,12 @@ import ( "encoding/json" "net/http" "net/http/httptest" - "os" "runtime" "testing" "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/testcontainers/testcontainers-go" "github.com/testcontainers/testcontainers-go/wait" @@ -20,45 +19,55 @@ import ( ) func Test_S3HandlerGetUploadURL(t *testing.T) { - if runtime.GOOS != "linux" && os.Getenv("CI") == "true" { - t.Skip("Skipping test on non-Linux and CI environment due to docker dependency") - } - if runtime.GOOS == "windows" { - t.Skip("Skipping test on Windows due to potential docker dependency") + if runtime.GOOS != "linux" { + t.Skip("Skipping test on non-Linux due to docker dependency") } - awsEndpoint := "http://127.0.0.1:4566" awsRegion := "us-east-1" ctx := context.Background() - containerRequest := testcontainers.ContainerRequest{ - Image: "localstack/localstack:s3-latest", - ExposedPorts: []string{"4566:4566/tcp"}, - WaitingFor: wait.ForLog("Ready"), - } - c, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ - ContainerRequest: containerRequest, - Started: true, + ContainerRequest: testcontainers.ContainerRequest{ + Image: "minio/minio:RELEASE.2025-04-22T22-12-26Z", + ExposedPorts: []string{"9000/tcp"}, + Env: map[string]string{ + "MINIO_ROOT_USER": "minioadmin", + "MINIO_ROOT_PASSWORD": "minioadmin", + }, + Cmd: []string{"server", "/data"}, + WaitingFor: wait.ForHTTP("/minio/health/ready").WithPort("9000"), + }, + Started: true, }) - if err != nil { - t.Error(err) - } - defer func(c testcontainers.Container, ctx context.Context) { + require.NoError(t, err) + t.Cleanup(func() { if err := c.Terminate(ctx); err != nil { t.Log(err) } - }(c, ctx) + }) + + mappedPort, err := c.MappedPort(ctx, "9000") + require.NoError(t, err) + + hostIP, err := c.Host(ctx) + require.NoError(t, err) + + awsEndpoint := "http://" + hostIP + ":" + mappedPort.Port() t.Setenv("AWS_REGION", awsRegion) t.Setenv("AWS_ENDPOINT_URL", awsEndpoint) - t.Setenv("AWS_ACCESS_KEY_ID", "test") - t.Setenv("AWS_SECRET_ACCESS_KEY", "test") + t.Setenv("AWS_ACCESS_KEY_ID", "minioadmin") + t.Setenv("AWS_SECRET_ACCESS_KEY", "minioadmin") + t.Setenv("AWS_CONFIG_FILE", "") + t.Setenv("AWS_SHARED_CREDENTIALS_FILE", "") + t.Setenv("AWS_PROFILE", "") - cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(awsRegion), config.WithBaseEndpoint(awsEndpoint)) - if err != nil { - t.Error(err) - } + cfg, err := config.LoadDefaultConfig(ctx, + config.WithRegion(awsRegion), + config.WithBaseEndpoint(awsEndpoint), + config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider("minioadmin", "minioadmin", "")), + ) + require.NoError(t, err) client := s3.NewFromConfig(cfg, func(o *s3.Options) { o.UsePathStyle = true @@ -66,19 +75,16 @@ func Test_S3HandlerGetUploadURL(t *testing.T) { }) bucketName := "test" - if _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{ + _, err = client.CreateBucket(ctx, &s3.CreateBucketInput{ Bucket: &bucketName, - }); err != nil { - t.Error(err) - } + }) + require.NoError(t, err) list, err := client.ListBuckets(ctx, &s3.ListBucketsInput{}) - if err != nil { - t.Error(err) - } + require.NoError(t, err) - assert.Equal(t, len(list.Buckets), 1) - assert.Equal(t, *list.Buckets[0].Name, bucketName) + require.Len(t, list.Buckets, 1) + require.Equal(t, bucketName, *list.Buckets[0].Name) t.Setenv(bucketVar, bucketName) From 6ff6d84646b15976472c8a69c45d930a2cba7c07 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 26 Mar 2026 16:49:02 +0800 Subject: [PATCH 240/374] [client] Bump go-m1cpu to v0.2.1 to fix segfault on macOS 26 / M5 chips (#5701) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 50e0ae3b3..d99a14df3 100644 --- a/go.mod +++ b/go.mod @@ -254,7 +254,7 @@ require ( github.com/russellhaering/goxmldsig v1.5.0 // indirect github.com/rymdport/portal v0.4.2 // indirect github.com/shirou/gopsutil/v4 v4.25.1 // indirect - github.com/shoenig/go-m1cpu v0.2.0 // indirect + github.com/shoenig/go-m1cpu v0.2.1 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/spf13/cast v1.7.0 // indirect github.com/srwiley/oksvg v0.0.0-20221011165216-be6e8873101c // indirect diff --git a/go.sum b/go.sum index 57e07dd7b..3624898c7 100644 --- a/go.sum +++ b/go.sum @@ -511,8 +511,8 @@ github.com/shirou/gopsutil/v3 v3.24.4/go.mod h1:lTd2mdiOspcqLgAnr9/nGi71NkeMpWKd github.com/shirou/gopsutil/v4 v4.25.1 h1:QSWkTc+fu9LTAWfkZwZ6j8MSUk4A2LV7rbH0ZqmLjXs= github.com/shirou/gopsutil/v4 v4.25.1/go.mod h1:RoUCUpndaJFtT+2zsZzzmhvbfGoDCJ7nFXKJf8GqJbI= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/go-m1cpu v0.2.0 h1:t4GNqvPZ84Vjtpboo/kT3pIkbaK3vc+JIlD/Wz1zSFY= -github.com/shoenig/go-m1cpu v0.2.0/go.mod h1:KkDOw6m3ZJQAPHbrzkZki4hnx+pDRR1Lo+ldA56wD5w= +github.com/shoenig/go-m1cpu v0.2.1 h1:yqRB4fvOge2+FyRXFkXqsyMoqPazv14Yyy+iyccT2E4= +github.com/shoenig/go-m1cpu v0.2.1/go.mod h1:KkDOw6m3ZJQAPHbrzkZki4hnx+pDRR1Lo+ldA56wD5w= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shoenig/test v1.7.0 h1:eWcHtTXa6QLnBvm0jgEabMRN/uJ4DMV3M8xUGgRkZmk= github.com/shoenig/test v1.7.0/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= From a8b95707009e16f808d442d2b741b580dc8e4593 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 26 Mar 2026 16:50:43 +0800 Subject: [PATCH 241/374] [client] Enable RPM package signature verification in install script (#5676) --- release_files/install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release_files/install.sh b/release_files/install.sh index 6a2c5f458..1e71936f3 100755 --- a/release_files/install.sh +++ b/release_files/install.sh @@ -128,7 +128,7 @@ cat <<-EOF | ${SUDO} tee /etc/yum.repos.d/netbird.repo name=NetBird baseurl=https://pkgs.netbird.io/yum/ enabled=1 -gpgcheck=0 +gpgcheck=1 gpgkey=https://pkgs.netbird.io/yum/repodata/repomd.xml.key repo_gpgcheck=1 EOF From 145d82f322e9dfddf2fca981fea3c06b6a3bfeff Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 26 Mar 2026 18:11:05 +0800 Subject: [PATCH 242/374] [client] Replace iOS DNS IsPrivate heuristic with route manager check (#5694) --- client/internal/dns/mock_server.go | 5 +++++ client/internal/dns/server.go | 13 +++++++++++++ client/internal/dns/upstream.go | 1 + client/internal/dns/upstream_ios.go | 8 +++++--- client/internal/engine.go | 11 +++++++++++ 5 files changed, 35 insertions(+), 3 deletions(-) diff --git a/client/internal/dns/mock_server.go b/client/internal/dns/mock_server.go index fe160e20a..1df57d1db 100644 --- a/client/internal/dns/mock_server.go +++ b/client/internal/dns/mock_server.go @@ -85,6 +85,11 @@ func (m *MockServer) PopulateManagementDomain(mgmtURL *url.URL) error { return nil } +// SetRouteChecker mock implementation of SetRouteChecker from Server interface +func (m *MockServer) SetRouteChecker(func(netip.Addr) bool) { + // Mock implementation - no-op +} + // BeginBatch mock implementation of BeginBatch from Server interface func (m *MockServer) BeginBatch() { // Mock implementation - no-op diff --git a/client/internal/dns/server.go b/client/internal/dns/server.go index 6ca4f7957..3c47f4ee6 100644 --- a/client/internal/dns/server.go +++ b/client/internal/dns/server.go @@ -57,6 +57,7 @@ type Server interface { ProbeAvailability() UpdateServerConfig(domains dnsconfig.ServerDomains) error PopulateManagementDomain(mgmtURL *url.URL) error + SetRouteChecker(func(netip.Addr) bool) } type nsGroupsByDomain struct { @@ -104,6 +105,7 @@ type DefaultServer struct { statusRecorder *peer.Status stateManager *statemanager.Manager + routeMatch func(netip.Addr) bool probeMu sync.Mutex probeCancel context.CancelFunc @@ -229,6 +231,14 @@ func newDefaultServer( return defaultServer } +// SetRouteChecker sets the function used by upstream resolvers to determine +// whether an IP is routed through the tunnel. +func (s *DefaultServer) SetRouteChecker(f func(netip.Addr) bool) { + s.mux.Lock() + defer s.mux.Unlock() + s.routeMatch = f +} + // RegisterHandler registers a handler for the given domains with the given priority. // Any previously registered handler for the same domain and priority will be replaced. func (s *DefaultServer) RegisterHandler(domains domain.List, handler dns.Handler, priority int) { @@ -743,6 +753,7 @@ func (s *DefaultServer) registerFallback(config HostDNSConfig) { log.Errorf("failed to create upstream resolver for original nameservers: %v", err) return } + handler.routeMatch = s.routeMatch for _, ns := range originalNameservers { if ns == config.ServerIP { @@ -852,6 +863,7 @@ func (s *DefaultServer) createHandlersForDomainGroup(domainGroup nsGroupsByDomai if err != nil { return nil, fmt.Errorf("create upstream resolver: %v", err) } + handler.routeMatch = s.routeMatch for _, ns := range nsGroup.NameServers { if ns.NSType != nbdns.UDPNameServerType { @@ -1036,6 +1048,7 @@ func (s *DefaultServer) addHostRootZone() { log.Errorf("unable to create a new upstream resolver, error: %v", err) return } + handler.routeMatch = s.routeMatch handler.upstreamServers = maps.Keys(hostDNSServers) handler.deactivate = func(error) {} diff --git a/client/internal/dns/upstream.go b/client/internal/dns/upstream.go index 18128a942..5b8135132 100644 --- a/client/internal/dns/upstream.go +++ b/client/internal/dns/upstream.go @@ -70,6 +70,7 @@ type upstreamResolverBase struct { deactivate func(error) reactivate func() statusRecorder *peer.Status + routeMatch func(netip.Addr) bool } type upstreamFailure struct { diff --git a/client/internal/dns/upstream_ios.go b/client/internal/dns/upstream_ios.go index 4d053a5a1..02c11173b 100644 --- a/client/internal/dns/upstream_ios.go +++ b/client/internal/dns/upstream_ios.go @@ -65,11 +65,13 @@ func (u *upstreamResolverIOS) exchange(ctx context.Context, upstream string, r * } else { upstreamIP = upstreamIP.Unmap() } - if u.lNet.Contains(upstreamIP) || upstreamIP.IsPrivate() { - log.Debugf("using private client to query upstream: %s", upstream) + needsPrivate := u.lNet.Contains(upstreamIP) || + (u.routeMatch != nil && u.routeMatch(upstreamIP)) + if needsPrivate { + log.Debugf("using private client to query %s via upstream %s", r.Question[0].Name, upstream) client, err = GetClientPrivate(u.lIP, u.interfaceName, timeout) if err != nil { - return nil, 0, fmt.Errorf("error while creating private client: %s", err) + return nil, 0, fmt.Errorf("create private client: %s", err) } } diff --git a/client/internal/engine.go b/client/internal/engine.go index ea1d3bec9..7b100bd0c 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -499,6 +499,17 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) e.routeManager.SetRouteChangeListener(e.mobileDep.NetworkChangeListener) + e.dnsServer.SetRouteChecker(func(ip netip.Addr) bool { + for _, routes := range e.routeManager.GetClientRoutes() { + for _, r := range routes { + if r.Network.Contains(ip) { + return true + } + } + } + return false + }) + if err = e.wgInterfaceCreate(); err != nil { log.Errorf("failed creating tunnel interface %s: [%s]", e.config.WgIfaceName, err.Error()) e.close() From 7be8752a00f20b1d24e7fb5052c1e8e01c2d462f Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Thu, 26 Mar 2026 18:26:33 +0300 Subject: [PATCH 243/374] [management] Add notification endpoints (#5590) --- management/server/store/sql_store.go | 58 +++++ management/server/store/store.go | 2 + management/server/store/store_mock.go | 15 ++ shared/management/http/api/openapi.yml | 290 ++++++++++++++++++++++++ shared/management/http/api/types.gen.go | 226 ++++++++++++++++++ 5 files changed, 591 insertions(+) diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 2e499dc74..cf030f51e 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -5494,3 +5494,61 @@ func (s *SqlStore) CleanupStaleProxies(ctx context.Context, inactivityDuration t return nil } + +// GetRoutingPeerNetworks returns the distinct network names where the peer is assigned as a routing peer +// in an enabled network router, either directly or via peer groups. +func (s *SqlStore) GetRoutingPeerNetworks(_ context.Context, accountID, peerID string) ([]string, error) { + var routers []*routerTypes.NetworkRouter + if err := s.db.Select("peer, peer_groups, network_id").Where("account_id = ? AND enabled = true", accountID).Find(&routers).Error; err != nil { + return nil, status.Errorf(status.Internal, "failed to get enabled routers: %v", err) + } + + if len(routers) == 0 { + return nil, nil + } + + var groupPeers []types.GroupPeer + if err := s.db.Select("group_id").Where("account_id = ? AND peer_id = ?", accountID, peerID).Find(&groupPeers).Error; err != nil { + return nil, status.Errorf(status.Internal, "failed to get peer group memberships: %v", err) + } + + groupSet := make(map[string]struct{}, len(groupPeers)) + for _, gp := range groupPeers { + groupSet[gp.GroupID] = struct{}{} + } + + networkIDs := make(map[string]struct{}) + for _, r := range routers { + if r.Peer == peerID { + networkIDs[r.NetworkID] = struct{}{} + } else if r.Peer == "" { + for _, pg := range r.PeerGroups { + if _, ok := groupSet[pg]; ok { + networkIDs[r.NetworkID] = struct{}{} + break + } + } + } + } + + if len(networkIDs) == 0 { + return nil, nil + } + + ids := make([]string, 0, len(networkIDs)) + for id := range networkIDs { + ids = append(ids, id) + } + + var networks []*networkTypes.Network + if err := s.db.Select("name").Where("account_id = ? AND id IN ?", accountID, ids).Find(&networks).Error; err != nil { + return nil, status.Errorf(status.Internal, "failed to get networks: %v", err) + } + + names := make([]string, 0, len(networks)) + for _, n := range networks { + names = append(names, n.Name) + } + + return names, nil +} diff --git a/management/server/store/store.go b/management/server/store/store.go index 816dff4fa..d00dcde38 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -290,6 +290,8 @@ type Store interface { CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error GetCustomDomainsCounts(ctx context.Context) (total int64, validated int64, err error) + + GetRoutingPeerNetworks(ctx context.Context, accountID, peerID string) ([]string, error) } const ( diff --git a/management/server/store/store_mock.go b/management/server/store/store_mock.go index d779a7bcd..235405861 100644 --- a/management/server/store/store_mock.go +++ b/management/server/store/store_mock.go @@ -2333,6 +2333,21 @@ func (mr *MockStoreMockRecorder) IncrementSetupKeyUsage(ctx, setupKeyID interfac return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IncrementSetupKeyUsage", reflect.TypeOf((*MockStore)(nil).IncrementSetupKeyUsage), ctx, setupKeyID) } +// GetRoutingPeerNetworks mocks base method. +func (m *MockStore) GetRoutingPeerNetworks(ctx context.Context, accountID, peerID string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRoutingPeerNetworks", ctx, accountID, peerID) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRoutingPeerNetworks indicates an expected call of GetRoutingPeerNetworks. +func (mr *MockStoreMockRecorder) GetRoutingPeerNetworks(ctx, accountID, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRoutingPeerNetworks", reflect.TypeOf((*MockStore)(nil).GetRoutingPeerNetworks), ctx, accountID, peerID) +} + // IsPrimaryAccount mocks base method. func (m *MockStore) IsPrimaryAccount(ctx context.Context, accountID string) (bool, string, error) { m.ctrl.T.Helper() diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 6b766731c..d81c371db 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -89,6 +89,10 @@ tags: - name: Event Streaming Integrations description: Manage event streaming integrations. x-cloud-only: true + - name: Notifications + description: Manage notification channels for account event alerts. + x-cloud-only: true + components: schemas: @@ -4385,6 +4389,123 @@ components: type: string description: The newly generated SCIM API token example: "nbs_F3f0d..." + NotificationChannelType: + type: string + description: The type of notification channel. + enum: + - email + - webhook + example: "email" + NotificationEventType: + type: string + description: | + An activity event type code. See `GET /api/integrations/notifications/types` for the full list + of supported event types and their human-readable descriptions. + example: "user.join" + EmailTarget: + type: object + description: Target configuration for email notification channels. + properties: + emails: + type: array + description: List of email addresses to send notifications to. + minItems: 1 + items: + type: string + format: email + example: [ "admin@example.com", "ops@example.com" ] + required: + - emails + WebhookTarget: + type: object + description: Target configuration for webhook notification channels. + properties: + url: + type: string + format: uri + description: The webhook endpoint URL to send notifications to. + example: "https://hooks.example.com/netbird" + headers: + type: object + additionalProperties: + type: string + description: | + Custom HTTP headers sent with each webhook request. + Values are write-only; in GET responses all values are masked. + example: + Authorization: "Bearer token" + X-Webhook-Secret: "secret" + required: + - url + NotificationChannelRequest: + type: object + description: Request body for creating or updating a notification channel. + properties: + type: + $ref: '#/components/schemas/NotificationChannelType' + target: + description: | + Channel-specific target configuration. The shape depends on the `type` field: + - `email`: requires an `EmailTarget` object + - `webhook`: requires a `WebhookTarget` object + oneOf: + - $ref: '#/components/schemas/EmailTarget' + - $ref: '#/components/schemas/WebhookTarget' + event_types: + type: array + description: List of activity event type codes this channel subscribes to. + items: + $ref: '#/components/schemas/NotificationEventType' + example: [ "user.join", "peer.user.add", "peer.login.expire" ] + enabled: + type: boolean + description: Whether this notification channel is active. + example: true + required: + - type + - event_types + - enabled + NotificationChannelResponse: + type: object + description: A notification channel configuration. + properties: + id: + type: string + description: Unique identifier of the notification channel. + readOnly: true + example: "ch8i4ug6lnn4g9hqv7m0" + type: + $ref: '#/components/schemas/NotificationChannelType' + target: + description: | + Channel-specific target configuration. The shape depends on the `type` field: + - `email`: an `EmailTarget` object + - `webhook`: a `WebhookTarget` object + oneOf: + - $ref: '#/components/schemas/EmailTarget' + - $ref: '#/components/schemas/WebhookTarget' + event_types: + type: array + description: List of activity event type codes this channel subscribes to. + items: + $ref: '#/components/schemas/NotificationEventType' + example: [ "user.join", "peer.user.add", "peer.login.expire" ] + enabled: + type: boolean + description: Whether this notification channel is active. + example: true + required: + - id + - type + - event_types + - enabled + NotificationTypeEntry: + type: object + description: A map of event type codes to their human-readable descriptions. + additionalProperties: + type: string + example: + user.join: "User joined" BypassResponse: type: object description: Response for bypassed peer operations. @@ -10062,3 +10183,172 @@ paths: "$ref": "#/components/responses/not_found" '500': "$ref": "#/components/responses/internal_error" + /api/integrations/notifications/types: + get: + tags: + - Notifications + summary: List Notification Event Types + description: | + Returns a map of all supported activity event type codes to their + human-readable descriptions. Use these codes when configuring + `event_types` on notification channels. + operationId: listNotificationEventTypes + responses: + '200': + description: A map of event type codes to descriptions. + content: + application/json: + schema: + $ref: '#/components/schemas/NotificationTypeEntry' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + /api/integrations/notifications/channels: + get: + tags: + - Notifications + summary: List Notification Channels + description: Retrieves all notification channels configured for the authenticated account. + operationId: listNotificationChannels + responses: + '200': + description: A list of notification channels. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/NotificationChannelResponse' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + post: + tags: + - Notifications + summary: Create Notification Channel + description: | + Creates a new notification channel for the authenticated account. + Supported channel types are `email` and `webhook`. + operationId: createNotificationChannel + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/NotificationChannelRequest' + responses: + '200': + description: Notification channel created successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/NotificationChannelResponse' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + /api/integrations/notifications/channels/{channelId}: + parameters: + - name: channelId + in: path + required: true + description: The unique identifier of the notification channel. + schema: + type: string + example: "ch8i4ug6lnn4g9hqv7m0" + get: + tags: + - Notifications + summary: Get Notification Channel + description: Retrieves a specific notification channel by its ID. + operationId: getNotificationChannel + responses: + '200': + description: Successfully retrieved the notification channel. + content: + application/json: + schema: + $ref: '#/components/schemas/NotificationChannelResponse' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + put: + tags: + - Notifications + summary: Update Notification Channel + description: Updates an existing notification channel. + operationId: updateNotificationChannel + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/NotificationChannelRequest' + responses: + '200': + description: Notification channel updated successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/NotificationChannelResponse' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + delete: + tags: + - Notifications + summary: Delete Notification Channel + description: Deletes a notification channel by its ID. + operationId: deleteNotificationChannel + responses: + '200': + description: Notification channel deleted successfully. + content: + application/json: + schema: + type: object + example: { } + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index 067cc4093..19d2706e1 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -9,6 +9,7 @@ import ( "time" "github.com/oapi-codegen/runtime" + openapi_types "github.com/oapi-codegen/runtime/types" ) const ( @@ -664,6 +665,24 @@ func (e NetworkResourceType) Valid() bool { } } +// Defines values for NotificationChannelType. +const ( + NotificationChannelTypeEmail NotificationChannelType = "email" + NotificationChannelTypeWebhook NotificationChannelType = "webhook" +) + +// Valid indicates whether the value is a known member of the NotificationChannelType enum. +func (e NotificationChannelType) Valid() bool { + switch e { + case NotificationChannelTypeEmail: + return true + case NotificationChannelTypeWebhook: + return true + default: + return false + } +} + // Defines values for PeerNetworkRangeCheckAction. const ( PeerNetworkRangeCheckActionAllow PeerNetworkRangeCheckAction = "allow" @@ -1893,6 +1912,12 @@ type EDRSentinelOneResponse struct { UpdatedAt time.Time `json:"updated_at"` } +// EmailTarget Target configuration for email notification channels. +type EmailTarget struct { + // Emails List of email addresses to send notifications to. + Emails []openapi_types.Email `json:"emails"` +} + // ErrorResponse Standard error response. Note: The exact structure of this error response is inferred from `util.WriteErrorResponse` and `util.WriteError` usage in the provided Go code, as a specific Go struct for errors was not provided. type ErrorResponse struct { // Message A human-readable error message. @@ -2666,6 +2691,67 @@ type NetworkTrafficUser struct { Name string `json:"name"` } +// NotificationChannelRequest Request body for creating or updating a notification channel. +type NotificationChannelRequest struct { + // Enabled Whether this notification channel is active. + Enabled bool `json:"enabled"` + + // EventTypes List of activity event type codes this channel subscribes to. + EventTypes []NotificationEventType `json:"event_types"` + + // Target Channel-specific target configuration. The shape depends on the `type` field: + // - `email`: requires an `EmailTarget` object + // - `webhook`: requires a `WebhookTarget` object + Target *NotificationChannelRequest_Target `json:"target,omitempty"` + + // Type The type of notification channel. + Type NotificationChannelType `json:"type"` +} + +// NotificationChannelRequest_Target Channel-specific target configuration. The shape depends on the `type` field: +// - `email`: requires an `EmailTarget` object +// - `webhook`: requires a `WebhookTarget` object +type NotificationChannelRequest_Target struct { + union json.RawMessage +} + +// NotificationChannelResponse A notification channel configuration. +type NotificationChannelResponse struct { + // Enabled Whether this notification channel is active. + Enabled bool `json:"enabled"` + + // EventTypes List of activity event type codes this channel subscribes to. + EventTypes []NotificationEventType `json:"event_types"` + + // Id Unique identifier of the notification channel. + Id *string `json:"id,omitempty"` + + // Target Channel-specific target configuration. The shape depends on the `type` field: + // - `email`: an `EmailTarget` object + // - `webhook`: a `WebhookTarget` object + Target *NotificationChannelResponse_Target `json:"target,omitempty"` + + // Type The type of notification channel. + Type NotificationChannelType `json:"type"` +} + +// NotificationChannelResponse_Target Channel-specific target configuration. The shape depends on the `type` field: +// - `email`: an `EmailTarget` object +// - `webhook`: a `WebhookTarget` object +type NotificationChannelResponse_Target struct { + union json.RawMessage +} + +// NotificationChannelType The type of notification channel. +type NotificationChannelType string + +// NotificationEventType An activity event type code. See `GET /api/integrations/notifications/types` for the full list +// of supported event types and their human-readable descriptions. +type NotificationEventType = string + +// NotificationTypeEntry A map of event type codes to their human-readable descriptions. +type NotificationTypeEntry map[string]string + // OSVersionCheck Posture check for the version of operating system type OSVersionCheck struct { // Android Posture check for the version of operating system @@ -4211,6 +4297,16 @@ type UserRequest struct { Role string `json:"role"` } +// WebhookTarget Target configuration for webhook notification channels. +type WebhookTarget struct { + // Headers Custom HTTP headers sent with each webhook request. + // Values are write-only; in GET responses all values are masked. + Headers *map[string]string `json:"headers,omitempty"` + + // Url The webhook endpoint URL to send notifications to. + Url string `json:"url"` +} + // WorkloadRequest defines model for WorkloadRequest. type WorkloadRequest struct { union json.RawMessage @@ -4564,6 +4660,12 @@ type PostApiIntegrationsMspTenantsIdSubscriptionJSONRequestBody PostApiIntegrati // PostApiIntegrationsMspTenantsIdUnlinkJSONRequestBody defines body for PostApiIntegrationsMspTenantsIdUnlink for application/json ContentType. type PostApiIntegrationsMspTenantsIdUnlinkJSONRequestBody PostApiIntegrationsMspTenantsIdUnlinkJSONBody +// CreateNotificationChannelJSONRequestBody defines body for CreateNotificationChannel for application/json ContentType. +type CreateNotificationChannelJSONRequestBody = NotificationChannelRequest + +// UpdateNotificationChannelJSONRequestBody defines body for UpdateNotificationChannel for application/json ContentType. +type UpdateNotificationChannelJSONRequestBody = NotificationChannelRequest + // CreateSCIMIntegrationJSONRequestBody defines body for CreateSCIMIntegration for application/json ContentType. type CreateSCIMIntegrationJSONRequestBody = CreateScimIntegrationRequest @@ -4660,6 +4762,130 @@ type PutApiUsersUserIdPasswordJSONRequestBody = PasswordChangeRequest // PostApiUsersUserIdTokensJSONRequestBody defines body for PostApiUsersUserIdTokens for application/json ContentType. type PostApiUsersUserIdTokensJSONRequestBody = PersonalAccessTokenRequest +// AsEmailTarget returns the union data inside the NotificationChannelRequest_Target as a EmailTarget +func (t NotificationChannelRequest_Target) AsEmailTarget() (EmailTarget, error) { + var body EmailTarget + err := json.Unmarshal(t.union, &body) + return body, err +} + +// FromEmailTarget overwrites any union data inside the NotificationChannelRequest_Target as the provided EmailTarget +func (t *NotificationChannelRequest_Target) FromEmailTarget(v EmailTarget) error { + b, err := json.Marshal(v) + t.union = b + return err +} + +// MergeEmailTarget performs a merge with any union data inside the NotificationChannelRequest_Target, using the provided EmailTarget +func (t *NotificationChannelRequest_Target) MergeEmailTarget(v EmailTarget) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + + merged, err := runtime.JSONMerge(t.union, b) + t.union = merged + return err +} + +// AsWebhookTarget returns the union data inside the NotificationChannelRequest_Target as a WebhookTarget +func (t NotificationChannelRequest_Target) AsWebhookTarget() (WebhookTarget, error) { + var body WebhookTarget + err := json.Unmarshal(t.union, &body) + return body, err +} + +// FromWebhookTarget overwrites any union data inside the NotificationChannelRequest_Target as the provided WebhookTarget +func (t *NotificationChannelRequest_Target) FromWebhookTarget(v WebhookTarget) error { + b, err := json.Marshal(v) + t.union = b + return err +} + +// MergeWebhookTarget performs a merge with any union data inside the NotificationChannelRequest_Target, using the provided WebhookTarget +func (t *NotificationChannelRequest_Target) MergeWebhookTarget(v WebhookTarget) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + + merged, err := runtime.JSONMerge(t.union, b) + t.union = merged + return err +} + +func (t NotificationChannelRequest_Target) MarshalJSON() ([]byte, error) { + b, err := t.union.MarshalJSON() + return b, err +} + +func (t *NotificationChannelRequest_Target) UnmarshalJSON(b []byte) error { + err := t.union.UnmarshalJSON(b) + return err +} + +// AsEmailTarget returns the union data inside the NotificationChannelResponse_Target as a EmailTarget +func (t NotificationChannelResponse_Target) AsEmailTarget() (EmailTarget, error) { + var body EmailTarget + err := json.Unmarshal(t.union, &body) + return body, err +} + +// FromEmailTarget overwrites any union data inside the NotificationChannelResponse_Target as the provided EmailTarget +func (t *NotificationChannelResponse_Target) FromEmailTarget(v EmailTarget) error { + b, err := json.Marshal(v) + t.union = b + return err +} + +// MergeEmailTarget performs a merge with any union data inside the NotificationChannelResponse_Target, using the provided EmailTarget +func (t *NotificationChannelResponse_Target) MergeEmailTarget(v EmailTarget) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + + merged, err := runtime.JSONMerge(t.union, b) + t.union = merged + return err +} + +// AsWebhookTarget returns the union data inside the NotificationChannelResponse_Target as a WebhookTarget +func (t NotificationChannelResponse_Target) AsWebhookTarget() (WebhookTarget, error) { + var body WebhookTarget + err := json.Unmarshal(t.union, &body) + return body, err +} + +// FromWebhookTarget overwrites any union data inside the NotificationChannelResponse_Target as the provided WebhookTarget +func (t *NotificationChannelResponse_Target) FromWebhookTarget(v WebhookTarget) error { + b, err := json.Marshal(v) + t.union = b + return err +} + +// MergeWebhookTarget performs a merge with any union data inside the NotificationChannelResponse_Target, using the provided WebhookTarget +func (t *NotificationChannelResponse_Target) MergeWebhookTarget(v WebhookTarget) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + + merged, err := runtime.JSONMerge(t.union, b) + t.union = merged + return err +} + +func (t NotificationChannelResponse_Target) MarshalJSON() ([]byte, error) { + b, err := t.union.MarshalJSON() + return b, err +} + +func (t *NotificationChannelResponse_Target) UnmarshalJSON(b []byte) error { + err := t.union.UnmarshalJSON(b) + return err +} + // AsBundleWorkloadRequest returns the union data inside the WorkloadRequest as a BundleWorkloadRequest func (t WorkloadRequest) AsBundleWorkloadRequest() (BundleWorkloadRequest, error) { var body BundleWorkloadRequest From 7e1cce4b9fdd5dee75588d8b77ff2c97848ebccf Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Thu, 26 Mar 2026 16:59:08 +0100 Subject: [PATCH 244/374] [management] add terminated field to service (#5700) --- .../modules/reverseproxy/service/manager/manager.go | 10 +++++++--- .../internals/modules/reverseproxy/service/service.go | 7 +++++-- shared/management/http/api/openapi.yml | 5 +++++ shared/management/http/api/types.gen.go | 3 +++ 4 files changed, 20 insertions(+), 5 deletions(-) diff --git a/management/internals/modules/reverseproxy/service/manager/manager.go b/management/internals/modules/reverseproxy/service/manager/manager.go index 808fdaf60..ea4fa9d1e 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager.go +++ b/management/internals/modules/reverseproxy/service/manager/manager.go @@ -519,9 +519,13 @@ func (m *Manager) executeServiceUpdate(ctx context.Context, transaction store.St return err } - if err := validateProtocolChange(existingService.Mode, service.Mode); err != nil { - return err - } + if existingService.Terminated { + return status.Errorf(status.PermissionDenied, "service is terminated and cannot be updated") + } + + if err := validateProtocolChange(existingService.Mode, service.Mode); err != nil { + return err + } updateInfo.oldCluster = existingService.ProxyCluster updateInfo.domainChanged = existingService.Domain != service.Domain diff --git a/management/internals/modules/reverseproxy/service/service.go b/management/internals/modules/reverseproxy/service/service.go index 7ca2c3043..d956013ea 100644 --- a/management/internals/modules/reverseproxy/service/service.go +++ b/management/internals/modules/reverseproxy/service/service.go @@ -184,6 +184,7 @@ type Service struct { ProxyCluster string `gorm:"index"` Targets []*Target `gorm:"foreignKey:ServiceID;constraint:OnDelete:CASCADE"` Enabled bool + Terminated bool PassHostHeader bool RewriteRedirects bool Auth AuthConfig `gorm:"serializer:json"` @@ -256,7 +257,7 @@ func (s *Service) ToAPIResponse() *api.Service { Protocol: api.ServiceTargetProtocol(target.Protocol), TargetId: target.TargetId, TargetType: api.ServiceTargetTargetType(target.TargetType), - Enabled: target.Enabled, + Enabled: target.Enabled && !s.Terminated, } opts := targetOptionsToAPI(target.Options) if opts == nil { @@ -286,7 +287,8 @@ func (s *Service) ToAPIResponse() *api.Service { Name: s.Name, Domain: s.Domain, Targets: apiTargets, - Enabled: s.Enabled, + Enabled: s.Enabled && !s.Terminated, + Terminated: &s.Terminated, PassHostHeader: &s.PassHostHeader, RewriteRedirects: &s.RewriteRedirects, Auth: authConfig, @@ -1125,6 +1127,7 @@ func (s *Service) Copy() *Service { ProxyCluster: s.ProxyCluster, Targets: targets, Enabled: s.Enabled, + Terminated: s.Terminated, PassHostHeader: s.PassHostHeader, RewriteRedirects: s.RewriteRedirects, Auth: authCopy, diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index d81c371db..519d3ca12 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -2999,6 +2999,11 @@ components: type: boolean description: Whether the service is enabled example: true + terminated: + type: boolean + description: Whether the service has been terminated. Terminated services cannot be updated. Services that violate the Terms of Service will be terminated. + readOnly: true + example: false pass_host_header: type: boolean description: When true, the original client Host header is passed through to the backend instead of being rewritten to the backend's address diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index 19d2706e1..84ee125b1 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -3718,6 +3718,9 @@ type Service struct { // Targets List of target backends for this service Targets []ServiceTarget `json:"targets"` + + // Terminated Whether the service has been terminated. Terminated services cannot be updated. Services that violate the Terms of Service will be terminated. + Terminated *bool `json:"terminated,omitempty"` } // ServiceMode Service mode. "http" for L7 reverse proxy, "tcp"/"udp"/"tls" for L4 passthrough. From ec96c5ecafc26726cde9e7e8a034924e68b92189 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Thu, 26 Mar 2026 16:59:49 +0100 Subject: [PATCH 245/374] [management] Extend blackbox tests (#5699) --- .../accounts_handler_integration_test.go | 238 +++ .../dns_handler_integration_test.go | 554 +++++++ .../events_handler_integration_test.go | 105 ++ .../groups_handler_integration_test.go | 382 +++++ .../networks_handler_integration_test.go | 1434 +++++++++++++++++ .../peers_handler_integration_test.go | 605 +++++++ .../policies_handler_integration_test.go | 488 ++++++ .../routes_handler_integration_test.go | 455 ++++++ .../setupkeys_handler_integration_test.go | 72 +- .../users_handler_integration_test.go | 701 ++++++++ .../server/http/testing/testdata/accounts.sql | 18 + .../server/http/testing/testdata/dns.sql | 21 + .../server/http/testing/testdata/events.sql | 18 + .../server/http/testing/testdata/groups.sql | 19 + .../server/http/testing/testdata/networks.sql | 25 + .../testing/testdata/peers_integration.sql | 20 + .../server/http/testing/testdata/policies.sql | 23 + .../server/http/testing/testdata/routes.sql | 23 + .../testing/testdata/users_integration.sql | 24 + .../testing/testing_tools/channel/channel.go | 116 +- .../http/testing/testing_tools/db_verify.go | 222 +++ 21 files changed, 5525 insertions(+), 38 deletions(-) create mode 100644 management/server/http/testing/integration/accounts_handler_integration_test.go create mode 100644 management/server/http/testing/integration/dns_handler_integration_test.go create mode 100644 management/server/http/testing/integration/events_handler_integration_test.go create mode 100644 management/server/http/testing/integration/groups_handler_integration_test.go create mode 100644 management/server/http/testing/integration/networks_handler_integration_test.go create mode 100644 management/server/http/testing/integration/peers_handler_integration_test.go create mode 100644 management/server/http/testing/integration/policies_handler_integration_test.go create mode 100644 management/server/http/testing/integration/routes_handler_integration_test.go create mode 100644 management/server/http/testing/integration/users_handler_integration_test.go create mode 100644 management/server/http/testing/testdata/accounts.sql create mode 100644 management/server/http/testing/testdata/dns.sql create mode 100644 management/server/http/testing/testdata/events.sql create mode 100644 management/server/http/testing/testdata/groups.sql create mode 100644 management/server/http/testing/testdata/networks.sql create mode 100644 management/server/http/testing/testdata/peers_integration.sql create mode 100644 management/server/http/testing/testdata/policies.sql create mode 100644 management/server/http/testing/testdata/routes.sql create mode 100644 management/server/http/testing/testdata/users_integration.sql create mode 100644 management/server/http/testing/testing_tools/db_verify.go diff --git a/management/server/http/testing/integration/accounts_handler_integration_test.go b/management/server/http/testing/integration/accounts_handler_integration_test.go new file mode 100644 index 000000000..511730ee5 --- /dev/null +++ b/management/server/http/testing/integration/accounts_handler_integration_test.go @@ -0,0 +1,238 @@ +//go:build integration + +package integration + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +func Test_Accounts_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, true}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all accounts", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/accounts.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/accounts", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []api.Account{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, 1, len(got)) + account := got[0] + assert.Equal(t, "test.com", account.Domain) + assert.Equal(t, "private", account.DomainCategory) + assert.Equal(t, true, account.Settings.PeerLoginExpirationEnabled) + assert.Equal(t, 86400, account.Settings.PeerLoginExpiration) + assert.Equal(t, false, account.Settings.RegularUsersViewBlocked) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_Accounts_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + trueVal := true + falseVal := false + + tt := []struct { + name string + expectedStatus int + requestBody *api.AccountRequest + verifyResponse func(t *testing.T, account *api.Account) + verifyDB func(t *testing.T, account *types.Account) + }{ + { + name: "Disable peer login expiration", + requestBody: &api.AccountRequest{ + Settings: api.AccountSettings{ + PeerLoginExpirationEnabled: false, + PeerLoginExpiration: 86400, + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, account *api.Account) { + t.Helper() + assert.Equal(t, false, account.Settings.PeerLoginExpirationEnabled) + }, + verifyDB: func(t *testing.T, dbAccount *types.Account) { + t.Helper() + assert.Equal(t, false, dbAccount.Settings.PeerLoginExpirationEnabled) + }, + }, + { + name: "Update peer login expiration to 48h", + requestBody: &api.AccountRequest{ + Settings: api.AccountSettings{ + PeerLoginExpirationEnabled: true, + PeerLoginExpiration: 172800, + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, account *api.Account) { + t.Helper() + assert.Equal(t, 172800, account.Settings.PeerLoginExpiration) + }, + verifyDB: func(t *testing.T, dbAccount *types.Account) { + t.Helper() + assert.Equal(t, 172800*time.Second, dbAccount.Settings.PeerLoginExpiration) + }, + }, + { + name: "Enable regular users view blocked", + requestBody: &api.AccountRequest{ + Settings: api.AccountSettings{ + PeerLoginExpirationEnabled: true, + PeerLoginExpiration: 86400, + RegularUsersViewBlocked: true, + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, account *api.Account) { + t.Helper() + assert.Equal(t, true, account.Settings.RegularUsersViewBlocked) + }, + verifyDB: func(t *testing.T, dbAccount *types.Account) { + t.Helper() + assert.Equal(t, true, dbAccount.Settings.RegularUsersViewBlocked) + }, + }, + { + name: "Enable groups propagation", + requestBody: &api.AccountRequest{ + Settings: api.AccountSettings{ + PeerLoginExpirationEnabled: true, + PeerLoginExpiration: 86400, + GroupsPropagationEnabled: &trueVal, + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, account *api.Account) { + t.Helper() + assert.NotNil(t, account.Settings.GroupsPropagationEnabled) + assert.Equal(t, true, *account.Settings.GroupsPropagationEnabled) + }, + verifyDB: func(t *testing.T, dbAccount *types.Account) { + t.Helper() + assert.Equal(t, true, dbAccount.Settings.GroupsPropagationEnabled) + }, + }, + { + name: "Enable JWT groups", + requestBody: &api.AccountRequest{ + Settings: api.AccountSettings{ + PeerLoginExpirationEnabled: true, + PeerLoginExpiration: 86400, + GroupsPropagationEnabled: &falseVal, + JwtGroupsEnabled: &trueVal, + JwtGroupsClaimName: stringPointer("groups"), + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, account *api.Account) { + t.Helper() + assert.NotNil(t, account.Settings.JwtGroupsEnabled) + assert.Equal(t, true, *account.Settings.JwtGroupsEnabled) + assert.NotNil(t, account.Settings.JwtGroupsClaimName) + assert.Equal(t, "groups", *account.Settings.JwtGroupsClaimName) + }, + verifyDB: func(t *testing.T, dbAccount *types.Account) { + t.Helper() + assert.Equal(t, true, dbAccount.Settings.JWTGroupsEnabled) + assert.Equal(t, "groups", dbAccount.Settings.JWTGroupsClaimName) + }, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/accounts.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPut, strings.Replace("/api/accounts/{accountId}", "{accountId}", testing_tools.TestAccountId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + got := &api.Account{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, testing_tools.TestAccountId, got.Id) + assert.Equal(t, "test.com", got.Domain) + tc.verifyResponse(t, got) + + db := testing_tools.GetDB(t, am.GetStore()) + dbAccount := testing_tools.VerifyAccountSettings(t, db) + tc.verifyDB(t, dbAccount) + }) + } + } +} + +func stringPointer(s string) *string { + return &s +} diff --git a/management/server/http/testing/integration/dns_handler_integration_test.go b/management/server/http/testing/integration/dns_handler_integration_test.go new file mode 100644 index 000000000..7ada5e462 --- /dev/null +++ b/management/server/http/testing/integration/dns_handler_integration_test.go @@ -0,0 +1,554 @@ +//go:build integration + +package integration + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +func Test_Nameservers_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all nameservers", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/dns.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/dns/nameservers", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []api.NameserverGroup{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, 1, len(got)) + assert.Equal(t, "testNSGroup", got[0].Name) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_Nameservers_GetById(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + nsGroupId string + expectedStatus int + expectGroup bool + }{ + { + name: "Get existing nameserver group", + nsGroupId: "testNSGroupId", + expectedStatus: http.StatusOK, + expectGroup: true, + }, + { + name: "Get non-existing nameserver group", + nsGroupId: "nonExistingNSGroupId", + expectedStatus: http.StatusNotFound, + expectGroup: false, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/dns.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, strings.Replace("/api/dns/nameservers/{nsgroupId}", "{nsgroupId}", tc.nsGroupId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.expectGroup { + got := &api.NameserverGroup{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + assert.Equal(t, "testNSGroupId", got.Id) + assert.Equal(t, "testNSGroup", got.Name) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_Nameservers_Create(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + requestBody *api.PostApiDnsNameserversJSONRequestBody + expectedStatus int + verifyResponse func(t *testing.T, nsGroup *api.NameserverGroup) + }{ + { + name: "Create nameserver group with single NS", + requestBody: &api.PostApiDnsNameserversJSONRequestBody{ + Name: "newNSGroup", + Description: "a new nameserver group", + Nameservers: []api.Nameserver{ + {Ip: "8.8.8.8", NsType: "udp", Port: 53}, + }, + Groups: []string{testing_tools.TestGroupId}, + Primary: false, + Domains: []string{"test.com"}, + Enabled: true, + SearchDomainsEnabled: false, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, nsGroup *api.NameserverGroup) { + t.Helper() + assert.NotEmpty(t, nsGroup.Id) + assert.Equal(t, "newNSGroup", nsGroup.Name) + assert.Equal(t, 1, len(nsGroup.Nameservers)) + assert.Equal(t, false, nsGroup.Primary) + }, + }, + { + name: "Create primary nameserver group", + requestBody: &api.PostApiDnsNameserversJSONRequestBody{ + Name: "primaryNS", + Description: "primary nameserver", + Nameservers: []api.Nameserver{ + {Ip: "1.1.1.1", NsType: "udp", Port: 53}, + }, + Groups: []string{testing_tools.TestGroupId}, + Primary: true, + Domains: []string{}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, nsGroup *api.NameserverGroup) { + t.Helper() + assert.Equal(t, true, nsGroup.Primary) + }, + }, + { + name: "Create nameserver group with empty groups", + requestBody: &api.PostApiDnsNameserversJSONRequestBody{ + Name: "emptyGroupsNS", + Description: "no groups", + Nameservers: []api.Nameserver{ + {Ip: "8.8.8.8", NsType: "udp", Port: 53}, + }, + Groups: []string{}, + Primary: true, + Domains: []string{}, + Enabled: true, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/dns.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPost, "/api/dns/nameservers", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.NameserverGroup{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify the created NS group directly in the DB + db := testing_tools.GetDB(t, am.GetStore()) + dbNS := testing_tools.VerifyNSGroupInDB(t, db, got.Id) + assert.Equal(t, got.Name, dbNS.Name) + assert.Equal(t, got.Primary, dbNS.Primary) + assert.Equal(t, len(got.Nameservers), len(dbNS.NameServers)) + assert.Equal(t, got.Enabled, dbNS.Enabled) + assert.Equal(t, got.SearchDomainsEnabled, dbNS.SearchDomainsEnabled) + } + }) + } + } +} + +func Test_Nameservers_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + nsGroupId string + requestBody *api.PutApiDnsNameserversNsgroupIdJSONRequestBody + expectedStatus int + verifyResponse func(t *testing.T, nsGroup *api.NameserverGroup) + }{ + { + name: "Update nameserver group name", + nsGroupId: "testNSGroupId", + requestBody: &api.PutApiDnsNameserversNsgroupIdJSONRequestBody{ + Name: "updatedNSGroup", + Description: "updated description", + Nameservers: []api.Nameserver{ + {Ip: "1.1.1.1", NsType: "udp", Port: 53}, + }, + Groups: []string{testing_tools.TestGroupId}, + Primary: false, + Domains: []string{"example.com"}, + Enabled: true, + SearchDomainsEnabled: false, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, nsGroup *api.NameserverGroup) { + t.Helper() + assert.Equal(t, "updatedNSGroup", nsGroup.Name) + assert.Equal(t, "updated description", nsGroup.Description) + }, + }, + { + name: "Update non-existing nameserver group", + nsGroupId: "nonExistingNSGroupId", + requestBody: &api.PutApiDnsNameserversNsgroupIdJSONRequestBody{ + Name: "whatever", + Nameservers: []api.Nameserver{ + {Ip: "1.1.1.1", NsType: "udp", Port: 53}, + }, + Groups: []string{testing_tools.TestGroupId}, + Primary: true, + Domains: []string{}, + Enabled: true, + }, + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/dns.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPut, strings.Replace("/api/dns/nameservers/{nsgroupId}", "{nsgroupId}", tc.nsGroupId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.NameserverGroup{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify the updated NS group directly in the DB + db := testing_tools.GetDB(t, am.GetStore()) + dbNS := testing_tools.VerifyNSGroupInDB(t, db, tc.nsGroupId) + assert.Equal(t, "updatedNSGroup", dbNS.Name) + assert.Equal(t, "updated description", dbNS.Description) + assert.Equal(t, false, dbNS.Primary) + assert.Equal(t, true, dbNS.Enabled) + assert.Equal(t, 1, len(dbNS.NameServers)) + assert.Equal(t, false, dbNS.SearchDomainsEnabled) + } + }) + } + } +} + +func Test_Nameservers_Delete(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + nsGroupId string + expectedStatus int + }{ + { + name: "Delete existing nameserver group", + nsGroupId: "testNSGroupId", + expectedStatus: http.StatusOK, + }, + { + name: "Delete non-existing nameserver group", + nsGroupId: "nonExistingNSGroupId", + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/dns.sql", nil, false) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, strings.Replace("/api/dns/nameservers/{nsgroupId}", "{nsgroupId}", tc.nsGroupId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + + // Verify deletion in DB for successful deletes by privileged users + if tc.expectedStatus == http.StatusOK && user.expectResponse { + db := testing_tools.GetDB(t, am.GetStore()) + testing_tools.VerifyNSGroupNotInDB(t, db, tc.nsGroupId) + } + }) + } + } +} + +func Test_DnsSettings_Get(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get DNS settings", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/dns.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/dns/settings", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := &api.DNSSettings{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.NotNil(t, got.DisabledManagementGroups) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_DnsSettings_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + requestBody *api.PutApiDnsSettingsJSONRequestBody + expectedStatus int + verifyResponse func(t *testing.T, settings *api.DNSSettings) + expectedDBDisabledMgmtLen int + expectedDBDisabledMgmtItem string + }{ + { + name: "Update disabled management groups", + requestBody: &api.PutApiDnsSettingsJSONRequestBody{ + DisabledManagementGroups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, settings *api.DNSSettings) { + t.Helper() + assert.Equal(t, 1, len(settings.DisabledManagementGroups)) + assert.Equal(t, testing_tools.TestGroupId, settings.DisabledManagementGroups[0]) + }, + expectedDBDisabledMgmtLen: 1, + expectedDBDisabledMgmtItem: testing_tools.TestGroupId, + }, + { + name: "Update with empty disabled management groups", + requestBody: &api.PutApiDnsSettingsJSONRequestBody{ + DisabledManagementGroups: []string{}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, settings *api.DNSSettings) { + t.Helper() + assert.Equal(t, 0, len(settings.DisabledManagementGroups)) + }, + expectedDBDisabledMgmtLen: 0, + }, + { + name: "Update with non-existing group", + requestBody: &api.PutApiDnsSettingsJSONRequestBody{ + DisabledManagementGroups: []string{"nonExistingGroupId"}, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/dns.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPut, "/api/dns/settings", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.DNSSettings{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify DNS settings directly in the DB + db := testing_tools.GetDB(t, am.GetStore()) + dbAccount := testing_tools.VerifyAccountSettings(t, db) + assert.Equal(t, tc.expectedDBDisabledMgmtLen, len(dbAccount.DNSSettings.DisabledManagementGroups)) + if tc.expectedDBDisabledMgmtItem != "" { + assert.Contains(t, dbAccount.DNSSettings.DisabledManagementGroups, tc.expectedDBDisabledMgmtItem) + } + } + }) + } + } +} diff --git a/management/server/http/testing/integration/events_handler_integration_test.go b/management/server/http/testing/integration/events_handler_integration_test.go new file mode 100644 index 000000000..6611b60ee --- /dev/null +++ b/management/server/http/testing/integration/events_handler_integration_test.go @@ -0,0 +1,105 @@ +//go:build integration + +package integration + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +func Test_Events_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all events", func(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/events.sql", nil, false) + + // First, perform a mutation to generate an event (create a group as admin) + groupBody, err := json.Marshal(&api.GroupRequest{Name: "eventTestGroup"}) + if err != nil { + t.Fatalf("Failed to marshal group request: %v", err) + } + createReq := testing_tools.BuildRequest(t, groupBody, http.MethodPost, "/api/groups", testing_tools.TestAdminId) + createRecorder := httptest.NewRecorder() + apiHandler.ServeHTTP(createRecorder, createReq) + assert.Equal(t, http.StatusOK, createRecorder.Code, "Failed to create group to generate event") + + // Now query events + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/events", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []api.Event{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.GreaterOrEqual(t, len(got), 1, "Expected at least one event after creating a group") + + // Verify the group creation event exists + found := false + for _, event := range got { + if event.ActivityCode == "group.add" { + found = true + assert.Equal(t, testing_tools.TestAdminId, event.InitiatorId) + assert.Equal(t, "Group created", event.Activity) + break + } + } + assert.True(t, found, "Expected to find a group.add event") + }) + } +} + +func Test_Events_GetAll_Empty(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/events.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/events", testing_tools.TestAdminId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, true) + if !expectResponse { + return + } + + got := []api.Event{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, 0, len(got), "Expected empty events list when no mutations have been performed") + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } +} diff --git a/management/server/http/testing/integration/groups_handler_integration_test.go b/management/server/http/testing/integration/groups_handler_integration_test.go new file mode 100644 index 000000000..edb43f3f3 --- /dev/null +++ b/management/server/http/testing/integration/groups_handler_integration_test.go @@ -0,0 +1,382 @@ +//go:build integration + +package integration + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +func Test_Groups_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, true}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all groups", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/groups.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/groups", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []api.Group{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.GreaterOrEqual(t, len(got), 2) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_Groups_GetById(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, true}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + groupId string + expectedStatus int + expectGroup bool + }{ + { + name: "Get existing group", + groupId: testing_tools.TestGroupId, + expectedStatus: http.StatusOK, + expectGroup: true, + }, + { + name: "Get non-existing group", + groupId: "nonExistingGroupId", + expectedStatus: http.StatusNotFound, + expectGroup: false, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/groups.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, strings.Replace("/api/groups/{groupId}", "{groupId}", tc.groupId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.expectGroup { + got := &api.Group{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + assert.Equal(t, tc.groupId, got.Id) + assert.Equal(t, "testGroupName", got.Name) + assert.Equal(t, 1, got.PeersCount) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_Groups_Create(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + requestBody *api.GroupRequest + expectedStatus int + verifyResponse func(t *testing.T, group *api.Group) + }{ + { + name: "Create group with valid name", + requestBody: &api.GroupRequest{ + Name: "brandNewGroup", + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, group *api.Group) { + t.Helper() + assert.NotEmpty(t, group.Id) + assert.Equal(t, "brandNewGroup", group.Name) + assert.Equal(t, 0, group.PeersCount) + }, + }, + { + name: "Create group with peers", + requestBody: &api.GroupRequest{ + Name: "groupWithPeers", + Peers: &[]string{testing_tools.TestPeerId}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, group *api.Group) { + t.Helper() + assert.NotEmpty(t, group.Id) + assert.Equal(t, "groupWithPeers", group.Name) + assert.Equal(t, 1, group.PeersCount) + }, + }, + { + name: "Create group with empty name", + requestBody: &api.GroupRequest{ + Name: "", + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/groups.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPost, "/api/groups", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Group{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify group exists in DB + db := testing_tools.GetDB(t, am.GetStore()) + dbGroup := testing_tools.VerifyGroupInDB(t, db, got.Id) + assert.Equal(t, tc.requestBody.Name, dbGroup.Name) + } + }) + } + } +} + +func Test_Groups_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + groupId string + requestBody *api.GroupRequest + expectedStatus int + verifyResponse func(t *testing.T, group *api.Group) + }{ + { + name: "Update group name", + groupId: testing_tools.TestGroupId, + requestBody: &api.GroupRequest{ + Name: "updatedGroupName", + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, group *api.Group) { + t.Helper() + assert.Equal(t, testing_tools.TestGroupId, group.Id) + assert.Equal(t, "updatedGroupName", group.Name) + }, + }, + { + name: "Update group peers", + groupId: testing_tools.TestGroupId, + requestBody: &api.GroupRequest{ + Name: "testGroupName", + Peers: &[]string{}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, group *api.Group) { + t.Helper() + assert.Equal(t, 0, group.PeersCount) + }, + }, + { + name: "Update with empty name", + groupId: testing_tools.TestGroupId, + requestBody: &api.GroupRequest{ + Name: "", + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + { + name: "Update non-existing group", + groupId: "nonExistingGroupId", + requestBody: &api.GroupRequest{ + Name: "someName", + }, + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/groups.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPut, strings.Replace("/api/groups/{groupId}", "{groupId}", tc.groupId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Group{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify updated group in DB + db := testing_tools.GetDB(t, am.GetStore()) + dbGroup := testing_tools.VerifyGroupInDB(t, db, tc.groupId) + assert.Equal(t, tc.requestBody.Name, dbGroup.Name) + } + }) + } + } +} + +func Test_Groups_Delete(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + groupId string + expectedStatus int + }{ + { + name: "Delete existing group not in use", + groupId: testing_tools.NewGroupId, + expectedStatus: http.StatusOK, + }, + { + name: "Delete non-existing group", + groupId: "nonExistingGroupId", + expectedStatus: http.StatusBadRequest, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/groups.sql", nil, false) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, strings.Replace("/api/groups/{groupId}", "{groupId}", tc.groupId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + _, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if expectResponse && tc.expectedStatus == http.StatusOK { + db := testing_tools.GetDB(t, am.GetStore()) + testing_tools.VerifyGroupNotInDB(t, db, tc.groupId) + } + }) + } + } +} diff --git a/management/server/http/testing/integration/networks_handler_integration_test.go b/management/server/http/testing/integration/networks_handler_integration_test.go new file mode 100644 index 000000000..4cb6b268b --- /dev/null +++ b/management/server/http/testing/integration/networks_handler_integration_test.go @@ -0,0 +1,1434 @@ +//go:build integration + +package integration + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +func Test_Networks_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all networks", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/networks", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []*api.Network{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, 1, len(got)) + assert.Equal(t, "testNetworkId", got[0].Id) + assert.Equal(t, "testNetwork", got[0].Name) + assert.Equal(t, "test network description", *got[0].Description) + assert.GreaterOrEqual(t, len(got[0].Routers), 1) + assert.GreaterOrEqual(t, len(got[0].Resources), 1) + assert.GreaterOrEqual(t, got[0].RoutingPeersCount, 1) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_Networks_GetById(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + networkId string + expectedStatus int + expectNetwork bool + }{ + { + name: "Get existing network", + networkId: "testNetworkId", + expectedStatus: http.StatusOK, + expectNetwork: true, + }, + { + name: "Get non-existing network", + networkId: "nonExistingNetworkId", + expectedStatus: http.StatusNotFound, + expectNetwork: false, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, strings.Replace("/api/networks/{networkId}", "{networkId}", tc.networkId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.expectNetwork { + got := &api.Network{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + assert.Equal(t, tc.networkId, got.Id) + assert.Equal(t, "testNetwork", got.Name) + assert.Equal(t, "test network description", *got.Description) + assert.GreaterOrEqual(t, len(got.Routers), 1) + assert.GreaterOrEqual(t, len(got.Resources), 1) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_Networks_Create(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + desc := "new network description" + + tt := []struct { + name string + requestBody *api.NetworkRequest + expectedStatus int + verifyResponse func(t *testing.T, network *api.Network) + }{ + { + name: "Create network with name and description", + requestBody: &api.NetworkRequest{ + Name: "newNetwork", + Description: &desc, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, network *api.Network) { + t.Helper() + assert.NotEmpty(t, network.Id) + assert.Equal(t, "newNetwork", network.Name) + assert.Equal(t, "new network description", *network.Description) + assert.Empty(t, network.Routers) + assert.Empty(t, network.Resources) + assert.Equal(t, 0, network.RoutingPeersCount) + }, + }, + { + name: "Create network with name only", + requestBody: &api.NetworkRequest{ + Name: "simpleNetwork", + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, network *api.Network) { + t.Helper() + assert.NotEmpty(t, network.Id) + assert.Equal(t, "simpleNetwork", network.Name) + }, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + require.NoError(t, err) + + req := testing_tools.BuildRequest(t, body, http.MethodPost, "/api/networks", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Network{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + } + }) + } + } +} + +func Test_Networks_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + updatedDesc := "updated description" + + tt := []struct { + name string + networkId string + requestBody *api.NetworkRequest + expectedStatus int + verifyResponse func(t *testing.T, network *api.Network) + }{ + { + name: "Update network name", + networkId: "testNetworkId", + requestBody: &api.NetworkRequest{ + Name: "updatedNetwork", + Description: &updatedDesc, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, network *api.Network) { + t.Helper() + assert.Equal(t, "testNetworkId", network.Id) + assert.Equal(t, "updatedNetwork", network.Name) + assert.Equal(t, "updated description", *network.Description) + }, + }, + { + name: "Update non-existing network", + networkId: "nonExistingNetworkId", + requestBody: &api.NetworkRequest{ + Name: "whatever", + }, + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + require.NoError(t, err) + + req := testing_tools.BuildRequest(t, body, http.MethodPut, strings.Replace("/api/networks/{networkId}", "{networkId}", tc.networkId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Network{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + } + }) + } + } +} + +func Test_Networks_Delete(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + networkId string + expectedStatus int + }{ + { + name: "Delete existing network", + networkId: "testNetworkId", + expectedStatus: http.StatusOK, + }, + { + name: "Delete non-existing network", + networkId: "nonExistingNetworkId", + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, strings.Replace("/api/networks/{networkId}", "{networkId}", tc.networkId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + }) + } + } +} + +func Test_Networks_Delete_Cascades(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false) + + // Delete the network + req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, "/api/networks/testNetworkId", testing_tools.TestAdminId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + testing_tools.ReadResponse(t, recorder, http.StatusOK, true) + + // Verify network is gone + req = testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/networks/testNetworkId", testing_tools.TestAdminId) + recorder = httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + testing_tools.ReadResponse(t, recorder, http.StatusNotFound, true) + + // Verify routers in that network are gone + req = testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/networks/testNetworkId/routers", testing_tools.TestAdminId) + recorder = httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + content, _ := testing_tools.ReadResponse(t, recorder, http.StatusOK, true) + var routers []*api.NetworkRouter + require.NoError(t, json.Unmarshal(content, &routers)) + assert.Empty(t, routers) + + // Verify resources in that network are gone + req = testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/networks/testNetworkId/resources", testing_tools.TestAdminId) + recorder = httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + content, _ = testing_tools.ReadResponse(t, recorder, http.StatusOK, true) + var resources []*api.NetworkResource + require.NoError(t, json.Unmarshal(content, &resources)) + assert.Empty(t, resources) +} + +func Test_NetworkResources_GetAllInNetwork(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all resources in network", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/networks/testNetworkId/resources", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []*api.NetworkResource{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, 1, len(got)) + assert.Equal(t, "testResourceId", got[0].Id) + assert.Equal(t, "testResource", got[0].Name) + assert.Equal(t, api.NetworkResourceType("host"), got[0].Type) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_NetworkResources_GetAllInAccount(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all resources in account", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/networks/resources", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []*api.NetworkResource{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.GreaterOrEqual(t, len(got), 1) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_NetworkResources_GetById(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + networkId string + resourceId string + expectedStatus int + expectResource bool + }{ + { + name: "Get existing resource", + networkId: "testNetworkId", + resourceId: "testResourceId", + expectedStatus: http.StatusOK, + expectResource: true, + }, + { + name: "Get non-existing resource", + networkId: "testNetworkId", + resourceId: "nonExistingResourceId", + expectedStatus: http.StatusNotFound, + expectResource: false, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, true) + + path := fmt.Sprintf("/api/networks/%s/resources/%s", tc.networkId, tc.resourceId) + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, path, user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.expectResource { + got := &api.NetworkResource{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + assert.Equal(t, tc.resourceId, got.Id) + assert.Equal(t, "testResource", got.Name) + assert.Equal(t, api.NetworkResourceType("host"), got.Type) + assert.Equal(t, "3.3.3.3/32", got.Address) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_NetworkResources_Create(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + desc := "new resource" + + tt := []struct { + name string + networkId string + requestBody *api.NetworkResourceRequest + expectedStatus int + verifyResponse func(t *testing.T, resource *api.NetworkResource) + }{ + { + name: "Create host resource with IP", + networkId: "testNetworkId", + requestBody: &api.NetworkResourceRequest{ + Name: "hostResource", + Description: &desc, + Address: "1.1.1.1", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.NotEmpty(t, resource.Id) + assert.Equal(t, "hostResource", resource.Name) + assert.Equal(t, api.NetworkResourceType("host"), resource.Type) + assert.Equal(t, "1.1.1.1/32", resource.Address) + assert.True(t, resource.Enabled) + }, + }, + { + name: "Create host resource with CIDR /32", + networkId: "testNetworkId", + requestBody: &api.NetworkResourceRequest{ + Name: "hostCIDR", + Address: "10.0.0.1/32", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.Equal(t, api.NetworkResourceType("host"), resource.Type) + assert.Equal(t, "10.0.0.1/32", resource.Address) + }, + }, + { + name: "Create subnet resource", + networkId: "testNetworkId", + requestBody: &api.NetworkResourceRequest{ + Name: "subnetResource", + Address: "192.168.0.0/24", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.Equal(t, api.NetworkResourceType("subnet"), resource.Type) + assert.Equal(t, "192.168.0.0/24", resource.Address) + }, + }, + { + name: "Create domain resource", + networkId: "testNetworkId", + requestBody: &api.NetworkResourceRequest{ + Name: "domainResource", + Address: "example.com", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.Equal(t, api.NetworkResourceType("domain"), resource.Type) + assert.Equal(t, "example.com", resource.Address) + }, + }, + { + name: "Create wildcard domain resource", + networkId: "testNetworkId", + requestBody: &api.NetworkResourceRequest{ + Name: "wildcardDomain", + Address: "*.example.com", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.Equal(t, api.NetworkResourceType("domain"), resource.Type) + assert.Equal(t, "*.example.com", resource.Address) + }, + }, + { + name: "Create disabled resource", + networkId: "testNetworkId", + requestBody: &api.NetworkResourceRequest{ + Name: "disabledResource", + Address: "5.5.5.5", + Groups: []string{testing_tools.TestGroupId}, + Enabled: false, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.False(t, resource.Enabled) + }, + }, + { + name: "Create resource with invalid address", + networkId: "testNetworkId", + requestBody: &api.NetworkResourceRequest{ + Name: "invalidResource", + Address: "not-a-valid-address!!!", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusInternalServerError, + }, + { + name: "Create resource with empty groups", + networkId: "testNetworkId", + requestBody: &api.NetworkResourceRequest{ + Name: "noGroupsResource", + Address: "7.7.7.7", + Groups: []string{}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.NotEmpty(t, resource.Id) + }, + }, + { + name: "Create resource with duplicate name", + networkId: "testNetworkId", + requestBody: &api.NetworkResourceRequest{ + Name: "testResource", + Address: "8.8.8.8", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + require.NoError(t, err) + + path := fmt.Sprintf("/api/networks/%s/resources", tc.networkId) + req := testing_tools.BuildRequest(t, body, http.MethodPost, path, user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.NetworkResource{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + } + }) + } + } +} + +func Test_NetworkResources_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + updatedDesc := "updated resource" + + tt := []struct { + name string + networkId string + resourceId string + requestBody *api.NetworkResourceRequest + expectedStatus int + verifyResponse func(t *testing.T, resource *api.NetworkResource) + }{ + { + name: "Update resource name and address", + networkId: "testNetworkId", + resourceId: "testResourceId", + requestBody: &api.NetworkResourceRequest{ + Name: "updatedResource", + Description: &updatedDesc, + Address: "4.4.4.4", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.Equal(t, "testResourceId", resource.Id) + assert.Equal(t, "updatedResource", resource.Name) + assert.Equal(t, "updated resource", *resource.Description) + assert.Equal(t, "4.4.4.4/32", resource.Address) + }, + }, + { + name: "Update resource to subnet type", + networkId: "testNetworkId", + resourceId: "testResourceId", + requestBody: &api.NetworkResourceRequest{ + Name: "testResource", + Address: "10.0.0.0/16", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.Equal(t, api.NetworkResourceType("subnet"), resource.Type) + assert.Equal(t, "10.0.0.0/16", resource.Address) + }, + }, + { + name: "Update resource to domain type", + networkId: "testNetworkId", + resourceId: "testResourceId", + requestBody: &api.NetworkResourceRequest{ + Name: "testResource", + Address: "myservice.example.com", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.Equal(t, api.NetworkResourceType("domain"), resource.Type) + assert.Equal(t, "myservice.example.com", resource.Address) + }, + }, + { + name: "Update non-existing resource", + networkId: "testNetworkId", + resourceId: "nonExistingResourceId", + requestBody: &api.NetworkResourceRequest{ + Name: "whatever", + Address: "1.2.3.4", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + require.NoError(t, err) + + path := fmt.Sprintf("/api/networks/%s/resources/%s", tc.networkId, tc.resourceId) + req := testing_tools.BuildRequest(t, body, http.MethodPut, path, user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.NetworkResource{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + } + }) + } + } +} + +func Test_NetworkResources_Delete(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + networkId string + resourceId string + expectedStatus int + }{ + { + name: "Delete existing resource", + networkId: "testNetworkId", + resourceId: "testResourceId", + expectedStatus: http.StatusOK, + }, + { + name: "Delete non-existing resource", + networkId: "testNetworkId", + resourceId: "nonExistingResourceId", + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false) + + path := fmt.Sprintf("/api/networks/%s/resources/%s", tc.networkId, tc.resourceId) + req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, path, user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + }) + } + } +} + +func Test_NetworkRouters_GetAllInNetwork(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all routers in network", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/networks/testNetworkId/routers", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []*api.NetworkRouter{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, 1, len(got)) + assert.Equal(t, "testRouterId", got[0].Id) + assert.Equal(t, "testPeerId", *got[0].Peer) + assert.True(t, got[0].Masquerade) + assert.Equal(t, 100, got[0].Metric) + assert.True(t, got[0].Enabled) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_NetworkRouters_GetAllInAccount(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all routers in account", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/networks/routers", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []*api.NetworkRouter{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.GreaterOrEqual(t, len(got), 1) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_NetworkRouters_GetById(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + networkId string + routerId string + expectedStatus int + expectRouter bool + }{ + { + name: "Get existing router", + networkId: "testNetworkId", + routerId: "testRouterId", + expectedStatus: http.StatusOK, + expectRouter: true, + }, + { + name: "Get non-existing router", + networkId: "testNetworkId", + routerId: "nonExistingRouterId", + expectedStatus: http.StatusNotFound, + expectRouter: false, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, true) + + path := fmt.Sprintf("/api/networks/%s/routers/%s", tc.networkId, tc.routerId) + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, path, user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.expectRouter { + got := &api.NetworkRouter{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + assert.Equal(t, tc.routerId, got.Id) + assert.Equal(t, "testPeerId", *got.Peer) + assert.True(t, got.Masquerade) + assert.Equal(t, 100, got.Metric) + assert.True(t, got.Enabled) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_NetworkRouters_Create(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + peerID := "testPeerId" + peerGroups := []string{testing_tools.TestGroupId} + + tt := []struct { + name string + networkId string + requestBody *api.NetworkRouterRequest + expectedStatus int + verifyResponse func(t *testing.T, router *api.NetworkRouter) + }{ + { + name: "Create router with peer", + networkId: "testNetworkId", + requestBody: &api.NetworkRouterRequest{ + Peer: &peerID, + Masquerade: true, + Metric: 200, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, router *api.NetworkRouter) { + t.Helper() + assert.NotEmpty(t, router.Id) + assert.Equal(t, peerID, *router.Peer) + assert.True(t, router.Masquerade) + assert.Equal(t, 200, router.Metric) + assert.True(t, router.Enabled) + }, + }, + { + name: "Create router with peer groups", + networkId: "testNetworkId", + requestBody: &api.NetworkRouterRequest{ + PeerGroups: &peerGroups, + Masquerade: false, + Metric: 300, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, router *api.NetworkRouter) { + t.Helper() + assert.NotEmpty(t, router.Id) + assert.NotNil(t, router.PeerGroups) + assert.Equal(t, 1, len(*router.PeerGroups)) + assert.False(t, router.Masquerade) + assert.Equal(t, 300, router.Metric) + assert.True(t, router.Enabled) // always true on creation + }, + }, + { + name: "Create router with both peer and peer_groups", + networkId: "testNetworkId", + requestBody: &api.NetworkRouterRequest{ + Peer: &peerID, + PeerGroups: &peerGroups, + Masquerade: true, + Metric: 100, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, router *api.NetworkRouter) { + t.Helper() + assert.NotEmpty(t, router.Id) + assert.Equal(t, peerID, *router.Peer) + assert.Equal(t, 1, len(*router.PeerGroups)) + }, + }, + { + name: "Create router in non-existing network", + networkId: "nonExistingNetworkId", + requestBody: &api.NetworkRouterRequest{ + Peer: &peerID, + Masquerade: true, + Metric: 100, + Enabled: true, + }, + expectedStatus: http.StatusNotFound, + }, + { + name: "Create router enabled is always true", + networkId: "testNetworkId", + requestBody: &api.NetworkRouterRequest{ + Peer: &peerID, + Masquerade: false, + Metric: 50, + Enabled: false, // handler sets to true + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, router *api.NetworkRouter) { + t.Helper() + assert.True(t, router.Enabled) // always true on creation + }, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + require.NoError(t, err) + + path := fmt.Sprintf("/api/networks/%s/routers", tc.networkId) + req := testing_tools.BuildRequest(t, body, http.MethodPost, path, user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.NetworkRouter{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + } + }) + } + } +} + +func Test_NetworkRouters_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + peerID := "testPeerId" + peerGroups := []string{testing_tools.TestGroupId} + + tt := []struct { + name string + networkId string + routerId string + requestBody *api.NetworkRouterRequest + expectedStatus int + verifyResponse func(t *testing.T, router *api.NetworkRouter) + }{ + { + name: "Update router metric and masquerade", + networkId: "testNetworkId", + routerId: "testRouterId", + requestBody: &api.NetworkRouterRequest{ + Peer: &peerID, + Masquerade: false, + Metric: 500, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, router *api.NetworkRouter) { + t.Helper() + assert.Equal(t, "testRouterId", router.Id) + assert.False(t, router.Masquerade) + assert.Equal(t, 500, router.Metric) + }, + }, + { + name: "Update router to use peer groups", + networkId: "testNetworkId", + routerId: "testRouterId", + requestBody: &api.NetworkRouterRequest{ + PeerGroups: &peerGroups, + Masquerade: true, + Metric: 100, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, router *api.NetworkRouter) { + t.Helper() + assert.NotNil(t, router.PeerGroups) + assert.Equal(t, 1, len(*router.PeerGroups)) + }, + }, + { + name: "Update router disabled", + networkId: "testNetworkId", + routerId: "testRouterId", + requestBody: &api.NetworkRouterRequest{ + Peer: &peerID, + Masquerade: true, + Metric: 100, + Enabled: false, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, router *api.NetworkRouter) { + t.Helper() + assert.False(t, router.Enabled) + }, + }, + { + name: "Update non-existing router creates it", + networkId: "testNetworkId", + routerId: "nonExistingRouterId", + requestBody: &api.NetworkRouterRequest{ + Peer: &peerID, + Masquerade: true, + Metric: 100, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, router *api.NetworkRouter) { + t.Helper() + assert.Equal(t, "nonExistingRouterId", router.Id) + }, + }, + { + name: "Update router with both peer and peer_groups", + networkId: "testNetworkId", + routerId: "testRouterId", + requestBody: &api.NetworkRouterRequest{ + Peer: &peerID, + PeerGroups: &peerGroups, + Masquerade: true, + Metric: 100, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, router *api.NetworkRouter) { + t.Helper() + assert.Equal(t, "testRouterId", router.Id) + assert.Equal(t, peerID, *router.Peer) + assert.Equal(t, 1, len(*router.PeerGroups)) + }, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + require.NoError(t, err) + + path := fmt.Sprintf("/api/networks/%s/routers/%s", tc.networkId, tc.routerId) + req := testing_tools.BuildRequest(t, body, http.MethodPut, path, user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.NetworkRouter{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + } + }) + } + } +} + +func Test_NetworkRouters_Delete(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + networkId string + routerId string + expectedStatus int + }{ + { + name: "Delete existing router", + networkId: "testNetworkId", + routerId: "testRouterId", + expectedStatus: http.StatusOK, + }, + { + name: "Delete non-existing router", + networkId: "testNetworkId", + routerId: "nonExistingRouterId", + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false) + + path := fmt.Sprintf("/api/networks/%s/routers/%s", tc.networkId, tc.routerId) + req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, path, user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + }) + } + } +} diff --git a/management/server/http/testing/integration/peers_handler_integration_test.go b/management/server/http/testing/integration/peers_handler_integration_test.go new file mode 100644 index 000000000..17a9e94a6 --- /dev/null +++ b/management/server/http/testing/integration/peers_handler_integration_test.go @@ -0,0 +1,605 @@ +//go:build integration + +package integration + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +const ( + testPeerId2 = "testPeerId2" +) + +func Test_Peers_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + { + name: "Regular user", + userId: testing_tools.TestUserId, + expectResponse: false, + }, + { + name: "Admin user", + userId: testing_tools.TestAdminId, + expectResponse: true, + }, + { + name: "Owner user", + userId: testing_tools.TestOwnerId, + expectResponse: true, + }, + { + name: "Regular service user", + userId: testing_tools.TestServiceUserId, + expectResponse: true, + }, + { + name: "Admin service user", + userId: testing_tools.TestServiceAdminId, + expectResponse: true, + }, + { + name: "Blocked user", + userId: testing_tools.BlockedUserId, + expectResponse: false, + }, + { + name: "Other user", + userId: testing_tools.OtherUserId, + expectResponse: false, + }, + { + name: "Invalid token", + userId: testing_tools.InvalidToken, + expectResponse: false, + }, + } + + for _, user := range users { + t.Run(user.name+" - Get all peers", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/peers_integration.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/peers", user.userId) + recorder := httptest.NewRecorder() + + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + var got []api.PeerBatch + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.GreaterOrEqual(t, len(got), 2, "Expected at least 2 peers") + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_Peers_GetById(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + { + name: "Regular user", + userId: testing_tools.TestUserId, + expectResponse: false, + }, + { + name: "Admin user", + userId: testing_tools.TestAdminId, + expectResponse: true, + }, + { + name: "Owner user", + userId: testing_tools.TestOwnerId, + expectResponse: true, + }, + { + name: "Regular service user", + userId: testing_tools.TestServiceUserId, + expectResponse: true, + }, + { + name: "Admin service user", + userId: testing_tools.TestServiceAdminId, + expectResponse: true, + }, + { + name: "Blocked user", + userId: testing_tools.BlockedUserId, + expectResponse: false, + }, + { + name: "Other user", + userId: testing_tools.OtherUserId, + expectResponse: false, + }, + { + name: "Invalid token", + userId: testing_tools.InvalidToken, + expectResponse: false, + }, + } + + tt := []struct { + name string + expectedStatus int + requestType string + requestPath string + requestId string + verifyResponse func(t *testing.T, peer *api.Peer) + }{ + { + name: "Get existing peer", + requestType: http.MethodGet, + requestPath: "/api/peers/{peerId}", + requestId: testing_tools.TestPeerId, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, peer *api.Peer) { + t.Helper() + assert.Equal(t, testing_tools.TestPeerId, peer.Id) + assert.Equal(t, "test-peer-1", peer.Name) + assert.Equal(t, "test-host-1", peer.Hostname) + assert.Equal(t, "Debian GNU/Linux ", peer.Os) + assert.Equal(t, "0.12.0", peer.Version) + assert.Equal(t, false, peer.SshEnabled) + assert.Equal(t, true, peer.LoginExpirationEnabled) + }, + }, + { + name: "Get second existing peer", + requestType: http.MethodGet, + requestPath: "/api/peers/{peerId}", + requestId: testPeerId2, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, peer *api.Peer) { + t.Helper() + assert.Equal(t, testPeerId2, peer.Id) + assert.Equal(t, "test-peer-2", peer.Name) + assert.Equal(t, "test-host-2", peer.Hostname) + assert.Equal(t, "Ubuntu ", peer.Os) + assert.Equal(t, true, peer.SshEnabled) + assert.Equal(t, false, peer.LoginExpirationEnabled) + assert.Equal(t, true, peer.Connected) + }, + }, + { + name: "Get non-existing peer", + requestType: http.MethodGet, + requestPath: "/api/peers/{peerId}", + requestId: "nonExistingPeerId", + expectedStatus: http.StatusNotFound, + verifyResponse: nil, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/peers_integration.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, tc.requestType, strings.Replace(tc.requestPath, "{peerId}", tc.requestId, 1), user.userId) + recorder := httptest.NewRecorder() + + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Peer{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_Peers_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + { + name: "Regular user", + userId: testing_tools.TestUserId, + expectResponse: false, + }, + { + name: "Admin user", + userId: testing_tools.TestAdminId, + expectResponse: true, + }, + { + name: "Owner user", + userId: testing_tools.TestOwnerId, + expectResponse: true, + }, + { + name: "Regular service user", + userId: testing_tools.TestServiceUserId, + expectResponse: false, + }, + { + name: "Admin service user", + userId: testing_tools.TestServiceAdminId, + expectResponse: true, + }, + { + name: "Blocked user", + userId: testing_tools.BlockedUserId, + expectResponse: false, + }, + { + name: "Other user", + userId: testing_tools.OtherUserId, + expectResponse: false, + }, + { + name: "Invalid token", + userId: testing_tools.InvalidToken, + expectResponse: false, + }, + } + + tt := []struct { + name string + expectedStatus int + requestBody *api.PeerRequest + requestType string + requestPath string + requestId string + verifyResponse func(t *testing.T, peer *api.Peer) + }{ + { + name: "Update peer name", + requestType: http.MethodPut, + requestPath: "/api/peers/{peerId}", + requestId: testing_tools.TestPeerId, + requestBody: &api.PeerRequest{ + Name: "updated-peer-name", + SshEnabled: false, + LoginExpirationEnabled: true, + InactivityExpirationEnabled: false, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, peer *api.Peer) { + t.Helper() + assert.Equal(t, testing_tools.TestPeerId, peer.Id) + assert.Equal(t, "updated-peer-name", peer.Name) + assert.Equal(t, false, peer.SshEnabled) + assert.Equal(t, true, peer.LoginExpirationEnabled) + }, + }, + { + name: "Enable SSH on peer", + requestType: http.MethodPut, + requestPath: "/api/peers/{peerId}", + requestId: testing_tools.TestPeerId, + requestBody: &api.PeerRequest{ + Name: "test-peer-1", + SshEnabled: true, + LoginExpirationEnabled: true, + InactivityExpirationEnabled: false, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, peer *api.Peer) { + t.Helper() + assert.Equal(t, testing_tools.TestPeerId, peer.Id) + assert.Equal(t, "test-peer-1", peer.Name) + assert.Equal(t, true, peer.SshEnabled) + assert.Equal(t, true, peer.LoginExpirationEnabled) + }, + }, + { + name: "Disable login expiration on peer", + requestType: http.MethodPut, + requestPath: "/api/peers/{peerId}", + requestId: testing_tools.TestPeerId, + requestBody: &api.PeerRequest{ + Name: "test-peer-1", + SshEnabled: false, + LoginExpirationEnabled: false, + InactivityExpirationEnabled: false, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, peer *api.Peer) { + t.Helper() + assert.Equal(t, testing_tools.TestPeerId, peer.Id) + assert.Equal(t, false, peer.LoginExpirationEnabled) + }, + }, + { + name: "Update non-existing peer", + requestType: http.MethodPut, + requestPath: "/api/peers/{peerId}", + requestId: "nonExistingPeerId", + requestBody: &api.PeerRequest{ + Name: "updated-name", + SshEnabled: false, + LoginExpirationEnabled: false, + InactivityExpirationEnabled: false, + }, + expectedStatus: http.StatusNotFound, + verifyResponse: nil, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/peers_integration.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, tc.requestType, strings.Replace(tc.requestPath, "{peerId}", tc.requestId, 1), user.userId) + recorder := httptest.NewRecorder() + + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Peer{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify updated peer in DB + db := testing_tools.GetDB(t, am.GetStore()) + dbPeer := testing_tools.VerifyPeerInDB(t, db, tc.requestId) + assert.Equal(t, tc.requestBody.Name, dbPeer.Name) + assert.Equal(t, tc.requestBody.SshEnabled, dbPeer.SSHEnabled) + assert.Equal(t, tc.requestBody.LoginExpirationEnabled, dbPeer.LoginExpirationEnabled) + assert.Equal(t, tc.requestBody.InactivityExpirationEnabled, dbPeer.InactivityExpirationEnabled) + } + }) + } + } +} + +func Test_Peers_Delete(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + { + name: "Regular user", + userId: testing_tools.TestUserId, + expectResponse: false, + }, + { + name: "Admin user", + userId: testing_tools.TestAdminId, + expectResponse: true, + }, + { + name: "Owner user", + userId: testing_tools.TestOwnerId, + expectResponse: true, + }, + { + name: "Regular service user", + userId: testing_tools.TestServiceUserId, + expectResponse: false, + }, + { + name: "Admin service user", + userId: testing_tools.TestServiceAdminId, + expectResponse: true, + }, + { + name: "Blocked user", + userId: testing_tools.BlockedUserId, + expectResponse: false, + }, + { + name: "Other user", + userId: testing_tools.OtherUserId, + expectResponse: false, + }, + { + name: "Invalid token", + userId: testing_tools.InvalidToken, + expectResponse: false, + }, + } + + tt := []struct { + name string + expectedStatus int + requestType string + requestPath string + requestId string + }{ + { + name: "Delete existing peer", + requestType: http.MethodDelete, + requestPath: "/api/peers/{peerId}", + requestId: testPeerId2, + expectedStatus: http.StatusOK, + }, + { + name: "Delete non-existing peer", + requestType: http.MethodDelete, + requestPath: "/api/peers/{peerId}", + requestId: "nonExistingPeerId", + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/peers_integration.sql", nil, false) + + req := testing_tools.BuildRequest(t, []byte{}, tc.requestType, strings.Replace(tc.requestPath, "{peerId}", tc.requestId, 1), user.userId) + recorder := httptest.NewRecorder() + + apiHandler.ServeHTTP(recorder, req) + + _, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + // Verify peer is actually deleted in DB + if tc.expectedStatus == http.StatusOK { + db := testing_tools.GetDB(t, am.GetStore()) + testing_tools.VerifyPeerNotInDB(t, db, tc.requestId) + } + }) + } + } +} + +func Test_Peers_GetAccessiblePeers(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + { + name: "Regular user", + userId: testing_tools.TestUserId, + expectResponse: false, + }, + { + name: "Admin user", + userId: testing_tools.TestAdminId, + expectResponse: true, + }, + { + name: "Owner user", + userId: testing_tools.TestOwnerId, + expectResponse: true, + }, + { + name: "Regular service user", + userId: testing_tools.TestServiceUserId, + expectResponse: false, + }, + { + name: "Admin service user", + userId: testing_tools.TestServiceAdminId, + expectResponse: true, + }, + { + name: "Blocked user", + userId: testing_tools.BlockedUserId, + expectResponse: false, + }, + { + name: "Other user", + userId: testing_tools.OtherUserId, + expectResponse: false, + }, + { + name: "Invalid token", + userId: testing_tools.InvalidToken, + expectResponse: false, + }, + } + + tt := []struct { + name string + expectedStatus int + requestType string + requestPath string + requestId string + }{ + { + name: "Get accessible peers for existing peer", + requestType: http.MethodGet, + requestPath: "/api/peers/{peerId}/accessible-peers", + requestId: testing_tools.TestPeerId, + expectedStatus: http.StatusOK, + }, + { + name: "Get accessible peers for non-existing peer", + requestType: http.MethodGet, + requestPath: "/api/peers/{peerId}/accessible-peers", + requestId: "nonExistingPeerId", + expectedStatus: http.StatusOK, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/peers_integration.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, tc.requestType, strings.Replace(tc.requestPath, "{peerId}", tc.requestId, 1), user.userId) + recorder := httptest.NewRecorder() + + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.expectedStatus == http.StatusOK { + var got []api.AccessiblePeer + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + // The accessible peers list should be a valid array (may be empty if no policies connect peers) + assert.NotNil(t, got, "Expected accessible peers to be a valid array") + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} diff --git a/management/server/http/testing/integration/policies_handler_integration_test.go b/management/server/http/testing/integration/policies_handler_integration_test.go new file mode 100644 index 000000000..6f3624fb5 --- /dev/null +++ b/management/server/http/testing/integration/policies_handler_integration_test.go @@ -0,0 +1,488 @@ +//go:build integration + +package integration + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +func Test_Policies_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all policies", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/policies.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/policies", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []api.Policy{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, 1, len(got)) + assert.Equal(t, "testPolicy", got[0].Name) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_Policies_GetById(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + policyId string + expectedStatus int + expectPolicy bool + }{ + { + name: "Get existing policy", + policyId: "testPolicyId", + expectedStatus: http.StatusOK, + expectPolicy: true, + }, + { + name: "Get non-existing policy", + policyId: "nonExistingPolicyId", + expectedStatus: http.StatusNotFound, + expectPolicy: false, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/policies.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, strings.Replace("/api/policies/{policyId}", "{policyId}", tc.policyId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.expectPolicy { + got := &api.Policy{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + assert.NotNil(t, got.Id) + assert.Equal(t, tc.policyId, *got.Id) + assert.Equal(t, "testPolicy", got.Name) + assert.Equal(t, true, got.Enabled) + assert.GreaterOrEqual(t, len(got.Rules), 1) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_Policies_Create(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + srcGroups := []string{testing_tools.TestGroupId} + dstGroups := []string{testing_tools.TestGroupId} + + tt := []struct { + name string + requestBody *api.PolicyCreate + expectedStatus int + verifyResponse func(t *testing.T, policy *api.Policy) + }{ + { + name: "Create policy with accept rule", + requestBody: &api.PolicyCreate{ + Name: "newPolicy", + Enabled: true, + Rules: []api.PolicyRuleUpdate{ + { + Name: "allowAll", + Enabled: true, + Action: "accept", + Protocol: "all", + Bidirectional: true, + Sources: &srcGroups, + Destinations: &dstGroups, + }, + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, policy *api.Policy) { + t.Helper() + assert.NotNil(t, policy.Id) + assert.Equal(t, "newPolicy", policy.Name) + assert.Equal(t, true, policy.Enabled) + assert.Equal(t, 1, len(policy.Rules)) + assert.Equal(t, "allowAll", policy.Rules[0].Name) + }, + }, + { + name: "Create policy with drop rule", + requestBody: &api.PolicyCreate{ + Name: "dropPolicy", + Enabled: true, + Rules: []api.PolicyRuleUpdate{ + { + Name: "dropAll", + Enabled: true, + Action: "drop", + Protocol: "all", + Bidirectional: true, + Sources: &srcGroups, + Destinations: &dstGroups, + }, + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, policy *api.Policy) { + t.Helper() + assert.Equal(t, "dropPolicy", policy.Name) + }, + }, + { + name: "Create policy with TCP rule and ports", + requestBody: &api.PolicyCreate{ + Name: "tcpPolicy", + Enabled: true, + Rules: []api.PolicyRuleUpdate{ + { + Name: "tcpRule", + Enabled: true, + Action: "accept", + Protocol: "tcp", + Bidirectional: true, + Sources: &srcGroups, + Destinations: &dstGroups, + Ports: &[]string{"80", "443"}, + }, + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, policy *api.Policy) { + t.Helper() + assert.Equal(t, "tcpPolicy", policy.Name) + assert.NotNil(t, policy.Rules[0].Ports) + assert.Equal(t, 2, len(*policy.Rules[0].Ports)) + }, + }, + { + name: "Create policy with empty name", + requestBody: &api.PolicyCreate{ + Name: "", + Enabled: true, + Rules: []api.PolicyRuleUpdate{ + { + Name: "rule", + Enabled: true, + Action: "accept", + Protocol: "all", + Sources: &srcGroups, + Destinations: &dstGroups, + }, + }, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + { + name: "Create policy with no rules", + requestBody: &api.PolicyCreate{ + Name: "noRulesPolicy", + Enabled: true, + Rules: []api.PolicyRuleUpdate{}, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/policies.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPost, "/api/policies", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Policy{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify policy exists in DB with correct fields + db := testing_tools.GetDB(t, am.GetStore()) + dbPolicy := testing_tools.VerifyPolicyInDB(t, db, *got.Id) + assert.Equal(t, tc.requestBody.Name, dbPolicy.Name) + assert.Equal(t, tc.requestBody.Enabled, dbPolicy.Enabled) + assert.Equal(t, len(tc.requestBody.Rules), len(dbPolicy.Rules)) + } + }) + } + } +} + +func Test_Policies_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + srcGroups := []string{testing_tools.TestGroupId} + dstGroups := []string{testing_tools.TestGroupId} + + tt := []struct { + name string + policyId string + requestBody *api.PolicyCreate + expectedStatus int + verifyResponse func(t *testing.T, policy *api.Policy) + }{ + { + name: "Update policy name", + policyId: "testPolicyId", + requestBody: &api.PolicyCreate{ + Name: "updatedPolicy", + Enabled: true, + Rules: []api.PolicyRuleUpdate{ + { + Name: "testRule", + Enabled: true, + Action: "accept", + Protocol: "all", + Bidirectional: true, + Sources: &srcGroups, + Destinations: &dstGroups, + }, + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, policy *api.Policy) { + t.Helper() + assert.Equal(t, "updatedPolicy", policy.Name) + }, + }, + { + name: "Update policy enabled state", + policyId: "testPolicyId", + requestBody: &api.PolicyCreate{ + Name: "testPolicy", + Enabled: false, + Rules: []api.PolicyRuleUpdate{ + { + Name: "testRule", + Enabled: true, + Action: "accept", + Protocol: "all", + Bidirectional: true, + Sources: &srcGroups, + Destinations: &dstGroups, + }, + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, policy *api.Policy) { + t.Helper() + assert.Equal(t, false, policy.Enabled) + }, + }, + { + name: "Update non-existing policy", + policyId: "nonExistingPolicyId", + requestBody: &api.PolicyCreate{ + Name: "whatever", + Enabled: true, + Rules: []api.PolicyRuleUpdate{ + { + Name: "rule", + Enabled: true, + Action: "accept", + Protocol: "all", + Sources: &srcGroups, + Destinations: &dstGroups, + }, + }, + }, + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/policies.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPut, strings.Replace("/api/policies/{policyId}", "{policyId}", tc.policyId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Policy{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify updated policy in DB + db := testing_tools.GetDB(t, am.GetStore()) + dbPolicy := testing_tools.VerifyPolicyInDB(t, db, tc.policyId) + assert.Equal(t, tc.requestBody.Name, dbPolicy.Name) + assert.Equal(t, tc.requestBody.Enabled, dbPolicy.Enabled) + } + }) + } + } +} + +func Test_Policies_Delete(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + policyId string + expectedStatus int + }{ + { + name: "Delete existing policy", + policyId: "testPolicyId", + expectedStatus: http.StatusOK, + }, + { + name: "Delete non-existing policy", + policyId: "nonExistingPolicyId", + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/policies.sql", nil, false) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, strings.Replace("/api/policies/{policyId}", "{policyId}", tc.policyId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + _, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if expectResponse && tc.expectedStatus == http.StatusOK { + db := testing_tools.GetDB(t, am.GetStore()) + testing_tools.VerifyPolicyNotInDB(t, db, tc.policyId) + } + }) + } + } +} diff --git a/management/server/http/testing/integration/routes_handler_integration_test.go b/management/server/http/testing/integration/routes_handler_integration_test.go new file mode 100644 index 000000000..eeb0c3025 --- /dev/null +++ b/management/server/http/testing/integration/routes_handler_integration_test.go @@ -0,0 +1,455 @@ +//go:build integration + +package integration + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" + "github.com/netbirdio/netbird/route" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +func Test_Routes_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all routes", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/routes.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/routes", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []api.Route{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, 2, len(got)) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_Routes_GetById(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + routeId string + expectedStatus int + expectRoute bool + }{ + { + name: "Get existing route", + routeId: "testRouteId", + expectedStatus: http.StatusOK, + expectRoute: true, + }, + { + name: "Get non-existing route", + routeId: "nonExistingRouteId", + expectedStatus: http.StatusNotFound, + expectRoute: false, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/routes.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, strings.Replace("/api/routes/{routeId}", "{routeId}", tc.routeId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.expectRoute { + got := &api.Route{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + assert.Equal(t, tc.routeId, got.Id) + assert.Equal(t, "Test Network Route", got.Description) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_Routes_Create(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + networkCIDR := "10.10.0.0/24" + peerID := testing_tools.TestPeerId + peerGroups := []string{"peerGroupId"} + + tt := []struct { + name string + requestBody *api.RouteRequest + expectedStatus int + verifyResponse func(t *testing.T, route *api.Route) + }{ + { + name: "Create network route with peer", + requestBody: &api.RouteRequest{ + Description: "New network route", + Network: &networkCIDR, + Peer: &peerID, + NetworkId: "newNet", + Metric: 100, + Masquerade: true, + Enabled: true, + Groups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, route *api.Route) { + t.Helper() + assert.NotEmpty(t, route.Id) + assert.Equal(t, "New network route", route.Description) + assert.Equal(t, 100, route.Metric) + assert.Equal(t, true, route.Masquerade) + assert.Equal(t, true, route.Enabled) + }, + }, + { + name: "Create network route with peer groups", + requestBody: &api.RouteRequest{ + Description: "Route with peer groups", + Network: &networkCIDR, + PeerGroups: &peerGroups, + NetworkId: "peerGroupNet", + Metric: 150, + Masquerade: false, + Enabled: true, + Groups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, route *api.Route) { + t.Helper() + assert.NotEmpty(t, route.Id) + assert.Equal(t, "Route with peer groups", route.Description) + }, + }, + { + name: "Create route with empty network_id", + requestBody: &api.RouteRequest{ + Description: "Empty net id", + Network: &networkCIDR, + Peer: &peerID, + NetworkId: "", + Metric: 100, + Enabled: true, + Groups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + { + name: "Create route with metric 0", + requestBody: &api.RouteRequest{ + Description: "Zero metric", + Network: &networkCIDR, + Peer: &peerID, + NetworkId: "zeroMetric", + Metric: 0, + Enabled: true, + Groups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + { + name: "Create route with metric 10000", + requestBody: &api.RouteRequest{ + Description: "High metric", + Network: &networkCIDR, + Peer: &peerID, + NetworkId: "highMetric", + Metric: 10000, + Enabled: true, + Groups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/routes.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPost, "/api/routes", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Route{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify route exists in DB with correct fields + db := testing_tools.GetDB(t, am.GetStore()) + dbRoute := testing_tools.VerifyRouteInDB(t, db, route.ID(got.Id)) + assert.Equal(t, tc.requestBody.Description, dbRoute.Description) + assert.Equal(t, tc.requestBody.Metric, dbRoute.Metric) + assert.Equal(t, tc.requestBody.Masquerade, dbRoute.Masquerade) + assert.Equal(t, tc.requestBody.Enabled, dbRoute.Enabled) + assert.Equal(t, route.NetID(tc.requestBody.NetworkId), dbRoute.NetID) + } + }) + } + } +} + +func Test_Routes_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + networkCIDR := "10.0.0.0/24" + peerID := testing_tools.TestPeerId + + tt := []struct { + name string + routeId string + requestBody *api.RouteRequest + expectedStatus int + verifyResponse func(t *testing.T, route *api.Route) + }{ + { + name: "Update route description", + routeId: "testRouteId", + requestBody: &api.RouteRequest{ + Description: "Updated description", + Network: &networkCIDR, + Peer: &peerID, + NetworkId: "testNet", + Metric: 100, + Masquerade: true, + Enabled: true, + Groups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, route *api.Route) { + t.Helper() + assert.Equal(t, "testRouteId", route.Id) + assert.Equal(t, "Updated description", route.Description) + }, + }, + { + name: "Update route metric", + routeId: "testRouteId", + requestBody: &api.RouteRequest{ + Description: "Test Network Route", + Network: &networkCIDR, + Peer: &peerID, + NetworkId: "testNet", + Metric: 500, + Masquerade: true, + Enabled: true, + Groups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, route *api.Route) { + t.Helper() + assert.Equal(t, 500, route.Metric) + }, + }, + { + name: "Update non-existing route", + routeId: "nonExistingRouteId", + requestBody: &api.RouteRequest{ + Description: "whatever", + Network: &networkCIDR, + Peer: &peerID, + NetworkId: "testNet", + Metric: 100, + Enabled: true, + Groups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/routes.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPut, strings.Replace("/api/routes/{routeId}", "{routeId}", tc.routeId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Route{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify updated route in DB + db := testing_tools.GetDB(t, am.GetStore()) + dbRoute := testing_tools.VerifyRouteInDB(t, db, route.ID(got.Id)) + assert.Equal(t, tc.requestBody.Description, dbRoute.Description) + assert.Equal(t, tc.requestBody.Metric, dbRoute.Metric) + assert.Equal(t, tc.requestBody.Masquerade, dbRoute.Masquerade) + assert.Equal(t, tc.requestBody.Enabled, dbRoute.Enabled) + } + }) + } + } +} + +func Test_Routes_Delete(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + routeId string + expectedStatus int + }{ + { + name: "Delete existing route", + routeId: "testRouteId", + expectedStatus: http.StatusOK, + }, + { + name: "Delete non-existing route", + routeId: "nonExistingRouteId", + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/routes.sql", nil, false) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, strings.Replace("/api/routes/{routeId}", "{routeId}", tc.routeId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + + // Verify route was deleted from DB for successful deletes + if tc.expectedStatus == http.StatusOK && user.expectResponse { + db := testing_tools.GetDB(t, am.GetStore()) + testing_tools.VerifyRouteNotInDB(t, db, route.ID(tc.routeId)) + } + }) + } + } +} diff --git a/management/server/http/testing/integration/setupkeys_handler_integration_test.go b/management/server/http/testing/integration/setupkeys_handler_integration_test.go index c1a9829da..0d3aaac82 100644 --- a/management/server/http/testing/integration/setupkeys_handler_integration_test.go +++ b/management/server/http/testing/integration/setupkeys_handler_integration_test.go @@ -3,7 +3,6 @@ package integration import ( - "context" "encoding/json" "net/http" "net/http/httptest" @@ -14,7 +13,6 @@ import ( "github.com/stretchr/testify/assert" - "github.com/netbirdio/netbird/management/server/http/handlers/setup_keys" "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" "github.com/netbirdio/netbird/shared/management/http/api" @@ -254,7 +252,7 @@ func Test_SetupKeys_Create(t *testing.T) { expectedResponse: nil, }, { - name: "Create Setup Key", + name: "Create Setup Key with nil AutoGroups", requestType: http.MethodPost, requestPath: "/api/setup-keys", requestBody: &api.CreateSetupKeyRequest{ @@ -308,14 +306,15 @@ func Test_SetupKeys_Create(t *testing.T) { t.Fatalf("Sent content is not in correct json format; %v", err) } + gotID := got.Id validateCreatedKey(t, tc.expectedResponse, got) - key, err := am.GetSetupKey(context.Background(), testing_tools.TestAccountId, testing_tools.TestUserId, got.Id) - if err != nil { - return - } - - validateCreatedKey(t, tc.expectedResponse, setup_keys.ToResponseBody(key)) + // Verify setup key exists in DB via gorm + db := testing_tools.GetDB(t, am.GetStore()) + dbKey := testing_tools.VerifySetupKeyInDB(t, db, gotID) + assert.Equal(t, tc.expectedResponse.Name, dbKey.Name) + assert.Equal(t, tc.expectedResponse.Revoked, dbKey.Revoked) + assert.Equal(t, tc.expectedResponse.UsageLimit, dbKey.UsageLimit) select { case <-done: @@ -571,7 +570,7 @@ func Test_SetupKeys_Update(t *testing.T) { for _, tc := range tt { for _, user := range users { - t.Run(tc.name, func(t *testing.T) { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { apiHandler, am, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/setup_keys.sql", nil, true) body, err := json.Marshal(tc.requestBody) @@ -594,14 +593,16 @@ func Test_SetupKeys_Update(t *testing.T) { t.Fatalf("Sent content is not in correct json format; %v", err) } + gotID := got.Id + gotRevoked := got.Revoked + gotUsageLimit := got.UsageLimit validateCreatedKey(t, tc.expectedResponse, got) - key, err := am.GetSetupKey(context.Background(), testing_tools.TestAccountId, testing_tools.TestUserId, got.Id) - if err != nil { - return - } - - validateCreatedKey(t, tc.expectedResponse, setup_keys.ToResponseBody(key)) + // Verify updated setup key in DB via gorm + db := testing_tools.GetDB(t, am.GetStore()) + dbKey := testing_tools.VerifySetupKeyInDB(t, db, gotID) + assert.Equal(t, gotRevoked, dbKey.Revoked) + assert.Equal(t, gotUsageLimit, dbKey.UsageLimit) select { case <-done: @@ -759,8 +760,8 @@ func Test_SetupKeys_Get(t *testing.T) { apiHandler.ServeHTTP(recorder, req) - content, expectRespnose := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) - if !expectRespnose { + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { return } got := &api.SetupKey{} @@ -768,14 +769,16 @@ func Test_SetupKeys_Get(t *testing.T) { t.Fatalf("Sent content is not in correct json format; %v", err) } + gotID := got.Id + gotName := got.Name + gotRevoked := got.Revoked validateCreatedKey(t, tc.expectedResponse, got) - key, err := am.GetSetupKey(context.Background(), testing_tools.TestAccountId, testing_tools.TestUserId, got.Id) - if err != nil { - return - } - - validateCreatedKey(t, tc.expectedResponse, setup_keys.ToResponseBody(key)) + // Verify setup key in DB via gorm + db := testing_tools.GetDB(t, am.GetStore()) + dbKey := testing_tools.VerifySetupKeyInDB(t, db, gotID) + assert.Equal(t, gotName, dbKey.Name) + assert.Equal(t, gotRevoked, dbKey.Revoked) select { case <-done: @@ -928,15 +931,17 @@ func Test_SetupKeys_GetAll(t *testing.T) { return tc.expectedResponse[i].UsageLimit < tc.expectedResponse[j].UsageLimit }) + db := testing_tools.GetDB(t, am.GetStore()) for i := range tc.expectedResponse { + gotID := got[i].Id + gotName := got[i].Name + gotRevoked := got[i].Revoked validateCreatedKey(t, tc.expectedResponse[i], &got[i]) - key, err := am.GetSetupKey(context.Background(), testing_tools.TestAccountId, testing_tools.TestUserId, got[i].Id) - if err != nil { - return - } - - validateCreatedKey(t, tc.expectedResponse[i], setup_keys.ToResponseBody(key)) + // Verify each setup key in DB via gorm + dbKey := testing_tools.VerifySetupKeyInDB(t, db, gotID) + assert.Equal(t, gotName, dbKey.Name) + assert.Equal(t, gotRevoked, dbKey.Revoked) } select { @@ -1104,8 +1109,9 @@ func Test_SetupKeys_Delete(t *testing.T) { t.Fatalf("Sent content is not in correct json format; %v", err) } - _, err := am.GetSetupKey(context.Background(), testing_tools.TestAccountId, testing_tools.TestUserId, got.Id) - assert.Errorf(t, err, "Expected error when trying to get deleted key") + // Verify setup key deleted from DB via gorm + db := testing_tools.GetDB(t, am.GetStore()) + testing_tools.VerifySetupKeyNotInDB(t, db, got.Id) select { case <-done: @@ -1120,7 +1126,7 @@ func Test_SetupKeys_Delete(t *testing.T) { func validateCreatedKey(t *testing.T, expectedKey *api.SetupKey, got *api.SetupKey) { t.Helper() - if got.Expires.After(time.Now().Add(-1*time.Minute)) && got.Expires.Before(time.Now().Add(testing_tools.ExpiresIn*time.Second)) || + if (got.Expires.After(time.Now().Add(-1*time.Minute)) && got.Expires.Before(time.Now().Add(testing_tools.ExpiresIn*time.Second))) || got.Expires.After(time.Date(2300, 01, 01, 0, 0, 0, 0, time.Local)) || got.Expires.Before(time.Date(1950, 01, 01, 0, 0, 0, 0, time.Local)) { got.Expires = time.Time{} diff --git a/management/server/http/testing/integration/users_handler_integration_test.go b/management/server/http/testing/integration/users_handler_integration_test.go new file mode 100644 index 000000000..eae3b4ad5 --- /dev/null +++ b/management/server/http/testing/integration/users_handler_integration_test.go @@ -0,0 +1,701 @@ +//go:build integration + +package integration + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +func Test_Users_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, true}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, true}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all users", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/users_integration.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/users", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []api.User{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.GreaterOrEqual(t, len(got), 1) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_Users_GetAll_ServiceUsers(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all service users", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/users_integration.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/users?service_user=true", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []api.User{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + for _, u := range got { + assert.NotNil(t, u.IsServiceUser) + assert.Equal(t, true, *u.IsServiceUser) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_Users_Create_ServiceUser(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + requestBody *api.UserCreateRequest + expectedStatus int + verifyResponse func(t *testing.T, user *api.User) + }{ + { + name: "Create service user with admin role", + requestBody: &api.UserCreateRequest{ + Role: "admin", + IsServiceUser: true, + AutoGroups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, user *api.User) { + t.Helper() + assert.NotEmpty(t, user.Id) + assert.Equal(t, "admin", user.Role) + assert.NotNil(t, user.IsServiceUser) + assert.Equal(t, true, *user.IsServiceUser) + }, + }, + { + name: "Create service user with user role", + requestBody: &api.UserCreateRequest{ + Role: "user", + IsServiceUser: true, + AutoGroups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, user *api.User) { + t.Helper() + assert.NotEmpty(t, user.Id) + assert.Equal(t, "user", user.Role) + }, + }, + { + name: "Create service user with empty auto_groups", + requestBody: &api.UserCreateRequest{ + Role: "admin", + IsServiceUser: true, + AutoGroups: []string{}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, user *api.User) { + t.Helper() + assert.NotEmpty(t, user.Id) + }, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/users_integration.sql", nil, true) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPost, "/api/users", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.User{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify user in DB + db := testing_tools.GetDB(t, am.GetStore()) + dbUser := testing_tools.VerifyUserInDB(t, db, got.Id) + assert.True(t, dbUser.IsServiceUser) + assert.Equal(t, string(dbUser.Role), string(tc.requestBody.Role)) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_Users_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + targetUserId string + requestBody *api.UserRequest + expectedStatus int + verifyResponse func(t *testing.T, user *api.User) + }{ + { + name: "Update user role to admin", + targetUserId: testing_tools.TestUserId, + requestBody: &api.UserRequest{ + Role: "admin", + AutoGroups: []string{}, + IsBlocked: false, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, user *api.User) { + t.Helper() + assert.Equal(t, "admin", user.Role) + }, + }, + { + name: "Update user auto_groups", + targetUserId: testing_tools.TestUserId, + requestBody: &api.UserRequest{ + Role: "user", + AutoGroups: []string{testing_tools.TestGroupId}, + IsBlocked: false, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, user *api.User) { + t.Helper() + assert.Equal(t, 1, len(user.AutoGroups)) + }, + }, + { + name: "Block user", + targetUserId: testing_tools.TestUserId, + requestBody: &api.UserRequest{ + Role: "user", + AutoGroups: []string{}, + IsBlocked: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, user *api.User) { + t.Helper() + assert.Equal(t, true, user.IsBlocked) + }, + }, + { + name: "Update non-existing user", + targetUserId: "nonExistingUserId", + requestBody: &api.UserRequest{ + Role: "user", + AutoGroups: []string{}, + IsBlocked: false, + }, + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/users_integration.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPut, strings.Replace("/api/users/{userId}", "{userId}", tc.targetUserId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.User{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify updated fields in DB + if tc.expectedStatus == http.StatusOK { + db := testing_tools.GetDB(t, am.GetStore()) + dbUser := testing_tools.VerifyUserInDB(t, db, tc.targetUserId) + assert.Equal(t, string(dbUser.Role), string(tc.requestBody.Role)) + assert.Equal(t, dbUser.Blocked, tc.requestBody.IsBlocked) + assert.ElementsMatch(t, dbUser.AutoGroups, tc.requestBody.AutoGroups) + } + } + }) + } + } +} + +func Test_Users_Delete(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + targetUserId string + expectedStatus int + }{ + { + name: "Delete existing service user", + targetUserId: "deletableServiceUserId", + expectedStatus: http.StatusOK, + }, + { + name: "Delete non-existing user", + targetUserId: "nonExistingUserId", + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/users_integration.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, strings.Replace("/api/users/{userId}", "{userId}", tc.targetUserId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + _, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + + // Verify user deleted from DB for successful deletes + if expectResponse && tc.expectedStatus == http.StatusOK { + db := testing_tools.GetDB(t, am.GetStore()) + testing_tools.VerifyUserNotInDB(t, db, tc.targetUserId) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_PATs_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all PATs for service user", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/users_integration.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, strings.Replace("/api/users/{userId}/tokens", "{userId}", testing_tools.TestServiceUserId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []api.PersonalAccessToken{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, 1, len(got)) + assert.Equal(t, "serviceToken", got[0].Name) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_PATs_GetById(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + tokenId string + expectedStatus int + expectToken bool + }{ + { + name: "Get existing PAT", + tokenId: "serviceTokenId", + expectedStatus: http.StatusOK, + expectToken: true, + }, + { + name: "Get non-existing PAT", + tokenId: "nonExistingTokenId", + expectedStatus: http.StatusNotFound, + expectToken: false, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/users_integration.sql", nil, true) + + path := strings.Replace("/api/users/{userId}/tokens/{tokenId}", "{userId}", testing_tools.TestServiceUserId, 1) + path = strings.Replace(path, "{tokenId}", tc.tokenId, 1) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, path, user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.expectToken { + got := &api.PersonalAccessToken{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + assert.Equal(t, "serviceTokenId", got.Id) + assert.Equal(t, "serviceToken", got.Name) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_PATs_Create(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + targetUserId string + requestBody *api.PersonalAccessTokenRequest + expectedStatus int + verifyResponse func(t *testing.T, pat *api.PersonalAccessTokenGenerated) + }{ + { + name: "Create PAT with 30 day expiry", + targetUserId: testing_tools.TestServiceUserId, + requestBody: &api.PersonalAccessTokenRequest{ + Name: "newPAT", + ExpiresIn: 30, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, pat *api.PersonalAccessTokenGenerated) { + t.Helper() + assert.NotEmpty(t, pat.PlainToken) + assert.Equal(t, "newPAT", pat.PersonalAccessToken.Name) + }, + }, + { + name: "Create PAT with 365 day expiry", + targetUserId: testing_tools.TestServiceUserId, + requestBody: &api.PersonalAccessTokenRequest{ + Name: "longPAT", + ExpiresIn: 365, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, pat *api.PersonalAccessTokenGenerated) { + t.Helper() + assert.NotEmpty(t, pat.PlainToken) + assert.Equal(t, "longPAT", pat.PersonalAccessToken.Name) + }, + }, + { + name: "Create PAT with empty name", + targetUserId: testing_tools.TestServiceUserId, + requestBody: &api.PersonalAccessTokenRequest{ + Name: "", + ExpiresIn: 30, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + { + name: "Create PAT with 0 day expiry", + targetUserId: testing_tools.TestServiceUserId, + requestBody: &api.PersonalAccessTokenRequest{ + Name: "zeroPAT", + ExpiresIn: 0, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + { + name: "Create PAT with expiry over 365 days", + targetUserId: testing_tools.TestServiceUserId, + requestBody: &api.PersonalAccessTokenRequest{ + Name: "tooLongPAT", + ExpiresIn: 400, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/users_integration.sql", nil, true) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPost, strings.Replace("/api/users/{userId}/tokens", "{userId}", tc.targetUserId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.PersonalAccessTokenGenerated{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify PAT in DB + db := testing_tools.GetDB(t, am.GetStore()) + dbPAT := testing_tools.VerifyPATInDB(t, db, got.PersonalAccessToken.Id) + assert.Equal(t, tc.requestBody.Name, dbPAT.Name) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_PATs_Delete(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + tokenId string + expectedStatus int + }{ + { + name: "Delete existing PAT", + tokenId: "serviceTokenId", + expectedStatus: http.StatusOK, + }, + { + name: "Delete non-existing PAT", + tokenId: "nonExistingTokenId", + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/users_integration.sql", nil, true) + + path := strings.Replace("/api/users/{userId}/tokens/{tokenId}", "{userId}", testing_tools.TestServiceUserId, 1) + path = strings.Replace(path, "{tokenId}", tc.tokenId, 1) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, path, user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + _, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + + // Verify PAT deleted from DB for successful deletes + if expectResponse && tc.expectedStatus == http.StatusOK { + db := testing_tools.GetDB(t, am.GetStore()) + testing_tools.VerifyPATNotInDB(t, db, tc.tokenId) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} diff --git a/management/server/http/testing/testdata/accounts.sql b/management/server/http/testing/testdata/accounts.sql new file mode 100644 index 000000000..35f00d419 --- /dev/null +++ b/management/server/http/testing/testdata/accounts.sql @@ -0,0 +1,18 @@ +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`key_secret` text,`name` text,`type` text,`created_at` datetime,`expires_at` datetime,`updated_at` datetime,`revoked` numeric,`used_times` integer,`last_used` datetime DEFAULT NULL,`auto_groups` text,`usage_limit` integer,`ephemeral` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_setup_keys_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); + +INSERT INTO accounts VALUES('testAccountId','','2024-10-02 16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',1,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO users VALUES('testUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testAdminId','testAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testOwnerId','testAccountId','owner',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceUserId','testAccountId','user',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceAdminId','testAccountId','admin',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('blockedUserId','testAccountId','admin',0,0,'','[]',1,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherUserId','otherAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO "groups" VALUES('testGroupId','testAccountId','testGroupName','api','[]',0,''); +INSERT INTO "groups" VALUES('newGroupId','testAccountId','newGroupName','api','[]',0,''); +INSERT INTO peers VALUES('testPeerId','testAccountId','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','f2a34f6a4731','linux','Linux','11','unknown','Debian GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'f2a34f6a4731','f2a34f6a4731','2023-03-02 09:21:02.189035775+01:00',0,0,0,'','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); +INSERT INTO setup_keys VALUES('testKeyId','testAccountId','testKey','testK****','existingKey','one-off','2021-08-19 20:46:20.000000000+00:00','2321-09-18 20:46:20.000000000+00:00','2021-08-19 20:46:20.000000000+00:000',0,0,NULL,'["testGroupId"]',1,0); diff --git a/management/server/http/testing/testdata/dns.sql b/management/server/http/testing/testdata/dns.sql new file mode 100644 index 000000000..9ed4daf7e --- /dev/null +++ b/management/server/http/testing/testdata/dns.sql @@ -0,0 +1,21 @@ +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`key_secret` text,`name` text,`type` text,`created_at` datetime,`expires_at` datetime,`updated_at` datetime,`revoked` numeric,`used_times` integer,`last_used` datetime DEFAULT NULL,`auto_groups` text,`usage_limit` integer,`ephemeral` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_setup_keys_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `name_server_groups` (`id` text,`account_id` text,`name` text,`description` text,`name_servers` text,`groups` text,`primary` numeric,`domains` text,`enabled` numeric,`search_domains_enabled` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_name_server_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); + +INSERT INTO accounts VALUES('testAccountId','','2024-10-02 16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',1,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO users VALUES('testUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testAdminId','testAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testOwnerId','testAccountId','owner',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceUserId','testAccountId','user',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceAdminId','testAccountId','admin',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('blockedUserId','testAccountId','admin',0,0,'','[]',1,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherUserId','otherAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO "groups" VALUES('testGroupId','testAccountId','testGroupName','api','["testPeerId"]',0,''); +INSERT INTO "groups" VALUES('newGroupId','testAccountId','newGroupName','api','[]',0,''); +INSERT INTO setup_keys VALUES('testKeyId','testAccountId','testKey','testK****','existingKey','one-off','2021-08-19 20:46:20.000000000+00:00','2321-09-18 20:46:20.000000000+00:00','2021-08-19 20:46:20.000000000+00:000',0,0,NULL,'["testGroupId"]',1,0); +INSERT INTO peers VALUES('testPeerId','testAccountId','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','f2a34f6a4731','linux','Linux','11','unknown','Debian GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'f2a34f6a4731','f2a34f6a4731','2023-03-02 09:21:02.189035775+01:00',0,0,0,'','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); + +INSERT INTO name_server_groups VALUES('testNSGroupId','testAccountId','testNSGroup','test nameserver group','[{"IP":"1.1.1.1","NSType":1,"Port":53}]','["testGroupId"]',0,'["example.com"]',1,0); \ No newline at end of file diff --git a/management/server/http/testing/testdata/events.sql b/management/server/http/testing/testdata/events.sql new file mode 100644 index 000000000..27fd01aea --- /dev/null +++ b/management/server/http/testing/testdata/events.sql @@ -0,0 +1,18 @@ +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`key_secret` text,`name` text,`type` text,`created_at` datetime,`expires_at` datetime,`updated_at` datetime,`revoked` numeric,`used_times` integer,`last_used` datetime DEFAULT NULL,`auto_groups` text,`usage_limit` integer,`ephemeral` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_setup_keys_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); + +INSERT INTO accounts VALUES('testAccountId','','2024-10-02 16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',1,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO users VALUES('testUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testAdminId','testAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testOwnerId','testAccountId','owner',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceUserId','testAccountId','user',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceAdminId','testAccountId','admin',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('blockedUserId','testAccountId','admin',0,0,'','[]',1,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherUserId','otherAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO "groups" VALUES('testGroupId','testAccountId','testGroupName','api','[]',0,''); +INSERT INTO "groups" VALUES('newGroupId','testAccountId','newGroupName','api','[]',0,''); +INSERT INTO setup_keys VALUES('testKeyId','testAccountId','testKey','testK****','existingKey','one-off','2021-08-19 20:46:20.000000000+00:00','2321-09-18 20:46:20.000000000+00:00','2021-08-19 20:46:20.000000000+00:000',0,0,NULL,'["testGroupId"]',1,0); +INSERT INTO peers VALUES('testPeerId','testAccountId','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','f2a34f6a4731','linux','Linux','11','unknown','Debian GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'f2a34f6a4731','f2a34f6a4731','2023-03-02 09:21:02.189035775+01:00',0,0,0,'','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); \ No newline at end of file diff --git a/management/server/http/testing/testdata/groups.sql b/management/server/http/testing/testdata/groups.sql new file mode 100644 index 000000000..eb874f036 --- /dev/null +++ b/management/server/http/testing/testdata/groups.sql @@ -0,0 +1,19 @@ +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`key_secret` text,`name` text,`type` text,`created_at` datetime,`expires_at` datetime,`updated_at` datetime,`revoked` numeric,`used_times` integer,`last_used` datetime DEFAULT NULL,`auto_groups` text,`usage_limit` integer,`ephemeral` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_setup_keys_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); + +INSERT INTO accounts VALUES('testAccountId','','2024-10-02 16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',1,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO users VALUES('testUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testAdminId','testAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testOwnerId','testAccountId','owner',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceUserId','testAccountId','user',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceAdminId','testAccountId','admin',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('blockedUserId','testAccountId','admin',0,0,'','[]',1,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherUserId','otherAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO "groups" VALUES('testGroupId','testAccountId','testGroupName','api','["testPeerId"]',0,''); +INSERT INTO "groups" VALUES('newGroupId','testAccountId','newGroupName','api','[]',0,''); +INSERT INTO "groups" VALUES('allGroupId','testAccountId','All','api','[]',0,''); +INSERT INTO setup_keys VALUES('testKeyId','testAccountId','testKey','testK****','existingKey','one-off','2021-08-19 20:46:20.000000000+00:00','2321-09-18 20:46:20.000000000+00:00','2021-08-19 20:46:20.000000000+00:000',0,0,NULL,'["testGroupId"]',1,0); +INSERT INTO peers VALUES('testPeerId','testAccountId','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','f2a34f6a4731','linux','Linux','11','unknown','Debian GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'f2a34f6a4731','f2a34f6a4731','2023-03-02 09:21:02.189035775+01:00',0,0,0,'','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); \ No newline at end of file diff --git a/management/server/http/testing/testdata/networks.sql b/management/server/http/testing/testdata/networks.sql new file mode 100644 index 000000000..39ec8e646 --- /dev/null +++ b/management/server/http/testing/testdata/networks.sql @@ -0,0 +1,25 @@ +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`key_secret` text,`name` text,`type` text,`created_at` datetime,`expires_at` datetime,`updated_at` datetime,`revoked` numeric,`used_times` integer,`last_used` datetime DEFAULT NULL,`auto_groups` text,`usage_limit` integer,`ephemeral` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_setup_keys_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `networks` (`id` text,`account_id` text,`name` text,`description` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_networks` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `network_routers` (`id` text,`network_id` text,`account_id` text,`peer` text,`peer_groups` text,`masquerade` numeric,`metric` integer,`enabled` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_network_routers` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `network_resources` (`id` text,`network_id` text,`account_id` text,`name` text,`description` text,`type` text,`domain` text,`prefix` text,`enabled` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_network_resources` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); + +INSERT INTO accounts VALUES('testAccountId','','2024-10-02 16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',1,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO users VALUES('testUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testAdminId','testAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testOwnerId','testAccountId','owner',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceUserId','testAccountId','user',1,0,'testServiceUser','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceAdminId','testAccountId','admin',1,0,'testServiceAdmin','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('blockedUserId','testAccountId','admin',0,0,'','[]',1,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherUserId','otherAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO "groups" VALUES('testGroupId','testAccountId','testGroupName','api','["testPeerId"]',0,''); +INSERT INTO "groups" VALUES('newGroupId','testAccountId','newGroupName','api','[]',0,''); +INSERT INTO setup_keys VALUES('testKeyId','testAccountId','testKey','testK****','existingKey','one-off','2021-08-19 20:46:20.000000000+00:00','2321-09-18 20:46:20.000000000+00:00','2021-08-19 20:46:20.000000000+00:00',0,0,NULL,'["testGroupId"]',1,0); +INSERT INTO peers VALUES('testPeerId','testAccountId','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','f2a34f6a4731','linux','Linux','11','unknown','Debian GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'f2a34f6a4731','f2a34f6a4731','2023-03-02 09:21:02.189035775+01:00',0,0,0,'','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); + +INSERT INTO networks VALUES('testNetworkId','testAccountId','testNetwork','test network description'); +INSERT INTO network_routers VALUES('testRouterId','testNetworkId','testAccountId','testPeerId','[]',1,100,1); +INSERT INTO network_resources VALUES('testResourceId','testNetworkId','testAccountId','testResource','test resource description','host','','"3.3.3.3/32"',1); \ No newline at end of file diff --git a/management/server/http/testing/testdata/peers_integration.sql b/management/server/http/testing/testdata/peers_integration.sql new file mode 100644 index 000000000..62a7760e7 --- /dev/null +++ b/management/server/http/testing/testdata/peers_integration.sql @@ -0,0 +1,20 @@ +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`key_secret` text,`name` text,`type` text,`created_at` datetime,`expires_at` datetime,`updated_at` datetime,`revoked` numeric,`used_times` integer,`last_used` datetime DEFAULT NULL,`auto_groups` text,`usage_limit` integer,`ephemeral` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_setup_keys_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); + +INSERT INTO accounts VALUES('testAccountId','','2024-10-02 16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',0,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO users VALUES('testUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testAdminId','testAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testOwnerId','testAccountId','owner',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceUserId','testAccountId','user',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceAdminId','testAccountId','admin',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('blockedUserId','testAccountId','admin',0,0,'','[]',1,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherUserId','otherAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO "groups" VALUES('testGroupId','testAccountId','testGroupName','api','["testPeerId","testPeerId2"]',0,''); +INSERT INTO "groups" VALUES('newGroupId','testAccountId','newGroupName','api','[]',0,''); +INSERT INTO setup_keys VALUES('testKeyId','testAccountId','testKey','testK****','existingKey','one-off','2021-08-19 20:46:20.000000000+00:00','2321-09-18 20:46:20.000000000+00:00','2021-08-19 20:46:20.000000000+00:000',0,0,NULL,'["testGroupId"]',1,0); + +INSERT INTO peers VALUES('testPeerId','testAccountId','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','test-host-1','linux','Linux','','unknown','Debian GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'test-peer-1','test-peer-1','2023-03-02 09:21:02.189035775+01:00',0,0,0,'testUserId','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); +INSERT INTO peers VALUES('testPeerId2','testAccountId','6rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYBg=','82546A29-6BC8-4311-BCFC-9CDBF33F1A49','"100.64.114.32"','test-host-2','linux','Linux','','unknown','Ubuntu','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'test-peer-2','test-peer-2','2023-03-02 09:21:02.189035775+01:00',1,0,0,'testAdminId','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',1,0,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); \ No newline at end of file diff --git a/management/server/http/testing/testdata/policies.sql b/management/server/http/testing/testdata/policies.sql new file mode 100644 index 000000000..7e6cc883b --- /dev/null +++ b/management/server/http/testing/testdata/policies.sql @@ -0,0 +1,23 @@ +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`key_secret` text,`name` text,`type` text,`created_at` datetime,`expires_at` datetime,`updated_at` datetime,`revoked` numeric,`used_times` integer,`last_used` datetime DEFAULT NULL,`auto_groups` text,`usage_limit` integer,`ephemeral` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_setup_keys_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `policies` (`id` text,`account_id` text,`name` text,`description` text,`enabled` numeric,`source_posture_checks` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_policies_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `policy_rules` (`id` text,`policy_id` text,`name` text,`description` text,`enabled` numeric,`action` text,`protocol` text,`bidirectional` numeric,`sources` text,`destinations` text,`source_resource` text,`destination_resource` text,`ports` text,`port_ranges` text,`authorized_groups` text,`authorized_user` text,PRIMARY KEY (`id`),CONSTRAINT `fk_policies_rules_g` FOREIGN KEY (`policy_id`) REFERENCES `policies`(`id`)); + +INSERT INTO accounts VALUES('testAccountId','','2024-10-02 16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',1,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO users VALUES('testUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testAdminId','testAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testOwnerId','testAccountId','owner',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceUserId','testAccountId','user',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceAdminId','testAccountId','admin',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('blockedUserId','testAccountId','admin',0,0,'','[]',1,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherUserId','otherAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO "groups" VALUES('testGroupId','testAccountId','testGroupName','api','["testPeerId"]',0,''); +INSERT INTO "groups" VALUES('newGroupId','testAccountId','newGroupName','api','[]',0,''); +INSERT INTO setup_keys VALUES('testKeyId','testAccountId','testKey','testK****','existingKey','one-off','2021-08-19 20:46:20.000000000+00:00','2321-09-18 20:46:20.000000000+00:00','2021-08-19 20:46:20.000000000+00:000',0,0,NULL,'["testGroupId"]',1,0); +INSERT INTO peers VALUES('testPeerId','testAccountId','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','f2a34f6a4731','linux','Linux','11','unknown','Debian GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'f2a34f6a4731','f2a34f6a4731','2023-03-02 09:21:02.189035775+01:00',0,0,0,'','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); + +INSERT INTO policies VALUES('testPolicyId','testAccountId','testPolicy','test policy description',1,NULL); +INSERT INTO policy_rules VALUES('testRuleId','testPolicyId','testRule','test rule',1,'accept','all',1,'["testGroupId"]','["testGroupId"]',NULL,NULL,NULL,NULL,NULL,''); \ No newline at end of file diff --git a/management/server/http/testing/testdata/routes.sql b/management/server/http/testing/testdata/routes.sql new file mode 100644 index 000000000..48aa02052 --- /dev/null +++ b/management/server/http/testing/testdata/routes.sql @@ -0,0 +1,23 @@ +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`key_secret` text,`name` text,`type` text,`created_at` datetime,`expires_at` datetime,`updated_at` datetime,`revoked` numeric,`used_times` integer,`last_used` datetime DEFAULT NULL,`auto_groups` text,`usage_limit` integer,`ephemeral` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_setup_keys_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `routes` (`id` text,`account_id` text,`network` text,`domains` text,`keep_route` numeric,`net_id` text,`description` text,`peer` text,`peer_groups` text,`network_type` integer,`masquerade` numeric,`metric` integer,`enabled` numeric,`groups` text,`access_control_groups` text,`skip_auto_apply` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_routes_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); + +INSERT INTO accounts VALUES('testAccountId','','2024-10-02 16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',1,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO users VALUES('testUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testAdminId','testAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testOwnerId','testAccountId','owner',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceUserId','testAccountId','user',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceAdminId','testAccountId','admin',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('blockedUserId','testAccountId','admin',0,0,'','[]',1,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherUserId','otherAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO "groups" VALUES('testGroupId','testAccountId','testGroupName','api','["testPeerId"]',0,''); +INSERT INTO "groups" VALUES('newGroupId','testAccountId','newGroupName','api','[]',0,''); +INSERT INTO "groups" VALUES('peerGroupId','testAccountId','peerGroupName','api','["testPeerId"]',0,''); +INSERT INTO setup_keys VALUES('testKeyId','testAccountId','testKey','testK****','existingKey','one-off','2021-08-19 20:46:20.000000000+00:00','2321-09-18 20:46:20.000000000+00:00','2021-08-19 20:46:20.000000000+00:000',0,0,NULL,'["testGroupId"]',1,0); +INSERT INTO peers VALUES('testPeerId','testAccountId','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','f2a34f6a4731','linux','Linux','11','unknown','Debian GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'f2a34f6a4731','f2a34f6a4731','2023-03-02 09:21:02.189035775+01:00',0,0,0,'','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); + +INSERT INTO routes VALUES('testRouteId','testAccountId','"10.0.0.0/24"',NULL,0,'testNet','Test Network Route','testPeerId',NULL,1,1,100,1,'["testGroupId"]',NULL,0); +INSERT INTO routes VALUES('testDomainRouteId','testAccountId','"0.0.0.0/0"','["example.com"]',0,'testDomainNet','Test Domain Route','','["peerGroupId"]',3,1,200,1,'["testGroupId"]',NULL,0); diff --git a/management/server/http/testing/testdata/users_integration.sql b/management/server/http/testing/testdata/users_integration.sql new file mode 100644 index 000000000..57df73e8c --- /dev/null +++ b/management/server/http/testing/testdata/users_integration.sql @@ -0,0 +1,24 @@ +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`key_secret` text,`name` text,`type` text,`created_at` datetime,`expires_at` datetime,`updated_at` datetime,`revoked` numeric,`used_times` integer,`last_used` datetime DEFAULT NULL,`auto_groups` text,`usage_limit` integer,`ephemeral` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_setup_keys_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `personal_access_tokens` (`id` text,`user_id` text,`name` text,`hashed_token` text,`expiration_date` datetime,`created_by` text,`created_at` datetime,`last_used` datetime DEFAULT NULL,PRIMARY KEY (`id`),CONSTRAINT `fk_users_pa_ts_g` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`)); +CREATE INDEX `idx_personal_access_tokens_user_id` ON `personal_access_tokens`(`user_id`); + +INSERT INTO accounts VALUES('testAccountId','','2024-10-02 16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',1,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO users VALUES('testUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testAdminId','testAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testOwnerId','testAccountId','owner',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceUserId','testAccountId','user',1,0,'testServiceUser','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceAdminId','testAccountId','admin',1,0,'testServiceAdmin','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('blockedUserId','testAccountId','admin',0,0,'','[]',1,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherUserId','otherAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('deletableServiceUserId','testAccountId','user',1,0,'deletableServiceUser','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO "groups" VALUES('testGroupId','testAccountId','testGroupName','api','["testPeerId"]',0,''); +INSERT INTO "groups" VALUES('newGroupId','testAccountId','newGroupName','api','[]',0,''); +INSERT INTO setup_keys VALUES('testKeyId','testAccountId','testKey','testK****','existingKey','one-off','2021-08-19 20:46:20.000000000+00:00','2321-09-18 20:46:20.000000000+00:00','2021-08-19 20:46:20.000000000+00:000',0,0,NULL,'["testGroupId"]',1,0); +INSERT INTO peers VALUES('testPeerId','testAccountId','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','f2a34f6a4731','linux','Linux','11','unknown','Debian GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'f2a34f6a4731','f2a34f6a4731','2023-03-02 09:21:02.189035775+01:00',0,0,0,'','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); + +INSERT INTO personal_access_tokens VALUES('testTokenId','testUserId','testToken','hashedTokenValue123','2325-10-02 16:01:38.000000000+00:00','testUserId','2024-10-02 16:01:38.000000000+00:00',NULL); +INSERT INTO personal_access_tokens VALUES('serviceTokenId','testServiceUserId','serviceToken','hashedServiceTokenValue123','2325-10-02 16:01:38.000000000+00:00','testAdminId','2024-10-02 16:01:38.000000000+00:00',NULL); \ No newline at end of file diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index 6bd269a2c..55095bbb7 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -128,14 +128,14 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee GetPATInfoFunc: authManager.GetPATInfo, } - networksManagerMock := networks.NewManagerMock() - resourcesManagerMock := resources.NewManagerMock() - routersManagerMock := routers.NewManagerMock() - groupsManagerMock := groups.NewManagerMock() + groupsManager := groups.NewManager(store, permissionsManager, am) + routersManager := routers.NewManager(store, permissionsManager, am) + resourcesManager := resources.NewManager(store, permissionsManager, groupsManager, am, serviceManager) + networksManager := networks.NewManager(store, permissionsManager, resourcesManager, routersManager, am) customZonesManager := zonesManager.NewManager(store, am, permissionsManager, "") zoneRecordsManager := recordsManager.NewManager(store, am, permissionsManager) - apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManagerMock, resourcesManagerMock, routersManagerMock, groupsManagerMock, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil) + apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManager, resourcesManager, routersManager, groupsManager, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil) if err != nil { t.Fatalf("Failed to create API handler: %v", err) } @@ -167,6 +167,112 @@ func peerShouldReceiveUpdate(t testing_tools.TB, updateMessage <-chan *network_m } } +// PeerShouldReceiveAnyUpdate waits for a peer update message and returns it. +// Fails the test if no update is received within timeout. +func PeerShouldReceiveAnyUpdate(t testing_tools.TB, updateMessage <-chan *network_map.UpdateMessage) *network_map.UpdateMessage { + t.Helper() + select { + case msg := <-updateMessage: + if msg == nil { + t.Errorf("Received nil update message, expected valid message") + } + return msg + case <-time.After(500 * time.Millisecond): + t.Errorf("Timed out waiting for update message") + return nil + } +} + +// PeerShouldNotReceiveAnyUpdate verifies no peer update message is received. +func PeerShouldNotReceiveAnyUpdate(t testing_tools.TB, updateMessage <-chan *network_map.UpdateMessage) { + t.Helper() + peerShouldNotReceiveUpdate(t, updateMessage) +} + +// BuildApiBlackBoxWithDBStateAndPeerChannel creates the API handler and returns +// the peer update channel directly so tests can verify updates inline. +func BuildApiBlackBoxWithDBStateAndPeerChannel(t testing_tools.TB, sqlFile string) (http.Handler, account.Manager, <-chan *network_map.UpdateMessage) { + store, cleanup, err := store.NewTestStoreFromSQL(context.Background(), sqlFile, t.TempDir()) + if err != nil { + t.Fatalf("Failed to create test store: %v", err) + } + t.Cleanup(cleanup) + + metrics, err := telemetry.NewDefaultAppMetrics(context.Background()) + if err != nil { + t.Fatalf("Failed to create metrics: %v", err) + } + + peersUpdateManager := update_channel.NewPeersUpdateManager(nil) + updMsg := peersUpdateManager.CreateChannel(context.Background(), testing_tools.TestPeerId) + + geoMock := &geolocation.Mock{} + validatorMock := server.MockIntegratedValidator{} + proxyController := integrations.NewController(store) + userManager := users.NewManager(store) + permissionsManager := permissions.NewManager(store) + settingsManager := settings.NewManager(store, userManager, integrations.NewManager(&activity.InMemoryEventStore{}), permissionsManager, settings.IdpConfig{}) + peersManager := peers.NewManager(store, permissionsManager) + + jobManager := job.NewJobManager(nil, store, peersManager) + + ctx := context.Background() + requestBuffer := server.NewAccountRequestBuffer(ctx, store) + networkMapController := controller.NewController(ctx, store, metrics, peersUpdateManager, requestBuffer, server.MockIntegratedValidator{}, settingsManager, "", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peersManager), &config.Config{}) + am, err := server.BuildManager(ctx, nil, store, networkMapController, jobManager, nil, "", &activity.InMemoryEventStore{}, geoMock, false, validatorMock, metrics, proxyController, settingsManager, permissionsManager, false) + if err != nil { + t.Fatalf("Failed to create manager: %v", err) + } + + accessLogsManager := accesslogsmanager.NewManager(store, permissionsManager, nil) + proxyTokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, 5*time.Minute, 10*time.Minute, 100) + if err != nil { + t.Fatalf("Failed to create proxy token store: %v", err) + } + pkceverifierStore, err := nbgrpc.NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) + if err != nil { + t.Fatalf("Failed to create PKCE verifier store: %v", err) + } + noopMeter := noop.NewMeterProvider().Meter("") + proxyMgr, err := proxymanager.NewManager(store, noopMeter) + if err != nil { + t.Fatalf("Failed to create proxy manager: %v", err) + } + proxyServiceServer := nbgrpc.NewProxyServiceServer(accessLogsManager, proxyTokenStore, pkceverifierStore, nbgrpc.ProxyOIDCConfig{}, peersManager, userManager, proxyMgr) + domainManager := manager.NewManager(store, proxyMgr, permissionsManager, am) + serviceProxyController, err := proxymanager.NewGRPCController(proxyServiceServer, noopMeter) + if err != nil { + t.Fatalf("Failed to create proxy controller: %v", err) + } + domainManager.SetClusterCapabilities(serviceProxyController) + serviceManager := reverseproxymanager.NewManager(store, am, permissionsManager, serviceProxyController, domainManager) + proxyServiceServer.SetServiceManager(serviceManager) + am.SetServiceManager(serviceManager) + + // @note this is required so that PAT's validate from store, but JWT's are mocked + authManager := serverauth.NewManager(store, "", "", "", "", []string{}, false) + authManagerMock := &serverauth.MockManager{ + ValidateAndParseTokenFunc: mockValidateAndParseToken, + EnsureUserAccessByJWTGroupsFunc: authManager.EnsureUserAccessByJWTGroups, + MarkPATUsedFunc: authManager.MarkPATUsed, + GetPATInfoFunc: authManager.GetPATInfo, + } + + groupsManager := groups.NewManager(store, permissionsManager, am) + routersManager := routers.NewManager(store, permissionsManager, am) + resourcesManager := resources.NewManager(store, permissionsManager, groupsManager, am, serviceManager) + networksManager := networks.NewManager(store, permissionsManager, resourcesManager, routersManager, am) + customZonesManager := zonesManager.NewManager(store, am, permissionsManager, "") + zoneRecordsManager := recordsManager.NewManager(store, am, permissionsManager) + + apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManager, resourcesManager, routersManager, groupsManager, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil) + if err != nil { + t.Fatalf("Failed to create API handler: %v", err) + } + + return apiHandler, am, updMsg +} + func mockValidateAndParseToken(_ context.Context, token string) (auth.UserAuth, *jwt.Token, error) { userAuth := auth.UserAuth{} diff --git a/management/server/http/testing/testing_tools/db_verify.go b/management/server/http/testing/testing_tools/db_verify.go new file mode 100644 index 000000000..f8af6a41f --- /dev/null +++ b/management/server/http/testing/testing_tools/db_verify.go @@ -0,0 +1,222 @@ +package testing_tools + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gorm.io/gorm" + + nbdns "github.com/netbirdio/netbird/dns" + resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" + routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" + networkTypes "github.com/netbirdio/netbird/management/server/networks/types" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/route" +) + +// GetDB extracts the *gorm.DB from a store.Store (must be *SqlStore). +func GetDB(t *testing.T, s store.Store) *gorm.DB { + t.Helper() + sqlStore, ok := s.(*store.SqlStore) + require.True(t, ok, "Store is not a *SqlStore, cannot get gorm.DB") + return sqlStore.GetDB() +} + +// VerifyGroupInDB reads a group directly from the DB and returns it. +func VerifyGroupInDB(t *testing.T, db *gorm.DB, groupID string) *types.Group { + t.Helper() + var group types.Group + err := db.Where("id = ? AND account_id = ?", groupID, TestAccountId).First(&group).Error + require.NoError(t, err, "Expected group %s to exist in DB", groupID) + return &group +} + +// VerifyGroupNotInDB verifies that a group does not exist in the DB. +func VerifyGroupNotInDB(t *testing.T, db *gorm.DB, groupID string) { + t.Helper() + var count int64 + db.Model(&types.Group{}).Where("id = ? AND account_id = ?", groupID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected group %s to NOT exist in DB", groupID) +} + +// VerifyPolicyInDB reads a policy directly from the DB and returns it. +func VerifyPolicyInDB(t *testing.T, db *gorm.DB, policyID string) *types.Policy { + t.Helper() + var policy types.Policy + err := db.Preload("Rules").Where("id = ? AND account_id = ?", policyID, TestAccountId).First(&policy).Error + require.NoError(t, err, "Expected policy %s to exist in DB", policyID) + return &policy +} + +// VerifyPolicyNotInDB verifies that a policy does not exist in the DB. +func VerifyPolicyNotInDB(t *testing.T, db *gorm.DB, policyID string) { + t.Helper() + var count int64 + db.Model(&types.Policy{}).Where("id = ? AND account_id = ?", policyID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected policy %s to NOT exist in DB", policyID) +} + +// VerifyRouteInDB reads a route directly from the DB and returns it. +func VerifyRouteInDB(t *testing.T, db *gorm.DB, routeID route.ID) *route.Route { + t.Helper() + var r route.Route + err := db.Where("id = ? AND account_id = ?", routeID, TestAccountId).First(&r).Error + require.NoError(t, err, "Expected route %s to exist in DB", routeID) + return &r +} + +// VerifyRouteNotInDB verifies that a route does not exist in the DB. +func VerifyRouteNotInDB(t *testing.T, db *gorm.DB, routeID route.ID) { + t.Helper() + var count int64 + db.Model(&route.Route{}).Where("id = ? AND account_id = ?", routeID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected route %s to NOT exist in DB", routeID) +} + +// VerifyNSGroupInDB reads a nameserver group directly from the DB and returns it. +func VerifyNSGroupInDB(t *testing.T, db *gorm.DB, nsGroupID string) *nbdns.NameServerGroup { + t.Helper() + var nsGroup nbdns.NameServerGroup + err := db.Where("id = ? AND account_id = ?", nsGroupID, TestAccountId).First(&nsGroup).Error + require.NoError(t, err, "Expected NS group %s to exist in DB", nsGroupID) + return &nsGroup +} + +// VerifyNSGroupNotInDB verifies that a nameserver group does not exist in the DB. +func VerifyNSGroupNotInDB(t *testing.T, db *gorm.DB, nsGroupID string) { + t.Helper() + var count int64 + db.Model(&nbdns.NameServerGroup{}).Where("id = ? AND account_id = ?", nsGroupID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected NS group %s to NOT exist in DB", nsGroupID) +} + +// VerifyPeerInDB reads a peer directly from the DB and returns it. +func VerifyPeerInDB(t *testing.T, db *gorm.DB, peerID string) *nbpeer.Peer { + t.Helper() + var peer nbpeer.Peer + err := db.Where("id = ? AND account_id = ?", peerID, TestAccountId).First(&peer).Error + require.NoError(t, err, "Expected peer %s to exist in DB", peerID) + return &peer +} + +// VerifyPeerNotInDB verifies that a peer does not exist in the DB. +func VerifyPeerNotInDB(t *testing.T, db *gorm.DB, peerID string) { + t.Helper() + var count int64 + db.Model(&nbpeer.Peer{}).Where("id = ? AND account_id = ?", peerID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected peer %s to NOT exist in DB", peerID) +} + +// VerifySetupKeyInDB reads a setup key directly from the DB and returns it. +func VerifySetupKeyInDB(t *testing.T, db *gorm.DB, keyID string) *types.SetupKey { + t.Helper() + var key types.SetupKey + err := db.Where("id = ? AND account_id = ?", keyID, TestAccountId).First(&key).Error + require.NoError(t, err, "Expected setup key %s to exist in DB", keyID) + return &key +} + +// VerifySetupKeyNotInDB verifies that a setup key does not exist in the DB. +func VerifySetupKeyNotInDB(t *testing.T, db *gorm.DB, keyID string) { + t.Helper() + var count int64 + db.Model(&types.SetupKey{}).Where("id = ? AND account_id = ?", keyID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected setup key %s to NOT exist in DB", keyID) +} + +// VerifyUserInDB reads a user directly from the DB and returns it. +func VerifyUserInDB(t *testing.T, db *gorm.DB, userID string) *types.User { + t.Helper() + var user types.User + err := db.Where("id = ? AND account_id = ?", userID, TestAccountId).First(&user).Error + require.NoError(t, err, "Expected user %s to exist in DB", userID) + return &user +} + +// VerifyUserNotInDB verifies that a user does not exist in the DB. +func VerifyUserNotInDB(t *testing.T, db *gorm.DB, userID string) { + t.Helper() + var count int64 + db.Model(&types.User{}).Where("id = ? AND account_id = ?", userID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected user %s to NOT exist in DB", userID) +} + +// VerifyPATInDB reads a PAT directly from the DB and returns it. +func VerifyPATInDB(t *testing.T, db *gorm.DB, tokenID string) *types.PersonalAccessToken { + t.Helper() + var pat types.PersonalAccessToken + err := db.Where("id = ?", tokenID).First(&pat).Error + require.NoError(t, err, "Expected PAT %s to exist in DB", tokenID) + return &pat +} + +// VerifyPATNotInDB verifies that a PAT does not exist in the DB. +func VerifyPATNotInDB(t *testing.T, db *gorm.DB, tokenID string) { + t.Helper() + var count int64 + db.Model(&types.PersonalAccessToken{}).Where("id = ?", tokenID).Count(&count) + assert.Equal(t, int64(0), count, "Expected PAT %s to NOT exist in DB", tokenID) +} + +// VerifyAccountSettings reads the account and returns its settings from the DB. +func VerifyAccountSettings(t *testing.T, db *gorm.DB) *types.Account { + t.Helper() + var account types.Account + err := db.Where("id = ?", TestAccountId).First(&account).Error + require.NoError(t, err, "Expected account %s to exist in DB", TestAccountId) + return &account +} + +// VerifyNetworkInDB reads a network directly from the store and returns it. +func VerifyNetworkInDB(t *testing.T, db *gorm.DB, networkID string) *networkTypes.Network { + t.Helper() + var network networkTypes.Network + err := db.Where("id = ? AND account_id = ?", networkID, TestAccountId).First(&network).Error + require.NoError(t, err, "Expected network %s to exist in DB", networkID) + return &network +} + +// VerifyNetworkNotInDB verifies that a network does not exist in the DB. +func VerifyNetworkNotInDB(t *testing.T, db *gorm.DB, networkID string) { + t.Helper() + var count int64 + db.Model(&networkTypes.Network{}).Where("id = ? AND account_id = ?", networkID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected network %s to NOT exist in DB", networkID) +} + +// VerifyNetworkResourceInDB reads a network resource directly from the DB and returns it. +func VerifyNetworkResourceInDB(t *testing.T, db *gorm.DB, resourceID string) *resourceTypes.NetworkResource { + t.Helper() + var resource resourceTypes.NetworkResource + err := db.Where("id = ? AND account_id = ?", resourceID, TestAccountId).First(&resource).Error + require.NoError(t, err, "Expected network resource %s to exist in DB", resourceID) + return &resource +} + +// VerifyNetworkResourceNotInDB verifies that a network resource does not exist in the DB. +func VerifyNetworkResourceNotInDB(t *testing.T, db *gorm.DB, resourceID string) { + t.Helper() + var count int64 + db.Model(&resourceTypes.NetworkResource{}).Where("id = ? AND account_id = ?", resourceID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected network resource %s to NOT exist in DB", resourceID) +} + +// VerifyNetworkRouterInDB reads a network router directly from the DB and returns it. +func VerifyNetworkRouterInDB(t *testing.T, db *gorm.DB, routerID string) *routerTypes.NetworkRouter { + t.Helper() + var router routerTypes.NetworkRouter + err := db.Where("id = ? AND account_id = ?", routerID, TestAccountId).First(&router).Error + require.NoError(t, err, "Expected network router %s to exist in DB", routerID) + return &router +} + +// VerifyNetworkRouterNotInDB verifies that a network router does not exist in the DB. +func VerifyNetworkRouterNotInDB(t *testing.T, db *gorm.DB, routerID string) { + t.Helper() + var count int64 + db.Model(&routerTypes.NetworkRouter{}).Where("id = ? AND account_id = ?", routerID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected network router %s to NOT exist in DB", routerID) +} From 7abf730d77170513a031715a731ed2839ff792cb Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Fri, 27 Mar 2026 15:22:23 +0100 Subject: [PATCH 246/374] [management] update to latest grpc version (#5716) --- .github/workflows/wasm-build-validation.yml | 4 +- go.mod | 41 +++++----- go.sum | 86 +++++++++++---------- 3 files changed, 67 insertions(+), 64 deletions(-) diff --git a/.github/workflows/wasm-build-validation.yml b/.github/workflows/wasm-build-validation.yml index 47e45165b..81ae36e78 100644 --- a/.github/workflows/wasm-build-validation.yml +++ b/.github/workflows/wasm-build-validation.yml @@ -61,8 +61,8 @@ jobs: echo "Size: ${SIZE} bytes (${SIZE_MB} MB)" - if [ ${SIZE} -gt 57671680 ]; then - echo "Wasm binary size (${SIZE_MB}MB) exceeds 55MB limit!" + if [ ${SIZE} -gt 58720256 ]; then + echo "Wasm binary size (${SIZE_MB}MB) exceeds 56MB limit!" exit 1 fi diff --git a/go.mod b/go.mod index d99a14df3..89bc06fea 100644 --- a/go.mod +++ b/go.mod @@ -17,13 +17,13 @@ require ( github.com/spf13/cobra v1.10.1 github.com/spf13/pflag v1.0.9 github.com/vishvananda/netlink v1.3.1 - golang.org/x/crypto v0.46.0 - golang.org/x/sys v0.39.0 + golang.org/x/crypto v0.48.0 + golang.org/x/sys v0.41.0 golang.zx2c4.com/wireguard v0.0.0-20230704135630-469159ecf7d1 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 golang.zx2c4.com/wireguard/windows v0.5.3 - google.golang.org/grpc v1.77.0 - google.golang.org/protobuf v1.36.10 + google.golang.org/grpc v1.79.3 + google.golang.org/protobuf v1.36.11 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -101,21 +101,21 @@ require ( github.com/vmihailenco/msgpack/v5 v5.4.1 github.com/yusufpapurcu/wmi v1.2.4 github.com/zcalusic/sysinfo v1.1.3 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 - go.opentelemetry.io/otel v1.38.0 - go.opentelemetry.io/otel/exporters/prometheus v0.48.0 - go.opentelemetry.io/otel/metric v1.38.0 - go.opentelemetry.io/otel/sdk/metric v1.38.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 + go.opentelemetry.io/otel v1.42.0 + go.opentelemetry.io/otel/exporters/prometheus v0.64.0 + go.opentelemetry.io/otel/metric v1.42.0 + go.opentelemetry.io/otel/sdk/metric v1.42.0 go.uber.org/mock v0.5.2 go.uber.org/zap v1.27.0 goauthentik.io/api/v3 v3.2023051.3 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 golang.org/x/mobile v0.0.0-20251113184115-a159579294ab - golang.org/x/mod v0.30.0 - golang.org/x/net v0.47.0 + golang.org/x/mod v0.32.0 + golang.org/x/net v0.51.0 golang.org/x/oauth2 v0.34.0 golang.org/x/sync v0.19.0 - golang.org/x/term v0.38.0 + golang.org/x/term v0.40.0 golang.org/x/time v0.14.0 google.golang.org/api v0.257.0 gopkg.in/yaml.v3 v3.0.1 @@ -249,8 +249,9 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.66.1 // indirect - github.com/prometheus/procfs v0.16.1 // indirect + github.com/prometheus/common v0.67.5 // indirect + github.com/prometheus/otlptranslator v1.0.0 // indirect + github.com/prometheus/procfs v0.19.2 // indirect github.com/russellhaering/goxmldsig v1.5.0 // indirect github.com/rymdport/portal v0.4.2 // indirect github.com/shirou/gopsutil/v4 v4.25.1 // indirect @@ -269,15 +270,15 @@ require ( github.com/zeebo/blake3 v0.2.3 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect - go.opentelemetry.io/otel/sdk v1.38.0 // indirect - go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.opentelemetry.io/otel/sdk v1.42.0 // indirect + go.opentelemetry.io/otel/trace v1.42.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect golang.org/x/image v0.33.0 // indirect - golang.org/x/text v0.32.0 // indirect - golang.org/x/tools v0.39.0 // indirect + golang.org/x/text v0.34.0 // indirect + golang.org/x/tools v0.41.0 // indirect golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect ) diff --git a/go.sum b/go.sum index 3624898c7..629388ccb 100644 --- a/go.sum +++ b/go.sum @@ -487,10 +487,12 @@ github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= -github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= -github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= -github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= +github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= +github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEoIwkU+A6qos= +github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM= +github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= +github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= github.com/quic-go/quic-go v0.55.0 h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9Mk= github.com/quic-go/quic-go v0.55.0/go.mod h1:DR51ilwU1uE164KuWXhinFcKWGlEjzys2l8zUl5Ss1U= github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= @@ -603,26 +605,26 @@ github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 h1:yI1/OhfEPy7J9eoa6Sj051C7n5dvpj0QX8g4sRchg04= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0/go.mod h1:NoUCKYWK+3ecatC4HjkRktREheMeEtrXoQxrqYFeHSc= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel v1.42.0 h1:lSQGzTgVR3+sgJDAU/7/ZMjN9Z+vUip7leaqBKy4sho= +go.opentelemetry.io/otel v1.42.0/go.mod h1:lJNsdRMxCUIWuMlVJWzecSMuNjE7dOYyWlqOXWkdqCc= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= -go.opentelemetry.io/otel/exporters/prometheus v0.48.0 h1:sBQe3VNGUjY9IKWQC6z2lNqa5iGbDSxhs60ABwK4y0s= -go.opentelemetry.io/otel/exporters/prometheus v0.48.0/go.mod h1:DtrbMzoZWwQHyrQmCfLam5DZbnmorsGbOtTbYHycU5o= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= -go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= -go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= -go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/otel/exporters/prometheus v0.64.0 h1:g0LRDXMX/G1SEZtK8zl8Chm4K6GBwRkjPKE36LxiTYs= +go.opentelemetry.io/otel/exporters/prometheus v0.64.0/go.mod h1:UrgcjnarfdlBDP3GjDIJWe6HTprwSazNjwsI+Ru6hro= +go.opentelemetry.io/otel/metric v1.42.0 h1:2jXG+3oZLNXEPfNmnpxKDeZsFI5o4J+nz6xUlaFdF/4= +go.opentelemetry.io/otel/metric v1.42.0/go.mod h1:RlUN/7vTU7Ao/diDkEpQpnz3/92J9ko05BIwxYa2SSI= +go.opentelemetry.io/otel/sdk v1.42.0 h1:LyC8+jqk6UJwdrI/8VydAq/hvkFKNHZVIWuslJXYsDo= +go.opentelemetry.io/otel/sdk v1.42.0/go.mod h1:rGHCAxd9DAph0joO4W6OPwxjNTYWghRWmkHuGbayMts= +go.opentelemetry.io/otel/sdk/metric v1.42.0 h1:D/1QR46Clz6ajyZ3G8SgNlTJKBdGp84q9RKCAZ3YGuA= +go.opentelemetry.io/otel/sdk/metric v1.42.0/go.mod h1:Ua6AAlDKdZ7tdvaQKfSmnFTdHx37+J4ba8MwVCYM5hc= +go.opentelemetry.io/otel/trace v1.42.0 h1:OUCgIPt+mzOnaUTpOQcBiM/PLQ/Op7oq6g4LenLmOYY= +go.opentelemetry.io/otel/trace v1.42.0/go.mod h1:f3K9S+IFqnumBkKhRJMeaZeNk9epyhnCmQh/EysQCdc= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -633,8 +635,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= -go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= goauthentik.io/api/v3 v3.2023051.3 h1:NebAhD/TeTWNo/9X3/Uj+rM5fG1HaiLOlKTNLQv9Qq4= goauthentik.io/api/v3 v3.2023051.3/go.mod h1:nYECml4jGbp/541hj8GcylKQG1gVBsKppHy4+7G8u4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -648,8 +650,8 @@ golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1m golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= -golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= +golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/image v0.33.0 h1:LXRZRnv1+zGd5XBUVRFmYEphyyKJjQjCRiOuAP3sZfQ= @@ -666,8 +668,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= @@ -686,8 +688,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo= +golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= @@ -738,8 +740,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= -golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -752,8 +754,8 @@ golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= -golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= +golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -765,8 +767,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= -golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= +golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -780,8 +782,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= +golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -799,12 +801,12 @@ google.golang.org/api v0.257.0/go.mod h1:4eJrr+vbVaZSqs7vovFd1Jb/A6ml6iw2e6FBYf3 google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846 h1:Wgl1rcDNThT+Zn47YyCXOXyX/COgMTIdhJ717F0l4xk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= -google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= -google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= +google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 h1:ggcbiqK8WWh6l1dnltU4BgWGIGo+EVYxCaAPih/zQXQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= +google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -815,8 +817,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= -google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= From be6fd119d88df95cda8fddcf14799e34a073452b Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Mon, 30 Mar 2026 10:08:02 +0200 Subject: [PATCH 247/374] [management] no events for temporary peers (#5719) --- management/internals/modules/peers/manager.go | 8 +++++--- management/server/peer.go | 12 ++++++++---- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/management/internals/modules/peers/manager.go b/management/internals/modules/peers/manager.go index 7cb0f3908..d3f8f44ff 100644 --- a/management/internals/modules/peers/manager.go +++ b/management/internals/modules/peers/manager.go @@ -154,9 +154,11 @@ func (m *managerImpl) DeletePeers(ctx context.Context, accountID string, peerIDs return err } - eventsToStore = append(eventsToStore, func() { - m.accountManager.StoreEvent(ctx, userID, peer.ID, accountID, activity.PeerRemovedByUser, peer.EventMeta(dnsDomain)) - }) + if !(peer.ProxyMeta.Embedded || peer.Meta.KernelVersion == "wasm") { + eventsToStore = append(eventsToStore, func() { + m.accountManager.StoreEvent(ctx, userID, peer.ID, accountID, activity.PeerRemovedByUser, peer.EventMeta(dnsDomain)) + }) + } return nil }) diff --git a/management/server/peer.go b/management/server/peer.go index f7cb6a0f1..a02e34e0d 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -859,7 +859,9 @@ func (am *DefaultAccountManager) AddPeer(ctx context.Context, accountID, setupKe opEvent.Meta["setup_key_name"] = peerAddConfig.SetupKeyName } - am.StoreEvent(ctx, opEvent.InitiatorID, opEvent.TargetID, opEvent.AccountID, opEvent.Activity, opEvent.Meta) + if !temporary { + am.StoreEvent(ctx, opEvent.InitiatorID, opEvent.TargetID, opEvent.AccountID, opEvent.Activity, opEvent.Meta) + } if err := am.networkMapController.OnPeersAdded(ctx, accountID, []string{newPeer.ID}); err != nil { log.WithContext(ctx).Errorf("failed to update network map cache for peer %s: %v", newPeer.ID, err) @@ -1480,9 +1482,11 @@ func deletePeers(ctx context.Context, am *DefaultAccountManager, transaction sto if err = transaction.DeletePeer(ctx, accountID, peer.ID); err != nil { return nil, err } - peerDeletedEvents = append(peerDeletedEvents, func() { - am.StoreEvent(ctx, userID, peer.ID, accountID, activity.PeerRemovedByUser, peer.EventMeta(dnsDomain)) - }) + if !(peer.ProxyMeta.Embedded || peer.Meta.KernelVersion == "wasm") { + peerDeletedEvents = append(peerDeletedEvents, func() { + am.StoreEvent(ctx, userID, peer.ID, accountID, activity.PeerRemovedByUser, peer.EventMeta(dnsDomain)) + }) + } } return peerDeletedEvents, nil From c919ea149e49ed5e008644dce3d6affb53c3f20e Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Mon, 30 Mar 2026 11:20:17 +0300 Subject: [PATCH 248/374] [misc] Add missing OpenAPI definitions (#5690) --- shared/management/client/rest/azure_idp.go | 112 ++ .../management/client/rest/azure_idp_test.go | 252 ++++ shared/management/client/rest/client.go | 12 + shared/management/client/rest/google_idp.go | 112 ++ .../management/client/rest/google_idp_test.go | 248 ++++ .../management/client/rest/okta_scim_idp.go | 112 ++ .../client/rest/okta_scim_idp_test.go | 246 ++++ shared/management/http/api/openapi.yml | 1299 +++++++++++++++-- shared/management/http/api/types.gen.go | 263 +++- 9 files changed, 2548 insertions(+), 108 deletions(-) create mode 100644 shared/management/client/rest/azure_idp.go create mode 100644 shared/management/client/rest/azure_idp_test.go create mode 100644 shared/management/client/rest/google_idp.go create mode 100644 shared/management/client/rest/google_idp_test.go create mode 100644 shared/management/client/rest/okta_scim_idp.go create mode 100644 shared/management/client/rest/okta_scim_idp_test.go diff --git a/shared/management/client/rest/azure_idp.go b/shared/management/client/rest/azure_idp.go new file mode 100644 index 000000000..40b90bc30 --- /dev/null +++ b/shared/management/client/rest/azure_idp.go @@ -0,0 +1,112 @@ +package rest + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/netbirdio/netbird/shared/management/http/api" +) + +// AzureIDPAPI APIs for Azure AD IDP integrations +type AzureIDPAPI struct { + c *Client +} + +// List retrieves all Azure AD IDP integrations +func (a *AzureIDPAPI) List(ctx context.Context) ([]api.AzureIntegration, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/azure-idp", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.AzureIntegration](resp) + return ret, err +} + +// Get retrieves a specific Azure AD IDP integration by ID +func (a *AzureIDPAPI) Get(ctx context.Context, integrationID string) (*api.AzureIntegration, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/azure-idp/"+integrationID, nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.AzureIntegration](resp) + return &ret, err +} + +// Create creates a new Azure AD IDP integration +func (a *AzureIDPAPI) Create(ctx context.Context, request api.CreateAzureIntegrationRequest) (*api.AzureIntegration, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/azure-idp", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.AzureIntegration](resp) + return &ret, err +} + +// Update updates an existing Azure AD IDP integration +func (a *AzureIDPAPI) Update(ctx context.Context, integrationID string, request api.UpdateAzureIntegrationRequest) (*api.AzureIntegration, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/integrations/azure-idp/"+integrationID, bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.AzureIntegration](resp) + return &ret, err +} + +// Delete deletes an Azure AD IDP integration +func (a *AzureIDPAPI) Delete(ctx context.Context, integrationID string) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/integrations/azure-idp/"+integrationID, nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} + +// Sync triggers a manual sync for an Azure AD IDP integration +func (a *AzureIDPAPI) Sync(ctx context.Context, integrationID string) (*api.SyncResult, error) { + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/azure-idp/"+integrationID+"/sync", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.SyncResult](resp) + return &ret, err +} + +// GetLogs retrieves synchronization logs for an Azure AD IDP integration +func (a *AzureIDPAPI) GetLogs(ctx context.Context, integrationID string) ([]api.IdpIntegrationSyncLog, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/azure-idp/"+integrationID+"/logs", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.IdpIntegrationSyncLog](resp) + return ret, err +} diff --git a/shared/management/client/rest/azure_idp_test.go b/shared/management/client/rest/azure_idp_test.go new file mode 100644 index 000000000..480d2a313 --- /dev/null +++ b/shared/management/client/rest/azure_idp_test.go @@ -0,0 +1,252 @@ +//go:build integration + +package rest_test + +import ( + "context" + "encoding/json" + "io" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/shared/management/client/rest" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" +) + +var testAzureIntegration = api.AzureIntegration{ + Id: 1, + Enabled: true, + ClientId: "12345678-1234-1234-1234-123456789012", + TenantId: "87654321-4321-4321-4321-210987654321", + SyncInterval: 300, + GroupPrefixes: []string{"eng-"}, + UserGroupPrefixes: []string{"dev-"}, + Host: "microsoft.com", + LastSyncedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), +} + +func TestAzureIDP_List_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.AzureIntegration{testAzureIntegration}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.List(context.Background()) + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testAzureIntegration, ret[0]) + }) +} + +func TestAzureIDP_List_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.List(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestAzureIDP_Get_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal(testAzureIntegration) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.Get(context.Background(), "int-1") + require.NoError(t, err) + assert.Equal(t, testAzureIntegration, *ret) + }) +} + +func TestAzureIDP_Get_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.Get(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestAzureIDP_Create_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.CreateAzureIntegrationRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "12345678-1234-1234-1234-123456789012", req.ClientId) + retBytes, _ := json.Marshal(testAzureIntegration) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.Create(context.Background(), api.CreateAzureIntegrationRequest{ + ClientId: "12345678-1234-1234-1234-123456789012", + ClientSecret: "secret", + TenantId: "87654321-4321-4321-4321-210987654321", + Host: api.CreateAzureIntegrationRequestHostMicrosoftCom, + GroupPrefixes: &[]string{"eng-"}, + }) + require.NoError(t, err) + assert.Equal(t, testAzureIntegration, *ret) + }) +} + +func TestAzureIDP_Create_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.Create(context.Background(), api.CreateAzureIntegrationRequest{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestAzureIDP_Update_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "PUT", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.UpdateAzureIntegrationRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, true, *req.Enabled) + retBytes, _ := json.Marshal(testAzureIntegration) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.Update(context.Background(), "int-1", api.UpdateAzureIntegrationRequest{ + Enabled: ptr(true), + }) + require.NoError(t, err) + assert.Equal(t, testAzureIntegration, *ret) + }) +} + +func TestAzureIDP_Update_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.Update(context.Background(), "int-1", api.UpdateAzureIntegrationRequest{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestAzureIDP_Delete_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.AzureIDP.Delete(context.Background(), "int-1") + require.NoError(t, err) + }) +} + +func TestAzureIDP_Delete_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.AzureIDP.Delete(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} + +func TestAzureIDP_Sync_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1/sync", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + retBytes, _ := json.Marshal(api.SyncResult{Result: ptr("ok")}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.Sync(context.Background(), "int-1") + require.NoError(t, err) + assert.Equal(t, "ok", *ret.Result) + }) +} + +func TestAzureIDP_Sync_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1/sync", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.Sync(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestAzureIDP_GetLogs_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1/logs", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.IdpIntegrationSyncLog{testSyncLog}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.GetLogs(context.Background(), "int-1") + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testSyncLog, ret[0]) + }) +} + +func TestAzureIDP_GetLogs_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1/logs", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.GetLogs(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Empty(t, ret) + }) +} diff --git a/shared/management/client/rest/client.go b/shared/management/client/rest/client.go index f308761fb..f0cb4d2d1 100644 --- a/shared/management/client/rest/client.go +++ b/shared/management/client/rest/client.go @@ -110,6 +110,15 @@ type Client struct { // see more: https://docs.netbird.io/api/resources/scim SCIM *SCIMAPI + // GoogleIDP NetBird Google Workspace IDP integration APIs + GoogleIDP *GoogleIDPAPI + + // AzureIDP NetBird Azure AD IDP integration APIs + AzureIDP *AzureIDPAPI + + // OktaScimIDP NetBird Okta SCIM IDP integration APIs + OktaScimIDP *OktaScimIDPAPI + // EventStreaming NetBird Event Streaming integration APIs // see more: https://docs.netbird.io/api/resources/event-streaming EventStreaming *EventStreamingAPI @@ -185,6 +194,9 @@ func (c *Client) initialize() { c.MSP = &MSPAPI{c} c.EDR = &EDRAPI{c} c.SCIM = &SCIMAPI{c} + c.GoogleIDP = &GoogleIDPAPI{c} + c.AzureIDP = &AzureIDPAPI{c} + c.OktaScimIDP = &OktaScimIDPAPI{c} c.EventStreaming = &EventStreamingAPI{c} c.IdentityProviders = &IdentityProvidersAPI{c} c.Ingress = &IngressAPI{c} diff --git a/shared/management/client/rest/google_idp.go b/shared/management/client/rest/google_idp.go new file mode 100644 index 000000000..b86436503 --- /dev/null +++ b/shared/management/client/rest/google_idp.go @@ -0,0 +1,112 @@ +package rest + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/netbirdio/netbird/shared/management/http/api" +) + +// GoogleIDPAPI APIs for Google Workspace IDP integrations +type GoogleIDPAPI struct { + c *Client +} + +// List retrieves all Google Workspace IDP integrations +func (a *GoogleIDPAPI) List(ctx context.Context) ([]api.GoogleIntegration, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/google-idp", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.GoogleIntegration](resp) + return ret, err +} + +// Get retrieves a specific Google Workspace IDP integration by ID +func (a *GoogleIDPAPI) Get(ctx context.Context, integrationID string) (*api.GoogleIntegration, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/google-idp/"+integrationID, nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.GoogleIntegration](resp) + return &ret, err +} + +// Create creates a new Google Workspace IDP integration +func (a *GoogleIDPAPI) Create(ctx context.Context, request api.CreateGoogleIntegrationRequest) (*api.GoogleIntegration, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/google-idp", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.GoogleIntegration](resp) + return &ret, err +} + +// Update updates an existing Google Workspace IDP integration +func (a *GoogleIDPAPI) Update(ctx context.Context, integrationID string, request api.UpdateGoogleIntegrationRequest) (*api.GoogleIntegration, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/integrations/google-idp/"+integrationID, bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.GoogleIntegration](resp) + return &ret, err +} + +// Delete deletes a Google Workspace IDP integration +func (a *GoogleIDPAPI) Delete(ctx context.Context, integrationID string) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/integrations/google-idp/"+integrationID, nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} + +// Sync triggers a manual sync for a Google Workspace IDP integration +func (a *GoogleIDPAPI) Sync(ctx context.Context, integrationID string) (*api.SyncResult, error) { + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/google-idp/"+integrationID+"/sync", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.SyncResult](resp) + return &ret, err +} + +// GetLogs retrieves synchronization logs for a Google Workspace IDP integration +func (a *GoogleIDPAPI) GetLogs(ctx context.Context, integrationID string) ([]api.IdpIntegrationSyncLog, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/google-idp/"+integrationID+"/logs", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.IdpIntegrationSyncLog](resp) + return ret, err +} diff --git a/shared/management/client/rest/google_idp_test.go b/shared/management/client/rest/google_idp_test.go new file mode 100644 index 000000000..03a6c161e --- /dev/null +++ b/shared/management/client/rest/google_idp_test.go @@ -0,0 +1,248 @@ +//go:build integration + +package rest_test + +import ( + "context" + "encoding/json" + "io" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/shared/management/client/rest" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" +) + +var testGoogleIntegration = api.GoogleIntegration{ + Id: 1, + Enabled: true, + CustomerId: "C01234567", + SyncInterval: 300, + GroupPrefixes: []string{"eng-"}, + UserGroupPrefixes: []string{"dev-"}, + LastSyncedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), +} + +func TestGoogleIDP_List_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.GoogleIntegration{testGoogleIntegration}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.List(context.Background()) + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testGoogleIntegration, ret[0]) + }) +} + +func TestGoogleIDP_List_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.List(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestGoogleIDP_Get_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal(testGoogleIntegration) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.Get(context.Background(), "int-1") + require.NoError(t, err) + assert.Equal(t, testGoogleIntegration, *ret) + }) +} + +func TestGoogleIDP_Get_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.Get(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestGoogleIDP_Create_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.CreateGoogleIntegrationRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "C01234567", req.CustomerId) + retBytes, _ := json.Marshal(testGoogleIntegration) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.Create(context.Background(), api.CreateGoogleIntegrationRequest{ + CustomerId: "C01234567", + ServiceAccountKey: "key-data", + GroupPrefixes: &[]string{"eng-"}, + }) + require.NoError(t, err) + assert.Equal(t, testGoogleIntegration, *ret) + }) +} + +func TestGoogleIDP_Create_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.Create(context.Background(), api.CreateGoogleIntegrationRequest{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestGoogleIDP_Update_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "PUT", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.UpdateGoogleIntegrationRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, true, *req.Enabled) + retBytes, _ := json.Marshal(testGoogleIntegration) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.Update(context.Background(), "int-1", api.UpdateGoogleIntegrationRequest{ + Enabled: ptr(true), + }) + require.NoError(t, err) + assert.Equal(t, testGoogleIntegration, *ret) + }) +} + +func TestGoogleIDP_Update_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.Update(context.Background(), "int-1", api.UpdateGoogleIntegrationRequest{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestGoogleIDP_Delete_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.GoogleIDP.Delete(context.Background(), "int-1") + require.NoError(t, err) + }) +} + +func TestGoogleIDP_Delete_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.GoogleIDP.Delete(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} + +func TestGoogleIDP_Sync_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1/sync", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + retBytes, _ := json.Marshal(api.SyncResult{Result: ptr("ok")}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.Sync(context.Background(), "int-1") + require.NoError(t, err) + assert.Equal(t, "ok", *ret.Result) + }) +} + +func TestGoogleIDP_Sync_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1/sync", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.Sync(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestGoogleIDP_GetLogs_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1/logs", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.IdpIntegrationSyncLog{testSyncLog}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.GetLogs(context.Background(), "int-1") + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testSyncLog, ret[0]) + }) +} + +func TestGoogleIDP_GetLogs_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1/logs", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.GetLogs(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Empty(t, ret) + }) +} diff --git a/shared/management/client/rest/okta_scim_idp.go b/shared/management/client/rest/okta_scim_idp.go new file mode 100644 index 000000000..eb677dae8 --- /dev/null +++ b/shared/management/client/rest/okta_scim_idp.go @@ -0,0 +1,112 @@ +package rest + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/netbirdio/netbird/shared/management/http/api" +) + +// OktaScimIDPAPI APIs for Okta SCIM IDP integrations +type OktaScimIDPAPI struct { + c *Client +} + +// List retrieves all Okta SCIM IDP integrations +func (a *OktaScimIDPAPI) List(ctx context.Context) ([]api.OktaScimIntegration, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/okta-scim-idp", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.OktaScimIntegration](resp) + return ret, err +} + +// Get retrieves a specific Okta SCIM IDP integration by ID +func (a *OktaScimIDPAPI) Get(ctx context.Context, integrationID string) (*api.OktaScimIntegration, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/okta-scim-idp/"+integrationID, nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.OktaScimIntegration](resp) + return &ret, err +} + +// Create creates a new Okta SCIM IDP integration +func (a *OktaScimIDPAPI) Create(ctx context.Context, request api.CreateOktaScimIntegrationRequest) (*api.OktaScimIntegration, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/okta-scim-idp", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.OktaScimIntegration](resp) + return &ret, err +} + +// Update updates an existing Okta SCIM IDP integration +func (a *OktaScimIDPAPI) Update(ctx context.Context, integrationID string, request api.UpdateOktaScimIntegrationRequest) (*api.OktaScimIntegration, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/integrations/okta-scim-idp/"+integrationID, bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.OktaScimIntegration](resp) + return &ret, err +} + +// Delete deletes an Okta SCIM IDP integration +func (a *OktaScimIDPAPI) Delete(ctx context.Context, integrationID string) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/integrations/okta-scim-idp/"+integrationID, nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} + +// RegenerateToken regenerates the SCIM API token for an Okta SCIM integration +func (a *OktaScimIDPAPI) RegenerateToken(ctx context.Context, integrationID string) (*api.ScimTokenResponse, error) { + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/okta-scim-idp/"+integrationID+"/token", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.ScimTokenResponse](resp) + return &ret, err +} + +// GetLogs retrieves synchronization logs for an Okta SCIM IDP integration +func (a *OktaScimIDPAPI) GetLogs(ctx context.Context, integrationID string) ([]api.IdpIntegrationSyncLog, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/okta-scim-idp/"+integrationID+"/logs", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.IdpIntegrationSyncLog](resp) + return ret, err +} diff --git a/shared/management/client/rest/okta_scim_idp_test.go b/shared/management/client/rest/okta_scim_idp_test.go new file mode 100644 index 000000000..d8d1f2b51 --- /dev/null +++ b/shared/management/client/rest/okta_scim_idp_test.go @@ -0,0 +1,246 @@ +//go:build integration + +package rest_test + +import ( + "context" + "encoding/json" + "io" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/shared/management/client/rest" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" +) + +var testOktaScimIntegration = api.OktaScimIntegration{ + Id: 1, + AuthToken: "****", + Enabled: true, + GroupPrefixes: []string{"eng-"}, + UserGroupPrefixes: []string{"dev-"}, + LastSyncedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), +} + +func TestOktaScimIDP_List_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.OktaScimIntegration{testOktaScimIntegration}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.List(context.Background()) + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testOktaScimIntegration, ret[0]) + }) +} + +func TestOktaScimIDP_List_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.List(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestOktaScimIDP_Get_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal(testOktaScimIntegration) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.Get(context.Background(), "int-1") + require.NoError(t, err) + assert.Equal(t, testOktaScimIntegration, *ret) + }) +} + +func TestOktaScimIDP_Get_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.Get(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestOktaScimIDP_Create_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.CreateOktaScimIntegrationRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "my-okta-connection", req.ConnectionName) + retBytes, _ := json.Marshal(testOktaScimIntegration) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.Create(context.Background(), api.CreateOktaScimIntegrationRequest{ + ConnectionName: "my-okta-connection", + GroupPrefixes: &[]string{"eng-"}, + }) + require.NoError(t, err) + assert.Equal(t, testOktaScimIntegration, *ret) + }) +} + +func TestOktaScimIDP_Create_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.Create(context.Background(), api.CreateOktaScimIntegrationRequest{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestOktaScimIDP_Update_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "PUT", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.UpdateOktaScimIntegrationRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, true, *req.Enabled) + retBytes, _ := json.Marshal(testOktaScimIntegration) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.Update(context.Background(), "int-1", api.UpdateOktaScimIntegrationRequest{ + Enabled: ptr(true), + }) + require.NoError(t, err) + assert.Equal(t, testOktaScimIntegration, *ret) + }) +} + +func TestOktaScimIDP_Update_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.Update(context.Background(), "int-1", api.UpdateOktaScimIntegrationRequest{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestOktaScimIDP_Delete_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.OktaScimIDP.Delete(context.Background(), "int-1") + require.NoError(t, err) + }) +} + +func TestOktaScimIDP_Delete_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.OktaScimIDP.Delete(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} + +func TestOktaScimIDP_RegenerateToken_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1/token", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + retBytes, _ := json.Marshal(testScimToken) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.RegenerateToken(context.Background(), "int-1") + require.NoError(t, err) + assert.Equal(t, testScimToken, *ret) + }) +} + +func TestOktaScimIDP_RegenerateToken_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1/token", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.RegenerateToken(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestOktaScimIDP_GetLogs_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1/logs", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.IdpIntegrationSyncLog{testSyncLog}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.GetLogs(context.Background(), "int-1") + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testSyncLog, ret[0]) + }) +} + +func TestOktaScimIDP_GetLogs_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1/logs", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.GetLogs(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Empty(t, ret) + }) +} diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 519d3ca12..833468676 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -68,8 +68,17 @@ tags: - name: MSP description: MSP portal for Tenant management. x-cloud-only: true - - name: IDP - description: Manage identity provider integrations for user and group sync. + - name: IDP SCIM Integrations + description: Manage generic SCIM identity provider integrations for user and group sync. + x-cloud-only: true + - name: IDP Google Integrations + description: Manage Google Workspace identity provider integrations for user and group sync. + x-cloud-only: true + - name: IDP Azure Integrations + description: Manage Azure AD identity provider integrations for user and group sync. + x-cloud-only: true + - name: IDP Okta SCIM Integrations + description: Manage Okta SCIM identity provider integrations for user and group sync. x-cloud-only: true - name: EDR Intune Integrations description: Manage Microsoft Intune EDR integrations. @@ -4267,96 +4276,89 @@ components: description: Status of agent firewall. Can be one of Disabled, Enabled, Pending Isolation, Isolated, Pending Release. example: "Enabled" + IntegrationSyncFilters: + type: object + properties: + group_prefixes: + type: array + description: List of start_with string patterns for groups to sync + items: + type: string + example: [ "Engineering", "Sales" ] + user_group_prefixes: + type: array + description: List of start_with string patterns for groups which users to sync + items: + type: string + example: [ "Users" ] + IntegrationEnabled: + type: object + properties: + enabled: + type: boolean + description: Whether the integration is enabled + example: true CreateScimIntegrationRequest: - type: object - description: Request payload for creating an SCIM IDP integration - required: - - prefix - - provider - properties: - prefix: - type: string - description: The connection prefix used for the SCIM provider - provider: - type: string - description: Name of the SCIM identity provider - group_prefixes: - type: array - description: List of start_with string patterns for groups to sync - items: - type: string - example: [ "Engineering", "Sales" ] - user_group_prefixes: - type: array - description: List of start_with string patterns for groups which users to sync - items: - type: string - example: [ "Users" ] + allOf: + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Request payload for creating an SCIM IDP integration + required: + - prefix + - provider + properties: + prefix: + type: string + description: The connection prefix used for the SCIM provider + provider: + type: string + description: Name of the SCIM identity provider UpdateScimIntegrationRequest: - type: object - description: Request payload for updating an SCIM IDP integration - properties: - enabled: - type: boolean - description: Indicates whether the integration is enabled - example: true - group_prefixes: - type: array - description: List of start_with string patterns for groups to sync - items: - type: string - example: [ "Engineering", "Sales" ] - user_group_prefixes: - type: array - description: List of start_with string patterns for groups which users to sync - items: - type: string - example: [ "Users" ] + allOf: + - $ref: '#/components/schemas/IntegrationEnabled' + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Request payload for updating an SCIM IDP integration + properties: + prefix: + type: string + description: The connection prefix used for the SCIM provider ScimIntegration: - type: object - description: Represents a SCIM IDP integration - required: - - id - - enabled - - provider - - group_prefixes - - user_group_prefixes - - auth_token - - last_synced_at - properties: - id: - type: integer - format: int64 - description: The unique identifier for the integration - example: 123 - enabled: - type: boolean - description: Indicates whether the integration is enabled - example: true - provider: - type: string - description: Name of the SCIM identity provider - group_prefixes: - type: array - description: List of start_with string patterns for groups to sync - items: - type: string - example: [ "Engineering", "Sales" ] - user_group_prefixes: - type: array - description: List of start_with string patterns for groups which users to sync - items: - type: string - example: [ "Users" ] - auth_token: - type: string - description: SCIM API token (full on creation, masked otherwise) - example: "nbs_abc***********************************" - last_synced_at: - type: string - format: date-time - description: Timestamp of when the integration was last synced - example: "2023-05-15T10:30:00Z" + allOf: + - $ref: '#/components/schemas/IntegrationEnabled' + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Represents a SCIM IDP integration + required: + - id + - enabled + - prefix + - provider + - group_prefixes + - user_group_prefixes + - auth_token + - last_synced_at + properties: + id: + type: integer + format: int64 + description: The unique identifier for the integration + example: 123 + prefix: + type: string + description: The connection prefix used for the SCIM provider + provider: + type: string + description: Name of the SCIM identity provider + auth_token: + type: string + description: SCIM API token (full on creation, masked otherwise) + example: "nbs_abc***********************************" + last_synced_at: + type: string + format: date-time + description: Timestamp of when the integration was last synced + example: "2023-05-15T10:30:00Z" IdpIntegrationSyncLog: type: object description: Represents a synchronization log entry for an integration @@ -4394,6 +4396,229 @@ components: type: string description: The newly generated SCIM API token example: "nbs_F3f0d..." + CreateGoogleIntegrationRequest: + allOf: + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Request payload for creating a Google Workspace IDP integration + required: + - service_account_key + - customer_id + properties: + service_account_key: + type: string + description: Base64-encoded Google service account key + example: "eyJ0eXBlIjoic2VydmljZV9hY2NvdW50Ii..." + customer_id: + type: string + description: Customer ID from Google Workspace Account Settings + example: "C01234567" + sync_interval: + type: integer + description: Sync interval in seconds (minimum 300). Defaults to 300 if not specified. + minimum: 300 + example: 300 + UpdateGoogleIntegrationRequest: + allOf: + - $ref: '#/components/schemas/IntegrationEnabled' + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Request payload for updating a Google Workspace IDP integration. All fields are optional. + properties: + service_account_key: + type: string + description: Base64-encoded Google service account key + customer_id: + type: string + description: Customer ID from Google Workspace Account Settings + sync_interval: + type: integer + description: Sync interval in seconds (minimum 300) + minimum: 300 + GoogleIntegration: + allOf: + - $ref: '#/components/schemas/IntegrationEnabled' + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Represents a Google Workspace IDP integration + required: + - id + - customer_id + - sync_interval + - enabled + - group_prefixes + - user_group_prefixes + - last_synced_at + properties: + id: + type: integer + format: int64 + description: The unique identifier for the integration + example: 1 + customer_id: + type: string + description: Customer ID from Google Workspace + example: "C01234567" + sync_interval: + type: integer + description: Sync interval in seconds + example: 300 + last_synced_at: + type: string + format: date-time + description: Timestamp of the last synchronization + example: "2023-05-15T10:30:00Z" + CreateAzureIntegrationRequest: + allOf: + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Request payload for creating an Azure AD IDP integration + required: + - client_secret + - client_id + - tenant_id + - host + properties: + client_secret: + type: string + description: Base64-encoded Azure AD client secret + example: "c2VjcmV0..." + client_id: + type: string + description: Azure AD application (client) ID + example: "12345678-1234-1234-1234-123456789012" + tenant_id: + type: string + description: Azure AD tenant ID + example: "87654321-4321-4321-4321-210987654321" + sync_interval: + type: integer + description: Sync interval in seconds (minimum 300). Defaults to 300 if not specified. + minimum: 300 + example: 300 + host: + type: string + description: Azure host domain for the Graph API + enum: + - microsoft.com + - microsoft.us + example: "microsoft.com" + UpdateAzureIntegrationRequest: + allOf: + - $ref: '#/components/schemas/IntegrationEnabled' + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Request payload for updating an Azure AD IDP integration. All fields are optional. + properties: + client_secret: + type: string + description: Base64-encoded Azure AD client secret + client_id: + type: string + description: Azure AD application (client) ID + tenant_id: + type: string + description: Azure AD tenant ID + sync_interval: + type: integer + description: Sync interval in seconds (minimum 300) + minimum: 300 + AzureIntegration: + allOf: + - $ref: '#/components/schemas/IntegrationEnabled' + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Represents an Azure AD IDP integration + required: + - id + - client_id + - tenant_id + - sync_interval + - enabled + - group_prefixes + - user_group_prefixes + - host + - last_synced_at + properties: + id: + type: integer + format: int64 + description: The unique identifier for the integration + example: 1 + client_id: + type: string + description: Azure AD application (client) ID + example: "12345678-1234-1234-1234-123456789012" + tenant_id: + type: string + description: Azure AD tenant ID + example: "87654321-4321-4321-4321-210987654321" + sync_interval: + type: integer + description: Sync interval in seconds + example: 300 + host: + type: string + description: Azure host domain for the Graph API + example: "microsoft.com" + last_synced_at: + type: string + format: date-time + description: Timestamp of the last synchronization + example: "2023-05-15T10:30:00Z" + CreateOktaScimIntegrationRequest: + allOf: + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Request payload for creating an Okta SCIM IDP integration + required: + - connection_name + properties: + connection_name: + type: string + description: The Okta enterprise connection name on Auth0 + example: "my-okta-connection" + UpdateOktaScimIntegrationRequest: + allOf: + - $ref: '#/components/schemas/IntegrationEnabled' + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Request payload for updating an Okta SCIM IDP integration. All fields are optional. + OktaScimIntegration: + allOf: + - $ref: '#/components/schemas/IntegrationEnabled' + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Represents an Okta SCIM IDP integration + required: + - id + - enabled + - group_prefixes + - user_group_prefixes + - auth_token + - last_synced_at + properties: + id: + type: integer + format: int64 + description: The unique identifier for the integration + example: 1 + auth_token: + type: string + description: SCIM API token (full on creation/regeneration, masked on retrieval) + example: "nbs_abc***********************************" + last_synced_at: + type: string + format: date-time + description: Timestamp of the last synchronization + example: "2023-05-15T10:30:00Z" + SyncResult: + type: object + description: Response for a manual sync trigger + properties: + result: + type: string + example: "ok" NotificationChannelType: type: string description: The type of notification channel. @@ -9147,10 +9372,877 @@ paths: schema: $ref: '#/components/schemas/ErrorResponse' + /api/integrations/google-idp: + post: + tags: + - IDP Google Integrations + summary: Create Google IDP Integration + description: Creates a new Google Workspace IDP integration + operationId: createGoogleIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateGoogleIntegrationRequest' + responses: + '200': + description: Integration created successfully. Returns the created integration. + content: + application/json: + schema: + $ref: '#/components/schemas/GoogleIntegration' + '400': + description: Bad Request (e.g., invalid JSON, missing required fields, validation error). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized (e.g., missing or invalid authentication token). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + get: + tags: + - IDP Google Integrations + summary: Get All Google IDP Integrations + description: Retrieves all Google Workspace IDP integrations for the authenticated account + operationId: getAllGoogleIntegrations + responses: + '200': + description: A list of Google IDP integrations. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/GoogleIntegration' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/google-idp/{id}: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the Google IDP integration. + schema: + type: integer + format: int64 + example: 1 + get: + tags: + - IDP Google Integrations + summary: Get Google IDP Integration + description: Retrieves a Google IDP integration by ID. + operationId: getGoogleIntegration + responses: + '200': + description: Successfully retrieved the integration details. + content: + application/json: + schema: + $ref: '#/components/schemas/GoogleIntegration' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found (e.g., integration with the given ID does not exist). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + put: + tags: + - IDP Google Integrations + summary: Update Google IDP Integration + description: Updates an existing Google Workspace IDP integration. + operationId: updateGoogleIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateGoogleIntegrationRequest' + responses: + '200': + description: Integration updated successfully. Returns the updated integration. + content: + application/json: + schema: + $ref: '#/components/schemas/GoogleIntegration' + '400': + description: Bad Request (e.g., invalid JSON, validation error, invalid ID). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - IDP Google Integrations + summary: Delete Google IDP Integration + description: Deletes a Google IDP integration by ID. + operationId: deleteGoogleIntegration + responses: + '200': + description: Integration deleted successfully. Returns an empty object. + content: + application/json: + schema: + type: object + example: { } + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/google-idp/{id}/sync: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the Google IDP integration. + schema: + type: integer + format: int64 + example: 1 + post: + tags: + - IDP Google Integrations + summary: Sync Google IDP Integration + description: Triggers a manual synchronization for a Google IDP integration. + operationId: syncGoogleIntegration + responses: + '200': + description: Sync triggered successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/SyncResult' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/google-idp/{id}/logs: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the Google IDP integration. + schema: + type: integer + format: int64 + example: 1 + get: + tags: + - IDP Google Integrations + summary: Get Google Integration Sync Logs + description: Retrieves synchronization logs for a Google IDP integration. + operationId: getGoogleIntegrationLogs + responses: + '200': + description: Successfully retrieved the integration sync logs. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/IdpIntegrationSyncLog' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/azure-idp: + post: + tags: + - IDP Azure Integrations + summary: Create Azure IDP Integration + description: Creates a new Azure AD IDP integration + operationId: createAzureIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateAzureIntegrationRequest' + responses: + '200': + description: Integration created successfully. Returns the created integration. + content: + application/json: + schema: + $ref: '#/components/schemas/AzureIntegration' + '400': + description: Bad Request (e.g., invalid JSON, missing required fields, validation error). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized (e.g., missing or invalid authentication token). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + get: + tags: + - IDP Azure Integrations + summary: Get All Azure IDP Integrations + description: Retrieves all Azure AD IDP integrations for the authenticated account + operationId: getAllAzureIntegrations + responses: + '200': + description: A list of Azure IDP integrations. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/AzureIntegration' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/azure-idp/{id}: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the Azure IDP integration. + schema: + type: integer + format: int64 + example: 1 + get: + tags: + - IDP Azure Integrations + summary: Get Azure IDP Integration + description: Retrieves an Azure IDP integration by ID. + operationId: getAzureIntegration + responses: + '200': + description: Successfully retrieved the integration details. + content: + application/json: + schema: + $ref: '#/components/schemas/AzureIntegration' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found (e.g., integration with the given ID does not exist). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + put: + tags: + - IDP Azure Integrations + summary: Update Azure IDP Integration + description: Updates an existing Azure AD IDP integration. + operationId: updateAzureIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateAzureIntegrationRequest' + responses: + '200': + description: Integration updated successfully. Returns the updated integration. + content: + application/json: + schema: + $ref: '#/components/schemas/AzureIntegration' + '400': + description: Bad Request (e.g., invalid JSON, validation error, invalid ID). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - IDP Azure Integrations + summary: Delete Azure IDP Integration + description: Deletes an Azure IDP integration by ID. + operationId: deleteAzureIntegration + responses: + '200': + description: Integration deleted successfully. Returns an empty object. + content: + application/json: + schema: + type: object + example: { } + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/azure-idp/{id}/sync: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the Azure IDP integration. + schema: + type: integer + format: int64 + example: 1 + post: + tags: + - IDP Azure Integrations + summary: Sync Azure IDP Integration + description: Triggers a manual synchronization for an Azure IDP integration. + operationId: syncAzureIntegration + responses: + '200': + description: Sync triggered successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/SyncResult' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/azure-idp/{id}/logs: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the Azure IDP integration. + schema: + type: integer + format: int64 + example: 1 + get: + tags: + - IDP Azure Integrations + summary: Get Azure Integration Sync Logs + description: Retrieves synchronization logs for an Azure IDP integration. + operationId: getAzureIntegrationLogs + responses: + '200': + description: Successfully retrieved the integration sync logs. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/IdpIntegrationSyncLog' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/okta-scim-idp: + post: + tags: + - IDP Okta SCIM Integrations + summary: Create Okta SCIM IDP Integration + description: Creates a new Okta SCIM IDP integration + operationId: createOktaScimIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateOktaScimIntegrationRequest' + responses: + '200': + description: Integration created successfully. Returns the created integration. + content: + application/json: + schema: + $ref: '#/components/schemas/OktaScimIntegration' + '400': + description: Bad Request (e.g., invalid JSON, missing required fields, validation error). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized (e.g., missing or invalid authentication token). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + get: + tags: + - IDP Okta SCIM Integrations + summary: Get All Okta SCIM IDP Integrations + description: Retrieves all Okta SCIM IDP integrations for the authenticated account + operationId: getAllOktaScimIntegrations + responses: + '200': + description: A list of Okta SCIM IDP integrations. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/OktaScimIntegration' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/okta-scim-idp/{id}: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the Okta SCIM IDP integration. + schema: + type: integer + format: int64 + example: 1 + get: + tags: + - IDP Okta SCIM Integrations + summary: Get Okta SCIM IDP Integration + description: Retrieves an Okta SCIM IDP integration by ID. + operationId: getOktaScimIntegration + responses: + '200': + description: Successfully retrieved the integration details. + content: + application/json: + schema: + $ref: '#/components/schemas/OktaScimIntegration' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found (e.g., integration with the given ID does not exist). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + put: + tags: + - IDP Okta SCIM Integrations + summary: Update Okta SCIM IDP Integration + description: Updates an existing Okta SCIM IDP integration. + operationId: updateOktaScimIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateOktaScimIntegrationRequest' + responses: + '200': + description: Integration updated successfully. Returns the updated integration. + content: + application/json: + schema: + $ref: '#/components/schemas/OktaScimIntegration' + '400': + description: Bad Request (e.g., invalid JSON, validation error, invalid ID). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - IDP Okta SCIM Integrations + summary: Delete Okta SCIM IDP Integration + description: Deletes an Okta SCIM IDP integration by ID. + operationId: deleteOktaScimIntegration + responses: + '200': + description: Integration deleted successfully. Returns an empty object. + content: + application/json: + schema: + type: object + example: { } + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/okta-scim-idp/{id}/token: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the Okta SCIM IDP integration. + schema: + type: integer + format: int64 + example: 1 + post: + tags: + - IDP Okta SCIM Integrations + summary: Regenerate Okta SCIM Token + description: Regenerates the SCIM API token for an Okta SCIM IDP integration. + operationId: regenerateOktaScimToken + responses: + '200': + description: Token regenerated successfully. Returns the new token. + content: + application/json: + schema: + $ref: '#/components/schemas/ScimTokenResponse' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/okta-scim-idp/{id}/logs: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the Okta SCIM IDP integration. + schema: + type: integer + format: int64 + example: 1 + get: + tags: + - IDP Okta SCIM Integrations + summary: Get Okta SCIM Integration Sync Logs + description: Retrieves synchronization logs for an Okta SCIM IDP integration. + operationId: getOktaScimIntegrationLogs + responses: + '200': + description: Successfully retrieved the integration sync logs. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/IdpIntegrationSyncLog' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' /api/integrations/scim-idp: post: tags: - - IDP + - IDP SCIM Integrations summary: Create SCIM IDP Integration description: Creates a new SCIM integration operationId: createSCIMIntegration @@ -9187,7 +10279,7 @@ paths: $ref: '#/components/schemas/ErrorResponse' get: tags: - - IDP + - IDP SCIM Integrations summary: Get All SCIM IDP Integrations description: Retrieves all SCIM IDP integrations for the authenticated account operationId: getAllSCIMIntegrations @@ -9219,11 +10311,12 @@ paths: required: true description: The unique identifier of the SCIM IDP integration. schema: - type: string - example: "ch8i4ug6lnn4g9hqv7m0" + type: integer + format: int64 + example: 1 get: tags: - - IDP + - IDP SCIM Integrations summary: Get SCIM IDP Integration description: Retrieves an SCIM IDP integration by ID. operationId: getSCIMIntegration @@ -9260,7 +10353,7 @@ paths: $ref: '#/components/schemas/ErrorResponse' put: tags: - - IDP + - IDP SCIM Integrations summary: Update SCIM IDP Integration description: Updates an existing SCIM IDP Integration. operationId: updateSCIMIntegration @@ -9303,7 +10396,7 @@ paths: $ref: '#/components/schemas/ErrorResponse' delete: tags: - - IDP + - IDP SCIM Integrations summary: Delete SCIM IDP Integration description: Deletes an SCIM IDP integration by ID. operationId: deleteSCIMIntegration @@ -9346,11 +10439,12 @@ paths: required: true description: The unique identifier of the SCIM IDP integration. schema: - type: string - example: "ch8i4ug6lnn4g9hqv7m0" + type: integer + format: int64 + example: 1 post: tags: - - IDP + - IDP SCIM Integrations summary: Regenerate SCIM Token description: Regenerates the SCIM API token for an SCIM IDP integration. operationId: regenerateSCIMToken @@ -9392,11 +10486,12 @@ paths: required: true description: The unique identifier of the SCIM IDP integration. schema: - type: string - example: "ch8i4ug6lnn4g9hqv7m0" + type: integer + format: int64 + example: 1 get: tags: - - IDP + - IDP SCIM Integrations summary: Get SCIM Integration Sync Logs description: Retrieves synchronization logs for a SCIM IDP integration. operationId: getSCIMIntegrationLogs diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index 84ee125b1..fb9976c89 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -17,6 +17,24 @@ const ( TokenAuthScopes = "TokenAuth.Scopes" ) +// Defines values for CreateAzureIntegrationRequestHost. +const ( + CreateAzureIntegrationRequestHostMicrosoftCom CreateAzureIntegrationRequestHost = "microsoft.com" + CreateAzureIntegrationRequestHostMicrosoftUs CreateAzureIntegrationRequestHost = "microsoft.us" +) + +// Valid indicates whether the value is a known member of the CreateAzureIntegrationRequestHost enum. +func (e CreateAzureIntegrationRequestHost) Valid() bool { + switch e { + case CreateAzureIntegrationRequestHostMicrosoftCom: + return true + case CreateAzureIntegrationRequestHostMicrosoftUs: + return true + default: + return false + } +} + // Defines values for CreateIntegrationRequestPlatform. const ( CreateIntegrationRequestPlatformDatadog CreateIntegrationRequestPlatform = "datadog" @@ -1469,6 +1487,36 @@ type AvailablePorts struct { Udp int `json:"udp"` } +// AzureIntegration defines model for AzureIntegration. +type AzureIntegration struct { + // ClientId Azure AD application (client) ID + ClientId string `json:"client_id"` + + // Enabled Whether the integration is enabled + Enabled bool `json:"enabled"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes []string `json:"group_prefixes"` + + // Host Azure host domain for the Graph API + Host string `json:"host"` + + // Id The unique identifier for the integration + Id int64 `json:"id"` + + // LastSyncedAt Timestamp of the last synchronization + LastSyncedAt time.Time `json:"last_synced_at"` + + // SyncInterval Sync interval in seconds + SyncInterval int `json:"sync_interval"` + + // TenantId Azure AD tenant ID + TenantId string `json:"tenant_id"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes []string `json:"user_group_prefixes"` +} + // BearerAuthConfig defines model for BearerAuthConfig. type BearerAuthConfig struct { // DistributionGroups List of group IDs that can use bearer auth @@ -1576,6 +1624,51 @@ type Country struct { // CountryCode 2-letter ISO 3166-1 alpha-2 code that represents the country type CountryCode = string +// CreateAzureIntegrationRequest defines model for CreateAzureIntegrationRequest. +type CreateAzureIntegrationRequest struct { + // ClientId Azure AD application (client) ID + ClientId string `json:"client_id"` + + // ClientSecret Base64-encoded Azure AD client secret + ClientSecret string `json:"client_secret"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes *[]string `json:"group_prefixes,omitempty"` + + // Host Azure host domain for the Graph API + Host CreateAzureIntegrationRequestHost `json:"host"` + + // SyncInterval Sync interval in seconds (minimum 300). Defaults to 300 if not specified. + SyncInterval *int `json:"sync_interval,omitempty"` + + // TenantId Azure AD tenant ID + TenantId string `json:"tenant_id"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes *[]string `json:"user_group_prefixes,omitempty"` +} + +// CreateAzureIntegrationRequestHost Azure host domain for the Graph API +type CreateAzureIntegrationRequestHost string + +// CreateGoogleIntegrationRequest defines model for CreateGoogleIntegrationRequest. +type CreateGoogleIntegrationRequest struct { + // CustomerId Customer ID from Google Workspace Account Settings + CustomerId string `json:"customer_id"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes *[]string `json:"group_prefixes,omitempty"` + + // ServiceAccountKey Base64-encoded Google service account key + ServiceAccountKey string `json:"service_account_key"` + + // SyncInterval Sync interval in seconds (minimum 300). Defaults to 300 if not specified. + SyncInterval *int `json:"sync_interval,omitempty"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes *[]string `json:"user_group_prefixes,omitempty"` +} + // CreateIntegrationRequest Request payload for creating a new event streaming integration. Also used as the structure for the PUT request body, but not all fields are applicable for updates (see PUT operation description). type CreateIntegrationRequest struct { // Config Platform-specific configuration as key-value pairs. For creation, all necessary credentials and settings must be provided. For updates, provide the fields to change or the entire new configuration. @@ -1591,7 +1684,19 @@ type CreateIntegrationRequest struct { // CreateIntegrationRequestPlatform The event streaming platform to integrate with (e.g., "datadog", "s3", "firehose"). This field is used for creation. For updates (PUT), this field, if sent, is ignored by the backend. type CreateIntegrationRequestPlatform string -// CreateScimIntegrationRequest Request payload for creating an SCIM IDP integration +// CreateOktaScimIntegrationRequest defines model for CreateOktaScimIntegrationRequest. +type CreateOktaScimIntegrationRequest struct { + // ConnectionName The Okta enterprise connection name on Auth0 + ConnectionName string `json:"connection_name"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes *[]string `json:"group_prefixes,omitempty"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes *[]string `json:"user_group_prefixes,omitempty"` +} + +// CreateScimIntegrationRequest defines model for CreateScimIntegrationRequest. type CreateScimIntegrationRequest struct { // GroupPrefixes List of start_with string patterns for groups to sync GroupPrefixes *[]string `json:"group_prefixes,omitempty"` @@ -1972,6 +2077,30 @@ type GeoLocationCheckAction string // GetTenantsResponse defines model for GetTenantsResponse. type GetTenantsResponse = []TenantResponse +// GoogleIntegration defines model for GoogleIntegration. +type GoogleIntegration struct { + // CustomerId Customer ID from Google Workspace + CustomerId string `json:"customer_id"` + + // Enabled Whether the integration is enabled + Enabled bool `json:"enabled"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes []string `json:"group_prefixes"` + + // Id The unique identifier for the integration + Id int64 `json:"id"` + + // LastSyncedAt Timestamp of the last synchronization + LastSyncedAt time.Time `json:"last_synced_at"` + + // SyncInterval Sync interval in seconds + SyncInterval int `json:"sync_interval"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes []string `json:"user_group_prefixes"` +} + // Group defines model for Group. type Group struct { // Id Group ID @@ -2263,6 +2392,12 @@ type InstanceVersionInfo struct { ManagementUpdateAvailable bool `json:"management_update_available"` } +// IntegrationEnabled defines model for IntegrationEnabled. +type IntegrationEnabled struct { + // Enabled Whether the integration is enabled + Enabled *bool `json:"enabled,omitempty"` +} + // IntegrationResponse Represents an event streaming integration. type IntegrationResponse struct { // AccountId The identifier of the account this integration belongs to. @@ -2290,6 +2425,15 @@ type IntegrationResponse struct { // IntegrationResponsePlatform The event streaming platform. type IntegrationResponsePlatform string +// IntegrationSyncFilters defines model for IntegrationSyncFilters. +type IntegrationSyncFilters struct { + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes *[]string `json:"group_prefixes,omitempty"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes *[]string `json:"user_group_prefixes,omitempty"` +} + // InvoicePDFResponse defines model for InvoicePDFResponse. type InvoicePDFResponse struct { // Url URL to redirect the user to invoice. @@ -2770,6 +2914,27 @@ type OSVersionCheck struct { Windows *MinKernelVersionCheck `json:"windows,omitempty"` } +// OktaScimIntegration defines model for OktaScimIntegration. +type OktaScimIntegration struct { + // AuthToken SCIM API token (full on creation/regeneration, masked on retrieval) + AuthToken string `json:"auth_token"` + + // Enabled Whether the integration is enabled + Enabled bool `json:"enabled"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes []string `json:"group_prefixes"` + + // Id The unique identifier for the integration + Id int64 `json:"id"` + + // LastSyncedAt Timestamp of the last synchronization + LastSyncedAt time.Time `json:"last_synced_at"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes []string `json:"user_group_prefixes"` +} + // PINAuthConfig defines model for PINAuthConfig. type PINAuthConfig struct { // Enabled Whether PIN auth is enabled @@ -3619,12 +3784,12 @@ type RulePortRange struct { Start int `json:"start"` } -// ScimIntegration Represents a SCIM IDP integration +// ScimIntegration defines model for ScimIntegration. type ScimIntegration struct { // AuthToken SCIM API token (full on creation, masked otherwise) AuthToken string `json:"auth_token"` - // Enabled Indicates whether the integration is enabled + // Enabled Whether the integration is enabled Enabled bool `json:"enabled"` // GroupPrefixes List of start_with string patterns for groups to sync @@ -3636,6 +3801,9 @@ type ScimIntegration struct { // LastSyncedAt Timestamp of when the integration was last synced LastSyncedAt time.Time `json:"last_synced_at"` + // Prefix The connection prefix used for the SCIM provider + Prefix string `json:"prefix"` + // Provider Name of the SCIM identity provider Provider string `json:"provider"` @@ -4040,6 +4208,11 @@ type Subscription struct { UpdatedAt time.Time `json:"updated_at"` } +// SyncResult Response for a manual sync trigger +type SyncResult struct { + Result *string `json:"result,omitempty"` +} + // TenantGroupResponse defines model for TenantGroupResponse. type TenantGroupResponse struct { // Id The Group ID @@ -4085,14 +4258,74 @@ type TenantResponse struct { // TenantResponseStatus The status of the tenant type TenantResponseStatus string -// UpdateScimIntegrationRequest Request payload for updating an SCIM IDP integration -type UpdateScimIntegrationRequest struct { - // Enabled Indicates whether the integration is enabled +// UpdateAzureIntegrationRequest defines model for UpdateAzureIntegrationRequest. +type UpdateAzureIntegrationRequest struct { + // ClientId Azure AD application (client) ID + ClientId *string `json:"client_id,omitempty"` + + // ClientSecret Base64-encoded Azure AD client secret + ClientSecret *string `json:"client_secret,omitempty"` + + // Enabled Whether the integration is enabled Enabled *bool `json:"enabled,omitempty"` // GroupPrefixes List of start_with string patterns for groups to sync GroupPrefixes *[]string `json:"group_prefixes,omitempty"` + // SyncInterval Sync interval in seconds (minimum 300) + SyncInterval *int `json:"sync_interval,omitempty"` + + // TenantId Azure AD tenant ID + TenantId *string `json:"tenant_id,omitempty"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes *[]string `json:"user_group_prefixes,omitempty"` +} + +// UpdateGoogleIntegrationRequest defines model for UpdateGoogleIntegrationRequest. +type UpdateGoogleIntegrationRequest struct { + // CustomerId Customer ID from Google Workspace Account Settings + CustomerId *string `json:"customer_id,omitempty"` + + // Enabled Whether the integration is enabled + Enabled *bool `json:"enabled,omitempty"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes *[]string `json:"group_prefixes,omitempty"` + + // ServiceAccountKey Base64-encoded Google service account key + ServiceAccountKey *string `json:"service_account_key,omitempty"` + + // SyncInterval Sync interval in seconds (minimum 300) + SyncInterval *int `json:"sync_interval,omitempty"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes *[]string `json:"user_group_prefixes,omitempty"` +} + +// UpdateOktaScimIntegrationRequest defines model for UpdateOktaScimIntegrationRequest. +type UpdateOktaScimIntegrationRequest struct { + // Enabled Whether the integration is enabled + Enabled *bool `json:"enabled,omitempty"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes *[]string `json:"group_prefixes,omitempty"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes *[]string `json:"user_group_prefixes,omitempty"` +} + +// UpdateScimIntegrationRequest defines model for UpdateScimIntegrationRequest. +type UpdateScimIntegrationRequest struct { + // Enabled Whether the integration is enabled + Enabled *bool `json:"enabled,omitempty"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes *[]string `json:"group_prefixes,omitempty"` + + // Prefix The connection prefix used for the SCIM provider + Prefix *string `json:"prefix,omitempty"` + // UserGroupPrefixes List of start_with string patterns for groups which users to sync UserGroupPrefixes *[]string `json:"user_group_prefixes,omitempty"` } @@ -4612,6 +4845,12 @@ type PostApiIngressPeersJSONRequestBody = IngressPeerCreateRequest // PutApiIngressPeersIngressPeerIdJSONRequestBody defines body for PutApiIngressPeersIngressPeerId for application/json ContentType. type PutApiIngressPeersIngressPeerIdJSONRequestBody = IngressPeerUpdateRequest +// CreateAzureIntegrationJSONRequestBody defines body for CreateAzureIntegration for application/json ContentType. +type CreateAzureIntegrationJSONRequestBody = CreateAzureIntegrationRequest + +// UpdateAzureIntegrationJSONRequestBody defines body for UpdateAzureIntegration for application/json ContentType. +type UpdateAzureIntegrationJSONRequestBody = UpdateAzureIntegrationRequest + // PostApiIntegrationsBillingAwsMarketplaceActivateJSONRequestBody defines body for PostApiIntegrationsBillingAwsMarketplaceActivate for application/json ContentType. type PostApiIntegrationsBillingAwsMarketplaceActivateJSONRequestBody PostApiIntegrationsBillingAwsMarketplaceActivateJSONBody @@ -4648,6 +4887,12 @@ type CreateSentinelOneEDRIntegrationJSONRequestBody = EDRSentinelOneRequest // UpdateSentinelOneEDRIntegrationJSONRequestBody defines body for UpdateSentinelOneEDRIntegration for application/json ContentType. type UpdateSentinelOneEDRIntegrationJSONRequestBody = EDRSentinelOneRequest +// CreateGoogleIntegrationJSONRequestBody defines body for CreateGoogleIntegration for application/json ContentType. +type CreateGoogleIntegrationJSONRequestBody = CreateGoogleIntegrationRequest + +// UpdateGoogleIntegrationJSONRequestBody defines body for UpdateGoogleIntegration for application/json ContentType. +type UpdateGoogleIntegrationJSONRequestBody = UpdateGoogleIntegrationRequest + // PostApiIntegrationsMspTenantsJSONRequestBody defines body for PostApiIntegrationsMspTenants for application/json ContentType. type PostApiIntegrationsMspTenantsJSONRequestBody = CreateTenantRequest @@ -4669,6 +4914,12 @@ type CreateNotificationChannelJSONRequestBody = NotificationChannelRequest // UpdateNotificationChannelJSONRequestBody defines body for UpdateNotificationChannel for application/json ContentType. type UpdateNotificationChannelJSONRequestBody = NotificationChannelRequest +// CreateOktaScimIntegrationJSONRequestBody defines body for CreateOktaScimIntegration for application/json ContentType. +type CreateOktaScimIntegrationJSONRequestBody = CreateOktaScimIntegrationRequest + +// UpdateOktaScimIntegrationJSONRequestBody defines body for UpdateOktaScimIntegration for application/json ContentType. +type UpdateOktaScimIntegrationJSONRequestBody = UpdateOktaScimIntegrationRequest + // CreateSCIMIntegrationJSONRequestBody defines body for CreateSCIMIntegration for application/json ContentType. type CreateSCIMIntegrationJSONRequestBody = CreateScimIntegrationRequest From 13807f1b3d75d06b2b513da5b7a11cfe09363fd7 Mon Sep 17 00:00:00 2001 From: tobsec <8700196+tobsec@users.noreply.github.com> Date: Mon, 30 Mar 2026 10:41:38 +0200 Subject: [PATCH 249/374] [client] Fix Exit Node submenu separator accumulation on Windows (#5691) * client/ui: fix Exit Node submenu separator accumulation on Windows On Windows the tray uses a background poller (every 10s) instead of TrayOpenedCh to keep the Exit Node menu fresh. Each poll that has a selected exit node called s.mExitNode.AddSeparator() before the "Deselect All" item. Because AddSeparator() returns no handle the separator was never removed in the cleanup pass of recreateExitNodeMenu(), while every other item (exit node checkboxes and the "Deselect All" entry) was properly tracked and removed. After the client has been running for a while with an exit node selected this leaves hundreds of separator lines stacked in the submenu, filling the screen height with blank entries (#4702). On Linux/FreeBSD this is masked because the parent mExitNode item itself is removed and recreated each cycle, wiping all children including orphaned separators. Fix: replace the untracked AddSeparator() call with a regular disabled sub-menu item that is stored in mExitNodeSeparator and removed at the start of each recreateExitNodeMenu() call alongside mExitNodeDeselectAll. Fixes #4702 * client/ui: extract addExitNodeDeselectAll to reduce cognitive complexity Move the separator + deselect-all creation and its goroutine listener out of recreateExitNodeMenu into a dedicated helper, bringing the function's cognitive complexity back under the SonarCloud threshold. --- client/ui/client_ui.go | 1 + client/ui/network.go | 52 +++++++++++++++++++++++++----------------- 2 files changed, 32 insertions(+), 21 deletions(-) diff --git a/client/ui/client_ui.go b/client/ui/client_ui.go index 0574e53d0..b1e0aec41 100644 --- a/client/ui/client_ui.go +++ b/client/ui/client_ui.go @@ -324,6 +324,7 @@ type serviceClient struct { exitNodeMu sync.Mutex mExitNodeItems []menuHandler exitNodeRetryCancel context.CancelFunc + mExitNodeSeparator *systray.MenuItem mExitNodeDeselectAll *systray.MenuItem logFile string wLoginURL fyne.Window diff --git a/client/ui/network.go b/client/ui/network.go index ed03f5ada..571e871bb 100644 --- a/client/ui/network.go +++ b/client/ui/network.go @@ -421,6 +421,10 @@ func (s *serviceClient) recreateExitNodeMenu(exitNodes []*proto.Network) { node.Remove() } s.mExitNodeItems = nil + if s.mExitNodeSeparator != nil { + s.mExitNodeSeparator.Remove() + s.mExitNodeSeparator = nil + } if s.mExitNodeDeselectAll != nil { s.mExitNodeDeselectAll.Remove() s.mExitNodeDeselectAll = nil @@ -453,31 +457,37 @@ func (s *serviceClient) recreateExitNodeMenu(exitNodes []*proto.Network) { } if showDeselectAll { - s.mExitNode.AddSeparator() - deselectAllItem := s.mExitNode.AddSubMenuItem("Deselect All", "Deselect All") - s.mExitNodeDeselectAll = deselectAllItem - go func() { - for { - _, ok := <-deselectAllItem.ClickedCh - if !ok { - // channel closed: exit the goroutine - return - } - exitNodes, err := s.handleExitNodeMenuDeselectAll() - if err != nil { - log.Warnf("failed to handle deselect all exit nodes: %v", err) - } else { - s.exitNodeMu.Lock() - s.recreateExitNodeMenu(exitNodes) - s.exitNodeMu.Unlock() - } - } - - }() + s.addExitNodeDeselectAll() } } +func (s *serviceClient) addExitNodeDeselectAll() { + sep := s.mExitNode.AddSubMenuItem("───────────────", "") + sep.Disable() + s.mExitNodeSeparator = sep + + deselectAllItem := s.mExitNode.AddSubMenuItem("Deselect All", "Deselect All") + s.mExitNodeDeselectAll = deselectAllItem + + go func() { + for { + _, ok := <-deselectAllItem.ClickedCh + if !ok { + return + } + exitNodes, err := s.handleExitNodeMenuDeselectAll() + if err != nil { + log.Warnf("failed to handle deselect all exit nodes: %v", err) + } else { + s.exitNodeMu.Lock() + s.recreateExitNodeMenu(exitNodes) + s.exitNodeMu.Unlock() + } + } + }() +} + func (s *serviceClient) getExitNodes(conn proto.DaemonServiceClient) ([]*proto.Network, error) { ctx, cancel := context.WithTimeout(s.ctx, defaultFailTimeout) defer cancel() From 0765352c99a0aaaf051e0178a3b9e7012647f1d6 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Mon, 30 Mar 2026 19:03:42 +0800 Subject: [PATCH 250/374] [management] Persist proxy capabilities to database (#5720) --- .../reverseproxy/domain/manager/manager.go | 31 ++--- .../modules/reverseproxy/proxy/manager.go | 6 +- .../reverseproxy/proxy/manager/controller.go | 11 -- .../reverseproxy/proxy/manager/manager.go | 24 +++- .../reverseproxy/proxy/manager_mock.go | 66 ++++----- .../modules/reverseproxy/proxy/proxy.go | 12 ++ .../service/manager/l4_port_test.go | 7 +- .../reverseproxy/service/manager/manager.go | 30 +++-- .../service/manager/manager_test.go | 8 +- management/internals/server/modules.go | 5 +- management/internals/shared/grpc/proxy.go | 127 ++++++++---------- .../internals/shared/grpc/proxy_test.go | 67 +++++---- management/server/account_test.go | 2 +- .../testing/testing_tools/channel/channel.go | 6 +- management/server/store/sql_store.go | 61 ++++++++- management/server/store/store.go | 2 + management/server/store/store_mock.go | 28 ++++ proxy/management_integration_test.go | 18 +-- 18 files changed, 304 insertions(+), 207 deletions(-) diff --git a/management/internals/modules/reverseproxy/domain/manager/manager.go b/management/internals/modules/reverseproxy/domain/manager/manager.go index 901cdf0e3..c6c41bfe5 100644 --- a/management/internals/modules/reverseproxy/domain/manager/manager.go +++ b/management/internals/modules/reverseproxy/domain/manager/manager.go @@ -31,19 +31,15 @@ type store interface { type proxyManager interface { GetActiveClusterAddresses(ctx context.Context) ([]string, error) -} - -type clusterCapabilities interface { - ClusterSupportsCustomPorts(clusterAddr string) *bool - ClusterRequireSubdomain(clusterAddr string) *bool + ClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool + ClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool } type Manager struct { - store store - validator domain.Validator - proxyManager proxyManager - clusterCapabilities clusterCapabilities - permissionsManager permissions.Manager + store store + validator domain.Validator + proxyManager proxyManager + permissionsManager permissions.Manager accountManager account.Manager } @@ -57,11 +53,6 @@ func NewManager(store store, proxyMgr proxyManager, permissionsManager permissio } } -// SetClusterCapabilities sets the cluster capabilities provider for domain queries. -func (m *Manager) SetClusterCapabilities(caps clusterCapabilities) { - m.clusterCapabilities = caps -} - func (m Manager) GetDomains(ctx context.Context, accountID, userID string) ([]*domain.Domain, error) { ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Read) if err != nil { @@ -97,10 +88,8 @@ func (m Manager) GetDomains(ctx context.Context, accountID, userID string) ([]*d Type: domain.TypeFree, Validated: true, } - if m.clusterCapabilities != nil { - d.SupportsCustomPorts = m.clusterCapabilities.ClusterSupportsCustomPorts(cluster) - d.RequireSubdomain = m.clusterCapabilities.ClusterRequireSubdomain(cluster) - } + d.SupportsCustomPorts = m.proxyManager.ClusterSupportsCustomPorts(ctx, cluster) + d.RequireSubdomain = m.proxyManager.ClusterRequireSubdomain(ctx, cluster) ret = append(ret, d) } @@ -114,8 +103,8 @@ func (m Manager) GetDomains(ctx context.Context, accountID, userID string) ([]*d Type: domain.TypeCustom, Validated: d.Validated, } - if m.clusterCapabilities != nil && d.TargetCluster != "" { - cd.SupportsCustomPorts = m.clusterCapabilities.ClusterSupportsCustomPorts(d.TargetCluster) + if d.TargetCluster != "" { + cd.SupportsCustomPorts = m.proxyManager.ClusterSupportsCustomPorts(ctx, d.TargetCluster) } // Custom domains never require a subdomain by default since // the account owns them and should be able to use the bare domain. diff --git a/management/internals/modules/reverseproxy/proxy/manager.go b/management/internals/modules/reverseproxy/proxy/manager.go index 9b0de53b4..0368b84de 100644 --- a/management/internals/modules/reverseproxy/proxy/manager.go +++ b/management/internals/modules/reverseproxy/proxy/manager.go @@ -11,11 +11,13 @@ import ( // Manager defines the interface for proxy operations type Manager interface { - Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string) error + Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string, capabilities *Capabilities) error Disconnect(ctx context.Context, proxyID string) error Heartbeat(ctx context.Context, proxyID, clusterAddress, ipAddress string) error GetActiveClusterAddresses(ctx context.Context) ([]string, error) GetActiveClusters(ctx context.Context) ([]Cluster, error) + ClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool + ClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool CleanupStale(ctx context.Context, inactivityDuration time.Duration) error } @@ -34,6 +36,4 @@ type Controller interface { RegisterProxyToCluster(ctx context.Context, clusterAddr, proxyID string) error UnregisterProxyFromCluster(ctx context.Context, clusterAddr, proxyID string) error GetProxiesForCluster(clusterAddr string) []string - ClusterSupportsCustomPorts(clusterAddr string) *bool - ClusterRequireSubdomain(clusterAddr string) *bool } diff --git a/management/internals/modules/reverseproxy/proxy/manager/controller.go b/management/internals/modules/reverseproxy/proxy/manager/controller.go index 05a0c9048..e5b3e9886 100644 --- a/management/internals/modules/reverseproxy/proxy/manager/controller.go +++ b/management/internals/modules/reverseproxy/proxy/manager/controller.go @@ -72,17 +72,6 @@ func (c *GRPCController) UnregisterProxyFromCluster(ctx context.Context, cluster return nil } -// ClusterSupportsCustomPorts returns whether any proxy in the cluster supports custom ports. -func (c *GRPCController) ClusterSupportsCustomPorts(clusterAddr string) *bool { - return c.proxyGRPCServer.ClusterSupportsCustomPorts(clusterAddr) -} - -// ClusterRequireSubdomain returns whether the cluster requires a subdomain label. -// Returns nil when no proxy has reported the capability (defaults to false). -func (c *GRPCController) ClusterRequireSubdomain(clusterAddr string) *bool { - return c.proxyGRPCServer.ClusterRequireSubdomain(clusterAddr) -} - // GetProxiesForCluster returns all proxy IDs registered for a specific cluster. func (c *GRPCController) GetProxiesForCluster(clusterAddr string) []string { proxySet, ok := c.clusterProxies.Load(clusterAddr) diff --git a/management/internals/modules/reverseproxy/proxy/manager/manager.go b/management/internals/modules/reverseproxy/proxy/manager/manager.go index dac6d3ce3..a92fffab9 100644 --- a/management/internals/modules/reverseproxy/proxy/manager/manager.go +++ b/management/internals/modules/reverseproxy/proxy/manager/manager.go @@ -16,6 +16,8 @@ type store interface { UpdateProxyHeartbeat(ctx context.Context, proxyID, clusterAddress, ipAddress string) error GetActiveProxyClusterAddresses(ctx context.Context) ([]string, error) GetActiveProxyClusters(ctx context.Context) ([]proxy.Cluster, error) + GetClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool + GetClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error } @@ -38,9 +40,14 @@ func NewManager(store store, meter metric.Meter) (*Manager, error) { }, nil } -// Connect registers a new proxy connection in the database -func (m Manager) Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string) error { +// Connect registers a new proxy connection in the database. +// capabilities may be nil for old proxies that do not report them. +func (m Manager) Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string, capabilities *proxy.Capabilities) error { now := time.Now() + var caps proxy.Capabilities + if capabilities != nil { + caps = *capabilities + } p := &proxy.Proxy{ ID: proxyID, ClusterAddress: clusterAddress, @@ -48,6 +55,7 @@ func (m Manager) Connect(ctx context.Context, proxyID, clusterAddress, ipAddress LastSeen: now, ConnectedAt: &now, Status: "connected", + Capabilities: caps, } if err := m.store.SaveProxy(ctx, p); err != nil { @@ -118,6 +126,18 @@ func (m Manager) GetActiveClusters(ctx context.Context) ([]proxy.Cluster, error) return clusters, nil } +// ClusterSupportsCustomPorts returns whether any active proxy in the cluster +// supports custom ports. Returns nil when no proxy has reported capabilities. +func (m Manager) ClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool { + return m.store.GetClusterSupportsCustomPorts(ctx, clusterAddr) +} + +// ClusterRequireSubdomain returns whether any active proxy in the cluster +// requires a subdomain. Returns nil when no proxy has reported capabilities. +func (m Manager) ClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool { + return m.store.GetClusterRequireSubdomain(ctx, clusterAddr) +} + // CleanupStale removes proxies that haven't sent heartbeat in the specified duration func (m Manager) CleanupStale(ctx context.Context, inactivityDuration time.Duration) error { if err := m.store.CleanupStaleProxies(ctx, inactivityDuration); err != nil { diff --git a/management/internals/modules/reverseproxy/proxy/manager_mock.go b/management/internals/modules/reverseproxy/proxy/manager_mock.go index da3df12a2..97466c503 100644 --- a/management/internals/modules/reverseproxy/proxy/manager_mock.go +++ b/management/internals/modules/reverseproxy/proxy/manager_mock.go @@ -50,18 +50,46 @@ func (mr *MockManagerMockRecorder) CleanupStale(ctx, inactivityDuration interfac return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupStale", reflect.TypeOf((*MockManager)(nil).CleanupStale), ctx, inactivityDuration) } -// Connect mocks base method. -func (m *MockManager) Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string) error { +// ClusterSupportsCustomPorts mocks base method. +func (m *MockManager) ClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Connect", ctx, proxyID, clusterAddress, ipAddress) + ret := m.ctrl.Call(m, "ClusterSupportsCustomPorts", ctx, clusterAddr) + ret0, _ := ret[0].(*bool) + return ret0 +} + +// ClusterSupportsCustomPorts indicates an expected call of ClusterSupportsCustomPorts. +func (mr *MockManagerMockRecorder) ClusterSupportsCustomPorts(ctx, clusterAddr interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterSupportsCustomPorts", reflect.TypeOf((*MockManager)(nil).ClusterSupportsCustomPorts), ctx, clusterAddr) +} + +// ClusterRequireSubdomain mocks base method. +func (m *MockManager) ClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClusterRequireSubdomain", ctx, clusterAddr) + ret0, _ := ret[0].(*bool) + return ret0 +} + +// ClusterRequireSubdomain indicates an expected call of ClusterRequireSubdomain. +func (mr *MockManagerMockRecorder) ClusterRequireSubdomain(ctx, clusterAddr interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterRequireSubdomain", reflect.TypeOf((*MockManager)(nil).ClusterRequireSubdomain), ctx, clusterAddr) +} + +// Connect mocks base method. +func (m *MockManager) Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string, capabilities *Capabilities) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Connect", ctx, proxyID, clusterAddress, ipAddress, capabilities) ret0, _ := ret[0].(error) return ret0 } // Connect indicates an expected call of Connect. -func (mr *MockManagerMockRecorder) Connect(ctx, proxyID, clusterAddress, ipAddress interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Connect(ctx, proxyID, clusterAddress, ipAddress, capabilities interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connect", reflect.TypeOf((*MockManager)(nil).Connect), ctx, proxyID, clusterAddress, ipAddress) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connect", reflect.TypeOf((*MockManager)(nil).Connect), ctx, proxyID, clusterAddress, ipAddress, capabilities) } // Disconnect mocks base method. @@ -145,34 +173,6 @@ func (m *MockController) EXPECT() *MockControllerMockRecorder { return m.recorder } -// ClusterSupportsCustomPorts mocks base method. -func (m *MockController) ClusterSupportsCustomPorts(clusterAddr string) *bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClusterSupportsCustomPorts", clusterAddr) - ret0, _ := ret[0].(*bool) - return ret0 -} - -// ClusterSupportsCustomPorts indicates an expected call of ClusterSupportsCustomPorts. -func (mr *MockControllerMockRecorder) ClusterSupportsCustomPorts(clusterAddr interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterSupportsCustomPorts", reflect.TypeOf((*MockController)(nil).ClusterSupportsCustomPorts), clusterAddr) -} - -// ClusterRequireSubdomain mocks base method. -func (m *MockController) ClusterRequireSubdomain(clusterAddr string) *bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClusterRequireSubdomain", clusterAddr) - ret0, _ := ret[0].(*bool) - return ret0 -} - -// ClusterRequireSubdomain indicates an expected call of ClusterRequireSubdomain. -func (mr *MockControllerMockRecorder) ClusterRequireSubdomain(clusterAddr interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterRequireSubdomain", reflect.TypeOf((*MockController)(nil).ClusterRequireSubdomain), clusterAddr) -} - // GetOIDCValidationConfig mocks base method. func (m *MockController) GetOIDCValidationConfig() OIDCValidationConfig { m.ctrl.T.Helper() diff --git a/management/internals/modules/reverseproxy/proxy/proxy.go b/management/internals/modules/reverseproxy/proxy/proxy.go index 671eb109f..4102e50fe 100644 --- a/management/internals/modules/reverseproxy/proxy/proxy.go +++ b/management/internals/modules/reverseproxy/proxy/proxy.go @@ -2,6 +2,17 @@ package proxy import "time" +// Capabilities describes what a proxy can handle, as reported via gRPC. +// Nil fields mean the proxy never reported this capability. +type Capabilities struct { + // SupportsCustomPorts indicates whether this proxy can bind arbitrary + // ports for TCP/UDP services. TLS uses SNI routing and is not gated. + SupportsCustomPorts *bool + // RequireSubdomain indicates whether a subdomain label is required in + // front of the cluster domain. + RequireSubdomain *bool +} + // Proxy represents a reverse proxy instance type Proxy struct { ID string `gorm:"primaryKey;type:varchar(255)"` @@ -11,6 +22,7 @@ type Proxy struct { ConnectedAt *time.Time DisconnectedAt *time.Time Status string `gorm:"type:varchar(20);not null;index:idx_proxy_cluster_status"` + Capabilities Capabilities `gorm:"embedded"` CreatedAt time.Time UpdatedAt time.Time } diff --git a/management/internals/modules/reverseproxy/service/manager/l4_port_test.go b/management/internals/modules/reverseproxy/service/manager/l4_port_test.go index 8b652c7e1..4a7647d90 100644 --- a/management/internals/modules/reverseproxy/service/manager/l4_port_test.go +++ b/management/internals/modules/reverseproxy/service/manager/l4_port_test.go @@ -75,11 +75,13 @@ func setupL4Test(t *testing.T, customPortsSupported *bool) (*Manager, store.Stor require.NoError(t, err) mockCtrl := proxy.NewMockController(ctrl) - mockCtrl.EXPECT().ClusterSupportsCustomPorts(gomock.Any()).Return(customPortsSupported).AnyTimes() - mockCtrl.EXPECT().ClusterRequireSubdomain(gomock.Any()).Return((*bool)(nil)).AnyTimes() mockCtrl.EXPECT().SendServiceUpdateToCluster(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() mockCtrl.EXPECT().GetOIDCValidationConfig().Return(proxy.OIDCValidationConfig{}).AnyTimes() + mockCaps := proxy.NewMockManager(ctrl) + mockCaps.EXPECT().ClusterSupportsCustomPorts(gomock.Any(), testCluster).Return(customPortsSupported).AnyTimes() + mockCaps.EXPECT().ClusterRequireSubdomain(gomock.Any(), testCluster).Return((*bool)(nil)).AnyTimes() + accountMgr := &mock_server.MockAccountManager{ StoreEventFunc: func(_ context.Context, _, _, _ string, _ activity.ActivityDescriber, _ map[string]any) {}, UpdateAccountPeersFunc: func(_ context.Context, _ string) {}, @@ -93,6 +95,7 @@ func setupL4Test(t *testing.T, customPortsSupported *bool) (*Manager, store.Stor accountManager: accountMgr, permissionsManager: permissions.NewManager(testStore), proxyController: mockCtrl, + capabilities: mockCaps, clusterDeriver: &testClusterDeriver{domains: []string{"test.netbird.io"}}, } mgr.exposeReaper = &exposeReaper{manager: mgr} diff --git a/management/internals/modules/reverseproxy/service/manager/manager.go b/management/internals/modules/reverseproxy/service/manager/manager.go index ea4fa9d1e..db393ef38 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager.go +++ b/management/internals/modules/reverseproxy/service/manager/manager.go @@ -75,22 +75,30 @@ type ClusterDeriver interface { GetClusterDomains() []string } +// CapabilityProvider queries proxy cluster capabilities from the database. +type CapabilityProvider interface { + ClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool + ClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool +} + type Manager struct { store store.Store accountManager account.Manager permissionsManager permissions.Manager proxyController proxy.Controller + capabilities CapabilityProvider clusterDeriver ClusterDeriver exposeReaper *exposeReaper } // NewManager creates a new service manager. -func NewManager(store store.Store, accountManager account.Manager, permissionsManager permissions.Manager, proxyController proxy.Controller, clusterDeriver ClusterDeriver) *Manager { +func NewManager(store store.Store, accountManager account.Manager, permissionsManager permissions.Manager, proxyController proxy.Controller, capabilities CapabilityProvider, clusterDeriver ClusterDeriver) *Manager { mgr := &Manager{ store: store, accountManager: accountManager, permissionsManager: permissionsManager, proxyController: proxyController, + capabilities: capabilities, clusterDeriver: clusterDeriver, } mgr.exposeReaper = &exposeReaper{manager: mgr} @@ -237,7 +245,7 @@ func (m *Manager) initializeServiceForCreate(ctx context.Context, accountID stri } service.ProxyCluster = proxyCluster - if err := m.validateSubdomainRequirement(service.Domain, proxyCluster); err != nil { + if err := m.validateSubdomainRequirement(ctx, service.Domain, proxyCluster); err != nil { return err } } @@ -268,11 +276,11 @@ func (m *Manager) initializeServiceForCreate(ctx context.Context, accountID stri // validateSubdomainRequirement checks whether the domain can be used bare // (without a subdomain label) on the given cluster. If the cluster reports // require_subdomain=true and the domain equals the cluster domain, it rejects. -func (m *Manager) validateSubdomainRequirement(domain, cluster string) error { +func (m *Manager) validateSubdomainRequirement(ctx context.Context, domain, cluster string) error { if domain != cluster { return nil } - requireSub := m.proxyController.ClusterRequireSubdomain(cluster) + requireSub := m.capabilities.ClusterRequireSubdomain(ctx, cluster) if requireSub != nil && *requireSub { return status.Errorf(status.InvalidArgument, "domain %s requires a subdomain label", domain) } @@ -312,7 +320,7 @@ func (m *Manager) ensureL4Port(ctx context.Context, tx store.Store, svc *service if !service.IsL4Protocol(svc.Mode) { return nil } - customPorts := m.proxyController.ClusterSupportsCustomPorts(svc.ProxyCluster) + customPorts := m.capabilities.ClusterSupportsCustomPorts(ctx, svc.ProxyCluster) if service.IsPortBasedProtocol(svc.Mode) && svc.ListenPort > 0 && (customPorts == nil || !*customPorts) { if svc.Source != service.SourceEphemeral { return status.Errorf(status.InvalidArgument, "custom ports not supported on cluster %s", svc.ProxyCluster) @@ -520,12 +528,12 @@ func (m *Manager) executeServiceUpdate(ctx context.Context, transaction store.St } if existingService.Terminated { - return status.Errorf(status.PermissionDenied, "service is terminated and cannot be updated") - } + return status.Errorf(status.PermissionDenied, "service is terminated and cannot be updated") + } - if err := validateProtocolChange(existingService.Mode, service.Mode); err != nil { - return err - } + if err := validateProtocolChange(existingService.Mode, service.Mode); err != nil { + return err + } updateInfo.oldCluster = existingService.ProxyCluster updateInfo.domainChanged = existingService.Domain != service.Domain @@ -538,7 +546,7 @@ func (m *Manager) executeServiceUpdate(ctx context.Context, transaction store.St service.ProxyCluster = existingService.ProxyCluster } - if err := m.validateSubdomainRequirement(service.Domain, service.ProxyCluster); err != nil { + if err := m.validateSubdomainRequirement(ctx, service.Domain, service.ProxyCluster); err != nil { return err } diff --git a/management/internals/modules/reverseproxy/service/manager/manager_test.go b/management/internals/modules/reverseproxy/service/manager/manager_test.go index 18e1be26e..f6e532118 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/service/manager/manager_test.go @@ -1324,11 +1324,11 @@ func TestValidateSubdomainRequirement(t *testing.T) { t.Run(tc.name, func(t *testing.T) { ctrl := gomock.NewController(t) - mockCtrl := proxy.NewMockController(ctrl) - mockCtrl.EXPECT().ClusterRequireSubdomain(tc.cluster).Return(tc.requireSubdomain).AnyTimes() + mockCaps := proxy.NewMockManager(ctrl) + mockCaps.EXPECT().ClusterRequireSubdomain(gomock.Any(), tc.cluster).Return(tc.requireSubdomain).AnyTimes() - mgr := &Manager{proxyController: mockCtrl} - err := mgr.validateSubdomainRequirement(tc.domain, tc.cluster) + mgr := &Manager{capabilities: mockCaps} + err := mgr.validateSubdomainRequirement(context.Background(), tc.domain, tc.cluster) if tc.wantErr { require.Error(t, err) assert.Contains(t, err.Error(), "requires a subdomain label") diff --git a/management/internals/server/modules.go b/management/internals/server/modules.go index a32cf6046..6064bd5b6 100644 --- a/management/internals/server/modules.go +++ b/management/internals/server/modules.go @@ -195,7 +195,7 @@ func (s *BaseServer) RecordsManager() records.Manager { func (s *BaseServer) ServiceManager() service.Manager { return Create(s, func() service.Manager { - return nbreverseproxy.NewManager(s.Store(), s.AccountManager(), s.PermissionsManager(), s.ServiceProxyController(), s.ReverseProxyDomainManager()) + return nbreverseproxy.NewManager(s.Store(), s.AccountManager(), s.PermissionsManager(), s.ServiceProxyController(), s.ProxyManager(), s.ReverseProxyDomainManager()) }) } @@ -212,9 +212,6 @@ func (s *BaseServer) ProxyManager() proxy.Manager { func (s *BaseServer) ReverseProxyDomainManager() *manager.Manager { return Create(s, func() *manager.Manager { m := manager.NewManager(s.Store(), s.ProxyManager(), s.PermissionsManager(), s.AccountManager()) - s.AfterInit(func(s *BaseServer) { - m.SetClusterCapabilities(s.ServiceProxyController()) - }) return &m }) } diff --git a/management/internals/shared/grpc/proxy.go b/management/internals/shared/grpc/proxy.go index 5fa382af0..07732cea6 100644 --- a/management/internals/shared/grpc/proxy.go +++ b/management/internals/shared/grpc/proxy.go @@ -182,9 +182,21 @@ func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest log.WithContext(ctx).Warnf("Failed to register proxy %s in cluster: %v", proxyID, err) } - // Register proxy in database - if err := s.proxyManager.Connect(ctx, proxyID, proxyAddress, peerInfo); err != nil { - log.WithContext(ctx).Warnf("Failed to register proxy %s in database: %v", proxyID, err) + // Register proxy in database with capabilities + var caps *proxy.Capabilities + if c := req.GetCapabilities(); c != nil { + caps = &proxy.Capabilities{ + SupportsCustomPorts: c.SupportsCustomPorts, + RequireSubdomain: c.RequireSubdomain, + } + } + if err := s.proxyManager.Connect(ctx, proxyID, proxyAddress, peerInfo, caps); err != nil { + log.WithContext(ctx).Warnf("failed to register proxy %s in database: %v", proxyID, err) + s.connectedProxies.Delete(proxyID) + if unregErr := s.proxyController.UnregisterProxyFromCluster(ctx, conn.address, proxyID); unregErr != nil { + log.WithContext(ctx).Debugf("cleanup after Connect failure for proxy %s: %v", proxyID, unregErr) + } + return status.Errorf(codes.Internal, "register proxy in database: %v", err) } log.WithFields(log.Fields{ @@ -297,6 +309,9 @@ func (s *ProxyServiceServer) snapshotServiceMappings(ctx context.Context, conn * } m := service.ToProtoMapping(rpservice.Create, token, s.GetOIDCValidationConfig()) + if !proxyAcceptsMapping(conn, m) { + continue + } mappings = append(mappings, m) } return mappings, nil @@ -445,22 +460,46 @@ func (s *ProxyServiceServer) SendServiceUpdateToCluster(ctx context.Context, upd log.Debugf("Sending service update to cluster %s", clusterAddr) for _, proxyID := range proxyIDs { - if connVal, ok := s.connectedProxies.Load(proxyID); ok { - conn := connVal.(*proxyConnection) - msg := s.perProxyMessage(updateResponse, proxyID) - if msg == nil { - continue - } - select { - case conn.sendChan <- msg: - log.WithContext(ctx).Debugf("Sent service update with id %s to proxy %s in cluster %s", update.Id, proxyID, clusterAddr) - default: - log.WithContext(ctx).Warnf("Failed to send service update to proxy %s in cluster %s (channel full)", proxyID, clusterAddr) - } + connVal, ok := s.connectedProxies.Load(proxyID) + if !ok { + continue + } + conn := connVal.(*proxyConnection) + if !proxyAcceptsMapping(conn, update) { + log.WithContext(ctx).Debugf("Skipping proxy %s: does not support custom ports for mapping %s", proxyID, update.Id) + continue + } + msg := s.perProxyMessage(updateResponse, proxyID) + if msg == nil { + continue + } + select { + case conn.sendChan <- msg: + log.WithContext(ctx).Debugf("Sent service update with id %s to proxy %s in cluster %s", update.Id, proxyID, clusterAddr) + default: + log.WithContext(ctx).Warnf("Failed to send service update to proxy %s in cluster %s (channel full)", proxyID, clusterAddr) } } } +// proxyAcceptsMapping returns whether the proxy should receive this mapping. +// Old proxies that never reported capabilities are skipped for non-TLS L4 +// mappings with a custom listen port, since they don't understand the +// protocol. Proxies that report capabilities (even SupportsCustomPorts=false) +// are new enough to handle the mapping. TLS uses SNI routing and works on +// any proxy. Delete operations are always sent so proxies can clean up. +func proxyAcceptsMapping(conn *proxyConnection, mapping *proto.ProxyMapping) bool { + if mapping.Type == proto.ProxyMappingUpdateType_UPDATE_TYPE_REMOVED { + return true + } + if mapping.ListenPort == 0 || mapping.Mode == "tls" { + return true + } + // Old proxies that never reported capabilities don't understand + // custom port mappings. + return conn.capabilities != nil && conn.capabilities.SupportsCustomPorts != nil +} + // perProxyMessage returns a copy of update with a fresh one-time token for // create/update operations. For delete operations the original mapping is // used unchanged because proxies do not need to authenticate for removal. @@ -508,64 +547,6 @@ func shallowCloneMapping(m *proto.ProxyMapping) *proto.ProxyMapping { } } -// ClusterSupportsCustomPorts returns whether any connected proxy in the given -// cluster reports custom port support. Returns nil if no proxy has reported -// capabilities (old proxies that predate the field). -func (s *ProxyServiceServer) ClusterSupportsCustomPorts(clusterAddr string) *bool { - if s.proxyController == nil { - return nil - } - - var hasCapabilities bool - for _, pid := range s.proxyController.GetProxiesForCluster(clusterAddr) { - connVal, ok := s.connectedProxies.Load(pid) - if !ok { - continue - } - conn := connVal.(*proxyConnection) - if conn.capabilities == nil || conn.capabilities.SupportsCustomPorts == nil { - continue - } - if *conn.capabilities.SupportsCustomPorts { - return ptr(true) - } - hasCapabilities = true - } - if hasCapabilities { - return ptr(false) - } - return nil -} - -// ClusterRequireSubdomain returns whether any connected proxy in the given -// cluster reports that a subdomain is required. Returns nil if no proxy has -// reported the capability (defaults to not required). -func (s *ProxyServiceServer) ClusterRequireSubdomain(clusterAddr string) *bool { - if s.proxyController == nil { - return nil - } - - var hasCapabilities bool - for _, pid := range s.proxyController.GetProxiesForCluster(clusterAddr) { - connVal, ok := s.connectedProxies.Load(pid) - if !ok { - continue - } - conn := connVal.(*proxyConnection) - if conn.capabilities == nil || conn.capabilities.RequireSubdomain == nil { - continue - } - if *conn.capabilities.RequireSubdomain { - return ptr(true) - } - hasCapabilities = true - } - if hasCapabilities { - return ptr(false) - } - return nil -} - func (s *ProxyServiceServer) Authenticate(ctx context.Context, req *proto.AuthenticateRequest) (*proto.AuthenticateResponse, error) { service, err := s.serviceManager.GetServiceByID(ctx, req.GetAccountId(), req.GetId()) if err != nil { diff --git a/management/internals/shared/grpc/proxy_test.go b/management/internals/shared/grpc/proxy_test.go index 83c99020d..d5aed3dee 100644 --- a/management/internals/shared/grpc/proxy_test.go +++ b/management/internals/shared/grpc/proxy_test.go @@ -53,14 +53,6 @@ func (c *testProxyController) UnregisterProxyFromCluster(_ context.Context, clus return nil } -func (c *testProxyController) ClusterSupportsCustomPorts(_ string) *bool { - return ptr(true) -} - -func (c *testProxyController) ClusterRequireSubdomain(_ string) *bool { - return nil -} - func (c *testProxyController) GetProxiesForCluster(clusterAddr string) []string { c.mu.Lock() defer c.mu.Unlock() @@ -355,14 +347,14 @@ func TestSendServiceUpdateToCluster_FiltersOnCapability(t *testing.T) { const cluster = "proxy.example.com" - // Proxy A supports custom ports. - chA := registerFakeProxyWithCaps(s, "proxy-a", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(true)}) - // Proxy B does NOT support custom ports (shared cloud proxy). - chB := registerFakeProxyWithCaps(s, "proxy-b", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(false)}) + // Modern proxy reports capabilities. + chModern := registerFakeProxyWithCaps(s, "proxy-modern", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(true)}) + // Legacy proxy never reported capabilities (nil). + chLegacy := registerFakeProxy(s, "proxy-legacy", cluster) ctx := context.Background() - // TLS passthrough works on all proxies regardless of custom port support. + // TLS passthrough with custom port: all proxies receive it (SNI routing). tlsMapping := &proto.ProxyMapping{ Type: proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, Id: "service-tls", @@ -375,12 +367,26 @@ func TestSendServiceUpdateToCluster_FiltersOnCapability(t *testing.T) { s.SendServiceUpdateToCluster(ctx, tlsMapping, cluster) - msgA := drainMapping(chA) - msgB := drainMapping(chB) - assert.NotNil(t, msgA, "proxy-a should receive TLS mapping") - assert.NotNil(t, msgB, "proxy-b should receive TLS mapping (passthrough works on all proxies)") + assert.NotNil(t, drainMapping(chModern), "modern proxy should receive TLS mapping") + assert.NotNil(t, drainMapping(chLegacy), "legacy proxy should receive TLS mapping (SNI works on all)") - // Send an HTTP mapping: both should receive it. + // TCP mapping with custom port: only modern proxy receives it. + tcpMapping := &proto.ProxyMapping{ + Type: proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, + Id: "service-tcp", + AccountId: "account-1", + Domain: "db.example.com", + Mode: "tcp", + ListenPort: 5432, + Path: []*proto.PathMapping{{Target: "10.0.0.5:5432"}}, + } + + s.SendServiceUpdateToCluster(ctx, tcpMapping, cluster) + + assert.NotNil(t, drainMapping(chModern), "modern proxy should receive TCP custom-port mapping") + assert.Nil(t, drainMapping(chLegacy), "legacy proxy should NOT receive TCP custom-port mapping") + + // HTTP mapping (no listen port): both receive it. httpMapping := &proto.ProxyMapping{ Type: proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, Id: "service-http", @@ -391,10 +397,16 @@ func TestSendServiceUpdateToCluster_FiltersOnCapability(t *testing.T) { s.SendServiceUpdateToCluster(ctx, httpMapping, cluster) - msgA = drainMapping(chA) - msgB = drainMapping(chB) - assert.NotNil(t, msgA, "proxy-a should receive HTTP mapping") - assert.NotNil(t, msgB, "proxy-b should receive HTTP mapping") + assert.NotNil(t, drainMapping(chModern), "modern proxy should receive HTTP mapping") + assert.NotNil(t, drainMapping(chLegacy), "legacy proxy should receive HTTP mapping") + + // Proxy that reports SupportsCustomPorts=false still receives custom-port + // mappings because it understands the protocol (it's new enough). + chNewNoCustom := registerFakeProxyWithCaps(s, "proxy-new-no-custom", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(false)}) + + s.SendServiceUpdateToCluster(ctx, tcpMapping, cluster) + + assert.NotNil(t, drainMapping(chNewNoCustom), "new proxy with SupportsCustomPorts=false should still receive mapping") } func TestSendServiceUpdateToCluster_TLSNotFiltered(t *testing.T) { @@ -408,7 +420,8 @@ func TestSendServiceUpdateToCluster_TLSNotFiltered(t *testing.T) { const cluster = "proxy.example.com" - chShared := registerFakeProxyWithCaps(s, "proxy-shared", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(false)}) + // Legacy proxy (no capabilities) still receives TLS since it uses SNI. + chLegacy := registerFakeProxy(s, "proxy-legacy", cluster) tlsMapping := &proto.ProxyMapping{ Type: proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, @@ -421,8 +434,8 @@ func TestSendServiceUpdateToCluster_TLSNotFiltered(t *testing.T) { s.SendServiceUpdateToCluster(context.Background(), tlsMapping, cluster) - msg := drainMapping(chShared) - assert.NotNil(t, msg, "shared proxy should receive TLS mapping even without custom port support") + msg := drainMapping(chLegacy) + assert.NotNil(t, msg, "legacy proxy should receive TLS mapping (SNI works without custom port support)") } // TestServiceModifyNotifications exercises every possible modification @@ -589,7 +602,7 @@ func TestServiceModifyNotifications(t *testing.T) { s.SetProxyController(newTestProxyController()) const cluster = "proxy.example.com" chModern := registerFakeProxyWithCaps(s, "modern", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(true)}) - chLegacy := registerFakeProxyWithCaps(s, "legacy", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(false)}) + chLegacy := registerFakeProxy(s, "legacy", cluster) // TLS passthrough works on all proxies regardless of custom port support s.SendServiceUpdateToCluster(ctx, tlsOnlyMapping(proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED), cluster) @@ -608,7 +621,7 @@ func TestServiceModifyNotifications(t *testing.T) { } s.SetProxyController(newTestProxyController()) const cluster = "proxy.example.com" - chLegacy := registerFakeProxyWithCaps(s, "legacy", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(false)}) + chLegacy := registerFakeProxy(s, "legacy", cluster) mapping := tlsOnlyMapping(proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED) mapping.ListenPort = 0 // default port diff --git a/management/server/account_test.go b/management/server/account_test.go index fdec43617..548cf31d4 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -3138,7 +3138,7 @@ func createManager(t testing.TB) (*DefaultAccountManager, *update_channel.PeersU if err != nil { return nil, nil, err } - manager.SetServiceManager(reverseproxymanager.NewManager(store, manager, permissionsManager, proxyController, nil)) + manager.SetServiceManager(reverseproxymanager.NewManager(store, manager, permissionsManager, proxyController, proxyManager, nil)) return manager, updateManager, nil } diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index 55095bbb7..c6e57b1be 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -114,8 +114,7 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee if err != nil { t.Fatalf("Failed to create proxy controller: %v", err) } - domainManager.SetClusterCapabilities(serviceProxyController) - serviceManager := reverseproxymanager.NewManager(store, am, permissionsManager, serviceProxyController, domainManager) + serviceManager := reverseproxymanager.NewManager(store, am, permissionsManager, serviceProxyController, proxyMgr, domainManager) proxyServiceServer.SetServiceManager(serviceManager) am.SetServiceManager(serviceManager) @@ -244,8 +243,7 @@ func BuildApiBlackBoxWithDBStateAndPeerChannel(t testing_tools.TB, sqlFile strin if err != nil { t.Fatalf("Failed to create proxy controller: %v", err) } - domainManager.SetClusterCapabilities(serviceProxyController) - serviceManager := reverseproxymanager.NewManager(store, am, permissionsManager, serviceProxyController, domainManager) + serviceManager := reverseproxymanager.NewManager(store, am, permissionsManager, serviceProxyController, proxyMgr, domainManager) proxyServiceServer.SetServiceManager(serviceManager) am.SetServiceManager(serviceManager) diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index cf030f51e..ee1947b18 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -5445,7 +5445,7 @@ func (s *SqlStore) GetActiveProxyClusterAddresses(ctx context.Context) ([]string result := s.db.WithContext(ctx). Model(&proxy.Proxy{}). - Where("status = ? AND last_seen > ?", "connected", time.Now().Add(-2*time.Minute)). + Where("status = ? AND last_seen > ?", "connected", time.Now().Add(-proxyActiveThreshold)). Distinct("cluster_address"). Pluck("cluster_address", &addresses) @@ -5463,7 +5463,7 @@ func (s *SqlStore) GetActiveProxyClusters(ctx context.Context) ([]proxy.Cluster, result := s.db.Model(&proxy.Proxy{}). Select("cluster_address as address, COUNT(*) as connected_proxies"). - Where("status = ? AND last_seen > ?", "connected", time.Now().Add(-2*time.Minute)). + Where("status = ? AND last_seen > ?", "connected", time.Now().Add(-proxyActiveThreshold)). Group("cluster_address"). Scan(&clusters) @@ -5475,6 +5475,63 @@ func (s *SqlStore) GetActiveProxyClusters(ctx context.Context) ([]proxy.Cluster, return clusters, nil } +// proxyActiveThreshold is the maximum age of a heartbeat for a proxy to be +// considered active. Must be at least 2x the heartbeat interval (1 min). +const proxyActiveThreshold = 2 * time.Minute + +var validCapabilityColumns = map[string]struct{}{ + "supports_custom_ports": {}, + "require_subdomain": {}, +} + +// GetClusterSupportsCustomPorts returns whether any active proxy in the cluster +// supports custom ports. Returns nil when no proxy reported the capability. +func (s *SqlStore) GetClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool { + return s.getClusterCapability(ctx, clusterAddr, "supports_custom_ports") +} + +// GetClusterRequireSubdomain returns whether any active proxy in the cluster +// requires a subdomain. Returns nil when no proxy reported the capability. +func (s *SqlStore) GetClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool { + return s.getClusterCapability(ctx, clusterAddr, "require_subdomain") +} + +// getClusterCapability returns an aggregated boolean capability for the given +// cluster. It checks active (connected, recently seen) proxies and returns: +// - *true if any proxy in the cluster has the capability set to true, +// - *false if at least one proxy reported but none set it to true, +// - nil if no proxy reported the capability at all. +func (s *SqlStore) getClusterCapability(ctx context.Context, clusterAddr, column string) *bool { + if _, ok := validCapabilityColumns[column]; !ok { + log.WithContext(ctx).Errorf("invalid capability column: %s", column) + return nil + } + + var result struct { + HasCapability bool + AnyTrue bool + } + + err := s.db.WithContext(ctx). + Model(&proxy.Proxy{}). + Select("COUNT(CASE WHEN "+column+" IS NOT NULL THEN 1 END) > 0 AS has_capability, "+ + "COALESCE(MAX(CASE WHEN "+column+" = true THEN 1 ELSE 0 END), 0) = 1 AS any_true"). + Where("cluster_address = ? AND status = ? AND last_seen > ?", + clusterAddr, "connected", time.Now().Add(-proxyActiveThreshold)). + Scan(&result).Error + + if err != nil { + log.WithContext(ctx).Errorf("query cluster capability %s for %s: %v", column, clusterAddr, err) + return nil + } + + if !result.HasCapability { + return nil + } + + return &result.AnyTrue +} + // CleanupStaleProxies deletes proxies that haven't sent heartbeat in the specified duration func (s *SqlStore) CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error { cutoffTime := time.Now().Add(-inactivityDuration) diff --git a/management/server/store/store.go b/management/server/store/store.go index d00dcde38..e24a1efef 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -287,6 +287,8 @@ type Store interface { UpdateProxyHeartbeat(ctx context.Context, proxyID, clusterAddress, ipAddress string) error GetActiveProxyClusterAddresses(ctx context.Context) ([]string, error) GetActiveProxyClusters(ctx context.Context) ([]proxy.Cluster, error) + GetClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool + GetClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error GetCustomDomainsCounts(ctx context.Context) (total int64, validated int64, err error) diff --git a/management/server/store/store_mock.go b/management/server/store/store_mock.go index 235405861..a8648aed7 100644 --- a/management/server/store/store_mock.go +++ b/management/server/store/store_mock.go @@ -165,6 +165,34 @@ func (mr *MockStoreMockRecorder) CleanupStaleProxies(ctx, inactivityDuration int return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupStaleProxies", reflect.TypeOf((*MockStore)(nil).CleanupStaleProxies), ctx, inactivityDuration) } +// GetClusterSupportsCustomPorts mocks base method. +func (m *MockStore) GetClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClusterSupportsCustomPorts", ctx, clusterAddr) + ret0, _ := ret[0].(*bool) + return ret0 +} + +// GetClusterSupportsCustomPorts indicates an expected call of GetClusterSupportsCustomPorts. +func (mr *MockStoreMockRecorder) GetClusterSupportsCustomPorts(ctx, clusterAddr interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterSupportsCustomPorts", reflect.TypeOf((*MockStore)(nil).GetClusterSupportsCustomPorts), ctx, clusterAddr) +} + +// GetClusterRequireSubdomain mocks base method. +func (m *MockStore) GetClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClusterRequireSubdomain", ctx, clusterAddr) + ret0, _ := ret[0].(*bool) + return ret0 +} + +// GetClusterRequireSubdomain indicates an expected call of GetClusterRequireSubdomain. +func (mr *MockStoreMockRecorder) GetClusterRequireSubdomain(ctx, clusterAddr interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterRequireSubdomain", reflect.TypeOf((*MockStore)(nil).GetClusterRequireSubdomain), ctx, clusterAddr) +} + // Close mocks base method. func (m *MockStore) Close(ctx context.Context) error { m.ctrl.T.Helper() diff --git a/proxy/management_integration_test.go b/proxy/management_integration_test.go index c30234b5a..796cad622 100644 --- a/proxy/management_integration_test.go +++ b/proxy/management_integration_test.go @@ -200,7 +200,7 @@ func (m *testAccessLogManager) GetAllAccessLogs(_ context.Context, _, _ string, // testProxyManager is a mock implementation of proxy.Manager for testing. type testProxyManager struct{} -func (m *testProxyManager) Connect(_ context.Context, _, _, _ string) error { +func (m *testProxyManager) Connect(_ context.Context, _, _, _ string, _ *nbproxy.Capabilities) error { return nil } @@ -220,6 +220,14 @@ func (m *testProxyManager) GetActiveClusters(_ context.Context) ([]nbproxy.Clust return nil, nil } +func (m *testProxyManager) ClusterSupportsCustomPorts(_ context.Context, _ string) *bool { + return nil +} + +func (m *testProxyManager) ClusterRequireSubdomain(_ context.Context, _ string) *bool { + return nil +} + func (m *testProxyManager) CleanupStale(_ context.Context, _ time.Duration) error { return nil } @@ -247,14 +255,6 @@ func (c *testProxyController) GetProxiesForCluster(_ string) []string { return nil } -func (c *testProxyController) ClusterSupportsCustomPorts(_ string) *bool { - return nil -} - -func (c *testProxyController) ClusterRequireSubdomain(_ string) *bool { - return nil -} - // storeBackedServiceManager reads directly from the real store. type storeBackedServiceManager struct { store store.Store From c5225068494829fff2f8b1a048f389fc7428eff8 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Mon, 30 Mar 2026 15:53:50 +0200 Subject: [PATCH 251/374] [client] Add Expose support to embed library (#5695) * [client] Add Expose support to embed library Add ability to expose local services via the NetBird reverse proxy from embedded client code. Introduce ExposeSession with a blocking Wait method that keeps the session alive until the context is cancelled. Extract ProtocolType with ParseProtocolType into the expose package and use it across CLI and embed layers. * Fix TestNewRequest assertion to use ProtocolType instead of int * Add documentation for Request and KeepAlive in expose manager * Refactor ExposeSession to pass context explicitly in Wait method * Refactor ExposeSession Wait method to explicitly pass context * Update client/embed/expose.go Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> * Fix build * Update client/embed/expose.go Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> --------- Co-authored-by: Viktor Liu Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> Co-authored-by: Viktor Liu <17948409+lixmal@users.noreply.github.com> --- client/cmd/expose.go | 20 ++++++++---- client/embed/embed.go | 32 ++++++++++++++++-- client/embed/expose.go | 45 ++++++++++++++++++++++++++ client/internal/expose/manager.go | 13 ++++++-- client/internal/expose/manager_test.go | 2 +- client/internal/expose/protocol.go | 40 +++++++++++++++++++++++ client/internal/expose/request.go | 4 +-- 7 files changed, 140 insertions(+), 16 deletions(-) create mode 100644 client/embed/expose.go create mode 100644 client/internal/expose/protocol.go diff --git a/client/cmd/expose.go b/client/cmd/expose.go index 1334617d8..f4727703e 100644 --- a/client/cmd/expose.go +++ b/client/cmd/expose.go @@ -15,6 +15,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/netbirdio/netbird/client/internal/expose" "github.com/netbirdio/netbird/client/proto" "github.com/netbirdio/netbird/util" ) @@ -211,19 +212,24 @@ func exposeFn(cmd *cobra.Command, args []string) error { } func toExposeProtocol(exposeProtocol string) (proto.ExposeProtocol, error) { - switch strings.ToLower(exposeProtocol) { - case "http": + p, err := expose.ParseProtocolType(exposeProtocol) + if err != nil { + return 0, fmt.Errorf("invalid protocol: %w", err) + } + + switch p { + case expose.ProtocolHTTP: return proto.ExposeProtocol_EXPOSE_HTTP, nil - case "https": + case expose.ProtocolHTTPS: return proto.ExposeProtocol_EXPOSE_HTTPS, nil - case "tcp": + case expose.ProtocolTCP: return proto.ExposeProtocol_EXPOSE_TCP, nil - case "udp": + case expose.ProtocolUDP: return proto.ExposeProtocol_EXPOSE_UDP, nil - case "tls": + case expose.ProtocolTLS: return proto.ExposeProtocol_EXPOSE_TLS, nil default: - return 0, fmt.Errorf("unsupported protocol %q: must be http, https, tcp, udp, or tls", exposeProtocol) + return 0, fmt.Errorf("unhandled protocol type: %d", p) } } diff --git a/client/embed/embed.go b/client/embed/embed.go index 9fa797f18..88f7e541c 100644 --- a/client/embed/embed.go +++ b/client/embed/embed.go @@ -33,14 +33,14 @@ var ( ErrConfigNotInitialized = errors.New("config not initialized") ) -// PeerConnStatus is a peer's connection status. -type PeerConnStatus = peer.ConnStatus - const ( // PeerStatusConnected indicates the peer is in connected state. PeerStatusConnected = peer.StatusConnected ) +// PeerConnStatus is a peer's connection status. +type PeerConnStatus = peer.ConnStatus + // Client manages a netbird embedded client instance. type Client struct { deviceName string @@ -375,6 +375,32 @@ func (c *Client) NewHTTPClient() *http.Client { } } +// Expose exposes a local service via the NetBird reverse proxy, making it accessible through a public URL. +// It returns an ExposeSession. Call Wait on the session to keep it alive. +func (c *Client) Expose(ctx context.Context, req ExposeRequest) (*ExposeSession, error) { + engine, err := c.getEngine() + if err != nil { + return nil, err + } + + mgr := engine.GetExposeManager() + if mgr == nil { + return nil, fmt.Errorf("expose manager not available") + } + + resp, err := mgr.Expose(ctx, req) + if err != nil { + return nil, fmt.Errorf("expose: %w", err) + } + + return &ExposeSession{ + Domain: resp.Domain, + ServiceName: resp.ServiceName, + ServiceURL: resp.ServiceURL, + mgr: mgr, + }, nil +} + // Status returns the current status of the client. func (c *Client) Status() (peer.FullStatus, error) { c.mu.Lock() diff --git a/client/embed/expose.go b/client/embed/expose.go new file mode 100644 index 000000000..825bb90ee --- /dev/null +++ b/client/embed/expose.go @@ -0,0 +1,45 @@ +package embed + +import ( + "context" + "errors" + + "github.com/netbirdio/netbird/client/internal/expose" +) + +const ( + // ExposeProtocolHTTP exposes the service as HTTP. + ExposeProtocolHTTP = expose.ProtocolHTTP + // ExposeProtocolHTTPS exposes the service as HTTPS. + ExposeProtocolHTTPS = expose.ProtocolHTTPS + // ExposeProtocolTCP exposes the service as TCP. + ExposeProtocolTCP = expose.ProtocolTCP + // ExposeProtocolUDP exposes the service as UDP. + ExposeProtocolUDP = expose.ProtocolUDP + // ExposeProtocolTLS exposes the service as TLS. + ExposeProtocolTLS = expose.ProtocolTLS +) + +// ExposeRequest is a request to expose a local service via the NetBird reverse proxy. +type ExposeRequest = expose.Request + +// ExposeProtocolType represents the protocol used for exposing a service. +type ExposeProtocolType = expose.ProtocolType + +// ExposeSession represents an active expose session. Use Wait to block until the session ends. +type ExposeSession struct { + Domain string + ServiceName string + ServiceURL string + + mgr *expose.Manager +} + +// Wait blocks while keeping the expose session alive. +// It returns when ctx is cancelled or a keep-alive error occurs, then terminates the session. +func (s *ExposeSession) Wait(ctx context.Context) error { + if s == nil || s.mgr == nil { + return errors.New("expose session is not initialized") + } + return s.mgr.KeepAlive(ctx, s.Domain) +} diff --git a/client/internal/expose/manager.go b/client/internal/expose/manager.go index c59a1a7bd..076f92043 100644 --- a/client/internal/expose/manager.go +++ b/client/internal/expose/manager.go @@ -4,11 +4,14 @@ import ( "context" "time" - mgm "github.com/netbirdio/netbird/shared/management/client" log "github.com/sirupsen/logrus" + + mgm "github.com/netbirdio/netbird/shared/management/client" ) -const renewTimeout = 10 * time.Second +const ( + renewTimeout = 10 * time.Second +) // Response holds the response from exposing a service. type Response struct { @@ -18,11 +21,13 @@ type Response struct { PortAutoAssigned bool } +// Request holds the parameters for exposing a local service via the management server. +// It is part of the embed API surface and exposed via a type alias. type Request struct { NamePrefix string Domain string Port uint16 - Protocol int + Protocol ProtocolType Pin string Password string UserGroups []string @@ -59,6 +64,8 @@ func (m *Manager) Expose(ctx context.Context, req Request) (*Response, error) { return fromClientExposeResponse(resp), nil } +// KeepAlive periodically renews the expose session for the given domain until the context is canceled or an error occurs. +// It is part of the embed API surface and exposed via a type alias. func (m *Manager) KeepAlive(ctx context.Context, domain string) error { ticker := time.NewTicker(30 * time.Second) defer ticker.Stop() diff --git a/client/internal/expose/manager_test.go b/client/internal/expose/manager_test.go index 87d43cdb0..7d76c9838 100644 --- a/client/internal/expose/manager_test.go +++ b/client/internal/expose/manager_test.go @@ -86,7 +86,7 @@ func TestNewRequest(t *testing.T) { exposeReq := NewRequest(req) assert.Equal(t, uint16(8080), exposeReq.Port, "port should match") - assert.Equal(t, int(daemonProto.ExposeProtocol_EXPOSE_HTTPS), exposeReq.Protocol, "protocol should match") + assert.Equal(t, ProtocolType(daemonProto.ExposeProtocol_EXPOSE_HTTPS), exposeReq.Protocol, "protocol should match") assert.Equal(t, "123456", exposeReq.Pin, "pin should match") assert.Equal(t, "secret", exposeReq.Password, "password should match") assert.Equal(t, []string{"group1", "group2"}, exposeReq.UserGroups, "user groups should match") diff --git a/client/internal/expose/protocol.go b/client/internal/expose/protocol.go new file mode 100644 index 000000000..d5026d51e --- /dev/null +++ b/client/internal/expose/protocol.go @@ -0,0 +1,40 @@ +package expose + +import ( + "fmt" + "strings" +) + +// ProtocolType represents the protocol used for exposing a service. +type ProtocolType int + +const ( + // ProtocolHTTP exposes the service as HTTP. + ProtocolHTTP ProtocolType = 0 + // ProtocolHTTPS exposes the service as HTTPS. + ProtocolHTTPS ProtocolType = 1 + // ProtocolTCP exposes the service as TCP. + ProtocolTCP ProtocolType = 2 + // ProtocolUDP exposes the service as UDP. + ProtocolUDP ProtocolType = 3 + // ProtocolTLS exposes the service as TLS. + ProtocolTLS ProtocolType = 4 +) + +// ParseProtocolType parses a protocol string into a ProtocolType. +func ParseProtocolType(s string) (ProtocolType, error) { + switch strings.ToLower(s) { + case "http": + return ProtocolHTTP, nil + case "https": + return ProtocolHTTPS, nil + case "tcp": + return ProtocolTCP, nil + case "udp": + return ProtocolUDP, nil + case "tls": + return ProtocolTLS, nil + default: + return 0, fmt.Errorf("unsupported protocol %q: must be http, https, tcp, udp, or tls", s) + } +} diff --git a/client/internal/expose/request.go b/client/internal/expose/request.go index bff4f2ce7..ec75bb276 100644 --- a/client/internal/expose/request.go +++ b/client/internal/expose/request.go @@ -9,7 +9,7 @@ import ( func NewRequest(req *daemonProto.ExposeServiceRequest) *Request { return &Request{ Port: uint16(req.Port), - Protocol: int(req.Protocol), + Protocol: ProtocolType(req.Protocol), Pin: req.Pin, Password: req.Password, UserGroups: req.UserGroups, @@ -24,7 +24,7 @@ func toClientExposeRequest(req Request) mgm.ExposeRequest { NamePrefix: req.NamePrefix, Domain: req.Domain, Port: req.Port, - Protocol: req.Protocol, + Protocol: int(req.Protocol), Pin: req.Pin, Password: req.Password, UserGroups: req.UserGroups, From 04dcaadabf2ce031a25ef3de3c499dc2499eaaab Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Mon, 30 Mar 2026 22:25:14 +0800 Subject: [PATCH 252/374] [client] Persist service install parameters across reinstalls (#5732) --- client/cmd/service.go | 2 +- client/cmd/service_installer.go | 16 + client/cmd/service_params.go | 201 ++++++++++++ client/cmd/service_params_test.go | 523 ++++++++++++++++++++++++++++++ 4 files changed, 741 insertions(+), 1 deletion(-) create mode 100644 client/cmd/service_params.go create mode 100644 client/cmd/service_params_test.go diff --git a/client/cmd/service.go b/client/cmd/service.go index e55465875..5ff16eaeb 100644 --- a/client/cmd/service.go +++ b/client/cmd/service.go @@ -41,7 +41,7 @@ func init() { defaultServiceName = "Netbird" } - serviceCmd.AddCommand(runCmd, startCmd, stopCmd, restartCmd, svcStatusCmd, installCmd, uninstallCmd, reconfigureCmd) + serviceCmd.AddCommand(runCmd, startCmd, stopCmd, restartCmd, svcStatusCmd, installCmd, uninstallCmd, reconfigureCmd, resetParamsCmd) serviceCmd.PersistentFlags().BoolVar(&profilesDisabled, "disable-profiles", false, "Disables profiles feature. If enabled, the client will not be able to change or edit any profile. To persist this setting, use: netbird service install --disable-profiles") serviceCmd.PersistentFlags().BoolVar(&updateSettingsDisabled, "disable-update-settings", false, "Disables update settings feature. If enabled, the client will not be able to change or edit any settings. To persist this setting, use: netbird service install --disable-update-settings") diff --git a/client/cmd/service_installer.go b/client/cmd/service_installer.go index f6828d96a..28770ea16 100644 --- a/client/cmd/service_installer.go +++ b/client/cmd/service_installer.go @@ -119,6 +119,10 @@ var installCmd = &cobra.Command{ return err } + if err := loadAndApplyServiceParams(cmd); err != nil { + cmd.PrintErrf("Warning: failed to load saved service params: %v\n", err) + } + svcConfig, err := createServiceConfigForInstall() if err != nil { return err @@ -136,6 +140,10 @@ var installCmd = &cobra.Command{ return fmt.Errorf("install service: %w", err) } + if err := saveServiceParams(currentServiceParams()); err != nil { + cmd.PrintErrf("Warning: failed to save service params: %v\n", err) + } + cmd.Println("NetBird service has been installed") return nil }, @@ -187,6 +195,10 @@ This command will temporarily stop the service, update its configuration, and re return err } + if err := loadAndApplyServiceParams(cmd); err != nil { + cmd.PrintErrf("Warning: failed to load saved service params: %v\n", err) + } + wasRunning, err := isServiceRunning() if err != nil && !errors.Is(err, ErrGetServiceStatus) { return fmt.Errorf("check service status: %w", err) @@ -222,6 +234,10 @@ This command will temporarily stop the service, update its configuration, and re return fmt.Errorf("install service with new config: %w", err) } + if err := saveServiceParams(currentServiceParams()); err != nil { + cmd.PrintErrf("Warning: failed to save service params: %v\n", err) + } + if wasRunning { cmd.Println("Starting NetBird service...") if err := s.Start(); err != nil { diff --git a/client/cmd/service_params.go b/client/cmd/service_params.go new file mode 100644 index 000000000..81bd2dbb5 --- /dev/null +++ b/client/cmd/service_params.go @@ -0,0 +1,201 @@ +//go:build !ios && !android + +package cmd + +import ( + "context" + "encoding/json" + "fmt" + "maps" + "os" + "path/filepath" + + "github.com/spf13/cobra" + + "github.com/netbirdio/netbird/client/configs" + "github.com/netbirdio/netbird/util" +) + +const serviceParamsFile = "service.json" + +// serviceParams holds install-time service parameters that persist across +// uninstall/reinstall cycles. Saved to /service.json. +type serviceParams struct { + LogLevel string `json:"log_level"` + DaemonAddr string `json:"daemon_addr"` + ManagementURL string `json:"management_url,omitempty"` + ConfigPath string `json:"config_path,omitempty"` + LogFiles []string `json:"log_files,omitempty"` + DisableProfiles bool `json:"disable_profiles,omitempty"` + DisableUpdateSettings bool `json:"disable_update_settings,omitempty"` + ServiceEnvVars map[string]string `json:"service_env_vars,omitempty"` +} + +// serviceParamsPath returns the path to the service params file. +func serviceParamsPath() string { + return filepath.Join(configs.StateDir, serviceParamsFile) +} + +// loadServiceParams reads saved service parameters from disk. +// Returns nil with no error if the file does not exist. +func loadServiceParams() (*serviceParams, error) { + path := serviceParamsPath() + + data, err := os.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + return nil, nil //nolint:nilnil + } + return nil, fmt.Errorf("read service params %s: %w", path, err) + } + + var params serviceParams + if err := json.Unmarshal(data, ¶ms); err != nil { + return nil, fmt.Errorf("parse service params %s: %w", path, err) + } + + return ¶ms, nil +} + +// saveServiceParams writes current service parameters to disk atomically +// with restricted permissions. +func saveServiceParams(params *serviceParams) error { + path := serviceParamsPath() + if err := util.WriteJsonWithRestrictedPermission(context.Background(), path, params); err != nil { + return fmt.Errorf("save service params: %w", err) + } + return nil +} + +// currentServiceParams captures the current state of all package-level +// variables into a serviceParams struct. +func currentServiceParams() *serviceParams { + params := &serviceParams{ + LogLevel: logLevel, + DaemonAddr: daemonAddr, + ManagementURL: managementURL, + ConfigPath: configPath, + LogFiles: logFiles, + DisableProfiles: profilesDisabled, + DisableUpdateSettings: updateSettingsDisabled, + } + + if len(serviceEnvVars) > 0 { + parsed, err := parseServiceEnvVars(serviceEnvVars) + if err == nil && len(parsed) > 0 { + params.ServiceEnvVars = parsed + } + } + + return params +} + +// loadAndApplyServiceParams loads saved params from disk and applies them +// to any flags that were not explicitly set. +func loadAndApplyServiceParams(cmd *cobra.Command) error { + params, err := loadServiceParams() + if err != nil { + return err + } + applyServiceParams(cmd, params) + return nil +} + +// applyServiceParams merges saved parameters into package-level variables +// for any flag that was not explicitly set by the user (via CLI or env var). +// Flags that were Changed() are left untouched. +func applyServiceParams(cmd *cobra.Command, params *serviceParams) { + if params == nil { + return + } + + // For fields with non-empty defaults (log-level, daemon-addr), keep the + // != "" guard so that an older service.json missing the field doesn't + // clobber the default with an empty string. + if !rootCmd.PersistentFlags().Changed("log-level") && params.LogLevel != "" { + logLevel = params.LogLevel + } + + if !rootCmd.PersistentFlags().Changed("daemon-addr") && params.DaemonAddr != "" { + daemonAddr = params.DaemonAddr + } + + // For optional fields where empty means "use default", always apply so + // that an explicit clear (--management-url "") persists across reinstalls. + if !rootCmd.PersistentFlags().Changed("management-url") { + managementURL = params.ManagementURL + } + + if !rootCmd.PersistentFlags().Changed("config") { + configPath = params.ConfigPath + } + + if !rootCmd.PersistentFlags().Changed("log-file") { + logFiles = params.LogFiles + } + + if !serviceCmd.PersistentFlags().Changed("disable-profiles") { + profilesDisabled = params.DisableProfiles + } + + if !serviceCmd.PersistentFlags().Changed("disable-update-settings") { + updateSettingsDisabled = params.DisableUpdateSettings + } + + applyServiceEnvParams(cmd, params) +} + +// applyServiceEnvParams merges saved service environment variables. +// If --service-env was explicitly set, explicit values win on key conflict +// but saved keys not in the explicit set are carried over. +// If --service-env was not set, saved env vars are used entirely. +func applyServiceEnvParams(cmd *cobra.Command, params *serviceParams) { + if len(params.ServiceEnvVars) == 0 { + return + } + + if !cmd.Flags().Changed("service-env") { + // No explicit env vars: rebuild serviceEnvVars from saved params. + serviceEnvVars = envMapToSlice(params.ServiceEnvVars) + return + } + + // Explicit env vars were provided: merge saved values underneath. + explicit, err := parseServiceEnvVars(serviceEnvVars) + if err != nil { + cmd.PrintErrf("Warning: parse explicit service env vars for merge: %v\n", err) + return + } + + merged := make(map[string]string, len(params.ServiceEnvVars)+len(explicit)) + maps.Copy(merged, params.ServiceEnvVars) + maps.Copy(merged, explicit) // explicit wins on conflict + serviceEnvVars = envMapToSlice(merged) +} + +var resetParamsCmd = &cobra.Command{ + Use: "reset-params", + Short: "Remove saved service install parameters", + Long: "Removes the saved service.json file so the next install uses default parameters.", + RunE: func(cmd *cobra.Command, args []string) error { + path := serviceParamsPath() + if err := os.Remove(path); err != nil { + if os.IsNotExist(err) { + cmd.Println("No saved service parameters found") + return nil + } + return fmt.Errorf("remove service params: %w", err) + } + cmd.Printf("Removed saved service parameters (%s)\n", path) + return nil + }, +} + +// envMapToSlice converts a map of env vars to a KEY=VALUE slice. +func envMapToSlice(m map[string]string) []string { + s := make([]string, 0, len(m)) + for k, v := range m { + s = append(s, k+"="+v) + } + return s +} diff --git a/client/cmd/service_params_test.go b/client/cmd/service_params_test.go new file mode 100644 index 000000000..684593a00 --- /dev/null +++ b/client/cmd/service_params_test.go @@ -0,0 +1,523 @@ +//go:build !ios && !android + +package cmd + +import ( + "encoding/json" + "go/ast" + "go/parser" + "go/token" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/client/configs" +) + +func TestServiceParamsPath(t *testing.T) { + original := configs.StateDir + t.Cleanup(func() { configs.StateDir = original }) + + configs.StateDir = "/var/lib/netbird" + assert.Equal(t, "/var/lib/netbird/service.json", serviceParamsPath()) + + configs.StateDir = "/custom/state" + assert.Equal(t, "/custom/state/service.json", serviceParamsPath()) +} + +func TestSaveAndLoadServiceParams(t *testing.T) { + tmpDir := t.TempDir() + + original := configs.StateDir + t.Cleanup(func() { configs.StateDir = original }) + configs.StateDir = tmpDir + + params := &serviceParams{ + LogLevel: "debug", + DaemonAddr: "unix:///var/run/netbird.sock", + ManagementURL: "https://my.server.com", + ConfigPath: "/etc/netbird/config.json", + LogFiles: []string{"/var/log/netbird/client.log", "console"}, + DisableProfiles: true, + DisableUpdateSettings: false, + ServiceEnvVars: map[string]string{"NB_LOG_FORMAT": "json", "CUSTOM": "val"}, + } + + err := saveServiceParams(params) + require.NoError(t, err) + + // Verify the file exists and is valid JSON. + data, err := os.ReadFile(filepath.Join(tmpDir, "service.json")) + require.NoError(t, err) + assert.True(t, json.Valid(data)) + + loaded, err := loadServiceParams() + require.NoError(t, err) + require.NotNil(t, loaded) + + assert.Equal(t, params.LogLevel, loaded.LogLevel) + assert.Equal(t, params.DaemonAddr, loaded.DaemonAddr) + assert.Equal(t, params.ManagementURL, loaded.ManagementURL) + assert.Equal(t, params.ConfigPath, loaded.ConfigPath) + assert.Equal(t, params.LogFiles, loaded.LogFiles) + assert.Equal(t, params.DisableProfiles, loaded.DisableProfiles) + assert.Equal(t, params.DisableUpdateSettings, loaded.DisableUpdateSettings) + assert.Equal(t, params.ServiceEnvVars, loaded.ServiceEnvVars) +} + +func TestLoadServiceParams_FileNotExists(t *testing.T) { + tmpDir := t.TempDir() + + original := configs.StateDir + t.Cleanup(func() { configs.StateDir = original }) + configs.StateDir = tmpDir + + params, err := loadServiceParams() + assert.NoError(t, err) + assert.Nil(t, params) +} + +func TestLoadServiceParams_InvalidJSON(t *testing.T) { + tmpDir := t.TempDir() + + original := configs.StateDir + t.Cleanup(func() { configs.StateDir = original }) + configs.StateDir = tmpDir + + err := os.WriteFile(filepath.Join(tmpDir, "service.json"), []byte("not json"), 0600) + require.NoError(t, err) + + params, err := loadServiceParams() + assert.Error(t, err) + assert.Nil(t, params) +} + +func TestCurrentServiceParams(t *testing.T) { + origLogLevel := logLevel + origDaemonAddr := daemonAddr + origManagementURL := managementURL + origConfigPath := configPath + origLogFiles := logFiles + origProfilesDisabled := profilesDisabled + origUpdateSettingsDisabled := updateSettingsDisabled + origServiceEnvVars := serviceEnvVars + t.Cleanup(func() { + logLevel = origLogLevel + daemonAddr = origDaemonAddr + managementURL = origManagementURL + configPath = origConfigPath + logFiles = origLogFiles + profilesDisabled = origProfilesDisabled + updateSettingsDisabled = origUpdateSettingsDisabled + serviceEnvVars = origServiceEnvVars + }) + + logLevel = "trace" + daemonAddr = "tcp://127.0.0.1:9999" + managementURL = "https://mgmt.example.com" + configPath = "/tmp/test-config.json" + logFiles = []string{"/tmp/test.log"} + profilesDisabled = true + updateSettingsDisabled = true + serviceEnvVars = []string{"FOO=bar", "BAZ=qux"} + + params := currentServiceParams() + + assert.Equal(t, "trace", params.LogLevel) + assert.Equal(t, "tcp://127.0.0.1:9999", params.DaemonAddr) + assert.Equal(t, "https://mgmt.example.com", params.ManagementURL) + assert.Equal(t, "/tmp/test-config.json", params.ConfigPath) + assert.Equal(t, []string{"/tmp/test.log"}, params.LogFiles) + assert.True(t, params.DisableProfiles) + assert.True(t, params.DisableUpdateSettings) + assert.Equal(t, map[string]string{"FOO": "bar", "BAZ": "qux"}, params.ServiceEnvVars) +} + +func TestApplyServiceParams_OnlyUnchangedFlags(t *testing.T) { + origLogLevel := logLevel + origDaemonAddr := daemonAddr + origManagementURL := managementURL + origConfigPath := configPath + origLogFiles := logFiles + origProfilesDisabled := profilesDisabled + origUpdateSettingsDisabled := updateSettingsDisabled + origServiceEnvVars := serviceEnvVars + t.Cleanup(func() { + logLevel = origLogLevel + daemonAddr = origDaemonAddr + managementURL = origManagementURL + configPath = origConfigPath + logFiles = origLogFiles + profilesDisabled = origProfilesDisabled + updateSettingsDisabled = origUpdateSettingsDisabled + serviceEnvVars = origServiceEnvVars + }) + + // Reset all flags to defaults. + logLevel = "info" + daemonAddr = "unix:///var/run/netbird.sock" + managementURL = "" + configPath = "/etc/netbird/config.json" + logFiles = []string{"/var/log/netbird/client.log"} + profilesDisabled = false + updateSettingsDisabled = false + serviceEnvVars = nil + + // Reset Changed state on all relevant flags. + rootCmd.PersistentFlags().VisitAll(func(f *pflag.Flag) { + f.Changed = false + }) + serviceCmd.PersistentFlags().VisitAll(func(f *pflag.Flag) { + f.Changed = false + }) + + // Simulate user explicitly setting --log-level via CLI. + logLevel = "warn" + require.NoError(t, rootCmd.PersistentFlags().Set("log-level", "warn")) + + saved := &serviceParams{ + LogLevel: "debug", + DaemonAddr: "tcp://127.0.0.1:5555", + ManagementURL: "https://saved.example.com", + ConfigPath: "/saved/config.json", + LogFiles: []string{"/saved/client.log"}, + DisableProfiles: true, + DisableUpdateSettings: true, + ServiceEnvVars: map[string]string{"SAVED_KEY": "saved_val"}, + } + + cmd := &cobra.Command{} + cmd.Flags().StringSlice("service-env", nil, "") + applyServiceParams(cmd, saved) + + // log-level was Changed, so it should keep "warn", not use saved "debug". + assert.Equal(t, "warn", logLevel) + + // All other fields were not Changed, so they should use saved values. + assert.Equal(t, "tcp://127.0.0.1:5555", daemonAddr) + assert.Equal(t, "https://saved.example.com", managementURL) + assert.Equal(t, "/saved/config.json", configPath) + assert.Equal(t, []string{"/saved/client.log"}, logFiles) + assert.True(t, profilesDisabled) + assert.True(t, updateSettingsDisabled) + assert.Equal(t, []string{"SAVED_KEY=saved_val"}, serviceEnvVars) +} + +func TestApplyServiceParams_BooleanRevertToFalse(t *testing.T) { + origProfilesDisabled := profilesDisabled + origUpdateSettingsDisabled := updateSettingsDisabled + t.Cleanup(func() { + profilesDisabled = origProfilesDisabled + updateSettingsDisabled = origUpdateSettingsDisabled + }) + + // Simulate current state where booleans are true (e.g. set by previous install). + profilesDisabled = true + updateSettingsDisabled = true + + // Reset Changed state so flags appear unset. + serviceCmd.PersistentFlags().VisitAll(func(f *pflag.Flag) { + f.Changed = false + }) + + // Saved params have both as false. + saved := &serviceParams{ + DisableProfiles: false, + DisableUpdateSettings: false, + } + + cmd := &cobra.Command{} + cmd.Flags().StringSlice("service-env", nil, "") + applyServiceParams(cmd, saved) + + assert.False(t, profilesDisabled, "saved false should override current true") + assert.False(t, updateSettingsDisabled, "saved false should override current true") +} + +func TestApplyServiceParams_ClearManagementURL(t *testing.T) { + origManagementURL := managementURL + t.Cleanup(func() { managementURL = origManagementURL }) + + managementURL = "https://leftover.example.com" + + // Simulate saved params where management URL was explicitly cleared. + saved := &serviceParams{ + LogLevel: "info", + DaemonAddr: "unix:///var/run/netbird.sock", + // ManagementURL intentionally empty: was cleared with --management-url "". + } + + rootCmd.PersistentFlags().VisitAll(func(f *pflag.Flag) { + f.Changed = false + }) + + cmd := &cobra.Command{} + cmd.Flags().StringSlice("service-env", nil, "") + applyServiceParams(cmd, saved) + + assert.Equal(t, "", managementURL, "saved empty management URL should clear the current value") +} + +func TestApplyServiceParams_NilParams(t *testing.T) { + origLogLevel := logLevel + t.Cleanup(func() { logLevel = origLogLevel }) + + logLevel = "info" + cmd := &cobra.Command{} + cmd.Flags().StringSlice("service-env", nil, "") + + // Should be a no-op. + applyServiceParams(cmd, nil) + assert.Equal(t, "info", logLevel) +} + +func TestApplyServiceEnvParams_MergeExplicitAndSaved(t *testing.T) { + origServiceEnvVars := serviceEnvVars + t.Cleanup(func() { serviceEnvVars = origServiceEnvVars }) + + // Set up a command with --service-env marked as Changed. + cmd := &cobra.Command{} + cmd.Flags().StringSlice("service-env", nil, "") + require.NoError(t, cmd.Flags().Set("service-env", "EXPLICIT=yes,OVERLAP=explicit")) + + serviceEnvVars = []string{"EXPLICIT=yes", "OVERLAP=explicit"} + + saved := &serviceParams{ + ServiceEnvVars: map[string]string{ + "SAVED": "val", + "OVERLAP": "saved", + }, + } + + applyServiceEnvParams(cmd, saved) + + // Parse result for easier assertion. + result, err := parseServiceEnvVars(serviceEnvVars) + require.NoError(t, err) + + assert.Equal(t, "yes", result["EXPLICIT"]) + assert.Equal(t, "val", result["SAVED"]) + // Explicit wins on conflict. + assert.Equal(t, "explicit", result["OVERLAP"]) +} + +func TestApplyServiceEnvParams_NotChanged(t *testing.T) { + origServiceEnvVars := serviceEnvVars + t.Cleanup(func() { serviceEnvVars = origServiceEnvVars }) + + serviceEnvVars = nil + + cmd := &cobra.Command{} + cmd.Flags().StringSlice("service-env", nil, "") + + saved := &serviceParams{ + ServiceEnvVars: map[string]string{"FROM_SAVED": "val"}, + } + + applyServiceEnvParams(cmd, saved) + + result, err := parseServiceEnvVars(serviceEnvVars) + require.NoError(t, err) + assert.Equal(t, map[string]string{"FROM_SAVED": "val"}, result) +} + +// TestServiceParams_FieldsCoveredInFunctions ensures that all serviceParams fields are +// referenced in both currentServiceParams() and applyServiceParams(). If a new field is +// added to serviceParams but not wired into these functions, this test fails. +func TestServiceParams_FieldsCoveredInFunctions(t *testing.T) { + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, "service_params.go", nil, 0) + require.NoError(t, err) + + // Collect all JSON field names from the serviceParams struct. + structFields := extractStructJSONFields(t, file, "serviceParams") + require.NotEmpty(t, structFields, "failed to find serviceParams struct fields") + + // Collect field names referenced in currentServiceParams and applyServiceParams. + currentFields := extractFuncFieldRefs(t, file, "currentServiceParams", structFields) + applyFields := extractFuncFieldRefs(t, file, "applyServiceParams", structFields) + // applyServiceEnvParams handles ServiceEnvVars indirectly. + applyEnvFields := extractFuncFieldRefs(t, file, "applyServiceEnvParams", structFields) + for k, v := range applyEnvFields { + applyFields[k] = v + } + + for _, field := range structFields { + assert.Contains(t, currentFields, field, + "serviceParams field %q is not captured in currentServiceParams()", field) + assert.Contains(t, applyFields, field, + "serviceParams field %q is not restored in applyServiceParams()/applyServiceEnvParams()", field) + } +} + +// TestServiceParams_BuildArgsCoversAllFlags ensures that buildServiceArguments references +// all serviceParams fields that should become CLI args. ServiceEnvVars is excluded because +// it flows through newSVCConfig() EnvVars, not CLI args. +func TestServiceParams_BuildArgsCoversAllFlags(t *testing.T) { + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, "service_params.go", nil, 0) + require.NoError(t, err) + + structFields := extractStructJSONFields(t, file, "serviceParams") + require.NotEmpty(t, structFields) + + installerFile, err := parser.ParseFile(fset, "service_installer.go", nil, 0) + require.NoError(t, err) + + // Fields that are handled outside of buildServiceArguments (env vars go through newSVCConfig). + fieldsNotInArgs := map[string]bool{ + "ServiceEnvVars": true, + } + + buildFields := extractFuncGlobalRefs(t, installerFile, "buildServiceArguments") + + // Forward: every struct field must appear in buildServiceArguments. + for _, field := range structFields { + if fieldsNotInArgs[field] { + continue + } + globalVar := fieldToGlobalVar(field) + assert.Contains(t, buildFields, globalVar, + "serviceParams field %q (global %q) is not referenced in buildServiceArguments()", field, globalVar) + } + + // Reverse: every service-related global used in buildServiceArguments must + // have a corresponding serviceParams field. This catches a developer adding + // a new flag to buildServiceArguments without adding it to the struct. + globalToField := make(map[string]string, len(structFields)) + for _, field := range structFields { + globalToField[fieldToGlobalVar(field)] = field + } + // Identifiers in buildServiceArguments that are not service params + // (builtins, boilerplate, loop variables). + nonParamGlobals := map[string]bool{ + "args": true, "append": true, "string": true, "_": true, + "logFile": true, // range variable over logFiles + } + for ref := range buildFields { + if nonParamGlobals[ref] { + continue + } + _, inStruct := globalToField[ref] + assert.True(t, inStruct, + "buildServiceArguments() references global %q which has no corresponding serviceParams field", ref) + } +} + +// extractStructJSONFields returns field names from a named struct type. +func extractStructJSONFields(t *testing.T, file *ast.File, structName string) []string { + t.Helper() + var fields []string + ast.Inspect(file, func(n ast.Node) bool { + ts, ok := n.(*ast.TypeSpec) + if !ok || ts.Name.Name != structName { + return true + } + st, ok := ts.Type.(*ast.StructType) + if !ok { + return false + } + for _, f := range st.Fields.List { + if len(f.Names) > 0 { + fields = append(fields, f.Names[0].Name) + } + } + return false + }) + return fields +} + +// extractFuncFieldRefs returns which of the given field names appear inside the +// named function, either as selector expressions (params.FieldName) or as +// composite literal keys (&serviceParams{FieldName: ...}). +func extractFuncFieldRefs(t *testing.T, file *ast.File, funcName string, fields []string) map[string]bool { + t.Helper() + fieldSet := make(map[string]bool, len(fields)) + for _, f := range fields { + fieldSet[f] = true + } + + found := make(map[string]bool) + fn := findFuncDecl(file, funcName) + require.NotNil(t, fn, "function %s not found", funcName) + + ast.Inspect(fn.Body, func(n ast.Node) bool { + switch v := n.(type) { + case *ast.SelectorExpr: + if fieldSet[v.Sel.Name] { + found[v.Sel.Name] = true + } + case *ast.KeyValueExpr: + if ident, ok := v.Key.(*ast.Ident); ok && fieldSet[ident.Name] { + found[ident.Name] = true + } + } + return true + }) + return found +} + +// extractFuncGlobalRefs returns all identifier names referenced in the named function body. +func extractFuncGlobalRefs(t *testing.T, file *ast.File, funcName string) map[string]bool { + t.Helper() + fn := findFuncDecl(file, funcName) + require.NotNil(t, fn, "function %s not found", funcName) + + refs := make(map[string]bool) + ast.Inspect(fn.Body, func(n ast.Node) bool { + if ident, ok := n.(*ast.Ident); ok { + refs[ident.Name] = true + } + return true + }) + return refs +} + +func findFuncDecl(file *ast.File, name string) *ast.FuncDecl { + for _, decl := range file.Decls { + fn, ok := decl.(*ast.FuncDecl) + if ok && fn.Name.Name == name { + return fn + } + } + return nil +} + +// fieldToGlobalVar maps serviceParams field names to the package-level variable +// names used in buildServiceArguments and applyServiceParams. +func fieldToGlobalVar(field string) string { + m := map[string]string{ + "LogLevel": "logLevel", + "DaemonAddr": "daemonAddr", + "ManagementURL": "managementURL", + "ConfigPath": "configPath", + "LogFiles": "logFiles", + "DisableProfiles": "profilesDisabled", + "DisableUpdateSettings": "updateSettingsDisabled", + "ServiceEnvVars": "serviceEnvVars", + } + if v, ok := m[field]; ok { + return v + } + // Default: lowercase first letter. + return strings.ToLower(field[:1]) + field[1:] +} + +func TestEnvMapToSlice(t *testing.T) { + m := map[string]string{"A": "1", "B": "2"} + s := envMapToSlice(m) + assert.Len(t, s, 2) + assert.Contains(t, s, "A=1") + assert.Contains(t, s, "B=2") +} + +func TestEnvMapToSlice_Empty(t *testing.T) { + s := envMapToSlice(map[string]string{}) + assert.Empty(t, s) +} From 7bbe71c3ac4b8d95cb6e5b764dd87d6ba8c9c271 Mon Sep 17 00:00:00 2001 From: Akshay Ubale <31108741+iakshayubale@users.noreply.github.com> Date: Mon, 30 Mar 2026 17:55:01 +0200 Subject: [PATCH 253/374] [client] Refactor Android PeerInfo to use proper ConnStatus enum type (#5644) * Simplify Android ConnStatus API with integer constants Replace dual field PeerInfo design with unified integer based ConnStatus field and exported gomobile friendly constants. Changes: > PeerInfo.ConnStatus: changed from string to int > Export three constants: ConnStatusIdle, ConnStatusConnecting,ConnStatusConnected (mapped to peer.ConnStatus enum values) > Updated PeersList() to convert peer enum directly to int Benefits: > Simpler API surface with single ConnStatus field > Better gomobile compatibility for cross-platform usage > Type-safe integer constants across language boundaries * test: add All group to setupTestAccount fixture The setupTestAccount() test helper was missing the required "All" group, causing "failed to get group all: no group ALL found" errors during test execution. Add the All group with all test peers to match the expected account structure. Fixes the failing account and types package tests when GetGroupAll() is called in test scenarios. --- client/android/client.go | 2 +- client/android/peer_notifier.go | 11 ++++++++++- management/server/types/account_test.go | 6 ++++++ 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/client/android/client.go b/client/android/client.go index 3fc571559..d35bf4279 100644 --- a/client/android/client.go +++ b/client/android/client.go @@ -205,7 +205,7 @@ func (c *Client) PeersList() *PeerInfoArray { pi := PeerInfo{ p.IP, p.FQDN, - p.ConnStatus.String(), + int(p.ConnStatus), PeerRoutes{routes: maps.Keys(p.GetRoutes())}, } peerInfos[n] = pi diff --git a/client/android/peer_notifier.go b/client/android/peer_notifier.go index b03947da1..4ec22f3ab 100644 --- a/client/android/peer_notifier.go +++ b/client/android/peer_notifier.go @@ -2,11 +2,20 @@ package android +import "github.com/netbirdio/netbird/client/internal/peer" + +// Connection status constants exported via gomobile. +const ( + ConnStatusIdle = int(peer.StatusIdle) + ConnStatusConnecting = int(peer.StatusConnecting) + ConnStatusConnected = int(peer.StatusConnected) +) + // PeerInfo describe information about the peers. It designed for the UI usage type PeerInfo struct { IP string FQDN string - ConnStatus string // Todo replace to enum + ConnStatus int Routes PeerRoutes } diff --git a/management/server/types/account_test.go b/management/server/types/account_test.go index af2896216..00ba29b7f 100644 --- a/management/server/types/account_test.go +++ b/management/server/types/account_test.go @@ -84,6 +84,12 @@ func setupTestAccount() *Account { }, }, Groups: map[string]*Group{ + "groupAll": { + ID: "groupAll", + Name: "All", + Peers: []string{"peer1", "peer2", "peer3", "peer11", "peer12", "peer21", "peer31", "peer32", "peer41", "peer51", "peer61"}, + Issued: GroupIssuedAPI, + }, "group1": { ID: "group1", Peers: []string{"peer11", "peer12"}, From 434ac7f0f59ac6575cd3ce30301308d90e6e5737 Mon Sep 17 00:00:00 2001 From: Eduard Gert Date: Tue, 31 Mar 2026 09:31:03 +0200 Subject: [PATCH 254/374] [docs] Update CONTRIBUTOR_LICENSE_AGREEMENT.md (#5131) --- CONTRIBUTOR_LICENSE_AGREEMENT.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTOR_LICENSE_AGREEMENT.md b/CONTRIBUTOR_LICENSE_AGREEMENT.md index 1fdd072c9..b0a6ee218 100644 --- a/CONTRIBUTOR_LICENSE_AGREEMENT.md +++ b/CONTRIBUTOR_LICENSE_AGREEMENT.md @@ -1,7 +1,7 @@ ## Contributor License Agreement This Contributor License Agreement (referred to as the "Agreement") is entered into by the individual -submitting this Agreement and NetBird GmbH, c/o Max-Beer-Straße 2-4 Münzstraße 12 10178 Berlin, Germany, +submitting this Agreement and NetBird GmbH, Brunnenstraße 196, 10119 Berlin, Germany, referred to as "NetBird" (collectively, the "Parties"). The Agreement outlines the terms and conditions under which NetBird may utilize software contributions provided by the Contributor for inclusion in its software development projects. By submitting this Agreement, the Contributor confirms their acceptance From a62d472bc4db383551db8083f7eae7b71822e728 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Tue, 31 Mar 2026 16:36:27 +0800 Subject: [PATCH 255/374] [client] Include fake IP block routes in Android TUN rebuilds (#5739) --- .../routemanager/notifier/notifier_android.go | 57 ++++++++++++------- 1 file changed, 36 insertions(+), 21 deletions(-) diff --git a/client/internal/routemanager/notifier/notifier_android.go b/client/internal/routemanager/notifier/notifier_android.go index dec0af87c..3d2784ae1 100644 --- a/client/internal/routemanager/notifier/notifier_android.go +++ b/client/internal/routemanager/notifier/notifier_android.go @@ -31,26 +31,11 @@ func (n *Notifier) SetListener(listener listener.NetworkChangeListener) { n.listener = listener } +// SetInitialClientRoutes stores the full initial route set (including fake IP blocks) +// and a separate comparison set (without fake IP blocks) for diff detection. func (n *Notifier) SetInitialClientRoutes(initialRoutes []*route.Route, routesForComparison []*route.Route) { - // initialRoutes contains fake IP block for interface configuration - filteredInitial := make([]*route.Route, 0) - for _, r := range initialRoutes { - if r.IsDynamic() { - continue - } - filteredInitial = append(filteredInitial, r) - } - n.initialRoutes = filteredInitial - - // routesForComparison excludes fake IP block for comparison with new routes - filteredComparison := make([]*route.Route, 0) - for _, r := range routesForComparison { - if r.IsDynamic() { - continue - } - filteredComparison = append(filteredComparison, r) - } - n.currentRoutes = filteredComparison + n.initialRoutes = filterStatic(initialRoutes) + n.currentRoutes = filterStatic(routesForComparison) } func (n *Notifier) OnNewRoutes(idMap route.HAMap) { @@ -83,13 +68,43 @@ func (n *Notifier) notify() { return } - routeStrings := n.routesToStrings(n.currentRoutes) + allRoutes := slices.Clone(n.currentRoutes) + allRoutes = append(allRoutes, n.extraInitialRoutes()...) + + routeStrings := n.routesToStrings(allRoutes) sort.Strings(routeStrings) go func(l listener.NetworkChangeListener) { - l.OnNetworkChanged(strings.Join(n.addIPv6RangeIfNeeded(routeStrings, n.currentRoutes), ",")) + l.OnNetworkChanged(strings.Join(n.addIPv6RangeIfNeeded(routeStrings, allRoutes), ",")) }(n.listener) } +// extraInitialRoutes returns initialRoutes whose network prefix is absent +// from currentRoutes (e.g. the fake IP block added at setup time). +func (n *Notifier) extraInitialRoutes() []*route.Route { + currentNets := make(map[netip.Prefix]struct{}, len(n.currentRoutes)) + for _, r := range n.currentRoutes { + currentNets[r.Network] = struct{}{} + } + + var extra []*route.Route + for _, r := range n.initialRoutes { + if _, ok := currentNets[r.Network]; !ok { + extra = append(extra, r) + } + } + return extra +} + +func filterStatic(routes []*route.Route) []*route.Route { + out := make([]*route.Route, 0, len(routes)) + for _, r := range routes { + if !r.IsDynamic() { + out = append(out, r) + } + } + return out +} + func (n *Notifier) routesToStrings(routes []*route.Route) []string { nets := make([]string, 0, len(routes)) for _, r := range routes { From 6553ce4ceae3f3fd27c6eaa29906cae4709b02cb Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Tue, 31 Mar 2026 16:49:06 +0800 Subject: [PATCH 256/374] [client] Mock management client in TestUpdateOldManagementURL to fix CI flakiness (#5703) --- client/internal/profilemanager/config.go | 17 +++++++-- client/internal/profilemanager/config_test.go | 38 ++++++++++++++----- 2 files changed, 43 insertions(+), 12 deletions(-) diff --git a/client/internal/profilemanager/config.go b/client/internal/profilemanager/config.go index b27f1932f..f128ee903 100644 --- a/client/internal/profilemanager/config.go +++ b/client/internal/profilemanager/config.go @@ -39,6 +39,18 @@ const ( DefaultAdminURL = "https://app.netbird.io:443" ) +// mgmProber is the subset of management client needed for URL migration probes. +type mgmProber interface { + GetServerPublicKey() (*wgtypes.Key, error) + Close() error +} + +// newMgmProber creates a management client for probing URL reachability. +// Overridden in tests to avoid real network calls. +var newMgmProber = func(ctx context.Context, addr string, key wgtypes.Key, tlsEnabled bool) (mgmProber, error) { + return mgm.NewClient(ctx, addr, key, tlsEnabled) +} + var DefaultInterfaceBlacklist = []string{ iface.WgInterfaceDefault, "wt", "utun", "tun0", "zt", "ZeroTier", "wg", "ts", "Tailscale", "tailscale", "docker", "veth", "br-", "lo", @@ -753,14 +765,13 @@ func UpdateOldManagementURL(ctx context.Context, config *Config, configPath stri return config, err } - client, err := mgm.NewClient(ctx, newURL.Host, key, mgmTlsEnabled) + client, err := newMgmProber(ctx, newURL.Host, key, mgmTlsEnabled) if err != nil { log.Infof("couldn't switch to the new Management %s", newURL.String()) return config, err } defer func() { - err = client.Close() - if err != nil { + if err := client.Close(); err != nil { log.Warnf("failed to close the Management service client %v", err) } }() diff --git a/client/internal/profilemanager/config_test.go b/client/internal/profilemanager/config_test.go index ab13cf389..c3efb48e6 100644 --- a/client/internal/profilemanager/config_test.go +++ b/client/internal/profilemanager/config_test.go @@ -10,12 +10,23 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" "github.com/netbirdio/netbird/client/iface" "github.com/netbirdio/netbird/client/internal/routemanager/dynamic" "github.com/netbirdio/netbird/util" ) +type mockMgmProber struct { + key wgtypes.Key +} + +func (m *mockMgmProber) GetServerPublicKey() (*wgtypes.Key, error) { + return &m.key, nil +} + +func (m *mockMgmProber) Close() error { return nil } + func TestGetConfig(t *testing.T) { // case 1: new default config has to be generated config, err := UpdateOrCreateConfig(ConfigInput{ @@ -234,6 +245,16 @@ func TestWireguardPortDefaultVsExplicit(t *testing.T) { } func TestUpdateOldManagementURL(t *testing.T) { + origProber := newMgmProber + newMgmProber = func(_ context.Context, _ string, _ wgtypes.Key, _ bool) (mgmProber, error) { + key, err := wgtypes.GenerateKey() + if err != nil { + return nil, err + } + return &mockMgmProber{key: key.PublicKey()}, nil + } + t.Cleanup(func() { newMgmProber = origProber }) + tests := []struct { name string previousManagementURL string @@ -273,18 +294,17 @@ func TestUpdateOldManagementURL(t *testing.T) { ConfigPath: configPath, }) require.NoError(t, err, "failed to create testing config") - previousStats, err := os.Stat(configPath) - require.NoError(t, err, "failed to create testing config stats") + previousContent, err := os.ReadFile(configPath) + require.NoError(t, err, "failed to read initial config") resultConfig, err := UpdateOldManagementURL(context.TODO(), config, configPath) require.NoError(t, err, "got error when updating old management url") require.Equal(t, tt.expectedManagementURL, resultConfig.ManagementURL.String()) - newStats, err := os.Stat(configPath) - require.NoError(t, err, "failed to create testing config stats") - switch tt.fileShouldNotChange { - case true: - require.Equal(t, previousStats.ModTime(), newStats.ModTime(), "file should not change") - case false: - require.NotEqual(t, previousStats.ModTime(), newStats.ModTime(), "file should have changed") + newContent, err := os.ReadFile(configPath) + require.NoError(t, err, "failed to read updated config") + if tt.fileShouldNotChange { + require.Equal(t, string(previousContent), string(newContent), "file should not change") + } else { + require.NotEqual(t, string(previousContent), string(newContent), "file should have changed") } }) } From 405c3f4003700945d4722c65bda2166ac8d447cc Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Tue, 31 Mar 2026 14:03:34 +0200 Subject: [PATCH 257/374] [management] Feature/fleetdm api spec (#5597) add fleetdm api spec --- shared/management/client/rest/edr.go | 59 +++++ shared/management/http/api/openapi.yml | 278 ++++++++++++++++++++++++ shared/management/http/api/types.gen.go | 81 +++++++ 3 files changed, 418 insertions(+) diff --git a/shared/management/client/rest/edr.go b/shared/management/client/rest/edr.go index 7dfc891c2..f9b7f2a88 100644 --- a/shared/management/client/rest/edr.go +++ b/shared/management/client/rest/edr.go @@ -265,6 +265,65 @@ func (a *EDRAPI) DeleteHuntressIntegration(ctx context.Context) error { return nil } +// GetFleetDMIntegration retrieves the EDR FleetDM integration. +func (a *EDRAPI) GetFleetDMIntegration(ctx context.Context) (*api.EDRFleetDMResponse, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/edr/fleetdm", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.EDRFleetDMResponse](resp) + return &ret, err +} + +// CreateFleetDMIntegration creates a new EDR FleetDM integration. +func (a *EDRAPI) CreateFleetDMIntegration(ctx context.Context, request api.EDRFleetDMRequest) (*api.EDRFleetDMResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/edr/fleetdm", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.EDRFleetDMResponse](resp) + return &ret, err +} + +// UpdateFleetDMIntegration updates an existing EDR FleetDM integration. +func (a *EDRAPI) UpdateFleetDMIntegration(ctx context.Context, request api.EDRFleetDMRequest) (*api.EDRFleetDMResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/integrations/edr/fleetdm", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.EDRFleetDMResponse](resp) + return &ret, err +} + +// DeleteFleetDMIntegration deletes the EDR FleetDM integration. +func (a *EDRAPI) DeleteFleetDMIntegration(ctx context.Context) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/integrations/edr/fleetdm", nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} + // BypassPeerCompliance bypasses compliance for a non-compliant peer // See more: https://docs.netbird.io/api/resources/edr#bypass-peer-compliance func (a *EDRAPI) BypassPeerCompliance(ctx context.Context, peerID string) (*api.BypassResponse, error) { diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 833468676..d35b32be0 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -92,6 +92,9 @@ tags: - name: EDR Huntress Integrations description: Manage Huntress EDR integrations. x-cloud-only: true + - name: EDR FleetDM Integrations + description: Manage FleetDM EDR integrations. + x-cloud-only: true - name: EDR Peers description: Manage EDR compliance bypass for peers. x-cloud-only: true @@ -4276,6 +4279,126 @@ components: description: Status of agent firewall. Can be one of Disabled, Enabled, Pending Isolation, Isolated, Pending Release. example: "Enabled" + EDRFleetDMRequest: + type: object + description: Request payload for creating or updating a FleetDM EDR integration + properties: + api_url: + type: string + description: FleetDM server URL + api_token: + type: string + description: FleetDM API token + groups: + type: array + description: The Groups this integrations applies to + items: + type: string + last_synced_interval: + type: integer + description: The devices last sync requirement interval in hours. Minimum value is 24 hours + minimum: 24 + enabled: + type: boolean + description: Indicates whether the integration is enabled + default: true + match_attributes: + $ref: '#/components/schemas/FleetDMMatchAttributes' + required: + - api_url + - api_token + - groups + - last_synced_interval + - match_attributes + EDRFleetDMResponse: + type: object + description: Represents a FleetDM EDR integration configuration + required: + - id + - account_id + - api_url + - created_by + - last_synced_at + - created_at + - updated_at + - groups + - last_synced_interval + - match_attributes + - enabled + properties: + id: + type: integer + format: int64 + description: The unique numeric identifier for the integration. + example: 123 + account_id: + type: string + description: The identifier of the account this integration belongs to. + example: "ch8i4ug6lnn4g9hqv7l0" + api_url: + type: string + description: FleetDM server URL + last_synced_at: + type: string + format: date-time + description: Timestamp of when the integration was last synced. + example: "2023-05-15T10:30:00Z" + created_by: + type: string + description: The user id that created the integration + created_at: + type: string + format: date-time + description: Timestamp of when the integration was created. + example: "2023-05-15T10:30:00Z" + updated_at: + type: string + format: date-time + description: Timestamp of when the integration was last updated. + example: "2023-05-16T11:45:00Z" + groups: + type: array + description: List of groups + items: + $ref: '#/components/schemas/Group' + last_synced_interval: + type: integer + description: The devices last sync requirement interval in hours. + enabled: + type: boolean + description: Indicates whether the integration is enabled + default: true + match_attributes: + $ref: '#/components/schemas/FleetDMMatchAttributes' + + FleetDMMatchAttributes: + type: object + description: Attribute conditions to match when approving FleetDM hosts. Most attributes work with FleetDM's free/open-source version. Premium-only attributes are marked accordingly + additionalProperties: false + properties: + disk_encryption_enabled: + type: boolean + description: Whether disk encryption (FileVault/BitLocker) must be enabled on the host + failing_policies_count_max: + type: integer + description: Maximum number of allowed failing policies. Use 0 to require all policies to pass + minimum: 0 + example: 0 + vulnerable_software_count_max: + type: integer + description: Maximum number of allowed vulnerable software on the host + minimum: 0 + example: 0 + status_online: + type: boolean + description: Whether the host must be online (recently seen by Fleet) + required_policies: + type: array + description: List of FleetDM policy IDs that must be passing on the host. If any of these policies is failing, the host is non-compliant + items: + type: integer + example: [1, 5, 12] + IntegrationSyncFilters: type: object properties: @@ -10684,6 +10807,161 @@ paths: schema: $ref: '#/components/schemas/ErrorResponse' + /api/integrations/edr/fleetdm: + post: + tags: + - EDR FleetDM Integrations + summary: Create EDR FleetDM Integration + description: Creates a new EDR FleetDM integration + operationId: createFleetDMEDRIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/EDRFleetDMRequest' + responses: + '200': + description: Integration created successfully. Returns the created integration. + content: + application/json: + schema: + $ref: '#/components/schemas/EDRFleetDMResponse' + '400': + description: Bad Request (e.g., invalid JSON, missing required fields, validation error). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized (e.g., missing or invalid authentication token). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + get: + tags: + - EDR FleetDM Integrations + summary: Get EDR FleetDM Integration + description: Retrieves a specific EDR FleetDM integration by its ID. + responses: + '200': + description: Successfully retrieved the integration details. + content: + application/json: + schema: + $ref: '#/components/schemas/EDRFleetDMResponse' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found (e.g., integration with the given ID does not exist). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + put: + tags: + - EDR FleetDM Integrations + summary: Update EDR FleetDM Integration + description: Updates an existing EDR FleetDM Integration. + operationId: updateFleetDMEDRIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/EDRFleetDMRequest' + responses: + '200': + description: Integration updated successfully. Returns the updated integration. + content: + application/json: + schema: + $ref: '#/components/schemas/EDRFleetDMResponse' + '400': + description: Bad Request (e.g., invalid JSON, validation error, invalid ID). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - EDR FleetDM Integrations + summary: Delete EDR FleetDM Integration + description: Deletes an EDR FleetDM Integration by its ID. + responses: + '200': + description: Integration deleted successfully. Returns an empty object. + content: + application/json: + schema: + type: object + example: { } + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/peers/{peer-id}/edr/bypass: parameters: - name: peer-id diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index fb9976c89..c47b77455 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -1849,6 +1849,63 @@ type EDRFalconResponse struct { ZtaScoreThreshold int `json:"zta_score_threshold"` } +// EDRFleetDMRequest Request payload for creating or updating a FleetDM EDR integration +type EDRFleetDMRequest struct { + // ApiToken FleetDM API token + ApiToken string `json:"api_token"` + + // ApiUrl FleetDM server URL + ApiUrl string `json:"api_url"` + + // Enabled Indicates whether the integration is enabled + Enabled *bool `json:"enabled,omitempty"` + + // Groups The Groups this integrations applies to + Groups []string `json:"groups"` + + // LastSyncedInterval The devices last sync requirement interval in hours. Minimum value is 24 hours + LastSyncedInterval int `json:"last_synced_interval"` + + // MatchAttributes Attribute conditions to match when approving FleetDM hosts. Most attributes work with FleetDM's free/open-source version. Premium-only attributes are marked accordingly + MatchAttributes FleetDMMatchAttributes `json:"match_attributes"` +} + +// EDRFleetDMResponse Represents a FleetDM EDR integration configuration +type EDRFleetDMResponse struct { + // AccountId The identifier of the account this integration belongs to. + AccountId string `json:"account_id"` + + // ApiUrl FleetDM server URL + ApiUrl string `json:"api_url"` + + // CreatedAt Timestamp of when the integration was created. + CreatedAt time.Time `json:"created_at"` + + // CreatedBy The user id that created the integration + CreatedBy string `json:"created_by"` + + // Enabled Indicates whether the integration is enabled + Enabled bool `json:"enabled"` + + // Groups List of groups + Groups []Group `json:"groups"` + + // Id The unique numeric identifier for the integration. + Id int64 `json:"id"` + + // LastSyncedAt Timestamp of when the integration was last synced. + LastSyncedAt time.Time `json:"last_synced_at"` + + // LastSyncedInterval The devices last sync requirement interval in hours. + LastSyncedInterval int `json:"last_synced_interval"` + + // MatchAttributes Attribute conditions to match when approving FleetDM hosts. Most attributes work with FleetDM's free/open-source version. Premium-only attributes are marked accordingly + MatchAttributes FleetDMMatchAttributes `json:"match_attributes"` + + // UpdatedAt Timestamp of when the integration was last updated. + UpdatedAt time.Time `json:"updated_at"` +} + // EDRHuntressRequest Request payload for creating or updating a EDR Huntress integration type EDRHuntressRequest struct { // ApiKey Huntress API key @@ -2062,6 +2119,24 @@ type Event struct { // EventActivityCode The string code of the activity that occurred during the event type EventActivityCode string +// FleetDMMatchAttributes Attribute conditions to match when approving FleetDM hosts. Most attributes work with FleetDM's free/open-source version. Premium-only attributes are marked accordingly +type FleetDMMatchAttributes struct { + // DiskEncryptionEnabled Whether disk encryption (FileVault/BitLocker) must be enabled on the host + DiskEncryptionEnabled *bool `json:"disk_encryption_enabled,omitempty"` + + // FailingPoliciesCountMax Maximum number of allowed failing policies. Use 0 to require all policies to pass + FailingPoliciesCountMax *int `json:"failing_policies_count_max,omitempty"` + + // RequiredPolicies List of FleetDM policy IDs that must be passing on the host. If any of these policies is failing, the host is non-compliant + RequiredPolicies *[]int `json:"required_policies,omitempty"` + + // StatusOnline Whether the host must be online (recently seen by Fleet) + StatusOnline *bool `json:"status_online,omitempty"` + + // VulnerableSoftwareCountMax Maximum number of allowed vulnerable software on the host + VulnerableSoftwareCountMax *int `json:"vulnerable_software_count_max,omitempty"` +} + // GeoLocationCheck Posture check for geo location type GeoLocationCheck struct { // Action Action to take upon policy match @@ -4869,6 +4944,12 @@ type CreateFalconEDRIntegrationJSONRequestBody = EDRFalconRequest // UpdateFalconEDRIntegrationJSONRequestBody defines body for UpdateFalconEDRIntegration for application/json ContentType. type UpdateFalconEDRIntegrationJSONRequestBody = EDRFalconRequest +// CreateFleetDMEDRIntegrationJSONRequestBody defines body for CreateFleetDMEDRIntegration for application/json ContentType. +type CreateFleetDMEDRIntegrationJSONRequestBody = EDRFleetDMRequest + +// UpdateFleetDMEDRIntegrationJSONRequestBody defines body for UpdateFleetDMEDRIntegration for application/json ContentType. +type UpdateFleetDMEDRIntegrationJSONRequestBody = EDRFleetDMRequest + // CreateHuntressEDRIntegrationJSONRequestBody defines body for CreateHuntressEDRIntegration for application/json ContentType. type CreateHuntressEDRIntegrationJSONRequestBody = EDRHuntressRequest From 3c3097ea74afa835d6a3462dc2cd6aa7d3c9bb1d Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Tue, 31 Mar 2026 16:43:16 +0200 Subject: [PATCH 258/374] [management] add target user account validation (#5741) --- .github/workflows/golangci-lint.yml | 2 +- management/server/user.go | 16 +++++ management/server/user_test.go | 98 +++++++++++++++++++++++++++++ 3 files changed, 115 insertions(+), 1 deletion(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 9e753ce73..62dfe9bce 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -19,7 +19,7 @@ jobs: - name: codespell uses: codespell-project/actions-codespell@v2 with: - ignore_words_list: erro,clienta,hastable,iif,groupd,testin,groupe,cros,ans,deriver,te + ignore_words_list: erro,clienta,hastable,iif,groupd,testin,groupe,cros,ans,deriver,te,userA skip: go.mod,go.sum,**/proxy/web/** golangci: strategy: diff --git a/management/server/user.go b/management/server/user.go index 327aec2d0..8742daed6 100644 --- a/management/server/user.go +++ b/management/server/user.go @@ -417,6 +417,10 @@ func (am *DefaultAccountManager) CreatePAT(ctx context.Context, accountID string return nil, err } + if targetUser.AccountID != accountID { + return nil, status.NewPermissionDeniedError() + } + // @note this is essential to prevent non admin users with Pats create permission frpm creating one for a service user if initiatorUserID != targetUserID && !(initiatorUser.HasAdminPower() && targetUser.IsServiceUser) { return nil, status.NewAdminPermissionError() @@ -457,6 +461,10 @@ func (am *DefaultAccountManager) DeletePAT(ctx context.Context, accountID string return err } + if targetUser.AccountID != accountID { + return status.NewPermissionDeniedError() + } + if initiatorUserID != targetUserID && !(initiatorUser.HasAdminPower() && targetUser.IsServiceUser) { return status.NewAdminPermissionError() } @@ -496,6 +504,10 @@ func (am *DefaultAccountManager) GetPAT(ctx context.Context, accountID string, i return nil, err } + if targetUser.AccountID != accountID { + return nil, status.NewPermissionDeniedError() + } + if initiatorUserID != targetUserID && !(initiatorUser.HasAdminPower() && targetUser.IsServiceUser) { return nil, status.NewAdminPermissionError() } @@ -523,6 +535,10 @@ func (am *DefaultAccountManager) GetAllPATs(ctx context.Context, accountID strin return nil, err } + if targetUser.AccountID != accountID { + return nil, status.NewPermissionDeniedError() + } + if initiatorUserID != targetUserID && !(initiatorUser.HasAdminPower() && targetUser.IsServiceUser) { return nil, status.NewAdminPermissionError() } diff --git a/management/server/user_test.go b/management/server/user_test.go index 800d2406c..8fdfbd633 100644 --- a/management/server/user_test.go +++ b/management/server/user_test.go @@ -336,6 +336,104 @@ func TestUser_GetAllPATs(t *testing.T) { assert.Equal(t, 2, len(pats)) } +func TestUser_PAT_CrossAccountProtection(t *testing.T) { + const ( + accountAID = "accountA" + accountBID = "accountB" + userAID = "userA" + adminBID = "adminB" + serviceUserBID = "serviceUserB" + regularUserBID = "regularUserB" + tokenBID = "tokenB1" + hashedTokenB = "SoMeHaShEdToKeNB" + ) + + setupStore := func(t *testing.T) (*DefaultAccountManager, func()) { + t.Helper() + + s, cleanup, err := store.NewTestStoreFromSQL(context.Background(), "", t.TempDir()) + require.NoError(t, err, "creating store") + + accountA := newAccountWithId(context.Background(), accountAID, userAID, "", "", "", false) + require.NoError(t, s.SaveAccount(context.Background(), accountA)) + + accountB := newAccountWithId(context.Background(), accountBID, adminBID, "", "", "", false) + accountB.Users[serviceUserBID] = &types.User{ + Id: serviceUserBID, + AccountID: accountBID, + IsServiceUser: true, + ServiceUserName: "svcB", + Role: types.UserRoleAdmin, + PATs: map[string]*types.PersonalAccessToken{ + tokenBID: { + ID: tokenBID, + HashedToken: hashedTokenB, + }, + }, + } + accountB.Users[regularUserBID] = &types.User{ + Id: regularUserBID, + AccountID: accountBID, + Role: types.UserRoleUser, + } + require.NoError(t, s.SaveAccount(context.Background(), accountB)) + + pm := permissions.NewManager(s) + am := &DefaultAccountManager{ + Store: s, + eventStore: &activity.InMemoryEventStore{}, + permissionsManager: pm, + } + return am, cleanup + } + + t.Run("CreatePAT for user in different account is denied", func(t *testing.T) { + am, cleanup := setupStore(t) + t.Cleanup(cleanup) + + _, err := am.CreatePAT(context.Background(), accountAID, userAID, serviceUserBID, "xss-token", 7) + require.Error(t, err, "cross-account CreatePAT must fail") + + _, err = am.CreatePAT(context.Background(), accountAID, userAID, regularUserBID, "xss-token", 7) + require.Error(t, err, "cross-account CreatePAT for regular user must fail") + + _, err = am.CreatePAT(context.Background(), accountBID, adminBID, serviceUserBID, "legit-token", 7) + require.NoError(t, err, "same-account CreatePAT should succeed") + }) + + t.Run("DeletePAT for user in different account is denied", func(t *testing.T) { + am, cleanup := setupStore(t) + t.Cleanup(cleanup) + + err := am.DeletePAT(context.Background(), accountAID, userAID, serviceUserBID, tokenBID) + require.Error(t, err, "cross-account DeletePAT must fail") + }) + + t.Run("GetPAT for user in different account is denied", func(t *testing.T) { + am, cleanup := setupStore(t) + t.Cleanup(cleanup) + + _, err := am.GetPAT(context.Background(), accountAID, userAID, serviceUserBID, tokenBID) + require.Error(t, err, "cross-account GetPAT must fail") + }) + + t.Run("GetAllPATs for user in different account is denied", func(t *testing.T) { + am, cleanup := setupStore(t) + t.Cleanup(cleanup) + + _, err := am.GetAllPATs(context.Background(), accountAID, userAID, serviceUserBID) + require.Error(t, err, "cross-account GetAllPATs must fail") + }) + + t.Run("CreatePAT with forged accountID targeting foreign user is denied", func(t *testing.T) { + am, cleanup := setupStore(t) + t.Cleanup(cleanup) + + _, err := am.CreatePAT(context.Background(), accountAID, userAID, adminBID, "forged", 7) + require.Error(t, err, "forged accountID CreatePAT must fail") + }) +} + func TestUser_Copy(t *testing.T) { // this is an imaginary case which will never be in DB this way user := types.User{ From c238f5425f8a9b4a521809d5cd11afd3fadf05e0 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Tue, 31 Mar 2026 16:43:49 +0200 Subject: [PATCH 259/374] [management] proper module permission validation for posture check delete (#5742) --- management/server/posture_checks.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/management/server/posture_checks.go b/management/server/posture_checks.go index ba901c771..9562487c0 100644 --- a/management/server/posture_checks.go +++ b/management/server/posture_checks.go @@ -84,7 +84,7 @@ func (am *DefaultAccountManager) SavePostureChecks(ctx context.Context, accountI // DeletePostureChecks deletes a posture check by ID. func (am *DefaultAccountManager) DeletePostureChecks(ctx context.Context, accountID, postureChecksID, userID string) error { - allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Routes, operations.Read) + allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Policies, operations.Delete) if err != nil { return status.NewPermissionValidationError(err) } From e5914e4e8b0ae030701529eb4f492d49eb10534b Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Tue, 31 Mar 2026 18:50:49 +0300 Subject: [PATCH 260/374] [management,client] Remove client secret from gRPC auth flow (#5751) Remove client secret from gRPC auth flow. The secret was originally included to support providers like Google Workspace that don't offer a proper PKCE flow, but this is no longer necessary with the embedded IdP. Deployments using such providers should migrate to the embedded IdP instead. --- client/internal/auth/auth.go | 2 - client/internal/auth/device_flow.go | 2 - client/internal/auth/pkce_flow.go | 5 +- management/internals/shared/grpc/server.go | 2 - shared/management/client/client_test.go | 4 +- shared/management/proto/management.pb.go | 549 ++++---- shared/management/proto/management.proto | 4 +- shared/management/proto/proxy_service.pb.go | 1366 +++++++++++++------ 8 files changed, 1228 insertions(+), 706 deletions(-) diff --git a/client/internal/auth/auth.go b/client/internal/auth/auth.go index 44e98bede..7879848e3 100644 --- a/client/internal/auth/auth.go +++ b/client/internal/auth/auth.go @@ -221,7 +221,6 @@ func (a *Auth) getPKCEFlow(client *mgm.GrpcClient) (*PKCEAuthorizationFlow, erro config := &PKCEAuthProviderConfig{ Audience: protoConfig.GetAudience(), ClientID: protoConfig.GetClientID(), - ClientSecret: protoConfig.GetClientSecret(), TokenEndpoint: protoConfig.GetTokenEndpoint(), AuthorizationEndpoint: protoConfig.GetAuthorizationEndpoint(), Scope: protoConfig.GetScope(), @@ -266,7 +265,6 @@ func (a *Auth) getDeviceFlow(client *mgm.GrpcClient) (*DeviceAuthorizationFlow, config := &DeviceAuthProviderConfig{ Audience: protoConfig.GetAudience(), ClientID: protoConfig.GetClientID(), - ClientSecret: protoConfig.GetClientSecret(), Domain: protoConfig.Domain, TokenEndpoint: protoConfig.GetTokenEndpoint(), DeviceAuthEndpoint: protoConfig.GetDeviceAuthEndpoint(), diff --git a/client/internal/auth/device_flow.go b/client/internal/auth/device_flow.go index e33765300..f1dcfbdc9 100644 --- a/client/internal/auth/device_flow.go +++ b/client/internal/auth/device_flow.go @@ -29,8 +29,6 @@ var _ OAuthFlow = &DeviceAuthorizationFlow{} type DeviceAuthProviderConfig struct { // ClientID An IDP application client id ClientID string - // ClientSecret An IDP application client secret - ClientSecret string // Domain An IDP API domain // Deprecated. Use OIDCConfigEndpoint instead Domain string diff --git a/client/internal/auth/pkce_flow.go b/client/internal/auth/pkce_flow.go index 2e16836d8..f8d733769 100644 --- a/client/internal/auth/pkce_flow.go +++ b/client/internal/auth/pkce_flow.go @@ -38,8 +38,6 @@ const ( type PKCEAuthProviderConfig struct { // ClientID An IDP application client id ClientID string - // ClientSecret An IDP application client secret - ClientSecret string // Audience An Audience for to authorization validation Audience string // TokenEndpoint is the endpoint of an IDP manager where clients can obtain access token @@ -111,8 +109,7 @@ func NewPKCEAuthorizationFlow(config PKCEAuthProviderConfig) (*PKCEAuthorization } cfg := &oauth2.Config{ - ClientID: config.ClientID, - ClientSecret: config.ClientSecret, + ClientID: config.ClientID, Endpoint: oauth2.Endpoint{ AuthURL: config.AuthorizationEndpoint, TokenURL: config.TokenEndpoint, diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index 6e8358f02..4f0fcc545 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -966,7 +966,6 @@ func (s *Server) GetDeviceAuthorizationFlow(ctx context.Context, req *proto.Encr Provider: proto.DeviceAuthorizationFlowProvider(provider), ProviderConfig: &proto.ProviderConfig{ ClientID: s.config.DeviceAuthorizationFlow.ProviderConfig.ClientID, - ClientSecret: s.config.DeviceAuthorizationFlow.ProviderConfig.ClientSecret, Domain: s.config.DeviceAuthorizationFlow.ProviderConfig.Domain, Audience: s.config.DeviceAuthorizationFlow.ProviderConfig.Audience, DeviceAuthEndpoint: s.config.DeviceAuthorizationFlow.ProviderConfig.DeviceAuthEndpoint, @@ -1037,7 +1036,6 @@ func (s *Server) GetPKCEAuthorizationFlow(ctx context.Context, req *proto.Encryp ProviderConfig: &proto.ProviderConfig{ Audience: s.config.PKCEAuthorizationFlow.ProviderConfig.Audience, ClientID: s.config.PKCEAuthorizationFlow.ProviderConfig.ClientID, - ClientSecret: s.config.PKCEAuthorizationFlow.ProviderConfig.ClientSecret, TokenEndpoint: s.config.PKCEAuthorizationFlow.ProviderConfig.TokenEndpoint, AuthorizationEndpoint: s.config.PKCEAuthorizationFlow.ProviderConfig.AuthorizationEndpoint, Scope: s.config.PKCEAuthorizationFlow.ProviderConfig.Scope, diff --git a/shared/management/client/client_test.go b/shared/management/client/client_test.go index a11f863a7..bfb00c0b7 100644 --- a/shared/management/client/client_test.go +++ b/shared/management/client/client_test.go @@ -545,8 +545,7 @@ func Test_GetPKCEAuthorizationFlow(t *testing.T) { expectedFlowInfo := &mgmtProto.PKCEAuthorizationFlow{ ProviderConfig: &mgmtProto.ProviderConfig{ - ClientID: "client", - ClientSecret: "secret", + ClientID: "client", }, } @@ -569,5 +568,4 @@ func Test_GetPKCEAuthorizationFlow(t *testing.T) { } assert.Equal(t, expectedFlowInfo.ProviderConfig.ClientID, flowInfo.ProviderConfig.ClientID, "provider configured client ID should match") - assert.Equal(t, expectedFlowInfo.ProviderConfig.ClientSecret, flowInfo.ProviderConfig.ClientSecret, "provider configured client secret should match") } diff --git a/shared/management/proto/management.pb.go b/shared/management/proto/management.pb.go index c5581296c..604f9c793 100644 --- a/shared/management/proto/management.pb.go +++ b/shared/management/proto/management.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v6.33.0 +// protoc v7.34.1 // source: management.proto package proto @@ -2259,8 +2259,8 @@ type AutoUpdateSettings struct { unknownFields protoimpl.UnknownFields Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - // alwaysUpdate = true → Updates happen automatically in the background - // alwaysUpdate = false → Updates only happen when triggered by a peer connection + // alwaysUpdate = true → Updates are installed automatically in the background + // alwaysUpdate = false → Updates require user interaction from the UI AlwaysUpdate bool `protobuf:"varint,2,opt,name=alwaysUpdate,proto3" json:"alwaysUpdate,omitempty"` } @@ -2928,7 +2928,9 @@ type ProviderConfig struct { // An IDP application client id ClientID string `protobuf:"bytes,1,opt,name=ClientID,proto3" json:"ClientID,omitempty"` - // An IDP application client secret + // Deprecated: use embedded IdP for providers that require a client secret (e.g. Google Workspace). + // + // Deprecated: Do not use. ClientSecret string `protobuf:"bytes,2,opt,name=ClientSecret,proto3" json:"ClientSecret,omitempty"` // An IDP API domain // Deprecated. Use a DeviceAuthEndpoint and TokenEndpoint @@ -2992,6 +2994,7 @@ func (x *ProviderConfig) GetClientID() string { return "" } +// Deprecated: Do not use. func (x *ProviderConfig) GetClientSecret() string { if x != nil { return x.ClientSecret @@ -4847,287 +4850,287 @@ var file_management_proto_rawDesc = []byte{ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x22, 0xb8, 0x03, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, + 0x69, 0x67, 0x22, 0xbc, 0x03, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, - 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1a, 0x0a, - 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x44, 0x65, 0x76, - 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, - 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, - 0x14, 0x0a, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x55, 0x73, 0x65, 0x49, 0x44, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x55, 0x73, 0x65, 0x49, 0x44, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x34, 0x0a, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x52, - 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, 0x12, - 0x2e, 0x0a, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, - 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x44, 0x69, 0x73, - 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, - 0x1c, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x18, 0x0c, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x22, 0x93, 0x02, - 0x0a, 0x05, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, - 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, - 0x1e, 0x0a, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, 0x64, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, 0x64, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x4e, 0x65, 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, - 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, - 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x24, 0x0a, - 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x18, 0x0a, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, - 0x70, 0x6c, 0x79, 0x22, 0xde, 0x01, 0x0a, 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x53, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, - 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x10, - 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, - 0x12, 0x38, 0x0a, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x52, 0x0b, 0x43, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x46, 0x6f, - 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, - 0x50, 0x6f, 0x72, 0x74, 0x22, 0xb8, 0x01, 0x0a, 0x0a, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, - 0x6f, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x32, 0x0a, 0x07, 0x52, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, - 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, - 0x32, 0x0a, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, - 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, - 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, 0x69, 0x73, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x4e, - 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x22, - 0x74, 0x0a, 0x0c, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, - 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x43, 0x6c, 0x61, 0x73, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, - 0x03, 0x54, 0x54, 0x4c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, - 0x14, 0x0a, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x52, 0x44, 0x61, 0x74, 0x61, 0x22, 0xb3, 0x01, 0x0a, 0x0f, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x38, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, - 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x18, 0x0a, - 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, - 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, - 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x48, 0x0a, 0x0a, 0x4e, - 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x50, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x53, 0x54, - 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4e, 0x53, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x04, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xa7, 0x02, 0x0a, 0x0c, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, - 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x50, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x50, 0x12, 0x37, - 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, - 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x44, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x12, 0x0a, - 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x6f, 0x72, - 0x74, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x22, - 0x38, 0x0a, 0x0e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x63, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x22, 0x1e, 0x0a, 0x06, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0x96, 0x01, 0x0a, 0x08, 0x50, 0x6f, - 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x32, 0x0a, 0x05, - 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, - 0x6f, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x1a, 0x2f, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, - 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x6e, - 0x64, 0x42, 0x0f, 0x0a, 0x0d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x87, 0x03, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, 0x69, 0x72, 0x65, - 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x06, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, - 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, - 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, - 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x70, 0x6f, - 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x73, 0x44, 0x79, 0x6e, 0x61, - 0x6d, 0x69, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x44, 0x79, 0x6e, - 0x61, 0x6d, 0x69, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, - 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x26, - 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x49, 0x44, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x44, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x44, 0x22, 0xf2, 0x01, 0x0a, - 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, - 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, - 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, - 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, - 0x64, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x72, - 0x74, 0x22, 0x8b, 0x02, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, - 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x36, - 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, - 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1f, 0x0a, - 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1f, - 0x0a, 0x0b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x22, - 0xa1, 0x01, 0x0a, 0x15, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x16, 0x0a, - 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, - 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x61, 0x75, - 0x74, 0x6f, 0x5f, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x10, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x73, 0x73, 0x69, 0x67, - 0x6e, 0x65, 0x64, 0x22, 0x2c, 0x0a, 0x12, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, - 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x70, + 0x44, 0x12, 0x26, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2e, 0x0a, + 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, + 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x24, 0x0a, + 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x55, 0x73, 0x65, + 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x55, + 0x73, 0x65, 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x34, 0x0a, 0x15, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, + 0x22, 0x0a, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, 0x18, + 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, + 0x52, 0x4c, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, + 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, + 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, 0x67, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, + 0x67, 0x22, 0x93, 0x02, 0x0a, 0x05, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, + 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x4e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x4e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x12, 0x1e, 0x0a, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, 0x64, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, + 0x61, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, 0x70, + 0x6c, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, + 0x74, 0x6f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x22, 0xde, 0x01, 0x0a, 0x09, 0x44, 0x4e, 0x53, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x4e, + 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x52, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x73, 0x12, 0x38, 0x0a, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, + 0x6e, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, + 0x65, 0x52, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, 0x28, + 0x0a, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, + 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xb8, 0x01, 0x0a, 0x0a, 0x43, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, + 0x32, 0x0a, 0x07, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x69, + 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x52, 0x65, 0x63, 0x6f, + 0x72, 0x64, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, + 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x41, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x10, 0x4e, 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x22, 0x74, 0x0a, 0x0c, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, + 0x54, 0x54, 0x4c, 0x12, 0x14, 0x0a, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x22, 0xb3, 0x01, 0x0a, 0x0f, 0x4e, 0x61, + 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x38, 0x0a, + 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x0b, 0x4e, 0x61, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, + 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, + 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, + 0x48, 0x0a, 0x0a, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x0e, 0x0a, + 0x02, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x12, 0x16, 0x0a, + 0x06, 0x4e, 0x53, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4e, + 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xa7, 0x02, 0x0a, 0x0c, 0x46, 0x69, + 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x65, + 0x65, 0x72, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65, 0x72, + 0x49, 0x50, 0x12, 0x37, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, + 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x50, + 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x49, 0x44, 0x22, 0x38, 0x0a, 0x0e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x12, 0x10, 0x0a, 0x03, 0x6d, + 0x61, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x22, 0x1e, 0x0a, + 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0x96, 0x01, + 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, + 0x12, 0x32, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, + 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, 0x72, + 0x61, 0x6e, 0x67, 0x65, 0x1a, 0x2f, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x03, 0x65, 0x6e, 0x64, 0x42, 0x0f, 0x0a, 0x0d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x87, 0x03, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x22, 0x0a, 0x0c, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, + 0x12, 0x2e, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, + 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x6f, 0x72, 0x74, + 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x73, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, + 0x73, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, + 0x44, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x44, + 0x22, 0xf2, 0x01, 0x0a, 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, + 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0f, 0x64, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, + 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x8b, 0x02, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, + 0x72, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, + 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x75, + 0x73, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x50, + 0x6f, 0x72, 0x74, 0x22, 0xa1, 0x01, 0x0a, 0x15, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, + 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, + 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x41, + 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x22, 0x2c, 0x0a, 0x12, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, - 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x14, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x70, 0x45, 0x78, 0x70, - 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0x3a, 0x0a, 0x09, 0x4a, - 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x0e, 0x75, 0x6e, 0x6b, 0x6e, - 0x6f, 0x77, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, - 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x66, - 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x02, 0x2a, 0x4c, 0x0a, 0x0c, 0x52, 0x75, 0x6c, 0x65, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, - 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, - 0x03, 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x03, 0x12, - 0x08, 0x0a, 0x04, 0x49, 0x43, 0x4d, 0x50, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, - 0x54, 0x4f, 0x4d, 0x10, 0x05, 0x2a, 0x20, 0x0a, 0x0d, 0x52, 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x07, - 0x0a, 0x03, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x22, 0x0a, 0x0a, 0x52, 0x75, 0x6c, 0x65, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x10, - 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, 0x10, 0x01, 0x2a, 0x63, 0x0a, 0x0e, 0x45, - 0x78, 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0f, 0x0a, - 0x0b, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x10, 0x00, 0x12, 0x10, - 0x0a, 0x0c, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x01, - 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, 0x43, 0x50, 0x10, 0x02, - 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x55, 0x44, 0x50, 0x10, 0x03, - 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, 0x4c, 0x53, 0x10, 0x04, - 0x32, 0xfd, 0x06, 0x0a, 0x11, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, - 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, - 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, + 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x0a, 0x11, + 0x53, 0x74, 0x6f, 0x70, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x14, 0x0a, 0x12, 0x53, 0x74, 0x6f, + 0x70, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, + 0x3a, 0x0a, 0x09, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x0e, + 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x10, 0x00, + 0x12, 0x0d, 0x0a, 0x09, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x10, 0x01, 0x12, + 0x0a, 0x0a, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x02, 0x2a, 0x4c, 0x0a, 0x0c, 0x52, + 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, + 0x01, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, + 0x50, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x43, 0x4d, 0x50, 0x10, 0x04, 0x12, 0x0a, 0x0a, + 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x05, 0x2a, 0x20, 0x0a, 0x0d, 0x52, 0x75, 0x6c, + 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, 0x49, 0x4e, + 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x22, 0x0a, 0x0a, 0x52, + 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x43, + 0x45, 0x50, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, 0x10, 0x01, 0x2a, + 0x63, 0x0a, 0x0e, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, 0x50, + 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, + 0x50, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, + 0x43, 0x50, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x55, + 0x44, 0x50, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, + 0x4c, 0x53, 0x10, 0x04, 0x32, 0xfd, 0x06, 0x0a, 0x11, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x05, 0x4c, 0x6f, + 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x09, 0x69, 0x73, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x5a, - 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, - 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x18, 0x47, 0x65, - 0x74, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, 0x61, - 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, 0x1c, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, - 0x12, 0x47, 0x0a, 0x03, 0x4a, 0x6f, 0x62, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x0c, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0x00, 0x12, 0x46, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0b, 0x52, 0x65, 0x6e, 0x65, 0x77, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x0c, 0x47, 0x65, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, + 0x09, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x11, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, + 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x58, + 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, + 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, + 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, + 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, + 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x03, 0x4a, 0x6f, 0x62, 0x12, 0x1c, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4c, 0x0a, + 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0b, 0x52, + 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, + 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0a, 0x53, 0x74, 0x6f, 0x70, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0a, 0x53, 0x74, 0x6f, 0x70, 0x45, 0x78, 0x70, 0x6f, - 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, - 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x67, 0x65, 0x22, 0x00, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/shared/management/proto/management.proto b/shared/management/proto/management.proto index 9acf7e2b3..70a530679 100644 --- a/shared/management/proto/management.proto +++ b/shared/management/proto/management.proto @@ -464,8 +464,8 @@ message PKCEAuthorizationFlow { message ProviderConfig { // An IDP application client id string ClientID = 1; - // An IDP application client secret - string ClientSecret = 2; + // Deprecated: use embedded IdP for providers that require a client secret (e.g. Google Workspace). + string ClientSecret = 2 [deprecated = true]; // An IDP API domain // Deprecated. Use a DeviceAuthEndpoint and TokenEndpoint string Domain = 3; diff --git a/shared/management/proto/proxy_service.pb.go b/shared/management/proto/proxy_service.pb.go index 93295e857..81637f69e 100644 --- a/shared/management/proto/proxy_service.pb.go +++ b/shared/management/proto/proxy_service.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.6 -// protoc v6.33.3 +// protoc-gen-go v1.26.0 +// protoc v7.34.1 // source: proxy_service.proto package proto @@ -13,7 +13,6 @@ import ( timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -178,21 +177,24 @@ func (ProxyStatus) EnumDescriptor() ([]byte, []int) { // ProxyCapabilities describes what a proxy can handle. type ProxyCapabilities struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Whether the proxy can bind arbitrary ports for TCP/UDP/TLS services. SupportsCustomPorts *bool `protobuf:"varint,1,opt,name=supports_custom_ports,json=supportsCustomPorts,proto3,oneof" json:"supports_custom_ports,omitempty"` // Whether the proxy requires a subdomain label in front of its cluster domain. - // When true, tenants cannot use the cluster domain bare. + // When true, accounts cannot use the cluster domain bare. RequireSubdomain *bool `protobuf:"varint,2,opt,name=require_subdomain,json=requireSubdomain,proto3,oneof" json:"require_subdomain,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *ProxyCapabilities) Reset() { *x = ProxyCapabilities{} - mi := &file_proxy_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ProxyCapabilities) String() string { @@ -203,7 +205,7 @@ func (*ProxyCapabilities) ProtoMessage() {} func (x *ProxyCapabilities) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -234,21 +236,24 @@ func (x *ProxyCapabilities) GetRequireSubdomain() bool { // GetMappingUpdateRequest is sent to initialise a mapping stream. type GetMappingUpdateRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - ProxyId string `protobuf:"bytes,1,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - StartedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` - Address string `protobuf:"bytes,4,opt,name=address,proto3" json:"address,omitempty"` - Capabilities *ProxyCapabilities `protobuf:"bytes,5,opt,name=capabilities,proto3" json:"capabilities,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ProxyId string `protobuf:"bytes,1,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + StartedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + Address string `protobuf:"bytes,4,opt,name=address,proto3" json:"address,omitempty"` + Capabilities *ProxyCapabilities `protobuf:"bytes,5,opt,name=capabilities,proto3" json:"capabilities,omitempty"` } func (x *GetMappingUpdateRequest) Reset() { *x = GetMappingUpdateRequest{} - mi := &file_proxy_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GetMappingUpdateRequest) String() string { @@ -259,7 +264,7 @@ func (*GetMappingUpdateRequest) ProtoMessage() {} func (x *GetMappingUpdateRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -313,20 +318,23 @@ func (x *GetMappingUpdateRequest) GetCapabilities() *ProxyCapabilities { // No mappings may be sent to test the liveness of the Proxy. // Mappings that are sent should be interpreted by the Proxy appropriately. type GetMappingUpdateResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Mapping []*ProxyMapping `protobuf:"bytes,1,rep,name=mapping,proto3" json:"mapping,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Mapping []*ProxyMapping `protobuf:"bytes,1,rep,name=mapping,proto3" json:"mapping,omitempty"` // initial_sync_complete is set on the last message of the initial snapshot. // The proxy uses this to signal that startup is complete. InitialSyncComplete bool `protobuf:"varint,2,opt,name=initial_sync_complete,json=initialSyncComplete,proto3" json:"initial_sync_complete,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *GetMappingUpdateResponse) Reset() { *x = GetMappingUpdateResponse{} - mi := &file_proxy_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GetMappingUpdateResponse) String() string { @@ -337,7 +345,7 @@ func (*GetMappingUpdateResponse) ProtoMessage() {} func (x *GetMappingUpdateResponse) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[2] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -367,24 +375,27 @@ func (x *GetMappingUpdateResponse) GetInitialSyncComplete() bool { } type PathTargetOptions struct { - state protoimpl.MessageState `protogen:"open.v1"` - SkipTlsVerify bool `protobuf:"varint,1,opt,name=skip_tls_verify,json=skipTlsVerify,proto3" json:"skip_tls_verify,omitempty"` - RequestTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"` - PathRewrite PathRewriteMode `protobuf:"varint,3,opt,name=path_rewrite,json=pathRewrite,proto3,enum=management.PathRewriteMode" json:"path_rewrite,omitempty"` - CustomHeaders map[string]string `protobuf:"bytes,4,rep,name=custom_headers,json=customHeaders,proto3" json:"custom_headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SkipTlsVerify bool `protobuf:"varint,1,opt,name=skip_tls_verify,json=skipTlsVerify,proto3" json:"skip_tls_verify,omitempty"` + RequestTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"` + PathRewrite PathRewriteMode `protobuf:"varint,3,opt,name=path_rewrite,json=pathRewrite,proto3,enum=management.PathRewriteMode" json:"path_rewrite,omitempty"` + CustomHeaders map[string]string `protobuf:"bytes,4,rep,name=custom_headers,json=customHeaders,proto3" json:"custom_headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Send PROXY protocol v2 header to this backend. ProxyProtocol bool `protobuf:"varint,5,opt,name=proxy_protocol,json=proxyProtocol,proto3" json:"proxy_protocol,omitempty"` // Idle timeout before a UDP session is reaped. SessionIdleTimeout *durationpb.Duration `protobuf:"bytes,6,opt,name=session_idle_timeout,json=sessionIdleTimeout,proto3" json:"session_idle_timeout,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *PathTargetOptions) Reset() { *x = PathTargetOptions{} - mi := &file_proxy_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *PathTargetOptions) String() string { @@ -395,7 +406,7 @@ func (*PathTargetOptions) ProtoMessage() {} func (x *PathTargetOptions) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[3] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -453,19 +464,22 @@ func (x *PathTargetOptions) GetSessionIdleTimeout() *durationpb.Duration { } type PathMapping struct { - state protoimpl.MessageState `protogen:"open.v1"` - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Target string `protobuf:"bytes,2,opt,name=target,proto3" json:"target,omitempty"` - Options *PathTargetOptions `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Target string `protobuf:"bytes,2,opt,name=target,proto3" json:"target,omitempty"` + Options *PathTargetOptions `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` } func (x *PathMapping) Reset() { *x = PathMapping{} - mi := &file_proxy_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *PathMapping) String() string { @@ -476,7 +490,7 @@ func (*PathMapping) ProtoMessage() {} func (x *PathMapping) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[4] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -513,20 +527,23 @@ func (x *PathMapping) GetOptions() *PathTargetOptions { } type HeaderAuth struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Header name to check, e.g. "Authorization", "X-API-Key". Header string `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` // argon2id hash of the expected full header value. - HashedValue string `protobuf:"bytes,2,opt,name=hashed_value,json=hashedValue,proto3" json:"hashed_value,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + HashedValue string `protobuf:"bytes,2,opt,name=hashed_value,json=hashedValue,proto3" json:"hashed_value,omitempty"` } func (x *HeaderAuth) Reset() { *x = HeaderAuth{} - mi := &file_proxy_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *HeaderAuth) String() string { @@ -537,7 +554,7 @@ func (*HeaderAuth) ProtoMessage() {} func (x *HeaderAuth) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[5] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -567,22 +584,25 @@ func (x *HeaderAuth) GetHashedValue() string { } type Authentication struct { - state protoimpl.MessageState `protogen:"open.v1"` - SessionKey string `protobuf:"bytes,1,opt,name=session_key,json=sessionKey,proto3" json:"session_key,omitempty"` - MaxSessionAgeSeconds int64 `protobuf:"varint,2,opt,name=max_session_age_seconds,json=maxSessionAgeSeconds,proto3" json:"max_session_age_seconds,omitempty"` - Password bool `protobuf:"varint,3,opt,name=password,proto3" json:"password,omitempty"` - Pin bool `protobuf:"varint,4,opt,name=pin,proto3" json:"pin,omitempty"` - Oidc bool `protobuf:"varint,5,opt,name=oidc,proto3" json:"oidc,omitempty"` - HeaderAuths []*HeaderAuth `protobuf:"bytes,6,rep,name=header_auths,json=headerAuths,proto3" json:"header_auths,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SessionKey string `protobuf:"bytes,1,opt,name=session_key,json=sessionKey,proto3" json:"session_key,omitempty"` + MaxSessionAgeSeconds int64 `protobuf:"varint,2,opt,name=max_session_age_seconds,json=maxSessionAgeSeconds,proto3" json:"max_session_age_seconds,omitempty"` + Password bool `protobuf:"varint,3,opt,name=password,proto3" json:"password,omitempty"` + Pin bool `protobuf:"varint,4,opt,name=pin,proto3" json:"pin,omitempty"` + Oidc bool `protobuf:"varint,5,opt,name=oidc,proto3" json:"oidc,omitempty"` + HeaderAuths []*HeaderAuth `protobuf:"bytes,6,rep,name=header_auths,json=headerAuths,proto3" json:"header_auths,omitempty"` } func (x *Authentication) Reset() { *x = Authentication{} - mi := &file_proxy_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Authentication) String() string { @@ -593,7 +613,7 @@ func (*Authentication) ProtoMessage() {} func (x *Authentication) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[6] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -651,20 +671,23 @@ func (x *Authentication) GetHeaderAuths() []*HeaderAuth { } type AccessRestrictions struct { - state protoimpl.MessageState `protogen:"open.v1"` - AllowedCidrs []string `protobuf:"bytes,1,rep,name=allowed_cidrs,json=allowedCidrs,proto3" json:"allowed_cidrs,omitempty"` - BlockedCidrs []string `protobuf:"bytes,2,rep,name=blocked_cidrs,json=blockedCidrs,proto3" json:"blocked_cidrs,omitempty"` - AllowedCountries []string `protobuf:"bytes,3,rep,name=allowed_countries,json=allowedCountries,proto3" json:"allowed_countries,omitempty"` - BlockedCountries []string `protobuf:"bytes,4,rep,name=blocked_countries,json=blockedCountries,proto3" json:"blocked_countries,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AllowedCidrs []string `protobuf:"bytes,1,rep,name=allowed_cidrs,json=allowedCidrs,proto3" json:"allowed_cidrs,omitempty"` + BlockedCidrs []string `protobuf:"bytes,2,rep,name=blocked_cidrs,json=blockedCidrs,proto3" json:"blocked_cidrs,omitempty"` + AllowedCountries []string `protobuf:"bytes,3,rep,name=allowed_countries,json=allowedCountries,proto3" json:"allowed_countries,omitempty"` + BlockedCountries []string `protobuf:"bytes,4,rep,name=blocked_countries,json=blockedCountries,proto3" json:"blocked_countries,omitempty"` } func (x *AccessRestrictions) Reset() { *x = AccessRestrictions{} - mi := &file_proxy_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *AccessRestrictions) String() string { @@ -675,7 +698,7 @@ func (*AccessRestrictions) ProtoMessage() {} func (x *AccessRestrictions) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[7] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -719,7 +742,10 @@ func (x *AccessRestrictions) GetBlockedCountries() []string { } type ProxyMapping struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Type ProxyMappingUpdateType `protobuf:"varint,1,opt,name=type,proto3,enum=management.ProxyMappingUpdateType" json:"type,omitempty"` Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` AccountId string `protobuf:"bytes,3,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` @@ -738,15 +764,15 @@ type ProxyMapping struct { // For L4/TLS: the port the proxy listens on. ListenPort int32 `protobuf:"varint,11,opt,name=listen_port,json=listenPort,proto3" json:"listen_port,omitempty"` AccessRestrictions *AccessRestrictions `protobuf:"bytes,12,opt,name=access_restrictions,json=accessRestrictions,proto3" json:"access_restrictions,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *ProxyMapping) Reset() { *x = ProxyMapping{} - mi := &file_proxy_service_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ProxyMapping) String() string { @@ -757,7 +783,7 @@ func (*ProxyMapping) ProtoMessage() {} func (x *ProxyMapping) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[8] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -858,17 +884,20 @@ func (x *ProxyMapping) GetAccessRestrictions() *AccessRestrictions { // SendAccessLogRequest consists of one or more AccessLogs from a Proxy. type SendAccessLogRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Log *AccessLog `protobuf:"bytes,1,opt,name=log,proto3" json:"log,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Log *AccessLog `protobuf:"bytes,1,opt,name=log,proto3" json:"log,omitempty"` } func (x *SendAccessLogRequest) Reset() { *x = SendAccessLogRequest{} - mi := &file_proxy_service_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SendAccessLogRequest) String() string { @@ -879,7 +908,7 @@ func (*SendAccessLogRequest) ProtoMessage() {} func (x *SendAccessLogRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[9] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -903,16 +932,18 @@ func (x *SendAccessLogRequest) GetLog() *AccessLog { // SendAccessLogResponse is intentionally empty to allow for future expansion. type SendAccessLogResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *SendAccessLogResponse) Reset() { *x = SendAccessLogResponse{} - mi := &file_proxy_service_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SendAccessLogResponse) String() string { @@ -923,7 +954,7 @@ func (*SendAccessLogResponse) ProtoMessage() {} func (x *SendAccessLogResponse) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[10] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -939,7 +970,10 @@ func (*SendAccessLogResponse) Descriptor() ([]byte, []int) { } type AccessLog struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` LogId string `protobuf:"bytes,2,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"` AccountId string `protobuf:"bytes,3,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` @@ -956,15 +990,15 @@ type AccessLog struct { BytesUpload int64 `protobuf:"varint,14,opt,name=bytes_upload,json=bytesUpload,proto3" json:"bytes_upload,omitempty"` BytesDownload int64 `protobuf:"varint,15,opt,name=bytes_download,json=bytesDownload,proto3" json:"bytes_download,omitempty"` Protocol string `protobuf:"bytes,16,opt,name=protocol,proto3" json:"protocol,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *AccessLog) Reset() { *x = AccessLog{} - mi := &file_proxy_service_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *AccessLog) String() string { @@ -975,7 +1009,7 @@ func (*AccessLog) ProtoMessage() {} func (x *AccessLog) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[11] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1103,24 +1137,27 @@ func (x *AccessLog) GetProtocol() string { } type AuthenticateRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` - // Types that are valid to be assigned to Request: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + // Types that are assignable to Request: // // *AuthenticateRequest_Password // *AuthenticateRequest_Pin // *AuthenticateRequest_HeaderAuth - Request isAuthenticateRequest_Request `protobuf_oneof:"request"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Request isAuthenticateRequest_Request `protobuf_oneof:"request"` } func (x *AuthenticateRequest) Reset() { *x = AuthenticateRequest{} - mi := &file_proxy_service_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *AuthenticateRequest) String() string { @@ -1131,7 +1168,7 @@ func (*AuthenticateRequest) ProtoMessage() {} func (x *AuthenticateRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[12] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1160,36 +1197,30 @@ func (x *AuthenticateRequest) GetAccountId() string { return "" } -func (x *AuthenticateRequest) GetRequest() isAuthenticateRequest_Request { - if x != nil { - return x.Request +func (m *AuthenticateRequest) GetRequest() isAuthenticateRequest_Request { + if m != nil { + return m.Request } return nil } func (x *AuthenticateRequest) GetPassword() *PasswordRequest { - if x != nil { - if x, ok := x.Request.(*AuthenticateRequest_Password); ok { - return x.Password - } + if x, ok := x.GetRequest().(*AuthenticateRequest_Password); ok { + return x.Password } return nil } func (x *AuthenticateRequest) GetPin() *PinRequest { - if x != nil { - if x, ok := x.Request.(*AuthenticateRequest_Pin); ok { - return x.Pin - } + if x, ok := x.GetRequest().(*AuthenticateRequest_Pin); ok { + return x.Pin } return nil } func (x *AuthenticateRequest) GetHeaderAuth() *HeaderAuthRequest { - if x != nil { - if x, ok := x.Request.(*AuthenticateRequest_HeaderAuth); ok { - return x.HeaderAuth - } + if x, ok := x.GetRequest().(*AuthenticateRequest_HeaderAuth); ok { + return x.HeaderAuth } return nil } @@ -1217,18 +1248,21 @@ func (*AuthenticateRequest_Pin) isAuthenticateRequest_Request() {} func (*AuthenticateRequest_HeaderAuth) isAuthenticateRequest_Request() {} type HeaderAuthRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - HeaderValue string `protobuf:"bytes,1,opt,name=header_value,json=headerValue,proto3" json:"header_value,omitempty"` - HeaderName string `protobuf:"bytes,2,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + HeaderValue string `protobuf:"bytes,1,opt,name=header_value,json=headerValue,proto3" json:"header_value,omitempty"` + HeaderName string `protobuf:"bytes,2,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"` } func (x *HeaderAuthRequest) Reset() { *x = HeaderAuthRequest{} - mi := &file_proxy_service_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *HeaderAuthRequest) String() string { @@ -1239,7 +1273,7 @@ func (*HeaderAuthRequest) ProtoMessage() {} func (x *HeaderAuthRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[13] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1269,17 +1303,20 @@ func (x *HeaderAuthRequest) GetHeaderName() string { } type PasswordRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Password string `protobuf:"bytes,1,opt,name=password,proto3" json:"password,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Password string `protobuf:"bytes,1,opt,name=password,proto3" json:"password,omitempty"` } func (x *PasswordRequest) Reset() { *x = PasswordRequest{} - mi := &file_proxy_service_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *PasswordRequest) String() string { @@ -1290,7 +1327,7 @@ func (*PasswordRequest) ProtoMessage() {} func (x *PasswordRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[14] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1313,17 +1350,20 @@ func (x *PasswordRequest) GetPassword() string { } type PinRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Pin string `protobuf:"bytes,1,opt,name=pin,proto3" json:"pin,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pin string `protobuf:"bytes,1,opt,name=pin,proto3" json:"pin,omitempty"` } func (x *PinRequest) Reset() { *x = PinRequest{} - mi := &file_proxy_service_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *PinRequest) String() string { @@ -1334,7 +1374,7 @@ func (*PinRequest) ProtoMessage() {} func (x *PinRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[15] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1357,18 +1397,21 @@ func (x *PinRequest) GetPin() string { } type AuthenticateResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` - SessionToken string `protobuf:"bytes,2,opt,name=session_token,json=sessionToken,proto3" json:"session_token,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + SessionToken string `protobuf:"bytes,2,opt,name=session_token,json=sessionToken,proto3" json:"session_token,omitempty"` } func (x *AuthenticateResponse) Reset() { *x = AuthenticateResponse{} - mi := &file_proxy_service_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *AuthenticateResponse) String() string { @@ -1379,7 +1422,7 @@ func (*AuthenticateResponse) ProtoMessage() {} func (x *AuthenticateResponse) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[16] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1410,21 +1453,24 @@ func (x *AuthenticateResponse) GetSessionToken() string { // SendStatusUpdateRequest is sent by the proxy to update its status type SendStatusUpdateRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` - AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` - Status ProxyStatus `protobuf:"varint,3,opt,name=status,proto3,enum=management.ProxyStatus" json:"status,omitempty"` - CertificateIssued bool `protobuf:"varint,4,opt,name=certificate_issued,json=certificateIssued,proto3" json:"certificate_issued,omitempty"` - ErrorMessage *string `protobuf:"bytes,5,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + Status ProxyStatus `protobuf:"varint,3,opt,name=status,proto3,enum=management.ProxyStatus" json:"status,omitempty"` + CertificateIssued bool `protobuf:"varint,4,opt,name=certificate_issued,json=certificateIssued,proto3" json:"certificate_issued,omitempty"` + ErrorMessage *string `protobuf:"bytes,5,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"` } func (x *SendStatusUpdateRequest) Reset() { *x = SendStatusUpdateRequest{} - mi := &file_proxy_service_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SendStatusUpdateRequest) String() string { @@ -1435,7 +1481,7 @@ func (*SendStatusUpdateRequest) ProtoMessage() {} func (x *SendStatusUpdateRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[17] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1487,16 +1533,18 @@ func (x *SendStatusUpdateRequest) GetErrorMessage() string { // SendStatusUpdateResponse is intentionally empty to allow for future expansion type SendStatusUpdateResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *SendStatusUpdateResponse) Reset() { *x = SendStatusUpdateResponse{} - mi := &file_proxy_service_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SendStatusUpdateResponse) String() string { @@ -1507,7 +1555,7 @@ func (*SendStatusUpdateResponse) ProtoMessage() {} func (x *SendStatusUpdateResponse) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[18] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1525,21 +1573,24 @@ func (*SendStatusUpdateResponse) Descriptor() ([]byte, []int) { // CreateProxyPeerRequest is sent by the proxy to create a peer connection // The token is a one-time authentication token sent via ProxyMapping type CreateProxyPeerRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` - AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` - Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"` - WireguardPublicKey string `protobuf:"bytes,4,opt,name=wireguard_public_key,json=wireguardPublicKey,proto3" json:"wireguard_public_key,omitempty"` - Cluster string `protobuf:"bytes,5,opt,name=cluster,proto3" json:"cluster,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"` + WireguardPublicKey string `protobuf:"bytes,4,opt,name=wireguard_public_key,json=wireguardPublicKey,proto3" json:"wireguard_public_key,omitempty"` + Cluster string `protobuf:"bytes,5,opt,name=cluster,proto3" json:"cluster,omitempty"` } func (x *CreateProxyPeerRequest) Reset() { *x = CreateProxyPeerRequest{} - mi := &file_proxy_service_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *CreateProxyPeerRequest) String() string { @@ -1550,7 +1601,7 @@ func (*CreateProxyPeerRequest) ProtoMessage() {} func (x *CreateProxyPeerRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[19] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1602,18 +1653,21 @@ func (x *CreateProxyPeerRequest) GetCluster() string { // CreateProxyPeerResponse contains the result of peer creation type CreateProxyPeerResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` - ErrorMessage *string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + ErrorMessage *string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"` } func (x *CreateProxyPeerResponse) Reset() { *x = CreateProxyPeerResponse{} - mi := &file_proxy_service_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *CreateProxyPeerResponse) String() string { @@ -1624,7 +1678,7 @@ func (*CreateProxyPeerResponse) ProtoMessage() {} func (x *CreateProxyPeerResponse) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[20] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1654,19 +1708,22 @@ func (x *CreateProxyPeerResponse) GetErrorMessage() string { } type GetOIDCURLRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` - RedirectUrl string `protobuf:"bytes,3,opt,name=redirect_url,json=redirectUrl,proto3" json:"redirect_url,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + RedirectUrl string `protobuf:"bytes,3,opt,name=redirect_url,json=redirectUrl,proto3" json:"redirect_url,omitempty"` } func (x *GetOIDCURLRequest) Reset() { *x = GetOIDCURLRequest{} - mi := &file_proxy_service_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GetOIDCURLRequest) String() string { @@ -1677,7 +1734,7 @@ func (*GetOIDCURLRequest) ProtoMessage() {} func (x *GetOIDCURLRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[21] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1714,17 +1771,20 @@ func (x *GetOIDCURLRequest) GetRedirectUrl() string { } type GetOIDCURLResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` } func (x *GetOIDCURLResponse) Reset() { *x = GetOIDCURLResponse{} - mi := &file_proxy_service_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GetOIDCURLResponse) String() string { @@ -1735,7 +1795,7 @@ func (*GetOIDCURLResponse) ProtoMessage() {} func (x *GetOIDCURLResponse) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[22] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1758,18 +1818,21 @@ func (x *GetOIDCURLResponse) GetUrl() string { } type ValidateSessionRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` - SessionToken string `protobuf:"bytes,2,opt,name=session_token,json=sessionToken,proto3" json:"session_token,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` + SessionToken string `protobuf:"bytes,2,opt,name=session_token,json=sessionToken,proto3" json:"session_token,omitempty"` } func (x *ValidateSessionRequest) Reset() { *x = ValidateSessionRequest{} - mi := &file_proxy_service_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ValidateSessionRequest) String() string { @@ -1780,7 +1843,7 @@ func (*ValidateSessionRequest) ProtoMessage() {} func (x *ValidateSessionRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[23] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1810,20 +1873,23 @@ func (x *ValidateSessionRequest) GetSessionToken() string { } type ValidateSessionResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Valid bool `protobuf:"varint,1,opt,name=valid,proto3" json:"valid,omitempty"` - UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - UserEmail string `protobuf:"bytes,3,opt,name=user_email,json=userEmail,proto3" json:"user_email,omitempty"` - DeniedReason string `protobuf:"bytes,4,opt,name=denied_reason,json=deniedReason,proto3" json:"denied_reason,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Valid bool `protobuf:"varint,1,opt,name=valid,proto3" json:"valid,omitempty"` + UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + UserEmail string `protobuf:"bytes,3,opt,name=user_email,json=userEmail,proto3" json:"user_email,omitempty"` + DeniedReason string `protobuf:"bytes,4,opt,name=denied_reason,json=deniedReason,proto3" json:"denied_reason,omitempty"` } func (x *ValidateSessionResponse) Reset() { *x = ValidateSessionResponse{} - mi := &file_proxy_service_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ValidateSessionResponse) String() string { @@ -1834,7 +1900,7 @@ func (*ValidateSessionResponse) ProtoMessage() {} func (x *ValidateSessionResponse) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[24] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1879,195 +1945,356 @@ func (x *ValidateSessionResponse) GetDeniedReason() string { var File_proxy_service_proto protoreflect.FileDescriptor -const file_proxy_service_proto_rawDesc = "" + - "\n" + - "\x13proxy_service.proto\x12\n" + - "management\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xae\x01\n" + - "\x11ProxyCapabilities\x127\n" + - "\x15supports_custom_ports\x18\x01 \x01(\bH\x00R\x13supportsCustomPorts\x88\x01\x01\x120\n" + - "\x11require_subdomain\x18\x02 \x01(\bH\x01R\x10requireSubdomain\x88\x01\x01B\x18\n" + - "\x16_supports_custom_portsB\x14\n" + - "\x12_require_subdomain\"\xe6\x01\n" + - "\x17GetMappingUpdateRequest\x12\x19\n" + - "\bproxy_id\x18\x01 \x01(\tR\aproxyId\x12\x18\n" + - "\aversion\x18\x02 \x01(\tR\aversion\x129\n" + - "\n" + - "started_at\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\tstartedAt\x12\x18\n" + - "\aaddress\x18\x04 \x01(\tR\aaddress\x12A\n" + - "\fcapabilities\x18\x05 \x01(\v2\x1d.management.ProxyCapabilitiesR\fcapabilities\"\x82\x01\n" + - "\x18GetMappingUpdateResponse\x122\n" + - "\amapping\x18\x01 \x03(\v2\x18.management.ProxyMappingR\amapping\x122\n" + - "\x15initial_sync_complete\x18\x02 \x01(\bR\x13initialSyncComplete\"\xce\x03\n" + - "\x11PathTargetOptions\x12&\n" + - "\x0fskip_tls_verify\x18\x01 \x01(\bR\rskipTlsVerify\x12B\n" + - "\x0frequest_timeout\x18\x02 \x01(\v2\x19.google.protobuf.DurationR\x0erequestTimeout\x12>\n" + - "\fpath_rewrite\x18\x03 \x01(\x0e2\x1b.management.PathRewriteModeR\vpathRewrite\x12W\n" + - "\x0ecustom_headers\x18\x04 \x03(\v20.management.PathTargetOptions.CustomHeadersEntryR\rcustomHeaders\x12%\n" + - "\x0eproxy_protocol\x18\x05 \x01(\bR\rproxyProtocol\x12K\n" + - "\x14session_idle_timeout\x18\x06 \x01(\v2\x19.google.protobuf.DurationR\x12sessionIdleTimeout\x1a@\n" + - "\x12CustomHeadersEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"r\n" + - "\vPathMapping\x12\x12\n" + - "\x04path\x18\x01 \x01(\tR\x04path\x12\x16\n" + - "\x06target\x18\x02 \x01(\tR\x06target\x127\n" + - "\aoptions\x18\x03 \x01(\v2\x1d.management.PathTargetOptionsR\aoptions\"G\n" + - "\n" + - "HeaderAuth\x12\x16\n" + - "\x06header\x18\x01 \x01(\tR\x06header\x12!\n" + - "\fhashed_value\x18\x02 \x01(\tR\vhashedValue\"\xe5\x01\n" + - "\x0eAuthentication\x12\x1f\n" + - "\vsession_key\x18\x01 \x01(\tR\n" + - "sessionKey\x125\n" + - "\x17max_session_age_seconds\x18\x02 \x01(\x03R\x14maxSessionAgeSeconds\x12\x1a\n" + - "\bpassword\x18\x03 \x01(\bR\bpassword\x12\x10\n" + - "\x03pin\x18\x04 \x01(\bR\x03pin\x12\x12\n" + - "\x04oidc\x18\x05 \x01(\bR\x04oidc\x129\n" + - "\fheader_auths\x18\x06 \x03(\v2\x16.management.HeaderAuthR\vheaderAuths\"\xb8\x01\n" + - "\x12AccessRestrictions\x12#\n" + - "\rallowed_cidrs\x18\x01 \x03(\tR\fallowedCidrs\x12#\n" + - "\rblocked_cidrs\x18\x02 \x03(\tR\fblockedCidrs\x12+\n" + - "\x11allowed_countries\x18\x03 \x03(\tR\x10allowedCountries\x12+\n" + - "\x11blocked_countries\x18\x04 \x03(\tR\x10blockedCountries\"\xe6\x03\n" + - "\fProxyMapping\x126\n" + - "\x04type\x18\x01 \x01(\x0e2\".management.ProxyMappingUpdateTypeR\x04type\x12\x0e\n" + - "\x02id\x18\x02 \x01(\tR\x02id\x12\x1d\n" + - "\n" + - "account_id\x18\x03 \x01(\tR\taccountId\x12\x16\n" + - "\x06domain\x18\x04 \x01(\tR\x06domain\x12+\n" + - "\x04path\x18\x05 \x03(\v2\x17.management.PathMappingR\x04path\x12\x1d\n" + - "\n" + - "auth_token\x18\x06 \x01(\tR\tauthToken\x12.\n" + - "\x04auth\x18\a \x01(\v2\x1a.management.AuthenticationR\x04auth\x12(\n" + - "\x10pass_host_header\x18\b \x01(\bR\x0epassHostHeader\x12+\n" + - "\x11rewrite_redirects\x18\t \x01(\bR\x10rewriteRedirects\x12\x12\n" + - "\x04mode\x18\n" + - " \x01(\tR\x04mode\x12\x1f\n" + - "\vlisten_port\x18\v \x01(\x05R\n" + - "listenPort\x12O\n" + - "\x13access_restrictions\x18\f \x01(\v2\x1e.management.AccessRestrictionsR\x12accessRestrictions\"?\n" + - "\x14SendAccessLogRequest\x12'\n" + - "\x03log\x18\x01 \x01(\v2\x15.management.AccessLogR\x03log\"\x17\n" + - "\x15SendAccessLogResponse\"\x86\x04\n" + - "\tAccessLog\x128\n" + - "\ttimestamp\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12\x15\n" + - "\x06log_id\x18\x02 \x01(\tR\x05logId\x12\x1d\n" + - "\n" + - "account_id\x18\x03 \x01(\tR\taccountId\x12\x1d\n" + - "\n" + - "service_id\x18\x04 \x01(\tR\tserviceId\x12\x12\n" + - "\x04host\x18\x05 \x01(\tR\x04host\x12\x12\n" + - "\x04path\x18\x06 \x01(\tR\x04path\x12\x1f\n" + - "\vduration_ms\x18\a \x01(\x03R\n" + - "durationMs\x12\x16\n" + - "\x06method\x18\b \x01(\tR\x06method\x12#\n" + - "\rresponse_code\x18\t \x01(\x05R\fresponseCode\x12\x1b\n" + - "\tsource_ip\x18\n" + - " \x01(\tR\bsourceIp\x12%\n" + - "\x0eauth_mechanism\x18\v \x01(\tR\rauthMechanism\x12\x17\n" + - "\auser_id\x18\f \x01(\tR\x06userId\x12!\n" + - "\fauth_success\x18\r \x01(\bR\vauthSuccess\x12!\n" + - "\fbytes_upload\x18\x0e \x01(\x03R\vbytesUpload\x12%\n" + - "\x0ebytes_download\x18\x0f \x01(\x03R\rbytesDownload\x12\x1a\n" + - "\bprotocol\x18\x10 \x01(\tR\bprotocol\"\xf8\x01\n" + - "\x13AuthenticateRequest\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12\x1d\n" + - "\n" + - "account_id\x18\x02 \x01(\tR\taccountId\x129\n" + - "\bpassword\x18\x03 \x01(\v2\x1b.management.PasswordRequestH\x00R\bpassword\x12*\n" + - "\x03pin\x18\x04 \x01(\v2\x16.management.PinRequestH\x00R\x03pin\x12@\n" + - "\vheader_auth\x18\x05 \x01(\v2\x1d.management.HeaderAuthRequestH\x00R\n" + - "headerAuthB\t\n" + - "\arequest\"W\n" + - "\x11HeaderAuthRequest\x12!\n" + - "\fheader_value\x18\x01 \x01(\tR\vheaderValue\x12\x1f\n" + - "\vheader_name\x18\x02 \x01(\tR\n" + - "headerName\"-\n" + - "\x0fPasswordRequest\x12\x1a\n" + - "\bpassword\x18\x01 \x01(\tR\bpassword\"\x1e\n" + - "\n" + - "PinRequest\x12\x10\n" + - "\x03pin\x18\x01 \x01(\tR\x03pin\"U\n" + - "\x14AuthenticateResponse\x12\x18\n" + - "\asuccess\x18\x01 \x01(\bR\asuccess\x12#\n" + - "\rsession_token\x18\x02 \x01(\tR\fsessionToken\"\xf3\x01\n" + - "\x17SendStatusUpdateRequest\x12\x1d\n" + - "\n" + - "service_id\x18\x01 \x01(\tR\tserviceId\x12\x1d\n" + - "\n" + - "account_id\x18\x02 \x01(\tR\taccountId\x12/\n" + - "\x06status\x18\x03 \x01(\x0e2\x17.management.ProxyStatusR\x06status\x12-\n" + - "\x12certificate_issued\x18\x04 \x01(\bR\x11certificateIssued\x12(\n" + - "\rerror_message\x18\x05 \x01(\tH\x00R\ferrorMessage\x88\x01\x01B\x10\n" + - "\x0e_error_message\"\x1a\n" + - "\x18SendStatusUpdateResponse\"\xb8\x01\n" + - "\x16CreateProxyPeerRequest\x12\x1d\n" + - "\n" + - "service_id\x18\x01 \x01(\tR\tserviceId\x12\x1d\n" + - "\n" + - "account_id\x18\x02 \x01(\tR\taccountId\x12\x14\n" + - "\x05token\x18\x03 \x01(\tR\x05token\x120\n" + - "\x14wireguard_public_key\x18\x04 \x01(\tR\x12wireguardPublicKey\x12\x18\n" + - "\acluster\x18\x05 \x01(\tR\acluster\"o\n" + - "\x17CreateProxyPeerResponse\x12\x18\n" + - "\asuccess\x18\x01 \x01(\bR\asuccess\x12(\n" + - "\rerror_message\x18\x02 \x01(\tH\x00R\ferrorMessage\x88\x01\x01B\x10\n" + - "\x0e_error_message\"e\n" + - "\x11GetOIDCURLRequest\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12\x1d\n" + - "\n" + - "account_id\x18\x02 \x01(\tR\taccountId\x12!\n" + - "\fredirect_url\x18\x03 \x01(\tR\vredirectUrl\"&\n" + - "\x12GetOIDCURLResponse\x12\x10\n" + - "\x03url\x18\x01 \x01(\tR\x03url\"U\n" + - "\x16ValidateSessionRequest\x12\x16\n" + - "\x06domain\x18\x01 \x01(\tR\x06domain\x12#\n" + - "\rsession_token\x18\x02 \x01(\tR\fsessionToken\"\x8c\x01\n" + - "\x17ValidateSessionResponse\x12\x14\n" + - "\x05valid\x18\x01 \x01(\bR\x05valid\x12\x17\n" + - "\auser_id\x18\x02 \x01(\tR\x06userId\x12\x1d\n" + - "\n" + - "user_email\x18\x03 \x01(\tR\tuserEmail\x12#\n" + - "\rdenied_reason\x18\x04 \x01(\tR\fdeniedReason*d\n" + - "\x16ProxyMappingUpdateType\x12\x17\n" + - "\x13UPDATE_TYPE_CREATED\x10\x00\x12\x18\n" + - "\x14UPDATE_TYPE_MODIFIED\x10\x01\x12\x17\n" + - "\x13UPDATE_TYPE_REMOVED\x10\x02*F\n" + - "\x0fPathRewriteMode\x12\x18\n" + - "\x14PATH_REWRITE_DEFAULT\x10\x00\x12\x19\n" + - "\x15PATH_REWRITE_PRESERVE\x10\x01*\xc8\x01\n" + - "\vProxyStatus\x12\x18\n" + - "\x14PROXY_STATUS_PENDING\x10\x00\x12\x17\n" + - "\x13PROXY_STATUS_ACTIVE\x10\x01\x12#\n" + - "\x1fPROXY_STATUS_TUNNEL_NOT_CREATED\x10\x02\x12$\n" + - " PROXY_STATUS_CERTIFICATE_PENDING\x10\x03\x12#\n" + - "\x1fPROXY_STATUS_CERTIFICATE_FAILED\x10\x04\x12\x16\n" + - "\x12PROXY_STATUS_ERROR\x10\x052\xfc\x04\n" + - "\fProxyService\x12_\n" + - "\x10GetMappingUpdate\x12#.management.GetMappingUpdateRequest\x1a$.management.GetMappingUpdateResponse0\x01\x12T\n" + - "\rSendAccessLog\x12 .management.SendAccessLogRequest\x1a!.management.SendAccessLogResponse\x12Q\n" + - "\fAuthenticate\x12\x1f.management.AuthenticateRequest\x1a .management.AuthenticateResponse\x12]\n" + - "\x10SendStatusUpdate\x12#.management.SendStatusUpdateRequest\x1a$.management.SendStatusUpdateResponse\x12Z\n" + - "\x0fCreateProxyPeer\x12\".management.CreateProxyPeerRequest\x1a#.management.CreateProxyPeerResponse\x12K\n" + - "\n" + - "GetOIDCURL\x12\x1d.management.GetOIDCURLRequest\x1a\x1e.management.GetOIDCURLResponse\x12Z\n" + - "\x0fValidateSession\x12\".management.ValidateSessionRequest\x1a#.management.ValidateSessionResponseB\bZ\x06/protob\x06proto3" +var file_proxy_service_proto_rawDesc = []byte{ + 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x73, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x72, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x13, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x73, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x88, 0x01, + 0x01, 0x12, 0x30, 0x0a, 0x11, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x62, + 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x10, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x53, 0x75, 0x62, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, + 0x88, 0x01, 0x01, 0x42, 0x18, 0x0a, 0x16, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, + 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x42, 0x14, 0x0a, + 0x12, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x64, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x22, 0xe6, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, + 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x19, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, + 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x41, 0x0a, 0x0c, 0x63, 0x61, 0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, + 0x78, 0x79, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x0c, + 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0x82, 0x01, 0x0a, + 0x18, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, 0x70, + 0x70, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, + 0x70, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, + 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, + 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, + 0x65, 0x22, 0xce, 0x03, 0x0a, 0x11, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, + 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, + 0x42, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x12, 0x3e, 0x0a, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0b, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x63, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x12, 0x4b, 0x0a, 0x14, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, + 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x1a, 0x40, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x72, 0x0a, 0x0b, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, + 0x67, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x37, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, + 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x47, 0x0a, 0x0a, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x41, 0x75, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, + 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0xe5, 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x79, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x70, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x69, 0x64, 0x63, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x6f, 0x69, 0x64, 0x63, 0x12, 0x39, 0x0a, 0x0c, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x73, 0x22, 0xb8, 0x01, 0x0a, 0x12, 0x41, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x52, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x43, 0x69, + 0x64, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x63, + 0x69, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x65, 0x64, 0x43, 0x69, 0x64, 0x72, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x10, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x22, 0xe6, 0x03, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, + 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x12, 0x2b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, + 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, + 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2e, + 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x28, + 0x0a, 0x10, 0x70, 0x61, 0x73, 0x73, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x61, 0x73, 0x73, 0x48, 0x6f, + 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x4f, 0x0a, 0x13, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x74, 0x72, + 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x12, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, + 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3f, 0x0a, 0x14, 0x53, + 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x03, 0x6c, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x03, 0x6c, 0x6f, 0x67, 0x22, 0x17, 0x0a, 0x15, + 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x86, 0x04, 0x0a, 0x09, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x4c, 0x6f, 0x67, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x15, 0x0a, + 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, + 0x6f, 0x67, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, + 0x63, 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x49, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, + 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, + 0x75, 0x74, 0x68, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x17, 0x0a, 0x07, + 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, + 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x61, 0x75, 0x74, + 0x68, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x5f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, + 0x61, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0xf8, + 0x01, 0x0a, 0x13, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, + 0x12, 0x2a, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x69, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x40, 0x0a, 0x0b, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x48, 0x00, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x42, 0x09, + 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x57, 0x0a, 0x11, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, + 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x22, 0x2d, 0x0a, 0x0f, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x22, 0x1e, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, + 0x6e, 0x22, 0x55, 0x0a, 0x14, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xf3, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x6e, + 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x49, 0x73, 0x73, 0x75, + 0x65, 0x64, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, + 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x1a, + 0x0a, 0x18, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x16, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x77, 0x69, 0x72, + 0x65, 0x67, 0x75, 0x61, 0x72, 0x64, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, + 0x72, 0x64, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x65, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, + 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x6c, 0x22, 0x26, 0x0a, + 0x12, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x55, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8c, 0x01, 0x0a, + 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x12, 0x17, + 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x5f, + 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x73, 0x65, + 0x72, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, + 0x65, 0x6e, 0x69, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x2a, 0x64, 0x0a, 0x16, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, + 0x0a, 0x14, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x4f, + 0x44, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, + 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x10, + 0x02, 0x2a, 0x46, 0x0a, 0x0f, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x57, + 0x52, 0x49, 0x54, 0x45, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x19, + 0x0a, 0x15, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x50, + 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x10, 0x01, 0x2a, 0xc8, 0x01, 0x0a, 0x0b, 0x50, 0x72, + 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x52, 0x4f, + 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, + 0x47, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, + 0x54, 0x55, 0x53, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, + 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x54, 0x55, 0x4e, + 0x4e, 0x45, 0x4c, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, + 0x02, 0x12, 0x24, 0x0a, 0x20, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x45, + 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x23, 0x0a, 0x1f, 0x50, 0x52, 0x4f, 0x58, 0x59, + 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, + 0x41, 0x54, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, + 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x45, 0x52, 0x52, + 0x4f, 0x52, 0x10, 0x05, 0x32, 0xfc, 0x04, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5f, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, + 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, + 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, + 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x54, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, + 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, + 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, + 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x5d, 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, + 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, + 0x72, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, + 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x12, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} var ( file_proxy_service_proto_rawDescOnce sync.Once - file_proxy_service_proto_rawDescData []byte + file_proxy_service_proto_rawDescData = file_proxy_service_proto_rawDesc ) func file_proxy_service_proto_rawDescGZIP() []byte { file_proxy_service_proto_rawDescOnce.Do(func() { - file_proxy_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proxy_service_proto_rawDesc), len(file_proxy_service_proto_rawDesc))) + file_proxy_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_proxy_service_proto_rawDescData) }) return file_proxy_service_proto_rawDescData } var file_proxy_service_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_proxy_service_proto_msgTypes = make([]protoimpl.MessageInfo, 26) -var file_proxy_service_proto_goTypes = []any{ +var file_proxy_service_proto_goTypes = []interface{}{ (ProxyMappingUpdateType)(0), // 0: management.ProxyMappingUpdateType (PathRewriteMode)(0), // 1: management.PathRewriteMode (ProxyStatus)(0), // 2: management.ProxyStatus @@ -2146,19 +2373,321 @@ func file_proxy_service_proto_init() { if File_proxy_service_proto != nil { return } - file_proxy_service_proto_msgTypes[0].OneofWrappers = []any{} - file_proxy_service_proto_msgTypes[12].OneofWrappers = []any{ + if !protoimpl.UnsafeEnabled { + file_proxy_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProxyCapabilities); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMappingUpdateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMappingUpdateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathTargetOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathMapping); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderAuth); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Authentication); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AccessRestrictions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProxyMapping); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendAccessLogRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendAccessLogResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AccessLog); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthenticateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderAuthRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PasswordRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PinRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthenticateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendStatusUpdateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendStatusUpdateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateProxyPeerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateProxyPeerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetOIDCURLRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetOIDCURLResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateSessionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateSessionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_proxy_service_proto_msgTypes[0].OneofWrappers = []interface{}{} + file_proxy_service_proto_msgTypes[12].OneofWrappers = []interface{}{ (*AuthenticateRequest_Password)(nil), (*AuthenticateRequest_Pin)(nil), (*AuthenticateRequest_HeaderAuth)(nil), } - file_proxy_service_proto_msgTypes[17].OneofWrappers = []any{} - file_proxy_service_proto_msgTypes[20].OneofWrappers = []any{} + file_proxy_service_proto_msgTypes[17].OneofWrappers = []interface{}{} + file_proxy_service_proto_msgTypes[20].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_proxy_service_proto_rawDesc), len(file_proxy_service_proto_rawDesc)), + RawDescriptor: file_proxy_service_proto_rawDesc, NumEnums: 3, NumMessages: 26, NumExtensions: 0, @@ -2170,6 +2699,7 @@ func file_proxy_service_proto_init() { MessageInfos: file_proxy_service_proto_msgTypes, }.Build() File_proxy_service_proto = out.File + file_proxy_service_proto_rawDesc = nil file_proxy_service_proto_goTypes = nil file_proxy_service_proto_depIdxs = nil } From 5ae986e1c40193733b1e836b4413949428d2ef67 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Wed, 1 Apr 2026 12:31:30 +0200 Subject: [PATCH 261/374] [management] fix panic on management reboot (#5759) --- management/server/geolocation/geolocation.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/management/server/geolocation/geolocation.go b/management/server/geolocation/geolocation.go index 30fd493e8..0af3ce2f6 100644 --- a/management/server/geolocation/geolocation.go +++ b/management/server/geolocation/geolocation.go @@ -130,6 +130,10 @@ func (gl *geolocationImpl) Lookup(ip net.IP) (*Record, error) { gl.mux.RLock() defer gl.mux.RUnlock() + if gl.db == nil { + return nil, fmt.Errorf("geolocation database is not available") + } + var record Record err := gl.db.Lookup(ip, &record) if err != nil { @@ -173,8 +177,14 @@ func (gl *geolocationImpl) GetCitiesByCountry(countryISOCode string) ([]City, er func (gl *geolocationImpl) Stop() error { close(gl.stopCh) - if gl.db != nil { - if err := gl.db.Close(); err != nil { + + gl.mux.Lock() + db := gl.db + gl.db = nil + gl.mux.Unlock() + + if db != nil { + if err := db.Close(); err != nil { return err } } From 4d3e2f8ad3acd9af2be51f307e7381c0d24f9919 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Wed, 1 Apr 2026 13:21:19 +0200 Subject: [PATCH 262/374] Fix path join (#5762) --- client/cmd/service_params_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/cmd/service_params_test.go b/client/cmd/service_params_test.go index 684593a00..3bc8e4f60 100644 --- a/client/cmd/service_params_test.go +++ b/client/cmd/service_params_test.go @@ -25,10 +25,10 @@ func TestServiceParamsPath(t *testing.T) { t.Cleanup(func() { configs.StateDir = original }) configs.StateDir = "/var/lib/netbird" - assert.Equal(t, "/var/lib/netbird/service.json", serviceParamsPath()) + assert.Equal(t, filepath.Join("/var/lib/netbird", "service.json"), serviceParamsPath()) configs.StateDir = "/custom/state" - assert.Equal(t, "/custom/state/service.json", serviceParamsPath()) + assert.Equal(t, filepath.Join("/custom/state", "service.json"), serviceParamsPath()) } func TestSaveAndLoadServiceParams(t *testing.T) { From 940f530ac24aaed2e68cb4508dbc092d749c3139 Mon Sep 17 00:00:00 2001 From: shuuri-labs <61762328+shuuri-labs@users.noreply.github.com> Date: Wed, 1 Apr 2026 12:53:19 +0100 Subject: [PATCH 263/374] [management] Legacy to embedded IdP migration tool (#5586) --- .../workflows/check-license-dependencies.yml | 4 +- .goreleaser.yaml | 24 + go.mod | 2 +- idp/dex/config.go | 50 +- idp/dex/provider.go | 47 + idp/dex/provider_test.go | 292 ++++++ management/internals/server/controllers.go | 12 +- management/internals/server/modules.go | 4 +- .../activity/store/sql_store_idp_migration.go | 61 ++ .../store/sql_store_idp_migration_test.go | 161 ++++ management/server/auth/manager.go | 23 +- management/server/auth/manager_test.go | 8 +- .../testing/testing_tools/channel/channel.go | 4 +- management/server/idp/embedded.go | 12 + management/server/idp/migration/migration.go | 235 +++++ .../server/idp/migration/migration_test.go | 828 ++++++++++++++++++ management/server/idp/migration/store.go | 82 ++ .../server/store/sql_store_idp_migration.go | 177 ++++ shared/auth/jwt/validator.go | 48 +- tools/idp-migrate/DEVELOPMENT.md | 209 +++++ tools/idp-migrate/LICENSE | 661 ++++++++++++++ tools/idp-migrate/config.go | 174 ++++ tools/idp-migrate/main.go | 449 ++++++++++ tools/idp-migrate/main_test.go | 487 ++++++++++ 24 files changed, 4023 insertions(+), 31 deletions(-) create mode 100644 management/server/activity/store/sql_store_idp_migration.go create mode 100644 management/server/activity/store/sql_store_idp_migration_test.go create mode 100644 management/server/idp/migration/migration.go create mode 100644 management/server/idp/migration/migration_test.go create mode 100644 management/server/idp/migration/store.go create mode 100644 management/server/store/sql_store_idp_migration.go create mode 100644 tools/idp-migrate/DEVELOPMENT.md create mode 100644 tools/idp-migrate/LICENSE create mode 100644 tools/idp-migrate/config.go create mode 100644 tools/idp-migrate/main.go create mode 100644 tools/idp-migrate/main_test.go diff --git a/.github/workflows/check-license-dependencies.yml b/.github/workflows/check-license-dependencies.yml index d1d2a8e50..a721cb516 100644 --- a/.github/workflows/check-license-dependencies.yml +++ b/.github/workflows/check-license-dependencies.yml @@ -31,7 +31,7 @@ jobs: while IFS= read -r dir; do echo "=== Checking $dir ===" # Search for problematic imports, excluding test files - RESULTS=$(grep -r "github.com/netbirdio/netbird/\(management\|signal\|relay\|proxy\)" "$dir" --include="*.go" 2>/dev/null | grep -v "_test.go" | grep -v "test_" | grep -v "/test/" || true) + RESULTS=$(grep -r "github.com/netbirdio/netbird/\(management\|signal\|relay\|proxy\)" "$dir" --include="*.go" 2>/dev/null | grep -v "_test.go" | grep -v "test_" | grep -v "/test/" | grep -v "tools/idp-migrate/" || true) if [ -n "$RESULTS" ]; then echo "❌ Found problematic dependencies:" echo "$RESULTS" @@ -88,7 +88,7 @@ jobs: IMPORTERS=$(go list -json -deps ./... 2>/dev/null | jq -r "select(.Imports[]? == \"$package\") | .ImportPath") # Check if any importer is NOT in management/signal/relay - BSD_IMPORTER=$(echo "$IMPORTERS" | grep -v "github.com/netbirdio/netbird/\(management\|signal\|relay\|proxy\|combined\)" | head -1) + BSD_IMPORTER=$(echo "$IMPORTERS" | grep -v "github.com/netbirdio/netbird/\(management\|signal\|relay\|proxy\|combined\|tools/idp-migrate\)" | head -1) if [ -n "$BSD_IMPORTER" ]; then echo "❌ $package ($license) is imported by BSD-licensed code: $BSD_IMPORTER" diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 65e63dfa8..5ea479148 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -154,6 +154,26 @@ builds: - -s -w -X main.Version={{.Version}} -X main.Commit={{.Commit}} -X main.BuildDate={{.CommitDate}} mod_timestamp: "{{ .CommitTimestamp }}" + - id: netbird-idp-migrate + dir: tools/idp-migrate + env: + - CGO_ENABLED=1 + - >- + {{- if eq .Runtime.Goos "linux" }} + {{- if eq .Arch "arm64"}}CC=aarch64-linux-gnu-gcc{{- end }} + {{- if eq .Arch "arm"}}CC=arm-linux-gnueabihf-gcc{{- end }} + {{- end }} + binary: netbird-idp-migrate + goos: + - linux + goarch: + - amd64 + - arm64 + - arm + ldflags: + - -s -w -X github.com/netbirdio/netbird/version.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} -X main.builtBy=goreleaser + mod_timestamp: "{{ .CommitTimestamp }}" + universal_binaries: - id: netbird @@ -166,6 +186,10 @@ archives: - netbird-wasm name_template: "{{ .ProjectName }}_{{ .Version }}" format: binary + - id: netbird-idp-migrate + builds: + - netbird-idp-migrate + name_template: "netbird-idp-migrate_{{ .Version }}_{{ .Os }}_{{ .Arch }}" nfpms: - maintainer: Netbird diff --git a/go.mod b/go.mod index 89bc06fea..e9334f85b 100644 --- a/go.mod +++ b/go.mod @@ -49,6 +49,7 @@ require ( github.com/eko/gocache/store/redis/v4 v4.2.2 github.com/fsnotify/fsnotify v1.9.0 github.com/gliderlabs/ssh v0.3.8 + github.com/go-jose/go-jose/v4 v4.1.3 github.com/godbus/dbus/v5 v5.1.0 github.com/golang-jwt/jwt/v5 v5.3.0 github.com/golang/mock v1.6.0 @@ -181,7 +182,6 @@ require ( github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect github.com/go-gl/gl v0.0.0-20231021071112-07e5d0ea2e71 // indirect github.com/go-gl/glfw/v3.3/glfw v0.0.0-20240506104042-037f3cc74f2a // indirect - github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-ldap/ldap/v3 v3.4.12 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect diff --git a/idp/dex/config.go b/idp/dex/config.go index 3db04a4cb..7f5300f14 100644 --- a/idp/dex/config.go +++ b/idp/dex/config.go @@ -170,20 +170,66 @@ type Connector struct { } // ToStorageConnector converts a Connector to storage.Connector type. +// It maps custom connector types (e.g., "zitadel", "entra") to Dex-native types +// and augments the config with OIDC defaults when needed. func (c *Connector) ToStorageConnector() (storage.Connector, error) { - data, err := json.Marshal(c.Config) + dexType, augmentedConfig := mapConnectorToDex(c.Type, c.Config) + + data, err := json.Marshal(augmentedConfig) if err != nil { return storage.Connector{}, fmt.Errorf("failed to marshal connector config: %v", err) } return storage.Connector{ ID: c.ID, - Type: c.Type, + Type: dexType, Name: c.Name, Config: data, }, nil } +// mapConnectorToDex maps custom connector types to Dex-native types and applies +// OIDC defaults. This ensures static connectors from config files or env vars +// are stored with types that Dex can open. +func mapConnectorToDex(connType string, config map[string]interface{}) (string, map[string]interface{}) { + switch connType { + case "oidc", "zitadel", "entra", "okta", "pocketid", "authentik", "keycloak": + return "oidc", applyOIDCDefaults(connType, config) + default: + return connType, config + } +} + +// applyOIDCDefaults clones the config map, sets common OIDC defaults, +// and applies provider-specific overrides. +func applyOIDCDefaults(connType string, config map[string]interface{}) map[string]interface{} { + augmented := make(map[string]interface{}, len(config)+4) + for k, v := range config { + augmented[k] = v + } + setDefault(augmented, "scopes", []string{"openid", "profile", "email"}) + setDefault(augmented, "insecureEnableGroups", true) + setDefault(augmented, "insecureSkipEmailVerified", true) + + switch connType { + case "zitadel": + setDefault(augmented, "getUserInfo", true) + case "entra": + setDefault(augmented, "claimMapping", map[string]string{"email": "preferred_username"}) + case "okta", "pocketid": + augmented["scopes"] = []string{"openid", "profile", "email", "groups"} + } + + return augmented +} + +// setDefault sets a key in the map only if it doesn't already exist. +func setDefault(m map[string]interface{}, key string, value interface{}) { + if _, ok := m[key]; !ok { + m[key] = value + } +} + // StorageConfig is a configuration that can create a storage. type StorageConfig interface { Open(logger *slog.Logger) (storage.Storage, error) diff --git a/idp/dex/provider.go b/idp/dex/provider.go index 68fe48486..24aed1b99 100644 --- a/idp/dex/provider.go +++ b/idp/dex/provider.go @@ -4,6 +4,7 @@ package dex import ( "context" "encoding/base64" + "encoding/json" "errors" "fmt" "log/slog" @@ -19,10 +20,13 @@ import ( "github.com/dexidp/dex/server" "github.com/dexidp/dex/storage" "github.com/dexidp/dex/storage/sql" + jose "github.com/go-jose/go-jose/v4" "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" "golang.org/x/crypto/bcrypt" "google.golang.org/grpc" + + nbjwt "github.com/netbirdio/netbird/shared/auth/jwt" ) // Config matches what management/internals/server/server.go expects @@ -666,3 +670,46 @@ func (p *Provider) GetAuthorizationEndpoint() string { } return issuer + "/auth" } + +// GetJWKS reads signing keys directly from Dex storage and returns them as Jwks. +// This avoids HTTP round-trips when the embedded IDP is co-located with the management server. +// The key retrieval mirrors Dex's own handlePublicKeys/ValidationKeys logic: +// SigningKeyPub first, then all VerificationKeys, serialized via go-jose. +func (p *Provider) GetJWKS(ctx context.Context) (*nbjwt.Jwks, error) { + keys, err := p.storage.GetKeys(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get keys from storage: %w", err) + } + + if keys.SigningKeyPub == nil { + return nil, fmt.Errorf("no public keys found in storage") + } + + // Build the key set exactly as Dex's localSigner.ValidationKeys does: + // signing key first, then all verification (rotated) keys. + joseKeys := make([]jose.JSONWebKey, 0, len(keys.VerificationKeys)+1) + joseKeys = append(joseKeys, *keys.SigningKeyPub) + for _, vk := range keys.VerificationKeys { + if vk.PublicKey != nil { + joseKeys = append(joseKeys, *vk.PublicKey) + } + } + + // Serialize through go-jose (same as Dex's handlePublicKeys handler) + // then deserialize into our Jwks type, so the JSON field mapping is identical + // to what the /keys HTTP endpoint would return. + joseSet := jose.JSONWebKeySet{Keys: joseKeys} + data, err := json.Marshal(joseSet) + if err != nil { + return nil, fmt.Errorf("failed to marshal JWKS: %w", err) + } + + jwks := &nbjwt.Jwks{} + if err := json.Unmarshal(data, jwks); err != nil { + return nil, fmt.Errorf("failed to unmarshal JWKS: %w", err) + } + + jwks.ExpiresInTime = keys.NextRotation + + return jwks, nil +} diff --git a/idp/dex/provider_test.go b/idp/dex/provider_test.go index bd2f676fb..4ed89fd2e 100644 --- a/idp/dex/provider_test.go +++ b/idp/dex/provider_test.go @@ -2,11 +2,14 @@ package dex import ( "context" + "encoding/json" "log/slog" "os" "path/filepath" "testing" + "github.com/dexidp/dex/storage" + sqllib "github.com/dexidp/dex/storage/sql" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -197,6 +200,295 @@ enablePasswordDB: true t.Logf("User lookup successful: rawID=%s, connectorID=%s", rawID, connID) } +// openTestStorage creates a SQLite storage in the given directory for testing. +func openTestStorage(t *testing.T, tmpDir string) storage.Storage { + t.Helper() + logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) + stor, err := (&sqllib.SQLite3{File: filepath.Join(tmpDir, "dex.db")}).Open(logger) + require.NoError(t, err) + return stor +} + +func TestStaticConnectors_CreatedFromYAML(t *testing.T) { + ctx := context.Background() + + tmpDir, err := os.MkdirTemp("", "dex-static-conn-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + yamlContent := ` +issuer: http://localhost:5556/dex +storage: + type: sqlite3 + config: + file: ` + filepath.Join(tmpDir, "dex.db") + ` +web: + http: 127.0.0.1:5556 +enablePasswordDB: true +connectors: +- type: oidc + id: my-oidc + name: My OIDC Provider + config: + issuer: https://accounts.example.com + clientID: test-client-id + clientSecret: test-client-secret + redirectURI: http://localhost:5556/dex/callback +` + configPath := filepath.Join(tmpDir, "config.yaml") + err = os.WriteFile(configPath, []byte(yamlContent), 0644) + require.NoError(t, err) + + yamlConfig, err := LoadConfig(configPath) + require.NoError(t, err) + + // Open storage and run initializeStorage directly (avoids Dex server + // trying to dial the OIDC issuer) + stor := openTestStorage(t, tmpDir) + defer stor.Close() + + err = initializeStorage(ctx, stor, yamlConfig) + require.NoError(t, err) + + // Verify connector was created in storage + conn, err := stor.GetConnector(ctx, "my-oidc") + require.NoError(t, err) + assert.Equal(t, "my-oidc", conn.ID) + assert.Equal(t, "My OIDC Provider", conn.Name) + assert.Equal(t, "oidc", conn.Type) + + // Verify config fields were serialized correctly + var configMap map[string]interface{} + err = json.Unmarshal(conn.Config, &configMap) + require.NoError(t, err) + assert.Equal(t, "https://accounts.example.com", configMap["issuer"]) + assert.Equal(t, "test-client-id", configMap["clientID"]) +} + +func TestStaticConnectors_UpdatedOnRestart(t *testing.T) { + ctx := context.Background() + + tmpDir, err := os.MkdirTemp("", "dex-static-conn-update-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + dbFile := filepath.Join(tmpDir, "dex.db") + + // First: load config with initial connector + yamlContent1 := ` +issuer: http://localhost:5556/dex +storage: + type: sqlite3 + config: + file: ` + dbFile + ` +web: + http: 127.0.0.1:5556 +enablePasswordDB: true +connectors: +- type: oidc + id: my-oidc + name: Original Name + config: + issuer: https://accounts.example.com + clientID: original-client-id + clientSecret: original-secret +` + configPath := filepath.Join(tmpDir, "config.yaml") + err = os.WriteFile(configPath, []byte(yamlContent1), 0644) + require.NoError(t, err) + + yamlConfig1, err := LoadConfig(configPath) + require.NoError(t, err) + + stor := openTestStorage(t, tmpDir) + err = initializeStorage(ctx, stor, yamlConfig1) + require.NoError(t, err) + + // Verify initial state + conn, err := stor.GetConnector(ctx, "my-oidc") + require.NoError(t, err) + assert.Equal(t, "Original Name", conn.Name) + + var configMap1 map[string]interface{} + err = json.Unmarshal(conn.Config, &configMap1) + require.NoError(t, err) + assert.Equal(t, "original-client-id", configMap1["clientID"]) + + // Close storage to simulate restart + stor.Close() + + // Second: load updated config against the same DB + yamlContent2 := ` +issuer: http://localhost:5556/dex +storage: + type: sqlite3 + config: + file: ` + dbFile + ` +web: + http: 127.0.0.1:5556 +enablePasswordDB: true +connectors: +- type: oidc + id: my-oidc + name: Updated Name + config: + issuer: https://accounts.example.com + clientID: updated-client-id + clientSecret: updated-secret +` + err = os.WriteFile(configPath, []byte(yamlContent2), 0644) + require.NoError(t, err) + + yamlConfig2, err := LoadConfig(configPath) + require.NoError(t, err) + + stor2 := openTestStorage(t, tmpDir) + defer stor2.Close() + + err = initializeStorage(ctx, stor2, yamlConfig2) + require.NoError(t, err) + + // Verify connector was updated, not duplicated + allConnectors, err := stor2.ListConnectors(ctx) + require.NoError(t, err) + + nonLocalCount := 0 + for _, c := range allConnectors { + if c.ID != "local" { + nonLocalCount++ + } + } + assert.Equal(t, 1, nonLocalCount, "connector should be updated, not duplicated") + + conn2, err := stor2.GetConnector(ctx, "my-oidc") + require.NoError(t, err) + assert.Equal(t, "Updated Name", conn2.Name) + + var configMap2 map[string]interface{} + err = json.Unmarshal(conn2.Config, &configMap2) + require.NoError(t, err) + assert.Equal(t, "updated-client-id", configMap2["clientID"]) +} + +func TestStaticConnectors_MultipleConnectors(t *testing.T) { + ctx := context.Background() + + tmpDir, err := os.MkdirTemp("", "dex-static-conn-multi-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + yamlContent := ` +issuer: http://localhost:5556/dex +storage: + type: sqlite3 + config: + file: ` + filepath.Join(tmpDir, "dex.db") + ` +web: + http: 127.0.0.1:5556 +enablePasswordDB: true +connectors: +- type: oidc + id: my-oidc + name: My OIDC Provider + config: + issuer: https://accounts.example.com + clientID: oidc-client-id + clientSecret: oidc-secret +- type: google + id: my-google + name: Google Login + config: + clientID: google-client-id + clientSecret: google-secret +` + configPath := filepath.Join(tmpDir, "config.yaml") + err = os.WriteFile(configPath, []byte(yamlContent), 0644) + require.NoError(t, err) + + yamlConfig, err := LoadConfig(configPath) + require.NoError(t, err) + + stor := openTestStorage(t, tmpDir) + defer stor.Close() + + err = initializeStorage(ctx, stor, yamlConfig) + require.NoError(t, err) + + allConnectors, err := stor.ListConnectors(ctx) + require.NoError(t, err) + + // Build a map for easier assertion + connByID := make(map[string]storage.Connector) + for _, c := range allConnectors { + connByID[c.ID] = c + } + + // Verify both static connectors exist + oidcConn, ok := connByID["my-oidc"] + require.True(t, ok, "oidc connector should exist") + assert.Equal(t, "My OIDC Provider", oidcConn.Name) + assert.Equal(t, "oidc", oidcConn.Type) + + var oidcConfig map[string]interface{} + err = json.Unmarshal(oidcConn.Config, &oidcConfig) + require.NoError(t, err) + assert.Equal(t, "oidc-client-id", oidcConfig["clientID"]) + + googleConn, ok := connByID["my-google"] + require.True(t, ok, "google connector should exist") + assert.Equal(t, "Google Login", googleConn.Name) + assert.Equal(t, "google", googleConn.Type) + + var googleConfig map[string]interface{} + err = json.Unmarshal(googleConn.Config, &googleConfig) + require.NoError(t, err) + assert.Equal(t, "google-client-id", googleConfig["clientID"]) + + // Verify local connector still exists alongside them (enablePasswordDB: true) + localConn, ok := connByID["local"] + require.True(t, ok, "local connector should exist") + assert.Equal(t, "local", localConn.Type) +} + +func TestStaticConnectors_EmptyList(t *testing.T) { + ctx := context.Background() + + tmpDir, err := os.MkdirTemp("", "dex-static-conn-empty-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + yamlContent := ` +issuer: http://localhost:5556/dex +storage: + type: sqlite3 + config: + file: ` + filepath.Join(tmpDir, "dex.db") + ` +web: + http: 127.0.0.1:5556 +enablePasswordDB: true +` + configPath := filepath.Join(tmpDir, "config.yaml") + err = os.WriteFile(configPath, []byte(yamlContent), 0644) + require.NoError(t, err) + + yamlConfig, err := LoadConfig(configPath) + require.NoError(t, err) + + provider, err := NewProviderFromYAML(ctx, yamlConfig) + require.NoError(t, err) + defer func() { _ = provider.Stop(ctx) }() + + // No static connectors configured, so ListConnectors should return empty + connectors, err := provider.ListConnectors(ctx) + require.NoError(t, err) + assert.Empty(t, connectors) + + // But local connector should still exist + localConn, err := provider.Storage().GetConnector(ctx, "local") + require.NoError(t, err) + assert.Equal(t, "local", localConn.ID) +} + func TestNewProvider_ContinueOnConnectorFailure(t *testing.T) { ctx := context.Background() diff --git a/management/internals/server/controllers.go b/management/internals/server/controllers.go index 62ed659c0..c7eab3d19 100644 --- a/management/internals/server/controllers.go +++ b/management/internals/server/controllers.go @@ -20,6 +20,7 @@ import ( "github.com/netbirdio/netbird/management/server/integrations/integrated_validator" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" "github.com/netbirdio/netbird/management/server/job" + nbjwt "github.com/netbirdio/netbird/shared/auth/jwt" ) func (s *BaseServer) PeersUpdateManager() network_map.PeersUpdateManager { @@ -71,6 +72,7 @@ func (s *BaseServer) AuthManager() auth.Manager { signingKeyRefreshEnabled := s.Config.HttpConfig.IdpSignKeyRefreshEnabled issuer := s.Config.HttpConfig.AuthIssuer userIDClaim := s.Config.HttpConfig.AuthUserIDClaim + var keyFetcher nbjwt.KeyFetcher // Use embedded IdP configuration if available if oauthProvider := s.OAuthConfigProvider(); oauthProvider != nil { @@ -78,8 +80,11 @@ func (s *BaseServer) AuthManager() auth.Manager { if len(audiences) > 0 { audience = audiences[0] // Use the first client ID as the primary audience } - // Use localhost keys location for internal validation (management has embedded Dex) - keysLocation = oauthProvider.GetLocalKeysLocation() + keyFetcher = oauthProvider.GetKeyFetcher() + // Fall back to default keys location if direct key fetching is not available + if keyFetcher == nil { + keysLocation = oauthProvider.GetLocalKeysLocation() + } signingKeyRefreshEnabled = true issuer = oauthProvider.GetIssuer() userIDClaim = oauthProvider.GetUserIDClaim() @@ -92,7 +97,8 @@ func (s *BaseServer) AuthManager() auth.Manager { keysLocation, userIDClaim, audiences, - signingKeyRefreshEnabled) + signingKeyRefreshEnabled, + keyFetcher) }) } diff --git a/management/internals/server/modules.go b/management/internals/server/modules.go index 6064bd5b6..374ea5c81 100644 --- a/management/internals/server/modules.go +++ b/management/internals/server/modules.go @@ -117,9 +117,11 @@ func (s *BaseServer) IdpManager() idp.Manager { return Create(s, func() idp.Manager { var idpManager idp.Manager var err error + // Use embedded IdP service if embedded Dex is configured and enabled. // Legacy IdpManager won't be used anymore even if configured. - if s.Config.EmbeddedIdP != nil && s.Config.EmbeddedIdP.Enabled { + embeddedEnabled := s.Config.EmbeddedIdP != nil && s.Config.EmbeddedIdP.Enabled + if embeddedEnabled { idpManager, err = idp.NewEmbeddedIdPManager(context.Background(), s.Config.EmbeddedIdP, s.Metrics()) if err != nil { log.Fatalf("failed to create embedded IDP service: %v", err) diff --git a/management/server/activity/store/sql_store_idp_migration.go b/management/server/activity/store/sql_store_idp_migration.go new file mode 100644 index 000000000..1b3a9ecd9 --- /dev/null +++ b/management/server/activity/store/sql_store_idp_migration.go @@ -0,0 +1,61 @@ +package store + +// This file contains migration-only methods on Store. +// They satisfy the migration.MigrationEventStore interface via duck typing. +// Delete this file when migration tooling is no longer needed. + +import ( + "context" + "fmt" + + "gorm.io/gorm" + + "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/management/server/idp/migration" +) + +// CheckSchema verifies that all tables and columns required by the migration exist in the event database. +func (store *Store) CheckSchema(checks []migration.SchemaCheck) []migration.SchemaError { + migrator := store.db.Migrator() + var errs []migration.SchemaError + + for _, check := range checks { + if !migrator.HasTable(check.Table) { + errs = append(errs, migration.SchemaError{Table: check.Table}) + continue + } + for _, col := range check.Columns { + if !migrator.HasColumn(check.Table, col) { + errs = append(errs, migration.SchemaError{Table: check.Table, Column: col}) + } + } + } + + return errs +} + +// UpdateUserID updates all references to oldUserID in events and deleted_users tables. +func (store *Store) UpdateUserID(ctx context.Context, oldUserID, newUserID string) error { + return store.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + if err := tx.Model(&activity.Event{}). + Where("initiator_id = ?", oldUserID). + Update("initiator_id", newUserID).Error; err != nil { + return fmt.Errorf("update events.initiator_id: %w", err) + } + + if err := tx.Model(&activity.Event{}). + Where("target_id = ?", oldUserID). + Update("target_id", newUserID).Error; err != nil { + return fmt.Errorf("update events.target_id: %w", err) + } + + // Raw exec: GORM can't update a PK via Model().Update() + if err := tx.Exec( + "UPDATE deleted_users SET id = ? WHERE id = ?", newUserID, oldUserID, + ).Error; err != nil { + return fmt.Errorf("update deleted_users.id: %w", err) + } + + return nil + }) +} diff --git a/management/server/activity/store/sql_store_idp_migration_test.go b/management/server/activity/store/sql_store_idp_migration_test.go new file mode 100644 index 000000000..98b6e1327 --- /dev/null +++ b/management/server/activity/store/sql_store_idp_migration_test.go @@ -0,0 +1,161 @@ +package store + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/util/crypt" +) + +func TestUpdateUserID(t *testing.T) { + ctx := context.Background() + + newStore := func(t *testing.T) *Store { + t.Helper() + key, _ := crypt.GenerateKey() + s, err := NewSqlStore(ctx, t.TempDir(), key) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { s.Close(ctx) }) //nolint + return s + } + + t.Run("updates initiator_id in events", func(t *testing.T) { + store := newStore(t) + accountID := "account_1" + + _, err := store.Save(ctx, &activity.Event{ + Timestamp: time.Now().UTC(), + Activity: activity.PeerAddedByUser, + InitiatorID: "old-user", + TargetID: "some-peer", + AccountID: accountID, + }) + assert.NoError(t, err) + + err = store.UpdateUserID(ctx, "old-user", "new-user") + assert.NoError(t, err) + + result, err := store.Get(ctx, accountID, 0, 10, false) + assert.NoError(t, err) + assert.Len(t, result, 1) + assert.Equal(t, "new-user", result[0].InitiatorID) + }) + + t.Run("updates target_id in events", func(t *testing.T) { + store := newStore(t) + accountID := "account_1" + + _, err := store.Save(ctx, &activity.Event{ + Timestamp: time.Now().UTC(), + Activity: activity.PeerAddedByUser, + InitiatorID: "some-admin", + TargetID: "old-user", + AccountID: accountID, + }) + assert.NoError(t, err) + + err = store.UpdateUserID(ctx, "old-user", "new-user") + assert.NoError(t, err) + + result, err := store.Get(ctx, accountID, 0, 10, false) + assert.NoError(t, err) + assert.Len(t, result, 1) + assert.Equal(t, "new-user", result[0].TargetID) + }) + + t.Run("updates deleted_users id", func(t *testing.T) { + store := newStore(t) + accountID := "account_1" + + // Save an event with email/name meta to create a deleted_users row for "old-user" + _, err := store.Save(ctx, &activity.Event{ + Timestamp: time.Now().UTC(), + Activity: activity.PeerAddedByUser, + InitiatorID: "admin", + TargetID: "old-user", + AccountID: accountID, + Meta: map[string]any{ + "email": "user@example.com", + "name": "Test User", + }, + }) + assert.NoError(t, err) + + err = store.UpdateUserID(ctx, "old-user", "new-user") + assert.NoError(t, err) + + // Save another event referencing new-user with email/name meta. + // This should upsert (not conflict) because the PK was already migrated. + _, err = store.Save(ctx, &activity.Event{ + Timestamp: time.Now().UTC(), + Activity: activity.PeerAddedByUser, + InitiatorID: "admin", + TargetID: "new-user", + AccountID: accountID, + Meta: map[string]any{ + "email": "user@example.com", + "name": "Test User", + }, + }) + assert.NoError(t, err) + + // The deleted user info should be retrievable via Get (joined on target_id) + result, err := store.Get(ctx, accountID, 0, 10, false) + assert.NoError(t, err) + assert.Len(t, result, 2) + for _, ev := range result { + assert.Equal(t, "new-user", ev.TargetID) + } + }) + + t.Run("no-op when old user ID does not exist", func(t *testing.T) { + store := newStore(t) + + err := store.UpdateUserID(ctx, "nonexistent-user", "new-user") + assert.NoError(t, err) + }) + + t.Run("only updates matching user leaves others unchanged", func(t *testing.T) { + store := newStore(t) + accountID := "account_1" + + _, err := store.Save(ctx, &activity.Event{ + Timestamp: time.Now().UTC(), + Activity: activity.PeerAddedByUser, + InitiatorID: "user-a", + TargetID: "peer-1", + AccountID: accountID, + }) + assert.NoError(t, err) + + _, err = store.Save(ctx, &activity.Event{ + Timestamp: time.Now().UTC(), + Activity: activity.PeerAddedByUser, + InitiatorID: "user-b", + TargetID: "peer-2", + AccountID: accountID, + }) + assert.NoError(t, err) + + err = store.UpdateUserID(ctx, "user-a", "user-a-new") + assert.NoError(t, err) + + result, err := store.Get(ctx, accountID, 0, 10, false) + assert.NoError(t, err) + assert.Len(t, result, 2) + + for _, ev := range result { + if ev.TargetID == "peer-1" { + assert.Equal(t, "user-a-new", ev.InitiatorID) + } else { + assert.Equal(t, "user-b", ev.InitiatorID) + } + } + }) +} diff --git a/management/server/auth/manager.go b/management/server/auth/manager.go index 76cc750b6..27346a604 100644 --- a/management/server/auth/manager.go +++ b/management/server/auth/manager.go @@ -33,15 +33,20 @@ type manager struct { extractor *nbjwt.ClaimsExtractor } -func NewManager(store store.Store, issuer, audience, keysLocation, userIdClaim string, allAudiences []string, idpRefreshKeys bool) Manager { - // @note if invalid/missing parameters are sent the validator will instantiate - // but it will fail when validating and parsing the token - jwtValidator := nbjwt.NewValidator( - issuer, - allAudiences, - keysLocation, - idpRefreshKeys, - ) +func NewManager(store store.Store, issuer, audience, keysLocation, userIdClaim string, allAudiences []string, idpRefreshKeys bool, keyFetcher nbjwt.KeyFetcher) Manager { + var jwtValidator *nbjwt.Validator + if keyFetcher != nil { + jwtValidator = nbjwt.NewValidatorWithKeyFetcher(issuer, allAudiences, keyFetcher) + } else { + // @note if invalid/missing parameters are sent the validator will instantiate + // but it will fail when validating and parsing the token + jwtValidator = nbjwt.NewValidator( + issuer, + allAudiences, + keysLocation, + idpRefreshKeys, + ) + } claimsExtractor := nbjwt.NewClaimsExtractor( nbjwt.WithAudience(audience), diff --git a/management/server/auth/manager_test.go b/management/server/auth/manager_test.go index b9f091b1e..469737f47 100644 --- a/management/server/auth/manager_test.go +++ b/management/server/auth/manager_test.go @@ -52,7 +52,7 @@ func TestAuthManager_GetAccountInfoFromPAT(t *testing.T) { t.Fatalf("Error when saving account: %s", err) } - manager := auth.NewManager(store, "", "", "", "", []string{}, false) + manager := auth.NewManager(store, "", "", "", "", []string{}, false, nil) user, pat, _, _, err := manager.GetPATInfo(context.Background(), token) if err != nil { @@ -92,7 +92,7 @@ func TestAuthManager_MarkPATUsed(t *testing.T) { t.Fatalf("Error when saving account: %s", err) } - manager := auth.NewManager(store, "", "", "", "", []string{}, false) + manager := auth.NewManager(store, "", "", "", "", []string{}, false, nil) err = manager.MarkPATUsed(context.Background(), "tokenId") if err != nil { @@ -142,7 +142,7 @@ func TestAuthManager_EnsureUserAccessByJWTGroups(t *testing.T) { // these tests only assert groups are parsed from token as per account settings token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"idp-groups": []interface{}{"group1", "group2"}}) - manager := auth.NewManager(store, "", "", "", "", []string{}, false) + manager := auth.NewManager(store, "", "", "", "", []string{}, false, nil) t.Run("JWT groups disabled", func(t *testing.T) { userAuth, err := manager.EnsureUserAccessByJWTGroups(context.Background(), userAuth, token) @@ -225,7 +225,7 @@ func TestAuthManager_ValidateAndParseToken(t *testing.T) { keyId := "test-key" // note, we can use a nil store because ValidateAndParseToken does not use it in it's flow - manager := auth.NewManager(nil, issuer, audience, server.URL, userIdClaim, []string{audience}, false) + manager := auth.NewManager(nil, issuer, audience, server.URL, userIdClaim, []string{audience}, false, nil) customClaim := func(name string) string { return fmt.Sprintf("%s/%s", audience, name) diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index c6e57b1be..d9d85a0a2 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -119,7 +119,7 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee am.SetServiceManager(serviceManager) // @note this is required so that PAT's validate from store, but JWT's are mocked - authManager := serverauth.NewManager(store, "", "", "", "", []string{}, false) + authManager := serverauth.NewManager(store, "", "", "", "", []string{}, false, nil) authManagerMock := &serverauth.MockManager{ ValidateAndParseTokenFunc: mockValidateAndParseToken, EnsureUserAccessByJWTGroupsFunc: authManager.EnsureUserAccessByJWTGroups, @@ -248,7 +248,7 @@ func BuildApiBlackBoxWithDBStateAndPeerChannel(t testing_tools.TB, sqlFile strin am.SetServiceManager(serviceManager) // @note this is required so that PAT's validate from store, but JWT's are mocked - authManager := serverauth.NewManager(store, "", "", "", "", []string{}, false) + authManager := serverauth.NewManager(store, "", "", "", "", []string{}, false, nil) authManagerMock := &serverauth.MockManager{ ValidateAndParseTokenFunc: mockValidateAndParseToken, EnsureUserAccessByJWTGroupsFunc: authManager.EnsureUserAccessByJWTGroups, diff --git a/management/server/idp/embedded.go b/management/server/idp/embedded.go index 2cc7b9743..48d3221cc 100644 --- a/management/server/idp/embedded.go +++ b/management/server/idp/embedded.go @@ -13,6 +13,7 @@ import ( "github.com/netbirdio/netbird/idp/dex" "github.com/netbirdio/netbird/management/server/telemetry" + nbjwt "github.com/netbirdio/netbird/shared/auth/jwt" ) const ( @@ -48,6 +49,8 @@ type EmbeddedIdPConfig struct { // Existing local users are preserved and will be able to login again if re-enabled. // Cannot be enabled if no external identity provider connectors are configured. LocalAuthDisabled bool + // StaticConnectors are additional connectors to seed during initialization + StaticConnectors []dex.Connector } // EmbeddedStorageConfig holds storage configuration for the embedded IdP. @@ -157,6 +160,7 @@ func (c *EmbeddedIdPConfig) ToYAMLConfig() (*dex.YAMLConfig, error) { RedirectURIs: cliRedirectURIs, }, }, + StaticConnectors: c.StaticConnectors, } // Add owner user if provided @@ -193,6 +197,9 @@ type OAuthConfigProvider interface { // Management server has embedded Dex and can validate tokens via localhost, // avoiding external network calls and DNS resolution issues during startup. GetLocalKeysLocation() string + // GetKeyFetcher returns a KeyFetcher that reads keys directly from the IDP storage, + // or nil if direct key fetching is not supported (falls back to HTTP). + GetKeyFetcher() nbjwt.KeyFetcher GetClientIDs() []string GetUserIDClaim() string GetTokenEndpoint() string @@ -593,6 +600,11 @@ func (m *EmbeddedIdPManager) GetCLIRedirectURLs() []string { return m.config.CLIRedirectURIs } +// GetKeyFetcher returns a KeyFetcher that reads keys directly from Dex storage. +func (m *EmbeddedIdPManager) GetKeyFetcher() nbjwt.KeyFetcher { + return m.provider.GetJWKS +} + // GetKeysLocation returns the JWKS endpoint URL for token validation. func (m *EmbeddedIdPManager) GetKeysLocation() string { return m.provider.GetKeysLocation() diff --git a/management/server/idp/migration/migration.go b/management/server/idp/migration/migration.go new file mode 100644 index 000000000..01cadb86d --- /dev/null +++ b/management/server/idp/migration/migration.go @@ -0,0 +1,235 @@ +// Package migration provides utility functions for migrating from the external IdP solution in pre v0.62.0 +// to the new embedded IdP manager (Dex based), which is the default in v0.62.0 and later. +// It includes functions to seed connectors and migrate existing users to use these connectors. +package migration + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/idp/dex" + "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/management/server/types" +) + +// Server is the dependency interface that migration functions use to access +// the main data store and the activity event store. +type Server interface { + Store() Store + EventStore() EventStore // may return nil +} + +const idpSeedInfoKey = "IDP_SEED_INFO" +const dryRunEnvKey = "NB_IDP_MIGRATION_DRY_RUN" + +func isDryRun() bool { + return os.Getenv(dryRunEnvKey) == "true" +} + +var ErrNoSeedInfo = errors.New("no seed info found in environment") + +// SeedConnectorFromEnv reads the IDP_SEED_INFO env var, base64-decodes it, +// and JSON-unmarshals it into a dex.Connector. Returns nil if not set. +func SeedConnectorFromEnv() (*dex.Connector, error) { + val, ok := os.LookupEnv(idpSeedInfoKey) + if !ok || val == "" { + return nil, ErrNoSeedInfo + } + + decoded, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return nil, fmt.Errorf("base64 decode: %w", err) + } + + var conn dex.Connector + if err := json.Unmarshal(decoded, &conn); err != nil { + return nil, fmt.Errorf("json unmarshal: %w", err) + } + + return &conn, nil +} + +// MigrateUsersToStaticConnectors re-keys every user ID in the main store (and +// the activity store, if present) so that it encodes the given connector ID, +// skipping users that have already been migrated. Set NB_IDP_MIGRATION_DRY_RUN=true +// to log what would happen without writing any changes. +func MigrateUsersToStaticConnectors(s Server, conn *dex.Connector) error { + ctx := context.Background() + + if isDryRun() { + log.Info("[DRY RUN] migration dry-run mode enabled, no changes will be written") + } + + users, err := s.Store().ListUsers(ctx) + if err != nil { + return fmt.Errorf("failed to list users: %w", err) + } + + // Reconciliation pass: fix activity store for users already migrated in main DB + // but whose activity references may still use old IDs (from a previous partial failure). + if s.EventStore() != nil && !isDryRun() { + if err := reconcileActivityStore(ctx, s.EventStore(), users); err != nil { + return err + } + } + + var migratedCount, skippedCount int + + for _, user := range users { + _, _, decErr := dex.DecodeDexUserID(user.Id) + if decErr == nil { + skippedCount++ + continue + } + + newUserID := dex.EncodeDexUserID(user.Id, conn.ID) + + if isDryRun() { + log.Infof("[DRY RUN] would migrate user %s -> %s (account: %s)", user.Id, newUserID, user.AccountID) + migratedCount++ + continue + } + + if err := migrateUser(ctx, s, user.Id, user.AccountID, newUserID); err != nil { + return err + } + + migratedCount++ + } + + if isDryRun() { + log.Infof("[DRY RUN] migration summary: %d users would be migrated, %d already migrated", migratedCount, skippedCount) + } else { + log.Infof("migration complete: %d users migrated, %d already migrated", migratedCount, skippedCount) + } + + return nil +} + +// reconcileActivityStore updates activity store references for users already migrated +// in the main DB whose activity entries may still use old IDs from a previous partial failure. +func reconcileActivityStore(ctx context.Context, eventStore EventStore, users []*types.User) error { + for _, user := range users { + originalID, _, err := dex.DecodeDexUserID(user.Id) + if err != nil { + // skip users that aren't migrated, they will be handled in the main migration loop + continue + } + if err := eventStore.UpdateUserID(ctx, originalID, user.Id); err != nil { + return fmt.Errorf("reconcile activity store for user %s: %w", user.Id, err) + } + } + return nil +} + +// migrateUser updates a single user's ID in both the main store and the activity store. +func migrateUser(ctx context.Context, s Server, oldID, accountID, newID string) error { + if err := s.Store().UpdateUserID(ctx, accountID, oldID, newID); err != nil { + return fmt.Errorf("failed to update user ID for user %s: %w", oldID, err) + } + + if s.EventStore() == nil { + return nil + } + + if err := s.EventStore().UpdateUserID(ctx, oldID, newID); err != nil { + return fmt.Errorf("failed to update activity store user ID for user %s: %w", oldID, err) + } + + return nil +} + +// PopulateUserInfo fetches user email and name from the external IDP and updates +// the store for users that are missing this information. +func PopulateUserInfo(s Server, idpManager idp.Manager, dryRun bool) error { + ctx := context.Background() + + users, err := s.Store().ListUsers(ctx) + if err != nil { + return fmt.Errorf("failed to list users: %w", err) + } + + // Build a map of IDP user ID -> UserData from the external IDP + allAccounts, err := idpManager.GetAllAccounts(ctx) + if err != nil { + return fmt.Errorf("failed to fetch accounts from IDP: %w", err) + } + + idpUsers := make(map[string]*idp.UserData) + for _, accountUsers := range allAccounts { + for _, userData := range accountUsers { + idpUsers[userData.ID] = userData + } + } + + log.Infof("fetched %d users from IDP", len(idpUsers)) + + var updatedCount, skippedCount, notFoundCount int + + for _, user := range users { + if user.IsServiceUser { + skippedCount++ + continue + } + + if user.Email != "" && user.Name != "" { + skippedCount++ + continue + } + + // The user ID in the store may be the original IDP ID or a Dex-encoded ID. + // Try to decode the Dex format first to get the original IDP ID. + lookupID := user.Id + if originalID, _, decErr := dex.DecodeDexUserID(user.Id); decErr == nil { + lookupID = originalID + } + + idpUser, found := idpUsers[lookupID] + if !found { + notFoundCount++ + log.Debugf("user %s (lookup: %s) not found in IDP, skipping", user.Id, lookupID) + continue + } + + email := user.Email + name := user.Name + if email == "" && idpUser.Email != "" { + email = idpUser.Email + } + if name == "" && idpUser.Name != "" { + name = idpUser.Name + } + + if email == user.Email && name == user.Name { + skippedCount++ + continue + } + + if dryRun { + log.Infof("[DRY RUN] would update user %s: email=%q, name=%q", user.Id, email, name) + updatedCount++ + continue + } + + if err := s.Store().UpdateUserInfo(ctx, user.Id, email, name); err != nil { + return fmt.Errorf("failed to update user info for %s: %w", user.Id, err) + } + + log.Infof("updated user %s: email=%q, name=%q", user.Id, email, name) + updatedCount++ + } + + if dryRun { + log.Infof("[DRY RUN] user info summary: %d would be updated, %d skipped, %d not found in IDP", updatedCount, skippedCount, notFoundCount) + } else { + log.Infof("user info population complete: %d updated, %d skipped, %d not found in IDP", updatedCount, skippedCount, notFoundCount) + } + + return nil +} diff --git a/management/server/idp/migration/migration_test.go b/management/server/idp/migration/migration_test.go new file mode 100644 index 000000000..2ff71347e --- /dev/null +++ b/management/server/idp/migration/migration_test.go @@ -0,0 +1,828 @@ +package migration + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/idp/dex" + "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/management/server/types" +) + +// testStore is a hand-written mock for MigrationStore. +type testStore struct { + listUsersFunc func(ctx context.Context) ([]*types.User, error) + updateUserIDFunc func(ctx context.Context, accountID, oldUserID, newUserID string) error + updateUserInfoFunc func(ctx context.Context, userID, email, name string) error + checkSchemaFunc func(checks []SchemaCheck) []SchemaError + updateCalls []updateUserIDCall + updateInfoCalls []updateUserInfoCall +} + +type updateUserIDCall struct { + AccountID string + OldUserID string + NewUserID string +} + +type updateUserInfoCall struct { + UserID string + Email string + Name string +} + +func (s *testStore) ListUsers(ctx context.Context) ([]*types.User, error) { + return s.listUsersFunc(ctx) +} + +func (s *testStore) UpdateUserID(ctx context.Context, accountID, oldUserID, newUserID string) error { + s.updateCalls = append(s.updateCalls, updateUserIDCall{accountID, oldUserID, newUserID}) + return s.updateUserIDFunc(ctx, accountID, oldUserID, newUserID) +} + +func (s *testStore) UpdateUserInfo(ctx context.Context, userID, email, name string) error { + s.updateInfoCalls = append(s.updateInfoCalls, updateUserInfoCall{userID, email, name}) + if s.updateUserInfoFunc != nil { + return s.updateUserInfoFunc(ctx, userID, email, name) + } + return nil +} + +func (s *testStore) CheckSchema(checks []SchemaCheck) []SchemaError { + if s.checkSchemaFunc != nil { + return s.checkSchemaFunc(checks) + } + return nil +} + +type testServer struct { + store Store + eventStore EventStore +} + +func (s *testServer) Store() Store { return s.store } +func (s *testServer) EventStore() EventStore { return s.eventStore } + +func TestSeedConnectorFromEnv(t *testing.T) { + t.Run("returns ErrNoSeedInfo when env var is not set", func(t *testing.T) { + os.Unsetenv(idpSeedInfoKey) + + conn, err := SeedConnectorFromEnv() + assert.ErrorIs(t, err, ErrNoSeedInfo) + assert.Nil(t, conn) + }) + + t.Run("returns ErrNoSeedInfo when env var is empty", func(t *testing.T) { + t.Setenv(idpSeedInfoKey, "") + + conn, err := SeedConnectorFromEnv() + assert.ErrorIs(t, err, ErrNoSeedInfo) + assert.Nil(t, conn) + }) + + t.Run("returns error on invalid base64", func(t *testing.T) { + t.Setenv(idpSeedInfoKey, "not-valid-base64!!!") + + conn, err := SeedConnectorFromEnv() + assert.NotErrorIs(t, err, ErrNoSeedInfo) + assert.Error(t, err) + assert.Nil(t, conn) + assert.Contains(t, err.Error(), "base64 decode") + }) + + t.Run("returns error on invalid JSON", func(t *testing.T) { + encoded := base64.StdEncoding.EncodeToString([]byte("not json")) + t.Setenv(idpSeedInfoKey, encoded) + + conn, err := SeedConnectorFromEnv() + assert.NotErrorIs(t, err, ErrNoSeedInfo) + assert.Error(t, err) + assert.Nil(t, conn) + assert.Contains(t, err.Error(), "json unmarshal") + }) + + t.Run("successfully decodes valid connector", func(t *testing.T) { + expected := dex.Connector{ + Type: "oidc", + Name: "Test Provider", + ID: "test-provider", + Config: map[string]any{ + "issuer": "https://example.com", + "clientID": "my-client-id", + "clientSecret": "my-secret", + }, + } + + data, err := json.Marshal(expected) + require.NoError(t, err) + + encoded := base64.StdEncoding.EncodeToString(data) + t.Setenv(idpSeedInfoKey, encoded) + + conn, err := SeedConnectorFromEnv() + assert.NoError(t, err) + require.NotNil(t, conn) + assert.Equal(t, expected.Type, conn.Type) + assert.Equal(t, expected.Name, conn.Name) + assert.Equal(t, expected.ID, conn.ID) + assert.Equal(t, expected.Config["issuer"], conn.Config["issuer"]) + }) +} + +func TestMigrateUsersToStaticConnectors(t *testing.T) { + connector := &dex.Connector{ + Type: "oidc", + Name: "Test Provider", + ID: "test-connector", + } + + t.Run("succeeds with no users", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { return nil, nil }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { return nil }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.NoError(t, err) + }) + + t.Run("returns error when ListUsers fails", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return nil, fmt.Errorf("db error") + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { return nil }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to list users") + }) + + t.Run("migrates single user with correct encoded ID", func(t *testing.T) { + user := &types.User{Id: "user-1", AccountID: "account-1"} + expectedNewID := dex.EncodeDexUserID("user-1", "test-connector") + + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{user}, nil + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { + return nil + }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.NoError(t, err) + require.Len(t, ms.updateCalls, 1) + assert.Equal(t, "account-1", ms.updateCalls[0].AccountID) + assert.Equal(t, "user-1", ms.updateCalls[0].OldUserID) + assert.Equal(t, expectedNewID, ms.updateCalls[0].NewUserID) + }) + + t.Run("migrates multiple users", func(t *testing.T) { + users := []*types.User{ + {Id: "user-1", AccountID: "account-1"}, + {Id: "user-2", AccountID: "account-1"}, + {Id: "user-3", AccountID: "account-2"}, + } + + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return users, nil + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { + return nil + }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.NoError(t, err) + assert.Len(t, ms.updateCalls, 3) + }) + + t.Run("returns error when UpdateUserID fails", func(t *testing.T) { + users := []*types.User{ + {Id: "user-1", AccountID: "account-1"}, + {Id: "user-2", AccountID: "account-1"}, + } + + callCount := 0 + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return users, nil + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { + callCount++ + if callCount == 2 { + return fmt.Errorf("update failed") + } + return nil + }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to update user ID for user user-2") + }) + + t.Run("stops on first UpdateUserID error", func(t *testing.T) { + users := []*types.User{ + {Id: "user-1", AccountID: "account-1"}, + {Id: "user-2", AccountID: "account-1"}, + } + + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return users, nil + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { + return fmt.Errorf("update failed") + }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.Error(t, err) + assert.Len(t, ms.updateCalls, 1) // stopped after first error + }) + + t.Run("skips already migrated users", func(t *testing.T) { + alreadyMigratedID := dex.EncodeDexUserID("user-1", "test-connector") + users := []*types.User{ + {Id: alreadyMigratedID, AccountID: "account-1"}, + } + + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return users, nil + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { + return nil + }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.NoError(t, err) + assert.Len(t, ms.updateCalls, 0) + }) + + t.Run("migrates only non-migrated users in mixed state", func(t *testing.T) { + alreadyMigratedID := dex.EncodeDexUserID("user-1", "test-connector") + users := []*types.User{ + {Id: alreadyMigratedID, AccountID: "account-1"}, + {Id: "user-2", AccountID: "account-1"}, + {Id: "user-3", AccountID: "account-2"}, + } + + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return users, nil + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { + return nil + }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.NoError(t, err) + // Only user-2 and user-3 should be migrated + assert.Len(t, ms.updateCalls, 2) + assert.Equal(t, "user-2", ms.updateCalls[0].OldUserID) + assert.Equal(t, "user-3", ms.updateCalls[1].OldUserID) + }) + + t.Run("dry run does not call UpdateUserID", func(t *testing.T) { + t.Setenv(dryRunEnvKey, "true") + + users := []*types.User{ + {Id: "user-1", AccountID: "account-1"}, + {Id: "user-2", AccountID: "account-1"}, + } + + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return users, nil + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { + t.Fatal("UpdateUserID should not be called in dry-run mode") + return nil + }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.NoError(t, err) + assert.Len(t, ms.updateCalls, 0) + }) + + t.Run("dry run skips already migrated users", func(t *testing.T) { + t.Setenv(dryRunEnvKey, "true") + + alreadyMigratedID := dex.EncodeDexUserID("user-1", "test-connector") + users := []*types.User{ + {Id: alreadyMigratedID, AccountID: "account-1"}, + {Id: "user-2", AccountID: "account-1"}, + } + + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return users, nil + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { + t.Fatal("UpdateUserID should not be called in dry-run mode") + return nil + }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.NoError(t, err) + }) + + t.Run("dry run disabled by default", func(t *testing.T) { + user := &types.User{Id: "user-1", AccountID: "account-1"} + + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{user}, nil + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { + return nil + }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.NoError(t, err) + assert.Len(t, ms.updateCalls, 1) // proves it's not in dry-run + }) +} + +func TestPopulateUserInfo(t *testing.T) { + noopUpdateID := func(ctx context.Context, accountID, oldUserID, newUserID string) error { return nil } + + t.Run("succeeds with no users", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { return nil, nil }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{}, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + assert.Empty(t, ms.updateInfoCalls) + }) + + t.Run("returns error when ListUsers fails", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return nil, fmt.Errorf("db error") + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{} + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to list users") + }) + + t.Run("returns error when GetAllAccounts fails", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{{Id: "user-1", AccountID: "acc-1"}}, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return nil, fmt.Errorf("idp error") + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to fetch accounts from IDP") + }) + + t.Run("updates user with missing email and name", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "", Name: ""}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": { + {ID: "user-1", Email: "user1@example.com", Name: "User One"}, + }, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + require.Len(t, ms.updateInfoCalls, 1) + assert.Equal(t, "user-1", ms.updateInfoCalls[0].UserID) + assert.Equal(t, "user1@example.com", ms.updateInfoCalls[0].Email) + assert.Equal(t, "User One", ms.updateInfoCalls[0].Name) + }) + + t.Run("updates only missing email when name exists", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "", Name: "Existing Name"}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": {{ID: "user-1", Email: "user1@example.com", Name: "IDP Name"}}, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + require.Len(t, ms.updateInfoCalls, 1) + assert.Equal(t, "user1@example.com", ms.updateInfoCalls[0].Email) + assert.Equal(t, "Existing Name", ms.updateInfoCalls[0].Name) + }) + + t.Run("updates only missing name when email exists", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "existing@example.com", Name: ""}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": {{ID: "user-1", Email: "idp@example.com", Name: "IDP Name"}}, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + require.Len(t, ms.updateInfoCalls, 1) + assert.Equal(t, "existing@example.com", ms.updateInfoCalls[0].Email) + assert.Equal(t, "IDP Name", ms.updateInfoCalls[0].Name) + }) + + t.Run("skips users that already have both email and name", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "user1@example.com", Name: "User One"}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": {{ID: "user-1", Email: "different@example.com", Name: "Different Name"}}, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + assert.Empty(t, ms.updateInfoCalls) + }) + + t.Run("skips service users", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "svc-1", AccountID: "acc-1", Email: "", Name: "", IsServiceUser: true}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": {{ID: "svc-1", Email: "svc@example.com", Name: "Service"}}, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + assert.Empty(t, ms.updateInfoCalls) + }) + + t.Run("skips users not found in IDP", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "", Name: ""}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": {{ID: "different-user", Email: "other@example.com", Name: "Other"}}, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + assert.Empty(t, ms.updateInfoCalls) + }) + + t.Run("looks up dex-encoded user IDs by original ID", func(t *testing.T) { + dexEncodedID := dex.EncodeDexUserID("original-idp-id", "my-connector") + + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: dexEncodedID, AccountID: "acc-1", Email: "", Name: ""}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": {{ID: "original-idp-id", Email: "user@example.com", Name: "User"}}, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + require.Len(t, ms.updateInfoCalls, 1) + assert.Equal(t, dexEncodedID, ms.updateInfoCalls[0].UserID) + assert.Equal(t, "user@example.com", ms.updateInfoCalls[0].Email) + assert.Equal(t, "User", ms.updateInfoCalls[0].Name) + }) + + t.Run("handles multiple users across multiple accounts", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "", Name: ""}, + {Id: "user-2", AccountID: "acc-1", Email: "already@set.com", Name: "Already Set"}, + {Id: "user-3", AccountID: "acc-2", Email: "", Name: ""}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": { + {ID: "user-1", Email: "u1@example.com", Name: "User 1"}, + {ID: "user-2", Email: "u2@example.com", Name: "User 2"}, + }, + "acc-2": { + {ID: "user-3", Email: "u3@example.com", Name: "User 3"}, + }, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + require.Len(t, ms.updateInfoCalls, 2) + assert.Equal(t, "user-1", ms.updateInfoCalls[0].UserID) + assert.Equal(t, "u1@example.com", ms.updateInfoCalls[0].Email) + assert.Equal(t, "user-3", ms.updateInfoCalls[1].UserID) + assert.Equal(t, "u3@example.com", ms.updateInfoCalls[1].Email) + }) + + t.Run("returns error when UpdateUserInfo fails", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "", Name: ""}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + updateUserInfoFunc: func(ctx context.Context, userID, email, name string) error { + return fmt.Errorf("db write error") + }, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": {{ID: "user-1", Email: "u1@example.com", Name: "User 1"}}, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to update user info for user-1") + }) + + t.Run("stops on first UpdateUserInfo error", func(t *testing.T) { + callCount := 0 + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "", Name: ""}, + {Id: "user-2", AccountID: "acc-1", Email: "", Name: ""}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + updateUserInfoFunc: func(ctx context.Context, userID, email, name string) error { + callCount++ + return fmt.Errorf("db write error") + }, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": { + {ID: "user-1", Email: "u1@example.com", Name: "U1"}, + {ID: "user-2", Email: "u2@example.com", Name: "U2"}, + }, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.Error(t, err) + assert.Equal(t, 1, callCount) + }) + + t.Run("dry run does not call UpdateUserInfo", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "", Name: ""}, + {Id: "user-2", AccountID: "acc-1", Email: "", Name: ""}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + updateUserInfoFunc: func(ctx context.Context, userID, email, name string) error { + t.Fatal("UpdateUserInfo should not be called in dry-run mode") + return nil + }, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": { + {ID: "user-1", Email: "u1@example.com", Name: "U1"}, + {ID: "user-2", Email: "u2@example.com", Name: "U2"}, + }, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, true) + assert.NoError(t, err) + assert.Empty(t, ms.updateInfoCalls) + }) + + t.Run("skips user when IDP has empty email and name too", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "", Name: ""}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": {{ID: "user-1", Email: "", Name: ""}}, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + assert.Empty(t, ms.updateInfoCalls) + }) +} + +func TestSchemaError_String(t *testing.T) { + t.Run("missing table", func(t *testing.T) { + e := SchemaError{Table: "jobs"} + assert.Equal(t, `table "jobs" is missing`, e.String()) + }) + + t.Run("missing column", func(t *testing.T) { + e := SchemaError{Table: "users", Column: "email"} + assert.Equal(t, `column "email" on table "users" is missing`, e.String()) + }) +} + +func TestRequiredSchema(t *testing.T) { + // Verify RequiredSchema covers all the tables touched by UpdateUserID and UpdateUserInfo. + expectedTables := []string{ + "users", + "personal_access_tokens", + "peers", + "accounts", + "user_invites", + "proxy_access_tokens", + "jobs", + } + + schemaTableNames := make([]string, len(RequiredSchema)) + for i, s := range RequiredSchema { + schemaTableNames[i] = s.Table + } + + for _, expected := range expectedTables { + assert.Contains(t, schemaTableNames, expected, "RequiredSchema should include table %q", expected) + } +} + +func TestCheckSchema_MockStore(t *testing.T) { + t.Run("returns nil when all schema exists", func(t *testing.T) { + ms := &testStore{ + checkSchemaFunc: func(checks []SchemaCheck) []SchemaError { + return nil + }, + } + errs := ms.CheckSchema(RequiredSchema) + assert.Empty(t, errs) + }) + + t.Run("returns errors for missing tables", func(t *testing.T) { + ms := &testStore{ + checkSchemaFunc: func(checks []SchemaCheck) []SchemaError { + return []SchemaError{ + {Table: "jobs"}, + {Table: "proxy_access_tokens"}, + } + }, + } + errs := ms.CheckSchema(RequiredSchema) + require.Len(t, errs, 2) + assert.Equal(t, "jobs", errs[0].Table) + assert.Equal(t, "", errs[0].Column) + assert.Equal(t, "proxy_access_tokens", errs[1].Table) + }) + + t.Run("returns errors for missing columns", func(t *testing.T) { + ms := &testStore{ + checkSchemaFunc: func(checks []SchemaCheck) []SchemaError { + return []SchemaError{ + {Table: "users", Column: "email"}, + {Table: "users", Column: "name"}, + } + }, + } + errs := ms.CheckSchema(RequiredSchema) + require.Len(t, errs, 2) + assert.Equal(t, "users", errs[0].Table) + assert.Equal(t, "email", errs[0].Column) + }) +} diff --git a/management/server/idp/migration/store.go b/management/server/idp/migration/store.go new file mode 100644 index 000000000..e7cc54a41 --- /dev/null +++ b/management/server/idp/migration/store.go @@ -0,0 +1,82 @@ +package migration + +import ( + "context" + "fmt" + + "github.com/netbirdio/netbird/management/server/types" +) + +// SchemaCheck represents a table and the columns required on it. +type SchemaCheck struct { + Table string + Columns []string +} + +// RequiredSchema lists all tables and columns that the migration tool needs. +// If any are missing, the user must upgrade their management server first so +// that the automatic GORM migrations create them. +var RequiredSchema = []SchemaCheck{ + {Table: "users", Columns: []string{"id", "email", "name", "account_id"}}, + {Table: "personal_access_tokens", Columns: []string{"user_id", "created_by"}}, + {Table: "peers", Columns: []string{"user_id"}}, + {Table: "accounts", Columns: []string{"created_by"}}, + {Table: "user_invites", Columns: []string{"created_by"}}, + {Table: "proxy_access_tokens", Columns: []string{"created_by"}}, + {Table: "jobs", Columns: []string{"triggered_by"}}, +} + +// SchemaError describes a single missing table or column. +type SchemaError struct { + Table string + Column string // empty when the whole table is missing +} + +func (e SchemaError) String() string { + if e.Column == "" { + return fmt.Sprintf("table %q is missing", e.Table) + } + return fmt.Sprintf("column %q on table %q is missing", e.Column, e.Table) +} + +// Store defines the data store operations required for IdP user migration. +// This interface is separate from the main store.Store interface because these methods +// are only used during one-time migration and should be removed once migration tooling +// is no longer needed. +// +// The SQL store implementations (SqlStore) already have these methods on their concrete +// types, so they satisfy this interface via Go's structural typing with zero code changes. +type Store interface { + // ListUsers returns all users across all accounts. + ListUsers(ctx context.Context) ([]*types.User, error) + + // UpdateUserID atomically updates a user's ID and all foreign key references + // across the database (peers, groups, policies, PATs, etc.). + UpdateUserID(ctx context.Context, accountID, oldUserID, newUserID string) error + + // UpdateUserInfo updates a user's email and name in the store. + UpdateUserInfo(ctx context.Context, userID, email, name string) error + + // CheckSchema verifies that all tables and columns required by the migration + // exist in the database. Returns a list of problems; an empty slice means OK. + CheckSchema(checks []SchemaCheck) []SchemaError +} + +// RequiredEventSchema lists all tables and columns that the migration tool needs +// in the activity/event store. +var RequiredEventSchema = []SchemaCheck{ + {Table: "events", Columns: []string{"initiator_id", "target_id"}}, + {Table: "deleted_users", Columns: []string{"id"}}, +} + +// EventStore defines the activity event store operations required for migration. +// Like Store, this is a temporary interface for migration tooling only. +type EventStore interface { + // CheckSchema verifies that all tables and columns required by the migration + // exist in the event database. Returns a list of problems; an empty slice means OK. + CheckSchema(checks []SchemaCheck) []SchemaError + + // UpdateUserID updates all event references (initiator_id, target_id) and + // deleted_users records to use the new user ID format. + UpdateUserID(ctx context.Context, oldUserID, newUserID string) error +} diff --git a/management/server/store/sql_store_idp_migration.go b/management/server/store/sql_store_idp_migration.go new file mode 100644 index 000000000..64962845b --- /dev/null +++ b/management/server/store/sql_store_idp_migration.go @@ -0,0 +1,177 @@ +package store + +// This file contains migration-only methods on SqlStore. +// They satisfy the migration.Store interface via duck typing. +// Delete this file when migration tooling is no longer needed. + +import ( + "context" + "fmt" + + log "github.com/sirupsen/logrus" + "gorm.io/gorm" + + "github.com/netbirdio/netbird/management/server/idp/migration" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/management/status" +) + +func (s *SqlStore) CheckSchema(checks []migration.SchemaCheck) []migration.SchemaError { + migrator := s.db.Migrator() + var errs []migration.SchemaError + + for _, check := range checks { + if !migrator.HasTable(check.Table) { + errs = append(errs, migration.SchemaError{Table: check.Table}) + continue + } + for _, col := range check.Columns { + if !migrator.HasColumn(check.Table, col) { + errs = append(errs, migration.SchemaError{Table: check.Table, Column: col}) + } + } + } + + return errs +} + +func (s *SqlStore) ListUsers(ctx context.Context) ([]*types.User, error) { + tx := s.db + var users []*types.User + result := tx.Find(&users) + if result.Error != nil { + log.WithContext(ctx).Errorf("error when listing users from the store: %s", result.Error) + return nil, status.Errorf(status.Internal, "issue listing users from store") + } + + for _, user := range users { + if err := user.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt user: %w", err) + } + } + + return users, nil +} + +// txDeferFKConstraints defers foreign key constraint checks for the duration of the transaction. +// MySQL is already handled by s.transaction (SET FOREIGN_KEY_CHECKS = 0). +func (s *SqlStore) txDeferFKConstraints(tx *gorm.DB) error { + if s.storeEngine == types.SqliteStoreEngine { + return tx.Exec("PRAGMA defer_foreign_keys = ON").Error + } + + if s.storeEngine != types.PostgresStoreEngine { + return nil + } + + // GORM creates FK constraints as NOT DEFERRABLE by default, so + // SET CONSTRAINTS ALL DEFERRED is a no-op unless we ALTER them first. + err := tx.Exec(` + DO $$ DECLARE r RECORD; + BEGIN + FOR r IN SELECT conname, conrelid::regclass AS tbl + FROM pg_constraint WHERE contype = 'f' AND NOT condeferrable + LOOP + EXECUTE format('ALTER TABLE %s ALTER CONSTRAINT %I DEFERRABLE INITIALLY IMMEDIATE', r.tbl, r.conname); + END LOOP; + END $$ + `).Error + if err != nil { + return fmt.Errorf("make FK constraints deferrable: %w", err) + } + return tx.Exec("SET CONSTRAINTS ALL DEFERRED").Error +} + +// txRestoreFKConstraints reverts FK constraints back to NOT DEFERRABLE after the +// deferred updates are done but before the transaction commits. +func (s *SqlStore) txRestoreFKConstraints(tx *gorm.DB) error { + if s.storeEngine != types.PostgresStoreEngine { + return nil + } + + return tx.Exec(` + DO $$ DECLARE r RECORD; + BEGIN + FOR r IN SELECT conname, conrelid::regclass AS tbl + FROM pg_constraint WHERE contype = 'f' AND condeferrable + LOOP + EXECUTE format('ALTER TABLE %s ALTER CONSTRAINT %I NOT DEFERRABLE', r.tbl, r.conname); + END LOOP; + END $$ + `).Error +} + +func (s *SqlStore) UpdateUserInfo(ctx context.Context, userID, email, name string) error { + user := &types.User{Email: email, Name: name} + if err := user.EncryptSensitiveData(s.fieldEncrypt); err != nil { + return fmt.Errorf("encrypt user info: %w", err) + } + + result := s.db.Model(&types.User{}).Where("id = ?", userID).Updates(map[string]any{ + "email": user.Email, + "name": user.Name, + }) + if result.Error != nil { + log.WithContext(ctx).Errorf("error updating user info for %s: %s", userID, result.Error) + return status.Errorf(status.Internal, "failed to update user info") + } + + return nil +} + +func (s *SqlStore) UpdateUserID(ctx context.Context, accountID, oldUserID, newUserID string) error { + type fkUpdate struct { + model any + column string + where string + } + + updates := []fkUpdate{ + {&types.PersonalAccessToken{}, "user_id", "user_id = ?"}, + {&types.PersonalAccessToken{}, "created_by", "created_by = ?"}, + {&nbpeer.Peer{}, "user_id", "user_id = ?"}, + {&types.UserInviteRecord{}, "created_by", "created_by = ?"}, + {&types.Account{}, "created_by", "created_by = ?"}, + {&types.ProxyAccessToken{}, "created_by", "created_by = ?"}, + {&types.Job{}, "triggered_by", "triggered_by = ?"}, + } + + log.Info("Updating user ID in the store") + err := s.transaction(func(tx *gorm.DB) error { + if err := s.txDeferFKConstraints(tx); err != nil { + return err + } + + for _, u := range updates { + if err := tx.Model(u.model).Where(u.where, oldUserID).Update(u.column, newUserID).Error; err != nil { + return fmt.Errorf("update %s: %w", u.column, err) + } + } + + if err := tx.Model(&types.User{}).Where(accountAndIDQueryCondition, accountID, oldUserID).Update("id", newUserID).Error; err != nil { + return fmt.Errorf("update users: %w", err) + } + + return nil + }) + if err != nil { + log.WithContext(ctx).Errorf("failed to update user ID in the store: %s", err) + return status.Errorf(status.Internal, "failed to update user ID in store") + } + + log.Info("Restoring FK constraints") + err = s.transaction(func(tx *gorm.DB) error { + if err := s.txRestoreFKConstraints(tx); err != nil { + return fmt.Errorf("restore FK constraints: %w", err) + } + + return nil + }) + if err != nil { + log.WithContext(ctx).Errorf("failed to restore FK constraints after user ID update: %s", err) + return status.Errorf(status.Internal, "failed to restore FK constraints after user ID update") + } + + return nil +} diff --git a/shared/auth/jwt/validator.go b/shared/auth/jwt/validator.go index aeaa5842c..cf18b2cf6 100644 --- a/shared/auth/jwt/validator.go +++ b/shared/auth/jwt/validator.go @@ -25,7 +25,7 @@ import ( // Jwks is a collection of JSONWebKey obtained from Config.HttpServerConfig.AuthKeysLocation type Jwks struct { Keys []JSONWebKey `json:"keys"` - expiresInTime time.Time + ExpiresInTime time.Time `json:"-"` } // The supported elliptic curves types @@ -53,12 +53,17 @@ type JSONWebKey struct { X5c []string `json:"x5c"` } +// KeyFetcher is a function that retrieves JWKS keys directly (e.g., from Dex storage) +// bypassing HTTP. When set on a Validator, it is used instead of the HTTP-based getPemKeys. +type KeyFetcher func(ctx context.Context) (*Jwks, error) + type Validator struct { lock sync.Mutex issuer string audienceList []string keysLocation string idpSignkeyRefreshEnabled bool + keyFetcher KeyFetcher keys *Jwks lastForcedRefresh time.Time } @@ -85,10 +90,39 @@ func NewValidator(issuer string, audienceList []string, keysLocation string, idp } } +// NewValidatorWithKeyFetcher creates a Validator that fetches keys directly using the +// provided KeyFetcher (e.g., from Dex storage) instead of via HTTP. +func NewValidatorWithKeyFetcher(issuer string, audienceList []string, keyFetcher KeyFetcher) *Validator { + ctx := context.Background() + keys, err := keyFetcher(ctx) + if err != nil { + log.Warnf("could not get keys from key fetcher: %s, it will try again on the next http request", err) + } + if keys == nil { + keys = &Jwks{} + } + + return &Validator{ + keys: keys, + issuer: issuer, + audienceList: audienceList, + idpSignkeyRefreshEnabled: true, + keyFetcher: keyFetcher, + } +} + // forcedRefreshCooldown is the minimum time between forced key refreshes // to prevent abuse from invalid tokens with fake kid values const forcedRefreshCooldown = 30 * time.Second +// fetchKeys retrieves keys using the keyFetcher if available, otherwise falls back to HTTP. +func (v *Validator) fetchKeys(ctx context.Context) (*Jwks, error) { + if v.keyFetcher != nil { + return v.keyFetcher(ctx) + } + return getPemKeys(v.keysLocation) +} + func (v *Validator) getKeyFunc(ctx context.Context) jwt.Keyfunc { return func(token *jwt.Token) (interface{}, error) { // If keys are rotated, verify the keys prior to token validation @@ -131,13 +165,13 @@ func (v *Validator) refreshKeys(ctx context.Context) { v.lock.Lock() defer v.lock.Unlock() - refreshedKeys, err := getPemKeys(v.keysLocation) + refreshedKeys, err := v.fetchKeys(ctx) if err != nil { log.WithContext(ctx).Debugf("cannot get JSONWebKey: %v, falling back to old keys", err) return } - log.WithContext(ctx).Debugf("keys refreshed, new UTC expiration time: %s", refreshedKeys.expiresInTime.UTC()) + log.WithContext(ctx).Debugf("keys refreshed, new UTC expiration time: %s", refreshedKeys.ExpiresInTime.UTC()) v.keys = refreshedKeys } @@ -155,13 +189,13 @@ func (v *Validator) forceRefreshKeys(ctx context.Context) bool { log.WithContext(ctx).Debugf("key not found in cache, forcing JWKS refresh") - refreshedKeys, err := getPemKeys(v.keysLocation) + refreshedKeys, err := v.fetchKeys(ctx) if err != nil { log.WithContext(ctx).Debugf("cannot get JSONWebKey: %v, falling back to old keys", err) return false } - log.WithContext(ctx).Debugf("keys refreshed, new UTC expiration time: %s", refreshedKeys.expiresInTime.UTC()) + log.WithContext(ctx).Debugf("keys refreshed, new UTC expiration time: %s", refreshedKeys.ExpiresInTime.UTC()) v.keys = refreshedKeys v.lastForcedRefresh = time.Now() return true @@ -203,7 +237,7 @@ func (v *Validator) ValidateAndParse(ctx context.Context, token string) (*jwt.To // stillValid returns true if the JSONWebKey still valid and have enough time to be used func (jwks *Jwks) stillValid() bool { - return !jwks.expiresInTime.IsZero() && time.Now().Add(5*time.Second).Before(jwks.expiresInTime) + return !jwks.ExpiresInTime.IsZero() && time.Now().Add(5*time.Second).Before(jwks.ExpiresInTime) } func getPemKeys(keysLocation string) (*Jwks, error) { @@ -227,7 +261,7 @@ func getPemKeys(keysLocation string) (*Jwks, error) { cacheControlHeader := resp.Header.Get("Cache-Control") expiresIn := getMaxAgeFromCacheHeader(cacheControlHeader) - jwks.expiresInTime = time.Now().Add(time.Duration(expiresIn) * time.Second) + jwks.ExpiresInTime = time.Now().Add(time.Duration(expiresIn) * time.Second) return jwks, nil } diff --git a/tools/idp-migrate/DEVELOPMENT.md b/tools/idp-migrate/DEVELOPMENT.md new file mode 100644 index 000000000..5697ead40 --- /dev/null +++ b/tools/idp-migrate/DEVELOPMENT.md @@ -0,0 +1,209 @@ +# IdP Migration Tool — Developer Guide + +## Overview + +This tool migrates NetBird deployments from an external IdP (Auth0, Zitadel, Okta, etc.) to the embedded Dex IdP introduced in v0.62.0. It does two things: + +1. **DB migration** — Re-encodes every user ID from `{original_id}` to Dex's protobuf-encoded format `base64(proto{original_id, connector_id})`. +2. **Config generation** — Transforms `management.json`: removes `IdpManagerConfig`, `PKCEAuthorizationFlow`, and `DeviceAuthorizationFlow`; strips `HttpConfig` to only `CertFile`/`CertKey`; adds `EmbeddedIdP` with the static connector configuration. + +## Code Layout + +``` +tools/idp-migrate/ +├── config.go # migrationConfig struct, CLI flags, env vars, validation +├── main.go # CLI entry point, migration phases, config generation +├── main_test.go # 8 test functions (18 subtests) covering config, connector, URL builder, config generation +└── DEVELOPMENT.md # this file + +management/server/idp/migration/ +├── migration.go # Server interface, MigrateUsersToStaticConnectors(), PopulateUserInfo(), migrateUser(), reconcileActivityStore() +├── migration_test.go # 6 top-level tests (with subtests) using hand-written mocks +└── store.go # Store, EventStore interfaces, SchemaCheck, RequiredSchema, SchemaError types + +management/server/store/ +└── sql_store_idp_migration.go # CheckSchema(), ListUsers(), UpdateUserInfo(), UpdateUserID(), txDeferFKConstraints() on SqlStore + +management/server/activity/store/ +├── sql_store_idp_migration.go # UpdateUserID() on activity Store +└── sql_store_idp_migration_test.go # 5 subtests for activity UpdateUserID + +``` + +## Release / Distribution + +The tool is included in `.goreleaser.yaml` as the `netbird-idp-migrate` build target. Each NetBird release produces pre-built archives for Linux (amd64, arm64, arm) that are uploaded to GitHub Releases. The archive naming convention is: + +``` +netbird-idp-migrate__linux_.tar.gz +``` + +The build requires `CGO_ENABLED=1` because it links the SQLite driver used by `SqlStore`. The cross-compilation setup (CC env for arm64/arm) mirrors the `netbird-mgmt` build. + +## CLI Flags + +| Flag | Type | Default | Description | +|------|------|---------|-------------| +| `--config` | string | *(required)* | Path to management.json | +| `--datadir` | string | *(required)* | Data directory (containing store.db / events.db) | +| `--idp-seed-info` | string | *(required)* | Base64-encoded connector JSON | +| `--domain` | string | `""` | Sets both dashboard and API domain (convenience shorthand) | +| `--dashboard-domain` | string | *(required)* | Dashboard domain (for redirect URIs) | +| `--api-domain` | string | *(required)* | API domain (for Dex issuer and callback URLs) | +| `--dry-run` | bool | `false` | Preview changes without writing | +| `--force` | bool | `false` | Skip interactive confirmation prompt | +| `--skip-config` | bool | `false` | Skip config generation (DB-only migration) | +| `--skip-populate-user-info` | bool | `false` | Skip populating user info (user ID migration only) | +| `--log-level` | string | `"info"` | Log level (debug, info, warn, error) | + +## Environment Variables + +All flags can be overridden via environment variables. Env vars take precedence over flags. + +| Env Var | Overrides | +|---------|-----------| +| `NETBIRD_DOMAIN` | Sets both `--dashboard-domain` and `--api-domain` | +| `NETBIRD_API_URL` | `--api-domain` | +| `NETBIRD_DASHBOARD_URL` | `--dashboard-domain` | +| `NETBIRD_CONFIG_PATH` | `--config` | +| `NETBIRD_DATA_DIR` | `--datadir` | +| `NETBIRD_IDP_SEED_INFO` | `--idp-seed-info` | +| `NETBIRD_DRY_RUN` | `--dry-run` (set to `"true"`) | +| `NETBIRD_FORCE` | `--force` (set to `"true"`) | +| `NETBIRD_SKIP_CONFIG` | `--skip-config` (set to `"true"`) | +| `NETBIRD_SKIP_POPULATE_USER_INFO` | `--skip-populate-user-info` (set to `"true"`) | +| `NETBIRD_LOG_LEVEL` | `--log-level` | + +Resolution order: CLI flags are parsed first, then `--domain` sets both URLs, then `NETBIRD_DOMAIN` overrides both, then `NETBIRD_API_URL` / `NETBIRD_DASHBOARD_URL` override individually. After all resolution, `validateConfig()` ensures all required fields are set. + +## Migration Flow + +### Phase 0: Schema Validation + +`validateSchema()` opens the store and calls `CheckSchema(RequiredSchema)` to verify that all tables and columns required by the migration exist in the database. If anything is missing, the tool exits with a descriptive error instructing the operator to start the management server (v0.66.4+) at least once so that automatic GORM migrations create the required schema. + +### Phase 1: Populate User Info + +Unless `--skip-populate-user-info` is set, `populateUserInfoFromIDP()` runs before connector resolution: + +1. Creates an IDP manager from the existing `IdpManagerConfig` in management.json. +2. Calls `idpManager.GetAllAccounts()` to fetch email and name for all users from the external IDP. +3. Calls `migration.PopulateUserInfo()` which iterates over all store users, skipping service users and users that already have both email and name populated. For Dex-encoded user IDs, it decodes back to the original IDP ID for lookup. +4. Updates the store with any missing email/name values. + +This ensures user contact info is preserved before the ID migration makes the original IDP IDs inaccessible. + +### Phase 2: Connector Decoding + +`decodeConnectorConfig()` base64-decodes and JSON-unmarshals the connector JSON provided via `--idp-seed-info` (or `NETBIRD_IDP_SEED_INFO`). It validates that the connector ID is non-empty. There is no auto-detection or fallback — the operator must provide the full connector configuration. + +### Phase 3: DB Migration + +`migrateDB()` orchestrates the database migration: + +1. `openStores()` opens the main store (`SqlStore`) and activity store (non-fatal if missing). +2. Type-asserts both to `migration.Store` / `migration.EventStore`. +3. `previewUsers()` scans all users — counts pending vs already-migrated (using `DecodeDexUserID`). +4. `confirmPrompt()` asks for interactive confirmation (unless `--force` or `--dry-run`). +5. Calls `migration.MigrateUsersToStaticConnectors(srv, conn)`: + - **Reconciliation pass**: fixes activity store references for users already migrated in the main DB but whose events still reference old IDs (from a previous partial failure). + - **Main loop**: for each non-migrated user, calls `migrateUser()` which atomically updates the user ID in both the main store and activity store. + - **Dry-run**: logs what would happen, skips all writes. + +`SqlStore.UpdateUserID()` atomically updates the user's primary key and all foreign key references (peers, PATs, groups, policies, jobs, etc.) in a single transaction. + +### Phase 4: Config Generation + +Unless `--skip-config` is set, `generateConfig()` runs: + +1. **Read** — loads existing `management.json` as raw JSON to preserve unknown fields. + +2. **Strip** — removes keys that are no longer needed: + - `IdpManagerConfig` + - `PKCEAuthorizationFlow` + - `DeviceAuthorizationFlow` + - All `HttpConfig` fields except `CertFile` and `CertKey` + +3. **Add EmbeddedIdP** — inserts a minimal section with: + - `Enabled: true` + - `Issuer` built from `--api-domain` + `/oauth2` + - `DashboardRedirectURIs` built from `--dashboard-domain` + `/nb-auth` and `/nb-silent-auth` + - `StaticConnectors` containing the decoded connector, with `redirectURI` overridden to `--api-domain` + `/oauth2/callback` + +4. **Write** — backs up original as `management.json.bak`, writes new config. In dry-run mode, prints to stdout instead. + +## Interface Decoupling + +Migration methods (`ListUsers`, `UpdateUserID`) are **not** on the core `store.Store` or `activity.Store` interfaces. Instead, they're defined in `migration/store.go`: + +```go +type Store interface { + ListUsers(ctx context.Context) ([]*types.User, error) + UpdateUserID(ctx context.Context, accountID, oldUserID, newUserID string) error + UpdateUserInfo(ctx context.Context, userID, email, name string) error + CheckSchema(checks []SchemaCheck) []SchemaError +} + +type EventStore interface { + UpdateUserID(ctx context.Context, oldUserID, newUserID string) error +} +``` + +A `Server` interface wraps both stores for dependency injection: + +```go +type Server interface { + Store() Store + EventStore() EventStore // may return nil +} +``` + +The concrete `SqlStore` types already have these methods (in their respective `sql_store_idp_migration.go` files), so they satisfy the interfaces via Go's structural typing — zero changes needed on the core store interfaces. At runtime, the standalone tool type-asserts: + +```go +migStore, ok := mainStore.(migration.Store) +``` + +This keeps migration concerns completely separate from the core store contract. + +## Dex User ID Encoding + +`EncodeDexUserID(userID, connectorID)` produces a manually-encoded protobuf with two string fields, then base64-encodes the result (raw, no padding). `DecodeDexUserID` reverses this. The migration loop uses `DecodeDexUserID` to detect already-migrated users (decode succeeds → skip). + +See `idp/dex/provider.go` for the implementation. + +## Standalone Tool + +The standalone tool (`tools/idp-migrate/main.go`) is the primary migration entry point. It opens stores directly, runs schema validation, populates user info from the external IDP, migrates user IDs, and generates the new config — then exits. Configuration is handled entirely through `config.go` which parses CLI flags and environment variables. + +## Running Tests + +```bash +# Migration library +go test -v ./management/server/idp/migration/... + +# Standalone tool +go test -v ./tools/idp-migrate/... + +# Activity store migration tests +go test -v -run TestUpdateUserID ./management/server/activity/store/... + +# Build locally +go build ./tools/idp-migrate/ +``` + +## Clean Removal + +When migration tooling is no longer needed, delete: + +1. `tools/idp-migrate/` — entire directory +2. `management/server/idp/migration/` — entire directory +3. `management/server/store/sql_store_idp_migration.go` — migration methods on main SqlStore +4. `management/server/activity/store/sql_store_idp_migration.go` — migration method on activity Store +5. `management/server/activity/store/sql_store_idp_migration_test.go` — tests for the above +6. In `.goreleaser.yaml`: + - Remove the `netbird-idp-migrate` build entry + - Remove the `netbird-idp-migrate` archive entry +7. Run `go mod tidy` + +No core interfaces or mocks need editing — that's the point of the decoupling. diff --git a/tools/idp-migrate/LICENSE b/tools/idp-migrate/LICENSE new file mode 100644 index 000000000..be3f7b28e --- /dev/null +++ b/tools/idp-migrate/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/tools/idp-migrate/config.go b/tools/idp-migrate/config.go new file mode 100644 index 000000000..f4d6b9ea2 --- /dev/null +++ b/tools/idp-migrate/config.go @@ -0,0 +1,174 @@ +package main + +import ( + "flag" + "fmt" + "os" + "strconv" + + "github.com/netbirdio/netbird/util" +) + +type migrationConfig struct { + // Data + dashboardURL string + apiURL string + configPath string + dataDir string + idpSeedInfo string + + // Options + dryRun bool + force bool + skipConfig bool + skipPopulateUserInfo bool + + // Logging + logLevel string +} + +func config() (*migrationConfig, error) { + cfg, err := configFromArgs(os.Args[1:]) + if err != nil { + return nil, err + } + + if err := util.InitLog(cfg.logLevel, util.LogConsole); err != nil { + return nil, fmt.Errorf("init logger: %w", err) + } + + return cfg, nil +} + +func configFromArgs(args []string) (*migrationConfig, error) { + var cfg migrationConfig + var domain string + + fs := flag.NewFlagSet("netbird-idp-migrate", flag.ContinueOnError) + fs.StringVar(&domain, "domain", "", "domain for both dashboard and API") + fs.StringVar(&cfg.dashboardURL, "dashboard-url", "", "dashboard URL") + fs.StringVar(&cfg.apiURL, "api-url", "", "API URL") + fs.StringVar(&cfg.configPath, "config", "", "path to management.json (required)") + fs.StringVar(&cfg.dataDir, "datadir", "", "override data directory from config") + fs.StringVar(&cfg.idpSeedInfo, "idp-seed-info", "", "base64-encoded connector JSON (overrides auto-detection)") + fs.BoolVar(&cfg.dryRun, "dry-run", false, "preview changes without writing") + fs.BoolVar(&cfg.force, "force", false, "skip confirmation prompt") + fs.BoolVar(&cfg.skipConfig, "skip-config", false, "skip config generation (DB migration only)") + fs.BoolVar(&cfg.skipPopulateUserInfo, "skip-populate-user-info", false, "skip populating user info (user id migration only)") + fs.StringVar(&cfg.logLevel, "log-level", "info", "log level (debug, info, warn, error)") + + if err := fs.Parse(args); err != nil { + return nil, err + } + + applyOverrides(&cfg, domain) + + if err := validateConfig(&cfg); err != nil { + return nil, err + } + + return &cfg, nil +} + +// applyOverrides resolves domain configuration from broad to narrow sources. +// The most granular value always wins: +// +// --domain flag (broadest, only fills blanks) +// NETBIRD_DOMAIN env (overrides flags, sets both) +// --api-domain / --dashboard-domain flags (more specific than --domain) +// NETBIRD_API_URL / NETBIRD_DASHBOARD_URL env (most specific, always wins) +// +// Other env vars unconditionally override their corresponding flags. +func applyOverrides(cfg *migrationConfig, domain string) { + // --domain is a convenience shorthand: only fills in values not already + // set by the more specific --api-domain / --dashboard-domain flags. + if domain != "" { + if cfg.apiURL == "" { + cfg.apiURL = domain + } + if cfg.dashboardURL == "" { + cfg.dashboardURL = domain + } + } + + // Env vars override flags. Broad env var first, then narrow ones on top, + // so the most granular value always wins. + if val, ok := os.LookupEnv("NETBIRD_DOMAIN"); ok { + cfg.dashboardURL = val + cfg.apiURL = val + } + + if val, ok := os.LookupEnv("NETBIRD_API_URL"); ok { + cfg.apiURL = val + } + + if val, ok := os.LookupEnv("NETBIRD_DASHBOARD_URL"); ok { + cfg.dashboardURL = val + } + + if val, ok := os.LookupEnv("NETBIRD_CONFIG_PATH"); ok { + cfg.configPath = val + } + + if val, ok := os.LookupEnv("NETBIRD_DATA_DIR"); ok { + cfg.dataDir = val + } + + if val, ok := os.LookupEnv("NETBIRD_IDP_SEED_INFO"); ok { + cfg.idpSeedInfo = val + } + + // Enforce dry run if any value is provided + if sval, ok := os.LookupEnv("NETBIRD_DRY_RUN"); ok { + if val, err := strconv.ParseBool(sval); err == nil { + cfg.dryRun = val + } + } + + cfg.dryRun = parseBool("NETBIRD_DRY_RUN", cfg.dryRun) + cfg.force = parseBool("NETBIRD_FORCE", cfg.force) + cfg.skipConfig = parseBool("NETBIRD_SKIP_CONFIG", cfg.skipConfig) + cfg.skipPopulateUserInfo = parseBool("NETBIRD_SKIP_POPULATE_USER_INFO", cfg.skipPopulateUserInfo) + + if val, ok := os.LookupEnv("NETBIRD_LOG_LEVEL"); ok { + cfg.logLevel = val + } +} + +func parseBool(varName string, defaultVal bool) bool { + stringValue, ok := os.LookupEnv(varName) + if !ok { + return defaultVal + } + + boolValue, err := strconv.ParseBool(stringValue) + if err != nil { + return defaultVal + } + + return boolValue +} + +func validateConfig(cfg *migrationConfig) error { + if cfg.configPath == "" { + return fmt.Errorf("--config is required") + } + + if cfg.dataDir == "" { + return fmt.Errorf("--datadir is required") + } + + if cfg.idpSeedInfo == "" { + return fmt.Errorf("--idp-seed-info is required") + } + + if cfg.apiURL == "" { + return fmt.Errorf("--api-domain is required") + } + + if cfg.dashboardURL == "" { + return fmt.Errorf("--dashboard-domain is required") + } + + return nil +} diff --git a/tools/idp-migrate/main.go b/tools/idp-migrate/main.go new file mode 100644 index 000000000..a8cba0750 --- /dev/null +++ b/tools/idp-migrate/main.go @@ -0,0 +1,449 @@ +// Package main provides a standalone CLI tool to migrate user IDs from an +// external IdP format to the embedded Dex IdP format used by NetBird >= v0.62.0. +// +// This tool reads management.json to auto-detect the current external IdP +// configuration (issuer, clientID, clientSecret, type) and re-encodes all user +// IDs in the database to the Dex protobuf-encoded format. It works independently +// of migrate.sh and the combined server, allowing operators to migrate their +// database before switching to the combined server. +// +// Usage: +// +// netbird-idp-migrate --config /etc/netbird/management.json [--dry-run] [--force] +package main + +import ( + "bufio" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "maps" + "net/url" + "os" + "strings" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/idp/dex" + nbconfig "github.com/netbirdio/netbird/management/internals/server/config" + activitystore "github.com/netbirdio/netbird/management/server/activity/store" + "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/management/server/idp/migration" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/util" + "github.com/netbirdio/netbird/util/crypt" +) + +// migrationServer implements migration.Server by wrapping the migration-specific interfaces. +type migrationServer struct { + store migration.Store + eventStore migration.EventStore +} + +func (s *migrationServer) Store() migration.Store { return s.store } +func (s *migrationServer) EventStore() migration.EventStore { return s.eventStore } + +func main() { + cfg, err := config() + if err != nil { + log.Fatalf("config error: %v", err) + } + + if err := run(cfg); err != nil { + log.Fatalf("migration failed: %v", err) + } + + if !cfg.dryRun { + printPostMigrationInstructions(cfg) + } +} + +func run(cfg *migrationConfig) error { + mgmtConfig := &nbconfig.Config{} + if _, err := util.ReadJsonWithEnvSub(cfg.configPath, mgmtConfig); err != nil { + return err + } + + // Validate the database schema before attempting any operations. + if err := validateSchema(mgmtConfig, cfg.dataDir); err != nil { + return err + } + + if !cfg.skipPopulateUserInfo { + err := populateUserInfoFromIDP(cfg, mgmtConfig) + if err != nil { + return fmt.Errorf("populate user info: %w", err) + } + } + + connectorConfig, err := decodeConnectorConfig(cfg.idpSeedInfo) + if err != nil { + return fmt.Errorf("resolve connector: %w", err) + } + + log.Infof( + "resolved connector: type=%s, id=%s, name=%s", + connectorConfig.Type, + connectorConfig.ID, + connectorConfig.Name, + ) + + if err := migrateDB(cfg, mgmtConfig, connectorConfig); err != nil { + return err + } + + if cfg.skipConfig { + log.Info("skipping config generation (--skip-config)") + return nil + } + + return generateConfig(cfg, connectorConfig) +} + +// validateSchema opens the store and checks that all required tables and columns +// exist. If anything is missing, it returns a descriptive error telling the user +// to upgrade their management server. +func validateSchema(mgmtConfig *nbconfig.Config, dataDir string) error { + ctx := context.Background() + migStore, migEventStore, cleanup, err := openStores(ctx, mgmtConfig, dataDir) + if err != nil { + return err + } + defer cleanup() + + errs := migStore.CheckSchema(migration.RequiredSchema) + if len(errs) > 0 { + return fmt.Errorf("%s", formatSchemaErrors(errs)) + } + + if migEventStore != nil { + eventErrs := migEventStore.CheckSchema(migration.RequiredEventSchema) + if len(eventErrs) > 0 { + return fmt.Errorf("activity store schema check failed (upgrade management server first):\n%s", formatSchemaErrors(eventErrs)) + } + } + + log.Info("database schema check passed") + return nil +} + +// formatSchemaErrors returns a user-friendly message listing all missing schema +// elements and instructing the operator to upgrade. +func formatSchemaErrors(errs []migration.SchemaError) string { + var b strings.Builder + b.WriteString("database schema is incomplete — the following tables/columns are missing:\n") + for _, e := range errs { + fmt.Fprintf(&b, " - %s\n", e.String()) + } + b.WriteString("\nPlease start the NetBird management server (v0.66.4+) at least once so that automatic database migrations create the required schema, then re-run this tool.\n") + return b.String() +} + +// populateUserInfoFromIDP creates an IDP manager from the config, fetches all +// user data (email, name) from the external IDP, and updates the store for users +// that are missing this information. +func populateUserInfoFromIDP(cfg *migrationConfig, mgmtConfig *nbconfig.Config) error { + ctx := context.Background() + + if mgmtConfig.IdpManagerConfig == nil { + return fmt.Errorf("IdpManagerConfig is not set in management.json; cannot fetch user info from IDP") + } + + idpManager, err := idp.NewManager(ctx, *mgmtConfig.IdpManagerConfig, nil) + if err != nil { + return fmt.Errorf("create IDP manager: %w", err) + } + if idpManager == nil { + return fmt.Errorf("IDP manager type is 'none' or empty; cannot fetch user info") + } + + log.Infof("created IDP manager (type: %s)", mgmtConfig.IdpManagerConfig.ManagerType) + + migStore, _, cleanup, err := openStores(ctx, mgmtConfig, cfg.dataDir) + if err != nil { + return err + } + defer cleanup() + + srv := &migrationServer{store: migStore} + return migration.PopulateUserInfo(srv, idpManager, cfg.dryRun) +} + +// openStores opens the main and activity stores, returning migration-specific interfaces. +// The caller must call the returned cleanup function to close the stores. +func openStores(ctx context.Context, cfg *nbconfig.Config, dataDir string) (migration.Store, migration.EventStore, func(), error) { + engine := cfg.StoreConfig.Engine + if engine == "" { + engine = types.SqliteStoreEngine + } + + mainStore, err := store.NewStore(ctx, engine, dataDir, nil, true) + if err != nil { + return nil, nil, nil, fmt.Errorf("open main store: %w", err) + } + + if cfg.DataStoreEncryptionKey != "" { + fieldEncrypt, err := crypt.NewFieldEncrypt(cfg.DataStoreEncryptionKey) + if err != nil { + _ = mainStore.Close(ctx) + return nil, nil, nil, fmt.Errorf("init field encryption: %w", err) + } + mainStore.SetFieldEncrypt(fieldEncrypt) + } + + migStore, ok := mainStore.(migration.Store) + if !ok { + _ = mainStore.Close(ctx) + return nil, nil, nil, fmt.Errorf("store does not support migration operations (ListUsers/UpdateUserID)") + } + + cleanup := func() { _ = mainStore.Close(ctx) } + + var migEventStore migration.EventStore + actStore, err := activitystore.NewSqlStore(ctx, dataDir, cfg.DataStoreEncryptionKey) + if err != nil { + log.Warnf("could not open activity store (events.db may not exist): %v", err) + } else { + migEventStore = actStore + prevCleanup := cleanup + cleanup = func() { _ = actStore.Close(ctx); prevCleanup() } + } + + return migStore, migEventStore, cleanup, nil +} + +// migrateDB opens the stores, previews pending users, and runs the DB migration. +func migrateDB(cfg *migrationConfig, mgmtConfig *nbconfig.Config, connectorConfig *dex.Connector) error { + ctx := context.Background() + + migStore, migEventStore, cleanup, err := openStores(ctx, mgmtConfig, cfg.dataDir) + if err != nil { + return err + } + defer cleanup() + + pending, err := previewUsers(ctx, migStore) + if err != nil { + return err + } + + if cfg.dryRun { + if err := os.Setenv("NB_IDP_MIGRATION_DRY_RUN", "true"); err != nil { + return fmt.Errorf("set dry-run env: %w", err) + } + defer os.Unsetenv("NB_IDP_MIGRATION_DRY_RUN") //nolint:errcheck + } + + if !cfg.dryRun && !cfg.force { + if !confirmPrompt(pending) { + log.Info("migration cancelled by user") + return nil + } + } + + srv := &migrationServer{store: migStore, eventStore: migEventStore} + if err := migration.MigrateUsersToStaticConnectors(srv, connectorConfig); err != nil { + return fmt.Errorf("migrate users: %w", err) + } + + if !cfg.dryRun { + log.Info("DB migration completed successfully") + } + return nil +} + +// previewUsers counts pending vs already-migrated users and logs a summary. +// Returns the number of users still needing migration. +func previewUsers(ctx context.Context, migStore migration.Store) (int, error) { + users, err := migStore.ListUsers(ctx) + if err != nil { + return 0, fmt.Errorf("list users: %w", err) + } + + var pending, alreadyMigrated int + for _, u := range users { + if _, _, decErr := dex.DecodeDexUserID(u.Id); decErr == nil { + alreadyMigrated++ + } else { + pending++ + } + } + + log.Infof("found %d total users: %d pending migration, %d already migrated", len(users), pending, alreadyMigrated) + return pending, nil +} + +// confirmPrompt asks the user for interactive confirmation. Returns true if they accept. +func confirmPrompt(pending int) bool { + log.Infof("About to migrate %d users. This cannot be easily undone. Continue? [y/N] ", pending) + reader := bufio.NewReader(os.Stdin) + answer, _ := reader.ReadString('\n') + answer = strings.TrimSpace(strings.ToLower(answer)) + return answer == "y" || answer == "yes" +} + +// decodeConnectorConfig base64-decodes and JSON-unmarshals a connector. +func decodeConnectorConfig(encoded string) (*dex.Connector, error) { + decoded, err := base64.StdEncoding.DecodeString(encoded) + if err != nil { + return nil, fmt.Errorf("base64 decode: %w", err) + } + + var conn dex.Connector + if err := json.Unmarshal(decoded, &conn); err != nil { + return nil, fmt.Errorf("json unmarshal: %w", err) + } + + if conn.ID == "" { + return nil, fmt.Errorf("connector ID is empty") + } + + return &conn, nil +} + +// generateConfig reads the existing management.json as raw JSON, removes +// IdpManagerConfig, adds EmbeddedIdP, updates HttpConfig fields, and writes +// the result. In dry-run mode, it prints the new config to stdout instead. +func generateConfig(cfg *migrationConfig, connectorConfig *dex.Connector) error { + // Read existing config as raw JSON to preserve all fields + raw, err := os.ReadFile(cfg.configPath) + if err != nil { + return fmt.Errorf("read config file: %w", err) + } + + var configMap map[string]any + if err := json.Unmarshal(raw, &configMap); err != nil { + return fmt.Errorf("parse config JSON: %w", err) + } + + // Remove unused information + delete(configMap, "IdpManagerConfig") + delete(configMap, "PKCEAuthorizationFlow") + delete(configMap, "DeviceAuthorizationFlow") + + httpConfig, ok := configMap["HttpConfig"].(map[string]any) + if httpConfig != nil && ok { + certFilePath := httpConfig["CertFile"] + certKeyPath := httpConfig["CertKey"] + + delete(configMap, "HttpConfig") + + configMap["HttpConfig"] = map[string]any{ + "CertFile": certFilePath, + "CertKey": certKeyPath, + } + } + + // Ensure the connector's redirectURI points to the management server (Dex callback), + // not the external IdP. The auto-detection may have used the IdP issuer URL. + connConfig := make(map[string]any, len(connectorConfig.Config)) + maps.Copy(connConfig, connectorConfig.Config) + + redirectURI, err := buildURL(cfg.apiURL, "/oauth2/callback") + if err != nil { + return fmt.Errorf("build redirect URI: %w", err) + } + connConfig["redirectURI"] = redirectURI + + issuer, err := buildURL(cfg.apiURL, "/oauth2") + if err != nil { + return fmt.Errorf("build issuer URL: %w", err) + } + + dashboardRedirectURL, err := buildURL(cfg.dashboardURL, "/nb-auth") + if err != nil { + return fmt.Errorf("build dashboard redirect URL: %w", err) + } + + dashboardSilentRedirectURL, err := buildURL(cfg.dashboardURL, "/nb-silent-auth") + if err != nil { + return fmt.Errorf("build dashboard silent redirect URL: %w", err) + } + + // Add minimal EmbeddedIdP section + configMap["EmbeddedIdP"] = map[string]any{ + "Enabled": true, + "Issuer": issuer, + "DashboardRedirectURIs": []string{ + dashboardRedirectURL, + dashboardSilentRedirectURL, + }, + "StaticConnectors": []any{ + map[string]any{ + "type": connectorConfig.Type, + "name": connectorConfig.Name, + "id": connectorConfig.ID, + "config": connConfig, + }, + }, + } + + newJSON, err := json.MarshalIndent(configMap, "", " ") + if err != nil { + return fmt.Errorf("marshal new config: %w", err) + } + + if cfg.dryRun { + log.Info("[DRY RUN] new management.json would be:") + log.Infoln(string(newJSON)) + return nil + } + + // Backup original + backupPath := cfg.configPath + ".bak" + if err := os.WriteFile(backupPath, raw, 0o600); err != nil { + return fmt.Errorf("write backup: %w", err) + } + log.Infof("backed up original config to %s", backupPath) + + // Write new config + if err := os.WriteFile(cfg.configPath, newJSON, 0o600); err != nil { + return fmt.Errorf("write new config: %w", err) + } + log.Infof("wrote new config to %s", cfg.configPath) + + return nil +} + +func buildURL(uri, path string) (string, error) { + // Case for domain without scheme, e.g. "example.com" or "example.com:8080" + if !strings.HasPrefix(uri, "http://") && !strings.HasPrefix(uri, "https://") { + uri = "https://" + uri + } + + val, err := url.JoinPath(uri, path) + if err != nil { + return "", err + } + + return val, nil +} + +func printPostMigrationInstructions(cfg *migrationConfig) { + authAuthority, err := buildURL(cfg.apiURL, "/oauth2") + if err != nil { + authAuthority = "https:///oauth2" + } + + log.Info("Congratulations! You have successfully migrated your NetBird management server to the embedded Dex IdP.") + log.Info("Next steps:") + log.Info("1. Make sure the following environment variables are set for your dashboard server:") + log.Infof(` +AUTH_AUDIENCE=netbird-dashboard +AUTH_CLIENT_ID=netbird-dashboard +AUTH_AUTHORITY=%s +AUTH_SUPPORTED_SCOPES=openid profile email groups +AUTH_REDIRECT_URI=/nb-auth +AUTH_SILENT_REDIRECT_URI=/nb-silent-auth + `, + authAuthority, + ) + log.Info("2. Make sure you restart the dashboard & management servers to pick up the new config and environment variables.") + log.Info("eg. docker compose up -d --force-recreate management dashboard") + log.Info("3. Optional: If you have a reverse proxy configured, make sure the path `/oauth2/*` points to the management api server.") +} + +// Compile-time check that migrationServer implements migration.Server. +var _ migration.Server = (*migrationServer)(nil) diff --git a/tools/idp-migrate/main_test.go b/tools/idp-migrate/main_test.go new file mode 100644 index 000000000..75d0bd7eb --- /dev/null +++ b/tools/idp-migrate/main_test.go @@ -0,0 +1,487 @@ +package main + +import ( + "encoding/base64" + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/idp/dex" + "github.com/netbirdio/netbird/management/server/idp/migration" +) + +// TestMigrationServerInterface is a compile-time check that migrationServer +// implements the migration.Server interface. +func TestMigrationServerInterface(t *testing.T) { + var _ migration.Server = (*migrationServer)(nil) +} + +func TestDecodeConnectorConfig(t *testing.T) { + conn := dex.Connector{ + Type: "oidc", + Name: "test", + ID: "test-id", + Config: map[string]any{ + "issuer": "https://example.com", + "clientID": "cid", + "clientSecret": "csecret", + }, + } + + data, err := json.Marshal(conn) + require.NoError(t, err) + encoded := base64.StdEncoding.EncodeToString(data) + + result, err := decodeConnectorConfig(encoded) + require.NoError(t, err) + assert.Equal(t, "test-id", result.ID) + assert.Equal(t, "oidc", result.Type) + assert.Equal(t, "https://example.com", result.Config["issuer"]) +} + +func TestDecodeConnectorConfig_InvalidBase64(t *testing.T) { + _, err := decodeConnectorConfig("not-valid-base64!!!") + require.Error(t, err) + assert.Contains(t, err.Error(), "base64 decode") +} + +func TestDecodeConnectorConfig_InvalidJSON(t *testing.T) { + encoded := base64.StdEncoding.EncodeToString([]byte("not json")) + _, err := decodeConnectorConfig(encoded) + require.Error(t, err) + assert.Contains(t, err.Error(), "json unmarshal") +} + +func TestDecodeConnectorConfig_EmptyConnectorID(t *testing.T) { + conn := dex.Connector{ + Type: "oidc", + Name: "no-id", + ID: "", + } + data, err := json.Marshal(conn) + require.NoError(t, err) + + encoded := base64.StdEncoding.EncodeToString(data) + _, err = decodeConnectorConfig(encoded) + require.Error(t, err) + assert.Contains(t, err.Error(), "connector ID is empty") +} + +func TestValidateConfig(t *testing.T) { + valid := &migrationConfig{ + configPath: "/etc/netbird/management.json", + dataDir: "/var/lib/netbird", + idpSeedInfo: "some-base64", + apiURL: "https://api.example.com", + dashboardURL: "https://dash.example.com", + } + + t.Run("valid config", func(t *testing.T) { + require.NoError(t, validateConfig(valid)) + }) + + t.Run("missing configPath", func(t *testing.T) { + cfg := *valid + cfg.configPath = "" + err := validateConfig(&cfg) + require.Error(t, err) + assert.Contains(t, err.Error(), "--config") + }) + + t.Run("missing dataDir", func(t *testing.T) { + cfg := *valid + cfg.dataDir = "" + err := validateConfig(&cfg) + require.Error(t, err) + assert.Contains(t, err.Error(), "--datadir") + }) + + t.Run("missing idpSeedInfo", func(t *testing.T) { + cfg := *valid + cfg.idpSeedInfo = "" + err := validateConfig(&cfg) + require.Error(t, err) + assert.Contains(t, err.Error(), "--idp-seed-info") + }) + + t.Run("missing apiUrl", func(t *testing.T) { + cfg := *valid + cfg.apiURL = "" + err := validateConfig(&cfg) + require.Error(t, err) + assert.Contains(t, err.Error(), "--api-domain") + }) + + t.Run("missing dashboardUrl", func(t *testing.T) { + cfg := *valid + cfg.dashboardURL = "" + err := validateConfig(&cfg) + require.Error(t, err) + assert.Contains(t, err.Error(), "--dashboard-domain") + }) +} + +func TestConfigFromArgs_EnvVarsApplied(t *testing.T) { + t.Run("env vars fill in for missing flags", func(t *testing.T) { + t.Setenv("NETBIRD_CONFIG_PATH", "/env/management.json") + t.Setenv("NETBIRD_DATA_DIR", "/env/data") + t.Setenv("NETBIRD_IDP_SEED_INFO", "env-seed") + t.Setenv("NETBIRD_API_URL", "https://api.env.com") + t.Setenv("NETBIRD_DASHBOARD_URL", "https://dash.env.com") + + cfg, err := configFromArgs([]string{}) + require.NoError(t, err) + + assert.Equal(t, "/env/management.json", cfg.configPath) + assert.Equal(t, "/env/data", cfg.dataDir) + assert.Equal(t, "env-seed", cfg.idpSeedInfo) + assert.Equal(t, "https://api.env.com", cfg.apiURL) + assert.Equal(t, "https://dash.env.com", cfg.dashboardURL) + }) + + t.Run("flags work without env vars", func(t *testing.T) { + cfg, err := configFromArgs([]string{ + "--config", "/flag/management.json", + "--datadir", "/flag/data", + "--idp-seed-info", "flag-seed", + "--api-url", "https://api.flag.com", + "--dashboard-url", "https://dash.flag.com", + }) + require.NoError(t, err) + + assert.Equal(t, "/flag/management.json", cfg.configPath) + assert.Equal(t, "/flag/data", cfg.dataDir) + assert.Equal(t, "flag-seed", cfg.idpSeedInfo) + assert.Equal(t, "https://api.flag.com", cfg.apiURL) + assert.Equal(t, "https://dash.flag.com", cfg.dashboardURL) + }) + + t.Run("env vars override flags", func(t *testing.T) { + t.Setenv("NETBIRD_CONFIG_PATH", "/env/management.json") + t.Setenv("NETBIRD_API_URL", "https://api.env.com") + + cfg, err := configFromArgs([]string{ + "--config", "/flag/management.json", + "--datadir", "/flag/data", + "--idp-seed-info", "flag-seed", + "--api-url", "https://api.flag.com", + "--dashboard-url", "https://dash.flag.com", + }) + require.NoError(t, err) + + assert.Equal(t, "/env/management.json", cfg.configPath, "env should override flag") + assert.Equal(t, "https://api.env.com", cfg.apiURL, "env should override flag") + assert.Equal(t, "https://dash.flag.com", cfg.dashboardURL, "flag preserved when no env override") + }) + + t.Run("--domain flag with specific env var override", func(t *testing.T) { + t.Setenv("NETBIRD_API_URL", "https://api.env.com") + + cfg, err := configFromArgs([]string{ + "--domain", "both.flag.com", + "--config", "/path", + "--datadir", "/data", + "--idp-seed-info", "seed", + }) + require.NoError(t, err) + + assert.Equal(t, "https://api.env.com", cfg.apiURL, "specific env beats --domain") + assert.Equal(t, "both.flag.com", cfg.dashboardURL, "--domain fills dashboard") + }) +} + +func TestApplyOverrides_MostGranularWins(t *testing.T) { + t.Run("specific flags beat --domain", func(t *testing.T) { + cfg := &migrationConfig{ + apiURL: "api.specific.com", + dashboardURL: "dash.specific.com", + } + applyOverrides(cfg, "broad.com") + + assert.Equal(t, "api.specific.com", cfg.apiURL) + assert.Equal(t, "dash.specific.com", cfg.dashboardURL) + }) + + t.Run("--domain fills blanks when specific flags missing", func(t *testing.T) { + cfg := &migrationConfig{} + applyOverrides(cfg, "broad.com") + + assert.Equal(t, "broad.com", cfg.apiURL) + assert.Equal(t, "broad.com", cfg.dashboardURL) + }) + + t.Run("--domain fills only the missing specific flag", func(t *testing.T) { + cfg := &migrationConfig{ + apiURL: "api.specific.com", + } + applyOverrides(cfg, "broad.com") + + assert.Equal(t, "api.specific.com", cfg.apiURL) + assert.Equal(t, "broad.com", cfg.dashboardURL) + }) + + t.Run("NETBIRD_DOMAIN overrides flags", func(t *testing.T) { + cfg := &migrationConfig{ + apiURL: "api.flag.com", + dashboardURL: "dash.flag.com", + } + t.Setenv("NETBIRD_DOMAIN", "env-broad.com") + + applyOverrides(cfg, "") + + assert.Equal(t, "env-broad.com", cfg.apiURL) + assert.Equal(t, "env-broad.com", cfg.dashboardURL) + }) + + t.Run("specific env vars beat NETBIRD_DOMAIN", func(t *testing.T) { + cfg := &migrationConfig{} + t.Setenv("NETBIRD_DOMAIN", "env-broad.com") + t.Setenv("NETBIRD_API_URL", "api.env-specific.com") + t.Setenv("NETBIRD_DASHBOARD_URL", "dash.env-specific.com") + + applyOverrides(cfg, "") + + assert.Equal(t, "api.env-specific.com", cfg.apiURL) + assert.Equal(t, "dash.env-specific.com", cfg.dashboardURL) + }) + + t.Run("one specific env var overrides only its field", func(t *testing.T) { + cfg := &migrationConfig{} + t.Setenv("NETBIRD_DOMAIN", "env-broad.com") + t.Setenv("NETBIRD_API_URL", "api.env-specific.com") + + applyOverrides(cfg, "") + + assert.Equal(t, "api.env-specific.com", cfg.apiURL) + assert.Equal(t, "env-broad.com", cfg.dashboardURL) + }) + + t.Run("specific env vars beat all flags combined", func(t *testing.T) { + cfg := &migrationConfig{ + apiURL: "api.flag.com", + dashboardURL: "dash.flag.com", + } + t.Setenv("NETBIRD_API_URL", "api.env.com") + t.Setenv("NETBIRD_DASHBOARD_URL", "dash.env.com") + + applyOverrides(cfg, "domain-flag.com") + + assert.Equal(t, "api.env.com", cfg.apiURL) + assert.Equal(t, "dash.env.com", cfg.dashboardURL) + }) + + t.Run("env vars override all non-domain flags", func(t *testing.T) { + cfg := &migrationConfig{ + configPath: "/flag/path", + dataDir: "/flag/data", + idpSeedInfo: "flag-seed", + dryRun: false, + force: false, + skipConfig: false, + skipPopulateUserInfo: false, + logLevel: "info", + } + t.Setenv("NETBIRD_CONFIG_PATH", "/env/path") + t.Setenv("NETBIRD_DATA_DIR", "/env/data") + t.Setenv("NETBIRD_IDP_SEED_INFO", "env-seed") + t.Setenv("NETBIRD_DRY_RUN", "true") + t.Setenv("NETBIRD_FORCE", "true") + t.Setenv("NETBIRD_SKIP_CONFIG", "true") + t.Setenv("NETBIRD_SKIP_POPULATE_USER_INFO", "true") + t.Setenv("NETBIRD_LOG_LEVEL", "debug") + + applyOverrides(cfg, "") + + assert.Equal(t, "/env/path", cfg.configPath) + assert.Equal(t, "/env/data", cfg.dataDir) + assert.Equal(t, "env-seed", cfg.idpSeedInfo) + assert.True(t, cfg.dryRun) + assert.True(t, cfg.force) + assert.True(t, cfg.skipConfig) + assert.True(t, cfg.skipPopulateUserInfo) + assert.Equal(t, "debug", cfg.logLevel) + }) + + t.Run("boolean env vars properly parse false values", func(t *testing.T) { + cfg := &migrationConfig{} + t.Setenv("NETBIRD_DRY_RUN", "false") + t.Setenv("NETBIRD_FORCE", "yes") + t.Setenv("NETBIRD_SKIP_CONFIG", "0") + + applyOverrides(cfg, "") + + assert.False(t, cfg.dryRun) + assert.False(t, cfg.force) + assert.False(t, cfg.skipConfig) + }) + + t.Run("unset env vars do not override flags", func(t *testing.T) { + cfg := &migrationConfig{ + configPath: "/flag/path", + dataDir: "/flag/data", + idpSeedInfo: "flag-seed", + dryRun: true, + logLevel: "warn", + } + + applyOverrides(cfg, "") + + assert.Equal(t, "/flag/path", cfg.configPath) + assert.Equal(t, "/flag/data", cfg.dataDir) + assert.Equal(t, "flag-seed", cfg.idpSeedInfo) + assert.True(t, cfg.dryRun) + assert.Equal(t, "warn", cfg.logLevel) + }) +} + +func TestBuildUrl(t *testing.T) { + tests := []struct { + name string + uri string + path string + expected string + }{ + {"with https scheme", "https://example.com", "/oauth2", "https://example.com/oauth2"}, + {"with http scheme", "http://example.com", "/oauth2/callback", "http://example.com/oauth2/callback"}, + {"bare domain", "example.com", "/oauth2", "https://example.com/oauth2"}, + {"domain with port", "example.com:8080", "/nb-auth", "https://example.com:8080/nb-auth"}, + {"trailing slash on uri", "https://example.com/", "/oauth2", "https://example.com/oauth2"}, + {"nested path", "https://example.com", "/oauth2/callback", "https://example.com/oauth2/callback"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + url, err := buildURL(tt.uri, tt.path) + assert.NoError(t, err) + assert.Equal(t, tt.expected, url) + }) + } +} + +func TestGenerateConfig(t *testing.T) { + t.Run("generates valid config", func(t *testing.T) { + dir := t.TempDir() + configPath := filepath.Join(dir, "management.json") + + originalConfig := `{ + "Datadir": "/var/lib/netbird", + "HttpConfig": { + "LetsEncryptDomain": "mgmt.example.com", + "CertFile": "/etc/ssl/cert.pem", + "CertKey": "/etc/ssl/key.pem", + "AuthIssuer": "https://zitadel.example.com/oauth2", + "AuthKeysLocation": "https://zitadel.example.com/oauth2/keys", + "OIDCConfigEndpoint": "https://zitadel.example.com/.well-known/openid-configuration", + "AuthClientID": "old-client-id", + "AuthUserIDClaim": "preferred_username" + }, + "IdpManagerConfig": { + "ManagerType": "zitadel", + "ClientConfig": { + "Issuer": "https://zitadel.example.com", + "ClientID": "zit-id", + "ClientSecret": "zit-secret" + } + } +}` + require.NoError(t, os.WriteFile(configPath, []byte(originalConfig), 0o600)) + + cfg := &migrationConfig{ + configPath: configPath, + dashboardURL: "https://mgmt.example.com", + apiURL: "https://mgmt.example.com", + } + conn := &dex.Connector{ + Type: "zitadel", + Name: "zitadel", + ID: "zitadel", + Config: map[string]any{ + "issuer": "https://zitadel.example.com", + "clientID": "zit-id", + "clientSecret": "zit-secret", + }, + } + + err := generateConfig(cfg, conn) + require.NoError(t, err) + + // Check backup was created + backupPath := configPath + ".bak" + backupData, err := os.ReadFile(backupPath) + require.NoError(t, err) + assert.Equal(t, originalConfig, string(backupData)) + + // Read and parse the new config + newData, err := os.ReadFile(configPath) + require.NoError(t, err) + + var result map[string]any + require.NoError(t, json.Unmarshal(newData, &result)) + + // IdpManagerConfig should be removed + _, hasOldIdp := result["IdpManagerConfig"] + assert.False(t, hasOldIdp, "IdpManagerConfig should be removed") + + _, hasPKCE := result["PKCEAuthorizationFlow"] + assert.False(t, hasPKCE, "PKCEAuthorizationFlow should be removed") + + // EmbeddedIdP should be present with minimal fields + embeddedIDP, ok := result["EmbeddedIdP"].(map[string]any) + require.True(t, ok, "EmbeddedIdP should be present") + assert.Equal(t, true, embeddedIDP["Enabled"]) + assert.Equal(t, "https://mgmt.example.com/oauth2", embeddedIDP["Issuer"]) + assert.Nil(t, embeddedIDP["LocalAuthDisabled"], "LocalAuthDisabled should not be set") + assert.Nil(t, embeddedIDP["SignKeyRefreshEnabled"], "SignKeyRefreshEnabled should not be set") + assert.Nil(t, embeddedIDP["CLIRedirectURIs"], "CLIRedirectURIs should not be set") + + // Static connector's redirectURI should use the management domain + connectors := embeddedIDP["StaticConnectors"].([]any) + require.Len(t, connectors, 1) + firstConn := connectors[0].(map[string]any) + connCfg := firstConn["config"].(map[string]any) + assert.Equal(t, "https://mgmt.example.com/oauth2/callback", connCfg["redirectURI"], + "redirectURI should be overridden to use the management domain") + + // HttpConfig should only have CertFile and CertKey + httpConfig, ok := result["HttpConfig"].(map[string]any) + require.True(t, ok, "HttpConfig should be present") + assert.Equal(t, "/etc/ssl/cert.pem", httpConfig["CertFile"]) + assert.Equal(t, "/etc/ssl/key.pem", httpConfig["CertKey"]) + assert.Nil(t, httpConfig["AuthIssuer"], "AuthIssuer should be stripped") + + // Datadir should be preserved + assert.Equal(t, "/var/lib/netbird", result["Datadir"]) + }) + + t.Run("dry run does not write files", func(t *testing.T) { + dir := t.TempDir() + configPath := filepath.Join(dir, "management.json") + + originalConfig := `{"HttpConfig": {"CertFile": "", "CertKey": ""}}` + require.NoError(t, os.WriteFile(configPath, []byte(originalConfig), 0o600)) + + cfg := &migrationConfig{ + configPath: configPath, + dashboardURL: "https://mgmt.example.com", + apiURL: "https://mgmt.example.com", + dryRun: true, + } + conn := &dex.Connector{Type: "oidc", Name: "test", ID: "test"} + + err := generateConfig(cfg, conn) + require.NoError(t, err) + + // Original should be unchanged + data, err := os.ReadFile(configPath) + require.NoError(t, err) + assert.Equal(t, originalConfig, string(data)) + + // No backup should exist + _, err = os.Stat(configPath + ".bak") + assert.True(t, os.IsNotExist(err)) + }) +} From 2477f99d8936b0d2f4d1156b3fd54f8bcb408c3c Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Wed, 1 Apr 2026 14:10:41 +0200 Subject: [PATCH 264/374] [proxy] Add pprof (#5764) --- proxy/cmd/proxy/main.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/proxy/cmd/proxy/main.go b/proxy/cmd/proxy/main.go index 14e540a2e..16e7e8ac2 100644 --- a/proxy/cmd/proxy/main.go +++ b/proxy/cmd/proxy/main.go @@ -1,8 +1,13 @@ package main import ( + "net/http" + // nolint:gosec + _ "net/http/pprof" "runtime" + log "github.com/sirupsen/logrus" + "github.com/netbirdio/netbird/proxy/cmd/proxy/cmd" ) @@ -21,6 +26,9 @@ var ( ) func main() { + go func() { + log.Println(http.ListenAndServe("localhost:6060", nil)) + }() cmd.SetVersionInfo(Version, Commit, BuildDate, GoVersion) cmd.Execute() } From f5c41e301800eebed72ccf60fe4c764bcd78c534 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Wed, 1 Apr 2026 14:13:53 +0200 Subject: [PATCH 265/374] [misc] set permissions on env file for getting started scripts (#5761) --- infrastructure_files/getting-started-with-dex.sh | 7 +++++-- infrastructure_files/getting-started-with-zitadel.sh | 7 +++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/infrastructure_files/getting-started-with-dex.sh b/infrastructure_files/getting-started-with-dex.sh index a14c6134e..5e605f19c 100755 --- a/infrastructure_files/getting-started-with-dex.sh +++ b/infrastructure_files/getting-started-with-dex.sh @@ -172,8 +172,11 @@ init_environment() { echo "You can access the NetBird dashboard at $NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN" echo "" echo "Login with the following credentials:" - echo "Email: admin@$NETBIRD_DOMAIN" | tee .env - echo "Password: $NETBIRD_ADMIN_PASSWORD" | tee -a .env + install -m 600 /dev/null .env + printf 'Email: admin@%s\nPassword: %s\n' \ + "$NETBIRD_DOMAIN" "$NETBIRD_ADMIN_PASSWORD" >> .env + echo "Email: admin@$NETBIRD_DOMAIN" + echo "Password: $NETBIRD_ADMIN_PASSWORD" echo "" echo "Dex admin UI is not available (Dex has no built-in UI)." echo "To add more users, edit dex.yaml and restart: $DOCKER_COMPOSE_COMMAND restart dex" diff --git a/infrastructure_files/getting-started-with-zitadel.sh b/infrastructure_files/getting-started-with-zitadel.sh index 09c5225ad..f503cbeac 100644 --- a/infrastructure_files/getting-started-with-zitadel.sh +++ b/infrastructure_files/getting-started-with-zitadel.sh @@ -563,8 +563,11 @@ initEnvironment() { echo -e "\nDone!\n" echo "You can access the NetBird dashboard at $NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN" echo "Login with the following credentials:" - echo "Username: $ZITADEL_ADMIN_USERNAME" | tee .env - echo "Password: $ZITADEL_ADMIN_PASSWORD" | tee -a .env + install -m 600 /dev/null .env + printf 'Username: %s\nPassword: %s\n' \ + "$ZITADEL_ADMIN_USERNAME" "$ZITADEL_ADMIN_PASSWORD" >> .env + echo "Username: $ZITADEL_ADMIN_USERNAME" + echo "Password: $ZITADEL_ADMIN_PASSWORD" } renderCaddyfile() { From cd8c6863394d02b470d1f5b2a4d36a6510b40dfa Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Wed, 1 Apr 2026 14:23:24 +0200 Subject: [PATCH 266/374] [misc] add path traversal and file size protections (#5755) --- upload-server/server/local.go | 49 +++++++++++++---- upload-server/server/local_test.go | 87 ++++++++++++++++++++++++++++++ 2 files changed, 127 insertions(+), 9 deletions(-) diff --git a/upload-server/server/local.go b/upload-server/server/local.go index f12c472d2..f7ca50011 100644 --- a/upload-server/server/local.go +++ b/upload-server/server/local.go @@ -7,6 +7,7 @@ import ( "net/url" "os" "path/filepath" + "strings" log "github.com/sirupsen/logrus" @@ -82,15 +83,18 @@ func (l *local) getUploadURL(objectKey string) (string, error) { return newURL.String(), nil } +const maxUploadSize = 150 << 20 + func (l *local) handlePutRequest(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPut { http.Error(w, "method not allowed", http.StatusMethodNotAllowed) return } + r.Body = http.MaxBytesReader(w, r.Body, maxUploadSize) body, err := io.ReadAll(r.Body) if err != nil { - http.Error(w, fmt.Sprintf("failed to read body: %v", err), http.StatusInternalServerError) + http.Error(w, "request body too large or failed to read", http.StatusRequestEntityTooLarge) return } @@ -105,20 +109,47 @@ func (l *local) handlePutRequest(w http.ResponseWriter, r *http.Request) { return } - dirPath := filepath.Join(l.dir, uploadDir) - err = os.MkdirAll(dirPath, 0750) - if err != nil { + cleanBase := filepath.Clean(l.dir) + string(filepath.Separator) + + dirPath := filepath.Clean(filepath.Join(l.dir, uploadDir)) + if !strings.HasPrefix(dirPath, cleanBase) { + http.Error(w, "invalid path", http.StatusBadRequest) + log.Warnf("Path traversal attempt blocked (dir): %s", dirPath) + return + } + + filePath := filepath.Clean(filepath.Join(dirPath, uploadFile)) + if !strings.HasPrefix(filePath, cleanBase) { + http.Error(w, "invalid path", http.StatusBadRequest) + log.Warnf("Path traversal attempt blocked (file): %s", filePath) + return + } + + if err = os.MkdirAll(dirPath, 0750); err != nil { http.Error(w, "failed to create upload dir", http.StatusInternalServerError) log.Errorf("Failed to create upload dir: %v", err) return } - file := filepath.Join(dirPath, uploadFile) - if err := os.WriteFile(file, body, 0600); err != nil { - http.Error(w, "failed to write file", http.StatusInternalServerError) - log.Errorf("Failed to write file %s: %v", file, err) + flags := os.O_WRONLY | os.O_CREATE | os.O_EXCL + f, err := os.OpenFile(filePath, flags, 0600) + if err != nil { + if os.IsExist(err) { + http.Error(w, "file already exists", http.StatusConflict) + return + } + http.Error(w, "failed to create file", http.StatusInternalServerError) + log.Errorf("Failed to create file %s: %v", filePath, err) return } - log.Infof("Uploading file %s", file) + defer func() { _ = f.Close() }() + + if _, err = f.Write(body); err != nil { + http.Error(w, "failed to write file", http.StatusInternalServerError) + log.Errorf("Failed to write file %s: %v", filePath, err) + return + } + + log.Infof("Uploaded file %s", filePath) w.WriteHeader(http.StatusOK) } diff --git a/upload-server/server/local_test.go b/upload-server/server/local_test.go index bd8a87809..64b8fd228 100644 --- a/upload-server/server/local_test.go +++ b/upload-server/server/local_test.go @@ -63,3 +63,90 @@ func Test_LocalHandlePutRequest(t *testing.T) { require.NoError(t, err) require.Equal(t, fileContent, createdFileContent) } + +func Test_LocalHandlePutRequest_PathTraversal(t *testing.T) { + mockDir := t.TempDir() + mockURL := "http://localhost:8080" + t.Setenv("SERVER_URL", mockURL) + t.Setenv("STORE_DIR", mockDir) + + mux := http.NewServeMux() + err := configureLocalHandlers(mux) + require.NoError(t, err) + + fileContent := []byte("malicious content") + req := httptest.NewRequest(http.MethodPut, putURLPath+"/uploads/%2e%2e%2f%2e%2e%2fetc%2fpasswd", bytes.NewReader(fileContent)) + + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + require.Equal(t, http.StatusBadRequest, rec.Code) + + _, err = os.Stat(filepath.Join(mockDir, "..", "..", "etc", "passwd")) + require.True(t, os.IsNotExist(err), "traversal file should not exist") +} + +func Test_LocalHandlePutRequest_DirTraversal(t *testing.T) { + mockDir := t.TempDir() + t.Setenv("SERVER_URL", "http://localhost:8080") + t.Setenv("STORE_DIR", mockDir) + + l := &local{url: "http://localhost:8080", dir: mockDir} + + body := bytes.NewReader([]byte("bad")) + req := httptest.NewRequest(http.MethodPut, putURLPath+"/x/evil.txt", body) + req.SetPathValue("dir", "../../../tmp") + req.SetPathValue("file", "evil.txt") + + rec := httptest.NewRecorder() + l.handlePutRequest(rec, req) + + require.Equal(t, http.StatusBadRequest, rec.Code) + + _, err := os.Stat(filepath.Join("/tmp", "evil.txt")) + require.True(t, os.IsNotExist(err), "traversal file should not exist outside store dir") +} + +func Test_LocalHandlePutRequest_DuplicateFile(t *testing.T) { + mockDir := t.TempDir() + t.Setenv("SERVER_URL", "http://localhost:8080") + t.Setenv("STORE_DIR", mockDir) + + mux := http.NewServeMux() + err := configureLocalHandlers(mux) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPut, putURLPath+"/dir/dup.txt", bytes.NewReader([]byte("first"))) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + req = httptest.NewRequest(http.MethodPut, putURLPath+"/dir/dup.txt", bytes.NewReader([]byte("second"))) + rec = httptest.NewRecorder() + mux.ServeHTTP(rec, req) + require.Equal(t, http.StatusConflict, rec.Code) + + content, err := os.ReadFile(filepath.Join(mockDir, "dir", "dup.txt")) + require.NoError(t, err) + require.Equal(t, []byte("first"), content) +} + +func Test_LocalHandlePutRequest_BodyTooLarge(t *testing.T) { + mockDir := t.TempDir() + t.Setenv("SERVER_URL", "http://localhost:8080") + t.Setenv("STORE_DIR", mockDir) + + mux := http.NewServeMux() + err := configureLocalHandlers(mux) + require.NoError(t, err) + + largeBody := make([]byte, maxUploadSize+1) + req := httptest.NewRequest(http.MethodPut, putURLPath+"/dir/big.txt", bytes.NewReader(largeBody)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + require.Equal(t, http.StatusRequestEntityTooLarge, rec.Code) + + _, err = os.Stat(filepath.Join(mockDir, "dir", "big.txt")) + require.True(t, os.IsNotExist(err)) +} From d670e7382af35d335efa006b64fc73fa9f37d34d Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Wed, 1 Apr 2026 15:11:23 +0200 Subject: [PATCH 267/374] [client] Fix ipv6 address in quic server (#5763) * [client] Use `net.JoinHostPort` for consistency in constructing host-port pairs * [client] Fix handling of IPv6 addresses by trimming brackets in `net.JoinHostPort` --- shared/relay/client/dialer/quic/quic.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/shared/relay/client/dialer/quic/quic.go b/shared/relay/client/dialer/quic/quic.go index 78462837d..2d7b00a80 100644 --- a/shared/relay/client/dialer/quic/quic.go +++ b/shared/relay/client/dialer/quic/quic.go @@ -89,12 +89,12 @@ func prepareURL(address string) (string, error) { finalHost, finalPort, err := net.SplitHostPort(host) if err != nil { if strings.Contains(err.Error(), "missing port") { - return host + ":" + defaultPort, nil + return net.JoinHostPort(strings.Trim(host, "[]"), defaultPort), nil } // return any other split error as is return "", err } - return finalHost + ":" + finalPort, nil + return net.JoinHostPort(finalHost, finalPort), nil } From 81f45dab21d4bfb549d8780b5bba6ab4d195c9c4 Mon Sep 17 00:00:00 2001 From: tham-le <45093611+tham-le@users.noreply.github.com> Date: Wed, 1 Apr 2026 16:19:34 +0200 Subject: [PATCH 268/374] [client] Support embed.Client on Android with netstack mode (#5623) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [client] Support embed.Client on Android with netstack mode embed.Client.Start() calls ConnectClient.Run() which passes an empty MobileDependency{}. On Android, the engine dereferences nil fields (IFaceDiscover, NetworkChangeListener, DnsReadyListener) causing panics. Provide complete no-op stubs so the engine's existing Android code paths work unchanged — zero modifications to engine.go: - Add androidRunOverride hook in Run() for Android-specific dispatch - Add runOnAndroidEmbed() with complete MobileDependency (all stubs) - Wire default stubs via init() in connect_android_default.go: noopIFaceDiscover, noopNetworkChangeListener, noopDnsReadyListener - Forward logPath to c.run() Tested: embed.Client starts on Android arm64, joins mesh via relay, discovers peers, localhost proxy works for TCP+UDP forwarding. * [client] Fix TestServiceParamsPath for Windows path separators Use filepath.Join in test assertions instead of hardcoded POSIX paths so the test passes on Windows where filepath.Join uses backslashes. --- client/internal/connect.go | 7 +++ client/internal/connect_android_default.go | 73 ++++++++++++++++++++++ client/internal/connect_android_embed.go | 32 ++++++++++ 3 files changed, 112 insertions(+) create mode 100644 client/internal/connect_android_default.go create mode 100644 client/internal/connect_android_embed.go diff --git a/client/internal/connect.go b/client/internal/connect.go index 242b25b44..1e8f87c08 100644 --- a/client/internal/connect.go +++ b/client/internal/connect.go @@ -44,6 +44,10 @@ import ( "github.com/netbirdio/netbird/version" ) +// androidRunOverride is set on Android to inject mobile dependencies +// when using embed.Client (which calls Run() with empty MobileDependency). +var androidRunOverride func(c *ConnectClient, runningChan chan struct{}, logPath string) error + type ConnectClient struct { ctx context.Context config *profilemanager.Config @@ -76,6 +80,9 @@ func (c *ConnectClient) SetUpdateManager(um *updater.Manager) { // Run with main logic. func (c *ConnectClient) Run(runningChan chan struct{}, logPath string) error { + if androidRunOverride != nil { + return androidRunOverride(c, runningChan, logPath) + } return c.run(MobileDependency{}, runningChan, logPath) } diff --git a/client/internal/connect_android_default.go b/client/internal/connect_android_default.go new file mode 100644 index 000000000..190341c4a --- /dev/null +++ b/client/internal/connect_android_default.go @@ -0,0 +1,73 @@ +//go:build android + +package internal + +import ( + "net/netip" + + "github.com/netbirdio/netbird/client/internal/dns" + "github.com/netbirdio/netbird/client/internal/listener" + "github.com/netbirdio/netbird/client/internal/stdnet" +) + +// noopIFaceDiscover is a stub ExternalIFaceDiscover for embed.Client on Android. +// It returns an empty interface list, which means ICE P2P candidates won't be +// discovered — connections will fall back to relay. Applications that need P2P +// should provide a real implementation via runOnAndroidEmbed that uses +// Android's ConnectivityManager to enumerate network interfaces. +type noopIFaceDiscover struct{} + +func (noopIFaceDiscover) IFaces() (string, error) { + // Return empty JSON array — no local interfaces advertised for ICE. + // This is intentional: without Android's ConnectivityManager, we cannot + // reliably enumerate interfaces (netlink is restricted on Android 11+). + // Relay connections still work; only P2P hole-punching is disabled. + return "[]", nil +} + +// noopNetworkChangeListener is a stub for embed.Client on Android. +// Network change events are ignored since the embed client manages its own +// reconnection logic via the engine's built-in retry mechanism. +type noopNetworkChangeListener struct{} + +func (noopNetworkChangeListener) OnNetworkChanged(string) { + // No-op: embed.Client relies on the engine's internal reconnection + // logic rather than OS-level network change notifications. +} + +func (noopNetworkChangeListener) SetInterfaceIP(string) { + // No-op: in netstack mode, the overlay IP is managed by the userspace + // network stack, not by OS-level interface configuration. +} + +// noopDnsReadyListener is a stub for embed.Client on Android. +// DNS readiness notifications are not needed in netstack/embed mode +// since system DNS is disabled and DNS resolution happens externally. +type noopDnsReadyListener struct{} + +func (noopDnsReadyListener) OnReady() { + // No-op: embed.Client does not need DNS readiness notifications. + // System DNS is disabled in netstack mode. +} + +var _ stdnet.ExternalIFaceDiscover = noopIFaceDiscover{} +var _ listener.NetworkChangeListener = noopNetworkChangeListener{} +var _ dns.ReadyListener = noopDnsReadyListener{} + +func init() { + // Wire up the default override so embed.Client.Start() works on Android + // with netstack mode. Provides complete no-op stubs for all mobile + // dependencies so the engine's existing Android code paths work unchanged. + // Applications that need P2P ICE or real DNS should replace this by + // setting androidRunOverride before calling Start(). + androidRunOverride = func(c *ConnectClient, runningChan chan struct{}, logPath string) error { + return c.runOnAndroidEmbed( + noopIFaceDiscover{}, + noopNetworkChangeListener{}, + []netip.AddrPort{}, + noopDnsReadyListener{}, + runningChan, + logPath, + ) + } +} diff --git a/client/internal/connect_android_embed.go b/client/internal/connect_android_embed.go new file mode 100644 index 000000000..18f72e841 --- /dev/null +++ b/client/internal/connect_android_embed.go @@ -0,0 +1,32 @@ +//go:build android + +package internal + +import ( + "net/netip" + + "github.com/netbirdio/netbird/client/internal/dns" + "github.com/netbirdio/netbird/client/internal/listener" + "github.com/netbirdio/netbird/client/internal/stdnet" +) + +// runOnAndroidEmbed is like RunOnAndroid but accepts a runningChan +// so embed.Client.Start() can detect when the engine is ready. +// It provides complete MobileDependency so the engine's existing +// Android code paths work unchanged. +func (c *ConnectClient) runOnAndroidEmbed( + iFaceDiscover stdnet.ExternalIFaceDiscover, + networkChangeListener listener.NetworkChangeListener, + dnsAddresses []netip.AddrPort, + dnsReadyListener dns.ReadyListener, + runningChan chan struct{}, + logPath string, +) error { + mobileDependency := MobileDependency{ + IFaceDiscover: iFaceDiscover, + NetworkChangeListener: networkChangeListener, + HostDNSAddresses: dnsAddresses, + DnsReadyListener: dnsReadyListener, + } + return c.run(mobileDependency, runningChan, logPath) +} From d97fe84296ef64a1a0e9f9d0677de87d38d4af90 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Wed, 1 Apr 2026 16:25:35 +0200 Subject: [PATCH 269/374] [management] fix race condition in the setup flow that enables creation of multiple owner users (#5754) --- management/server/instance/manager.go | 71 +++- management/server/instance/manager_test.go | 358 ++++++++++++--------- 2 files changed, 257 insertions(+), 172 deletions(-) diff --git a/management/server/instance/manager.go b/management/server/instance/manager.go index 19e3abdc0..9579d7a35 100644 --- a/management/server/instance/manager.go +++ b/management/server/instance/manager.go @@ -64,10 +64,19 @@ type Manager interface { GetVersionInfo(ctx context.Context) (*VersionInfo, error) } +type instanceStore interface { + GetAccountsCounter(ctx context.Context) (int64, error) +} + +type embeddedIdP interface { + CreateUserWithPassword(ctx context.Context, email, password, name string) (*idp.UserData, error) + GetAllAccounts(ctx context.Context) (map[string][]*idp.UserData, error) +} + // DefaultManager is the default implementation of Manager. type DefaultManager struct { - store store.Store - embeddedIdpManager *idp.EmbeddedIdPManager + store instanceStore + embeddedIdpManager embeddedIdP setupRequired bool setupMu sync.RWMutex @@ -82,18 +91,18 @@ type DefaultManager struct { // NewManager creates a new instance manager. // If idpManager is not an EmbeddedIdPManager, setup-related operations will return appropriate defaults. func NewManager(ctx context.Context, store store.Store, idpManager idp.Manager) (Manager, error) { - embeddedIdp, _ := idpManager.(*idp.EmbeddedIdPManager) + embeddedIdp, ok := idpManager.(*idp.EmbeddedIdPManager) m := &DefaultManager{ - store: store, - embeddedIdpManager: embeddedIdp, - setupRequired: false, + store: store, + setupRequired: false, httpClient: &http.Client{ Timeout: httpTimeout, }, } - if embeddedIdp != nil { + if ok && embeddedIdp != nil { + m.embeddedIdpManager = embeddedIdp err := m.loadSetupRequired(ctx) if err != nil { return nil, err @@ -143,36 +152,61 @@ func (m *DefaultManager) IsSetupRequired(_ context.Context) (bool, error) { // CreateOwnerUser creates the initial owner user in the embedded IDP. func (m *DefaultManager) CreateOwnerUser(ctx context.Context, email, password, name string) (*idp.UserData, error) { - if err := m.validateSetupInfo(email, password, name); err != nil { - return nil, err - } - if m.embeddedIdpManager == nil { return nil, errors.New("embedded IDP is not enabled") } - m.setupMu.RLock() - setupRequired := m.setupRequired - m.setupMu.RUnlock() + if err := m.validateSetupInfo(email, password, name); err != nil { + return nil, err + } - if !setupRequired { + m.setupMu.Lock() + defer m.setupMu.Unlock() + + if !m.setupRequired { return nil, status.Errorf(status.PreconditionFailed, "setup already completed") } + if err := m.checkSetupRequiredFromDB(ctx); err != nil { + var sErr *status.Error + if errors.As(err, &sErr) && sErr.Type() == status.PreconditionFailed { + m.setupRequired = false + } + return nil, err + } + userData, err := m.embeddedIdpManager.CreateUserWithPassword(ctx, email, password, name) if err != nil { return nil, fmt.Errorf("failed to create user in embedded IdP: %w", err) } - m.setupMu.Lock() m.setupRequired = false - m.setupMu.Unlock() log.WithContext(ctx).Infof("created owner user %s in embedded IdP", email) return userData, nil } +func (m *DefaultManager) checkSetupRequiredFromDB(ctx context.Context) error { + numAccounts, err := m.store.GetAccountsCounter(ctx) + if err != nil { + return fmt.Errorf("failed to check accounts: %w", err) + } + if numAccounts > 0 { + return status.Errorf(status.PreconditionFailed, "setup already completed") + } + + users, err := m.embeddedIdpManager.GetAllAccounts(ctx) + if err != nil { + return fmt.Errorf("failed to check IdP users: %w", err) + } + if len(users) > 0 { + return status.Errorf(status.PreconditionFailed, "setup already completed") + } + + return nil +} + func (m *DefaultManager) validateSetupInfo(email, password, name string) error { if email == "" { return status.Errorf(status.InvalidArgument, "email is required") @@ -189,6 +223,9 @@ func (m *DefaultManager) validateSetupInfo(email, password, name string) error { if len(password) < 8 { return status.Errorf(status.InvalidArgument, "password must be at least 8 characters") } + if len(password) > 72 { + return status.Errorf(status.InvalidArgument, "password must be at most 72 characters") + } return nil } diff --git a/management/server/instance/manager_test.go b/management/server/instance/manager_test.go index 35d0ff53c..e3be9cfea 100644 --- a/management/server/instance/manager_test.go +++ b/management/server/instance/manager_test.go @@ -3,7 +3,12 @@ package instance import ( "context" "errors" + "fmt" + "net/http" + "sync" + "sync/atomic" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -11,173 +16,215 @@ import ( "github.com/netbirdio/netbird/management/server/idp" ) -// mockStore implements a minimal store.Store for testing +type mockIdP struct { + mu sync.Mutex + createUserFunc func(ctx context.Context, email, password, name string) (*idp.UserData, error) + users map[string][]*idp.UserData + getAllAccountsErr error +} + +func (m *mockIdP) CreateUserWithPassword(ctx context.Context, email, password, name string) (*idp.UserData, error) { + if m.createUserFunc != nil { + return m.createUserFunc(ctx, email, password, name) + } + return &idp.UserData{ID: "test-user-id", Email: email, Name: name}, nil +} + +func (m *mockIdP) GetAllAccounts(_ context.Context) (map[string][]*idp.UserData, error) { + m.mu.Lock() + defer m.mu.Unlock() + if m.getAllAccountsErr != nil { + return nil, m.getAllAccountsErr + } + return m.users, nil +} + type mockStore struct { accountsCount int64 err error } -func (m *mockStore) GetAccountsCounter(ctx context.Context) (int64, error) { +func (m *mockStore) GetAccountsCounter(_ context.Context) (int64, error) { if m.err != nil { return 0, m.err } return m.accountsCount, nil } -// mockEmbeddedIdPManager wraps the real EmbeddedIdPManager for testing -type mockEmbeddedIdPManager struct { - createUserFunc func(ctx context.Context, email, password, name string) (*idp.UserData, error) -} - -func (m *mockEmbeddedIdPManager) CreateUserWithPassword(ctx context.Context, email, password, name string) (*idp.UserData, error) { - if m.createUserFunc != nil { - return m.createUserFunc(ctx, email, password, name) +func newTestManager(idpMock *mockIdP, storeMock *mockStore) *DefaultManager { + return &DefaultManager{ + store: storeMock, + embeddedIdpManager: idpMock, + setupRequired: true, + httpClient: &http.Client{Timeout: httpTimeout}, } - return &idp.UserData{ - ID: "test-user-id", - Email: email, - Name: name, - }, nil -} - -// testManager is a test implementation that accepts our mock types -type testManager struct { - store *mockStore - embeddedIdpManager *mockEmbeddedIdPManager -} - -func (m *testManager) IsSetupRequired(ctx context.Context) (bool, error) { - if m.embeddedIdpManager == nil { - return false, nil - } - - count, err := m.store.GetAccountsCounter(ctx) - if err != nil { - return false, err - } - - return count == 0, nil -} - -func (m *testManager) CreateOwnerUser(ctx context.Context, email, password, name string) (*idp.UserData, error) { - if m.embeddedIdpManager == nil { - return nil, errors.New("embedded IDP is not enabled") - } - - return m.embeddedIdpManager.CreateUserWithPassword(ctx, email, password, name) -} - -func TestIsSetupRequired_EmbeddedIdPDisabled(t *testing.T) { - manager := &testManager{ - store: &mockStore{accountsCount: 0}, - embeddedIdpManager: nil, // No embedded IDP - } - - required, err := manager.IsSetupRequired(context.Background()) - require.NoError(t, err) - assert.False(t, required, "setup should not be required when embedded IDP is disabled") -} - -func TestIsSetupRequired_NoAccounts(t *testing.T) { - manager := &testManager{ - store: &mockStore{accountsCount: 0}, - embeddedIdpManager: &mockEmbeddedIdPManager{}, - } - - required, err := manager.IsSetupRequired(context.Background()) - require.NoError(t, err) - assert.True(t, required, "setup should be required when no accounts exist") -} - -func TestIsSetupRequired_AccountsExist(t *testing.T) { - manager := &testManager{ - store: &mockStore{accountsCount: 1}, - embeddedIdpManager: &mockEmbeddedIdPManager{}, - } - - required, err := manager.IsSetupRequired(context.Background()) - require.NoError(t, err) - assert.False(t, required, "setup should not be required when accounts exist") -} - -func TestIsSetupRequired_MultipleAccounts(t *testing.T) { - manager := &testManager{ - store: &mockStore{accountsCount: 5}, - embeddedIdpManager: &mockEmbeddedIdPManager{}, - } - - required, err := manager.IsSetupRequired(context.Background()) - require.NoError(t, err) - assert.False(t, required, "setup should not be required when multiple accounts exist") -} - -func TestIsSetupRequired_StoreError(t *testing.T) { - manager := &testManager{ - store: &mockStore{err: errors.New("database error")}, - embeddedIdpManager: &mockEmbeddedIdPManager{}, - } - - _, err := manager.IsSetupRequired(context.Background()) - assert.Error(t, err, "should return error when store fails") } func TestCreateOwnerUser_Success(t *testing.T) { - expectedEmail := "admin@example.com" - expectedName := "Admin User" - expectedPassword := "securepassword123" + idpMock := &mockIdP{} + mgr := newTestManager(idpMock, &mockStore{}) - manager := &testManager{ - store: &mockStore{accountsCount: 0}, - embeddedIdpManager: &mockEmbeddedIdPManager{ - createUserFunc: func(ctx context.Context, email, password, name string) (*idp.UserData, error) { - assert.Equal(t, expectedEmail, email) - assert.Equal(t, expectedPassword, password) - assert.Equal(t, expectedName, name) - return &idp.UserData{ - ID: "created-user-id", - Email: email, - Name: name, - }, nil - }, - }, - } - - userData, err := manager.CreateOwnerUser(context.Background(), expectedEmail, expectedPassword, expectedName) + userData, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") require.NoError(t, err) - assert.Equal(t, "created-user-id", userData.ID) - assert.Equal(t, expectedEmail, userData.Email) - assert.Equal(t, expectedName, userData.Name) + assert.Equal(t, "admin@example.com", userData.Email) + + _, err = mgr.CreateOwnerUser(context.Background(), "admin2@example.com", "password123", "Admin2") + require.Error(t, err) + assert.Contains(t, err.Error(), "setup already completed") +} + +func TestCreateOwnerUser_SetupAlreadyCompleted(t *testing.T) { + mgr := newTestManager(&mockIdP{}, &mockStore{}) + mgr.setupRequired = false + + _, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.Error(t, err) + assert.Contains(t, err.Error(), "setup already completed") } func TestCreateOwnerUser_EmbeddedIdPDisabled(t *testing.T) { - manager := &testManager{ - store: &mockStore{accountsCount: 0}, - embeddedIdpManager: nil, - } + mgr := &DefaultManager{setupRequired: true} - _, err := manager.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") - assert.Error(t, err, "should return error when embedded IDP is disabled") + _, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.Error(t, err) assert.Contains(t, err.Error(), "embedded IDP is not enabled") } func TestCreateOwnerUser_IdPError(t *testing.T) { - manager := &testManager{ - store: &mockStore{accountsCount: 0}, - embeddedIdpManager: &mockEmbeddedIdPManager{ - createUserFunc: func(ctx context.Context, email, password, name string) (*idp.UserData, error) { - return nil, errors.New("user already exists") - }, + idpMock := &mockIdP{ + createUserFunc: func(_ context.Context, _, _, _ string) (*idp.UserData, error) { + return nil, errors.New("provider error") }, } + mgr := newTestManager(idpMock, &mockStore{}) - _, err := manager.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") - assert.Error(t, err, "should return error when IDP fails") + _, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.Error(t, err) + assert.Contains(t, err.Error(), "provider error") + + required, _ := mgr.IsSetupRequired(context.Background()) + assert.True(t, required, "setup should still be required after IdP error") +} + +func TestCreateOwnerUser_TransientDBError_DoesNotBlockSetup(t *testing.T) { + mgr := newTestManager(&mockIdP{}, &mockStore{err: errors.New("connection refused")}) + + _, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.Error(t, err) + assert.Contains(t, err.Error(), "connection refused") + + required, _ := mgr.IsSetupRequired(context.Background()) + assert.True(t, required, "setup should still be required after transient DB error") + + mgr.store = &mockStore{} + userData, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.NoError(t, err) + assert.Equal(t, "admin@example.com", userData.Email) +} + +func TestCreateOwnerUser_TransientIdPError_DoesNotBlockSetup(t *testing.T) { + idpMock := &mockIdP{getAllAccountsErr: errors.New("connection reset")} + mgr := newTestManager(idpMock, &mockStore{}) + + _, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.Error(t, err) + assert.Contains(t, err.Error(), "connection reset") + + required, _ := mgr.IsSetupRequired(context.Background()) + assert.True(t, required, "setup should still be required after transient IdP error") + + idpMock.getAllAccountsErr = nil + userData, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.NoError(t, err) + assert.Equal(t, "admin@example.com", userData.Email) +} + +func TestCreateOwnerUser_DBCheckBlocksConcurrent(t *testing.T) { + idpMock := &mockIdP{ + users: map[string][]*idp.UserData{ + "acc1": {{ID: "existing-user"}}, + }, + } + mgr := newTestManager(idpMock, &mockStore{}) + + _, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.Error(t, err) + assert.Contains(t, err.Error(), "setup already completed") +} + +func TestCreateOwnerUser_DBCheckBlocksWhenAccountsExist(t *testing.T) { + mgr := newTestManager(&mockIdP{}, &mockStore{accountsCount: 1}) + + _, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.Error(t, err) + assert.Contains(t, err.Error(), "setup already completed") +} + +func TestCreateOwnerUser_ConcurrentRequests(t *testing.T) { + var idpCallCount atomic.Int32 + var successCount atomic.Int32 + var failCount atomic.Int32 + + idpMock := &mockIdP{ + createUserFunc: func(_ context.Context, email, _, _ string) (*idp.UserData, error) { + idpCallCount.Add(1) + time.Sleep(50 * time.Millisecond) + return &idp.UserData{ID: "user-1", Email: email, Name: "Owner"}, nil + }, + } + mgr := newTestManager(idpMock, &mockStore{}) + + var wg sync.WaitGroup + for i := range 10 { + wg.Add(1) + go func(idx int) { + defer wg.Done() + _, err := mgr.CreateOwnerUser( + context.Background(), + fmt.Sprintf("owner%d@example.com", idx), + "password1234", + fmt.Sprintf("Owner%d", idx), + ) + if err != nil { + failCount.Add(1) + } else { + successCount.Add(1) + } + }(i) + } + wg.Wait() + + assert.Equal(t, int32(1), successCount.Load(), "exactly one concurrent setup request should succeed") + assert.Equal(t, int32(9), failCount.Load(), "remaining concurrent requests should fail") + assert.Equal(t, int32(1), idpCallCount.Load(), "IdP CreateUser should be called exactly once") +} + +func TestIsSetupRequired_EmbeddedIdPDisabled(t *testing.T) { + mgr := &DefaultManager{} + + required, err := mgr.IsSetupRequired(context.Background()) + require.NoError(t, err) + assert.False(t, required) +} + +func TestIsSetupRequired_ReturnsFlag(t *testing.T) { + mgr := newTestManager(&mockIdP{}, &mockStore{}) + + required, err := mgr.IsSetupRequired(context.Background()) + require.NoError(t, err) + assert.True(t, required) + + mgr.setupMu.Lock() + mgr.setupRequired = false + mgr.setupMu.Unlock() + + required, err = mgr.IsSetupRequired(context.Background()) + require.NoError(t, err) + assert.False(t, required) } func TestDefaultManager_ValidateSetupRequest(t *testing.T) { - manager := &DefaultManager{ - setupRequired: true, - } + manager := &DefaultManager{setupRequired: true} tests := []struct { name string @@ -188,11 +235,10 @@ func TestDefaultManager_ValidateSetupRequest(t *testing.T) { errorMsg string }{ { - name: "valid request", - email: "admin@example.com", - password: "password123", - userName: "Admin User", - expectError: false, + name: "valid request", + email: "admin@example.com", + password: "password123", + userName: "Admin User", }, { name: "empty email", @@ -235,11 +281,24 @@ func TestDefaultManager_ValidateSetupRequest(t *testing.T) { errorMsg: "password must be at least 8 characters", }, { - name: "password exactly 8 characters", + name: "password exactly 8 characters", + email: "admin@example.com", + password: "12345678", + userName: "Admin User", + }, + { + name: "password exactly 72 characters", + email: "admin@example.com", + password: "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhhiiiiiiii", + userName: "Admin User", + }, + { + name: "password too long", email: "admin@example.com", - password: "12345678", + password: "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhhiiiiiiiij", userName: "Admin User", - expectError: false, + expectError: true, + errorMsg: "password must be at most 72 characters", }, } @@ -255,14 +314,3 @@ func TestDefaultManager_ValidateSetupRequest(t *testing.T) { }) } } - -func TestDefaultManager_CreateOwnerUser_SetupAlreadyCompleted(t *testing.T) { - manager := &DefaultManager{ - setupRequired: false, - embeddedIdpManager: &idp.EmbeddedIdPManager{}, - } - - _, err := manager.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") - require.Error(t, err) - assert.Contains(t, err.Error(), "setup already completed") -} From aaf813fc0cb80a8ab84d5398bc10b248d2a3481b Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Wed, 1 Apr 2026 19:23:39 +0200 Subject: [PATCH 270/374] Add selfhosted scaling note (#5769) --- infrastructure_files/getting-started.sh | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh index 70088d66a..9236d851d 100755 --- a/infrastructure_files/getting-started.sh +++ b/infrastructure_files/getting-started.sh @@ -1154,7 +1154,16 @@ print_builtin_traefik_instructions() { echo " - $NETBIRD_STUN_PORT/udp (STUN - required for NAT traversal)" if [[ "$ENABLE_PROXY" == "true" ]]; then echo " - 51820/udp (WIREGUARD - (optional) for P2P proxy connections)" - echo "" + fi + echo "" + echo "This setup is ideal for homelabs and smaller organization deployments." + echo "For enterprise environments requiring high availability and advanced integrations," + echo "consider a commercial on-prem license or scaling your open source deployment:" + echo "" + echo " Commercial license: https://netbird.io/pricing#on-prem" + echo " Scaling guide: https://docs.netbird.io/scaling-your-self-hosted-deployment" + echo "" + if [[ "$ENABLE_PROXY" == "true" ]]; then echo "NetBird Proxy:" echo " The proxy service is enabled and running." echo " Any domain NOT matching $NETBIRD_DOMAIN will be passed through to the proxy." From c2c6396a0428f560b59099254425a152eef2ebce Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Thu, 2 Apr 2026 13:02:10 +0300 Subject: [PATCH 271/374] [management] Allow updating embedded IdP user name and email (#5721) --- management/server/user.go | 8 ++++- shared/management/http/api/openapi.yml | 3 ++ shared/management/http/api/types.gen.go | 39 +++++++++++++++++++++++++ 3 files changed, 49 insertions(+), 1 deletion(-) diff --git a/management/server/user.go b/management/server/user.go index 8742daed6..c1f984f2f 100644 --- a/management/server/user.go +++ b/management/server/user.go @@ -780,9 +780,15 @@ func (am *DefaultAccountManager) processUserUpdate(ctx context.Context, transact updatedUser.Role = update.Role updatedUser.Blocked = update.Blocked updatedUser.AutoGroups = update.AutoGroups - // these two fields can't be set via API, only via direct call to the method + // these fields can't be set via API, only via direct call to the method updatedUser.Issued = update.Issued updatedUser.IntegrationReference = update.IntegrationReference + if update.Name != "" { + updatedUser.Name = update.Name + } + if update.Email != "" { + updatedUser.Email = update.Email + } var transferredOwnerRole bool result, err := handleOwnerRoleTransfer(ctx, transaction, initiatorUser, update) diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index d35b32be0..766fdf0de 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -4414,6 +4414,9 @@ components: items: type: string example: [ "Users" ] + connector_id: + type: string + description: DEX connector ID for embedded IDP setups IntegrationEnabled: type: object properties: diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index c47b77455..14bb6ee03 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -1492,6 +1492,9 @@ type AzureIntegration struct { // ClientId Azure AD application (client) ID ClientId string `json:"client_id"` + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + // Enabled Whether the integration is enabled Enabled bool `json:"enabled"` @@ -1632,6 +1635,9 @@ type CreateAzureIntegrationRequest struct { // ClientSecret Base64-encoded Azure AD client secret ClientSecret string `json:"client_secret"` + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + // GroupPrefixes List of start_with string patterns for groups to sync GroupPrefixes *[]string `json:"group_prefixes,omitempty"` @@ -1653,6 +1659,9 @@ type CreateAzureIntegrationRequestHost string // CreateGoogleIntegrationRequest defines model for CreateGoogleIntegrationRequest. type CreateGoogleIntegrationRequest struct { + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + // CustomerId Customer ID from Google Workspace Account Settings CustomerId string `json:"customer_id"` @@ -1689,6 +1698,9 @@ type CreateOktaScimIntegrationRequest struct { // ConnectionName The Okta enterprise connection name on Auth0 ConnectionName string `json:"connection_name"` + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + // GroupPrefixes List of start_with string patterns for groups to sync GroupPrefixes *[]string `json:"group_prefixes,omitempty"` @@ -1698,6 +1710,9 @@ type CreateOktaScimIntegrationRequest struct { // CreateScimIntegrationRequest defines model for CreateScimIntegrationRequest. type CreateScimIntegrationRequest struct { + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + // GroupPrefixes List of start_with string patterns for groups to sync GroupPrefixes *[]string `json:"group_prefixes,omitempty"` @@ -2154,6 +2169,9 @@ type GetTenantsResponse = []TenantResponse // GoogleIntegration defines model for GoogleIntegration. type GoogleIntegration struct { + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + // CustomerId Customer ID from Google Workspace CustomerId string `json:"customer_id"` @@ -2502,6 +2520,9 @@ type IntegrationResponsePlatform string // IntegrationSyncFilters defines model for IntegrationSyncFilters. type IntegrationSyncFilters struct { + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + // GroupPrefixes List of start_with string patterns for groups to sync GroupPrefixes *[]string `json:"group_prefixes,omitempty"` @@ -2994,6 +3015,9 @@ type OktaScimIntegration struct { // AuthToken SCIM API token (full on creation/regeneration, masked on retrieval) AuthToken string `json:"auth_token"` + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + // Enabled Whether the integration is enabled Enabled bool `json:"enabled"` @@ -3864,6 +3888,9 @@ type ScimIntegration struct { // AuthToken SCIM API token (full on creation, masked otherwise) AuthToken string `json:"auth_token"` + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + // Enabled Whether the integration is enabled Enabled bool `json:"enabled"` @@ -4341,6 +4368,9 @@ type UpdateAzureIntegrationRequest struct { // ClientSecret Base64-encoded Azure AD client secret ClientSecret *string `json:"client_secret,omitempty"` + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + // Enabled Whether the integration is enabled Enabled *bool `json:"enabled,omitempty"` @@ -4359,6 +4389,9 @@ type UpdateAzureIntegrationRequest struct { // UpdateGoogleIntegrationRequest defines model for UpdateGoogleIntegrationRequest. type UpdateGoogleIntegrationRequest struct { + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + // CustomerId Customer ID from Google Workspace Account Settings CustomerId *string `json:"customer_id,omitempty"` @@ -4380,6 +4413,9 @@ type UpdateGoogleIntegrationRequest struct { // UpdateOktaScimIntegrationRequest defines model for UpdateOktaScimIntegrationRequest. type UpdateOktaScimIntegrationRequest struct { + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + // Enabled Whether the integration is enabled Enabled *bool `json:"enabled,omitempty"` @@ -4392,6 +4428,9 @@ type UpdateOktaScimIntegrationRequest struct { // UpdateScimIntegrationRequest defines model for UpdateScimIntegrationRequest. type UpdateScimIntegrationRequest struct { + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + // Enabled Whether the integration is enabled Enabled *bool `json:"enabled,omitempty"` From 5bf2372c4d03e1c4952ef21fbc02f6dec26d2984 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 2 Apr 2026 20:46:14 +0800 Subject: [PATCH 272/374] [management] Fix L4 service creation deadlock on single-connection databases (#5779) --- .../reverseproxy/service/manager/manager.go | 66 ++++++++++++++++--- .../modules/reverseproxy/service/service.go | 5 ++ 2 files changed, 62 insertions(+), 9 deletions(-) diff --git a/management/internals/modules/reverseproxy/service/manager/manager.go b/management/internals/modules/reverseproxy/service/manager/manager.go index db393ef38..989187826 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager.go +++ b/management/internals/modules/reverseproxy/service/manager/manager.go @@ -288,6 +288,8 @@ func (m *Manager) validateSubdomainRequirement(ctx context.Context, domain, clus } func (m *Manager) persistNewService(ctx context.Context, accountID string, svc *service.Service) error { + customPorts := m.clusterCustomPorts(ctx, svc) + return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { if svc.Domain != "" { if err := m.checkDomainAvailable(ctx, transaction, svc.Domain, ""); err != nil { @@ -295,7 +297,7 @@ func (m *Manager) persistNewService(ctx context.Context, accountID string, svc * } } - if err := m.ensureL4Port(ctx, transaction, svc); err != nil { + if err := m.ensureL4Port(ctx, transaction, svc, customPorts); err != nil { return err } @@ -315,12 +317,23 @@ func (m *Manager) persistNewService(ctx context.Context, accountID string, svc * }) } -// ensureL4Port auto-assigns a listen port when needed and validates cluster support. -func (m *Manager) ensureL4Port(ctx context.Context, tx store.Store, svc *service.Service) error { +// clusterCustomPorts queries whether the cluster supports custom ports. +// Must be called before entering a transaction: the underlying query uses +// the main DB handle, which deadlocks when called inside a transaction +// that already holds the connection. +func (m *Manager) clusterCustomPorts(ctx context.Context, svc *service.Service) *bool { + if !service.IsL4Protocol(svc.Mode) { + return nil + } + return m.capabilities.ClusterSupportsCustomPorts(ctx, svc.ProxyCluster) +} + +// ensureL4Port auto-assigns a listen port when needed and validates cluster support. +// customPorts must be pre-computed via clusterCustomPorts before entering a transaction. +func (m *Manager) ensureL4Port(ctx context.Context, tx store.Store, svc *service.Service, customPorts *bool) error { if !service.IsL4Protocol(svc.Mode) { return nil } - customPorts := m.capabilities.ClusterSupportsCustomPorts(ctx, svc.ProxyCluster) if service.IsPortBasedProtocol(svc.Mode) && svc.ListenPort > 0 && (customPorts == nil || !*customPorts) { if svc.Source != service.SourceEphemeral { return status.Errorf(status.InvalidArgument, "custom ports not supported on cluster %s", svc.ProxyCluster) @@ -404,12 +417,14 @@ func (m *Manager) assignPort(ctx context.Context, tx store.Store, cluster string // The count and exists queries use FOR UPDATE locking to serialize concurrent creates // for the same peer, preventing the per-peer limit from being bypassed. func (m *Manager) persistNewEphemeralService(ctx context.Context, accountID, peerID string, svc *service.Service) error { + customPorts := m.clusterCustomPorts(ctx, svc) + return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { if err := m.validateEphemeralPreconditions(ctx, transaction, accountID, peerID, svc); err != nil { return err } - if err := m.ensureL4Port(ctx, transaction, svc); err != nil { + if err := m.ensureL4Port(ctx, transaction, svc, customPorts); err != nil { return err } @@ -512,16 +527,49 @@ type serviceUpdateInfo struct { } func (m *Manager) persistServiceUpdate(ctx context.Context, accountID string, service *service.Service) (*serviceUpdateInfo, error) { + effectiveCluster, err := m.resolveEffectiveCluster(ctx, accountID, service) + if err != nil { + return nil, err + } + + svcForCaps := *service + svcForCaps.ProxyCluster = effectiveCluster + customPorts := m.clusterCustomPorts(ctx, &svcForCaps) + var updateInfo serviceUpdateInfo - err := m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { - return m.executeServiceUpdate(ctx, transaction, accountID, service, &updateInfo) + err = m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + return m.executeServiceUpdate(ctx, transaction, accountID, service, &updateInfo, customPorts) }) return &updateInfo, err } -func (m *Manager) executeServiceUpdate(ctx context.Context, transaction store.Store, accountID string, service *service.Service, updateInfo *serviceUpdateInfo) error { +// resolveEffectiveCluster determines the cluster that will be used after the update. +// It reads the existing service without locking and derives the new cluster if the domain changed. +func (m *Manager) resolveEffectiveCluster(ctx context.Context, accountID string, svc *service.Service) (string, error) { + existing, err := m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, svc.ID) + if err != nil { + return "", err + } + + if existing.Domain == svc.Domain { + return existing.ProxyCluster, nil + } + + if m.clusterDeriver != nil { + derived, err := m.clusterDeriver.DeriveClusterFromDomain(ctx, accountID, svc.Domain) + if err != nil { + log.WithError(err).Warnf("could not derive cluster from domain %s", svc.Domain) + } else { + return derived, nil + } + } + + return existing.ProxyCluster, nil +} + +func (m *Manager) executeServiceUpdate(ctx context.Context, transaction store.Store, accountID string, service *service.Service, updateInfo *serviceUpdateInfo, customPorts *bool) error { existingService, err := transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, service.ID) if err != nil { return err @@ -558,7 +606,7 @@ func (m *Manager) executeServiceUpdate(ctx context.Context, transaction store.St m.preserveListenPort(service, existingService) updateInfo.serviceEnabledChanged = existingService.Enabled != service.Enabled - if err := m.ensureL4Port(ctx, transaction, service); err != nil { + if err := m.ensureL4Port(ctx, transaction, service, customPorts); err != nil { return err } if err := m.checkPortConflict(ctx, transaction, service); err != nil { diff --git a/management/internals/modules/reverseproxy/service/service.go b/management/internals/modules/reverseproxy/service/service.go index d956013ea..60b36917c 100644 --- a/management/internals/modules/reverseproxy/service/service.go +++ b/management/internals/modules/reverseproxy/service/service.go @@ -787,6 +787,11 @@ func (s *Service) validateHTTPTargets() error { } func (s *Service) validateL4Target(target *Target) error { + // L4 services have a single target; per-target disable is meaningless + // (use the service-level Enabled flag instead). Force it on so that + // buildPathMappings always includes the target in the proto. + target.Enabled = true + if target.Port == 0 { return errors.New("target port is required for L4 services") } From 9d1a37c64455798aa4b15b7404700d78993588c4 Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Thu, 2 Apr 2026 19:21:00 +0300 Subject: [PATCH 273/374] [management,client] Revert gRPC client secret removal (#5781) * This reverts commit e5914e4e8b0ae030701529eb4f492d49eb10534b Signed-off-by: bcmmbaga * Deprecate client secret in proto Signed-off-by: bcmmbaga * Fix lint Signed-off-by: bcmmbaga --------- Signed-off-by: bcmmbaga --- client/internal/auth/auth.go | 2 ++ client/internal/auth/device_flow.go | 2 ++ client/internal/auth/pkce_flow.go | 5 ++++- management/internals/shared/grpc/server.go | 2 ++ shared/management/client/client_test.go | 4 +++- 5 files changed, 13 insertions(+), 2 deletions(-) diff --git a/client/internal/auth/auth.go b/client/internal/auth/auth.go index 7879848e3..bc768748e 100644 --- a/client/internal/auth/auth.go +++ b/client/internal/auth/auth.go @@ -221,6 +221,7 @@ func (a *Auth) getPKCEFlow(client *mgm.GrpcClient) (*PKCEAuthorizationFlow, erro config := &PKCEAuthProviderConfig{ Audience: protoConfig.GetAudience(), ClientID: protoConfig.GetClientID(), + ClientSecret: protoConfig.GetClientSecret(), //nolint:staticcheck TokenEndpoint: protoConfig.GetTokenEndpoint(), AuthorizationEndpoint: protoConfig.GetAuthorizationEndpoint(), Scope: protoConfig.GetScope(), @@ -265,6 +266,7 @@ func (a *Auth) getDeviceFlow(client *mgm.GrpcClient) (*DeviceAuthorizationFlow, config := &DeviceAuthProviderConfig{ Audience: protoConfig.GetAudience(), ClientID: protoConfig.GetClientID(), + ClientSecret: protoConfig.GetClientSecret(), //nolint:staticcheck Domain: protoConfig.Domain, TokenEndpoint: protoConfig.GetTokenEndpoint(), DeviceAuthEndpoint: protoConfig.GetDeviceAuthEndpoint(), diff --git a/client/internal/auth/device_flow.go b/client/internal/auth/device_flow.go index f1dcfbdc9..e33765300 100644 --- a/client/internal/auth/device_flow.go +++ b/client/internal/auth/device_flow.go @@ -29,6 +29,8 @@ var _ OAuthFlow = &DeviceAuthorizationFlow{} type DeviceAuthProviderConfig struct { // ClientID An IDP application client id ClientID string + // ClientSecret An IDP application client secret + ClientSecret string // Domain An IDP API domain // Deprecated. Use OIDCConfigEndpoint instead Domain string diff --git a/client/internal/auth/pkce_flow.go b/client/internal/auth/pkce_flow.go index f8d733769..2e16836d8 100644 --- a/client/internal/auth/pkce_flow.go +++ b/client/internal/auth/pkce_flow.go @@ -38,6 +38,8 @@ const ( type PKCEAuthProviderConfig struct { // ClientID An IDP application client id ClientID string + // ClientSecret An IDP application client secret + ClientSecret string // Audience An Audience for to authorization validation Audience string // TokenEndpoint is the endpoint of an IDP manager where clients can obtain access token @@ -109,7 +111,8 @@ func NewPKCEAuthorizationFlow(config PKCEAuthProviderConfig) (*PKCEAuthorization } cfg := &oauth2.Config{ - ClientID: config.ClientID, + ClientID: config.ClientID, + ClientSecret: config.ClientSecret, Endpoint: oauth2.Endpoint{ AuthURL: config.AuthorizationEndpoint, TokenURL: config.TokenEndpoint, diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index 4f0fcc545..6e8358f02 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -966,6 +966,7 @@ func (s *Server) GetDeviceAuthorizationFlow(ctx context.Context, req *proto.Encr Provider: proto.DeviceAuthorizationFlowProvider(provider), ProviderConfig: &proto.ProviderConfig{ ClientID: s.config.DeviceAuthorizationFlow.ProviderConfig.ClientID, + ClientSecret: s.config.DeviceAuthorizationFlow.ProviderConfig.ClientSecret, Domain: s.config.DeviceAuthorizationFlow.ProviderConfig.Domain, Audience: s.config.DeviceAuthorizationFlow.ProviderConfig.Audience, DeviceAuthEndpoint: s.config.DeviceAuthorizationFlow.ProviderConfig.DeviceAuthEndpoint, @@ -1036,6 +1037,7 @@ func (s *Server) GetPKCEAuthorizationFlow(ctx context.Context, req *proto.Encryp ProviderConfig: &proto.ProviderConfig{ Audience: s.config.PKCEAuthorizationFlow.ProviderConfig.Audience, ClientID: s.config.PKCEAuthorizationFlow.ProviderConfig.ClientID, + ClientSecret: s.config.PKCEAuthorizationFlow.ProviderConfig.ClientSecret, TokenEndpoint: s.config.PKCEAuthorizationFlow.ProviderConfig.TokenEndpoint, AuthorizationEndpoint: s.config.PKCEAuthorizationFlow.ProviderConfig.AuthorizationEndpoint, Scope: s.config.PKCEAuthorizationFlow.ProviderConfig.Scope, diff --git a/shared/management/client/client_test.go b/shared/management/client/client_test.go index bfb00c0b7..01957154c 100644 --- a/shared/management/client/client_test.go +++ b/shared/management/client/client_test.go @@ -545,7 +545,8 @@ func Test_GetPKCEAuthorizationFlow(t *testing.T) { expectedFlowInfo := &mgmtProto.PKCEAuthorizationFlow{ ProviderConfig: &mgmtProto.ProviderConfig{ - ClientID: "client", + ClientID: "client", + ClientSecret: "secret", }, } @@ -568,4 +569,5 @@ func Test_GetPKCEAuthorizationFlow(t *testing.T) { } assert.Equal(t, expectedFlowInfo.ProviderConfig.ClientID, flowInfo.ProviderConfig.ClientID, "provider configured client ID should match") + assert.Equal(t, expectedFlowInfo.ProviderConfig.ClientSecret, flowInfo.ProviderConfig.ClientSecret, "provider configured client secret should match") //nolint:staticcheck } From 28fbf96b2ad00f7bf523c5cda2ca0b9480858e00 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Fri, 3 Apr 2026 03:45:49 +0800 Subject: [PATCH 274/374] [client] Fix flaky TestServiceLifecycle/Restart on FreeBSD (#5786) --- client/cmd/service_test.go | 46 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/client/cmd/service_test.go b/client/cmd/service_test.go index 6d75ca524..ce6f71550 100644 --- a/client/cmd/service_test.go +++ b/client/cmd/service_test.go @@ -4,7 +4,9 @@ import ( "context" "fmt" "os" + "os/signal" "runtime" + "syscall" "testing" "time" @@ -13,6 +15,22 @@ import ( "github.com/stretchr/testify/require" ) +// TestMain intercepts when this test binary is run as a daemon subprocess. +// On FreeBSD, the rc.d service script runs the binary via daemon(8) -r with +// "service run ..." arguments. Since the test binary can't handle cobra CLI +// args, it exits immediately, causing daemon -r to respawn rapidly until +// hitting the rate limit and exiting. This makes service restart unreliable. +// Blocking here keeps the subprocess alive until the init system sends SIGTERM. +func TestMain(m *testing.M) { + if len(os.Args) > 2 && os.Args[1] == "service" && os.Args[2] == "run" { + sig := make(chan os.Signal, 1) + signal.Notify(sig, syscall.SIGTERM, os.Interrupt) + <-sig + return + } + os.Exit(m.Run()) +} + const ( serviceStartTimeout = 10 * time.Second serviceStopTimeout = 5 * time.Second @@ -79,6 +97,34 @@ func TestServiceLifecycle(t *testing.T) { logLevel = "info" daemonAddr = fmt.Sprintf("unix://%s/netbird-test.sock", tempDir) + // Ensure cleanup even if a subtest fails and Stop/Uninstall subtests don't run. + t.Cleanup(func() { + cfg, err := newSVCConfig() + if err != nil { + t.Errorf("cleanup: create service config: %v", err) + return + } + ctxSvc, cancel := context.WithCancel(context.Background()) + defer cancel() + s, err := newSVC(newProgram(ctxSvc, cancel), cfg) + if err != nil { + t.Errorf("cleanup: create service: %v", err) + return + } + + // If the subtests already cleaned up, there's nothing to do. + if _, err := s.Status(); err != nil { + return + } + + if err := s.Stop(); err != nil { + t.Errorf("cleanup: stop service: %v", err) + } + if err := s.Uninstall(); err != nil { + t.Errorf("cleanup: uninstall service: %v", err) + } + }) + ctx := context.Background() t.Run("Install", func(t *testing.T) { From decb5dd3af84d70844208d25ffea1f3409ebe5bf Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Sun, 5 Apr 2026 13:44:53 +0200 Subject: [PATCH 275/374] [client] Add GetSelectedClientRoutes to route manager and update DNS route check (#5802) - DNS resolution broke after deselecting an exit node because the route checker used all client routes (including deselected ones) to decide how to forward upstream DNS queries - Added GetSelectedClientRoutes() to the route manager that filters out deselected exit nodes, and switched the DNS route checker to use it - Confirmed fix via device testing: after deselecting exit node, DNS queries now correctly use a regular network socket instead of binding to the utun interface --- client/internal/engine.go | 2 +- client/internal/routemanager/manager.go | 11 +++++++++++ client/internal/routemanager/mock.go | 11 ++++++++++- 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/client/internal/engine.go b/client/internal/engine.go index 7b100bd0c..0f09ee364 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -500,7 +500,7 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) e.routeManager.SetRouteChangeListener(e.mobileDep.NetworkChangeListener) e.dnsServer.SetRouteChecker(func(ip netip.Addr) bool { - for _, routes := range e.routeManager.GetClientRoutes() { + for _, routes := range e.routeManager.GetSelectedClientRoutes() { for _, r := range routes { if r.Network.Contains(ip) { return true diff --git a/client/internal/routemanager/manager.go b/client/internal/routemanager/manager.go index 9afe2049d..e7ca44239 100644 --- a/client/internal/routemanager/manager.go +++ b/client/internal/routemanager/manager.go @@ -52,6 +52,7 @@ type Manager interface { TriggerSelection(route.HAMap) GetRouteSelector() *routeselector.RouteSelector GetClientRoutes() route.HAMap + GetSelectedClientRoutes() route.HAMap GetClientRoutesWithNetID() map[route.NetID][]*route.Route SetRouteChangeListener(listener listener.NetworkChangeListener) InitialRouteRange() []string @@ -465,6 +466,16 @@ func (m *DefaultManager) GetClientRoutes() route.HAMap { return maps.Clone(m.clientRoutes) } +// GetSelectedClientRoutes returns only the currently selected/active client routes, +// filtering out deselected exit nodes. Use this instead of GetClientRoutes when checking +// if traffic should be routed through the tunnel. +func (m *DefaultManager) GetSelectedClientRoutes() route.HAMap { + m.mux.Lock() + defer m.mux.Unlock() + + return m.routeSelector.FilterSelectedExitNodes(maps.Clone(m.clientRoutes)) +} + // GetClientRoutesWithNetID returns the current routes from the route map, but the keys consist of the network ID only func (m *DefaultManager) GetClientRoutesWithNetID() map[route.NetID][]*route.Route { m.mux.Lock() diff --git a/client/internal/routemanager/mock.go b/client/internal/routemanager/mock.go index 6b06144b2..66b5e30dd 100644 --- a/client/internal/routemanager/mock.go +++ b/client/internal/routemanager/mock.go @@ -18,6 +18,7 @@ type MockManager struct { TriggerSelectionFunc func(haMap route.HAMap) GetRouteSelectorFunc func() *routeselector.RouteSelector GetClientRoutesFunc func() route.HAMap + GetSelectedClientRoutesFunc func() route.HAMap GetClientRoutesWithNetIDFunc func() map[route.NetID][]*route.Route StopFunc func(manager *statemanager.Manager) } @@ -61,7 +62,7 @@ func (m *MockManager) GetRouteSelector() *routeselector.RouteSelector { return nil } -// GetClientRoutes mock implementation of GetClientRoutes from Manager interface +// GetClientRoutes mock implementation of GetClientRoutes from the Manager interface func (m *MockManager) GetClientRoutes() route.HAMap { if m.GetClientRoutesFunc != nil { return m.GetClientRoutesFunc() @@ -69,6 +70,14 @@ func (m *MockManager) GetClientRoutes() route.HAMap { return nil } +// GetSelectedClientRoutes mock implementation of GetSelectedClientRoutes from the Manager interface +func (m *MockManager) GetSelectedClientRoutes() route.HAMap { + if m.GetSelectedClientRoutesFunc != nil { + return m.GetSelectedClientRoutesFunc() + } + return nil +} + // GetClientRoutesWithNetID mock implementation of GetClientRoutesWithNetID from Manager interface func (m *MockManager) GetClientRoutesWithNetID() map[route.NetID][]*route.Route { if m.GetClientRoutesWithNetIDFunc != nil { From 435203b13ba4d6fc221015734f3908785fc4955a Mon Sep 17 00:00:00 2001 From: Eduard Gert Date: Tue, 7 Apr 2026 10:35:09 +0200 Subject: [PATCH 276/374] [proxy] Update proxy web packages (#5661) * [proxy] Update package-lock.json * Update packages --- proxy/web/package-lock.json | 260 ++++++++++++++++++------------------ proxy/web/package.json | 4 +- 2 files changed, 132 insertions(+), 132 deletions(-) diff --git a/proxy/web/package-lock.json b/proxy/web/package-lock.json index d16196d77..1611323a7 100644 --- a/proxy/web/package-lock.json +++ b/proxy/web/package-lock.json @@ -15,7 +15,7 @@ "tailwind-merge": "^2.6.0" }, "devDependencies": { - "@eslint/js": "^9.39.1", + "@eslint/js": "9.39.2", "@tailwindcss/vite": "^4.1.18", "@types/node": "^24.10.1", "@types/react": "^19.2.5", @@ -29,7 +29,7 @@ "tsx": "^4.21.0", "typescript": "~5.9.3", "typescript-eslint": "^8.46.4", - "vite": "^7.2.4" + "vite": "7.3.2" } }, "node_modules/@babel/code-frame": { @@ -1024,9 +1024,9 @@ "license": "MIT" }, "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.57.1.tgz", - "integrity": "sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.0.tgz", + "integrity": "sha512-WOhNW9K8bR3kf4zLxbfg6Pxu2ybOUbB2AjMDHSQx86LIF4rH4Ft7vmMwNt0loO0eonglSNy4cpD3MKXXKQu0/A==", "cpu": [ "arm" ], @@ -1038,9 +1038,9 @@ ] }, "node_modules/@rollup/rollup-android-arm64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.57.1.tgz", - "integrity": "sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.60.0.tgz", + "integrity": "sha512-u6JHLll5QKRvjciE78bQXDmqRqNs5M/3GVqZeMwvmjaNODJih/WIrJlFVEihvV0MiYFmd+ZyPr9wxOVbPAG2Iw==", "cpu": [ "arm64" ], @@ -1052,9 +1052,9 @@ ] }, "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.57.1.tgz", - "integrity": "sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.60.0.tgz", + "integrity": "sha512-qEF7CsKKzSRc20Ciu2Zw1wRrBz4g56F7r/vRwY430UPp/nt1x21Q/fpJ9N5l47WWvJlkNCPJz3QRVw008fi7yA==", "cpu": [ "arm64" ], @@ -1066,9 +1066,9 @@ ] }, "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.57.1.tgz", - "integrity": "sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.60.0.tgz", + "integrity": "sha512-WADYozJ4QCnXCH4wPB+3FuGmDPoFseVCUrANmA5LWwGmC6FL14BWC7pcq+FstOZv3baGX65tZ378uT6WG8ynTw==", "cpu": [ "x64" ], @@ -1080,9 +1080,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-arm64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.57.1.tgz", - "integrity": "sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.60.0.tgz", + "integrity": "sha512-6b8wGHJlDrGeSE3aH5mGNHBjA0TTkxdoNHik5EkvPHCt351XnigA4pS7Wsj/Eo9Y8RBU6f35cjN9SYmCFBtzxw==", "cpu": [ "arm64" ], @@ -1094,9 +1094,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-x64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.57.1.tgz", - "integrity": "sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.60.0.tgz", + "integrity": "sha512-h25Ga0t4jaylMB8M/JKAyrvvfxGRjnPQIR8lnCayyzEjEOx2EJIlIiMbhpWxDRKGKF8jbNH01NnN663dH638mA==", "cpu": [ "x64" ], @@ -1108,9 +1108,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.57.1.tgz", - "integrity": "sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.60.0.tgz", + "integrity": "sha512-RzeBwv0B3qtVBWtcuABtSuCzToo2IEAIQrcyB/b2zMvBWVbjo8bZDjACUpnaafaxhTw2W+imQbP2BD1usasK4g==", "cpu": [ "arm" ], @@ -1122,9 +1122,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.57.1.tgz", - "integrity": "sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.60.0.tgz", + "integrity": "sha512-Sf7zusNI2CIU1HLzuu9Tc5YGAHEZs5Lu7N1ssJG4Tkw6e0MEsN7NdjUDDfGNHy2IU+ENyWT+L2obgWiguWibWQ==", "cpu": [ "arm" ], @@ -1136,9 +1136,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.57.1.tgz", - "integrity": "sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.60.0.tgz", + "integrity": "sha512-DX2x7CMcrJzsE91q7/O02IJQ5/aLkVtYFryqCjduJhUfGKG6yJV8hxaw8pZa93lLEpPTP/ohdN4wFz7yp/ry9A==", "cpu": [ "arm64" ], @@ -1150,9 +1150,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.57.1.tgz", - "integrity": "sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.60.0.tgz", + "integrity": "sha512-09EL+yFVbJZlhcQfShpswwRZ0Rg+z/CsSELFCnPt3iK+iqwGsI4zht3secj5vLEs957QvFFXnzAT0FFPIxSrkQ==", "cpu": [ "arm64" ], @@ -1164,9 +1164,9 @@ ] }, "node_modules/@rollup/rollup-linux-loong64-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.57.1.tgz", - "integrity": "sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.60.0.tgz", + "integrity": "sha512-i9IcCMPr3EXm8EQg5jnja0Zyc1iFxJjZWlb4wr7U2Wx/GrddOuEafxRdMPRYVaXjgbhvqalp6np07hN1w9kAKw==", "cpu": [ "loong64" ], @@ -1178,9 +1178,9 @@ ] }, "node_modules/@rollup/rollup-linux-loong64-musl": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.57.1.tgz", - "integrity": "sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.60.0.tgz", + "integrity": "sha512-DGzdJK9kyJ+B78MCkWeGnpXJ91tK/iKA6HwHxF4TAlPIY7GXEvMe8hBFRgdrR9Ly4qebR/7gfUs9y2IoaVEyog==", "cpu": [ "loong64" ], @@ -1192,9 +1192,9 @@ ] }, "node_modules/@rollup/rollup-linux-ppc64-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.57.1.tgz", - "integrity": "sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.60.0.tgz", + "integrity": "sha512-RwpnLsqC8qbS8z1H1AxBA1H6qknR4YpPR9w2XX0vo2Sz10miu57PkNcnHVaZkbqyw/kUWfKMI73jhmfi9BRMUQ==", "cpu": [ "ppc64" ], @@ -1206,9 +1206,9 @@ ] }, "node_modules/@rollup/rollup-linux-ppc64-musl": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.57.1.tgz", - "integrity": "sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.60.0.tgz", + "integrity": "sha512-Z8pPf54Ly3aqtdWC3G4rFigZgNvd+qJlOE52fmko3KST9SoGfAdSRCwyoyG05q1HrrAblLbk1/PSIV+80/pxLg==", "cpu": [ "ppc64" ], @@ -1220,9 +1220,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.57.1.tgz", - "integrity": "sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.60.0.tgz", + "integrity": "sha512-3a3qQustp3COCGvnP4SvrMHnPQ9d1vzCakQVRTliaz8cIp/wULGjiGpbcqrkv0WrHTEp8bQD/B3HBjzujVWLOA==", "cpu": [ "riscv64" ], @@ -1234,9 +1234,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-musl": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.57.1.tgz", - "integrity": "sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.60.0.tgz", + "integrity": "sha512-pjZDsVH/1VsghMJ2/kAaxt6dL0psT6ZexQVrijczOf+PeP2BUqTHYejk3l6TlPRydggINOeNRhvpLa0AYpCWSQ==", "cpu": [ "riscv64" ], @@ -1248,9 +1248,9 @@ ] }, "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.57.1.tgz", - "integrity": "sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.60.0.tgz", + "integrity": "sha512-3ObQs0BhvPgiUVZrN7gqCSvmFuMWvWvsjG5ayJ3Lraqv+2KhOsp+pUbigqbeWqueGIsnn+09HBw27rJ+gYK4VQ==", "cpu": [ "s390x" ], @@ -1262,9 +1262,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.57.1.tgz", - "integrity": "sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.60.0.tgz", + "integrity": "sha512-EtylprDtQPdS5rXvAayrNDYoJhIz1/vzN2fEubo3yLE7tfAw+948dO0g4M0vkTVFhKojnF+n6C8bDNe+gDRdTg==", "cpu": [ "x64" ], @@ -1276,9 +1276,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.57.1.tgz", - "integrity": "sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.60.0.tgz", + "integrity": "sha512-k09oiRCi/bHU9UVFqD17r3eJR9bn03TyKraCrlz5ULFJGdJGi7VOmm9jl44vOJvRJ6P7WuBi/s2A97LxxHGIdw==", "cpu": [ "x64" ], @@ -1290,9 +1290,9 @@ ] }, "node_modules/@rollup/rollup-openbsd-x64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.57.1.tgz", - "integrity": "sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.60.0.tgz", + "integrity": "sha512-1o/0/pIhozoSaDJoDcec+IVLbnRtQmHwPV730+AOD29lHEEo4F5BEUB24H0OBdhbBBDwIOSuf7vgg0Ywxdfiiw==", "cpu": [ "x64" ], @@ -1304,9 +1304,9 @@ ] }, "node_modules/@rollup/rollup-openharmony-arm64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.57.1.tgz", - "integrity": "sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.60.0.tgz", + "integrity": "sha512-pESDkos/PDzYwtyzB5p/UoNU/8fJo68vcXM9ZW2V0kjYayj1KaaUfi1NmTUTUpMn4UhU4gTuK8gIaFO4UGuMbA==", "cpu": [ "arm64" ], @@ -1318,9 +1318,9 @@ ] }, "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.57.1.tgz", - "integrity": "sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.60.0.tgz", + "integrity": "sha512-hj1wFStD7B1YBeYmvY+lWXZ7ey73YGPcViMShYikqKT1GtstIKQAtfUI6yrzPjAy/O7pO0VLXGmUVWXQMaYgTQ==", "cpu": [ "arm64" ], @@ -1332,9 +1332,9 @@ ] }, "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.57.1.tgz", - "integrity": "sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.60.0.tgz", + "integrity": "sha512-SyaIPFoxmUPlNDq5EHkTbiKzmSEmq/gOYFI/3HHJ8iS/v1mbugVa7dXUzcJGQfoytp9DJFLhHH4U3/eTy2Bq4w==", "cpu": [ "ia32" ], @@ -1346,9 +1346,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.57.1.tgz", - "integrity": "sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.60.0.tgz", + "integrity": "sha512-RdcryEfzZr+lAr5kRm2ucN9aVlCCa2QNq4hXelZxb8GG0NJSazq44Z3PCCc8wISRuCVnGs0lQJVX5Vp6fKA+IA==", "cpu": [ "x64" ], @@ -1360,9 +1360,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.57.1.tgz", - "integrity": "sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.60.0.tgz", + "integrity": "sha512-PrsWNQ8BuE00O3Xsx3ALh2Df8fAj9+cvvX9AIA6o4KpATR98c9mud4XtDWVvsEuyia5U4tVSTKygawyJkjm60w==", "cpu": [ "x64" ], @@ -1926,9 +1926,9 @@ } }, "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", - "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.3.tgz", + "integrity": "sha512-MCV/fYJEbqx68aE58kv2cA/kiky1G8vux3OR6/jbS+jIMe/6fJWa0DTzJU7dqijOWYwHi1t29FlfYI9uytqlpA==", "dev": true, "license": "MIT", "dependencies": { @@ -1936,13 +1936,13 @@ } }, "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", - "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "version": "9.0.9", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.9.tgz", + "integrity": "sha512-OBwBN9AL4dqmETlpS2zasx+vTeWclWzkblfZk7KTA5j3jeOONz/tRCnZomUyvNg83wL5Zv9Ss6HMJXAgL8R2Yg==", "dev": true, "license": "ISC", "dependencies": { - "brace-expansion": "^2.0.1" + "brace-expansion": "^2.0.2" }, "engines": { "node": ">=16 || 14 >=14.17" @@ -2052,9 +2052,9 @@ } }, "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.14.0.tgz", + "integrity": "sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==", "dev": true, "license": "MIT", "dependencies": { @@ -2109,9 +2109,9 @@ } }, "node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.13.tgz", + "integrity": "sha512-9ZLprWS6EENmhEOpjCYW2c8VkmOvckIJZfkr7rBW6dObmfgJ/L1GpSYW5Hpo9lDz4D1+n0Ckz8rU7FwHDQiG/w==", "dev": true, "license": "MIT", "dependencies": { @@ -2657,9 +2657,9 @@ } }, "node_modules/flatted": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", - "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.4.2.tgz", + "integrity": "sha512-PjDse7RzhcPkIJwy5t7KPWQSZ9cAbzQXcafsetQoD7sOJRQlGikNbx7yZp2OotDnJyrDcbyRq3Ttb18iYOqkxA==", "dev": true, "license": "ISC" }, @@ -3243,9 +3243,9 @@ } }, "node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.5.tgz", + "integrity": "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==", "dev": true, "license": "ISC", "dependencies": { @@ -3386,9 +3386,9 @@ "license": "ISC" }, "node_modules/picomatch": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", - "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz", + "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", "dev": true, "license": "MIT", "peer": true, @@ -3501,9 +3501,9 @@ } }, "node_modules/rollup": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.57.1.tgz", - "integrity": "sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.60.0.tgz", + "integrity": "sha512-yqjxruMGBQJ2gG4HtjZtAfXArHomazDHoFwFFmZZl0r7Pdo7qCIXKqKHZc8yeoMgzJJ+pO6pEEHa+V7uzWlrAQ==", "dev": true, "license": "MIT", "dependencies": { @@ -3517,31 +3517,31 @@ "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.57.1", - "@rollup/rollup-android-arm64": "4.57.1", - "@rollup/rollup-darwin-arm64": "4.57.1", - "@rollup/rollup-darwin-x64": "4.57.1", - "@rollup/rollup-freebsd-arm64": "4.57.1", - "@rollup/rollup-freebsd-x64": "4.57.1", - "@rollup/rollup-linux-arm-gnueabihf": "4.57.1", - "@rollup/rollup-linux-arm-musleabihf": "4.57.1", - "@rollup/rollup-linux-arm64-gnu": "4.57.1", - "@rollup/rollup-linux-arm64-musl": "4.57.1", - "@rollup/rollup-linux-loong64-gnu": "4.57.1", - "@rollup/rollup-linux-loong64-musl": "4.57.1", - "@rollup/rollup-linux-ppc64-gnu": "4.57.1", - "@rollup/rollup-linux-ppc64-musl": "4.57.1", - "@rollup/rollup-linux-riscv64-gnu": "4.57.1", - "@rollup/rollup-linux-riscv64-musl": "4.57.1", - "@rollup/rollup-linux-s390x-gnu": "4.57.1", - "@rollup/rollup-linux-x64-gnu": "4.57.1", - "@rollup/rollup-linux-x64-musl": "4.57.1", - "@rollup/rollup-openbsd-x64": "4.57.1", - "@rollup/rollup-openharmony-arm64": "4.57.1", - "@rollup/rollup-win32-arm64-msvc": "4.57.1", - "@rollup/rollup-win32-ia32-msvc": "4.57.1", - "@rollup/rollup-win32-x64-gnu": "4.57.1", - "@rollup/rollup-win32-x64-msvc": "4.57.1", + "@rollup/rollup-android-arm-eabi": "4.60.0", + "@rollup/rollup-android-arm64": "4.60.0", + "@rollup/rollup-darwin-arm64": "4.60.0", + "@rollup/rollup-darwin-x64": "4.60.0", + "@rollup/rollup-freebsd-arm64": "4.60.0", + "@rollup/rollup-freebsd-x64": "4.60.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.60.0", + "@rollup/rollup-linux-arm-musleabihf": "4.60.0", + "@rollup/rollup-linux-arm64-gnu": "4.60.0", + "@rollup/rollup-linux-arm64-musl": "4.60.0", + "@rollup/rollup-linux-loong64-gnu": "4.60.0", + "@rollup/rollup-linux-loong64-musl": "4.60.0", + "@rollup/rollup-linux-ppc64-gnu": "4.60.0", + "@rollup/rollup-linux-ppc64-musl": "4.60.0", + "@rollup/rollup-linux-riscv64-gnu": "4.60.0", + "@rollup/rollup-linux-riscv64-musl": "4.60.0", + "@rollup/rollup-linux-s390x-gnu": "4.60.0", + "@rollup/rollup-linux-x64-gnu": "4.60.0", + "@rollup/rollup-linux-x64-musl": "4.60.0", + "@rollup/rollup-openbsd-x64": "4.60.0", + "@rollup/rollup-openharmony-arm64": "4.60.0", + "@rollup/rollup-win32-arm64-msvc": "4.60.0", + "@rollup/rollup-win32-ia32-msvc": "4.60.0", + "@rollup/rollup-win32-x64-gnu": "4.60.0", + "@rollup/rollup-win32-x64-msvc": "4.60.0", "fsevents": "~2.3.2" } }, @@ -3803,9 +3803,9 @@ } }, "node_modules/vite": { - "version": "7.3.1", - "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz", - "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==", + "version": "7.3.2", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.2.tgz", + "integrity": "sha512-Bby3NOsna2jsjfLVOHKes8sGwgl4TT0E6vvpYgnAYDIF/tie7MRaFthmKuHx1NSXjiTueXH3do80FMQgvEktRg==", "dev": true, "license": "MIT", "peer": true, diff --git a/proxy/web/package.json b/proxy/web/package.json index 97ec1ec0d..9a7c84ed4 100644 --- a/proxy/web/package.json +++ b/proxy/web/package.json @@ -17,7 +17,7 @@ "tailwind-merge": "^2.6.0" }, "devDependencies": { - "@eslint/js": "^9.39.1", + "@eslint/js": "9.39.2", "@tailwindcss/vite": "^4.1.18", "@types/node": "^24.10.1", "@types/react": "^19.2.5", @@ -31,6 +31,6 @@ "tsx": "^4.21.0", "typescript": "~5.9.3", "typescript-eslint": "^8.46.4", - "vite": "^7.2.4" + "vite": "7.3.2" } } From 0efef671d7663e3bd5d007e779cc50644f8fae21 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 7 Apr 2026 12:18:21 +0200 Subject: [PATCH 277/374] [client] Unexport GetServerPublicKey, add HealthCheck method (#5735) * Unexport GetServerPublicKey, add HealthCheck method Internalize server key fetching into Login, Register, GetDeviceAuthorizationFlow, and GetPKCEAuthorizationFlow methods, removing the need for callers to fetch and pass the key separately. Replace the exported GetServerPublicKey with a HealthCheck() error method for connection validation, keeping IsHealthy() bool for non-blocking background monitoring. Fix test encryption to use correct key pairs (client public key as remotePubKey instead of server private key). * Refactor `doMgmLogin` to return only error, removing unused response --- client/internal/auth/auth.go | 42 +++---------- client/internal/connect.go | 13 +--- client/internal/engine_test.go | 13 ++-- client/internal/profilemanager/config.go | 3 +- shared/management/client/client.go | 16 ++--- shared/management/client/client_test.go | 46 ++++---------- shared/management/client/grpc.go | 76 ++++++++++++++++-------- shared/management/client/mock.go | 42 +++++++------ 8 files changed, 106 insertions(+), 145 deletions(-) diff --git a/client/internal/auth/auth.go b/client/internal/auth/auth.go index bc768748e..bdfd07430 100644 --- a/client/internal/auth/auth.go +++ b/client/internal/auth/auth.go @@ -155,7 +155,7 @@ func (a *Auth) IsLoginRequired(ctx context.Context) (bool, error) { var needsLogin bool err = a.withRetry(ctx, func(client *mgm.GrpcClient) error { - _, _, err := a.doMgmLogin(client, ctx, pubSSHKey) + err := a.doMgmLogin(client, ctx, pubSSHKey) if isLoginNeeded(err) { needsLogin = true return nil @@ -179,8 +179,8 @@ func (a *Auth) Login(ctx context.Context, setupKey string, jwtToken string) (err var isAuthError bool err = a.withRetry(ctx, func(client *mgm.GrpcClient) error { - serverKey, _, err := a.doMgmLogin(client, ctx, pubSSHKey) - if serverKey != nil && isRegistrationNeeded(err) { + err := a.doMgmLogin(client, ctx, pubSSHKey) + if isRegistrationNeeded(err) { log.Debugf("peer registration required") _, err = a.registerPeer(client, ctx, setupKey, jwtToken, pubSSHKey) if err != nil { @@ -201,13 +201,7 @@ func (a *Auth) Login(ctx context.Context, setupKey string, jwtToken string) (err // getPKCEFlow retrieves PKCE authorization flow configuration and creates a flow instance func (a *Auth) getPKCEFlow(client *mgm.GrpcClient) (*PKCEAuthorizationFlow, error) { - serverKey, err := client.GetServerPublicKey() - if err != nil { - log.Errorf("failed while getting Management Service public key: %v", err) - return nil, err - } - - protoFlow, err := client.GetPKCEAuthorizationFlow(*serverKey) + protoFlow, err := client.GetPKCEAuthorizationFlow() if err != nil { if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { log.Warnf("server couldn't find pkce flow, contact admin: %v", err) @@ -246,13 +240,7 @@ func (a *Auth) getPKCEFlow(client *mgm.GrpcClient) (*PKCEAuthorizationFlow, erro // getDeviceFlow retrieves device authorization flow configuration and creates a flow instance func (a *Auth) getDeviceFlow(client *mgm.GrpcClient) (*DeviceAuthorizationFlow, error) { - serverKey, err := client.GetServerPublicKey() - if err != nil { - log.Errorf("failed while getting Management Service public key: %v", err) - return nil, err - } - - protoFlow, err := client.GetDeviceAuthorizationFlow(*serverKey) + protoFlow, err := client.GetDeviceAuthorizationFlow() if err != nil { if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { log.Warnf("server couldn't find device flow, contact admin: %v", err) @@ -292,28 +280,16 @@ func (a *Auth) getDeviceFlow(client *mgm.GrpcClient) (*DeviceAuthorizationFlow, } // doMgmLogin performs the actual login operation with the management service -func (a *Auth) doMgmLogin(client *mgm.GrpcClient, ctx context.Context, pubSSHKey []byte) (*wgtypes.Key, *mgmProto.LoginResponse, error) { - serverKey, err := client.GetServerPublicKey() - if err != nil { - log.Errorf("failed while getting Management Service public key: %v", err) - return nil, nil, err - } - +func (a *Auth) doMgmLogin(client *mgm.GrpcClient, ctx context.Context, pubSSHKey []byte) error { sysInfo := system.GetInfo(ctx) a.setSystemInfoFlags(sysInfo) - loginResp, err := client.Login(*serverKey, sysInfo, pubSSHKey, a.config.DNSLabels) - return serverKey, loginResp, err + _, err := client.Login(sysInfo, pubSSHKey, a.config.DNSLabels) + return err } // registerPeer checks whether setupKey was provided via cmd line and if not then it prompts user to enter a key. // Otherwise tries to register with the provided setupKey via command line. func (a *Auth) registerPeer(client *mgm.GrpcClient, ctx context.Context, setupKey string, jwtToken string, pubSSHKey []byte) (*mgmProto.LoginResponse, error) { - serverPublicKey, err := client.GetServerPublicKey() - if err != nil { - log.Errorf("failed while getting Management Service public key: %v", err) - return nil, err - } - validSetupKey, err := uuid.Parse(setupKey) if err != nil && jwtToken == "" { return nil, status.Errorf(codes.InvalidArgument, "invalid setup-key or no sso information provided, err: %v", err) @@ -322,7 +298,7 @@ func (a *Auth) registerPeer(client *mgm.GrpcClient, ctx context.Context, setupKe log.Debugf("sending peer registration request to Management Service") info := system.GetInfo(ctx) a.setSystemInfoFlags(info) - loginResp, err := client.Register(*serverPublicKey, validSetupKey.String(), jwtToken, info, pubSSHKey, a.config.DNSLabels) + loginResp, err := client.Register(validSetupKey.String(), jwtToken, info, pubSSHKey, a.config.DNSLabels) if err != nil { log.Errorf("failed registering peer %v", err) return nil, err diff --git a/client/internal/connect.go b/client/internal/connect.go index 1e8f87c08..ab12cfab3 100644 --- a/client/internal/connect.go +++ b/client/internal/connect.go @@ -617,12 +617,6 @@ func connectToSignal(ctx context.Context, wtConfig *mgmProto.NetbirdConfig, ourP // loginToManagement creates Management ServiceDependencies client, establishes a connection, logs-in and gets a global Netbird config (signal, turn, stun hosts, etc) func loginToManagement(ctx context.Context, client mgm.Client, pubSSHKey []byte, config *profilemanager.Config) (*mgmProto.LoginResponse, error) { - - serverPublicKey, err := client.GetServerPublicKey() - if err != nil { - return nil, gstatus.Errorf(codes.FailedPrecondition, "failed while getting Management Service public key: %s", err) - } - sysInfo := system.GetInfo(ctx) sysInfo.SetFlags( config.RosenpassEnabled, @@ -641,12 +635,7 @@ func loginToManagement(ctx context.Context, client mgm.Client, pubSSHKey []byte, config.EnableSSHRemotePortForwarding, config.DisableSSHAuth, ) - loginResp, err := client.Login(*serverPublicKey, sysInfo, pubSSHKey, config.DNSLabels) - if err != nil { - return nil, err - } - - return loginResp, nil + return client.Login(sysInfo, pubSSHKey, config.DNSLabels) } func statusRecorderToMgmConnStateNotifier(statusRecorder *peer.Status) mgm.ConnStateNotifier { diff --git a/client/internal/engine_test.go b/client/internal/engine_test.go index 77fe9049b..1f6fe384a 100644 --- a/client/internal/engine_test.go +++ b/client/internal/engine_test.go @@ -828,7 +828,7 @@ func TestEngine_UpdateNetworkMapWithRoutes(t *testing.T) { WgPrivateKey: key, WgPort: 33100, MTU: iface.DefaultMTU, - }, EngineServices{ + }, EngineServices{ SignalClient: &signal.MockClient{}, MgmClient: &mgmt.MockClient{}, RelayManager: relayMgr, @@ -1035,7 +1035,7 @@ func TestEngine_UpdateNetworkMapWithDNSUpdate(t *testing.T) { WgPrivateKey: key, WgPort: 33100, MTU: iface.DefaultMTU, - }, EngineServices{ + }, EngineServices{ SignalClient: &signal.MockClient{}, MgmClient: &mgmt.MockClient{}, RelayManager: relayMgr, @@ -1538,13 +1538,8 @@ func createEngine(ctx context.Context, cancel context.CancelFunc, setupKey strin return nil, err } - publicKey, err := mgmtClient.GetServerPublicKey() - if err != nil { - return nil, err - } - info := system.GetInfo(ctx) - resp, err := mgmtClient.Register(*publicKey, setupKey, "", info, nil, nil) + resp, err := mgmtClient.Register(setupKey, "", info, nil, nil) if err != nil { return nil, err } @@ -1566,7 +1561,7 @@ func createEngine(ctx context.Context, cancel context.CancelFunc, setupKey strin } relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU) -e, err := NewEngine(ctx, cancel, conf, EngineServices{ + e, err := NewEngine(ctx, cancel, conf, EngineServices{ SignalClient: signalClient, MgmClient: mgmtClient, RelayManager: relayMgr, diff --git a/client/internal/profilemanager/config.go b/client/internal/profilemanager/config.go index f128ee903..e78ee4439 100644 --- a/client/internal/profilemanager/config.go +++ b/client/internal/profilemanager/config.go @@ -777,8 +777,7 @@ func UpdateOldManagementURL(ctx context.Context, config *Config, configPath stri }() // gRPC check - _, err = client.GetServerPublicKey() - if err != nil { + if err = client.HealthCheck(); err != nil { log.Infof("couldn't switch to the new Management %s", newURL.String()) return nil, err } diff --git a/shared/management/client/client.go b/shared/management/client/client.go index a15301223..18efba87b 100644 --- a/shared/management/client/client.go +++ b/shared/management/client/client.go @@ -4,8 +4,6 @@ import ( "context" "io" - "golang.zx2c4.com/wireguard/wgctrl/wgtypes" - "github.com/netbirdio/netbird/client/system" "github.com/netbirdio/netbird/shared/management/domain" "github.com/netbirdio/netbird/shared/management/proto" @@ -16,14 +14,18 @@ type Client interface { io.Closer Sync(ctx context.Context, sysInfo *system.Info, msgHandler func(msg *proto.SyncResponse) error) error Job(ctx context.Context, msgHandler func(msg *proto.JobRequest) *proto.JobResponse) error - GetServerPublicKey() (*wgtypes.Key, error) - Register(serverKey wgtypes.Key, setupKey string, jwtToken string, sysInfo *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) - Login(serverKey wgtypes.Key, sysInfo *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) - GetDeviceAuthorizationFlow(serverKey wgtypes.Key) (*proto.DeviceAuthorizationFlow, error) - GetPKCEAuthorizationFlow(serverKey wgtypes.Key) (*proto.PKCEAuthorizationFlow, error) + Register(setupKey string, jwtToken string, sysInfo *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) + Login(sysInfo *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) + GetDeviceAuthorizationFlow() (*proto.DeviceAuthorizationFlow, error) + GetPKCEAuthorizationFlow() (*proto.PKCEAuthorizationFlow, error) GetNetworkMap(sysInfo *system.Info) (*proto.NetworkMap, error) GetServerURL() string + // IsHealthy returns the current connection status without blocking. + // Used by the engine to monitor connectivity in the background. IsHealthy() bool + // HealthCheck actively probes the management server and returns an error if unreachable. + // Used to validate connectivity before committing configuration changes. + HealthCheck() error SyncMeta(sysInfo *system.Info) error Logout() error CreateExpose(ctx context.Context, req ExposeRequest) (*ExposeResponse, error) diff --git a/shared/management/client/client_test.go b/shared/management/client/client_test.go index 01957154c..f5edb6b95 100644 --- a/shared/management/client/client_test.go +++ b/shared/management/client/client_test.go @@ -189,7 +189,7 @@ func closeManagementSilently(s *grpc.Server, listener net.Listener) { } } -func TestClient_GetServerPublicKey(t *testing.T) { +func TestClient_HealthCheck(t *testing.T) { testKey, err := wgtypes.GenerateKey() if err != nil { t.Fatal(err) @@ -203,12 +203,8 @@ func TestClient_GetServerPublicKey(t *testing.T) { t.Fatal(err) } - key, err := client.GetServerPublicKey() - if err != nil { - t.Error("couldn't retrieve management public key") - } - if key == nil { - t.Error("got an empty management public key") + if err := client.HealthCheck(); err != nil { + t.Errorf("health check failed: %v", err) } } @@ -225,12 +221,8 @@ func TestClient_LoginUnregistered_ShouldThrow_401(t *testing.T) { if err != nil { t.Fatal(err) } - key, err := client.GetServerPublicKey() - if err != nil { - t.Fatal(err) - } sysInfo := system.GetInfo(context.TODO()) - _, err = client.Login(*key, sysInfo, nil, nil) + _, err = client.Login(sysInfo, nil, nil) if err == nil { t.Error("expecting err on unregistered login, got nil") } @@ -253,12 +245,8 @@ func TestClient_LoginRegistered(t *testing.T) { t.Fatal(err) } - key, err := client.GetServerPublicKey() - if err != nil { - t.Error(err) - } info := system.GetInfo(context.TODO()) - resp, err := client.Register(*key, ValidKey, "", info, nil, nil) + resp, err := client.Register(ValidKey, "", info, nil, nil) if err != nil { t.Error(err) } @@ -282,13 +270,8 @@ func TestClient_Sync(t *testing.T) { t.Fatal(err) } - serverKey, err := client.GetServerPublicKey() - if err != nil { - t.Error(err) - } - info := system.GetInfo(context.TODO()) - _, err = client.Register(*serverKey, ValidKey, "", info, nil, nil) + _, err = client.Register(ValidKey, "", info, nil, nil) if err != nil { t.Error(err) } @@ -304,7 +287,7 @@ func TestClient_Sync(t *testing.T) { } info = system.GetInfo(context.TODO()) - _, err = remoteClient.Register(*serverKey, ValidKey, "", info, nil, nil) + _, err = remoteClient.Register(ValidKey, "", info, nil, nil) if err != nil { t.Fatal(err) } @@ -364,11 +347,6 @@ func Test_SystemMetaDataFromClient(t *testing.T) { t.Fatalf("error while creating testClient: %v", err) } - key, err := testClient.GetServerPublicKey() - if err != nil { - t.Fatalf("error while getting server public key from testclient, %v", err) - } - var actualMeta *mgmtProto.PeerSystemMeta var actualValidKey string var wg sync.WaitGroup @@ -405,7 +383,7 @@ func Test_SystemMetaDataFromClient(t *testing.T) { } info := system.GetInfo(context.TODO()) - _, err = testClient.Register(*key, ValidKey, "", info, nil, nil) + _, err = testClient.Register(ValidKey, "", info, nil, nil) if err != nil { t.Errorf("error while trying to register client: %v", err) } @@ -505,7 +483,7 @@ func Test_GetDeviceAuthorizationFlow(t *testing.T) { } mgmtMockServer.GetDeviceAuthorizationFlowFunc = func(ctx context.Context, req *mgmtProto.EncryptedMessage) (*mgmtProto.EncryptedMessage, error) { - encryptedResp, err := encryption.EncryptMessage(serverKey, client.key, expectedFlowInfo) + encryptedResp, err := encryption.EncryptMessage(client.key.PublicKey(), serverKey, expectedFlowInfo) if err != nil { return nil, err } @@ -517,7 +495,7 @@ func Test_GetDeviceAuthorizationFlow(t *testing.T) { }, nil } - flowInfo, err := client.GetDeviceAuthorizationFlow(serverKey) + flowInfo, err := client.GetDeviceAuthorizationFlow() if err != nil { t.Error("error while retrieving device auth flow information") } @@ -551,7 +529,7 @@ func Test_GetPKCEAuthorizationFlow(t *testing.T) { } mgmtMockServer.GetPKCEAuthorizationFlowFunc = func(ctx context.Context, req *mgmtProto.EncryptedMessage) (*mgmtProto.EncryptedMessage, error) { - encryptedResp, err := encryption.EncryptMessage(serverKey, client.key, expectedFlowInfo) + encryptedResp, err := encryption.EncryptMessage(client.key.PublicKey(), serverKey, expectedFlowInfo) if err != nil { return nil, err } @@ -563,7 +541,7 @@ func Test_GetPKCEAuthorizationFlow(t *testing.T) { }, nil } - flowInfo, err := client.GetPKCEAuthorizationFlow(serverKey) + flowInfo, err := client.GetPKCEAuthorizationFlow() if err != nil { t.Error("error while retrieving pkce auth flow information") } diff --git a/shared/management/client/grpc.go b/shared/management/client/grpc.go index 252199498..a01e51abc 100644 --- a/shared/management/client/grpc.go +++ b/shared/management/client/grpc.go @@ -202,7 +202,7 @@ func (c *GrpcClient) withMgmtStream( return fmt.Errorf("connection to management is not ready and in %s state", connState) } - serverPubKey, err := c.GetServerPublicKey() + serverPubKey, err := c.getServerPublicKey() if err != nil { log.Debugf(errMsgMgmtPublicKey, err) return err @@ -404,7 +404,7 @@ func (c *GrpcClient) handleSyncStream(ctx context.Context, serverPubKey wgtypes. // GetNetworkMap return with the network map func (c *GrpcClient) GetNetworkMap(sysInfo *system.Info) (*proto.NetworkMap, error) { - serverPubKey, err := c.GetServerPublicKey() + serverPubKey, err := c.getServerPublicKey() if err != nil { log.Debugf("failed getting Management Service public key: %s", err) return nil, err @@ -490,18 +490,24 @@ func (c *GrpcClient) receiveUpdatesEvents(stream proto.ManagementService_SyncCli } } -// GetServerPublicKey returns server's WireGuard public key (used later for encrypting messages sent to the server) -func (c *GrpcClient) GetServerPublicKey() (*wgtypes.Key, error) { +// HealthCheck actively probes the management server and returns an error if unreachable. +// Used to validate connectivity before committing configuration changes. +func (c *GrpcClient) HealthCheck() error { if !c.ready() { - return nil, errors.New(errMsgNoMgmtConnection) + return errors.New(errMsgNoMgmtConnection) } + _, err := c.getServerPublicKey() + return err +} + +// getServerPublicKey fetches the server's WireGuard public key. +func (c *GrpcClient) getServerPublicKey() (*wgtypes.Key, error) { mgmCtx, cancel := context.WithTimeout(c.ctx, 5*time.Second) defer cancel() resp, err := c.realClient.GetServerKey(mgmCtx, &proto.Empty{}) if err != nil { - log.Errorf("failed while getting Management Service public key: %v", err) - return nil, fmt.Errorf("failed while getting Management Service public key") + return nil, fmt.Errorf("failed getting Management Service public key: %w", err) } serverKey, err := wgtypes.ParseKey(resp.Key) @@ -512,7 +518,8 @@ func (c *GrpcClient) GetServerPublicKey() (*wgtypes.Key, error) { return &serverKey, nil } -// IsHealthy probes the gRPC connection and returns false on errors +// IsHealthy returns the current connection status without blocking. +// Used by the engine to monitor connectivity in the background. func (c *GrpcClient) IsHealthy() bool { switch c.conn.GetState() { case connectivity.TransientFailure: @@ -538,12 +545,17 @@ func (c *GrpcClient) IsHealthy() bool { return true } -func (c *GrpcClient) login(serverKey wgtypes.Key, req *proto.LoginRequest) (*proto.LoginResponse, error) { +func (c *GrpcClient) login(req *proto.LoginRequest) (*proto.LoginResponse, error) { if !c.ready() { return nil, errors.New(errMsgNoMgmtConnection) } - loginReq, err := encryption.EncryptMessage(serverKey, c.key, req) + serverKey, err := c.getServerPublicKey() + if err != nil { + return nil, err + } + + loginReq, err := encryption.EncryptMessage(*serverKey, c.key, req) if err != nil { log.Errorf("failed to encrypt message: %s", err) return nil, err @@ -577,7 +589,7 @@ func (c *GrpcClient) login(serverKey wgtypes.Key, req *proto.LoginRequest) (*pro } loginResp := &proto.LoginResponse{} - err = encryption.DecryptMessage(serverKey, c.key, resp.Body, loginResp) + err = encryption.DecryptMessage(*serverKey, c.key, resp.Body, loginResp) if err != nil { log.Errorf("failed to decrypt login response: %s", err) return nil, err @@ -589,34 +601,40 @@ func (c *GrpcClient) login(serverKey wgtypes.Key, req *proto.LoginRequest) (*pro // Register registers peer on Management Server. It actually calls a Login endpoint with a provided setup key // Takes care of encrypting and decrypting messages. // This method will also collect system info and send it with the request (e.g. hostname, os, etc) -func (c *GrpcClient) Register(serverKey wgtypes.Key, setupKey string, jwtToken string, sysInfo *system.Info, pubSSHKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) { +func (c *GrpcClient) Register(setupKey string, jwtToken string, sysInfo *system.Info, pubSSHKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) { keys := &proto.PeerKeys{ SshPubKey: pubSSHKey, WgPubKey: []byte(c.key.PublicKey().String()), } - return c.login(serverKey, &proto.LoginRequest{SetupKey: setupKey, Meta: infoToMetaData(sysInfo), JwtToken: jwtToken, PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()}) + return c.login(&proto.LoginRequest{SetupKey: setupKey, Meta: infoToMetaData(sysInfo), JwtToken: jwtToken, PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()}) } // Login attempts login to Management Server. Takes care of encrypting and decrypting messages. -func (c *GrpcClient) Login(serverKey wgtypes.Key, sysInfo *system.Info, pubSSHKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) { +func (c *GrpcClient) Login(sysInfo *system.Info, pubSSHKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) { keys := &proto.PeerKeys{ SshPubKey: pubSSHKey, WgPubKey: []byte(c.key.PublicKey().String()), } - return c.login(serverKey, &proto.LoginRequest{Meta: infoToMetaData(sysInfo), PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()}) + return c.login(&proto.LoginRequest{Meta: infoToMetaData(sysInfo), PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()}) } // GetDeviceAuthorizationFlow returns a device authorization flow information. // It also takes care of encrypting and decrypting messages. -func (c *GrpcClient) GetDeviceAuthorizationFlow(serverKey wgtypes.Key) (*proto.DeviceAuthorizationFlow, error) { +func (c *GrpcClient) GetDeviceAuthorizationFlow() (*proto.DeviceAuthorizationFlow, error) { if !c.ready() { return nil, fmt.Errorf("no connection to management in order to get device authorization flow") } + + serverKey, err := c.getServerPublicKey() + if err != nil { + return nil, err + } + mgmCtx, cancel := context.WithTimeout(c.ctx, time.Second*2) defer cancel() message := &proto.DeviceAuthorizationFlowRequest{} - encryptedMSG, err := encryption.EncryptMessage(serverKey, c.key, message) + encryptedMSG, err := encryption.EncryptMessage(*serverKey, c.key, message) if err != nil { return nil, err } @@ -630,7 +648,7 @@ func (c *GrpcClient) GetDeviceAuthorizationFlow(serverKey wgtypes.Key) (*proto.D } flowInfoResp := &proto.DeviceAuthorizationFlow{} - err = encryption.DecryptMessage(serverKey, c.key, resp.Body, flowInfoResp) + err = encryption.DecryptMessage(*serverKey, c.key, resp.Body, flowInfoResp) if err != nil { errWithMSG := fmt.Errorf("failed to decrypt device authorization flow message: %s", err) log.Error(errWithMSG) @@ -642,15 +660,21 @@ func (c *GrpcClient) GetDeviceAuthorizationFlow(serverKey wgtypes.Key) (*proto.D // GetPKCEAuthorizationFlow returns a pkce authorization flow information. // It also takes care of encrypting and decrypting messages. -func (c *GrpcClient) GetPKCEAuthorizationFlow(serverKey wgtypes.Key) (*proto.PKCEAuthorizationFlow, error) { +func (c *GrpcClient) GetPKCEAuthorizationFlow() (*proto.PKCEAuthorizationFlow, error) { if !c.ready() { return nil, fmt.Errorf("no connection to management in order to get pkce authorization flow") } + + serverKey, err := c.getServerPublicKey() + if err != nil { + return nil, err + } + mgmCtx, cancel := context.WithTimeout(c.ctx, time.Second*2) defer cancel() message := &proto.PKCEAuthorizationFlowRequest{} - encryptedMSG, err := encryption.EncryptMessage(serverKey, c.key, message) + encryptedMSG, err := encryption.EncryptMessage(*serverKey, c.key, message) if err != nil { return nil, err } @@ -664,7 +688,7 @@ func (c *GrpcClient) GetPKCEAuthorizationFlow(serverKey wgtypes.Key) (*proto.PKC } flowInfoResp := &proto.PKCEAuthorizationFlow{} - err = encryption.DecryptMessage(serverKey, c.key, resp.Body, flowInfoResp) + err = encryption.DecryptMessage(*serverKey, c.key, resp.Body, flowInfoResp) if err != nil { errWithMSG := fmt.Errorf("failed to decrypt pkce authorization flow message: %s", err) log.Error(errWithMSG) @@ -681,7 +705,7 @@ func (c *GrpcClient) SyncMeta(sysInfo *system.Info) error { return errors.New(errMsgNoMgmtConnection) } - serverPubKey, err := c.GetServerPublicKey() + serverPubKey, err := c.getServerPublicKey() if err != nil { log.Debugf(errMsgMgmtPublicKey, err) return err @@ -724,7 +748,7 @@ func (c *GrpcClient) notifyConnected() { } func (c *GrpcClient) Logout() error { - serverKey, err := c.GetServerPublicKey() + serverKey, err := c.getServerPublicKey() if err != nil { return fmt.Errorf("get server public key: %w", err) } @@ -751,7 +775,7 @@ func (c *GrpcClient) Logout() error { // CreateExpose calls the management server to create a new expose service. func (c *GrpcClient) CreateExpose(ctx context.Context, req ExposeRequest) (*ExposeResponse, error) { - serverPubKey, err := c.GetServerPublicKey() + serverPubKey, err := c.getServerPublicKey() if err != nil { return nil, err } @@ -787,7 +811,7 @@ func (c *GrpcClient) CreateExpose(ctx context.Context, req ExposeRequest) (*Expo // RenewExpose extends the TTL of an active expose session on the management server. func (c *GrpcClient) RenewExpose(ctx context.Context, domain string) error { - serverPubKey, err := c.GetServerPublicKey() + serverPubKey, err := c.getServerPublicKey() if err != nil { return err } @@ -810,7 +834,7 @@ func (c *GrpcClient) RenewExpose(ctx context.Context, domain string) error { // StopExpose terminates an active expose session on the management server. func (c *GrpcClient) StopExpose(ctx context.Context, domain string) error { - serverPubKey, err := c.GetServerPublicKey() + serverPubKey, err := c.getServerPublicKey() if err != nil { return err } diff --git a/shared/management/client/mock.go b/shared/management/client/mock.go index 548e379e8..361e8ffad 100644 --- a/shared/management/client/mock.go +++ b/shared/management/client/mock.go @@ -3,8 +3,6 @@ package client import ( "context" - "golang.zx2c4.com/wireguard/wgctrl/wgtypes" - "github.com/netbirdio/netbird/client/system" "github.com/netbirdio/netbird/shared/management/domain" "github.com/netbirdio/netbird/shared/management/proto" @@ -14,12 +12,12 @@ import ( type MockClient struct { CloseFunc func() error SyncFunc func(ctx context.Context, sysInfo *system.Info, msgHandler func(msg *proto.SyncResponse) error) error - GetServerPublicKeyFunc func() (*wgtypes.Key, error) - RegisterFunc func(serverKey wgtypes.Key, setupKey string, jwtToken string, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) - LoginFunc func(serverKey wgtypes.Key, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) - GetDeviceAuthorizationFlowFunc func(serverKey wgtypes.Key) (*proto.DeviceAuthorizationFlow, error) - GetPKCEAuthorizationFlowFunc func(serverKey wgtypes.Key) (*proto.PKCEAuthorizationFlow, error) + RegisterFunc func(setupKey string, jwtToken string, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) + LoginFunc func(info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) + GetDeviceAuthorizationFlowFunc func() (*proto.DeviceAuthorizationFlow, error) + GetPKCEAuthorizationFlowFunc func() (*proto.PKCEAuthorizationFlow, error) GetServerURLFunc func() string + HealthCheckFunc func() error SyncMetaFunc func(sysInfo *system.Info) error LogoutFunc func() error JobFunc func(ctx context.Context, msgHandler func(msg *proto.JobRequest) *proto.JobResponse) error @@ -53,39 +51,39 @@ func (m *MockClient) Job(ctx context.Context, msgHandler func(msg *proto.JobRequ return m.JobFunc(ctx, msgHandler) } -func (m *MockClient) GetServerPublicKey() (*wgtypes.Key, error) { - if m.GetServerPublicKeyFunc == nil { - return nil, nil - } - return m.GetServerPublicKeyFunc() -} - -func (m *MockClient) Register(serverKey wgtypes.Key, setupKey string, jwtToken string, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) { +func (m *MockClient) Register(setupKey string, jwtToken string, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) { if m.RegisterFunc == nil { return nil, nil } - return m.RegisterFunc(serverKey, setupKey, jwtToken, info, sshKey, dnsLabels) + return m.RegisterFunc(setupKey, jwtToken, info, sshKey, dnsLabels) } -func (m *MockClient) Login(serverKey wgtypes.Key, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) { +func (m *MockClient) Login(info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) { if m.LoginFunc == nil { return nil, nil } - return m.LoginFunc(serverKey, info, sshKey, dnsLabels) + return m.LoginFunc(info, sshKey, dnsLabels) } -func (m *MockClient) GetDeviceAuthorizationFlow(serverKey wgtypes.Key) (*proto.DeviceAuthorizationFlow, error) { +func (m *MockClient) GetDeviceAuthorizationFlow() (*proto.DeviceAuthorizationFlow, error) { if m.GetDeviceAuthorizationFlowFunc == nil { return nil, nil } - return m.GetDeviceAuthorizationFlowFunc(serverKey) + return m.GetDeviceAuthorizationFlowFunc() } -func (m *MockClient) GetPKCEAuthorizationFlow(serverKey wgtypes.Key) (*proto.PKCEAuthorizationFlow, error) { +func (m *MockClient) GetPKCEAuthorizationFlow() (*proto.PKCEAuthorizationFlow, error) { if m.GetPKCEAuthorizationFlowFunc == nil { return nil, nil } - return m.GetPKCEAuthorizationFlowFunc(serverKey) + return m.GetPKCEAuthorizationFlowFunc() +} + +func (m *MockClient) HealthCheck() error { + if m.HealthCheckFunc == nil { + return nil + } + return m.HealthCheckFunc() } // GetNetworkMap mock implementation of GetNetworkMap from Client interface. From 6da34e483c3de6fbef10c692ce84a62d47792384 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 7 Apr 2026 13:13:38 +0200 Subject: [PATCH 278/374] [client] Fix mgmProber interface to match unexported GetServerPublicKey (#5815) Update the mgmProber interface to use HealthCheck() instead of the now-unexported GetServerPublicKey(), aligning with the changes in the management client API. --- client/internal/profilemanager/config.go | 2 +- client/internal/profilemanager/config_test.go | 14 ++++---------- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/client/internal/profilemanager/config.go b/client/internal/profilemanager/config.go index e78ee4439..20c615d57 100644 --- a/client/internal/profilemanager/config.go +++ b/client/internal/profilemanager/config.go @@ -41,7 +41,7 @@ const ( // mgmProber is the subset of management client needed for URL migration probes. type mgmProber interface { - GetServerPublicKey() (*wgtypes.Key, error) + HealthCheck() error Close() error } diff --git a/client/internal/profilemanager/config_test.go b/client/internal/profilemanager/config_test.go index c3efb48e6..5216f2423 100644 --- a/client/internal/profilemanager/config_test.go +++ b/client/internal/profilemanager/config_test.go @@ -17,12 +17,10 @@ import ( "github.com/netbirdio/netbird/util" ) -type mockMgmProber struct { - key wgtypes.Key -} +type mockMgmProber struct{} -func (m *mockMgmProber) GetServerPublicKey() (*wgtypes.Key, error) { - return &m.key, nil +func (m *mockMgmProber) HealthCheck() error { + return nil } func (m *mockMgmProber) Close() error { return nil } @@ -247,11 +245,7 @@ func TestWireguardPortDefaultVsExplicit(t *testing.T) { func TestUpdateOldManagementURL(t *testing.T) { origProber := newMgmProber newMgmProber = func(_ context.Context, _ string, _ wgtypes.Key, _ bool) (mgmProber, error) { - key, err := wgtypes.GenerateKey() - if err != nil { - return nil, err - } - return &mockMgmProber{key: key.PublicKey()}, nil + return &mockMgmProber{}, nil } t.Cleanup(func() { newMgmProber = origProber }) From 14b3b77bda7251240689791eda71fa4e9a68dd7c Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Tue, 7 Apr 2026 14:13:09 +0200 Subject: [PATCH 279/374] [management] validate permissions on groups read with name (#5749) --- .../service/manager/l4_port_test.go | 4 +- .../reverseproxy/service/manager/manager.go | 2 +- .../service/manager/manager_test.go | 4 +- management/server/account/manager.go | 2 +- management/server/account/manager_mock.go | 8 +- management/server/group.go | 5 +- .../http/handlers/groups/groups_handler.go | 4 +- .../handlers/groups/groups_handler_test.go | 2 +- management/server/mock_server/account_mock.go | 6 +- management/server/store/store.go | 2 +- management/server/store/store_mock.go | 94 +++++++++---------- 11 files changed, 68 insertions(+), 65 deletions(-) diff --git a/management/internals/modules/reverseproxy/service/manager/l4_port_test.go b/management/internals/modules/reverseproxy/service/manager/l4_port_test.go index 4a7647d90..47dce3a64 100644 --- a/management/internals/modules/reverseproxy/service/manager/l4_port_test.go +++ b/management/internals/modules/reverseproxy/service/manager/l4_port_test.go @@ -85,8 +85,8 @@ func setupL4Test(t *testing.T, customPortsSupported *bool) (*Manager, store.Stor accountMgr := &mock_server.MockAccountManager{ StoreEventFunc: func(_ context.Context, _, _, _ string, _ activity.ActivityDescriber, _ map[string]any) {}, UpdateAccountPeersFunc: func(_ context.Context, _ string) {}, - GetGroupByNameFunc: func(ctx context.Context, accountID, groupName string) (*types.Group, error) { - return testStore.GetGroupByName(ctx, store.LockingStrengthNone, groupName, accountID) + GetGroupByNameFunc: func(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) { + return testStore.GetGroupByName(ctx, store.LockingStrengthNone, accountID, groupName) }, } diff --git a/management/internals/modules/reverseproxy/service/manager/manager.go b/management/internals/modules/reverseproxy/service/manager/manager.go index 989187826..ed9d4201b 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager.go +++ b/management/internals/modules/reverseproxy/service/manager/manager.go @@ -1119,7 +1119,7 @@ func (m *Manager) getGroupIDsFromNames(ctx context.Context, accountID string, gr } groupIDs := make([]string, 0, len(groupNames)) for _, groupName := range groupNames { - g, err := m.accountManager.GetGroupByName(ctx, groupName, accountID) + g, err := m.accountManager.GetGroupByName(ctx, groupName, accountID, activity.SystemInitiator) if err != nil { return nil, fmt.Errorf("failed to get group by name %s: %w", groupName, err) } diff --git a/management/internals/modules/reverseproxy/service/manager/manager_test.go b/management/internals/modules/reverseproxy/service/manager/manager_test.go index f6e532118..69d48f10a 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/service/manager/manager_test.go @@ -698,8 +698,8 @@ func setupIntegrationTest(t *testing.T) (*Manager, store.Store) { accountMgr := &mock_server.MockAccountManager{ StoreEventFunc: func(_ context.Context, _, _, _ string, _ activity.ActivityDescriber, _ map[string]any) {}, UpdateAccountPeersFunc: func(_ context.Context, _ string) {}, - GetGroupByNameFunc: func(ctx context.Context, accountID, groupName string) (*types.Group, error) { - return testStore.GetGroupByName(ctx, store.LockingStrengthNone, groupName, accountID) + GetGroupByNameFunc: func(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) { + return testStore.GetGroupByName(ctx, store.LockingStrengthNone, accountID, groupName) }, } diff --git a/management/server/account/manager.go b/management/server/account/manager.go index 45af63ae8..b4516d512 100644 --- a/management/server/account/manager.go +++ b/management/server/account/manager.go @@ -75,7 +75,7 @@ type Manager interface { GetUsersFromAccount(ctx context.Context, accountID, userID string) (map[string]*types.UserInfo, error) GetGroup(ctx context.Context, accountId, groupID, userID string) (*types.Group, error) GetAllGroups(ctx context.Context, accountID, userID string) ([]*types.Group, error) - GetGroupByName(ctx context.Context, groupName, accountID string) (*types.Group, error) + GetGroupByName(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) CreateGroup(ctx context.Context, accountID, userID string, group *types.Group) error UpdateGroup(ctx context.Context, accountID, userID string, group *types.Group) error CreateGroups(ctx context.Context, accountID, userID string, newGroups []*types.Group) error diff --git a/management/server/account/manager_mock.go b/management/server/account/manager_mock.go index 90700c795..36e5fe39f 100644 --- a/management/server/account/manager_mock.go +++ b/management/server/account/manager_mock.go @@ -736,18 +736,18 @@ func (mr *MockManagerMockRecorder) GetGroup(ctx, accountId, groupID, userID inte } // GetGroupByName mocks base method. -func (m *MockManager) GetGroupByName(ctx context.Context, groupName, accountID string) (*types.Group, error) { +func (m *MockManager) GetGroupByName(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetGroupByName", ctx, groupName, accountID) + ret := m.ctrl.Call(m, "GetGroupByName", ctx, groupName, accountID, userID) ret0, _ := ret[0].(*types.Group) ret1, _ := ret[1].(error) return ret0, ret1 } // GetGroupByName indicates an expected call of GetGroupByName. -func (mr *MockManagerMockRecorder) GetGroupByName(ctx, groupName, accountID interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetGroupByName(ctx, groupName, accountID, userID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupByName", reflect.TypeOf((*MockManager)(nil).GetGroupByName), ctx, groupName, accountID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupByName", reflect.TypeOf((*MockManager)(nil).GetGroupByName), ctx, groupName, accountID, userID) } // GetIdentityProvider mocks base method. diff --git a/management/server/group.go b/management/server/group.go index 326b167cf..7b5b9b86c 100644 --- a/management/server/group.go +++ b/management/server/group.go @@ -61,7 +61,10 @@ func (am *DefaultAccountManager) GetAllGroups(ctx context.Context, accountID, us } // GetGroupByName filters all groups in an account by name and returns the one with the most peers -func (am *DefaultAccountManager) GetGroupByName(ctx context.Context, groupName, accountID string) (*types.Group, error) { +func (am *DefaultAccountManager) GetGroupByName(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) { + if err := am.CheckGroupPermissions(ctx, accountID, userID); err != nil { + return nil, err + } return am.Store.GetGroupByName(ctx, store.LockingStrengthNone, accountID, groupName) } diff --git a/management/server/http/handlers/groups/groups_handler.go b/management/server/http/handlers/groups/groups_handler.go index 56ccc9d0b..f8d161a87 100644 --- a/management/server/http/handlers/groups/groups_handler.go +++ b/management/server/http/handlers/groups/groups_handler.go @@ -52,7 +52,7 @@ func (h *handler) getAllGroups(w http.ResponseWriter, r *http.Request) { groupName := r.URL.Query().Get("name") if groupName != "" { // Get single group by name - group, err := h.accountManager.GetGroupByName(r.Context(), groupName, accountID) + group, err := h.accountManager.GetGroupByName(r.Context(), groupName, accountID, userID) if err != nil { util.WriteError(r.Context(), err, w) return @@ -118,7 +118,7 @@ func (h *handler) updateGroup(w http.ResponseWriter, r *http.Request) { return } - allGroup, err := h.accountManager.GetGroupByName(r.Context(), "All", accountID) + allGroup, err := h.accountManager.GetGroupByName(r.Context(), "All", accountID, userID) if err != nil { util.WriteError(r.Context(), err, w) return diff --git a/management/server/http/handlers/groups/groups_handler_test.go b/management/server/http/handlers/groups/groups_handler_test.go index 458a15c11..c7b4cbcdd 100644 --- a/management/server/http/handlers/groups/groups_handler_test.go +++ b/management/server/http/handlers/groups/groups_handler_test.go @@ -71,7 +71,7 @@ func initGroupTestData(initGroups ...*types.Group) *handler { return groups, nil }, - GetGroupByNameFunc: func(ctx context.Context, groupName, _ string) (*types.Group, error) { + GetGroupByNameFunc: func(ctx context.Context, groupName, _, _ string) (*types.Group, error) { if groupName == "All" { return &types.Group{ID: "id-all", Name: "All", Issued: types.GroupIssuedAPI}, nil } diff --git a/management/server/mock_server/account_mock.go b/management/server/mock_server/account_mock.go index afd2021ac..ff369355e 100644 --- a/management/server/mock_server/account_mock.go +++ b/management/server/mock_server/account_mock.go @@ -46,7 +46,7 @@ type MockAccountManager struct { AddPeerFunc func(ctx context.Context, accountID string, setupKey string, userId string, peer *nbpeer.Peer, temporary bool) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, error) GetGroupFunc func(ctx context.Context, accountID, groupID, userID string) (*types.Group, error) GetAllGroupsFunc func(ctx context.Context, accountID, userID string) ([]*types.Group, error) - GetGroupByNameFunc func(ctx context.Context, accountID, groupName string) (*types.Group, error) + GetGroupByNameFunc func(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) SaveGroupFunc func(ctx context.Context, accountID, userID string, group *types.Group, create bool) error SaveGroupsFunc func(ctx context.Context, accountID, userID string, groups []*types.Group, create bool) error DeleteGroupFunc func(ctx context.Context, accountID, userId, groupID string) error @@ -406,9 +406,9 @@ func (am *MockAccountManager) AddPeer( } // GetGroupByName mock implementation of GetGroupByName from server.AccountManager interface -func (am *MockAccountManager) GetGroupByName(ctx context.Context, accountID, groupName string) (*types.Group, error) { +func (am *MockAccountManager) GetGroupByName(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) { if am.GetGroupByNameFunc != nil { - return am.GetGroupByNameFunc(ctx, accountID, groupName) + return am.GetGroupByNameFunc(ctx, groupName, accountID, userID) } return nil, status.Errorf(codes.Unimplemented, "method GetGroupByName is not implemented") } diff --git a/management/server/store/store.go b/management/server/store/store.go index e24a1efef..f0c34ffa9 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -121,7 +121,7 @@ type Store interface { GetAccountGroups(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types.Group, error) GetResourceGroups(ctx context.Context, lockStrength LockingStrength, accountID, resourceID string) ([]*types.Group, error) GetGroupByID(ctx context.Context, lockStrength LockingStrength, accountID, groupID string) (*types.Group, error) - GetGroupByName(ctx context.Context, lockStrength LockingStrength, groupName, accountID string) (*types.Group, error) + GetGroupByName(ctx context.Context, lockStrength LockingStrength, accountID, groupName string) (*types.Group, error) GetGroupsByIDs(ctx context.Context, lockStrength LockingStrength, accountID string, groupIDs []string) (map[string]*types.Group, error) CreateGroups(ctx context.Context, accountID string, groups []*types.Group) error UpdateGroups(ctx context.Context, accountID string, groups []*types.Group) error diff --git a/management/server/store/store_mock.go b/management/server/store/store_mock.go index a8648aed7..5e609c4ec 100644 --- a/management/server/store/store_mock.go +++ b/management/server/store/store_mock.go @@ -165,34 +165,6 @@ func (mr *MockStoreMockRecorder) CleanupStaleProxies(ctx, inactivityDuration int return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupStaleProxies", reflect.TypeOf((*MockStore)(nil).CleanupStaleProxies), ctx, inactivityDuration) } -// GetClusterSupportsCustomPorts mocks base method. -func (m *MockStore) GetClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetClusterSupportsCustomPorts", ctx, clusterAddr) - ret0, _ := ret[0].(*bool) - return ret0 -} - -// GetClusterSupportsCustomPorts indicates an expected call of GetClusterSupportsCustomPorts. -func (mr *MockStoreMockRecorder) GetClusterSupportsCustomPorts(ctx, clusterAddr interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterSupportsCustomPorts", reflect.TypeOf((*MockStore)(nil).GetClusterSupportsCustomPorts), ctx, clusterAddr) -} - -// GetClusterRequireSubdomain mocks base method. -func (m *MockStore) GetClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetClusterRequireSubdomain", ctx, clusterAddr) - ret0, _ := ret[0].(*bool) - return ret0 -} - -// GetClusterRequireSubdomain indicates an expected call of GetClusterRequireSubdomain. -func (mr *MockStoreMockRecorder) GetClusterRequireSubdomain(ctx, clusterAddr interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterRequireSubdomain", reflect.TypeOf((*MockStore)(nil).GetClusterRequireSubdomain), ctx, clusterAddr) -} - // Close mocks base method. func (m *MockStore) Close(ctx context.Context) error { m.ctrl.T.Helper() @@ -1389,6 +1361,34 @@ func (mr *MockStoreMockRecorder) GetAnyAccountID(ctx interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAnyAccountID", reflect.TypeOf((*MockStore)(nil).GetAnyAccountID), ctx) } +// GetClusterRequireSubdomain mocks base method. +func (m *MockStore) GetClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClusterRequireSubdomain", ctx, clusterAddr) + ret0, _ := ret[0].(*bool) + return ret0 +} + +// GetClusterRequireSubdomain indicates an expected call of GetClusterRequireSubdomain. +func (mr *MockStoreMockRecorder) GetClusterRequireSubdomain(ctx, clusterAddr interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterRequireSubdomain", reflect.TypeOf((*MockStore)(nil).GetClusterRequireSubdomain), ctx, clusterAddr) +} + +// GetClusterSupportsCustomPorts mocks base method. +func (m *MockStore) GetClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClusterSupportsCustomPorts", ctx, clusterAddr) + ret0, _ := ret[0].(*bool) + return ret0 +} + +// GetClusterSupportsCustomPorts indicates an expected call of GetClusterSupportsCustomPorts. +func (mr *MockStoreMockRecorder) GetClusterSupportsCustomPorts(ctx, clusterAddr interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterSupportsCustomPorts", reflect.TypeOf((*MockStore)(nil).GetClusterSupportsCustomPorts), ctx, clusterAddr) +} + // GetCustomDomain mocks base method. func (m *MockStore) GetCustomDomain(ctx context.Context, accountID, domainID string) (*domain.Domain, error) { m.ctrl.T.Helper() @@ -1466,18 +1466,18 @@ func (mr *MockStoreMockRecorder) GetGroupByID(ctx, lockStrength, accountID, grou } // GetGroupByName mocks base method. -func (m *MockStore) GetGroupByName(ctx context.Context, lockStrength LockingStrength, groupName, accountID string) (*types2.Group, error) { +func (m *MockStore) GetGroupByName(ctx context.Context, lockStrength LockingStrength, accountID, groupName string) (*types2.Group, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetGroupByName", ctx, lockStrength, groupName, accountID) + ret := m.ctrl.Call(m, "GetGroupByName", ctx, lockStrength, accountID, groupName) ret0, _ := ret[0].(*types2.Group) ret1, _ := ret[1].(error) return ret0, ret1 } // GetGroupByName indicates an expected call of GetGroupByName. -func (mr *MockStoreMockRecorder) GetGroupByName(ctx, lockStrength, groupName, accountID interface{}) *gomock.Call { +func (mr *MockStoreMockRecorder) GetGroupByName(ctx, lockStrength, accountID, groupName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupByName", reflect.TypeOf((*MockStore)(nil).GetGroupByName), ctx, lockStrength, groupName, accountID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupByName", reflect.TypeOf((*MockStore)(nil).GetGroupByName), ctx, lockStrength, accountID, groupName) } // GetGroupsByIDs mocks base method. @@ -1974,6 +1974,21 @@ func (mr *MockStoreMockRecorder) GetRouteByID(ctx, lockStrength, accountID, rout return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRouteByID", reflect.TypeOf((*MockStore)(nil).GetRouteByID), ctx, lockStrength, accountID, routeID) } +// GetRoutingPeerNetworks mocks base method. +func (m *MockStore) GetRoutingPeerNetworks(ctx context.Context, accountID, peerID string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRoutingPeerNetworks", ctx, accountID, peerID) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRoutingPeerNetworks indicates an expected call of GetRoutingPeerNetworks. +func (mr *MockStoreMockRecorder) GetRoutingPeerNetworks(ctx, accountID, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRoutingPeerNetworks", reflect.TypeOf((*MockStore)(nil).GetRoutingPeerNetworks), ctx, accountID, peerID) +} + // GetServiceByDomain mocks base method. func (m *MockStore) GetServiceByDomain(ctx context.Context, domain string) (*service.Service, error) { m.ctrl.T.Helper() @@ -2361,21 +2376,6 @@ func (mr *MockStoreMockRecorder) IncrementSetupKeyUsage(ctx, setupKeyID interfac return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IncrementSetupKeyUsage", reflect.TypeOf((*MockStore)(nil).IncrementSetupKeyUsage), ctx, setupKeyID) } -// GetRoutingPeerNetworks mocks base method. -func (m *MockStore) GetRoutingPeerNetworks(ctx context.Context, accountID, peerID string) ([]string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRoutingPeerNetworks", ctx, accountID, peerID) - ret0, _ := ret[0].([]string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetRoutingPeerNetworks indicates an expected call of GetRoutingPeerNetworks. -func (mr *MockStoreMockRecorder) GetRoutingPeerNetworks(ctx, accountID, peerID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRoutingPeerNetworks", reflect.TypeOf((*MockStore)(nil).GetRoutingPeerNetworks), ctx, accountID, peerID) -} - // IsPrimaryAccount mocks base method. func (m *MockStore) IsPrimaryAccount(ctx context.Context, accountID string) (bool, string, error) { m.ctrl.T.Helper() From 0588d2dbe1b601be2e514316ae47394871a1c17c Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Tue, 7 Apr 2026 20:56:56 +0800 Subject: [PATCH 280/374] [management] Load missing service columns in pgx account loader (#5816) --- management/server/store/sql_store.go | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index ee1947b18..397b8673d 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -2080,7 +2080,8 @@ func (s *SqlStore) getPostureChecks(ctx context.Context, accountID string) ([]*p func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpservice.Service, error) { const serviceQuery = `SELECT id, account_id, name, domain, enabled, auth, meta_created_at, meta_certificate_issued_at, meta_status, proxy_cluster, - pass_host_header, rewrite_redirects, session_private_key, session_public_key + pass_host_header, rewrite_redirects, session_private_key, session_public_key, + mode, listen_port, port_auto_assigned, source, source_peer, terminated FROM services WHERE account_id = $1` const targetsQuery = `SELECT id, account_id, service_id, path, host, port, protocol, @@ -2097,6 +2098,7 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpserv var auth []byte var createdAt, certIssuedAt sql.NullTime var status, proxyCluster, sessionPrivateKey, sessionPublicKey sql.NullString + var mode, source, sourcePeer sql.NullString err := row.Scan( &s.ID, &s.AccountID, @@ -2112,6 +2114,12 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpserv &s.RewriteRedirects, &sessionPrivateKey, &sessionPublicKey, + &mode, + &s.ListenPort, + &s.PortAutoAssigned, + &source, + &sourcePeer, + &s.Terminated, ) if err != nil { return nil, err @@ -2143,6 +2151,15 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpserv if sessionPublicKey.Valid { s.SessionPublicKey = sessionPublicKey.String } + if mode.Valid { + s.Mode = mode.String + } + if source.Valid { + s.Source = source.String + } + if sourcePeer.Valid { + s.SourcePeer = sourcePeer.String + } s.Targets = []*rpservice.Target{} return &s, nil From aba5d6f0d23e3fee0f6a58fc3301f53943768a03 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Tue, 7 Apr 2026 23:55:35 +0800 Subject: [PATCH 281/374] [client] Error out on netbird expose when block inbound is enabled (#5818) --- client/cmd/expose.go | 5 +++-- client/internal/engine.go | 5 +++++ client/server/server.go | 4 ++++ 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/client/cmd/expose.go b/client/cmd/expose.go index f4727703e..c48a6adac 100644 --- a/client/cmd/expose.go +++ b/client/cmd/expose.go @@ -14,6 +14,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "google.golang.org/grpc/status" "github.com/netbirdio/netbird/client/internal/expose" "github.com/netbirdio/netbird/client/proto" @@ -201,7 +202,7 @@ func exposeFn(cmd *cobra.Command, args []string) error { stream, err := client.ExposeService(ctx, req) if err != nil { - return fmt.Errorf("expose service: %w", err) + return fmt.Errorf("expose service: %v", status.Convert(err).Message()) } if err := handleExposeReady(cmd, stream, port); err != nil { @@ -236,7 +237,7 @@ func toExposeProtocol(exposeProtocol string) (proto.ExposeProtocol, error) { func handleExposeReady(cmd *cobra.Command, stream proto.DaemonService_ExposeServiceClient, port uint64) error { event, err := stream.Recv() if err != nil { - return fmt.Errorf("receive expose event: %w", err) + return fmt.Errorf("receive expose event: %v", status.Convert(err).Message()) } ready, ok := event.Event.(*proto.ExposeServiceEvent_Ready) diff --git a/client/internal/engine.go b/client/internal/engine.go index 0f09ee364..6c7beb32f 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -1837,6 +1837,11 @@ func (e *Engine) GetExposeManager() *expose.Manager { return e.exposeManager } +// IsBlockInbound returns whether inbound connections are blocked. +func (e *Engine) IsBlockInbound() bool { + return e.config.BlockInbound +} + // GetClientMetrics returns the client metrics func (e *Engine) GetClientMetrics() *metrics.ClientMetrics { return e.clientMetrics diff --git a/client/server/server.go b/client/server/server.go index 7c1e70692..e12b6df5b 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -1359,6 +1359,10 @@ func (s *Server) ExposeService(req *proto.ExposeServiceRequest, srv proto.Daemon return gstatus.Errorf(codes.FailedPrecondition, "engine not initialized") } + if engine.IsBlockInbound() { + return gstatus.Errorf(codes.FailedPrecondition, "expose requires inbound connections but 'block inbound' is enabled, disable it first") + } + mgr := engine.GetExposeManager() if mgr == nil { return gstatus.Errorf(codes.Internal, "expose manager not available") From bb85eee40a8d2813c5e7a9f1a04dad20e87b4094 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Tue, 7 Apr 2026 23:56:48 +0800 Subject: [PATCH 282/374] [client] Skip down interfaces in network address collection for posture checks (#5768) --- client/firewall/uspfilter/localip.go | 2 ++ client/system/info.go | 3 +++ 2 files changed, 5 insertions(+) diff --git a/client/firewall/uspfilter/localip.go b/client/firewall/uspfilter/localip.go index ffc807f46..f63fe3e45 100644 --- a/client/firewall/uspfilter/localip.go +++ b/client/firewall/uspfilter/localip.go @@ -144,6 +144,8 @@ func (m *localIPManager) UpdateLocalIPs(iface common.IFaceMapper) (err error) { if err != nil { log.Warnf("failed to get interfaces: %v", err) } else { + // TODO: filter out down interfaces (net.FlagUp). Also handle the reverse + // case where an interface comes up between refreshes. for _, intf := range interfaces { m.processInterface(intf, &newIPv4Bitmap, ipv4Set, &ipv4Addresses) } diff --git a/client/system/info.go b/client/system/info.go index 01176e765..f2546cfe6 100644 --- a/client/system/info.go +++ b/client/system/info.go @@ -153,6 +153,9 @@ func networkAddresses() ([]NetworkAddress, error) { var netAddresses []NetworkAddress for _, iface := range interfaces { + if iface.Flags&net.FlagUp == 0 { + continue + } if iface.HardwareAddr.String() == "" { continue } From 1d920d700c3aa7e4d305dce9714cb0176e35637d Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Tue, 7 Apr 2026 23:56:54 +0800 Subject: [PATCH 283/374] [client] Fix SSH server Stop() deadlock when sessions are active (#5717) --- client/ssh/server/server.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/client/ssh/server/server.go b/client/ssh/server/server.go index 4431ae423..82d3b700f 100644 --- a/client/ssh/server/server.go +++ b/client/ssh/server/server.go @@ -284,19 +284,21 @@ func (s *Server) closeListener(ln net.Listener) { // Stop closes the SSH server func (s *Server) Stop() error { s.mu.Lock() - defer s.mu.Unlock() - - if s.sshServer == nil { + sshServer := s.sshServer + if sshServer == nil { + s.mu.Unlock() return nil } + s.sshServer = nil + s.listener = nil + s.mu.Unlock() - if err := s.sshServer.Close(); err != nil { + // Close outside the lock: session handlers need s.mu for unregisterSession. + if err := sshServer.Close(); err != nil { log.Debugf("close SSH server: %v", err) } - s.sshServer = nil - s.listener = nil - + s.mu.Lock() maps.Clear(s.sessions) maps.Clear(s.pendingAuthJWT) maps.Clear(s.connections) @@ -307,6 +309,7 @@ func (s *Server) Stop() error { } } maps.Clear(s.remoteForwardListeners) + s.mu.Unlock() return nil } From cb73b94ffb01dd0371240df3a02eac46beb64e1f Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 8 Apr 2026 13:40:36 +0800 Subject: [PATCH 284/374] [client] Add TCP DNS support for local listener (#5758) --- client/firewall/iptables/manager_linux.go | 16 + client/firewall/iptables/router_linux.go | 86 ++++ client/firewall/manager/firewall.go | 8 + client/firewall/nftables/manager_linux.go | 16 + client/firewall/nftables/router_linux.go | 125 ++++++ client/firewall/uspfilter/filter.go | 136 ++---- client/firewall/uspfilter/filter_test.go | 153 ++----- client/firewall/uspfilter/nat.go | 17 + client/firewall/uspfilter/rule.go | 4 +- client/firewall/uspfilter/tracer_test.go | 16 +- client/iface/device/device_filter.go | 17 +- client/iface/mocks/filter.go | 40 +- client/iface/mocks/iface/mocks/filter.go | 87 ---- client/internal/dns/handler_chain.go | 15 +- client/internal/dns/mock_server.go | 5 + client/internal/dns/response_writer.go | 20 + client/internal/dns/server.go | 22 +- client/internal/dns/server_test.go | 6 +- client/internal/dns/service.go | 12 +- client/internal/dns/service_listener.go | 121 +++-- client/internal/dns/service_listener_test.go | 86 ++++ client/internal/dns/service_memory.go | 83 +++- client/internal/dns/tcpstack.go | 444 +++++++++++++++++++ client/internal/dns/upstream.go | 173 +++++++- client/internal/dns/upstream_android.go | 4 +- client/internal/dns/upstream_test.go | 295 ++++++++++++ client/internal/dnsfwd/forwarder.go | 20 +- client/internal/engine.go | 5 + 28 files changed, 1615 insertions(+), 417 deletions(-) delete mode 100644 client/iface/mocks/iface/mocks/filter.go create mode 100644 client/internal/dns/service_listener_test.go create mode 100644 client/internal/dns/tcpstack.go diff --git a/client/firewall/iptables/manager_linux.go b/client/firewall/iptables/manager_linux.go index 04c338375..2fc6f8ec8 100644 --- a/client/firewall/iptables/manager_linux.go +++ b/client/firewall/iptables/manager_linux.go @@ -286,6 +286,22 @@ func (m *Manager) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Prot return m.router.RemoveInboundDNAT(localAddr, protocol, sourcePort, targetPort) } +// AddOutputDNAT adds an OUTPUT chain DNAT rule for locally-generated traffic. +func (m *Manager) AddOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + m.mutex.Lock() + defer m.mutex.Unlock() + + return m.router.AddOutputDNAT(localAddr, protocol, sourcePort, targetPort) +} + +// RemoveOutputDNAT removes an OUTPUT chain DNAT rule. +func (m *Manager) RemoveOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + m.mutex.Lock() + defer m.mutex.Unlock() + + return m.router.RemoveOutputDNAT(localAddr, protocol, sourcePort, targetPort) +} + const ( chainNameRaw = "NETBIRD-RAW" chainOUTPUT = "OUTPUT" diff --git a/client/firewall/iptables/router_linux.go b/client/firewall/iptables/router_linux.go index 1fe4c149f..a7c4f67dd 100644 --- a/client/firewall/iptables/router_linux.go +++ b/client/firewall/iptables/router_linux.go @@ -36,6 +36,7 @@ const ( chainRTFWDOUT = "NETBIRD-RT-FWD-OUT" chainRTPRE = "NETBIRD-RT-PRE" chainRTRDR = "NETBIRD-RT-RDR" + chainNATOutput = "NETBIRD-NAT-OUTPUT" chainRTMSSCLAMP = "NETBIRD-RT-MSSCLAMP" routingFinalForwardJump = "ACCEPT" routingFinalNatJump = "MASQUERADE" @@ -43,6 +44,7 @@ const ( jumpManglePre = "jump-mangle-pre" jumpNatPre = "jump-nat-pre" jumpNatPost = "jump-nat-post" + jumpNatOutput = "jump-nat-output" jumpMSSClamp = "jump-mss-clamp" markManglePre = "mark-mangle-pre" markManglePost = "mark-mangle-post" @@ -387,6 +389,14 @@ func (r *router) cleanUpDefaultForwardRules() error { } log.Debug("flushing routing related tables") + + // Remove jump rules from built-in chains before deleting custom chains, + // otherwise the chain deletion fails with "device or resource busy". + jumpRule := []string{"-j", chainNATOutput} + if err := r.iptablesClient.Delete(tableNat, "OUTPUT", jumpRule...); err != nil { + log.Debugf("clean OUTPUT jump rule: %v", err) + } + for _, chainInfo := range []struct { chain string table string @@ -396,6 +406,7 @@ func (r *router) cleanUpDefaultForwardRules() error { {chainRTPRE, tableMangle}, {chainRTNAT, tableNat}, {chainRTRDR, tableNat}, + {chainNATOutput, tableNat}, {chainRTMSSCLAMP, tableMangle}, } { ok, err := r.iptablesClient.ChainExists(chainInfo.table, chainInfo.chain) @@ -970,6 +981,81 @@ func (r *router) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Proto return nil } +// ensureNATOutputChain lazily creates the OUTPUT NAT chain and jump rule on first use. +func (r *router) ensureNATOutputChain() error { + if _, exists := r.rules[jumpNatOutput]; exists { + return nil + } + + chainExists, err := r.iptablesClient.ChainExists(tableNat, chainNATOutput) + if err != nil { + return fmt.Errorf("check chain %s: %w", chainNATOutput, err) + } + if !chainExists { + if err := r.iptablesClient.NewChain(tableNat, chainNATOutput); err != nil { + return fmt.Errorf("create chain %s: %w", chainNATOutput, err) + } + } + + jumpRule := []string{"-j", chainNATOutput} + if err := r.iptablesClient.Insert(tableNat, "OUTPUT", 1, jumpRule...); err != nil { + if !chainExists { + if delErr := r.iptablesClient.ClearAndDeleteChain(tableNat, chainNATOutput); delErr != nil { + log.Warnf("failed to rollback chain %s: %v", chainNATOutput, delErr) + } + } + return fmt.Errorf("add OUTPUT jump rule: %w", err) + } + r.rules[jumpNatOutput] = jumpRule + + r.updateState() + return nil +} + +// AddOutputDNAT adds an OUTPUT chain DNAT rule for locally-generated traffic. +func (r *router) AddOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + ruleID := fmt.Sprintf("output-dnat-%s-%s-%d-%d", localAddr.String(), protocol, sourcePort, targetPort) + + if _, exists := r.rules[ruleID]; exists { + return nil + } + + if err := r.ensureNATOutputChain(); err != nil { + return err + } + + dnatRule := []string{ + "-p", strings.ToLower(string(protocol)), + "--dport", strconv.Itoa(int(sourcePort)), + "-d", localAddr.String(), + "-j", "DNAT", + "--to-destination", ":" + strconv.Itoa(int(targetPort)), + } + + if err := r.iptablesClient.Append(tableNat, chainNATOutput, dnatRule...); err != nil { + return fmt.Errorf("add output DNAT rule: %w", err) + } + r.rules[ruleID] = dnatRule + + r.updateState() + return nil +} + +// RemoveOutputDNAT removes an OUTPUT chain DNAT rule. +func (r *router) RemoveOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + ruleID := fmt.Sprintf("output-dnat-%s-%s-%d-%d", localAddr.String(), protocol, sourcePort, targetPort) + + if dnatRule, exists := r.rules[ruleID]; exists { + if err := r.iptablesClient.Delete(tableNat, chainNATOutput, dnatRule...); err != nil { + return fmt.Errorf("delete output DNAT rule: %w", err) + } + delete(r.rules, ruleID) + } + + r.updateState() + return nil +} + func applyPort(flag string, port *firewall.Port) []string { if port == nil { return nil diff --git a/client/firewall/manager/firewall.go b/client/firewall/manager/firewall.go index 3511a5463..d65d717b3 100644 --- a/client/firewall/manager/firewall.go +++ b/client/firewall/manager/firewall.go @@ -169,6 +169,14 @@ type Manager interface { // RemoveInboundDNAT removes inbound DNAT rule RemoveInboundDNAT(localAddr netip.Addr, protocol Protocol, sourcePort, targetPort uint16) error + // AddOutputDNAT adds an OUTPUT chain DNAT rule for locally-generated traffic. + // localAddr must be IPv4; the underlying iptables/nftables backends are IPv4-only. + AddOutputDNAT(localAddr netip.Addr, protocol Protocol, sourcePort, targetPort uint16) error + + // RemoveOutputDNAT removes an OUTPUT chain DNAT rule. + // localAddr must be IPv4; the underlying iptables/nftables backends are IPv4-only. + RemoveOutputDNAT(localAddr netip.Addr, protocol Protocol, sourcePort, targetPort uint16) error + // SetupEBPFProxyNoTrack creates static notrack rules for eBPF proxy loopback traffic. // This prevents conntrack from interfering with WireGuard proxy communication. SetupEBPFProxyNoTrack(proxyPort, wgPort uint16) error diff --git a/client/firewall/nftables/manager_linux.go b/client/firewall/nftables/manager_linux.go index f57b28abc..beb5b70a7 100644 --- a/client/firewall/nftables/manager_linux.go +++ b/client/firewall/nftables/manager_linux.go @@ -346,6 +346,22 @@ func (m *Manager) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Prot return m.router.RemoveInboundDNAT(localAddr, protocol, sourcePort, targetPort) } +// AddOutputDNAT adds an OUTPUT chain DNAT rule for locally-generated traffic. +func (m *Manager) AddOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + m.mutex.Lock() + defer m.mutex.Unlock() + + return m.router.AddOutputDNAT(localAddr, protocol, sourcePort, targetPort) +} + +// RemoveOutputDNAT removes an OUTPUT chain DNAT rule. +func (m *Manager) RemoveOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + m.mutex.Lock() + defer m.mutex.Unlock() + + return m.router.RemoveOutputDNAT(localAddr, protocol, sourcePort, targetPort) +} + const ( chainNameRawOutput = "netbird-raw-out" chainNameRawPrerouting = "netbird-raw-pre" diff --git a/client/firewall/nftables/router_linux.go b/client/firewall/nftables/router_linux.go index fde654c20..904daf7cb 100644 --- a/client/firewall/nftables/router_linux.go +++ b/client/firewall/nftables/router_linux.go @@ -36,6 +36,7 @@ const ( chainNameRoutingFw = "netbird-rt-fwd" chainNameRoutingNat = "netbird-rt-postrouting" chainNameRoutingRdr = "netbird-rt-redirect" + chainNameNATOutput = "netbird-nat-output" chainNameForward = "FORWARD" chainNameMangleForward = "netbird-mangle-forward" @@ -1853,6 +1854,130 @@ func (r *router) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Proto return nil } +// ensureNATOutputChain lazily creates the OUTPUT NAT chain on first use. +func (r *router) ensureNATOutputChain() error { + if _, exists := r.chains[chainNameNATOutput]; exists { + return nil + } + + r.chains[chainNameNATOutput] = r.conn.AddChain(&nftables.Chain{ + Name: chainNameNATOutput, + Table: r.workTable, + Hooknum: nftables.ChainHookOutput, + Priority: nftables.ChainPriorityNATDest, + Type: nftables.ChainTypeNAT, + }) + + if err := r.conn.Flush(); err != nil { + delete(r.chains, chainNameNATOutput) + return fmt.Errorf("create NAT output chain: %w", err) + } + return nil +} + +// AddOutputDNAT adds an OUTPUT chain DNAT rule for locally-generated traffic. +func (r *router) AddOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + ruleID := fmt.Sprintf("output-dnat-%s-%s-%d-%d", localAddr.String(), protocol, sourcePort, targetPort) + + if _, exists := r.rules[ruleID]; exists { + return nil + } + + if err := r.ensureNATOutputChain(); err != nil { + return err + } + + protoNum, err := protoToInt(protocol) + if err != nil { + return fmt.Errorf("convert protocol to number: %w", err) + } + + exprs := []expr.Any{ + &expr.Meta{Key: expr.MetaKeyL4PROTO, Register: 1}, + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 1, + Data: []byte{protoNum}, + }, + &expr.Payload{ + DestRegister: 2, + Base: expr.PayloadBaseTransportHeader, + Offset: 2, + Len: 2, + }, + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 2, + Data: binaryutil.BigEndian.PutUint16(sourcePort), + }, + } + + exprs = append(exprs, applyPrefix(netip.PrefixFrom(localAddr, 32), false)...) + + exprs = append(exprs, + &expr.Immediate{ + Register: 1, + Data: localAddr.AsSlice(), + }, + &expr.Immediate{ + Register: 2, + Data: binaryutil.BigEndian.PutUint16(targetPort), + }, + &expr.NAT{ + Type: expr.NATTypeDestNAT, + Family: uint32(nftables.TableFamilyIPv4), + RegAddrMin: 1, + RegProtoMin: 2, + }, + ) + + dnatRule := &nftables.Rule{ + Table: r.workTable, + Chain: r.chains[chainNameNATOutput], + Exprs: exprs, + UserData: []byte(ruleID), + } + r.conn.AddRule(dnatRule) + + if err := r.conn.Flush(); err != nil { + return fmt.Errorf("add output DNAT rule: %w", err) + } + + r.rules[ruleID] = dnatRule + + return nil +} + +// RemoveOutputDNAT removes an OUTPUT chain DNAT rule. +func (r *router) RemoveOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + if err := r.refreshRulesMap(); err != nil { + return fmt.Errorf(refreshRulesMapError, err) + } + + ruleID := fmt.Sprintf("output-dnat-%s-%s-%d-%d", localAddr.String(), protocol, sourcePort, targetPort) + + rule, exists := r.rules[ruleID] + if !exists { + return nil + } + + if rule.Handle == 0 { + log.Warnf("output DNAT rule %s has no handle, removing stale entry", ruleID) + delete(r.rules, ruleID) + return nil + } + + if err := r.conn.DelRule(rule); err != nil { + return fmt.Errorf("delete output DNAT rule %s: %w", ruleID, err) + } + if err := r.conn.Flush(); err != nil { + return fmt.Errorf("flush delete output DNAT rule: %w", err) + } + delete(r.rules, ruleID) + + return nil +} + // applyNetwork generates nftables expressions for networks (CIDR) or sets func (r *router) applyNetwork( network firewall.Network, diff --git a/client/firewall/uspfilter/filter.go b/client/firewall/uspfilter/filter.go index df2e274eb..cb9e1bb0a 100644 --- a/client/firewall/uspfilter/filter.go +++ b/client/firewall/uspfilter/filter.go @@ -140,6 +140,17 @@ type Manager struct { mtu uint16 mssClampValue uint16 mssClampEnabled bool + + // Only one hook per protocol is supported. Outbound direction only. + udpHookOut atomic.Pointer[packetHook] + tcpHookOut atomic.Pointer[packetHook] +} + +// packetHook stores a registered hook for a specific IP:port. +type packetHook struct { + ip netip.Addr + port uint16 + fn func([]byte) bool } // decoder for packages @@ -594,6 +605,8 @@ func (m *Manager) resetState() { maps.Clear(m.incomingRules) maps.Clear(m.routeRulesMap) m.routeRules = m.routeRules[:0] + m.udpHookOut.Store(nil) + m.tcpHookOut.Store(nil) if m.udpTracker != nil { m.udpTracker.Close() @@ -713,6 +726,9 @@ func (m *Manager) filterOutbound(packetData []byte, size int) bool { return true } case layers.LayerTypeTCP: + if m.tcpHooksDrop(uint16(d.tcp.DstPort), dstIP, packetData) { + return true + } // Clamp MSS on all TCP SYN packets, including those from local IPs. // SNATed routed traffic may appear as local IP but still requires clamping. if m.mssClampEnabled { @@ -895,38 +911,21 @@ func (m *Manager) trackInbound(d *decoder, srcIP, dstIP netip.Addr, ruleID []byt d.dnatOrigPort = 0 } -// udpHooksDrop checks if any UDP hooks should drop the packet func (m *Manager) udpHooksDrop(dport uint16, dstIP netip.Addr, packetData []byte) bool { - m.mutex.RLock() - defer m.mutex.RUnlock() + return hookMatches(m.udpHookOut.Load(), dstIP, dport, packetData) +} - // Check specific destination IP first - if rules, exists := m.outgoingRules[dstIP]; exists { - for _, rule := range rules { - if rule.udpHook != nil && portsMatch(rule.dPort, dport) { - return rule.udpHook(packetData) - } - } +func (m *Manager) tcpHooksDrop(dport uint16, dstIP netip.Addr, packetData []byte) bool { + return hookMatches(m.tcpHookOut.Load(), dstIP, dport, packetData) +} + +func hookMatches(h *packetHook, dstIP netip.Addr, dport uint16, packetData []byte) bool { + if h == nil { + return false } - - // Check IPv4 unspecified address - if rules, exists := m.outgoingRules[netip.IPv4Unspecified()]; exists { - for _, rule := range rules { - if rule.udpHook != nil && portsMatch(rule.dPort, dport) { - return rule.udpHook(packetData) - } - } + if h.ip == dstIP && h.port == dport { + return h.fn(packetData) } - - // Check IPv6 unspecified address - if rules, exists := m.outgoingRules[netip.IPv6Unspecified()]; exists { - for _, rule := range rules { - if rule.udpHook != nil && portsMatch(rule.dPort, dport) { - return rule.udpHook(packetData) - } - } - } - return false } @@ -1278,12 +1277,6 @@ func validateRule(ip netip.Addr, packetData []byte, rules map[string]PeerRule, d return rule.mgmtId, rule.drop, true } case layers.LayerTypeUDP: - // if rule has UDP hook (and if we are here we match this rule) - // we ignore rule.drop and call this hook - if rule.udpHook != nil { - return rule.mgmtId, rule.udpHook(packetData), true - } - if portsMatch(rule.sPort, uint16(d.udp.SrcPort)) && portsMatch(rule.dPort, uint16(d.udp.DstPort)) { return rule.mgmtId, rule.drop, true } @@ -1342,65 +1335,30 @@ func (m *Manager) ruleMatches(rule *RouteRule, srcAddr, dstAddr netip.Addr, prot return sourceMatched } -// AddUDPPacketHook calls hook when UDP packet from given direction matched -// -// Hook function returns flag which indicates should be the matched package dropped or not -func (m *Manager) AddUDPPacketHook(in bool, ip netip.Addr, dPort uint16, hook func(packet []byte) bool) string { - r := PeerRule{ - id: uuid.New().String(), - ip: ip, - protoLayer: layers.LayerTypeUDP, - dPort: &firewall.Port{Values: []uint16{dPort}}, - ipLayer: layers.LayerTypeIPv6, - udpHook: hook, +// SetUDPPacketHook sets the outbound UDP packet hook. Pass nil hook to remove. +func (m *Manager) SetUDPPacketHook(ip netip.Addr, dPort uint16, hook func(packet []byte) bool) { + if hook == nil { + m.udpHookOut.Store(nil) + return } - - if ip.Is4() { - r.ipLayer = layers.LayerTypeIPv4 - } - - m.mutex.Lock() - if in { - // Incoming UDP hooks are stored in allow rules map - if _, ok := m.incomingRules[r.ip]; !ok { - m.incomingRules[r.ip] = make(map[string]PeerRule) - } - m.incomingRules[r.ip][r.id] = r - } else { - if _, ok := m.outgoingRules[r.ip]; !ok { - m.outgoingRules[r.ip] = make(map[string]PeerRule) - } - m.outgoingRules[r.ip][r.id] = r - } - m.mutex.Unlock() - - return r.id + m.udpHookOut.Store(&packetHook{ + ip: ip, + port: dPort, + fn: hook, + }) } -// RemovePacketHook removes packet hook by given ID -func (m *Manager) RemovePacketHook(hookID string) error { - m.mutex.Lock() - defer m.mutex.Unlock() - - // Check incoming hooks (stored in allow rules) - for _, arr := range m.incomingRules { - for _, r := range arr { - if r.id == hookID { - delete(arr, r.id) - return nil - } - } +// SetTCPPacketHook sets the outbound TCP packet hook. Pass nil hook to remove. +func (m *Manager) SetTCPPacketHook(ip netip.Addr, dPort uint16, hook func(packet []byte) bool) { + if hook == nil { + m.tcpHookOut.Store(nil) + return } - // Check outgoing hooks - for _, arr := range m.outgoingRules { - for _, r := range arr { - if r.id == hookID { - delete(arr, r.id) - return nil - } - } - } - return fmt.Errorf("hook with given id not found") + m.tcpHookOut.Store(&packetHook{ + ip: ip, + port: dPort, + fn: hook, + }) } // SetLogLevel sets the log level for the firewall manager diff --git a/client/firewall/uspfilter/filter_test.go b/client/firewall/uspfilter/filter_test.go index 55a8e723c..5f0f9f860 100644 --- a/client/firewall/uspfilter/filter_test.go +++ b/client/firewall/uspfilter/filter_test.go @@ -12,6 +12,7 @@ import ( "github.com/google/gopacket" "github.com/google/gopacket/layers" "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" wgdevice "golang.zx2c4.com/wireguard/device" @@ -186,81 +187,52 @@ func TestManagerDeleteRule(t *testing.T) { } } -func TestAddUDPPacketHook(t *testing.T) { - tests := []struct { - name string - in bool - expDir fw.RuleDirection - ip netip.Addr - dPort uint16 - hook func([]byte) bool - expectedID string - }{ - { - name: "Test Outgoing UDP Packet Hook", - in: false, - expDir: fw.RuleDirectionOUT, - ip: netip.MustParseAddr("10.168.0.1"), - dPort: 8000, - hook: func([]byte) bool { return true }, - }, - { - name: "Test Incoming UDP Packet Hook", - in: true, - expDir: fw.RuleDirectionIN, - ip: netip.MustParseAddr("::1"), - dPort: 9000, - hook: func([]byte) bool { return false }, - }, - } +func TestSetUDPPacketHook(t *testing.T) { + manager, err := Create(&IFaceMock{ + SetFilterFunc: func(device.PacketFilter) error { return nil }, + }, false, flowLogger, nbiface.DefaultMTU) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, manager.Close(nil)) }) - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - manager, err := Create(&IFaceMock{ - SetFilterFunc: func(device.PacketFilter) error { return nil }, - }, false, flowLogger, nbiface.DefaultMTU) - require.NoError(t, err) + var called bool + manager.SetUDPPacketHook(netip.MustParseAddr("10.168.0.1"), 8000, func([]byte) bool { + called = true + return true + }) - manager.AddUDPPacketHook(tt.in, tt.ip, tt.dPort, tt.hook) + h := manager.udpHookOut.Load() + require.NotNil(t, h) + assert.Equal(t, netip.MustParseAddr("10.168.0.1"), h.ip) + assert.Equal(t, uint16(8000), h.port) + assert.True(t, h.fn(nil)) + assert.True(t, called) - var addedRule PeerRule - if tt.in { - // Incoming UDP hooks are stored in allow rules map - if len(manager.incomingRules[tt.ip]) != 1 { - t.Errorf("expected 1 incoming rule, got %d", len(manager.incomingRules[tt.ip])) - return - } - for _, rule := range manager.incomingRules[tt.ip] { - addedRule = rule - } - } else { - if len(manager.outgoingRules[tt.ip]) != 1 { - t.Errorf("expected 1 outgoing rule, got %d", len(manager.outgoingRules[tt.ip])) - return - } - for _, rule := range manager.outgoingRules[tt.ip] { - addedRule = rule - } - } + manager.SetUDPPacketHook(netip.MustParseAddr("10.168.0.1"), 8000, nil) + assert.Nil(t, manager.udpHookOut.Load()) +} - if tt.ip.Compare(addedRule.ip) != 0 { - t.Errorf("expected ip %s, got %s", tt.ip, addedRule.ip) - return - } - if tt.dPort != addedRule.dPort.Values[0] { - t.Errorf("expected dPort %d, got %d", tt.dPort, addedRule.dPort.Values[0]) - return - } - if layers.LayerTypeUDP != addedRule.protoLayer { - t.Errorf("expected protoLayer %s, got %s", layers.LayerTypeUDP, addedRule.protoLayer) - return - } - if addedRule.udpHook == nil { - t.Errorf("expected udpHook to be set") - return - } - }) - } +func TestSetTCPPacketHook(t *testing.T) { + manager, err := Create(&IFaceMock{ + SetFilterFunc: func(device.PacketFilter) error { return nil }, + }, false, flowLogger, nbiface.DefaultMTU) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, manager.Close(nil)) }) + + var called bool + manager.SetTCPPacketHook(netip.MustParseAddr("10.168.0.1"), 53, func([]byte) bool { + called = true + return true + }) + + h := manager.tcpHookOut.Load() + require.NotNil(t, h) + assert.Equal(t, netip.MustParseAddr("10.168.0.1"), h.ip) + assert.Equal(t, uint16(53), h.port) + assert.True(t, h.fn(nil)) + assert.True(t, called) + + manager.SetTCPPacketHook(netip.MustParseAddr("10.168.0.1"), 53, nil) + assert.Nil(t, manager.tcpHookOut.Load()) } // TestPeerRuleLifecycleDenyRules verifies that deny rules are correctly added @@ -530,39 +502,12 @@ func TestRemovePacketHook(t *testing.T) { require.NoError(t, manager.Close(nil)) }() - // Add a UDP packet hook - hookFunc := func(data []byte) bool { return true } - hookID := manager.AddUDPPacketHook(false, netip.MustParseAddr("192.168.0.1"), 8080, hookFunc) + manager.SetUDPPacketHook(netip.MustParseAddr("192.168.0.1"), 8080, func([]byte) bool { return true }) - // Assert the hook is added by finding it in the manager's outgoing rules - found := false - for _, arr := range manager.outgoingRules { - for _, rule := range arr { - if rule.id == hookID { - found = true - break - } - } - } + require.NotNil(t, manager.udpHookOut.Load(), "hook should be registered") - if !found { - t.Fatalf("The hook was not added properly.") - } - - // Now remove the packet hook - err = manager.RemovePacketHook(hookID) - if err != nil { - t.Fatalf("Failed to remove hook: %s", err) - } - - // Assert the hook is removed by checking it in the manager's outgoing rules - for _, arr := range manager.outgoingRules { - for _, rule := range arr { - if rule.id == hookID { - t.Fatalf("The hook was not removed properly.") - } - } - } + manager.SetUDPPacketHook(netip.MustParseAddr("192.168.0.1"), 8080, nil) + assert.Nil(t, manager.udpHookOut.Load(), "hook should be removed") } func TestProcessOutgoingHooks(t *testing.T) { @@ -592,8 +537,7 @@ func TestProcessOutgoingHooks(t *testing.T) { } hookCalled := false - hookID := manager.AddUDPPacketHook( - false, + manager.SetUDPPacketHook( netip.MustParseAddr("100.10.0.100"), 53, func([]byte) bool { @@ -601,7 +545,6 @@ func TestProcessOutgoingHooks(t *testing.T) { return true }, ) - require.NotEmpty(t, hookID) // Create test UDP packet ipv4 := &layers.IPv4{ diff --git a/client/firewall/uspfilter/nat.go b/client/firewall/uspfilter/nat.go index 597f892cf..8ed32eb5e 100644 --- a/client/firewall/uspfilter/nat.go +++ b/client/firewall/uspfilter/nat.go @@ -421,6 +421,7 @@ func (m *Manager) addPortRedirection(targetIP netip.Addr, protocol gopacket.Laye } // AddInboundDNAT adds an inbound DNAT rule redirecting traffic from NetBird peers to local services. +// TODO: also delegate to nativeFirewall when available for kernel WG mode func (m *Manager) AddInboundDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { var layerType gopacket.LayerType switch protocol { @@ -466,6 +467,22 @@ func (m *Manager) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Prot return m.removePortRedirection(localAddr, layerType, sourcePort, targetPort) } +// AddOutputDNAT delegates to the native firewall if available. +func (m *Manager) AddOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + if m.nativeFirewall == nil { + return fmt.Errorf("output DNAT not supported without native firewall") + } + return m.nativeFirewall.AddOutputDNAT(localAddr, protocol, sourcePort, targetPort) +} + +// RemoveOutputDNAT delegates to the native firewall if available. +func (m *Manager) RemoveOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + if m.nativeFirewall == nil { + return nil + } + return m.nativeFirewall.RemoveOutputDNAT(localAddr, protocol, sourcePort, targetPort) +} + // translateInboundPortDNAT applies port-specific DNAT translation to inbound packets. func (m *Manager) translateInboundPortDNAT(packetData []byte, d *decoder, srcIP, dstIP netip.Addr) bool { if !m.portDNATEnabled.Load() { diff --git a/client/firewall/uspfilter/rule.go b/client/firewall/uspfilter/rule.go index dbe3a7858..08d68a78e 100644 --- a/client/firewall/uspfilter/rule.go +++ b/client/firewall/uspfilter/rule.go @@ -18,9 +18,7 @@ type PeerRule struct { protoLayer gopacket.LayerType sPort *firewall.Port dPort *firewall.Port - drop bool - - udpHook func([]byte) bool + drop bool } // ID returns the rule id diff --git a/client/firewall/uspfilter/tracer_test.go b/client/firewall/uspfilter/tracer_test.go index d9f9f1aa8..657f96fc0 100644 --- a/client/firewall/uspfilter/tracer_test.go +++ b/client/firewall/uspfilter/tracer_test.go @@ -399,21 +399,17 @@ func TestTracePacket(t *testing.T) { { name: "UDPTraffic_WithHook", setup: func(m *Manager) { - hookFunc := func([]byte) bool { - return true - } - m.AddUDPPacketHook(true, netip.MustParseAddr("1.1.1.1"), 53, hookFunc) + m.SetUDPPacketHook(netip.MustParseAddr("100.10.255.254"), 53, func([]byte) bool { + return true // drop (intercepted by hook) + }) }, packetBuilder: func() *PacketBuilder { - return createPacketBuilder("1.1.1.1", "100.10.0.100", "udp", 12345, 53, fw.RuleDirectionIN) + return createPacketBuilder("100.10.0.100", "100.10.255.254", "udp", 12345, 53, fw.RuleDirectionOUT) }, expectedStages: []PacketStage{ StageReceived, - StageInboundPortDNAT, - StageInbound1to1NAT, - StageConntrack, - StageRouting, - StagePeerACL, + StageOutbound1to1NAT, + StageOutboundPortReverse, StageCompleted, }, expectedAllow: false, diff --git a/client/iface/device/device_filter.go b/client/iface/device/device_filter.go index 708f38d26..4357d1916 100644 --- a/client/iface/device/device_filter.go +++ b/client/iface/device/device_filter.go @@ -15,14 +15,17 @@ type PacketFilter interface { // FilterInbound filter incoming packets from external sources to host FilterInbound(packetData []byte, size int) bool - // AddUDPPacketHook calls hook when UDP packet from given direction matched - // - // Hook function returns flag which indicates should be the matched package dropped or not. - // Hook function receives raw network packet data as argument. - AddUDPPacketHook(in bool, ip netip.Addr, dPort uint16, hook func(packet []byte) bool) string + // SetUDPPacketHook registers a hook for outbound UDP packets matching the given IP and port. + // Hook function returns true if the packet should be dropped. + // Only one UDP hook is supported; calling again replaces the previous hook. + // Pass nil hook to remove. + SetUDPPacketHook(ip netip.Addr, dPort uint16, hook func(packet []byte) bool) - // RemovePacketHook removes hook by ID - RemovePacketHook(hookID string) error + // SetTCPPacketHook registers a hook for outbound TCP packets matching the given IP and port. + // Hook function returns true if the packet should be dropped. + // Only one TCP hook is supported; calling again replaces the previous hook. + // Pass nil hook to remove. + SetTCPPacketHook(ip netip.Addr, dPort uint16, hook func(packet []byte) bool) } // FilteredDevice to override Read or Write of packets diff --git a/client/iface/mocks/filter.go b/client/iface/mocks/filter.go index 566068aa5..5ae98039c 100644 --- a/client/iface/mocks/filter.go +++ b/client/iface/mocks/filter.go @@ -34,18 +34,28 @@ func (m *MockPacketFilter) EXPECT() *MockPacketFilterMockRecorder { return m.recorder } -// AddUDPPacketHook mocks base method. -func (m *MockPacketFilter) AddUDPPacketHook(arg0 bool, arg1 netip.Addr, arg2 uint16, arg3 func([]byte) bool) string { +// SetUDPPacketHook mocks base method. +func (m *MockPacketFilter) SetUDPPacketHook(arg0 netip.Addr, arg1 uint16, arg2 func([]byte) bool) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddUDPPacketHook", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(string) - return ret0 + m.ctrl.Call(m, "SetUDPPacketHook", arg0, arg1, arg2) } -// AddUDPPacketHook indicates an expected call of AddUDPPacketHook. -func (mr *MockPacketFilterMockRecorder) AddUDPPacketHook(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +// SetUDPPacketHook indicates an expected call of SetUDPPacketHook. +func (mr *MockPacketFilterMockRecorder) SetUDPPacketHook(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUDPPacketHook", reflect.TypeOf((*MockPacketFilter)(nil).AddUDPPacketHook), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetUDPPacketHook", reflect.TypeOf((*MockPacketFilter)(nil).SetUDPPacketHook), arg0, arg1, arg2) +} + +// SetTCPPacketHook mocks base method. +func (m *MockPacketFilter) SetTCPPacketHook(arg0 netip.Addr, arg1 uint16, arg2 func([]byte) bool) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTCPPacketHook", arg0, arg1, arg2) +} + +// SetTCPPacketHook indicates an expected call of SetTCPPacketHook. +func (mr *MockPacketFilterMockRecorder) SetTCPPacketHook(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTCPPacketHook", reflect.TypeOf((*MockPacketFilter)(nil).SetTCPPacketHook), arg0, arg1, arg2) } // FilterInbound mocks base method. @@ -75,17 +85,3 @@ func (mr *MockPacketFilterMockRecorder) FilterOutbound(arg0 interface{}, arg1 an mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterOutbound", reflect.TypeOf((*MockPacketFilter)(nil).FilterOutbound), arg0, arg1) } - -// RemovePacketHook mocks base method. -func (m *MockPacketFilter) RemovePacketHook(arg0 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RemovePacketHook", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// RemovePacketHook indicates an expected call of RemovePacketHook. -func (mr *MockPacketFilterMockRecorder) RemovePacketHook(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemovePacketHook", reflect.TypeOf((*MockPacketFilter)(nil).RemovePacketHook), arg0) -} diff --git a/client/iface/mocks/iface/mocks/filter.go b/client/iface/mocks/iface/mocks/filter.go deleted file mode 100644 index 291ab9ab5..000000000 --- a/client/iface/mocks/iface/mocks/filter.go +++ /dev/null @@ -1,87 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/netbirdio/netbird/client/iface (interfaces: PacketFilter) - -// Package mocks is a generated GoMock package. -package mocks - -import ( - net "net" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" -) - -// MockPacketFilter is a mock of PacketFilter interface. -type MockPacketFilter struct { - ctrl *gomock.Controller - recorder *MockPacketFilterMockRecorder -} - -// MockPacketFilterMockRecorder is the mock recorder for MockPacketFilter. -type MockPacketFilterMockRecorder struct { - mock *MockPacketFilter -} - -// NewMockPacketFilter creates a new mock instance. -func NewMockPacketFilter(ctrl *gomock.Controller) *MockPacketFilter { - mock := &MockPacketFilter{ctrl: ctrl} - mock.recorder = &MockPacketFilterMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockPacketFilter) EXPECT() *MockPacketFilterMockRecorder { - return m.recorder -} - -// AddUDPPacketHook mocks base method. -func (m *MockPacketFilter) AddUDPPacketHook(arg0 bool, arg1 net.IP, arg2 uint16, arg3 func(*net.UDPAddr, []byte) bool) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddUDPPacketHook", arg0, arg1, arg2, arg3) -} - -// AddUDPPacketHook indicates an expected call of AddUDPPacketHook. -func (mr *MockPacketFilterMockRecorder) AddUDPPacketHook(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUDPPacketHook", reflect.TypeOf((*MockPacketFilter)(nil).AddUDPPacketHook), arg0, arg1, arg2, arg3) -} - -// FilterInbound mocks base method. -func (m *MockPacketFilter) FilterInbound(arg0 []byte) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FilterInbound", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// FilterInbound indicates an expected call of FilterInbound. -func (mr *MockPacketFilterMockRecorder) FilterInbound(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterInbound", reflect.TypeOf((*MockPacketFilter)(nil).FilterInbound), arg0) -} - -// FilterOutbound mocks base method. -func (m *MockPacketFilter) FilterOutbound(arg0 []byte) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FilterOutbound", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// FilterOutbound indicates an expected call of FilterOutbound. -func (mr *MockPacketFilterMockRecorder) FilterOutbound(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterOutbound", reflect.TypeOf((*MockPacketFilter)(nil).FilterOutbound), arg0) -} - -// SetNetwork mocks base method. -func (m *MockPacketFilter) SetNetwork(arg0 *net.IPNet) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetNetwork", arg0) -} - -// SetNetwork indicates an expected call of SetNetwork. -func (mr *MockPacketFilterMockRecorder) SetNetwork(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNetwork", reflect.TypeOf((*MockPacketFilter)(nil).SetNetwork), arg0) -} diff --git a/client/internal/dns/handler_chain.go b/client/internal/dns/handler_chain.go index 06a2056b1..6fbdedc59 100644 --- a/client/internal/dns/handler_chain.go +++ b/client/internal/dns/handler_chain.go @@ -73,6 +73,9 @@ func (w *ResponseWriterChain) WriteMsg(m *dns.Msg) error { return nil } w.response = m + if m.MsgHdr.Truncated { + w.SetMeta("truncated", "true") + } return w.ResponseWriter.WriteMsg(m) } @@ -195,10 +198,14 @@ func (c *HandlerChain) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { startTime := time.Now() requestID := resutil.GenerateRequestID() - logger := log.WithFields(log.Fields{ + fields := log.Fields{ "request_id": requestID, "dns_id": fmt.Sprintf("%04x", r.Id), - }) + } + if addr := w.RemoteAddr(); addr != nil { + fields["client"] = addr.String() + } + logger := log.WithFields(fields) question := r.Question[0] qname := strings.ToLower(question.Name) @@ -261,9 +268,9 @@ func (c *HandlerChain) logResponse(logger *log.Entry, cw *ResponseWriterChain, q meta += " " + k + "=" + v } - logger.Tracef("response: domain=%s rcode=%s answers=%s%s took=%s", + logger.Tracef("response: domain=%s rcode=%s answers=%s size=%dB%s took=%s", qname, dns.RcodeToString[cw.response.Rcode], resutil.FormatAnswers(cw.response.Answer), - meta, time.Since(startTime)) + cw.response.Len(), meta, time.Since(startTime)) } func (c *HandlerChain) isHandlerMatch(qname string, entry HandlerEntry) bool { diff --git a/client/internal/dns/mock_server.go b/client/internal/dns/mock_server.go index 1df57d1db..548b1f54f 100644 --- a/client/internal/dns/mock_server.go +++ b/client/internal/dns/mock_server.go @@ -90,6 +90,11 @@ func (m *MockServer) SetRouteChecker(func(netip.Addr) bool) { // Mock implementation - no-op } +// SetFirewall mock implementation of SetFirewall from Server interface +func (m *MockServer) SetFirewall(Firewall) { + // Mock implementation - no-op +} + // BeginBatch mock implementation of BeginBatch from Server interface func (m *MockServer) BeginBatch() { // Mock implementation - no-op diff --git a/client/internal/dns/response_writer.go b/client/internal/dns/response_writer.go index edc65a5d9..287cf28b0 100644 --- a/client/internal/dns/response_writer.go +++ b/client/internal/dns/response_writer.go @@ -104,3 +104,23 @@ func (r *responseWriter) TsigTimersOnly(bool) { // After a call to Hijack(), the DNS package will not do anything with the connection. func (r *responseWriter) Hijack() { } + +// remoteAddrFromPacket extracts the source IP:port from a decoded packet for logging. +func remoteAddrFromPacket(packet gopacket.Packet) *net.UDPAddr { + var srcIP net.IP + if ipv4 := packet.Layer(layers.LayerTypeIPv4); ipv4 != nil { + srcIP = ipv4.(*layers.IPv4).SrcIP + } else if ipv6 := packet.Layer(layers.LayerTypeIPv6); ipv6 != nil { + srcIP = ipv6.(*layers.IPv6).SrcIP + } + + var srcPort int + if udp := packet.Layer(layers.LayerTypeUDP); udp != nil { + srcPort = int(udp.(*layers.UDP).SrcPort) + } + + if srcIP == nil { + return nil + } + return &net.UDPAddr{IP: srcIP, Port: srcPort} +} diff --git a/client/internal/dns/server.go b/client/internal/dns/server.go index 3c47f4ee6..d4fda5db3 100644 --- a/client/internal/dns/server.go +++ b/client/internal/dns/server.go @@ -58,6 +58,7 @@ type Server interface { UpdateServerConfig(domains dnsconfig.ServerDomains) error PopulateManagementDomain(mgmtURL *url.URL) error SetRouteChecker(func(netip.Addr) bool) + SetFirewall(Firewall) } type nsGroupsByDomain struct { @@ -151,7 +152,7 @@ func NewDefaultServer(ctx context.Context, config DefaultServerConfig) (*Default if config.WgInterface.IsUserspaceBind() { dnsService = NewServiceViaMemory(config.WgInterface) } else { - dnsService = newServiceViaListener(config.WgInterface, addrPort) + dnsService = newServiceViaListener(config.WgInterface, addrPort, nil) } server := newDefaultServer(ctx, config.WgInterface, dnsService, config.StatusRecorder, config.StateManager, config.DisableSys) @@ -374,6 +375,17 @@ func (s *DefaultServer) DnsIP() netip.Addr { return s.service.RuntimeIP() } +// SetFirewall sets the firewall used for DNS port DNAT rules. +// This must be called before Initialize when using the listener-based service, +// because the firewall is typically not available at construction time. +func (s *DefaultServer) SetFirewall(fw Firewall) { + if svc, ok := s.service.(*serviceViaListener); ok { + svc.listenerFlagLock.Lock() + svc.firewall = fw + svc.listenerFlagLock.Unlock() + } +} + // Stop stops the server func (s *DefaultServer) Stop() { s.probeMu.Lock() @@ -395,8 +407,12 @@ func (s *DefaultServer) Stop() { maps.Clear(s.extraDomains) } -func (s *DefaultServer) disableDNS() error { - defer s.service.Stop() +func (s *DefaultServer) disableDNS() (retErr error) { + defer func() { + if err := s.service.Stop(); err != nil { + retErr = errors.Join(retErr, fmt.Errorf("stop DNS service: %w", err)) + } + }() if s.isUsingNoopHostManager() { return nil diff --git a/client/internal/dns/server_test.go b/client/internal/dns/server_test.go index d3b0c250d..f77f6e898 100644 --- a/client/internal/dns/server_test.go +++ b/client/internal/dns/server_test.go @@ -476,8 +476,8 @@ func TestDNSFakeResolverHandleUpdates(t *testing.T) { packetfilter := pfmock.NewMockPacketFilter(ctrl) packetfilter.EXPECT().FilterOutbound(gomock.Any(), gomock.Any()).AnyTimes() - packetfilter.EXPECT().AddUDPPacketHook(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()) - packetfilter.EXPECT().RemovePacketHook(gomock.Any()) + packetfilter.EXPECT().SetUDPPacketHook(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + packetfilter.EXPECT().SetTCPPacketHook(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() if err := wgIface.SetFilter(packetfilter); err != nil { t.Errorf("set packet filter: %v", err) @@ -1071,7 +1071,7 @@ func (m *mockHandler) ID() types.HandlerID { return types.Hand type mockService struct{} func (m *mockService) Listen() error { return nil } -func (m *mockService) Stop() {} +func (m *mockService) Stop() error { return nil } func (m *mockService) RuntimeIP() netip.Addr { return netip.MustParseAddr("127.0.0.1") } func (m *mockService) RuntimePort() int { return 53 } func (m *mockService) RegisterMux(string, dns.Handler) {} diff --git a/client/internal/dns/service.go b/client/internal/dns/service.go index 6a76c53e3..1c6ce7849 100644 --- a/client/internal/dns/service.go +++ b/client/internal/dns/service.go @@ -4,15 +4,25 @@ import ( "net/netip" "github.com/miekg/dns" + + firewall "github.com/netbirdio/netbird/client/firewall/manager" ) const ( DefaultPort = 53 ) +// Firewall provides DNAT capabilities for DNS port redirection. +// This is used when the DNS server cannot bind port 53 directly +// and needs firewall rules to redirect traffic. +type Firewall interface { + AddOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error + RemoveOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error +} + type service interface { Listen() error - Stop() + Stop() error RegisterMux(domain string, handler dns.Handler) DeregisterMux(key string) RuntimePort() int diff --git a/client/internal/dns/service_listener.go b/client/internal/dns/service_listener.go index f7ddfd40f..4e09f1b7f 100644 --- a/client/internal/dns/service_listener.go +++ b/client/internal/dns/service_listener.go @@ -10,9 +10,13 @@ import ( "sync" "time" + "github.com/hashicorp/go-multierror" "github.com/miekg/dns" log "github.com/sirupsen/logrus" + nberrors "github.com/netbirdio/netbird/client/errors" + + firewall "github.com/netbirdio/netbird/client/firewall/manager" "github.com/netbirdio/netbird/client/internal/ebpf" ebpfMgr "github.com/netbirdio/netbird/client/internal/ebpf/manager" ) @@ -31,25 +35,33 @@ type serviceViaListener struct { dnsMux *dns.ServeMux customAddr *netip.AddrPort server *dns.Server + tcpServer *dns.Server listenIP netip.Addr listenPort uint16 listenerIsRunning bool listenerFlagLock sync.Mutex ebpfService ebpfMgr.Manager + firewall Firewall + tcpDNATConfigured bool } -func newServiceViaListener(wgIface WGIface, customAddr *netip.AddrPort) *serviceViaListener { +func newServiceViaListener(wgIface WGIface, customAddr *netip.AddrPort, fw Firewall) *serviceViaListener { mux := dns.NewServeMux() s := &serviceViaListener{ wgInterface: wgIface, dnsMux: mux, customAddr: customAddr, + firewall: fw, server: &dns.Server{ Net: "udp", Handler: mux, UDPSize: 65535, }, + tcpServer: &dns.Server{ + Net: "tcp", + Handler: mux, + }, } return s @@ -70,43 +82,86 @@ func (s *serviceViaListener) Listen() error { return fmt.Errorf("eval listen address: %w", err) } s.listenIP = s.listenIP.Unmap() - s.server.Addr = net.JoinHostPort(s.listenIP.String(), strconv.Itoa(int(s.listenPort))) - log.Debugf("starting dns on %s", s.server.Addr) - go func() { - s.setListenerStatus(true) - defer s.setListenerStatus(false) + addr := net.JoinHostPort(s.listenIP.String(), strconv.Itoa(int(s.listenPort))) + s.server.Addr = addr + s.tcpServer.Addr = addr - err := s.server.ListenAndServe() - if err != nil { - log.Errorf("dns server running with %d port returned an error: %v. Will not retry", s.listenPort, err) + log.Debugf("starting dns on %s (UDP + TCP)", addr) + s.listenerIsRunning = true + + go func() { + if err := s.server.ListenAndServe(); err != nil { + log.Errorf("failed to run DNS UDP server on port %d: %v", s.listenPort, err) + } + + s.listenerFlagLock.Lock() + unexpected := s.listenerIsRunning + s.listenerIsRunning = false + s.listenerFlagLock.Unlock() + + if unexpected { + if err := s.tcpServer.Shutdown(); err != nil { + log.Debugf("failed to shutdown DNS TCP server: %v", err) + } } }() + go func() { + if err := s.tcpServer.ListenAndServe(); err != nil { + log.Errorf("failed to run DNS TCP server on port %d: %v", s.listenPort, err) + } + }() + + // When eBPF redirects UDP port 53 to our listen port, TCP still needs + // a DNAT rule because eBPF only handles UDP. + if s.ebpfService != nil && s.firewall != nil && s.listenPort != DefaultPort { + if err := s.firewall.AddOutputDNAT(s.listenIP, firewall.ProtocolTCP, DefaultPort, s.listenPort); err != nil { + log.Warnf("failed to add DNS TCP DNAT rule, TCP DNS on port 53 will not work: %v", err) + } else { + s.tcpDNATConfigured = true + log.Infof("added DNS TCP DNAT rule: %s:%d -> %s:%d", s.listenIP, DefaultPort, s.listenIP, s.listenPort) + } + } + return nil } -func (s *serviceViaListener) Stop() { +func (s *serviceViaListener) Stop() error { s.listenerFlagLock.Lock() defer s.listenerFlagLock.Unlock() if !s.listenerIsRunning { - return + return nil } + s.listenerIsRunning = false ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - err := s.server.ShutdownContext(ctx) - if err != nil { - log.Errorf("stopping dns server listener returned an error: %v", err) + var merr *multierror.Error + + if err := s.server.ShutdownContext(ctx); err != nil { + merr = multierror.Append(merr, fmt.Errorf("stop DNS UDP server: %w", err)) + } + + if err := s.tcpServer.ShutdownContext(ctx); err != nil { + merr = multierror.Append(merr, fmt.Errorf("stop DNS TCP server: %w", err)) + } + + if s.tcpDNATConfigured && s.firewall != nil { + if err := s.firewall.RemoveOutputDNAT(s.listenIP, firewall.ProtocolTCP, DefaultPort, s.listenPort); err != nil { + merr = multierror.Append(merr, fmt.Errorf("remove DNS TCP DNAT rule: %w", err)) + } + s.tcpDNATConfigured = false } if s.ebpfService != nil { - err = s.ebpfService.FreeDNSFwd() - if err != nil { - log.Errorf("stopping traffic forwarder returned an error: %v", err) + if err := s.ebpfService.FreeDNSFwd(); err != nil { + merr = multierror.Append(merr, fmt.Errorf("stop traffic forwarder: %w", err)) } } + + return nberrors.FormatErrorOrNil(merr) } func (s *serviceViaListener) RegisterMux(pattern string, handler dns.Handler) { @@ -133,12 +188,6 @@ func (s *serviceViaListener) RuntimeIP() netip.Addr { return s.listenIP } -func (s *serviceViaListener) setListenerStatus(running bool) { - s.listenerFlagLock.Lock() - defer s.listenerFlagLock.Unlock() - - s.listenerIsRunning = running -} // evalListenAddress figure out the listen address for the DNS server // first check the 53 port availability on WG interface or lo, if not success @@ -187,18 +236,28 @@ func (s *serviceViaListener) testFreePort(port int) (netip.Addr, bool) { } func (s *serviceViaListener) tryToBind(ip netip.Addr, port int) bool { - addrString := net.JoinHostPort(ip.String(), strconv.Itoa(port)) - udpAddr := net.UDPAddrFromAddrPort(netip.MustParseAddrPort(addrString)) - probeListener, err := net.ListenUDP("udp", udpAddr) + addrPort := netip.AddrPortFrom(ip, uint16(port)) + + udpAddr := net.UDPAddrFromAddrPort(addrPort) + udpLn, err := net.ListenUDP("udp", udpAddr) if err != nil { - log.Warnf("binding dns on %s is not available, error: %s", addrString, err) + log.Warnf("binding dns UDP on %s is not available: %s", addrPort, err) return false } - - err = probeListener.Close() - if err != nil { - log.Errorf("got an error closing the probe listener, error: %s", err) + if err := udpLn.Close(); err != nil { + log.Debugf("close UDP probe listener: %s", err) } + + tcpAddr := net.TCPAddrFromAddrPort(addrPort) + tcpLn, err := net.ListenTCP("tcp", tcpAddr) + if err != nil { + log.Warnf("binding dns TCP on %s is not available: %s", addrPort, err) + return false + } + if err := tcpLn.Close(); err != nil { + log.Debugf("close TCP probe listener: %s", err) + } + return true } diff --git a/client/internal/dns/service_listener_test.go b/client/internal/dns/service_listener_test.go new file mode 100644 index 000000000..90ef71d19 --- /dev/null +++ b/client/internal/dns/service_listener_test.go @@ -0,0 +1,86 @@ +package dns + +import ( + "fmt" + "net" + "net/netip" + "testing" + "time" + + "github.com/miekg/dns" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestServiceViaListener_TCPAndUDP(t *testing.T) { + handler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + m.Answer = append(m.Answer, &dns.A{ + Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP("192.0.2.1"), + }) + if err := w.WriteMsg(m); err != nil { + t.Logf("write msg: %v", err) + } + }) + + // Create a service using a custom address to avoid needing root + svc := newServiceViaListener(nil, nil, nil) + svc.dnsMux.Handle(".", handler) + + // Bind both transports up front to avoid TOCTOU races. + udpAddr := net.UDPAddrFromAddrPort(netip.AddrPortFrom(customIP, 0)) + udpConn, err := net.ListenUDP("udp", udpAddr) + if err != nil { + t.Skip("cannot bind to 127.0.0.153, skipping") + } + port := uint16(udpConn.LocalAddr().(*net.UDPAddr).Port) + + tcpAddr := net.TCPAddrFromAddrPort(netip.AddrPortFrom(customIP, port)) + tcpLn, err := net.ListenTCP("tcp", tcpAddr) + if err != nil { + udpConn.Close() + t.Skip("cannot bind TCP on same port, skipping") + } + + addr := fmt.Sprintf("%s:%d", customIP, port) + svc.server.PacketConn = udpConn + svc.tcpServer.Listener = tcpLn + svc.listenIP = customIP + svc.listenPort = port + + go func() { + if err := svc.server.ActivateAndServe(); err != nil { + t.Logf("udp server: %v", err) + } + }() + go func() { + if err := svc.tcpServer.ActivateAndServe(); err != nil { + t.Logf("tcp server: %v", err) + } + }() + svc.listenerIsRunning = true + + defer func() { + require.NoError(t, svc.Stop()) + }() + + q := new(dns.Msg).SetQuestion("example.com.", dns.TypeA) + + // Test UDP query + udpClient := &dns.Client{Net: "udp", Timeout: 2 * time.Second} + udpResp, _, err := udpClient.Exchange(q, addr) + require.NoError(t, err, "UDP query should succeed") + require.NotNil(t, udpResp) + require.NotEmpty(t, udpResp.Answer) + assert.Contains(t, udpResp.Answer[0].String(), "192.0.2.1", "UDP response should contain expected IP") + + // Test TCP query + tcpClient := &dns.Client{Net: "tcp", Timeout: 2 * time.Second} + tcpResp, _, err := tcpClient.Exchange(q, addr) + require.NoError(t, err, "TCP query should succeed") + require.NotNil(t, tcpResp) + require.NotEmpty(t, tcpResp.Answer) + assert.Contains(t, tcpResp.Answer[0].String(), "192.0.2.1", "TCP response should contain expected IP") +} diff --git a/client/internal/dns/service_memory.go b/client/internal/dns/service_memory.go index 6ef0ab526..e8c036076 100644 --- a/client/internal/dns/service_memory.go +++ b/client/internal/dns/service_memory.go @@ -1,6 +1,7 @@ package dns import ( + "errors" "fmt" "net/netip" "sync" @@ -10,6 +11,7 @@ import ( "github.com/miekg/dns" log "github.com/sirupsen/logrus" + "github.com/netbirdio/netbird/client/iface" nbnet "github.com/netbirdio/netbird/client/net" ) @@ -18,7 +20,8 @@ type ServiceViaMemory struct { dnsMux *dns.ServeMux runtimeIP netip.Addr runtimePort int - udpFilterHookID string + tcpDNS *tcpDNSServer + tcpHookSet bool listenerIsRunning bool listenerFlagLock sync.Mutex } @@ -28,14 +31,13 @@ func NewServiceViaMemory(wgIface WGIface) *ServiceViaMemory { if err != nil { log.Errorf("get last ip from network: %v", err) } - s := &ServiceViaMemory{ + + return &ServiceViaMemory{ wgInterface: wgIface, dnsMux: dns.NewServeMux(), - runtimeIP: lastIP, runtimePort: DefaultPort, } - return s } func (s *ServiceViaMemory) Listen() error { @@ -46,10 +48,8 @@ func (s *ServiceViaMemory) Listen() error { return nil } - var err error - s.udpFilterHookID, err = s.filterDNSTraffic() - if err != nil { - return fmt.Errorf("filter dns traffice: %w", err) + if err := s.filterDNSTraffic(); err != nil { + return fmt.Errorf("filter dns traffic: %w", err) } s.listenerIsRunning = true @@ -57,19 +57,29 @@ func (s *ServiceViaMemory) Listen() error { return nil } -func (s *ServiceViaMemory) Stop() { +func (s *ServiceViaMemory) Stop() error { s.listenerFlagLock.Lock() defer s.listenerFlagLock.Unlock() if !s.listenerIsRunning { - return + return nil } - if err := s.wgInterface.GetFilter().RemovePacketHook(s.udpFilterHookID); err != nil { - log.Errorf("unable to remove DNS packet hook: %s", err) + filter := s.wgInterface.GetFilter() + if filter != nil { + filter.SetUDPPacketHook(s.runtimeIP, uint16(s.runtimePort), nil) + if s.tcpHookSet { + filter.SetTCPPacketHook(s.runtimeIP, uint16(s.runtimePort), nil) + } + } + + if s.tcpDNS != nil { + s.tcpDNS.Stop() } s.listenerIsRunning = false + + return nil } func (s *ServiceViaMemory) RegisterMux(pattern string, handler dns.Handler) { @@ -88,10 +98,18 @@ func (s *ServiceViaMemory) RuntimeIP() netip.Addr { return s.runtimeIP } -func (s *ServiceViaMemory) filterDNSTraffic() (string, error) { +func (s *ServiceViaMemory) filterDNSTraffic() error { filter := s.wgInterface.GetFilter() if filter == nil { - return "", fmt.Errorf("can't set DNS filter, filter not initialized") + return errors.New("DNS filter not initialized") + } + + // Create TCP DNS server lazily here since the device may not exist at construction time. + if s.tcpDNS == nil { + if dev := s.wgInterface.GetDevice(); dev != nil { + // MTU only affects TCP segment sizing; DNS messages are small so this has no practical impact. + s.tcpDNS = newTCPDNSServer(s.dnsMux, dev.Device, s.runtimeIP, uint16(s.runtimePort), iface.DefaultMTU) + } } firstLayerDecoder := layers.LayerTypeIPv4 @@ -100,12 +118,16 @@ func (s *ServiceViaMemory) filterDNSTraffic() (string, error) { } hook := func(packetData []byte) bool { - // Decode the packet packet := gopacket.NewPacket(packetData, firstLayerDecoder, gopacket.Default) - // Get the UDP layer udpLayer := packet.Layer(layers.LayerTypeUDP) - udp := udpLayer.(*layers.UDP) + if udpLayer == nil { + return true + } + udp, ok := udpLayer.(*layers.UDP) + if !ok { + return true + } msg := new(dns.Msg) if err := msg.Unpack(udp.Payload); err != nil { @@ -113,13 +135,30 @@ func (s *ServiceViaMemory) filterDNSTraffic() (string, error) { return true } - writer := responseWriter{ - packet: packet, - device: s.wgInterface.GetDevice().Device, + dev := s.wgInterface.GetDevice() + if dev == nil { + return true } - go s.dnsMux.ServeDNS(&writer, msg) + + writer := &responseWriter{ + remote: remoteAddrFromPacket(packet), + packet: packet, + device: dev.Device, + } + go s.dnsMux.ServeDNS(writer, msg) return true } - return filter.AddUDPPacketHook(false, s.runtimeIP, uint16(s.runtimePort), hook), nil + filter.SetUDPPacketHook(s.runtimeIP, uint16(s.runtimePort), hook) + + if s.tcpDNS != nil { + tcpHook := func(packetData []byte) bool { + s.tcpDNS.InjectPacket(packetData) + return true + } + filter.SetTCPPacketHook(s.runtimeIP, uint16(s.runtimePort), tcpHook) + s.tcpHookSet = true + } + + return nil } diff --git a/client/internal/dns/tcpstack.go b/client/internal/dns/tcpstack.go new file mode 100644 index 000000000..88e72e767 --- /dev/null +++ b/client/internal/dns/tcpstack.go @@ -0,0 +1,444 @@ +package dns + +import ( + "errors" + "fmt" + "io" + "net" + "net/netip" + "sync" + "sync/atomic" + "time" + + "github.com/miekg/dns" + log "github.com/sirupsen/logrus" + "golang.zx2c4.com/wireguard/tun" + "gvisor.dev/gvisor/pkg/buffer" + "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/tcpip/adapters/gonet" + "gvisor.dev/gvisor/pkg/tcpip/header" + "gvisor.dev/gvisor/pkg/tcpip/network/ipv4" + "gvisor.dev/gvisor/pkg/tcpip/stack" + "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" + "gvisor.dev/gvisor/pkg/waiter" +) + +const ( + dnsTCPReceiveWindow = 8192 + dnsTCPMaxInFlight = 16 + dnsTCPIdleTimeout = 30 * time.Second + dnsTCPReadTimeout = 5 * time.Second +) + +// tcpDNSServer is an on-demand TCP DNS server backed by a minimal gvisor stack. +// It is started lazily when a truncated DNS response is detected and shuts down +// after a period of inactivity to conserve resources. +type tcpDNSServer struct { + mu sync.Mutex + s *stack.Stack + ep *dnsEndpoint + mux *dns.ServeMux + tunDev tun.Device + ip netip.Addr + port uint16 + mtu uint16 + + running bool + closed bool + timerID uint64 + timer *time.Timer +} + +func newTCPDNSServer(mux *dns.ServeMux, tunDev tun.Device, ip netip.Addr, port uint16, mtu uint16) *tcpDNSServer { + return &tcpDNSServer{ + mux: mux, + tunDev: tunDev, + ip: ip, + port: port, + mtu: mtu, + } +} + +// InjectPacket ensures the stack is running and delivers a raw IP packet into +// the gvisor stack for TCP processing. Combining both operations under a single +// lock prevents a race where the idle timer could stop the stack between +// start and delivery. +func (t *tcpDNSServer) InjectPacket(payload []byte) { + t.mu.Lock() + defer t.mu.Unlock() + + if t.closed { + return + } + + if !t.running { + if err := t.startLocked(); err != nil { + log.Errorf("failed to start TCP DNS stack: %v", err) + return + } + t.running = true + log.Debugf("TCP DNS stack started on %s:%d (triggered by %s)", t.ip, t.port, srcAddrFromPacket(payload)) + } + t.resetTimerLocked() + + ep := t.ep + if ep == nil || ep.dispatcher == nil { + return + } + + pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{ + Payload: buffer.MakeWithData(payload), + }) + // DeliverNetworkPacket takes ownership of the packet buffer; do not DecRef. + ep.dispatcher.DeliverNetworkPacket(ipv4.ProtocolNumber, pkt) +} + +// Stop tears down the gvisor stack and releases resources permanently. +// After Stop, InjectPacket becomes a no-op. +func (t *tcpDNSServer) Stop() { + t.mu.Lock() + defer t.mu.Unlock() + + t.stopLocked() + t.closed = true +} + +func (t *tcpDNSServer) startLocked() error { + // TODO: add ipv6.NewProtocol when IPv6 overlay support lands. + s := stack.New(stack.Options{ + NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol}, + TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol}, + HandleLocal: false, + }) + + nicID := tcpip.NICID(1) + ep := &dnsEndpoint{ + tunDev: t.tunDev, + } + ep.mtu.Store(uint32(t.mtu)) + + if err := s.CreateNIC(nicID, ep); err != nil { + s.Close() + s.Wait() + return fmt.Errorf("create NIC: %v", err) + } + + protoAddr := tcpip.ProtocolAddress{ + Protocol: ipv4.ProtocolNumber, + AddressWithPrefix: tcpip.AddressWithPrefix{ + Address: tcpip.AddrFromSlice(t.ip.AsSlice()), + PrefixLen: 32, + }, + } + if err := s.AddProtocolAddress(nicID, protoAddr, stack.AddressProperties{}); err != nil { + s.Close() + s.Wait() + return fmt.Errorf("add protocol address: %s", err) + } + + if err := s.SetPromiscuousMode(nicID, true); err != nil { + s.Close() + s.Wait() + return fmt.Errorf("set promiscuous mode: %s", err) + } + if err := s.SetSpoofing(nicID, true); err != nil { + s.Close() + s.Wait() + return fmt.Errorf("set spoofing: %s", err) + } + + defaultSubnet, err := tcpip.NewSubnet( + tcpip.AddrFrom4([4]byte{0, 0, 0, 0}), + tcpip.MaskFromBytes([]byte{0, 0, 0, 0}), + ) + if err != nil { + s.Close() + s.Wait() + return fmt.Errorf("create default subnet: %w", err) + } + + s.SetRouteTable([]tcpip.Route{ + {Destination: defaultSubnet, NIC: nicID}, + }) + + tcpFwd := tcp.NewForwarder(s, dnsTCPReceiveWindow, dnsTCPMaxInFlight, func(r *tcp.ForwarderRequest) { + t.handleTCPDNS(r) + }) + s.SetTransportProtocolHandler(tcp.ProtocolNumber, tcpFwd.HandlePacket) + + t.s = s + t.ep = ep + return nil +} + +func (t *tcpDNSServer) stopLocked() { + if !t.running { + return + } + + if t.timer != nil { + t.timer.Stop() + t.timer = nil + } + + if t.s != nil { + t.s.Close() + t.s.Wait() + t.s = nil + } + t.ep = nil + t.running = false + + log.Debugf("TCP DNS stack stopped") +} + +func (t *tcpDNSServer) resetTimerLocked() { + if t.timer != nil { + t.timer.Stop() + } + t.timerID++ + id := t.timerID + t.timer = time.AfterFunc(dnsTCPIdleTimeout, func() { + t.mu.Lock() + defer t.mu.Unlock() + + // Only stop if this timer is still the active one. + // A racing InjectPacket may have replaced it. + if t.timerID != id { + return + } + t.stopLocked() + }) +} + +func (t *tcpDNSServer) handleTCPDNS(r *tcp.ForwarderRequest) { + id := r.ID() + + wq := waiter.Queue{} + ep, epErr := r.CreateEndpoint(&wq) + if epErr != nil { + log.Debugf("TCP DNS: failed to create endpoint: %v", epErr) + r.Complete(true) + return + } + r.Complete(false) + + conn := gonet.NewTCPConn(&wq, ep) + defer func() { + if err := conn.Close(); err != nil { + log.Tracef("TCP DNS: close conn: %v", err) + } + }() + + // Reset idle timer on activity + t.mu.Lock() + t.resetTimerLocked() + t.mu.Unlock() + + localAddr := &net.TCPAddr{ + IP: id.LocalAddress.AsSlice(), + Port: int(id.LocalPort), + } + remoteAddr := &net.TCPAddr{ + IP: id.RemoteAddress.AsSlice(), + Port: int(id.RemotePort), + } + + for { + if err := conn.SetReadDeadline(time.Now().Add(dnsTCPReadTimeout)); err != nil { + log.Debugf("TCP DNS: set deadline for %s: %v", remoteAddr, err) + break + } + + msg, err := readTCPDNSMessage(conn) + if err != nil { + if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) { + log.Debugf("TCP DNS: read from %s: %v", remoteAddr, err) + } + break + } + + writer := &tcpResponseWriter{ + conn: conn, + localAddr: localAddr, + remoteAddr: remoteAddr, + } + t.mux.ServeDNS(writer, msg) + } +} + +// dnsEndpoint implements stack.LinkEndpoint for writing packets back via the tun device. +type dnsEndpoint struct { + dispatcher stack.NetworkDispatcher + tunDev tun.Device + mtu atomic.Uint32 +} + +func (e *dnsEndpoint) Attach(dispatcher stack.NetworkDispatcher) { e.dispatcher = dispatcher } +func (e *dnsEndpoint) IsAttached() bool { return e.dispatcher != nil } +func (e *dnsEndpoint) MTU() uint32 { return e.mtu.Load() } +func (e *dnsEndpoint) Capabilities() stack.LinkEndpointCapabilities { return stack.CapabilityNone } +func (e *dnsEndpoint) MaxHeaderLength() uint16 { return 0 } +func (e *dnsEndpoint) LinkAddress() tcpip.LinkAddress { return "" } +func (e *dnsEndpoint) Wait() { /* no async work */ } +func (e *dnsEndpoint) ARPHardwareType() header.ARPHardwareType { return header.ARPHardwareNone } +func (e *dnsEndpoint) AddHeader(*stack.PacketBuffer) { /* IP-level endpoint, no link header */ } +func (e *dnsEndpoint) ParseHeader(*stack.PacketBuffer) bool { return true } +func (e *dnsEndpoint) Close() { /* lifecycle managed by tcpDNSServer */ } +func (e *dnsEndpoint) SetLinkAddress(tcpip.LinkAddress) { /* no link address for tun */ } +func (e *dnsEndpoint) SetMTU(mtu uint32) { e.mtu.Store(mtu) } +func (e *dnsEndpoint) SetOnCloseAction(func()) { /* not needed */ } + +const tunPacketOffset = 40 + +func (e *dnsEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) { + var written int + for _, pkt := range pkts.AsSlice() { + data := stack.PayloadSince(pkt.NetworkHeader()) + if data == nil { + continue + } + + raw := data.AsSlice() + buf := make([]byte, tunPacketOffset, tunPacketOffset+len(raw)) + buf = append(buf, raw...) + data.Release() + + if _, err := e.tunDev.Write([][]byte{buf}, tunPacketOffset); err != nil { + log.Tracef("TCP DNS endpoint: failed to write packet: %v", err) + continue + } + written++ + } + return written, nil +} + +// tcpResponseWriter implements dns.ResponseWriter for TCP DNS connections. +type tcpResponseWriter struct { + conn *gonet.TCPConn + localAddr net.Addr + remoteAddr net.Addr +} + +func (w *tcpResponseWriter) LocalAddr() net.Addr { + return w.localAddr +} + +func (w *tcpResponseWriter) RemoteAddr() net.Addr { + return w.remoteAddr +} + +func (w *tcpResponseWriter) WriteMsg(msg *dns.Msg) error { + data, err := msg.Pack() + if err != nil { + return fmt.Errorf("pack: %w", err) + } + + // DNS TCP: 2-byte length prefix + message + buf := make([]byte, 2+len(data)) + buf[0] = byte(len(data) >> 8) + buf[1] = byte(len(data)) + copy(buf[2:], data) + + if _, err = w.conn.Write(buf); err != nil { + return err + } + return nil +} + +func (w *tcpResponseWriter) Write(data []byte) (int, error) { + buf := make([]byte, 2+len(data)) + buf[0] = byte(len(data) >> 8) + buf[1] = byte(len(data)) + copy(buf[2:], data) + if _, err := w.conn.Write(buf); err != nil { + return 0, err + } + return len(data), nil +} + +func (w *tcpResponseWriter) Close() error { + return w.conn.Close() +} + +func (w *tcpResponseWriter) TsigStatus() error { return nil } +func (w *tcpResponseWriter) TsigTimersOnly(bool) { /* TSIG not supported */ } +func (w *tcpResponseWriter) Hijack() { /* not supported */ } + +// readTCPDNSMessage reads a single DNS message from a TCP connection (length-prefixed). +func readTCPDNSMessage(conn *gonet.TCPConn) (*dns.Msg, error) { + // DNS over TCP uses a 2-byte length prefix + lenBuf := make([]byte, 2) + if _, err := io.ReadFull(conn, lenBuf); err != nil { + return nil, fmt.Errorf("read length: %w", err) + } + + msgLen := int(lenBuf[0])<<8 | int(lenBuf[1]) + if msgLen == 0 || msgLen > 65535 { + return nil, fmt.Errorf("invalid message length: %d", msgLen) + } + + msgBuf := make([]byte, msgLen) + if _, err := io.ReadFull(conn, msgBuf); err != nil { + return nil, fmt.Errorf("read message: %w", err) + } + + msg := new(dns.Msg) + if err := msg.Unpack(msgBuf); err != nil { + return nil, fmt.Errorf("unpack: %w", err) + } + return msg, nil +} + +// srcAddrFromPacket extracts the source IP:port from a raw IP+TCP packet for logging. +// Supports both IPv4 and IPv6. +func srcAddrFromPacket(pkt []byte) netip.AddrPort { + if len(pkt) == 0 { + return netip.AddrPort{} + } + + srcIP, transportOffset := srcIPFromPacket(pkt) + if !srcIP.IsValid() || len(pkt) < transportOffset+2 { + return netip.AddrPort{} + } + + srcPort := uint16(pkt[transportOffset])<<8 | uint16(pkt[transportOffset+1]) + return netip.AddrPortFrom(srcIP.Unmap(), srcPort) +} + +func srcIPFromPacket(pkt []byte) (netip.Addr, int) { + switch header.IPVersion(pkt) { + case 4: + return srcIPv4(pkt) + case 6: + return srcIPv6(pkt) + default: + return netip.Addr{}, 0 + } +} + +func srcIPv4(pkt []byte) (netip.Addr, int) { + if len(pkt) < header.IPv4MinimumSize { + return netip.Addr{}, 0 + } + hdr := header.IPv4(pkt) + src := hdr.SourceAddress() + ip, ok := netip.AddrFromSlice(src.AsSlice()) + if !ok { + return netip.Addr{}, 0 + } + return ip, int(hdr.HeaderLength()) +} + +func srcIPv6(pkt []byte) (netip.Addr, int) { + if len(pkt) < header.IPv6MinimumSize { + return netip.Addr{}, 0 + } + hdr := header.IPv6(pkt) + src := hdr.SourceAddress() + ip, ok := netip.AddrFromSlice(src.AsSlice()) + if !ok { + return netip.Addr{}, 0 + } + return ip, header.IPv6MinimumSize +} diff --git a/client/internal/dns/upstream.go b/client/internal/dns/upstream.go index 5b8135132..746b73ca7 100644 --- a/client/internal/dns/upstream.go +++ b/client/internal/dns/upstream.go @@ -41,10 +41,61 @@ const ( reactivatePeriod = 30 * time.Second probeTimeout = 2 * time.Second + + // ipv6HeaderSize + udpHeaderSize, used to derive the maximum DNS UDP + // payload from the tunnel MTU. + ipUDPHeaderSize = 60 + 8 ) const testRecord = "com." +const ( + protoUDP = "udp" + protoTCP = "tcp" +) + +type dnsProtocolKey struct{} + +// contextWithDNSProtocol stores the inbound DNS protocol ("udp" or "tcp") in context. +func contextWithDNSProtocol(ctx context.Context, network string) context.Context { + return context.WithValue(ctx, dnsProtocolKey{}, network) +} + +// dnsProtocolFromContext retrieves the inbound DNS protocol from context. +func dnsProtocolFromContext(ctx context.Context) string { + if ctx == nil { + return "" + } + if v, ok := ctx.Value(dnsProtocolKey{}).(string); ok { + return v + } + return "" +} + +type upstreamProtocolKey struct{} + +// upstreamProtocolResult holds the protocol used for the upstream exchange. +// Stored as a pointer in context so the exchange function can set it. +type upstreamProtocolResult struct { + protocol string +} + +// contextWithupstreamProtocolResult stores a mutable result holder in the context. +func contextWithupstreamProtocolResult(ctx context.Context) (context.Context, *upstreamProtocolResult) { + r := &upstreamProtocolResult{} + return context.WithValue(ctx, upstreamProtocolKey{}, r), r +} + +// setUpstreamProtocol sets the upstream protocol on the result holder in context, if present. +func setUpstreamProtocol(ctx context.Context, protocol string) { + if ctx == nil { + return + } + if r, ok := ctx.Value(upstreamProtocolKey{}).(*upstreamProtocolResult); ok && r != nil { + r.protocol = protocol + } +} + type upstreamClient interface { exchange(ctx context.Context, upstream string, r *dns.Msg) (*dns.Msg, time.Duration, error) } @@ -138,7 +189,16 @@ func (u *upstreamResolverBase) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { return } - ok, failures := u.tryUpstreamServers(w, r, logger) + // Propagate inbound protocol so upstream exchange can use TCP directly + // when the request came in over TCP. + ctx := u.ctx + if addr := w.RemoteAddr(); addr != nil { + network := addr.Network() + ctx = contextWithDNSProtocol(ctx, network) + resutil.SetMeta(w, "protocol", network) + } + + ok, failures := u.tryUpstreamServers(ctx, w, r, logger) if len(failures) > 0 { u.logUpstreamFailures(r.Question[0].Name, failures, ok, logger) } @@ -153,7 +213,7 @@ func (u *upstreamResolverBase) prepareRequest(r *dns.Msg) { } } -func (u *upstreamResolverBase) tryUpstreamServers(w dns.ResponseWriter, r *dns.Msg, logger *log.Entry) (bool, []upstreamFailure) { +func (u *upstreamResolverBase) tryUpstreamServers(ctx context.Context, w dns.ResponseWriter, r *dns.Msg, logger *log.Entry) (bool, []upstreamFailure) { timeout := u.upstreamTimeout if len(u.upstreamServers) > 1 { maxTotal := 5 * time.Second @@ -168,7 +228,7 @@ func (u *upstreamResolverBase) tryUpstreamServers(w dns.ResponseWriter, r *dns.M var failures []upstreamFailure for _, upstream := range u.upstreamServers { - if failure := u.queryUpstream(w, r, upstream, timeout, logger); failure != nil { + if failure := u.queryUpstream(ctx, w, r, upstream, timeout, logger); failure != nil { failures = append(failures, *failure) } else { return true, failures @@ -178,15 +238,17 @@ func (u *upstreamResolverBase) tryUpstreamServers(w dns.ResponseWriter, r *dns.M } // queryUpstream queries a single upstream server. Returns nil on success, or failure info to try next upstream. -func (u *upstreamResolverBase) queryUpstream(w dns.ResponseWriter, r *dns.Msg, upstream netip.AddrPort, timeout time.Duration, logger *log.Entry) *upstreamFailure { +func (u *upstreamResolverBase) queryUpstream(parentCtx context.Context, w dns.ResponseWriter, r *dns.Msg, upstream netip.AddrPort, timeout time.Duration, logger *log.Entry) *upstreamFailure { var rm *dns.Msg var t time.Duration var err error var startTime time.Time + var upstreamProto *upstreamProtocolResult func() { - ctx, cancel := context.WithTimeout(u.ctx, timeout) + ctx, cancel := context.WithTimeout(parentCtx, timeout) defer cancel() + ctx, upstreamProto = contextWithupstreamProtocolResult(ctx) startTime = time.Now() rm, t, err = u.upstreamClient.exchange(ctx, upstream.String(), r) }() @@ -203,7 +265,7 @@ func (u *upstreamResolverBase) queryUpstream(w dns.ResponseWriter, r *dns.Msg, u return &upstreamFailure{upstream: upstream, reason: dns.RcodeToString[rm.Rcode]} } - u.writeSuccessResponse(w, rm, upstream, r.Question[0].Name, t, logger) + u.writeSuccessResponse(w, rm, upstream, r.Question[0].Name, t, upstreamProto, logger) return nil } @@ -220,10 +282,13 @@ func (u *upstreamResolverBase) handleUpstreamError(err error, upstream netip.Add return &upstreamFailure{upstream: upstream, reason: reason} } -func (u *upstreamResolverBase) writeSuccessResponse(w dns.ResponseWriter, rm *dns.Msg, upstream netip.AddrPort, domain string, t time.Duration, logger *log.Entry) bool { +func (u *upstreamResolverBase) writeSuccessResponse(w dns.ResponseWriter, rm *dns.Msg, upstream netip.AddrPort, domain string, t time.Duration, upstreamProto *upstreamProtocolResult, logger *log.Entry) bool { u.successCount.Add(1) resutil.SetMeta(w, "upstream", upstream.String()) + if upstreamProto != nil && upstreamProto.protocol != "" { + resutil.SetMeta(w, "upstream_protocol", upstreamProto.protocol) + } // Clear Zero bit from external responses to prevent upstream servers from // manipulating our internal fallthrough signaling mechanism @@ -428,13 +493,42 @@ func (u *upstreamResolverBase) testNameserver(baseCtx context.Context, externalC return err } +// clientUDPMaxSize returns the maximum UDP response size the client accepts. +func clientUDPMaxSize(r *dns.Msg) int { + if opt := r.IsEdns0(); opt != nil { + return int(opt.UDPSize()) + } + return dns.MinMsgSize +} + // ExchangeWithFallback exchanges a DNS message with the upstream server. // It first tries to use UDP, and if it is truncated, it falls back to TCP. +// If the inbound request came over TCP (via context), it skips the UDP attempt. // If the passed context is nil, this will use Exchange instead of ExchangeContext. func ExchangeWithFallback(ctx context.Context, client *dns.Client, r *dns.Msg, upstream string) (*dns.Msg, time.Duration, error) { - // MTU - ip + udp headers - // Note: this could be sent out on an interface that is not ours, but higher MTU settings could break truncation handling. - client.UDPSize = uint16(currentMTU - (60 + 8)) + // If the request came in over TCP, go straight to TCP upstream. + if dnsProtocolFromContext(ctx) == protoTCP { + tcpClient := *client + tcpClient.Net = protoTCP + rm, t, err := tcpClient.ExchangeContext(ctx, r, upstream) + if err != nil { + return nil, t, fmt.Errorf("with tcp: %w", err) + } + setUpstreamProtocol(ctx, protoTCP) + return rm, t, nil + } + + clientMaxSize := clientUDPMaxSize(r) + + // Cap EDNS0 to our tunnel MTU so the upstream doesn't send a + // response larger than our read buffer. + // Note: the query could be sent out on an interface that is not ours, + // but higher MTU settings could break truncation handling. + maxUDPPayload := uint16(currentMTU - ipUDPHeaderSize) + client.UDPSize = maxUDPPayload + if opt := r.IsEdns0(); opt != nil && opt.UDPSize() > maxUDPPayload { + opt.SetUDPSize(maxUDPPayload) + } var ( rm *dns.Msg @@ -453,25 +547,32 @@ func ExchangeWithFallback(ctx context.Context, client *dns.Client, r *dns.Msg, u } if rm == nil || !rm.MsgHdr.Truncated { + setUpstreamProtocol(ctx, protoUDP) return rm, t, nil } - log.Tracef("udp response for domain=%s type=%v class=%v is truncated, trying TCP.", - r.Question[0].Name, r.Question[0].Qtype, r.Question[0].Qclass) + // TODO: if the upstream's truncated UDP response already contains more + // data than the client's buffer, we could truncate locally and skip + // the TCP retry. - client.Net = "tcp" + tcpClient := *client + tcpClient.Net = protoTCP if ctx == nil { - rm, t, err = client.Exchange(r, upstream) + rm, t, err = tcpClient.Exchange(r, upstream) } else { - rm, t, err = client.ExchangeContext(ctx, r, upstream) + rm, t, err = tcpClient.ExchangeContext(ctx, r, upstream) } if err != nil { return nil, t, fmt.Errorf("with tcp: %w", err) } - // TODO: once TCP is implemented, rm.Truncate() if the request came in over UDP + setUpstreamProtocol(ctx, protoTCP) + + if rm.Len() > clientMaxSize { + rm.Truncate(clientMaxSize) + } return rm, t, nil } @@ -479,18 +580,46 @@ func ExchangeWithFallback(ctx context.Context, client *dns.Client, r *dns.Msg, u // ExchangeWithNetstack performs a DNS exchange using netstack for dialing. // This is needed when netstack is enabled to reach peer IPs through the tunnel. func ExchangeWithNetstack(ctx context.Context, nsNet *netstack.Net, r *dns.Msg, upstream string) (*dns.Msg, error) { - reply, err := netstackExchange(ctx, nsNet, r, upstream, "udp") + // If request came in over TCP, go straight to TCP upstream + if dnsProtocolFromContext(ctx) == protoTCP { + rm, err := netstackExchange(ctx, nsNet, r, upstream, protoTCP) + if err != nil { + return nil, err + } + setUpstreamProtocol(ctx, protoTCP) + return rm, nil + } + + clientMaxSize := clientUDPMaxSize(r) + + // Cap EDNS0 to our tunnel MTU so the upstream doesn't send a + // response larger than what we can read over UDP. + maxUDPPayload := uint16(currentMTU - ipUDPHeaderSize) + if opt := r.IsEdns0(); opt != nil && opt.UDPSize() > maxUDPPayload { + opt.SetUDPSize(maxUDPPayload) + } + + reply, err := netstackExchange(ctx, nsNet, r, upstream, protoUDP) if err != nil { return nil, err } - // If response is truncated, retry with TCP if reply != nil && reply.MsgHdr.Truncated { - log.Tracef("udp response for domain=%s type=%v class=%v is truncated, trying TCP", - r.Question[0].Name, r.Question[0].Qtype, r.Question[0].Qclass) - return netstackExchange(ctx, nsNet, r, upstream, "tcp") + rm, err := netstackExchange(ctx, nsNet, r, upstream, protoTCP) + if err != nil { + return nil, err + } + + setUpstreamProtocol(ctx, protoTCP) + if rm.Len() > clientMaxSize { + rm.Truncate(clientMaxSize) + } + + return rm, nil } + setUpstreamProtocol(ctx, protoUDP) + return reply, nil } @@ -511,7 +640,7 @@ func netstackExchange(ctx context.Context, nsNet *netstack.Net, r *dns.Msg, upst } } - dnsConn := &dns.Conn{Conn: conn} + dnsConn := &dns.Conn{Conn: conn, UDPSize: uint16(currentMTU - ipUDPHeaderSize)} if err := dnsConn.WriteMsg(r); err != nil { return nil, fmt.Errorf("write %s message: %w", network, err) diff --git a/client/internal/dns/upstream_android.go b/client/internal/dns/upstream_android.go index d7cff377b..ee1ca42fe 100644 --- a/client/internal/dns/upstream_android.go +++ b/client/internal/dns/upstream_android.go @@ -51,7 +51,7 @@ func (u *upstreamResolver) exchangeWithinVPN(ctx context.Context, upstream strin upstreamExchangeClient := &dns.Client{ Timeout: ClientTimeout, } - return upstreamExchangeClient.ExchangeContext(ctx, r, upstream) + return ExchangeWithFallback(ctx, upstreamExchangeClient, r, upstream) } // exchangeWithoutVPN protect the UDP socket by Android SDK to avoid to goes through the VPN @@ -76,7 +76,7 @@ func (u *upstreamResolver) exchangeWithoutVPN(ctx context.Context, upstream stri Timeout: timeout, } - return upstreamExchangeClient.ExchangeContext(ctx, r, upstream) + return ExchangeWithFallback(ctx, upstreamExchangeClient, r, upstream) } func (u *upstreamResolver) isLocalResolver(upstream string) bool { diff --git a/client/internal/dns/upstream_test.go b/client/internal/dns/upstream_test.go index ab164c30b..1797fdad8 100644 --- a/client/internal/dns/upstream_test.go +++ b/client/internal/dns/upstream_test.go @@ -475,3 +475,298 @@ func TestFormatFailures(t *testing.T) { }) } } + +func TestDNSProtocolContext(t *testing.T) { + t.Run("roundtrip udp", func(t *testing.T) { + ctx := contextWithDNSProtocol(context.Background(), protoUDP) + assert.Equal(t, protoUDP, dnsProtocolFromContext(ctx)) + }) + + t.Run("roundtrip tcp", func(t *testing.T) { + ctx := contextWithDNSProtocol(context.Background(), protoTCP) + assert.Equal(t, protoTCP, dnsProtocolFromContext(ctx)) + }) + + t.Run("missing returns empty", func(t *testing.T) { + assert.Equal(t, "", dnsProtocolFromContext(context.Background())) + }) +} + +func TestExchangeWithFallback_TCPContext(t *testing.T) { + // Start a local DNS server that responds on TCP only + tcpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + m.Answer = append(m.Answer, &dns.A{ + Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP("10.0.0.1"), + }) + if err := w.WriteMsg(m); err != nil { + t.Logf("write msg: %v", err) + } + }) + + tcpServer := &dns.Server{ + Addr: "127.0.0.1:0", + Net: "tcp", + Handler: tcpHandler, + } + + tcpLn, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + tcpServer.Listener = tcpLn + + go func() { + if err := tcpServer.ActivateAndServe(); err != nil { + t.Logf("tcp server: %v", err) + } + }() + defer func() { + _ = tcpServer.Shutdown() + }() + + upstream := tcpLn.Addr().String() + + // With TCP context, should connect directly via TCP without trying UDP + ctx := contextWithDNSProtocol(context.Background(), protoTCP) + client := &dns.Client{Timeout: 2 * time.Second} + r := new(dns.Msg).SetQuestion("example.com.", dns.TypeA) + + rm, _, err := ExchangeWithFallback(ctx, client, r, upstream) + require.NoError(t, err) + require.NotNil(t, rm) + require.NotEmpty(t, rm.Answer) + assert.Contains(t, rm.Answer[0].String(), "10.0.0.1") +} + +func TestExchangeWithFallback_UDPFallbackToTCP(t *testing.T) { + // UDP handler returns a truncated response to trigger TCP retry. + udpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + m.Truncated = true + if err := w.WriteMsg(m); err != nil { + t.Logf("write msg: %v", err) + } + }) + + // TCP handler returns the full answer. + tcpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + m.Answer = append(m.Answer, &dns.A{ + Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP("10.0.0.3"), + }) + if err := w.WriteMsg(m); err != nil { + t.Logf("write msg: %v", err) + } + }) + + udpPC, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + addr := udpPC.LocalAddr().String() + + udpServer := &dns.Server{ + PacketConn: udpPC, + Net: "udp", + Handler: udpHandler, + } + + tcpLn, err := net.Listen("tcp", addr) + require.NoError(t, err) + + tcpServer := &dns.Server{ + Listener: tcpLn, + Net: "tcp", + Handler: tcpHandler, + } + + go func() { + if err := udpServer.ActivateAndServe(); err != nil { + t.Logf("udp server: %v", err) + } + }() + go func() { + if err := tcpServer.ActivateAndServe(); err != nil { + t.Logf("tcp server: %v", err) + } + }() + defer func() { + _ = udpServer.Shutdown() + _ = tcpServer.Shutdown() + }() + + ctx := context.Background() + client := &dns.Client{Timeout: 2 * time.Second} + r := new(dns.Msg).SetQuestion("example.com.", dns.TypeA) + + rm, _, err := ExchangeWithFallback(ctx, client, r, addr) + require.NoError(t, err, "should fall back to TCP after truncated UDP response") + require.NotNil(t, rm) + require.NotEmpty(t, rm.Answer, "TCP response should contain the full answer") + assert.Contains(t, rm.Answer[0].String(), "10.0.0.3") + assert.False(t, rm.Truncated, "TCP response should not be truncated") +} + +func TestExchangeWithFallback_TCPContextSkipsUDP(t *testing.T) { + // Start only a TCP server (no UDP). With TCP context it should succeed. + tcpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + m.Answer = append(m.Answer, &dns.A{ + Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP("10.0.0.2"), + }) + if err := w.WriteMsg(m); err != nil { + t.Logf("write msg: %v", err) + } + }) + + tcpLn, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + tcpServer := &dns.Server{ + Listener: tcpLn, + Net: "tcp", + Handler: tcpHandler, + } + + go func() { + if err := tcpServer.ActivateAndServe(); err != nil { + t.Logf("tcp server: %v", err) + } + }() + defer func() { + _ = tcpServer.Shutdown() + }() + + upstream := tcpLn.Addr().String() + + // TCP context: should skip UDP entirely and go directly to TCP + ctx := contextWithDNSProtocol(context.Background(), protoTCP) + client := &dns.Client{Timeout: 2 * time.Second} + r := new(dns.Msg).SetQuestion("example.com.", dns.TypeA) + + rm, _, err := ExchangeWithFallback(ctx, client, r, upstream) + require.NoError(t, err) + require.NotNil(t, rm) + require.NotEmpty(t, rm.Answer) + assert.Contains(t, rm.Answer[0].String(), "10.0.0.2") + + // Without TCP context, trying to reach a TCP-only server via UDP should fail + ctx2 := context.Background() + client2 := &dns.Client{Timeout: 500 * time.Millisecond} + _, _, err = ExchangeWithFallback(ctx2, client2, r, upstream) + assert.Error(t, err, "should fail when no UDP server and no TCP context") +} + +func TestExchangeWithFallback_EDNS0Capped(t *testing.T) { + // Verify that a client EDNS0 larger than our MTU-derived limit gets + // capped in the outgoing request so the upstream doesn't send a + // response larger than our read buffer. + var receivedUDPSize uint16 + udpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { + if opt := r.IsEdns0(); opt != nil { + receivedUDPSize = opt.UDPSize() + } + m := new(dns.Msg) + m.SetReply(r) + m.Answer = append(m.Answer, &dns.A{ + Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP("10.0.0.1"), + }) + if err := w.WriteMsg(m); err != nil { + t.Logf("write msg: %v", err) + } + }) + + udpPC, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + addr := udpPC.LocalAddr().String() + + udpServer := &dns.Server{PacketConn: udpPC, Net: "udp", Handler: udpHandler} + go func() { _ = udpServer.ActivateAndServe() }() + t.Cleanup(func() { _ = udpServer.Shutdown() }) + + ctx := context.Background() + client := &dns.Client{Timeout: 2 * time.Second} + r := new(dns.Msg).SetQuestion("example.com.", dns.TypeA) + r.SetEdns0(4096, false) + + rm, _, err := ExchangeWithFallback(ctx, client, r, addr) + require.NoError(t, err) + require.NotNil(t, rm) + + expectedMax := uint16(currentMTU - ipUDPHeaderSize) + assert.Equal(t, expectedMax, receivedUDPSize, + "upstream should see capped EDNS0, not the client's 4096") +} + +func TestExchangeWithFallback_TCPTruncatesToClientSize(t *testing.T) { + // When the client advertises a large EDNS0 (4096) and the upstream + // truncates, the TCP response should NOT be truncated since the full + // answer fits within the client's original buffer. + udpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + m.Truncated = true + if err := w.WriteMsg(m); err != nil { + t.Logf("write msg: %v", err) + } + }) + + tcpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + // Add enough records to exceed MTU but fit within 4096 + for i := range 20 { + m.Answer = append(m.Answer, &dns.TXT{ + Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: 60}, + Txt: []string{fmt.Sprintf("record-%d-padding-data-to-make-it-longer", i)}, + }) + } + if err := w.WriteMsg(m); err != nil { + t.Logf("write msg: %v", err) + } + }) + + udpPC, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + addr := udpPC.LocalAddr().String() + + udpServer := &dns.Server{PacketConn: udpPC, Net: "udp", Handler: udpHandler} + tcpLn, err := net.Listen("tcp", addr) + require.NoError(t, err) + tcpServer := &dns.Server{Listener: tcpLn, Net: "tcp", Handler: tcpHandler} + + go func() { _ = udpServer.ActivateAndServe() }() + go func() { _ = tcpServer.ActivateAndServe() }() + t.Cleanup(func() { + _ = udpServer.Shutdown() + _ = tcpServer.Shutdown() + }) + + ctx := context.Background() + client := &dns.Client{Timeout: 2 * time.Second} + + // Client with large buffer: should get all records without truncation + r := new(dns.Msg).SetQuestion("example.com.", dns.TypeTXT) + r.SetEdns0(4096, false) + + rm, _, err := ExchangeWithFallback(ctx, client, r, addr) + require.NoError(t, err) + require.NotNil(t, rm) + assert.Len(t, rm.Answer, 20, "large EDNS0 client should get all records") + assert.False(t, rm.Truncated, "response should not be truncated for large buffer client") + + // Client with small buffer: should get truncated response + r2 := new(dns.Msg).SetQuestion("example.com.", dns.TypeTXT) + r2.SetEdns0(512, false) + + rm2, _, err := ExchangeWithFallback(ctx, &dns.Client{Timeout: 2 * time.Second}, r2, addr) + require.NoError(t, err) + require.NotNil(t, rm2) + assert.Less(t, len(rm2.Answer), 20, "small EDNS0 client should get fewer records") + assert.True(t, rm2.Truncated, "response should be truncated for small buffer client") +} diff --git a/client/internal/dnsfwd/forwarder.go b/client/internal/dnsfwd/forwarder.go index 5c7cb31fc..2e8ef84ab 100644 --- a/client/internal/dnsfwd/forwarder.go +++ b/client/internal/dnsfwd/forwarder.go @@ -237,8 +237,8 @@ func (f *DNSForwarder) writeResponse(logger *log.Entry, w dns.ResponseWriter, re return } - logger.Tracef("response: domain=%s rcode=%s answers=%s took=%s", - qname, dns.RcodeToString[resp.Rcode], resutil.FormatAnswers(resp.Answer), time.Since(startTime)) + logger.Tracef("response: domain=%s rcode=%s answers=%s size=%dB took=%s", + qname, dns.RcodeToString[resp.Rcode], resutil.FormatAnswers(resp.Answer), resp.Len(), time.Since(startTime)) } // udpResponseWriter wraps a dns.ResponseWriter to handle UDP-specific truncation. @@ -263,20 +263,28 @@ func (u *udpResponseWriter) WriteMsg(resp *dns.Msg) error { func (f *DNSForwarder) handleDNSQueryUDP(w dns.ResponseWriter, query *dns.Msg) { startTime := time.Now() - logger := log.WithFields(log.Fields{ + fields := log.Fields{ "request_id": resutil.GenerateRequestID(), "dns_id": fmt.Sprintf("%04x", query.Id), - }) + } + if addr := w.RemoteAddr(); addr != nil { + fields["client"] = addr.String() + } + logger := log.WithFields(fields) f.handleDNSQuery(logger, &udpResponseWriter{ResponseWriter: w, query: query}, query, startTime) } func (f *DNSForwarder) handleDNSQueryTCP(w dns.ResponseWriter, query *dns.Msg) { startTime := time.Now() - logger := log.WithFields(log.Fields{ + fields := log.Fields{ "request_id": resutil.GenerateRequestID(), "dns_id": fmt.Sprintf("%04x", query.Id), - }) + } + if addr := w.RemoteAddr(); addr != nil { + fields["client"] = addr.String() + } + logger := log.WithFields(fields) f.handleDNSQuery(logger, w, query, startTime) } diff --git a/client/internal/engine.go b/client/internal/engine.go index 6c7beb32f..1303e04f3 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -521,6 +521,11 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) return err } + // Inject firewall into DNS server now that it's available. + // The DNS server is created before the firewall because the route manager + // depends on the DNS server, and the firewall depends on the wg interface. + e.dnsServer.SetFirewall(e.firewall) + e.udpMux, err = e.wgInterface.Up() if err != nil { log.Errorf("failed to pull up wgInterface [%s]: %s", e.wgInterface.Name(), err.Error()) From e2c2f64be711936f21050cbcfaa0142f34401cfb Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Wed, 8 Apr 2026 08:43:48 +0200 Subject: [PATCH 285/374] [client] Fix iOS DNS upstream routing for deselected exit nodes (#5803) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add GetSelectedClientRoutes() to the route manager that filters through FilterSelectedExitNodes, returning only active routes instead of all management routes - Use GetSelectedClientRoutes() in the DNS route checker so deselected exit nodes' 0.0.0.0/0 no longer matches upstream DNS IPs — this prevented the resolver from switching away from the utun-bound socket after exit node deselection - Initialize iOS DNS server with host DNS fallback addresses (1.1.1.1:53, 1.0.0.1:53) and a permanent root zone handler, matching Android's behavior — without this, unmatched DNS queries arriving via the 0.0.0.0/0 tunnel route had no handler and were silently dropped --- client/internal/connect.go | 2 ++ client/internal/dns/server.go | 5 +++++ client/internal/engine.go | 2 +- client/internal/routemanager/notifier/notifier_ios.go | 1 - client/ios/NetBirdSDK/client.go | 6 +++++- 5 files changed, 13 insertions(+), 3 deletions(-) diff --git a/client/internal/connect.go b/client/internal/connect.go index ab12cfab3..bc2bd84d9 100644 --- a/client/internal/connect.go +++ b/client/internal/connect.go @@ -111,6 +111,7 @@ func (c *ConnectClient) RunOniOS( fileDescriptor int32, networkChangeListener listener.NetworkChangeListener, dnsManager dns.IosDnsManager, + dnsAddresses []netip.AddrPort, stateFilePath string, ) error { // Set GC percent to 5% to reduce memory usage as iOS only allows 50MB of memory for the extension. @@ -120,6 +121,7 @@ func (c *ConnectClient) RunOniOS( FileDescriptor: fileDescriptor, NetworkChangeListener: networkChangeListener, DnsManager: dnsManager, + HostDNSAddresses: dnsAddresses, StateFilePath: stateFilePath, } return c.run(mobileDependency, nil, "") diff --git a/client/internal/dns/server.go b/client/internal/dns/server.go index d4fda5db3..f7865047b 100644 --- a/client/internal/dns/server.go +++ b/client/internal/dns/server.go @@ -187,11 +187,16 @@ func NewDefaultServerIos( ctx context.Context, wgInterface WGIface, iosDnsManager IosDnsManager, + hostsDnsList []netip.AddrPort, statusRecorder *peer.Status, disableSys bool, ) *DefaultServer { + log.Debugf("iOS host dns address list is: %v", hostsDnsList) ds := newDefaultServer(ctx, wgInterface, NewServiceViaMemory(wgInterface), statusRecorder, nil, disableSys) ds.iosDnsManager = iosDnsManager + ds.hostsDNSHolder.set(hostsDnsList) + ds.permanent = true + ds.addHostRootZone() return ds } diff --git a/client/internal/engine.go b/client/internal/engine.go index 1303e04f3..53cac9b20 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -1805,7 +1805,7 @@ func (e *Engine) newDnsServer(dnsConfig *nbdns.Config) (dns.Server, error) { return dnsServer, nil case "ios": - dnsServer := dns.NewDefaultServerIos(e.ctx, e.wgInterface, e.mobileDep.DnsManager, e.statusRecorder, e.config.DisableDNS) + dnsServer := dns.NewDefaultServerIos(e.ctx, e.wgInterface, e.mobileDep.DnsManager, e.mobileDep.HostDNSAddresses, e.statusRecorder, e.config.DisableDNS) return dnsServer, nil default: diff --git a/client/internal/routemanager/notifier/notifier_ios.go b/client/internal/routemanager/notifier/notifier_ios.go index bb125cfa4..343d2799e 100644 --- a/client/internal/routemanager/notifier/notifier_ios.go +++ b/client/internal/routemanager/notifier/notifier_ios.go @@ -53,7 +53,6 @@ func (n *Notifier) OnNewPrefixes(prefixes []netip.Prefix) { n.currentPrefixes = newNets n.notify() } - func (n *Notifier) notify() { n.listenerMux.Lock() defer n.listenerMux.Unlock() diff --git a/client/ios/NetBirdSDK/client.go b/client/ios/NetBirdSDK/client.go index 3e2da7f4e..043673904 100644 --- a/client/ios/NetBirdSDK/client.go +++ b/client/ios/NetBirdSDK/client.go @@ -161,7 +161,11 @@ func (c *Client) Run(fd int32, interfaceName string, envList *EnvList) error { cfg.WgIface = interfaceName c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder) - return c.connectClient.RunOniOS(fd, c.networkChangeListener, c.dnsManager, c.stateFile) + hostDNS := []netip.AddrPort{ + netip.MustParseAddrPort("9.9.9.9:53"), + netip.MustParseAddrPort("149.112.112.112:53"), + } + return c.connectClient.RunOniOS(fd, c.networkChangeListener, c.dnsManager, hostDNS, c.stateFile) } // Stop the internal client and free the resources From d33cd4c95b87086cf907bd87e6840418c5b109bf Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 8 Apr 2026 15:29:32 +0800 Subject: [PATCH 286/374] [client] Add NAT-PMP/UPnP support (#5202) --- client/internal/dns/local/local_test.go | 8 +- client/internal/engine.go | 75 +++--- client/internal/peer/conn.go | 50 ++-- client/internal/peer/worker_ice.go | 99 +++++++- client/internal/portforward/env.go | 26 ++ client/internal/portforward/manager.go | 250 ++++++++++++++++++++ client/internal/portforward/manager_js.go | 36 +++ client/internal/portforward/manager_test.go | 159 +++++++++++++ client/internal/portforward/state.go | 50 ++++ client/server/state_generic.go | 5 + client/server/state_linux.go | 5 + go.mod | 4 + go.sum | 8 + 13 files changed, 716 insertions(+), 59 deletions(-) create mode 100644 client/internal/portforward/env.go create mode 100644 client/internal/portforward/manager.go create mode 100644 client/internal/portforward/manager_js.go create mode 100644 client/internal/portforward/manager_test.go create mode 100644 client/internal/portforward/state.go diff --git a/client/internal/dns/local/local_test.go b/client/internal/dns/local/local_test.go index 73f70035f..2c6b7dbc3 100644 --- a/client/internal/dns/local/local_test.go +++ b/client/internal/dns/local/local_test.go @@ -1263,9 +1263,9 @@ func TestLocalResolver_AuthoritativeFlag(t *testing.T) { }) } -// TestLocalResolver_Stop tests cleanup on Stop +// TestLocalResolver_Stop tests cleanup on GracefullyStop func TestLocalResolver_Stop(t *testing.T) { - t.Run("Stop clears all state", func(t *testing.T) { + t.Run("GracefullyStop clears all state", func(t *testing.T) { resolver := NewResolver() resolver.Update([]nbdns.CustomZone{{ Domain: "example.com.", @@ -1285,7 +1285,7 @@ func TestLocalResolver_Stop(t *testing.T) { assert.False(t, resolver.isInManagedZone("host.example.com.")) }) - t.Run("Stop is safe to call multiple times", func(t *testing.T) { + t.Run("GracefullyStop is safe to call multiple times", func(t *testing.T) { resolver := NewResolver() resolver.Update([]nbdns.CustomZone{{ Domain: "example.com.", @@ -1299,7 +1299,7 @@ func TestLocalResolver_Stop(t *testing.T) { resolver.Stop() }) - t.Run("Stop cancels in-flight external resolution", func(t *testing.T) { + t.Run("GracefullyStop cancels in-flight external resolution", func(t *testing.T) { resolver := NewResolver() lookupStarted := make(chan struct{}) diff --git a/client/internal/engine.go b/client/internal/engine.go index 53cac9b20..be2d8bbf3 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -46,6 +46,7 @@ import ( "github.com/netbirdio/netbird/client/internal/peer/guard" icemaker "github.com/netbirdio/netbird/client/internal/peer/ice" "github.com/netbirdio/netbird/client/internal/peerstore" + "github.com/netbirdio/netbird/client/internal/portforward" "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/internal/relay" "github.com/netbirdio/netbird/client/internal/rosenpass" @@ -210,9 +211,10 @@ type Engine struct { // checks are the client-applied posture checks that need to be evaluated on the client checks []*mgmProto.Checks - relayManager *relayClient.Manager - stateManager *statemanager.Manager - srWatcher *guard.SRWatcher + relayManager *relayClient.Manager + stateManager *statemanager.Manager + portForwardManager *portforward.Manager + srWatcher *guard.SRWatcher // Sync response persistence (protected by syncRespMux) syncRespMux sync.RWMutex @@ -259,26 +261,27 @@ func NewEngine( mobileDep MobileDependency, ) *Engine { engine := &Engine{ - clientCtx: clientCtx, - clientCancel: clientCancel, - signal: services.SignalClient, - signaler: peer.NewSignaler(services.SignalClient, config.WgPrivateKey), - mgmClient: services.MgmClient, - relayManager: services.RelayManager, - peerStore: peerstore.NewConnStore(), - syncMsgMux: &sync.Mutex{}, - config: config, - mobileDep: mobileDep, - STUNs: []*stun.URI{}, - TURNs: []*stun.URI{}, - networkSerial: 0, - statusRecorder: services.StatusRecorder, - stateManager: services.StateManager, - checks: services.Checks, - probeStunTurn: relay.NewStunTurnProbe(relay.DefaultCacheTTL), - jobExecutor: jobexec.NewExecutor(), - clientMetrics: services.ClientMetrics, - updateManager: services.UpdateManager, + clientCtx: clientCtx, + clientCancel: clientCancel, + signal: services.SignalClient, + signaler: peer.NewSignaler(services.SignalClient, config.WgPrivateKey), + mgmClient: services.MgmClient, + relayManager: services.RelayManager, + peerStore: peerstore.NewConnStore(), + syncMsgMux: &sync.Mutex{}, + config: config, + mobileDep: mobileDep, + STUNs: []*stun.URI{}, + TURNs: []*stun.URI{}, + networkSerial: 0, + statusRecorder: services.StatusRecorder, + stateManager: services.StateManager, + portForwardManager: portforward.NewManager(), + checks: services.Checks, + probeStunTurn: relay.NewStunTurnProbe(relay.DefaultCacheTTL), + jobExecutor: jobexec.NewExecutor(), + clientMetrics: services.ClientMetrics, + updateManager: services.UpdateManager, } log.Infof("I am: %s", config.WgPrivateKey.PublicKey().String()) @@ -537,6 +540,13 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) // conntrack entries from being created before the rules are in place e.setupWGProxyNoTrack() + // Start after interface is up since port may have been resolved from 0 or changed if occupied + e.shutdownWg.Add(1) + go func() { + defer e.shutdownWg.Done() + e.portForwardManager.Start(e.ctx, uint16(e.config.WgPort)) + }() + // Set the WireGuard interface for rosenpass after interface is up if e.rpManager != nil { e.rpManager.SetInterface(e.wgInterface) @@ -1540,12 +1550,13 @@ func (e *Engine) createPeerConn(pubKey string, allowedIPs []netip.Prefix, agentV } serviceDependencies := peer.ServiceDependencies{ - StatusRecorder: e.statusRecorder, - Signaler: e.signaler, - IFaceDiscover: e.mobileDep.IFaceDiscover, - RelayManager: e.relayManager, - SrWatcher: e.srWatcher, - MetricsRecorder: e.clientMetrics, + StatusRecorder: e.statusRecorder, + Signaler: e.signaler, + IFaceDiscover: e.mobileDep.IFaceDiscover, + RelayManager: e.relayManager, + SrWatcher: e.srWatcher, + PortForwardManager: e.portForwardManager, + MetricsRecorder: e.clientMetrics, } peerConn, err := peer.NewConn(config, serviceDependencies) if err != nil { @@ -1702,6 +1713,12 @@ func (e *Engine) close() { if e.rpManager != nil { _ = e.rpManager.Close() } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if err := e.portForwardManager.GracefullyStop(ctx); err != nil { + log.Warnf("failed to gracefully stop port forwarding manager: %s", err) + } } func (e *Engine) readInitialSettings() ([]*route.Route, *nbdns.Config, bool, error) { diff --git a/client/internal/peer/conn.go b/client/internal/peer/conn.go index bea0725f2..8d1585b3f 100644 --- a/client/internal/peer/conn.go +++ b/client/internal/peer/conn.go @@ -22,6 +22,7 @@ import ( icemaker "github.com/netbirdio/netbird/client/internal/peer/ice" "github.com/netbirdio/netbird/client/internal/peer/id" "github.com/netbirdio/netbird/client/internal/peer/worker" + "github.com/netbirdio/netbird/client/internal/portforward" "github.com/netbirdio/netbird/client/internal/stdnet" "github.com/netbirdio/netbird/route" relayClient "github.com/netbirdio/netbird/shared/relay/client" @@ -45,6 +46,7 @@ type ServiceDependencies struct { RelayManager *relayClient.Manager SrWatcher *guard.SRWatcher PeerConnDispatcher *dispatcher.ConnectionDispatcher + PortForwardManager *portforward.Manager MetricsRecorder MetricsRecorder } @@ -87,16 +89,17 @@ type ConnConfig struct { } type Conn struct { - Log *log.Entry - mu sync.Mutex - ctx context.Context - ctxCancel context.CancelFunc - config ConnConfig - statusRecorder *Status - signaler *Signaler - iFaceDiscover stdnet.ExternalIFaceDiscover - relayManager *relayClient.Manager - srWatcher *guard.SRWatcher + Log *log.Entry + mu sync.Mutex + ctx context.Context + ctxCancel context.CancelFunc + config ConnConfig + statusRecorder *Status + signaler *Signaler + iFaceDiscover stdnet.ExternalIFaceDiscover + relayManager *relayClient.Manager + srWatcher *guard.SRWatcher + portForwardManager *portforward.Manager onConnected func(remoteWireGuardKey string, remoteRosenpassPubKey []byte, wireGuardIP string, remoteRosenpassAddr string) onDisconnected func(remotePeer string) @@ -145,19 +148,20 @@ func NewConn(config ConnConfig, services ServiceDependencies) (*Conn, error) { dumpState := newStateDump(config.Key, connLog, services.StatusRecorder) var conn = &Conn{ - Log: connLog, - config: config, - statusRecorder: services.StatusRecorder, - signaler: services.Signaler, - iFaceDiscover: services.IFaceDiscover, - relayManager: services.RelayManager, - srWatcher: services.SrWatcher, - statusRelay: worker.NewAtomicStatus(), - statusICE: worker.NewAtomicStatus(), - dumpState: dumpState, - endpointUpdater: NewEndpointUpdater(connLog, config.WgConfig, isController(config)), - wgWatcher: NewWGWatcher(connLog, config.WgConfig.WgInterface, config.Key, dumpState), - metricsRecorder: services.MetricsRecorder, + Log: connLog, + config: config, + statusRecorder: services.StatusRecorder, + signaler: services.Signaler, + iFaceDiscover: services.IFaceDiscover, + relayManager: services.RelayManager, + srWatcher: services.SrWatcher, + portForwardManager: services.PortForwardManager, + statusRelay: worker.NewAtomicStatus(), + statusICE: worker.NewAtomicStatus(), + dumpState: dumpState, + endpointUpdater: NewEndpointUpdater(connLog, config.WgConfig, isController(config)), + wgWatcher: NewWGWatcher(connLog, config.WgConfig.WgInterface, config.Key, dumpState), + metricsRecorder: services.MetricsRecorder, } return conn, nil diff --git a/client/internal/peer/worker_ice.go b/client/internal/peer/worker_ice.go index edd70fb20..29bf5aaaa 100644 --- a/client/internal/peer/worker_ice.go +++ b/client/internal/peer/worker_ice.go @@ -16,6 +16,7 @@ import ( "github.com/netbirdio/netbird/client/iface/udpmux" "github.com/netbirdio/netbird/client/internal/peer/conntype" icemaker "github.com/netbirdio/netbird/client/internal/peer/ice" + "github.com/netbirdio/netbird/client/internal/portforward" "github.com/netbirdio/netbird/client/internal/stdnet" "github.com/netbirdio/netbird/route" ) @@ -61,6 +62,9 @@ type WorkerICE struct { // we record the last known state of the ICE agent to avoid duplicate on disconnected events lastKnownState ice.ConnectionState + + // portForwardAttempted tracks if we've already tried port forwarding this session + portForwardAttempted bool } func NewWorkerICE(ctx context.Context, log *log.Entry, config ConnConfig, conn *Conn, signaler *Signaler, ifaceDiscover stdnet.ExternalIFaceDiscover, statusRecorder *Status, hasRelayOnLocally bool) (*WorkerICE, error) { @@ -214,6 +218,8 @@ func (w *WorkerICE) Close() { } func (w *WorkerICE) reCreateAgent(dialerCancel context.CancelFunc, candidates []ice.CandidateType) (*icemaker.ThreadSafeAgent, error) { + w.portForwardAttempted = false + agent, err := icemaker.NewAgent(w.ctx, w.iFaceDiscover, w.config.ICEConfig, candidates, w.localUfrag, w.localPwd) if err != nil { return nil, fmt.Errorf("create agent: %w", err) @@ -370,6 +376,93 @@ func (w *WorkerICE) onICECandidate(candidate ice.Candidate) { w.log.Errorf("failed signaling candidate to the remote peer %s %s", w.config.Key, err) } }() + + if candidate.Type() == ice.CandidateTypeServerReflexive { + w.injectPortForwardedCandidate(candidate) + } +} + +// injectPortForwardedCandidate signals an additional candidate using the pre-created port mapping. +func (w *WorkerICE) injectPortForwardedCandidate(srflxCandidate ice.Candidate) { + pfManager := w.conn.portForwardManager + if pfManager == nil { + return + } + + mapping := pfManager.GetMapping() + if mapping == nil { + return + } + + w.muxAgent.Lock() + if w.portForwardAttempted { + w.muxAgent.Unlock() + return + } + w.portForwardAttempted = true + w.muxAgent.Unlock() + + forwardedCandidate, err := w.createForwardedCandidate(srflxCandidate, mapping) + if err != nil { + w.log.Warnf("create forwarded candidate: %v", err) + return + } + + w.log.Debugf("injecting port-forwarded candidate: %s (mapping: %d -> %d via %s, priority: %d)", + forwardedCandidate.String(), mapping.InternalPort, mapping.ExternalPort, mapping.NATType, forwardedCandidate.Priority()) + + go func() { + if err := w.signaler.SignalICECandidate(forwardedCandidate, w.config.Key); err != nil { + w.log.Errorf("signal port-forwarded candidate: %v", err) + } + }() +} + +// createForwardedCandidate creates a new server reflexive candidate with the forwarded port. +// It uses the NAT gateway's external IP with the forwarded port. +func (w *WorkerICE) createForwardedCandidate(srflxCandidate ice.Candidate, mapping *portforward.Mapping) (ice.Candidate, error) { + var externalIP string + if mapping.ExternalIP != nil && !mapping.ExternalIP.IsUnspecified() { + externalIP = mapping.ExternalIP.String() + } else { + // Fallback to STUN-discovered address if NAT didn't provide external IP + externalIP = srflxCandidate.Address() + } + + // Per RFC 8445, the related address for srflx is the base (host candidate address). + // If the original srflx has unspecified related address, use its own address as base. + relAddr := srflxCandidate.RelatedAddress().Address + if relAddr == "" || relAddr == "0.0.0.0" || relAddr == "::" { + relAddr = srflxCandidate.Address() + } + + // Arbitrary +1000 boost on top of RFC 8445 priority to favor port-forwarded candidates + // over regular srflx during ICE connectivity checks. + priority := srflxCandidate.Priority() + 1000 + + candidate, err := ice.NewCandidateServerReflexive(&ice.CandidateServerReflexiveConfig{ + Network: srflxCandidate.NetworkType().String(), + Address: externalIP, + Port: int(mapping.ExternalPort), + Component: srflxCandidate.Component(), + Priority: priority, + RelAddr: relAddr, + RelPort: int(mapping.InternalPort), + }) + if err != nil { + return nil, fmt.Errorf("create candidate: %w", err) + } + + for _, e := range srflxCandidate.Extensions() { + if e.Key == ice.ExtensionKeyCandidateID { + e.Value = srflxCandidate.ID() + } + if err := candidate.AddExtension(e); err != nil { + return nil, fmt.Errorf("add extension: %w", err) + } + } + + return candidate, nil } func (w *WorkerICE) onICESelectedCandidatePair(agent *icemaker.ThreadSafeAgent, c1, c2 ice.Candidate) { @@ -411,10 +504,10 @@ func (w *WorkerICE) logSuccessfulPaths(agent *icemaker.ThreadSafeAgent) { if !lok || !rok { continue } - w.log.Debugf("successful ICE path %s: [%s %s %s] <-> [%s %s %s] rtt=%.3fms", + w.log.Debugf("successful ICE path %s: [%s %s %s:%d] <-> [%s %s %s:%d] rtt=%.3fms", sessionID, - local.NetworkType(), local.Type(), local.Address(), - remote.NetworkType(), remote.Type(), remote.Address(), + local.NetworkType(), local.Type(), local.Address(), local.Port(), + remote.NetworkType(), remote.Type(), remote.Address(), remote.Port(), stat.CurrentRoundTripTime*1000) } } diff --git a/client/internal/portforward/env.go b/client/internal/portforward/env.go new file mode 100644 index 000000000..444a6b478 --- /dev/null +++ b/client/internal/portforward/env.go @@ -0,0 +1,26 @@ +package portforward + +import ( + "os" + "strconv" + + log "github.com/sirupsen/logrus" +) + +const ( + envDisableNATMapper = "NB_DISABLE_NAT_MAPPER" +) + +func isDisabledByEnv() bool { + val := os.Getenv(envDisableNATMapper) + if val == "" { + return false + } + + disabled, err := strconv.ParseBool(val) + if err != nil { + log.Warnf("failed to parse %s: %v", envDisableNATMapper, err) + return false + } + return disabled +} diff --git a/client/internal/portforward/manager.go b/client/internal/portforward/manager.go new file mode 100644 index 000000000..019c2ad86 --- /dev/null +++ b/client/internal/portforward/manager.go @@ -0,0 +1,250 @@ +//go:build !js + +package portforward + +import ( + "context" + "fmt" + "net" + "sync" + "time" + + "github.com/libp2p/go-nat" + log "github.com/sirupsen/logrus" +) + +const ( + defaultMappingTTL = 2 * time.Hour + renewalInterval = defaultMappingTTL / 2 + discoveryTimeout = 10 * time.Second + mappingDescription = "NetBird" +) + +type Mapping struct { + Protocol string + InternalPort uint16 + ExternalPort uint16 + ExternalIP net.IP + NATType string +} + +type Manager struct { + cancel context.CancelFunc + + mapping *Mapping + mappingLock sync.Mutex + + wgPort uint16 + + done chan struct{} + stopCtx chan context.Context + + // protect exported functions + mu sync.Mutex +} + +func NewManager() *Manager { + return &Manager{ + stopCtx: make(chan context.Context, 1), + } +} + +func (m *Manager) Start(ctx context.Context, wgPort uint16) { + m.mu.Lock() + if m.cancel != nil { + m.mu.Unlock() + return + } + + if isDisabledByEnv() { + log.Infof("NAT port mapper disabled via %s", envDisableNATMapper) + m.mu.Unlock() + return + } + + if wgPort == 0 { + log.Warnf("invalid WireGuard port 0; NAT mapping disabled") + m.mu.Unlock() + return + } + m.wgPort = wgPort + + m.done = make(chan struct{}) + defer close(m.done) + + ctx, m.cancel = context.WithCancel(ctx) + m.mu.Unlock() + + gateway, mapping, err := m.setup(ctx) + if err != nil { + log.Errorf("failed to setup NAT port mapping: %v", err) + + return + } + + m.mappingLock.Lock() + m.mapping = mapping + m.mappingLock.Unlock() + + m.renewLoop(ctx, gateway) + + select { + case cleanupCtx := <-m.stopCtx: + // block the Start while cleaned up gracefully + m.cleanup(cleanupCtx, gateway) + default: + // return Start immediately and cleanup in background + cleanupCtx, cleanupCancel := context.WithTimeout(context.Background(), 10*time.Second) + go func() { + defer cleanupCancel() + m.cleanup(cleanupCtx, gateway) + }() + } +} + +// GetMapping returns the current mapping if ready, nil otherwise +func (m *Manager) GetMapping() *Mapping { + m.mappingLock.Lock() + defer m.mappingLock.Unlock() + + if m.mapping == nil { + return nil + } + + mapping := *m.mapping + return &mapping +} + +// GracefullyStop cancels the manager and attempts to delete the port mapping. +// After GracefullyStop returns, the manager cannot be restarted. +func (m *Manager) GracefullyStop(ctx context.Context) error { + m.mu.Lock() + defer m.mu.Unlock() + + if m.cancel == nil { + return nil + } + + // Send cleanup context before cancelling, so Start picks it up after renewLoop exits. + m.startTearDown(ctx) + + m.cancel() + m.cancel = nil + + select { + case <-ctx.Done(): + return ctx.Err() + case <-m.done: + return nil + } +} + +func (m *Manager) setup(ctx context.Context) (nat.NAT, *Mapping, error) { + discoverCtx, discoverCancel := context.WithTimeout(ctx, discoveryTimeout) + defer discoverCancel() + + gateway, err := nat.DiscoverGateway(discoverCtx) + if err != nil { + log.Infof("NAT gateway discovery failed: %v (port forwarding disabled)", err) + return nil, nil, err + } + + log.Infof("discovered NAT gateway: %s", gateway.Type()) + + mapping, err := m.createMapping(ctx, gateway) + if err != nil { + log.Warnf("failed to create port mapping: %v", err) + return nil, nil, err + } + return gateway, mapping, nil +} + +func (m *Manager) createMapping(ctx context.Context, gateway nat.NAT) (*Mapping, error) { + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + externalPort, err := gateway.AddPortMapping(ctx, "udp", int(m.wgPort), mappingDescription, defaultMappingTTL) + if err != nil { + return nil, err + } + + externalIP, err := gateway.GetExternalAddress() + if err != nil { + log.Debugf("failed to get external address: %v", err) + // todo return with err? + } + + mapping := &Mapping{ + Protocol: "udp", + InternalPort: m.wgPort, + ExternalPort: uint16(externalPort), + ExternalIP: externalIP, + NATType: gateway.Type(), + } + + log.Infof("created port mapping: %d -> %d via %s (external IP: %s)", + m.wgPort, externalPort, gateway.Type(), externalIP) + return mapping, nil +} + +func (m *Manager) renewLoop(ctx context.Context, gateway nat.NAT) { + ticker := time.NewTicker(renewalInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if err := m.renewMapping(ctx, gateway); err != nil { + log.Warnf("failed to renew port mapping: %v", err) + continue + } + } + } +} + +func (m *Manager) renewMapping(ctx context.Context, gateway nat.NAT) error { + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + externalPort, err := gateway.AddPortMapping(ctx, m.mapping.Protocol, int(m.mapping.InternalPort), mappingDescription, defaultMappingTTL) + if err != nil { + return fmt.Errorf("add port mapping: %w", err) + } + + if uint16(externalPort) != m.mapping.ExternalPort { + log.Warnf("external port changed on renewal: %d -> %d (candidate may be stale)", m.mapping.ExternalPort, externalPort) + m.mappingLock.Lock() + m.mapping.ExternalPort = uint16(externalPort) + m.mappingLock.Unlock() + } + + log.Debugf("renewed port mapping: %d -> %d", m.mapping.InternalPort, m.mapping.ExternalPort) + return nil +} + +func (m *Manager) cleanup(ctx context.Context, gateway nat.NAT) { + m.mappingLock.Lock() + mapping := m.mapping + m.mapping = nil + m.mappingLock.Unlock() + + if mapping == nil { + return + } + + if err := gateway.DeletePortMapping(ctx, mapping.Protocol, int(mapping.InternalPort)); err != nil { + log.Warnf("delete port mapping on stop: %v", err) + return + } + + log.Infof("deleted port mapping for port %d", mapping.InternalPort) +} + +func (m *Manager) startTearDown(ctx context.Context) { + select { + case m.stopCtx <- ctx: + default: + } +} diff --git a/client/internal/portforward/manager_js.go b/client/internal/portforward/manager_js.go new file mode 100644 index 000000000..d5db147f2 --- /dev/null +++ b/client/internal/portforward/manager_js.go @@ -0,0 +1,36 @@ +package portforward + +import ( + "context" + "net" +) + +// Mapping represents port mapping information. +type Mapping struct { + Protocol string + InternalPort uint16 + ExternalPort uint16 + ExternalIP net.IP + NATType string +} + +// Manager is a stub for js/wasm builds where NAT-PMP/UPnP is not supported. +type Manager struct{} + +// NewManager returns a stub manager for js/wasm builds. +func NewManager() *Manager { + return &Manager{} +} + +// Start is a no-op on js/wasm: NAT-PMP/UPnP is not available in browser environments. +func (m *Manager) Start(context.Context, uint16) { + // no NAT traversal in wasm +} + +// GracefullyStop is a no-op on js/wasm. +func (m *Manager) GracefullyStop(context.Context) error { return nil } + +// GetMapping always returns nil on js/wasm. +func (m *Manager) GetMapping() *Mapping { + return nil +} diff --git a/client/internal/portforward/manager_test.go b/client/internal/portforward/manager_test.go new file mode 100644 index 000000000..1029e87f5 --- /dev/null +++ b/client/internal/portforward/manager_test.go @@ -0,0 +1,159 @@ +//go:build !js + +package portforward + +import ( + "context" + "net" + "testing" + "time" + + "github.com/libp2p/go-nat" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type mockNAT struct { + natType string + deviceAddr net.IP + externalAddr net.IP + internalAddr net.IP + mappings map[int]int + addMappingErr error + deleteMappingErr error +} + +func newMockNAT() *mockNAT { + return &mockNAT{ + natType: "Mock-NAT", + deviceAddr: net.ParseIP("192.168.1.1"), + externalAddr: net.ParseIP("203.0.113.50"), + internalAddr: net.ParseIP("192.168.1.100"), + mappings: make(map[int]int), + } +} + +func (m *mockNAT) Type() string { + return m.natType +} + +func (m *mockNAT) GetDeviceAddress() (net.IP, error) { + return m.deviceAddr, nil +} + +func (m *mockNAT) GetExternalAddress() (net.IP, error) { + return m.externalAddr, nil +} + +func (m *mockNAT) GetInternalAddress() (net.IP, error) { + return m.internalAddr, nil +} + +func (m *mockNAT) AddPortMapping(ctx context.Context, protocol string, internalPort int, description string, timeout time.Duration) (int, error) { + if m.addMappingErr != nil { + return 0, m.addMappingErr + } + externalPort := internalPort + m.mappings[internalPort] = externalPort + return externalPort, nil +} + +func (m *mockNAT) DeletePortMapping(ctx context.Context, protocol string, internalPort int) error { + if m.deleteMappingErr != nil { + return m.deleteMappingErr + } + delete(m.mappings, internalPort) + return nil +} + +func TestManager_CreateMapping(t *testing.T) { + m := NewManager() + m.wgPort = 51820 + + gateway := newMockNAT() + mapping, err := m.createMapping(context.Background(), gateway) + require.NoError(t, err) + require.NotNil(t, mapping) + + assert.Equal(t, "udp", mapping.Protocol) + assert.Equal(t, uint16(51820), mapping.InternalPort) + assert.Equal(t, uint16(51820), mapping.ExternalPort) + assert.Equal(t, "Mock-NAT", mapping.NATType) + assert.Equal(t, net.ParseIP("203.0.113.50").To4(), mapping.ExternalIP.To4()) +} + +func TestManager_GetMapping_ReturnsNilWhenNotReady(t *testing.T) { + m := NewManager() + assert.Nil(t, m.GetMapping()) +} + +func TestManager_GetMapping_ReturnsCopy(t *testing.T) { + m := NewManager() + m.mapping = &Mapping{ + Protocol: "udp", + InternalPort: 51820, + ExternalPort: 51820, + } + + mapping := m.GetMapping() + require.NotNil(t, mapping) + assert.Equal(t, uint16(51820), mapping.InternalPort) + + // Mutating the returned copy should not affect the manager's mapping. + mapping.ExternalPort = 9999 + assert.Equal(t, uint16(51820), m.GetMapping().ExternalPort) +} + +func TestManager_Cleanup_DeletesMapping(t *testing.T) { + m := NewManager() + m.mapping = &Mapping{ + Protocol: "udp", + InternalPort: 51820, + ExternalPort: 51820, + } + + gateway := newMockNAT() + // Seed the mock so we can verify deletion. + gateway.mappings[51820] = 51820 + + m.cleanup(context.Background(), gateway) + + _, exists := gateway.mappings[51820] + assert.False(t, exists, "mapping should be deleted from gateway") + assert.Nil(t, m.GetMapping(), "in-memory mapping should be cleared") +} + +func TestManager_Cleanup_NilMapping(t *testing.T) { + m := NewManager() + gateway := newMockNAT() + + // Should not panic or call gateway. + m.cleanup(context.Background(), gateway) +} + +func TestState_Cleanup(t *testing.T) { + origDiscover := discoverGateway + defer func() { discoverGateway = origDiscover }() + + mockGateway := newMockNAT() + mockGateway.mappings[51820] = 51820 + discoverGateway = func(ctx context.Context) (nat.NAT, error) { + return mockGateway, nil + } + + state := &State{ + Protocol: "udp", + InternalPort: 51820, + } + + err := state.Cleanup() + assert.NoError(t, err) + + _, exists := mockGateway.mappings[51820] + assert.False(t, exists, "mapping should be deleted after cleanup") +} + +func TestState_Name(t *testing.T) { + state := &State{} + assert.Equal(t, "port_forward_state", state.Name()) +} diff --git a/client/internal/portforward/state.go b/client/internal/portforward/state.go new file mode 100644 index 000000000..3f939751a --- /dev/null +++ b/client/internal/portforward/state.go @@ -0,0 +1,50 @@ +//go:build !js + +package portforward + +import ( + "context" + "fmt" + + "github.com/libp2p/go-nat" + log "github.com/sirupsen/logrus" +) + +// discoverGateway is the function used for NAT gateway discovery. +// It can be replaced in tests to avoid real network operations. +var discoverGateway = nat.DiscoverGateway + +// State is persisted only for crash recovery cleanup +type State struct { + InternalPort uint16 `json:"internal_port,omitempty"` + Protocol string `json:"protocol,omitempty"` +} + +func (s *State) Name() string { + return "port_forward_state" +} + +// Cleanup implements statemanager.CleanableState for crash recovery +func (s *State) Cleanup() error { + if s.InternalPort == 0 { + return nil + } + + log.Infof("cleaning up stale port mapping for port %d", s.InternalPort) + + ctx, cancel := context.WithTimeout(context.Background(), discoveryTimeout) + defer cancel() + + gateway, err := discoverGateway(ctx) + if err != nil { + // Discovery failure is not an error - gateway may not exist + log.Debugf("cleanup: no gateway found: %v", err) + return nil + } + + if err := gateway.DeletePortMapping(ctx, s.Protocol, int(s.InternalPort)); err != nil { + return fmt.Errorf("delete port mapping: %w", err) + } + + return nil +} diff --git a/client/server/state_generic.go b/client/server/state_generic.go index 980ba0cda..3f794b611 100644 --- a/client/server/state_generic.go +++ b/client/server/state_generic.go @@ -9,6 +9,11 @@ import ( "github.com/netbirdio/netbird/client/ssh/config" ) +// registerStates registers all states that need crash recovery cleanup. +// Note: portforward.State is intentionally NOT registered here to avoid blocking startup +// for up to 10 seconds during NAT gateway discovery when no gateway is present. +// The gateway reference cannot be persisted across restarts, so cleanup requires re-discovery. +// Port forward cleanup is handled by the Manager during normal operation instead. func registerStates(mgr *statemanager.Manager) { mgr.RegisterState(&dns.ShutdownState{}) mgr.RegisterState(&systemops.ShutdownState{}) diff --git a/client/server/state_linux.go b/client/server/state_linux.go index 019477d8e..655edfc53 100644 --- a/client/server/state_linux.go +++ b/client/server/state_linux.go @@ -11,6 +11,11 @@ import ( "github.com/netbirdio/netbird/client/ssh/config" ) +// registerStates registers all states that need crash recovery cleanup. +// Note: portforward.State is intentionally NOT registered here to avoid blocking startup +// for up to 10 seconds during NAT gateway discovery when no gateway is present. +// The gateway reference cannot be persisted across restarts, so cleanup requires re-discovery. +// Port forward cleanup is handled by the Manager during normal operation instead. func registerStates(mgr *statemanager.Manager) { mgr.RegisterState(&dns.ShutdownState{}) mgr.RegisterState(&systemops.ShutdownState{}) diff --git a/go.mod b/go.mod index e9334f85b..a95192600 100644 --- a/go.mod +++ b/go.mod @@ -63,6 +63,7 @@ require ( github.com/hashicorp/go-version v1.6.0 github.com/jackc/pgx/v5 v5.5.5 github.com/libdns/route53 v1.5.0 + github.com/libp2p/go-nat v0.2.0 github.com/libp2p/go-netroute v0.2.1 github.com/lrh3321/ipset-go v0.0.0-20250619021614-54a0a98ace81 github.com/mdlayher/socket v0.5.1 @@ -200,10 +201,12 @@ require ( github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/huandu/xstrings v1.5.0 // indirect + github.com/huin/goupnp v1.2.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect + github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jeandeaual/go-locale v0.0.0-20250612000132-0ef82f21eade // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect @@ -213,6 +216,7 @@ require ( github.com/kelseyhightower/envconfig v1.4.0 // indirect github.com/klauspost/compress v1.18.0 // indirect github.com/klauspost/cpuid/v2 v2.2.7 // indirect + github.com/koron/go-ssdp v0.0.4 // indirect github.com/kr/fs v0.1.0 // indirect github.com/lib/pq v1.10.9 // indirect github.com/libdns/libdns v0.2.2 // indirect diff --git a/go.sum b/go.sum index 629388ccb..a1d2bb71f 100644 --- a/go.sum +++ b/go.sum @@ -281,6 +281,8 @@ github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= +github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= @@ -291,6 +293,8 @@ github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw= github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= @@ -328,6 +332,8 @@ github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYW github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= +github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -346,6 +352,8 @@ github.com/libdns/libdns v0.2.2 h1:O6ws7bAfRPaBsgAYt8MDe2HcNBGC29hkZ9MX2eUSX3s= github.com/libdns/libdns v0.2.2/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ= github.com/libdns/route53 v1.5.0 h1:2SKdpPFl/qgWsXQvsLNJJAoX7rSxlk7zgoL4jnWdXVA= github.com/libdns/route53 v1.5.0/go.mod h1:joT4hKmaTNKHEwb7GmZ65eoDz1whTu7KKYPS8ZqIh6Q= +github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= +github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= github.com/lrh3321/ipset-go v0.0.0-20250619021614-54a0a98ace81 h1:J56rFEfUTFT9j9CiRXhi1r8lUJ4W5idG3CiaBZGojNU= github.com/lrh3321/ipset-go v0.0.0-20250619021614-54a0a98ace81/go.mod h1:RD8ML/YdXctQ7qbcizZkw5mZ6l8Ogrl1dodBzVJduwI= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= From 96806bf55fb9c5fabe7390a69744a69e68e495b7 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Wed, 8 Apr 2026 09:38:31 +0200 Subject: [PATCH 287/374] [relay] Replace net.Conn with context-aware Conn interface (#5770) * [relay] Replace net.Conn with context-aware Conn interface for relay transports Introduce a listener.Conn interface with context-based Read/Write methods, replacing net.Conn throughout the relay server. This enables proper timeout propagation (e.g. handshake timeout) without goroutine-based workarounds and removes unused LocalAddr/SetDeadline methods from WS and QUIC conns. * [relay] Refactor Peer context management to ensure proper cleanup Integrate context creation (`context.WithCancel`) directly in `NewPeer` and remove redundant initialization in `Work`. Add `ctxCancel` calls to ensure context is properly canceled during `Close` operations. --- combined/cmd/root.go | 13 ++---- relay/server/handshake.go | 20 ++++++--- relay/server/listener/conn.go | 14 ++++++ relay/server/listener/listener.go | 14 ------ relay/server/listener/quic/conn.go | 39 +++-------------- relay/server/listener/quic/listener.go | 4 +- relay/server/listener/ws/conn.go | 30 +++---------- relay/server/listener/ws/listener.go | 24 ++++------- relay/server/peer.go | 60 +++++++++++++------------- relay/server/relay.go | 19 ++++++-- relay/server/server.go | 9 ++-- 11 files changed, 103 insertions(+), 143 deletions(-) create mode 100644 relay/server/listener/conn.go delete mode 100644 relay/server/listener/listener.go diff --git a/combined/cmd/root.go b/combined/cmd/root.go index ea1ff908a..db986b4d4 100644 --- a/combined/cmd/root.go +++ b/combined/cmd/root.go @@ -29,6 +29,7 @@ import ( "github.com/netbirdio/netbird/management/server/telemetry" "github.com/netbirdio/netbird/relay/healthcheck" relayServer "github.com/netbirdio/netbird/relay/server" + "github.com/netbirdio/netbird/relay/server/listener" "github.com/netbirdio/netbird/relay/server/listener/ws" sharedMetrics "github.com/netbirdio/netbird/shared/metrics" "github.com/netbirdio/netbird/shared/relay/auth" @@ -523,7 +524,7 @@ func createManagementServer(cfg *CombinedConfig, mgmtConfig *nbconfig.Config) (* func createCombinedHandler(grpcServer *grpc.Server, httpHandler http.Handler, relaySrv *relayServer.Server, meter metric.Meter, cfg *CombinedConfig) http.Handler { wsProxy := wsproxyserver.New(grpcServer, wsproxyserver.WithOTelMeter(meter)) - var relayAcceptFn func(conn net.Conn) + var relayAcceptFn func(conn listener.Conn) if relaySrv != nil { relayAcceptFn = relaySrv.RelayAccept() } @@ -563,7 +564,7 @@ func createCombinedHandler(grpcServer *grpc.Server, httpHandler http.Handler, re } // handleRelayWebSocket handles incoming WebSocket connections for the relay service -func handleRelayWebSocket(w http.ResponseWriter, r *http.Request, acceptFn func(conn net.Conn), cfg *CombinedConfig) { +func handleRelayWebSocket(w http.ResponseWriter, r *http.Request, acceptFn func(conn listener.Conn), cfg *CombinedConfig) { acceptOptions := &websocket.AcceptOptions{ OriginPatterns: []string{"*"}, } @@ -585,15 +586,9 @@ func handleRelayWebSocket(w http.ResponseWriter, r *http.Request, acceptFn func( return } - lAddr, err := net.ResolveTCPAddr("tcp", cfg.Server.ListenAddress) - if err != nil { - _ = wsConn.Close(websocket.StatusInternalError, "internal error") - return - } - log.Debugf("Relay WS client connected from: %s", rAddr) - conn := ws.NewConn(wsConn, lAddr, rAddr) + conn := ws.NewConn(wsConn, rAddr) acceptFn(conn) } diff --git a/relay/server/handshake.go b/relay/server/handshake.go index 8c3ee1899..067888406 100644 --- a/relay/server/handshake.go +++ b/relay/server/handshake.go @@ -1,11 +1,13 @@ package server import ( + "context" "fmt" - "net" + "time" log "github.com/sirupsen/logrus" + "github.com/netbirdio/netbird/relay/server/listener" "github.com/netbirdio/netbird/shared/relay/messages" //nolint:staticcheck "github.com/netbirdio/netbird/shared/relay/messages/address" @@ -13,6 +15,12 @@ import ( authmsg "github.com/netbirdio/netbird/shared/relay/messages/auth" ) +const ( + // handshakeTimeout bounds how long a connection may remain in the + // pre-authentication handshake phase before being closed. + handshakeTimeout = 10 * time.Second +) + type Validator interface { Validate(any) error // Deprecated: Use Validate instead. @@ -58,7 +66,7 @@ func marshalResponseHelloMsg(instanceURL string) ([]byte, error) { } type handshake struct { - conn net.Conn + conn listener.Conn validator Validator preparedMsg *preparedMsg @@ -66,9 +74,9 @@ type handshake struct { peerID *messages.PeerID } -func (h *handshake) handshakeReceive() (*messages.PeerID, error) { +func (h *handshake) handshakeReceive(ctx context.Context) (*messages.PeerID, error) { buf := make([]byte, messages.MaxHandshakeSize) - n, err := h.conn.Read(buf) + n, err := h.conn.Read(ctx, buf) if err != nil { return nil, fmt.Errorf("read from %s: %w", h.conn.RemoteAddr(), err) } @@ -103,7 +111,7 @@ func (h *handshake) handshakeReceive() (*messages.PeerID, error) { return peerID, nil } -func (h *handshake) handshakeResponse() error { +func (h *handshake) handshakeResponse(ctx context.Context) error { var responseMsg []byte if h.handshakeMethodAuth { responseMsg = h.preparedMsg.responseAuthMsg @@ -111,7 +119,7 @@ func (h *handshake) handshakeResponse() error { responseMsg = h.preparedMsg.responseHelloMsg } - if _, err := h.conn.Write(responseMsg); err != nil { + if _, err := h.conn.Write(ctx, responseMsg); err != nil { return fmt.Errorf("handshake response write to %s (%s): %w", h.peerID, h.conn.RemoteAddr(), err) } diff --git a/relay/server/listener/conn.go b/relay/server/listener/conn.go new file mode 100644 index 000000000..ef0869594 --- /dev/null +++ b/relay/server/listener/conn.go @@ -0,0 +1,14 @@ +package listener + +import ( + "context" + "net" +) + +// Conn is the relay connection contract implemented by WS and QUIC transports. +type Conn interface { + Read(ctx context.Context, b []byte) (n int, err error) + Write(ctx context.Context, b []byte) (n int, err error) + RemoteAddr() net.Addr + Close() error +} diff --git a/relay/server/listener/listener.go b/relay/server/listener/listener.go deleted file mode 100644 index 0a79182f4..000000000 --- a/relay/server/listener/listener.go +++ /dev/null @@ -1,14 +0,0 @@ -package listener - -import ( - "context" - "net" - - "github.com/netbirdio/netbird/relay/protocol" -) - -type Listener interface { - Listen(func(conn net.Conn)) error - Shutdown(ctx context.Context) error - Protocol() protocol.Protocol -} diff --git a/relay/server/listener/quic/conn.go b/relay/server/listener/quic/conn.go index 6e2201bf7..d8dafcd1f 100644 --- a/relay/server/listener/quic/conn.go +++ b/relay/server/listener/quic/conn.go @@ -3,33 +3,26 @@ package quic import ( "context" "errors" - "fmt" "net" "sync" - "time" "github.com/quic-go/quic-go" ) type Conn struct { - session *quic.Conn - closed bool - closedMu sync.Mutex - ctx context.Context - ctxCancel context.CancelFunc + session *quic.Conn + closed bool + closedMu sync.Mutex } func NewConn(session *quic.Conn) *Conn { - ctx, cancel := context.WithCancel(context.Background()) return &Conn{ - session: session, - ctx: ctx, - ctxCancel: cancel, + session: session, } } -func (c *Conn) Read(b []byte) (n int, err error) { - dgram, err := c.session.ReceiveDatagram(c.ctx) +func (c *Conn) Read(ctx context.Context, b []byte) (n int, err error) { + dgram, err := c.session.ReceiveDatagram(ctx) if err != nil { return 0, c.remoteCloseErrHandling(err) } @@ -38,33 +31,17 @@ func (c *Conn) Read(b []byte) (n int, err error) { return n, nil } -func (c *Conn) Write(b []byte) (int, error) { +func (c *Conn) Write(_ context.Context, b []byte) (int, error) { if err := c.session.SendDatagram(b); err != nil { return 0, c.remoteCloseErrHandling(err) } return len(b), nil } -func (c *Conn) LocalAddr() net.Addr { - return c.session.LocalAddr() -} - func (c *Conn) RemoteAddr() net.Addr { return c.session.RemoteAddr() } -func (c *Conn) SetReadDeadline(t time.Time) error { - return nil -} - -func (c *Conn) SetWriteDeadline(t time.Time) error { - return fmt.Errorf("SetWriteDeadline is not implemented") -} - -func (c *Conn) SetDeadline(t time.Time) error { - return fmt.Errorf("SetDeadline is not implemented") -} - func (c *Conn) Close() error { c.closedMu.Lock() if c.closed { @@ -74,8 +51,6 @@ func (c *Conn) Close() error { c.closed = true c.closedMu.Unlock() - c.ctxCancel() // Cancel the context - sessionErr := c.session.CloseWithError(0, "normal closure") return sessionErr } diff --git a/relay/server/listener/quic/listener.go b/relay/server/listener/quic/listener.go index 797223e74..68f0e03c0 100644 --- a/relay/server/listener/quic/listener.go +++ b/relay/server/listener/quic/listener.go @@ -5,12 +5,12 @@ import ( "crypto/tls" "errors" "fmt" - "net" "github.com/quic-go/quic-go" log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/relay/protocol" + relaylistener "github.com/netbirdio/netbird/relay/server/listener" nbRelay "github.com/netbirdio/netbird/shared/relay" ) @@ -25,7 +25,7 @@ type Listener struct { listener *quic.Listener } -func (l *Listener) Listen(acceptFn func(conn net.Conn)) error { +func (l *Listener) Listen(acceptFn func(conn relaylistener.Conn)) error { quicCfg := &quic.Config{ EnableDatagrams: true, InitialPacketSize: nbRelay.QUICInitialPacketSize, diff --git a/relay/server/listener/ws/conn.go b/relay/server/listener/ws/conn.go index d5bce56f7..c22b5719d 100644 --- a/relay/server/listener/ws/conn.go +++ b/relay/server/listener/ws/conn.go @@ -18,25 +18,21 @@ const ( type Conn struct { *websocket.Conn - lAddr *net.TCPAddr rAddr *net.TCPAddr closed bool closedMu sync.Mutex - ctx context.Context } -func NewConn(wsConn *websocket.Conn, lAddr, rAddr *net.TCPAddr) *Conn { +func NewConn(wsConn *websocket.Conn, rAddr *net.TCPAddr) *Conn { return &Conn{ Conn: wsConn, - lAddr: lAddr, rAddr: rAddr, - ctx: context.Background(), } } -func (c *Conn) Read(b []byte) (n int, err error) { - t, r, err := c.Reader(c.ctx) +func (c *Conn) Read(ctx context.Context, b []byte) (n int, err error) { + t, r, err := c.Reader(ctx) if err != nil { return 0, c.ioErrHandling(err) } @@ -56,34 +52,18 @@ func (c *Conn) Read(b []byte) (n int, err error) { // Write writes a binary message with the given payload. // It does not block until fill the internal buffer. // If the buffer filled up, wait until the buffer is drained or timeout. -func (c *Conn) Write(b []byte) (int, error) { - ctx, ctxCancel := context.WithTimeout(c.ctx, writeTimeout) +func (c *Conn) Write(ctx context.Context, b []byte) (int, error) { + ctx, ctxCancel := context.WithTimeout(ctx, writeTimeout) defer ctxCancel() err := c.Conn.Write(ctx, websocket.MessageBinary, b) return len(b), err } -func (c *Conn) LocalAddr() net.Addr { - return c.lAddr -} - func (c *Conn) RemoteAddr() net.Addr { return c.rAddr } -func (c *Conn) SetReadDeadline(t time.Time) error { - return fmt.Errorf("SetReadDeadline is not implemented") -} - -func (c *Conn) SetWriteDeadline(t time.Time) error { - return fmt.Errorf("SetWriteDeadline is not implemented") -} - -func (c *Conn) SetDeadline(t time.Time) error { - return fmt.Errorf("SetDeadline is not implemented") -} - func (c *Conn) Close() error { c.closedMu.Lock() c.closed = true diff --git a/relay/server/listener/ws/listener.go b/relay/server/listener/ws/listener.go index 12219e29b..ba175f901 100644 --- a/relay/server/listener/ws/listener.go +++ b/relay/server/listener/ws/listener.go @@ -7,11 +7,13 @@ import ( "fmt" "net" "net/http" + "time" "github.com/coder/websocket" log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/relay/protocol" + relaylistener "github.com/netbirdio/netbird/relay/server/listener" "github.com/netbirdio/netbird/shared/relay" ) @@ -27,18 +29,19 @@ type Listener struct { TLSConfig *tls.Config server *http.Server - acceptFn func(conn net.Conn) + acceptFn func(conn relaylistener.Conn) } -func (l *Listener) Listen(acceptFn func(conn net.Conn)) error { +func (l *Listener) Listen(acceptFn func(conn relaylistener.Conn)) error { l.acceptFn = acceptFn mux := http.NewServeMux() mux.HandleFunc(URLPath, l.onAccept) l.server = &http.Server{ - Addr: l.Address, - Handler: mux, - TLSConfig: l.TLSConfig, + Addr: l.Address, + Handler: mux, + TLSConfig: l.TLSConfig, + ReadHeaderTimeout: 5 * time.Second, } log.Infof("WS server listening address: %s", l.Address) @@ -93,18 +96,9 @@ func (l *Listener) onAccept(w http.ResponseWriter, r *http.Request) { return } - lAddr, err := net.ResolveTCPAddr("tcp", l.server.Addr) - if err != nil { - err = wsConn.Close(websocket.StatusInternalError, "internal error") - if err != nil { - log.Errorf("failed to close ws connection: %s", err) - } - return - } - log.Infof("WS client connected from: %s", rAddr) - conn := NewConn(wsConn, lAddr, rAddr) + conn := NewConn(wsConn, rAddr) l.acceptFn(conn) } diff --git a/relay/server/peer.go b/relay/server/peer.go index c5ff41857..8376cdfa7 100644 --- a/relay/server/peer.go +++ b/relay/server/peer.go @@ -10,6 +10,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/relay/metrics" + "github.com/netbirdio/netbird/relay/server/listener" "github.com/netbirdio/netbird/relay/server/store" "github.com/netbirdio/netbird/shared/relay/healthcheck" "github.com/netbirdio/netbird/shared/relay/messages" @@ -26,11 +27,14 @@ type Peer struct { metrics *metrics.Metrics log *log.Entry id messages.PeerID - conn net.Conn + conn listener.Conn connMu sync.RWMutex store *store.Store notifier *store.PeerNotifier + ctx context.Context + ctxCancel context.CancelFunc + peersListener *store.Listener // between the online peer collection step and the notification sending should not be sent offline notifications from another thread @@ -38,14 +42,17 @@ type Peer struct { } // NewPeer creates a new Peer instance and prepare custom logging -func NewPeer(metrics *metrics.Metrics, id messages.PeerID, conn net.Conn, store *store.Store, notifier *store.PeerNotifier) *Peer { +func NewPeer(metrics *metrics.Metrics, id messages.PeerID, conn listener.Conn, store *store.Store, notifier *store.PeerNotifier) *Peer { + ctx, cancel := context.WithCancel(context.Background()) p := &Peer{ - metrics: metrics, - log: log.WithField("peer_id", id.String()), - id: id, - conn: conn, - store: store, - notifier: notifier, + metrics: metrics, + log: log.WithField("peer_id", id.String()), + id: id, + conn: conn, + store: store, + notifier: notifier, + ctx: ctx, + ctxCancel: cancel, } return p @@ -57,6 +64,7 @@ func NewPeer(metrics *metrics.Metrics, id messages.PeerID, conn net.Conn, store func (p *Peer) Work() { p.peersListener = p.notifier.NewListener(p.sendPeersOnline, p.sendPeersWentOffline) defer func() { + p.ctxCancel() p.notifier.RemoveListener(p.peersListener) if err := p.conn.Close(); err != nil && !errors.Is(err, net.ErrClosed) { @@ -64,8 +72,7 @@ func (p *Peer) Work() { } }() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := p.ctx hc := healthcheck.NewSender(p.log) go hc.StartHealthCheck(ctx) @@ -73,7 +80,7 @@ func (p *Peer) Work() { buf := make([]byte, bufferSize) for { - n, err := p.conn.Read(buf) + n, err := p.conn.Read(ctx, buf) if err != nil { if !errors.Is(err, net.ErrClosed) { p.log.Errorf("failed to read message: %s", err) @@ -131,10 +138,10 @@ func (p *Peer) handleMsgType(ctx context.Context, msgType messages.MsgType, hc * } // Write writes data to the connection -func (p *Peer) Write(b []byte) (int, error) { +func (p *Peer) Write(ctx context.Context, b []byte) (int, error) { p.connMu.RLock() defer p.connMu.RUnlock() - return p.conn.Write(b) + return p.conn.Write(ctx, b) } // CloseGracefully closes the connection with the peer gracefully. Send a close message to the client and close the @@ -147,6 +154,7 @@ func (p *Peer) CloseGracefully(ctx context.Context) { p.log.Errorf("failed to send close message to peer: %s", p.String()) } + p.ctxCancel() if err := p.conn.Close(); err != nil { p.log.Errorf(errCloseConn, err) } @@ -156,6 +164,7 @@ func (p *Peer) Close() { p.connMu.Lock() defer p.connMu.Unlock() + p.ctxCancel() if err := p.conn.Close(); err != nil { p.log.Errorf(errCloseConn, err) } @@ -170,26 +179,15 @@ func (p *Peer) writeWithTimeout(ctx context.Context, buf []byte) error { ctx, cancel := context.WithTimeout(ctx, 3*time.Second) defer cancel() - writeDone := make(chan struct{}) - var err error - go func() { - _, err = p.conn.Write(buf) - close(writeDone) - }() - - select { - case <-ctx.Done(): - return ctx.Err() - case <-writeDone: - return err - } + _, err := p.conn.Write(ctx, buf) + return err } func (p *Peer) handleHealthcheckEvents(ctx context.Context, hc *healthcheck.Sender) { for { select { case <-hc.HealthCheck: - _, err := p.Write(messages.MarshalHealthcheck()) + _, err := p.Write(ctx, messages.MarshalHealthcheck()) if err != nil { p.log.Errorf("failed to send healthcheck message: %s", err) return @@ -228,12 +226,12 @@ func (p *Peer) handleTransportMsg(msg []byte) { return } - n, err := dp.Write(msg) + n, err := dp.Write(dp.ctx, msg) if err != nil { p.log.Errorf("failed to write transport message to: %s", dp.String()) return } - p.metrics.TransferBytesSent.Add(context.Background(), int64(n)) + p.metrics.TransferBytesSent.Add(p.ctx, int64(n)) } func (p *Peer) handleSubscribePeerState(msg []byte) { @@ -276,7 +274,7 @@ func (p *Peer) sendPeersOnline(peers []messages.PeerID) { } for n, msg := range msgs { - if _, err := p.Write(msg); err != nil { + if _, err := p.Write(p.ctx, msg); err != nil { p.log.Errorf("failed to write %d. peers offline message: %s", n, err) } } @@ -293,7 +291,7 @@ func (p *Peer) sendPeersWentOffline(peers []messages.PeerID) { } for n, msg := range msgs { - if _, err := p.Write(msg); err != nil { + if _, err := p.Write(p.ctx, msg); err != nil { p.log.Errorf("failed to write %d. peers offline message: %s", n, err) } } diff --git a/relay/server/relay.go b/relay/server/relay.go index bb355f58f..56add8bea 100644 --- a/relay/server/relay.go +++ b/relay/server/relay.go @@ -3,7 +3,6 @@ package server import ( "context" "fmt" - "net" "net/url" "sync" "time" @@ -13,11 +12,20 @@ import ( "go.opentelemetry.io/otel/metric" "github.com/netbirdio/netbird/relay/healthcheck/peerid" + "github.com/netbirdio/netbird/relay/protocol" + "github.com/netbirdio/netbird/relay/server/listener" + //nolint:staticcheck "github.com/netbirdio/netbird/relay/metrics" "github.com/netbirdio/netbird/relay/server/store" ) +type Listener interface { + Listen(func(conn listener.Conn)) error + Shutdown(ctx context.Context) error + Protocol() protocol.Protocol +} + type Config struct { Meter metric.Meter ExposedAddress string @@ -109,7 +117,7 @@ func NewRelay(config Config) (*Relay, error) { } // Accept start to handle a new peer connection -func (r *Relay) Accept(conn net.Conn) { +func (r *Relay) Accept(conn listener.Conn) { acceptTime := time.Now() r.closeMu.RLock() defer r.closeMu.RUnlock() @@ -117,12 +125,15 @@ func (r *Relay) Accept(conn net.Conn) { return } + hsCtx, hsCancel := context.WithTimeout(context.Background(), handshakeTimeout) + defer hsCancel() + h := handshake{ conn: conn, validator: r.validator, preparedMsg: r.preparedMsg, } - peerID, err := h.handshakeReceive() + peerID, err := h.handshakeReceive(hsCtx) if err != nil { if peerid.IsHealthCheck(peerID) { log.Debugf("health check connection from %s", conn.RemoteAddr()) @@ -154,7 +165,7 @@ func (r *Relay) Accept(conn net.Conn) { r.metrics.PeerDisconnected(peer.String()) }() - if err := h.handshakeResponse(); err != nil { + if err := h.handshakeResponse(hsCtx); err != nil { log.Errorf("failed to send handshake response, close peer: %s", err) peer.Close() } diff --git a/relay/server/server.go b/relay/server/server.go index a0f7eb73c..340da55b8 100644 --- a/relay/server/server.go +++ b/relay/server/server.go @@ -3,7 +3,6 @@ package server import ( "context" "crypto/tls" - "net" "net/url" "sync" @@ -31,7 +30,7 @@ type ListenerConfig struct { // In a new HTTP connection, the server will accept the connection and pass it to the Relay server via the Accept method. type Server struct { relay *Relay - listeners []listener.Listener + listeners []Listener listenerMux sync.Mutex } @@ -56,7 +55,7 @@ func NewServer(config Config) (*Server, error) { } return &Server{ relay: relay, - listeners: make([]listener.Listener, 0, 2), + listeners: make([]Listener, 0, 2), }, nil } @@ -86,7 +85,7 @@ func (r *Server) Listen(cfg ListenerConfig) error { wg := sync.WaitGroup{} for _, l := range r.listeners { wg.Add(1) - go func(listener listener.Listener) { + go func(listener Listener) { defer wg.Done() errChan <- listener.Listen(r.relay.Accept) }(l) @@ -139,6 +138,6 @@ func (r *Server) InstanceURL() url.URL { // RelayAccept returns the relay's Accept function for handling incoming connections. // This allows external HTTP handlers to route connections to the relay without // starting the relay's own listeners. -func (r *Server) RelayAccept() func(conn net.Conn) { +func (r *Server) RelayAccept() func(conn listener.Conn) { return r.relay.Accept } From dc160aff3656f86097e1c1ed0dc276b2603f8aa3 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 8 Apr 2026 16:25:57 +0800 Subject: [PATCH 288/374] [client] Fix SSH proxy stripping shell quoting from forwarded commands (#5669) --- client/ssh/proxy/proxy.go | 4 +- client/ssh/proxy/proxy_test.go | 186 ++++++++++++++++++++++++++ client/ssh/server/session_handlers.go | 2 +- 3 files changed, 189 insertions(+), 3 deletions(-) diff --git a/client/ssh/proxy/proxy.go b/client/ssh/proxy/proxy.go index 8897b9c7e..59007f75c 100644 --- a/client/ssh/proxy/proxy.go +++ b/client/ssh/proxy/proxy.go @@ -141,7 +141,7 @@ func (p *SSHProxy) runProxySSHServer(jwtToken string) error { func (p *SSHProxy) handleSSHSession(session ssh.Session) { ptyReq, winCh, isPty := session.Pty() - hasCommand := len(session.Command()) > 0 + hasCommand := session.RawCommand() != "" sshClient, err := p.getOrCreateBackendClient(session.Context(), session.User()) if err != nil { @@ -180,7 +180,7 @@ func (p *SSHProxy) handleSSHSession(session ssh.Session) { } if hasCommand { - if err := serverSession.Run(strings.Join(session.Command(), " ")); err != nil { + if err := serverSession.Run(session.RawCommand()); err != nil { log.Debugf("run command: %v", err) p.handleProxyExitCode(session, err) } diff --git a/client/ssh/proxy/proxy_test.go b/client/ssh/proxy/proxy_test.go index dba2e88da..b33d5f8f4 100644 --- a/client/ssh/proxy/proxy_test.go +++ b/client/ssh/proxy/proxy_test.go @@ -1,6 +1,7 @@ package proxy import ( + "bytes" "context" "crypto/rand" "crypto/rsa" @@ -245,6 +246,191 @@ func TestSSHProxy_Connect(t *testing.T) { cancel() } +// TestSSHProxy_CommandQuoting verifies that the proxy preserves shell quoting +// when forwarding commands to the backend. This is critical for tools like +// Ansible that send commands such as: +// +// /bin/sh -c '( umask 77 && mkdir -p ... ) && sleep 0' +// +// The single quotes must be preserved so the backend shell receives the +// subshell expression as a single argument to -c. +func TestSSHProxy_CommandQuoting(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + sshClient, cleanup := setupProxySSHClient(t) + defer cleanup() + + // These commands simulate what the SSH protocol delivers as exec payloads. + // When a user types: ssh host '/bin/sh -c "( echo hello )"' + // the local shell strips the outer single quotes, and the SSH exec request + // contains the raw string: /bin/sh -c "( echo hello )" + // + // The proxy must forward this string verbatim. Using session.Command() + // (shlex.Split + strings.Join) strips the inner double quotes, breaking + // the command on the backend. + tests := []struct { + name string + command string + expect string + }{ + { + name: "subshell_in_double_quotes", + command: `/bin/sh -c "( echo from-subshell ) && echo outer"`, + expect: "from-subshell\nouter\n", + }, + { + name: "printf_with_special_chars", + command: `/bin/sh -c "printf '%s\n' 'hello world'"`, + expect: "hello world\n", + }, + { + name: "nested_command_substitution", + command: `/bin/sh -c "echo $(echo nested)"`, + expect: "nested\n", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + session, err := sshClient.NewSession() + require.NoError(t, err) + defer func() { _ = session.Close() }() + + var stderrBuf bytes.Buffer + session.Stderr = &stderrBuf + + outputCh := make(chan []byte, 1) + errCh := make(chan error, 1) + go func() { + output, err := session.Output(tc.command) + outputCh <- output + errCh <- err + }() + + select { + case output := <-outputCh: + err := <-errCh + if stderrBuf.Len() > 0 { + t.Logf("stderr: %s", stderrBuf.String()) + } + require.NoError(t, err, "command should succeed: %s", tc.command) + assert.Equal(t, tc.expect, string(output), "output mismatch for: %s", tc.command) + case <-time.After(5 * time.Second): + t.Fatalf("command timed out: %s", tc.command) + } + }) + } +} + +// setupProxySSHClient creates a full proxy test environment and returns +// an SSH client connected through the proxy to a backend NetBird SSH server. +func setupProxySSHClient(t *testing.T) (*cryptossh.Client, func()) { + t.Helper() + + const ( + issuer = "https://test-issuer.example.com" + audience = "test-audience" + ) + + jwksServer, privateKey, jwksURL := setupJWKSServer(t) + + hostKey, err := nbssh.GeneratePrivateKey(nbssh.ED25519) + require.NoError(t, err) + hostPubKey, err := nbssh.GeneratePublicKey(hostKey) + require.NoError(t, err) + + serverConfig := &server.Config{ + HostKeyPEM: hostKey, + JWT: &server.JWTConfig{ + Issuer: issuer, + Audiences: []string{audience}, + KeysLocation: jwksURL, + }, + } + sshServer := server.New(serverConfig) + sshServer.SetAllowRootLogin(true) + + testUsername := testutil.GetTestUsername(t) + testJWTUser := "test-username" + testUserHash, err := sshuserhash.HashUserID(testJWTUser) + require.NoError(t, err) + + authConfig := &sshauth.Config{ + UserIDClaim: sshauth.DefaultUserIDClaim, + AuthorizedUsers: []sshuserhash.UserIDHash{testUserHash}, + MachineUsers: map[string][]uint32{ + testUsername: {0}, + }, + } + sshServer.UpdateSSHAuth(authConfig) + + sshServerAddr := server.StartTestServer(t, sshServer) + + mockDaemon := startMockDaemon(t) + + host, portStr, err := net.SplitHostPort(sshServerAddr) + require.NoError(t, err) + port, err := strconv.Atoi(portStr) + require.NoError(t, err) + + mockDaemon.setHostKey(host, hostPubKey) + + validToken := generateValidJWT(t, privateKey, issuer, audience, testJWTUser) + mockDaemon.setJWTToken(validToken) + + proxyInstance, err := New(mockDaemon.addr, host, port, io.Discard, nil) + require.NoError(t, err) + + origStdin := os.Stdin + origStdout := os.Stdout + + stdinReader, stdinWriter, err := os.Pipe() + require.NoError(t, err) + stdoutReader, stdoutWriter, err := os.Pipe() + require.NoError(t, err) + + os.Stdin = stdinReader + os.Stdout = stdoutWriter + + clientConn, proxyConn := net.Pipe() + + go func() { _, _ = io.Copy(stdinWriter, proxyConn) }() + go func() { _, _ = io.Copy(proxyConn, stdoutReader) }() + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + + go func() { + _ = proxyInstance.Connect(ctx) + }() + + sshConfig := &cryptossh.ClientConfig{ + User: testutil.GetTestUsername(t), + Auth: []cryptossh.AuthMethod{}, + HostKeyCallback: cryptossh.InsecureIgnoreHostKey(), + Timeout: 5 * time.Second, + } + + sshClientConn, chans, reqs, err := cryptossh.NewClientConn(clientConn, "test", sshConfig) + require.NoError(t, err) + + client := cryptossh.NewClient(sshClientConn, chans, reqs) + + cleanupFn := func() { + _ = client.Close() + _ = clientConn.Close() + cancel() + os.Stdin = origStdin + os.Stdout = origStdout + _ = sshServer.Stop() + mockDaemon.stop() + jwksServer.Close() + } + + return client, cleanupFn +} + type mockDaemonServer struct { proto.UnimplementedDaemonServiceServer hostKeys map[string][]byte diff --git a/client/ssh/server/session_handlers.go b/client/ssh/server/session_handlers.go index f12a75961..0e531bb96 100644 --- a/client/ssh/server/session_handlers.go +++ b/client/ssh/server/session_handlers.go @@ -60,7 +60,7 @@ func (s *Server) sessionHandler(session ssh.Session) { } ptyReq, winCh, isPty := session.Pty() - hasCommand := len(session.Command()) > 0 + hasCommand := session.RawCommand() != "" if isPty && !hasCommand { // ssh - PTY interactive session (login) From 332c624c55a486c0ab7b981a26cf8e00a6fa0774 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 8 Apr 2026 16:33:46 +0800 Subject: [PATCH 289/374] [client] Don't abort UI debug bundle when up/down fails (#5780) --- client/cmd/debug.go | 11 +++++++++ client/ui/debug.go | 56 +++++++++++++++++++++++++++------------------ 2 files changed, 45 insertions(+), 22 deletions(-) diff --git a/client/cmd/debug.go b/client/cmd/debug.go index 0e2717756..e3d3afe5f 100644 --- a/client/cmd/debug.go +++ b/client/cmd/debug.go @@ -199,9 +199,11 @@ func runForDuration(cmd *cobra.Command, args []string) error { cmd.Println("Log level set to trace.") } + needsRestoreUp := false if _, err := client.Down(cmd.Context(), &proto.DownRequest{}); err != nil { cmd.PrintErrf("Failed to bring service down: %v\n", status.Convert(err).Message()) } else { + needsRestoreUp = !stateWasDown cmd.Println("netbird down") } @@ -217,6 +219,7 @@ func runForDuration(cmd *cobra.Command, args []string) error { if _, err := client.Up(cmd.Context(), &proto.UpRequest{}); err != nil { cmd.PrintErrf("Failed to bring service up: %v\n", status.Convert(err).Message()) } else { + needsRestoreUp = false cmd.Println("netbird up") } @@ -264,6 +267,14 @@ func runForDuration(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to bundle debug: %v", status.Convert(err).Message()) } + if needsRestoreUp { + if _, err := client.Up(cmd.Context(), &proto.UpRequest{}); err != nil { + cmd.PrintErrf("Failed to restore service up state: %v\n", status.Convert(err).Message()) + } else { + cmd.Println("netbird up (restored)") + } + } + if stateWasDown { if _, err := client.Down(cmd.Context(), &proto.DownRequest{}); err != nil { cmd.PrintErrf("Failed to restore service down state: %v\n", status.Convert(err).Message()) diff --git a/client/ui/debug.go b/client/ui/debug.go index 29f73a66a..4ebe4d675 100644 --- a/client/ui/debug.go +++ b/client/ui/debug.go @@ -24,9 +24,10 @@ import ( // Initial state for the debug collection type debugInitialState struct { - wasDown bool - logLevel proto.LogLevel - isLevelTrace bool + wasDown bool + needsRestoreUp bool + logLevel proto.LogLevel + isLevelTrace bool } // Debug collection parameters @@ -371,46 +372,51 @@ func (s *serviceClient) configureServiceForDebug( conn proto.DaemonServiceClient, state *debugInitialState, enablePersistence bool, -) error { +) { if state.wasDown { if _, err := conn.Up(s.ctx, &proto.UpRequest{}); err != nil { - return fmt.Errorf("bring service up: %v", err) + log.Warnf("failed to bring service up: %v", err) + } else { + log.Info("Service brought up for debug") + time.Sleep(time.Second * 10) } - log.Info("Service brought up for debug") - time.Sleep(time.Second * 10) } if !state.isLevelTrace { if _, err := conn.SetLogLevel(s.ctx, &proto.SetLogLevelRequest{Level: proto.LogLevel_TRACE}); err != nil { - return fmt.Errorf("set log level to TRACE: %v", err) + log.Warnf("failed to set log level to TRACE: %v", err) + } else { + log.Info("Log level set to TRACE for debug") } - log.Info("Log level set to TRACE for debug") } if _, err := conn.Down(s.ctx, &proto.DownRequest{}); err != nil { - return fmt.Errorf("bring service down: %v", err) + log.Warnf("failed to bring service down: %v", err) + } else { + state.needsRestoreUp = !state.wasDown + time.Sleep(time.Second) } - time.Sleep(time.Second) if enablePersistence { if _, err := conn.SetSyncResponsePersistence(s.ctx, &proto.SetSyncResponsePersistenceRequest{ Enabled: true, }); err != nil { - return fmt.Errorf("enable sync response persistence: %v", err) + log.Warnf("failed to enable sync response persistence: %v", err) + } else { + log.Info("Sync response persistence enabled for debug") } - log.Info("Sync response persistence enabled for debug") } if _, err := conn.Up(s.ctx, &proto.UpRequest{}); err != nil { - return fmt.Errorf("bring service back up: %v", err) + log.Warnf("failed to bring service back up: %v", err) + } else { + state.needsRestoreUp = false + time.Sleep(time.Second * 3) } - time.Sleep(time.Second * 3) if _, err := conn.StartCPUProfile(s.ctx, &proto.StartCPUProfileRequest{}); err != nil { log.Warnf("failed to start CPU profiling: %v", err) } - - return nil } func (s *serviceClient) collectDebugData( @@ -424,9 +430,7 @@ func (s *serviceClient) collectDebugData( var wg sync.WaitGroup startProgressTracker(ctx, &wg, params.duration, progress) - if err := s.configureServiceForDebug(conn, state, params.enablePersistence); err != nil { - return err - } + s.configureServiceForDebug(conn, state, params.enablePersistence) wg.Wait() progress.progressBar.Hide() @@ -482,9 +486,17 @@ func (s *serviceClient) createDebugBundleFromCollection( // Restore service to original state func (s *serviceClient) restoreServiceState(conn proto.DaemonServiceClient, state *debugInitialState) { + if state.needsRestoreUp { + if _, err := conn.Up(s.ctx, &proto.UpRequest{}); err != nil { + log.Warnf("failed to restore up state: %v", err) + } else { + log.Info("Service state restored to up") + } + } + if state.wasDown { if _, err := conn.Down(s.ctx, &proto.DownRequest{}); err != nil { - log.Errorf("Failed to restore down state: %v", err) + log.Warnf("failed to restore down state: %v", err) } else { log.Info("Service state restored to down") } @@ -492,7 +504,7 @@ func (s *serviceClient) restoreServiceState(conn proto.DaemonServiceClient, stat if !state.isLevelTrace { if _, err := conn.SetLogLevel(s.ctx, &proto.SetLogLevelRequest{Level: state.logLevel}); err != nil { - log.Errorf("Failed to restore log level: %v", err) + log.Warnf("failed to restore log level: %v", err) } else { log.Info("Log level restored to original setting") } From 413d95b7400841bfa47310af241f02b912aed917 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 8 Apr 2026 21:10:31 +0800 Subject: [PATCH 290/374] [client] Include service.json in debug bundle (#5825) * Include service.json in debug bundle * Add tests for service params sanitization logic --- client/internal/debug/debug.go | 90 +++++++++++ client/internal/debug/debug_test.go | 225 ++++++++++++++++++++++++++++ 2 files changed, 315 insertions(+) diff --git a/client/internal/debug/debug.go b/client/internal/debug/debug.go index c9ebf25e5..6a8eae324 100644 --- a/client/internal/debug/debug.go +++ b/client/internal/debug/debug.go @@ -25,6 +25,7 @@ import ( "google.golang.org/protobuf/encoding/protojson" "github.com/netbirdio/netbird/client/anonymize" + "github.com/netbirdio/netbird/client/configs" "github.com/netbirdio/netbird/client/internal/peer" "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/internal/updater/installer" @@ -52,6 +53,7 @@ resolved_domains.txt: Anonymized resolved domain IP addresses from the status re config.txt: Anonymized configuration information of the NetBird client. network_map.json: Anonymized sync response containing peer configurations, routes, DNS settings, and firewall rules. state.json: Anonymized client state dump containing netbird states for the active profile. +service_params.json: Sanitized service install parameters (service.json). Sensitive environment variable values are masked. Only present when service.json exists. metrics.txt: Buffered client metrics in InfluxDB line protocol format. Only present when metrics collection is enabled. Peer identifiers are anonymized. mutex.prof: Mutex profiling information. goroutine.prof: Goroutine profiling information. @@ -359,6 +361,10 @@ func (g *BundleGenerator) createArchive() error { log.Errorf("failed to add corrupted state files to debug bundle: %v", err) } + if err := g.addServiceParams(); err != nil { + log.Errorf("failed to add service params to debug bundle: %v", err) + } + if err := g.addMetrics(); err != nil { log.Errorf("failed to add metrics to debug bundle: %v", err) } @@ -488,6 +494,90 @@ func (g *BundleGenerator) addConfig() error { return nil } +const ( + serviceParamsFile = "service.json" + serviceParamsBundle = "service_params.json" + maskedValue = "***" + envVarPrefix = "NB_" + jsonKeyManagementURL = "management_url" + jsonKeyServiceEnv = "service_env_vars" +) + +var sensitiveEnvSubstrings = []string{"key", "token", "secret", "password", "credential"} + +// addServiceParams reads the service.json file and adds a sanitized version to the bundle. +// Non-NB_ env vars and vars with sensitive names are masked. Other NB_ values are anonymized. +func (g *BundleGenerator) addServiceParams() error { + path := filepath.Join(configs.StateDir, serviceParamsFile) + + data, err := os.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return fmt.Errorf("read service params: %w", err) + } + + var params map[string]any + if err := json.Unmarshal(data, ¶ms); err != nil { + return fmt.Errorf("parse service params: %w", err) + } + + if g.anonymize { + if mgmtURL, ok := params[jsonKeyManagementURL].(string); ok && mgmtURL != "" { + params[jsonKeyManagementURL] = g.anonymizer.AnonymizeURI(mgmtURL) + } + } + + g.sanitizeServiceEnvVars(params) + + sanitizedData, err := json.MarshalIndent(params, "", " ") + if err != nil { + return fmt.Errorf("marshal sanitized service params: %w", err) + } + + if err := g.addFileToZip(bytes.NewReader(sanitizedData), serviceParamsBundle); err != nil { + return fmt.Errorf("add service params to zip: %w", err) + } + + return nil +} + +// sanitizeServiceEnvVars masks or anonymizes env var values in service params. +// Non-NB_ vars and vars with sensitive names (key, token, etc.) are fully masked. +// Other NB_ var values are passed through the anonymizer when anonymization is enabled. +func (g *BundleGenerator) sanitizeServiceEnvVars(params map[string]any) { + envVars, ok := params[jsonKeyServiceEnv].(map[string]any) + if !ok { + return + } + + sanitized := make(map[string]any, len(envVars)) + for k, v := range envVars { + val, _ := v.(string) + switch { + case !strings.HasPrefix(k, envVarPrefix) || isSensitiveEnvVar(k): + sanitized[k] = maskedValue + case g.anonymize: + sanitized[k] = g.anonymizer.AnonymizeString(val) + default: + sanitized[k] = val + } + } + params[jsonKeyServiceEnv] = sanitized +} + +// isSensitiveEnvVar returns true for env var names that may contain secrets. +func isSensitiveEnvVar(key string) bool { + lower := strings.ToLower(key) + for _, s := range sensitiveEnvSubstrings { + if strings.Contains(lower, s) { + return true + } + } + return false +} + func (g *BundleGenerator) addCommonConfigFields(configContent *strings.Builder) { configContent.WriteString("NetBird Client Configuration:\n\n") diff --git a/client/internal/debug/debug_test.go b/client/internal/debug/debug_test.go index 59837c328..6b5bb911c 100644 --- a/client/internal/debug/debug_test.go +++ b/client/internal/debug/debug_test.go @@ -1,8 +1,12 @@ package debug import ( + "archive/zip" + "bytes" "encoding/json" "net" + "os" + "path/filepath" "strings" "testing" @@ -10,6 +14,7 @@ import ( "github.com/stretchr/testify/require" "github.com/netbirdio/netbird/client/anonymize" + "github.com/netbirdio/netbird/client/configs" mgmProto "github.com/netbirdio/netbird/shared/management/proto" ) @@ -420,6 +425,226 @@ func TestAnonymizeNetworkMap(t *testing.T) { } } +func TestIsSensitiveEnvVar(t *testing.T) { + tests := []struct { + key string + sensitive bool + }{ + {"NB_SETUP_KEY", true}, + {"NB_API_TOKEN", true}, + {"NB_CLIENT_SECRET", true}, + {"NB_PASSWORD", true}, + {"NB_CREDENTIAL", true}, + {"NB_LOG_LEVEL", false}, + {"NB_MANAGEMENT_URL", false}, + {"NB_HOSTNAME", false}, + {"HOME", false}, + {"PATH", false}, + } + for _, tt := range tests { + t.Run(tt.key, func(t *testing.T) { + assert.Equal(t, tt.sensitive, isSensitiveEnvVar(tt.key)) + }) + } +} + +func TestSanitizeServiceEnvVars(t *testing.T) { + tests := []struct { + name string + anonymize bool + input map[string]any + check func(t *testing.T, params map[string]any) + }{ + { + name: "no env vars key", + anonymize: false, + input: map[string]any{"management_url": "https://mgmt.example.com"}, + check: func(t *testing.T, params map[string]any) { + t.Helper() + assert.Equal(t, "https://mgmt.example.com", params["management_url"], "non-env fields should be untouched") + _, ok := params[jsonKeyServiceEnv] + assert.False(t, ok, "service_env_vars should not be added") + }, + }, + { + name: "non-NB vars are masked", + anonymize: false, + input: map[string]any{ + jsonKeyServiceEnv: map[string]any{ + "HOME": "/root", + "PATH": "/usr/bin", + "NB_LOG_LEVEL": "debug", + }, + }, + check: func(t *testing.T, params map[string]any) { + t.Helper() + env := params[jsonKeyServiceEnv].(map[string]any) + assert.Equal(t, maskedValue, env["HOME"], "non-NB_ var should be masked") + assert.Equal(t, maskedValue, env["PATH"], "non-NB_ var should be masked") + assert.Equal(t, "debug", env["NB_LOG_LEVEL"], "safe NB_ var should pass through") + }, + }, + { + name: "sensitive NB vars are masked", + anonymize: false, + input: map[string]any{ + jsonKeyServiceEnv: map[string]any{ + "NB_SETUP_KEY": "abc123", + "NB_API_TOKEN": "tok_xyz", + "NB_LOG_LEVEL": "info", + }, + }, + check: func(t *testing.T, params map[string]any) { + t.Helper() + env := params[jsonKeyServiceEnv].(map[string]any) + assert.Equal(t, maskedValue, env["NB_SETUP_KEY"], "sensitive NB_ var should be masked") + assert.Equal(t, maskedValue, env["NB_API_TOKEN"], "sensitive NB_ var should be masked") + assert.Equal(t, "info", env["NB_LOG_LEVEL"], "safe NB_ var should pass through") + }, + }, + { + name: "safe NB vars anonymized when anonymize is true", + anonymize: true, + input: map[string]any{ + jsonKeyServiceEnv: map[string]any{ + "NB_MANAGEMENT_URL": "https://mgmt.example.com:443", + "NB_LOG_LEVEL": "debug", + "NB_SETUP_KEY": "secret", + "SOME_OTHER": "val", + }, + }, + check: func(t *testing.T, params map[string]any) { + t.Helper() + env := params[jsonKeyServiceEnv].(map[string]any) + // Safe NB_ values should be anonymized (not the original, not masked) + mgmtVal := env["NB_MANAGEMENT_URL"].(string) + assert.NotEqual(t, "https://mgmt.example.com:443", mgmtVal, "should be anonymized") + assert.NotEqual(t, maskedValue, mgmtVal, "should not be masked") + + logVal := env["NB_LOG_LEVEL"].(string) + assert.NotEqual(t, maskedValue, logVal, "safe NB_ var should not be masked") + + // Sensitive and non-NB_ still masked + assert.Equal(t, maskedValue, env["NB_SETUP_KEY"]) + assert.Equal(t, maskedValue, env["SOME_OTHER"]) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + anonymizer := anonymize.NewAnonymizer(anonymize.DefaultAddresses()) + g := &BundleGenerator{ + anonymize: tt.anonymize, + anonymizer: anonymizer, + } + g.sanitizeServiceEnvVars(tt.input) + tt.check(t, tt.input) + }) + } +} + +func TestAddServiceParams(t *testing.T) { + t.Run("missing service.json returns nil", func(t *testing.T) { + g := &BundleGenerator{ + anonymizer: anonymize.NewAnonymizer(anonymize.DefaultAddresses()), + } + + origStateDir := configs.StateDir + configs.StateDir = t.TempDir() + t.Cleanup(func() { configs.StateDir = origStateDir }) + + err := g.addServiceParams() + assert.NoError(t, err) + }) + + t.Run("management_url anonymized when anonymize is true", func(t *testing.T) { + dir := t.TempDir() + origStateDir := configs.StateDir + configs.StateDir = dir + t.Cleanup(func() { configs.StateDir = origStateDir }) + + input := map[string]any{ + jsonKeyManagementURL: "https://api.example.com:443", + jsonKeyServiceEnv: map[string]any{ + "NB_LOG_LEVEL": "trace", + }, + } + data, err := json.Marshal(input) + require.NoError(t, err) + require.NoError(t, os.WriteFile(filepath.Join(dir, serviceParamsFile), data, 0600)) + + var buf bytes.Buffer + zw := zip.NewWriter(&buf) + + g := &BundleGenerator{ + anonymize: true, + anonymizer: anonymize.NewAnonymizer(anonymize.DefaultAddresses()), + archive: zw, + } + + require.NoError(t, g.addServiceParams()) + require.NoError(t, zw.Close()) + + zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len())) + require.NoError(t, err) + require.Len(t, zr.File, 1) + assert.Equal(t, serviceParamsBundle, zr.File[0].Name) + + rc, err := zr.File[0].Open() + require.NoError(t, err) + defer rc.Close() + + var result map[string]any + require.NoError(t, json.NewDecoder(rc).Decode(&result)) + + mgmt := result[jsonKeyManagementURL].(string) + assert.NotEqual(t, "https://api.example.com:443", mgmt, "management_url should be anonymized") + assert.NotEmpty(t, mgmt) + + env := result[jsonKeyServiceEnv].(map[string]any) + assert.NotEqual(t, maskedValue, env["NB_LOG_LEVEL"], "safe NB_ var should not be masked") + }) + + t.Run("management_url preserved when anonymize is false", func(t *testing.T) { + dir := t.TempDir() + origStateDir := configs.StateDir + configs.StateDir = dir + t.Cleanup(func() { configs.StateDir = origStateDir }) + + input := map[string]any{ + jsonKeyManagementURL: "https://api.example.com:443", + } + data, err := json.Marshal(input) + require.NoError(t, err) + require.NoError(t, os.WriteFile(filepath.Join(dir, serviceParamsFile), data, 0600)) + + var buf bytes.Buffer + zw := zip.NewWriter(&buf) + + g := &BundleGenerator{ + anonymize: false, + anonymizer: anonymize.NewAnonymizer(anonymize.DefaultAddresses()), + archive: zw, + } + + require.NoError(t, g.addServiceParams()) + require.NoError(t, zw.Close()) + + zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len())) + require.NoError(t, err) + + rc, err := zr.File[0].Open() + require.NoError(t, err) + defer rc.Close() + + var result map[string]any + require.NoError(t, json.NewDecoder(rc).Decode(&result)) + + assert.Equal(t, "https://api.example.com:443", result[jsonKeyManagementURL], "management_url should be preserved") + }) +} + // Helper function to check if IP is in CGNAT range func isInCGNATRange(ip net.IP) bool { cgnat := net.IPNet{ From c7ba93146651f0f20a413c0d4bca71637ac0c639 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 8 Apr 2026 23:14:16 +0800 Subject: [PATCH 291/374] [client] Populate network addresses in FreeBSD system info (#5827) --- client/system/info_freebsd.go | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/client/system/info_freebsd.go b/client/system/info_freebsd.go index 8e1353151..755172842 100644 --- a/client/system/info_freebsd.go +++ b/client/system/info_freebsd.go @@ -43,18 +43,24 @@ func GetInfo(ctx context.Context) *Info { systemHostname, _ := os.Hostname() + addrs, err := networkAddresses() + if err != nil { + log.Warnf("failed to discover network addresses: %s", err) + } + return &Info{ - GoOS: runtime.GOOS, - Kernel: osInfo[0], - Platform: runtime.GOARCH, - OS: osName, - OSVersion: osVersion, - Hostname: extractDeviceName(ctx, systemHostname), - CPUs: runtime.NumCPU(), - NetbirdVersion: version.NetbirdVersion(), - UIVersion: extractUserAgent(ctx), - KernelVersion: osInfo[1], - Environment: env, + GoOS: runtime.GOOS, + Kernel: osInfo[0], + Platform: runtime.GOARCH, + OS: osName, + OSVersion: osVersion, + Hostname: extractDeviceName(ctx, systemHostname), + CPUs: runtime.NumCPU(), + NetbirdVersion: version.NetbirdVersion(), + UIVersion: extractUserAgent(ctx), + KernelVersion: osInfo[1], + NetworkAddresses: addrs, + Environment: env, } } From 94a36cb53e4fa6580c82a444d87b2bec63aa6f0f Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 8 Apr 2026 23:59:59 +0800 Subject: [PATCH 292/374] [client] Handle UPnP routers that only support permanent leases (#5826) --- client/internal/portforward/manager.go | 56 +++++++++--- client/internal/portforward/manager_js.go | 5 +- client/internal/portforward/manager_test.go | 96 +++++++++++++++------ client/internal/portforward/state.go | 50 ----------- client/server/state_generic.go | 4 - client/server/state_linux.go | 4 - 6 files changed, 116 insertions(+), 99 deletions(-) delete mode 100644 client/internal/portforward/state.go diff --git a/client/internal/portforward/manager.go b/client/internal/portforward/manager.go index 019c2ad86..bf7533af9 100644 --- a/client/internal/portforward/manager.go +++ b/client/internal/portforward/manager.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "net" + "regexp" "sync" "time" @@ -15,19 +16,29 @@ import ( const ( defaultMappingTTL = 2 * time.Hour - renewalInterval = defaultMappingTTL / 2 discoveryTimeout = 10 * time.Second mappingDescription = "NetBird" ) +// upnpErrPermanentLeaseOnly matches UPnP error 725 in SOAP fault XML, +// allowing for whitespace/newlines between tags from different router firmware. +var upnpErrPermanentLeaseOnly = regexp.MustCompile(`\s*725\s*`) + +// Mapping represents an active NAT port mapping. type Mapping struct { Protocol string InternalPort uint16 ExternalPort uint16 ExternalIP net.IP NATType string + // TTL is the lease duration. Zero means a permanent lease that never expires. + TTL time.Duration } +// TODO: persist mapping state for crash recovery cleanup of permanent leases. +// Currently not done because State.Cleanup requires NAT gateway re-discovery, +// which blocks startup for ~10s when no gateway is present (affects all clients). + type Manager struct { cancel context.CancelFunc @@ -43,6 +54,7 @@ type Manager struct { mu sync.Mutex } +// NewManager creates a new port forwarding manager. func NewManager() *Manager { return &Manager{ stopCtx: make(chan context.Context, 1), @@ -77,8 +89,7 @@ func (m *Manager) Start(ctx context.Context, wgPort uint16) { gateway, mapping, err := m.setup(ctx) if err != nil { - log.Errorf("failed to setup NAT port mapping: %v", err) - + log.Infof("port forwarding setup: %v", err) return } @@ -86,7 +97,7 @@ func (m *Manager) Start(ctx context.Context, wgPort uint16) { m.mapping = mapping m.mappingLock.Unlock() - m.renewLoop(ctx, gateway) + m.renewLoop(ctx, gateway, mapping.TTL) select { case cleanupCtx := <-m.stopCtx: @@ -145,16 +156,14 @@ func (m *Manager) setup(ctx context.Context) (nat.NAT, *Mapping, error) { gateway, err := nat.DiscoverGateway(discoverCtx) if err != nil { - log.Infof("NAT gateway discovery failed: %v (port forwarding disabled)", err) - return nil, nil, err + return nil, nil, fmt.Errorf("discover gateway: %w", err) } log.Infof("discovered NAT gateway: %s", gateway.Type()) mapping, err := m.createMapping(ctx, gateway) if err != nil { - log.Warnf("failed to create port mapping: %v", err) - return nil, nil, err + return nil, nil, fmt.Errorf("create port mapping: %w", err) } return gateway, mapping, nil } @@ -163,9 +172,18 @@ func (m *Manager) createMapping(ctx context.Context, gateway nat.NAT) (*Mapping, ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() - externalPort, err := gateway.AddPortMapping(ctx, "udp", int(m.wgPort), mappingDescription, defaultMappingTTL) + ttl := defaultMappingTTL + externalPort, err := gateway.AddPortMapping(ctx, "udp", int(m.wgPort), mappingDescription, ttl) if err != nil { - return nil, err + if !isPermanentLeaseRequired(err) { + return nil, err + } + log.Infof("gateway only supports permanent leases, retrying with indefinite duration") + ttl = 0 + externalPort, err = gateway.AddPortMapping(ctx, "udp", int(m.wgPort), mappingDescription, ttl) + if err != nil { + return nil, err + } } externalIP, err := gateway.GetExternalAddress() @@ -180,6 +198,7 @@ func (m *Manager) createMapping(ctx context.Context, gateway nat.NAT) (*Mapping, ExternalPort: uint16(externalPort), ExternalIP: externalIP, NATType: gateway.Type(), + TTL: ttl, } log.Infof("created port mapping: %d -> %d via %s (external IP: %s)", @@ -187,8 +206,14 @@ func (m *Manager) createMapping(ctx context.Context, gateway nat.NAT) (*Mapping, return mapping, nil } -func (m *Manager) renewLoop(ctx context.Context, gateway nat.NAT) { - ticker := time.NewTicker(renewalInterval) +func (m *Manager) renewLoop(ctx context.Context, gateway nat.NAT, ttl time.Duration) { + if ttl == 0 { + // Permanent mappings don't expire, just wait for cancellation. + <-ctx.Done() + return + } + + ticker := time.NewTicker(ttl / 2) defer ticker.Stop() for { @@ -208,7 +233,7 @@ func (m *Manager) renewMapping(ctx context.Context, gateway nat.NAT) error { ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() - externalPort, err := gateway.AddPortMapping(ctx, m.mapping.Protocol, int(m.mapping.InternalPort), mappingDescription, defaultMappingTTL) + externalPort, err := gateway.AddPortMapping(ctx, m.mapping.Protocol, int(m.mapping.InternalPort), mappingDescription, m.mapping.TTL) if err != nil { return fmt.Errorf("add port mapping: %w", err) } @@ -248,3 +273,8 @@ func (m *Manager) startTearDown(ctx context.Context) { default: } } + +// isPermanentLeaseRequired checks if a UPnP error indicates the gateway only supports permanent leases (error 725). +func isPermanentLeaseRequired(err error) bool { + return err != nil && upnpErrPermanentLeaseOnly.MatchString(err.Error()) +} diff --git a/client/internal/portforward/manager_js.go b/client/internal/portforward/manager_js.go index d5db147f2..36c55063b 100644 --- a/client/internal/portforward/manager_js.go +++ b/client/internal/portforward/manager_js.go @@ -3,15 +3,18 @@ package portforward import ( "context" "net" + "time" ) -// Mapping represents port mapping information. +// Mapping represents an active NAT port mapping. type Mapping struct { Protocol string InternalPort uint16 ExternalPort uint16 ExternalIP net.IP NATType string + // TTL is the lease duration. Zero means a permanent lease that never expires. + TTL time.Duration } // Manager is a stub for js/wasm builds where NAT-PMP/UPnP is not supported. diff --git a/client/internal/portforward/manager_test.go b/client/internal/portforward/manager_test.go index 1029e87f5..1f66f9ccd 100644 --- a/client/internal/portforward/manager_test.go +++ b/client/internal/portforward/manager_test.go @@ -4,23 +4,25 @@ package portforward import ( "context" + "fmt" "net" "testing" "time" - "github.com/libp2p/go-nat" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) type mockNAT struct { - natType string - deviceAddr net.IP - externalAddr net.IP - internalAddr net.IP - mappings map[int]int - addMappingErr error - deleteMappingErr error + natType string + deviceAddr net.IP + externalAddr net.IP + internalAddr net.IP + mappings map[int]int + addMappingErr error + deleteMappingErr error + onlyPermanentLeases bool + lastTimeout time.Duration } func newMockNAT() *mockNAT { @@ -53,8 +55,12 @@ func (m *mockNAT) AddPortMapping(ctx context.Context, protocol string, internalP if m.addMappingErr != nil { return 0, m.addMappingErr } + if m.onlyPermanentLeases && timeout != 0 { + return 0, fmt.Errorf("SOAP fault. Code: | Explanation: | Detail: 725OnlyPermanentLeasesSupported") + } externalPort := internalPort m.mappings[internalPort] = externalPort + m.lastTimeout = timeout return externalPort, nil } @@ -80,6 +86,7 @@ func TestManager_CreateMapping(t *testing.T) { assert.Equal(t, uint16(51820), mapping.ExternalPort) assert.Equal(t, "Mock-NAT", mapping.NATType) assert.Equal(t, net.ParseIP("203.0.113.50").To4(), mapping.ExternalIP.To4()) + assert.Equal(t, defaultMappingTTL, mapping.TTL) } func TestManager_GetMapping_ReturnsNilWhenNotReady(t *testing.T) { @@ -131,29 +138,64 @@ func TestManager_Cleanup_NilMapping(t *testing.T) { m.cleanup(context.Background(), gateway) } -func TestState_Cleanup(t *testing.T) { - origDiscover := discoverGateway - defer func() { discoverGateway = origDiscover }() - mockGateway := newMockNAT() - mockGateway.mappings[51820] = 51820 - discoverGateway = func(ctx context.Context) (nat.NAT, error) { - return mockGateway, nil - } +func TestManager_CreateMapping_PermanentLeaseFallback(t *testing.T) { + m := NewManager() + m.wgPort = 51820 - state := &State{ - Protocol: "udp", - InternalPort: 51820, - } + gateway := newMockNAT() + gateway.onlyPermanentLeases = true - err := state.Cleanup() - assert.NoError(t, err) + mapping, err := m.createMapping(context.Background(), gateway) + require.NoError(t, err) + require.NotNil(t, mapping) - _, exists := mockGateway.mappings[51820] - assert.False(t, exists, "mapping should be deleted after cleanup") + assert.Equal(t, uint16(51820), mapping.InternalPort) + assert.Equal(t, time.Duration(0), mapping.TTL, "should return zero TTL for permanent lease") + assert.Equal(t, time.Duration(0), gateway.lastTimeout, "should have retried with zero duration") } -func TestState_Name(t *testing.T) { - state := &State{} - assert.Equal(t, "port_forward_state", state.Name()) +func TestIsPermanentLeaseRequired(t *testing.T) { + tests := []struct { + name string + err error + expected bool + }{ + { + name: "nil error", + err: nil, + expected: false, + }, + { + name: "UPnP error 725", + err: fmt.Errorf("SOAP fault. Code: | Detail: 725OnlyPermanentLeasesSupported"), + expected: true, + }, + { + name: "wrapped error with 725", + err: fmt.Errorf("add port mapping: %w", fmt.Errorf("Detail: 725")), + expected: true, + }, + { + name: "error 725 with newlines in XML", + err: fmt.Errorf("\n 725\n"), + expected: true, + }, + { + name: "bare 725 without XML tag", + err: fmt.Errorf("error code 725"), + expected: false, + }, + { + name: "unrelated error", + err: fmt.Errorf("connection refused"), + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.expected, isPermanentLeaseRequired(tt.err)) + }) + } } diff --git a/client/internal/portforward/state.go b/client/internal/portforward/state.go deleted file mode 100644 index 3f939751a..000000000 --- a/client/internal/portforward/state.go +++ /dev/null @@ -1,50 +0,0 @@ -//go:build !js - -package portforward - -import ( - "context" - "fmt" - - "github.com/libp2p/go-nat" - log "github.com/sirupsen/logrus" -) - -// discoverGateway is the function used for NAT gateway discovery. -// It can be replaced in tests to avoid real network operations. -var discoverGateway = nat.DiscoverGateway - -// State is persisted only for crash recovery cleanup -type State struct { - InternalPort uint16 `json:"internal_port,omitempty"` - Protocol string `json:"protocol,omitempty"` -} - -func (s *State) Name() string { - return "port_forward_state" -} - -// Cleanup implements statemanager.CleanableState for crash recovery -func (s *State) Cleanup() error { - if s.InternalPort == 0 { - return nil - } - - log.Infof("cleaning up stale port mapping for port %d", s.InternalPort) - - ctx, cancel := context.WithTimeout(context.Background(), discoveryTimeout) - defer cancel() - - gateway, err := discoverGateway(ctx) - if err != nil { - // Discovery failure is not an error - gateway may not exist - log.Debugf("cleanup: no gateway found: %v", err) - return nil - } - - if err := gateway.DeletePortMapping(ctx, s.Protocol, int(s.InternalPort)); err != nil { - return fmt.Errorf("delete port mapping: %w", err) - } - - return nil -} diff --git a/client/server/state_generic.go b/client/server/state_generic.go index 3f794b611..86475ca42 100644 --- a/client/server/state_generic.go +++ b/client/server/state_generic.go @@ -10,10 +10,6 @@ import ( ) // registerStates registers all states that need crash recovery cleanup. -// Note: portforward.State is intentionally NOT registered here to avoid blocking startup -// for up to 10 seconds during NAT gateway discovery when no gateway is present. -// The gateway reference cannot be persisted across restarts, so cleanup requires re-discovery. -// Port forward cleanup is handled by the Manager during normal operation instead. func registerStates(mgr *statemanager.Manager) { mgr.RegisterState(&dns.ShutdownState{}) mgr.RegisterState(&systemops.ShutdownState{}) diff --git a/client/server/state_linux.go b/client/server/state_linux.go index 655edfc53..b193d4dfa 100644 --- a/client/server/state_linux.go +++ b/client/server/state_linux.go @@ -12,10 +12,6 @@ import ( ) // registerStates registers all states that need crash recovery cleanup. -// Note: portforward.State is intentionally NOT registered here to avoid blocking startup -// for up to 10 seconds during NAT gateway discovery when no gateway is present. -// The gateway reference cannot be persisted across restarts, so cleanup requires re-discovery. -// Port forward cleanup is handled by the Manager during normal operation instead. func registerStates(mgr *statemanager.Manager) { mgr.RegisterState(&dns.ShutdownState{}) mgr.RegisterState(&systemops.ShutdownState{}) From c1d1229ae0cb402fd4e16ee61f9f7c4652099b08 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Wed, 8 Apr 2026 21:08:43 +0200 Subject: [PATCH 293/374] [management] use NullBool for terminated flag (#5829) --- management/server/store/sql_store.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 397b8673d..07dfe1914 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -2099,6 +2099,7 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpserv var createdAt, certIssuedAt sql.NullTime var status, proxyCluster, sessionPrivateKey, sessionPublicKey sql.NullString var mode, source, sourcePeer sql.NullString + var terminated sql.NullBool err := row.Scan( &s.ID, &s.AccountID, @@ -2119,7 +2120,7 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpserv &s.PortAutoAssigned, &source, &sourcePeer, - &s.Terminated, + &terminated, ) if err != nil { return nil, err @@ -2160,7 +2161,9 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpserv if sourcePeer.Valid { s.SourcePeer = sourcePeer.String } - + if terminated.Valid { + s.Terminated = terminated.Bool + } s.Targets = []*rpservice.Target{} return &s, nil }) From 099c493b1818cc9ae475627ff4f071bdfe3bf031 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Wed, 8 Apr 2026 21:28:29 +0200 Subject: [PATCH 294/374] [management] network map tests (#5795) * Add network map benchmark and correctness test files * Add tests for network map components correctness and edge cases * Skip benchmarks in CI and enhance network map test coverage with new helper functions * Remove legacy network map benchmarks and tests; refactor components-based test coverage for clarity and scalability. --- .../server/types/networkmap_benchmark_test.go | 217 +++ .../networkmap_components_correctness_test.go | 1192 +++++++++++++++++ 2 files changed, 1409 insertions(+) create mode 100644 management/server/types/networkmap_benchmark_test.go create mode 100644 management/server/types/networkmap_components_correctness_test.go diff --git a/management/server/types/networkmap_benchmark_test.go b/management/server/types/networkmap_benchmark_test.go new file mode 100644 index 000000000..38272e7b0 --- /dev/null +++ b/management/server/types/networkmap_benchmark_test.go @@ -0,0 +1,217 @@ +package types_test + +import ( + "context" + "fmt" + "os" + "testing" + + nbdns "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/management/server/types" +) + +type benchmarkScale struct { + name string + peers int + groups int +} + +var defaultScales = []benchmarkScale{ + {"100peers_5groups", 100, 5}, + {"500peers_20groups", 500, 20}, + {"1000peers_50groups", 1000, 50}, + {"5000peers_100groups", 5000, 100}, + {"10000peers_200groups", 10000, 200}, + {"20000peers_200groups", 20000, 200}, + {"30000peers_300groups", 30000, 300}, +} + +func skipCIBenchmark(b *testing.B) { + if os.Getenv("CI") == "true" { + b.Skip("Skipping benchmark in CI") + } +} + +// ────────────────────────────────────────────────────────────────────────────── +// Single Peer Network Map Generation +// ────────────────────────────────────────────────────────────────────────────── + +// BenchmarkNetworkMapGeneration_Components benchmarks the components-based approach for a single peer. +func BenchmarkNetworkMapGeneration_Components(b *testing.B) { + skipCIBenchmark(b) + for _, scale := range defaultScales { + b.Run(scale.name, func(b *testing.B) { + account, validatedPeers := scalableTestAccount(scale.peers, scale.groups) + ctx := context.Background() + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + + b.ReportAllocs() + b.ResetTimer() + for range b.N { + _ = account.GetPeerNetworkMapFromComponents(ctx, "peer-0", nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, routers, nil, groupIDToUserIDs) + } + }) + } +} + +// ────────────────────────────────────────────────────────────────────────────── +// All Peers (UpdateAccountPeers hot path) +// ────────────────────────────────────────────────────────────────────────────── + +// BenchmarkNetworkMapGeneration_AllPeers benchmarks generating network maps for ALL peers. +func BenchmarkNetworkMapGeneration_AllPeers(b *testing.B) { + skipCIBenchmark(b) + scales := []benchmarkScale{ + {"100peers_5groups", 100, 5}, + {"500peers_20groups", 500, 20}, + {"1000peers_50groups", 1000, 50}, + {"5000peers_100groups", 5000, 100}, + } + + for _, scale := range scales { + account, validatedPeers := scalableTestAccount(scale.peers, scale.groups) + ctx := context.Background() + + peerIDs := make([]string, 0, len(account.Peers)) + for peerID := range account.Peers { + peerIDs = append(peerIDs, peerID) + } + + b.Run("components/"+scale.name, func(b *testing.B) { + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + b.ReportAllocs() + b.ResetTimer() + for range b.N { + for _, peerID := range peerIDs { + _ = account.GetPeerNetworkMapFromComponents(ctx, peerID, nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, routers, nil, groupIDToUserIDs) + } + } + }) + } +} + +// ────────────────────────────────────────────────────────────────────────────── +// Sub-operations +// ────────────────────────────────────────────────────────────────────────────── + +// BenchmarkNetworkMapGeneration_ComponentsCreation benchmarks components extraction. +func BenchmarkNetworkMapGeneration_ComponentsCreation(b *testing.B) { + skipCIBenchmark(b) + for _, scale := range defaultScales { + b.Run(scale.name, func(b *testing.B) { + account, validatedPeers := scalableTestAccount(scale.peers, scale.groups) + ctx := context.Background() + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + b.ReportAllocs() + b.ResetTimer() + for range b.N { + _ = account.GetPeerNetworkMapComponents(ctx, "peer-0", nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, routers, groupIDToUserIDs) + } + }) + } +} + +// BenchmarkNetworkMapGeneration_ComponentsCalculation benchmarks calculation from pre-built components. +func BenchmarkNetworkMapGeneration_ComponentsCalculation(b *testing.B) { + skipCIBenchmark(b) + for _, scale := range defaultScales { + b.Run(scale.name, func(b *testing.B) { + account, validatedPeers := scalableTestAccount(scale.peers, scale.groups) + ctx := context.Background() + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + components := account.GetPeerNetworkMapComponents(ctx, "peer-0", nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, routers, groupIDToUserIDs) + b.ReportAllocs() + b.ResetTimer() + for range b.N { + _ = types.CalculateNetworkMapFromComponents(ctx, components) + } + }) + } +} + +// BenchmarkNetworkMapGeneration_PrecomputeMaps benchmarks precomputed map costs. +func BenchmarkNetworkMapGeneration_PrecomputeMaps(b *testing.B) { + skipCIBenchmark(b) + for _, scale := range defaultScales { + b.Run("ResourcePoliciesMap/"+scale.name, func(b *testing.B) { + account, _ := scalableTestAccount(scale.peers, scale.groups) + b.ReportAllocs() + b.ResetTimer() + for range b.N { + _ = account.GetResourcePoliciesMap() + } + }) + b.Run("ResourceRoutersMap/"+scale.name, func(b *testing.B) { + account, _ := scalableTestAccount(scale.peers, scale.groups) + b.ReportAllocs() + b.ResetTimer() + for range b.N { + _ = account.GetResourceRoutersMap() + } + }) + b.Run("ActiveGroupUsers/"+scale.name, func(b *testing.B) { + account, _ := scalableTestAccount(scale.peers, scale.groups) + b.ReportAllocs() + b.ResetTimer() + for range b.N { + _ = account.GetActiveGroupUsers() + } + }) + } +} + +// ────────────────────────────────────────────────────────────────────────────── +// Scaling Analysis +// ────────────────────────────────────────────────────────────────────────────── + +// BenchmarkNetworkMapGeneration_GroupScaling tests group count impact on performance. +func BenchmarkNetworkMapGeneration_GroupScaling(b *testing.B) { + skipCIBenchmark(b) + groupCounts := []int{1, 5, 20, 50, 100, 200, 500} + for _, numGroups := range groupCounts { + b.Run(fmt.Sprintf("components_%dgroups", numGroups), func(b *testing.B) { + account, validatedPeers := scalableTestAccount(1000, numGroups) + ctx := context.Background() + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + b.ReportAllocs() + b.ResetTimer() + for range b.N { + _ = account.GetPeerNetworkMapFromComponents(ctx, "peer-0", nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, routers, nil, groupIDToUserIDs) + } + }) + } +} + +// BenchmarkNetworkMapGeneration_PeerScaling tests peer count impact on performance. +func BenchmarkNetworkMapGeneration_PeerScaling(b *testing.B) { + skipCIBenchmark(b) + peerCounts := []int{50, 100, 500, 1000, 2000, 5000, 10000, 20000, 30000} + for _, numPeers := range peerCounts { + numGroups := numPeers / 20 + if numGroups < 1 { + numGroups = 1 + } + b.Run(fmt.Sprintf("components_%dpeers", numPeers), func(b *testing.B) { + account, validatedPeers := scalableTestAccount(numPeers, numGroups) + ctx := context.Background() + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + b.ReportAllocs() + b.ResetTimer() + for range b.N { + _ = account.GetPeerNetworkMapFromComponents(ctx, "peer-0", nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, routers, nil, groupIDToUserIDs) + } + }) + } +} diff --git a/management/server/types/networkmap_components_correctness_test.go b/management/server/types/networkmap_components_correctness_test.go new file mode 100644 index 000000000..5cd41ff10 --- /dev/null +++ b/management/server/types/networkmap_components_correctness_test.go @@ -0,0 +1,1192 @@ +package types_test + +import ( + "context" + "fmt" + "net" + "net/netip" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + nbdns "github.com/netbirdio/netbird/dns" + resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" + routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" + networkTypes "github.com/netbirdio/netbird/management/server/networks/types" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/posture" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/route" +) + +// scalableTestAccountWithoutDefaultPolicy creates an account without the blanket "Allow All" policy. +// Use this for tests that need to verify feature-specific connectivity in isolation. +func scalableTestAccountWithoutDefaultPolicy(numPeers, numGroups int) (*types.Account, map[string]struct{}) { + return buildScalableTestAccount(numPeers, numGroups, false) +} + +// scalableTestAccount creates a realistic account with a blanket "Allow All" policy +// plus per-group policies, routes, network resources, posture checks, and DNS settings. +func scalableTestAccount(numPeers, numGroups int) (*types.Account, map[string]struct{}) { + return buildScalableTestAccount(numPeers, numGroups, true) +} + +// buildScalableTestAccount is the core builder. When withDefaultPolicy is true it adds +// a blanket group-all <-> group-all allow rule; when false the only policies are the +// per-group ones, so tests can verify feature-specific connectivity in isolation. +func buildScalableTestAccount(numPeers, numGroups int, withDefaultPolicy bool) (*types.Account, map[string]struct{}) { + peers := make(map[string]*nbpeer.Peer, numPeers) + allGroupPeers := make([]string, 0, numPeers) + + for i := range numPeers { + peerID := fmt.Sprintf("peer-%d", i) + ip := net.IP{100, byte(64 + i/65536), byte((i / 256) % 256), byte(i % 256)} + wtVersion := "0.25.0" + if i%2 == 0 { + wtVersion = "0.40.0" + } + + p := &nbpeer.Peer{ + ID: peerID, + IP: ip, + Key: fmt.Sprintf("key-%s", peerID), + DNSLabel: fmt.Sprintf("peer%d", i), + Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, + UserID: "user-admin", + Meta: nbpeer.PeerSystemMeta{WtVersion: wtVersion, GoOS: "linux"}, + } + + if i == numPeers-2 { + p.LoginExpirationEnabled = true + pastTimestamp := time.Now().Add(-2 * time.Hour) + p.LastLogin = &pastTimestamp + } + + peers[peerID] = p + allGroupPeers = append(allGroupPeers, peerID) + } + + groups := make(map[string]*types.Group, numGroups+1) + groups["group-all"] = &types.Group{ID: "group-all", Name: "All", Peers: allGroupPeers} + + peersPerGroup := numPeers / numGroups + if peersPerGroup < 1 { + peersPerGroup = 1 + } + + for g := range numGroups { + groupID := fmt.Sprintf("group-%d", g) + groupPeers := make([]string, 0, peersPerGroup) + start := g * peersPerGroup + end := start + peersPerGroup + if end > numPeers { + end = numPeers + } + for i := start; i < end; i++ { + groupPeers = append(groupPeers, fmt.Sprintf("peer-%d", i)) + } + groups[groupID] = &types.Group{ID: groupID, Name: fmt.Sprintf("Group %d", g), Peers: groupPeers} + } + + policies := make([]*types.Policy, 0, numGroups+2) + if withDefaultPolicy { + policies = append(policies, &types.Policy{ + ID: "policy-all", Name: "Default-Allow", Enabled: true, + Rules: []*types.PolicyRule{{ + ID: "rule-all", Name: "Allow All", Enabled: true, Action: types.PolicyTrafficActionAccept, + Protocol: types.PolicyRuleProtocolALL, Bidirectional: true, + Sources: []string{"group-all"}, Destinations: []string{"group-all"}, + }}, + }) + } + + for g := range numGroups { + groupID := fmt.Sprintf("group-%d", g) + dstGroup := fmt.Sprintf("group-%d", (g+1)%numGroups) + policies = append(policies, &types.Policy{ + ID: fmt.Sprintf("policy-%d", g), Name: fmt.Sprintf("Policy %d", g), Enabled: true, + Rules: []*types.PolicyRule{{ + ID: fmt.Sprintf("rule-%d", g), Name: fmt.Sprintf("Rule %d", g), Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP, + Bidirectional: true, + Ports: []string{"8080"}, + Sources: []string{groupID}, Destinations: []string{dstGroup}, + }}, + }) + } + + if numGroups >= 2 { + policies = append(policies, &types.Policy{ + ID: "policy-drop", Name: "Drop DB traffic", Enabled: true, + Rules: []*types.PolicyRule{{ + ID: "rule-drop", Name: "Drop DB", Enabled: true, Action: types.PolicyTrafficActionDrop, + Protocol: types.PolicyRuleProtocolTCP, Ports: []string{"5432"}, Bidirectional: true, + Sources: []string{"group-0"}, Destinations: []string{"group-1"}, + }}, + }) + } + + numRoutes := numGroups + if numRoutes > 20 { + numRoutes = 20 + } + routes := make(map[route.ID]*route.Route, numRoutes) + for r := range numRoutes { + routeID := route.ID(fmt.Sprintf("route-%d", r)) + peerIdx := (numPeers / 2) + r + if peerIdx >= numPeers { + peerIdx = numPeers - 1 + } + routePeerID := fmt.Sprintf("peer-%d", peerIdx) + groupID := fmt.Sprintf("group-%d", r%numGroups) + routes[routeID] = &route.Route{ + ID: routeID, + Network: netip.MustParsePrefix(fmt.Sprintf("10.%d.0.0/16", r)), + Peer: peers[routePeerID].Key, + PeerID: routePeerID, + Description: fmt.Sprintf("Route %d", r), + Enabled: true, + PeerGroups: []string{groupID}, + Groups: []string{"group-all"}, + AccessControlGroups: []string{groupID}, + AccountID: "test-account", + } + } + + numResources := numGroups / 2 + if numResources < 1 { + numResources = 1 + } + if numResources > 50 { + numResources = 50 + } + + networkResources := make([]*resourceTypes.NetworkResource, 0, numResources) + networksList := make([]*networkTypes.Network, 0, numResources) + networkRouters := make([]*routerTypes.NetworkRouter, 0, numResources) + + routingPeerStart := numPeers * 3 / 4 + for nr := range numResources { + netID := fmt.Sprintf("net-%d", nr) + resID := fmt.Sprintf("res-%d", nr) + routerPeerIdx := routingPeerStart + nr + if routerPeerIdx >= numPeers { + routerPeerIdx = numPeers - 1 + } + routerPeerID := fmt.Sprintf("peer-%d", routerPeerIdx) + + networksList = append(networksList, &networkTypes.Network{ID: netID, Name: fmt.Sprintf("Network %d", nr), AccountID: "test-account"}) + networkResources = append(networkResources, &resourceTypes.NetworkResource{ + ID: resID, NetworkID: netID, AccountID: "test-account", Enabled: true, + Address: fmt.Sprintf("svc-%d.netbird.cloud", nr), + }) + networkRouters = append(networkRouters, &routerTypes.NetworkRouter{ + ID: fmt.Sprintf("router-%d", nr), NetworkID: netID, Peer: routerPeerID, + Enabled: true, AccountID: "test-account", + }) + + policies = append(policies, &types.Policy{ + ID: fmt.Sprintf("policy-res-%d", nr), Name: fmt.Sprintf("Resource Policy %d", nr), Enabled: true, + SourcePostureChecks: []string{"posture-check-ver"}, + Rules: []*types.PolicyRule{{ + ID: fmt.Sprintf("rule-res-%d", nr), Name: fmt.Sprintf("Allow Resource %d", nr), Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, Bidirectional: true, + Sources: []string{fmt.Sprintf("group-%d", nr%numGroups)}, + DestinationResource: types.Resource{ID: resID}, + }}, + }) + } + + account := &types.Account{ + Id: "test-account", + Peers: peers, + Groups: groups, + Policies: policies, + Routes: routes, + Users: map[string]*types.User{ + "user-admin": {Id: "user-admin", Role: types.UserRoleAdmin, IsServiceUser: false, AccountID: "test-account"}, + }, + Network: &types.Network{ + Identifier: "net-test", Net: net.IPNet{IP: net.IP{100, 64, 0, 0}, Mask: net.CIDRMask(10, 32)}, Serial: 1, + }, + DNSSettings: types.DNSSettings{DisabledManagementGroups: []string{}}, + NameServerGroups: map[string]*nbdns.NameServerGroup{ + "ns-group-main": { + ID: "ns-group-main", Name: "Main NS", Enabled: true, Groups: []string{"group-all"}, + NameServers: []nbdns.NameServer{{IP: netip.MustParseAddr("8.8.8.8"), NSType: nbdns.UDPNameServerType, Port: 53}}, + }, + }, + PostureChecks: []*posture.Checks{ + {ID: "posture-check-ver", Name: "Check version", Checks: posture.ChecksDefinition{ + NBVersionCheck: &posture.NBVersionCheck{MinVersion: "0.26.0"}, + }}, + }, + NetworkResources: networkResources, + Networks: networksList, + NetworkRouters: networkRouters, + Settings: &types.Settings{PeerLoginExpirationEnabled: true, PeerLoginExpiration: 1 * time.Hour}, + } + + for _, p := range account.Policies { + p.AccountID = account.Id + } + for _, r := range account.Routes { + r.AccountID = account.Id + } + + validatedPeers := make(map[string]struct{}, numPeers) + for i := range numPeers { + peerID := fmt.Sprintf("peer-%d", i) + if i != numPeers-1 { + validatedPeers[peerID] = struct{}{} + } + } + + return account, validatedPeers +} + +// componentsNetworkMap is a convenience wrapper for GetPeerNetworkMapFromComponents. +func componentsNetworkMap(account *types.Account, peerID string, validatedPeers map[string]struct{}) *types.NetworkMap { + return account.GetPeerNetworkMapFromComponents( + context.Background(), peerID, nbdns.CustomZone{}, nil, + validatedPeers, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap(), + nil, account.GetActiveGroupUsers(), + ) +} + +// ────────────────────────────────────────────────────────────────────────────── +// 1. PEER VISIBILITY & GROUPS +// ────────────────────────────────────────────────────────────────────────────── + +func TestComponents_PeerVisibility(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.Equal(t, len(validatedPeers)-1-len(nm.OfflinePeers), len(nm.Peers), "peer should see all other validated non-expired peers") +} + +func TestComponents_PeerDoesNotSeeItself(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + for _, p := range nm.Peers { + assert.NotEqual(t, "peer-0", p.ID, "peer should not see itself") + } +} + +func TestComponents_IntraGroupConnectivity(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + peerIDs := make(map[string]bool, len(nm.Peers)) + for _, p := range nm.Peers { + peerIDs[p.ID] = true + } + assert.True(t, peerIDs["peer-5"], "peer-0 should see peer-5 from same group") +} + +func TestComponents_CrossGroupConnectivity(t *testing.T) { + // Without default policy, only per-group policies provide connectivity + account, validatedPeers := scalableTestAccountWithoutDefaultPolicy(20, 2) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + peerIDs := make(map[string]bool, len(nm.Peers)) + for _, p := range nm.Peers { + peerIDs[p.ID] = true + } + assert.True(t, peerIDs["peer-10"], "peer-0 should see peer-10 from cross-group policy") +} + +func TestComponents_BidirectionalPolicy(t *testing.T) { + // Without default policy so bidirectional visibility comes only from per-group policies + account, validatedPeers := scalableTestAccountWithoutDefaultPolicy(100, 5) + nm0 := componentsNetworkMap(account, "peer-0", validatedPeers) + nm20 := componentsNetworkMap(account, "peer-20", validatedPeers) + require.NotNil(t, nm0) + require.NotNil(t, nm20) + + peer0SeesPeer20 := false + for _, p := range nm0.Peers { + if p.ID == "peer-20" { + peer0SeesPeer20 = true + } + } + peer20SeesPeer0 := false + for _, p := range nm20.Peers { + if p.ID == "peer-0" { + peer20SeesPeer0 = true + } + } + assert.True(t, peer0SeesPeer20, "peer-0 should see peer-20 via bidirectional policy") + assert.True(t, peer20SeesPeer0, "peer-20 should see peer-0 via bidirectional policy") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 2. PEER EXPIRATION & ACCOUNT SETTINGS +// ────────────────────────────────────────────────────────────────────────────── + +func TestComponents_ExpiredPeerInOfflineList(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + offlineIDs := make(map[string]bool, len(nm.OfflinePeers)) + for _, p := range nm.OfflinePeers { + offlineIDs[p.ID] = true + } + assert.True(t, offlineIDs["peer-98"], "expired peer should be in OfflinePeers") + for _, p := range nm.Peers { + assert.NotEqual(t, "peer-98", p.ID, "expired peer should not be in active Peers") + } +} + +func TestComponents_ExpirationDisabledSetting(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + account.Settings.PeerLoginExpirationEnabled = false + + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + peerIDs := make(map[string]bool, len(nm.Peers)) + for _, p := range nm.Peers { + peerIDs[p.ID] = true + } + assert.True(t, peerIDs["peer-98"], "with expiration disabled, peer-98 should be in active Peers") +} + +func TestComponents_LoginExpiration_PeerLevel(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + account.Settings.PeerLoginExpirationEnabled = true + account.Settings.PeerLoginExpiration = 1 * time.Hour + + pastLogin := time.Now().Add(-2 * time.Hour) + account.Peers["peer-5"].LastLogin = &pastLogin + account.Peers["peer-5"].LoginExpirationEnabled = true + + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + offlineIDs := make(map[string]bool, len(nm.OfflinePeers)) + for _, p := range nm.OfflinePeers { + offlineIDs[p.ID] = true + } + assert.True(t, offlineIDs["peer-5"], "login-expired peer should be in OfflinePeers") + for _, p := range nm.Peers { + assert.NotEqual(t, "peer-5", p.ID, "login-expired peer should not be in active Peers") + } +} + +func TestComponents_NetworkSerial(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 5) + account.Network.Serial = 42 + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.Equal(t, uint64(42), nm.Network.Serial, "network serial should match") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 3. NON-VALIDATED PEERS +// ────────────────────────────────────────────────────────────────────────────── + +func TestComponents_NonValidatedPeerExcluded(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + for _, p := range nm.Peers { + assert.NotEqual(t, "peer-99", p.ID, "non-validated peer should not appear in Peers") + } + for _, p := range nm.OfflinePeers { + assert.NotEqual(t, "peer-99", p.ID, "non-validated peer should not appear in OfflinePeers") + } +} + +func TestComponents_NonValidatedTargetPeerGetsEmptyMap(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-99", validatedPeers) + require.NotNil(t, nm) + assert.Empty(t, nm.Peers) + assert.Empty(t, nm.FirewallRules) +} + +func TestComponents_NonExistentPeerGetsEmptyMap(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-does-not-exist", validatedPeers) + require.NotNil(t, nm) + assert.Empty(t, nm.Peers) + assert.Empty(t, nm.FirewallRules) +} + +// ────────────────────────────────────────────────────────────────────────────── +// 4. POLICIES & FIREWALL RULES +// ────────────────────────────────────────────────────────────────────────────── + +func TestComponents_FirewallRulesGenerated(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.NotEmpty(t, nm.FirewallRules, "should have firewall rules from policies") +} + +func TestComponents_DropPolicyGeneratesDropRules(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + hasDropRule := false + for _, rule := range nm.FirewallRules { + if rule.Action == string(types.PolicyTrafficActionDrop) { + hasDropRule = true + break + } + } + assert.True(t, hasDropRule, "should have at least one drop firewall rule") +} + +func TestComponents_DisabledPolicyIgnored(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 2) + for _, p := range account.Policies { + p.Enabled = false + } + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.Empty(t, nm.Peers, "disabled policies should yield no peers") + assert.Empty(t, nm.FirewallRules, "disabled policies should yield no firewall rules") +} + +func TestComponents_PortPolicy(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 2) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + has8080, has5432 := false, false + for _, rule := range nm.FirewallRules { + if rule.Port == "8080" { + has8080 = true + } + if rule.Port == "5432" { + has5432 = true + } + } + assert.True(t, has8080, "should have firewall rule for port 8080") + assert.True(t, has5432, "should have firewall rule for port 5432 (drop policy)") +} + +func TestComponents_PortRangePolicy(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 2) + account.Peers["peer-0"].Meta.WtVersion = "0.50.0" + + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-port-range", Name: "Port Range", Enabled: true, AccountID: "test-account", + Rules: []*types.PolicyRule{{ + ID: "rule-port-range", Name: "Port Range Rule", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP, + Bidirectional: true, + PortRanges: []types.RulePortRange{{Start: 8000, End: 9000}}, + Sources: []string{"group-0"}, Destinations: []string{"group-1"}, + }}, + }) + + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + hasPortRange := false + for _, rule := range nm.FirewallRules { + if rule.PortRange.Start == 8000 && rule.PortRange.End == 9000 { + hasPortRange = true + break + } + } + assert.True(t, hasPortRange, "should have firewall rule with port range 8000-9000") +} + +func TestComponents_FirewallRuleDirection(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 2) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + hasIn, hasOut := false, false + for _, rule := range nm.FirewallRules { + if rule.Direction == types.FirewallRuleDirectionIN { + hasIn = true + } + if rule.Direction == types.FirewallRuleDirectionOUT { + hasOut = true + } + } + assert.True(t, hasIn, "should have inbound firewall rules") + assert.True(t, hasOut, "should have outbound firewall rules") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 5. ROUTES +// ────────────────────────────────────────────────────────────────────────────── + +func TestComponents_RoutesIncluded(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.NotEmpty(t, nm.Routes, "should have routes") +} + +func TestComponents_DisabledRouteExcluded(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 2) + for _, r := range account.Routes { + r.Enabled = false + } + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + for _, r := range nm.Routes { + assert.True(t, r.Enabled, "only enabled routes should appear") + } +} + +func TestComponents_RoutesFirewallRulesForACG(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.NotEmpty(t, nm.RoutesFirewallRules, "should have route firewall rules for access-controlled routes") +} + +func TestComponents_HARouteDeduplication(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 5) + + haNetwork := netip.MustParsePrefix("172.16.0.0/16") + account.Routes["route-ha-1"] = &route.Route{ + ID: "route-ha-1", Network: haNetwork, PeerID: "peer-10", + Peer: account.Peers["peer-10"].Key, Enabled: true, Metric: 100, + Groups: []string{"group-all"}, PeerGroups: []string{"group-0"}, AccountID: "test-account", + } + account.Routes["route-ha-2"] = &route.Route{ + ID: "route-ha-2", Network: haNetwork, PeerID: "peer-20", + Peer: account.Peers["peer-20"].Key, Enabled: true, Metric: 200, + Groups: []string{"group-all"}, PeerGroups: []string{"group-1"}, AccountID: "test-account", + } + + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + haRoutes := 0 + for _, r := range nm.Routes { + if r.Network == haNetwork { + haRoutes++ + } + } + // Components deduplicates HA routes with the same HA unique ID, returning one entry per HA group + assert.Equal(t, 1, haRoutes, "HA routes with same network should be deduplicated into one entry") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 6. NETWORK RESOURCES & ROUTERS +// ────────────────────────────────────────────────────────────────────────────── + +func TestComponents_NetworkResourceRoutes_RouterPeer(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + + var routerPeerID string + for _, nr := range account.NetworkRouters { + routerPeerID = nr.Peer + break + } + require.NotEmpty(t, routerPeerID) + + nm := componentsNetworkMap(account, routerPeerID, validatedPeers) + require.NotNil(t, nm) + assert.NotEmpty(t, nm.Peers, "router peer should see source peers") +} + +func TestComponents_NetworkResourceRoutes_SourcePeerSeesRouterPeer(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + + var routerPeerID string + for _, nr := range account.NetworkRouters { + routerPeerID = nr.Peer + break + } + require.NotEmpty(t, routerPeerID) + + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + peerIDs := make(map[string]bool, len(nm.Peers)) + for _, p := range nm.Peers { + peerIDs[p.ID] = true + } + assert.True(t, peerIDs[routerPeerID], "source peer should see router peer for network resource") +} + +func TestComponents_DisabledNetworkResourceIgnored(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 5) + for _, nr := range account.NetworkResources { + nr.Enabled = false + } + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.NotNil(t, nm.Network) +} + +// ────────────────────────────────────────────────────────────────────────────── +// 7. POSTURE CHECKS +// ────────────────────────────────────────────────────────────────────────────── + +func TestComponents_PostureCheckFiltering_PassingPeer(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.NotEmpty(t, nm.Routes, "passing peer should have routes including resource routes") +} + +func TestComponents_PostureCheckFiltering_FailingPeer(t *testing.T) { + // peer-0 has version 0.40.0 (passes posture check >= 0.26.0) + // peer-1 has version 0.25.0 (fails posture check >= 0.26.0) + // Resource policies require posture-check-ver, so the failing peer + // should not see the router peer for those resources. + account, validatedPeers := scalableTestAccountWithoutDefaultPolicy(100, 5) + + nm0 := componentsNetworkMap(account, "peer-0", validatedPeers) + nm1 := componentsNetworkMap(account, "peer-1", validatedPeers) + require.NotNil(t, nm0) + require.NotNil(t, nm1) + + // The passing peer should have more peers visible (including resource router peers) + // than the failing peer, because the failing peer is excluded from resource policies. + assert.Greater(t, len(nm0.Peers), len(nm1.Peers), + "passing peer (0.40.0) should see more peers than failing peer (0.25.0) due to posture-gated resource policies") +} + +func TestComponents_MultiplePostureChecks(t *testing.T) { + account, validatedPeers := scalableTestAccountWithoutDefaultPolicy(50, 2) + + // Keep only the posture-gated policy — remove per-group policies so connectivity is isolated + account.Policies = []*types.Policy{} + + // Set kernel version on peers so the OS posture check can evaluate + for _, p := range account.Peers { + p.Meta.KernelVersion = "5.15.0" + } + + account.PostureChecks = append(account.PostureChecks, &posture.Checks{ + ID: "posture-check-os", Name: "Check OS", + Checks: posture.ChecksDefinition{ + OSVersionCheck: &posture.OSVersionCheck{Linux: &posture.MinKernelVersionCheck{MinKernelVersion: "0.0.1"}}, + }, + }) + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-multi-posture", Name: "Multi Posture", Enabled: true, AccountID: "test-account", + SourcePostureChecks: []string{"posture-check-ver", "posture-check-os"}, + Rules: []*types.PolicyRule{{ + ID: "rule-multi-posture", Name: "Multi Check Rule", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, + Bidirectional: true, + Sources: []string{"group-0"}, Destinations: []string{"group-1"}, + }}, + }) + + // peer-0 (0.40.0, kernel 5.15.0) passes both checks, should see group-1 peers + nm0 := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm0) + assert.NotEmpty(t, nm0.Peers, "peer passing both posture checks should see destination peers") + + // peer-1 (0.25.0, kernel 5.15.0) fails version check, should NOT see group-1 peers + nm1 := componentsNetworkMap(account, "peer-1", validatedPeers) + require.NotNil(t, nm1) + assert.Empty(t, nm1.Peers, + "peer failing posture check should see no peers when posture-gated policy is the only connectivity") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 8. DNS +// ────────────────────────────────────────────────────────────────────────────── + +func TestComponents_DNSConfigEnabled(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.True(t, nm.DNSConfig.ServiceEnable, "DNS should be enabled") + assert.NotEmpty(t, nm.DNSConfig.NameServerGroups, "should have nameserver groups") +} + +func TestComponents_DNSDisabledByManagementGroup(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + account.DNSSettings.DisabledManagementGroups = []string{"group-all"} + + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.False(t, nm.DNSConfig.ServiceEnable, "DNS should be disabled for peer in disabled group") +} + +func TestComponents_DNSNameServerGroupDistribution(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + account.NameServerGroups["ns-group-0"] = &nbdns.NameServerGroup{ + ID: "ns-group-0", Name: "Group 0 NS", Enabled: true, Groups: []string{"group-0"}, + NameServers: []nbdns.NameServer{{IP: netip.MustParseAddr("1.1.1.1"), NSType: nbdns.UDPNameServerType, Port: 53}}, + } + + nm0 := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm0) + hasGroup0NS := false + for _, ns := range nm0.DNSConfig.NameServerGroups { + if ns.ID == "ns-group-0" { + hasGroup0NS = true + } + } + assert.True(t, hasGroup0NS, "peer-0 in group-0 should receive ns-group-0") + + nm10 := componentsNetworkMap(account, "peer-10", validatedPeers) + require.NotNil(t, nm10) + hasGroup0NSForPeer10 := false + for _, ns := range nm10.DNSConfig.NameServerGroups { + if ns.ID == "ns-group-0" { + hasGroup0NSForPeer10 = true + } + } + assert.False(t, hasGroup0NSForPeer10, "peer-10 in group-1 should NOT receive ns-group-0") +} + +func TestComponents_DNSCustomZone(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + customZone := nbdns.CustomZone{ + Domain: "netbird.cloud.", + Records: []nbdns.SimpleRecord{ + {Name: "peer0.netbird.cloud.", Type: 1, Class: "IN", TTL: 300, RData: account.Peers["peer-0"].IP.String()}, + {Name: "peer1.netbird.cloud.", Type: 1, Class: "IN", TTL: 300, RData: account.Peers["peer-1"].IP.String()}, + }, + } + + nm := account.GetPeerNetworkMapFromComponents( + context.Background(), "peer-0", customZone, nil, + validatedPeers, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap(), + nil, account.GetActiveGroupUsers(), + ) + require.NotNil(t, nm) + assert.True(t, nm.DNSConfig.ServiceEnable) +} + +// ────────────────────────────────────────────────────────────────────────────── +// 9. SSH +// ────────────────────────────────────────────────────────────────────────────── + +func TestComponents_SSHPolicy(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + account.Groups["ssh-users"] = &types.Group{ID: "ssh-users", Name: "SSH Users", Peers: []string{}} + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-ssh", Name: "SSH Access", Enabled: true, AccountID: "test-account", + Rules: []*types.PolicyRule{{ + ID: "rule-ssh", Name: "Allow SSH", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolNetbirdSSH, + Bidirectional: false, + Sources: []string{"group-0"}, Destinations: []string{"group-1"}, + AuthorizedGroups: map[string][]string{"ssh-users": {"root"}}, + }}, + }) + + nm := componentsNetworkMap(account, "peer-10", validatedPeers) + require.NotNil(t, nm) + assert.True(t, nm.EnableSSH, "SSH should be enabled for destination peer of SSH policy") +} + +func TestComponents_SSHNotEnabledWithoutPolicy(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.False(t, nm.EnableSSH, "SSH should not be enabled without SSH policy") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 10. CROSS-PEER CONSISTENCY +// ────────────────────────────────────────────────────────────────────────────── + +// TestComponents_AllPeersGetValidMaps verifies that every validated peer gets a +// non-nil map with a consistent network serial and non-empty peer list. +func TestComponents_AllPeersGetValidMaps(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 5) + for peerID := range account.Peers { + if _, validated := validatedPeers[peerID]; !validated { + continue + } + nm := componentsNetworkMap(account, peerID, validatedPeers) + require.NotNil(t, nm, "network map should not be nil for %s", peerID) + assert.Equal(t, account.Network.Serial, nm.Network.Serial, "serial mismatch for %s", peerID) + assert.NotEmpty(t, nm.Peers, "validated peer %s should see other peers", peerID) + } +} + +// TestComponents_LargeScaleMapGeneration verifies that components can generate maps +// at larger scales without errors and with consistent output. +func TestComponents_LargeScaleMapGeneration(t *testing.T) { + scales := []struct{ peers, groups int }{ + {500, 20}, + {1000, 50}, + } + for _, s := range scales { + t.Run(fmt.Sprintf("%dpeers_%dgroups", s.peers, s.groups), func(t *testing.T) { + account, validatedPeers := scalableTestAccount(s.peers, s.groups) + testPeers := []string{"peer-0", fmt.Sprintf("peer-%d", s.peers/4), fmt.Sprintf("peer-%d", s.peers/2)} + for _, peerID := range testPeers { + nm := componentsNetworkMap(account, peerID, validatedPeers) + require.NotNil(t, nm, "network map should not be nil for %s", peerID) + assert.NotEmpty(t, nm.Peers, "peer %s should see other peers at scale", peerID) + assert.NotEmpty(t, nm.Routes, "peer %s should have routes at scale", peerID) + assert.Equal(t, account.Network.Serial, nm.Network.Serial, "serial mismatch for %s", peerID) + } + }) + } +} + +// ────────────────────────────────────────────────────────────────────────────── +// 11. PEER-AS-RESOURCE POLICIES +// ────────────────────────────────────────────────────────────────────────────── + +// TestComponents_PeerAsSourceResource verifies that a policy with SourceResource.Type=Peer +// targets only that specific peer as the source. +func TestComponents_PeerAsSourceResource(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-peer-src", Name: "Peer Source Resource", Enabled: true, AccountID: "test-account", + Rules: []*types.PolicyRule{{ + ID: "rule-peer-src", Name: "Peer Source Rule", Enabled: true, + Action: types.PolicyTrafficActionAccept, + Protocol: types.PolicyRuleProtocolTCP, + Bidirectional: true, + Ports: []string{"443"}, + SourceResource: types.Resource{ID: "peer-0", Type: types.ResourceTypePeer}, + Destinations: []string{"group-1"}, + }}, + }) + + // peer-0 is the source resource, should see group-1 peers + nm0 := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm0) + + has443 := false + for _, rule := range nm0.FirewallRules { + if rule.Port == "443" { + has443 = true + break + } + } + assert.True(t, has443, "peer-0 as source resource should have port 443 rule") +} + +// TestComponents_PeerAsDestinationResource verifies that a policy with DestinationResource.Type=Peer +// targets only that specific peer as the destination. +func TestComponents_PeerAsDestinationResource(t *testing.T) { + account, validatedPeers := scalableTestAccountWithoutDefaultPolicy(20, 2) + + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-peer-dst", Name: "Peer Dest Resource", Enabled: true, AccountID: "test-account", + Rules: []*types.PolicyRule{{ + ID: "rule-peer-dst", Name: "Peer Dest Rule", Enabled: true, + Action: types.PolicyTrafficActionAccept, + Protocol: types.PolicyRuleProtocolTCP, + Bidirectional: true, + Ports: []string{"443"}, + Sources: []string{"group-0"}, + DestinationResource: types.Resource{ID: "peer-15", Type: types.ResourceTypePeer}, + }}, + }) + + // peer-0 is in group-0 (source), should see peer-15 as destination + nm0 := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm0) + + peerIDs := make(map[string]bool, len(nm0.Peers)) + for _, p := range nm0.Peers { + peerIDs[p.ID] = true + } + assert.True(t, peerIDs["peer-15"], "peer-0 should see peer-15 via peer-as-destination-resource policy") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 12. MULTIPLE RULES PER POLICY +// ────────────────────────────────────────────────────────────────────────────── + +// TestComponents_MultipleRulesPerPolicy verifies a policy with multiple rules generates +// firewall rules for each. +func TestComponents_MultipleRulesPerPolicy(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-multi-rule", Name: "Multi Rule Policy", Enabled: true, AccountID: "test-account", + Rules: []*types.PolicyRule{ + { + ID: "rule-http", Name: "Allow HTTP", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP, + Bidirectional: true, Ports: []string{"80"}, + Sources: []string{"group-0"}, Destinations: []string{"group-1"}, + }, + { + ID: "rule-https", Name: "Allow HTTPS", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP, + Bidirectional: true, Ports: []string{"443"}, + Sources: []string{"group-0"}, Destinations: []string{"group-1"}, + }, + }, + }) + + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + has80, has443 := false, false + for _, rule := range nm.FirewallRules { + if rule.Port == "80" { + has80 = true + } + if rule.Port == "443" { + has443 = true + } + } + assert.True(t, has80, "should have firewall rule for port 80 from first rule") + assert.True(t, has443, "should have firewall rule for port 443 from second rule") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 13. SSH AUTHORIZED USERS CONTENT +// ────────────────────────────────────────────────────────────────────────────── + +// TestComponents_SSHAuthorizedUsersContent verifies that SSH policies populate +// the AuthorizedUsers map with the correct users and machine mappings. +func TestComponents_SSHAuthorizedUsersContent(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + + account.Users["user-dev"] = &types.User{Id: "user-dev", Role: types.UserRoleUser, AccountID: "test-account", AutoGroups: []string{"ssh-users"}} + account.Groups["ssh-users"] = &types.Group{ID: "ssh-users", Name: "SSH Users", Peers: []string{}} + + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-ssh", Name: "SSH Access", Enabled: true, AccountID: "test-account", + Rules: []*types.PolicyRule{{ + ID: "rule-ssh", Name: "Allow SSH", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolNetbirdSSH, + Bidirectional: false, + Sources: []string{"group-0"}, Destinations: []string{"group-1"}, + AuthorizedGroups: map[string][]string{"ssh-users": {"root", "admin"}}, + }}, + }) + + // peer-10 is in group-1 (destination) + nm := componentsNetworkMap(account, "peer-10", validatedPeers) + require.NotNil(t, nm) + assert.True(t, nm.EnableSSH, "SSH should be enabled") + assert.NotNil(t, nm.AuthorizedUsers, "AuthorizedUsers should not be nil") + assert.NotEmpty(t, nm.AuthorizedUsers, "AuthorizedUsers should have entries") + + // Check that "root" machine user mapping exists + _, hasRoot := nm.AuthorizedUsers["root"] + _, hasAdmin := nm.AuthorizedUsers["admin"] + assert.True(t, hasRoot || hasAdmin, "AuthorizedUsers should contain 'root' or 'admin' machine user mapping") +} + +// TestComponents_SSHLegacyImpliedSSH verifies that a non-SSH ALL protocol policy with +// SSHEnabled peer implies legacy SSH access. +func TestComponents_SSHLegacyImpliedSSH(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + + // Enable SSH on the destination peer + account.Peers["peer-10"].SSHEnabled = true + + // The default "Allow All" policy with Protocol=ALL + SSHEnabled peer should imply SSH + nm := componentsNetworkMap(account, "peer-10", validatedPeers) + require.NotNil(t, nm) + assert.True(t, nm.EnableSSH, "SSH should be implied by ALL protocol policy with SSHEnabled peer") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 14. ROUTE DEFAULT PERMIT (no AccessControlGroups) +// ────────────────────────────────────────────────────────────────────────────── + +// TestComponents_RouteDefaultPermit verifies that a route without AccessControlGroups +// generates default permit firewall rules (0.0.0.0/0 source). +func TestComponents_RouteDefaultPermit(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + + // Add a route without ACGs — this peer is the routing peer + routingPeerID := "peer-5" + account.Routes["route-no-acg"] = &route.Route{ + ID: "route-no-acg", Network: netip.MustParsePrefix("192.168.99.0/24"), + PeerID: routingPeerID, Peer: account.Peers[routingPeerID].Key, + Enabled: true, Groups: []string{"group-all"}, PeerGroups: []string{"group-0"}, + AccessControlGroups: []string{}, + AccountID: "test-account", + } + + // The routing peer should get default permit route firewall rules + nm := componentsNetworkMap(account, routingPeerID, validatedPeers) + require.NotNil(t, nm) + + hasDefaultPermit := false + for _, rfr := range nm.RoutesFirewallRules { + for _, src := range rfr.SourceRanges { + if src == "0.0.0.0/0" || src == "::/0" { + hasDefaultPermit = true + break + } + } + } + assert.True(t, hasDefaultPermit, "route without ACG should have default permit rule with 0.0.0.0/0 source") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 15. MULTIPLE ROUTERS PER NETWORK +// ────────────────────────────────────────────────────────────────────────────── + +// TestComponents_MultipleRoutersPerNetwork verifies that a network resource +// with multiple routers provides routes through all available routers. +func TestComponents_MultipleRoutersPerNetwork(t *testing.T) { + account, validatedPeers := scalableTestAccountWithoutDefaultPolicy(20, 2) + + netID := "net-multi-router" + resID := "res-multi-router" + account.Networks = append(account.Networks, &networkTypes.Network{ID: netID, Name: "Multi Router Network", AccountID: "test-account"}) + account.NetworkResources = append(account.NetworkResources, &resourceTypes.NetworkResource{ + ID: resID, NetworkID: netID, AccountID: "test-account", Enabled: true, + Address: "multi-svc.netbird.cloud", + }) + account.NetworkRouters = append(account.NetworkRouters, + &routerTypes.NetworkRouter{ID: "router-a", NetworkID: netID, Peer: "peer-5", Enabled: true, AccountID: "test-account", Metric: 100}, + &routerTypes.NetworkRouter{ID: "router-b", NetworkID: netID, Peer: "peer-15", Enabled: true, AccountID: "test-account", Metric: 200}, + ) + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-multi-router-res", Name: "Multi Router Resource", Enabled: true, AccountID: "test-account", + Rules: []*types.PolicyRule{{ + ID: "rule-multi-router-res", Name: "Allow Multi Router", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, Bidirectional: true, + Sources: []string{"group-0"}, DestinationResource: types.Resource{ID: resID}, + }}, + }) + + // peer-0 is in group-0 (source), should see both router peers + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + peerIDs := make(map[string]bool, len(nm.Peers)) + for _, p := range nm.Peers { + peerIDs[p.ID] = true + } + assert.True(t, peerIDs["peer-5"], "source peer should see router-a (peer-5)") + assert.True(t, peerIDs["peer-15"], "source peer should see router-b (peer-15)") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 16. PEER-AS-NAMESERVER EXCLUSION +// ────────────────────────────────────────────────────────────────────────────── + +// TestComponents_PeerIsNameserverExcludedFromNSGroup verifies that a peer serving +// as a nameserver does not receive its own NS group in DNS config. +func TestComponents_PeerIsNameserverExcludedFromNSGroup(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + + // peer-0 has IP 100.64.0.0 — make it a nameserver + nsIP := account.Peers["peer-0"].IP + account.NameServerGroups["ns-self"] = &nbdns.NameServerGroup{ + ID: "ns-self", Name: "Self NS", Enabled: true, Groups: []string{"group-all"}, + NameServers: []nbdns.NameServer{{IP: netip.AddrFrom4([4]byte{nsIP[0], nsIP[1], nsIP[2], nsIP[3]}), NSType: nbdns.UDPNameServerType, Port: 53}}, + } + + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + hasSelfNS := false + for _, ns := range nm.DNSConfig.NameServerGroups { + if ns.ID == "ns-self" { + hasSelfNS = true + } + } + assert.False(t, hasSelfNS, "peer serving as nameserver should NOT receive its own NS group") + + // peer-10 is NOT the nameserver, should receive the NS group + nm10 := componentsNetworkMap(account, "peer-10", validatedPeers) + require.NotNil(t, nm10) + hasNSForPeer10 := false + for _, ns := range nm10.DNSConfig.NameServerGroups { + if ns.ID == "ns-self" { + hasNSForPeer10 = true + } + } + assert.True(t, hasNSForPeer10, "non-nameserver peer should receive the NS group") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 17. DOMAIN NETWORK RESOURCES +// ────────────────────────────────────────────────────────────────────────────── + +// TestComponents_DomainNetworkResource verifies that domain-based network resources +// produce routes with the correct domain configuration. +func TestComponents_DomainNetworkResource(t *testing.T) { + account, validatedPeers := scalableTestAccountWithoutDefaultPolicy(20, 2) + + netID := "net-domain" + resID := "res-domain" + account.Networks = append(account.Networks, &networkTypes.Network{ID: netID, Name: "Domain Network", AccountID: "test-account"}) + account.NetworkResources = append(account.NetworkResources, &resourceTypes.NetworkResource{ + ID: resID, NetworkID: netID, AccountID: "test-account", Enabled: true, + Address: "api.example.com", Type: "domain", + }) + account.NetworkRouters = append(account.NetworkRouters, &routerTypes.NetworkRouter{ + ID: "router-domain", NetworkID: netID, Peer: "peer-5", Enabled: true, AccountID: "test-account", + }) + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-domain-res", Name: "Domain Resource Policy", Enabled: true, AccountID: "test-account", + Rules: []*types.PolicyRule{{ + ID: "rule-domain-res", Name: "Allow Domain", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, Bidirectional: true, + Sources: []string{"group-0"}, DestinationResource: types.Resource{ID: resID}, + }}, + }) + + // peer-0 is source, should get route to the domain resource via peer-5 + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + peerIDs := make(map[string]bool, len(nm.Peers)) + for _, p := range nm.Peers { + peerIDs[p.ID] = true + } + assert.True(t, peerIDs["peer-5"], "source peer should see domain resource router peer") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 18. DISABLED RULE WITHIN ENABLED POLICY +// ────────────────────────────────────────────────────────────────────────────── + +// TestComponents_DisabledRuleInEnabledPolicy verifies that a disabled rule within +// an enabled policy does not generate firewall rules. +func TestComponents_DisabledRuleInEnabledPolicy(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-mixed-rules", Name: "Mixed Rules", Enabled: true, AccountID: "test-account", + Rules: []*types.PolicyRule{ + { + ID: "rule-enabled", Name: "Enabled Rule", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP, + Bidirectional: true, Ports: []string{"3000"}, + Sources: []string{"group-0"}, Destinations: []string{"group-1"}, + }, + { + ID: "rule-disabled", Name: "Disabled Rule", Enabled: false, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP, + Bidirectional: true, Ports: []string{"3001"}, + Sources: []string{"group-0"}, Destinations: []string{"group-1"}, + }, + }, + }) + + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + has3000, has3001 := false, false + for _, rule := range nm.FirewallRules { + if rule.Port == "3000" { + has3000 = true + } + if rule.Port == "3001" { + has3001 = true + } + } + assert.True(t, has3000, "enabled rule should generate firewall rule for port 3000") + assert.False(t, has3001, "disabled rule should NOT generate firewall rule for port 3001") +} From ee343d5d773c89ff7c07f90d59699f022e5601c7 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Thu, 9 Apr 2026 18:12:38 +0200 Subject: [PATCH 295/374] [management] use sql null vars (#5844) --- management/server/store/sql_store.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 07dfe1914..35ee0662c 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -2099,7 +2099,8 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpserv var createdAt, certIssuedAt sql.NullTime var status, proxyCluster, sessionPrivateKey, sessionPublicKey sql.NullString var mode, source, sourcePeer sql.NullString - var terminated sql.NullBool + var terminated, portAutoAssigned sql.NullBool + var listenPort sql.NullInt64 err := row.Scan( &s.ID, &s.AccountID, @@ -2116,8 +2117,8 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpserv &sessionPrivateKey, &sessionPublicKey, &mode, - &s.ListenPort, - &s.PortAutoAssigned, + &listenPort, + &portAutoAssigned, &source, &sourcePeer, &terminated, @@ -2164,6 +2165,12 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpserv if terminated.Valid { s.Terminated = terminated.Bool } + if portAutoAssigned.Valid { + s.PortAutoAssigned = portAutoAssigned.Bool + } + if listenPort.Valid { + s.ListenPort = uint16(listenPort.Int64) + } s.Targets = []*rpservice.Target{} return &s, nil }) From d2cdc0efec3bf6fd553d7058427a454e64ccca53 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Fri, 10 Apr 2026 09:12:13 +0800 Subject: [PATCH 296/374] [client] Use native firewall for peer ACLs in userspace WireGuard mode (#5668) --- client/firewall/create_linux.go | 32 ++++++++++++++++--- client/firewall/iface.go | 6 ++++ client/firewall/iptables/manager_linux.go | 16 ++++------ .../firewall/iptables/manager_linux_test.go | 2 -- client/firewall/iptables/state_linux.go | 11 ++----- client/firewall/nftables/manager_linux.go | 16 ++++------ .../firewall/nftables/manager_linux_test.go | 2 -- client/firewall/nftables/state_linux.go | 11 ++----- client/internal/acl/manager_test.go | 7 ++++ 9 files changed, 58 insertions(+), 45 deletions(-) diff --git a/client/firewall/create_linux.go b/client/firewall/create_linux.go index 12dcaee8a..d781ebd77 100644 --- a/client/firewall/create_linux.go +++ b/client/firewall/create_linux.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "os" + "strconv" "github.com/coreos/go-iptables/iptables" "github.com/google/nftables" @@ -35,20 +36,27 @@ const SKIP_NFTABLES_ENV = "NB_SKIP_NFTABLES_CHECK" type FWType int func NewFirewall(iface IFaceMapper, stateManager *statemanager.Manager, flowLogger nftypes.FlowLogger, disableServerRoutes bool, mtu uint16) (firewall.Manager, error) { - // on the linux system we try to user nftables or iptables - // in any case, because we need to allow netbird interface traffic - // so we use AllowNetbird traffic from these firewall managers - // for the userspace packet filtering firewall + // We run in userspace mode and force userspace firewall was requested. We don't attempt native firewall. + if iface.IsUserspaceBind() && forceUserspaceFirewall() { + log.Info("forcing userspace firewall") + return createUserspaceFirewall(iface, nil, disableServerRoutes, flowLogger, mtu) + } + + // Use native firewall for either kernel or userspace, the interface appears identical to netfilter fm, err := createNativeFirewall(iface, stateManager, disableServerRoutes, mtu) + // Kernel cannot fall back to anything else, need to return error if !iface.IsUserspaceBind() { return fm, err } + // Fall back to the userspace packet filter if native is unavailable if err != nil { log.Warnf("failed to create native firewall: %v. Proceeding with userspace", err) + return createUserspaceFirewall(iface, nil, disableServerRoutes, flowLogger, mtu) } - return createUserspaceFirewall(iface, fm, disableServerRoutes, flowLogger, mtu) + + return fm, nil } func createNativeFirewall(iface IFaceMapper, stateManager *statemanager.Manager, routes bool, mtu uint16) (firewall.Manager, error) { @@ -160,3 +168,17 @@ func isIptablesClientAvailable(client *iptables.IPTables) bool { _, err := client.ListChains("filter") return err == nil } + +func forceUserspaceFirewall() bool { + val := os.Getenv(EnvForceUserspaceFirewall) + if val == "" { + return false + } + + force, err := strconv.ParseBool(val) + if err != nil { + log.Warnf("failed to parse %s: %v", EnvForceUserspaceFirewall, err) + return false + } + return force +} diff --git a/client/firewall/iface.go b/client/firewall/iface.go index b83c5f912..491f03269 100644 --- a/client/firewall/iface.go +++ b/client/firewall/iface.go @@ -7,6 +7,12 @@ import ( "github.com/netbirdio/netbird/client/iface/wgaddr" ) +// EnvForceUserspaceFirewall forces the use of the userspace packet filter even when +// native iptables/nftables is available. This only applies when the WireGuard interface +// runs in userspace mode. When set, peer ACLs are handled by USPFilter instead of +// kernel netfilter rules. +const EnvForceUserspaceFirewall = "NB_FORCE_USERSPACE_FIREWALL" + // IFaceMapper defines subset methods of interface required for manager type IFaceMapper interface { Name() string diff --git a/client/firewall/iptables/manager_linux.go b/client/firewall/iptables/manager_linux.go index 2fc6f8ec8..a1d4467d5 100644 --- a/client/firewall/iptables/manager_linux.go +++ b/client/firewall/iptables/manager_linux.go @@ -33,7 +33,6 @@ type Manager struct { type iFaceMapper interface { Name() string Address() wgaddr.Address - IsUserspaceBind() bool } // Create iptables firewall manager @@ -64,10 +63,9 @@ func Create(wgIface iFaceMapper, mtu uint16) (*Manager, error) { func (m *Manager) Init(stateManager *statemanager.Manager) error { state := &ShutdownState{ InterfaceState: &InterfaceState{ - NameStr: m.wgIface.Name(), - WGAddress: m.wgIface.Address(), - UserspaceBind: m.wgIface.IsUserspaceBind(), - MTU: m.router.mtu, + NameStr: m.wgIface.Name(), + WGAddress: m.wgIface.Address(), + MTU: m.router.mtu, }, } stateManager.RegisterState(state) @@ -203,12 +201,10 @@ func (m *Manager) Close(stateManager *statemanager.Manager) error { return nberrors.FormatErrorOrNil(merr) } -// AllowNetbird allows netbird interface traffic +// AllowNetbird allows netbird interface traffic. +// This is called when USPFilter wraps the native firewall, adding blanket accept +// rules so that packet filtering is handled in userspace instead of by netfilter. func (m *Manager) AllowNetbird() error { - if !m.wgIface.IsUserspaceBind() { - return nil - } - _, err := m.AddPeerFiltering( nil, net.IP{0, 0, 0, 0}, diff --git a/client/firewall/iptables/manager_linux_test.go b/client/firewall/iptables/manager_linux_test.go index ee47a27c0..cc4bda0e0 100644 --- a/client/firewall/iptables/manager_linux_test.go +++ b/client/firewall/iptables/manager_linux_test.go @@ -47,8 +47,6 @@ func (i *iFaceMock) Address() wgaddr.Address { panic("AddressFunc is not set") } -func (i *iFaceMock) IsUserspaceBind() bool { return false } - func TestIptablesManager(t *testing.T) { ipv4Client, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) require.NoError(t, err) diff --git a/client/firewall/iptables/state_linux.go b/client/firewall/iptables/state_linux.go index c88774c1f..121c755e9 100644 --- a/client/firewall/iptables/state_linux.go +++ b/client/firewall/iptables/state_linux.go @@ -9,10 +9,9 @@ import ( ) type InterfaceState struct { - NameStr string `json:"name"` - WGAddress wgaddr.Address `json:"wg_address"` - UserspaceBind bool `json:"userspace_bind"` - MTU uint16 `json:"mtu"` + NameStr string `json:"name"` + WGAddress wgaddr.Address `json:"wg_address"` + MTU uint16 `json:"mtu"` } func (i *InterfaceState) Name() string { @@ -23,10 +22,6 @@ func (i *InterfaceState) Address() wgaddr.Address { return i.WGAddress } -func (i *InterfaceState) IsUserspaceBind() bool { - return i.UserspaceBind -} - type ShutdownState struct { sync.Mutex diff --git a/client/firewall/nftables/manager_linux.go b/client/firewall/nftables/manager_linux.go index beb5b70a7..0b5b61e04 100644 --- a/client/firewall/nftables/manager_linux.go +++ b/client/firewall/nftables/manager_linux.go @@ -40,7 +40,6 @@ func getTableName() string { type iFaceMapper interface { Name() string Address() wgaddr.Address - IsUserspaceBind() bool } // Manager of iptables firewall @@ -106,10 +105,9 @@ func (m *Manager) Init(stateManager *statemanager.Manager) error { // cleanup using Close() without needing to store specific rules. if err := stateManager.UpdateState(&ShutdownState{ InterfaceState: &InterfaceState{ - NameStr: m.wgIface.Name(), - WGAddress: m.wgIface.Address(), - UserspaceBind: m.wgIface.IsUserspaceBind(), - MTU: m.router.mtu, + NameStr: m.wgIface.Name(), + WGAddress: m.wgIface.Address(), + MTU: m.router.mtu, }, }); err != nil { log.Errorf("failed to update state: %v", err) @@ -205,12 +203,10 @@ func (m *Manager) RemoveNatRule(pair firewall.RouterPair) error { return m.router.RemoveNatRule(pair) } -// AllowNetbird allows netbird interface traffic +// AllowNetbird allows netbird interface traffic. +// This is called when USPFilter wraps the native firewall, adding blanket accept +// rules so that packet filtering is handled in userspace instead of by netfilter. func (m *Manager) AllowNetbird() error { - if !m.wgIface.IsUserspaceBind() { - return nil - } - m.mutex.Lock() defer m.mutex.Unlock() diff --git a/client/firewall/nftables/manager_linux_test.go b/client/firewall/nftables/manager_linux_test.go index 75b1e2b6c..d48e4ba88 100644 --- a/client/firewall/nftables/manager_linux_test.go +++ b/client/firewall/nftables/manager_linux_test.go @@ -52,8 +52,6 @@ func (i *iFaceMock) Address() wgaddr.Address { panic("AddressFunc is not set") } -func (i *iFaceMock) IsUserspaceBind() bool { return false } - func TestNftablesManager(t *testing.T) { // just check on the local interface diff --git a/client/firewall/nftables/state_linux.go b/client/firewall/nftables/state_linux.go index 48b7b3741..462ad2556 100644 --- a/client/firewall/nftables/state_linux.go +++ b/client/firewall/nftables/state_linux.go @@ -8,10 +8,9 @@ import ( ) type InterfaceState struct { - NameStr string `json:"name"` - WGAddress wgaddr.Address `json:"wg_address"` - UserspaceBind bool `json:"userspace_bind"` - MTU uint16 `json:"mtu"` + NameStr string `json:"name"` + WGAddress wgaddr.Address `json:"wg_address"` + MTU uint16 `json:"mtu"` } func (i *InterfaceState) Name() string { @@ -22,10 +21,6 @@ func (i *InterfaceState) Address() wgaddr.Address { return i.WGAddress } -func (i *InterfaceState) IsUserspaceBind() bool { - return i.UserspaceBind -} - type ShutdownState struct { InterfaceState *InterfaceState `json:"interface_state,omitempty"` } diff --git a/client/internal/acl/manager_test.go b/client/internal/acl/manager_test.go index bd7adfaef..408ed992f 100644 --- a/client/internal/acl/manager_test.go +++ b/client/internal/acl/manager_test.go @@ -19,6 +19,9 @@ import ( var flowLogger = netflow.NewManager(nil, []byte{}, nil).GetLogger() func TestDefaultManager(t *testing.T) { + t.Setenv("NB_WG_KERNEL_DISABLED", "true") + t.Setenv(firewall.EnvForceUserspaceFirewall, "true") + networkMap := &mgmProto.NetworkMap{ FirewallRules: []*mgmProto.FirewallRule{ { @@ -135,6 +138,7 @@ func TestDefaultManager(t *testing.T) { func TestDefaultManagerStateless(t *testing.T) { // stateless currently only in userspace, so we have to disable kernel t.Setenv("NB_WG_KERNEL_DISABLED", "true") + t.Setenv(firewall.EnvForceUserspaceFirewall, "true") t.Setenv("NB_DISABLE_CONNTRACK", "true") networkMap := &mgmProto.NetworkMap{ @@ -194,6 +198,7 @@ func TestDefaultManagerStateless(t *testing.T) { // This tests the full ACL manager -> uspfilter integration. func TestDenyRulesNotAccumulatedOnRepeatedApply(t *testing.T) { t.Setenv("NB_WG_KERNEL_DISABLED", "true") + t.Setenv(firewall.EnvForceUserspaceFirewall, "true") networkMap := &mgmProto.NetworkMap{ FirewallRules: []*mgmProto.FirewallRule{ @@ -258,6 +263,7 @@ func TestDenyRulesNotAccumulatedOnRepeatedApply(t *testing.T) { // up when they're removed from the network map in a subsequent update. func TestDenyRulesCleanedUpOnRemoval(t *testing.T) { t.Setenv("NB_WG_KERNEL_DISABLED", "true") + t.Setenv(firewall.EnvForceUserspaceFirewall, "true") ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -339,6 +345,7 @@ func TestDenyRulesCleanedUpOnRemoval(t *testing.T) { // one added without leaking. func TestRuleUpdateChangingAction(t *testing.T) { t.Setenv("NB_WG_KERNEL_DISABLED", "true") + t.Setenv(firewall.EnvForceUserspaceFirewall, "true") ctrl := gomock.NewController(t) defer ctrl.Finish() From 789b4113fedbcb124f87a87963538a7327691f74 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Fri, 10 Apr 2026 12:15:58 +0200 Subject: [PATCH 297/374] [misc] update dashboards (#5840) --- .../grafana/dashboards/management.json | 66 +++++++++---------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/infrastructure_files/observability/grafana/dashboards/management.json b/infrastructure_files/observability/grafana/dashboards/management.json index 95983603f..f116a8bde 100644 --- a/infrastructure_files/observability/grafana/dashboards/management.json +++ b/infrastructure_files/observability/grafana/dashboards/management.json @@ -302,7 +302,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "rate(management_account_peer_meta_update_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])", + "expr": "rate(management_account_peer_meta_update_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])", "instant": false, "legendFormat": "{{cluster}}/{{environment}}/{{job}}", "range": true, @@ -410,7 +410,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "histogram_quantile(0.5,sum(increase(management_account_get_peer_network_map_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", + "expr": "histogram_quantile(0.5,sum(increase(management_account_get_peer_network_map_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", "format": "heatmap", "fullMetaSearch": false, "includeNullMetadata": true, @@ -426,7 +426,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "histogram_quantile(0.9,sum(increase(management_account_get_peer_network_map_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", + "expr": "histogram_quantile(0.9,sum(increase(management_account_get_peer_network_map_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", "format": "heatmap", "fullMetaSearch": false, "hide": false, @@ -443,7 +443,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "histogram_quantile(0.99,sum(increase(management_account_get_peer_network_map_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", + "expr": "histogram_quantile(0.99,sum(increase(management_account_get_peer_network_map_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", "format": "heatmap", "fullMetaSearch": false, "hide": false, @@ -545,7 +545,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "histogram_quantile(0.5,sum(increase(management_account_update_account_peers_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", + "expr": "histogram_quantile(0.5,sum(increase(management_account_update_account_peers_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", "format": "heatmap", "fullMetaSearch": false, "includeNullMetadata": true, @@ -561,7 +561,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "histogram_quantile(0.9,sum(increase(management_account_update_account_peers_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", + "expr": "histogram_quantile(0.9,sum(increase(management_account_update_account_peers_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", "format": "heatmap", "fullMetaSearch": false, "hide": false, @@ -578,7 +578,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "histogram_quantile(0.99,sum(increase(management_account_update_account_peers_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", + "expr": "histogram_quantile(0.99,sum(increase(management_account_update_account_peers_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", "format": "heatmap", "fullMetaSearch": false, "hide": false, @@ -694,7 +694,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "histogram_quantile(0.5,sum(increase(management_grpc_updatechannel_queue_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", + "expr": "histogram_quantile(0.5,sum(increase(management_grpc_updatechannel_queue_length_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", "format": "heatmap", "fullMetaSearch": false, "includeNullMetadata": true, @@ -710,7 +710,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "histogram_quantile(0.9,sum(increase(management_grpc_updatechannel_queue_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", + "expr": "histogram_quantile(0.9,sum(increase(management_grpc_updatechannel_queue_length_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", "format": "heatmap", "fullMetaSearch": false, "hide": false, @@ -727,7 +727,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "histogram_quantile(0.99,sum(increase(management_grpc_updatechannel_queue_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", + "expr": "histogram_quantile(0.99,sum(increase(management_grpc_updatechannel_queue_length_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", "format": "heatmap", "fullMetaSearch": false, "hide": false, @@ -841,7 +841,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.50, sum(rate(management_store_persistence_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(0.50, sum(rate(management_store_persistence_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", "instant": false, "legendFormat": "p50", "range": true, @@ -853,7 +853,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.90, sum(rate(management_store_persistence_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(0.90, sum(rate(management_store_persistence_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", "hide": false, "instant": false, "legendFormat": "p90", @@ -866,7 +866,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.99, sum(rate(management_store_persistence_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(management_store_persistence_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", "hide": false, "instant": false, "legendFormat": "p99", @@ -963,7 +963,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.50, sum(rate(management_store_transaction_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(0.50, sum(rate(management_store_transaction_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", "instant": false, "legendFormat": "p50", "range": true, @@ -975,7 +975,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.90, sum(rate(management_store_transaction_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(0.90, sum(rate(management_store_transaction_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", "hide": false, "instant": false, "legendFormat": "p90", @@ -988,7 +988,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.99, sum(rate(management_store_transaction_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(management_store_transaction_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", "hide": false, "instant": false, "legendFormat": "p99", @@ -1085,7 +1085,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.50, sum(rate(management_store_global_lock_acquisition_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(0.50, sum(rate(management_store_global_lock_acquisition_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", "instant": false, "legendFormat": "p50", "range": true, @@ -1097,7 +1097,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.90, sum(rate(management_store_global_lock_acquisition_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(0.90, sum(rate(management_store_global_lock_acquisition_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", "hide": false, "instant": false, "legendFormat": "p90", @@ -1110,7 +1110,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.99, sum(rate(management_store_global_lock_acquisition_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(management_store_global_lock_acquisition_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", "hide": false, "instant": false, "legendFormat": "p99", @@ -1221,7 +1221,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "rate(management_idp_authenticate_request_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])", + "expr": "rate(management_idp_authenticate_request_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])", "instant": false, "legendFormat": "{{cluster}}/{{environment}}/{{job}}", "range": true, @@ -1317,7 +1317,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "rate(management_idp_get_account_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])", + "expr": "rate(management_idp_get_account_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])", "instant": false, "legendFormat": "{{cluster}}/{{environment}}/{{job}}", "range": true, @@ -1413,7 +1413,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "rate(management_idp_update_user_meta_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])", + "expr": "rate(management_idp_update_user_meta_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])", "instant": false, "legendFormat": "{{cluster}}/{{environment}}/{{job}}", "range": true, @@ -1523,7 +1523,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum(rate(management_http_request_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",method=~\"GET|OPTIONS\"}[$__rate_interval])) by (job,method)", + "expr": "sum(rate(management_http_request_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",method=~\"GET|OPTIONS\"}[$__rate_interval])) by (job,method)", "instant": false, "legendFormat": "{{method}}", "range": true, @@ -1619,7 +1619,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum(rate(management_http_request_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",method=~\"POST|PUT|DELETE\"}[$__rate_interval])) by (job,method)", + "expr": "sum(rate(management_http_request_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",method=~\"POST|PUT|DELETE\"}[$__rate_interval])) by (job,method)", "instant": false, "legendFormat": "{{method}}", "range": true, @@ -1715,7 +1715,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.50, sum(rate(management_http_request_duration_ms_total_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"read\"}[5m])) by (le))", + "expr": "histogram_quantile(0.50, sum(rate(management_http_request_duration_ms_total_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"read\"}[5m])) by (le))", "instant": false, "legendFormat": "p50", "range": true, @@ -1727,7 +1727,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.90, sum(rate(management_http_request_duration_ms_total_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"read\"}[5m])) by (le))", + "expr": "histogram_quantile(0.90, sum(rate(management_http_request_duration_ms_total_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"read\"}[5m])) by (le))", "hide": false, "instant": false, "legendFormat": "p90", @@ -1740,7 +1740,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.99, sum(rate(management_http_request_duration_ms_total_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"read\"}[5m])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(management_http_request_duration_ms_total_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"read\"}[5m])) by (le))", "hide": false, "instant": false, "legendFormat": "p99", @@ -1837,7 +1837,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.50, sum(rate(management_http_request_duration_ms_total_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"write\"}[5m])) by (le))", + "expr": "histogram_quantile(0.50, sum(rate(management_http_request_duration_ms_total_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"write\"}[5m])) by (le))", "instant": false, "legendFormat": "p50", "range": true, @@ -1849,7 +1849,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.90, sum(rate(management_http_request_duration_ms_total_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"write\"}[5m])) by (le))", + "expr": "histogram_quantile(0.90, sum(rate(management_http_request_duration_ms_total_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"write\"}[5m])) by (le))", "hide": false, "instant": false, "legendFormat": "p90", @@ -1862,7 +1862,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.99, sum(rate(management_http_request_duration_ms_total_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"write\"}[5m])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(management_http_request_duration_ms_total_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"write\"}[5m])) by (le))", "hide": false, "instant": false, "legendFormat": "p99", @@ -1963,7 +1963,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum(rate(management_http_request_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (job,exported_endpoint,method)", + "expr": "sum(rate(management_http_request_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (job,exported_endpoint,method)", "hide": false, "instant": false, "legendFormat": "{{method}}-{{exported_endpoint}}", @@ -3222,7 +3222,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "sum by(le) (increase(management_grpc_updatechannel_queue_bucket{application=\"management\", environment=\"$environment\", host=~\"$host\"}[$__rate_interval]))", + "expr": "sum by(le) (increase(management_grpc_updatechannel_queue_length_bucket{application=\"management\", environment=\"$environment\", host=~\"$host\"}[$__rate_interval]))", "format": "heatmap", "fullMetaSearch": false, "includeNullMetadata": true, @@ -3323,7 +3323,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "sum by(le) (increase(management_account_update_account_peers_duration_ms_bucket{application=\"management\", environment=\"$environment\", host=~\"$host\"}[$__rate_interval]))", + "expr": "sum by(le) (increase(management_account_update_account_peers_duration_ms_milliseconds_bucket{application=\"management\", environment=\"$environment\", host=~\"$host\"}[$__rate_interval]))", "format": "heatmap", "fullMetaSearch": false, "includeNullMetadata": true, From 15709bc666c97f678cc03aef5665649656eed3d0 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Fri, 10 Apr 2026 13:08:04 +0200 Subject: [PATCH 298/374] [management] update account delete with proper proxy domain and service cleanup (#5817) --- .../modules/reverseproxy/domain/domain.go | 5 +++ management/server/account.go | 5 --- management/server/account_test.go | 11 ++++- management/server/store/sql_store.go | 5 +++ management/server/store/sql_store_test.go | 45 +++++++++++++++++++ .../server/store/sqlstore_bench_test.go | 2 + management/server/types/account.go | 8 ++++ 7 files changed, 75 insertions(+), 6 deletions(-) diff --git a/management/internals/modules/reverseproxy/domain/domain.go b/management/internals/modules/reverseproxy/domain/domain.go index 859f1c5b2..ae13bffae 100644 --- a/management/internals/modules/reverseproxy/domain/domain.go +++ b/management/internals/modules/reverseproxy/domain/domain.go @@ -30,3 +30,8 @@ func (d *Domain) EventMeta() map[string]any { "validated": d.Validated, } } + +func (d *Domain) Copy() *Domain { + dCopy := *d + return &dCopy +} diff --git a/management/server/account.go b/management/server/account.go index 75db36a5f..d90b46659 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -742,11 +742,6 @@ func (am *DefaultAccountManager) DeleteAccount(ctx context.Context, accountID, u return status.Errorf(status.Internal, "failed to build user infos for account %s: %v", accountID, err) } - err = am.serviceManager.DeleteAllServices(ctx, accountID, userID) - if err != nil { - return status.Errorf(status.Internal, "failed to delete service %s: %v", accountID, err) - } - for _, otherUser := range account.Users { if otherUser.Id == userID { continue diff --git a/management/server/account_test.go b/management/server/account_test.go index 548cf31d4..2f0533281 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -15,7 +15,6 @@ import ( "time" "github.com/golang/mock/gomock" - "github.com/netbirdio/netbird/shared/management/status" "github.com/prometheus/client_golang/prometheus/push" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -23,6 +22,9 @@ import ( "go.opentelemetry.io/otel/metric/noop" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" + "github.com/netbirdio/netbird/shared/management/status" + nbdns "github.com/netbirdio/netbird/dns" "github.com/netbirdio/netbird/management/internals/controllers/network_map" "github.com/netbirdio/netbird/management/internals/controllers/network_map/controller" @@ -1815,6 +1817,13 @@ func TestAccount_Copy(t *testing.T) { Targets: []*service.Target{}, }, }, + Domains: []*domain.Domain{ + { + ID: "domain1", + Domain: "test.com", + AccountID: "account1", + }, + }, NetworkMapCache: &types.NetworkMapBuilder{}, } account.InitOnce() diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 35ee0662c..0b463a724 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -396,6 +396,11 @@ func (s *SqlStore) DeleteAccount(ctx context.Context, account *types.Account) er return result.Error } + result = tx.Select(clause.Associations).Delete(account.Services, "account_id = ?", account.Id) + if result.Error != nil { + return result.Error + } + result = tx.Select(clause.Associations).Delete(account) if result.Error != nil { return result.Error diff --git a/management/server/store/sql_store_test.go b/management/server/store/sql_store_test.go index bafa63580..8ea6c2ae5 100644 --- a/management/server/store/sql_store_test.go +++ b/management/server/store/sql_store_test.go @@ -22,6 +22,8 @@ import ( "github.com/stretchr/testify/require" nbdns "github.com/netbirdio/netbird/dns" + proxydomain "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" + rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/management/internals/modules/zones" "github.com/netbirdio/netbird/management/internals/modules/zones/records" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" @@ -350,6 +352,35 @@ func TestSqlite_DeleteAccount(t *testing.T) { }, } + account.Services = []*rpservice.Service{ + { + ID: "service_id", + AccountID: account.Id, + Name: "test service", + Domain: "svc.example.com", + Enabled: true, + Targets: []*rpservice.Target{ + { + AccountID: account.Id, + ServiceID: "service_id", + Host: "localhost", + Port: 8080, + Protocol: "http", + Enabled: true, + }, + }, + }, + } + + account.Domains = []*proxydomain.Domain{ + { + ID: "domain_id", + Domain: "custom.example.com", + AccountID: account.Id, + Validated: true, + }, + } + err = store.SaveAccount(context.Background(), account) require.NoError(t, err) @@ -411,6 +442,20 @@ func TestSqlite_DeleteAccount(t *testing.T) { require.NoError(t, err, "expecting no error after removing DeleteAccount when searching for network resources") require.Len(t, resources, 0, "expecting no network resources to be found after DeleteAccount") } + + domains, err := store.ListCustomDomains(context.Background(), account.Id) + require.NoError(t, err, "expecting no error after DeleteAccount when searching for custom domains") + require.Len(t, domains, 0, "expecting no custom domains to be found after DeleteAccount") + + var services []*rpservice.Service + err = store.(*SqlStore).db.Model(&rpservice.Service{}).Find(&services, "account_id = ?", account.Id).Error + require.NoError(t, err, "expecting no error after DeleteAccount when searching for services") + require.Len(t, services, 0, "expecting no services to be found after DeleteAccount") + + var targets []*rpservice.Target + err = store.(*SqlStore).db.Model(&rpservice.Target{}).Find(&targets, "account_id = ?", account.Id).Error + require.NoError(t, err, "expecting no error after DeleteAccount when searching for service targets") + require.Len(t, targets, 0, "expecting no service targets to be found after DeleteAccount") } func Test_GetAccount(t *testing.T) { diff --git a/management/server/store/sqlstore_bench_test.go b/management/server/store/sqlstore_bench_test.go index f2abafceb..81c4b33ae 100644 --- a/management/server/store/sqlstore_bench_test.go +++ b/management/server/store/sqlstore_bench_test.go @@ -20,6 +20,7 @@ import ( "github.com/stretchr/testify/assert" nbdns "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" @@ -265,6 +266,7 @@ func setupBenchmarkDB(b testing.TB) (*SqlStore, func(), string) { &nbdns.NameServerGroup{}, &posture.Checks{}, &networkTypes.Network{}, &routerTypes.NetworkRouter{}, &resourceTypes.NetworkResource{}, &types.AccountOnboarding{}, &service.Service{}, &service.Target{}, + &domain.Domain{}, } for i := len(models) - 1; i >= 0; i-- { diff --git a/management/server/types/account.go b/management/server/types/account.go index 269fc7a88..c448813db 100644 --- a/management/server/types/account.go +++ b/management/server/types/account.go @@ -18,6 +18,7 @@ import ( "github.com/netbirdio/netbird/client/ssh/auth" nbdns "github.com/netbirdio/netbird/dns" + proxydomain "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/management/internals/modules/zones" "github.com/netbirdio/netbird/management/internals/modules/zones/records" @@ -101,6 +102,7 @@ type Account struct { DNSSettings DNSSettings `gorm:"embedded;embeddedPrefix:dns_settings_"` PostureChecks []*posture.Checks `gorm:"foreignKey:AccountID;references:id"` Services []*service.Service `gorm:"foreignKey:AccountID;references:id"` + Domains []*proxydomain.Domain `gorm:"foreignKey:AccountID;references:id"` // Settings is a dictionary of Account settings Settings *Settings `gorm:"embedded;embeddedPrefix:settings_"` Networks []*networkTypes.Network `gorm:"foreignKey:AccountID;references:id"` @@ -911,6 +913,11 @@ func (a *Account) Copy() *Account { services = append(services, svc.Copy()) } + domains := []*proxydomain.Domain{} + for _, domain := range a.Domains { + domains = append(domains, domain.Copy()) + } + return &Account{ Id: a.Id, CreatedBy: a.CreatedBy, @@ -936,6 +943,7 @@ func (a *Account) Copy() *Account { Onboarding: a.Onboarding, NetworkMapCache: a.NetworkMapCache, nmapInitOnce: a.nmapInitOnce, + Domains: domains, } } From 2a8aacc5c917a517e0077d49f9ad0c8745cc3834 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Fri, 10 Apr 2026 13:08:21 +0200 Subject: [PATCH 299/374] [management] allow local routing peer resource (#5814) --- .../server/networks/resources/manager.go | 2 +- .../networks/resources/types/resource.go | 89 ++++++++++--------- management/server/store/sql_store.go | 8 +- management/server/store/sql_store_test.go | 2 +- .../server/types/networkmap_components.go | 70 +++++++++++++-- shared/management/http/api/openapi.yml | 4 + shared/management/http/api/types.gen.go | 9 ++ 7 files changed, 134 insertions(+), 50 deletions(-) diff --git a/management/server/networks/resources/manager.go b/management/server/networks/resources/manager.go index 86f9b6579..ec20f2963 100644 --- a/management/server/networks/resources/manager.go +++ b/management/server/networks/resources/manager.go @@ -108,7 +108,7 @@ func (m *managerImpl) CreateResource(ctx context.Context, userID string, resourc return nil, status.NewPermissionDeniedError() } - resource, err = types.NewNetworkResource(resource.AccountID, resource.NetworkID, resource.Name, resource.Description, resource.Address, resource.GroupIDs, resource.Enabled) + resource, err = types.NewNetworkResource(resource.AccountID, resource.NetworkID, resource.Name, resource.Description, resource.Address, resource.GroupIDs, resource.OnRoutingPeer, resource.Enabled) if err != nil { return nil, fmt.Errorf("failed to create new network resource: %w", err) } diff --git a/management/server/networks/resources/types/resource.go b/management/server/networks/resources/types/resource.go index 1fa908393..3a25c4f03 100644 --- a/management/server/networks/resources/types/resource.go +++ b/management/server/networks/resources/types/resource.go @@ -29,37 +29,39 @@ func (p NetworkResourceType) String() string { } type NetworkResource struct { - ID string `gorm:"primaryKey"` - NetworkID string `gorm:"index"` - AccountID string `gorm:"index"` - Name string - Description string - Type NetworkResourceType - Address string `gorm:"-"` - GroupIDs []string `gorm:"-"` - Domain string - Prefix netip.Prefix `gorm:"serializer:json"` - Enabled bool + ID string `gorm:"primaryKey"` + NetworkID string `gorm:"index"` + AccountID string `gorm:"index"` + Name string + Description string + Type NetworkResourceType + Address string `gorm:"-"` + GroupIDs []string `gorm:"-"` + Domain string + Prefix netip.Prefix `gorm:"serializer:json"` + Enabled bool + OnRoutingPeer bool } -func NewNetworkResource(accountID, networkID, name, description, address string, groupIDs []string, enabled bool) (*NetworkResource, error) { +func NewNetworkResource(accountID, networkID, name, description, address string, groupIDs []string, onRoutingPeer, enabled bool) (*NetworkResource, error) { resourceType, domain, prefix, err := GetResourceType(address) if err != nil { return nil, fmt.Errorf("invalid address: %w", err) } return &NetworkResource{ - ID: xid.New().String(), - AccountID: accountID, - NetworkID: networkID, - Name: name, - Description: description, - Type: resourceType, - Address: address, - Domain: domain, - Prefix: prefix, - GroupIDs: groupIDs, - Enabled: enabled, + ID: xid.New().String(), + AccountID: accountID, + NetworkID: networkID, + Name: name, + Description: description, + Type: resourceType, + Address: address, + Domain: domain, + Prefix: prefix, + GroupIDs: groupIDs, + Enabled: enabled, + OnRoutingPeer: onRoutingPeer, }, nil } @@ -70,13 +72,14 @@ func (n *NetworkResource) ToAPIResponse(groups []api.GroupMinimum) *api.NetworkR } return &api.NetworkResource{ - Id: n.ID, - Name: n.Name, - Description: &n.Description, - Type: api.NetworkResourceType(n.Type.String()), - Address: addr, - Groups: groups, - Enabled: n.Enabled, + Id: n.ID, + Name: n.Name, + Description: &n.Description, + Type: api.NetworkResourceType(n.Type.String()), + Address: addr, + Groups: groups, + Enabled: n.Enabled, + OnRoutingPeer: &n.OnRoutingPeer, } } @@ -86,6 +89,9 @@ func (n *NetworkResource) FromAPIRequest(req *api.NetworkResourceRequest) { if req.Description != nil { n.Description = *req.Description } + if req.OnRoutingPeer != nil { + n.OnRoutingPeer = *req.OnRoutingPeer + } n.Address = req.Address n.GroupIDs = req.Groups n.Enabled = req.Enabled @@ -93,17 +99,18 @@ func (n *NetworkResource) FromAPIRequest(req *api.NetworkResourceRequest) { func (n *NetworkResource) Copy() *NetworkResource { return &NetworkResource{ - ID: n.ID, - AccountID: n.AccountID, - NetworkID: n.NetworkID, - Name: n.Name, - Description: n.Description, - Type: n.Type, - Address: n.Address, - Domain: n.Domain, - Prefix: n.Prefix, - GroupIDs: n.GroupIDs, - Enabled: n.Enabled, + ID: n.ID, + AccountID: n.AccountID, + NetworkID: n.NetworkID, + Name: n.Name, + Description: n.Description, + Type: n.Type, + Address: n.Address, + Domain: n.Domain, + Prefix: n.Prefix, + GroupIDs: n.GroupIDs, + Enabled: n.Enabled, + OnRoutingPeer: n.OnRoutingPeer, } } diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 0b463a724..6f177f80e 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -2291,7 +2291,7 @@ func (s *SqlStore) getNetworkRouters(ctx context.Context, accountID string) ([]* } func (s *SqlStore) getNetworkResources(ctx context.Context, accountID string) ([]*resourceTypes.NetworkResource, error) { - const query = `SELECT id, network_id, account_id, name, description, type, domain, prefix, enabled FROM network_resources WHERE account_id = $1` + const query = `SELECT id, network_id, account_id, name, description, type, domain, prefix, enabled, on_routing_peer FROM network_resources WHERE account_id = $1` rows, err := s.pool.Query(ctx, query, accountID) if err != nil { return nil, err @@ -2300,11 +2300,15 @@ func (s *SqlStore) getNetworkResources(ctx context.Context, accountID string) ([ var r resourceTypes.NetworkResource var prefix []byte var enabled sql.NullBool - err := row.Scan(&r.ID, &r.NetworkID, &r.AccountID, &r.Name, &r.Description, &r.Type, &r.Domain, &prefix, &enabled) + var onRoutingPeer sql.NullBool + err := row.Scan(&r.ID, &r.NetworkID, &r.AccountID, &r.Name, &r.Description, &r.Type, &r.Domain, &prefix, &enabled, &onRoutingPeer) if err == nil { if enabled.Valid { r.Enabled = enabled.Bool } + if onRoutingPeer.Valid { + r.OnRoutingPeer = onRoutingPeer.Bool + } if prefix != nil { _ = json.Unmarshal(prefix, &r.Prefix) } diff --git a/management/server/store/sql_store_test.go b/management/server/store/sql_store_test.go index 8ea6c2ae5..da3be2803 100644 --- a/management/server/store/sql_store_test.go +++ b/management/server/store/sql_store_test.go @@ -2508,7 +2508,7 @@ func TestSqlStore_SaveNetworkResource(t *testing.T) { accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" networkID := "ct286bi7qv930dsrrug0" - netResource, err := resourceTypes.NewNetworkResource(accountID, networkID, "resource-name", "", "example.com", []string{}, true) + netResource, err := resourceTypes.NewNetworkResource(accountID, networkID, "resource-name", "", "example.com", []string{}, false, true) require.NoError(t, err) err = store.SaveNetworkResource(context.Background(), netResource) diff --git a/management/server/types/networkmap_components.go b/management/server/types/networkmap_components.go index 23d84a994..9f8f822c9 100644 --- a/management/server/types/networkmap_components.go +++ b/management/server/types/networkmap_components.go @@ -2,7 +2,6 @@ package types import ( "context" - "maps" "net" "net/netip" "slices" @@ -17,6 +16,7 @@ import ( nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/route" "github.com/netbirdio/netbird/shared/management/domain" + "golang.org/x/exp/maps" ) const EnvNewNetworkMapCompacted = "NB_NETWORK_MAP_COMPACTED" @@ -119,9 +119,10 @@ func (c *NetworkMapComponents) Calculate(ctx context.Context) *NetworkMap { routesUpdate := c.getRoutesToSync(targetPeerID, peersToConnect, peerGroups) routesFirewallRules := c.getPeerRoutesFirewallRules(ctx, targetPeerID) - isRouter, networkResourcesRoutes, sourcePeers := c.getNetworkResourcesRoutesToSync(targetPeerID) + isRouter, networkResourcesRoutes, sourcePeers, peerFirewallRules := c.getNetworkResourcesRoutesToSync(targetPeerID) var networkResourcesFirewallRules []*RouteFirewallRule if isRouter { + firewallRules = append(firewallRules, peerFirewallRules...) networkResourcesFirewallRules = c.getPeerNetworkResourceFirewallRules(ctx, targetPeerID, networkResourcesRoutes) } @@ -526,7 +527,6 @@ func (c *NetworkMapComponents) getRoutingPeerRoutes(peerID string) (enabledRoute return enabledRoutes, disabledRoutes } - func (c *NetworkMapComponents) filterRoutesByGroups(routes []*route.Route, groupListMap LookupMap) []*route.Route { var filteredRoutes []*route.Route for _, r := range routes { @@ -692,10 +692,11 @@ func (c *NetworkMapComponents) getRulePeers(rule *PolicyRule, postureChecks []st return distributionGroupPeers } -func (c *NetworkMapComponents) getNetworkResourcesRoutesToSync(peerID string) (bool, []*route.Route, map[string]struct{}) { +func (c *NetworkMapComponents) getNetworkResourcesRoutesToSync(peerID string) (bool, []*route.Route, map[string]struct{}, []*FirewallRule) { var isRoutingPeer bool var routes []*route.Route allSourcePeers := make(map[string]struct{}) + localResourceFwRule := make([]*FirewallRule, 0) for _, resource := range c.NetworkResources { if !resource.Enabled { @@ -714,6 +715,9 @@ func (c *NetworkMapComponents) getNetworkResourcesRoutesToSync(peerID string) (b addedResourceRoute := false for _, policy := range c.ResourcePoliciesMap[resource.ID] { + if isRoutingPeer && resource.OnRoutingPeer { + localResourceFwRule = append(localResourceFwRule, c.getLocalResourceFirewallRules(policy)...) + } var peers []string if policy.Rules[0].SourceResource.Type == ResourceTypePeer && policy.Rules[0].SourceResource.ID != "" { peers = []string{policy.Rules[0].SourceResource.ID} @@ -736,7 +740,63 @@ func (c *NetworkMapComponents) getNetworkResourcesRoutesToSync(peerID string) (b } } - return isRoutingPeer, routes, allSourcePeers + return isRoutingPeer, routes, allSourcePeers, localResourceFwRule +} + +func (c *NetworkMapComponents) getLocalResourceFirewallRules(policy *Policy) []*FirewallRule { + sourcePeerIDs := c.getPoliciesSourcePeers([]*Policy{policy}) + postureValidatedPeerIDs := c.getPostureValidPeers(maps.Keys(sourcePeerIDs), policy.SourcePostureChecks) + + rules := make([]*FirewallRule, 0) + for _, rule := range policy.Rules { + if !rule.Enabled { + continue + } + + protocol := rule.Protocol + if protocol == PolicyRuleProtocolNetbirdSSH { + continue + } + + for _, peerID := range postureValidatedPeerIDs { + peer := c.GetPeerInfo(peerID) + if peer == nil { + continue + } + peerIP := peer.IP.String() + + fr := FirewallRule{ + PolicyID: rule.ID, + PeerIP: peerIP, + Direction: FirewallRuleDirectionIN, + Action: string(rule.Action), + Protocol: string(protocol), + } + + if len(rule.Ports) == 0 && len(rule.PortRanges) == 0 { + rules = append(rules, &fr) + continue + } + + for _, port := range rule.Ports { + portRule := fr + portRule.Port = port + rules = append(rules, &portRule) + } + + for _, portRange := range rule.PortRanges { + if len(rule.Ports) > 0 { + break + } + rangeRule := fr + rangeRule.PortRange = portRange + rules = append(rules, &rangeRule) + } + + } + } + + return rules } func (c *NetworkMapComponents) getNetworkResourcesRoutes(resource *resourceTypes.NetworkResource, peerID string, router *routerTypes.NetworkRouter) []*route.Route { diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 766fdf0de..d471ba376 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -1980,6 +1980,10 @@ components: description: Network resource status type: boolean example: true + on_routing_peer: + description: Indicate if the resource is on a routing peer or not. It is needed if the resource is targeting the IP of the routing peer itself + type: boolean + example: true required: - name - address diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index 14bb6ee03..03febb193 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -2728,6 +2728,9 @@ type NetworkResource struct { // Name Network resource name Name string `json:"name"` + // OnRoutingPeer Indicate if the resource is on a routing peer or not. It is needed if the resource is targeting the IP of the routing peer itself + OnRoutingPeer *bool `json:"on_routing_peer,omitempty"` + // Type Network resource type based of the address Type NetworkResourceType `json:"type"` } @@ -2745,6 +2748,9 @@ type NetworkResourceMinimum struct { // Name Network resource name Name string `json:"name"` + + // OnRoutingPeer Indicate if the resource is on a routing peer or not. It is needed if the resource is targeting the IP of the routing peer itself + OnRoutingPeer *bool `json:"on_routing_peer,omitempty"` } // NetworkResourceRequest defines model for NetworkResourceRequest. @@ -2763,6 +2769,9 @@ type NetworkResourceRequest struct { // Name Network resource name Name string `json:"name"` + + // OnRoutingPeer Indicate if the resource is on a routing peer or not. It is needed if the resource is targeting the IP of the routing peer itself + OnRoutingPeer *bool `json:"on_routing_peer,omitempty"` } // NetworkResourceType Network resource type based of the address From ee588e153606141f32c2fece81d57637a981f929 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Fri, 10 Apr 2026 14:53:47 +0200 Subject: [PATCH 300/374] Revert "[management] allow local routing peer resource (#5814)" (#5847) --- .../server/networks/resources/manager.go | 2 +- .../networks/resources/types/resource.go | 89 +++++++++---------- management/server/store/sql_store.go | 8 +- management/server/store/sql_store_test.go | 2 +- .../server/types/networkmap_components.go | 70 ++------------- shared/management/http/api/openapi.yml | 4 - shared/management/http/api/types.gen.go | 9 -- 7 files changed, 50 insertions(+), 134 deletions(-) diff --git a/management/server/networks/resources/manager.go b/management/server/networks/resources/manager.go index ec20f2963..86f9b6579 100644 --- a/management/server/networks/resources/manager.go +++ b/management/server/networks/resources/manager.go @@ -108,7 +108,7 @@ func (m *managerImpl) CreateResource(ctx context.Context, userID string, resourc return nil, status.NewPermissionDeniedError() } - resource, err = types.NewNetworkResource(resource.AccountID, resource.NetworkID, resource.Name, resource.Description, resource.Address, resource.GroupIDs, resource.OnRoutingPeer, resource.Enabled) + resource, err = types.NewNetworkResource(resource.AccountID, resource.NetworkID, resource.Name, resource.Description, resource.Address, resource.GroupIDs, resource.Enabled) if err != nil { return nil, fmt.Errorf("failed to create new network resource: %w", err) } diff --git a/management/server/networks/resources/types/resource.go b/management/server/networks/resources/types/resource.go index 3a25c4f03..1fa908393 100644 --- a/management/server/networks/resources/types/resource.go +++ b/management/server/networks/resources/types/resource.go @@ -29,39 +29,37 @@ func (p NetworkResourceType) String() string { } type NetworkResource struct { - ID string `gorm:"primaryKey"` - NetworkID string `gorm:"index"` - AccountID string `gorm:"index"` - Name string - Description string - Type NetworkResourceType - Address string `gorm:"-"` - GroupIDs []string `gorm:"-"` - Domain string - Prefix netip.Prefix `gorm:"serializer:json"` - Enabled bool - OnRoutingPeer bool + ID string `gorm:"primaryKey"` + NetworkID string `gorm:"index"` + AccountID string `gorm:"index"` + Name string + Description string + Type NetworkResourceType + Address string `gorm:"-"` + GroupIDs []string `gorm:"-"` + Domain string + Prefix netip.Prefix `gorm:"serializer:json"` + Enabled bool } -func NewNetworkResource(accountID, networkID, name, description, address string, groupIDs []string, onRoutingPeer, enabled bool) (*NetworkResource, error) { +func NewNetworkResource(accountID, networkID, name, description, address string, groupIDs []string, enabled bool) (*NetworkResource, error) { resourceType, domain, prefix, err := GetResourceType(address) if err != nil { return nil, fmt.Errorf("invalid address: %w", err) } return &NetworkResource{ - ID: xid.New().String(), - AccountID: accountID, - NetworkID: networkID, - Name: name, - Description: description, - Type: resourceType, - Address: address, - Domain: domain, - Prefix: prefix, - GroupIDs: groupIDs, - Enabled: enabled, - OnRoutingPeer: onRoutingPeer, + ID: xid.New().String(), + AccountID: accountID, + NetworkID: networkID, + Name: name, + Description: description, + Type: resourceType, + Address: address, + Domain: domain, + Prefix: prefix, + GroupIDs: groupIDs, + Enabled: enabled, }, nil } @@ -72,14 +70,13 @@ func (n *NetworkResource) ToAPIResponse(groups []api.GroupMinimum) *api.NetworkR } return &api.NetworkResource{ - Id: n.ID, - Name: n.Name, - Description: &n.Description, - Type: api.NetworkResourceType(n.Type.String()), - Address: addr, - Groups: groups, - Enabled: n.Enabled, - OnRoutingPeer: &n.OnRoutingPeer, + Id: n.ID, + Name: n.Name, + Description: &n.Description, + Type: api.NetworkResourceType(n.Type.String()), + Address: addr, + Groups: groups, + Enabled: n.Enabled, } } @@ -89,9 +86,6 @@ func (n *NetworkResource) FromAPIRequest(req *api.NetworkResourceRequest) { if req.Description != nil { n.Description = *req.Description } - if req.OnRoutingPeer != nil { - n.OnRoutingPeer = *req.OnRoutingPeer - } n.Address = req.Address n.GroupIDs = req.Groups n.Enabled = req.Enabled @@ -99,18 +93,17 @@ func (n *NetworkResource) FromAPIRequest(req *api.NetworkResourceRequest) { func (n *NetworkResource) Copy() *NetworkResource { return &NetworkResource{ - ID: n.ID, - AccountID: n.AccountID, - NetworkID: n.NetworkID, - Name: n.Name, - Description: n.Description, - Type: n.Type, - Address: n.Address, - Domain: n.Domain, - Prefix: n.Prefix, - GroupIDs: n.GroupIDs, - Enabled: n.Enabled, - OnRoutingPeer: n.OnRoutingPeer, + ID: n.ID, + AccountID: n.AccountID, + NetworkID: n.NetworkID, + Name: n.Name, + Description: n.Description, + Type: n.Type, + Address: n.Address, + Domain: n.Domain, + Prefix: n.Prefix, + GroupIDs: n.GroupIDs, + Enabled: n.Enabled, } } diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 6f177f80e..0b463a724 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -2291,7 +2291,7 @@ func (s *SqlStore) getNetworkRouters(ctx context.Context, accountID string) ([]* } func (s *SqlStore) getNetworkResources(ctx context.Context, accountID string) ([]*resourceTypes.NetworkResource, error) { - const query = `SELECT id, network_id, account_id, name, description, type, domain, prefix, enabled, on_routing_peer FROM network_resources WHERE account_id = $1` + const query = `SELECT id, network_id, account_id, name, description, type, domain, prefix, enabled FROM network_resources WHERE account_id = $1` rows, err := s.pool.Query(ctx, query, accountID) if err != nil { return nil, err @@ -2300,15 +2300,11 @@ func (s *SqlStore) getNetworkResources(ctx context.Context, accountID string) ([ var r resourceTypes.NetworkResource var prefix []byte var enabled sql.NullBool - var onRoutingPeer sql.NullBool - err := row.Scan(&r.ID, &r.NetworkID, &r.AccountID, &r.Name, &r.Description, &r.Type, &r.Domain, &prefix, &enabled, &onRoutingPeer) + err := row.Scan(&r.ID, &r.NetworkID, &r.AccountID, &r.Name, &r.Description, &r.Type, &r.Domain, &prefix, &enabled) if err == nil { if enabled.Valid { r.Enabled = enabled.Bool } - if onRoutingPeer.Valid { - r.OnRoutingPeer = onRoutingPeer.Bool - } if prefix != nil { _ = json.Unmarshal(prefix, &r.Prefix) } diff --git a/management/server/store/sql_store_test.go b/management/server/store/sql_store_test.go index da3be2803..8ea6c2ae5 100644 --- a/management/server/store/sql_store_test.go +++ b/management/server/store/sql_store_test.go @@ -2508,7 +2508,7 @@ func TestSqlStore_SaveNetworkResource(t *testing.T) { accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" networkID := "ct286bi7qv930dsrrug0" - netResource, err := resourceTypes.NewNetworkResource(accountID, networkID, "resource-name", "", "example.com", []string{}, false, true) + netResource, err := resourceTypes.NewNetworkResource(accountID, networkID, "resource-name", "", "example.com", []string{}, true) require.NoError(t, err) err = store.SaveNetworkResource(context.Background(), netResource) diff --git a/management/server/types/networkmap_components.go b/management/server/types/networkmap_components.go index 9f8f822c9..23d84a994 100644 --- a/management/server/types/networkmap_components.go +++ b/management/server/types/networkmap_components.go @@ -2,6 +2,7 @@ package types import ( "context" + "maps" "net" "net/netip" "slices" @@ -16,7 +17,6 @@ import ( nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/route" "github.com/netbirdio/netbird/shared/management/domain" - "golang.org/x/exp/maps" ) const EnvNewNetworkMapCompacted = "NB_NETWORK_MAP_COMPACTED" @@ -119,10 +119,9 @@ func (c *NetworkMapComponents) Calculate(ctx context.Context) *NetworkMap { routesUpdate := c.getRoutesToSync(targetPeerID, peersToConnect, peerGroups) routesFirewallRules := c.getPeerRoutesFirewallRules(ctx, targetPeerID) - isRouter, networkResourcesRoutes, sourcePeers, peerFirewallRules := c.getNetworkResourcesRoutesToSync(targetPeerID) + isRouter, networkResourcesRoutes, sourcePeers := c.getNetworkResourcesRoutesToSync(targetPeerID) var networkResourcesFirewallRules []*RouteFirewallRule if isRouter { - firewallRules = append(firewallRules, peerFirewallRules...) networkResourcesFirewallRules = c.getPeerNetworkResourceFirewallRules(ctx, targetPeerID, networkResourcesRoutes) } @@ -527,6 +526,7 @@ func (c *NetworkMapComponents) getRoutingPeerRoutes(peerID string) (enabledRoute return enabledRoutes, disabledRoutes } + func (c *NetworkMapComponents) filterRoutesByGroups(routes []*route.Route, groupListMap LookupMap) []*route.Route { var filteredRoutes []*route.Route for _, r := range routes { @@ -692,11 +692,10 @@ func (c *NetworkMapComponents) getRulePeers(rule *PolicyRule, postureChecks []st return distributionGroupPeers } -func (c *NetworkMapComponents) getNetworkResourcesRoutesToSync(peerID string) (bool, []*route.Route, map[string]struct{}, []*FirewallRule) { +func (c *NetworkMapComponents) getNetworkResourcesRoutesToSync(peerID string) (bool, []*route.Route, map[string]struct{}) { var isRoutingPeer bool var routes []*route.Route allSourcePeers := make(map[string]struct{}) - localResourceFwRule := make([]*FirewallRule, 0) for _, resource := range c.NetworkResources { if !resource.Enabled { @@ -715,9 +714,6 @@ func (c *NetworkMapComponents) getNetworkResourcesRoutesToSync(peerID string) (b addedResourceRoute := false for _, policy := range c.ResourcePoliciesMap[resource.ID] { - if isRoutingPeer && resource.OnRoutingPeer { - localResourceFwRule = append(localResourceFwRule, c.getLocalResourceFirewallRules(policy)...) - } var peers []string if policy.Rules[0].SourceResource.Type == ResourceTypePeer && policy.Rules[0].SourceResource.ID != "" { peers = []string{policy.Rules[0].SourceResource.ID} @@ -740,63 +736,7 @@ func (c *NetworkMapComponents) getNetworkResourcesRoutesToSync(peerID string) (b } } - return isRoutingPeer, routes, allSourcePeers, localResourceFwRule -} - -func (c *NetworkMapComponents) getLocalResourceFirewallRules(policy *Policy) []*FirewallRule { - sourcePeerIDs := c.getPoliciesSourcePeers([]*Policy{policy}) - postureValidatedPeerIDs := c.getPostureValidPeers(maps.Keys(sourcePeerIDs), policy.SourcePostureChecks) - - rules := make([]*FirewallRule, 0) - for _, rule := range policy.Rules { - if !rule.Enabled { - continue - } - - protocol := rule.Protocol - if protocol == PolicyRuleProtocolNetbirdSSH { - continue - } - - for _, peerID := range postureValidatedPeerIDs { - peer := c.GetPeerInfo(peerID) - if peer == nil { - continue - } - peerIP := peer.IP.String() - - fr := FirewallRule{ - PolicyID: rule.ID, - PeerIP: peerIP, - Direction: FirewallRuleDirectionIN, - Action: string(rule.Action), - Protocol: string(protocol), - } - - if len(rule.Ports) == 0 && len(rule.PortRanges) == 0 { - rules = append(rules, &fr) - continue - } - - for _, port := range rule.Ports { - portRule := fr - portRule.Port = port - rules = append(rules, &portRule) - } - - for _, portRange := range rule.PortRanges { - if len(rule.Ports) > 0 { - break - } - rangeRule := fr - rangeRule.PortRange = portRange - rules = append(rules, &rangeRule) - } - - } - } - - return rules + return isRoutingPeer, routes, allSourcePeers } func (c *NetworkMapComponents) getNetworkResourcesRoutes(resource *resourceTypes.NetworkResource, peerID string, router *routerTypes.NetworkRouter) []*route.Route { diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index d471ba376..766fdf0de 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -1980,10 +1980,6 @@ components: description: Network resource status type: boolean example: true - on_routing_peer: - description: Indicate if the resource is on a routing peer or not. It is needed if the resource is targeting the IP of the routing peer itself - type: boolean - example: true required: - name - address diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index 03febb193..14bb6ee03 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -2728,9 +2728,6 @@ type NetworkResource struct { // Name Network resource name Name string `json:"name"` - // OnRoutingPeer Indicate if the resource is on a routing peer or not. It is needed if the resource is targeting the IP of the routing peer itself - OnRoutingPeer *bool `json:"on_routing_peer,omitempty"` - // Type Network resource type based of the address Type NetworkResourceType `json:"type"` } @@ -2748,9 +2745,6 @@ type NetworkResourceMinimum struct { // Name Network resource name Name string `json:"name"` - - // OnRoutingPeer Indicate if the resource is on a routing peer or not. It is needed if the resource is targeting the IP of the routing peer itself - OnRoutingPeer *bool `json:"on_routing_peer,omitempty"` } // NetworkResourceRequest defines model for NetworkResourceRequest. @@ -2769,9 +2763,6 @@ type NetworkResourceRequest struct { // Name Network resource name Name string `json:"name"` - - // OnRoutingPeer Indicate if the resource is on a routing peer or not. It is needed if the resource is targeting the IP of the routing peer itself - OnRoutingPeer *bool `json:"on_routing_peer,omitempty"` } // NetworkResourceType Network resource type based of the address From cf86b9a528d2e31762ac8c14a92846d8e2b3ca46 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Fri, 10 Apr 2026 17:07:27 +0200 Subject: [PATCH 301/374] [management] enable access log cleanup by default (#5842) --- combined/cmd/config.go | 12 ++++++++---- .../reverseproxy/accesslogs/manager/manager.go | 14 ++++++++++++-- .../accesslogs/manager/manager_test.go | 4 ++-- management/internals/server/config/config.go | 2 +- 4 files changed, 23 insertions(+), 9 deletions(-) diff --git a/combined/cmd/config.go b/combined/cmd/config.go index 85664d0d2..ce4df8394 100644 --- a/combined/cmd/config.go +++ b/combined/cmd/config.go @@ -179,9 +179,11 @@ type StoreConfig struct { // ReverseProxyConfig contains reverse proxy settings type ReverseProxyConfig struct { - TrustedHTTPProxies []string `yaml:"trustedHTTPProxies"` - TrustedHTTPProxiesCount uint `yaml:"trustedHTTPProxiesCount"` - TrustedPeers []string `yaml:"trustedPeers"` + TrustedHTTPProxies []string `yaml:"trustedHTTPProxies"` + TrustedHTTPProxiesCount uint `yaml:"trustedHTTPProxiesCount"` + TrustedPeers []string `yaml:"trustedPeers"` + AccessLogRetentionDays int `yaml:"accessLogRetentionDays"` + AccessLogCleanupIntervalHours int `yaml:"accessLogCleanupIntervalHours"` } // DefaultConfig returns a CombinedConfig with default values @@ -645,7 +647,9 @@ func (c *CombinedConfig) ToManagementConfig() (*nbconfig.Config, error) { // Build reverse proxy config reverseProxy := nbconfig.ReverseProxy{ - TrustedHTTPProxiesCount: mgmt.ReverseProxy.TrustedHTTPProxiesCount, + TrustedHTTPProxiesCount: mgmt.ReverseProxy.TrustedHTTPProxiesCount, + AccessLogRetentionDays: mgmt.ReverseProxy.AccessLogRetentionDays, + AccessLogCleanupIntervalHours: mgmt.ReverseProxy.AccessLogCleanupIntervalHours, } for _, p := range mgmt.ReverseProxy.TrustedHTTPProxies { if prefix, err := netip.ParsePrefix(p); err == nil { diff --git a/management/internals/modules/reverseproxy/accesslogs/manager/manager.go b/management/internals/modules/reverseproxy/accesslogs/manager/manager.go index e8d0ce763..59d7704eb 100644 --- a/management/internals/modules/reverseproxy/accesslogs/manager/manager.go +++ b/management/internals/modules/reverseproxy/accesslogs/manager/manager.go @@ -106,13 +106,23 @@ func (m *managerImpl) CleanupOldAccessLogs(ctx context.Context, retentionDays in // StartPeriodicCleanup starts a background goroutine that periodically cleans up old access logs func (m *managerImpl) StartPeriodicCleanup(ctx context.Context, retentionDays, cleanupIntervalHours int) { - if retentionDays <= 0 { - log.WithContext(ctx).Debug("periodic access log cleanup disabled: retention days is 0 or negative") + if retentionDays < 0 { + log.WithContext(ctx).Debug("periodic access log cleanup disabled: retention days is negative") return } + if retentionDays == 0 { + retentionDays = 7 + log.WithContext(ctx).Debugf("no retention days specified for access log cleanup, defaulting to %d days", retentionDays) + } else { + log.WithContext(ctx).Debugf("access log retention period set to %d days", retentionDays) + } + if cleanupIntervalHours <= 0 { cleanupIntervalHours = 24 + log.WithContext(ctx).Debugf("no cleanup interval specified for access log cleanup, defaulting to %d hours", cleanupIntervalHours) + } else { + log.WithContext(ctx).Debugf("access log cleanup interval set to %d hours", cleanupIntervalHours) } cleanupCtx, cancel := context.WithCancel(ctx) diff --git a/management/internals/modules/reverseproxy/accesslogs/manager/manager_test.go b/management/internals/modules/reverseproxy/accesslogs/manager/manager_test.go index 8fadef85f..11bf60829 100644 --- a/management/internals/modules/reverseproxy/accesslogs/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/accesslogs/manager/manager_test.go @@ -121,7 +121,7 @@ func TestCleanupWithExactBoundary(t *testing.T) { } func TestStartPeriodicCleanup(t *testing.T) { - t.Run("periodic cleanup disabled with zero retention", func(t *testing.T) { + t.Run("periodic cleanup disabled with negative retention", func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -135,7 +135,7 @@ func TestStartPeriodicCleanup(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - manager.StartPeriodicCleanup(ctx, 0, 1) + manager.StartPeriodicCleanup(ctx, -1, 1) time.Sleep(100 * time.Millisecond) diff --git a/management/internals/server/config/config.go b/management/internals/server/config/config.go index 0ba393263..fb9c842b7 100644 --- a/management/internals/server/config/config.go +++ b/management/internals/server/config/config.go @@ -203,7 +203,7 @@ type ReverseProxy struct { // AccessLogRetentionDays specifies the number of days to retain access logs. // Logs older than this duration will be automatically deleted during cleanup. - // A value of 0 or negative means logs are kept indefinitely (no cleanup). + // A value of 0 will default to 7 days. Negative means logs are kept indefinitely (no cleanup). AccessLogRetentionDays int // AccessLogCleanupIntervalHours specifies how often (in hours) to run the cleanup routine. From ebd78e01220bc63f20329058cba29a961f2c3c8c Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Fri, 10 Apr 2026 20:51:04 +0200 Subject: [PATCH 302/374] [client] Update `RaceDial` to accept context for improved cancellation handling (#5849) --- shared/relay/client/client.go | 2 +- shared/relay/client/dialer/race_dialer.go | 4 ++-- shared/relay/client/dialer/race_dialer_test.go | 12 ++++++------ 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/shared/relay/client/client.go b/shared/relay/client/client.go index ed1b63435..b10b05617 100644 --- a/shared/relay/client/client.go +++ b/shared/relay/client/client.go @@ -333,7 +333,7 @@ func (c *Client) connect(ctx context.Context) (*RelayAddr, error) { dialers := c.getDialers() rd := dialer.NewRaceDial(c.log, dialer.DefaultConnectionTimeout, c.connectionURL, dialers...) - conn, err := rd.Dial() + conn, err := rd.Dial(ctx) if err != nil { return nil, err } diff --git a/shared/relay/client/dialer/race_dialer.go b/shared/relay/client/dialer/race_dialer.go index 0550fc63e..34359d17e 100644 --- a/shared/relay/client/dialer/race_dialer.go +++ b/shared/relay/client/dialer/race_dialer.go @@ -40,10 +40,10 @@ func NewRaceDial(log *log.Entry, connectionTimeout time.Duration, serverURL stri } } -func (r *RaceDial) Dial() (net.Conn, error) { +func (r *RaceDial) Dial(ctx context.Context) (net.Conn, error) { connChan := make(chan dialResult, len(r.dialerFns)) winnerConn := make(chan net.Conn, 1) - abortCtx, abort := context.WithCancel(context.Background()) + abortCtx, abort := context.WithCancel(ctx) defer abort() for _, dfn := range r.dialerFns { diff --git a/shared/relay/client/dialer/race_dialer_test.go b/shared/relay/client/dialer/race_dialer_test.go index d216ec5e7..aa18df578 100644 --- a/shared/relay/client/dialer/race_dialer_test.go +++ b/shared/relay/client/dialer/race_dialer_test.go @@ -78,7 +78,7 @@ func TestRaceDialEmptyDialers(t *testing.T) { serverURL := "test.server.com" rd := NewRaceDial(logger, DefaultConnectionTimeout, serverURL) - conn, err := rd.Dial() + conn, err := rd.Dial(context.Background()) if err == nil { t.Errorf("Expected an error with empty dialers, got nil") } @@ -104,7 +104,7 @@ func TestRaceDialSingleSuccessfulDialer(t *testing.T) { } rd := NewRaceDial(logger, DefaultConnectionTimeout, serverURL, mockDialer) - conn, err := rd.Dial() + conn, err := rd.Dial(context.Background()) if err != nil { t.Errorf("Expected no error, got %v", err) } @@ -137,7 +137,7 @@ func TestRaceDialMultipleDialersWithOneSuccess(t *testing.T) { } rd := NewRaceDial(logger, DefaultConnectionTimeout, serverURL, mockDialer1, mockDialer2) - conn, err := rd.Dial() + conn, err := rd.Dial(context.Background()) if err != nil { t.Errorf("Expected no error, got %v", err) } @@ -160,7 +160,7 @@ func TestRaceDialTimeout(t *testing.T) { } rd := NewRaceDial(logger, 3*time.Second, serverURL, mockDialer) - conn, err := rd.Dial() + conn, err := rd.Dial(context.Background()) if err == nil { t.Errorf("Expected an error, got nil") } @@ -188,7 +188,7 @@ func TestRaceDialAllDialersFail(t *testing.T) { } rd := NewRaceDial(logger, DefaultConnectionTimeout, serverURL, mockDialer1, mockDialer2) - conn, err := rd.Dial() + conn, err := rd.Dial(context.Background()) if err == nil { t.Errorf("Expected an error, got nil") } @@ -230,7 +230,7 @@ func TestRaceDialFirstSuccessfulDialerWins(t *testing.T) { } rd := NewRaceDial(logger, DefaultConnectionTimeout, serverURL, mockDialer1, mockDialer2) - conn, err := rd.Dial() + conn, err := rd.Dial(context.Background()) if err != nil { t.Errorf("Expected no error, got %v", err) } From 5259e5df510aa50aa52182f81b2b41583e953de9 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Sat, 11 Apr 2026 12:00:40 +0200 Subject: [PATCH 303/374] [management] add domain and service cleanup migration (#5850) --- management/server/migration/migration.go | 96 +++++++++ management/server/migration/migration_test.go | 194 ++++++++++++++++++ management/server/store/store.go | 6 + 3 files changed, 296 insertions(+) diff --git a/management/server/migration/migration.go b/management/server/migration/migration.go index 29555ed0c..7a51cc200 100644 --- a/management/server/migration/migration.go +++ b/management/server/migration/migration.go @@ -489,6 +489,102 @@ func MigrateJsonToTable[T any](ctx context.Context, db *gorm.DB, columnName stri return nil } +// hasForeignKey checks whether a foreign key constraint exists on the given table and column. +func hasForeignKey(db *gorm.DB, table, column string) bool { + var count int64 + + switch db.Name() { + case "postgres": + db.Raw(` + SELECT COUNT(*) FROM information_schema.key_column_usage kcu + JOIN information_schema.table_constraints tc + ON tc.constraint_name = kcu.constraint_name + AND tc.table_schema = kcu.table_schema + WHERE tc.constraint_type = 'FOREIGN KEY' + AND kcu.table_name = ? + AND kcu.column_name = ? + `, table, column).Scan(&count) + case "mysql": + db.Raw(` + SELECT COUNT(*) FROM information_schema.key_column_usage + WHERE table_schema = DATABASE() + AND table_name = ? + AND column_name = ? + AND referenced_table_name IS NOT NULL + `, table, column).Scan(&count) + default: // sqlite + type fkInfo struct { + From string + } + var fks []fkInfo + db.Raw(fmt.Sprintf("PRAGMA foreign_key_list(%s)", table)).Scan(&fks) + for _, fk := range fks { + if fk.From == column { + return true + } + } + return false + } + + return count > 0 +} + +// CleanupOrphanedResources deletes rows from the table of model T where the foreign +// key column (fkColumn) references a row in the table of model R that no longer exists. +func CleanupOrphanedResources[T any, R any](ctx context.Context, db *gorm.DB, fkColumn string) error { + var model T + var refModel R + + if !db.Migrator().HasTable(&model) { + log.WithContext(ctx).Debugf("table for %T does not exist, no cleanup needed", model) + return nil + } + + if !db.Migrator().HasTable(&refModel) { + log.WithContext(ctx).Debugf("referenced table for %T does not exist, no cleanup needed", refModel) + return nil + } + + stmtT := &gorm.Statement{DB: db} + if err := stmtT.Parse(&model); err != nil { + return fmt.Errorf("parse model %T: %w", model, err) + } + childTable := stmtT.Schema.Table + + stmtR := &gorm.Statement{DB: db} + if err := stmtR.Parse(&refModel); err != nil { + return fmt.Errorf("parse reference model %T: %w", refModel, err) + } + parentTable := stmtR.Schema.Table + + if !db.Migrator().HasColumn(&model, fkColumn) { + log.WithContext(ctx).Debugf("column %s does not exist in table %s, no cleanup needed", fkColumn, childTable) + return nil + } + + // If a foreign key constraint already exists on the column, the DB itself + // enforces referential integrity and orphaned rows cannot exist. + if hasForeignKey(db, childTable, fkColumn) { + log.WithContext(ctx).Debugf("foreign key constraint for %s already exists on %s, no cleanup needed", fkColumn, childTable) + return nil + } + + result := db.Exec( + fmt.Sprintf( + "DELETE FROM %s WHERE %s NOT IN (SELECT id FROM %s)", + childTable, fkColumn, parentTable, + ), + ) + if result.Error != nil { + return fmt.Errorf("cleanup orphaned rows in %s: %w", childTable, result.Error) + } + + log.WithContext(ctx).Infof("Cleaned up %d orphaned rows from %s where %s had no matching row in %s", + result.RowsAffected, childTable, fkColumn, parentTable) + + return nil +} + func RemoveDuplicatePeerKeys(ctx context.Context, db *gorm.DB) error { if !db.Migrator().HasTable("peers") { log.WithContext(ctx).Debug("peers table does not exist, skipping duplicate key cleanup") diff --git a/management/server/migration/migration_test.go b/management/server/migration/migration_test.go index c1be8a3a3..5e00976c2 100644 --- a/management/server/migration/migration_test.go +++ b/management/server/migration/migration_test.go @@ -441,3 +441,197 @@ func TestRemoveDuplicatePeerKeys_NoTable(t *testing.T) { err := migration.RemoveDuplicatePeerKeys(context.Background(), db) require.NoError(t, err, "Should not fail when table does not exist") } + +type testParent struct { + ID string `gorm:"primaryKey"` +} + +func (testParent) TableName() string { + return "test_parents" +} + +type testChild struct { + ID string `gorm:"primaryKey"` + ParentID string +} + +func (testChild) TableName() string { + return "test_children" +} + +type testChildWithFK struct { + ID string `gorm:"primaryKey"` + ParentID string `gorm:"index"` + Parent *testParent `gorm:"foreignKey:ParentID"` +} + +func (testChildWithFK) TableName() string { + return "test_children" +} + +func setupOrphanTestDB(t *testing.T, models ...any) *gorm.DB { + t.Helper() + db := setupDatabase(t) + for _, m := range models { + _ = db.Migrator().DropTable(m) + } + err := db.AutoMigrate(models...) + require.NoError(t, err, "Failed to auto-migrate tables") + return db +} + +func TestCleanupOrphanedResources_NoChildTable(t *testing.T) { + db := setupDatabase(t) + _ = db.Migrator().DropTable(&testChild{}) + _ = db.Migrator().DropTable(&testParent{}) + + err := migration.CleanupOrphanedResources[testChild, testParent](context.Background(), db, "parent_id") + require.NoError(t, err, "Should not fail when child table does not exist") +} + +func TestCleanupOrphanedResources_NoParentTable(t *testing.T) { + db := setupDatabase(t) + _ = db.Migrator().DropTable(&testParent{}) + _ = db.Migrator().DropTable(&testChild{}) + + err := db.AutoMigrate(&testChild{}) + require.NoError(t, err) + + err = migration.CleanupOrphanedResources[testChild, testParent](context.Background(), db, "parent_id") + require.NoError(t, err, "Should not fail when parent table does not exist") +} + +func TestCleanupOrphanedResources_EmptyTables(t *testing.T) { + db := setupOrphanTestDB(t, &testParent{}, &testChild{}) + + err := migration.CleanupOrphanedResources[testChild, testParent](context.Background(), db, "parent_id") + require.NoError(t, err, "Should not fail on empty tables") + + var count int64 + db.Model(&testChild{}).Count(&count) + assert.Equal(t, int64(0), count) +} + +func TestCleanupOrphanedResources_NoOrphans(t *testing.T) { + db := setupOrphanTestDB(t, &testParent{}, &testChild{}) + + require.NoError(t, db.Create(&testParent{ID: "p1"}).Error) + require.NoError(t, db.Create(&testParent{ID: "p2"}).Error) + require.NoError(t, db.Create(&testChild{ID: "c1", ParentID: "p1"}).Error) + require.NoError(t, db.Create(&testChild{ID: "c2", ParentID: "p2"}).Error) + + err := migration.CleanupOrphanedResources[testChild, testParent](context.Background(), db, "parent_id") + require.NoError(t, err) + + var count int64 + db.Model(&testChild{}).Count(&count) + assert.Equal(t, int64(2), count, "All children should remain when no orphans") +} + +func TestCleanupOrphanedResources_AllOrphans(t *testing.T) { + db := setupOrphanTestDB(t, &testParent{}, &testChild{}) + + require.NoError(t, db.Exec("INSERT INTO test_children (id, parent_id) VALUES (?, ?)", "c1", "gone1").Error) + require.NoError(t, db.Exec("INSERT INTO test_children (id, parent_id) VALUES (?, ?)", "c2", "gone2").Error) + require.NoError(t, db.Exec("INSERT INTO test_children (id, parent_id) VALUES (?, ?)", "c3", "gone3").Error) + + err := migration.CleanupOrphanedResources[testChild, testParent](context.Background(), db, "parent_id") + require.NoError(t, err) + + var count int64 + db.Model(&testChild{}).Count(&count) + assert.Equal(t, int64(0), count, "All orphaned children should be deleted") +} + +func TestCleanupOrphanedResources_MixedValidAndOrphaned(t *testing.T) { + db := setupOrphanTestDB(t, &testParent{}, &testChild{}) + + require.NoError(t, db.Create(&testParent{ID: "p1"}).Error) + require.NoError(t, db.Create(&testParent{ID: "p2"}).Error) + + require.NoError(t, db.Create(&testChild{ID: "c1", ParentID: "p1"}).Error) + require.NoError(t, db.Create(&testChild{ID: "c2", ParentID: "p2"}).Error) + require.NoError(t, db.Create(&testChild{ID: "c3", ParentID: "p1"}).Error) + + require.NoError(t, db.Exec("INSERT INTO test_children (id, parent_id) VALUES (?, ?)", "c4", "gone1").Error) + require.NoError(t, db.Exec("INSERT INTO test_children (id, parent_id) VALUES (?, ?)", "c5", "gone2").Error) + + err := migration.CleanupOrphanedResources[testChild, testParent](context.Background(), db, "parent_id") + require.NoError(t, err) + + var remaining []testChild + require.NoError(t, db.Order("id").Find(&remaining).Error) + + assert.Len(t, remaining, 3, "Only valid children should remain") + assert.Equal(t, "c1", remaining[0].ID) + assert.Equal(t, "c2", remaining[1].ID) + assert.Equal(t, "c3", remaining[2].ID) +} + +func TestCleanupOrphanedResources_Idempotent(t *testing.T) { + db := setupOrphanTestDB(t, &testParent{}, &testChild{}) + + require.NoError(t, db.Create(&testParent{ID: "p1"}).Error) + require.NoError(t, db.Create(&testChild{ID: "c1", ParentID: "p1"}).Error) + require.NoError(t, db.Exec("INSERT INTO test_children (id, parent_id) VALUES (?, ?)", "c2", "gone").Error) + + ctx := context.Background() + + err := migration.CleanupOrphanedResources[testChild, testParent](ctx, db, "parent_id") + require.NoError(t, err) + + var count int64 + db.Model(&testChild{}).Count(&count) + assert.Equal(t, int64(1), count) + + err = migration.CleanupOrphanedResources[testChild, testParent](ctx, db, "parent_id") + require.NoError(t, err) + + db.Model(&testChild{}).Count(&count) + assert.Equal(t, int64(1), count, "Count should remain the same after second run") +} + +func TestCleanupOrphanedResources_SkipsWhenForeignKeyExists(t *testing.T) { + engine := os.Getenv("NETBIRD_STORE_ENGINE") + if engine != "postgres" && engine != "mysql" { + t.Skip("FK constraint early-exit test requires postgres or mysql") + } + + db := setupDatabase(t) + _ = db.Migrator().DropTable(&testChildWithFK{}) + _ = db.Migrator().DropTable(&testParent{}) + + err := db.AutoMigrate(&testParent{}, &testChildWithFK{}) + require.NoError(t, err) + + require.NoError(t, db.Create(&testParent{ID: "p1"}).Error) + require.NoError(t, db.Create(&testParent{ID: "p2"}).Error) + require.NoError(t, db.Create(&testChildWithFK{ID: "c1", ParentID: "p1"}).Error) + require.NoError(t, db.Create(&testChildWithFK{ID: "c2", ParentID: "p2"}).Error) + + switch engine { + case "postgres": + require.NoError(t, db.Exec("ALTER TABLE test_children DROP CONSTRAINT fk_test_children_parent").Error) + require.NoError(t, db.Exec("DELETE FROM test_parents WHERE id = ?", "p2").Error) + require.NoError(t, db.Exec( + "ALTER TABLE test_children ADD CONSTRAINT fk_test_children_parent "+ + "FOREIGN KEY (parent_id) REFERENCES test_parents(id) NOT VALID", + ).Error) + case "mysql": + require.NoError(t, db.Exec("SET FOREIGN_KEY_CHECKS = 0").Error) + require.NoError(t, db.Exec("ALTER TABLE test_children DROP FOREIGN KEY fk_test_children_parent").Error) + require.NoError(t, db.Exec("DELETE FROM test_parents WHERE id = ?", "p2").Error) + require.NoError(t, db.Exec( + "ALTER TABLE test_children ADD CONSTRAINT fk_test_children_parent "+ + "FOREIGN KEY (parent_id) REFERENCES test_parents(id)", + ).Error) + require.NoError(t, db.Exec("SET FOREIGN_KEY_CHECKS = 1").Error) + } + + err = migration.CleanupOrphanedResources[testChildWithFK, testParent](context.Background(), db, "parent_id") + require.NoError(t, err) + + var count int64 + db.Model(&testChildWithFK{}).Count(&count) + assert.Equal(t, int64(2), count, "Both rows should survive — migration must skip when FK constraint exists") +} diff --git a/management/server/store/store.go b/management/server/store/store.go index f0c34ffa9..efd9a28fd 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -448,6 +448,12 @@ func getMigrationsPreAuto(ctx context.Context) []migrationFunc { func(db *gorm.DB) error { return migration.RemoveDuplicatePeerKeys(ctx, db) }, + func(db *gorm.DB) error { + return migration.CleanupOrphanedResources[rpservice.Service, types.Account](ctx, db, "account_id") + }, + func(db *gorm.DB) error { + return migration.CleanupOrphanedResources[domain.Domain, types.Account](ctx, db, "account_id") + }, } } From 7483fec04882f348450d87f5455438dc178363db Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Mon, 13 Apr 2026 09:38:38 +0200 Subject: [PATCH 304/374] Fix Android internet blackhole caused by stale route re-injection on TUN rebuild (#5865) extraInitialRoutes() was meant to preserve only the fake IP route (240.0.0.0/8) across TUN rebuilds, but it re-injected any initial route missing from the current set. When the management server advertised exit node routes (0.0.0.0/0) that were later filtered by the route selector, extraInitialRoutes() re-added them, causing the Android VPN to capture all traffic with no peer to handle it. Store the fake IP route explicitly and append only that in notify(), removing the overly broad initial route diffing. --- client/internal/routemanager/manager.go | 1 + .../routemanager/notifier/notifier_android.go | 30 +++++++------------ .../routemanager/notifier/notifier_ios.go | 4 +++ .../routemanager/notifier/notifier_other.go | 4 +++ 4 files changed, 19 insertions(+), 20 deletions(-) diff --git a/client/internal/routemanager/manager.go b/client/internal/routemanager/manager.go index e7ca44239..3923e153b 100644 --- a/client/internal/routemanager/manager.go +++ b/client/internal/routemanager/manager.go @@ -168,6 +168,7 @@ func (m *DefaultManager) setupAndroidRoutes(config ManagerConfig) { NetworkType: route.IPv4Network, } cr = append(cr, fakeIPRoute) + m.notifier.SetFakeIPRoute(fakeIPRoute) } m.notifier.SetInitialClientRoutes(cr, routesForComparison) diff --git a/client/internal/routemanager/notifier/notifier_android.go b/client/internal/routemanager/notifier/notifier_android.go index 3d2784ae1..55e0b7421 100644 --- a/client/internal/routemanager/notifier/notifier_android.go +++ b/client/internal/routemanager/notifier/notifier_android.go @@ -16,6 +16,7 @@ import ( type Notifier struct { initialRoutes []*route.Route currentRoutes []*route.Route + fakeIPRoute *route.Route listener listener.NetworkChangeListener listenerMux sync.Mutex @@ -31,13 +32,17 @@ func (n *Notifier) SetListener(listener listener.NetworkChangeListener) { n.listener = listener } -// SetInitialClientRoutes stores the full initial route set (including fake IP blocks) -// and a separate comparison set (without fake IP blocks) for diff detection. +// SetInitialClientRoutes stores the initial route sets for TUN configuration. func (n *Notifier) SetInitialClientRoutes(initialRoutes []*route.Route, routesForComparison []*route.Route) { n.initialRoutes = filterStatic(initialRoutes) n.currentRoutes = filterStatic(routesForComparison) } +// SetFakeIPRoute stores the fake IP route to be included in every TUN rebuild. +func (n *Notifier) SetFakeIPRoute(r *route.Route) { + n.fakeIPRoute = r +} + func (n *Notifier) OnNewRoutes(idMap route.HAMap) { var newRoutes []*route.Route for _, routes := range idMap { @@ -69,7 +74,9 @@ func (n *Notifier) notify() { } allRoutes := slices.Clone(n.currentRoutes) - allRoutes = append(allRoutes, n.extraInitialRoutes()...) + if n.fakeIPRoute != nil { + allRoutes = append(allRoutes, n.fakeIPRoute) + } routeStrings := n.routesToStrings(allRoutes) sort.Strings(routeStrings) @@ -78,23 +85,6 @@ func (n *Notifier) notify() { }(n.listener) } -// extraInitialRoutes returns initialRoutes whose network prefix is absent -// from currentRoutes (e.g. the fake IP block added at setup time). -func (n *Notifier) extraInitialRoutes() []*route.Route { - currentNets := make(map[netip.Prefix]struct{}, len(n.currentRoutes)) - for _, r := range n.currentRoutes { - currentNets[r.Network] = struct{}{} - } - - var extra []*route.Route - for _, r := range n.initialRoutes { - if _, ok := currentNets[r.Network]; !ok { - extra = append(extra, r) - } - } - return extra -} - func filterStatic(routes []*route.Route) []*route.Route { out := make([]*route.Route, 0, len(routes)) for _, r := range routes { diff --git a/client/internal/routemanager/notifier/notifier_ios.go b/client/internal/routemanager/notifier/notifier_ios.go index 343d2799e..68c85067a 100644 --- a/client/internal/routemanager/notifier/notifier_ios.go +++ b/client/internal/routemanager/notifier/notifier_ios.go @@ -34,6 +34,10 @@ func (n *Notifier) SetInitialClientRoutes([]*route.Route, []*route.Route) { // iOS doesn't care about initial routes } +func (n *Notifier) SetFakeIPRoute(*route.Route) { + // Not used on iOS +} + func (n *Notifier) OnNewRoutes(route.HAMap) { // Not used on iOS } diff --git a/client/internal/routemanager/notifier/notifier_other.go b/client/internal/routemanager/notifier/notifier_other.go index 0521e3dc2..97c815cf0 100644 --- a/client/internal/routemanager/notifier/notifier_other.go +++ b/client/internal/routemanager/notifier/notifier_other.go @@ -23,6 +23,10 @@ func (n *Notifier) SetInitialClientRoutes([]*route.Route, []*route.Route) { // Not used on non-mobile platforms } +func (n *Notifier) SetFakeIPRoute(*route.Route) { + // Not used on non-mobile platforms +} + func (n *Notifier) OnNewRoutes(idMap route.HAMap) { // Not used on non-mobile platforms } From 13539543af0f0d5313afedbbe00a6037ed087637 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Mon, 13 Apr 2026 10:42:24 +0200 Subject: [PATCH 305/374] [client] Fix/grpc retry (#5750) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [client] Fix flow client Receive retry loop not stopping after Close Use backoff.Permanent for canceled gRPC errors so Receive returns immediately instead of retrying until context deadline when the connection is already closed. Add TestNewClient_PermanentClose to verify the behavior. The connectivity.Shutdown check was meaningless because when the connection is shut down, c.realClient.Events(ctx, grpc.WaitForReady(true)) on the nex line already fails with codes.Canceled — which is now handled as a permanent error. The explicit state check was just duplicating what gRPC already reports through its normal error path. * [client] remove WaitForReady from stream open call grpc.WaitForReady(true) parks the RPC call internally until the connection reaches READY, only unblocking on ctx cancellation. This means the external backoff.Retry loop in Receive() never gets control back during a connection outage — it cannot tick, log, or apply its retry intervals while WaitForReady is blocking. Removing it restores fail-fast behaviour: Events() returns immediately with codes.Unavailable when the connection is not ready, which is exactly what the backoff loop expects. The backoff becomes the single authority over retry timing and cadence, as originally intended. * [client] Add connection recreation and improve flow client error handling Store gRPC dial options on the client to enable connection recreation on Internal errors (RST_STREAM/PROTOCOL_ERROR). Treat Unauthenticated, PermissionDenied, and Unimplemented as permanent failures. Unify mutex usage and add reconnection logging for better observability. * [client] Remove Unauthenticated, PermissionDenied, and Unimplemented from permanent error handling * [client] Fix error handling in Receive to properly re-establish stream and improve reconnection messaging * Fix test * [client] Add graceful shutdown handling and test for concurrent Close during Receive Prevent reconnection attempts after client closure by tracking a `closed` flag. Use `backoff.Permanent` for errors caused by operations on a closed client. Add a test to ensure `Close` does not block when `Receive` is actively running. * [client] Fix connection swap to properly close old gRPC connection Close the old `gRPC.ClientConn` after successfully swapping to a new connection during reconnection. * [client] Reset backoff * [client] Ensure stream closure on error during initialization * [client] Add test for handling server-side stream closure and reconnection Introduce `TestReceive_ServerClosesStream` to verify the client's ability to recover and process acknowledgments after the server closes the stream. Enhance test server with a controlled stream closure mechanism. * [client] Add protocol error simulation and enhance reconnection test Introduce `connTrackListener` to simulate HTTP/2 RST_STREAM with PROTOCOL_ERROR for testing. Refactor and rename `TestReceive_ServerClosesStream` to `TestReceive_ProtocolErrorStreamReconnect` to verify client recovery on protocol errors. * [client] Update Close error message in test for clarity * [client] Fine-tune the tests * [client] Adjust connection tracking in reconnection test * [client] Wait for Events handler to exit in RST_STREAM reconnection test Ensure the old `Events` handler exits fully before proceeding in the reconnection test to avoid dropped acknowledgments on a broken stream. Add a `handlerDone` channel to synchronize handler exits. * [client] Prevent panic on nil connection during Close * [client] Refactor connection handling to use explicit target tracking Introduce `target` field to store the gRPC connection target directly, simplifying reconnections and ensuring consistent connection reuse logic. * [client] Rename `isCancellation` to `isContextDone` and extend handling for `DeadlineExceeded` Refactor error handling to include `DeadlineExceeded` scenarios alongside `Canceled`. Update related condition checks for consistency. * [client] Add connection generation tracking to prevent stale reconnections Introduce `connGen` to track connection generations and ensure that stale `recreateConnection` calls do not override newer connections. Update stream establishment and reconnection logic to incorporate generation validation. * [client] Add backoff reset condition to prevent short-lived retry cycles Refine backoff reset logic to ensure it only occurs for sufficiently long-lived stream connections, avoiding interference with `MaxElapsedTime`. * [client] Introduce `minHealthyDuration` to refine backoff reset logic Add `minHealthyDuration` constant to ensure stream retries only reset the backoff timer if the stream survives beyond a minimum duration. Prevents unhealthy, short-lived streams from interfering with `MaxElapsedTime`. * [client] IPv6 friendly connection parsedURL.Hostname() strips IPv6 brackets. For http://[::1]:443, this turns it into ::1:443, which is not a valid host:port target for gRPC. Additionally, fmt.Sprintf("%s:%s", hostname, port) produces a trailing colon when the URL has no explicit port—http://example.com becomes example.com:. Both cases break the initial dial and reconnect paths. Use parsedURL.Host directly instead. * [client] Add `handlerStarted` channel to synchronize stream establishment in tests Introduce `handlerStarted` channel in the test server to signal when the server-side handler begins, ensuring robust synchronization between client and server during stream establishment. Update relevant test cases to wait for this signal before proceeding. * [client] Replace `receivedAcks` map with atomic counter and improve stream establishment sync in tests Refactor acknowledgment tracking in tests to use an `atomic.Int32` counter instead of a map. Replace fixed sleep with robust synchronization by waiting on `handlerStarted` signal for stream establishment. * [client] Extract `handleReceiveError` to simplify receive logic Refactor error handling in `receive` to a dedicated `handleReceiveError` method. Streamlines the main logic and isolates error recovery, including backoff reset and connection recreation. * [client] recreate gRPC ClientConn on every retry to prevent dual backoff The flow client had two competing retry loops: our custom exponential backoff and gRPC's internal subchannel reconnection. When establishStream failed, the same ClientConn was reused, allowing gRPC's internal backoff state to accumulate and control dial timing independently. Changes: - Consolidate error handling into handleRetryableError, which now handles context cancellation, permanent errors, backoff reset, and connection recreation in a single path - Call recreateConnection on every retryable error so each retry gets a fresh ClientConn with no internal backoff state - Remove connGen tracking since Receive is sequential and protected by a new receiving guard against concurrent calls - Reduce RandomizationFactor from 1 to 0.5 to avoid near-zero backoff intervals --- flow/client/client.go | 193 ++++++++++++++++++----- flow/client/client_test.go | 312 +++++++++++++++++++++++++++++++++++-- 2 files changed, 451 insertions(+), 54 deletions(-) diff --git a/flow/client/client.go b/flow/client/client.go index 318fcfe1e..8ad637974 100644 --- a/flow/client/client.go +++ b/flow/client/client.go @@ -14,7 +14,6 @@ import ( log "github.com/sirupsen/logrus" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" @@ -26,11 +25,22 @@ import ( "github.com/netbirdio/netbird/util/wsproxy" ) +var ErrClientClosed = errors.New("client is closed") + +// minHealthyDuration is the minimum time a stream must survive before a failure +// resets the backoff timer. Streams that fail faster are considered unhealthy and +// should not reset backoff, so that MaxElapsedTime can eventually stop retries. +const minHealthyDuration = 5 * time.Second + type GRPCClient struct { realClient proto.FlowServiceClient clientConn *grpc.ClientConn stream proto.FlowService_EventsClient - streamMu sync.Mutex + target string + opts []grpc.DialOption + closed bool // prevent creating conn in the middle of the Close + receiving bool // prevent concurrent Receive calls + mu sync.Mutex // protects clientConn, realClient, stream, closed, and receiving } func NewClient(addr, payload, signature string, interval time.Duration) (*GRPCClient, error) { @@ -65,7 +75,8 @@ func NewClient(addr, payload, signature string, interval time.Duration) (*GRPCCl grpc.WithDefaultServiceConfig(`{"healthCheckConfig": {"serviceName": ""}}`), ) - conn, err := grpc.NewClient(fmt.Sprintf("%s:%s", parsedURL.Hostname(), parsedURL.Port()), opts...) + target := parsedURL.Host + conn, err := grpc.NewClient(target, opts...) if err != nil { return nil, fmt.Errorf("creating new grpc client: %w", err) } @@ -73,30 +84,73 @@ func NewClient(addr, payload, signature string, interval time.Duration) (*GRPCCl return &GRPCClient{ realClient: proto.NewFlowServiceClient(conn), clientConn: conn, + target: target, + opts: opts, }, nil } func (c *GRPCClient) Close() error { - c.streamMu.Lock() - defer c.streamMu.Unlock() - + c.mu.Lock() + c.closed = true c.stream = nil - if err := c.clientConn.Close(); err != nil && !errors.Is(err, context.Canceled) { + conn := c.clientConn + c.clientConn = nil + c.mu.Unlock() + + if conn == nil { + return nil + } + + if err := conn.Close(); err != nil && !errors.Is(err, context.Canceled) { return fmt.Errorf("close client connection: %w", err) } return nil } +func (c *GRPCClient) Send(event *proto.FlowEvent) error { + c.mu.Lock() + stream := c.stream + c.mu.Unlock() + + if stream == nil { + return errors.New("stream not initialized") + } + + if err := stream.Send(event); err != nil { + return fmt.Errorf("send flow event: %w", err) + } + + return nil +} + func (c *GRPCClient) Receive(ctx context.Context, interval time.Duration, msgHandler func(msg *proto.FlowEventAck) error) error { + c.mu.Lock() + if c.receiving { + c.mu.Unlock() + return errors.New("concurrent Receive calls are not supported") + } + c.receiving = true + c.mu.Unlock() + defer func() { + c.mu.Lock() + c.receiving = false + c.mu.Unlock() + }() + backOff := defaultBackoff(ctx, interval) operation := func() error { - if err := c.establishStreamAndReceive(ctx, msgHandler); err != nil { - if s, ok := status.FromError(err); ok && s.Code() == codes.Canceled { - return fmt.Errorf("receive: %w: %w", err, context.Canceled) - } + stream, err := c.establishStream(ctx) + if err != nil { + log.Errorf("failed to establish flow stream, retrying: %v", err) + return c.handleRetryableError(err, time.Time{}, backOff) + } + + streamStart := time.Now() + + if err := c.receive(stream, msgHandler); err != nil { log.Errorf("receive failed: %v", err) - return fmt.Errorf("receive: %w", err) + return c.handleRetryableError(err, streamStart, backOff) } return nil } @@ -108,37 +162,106 @@ func (c *GRPCClient) Receive(ctx context.Context, interval time.Duration, msgHan return nil } -func (c *GRPCClient) establishStreamAndReceive(ctx context.Context, msgHandler func(msg *proto.FlowEventAck) error) error { - if c.clientConn.GetState() == connectivity.Shutdown { - return errors.New("connection to flow receiver has been shut down") +// handleRetryableError resets the backoff timer if the stream was healthy long +// enough and recreates the underlying ClientConn so that gRPC's internal +// subchannel backoff does not accumulate and compete with our own retry timer. +// A zero streamStart means the stream was never established. +func (c *GRPCClient) handleRetryableError(err error, streamStart time.Time, backOff backoff.BackOff) error { + if isContextDone(err) { + return backoff.Permanent(err) } - stream, err := c.realClient.Events(ctx, grpc.WaitForReady(true)) - if err != nil { - return fmt.Errorf("create event stream: %w", err) + var permErr *backoff.PermanentError + if errors.As(err, &permErr) { + return err } - err = stream.Send(&proto.FlowEvent{IsInitiator: true}) + // Reset the backoff so the next retry starts with a short delay instead of + // continuing the already-elapsed timer. Only do this if the stream was healthy + // long enough; short-lived connect/drop cycles must not defeat MaxElapsedTime. + if !streamStart.IsZero() && time.Since(streamStart) >= minHealthyDuration { + backOff.Reset() + } + + if recreateErr := c.recreateConnection(); recreateErr != nil { + log.Errorf("recreate connection: %v", recreateErr) + return recreateErr + } + + log.Infof("connection recreated, retrying stream") + return fmt.Errorf("retrying after error: %w", err) +} + +func (c *GRPCClient) recreateConnection() error { + c.mu.Lock() + if c.closed { + c.mu.Unlock() + return backoff.Permanent(ErrClientClosed) + } + + conn, err := grpc.NewClient(c.target, c.opts...) if err != nil { - log.Infof("failed to send initiator message to flow receiver but will attempt to continue. Error: %s", err) + c.mu.Unlock() + return fmt.Errorf("create new connection: %w", err) + } + + old := c.clientConn + c.clientConn = conn + c.realClient = proto.NewFlowServiceClient(conn) + c.stream = nil + c.mu.Unlock() + + _ = old.Close() + + return nil +} + +func (c *GRPCClient) establishStream(ctx context.Context) (proto.FlowService_EventsClient, error) { + c.mu.Lock() + if c.closed { + c.mu.Unlock() + return nil, backoff.Permanent(ErrClientClosed) + } + cl := c.realClient + c.mu.Unlock() + + // open stream outside the lock — blocking operation + stream, err := cl.Events(ctx) + if err != nil { + return nil, fmt.Errorf("create event stream: %w", err) + } + streamReady := false + defer func() { + if !streamReady { + _ = stream.CloseSend() + } + }() + + if err = stream.Send(&proto.FlowEvent{IsInitiator: true}); err != nil { + return nil, fmt.Errorf("send initiator: %w", err) } if err = checkHeader(stream); err != nil { - return fmt.Errorf("check header: %w", err) + return nil, fmt.Errorf("check header: %w", err) } - c.streamMu.Lock() + c.mu.Lock() + if c.closed { + c.mu.Unlock() + return nil, backoff.Permanent(ErrClientClosed) + } c.stream = stream - c.streamMu.Unlock() + c.mu.Unlock() + streamReady = true - return c.receive(stream, msgHandler) + return stream, nil } func (c *GRPCClient) receive(stream proto.FlowService_EventsClient, msgHandler func(msg *proto.FlowEventAck) error) error { for { msg, err := stream.Recv() if err != nil { - return fmt.Errorf("receive from stream: %w", err) + return err } if msg.IsInitiator { @@ -169,7 +292,7 @@ func checkHeader(stream proto.FlowService_EventsClient) error { func defaultBackoff(ctx context.Context, interval time.Duration) backoff.BackOff { return backoff.WithContext(&backoff.ExponentialBackOff{ InitialInterval: 800 * time.Millisecond, - RandomizationFactor: 1, + RandomizationFactor: 0.5, Multiplier: 1.7, MaxInterval: interval / 2, MaxElapsedTime: 3 * 30 * 24 * time.Hour, // 3 months @@ -178,18 +301,12 @@ func defaultBackoff(ctx context.Context, interval time.Duration) backoff.BackOff }, ctx) } -func (c *GRPCClient) Send(event *proto.FlowEvent) error { - c.streamMu.Lock() - stream := c.stream - c.streamMu.Unlock() - - if stream == nil { - return errors.New("stream not initialized") +func isContextDone(err error) bool { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return true } - - if err := stream.Send(event); err != nil { - return fmt.Errorf("send flow event: %w", err) + if s, ok := status.FromError(err); ok { + return s.Code() == codes.Canceled || s.Code() == codes.DeadlineExceeded } - - return nil + return false } diff --git a/flow/client/client_test.go b/flow/client/client_test.go index efe01c003..55157acbc 100644 --- a/flow/client/client_test.go +++ b/flow/client/client_test.go @@ -2,8 +2,11 @@ package client_test import ( "context" + "encoding/binary" "errors" "net" + "sync" + "sync/atomic" "testing" "time" @@ -11,6 +14,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" flow "github.com/netbirdio/netbird/flow/client" "github.com/netbirdio/netbird/flow/proto" @@ -18,21 +23,89 @@ import ( type testServer struct { proto.UnimplementedFlowServiceServer - events chan *proto.FlowEvent - acks chan *proto.FlowEventAck - grpcSrv *grpc.Server - addr string + events chan *proto.FlowEvent + acks chan *proto.FlowEventAck + grpcSrv *grpc.Server + addr string + listener *connTrackListener + closeStream chan struct{} // signal server to close the stream + handlerDone chan struct{} // signaled each time Events() exits + handlerStarted chan struct{} // signaled each time Events() begins +} + +// connTrackListener wraps a net.Listener to track accepted connections +// so tests can forcefully close them to simulate PROTOCOL_ERROR/RST_STREAM. +type connTrackListener struct { + net.Listener + mu sync.Mutex + conns []net.Conn +} + +func (l *connTrackListener) Accept() (net.Conn, error) { + c, err := l.Listener.Accept() + if err != nil { + return nil, err + } + l.mu.Lock() + l.conns = append(l.conns, c) + l.mu.Unlock() + return c, nil +} + +// sendRSTStream writes a raw HTTP/2 RST_STREAM frame with PROTOCOL_ERROR +// (error code 0x1) on every tracked connection. This produces the exact error: +// +// rpc error: code = Internal desc = stream terminated by RST_STREAM with error code: PROTOCOL_ERROR +// +// HTTP/2 RST_STREAM frame format (9-byte header + 4-byte payload): +// +// Length (3 bytes): 0x000004 +// Type (1 byte): 0x03 (RST_STREAM) +// Flags (1 byte): 0x00 +// Stream ID (4 bytes): target stream (must have bit 31 clear) +// Error Code (4 bytes): 0x00000001 (PROTOCOL_ERROR) +func (l *connTrackListener) connCount() int { + l.mu.Lock() + defer l.mu.Unlock() + return len(l.conns) +} + +func (l *connTrackListener) sendRSTStream(streamID uint32) { + l.mu.Lock() + defer l.mu.Unlock() + + frame := make([]byte, 13) // 9-byte header + 4-byte payload + // Length = 4 (3 bytes, big-endian) + frame[0], frame[1], frame[2] = 0, 0, 4 + // Type = RST_STREAM (0x03) + frame[3] = 0x03 + // Flags = 0 + frame[4] = 0x00 + // Stream ID (4 bytes, big-endian, bit 31 reserved = 0) + binary.BigEndian.PutUint32(frame[5:9], streamID) + // Error Code = PROTOCOL_ERROR (0x1) + binary.BigEndian.PutUint32(frame[9:13], 0x1) + + for _, c := range l.conns { + _, _ = c.Write(frame) + } } func newTestServer(t *testing.T) *testServer { - listener, err := net.Listen("tcp", "127.0.0.1:0") + rawListener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) + listener := &connTrackListener{Listener: rawListener} + s := &testServer{ - events: make(chan *proto.FlowEvent, 100), - acks: make(chan *proto.FlowEventAck, 100), - grpcSrv: grpc.NewServer(), - addr: listener.Addr().String(), + events: make(chan *proto.FlowEvent, 100), + acks: make(chan *proto.FlowEventAck, 100), + grpcSrv: grpc.NewServer(), + addr: rawListener.Addr().String(), + listener: listener, + closeStream: make(chan struct{}, 1), + handlerDone: make(chan struct{}, 10), + handlerStarted: make(chan struct{}, 10), } proto.RegisterFlowServiceServer(s.grpcSrv, s) @@ -51,11 +124,23 @@ func newTestServer(t *testing.T) *testServer { } func (s *testServer) Events(stream proto.FlowService_EventsServer) error { + defer func() { + select { + case s.handlerDone <- struct{}{}: + default: + } + }() + err := stream.Send(&proto.FlowEventAck{IsInitiator: true}) if err != nil { return err } + select { + case s.handlerStarted <- struct{}{}: + default: + } + ctx, cancel := context.WithCancel(stream.Context()) defer cancel() @@ -91,6 +176,8 @@ func (s *testServer) Events(stream proto.FlowService_EventsServer) error { if err := stream.Send(ack); err != nil { return err } + case <-s.closeStream: + return status.Errorf(codes.Internal, "server closing stream") case <-ctx.Done(): return ctx.Err() } @@ -110,16 +197,13 @@ func TestReceive(t *testing.T) { assert.NoError(t, err, "failed to close flow") }) - receivedAcks := make(map[string]bool) + var ackCount atomic.Int32 receiveDone := make(chan struct{}) go func() { err := client.Receive(ctx, 1*time.Second, func(msg *proto.FlowEventAck) error { if !msg.IsInitiator && len(msg.EventId) > 0 { - id := string(msg.EventId) - receivedAcks[id] = true - - if len(receivedAcks) >= 3 { + if ackCount.Add(1) >= 3 { close(receiveDone) } } @@ -130,7 +214,11 @@ func TestReceive(t *testing.T) { } }() - time.Sleep(500 * time.Millisecond) + select { + case <-server.handlerStarted: + case <-time.After(3 * time.Second): + t.Fatal("timeout waiting for stream to be established") + } for i := 0; i < 3; i++ { eventID := uuid.New().String() @@ -153,7 +241,7 @@ func TestReceive(t *testing.T) { t.Fatal("timeout waiting for acks to be processed") } - assert.Equal(t, 3, len(receivedAcks)) + assert.Equal(t, int32(3), ackCount.Load()) } func TestReceive_ContextCancellation(t *testing.T) { @@ -254,3 +342,195 @@ func TestSend(t *testing.T) { t.Fatal("timeout waiting for ack to be received by flow") } } + +func TestNewClient_PermanentClose(t *testing.T) { + server := newTestServer(t) + + client, err := flow.NewClient("http://"+server.addr, "test-payload", "test-signature", 1*time.Second) + require.NoError(t, err) + + err = client.Close() + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) + + done := make(chan error, 1) + go func() { + done <- client.Receive(ctx, 1*time.Second, func(msg *proto.FlowEventAck) error { + return nil + }) + }() + + select { + case err := <-done: + require.ErrorIs(t, err, flow.ErrClientClosed) + case <-time.After(2 * time.Second): + t.Fatal("Receive did not return after Close — stuck in retry loop") + } +} + +func TestNewClient_CloseVerify(t *testing.T) { + server := newTestServer(t) + + client, err := flow.NewClient("http://"+server.addr, "test-payload", "test-signature", 1*time.Second) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) + + done := make(chan error, 1) + go func() { + done <- client.Receive(ctx, 1*time.Second, func(msg *proto.FlowEventAck) error { + return nil + }) + }() + + closeDone := make(chan struct{}, 1) + go func() { + _ = client.Close() + closeDone <- struct{}{} + }() + + select { + case err := <-done: + require.Error(t, err) + case <-time.After(2 * time.Second): + t.Fatal("Receive did not return after Close — stuck in retry loop") + } + + select { + case <-closeDone: + return + case <-time.After(2 * time.Second): + t.Fatal("Close did not return — blocked in retry loop") + } + +} + +func TestClose_WhileReceiving(t *testing.T) { + server := newTestServer(t) + client, err := flow.NewClient("http://"+server.addr, "test-payload", "test-signature", 1*time.Second) + require.NoError(t, err) + + ctx := context.Background() // no timeout — intentional + receiveDone := make(chan struct{}) + go func() { + _ = client.Receive(ctx, 1*time.Second, func(msg *proto.FlowEventAck) error { + return nil + }) + close(receiveDone) + }() + + // Wait for the server-side handler to confirm the stream is established. + select { + case <-server.handlerStarted: + case <-time.After(3 * time.Second): + t.Fatal("timeout waiting for stream to be established") + } + + closeDone := make(chan struct{}) + go func() { + _ = client.Close() + close(closeDone) + }() + + select { + case <-closeDone: + // Close returned — good + case <-time.After(2 * time.Second): + t.Fatal("Close blocked forever — Receive stuck in retry loop") + } + + select { + case <-receiveDone: + case <-time.After(2 * time.Second): + t.Fatal("Receive did not exit after Close") + } +} + +func TestReceive_ProtocolErrorStreamReconnect(t *testing.T) { + server := newTestServer(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + client, err := flow.NewClient("http://"+server.addr, "test-payload", "test-signature", 1*time.Second) + require.NoError(t, err) + t.Cleanup(func() { + err := client.Close() + assert.NoError(t, err, "failed to close flow") + }) + + // Track acks received before and after server-side stream close + var ackCount atomic.Int32 + receivedFirst := make(chan struct{}) + receivedAfterReconnect := make(chan struct{}) + + go func() { + err := client.Receive(ctx, 1*time.Second, func(msg *proto.FlowEventAck) error { + if msg.IsInitiator || len(msg.EventId) == 0 { + return nil + } + n := ackCount.Add(1) + if n == 1 { + close(receivedFirst) + } + if n == 2 { + close(receivedAfterReconnect) + } + return nil + }) + if err != nil && !errors.Is(err, context.Canceled) { + t.Logf("receive error: %v", err) + } + }() + + // Wait for stream to be established, then send first ack + select { + case <-server.handlerStarted: + case <-time.After(3 * time.Second): + t.Fatal("timeout waiting for stream to be established") + } + server.acks <- &proto.FlowEventAck{EventId: []byte("before-close")} + + select { + case <-receivedFirst: + case <-time.After(3 * time.Second): + t.Fatal("timeout waiting for first ack") + } + + // Snapshot connection count before injecting the fault. + connsBefore := server.listener.connCount() + + // Send a raw HTTP/2 RST_STREAM frame with PROTOCOL_ERROR on the TCP connection. + // gRPC multiplexes streams on stream IDs 1, 3, 5, ... (odd, client-initiated). + // Stream ID 1 is the client's first stream (our Events bidi stream). + // This produces the exact error the client sees in production: + // "stream terminated by RST_STREAM with error code: PROTOCOL_ERROR" + server.listener.sendRSTStream(1) + + // Wait for the old Events() handler to fully exit so it can no longer + // drain s.acks and drop our injected ack on a broken stream. + select { + case <-server.handlerDone: + case <-time.After(5 * time.Second): + t.Fatal("old Events() handler did not exit after RST_STREAM") + } + + require.Eventually(t, func() bool { + return server.listener.connCount() > connsBefore + }, 5*time.Second, 50*time.Millisecond, "client did not open a new TCP connection after RST_STREAM") + + server.acks <- &proto.FlowEventAck{EventId: []byte("after-close")} + + select { + case <-receivedAfterReconnect: + // Client successfully reconnected and received ack after server-side stream close + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for ack after server-side stream close — client did not reconnect") + } + + assert.GreaterOrEqual(t, int(ackCount.Load()), 2, "should have received acks before and after stream close") + assert.GreaterOrEqual(t, server.listener.connCount(), 2, "client should have created at least 2 TCP connections (original + reconnect)") +} From 4eed459f27bd7e90faa7fe99e1edf4a59dc71265 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Mon, 13 Apr 2026 23:23:57 +0900 Subject: [PATCH 306/374] [client] Fix DNS resolution with userspace WireGuard and kernel firewall (#5873) --- client/firewall/create_linux.go | 7 ++ client/firewall/uspfilter/common/hooks.go | 37 ++++++++++ client/firewall/uspfilter/filter.go | 45 ++---------- client/firewall/uspfilter/filter_test.go | 12 +-- client/firewall/uspfilter/hooks_filter.go | 90 +++++++++++++++++++++++ 5 files changed, 146 insertions(+), 45 deletions(-) create mode 100644 client/firewall/uspfilter/common/hooks.go create mode 100644 client/firewall/uspfilter/hooks_filter.go diff --git a/client/firewall/create_linux.go b/client/firewall/create_linux.go index d781ebd77..d916ebad4 100644 --- a/client/firewall/create_linux.go +++ b/client/firewall/create_linux.go @@ -56,6 +56,13 @@ func NewFirewall(iface IFaceMapper, stateManager *statemanager.Manager, flowLogg return createUserspaceFirewall(iface, nil, disableServerRoutes, flowLogger, mtu) } + // Native firewall handles packet filtering, but the userspace WireGuard bind + // needs a device filter for DNS interception hooks. Install a minimal + // hooks-only filter that passes all traffic through to the kernel firewall. + if err := iface.SetFilter(&uspfilter.HooksFilter{}); err != nil { + log.Warnf("failed to set hooks filter, DNS via memory hooks will not work: %v", err) + } + return fm, nil } diff --git a/client/firewall/uspfilter/common/hooks.go b/client/firewall/uspfilter/common/hooks.go new file mode 100644 index 000000000..dadd800dd --- /dev/null +++ b/client/firewall/uspfilter/common/hooks.go @@ -0,0 +1,37 @@ +package common + +import ( + "net/netip" + "sync/atomic" +) + +// PacketHook stores a registered hook for a specific IP:port. +type PacketHook struct { + IP netip.Addr + Port uint16 + Fn func([]byte) bool +} + +// HookMatches checks if a packet's destination matches the hook and invokes it. +func HookMatches(h *PacketHook, dstIP netip.Addr, dport uint16, packetData []byte) bool { + if h == nil { + return false + } + if h.IP == dstIP && h.Port == dport { + return h.Fn(packetData) + } + return false +} + +// SetHook atomically stores a hook, handling nil removal. +func SetHook(ptr *atomic.Pointer[PacketHook], ip netip.Addr, dPort uint16, hook func([]byte) bool) { + if hook == nil { + ptr.Store(nil) + return + } + ptr.Store(&PacketHook{ + IP: ip, + Port: dPort, + Fn: hook, + }) +} diff --git a/client/firewall/uspfilter/filter.go b/client/firewall/uspfilter/filter.go index cb9e1bb0a..24b3d0167 100644 --- a/client/firewall/uspfilter/filter.go +++ b/client/firewall/uspfilter/filter.go @@ -142,15 +142,8 @@ type Manager struct { mssClampEnabled bool // Only one hook per protocol is supported. Outbound direction only. - udpHookOut atomic.Pointer[packetHook] - tcpHookOut atomic.Pointer[packetHook] -} - -// packetHook stores a registered hook for a specific IP:port. -type packetHook struct { - ip netip.Addr - port uint16 - fn func([]byte) bool + udpHookOut atomic.Pointer[common.PacketHook] + tcpHookOut atomic.Pointer[common.PacketHook] } // decoder for packages @@ -912,21 +905,11 @@ func (m *Manager) trackInbound(d *decoder, srcIP, dstIP netip.Addr, ruleID []byt } func (m *Manager) udpHooksDrop(dport uint16, dstIP netip.Addr, packetData []byte) bool { - return hookMatches(m.udpHookOut.Load(), dstIP, dport, packetData) + return common.HookMatches(m.udpHookOut.Load(), dstIP, dport, packetData) } func (m *Manager) tcpHooksDrop(dport uint16, dstIP netip.Addr, packetData []byte) bool { - return hookMatches(m.tcpHookOut.Load(), dstIP, dport, packetData) -} - -func hookMatches(h *packetHook, dstIP netip.Addr, dport uint16, packetData []byte) bool { - if h == nil { - return false - } - if h.ip == dstIP && h.port == dport { - return h.fn(packetData) - } - return false + return common.HookMatches(m.tcpHookOut.Load(), dstIP, dport, packetData) } // filterInbound implements filtering logic for incoming packets. @@ -1337,28 +1320,12 @@ func (m *Manager) ruleMatches(rule *RouteRule, srcAddr, dstAddr netip.Addr, prot // SetUDPPacketHook sets the outbound UDP packet hook. Pass nil hook to remove. func (m *Manager) SetUDPPacketHook(ip netip.Addr, dPort uint16, hook func(packet []byte) bool) { - if hook == nil { - m.udpHookOut.Store(nil) - return - } - m.udpHookOut.Store(&packetHook{ - ip: ip, - port: dPort, - fn: hook, - }) + common.SetHook(&m.udpHookOut, ip, dPort, hook) } // SetTCPPacketHook sets the outbound TCP packet hook. Pass nil hook to remove. func (m *Manager) SetTCPPacketHook(ip netip.Addr, dPort uint16, hook func(packet []byte) bool) { - if hook == nil { - m.tcpHookOut.Store(nil) - return - } - m.tcpHookOut.Store(&packetHook{ - ip: ip, - port: dPort, - fn: hook, - }) + common.SetHook(&m.tcpHookOut, ip, dPort, hook) } // SetLogLevel sets the log level for the firewall manager diff --git a/client/firewall/uspfilter/filter_test.go b/client/firewall/uspfilter/filter_test.go index 5f0f9f860..39e8efa2c 100644 --- a/client/firewall/uspfilter/filter_test.go +++ b/client/firewall/uspfilter/filter_test.go @@ -202,9 +202,9 @@ func TestSetUDPPacketHook(t *testing.T) { h := manager.udpHookOut.Load() require.NotNil(t, h) - assert.Equal(t, netip.MustParseAddr("10.168.0.1"), h.ip) - assert.Equal(t, uint16(8000), h.port) - assert.True(t, h.fn(nil)) + assert.Equal(t, netip.MustParseAddr("10.168.0.1"), h.IP) + assert.Equal(t, uint16(8000), h.Port) + assert.True(t, h.Fn(nil)) assert.True(t, called) manager.SetUDPPacketHook(netip.MustParseAddr("10.168.0.1"), 8000, nil) @@ -226,9 +226,9 @@ func TestSetTCPPacketHook(t *testing.T) { h := manager.tcpHookOut.Load() require.NotNil(t, h) - assert.Equal(t, netip.MustParseAddr("10.168.0.1"), h.ip) - assert.Equal(t, uint16(53), h.port) - assert.True(t, h.fn(nil)) + assert.Equal(t, netip.MustParseAddr("10.168.0.1"), h.IP) + assert.Equal(t, uint16(53), h.Port) + assert.True(t, h.Fn(nil)) assert.True(t, called) manager.SetTCPPacketHook(netip.MustParseAddr("10.168.0.1"), 53, nil) diff --git a/client/firewall/uspfilter/hooks_filter.go b/client/firewall/uspfilter/hooks_filter.go new file mode 100644 index 000000000..8d3cc0f5c --- /dev/null +++ b/client/firewall/uspfilter/hooks_filter.go @@ -0,0 +1,90 @@ +package uspfilter + +import ( + "encoding/binary" + "net/netip" + "sync/atomic" + + "github.com/netbirdio/netbird/client/firewall/uspfilter/common" + "github.com/netbirdio/netbird/client/iface/device" +) + +const ( + ipv4HeaderMinLen = 20 + ipv4ProtoOffset = 9 + ipv4FlagsOffset = 6 + ipv4DstOffset = 16 + ipProtoUDP = 17 + ipProtoTCP = 6 + ipv4FragOffMask = 0x1fff + // dstPortOffset is the offset of the destination port within a UDP or TCP header. + dstPortOffset = 2 +) + +// HooksFilter is a minimal packet filter that only handles outbound DNS hooks. +// It is installed on the WireGuard interface when the userspace bind is active +// but a full firewall filter (Manager) is not needed because a native kernel +// firewall (nftables/iptables) handles packet filtering. +type HooksFilter struct { + udpHook atomic.Pointer[common.PacketHook] + tcpHook atomic.Pointer[common.PacketHook] +} + +var _ device.PacketFilter = (*HooksFilter)(nil) + +// FilterOutbound checks outbound packets for DNS hook matches. +// Only IPv4 packets matching the registered hook IP:port are intercepted. +// IPv6 and non-IP packets pass through unconditionally. +func (f *HooksFilter) FilterOutbound(packetData []byte, _ int) bool { + if len(packetData) < ipv4HeaderMinLen { + return false + } + + // Only process IPv4 packets, let everything else pass through. + if packetData[0]>>4 != 4 { + return false + } + + ihl := int(packetData[0]&0x0f) * 4 + if ihl < ipv4HeaderMinLen || len(packetData) < ihl+4 { + return false + } + + // Skip non-first fragments: they don't carry L4 headers. + flagsAndOffset := binary.BigEndian.Uint16(packetData[ipv4FlagsOffset : ipv4FlagsOffset+2]) + if flagsAndOffset&ipv4FragOffMask != 0 { + return false + } + + dstIP, ok := netip.AddrFromSlice(packetData[ipv4DstOffset : ipv4DstOffset+4]) + if !ok { + return false + } + + proto := packetData[ipv4ProtoOffset] + dstPort := binary.BigEndian.Uint16(packetData[ihl+dstPortOffset : ihl+dstPortOffset+2]) + + switch proto { + case ipProtoUDP: + return common.HookMatches(f.udpHook.Load(), dstIP, dstPort, packetData) + case ipProtoTCP: + return common.HookMatches(f.tcpHook.Load(), dstIP, dstPort, packetData) + default: + return false + } +} + +// FilterInbound allows all inbound packets (native firewall handles filtering). +func (f *HooksFilter) FilterInbound([]byte, int) bool { + return false +} + +// SetUDPPacketHook registers the UDP packet hook. +func (f *HooksFilter) SetUDPPacketHook(ip netip.Addr, dPort uint16, hook func([]byte) bool) { + common.SetHook(&f.udpHook, ip, dPort, hook) +} + +// SetTCPPacketHook registers the TCP packet hook. +func (f *HooksFilter) SetTCPPacketHook(ip netip.Addr, dPort uint16, hook func([]byte) bool) { + common.SetHook(&f.tcpHook, ip, dPort, hook) +} From 0a30b9b275e131a59cd6931b2c1cc4dc014909fa Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Tue, 14 Apr 2026 19:14:58 +0900 Subject: [PATCH 307/374] [management, proxy] Add CrowdSec IP reputation integration for reverse proxy (#5722) --- go.mod | 99 ++- go.sum | 201 +++-- .../reverseproxy/accesslogs/accesslogentry.go | 9 + .../modules/reverseproxy/domain/domain.go | 3 + .../reverseproxy/domain/manager/api.go | 1 + .../reverseproxy/domain/manager/manager.go | 3 + .../modules/reverseproxy/proxy/manager.go | 1 + .../reverseproxy/proxy/manager/manager.go | 7 + .../reverseproxy/proxy/manager_mock.go | 14 + .../modules/reverseproxy/proxy/proxy.go | 2 + .../service/manager/l4_port_test.go | 1 + .../modules/reverseproxy/service/service.go | 90 ++- management/internals/shared/grpc/proxy.go | 1 + management/server/store/sql_store.go | 54 ++ management/server/store/store.go | 1 + management/server/store/store_mock.go | 13 + proxy/cmd/proxy/cmd/root.go | 8 +- proxy/internal/accesslog/logger.go | 8 +- proxy/internal/accesslog/middleware.go | 1 + proxy/internal/auth/middleware.go | 14 + proxy/internal/auth/middleware_test.go | 6 +- proxy/internal/crowdsec/bouncer.go | 251 ++++++ proxy/internal/crowdsec/bouncer_test.go | 337 ++++++++ proxy/internal/crowdsec/registry.go | 103 +++ proxy/internal/crowdsec/registry_test.go | 66 ++ proxy/internal/proxy/context.go | 19 + proxy/internal/restrict/restrict.go | 162 +++- proxy/internal/restrict/restrict_test.go | 296 ++++++- proxy/internal/tcp/router.go | 25 +- proxy/internal/tcp/router_test.go | 8 +- proxy/internal/udp/relay.go | 23 +- proxy/management_integration_test.go | 4 + proxy/server.go | 87 ++- shared/management/http/api/openapi.yml | 17 + shared/management/http/api/types.gen.go | 33 + shared/management/proto/proxy_service.pb.go | 735 +++++++++--------- shared/management/proto/proxy_service.proto | 6 + 37 files changed, 2157 insertions(+), 552 deletions(-) create mode 100644 proxy/internal/crowdsec/bouncer.go create mode 100644 proxy/internal/crowdsec/bouncer_test.go create mode 100644 proxy/internal/crowdsec/registry.go create mode 100644 proxy/internal/crowdsec/registry_test.go diff --git a/go.mod b/go.mod index a95192600..76fb8b7be 100644 --- a/go.mod +++ b/go.mod @@ -13,28 +13,28 @@ require ( github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.27.6 github.com/rs/cors v1.8.0 - github.com/sirupsen/logrus v1.9.3 + github.com/sirupsen/logrus v1.9.4 github.com/spf13/cobra v1.10.1 github.com/spf13/pflag v1.0.9 github.com/vishvananda/netlink v1.3.1 golang.org/x/crypto v0.48.0 - golang.org/x/sys v0.41.0 + golang.org/x/sys v0.42.0 golang.zx2c4.com/wireguard v0.0.0-20230704135630-469159ecf7d1 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 golang.zx2c4.com/wireguard/windows v0.5.3 google.golang.org/grpc v1.79.3 google.golang.org/protobuf v1.36.11 - gopkg.in/natefinch/lumberjack.v2 v2.0.0 + gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) require ( fyne.io/fyne/v2 v2.7.0 fyne.io/systray v1.12.1-0.20260116214250-81f8e1a496f9 github.com/awnumar/memguard v0.23.0 - github.com/aws/aws-sdk-go-v2 v1.36.3 - github.com/aws/aws-sdk-go-v2/config v1.29.14 - github.com/aws/aws-sdk-go-v2/credentials v1.17.67 - github.com/aws/aws-sdk-go-v2/service/s3 v1.79.2 + github.com/aws/aws-sdk-go-v2 v1.38.3 + github.com/aws/aws-sdk-go-v2/config v1.31.6 + github.com/aws/aws-sdk-go-v2/credentials v1.18.10 + github.com/aws/aws-sdk-go-v2/service/s3 v1.87.3 github.com/c-robinson/iplib v1.0.3 github.com/caddyserver/certmagic v0.21.3 github.com/cilium/ebpf v0.15.0 @@ -42,6 +42,8 @@ require ( github.com/coreos/go-iptables v0.7.0 github.com/coreos/go-oidc/v3 v3.14.1 github.com/creack/pty v1.1.24 + github.com/crowdsecurity/crowdsec v1.7.7 + github.com/crowdsecurity/go-cs-bouncer v0.0.21 github.com/dexidp/dex v0.0.0-00010101000000-000000000000 github.com/dexidp/dex/api/v2 v2.4.0 github.com/eko/gocache/lib/v4 v4.2.0 @@ -60,7 +62,7 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.2-0.20240212192251-757544f21357 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 - github.com/hashicorp/go-version v1.6.0 + github.com/hashicorp/go-version v1.7.0 github.com/jackc/pgx/v5 v5.5.5 github.com/libdns/route53 v1.5.0 github.com/libp2p/go-nat v0.2.0 @@ -104,14 +106,14 @@ require ( github.com/yusufpapurcu/wmi v1.2.4 github.com/zcalusic/sysinfo v1.1.3 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 - go.opentelemetry.io/otel v1.42.0 + go.opentelemetry.io/otel v1.43.0 go.opentelemetry.io/otel/exporters/prometheus v0.64.0 - go.opentelemetry.io/otel/metric v1.42.0 - go.opentelemetry.io/otel/sdk/metric v1.42.0 + go.opentelemetry.io/otel/metric v1.43.0 + go.opentelemetry.io/otel/sdk/metric v1.43.0 go.uber.org/mock v0.5.2 go.uber.org/zap v1.27.0 goauthentik.io/api/v3 v3.2023051.3 - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 + golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b golang.org/x/mobile v0.0.0-20251113184115-a159579294ab golang.org/x/mod v0.32.0 golang.org/x/net v0.51.0 @@ -133,7 +135,7 @@ require ( cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.9.0 // indirect dario.cat/mergo v1.0.1 // indirect - filippo.io/edwards25519 v1.1.0 // indirect + filippo.io/edwards25519 v1.1.1 // indirect github.com/AppsFlyer/go-sundheit v0.6.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect @@ -144,36 +146,39 @@ require ( github.com/Microsoft/go-winio v0.6.2 // indirect github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/awnumar/memcall v0.4.0 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.6 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.6 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.6 // indirect github.com/aws/aws-sdk-go-v2/service/route53 v1.42.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect - github.com/aws/smithy-go v1.22.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 // indirect + github.com/aws/smithy-go v1.23.0 // indirect github.com/beevik/etree v1.6.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/caddyserver/zerossl v0.1.3 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect github.com/cpuguy83/dockercfg v0.3.2 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/crowdsecurity/go-cs-lib v0.0.25 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/docker v28.0.1+incompatible // indirect - github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/ebitengine/purego v0.8.2 // indirect + github.com/ebitengine/purego v0.8.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fredbi/uri v1.1.1 // indirect github.com/fyne-io/gl-js v0.2.0 // indirect @@ -187,11 +192,23 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect + github.com/go-openapi/errors v0.22.2 // indirect + github.com/go-openapi/jsonpointer v0.21.1 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/loads v0.22.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect + github.com/go-openapi/strfmt v0.23.0 // indirect + github.com/go-openapi/swag v0.23.1 // indirect + github.com/go-openapi/validate v0.24.0 // indirect github.com/go-sql-driver/mysql v1.9.3 // indirect github.com/go-text/render v0.2.0 // indirect github.com/go-text/typesetting v0.2.1 // indirect + github.com/goccy/go-yaml v1.18.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v4 v4.5.2 // indirect github.com/google/btree v1.1.2 // indirect + github.com/google/go-querystring v1.1.0 // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect github.com/googleapis/gax-go/v2 v2.15.0 // indirect @@ -212,16 +229,18 @@ require ( github.com/jinzhu/now v1.1.5 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jonboulle/clockwork v0.5.0 // indirect + github.com/josharian/intern v1.0.0 // indirect github.com/jsummers/gobmp v0.0.0-20230614200233-a9de23ed2e25 // indirect github.com/kelseyhightower/envconfig v1.4.0 // indirect github.com/klauspost/compress v1.18.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.7 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/kr/fs v0.1.0 // indirect github.com/lib/pq v1.10.9 // indirect github.com/libdns/libdns v0.2.2 // indirect github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae // indirect github.com/magiconair/properties v1.8.10 // indirect + github.com/mailru/easyjson v0.9.0 // indirect github.com/mattermost/xml-roundtrip-validator v0.1.0 // indirect github.com/mattn/go-sqlite3 v1.14.32 // indirect github.com/mdelapenya/tlscert v0.2.0 // indirect @@ -229,6 +248,7 @@ require ( github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 // indirect github.com/mholt/acmez/v2 v2.0.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect @@ -240,7 +260,8 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 // indirect github.com/nicksnyder/go-i18n/v2 v2.5.1 // indirect - github.com/nxadm/tail v1.4.8 // indirect + github.com/nxadm/tail v1.4.11 // indirect + github.com/oklog/ulid v1.3.1 // indirect github.com/onsi/ginkgo/v2 v2.9.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect @@ -250,32 +271,33 @@ require ( github.com/pion/transport/v2 v2.2.4 // indirect github.com/pion/turn/v4 v4.1.1 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.67.5 // indirect github.com/prometheus/otlptranslator v1.0.0 // indirect github.com/prometheus/procfs v0.19.2 // indirect - github.com/russellhaering/goxmldsig v1.5.0 // indirect + github.com/russellhaering/goxmldsig v1.6.0 // indirect github.com/rymdport/portal v0.4.2 // indirect - github.com/shirou/gopsutil/v4 v4.25.1 // indirect + github.com/shirou/gopsutil/v4 v4.25.8 // indirect github.com/shoenig/go-m1cpu v0.2.1 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/spf13/cast v1.7.0 // indirect github.com/srwiley/oksvg v0.0.0-20221011165216-be6e8873101c // indirect github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/tklauser/go-sysconf v0.3.14 // indirect - github.com/tklauser/numcpus v0.8.0 // indirect + github.com/tklauser/go-sysconf v0.3.15 // indirect + github.com/tklauser/numcpus v0.10.0 // indirect github.com/vishvananda/netns v0.0.5 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/wlynxg/anet v0.0.5 // indirect github.com/yuin/goldmark v1.7.8 // indirect github.com/zeebo/blake3 v0.2.3 // indirect + go.mongodb.org/mongo-driver v1.17.9 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect - go.opentelemetry.io/otel/sdk v1.42.0 // indirect - go.opentelemetry.io/otel/trace v1.42.0 // indirect + go.opentelemetry.io/otel/sdk v1.43.0 // indirect + go.opentelemetry.io/otel/trace v1.43.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect golang.org/x/image v0.33.0 // indirect @@ -285,6 +307,7 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect ) replace github.com/kardianos/service => github.com/netbirdio/service v0.0.0-20240911161631-f62744f42502 diff --git a/go.sum b/go.sum index a1d2bb71f..f06f7deba 100644 --- a/go.sum +++ b/go.sum @@ -9,8 +9,8 @@ cunicu.li/go-rosenpass v0.4.0 h1:LtPtBgFWY/9emfgC4glKLEqS0MJTylzV6+ChRhiZERw= cunicu.li/go-rosenpass v0.4.0/go.mod h1:MPbjH9nxV4l3vEagKVdFNwHOketqgS5/To1VYJplf/M= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +filippo.io/edwards25519 v1.1.1 h1:YpjwWWlNmGIDyXOn8zLzqiD+9TyIlPhGFG96P39uBpw= +filippo.io/edwards25519 v1.1.1/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= fyne.io/fyne/v2 v2.7.0 h1:GvZSpE3X0liU/fqstInVvRsaboIVpIWQ4/sfjDGIGGQ= fyne.io/fyne/v2 v2.7.0/go.mod h1:xClVlrhxl7D+LT+BWYmcrW4Nf+dJTvkhnPgji7spAwE= fyne.io/systray v1.12.1-0.20260116214250-81f8e1a496f9 h1:829+77I4TaMrcg9B3wf+gHhdSgoCVEgH2czlPXPbfj4= @@ -40,48 +40,50 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/awnumar/memcall v0.4.0 h1:B7hgZYdfH6Ot1Goaz8jGne/7i8xD4taZie/PNSFZ29g= github.com/awnumar/memcall v0.4.0/go.mod h1:8xOx1YbfyuCg3Fy6TO8DK0kZUua3V42/goA5Ru47E8w= github.com/awnumar/memguard v0.23.0 h1:sJ3a1/SWlcuKIQ7MV+R9p0Pvo9CWsMbGZvcZQtmc68A= github.com/awnumar/memguard v0.23.0/go.mod h1:olVofBrsPdITtJ2HgxQKrEYEMyIBAIciVG4wNnZhW9M= -github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= -github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14= -github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM= -github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g= -github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM= -github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q= +github.com/aws/aws-sdk-go-v2 v1.38.3 h1:B6cV4oxnMs45fql4yRH+/Po/YU+597zgWqvDpYMturk= +github.com/aws/aws-sdk-go-v2 v1.38.3/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00= +github.com/aws/aws-sdk-go-v2/config v1.31.6 h1:a1t8fXY4GT4xjyJExz4knbuoxSCacB5hT/WgtfPyLjo= +github.com/aws/aws-sdk-go-v2/config v1.31.6/go.mod h1:5ByscNi7R+ztvOGzeUaIu49vkMk2soq5NaH5PYe33MQ= +github.com/aws/aws-sdk-go-v2/credentials v1.18.10 h1:xdJnXCouCx8Y0NncgoptztUocIYLKeQxrCgN6x9sdhg= +github.com/aws/aws-sdk-go-v2/credentials v1.18.10/go.mod h1:7tQk08ntj914F/5i9jC4+2HQTAuJirq7m1vZVIhEkWs= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 h1:wbjnrrMnKew78/juW7I2BtKQwa1qlf6EjQgS69uYY14= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6/go.mod h1:AtiqqNrDioJXuUgz3+3T0mBWN7Hro2n9wll2zRUc0ww= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 h1:uF68eJA6+S9iVr9WgX1NaRGyQ/6MdIyc4JNUo6TN1FA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6/go.mod h1:qlPeVZCGPiobx8wb1ft0GHT5l+dc6ldnwInDFaMvC7Y= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 h1:pa1DEC6JoI0zduhZePp3zmhWvk/xxm4NB8Hy/Tlsgos= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6/go.mod h1:gxEjPebnhWGJoaDdtDkA0JX46VRg1wcTHYe63OfX5pE= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 h1:lguz0bmOoGzozP9XfRJR1QIayEYo+2vP/No3OfLF0pU= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.6 h1:R0tNFJqfjHL3900cqhXuwQ+1K4G0xc9Yf8EDbFXCKEw= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.6/go.mod h1:y/7sDdu+aJvPtGXr4xYosdpq9a6T9Z0jkXfugmti0rI= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.6 h1:hncKj/4gR+TPauZgTAsxOxNcvBayhUlYZ6LO/BYiQ30= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.6/go.mod h1:OiIh45tp6HdJDDJGnja0mw8ihQGz3VGrUflLqSL0SmM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6 h1:LHS1YAIJXJ4K9zS+1d/xa9JAA9sL2QyXIQCQFQW/X08= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6/go.mod h1:c9PCiTEuh0wQID5/KqA32J+HAgZxN9tOGXKCiYJjTZI= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.6 h1:nEXUSAwyUfLTgnc9cxlDWy637qsq4UWwp3sNAfl0Z3Y= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.6/go.mod h1:HGzIULx4Ge3Do2V0FaiYKcyKzOqwrhUZgCI77NisswQ= github.com/aws/aws-sdk-go-v2/service/route53 v1.42.3 h1:MmLCRqP4U4Cw9gJ4bNrCG0mWqEtBlmAVleyelcHARMU= github.com/aws/aws-sdk-go-v2/service/route53 v1.42.3/go.mod h1:AMPjK2YnRh0YgOID3PqhJA1BRNfXDfGOnSsKHtAe8yA= -github.com/aws/aws-sdk-go-v2/service/s3 v1.79.2 h1:tWUG+4wZqdMl/znThEk9tcCy8tTMxq8dW0JTgamohrY= -github.com/aws/aws-sdk-go-v2/service/s3 v1.79.2/go.mod h1:U5SNqwhXB3Xe6F47kXvWihPl/ilGaEDe8HD/50Z9wxc= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/aws/aws-sdk-go-v2/service/s3 v1.87.3 h1:ETkfWcXP2KNPLecaDa++5bsQhCRa5M5sLUJa5DWYIIg= +github.com/aws/aws-sdk-go-v2/service/s3 v1.87.3/go.mod h1:+/3ZTqoYb3Ur7DObD00tarKMLMuKg8iqz5CHEanqTnw= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 h1:8OLZnVJPvjnrxEwHFg9hVUof/P4sibH+Ea4KKuqAGSg= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.1/go.mod h1:27M3BpVi0C02UiQh1w9nsBEit6pLhlaH3NHna6WUbDE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 h1:gKWSTnqudpo8dAxqBqZnDoDWCiEh/40FziUjr/mo6uA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2/go.mod h1:x7+rkNmRoEN1U13A6JE2fXne9EWyJy54o3n6d4mGaXQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 h1:YZPjhyaGzhDQEvsffDEcpycq49nl7fiGcfJTIo8BszI= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.2/go.mod h1:2dIN8qhQfv37BdUYGgEC8Q3tteM3zFxTI1MLO2O3J3c= +github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= +github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/beevik/etree v1.6.0 h1:u8Kwy8pp9D9XeITj2Z0XtA5qqZEmtJtuXZRQi+j03eE= github.com/beevik/etree v1.6.0/go.mod h1:bh4zJxiIr62SOf9pRzN7UUYaEDa9HEKafK25+sLc0Gc= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -99,6 +101,8 @@ github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+Y github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk= @@ -118,11 +122,18 @@ github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHf github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= +github.com/crowdsecurity/crowdsec v1.7.7 h1:sduZN763iXsrZodocWDrsR//7nLeffGu+RVkkIsbQkE= +github.com/crowdsecurity/crowdsec v1.7.7/go.mod h1:L1HLGPDnBYCcY+yfSFnuBbQ1G9DHEJN9c+Kevv9F+4Q= +github.com/crowdsecurity/go-cs-bouncer v0.0.21 h1:arPz0VtdVSaz+auOSfHythzkZVLyy18CzYvYab8UJDU= +github.com/crowdsecurity/go-cs-bouncer v0.0.21/go.mod h1:4JiH0XXA4KKnnWThItUpe5+heJHWzsLOSA2IWJqUDBA= +github.com/crowdsecurity/go-cs-lib v0.0.25 h1:Ov6VPW9yV+OPsbAIQk1iTkEWhwkpaG0v3lrBzeqjzj4= +github.com/crowdsecurity/go-cs-lib v0.0.25/go.mod h1:X0GMJY2CxdA1S09SpuqIKaWQsvRGxXmecUp9cP599dE= github.com/cunicu/circl v0.0.0-20230801113412-fec58fc7b5f6 h1:/DS5cDX3FJdl+XaN2D7XAwFpuanTxnp52DBLZAaJKx0= github.com/cunicu/circl v0.0.0-20230801113412-fec58fc7b5f6/go.mod h1:+CauBF6R70Jqcyl8N2hC8pAXYbWkGIezuSbuGLtRhnw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dexidp/dex/api/v2 v2.4.0 h1:gNba7n6BKVp8X4Jp24cxYn5rIIGhM6kDOXcZoL6tr9A= github.com/dexidp/dex/api/v2 v2.4.0/go.mod h1:/p550ADvFFh7K95VmhUD+jgm15VdaNnab9td8DHOpyI= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= @@ -131,12 +142,12 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/docker v28.0.1+incompatible h1:FCHjSRdXhNRFjlHMTv4jUNlIBbTeRjrWfeFuJp7jpo0= github.com/docker/docker v28.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= -github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I= -github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= +github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw= +github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/eko/gocache/lib/v4 v4.2.0 h1:MNykyi5Xw+5Wu3+PUrvtOCaKSZM1nUSVftbzmeC7Yuw= github.com/eko/gocache/lib/v4 v4.2.0/go.mod h1:7ViVmbU+CzDHzRpmB4SXKyyzyuJ8A3UW3/cszpcqB4M= github.com/eko/gocache/store/go_cache/v4 v4.2.2 h1:tAI9nl6TLoJyKG1ujF0CS0n/IgTEMl+NivxtR5R3/hw= @@ -155,6 +166,7 @@ github.com/fredbi/uri v1.1.1 h1:xZHJC08GZNIUhbP5ImTHnt5Ya0T8FI2VAwI/37kh2Ko= github.com/fredbi/uri v1.1.1/go.mod h1:4+DZQ5zBjEwQCDmXW5JdIjz0PUA+yJbvtBv+u+adr5o= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fyne-io/gl-js v0.2.0 h1:+EXMLVEa18EfkXBVKhifYB6OGs3HwKO3lUElA0LlAjs= @@ -187,6 +199,24 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.2 h1:rdxhzcBUazEcGccKqbY1Y7NS8FDcMyIRr0934jrYnZg= +github.com/go-openapi/errors v0.22.2/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= +github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= +github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= @@ -203,10 +233,14 @@ github.com/go-text/typesetting v0.2.1 h1:x0jMOGyO3d1qFAPI0j4GSsh7M0Q3Ypjzr4+CEVg github.com/go-text/typesetting v0.2.1/go.mod h1:mTOxEwasOFpAMBjEQDhdWRckoLLeI/+qrQeBCTGEt6M= github.com/go-text/typesetting-utils v0.0.0-20241103174707-87a29e9e6066 h1:qCuYC+94v2xrb1PoS4NIDe7DGYtLnU2wWiQe9a1B1c0= github.com/go-text/typesetting-utils v0.0.0-20241103174707-87a29e9e6066/go.mod h1:DDxDdQEnB70R8owOx3LVpEFvpMK9eeH1o2r0yZhFI9o= +github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= +github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= @@ -230,6 +264,7 @@ github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl76 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -237,6 +272,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= @@ -276,8 +313,8 @@ github.com/hashicorp/go-secure-stdlib/base62 v0.1.2/go.mod h1:EdWO6czbmthiwZ3/PU github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= @@ -319,6 +356,8 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I= github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jsummers/gobmp v0.0.0-20230614200233-a9de23ed2e25 h1:YLvr1eE6cdCqjOe972w/cYF+FjW34v27+9Vo5106B4M= github.com/jsummers/gobmp v0.0.0-20230614200233-a9de23ed2e25/go.mod h1:kLgvv7o6UM+0QSf0QjAse3wReFDsb9qbZJdfexWlrQw= @@ -330,8 +369,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= -github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= -github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= @@ -361,6 +400,8 @@ github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae h1:dIZY4ULFcto4tA github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattermost/xml-roundtrip-validator v0.1.0 h1:RXbVD2UAl7A7nOTR4u7E3ILa4IbtvKBHw64LDsmu9hU= github.com/mattermost/xml-roundtrip-validator v0.1.0/go.mod h1:qccnGMcpgwcNaBnxqpJpWWUiPNr5H3O8eDgGV9gT5To= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= @@ -384,6 +425,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= @@ -423,10 +466,13 @@ github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S github.com/nicksnyder/go-i18n/v2 v2.5.1 h1:IxtPxYsR9Gp60cGXjfuR/llTqV8aYMsC472zD0D1vHk= github.com/nicksnyder/go-i18n/v2 v2.5.1/go.mod h1:DrhgsSDZxoAfvVrBVLXoxZn/pN5TXqaDbq7ju94viiQ= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oapi-codegen/runtime v1.1.2 h1:P2+CubHq8fO4Q6fV1tqDBZHCwpVpvPg7oKiYzQgXIyI= github.com/oapi-codegen/runtime v1.1.2/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/okta/okta-sdk-golang/v2 v2.18.0 h1:cfDasMb7CShbZvOrF6n+DnLevWwiHgedWMGJ8M8xKDc= github.com/okta/okta-sdk-golang/v2 v2.18.0/go.mod h1:dz30v3ctAiMb7jpsCngGfQUAEGm1/NsWT92uTbNDQIs= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -447,8 +493,8 @@ github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq5 github.com/oschwald/maxminddb-golang v1.12.0/go.mod h1:q0Nob5lTCqyQ8WT6FYgS1L7PXKVVbgiymefNwIjPzgY= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0= -github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/petermattis/goid v0.0.0-20250303134427-723919f7f203 h1:E7Kmf11E4K7B5hDti2K2NqPb1nlYlGYsu02S1JNd/Bs= github.com/petermattis/goid v0.0.0-20250303134427-723919f7f203/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= @@ -486,8 +532,9 @@ github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= github.com/pkg/sftp v1.13.9 h1:4NGkvGudBL7GteO3m6qnaQ4pC0Kvf0onSVc9gR3EWBw= github.com/pkg/sftp v1.13.9/go.mod h1:OBN7bVXdstkFFN/gdnHPUb5TE8eb8G1Rp9wCItqjkkA= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= @@ -511,15 +558,15 @@ github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so= github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= github.com/rs/xid v1.3.0 h1:6NjYksEUlhurdVehpc7S7dk6DAmcKv8V9gG0FsVN2U4= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/russellhaering/goxmldsig v1.5.0 h1:AU2UkkYIUOTyZRbe08XMThaOCelArgvNfYapcmSjBNw= -github.com/russellhaering/goxmldsig v1.5.0/go.mod h1:x98CjQNFJcWfMxeOrMnMKg70lvDP6tE0nTaeUnjXDmk= +github.com/russellhaering/goxmldsig v1.6.0 h1:8fdWXEPh2k/NZNQBPFNoVfS3JmzS4ZprY/sAOpKQLks= +github.com/russellhaering/goxmldsig v1.6.0/go.mod h1:TrnaquDcYxWXfJrOjeMBTX4mLBeYAqaHEyUeWPxZlBM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/rymdport/portal v0.4.2 h1:7jKRSemwlTyVHHrTGgQg7gmNPJs88xkbKcIL3NlcmSU= github.com/rymdport/portal v0.4.2/go.mod h1:kFF4jslnJ8pD5uCi17brj/ODlfIidOxlgUDTO5ncnC4= github.com/shirou/gopsutil/v3 v3.24.4 h1:dEHgzZXt4LMNm+oYELpzl9YCqV65Yr/6SfrvgRBtXeU= github.com/shirou/gopsutil/v3 v3.24.4/go.mod h1:lTd2mdiOspcqLgAnr9/nGi71NkeMpWKdmhuxm9GusH8= -github.com/shirou/gopsutil/v4 v4.25.1 h1:QSWkTc+fu9LTAWfkZwZ6j8MSUk4A2LV7rbH0ZqmLjXs= -github.com/shirou/gopsutil/v4 v4.25.1/go.mod h1:RoUCUpndaJFtT+2zsZzzmhvbfGoDCJ7nFXKJf8GqJbI= +github.com/shirou/gopsutil/v4 v4.25.8 h1:NnAsw9lN7587WHxjJA9ryDnqhJpFH6A+wagYWTOH970= +github.com/shirou/gopsutil/v4 v4.25.8/go.mod h1:q9QdMmfAOVIw7a+eF86P7ISEU6ka+NLgkUxlopV4RwI= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/go-m1cpu v0.2.1 h1:yqRB4fvOge2+FyRXFkXqsyMoqPazv14Yyy+iyccT2E4= github.com/shoenig/go-m1cpu v0.2.1/go.mod h1:KkDOw6m3ZJQAPHbrzkZki4hnx+pDRR1Lo+ldA56wD5w= @@ -528,8 +575,8 @@ github.com/shoenig/test v1.7.0 h1:eWcHtTXa6QLnBvm0jgEabMRN/uJ4DMV3M8xUGgRkZmk= github.com/shoenig/test v1.7.0/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= +github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8= @@ -578,11 +625,11 @@ github.com/ti-mo/conntrack v0.5.1/go.mod h1:T6NCbkMdVU4qEIgwL0njA6lw/iCAbzchlnwm github.com/ti-mo/netfilter v0.5.2 h1:CTjOwFuNNeZ9QPdRXt1MZFLFUf84cKtiQutNauHWd40= github.com/ti-mo/netfilter v0.5.2/go.mod h1:Btx3AtFiOVdHReTDmP9AE+hlkOcvIy403u7BXXbWZKo= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= -github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4= +github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= -github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= -github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= +github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso= +github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0= @@ -611,28 +658,30 @@ github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= +go.mongodb.org/mongo-driver v1.17.9 h1:IexDdCuuNJ3BHrELgBlyaH9p60JXAvdzWR128q+U5tU= +go.mongodb.org/mongo-driver v1.17.9/go.mod h1:LlOhpH5NUEfhxcAwG0UEkMqwYcc4JU18gtCdGudk/tQ= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 h1:yI1/OhfEPy7J9eoa6Sj051C7n5dvpj0QX8g4sRchg04= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0/go.mod h1:NoUCKYWK+3ecatC4HjkRktREheMeEtrXoQxrqYFeHSc= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= -go.opentelemetry.io/otel v1.42.0 h1:lSQGzTgVR3+sgJDAU/7/ZMjN9Z+vUip7leaqBKy4sho= -go.opentelemetry.io/otel v1.42.0/go.mod h1:lJNsdRMxCUIWuMlVJWzecSMuNjE7dOYyWlqOXWkdqCc= +go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= +go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= go.opentelemetry.io/otel/exporters/prometheus v0.64.0 h1:g0LRDXMX/G1SEZtK8zl8Chm4K6GBwRkjPKE36LxiTYs= go.opentelemetry.io/otel/exporters/prometheus v0.64.0/go.mod h1:UrgcjnarfdlBDP3GjDIJWe6HTprwSazNjwsI+Ru6hro= -go.opentelemetry.io/otel/metric v1.42.0 h1:2jXG+3oZLNXEPfNmnpxKDeZsFI5o4J+nz6xUlaFdF/4= -go.opentelemetry.io/otel/metric v1.42.0/go.mod h1:RlUN/7vTU7Ao/diDkEpQpnz3/92J9ko05BIwxYa2SSI= -go.opentelemetry.io/otel/sdk v1.42.0 h1:LyC8+jqk6UJwdrI/8VydAq/hvkFKNHZVIWuslJXYsDo= -go.opentelemetry.io/otel/sdk v1.42.0/go.mod h1:rGHCAxd9DAph0joO4W6OPwxjNTYWghRWmkHuGbayMts= -go.opentelemetry.io/otel/sdk/metric v1.42.0 h1:D/1QR46Clz6ajyZ3G8SgNlTJKBdGp84q9RKCAZ3YGuA= -go.opentelemetry.io/otel/sdk/metric v1.42.0/go.mod h1:Ua6AAlDKdZ7tdvaQKfSmnFTdHx37+J4ba8MwVCYM5hc= -go.opentelemetry.io/otel/trace v1.42.0 h1:OUCgIPt+mzOnaUTpOQcBiM/PLQ/Op7oq6g4LenLmOYY= -go.opentelemetry.io/otel/trace v1.42.0/go.mod h1:f3K9S+IFqnumBkKhRJMeaZeNk9epyhnCmQh/EysQCdc= +go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM= +go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY= +go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg= +go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg= +go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw= +go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A= +go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A= +go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -660,8 +709,8 @@ golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= golang.org/x/image v0.33.0 h1:LXRZRnv1+zGd5XBUVRFmYEphyyKJjQjCRiOuAP3sZfQ= golang.org/x/image v0.33.0/go.mod h1:DD3OsTYT9chzuzTQt+zMcOlBHgfoKQb1gry8p76Y1sc= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= @@ -733,8 +782,8 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -748,8 +797,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= -golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= +golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -836,8 +885,8 @@ gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8 gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= diff --git a/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go b/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go index a7f692569..f2ecfd5f9 100644 --- a/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go +++ b/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go @@ -1,6 +1,7 @@ package accesslogs import ( + "maps" "net" "net/netip" "time" @@ -37,6 +38,7 @@ type AccessLogEntry struct { BytesUpload int64 `gorm:"index"` BytesDownload int64 `gorm:"index"` Protocol AccessLogProtocol `gorm:"index"` + Metadata map[string]string `gorm:"serializer:json"` } // FromProto creates an AccessLogEntry from a proto.AccessLog @@ -55,6 +57,7 @@ func (a *AccessLogEntry) FromProto(serviceLog *proto.AccessLog) { a.BytesUpload = serviceLog.GetBytesUpload() a.BytesDownload = serviceLog.GetBytesDownload() a.Protocol = AccessLogProtocol(serviceLog.GetProtocol()) + a.Metadata = maps.Clone(serviceLog.GetMetadata()) if sourceIP := serviceLog.GetSourceIp(); sourceIP != "" { if addr, err := netip.ParseAddr(sourceIP); err == nil { @@ -117,6 +120,11 @@ func (a *AccessLogEntry) ToAPIResponse() *api.ProxyAccessLog { protocol = &p } + var metadata *map[string]string + if len(a.Metadata) > 0 { + metadata = &a.Metadata + } + return &api.ProxyAccessLog{ Id: a.ID, ServiceId: a.ServiceID, @@ -136,5 +144,6 @@ func (a *AccessLogEntry) ToAPIResponse() *api.ProxyAccessLog { BytesUpload: a.BytesUpload, BytesDownload: a.BytesDownload, Protocol: protocol, + Metadata: metadata, } } diff --git a/management/internals/modules/reverseproxy/domain/domain.go b/management/internals/modules/reverseproxy/domain/domain.go index ae13bffae..f65e31a07 100644 --- a/management/internals/modules/reverseproxy/domain/domain.go +++ b/management/internals/modules/reverseproxy/domain/domain.go @@ -20,6 +20,9 @@ type Domain struct { // RequireSubdomain is populated at query time. When true, the domain // cannot be used bare and a subdomain label must be prepended. Not persisted. RequireSubdomain *bool `gorm:"-"` + // SupportsCrowdSec is populated at query time from proxy cluster capabilities. + // Not persisted. + SupportsCrowdSec *bool `gorm:"-"` } // EventMeta returns activity event metadata for a domain diff --git a/management/internals/modules/reverseproxy/domain/manager/api.go b/management/internals/modules/reverseproxy/domain/manager/api.go index 640ab28a5..4493ef0ad 100644 --- a/management/internals/modules/reverseproxy/domain/manager/api.go +++ b/management/internals/modules/reverseproxy/domain/manager/api.go @@ -48,6 +48,7 @@ func domainToApi(d *domain.Domain) api.ReverseProxyDomain { Validated: d.Validated, SupportsCustomPorts: d.SupportsCustomPorts, RequireSubdomain: d.RequireSubdomain, + SupportsCrowdsec: d.SupportsCrowdSec, } if d.TargetCluster != "" { resp.TargetCluster = &d.TargetCluster diff --git a/management/internals/modules/reverseproxy/domain/manager/manager.go b/management/internals/modules/reverseproxy/domain/manager/manager.go index c6c41bfe5..2c4c1372e 100644 --- a/management/internals/modules/reverseproxy/domain/manager/manager.go +++ b/management/internals/modules/reverseproxy/domain/manager/manager.go @@ -33,6 +33,7 @@ type proxyManager interface { GetActiveClusterAddresses(ctx context.Context) ([]string, error) ClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool ClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool + ClusterSupportsCrowdSec(ctx context.Context, clusterAddr string) *bool } type Manager struct { @@ -90,6 +91,7 @@ func (m Manager) GetDomains(ctx context.Context, accountID, userID string) ([]*d } d.SupportsCustomPorts = m.proxyManager.ClusterSupportsCustomPorts(ctx, cluster) d.RequireSubdomain = m.proxyManager.ClusterRequireSubdomain(ctx, cluster) + d.SupportsCrowdSec = m.proxyManager.ClusterSupportsCrowdSec(ctx, cluster) ret = append(ret, d) } @@ -105,6 +107,7 @@ func (m Manager) GetDomains(ctx context.Context, accountID, userID string) ([]*d } if d.TargetCluster != "" { cd.SupportsCustomPorts = m.proxyManager.ClusterSupportsCustomPorts(ctx, d.TargetCluster) + cd.SupportsCrowdSec = m.proxyManager.ClusterSupportsCrowdSec(ctx, d.TargetCluster) } // Custom domains never require a subdomain by default since // the account owns them and should be able to use the bare domain. diff --git a/management/internals/modules/reverseproxy/proxy/manager.go b/management/internals/modules/reverseproxy/proxy/manager.go index 0368b84de..aa7cd8630 100644 --- a/management/internals/modules/reverseproxy/proxy/manager.go +++ b/management/internals/modules/reverseproxy/proxy/manager.go @@ -18,6 +18,7 @@ type Manager interface { GetActiveClusters(ctx context.Context) ([]Cluster, error) ClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool ClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool + ClusterSupportsCrowdSec(ctx context.Context, clusterAddr string) *bool CleanupStale(ctx context.Context, inactivityDuration time.Duration) error } diff --git a/management/internals/modules/reverseproxy/proxy/manager/manager.go b/management/internals/modules/reverseproxy/proxy/manager/manager.go index a92fffab9..d13334e83 100644 --- a/management/internals/modules/reverseproxy/proxy/manager/manager.go +++ b/management/internals/modules/reverseproxy/proxy/manager/manager.go @@ -18,6 +18,7 @@ type store interface { GetActiveProxyClusters(ctx context.Context) ([]proxy.Cluster, error) GetClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool GetClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool + GetClusterSupportsCrowdSec(ctx context.Context, clusterAddr string) *bool CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error } @@ -138,6 +139,12 @@ func (m Manager) ClusterRequireSubdomain(ctx context.Context, clusterAddr string return m.store.GetClusterRequireSubdomain(ctx, clusterAddr) } +// ClusterSupportsCrowdSec returns whether all active proxies in the cluster +// have CrowdSec configured (unanimous). Returns nil when no proxy has reported capabilities. +func (m Manager) ClusterSupportsCrowdSec(ctx context.Context, clusterAddr string) *bool { + return m.store.GetClusterSupportsCrowdSec(ctx, clusterAddr) +} + // CleanupStale removes proxies that haven't sent heartbeat in the specified duration func (m Manager) CleanupStale(ctx context.Context, inactivityDuration time.Duration) error { if err := m.store.CleanupStaleProxies(ctx, inactivityDuration); err != nil { diff --git a/management/internals/modules/reverseproxy/proxy/manager_mock.go b/management/internals/modules/reverseproxy/proxy/manager_mock.go index 97466c503..282ca0ba5 100644 --- a/management/internals/modules/reverseproxy/proxy/manager_mock.go +++ b/management/internals/modules/reverseproxy/proxy/manager_mock.go @@ -78,6 +78,20 @@ func (mr *MockManagerMockRecorder) ClusterRequireSubdomain(ctx, clusterAddr inte return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterRequireSubdomain", reflect.TypeOf((*MockManager)(nil).ClusterRequireSubdomain), ctx, clusterAddr) } +// ClusterSupportsCrowdSec mocks base method. +func (m *MockManager) ClusterSupportsCrowdSec(ctx context.Context, clusterAddr string) *bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClusterSupportsCrowdSec", ctx, clusterAddr) + ret0, _ := ret[0].(*bool) + return ret0 +} + +// ClusterSupportsCrowdSec indicates an expected call of ClusterSupportsCrowdSec. +func (mr *MockManagerMockRecorder) ClusterSupportsCrowdSec(ctx, clusterAddr interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterSupportsCrowdSec", reflect.TypeOf((*MockManager)(nil).ClusterSupportsCrowdSec), ctx, clusterAddr) +} + // Connect mocks base method. func (m *MockManager) Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string, capabilities *Capabilities) error { m.ctrl.T.Helper() diff --git a/management/internals/modules/reverseproxy/proxy/proxy.go b/management/internals/modules/reverseproxy/proxy/proxy.go index 4102e50fe..339c82446 100644 --- a/management/internals/modules/reverseproxy/proxy/proxy.go +++ b/management/internals/modules/reverseproxy/proxy/proxy.go @@ -11,6 +11,8 @@ type Capabilities struct { // RequireSubdomain indicates whether a subdomain label is required in // front of the cluster domain. RequireSubdomain *bool + // SupportsCrowdsec indicates whether this proxy has CrowdSec configured. + SupportsCrowdsec *bool } // Proxy represents a reverse proxy instance diff --git a/management/internals/modules/reverseproxy/service/manager/l4_port_test.go b/management/internals/modules/reverseproxy/service/manager/l4_port_test.go index 47dce3a64..28461641d 100644 --- a/management/internals/modules/reverseproxy/service/manager/l4_port_test.go +++ b/management/internals/modules/reverseproxy/service/manager/l4_port_test.go @@ -81,6 +81,7 @@ func setupL4Test(t *testing.T, customPortsSupported *bool) (*Manager, store.Stor mockCaps := proxy.NewMockManager(ctrl) mockCaps.EXPECT().ClusterSupportsCustomPorts(gomock.Any(), testCluster).Return(customPortsSupported).AnyTimes() mockCaps.EXPECT().ClusterRequireSubdomain(gomock.Any(), testCluster).Return((*bool)(nil)).AnyTimes() + mockCaps.EXPECT().ClusterSupportsCrowdSec(gomock.Any(), testCluster).Return((*bool)(nil)).AnyTimes() accountMgr := &mock_server.MockAccountManager{ StoreEventFunc: func(_ context.Context, _, _, _ string, _ activity.ActivityDescriber, _ map[string]any) {}, diff --git a/management/internals/modules/reverseproxy/service/service.go b/management/internals/modules/reverseproxy/service/service.go index 60b36917c..769e037bc 100644 --- a/management/internals/modules/reverseproxy/service/service.go +++ b/management/internals/modules/reverseproxy/service/service.go @@ -113,6 +113,7 @@ type AccessRestrictions struct { BlockedCIDRs []string `json:"blocked_cidrs,omitempty" gorm:"serializer:json"` AllowedCountries []string `json:"allowed_countries,omitempty" gorm:"serializer:json"` BlockedCountries []string `json:"blocked_countries,omitempty" gorm:"serializer:json"` + CrowdSecMode string `json:"crowdsec_mode,omitempty" gorm:"serializer:json"` } // Copy returns a deep copy of the AccessRestrictions. @@ -122,6 +123,7 @@ func (r AccessRestrictions) Copy() AccessRestrictions { BlockedCIDRs: slices.Clone(r.BlockedCIDRs), AllowedCountries: slices.Clone(r.AllowedCountries), BlockedCountries: slices.Clone(r.BlockedCountries), + CrowdSecMode: r.CrowdSecMode, } } @@ -555,7 +557,11 @@ func (s *Service) FromAPIRequest(req *api.ServiceRequest, accountID string) erro } if req.AccessRestrictions != nil { - s.Restrictions = restrictionsFromAPI(req.AccessRestrictions) + restrictions, err := restrictionsFromAPI(req.AccessRestrictions) + if err != nil { + return err + } + s.Restrictions = restrictions } return nil @@ -631,9 +637,9 @@ func authFromAPI(reqAuth *api.ServiceAuthConfig) AuthConfig { return auth } -func restrictionsFromAPI(r *api.AccessRestrictions) AccessRestrictions { +func restrictionsFromAPI(r *api.AccessRestrictions) (AccessRestrictions, error) { if r == nil { - return AccessRestrictions{} + return AccessRestrictions{}, nil } var res AccessRestrictions if r.AllowedCidrs != nil { @@ -648,11 +654,19 @@ func restrictionsFromAPI(r *api.AccessRestrictions) AccessRestrictions { if r.BlockedCountries != nil { res.BlockedCountries = *r.BlockedCountries } - return res + if r.CrowdsecMode != nil { + if !r.CrowdsecMode.Valid() { + return AccessRestrictions{}, fmt.Errorf("invalid crowdsec_mode %q", *r.CrowdsecMode) + } + res.CrowdSecMode = string(*r.CrowdsecMode) + } + return res, nil } func restrictionsToAPI(r AccessRestrictions) *api.AccessRestrictions { - if len(r.AllowedCIDRs) == 0 && len(r.BlockedCIDRs) == 0 && len(r.AllowedCountries) == 0 && len(r.BlockedCountries) == 0 { + if len(r.AllowedCIDRs) == 0 && len(r.BlockedCIDRs) == 0 && + len(r.AllowedCountries) == 0 && len(r.BlockedCountries) == 0 && + r.CrowdSecMode == "" { return nil } res := &api.AccessRestrictions{} @@ -668,11 +682,17 @@ func restrictionsToAPI(r AccessRestrictions) *api.AccessRestrictions { if len(r.BlockedCountries) > 0 { res.BlockedCountries = &r.BlockedCountries } + if r.CrowdSecMode != "" { + mode := api.AccessRestrictionsCrowdsecMode(r.CrowdSecMode) + res.CrowdsecMode = &mode + } return res } func restrictionsToProto(r AccessRestrictions) *proto.AccessRestrictions { - if len(r.AllowedCIDRs) == 0 && len(r.BlockedCIDRs) == 0 && len(r.AllowedCountries) == 0 && len(r.BlockedCountries) == 0 { + if len(r.AllowedCIDRs) == 0 && len(r.BlockedCIDRs) == 0 && + len(r.AllowedCountries) == 0 && len(r.BlockedCountries) == 0 && + r.CrowdSecMode == "" { return nil } return &proto.AccessRestrictions{ @@ -680,6 +700,7 @@ func restrictionsToProto(r AccessRestrictions) *proto.AccessRestrictions { BlockedCidrs: r.BlockedCIDRs, AllowedCountries: r.AllowedCountries, BlockedCountries: r.BlockedCountries, + CrowdsecMode: r.CrowdSecMode, } } @@ -988,7 +1009,20 @@ const ( // validateAccessRestrictions validates and normalizes access restriction // entries. Country codes are uppercased in place. +func validateCrowdSecMode(mode string) error { + switch mode { + case "", "off", "enforce", "observe": + return nil + default: + return fmt.Errorf("crowdsec_mode %q is invalid", mode) + } +} + func validateAccessRestrictions(r *AccessRestrictions) error { + if err := validateCrowdSecMode(r.CrowdSecMode); err != nil { + return err + } + if len(r.AllowedCIDRs) > maxCIDREntries { return fmt.Errorf("allowed_cidrs: exceeds maximum of %d entries", maxCIDREntries) } @@ -1002,35 +1036,37 @@ func validateAccessRestrictions(r *AccessRestrictions) error { return fmt.Errorf("blocked_countries: exceeds maximum of %d entries", maxCountryEntries) } - for i, raw := range r.AllowedCIDRs { + if err := validateCIDRList("allowed_cidrs", r.AllowedCIDRs); err != nil { + return err + } + if err := validateCIDRList("blocked_cidrs", r.BlockedCIDRs); err != nil { + return err + } + if err := normalizeCountryList("allowed_countries", r.AllowedCountries); err != nil { + return err + } + return normalizeCountryList("blocked_countries", r.BlockedCountries) +} + +func validateCIDRList(field string, cidrs []string) error { + for i, raw := range cidrs { prefix, err := netip.ParsePrefix(raw) if err != nil { - return fmt.Errorf("allowed_cidrs[%d]: %w", i, err) + return fmt.Errorf("%s[%d]: %w", field, i, err) } if prefix != prefix.Masked() { - return fmt.Errorf("allowed_cidrs[%d]: %q has host bits set, use %s instead", i, raw, prefix.Masked()) + return fmt.Errorf("%s[%d]: %q has host bits set, use %s instead", field, i, raw, prefix.Masked()) } } - for i, raw := range r.BlockedCIDRs { - prefix, err := netip.ParsePrefix(raw) - if err != nil { - return fmt.Errorf("blocked_cidrs[%d]: %w", i, err) - } - if prefix != prefix.Masked() { - return fmt.Errorf("blocked_cidrs[%d]: %q has host bits set, use %s instead", i, raw, prefix.Masked()) - } - } - for i, code := range r.AllowedCountries { + return nil +} + +func normalizeCountryList(field string, codes []string) error { + for i, code := range codes { if len(code) != 2 { - return fmt.Errorf("allowed_countries[%d]: %q must be a 2-letter ISO 3166-1 alpha-2 code", i, code) + return fmt.Errorf("%s[%d]: %q must be a 2-letter ISO 3166-1 alpha-2 code", field, i, code) } - r.AllowedCountries[i] = strings.ToUpper(code) - } - for i, code := range r.BlockedCountries { - if len(code) != 2 { - return fmt.Errorf("blocked_countries[%d]: %q must be a 2-letter ISO 3166-1 alpha-2 code", i, code) - } - r.BlockedCountries[i] = strings.ToUpper(code) + codes[i] = strings.ToUpper(code) } return nil } diff --git a/management/internals/shared/grpc/proxy.go b/management/internals/shared/grpc/proxy.go index 07732cea6..a5e352e75 100644 --- a/management/internals/shared/grpc/proxy.go +++ b/management/internals/shared/grpc/proxy.go @@ -188,6 +188,7 @@ func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest caps = &proxy.Capabilities{ SupportsCustomPorts: c.SupportsCustomPorts, RequireSubdomain: c.RequireSubdomain, + SupportsCrowdsec: c.SupportsCrowdsec, } } if err := s.proxyManager.Connect(ctx, proxyID, proxyAddress, peerInfo, caps); err != nil { diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 0b463a724..a34d9f70a 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -5514,6 +5514,7 @@ const proxyActiveThreshold = 2 * time.Minute var validCapabilityColumns = map[string]struct{}{ "supports_custom_ports": {}, "require_subdomain": {}, + "supports_crowdsec": {}, } // GetClusterSupportsCustomPorts returns whether any active proxy in the cluster @@ -5528,6 +5529,59 @@ func (s *SqlStore) GetClusterRequireSubdomain(ctx context.Context, clusterAddr s return s.getClusterCapability(ctx, clusterAddr, "require_subdomain") } +// GetClusterSupportsCrowdSec returns whether all active proxies in the cluster +// have CrowdSec configured. Returns nil when no proxy reported the capability. +// Unlike other capabilities that use ANY-true (for rolling upgrades), CrowdSec +// requires unanimous support: a single unconfigured proxy would let requests +// bypass reputation checks. +func (s *SqlStore) GetClusterSupportsCrowdSec(ctx context.Context, clusterAddr string) *bool { + return s.getClusterUnanimousCapability(ctx, clusterAddr, "supports_crowdsec") +} + +// getClusterUnanimousCapability returns an aggregated boolean capability +// requiring all active proxies in the cluster to report true. +func (s *SqlStore) getClusterUnanimousCapability(ctx context.Context, clusterAddr, column string) *bool { + if _, ok := validCapabilityColumns[column]; !ok { + log.WithContext(ctx).Errorf("invalid capability column: %s", column) + return nil + } + + var result struct { + Total int64 + Reported int64 + AllTrue bool + } + + // All active proxies must have reported the capability (no NULLs) and all + // must report true. A single unreported or false proxy means the cluster + // does not unanimously support the capability. + err := s.db.WithContext(ctx). + Model(&proxy.Proxy{}). + Select("COUNT(*) AS total, "+ + "COUNT(CASE WHEN "+column+" IS NOT NULL THEN 1 END) AS reported, "+ + "COUNT(*) > 0 AND COUNT(*) = COUNT(CASE WHEN "+column+" = true THEN 1 END) AS all_true"). + Where("cluster_address = ? AND status = ? AND last_seen > ?", + clusterAddr, "connected", time.Now().Add(-proxyActiveThreshold)). + Scan(&result).Error + + if err != nil { + log.WithContext(ctx).Errorf("query cluster capability %s for %s: %v", column, clusterAddr, err) + return nil + } + + if result.Total == 0 || result.Reported == 0 { + return nil + } + + // If any proxy has not reported (NULL), we can't confirm unanimous support. + if result.Reported < result.Total { + v := false + return &v + } + + return &result.AllTrue +} + // getClusterCapability returns an aggregated boolean capability for the given // cluster. It checks active (connected, recently seen) proxies and returns: // - *true if any proxy in the cluster has the capability set to true, diff --git a/management/server/store/store.go b/management/server/store/store.go index efd9a28fd..0d8b0678a 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -289,6 +289,7 @@ type Store interface { GetActiveProxyClusters(ctx context.Context) ([]proxy.Cluster, error) GetClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool GetClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool + GetClusterSupportsCrowdSec(ctx context.Context, clusterAddr string) *bool CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error GetCustomDomainsCounts(ctx context.Context) (total int64, validated int64, err error) diff --git a/management/server/store/store_mock.go b/management/server/store/store_mock.go index 5e609c4ec..beee13d96 100644 --- a/management/server/store/store_mock.go +++ b/management/server/store/store_mock.go @@ -165,6 +165,19 @@ func (mr *MockStoreMockRecorder) CleanupStaleProxies(ctx, inactivityDuration int return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupStaleProxies", reflect.TypeOf((*MockStore)(nil).CleanupStaleProxies), ctx, inactivityDuration) } +// GetClusterSupportsCrowdSec mocks base method. +func (m *MockStore) GetClusterSupportsCrowdSec(ctx context.Context, clusterAddr string) *bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClusterSupportsCrowdSec", ctx, clusterAddr) + ret0, _ := ret[0].(*bool) + return ret0 +} + +// GetClusterSupportsCrowdSec indicates an expected call of GetClusterSupportsCrowdSec. +func (mr *MockStoreMockRecorder) GetClusterSupportsCrowdSec(ctx, clusterAddr interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterSupportsCrowdSec", reflect.TypeOf((*MockStore)(nil).GetClusterSupportsCrowdSec), ctx, clusterAddr) +} // Close mocks base method. func (m *MockStore) Close(ctx context.Context) error { m.ctrl.T.Helper() diff --git a/proxy/cmd/proxy/cmd/root.go b/proxy/cmd/proxy/cmd/root.go index 1c36ee334..ec8980ad9 100644 --- a/proxy/cmd/proxy/cmd/root.go +++ b/proxy/cmd/proxy/cmd/root.go @@ -35,7 +35,7 @@ var ( ) var ( - logLevel string + logLevel string debugLogs bool mgmtAddr string addr string @@ -64,6 +64,8 @@ var ( supportsCustomPorts bool requireSubdomain bool geoDataDir string + crowdsecAPIURL string + crowdsecAPIKey string ) var rootCmd = &cobra.Command{ @@ -106,6 +108,8 @@ func init() { rootCmd.Flags().DurationVar(&maxDialTimeout, "max-dial-timeout", envDurationOrDefault("NB_PROXY_MAX_DIAL_TIMEOUT", 0), "Cap per-service backend dial timeout (0 = no cap)") rootCmd.Flags().DurationVar(&maxSessionIdleTimeout, "max-session-idle-timeout", envDurationOrDefault("NB_PROXY_MAX_SESSION_IDLE_TIMEOUT", 0), "Cap per-service session idle timeout (0 = no cap)") rootCmd.Flags().StringVar(&geoDataDir, "geo-data-dir", envStringOrDefault("NB_PROXY_GEO_DATA_DIR", "/var/lib/netbird/geolocation"), "Directory for the GeoLite2 MMDB file (auto-downloaded if missing)") + rootCmd.Flags().StringVar(&crowdsecAPIURL, "crowdsec-api-url", envStringOrDefault("NB_PROXY_CROWDSEC_API_URL", ""), "CrowdSec LAPI URL for IP reputation checks") + rootCmd.Flags().StringVar(&crowdsecAPIKey, "crowdsec-api-key", envStringOrDefault("NB_PROXY_CROWDSEC_API_KEY", ""), "CrowdSec bouncer API key") } // Execute runs the root command. @@ -187,6 +191,8 @@ func runServer(cmd *cobra.Command, args []string) error { MaxDialTimeout: maxDialTimeout, MaxSessionIdleTimeout: maxSessionIdleTimeout, GeoDataDir: geoDataDir, + CrowdSecAPIURL: crowdsecAPIURL, + CrowdSecAPIKey: crowdsecAPIKey, } ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM, syscall.SIGINT) diff --git a/proxy/internal/accesslog/logger.go b/proxy/internal/accesslog/logger.go index 3ed3275b5..3283f61db 100644 --- a/proxy/internal/accesslog/logger.go +++ b/proxy/internal/accesslog/logger.go @@ -2,6 +2,7 @@ package accesslog import ( "context" + "maps" "net/netip" "sync" "sync/atomic" @@ -126,6 +127,7 @@ type logEntry struct { BytesUpload int64 BytesDownload int64 Protocol Protocol + Metadata map[string]string } // Protocol identifies the transport protocol of an access log entry. @@ -150,8 +152,10 @@ type L4Entry struct { BytesDownload int64 // DenyReason, when non-empty, indicates the connection was denied. // Values match the HTTP auth mechanism strings: "ip_restricted", - // "country_restricted", "geo_unavailable". + // "country_restricted", "geo_unavailable", "crowdsec_ban", etc. DenyReason string + // Metadata carries extra context about the connection (e.g. CrowdSec verdict). + Metadata map[string]string } // LogL4 sends an access log entry for a layer-4 connection (TCP or UDP). @@ -167,6 +171,7 @@ func (l *Logger) LogL4(entry L4Entry) { DurationMs: entry.DurationMs, BytesUpload: entry.BytesUpload, BytesDownload: entry.BytesDownload, + Metadata: maps.Clone(entry.Metadata), } if entry.DenyReason != "" { if !l.allowDenyLog(entry.ServiceID, entry.DenyReason) { @@ -258,6 +263,7 @@ func (l *Logger) log(entry logEntry) { BytesUpload: entry.BytesUpload, BytesDownload: entry.BytesDownload, Protocol: string(entry.Protocol), + Metadata: entry.Metadata, }, }); err != nil { l.logger.WithFields(log.Fields{ diff --git a/proxy/internal/accesslog/middleware.go b/proxy/internal/accesslog/middleware.go index 81c790b17..5a0684c19 100644 --- a/proxy/internal/accesslog/middleware.go +++ b/proxy/internal/accesslog/middleware.go @@ -82,6 +82,7 @@ func (l *Logger) Middleware(next http.Handler) http.Handler { BytesUpload: bytesUpload, BytesDownload: bytesDownload, Protocol: ProtocolHTTP, + Metadata: capturedData.GetMetadata(), } l.logger.Debugf("response: request_id=%s method=%s host=%s path=%s status=%d duration=%dms source=%s origin=%s service=%s account=%s", requestID, r.Method, host, r.URL.Path, sw.status, duration.Milliseconds(), sourceIp, capturedData.GetOrigin(), capturedData.GetServiceID(), capturedData.GetAccountID()) diff --git a/proxy/internal/auth/middleware.go b/proxy/internal/auth/middleware.go index 670cafb68..f1d1fcc59 100644 --- a/proxy/internal/auth/middleware.go +++ b/proxy/internal/auth/middleware.go @@ -167,6 +167,20 @@ func (mw *Middleware) checkIPRestrictions(w http.ResponseWriter, r *http.Request return true } + if verdict.IsCrowdSec() { + if cd := proxy.CapturedDataFromContext(r.Context()); cd != nil { + cd.SetMetadata("crowdsec_verdict", verdict.String()) + if config.IPRestrictions.IsObserveOnly(verdict) { + cd.SetMetadata("crowdsec_mode", "observe") + } + } + } + + if config.IPRestrictions.IsObserveOnly(verdict) { + mw.logger.Debugf("CrowdSec observe: would block %s for %s (%s)", clientIP, r.Host, verdict) + return true + } + reason := verdict.String() mw.blockIPRestriction(r, reason) http.Error(w, "Forbidden", http.StatusForbidden) diff --git a/proxy/internal/auth/middleware_test.go b/proxy/internal/auth/middleware_test.go index 6063f070e..7c634106c 100644 --- a/proxy/internal/auth/middleware_test.go +++ b/proxy/internal/auth/middleware_test.go @@ -669,7 +669,7 @@ func TestCheckIPRestrictions_UnparseableAddress(t *testing.T) { mw := NewMiddleware(log.StandardLogger(), nil, nil) err := mw.AddDomain("example.com", nil, "", 0, "acc1", "svc1", - restrict.ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil)) + restrict.ParseFilter(restrict.FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}})) require.NoError(t, err) handler := mw.Protect(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -705,7 +705,7 @@ func TestCheckIPRestrictions_UsesCapturedDataClientIP(t *testing.T) { mw := NewMiddleware(log.StandardLogger(), nil, nil) err := mw.AddDomain("example.com", nil, "", 0, "acc1", "svc1", - restrict.ParseFilter([]string{"203.0.113.0/24"}, nil, nil, nil)) + restrict.ParseFilter(restrict.FilterConfig{AllowedCIDRs: []string{"203.0.113.0/24"}})) require.NoError(t, err) handler := mw.Protect(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -746,7 +746,7 @@ func TestCheckIPRestrictions_NilGeoWithCountryRules(t *testing.T) { mw := NewMiddleware(log.StandardLogger(), nil, nil) err := mw.AddDomain("example.com", nil, "", 0, "acc1", "svc1", - restrict.ParseFilter(nil, nil, []string{"US"}, nil)) + restrict.ParseFilter(restrict.FilterConfig{AllowedCountries: []string{"US"}})) require.NoError(t, err) handler := mw.Protect(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/proxy/internal/crowdsec/bouncer.go b/proxy/internal/crowdsec/bouncer.go new file mode 100644 index 000000000..06a452520 --- /dev/null +++ b/proxy/internal/crowdsec/bouncer.go @@ -0,0 +1,251 @@ +// Package crowdsec provides a CrowdSec stream bouncer that maintains a local +// decision cache for IP reputation checks. +package crowdsec + +import ( + "context" + "errors" + "net/netip" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/models" + csbouncer "github.com/crowdsecurity/go-cs-bouncer" + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/proxy/internal/restrict" +) + +// Bouncer wraps a CrowdSec StreamBouncer, maintaining a local cache of +// active decisions for fast IP lookups. It implements restrict.CrowdSecChecker. +type Bouncer struct { + mu sync.RWMutex + ips map[netip.Addr]*restrict.CrowdSecDecision + prefixes map[netip.Prefix]*restrict.CrowdSecDecision + ready atomic.Bool + + apiURL string + apiKey string + tickerInterval time.Duration + logger *log.Entry + + // lifeMu protects cancel and done from concurrent Start/Stop calls. + lifeMu sync.Mutex + cancel context.CancelFunc + done chan struct{} +} + +// compile-time check +var _ restrict.CrowdSecChecker = (*Bouncer)(nil) + +// NewBouncer creates a bouncer but does not start the stream. +func NewBouncer(apiURL, apiKey string, logger *log.Entry) *Bouncer { + return &Bouncer{ + apiURL: apiURL, + apiKey: apiKey, + logger: logger, + ips: make(map[netip.Addr]*restrict.CrowdSecDecision), + prefixes: make(map[netip.Prefix]*restrict.CrowdSecDecision), + } +} + +// Start launches the background goroutine that streams decisions from the +// CrowdSec LAPI. The stream runs until Stop is called or ctx is cancelled. +func (b *Bouncer) Start(ctx context.Context) error { + interval := b.tickerInterval + if interval == 0 { + interval = 10 * time.Second + } + stream := &csbouncer.StreamBouncer{ + APIKey: b.apiKey, + APIUrl: b.apiURL, + TickerInterval: interval.String(), + UserAgent: "netbird-proxy/1.0", + Scopes: []string{"ip", "range"}, + RetryInitialConnect: true, + } + + b.logger.Infof("connecting to CrowdSec LAPI at %s", b.apiURL) + + if err := stream.Init(); err != nil { + return err + } + + // Reset state from any previous run. + b.mu.Lock() + b.ips = make(map[netip.Addr]*restrict.CrowdSecDecision) + b.prefixes = make(map[netip.Prefix]*restrict.CrowdSecDecision) + b.mu.Unlock() + b.ready.Store(false) + + ctx, cancel := context.WithCancel(ctx) + done := make(chan struct{}) + + b.lifeMu.Lock() + if b.cancel != nil { + b.lifeMu.Unlock() + cancel() + return errors.New("bouncer already started") + } + b.cancel = cancel + b.done = done + b.lifeMu.Unlock() + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + if err := stream.Run(ctx); err != nil && ctx.Err() == nil { + b.logger.Errorf("CrowdSec stream ended: %v", err) + } + }() + + go func() { + defer wg.Done() + b.consumeStream(ctx, stream) + }() + + go func() { + wg.Wait() + close(done) + }() + + return nil +} + +// Stop cancels the stream and waits for all goroutines to finish. +func (b *Bouncer) Stop() { + b.lifeMu.Lock() + cancel := b.cancel + done := b.done + b.cancel = nil + b.lifeMu.Unlock() + + if cancel != nil { + cancel() + <-done + } +} + +// Ready returns true after the first batch of decisions has been processed. +func (b *Bouncer) Ready() bool { + return b.ready.Load() +} + +// CheckIP looks up addr in the local decision cache. Returns nil if no +// active decision exists for the address. +// +// Prefix lookups are O(1): instead of scanning all stored prefixes, we +// probe the map for every possible containing prefix of the address +// (at most 33 for IPv4, 129 for IPv6). +func (b *Bouncer) CheckIP(addr netip.Addr) *restrict.CrowdSecDecision { + addr = addr.Unmap() + + b.mu.RLock() + defer b.mu.RUnlock() + + if d, ok := b.ips[addr]; ok { + return d + } + + maxBits := 32 + if addr.Is6() { + maxBits = 128 + } + // Walk from most-specific to least-specific prefix so the narrowest + // matching decision wins when ranges overlap. + for bits := maxBits; bits >= 0; bits-- { + prefix := netip.PrefixFrom(addr, bits).Masked() + if d, ok := b.prefixes[prefix]; ok { + return d + } + } + + return nil +} + +func (b *Bouncer) consumeStream(ctx context.Context, stream *csbouncer.StreamBouncer) { + first := true + for { + select { + case <-ctx.Done(): + return + case resp, ok := <-stream.Stream: + if !ok { + return + } + b.mu.Lock() + b.applyDeleted(resp.Deleted) + b.applyNew(resp.New) + b.mu.Unlock() + + if first { + b.ready.Store(true) + b.logger.Info("CrowdSec bouncer synced initial decisions") + first = false + } + } + } +} + +func (b *Bouncer) applyDeleted(decisions []*models.Decision) { + for _, d := range decisions { + if d.Value == nil || d.Scope == nil { + continue + } + value := *d.Value + + if strings.ToLower(*d.Scope) == "range" || strings.Contains(value, "/") { + prefix, err := netip.ParsePrefix(value) + if err != nil { + b.logger.Debugf("skip unparsable CrowdSec range deletion %q: %v", value, err) + continue + } + prefix = normalizePrefix(prefix) + delete(b.prefixes, prefix) + } else { + addr, err := netip.ParseAddr(value) + if err != nil { + b.logger.Debugf("skip unparsable CrowdSec IP deletion %q: %v", value, err) + continue + } + delete(b.ips, addr.Unmap()) + } + } +} + +func (b *Bouncer) applyNew(decisions []*models.Decision) { + for _, d := range decisions { + if d.Value == nil || d.Type == nil || d.Scope == nil { + continue + } + dec := &restrict.CrowdSecDecision{Type: restrict.DecisionType(*d.Type)} + value := *d.Value + + if strings.ToLower(*d.Scope) == "range" || strings.Contains(value, "/") { + prefix, err := netip.ParsePrefix(value) + if err != nil { + b.logger.Debugf("skip unparsable CrowdSec range %q: %v", value, err) + continue + } + prefix = normalizePrefix(prefix) + b.prefixes[prefix] = dec + } else { + addr, err := netip.ParseAddr(value) + if err != nil { + b.logger.Debugf("skip unparsable CrowdSec IP %q: %v", value, err) + continue + } + b.ips[addr.Unmap()] = dec + } + } +} + +// normalizePrefix unmaps v4-mapped-v6 addresses and zeros host bits so +// the prefix is a valid map key that matches CheckIP's probe logic. +func normalizePrefix(p netip.Prefix) netip.Prefix { + return netip.PrefixFrom(p.Addr().Unmap(), p.Bits()).Masked() +} diff --git a/proxy/internal/crowdsec/bouncer_test.go b/proxy/internal/crowdsec/bouncer_test.go new file mode 100644 index 000000000..3bd8aa068 --- /dev/null +++ b/proxy/internal/crowdsec/bouncer_test.go @@ -0,0 +1,337 @@ +package crowdsec + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "net/netip" + "sync" + "testing" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/models" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/proxy/internal/restrict" +) + +func TestBouncer_CheckIP_Empty(t *testing.T) { + b := newTestBouncer() + b.ready.Store(true) + + assert.Nil(t, b.CheckIP(netip.MustParseAddr("1.2.3.4"))) +} + +func TestBouncer_CheckIP_ExactMatch(t *testing.T) { + b := newTestBouncer() + b.ready.Store(true) + b.ips[netip.MustParseAddr("10.0.0.1")] = &restrict.CrowdSecDecision{Type: restrict.DecisionBan} + + d := b.CheckIP(netip.MustParseAddr("10.0.0.1")) + require.NotNil(t, d) + assert.Equal(t, restrict.DecisionBan, d.Type) + + assert.Nil(t, b.CheckIP(netip.MustParseAddr("10.0.0.2"))) +} + +func TestBouncer_CheckIP_PrefixMatch(t *testing.T) { + b := newTestBouncer() + b.ready.Store(true) + b.prefixes[netip.MustParsePrefix("192.168.1.0/24")] = &restrict.CrowdSecDecision{Type: restrict.DecisionBan} + + d := b.CheckIP(netip.MustParseAddr("192.168.1.100")) + require.NotNil(t, d) + assert.Equal(t, restrict.DecisionBan, d.Type) + + assert.Nil(t, b.CheckIP(netip.MustParseAddr("192.168.2.1"))) +} + +func TestBouncer_CheckIP_UnmapsV4InV6(t *testing.T) { + b := newTestBouncer() + b.ready.Store(true) + b.ips[netip.MustParseAddr("10.0.0.1")] = &restrict.CrowdSecDecision{Type: restrict.DecisionBan} + + d := b.CheckIP(netip.MustParseAddr("::ffff:10.0.0.1")) + require.NotNil(t, d) + assert.Equal(t, restrict.DecisionBan, d.Type) +} + +func TestBouncer_Ready(t *testing.T) { + b := newTestBouncer() + assert.False(t, b.Ready()) + + b.ready.Store(true) + assert.True(t, b.Ready()) +} + +func TestBouncer_CheckIP_ExactBeforePrefix(t *testing.T) { + b := newTestBouncer() + b.ready.Store(true) + b.ips[netip.MustParseAddr("10.0.0.1")] = &restrict.CrowdSecDecision{Type: restrict.DecisionCaptcha} + b.prefixes[netip.MustParsePrefix("10.0.0.0/8")] = &restrict.CrowdSecDecision{Type: restrict.DecisionBan} + + d := b.CheckIP(netip.MustParseAddr("10.0.0.1")) + require.NotNil(t, d) + assert.Equal(t, restrict.DecisionCaptcha, d.Type) + + d2 := b.CheckIP(netip.MustParseAddr("10.0.0.2")) + require.NotNil(t, d2) + assert.Equal(t, restrict.DecisionBan, d2.Type) +} + +func TestBouncer_ApplyNew_IP(t *testing.T) { + b := newTestBouncer() + + b.applyNew(makeDecisions( + decision{scope: "ip", value: "1.2.3.4", dtype: "ban", scenario: "test/brute"}, + decision{scope: "ip", value: "5.6.7.8", dtype: "captcha", scenario: "test/crawl"}, + )) + + require.Len(t, b.ips, 2) + assert.Equal(t, restrict.DecisionBan, b.ips[netip.MustParseAddr("1.2.3.4")].Type) + assert.Equal(t, restrict.DecisionCaptcha, b.ips[netip.MustParseAddr("5.6.7.8")].Type) +} + +func TestBouncer_ApplyNew_Range(t *testing.T) { + b := newTestBouncer() + + b.applyNew(makeDecisions( + decision{scope: "range", value: "10.0.0.0/8", dtype: "ban"}, + )) + + require.Len(t, b.prefixes, 1) + assert.NotNil(t, b.prefixes[netip.MustParsePrefix("10.0.0.0/8")]) +} + +func TestBouncer_ApplyDeleted_IP(t *testing.T) { + b := newTestBouncer() + b.ips[netip.MustParseAddr("1.2.3.4")] = &restrict.CrowdSecDecision{Type: restrict.DecisionBan} + b.ips[netip.MustParseAddr("5.6.7.8")] = &restrict.CrowdSecDecision{Type: restrict.DecisionBan} + + b.applyDeleted(makeDecisions( + decision{scope: "ip", value: "1.2.3.4", dtype: "ban"}, + )) + + assert.Len(t, b.ips, 1) + assert.Nil(t, b.ips[netip.MustParseAddr("1.2.3.4")]) + assert.NotNil(t, b.ips[netip.MustParseAddr("5.6.7.8")]) +} + +func TestBouncer_ApplyDeleted_Range(t *testing.T) { + b := newTestBouncer() + b.prefixes[netip.MustParsePrefix("10.0.0.0/8")] = &restrict.CrowdSecDecision{Type: restrict.DecisionBan} + b.prefixes[netip.MustParsePrefix("192.168.0.0/16")] = &restrict.CrowdSecDecision{Type: restrict.DecisionBan} + + b.applyDeleted(makeDecisions( + decision{scope: "range", value: "10.0.0.0/8", dtype: "ban"}, + )) + + require.Len(t, b.prefixes, 1) + assert.NotNil(t, b.prefixes[netip.MustParsePrefix("192.168.0.0/16")]) +} + +func TestBouncer_ApplyNew_OverwritesExisting(t *testing.T) { + b := newTestBouncer() + b.ips[netip.MustParseAddr("1.2.3.4")] = &restrict.CrowdSecDecision{Type: restrict.DecisionBan} + + b.applyNew(makeDecisions( + decision{scope: "ip", value: "1.2.3.4", dtype: "captcha"}, + )) + + assert.Equal(t, restrict.DecisionCaptcha, b.ips[netip.MustParseAddr("1.2.3.4")].Type) +} + +func TestBouncer_ApplyNew_SkipsInvalid(t *testing.T) { + b := newTestBouncer() + + b.applyNew(makeDecisions( + decision{scope: "ip", value: "not-an-ip", dtype: "ban"}, + decision{scope: "range", value: "also-not-valid", dtype: "ban"}, + )) + + assert.Empty(t, b.ips) + assert.Empty(t, b.prefixes) +} + +// TestBouncer_StreamIntegration tests the full flow: fake LAPI → StreamBouncer → Bouncer cache → CheckIP. +func TestBouncer_StreamIntegration(t *testing.T) { + lapi := newFakeLAPI() + ts := httptest.NewServer(lapi) + defer ts.Close() + + // Seed the LAPI with initial decisions. + lapi.setDecisions( + decision{scope: "ip", value: "1.2.3.4", dtype: "ban", scenario: "crowdsecurity/ssh-bf"}, + decision{scope: "range", value: "10.0.0.0/8", dtype: "ban", scenario: "crowdsecurity/http-probing"}, + decision{scope: "ip", value: "5.5.5.5", dtype: "captcha", scenario: "crowdsecurity/http-crawl"}, + ) + + b := NewBouncer(ts.URL, "test-key", log.NewEntry(log.StandardLogger())) + b.tickerInterval = 200 * time.Millisecond + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + require.NoError(t, b.Start(ctx)) + defer b.Stop() + + // Wait for initial sync. + require.Eventually(t, b.Ready, 5*time.Second, 50*time.Millisecond, "bouncer should become ready") + + // Verify decisions are cached. + d := b.CheckIP(netip.MustParseAddr("1.2.3.4")) + require.NotNil(t, d, "1.2.3.4 should be banned") + assert.Equal(t, restrict.DecisionBan, d.Type) + + d2 := b.CheckIP(netip.MustParseAddr("10.1.2.3")) + require.NotNil(t, d2, "10.1.2.3 should match range ban") + assert.Equal(t, restrict.DecisionBan, d2.Type) + + d3 := b.CheckIP(netip.MustParseAddr("5.5.5.5")) + require.NotNil(t, d3, "5.5.5.5 should have captcha") + assert.Equal(t, restrict.DecisionCaptcha, d3.Type) + + assert.Nil(t, b.CheckIP(netip.MustParseAddr("9.9.9.9")), "unknown IP should be nil") + + // Simulate a delta update: delete one IP, add a new one. + lapi.setDelta( + []decision{{scope: "ip", value: "1.2.3.4", dtype: "ban"}}, + []decision{{scope: "ip", value: "2.3.4.5", dtype: "throttle", scenario: "crowdsecurity/http-flood"}}, + ) + + // Wait for the delta to be picked up. + require.Eventually(t, func() bool { + return b.CheckIP(netip.MustParseAddr("2.3.4.5")) != nil + }, 5*time.Second, 50*time.Millisecond, "new decision should appear") + + assert.Nil(t, b.CheckIP(netip.MustParseAddr("1.2.3.4")), "deleted decision should be gone") + + d4 := b.CheckIP(netip.MustParseAddr("2.3.4.5")) + require.NotNil(t, d4) + assert.Equal(t, restrict.DecisionThrottle, d4.Type) + + // Range ban should still be active. + assert.NotNil(t, b.CheckIP(netip.MustParseAddr("10.99.99.99"))) +} + +// Helpers + +func newTestBouncer() *Bouncer { + return &Bouncer{ + ips: make(map[netip.Addr]*restrict.CrowdSecDecision), + prefixes: make(map[netip.Prefix]*restrict.CrowdSecDecision), + logger: log.NewEntry(log.StandardLogger()), + } +} + +type decision struct { + scope string + value string + dtype string + scenario string +} + +func makeDecisions(decs ...decision) []*models.Decision { + out := make([]*models.Decision, len(decs)) + for i, d := range decs { + out[i] = &models.Decision{ + Scope: strPtr(d.scope), + Value: strPtr(d.value), + Type: strPtr(d.dtype), + Scenario: strPtr(d.scenario), + Duration: strPtr("1h"), + Origin: strPtr("cscli"), + } + } + return out +} + +func strPtr(s string) *string { return &s } + +// fakeLAPI is a minimal fake CrowdSec LAPI that serves /v1/decisions/stream. +type fakeLAPI struct { + mu sync.Mutex + initial []decision + newDelta []decision + delDelta []decision + served bool // true after the initial snapshot has been served +} + +func newFakeLAPI() *fakeLAPI { + return &fakeLAPI{} +} + +func (f *fakeLAPI) setDecisions(decs ...decision) { + f.mu.Lock() + defer f.mu.Unlock() + f.initial = decs + f.served = false +} + +func (f *fakeLAPI) setDelta(deleted, added []decision) { + f.mu.Lock() + defer f.mu.Unlock() + f.delDelta = deleted + f.newDelta = added +} + +func (f *fakeLAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/v1/decisions/stream" { + http.NotFound(w, r) + return + } + + f.mu.Lock() + defer f.mu.Unlock() + + resp := streamResponse{} + + if !f.served { + for _, d := range f.initial { + resp.New = append(resp.New, toLAPIDecision(d)) + } + f.served = true + } else { + for _, d := range f.delDelta { + resp.Deleted = append(resp.Deleted, toLAPIDecision(d)) + } + for _, d := range f.newDelta { + resp.New = append(resp.New, toLAPIDecision(d)) + } + // Clear delta after serving once. + f.delDelta = nil + f.newDelta = nil + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(resp) //nolint:errcheck +} + +// streamResponse mirrors the CrowdSec LAPI /v1/decisions/stream JSON structure. +type streamResponse struct { + New []*lapiDecision `json:"new"` + Deleted []*lapiDecision `json:"deleted"` +} + +type lapiDecision struct { + Duration *string `json:"duration"` + Origin *string `json:"origin"` + Scenario *string `json:"scenario"` + Scope *string `json:"scope"` + Type *string `json:"type"` + Value *string `json:"value"` +} + +func toLAPIDecision(d decision) *lapiDecision { + return &lapiDecision{ + Duration: strPtr("1h"), + Origin: strPtr("cscli"), + Scenario: strPtr(d.scenario), + Scope: strPtr(d.scope), + Type: strPtr(d.dtype), + Value: strPtr(d.value), + } +} diff --git a/proxy/internal/crowdsec/registry.go b/proxy/internal/crowdsec/registry.go new file mode 100644 index 000000000..652fb6f9f --- /dev/null +++ b/proxy/internal/crowdsec/registry.go @@ -0,0 +1,103 @@ +package crowdsec + +import ( + "context" + "sync" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/proxy/internal/types" +) + +// Registry manages a single shared Bouncer instance with reference counting. +// The bouncer starts when the first service acquires it and stops when the +// last service releases it. +type Registry struct { + mu sync.Mutex + bouncer *Bouncer + refs map[types.ServiceID]struct{} + apiURL string + apiKey string + logger *log.Entry + cancel context.CancelFunc +} + +// NewRegistry creates a registry. The bouncer is not started until Acquire is called. +func NewRegistry(apiURL, apiKey string, logger *log.Entry) *Registry { + return &Registry{ + apiURL: apiURL, + apiKey: apiKey, + logger: logger, + refs: make(map[types.ServiceID]struct{}), + } +} + +// Available returns true when the LAPI URL and API key are configured. +func (r *Registry) Available() bool { + return r.apiURL != "" && r.apiKey != "" +} + +// Acquire registers svcID as a consumer and starts the bouncer if this is the +// first consumer. Returns the shared Bouncer (which implements the restrict +// package's CrowdSecChecker interface). Returns nil if not Available. +func (r *Registry) Acquire(svcID types.ServiceID) *Bouncer { + r.mu.Lock() + defer r.mu.Unlock() + + if !r.Available() { + return nil + } + + if _, exists := r.refs[svcID]; exists { + return r.bouncer + } + + if r.bouncer == nil { + r.startLocked() + } + + // startLocked may fail, leaving r.bouncer nil. + if r.bouncer == nil { + return nil + } + + r.refs[svcID] = struct{}{} + return r.bouncer +} + +// Release removes svcID as a consumer. Stops the bouncer when the last +// consumer releases. +func (r *Registry) Release(svcID types.ServiceID) { + r.mu.Lock() + defer r.mu.Unlock() + + delete(r.refs, svcID) + + if len(r.refs) == 0 && r.bouncer != nil { + r.stopLocked() + } +} + +func (r *Registry) startLocked() { + b := NewBouncer(r.apiURL, r.apiKey, r.logger) + + ctx, cancel := context.WithCancel(context.Background()) + r.cancel = cancel + + if err := b.Start(ctx); err != nil { + r.logger.Errorf("failed to start CrowdSec bouncer: %v", err) + cancel() + return + } + + r.bouncer = b + r.logger.Info("CrowdSec bouncer started") +} + +func (r *Registry) stopLocked() { + r.bouncer.Stop() + r.cancel() + r.bouncer = nil + r.cancel = nil + r.logger.Info("CrowdSec bouncer stopped") +} diff --git a/proxy/internal/crowdsec/registry_test.go b/proxy/internal/crowdsec/registry_test.go new file mode 100644 index 000000000..f1567b186 --- /dev/null +++ b/proxy/internal/crowdsec/registry_test.go @@ -0,0 +1,66 @@ +package crowdsec + +import ( + "testing" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/proxy/internal/types" +) + +func TestRegistry_Available(t *testing.T) { + r := NewRegistry("http://localhost:8080/", "test-key", log.NewEntry(log.StandardLogger())) + assert.True(t, r.Available()) + + r2 := NewRegistry("", "", log.NewEntry(log.StandardLogger())) + assert.False(t, r2.Available()) + + r3 := NewRegistry("http://localhost:8080/", "", log.NewEntry(log.StandardLogger())) + assert.False(t, r3.Available()) +} + +func TestRegistry_Acquire_NotAvailable(t *testing.T) { + r := NewRegistry("", "", log.NewEntry(log.StandardLogger())) + b := r.Acquire("svc-1") + assert.Nil(t, b) +} + +func TestRegistry_Acquire_Idempotent(t *testing.T) { + r := newTestRegistry() + + b1 := r.Acquire("svc-1") + // Can't start without a real LAPI, but we can verify the ref tracking. + // The bouncer will be nil because Start fails, but the ref is tracked. + _ = b1 + + assert.Len(t, r.refs, 1) + + // Second acquire of same service should not add another ref. + r.Acquire("svc-1") + assert.Len(t, r.refs, 1) +} + +func TestRegistry_Release_Removes(t *testing.T) { + r := newTestRegistry() + r.refs[types.ServiceID("svc-1")] = struct{}{} + + r.Release("svc-1") + assert.Empty(t, r.refs) +} + +func TestRegistry_Release_Noop(t *testing.T) { + r := newTestRegistry() + // Releasing a service that was never acquired should not panic. + r.Release("nonexistent") + assert.Empty(t, r.refs) +} + +func newTestRegistry() *Registry { + return &Registry{ + apiURL: "http://localhost:8080/", + apiKey: "test-key", + logger: log.NewEntry(log.StandardLogger()), + refs: make(map[types.ServiceID]struct{}), + } +} diff --git a/proxy/internal/proxy/context.go b/proxy/internal/proxy/context.go index d3f67dc57..a888ad9ed 100644 --- a/proxy/internal/proxy/context.go +++ b/proxy/internal/proxy/context.go @@ -2,6 +2,7 @@ package proxy import ( "context" + "maps" "net/netip" "sync" @@ -52,6 +53,7 @@ type CapturedData struct { clientIP netip.Addr userID string authMethod string + metadata map[string]string } // NewCapturedData creates a CapturedData with the given request ID. @@ -150,6 +152,23 @@ func (c *CapturedData) GetAuthMethod() string { return c.authMethod } +// SetMetadata sets a key-value pair in the metadata map. +func (c *CapturedData) SetMetadata(key, value string) { + c.mu.Lock() + defer c.mu.Unlock() + if c.metadata == nil { + c.metadata = make(map[string]string) + } + c.metadata[key] = value +} + +// GetMetadata returns a copy of the metadata map. +func (c *CapturedData) GetMetadata() map[string]string { + c.mu.RLock() + defer c.mu.RUnlock() + return maps.Clone(c.metadata) +} + // WithCapturedData adds a CapturedData struct to the context. func WithCapturedData(ctx context.Context, data *CapturedData) context.Context { return context.WithValue(ctx, capturedDataKey, data) diff --git a/proxy/internal/restrict/restrict.go b/proxy/internal/restrict/restrict.go index a0d99ce93..f3e0fa695 100644 --- a/proxy/internal/restrict/restrict.go +++ b/proxy/internal/restrict/restrict.go @@ -12,12 +12,44 @@ import ( "github.com/netbirdio/netbird/proxy/internal/geolocation" ) +// defaultLogger is used when no logger is provided to ParseFilter. +var defaultLogger = log.NewEntry(log.StandardLogger()) + // GeoResolver resolves an IP address to geographic information. type GeoResolver interface { LookupAddr(addr netip.Addr) geolocation.Result Available() bool } +// DecisionType is the type of CrowdSec remediation action. +type DecisionType string + +const ( + DecisionBan DecisionType = "ban" + DecisionCaptcha DecisionType = "captcha" + DecisionThrottle DecisionType = "throttle" +) + +// CrowdSecDecision holds the type of a CrowdSec decision. +type CrowdSecDecision struct { + Type DecisionType +} + +// CrowdSecChecker queries CrowdSec decisions for an IP address. +type CrowdSecChecker interface { + CheckIP(addr netip.Addr) *CrowdSecDecision + Ready() bool +} + +// CrowdSecMode is the per-service enforcement mode. +type CrowdSecMode string + +const ( + CrowdSecOff CrowdSecMode = "" + CrowdSecEnforce CrowdSecMode = "enforce" + CrowdSecObserve CrowdSecMode = "observe" +) + // Filter evaluates IP restrictions. CIDR checks are performed first // (cheap), followed by country lookups (more expensive) only when needed. type Filter struct { @@ -25,32 +57,55 @@ type Filter struct { BlockedCIDRs []netip.Prefix AllowedCountries []string BlockedCountries []string + CrowdSec CrowdSecChecker + CrowdSecMode CrowdSecMode } -// ParseFilter builds a Filter from the raw string slices. Returns nil -// if all slices are empty. -func ParseFilter(allowedCIDRs, blockedCIDRs, allowedCountries, blockedCountries []string) *Filter { - if len(allowedCIDRs) == 0 && len(blockedCIDRs) == 0 && - len(allowedCountries) == 0 && len(blockedCountries) == 0 { +// FilterConfig holds the raw configuration for building a Filter. +type FilterConfig struct { + AllowedCIDRs []string + BlockedCIDRs []string + AllowedCountries []string + BlockedCountries []string + CrowdSec CrowdSecChecker + CrowdSecMode CrowdSecMode + Logger *log.Entry +} + +// ParseFilter builds a Filter from the config. Returns nil if no restrictions +// are configured. +func ParseFilter(cfg FilterConfig) *Filter { + hasCS := cfg.CrowdSecMode == CrowdSecEnforce || cfg.CrowdSecMode == CrowdSecObserve + if len(cfg.AllowedCIDRs) == 0 && len(cfg.BlockedCIDRs) == 0 && + len(cfg.AllowedCountries) == 0 && len(cfg.BlockedCountries) == 0 && !hasCS { return nil } - f := &Filter{ - AllowedCountries: normalizeCountryCodes(allowedCountries), - BlockedCountries: normalizeCountryCodes(blockedCountries), + logger := cfg.Logger + if logger == nil { + logger = defaultLogger } - for _, cidr := range allowedCIDRs { + + f := &Filter{ + AllowedCountries: normalizeCountryCodes(cfg.AllowedCountries), + BlockedCountries: normalizeCountryCodes(cfg.BlockedCountries), + } + if hasCS { + f.CrowdSec = cfg.CrowdSec + f.CrowdSecMode = cfg.CrowdSecMode + } + for _, cidr := range cfg.AllowedCIDRs { prefix, err := netip.ParsePrefix(cidr) if err != nil { - log.Warnf("skip invalid allowed CIDR %q: %v", cidr, err) + logger.Warnf("skip invalid allowed CIDR %q: %v", cidr, err) continue } f.AllowedCIDRs = append(f.AllowedCIDRs, prefix.Masked()) } - for _, cidr := range blockedCIDRs { + for _, cidr := range cfg.BlockedCIDRs { prefix, err := netip.ParsePrefix(cidr) if err != nil { - log.Warnf("skip invalid blocked CIDR %q: %v", cidr, err) + logger.Warnf("skip invalid blocked CIDR %q: %v", cidr, err) continue } f.BlockedCIDRs = append(f.BlockedCIDRs, prefix.Masked()) @@ -82,6 +137,15 @@ const ( // DenyGeoUnavailable indicates that country restrictions are configured // but the geo lookup is unavailable. DenyGeoUnavailable + // DenyCrowdSecBan indicates a CrowdSec "ban" decision. + DenyCrowdSecBan + // DenyCrowdSecCaptcha indicates a CrowdSec "captcha" decision. + DenyCrowdSecCaptcha + // DenyCrowdSecThrottle indicates a CrowdSec "throttle" decision. + DenyCrowdSecThrottle + // DenyCrowdSecUnavailable indicates enforce mode but the bouncer has not + // completed its initial sync. + DenyCrowdSecUnavailable ) // String returns the deny reason string matching the HTTP auth mechanism names. @@ -95,14 +159,42 @@ func (v Verdict) String() string { return "country_restricted" case DenyGeoUnavailable: return "geo_unavailable" + case DenyCrowdSecBan: + return "crowdsec_ban" + case DenyCrowdSecCaptcha: + return "crowdsec_captcha" + case DenyCrowdSecThrottle: + return "crowdsec_throttle" + case DenyCrowdSecUnavailable: + return "crowdsec_unavailable" default: return "unknown" } } +// IsCrowdSec returns true when the verdict originates from a CrowdSec check. +func (v Verdict) IsCrowdSec() bool { + switch v { + case DenyCrowdSecBan, DenyCrowdSecCaptcha, DenyCrowdSecThrottle, DenyCrowdSecUnavailable: + return true + default: + return false + } +} + +// IsObserveOnly returns true when v is a CrowdSec verdict and the filter is in +// observe mode. Callers should log the verdict but not block the request. +func (f *Filter) IsObserveOnly(v Verdict) bool { + if f == nil { + return false + } + return v.IsCrowdSec() && f.CrowdSecMode == CrowdSecObserve +} + // Check evaluates whether addr is permitted. CIDR rules are evaluated // first because they are O(n) prefix comparisons. Country rules run -// only when CIDR checks pass and require a geo lookup. +// only when CIDR checks pass and require a geo lookup. CrowdSec checks +// run last. func (f *Filter) Check(addr netip.Addr, geo GeoResolver) Verdict { if f == nil { return Allow @@ -115,7 +207,10 @@ func (f *Filter) Check(addr netip.Addr, geo GeoResolver) Verdict { if v := f.checkCIDR(addr); v != Allow { return v } - return f.checkCountry(addr, geo) + if v := f.checkCountry(addr, geo); v != Allow { + return v + } + return f.checkCrowdSec(addr) } func (f *Filter) checkCIDR(addr netip.Addr) Verdict { @@ -173,11 +268,48 @@ func (f *Filter) checkCountry(addr netip.Addr, geo GeoResolver) Verdict { return Allow } +func (f *Filter) checkCrowdSec(addr netip.Addr) Verdict { + if f.CrowdSecMode == CrowdSecOff { + return Allow + } + + // Checker nil with enforce means CrowdSec was requested but the proxy + // has no LAPI configured. Fail-closed. + if f.CrowdSec == nil { + if f.CrowdSecMode == CrowdSecEnforce { + return DenyCrowdSecUnavailable + } + return Allow + } + + if !f.CrowdSec.Ready() { + if f.CrowdSecMode == CrowdSecEnforce { + return DenyCrowdSecUnavailable + } + return Allow + } + + d := f.CrowdSec.CheckIP(addr) + if d == nil { + return Allow + } + + switch d.Type { + case DecisionCaptcha: + return DenyCrowdSecCaptcha + case DecisionThrottle: + return DenyCrowdSecThrottle + default: + return DenyCrowdSecBan + } +} + // HasRestrictions returns true if any restriction rules are configured. func (f *Filter) HasRestrictions() bool { if f == nil { return false } return len(f.AllowedCIDRs) > 0 || len(f.BlockedCIDRs) > 0 || - len(f.AllowedCountries) > 0 || len(f.BlockedCountries) > 0 + len(f.AllowedCountries) > 0 || len(f.BlockedCountries) > 0 || + f.CrowdSecMode == CrowdSecEnforce || f.CrowdSecMode == CrowdSecObserve } diff --git a/proxy/internal/restrict/restrict_test.go b/proxy/internal/restrict/restrict_test.go index 17a5848d8..abaa1afdc 100644 --- a/proxy/internal/restrict/restrict_test.go +++ b/proxy/internal/restrict/restrict_test.go @@ -29,21 +29,21 @@ func TestFilter_Check_NilFilter(t *testing.T) { } func TestFilter_Check_AllowedCIDR(t *testing.T) { - f := ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil) + f := ParseFilter(FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}}) assert.Equal(t, Allow, f.Check(netip.MustParseAddr("10.1.2.3"), nil)) assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("192.168.1.1"), nil)) } func TestFilter_Check_BlockedCIDR(t *testing.T) { - f := ParseFilter(nil, []string{"10.0.0.0/8"}, nil, nil) + f := ParseFilter(FilterConfig{BlockedCIDRs: []string{"10.0.0.0/8"}}) assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("10.1.2.3"), nil)) assert.Equal(t, Allow, f.Check(netip.MustParseAddr("192.168.1.1"), nil)) } func TestFilter_Check_AllowedAndBlockedCIDR(t *testing.T) { - f := ParseFilter([]string{"10.0.0.0/8"}, []string{"10.1.0.0/16"}, nil, nil) + f := ParseFilter(FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}, BlockedCIDRs: []string{"10.1.0.0/16"}}) assert.Equal(t, Allow, f.Check(netip.MustParseAddr("10.2.3.4"), nil), "allowed by allowlist, not in blocklist") assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("10.1.2.3"), nil), "allowed by allowlist but in blocklist") @@ -56,7 +56,7 @@ func TestFilter_Check_AllowedCountry(t *testing.T) { "2.2.2.2": "DE", "3.3.3.3": "CN", }) - f := ParseFilter(nil, nil, []string{"US", "DE"}, nil) + f := ParseFilter(FilterConfig{AllowedCountries: []string{"US", "DE"}}) assert.Equal(t, Allow, f.Check(netip.MustParseAddr("1.1.1.1"), geo), "US in allowlist") assert.Equal(t, Allow, f.Check(netip.MustParseAddr("2.2.2.2"), geo), "DE in allowlist") @@ -69,7 +69,7 @@ func TestFilter_Check_BlockedCountry(t *testing.T) { "2.2.2.2": "RU", "3.3.3.3": "US", }) - f := ParseFilter(nil, nil, nil, []string{"CN", "RU"}) + f := ParseFilter(FilterConfig{BlockedCountries: []string{"CN", "RU"}}) assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("1.1.1.1"), geo), "CN in blocklist") assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("2.2.2.2"), geo), "RU in blocklist") @@ -83,7 +83,7 @@ func TestFilter_Check_AllowedAndBlockedCountry(t *testing.T) { "3.3.3.3": "CN", }) // Allow US and DE, but block DE explicitly. - f := ParseFilter(nil, nil, []string{"US", "DE"}, []string{"DE"}) + f := ParseFilter(FilterConfig{AllowedCountries: []string{"US", "DE"}, BlockedCountries: []string{"DE"}}) assert.Equal(t, Allow, f.Check(netip.MustParseAddr("1.1.1.1"), geo), "US allowed and not blocked") assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("2.2.2.2"), geo), "DE allowed but also blocked, block wins") @@ -94,7 +94,7 @@ func TestFilter_Check_UnknownCountryWithAllowlist(t *testing.T) { geo := newMockGeo(map[string]string{ "1.1.1.1": "US", }) - f := ParseFilter(nil, nil, []string{"US"}, nil) + f := ParseFilter(FilterConfig{AllowedCountries: []string{"US"}}) assert.Equal(t, Allow, f.Check(netip.MustParseAddr("1.1.1.1"), geo), "known US in allowlist") assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("9.9.9.9"), geo), "unknown country denied when allowlist is active") @@ -104,34 +104,34 @@ func TestFilter_Check_UnknownCountryWithBlocklistOnly(t *testing.T) { geo := newMockGeo(map[string]string{ "1.1.1.1": "CN", }) - f := ParseFilter(nil, nil, nil, []string{"CN"}) + f := ParseFilter(FilterConfig{BlockedCountries: []string{"CN"}}) assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("1.1.1.1"), geo), "known CN in blocklist") assert.Equal(t, Allow, f.Check(netip.MustParseAddr("9.9.9.9"), geo), "unknown country allowed when only blocklist is active") } func TestFilter_Check_CountryWithoutGeo(t *testing.T) { - f := ParseFilter(nil, nil, []string{"US"}, nil) + f := ParseFilter(FilterConfig{AllowedCountries: []string{"US"}}) assert.Equal(t, DenyGeoUnavailable, f.Check(netip.MustParseAddr("1.2.3.4"), nil), "nil geo with country allowlist") } func TestFilter_Check_CountryBlocklistWithoutGeo(t *testing.T) { - f := ParseFilter(nil, nil, nil, []string{"CN"}) + f := ParseFilter(FilterConfig{BlockedCountries: []string{"CN"}}) assert.Equal(t, DenyGeoUnavailable, f.Check(netip.MustParseAddr("1.2.3.4"), nil), "nil geo with country blocklist") } func TestFilter_Check_GeoUnavailable(t *testing.T) { geo := &unavailableGeo{} - f := ParseFilter(nil, nil, []string{"US"}, nil) + f := ParseFilter(FilterConfig{AllowedCountries: []string{"US"}}) assert.Equal(t, DenyGeoUnavailable, f.Check(netip.MustParseAddr("1.2.3.4"), geo), "unavailable geo with country allowlist") - f2 := ParseFilter(nil, nil, nil, []string{"CN"}) + f2 := ParseFilter(FilterConfig{BlockedCountries: []string{"CN"}}) assert.Equal(t, DenyGeoUnavailable, f2.Check(netip.MustParseAddr("1.2.3.4"), geo), "unavailable geo with country blocklist") } func TestFilter_Check_CIDROnlySkipsGeo(t *testing.T) { - f := ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil) + f := ParseFilter(FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}}) // CIDR-only filter should never touch geo, so nil geo is fine. assert.Equal(t, Allow, f.Check(netip.MustParseAddr("10.1.2.3"), nil)) @@ -143,7 +143,7 @@ func TestFilter_Check_CIDRAllowThenCountryBlock(t *testing.T) { "10.1.2.3": "CN", "10.2.3.4": "US", }) - f := ParseFilter([]string{"10.0.0.0/8"}, nil, nil, []string{"CN"}) + f := ParseFilter(FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}, BlockedCountries: []string{"CN"}}) assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("10.1.2.3"), geo), "CIDR allowed but country blocked") assert.Equal(t, Allow, f.Check(netip.MustParseAddr("10.2.3.4"), geo), "CIDR allowed and country not blocked") @@ -151,12 +151,12 @@ func TestFilter_Check_CIDRAllowThenCountryBlock(t *testing.T) { } func TestParseFilter_Empty(t *testing.T) { - f := ParseFilter(nil, nil, nil, nil) + f := ParseFilter(FilterConfig{}) assert.Nil(t, f) } func TestParseFilter_InvalidCIDR(t *testing.T) { - f := ParseFilter([]string{"invalid", "10.0.0.0/8"}, nil, nil, nil) + f := ParseFilter(FilterConfig{AllowedCIDRs: []string{"invalid", "10.0.0.0/8"}}) assert.NotNil(t, f) assert.Len(t, f.AllowedCIDRs, 1, "invalid CIDR should be skipped") @@ -166,12 +166,12 @@ func TestParseFilter_InvalidCIDR(t *testing.T) { func TestFilter_HasRestrictions(t *testing.T) { assert.False(t, (*Filter)(nil).HasRestrictions()) assert.False(t, (&Filter{}).HasRestrictions()) - assert.True(t, ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil).HasRestrictions()) - assert.True(t, ParseFilter(nil, nil, []string{"US"}, nil).HasRestrictions()) + assert.True(t, ParseFilter(FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}}).HasRestrictions()) + assert.True(t, ParseFilter(FilterConfig{AllowedCountries: []string{"US"}}).HasRestrictions()) } func TestFilter_Check_IPv6CIDR(t *testing.T) { - f := ParseFilter([]string{"2001:db8::/32"}, nil, nil, nil) + f := ParseFilter(FilterConfig{AllowedCIDRs: []string{"2001:db8::/32"}}) assert.Equal(t, Allow, f.Check(netip.MustParseAddr("2001:db8::1"), nil), "v6 addr in v6 allowlist") assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("2001:db9::1"), nil), "v6 addr not in v6 allowlist") @@ -179,7 +179,7 @@ func TestFilter_Check_IPv6CIDR(t *testing.T) { } func TestFilter_Check_IPv4MappedIPv6(t *testing.T) { - f := ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil) + f := ParseFilter(FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}}) // A v4-mapped-v6 address like ::ffff:10.1.2.3 must match a v4 CIDR. v4mapped := netip.MustParseAddr("::ffff:10.1.2.3") @@ -191,7 +191,7 @@ func TestFilter_Check_IPv4MappedIPv6(t *testing.T) { } func TestFilter_Check_MixedV4V6CIDRs(t *testing.T) { - f := ParseFilter([]string{"10.0.0.0/8", "2001:db8::/32"}, nil, nil, nil) + f := ParseFilter(FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8", "2001:db8::/32"}}) assert.Equal(t, Allow, f.Check(netip.MustParseAddr("10.1.2.3"), nil), "v4 in v4 CIDR") assert.Equal(t, Allow, f.Check(netip.MustParseAddr("2001:db8::1"), nil), "v6 in v6 CIDR") @@ -202,7 +202,7 @@ func TestFilter_Check_MixedV4V6CIDRs(t *testing.T) { func TestParseFilter_CanonicalizesNonMaskedCIDR(t *testing.T) { // 1.1.1.1/24 has host bits set; ParseFilter should canonicalize to 1.1.1.0/24. - f := ParseFilter([]string{"1.1.1.1/24"}, nil, nil, nil) + f := ParseFilter(FilterConfig{AllowedCIDRs: []string{"1.1.1.1/24"}}) assert.Equal(t, netip.MustParsePrefix("1.1.1.0/24"), f.AllowedCIDRs[0]) // Verify it still matches correctly. @@ -264,7 +264,7 @@ func TestFilter_Check_CountryCodeCaseInsensitive(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - f := ParseFilter(nil, nil, tc.allowedCountries, tc.blockedCountries) + f := ParseFilter(FilterConfig{AllowedCountries: tc.allowedCountries, BlockedCountries: tc.blockedCountries}) got := f.Check(netip.MustParseAddr(tc.addr), geo) assert.Equal(t, tc.want, got) }) @@ -275,4 +275,252 @@ func TestFilter_Check_CountryCodeCaseInsensitive(t *testing.T) { type unavailableGeo struct{} func (u *unavailableGeo) LookupAddr(_ netip.Addr) geolocation.Result { return geolocation.Result{} } -func (u *unavailableGeo) Available() bool { return false } +func (u *unavailableGeo) Available() bool { return false } + +// mockCrowdSec is a test implementation of CrowdSecChecker. +type mockCrowdSec struct { + decisions map[string]*CrowdSecDecision + ready bool +} + +func (m *mockCrowdSec) CheckIP(addr netip.Addr) *CrowdSecDecision { + return m.decisions[addr.Unmap().String()] +} + +func (m *mockCrowdSec) Ready() bool { return m.ready } + +func TestFilter_CrowdSec_Enforce_Ban(t *testing.T) { + cs := &mockCrowdSec{ + decisions: map[string]*CrowdSecDecision{"1.2.3.4": {Type: DecisionBan}}, + ready: true, + } + f := ParseFilter(FilterConfig{CrowdSec: cs, CrowdSecMode: CrowdSecEnforce}) + + assert.Equal(t, DenyCrowdSecBan, f.Check(netip.MustParseAddr("1.2.3.4"), nil)) + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("5.6.7.8"), nil)) +} + +func TestFilter_CrowdSec_Enforce_Captcha(t *testing.T) { + cs := &mockCrowdSec{ + decisions: map[string]*CrowdSecDecision{"1.2.3.4": {Type: DecisionCaptcha}}, + ready: true, + } + f := ParseFilter(FilterConfig{CrowdSec: cs, CrowdSecMode: CrowdSecEnforce}) + + assert.Equal(t, DenyCrowdSecCaptcha, f.Check(netip.MustParseAddr("1.2.3.4"), nil)) +} + +func TestFilter_CrowdSec_Enforce_Throttle(t *testing.T) { + cs := &mockCrowdSec{ + decisions: map[string]*CrowdSecDecision{"1.2.3.4": {Type: DecisionThrottle}}, + ready: true, + } + f := ParseFilter(FilterConfig{CrowdSec: cs, CrowdSecMode: CrowdSecEnforce}) + + assert.Equal(t, DenyCrowdSecThrottle, f.Check(netip.MustParseAddr("1.2.3.4"), nil)) +} + +func TestFilter_CrowdSec_Observe_DoesNotBlock(t *testing.T) { + cs := &mockCrowdSec{ + decisions: map[string]*CrowdSecDecision{"1.2.3.4": {Type: DecisionBan}}, + ready: true, + } + f := ParseFilter(FilterConfig{CrowdSec: cs, CrowdSecMode: CrowdSecObserve}) + + verdict := f.Check(netip.MustParseAddr("1.2.3.4"), nil) + assert.Equal(t, DenyCrowdSecBan, verdict, "verdict should be ban") + assert.True(t, f.IsObserveOnly(verdict), "should be observe-only") +} + +func TestFilter_CrowdSec_Enforce_NotReady(t *testing.T) { + cs := &mockCrowdSec{ready: false} + f := ParseFilter(FilterConfig{CrowdSec: cs, CrowdSecMode: CrowdSecEnforce}) + + assert.Equal(t, DenyCrowdSecUnavailable, f.Check(netip.MustParseAddr("1.2.3.4"), nil)) +} + +func TestFilter_CrowdSec_Observe_NotReady_Allows(t *testing.T) { + cs := &mockCrowdSec{ready: false} + f := ParseFilter(FilterConfig{CrowdSec: cs, CrowdSecMode: CrowdSecObserve}) + + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("1.2.3.4"), nil)) +} + +func TestFilter_CrowdSec_Off(t *testing.T) { + cs := &mockCrowdSec{ + decisions: map[string]*CrowdSecDecision{"1.2.3.4": {Type: DecisionBan}}, + ready: true, + } + f := ParseFilter(FilterConfig{CrowdSec: cs, CrowdSecMode: CrowdSecOff}) + + // CrowdSecOff means the filter is nil (no restrictions). + assert.Nil(t, f) +} + +func TestFilter_IsObserveOnly(t *testing.T) { + f := &Filter{CrowdSecMode: CrowdSecObserve} + assert.True(t, f.IsObserveOnly(DenyCrowdSecBan)) + assert.True(t, f.IsObserveOnly(DenyCrowdSecCaptcha)) + assert.True(t, f.IsObserveOnly(DenyCrowdSecThrottle)) + assert.True(t, f.IsObserveOnly(DenyCrowdSecUnavailable)) + assert.False(t, f.IsObserveOnly(DenyCIDR)) + assert.False(t, f.IsObserveOnly(Allow)) + + f2 := &Filter{CrowdSecMode: CrowdSecEnforce} + assert.False(t, f2.IsObserveOnly(DenyCrowdSecBan)) +} + +// TestFilter_LayerInteraction exercises the evaluation order across all three +// restriction layers: CIDR -> Country -> CrowdSec. Each layer can only further +// restrict; no layer can relax a denial from an earlier layer. +// +// Layer order | Behavior +// ---------------|------------------------------------------------------- +// 1. CIDR | Allowlist narrows to specific ranges, blocklist removes +// | specific ranges. Deny here → stop, CrowdSec never runs. +// 2. Country | Allowlist/blocklist by geo. Deny here → stop. +// 3. CrowdSec | IP reputation. Can block IPs that passed layers 1-2. +// | Observe mode: verdict returned but caller doesn't block. +func TestFilter_LayerInteraction(t *testing.T) { + bannedIP := "10.1.2.3" + cleanIP := "10.2.3.4" + outsideIP := "192.168.1.1" + + cs := &mockCrowdSec{ + decisions: map[string]*CrowdSecDecision{bannedIP: {Type: DecisionBan}}, + ready: true, + } + geo := newMockGeo(map[string]string{ + bannedIP: "US", + cleanIP: "US", + outsideIP: "CN", + }) + + tests := []struct { + name string + config FilterConfig + addr string + want Verdict + }{ + // CIDR allowlist + CrowdSec enforce: CrowdSec blocks inside allowed range + { + name: "allowed CIDR + CrowdSec banned", + config: FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}, CrowdSec: cs, CrowdSecMode: CrowdSecEnforce}, + addr: bannedIP, + want: DenyCrowdSecBan, + }, + { + name: "allowed CIDR + CrowdSec clean", + config: FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}, CrowdSec: cs, CrowdSecMode: CrowdSecEnforce}, + addr: cleanIP, + want: Allow, + }, + { + name: "CIDR deny stops before CrowdSec", + config: FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}, CrowdSec: cs, CrowdSecMode: CrowdSecEnforce}, + addr: outsideIP, + want: DenyCIDR, + }, + + // CIDR blocklist + CrowdSec enforce: blocklist blocks first, CrowdSec blocks remaining + { + name: "blocked CIDR stops before CrowdSec", + config: FilterConfig{BlockedCIDRs: []string{"10.1.0.0/16"}, CrowdSec: cs, CrowdSecMode: CrowdSecEnforce}, + addr: bannedIP, + want: DenyCIDR, + }, + { + name: "not in blocklist + CrowdSec clean", + config: FilterConfig{BlockedCIDRs: []string{"10.1.0.0/16"}, CrowdSec: cs, CrowdSecMode: CrowdSecEnforce}, + addr: cleanIP, + want: Allow, + }, + + // Country allowlist + CrowdSec enforce + { + name: "allowed country + CrowdSec banned", + config: FilterConfig{AllowedCountries: []string{"US"}, CrowdSec: cs, CrowdSecMode: CrowdSecEnforce}, + addr: bannedIP, + want: DenyCrowdSecBan, + }, + { + name: "country deny stops before CrowdSec", + config: FilterConfig{AllowedCountries: []string{"US"}, CrowdSec: cs, CrowdSecMode: CrowdSecEnforce}, + addr: outsideIP, + want: DenyCountry, + }, + + // All three layers: CIDR allowlist + country blocklist + CrowdSec + { + name: "all layers: CIDR allow + country allow + CrowdSec ban", + config: FilterConfig{ + AllowedCIDRs: []string{"10.0.0.0/8"}, + BlockedCountries: []string{"CN"}, + CrowdSec: cs, + CrowdSecMode: CrowdSecEnforce, + }, + addr: bannedIP, // 10.x (CIDR ok), US (country ok), banned (CrowdSec deny) + want: DenyCrowdSecBan, + }, + { + name: "all layers: CIDR deny short-circuits everything", + config: FilterConfig{ + AllowedCIDRs: []string{"10.0.0.0/8"}, + BlockedCountries: []string{"CN"}, + CrowdSec: cs, + CrowdSecMode: CrowdSecEnforce, + }, + addr: outsideIP, // 192.x (CIDR deny) + want: DenyCIDR, + }, + + // Observe mode: verdict returned but IsObserveOnly is true + { + name: "observe mode: CrowdSec banned inside allowed CIDR", + config: FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}, CrowdSec: cs, CrowdSecMode: CrowdSecObserve}, + addr: bannedIP, + want: DenyCrowdSecBan, // verdict is ban, caller checks IsObserveOnly + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + f := ParseFilter(tc.config) + got := f.Check(netip.MustParseAddr(tc.addr), geo) + assert.Equal(t, tc.want, got) + + // Verify observe mode flag when applicable. + if tc.config.CrowdSecMode == CrowdSecObserve && got.IsCrowdSec() { + assert.True(t, f.IsObserveOnly(got), "observe mode verdict should be observe-only") + } + if tc.config.CrowdSecMode == CrowdSecEnforce && got.IsCrowdSec() { + assert.False(t, f.IsObserveOnly(got), "enforce mode verdict should not be observe-only") + } + }) + } +} + +func TestFilter_CrowdSec_Enforce_NilChecker(t *testing.T) { + // LAPI not configured: checker is nil but mode is enforce. Must fail closed. + f := ParseFilter(FilterConfig{CrowdSec: nil, CrowdSecMode: CrowdSecEnforce}) + + assert.Equal(t, DenyCrowdSecUnavailable, f.Check(netip.MustParseAddr("1.2.3.4"), nil)) +} + +func TestFilter_CrowdSec_Observe_NilChecker(t *testing.T) { + // LAPI not configured: checker is nil but mode is observe. Must allow. + f := ParseFilter(FilterConfig{CrowdSec: nil, CrowdSecMode: CrowdSecObserve}) + + assert.Equal(t, Allow, f.Check(netip.MustParseAddr("1.2.3.4"), nil)) +} + +func TestFilter_HasRestrictions_CrowdSec(t *testing.T) { + cs := &mockCrowdSec{ready: true} + f := ParseFilter(FilterConfig{CrowdSec: cs, CrowdSecMode: CrowdSecEnforce}) + assert.True(t, f.HasRestrictions()) + + // Enforce mode without checker (LAPI not configured): still has restrictions + // because Check() will fail-closed with DenyCrowdSecUnavailable. + f2 := ParseFilter(FilterConfig{CrowdSec: nil, CrowdSecMode: CrowdSecEnforce}) + assert.True(t, f2.HasRestrictions()) +} diff --git a/proxy/internal/tcp/router.go b/proxy/internal/tcp/router.go index 8255c36d3..9f8660aeb 100644 --- a/proxy/internal/tcp/router.go +++ b/proxy/internal/tcp/router.go @@ -479,9 +479,14 @@ func (r *Router) checkRestrictions(conn net.Conn, route Route) restrict.Verdict // On success (nil error), both conn and backend are closed by the relay. func (r *Router) relayTCP(ctx context.Context, conn net.Conn, sni SNIHost, route Route) error { if verdict := r.checkRestrictions(conn, route); verdict != restrict.Allow { - r.logger.Debugf("connection from %s rejected by access restrictions: %s", conn.RemoteAddr(), verdict) - r.logL4Deny(route, conn, verdict) - return errAccessRestricted + if route.Filter != nil && route.Filter.IsObserveOnly(verdict) { + r.logger.Debugf("CrowdSec observe: would block %s for %s (%s)", conn.RemoteAddr(), sni, verdict) + r.logL4Deny(route, conn, verdict, true) + } else { + r.logger.Debugf("connection from %s rejected by access restrictions: %s", conn.RemoteAddr(), verdict) + r.logL4Deny(route, conn, verdict, false) + return errAccessRestricted + } } svcCtx, err := r.acquireRelay(ctx, route) @@ -610,7 +615,7 @@ func (r *Router) logL4Entry(route Route, conn net.Conn, duration time.Duration, } // logL4Deny sends an access log entry for a denied connection. -func (r *Router) logL4Deny(route Route, conn net.Conn, verdict restrict.Verdict) { +func (r *Router) logL4Deny(route Route, conn net.Conn, verdict restrict.Verdict, observeOnly bool) { r.mu.RLock() al := r.accessLog r.mu.RUnlock() @@ -621,14 +626,22 @@ func (r *Router) logL4Deny(route Route, conn net.Conn, verdict restrict.Verdict) sourceIP, _ := addrFromConn(conn) - al.LogL4(accesslog.L4Entry{ + entry := accesslog.L4Entry{ AccountID: route.AccountID, ServiceID: route.ServiceID, Protocol: route.Protocol, Host: route.Domain, SourceIP: sourceIP, DenyReason: verdict.String(), - }) + } + if verdict.IsCrowdSec() { + entry.Metadata = map[string]string{"crowdsec_verdict": verdict.String()} + if observeOnly { + entry.Metadata["crowdsec_mode"] = "observe" + entry.DenyReason = "" + } + } + al.LogL4(entry) } // getOrCreateServiceCtxLocked returns the context for a service, creating one diff --git a/proxy/internal/tcp/router_test.go b/proxy/internal/tcp/router_test.go index 189cdc622..93b6560f4 100644 --- a/proxy/internal/tcp/router_test.go +++ b/proxy/internal/tcp/router_test.go @@ -1686,7 +1686,7 @@ func (f *fakeConn) RemoteAddr() net.Addr { return f.remote } func TestCheckRestrictions_UnparseableAddress(t *testing.T) { router := NewPortRouter(log.StandardLogger(), nil) - filter := restrict.ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil) + filter := restrict.ParseFilter(restrict.FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}}) route := Route{Filter: filter} conn := &fakeConn{remote: fakeAddr("not-an-ip")} @@ -1695,7 +1695,7 @@ func TestCheckRestrictions_UnparseableAddress(t *testing.T) { func TestCheckRestrictions_NilRemoteAddr(t *testing.T) { router := NewPortRouter(log.StandardLogger(), nil) - filter := restrict.ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil) + filter := restrict.ParseFilter(restrict.FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}}) route := Route{Filter: filter} conn := &fakeConn{remote: nil} @@ -1704,7 +1704,7 @@ func TestCheckRestrictions_NilRemoteAddr(t *testing.T) { func TestCheckRestrictions_AllowedAndDenied(t *testing.T) { router := NewPortRouter(log.StandardLogger(), nil) - filter := restrict.ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil) + filter := restrict.ParseFilter(restrict.FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}}) route := Route{Filter: filter} allowed := &fakeConn{remote: &net.TCPAddr{IP: net.IPv4(10, 1, 2, 3), Port: 1234}} @@ -1724,7 +1724,7 @@ func TestCheckRestrictions_NilFilter(t *testing.T) { func TestCheckRestrictions_IPv4MappedIPv6(t *testing.T) { router := NewPortRouter(log.StandardLogger(), nil) - filter := restrict.ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil) + filter := restrict.ParseFilter(restrict.FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}}) route := Route{Filter: filter} // net.IPv4() returns a 16-byte v4-in-v6 representation internally. diff --git a/proxy/internal/udp/relay.go b/proxy/internal/udp/relay.go index d20ecf48b..8293bfe81 100644 --- a/proxy/internal/udp/relay.go +++ b/proxy/internal/udp/relay.go @@ -336,8 +336,13 @@ func (r *Relay) checkAccessRestrictions(addr net.Addr) error { return fmt.Errorf("parse client address %s for restriction check: %w", addr, err) } if v := r.filter.Check(clientIP, r.geo); v != restrict.Allow { - r.logDeny(clientIP, v) - return fmt.Errorf("access restricted for %s", addr) + if r.filter.IsObserveOnly(v) { + r.logger.Debugf("CrowdSec observe: would block %s (%s)", clientIP, v) + r.logDeny(clientIP, v, true) + } else { + r.logDeny(clientIP, v, false) + return fmt.Errorf("access restricted for %s", addr) + } } return nil } @@ -498,19 +503,27 @@ func (r *Relay) logSessionEnd(sess *session) { } // logDeny sends an access log entry for a denied UDP packet. -func (r *Relay) logDeny(clientIP netip.Addr, verdict restrict.Verdict) { +func (r *Relay) logDeny(clientIP netip.Addr, verdict restrict.Verdict, observeOnly bool) { if r.accessLog == nil { return } - r.accessLog.LogL4(accesslog.L4Entry{ + entry := accesslog.L4Entry{ AccountID: r.accountID, ServiceID: r.serviceID, Protocol: accesslog.ProtocolUDP, Host: r.domain, SourceIP: clientIP, DenyReason: verdict.String(), - }) + } + if verdict.IsCrowdSec() { + entry.Metadata = map[string]string{"crowdsec_verdict": verdict.String()} + if observeOnly { + entry.Metadata["crowdsec_mode"] = "observe" + entry.DenyReason = "" + } + } + r.accessLog.LogL4(entry) } // Close stops the relay, waits for all session goroutines to exit, diff --git a/proxy/management_integration_test.go b/proxy/management_integration_test.go index 796cad622..17510f37e 100644 --- a/proxy/management_integration_test.go +++ b/proxy/management_integration_test.go @@ -228,6 +228,10 @@ func (m *testProxyManager) ClusterRequireSubdomain(_ context.Context, _ string) return nil } +func (m *testProxyManager) ClusterSupportsCrowdSec(_ context.Context, _ string) *bool { + return nil +} + func (m *testProxyManager) CleanupStale(_ context.Context, _ time.Duration) error { return nil } diff --git a/proxy/server.go b/proxy/server.go index acfe3c12d..fbd0d058e 100644 --- a/proxy/server.go +++ b/proxy/server.go @@ -42,6 +42,7 @@ import ( "github.com/netbirdio/netbird/proxy/internal/auth" "github.com/netbirdio/netbird/proxy/internal/certwatch" "github.com/netbirdio/netbird/proxy/internal/conntrack" + "github.com/netbirdio/netbird/proxy/internal/crowdsec" "github.com/netbirdio/netbird/proxy/internal/debug" "github.com/netbirdio/netbird/proxy/internal/geolocation" proxygrpc "github.com/netbirdio/netbird/proxy/internal/grpc" @@ -100,6 +101,13 @@ type Server struct { geo restrict.GeoResolver geoRaw *geolocation.Lookup + // crowdsecRegistry manages the shared CrowdSec bouncer lifecycle. + crowdsecRegistry *crowdsec.Registry + // crowdsecServices tracks which services have CrowdSec enabled for + // proper acquire/release lifecycle management. + crowdsecMu sync.Mutex + crowdsecServices map[types.ServiceID]bool + // routerReady is closed once mainRouter is fully initialized. // The mapping worker waits on this before processing updates. routerReady chan struct{} @@ -175,6 +183,10 @@ type Server struct { // GeoDataDir is the directory containing GeoLite2 MMDB files for // country-based access restrictions. Empty disables geo lookups. GeoDataDir string + // CrowdSecAPIURL is the CrowdSec LAPI URL. Empty disables CrowdSec. + CrowdSecAPIURL string + // CrowdSecAPIKey is the CrowdSec bouncer API key. Empty disables CrowdSec. + CrowdSecAPIKey string // MaxSessionIdleTimeout caps the per-service session idle timeout. // Zero means no cap (the proxy honors whatever management sends). // Set via NB_PROXY_MAX_SESSION_IDLE_TIMEOUT for shared deployments. @@ -275,6 +287,9 @@ func (s *Server) ListenAndServe(ctx context.Context, addr string) (err error) { // management connectivity from the first stream connection. s.healthChecker = health.NewChecker(s.Logger, s.netbird) + s.crowdsecRegistry = crowdsec.NewRegistry(s.CrowdSecAPIURL, s.CrowdSecAPIKey, log.NewEntry(s.Logger)) + s.crowdsecServices = make(map[types.ServiceID]bool) + go s.newManagementMappingWorker(runCtx, s.mgmtClient) tlsConfig, err := s.configureTLS(ctx) @@ -763,6 +778,22 @@ func (s *Server) shutdownServices() { s.Logger.Debugf("close geolocation: %v", err) } } + + s.shutdownCrowdSec() +} + +func (s *Server) shutdownCrowdSec() { + if s.crowdsecRegistry == nil { + return + } + s.crowdsecMu.Lock() + services := maps.Clone(s.crowdsecServices) + maps.Clear(s.crowdsecServices) + s.crowdsecMu.Unlock() + + for svcID := range services { + s.crowdsecRegistry.Release(svcID) + } } // resolveDialFunc returns a DialContextFunc that dials through the @@ -916,6 +947,7 @@ func (s *Server) newManagementMappingWorker(ctx context.Context, client proto.Pr s.healthChecker.SetManagementConnected(false) } + supportsCrowdSec := s.crowdsecRegistry.Available() mappingClient, err := client.GetMappingUpdate(ctx, &proto.GetMappingUpdateRequest{ ProxyId: s.ID, Version: s.Version, @@ -924,6 +956,7 @@ func (s *Server) newManagementMappingWorker(ctx context.Context, client proto.Pr Capabilities: &proto.ProxyCapabilities{ SupportsCustomPorts: &s.SupportsCustomPorts, RequireSubdomain: &s.RequireSubdomain, + SupportsCrowdsec: &supportsCrowdSec, }, }) if err != nil { @@ -1159,7 +1192,7 @@ func (s *Server) setupTCPMapping(ctx context.Context, mapping *proto.ProxyMappin ProxyProtocol: s.l4ProxyProtocol(mapping), DialTimeout: s.l4DialTimeout(mapping), SessionIdleTimeout: s.clampIdleTimeout(l4SessionIdleTimeout(mapping)), - Filter: parseRestrictions(mapping), + Filter: s.parseRestrictions(mapping), }) s.portMu.Lock() @@ -1234,7 +1267,7 @@ func (s *Server) setupTLSMapping(ctx context.Context, mapping *proto.ProxyMappin ProxyProtocol: s.l4ProxyProtocol(mapping), DialTimeout: s.l4DialTimeout(mapping), SessionIdleTimeout: s.clampIdleTimeout(l4SessionIdleTimeout(mapping)), - Filter: parseRestrictions(mapping), + Filter: s.parseRestrictions(mapping), }) if tlsPort != s.mainPort { @@ -1268,12 +1301,51 @@ func (s *Server) serviceKeyForMapping(mapping *proto.ProxyMapping) roundtrip.Ser // parseRestrictions converts a proto mapping's access restrictions into // a restrict.Filter. Returns nil if the mapping has no restrictions. -func parseRestrictions(mapping *proto.ProxyMapping) *restrict.Filter { +func (s *Server) parseRestrictions(mapping *proto.ProxyMapping) *restrict.Filter { r := mapping.GetAccessRestrictions() if r == nil { return nil } - return restrict.ParseFilter(r.GetAllowedCidrs(), r.GetBlockedCidrs(), r.GetAllowedCountries(), r.GetBlockedCountries()) + + svcID := types.ServiceID(mapping.GetId()) + csMode := restrict.CrowdSecMode(r.GetCrowdsecMode()) + + var checker restrict.CrowdSecChecker + if csMode == restrict.CrowdSecEnforce || csMode == restrict.CrowdSecObserve { + if b := s.crowdsecRegistry.Acquire(svcID); b != nil { + checker = b + s.crowdsecMu.Lock() + s.crowdsecServices[svcID] = true + s.crowdsecMu.Unlock() + } else { + s.Logger.Warnf("service %s requests CrowdSec mode %q but proxy has no CrowdSec configured", svcID, csMode) + // Keep the mode: restrict.Filter will fail-closed for enforce (DenyCrowdSecUnavailable) + // and allow for observe. + } + } + + return restrict.ParseFilter(restrict.FilterConfig{ + AllowedCIDRs: r.GetAllowedCidrs(), + BlockedCIDRs: r.GetBlockedCidrs(), + AllowedCountries: r.GetAllowedCountries(), + BlockedCountries: r.GetBlockedCountries(), + CrowdSec: checker, + CrowdSecMode: csMode, + Logger: log.NewEntry(s.Logger), + }) +} + +// releaseCrowdSec releases the CrowdSec bouncer reference for the given +// service if it had one. +func (s *Server) releaseCrowdSec(svcID types.ServiceID) { + s.crowdsecMu.Lock() + had := s.crowdsecServices[svcID] + delete(s.crowdsecServices, svcID) + s.crowdsecMu.Unlock() + + if had { + s.crowdsecRegistry.Release(svcID) + } } // warnIfGeoUnavailable logs a warning if the mapping has country restrictions @@ -1388,7 +1460,7 @@ func (s *Server) addUDPRelay(ctx context.Context, mapping *proto.ProxyMapping, t DialTimeout: s.l4DialTimeout(mapping), SessionTTL: s.clampIdleTimeout(l4SessionIdleTimeout(mapping)), AccessLog: s.accessLog, - Filter: parseRestrictions(mapping), + Filter: s.parseRestrictions(mapping), Geo: s.geo, }) relay.SetObserver(s.meter) @@ -1425,7 +1497,7 @@ func (s *Server) updateMapping(ctx context.Context, mapping *proto.ProxyMapping) schemes = append(schemes, auth.NewHeader(s.mgmtClient, svcID, accountID, ha.GetHeader())) } - ipRestrictions := parseRestrictions(mapping) + ipRestrictions := s.parseRestrictions(mapping) s.warnIfGeoUnavailable(mapping.GetDomain(), mapping.GetAccessRestrictions()) maxSessionAge := time.Duration(mapping.GetAuth().GetMaxSessionAgeSeconds()) * time.Second @@ -1507,6 +1579,9 @@ func (s *Server) cleanupMappingRoutes(mapping *proto.ProxyMapping) { // UDP relay cleanup (idempotent). s.removeUDPRelay(svcID) + // Release CrowdSec after all routes are removed so the shared bouncer + // isn't stopped while stale filters can still be reached by in-flight requests. + s.releaseCrowdSec(svcID) } // removeUDPRelay stops and removes a UDP relay by service ID. diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 766fdf0de..0b855db67 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -2860,6 +2860,11 @@ components: type: string description: "Protocol type: http, tcp, or udp" example: "http" + metadata: + type: object + additionalProperties: + type: string + description: "Extra context about the request (e.g. crowdsec_verdict)" required: - id - service_id @@ -3258,6 +3263,14 @@ components: pattern: '^[a-zA-Z]{2}$' example: "DE" description: ISO 3166-1 alpha-2 country codes to block. + crowdsec_mode: + type: string + enum: + - "off" + - "enforce" + - "observe" + default: "off" + description: CrowdSec IP reputation mode. Only available when the proxy cluster supports CrowdSec. PasswordAuthConfig: type: object properties: @@ -3361,6 +3374,10 @@ components: type: boolean description: Whether a subdomain label is required in front of this domain. When true, the domain cannot be used bare. example: false + supports_crowdsec: + type: boolean + description: Whether the proxy cluster has CrowdSec configured + example: false required: - id - domain diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index 14bb6ee03..0317b8183 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -17,6 +17,27 @@ const ( TokenAuthScopes = "TokenAuth.Scopes" ) +// Defines values for AccessRestrictionsCrowdsecMode. +const ( + AccessRestrictionsCrowdsecModeEnforce AccessRestrictionsCrowdsecMode = "enforce" + AccessRestrictionsCrowdsecModeObserve AccessRestrictionsCrowdsecMode = "observe" + AccessRestrictionsCrowdsecModeOff AccessRestrictionsCrowdsecMode = "off" +) + +// Valid indicates whether the value is a known member of the AccessRestrictionsCrowdsecMode enum. +func (e AccessRestrictionsCrowdsecMode) Valid() bool { + switch e { + case AccessRestrictionsCrowdsecModeEnforce: + return true + case AccessRestrictionsCrowdsecModeObserve: + return true + case AccessRestrictionsCrowdsecModeOff: + return true + default: + return false + } +} + // Defines values for CreateAzureIntegrationRequestHost. const ( CreateAzureIntegrationRequestHostMicrosoftCom CreateAzureIntegrationRequestHost = "microsoft.com" @@ -1326,8 +1347,14 @@ type AccessRestrictions struct { // BlockedCountries ISO 3166-1 alpha-2 country codes to block. BlockedCountries *[]string `json:"blocked_countries,omitempty"` + + // CrowdsecMode CrowdSec IP reputation mode. Only available when the proxy cluster supports CrowdSec. + CrowdsecMode *AccessRestrictionsCrowdsecMode `json:"crowdsec_mode,omitempty"` } +// AccessRestrictionsCrowdsecMode CrowdSec IP reputation mode. Only available when the proxy cluster supports CrowdSec. +type AccessRestrictionsCrowdsecMode string + // AccessiblePeer defines model for AccessiblePeer. type AccessiblePeer struct { // CityName Commonly used English name of the city @@ -3680,6 +3707,9 @@ type ProxyAccessLog struct { // Id Unique identifier for the access log entry Id string `json:"id"` + // Metadata Extra context about the request (e.g. crowdsec_verdict) + Metadata *map[string]string `json:"metadata,omitempty"` + // Method HTTP method of the request Method string `json:"method"` @@ -3759,6 +3789,9 @@ type ReverseProxyDomain struct { // RequireSubdomain Whether a subdomain label is required in front of this domain. When true, the domain cannot be used bare. RequireSubdomain *bool `json:"require_subdomain,omitempty"` + // SupportsCrowdsec Whether the proxy cluster has CrowdSec configured + SupportsCrowdsec *bool `json:"supports_crowdsec,omitempty"` + // SupportsCustomPorts Whether the cluster supports binding arbitrary TCP/UDP ports SupportsCustomPorts *bool `json:"supports_custom_ports,omitempty"` diff --git a/shared/management/proto/proxy_service.pb.go b/shared/management/proto/proxy_service.pb.go index 81637f69e..1095b6411 100644 --- a/shared/management/proto/proxy_service.pb.go +++ b/shared/management/proto/proxy_service.pb.go @@ -186,6 +186,8 @@ type ProxyCapabilities struct { // Whether the proxy requires a subdomain label in front of its cluster domain. // When true, accounts cannot use the cluster domain bare. RequireSubdomain *bool `protobuf:"varint,2,opt,name=require_subdomain,json=requireSubdomain,proto3,oneof" json:"require_subdomain,omitempty"` + // Whether the proxy has CrowdSec configured and can enforce IP reputation checks. + SupportsCrowdsec *bool `protobuf:"varint,3,opt,name=supports_crowdsec,json=supportsCrowdsec,proto3,oneof" json:"supports_crowdsec,omitempty"` } func (x *ProxyCapabilities) Reset() { @@ -234,6 +236,13 @@ func (x *ProxyCapabilities) GetRequireSubdomain() bool { return false } +func (x *ProxyCapabilities) GetSupportsCrowdsec() bool { + if x != nil && x.SupportsCrowdsec != nil { + return *x.SupportsCrowdsec + } + return false +} + // GetMappingUpdateRequest is sent to initialise a mapping stream. type GetMappingUpdateRequest struct { state protoimpl.MessageState @@ -679,6 +688,8 @@ type AccessRestrictions struct { BlockedCidrs []string `protobuf:"bytes,2,rep,name=blocked_cidrs,json=blockedCidrs,proto3" json:"blocked_cidrs,omitempty"` AllowedCountries []string `protobuf:"bytes,3,rep,name=allowed_countries,json=allowedCountries,proto3" json:"allowed_countries,omitempty"` BlockedCountries []string `protobuf:"bytes,4,rep,name=blocked_countries,json=blockedCountries,proto3" json:"blocked_countries,omitempty"` + // CrowdSec IP reputation mode: "", "off", "enforce", or "observe". + CrowdsecMode string `protobuf:"bytes,5,opt,name=crowdsec_mode,json=crowdsecMode,proto3" json:"crowdsec_mode,omitempty"` } func (x *AccessRestrictions) Reset() { @@ -741,6 +752,13 @@ func (x *AccessRestrictions) GetBlockedCountries() []string { return nil } +func (x *AccessRestrictions) GetCrowdsecMode() string { + if x != nil { + return x.CrowdsecMode + } + return "" +} + type ProxyMapping struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -990,6 +1008,8 @@ type AccessLog struct { BytesUpload int64 `protobuf:"varint,14,opt,name=bytes_upload,json=bytesUpload,proto3" json:"bytes_upload,omitempty"` BytesDownload int64 `protobuf:"varint,15,opt,name=bytes_download,json=bytesDownload,proto3" json:"bytes_download,omitempty"` Protocol string `protobuf:"bytes,16,opt,name=protocol,proto3" json:"protocol,omitempty"` + // Extra key-value metadata for the access log entry (e.g. crowdsec_verdict, scenario). + Metadata map[string]string `protobuf:"bytes,17,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *AccessLog) Reset() { @@ -1136,6 +1156,13 @@ func (x *AccessLog) GetProtocol() string { return "" } +func (x *AccessLog) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + type AuthenticateRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1952,7 +1979,7 @@ var file_proxy_service_proto_rawDesc = []byte{ 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x61, 0x70, 0x61, + 0x74, 0x6f, 0x22, 0xf6, 0x01, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x13, 0x73, 0x75, 0x70, 0x70, 0x6f, @@ -1960,324 +1987,338 @@ var file_proxy_service_proto_rawDesc = []byte{ 0x01, 0x12, 0x30, 0x0a, 0x11, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x53, 0x75, 0x62, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, - 0x88, 0x01, 0x01, 0x42, 0x18, 0x0a, 0x16, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, - 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x42, 0x14, 0x0a, - 0x12, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x64, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x22, 0xe6, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, - 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x19, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, - 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, - 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x41, 0x0a, 0x0c, 0x63, 0x61, 0x70, - 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, - 0x78, 0x79, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x0c, - 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0x82, 0x01, 0x0a, - 0x18, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, 0x70, - 0x70, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, - 0x70, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, - 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, - 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x69, 0x6e, - 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, - 0x65, 0x22, 0xce, 0x03, 0x0a, 0x11, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, - 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, - 0x42, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x12, 0x3e, 0x0a, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0b, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x63, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, - 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x12, 0x4b, 0x0a, 0x14, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, - 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x73, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x1a, 0x40, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0x72, 0x0a, 0x0b, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, - 0x67, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x37, 0x0a, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, - 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x47, 0x0a, 0x0a, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x41, 0x75, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, - 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, - 0xe5, 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, - 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x70, 0x61, - 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x69, 0x64, 0x63, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x6f, 0x69, 0x64, 0x63, 0x12, 0x39, 0x0a, 0x0c, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x73, 0x18, 0x06, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x73, 0x22, 0xb8, 0x01, 0x0a, 0x12, 0x41, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x52, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, - 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x43, 0x69, - 0x64, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x63, - 0x69, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x6c, 0x6f, 0x63, - 0x6b, 0x65, 0x64, 0x43, 0x69, 0x64, 0x72, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, - 0x77, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, - 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x10, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x69, - 0x65, 0x73, 0x22, 0xe6, 0x03, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, - 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, - 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x12, 0x2b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, - 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, - 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2e, - 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, - 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x28, - 0x0a, 0x10, 0x70, 0x61, 0x73, 0x73, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x61, 0x73, 0x73, 0x48, 0x6f, - 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, - 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x4f, 0x0a, 0x13, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x74, 0x72, - 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x12, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, - 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3f, 0x0a, 0x14, 0x53, - 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x03, 0x6c, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x03, 0x6c, 0x6f, 0x67, 0x22, 0x17, 0x0a, 0x15, - 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x86, 0x04, 0x0a, 0x09, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x4c, 0x6f, 0x67, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x15, 0x0a, - 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, - 0x6f, 0x67, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, - 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, - 0x63, 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x49, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, - 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, - 0x75, 0x74, 0x68, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x17, 0x0a, 0x07, - 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, - 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x75, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x61, 0x75, 0x74, - 0x68, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x5f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x0f, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, - 0x61, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x10, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0xf8, - 0x01, 0x0a, 0x13, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, + 0x88, 0x01, 0x01, 0x12, 0x30, 0x0a, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x5f, + 0x63, 0x72, 0x6f, 0x77, 0x64, 0x73, 0x65, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x02, + 0x52, 0x10, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x43, 0x72, 0x6f, 0x77, 0x64, 0x73, + 0x65, 0x63, 0x88, 0x01, 0x01, 0x42, 0x18, 0x0a, 0x16, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x73, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x42, + 0x14, 0x0a, 0x12, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x73, 0x5f, 0x63, 0x72, 0x6f, 0x77, 0x64, 0x73, 0x65, 0x63, 0x22, 0xe6, 0x01, 0x0a, 0x17, + 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x41, 0x0a, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x69, 0x65, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, + 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6d, 0x61, + 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, + 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x79, 0x6e, + 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x22, 0xce, 0x03, 0x0a, 0x11, 0x50, 0x61, + 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x26, 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x54, 0x6c, + 0x73, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x42, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3e, 0x0a, 0x0c, 0x70, + 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, + 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0b, + 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x63, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x4b, 0x0a, 0x14, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x6c, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x1a, 0x40, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x72, 0x0a, 0x0b, 0x50, 0x61, + 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, + 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x47, + 0x0a, 0x0a, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x68, 0x61, 0x73, 0x68, + 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe5, 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x74, 0x68, + 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x35, 0x0a, 0x17, 0x6d, + 0x61, 0x78, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x6d, 0x61, + 0x78, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, + 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x10, + 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x70, 0x69, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x69, 0x64, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, + 0x6f, 0x69, 0x64, 0x63, 0x12, 0x39, 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x61, + 0x75, 0x74, 0x68, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, + 0x74, 0x68, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x73, 0x22, + 0xdd, 0x01, 0x0a, 0x12, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x74, 0x72, 0x69, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, + 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x43, 0x69, 0x64, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x62, + 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x43, 0x69, 0x64, 0x72, 0x73, + 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x0a, + 0x11, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, + 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, + 0x6f, 0x77, 0x64, 0x73, 0x65, 0x63, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x63, 0x72, 0x6f, 0x77, 0x64, 0x73, 0x65, 0x63, 0x4d, 0x6f, 0x64, 0x65, 0x22, + 0xe6, 0x03, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, + 0x12, 0x36, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, + 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, + 0x2b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4d, + 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, + 0x61, 0x75, 0x74, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2e, 0x0a, 0x04, 0x61, + 0x75, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x28, 0x0a, 0x10, 0x70, + 0x61, 0x73, 0x73, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x61, 0x73, 0x73, 0x48, 0x6f, 0x73, 0x74, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x4f, 0x0a, 0x13, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x12, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x74, + 0x72, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3f, 0x0a, 0x14, 0x53, 0x65, 0x6e, 0x64, + 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x27, 0x0a, 0x03, 0x6c, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x03, 0x6c, 0x6f, 0x67, 0x22, 0x17, 0x0a, 0x15, 0x53, 0x65, 0x6e, + 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x84, 0x05, 0x0a, 0x09, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, + 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x15, 0x0a, 0x06, 0x6c, 0x6f, + 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, + 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x49, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, + 0x6e, 0x69, 0x73, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x68, + 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x53, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x75, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x12, + 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3f, 0x0a, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x11, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf8, 0x01, 0x0a, 0x13, 0x41, 0x75, + 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, + 0x12, 0x39, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, + 0x00, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x2a, 0x0a, 0x03, 0x70, + 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x48, 0x00, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x40, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x22, 0x57, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, + 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x2d, 0x0a, + 0x0f, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x1e, 0x0a, 0x0a, + 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x22, 0x55, 0x0a, 0x14, + 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x22, 0xf3, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1d, + 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2f, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2d, + 0x0a, 0x12, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x73, + 0x73, 0x75, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x49, 0x73, 0x73, 0x75, 0x65, 0x64, 0x12, 0x28, 0x0a, + 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x65, 0x6e, + 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, 0x72, + 0x64, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x12, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, 0x72, 0x64, 0x50, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x22, 0x6f, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, + 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, + 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x22, 0x65, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x12, 0x2a, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x69, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x40, 0x0a, 0x0b, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x48, 0x00, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x42, 0x09, - 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x57, 0x0a, 0x11, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, - 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, - 0x6d, 0x65, 0x22, 0x2d, 0x0a, 0x0f, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, - 0x64, 0x22, 0x1e, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, - 0x6e, 0x22, 0x55, 0x0a, 0x14, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xf3, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x6e, - 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x49, 0x73, 0x73, 0x75, - 0x65, 0x64, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, - 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x1a, - 0x0a, 0x18, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x16, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x77, 0x69, 0x72, - 0x65, 0x67, 0x75, 0x61, 0x72, 0x64, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, - 0x72, 0x64, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, - 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x65, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, - 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, - 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x6c, 0x22, 0x26, 0x0a, - 0x12, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x55, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8c, 0x01, 0x0a, - 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x12, 0x17, - 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x5f, - 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x73, 0x65, - 0x72, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, - 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, - 0x65, 0x6e, 0x69, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x2a, 0x64, 0x0a, 0x16, 0x50, - 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, - 0x0a, 0x14, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x4f, - 0x44, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, - 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x10, - 0x02, 0x2a, 0x46, 0x0a, 0x0f, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x57, - 0x52, 0x49, 0x54, 0x45, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x19, - 0x0a, 0x15, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x50, - 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x10, 0x01, 0x2a, 0xc8, 0x01, 0x0a, 0x0b, 0x50, 0x72, - 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x52, 0x4f, - 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, - 0x47, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, - 0x54, 0x55, 0x53, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, - 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x54, 0x55, 0x4e, - 0x4e, 0x45, 0x4c, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, - 0x02, 0x12, 0x24, 0x0a, 0x20, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, - 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x45, - 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x23, 0x0a, 0x1f, 0x50, 0x52, 0x4f, 0x58, 0x59, - 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, - 0x41, 0x54, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, - 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x45, 0x52, 0x52, - 0x4f, 0x52, 0x10, 0x05, 0x32, 0xfc, 0x04, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5f, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, - 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, - 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, - 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x54, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, - 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, - 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, - 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, - 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x5d, 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, - 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, - 0x72, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, - 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, - 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x12, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, + 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x6c, 0x22, 0x26, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4f, + 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, + 0x22, 0x55, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8c, 0x01, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x73, 0x65, 0x72, 0x45, 0x6d, 0x61, 0x69, + 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, + 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x2a, 0x64, 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, + 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x55, 0x50, 0x44, + 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x10, 0x02, 0x2a, 0x46, 0x0a, 0x0f, + 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, + 0x18, 0x0a, 0x14, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, + 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x15, 0x50, 0x41, 0x54, + 0x48, 0x5f, 0x52, 0x45, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x52, + 0x56, 0x45, 0x10, 0x01, 0x2a, 0xc8, 0x01, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, + 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x17, + 0x0a, 0x13, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x41, + 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x50, 0x52, 0x4f, 0x58, 0x59, + 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4e, + 0x4f, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x24, 0x0a, 0x20, + 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x45, 0x52, + 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, + 0x10, 0x03, 0x12, 0x23, 0x0a, 0x1f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, + 0x55, 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x46, + 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x52, 0x4f, 0x58, 0x59, + 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x05, 0x32, + 0xfc, 0x04, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x5f, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, + 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, + 0x01, 0x12, 0x54, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, + 0x6f, 0x67, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x68, 0x65, + 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x10, 0x53, 0x65, + 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x23, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0f, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x12, 0x22, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, + 0x55, 0x52, 0x4c, 0x12, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x08, + 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2293,7 +2334,7 @@ func file_proxy_service_proto_rawDescGZIP() []byte { } var file_proxy_service_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_proxy_service_proto_msgTypes = make([]protoimpl.MessageInfo, 26) +var file_proxy_service_proto_msgTypes = make([]protoimpl.MessageInfo, 27) var file_proxy_service_proto_goTypes = []interface{}{ (ProxyMappingUpdateType)(0), // 0: management.ProxyMappingUpdateType (PathRewriteMode)(0), // 1: management.PathRewriteMode @@ -2324,17 +2365,18 @@ var file_proxy_service_proto_goTypes = []interface{}{ (*ValidateSessionRequest)(nil), // 26: management.ValidateSessionRequest (*ValidateSessionResponse)(nil), // 27: management.ValidateSessionResponse nil, // 28: management.PathTargetOptions.CustomHeadersEntry - (*timestamppb.Timestamp)(nil), // 29: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 30: google.protobuf.Duration + nil, // 29: management.AccessLog.MetadataEntry + (*timestamppb.Timestamp)(nil), // 30: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 31: google.protobuf.Duration } var file_proxy_service_proto_depIdxs = []int32{ - 29, // 0: management.GetMappingUpdateRequest.started_at:type_name -> google.protobuf.Timestamp + 30, // 0: management.GetMappingUpdateRequest.started_at:type_name -> google.protobuf.Timestamp 3, // 1: management.GetMappingUpdateRequest.capabilities:type_name -> management.ProxyCapabilities 11, // 2: management.GetMappingUpdateResponse.mapping:type_name -> management.ProxyMapping - 30, // 3: management.PathTargetOptions.request_timeout:type_name -> google.protobuf.Duration + 31, // 3: management.PathTargetOptions.request_timeout:type_name -> google.protobuf.Duration 1, // 4: management.PathTargetOptions.path_rewrite:type_name -> management.PathRewriteMode 28, // 5: management.PathTargetOptions.custom_headers:type_name -> management.PathTargetOptions.CustomHeadersEntry - 30, // 6: management.PathTargetOptions.session_idle_timeout:type_name -> google.protobuf.Duration + 31, // 6: management.PathTargetOptions.session_idle_timeout:type_name -> google.protobuf.Duration 6, // 7: management.PathMapping.options:type_name -> management.PathTargetOptions 8, // 8: management.Authentication.header_auths:type_name -> management.HeaderAuth 0, // 9: management.ProxyMapping.type:type_name -> management.ProxyMappingUpdateType @@ -2342,30 +2384,31 @@ var file_proxy_service_proto_depIdxs = []int32{ 9, // 11: management.ProxyMapping.auth:type_name -> management.Authentication 10, // 12: management.ProxyMapping.access_restrictions:type_name -> management.AccessRestrictions 14, // 13: management.SendAccessLogRequest.log:type_name -> management.AccessLog - 29, // 14: management.AccessLog.timestamp:type_name -> google.protobuf.Timestamp - 17, // 15: management.AuthenticateRequest.password:type_name -> management.PasswordRequest - 18, // 16: management.AuthenticateRequest.pin:type_name -> management.PinRequest - 16, // 17: management.AuthenticateRequest.header_auth:type_name -> management.HeaderAuthRequest - 2, // 18: management.SendStatusUpdateRequest.status:type_name -> management.ProxyStatus - 4, // 19: management.ProxyService.GetMappingUpdate:input_type -> management.GetMappingUpdateRequest - 12, // 20: management.ProxyService.SendAccessLog:input_type -> management.SendAccessLogRequest - 15, // 21: management.ProxyService.Authenticate:input_type -> management.AuthenticateRequest - 20, // 22: management.ProxyService.SendStatusUpdate:input_type -> management.SendStatusUpdateRequest - 22, // 23: management.ProxyService.CreateProxyPeer:input_type -> management.CreateProxyPeerRequest - 24, // 24: management.ProxyService.GetOIDCURL:input_type -> management.GetOIDCURLRequest - 26, // 25: management.ProxyService.ValidateSession:input_type -> management.ValidateSessionRequest - 5, // 26: management.ProxyService.GetMappingUpdate:output_type -> management.GetMappingUpdateResponse - 13, // 27: management.ProxyService.SendAccessLog:output_type -> management.SendAccessLogResponse - 19, // 28: management.ProxyService.Authenticate:output_type -> management.AuthenticateResponse - 21, // 29: management.ProxyService.SendStatusUpdate:output_type -> management.SendStatusUpdateResponse - 23, // 30: management.ProxyService.CreateProxyPeer:output_type -> management.CreateProxyPeerResponse - 25, // 31: management.ProxyService.GetOIDCURL:output_type -> management.GetOIDCURLResponse - 27, // 32: management.ProxyService.ValidateSession:output_type -> management.ValidateSessionResponse - 26, // [26:33] is the sub-list for method output_type - 19, // [19:26] is the sub-list for method input_type - 19, // [19:19] is the sub-list for extension type_name - 19, // [19:19] is the sub-list for extension extendee - 0, // [0:19] is the sub-list for field type_name + 30, // 14: management.AccessLog.timestamp:type_name -> google.protobuf.Timestamp + 29, // 15: management.AccessLog.metadata:type_name -> management.AccessLog.MetadataEntry + 17, // 16: management.AuthenticateRequest.password:type_name -> management.PasswordRequest + 18, // 17: management.AuthenticateRequest.pin:type_name -> management.PinRequest + 16, // 18: management.AuthenticateRequest.header_auth:type_name -> management.HeaderAuthRequest + 2, // 19: management.SendStatusUpdateRequest.status:type_name -> management.ProxyStatus + 4, // 20: management.ProxyService.GetMappingUpdate:input_type -> management.GetMappingUpdateRequest + 12, // 21: management.ProxyService.SendAccessLog:input_type -> management.SendAccessLogRequest + 15, // 22: management.ProxyService.Authenticate:input_type -> management.AuthenticateRequest + 20, // 23: management.ProxyService.SendStatusUpdate:input_type -> management.SendStatusUpdateRequest + 22, // 24: management.ProxyService.CreateProxyPeer:input_type -> management.CreateProxyPeerRequest + 24, // 25: management.ProxyService.GetOIDCURL:input_type -> management.GetOIDCURLRequest + 26, // 26: management.ProxyService.ValidateSession:input_type -> management.ValidateSessionRequest + 5, // 27: management.ProxyService.GetMappingUpdate:output_type -> management.GetMappingUpdateResponse + 13, // 28: management.ProxyService.SendAccessLog:output_type -> management.SendAccessLogResponse + 19, // 29: management.ProxyService.Authenticate:output_type -> management.AuthenticateResponse + 21, // 30: management.ProxyService.SendStatusUpdate:output_type -> management.SendStatusUpdateResponse + 23, // 31: management.ProxyService.CreateProxyPeer:output_type -> management.CreateProxyPeerResponse + 25, // 32: management.ProxyService.GetOIDCURL:output_type -> management.GetOIDCURLResponse + 27, // 33: management.ProxyService.ValidateSession:output_type -> management.ValidateSessionResponse + 27, // [27:34] is the sub-list for method output_type + 20, // [20:27] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name } func init() { file_proxy_service_proto_init() } @@ -2689,7 +2732,7 @@ func file_proxy_service_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_proxy_service_proto_rawDesc, NumEnums: 3, - NumMessages: 26, + NumMessages: 27, NumExtensions: 0, NumServices: 1, }, diff --git a/shared/management/proto/proxy_service.proto b/shared/management/proto/proxy_service.proto index f77071eb0..e359f0cbd 100644 --- a/shared/management/proto/proxy_service.proto +++ b/shared/management/proto/proxy_service.proto @@ -34,6 +34,8 @@ message ProxyCapabilities { // Whether the proxy requires a subdomain label in front of its cluster domain. // When true, accounts cannot use the cluster domain bare. optional bool require_subdomain = 2; + // Whether the proxy has CrowdSec configured and can enforce IP reputation checks. + optional bool supports_crowdsec = 3; } // GetMappingUpdateRequest is sent to initialise a mapping stream. @@ -104,6 +106,8 @@ message AccessRestrictions { repeated string blocked_cidrs = 2; repeated string allowed_countries = 3; repeated string blocked_countries = 4; + // CrowdSec IP reputation mode: "", "off", "enforce", or "observe". + string crowdsec_mode = 5; } message ProxyMapping { @@ -152,6 +156,8 @@ message AccessLog { int64 bytes_upload = 14; int64 bytes_download = 15; string protocol = 16; + // Extra key-value metadata for the access log entry (e.g. crowdsec_verdict, scenario). + map metadata = 17; } message AuthenticateRequest { From 7f666b80227f306cca366bd6a347e2cda925db79 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Tue, 14 Apr 2026 12:16:03 +0200 Subject: [PATCH 308/374] [management] revert ctx dependency in get account with backpressure (#5878) --- management/server/account_request_buffer.go | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/management/server/account_request_buffer.go b/management/server/account_request_buffer.go index ac53a9fa8..e1672c2d0 100644 --- a/management/server/account_request_buffer.go +++ b/management/server/account_request_buffer.go @@ -63,20 +63,11 @@ func (ac *AccountRequestBuffer) GetAccountWithBackpressure(ctx context.Context, log.WithContext(ctx).Tracef("requesting account %s with backpressure", accountID) startTime := time.Now() + ac.getAccountRequestCh <- req - select { - case <-ctx.Done(): - return nil, ctx.Err() - case ac.getAccountRequestCh <- req: - } - - select { - case <-ctx.Done(): - return nil, ctx.Err() - case result := <-req.ResultChan: - log.WithContext(ctx).Tracef("got account with backpressure after %s", time.Since(startTime)) - return result.Account, result.Err - } + result := <-req.ResultChan + log.WithContext(ctx).Tracef("got account with backpressure after %s", time.Since(startTime)) + return result.Account, result.Err } func (ac *AccountRequestBuffer) processGetAccountBatch(ctx context.Context, accountID string) { From c5623307ccd9feb02a24ddaa6c44fbe1e39a0f79 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Tue, 14 Apr 2026 12:49:18 +0200 Subject: [PATCH 309/374] [management] add context cancel monitoring (#5879) --- management/server/store/sql_store.go | 36 +++++++++---------- .../server/telemetry/http_api_metrics.go | 11 ++++++ 2 files changed, 29 insertions(+), 18 deletions(-) diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index a34d9f70a..8189548b7 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -1017,10 +1017,10 @@ func (s *SqlStore) GetAccountsCounter(ctx context.Context) (int64, error) { // GetCustomDomainsCounts returns the total and validated custom domain counts. func (s *SqlStore) GetCustomDomainsCounts(ctx context.Context) (int64, int64, error) { var total, validated int64 - if err := s.db.WithContext(ctx).Model(&domain.Domain{}).Count(&total).Error; err != nil { + if err := s.db.Model(&domain.Domain{}).Count(&total).Error; err != nil { return 0, 0, err } - if err := s.db.WithContext(ctx).Model(&domain.Domain{}).Where("validated = ?", true).Count(&validated).Error; err != nil { + if err := s.db.Model(&domain.Domain{}).Where("validated = ?", true).Count(&validated).Error; err != nil { return 0, 0, err } return total, validated, nil @@ -4442,7 +4442,7 @@ func (s *SqlStore) DeletePAT(ctx context.Context, userID, patID string) error { // GetProxyAccessTokenByHashedToken retrieves a proxy access token by its hashed value. func (s *SqlStore) GetProxyAccessTokenByHashedToken(ctx context.Context, lockStrength LockingStrength, hashedToken types.HashedProxyToken) (*types.ProxyAccessToken, error) { - tx := s.db.WithContext(ctx) + tx := s.db if lockStrength != LockingStrengthNone { tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) } @@ -4461,7 +4461,7 @@ func (s *SqlStore) GetProxyAccessTokenByHashedToken(ctx context.Context, lockStr // GetAllProxyAccessTokens retrieves all proxy access tokens. func (s *SqlStore) GetAllProxyAccessTokens(ctx context.Context, lockStrength LockingStrength) ([]*types.ProxyAccessToken, error) { - tx := s.db.WithContext(ctx) + tx := s.db if lockStrength != LockingStrengthNone { tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) } @@ -4477,7 +4477,7 @@ func (s *SqlStore) GetAllProxyAccessTokens(ctx context.Context, lockStrength Loc // SaveProxyAccessToken saves a proxy access token to the database. func (s *SqlStore) SaveProxyAccessToken(ctx context.Context, token *types.ProxyAccessToken) error { - if result := s.db.WithContext(ctx).Create(token); result.Error != nil { + if result := s.db.Create(token); result.Error != nil { return status.Errorf(status.Internal, "save proxy access token: %v", result.Error) } return nil @@ -4485,7 +4485,7 @@ func (s *SqlStore) SaveProxyAccessToken(ctx context.Context, token *types.ProxyA // RevokeProxyAccessToken revokes a proxy access token by its ID. func (s *SqlStore) RevokeProxyAccessToken(ctx context.Context, tokenID string) error { - result := s.db.WithContext(ctx).Model(&types.ProxyAccessToken{}).Where(idQueryCondition, tokenID).Update("revoked", true) + result := s.db.Model(&types.ProxyAccessToken{}).Where(idQueryCondition, tokenID).Update("revoked", true) if result.Error != nil { return status.Errorf(status.Internal, "revoke proxy access token: %v", result.Error) } @@ -4499,7 +4499,7 @@ func (s *SqlStore) RevokeProxyAccessToken(ctx context.Context, tokenID string) e // MarkProxyAccessTokenUsed updates the last used timestamp for a proxy access token. func (s *SqlStore) MarkProxyAccessTokenUsed(ctx context.Context, tokenID string) error { - result := s.db.WithContext(ctx).Model(&types.ProxyAccessToken{}). + result := s.db.Model(&types.ProxyAccessToken{}). Where(idQueryCondition, tokenID). Update("last_used", time.Now().UTC()) if result.Error != nil { @@ -5168,7 +5168,7 @@ func (s *SqlStore) EphemeralServiceExists(ctx context.Context, lockStrength Lock // GetServicesByClusterAndPort returns services matching the given proxy cluster, mode, and listen port. func (s *SqlStore) GetServicesByClusterAndPort(ctx context.Context, lockStrength LockingStrength, proxyCluster string, mode string, listenPort uint16) ([]*rpservice.Service, error) { - tx := s.db.WithContext(ctx) + tx := s.db if lockStrength != LockingStrengthNone { tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) } @@ -5184,7 +5184,7 @@ func (s *SqlStore) GetServicesByClusterAndPort(ctx context.Context, lockStrength // GetServicesByCluster returns all services for the given proxy cluster. func (s *SqlStore) GetServicesByCluster(ctx context.Context, lockStrength LockingStrength, proxyCluster string) ([]*rpservice.Service, error) { - tx := s.db.WithContext(ctx) + tx := s.db if lockStrength != LockingStrengthNone { tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) } @@ -5294,7 +5294,7 @@ func (s *SqlStore) GetAccountAccessLogs(ctx context.Context, lockStrength Lockin var logs []*accesslogs.AccessLogEntry var totalCount int64 - baseQuery := s.db.WithContext(ctx). + baseQuery := s.db. Model(&accesslogs.AccessLogEntry{}). Where(accountIDCondition, accountID) @@ -5305,7 +5305,7 @@ func (s *SqlStore) GetAccountAccessLogs(ctx context.Context, lockStrength Lockin return nil, 0, status.Errorf(status.Internal, "failed to count access logs") } - query := s.db.WithContext(ctx). + query := s.db. Where(accountIDCondition, accountID) query = s.applyAccessLogFilters(query, filter) @@ -5342,7 +5342,7 @@ func (s *SqlStore) GetAccountAccessLogs(ctx context.Context, lockStrength Lockin // DeleteOldAccessLogs deletes all access logs older than the specified time func (s *SqlStore) DeleteOldAccessLogs(ctx context.Context, olderThan time.Time) (int64, error) { - result := s.db.WithContext(ctx). + result := s.db. Where("timestamp < ?", olderThan). Delete(&accesslogs.AccessLogEntry{}) @@ -5431,7 +5431,7 @@ func (s *SqlStore) GetServiceTargetByTargetID(ctx context.Context, lockStrength // SaveProxy saves or updates a proxy in the database func (s *SqlStore) SaveProxy(ctx context.Context, p *proxy.Proxy) error { - result := s.db.WithContext(ctx).Save(p) + result := s.db.Save(p) if result.Error != nil { log.WithContext(ctx).Errorf("failed to save proxy: %v", result.Error) return status.Errorf(status.Internal, "failed to save proxy") @@ -5443,7 +5443,7 @@ func (s *SqlStore) SaveProxy(ctx context.Context, p *proxy.Proxy) error { func (s *SqlStore) UpdateProxyHeartbeat(ctx context.Context, proxyID, clusterAddress, ipAddress string) error { now := time.Now() - result := s.db.WithContext(ctx). + result := s.db. Model(&proxy.Proxy{}). Where("id = ? AND status = ?", proxyID, "connected"). Update("last_seen", now) @@ -5462,7 +5462,7 @@ func (s *SqlStore) UpdateProxyHeartbeat(ctx context.Context, proxyID, clusterAdd ConnectedAt: &now, Status: "connected", } - if err := s.db.WithContext(ctx).Save(p).Error; err != nil { + if err := s.db.Save(p).Error; err != nil { log.WithContext(ctx).Errorf("failed to create proxy on heartbeat: %v", err) return status.Errorf(status.Internal, "failed to create proxy on heartbeat") } @@ -5475,7 +5475,7 @@ func (s *SqlStore) UpdateProxyHeartbeat(ctx context.Context, proxyID, clusterAdd func (s *SqlStore) GetActiveProxyClusterAddresses(ctx context.Context) ([]string, error) { var addresses []string - result := s.db.WithContext(ctx). + result := s.db. Model(&proxy.Proxy{}). Where("status = ? AND last_seen > ?", "connected", time.Now().Add(-proxyActiveThreshold)). Distinct("cluster_address"). @@ -5598,7 +5598,7 @@ func (s *SqlStore) getClusterCapability(ctx context.Context, clusterAddr, column AnyTrue bool } - err := s.db.WithContext(ctx). + err := s.db. Model(&proxy.Proxy{}). Select("COUNT(CASE WHEN "+column+" IS NOT NULL THEN 1 END) > 0 AS has_capability, "+ "COALESCE(MAX(CASE WHEN "+column+" = true THEN 1 ELSE 0 END), 0) = 1 AS any_true"). @@ -5622,7 +5622,7 @@ func (s *SqlStore) getClusterCapability(ctx context.Context, clusterAddr, column func (s *SqlStore) CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error { cutoffTime := time.Now().Add(-inactivityDuration) - result := s.db.WithContext(ctx). + result := s.db. Where("last_seen < ?", cutoffTime). Delete(&proxy.Proxy{}) diff --git a/management/server/telemetry/http_api_metrics.go b/management/server/telemetry/http_api_metrics.go index c50ed1e51..28e8457e2 100644 --- a/management/server/telemetry/http_api_metrics.go +++ b/management/server/telemetry/http_api_metrics.go @@ -183,7 +183,18 @@ func (m *HTTPMiddleware) Handler(h http.Handler) http.Handler { w := WrapResponseWriter(rw) + handlerDone := make(chan struct{}) + context.AfterFunc(ctx, func() { + select { + case <-handlerDone: + default: + log.Debugf("HTTP request context canceled mid-flight: %v %v (reqID=%s, after %v, cause: %v)", + r.Method, r.URL.Path, reqID, time.Since(reqStart), context.Cause(ctx)) + } + }) + h.ServeHTTP(w, r.WithContext(ctx)) + close(handlerDone) userAuth, err := nbContext.GetUserAuthFromContext(r.Context()) if err == nil { From d7ad908962e71ea1f7d090cbda68c23b627250e5 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Tue, 14 Apr 2026 20:36:26 +0900 Subject: [PATCH 310/374] [misc] Add CI check for proto version string changes (#5854) * Add CI check for proto version string changes * Handle pagination and missing patch data in proto version check --- .github/workflows/proto-version-check.yml | 62 +++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 .github/workflows/proto-version-check.yml diff --git a/.github/workflows/proto-version-check.yml b/.github/workflows/proto-version-check.yml new file mode 100644 index 000000000..ea300419d --- /dev/null +++ b/.github/workflows/proto-version-check.yml @@ -0,0 +1,62 @@ +name: Proto Version Check + +on: + pull_request: + paths: + - "**/*.pb.go" + +jobs: + check-proto-versions: + runs-on: ubuntu-latest + steps: + - name: Check for proto tool version changes + uses: actions/github-script@v7 + with: + script: | + const files = await github.paginate(github.rest.pulls.listFiles, { + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: context.issue.number, + per_page: 100, + }); + + const pbFiles = files.filter(f => f.filename.endsWith('.pb.go')); + const missingPatch = pbFiles.filter(f => !f.patch).map(f => f.filename); + if (missingPatch.length > 0) { + core.setFailed( + `Cannot inspect patch data for:\n` + + missingPatch.map(f => `- ${f}`).join('\n') + + `\nThis can happen with very large PRs. Verify proto versions manually.` + ); + return; + } + const versionPattern = /^[+-]\s*\/\/\s+protoc(?:-gen-go)?\s+v[\d.]+/; + const violations = []; + + for (const file of pbFiles) { + const changed = file.patch + .split('\n') + .filter(line => versionPattern.test(line)); + if (changed.length > 0) { + violations.push({ + file: file.filename, + lines: changed, + }); + } + } + + if (violations.length > 0) { + const details = violations.map(v => + `${v.file}:\n${v.lines.map(l => ' ' + l).join('\n')}` + ).join('\n\n'); + + core.setFailed( + `Proto version strings changed in generated files.\n` + + `This usually means the wrong protoc or protoc-gen-go version was used.\n` + + `Regenerate with the matching tool versions.\n\n` + + details + ); + return; + } + + console.log('No proto version string changes detected'); From 46fc8c9f65c4ccebfd5e77bd1ee6f1502bc6da0c Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Tue, 14 Apr 2026 13:47:02 +0200 Subject: [PATCH 311/374] [proxy] direct redirect to SSO (#5874) --- proxy/internal/auth/middleware.go | 6 ++++ proxy/internal/auth/middleware_test.go | 50 ++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/proxy/internal/auth/middleware.go b/proxy/internal/auth/middleware.go index f1d1fcc59..055e4510f 100644 --- a/proxy/internal/auth/middleware.go +++ b/proxy/internal/auth/middleware.go @@ -372,6 +372,12 @@ func (mw *Middleware) authenticateWithSchemes(w http.ResponseWriter, r *http.Req cd.SetAuthMethod(attemptedMethod) } } + + if oidcURL, ok := methods[auth.MethodOIDC.String()]; ok && len(methods) == 1 && oidcURL != "" { + http.Redirect(w, r, oidcURL, http.StatusFound) + return + } + web.ServeHTTP(w, r, map[string]any{"methods": methods}, http.StatusUnauthorized) } diff --git a/proxy/internal/auth/middleware_test.go b/proxy/internal/auth/middleware_test.go index 7c634106c..16d09800c 100644 --- a/proxy/internal/auth/middleware_test.go +++ b/proxy/internal/auth/middleware_test.go @@ -761,6 +761,56 @@ func TestCheckIPRestrictions_NilGeoWithCountryRules(t *testing.T) { assert.Equal(t, http.StatusForbidden, rr.Code, "country restrictions with nil geo must deny") } +func TestProtect_OIDCOnlyRedirectsDirectly(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil, nil) + kp := generateTestKeyPair(t) + + oidcURL := "https://idp.example.com/authorize?client_id=abc" + scheme := &stubScheme{ + method: auth.MethodOIDC, + authFn: func(_ *http.Request) (string, string, error) { + return "", oidcURL, nil + }, + } + require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "", nil)) + + handler := mw.Protect(newPassthroughHandler()) + + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusFound, rec.Code, "should redirect directly to IdP") + assert.Equal(t, oidcURL, rec.Header().Get("Location")) +} + +func TestProtect_OIDCWithOtherMethodShowsLoginPage(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil, nil) + kp := generateTestKeyPair(t) + + oidcScheme := &stubScheme{ + method: auth.MethodOIDC, + authFn: func(_ *http.Request) (string, string, error) { + return "", "https://idp.example.com/authorize", nil + }, + } + pinScheme := &stubScheme{ + method: auth.MethodPIN, + authFn: func(_ *http.Request) (string, string, error) { + return "", "pin", nil + }, + } + require.NoError(t, mw.AddDomain("example.com", []Scheme{oidcScheme, pinScheme}, kp.PublicKey, time.Hour, "", "", nil)) + + handler := mw.Protect(newPassthroughHandler()) + + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusUnauthorized, rec.Code, "should show login page when multiple methods exist") +} + // mockAuthenticator is a minimal mock for the authenticator gRPC interface // used by the Header scheme. type mockAuthenticator struct { From e804a705b71ba9a8d44ad5b87e72850fae8d7104 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 15 Apr 2026 00:08:35 +0900 Subject: [PATCH 312/374] [infrastructure] Update sign pipeline version to v0.1.2 (#5884) --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 83444b541..5ada1033d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -9,7 +9,7 @@ on: pull_request: env: - SIGN_PIPE_VER: "v0.1.1" + SIGN_PIPE_VER: "v0.1.2" GORELEASER_VER: "v2.14.3" PRODUCT_NAME: "NetBird" COPYRIGHT: "NetBird GmbH" From 0d86de47df5a8ced0f9b1a9e3c288a7abc7fa9aa Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 15 Apr 2026 18:43:16 +0900 Subject: [PATCH 313/374] [client] Add PCP support (#5219) --- client/internal/portforward/env.go | 15 +- client/internal/portforward/manager.go | 82 +++- client/internal/portforward/pcp/client.go | 408 ++++++++++++++++++ .../internal/portforward/pcp/client_test.go | 187 ++++++++ client/internal/portforward/pcp/nat.go | 209 +++++++++ client/internal/portforward/pcp/protocol.go | 225 ++++++++++ client/internal/portforward/state.go | 63 +++ 7 files changed, 1176 insertions(+), 13 deletions(-) create mode 100644 client/internal/portforward/pcp/client.go create mode 100644 client/internal/portforward/pcp/client_test.go create mode 100644 client/internal/portforward/pcp/nat.go create mode 100644 client/internal/portforward/pcp/protocol.go create mode 100644 client/internal/portforward/state.go diff --git a/client/internal/portforward/env.go b/client/internal/portforward/env.go index 444a6b478..ba83c79bf 100644 --- a/client/internal/portforward/env.go +++ b/client/internal/portforward/env.go @@ -8,18 +8,27 @@ import ( ) const ( - envDisableNATMapper = "NB_DISABLE_NAT_MAPPER" + envDisableNATMapper = "NB_DISABLE_NAT_MAPPER" + envDisablePCPHealthCheck = "NB_DISABLE_PCP_HEALTH_CHECK" ) func isDisabledByEnv() bool { - val := os.Getenv(envDisableNATMapper) + return parseBoolEnv(envDisableNATMapper) +} + +func isHealthCheckDisabled() bool { + return parseBoolEnv(envDisablePCPHealthCheck) +} + +func parseBoolEnv(key string) bool { + val := os.Getenv(key) if val == "" { return false } disabled, err := strconv.ParseBool(val) if err != nil { - log.Warnf("failed to parse %s: %v", envDisableNATMapper, err) + log.Warnf("failed to parse %s: %v", key, err) return false } return disabled diff --git a/client/internal/portforward/manager.go b/client/internal/portforward/manager.go index bf7533af9..b0680160c 100644 --- a/client/internal/portforward/manager.go +++ b/client/internal/portforward/manager.go @@ -12,12 +12,15 @@ import ( "github.com/libp2p/go-nat" log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/internal/portforward/pcp" ) const ( - defaultMappingTTL = 2 * time.Hour - discoveryTimeout = 10 * time.Second - mappingDescription = "NetBird" + defaultMappingTTL = 2 * time.Hour + healthCheckInterval = 1 * time.Minute + discoveryTimeout = 10 * time.Second + mappingDescription = "NetBird" ) // upnpErrPermanentLeaseOnly matches UPnP error 725 in SOAP fault XML, @@ -154,7 +157,7 @@ func (m *Manager) setup(ctx context.Context) (nat.NAT, *Mapping, error) { discoverCtx, discoverCancel := context.WithTimeout(ctx, discoveryTimeout) defer discoverCancel() - gateway, err := nat.DiscoverGateway(discoverCtx) + gateway, err := discoverGateway(discoverCtx) if err != nil { return nil, nil, fmt.Errorf("discover gateway: %w", err) } @@ -189,7 +192,6 @@ func (m *Manager) createMapping(ctx context.Context, gateway nat.NAT) (*Mapping, externalIP, err := gateway.GetExternalAddress() if err != nil { log.Debugf("failed to get external address: %v", err) - // todo return with err? } mapping := &Mapping{ @@ -208,27 +210,87 @@ func (m *Manager) createMapping(ctx context.Context, gateway nat.NAT) (*Mapping, func (m *Manager) renewLoop(ctx context.Context, gateway nat.NAT, ttl time.Duration) { if ttl == 0 { - // Permanent mappings don't expire, just wait for cancellation. - <-ctx.Done() + // Permanent mappings don't expire, just wait for cancellation + // but still run health checks for PCP gateways. + m.permanentLeaseLoop(ctx, gateway) return } - ticker := time.NewTicker(ttl / 2) - defer ticker.Stop() + renewTicker := time.NewTicker(ttl / 2) + healthTicker := time.NewTicker(healthCheckInterval) + defer renewTicker.Stop() + defer healthTicker.Stop() for { select { case <-ctx.Done(): return - case <-ticker.C: + case <-renewTicker.C: if err := m.renewMapping(ctx, gateway); err != nil { log.Warnf("failed to renew port mapping: %v", err) continue } + case <-healthTicker.C: + if m.checkHealthAndRecreate(ctx, gateway) { + renewTicker.Reset(ttl / 2) + } } } } +func (m *Manager) permanentLeaseLoop(ctx context.Context, gateway nat.NAT) { + healthTicker := time.NewTicker(healthCheckInterval) + defer healthTicker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-healthTicker.C: + m.checkHealthAndRecreate(ctx, gateway) + } + } +} + +func (m *Manager) checkHealthAndRecreate(ctx context.Context, gateway nat.NAT) bool { + if isHealthCheckDisabled() { + return false + } + + m.mappingLock.Lock() + hasMapping := m.mapping != nil + m.mappingLock.Unlock() + + if !hasMapping { + return false + } + + pcpNAT, ok := gateway.(*pcp.NAT) + if !ok { + return false + } + + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + epoch, serverRestarted, err := pcpNAT.CheckServerHealth(ctx) + if err != nil { + log.Debugf("PCP health check failed: %v", err) + return false + } + + if serverRestarted { + log.Warnf("PCP server restart detected (epoch=%d), recreating port mapping", epoch) + if err := m.renewMapping(ctx, gateway); err != nil { + log.Errorf("failed to recreate port mapping after server restart: %v", err) + return false + } + return true + } + + return false +} + func (m *Manager) renewMapping(ctx context.Context, gateway nat.NAT) error { ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() diff --git a/client/internal/portforward/pcp/client.go b/client/internal/portforward/pcp/client.go new file mode 100644 index 000000000..f6d243ef9 --- /dev/null +++ b/client/internal/portforward/pcp/client.go @@ -0,0 +1,408 @@ +package pcp + +import ( + "context" + "crypto/rand" + "errors" + "fmt" + "net" + "net/netip" + "sync" + "time" + + log "github.com/sirupsen/logrus" +) + +const ( + defaultTimeout = 3 * time.Second + responseBufferSize = 128 + + // RFC 6887 Section 8.1.1 retry timing + initialRetryDelay = 3 * time.Second + maxRetryDelay = 1024 * time.Second + maxRetries = 4 // 3s + 6s + 12s + 24s = 45s total worst case +) + +// Client is a PCP protocol client. +// All methods are safe for concurrent use. +type Client struct { + gateway netip.Addr + timeout time.Duration + + mu sync.Mutex + // localIP caches the resolved local IP address. + localIP netip.Addr + // lastEpoch is the last observed server epoch value. + lastEpoch uint32 + // epochTime tracks when lastEpoch was received for state loss detection. + epochTime time.Time + // externalIP caches the external IP from the last successful MAP response. + externalIP netip.Addr + // epochStateLost is set when epoch indicates server restart. + epochStateLost bool +} + +// NewClient creates a new PCP client for the gateway at the given IP. +func NewClient(gateway net.IP) *Client { + addr, ok := netip.AddrFromSlice(gateway) + if !ok { + log.Debugf("invalid gateway IP: %v", gateway) + } + return &Client{ + gateway: addr.Unmap(), + timeout: defaultTimeout, + } +} + +// NewClientWithTimeout creates a new PCP client with a custom timeout. +func NewClientWithTimeout(gateway net.IP, timeout time.Duration) *Client { + addr, ok := netip.AddrFromSlice(gateway) + if !ok { + log.Debugf("invalid gateway IP: %v", gateway) + } + return &Client{ + gateway: addr.Unmap(), + timeout: timeout, + } +} + +// SetLocalIP sets the local IP address to use in PCP requests. +func (c *Client) SetLocalIP(ip net.IP) { + addr, ok := netip.AddrFromSlice(ip) + if !ok { + log.Debugf("invalid local IP: %v", ip) + } + c.mu.Lock() + c.localIP = addr.Unmap() + c.mu.Unlock() +} + +// Gateway returns the gateway IP address. +func (c *Client) Gateway() net.IP { + return c.gateway.AsSlice() +} + +// Announce sends a PCP ANNOUNCE request to discover PCP support. +// Returns the server's epoch time on success. +func (c *Client) Announce(ctx context.Context) (epoch uint32, err error) { + localIP, err := c.getLocalIP() + if err != nil { + return 0, fmt.Errorf("get local IP: %w", err) + } + + req := buildAnnounceRequest(localIP) + resp, err := c.sendRequest(ctx, req) + if err != nil { + return 0, fmt.Errorf("send announce: %w", err) + } + + parsed, err := parseResponse(resp) + if err != nil { + return 0, fmt.Errorf("parse announce response: %w", err) + } + + if parsed.ResultCode != ResultSuccess { + return 0, fmt.Errorf("PCP ANNOUNCE failed: %s", ResultCodeString(parsed.ResultCode)) + } + + c.mu.Lock() + if c.updateEpochLocked(parsed.Epoch) { + log.Warnf("PCP server epoch indicates state loss - mappings may need refresh") + } + c.mu.Unlock() + return parsed.Epoch, nil +} + +// AddPortMapping requests a port mapping from the PCP server. +func (c *Client) AddPortMapping(ctx context.Context, protocol string, internalPort int, lifetime time.Duration) (*MapResponse, error) { + return c.addPortMappingWithHint(ctx, protocol, internalPort, internalPort, netip.Addr{}, lifetime) +} + +// AddPortMappingWithHint requests a port mapping with suggested external port and IP. +// Use lifetime <= 0 to delete a mapping. +func (c *Client) AddPortMappingWithHint(ctx context.Context, protocol string, internalPort, suggestedExtPort int, suggestedExtIP net.IP, lifetime time.Duration) (*MapResponse, error) { + var extIP netip.Addr + if suggestedExtIP != nil { + var ok bool + extIP, ok = netip.AddrFromSlice(suggestedExtIP) + if !ok { + log.Debugf("invalid suggested external IP: %v", suggestedExtIP) + } + extIP = extIP.Unmap() + } + return c.addPortMappingWithHint(ctx, protocol, internalPort, suggestedExtPort, extIP, lifetime) +} + +func (c *Client) addPortMappingWithHint(ctx context.Context, protocol string, internalPort, suggestedExtPort int, suggestedExtIP netip.Addr, lifetime time.Duration) (*MapResponse, error) { + localIP, err := c.getLocalIP() + if err != nil { + return nil, fmt.Errorf("get local IP: %w", err) + } + + proto, err := protocolNumber(protocol) + if err != nil { + return nil, fmt.Errorf("parse protocol: %w", err) + } + + var nonce [12]byte + if _, err := rand.Read(nonce[:]); err != nil { + return nil, fmt.Errorf("generate nonce: %w", err) + } + + // Convert lifetime to seconds. Lifetime 0 means delete, so only apply + // default for positive durations that round to 0 seconds. + var lifetimeSec uint32 + if lifetime > 0 { + lifetimeSec = uint32(lifetime.Seconds()) + if lifetimeSec == 0 { + lifetimeSec = DefaultLifetime + } + } + + req := buildMapRequest(localIP, nonce, proto, uint16(internalPort), uint16(suggestedExtPort), suggestedExtIP, lifetimeSec) + + resp, err := c.sendRequest(ctx, req) + if err != nil { + return nil, fmt.Errorf("send map request: %w", err) + } + + mapResp, err := parseMapResponse(resp) + if err != nil { + return nil, fmt.Errorf("parse map response: %w", err) + } + + if mapResp.Nonce != nonce { + return nil, fmt.Errorf("nonce mismatch in response") + } + + if mapResp.Protocol != proto { + return nil, fmt.Errorf("protocol mismatch: requested %d, got %d", proto, mapResp.Protocol) + } + if mapResp.InternalPort != uint16(internalPort) { + return nil, fmt.Errorf("internal port mismatch: requested %d, got %d", internalPort, mapResp.InternalPort) + } + + if mapResp.ResultCode != ResultSuccess { + return nil, &Error{ + Code: mapResp.ResultCode, + Message: ResultCodeString(mapResp.ResultCode), + } + } + + c.mu.Lock() + if c.updateEpochLocked(mapResp.Epoch) { + log.Warnf("PCP server epoch indicates state loss - mappings may need refresh") + } + c.cacheExternalIPLocked(mapResp.ExternalIP) + c.mu.Unlock() + return mapResp, nil +} + +// DeletePortMapping removes a port mapping by requesting zero lifetime. +func (c *Client) DeletePortMapping(ctx context.Context, protocol string, internalPort int) error { + if _, err := c.addPortMappingWithHint(ctx, protocol, internalPort, 0, netip.Addr{}, 0); err != nil { + var pcpErr *Error + if errors.As(err, &pcpErr) && pcpErr.Code == ResultNotAuthorized { + return nil + } + return fmt.Errorf("delete mapping: %w", err) + } + return nil +} + +// GetExternalAddress returns the external IP address. +// First checks for a cached value from previous MAP responses. +// If not cached, creates a short-lived mapping to discover the external IP. +func (c *Client) GetExternalAddress(ctx context.Context) (net.IP, error) { + c.mu.Lock() + if c.externalIP.IsValid() { + ip := c.externalIP.AsSlice() + c.mu.Unlock() + return ip, nil + } + c.mu.Unlock() + + // Use an ephemeral port in the dynamic range (49152-65535). + // Port 0 is not valid with UDP/TCP protocols per RFC 6887. + ephemeralPort := 49152 + int(uint16(time.Now().UnixNano()))%(65535-49152) + + // Use minimal lifetime (1 second) for discovery. + resp, err := c.AddPortMapping(ctx, "udp", ephemeralPort, time.Second) + if err != nil { + return nil, fmt.Errorf("create temporary mapping: %w", err) + } + + if err := c.DeletePortMapping(ctx, "udp", ephemeralPort); err != nil { + log.Debugf("cleanup temporary PCP mapping: %v", err) + } + + return resp.ExternalIP.AsSlice(), nil +} + +// LastEpoch returns the last observed server epoch value. +// A decrease in epoch indicates the server may have restarted and mappings may be lost. +func (c *Client) LastEpoch() uint32 { + c.mu.Lock() + defer c.mu.Unlock() + return c.lastEpoch +} + +// EpochStateLost returns true if epoch state loss was detected and clears the flag. +func (c *Client) EpochStateLost() bool { + c.mu.Lock() + defer c.mu.Unlock() + lost := c.epochStateLost + c.epochStateLost = false + return lost +} + +// updateEpoch updates the epoch tracking and detects potential state loss. +// Returns true if state loss was detected (server likely restarted). +// Caller must hold c.mu. +func (c *Client) updateEpochLocked(newEpoch uint32) bool { + now := time.Now() + stateLost := false + + // RFC 6887 Section 8.5: Detect invalid epoch indicating server state loss. + // client_delta = time since last response + // server_delta = epoch change since last response + // Invalid if: client_delta+2 < server_delta - server_delta/16 + // OR: server_delta+2 < client_delta - client_delta/16 + // The +2 handles quantization, /16 (6.25%) handles clock drift. + if !c.epochTime.IsZero() && c.lastEpoch > 0 { + clientDelta := uint32(now.Sub(c.epochTime).Seconds()) + serverDelta := newEpoch - c.lastEpoch + + // Check for epoch going backwards or jumping unexpectedly. + // Subtraction is safe: serverDelta/16 is always <= serverDelta. + if clientDelta+2 < serverDelta-(serverDelta/16) || + serverDelta+2 < clientDelta-(clientDelta/16) { + stateLost = true + c.epochStateLost = true + } + } + + c.lastEpoch = newEpoch + c.epochTime = now + return stateLost +} + +// cacheExternalIP stores the external IP from a successful MAP response. +// Caller must hold c.mu. +func (c *Client) cacheExternalIPLocked(ip netip.Addr) { + if ip.IsValid() && !ip.IsUnspecified() { + c.externalIP = ip + } +} + +// sendRequest sends a PCP request with retries per RFC 6887 Section 8.1.1. +func (c *Client) sendRequest(ctx context.Context, req []byte) ([]byte, error) { + addr := &net.UDPAddr{IP: c.gateway.AsSlice(), Port: Port} + + var lastErr error + delay := initialRetryDelay + + for range maxRetries { + resp, err := c.sendOnce(ctx, addr, req) + if err == nil { + return resp, nil + } + lastErr = err + + if ctx.Err() != nil { + return nil, ctx.Err() + } + + // RFC 6887 Section 8.1.1: RT = (1 + RAND) * MIN(2 * RTprev, MRT) + // RAND is random between -0.1 and +0.1 + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(retryDelayWithJitter(delay)): + } + delay = min(delay*2, maxRetryDelay) + } + + return nil, fmt.Errorf("PCP request failed after %d retries: %w", maxRetries, lastErr) +} + +// retryDelayWithJitter applies RFC 6887 jitter: multiply by (1 + RAND) where RAND is [-0.1, +0.1]. +func retryDelayWithJitter(d time.Duration) time.Duration { + var b [1]byte + _, _ = rand.Read(b[:]) + // Convert byte to range [-0.1, +0.1]: (b/255 * 0.2) - 0.1 + jitter := (float64(b[0])/255.0)*0.2 - 0.1 + return time.Duration(float64(d) * (1 + jitter)) +} + +func (c *Client) sendOnce(ctx context.Context, addr *net.UDPAddr, req []byte) ([]byte, error) { + // Use ListenUDP instead of DialUDP to validate response source address per RFC 6887 §8.3. + conn, err := net.ListenUDP("udp", nil) + if err != nil { + return nil, fmt.Errorf("listen: %w", err) + } + defer func() { + if err := conn.Close(); err != nil { + log.Debugf("close UDP connection: %v", err) + } + }() + + timeout := c.timeout + if deadline, ok := ctx.Deadline(); ok { + if remaining := time.Until(deadline); remaining < timeout { + timeout = remaining + } + } + + if err := conn.SetDeadline(time.Now().Add(timeout)); err != nil { + return nil, fmt.Errorf("set deadline: %w", err) + } + + if _, err := conn.WriteToUDP(req, addr); err != nil { + return nil, fmt.Errorf("write: %w", err) + } + + resp := make([]byte, responseBufferSize) + n, from, err := conn.ReadFromUDP(resp) + if err != nil { + return nil, fmt.Errorf("read: %w", err) + } + + // RFC 6887 §8.3: Validate response came from expected PCP server. + if !from.IP.Equal(addr.IP) { + return nil, fmt.Errorf("response from unexpected source %s (expected %s)", from.IP, addr.IP) + } + + return resp[:n], nil +} + +func (c *Client) getLocalIP() (netip.Addr, error) { + c.mu.Lock() + defer c.mu.Unlock() + + if !c.localIP.IsValid() { + return netip.Addr{}, fmt.Errorf("local IP not set for gateway %s", c.gateway) + } + return c.localIP, nil +} + +func protocolNumber(protocol string) (uint8, error) { + switch protocol { + case "udp", "UDP": + return ProtoUDP, nil + case "tcp", "TCP": + return ProtoTCP, nil + default: + return 0, fmt.Errorf("unsupported protocol: %s", protocol) + } +} + +// Error represents a PCP error response. +type Error struct { + Code uint8 + Message string +} + +func (e *Error) Error() string { + return fmt.Sprintf("PCP error: %s (%d)", e.Message, e.Code) +} diff --git a/client/internal/portforward/pcp/client_test.go b/client/internal/portforward/pcp/client_test.go new file mode 100644 index 000000000..79f44a426 --- /dev/null +++ b/client/internal/portforward/pcp/client_test.go @@ -0,0 +1,187 @@ +package pcp + +import ( + "context" + "net" + "net/netip" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAddrConversion(t *testing.T) { + tests := []struct { + name string + addr netip.Addr + }{ + {"IPv4", netip.MustParseAddr("192.168.1.100")}, + {"IPv4 loopback", netip.MustParseAddr("127.0.0.1")}, + {"IPv6", netip.MustParseAddr("2001:db8::1")}, + {"IPv6 loopback", netip.MustParseAddr("::1")}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b16 := addrTo16(tt.addr) + + recovered := addrFrom16(b16) + assert.Equal(t, tt.addr, recovered, "address should round-trip") + }) + } +} + +func TestBuildAnnounceRequest(t *testing.T) { + clientIP := netip.MustParseAddr("192.168.1.100") + req := buildAnnounceRequest(clientIP) + + require.Len(t, req, headerSize) + assert.Equal(t, byte(Version), req[0], "version") + assert.Equal(t, byte(OpAnnounce), req[1], "opcode") + + // Check client IP is properly encoded as IPv4-mapped IPv6 + assert.Equal(t, byte(0xff), req[18], "IPv4-mapped prefix byte 10") + assert.Equal(t, byte(0xff), req[19], "IPv4-mapped prefix byte 11") + assert.Equal(t, byte(192), req[20], "IP octet 1") + assert.Equal(t, byte(168), req[21], "IP octet 2") + assert.Equal(t, byte(1), req[22], "IP octet 3") + assert.Equal(t, byte(100), req[23], "IP octet 4") +} + +func TestBuildMapRequest(t *testing.T) { + clientIP := netip.MustParseAddr("192.168.1.100") + nonce := [12]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12} + req := buildMapRequest(clientIP, nonce, ProtoUDP, 51820, 51820, netip.Addr{}, 3600) + + require.Len(t, req, mapRequestSize) + assert.Equal(t, byte(Version), req[0], "version") + assert.Equal(t, byte(OpMap), req[1], "opcode") + + // Lifetime at bytes 4-7 + assert.Equal(t, uint32(3600), (uint32(req[4])<<24)|(uint32(req[5])<<16)|(uint32(req[6])<<8)|uint32(req[7]), "lifetime") + + // Nonce at bytes 24-35 + assert.Equal(t, nonce[:], req[24:36], "nonce") + + // Protocol at byte 36 + assert.Equal(t, byte(ProtoUDP), req[36], "protocol") + + // Internal port at bytes 40-41 + assert.Equal(t, uint16(51820), (uint16(req[40])<<8)|uint16(req[41]), "internal port") + + // External port at bytes 42-43 + assert.Equal(t, uint16(51820), (uint16(req[42])<<8)|uint16(req[43]), "external port") +} + +func TestParseResponse(t *testing.T) { + // Construct a valid ANNOUNCE response + resp := make([]byte, headerSize) + resp[0] = Version + resp[1] = OpAnnounce | OpReply + // Result code = 0 (success) + // Lifetime = 0 + // Epoch = 12345 + resp[8] = 0 + resp[9] = 0 + resp[10] = 0x30 + resp[11] = 0x39 + + parsed, err := parseResponse(resp) + require.NoError(t, err) + assert.Equal(t, uint8(Version), parsed.Version) + assert.Equal(t, uint8(OpAnnounce|OpReply), parsed.Opcode) + assert.Equal(t, uint8(ResultSuccess), parsed.ResultCode) + assert.Equal(t, uint32(12345), parsed.Epoch) +} + +func TestParseResponseErrors(t *testing.T) { + t.Run("too short", func(t *testing.T) { + _, err := parseResponse([]byte{1, 2, 3}) + assert.Error(t, err) + }) + + t.Run("wrong version", func(t *testing.T) { + resp := make([]byte, headerSize) + resp[0] = 1 // Wrong version + resp[1] = OpReply + _, err := parseResponse(resp) + assert.Error(t, err) + }) + + t.Run("missing reply bit", func(t *testing.T) { + resp := make([]byte, headerSize) + resp[0] = Version + resp[1] = OpAnnounce // Missing OpReply bit + _, err := parseResponse(resp) + assert.Error(t, err) + }) +} + +func TestResultCodeString(t *testing.T) { + assert.Equal(t, "SUCCESS", ResultCodeString(ResultSuccess)) + assert.Equal(t, "NOT_AUTHORIZED", ResultCodeString(ResultNotAuthorized)) + assert.Equal(t, "ADDRESS_MISMATCH", ResultCodeString(ResultAddressMismatch)) + assert.Contains(t, ResultCodeString(255), "UNKNOWN") +} + +func TestProtocolNumber(t *testing.T) { + proto, err := protocolNumber("udp") + require.NoError(t, err) + assert.Equal(t, uint8(ProtoUDP), proto) + + proto, err = protocolNumber("tcp") + require.NoError(t, err) + assert.Equal(t, uint8(ProtoTCP), proto) + + proto, err = protocolNumber("UDP") + require.NoError(t, err) + assert.Equal(t, uint8(ProtoUDP), proto) + + _, err = protocolNumber("icmp") + assert.Error(t, err) +} + +func TestClientCreation(t *testing.T) { + gateway := netip.MustParseAddr("192.168.1.1").AsSlice() + + client := NewClient(gateway) + assert.Equal(t, net.IP(gateway), client.Gateway()) + assert.Equal(t, defaultTimeout, client.timeout) + + clientWithTimeout := NewClientWithTimeout(gateway, 5*time.Second) + assert.Equal(t, 5*time.Second, clientWithTimeout.timeout) +} + +func TestNATType(t *testing.T) { + n := NewNAT(netip.MustParseAddr("192.168.1.1").AsSlice(), netip.MustParseAddr("192.168.1.100").AsSlice()) + assert.Equal(t, "PCP", n.Type()) +} + +// Integration test - skipped unless PCP_TEST_GATEWAY env is set +func TestClientIntegration(t *testing.T) { + t.Skip("Integration test - run manually with PCP_TEST_GATEWAY=") + + gateway := netip.MustParseAddr("10.0.1.1").AsSlice() // Change to your test gateway + localIP := netip.MustParseAddr("10.0.1.100").AsSlice() // Change to your local IP + + client := NewClient(gateway) + client.SetLocalIP(localIP) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Test ANNOUNCE + epoch, err := client.Announce(ctx) + require.NoError(t, err) + t.Logf("Server epoch: %d", epoch) + + // Test MAP + resp, err := client.AddPortMapping(ctx, "udp", 51820, 1*time.Hour) + require.NoError(t, err) + t.Logf("Mapping: internal=%d external=%d externalIP=%s", + resp.InternalPort, resp.ExternalPort, resp.ExternalIP) + + // Cleanup + err = client.DeletePortMapping(ctx, "udp", 51820) + require.NoError(t, err) +} diff --git a/client/internal/portforward/pcp/nat.go b/client/internal/portforward/pcp/nat.go new file mode 100644 index 000000000..1dc24274b --- /dev/null +++ b/client/internal/portforward/pcp/nat.go @@ -0,0 +1,209 @@ +package pcp + +import ( + "context" + "fmt" + "net" + "net/netip" + "sync" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/libp2p/go-nat" + "github.com/libp2p/go-netroute" +) + +var _ nat.NAT = (*NAT)(nil) + +// NAT implements the go-nat NAT interface using PCP. +// Supports dual-stack (IPv4 and IPv6) when available. +// All methods are safe for concurrent use. +// +// TODO: IPv6 pinholes use the local IPv6 address. If the address changes +// (e.g., due to SLAAC rotation or network change), the pinhole becomes stale +// and needs to be recreated with the new address. +type NAT struct { + client *Client + + mu sync.RWMutex + // client6 is the IPv6 PCP client, nil if IPv6 is unavailable. + client6 *Client + // localIP6 caches the local IPv6 address used for PCP requests. + localIP6 netip.Addr +} + +// NewNAT creates a new NAT instance backed by PCP. +func NewNAT(gateway, localIP net.IP) *NAT { + client := NewClient(gateway) + client.SetLocalIP(localIP) + return &NAT{ + client: client, + } +} + +// Type returns "PCP" as the NAT type. +func (n *NAT) Type() string { + return "PCP" +} + +// GetDeviceAddress returns the gateway IP address. +func (n *NAT) GetDeviceAddress() (net.IP, error) { + return n.client.Gateway(), nil +} + +// GetExternalAddress returns the external IP address. +func (n *NAT) GetExternalAddress() (net.IP, error) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + return n.client.GetExternalAddress(ctx) +} + +// GetInternalAddress returns the local IP address used to communicate with the gateway. +func (n *NAT) GetInternalAddress() (net.IP, error) { + addr, err := n.client.getLocalIP() + if err != nil { + return nil, err + } + return addr.AsSlice(), nil +} + +// AddPortMapping creates a port mapping on both IPv4 and IPv6 (if available). +func (n *NAT) AddPortMapping(ctx context.Context, protocol string, internalPort int, _ string, timeout time.Duration) (int, error) { + resp, err := n.client.AddPortMapping(ctx, protocol, internalPort, timeout) + if err != nil { + return 0, fmt.Errorf("add mapping: %w", err) + } + + n.mu.RLock() + client6 := n.client6 + localIP6 := n.localIP6 + n.mu.RUnlock() + + if client6 == nil { + return int(resp.ExternalPort), nil + } + + if _, err := client6.AddPortMapping(ctx, protocol, internalPort, timeout); err != nil { + log.Warnf("IPv6 PCP mapping failed (continuing with IPv4): %v", err) + return int(resp.ExternalPort), nil + } + + log.Infof("created IPv6 PCP pinhole: %s:%d", localIP6, internalPort) + return int(resp.ExternalPort), nil +} + +// DeletePortMapping removes a port mapping from both IPv4 and IPv6. +func (n *NAT) DeletePortMapping(ctx context.Context, protocol string, internalPort int) error { + err := n.client.DeletePortMapping(ctx, protocol, internalPort) + + n.mu.RLock() + client6 := n.client6 + n.mu.RUnlock() + + if client6 != nil { + if err6 := client6.DeletePortMapping(ctx, protocol, internalPort); err6 != nil { + log.Warnf("IPv6 PCP delete mapping failed: %v", err6) + } + } + + if err != nil { + return fmt.Errorf("delete mapping: %w", err) + } + return nil +} + +// CheckServerHealth sends an ANNOUNCE to verify the server is still responsive. +// Returns the current epoch and whether the server may have restarted (epoch state loss detected). +func (n *NAT) CheckServerHealth(ctx context.Context) (epoch uint32, serverRestarted bool, err error) { + epoch, err = n.client.Announce(ctx) + if err != nil { + return 0, false, fmt.Errorf("announce: %w", err) + } + return epoch, n.client.EpochStateLost(), nil +} + +// DiscoverPCP attempts to discover a PCP-capable gateway. +// Returns a NAT interface if PCP is supported, or an error otherwise. +// Discovers both IPv4 and IPv6 gateways when available. +func DiscoverPCP(ctx context.Context) (nat.NAT, error) { + gateway, localIP, err := getDefaultGateway() + if err != nil { + return nil, fmt.Errorf("get default gateway: %w", err) + } + + client := NewClient(gateway) + client.SetLocalIP(localIP) + if _, err := client.Announce(ctx); err != nil { + return nil, fmt.Errorf("PCP announce: %w", err) + } + + result := &NAT{client: client} + discoverIPv6(ctx, result) + + return result, nil +} + +func discoverIPv6(ctx context.Context, result *NAT) { + gateway6, localIP6, err := getDefaultGateway6() + if err != nil { + log.Debugf("IPv6 gateway discovery failed: %v", err) + return + } + + client6 := NewClient(gateway6) + client6.SetLocalIP(localIP6) + if _, err := client6.Announce(ctx); err != nil { + log.Debugf("PCP IPv6 announce failed: %v", err) + return + } + + addr, ok := netip.AddrFromSlice(localIP6) + if !ok { + log.Debugf("invalid IPv6 local IP: %v", localIP6) + return + } + result.mu.Lock() + result.client6 = client6 + result.localIP6 = addr + result.mu.Unlock() + log.Debugf("PCP IPv6 gateway discovered: %s (local: %s)", gateway6, localIP6) +} + +// getDefaultGateway returns the default IPv4 gateway and local IP using the system routing table. +func getDefaultGateway() (gateway net.IP, localIP net.IP, err error) { + router, err := netroute.New() + if err != nil { + return nil, nil, err + } + + _, gateway, localIP, err = router.Route(net.IPv4zero) + if err != nil { + return nil, nil, err + } + + if gateway == nil { + return nil, nil, nat.ErrNoNATFound + } + + return gateway, localIP, nil +} + +// getDefaultGateway6 returns the default IPv6 gateway IP address using the system routing table. +func getDefaultGateway6() (gateway net.IP, localIP net.IP, err error) { + router, err := netroute.New() + if err != nil { + return nil, nil, err + } + + _, gateway, localIP, err = router.Route(net.IPv6zero) + if err != nil { + return nil, nil, err + } + + if gateway == nil { + return nil, nil, nat.ErrNoNATFound + } + + return gateway, localIP, nil +} diff --git a/client/internal/portforward/pcp/protocol.go b/client/internal/portforward/pcp/protocol.go new file mode 100644 index 000000000..d81c50c8c --- /dev/null +++ b/client/internal/portforward/pcp/protocol.go @@ -0,0 +1,225 @@ +// Package pcp implements the Port Control Protocol (RFC 6887). +// +// # Implemented Features +// +// - ANNOUNCE opcode: Discovers PCP server support +// - MAP opcode: Creates/deletes port mappings (IPv4 NAT) and firewall pinholes (IPv6) +// - Dual-stack: Simultaneous IPv4 and IPv6 support via separate clients +// - Nonce validation: Prevents response spoofing +// - Epoch tracking: Detects server restarts per Section 8.5 +// - RFC-compliant retry timing: 3s initial, exponential backoff to 1024s max (Section 8.1.1) +// +// # Not Implemented +// +// - PEER opcode: For outbound peer connections (not needed for inbound NAT traversal) +// - THIRD_PARTY option: For managing mappings on behalf of other devices +// - PREFER_FAILURE option: Requires exact external port or fail (IPv4 NAT only, not needed for IPv6 pinholing) +// - FILTER option: To restrict remote peer addresses +// +// These optional features are omitted because the primary use case is simple +// port forwarding for WireGuard, which only requires MAP with default behavior. +package pcp + +import ( + "encoding/binary" + "fmt" + "net/netip" +) + +const ( + // Version is the PCP protocol version (RFC 6887). + Version = 2 + + // Port is the standard PCP server port. + Port = 5351 + + // DefaultLifetime is the default requested mapping lifetime in seconds. + DefaultLifetime = 7200 // 2 hours + + // Header sizes + headerSize = 24 + mapPayloadSize = 36 + mapRequestSize = headerSize + mapPayloadSize // 60 bytes +) + +// Opcodes +const ( + OpAnnounce = 0 + OpMap = 1 + OpPeer = 2 + OpReply = 0x80 // OR'd with opcode in responses +) + +// Protocol numbers for MAP requests +const ( + ProtoUDP = 17 + ProtoTCP = 6 +) + +// Result codes (RFC 6887 Section 7.4) +const ( + ResultSuccess = 0 + ResultUnsuppVersion = 1 + ResultNotAuthorized = 2 + ResultMalformedRequest = 3 + ResultUnsuppOpcode = 4 + ResultUnsuppOption = 5 + ResultMalformedOption = 6 + ResultNetworkFailure = 7 + ResultNoResources = 8 + ResultUnsuppProtocol = 9 + ResultUserExQuota = 10 + ResultCannotProvideExt = 11 + ResultAddressMismatch = 12 + ResultExcessiveRemotePeers = 13 +) + +// ResultCodeString returns a human-readable string for a result code. +func ResultCodeString(code uint8) string { + switch code { + case ResultSuccess: + return "SUCCESS" + case ResultUnsuppVersion: + return "UNSUPP_VERSION" + case ResultNotAuthorized: + return "NOT_AUTHORIZED" + case ResultMalformedRequest: + return "MALFORMED_REQUEST" + case ResultUnsuppOpcode: + return "UNSUPP_OPCODE" + case ResultUnsuppOption: + return "UNSUPP_OPTION" + case ResultMalformedOption: + return "MALFORMED_OPTION" + case ResultNetworkFailure: + return "NETWORK_FAILURE" + case ResultNoResources: + return "NO_RESOURCES" + case ResultUnsuppProtocol: + return "UNSUPP_PROTOCOL" + case ResultUserExQuota: + return "USER_EX_QUOTA" + case ResultCannotProvideExt: + return "CANNOT_PROVIDE_EXTERNAL" + case ResultAddressMismatch: + return "ADDRESS_MISMATCH" + case ResultExcessiveRemotePeers: + return "EXCESSIVE_REMOTE_PEERS" + default: + return fmt.Sprintf("UNKNOWN(%d)", code) + } +} + +// Response represents a parsed PCP response header. +type Response struct { + Version uint8 + Opcode uint8 + ResultCode uint8 + Lifetime uint32 + Epoch uint32 +} + +// MapResponse contains the full response to a MAP request. +type MapResponse struct { + Response + Nonce [12]byte + Protocol uint8 + InternalPort uint16 + ExternalPort uint16 + ExternalIP netip.Addr +} + +// addrTo16 converts an address to its 16-byte IPv4-mapped IPv6 representation. +func addrTo16(addr netip.Addr) [16]byte { + if addr.Is4() { + return netip.AddrFrom4(addr.As4()).As16() + } + return addr.As16() +} + +// addrFrom16 extracts an address from a 16-byte representation, unmapping IPv4. +func addrFrom16(b [16]byte) netip.Addr { + return netip.AddrFrom16(b).Unmap() +} + +// buildAnnounceRequest creates a PCP ANNOUNCE request packet. +func buildAnnounceRequest(clientIP netip.Addr) []byte { + req := make([]byte, headerSize) + req[0] = Version + req[1] = OpAnnounce + mapped := addrTo16(clientIP) + copy(req[8:24], mapped[:]) + return req +} + +// buildMapRequest creates a PCP MAP request packet. +func buildMapRequest(clientIP netip.Addr, nonce [12]byte, protocol uint8, internalPort, suggestedExtPort uint16, suggestedExtIP netip.Addr, lifetime uint32) []byte { + req := make([]byte, mapRequestSize) + + // Header + req[0] = Version + req[1] = OpMap + binary.BigEndian.PutUint32(req[4:8], lifetime) + mapped := addrTo16(clientIP) + copy(req[8:24], mapped[:]) + + // MAP payload + copy(req[24:36], nonce[:]) + req[36] = protocol + binary.BigEndian.PutUint16(req[40:42], internalPort) + binary.BigEndian.PutUint16(req[42:44], suggestedExtPort) + if suggestedExtIP.IsValid() { + extMapped := addrTo16(suggestedExtIP) + copy(req[44:60], extMapped[:]) + } + + return req +} + +// parseResponse parses the common PCP response header. +func parseResponse(data []byte) (*Response, error) { + if len(data) < headerSize { + return nil, fmt.Errorf("response too short: %d bytes", len(data)) + } + + resp := &Response{ + Version: data[0], + Opcode: data[1], + ResultCode: data[3], // Byte 2 is reserved, byte 3 is result code (RFC 6887 §7.2) + Lifetime: binary.BigEndian.Uint32(data[4:8]), + Epoch: binary.BigEndian.Uint32(data[8:12]), + } + + if resp.Version != Version { + return nil, fmt.Errorf("unsupported PCP version: %d", resp.Version) + } + + if resp.Opcode&OpReply == 0 { + return nil, fmt.Errorf("response missing reply bit: opcode=0x%02x", resp.Opcode) + } + + return resp, nil +} + +// parseMapResponse parses a complete MAP response. +func parseMapResponse(data []byte) (*MapResponse, error) { + if len(data) < mapRequestSize { + return nil, fmt.Errorf("MAP response too short: %d bytes", len(data)) + } + + resp, err := parseResponse(data) + if err != nil { + return nil, fmt.Errorf("parse header: %w", err) + } + + mapResp := &MapResponse{ + Response: *resp, + Protocol: data[36], + InternalPort: binary.BigEndian.Uint16(data[40:42]), + ExternalPort: binary.BigEndian.Uint16(data[42:44]), + ExternalIP: addrFrom16([16]byte(data[44:60])), + } + copy(mapResp.Nonce[:], data[24:36]) + + return mapResp, nil +} diff --git a/client/internal/portforward/state.go b/client/internal/portforward/state.go new file mode 100644 index 000000000..b1315cdc0 --- /dev/null +++ b/client/internal/portforward/state.go @@ -0,0 +1,63 @@ +//go:build !js + +package portforward + +import ( + "context" + "fmt" + + "github.com/libp2p/go-nat" + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/internal/portforward/pcp" +) + +// discoverGateway is the function used for NAT gateway discovery. +// It can be replaced in tests to avoid real network operations. +// Tries PCP first, then falls back to NAT-PMP/UPnP. +var discoverGateway = defaultDiscoverGateway + +func defaultDiscoverGateway(ctx context.Context) (nat.NAT, error) { + pcpGateway, err := pcp.DiscoverPCP(ctx) + if err == nil { + return pcpGateway, nil + } + log.Debugf("PCP discovery failed: %v, trying NAT-PMP/UPnP", err) + + return nat.DiscoverGateway(ctx) +} + +// State is persisted only for crash recovery cleanup +type State struct { + InternalPort uint16 `json:"internal_port,omitempty"` + Protocol string `json:"protocol,omitempty"` +} + +func (s *State) Name() string { + return "port_forward_state" +} + +// Cleanup implements statemanager.CleanableState for crash recovery +func (s *State) Cleanup() error { + if s.InternalPort == 0 { + return nil + } + + log.Infof("cleaning up stale port mapping for port %d", s.InternalPort) + + ctx, cancel := context.WithTimeout(context.Background(), discoveryTimeout) + defer cancel() + + gateway, err := discoverGateway(ctx) + if err != nil { + // Discovery failure is not an error - gateway may not exist + log.Debugf("cleanup: no gateway found: %v", err) + return nil + } + + if err := gateway.DeletePortMapping(ctx, s.Protocol, int(s.InternalPort)); err != nil { + return fmt.Errorf("delete port mapping: %w", err) + } + + return nil +} From 95bc01e48f297d52320c879ce0dcae648deddb42 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 16 Apr 2026 02:22:08 +0900 Subject: [PATCH 314/374] [client] Allow clearing saved service env vars with --service-env "" (#5893) --- client/cmd/service.go | 2 ++ client/cmd/service_params.go | 31 ++++++++++++++++++--------- client/cmd/service_params_test.go | 35 +++++++++++++++++++++++++++++++ 3 files changed, 58 insertions(+), 10 deletions(-) diff --git a/client/cmd/service.go b/client/cmd/service.go index 5ff16eaeb..5871d2b8a 100644 --- a/client/cmd/service.go +++ b/client/cmd/service.go @@ -48,6 +48,8 @@ func init() { rootCmd.PersistentFlags().StringVarP(&serviceName, "service", "s", defaultServiceName, "Netbird system service name") serviceEnvDesc := `Sets extra environment variables for the service. ` + `You can specify a comma-separated list of KEY=VALUE pairs. ` + + `New keys are merged with previously saved env vars; existing keys are overwritten. ` + + `Use --service-env "" to clear all saved env vars. ` + `E.g. --service-env NB_LOG_LEVEL=debug,CUSTOM_VAR=value` installCmd.Flags().StringSliceVar(&serviceEnvVars, "service-env", nil, serviceEnvDesc) diff --git a/client/cmd/service_params.go b/client/cmd/service_params.go index 81bd2dbb5..47f4d2367 100644 --- a/client/cmd/service_params.go +++ b/client/cmd/service_params.go @@ -82,7 +82,7 @@ func currentServiceParams() *serviceParams { if len(serviceEnvVars) > 0 { parsed, err := parseServiceEnvVars(serviceEnvVars) - if err == nil && len(parsed) > 0 { + if err == nil { params.ServiceEnvVars = parsed } } @@ -146,27 +146,38 @@ func applyServiceParams(cmd *cobra.Command, params *serviceParams) { } // applyServiceEnvParams merges saved service environment variables. -// If --service-env was explicitly set, explicit values win on key conflict -// but saved keys not in the explicit set are carried over. +// If --service-env was explicitly set with values, explicit values win on key +// conflict but saved keys not in the explicit set are carried over. +// If --service-env was explicitly set to empty, all saved env vars are cleared. // If --service-env was not set, saved env vars are used entirely. func applyServiceEnvParams(cmd *cobra.Command, params *serviceParams) { - if len(params.ServiceEnvVars) == 0 { - return - } - if !cmd.Flags().Changed("service-env") { - // No explicit env vars: rebuild serviceEnvVars from saved params. - serviceEnvVars = envMapToSlice(params.ServiceEnvVars) + if len(params.ServiceEnvVars) > 0 { + // No explicit env vars: rebuild serviceEnvVars from saved params. + serviceEnvVars = envMapToSlice(params.ServiceEnvVars) + } return } - // Explicit env vars were provided: merge saved values underneath. + // Flag was explicitly set: parse what the user provided. explicit, err := parseServiceEnvVars(serviceEnvVars) if err != nil { cmd.PrintErrf("Warning: parse explicit service env vars for merge: %v\n", err) return } + // If the user passed an empty value (e.g. --service-env ""), clear all + // saved env vars rather than merging. + if len(explicit) == 0 { + serviceEnvVars = nil + return + } + + if len(params.ServiceEnvVars) == 0 { + return + } + + // Merge saved values underneath explicit ones. merged := make(map[string]string, len(params.ServiceEnvVars)+len(explicit)) maps.Copy(merged, params.ServiceEnvVars) maps.Copy(merged, explicit) // explicit wins on conflict diff --git a/client/cmd/service_params_test.go b/client/cmd/service_params_test.go index 3bc8e4f60..b01f85f3d 100644 --- a/client/cmd/service_params_test.go +++ b/client/cmd/service_params_test.go @@ -327,6 +327,41 @@ func TestApplyServiceEnvParams_NotChanged(t *testing.T) { assert.Equal(t, map[string]string{"FROM_SAVED": "val"}, result) } +func TestApplyServiceEnvParams_ExplicitEmptyClears(t *testing.T) { + origServiceEnvVars := serviceEnvVars + t.Cleanup(func() { serviceEnvVars = origServiceEnvVars }) + + // Simulate --service-env "" which produces [""] in the slice. + serviceEnvVars = []string{""} + + cmd := &cobra.Command{} + cmd.Flags().StringSlice("service-env", nil, "") + require.NoError(t, cmd.Flags().Set("service-env", "")) + + saved := &serviceParams{ + ServiceEnvVars: map[string]string{"OLD_VAR": "should_be_cleared"}, + } + + applyServiceEnvParams(cmd, saved) + + assert.Nil(t, serviceEnvVars, "explicit empty --service-env should clear all saved env vars") +} + +func TestCurrentServiceParams_EmptyEnvVarsAfterParse(t *testing.T) { + origServiceEnvVars := serviceEnvVars + t.Cleanup(func() { serviceEnvVars = origServiceEnvVars }) + + // Simulate --service-env "" which produces [""] in the slice. + serviceEnvVars = []string{""} + + params := currentServiceParams() + + // After parsing, the empty string is skipped, resulting in an empty map. + // The map should still be set (not nil) so it overwrites saved values. + assert.NotNil(t, params.ServiceEnvVars, "empty env vars should produce empty map, not nil") + assert.Empty(t, params.ServiceEnvVars, "no valid env vars should be parsed from empty string") +} + // TestServiceParams_FieldsCoveredInFunctions ensures that all serviceParams fields are // referenced in both currentServiceParams() and applyServiceParams(). If a new field is // added to serviceParams but not wired into these functions, this test fails. From 08f624507da775b066ca34dd30c0304da7bf7bf5 Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Thu, 16 Apr 2026 13:12:19 +0300 Subject: [PATCH 315/374] [management] Enforce peer or peer groups requirement for network routers (#5894) --- .../http/handlers/networks/routers_handler.go | 11 +++++++ .../networks_handler_integration_test.go | 33 ++++++++++++------- .../server/networks/routers/types/router.go | 26 +++++++++++---- .../networks/routers/types/router_test.go | 15 +++++++-- 4 files changed, 65 insertions(+), 20 deletions(-) diff --git a/management/server/http/handlers/networks/routers_handler.go b/management/server/http/handlers/networks/routers_handler.go index c311a29fe..ce9efb78d 100644 --- a/management/server/http/handlers/networks/routers_handler.go +++ b/management/server/http/handlers/networks/routers_handler.go @@ -105,6 +105,12 @@ func (h *routersHandler) createRouter(w http.ResponseWriter, r *http.Request) { router.NetworkID = networkID router.AccountID = accountID router.Enabled = true + + if err := router.Validate(); err != nil { + util.WriteErrorResponse(err.Error(), http.StatusBadRequest, w) + return + } + router, err = h.routersManager.CreateRouter(r.Context(), userID, router) if err != nil { util.WriteError(r.Context(), err, w) @@ -157,6 +163,11 @@ func (h *routersHandler) updateRouter(w http.ResponseWriter, r *http.Request) { router.ID = mux.Vars(r)["routerId"] router.AccountID = accountID + if err := router.Validate(); err != nil { + util.WriteErrorResponse(err.Error(), http.StatusBadRequest, w) + return + } + router, err = h.routersManager.UpdateRouter(r.Context(), userID, router) if err != nil { util.WriteError(r.Context(), err, w) diff --git a/management/server/http/testing/integration/networks_handler_integration_test.go b/management/server/http/testing/integration/networks_handler_integration_test.go index 4cb6b268b..54f204a8f 100644 --- a/management/server/http/testing/integration/networks_handler_integration_test.go +++ b/management/server/http/testing/integration/networks_handler_integration_test.go @@ -1170,13 +1170,17 @@ func Test_NetworkRouters_Create(t *testing.T) { Metric: 100, Enabled: true, }, - expectedStatus: http.StatusOK, - verifyResponse: func(t *testing.T, router *api.NetworkRouter) { - t.Helper() - assert.NotEmpty(t, router.Id) - assert.Equal(t, peerID, *router.Peer) - assert.Equal(t, 1, len(*router.PeerGroups)) + expectedStatus: http.StatusBadRequest, + }, + { + name: "Create router without peer and peer_groups", + networkId: "testNetworkId", + requestBody: &api.NetworkRouterRequest{ + Masquerade: true, + Metric: 100, + Enabled: true, }, + expectedStatus: http.StatusBadRequest, }, { name: "Create router in non-existing network", @@ -1341,13 +1345,18 @@ func Test_NetworkRouters_Update(t *testing.T) { Metric: 100, Enabled: true, }, - expectedStatus: http.StatusOK, - verifyResponse: func(t *testing.T, router *api.NetworkRouter) { - t.Helper() - assert.Equal(t, "testRouterId", router.Id) - assert.Equal(t, peerID, *router.Peer) - assert.Equal(t, 1, len(*router.PeerGroups)) + expectedStatus: http.StatusBadRequest, + }, + { + name: "Update router without peer and peer_groups", + networkId: "testNetworkId", + routerId: "testRouterId", + requestBody: &api.NetworkRouterRequest{ + Masquerade: true, + Metric: 100, + Enabled: true, }, + expectedStatus: http.StatusBadRequest, }, } diff --git a/management/server/networks/routers/types/router.go b/management/server/networks/routers/types/router.go index e90c61a97..1293a9934 100644 --- a/management/server/networks/routers/types/router.go +++ b/management/server/networks/routers/types/router.go @@ -21,11 +21,7 @@ type NetworkRouter struct { } func NewNetworkRouter(accountID string, networkID string, peer string, peerGroups []string, masquerade bool, metric int, enabled bool) (*NetworkRouter, error) { - if peer != "" && len(peerGroups) > 0 { - return nil, errors.New("peer and peerGroups cannot be set at the same time") - } - - return &NetworkRouter{ + r := &NetworkRouter{ ID: xid.New().String(), AccountID: accountID, NetworkID: networkID, @@ -34,7 +30,25 @@ func NewNetworkRouter(accountID string, networkID string, peer string, peerGroup Masquerade: masquerade, Metric: metric, Enabled: enabled, - }, nil + } + + if err := r.Validate(); err != nil { + return nil, err + } + + return r, nil +} + +func (n *NetworkRouter) Validate() error { + if n.Peer != "" && len(n.PeerGroups) > 0 { + return errors.New("peer and peer_groups cannot be set at the same time") + } + + if n.Peer == "" && len(n.PeerGroups) == 0 { + return errors.New("either peer or peer_groups must be provided") + } + + return nil } func (n *NetworkRouter) ToAPIResponse() *api.NetworkRouter { diff --git a/management/server/networks/routers/types/router_test.go b/management/server/networks/routers/types/router_test.go index 5801e3bfa..a2f2fe6e3 100644 --- a/management/server/networks/routers/types/router_test.go +++ b/management/server/networks/routers/types/router_test.go @@ -38,7 +38,7 @@ func TestNewNetworkRouter(t *testing.T) { expectedError: false, }, { - name: "Valid with no peer or peerGroups", + name: "Invalid with no peer or peerGroups", networkID: "network-3", accountID: "account-3", peer: "", @@ -46,7 +46,18 @@ func TestNewNetworkRouter(t *testing.T) { masquerade: true, metric: 300, enabled: true, - expectedError: false, + expectedError: true, + }, + { + name: "Invalid with empty peerGroups slice", + networkID: "network-5", + accountID: "account-5", + peer: "", + peerGroups: []string{}, + masquerade: true, + metric: 500, + enabled: true, + expectedError: true, }, // Invalid cases From 6b540d145c09ee26b95dc98f98a113b5f7d3ddf8 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 16 Apr 2026 21:02:31 +0900 Subject: [PATCH 316/374] [client] Add --disable-networks flag to block network selection (#5896) --- client/cmd/root.go | 1 + client/cmd/service.go | 1 + client/cmd/service_controller.go | 2 +- client/cmd/service_installer.go | 4 ++++ client/cmd/service_params.go | 6 ++++++ client/cmd/service_params_test.go | 1 + client/cmd/testutil_test.go | 2 +- client/proto/daemon.pb.go | 13 +++++++++++-- client/proto/daemon.proto | 1 + client/server/network.go | 14 ++++++++++++++ client/server/server.go | 6 +++++- client/server/server_test.go | 6 +++--- client/server/setconfig_test.go | 2 +- client/ui/client_ui.go | 24 +++++++++++++++++++----- 14 files changed, 69 insertions(+), 14 deletions(-) diff --git a/client/cmd/root.go b/client/cmd/root.go index aa5b98dfd..c872fe9f6 100644 --- a/client/cmd/root.go +++ b/client/cmd/root.go @@ -75,6 +75,7 @@ var ( mtu uint16 profilesDisabled bool updateSettingsDisabled bool + networksDisabled bool rootCmd = &cobra.Command{ Use: "netbird", diff --git a/client/cmd/service.go b/client/cmd/service.go index 5871d2b8a..f1123ce8c 100644 --- a/client/cmd/service.go +++ b/client/cmd/service.go @@ -44,6 +44,7 @@ func init() { serviceCmd.AddCommand(runCmd, startCmd, stopCmd, restartCmd, svcStatusCmd, installCmd, uninstallCmd, reconfigureCmd, resetParamsCmd) serviceCmd.PersistentFlags().BoolVar(&profilesDisabled, "disable-profiles", false, "Disables profiles feature. If enabled, the client will not be able to change or edit any profile. To persist this setting, use: netbird service install --disable-profiles") serviceCmd.PersistentFlags().BoolVar(&updateSettingsDisabled, "disable-update-settings", false, "Disables update settings feature. If enabled, the client will not be able to change or edit any settings. To persist this setting, use: netbird service install --disable-update-settings") + serviceCmd.PersistentFlags().BoolVar(&networksDisabled, "disable-networks", false, "Disables network selection. If enabled, the client will not allow listing, selecting, or deselecting networks. To persist, use: netbird service install --disable-networks") rootCmd.PersistentFlags().StringVarP(&serviceName, "service", "s", defaultServiceName, "Netbird system service name") serviceEnvDesc := `Sets extra environment variables for the service. ` + diff --git a/client/cmd/service_controller.go b/client/cmd/service_controller.go index 5fe318ddf..0943b6184 100644 --- a/client/cmd/service_controller.go +++ b/client/cmd/service_controller.go @@ -61,7 +61,7 @@ func (p *program) Start(svc service.Service) error { } } - serverInstance := server.New(p.ctx, util.FindFirstLogPath(logFiles), configPath, profilesDisabled, updateSettingsDisabled) + serverInstance := server.New(p.ctx, util.FindFirstLogPath(logFiles), configPath, profilesDisabled, updateSettingsDisabled, networksDisabled) if err := serverInstance.Start(); err != nil { log.Fatalf("failed to start daemon: %v", err) } diff --git a/client/cmd/service_installer.go b/client/cmd/service_installer.go index 28770ea16..5ada6f633 100644 --- a/client/cmd/service_installer.go +++ b/client/cmd/service_installer.go @@ -59,6 +59,10 @@ func buildServiceArguments() []string { args = append(args, "--disable-update-settings") } + if networksDisabled { + args = append(args, "--disable-networks") + } + return args } diff --git a/client/cmd/service_params.go b/client/cmd/service_params.go index 47f4d2367..5a86aebc6 100644 --- a/client/cmd/service_params.go +++ b/client/cmd/service_params.go @@ -28,6 +28,7 @@ type serviceParams struct { LogFiles []string `json:"log_files,omitempty"` DisableProfiles bool `json:"disable_profiles,omitempty"` DisableUpdateSettings bool `json:"disable_update_settings,omitempty"` + DisableNetworks bool `json:"disable_networks,omitempty"` ServiceEnvVars map[string]string `json:"service_env_vars,omitempty"` } @@ -78,6 +79,7 @@ func currentServiceParams() *serviceParams { LogFiles: logFiles, DisableProfiles: profilesDisabled, DisableUpdateSettings: updateSettingsDisabled, + DisableNetworks: networksDisabled, } if len(serviceEnvVars) > 0 { @@ -142,6 +144,10 @@ func applyServiceParams(cmd *cobra.Command, params *serviceParams) { updateSettingsDisabled = params.DisableUpdateSettings } + if !serviceCmd.PersistentFlags().Changed("disable-networks") { + networksDisabled = params.DisableNetworks + } + applyServiceEnvParams(cmd, params) } diff --git a/client/cmd/service_params_test.go b/client/cmd/service_params_test.go index b01f85f3d..7e04e5abe 100644 --- a/client/cmd/service_params_test.go +++ b/client/cmd/service_params_test.go @@ -535,6 +535,7 @@ func fieldToGlobalVar(field string) string { "LogFiles": "logFiles", "DisableProfiles": "profilesDisabled", "DisableUpdateSettings": "updateSettingsDisabled", + "DisableNetworks": "networksDisabled", "ServiceEnvVars": "serviceEnvVars", } if v, ok := m[field]; ok { diff --git a/client/cmd/testutil_test.go b/client/cmd/testutil_test.go index 4bda33e65..5c6926f04 100644 --- a/client/cmd/testutil_test.go +++ b/client/cmd/testutil_test.go @@ -152,7 +152,7 @@ func startClientDaemon( s := grpc.NewServer() server := client.New(ctx, - "", "", false, false) + "", "", false, false, false) if err := server.Start(); err != nil { t.Fatal(err) } diff --git a/client/proto/daemon.pb.go b/client/proto/daemon.pb.go index fa0b2f93b..6506307d3 100644 --- a/client/proto/daemon.pb.go +++ b/client/proto/daemon.pb.go @@ -4979,6 +4979,7 @@ type GetFeaturesResponse struct { state protoimpl.MessageState `protogen:"open.v1"` DisableProfiles bool `protobuf:"varint,1,opt,name=disable_profiles,json=disableProfiles,proto3" json:"disable_profiles,omitempty"` DisableUpdateSettings bool `protobuf:"varint,2,opt,name=disable_update_settings,json=disableUpdateSettings,proto3" json:"disable_update_settings,omitempty"` + DisableNetworks bool `protobuf:"varint,3,opt,name=disable_networks,json=disableNetworks,proto3" json:"disable_networks,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -5027,6 +5028,13 @@ func (x *GetFeaturesResponse) GetDisableUpdateSettings() bool { return false } +func (x *GetFeaturesResponse) GetDisableNetworks() bool { + if x != nil { + return x.DisableNetworks + } + return false +} + type TriggerUpdateRequest struct { state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields @@ -6472,10 +6480,11 @@ const file_daemon_proto_rawDesc = "" + "\f_profileNameB\v\n" + "\t_username\"\x10\n" + "\x0eLogoutResponse\"\x14\n" + - "\x12GetFeaturesRequest\"x\n" + + "\x12GetFeaturesRequest\"\xa3\x01\n" + "\x13GetFeaturesResponse\x12)\n" + "\x10disable_profiles\x18\x01 \x01(\bR\x0fdisableProfiles\x126\n" + - "\x17disable_update_settings\x18\x02 \x01(\bR\x15disableUpdateSettings\"\x16\n" + + "\x17disable_update_settings\x18\x02 \x01(\bR\x15disableUpdateSettings\x12)\n" + + "\x10disable_networks\x18\x03 \x01(\bR\x0fdisableNetworks\"\x16\n" + "\x14TriggerUpdateRequest\"M\n" + "\x15TriggerUpdateResponse\x12\x18\n" + "\asuccess\x18\x01 \x01(\bR\asuccess\x12\x1a\n" + diff --git a/client/proto/daemon.proto b/client/proto/daemon.proto index 89302c8c3..19976660c 100644 --- a/client/proto/daemon.proto +++ b/client/proto/daemon.proto @@ -727,6 +727,7 @@ message GetFeaturesRequest{} message GetFeaturesResponse{ bool disable_profiles = 1; bool disable_update_settings = 2; + bool disable_networks = 3; } message TriggerUpdateRequest {} diff --git a/client/server/network.go b/client/server/network.go index bb1cce56c..76c5af40e 100644 --- a/client/server/network.go +++ b/client/server/network.go @@ -9,6 +9,8 @@ import ( "strings" "golang.org/x/exp/maps" + "google.golang.org/grpc/codes" + gstatus "google.golang.org/grpc/status" "github.com/netbirdio/netbird/client/proto" "github.com/netbirdio/netbird/route" @@ -27,6 +29,10 @@ func (s *Server) ListNetworks(context.Context, *proto.ListNetworksRequest) (*pro s.mutex.Lock() defer s.mutex.Unlock() + if s.networksDisabled { + return nil, gstatus.Errorf(codes.Unavailable, errNetworksDisabled) + } + if s.connectClient == nil { return nil, fmt.Errorf("not connected") } @@ -118,6 +124,10 @@ func (s *Server) SelectNetworks(_ context.Context, req *proto.SelectNetworksRequ s.mutex.Lock() defer s.mutex.Unlock() + if s.networksDisabled { + return nil, gstatus.Errorf(codes.Unavailable, errNetworksDisabled) + } + if s.connectClient == nil { return nil, fmt.Errorf("not connected") } @@ -164,6 +174,10 @@ func (s *Server) DeselectNetworks(_ context.Context, req *proto.SelectNetworksRe s.mutex.Lock() defer s.mutex.Unlock() + if s.networksDisabled { + return nil, gstatus.Errorf(codes.Unavailable, errNetworksDisabled) + } + if s.connectClient == nil { return nil, fmt.Errorf("not connected") } diff --git a/client/server/server.go b/client/server/server.go index e12b6df5b..70e4c342f 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -53,6 +53,7 @@ const ( errRestoreResidualState = "failed to restore residual state: %v" errProfilesDisabled = "profiles are disabled, you cannot use this feature without profiles enabled" errUpdateSettingsDisabled = "update settings are disabled, you cannot use this feature without update settings enabled" + errNetworksDisabled = "network selection is disabled by the administrator" ) var ErrServiceNotUp = errors.New("service is not up") @@ -88,6 +89,7 @@ type Server struct { profileManager *profilemanager.ServiceManager profilesDisabled bool updateSettingsDisabled bool + networksDisabled bool sleepHandler *sleephandler.SleepHandler @@ -104,7 +106,7 @@ type oauthAuthFlow struct { } // New server instance constructor. -func New(ctx context.Context, logFile string, configFile string, profilesDisabled bool, updateSettingsDisabled bool) *Server { +func New(ctx context.Context, logFile string, configFile string, profilesDisabled bool, updateSettingsDisabled bool, networksDisabled bool) *Server { s := &Server{ rootCtx: ctx, logFile: logFile, @@ -113,6 +115,7 @@ func New(ctx context.Context, logFile string, configFile string, profilesDisable profileManager: profilemanager.NewServiceManager(configFile), profilesDisabled: profilesDisabled, updateSettingsDisabled: updateSettingsDisabled, + networksDisabled: networksDisabled, jwtCache: newJWTCache(), } agent := &serverAgent{s} @@ -1628,6 +1631,7 @@ func (s *Server) GetFeatures(ctx context.Context, msg *proto.GetFeaturesRequest) features := &proto.GetFeaturesResponse{ DisableProfiles: s.checkProfilesDisabled(), DisableUpdateSettings: s.checkUpdateSettingsDisabled(), + DisableNetworks: s.networksDisabled, } return features, nil diff --git a/client/server/server_test.go b/client/server/server_test.go index 6de23d501..c5148104f 100644 --- a/client/server/server_test.go +++ b/client/server/server_test.go @@ -103,7 +103,7 @@ func TestConnectWithRetryRuns(t *testing.T) { t.Fatalf("failed to set active profile state: %v", err) } - s := New(ctx, "debug", "", false, false) + s := New(ctx, "debug", "", false, false, false) s.config = config @@ -164,7 +164,7 @@ func TestServer_Up(t *testing.T) { t.Fatalf("failed to set active profile state: %v", err) } - s := New(ctx, "console", "", false, false) + s := New(ctx, "console", "", false, false, false) err = s.Start() require.NoError(t, err) @@ -234,7 +234,7 @@ func TestServer_SubcribeEvents(t *testing.T) { t.Fatalf("failed to set active profile state: %v", err) } - s := New(ctx, "console", "", false, false) + s := New(ctx, "console", "", false, false, false) err = s.Start() require.NoError(t, err) diff --git a/client/server/setconfig_test.go b/client/server/setconfig_test.go index 8e360175d..7f6847c43 100644 --- a/client/server/setconfig_test.go +++ b/client/server/setconfig_test.go @@ -53,7 +53,7 @@ func TestSetConfig_AllFieldsSaved(t *testing.T) { require.NoError(t, err) ctx := context.Background() - s := New(ctx, "console", "", false, false) + s := New(ctx, "console", "", false, false, false) rosenpassEnabled := true rosenpassPermissive := true diff --git a/client/ui/client_ui.go b/client/ui/client_ui.go index b1e0aec41..c149b2152 100644 --- a/client/ui/client_ui.go +++ b/client/ui/client_ui.go @@ -314,6 +314,7 @@ type serviceClient struct { lastNotifiedVersion string settingsEnabled bool profilesEnabled bool + networksEnabled bool showNetworks bool wNetworks fyne.Window wProfiles fyne.Window @@ -368,6 +369,7 @@ func newServiceClient(args *newServiceClientArgs) *serviceClient { showAdvancedSettings: args.showSettings, showNetworks: args.showNetworks, + networksEnabled: true, } s.eventHandler = newEventHandler(s) @@ -920,8 +922,10 @@ func (s *serviceClient) updateStatus() error { s.mStatus.SetIcon(s.icConnectedDot) s.mUp.Disable() s.mDown.Enable() - s.mNetworks.Enable() - s.mExitNode.Enable() + if s.networksEnabled { + s.mNetworks.Enable() + s.mExitNode.Enable() + } s.startExitNodeRefresh() systrayIconState = true case status.Status == string(internal.StatusConnecting): @@ -1093,14 +1097,14 @@ func (s *serviceClient) onTrayReady() { s.getSrvConfig() time.Sleep(100 * time.Millisecond) // To prevent race condition caused by systray not being fully initialized and ignoring setIcon for { + // Check features before status so menus respect disable flags before being enabled + s.checkAndUpdateFeatures() + err := s.updateStatus() if err != nil { log.Errorf("error while updating status: %v", err) } - // Check features periodically to handle daemon restarts - s.checkAndUpdateFeatures() - time.Sleep(2 * time.Second) } }() @@ -1299,6 +1303,16 @@ func (s *serviceClient) checkAndUpdateFeatures() { s.mProfile.setEnabled(profilesEnabled) } } + + // Update networks and exit node menus based on current features + s.networksEnabled = features == nil || !features.DisableNetworks + if s.networksEnabled && s.connected { + s.mNetworks.Enable() + s.mExitNode.Enable() + } else { + s.mNetworks.Disable() + s.mExitNode.Disable() + } } // getFeatures from the daemon to determine which features are enabled/disabled. From d4c61ed38b58da67d0561ff50eccb6042e039014 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 16 Apr 2026 21:02:52 +0900 Subject: [PATCH 317/374] [client] Add mangle FORWARD guard to prevent Docker DNAT bypass of ACL rules (#5697) --- client/firewall/iptables/acl_linux.go | 37 +++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/client/firewall/iptables/acl_linux.go b/client/firewall/iptables/acl_linux.go index d83798f09..e629f7881 100644 --- a/client/firewall/iptables/acl_linux.go +++ b/client/firewall/iptables/acl_linux.go @@ -21,6 +21,10 @@ const ( // rules chains contains the effective ACL rules chainNameInputRules = "NETBIRD-ACL-INPUT" + + // mangleFwdKey is the entries map key for mangle FORWARD guard rules that prevent + // external DNAT from bypassing ACL rules. + mangleFwdKey = "MANGLE-FORWARD" ) type aclEntries map[string][][]string @@ -274,6 +278,12 @@ func (m *aclManager) cleanChains() error { } } + for _, rule := range m.entries[mangleFwdKey] { + if err := m.iptablesClient.DeleteIfExists(tableMangle, chainFORWARD, rule...); err != nil { + log.Errorf("failed to delete mangle FORWARD guard rule: %v, %s", rule, err) + } + } + for _, ipsetName := range m.ipsetStore.ipsetNames() { if err := m.flushIPSet(ipsetName); err != nil { if errors.Is(err, ipset.ErrSetNotExist) { @@ -303,6 +313,10 @@ func (m *aclManager) createDefaultChains() error { } for chainName, rules := range m.entries { + // mangle FORWARD guard rules are handled separately below + if chainName == mangleFwdKey { + continue + } for _, rule := range rules { if err := m.iptablesClient.InsertUnique(tableName, chainName, 1, rule...); err != nil { log.Debugf("failed to create input chain jump rule: %s", err) @@ -322,6 +336,13 @@ func (m *aclManager) createDefaultChains() error { } clear(m.optionalEntries) + // Insert mangle FORWARD guard rules to prevent external DNAT bypass. + for _, rule := range m.entries[mangleFwdKey] { + if err := m.iptablesClient.AppendUnique(tableMangle, chainFORWARD, rule...); err != nil { + log.Errorf("failed to add mangle FORWARD guard rule: %v", err) + } + } + return nil } @@ -343,6 +364,22 @@ func (m *aclManager) seedInitialEntries() { m.appendToEntries("FORWARD", []string{"-o", m.wgIface.Name(), "-j", chainRTFWDOUT}) m.appendToEntries("FORWARD", []string{"-i", m.wgIface.Name(), "-j", chainRTFWDIN}) + + // Mangle FORWARD guard: when external DNAT redirects traffic from the wg interface, it + // traverses FORWARD instead of INPUT, bypassing ACL rules. ACCEPT rules in filter FORWARD + // can be inserted above ours. Mangle runs before filter, so these guard rules enforce the + // ACL mark check where it cannot be overridden. + m.appendToEntries(mangleFwdKey, []string{ + "-i", m.wgIface.Name(), + "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", + "-j", "ACCEPT", + }) + m.appendToEntries(mangleFwdKey, []string{ + "-i", m.wgIface.Name(), + "-m", "conntrack", "--ctstate", "DNAT", + "-m", "mark", "!", "--mark", fmt.Sprintf("%#x", nbnet.PreroutingFwmarkRedirected), + "-j", "DROP", + }) } func (m *aclManager) seedInitialOptionalEntries() { From 7e4542adde54ebe71a3a47f5228c5915c9c1052a Mon Sep 17 00:00:00 2001 From: Michael Uray <25169478+MichaelUray@users.noreply.github.com> Date: Thu, 16 Apr 2026 14:25:55 +0200 Subject: [PATCH 318/374] fix(client): populate NetworkAddresses on iOS for posture checks (#5900) The iOS GetInfo() function never populated NetworkAddresses, causing the peer_network_range_check posture check to fail for all iOS clients. This adds the same networkAddresses() call that macOS, Linux, Windows, and FreeBSD already use. Fixes: #3968 Fixes: #4657 --- client/system/info_ios.go | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/client/system/info_ios.go b/client/system/info_ios.go index 322609db4..81936cf1d 100644 --- a/client/system/info_ios.go +++ b/client/system/info_ios.go @@ -4,10 +4,12 @@ import ( "context" "runtime" + log "github.com/sirupsen/logrus" + "github.com/netbirdio/netbird/version" ) -// UpdateStaticInfoAsync is a no-op on Android as there is no static info to update +// UpdateStaticInfoAsync is a no-op on iOS as there is no static info to update func UpdateStaticInfoAsync() { // do nothing } @@ -15,11 +17,24 @@ func UpdateStaticInfoAsync() { // GetInfo retrieves and parses the system information func GetInfo(ctx context.Context) *Info { - // Convert fixed-size byte arrays to Go strings sysName := extractOsName(ctx, "sysName") swVersion := extractOsVersion(ctx, "swVersion") - gio := &Info{Kernel: sysName, OSVersion: swVersion, Platform: "unknown", OS: sysName, GoOS: runtime.GOOS, CPUs: runtime.NumCPU(), KernelVersion: swVersion} + addrs, err := networkAddresses() + if err != nil { + log.Warnf("failed to discover network addresses: %s", err) + } + + gio := &Info{ + Kernel: sysName, + OSVersion: swVersion, + Platform: "unknown", + OS: sysName, + GoOS: runtime.GOOS, + CPUs: runtime.NumCPU(), + KernelVersion: swVersion, + NetworkAddresses: addrs, + } gio.Hostname = extractDeviceName(ctx, "hostname") gio.NetbirdVersion = version.NetbirdVersion() gio.UIVersion = extractUserAgent(ctx) From 633dde8d1f5985afc52071462c42857cb844b9ac Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Thu, 16 Apr 2026 22:30:36 +0900 Subject: [PATCH 319/374] [client] Reconnect conntrack netlink listener on error (#5885) --- .../internal/netflow/conntrack/conntrack.go | 170 +++++++++++-- .../netflow/conntrack/conntrack_test.go | 224 ++++++++++++++++++ 2 files changed, 373 insertions(+), 21 deletions(-) create mode 100644 client/internal/netflow/conntrack/conntrack_test.go diff --git a/client/internal/netflow/conntrack/conntrack.go b/client/internal/netflow/conntrack/conntrack.go index a4ffa3a25..2420b1fdf 100644 --- a/client/internal/netflow/conntrack/conntrack.go +++ b/client/internal/netflow/conntrack/conntrack.go @@ -7,7 +7,9 @@ import ( "fmt" "net/netip" "sync" + "time" + "github.com/cenkalti/backoff/v4" "github.com/google/uuid" log "github.com/sirupsen/logrus" nfct "github.com/ti-mo/conntrack" @@ -17,31 +19,64 @@ import ( nbnet "github.com/netbirdio/netbird/client/net" ) -const defaultChannelSize = 100 +const ( + defaultChannelSize = 100 + reconnectInitInterval = 5 * time.Second + reconnectMaxInterval = 5 * time.Minute + reconnectRandomization = 0.5 +) + +// listener abstracts a netlink conntrack connection for testability. +type listener interface { + Listen(evChan chan<- nfct.Event, numWorkers uint8, groups []netfilter.NetlinkGroup) (chan error, error) + Close() error +} // ConnTrack manages kernel-based conntrack events type ConnTrack struct { flowLogger nftypes.FlowLogger iface nftypes.IFaceMapper - conn *nfct.Conn + conn listener mux sync.Mutex + dial func() (listener, error) instanceID uuid.UUID started bool done chan struct{} sysctlModified bool } +// DialFunc is a constructor for netlink conntrack connections. +type DialFunc func() (listener, error) + +// Option configures a ConnTrack instance. +type Option func(*ConnTrack) + +// WithDialer overrides the default netlink dialer, primarily for testing. +func WithDialer(dial DialFunc) Option { + return func(c *ConnTrack) { + c.dial = dial + } +} + +func defaultDial() (listener, error) { + return nfct.Dial(nil) +} + // New creates a new connection tracker that interfaces with the kernel's conntrack system -func New(flowLogger nftypes.FlowLogger, iface nftypes.IFaceMapper) *ConnTrack { - return &ConnTrack{ +func New(flowLogger nftypes.FlowLogger, iface nftypes.IFaceMapper, opts ...Option) *ConnTrack { + ct := &ConnTrack{ flowLogger: flowLogger, iface: iface, instanceID: uuid.New(), - started: false, + dial: defaultDial, done: make(chan struct{}, 1), } + for _, opt := range opts { + opt(ct) + } + return ct } // Start begins tracking connections by listening for conntrack events. This method is idempotent. @@ -59,8 +94,9 @@ func (c *ConnTrack) Start(enableCounters bool) error { c.EnableAccounting() } - conn, err := nfct.Dial(nil) + conn, err := c.dial() if err != nil { + c.RestoreAccounting() return fmt.Errorf("dial conntrack: %w", err) } c.conn = conn @@ -76,9 +112,16 @@ func (c *ConnTrack) Start(enableCounters bool) error { log.Errorf("Error closing conntrack connection: %v", err) } c.conn = nil + c.RestoreAccounting() return fmt.Errorf("start conntrack listener: %w", err) } + // Drain any stale stop signal from a previous cycle. + select { + case <-c.done: + default: + } + c.started = true go c.receiverRoutine(events, errChan) @@ -92,17 +135,98 @@ func (c *ConnTrack) receiverRoutine(events chan nfct.Event, errChan chan error) case event := <-events: c.handleEvent(event) case err := <-errChan: - log.Errorf("Error from conntrack event listener: %v", err) - if err := c.conn.Close(); err != nil { - log.Errorf("Error closing conntrack connection: %v", err) + if events, errChan = c.handleListenerError(err); events == nil { + return } - return case <-c.done: return } } } +// handleListenerError closes the failed connection and attempts to reconnect. +// Returns new channels on success, or nil if shutdown was requested. +func (c *ConnTrack) handleListenerError(err error) (chan nfct.Event, chan error) { + log.Warnf("conntrack event listener failed: %v", err) + c.closeConn() + return c.reconnect() +} + +func (c *ConnTrack) closeConn() { + c.mux.Lock() + defer c.mux.Unlock() + + if c.conn != nil { + if err := c.conn.Close(); err != nil { + log.Debugf("close conntrack connection: %v", err) + } + c.conn = nil + } +} + +// reconnect attempts to re-establish the conntrack netlink listener with exponential backoff. +// Returns new channels on success, or nil if shutdown was requested. +func (c *ConnTrack) reconnect() (chan nfct.Event, chan error) { + bo := &backoff.ExponentialBackOff{ + InitialInterval: reconnectInitInterval, + RandomizationFactor: reconnectRandomization, + Multiplier: backoff.DefaultMultiplier, + MaxInterval: reconnectMaxInterval, + MaxElapsedTime: 0, // retry indefinitely + Clock: backoff.SystemClock, + } + bo.Reset() + + for { + delay := bo.NextBackOff() + log.Infof("reconnecting conntrack listener in %s", delay) + + select { + case <-c.done: + c.mux.Lock() + c.started = false + c.mux.Unlock() + return nil, nil + case <-time.After(delay): + } + + conn, err := c.dial() + if err != nil { + log.Warnf("reconnect conntrack dial: %v", err) + continue + } + + events := make(chan nfct.Event, defaultChannelSize) + errChan, err := conn.Listen(events, 1, []netfilter.NetlinkGroup{ + netfilter.GroupCTNew, + netfilter.GroupCTDestroy, + }) + if err != nil { + log.Warnf("reconnect conntrack listen: %v", err) + if closeErr := conn.Close(); closeErr != nil { + log.Debugf("close conntrack connection: %v", closeErr) + } + continue + } + + c.mux.Lock() + if !c.started { + // Stop() ran while we were reconnecting. + c.mux.Unlock() + if closeErr := conn.Close(); closeErr != nil { + log.Debugf("close conntrack connection: %v", closeErr) + } + return nil, nil + } + c.conn = conn + c.mux.Unlock() + + log.Infof("conntrack listener reconnected successfully") + + return events, errChan + } +} + // Stop stops the connection tracking. This method is idempotent. func (c *ConnTrack) Stop() { c.mux.Lock() @@ -136,23 +260,27 @@ func (c *ConnTrack) Close() error { c.mux.Lock() defer c.mux.Unlock() - if c.started { - select { - case c.done <- struct{}{}: - default: - } + if !c.started { + return nil } + select { + case c.done <- struct{}{}: + default: + } + + c.started = false + + var closeErr error if c.conn != nil { - err := c.conn.Close() + closeErr = c.conn.Close() c.conn = nil - c.started = false + } - c.RestoreAccounting() + c.RestoreAccounting() - if err != nil { - return fmt.Errorf("close conntrack: %w", err) - } + if closeErr != nil { + return fmt.Errorf("close conntrack: %w", closeErr) } return nil diff --git a/client/internal/netflow/conntrack/conntrack_test.go b/client/internal/netflow/conntrack/conntrack_test.go new file mode 100644 index 000000000..35ceec90d --- /dev/null +++ b/client/internal/netflow/conntrack/conntrack_test.go @@ -0,0 +1,224 @@ +//go:build linux && !android + +package conntrack + +import ( + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + nfct "github.com/ti-mo/conntrack" + "github.com/ti-mo/netfilter" +) + +type mockListener struct { + errChan chan error + closed atomic.Bool + closedCh chan struct{} +} + +func newMockListener() *mockListener { + return &mockListener{ + errChan: make(chan error, 1), + closedCh: make(chan struct{}), + } +} + +func (m *mockListener) Listen(evChan chan<- nfct.Event, _ uint8, _ []netfilter.NetlinkGroup) (chan error, error) { + return m.errChan, nil +} + +func (m *mockListener) Close() error { + if m.closed.CompareAndSwap(false, true) { + close(m.closedCh) + } + return nil +} + +func TestReconnectAfterError(t *testing.T) { + first := newMockListener() + second := newMockListener() + third := newMockListener() + listeners := []*mockListener{first, second, third} + callCount := atomic.Int32{} + + ct := New(nil, nil, WithDialer(func() (listener, error) { + n := int(callCount.Add(1)) - 1 + return listeners[n], nil + })) + + err := ct.Start(false) + require.NoError(t, err) + + // Inject an error on the first listener. + first.errChan <- assert.AnError + + // Wait for reconnect to complete. + require.Eventually(t, func() bool { + return callCount.Load() >= 2 + }, 15*time.Second, 100*time.Millisecond, "reconnect should dial a new connection") + + // The first connection must have been closed. + select { + case <-first.closedCh: + case <-time.After(2 * time.Second): + t.Fatal("first connection was not closed") + } + + // Verify the receiver is still running by injecting and handling a second error. + second.errChan <- assert.AnError + + require.Eventually(t, func() bool { + return callCount.Load() >= 3 + }, 15*time.Second, 100*time.Millisecond, "second reconnect should succeed") + + ct.Stop() +} + +func TestStopDuringReconnectBackoff(t *testing.T) { + mock := newMockListener() + + ct := New(nil, nil, WithDialer(func() (listener, error) { + return mock, nil + })) + + err := ct.Start(false) + require.NoError(t, err) + + // Trigger an error so the receiver enters reconnect. + mock.errChan <- assert.AnError + + // Wait for the error handler to close the old listener before calling Stop. + select { + case <-mock.closedCh: + case <-time.After(5 * time.Second): + t.Fatal("timed out waiting for reconnect to start") + } + + // Stop while reconnecting. + ct.Stop() + + ct.mux.Lock() + assert.False(t, ct.started, "started should be false after Stop") + assert.Nil(t, ct.conn, "conn should be nil after Stop") + ct.mux.Unlock() +} + +func TestStopRaceWithReconnectDial(t *testing.T) { + first := newMockListener() + dialStarted := make(chan struct{}) + dialProceed := make(chan struct{}) + second := newMockListener() + callCount := atomic.Int32{} + + ct := New(nil, nil, WithDialer(func() (listener, error) { + n := callCount.Add(1) + if n == 1 { + return first, nil + } + // Second dial: signal that we're in progress, wait for test to call Stop. + close(dialStarted) + <-dialProceed + return second, nil + })) + + err := ct.Start(false) + require.NoError(t, err) + + // Trigger error to enter reconnect. + first.errChan <- assert.AnError + + // Wait for reconnect's second dial to begin. + select { + case <-dialStarted: + case <-time.After(15 * time.Second): + t.Fatal("timed out waiting for reconnect dial") + } + + // Stop while dial is in progress (conn is nil at this point). + ct.Stop() + + // Let the dial complete. reconnect should detect started==false and close the new conn. + close(dialProceed) + + // The second connection should be closed (not leaked). + select { + case <-second.closedCh: + case <-time.After(2 * time.Second): + t.Fatal("second connection was leaked after Stop") + } + + ct.mux.Lock() + assert.False(t, ct.started) + assert.Nil(t, ct.conn) + ct.mux.Unlock() +} + +func TestCloseRaceWithReconnectDial(t *testing.T) { + first := newMockListener() + dialStarted := make(chan struct{}) + dialProceed := make(chan struct{}) + second := newMockListener() + callCount := atomic.Int32{} + + ct := New(nil, nil, WithDialer(func() (listener, error) { + n := callCount.Add(1) + if n == 1 { + return first, nil + } + close(dialStarted) + <-dialProceed + return second, nil + })) + + err := ct.Start(false) + require.NoError(t, err) + + first.errChan <- assert.AnError + + select { + case <-dialStarted: + case <-time.After(15 * time.Second): + t.Fatal("timed out waiting for reconnect dial") + } + + // Close while dial is in progress (conn is nil). + require.NoError(t, ct.Close()) + + close(dialProceed) + + // The second connection should be closed (not leaked). + select { + case <-second.closedCh: + case <-time.After(2 * time.Second): + t.Fatal("second connection was leaked after Close") + } + + ct.mux.Lock() + assert.False(t, ct.started) + assert.Nil(t, ct.conn) + ct.mux.Unlock() +} + +func TestStartIsIdempotent(t *testing.T) { + mock := newMockListener() + callCount := atomic.Int32{} + + ct := New(nil, nil, WithDialer(func() (listener, error) { + callCount.Add(1) + return mock, nil + })) + + err := ct.Start(false) + require.NoError(t, err) + + // Second Start should be a no-op. + err = ct.Start(false) + require.NoError(t, err) + + assert.Equal(t, int32(1), callCount.Load(), "dial should only be called once") + + ct.Stop() +} From 53b04e512af0d94f619e870467115c43d200830b Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Thu, 16 Apr 2026 16:04:53 +0200 Subject: [PATCH 320/374] [management] Reuse a single cache store across all management server consumers (#5889) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add support for legacy IDP cache environment variable * Centralize cache store creation to reuse a single Redis connection pool Each cache consumer (IDP cache, token store, PKCE store, secrets manager, EDR validator) was independently calling NewStore, creating separate Redis clients with their own connection pools — up to 1400 potential connections from a single management server process. Introduce a shared CacheStore() singleton on BaseServer that creates one store at boot and injects it into all consumers. Consumer constructors now receive a store.StoreInterface instead of creating their own. For Redis mode, all consumers share one connection pool (1000 max conns). For in-memory mode, all consumers share one GoCache instance. * Update management-integrations module to latest version * sync go.sum * Export `GetAddrFromEnv` to allow reuse across packages * Update management-integrations module version in go.mod and go.sum * Update management-integrations module version in go.mod and go.sum --- client/cmd/testutil_test.go | 16 ++++-- client/internal/engine_test.go | 10 +++- client/server/server_test.go | 10 +++- go.mod | 2 +- go.sum | 4 +- .../service/manager/manager_test.go | 27 +++++----- management/internals/server/boot.go | 25 +++++---- management/internals/server/controllers.go | 3 +- management/internals/server/modules.go | 2 +- .../internals/shared/grpc/onetime_token.go | 13 ++--- .../internals/shared/grpc/pkce_verifier.go | 13 ++--- .../internals/shared/grpc/proxy_test.go | 52 ++++++++----------- .../shared/grpc/validate_session_test.go | 23 +++++--- management/server/account.go | 13 ++--- management/server/account_test.go | 7 ++- management/server/cache/store.go | 25 ++++++++- management/server/dns_test.go | 9 +++- .../proxy/auth_callback_integration_test.go | 7 +-- .../testing/testing_tools/channel/channel.go | 37 ++++++------- management/server/identity_provider_test.go | 9 +++- management/server/management_proto_test.go | 9 +++- management/server/management_test.go | 10 +++- management/server/nameserver_test.go | 9 +++- management/server/peer_test.go | 25 +++++++-- management/server/route_test.go | 9 +++- proxy/management_integration_test.go | 7 +-- shared/management/client/client_test.go | 15 ++++-- 27 files changed, 252 insertions(+), 139 deletions(-) diff --git a/client/cmd/testutil_test.go b/client/cmd/testutil_test.go index 5c6926f04..d7564c353 100644 --- a/client/cmd/testutil_test.go +++ b/client/cmd/testutil_test.go @@ -13,6 +13,8 @@ import ( "github.com/netbirdio/management-integrations/integrations" + nbcache "github.com/netbirdio/netbird/management/server/cache" + "github.com/netbirdio/netbird/management/internals/controllers/network_map/controller" "github.com/netbirdio/netbird/management/internals/controllers/network_map/update_channel" "github.com/netbirdio/netbird/management/internals/modules/peers" @@ -100,9 +102,16 @@ func startManagement(t *testing.T, config *config.Config, testFile string) (*grp jobManager := job.NewJobManager(nil, store, peersmanager) - iv, _ := integrations.NewIntegratedValidator(context.Background(), peersmanager, settingsManagerMock, eventStore) + ctx := context.Background() - metrics, err := telemetry.NewDefaultAppMetrics(context.Background()) + cacheStore, err := nbcache.NewStore(ctx, 100*time.Millisecond, 300*time.Millisecond, 100) + if err != nil { + t.Fatal(err) + } + + iv, _ := integrations.NewIntegratedValidator(ctx, peersmanager, settingsManagerMock, eventStore, cacheStore) + + metrics, err := telemetry.NewDefaultAppMetrics(ctx) require.NoError(t, err) settingsMockManager := settings.NewMockManager(ctrl) @@ -113,12 +122,11 @@ func startManagement(t *testing.T, config *config.Config, testFile string) (*grp Return(&types.Settings{}, nil). AnyTimes() - ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := mgmt.NewAccountRequestBuffer(ctx, store) networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, mgmt.MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), manager.NewEphemeralManager(store, peersmanager), config) - accountManager, err := mgmt.BuildManager(context.Background(), config, store, networkMapController, jobManager, nil, "", eventStore, nil, false, iv, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false) + accountManager, err := mgmt.BuildManager(ctx, config, store, networkMapController, jobManager, nil, "", eventStore, nil, false, iv, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false, cacheStore) if err != nil { t.Fatal(err) } diff --git a/client/internal/engine_test.go b/client/internal/engine_test.go index 1f6fe384a..9fa4e51b2 100644 --- a/client/internal/engine_test.go +++ b/client/internal/engine_test.go @@ -55,6 +55,7 @@ import ( nbdns "github.com/netbirdio/netbird/dns" "github.com/netbirdio/netbird/management/server" "github.com/netbirdio/netbird/management/server/activity" + nbcache "github.com/netbirdio/netbird/management/server/cache" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/settings" @@ -1634,7 +1635,12 @@ func startManagement(t *testing.T, dataDir, testFile string) (*grpc.Server, stri peersManager := peers.NewManager(store, permissionsManager) jobManager := job.NewJobManager(nil, store, peersManager) - ia, _ := integrations.NewIntegratedValidator(context.Background(), peersManager, nil, eventStore) + cacheStore, err := nbcache.NewStore(context.Background(), 100*time.Millisecond, 300*time.Millisecond, 100) + if err != nil { + return nil, "", err + } + + ia, _ := integrations.NewIntegratedValidator(context.Background(), peersManager, nil, eventStore, cacheStore) metrics, err := telemetry.NewDefaultAppMetrics(context.Background()) require.NoError(t, err) @@ -1656,7 +1662,7 @@ func startManagement(t *testing.T, dataDir, testFile string) (*grpc.Server, stri updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := server.NewAccountRequestBuffer(context.Background(), store) networkMapController := controller.NewController(context.Background(), store, metrics, updateManager, requestBuffer, server.MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), manager.NewEphemeralManager(store, peersManager), config) - accountManager, err := server.BuildManager(context.Background(), config, store, networkMapController, jobManager, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + accountManager, err := server.BuildManager(context.Background(), config, store, networkMapController, jobManager, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false, cacheStore) if err != nil { return nil, "", err } diff --git a/client/server/server_test.go b/client/server/server_test.go index c5148104f..772997575 100644 --- a/client/server/server_test.go +++ b/client/server/server_test.go @@ -36,6 +36,7 @@ import ( daemonProto "github.com/netbirdio/netbird/client/proto" "github.com/netbirdio/netbird/management/server" "github.com/netbirdio/netbird/management/server/activity" + nbcache "github.com/netbirdio/netbird/management/server/cache" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/settings" @@ -309,7 +310,12 @@ func startManagement(t *testing.T, signalAddr string, counter *int) (*grpc.Serve jobManager := job.NewJobManager(nil, store, peersManager) - ia, _ := integrations.NewIntegratedValidator(context.Background(), peersManager, settingsManagerMock, eventStore) + cacheStore, err := nbcache.NewStore(context.Background(), 100*time.Millisecond, 300*time.Millisecond, 100) + if err != nil { + return nil, "", err + } + + ia, _ := integrations.NewIntegratedValidator(context.Background(), peersManager, settingsManagerMock, eventStore, cacheStore) metrics, err := telemetry.NewDefaultAppMetrics(context.Background()) require.NoError(t, err) @@ -320,7 +326,7 @@ func startManagement(t *testing.T, signalAddr string, counter *int) (*grpc.Serve requestBuffer := server.NewAccountRequestBuffer(context.Background(), store) peersUpdateManager := update_channel.NewPeersUpdateManager(metrics) networkMapController := controller.NewController(context.Background(), store, metrics, peersUpdateManager, requestBuffer, server.MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), manager.NewEphemeralManager(store, peersManager), config) - accountManager, err := server.BuildManager(context.Background(), config, store, networkMapController, jobManager, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false) + accountManager, err := server.BuildManager(context.Background(), config, store, networkMapController, jobManager, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false, cacheStore) if err != nil { return nil, "", err } diff --git a/go.mod b/go.mod index 76fb8b7be..32c285e47 100644 --- a/go.mod +++ b/go.mod @@ -71,7 +71,7 @@ require ( github.com/mdlayher/socket v0.5.1 github.com/miekg/dns v1.1.59 github.com/mitchellh/hashstructure/v2 v2.0.2 - github.com/netbirdio/management-integrations/integrations v0.0.0-20260210160626-df4b180c7b25 + github.com/netbirdio/management-integrations/integrations v0.0.0-20260416123949-2355d972be42 github.com/netbirdio/signal-dispatcher/dispatcher v0.0.0-20250805121659-6b4ac470ca45 github.com/oapi-codegen/runtime v1.1.2 github.com/okta/okta-sdk-golang/v2 v2.18.0 diff --git a/go.sum b/go.sum index f06f7deba..e8759d84f 100644 --- a/go.sum +++ b/go.sum @@ -453,8 +453,8 @@ github.com/netbirdio/go-netroute v0.0.0-20240611143515-f59b0e1d3944 h1:TDtJKmM6S github.com/netbirdio/go-netroute v0.0.0-20240611143515-f59b0e1d3944/go.mod h1:sHA6TRxjQ6RLbnI+3R4DZo2Eseg/iKiPRfNmcuNySVQ= github.com/netbirdio/ice/v4 v4.0.0-20250908184934-6202be846b51 h1:Ov4qdafATOgGMB1wbSuh+0aAHcwz9hdvB6VZjh1mVMI= github.com/netbirdio/ice/v4 v4.0.0-20250908184934-6202be846b51/go.mod h1:ZSIbPdBn5hePO8CpF1PekH2SfpTxg1PDhEwtbqZS7R8= -github.com/netbirdio/management-integrations/integrations v0.0.0-20260210160626-df4b180c7b25 h1:iwAq/Ncaq0etl4uAlVsbNBzC1yY52o0AmY7uCm2AMTs= -github.com/netbirdio/management-integrations/integrations v0.0.0-20260210160626-df4b180c7b25/go.mod h1:y7CxagMYzg9dgu+masRqYM7BQlOGA5Y8US85MCNFPlY= +github.com/netbirdio/management-integrations/integrations v0.0.0-20260416123949-2355d972be42 h1:F3zS5fT9xzD1OFLfcdAE+3FfyiwjGukF1hvj0jErgs8= +github.com/netbirdio/management-integrations/integrations v0.0.0-20260416123949-2355d972be42/go.mod h1:n47r67ZSPgwSmT/Z1o48JjZQW9YJ6m/6Bd/uAXkL3Pg= github.com/netbirdio/service v0.0.0-20240911161631-f62744f42502 h1:3tHlFmhTdX9axERMVN63dqyFqnvuD+EMJHzM7mNGON8= github.com/netbirdio/service v0.0.0-20240911161631-f62744f42502/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= github.com/netbirdio/signal-dispatcher/dispatcher v0.0.0-20250805121659-6b4ac470ca45 h1:ujgviVYmx243Ksy7NdSwrdGPSRNE3pb8kEDSpH0QuAQ= diff --git a/management/internals/modules/reverseproxy/service/manager/manager_test.go b/management/internals/modules/reverseproxy/service/manager/manager_test.go index 69d48f10a..54ac8ab18 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/service/manager/manager_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + cachestore "github.com/eko/gocache/lib/v4/store" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -18,6 +19,7 @@ import ( nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" "github.com/netbirdio/netbird/management/server/account" "github.com/netbirdio/netbird/management/server/activity" + nbcache "github.com/netbirdio/netbird/management/server/cache" "github.com/netbirdio/netbird/management/server/mock_server" resourcetypes "github.com/netbirdio/netbird/management/server/networks/resources/types" nbpeer "github.com/netbirdio/netbird/management/server/peer" @@ -29,6 +31,13 @@ import ( "github.com/netbirdio/netbird/shared/management/status" ) +func testCacheStore(t *testing.T) cachestore.StoreInterface { + t.Helper() + s, err := nbcache.NewStore(context.Background(), 30*time.Minute, 10*time.Minute, 100) + require.NoError(t, err) + return s +} + func TestInitializeServiceForCreate(t *testing.T) { ctx := context.Background() accountID := "test-account" @@ -422,10 +431,8 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) { newProxyServer := func(t *testing.T) *nbgrpc.ProxyServiceServer { t.Helper() - tokenStore, err := nbgrpc.NewOneTimeTokenStore(context.Background(), 1*time.Hour, 10*time.Minute, 100) - require.NoError(t, err) - pkceStore, err := nbgrpc.NewPKCEVerifierStore(context.Background(), 10*time.Minute, 10*time.Minute, 100) - require.NoError(t, err) + tokenStore := nbgrpc.NewOneTimeTokenStore(context.Background(), testCacheStore(t)) + pkceStore := nbgrpc.NewPKCEVerifierStore(context.Background(), testCacheStore(t)) srv := nbgrpc.NewProxyServiceServer(nil, tokenStore, pkceStore, nbgrpc.ProxyOIDCConfig{}, nil, nil, nil) return srv } @@ -703,10 +710,8 @@ func setupIntegrationTest(t *testing.T) (*Manager, store.Store) { }, } - tokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, 1*time.Hour, 10*time.Minute, 100) - require.NoError(t, err) - pkceStore, err := nbgrpc.NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) - require.NoError(t, err) + tokenStore := nbgrpc.NewOneTimeTokenStore(ctx, testCacheStore(t)) + pkceStore := nbgrpc.NewPKCEVerifierStore(ctx, testCacheStore(t)) proxySrv := nbgrpc.NewProxyServiceServer(nil, tokenStore, pkceStore, nbgrpc.ProxyOIDCConfig{}, nil, nil, nil) proxyController, err := proxymanager.NewGRPCController(proxySrv, noop.NewMeterProvider().Meter("")) @@ -1128,10 +1133,8 @@ func TestDeleteService_DeletesTargets(t *testing.T) { mockPerms := permissions.NewMockManager(ctrl) mockAcct := account.NewMockManager(ctrl) - tokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, 1*time.Hour, 10*time.Minute, 100) - require.NoError(t, err) - pkceStore, err := nbgrpc.NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) - require.NoError(t, err) + tokenStore := nbgrpc.NewOneTimeTokenStore(ctx, testCacheStore(t)) + pkceStore := nbgrpc.NewPKCEVerifierStore(ctx, testCacheStore(t)) proxySrv := nbgrpc.NewProxyServiceServer(nil, tokenStore, pkceStore, nbgrpc.ProxyOIDCConfig{}, nil, nil, nil) proxyController, err := proxymanager.NewGRPCController(proxySrv, noop.NewMeterProvider().Meter("")) diff --git a/management/internals/server/boot.go b/management/internals/server/boot.go index 88d37ca80..24dfb641b 100644 --- a/management/internals/server/boot.go +++ b/management/internals/server/boot.go @@ -18,6 +18,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" + cachestore "github.com/eko/gocache/lib/v4/store" "github.com/netbirdio/management-integrations/integrations" "github.com/netbirdio/netbird/encryption" @@ -26,6 +27,7 @@ import ( accesslogsmanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs/manager" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" "github.com/netbirdio/netbird/management/server/activity" + nbcache "github.com/netbirdio/netbird/management/server/cache" nbContext "github.com/netbirdio/netbird/management/server/context" nbhttp "github.com/netbirdio/netbird/management/server/http" "github.com/netbirdio/netbird/management/server/store" @@ -58,6 +60,18 @@ func (s *BaseServer) Metrics() telemetry.AppMetrics { }) } +// CacheStore returns a shared cache store backed by Redis or in-memory depending on the environment. +// All consumers should reuse this store to avoid creating multiple Redis connections. +func (s *BaseServer) CacheStore() cachestore.StoreInterface { + return Create(s, func() cachestore.StoreInterface { + cs, err := nbcache.NewStore(context.Background(), nbcache.DefaultStoreMaxTimeout, nbcache.DefaultStoreCleanupInterval, nbcache.DefaultStoreMaxConn) + if err != nil { + log.Fatalf("failed to create shared cache store: %v", err) + } + return cs + }) +} + func (s *BaseServer) Store() store.Store { return Create(s, func() store.Store { store, err := store.NewStore(context.Background(), s.Config.StoreConfig.Engine, s.Config.Datadir, s.Metrics(), false) @@ -195,10 +209,7 @@ func (s *BaseServer) proxyOIDCConfig() nbgrpc.ProxyOIDCConfig { func (s *BaseServer) ProxyTokenStore() *nbgrpc.OneTimeTokenStore { return Create(s, func() *nbgrpc.OneTimeTokenStore { - tokenStore, err := nbgrpc.NewOneTimeTokenStore(context.Background(), 5*time.Minute, 10*time.Minute, 100) - if err != nil { - log.Fatalf("failed to create proxy token store: %v", err) - } + tokenStore := nbgrpc.NewOneTimeTokenStore(context.Background(), s.CacheStore()) log.Info("One-time token store initialized for proxy authentication") return tokenStore }) @@ -206,11 +217,7 @@ func (s *BaseServer) ProxyTokenStore() *nbgrpc.OneTimeTokenStore { func (s *BaseServer) PKCEVerifierStore() *nbgrpc.PKCEVerifierStore { return Create(s, func() *nbgrpc.PKCEVerifierStore { - pkceStore, err := nbgrpc.NewPKCEVerifierStore(context.Background(), 10*time.Minute, 10*time.Minute, 100) - if err != nil { - log.Fatalf("failed to create PKCE verifier store: %v", err) - } - return pkceStore + return nbgrpc.NewPKCEVerifierStore(context.Background(), s.CacheStore()) }) } diff --git a/management/internals/server/controllers.go b/management/internals/server/controllers.go index c7eab3d19..9a8e45d33 100644 --- a/management/internals/server/controllers.go +++ b/management/internals/server/controllers.go @@ -41,7 +41,8 @@ func (s *BaseServer) IntegratedValidator() integrated_validator.IntegratedValida context.Background(), s.PeersManager(), s.SettingsManager(), - s.EventStore()) + s.EventStore(), + s.CacheStore()) if err != nil { log.Errorf("failed to create integrated peer validator: %v", err) } diff --git a/management/internals/server/modules.go b/management/internals/server/modules.go index 374ea5c81..9b2ec2989 100644 --- a/management/internals/server/modules.go +++ b/management/internals/server/modules.go @@ -100,7 +100,7 @@ func (s *BaseServer) PeersManager() peers.Manager { func (s *BaseServer) AccountManager() account.Manager { return Create(s, func() account.Manager { - accountManager, err := server.BuildManager(context.Background(), s.Config, s.Store(), s.NetworkMapController(), s.JobManager(), s.IdpManager(), s.mgmtSingleAccModeDomain, s.EventStore(), s.GeoLocationManager(), s.userDeleteFromIDPEnabled, s.IntegratedValidator(), s.Metrics(), s.ProxyController(), s.SettingsManager(), s.PermissionsManager(), s.Config.DisableDefaultPolicy) + accountManager, err := server.BuildManager(context.Background(), s.Config, s.Store(), s.NetworkMapController(), s.JobManager(), s.IdpManager(), s.mgmtSingleAccModeDomain, s.EventStore(), s.GeoLocationManager(), s.userDeleteFromIDPEnabled, s.IntegratedValidator(), s.Metrics(), s.ProxyController(), s.SettingsManager(), s.PermissionsManager(), s.Config.DisableDefaultPolicy, s.CacheStore()) if err != nil { log.Fatalf("failed to create account service: %v", err) } diff --git a/management/internals/shared/grpc/onetime_token.go b/management/internals/shared/grpc/onetime_token.go index 7999407db..acfd6eafb 100644 --- a/management/internals/shared/grpc/onetime_token.go +++ b/management/internals/shared/grpc/onetime_token.go @@ -14,8 +14,6 @@ import ( "github.com/eko/gocache/lib/v4/cache" "github.com/eko/gocache/lib/v4/store" log "github.com/sirupsen/logrus" - - nbcache "github.com/netbirdio/netbird/management/server/cache" ) type tokenMetadata struct { @@ -32,17 +30,12 @@ type OneTimeTokenStore struct { ctx context.Context } -// NewOneTimeTokenStore creates a token store with automatic backend selection -func NewOneTimeTokenStore(ctx context.Context, maxTimeout, cleanupInterval time.Duration, maxConn int) (*OneTimeTokenStore, error) { - cacheStore, err := nbcache.NewStore(ctx, maxTimeout, cleanupInterval, maxConn) - if err != nil { - return nil, fmt.Errorf("failed to create cache store: %w", err) - } - +// NewOneTimeTokenStore creates a token store using the provided shared cache store. +func NewOneTimeTokenStore(ctx context.Context, cacheStore store.StoreInterface) *OneTimeTokenStore { return &OneTimeTokenStore{ cache: cache.New[string](cacheStore), ctx: ctx, - }, nil + } } // GenerateToken creates a new cryptographically secure one-time token diff --git a/management/internals/shared/grpc/pkce_verifier.go b/management/internals/shared/grpc/pkce_verifier.go index 441e8b051..a1325256c 100644 --- a/management/internals/shared/grpc/pkce_verifier.go +++ b/management/internals/shared/grpc/pkce_verifier.go @@ -8,8 +8,6 @@ import ( "github.com/eko/gocache/lib/v4/cache" "github.com/eko/gocache/lib/v4/store" log "github.com/sirupsen/logrus" - - nbcache "github.com/netbirdio/netbird/management/server/cache" ) // PKCEVerifierStore manages PKCE verifiers for OAuth flows. @@ -19,17 +17,12 @@ type PKCEVerifierStore struct { ctx context.Context } -// NewPKCEVerifierStore creates a PKCE verifier store with automatic backend selection -func NewPKCEVerifierStore(ctx context.Context, maxTimeout, cleanupInterval time.Duration, maxConn int) (*PKCEVerifierStore, error) { - cacheStore, err := nbcache.NewStore(ctx, maxTimeout, cleanupInterval, maxConn) - if err != nil { - return nil, fmt.Errorf("failed to create cache store: %w", err) - } - +// NewPKCEVerifierStore creates a PKCE verifier store using the provided shared cache store. +func NewPKCEVerifierStore(ctx context.Context, cacheStore store.StoreInterface) *PKCEVerifierStore { return &PKCEVerifierStore{ cache: cache.New[string](cacheStore), ctx: ctx, - }, nil + } } // Store saves a PKCE verifier associated with an OAuth state parameter. diff --git a/management/internals/shared/grpc/proxy_test.go b/management/internals/shared/grpc/proxy_test.go index d5aed3dee..de4e96d93 100644 --- a/management/internals/shared/grpc/proxy_test.go +++ b/management/internals/shared/grpc/proxy_test.go @@ -9,13 +9,22 @@ import ( "testing" "time" + cachestore "github.com/eko/gocache/lib/v4/store" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" + nbcache "github.com/netbirdio/netbird/management/server/cache" "github.com/netbirdio/netbird/shared/management/proto" ) +func testCacheStore(t *testing.T) cachestore.StoreInterface { + t.Helper() + s, err := nbcache.NewStore(context.Background(), 30*time.Minute, 10*time.Minute, 100) + require.NoError(t, err) + return s +} + type testProxyController struct { mu sync.Mutex clusterProxies map[string]map[string]struct{} @@ -114,11 +123,8 @@ func drainEmpty(ch chan *proto.GetMappingUpdateResponse) bool { func TestSendServiceUpdateToCluster_UniqueTokensPerProxy(t *testing.T) { ctx := context.Background() - tokenStore, err := NewOneTimeTokenStore(ctx, time.Hour, 10*time.Minute, 100) - require.NoError(t, err) - - pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) - require.NoError(t, err) + tokenStore := NewOneTimeTokenStore(ctx, testCacheStore(t)) + pkceStore := NewPKCEVerifierStore(ctx, testCacheStore(t)) s := &ProxyServiceServer{ tokenStore: tokenStore, @@ -174,11 +180,8 @@ func TestSendServiceUpdateToCluster_UniqueTokensPerProxy(t *testing.T) { func TestSendServiceUpdateToCluster_DeleteNoToken(t *testing.T) { ctx := context.Background() - tokenStore, err := NewOneTimeTokenStore(ctx, time.Hour, 10*time.Minute, 100) - require.NoError(t, err) - - pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) - require.NoError(t, err) + tokenStore := NewOneTimeTokenStore(ctx, testCacheStore(t)) + pkceStore := NewPKCEVerifierStore(ctx, testCacheStore(t)) s := &ProxyServiceServer{ tokenStore: tokenStore, @@ -211,11 +214,8 @@ func TestSendServiceUpdateToCluster_DeleteNoToken(t *testing.T) { func TestSendServiceUpdate_UniqueTokensPerProxy(t *testing.T) { ctx := context.Background() - tokenStore, err := NewOneTimeTokenStore(ctx, time.Hour, 10*time.Minute, 100) - require.NoError(t, err) - - pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) - require.NoError(t, err) + tokenStore := NewOneTimeTokenStore(ctx, testCacheStore(t)) + pkceStore := NewPKCEVerifierStore(ctx, testCacheStore(t)) s := &ProxyServiceServer{ tokenStore: tokenStore, @@ -267,8 +267,7 @@ func generateState(s *ProxyServiceServer, redirectURL string) string { func TestOAuthState_NeverTheSame(t *testing.T) { ctx := context.Background() - pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) - require.NoError(t, err) + pkceStore := NewPKCEVerifierStore(ctx, testCacheStore(t)) s := &ProxyServiceServer{ oidcConfig: ProxyOIDCConfig{ @@ -296,8 +295,7 @@ func TestOAuthState_NeverTheSame(t *testing.T) { func TestValidateState_RejectsOldTwoPartFormat(t *testing.T) { ctx := context.Background() - pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) - require.NoError(t, err) + pkceStore := NewPKCEVerifierStore(ctx, testCacheStore(t)) s := &ProxyServiceServer{ oidcConfig: ProxyOIDCConfig{ @@ -307,7 +305,7 @@ func TestValidateState_RejectsOldTwoPartFormat(t *testing.T) { } // Old format had only 2 parts: base64(url)|hmac - err = s.pkceVerifierStore.Store("base64url|hmac", "test", 10*time.Minute) + err := s.pkceVerifierStore.Store("base64url|hmac", "test", 10*time.Minute) require.NoError(t, err) _, _, err = s.ValidateState("base64url|hmac") @@ -317,8 +315,7 @@ func TestValidateState_RejectsOldTwoPartFormat(t *testing.T) { func TestValidateState_RejectsInvalidHMAC(t *testing.T) { ctx := context.Background() - pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) - require.NoError(t, err) + pkceStore := NewPKCEVerifierStore(ctx, testCacheStore(t)) s := &ProxyServiceServer{ oidcConfig: ProxyOIDCConfig{ @@ -328,7 +325,7 @@ func TestValidateState_RejectsInvalidHMAC(t *testing.T) { } // Store with tampered HMAC - err = s.pkceVerifierStore.Store("dGVzdA==|nonce|wrong-hmac", "test", 10*time.Minute) + err := s.pkceVerifierStore.Store("dGVzdA==|nonce|wrong-hmac", "test", 10*time.Minute) require.NoError(t, err) _, _, err = s.ValidateState("dGVzdA==|nonce|wrong-hmac") @@ -337,8 +334,7 @@ func TestValidateState_RejectsInvalidHMAC(t *testing.T) { } func TestSendServiceUpdateToCluster_FiltersOnCapability(t *testing.T) { - tokenStore, err := NewOneTimeTokenStore(context.Background(), time.Hour, 10*time.Minute, 100) - require.NoError(t, err) + tokenStore := NewOneTimeTokenStore(context.Background(), testCacheStore(t)) s := &ProxyServiceServer{ tokenStore: tokenStore, @@ -410,8 +406,7 @@ func TestSendServiceUpdateToCluster_FiltersOnCapability(t *testing.T) { } func TestSendServiceUpdateToCluster_TLSNotFiltered(t *testing.T) { - tokenStore, err := NewOneTimeTokenStore(context.Background(), time.Hour, 10*time.Minute, 100) - require.NoError(t, err) + tokenStore := NewOneTimeTokenStore(context.Background(), testCacheStore(t)) s := &ProxyServiceServer{ tokenStore: tokenStore, @@ -442,8 +437,7 @@ func TestSendServiceUpdateToCluster_TLSNotFiltered(t *testing.T) { // scenario for an existing service, verifying the correct update types // reach the correct clusters. func TestServiceModifyNotifications(t *testing.T) { - tokenStore, err := NewOneTimeTokenStore(context.Background(), time.Hour, 10*time.Minute, 100) - require.NoError(t, err) + tokenStore := NewOneTimeTokenStore(context.Background(), testCacheStore(t)) newServer := func() (*ProxyServiceServer, map[string]chan *proto.GetMappingUpdateResponse) { s := &ProxyServiceServer{ diff --git a/management/internals/shared/grpc/validate_session_test.go b/management/internals/shared/grpc/validate_session_test.go index 2f77de86e..d1d7fc8b7 100644 --- a/management/internals/shared/grpc/validate_session_test.go +++ b/management/internals/shared/grpc/validate_session_test.go @@ -39,11 +39,8 @@ func setupValidateSessionTest(t *testing.T) *validateSessionTestSetup { usersManager := &testValidateSessionUsersManager{store: testStore} proxyManager := &testValidateSessionProxyManager{} - tokenStore, err := NewOneTimeTokenStore(ctx, time.Minute, 10*time.Minute, 100) - require.NoError(t, err) - - pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) - require.NoError(t, err) + tokenStore := NewOneTimeTokenStore(ctx, testCacheStore(t)) + pkceStore := NewPKCEVerifierStore(ctx, testCacheStore(t)) proxyService := NewProxyServiceServer(nil, tokenStore, pkceStore, ProxyOIDCConfig{}, nil, usersManager, proxyManager) proxyService.SetServiceManager(serviceManager) @@ -327,7 +324,7 @@ func (m *testValidateSessionServiceManager) GetActiveClusters(_ context.Context, type testValidateSessionProxyManager struct{} -func (m *testValidateSessionProxyManager) Connect(_ context.Context, _, _, _ string) error { +func (m *testValidateSessionProxyManager) Connect(_ context.Context, _, _, _ string, _ *proxy.Capabilities) error { return nil } @@ -335,7 +332,7 @@ func (m *testValidateSessionProxyManager) Disconnect(_ context.Context, _ string return nil } -func (m *testValidateSessionProxyManager) Heartbeat(_ context.Context, _ string) error { +func (m *testValidateSessionProxyManager) Heartbeat(_ context.Context, _, _, _ string) error { return nil } @@ -351,6 +348,18 @@ func (m *testValidateSessionProxyManager) CleanupStale(_ context.Context, _ time return nil } +func (m *testValidateSessionProxyManager) ClusterSupportsCustomPorts(_ context.Context, _ string) *bool { + return nil +} + +func (m *testValidateSessionProxyManager) ClusterRequireSubdomain(_ context.Context, _ string) *bool { + return nil +} + +func (m *testValidateSessionProxyManager) ClusterSupportsCrowdSec(_ context.Context, _ string) *bool { + return nil +} + type testValidateSessionUsersManager struct { store store.Store } diff --git a/management/server/account.go b/management/server/account.go index d90b46659..7d53cef03 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -181,7 +181,7 @@ func (am *DefaultAccountManager) getJWTGroupsChanges(user *types.User, groups [] return modified, newUserAutoGroups, newGroupsToCreate, nil } -// BuildManager creates a new DefaultAccountManager with a provided Store +// BuildManager creates a new DefaultAccountManager with all dependencies. func BuildManager( ctx context.Context, config *nbconfig.Config, @@ -199,6 +199,7 @@ func BuildManager( settingsManager settings.Manager, permissionsManager permissions.Manager, disableDefaultPolicy bool, + sharedCacheStore cacheStore.StoreInterface, ) (*DefaultAccountManager, error) { start := time.Now() defer func() { @@ -247,16 +248,12 @@ func BuildManager( log.WithContext(ctx).Infof("single account mode disabled, accounts number %d", accountsCounter) } - cacheStore, err := nbcache.NewStore(ctx, nbcache.DefaultIDPCacheExpirationMax, nbcache.DefaultIDPCacheCleanupInterval, nbcache.DefaultIDPCacheOpenConn) - if err != nil { - return nil, fmt.Errorf("getting cache store: %s", err) - } - am.externalCacheManager = nbcache.NewUserDataCache(cacheStore) - am.cacheManager = nbcache.NewAccountUserDataCache(am.loadAccount, cacheStore) + am.externalCacheManager = nbcache.NewUserDataCache(sharedCacheStore) + am.cacheManager = nbcache.NewAccountUserDataCache(am.loadAccount, sharedCacheStore) if !isNil(am.idpManager) && !IsEmbeddedIdp(am.idpManager) { go func() { - err := am.warmupIDPCache(ctx, cacheStore) + err := am.warmupIDPCache(ctx, sharedCacheStore) if err != nil { log.WithContext(ctx).Warnf("failed warming up cache due to error: %v", err) // todo retry? diff --git a/management/server/account_test.go b/management/server/account_test.go index 2f0533281..4453d064e 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -3134,10 +3134,15 @@ func createManager(t testing.TB) (*DefaultAccountManager, *update_channel.PeersU ctx := context.Background() + cacheStore, err := cache.NewStore(ctx, 100*time.Millisecond, 300*time.Millisecond, 100) + if err != nil { + return nil, nil, err + } + updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, store) networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peers.NewManager(store, permissionsManager)), &config.Config{}) - manager, err := BuildManager(ctx, &config.Config{}, store, networkMapController, job.NewJobManager(nil, store, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + manager, err := BuildManager(ctx, &config.Config{}, store, networkMapController, job.NewJobManager(nil, store, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false, cacheStore) if err != nil { return nil, nil, err } diff --git a/management/server/cache/store.go b/management/server/cache/store.go index 54b0242de..2ca8e8603 100644 --- a/management/server/cache/store.go +++ b/management/server/cache/store.go @@ -17,12 +17,24 @@ import ( // RedisStoreEnvVar is the environment variable that determines if a redis store should be used. // The value should follow redis URL format. https://github.com/redis/redis-specifications/blob/master/uri/redis.txt -const RedisStoreEnvVar = "NB_IDP_CACHE_REDIS_ADDRESS" +const RedisStoreEnvVar = "NB_CACHE_REDIS_ADDRESS" + +// legacyIdPCacheRedisEnvVar is the previous environment variable used for IDP cache. +const legacyIdPCacheRedisEnvVar = "NB_IDP_CACHE_REDIS_ADDRESS" + +const ( + // DefaultStoreMaxTimeout is the default max timeout for the shared cache store. + DefaultStoreMaxTimeout = 7 * 24 * time.Hour + // DefaultStoreCleanupInterval is the default cleanup interval for the shared cache store. + DefaultStoreCleanupInterval = 30 * time.Minute + // DefaultStoreMaxConn is the default max connections for the shared cache store. + DefaultStoreMaxConn = 1000 +) // NewStore creates a new cache store with the given max timeout and cleanup interval. It checks for the environment Variable RedisStoreEnvVar // to determine if a redis store should be used. If the environment variable is set, it will attempt to connect to the redis store. func NewStore(ctx context.Context, maxTimeout, cleanupInterval time.Duration, maxConn int) (store.StoreInterface, error) { - redisAddr := os.Getenv(RedisStoreEnvVar) + redisAddr := GetAddrFromEnv() if redisAddr != "" { return getRedisStore(ctx, redisAddr, maxConn) } @@ -30,6 +42,15 @@ func NewStore(ctx context.Context, maxTimeout, cleanupInterval time.Duration, ma return gocache_store.NewGoCache(goc), nil } +// GetAddrFromEnv returns the redis address from the environment variable RedisStoreEnvVar or its legacy counterpart. +func GetAddrFromEnv() string { + addr := os.Getenv(RedisStoreEnvVar) + if addr == "" { + addr = os.Getenv(legacyIdPCacheRedisEnvVar) + } + return addr +} + func getRedisStore(ctx context.Context, redisEnvAddr string, maxConn int) (store.StoreInterface, error) { options, err := redis.ParseURL(redisEnvAddr) if err != nil { diff --git a/management/server/dns_test.go b/management/server/dns_test.go index bd0755d0d..0e37a3b22 100644 --- a/management/server/dns_test.go +++ b/management/server/dns_test.go @@ -15,6 +15,7 @@ import ( "github.com/netbirdio/netbird/management/internals/modules/peers" ephemeral_manager "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" "github.com/netbirdio/netbird/management/internals/server/config" + "github.com/netbirdio/netbird/management/server/cache" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/management/server/permissions" @@ -225,11 +226,17 @@ func createDNSManager(t *testing.T) (*DefaultAccountManager, error) { peersManager := peers.NewManager(store, permissionsManager) ctx := context.Background() + + cacheStore, err := cache.NewStore(ctx, 100*time.Millisecond, 300*time.Millisecond, 100) + if err != nil { + return nil, err + } + updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, store) networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.test", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peers.NewManager(store, permissionsManager)), &config.Config{}) - return BuildManager(context.Background(), nil, store, networkMapController, job.NewJobManager(nil, store, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + return BuildManager(context.Background(), nil, store, networkMapController, job.NewJobManager(nil, store, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false, cacheStore) } func createDNSStore(t *testing.T) (store.Store, error) { diff --git a/management/server/http/handlers/proxy/auth_callback_integration_test.go b/management/server/http/handlers/proxy/auth_callback_integration_test.go index 922bf4352..c99acab63 100644 --- a/management/server/http/handlers/proxy/auth_callback_integration_test.go +++ b/management/server/http/handlers/proxy/auth_callback_integration_test.go @@ -22,6 +22,7 @@ import ( nbproxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" + nbcache "github.com/netbirdio/netbird/management/server/cache" "github.com/netbirdio/netbird/management/server/store" "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/management/server/users" @@ -191,11 +192,11 @@ func setupAuthCallbackTest(t *testing.T) *testSetup { oidcServer := newFakeOIDCServer() - tokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, time.Minute, 10*time.Minute, 100) + cacheStore, err := nbcache.NewStore(ctx, 30*time.Minute, 10*time.Minute, 100) require.NoError(t, err) - pkceStore, err := nbgrpc.NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) - require.NoError(t, err) + tokenStore := nbgrpc.NewOneTimeTokenStore(ctx, cacheStore) + pkceStore := nbgrpc.NewPKCEVerifierStore(ctx, cacheStore) usersManager := users.NewManager(testStore) diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index d9d85a0a2..0203d6177 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -35,6 +35,7 @@ import ( "github.com/netbirdio/netbird/management/server/account" "github.com/netbirdio/netbird/management/server/activity" serverauth "github.com/netbirdio/netbird/management/server/auth" + nbcache "github.com/netbirdio/netbird/management/server/cache" "github.com/netbirdio/netbird/management/server/geolocation" "github.com/netbirdio/netbird/management/server/groups" http2 "github.com/netbirdio/netbird/management/server/http" @@ -87,22 +88,22 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee jobManager := job.NewJobManager(nil, store, peersManager) ctx := context.Background() + + cacheStore, err := nbcache.NewStore(ctx, 100*time.Millisecond, 300*time.Millisecond, 100) + if err != nil { + t.Fatalf("Failed to create cache store: %v", err) + } + requestBuffer := server.NewAccountRequestBuffer(ctx, store) networkMapController := controller.NewController(ctx, store, metrics, peersUpdateManager, requestBuffer, server.MockIntegratedValidator{}, settingsManager, "", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peersManager), &config.Config{}) - am, err := server.BuildManager(ctx, nil, store, networkMapController, jobManager, nil, "", &activity.InMemoryEventStore{}, geoMock, false, validatorMock, metrics, proxyController, settingsManager, permissionsManager, false) + am, err := server.BuildManager(ctx, nil, store, networkMapController, jobManager, nil, "", &activity.InMemoryEventStore{}, geoMock, false, validatorMock, metrics, proxyController, settingsManager, permissionsManager, false, cacheStore) if err != nil { t.Fatalf("Failed to create manager: %v", err) } accessLogsManager := accesslogsmanager.NewManager(store, permissionsManager, nil) - proxyTokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, 5*time.Minute, 10*time.Minute, 100) - if err != nil { - t.Fatalf("Failed to create proxy token store: %v", err) - } - pkceverifierStore, err := nbgrpc.NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) - if err != nil { - t.Fatalf("Failed to create PKCE verifier store: %v", err) - } + proxyTokenStore := nbgrpc.NewOneTimeTokenStore(ctx, cacheStore) + pkceverifierStore := nbgrpc.NewPKCEVerifierStore(ctx, cacheStore) noopMeter := noop.NewMeterProvider().Meter("") proxyMgr, err := proxymanager.NewManager(store, noopMeter) if err != nil { @@ -216,22 +217,22 @@ func BuildApiBlackBoxWithDBStateAndPeerChannel(t testing_tools.TB, sqlFile strin jobManager := job.NewJobManager(nil, store, peersManager) ctx := context.Background() + + cacheStore, err := nbcache.NewStore(ctx, 100*time.Millisecond, 300*time.Millisecond, 100) + if err != nil { + t.Fatalf("Failed to create cache store: %v", err) + } + requestBuffer := server.NewAccountRequestBuffer(ctx, store) networkMapController := controller.NewController(ctx, store, metrics, peersUpdateManager, requestBuffer, server.MockIntegratedValidator{}, settingsManager, "", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peersManager), &config.Config{}) - am, err := server.BuildManager(ctx, nil, store, networkMapController, jobManager, nil, "", &activity.InMemoryEventStore{}, geoMock, false, validatorMock, metrics, proxyController, settingsManager, permissionsManager, false) + am, err := server.BuildManager(ctx, nil, store, networkMapController, jobManager, nil, "", &activity.InMemoryEventStore{}, geoMock, false, validatorMock, metrics, proxyController, settingsManager, permissionsManager, false, cacheStore) if err != nil { t.Fatalf("Failed to create manager: %v", err) } accessLogsManager := accesslogsmanager.NewManager(store, permissionsManager, nil) - proxyTokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, 5*time.Minute, 10*time.Minute, 100) - if err != nil { - t.Fatalf("Failed to create proxy token store: %v", err) - } - pkceverifierStore, err := nbgrpc.NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) - if err != nil { - t.Fatalf("Failed to create PKCE verifier store: %v", err) - } + proxyTokenStore := nbgrpc.NewOneTimeTokenStore(ctx, cacheStore) + pkceverifierStore := nbgrpc.NewPKCEVerifierStore(ctx, cacheStore) noopMeter := noop.NewMeterProvider().Meter("") proxyMgr, err := proxymanager.NewManager(store, noopMeter) if err != nil { diff --git a/management/server/identity_provider_test.go b/management/server/identity_provider_test.go index 9fce6b9c0..d51254c55 100644 --- a/management/server/identity_provider_test.go +++ b/management/server/identity_provider_test.go @@ -8,6 +8,7 @@ import ( "net/http/httptest" "path/filepath" "testing" + "time" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" @@ -19,6 +20,7 @@ import ( ephemeral_manager "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/management/server/cache" "github.com/netbirdio/netbird/management/server/idp" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" "github.com/netbirdio/netbird/management/server/job" @@ -83,10 +85,15 @@ func createManagerWithEmbeddedIdP(t testing.TB) (*DefaultAccountManager, *update permissionsManager := permissions.NewManager(testStore) peersManager := peers.NewManager(testStore, permissionsManager) + cacheStore, err := cache.NewStore(ctx, 100*time.Millisecond, 300*time.Millisecond, 100) + if err != nil { + return nil, nil, err + } + updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, testStore) networkMapController := controller.NewController(ctx, testStore, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(testStore, peersManager), &config.Config{}) - manager, err := BuildManager(ctx, &config.Config{}, testStore, networkMapController, job.NewJobManager(nil, testStore, peersManager), idpManager, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + manager, err := BuildManager(ctx, &config.Config{}, testStore, networkMapController, job.NewJobManager(nil, testStore, peersManager), idpManager, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false, cacheStore) if err != nil { return nil, nil, err } diff --git a/management/server/management_proto_test.go b/management/server/management_proto_test.go index 090c99877..4e6eb0a33 100644 --- a/management/server/management_proto_test.go +++ b/management/server/management_proto_test.go @@ -29,6 +29,7 @@ import ( "github.com/netbirdio/netbird/management/internals/server/config" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/management/server/cache" "github.com/netbirdio/netbird/management/server/groups" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" "github.com/netbirdio/netbird/management/server/job" @@ -369,9 +370,15 @@ func startManagementForTest(t *testing.T, testFile string, config *config.Config requestBuffer := NewAccountRequestBuffer(ctx, store) ephemeralMgr := manager.NewEphemeralManager(store, peers.NewManager(store, permissionsManager)) + cacheStore, err := cache.NewStore(ctx, 100*time.Millisecond, 300*time.Millisecond, 100) + if err != nil { + cleanup() + return nil, nil, "", cleanup, err + } + networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), ephemeralMgr, config) accountManager, err := BuildManager(ctx, nil, store, networkMapController, jobManager, nil, "", - eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false, cacheStore) if err != nil { cleanup() diff --git a/management/server/management_test.go b/management/server/management_test.go index de02855bf..3ac28cd4a 100644 --- a/management/server/management_test.go +++ b/management/server/management_test.go @@ -28,6 +28,7 @@ import ( nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" "github.com/netbirdio/netbird/management/server" "github.com/netbirdio/netbird/management/server/activity" + nbcache "github.com/netbirdio/netbird/management/server/cache" "github.com/netbirdio/netbird/management/server/groups" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" "github.com/netbirdio/netbird/management/server/job" @@ -207,6 +208,12 @@ func startServer( jobManager := job.NewJobManager(nil, str, peersManager) ctx := context.Background() + + cacheStore, err := nbcache.NewStore(ctx, 100*time.Millisecond, 300*time.Millisecond, 100) + if err != nil { + t.Fatalf("failed creating cache store: %v", err) + } + updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := server.NewAccountRequestBuffer(ctx, str) networkMapController := controller.NewController(ctx, str, metrics, updateManager, requestBuffer, server.MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(str, peers.NewManager(str, permissionsManager)), config) @@ -227,7 +234,8 @@ func startServer( port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, - false) + false, + cacheStore) if err != nil { t.Fatalf("failed creating an account manager: %v", err) } diff --git a/management/server/nameserver_test.go b/management/server/nameserver_test.go index 90b4b9687..d10d4464f 100644 --- a/management/server/nameserver_test.go +++ b/management/server/nameserver_test.go @@ -17,6 +17,7 @@ import ( ephemeral_manager "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/management/server/cache" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" "github.com/netbirdio/netbird/management/server/job" nbpeer "github.com/netbirdio/netbird/management/server/peer" @@ -794,11 +795,17 @@ func createNSManager(t *testing.T) (*DefaultAccountManager, error) { peersManager := peers.NewManager(store, permissionsManager) ctx := context.Background() + + cacheStore, err := cache.NewStore(ctx, 100*time.Millisecond, 300*time.Millisecond, 100) + if err != nil { + return nil, err + } + updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, store) networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peers.NewManager(store, permissionsManager)), &config.Config{}) - return BuildManager(context.Background(), nil, store, networkMapController, job.NewJobManager(nil, store, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + return BuildManager(context.Background(), nil, store, networkMapController, job.NewJobManager(nil, store, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false, cacheStore) } func createNSStore(t *testing.T) (store.Store, error) { diff --git a/management/server/peer_test.go b/management/server/peer_test.go index 51c16d730..6f8d924fd 100644 --- a/management/server/peer_test.go +++ b/management/server/peer_test.go @@ -32,6 +32,7 @@ import ( ephemeral_manager "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/internals/shared/grpc" + nbcache "github.com/netbirdio/netbird/management/server/cache" "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" "github.com/netbirdio/netbird/management/server/job" @@ -1294,11 +1295,15 @@ func Test_RegisterPeerByUser(t *testing.T) { peersManager := peers.NewManager(s, permissionsManager) ctx := context.Background() + + cacheStore, err := nbcache.NewStore(ctx, 100*time.Millisecond, 300*time.Millisecond, 100) + require.NoError(t, err) + updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, s) networkMapController := controller.NewController(ctx, s, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(s, peers.NewManager(s, permissionsManager)), &config.Config{}) - am, err := BuildManager(context.Background(), nil, s, networkMapController, job.NewJobManager(nil, s, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + am, err := BuildManager(context.Background(), nil, s, networkMapController, job.NewJobManager(nil, s, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false, cacheStore) assert.NoError(t, err) existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" @@ -1380,11 +1385,15 @@ func Test_RegisterPeerBySetupKey(t *testing.T) { peersManager := peers.NewManager(s, permissionsManager) ctx := context.Background() + + cacheStore, err := nbcache.NewStore(ctx, 100*time.Millisecond, 300*time.Millisecond, 100) + require.NoError(t, err) + updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, s) networkMapController := controller.NewController(ctx, s, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(s, peers.NewManager(s, permissionsManager)), &config.Config{}) - am, err := BuildManager(context.Background(), nil, s, networkMapController, job.NewJobManager(nil, s, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + am, err := BuildManager(context.Background(), nil, s, networkMapController, job.NewJobManager(nil, s, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false, cacheStore) assert.NoError(t, err) existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" @@ -1534,11 +1543,15 @@ func Test_RegisterPeerRollbackOnFailure(t *testing.T) { peersManager := peers.NewManager(s, permissionsManager) ctx := context.Background() + + cacheStore, err := nbcache.NewStore(ctx, 100*time.Millisecond, 300*time.Millisecond, 100) + require.NoError(t, err) + updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, s) networkMapController := controller.NewController(ctx, s, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(s, peers.NewManager(s, permissionsManager)), &config.Config{}) - am, err := BuildManager(context.Background(), nil, s, networkMapController, job.NewJobManager(nil, s, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + am, err := BuildManager(context.Background(), nil, s, networkMapController, job.NewJobManager(nil, s, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false, cacheStore) assert.NoError(t, err) existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" @@ -1615,11 +1628,15 @@ func Test_LoginPeer(t *testing.T) { peersManager := peers.NewManager(s, permissionsManager) ctx := context.Background() + + cacheStore, err := nbcache.NewStore(ctx, 100*time.Millisecond, 300*time.Millisecond, 100) + require.NoError(t, err) + updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, s) networkMapController := controller.NewController(ctx, s, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(s, peers.NewManager(s, permissionsManager)), &config.Config{}) - am, err := BuildManager(context.Background(), nil, s, networkMapController, job.NewJobManager(nil, s, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + am, err := BuildManager(context.Background(), nil, s, networkMapController, job.NewJobManager(nil, s, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false, cacheStore) assert.NoError(t, err) existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" diff --git a/management/server/route_test.go b/management/server/route_test.go index d4882eff8..91b2cf982 100644 --- a/management/server/route_test.go +++ b/management/server/route_test.go @@ -20,6 +20,7 @@ import ( ephemeral_manager "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/management/server/cache" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" "github.com/netbirdio/netbird/management/server/job" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" @@ -1293,11 +1294,17 @@ func createRouterManager(t *testing.T) (*DefaultAccountManager, *update_channel. peersManager := peers.NewManager(store, permissionsManager) ctx := context.Background() + + cacheStore, err := cache.NewStore(ctx, 100*time.Millisecond, 300*time.Millisecond, 100) + if err != nil { + return nil, nil, err + } + updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, store) networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peers.NewManager(store, permissionsManager)), &config.Config{}) - am, err := BuildManager(context.Background(), nil, store, networkMapController, job.NewJobManager(nil, store, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + am, err := BuildManager(context.Background(), nil, store, networkMapController, job.NewJobManager(nil, store, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false, cacheStore) if err != nil { return nil, nil, err } diff --git a/proxy/management_integration_test.go b/proxy/management_integration_test.go index 17510f37e..4b1ecf922 100644 --- a/proxy/management_integration_test.go +++ b/proxy/management_integration_test.go @@ -22,6 +22,7 @@ import ( nbproxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" + nbcache "github.com/netbirdio/netbird/management/server/cache" "github.com/netbirdio/netbird/management/server/store" "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/management/server/users" @@ -113,11 +114,11 @@ func setupIntegrationTest(t *testing.T) *integrationTestSetup { } // Create real token store - tokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, 5*time.Minute, 10*time.Minute, 100) + cacheStore, err := nbcache.NewStore(ctx, 30*time.Minute, 10*time.Minute, 100) require.NoError(t, err) - pkceStore, err := nbgrpc.NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) - require.NoError(t, err) + tokenStore := nbgrpc.NewOneTimeTokenStore(ctx, cacheStore) + pkceStore := nbgrpc.NewPKCEVerifierStore(ctx, cacheStore) // Create real users manager usersManager := users.NewManager(testStore) diff --git a/shared/management/client/client_test.go b/shared/management/client/client_test.go index f5edb6b95..d9a1a7d65 100644 --- a/shared/management/client/client_test.go +++ b/shared/management/client/client_test.go @@ -31,6 +31,7 @@ import ( "github.com/netbirdio/netbird/management/internals/server/config" mgmt "github.com/netbirdio/netbird/management/server" "github.com/netbirdio/netbird/management/server/activity" + nbcache "github.com/netbirdio/netbird/management/server/cache" "github.com/netbirdio/netbird/management/server/groups" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" "github.com/netbirdio/netbird/management/server/mock_server" @@ -95,9 +96,16 @@ func startManagement(t *testing.T) (*grpc.Server, net.Listener) { settingsManagerMock := settings.NewMockManager(ctrl) jobManager := job.NewJobManager(nil, store, peersManger) - ia, _ := integrations.NewIntegratedValidator(context.Background(), peersManger, settingsManagerMock, eventStore) + ctx := context.Background() - metrics, err := telemetry.NewDefaultAppMetrics(context.Background()) + cacheStore, err := nbcache.NewStore(ctx, 100*time.Millisecond, 300*time.Millisecond, 100) + if err != nil { + t.Fatal(err) + } + + ia, _ := integrations.NewIntegratedValidator(ctx, peersManger, settingsManagerMock, eventStore, cacheStore) + + metrics, err := telemetry.NewDefaultAppMetrics(ctx) require.NoError(t, err) settingsMockManager := settings.NewMockManager(ctrl) @@ -116,11 +124,10 @@ func startManagement(t *testing.T) (*grpc.Server, net.Listener) { Return(&types.ExtraSettings{}, nil). AnyTimes() - ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := mgmt.NewAccountRequestBuffer(ctx, store) networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, mgmt.MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peersManger), config) - accountManager, err := mgmt.BuildManager(context.Background(), config, store, networkMapController, jobManager, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false) + accountManager, err := mgmt.BuildManager(context.Background(), config, store, networkMapController, jobManager, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false, cacheStore) if err != nil { t.Fatal(err) } From a39787d6796ee26e1f6ad9c67ea33a9da7bf8d66 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Fri, 17 Apr 2026 01:06:38 +0900 Subject: [PATCH 321/374] [infrastructure] Add CrowdSec LAPI container to self-hosted setup script (#5880) --- infrastructure_files/getting-started.sh | 132 ++++++++++++++++++++++-- 1 file changed, 125 insertions(+), 7 deletions(-) diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh index 9236d851d..08da48264 100755 --- a/infrastructure_files/getting-started.sh +++ b/infrastructure_files/getting-started.sh @@ -182,6 +182,23 @@ read_enable_proxy() { return 0 } +read_enable_crowdsec() { + echo "" > /dev/stderr + echo "Do you want to enable CrowdSec IP reputation blocking?" > /dev/stderr + echo "CrowdSec checks client IPs against a community threat intelligence database" > /dev/stderr + echo "and blocks known malicious sources before they reach your services." > /dev/stderr + echo "A local CrowdSec LAPI container will be added to your deployment." > /dev/stderr + echo -n "Enable CrowdSec? [y/N]: " > /dev/stderr + read -r CHOICE < /dev/tty + + if [[ "$CHOICE" =~ ^[Yy]$ ]]; then + echo "true" + else + echo "false" + fi + return 0 +} + read_traefik_acme_email() { echo "" > /dev/stderr echo "Enter your email for Let's Encrypt certificate notifications." > /dev/stderr @@ -297,6 +314,10 @@ initialize_default_values() { # NetBird Proxy configuration ENABLE_PROXY="false" PROXY_TOKEN="" + + # CrowdSec configuration + ENABLE_CROWDSEC="false" + CROWDSEC_BOUNCER_KEY="" return 0 } @@ -325,6 +346,9 @@ configure_reverse_proxy() { if [[ "$REVERSE_PROXY_TYPE" == "0" ]]; then TRAEFIK_ACME_EMAIL=$(read_traefik_acme_email) ENABLE_PROXY=$(read_enable_proxy) + if [[ "$ENABLE_PROXY" == "true" ]]; then + ENABLE_CROWDSEC=$(read_enable_crowdsec) + fi fi # Handle external Traefik-specific prompts (option 1) @@ -354,7 +378,7 @@ check_existing_installation() { echo "Generated files already exist, if you want to reinitialize the environment, please remove them first." echo "You can use the following commands:" echo " $DOCKER_COMPOSE_COMMAND down --volumes # to remove all containers and volumes" - echo " rm -f docker-compose.yml dashboard.env config.yaml proxy.env traefik-dynamic.yaml nginx-netbird.conf caddyfile-netbird.txt npm-advanced-config.txt" + echo " rm -f docker-compose.yml dashboard.env config.yaml proxy.env traefik-dynamic.yaml nginx-netbird.conf caddyfile-netbird.txt npm-advanced-config.txt && rm -rf crowdsec/" echo "Be aware that this will remove all data from the database, and you will have to reconfigure the dashboard." exit 1 fi @@ -375,6 +399,9 @@ generate_configuration_files() { echo "NB_PROXY_TOKEN=placeholder" >> proxy.env # TCP ServersTransport for PROXY protocol v2 to the proxy backend render_traefik_dynamic > traefik-dynamic.yaml + if [[ "$ENABLE_CROWDSEC" == "true" ]]; then + mkdir -p crowdsec + fi fi ;; 1) @@ -417,8 +444,12 @@ start_services_and_show_instructions() { if [[ "$ENABLE_PROXY" == "true" ]]; then # Phase 1: Start core services (without proxy) + local core_services="traefik dashboard netbird-server" + if [[ "$ENABLE_CROWDSEC" == "true" ]]; then + core_services="$core_services crowdsec" + fi echo "Starting core services..." - $DOCKER_COMPOSE_COMMAND up -d traefik dashboard netbird-server + $DOCKER_COMPOSE_COMMAND up -d $core_services sleep 3 wait_management_proxy traefik @@ -438,7 +469,33 @@ start_services_and_show_instructions() { echo "Proxy token created successfully." - # Generate proxy.env with the token + if [[ "$ENABLE_CROWDSEC" == "true" ]]; then + echo "Registering CrowdSec bouncer..." + local cs_retries=0 + while ! $DOCKER_COMPOSE_COMMAND exec -T crowdsec cscli capi status >/dev/null 2>&1; do + cs_retries=$((cs_retries + 1)) + if [[ $cs_retries -ge 30 ]]; then + echo "WARNING: CrowdSec did not become ready. Skipping CrowdSec setup." > /dev/stderr + echo "You can register a bouncer manually later with:" > /dev/stderr + echo " docker exec netbird-crowdsec cscli bouncers add netbird-proxy -o raw" > /dev/stderr + ENABLE_CROWDSEC="false" + break + fi + sleep 2 + done + + if [[ "$ENABLE_CROWDSEC" == "true" ]]; then + CROWDSEC_BOUNCER_KEY=$($DOCKER_COMPOSE_COMMAND exec -T crowdsec \ + cscli bouncers add netbird-proxy -o raw 2>/dev/null) + if [[ -z "$CROWDSEC_BOUNCER_KEY" ]]; then + echo "WARNING: Failed to create CrowdSec bouncer key. Skipping CrowdSec setup." > /dev/stderr + ENABLE_CROWDSEC="false" + else + echo "CrowdSec bouncer registered." + fi + fi + fi + render_proxy_env > proxy.env # Start proxy service @@ -525,11 +582,25 @@ render_docker_compose_traefik_builtin() { # Generate proxy service section and Traefik dynamic config if enabled local proxy_service="" local proxy_volumes="" + local crowdsec_service="" + local crowdsec_volumes="" local traefik_file_provider="" local traefik_dynamic_volume="" if [[ "$ENABLE_PROXY" == "true" ]]; then traefik_file_provider=' - "--providers.file.filename=/etc/traefik/dynamic.yaml"' traefik_dynamic_volume=" - ./traefik-dynamic.yaml:/etc/traefik/dynamic.yaml:ro" + + local proxy_depends=" + netbird-server: + condition: service_started" + if [[ "$ENABLE_CROWDSEC" == "true" ]]; then + proxy_depends=" + netbird-server: + condition: service_started + crowdsec: + condition: service_healthy" + fi + proxy_service=" # NetBird Proxy - exposes internal resources to the internet proxy: @@ -539,8 +610,7 @@ render_docker_compose_traefik_builtin() { - 51820:51820/udp restart: unless-stopped networks: [netbird] - depends_on: - - netbird-server + depends_on:${proxy_depends} env_file: - ./proxy.env volumes: @@ -563,6 +633,35 @@ render_docker_compose_traefik_builtin() { " proxy_volumes=" netbird_proxy_certs:" + + if [[ "$ENABLE_CROWDSEC" == "true" ]]; then + crowdsec_service=" + crowdsec: + image: crowdsecurity/crowdsec:v1.7.7 + container_name: netbird-crowdsec + restart: unless-stopped + networks: [netbird] + environment: + COLLECTIONS: crowdsecurity/linux + volumes: + - ./crowdsec:/etc/crowdsec + - crowdsec_db:/var/lib/crowdsec/data + healthcheck: + test: ["CMD", "cscli", "lapi", "status"] + interval: 10s + timeout: 5s + retries: 15 + labels: + - traefik.enable=false + logging: + driver: \"json-file\" + options: + max-size: \"500m\" + max-file: \"2\" +" + crowdsec_volumes=" + crowdsec_db:" + fi fi cat <" + echo " Get your enrollment key at: https://app.crowdsec.net" + echo "" + fi fi return 0 } From 8ae8f2098fb4bb7a9177b3f2d8d908a413b8fc49 Mon Sep 17 00:00:00 2001 From: Nicolas Frati Date: Thu, 16 Apr 2026 20:02:09 +0200 Subject: [PATCH 322/374] [management] chores: fix lint error on google workspace (#5907) * chores: fix lint error on google workspace * chores: updated google api dependency * update google golang api sdk to latest --- go.mod | 32 +++++----- go.sum | 76 +++++++++++------------ management/server/idp/google_workspace.go | 33 ++++------ 3 files changed, 67 insertions(+), 74 deletions(-) diff --git a/go.mod b/go.mod index 32c285e47..5172b1a78 100644 --- a/go.mod +++ b/go.mod @@ -17,12 +17,12 @@ require ( github.com/spf13/cobra v1.10.1 github.com/spf13/pflag v1.0.9 github.com/vishvananda/netlink v1.3.1 - golang.org/x/crypto v0.48.0 + golang.org/x/crypto v0.49.0 golang.org/x/sys v0.42.0 golang.zx2c4.com/wireguard v0.0.0-20230704135630-469159ecf7d1 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 golang.zx2c4.com/wireguard/windows v0.5.3 - google.golang.org/grpc v1.79.3 + google.golang.org/grpc v1.80.0 google.golang.org/protobuf v1.36.11 gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) @@ -115,13 +115,13 @@ require ( goauthentik.io/api/v3 v3.2023051.3 golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b golang.org/x/mobile v0.0.0-20251113184115-a159579294ab - golang.org/x/mod v0.32.0 - golang.org/x/net v0.51.0 - golang.org/x/oauth2 v0.34.0 - golang.org/x/sync v0.19.0 - golang.org/x/term v0.40.0 - golang.org/x/time v0.14.0 - google.golang.org/api v0.257.0 + golang.org/x/mod v0.33.0 + golang.org/x/net v0.52.0 + golang.org/x/oauth2 v0.36.0 + golang.org/x/sync v0.20.0 + golang.org/x/term v0.41.0 + golang.org/x/time v0.15.0 + google.golang.org/api v0.276.0 gopkg.in/yaml.v3 v3.0.1 gorm.io/driver/mysql v1.5.7 gorm.io/driver/postgres v1.5.7 @@ -131,7 +131,7 @@ require ( ) require ( - cloud.google.com/go/auth v0.17.0 // indirect + cloud.google.com/go/auth v0.20.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.9.0 // indirect dario.cat/mergo v1.0.1 // indirect @@ -210,8 +210,8 @@ require ( github.com/google/btree v1.1.2 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/s2a-go v0.1.9 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect - github.com/googleapis/gax-go/v2 v2.15.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.14 // indirect + github.com/googleapis/gax-go/v2 v2.21.0 // indirect github.com/gorilla/handlers v1.5.2 // indirect github.com/hack-pad/go-indexeddb v0.3.2 // indirect github.com/hack-pad/safejs v0.1.0 // indirect @@ -295,16 +295,16 @@ require ( github.com/zeebo/blake3 v0.2.3 // indirect go.mongodb.org/mongo-driver v1.17.9 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 // indirect go.opentelemetry.io/otel/sdk v1.43.0 // indirect go.opentelemetry.io/otel/trace v1.43.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect golang.org/x/image v0.33.0 // indirect - golang.org/x/text v0.34.0 // indirect - golang.org/x/tools v0.41.0 // indirect + golang.org/x/text v0.35.0 // indirect + golang.org/x/tools v0.42.0 // indirect golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index e8759d84f..9293ce73b 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= -cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= +cloud.google.com/go/auth v0.20.0 h1:kXTssoVb4azsVDoUiF8KvxAqrsQcQtB53DcSgta74CA= +cloud.google.com/go/auth v0.20.0/go.mod h1:942/yi/itH1SsmpyrbnTMDgGfdy2BUqIKyd0cyYLc5Q= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= @@ -285,10 +285,10 @@ github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= -github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= -github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= -github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= +github.com/googleapis/enterprise-certificate-proxy v0.3.14 h1:yh8ncqsbUY4shRD5dA6RlzjJaT4hi3kII+zYw8wmLb8= +github.com/googleapis/enterprise-certificate-proxy v0.3.14/go.mod h1:vqVt9yG9480NtzREnTlmGSBmFrA+bzb0yl0TxoBQXOg= +github.com/googleapis/gax-go/v2 v2.21.0 h1:h45NjjzEO3faG9Lg/cFrBh2PgegVVgzqKzuZl/wMbiI= +github.com/googleapis/gax-go/v2 v2.21.0/go.mod h1:But/NJU6TnZsrLai/xBAQLLz+Hc7fHZJt/hsCz3Fih4= github.com/gopacket/gopacket v1.1.1 h1:zbx9F9d6A7sWNkFKrvMBZTfGgxFoY4NgUudFVVHMfcw= github.com/gopacket/gopacket v1.1.1/go.mod h1:HavMeONEl7W9036of9LbSWoonqhH7HA1+ZRO+rMIvFs= github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= @@ -664,8 +664,8 @@ go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 h1:yI1/OhfEPy7J9eoa6Sj051C7n5dvpj0QX8g4sRchg04= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0/go.mod h1:NoUCKYWK+3ecatC4HjkRktREheMeEtrXoQxrqYFeHSc= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 h1:OyrsyzuttWTSur2qN/Lm0m2a8yqyIjUVBZcxFPuXq2o= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0/go.mod h1:C2NGBr+kAB4bk3xtMXfZ94gqFDtg/GkI7e9zqGh5Beg= go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= @@ -707,8 +707,8 @@ golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1m golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= -golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= +golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= +golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= golang.org/x/image v0.33.0 h1:LXRZRnv1+zGd5XBUVRFmYEphyyKJjQjCRiOuAP3sZfQ= @@ -725,8 +725,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= -golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= +golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= +golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= @@ -745,11 +745,11 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo= -golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y= +golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= +golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= -golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= -golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs= +golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -761,8 +761,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= -golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -811,8 +811,8 @@ golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= -golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= +golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU= +golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -824,10 +824,10 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= -golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= -golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= -golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8= +golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA= +golang.org/x/time v0.15.0 h1:bbrp8t3bGUeFOx08pvsMYRTCVSMk89u4tKbNOZbp88U= +golang.org/x/time v0.15.0/go.mod h1:Y4YMaQmXwGQZoFaVFk4YpCt4FLQMYKZe9oeV/f4MSno= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -839,8 +839,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= -golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= +golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k= +golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -851,19 +851,19 @@ golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 h1:CawjfCvY golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6/go.mod h1:3rxYc4HtVcSG9gVaTs2GEBdehh+sYPOwKtyUWEOTb80= golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE= golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI= -gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= -gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/api v0.257.0 h1:8Y0lzvHlZps53PEaw+G29SsQIkuKrumGWs9puiexNAA= -google.golang.org/api v0.257.0/go.mod h1:4eJrr+vbVaZSqs7vovFd1Jb/A6ml6iw2e6FBYf3GAO4= +gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= +gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= +google.golang.org/api v0.276.0 h1:nVArUtfLEihtW+b0DdcqRGK1xoEm2+ltAihyztq7MKY= +google.golang.org/api v0.276.0/go.mod h1:Fnag/EWUPIcJXuIkP1pjoTgS5vdxlk3eeemL7Do6bvw= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= -google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= -google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= -google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 h1:ggcbiqK8WWh6l1dnltU4BgWGIGo+EVYxCaAPih/zQXQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= -google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= -google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 h1:XzmzkmB14QhVhgnawEVsOn6OFsnpyxNPRY9QV01dNB0= +google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:L43LFes82YgSonw6iTXTxXUX1OlULt4AQtkik4ULL/I= +google.golang.org/genproto/googleapis/api v0.0.0-20260319201613-d00831a3d3e7 h1:41r6JMbpzBMen0R/4TZeeAmGXSJC7DftGINUodzTkPI= +google.golang.org/genproto/googleapis/api v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:EIQZ5bFCfRQDV4MhRle7+OgjNtZ6P1PiZBgAKuxXu/Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 h1:m8qni9SQFH0tJc1X0vmnpw/0t+AImlSvp30sEupozUg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM= +google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/management/server/idp/google_workspace.go b/management/server/idp/google_workspace.go index 48e4f3000..dadbfd83e 100644 --- a/management/server/idp/google_workspace.go +++ b/management/server/idp/google_workspace.go @@ -66,14 +66,14 @@ func NewGoogleWorkspaceManager(ctx context.Context, config GoogleWorkspaceClient } // Create a new Admin SDK Directory service client - adminCredentials, err := getGoogleCredentials(ctx, config.ServiceAccountKey) + credentialsOption, err := getGoogleCredentialsOption(ctx, config.ServiceAccountKey) if err != nil { return nil, err } service, err := admin.NewService(context.Background(), option.WithScopes(admin.AdminDirectoryUserReadonlyScope), - option.WithCredentials(adminCredentials), + credentialsOption, ) if err != nil { return nil, err @@ -218,39 +218,32 @@ func (gm *GoogleWorkspaceManager) DeleteUser(_ context.Context, userID string) e return nil } -// getGoogleCredentials retrieves Google credentials based on the provided serviceAccountKey. -// It decodes the base64-encoded serviceAccountKey and attempts to obtain credentials using it. -// If that fails, it falls back to using the default Google credentials path. -// It returns the retrieved credentials or an error if unsuccessful. -func getGoogleCredentials(ctx context.Context, serviceAccountKey string) (*google.Credentials, error) { +// getGoogleCredentialsOption returns the google.golang.org/api option carrying +// Google credentials derived from the provided serviceAccountKey. +// It decodes the base64-encoded serviceAccountKey and uses it as the credentials JSON. +// If the key is empty, it falls back to the default Google credentials path. +func getGoogleCredentialsOption(ctx context.Context, serviceAccountKey string) (option.ClientOption, error) { log.WithContext(ctx).Debug("retrieving google credentials from the base64 encoded service account key") decodeKey, err := base64.StdEncoding.DecodeString(serviceAccountKey) if err != nil { return nil, fmt.Errorf("failed to decode service account key: %w", err) } - creds, err := google.CredentialsFromJSON( - context.Background(), - decodeKey, - admin.AdminDirectoryUserReadonlyScope, - ) - if err == nil { - // No need to fallback to the default Google credentials path - return creds, nil + if len(decodeKey) > 0 { + return option.WithAuthCredentialsJSON(option.ServiceAccount, decodeKey), nil } - log.WithContext(ctx).Debugf("failed to retrieve Google credentials from ServiceAccountKey: %v", err) - log.WithContext(ctx).Debug("falling back to default google credentials location") + log.WithContext(ctx).Debug("no service account key provided, falling back to default google credentials location") - creds, err = google.FindDefaultCredentials( - context.Background(), + creds, err := google.FindDefaultCredentials( + ctx, admin.AdminDirectoryUserReadonlyScope, ) if err != nil { return nil, err } - return creds, nil + return option.WithCredentials(creds), nil } // parseGoogleWorkspaceUser parse google user to UserData. From 2e0e3a3601d90a5d7a6b39dc16ebde2522fdf580 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Mon, 20 Apr 2026 17:01:01 +0900 Subject: [PATCH 323/374] [client] Replace exclusion routes with scoped default + IP_BOUND_IF on macOS (#5918) --- client/iface/udpmux/universal.go | 4 +- .../systemops/systemops_bsd_other.go | 10 + .../systemops/systemops_darwin.go | 241 ++++++++++++++++++ .../systemops/systemops_generic.go | 15 +- .../routemanager/systemops/systemops_js.go | 4 - .../routemanager/systemops/systemops_linux.go | 7 - .../systemops/systemops_nonlinux.go | 4 - .../routemanager/systemops/systemops_unix.go | 162 +++++++----- client/net/dialer_init_darwin.go | 5 + client/net/dialer_init_generic.go | 2 +- client/net/env_android.go | 24 -- .../{env_windows.go => env_bound_iface.go} | 19 +- client/net/env_generic.go | 2 +- client/net/env_mobile.go | 25 ++ client/net/listener_init_darwin.go | 5 + client/net/listener_init_generic.go | 2 +- client/net/net_darwin.go | 160 ++++++++++++ client/server/state.go | 7 +- 18 files changed, 579 insertions(+), 119 deletions(-) create mode 100644 client/internal/routemanager/systemops/systemops_bsd_other.go create mode 100644 client/internal/routemanager/systemops/systemops_darwin.go create mode 100644 client/net/dialer_init_darwin.go delete mode 100644 client/net/env_android.go rename client/net/{env_windows.go => env_bound_iface.go} (71%) create mode 100644 client/net/env_mobile.go create mode 100644 client/net/listener_init_darwin.go create mode 100644 client/net/net_darwin.go diff --git a/client/iface/udpmux/universal.go b/client/iface/udpmux/universal.go index 43bfedaaa..89a7eefb9 100644 --- a/client/iface/udpmux/universal.go +++ b/client/iface/udpmux/universal.go @@ -171,7 +171,7 @@ func (u *UDPConn) performFilterCheck(addr net.Addr) error { } if u.address.Network.Contains(a) { - log.Warnf("Address %s is part of the NetBird network %s, refusing to write", addr, u.address) + log.Warnf("address %s is part of the NetBird network %s, refusing to write", addr, u.address) return fmt.Errorf("address %s is part of the NetBird network %s, refusing to write", addr, u.address) } @@ -181,7 +181,7 @@ func (u *UDPConn) performFilterCheck(addr net.Addr) error { u.addrCache.Store(addr.String(), isRouted) if isRouted { // Extra log, as the error only shows up with ICE logging enabled - log.Infof("Address %s is part of routed network %s, refusing to write", addr, prefix) + log.Infof("address %s is part of routed network %s, refusing to write", addr, prefix) return fmt.Errorf("address %s is part of routed network %s, refusing to write", addr, prefix) } } diff --git a/client/internal/routemanager/systemops/systemops_bsd_other.go b/client/internal/routemanager/systemops/systemops_bsd_other.go new file mode 100644 index 000000000..3f09219aa --- /dev/null +++ b/client/internal/routemanager/systemops/systemops_bsd_other.go @@ -0,0 +1,10 @@ +//go:build (dragonfly || freebsd || netbsd || openbsd) && !darwin + +package systemops + +// Non-darwin BSDs don't support the IP_BOUND_IF + scoped default model. They +// always fall through to the ref-counter exclusion-route path; these stubs +// exist only so systemops_unix.go compiles. +func (r *SysOps) setupAdvancedRouting() error { return nil } +func (r *SysOps) cleanupAdvancedRouting() error { return nil } +func (r *SysOps) flushPlatformExtras() error { return nil } diff --git a/client/internal/routemanager/systemops/systemops_darwin.go b/client/internal/routemanager/systemops/systemops_darwin.go new file mode 100644 index 000000000..d6875ff95 --- /dev/null +++ b/client/internal/routemanager/systemops/systemops_darwin.go @@ -0,0 +1,241 @@ +//go:build darwin && !ios + +package systemops + +import ( + "errors" + "fmt" + "net/netip" + "os" + "time" + + "github.com/hashicorp/go-multierror" + log "github.com/sirupsen/logrus" + "golang.org/x/net/route" + "golang.org/x/sys/unix" + + nberrors "github.com/netbirdio/netbird/client/errors" + "github.com/netbirdio/netbird/client/internal/routemanager/vars" + nbnet "github.com/netbirdio/netbird/client/net" +) + +// scopedRouteBudget bounds retries for the scoped default route. Installing or +// deleting it matters enough that we're willing to spend longer waiting for the +// kernel reply than for per-prefix exclusion routes. +const scopedRouteBudget = 5 * time.Second + +// setupAdvancedRouting installs an RTF_IFSCOPE default route per address family +// pinned to the current physical egress, so IP_BOUND_IF scoped lookups can +// resolve gateway'd destinations while the VPN's split default owns the +// unscoped table. +// +// Timing note: this runs during routeManager.Init, which happens before the +// VPN interface is created and before any peer routes propagate. The initial +// mgmt / signal / relay TCP dials always fire before this runs, so those +// sockets miss the IP_BOUND_IF binding and rely on the kernel's normal route +// lookup, which at that point correctly picks the physical default. Those +// already-established TCP flows keep their originally-selected interface for +// their lifetime on Darwin because the kernel caches the egress route +// per-socket at connect time; adding the VPN's 0/1 + 128/1 split default +// afterwards does not migrate them since the original en0 default stays in +// the table. Any subsequent reconnect via nbnet.NewDialer picks up the +// populated bound-iface cache and gets IP_BOUND_IF set cleanly. +func (r *SysOps) setupAdvancedRouting() error { + // Drop any previously-cached egress interface before reinstalling. On a + // refresh, a family that no longer resolves would otherwise keep the stale + // binding, causing new sockets to scope to an interface without a matching + // scoped default. + nbnet.ClearBoundInterfaces() + + if err := r.flushScopedDefaults(); err != nil { + log.Warnf("flush residual scoped defaults: %v", err) + } + + var merr *multierror.Error + installed := 0 + + for _, unspec := range []netip.Addr{netip.IPv4Unspecified(), netip.IPv6Unspecified()} { + ok, err := r.installScopedDefaultFor(unspec) + if err != nil { + merr = multierror.Append(merr, err) + continue + } + if ok { + installed++ + } + } + + if installed == 0 && merr != nil { + return nberrors.FormatErrorOrNil(merr) + } + if merr != nil { + log.Warnf("advanced routing setup partially succeeded: %v", nberrors.FormatErrorOrNil(merr)) + } + return nil +} + +// installScopedDefaultFor resolves the physical default nexthop for the given +// address family, installs a scoped default via it, and caches the iface for +// subsequent IP_BOUND_IF / IPV6_BOUND_IF socket binds. +func (r *SysOps) installScopedDefaultFor(unspec netip.Addr) (bool, error) { + nexthop, err := GetNextHop(unspec) + if err != nil { + if errors.Is(err, vars.ErrRouteNotFound) { + return false, nil + } + return false, fmt.Errorf("get default nexthop for %s: %w", unspec, err) + } + if nexthop.Intf == nil { + return false, fmt.Errorf("unusable default nexthop for %s (no interface)", unspec) + } + + if err := r.addScopedDefault(unspec, nexthop); err != nil { + return false, fmt.Errorf("add scoped default on %s: %w", nexthop.Intf.Name, err) + } + + af := unix.AF_INET + if unspec.Is6() { + af = unix.AF_INET6 + } + nbnet.SetBoundInterface(af, nexthop.Intf) + via := "point-to-point" + if nexthop.IP.IsValid() { + via = nexthop.IP.String() + } + log.Infof("installed scoped default route via %s on %s for %s", via, nexthop.Intf.Name, afOf(unspec)) + return true, nil +} + +func (r *SysOps) cleanupAdvancedRouting() error { + nbnet.ClearBoundInterfaces() + return r.flushScopedDefaults() +} + +// flushPlatformExtras runs darwin-specific residual cleanup hooked into the +// generic FlushMarkedRoutes path, so a crashed daemon's scoped defaults get +// removed on the next boot regardless of whether a profile is brought up. +func (r *SysOps) flushPlatformExtras() error { + return r.flushScopedDefaults() +} + +// flushScopedDefaults removes any scoped default routes tagged with routeProtoFlag. +// Safe to call at startup to clear residual entries from a prior session. +func (r *SysOps) flushScopedDefaults() error { + rib, err := retryFetchRIB() + if err != nil { + return fmt.Errorf("fetch routing table: %w", err) + } + + msgs, err := route.ParseRIB(route.RIBTypeRoute, rib) + if err != nil { + return fmt.Errorf("parse routing table: %w", err) + } + + var merr *multierror.Error + removed := 0 + + for _, msg := range msgs { + rtMsg, ok := msg.(*route.RouteMessage) + if !ok { + continue + } + if rtMsg.Flags&routeProtoFlag == 0 { + continue + } + if rtMsg.Flags&unix.RTF_IFSCOPE == 0 { + continue + } + + info, err := MsgToRoute(rtMsg) + if err != nil { + log.Debugf("skip scoped flush: %v", err) + continue + } + if !info.Dst.IsValid() || info.Dst.Bits() != 0 { + continue + } + + if err := r.deleteScopedRoute(rtMsg); err != nil { + merr = multierror.Append(merr, fmt.Errorf("delete scoped default %s on index %d: %w", + info.Dst, rtMsg.Index, err)) + continue + } + removed++ + log.Debugf("flushed residual scoped default %s on index %d", info.Dst, rtMsg.Index) + } + + if removed > 0 { + log.Infof("flushed %d residual scoped default route(s)", removed) + } + return nberrors.FormatErrorOrNil(merr) +} + +func (r *SysOps) addScopedDefault(unspec netip.Addr, nexthop Nexthop) error { + return r.scopedRouteSocket(unix.RTM_ADD, unspec, nexthop) +} + +func (r *SysOps) deleteScopedRoute(rtMsg *route.RouteMessage) error { + // Preserve identifying flags from the stored route (including RTF_GATEWAY + // only if present); kernel-set bits like RTF_DONE don't belong on RTM_DELETE. + keep := unix.RTF_UP | unix.RTF_STATIC | unix.RTF_GATEWAY | unix.RTF_IFSCOPE | routeProtoFlag + del := &route.RouteMessage{ + Type: unix.RTM_DELETE, + Flags: rtMsg.Flags & keep, + Version: unix.RTM_VERSION, + Seq: r.getSeq(), + Index: rtMsg.Index, + Addrs: rtMsg.Addrs, + } + return r.writeRouteMessage(del, scopedRouteBudget) +} + +func (r *SysOps) scopedRouteSocket(action int, unspec netip.Addr, nexthop Nexthop) error { + flags := unix.RTF_UP | unix.RTF_STATIC | unix.RTF_IFSCOPE | routeProtoFlag + + msg := &route.RouteMessage{ + Type: action, + Flags: flags, + Version: unix.RTM_VERSION, + ID: uintptr(os.Getpid()), + Seq: r.getSeq(), + Index: nexthop.Intf.Index, + } + + const numAddrs = unix.RTAX_NETMASK + 1 + addrs := make([]route.Addr, numAddrs) + + dst, err := addrToRouteAddr(unspec) + if err != nil { + return fmt.Errorf("build destination: %w", err) + } + mask, err := prefixToRouteNetmask(netip.PrefixFrom(unspec, 0)) + if err != nil { + return fmt.Errorf("build netmask: %w", err) + } + addrs[unix.RTAX_DST] = dst + addrs[unix.RTAX_NETMASK] = mask + + if nexthop.IP.IsValid() { + msg.Flags |= unix.RTF_GATEWAY + gw, err := addrToRouteAddr(nexthop.IP.Unmap()) + if err != nil { + return fmt.Errorf("build gateway: %w", err) + } + addrs[unix.RTAX_GATEWAY] = gw + } else { + addrs[unix.RTAX_GATEWAY] = &route.LinkAddr{ + Index: nexthop.Intf.Index, + Name: nexthop.Intf.Name, + } + } + msg.Addrs = addrs + + return r.writeRouteMessage(msg, scopedRouteBudget) +} + +func afOf(a netip.Addr) string { + if a.Is4() { + return "IPv4" + } + return "IPv6" +} diff --git a/client/internal/routemanager/systemops/systemops_generic.go b/client/internal/routemanager/systemops/systemops_generic.go index ec219c7fe..4211eb057 100644 --- a/client/internal/routemanager/systemops/systemops_generic.go +++ b/client/internal/routemanager/systemops/systemops_generic.go @@ -21,6 +21,7 @@ import ( "github.com/netbirdio/netbird/client/internal/routemanager/util" "github.com/netbirdio/netbird/client/internal/routemanager/vars" "github.com/netbirdio/netbird/client/internal/statemanager" + nbnet "github.com/netbirdio/netbird/client/net" "github.com/netbirdio/netbird/client/net/hooks" ) @@ -31,8 +32,6 @@ var splitDefaultv4_2 = netip.PrefixFrom(netip.AddrFrom4([4]byte{128}), 1) var splitDefaultv6_1 = netip.PrefixFrom(netip.IPv6Unspecified(), 1) var splitDefaultv6_2 = netip.PrefixFrom(netip.AddrFrom16([16]byte{0x80}), 1) -var ErrRoutingIsSeparate = errors.New("routing is separate") - func (r *SysOps) setupRefCounter(initAddresses []net.IP, stateManager *statemanager.Manager) error { stateManager.RegisterState(&ShutdownState{}) @@ -397,12 +396,16 @@ func ipToAddr(ip net.IP, intf *net.Interface) (netip.Addr, error) { } // IsAddrRouted checks if the candidate address would route to the vpn, in which case it returns true and the matched prefix. +// When advanced routing is active the WG socket is bound to the physical interface (fwmark on linux, +// IP_UNICAST_IF on windows, IP_BOUND_IF on darwin) and bypasses the main routing table, so the check is skipped. func IsAddrRouted(addr netip.Addr, vpnRoutes []netip.Prefix) (bool, netip.Prefix) { - localRoutes, err := hasSeparateRouting() + if nbnet.AdvancedRouting() { + return false, netip.Prefix{} + } + + localRoutes, err := GetRoutesFromTable() if err != nil { - if !errors.Is(err, ErrRoutingIsSeparate) { - log.Errorf("Failed to get routes: %v", err) - } + log.Errorf("Failed to get routes: %v", err) return false, netip.Prefix{} } diff --git a/client/internal/routemanager/systemops/systemops_js.go b/client/internal/routemanager/systemops/systemops_js.go index 808507fc9..242571b3d 100644 --- a/client/internal/routemanager/systemops/systemops_js.go +++ b/client/internal/routemanager/systemops/systemops_js.go @@ -22,10 +22,6 @@ func GetRoutesFromTable() ([]netip.Prefix, error) { return []netip.Prefix{}, nil } -func hasSeparateRouting() ([]netip.Prefix, error) { - return []netip.Prefix{}, nil -} - // GetDetailedRoutesFromTable returns empty routes for WASM. func GetDetailedRoutesFromTable() ([]DetailedRoute, error) { return []DetailedRoute{}, nil diff --git a/client/internal/routemanager/systemops/systemops_linux.go b/client/internal/routemanager/systemops/systemops_linux.go index bd10f131f..39a9fd978 100644 --- a/client/internal/routemanager/systemops/systemops_linux.go +++ b/client/internal/routemanager/systemops/systemops_linux.go @@ -894,13 +894,6 @@ func getAddressFamily(prefix netip.Prefix) int { return netlink.FAMILY_V6 } -func hasSeparateRouting() ([]netip.Prefix, error) { - if !nbnet.AdvancedRouting() { - return GetRoutesFromTable() - } - return nil, ErrRoutingIsSeparate -} - func isOpErr(err error) bool { // EAFTNOSUPPORT when ipv6 is disabled via sysctl, EOPNOTSUPP when disabled in boot options or otherwise not supported if errors.Is(err, syscall.EAFNOSUPPORT) || errors.Is(err, syscall.EOPNOTSUPP) { diff --git a/client/internal/routemanager/systemops/systemops_nonlinux.go b/client/internal/routemanager/systemops/systemops_nonlinux.go index 905a7bc12..016a62ebd 100644 --- a/client/internal/routemanager/systemops/systemops_nonlinux.go +++ b/client/internal/routemanager/systemops/systemops_nonlinux.go @@ -48,10 +48,6 @@ func EnableIPForwarding() error { return nil } -func hasSeparateRouting() ([]netip.Prefix, error) { - return GetRoutesFromTable() -} - // GetIPRules returns IP rules for debugging (not supported on non-Linux platforms) func GetIPRules() ([]IPRule, error) { log.Infof("IP rules collection is not supported on %s", runtime.GOOS) diff --git a/client/internal/routemanager/systemops/systemops_unix.go b/client/internal/routemanager/systemops/systemops_unix.go index 7089178fb..2d3f9b69a 100644 --- a/client/internal/routemanager/systemops/systemops_unix.go +++ b/client/internal/routemanager/systemops/systemops_unix.go @@ -25,6 +25,9 @@ import ( const ( envRouteProtoFlag = "NB_ROUTE_PROTO_FLAG" + + // routeBudget bounds retries for per-prefix exclusion route programming. + routeBudget = 1 * time.Second ) var routeProtoFlag int @@ -41,26 +44,42 @@ func init() { } func (r *SysOps) SetupRouting(initAddresses []net.IP, stateManager *statemanager.Manager, advancedRouting bool) error { + if advancedRouting { + return r.setupAdvancedRouting() + } + + log.Infof("Using legacy routing setup with ref counters") return r.setupRefCounter(initAddresses, stateManager) } func (r *SysOps) CleanupRouting(stateManager *statemanager.Manager, advancedRouting bool) error { + if advancedRouting { + return r.cleanupAdvancedRouting() + } + return r.cleanupRefCounter(stateManager) } // FlushMarkedRoutes removes single IP exclusion routes marked with the configured RTF_PROTO flag. +// On darwin it also flushes residual RTF_IFSCOPE scoped default routes so a +// crashed prior session can't leave crud in the table. func (r *SysOps) FlushMarkedRoutes() error { + var merr *multierror.Error + + if err := r.flushPlatformExtras(); err != nil { + merr = multierror.Append(merr, fmt.Errorf("flush platform extras: %w", err)) + } + rib, err := retryFetchRIB() if err != nil { - return fmt.Errorf("fetch routing table: %w", err) + return nberrors.FormatErrorOrNil(multierror.Append(merr, fmt.Errorf("fetch routing table: %w", err))) } msgs, err := route.ParseRIB(route.RIBTypeRoute, rib) if err != nil { - return fmt.Errorf("parse routing table: %w", err) + return nberrors.FormatErrorOrNil(multierror.Append(merr, fmt.Errorf("parse routing table: %w", err))) } - var merr *multierror.Error flushedCount := 0 for _, msg := range msgs { @@ -117,12 +136,12 @@ func (r *SysOps) routeSocket(action int, prefix netip.Prefix, nexthop Nexthop) e return fmt.Errorf("invalid prefix: %s", prefix) } - expBackOff := backoff.NewExponentialBackOff() - expBackOff.InitialInterval = 50 * time.Millisecond - expBackOff.MaxInterval = 500 * time.Millisecond - expBackOff.MaxElapsedTime = 1 * time.Second + msg, err := r.buildRouteMessage(action, prefix, nexthop) + if err != nil { + return fmt.Errorf("build route message: %w", err) + } - if err := backoff.Retry(r.routeOp(action, prefix, nexthop), expBackOff); err != nil { + if err := r.writeRouteMessage(msg, routeBudget); err != nil { a := "add" if action == unix.RTM_DELETE { a = "remove" @@ -132,50 +151,91 @@ func (r *SysOps) routeSocket(action int, prefix netip.Prefix, nexthop Nexthop) e return nil } -func (r *SysOps) routeOp(action int, prefix netip.Prefix, nexthop Nexthop) func() error { - operation := func() error { - fd, err := unix.Socket(syscall.AF_ROUTE, syscall.SOCK_RAW, syscall.AF_UNSPEC) - if err != nil { - return fmt.Errorf("open routing socket: %w", err) +// writeRouteMessage sends a route message over AF_ROUTE and waits for the +// kernel's matching reply, retrying transient failures until budget elapses. +// Callers do not need to manage sockets or seq numbers themselves. +func (r *SysOps) writeRouteMessage(msg *route.RouteMessage, budget time.Duration) error { + expBackOff := backoff.NewExponentialBackOff() + expBackOff.InitialInterval = 50 * time.Millisecond + expBackOff.MaxInterval = 500 * time.Millisecond + expBackOff.MaxElapsedTime = budget + + return backoff.Retry(func() error { return routeMessageRoundtrip(msg) }, expBackOff) +} + +func routeMessageRoundtrip(msg *route.RouteMessage) error { + fd, err := unix.Socket(syscall.AF_ROUTE, syscall.SOCK_RAW, syscall.AF_UNSPEC) + if err != nil { + return fmt.Errorf("open routing socket: %w", err) + } + defer func() { + if err := unix.Close(fd); err != nil && !errors.Is(err, unix.EBADF) { + log.Warnf("close routing socket: %v", err) } - defer func() { - if err := unix.Close(fd); err != nil && !errors.Is(err, unix.EBADF) { - log.Warnf("failed to close routing socket: %v", err) + }() + + tv := unix.Timeval{Sec: 1} + if err := unix.SetsockoptTimeval(fd, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &tv); err != nil { + return backoff.Permanent(fmt.Errorf("set recv timeout: %w", err)) + } + + // AF_ROUTE is a broadcast channel: every route socket on the host sees + // every RTM_* event. With concurrent route programming the default + // per-socket queue overflows and our own reply gets dropped. + if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF, 1<<20); err != nil { + log.Debugf("set SO_RCVBUF on route socket: %v", err) + } + + bytes, err := msg.Marshal() + if err != nil { + return backoff.Permanent(fmt.Errorf("marshal: %w", err)) + } + + if _, err = unix.Write(fd, bytes); err != nil { + if errors.Is(err, unix.ENOBUFS) || errors.Is(err, unix.EAGAIN) { + return fmt.Errorf("write: %w", err) + } + return backoff.Permanent(fmt.Errorf("write: %w", err)) + } + return readRouteResponse(fd, msg.Type, msg.Seq) +} + +// readRouteResponse reads from the AF_ROUTE socket until it sees a reply +// matching our write (same type, seq, and pid). AF_ROUTE SOCK_RAW is a +// broadcast channel: interface up/down, third-party route changes and neighbor +// discovery events can all land between our write and read, so we must filter. +func readRouteResponse(fd, wantType, wantSeq int) error { + pid := int32(os.Getpid()) + resp := make([]byte, 2048) + deadline := time.Now().Add(time.Second) + for { + if time.Now().After(deadline) { + // Transient: under concurrent pressure the kernel can drop our reply + // from the socket buffer. Let backoff.Retry re-send with a fresh seq. + return fmt.Errorf("read: timeout waiting for route reply type=%d seq=%d", wantType, wantSeq) + } + n, err := unix.Read(fd, resp) + if err != nil { + if errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EWOULDBLOCK) { + // SO_RCVTIMEO fired while waiting; loop to re-check the absolute deadline. + continue } - }() - - msg, err := r.buildRouteMessage(action, prefix, nexthop) - if err != nil { - return backoff.Permanent(fmt.Errorf("build route message: %w", err)) + return backoff.Permanent(fmt.Errorf("read: %w", err)) } - - msgBytes, err := msg.Marshal() - if err != nil { - return backoff.Permanent(fmt.Errorf("marshal route message: %w", err)) + if n < int(unsafe.Sizeof(unix.RtMsghdr{})) { + continue } - - if _, err = unix.Write(fd, msgBytes); err != nil { - if errors.Is(err, unix.ENOBUFS) || errors.Is(err, unix.EAGAIN) { - return fmt.Errorf("write: %w", err) - } - return backoff.Permanent(fmt.Errorf("write: %w", err)) + hdr := (*unix.RtMsghdr)(unsafe.Pointer(&resp[0])) + // Darwin reflects the sender's pid on replies; matching (Type, Seq, Pid) + // uniquely identifies our own reply among broadcast traffic. + if int(hdr.Type) != wantType || int(hdr.Seq) != wantSeq || hdr.Pid != pid { + continue } - - respBuf := make([]byte, 2048) - n, err := unix.Read(fd, respBuf) - if err != nil { - return backoff.Permanent(fmt.Errorf("read route response: %w", err)) + if hdr.Errno != 0 { + return backoff.Permanent(fmt.Errorf("kernel: %w", syscall.Errno(hdr.Errno))) } - - if n > 0 { - if err := r.parseRouteResponse(respBuf[:n]); err != nil { - return backoff.Permanent(err) - } - } - return nil } - return operation } func (r *SysOps) buildRouteMessage(action int, prefix netip.Prefix, nexthop Nexthop) (msg *route.RouteMessage, err error) { @@ -183,6 +243,7 @@ func (r *SysOps) buildRouteMessage(action int, prefix netip.Prefix, nexthop Next Type: action, Flags: unix.RTF_UP | routeProtoFlag, Version: unix.RTM_VERSION, + ID: uintptr(os.Getpid()), Seq: r.getSeq(), } @@ -221,19 +282,6 @@ func (r *SysOps) buildRouteMessage(action int, prefix netip.Prefix, nexthop Next return msg, nil } -func (r *SysOps) parseRouteResponse(buf []byte) error { - if len(buf) < int(unsafe.Sizeof(unix.RtMsghdr{})) { - return nil - } - - rtMsg := (*unix.RtMsghdr)(unsafe.Pointer(&buf[0])) - if rtMsg.Errno != 0 { - return fmt.Errorf("parse: %d", rtMsg.Errno) - } - - return nil -} - // addrToRouteAddr converts a netip.Addr to the appropriate route.Addr (*route.Inet4Addr or *route.Inet6Addr). func addrToRouteAddr(addr netip.Addr) (route.Addr, error) { if addr.Is4() { diff --git a/client/net/dialer_init_darwin.go b/client/net/dialer_init_darwin.go new file mode 100644 index 000000000..e18909ff7 --- /dev/null +++ b/client/net/dialer_init_darwin.go @@ -0,0 +1,5 @@ +package net + +func (d *Dialer) init() { + d.Dialer.Control = applyBoundIfToSocket +} diff --git a/client/net/dialer_init_generic.go b/client/net/dialer_init_generic.go index 18ebc6ad1..78973b47d 100644 --- a/client/net/dialer_init_generic.go +++ b/client/net/dialer_init_generic.go @@ -1,4 +1,4 @@ -//go:build !linux && !windows +//go:build !linux && !windows && !darwin package net diff --git a/client/net/env_android.go b/client/net/env_android.go deleted file mode 100644 index 9d89951a1..000000000 --- a/client/net/env_android.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build android - -package net - -// Init initializes the network environment for Android -func Init() { - // No initialization needed on Android -} - -// AdvancedRouting reports whether routing loops can be avoided without using exclusion routes. -// Always returns true on Android since we cannot handle routes dynamically. -func AdvancedRouting() bool { - return true -} - -// SetVPNInterfaceName is a no-op on Android -func SetVPNInterfaceName(name string) { - // No-op on Android - not needed for Android VPN service -} - -// GetVPNInterfaceName returns empty string on Android -func GetVPNInterfaceName() string { - return "" -} diff --git a/client/net/env_windows.go b/client/net/env_bound_iface.go similarity index 71% rename from client/net/env_windows.go rename to client/net/env_bound_iface.go index 7e8868ba5..593988c2c 100644 --- a/client/net/env_windows.go +++ b/client/net/env_bound_iface.go @@ -1,4 +1,4 @@ -//go:build windows +//go:build (darwin && !ios) || windows package net @@ -24,17 +24,22 @@ func Init() { } func checkAdvancedRoutingSupport() bool { - var err error - var legacyRouting bool + legacyRouting := false if val := os.Getenv(envUseLegacyRouting); val != "" { - legacyRouting, err = strconv.ParseBool(val) + parsed, err := strconv.ParseBool(val) if err != nil { - log.Warnf("failed to parse %s: %v", envUseLegacyRouting, err) + log.Warnf("ignoring unparsable %s=%q: %v", envUseLegacyRouting, val, err) + } else { + legacyRouting = parsed } } - if legacyRouting || netstack.IsEnabled() { - log.Info("advanced routing has been requested to be disabled") + if legacyRouting { + log.Infof("advanced routing disabled: legacy routing requested via %s", envUseLegacyRouting) + return false + } + if netstack.IsEnabled() { + log.Info("advanced routing disabled: netstack mode is enabled") return false } diff --git a/client/net/env_generic.go b/client/net/env_generic.go index f467930c3..18c10bb78 100644 --- a/client/net/env_generic.go +++ b/client/net/env_generic.go @@ -1,4 +1,4 @@ -//go:build !linux && !windows && !android +//go:build !linux && !windows && !darwin package net diff --git a/client/net/env_mobile.go b/client/net/env_mobile.go new file mode 100644 index 000000000..80b0fad8d --- /dev/null +++ b/client/net/env_mobile.go @@ -0,0 +1,25 @@ +//go:build ios || android + +package net + +// Init initializes the network environment for mobile platforms. +func Init() { + // no-op on mobile: routing scope is owned by the VPN extension. +} + +// AdvancedRouting reports whether routing loops can be avoided without using exclusion routes. +// Always returns true on mobile since routes cannot be handled dynamically and the VPN extension +// owns the routing scope. +func AdvancedRouting() bool { + return true +} + +// SetVPNInterfaceName is a no-op on mobile. +func SetVPNInterfaceName(string) { + // no-op on mobile: the VPN extension manages the interface. +} + +// GetVPNInterfaceName returns an empty string on mobile. +func GetVPNInterfaceName() string { + return "" +} diff --git a/client/net/listener_init_darwin.go b/client/net/listener_init_darwin.go new file mode 100644 index 000000000..f2fcc80ed --- /dev/null +++ b/client/net/listener_init_darwin.go @@ -0,0 +1,5 @@ +package net + +func (l *ListenerConfig) init() { + l.ListenConfig.Control = applyBoundIfToSocket +} diff --git a/client/net/listener_init_generic.go b/client/net/listener_init_generic.go index 4f8f17ab2..65a785222 100644 --- a/client/net/listener_init_generic.go +++ b/client/net/listener_init_generic.go @@ -1,4 +1,4 @@ -//go:build !linux && !windows +//go:build !linux && !windows && !darwin package net diff --git a/client/net/net_darwin.go b/client/net/net_darwin.go new file mode 100644 index 000000000..00d858a6a --- /dev/null +++ b/client/net/net_darwin.go @@ -0,0 +1,160 @@ +package net + +import ( + "fmt" + "net" + "net/netip" + "strconv" + "strings" + "sync" + "syscall" + + log "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// On darwin IPV6_BOUND_IF also scopes v4-mapped egress from dual-stack +// (IPV6_V6ONLY=0) AF_INET6 sockets, so a single setsockopt on "udp6"/"tcp6" +// covers both families. Setting IP_BOUND_IF on an AF_INET6 socket returns +// EINVAL regardless of V6ONLY because the IPPROTO_IP ctloutput path is +// dispatched by socket domain (AF_INET only) not by inp_vflag. + +// boundIface holds the physical interface chosen at routing setup time. Sockets +// created via nbnet.NewDialer / nbnet.NewListener bind to it via IP_BOUND_IF +// (IPv4) or IPV6_BOUND_IF (IPv6 / dual-stack) so their scoped route lookup +// hits the RTF_IFSCOPE default installed by the routemanager, rather than +// following the VPN's split default. +var ( + boundIfaceMu sync.RWMutex + boundIface4 *net.Interface + boundIface6 *net.Interface +) + +// SetBoundInterface records the egress interface for an address family. Called +// by the routemanager after a scoped default route has been installed. +// af must be unix.AF_INET or unix.AF_INET6; other values are ignored. +// nil iface is rejected — use ClearBoundInterfaces to clear all slots. +func SetBoundInterface(af int, iface *net.Interface) { + if iface == nil { + log.Warnf("SetBoundInterface: nil iface for AF %d, ignored", af) + return + } + boundIfaceMu.Lock() + defer boundIfaceMu.Unlock() + switch af { + case unix.AF_INET: + boundIface4 = iface + case unix.AF_INET6: + boundIface6 = iface + default: + log.Warnf("SetBoundInterface: unsupported address family %d", af) + } +} + +// ClearBoundInterfaces resets the cached egress interfaces. Called by the +// routemanager during cleanup. +func ClearBoundInterfaces() { + boundIfaceMu.Lock() + defer boundIfaceMu.Unlock() + boundIface4 = nil + boundIface6 = nil +} + +// boundInterfaceFor returns the cached egress interface for a socket's address +// family, falling back to the other family if the preferred slot is empty. +// The kernel stores both IP_BOUND_IF and IPV6_BOUND_IF in inp_boundifp, so +// either setsockopt scopes the socket; preferring same-family still matters +// when v4 and v6 defaults egress different NICs. +func boundInterfaceFor(network, address string) *net.Interface { + if iface := zoneInterface(address); iface != nil { + return iface + } + + boundIfaceMu.RLock() + defer boundIfaceMu.RUnlock() + + primary, secondary := boundIface4, boundIface6 + if isV6Network(network) { + primary, secondary = boundIface6, boundIface4 + } + if primary != nil { + return primary + } + return secondary +} + +func isV6Network(network string) bool { + return strings.HasSuffix(network, "6") +} + +// zoneInterface extracts an explicit interface from an IPv6 link-local zone (e.g. fe80::1%en0). +func zoneInterface(address string) *net.Interface { + if address == "" { + return nil + } + addr, err := netip.ParseAddrPort(address) + if err != nil { + a, err := netip.ParseAddr(address) + if err != nil { + return nil + } + addr = netip.AddrPortFrom(a, 0) + } + zone := addr.Addr().Zone() + if zone == "" { + return nil + } + if iface, err := net.InterfaceByName(zone); err == nil { + return iface + } + if idx, err := strconv.Atoi(zone); err == nil { + if iface, err := net.InterfaceByIndex(idx); err == nil { + return iface + } + } + return nil +} + +func setIPv4BoundIf(fd uintptr, iface *net.Interface) error { + if err := unix.SetsockoptInt(int(fd), unix.IPPROTO_IP, unix.IP_BOUND_IF, iface.Index); err != nil { + return fmt.Errorf("set IP_BOUND_IF: %w (interface: %s, index: %d)", err, iface.Name, iface.Index) + } + return nil +} + +func setIPv6BoundIf(fd uintptr, iface *net.Interface) error { + if err := unix.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_BOUND_IF, iface.Index); err != nil { + return fmt.Errorf("set IPV6_BOUND_IF: %w (interface: %s, index: %d)", err, iface.Name, iface.Index) + } + return nil +} + +// applyBoundIfToSocket binds the socket to the cached physical egress interface +// so scoped route lookup avoids the VPN utun and egresses the underlay directly. +func applyBoundIfToSocket(network, address string, c syscall.RawConn) error { + if !AdvancedRouting() { + return nil + } + + iface := boundInterfaceFor(network, address) + if iface == nil { + log.Debugf("no bound iface cached for %s to %s, skipping BOUND_IF", network, address) + return nil + } + + isV6 := isV6Network(network) + var controlErr error + if err := c.Control(func(fd uintptr) { + if isV6 { + controlErr = setIPv6BoundIf(fd, iface) + } else { + controlErr = setIPv4BoundIf(fd, iface) + } + if controlErr == nil { + log.Debugf("set BOUND_IF=%d on %s for %s to %s", iface.Index, iface.Name, network, address) + } + }); err != nil { + return fmt.Errorf("control: %w", err) + } + return controlErr +} diff --git a/client/server/state.go b/client/server/state.go index 8dca6bde1..f2d823465 100644 --- a/client/server/state.go +++ b/client/server/state.go @@ -12,7 +12,6 @@ import ( "github.com/netbirdio/netbird/client/internal" "github.com/netbirdio/netbird/client/internal/routemanager/systemops" "github.com/netbirdio/netbird/client/internal/statemanager" - nbnet "github.com/netbirdio/netbird/client/net" "github.com/netbirdio/netbird/client/proto" ) @@ -138,10 +137,8 @@ func restoreResidualState(ctx context.Context, statePath string) error { } // clean up any remaining routes independently of the state file - if !nbnet.AdvancedRouting() { - if err := systemops.New(nil, nil).FlushMarkedRoutes(); err != nil { - merr = multierror.Append(merr, fmt.Errorf("flush marked routes: %w", err)) - } + if err := systemops.New(nil, nil).FlushMarkedRoutes(); err != nil { + merr = multierror.Append(merr, fmt.Errorf("flush marked routes: %w", err)) } return nberrors.FormatErrorOrNil(merr) From 95213f715714177b7c9672b3f264f84c9a31cf04 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Mon, 20 Apr 2026 17:24:11 +0900 Subject: [PATCH 324/374] [client] Use Match host+exec instead of Host+Match in SSH client config (#5903) --- client/ssh/config/manager.go | 25 ++++++++++++------------- client/ssh/config/manager_test.go | 31 +++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 13 deletions(-) diff --git a/client/ssh/config/manager.go b/client/ssh/config/manager.go index cc47fd2d2..6e584b2c3 100644 --- a/client/ssh/config/manager.go +++ b/client/ssh/config/manager.go @@ -187,24 +187,23 @@ func (m *Manager) buildPeerConfig(allHostPatterns []string) (string, error) { return "", fmt.Errorf("get NetBird executable path: %w", err) } - hostLine := strings.Join(deduplicatedPatterns, " ") - config := fmt.Sprintf("Host %s\n", hostLine) - config += fmt.Sprintf(" Match exec \"%s ssh detect %%h %%p\"\n", execPath) - config += " PreferredAuthentications password,publickey,keyboard-interactive\n" - config += " PasswordAuthentication yes\n" - config += " PubkeyAuthentication yes\n" - config += " BatchMode no\n" - config += fmt.Sprintf(" ProxyCommand %s ssh proxy %%h %%p\n", execPath) - config += " StrictHostKeyChecking no\n" + hostList := strings.Join(deduplicatedPatterns, ",") + config := fmt.Sprintf("Match host \"%s\" exec \"%s ssh detect %%h %%p\"\n", hostList, execPath) + config += " PreferredAuthentications password,publickey,keyboard-interactive\n" + config += " PasswordAuthentication yes\n" + config += " PubkeyAuthentication yes\n" + config += " BatchMode no\n" + config += fmt.Sprintf(" ProxyCommand %s ssh proxy %%h %%p\n", execPath) + config += " StrictHostKeyChecking no\n" if runtime.GOOS == "windows" { - config += " UserKnownHostsFile NUL\n" + config += " UserKnownHostsFile NUL\n" } else { - config += " UserKnownHostsFile /dev/null\n" + config += " UserKnownHostsFile /dev/null\n" } - config += " CheckHostIP no\n" - config += " LogLevel ERROR\n\n" + config += " CheckHostIP no\n" + config += " LogLevel ERROR\n\n" return config, nil } diff --git a/client/ssh/config/manager_test.go b/client/ssh/config/manager_test.go index dc3ad95b3..e7380c7f2 100644 --- a/client/ssh/config/manager_test.go +++ b/client/ssh/config/manager_test.go @@ -116,6 +116,37 @@ func TestManager_PeerLimit(t *testing.T) { assert.True(t, os.IsNotExist(err), "SSH config should not be created with too many peers") } +func TestManager_MatchHostFormat(t *testing.T) { + tempDir, err := os.MkdirTemp("", "netbird-ssh-config-test") + require.NoError(t, err) + defer func() { assert.NoError(t, os.RemoveAll(tempDir)) }() + + manager := &Manager{ + sshConfigDir: filepath.Join(tempDir, "ssh_config.d"), + sshConfigFile: "99-netbird.conf", + } + + peers := []PeerSSHInfo{ + {Hostname: "peer1", IP: "100.125.1.1", FQDN: "peer1.nb.internal"}, + {Hostname: "peer2", IP: "100.125.1.2", FQDN: "peer2.nb.internal"}, + } + + err = manager.SetupSSHClientConfig(peers) + require.NoError(t, err) + + configPath := filepath.Join(manager.sshConfigDir, manager.sshConfigFile) + content, err := os.ReadFile(configPath) + require.NoError(t, err) + configStr := string(content) + + // Must use "Match host" with comma-separated patterns, not a bare "Host" directive. + // A bare "Host" followed by "Match exec" is incorrect per ssh_config(5): the Host block + // ends at the next Match keyword, making it a no-op and leaving the Match exec unscoped. + assert.NotContains(t, configStr, "\nHost ", "should not use bare Host directive") + assert.Contains(t, configStr, "Match host \"100.125.1.1,peer1.nb.internal,peer1,100.125.1.2,peer2.nb.internal,peer2\"", + "should use Match host with comma-separated patterns") +} + func TestManager_ForcedSSHConfig(t *testing.T) { // Set force environment variable t.Setenv(EnvForceSSHConfig, "true") From e3611265159bd372f77ca404d32c86127e036525 Mon Sep 17 00:00:00 2001 From: Michael Uray <25169478+MichaelUray@users.noreply.github.com> Date: Mon, 20 Apr 2026 10:36:19 +0200 Subject: [PATCH 325/374] [client] Fix WGIface.Close deadlock when DNS filter hook re-enters GetDevice (#5916) WGIface.Close() took w.mu and held it across w.tun.Close(). The underlying wireguard-go device waits for its send/receive goroutines to drain before Close() returns, and some of those goroutines re-enter WGIface during shutdown. In particular, the userspace packet filter DNS hook in client/internal/dns.ServiceViaMemory.filterDNSTraffic calls s.wgInterface.GetDevice() on every packet, which also needs w.mu. With the Close-side holding the mutex, the read goroutine blocks in GetDevice and Close waits forever for that goroutine to exit: goroutine N (TestDNSPermanent_updateUpstream): WGIface.Close -> holds w.mu -> tun.Close -> sync.WaitGroup.Wait goroutine M (wireguard read routine): FilteredDevice.Read -> filterOutbound -> udpHooksDrop -> filterDNSTraffic.func1 -> WGIface.GetDevice -> sync.Mutex.Lock This surfaces as a 5 minute test timeout on the macOS Client/Unit CI job (panic: test timed out after 5m0s, running tests: TestDNSPermanent_updateUpstream). Release w.mu before calling w.tun.Close(). The other Close steps (wgProxyFactory.Free, waitUntilRemoved, Destroy) do not mutate any fields guarded by w.mu beyond what Free() already does, so the lock is not needed once the tun has started shutting down. A new unit test in iface_close_test.go uses a fake WGTunDevice to reproduce the deadlock deterministically without requiring CAP_NET_ADMIN. --- client/iface/iface.go | 11 ++- client/iface/iface_close_test.go | 113 +++++++++++++++++++++++++++++++ 2 files changed, 122 insertions(+), 2 deletions(-) create mode 100644 client/iface/iface_close_test.go diff --git a/client/iface/iface.go b/client/iface/iface.go index 9b331d68c..655dd1682 100644 --- a/client/iface/iface.go +++ b/client/iface/iface.go @@ -217,7 +217,6 @@ func (w *WGIface) RemoveAllowedIP(peerKey string, allowedIP netip.Prefix) error // Close closes the tunnel interface func (w *WGIface) Close() error { w.mu.Lock() - defer w.mu.Unlock() var result *multierror.Error @@ -225,7 +224,15 @@ func (w *WGIface) Close() error { result = multierror.Append(result, fmt.Errorf("failed to free WireGuard proxy: %w", err)) } - if err := w.tun.Close(); err != nil { + // Release w.mu before calling w.tun.Close(): the underlying + // wireguard-go device.Close() waits for its send/receive goroutines + // to drain. Some of those goroutines re-enter WGIface methods that + // take w.mu (e.g. the packet filter DNS hook calls GetDevice()), so + // holding the mutex here would deadlock the shutdown path. + tun := w.tun + w.mu.Unlock() + + if err := tun.Close(); err != nil { result = multierror.Append(result, fmt.Errorf("failed to close wireguard interface %s: %w", w.Name(), err)) } diff --git a/client/iface/iface_close_test.go b/client/iface/iface_close_test.go new file mode 100644 index 000000000..171e15d0a --- /dev/null +++ b/client/iface/iface_close_test.go @@ -0,0 +1,113 @@ +//go:build !android + +package iface + +import ( + "errors" + "sync" + "testing" + "time" + + wgdevice "golang.zx2c4.com/wireguard/device" + "golang.zx2c4.com/wireguard/tun/netstack" + + "github.com/netbirdio/netbird/client/iface/device" + "github.com/netbirdio/netbird/client/iface/udpmux" + "github.com/netbirdio/netbird/client/iface/wgaddr" + "github.com/netbirdio/netbird/client/iface/wgproxy" +) + +// fakeTunDevice implements WGTunDevice and lets the test control when +// Close() returns. It mimics the wireguard-go shutdown path, which blocks +// until its goroutines drain. Some of those goroutines (e.g. the packet +// filter DNS hook in client/internal/dns) call back into WGIface, so if +// WGIface.Close() held w.mu across tun.Close() the shutdown would +// deadlock. +type fakeTunDevice struct { + closeStarted chan struct{} + unblockClose chan struct{} +} + +func (f *fakeTunDevice) Create() (device.WGConfigurer, error) { + return nil, errors.New("not implemented") +} +func (f *fakeTunDevice) Up() (*udpmux.UniversalUDPMuxDefault, error) { + return nil, errors.New("not implemented") +} +func (f *fakeTunDevice) UpdateAddr(wgaddr.Address) error { return nil } +func (f *fakeTunDevice) WgAddress() wgaddr.Address { return wgaddr.Address{} } +func (f *fakeTunDevice) MTU() uint16 { return DefaultMTU } +func (f *fakeTunDevice) DeviceName() string { return "nb-close-test" } +func (f *fakeTunDevice) FilteredDevice() *device.FilteredDevice { return nil } +func (f *fakeTunDevice) Device() *wgdevice.Device { return nil } +func (f *fakeTunDevice) GetNet() *netstack.Net { return nil } +func (f *fakeTunDevice) GetICEBind() device.EndpointManager { return nil } + +func (f *fakeTunDevice) Close() error { + close(f.closeStarted) + <-f.unblockClose + return nil +} + +type fakeProxyFactory struct{} + +func (fakeProxyFactory) GetProxy() wgproxy.Proxy { return nil } +func (fakeProxyFactory) GetProxyPort() uint16 { return 0 } +func (fakeProxyFactory) Free() error { return nil } + +// TestWGIface_CloseReleasesMutexBeforeTunClose guards against a deadlock +// that surfaces as a macOS test-timeout in +// TestDNSPermanent_updateUpstream: WGIface.Close() used to hold w.mu +// while waiting for the wireguard-go device goroutines to finish, and +// one of those goroutines (the DNS filter hook) calls back into +// WGIface.GetDevice() which needs the same mutex. The fix is to drop +// the lock before tun.Close() returns control. +func TestWGIface_CloseReleasesMutexBeforeTunClose(t *testing.T) { + tun := &fakeTunDevice{ + closeStarted: make(chan struct{}), + unblockClose: make(chan struct{}), + } + w := &WGIface{ + tun: tun, + wgProxyFactory: fakeProxyFactory{}, + } + + closeDone := make(chan error, 1) + go func() { + closeDone <- w.Close() + }() + + select { + case <-tun.closeStarted: + case <-time.After(2 * time.Second): + close(tun.unblockClose) + t.Fatal("tun.Close() was never invoked") + } + + // Simulate the WireGuard read goroutine calling back into WGIface + // via the packet filter's DNS hook. If Close() still held w.mu + // during tun.Close(), this would block until the test timeout. + getDeviceDone := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + _ = w.GetDevice() + close(getDeviceDone) + }() + + select { + case <-getDeviceDone: + case <-time.After(2 * time.Second): + close(tun.unblockClose) + wg.Wait() + t.Fatal("GetDevice() deadlocked while WGIface.Close was closing the tun") + } + + close(tun.unblockClose) + select { + case <-closeDone: + case <-time.After(2 * time.Second): + t.Fatal("WGIface.Close() never returned after the tun was unblocked") + } +} From 7f023ce8014084385e677e67c541677336e25431 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Mon, 20 Apr 2026 11:26:30 +0200 Subject: [PATCH 326/374] [client] Android debug bundle support (#5888) Add Android debug bundle support with Troubleshoot UI --- Makefile | 2 +- client/android/client.go | 117 ++++++++++++++++++++-- client/android/platform_files.go | 1 + client/internal/connect.go | 3 + client/internal/debug/debug.go | 18 ++-- client/internal/debug/debug_android.go | 41 ++++++++ client/internal/debug/debug_nonandroid.go | 25 +++++ client/internal/engine.go | 2 + client/internal/mobile_dependency.go | 4 + 9 files changed, 191 insertions(+), 22 deletions(-) create mode 100644 client/internal/debug/debug_android.go create mode 100644 client/internal/debug/debug_nonandroid.go diff --git a/Makefile b/Makefile index 43379e115..5d52b94fa 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ GOLANGCI_LINT := $(shell pwd)/bin/golangci-lint $(GOLANGCI_LINT): @echo "Installing golangci-lint..." @mkdir -p ./bin - @GOBIN=$(shell pwd)/bin go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest + @GOBIN=$(shell pwd)/bin go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@latest # Lint only changed files (fast, for pre-push) lint: $(GOLANGCI_LINT) diff --git a/client/android/client.go b/client/android/client.go index d35bf4279..37e17a363 100644 --- a/client/android/client.go +++ b/client/android/client.go @@ -8,6 +8,7 @@ import ( "os" "slices" "sync" + "time" "golang.org/x/exp/maps" @@ -15,6 +16,7 @@ import ( "github.com/netbirdio/netbird/client/iface/device" "github.com/netbirdio/netbird/client/internal" + "github.com/netbirdio/netbird/client/internal/debug" "github.com/netbirdio/netbird/client/internal/dns" "github.com/netbirdio/netbird/client/internal/listener" "github.com/netbirdio/netbird/client/internal/peer" @@ -26,6 +28,7 @@ import ( "github.com/netbirdio/netbird/formatter" "github.com/netbirdio/netbird/route" "github.com/netbirdio/netbird/shared/management/domain" + types "github.com/netbirdio/netbird/upload-server/types" ) // ConnectionListener export internal Listener for mobile @@ -68,7 +71,30 @@ type Client struct { uiVersion string networkChangeListener listener.NetworkChangeListener + stateMu sync.RWMutex connectClient *internal.ConnectClient + config *profilemanager.Config + cacheDir string +} + +func (c *Client) setState(cfg *profilemanager.Config, cacheDir string, cc *internal.ConnectClient) { + c.stateMu.Lock() + defer c.stateMu.Unlock() + c.config = cfg + c.cacheDir = cacheDir + c.connectClient = cc +} + +func (c *Client) stateSnapshot() (*profilemanager.Config, string, *internal.ConnectClient) { + c.stateMu.RLock() + defer c.stateMu.RUnlock() + return c.config, c.cacheDir, c.connectClient +} + +func (c *Client) getConnectClient() *internal.ConnectClient { + c.stateMu.RLock() + defer c.stateMu.RUnlock() + return c.connectClient } // NewClient instantiate a new Client @@ -93,6 +119,7 @@ func (c *Client) Run(platformFiles PlatformFiles, urlOpener URLOpener, isAndroid cfgFile := platformFiles.ConfigurationFilePath() stateFile := platformFiles.StateFilePath() + cacheDir := platformFiles.CacheDir() log.Infof("Starting client with config: %s, state: %s", cfgFile, stateFile) @@ -124,8 +151,9 @@ func (c *Client) Run(platformFiles PlatformFiles, urlOpener URLOpener, isAndroid // todo do not throw error in case of cancelled context ctx = internal.CtxInitState(ctx) - c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder) - return c.connectClient.RunOnAndroid(c.tunAdapter, c.iFaceDiscover, c.networkChangeListener, slices.Clone(dns.items), dnsReadyListener, stateFile) + connectClient := internal.NewConnectClient(ctx, cfg, c.recorder) + c.setState(cfg, cacheDir, connectClient) + return connectClient.RunOnAndroid(c.tunAdapter, c.iFaceDiscover, c.networkChangeListener, slices.Clone(dns.items), dnsReadyListener, stateFile, cacheDir) } // RunWithoutLogin we apply this type of run function when the backed has been started without UI (i.e. after reboot). @@ -135,6 +163,7 @@ func (c *Client) RunWithoutLogin(platformFiles PlatformFiles, dns *DNSList, dnsR cfgFile := platformFiles.ConfigurationFilePath() stateFile := platformFiles.StateFilePath() + cacheDir := platformFiles.CacheDir() log.Infof("Starting client without login with config: %s, state: %s", cfgFile, stateFile) @@ -157,8 +186,9 @@ func (c *Client) RunWithoutLogin(platformFiles PlatformFiles, dns *DNSList, dnsR // todo do not throw error in case of cancelled context ctx = internal.CtxInitState(ctx) - c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder) - return c.connectClient.RunOnAndroid(c.tunAdapter, c.iFaceDiscover, c.networkChangeListener, slices.Clone(dns.items), dnsReadyListener, stateFile) + connectClient := internal.NewConnectClient(ctx, cfg, c.recorder) + c.setState(cfg, cacheDir, connectClient) + return connectClient.RunOnAndroid(c.tunAdapter, c.iFaceDiscover, c.networkChangeListener, slices.Clone(dns.items), dnsReadyListener, stateFile, cacheDir) } // Stop the internal client and free the resources @@ -173,11 +203,12 @@ func (c *Client) Stop() { } func (c *Client) RenewTun(fd int) error { - if c.connectClient == nil { + cc := c.getConnectClient() + if cc == nil { return fmt.Errorf("engine not running") } - e := c.connectClient.Engine() + e := cc.Engine() if e == nil { return fmt.Errorf("engine not initialized") } @@ -185,6 +216,73 @@ func (c *Client) RenewTun(fd int) error { return e.RenewTun(fd) } +// DebugBundle generates a debug bundle, uploads it, and returns the upload key. +// It works both with and without a running engine. +func (c *Client) DebugBundle(platformFiles PlatformFiles, anonymize bool) (string, error) { + cfg, cacheDir, cc := c.stateSnapshot() + + // If the engine hasn't been started, load config from disk + if cfg == nil { + var err error + cfg, err = profilemanager.UpdateOrCreateConfig(profilemanager.ConfigInput{ + ConfigPath: platformFiles.ConfigurationFilePath(), + }) + if err != nil { + return "", fmt.Errorf("load config: %w", err) + } + cacheDir = platformFiles.CacheDir() + } + + deps := debug.GeneratorDependencies{ + InternalConfig: cfg, + StatusRecorder: c.recorder, + TempDir: cacheDir, + } + + if cc != nil { + resp, err := cc.GetLatestSyncResponse() + if err != nil { + log.Warnf("get latest sync response: %v", err) + } + deps.SyncResponse = resp + + if e := cc.Engine(); e != nil { + if cm := e.GetClientMetrics(); cm != nil { + deps.ClientMetrics = cm + } + } + } + + bundleGenerator := debug.NewBundleGenerator( + deps, + debug.BundleConfig{ + Anonymize: anonymize, + IncludeSystemInfo: true, + }, + ) + + path, err := bundleGenerator.Generate() + if err != nil { + return "", fmt.Errorf("generate debug bundle: %w", err) + } + defer func() { + if err := os.Remove(path); err != nil { + log.Errorf("failed to remove debug bundle file: %v", err) + } + }() + + uploadCtx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + key, err := debug.UploadDebugBundle(uploadCtx, types.DefaultBundleURL, cfg.ManagementURL.String(), path) + if err != nil { + return "", fmt.Errorf("upload debug bundle: %w", err) + } + + log.Infof("debug bundle uploaded with key %s", key) + return key, nil +} + // SetTraceLogLevel configure the logger to trace level func (c *Client) SetTraceLogLevel() { log.SetLevel(log.TraceLevel) @@ -214,12 +312,13 @@ func (c *Client) PeersList() *PeerInfoArray { } func (c *Client) Networks() *NetworkArray { - if c.connectClient == nil { + cc := c.getConnectClient() + if cc == nil { log.Error("not connected") return nil } - engine := c.connectClient.Engine() + engine := cc.Engine() if engine == nil { log.Error("could not get engine") return nil @@ -300,7 +399,7 @@ func (c *Client) toggleRoute(command routeCommand) error { } func (c *Client) getRouteManager() (routemanager.Manager, error) { - client := c.connectClient + client := c.getConnectClient() if client == nil { return nil, fmt.Errorf("not connected") } diff --git a/client/android/platform_files.go b/client/android/platform_files.go index f0c369750..3be40c0bd 100644 --- a/client/android/platform_files.go +++ b/client/android/platform_files.go @@ -7,4 +7,5 @@ package android type PlatformFiles interface { ConfigurationFilePath() string StateFilePath() string + CacheDir() string } diff --git a/client/internal/connect.go b/client/internal/connect.go index bc2bd84d9..ac498f719 100644 --- a/client/internal/connect.go +++ b/client/internal/connect.go @@ -94,6 +94,7 @@ func (c *ConnectClient) RunOnAndroid( dnsAddresses []netip.AddrPort, dnsReadyListener dns.ReadyListener, stateFilePath string, + cacheDir string, ) error { // in case of non Android os these variables will be nil mobileDependency := MobileDependency{ @@ -103,6 +104,7 @@ func (c *ConnectClient) RunOnAndroid( HostDNSAddresses: dnsAddresses, DnsReadyListener: dnsReadyListener, StateFilePath: stateFilePath, + TempDir: cacheDir, } return c.run(mobileDependency, nil, "") } @@ -338,6 +340,7 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan log.Error(err) return wrapErr(err) } + engineConfig.TempDir = mobileDependency.TempDir relayManager := relayClient.NewManager(engineCtx, relayURLs, myPrivateKey.PublicKey().String(), engineConfig.MTU) c.statusRecorder.SetRelayMgr(relayManager) diff --git a/client/internal/debug/debug.go b/client/internal/debug/debug.go index 6a8eae324..bddb9a69e 100644 --- a/client/internal/debug/debug.go +++ b/client/internal/debug/debug.go @@ -16,7 +16,6 @@ import ( "path/filepath" "runtime" "runtime/pprof" - "slices" "sort" "strings" "time" @@ -31,7 +30,6 @@ import ( "github.com/netbirdio/netbird/client/internal/updater/installer" nbstatus "github.com/netbirdio/netbird/client/status" mgmProto "github.com/netbirdio/netbird/shared/management/proto" - "github.com/netbirdio/netbird/util" ) const readmeContent = `Netbird debug bundle @@ -234,6 +232,7 @@ type BundleGenerator struct { statusRecorder *peer.Status syncResponse *mgmProto.SyncResponse logPath string + tempDir string cpuProfile []byte refreshStatus func() // Optional callback to refresh status before bundle generation clientMetrics MetricsExporter @@ -256,6 +255,7 @@ type GeneratorDependencies struct { StatusRecorder *peer.Status SyncResponse *mgmProto.SyncResponse LogPath string + TempDir string // Directory for temporary bundle zip files. If empty, os.TempDir() is used. CPUProfile []byte RefreshStatus func() // Optional callback to refresh status before bundle generation ClientMetrics MetricsExporter @@ -275,6 +275,7 @@ func NewBundleGenerator(deps GeneratorDependencies, cfg BundleConfig) *BundleGen statusRecorder: deps.StatusRecorder, syncResponse: deps.SyncResponse, logPath: deps.LogPath, + tempDir: deps.TempDir, cpuProfile: deps.CPUProfile, refreshStatus: deps.RefreshStatus, clientMetrics: deps.ClientMetrics, @@ -287,7 +288,7 @@ func NewBundleGenerator(deps GeneratorDependencies, cfg BundleConfig) *BundleGen // Generate creates a debug bundle and returns the location. func (g *BundleGenerator) Generate() (resp string, err error) { - bundlePath, err := os.CreateTemp("", "netbird.debug.*.zip") + bundlePath, err := os.CreateTemp(g.tempDir, "netbird.debug.*.zip") if err != nil { return "", fmt.Errorf("create zip file: %w", err) } @@ -373,15 +374,8 @@ func (g *BundleGenerator) createArchive() error { log.Errorf("failed to add wg show output: %v", err) } - if g.logPath != "" && !slices.Contains(util.SpecialLogs, g.logPath) { - if err := g.addLogfile(); err != nil { - log.Errorf("failed to add log file to debug bundle: %v", err) - if err := g.trySystemdLogFallback(); err != nil { - log.Errorf("failed to add systemd logs as fallback: %v", err) - } - } - } else if err := g.trySystemdLogFallback(); err != nil { - log.Errorf("failed to add systemd logs: %v", err) + if err := g.addPlatformLog(); err != nil { + log.Errorf("failed to add logs to debug bundle: %v", err) } if err := g.addUpdateLogs(); err != nil { diff --git a/client/internal/debug/debug_android.go b/client/internal/debug/debug_android.go new file mode 100644 index 000000000..a4e2b3e98 --- /dev/null +++ b/client/internal/debug/debug_android.go @@ -0,0 +1,41 @@ +//go:build android + +package debug + +import ( + "fmt" + "io" + "os/exec" + + log "github.com/sirupsen/logrus" +) + +func (g *BundleGenerator) addPlatformLog() error { + cmd := exec.Command("/system/bin/logcat", "-d") + stdout, err := cmd.StdoutPipe() + if err != nil { + return fmt.Errorf("logcat stdout pipe: %w", err) + } + + if err := cmd.Start(); err != nil { + return fmt.Errorf("start logcat: %w", err) + } + + var logReader io.Reader = stdout + if g.anonymize { + var pw *io.PipeWriter + logReader, pw = io.Pipe() + go anonymizeLog(stdout, pw, g.anonymizer) + } + + if err := g.addFileToZip(logReader, "logcat.txt"); err != nil { + return fmt.Errorf("add logcat to zip: %w", err) + } + + if err := cmd.Wait(); err != nil { + return fmt.Errorf("wait logcat: %w", err) + } + + log.Debug("added logcat output to debug bundle") + return nil +} diff --git a/client/internal/debug/debug_nonandroid.go b/client/internal/debug/debug_nonandroid.go new file mode 100644 index 000000000..117238dec --- /dev/null +++ b/client/internal/debug/debug_nonandroid.go @@ -0,0 +1,25 @@ +//go:build !android + +package debug + +import ( + "slices" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/util" +) + +func (g *BundleGenerator) addPlatformLog() error { + if g.logPath != "" && !slices.Contains(util.SpecialLogs, g.logPath) { + if err := g.addLogfile(); err != nil { + log.Errorf("failed to add log file to debug bundle: %v", err) + if err := g.trySystemdLogFallback(); err != nil { + return err + } + } + } else if err := g.trySystemdLogFallback(); err != nil { + return err + } + return nil +} diff --git a/client/internal/engine.go b/client/internal/engine.go index be2d8bbf3..b49e02c6d 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -140,6 +140,7 @@ type EngineConfig struct { ProfileConfig *profilemanager.Config LogPath string + TempDir string } // EngineServices holds the external service dependencies required by the Engine. @@ -1095,6 +1096,7 @@ func (e *Engine) handleBundle(params *mgmProto.BundleParameters) (*mgmProto.JobR StatusRecorder: e.statusRecorder, SyncResponse: syncResponse, LogPath: e.config.LogPath, + TempDir: e.config.TempDir, ClientMetrics: e.clientMetrics, RefreshStatus: func() { e.RunHealthProbes(true) diff --git a/client/internal/mobile_dependency.go b/client/internal/mobile_dependency.go index 7c95e2b99..310d61a25 100644 --- a/client/internal/mobile_dependency.go +++ b/client/internal/mobile_dependency.go @@ -22,4 +22,8 @@ type MobileDependency struct { DnsManager dns.IosDnsManager FileDescriptor int32 StateFilePath string + + // TempDir is a writable directory for temporary files (e.g., debug bundle zip). + // On Android, this should be set to the app's cache directory. + TempDir string } From 3098f48b25e2a613d8f70466243d992c498e0fd2 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Mon, 20 Apr 2026 11:49:38 +0200 Subject: [PATCH 327/374] [client] fix ios network addresses mac filter (#5906) * fix(client): skip MAC address filter for network addresses on iOS iOS does not expose hardware (MAC) addresses due to Apple's privacy restrictions (since iOS 14), causing networkAddresses() to return an empty list because all interfaces are filtered out by the HardwareAddr check. Move networkAddresses() to platform-specific files so iOS can skip this filter. --- client/system/info.go | 54 ---------------------------- client/system/info_ios.go | 62 ++++++++++++++++++++++++++++++++ client/system/network_addr.go | 66 +++++++++++++++++++++++++++++++++++ 3 files changed, 128 insertions(+), 54 deletions(-) create mode 100644 client/system/network_addr.go diff --git a/client/system/info.go b/client/system/info.go index f2546cfe6..175d1f07f 100644 --- a/client/system/info.go +++ b/client/system/info.go @@ -2,7 +2,6 @@ package system import ( "context" - "net" "net/netip" "strings" @@ -145,59 +144,6 @@ func extractDeviceName(ctx context.Context, defaultName string) string { return v } -func networkAddresses() ([]NetworkAddress, error) { - interfaces, err := net.Interfaces() - if err != nil { - return nil, err - } - - var netAddresses []NetworkAddress - for _, iface := range interfaces { - if iface.Flags&net.FlagUp == 0 { - continue - } - if iface.HardwareAddr.String() == "" { - continue - } - addrs, err := iface.Addrs() - if err != nil { - continue - } - - for _, address := range addrs { - ipNet, ok := address.(*net.IPNet) - if !ok { - continue - } - - if ipNet.IP.IsLoopback() { - continue - } - - netAddr := NetworkAddress{ - NetIP: netip.MustParsePrefix(ipNet.String()), - Mac: iface.HardwareAddr.String(), - } - - if isDuplicated(netAddresses, netAddr) { - continue - } - - netAddresses = append(netAddresses, netAddr) - } - } - return netAddresses, nil -} - -func isDuplicated(addresses []NetworkAddress, addr NetworkAddress) bool { - for _, duplicated := range addresses { - if duplicated.NetIP == addr.NetIP { - return true - } - } - return false -} - // GetInfoWithChecks retrieves and parses the system information with applied checks. func GetInfoWithChecks(ctx context.Context, checks []*proto.Checks) (*Info, error) { log.Debugf("gathering system information with checks: %d", len(checks)) diff --git a/client/system/info_ios.go b/client/system/info_ios.go index 81936cf1d..ad42b1edf 100644 --- a/client/system/info_ios.go +++ b/client/system/info_ios.go @@ -2,6 +2,8 @@ package system import ( "context" + "net" + "net/netip" "runtime" log "github.com/sirupsen/logrus" @@ -42,6 +44,66 @@ func GetInfo(ctx context.Context) *Info { return gio } +// networkAddresses returns the list of network addresses on iOS. +// On iOS, hardware (MAC) addresses are not available due to Apple's privacy +// restrictions (iOS returns a fixed 02:00:00:00:00:00 placeholder), so we +// leave Mac empty to match Android's behavior. We also skip the HardwareAddr +// check that other platforms use and filter out link-local addresses as they +// are not useful for posture checks. +func networkAddresses() ([]NetworkAddress, error) { + interfaces, err := net.Interfaces() + if err != nil { + return nil, err + } + + var netAddresses []NetworkAddress + for _, iface := range interfaces { + if iface.Flags&net.FlagUp == 0 { + continue + } + addrs, err := iface.Addrs() + if err != nil { + continue + } + + for _, address := range addrs { + netAddr, ok := toNetworkAddress(address) + if !ok { + continue + } + if isDuplicated(netAddresses, netAddr) { + continue + } + netAddresses = append(netAddresses, netAddr) + } + } + return netAddresses, nil +} + +func toNetworkAddress(address net.Addr) (NetworkAddress, bool) { + ipNet, ok := address.(*net.IPNet) + if !ok { + return NetworkAddress{}, false + } + if ipNet.IP.IsLoopback() || ipNet.IP.IsLinkLocalUnicast() || ipNet.IP.IsMulticast() { + return NetworkAddress{}, false + } + prefix, err := netip.ParsePrefix(ipNet.String()) + if err != nil { + return NetworkAddress{}, false + } + return NetworkAddress{NetIP: prefix, Mac: ""}, true +} + +func isDuplicated(addresses []NetworkAddress, addr NetworkAddress) bool { + for _, duplicated := range addresses { + if duplicated.NetIP == addr.NetIP { + return true + } + } + return false +} + // checkFileAndProcess checks if the file path exists and if a process is running at that path. func checkFileAndProcess(paths []string) ([]File, error) { return []File{}, nil diff --git a/client/system/network_addr.go b/client/system/network_addr.go new file mode 100644 index 000000000..5423cf8ad --- /dev/null +++ b/client/system/network_addr.go @@ -0,0 +1,66 @@ +//go:build !ios + +package system + +import ( + "net" + "net/netip" +) + +func networkAddresses() ([]NetworkAddress, error) { + interfaces, err := net.Interfaces() + if err != nil { + return nil, err + } + + var netAddresses []NetworkAddress + for _, iface := range interfaces { + if iface.Flags&net.FlagUp == 0 { + continue + } + if iface.HardwareAddr.String() == "" { + continue + } + addrs, err := iface.Addrs() + if err != nil { + continue + } + + mac := iface.HardwareAddr.String() + for _, address := range addrs { + netAddr, ok := toNetworkAddress(address, mac) + if !ok { + continue + } + if isDuplicated(netAddresses, netAddr) { + continue + } + netAddresses = append(netAddresses, netAddr) + } + } + return netAddresses, nil +} + +func toNetworkAddress(address net.Addr, mac string) (NetworkAddress, bool) { + ipNet, ok := address.(*net.IPNet) + if !ok { + return NetworkAddress{}, false + } + if ipNet.IP.IsLoopback() { + return NetworkAddress{}, false + } + prefix, err := netip.ParsePrefix(ipNet.String()) + if err != nil { + return NetworkAddress{}, false + } + return NetworkAddress{NetIP: prefix, Mac: mac}, true +} + +func isDuplicated(addresses []NetworkAddress, addr NetworkAddress) bool { + for _, duplicated := range addresses { + if duplicated.NetIP == addr.NetIP { + return true + } + } + return false +} From 45d9ee52c00ff48cfa9c5a6d7919eb187d508659 Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Tue, 21 Apr 2026 10:21:11 +0200 Subject: [PATCH 328/374] [self-hosted] add reverse proxy retention fields to combined YAML (#5930) --- combined/config.yaml.example | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/combined/config.yaml.example b/combined/config.yaml.example index dce658d89..af85b0477 100644 --- a/combined/config.yaml.example +++ b/combined/config.yaml.example @@ -119,6 +119,8 @@ server: # Reverse proxy settings (optional) # reverseProxy: - # trustedHTTPProxies: [] - # trustedHTTPProxiesCount: 0 - # trustedPeers: [] + # trustedHTTPProxies: [] # CIDRs of trusted reverse proxies (e.g. ["10.0.0.0/8"]) + # trustedHTTPProxiesCount: 0 # Number of trusted proxies in front of the server (alternative to trustedHTTPProxies) + # trustedPeers: [] # CIDRs of trusted peer networks (e.g. ["100.64.0.0/10"]) + # accessLogRetentionDays: 7 # Days to retain HTTP access logs. 0 (or unset) defaults to 7. Negative values disable cleanup (logs kept indefinitely). + # accessLogCleanupIntervalHours: 24 # How often (in hours) to run the access-log cleanup job. 0 (or unset) is treated as "not set" and defaults to 24 hours; cleanup remains enabled. To disable cleanup, set accessLogRetentionDays to a negative value. From 06dfa9d4a5c7035b3abb2e1407c5636d0f0115db Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Tue, 21 Apr 2026 13:59:35 +0200 Subject: [PATCH 329/374] [management] replace mailru/easyjson with netbirdio/easyjson fork (#5938) --- go.mod | 2 ++ go.sum | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 5172b1a78..1b5861a37 100644 --- a/go.mod +++ b/go.mod @@ -323,3 +323,5 @@ replace github.com/pion/ice/v4 => github.com/netbirdio/ice/v4 v4.0.0-20250908184 replace github.com/libp2p/go-netroute => github.com/netbirdio/go-netroute v0.0.0-20240611143515-f59b0e1d3944 replace github.com/dexidp/dex => github.com/netbirdio/dex v0.244.0 + +replace github.com/mailru/easyjson => github.com/netbirdio/easyjson v0.9.0 diff --git a/go.sum b/go.sum index 9293ce73b..3772946e1 100644 --- a/go.sum +++ b/go.sum @@ -400,8 +400,6 @@ github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae h1:dIZY4ULFcto4tA github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= -github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattermost/xml-roundtrip-validator v0.1.0 h1:RXbVD2UAl7A7nOTR4u7E3ILa4IbtvKBHw64LDsmu9hU= github.com/mattermost/xml-roundtrip-validator v0.1.0/go.mod h1:qccnGMcpgwcNaBnxqpJpWWUiPNr5H3O8eDgGV9gT5To= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= @@ -449,6 +447,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/netbirdio/dex v0.244.0 h1:1GOvi8wnXYassnKGildzNqRHq0RbcfEUw7LKYpKIN7U= github.com/netbirdio/dex v0.244.0/go.mod h1:STGInJhPcAflrHmDO7vyit2kSq03PdL+8zQPoGALtcU= +github.com/netbirdio/easyjson v0.9.0 h1:6Nw2lghSVuy8RSkAYDhDv1thBVEmfVbKZnV7T7Z6Aus= +github.com/netbirdio/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/netbirdio/go-netroute v0.0.0-20240611143515-f59b0e1d3944 h1:TDtJKmM6Sf8uYFx/dMeqNOL90KUoRscdfpFZ3Im89uk= github.com/netbirdio/go-netroute v0.0.0-20240611143515-f59b0e1d3944/go.mod h1:sHA6TRxjQ6RLbnI+3R4DZo2Eseg/iKiPRfNmcuNySVQ= github.com/netbirdio/ice/v4 v4.0.0-20250908184934-6202be846b51 h1:Ov4qdafATOgGMB1wbSuh+0aAHcwz9hdvB6VZjh1mVMI= From 5a89e6621bf41cb6cb0da3040ffe68ae790e3c02 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 21 Apr 2026 15:52:08 +0200 Subject: [PATCH 330/374] [client] Supress ICE signaling (#5820) * [client] Suppress ICE signaling and periodic offers in force-relay mode When NB_FORCE_RELAY is enabled, skip WorkerICE creation entirely, suppress ICE credentials in offer/answer messages, disable the periodic ICE candidate monitor, and fix isConnectedOnAllWay to only check relay status so the guard stops sending unnecessary offers. * [client] Dynamically suppress ICE based on remote peer's offer credentials Track whether the remote peer includes ICE credentials in its offers/answers. When remote stops sending ICE credentials, skip ICE listener dispatch, suppress ICE credentials in responses, and exclude ICE from the guard connectivity check. When remote resumes sending ICE credentials, re-enable all ICE behavior. * [client] Fix nil SessionID panic and force ICE teardown on relay-only transition Fix nil pointer dereference in signalOfferAnswer when SessionID is nil (relay-only offers). Close stale ICE agent immediately when remote peer stops sending ICE credentials to avoid traffic black-hole during the ICE disconnect timeout. * [client] Add relay-only fallback check when ICE is unavailable Ensure the relay connection is supported with the peer when ICE is disabled to prevent connectivity issues. * [client] Add tri-state connection status to guard for smarter ICE retry (#5828) * [client] Add tri-state connection status to guard for smarter ICE retry Refactor isConnectedOnAllWay to return a ConnStatus enum (Connected, Disconnected, PartiallyConnected) instead of a boolean. When relay is up but ICE is not (PartiallyConnected), limit ICE offers to 3 retries with exponential backoff then fall back to hourly attempts, reducing unnecessary signaling traffic. Fully disconnected peers continue to retry aggressively. External events (relay/ICE disconnect, signal/relay reconnect) reset retry state to give ICE a fresh chance. * [client] Clarify guard ICE retry state and trace log trigger Split iceRetryState.attempt into shouldRetry (pure predicate) and enterHourlyMode (explicit state transition) so the caller in reconnectLoopWithRetry reads top-to-bottom. Restore the original trace-log behavior in isConnectedOnAllWay so it only logs on full disconnection, not on the new PartiallyConnected state. * [client] Extract pure evalConnStatus and add unit tests Split isConnectedOnAllWay into a thin method that snapshots state and a pure evalConnStatus helper that takes a connStatusInputs struct, so the tri-state decision logic can be exercised without constructing full Worker or Handshaker objects. Add table-driven tests covering force-relay, ICE-unavailable and fully-available code paths, plus unit tests for iceRetryState budget/hourly transitions and reset. * [client] Improve grammar in logs and refactor ICE credential checks --- client/internal/engine.go | 2 +- client/internal/peer/conn.go | 105 ++++++--- client/internal/peer/conn_status.go | 14 ++ client/internal/peer/conn_status_eval_test.go | 201 ++++++++++++++++++ client/internal/peer/env.go | 2 +- client/internal/peer/guard/guard.go | 68 ++++-- client/internal/peer/guard/ice_retry_state.go | 61 ++++++ .../peer/guard/ice_retry_state_test.go | 103 +++++++++ client/internal/peer/guard/sr_watcher.go | 8 +- client/internal/peer/handshaker.go | 56 ++++- client/internal/peer/signaler.go | 10 +- 11 files changed, 567 insertions(+), 63 deletions(-) create mode 100644 client/internal/peer/conn_status_eval_test.go create mode 100644 client/internal/peer/guard/ice_retry_state.go create mode 100644 client/internal/peer/guard/ice_retry_state_test.go diff --git a/client/internal/engine.go b/client/internal/engine.go index b49e02c6d..09d80a87d 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -570,7 +570,7 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) e.connMgr.Start(e.ctx) e.srWatcher = guard.NewSRWatcher(e.signal, e.relayManager, e.mobileDep.IFaceDiscover, iceCfg) - e.srWatcher.Start() + e.srWatcher.Start(peer.IsForceRelayed()) e.receiveSignalEvents() e.receiveManagementEvents() diff --git a/client/internal/peer/conn.go b/client/internal/peer/conn.go index 8d1585b3f..1e416bfe7 100644 --- a/client/internal/peer/conn.go +++ b/client/internal/peer/conn.go @@ -185,17 +185,20 @@ func (conn *Conn) Open(engineCtx context.Context) error { conn.workerRelay = NewWorkerRelay(conn.ctx, conn.Log, isController(conn.config), conn.config, conn, conn.relayManager) - relayIsSupportedLocally := conn.workerRelay.RelayIsSupportedLocally() - workerICE, err := NewWorkerICE(conn.ctx, conn.Log, conn.config, conn, conn.signaler, conn.iFaceDiscover, conn.statusRecorder, relayIsSupportedLocally) - if err != nil { - return err + forceRelay := IsForceRelayed() + if !forceRelay { + relayIsSupportedLocally := conn.workerRelay.RelayIsSupportedLocally() + workerICE, err := NewWorkerICE(conn.ctx, conn.Log, conn.config, conn, conn.signaler, conn.iFaceDiscover, conn.statusRecorder, relayIsSupportedLocally) + if err != nil { + return err + } + conn.workerICE = workerICE } - conn.workerICE = workerICE conn.handshaker = NewHandshaker(conn.Log, conn.config, conn.signaler, conn.workerICE, conn.workerRelay, conn.metricsStages) conn.handshaker.AddRelayListener(conn.workerRelay.OnNewOffer) - if !isForceRelayed() { + if !forceRelay { conn.handshaker.AddICEListener(conn.workerICE.OnNewOffer) } @@ -251,7 +254,9 @@ func (conn *Conn) Close(signalToRemote bool) { conn.wgWatcherCancel() } conn.workerRelay.CloseConn() - conn.workerICE.Close() + if conn.workerICE != nil { + conn.workerICE.Close() + } if conn.wgProxyRelay != nil { err := conn.wgProxyRelay.CloseConn() @@ -294,7 +299,9 @@ func (conn *Conn) OnRemoteAnswer(answer OfferAnswer) { // OnRemoteCandidate Handles ICE connection Candidate provided by the remote peer. func (conn *Conn) OnRemoteCandidate(candidate ice.Candidate, haRoutes route.HAMap) { conn.dumpState.RemoteCandidate() - conn.workerICE.OnRemoteCandidate(candidate, haRoutes) + if conn.workerICE != nil { + conn.workerICE.OnRemoteCandidate(candidate, haRoutes) + } } // SetOnConnected sets a handler function to be triggered by Conn when a new connection to a remote peer established @@ -712,33 +719,35 @@ func (conn *Conn) evalStatus() ConnStatus { return StatusConnecting } -func (conn *Conn) isConnectedOnAllWay() (connected bool) { - // would be better to protect this with a mutex, but it could cause deadlock with Close function - +// isConnectedOnAllWay evaluates the overall connection status based on ICE and Relay transports. +// +// The result is a tri-state: +// - ConnStatusConnected: all available transports are up +// - ConnStatusPartiallyConnected: relay is up but ICE is still pending/reconnecting +// - ConnStatusDisconnected: no working transport +func (conn *Conn) isConnectedOnAllWay() (status guard.ConnStatus) { defer func() { - if !connected { + if status == guard.ConnStatusDisconnected { conn.logTraceConnState() } }() - // For JS platform: only relay connection is supported - if runtime.GOOS == "js" { - return conn.statusRelay.Get() == worker.StatusConnected + iceWorkerCreated := conn.workerICE != nil + + var iceInProgress bool + if iceWorkerCreated { + iceInProgress = conn.workerICE.InProgress() } - // For non-JS platforms: check ICE connection status - if conn.statusICE.Get() == worker.StatusDisconnected && !conn.workerICE.InProgress() { - return false - } - - // If relay is supported with peer, it must also be connected - if conn.workerRelay.IsRelayConnectionSupportedWithPeer() { - if conn.statusRelay.Get() == worker.StatusDisconnected { - return false - } - } - - return true + return evalConnStatus(connStatusInputs{ + forceRelay: IsForceRelayed(), + peerUsesRelay: conn.workerRelay.IsRelayConnectionSupportedWithPeer(), + relayConnected: conn.statusRelay.Get() == worker.StatusConnected, + remoteSupportsICE: conn.handshaker.RemoteICESupported(), + iceWorkerCreated: iceWorkerCreated, + iceStatusConnecting: conn.statusICE.Get() != worker.StatusDisconnected, + iceInProgress: iceInProgress, + }) } func (conn *Conn) enableWgWatcherIfNeeded(enabledTime time.Time) { @@ -926,3 +935,43 @@ func isController(config ConnConfig) bool { func isRosenpassEnabled(remoteRosenpassPubKey []byte) bool { return remoteRosenpassPubKey != nil } + +func evalConnStatus(in connStatusInputs) guard.ConnStatus { + // "Relay up and needed" — the peer uses relay and the transport is connected. + relayUsedAndUp := in.peerUsesRelay && in.relayConnected + + // Force-relay mode: ICE never runs. Relay is the only transport and must be up. + if in.forceRelay { + return boolToConnStatus(relayUsedAndUp) + } + + // Remote peer doesn't support ICE, or we haven't created the worker yet: + // relay is the only possible transport. + if !in.remoteSupportsICE || !in.iceWorkerCreated { + return boolToConnStatus(relayUsedAndUp) + } + + // ICE counts as "up" when the status is anything other than Disconnected, OR + // when a negotiation is currently in progress (so we don't spam offers while one is in flight). + iceUp := in.iceStatusConnecting || in.iceInProgress + + // Relay side is acceptable if the peer doesn't rely on relay, or relay is connected. + relayOK := !in.peerUsesRelay || in.relayConnected + + switch { + case iceUp && relayOK: + return guard.ConnStatusConnected + case relayUsedAndUp: + // Relay is up but ICE is down — partially connected. + return guard.ConnStatusPartiallyConnected + default: + return guard.ConnStatusDisconnected + } +} + +func boolToConnStatus(connected bool) guard.ConnStatus { + if connected { + return guard.ConnStatusConnected + } + return guard.ConnStatusDisconnected +} diff --git a/client/internal/peer/conn_status.go b/client/internal/peer/conn_status.go index 73acc5ef5..b43e245f3 100644 --- a/client/internal/peer/conn_status.go +++ b/client/internal/peer/conn_status.go @@ -13,6 +13,20 @@ const ( StatusConnected ) +// connStatusInputs is the primitive-valued snapshot of the state that drives the +// tri-state connection classification. Extracted so the decision logic can be unit-tested +// without constructing full Worker/Handshaker objects. +type connStatusInputs struct { + forceRelay bool // NB_FORCE_RELAY or JS/WASM + peerUsesRelay bool // remote peer advertises relay support AND local has relay + relayConnected bool // statusRelay reports Connected (independent of whether peer uses relay) + remoteSupportsICE bool // remote peer sent ICE credentials + iceWorkerCreated bool // local WorkerICE exists (false in force-relay mode) + iceStatusConnecting bool // statusICE is anything other than Disconnected + iceInProgress bool // a negotiation is currently in flight +} + + // ConnStatus describe the status of a peer's connection type ConnStatus int32 diff --git a/client/internal/peer/conn_status_eval_test.go b/client/internal/peer/conn_status_eval_test.go new file mode 100644 index 000000000..66393cafe --- /dev/null +++ b/client/internal/peer/conn_status_eval_test.go @@ -0,0 +1,201 @@ +package peer + +import ( + "testing" + + "github.com/netbirdio/netbird/client/internal/peer/guard" +) + +func TestEvalConnStatus_ForceRelay(t *testing.T) { + tests := []struct { + name string + in connStatusInputs + want guard.ConnStatus + }{ + { + name: "force relay, peer uses relay, relay up", + in: connStatusInputs{ + forceRelay: true, + peerUsesRelay: true, + relayConnected: true, + }, + want: guard.ConnStatusConnected, + }, + { + name: "force relay, peer uses relay, relay down", + in: connStatusInputs{ + forceRelay: true, + peerUsesRelay: true, + relayConnected: false, + }, + want: guard.ConnStatusDisconnected, + }, + { + name: "force relay, peer does NOT use relay - disconnected forever", + in: connStatusInputs{ + forceRelay: true, + peerUsesRelay: false, + relayConnected: true, + }, + want: guard.ConnStatusDisconnected, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + if got := evalConnStatus(tc.in); got != tc.want { + t.Fatalf("evalConnStatus = %v, want %v", got, tc.want) + } + }) + } +} + +func TestEvalConnStatus_ICEUnavailable(t *testing.T) { + tests := []struct { + name string + in connStatusInputs + want guard.ConnStatus + }{ + { + name: "remote does not support ICE, peer uses relay, relay up", + in: connStatusInputs{ + peerUsesRelay: true, + relayConnected: true, + remoteSupportsICE: false, + iceWorkerCreated: true, + }, + want: guard.ConnStatusConnected, + }, + { + name: "remote does not support ICE, peer uses relay, relay down", + in: connStatusInputs{ + peerUsesRelay: true, + relayConnected: false, + remoteSupportsICE: false, + iceWorkerCreated: true, + }, + want: guard.ConnStatusDisconnected, + }, + { + name: "ICE worker not yet created, relay up", + in: connStatusInputs{ + peerUsesRelay: true, + relayConnected: true, + remoteSupportsICE: true, + iceWorkerCreated: false, + }, + want: guard.ConnStatusConnected, + }, + { + name: "remote does not support ICE, peer does not use relay", + in: connStatusInputs{ + peerUsesRelay: false, + relayConnected: false, + remoteSupportsICE: false, + iceWorkerCreated: true, + }, + want: guard.ConnStatusDisconnected, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + if got := evalConnStatus(tc.in); got != tc.want { + t.Fatalf("evalConnStatus = %v, want %v", got, tc.want) + } + }) + } +} + +func TestEvalConnStatus_FullyAvailable(t *testing.T) { + base := connStatusInputs{ + remoteSupportsICE: true, + iceWorkerCreated: true, + } + + tests := []struct { + name string + mutator func(*connStatusInputs) + want guard.ConnStatus + }{ + { + name: "ICE connected, relay connected, peer uses relay", + mutator: func(in *connStatusInputs) { + in.peerUsesRelay = true + in.relayConnected = true + in.iceStatusConnecting = true + }, + want: guard.ConnStatusConnected, + }, + { + name: "ICE connected, peer does NOT use relay", + mutator: func(in *connStatusInputs) { + in.peerUsesRelay = false + in.relayConnected = false + in.iceStatusConnecting = true + }, + want: guard.ConnStatusConnected, + }, + { + name: "ICE InProgress only, peer does NOT use relay", + mutator: func(in *connStatusInputs) { + in.peerUsesRelay = false + in.iceStatusConnecting = false + in.iceInProgress = true + }, + want: guard.ConnStatusConnected, + }, + { + name: "ICE down, relay up, peer uses relay -> partial", + mutator: func(in *connStatusInputs) { + in.peerUsesRelay = true + in.relayConnected = true + in.iceStatusConnecting = false + in.iceInProgress = false + }, + want: guard.ConnStatusPartiallyConnected, + }, + { + name: "ICE down, peer does NOT use relay -> disconnected", + mutator: func(in *connStatusInputs) { + in.peerUsesRelay = false + in.relayConnected = false + in.iceStatusConnecting = false + in.iceInProgress = false + }, + want: guard.ConnStatusDisconnected, + }, + { + name: "ICE up, peer uses relay but relay down -> partial (relay required, ICE ignored)", + mutator: func(in *connStatusInputs) { + in.peerUsesRelay = true + in.relayConnected = false + in.iceStatusConnecting = true + }, + // relayOK = false (peer uses relay but it's down), iceUp = true + // first switch arm fails (relayOK false), relayUsedAndUp = false (relay down), + // falls into default: Disconnected. + want: guard.ConnStatusDisconnected, + }, + { + name: "ICE down, relay up but peer does not use relay -> disconnected", + mutator: func(in *connStatusInputs) { + in.peerUsesRelay = false + in.relayConnected = true // not actually used since peer doesn't rely on it + in.iceStatusConnecting = false + in.iceInProgress = false + }, + want: guard.ConnStatusDisconnected, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + in := base + tc.mutator(&in) + if got := evalConnStatus(in); got != tc.want { + t.Fatalf("evalConnStatus = %v, want %v (inputs: %+v)", got, tc.want, in) + } + }) + } +} diff --git a/client/internal/peer/env.go b/client/internal/peer/env.go index 7f500c410..b4ba9ad7b 100644 --- a/client/internal/peer/env.go +++ b/client/internal/peer/env.go @@ -10,7 +10,7 @@ const ( EnvKeyNBForceRelay = "NB_FORCE_RELAY" ) -func isForceRelayed() bool { +func IsForceRelayed() bool { if runtime.GOOS == "js" { return true } diff --git a/client/internal/peer/guard/guard.go b/client/internal/peer/guard/guard.go index d93403730..2e5efbcc5 100644 --- a/client/internal/peer/guard/guard.go +++ b/client/internal/peer/guard/guard.go @@ -8,7 +8,19 @@ import ( log "github.com/sirupsen/logrus" ) -type isConnectedFunc func() bool +// ConnStatus represents the connection state as seen by the guard. +type ConnStatus int + +const ( + // ConnStatusDisconnected means neither ICE nor Relay is connected. + ConnStatusDisconnected ConnStatus = iota + // ConnStatusPartiallyConnected means Relay is connected but ICE is not. + ConnStatusPartiallyConnected + // ConnStatusConnected means all required connections are established. + ConnStatusConnected +) + +type connStatusFunc func() ConnStatus // Guard is responsible for the reconnection logic. // It will trigger to send an offer to the peer then has connection issues. @@ -20,14 +32,14 @@ type isConnectedFunc func() bool // - ICE candidate changes type Guard struct { log *log.Entry - isConnectedOnAllWay isConnectedFunc + isConnectedOnAllWay connStatusFunc timeout time.Duration srWatcher *SRWatcher relayedConnDisconnected chan struct{} iCEConnDisconnected chan struct{} } -func NewGuard(log *log.Entry, isConnectedFn isConnectedFunc, timeout time.Duration, srWatcher *SRWatcher) *Guard { +func NewGuard(log *log.Entry, isConnectedFn connStatusFunc, timeout time.Duration, srWatcher *SRWatcher) *Guard { return &Guard{ log: log, isConnectedOnAllWay: isConnectedFn, @@ -57,8 +69,17 @@ func (g *Guard) SetICEConnDisconnected() { } } -// reconnectLoopWithRetry periodically check the connection status. -// Try to send offer while the P2P is not established or while the Relay is not connected if is it supported +// reconnectLoopWithRetry periodically checks the connection status and sends offers to re-establish connectivity. +// +// Behavior depends on the connection state reported by isConnectedOnAllWay: +// - Connected: no action, the peer is fully reachable. +// - Disconnected (neither ICE nor Relay): retries aggressively with exponential backoff (800ms doubling +// up to timeout), never gives up. This ensures rapid recovery when the peer has no connectivity at all. +// - PartiallyConnected (Relay up, ICE not): retries up to 3 times with exponential backoff, then switches +// to one attempt per hour. This limits signaling traffic when relay already provides connectivity. +// +// External events (relay/ICE disconnect, signal/relay reconnect, candidate changes) reset the retry +// counter and backoff ticker, giving ICE a fresh chance after network conditions change. func (g *Guard) reconnectLoopWithRetry(ctx context.Context, callback func()) { srReconnectedChan := g.srWatcher.NewListener() defer g.srWatcher.RemoveListener(srReconnectedChan) @@ -68,36 +89,47 @@ func (g *Guard) reconnectLoopWithRetry(ctx context.Context, callback func()) { tickerChannel := ticker.C + iceState := &iceRetryState{log: g.log} + defer iceState.reset() + for { select { - case t := <-tickerChannel: - if t.IsZero() { - g.log.Infof("retry timed out, stop periodic offer sending") - // after backoff timeout the ticker.C will be closed. We need to a dummy channel to avoid loop - tickerChannel = make(<-chan time.Time) - continue + case <-tickerChannel: + switch g.isConnectedOnAllWay() { + case ConnStatusConnected: + // all good, nothing to do + case ConnStatusDisconnected: + callback() + case ConnStatusPartiallyConnected: + if iceState.shouldRetry() { + callback() + } else { + iceState.enterHourlyMode() + ticker.Stop() + tickerChannel = iceState.hourlyC() + } } - if !g.isConnectedOnAllWay() { - callback() - } case <-g.relayedConnDisconnected: g.log.Debugf("Relay connection changed, reset reconnection ticker") ticker.Stop() - ticker = g.prepareExponentTicker(ctx) + ticker = g.newReconnectTicker(ctx) tickerChannel = ticker.C + iceState.reset() case <-g.iCEConnDisconnected: g.log.Debugf("ICE connection changed, reset reconnection ticker") ticker.Stop() - ticker = g.prepareExponentTicker(ctx) + ticker = g.newReconnectTicker(ctx) tickerChannel = ticker.C + iceState.reset() case <-srReconnectedChan: g.log.Debugf("has network changes, reset reconnection ticker") ticker.Stop() - ticker = g.prepareExponentTicker(ctx) + ticker = g.newReconnectTicker(ctx) tickerChannel = ticker.C + iceState.reset() case <-ctx.Done(): g.log.Debugf("context is done, stop reconnect loop") @@ -120,7 +152,7 @@ func (g *Guard) initialTicker(ctx context.Context) *backoff.Ticker { return backoff.NewTicker(bo) } -func (g *Guard) prepareExponentTicker(ctx context.Context) *backoff.Ticker { +func (g *Guard) newReconnectTicker(ctx context.Context) *backoff.Ticker { bo := backoff.WithContext(&backoff.ExponentialBackOff{ InitialInterval: 800 * time.Millisecond, RandomizationFactor: 0.1, diff --git a/client/internal/peer/guard/ice_retry_state.go b/client/internal/peer/guard/ice_retry_state.go new file mode 100644 index 000000000..01dc1bf2d --- /dev/null +++ b/client/internal/peer/guard/ice_retry_state.go @@ -0,0 +1,61 @@ +package guard + +import ( + "time" + + log "github.com/sirupsen/logrus" +) + +const ( + // maxICERetries is the maximum number of ICE offer attempts when relay is connected + maxICERetries = 3 + // iceRetryInterval is the periodic retry interval after ICE retries are exhausted + iceRetryInterval = 1 * time.Hour +) + +// iceRetryState tracks the limited ICE retry attempts when relay is already connected. +// After maxICERetries attempts it switches to a periodic hourly retry. +type iceRetryState struct { + log *log.Entry + retries int + hourly *time.Ticker +} + +func (s *iceRetryState) reset() { + s.retries = 0 + if s.hourly != nil { + s.hourly.Stop() + s.hourly = nil + } +} + +// shouldRetry reports whether the caller should send another ICE offer on this tick. +// Returns false when the per-cycle retry budget is exhausted and the caller must switch +// to the hourly ticker via enterHourlyMode + hourlyC. +func (s *iceRetryState) shouldRetry() bool { + if s.hourly != nil { + s.log.Debugf("hourly ICE retry attempt") + return true + } + + s.retries++ + if s.retries <= maxICERetries { + s.log.Debugf("ICE retry attempt %d/%d", s.retries, maxICERetries) + return true + } + + return false +} + +// enterHourlyMode starts the hourly retry ticker. Must be called after shouldRetry returns false. +func (s *iceRetryState) enterHourlyMode() { + s.log.Infof("ICE retries exhausted (%d/%d), switching to hourly retry", maxICERetries, maxICERetries) + s.hourly = time.NewTicker(iceRetryInterval) +} + +func (s *iceRetryState) hourlyC() <-chan time.Time { + if s.hourly == nil { + return nil + } + return s.hourly.C +} diff --git a/client/internal/peer/guard/ice_retry_state_test.go b/client/internal/peer/guard/ice_retry_state_test.go new file mode 100644 index 000000000..6a5b5a76f --- /dev/null +++ b/client/internal/peer/guard/ice_retry_state_test.go @@ -0,0 +1,103 @@ +package guard + +import ( + "testing" + + log "github.com/sirupsen/logrus" +) + +func newTestRetryState() *iceRetryState { + return &iceRetryState{log: log.NewEntry(log.StandardLogger())} +} + +func TestICERetryState_AllowsInitialBudget(t *testing.T) { + s := newTestRetryState() + + for i := 1; i <= maxICERetries; i++ { + if !s.shouldRetry() { + t.Fatalf("shouldRetry returned false on attempt %d, want true (budget = %d)", i, maxICERetries) + } + } +} + +func TestICERetryState_ExhaustsAfterBudget(t *testing.T) { + s := newTestRetryState() + + for i := 0; i < maxICERetries; i++ { + _ = s.shouldRetry() + } + + if s.shouldRetry() { + t.Fatalf("shouldRetry returned true after budget exhausted, want false") + } +} + +func TestICERetryState_HourlyCNilBeforeEnterHourlyMode(t *testing.T) { + s := newTestRetryState() + + if s.hourlyC() != nil { + t.Fatalf("hourlyC returned non-nil channel before enterHourlyMode") + } +} + +func TestICERetryState_EnterHourlyModeArmsTicker(t *testing.T) { + s := newTestRetryState() + for i := 0; i < maxICERetries+1; i++ { + _ = s.shouldRetry() + } + + s.enterHourlyMode() + defer s.reset() + + if s.hourlyC() == nil { + t.Fatalf("hourlyC returned nil after enterHourlyMode") + } +} + +func TestICERetryState_ShouldRetryTrueInHourlyMode(t *testing.T) { + s := newTestRetryState() + s.enterHourlyMode() + defer s.reset() + + if !s.shouldRetry() { + t.Fatalf("shouldRetry returned false in hourly mode, want true") + } + + // Subsequent calls also return true — we keep retrying on each hourly tick. + if !s.shouldRetry() { + t.Fatalf("second shouldRetry returned false in hourly mode, want true") + } +} + +func TestICERetryState_ResetRestoresBudget(t *testing.T) { + s := newTestRetryState() + for i := 0; i < maxICERetries+1; i++ { + _ = s.shouldRetry() + } + s.enterHourlyMode() + + s.reset() + + if s.hourlyC() != nil { + t.Fatalf("hourlyC returned non-nil channel after reset") + } + if s.retries != 0 { + t.Fatalf("retries = %d after reset, want 0", s.retries) + } + + for i := 1; i <= maxICERetries; i++ { + if !s.shouldRetry() { + t.Fatalf("shouldRetry returned false on attempt %d after reset, want true", i) + } + } +} + +func TestICERetryState_ResetIsIdempotent(t *testing.T) { + s := newTestRetryState() + s.reset() + s.reset() // second call must not panic or re-stop a nil ticker + + if s.hourlyC() != nil { + t.Fatalf("hourlyC non-nil after double reset") + } +} diff --git a/client/internal/peer/guard/sr_watcher.go b/client/internal/peer/guard/sr_watcher.go index 6f4f5ad4f..0befd7438 100644 --- a/client/internal/peer/guard/sr_watcher.go +++ b/client/internal/peer/guard/sr_watcher.go @@ -39,7 +39,7 @@ func NewSRWatcher(signalClient chNotifier, relayManager chNotifier, iFaceDiscove return srw } -func (w *SRWatcher) Start() { +func (w *SRWatcher) Start(disableICEMonitor bool) { w.mu.Lock() defer w.mu.Unlock() @@ -50,8 +50,10 @@ func (w *SRWatcher) Start() { ctx, cancel := context.WithCancel(context.Background()) w.cancelIceMonitor = cancel - iceMonitor := NewICEMonitor(w.iFaceDiscover, w.iceConfig, GetICEMonitorPeriod()) - go iceMonitor.Start(ctx, w.onICEChanged) + if !disableICEMonitor { + iceMonitor := NewICEMonitor(w.iFaceDiscover, w.iceConfig, GetICEMonitorPeriod()) + go iceMonitor.Start(ctx, w.onICEChanged) + } w.signalClient.SetOnReconnectedListener(w.onReconnected) w.relayManager.SetOnReconnectedListener(w.onReconnected) diff --git a/client/internal/peer/handshaker.go b/client/internal/peer/handshaker.go index 9b50cecd1..741dfce60 100644 --- a/client/internal/peer/handshaker.go +++ b/client/internal/peer/handshaker.go @@ -4,6 +4,7 @@ import ( "context" "errors" "sync" + "sync/atomic" log "github.com/sirupsen/logrus" @@ -43,6 +44,10 @@ type OfferAnswer struct { SessionID *ICESessionID } +func (o *OfferAnswer) hasICECredentials() bool { + return o.IceCredentials.UFrag != "" && o.IceCredentials.Pwd != "" +} + type Handshaker struct { mu sync.Mutex log *log.Entry @@ -59,6 +64,10 @@ type Handshaker struct { relayListener *AsyncOfferListener iceListener func(remoteOfferAnswer *OfferAnswer) + // remoteICESupported tracks whether the remote peer includes ICE credentials in its offers/answers. + // When false, the local side skips ICE listener dispatch and suppresses ICE credentials in responses. + remoteICESupported atomic.Bool + // remoteOffersCh is a channel used to wait for remote credentials to proceed with the connection remoteOffersCh chan OfferAnswer // remoteAnswerCh is a channel used to wait for remote credentials answer (confirmation of our offer) to proceed with the connection @@ -66,7 +75,7 @@ type Handshaker struct { } func NewHandshaker(log *log.Entry, config ConnConfig, signaler *Signaler, ice *WorkerICE, relay *WorkerRelay, metricsStages *MetricsStages) *Handshaker { - return &Handshaker{ + h := &Handshaker{ log: log, config: config, signaler: signaler, @@ -76,6 +85,13 @@ func NewHandshaker(log *log.Entry, config ConnConfig, signaler *Signaler, ice *W remoteOffersCh: make(chan OfferAnswer), remoteAnswerCh: make(chan OfferAnswer), } + // assume remote supports ICE until we learn otherwise from received offers + h.remoteICESupported.Store(ice != nil) + return h +} + +func (h *Handshaker) RemoteICESupported() bool { + return h.remoteICESupported.Load() } func (h *Handshaker) AddRelayListener(offer func(remoteOfferAnswer *OfferAnswer)) { @@ -90,18 +106,20 @@ func (h *Handshaker) Listen(ctx context.Context) { for { select { case remoteOfferAnswer := <-h.remoteOffersCh: - h.log.Infof("received offer, running version %s, remote WireGuard listen port %d, session id: %s", remoteOfferAnswer.Version, remoteOfferAnswer.WgListenPort, remoteOfferAnswer.SessionIDString()) + h.log.Infof("received offer, running version %s, remote WireGuard listen port %d, session id: %s, remote ICE supported: %t", remoteOfferAnswer.Version, remoteOfferAnswer.WgListenPort, remoteOfferAnswer.SessionIDString(), remoteOfferAnswer.hasICECredentials()) // Record signaling received for reconnection attempts if h.metricsStages != nil { h.metricsStages.RecordSignalingReceived() } + h.updateRemoteICEState(&remoteOfferAnswer) + if h.relayListener != nil { h.relayListener.Notify(&remoteOfferAnswer) } - if h.iceListener != nil { + if h.iceListener != nil && h.RemoteICESupported() { h.iceListener(&remoteOfferAnswer) } @@ -110,18 +128,20 @@ func (h *Handshaker) Listen(ctx context.Context) { continue } case remoteOfferAnswer := <-h.remoteAnswerCh: - h.log.Infof("received answer, running version %s, remote WireGuard listen port %d, session id: %s", remoteOfferAnswer.Version, remoteOfferAnswer.WgListenPort, remoteOfferAnswer.SessionIDString()) + h.log.Infof("received answer, running version %s, remote WireGuard listen port %d, session id: %s, remote ICE supported: %t", remoteOfferAnswer.Version, remoteOfferAnswer.WgListenPort, remoteOfferAnswer.SessionIDString(), remoteOfferAnswer.hasICECredentials()) // Record signaling received for reconnection attempts if h.metricsStages != nil { h.metricsStages.RecordSignalingReceived() } + h.updateRemoteICEState(&remoteOfferAnswer) + if h.relayListener != nil { h.relayListener.Notify(&remoteOfferAnswer) } - if h.iceListener != nil { + if h.iceListener != nil && h.RemoteICESupported() { h.iceListener(&remoteOfferAnswer) } case <-ctx.Done(): @@ -183,15 +203,18 @@ func (h *Handshaker) sendAnswer() error { } func (h *Handshaker) buildOfferAnswer() OfferAnswer { - uFrag, pwd := h.ice.GetLocalUserCredentials() - sid := h.ice.SessionID() answer := OfferAnswer{ - IceCredentials: IceCredentials{uFrag, pwd}, WgListenPort: h.config.LocalWgPort, Version: version.NetbirdVersion(), RosenpassPubKey: h.config.RosenpassConfig.PubKey, RosenpassAddr: h.config.RosenpassConfig.Addr, - SessionID: &sid, + } + + if h.ice != nil && h.RemoteICESupported() { + uFrag, pwd := h.ice.GetLocalUserCredentials() + sid := h.ice.SessionID() + answer.IceCredentials = IceCredentials{uFrag, pwd} + answer.SessionID = &sid } if addr, err := h.relay.RelayInstanceAddress(); err == nil { @@ -200,3 +223,18 @@ func (h *Handshaker) buildOfferAnswer() OfferAnswer { return answer } + +func (h *Handshaker) updateRemoteICEState(offer *OfferAnswer) { + hasICE := offer.hasICECredentials() + prev := h.remoteICESupported.Swap(hasICE) + if prev != hasICE { + if hasICE { + h.log.Infof("remote peer started sending ICE credentials") + } else { + h.log.Infof("remote peer stopped sending ICE credentials") + if h.ice != nil { + h.ice.Close() + } + } + } +} diff --git a/client/internal/peer/signaler.go b/client/internal/peer/signaler.go index b28906625..f6eb87cca 100644 --- a/client/internal/peer/signaler.go +++ b/client/internal/peer/signaler.go @@ -46,9 +46,13 @@ func (s *Signaler) Ready() bool { // SignalOfferAnswer signals either an offer or an answer to remote peer func (s *Signaler) signalOfferAnswer(offerAnswer OfferAnswer, remoteKey string, bodyType sProto.Body_Type) error { - sessionIDBytes, err := offerAnswer.SessionID.Bytes() - if err != nil { - log.Warnf("failed to get session ID bytes: %v", err) + var sessionIDBytes []byte + if offerAnswer.SessionID != nil { + var err error + sessionIDBytes, err = offerAnswer.SessionID.Bytes() + if err != nil { + log.Warnf("failed to get session ID bytes: %v", err) + } } msg, err := signal.MarshalCredential( s.wgPrivateKey, From 75e408f51cb54f82c42d4c04d65f3a38e42c433a Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 22 Apr 2026 00:56:56 +0900 Subject: [PATCH 331/374] [client] Prefer systemd-resolved stub over file mode regardless of resolv.conf header (#5935) --- client/internal/dns/file_parser_unix.go | 1 + client/internal/dns/host_unix.go | 134 ++++++++++++++++++------ client/internal/dns/host_unix_test.go | 76 ++++++++++++++ 3 files changed, 180 insertions(+), 31 deletions(-) create mode 100644 client/internal/dns/host_unix_test.go diff --git a/client/internal/dns/file_parser_unix.go b/client/internal/dns/file_parser_unix.go index 8dacb4e51..50ba74c0c 100644 --- a/client/internal/dns/file_parser_unix.go +++ b/client/internal/dns/file_parser_unix.go @@ -13,6 +13,7 @@ import ( const ( defaultResolvConfPath = "/etc/resolv.conf" + nsswitchConfPath = "/etc/nsswitch.conf" ) type resolvConf struct { diff --git a/client/internal/dns/host_unix.go b/client/internal/dns/host_unix.go index 422fed4e5..d7301d725 100644 --- a/client/internal/dns/host_unix.go +++ b/client/internal/dns/host_unix.go @@ -46,12 +46,12 @@ type restoreHostManager interface { } func newHostManager(wgInterface string) (hostManager, error) { - osManager, err := getOSDNSManagerType() + osManager, reason, err := getOSDNSManagerType() if err != nil { return nil, fmt.Errorf("get os dns manager type: %w", err) } - log.Infof("System DNS manager discovered: %s", osManager) + log.Infof("System DNS manager discovered: %s (%s)", osManager, reason) mgr, err := newHostManagerFromType(wgInterface, osManager) // need to explicitly return nil mgr on error to avoid returning a non-nil interface containing a nil value if err != nil { @@ -74,17 +74,49 @@ func newHostManagerFromType(wgInterface string, osManager osManagerType) (restor } } -func getOSDNSManagerType() (osManagerType, error) { +func getOSDNSManagerType() (osManagerType, string, error) { + resolved := isSystemdResolvedRunning() + nss := isLibnssResolveUsed() + stub := checkStub() + + // Prefer systemd-resolved whenever it owns libc resolution, regardless of + // who wrote /etc/resolv.conf. File-mode rewrites do not affect lookups + // that go through nss-resolve, and in foreign mode they can loop back + // through resolved as an upstream. + if resolved && (nss || stub) { + return systemdManager, fmt.Sprintf("systemd-resolved active (nss-resolve=%t, stub=%t)", nss, stub), nil + } + + mgr, reason, rejected, err := scanResolvConfHeader() + if err != nil { + return 0, "", err + } + if reason != "" { + return mgr, reason, nil + } + + fallback := fmt.Sprintf("no manager matched (resolved=%t, nss-resolve=%t, stub=%t)", resolved, nss, stub) + if len(rejected) > 0 { + fallback += "; rejected: " + strings.Join(rejected, ", ") + } + return fileManager, fallback, nil +} + +// scanResolvConfHeader walks /etc/resolv.conf header comments and returns the +// matching manager. If reason is empty the caller should pick file mode and +// use rejected for diagnostics. +func scanResolvConfHeader() (osManagerType, string, []string, error) { file, err := os.Open(defaultResolvConfPath) if err != nil { - return 0, fmt.Errorf("unable to open %s for checking owner, got error: %w", defaultResolvConfPath, err) + return 0, "", nil, fmt.Errorf("unable to open %s for checking owner, got error: %w", defaultResolvConfPath, err) } defer func() { - if err := file.Close(); err != nil { - log.Errorf("close file %s: %s", defaultResolvConfPath, err) + if cerr := file.Close(); cerr != nil { + log.Errorf("close file %s: %s", defaultResolvConfPath, cerr) } }() + var rejected []string scanner := bufio.NewScanner(file) for scanner.Scan() { text := scanner.Text() @@ -92,41 +124,48 @@ func getOSDNSManagerType() (osManagerType, error) { continue } if text[0] != '#' { - return fileManager, nil + break } - if strings.Contains(text, fileGeneratedResolvConfContentHeader) { - return netbirdManager, nil - } - if strings.Contains(text, "NetworkManager") && isDbusListenerRunning(networkManagerDest, networkManagerDbusObjectNode) && isNetworkManagerSupported() { - return networkManager, nil - } - if strings.Contains(text, "systemd-resolved") && isSystemdResolvedRunning() { - if checkStub() { - return systemdManager, nil - } else { - return fileManager, nil - } - } - if strings.Contains(text, "resolvconf") { - if isSystemdResolveConfMode() { - return systemdManager, nil - } - - return resolvConfManager, nil + if mgr, reason, rej := matchResolvConfHeader(text); reason != "" { + return mgr, reason, nil, nil + } else if rej != "" { + rejected = append(rejected, rej) } } if err := scanner.Err(); err != nil && err != io.EOF { - return 0, fmt.Errorf("scan: %w", err) + return 0, "", nil, fmt.Errorf("scan: %w", err) } - - return fileManager, nil + return 0, "", rejected, nil } -// checkStub checks if the stub resolver is disabled in systemd-resolved. If it is disabled, we fall back to file manager. +// matchResolvConfHeader inspects a single comment line. Returns either a +// definitive (manager, reason) or a non-empty rejected diagnostic. +func matchResolvConfHeader(text string) (osManagerType, string, string) { + if strings.Contains(text, fileGeneratedResolvConfContentHeader) { + return netbirdManager, "netbird-managed resolv.conf header detected", "" + } + if strings.Contains(text, "NetworkManager") { + if isDbusListenerRunning(networkManagerDest, networkManagerDbusObjectNode) && isNetworkManagerSupported() { + return networkManager, "NetworkManager header + supported version on dbus", "" + } + return 0, "", "NetworkManager header (no dbus or unsupported version)" + } + if strings.Contains(text, "resolvconf") { + if isSystemdResolveConfMode() { + return systemdManager, "resolvconf header in systemd-resolved compatibility mode", "" + } + return resolvConfManager, "resolvconf header detected", "" + } + return 0, "", "" +} + +// checkStub reports whether systemd-resolved's stub (127.0.0.53) is listed +// in /etc/resolv.conf. On parse failure we assume it is, to avoid dropping +// into file mode while resolved is active. func checkStub() bool { rConf, err := parseDefaultResolvConf() if err != nil { - log.Warnf("failed to parse resolv conf: %s", err) + log.Warnf("failed to parse resolv conf, assuming stub is active: %s", err) return true } @@ -139,3 +178,36 @@ func checkStub() bool { return false } + +// isLibnssResolveUsed reports whether nss-resolve is listed before dns on +// the hosts: line of /etc/nsswitch.conf. When it is, libc lookups are +// delegated to systemd-resolved regardless of /etc/resolv.conf. +func isLibnssResolveUsed() bool { + bs, err := os.ReadFile(nsswitchConfPath) + if err != nil { + log.Debugf("read %s: %v", nsswitchConfPath, err) + return false + } + return parseNsswitchResolveAhead(bs) +} + +func parseNsswitchResolveAhead(data []byte) bool { + for _, line := range strings.Split(string(data), "\n") { + if i := strings.IndexByte(line, '#'); i >= 0 { + line = line[:i] + } + fields := strings.Fields(line) + if len(fields) < 2 || fields[0] != "hosts:" { + continue + } + for _, module := range fields[1:] { + switch module { + case "dns": + return false + case "resolve": + return true + } + } + } + return false +} diff --git a/client/internal/dns/host_unix_test.go b/client/internal/dns/host_unix_test.go new file mode 100644 index 000000000..e936281d3 --- /dev/null +++ b/client/internal/dns/host_unix_test.go @@ -0,0 +1,76 @@ +//go:build (linux && !android) || freebsd + +package dns + +import "testing" + +func TestParseNsswitchResolveAhead(t *testing.T) { + tests := []struct { + name string + in string + want bool + }{ + { + name: "resolve before dns with action token", + in: "hosts: mymachines resolve [!UNAVAIL=return] files myhostname dns\n", + want: true, + }, + { + name: "dns before resolve", + in: "hosts: files mdns4_minimal [NOTFOUND=return] dns resolve\n", + want: false, + }, + { + name: "debian default with only dns", + in: "hosts: files mdns4_minimal [NOTFOUND=return] dns mymachines\n", + want: false, + }, + { + name: "neither resolve nor dns", + in: "hosts: files myhostname\n", + want: false, + }, + { + name: "no hosts line", + in: "passwd: files systemd\ngroup: files systemd\n", + want: false, + }, + { + name: "empty", + in: "", + want: false, + }, + { + name: "comments and blank lines ignored", + in: "# comment\n\n# another\nhosts: resolve dns\n", + want: true, + }, + { + name: "trailing inline comment", + in: "hosts: resolve [!UNAVAIL=return] dns # fallback\n", + want: true, + }, + { + name: "hosts token must be the first field", + in: " hosts: resolve dns\n", + want: true, + }, + { + name: "other db line mentioning resolve is ignored", + in: "networks: resolve\nhosts: dns\n", + want: false, + }, + { + name: "only resolve, no dns", + in: "hosts: files resolve\n", + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := parseNsswitchResolveAhead([]byte(tt.in)); got != tt.want { + t.Errorf("parseNsswitchResolveAhead() = %v, want %v", got, tt.want) + } + }) + } +} From 064ec1c83226d4a12fd8b73f07597d029e06af05 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 22 Apr 2026 00:57:16 +0900 Subject: [PATCH 332/374] [client] Trust wg interface in firewalld to bypass owner-flagged chains (#5928) --- client/firewall/firewalld/firewalld.go | 11 + client/firewall/firewalld/firewalld_linux.go | 260 ++++++++++++++++++ .../firewalld/firewalld_linux_test.go | 49 ++++ client/firewall/firewalld/firewalld_other.go | 25 ++ client/firewall/iptables/manager_linux.go | 18 ++ client/firewall/nftables/manager_linux.go | 5 + client/firewall/nftables/router_linux.go | 18 ++ client/firewall/uspfilter/allow_netbird.go | 9 + client/firewall/uspfilter/common/iface.go | 1 + client/firewall/uspfilter/filter_test.go | 8 + client/internal/engine.go | 3 + 11 files changed, 407 insertions(+) create mode 100644 client/firewall/firewalld/firewalld.go create mode 100644 client/firewall/firewalld/firewalld_linux.go create mode 100644 client/firewall/firewalld/firewalld_linux_test.go create mode 100644 client/firewall/firewalld/firewalld_other.go diff --git a/client/firewall/firewalld/firewalld.go b/client/firewall/firewalld/firewalld.go new file mode 100644 index 000000000..188ea61dd --- /dev/null +++ b/client/firewall/firewalld/firewalld.go @@ -0,0 +1,11 @@ +// Package firewalld integrates with the firewalld daemon so NetBird can place +// its wg interface into firewalld's "trusted" zone. This is required because +// firewalld's nftables chains are created with NFT_CHAIN_OWNER on recent +// versions, which returns EPERM to any other process that tries to insert +// rules into them. The workaround mirrors what Tailscale does: let firewalld +// itself add the accept rules to its own chains by trusting the interface. +package firewalld + +// TrustedZone is the firewalld zone name used for interfaces whose traffic +// should bypass firewalld filtering. +const TrustedZone = "trusted" diff --git a/client/firewall/firewalld/firewalld_linux.go b/client/firewall/firewalld/firewalld_linux.go new file mode 100644 index 000000000..924a04b0a --- /dev/null +++ b/client/firewall/firewalld/firewalld_linux.go @@ -0,0 +1,260 @@ +//go:build linux + +package firewalld + +import ( + "context" + "errors" + "fmt" + "os/exec" + "strings" + "sync" + "time" + + "github.com/godbus/dbus/v5" + log "github.com/sirupsen/logrus" +) + +const ( + dbusDest = "org.fedoraproject.FirewallD1" + dbusPath = "/org/fedoraproject/FirewallD1" + dbusRootIface = "org.fedoraproject.FirewallD1" + dbusZoneIface = "org.fedoraproject.FirewallD1.zone" + + errZoneAlreadySet = "ZONE_ALREADY_SET" + errAlreadyEnabled = "ALREADY_ENABLED" + errUnknownIface = "UNKNOWN_INTERFACE" + errNotEnabled = "NOT_ENABLED" + + // callTimeout bounds each individual DBus or firewall-cmd invocation. + // A fresh context is created for each call so a slow DBus probe can't + // exhaust the deadline before the firewall-cmd fallback gets to run. + callTimeout = 3 * time.Second +) + +var ( + errDBusUnavailable = errors.New("firewalld dbus unavailable") + + // trustLogOnce ensures the "added to trusted zone" message is logged at + // Info level only for the first successful add per process; repeat adds + // from other init paths are quieter. + trustLogOnce sync.Once + + parentCtxMu sync.RWMutex + parentCtx context.Context = context.Background() +) + +// SetParentContext installs a parent context whose cancellation aborts any +// in-flight TrustInterface call. It does not affect UntrustInterface, which +// always uses a fresh Background-rooted timeout so cleanup can still run +// during engine shutdown when the engine context is already cancelled. +func SetParentContext(ctx context.Context) { + parentCtxMu.Lock() + parentCtx = ctx + parentCtxMu.Unlock() +} + +func getParentContext() context.Context { + parentCtxMu.RLock() + defer parentCtxMu.RUnlock() + return parentCtx +} + +// TrustInterface places iface into firewalld's trusted zone if firewalld is +// running. It is idempotent and best-effort: errors are returned so callers +// can log, but a non-running firewalld is not an error. Only the first +// successful call per process logs at Info. Respects the parent context set +// via SetParentContext so startup-time cancellation unblocks it. +func TrustInterface(iface string) error { + parent := getParentContext() + if !isRunning(parent) { + return nil + } + if err := addTrusted(parent, iface); err != nil { + return fmt.Errorf("add %s to firewalld trusted zone: %w", iface, err) + } + trustLogOnce.Do(func() { + log.Infof("added %s to firewalld trusted zone", iface) + }) + log.Debugf("firewalld: ensured %s is in trusted zone", iface) + return nil +} + +// UntrustInterface removes iface from firewalld's trusted zone if firewalld +// is running. Idempotent. Uses a Background-rooted timeout so it still runs +// during shutdown after the engine context has been cancelled. +func UntrustInterface(iface string) error { + if !isRunning(context.Background()) { + return nil + } + if err := removeTrusted(context.Background(), iface); err != nil { + return fmt.Errorf("remove %s from firewalld trusted zone: %w", iface, err) + } + return nil +} + +func newCallContext(parent context.Context) (context.Context, context.CancelFunc) { + return context.WithTimeout(parent, callTimeout) +} + +func isRunning(parent context.Context) bool { + ctx, cancel := newCallContext(parent) + ok, err := isRunningDBus(ctx) + cancel() + if err == nil { + return ok + } + if errors.Is(err, errDBusUnavailable) || errors.Is(err, context.DeadlineExceeded) { + ctx, cancel = newCallContext(parent) + defer cancel() + return isRunningCLI(ctx) + } + return false +} + +func addTrusted(parent context.Context, iface string) error { + ctx, cancel := newCallContext(parent) + err := addDBus(ctx, iface) + cancel() + if err == nil { + return nil + } + if !errors.Is(err, errDBusUnavailable) { + log.Debugf("firewalld: dbus add failed, falling back to firewall-cmd: %v", err) + } + ctx, cancel = newCallContext(parent) + defer cancel() + return addCLI(ctx, iface) +} + +func removeTrusted(parent context.Context, iface string) error { + ctx, cancel := newCallContext(parent) + err := removeDBus(ctx, iface) + cancel() + if err == nil { + return nil + } + if !errors.Is(err, errDBusUnavailable) { + log.Debugf("firewalld: dbus remove failed, falling back to firewall-cmd: %v", err) + } + ctx, cancel = newCallContext(parent) + defer cancel() + return removeCLI(ctx, iface) +} + +func isRunningDBus(ctx context.Context) (bool, error) { + conn, err := dbus.SystemBus() + if err != nil { + return false, fmt.Errorf("%w: %v", errDBusUnavailable, err) + } + obj := conn.Object(dbusDest, dbusPath) + + var zone string + if err := obj.CallWithContext(ctx, dbusRootIface+".getDefaultZone", 0).Store(&zone); err != nil { + return false, fmt.Errorf("firewalld getDefaultZone: %w", err) + } + return true, nil +} + +func isRunningCLI(ctx context.Context) bool { + if _, err := exec.LookPath("firewall-cmd"); err != nil { + return false + } + return exec.CommandContext(ctx, "firewall-cmd", "--state").Run() == nil +} + +func addDBus(ctx context.Context, iface string) error { + conn, err := dbus.SystemBus() + if err != nil { + return fmt.Errorf("%w: %v", errDBusUnavailable, err) + } + obj := conn.Object(dbusDest, dbusPath) + + call := obj.CallWithContext(ctx, dbusZoneIface+".addInterface", 0, TrustedZone, iface) + if call.Err == nil { + return nil + } + + if dbusErrContains(call.Err, errAlreadyEnabled) { + return nil + } + + if dbusErrContains(call.Err, errZoneAlreadySet) { + move := obj.CallWithContext(ctx, dbusZoneIface+".changeZoneOfInterface", 0, TrustedZone, iface) + if move.Err != nil { + return fmt.Errorf("firewalld changeZoneOfInterface: %w", move.Err) + } + return nil + } + + return fmt.Errorf("firewalld addInterface: %w", call.Err) +} + +func removeDBus(ctx context.Context, iface string) error { + conn, err := dbus.SystemBus() + if err != nil { + return fmt.Errorf("%w: %v", errDBusUnavailable, err) + } + obj := conn.Object(dbusDest, dbusPath) + + call := obj.CallWithContext(ctx, dbusZoneIface+".removeInterface", 0, TrustedZone, iface) + if call.Err == nil { + return nil + } + + if dbusErrContains(call.Err, errUnknownIface) || dbusErrContains(call.Err, errNotEnabled) { + return nil + } + + return fmt.Errorf("firewalld removeInterface: %w", call.Err) +} + +func addCLI(ctx context.Context, iface string) error { + if _, err := exec.LookPath("firewall-cmd"); err != nil { + return fmt.Errorf("firewall-cmd not available: %w", err) + } + + // --change-interface (no --permanent) binds the interface for the + // current runtime only; we do not want membership to persist across + // reboots because netbird re-asserts it on every startup. + out, err := exec.CommandContext(ctx, + "firewall-cmd", "--zone="+TrustedZone, "--change-interface="+iface, + ).CombinedOutput() + if err != nil { + return fmt.Errorf("firewall-cmd change-interface: %w: %s", err, strings.TrimSpace(string(out))) + } + return nil +} + +func removeCLI(ctx context.Context, iface string) error { + if _, err := exec.LookPath("firewall-cmd"); err != nil { + return fmt.Errorf("firewall-cmd not available: %w", err) + } + + out, err := exec.CommandContext(ctx, + "firewall-cmd", "--zone="+TrustedZone, "--remove-interface="+iface, + ).CombinedOutput() + if err != nil { + msg := strings.TrimSpace(string(out)) + if strings.Contains(msg, errUnknownIface) || strings.Contains(msg, errNotEnabled) { + return nil + } + return fmt.Errorf("firewall-cmd remove-interface: %w: %s", err, msg) + } + return nil +} + +func dbusErrContains(err error, code string) bool { + if err == nil { + return false + } + var de dbus.Error + if errors.As(err, &de) { + for _, b := range de.Body { + if s, ok := b.(string); ok && strings.Contains(s, code) { + return true + } + } + } + return strings.Contains(err.Error(), code) +} diff --git a/client/firewall/firewalld/firewalld_linux_test.go b/client/firewall/firewalld/firewalld_linux_test.go new file mode 100644 index 000000000..d812745fc --- /dev/null +++ b/client/firewall/firewalld/firewalld_linux_test.go @@ -0,0 +1,49 @@ +//go:build linux + +package firewalld + +import ( + "errors" + "testing" + + "github.com/godbus/dbus/v5" +) + +func TestDBusErrContains(t *testing.T) { + tests := []struct { + name string + err error + code string + want bool + }{ + {"nil error", nil, errZoneAlreadySet, false}, + {"plain error match", errors.New("ZONE_ALREADY_SET: wt0"), errZoneAlreadySet, true}, + {"plain error miss", errors.New("something else"), errZoneAlreadySet, false}, + { + "dbus.Error body match", + dbus.Error{Name: "org.fedoraproject.FirewallD1.Exception", Body: []any{"ZONE_ALREADY_SET: wt0"}}, + errZoneAlreadySet, + true, + }, + { + "dbus.Error body miss", + dbus.Error{Name: "org.fedoraproject.FirewallD1.Exception", Body: []any{"INVALID_INTERFACE"}}, + errAlreadyEnabled, + false, + }, + { + "dbus.Error non-string body falls back to Error()", + dbus.Error{Name: "x", Body: []any{123}}, + "x", + true, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := dbusErrContains(tc.err, tc.code) + if got != tc.want { + t.Fatalf("dbusErrContains(%v, %q) = %v; want %v", tc.err, tc.code, got, tc.want) + } + }) + } +} diff --git a/client/firewall/firewalld/firewalld_other.go b/client/firewall/firewalld/firewalld_other.go new file mode 100644 index 000000000..cfa28221d --- /dev/null +++ b/client/firewall/firewalld/firewalld_other.go @@ -0,0 +1,25 @@ +//go:build !linux + +package firewalld + +import "context" + +// SetParentContext is a no-op on non-Linux platforms because firewalld only +// runs on Linux. +func SetParentContext(context.Context) { + // intentionally empty: firewalld is a Linux-only daemon +} + +// TrustInterface is a no-op on non-Linux platforms because firewalld only +// runs on Linux. +func TrustInterface(string) error { + // intentionally empty: firewalld is a Linux-only daemon + return nil +} + +// UntrustInterface is a no-op on non-Linux platforms because firewalld only +// runs on Linux. +func UntrustInterface(string) error { + // intentionally empty: firewalld is a Linux-only daemon + return nil +} diff --git a/client/firewall/iptables/manager_linux.go b/client/firewall/iptables/manager_linux.go index a1d4467d5..7d8cd7f8c 100644 --- a/client/firewall/iptables/manager_linux.go +++ b/client/firewall/iptables/manager_linux.go @@ -12,6 +12,7 @@ import ( log "github.com/sirupsen/logrus" nberrors "github.com/netbirdio/netbird/client/errors" + "github.com/netbirdio/netbird/client/firewall/firewalld" firewall "github.com/netbirdio/netbird/client/firewall/manager" "github.com/netbirdio/netbird/client/iface/wgaddr" "github.com/netbirdio/netbird/client/internal/statemanager" @@ -86,6 +87,12 @@ func (m *Manager) Init(stateManager *statemanager.Manager) error { log.Warnf("raw table not available, notrack rules will be disabled: %v", err) } + // Trust after all fatal init steps so a later failure doesn't leave the + // interface in firewalld's trusted zone without a corresponding Close. + if err := firewalld.TrustInterface(m.wgIface.Name()); err != nil { + log.Warnf("failed to trust interface in firewalld: %v", err) + } + // persist early to ensure cleanup of chains go func() { if err := stateManager.PersistState(context.Background()); err != nil { @@ -191,6 +198,12 @@ func (m *Manager) Close(stateManager *statemanager.Manager) error { merr = multierror.Append(merr, fmt.Errorf("reset router: %w", err)) } + // Appending to merr intentionally blocks DeleteState below so ShutdownState + // stays persisted and the crash-recovery path retries firewalld cleanup. + if err := firewalld.UntrustInterface(m.wgIface.Name()); err != nil { + merr = multierror.Append(merr, err) + } + // attempt to delete state only if all other operations succeeded if merr == nil { if err := stateManager.DeleteState(&ShutdownState{}); err != nil { @@ -217,6 +230,11 @@ func (m *Manager) AllowNetbird() error { if err != nil { return fmt.Errorf("allow netbird interface traffic: %w", err) } + + if err := firewalld.TrustInterface(m.wgIface.Name()); err != nil { + log.Warnf("failed to trust interface in firewalld: %v", err) + } + return nil } diff --git a/client/firewall/nftables/manager_linux.go b/client/firewall/nftables/manager_linux.go index 0b5b61e04..8cd5cc6b3 100644 --- a/client/firewall/nftables/manager_linux.go +++ b/client/firewall/nftables/manager_linux.go @@ -14,6 +14,7 @@ import ( log "github.com/sirupsen/logrus" "golang.org/x/sys/unix" + "github.com/netbirdio/netbird/client/firewall/firewalld" firewall "github.com/netbirdio/netbird/client/firewall/manager" "github.com/netbirdio/netbird/client/iface/wgaddr" "github.com/netbirdio/netbird/client/internal/statemanager" @@ -217,6 +218,10 @@ func (m *Manager) AllowNetbird() error { return fmt.Errorf("flush allow input netbird rules: %w", err) } + if err := firewalld.TrustInterface(m.wgIface.Name()); err != nil { + log.Warnf("failed to trust interface in firewalld: %v", err) + } + return nil } diff --git a/client/firewall/nftables/router_linux.go b/client/firewall/nftables/router_linux.go index 904daf7cb..8cc0d2792 100644 --- a/client/firewall/nftables/router_linux.go +++ b/client/firewall/nftables/router_linux.go @@ -19,6 +19,7 @@ import ( "golang.org/x/sys/unix" nberrors "github.com/netbirdio/netbird/client/errors" + "github.com/netbirdio/netbird/client/firewall/firewalld" firewall "github.com/netbirdio/netbird/client/firewall/manager" nbid "github.com/netbirdio/netbird/client/internal/acl/id" "github.com/netbirdio/netbird/client/internal/routemanager/ipfwdstate" @@ -40,6 +41,8 @@ const ( chainNameForward = "FORWARD" chainNameMangleForward = "netbird-mangle-forward" + firewalldTableName = "firewalld" + userDataAcceptForwardRuleIif = "frwacceptiif" userDataAcceptForwardRuleOif = "frwacceptoif" userDataAcceptInputRule = "inputaccept" @@ -133,6 +136,10 @@ func (r *router) Reset() error { merr = multierror.Append(merr, fmt.Errorf("remove accept filter rules: %w", err)) } + if err := firewalld.UntrustInterface(r.wgIface.Name()); err != nil { + merr = multierror.Append(merr, err) + } + if err := r.removeNatPreroutingRules(); err != nil { merr = multierror.Append(merr, fmt.Errorf("remove filter prerouting rules: %w", err)) } @@ -280,6 +287,10 @@ func (r *router) createContainers() error { log.Errorf("failed to add accept rules for the forward chain: %s", err) } + if err := firewalld.TrustInterface(r.wgIface.Name()); err != nil { + log.Warnf("failed to trust interface in firewalld: %v", err) + } + if err := r.refreshRulesMap(); err != nil { log.Errorf("failed to refresh rules: %s", err) } @@ -1319,6 +1330,13 @@ func (r *router) isExternalChain(chain *nftables.Chain) bool { return false } + // Skip firewalld-owned chains. Firewalld creates its chains with the + // NFT_CHAIN_OWNER flag, so inserting rules into them returns EPERM. + // We delegate acceptance to firewalld by trusting the interface instead. + if chain.Table.Name == firewalldTableName { + return false + } + // Skip all iptables-managed tables in the ip family if chain.Table.Family == nftables.TableFamilyIPv4 && isIptablesTable(chain.Table.Name) { return false diff --git a/client/firewall/uspfilter/allow_netbird.go b/client/firewall/uspfilter/allow_netbird.go index 6a6533344..b120cdf12 100644 --- a/client/firewall/uspfilter/allow_netbird.go +++ b/client/firewall/uspfilter/allow_netbird.go @@ -3,6 +3,9 @@ package uspfilter import ( + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/firewall/firewalld" "github.com/netbirdio/netbird/client/internal/statemanager" ) @@ -16,6 +19,9 @@ func (m *Manager) Close(stateManager *statemanager.Manager) error { if m.nativeFirewall != nil { return m.nativeFirewall.Close(stateManager) } + if err := firewalld.UntrustInterface(m.wgIface.Name()); err != nil { + log.Warnf("failed to untrust interface in firewalld: %v", err) + } return nil } @@ -24,5 +30,8 @@ func (m *Manager) AllowNetbird() error { if m.nativeFirewall != nil { return m.nativeFirewall.AllowNetbird() } + if err := firewalld.TrustInterface(m.wgIface.Name()); err != nil { + log.Warnf("failed to trust interface in firewalld: %v", err) + } return nil } diff --git a/client/firewall/uspfilter/common/iface.go b/client/firewall/uspfilter/common/iface.go index 7296953db..9c06eb3f7 100644 --- a/client/firewall/uspfilter/common/iface.go +++ b/client/firewall/uspfilter/common/iface.go @@ -9,6 +9,7 @@ import ( // IFaceMapper defines subset methods of interface required for manager type IFaceMapper interface { + Name() string SetFilter(device.PacketFilter) error Address() wgaddr.Address GetWGDevice() *wgdevice.Device diff --git a/client/firewall/uspfilter/filter_test.go b/client/firewall/uspfilter/filter_test.go index 39e8efa2c..5fb9fef0e 100644 --- a/client/firewall/uspfilter/filter_test.go +++ b/client/firewall/uspfilter/filter_test.go @@ -31,12 +31,20 @@ var logger = log.NewFromLogrus(logrus.StandardLogger()) var flowLogger = netflow.NewManager(nil, []byte{}, nil).GetLogger() type IFaceMock struct { + NameFunc func() string SetFilterFunc func(device.PacketFilter) error AddressFunc func() wgaddr.Address GetWGDeviceFunc func() *wgdevice.Device GetDeviceFunc func() *device.FilteredDevice } +func (i *IFaceMock) Name() string { + if i.NameFunc == nil { + return "wgtest" + } + return i.NameFunc() +} + func (i *IFaceMock) GetWGDevice() *wgdevice.Device { if i.GetWGDeviceFunc == nil { return nil diff --git a/client/internal/engine.go b/client/internal/engine.go index 09d80a87d..8d7e02bd5 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -26,6 +26,7 @@ import ( nberrors "github.com/netbirdio/netbird/client/errors" "github.com/netbirdio/netbird/client/firewall" + "github.com/netbirdio/netbird/client/firewall/firewalld" firewallManager "github.com/netbirdio/netbird/client/firewall/manager" "github.com/netbirdio/netbird/client/iface" "github.com/netbirdio/netbird/client/iface/device" @@ -604,6 +605,8 @@ func (e *Engine) createFirewall() error { return nil } + firewalld.SetParentContext(e.ctx) + var err error e.firewall, err = firewall.NewFirewall(e.wgInterface, e.stateManager, e.flowManager.GetLogger(), e.config.DisableServerRoutes, e.config.MTU) if err != nil { From eb3aa96257eee3a6dcef868e3d8816286f242c2e Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Tue, 21 Apr 2026 18:37:04 +0200 Subject: [PATCH 333/374] [management] check policy for changes before actual db update (#5405) --- management/server/policy.go | 114 +++++++----- management/server/types/policy.go | 38 ++++ management/server/types/policy_test.go | 193 ++++++++++++++++++++ management/server/types/policyrule.go | 105 +++++++++++ management/server/types/policyrule_test.go | 194 +++++++++++++++++++++ 5 files changed, 600 insertions(+), 44 deletions(-) create mode 100644 management/server/types/policy_test.go create mode 100644 management/server/types/policyrule_test.go diff --git a/management/server/policy.go b/management/server/policy.go index 3e84c3d10..48297ca11 100644 --- a/management/server/policy.go +++ b/management/server/policy.go @@ -5,6 +5,7 @@ import ( _ "embed" "github.com/rs/xid" + "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/management/server/permissions/modules" "github.com/netbirdio/netbird/management/server/permissions/operations" @@ -46,25 +47,40 @@ func (am *DefaultAccountManager) SavePolicy(ctx context.Context, accountID, user var isUpdate = policy.ID != "" var updateAccountPeers bool var action = activity.PolicyAdded + var unchanged bool err = am.Store.ExecuteInTransaction(ctx, func(transaction store.Store) error { - if err = validatePolicy(ctx, transaction, accountID, policy); err != nil { - return err - } - - updateAccountPeers, err = arePolicyChangesAffectPeers(ctx, transaction, accountID, policy, isUpdate) + existingPolicy, err := validatePolicy(ctx, transaction, accountID, policy) if err != nil { return err } - saveFunc := transaction.CreatePolicy if isUpdate { - action = activity.PolicyUpdated - saveFunc = transaction.SavePolicy - } + if policy.Equal(existingPolicy) { + logrus.WithContext(ctx).Tracef("policy update skipped because equal to stored one - policy id %s", policy.ID) + unchanged = true + return nil + } - if err = saveFunc(ctx, policy); err != nil { - return err + action = activity.PolicyUpdated + + updateAccountPeers, err = arePolicyChangesAffectPeersWithExisting(ctx, transaction, policy, existingPolicy) + if err != nil { + return err + } + + if err = transaction.SavePolicy(ctx, policy); err != nil { + return err + } + } else { + updateAccountPeers, err = arePolicyChangesAffectPeers(ctx, transaction, policy) + if err != nil { + return err + } + + if err = transaction.CreatePolicy(ctx, policy); err != nil { + return err + } } return transaction.IncrementNetworkSerial(ctx, accountID) @@ -73,6 +89,10 @@ func (am *DefaultAccountManager) SavePolicy(ctx context.Context, accountID, user return nil, err } + if unchanged { + return policy, nil + } + am.StoreEvent(ctx, userID, policy.ID, accountID, action, policy.EventMeta()) if updateAccountPeers { @@ -101,7 +121,7 @@ func (am *DefaultAccountManager) DeletePolicy(ctx context.Context, accountID, po return err } - updateAccountPeers, err = arePolicyChangesAffectPeers(ctx, transaction, accountID, policy, false) + updateAccountPeers, err = arePolicyChangesAffectPeers(ctx, transaction, policy) if err != nil { return err } @@ -138,34 +158,37 @@ func (am *DefaultAccountManager) ListPolicies(ctx context.Context, accountID, us return am.Store.GetAccountPolicies(ctx, store.LockingStrengthNone, accountID) } -// arePolicyChangesAffectPeers checks if changes to a policy will affect any associated peers. -func arePolicyChangesAffectPeers(ctx context.Context, transaction store.Store, accountID string, policy *types.Policy, isUpdate bool) (bool, error) { - if isUpdate { - existingPolicy, err := transaction.GetPolicyByID(ctx, store.LockingStrengthNone, accountID, policy.ID) - if err != nil { - return false, err - } - - if !policy.Enabled && !existingPolicy.Enabled { - return false, nil - } - - for _, rule := range existingPolicy.Rules { - if rule.SourceResource.Type != "" || rule.DestinationResource.Type != "" { - return true, nil - } - } - - hasPeers, err := anyGroupHasPeersOrResources(ctx, transaction, policy.AccountID, existingPolicy.RuleGroups()) - if err != nil { - return false, err - } - - if hasPeers { +// arePolicyChangesAffectPeers checks if a policy (being created or deleted) will affect any associated peers. +func arePolicyChangesAffectPeers(ctx context.Context, transaction store.Store, policy *types.Policy) (bool, error) { + for _, rule := range policy.Rules { + if rule.SourceResource.Type != "" || rule.DestinationResource.Type != "" { return true, nil } } + return anyGroupHasPeersOrResources(ctx, transaction, policy.AccountID, policy.RuleGroups()) +} + +func arePolicyChangesAffectPeersWithExisting(ctx context.Context, transaction store.Store, policy *types.Policy, existingPolicy *types.Policy) (bool, error) { + if !policy.Enabled && !existingPolicy.Enabled { + return false, nil + } + + for _, rule := range existingPolicy.Rules { + if rule.SourceResource.Type != "" || rule.DestinationResource.Type != "" { + return true, nil + } + } + + hasPeers, err := anyGroupHasPeersOrResources(ctx, transaction, policy.AccountID, existingPolicy.RuleGroups()) + if err != nil { + return false, err + } + + if hasPeers { + return true, nil + } + for _, rule := range policy.Rules { if rule.SourceResource.Type != "" || rule.DestinationResource.Type != "" { return true, nil @@ -175,12 +198,15 @@ func arePolicyChangesAffectPeers(ctx context.Context, transaction store.Store, a return anyGroupHasPeersOrResources(ctx, transaction, policy.AccountID, policy.RuleGroups()) } -// validatePolicy validates the policy and its rules. -func validatePolicy(ctx context.Context, transaction store.Store, accountID string, policy *types.Policy) error { +// validatePolicy validates the policy and its rules. For updates it returns +// the existing policy loaded from the store so callers can avoid a second read. +func validatePolicy(ctx context.Context, transaction store.Store, accountID string, policy *types.Policy) (*types.Policy, error) { + var existingPolicy *types.Policy if policy.ID != "" { - existingPolicy, err := transaction.GetPolicyByID(ctx, store.LockingStrengthNone, accountID, policy.ID) + var err error + existingPolicy, err = transaction.GetPolicyByID(ctx, store.LockingStrengthNone, accountID, policy.ID) if err != nil { - return err + return nil, err } // TODO: Refactor to support multiple rules per policy @@ -191,7 +217,7 @@ func validatePolicy(ctx context.Context, transaction store.Store, accountID stri for _, rule := range policy.Rules { if rule.ID != "" && !existingRuleIDs[rule.ID] { - return status.Errorf(status.InvalidArgument, "invalid rule ID: %s", rule.ID) + return nil, status.Errorf(status.InvalidArgument, "invalid rule ID: %s", rule.ID) } } } else { @@ -201,12 +227,12 @@ func validatePolicy(ctx context.Context, transaction store.Store, accountID stri groups, err := transaction.GetGroupsByIDs(ctx, store.LockingStrengthNone, accountID, policy.RuleGroups()) if err != nil { - return err + return nil, err } postureChecks, err := transaction.GetPostureChecksByIDs(ctx, store.LockingStrengthNone, accountID, policy.SourcePostureChecks) if err != nil { - return err + return nil, err } for i, rule := range policy.Rules { @@ -225,7 +251,7 @@ func validatePolicy(ctx context.Context, transaction store.Store, accountID stri policy.SourcePostureChecks = getValidPostureCheckIDs(postureChecks, policy.SourcePostureChecks) } - return nil + return existingPolicy, nil } // getValidPostureCheckIDs filters and returns only the valid posture check IDs from the provided list. diff --git a/management/server/types/policy.go b/management/server/types/policy.go index d4e1a8816..d410aec8d 100644 --- a/management/server/types/policy.go +++ b/management/server/types/policy.go @@ -93,6 +93,44 @@ func (p *Policy) Copy() *Policy { return c } +func (p *Policy) Equal(other *Policy) bool { + if p == nil || other == nil { + return p == other + } + + if p.ID != other.ID || + p.AccountID != other.AccountID || + p.Name != other.Name || + p.Description != other.Description || + p.Enabled != other.Enabled { + return false + } + + if !stringSlicesEqualUnordered(p.SourcePostureChecks, other.SourcePostureChecks) { + return false + } + + if len(p.Rules) != len(other.Rules) { + return false + } + + otherRules := make(map[string]*PolicyRule, len(other.Rules)) + for _, r := range other.Rules { + otherRules[r.ID] = r + } + for _, r := range p.Rules { + otherRule, ok := otherRules[r.ID] + if !ok { + return false + } + if !r.Equal(otherRule) { + return false + } + } + + return true +} + // EventMeta returns activity event meta related to this policy func (p *Policy) EventMeta() map[string]any { return map[string]any{"name": p.Name} diff --git a/management/server/types/policy_test.go b/management/server/types/policy_test.go new file mode 100644 index 000000000..b1d7aabc2 --- /dev/null +++ b/management/server/types/policy_test.go @@ -0,0 +1,193 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPolicyEqual_SameRulesDifferentOrder(t *testing.T) { + a := &Policy{ + ID: "pol1", + AccountID: "acc1", + Name: "test", + Enabled: true, + Rules: []*PolicyRule{ + {ID: "r1", PolicyID: "pol1", Ports: []string{"80"}}, + {ID: "r2", PolicyID: "pol1", Ports: []string{"443"}}, + }, + } + b := &Policy{ + ID: "pol1", + AccountID: "acc1", + Name: "test", + Enabled: true, + Rules: []*PolicyRule{ + {ID: "r2", PolicyID: "pol1", Ports: []string{"443"}}, + {ID: "r1", PolicyID: "pol1", Ports: []string{"80"}}, + }, + } + assert.True(t, a.Equal(b)) +} + +func TestPolicyEqual_DifferentRules(t *testing.T) { + a := &Policy{ + ID: "pol1", + Enabled: true, + Rules: []*PolicyRule{ + {ID: "r1", PolicyID: "pol1", Ports: []string{"80"}}, + }, + } + b := &Policy{ + ID: "pol1", + Enabled: true, + Rules: []*PolicyRule{ + {ID: "r1", PolicyID: "pol1", Ports: []string{"443"}}, + }, + } + assert.False(t, a.Equal(b)) +} + +func TestPolicyEqual_DifferentRuleCount(t *testing.T) { + a := &Policy{ + ID: "pol1", + Rules: []*PolicyRule{ + {ID: "r1", PolicyID: "pol1"}, + }, + } + b := &Policy{ + ID: "pol1", + Rules: []*PolicyRule{ + {ID: "r1", PolicyID: "pol1"}, + {ID: "r2", PolicyID: "pol1"}, + }, + } + assert.False(t, a.Equal(b)) +} + +func TestPolicyEqual_PostureChecksDifferentOrder(t *testing.T) { + a := &Policy{ + ID: "pol1", + SourcePostureChecks: []string{"pc3", "pc1", "pc2"}, + } + b := &Policy{ + ID: "pol1", + SourcePostureChecks: []string{"pc1", "pc2", "pc3"}, + } + assert.True(t, a.Equal(b)) +} + +func TestPolicyEqual_DifferentPostureChecks(t *testing.T) { + a := &Policy{ + ID: "pol1", + SourcePostureChecks: []string{"pc1", "pc2"}, + } + b := &Policy{ + ID: "pol1", + SourcePostureChecks: []string{"pc1", "pc3"}, + } + assert.False(t, a.Equal(b)) +} + +func TestPolicyEqual_DifferentScalarFields(t *testing.T) { + base := Policy{ + ID: "pol1", + AccountID: "acc1", + Name: "test", + Description: "desc", + Enabled: true, + } + + other := base + other.Name = "changed" + assert.False(t, base.Equal(&other)) + + other = base + other.Enabled = false + assert.False(t, base.Equal(&other)) + + other = base + other.Description = "changed" + assert.False(t, base.Equal(&other)) +} + +func TestPolicyEqual_NilCases(t *testing.T) { + var a *Policy + var b *Policy + assert.True(t, a.Equal(b)) + + a = &Policy{ID: "pol1"} + assert.False(t, a.Equal(nil)) +} + +func TestPolicyEqual_RulesMismatchByID(t *testing.T) { + a := &Policy{ + ID: "pol1", + Rules: []*PolicyRule{ + {ID: "r1", PolicyID: "pol1"}, + }, + } + b := &Policy{ + ID: "pol1", + Rules: []*PolicyRule{ + {ID: "r2", PolicyID: "pol1"}, + }, + } + assert.False(t, a.Equal(b)) +} + +func TestPolicyEqual_FullScenario(t *testing.T) { + a := &Policy{ + ID: "pol1", + AccountID: "acc1", + Name: "Web Access", + Description: "Allow web access", + Enabled: true, + SourcePostureChecks: []string{"pc2", "pc1"}, + Rules: []*PolicyRule{ + { + ID: "r1", + PolicyID: "pol1", + Name: "HTTP", + Enabled: true, + Action: PolicyTrafficActionAccept, + Protocol: PolicyRuleProtocolTCP, + Bidirectional: true, + Sources: []string{"g2", "g1"}, + Destinations: []string{"g4", "g3"}, + Ports: []string{"443", "80", "8080"}, + PortRanges: []RulePortRange{ + {Start: 8000, End: 9000}, + {Start: 80, End: 80}, + }, + }, + }, + } + b := &Policy{ + ID: "pol1", + AccountID: "acc1", + Name: "Web Access", + Description: "Allow web access", + Enabled: true, + SourcePostureChecks: []string{"pc1", "pc2"}, + Rules: []*PolicyRule{ + { + ID: "r1", + PolicyID: "pol1", + Name: "HTTP", + Enabled: true, + Action: PolicyTrafficActionAccept, + Protocol: PolicyRuleProtocolTCP, + Bidirectional: true, + Sources: []string{"g1", "g2"}, + Destinations: []string{"g3", "g4"}, + Ports: []string{"80", "8080", "443"}, + PortRanges: []RulePortRange{ + {Start: 80, End: 80}, + {Start: 8000, End: 9000}, + }, + }, + }, + } + assert.True(t, a.Equal(b)) +} diff --git a/management/server/types/policyrule.go b/management/server/types/policyrule.go index bb75dd555..52c494a6a 100644 --- a/management/server/types/policyrule.go +++ b/management/server/types/policyrule.go @@ -1,6 +1,8 @@ package types import ( + "slices" + "github.com/netbirdio/netbird/shared/management/proto" ) @@ -118,3 +120,106 @@ func (pm *PolicyRule) Copy() *PolicyRule { } return rule } + +func (pm *PolicyRule) Equal(other *PolicyRule) bool { + if pm == nil || other == nil { + return pm == other + } + + if pm.ID != other.ID || + pm.PolicyID != other.PolicyID || + pm.Name != other.Name || + pm.Description != other.Description || + pm.Enabled != other.Enabled || + pm.Action != other.Action || + pm.Bidirectional != other.Bidirectional || + pm.Protocol != other.Protocol || + pm.SourceResource != other.SourceResource || + pm.DestinationResource != other.DestinationResource || + pm.AuthorizedUser != other.AuthorizedUser { + return false + } + + if !stringSlicesEqualUnordered(pm.Sources, other.Sources) { + return false + } + if !stringSlicesEqualUnordered(pm.Destinations, other.Destinations) { + return false + } + if !stringSlicesEqualUnordered(pm.Ports, other.Ports) { + return false + } + if !portRangeSlicesEqualUnordered(pm.PortRanges, other.PortRanges) { + return false + } + if !authorizedGroupsEqual(pm.AuthorizedGroups, other.AuthorizedGroups) { + return false + } + + return true +} + +func stringSlicesEqualUnordered(a, b []string) bool { + if len(a) != len(b) { + return false + } + if len(a) == 0 { + return true + } + sorted1 := make([]string, len(a)) + sorted2 := make([]string, len(b)) + copy(sorted1, a) + copy(sorted2, b) + slices.Sort(sorted1) + slices.Sort(sorted2) + return slices.Equal(sorted1, sorted2) +} + +func portRangeSlicesEqualUnordered(a, b []RulePortRange) bool { + if len(a) != len(b) { + return false + } + if len(a) == 0 { + return true + } + cmp := func(x, y RulePortRange) int { + if x.Start != y.Start { + if x.Start < y.Start { + return -1 + } + return 1 + } + if x.End != y.End { + if x.End < y.End { + return -1 + } + return 1 + } + return 0 + } + sorted1 := make([]RulePortRange, len(a)) + sorted2 := make([]RulePortRange, len(b)) + copy(sorted1, a) + copy(sorted2, b) + slices.SortFunc(sorted1, cmp) + slices.SortFunc(sorted2, cmp) + return slices.EqualFunc(sorted1, sorted2, func(x, y RulePortRange) bool { + return x.Start == y.Start && x.End == y.End + }) +} + +func authorizedGroupsEqual(a, b map[string][]string) bool { + if len(a) != len(b) { + return false + } + for k, va := range a { + vb, ok := b[k] + if !ok { + return false + } + if !stringSlicesEqualUnordered(va, vb) { + return false + } + } + return true +} diff --git a/management/server/types/policyrule_test.go b/management/server/types/policyrule_test.go new file mode 100644 index 000000000..816e72abb --- /dev/null +++ b/management/server/types/policyrule_test.go @@ -0,0 +1,194 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPolicyRuleEqual_SamePortsDifferentOrder(t *testing.T) { + a := &PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + Ports: []string{"443", "80", "22"}, + } + b := &PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + Ports: []string{"22", "443", "80"}, + } + assert.True(t, a.Equal(b)) +} + +func TestPolicyRuleEqual_DifferentPorts(t *testing.T) { + a := &PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + Ports: []string{"443", "80"}, + } + b := &PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + Ports: []string{"443", "22"}, + } + assert.False(t, a.Equal(b)) +} + +func TestPolicyRuleEqual_SourcesDestinationsDifferentOrder(t *testing.T) { + a := &PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + Sources: []string{"g1", "g2", "g3"}, + Destinations: []string{"g4", "g5"}, + } + b := &PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + Sources: []string{"g3", "g1", "g2"}, + Destinations: []string{"g5", "g4"}, + } + assert.True(t, a.Equal(b)) +} + +func TestPolicyRuleEqual_DifferentSources(t *testing.T) { + a := &PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + Sources: []string{"g1", "g2"}, + } + b := &PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + Sources: []string{"g1", "g3"}, + } + assert.False(t, a.Equal(b)) +} + +func TestPolicyRuleEqual_PortRangesDifferentOrder(t *testing.T) { + a := &PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + PortRanges: []RulePortRange{ + {Start: 8000, End: 9000}, + {Start: 80, End: 80}, + }, + } + b := &PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + PortRanges: []RulePortRange{ + {Start: 80, End: 80}, + {Start: 8000, End: 9000}, + }, + } + assert.True(t, a.Equal(b)) +} + +func TestPolicyRuleEqual_DifferentPortRanges(t *testing.T) { + a := &PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + PortRanges: []RulePortRange{ + {Start: 80, End: 80}, + }, + } + b := &PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + PortRanges: []RulePortRange{ + {Start: 80, End: 443}, + }, + } + assert.False(t, a.Equal(b)) +} + +func TestPolicyRuleEqual_AuthorizedGroupsDifferentValueOrder(t *testing.T) { + a := &PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + AuthorizedGroups: map[string][]string{ + "g1": {"u1", "u2", "u3"}, + }, + } + b := &PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + AuthorizedGroups: map[string][]string{ + "g1": {"u3", "u1", "u2"}, + }, + } + assert.True(t, a.Equal(b)) +} + +func TestPolicyRuleEqual_DifferentAuthorizedGroups(t *testing.T) { + a := &PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + AuthorizedGroups: map[string][]string{ + "g1": {"u1"}, + }, + } + b := &PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + AuthorizedGroups: map[string][]string{ + "g2": {"u1"}, + }, + } + assert.False(t, a.Equal(b)) +} + +func TestPolicyRuleEqual_DifferentScalarFields(t *testing.T) { + base := PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + Name: "test", + Description: "desc", + Enabled: true, + Action: PolicyTrafficActionAccept, + Bidirectional: true, + Protocol: PolicyRuleProtocolTCP, + } + + other := base + other.Name = "changed" + assert.False(t, base.Equal(&other)) + + other = base + other.Enabled = false + assert.False(t, base.Equal(&other)) + + other = base + other.Action = PolicyTrafficActionDrop + assert.False(t, base.Equal(&other)) + + other = base + other.Protocol = PolicyRuleProtocolUDP + assert.False(t, base.Equal(&other)) +} + +func TestPolicyRuleEqual_NilCases(t *testing.T) { + var a *PolicyRule + var b *PolicyRule + assert.True(t, a.Equal(b)) + + a = &PolicyRule{ID: "rule1"} + assert.False(t, a.Equal(nil)) +} + +func TestPolicyRuleEqual_EmptySlices(t *testing.T) { + a := &PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + Ports: []string{}, + Sources: nil, + } + b := &PolicyRule{ + ID: "rule1", + PolicyID: "pol1", + Ports: nil, + Sources: []string{}, + } + assert.True(t, a.Equal(b)) +} + From 2fb50aef6be47eca9158750f1195d2e5194fb867 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 21 Apr 2026 19:05:58 +0200 Subject: [PATCH 334/374] [client] allow UDP packet loss in TestICEBind_HandlesConcurrentMixedTraffic (#5953) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The test writes 500 packets per family and asserted exact-count delivery within a 5s window, even though its own comment says "Some packet loss is acceptable for UDP". On FreeBSD/QEMU runners the writer loops cannot always finish all 500 before the 5s deadline closes the readers (we have seen 411/500 in CI). The real assertion of this test is the routing check — IPv4 peer only gets v4- packets, IPv6 peer only gets v6- packets — which remains strict. Replace the exact-count assertions with a >=80% delivery threshold so runner speed variance no longer causes false failures. --- client/iface/bind/ice_bind_test.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/client/iface/bind/ice_bind_test.go b/client/iface/bind/ice_bind_test.go index 1fdd955c9..f49e68508 100644 --- a/client/iface/bind/ice_bind_test.go +++ b/client/iface/bind/ice_bind_test.go @@ -239,8 +239,12 @@ func TestICEBind_HandlesConcurrentMixedTraffic(t *testing.T) { ipv6Count++ } - assert.Equal(t, packetsPerFamily, ipv4Count) - assert.Equal(t, packetsPerFamily, ipv6Count) + // Allow some UDP packet loss under load (e.g. FreeBSD/QEMU runners). The + // routing-correctness checks above are the real assertions; the counts + // are a sanity bound to catch a totally silent path. + minDelivered := packetsPerFamily * 80 / 100 + assert.GreaterOrEqual(t, ipv4Count, minDelivered, "IPv4 delivery below threshold") + assert.GreaterOrEqual(t, ipv6Count, minDelivered, "IPv6 delivery below threshold") } func TestICEBind_DetectsAddressFamilyFromConnection(t *testing.T) { From 703353d3540cf4507475b7a0c614a6431ae9ee1c Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 21 Apr 2026 19:06:47 +0200 Subject: [PATCH 335/374] [flow] fix goroutine leak in TestReceive_ProtocolErrorStreamReconnect (#5951) The Receive goroutine could outlive the test and call t.Logf after teardown, panicking with "Log in goroutine after ... has completed". Register a cleanup that waits for the goroutine to exit; ordering is LIFO so it runs after client.Close, which is what unblocks Receive. --- flow/client/client_test.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/flow/client/client_test.go b/flow/client/client_test.go index 55157acbc..c8f5f4af4 100644 --- a/flow/client/client_test.go +++ b/flow/client/client_test.go @@ -457,6 +457,18 @@ func TestReceive_ProtocolErrorStreamReconnect(t *testing.T) { client, err := flow.NewClient("http://"+server.addr, "test-payload", "test-signature", 1*time.Second) require.NoError(t, err) + + // Cleanups run LIFO: the goroutine-drain registered here runs after Close below, + // which is when Receive has actually returned. Without this, the Receive goroutine + // can outlive the test and call t.Logf after teardown, panicking. + receiveDone := make(chan struct{}) + t.Cleanup(func() { + select { + case <-receiveDone: + case <-time.After(2 * time.Second): + t.Error("Receive goroutine did not exit after Close") + } + }) t.Cleanup(func() { err := client.Close() assert.NoError(t, err, "failed to close flow") @@ -468,6 +480,7 @@ func TestReceive_ProtocolErrorStreamReconnect(t *testing.T) { receivedAfterReconnect := make(chan struct{}) go func() { + defer close(receiveDone) err := client.Receive(ctx, 1*time.Second, func(msg *proto.FlowEventAck) error { if msg.IsInitiator || len(msg.EventId) == 0 { return nil From 1165058fad020a42d6cdd72f5ab1c7248ee2c3ce Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 21 Apr 2026 19:07:20 +0200 Subject: [PATCH 336/374] [client] fix port collision in TestUpload (#5950) * [debug] fix port collision in TestUpload TestUpload hardcoded :8080, so it failed deterministically when anything was already on that port and collided across concurrent test runs. Bind a :0 listener in the test to get a kernel-assigned free port, and add Server.Serve so tests can hand the listener in without reaching into unexported state. * [debug] drop test-only Server.Serve, use SERVER_ADDRESS env The previous commit added a Server.Serve method on the upload-server, used only by TestUpload. That left production with an unused function. Reserve an ephemeral loopback port in the test, release it, and pass the address through SERVER_ADDRESS (which the server already reads). A small wait helper ensures the server is accepting connections before the upload runs, so the close/rebind gap does not cause a false failure. --- client/internal/debug/upload_test.go | 34 +++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/client/internal/debug/upload_test.go b/client/internal/debug/upload_test.go index e833c196d..f224b8d3f 100644 --- a/client/internal/debug/upload_test.go +++ b/client/internal/debug/upload_test.go @@ -3,10 +3,12 @@ package debug import ( "context" "errors" + "net" "net/http" "os" "path/filepath" "testing" + "time" "github.com/stretchr/testify/require" @@ -19,8 +21,10 @@ func TestUpload(t *testing.T) { t.Skip("Skipping upload test on docker ci") } testDir := t.TempDir() - testURL := "http://localhost:8080" + addr := reserveLoopbackPort(t) + testURL := "http://" + addr t.Setenv("SERVER_URL", testURL) + t.Setenv("SERVER_ADDRESS", addr) t.Setenv("STORE_DIR", testDir) srv := server.NewServer() go func() { @@ -33,6 +37,7 @@ func TestUpload(t *testing.T) { t.Errorf("Failed to stop server: %v", err) } }) + waitForServer(t, addr) file := filepath.Join(t.TempDir(), "tmpfile") fileContent := []byte("test file content") @@ -47,3 +52,30 @@ func TestUpload(t *testing.T) { require.NoError(t, err) require.Equal(t, fileContent, createdFileContent) } + +// reserveLoopbackPort binds an ephemeral port on loopback to learn a free +// address, then releases it so the server under test can rebind. The close/ +// rebind window is racy in theory; on loopback with a kernel-assigned port +// it's essentially never contended in practice. +func reserveLoopbackPort(t *testing.T) string { + t.Helper() + l, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + addr := l.Addr().String() + require.NoError(t, l.Close()) + return addr +} + +func waitForServer(t *testing.T, addr string) { + t.Helper() + deadline := time.Now().Add(5 * time.Second) + for time.Now().Before(deadline) { + c, err := net.DialTimeout("tcp", addr, 100*time.Millisecond) + if err == nil { + _ = c.Close() + return + } + time.Sleep(20 * time.Millisecond) + } + t.Fatalf("server did not start listening on %s in time", addr) +} From 57b23c5b25ebcc1eb560f354c1fc50aa1435beaa Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Tue, 21 Apr 2026 23:06:52 +0300 Subject: [PATCH 337/374] [management] Propagate context changes to upstream middleware (#5956) --- .../server/http/middleware/auth_middleware.go | 44 ++++++++++--------- .../server/telemetry/http_api_metrics.go | 16 ++----- 2 files changed, 27 insertions(+), 33 deletions(-) diff --git a/management/server/http/middleware/auth_middleware.go b/management/server/http/middleware/auth_middleware.go index 63be672e6..8106380f2 100644 --- a/management/server/http/middleware/auth_middleware.go +++ b/management/server/http/middleware/auth_middleware.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/otel/metric" "github.com/netbirdio/management-integrations/integrations" + serverauth "github.com/netbirdio/netbird/management/server/auth" nbcontext "github.com/netbirdio/netbird/management/server/context" "github.com/netbirdio/netbird/management/server/http/middleware/bypass" @@ -87,17 +88,14 @@ func (m *AuthMiddleware) Handler(h http.Handler) http.Handler { switch authType { case "bearer": - request, err := m.checkJWTFromRequest(r, authHeader) - if err != nil { + if err := m.checkJWTFromRequest(r, authHeader); err != nil { log.WithContext(r.Context()).Errorf("Error when validating JWT: %s", err.Error()) util.WriteError(r.Context(), status.Errorf(status.Unauthorized, "token invalid"), w) return } - - h.ServeHTTP(w, request) + h.ServeHTTP(w, r) case "token": - request, err := m.checkPATFromRequest(r, authHeader) - if err != nil { + if err := m.checkPATFromRequest(r, authHeader); err != nil { log.WithContext(r.Context()).Debugf("Error when validating PAT: %s", err.Error()) // Check if it's a status error, otherwise default to Unauthorized if _, ok := status.FromError(err); !ok { @@ -106,7 +104,7 @@ func (m *AuthMiddleware) Handler(h http.Handler) http.Handler { util.WriteError(r.Context(), err, w) return } - h.ServeHTTP(w, request) + h.ServeHTTP(w, r) default: util.WriteError(r.Context(), status.Errorf(status.Unauthorized, "no valid authentication provided"), w) return @@ -115,19 +113,19 @@ func (m *AuthMiddleware) Handler(h http.Handler) http.Handler { } // CheckJWTFromRequest checks if the JWT is valid -func (m *AuthMiddleware) checkJWTFromRequest(r *http.Request, authHeaderParts []string) (*http.Request, error) { +func (m *AuthMiddleware) checkJWTFromRequest(r *http.Request, authHeaderParts []string) error { token, err := getTokenFromJWTRequest(authHeaderParts) // If an error occurs, call the error handler and return an error if err != nil { - return r, fmt.Errorf("error extracting token: %w", err) + return fmt.Errorf("error extracting token: %w", err) } ctx := r.Context() userAuth, validatedToken, err := m.authManager.ValidateAndParseToken(ctx, token) if err != nil { - return r, err + return err } if impersonate, ok := r.URL.Query()["account"]; ok && len(impersonate) == 1 { @@ -143,7 +141,7 @@ func (m *AuthMiddleware) checkJWTFromRequest(r *http.Request, authHeaderParts [] // we need to call this method because if user is new, we will automatically add it to existing or create a new account accountId, _, err := m.ensureAccount(ctx, userAuth) if err != nil { - return r, err + return err } if userAuth.AccountId != accountId { @@ -153,7 +151,7 @@ func (m *AuthMiddleware) checkJWTFromRequest(r *http.Request, authHeaderParts [] userAuth, err = m.authManager.EnsureUserAccessByJWTGroups(ctx, userAuth, validatedToken) if err != nil { - return r, err + return err } err = m.syncUserJWTGroups(ctx, userAuth) @@ -164,17 +162,19 @@ func (m *AuthMiddleware) checkJWTFromRequest(r *http.Request, authHeaderParts [] _, err = m.getUserFromUserAuth(ctx, userAuth) if err != nil { log.WithContext(ctx).Errorf("HTTP server failed to update user from user auth: %s", err) - return r, err + return err } - return nbcontext.SetUserAuthInRequest(r, userAuth), nil + // propagates ctx change to upstream middleware + *r = *nbcontext.SetUserAuthInRequest(r, userAuth) + return nil } // CheckPATFromRequest checks if the PAT is valid -func (m *AuthMiddleware) checkPATFromRequest(r *http.Request, authHeaderParts []string) (*http.Request, error) { +func (m *AuthMiddleware) checkPATFromRequest(r *http.Request, authHeaderParts []string) error { token, err := getTokenFromPATRequest(authHeaderParts) if err != nil { - return r, fmt.Errorf("error extracting token: %w", err) + return fmt.Errorf("error extracting token: %w", err) } if m.patUsageTracker != nil { @@ -183,22 +183,22 @@ func (m *AuthMiddleware) checkPATFromRequest(r *http.Request, authHeaderParts [] if m.rateLimiter != nil && !isTerraformRequest(r) { if !m.rateLimiter.Allow(token) { - return r, status.Errorf(status.TooManyRequests, "too many requests") + return status.Errorf(status.TooManyRequests, "too many requests") } } ctx := r.Context() user, pat, accDomain, accCategory, err := m.authManager.GetPATInfo(ctx, token) if err != nil { - return r, fmt.Errorf("invalid Token: %w", err) + return fmt.Errorf("invalid Token: %w", err) } if time.Now().After(pat.GetExpirationDate()) { - return r, fmt.Errorf("token expired") + return fmt.Errorf("token expired") } err = m.authManager.MarkPATUsed(ctx, pat.ID) if err != nil { - return r, err + return err } userAuth := auth.UserAuth{ @@ -216,7 +216,9 @@ func (m *AuthMiddleware) checkPATFromRequest(r *http.Request, authHeaderParts [] } } - return nbcontext.SetUserAuthInRequest(r, userAuth), nil + // propagates ctx change to upstream middleware + *r = *nbcontext.SetUserAuthInRequest(r, userAuth) + return nil } func isTerraformRequest(r *http.Request) bool { diff --git a/management/server/telemetry/http_api_metrics.go b/management/server/telemetry/http_api_metrics.go index 28e8457e2..e48e6d64a 100644 --- a/management/server/telemetry/http_api_metrics.go +++ b/management/server/telemetry/http_api_metrics.go @@ -193,20 +193,12 @@ func (m *HTTPMiddleware) Handler(h http.Handler) http.Handler { } }) - h.ServeHTTP(w, r.WithContext(ctx)) + // Hold on to req so auth's in-place ctx update is visible after ServeHTTP. + req := r.WithContext(ctx) + h.ServeHTTP(w, req) close(handlerDone) - userAuth, err := nbContext.GetUserAuthFromContext(r.Context()) - if err == nil { - if userAuth.AccountId != "" { - //nolint - ctx = context.WithValue(ctx, nbContext.AccountIDKey, userAuth.AccountId) - } - if userAuth.UserId != "" { - //nolint - ctx = context.WithValue(ctx, nbContext.UserIDKey, userAuth.UserId) - } - } + ctx = req.Context() if w.Status() > 399 { log.WithContext(ctx).Errorf("HTTP response %v: %v %v status %v", reqID, r.Method, r.URL, w.Status()) From a822a33240da6f32b53e9f4c7ee4144262ef2de4 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 22 Apr 2026 17:35:22 +0900 Subject: [PATCH 338/374] [self-hosted] Use cscli lapi status for CrowdSec readiness in installer (#5949) --- infrastructure_files/getting-started.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh index 08da48264..2a3f840b4 100755 --- a/infrastructure_files/getting-started.sh +++ b/infrastructure_files/getting-started.sh @@ -472,7 +472,7 @@ start_services_and_show_instructions() { if [[ "$ENABLE_CROWDSEC" == "true" ]]; then echo "Registering CrowdSec bouncer..." local cs_retries=0 - while ! $DOCKER_COMPOSE_COMMAND exec -T crowdsec cscli capi status >/dev/null 2>&1; do + while ! $DOCKER_COMPOSE_COMMAND exec -T crowdsec cscli lapi status >/dev/null 2>&1; do cs_retries=$((cs_retries + 1)) if [[ $cs_retries -ge 30 ]]; then echo "WARNING: CrowdSec did not become ready. Skipping CrowdSec setup." > /dev/stderr From 801de8c68d4725f313344d4b6f684c3a86e59b90 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 22 Apr 2026 22:10:14 +0900 Subject: [PATCH 339/374] [client] Add TTL-based refresh to mgmt DNS cache via handler chain (#5945) --- client/internal/dns/handler_chain.go | 94 ++++ client/internal/dns/handler_chain_test.go | 164 ++++++ client/internal/dns/mgmt/mgmt.go | 489 ++++++++++++++---- client/internal/dns/mgmt/mgmt_refresh_test.go | 408 +++++++++++++++ client/internal/dns/mgmt/mgmt_test.go | 55 ++ client/internal/dns/server.go | 1 + 6 files changed, 1114 insertions(+), 97 deletions(-) create mode 100644 client/internal/dns/mgmt/mgmt_refresh_test.go diff --git a/client/internal/dns/handler_chain.go b/client/internal/dns/handler_chain.go index 6fbdedc59..57e7722d4 100644 --- a/client/internal/dns/handler_chain.go +++ b/client/internal/dns/handler_chain.go @@ -1,7 +1,10 @@ package dns import ( + "context" "fmt" + "math" + "net" "slices" "strconv" "strings" @@ -192,6 +195,12 @@ func (c *HandlerChain) logHandlers() { } func (c *HandlerChain) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { + c.dispatch(w, r, math.MaxInt) +} + +// dispatch routes a DNS request through the chain, skipping handlers with +// priority > maxPriority. Shared by ServeDNS and ResolveInternal. +func (c *HandlerChain) dispatch(w dns.ResponseWriter, r *dns.Msg, maxPriority int) { if len(r.Question) == 0 { return } @@ -216,6 +225,9 @@ func (c *HandlerChain) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { // Try handlers in priority order for _, entry := range handlers { + if entry.Priority > maxPriority { + continue + } if !c.isHandlerMatch(qname, entry) { continue } @@ -273,6 +285,55 @@ func (c *HandlerChain) logResponse(logger *log.Entry, cw *ResponseWriterChain, q cw.response.Len(), meta, time.Since(startTime)) } +// ResolveInternal runs an in-process DNS query against the chain, skipping any +// handler with priority > maxPriority. Used by internal callers (e.g. the mgmt +// cache refresher) that must bypass themselves to avoid loops. Honors ctx +// cancellation; on ctx.Done the dispatch goroutine is left to drain on its own +// (bounded by the invoked handler's internal timeout). +func (c *HandlerChain) ResolveInternal(ctx context.Context, r *dns.Msg, maxPriority int) (*dns.Msg, error) { + if len(r.Question) == 0 { + return nil, fmt.Errorf("empty question") + } + + base := &internalResponseWriter{} + done := make(chan struct{}) + go func() { + c.dispatch(base, r, maxPriority) + close(done) + }() + + select { + case <-done: + case <-ctx.Done(): + // Prefer a completed response if dispatch finished concurrently with cancellation. + select { + case <-done: + default: + return nil, fmt.Errorf("resolve %s: %w", strings.ToLower(r.Question[0].Name), ctx.Err()) + } + } + + if base.response == nil || base.response.Rcode == dns.RcodeRefused { + return nil, fmt.Errorf("no handler resolved %s at priority ≤ %d", + strings.ToLower(r.Question[0].Name), maxPriority) + } + return base.response, nil +} + +// HasRootHandlerAtOrBelow reports whether any "." handler is registered at +// priority ≤ maxPriority. +func (c *HandlerChain) HasRootHandlerAtOrBelow(maxPriority int) bool { + c.mu.RLock() + defer c.mu.RUnlock() + + for _, h := range c.handlers { + if h.Pattern == "." && h.Priority <= maxPriority { + return true + } + } + return false +} + func (c *HandlerChain) isHandlerMatch(qname string, entry HandlerEntry) bool { switch { case entry.Pattern == ".": @@ -291,3 +352,36 @@ func (c *HandlerChain) isHandlerMatch(qname string, entry HandlerEntry) bool { } } } + +// internalResponseWriter captures a dns.Msg for in-process chain queries. +type internalResponseWriter struct { + response *dns.Msg +} + +func (w *internalResponseWriter) WriteMsg(m *dns.Msg) error { w.response = m; return nil } +func (w *internalResponseWriter) LocalAddr() net.Addr { return nil } +func (w *internalResponseWriter) RemoteAddr() net.Addr { return nil } + +// Write unpacks raw DNS bytes so handlers that call Write instead of WriteMsg +// still surface their answer to ResolveInternal. +func (w *internalResponseWriter) Write(p []byte) (int, error) { + msg := new(dns.Msg) + if err := msg.Unpack(p); err != nil { + return 0, err + } + w.response = msg + return len(p), nil +} + +func (w *internalResponseWriter) Close() error { return nil } +func (w *internalResponseWriter) TsigStatus() error { return nil } + +// TsigTimersOnly is part of dns.ResponseWriter. +func (w *internalResponseWriter) TsigTimersOnly(bool) { + // no-op: in-process queries carry no TSIG state. +} + +// Hijack is part of dns.ResponseWriter. +func (w *internalResponseWriter) Hijack() { + // no-op: in-process queries have no underlying connection to hand off. +} diff --git a/client/internal/dns/handler_chain_test.go b/client/internal/dns/handler_chain_test.go index fa9525069..034a760dc 100644 --- a/client/internal/dns/handler_chain_test.go +++ b/client/internal/dns/handler_chain_test.go @@ -1,11 +1,15 @@ package dns_test import ( + "context" + "net" "testing" + "time" "github.com/miekg/dns" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" nbdns "github.com/netbirdio/netbird/client/internal/dns" "github.com/netbirdio/netbird/client/internal/dns/test" @@ -1042,3 +1046,163 @@ func TestHandlerChain_AddRemoveRoundtrip(t *testing.T) { }) } } + +// answeringHandler writes a fixed A record to ack the query. Used to verify +// which handler ResolveInternal dispatches to. +type answeringHandler struct { + name string + ip string +} + +func (h *answeringHandler) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { + resp := &dns.Msg{} + resp.SetReply(r) + resp.Answer = []dns.RR{&dns.A{ + Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP(h.ip).To4(), + }} + _ = w.WriteMsg(resp) +} + +func (h *answeringHandler) String() string { return h.name } + +func TestHandlerChain_ResolveInternal_SkipsAboveMaxPriority(t *testing.T) { + chain := nbdns.NewHandlerChain() + + high := &answeringHandler{name: "high", ip: "10.0.0.1"} + low := &answeringHandler{name: "low", ip: "10.0.0.2"} + + chain.AddHandler("example.com.", high, nbdns.PriorityMgmtCache) + chain.AddHandler("example.com.", low, nbdns.PriorityUpstream) + + r := new(dns.Msg) + r.SetQuestion("example.com.", dns.TypeA) + + resp, err := chain.ResolveInternal(context.Background(), r, nbdns.PriorityUpstream) + assert.NoError(t, err) + assert.NotNil(t, resp) + assert.Equal(t, 1, len(resp.Answer)) + a, ok := resp.Answer[0].(*dns.A) + assert.True(t, ok) + assert.Equal(t, "10.0.0.2", a.A.String(), "should skip mgmtCache handler and resolve via upstream") +} + +func TestHandlerChain_ResolveInternal_ErrorWhenNoMatch(t *testing.T) { + chain := nbdns.NewHandlerChain() + high := &answeringHandler{name: "high", ip: "10.0.0.1"} + chain.AddHandler("example.com.", high, nbdns.PriorityMgmtCache) + + r := new(dns.Msg) + r.SetQuestion("example.com.", dns.TypeA) + + _, err := chain.ResolveInternal(context.Background(), r, nbdns.PriorityUpstream) + assert.Error(t, err, "no handler at or below maxPriority should error") +} + +// rawWriteHandler packs a response and calls ResponseWriter.Write directly +// (instead of WriteMsg), exercising the internalResponseWriter.Write path. +type rawWriteHandler struct { + ip string +} + +func (h *rawWriteHandler) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { + resp := &dns.Msg{} + resp.SetReply(r) + resp.Answer = []dns.RR{&dns.A{ + Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP(h.ip).To4(), + }} + packed, err := resp.Pack() + if err != nil { + return + } + _, _ = w.Write(packed) +} + +func TestHandlerChain_ResolveInternal_CapturesRawWrite(t *testing.T) { + chain := nbdns.NewHandlerChain() + chain.AddHandler("example.com.", &rawWriteHandler{ip: "10.0.0.3"}, nbdns.PriorityUpstream) + + r := new(dns.Msg) + r.SetQuestion("example.com.", dns.TypeA) + + resp, err := chain.ResolveInternal(context.Background(), r, nbdns.PriorityUpstream) + assert.NoError(t, err) + require.NotNil(t, resp) + require.Len(t, resp.Answer, 1) + a, ok := resp.Answer[0].(*dns.A) + require.True(t, ok) + assert.Equal(t, "10.0.0.3", a.A.String(), "handlers calling Write(packed) must still surface their answer") +} + +func TestHandlerChain_ResolveInternal_EmptyQuestion(t *testing.T) { + chain := nbdns.NewHandlerChain() + _, err := chain.ResolveInternal(context.Background(), new(dns.Msg), nbdns.PriorityUpstream) + assert.Error(t, err) +} + +// hangingHandler blocks indefinitely until closed, simulating a wedged upstream. +type hangingHandler struct { + block chan struct{} +} + +func (h *hangingHandler) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { + <-h.block + resp := &dns.Msg{} + resp.SetReply(r) + _ = w.WriteMsg(resp) +} + +func (h *hangingHandler) String() string { return "hangingHandler" } + +func TestHandlerChain_ResolveInternal_HonorsContextTimeout(t *testing.T) { + chain := nbdns.NewHandlerChain() + h := &hangingHandler{block: make(chan struct{})} + defer close(h.block) + + chain.AddHandler("example.com.", h, nbdns.PriorityUpstream) + + r := new(dns.Msg) + r.SetQuestion("example.com.", dns.TypeA) + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + start := time.Now() + _, err := chain.ResolveInternal(ctx, r, nbdns.PriorityUpstream) + elapsed := time.Since(start) + + assert.Error(t, err) + assert.ErrorIs(t, err, context.DeadlineExceeded) + assert.Less(t, elapsed, 500*time.Millisecond, "ResolveInternal must return shortly after ctx deadline") +} + +func TestHandlerChain_HasRootHandlerAtOrBelow(t *testing.T) { + chain := nbdns.NewHandlerChain() + h := &answeringHandler{name: "h", ip: "10.0.0.1"} + + assert.False(t, chain.HasRootHandlerAtOrBelow(nbdns.PriorityUpstream), "empty chain") + + chain.AddHandler("example.com.", h, nbdns.PriorityUpstream) + assert.False(t, chain.HasRootHandlerAtOrBelow(nbdns.PriorityUpstream), "non-root handler does not count") + + chain.AddHandler(".", h, nbdns.PriorityMgmtCache) + assert.False(t, chain.HasRootHandlerAtOrBelow(nbdns.PriorityUpstream), "root handler above threshold excluded") + + chain.AddHandler(".", h, nbdns.PriorityDefault) + assert.True(t, chain.HasRootHandlerAtOrBelow(nbdns.PriorityUpstream), "root handler at PriorityDefault included") + + chain.RemoveHandler(".", nbdns.PriorityDefault) + assert.False(t, chain.HasRootHandlerAtOrBelow(nbdns.PriorityUpstream)) + + // Primary nsgroup case: root handler lands at PriorityUpstream. + chain.AddHandler(".", h, nbdns.PriorityUpstream) + assert.True(t, chain.HasRootHandlerAtOrBelow(nbdns.PriorityUpstream), "root at PriorityUpstream included") + chain.RemoveHandler(".", nbdns.PriorityUpstream) + + // Fallback case: original /etc/resolv.conf entries land at PriorityFallback. + chain.AddHandler(".", h, nbdns.PriorityFallback) + assert.True(t, chain.HasRootHandlerAtOrBelow(nbdns.PriorityUpstream), "root at PriorityFallback included") + chain.RemoveHandler(".", nbdns.PriorityFallback) + assert.False(t, chain.HasRootHandlerAtOrBelow(nbdns.PriorityUpstream)) +} diff --git a/client/internal/dns/mgmt/mgmt.go b/client/internal/dns/mgmt/mgmt.go index 314af51d9..988e427fb 100644 --- a/client/internal/dns/mgmt/mgmt.go +++ b/client/internal/dns/mgmt/mgmt.go @@ -2,40 +2,83 @@ package mgmt import ( "context" + "errors" "fmt" "net" - "net/netip" "net/url" + "os" + "slices" "strings" "sync" + "sync/atomic" "time" "github.com/miekg/dns" log "github.com/sirupsen/logrus" + "golang.org/x/sync/singleflight" dnsconfig "github.com/netbirdio/netbird/client/internal/dns/config" + "github.com/netbirdio/netbird/client/internal/dns/resutil" "github.com/netbirdio/netbird/shared/management/domain" ) -const dnsTimeout = 5 * time.Second +const ( + dnsTimeout = 5 * time.Second + defaultTTL = 300 * time.Second + refreshBackoff = 30 * time.Second -// Resolver caches critical NetBird infrastructure domains + // envMgmtCacheTTL overrides defaultTTL for integration/dev testing. + envMgmtCacheTTL = "NB_MGMT_CACHE_TTL" +) + +// ChainResolver lets the cache refresh stale entries through the DNS handler +// chain instead of net.DefaultResolver, avoiding loopback when NetBird is the +// system resolver. +type ChainResolver interface { + ResolveInternal(ctx context.Context, msg *dns.Msg, maxPriority int) (*dns.Msg, error) + HasRootHandlerAtOrBelow(maxPriority int) bool +} + +// cachedRecord holds DNS records plus timestamps used for TTL refresh. +// records and cachedAt are set at construction and treated as immutable; +// lastFailedRefresh and consecFailures are mutable and must be accessed under +// Resolver.mutex. +type cachedRecord struct { + records []dns.RR + cachedAt time.Time + lastFailedRefresh time.Time + consecFailures int +} + +// Resolver caches critical NetBird infrastructure domains. +// records, refreshing, mgmtDomain and serverDomains are all guarded by mutex. type Resolver struct { - records map[dns.Question][]dns.RR + records map[dns.Question]*cachedRecord mgmtDomain *domain.Domain serverDomains *dnsconfig.ServerDomains mutex sync.RWMutex -} -type ipsResponse struct { - ips []netip.Addr - err error + chain ChainResolver + chainMaxPriority int + refreshGroup singleflight.Group + + // refreshing tracks questions whose refresh is running via the OS + // fallback path. A ServeDNS hit for a question in this map indicates + // the OS resolver routed the recursive query back to us (loop). Only + // the OS path arms this so chain-path refreshes don't produce false + // positives. The atomic bool is CAS-flipped once per refresh to + // throttle the warning log. + refreshing map[dns.Question]*atomic.Bool + + cacheTTL time.Duration } // NewResolver creates a new management domains cache resolver. func NewResolver() *Resolver { return &Resolver{ - records: make(map[dns.Question][]dns.RR), + records: make(map[dns.Question]*cachedRecord), + refreshing: make(map[dns.Question]*atomic.Bool), + cacheTTL: resolveCacheTTL(), } } @@ -44,7 +87,19 @@ func (m *Resolver) String() string { return "MgmtCacheResolver" } -// ServeDNS implements dns.Handler interface. +// SetChainResolver wires the handler chain used to refresh stale cache entries. +// maxPriority caps which handlers may answer refresh queries (typically +// PriorityUpstream, so upstream/default/fallback handlers are consulted and +// mgmt/route/local handlers are skipped). +func (m *Resolver) SetChainResolver(chain ChainResolver, maxPriority int) { + m.mutex.Lock() + m.chain = chain + m.chainMaxPriority = maxPriority + m.mutex.Unlock() +} + +// ServeDNS serves cached A/AAAA records. Stale entries are returned +// immediately and refreshed asynchronously (stale-while-revalidate). func (m *Resolver) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { if len(r.Question) == 0 { m.continueToNext(w, r) @@ -60,7 +115,14 @@ func (m *Resolver) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { } m.mutex.RLock() - records, found := m.records[question] + cached, found := m.records[question] + inflight := m.refreshing[question] + var shouldRefresh bool + if found { + stale := time.Since(cached.cachedAt) > m.cacheTTL + inBackoff := !cached.lastFailedRefresh.IsZero() && time.Since(cached.lastFailedRefresh) < refreshBackoff + shouldRefresh = stale && !inBackoff + } m.mutex.RUnlock() if !found { @@ -68,12 +130,23 @@ func (m *Resolver) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { return } + if inflight != nil && inflight.CompareAndSwap(false, true) { + log.Warnf("mgmt cache: possible resolver loop for domain=%s: served stale while an OS-fallback refresh was inflight (if NetBird is the system resolver, the OS-path predicate is wrong)", + question.Name) + } + + // Skip scheduling a refresh goroutine if one is already inflight for + // this question; singleflight would dedup anyway but skipping avoids + // a parked goroutine per stale hit under bursty load. + if shouldRefresh && inflight == nil { + m.scheduleRefresh(question, cached) + } + resp := &dns.Msg{} resp.SetReply(r) resp.Authoritative = false resp.RecursionAvailable = true - - resp.Answer = append(resp.Answer, records...) + resp.Answer = cloneRecordsWithTTL(cached.records, m.responseTTL(cached.cachedAt)) log.Debugf("serving %d cached records for domain=%s", len(resp.Answer), question.Name) @@ -98,101 +171,260 @@ func (m *Resolver) continueToNext(w dns.ResponseWriter, r *dns.Msg) { } } -// AddDomain manually adds a domain to cache by resolving it. +// AddDomain resolves a domain and stores its A/AAAA records in the cache. +// A family that resolves NODATA (nil err, zero records) evicts any stale +// entry for that qtype. func (m *Resolver) AddDomain(ctx context.Context, d domain.Domain) error { dnsName := strings.ToLower(dns.Fqdn(d.PunycodeString())) ctx, cancel := context.WithTimeout(ctx, dnsTimeout) defer cancel() - ips, err := lookupIPWithExtraTimeout(ctx, d) - if err != nil { - return err + aRecords, aaaaRecords, errA, errAAAA := m.lookupBoth(ctx, d, dnsName) + + if errA != nil && errAAAA != nil { + return fmt.Errorf("resolve %s: %w", d.SafeString(), errors.Join(errA, errAAAA)) } - var aRecords, aaaaRecords []dns.RR - for _, ip := range ips { - if ip.Is4() { - rr := &dns.A{ - Hdr: dns.RR_Header{ - Name: dnsName, - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 300, - }, - A: ip.AsSlice(), - } - aRecords = append(aRecords, rr) - } else if ip.Is6() { - rr := &dns.AAAA{ - Hdr: dns.RR_Header{ - Name: dnsName, - Rrtype: dns.TypeAAAA, - Class: dns.ClassINET, - Ttl: 300, - }, - AAAA: ip.AsSlice(), - } - aaaaRecords = append(aaaaRecords, rr) + if len(aRecords) == 0 && len(aaaaRecords) == 0 { + if err := errors.Join(errA, errAAAA); err != nil { + return fmt.Errorf("resolve %s: no A/AAAA records: %w", d.SafeString(), err) } + return fmt.Errorf("resolve %s: no A/AAAA records", d.SafeString()) } + now := time.Now() m.mutex.Lock() + defer m.mutex.Unlock() - if len(aRecords) > 0 { - aQuestion := dns.Question{ - Name: dnsName, - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - } - m.records[aQuestion] = aRecords - } + m.applyFamilyRecords(dnsName, dns.TypeA, aRecords, errA, now) + m.applyFamilyRecords(dnsName, dns.TypeAAAA, aaaaRecords, errAAAA, now) - if len(aaaaRecords) > 0 { - aaaaQuestion := dns.Question{ - Name: dnsName, - Qtype: dns.TypeAAAA, - Qclass: dns.ClassINET, - } - m.records[aaaaQuestion] = aaaaRecords - } - - m.mutex.Unlock() - - log.Debugf("added domain=%s with %d A records and %d AAAA records", + log.Debugf("added/updated domain=%s with %d A records and %d AAAA records", d.SafeString(), len(aRecords), len(aaaaRecords)) return nil } -func lookupIPWithExtraTimeout(ctx context.Context, d domain.Domain) ([]netip.Addr, error) { - log.Infof("looking up IP for mgmt domain=%s", d.SafeString()) - defer log.Infof("done looking up IP for mgmt domain=%s", d.SafeString()) - resultChan := make(chan *ipsResponse, 1) +// applyFamilyRecords writes records, evicts on NODATA, leaves the cache +// untouched on error. Caller holds m.mutex. +func (m *Resolver) applyFamilyRecords(dnsName string, qtype uint16, records []dns.RR, err error, now time.Time) { + q := dns.Question{Name: dnsName, Qtype: qtype, Qclass: dns.ClassINET} + switch { + case len(records) > 0: + m.records[q] = &cachedRecord{records: records, cachedAt: now} + case err == nil: + delete(m.records, q) + } +} - go func() { - ips, err := net.DefaultResolver.LookupNetIP(ctx, "ip", d.PunycodeString()) - resultChan <- &ipsResponse{ - err: err, - ips: ips, +// scheduleRefresh kicks off an async refresh. DoChan spawns one goroutine per +// unique in-flight key; bursty stale hits share its channel. expected is the +// cachedRecord pointer observed by the caller; the refresh only mutates the +// cache if that pointer is still the one stored, so a stale in-flight refresh +// can't clobber a newer entry written by AddDomain or a competing refresh. +func (m *Resolver) scheduleRefresh(question dns.Question, expected *cachedRecord) { + key := question.Name + "|" + dns.TypeToString[question.Qtype] + _ = m.refreshGroup.DoChan(key, func() (any, error) { + return nil, m.refreshQuestion(question, expected) + }) +} + +// refreshQuestion replaces the cached records on success, or marks the entry +// failed (arming the backoff) on failure. While this runs, ServeDNS can detect +// a resolver loop by spotting a query for this same question arriving on us. +// expected pins the cache entry observed at schedule time; mutations only apply +// if m.records[question] still points at it. +func (m *Resolver) refreshQuestion(question dns.Question, expected *cachedRecord) error { + ctx, cancel := context.WithTimeout(context.Background(), dnsTimeout) + defer cancel() + + d, err := domain.FromString(strings.TrimSuffix(question.Name, ".")) + if err != nil { + m.markRefreshFailed(question, expected) + return fmt.Errorf("parse domain: %w", err) + } + + records, err := m.lookupRecords(ctx, d, question) + if err != nil { + fails := m.markRefreshFailed(question, expected) + logf := log.Warnf + if fails == 0 || fails > 1 { + logf = log.Debugf } - }() - - var resp *ipsResponse - - select { - case <-time.After(dnsTimeout + time.Millisecond*500): - log.Warnf("timed out waiting for IP for mgmt domain=%s", d.SafeString()) - return nil, fmt.Errorf("timed out waiting for ips to be available for domain %s", d.SafeString()) - case <-ctx.Done(): - return nil, ctx.Err() - case resp = <-resultChan: + logf("refresh mgmt cache domain=%s type=%s: %v (consecutive failures=%d)", + d.SafeString(), dns.TypeToString[question.Qtype], err, fails) + return err } - if resp.err != nil { - return nil, fmt.Errorf("resolve domain %s: %w", d.SafeString(), resp.err) + // NOERROR/NODATA: family gone upstream, evict so we stop serving stale. + if len(records) == 0 { + m.mutex.Lock() + if m.records[question] == expected { + delete(m.records, question) + m.mutex.Unlock() + log.Infof("removed mgmt cache domain=%s type=%s: no records returned", + d.SafeString(), dns.TypeToString[question.Qtype]) + return nil + } + m.mutex.Unlock() + log.Debugf("skipping refresh evict for domain=%s type=%s: entry changed during refresh", + d.SafeString(), dns.TypeToString[question.Qtype]) + return nil } - return resp.ips, nil + + now := time.Now() + m.mutex.Lock() + if m.records[question] != expected { + m.mutex.Unlock() + log.Debugf("skipping refresh write for domain=%s type=%s: entry changed during refresh", + d.SafeString(), dns.TypeToString[question.Qtype]) + return nil + } + m.records[question] = &cachedRecord{records: records, cachedAt: now} + m.mutex.Unlock() + + log.Infof("refreshed mgmt cache domain=%s type=%s", + d.SafeString(), dns.TypeToString[question.Qtype]) + return nil +} + +func (m *Resolver) markRefreshing(question dns.Question) { + m.mutex.Lock() + m.refreshing[question] = &atomic.Bool{} + m.mutex.Unlock() +} + +func (m *Resolver) clearRefreshing(question dns.Question) { + m.mutex.Lock() + delete(m.refreshing, question) + m.mutex.Unlock() +} + +// markRefreshFailed arms the backoff and returns the new consecutive-failure +// count so callers can downgrade subsequent failure logs to debug. +func (m *Resolver) markRefreshFailed(question dns.Question, expected *cachedRecord) int { + m.mutex.Lock() + defer m.mutex.Unlock() + c, ok := m.records[question] + if !ok || c != expected { + return 0 + } + c.lastFailedRefresh = time.Now() + c.consecFailures++ + return c.consecFailures +} + +// lookupBoth resolves A and AAAA via chain or OS. Per-family errors let +// callers tell records, NODATA (nil err, no records), and failure apart. +func (m *Resolver) lookupBoth(ctx context.Context, d domain.Domain, dnsName string) (aRecords, aaaaRecords []dns.RR, errA, errAAAA error) { + m.mutex.RLock() + chain := m.chain + maxPriority := m.chainMaxPriority + m.mutex.RUnlock() + + if chain != nil && chain.HasRootHandlerAtOrBelow(maxPriority) { + aRecords, errA = m.lookupViaChain(ctx, chain, maxPriority, dnsName, dns.TypeA) + aaaaRecords, errAAAA = m.lookupViaChain(ctx, chain, maxPriority, dnsName, dns.TypeAAAA) + return + } + + // TODO: drop once every supported OS registers a fallback resolver. Safe + // today: no root handler at priority ≤ PriorityUpstream means NetBird is + // not the system resolver, so net.DefaultResolver will not loop back. + aRecords, errA = m.osLookup(ctx, d, dnsName, dns.TypeA) + aaaaRecords, errAAAA = m.osLookup(ctx, d, dnsName, dns.TypeAAAA) + return +} + +// lookupRecords resolves a single record type via chain or OS. The OS branch +// arms the loop detector for the duration of its call so that ServeDNS can +// spot the OS resolver routing the recursive query back to us. +func (m *Resolver) lookupRecords(ctx context.Context, d domain.Domain, q dns.Question) ([]dns.RR, error) { + m.mutex.RLock() + chain := m.chain + maxPriority := m.chainMaxPriority + m.mutex.RUnlock() + + if chain != nil && chain.HasRootHandlerAtOrBelow(maxPriority) { + return m.lookupViaChain(ctx, chain, maxPriority, q.Name, q.Qtype) + } + + // TODO: drop once every supported OS registers a fallback resolver. + m.markRefreshing(q) + defer m.clearRefreshing(q) + + return m.osLookup(ctx, d, q.Name, q.Qtype) +} + +// lookupViaChain resolves via the handler chain and rewrites each RR to use +// dnsName as owner and m.cacheTTL as TTL, so CNAME-backed domains don't cache +// target-owned records or upstream TTLs. NODATA returns (nil, nil). +func (m *Resolver) lookupViaChain(ctx context.Context, chain ChainResolver, maxPriority int, dnsName string, qtype uint16) ([]dns.RR, error) { + msg := &dns.Msg{} + msg.SetQuestion(dnsName, qtype) + msg.RecursionDesired = true + + resp, err := chain.ResolveInternal(ctx, msg, maxPriority) + if err != nil { + return nil, fmt.Errorf("chain resolve: %w", err) + } + if resp == nil { + return nil, fmt.Errorf("chain resolve returned nil response") + } + if resp.Rcode != dns.RcodeSuccess { + return nil, fmt.Errorf("chain resolve rcode=%s", dns.RcodeToString[resp.Rcode]) + } + + ttl := uint32(m.cacheTTL.Seconds()) + owners := cnameOwners(dnsName, resp.Answer) + var filtered []dns.RR + for _, rr := range resp.Answer { + h := rr.Header() + if h.Class != dns.ClassINET || h.Rrtype != qtype { + continue + } + if !owners[strings.ToLower(dns.Fqdn(h.Name))] { + continue + } + if cp := cloneIPRecord(rr, dnsName, ttl); cp != nil { + filtered = append(filtered, cp) + } + } + return filtered, nil +} + +// osLookup resolves a single family via net.DefaultResolver using resutil, +// which disambiguates NODATA from NXDOMAIN and Unmaps v4-mapped-v6. NODATA +// returns (nil, nil). +func (m *Resolver) osLookup(ctx context.Context, d domain.Domain, dnsName string, qtype uint16) ([]dns.RR, error) { + network := resutil.NetworkForQtype(qtype) + if network == "" { + return nil, fmt.Errorf("unsupported qtype %s", dns.TypeToString[qtype]) + } + + log.Infof("looking up IP for mgmt domain=%s type=%s", d.SafeString(), dns.TypeToString[qtype]) + defer log.Infof("done looking up IP for mgmt domain=%s type=%s", d.SafeString(), dns.TypeToString[qtype]) + + result := resutil.LookupIP(ctx, net.DefaultResolver, network, d.PunycodeString(), qtype) + if result.Rcode == dns.RcodeSuccess { + return resutil.IPsToRRs(dnsName, result.IPs, uint32(m.cacheTTL.Seconds())), nil + } + + if result.Err != nil { + return nil, fmt.Errorf("resolve %s type=%s: %w", d.SafeString(), dns.TypeToString[qtype], result.Err) + } + return nil, fmt.Errorf("resolve %s type=%s: rcode=%s", d.SafeString(), dns.TypeToString[qtype], dns.RcodeToString[result.Rcode]) +} + +// responseTTL returns the remaining cache lifetime in seconds (rounded up), +// so downstream resolvers don't cache an answer for longer than we will. +func (m *Resolver) responseTTL(cachedAt time.Time) uint32 { + remaining := m.cacheTTL - time.Since(cachedAt) + if remaining <= 0 { + return 0 + } + return uint32((remaining + time.Second - 1) / time.Second) } // PopulateFromConfig extracts and caches domains from the client configuration. @@ -224,19 +456,12 @@ func (m *Resolver) RemoveDomain(d domain.Domain) error { m.mutex.Lock() defer m.mutex.Unlock() - aQuestion := dns.Question{ - Name: dnsName, - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - } - delete(m.records, aQuestion) - - aaaaQuestion := dns.Question{ - Name: dnsName, - Qtype: dns.TypeAAAA, - Qclass: dns.ClassINET, - } - delete(m.records, aaaaQuestion) + qA := dns.Question{Name: dnsName, Qtype: dns.TypeA, Qclass: dns.ClassINET} + qAAAA := dns.Question{Name: dnsName, Qtype: dns.TypeAAAA, Qclass: dns.ClassINET} + delete(m.records, qA) + delete(m.records, qAAAA) + delete(m.refreshing, qA) + delete(m.refreshing, qAAAA) log.Debugf("removed domain=%s from cache", d.SafeString()) return nil @@ -394,3 +619,73 @@ func (m *Resolver) extractDomainsFromServerDomains(serverDomains dnsconfig.Serve return domains } + +// cloneIPRecord returns a deep copy of rr retargeted to owner with ttl. Non +// A/AAAA records return nil. +func cloneIPRecord(rr dns.RR, owner string, ttl uint32) dns.RR { + switch r := rr.(type) { + case *dns.A: + cp := *r + cp.Hdr.Name = owner + cp.Hdr.Ttl = ttl + cp.A = slices.Clone(r.A) + return &cp + case *dns.AAAA: + cp := *r + cp.Hdr.Name = owner + cp.Hdr.Ttl = ttl + cp.AAAA = slices.Clone(r.AAAA) + return &cp + } + return nil +} + +// cloneRecordsWithTTL clones A/AAAA records preserving their owner and +// stamping ttl so the response shares no memory with the cached slice. +func cloneRecordsWithTTL(records []dns.RR, ttl uint32) []dns.RR { + out := make([]dns.RR, 0, len(records)) + for _, rr := range records { + if cp := cloneIPRecord(rr, rr.Header().Name, ttl); cp != nil { + out = append(out, cp) + } + } + return out +} + +// cnameOwners returns dnsName plus every target reachable by following CNAMEs +// in answer, iterating until fixed point so out-of-order chains resolve. +func cnameOwners(dnsName string, answer []dns.RR) map[string]bool { + owners := map[string]bool{dnsName: true} + for { + added := false + for _, rr := range answer { + cname, ok := rr.(*dns.CNAME) + if !ok { + continue + } + name := strings.ToLower(dns.Fqdn(cname.Hdr.Name)) + if !owners[name] { + continue + } + target := strings.ToLower(dns.Fqdn(cname.Target)) + if !owners[target] { + owners[target] = true + added = true + } + } + if !added { + return owners + } + } +} + +// resolveCacheTTL reads the cache TTL override env var; invalid or empty +// values fall back to defaultTTL. Called once per Resolver from NewResolver. +func resolveCacheTTL() time.Duration { + if v := os.Getenv(envMgmtCacheTTL); v != "" { + if d, err := time.ParseDuration(v); err == nil && d > 0 { + return d + } + } + return defaultTTL +} diff --git a/client/internal/dns/mgmt/mgmt_refresh_test.go b/client/internal/dns/mgmt/mgmt_refresh_test.go new file mode 100644 index 000000000..9faa5a0b8 --- /dev/null +++ b/client/internal/dns/mgmt/mgmt_refresh_test.go @@ -0,0 +1,408 @@ +package mgmt + +import ( + "context" + "errors" + "net" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/miekg/dns" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/client/internal/dns/test" + "github.com/netbirdio/netbird/shared/management/domain" +) + +type fakeChain struct { + mu sync.Mutex + calls map[string]int + answers map[string][]dns.RR + err error + hasRoot bool + onLookup func() +} + +func newFakeChain() *fakeChain { + return &fakeChain{ + calls: map[string]int{}, + answers: map[string][]dns.RR{}, + hasRoot: true, + } +} + +func (f *fakeChain) HasRootHandlerAtOrBelow(maxPriority int) bool { + f.mu.Lock() + defer f.mu.Unlock() + return f.hasRoot +} + +func (f *fakeChain) ResolveInternal(ctx context.Context, msg *dns.Msg, maxPriority int) (*dns.Msg, error) { + f.mu.Lock() + q := msg.Question[0] + key := q.Name + "|" + dns.TypeToString[q.Qtype] + f.calls[key]++ + answers := f.answers[key] + err := f.err + onLookup := f.onLookup + f.mu.Unlock() + + if onLookup != nil { + onLookup() + } + if err != nil { + return nil, err + } + resp := &dns.Msg{} + resp.SetReply(msg) + resp.Answer = answers + return resp, nil +} + +func (f *fakeChain) setAnswer(name string, qtype uint16, ip string) { + f.mu.Lock() + defer f.mu.Unlock() + key := name + "|" + dns.TypeToString[qtype] + hdr := dns.RR_Header{Name: name, Rrtype: qtype, Class: dns.ClassINET, Ttl: 60} + switch qtype { + case dns.TypeA: + f.answers[key] = []dns.RR{&dns.A{Hdr: hdr, A: net.ParseIP(ip).To4()}} + case dns.TypeAAAA: + f.answers[key] = []dns.RR{&dns.AAAA{Hdr: hdr, AAAA: net.ParseIP(ip).To16()}} + } +} + +func (f *fakeChain) callCount(name string, qtype uint16) int { + f.mu.Lock() + defer f.mu.Unlock() + return f.calls[name+"|"+dns.TypeToString[qtype]] +} + +// waitFor polls the predicate until it returns true or the deadline passes. +func waitFor(t *testing.T, d time.Duration, fn func() bool) { + t.Helper() + deadline := time.Now().Add(d) + for time.Now().Before(deadline) { + if fn() { + return + } + time.Sleep(5 * time.Millisecond) + } + t.Fatalf("condition not met within %s", d) +} + +func queryA(t *testing.T, r *Resolver, name string) *dns.Msg { + t.Helper() + msg := new(dns.Msg) + msg.SetQuestion(name, dns.TypeA) + w := &test.MockResponseWriter{} + r.ServeDNS(w, msg) + return w.GetLastResponse() +} + +func firstA(t *testing.T, resp *dns.Msg) string { + t.Helper() + require.NotNil(t, resp) + require.Greater(t, len(resp.Answer), 0, "expected at least one answer") + a, ok := resp.Answer[0].(*dns.A) + require.True(t, ok, "expected A record") + return a.A.String() +} + +func TestResolver_CacheTTLGatesRefresh(t *testing.T) { + // Same cached entry age, different cacheTTL values: the shorter TTL must + // trigger a background refresh, the longer one must not. Proves that the + // per-Resolver cacheTTL field actually drives the stale decision. + cachedAt := time.Now().Add(-100 * time.Millisecond) + + newRec := func() *cachedRecord { + return &cachedRecord{ + records: []dns.RR{&dns.A{ + Hdr: dns.RR_Header{Name: "mgmt.example.com.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP("10.0.0.1").To4(), + }}, + cachedAt: cachedAt, + } + } + q := dns.Question{Name: "mgmt.example.com.", Qtype: dns.TypeA, Qclass: dns.ClassINET} + + t.Run("short TTL treats entry as stale and refreshes", func(t *testing.T) { + r := NewResolver() + r.cacheTTL = 10 * time.Millisecond + chain := newFakeChain() + chain.setAnswer(q.Name, dns.TypeA, "10.0.0.2") + r.SetChainResolver(chain, 50) + r.records[q] = newRec() + + resp := queryA(t, r, q.Name) + assert.Equal(t, "10.0.0.1", firstA(t, resp), "stale entry must be served while refresh runs") + + waitFor(t, time.Second, func() bool { + return chain.callCount(q.Name, dns.TypeA) >= 1 + }) + }) + + t.Run("long TTL keeps entry fresh and skips refresh", func(t *testing.T) { + r := NewResolver() + r.cacheTTL = time.Hour + chain := newFakeChain() + chain.setAnswer(q.Name, dns.TypeA, "10.0.0.2") + r.SetChainResolver(chain, 50) + r.records[q] = newRec() + + resp := queryA(t, r, q.Name) + assert.Equal(t, "10.0.0.1", firstA(t, resp)) + + time.Sleep(50 * time.Millisecond) + assert.Equal(t, 0, chain.callCount(q.Name, dns.TypeA), "fresh entry must not trigger refresh") + }) +} + +func TestResolver_ServeFresh_NoRefresh(t *testing.T) { + r := NewResolver() + chain := newFakeChain() + chain.setAnswer("mgmt.example.com.", dns.TypeA, "10.0.0.2") + r.SetChainResolver(chain, 50) + + r.records[dns.Question{Name: "mgmt.example.com.", Qtype: dns.TypeA, Qclass: dns.ClassINET}] = &cachedRecord{ + records: []dns.RR{&dns.A{ + Hdr: dns.RR_Header{Name: "mgmt.example.com.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP("10.0.0.1").To4(), + }}, + cachedAt: time.Now(), // fresh + } + + resp := queryA(t, r, "mgmt.example.com.") + assert.Equal(t, "10.0.0.1", firstA(t, resp)) + + time.Sleep(20 * time.Millisecond) + assert.Equal(t, 0, chain.callCount("mgmt.example.com.", dns.TypeA), "fresh entry must not trigger refresh") +} + +func TestResolver_StaleTriggersAsyncRefresh(t *testing.T) { + r := NewResolver() + chain := newFakeChain() + chain.setAnswer("mgmt.example.com.", dns.TypeA, "10.0.0.2") + r.SetChainResolver(chain, 50) + + q := dns.Question{Name: "mgmt.example.com.", Qtype: dns.TypeA, Qclass: dns.ClassINET} + r.records[q] = &cachedRecord{ + records: []dns.RR{&dns.A{ + Hdr: dns.RR_Header{Name: q.Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP("10.0.0.1").To4(), + }}, + cachedAt: time.Now().Add(-2 * defaultTTL), // stale + } + + // First query: serves stale immediately. + resp := queryA(t, r, "mgmt.example.com.") + assert.Equal(t, "10.0.0.1", firstA(t, resp), "stale entry must be served while refresh runs") + + waitFor(t, time.Second, func() bool { + return chain.callCount("mgmt.example.com.", dns.TypeA) >= 1 + }) + + // Next query should now return the refreshed IP. + waitFor(t, time.Second, func() bool { + resp := queryA(t, r, "mgmt.example.com.") + return resp != nil && len(resp.Answer) > 0 && firstA(t, resp) == "10.0.0.2" + }) +} + +func TestResolver_ConcurrentStaleHitsCollapseRefresh(t *testing.T) { + r := NewResolver() + chain := newFakeChain() + chain.setAnswer("mgmt.example.com.", dns.TypeA, "10.0.0.2") + + var inflight atomic.Int32 + var maxInflight atomic.Int32 + chain.onLookup = func() { + cur := inflight.Add(1) + defer inflight.Add(-1) + for { + prev := maxInflight.Load() + if cur <= prev || maxInflight.CompareAndSwap(prev, cur) { + break + } + } + time.Sleep(50 * time.Millisecond) // hold inflight long enough to collide + } + + r.SetChainResolver(chain, 50) + + q := dns.Question{Name: "mgmt.example.com.", Qtype: dns.TypeA, Qclass: dns.ClassINET} + r.records[q] = &cachedRecord{ + records: []dns.RR{&dns.A{ + Hdr: dns.RR_Header{Name: q.Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP("10.0.0.1").To4(), + }}, + cachedAt: time.Now().Add(-2 * defaultTTL), + } + + var wg sync.WaitGroup + for i := 0; i < 50; i++ { + wg.Add(1) + go func() { + defer wg.Done() + queryA(t, r, "mgmt.example.com.") + }() + } + wg.Wait() + + waitFor(t, 2*time.Second, func() bool { + return inflight.Load() == 0 + }) + + calls := chain.callCount("mgmt.example.com.", dns.TypeA) + assert.LessOrEqual(t, calls, 2, "singleflight must collapse concurrent refreshes (got %d)", calls) + assert.Equal(t, int32(1), maxInflight.Load(), "only one refresh should run concurrently") +} + +func TestResolver_RefreshFailureArmsBackoff(t *testing.T) { + r := NewResolver() + chain := newFakeChain() + chain.err = errors.New("boom") + r.SetChainResolver(chain, 50) + + q := dns.Question{Name: "mgmt.example.com.", Qtype: dns.TypeA, Qclass: dns.ClassINET} + r.records[q] = &cachedRecord{ + records: []dns.RR{&dns.A{ + Hdr: dns.RR_Header{Name: q.Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP("10.0.0.1").To4(), + }}, + cachedAt: time.Now().Add(-2 * defaultTTL), + } + + // First stale hit triggers a refresh attempt that fails. + resp := queryA(t, r, "mgmt.example.com.") + assert.Equal(t, "10.0.0.1", firstA(t, resp), "stale entry served while refresh fails") + + waitFor(t, time.Second, func() bool { + return chain.callCount("mgmt.example.com.", dns.TypeA) == 1 + }) + waitFor(t, time.Second, func() bool { + r.mutex.RLock() + defer r.mutex.RUnlock() + c, ok := r.records[q] + return ok && !c.lastFailedRefresh.IsZero() + }) + + // Subsequent stale hits within backoff window should not schedule more refreshes. + for i := 0; i < 10; i++ { + queryA(t, r, "mgmt.example.com.") + } + time.Sleep(50 * time.Millisecond) + assert.Equal(t, 1, chain.callCount("mgmt.example.com.", dns.TypeA), "backoff must suppress further refreshes") +} + +func TestResolver_NoRootHandler_SkipsChain(t *testing.T) { + r := NewResolver() + chain := newFakeChain() + chain.hasRoot = false + chain.setAnswer("mgmt.example.com.", dns.TypeA, "10.0.0.2") + r.SetChainResolver(chain, 50) + + // With hasRoot=false the chain must not be consulted. Use a short + // deadline so the OS fallback returns quickly without waiting on a + // real network call in CI. + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + _, _, _, _ = r.lookupBoth(ctx, domain.Domain("mgmt.example.com"), "mgmt.example.com.") + + assert.Equal(t, 0, chain.callCount("mgmt.example.com.", dns.TypeA), + "chain must not be used when no root handler is registered at the bound priority") +} + +func TestResolver_ServeDuringRefreshSetsLoopFlag(t *testing.T) { + // ServeDNS being invoked for a question while a refresh for that question + // is inflight indicates a resolver loop (OS resolver sent the recursive + // query back to us). The inflightRefresh.loopLoggedOnce flag must be set. + r := NewResolver() + + q := dns.Question{Name: "mgmt.example.com.", Qtype: dns.TypeA, Qclass: dns.ClassINET} + r.records[q] = &cachedRecord{ + records: []dns.RR{&dns.A{ + Hdr: dns.RR_Header{Name: q.Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP("10.0.0.1").To4(), + }}, + cachedAt: time.Now(), + } + + // Simulate an inflight refresh. + r.markRefreshing(q) + defer r.clearRefreshing(q) + + resp := queryA(t, r, "mgmt.example.com.") + assert.Equal(t, "10.0.0.1", firstA(t, resp), "stale entry must still be served to avoid breaking external queries") + + r.mutex.RLock() + inflight := r.refreshing[q] + r.mutex.RUnlock() + require.NotNil(t, inflight) + assert.True(t, inflight.Load(), "loop flag must be set once a ServeDNS during refresh was observed") +} + +func TestResolver_LoopFlagOnlyTrippedOncePerRefresh(t *testing.T) { + r := NewResolver() + + q := dns.Question{Name: "mgmt.example.com.", Qtype: dns.TypeA, Qclass: dns.ClassINET} + r.records[q] = &cachedRecord{ + records: []dns.RR{&dns.A{ + Hdr: dns.RR_Header{Name: q.Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP("10.0.0.1").To4(), + }}, + cachedAt: time.Now(), + } + + r.markRefreshing(q) + defer r.clearRefreshing(q) + + // Multiple ServeDNS calls during the same refresh must not re-set the flag + // (CompareAndSwap from false -> true returns true only on the first call). + for range 5 { + queryA(t, r, "mgmt.example.com.") + } + + r.mutex.RLock() + inflight := r.refreshing[q] + r.mutex.RUnlock() + assert.True(t, inflight.Load()) +} + +func TestResolver_NoLoopFlagWhenNotRefreshing(t *testing.T) { + r := NewResolver() + + q := dns.Question{Name: "mgmt.example.com.", Qtype: dns.TypeA, Qclass: dns.ClassINET} + r.records[q] = &cachedRecord{ + records: []dns.RR{&dns.A{ + Hdr: dns.RR_Header{Name: q.Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP("10.0.0.1").To4(), + }}, + cachedAt: time.Now(), + } + + queryA(t, r, "mgmt.example.com.") + + r.mutex.RLock() + _, ok := r.refreshing[q] + r.mutex.RUnlock() + assert.False(t, ok, "no refresh inflight means no loop tracking") +} + +func TestResolver_AddDomain_UsesChainWhenRootRegistered(t *testing.T) { + r := NewResolver() + chain := newFakeChain() + chain.setAnswer("mgmt.example.com.", dns.TypeA, "10.0.0.2") + chain.setAnswer("mgmt.example.com.", dns.TypeAAAA, "fd00::2") + r.SetChainResolver(chain, 50) + + require.NoError(t, r.AddDomain(context.Background(), domain.Domain("mgmt.example.com"))) + + resp := queryA(t, r, "mgmt.example.com.") + assert.Equal(t, "10.0.0.2", firstA(t, resp)) + assert.Equal(t, 1, chain.callCount("mgmt.example.com.", dns.TypeA)) + assert.Equal(t, 1, chain.callCount("mgmt.example.com.", dns.TypeAAAA)) +} diff --git a/client/internal/dns/mgmt/mgmt_test.go b/client/internal/dns/mgmt/mgmt_test.go index 9e8a746f3..276cbba0a 100644 --- a/client/internal/dns/mgmt/mgmt_test.go +++ b/client/internal/dns/mgmt/mgmt_test.go @@ -6,6 +6,7 @@ import ( "net/url" "strings" "testing" + "time" "github.com/miekg/dns" "github.com/stretchr/testify/assert" @@ -23,6 +24,60 @@ func TestResolver_NewResolver(t *testing.T) { assert.False(t, resolver.MatchSubdomains()) } +func TestResolveCacheTTL(t *testing.T) { + tests := []struct { + name string + value string + want time.Duration + }{ + {"unset falls back to default", "", defaultTTL}, + {"valid duration", "45s", 45 * time.Second}, + {"valid minutes", "2m", 2 * time.Minute}, + {"malformed falls back to default", "not-a-duration", defaultTTL}, + {"zero falls back to default", "0s", defaultTTL}, + {"negative falls back to default", "-5s", defaultTTL}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Setenv(envMgmtCacheTTL, tc.value) + got := resolveCacheTTL() + assert.Equal(t, tc.want, got, "parsed TTL should match") + }) + } +} + +func TestNewResolver_CacheTTLFromEnv(t *testing.T) { + t.Setenv(envMgmtCacheTTL, "7s") + r := NewResolver() + assert.Equal(t, 7*time.Second, r.cacheTTL, "NewResolver should evaluate cacheTTL once from env") +} + +func TestResolver_ResponseTTL(t *testing.T) { + now := time.Now() + tests := []struct { + name string + cacheTTL time.Duration + cachedAt time.Time + wantMin uint32 + wantMax uint32 + }{ + {"fresh entry returns full TTL", 60 * time.Second, now, 59, 60}, + {"half-aged entry returns half TTL", 60 * time.Second, now.Add(-30 * time.Second), 29, 31}, + {"expired entry returns zero", 60 * time.Second, now.Add(-61 * time.Second), 0, 0}, + {"exactly expired returns zero", 10 * time.Second, now.Add(-10 * time.Second), 0, 0}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + r := &Resolver{cacheTTL: tc.cacheTTL} + got := r.responseTTL(tc.cachedAt) + assert.GreaterOrEqual(t, got, tc.wantMin, "remaining TTL should be >= wantMin") + assert.LessOrEqual(t, got, tc.wantMax, "remaining TTL should be <= wantMax") + }) + } +} + func TestResolver_ExtractDomainFromURL(t *testing.T) { tests := []struct { name string diff --git a/client/internal/dns/server.go b/client/internal/dns/server.go index f7865047b..d4f54dec5 100644 --- a/client/internal/dns/server.go +++ b/client/internal/dns/server.go @@ -212,6 +212,7 @@ func newDefaultServer( ctx, stop := context.WithCancel(ctx) mgmtCacheResolver := mgmt.NewResolver() + mgmtCacheResolver.SetChainResolver(handlerChain, PriorityUpstream) defaultServer := &DefaultServer{ ctx: ctx, From 5da05ecca655831e99bd348316a1b10941ef4c24 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Wed, 22 Apr 2026 20:54:18 +0200 Subject: [PATCH 340/374] [client] increase gRPC health check timeout to 5s (#5961) Bump the IsHealthy() context timeout from 1s to 5s for both the management and signal gRPC clients to reduce false negatives on slower or congested connections. --- shared/management/client/grpc.go | 4 +++- shared/signal/client/grpc.go | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/shared/management/client/grpc.go b/shared/management/client/grpc.go index a01e51abc..e9bea7ffb 100644 --- a/shared/management/client/grpc.go +++ b/shared/management/client/grpc.go @@ -30,6 +30,8 @@ import ( const ConnectTimeout = 10 * time.Second +const healthCheckTimeout = 5 * time.Second + const ( // EnvMaxRecvMsgSize overrides the default gRPC max receive message size (4 MB) // for the management client connection. Value is in bytes. @@ -532,7 +534,7 @@ func (c *GrpcClient) IsHealthy() bool { case connectivity.Ready: } - ctx, cancel := context.WithTimeout(c.ctx, 1*time.Second) + ctx, cancel := context.WithTimeout(c.ctx, healthCheckTimeout) defer cancel() _, err := c.realClient.GetServerKey(ctx, &proto.Empty{}) diff --git a/shared/signal/client/grpc.go b/shared/signal/client/grpc.go index 5368b57a2..d0f598dd7 100644 --- a/shared/signal/client/grpc.go +++ b/shared/signal/client/grpc.go @@ -23,6 +23,8 @@ import ( "github.com/netbirdio/netbird/util/wsproxy" ) +const healthCheckTimeout = 5 * time.Second + // ConnStateNotifier is a wrapper interface of the status recorder type ConnStateNotifier interface { MarkSignalDisconnected(error) @@ -263,7 +265,7 @@ func (c *GrpcClient) IsHealthy() bool { case connectivity.Ready: } - ctx, cancel := context.WithTimeout(c.ctx, 1*time.Second) + ctx, cancel := context.WithTimeout(c.ctx, healthCheckTimeout) defer cancel() _, err := c.realClient.Send(ctx, &proto.EncryptedMessage{ Key: c.key.PublicKey().String(), From b6038e8acda35545ff551847458bd191f2f008c3 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Thu, 23 Apr 2026 15:13:22 +0200 Subject: [PATCH 341/374] [management] refactor: changeable pat rate limiting (#5946) --- management/internals/server/boot.go | 12 +- management/server/http/handler.go | 44 +---- .../server/http/middleware/auth_middleware.go | 13 +- .../http/middleware/auth_middleware_test.go | 22 ++- .../server/http/middleware/rate_limiter.go | 97 ++++++++++ .../http/middleware/rate_limiter_test.go | 171 ++++++++++++++++++ .../testing/testing_tools/channel/channel.go | 4 +- 7 files changed, 304 insertions(+), 59 deletions(-) diff --git a/management/internals/server/boot.go b/management/internals/server/boot.go index 24dfb641b..2b40c0aad 100644 --- a/management/internals/server/boot.go +++ b/management/internals/server/boot.go @@ -30,6 +30,7 @@ import ( nbcache "github.com/netbirdio/netbird/management/server/cache" nbContext "github.com/netbirdio/netbird/management/server/context" nbhttp "github.com/netbirdio/netbird/management/server/http" + "github.com/netbirdio/netbird/management/server/http/middleware" "github.com/netbirdio/netbird/management/server/store" "github.com/netbirdio/netbird/management/server/telemetry" mgmtProto "github.com/netbirdio/netbird/shared/management/proto" @@ -109,7 +110,7 @@ func (s *BaseServer) EventStore() activity.Store { func (s *BaseServer) APIHandler() http.Handler { return Create(s, func() http.Handler { - httpAPIHandler, err := nbhttp.NewAPIHandler(context.Background(), s.AccountManager(), s.NetworksManager(), s.ResourcesManager(), s.RoutesManager(), s.GroupsManager(), s.GeoLocationManager(), s.AuthManager(), s.Metrics(), s.IntegratedValidator(), s.ProxyController(), s.PermissionsManager(), s.PeersManager(), s.SettingsManager(), s.ZonesManager(), s.RecordsManager(), s.NetworkMapController(), s.IdpManager(), s.ServiceManager(), s.ReverseProxyDomainManager(), s.AccessLogsManager(), s.ReverseProxyGRPCServer(), s.Config.ReverseProxy.TrustedHTTPProxies) + httpAPIHandler, err := nbhttp.NewAPIHandler(context.Background(), s.AccountManager(), s.NetworksManager(), s.ResourcesManager(), s.RoutesManager(), s.GroupsManager(), s.GeoLocationManager(), s.AuthManager(), s.Metrics(), s.IntegratedValidator(), s.ProxyController(), s.PermissionsManager(), s.PeersManager(), s.SettingsManager(), s.ZonesManager(), s.RecordsManager(), s.NetworkMapController(), s.IdpManager(), s.ServiceManager(), s.ReverseProxyDomainManager(), s.AccessLogsManager(), s.ReverseProxyGRPCServer(), s.Config.ReverseProxy.TrustedHTTPProxies, s.RateLimiter()) if err != nil { log.Fatalf("failed to create API handler: %v", err) } @@ -117,6 +118,15 @@ func (s *BaseServer) APIHandler() http.Handler { }) } +func (s *BaseServer) RateLimiter() *middleware.APIRateLimiter { + return Create(s, func() *middleware.APIRateLimiter { + cfg, enabled := middleware.RateLimiterConfigFromEnv() + limiter := middleware.NewAPIRateLimiter(cfg) + limiter.SetEnabled(enabled) + return limiter + }) +} + func (s *BaseServer) GRPCServer() *grpc.Server { return Create(s, func() *grpc.Server { trustedPeers := s.Config.ReverseProxy.TrustedPeers diff --git a/management/server/http/handler.go b/management/server/http/handler.go index ad36b9d46..56b2d8203 100644 --- a/management/server/http/handler.go +++ b/management/server/http/handler.go @@ -5,9 +5,6 @@ import ( "fmt" "net/http" "net/netip" - "os" - "strconv" - "time" "github.com/gorilla/mux" "github.com/rs/cors" @@ -66,14 +63,11 @@ import ( ) const ( - apiPrefix = "/api" - rateLimitingEnabledKey = "NB_API_RATE_LIMITING_ENABLED" - rateLimitingBurstKey = "NB_API_RATE_LIMITING_BURST" - rateLimitingRPMKey = "NB_API_RATE_LIMITING_RPM" + apiPrefix = "/api" ) // NewAPIHandler creates the Management service HTTP API handler registering all the available endpoints. -func NewAPIHandler(ctx context.Context, accountManager account.Manager, networksManager nbnetworks.Manager, resourceManager resources.Manager, routerManager routers.Manager, groupsManager nbgroups.Manager, LocationManager geolocation.Geolocation, authManager auth.Manager, appMetrics telemetry.AppMetrics, integratedValidator integrated_validator.IntegratedValidator, proxyController port_forwarding.Controller, permissionsManager permissions.Manager, peersManager nbpeers.Manager, settingsManager settings.Manager, zManager zones.Manager, rManager records.Manager, networkMapController network_map.Controller, idpManager idpmanager.Manager, serviceManager service.Manager, reverseProxyDomainManager *manager.Manager, reverseProxyAccessLogsManager accesslogs.Manager, proxyGRPCServer *nbgrpc.ProxyServiceServer, trustedHTTPProxies []netip.Prefix) (http.Handler, error) { +func NewAPIHandler(ctx context.Context, accountManager account.Manager, networksManager nbnetworks.Manager, resourceManager resources.Manager, routerManager routers.Manager, groupsManager nbgroups.Manager, LocationManager geolocation.Geolocation, authManager auth.Manager, appMetrics telemetry.AppMetrics, integratedValidator integrated_validator.IntegratedValidator, proxyController port_forwarding.Controller, permissionsManager permissions.Manager, peersManager nbpeers.Manager, settingsManager settings.Manager, zManager zones.Manager, rManager records.Manager, networkMapController network_map.Controller, idpManager idpmanager.Manager, serviceManager service.Manager, reverseProxyDomainManager *manager.Manager, reverseProxyAccessLogsManager accesslogs.Manager, proxyGRPCServer *nbgrpc.ProxyServiceServer, trustedHTTPProxies []netip.Prefix, rateLimiter *middleware.APIRateLimiter) (http.Handler, error) { // Register bypass paths for unauthenticated endpoints if err := bypass.AddBypassPath("/api/instance"); err != nil { @@ -94,34 +88,10 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks return nil, fmt.Errorf("failed to add bypass path: %w", err) } - var rateLimitingConfig *middleware.RateLimiterConfig - if os.Getenv(rateLimitingEnabledKey) == "true" { - rpm := 6 - if v := os.Getenv(rateLimitingRPMKey); v != "" { - value, err := strconv.Atoi(v) - if err != nil { - log.Warnf("parsing %s env var: %v, using default %d", rateLimitingRPMKey, err, rpm) - } else { - rpm = value - } - } - - burst := 500 - if v := os.Getenv(rateLimitingBurstKey); v != "" { - value, err := strconv.Atoi(v) - if err != nil { - log.Warnf("parsing %s env var: %v, using default %d", rateLimitingBurstKey, err, burst) - } else { - burst = value - } - } - - rateLimitingConfig = &middleware.RateLimiterConfig{ - RequestsPerMinute: float64(rpm), - Burst: burst, - CleanupInterval: 6 * time.Hour, - LimiterTTL: 24 * time.Hour, - } + if rateLimiter == nil { + log.Warn("NewAPIHandler: nil rate limiter, rate limiting disabled") + rateLimiter = middleware.NewAPIRateLimiter(nil) + rateLimiter.SetEnabled(false) } authMiddleware := middleware.NewAuthMiddleware( @@ -129,7 +99,7 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks accountManager.GetAccountIDFromUserAuth, accountManager.SyncUserJWTGroups, accountManager.GetUserFromUserAuth, - rateLimitingConfig, + rateLimiter, appMetrics.GetMeter(), ) diff --git a/management/server/http/middleware/auth_middleware.go b/management/server/http/middleware/auth_middleware.go index 8106380f2..6d075d9c2 100644 --- a/management/server/http/middleware/auth_middleware.go +++ b/management/server/http/middleware/auth_middleware.go @@ -43,14 +43,9 @@ func NewAuthMiddleware( ensureAccount EnsureAccountFunc, syncUserJWTGroups SyncUserJWTGroupsFunc, getUserFromUserAuth GetUserFromUserAuthFunc, - rateLimiterConfig *RateLimiterConfig, + rateLimiter *APIRateLimiter, meter metric.Meter, ) *AuthMiddleware { - var rateLimiter *APIRateLimiter - if rateLimiterConfig != nil { - rateLimiter = NewAPIRateLimiter(rateLimiterConfig) - } - var patUsageTracker *PATUsageTracker if meter != nil { var err error @@ -181,10 +176,8 @@ func (m *AuthMiddleware) checkPATFromRequest(r *http.Request, authHeaderParts [] m.patUsageTracker.IncrementUsage(token) } - if m.rateLimiter != nil && !isTerraformRequest(r) { - if !m.rateLimiter.Allow(token) { - return status.Errorf(status.TooManyRequests, "too many requests") - } + if !isTerraformRequest(r) && !m.rateLimiter.Allow(token) { + return status.Errorf(status.TooManyRequests, "too many requests") } ctx := r.Context() diff --git a/management/server/http/middleware/auth_middleware_test.go b/management/server/http/middleware/auth_middleware_test.go index f397c63a4..8f736fbfd 100644 --- a/management/server/http/middleware/auth_middleware_test.go +++ b/management/server/http/middleware/auth_middleware_test.go @@ -196,6 +196,8 @@ func TestAuthMiddleware_Handler(t *testing.T) { GetPATInfoFunc: mockGetAccountInfoFromPAT, } + disabledLimiter := NewAPIRateLimiter(nil) + disabledLimiter.SetEnabled(false) authMiddleware := NewAuthMiddleware( mockAuth, func(ctx context.Context, userAuth nbauth.UserAuth) (string, string, error) { @@ -207,7 +209,7 @@ func TestAuthMiddleware_Handler(t *testing.T) { func(ctx context.Context, userAuth nbauth.UserAuth) (*types.User, error) { return &types.User{}, nil }, - nil, + disabledLimiter, nil, ) @@ -266,7 +268,7 @@ func TestAuthMiddleware_RateLimiting(t *testing.T) { func(ctx context.Context, userAuth nbauth.UserAuth) (*types.User, error) { return &types.User{}, nil }, - rateLimitConfig, + NewAPIRateLimiter(rateLimitConfig), nil, ) @@ -318,7 +320,7 @@ func TestAuthMiddleware_RateLimiting(t *testing.T) { func(ctx context.Context, userAuth nbauth.UserAuth) (*types.User, error) { return &types.User{}, nil }, - rateLimitConfig, + NewAPIRateLimiter(rateLimitConfig), nil, ) @@ -361,7 +363,7 @@ func TestAuthMiddleware_RateLimiting(t *testing.T) { func(ctx context.Context, userAuth nbauth.UserAuth) (*types.User, error) { return &types.User{}, nil }, - rateLimitConfig, + NewAPIRateLimiter(rateLimitConfig), nil, ) @@ -405,7 +407,7 @@ func TestAuthMiddleware_RateLimiting(t *testing.T) { func(ctx context.Context, userAuth nbauth.UserAuth) (*types.User, error) { return &types.User{}, nil }, - rateLimitConfig, + NewAPIRateLimiter(rateLimitConfig), nil, ) @@ -469,7 +471,7 @@ func TestAuthMiddleware_RateLimiting(t *testing.T) { func(ctx context.Context, userAuth nbauth.UserAuth) (*types.User, error) { return &types.User{}, nil }, - rateLimitConfig, + NewAPIRateLimiter(rateLimitConfig), nil, ) @@ -528,7 +530,7 @@ func TestAuthMiddleware_RateLimiting(t *testing.T) { func(ctx context.Context, userAuth nbauth.UserAuth) (*types.User, error) { return &types.User{}, nil }, - rateLimitConfig, + NewAPIRateLimiter(rateLimitConfig), nil, ) @@ -583,7 +585,7 @@ func TestAuthMiddleware_RateLimiting(t *testing.T) { func(ctx context.Context, userAuth nbauth.UserAuth) (*types.User, error) { return &types.User{}, nil }, - rateLimitConfig, + NewAPIRateLimiter(rateLimitConfig), nil, ) @@ -670,6 +672,8 @@ func TestAuthMiddleware_Handler_Child(t *testing.T) { GetPATInfoFunc: mockGetAccountInfoFromPAT, } + disabledLimiter := NewAPIRateLimiter(nil) + disabledLimiter.SetEnabled(false) authMiddleware := NewAuthMiddleware( mockAuth, func(ctx context.Context, userAuth nbauth.UserAuth) (string, string, error) { @@ -681,7 +685,7 @@ func TestAuthMiddleware_Handler_Child(t *testing.T) { func(ctx context.Context, userAuth nbauth.UserAuth) (*types.User, error) { return &types.User{}, nil }, - nil, + disabledLimiter, nil, ) diff --git a/management/server/http/middleware/rate_limiter.go b/management/server/http/middleware/rate_limiter.go index 936b34319..bfd44afee 100644 --- a/management/server/http/middleware/rate_limiter.go +++ b/management/server/http/middleware/rate_limiter.go @@ -4,14 +4,27 @@ import ( "context" "net" "net/http" + "os" + "strconv" "sync" + "sync/atomic" "time" + log "github.com/sirupsen/logrus" "golang.org/x/time/rate" "github.com/netbirdio/netbird/shared/management/http/util" ) +const ( + RateLimitingEnabledEnv = "NB_API_RATE_LIMITING_ENABLED" + RateLimitingBurstEnv = "NB_API_RATE_LIMITING_BURST" + RateLimitingRPMEnv = "NB_API_RATE_LIMITING_RPM" + + defaultAPIRPM = 6 + defaultAPIBurst = 500 +) + // RateLimiterConfig holds configuration for the API rate limiter type RateLimiterConfig struct { // RequestsPerMinute defines the rate at which tokens are replenished @@ -34,6 +47,43 @@ func DefaultRateLimiterConfig() *RateLimiterConfig { } } +func RateLimiterConfigFromEnv() (cfg *RateLimiterConfig, enabled bool) { + rpm := defaultAPIRPM + if v := os.Getenv(RateLimitingRPMEnv); v != "" { + value, err := strconv.Atoi(v) + if err != nil { + log.Warnf("parsing %s env var: %v, using default %d", RateLimitingRPMEnv, err, rpm) + } else { + rpm = value + } + } + if rpm <= 0 { + log.Warnf("%s=%d is non-positive, using default %d", RateLimitingRPMEnv, rpm, defaultAPIRPM) + rpm = defaultAPIRPM + } + + burst := defaultAPIBurst + if v := os.Getenv(RateLimitingBurstEnv); v != "" { + value, err := strconv.Atoi(v) + if err != nil { + log.Warnf("parsing %s env var: %v, using default %d", RateLimitingBurstEnv, err, burst) + } else { + burst = value + } + } + if burst <= 0 { + log.Warnf("%s=%d is non-positive, using default %d", RateLimitingBurstEnv, burst, defaultAPIBurst) + burst = defaultAPIBurst + } + + return &RateLimiterConfig{ + RequestsPerMinute: float64(rpm), + Burst: burst, + CleanupInterval: 6 * time.Hour, + LimiterTTL: 24 * time.Hour, + }, os.Getenv(RateLimitingEnabledEnv) == "true" +} + // limiterEntry holds a rate limiter and its last access time type limiterEntry struct { limiter *rate.Limiter @@ -46,6 +96,7 @@ type APIRateLimiter struct { limiters map[string]*limiterEntry mu sync.RWMutex stopChan chan struct{} + enabled atomic.Bool } // NewAPIRateLimiter creates a new API rate limiter with the given configuration @@ -59,14 +110,53 @@ func NewAPIRateLimiter(config *RateLimiterConfig) *APIRateLimiter { limiters: make(map[string]*limiterEntry), stopChan: make(chan struct{}), } + rl.enabled.Store(true) go rl.cleanupLoop() return rl } +func (rl *APIRateLimiter) SetEnabled(enabled bool) { + rl.enabled.Store(enabled) +} + +func (rl *APIRateLimiter) Enabled() bool { + return rl.enabled.Load() +} + +func (rl *APIRateLimiter) UpdateConfig(config *RateLimiterConfig) { + if config == nil { + return + } + if config.RequestsPerMinute <= 0 || config.Burst <= 0 { + log.Warnf("UpdateConfig: ignoring invalid rpm=%v burst=%d", config.RequestsPerMinute, config.Burst) + return + } + + newRPS := rate.Limit(config.RequestsPerMinute / 60.0) + newBurst := config.Burst + + rl.mu.Lock() + rl.config.RequestsPerMinute = config.RequestsPerMinute + rl.config.Burst = newBurst + snapshot := make([]*rate.Limiter, 0, len(rl.limiters)) + for _, entry := range rl.limiters { + snapshot = append(snapshot, entry.limiter) + } + rl.mu.Unlock() + + for _, l := range snapshot { + l.SetLimit(newRPS) + l.SetBurst(newBurst) + } +} + // Allow checks if a request for the given key (token) is allowed func (rl *APIRateLimiter) Allow(key string) bool { + if !rl.enabled.Load() { + return true + } limiter := rl.getLimiter(key) return limiter.Allow() } @@ -74,6 +164,9 @@ func (rl *APIRateLimiter) Allow(key string) bool { // Wait blocks until the rate limiter allows another request for the given key // Returns an error if the context is canceled func (rl *APIRateLimiter) Wait(ctx context.Context, key string) error { + if !rl.enabled.Load() { + return nil + } limiter := rl.getLimiter(key) return limiter.Wait(ctx) } @@ -153,6 +246,10 @@ func (rl *APIRateLimiter) Reset(key string) { // Returns 429 Too Many Requests if the rate limit is exceeded. func (rl *APIRateLimiter) Middleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !rl.enabled.Load() { + next.ServeHTTP(w, r) + return + } clientIP := getClientIP(r) if !rl.Allow(clientIP) { util.WriteErrorResponse("rate limit exceeded, please try again later", http.StatusTooManyRequests, w) diff --git a/management/server/http/middleware/rate_limiter_test.go b/management/server/http/middleware/rate_limiter_test.go index 68f804e57..4b97d1874 100644 --- a/management/server/http/middleware/rate_limiter_test.go +++ b/management/server/http/middleware/rate_limiter_test.go @@ -1,8 +1,10 @@ package middleware import ( + "fmt" "net/http" "net/http/httptest" + "sync" "testing" "time" @@ -156,3 +158,172 @@ func TestAPIRateLimiter_Reset(t *testing.T) { // Should be allowed again assert.True(t, rl.Allow("test-key")) } + +func TestAPIRateLimiter_SetEnabled(t *testing.T) { + rl := NewAPIRateLimiter(&RateLimiterConfig{ + RequestsPerMinute: 60, + Burst: 1, + CleanupInterval: time.Minute, + LimiterTTL: time.Minute, + }) + defer rl.Stop() + + assert.True(t, rl.Allow("key")) + assert.False(t, rl.Allow("key"), "burst exhausted while enabled") + + rl.SetEnabled(false) + assert.False(t, rl.Enabled()) + for i := 0; i < 5; i++ { + assert.True(t, rl.Allow("key"), "disabled limiter must always allow") + } + + rl.SetEnabled(true) + assert.True(t, rl.Enabled()) + assert.False(t, rl.Allow("key"), "re-enabled limiter retains prior bucket state") +} + +func TestAPIRateLimiter_UpdateConfig(t *testing.T) { + rl := NewAPIRateLimiter(&RateLimiterConfig{ + RequestsPerMinute: 60, + Burst: 2, + CleanupInterval: time.Minute, + LimiterTTL: time.Minute, + }) + defer rl.Stop() + + assert.True(t, rl.Allow("k1")) + assert.True(t, rl.Allow("k1")) + assert.False(t, rl.Allow("k1"), "burst=2 exhausted") + + rl.UpdateConfig(&RateLimiterConfig{ + RequestsPerMinute: 60, + Burst: 10, + CleanupInterval: time.Minute, + LimiterTTL: time.Minute, + }) + + // New burst applies to existing keys in place; bucket refills up to new burst over time, + // but importantly newly-added keys use the updated config immediately. + assert.True(t, rl.Allow("k2")) + for i := 0; i < 9; i++ { + assert.True(t, rl.Allow("k2")) + } + assert.False(t, rl.Allow("k2"), "new burst=10 exhausted") +} + +func TestAPIRateLimiter_UpdateConfig_NilIgnored(t *testing.T) { + rl := NewAPIRateLimiter(&RateLimiterConfig{ + RequestsPerMinute: 60, + Burst: 1, + CleanupInterval: time.Minute, + LimiterTTL: time.Minute, + }) + defer rl.Stop() + + rl.UpdateConfig(nil) // must not panic or zero the config + + assert.True(t, rl.Allow("k")) + assert.False(t, rl.Allow("k")) +} + +func TestAPIRateLimiter_UpdateConfig_NonPositiveIgnored(t *testing.T) { + rl := NewAPIRateLimiter(&RateLimiterConfig{ + RequestsPerMinute: 60, + Burst: 1, + CleanupInterval: time.Minute, + LimiterTTL: time.Minute, + }) + defer rl.Stop() + + assert.True(t, rl.Allow("k")) + assert.False(t, rl.Allow("k")) + + rl.UpdateConfig(&RateLimiterConfig{RequestsPerMinute: 0, Burst: 0, CleanupInterval: time.Minute, LimiterTTL: time.Minute}) + rl.UpdateConfig(&RateLimiterConfig{RequestsPerMinute: -1, Burst: 5, CleanupInterval: time.Minute, LimiterTTL: time.Minute}) + rl.UpdateConfig(&RateLimiterConfig{RequestsPerMinute: 60, Burst: -1, CleanupInterval: time.Minute, LimiterTTL: time.Minute}) + + rl.Reset("k") + assert.True(t, rl.Allow("k")) + assert.False(t, rl.Allow("k"), "burst should still be 1 — invalid UpdateConfig calls were ignored") +} + +func TestAPIRateLimiter_ConcurrentAllowAndUpdate(t *testing.T) { + rl := NewAPIRateLimiter(&RateLimiterConfig{ + RequestsPerMinute: 600, + Burst: 10, + CleanupInterval: time.Minute, + LimiterTTL: time.Minute, + }) + defer rl.Stop() + + var wg sync.WaitGroup + stop := make(chan struct{}) + + for i := 0; i < 8; i++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + key := fmt.Sprintf("k%d", id) + for { + select { + case <-stop: + return + default: + rl.Allow(key) + } + } + }(i) + } + + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < 200; i++ { + select { + case <-stop: + return + default: + rl.UpdateConfig(&RateLimiterConfig{ + RequestsPerMinute: float64(30 + (i % 90)), + Burst: 1 + (i % 20), + CleanupInterval: time.Minute, + LimiterTTL: time.Minute, + }) + rl.SetEnabled(i%2 == 0) + } + } + }() + + time.Sleep(100 * time.Millisecond) + close(stop) + wg.Wait() +} + +func TestRateLimiterConfigFromEnv(t *testing.T) { + t.Setenv(RateLimitingEnabledEnv, "true") + t.Setenv(RateLimitingRPMEnv, "42") + t.Setenv(RateLimitingBurstEnv, "7") + + cfg, enabled := RateLimiterConfigFromEnv() + assert.True(t, enabled) + assert.Equal(t, float64(42), cfg.RequestsPerMinute) + assert.Equal(t, 7, cfg.Burst) + + t.Setenv(RateLimitingEnabledEnv, "false") + _, enabled = RateLimiterConfigFromEnv() + assert.False(t, enabled) + + t.Setenv(RateLimitingEnabledEnv, "") + t.Setenv(RateLimitingRPMEnv, "") + t.Setenv(RateLimitingBurstEnv, "") + cfg, enabled = RateLimiterConfigFromEnv() + assert.False(t, enabled) + assert.Equal(t, float64(defaultAPIRPM), cfg.RequestsPerMinute) + assert.Equal(t, defaultAPIBurst, cfg.Burst) + + t.Setenv(RateLimitingRPMEnv, "0") + t.Setenv(RateLimitingBurstEnv, "-5") + cfg, _ = RateLimiterConfigFromEnv() + assert.Equal(t, float64(defaultAPIRPM), cfg.RequestsPerMinute, "non-positive rpm must fall back to default") + assert.Equal(t, defaultAPIBurst, cfg.Burst, "non-positive burst must fall back to default") +} diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index 0203d6177..1a8b83c7e 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -135,7 +135,7 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee customZonesManager := zonesManager.NewManager(store, am, permissionsManager, "") zoneRecordsManager := recordsManager.NewManager(store, am, permissionsManager) - apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManager, resourcesManager, routersManager, groupsManager, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil) + apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManager, resourcesManager, routersManager, groupsManager, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil, nil) if err != nil { t.Fatalf("Failed to create API handler: %v", err) } @@ -264,7 +264,7 @@ func BuildApiBlackBoxWithDBStateAndPeerChannel(t testing_tools.TB, sqlFile strin customZonesManager := zonesManager.NewManager(store, am, permissionsManager, "") zoneRecordsManager := recordsManager.NewManager(store, am, permissionsManager) - apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManager, resourcesManager, routersManager, groupsManager, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil) + apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManager, resourcesManager, routersManager, groupsManager, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil, nil) if err != nil { t.Fatalf("Failed to create API handler: %v", err) } From fa0d58d093a883ddd6183c52e1b92e0888b2bafd Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Thu, 23 Apr 2026 16:01:54 +0200 Subject: [PATCH 342/374] [management] exclude peers for expiration job that have already been marked expired (#5970) --- management/server/account_test.go | 23 +++++++++++ management/server/management_proto_test.go | 4 +- management/server/peer.go | 4 ++ management/server/store/sql_store.go | 2 +- management/server/store/sql_store_test.go | 40 ++++++++++++++----- .../testdata/store_with_expired_peers.sql | 1 + 6 files changed, 62 insertions(+), 12 deletions(-) diff --git a/management/server/account_test.go b/management/server/account_test.go index 4453d064e..bcc73d52f 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -2311,6 +2311,29 @@ func TestAccount_GetExpiredPeers(t *testing.T) { } } +func TestGetExpiredPeers_SkipsAlreadyExpired(t *testing.T) { + ctx := context.Background() + + testStore, cleanUp, err := store.NewTestStoreFromSQL(ctx, "testdata/store_with_expired_peers.sql", t.TempDir()) + t.Cleanup(cleanUp) + require.NoError(t, err) + + accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + + // Verify the already-expired peer is excluded at the store level + peers, err := testStore.GetAccountPeersWithExpiration(ctx, store.LockingStrengthNone, accountID) + require.NoError(t, err) + + for _, peer := range peers { + assert.NotEqual(t, "cg05lnblo1hkg2j514p0", peer.ID, "already expired peer should be excluded by the store query") + assert.False(t, peer.Status.LoginExpired, "returned peers should not already be marked as login expired") + } + + // Only the non-expired peer with expiration enabled should be returned + require.Len(t, peers, 1) + assert.Equal(t, "notexpired01", peers[0].ID) +} + func TestAccount_GetInactivePeers(t *testing.T) { type test struct { name string diff --git a/management/server/management_proto_test.go b/management/server/management_proto_test.go index 4e6eb0a33..18d85315d 100644 --- a/management/server/management_proto_test.go +++ b/management/server/management_proto_test.go @@ -267,8 +267,8 @@ func Test_SyncProtocol(t *testing.T) { } // expired peers come separately. - if len(networkMap.GetOfflinePeers()) != 1 { - t.Fatal("expecting SyncResponse to have NetworkMap with 1 offline peer") + if len(networkMap.GetOfflinePeers()) != 2 { + t.Fatal("expecting SyncResponse to have NetworkMap with 2 offline peer") } expiredPeerPubKey := "RlSy2vzoG2HyMBTUImXOiVhCBiiBa5qD5xzMxkiFDW4=" diff --git a/management/server/peer.go b/management/server/peer.go index a02e34e0d..a95ae17a3 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -1405,6 +1405,10 @@ func (am *DefaultAccountManager) getExpiredPeers(ctx context.Context, accountID var peers []*nbpeer.Peer for _, peer := range peersWithExpiry { + if peer.Status.LoginExpired { + continue + } + expired, _ := peer.LoginExpired(settings.PeerLoginExpiration) if expired { peers = append(peers, peer) diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 8189548b7..0ff57b752 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -3310,7 +3310,7 @@ func (s *SqlStore) GetAccountPeersWithExpiration(ctx context.Context, lockStreng var peers []*nbpeer.Peer result := tx. - Where("login_expiration_enabled = ? AND user_id IS NOT NULL AND user_id != ''", true). + Where("login_expiration_enabled = ? AND peer_status_login_expired != ? AND user_id IS NOT NULL AND user_id != ''", true, true). Find(&peers, accountIDCondition, accountID) if err := result.Error; err != nil { log.WithContext(ctx).Errorf("failed to get peers with expiration from the store: %s", result.Error) diff --git a/management/server/store/sql_store_test.go b/management/server/store/sql_store_test.go index 8ea6c2ae5..5a5616abc 100644 --- a/management/server/store/sql_store_test.go +++ b/management/server/store/sql_store_test.go @@ -2729,7 +2729,7 @@ func TestSqlStore_GetAccountPeers(t *testing.T) { { name: "should retrieve peers for an existing account ID", accountID: "bf1c8084-ba50-4ce7-9439-34653001fc3b", - expectedCount: 4, + expectedCount: 5, }, { name: "should return no peers for a non-existing account ID", @@ -2751,7 +2751,7 @@ func TestSqlStore_GetAccountPeers(t *testing.T) { name: "should filter peers by partial name", accountID: "bf1c8084-ba50-4ce7-9439-34653001fc3b", nameFilter: "host", - expectedCount: 3, + expectedCount: 4, }, { name: "should filter peers by ip", @@ -2777,14 +2777,16 @@ func TestSqlStore_GetAccountPeersWithExpiration(t *testing.T) { require.NoError(t, err) tests := []struct { - name string - accountID string - expectedCount int + name string + accountID string + expectedCount int + expectedPeerIDs []string }{ { - name: "should retrieve peers with expiration for an existing account ID", - accountID: "bf1c8084-ba50-4ce7-9439-34653001fc3b", - expectedCount: 1, + name: "should retrieve only non-expired peers with expiration enabled", + accountID: "bf1c8084-ba50-4ce7-9439-34653001fc3b", + expectedCount: 1, + expectedPeerIDs: []string{"notexpired01"}, }, { name: "should return no peers with expiration for a non-existing account ID", @@ -2803,10 +2805,30 @@ func TestSqlStore_GetAccountPeersWithExpiration(t *testing.T) { peers, err := store.GetAccountPeersWithExpiration(context.Background(), LockingStrengthNone, tt.accountID) require.NoError(t, err) require.Len(t, peers, tt.expectedCount) + for i, peer := range peers { + assert.Equal(t, tt.expectedPeerIDs[i], peer.ID) + } }) } } +func TestSqlStore_GetAccountPeersWithExpiration_ExcludesAlreadyExpired(t *testing.T) { + store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/store_with_expired_peers.sql", t.TempDir()) + t.Cleanup(cleanup) + require.NoError(t, err) + + accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + + peers, err := store.GetAccountPeersWithExpiration(context.Background(), LockingStrengthNone, accountID) + require.NoError(t, err) + + // Verify the already-expired peer (cg05lnblo1hkg2j514p0) is not returned + for _, peer := range peers { + assert.NotEqual(t, "cg05lnblo1hkg2j514p0", peer.ID, "already expired peer should not be returned") + assert.False(t, peer.Status.LoginExpired, "returned peers should not have LoginExpired set") + } +} + func TestSqlStore_GetAccountPeersWithInactivity(t *testing.T) { store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/store_with_expired_peers.sql", t.TempDir()) t.Cleanup(cleanup) @@ -2887,7 +2909,7 @@ func TestSqlStore_GetUserPeers(t *testing.T) { name: "should retrieve peers for another valid account ID and user ID", accountID: "bf1c8084-ba50-4ce7-9439-34653001fc3b", userID: "edafee4e-63fb-11ec-90d6-0242ac120003", - expectedCount: 2, + expectedCount: 3, }, { name: "should return no peers for existing account ID with empty user ID", diff --git a/management/server/testdata/store_with_expired_peers.sql b/management/server/testdata/store_with_expired_peers.sql index dfcaeee6f..189bd1262 100644 --- a/management/server/testdata/store_with_expired_peers.sql +++ b/management/server/testdata/store_with_expired_peers.sql @@ -31,6 +31,7 @@ INSERT INTO peers VALUES('cfvprsrlo1hqoo49ohog','bf1c8084-ba50-4ce7-9439-3465300 INSERT INTO peers VALUES('cg05lnblo1hkg2j514p0','bf1c8084-ba50-4ce7-9439-34653001fc3b','RlSy2vzoG2HyMBTUImXOiVhCBiiBa5qD5xzMxkiFDW4=','','"100.64.39.54"','expiredhost','linux','Linux','22.04','x86_64','Ubuntu','','development','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'expiredhost','expiredhost','2023-03-02 09:19:57.276717255+01:00',0,1,0,'edafee4e-63fb-11ec-90d6-0242ac120003','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMbK5ZXJsGOOWoBT4OmkPtgdPZe2Q7bDuS/zjn2CZxhK',0,1,0,'2023-03-02 09:14:21.791679181+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); INSERT INTO peers VALUES('cg3161rlo1hs9cq94gdg','bf1c8084-ba50-4ce7-9439-34653001fc3b','mVABSKj28gv+JRsf7e0NEGKgSOGTfU/nPB2cpuG56HU=','','"100.64.117.96"','testhost','linux','Linux','22.04','x86_64','Ubuntu','','development','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'testhost','testhost','2023-03-06 18:21:27.252010027+01:00',0,0,0,'edafee4e-63fb-11ec-90d6-0242ac120003','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINWvvUkFFcrj48CWTkNUb/do/n52i1L5dH4DhGu+4ZuM',0,0,0,'2023-03-07 09:02:47.442857106+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); INSERT INTO peers VALUES('csrnkiq7qv9d8aitqd50','bf1c8084-ba50-4ce7-9439-34653001fc3b','nVABSKj28gv+JRsf7e0NEGKgSOGTfU/nPB2cpuG56HX=','','"100.64.117.97"','testhost','linux','Linux','22.04','x86_64','Ubuntu','','development','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'testhost','testhost-1','2023-03-06 18:21:27.252010027+01:00',0,0,0,'f4f6d672-63fb-11ec-90d6-0242ac120003','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINWvvUkFFcrj48CWTkNUb/do/n52i1L5dH4DhGu+4ZuM',0,0,1,'2023-03-07 09:02:47.442857106+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); +INSERT INTO peers VALUES('notexpired01','bf1c8084-ba50-4ce7-9439-34653001fc3b','oVABSKj28gv+JRsf7e0NEGKgSOGTfU/nPB2cpuG56HY=','','"100.64.117.98"','activehost','linux','Linux','22.04','x86_64','Ubuntu','','development','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'activehost','activehost','2023-03-06 18:21:27.252010027+01:00',0,0,0,'edafee4e-63fb-11ec-90d6-0242ac120003','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINWvvUkFFcrj48CWTkNUb/do/n52i1L5dH4DhGu+4ZuM',0,1,0,'2023-03-07 09:02:47.442857106+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); INSERT INTO users VALUES('f4f6d672-63fb-11ec-90d6-0242ac120003','bf1c8084-ba50-4ce7-9439-34653001fc3b','user',0,0,'','[]',0,NULL,'2024-10-02 17:00:32.528196+02:00','api',0,''); INSERT INTO users VALUES('edafee4e-63fb-11ec-90d6-0242ac120003','bf1c8084-ba50-4ce7-9439-34653001fc3b','admin',0,0,'','[]',0,NULL,'2024-10-02 17:00:32.528196+02:00','api',0,''); INSERT INTO installations VALUES(1,''); From c07c726ea7a7d6bc9b34c1a5dc138785c7bd1214 Mon Sep 17 00:00:00 2001 From: alsruf36 <33592711+alsruf36@users.noreply.github.com> Date: Fri, 24 Apr 2026 01:20:54 +0900 Subject: [PATCH 343/374] [proxy] Set session cookie path to root (#5915) --- proxy/internal/auth/middleware.go | 1 + proxy/internal/auth/middleware_test.go | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/proxy/internal/auth/middleware.go b/proxy/internal/auth/middleware.go index 055e4510f..3b383f8b4 100644 --- a/proxy/internal/auth/middleware.go +++ b/proxy/internal/auth/middleware.go @@ -433,6 +433,7 @@ func setSessionCookie(w http.ResponseWriter, token string, expiration time.Durat http.SetCookie(w, &http.Cookie{ Name: auth.SessionCookieName, Value: token, + Path: "/", HttpOnly: true, Secure: true, SameSite: http.SameSiteLaxMode, diff --git a/proxy/internal/auth/middleware_test.go b/proxy/internal/auth/middleware_test.go index 16d09800c..2c93d7912 100644 --- a/proxy/internal/auth/middleware_test.go +++ b/proxy/internal/auth/middleware_test.go @@ -391,6 +391,15 @@ func TestProtect_SchemeAuthRedirectsWithCookie(t *testing.T) { assert.Equal(t, http.SameSiteLaxMode, sessionCookie.SameSite) } +func TestSetSessionCookieHasRootPath(t *testing.T) { + w := httptest.NewRecorder() + setSessionCookie(w, "test-token", time.Hour) + + cookies := w.Result().Cookies() + require.Len(t, cookies, 1) + assert.Equal(t, "/", cookies[0].Path, "session cookie must be scoped to root so it applies to all paths") +} + func TestProtect_FailedAuthDoesNotSetCookie(t *testing.T) { mw := NewMiddleware(log.StandardLogger(), nil, nil) kp := generateTestKeyPair(t) From f732b01a055d4de1fd736f2e8b42196c31f291a5 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Thu, 23 Apr 2026 21:19:21 +0200 Subject: [PATCH 344/374] [management] unify peer-update test timeout via constant (#5952) peerShouldReceiveUpdate waited 500ms for the expected update message, and every outer wrapper across the management/server test suite paired it with a 1s goroutine-drain timeout. Both were too tight for slower CI runners (MySQL, FreeBSD, loaded sqlite), producing intermittent "Timed out waiting for update message" failures in tests like TestDNSAccountPeersUpdate, TestPeerAccountPeersUpdate, and TestNameServerAccountPeersUpdate. Introduce peerUpdateTimeout (5s) next to the helper and use it both in the helper and in every outer wrapper so the two timeouts stay in sync. Only runs down on failure; passing tests return as soon as the channel delivers, so there is no slowdown on green runs. --- management/server/account_test.go | 9 ++++++++- management/server/dns_test.go | 6 +++--- management/server/group_test.go | 14 +++++++------- management/server/nameserver_test.go | 4 ++-- management/server/peer_test.go | 16 ++++++++-------- management/server/policy_test.go | 14 +++++++------- management/server/posture_checks_test.go | 10 +++++----- management/server/route_test.go | 12 ++++++------ management/server/user_test.go | 4 ++-- 9 files changed, 48 insertions(+), 41 deletions(-) diff --git a/management/server/account_test.go b/management/server/account_test.go index bcc73d52f..bef791d77 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -3253,6 +3253,13 @@ func setupNetworkMapTest(t *testing.T) (*DefaultAccountManager, *update_channel. return manager, updateManager, account, peer1, peer2, peer3 } +// peerUpdateTimeout bounds how long peerShouldReceiveUpdate and its outer +// wrappers wait for an expected update message. Sized for slow CI runners +// (MySQL, FreeBSD, loaded sqlite) where the channel publish can take +// seconds. Only runs down on failure; passing tests return immediately +// when the channel delivers. +const peerUpdateTimeout = 5 * time.Second + func peerShouldNotReceiveUpdate(t *testing.T, updateMessage <-chan *network_map.UpdateMessage) { t.Helper() select { @@ -3271,7 +3278,7 @@ func peerShouldReceiveUpdate(t *testing.T, updateMessage <-chan *network_map.Upd if msg == nil { t.Errorf("Received nil update message, expected valid message") } - case <-time.After(500 * time.Millisecond): + case <-time.After(peerUpdateTimeout): t.Error("Timed out waiting for update message") } } diff --git a/management/server/dns_test.go b/management/server/dns_test.go index 0e37a3b22..c443223c6 100644 --- a/management/server/dns_test.go +++ b/management/server/dns_test.go @@ -458,7 +458,7 @@ func TestDNSAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -478,7 +478,7 @@ func TestDNSAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -518,7 +518,7 @@ func TestDNSAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) diff --git a/management/server/group_test.go b/management/server/group_test.go index fa818e532..5821b90a3 100644 --- a/management/server/group_test.go +++ b/management/server/group_test.go @@ -620,7 +620,7 @@ func TestGroupAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -638,7 +638,7 @@ func TestGroupAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -656,7 +656,7 @@ func TestGroupAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -689,7 +689,7 @@ func TestGroupAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -730,7 +730,7 @@ func TestGroupAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -757,7 +757,7 @@ func TestGroupAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -804,7 +804,7 @@ func TestGroupAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) diff --git a/management/server/nameserver_test.go b/management/server/nameserver_test.go index d10d4464f..b2c8300d6 100644 --- a/management/server/nameserver_test.go +++ b/management/server/nameserver_test.go @@ -1087,7 +1087,7 @@ func TestNameServerAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -1105,7 +1105,7 @@ func TestNameServerAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) diff --git a/management/server/peer_test.go b/management/server/peer_test.go index 6f8d924fd..050baa595 100644 --- a/management/server/peer_test.go +++ b/management/server/peer_test.go @@ -1907,7 +1907,7 @@ func TestPeerAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -1929,7 +1929,7 @@ func TestPeerAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -1994,7 +1994,7 @@ func TestPeerAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -2012,7 +2012,7 @@ func TestPeerAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -2058,7 +2058,7 @@ func TestPeerAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -2076,7 +2076,7 @@ func TestPeerAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -2113,7 +2113,7 @@ func TestPeerAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -2131,7 +2131,7 @@ func TestPeerAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) diff --git a/management/server/policy_test.go b/management/server/policy_test.go index a3f987732..a553b7d05 100644 --- a/management/server/policy_test.go +++ b/management/server/policy_test.go @@ -1231,7 +1231,7 @@ func TestPolicyAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -1263,7 +1263,7 @@ func TestPolicyAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -1294,7 +1294,7 @@ func TestPolicyAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -1314,7 +1314,7 @@ func TestPolicyAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -1355,7 +1355,7 @@ func TestPolicyAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -1373,7 +1373,7 @@ func TestPolicyAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } @@ -1393,7 +1393,7 @@ func TestPolicyAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) diff --git a/management/server/posture_checks_test.go b/management/server/posture_checks_test.go index 7f0a48dc7..394f0d896 100644 --- a/management/server/posture_checks_test.go +++ b/management/server/posture_checks_test.go @@ -244,7 +244,7 @@ func TestPostureCheckAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -273,7 +273,7 @@ func TestPostureCheckAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -292,7 +292,7 @@ func TestPostureCheckAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -395,7 +395,7 @@ func TestPostureCheckAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -438,7 +438,7 @@ func TestPostureCheckAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) diff --git a/management/server/route_test.go b/management/server/route_test.go index 91b2cf982..91bd8b050 100644 --- a/management/server/route_test.go +++ b/management/server/route_test.go @@ -2070,7 +2070,7 @@ func TestRouteAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } @@ -2107,7 +2107,7 @@ func TestRouteAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -2127,7 +2127,7 @@ func TestRouteAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -2145,7 +2145,7 @@ func TestRouteAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -2185,7 +2185,7 @@ func TestRouteAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -2225,7 +2225,7 @@ func TestRouteAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) diff --git a/management/server/user_test.go b/management/server/user_test.go index 8fdfbd633..c77ea53d1 100644 --- a/management/server/user_test.go +++ b/management/server/user_test.go @@ -1586,7 +1586,7 @@ func TestUserAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) @@ -1609,7 +1609,7 @@ func TestUserAccountPeersUpdate(t *testing.T) { select { case <-done: - case <-time.After(time.Second): + case <-time.After(peerUpdateTimeout): t.Error("timeout waiting for peerShouldReceiveUpdate") } }) From d6f08e48408a7a09995076c4bd595832555eb407 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Fri, 24 Apr 2026 13:13:27 +0200 Subject: [PATCH 345/374] [misc] Update sign pipeline version (#5981) --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5ada1033d..1d29c8406 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -9,7 +9,7 @@ on: pull_request: env: - SIGN_PIPE_VER: "v0.1.2" + SIGN_PIPE_VER: "v0.1.3" GORELEASER_VER: "v2.14.3" PRODUCT_NAME: "NetBird" COPYRIGHT: "NetBird GmbH" From 34167c8a160d66668eb5592fb2589b13adcd8ee0 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Mon, 27 Apr 2026 10:55:38 +0200 Subject: [PATCH 346/374] [misc] Update release pipeline version (#5995) --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 1d29c8406..826c05ff3 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -9,7 +9,7 @@ on: pull_request: env: - SIGN_PIPE_VER: "v0.1.3" + SIGN_PIPE_VER: "v0.1.4" GORELEASER_VER: "v2.14.3" PRODUCT_NAME: "NetBird" COPYRIGHT: "NetBird GmbH" From 154b81645a5922d0fa1fbfff6785798a14640e02 Mon Sep 17 00:00:00 2001 From: Vlad <4941176+crn4@users.noreply.github.com> Date: Mon, 27 Apr 2026 16:02:54 +0200 Subject: [PATCH 347/374] [management] removed legacy network map code (#5565) --- .../network_map/controller/controller.go | 269 +- .../controllers/network_map/interface.go | 3 - management/server/account_test.go | 98 +- .../http/handlers/peers/peers_handler.go | 2 +- management/server/peer_test.go | 11 - management/server/route_test.go | 245 -- management/server/store/sql_store.go | 2 - management/server/types/account.go | 403 --- management/server/types/account_test.go | 399 --- management/server/types/holder.go | 47 - management/server/types/networkmap.go | 67 - .../types/networkmap_comparison_test.go | 592 ----- .../server/types/networkmap_components.go | 2 - .../types/networkmap_components_test.go | 787 ++++++ .../server/types/networkmap_golden_test.go | 967 ------- management/server/types/networkmapbuilder.go | 2317 ----------------- 16 files changed, 807 insertions(+), 5404 deletions(-) delete mode 100644 management/server/types/holder.go delete mode 100644 management/server/types/networkmap.go delete mode 100644 management/server/types/networkmap_comparison_test.go create mode 100644 management/server/types/networkmap_components_test.go delete mode 100644 management/server/types/networkmap_golden_test.go delete mode 100644 management/server/types/networkmapbuilder.go diff --git a/management/internals/controllers/network_map/controller/controller.go b/management/internals/controllers/network_map/controller/controller.go index 4b414df6f..4b47ecaa0 100644 --- a/management/internals/controllers/network_map/controller/controller.go +++ b/management/internals/controllers/network_map/controller/controller.go @@ -7,7 +7,6 @@ import ( "os" "slices" "strconv" - "strings" "sync" "sync/atomic" "time" @@ -16,11 +15,9 @@ import ( "golang.org/x/exp/maps" "golang.org/x/mod/semver" - nbdns "github.com/netbirdio/netbird/dns" "github.com/netbirdio/netbird/management/internals/controllers/network_map" "github.com/netbirdio/netbird/management/internals/controllers/network_map/controller/cache" "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral" - "github.com/netbirdio/netbird/management/internals/modules/zones" "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/internals/shared/grpc" "github.com/netbirdio/netbird/management/server/account" @@ -58,13 +55,6 @@ type Controller struct { proxyController port_forwarding.Controller integratedPeerValidator integrated_validator.IntegratedValidator - - holder *types.Holder - - expNewNetworkMap bool - expNewNetworkMapAIDs map[string]struct{} - - compactedNetworkMap bool } type bufferUpdate struct { @@ -81,29 +71,6 @@ func NewController(ctx context.Context, store store.Store, metrics telemetry.App log.Fatal(fmt.Errorf("error creating metrics: %w", err)) } - newNetworkMapBuilder, err := strconv.ParseBool(os.Getenv(network_map.EnvNewNetworkMapBuilder)) - if err != nil { - log.WithContext(ctx).Warnf("failed to parse %s, using default value false: %v", network_map.EnvNewNetworkMapBuilder, err) - newNetworkMapBuilder = false - } - - compactedNetworkMap := true - compactedEnv := os.Getenv(types.EnvNewNetworkMapCompacted) - parsedCompactedNmap, err := strconv.ParseBool(compactedEnv) - if err != nil && len(compactedEnv) > 0 { - log.WithContext(ctx).Warnf("failed to parse %s, using default value true: %v", types.EnvNewNetworkMapCompacted, err) - } - if err == nil && !parsedCompactedNmap { - log.WithContext(ctx).Info("disabling compacted mode") - compactedNetworkMap = false - } - - ids := strings.Split(os.Getenv(network_map.EnvNewNetworkMapAccounts), ",") - expIDs := make(map[string]struct{}, len(ids)) - for _, id := range ids { - expIDs[id] = struct{}{} - } - return &Controller{ repo: newRepository(store), metrics: nMetrics, @@ -117,12 +84,6 @@ func NewController(ctx context.Context, store store.Store, metrics telemetry.App proxyController: proxyController, EphemeralPeersManager: ephemeralPeersManager, - - holder: types.NewHolder(), - expNewNetworkMap: newNetworkMapBuilder, - expNewNetworkMapAIDs: expIDs, - - compactedNetworkMap: compactedNetworkMap, } } @@ -153,17 +114,9 @@ func (c *Controller) CountStreams() int { func (c *Controller) sendUpdateAccountPeers(ctx context.Context, accountID string) error { log.WithContext(ctx).Tracef("updating peers for account %s from %s", accountID, util.GetCallerName()) - var ( - account *types.Account - err error - ) - if c.experimentalNetworkMap(accountID) { - account = c.getAccountFromHolderOrInit(ctx, accountID) - } else { - account, err = c.requestBuffer.GetAccountWithBackpressure(ctx, accountID) - if err != nil { - return fmt.Errorf("failed to get account: %v", err) - } + account, err := c.requestBuffer.GetAccountWithBackpressure(ctx, accountID) + if err != nil { + return fmt.Errorf("failed to get account: %v", err) } globalStart := time.Now() @@ -197,10 +150,6 @@ func (c *Controller) sendUpdateAccountPeers(ctx context.Context, accountID strin routers := account.GetResourceRoutersMap() groupIDToUserIDs := account.GetActiveGroupUsers() - if c.experimentalNetworkMap(accountID) { - c.initNetworkMapBuilderIfNeeded(account, approvedPeersMap) - } - proxyNetworkMaps, err := c.proxyController.GetProxyNetworkMapsAll(ctx, accountID, account.Peers) if err != nil { log.WithContext(ctx).Errorf("failed to get proxy network maps: %v", err) @@ -243,16 +192,7 @@ func (c *Controller) sendUpdateAccountPeers(ctx context.Context, accountID strin c.metrics.CountCalcPostureChecksDuration(time.Since(start)) start = time.Now() - var remotePeerNetworkMap *types.NetworkMap - - switch { - case c.experimentalNetworkMap(accountID): - remotePeerNetworkMap = c.getPeerNetworkMapExp(ctx, p.AccountID, p.ID, approvedPeersMap, peersCustomZone, accountZones, c.accountManagerMetrics) - case c.compactedNetworkMap: - remotePeerNetworkMap = account.GetPeerNetworkMapFromComponents(ctx, p.ID, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) - default: - remotePeerNetworkMap = account.GetPeerNetworkMap(ctx, p.ID, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) - } + remotePeerNetworkMap := account.GetPeerNetworkMapFromComponents(ctx, p.ID, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) c.metrics.CountCalcPeerNetworkMapDuration(time.Since(start)) @@ -318,10 +258,6 @@ func (c *Controller) bufferSendUpdateAccountPeers(ctx context.Context, accountID // UpdatePeers updates all peers that belong to an account. // Should be called when changes have to be synced to peers. func (c *Controller) UpdateAccountPeers(ctx context.Context, accountID string) error { - if err := c.RecalculateNetworkMapCache(ctx, accountID); err != nil { - return fmt.Errorf("recalculate network map cache: %v", err) - } - return c.sendUpdateAccountPeers(ctx, accountID) } @@ -371,16 +307,7 @@ func (c *Controller) UpdateAccountPeer(ctx context.Context, accountId string, pe return err } - var remotePeerNetworkMap *types.NetworkMap - - switch { - case c.experimentalNetworkMap(accountId): - remotePeerNetworkMap = c.getPeerNetworkMapExp(ctx, peer.AccountID, peer.ID, approvedPeersMap, peersCustomZone, accountZones, c.accountManagerMetrics) - case c.compactedNetworkMap: - remotePeerNetworkMap = account.GetPeerNetworkMapFromComponents(ctx, peerId, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) - default: - remotePeerNetworkMap = account.GetPeerNetworkMap(ctx, peerId, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) - } + remotePeerNetworkMap := account.GetPeerNetworkMapFromComponents(ctx, peerId, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) proxyNetworkMap, ok := proxyNetworkMaps[peer.ID] if ok { @@ -451,17 +378,9 @@ func (c *Controller) GetValidatedPeerWithMap(ctx context.Context, isRequiresAppr return peer, emptyMap, nil, 0, nil } - var ( - account *types.Account - err error - ) - if c.experimentalNetworkMap(accountID) { - account = c.getAccountFromHolderOrInit(ctx, accountID) - } else { - account, err = c.requestBuffer.GetAccountWithBackpressure(ctx, accountID) - if err != nil { - return nil, nil, nil, 0, err - } + account, err := c.requestBuffer.GetAccountWithBackpressure(ctx, accountID) + if err != nil { + return nil, nil, nil, 0, err } account.InjectProxyPolicies(ctx) @@ -493,20 +412,10 @@ func (c *Controller) GetValidatedPeerWithMap(ctx context.Context, isRequiresAppr return nil, nil, nil, 0, err } - var networkMap *types.NetworkMap - - if c.experimentalNetworkMap(accountID) { - networkMap = c.getPeerNetworkMapExp(ctx, peer.AccountID, peer.ID, approvedPeersMap, peersCustomZone, accountZones, c.accountManagerMetrics) - } else { - resourcePolicies := account.GetResourcePoliciesMap() - routers := account.GetResourceRoutersMap() - groupIDToUserIDs := account.GetActiveGroupUsers() - if c.compactedNetworkMap { - networkMap = account.GetPeerNetworkMapFromComponents(ctx, peer.ID, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) - } else { - networkMap = account.GetPeerNetworkMap(ctx, peer.ID, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) - } - } + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + networkMap := account.GetPeerNetworkMapFromComponents(ctx, peer.ID, peersCustomZone, accountZones, approvedPeersMap, resourcePolicies, routers, c.accountManagerMetrics, groupIDToUserIDs) proxyNetworkMap, ok := proxyNetworkMaps[peer.ID] if ok { @@ -518,108 +427,6 @@ func (c *Controller) GetValidatedPeerWithMap(ctx context.Context, isRequiresAppr return peer, networkMap, postureChecks, dnsFwdPort, nil } -func (c *Controller) initNetworkMapBuilderIfNeeded(account *types.Account, validatedPeers map[string]struct{}) { - c.enrichAccountFromHolder(account) - account.InitNetworkMapBuilderIfNeeded(validatedPeers) -} - -func (c *Controller) getPeerNetworkMapExp( - ctx context.Context, - accountId string, - peerId string, - validatedPeers map[string]struct{}, - peersCustomZone nbdns.CustomZone, - accountZones []*zones.Zone, - metrics *telemetry.AccountManagerMetrics, -) *types.NetworkMap { - account := c.getAccountFromHolderOrInit(ctx, accountId) - if account == nil { - log.WithContext(ctx).Warnf("account %s not found in holder when getting peer network map", accountId) - return &types.NetworkMap{ - Network: &types.Network{}, - } - } - - return account.GetPeerNetworkMapExp(ctx, peerId, peersCustomZone, accountZones, validatedPeers, metrics) -} - -func (c *Controller) onPeersAddedUpdNetworkMapCache(account *types.Account, peerIds ...string) { - c.enrichAccountFromHolder(account) - account.OnPeersAddedUpdNetworkMapCache(peerIds...) -} - -func (c *Controller) onPeerDeletedUpdNetworkMapCache(account *types.Account, peerId string) error { - c.enrichAccountFromHolder(account) - return account.OnPeerDeletedUpdNetworkMapCache(peerId) -} - -func (c *Controller) UpdatePeerInNetworkMapCache(accountId string, peer *nbpeer.Peer) { - account := c.getAccountFromHolder(accountId) - if account == nil { - return - } - account.UpdatePeerInNetworkMapCache(peer) -} - -func (c *Controller) recalculateNetworkMapCache(account *types.Account, validatedPeers map[string]struct{}) { - account.RecalculateNetworkMapCache(validatedPeers) - c.updateAccountInHolder(account) -} - -func (c *Controller) RecalculateNetworkMapCache(ctx context.Context, accountId string) error { - if c.experimentalNetworkMap(accountId) { - account, err := c.requestBuffer.GetAccountWithBackpressure(ctx, accountId) - if err != nil { - return err - } - validatedPeers, err := c.integratedPeerValidator.GetValidatedPeers(ctx, account.Id, maps.Values(account.Groups), maps.Values(account.Peers), account.Settings.Extra) - if err != nil { - log.WithContext(ctx).Errorf("failed to get validate peers: %v", err) - return err - } - c.recalculateNetworkMapCache(account, validatedPeers) - } - return nil -} - -func (c *Controller) experimentalNetworkMap(accountId string) bool { - _, ok := c.expNewNetworkMapAIDs[accountId] - return c.expNewNetworkMap || ok -} - -func (c *Controller) enrichAccountFromHolder(account *types.Account) { - a := c.holder.GetAccount(account.Id) - if a == nil { - c.holder.AddAccount(account) - return - } - account.NetworkMapCache = a.NetworkMapCache - if account.NetworkMapCache == nil { - return - } - c.holder.AddAccount(account) -} - -func (c *Controller) getAccountFromHolder(accountID string) *types.Account { - return c.holder.GetAccount(accountID) -} - -func (c *Controller) getAccountFromHolderOrInit(ctx context.Context, accountID string) *types.Account { - a := c.holder.GetAccount(accountID) - if a != nil { - return a - } - account, err := c.holder.LoadOrStoreFunc(ctx, accountID, c.requestBuffer.GetAccountWithBackpressure) - if err != nil { - return nil - } - return account -} - -func (c *Controller) updateAccountInHolder(account *types.Account) { - c.holder.AddAccount(account) -} - // GetDNSDomain returns the configured dnsDomain func (c *Controller) GetDNSDomain(settings *types.Settings) string { if settings == nil { @@ -756,16 +563,7 @@ func isPeerInPolicySourceGroups(account *types.Account, peerID string, policy *t } func (c *Controller) OnPeersUpdated(ctx context.Context, accountID string, peerIDs []string) error { - peers, err := c.repo.GetPeersByIDs(ctx, accountID, peerIDs) - if err != nil { - return fmt.Errorf("failed to get peers by ids: %w", err) - } - - for _, peer := range peers { - c.UpdatePeerInNetworkMapCache(accountID, peer) - } - - err = c.bufferSendUpdateAccountPeers(ctx, accountID) + err := c.bufferSendUpdateAccountPeers(ctx, accountID) if err != nil { log.WithContext(ctx).Errorf("failed to buffer update account peers for peer update in account %s: %v", accountID, err) } @@ -775,14 +573,6 @@ func (c *Controller) OnPeersUpdated(ctx context.Context, accountID string, peerI func (c *Controller) OnPeersAdded(ctx context.Context, accountID string, peerIDs []string) error { log.WithContext(ctx).Debugf("OnPeersAdded call to add peers: %v", peerIDs) - if c.experimentalNetworkMap(accountID) { - account, err := c.requestBuffer.GetAccountWithBackpressure(ctx, accountID) - if err != nil { - return err - } - log.WithContext(ctx).Debugf("peers are ready to be added to networkmap cache: %v", peerIDs) - c.onPeersAddedUpdNetworkMapCache(account, peerIDs...) - } return c.bufferSendUpdateAccountPeers(ctx, accountID) } @@ -817,19 +607,6 @@ func (c *Controller) OnPeersDeleted(ctx context.Context, accountID string, peerI MessageType: network_map.MessageTypeNetworkMap, }) c.peersUpdateManager.CloseChannel(ctx, peerID) - - if c.experimentalNetworkMap(accountID) { - account, err := c.requestBuffer.GetAccountWithBackpressure(ctx, accountID) - if err != nil { - log.WithContext(ctx).Errorf("failed to get account %s: %v", accountID, err) - continue - } - err = c.onPeerDeletedUpdNetworkMapCache(account, peerID) - if err != nil { - log.WithContext(ctx).Errorf("failed to update network map cache for deleted peer %s in account %s: %v", peerID, accountID, err) - continue - } - } } return c.bufferSendUpdateAccountPeers(ctx, accountID) @@ -872,21 +649,11 @@ func (c *Controller) GetNetworkMap(ctx context.Context, peerID string) (*types.N return nil, err } - var networkMap *types.NetworkMap - - if c.experimentalNetworkMap(peer.AccountID) { - networkMap = c.getPeerNetworkMapExp(ctx, peer.AccountID, peerID, validatedPeers, peersCustomZone, accountZones, nil) - } else { - account.InjectProxyPolicies(ctx) - resourcePolicies := account.GetResourcePoliciesMap() - routers := account.GetResourceRoutersMap() - groupIDToUserIDs := account.GetActiveGroupUsers() - if c.compactedNetworkMap { - networkMap = account.GetPeerNetworkMapFromComponents(ctx, peer.ID, peersCustomZone, accountZones, validatedPeers, resourcePolicies, routers, nil, groupIDToUserIDs) - } else { - networkMap = account.GetPeerNetworkMap(ctx, peer.ID, peersCustomZone, accountZones, validatedPeers, resourcePolicies, routers, nil, groupIDToUserIDs) - } - } + account.InjectProxyPolicies(ctx) + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + networkMap := account.GetPeerNetworkMapFromComponents(ctx, peer.ID, peersCustomZone, accountZones, validatedPeers, resourcePolicies, routers, nil, groupIDToUserIDs) proxyNetworkMap, ok := proxyNetworkMaps[peer.ID] if ok { diff --git a/management/internals/controllers/network_map/interface.go b/management/internals/controllers/network_map/interface.go index 64caac861..cfea2d3de 100644 --- a/management/internals/controllers/network_map/interface.go +++ b/management/internals/controllers/network_map/interface.go @@ -12,9 +12,6 @@ import ( ) const ( - EnvNewNetworkMapBuilder = "NB_EXPERIMENT_NETWORK_MAP" - EnvNewNetworkMapAccounts = "NB_EXPERIMENT_NETWORK_MAP_ACCOUNTS" - DnsForwarderPort = nbdns.ForwarderServerPort OldForwarderPort = nbdns.ForwarderClientPort DnsForwarderPortMinVersion = "v0.59.0" diff --git a/management/server/account_test.go b/management/server/account_test.go index bef791d77..756c42421 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -408,7 +408,7 @@ func TestAccount_GetPeerNetworkMap(t *testing.T) { } customZone := account.GetPeersCustomZone(context.Background(), "netbird.io") - networkMap := account.GetPeerNetworkMap(context.Background(), testCase.peerID, customZone, nil, validatedPeers, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap(), nil, account.GetActiveGroupUsers()) + networkMap := account.GetPeerNetworkMapFromComponents(context.Background(), testCase.peerID, customZone, nil, validatedPeers, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap(), nil, account.GetActiveGroupUsers()) assert.Len(t, networkMap.Peers, len(testCase.expectedPeers)) assert.Len(t, networkMap.OfflinePeers, len(testCase.expectedOfflinePeers)) } @@ -1171,11 +1171,6 @@ func TestAccountManager_AddPeerWithUserID(t *testing.T) { assert.Equal(t, peer.IP.String(), fmt.Sprint(ev.Meta["ip"])) } -func TestAccountManager_NetworkUpdates_SaveGroup_Experimental(t *testing.T) { - t.Setenv(network_map.EnvNewNetworkMapBuilder, "true") - testAccountManager_NetworkUpdates_SaveGroup(t) -} - func TestAccountManager_NetworkUpdates_SaveGroup(t *testing.T) { testAccountManager_NetworkUpdates_SaveGroup(t) } @@ -1231,11 +1226,6 @@ func testAccountManager_NetworkUpdates_SaveGroup(t *testing.T) { wg.Wait() } -func TestAccountManager_NetworkUpdates_DeletePolicy_Experimental(t *testing.T) { - t.Setenv(network_map.EnvNewNetworkMapBuilder, "true") - testAccountManager_NetworkUpdates_DeletePolicy(t) -} - func TestAccountManager_NetworkUpdates_DeletePolicy(t *testing.T) { testAccountManager_NetworkUpdates_DeletePolicy(t) } @@ -1274,11 +1264,6 @@ func testAccountManager_NetworkUpdates_DeletePolicy(t *testing.T) { wg.Wait() } -func TestAccountManager_NetworkUpdates_SavePolicy_Experimental(t *testing.T) { - t.Setenv(network_map.EnvNewNetworkMapBuilder, "true") - testAccountManager_NetworkUpdates_SavePolicy(t) -} - func TestAccountManager_NetworkUpdates_SavePolicy(t *testing.T) { testAccountManager_NetworkUpdates_SavePolicy(t) } @@ -1332,11 +1317,6 @@ func testAccountManager_NetworkUpdates_SavePolicy(t *testing.T) { wg.Wait() } -func TestAccountManager_NetworkUpdates_DeletePeer_Experimental(t *testing.T) { - t.Setenv(network_map.EnvNewNetworkMapBuilder, "true") - testAccountManager_NetworkUpdates_DeletePeer(t) -} - func TestAccountManager_NetworkUpdates_DeletePeer(t *testing.T) { testAccountManager_NetworkUpdates_DeletePeer(t) } @@ -1397,11 +1377,6 @@ func testAccountManager_NetworkUpdates_DeletePeer(t *testing.T) { wg.Wait() } -func TestAccountManager_NetworkUpdates_DeleteGroup_Experimental(t *testing.T) { - t.Setenv(network_map.EnvNewNetworkMapBuilder, "true") - testAccountManager_NetworkUpdates_DeleteGroup(t) -} - func TestAccountManager_NetworkUpdates_DeleteGroup(t *testing.T) { testAccountManager_NetworkUpdates_DeleteGroup(t) } @@ -1633,75 +1608,6 @@ func TestFileStore_GetRoutesByPrefix(t *testing.T) { assert.Contains(t, routeIDs, route.ID("route-2")) } -func TestAccount_GetRoutesToSync(t *testing.T) { - _, prefix, err := route.ParseNetwork("192.168.64.0/24") - if err != nil { - t.Fatal(err) - } - _, prefix2, err := route.ParseNetwork("192.168.0.0/24") - if err != nil { - t.Fatal(err) - } - account := &types.Account{ - Peers: map[string]*nbpeer.Peer{ - "peer-1": {Key: "peer-1", Meta: nbpeer.PeerSystemMeta{GoOS: "linux"}}, "peer-2": {Key: "peer-2", Meta: nbpeer.PeerSystemMeta{GoOS: "linux"}}, "peer-3": {Key: "peer-1", Meta: nbpeer.PeerSystemMeta{GoOS: "linux"}}, - }, - Groups: map[string]*types.Group{"group1": {ID: "group1", Peers: []string{"peer-1", "peer-2"}}}, - Routes: map[route.ID]*route.Route{ - "route-1": { - ID: "route-1", - Network: prefix, - NetID: "network-1", - Description: "network-1", - Peer: "peer-1", - NetworkType: 0, - Masquerade: false, - Metric: 999, - Enabled: true, - Groups: []string{"group1"}, - }, - "route-2": { - ID: "route-2", - Network: prefix2, - NetID: "network-2", - Description: "network-2", - Peer: "peer-2", - NetworkType: 0, - Masquerade: false, - Metric: 999, - Enabled: true, - Groups: []string{"group1"}, - }, - "route-3": { - ID: "route-3", - Network: prefix, - NetID: "network-1", - Description: "network-1", - Peer: "peer-2", - NetworkType: 0, - Masquerade: false, - Metric: 999, - Enabled: true, - Groups: []string{"group1"}, - }, - }, - } - - routes := account.GetRoutesToSync(context.Background(), "peer-2", []*nbpeer.Peer{{Key: "peer-1"}, {Key: "peer-3"}}, account.GetPeerGroups("peer-2")) - - assert.Len(t, routes, 2) - routeIDs := make(map[route.ID]struct{}, 2) - for _, r := range routes { - routeIDs[r.ID] = struct{}{} - } - assert.Contains(t, routeIDs, route.ID("route-2")) - assert.Contains(t, routeIDs, route.ID("route-3")) - - emptyRoutes := account.GetRoutesToSync(context.Background(), "peer-3", []*nbpeer.Peer{{Key: "peer-1"}, {Key: "peer-2"}}, account.GetPeerGroups("peer-3")) - - assert.Len(t, emptyRoutes, 0) -} - func TestAccount_Copy(t *testing.T) { account := &types.Account{ Id: "account1", @@ -1824,9 +1730,7 @@ func TestAccount_Copy(t *testing.T) { AccountID: "account1", }, }, - NetworkMapCache: &types.NetworkMapBuilder{}, } - account.InitOnce() err := hasNilField(account) if err != nil { t.Fatal(err) diff --git a/management/server/http/handlers/peers/peers_handler.go b/management/server/http/handlers/peers/peers_handler.go index 6b9a69f04..bf6937a49 100644 --- a/management/server/http/handlers/peers/peers_handler.go +++ b/management/server/http/handlers/peers/peers_handler.go @@ -417,7 +417,7 @@ func (h *Handler) GetAccessiblePeers(w http.ResponseWriter, r *http.Request) { dnsDomain := h.networkMapController.GetDNSDomain(account.Settings) - netMap := account.GetPeerNetworkMap(r.Context(), peerID, dns.CustomZone{}, nil, validPeers, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap(), nil, account.GetActiveGroupUsers()) + netMap := account.GetPeerNetworkMapFromComponents(r.Context(), peerID, dns.CustomZone{}, nil, validPeers, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap(), nil, account.GetActiveGroupUsers()) util.WriteJSONObject(r.Context(), w, toAccessiblePeers(netMap, dnsDomain)) } diff --git a/management/server/peer_test.go b/management/server/peer_test.go index 050baa595..17202597a 100644 --- a/management/server/peer_test.go +++ b/management/server/peer_test.go @@ -179,11 +179,6 @@ func TestAccountManager_GetNetworkMap(t *testing.T) { testGetNetworkMapGeneral(t) } -func TestAccountManager_GetNetworkMap_Experimental(t *testing.T) { - t.Setenv(network_map.EnvNewNetworkMapBuilder, "true") - testGetNetworkMapGeneral(t) -} - func testGetNetworkMapGeneral(t *testing.T) { manager, _, err := createManager(t) if err != nil { @@ -1016,11 +1011,6 @@ func BenchmarkUpdateAccountPeers(b *testing.B) { } } -func TestUpdateAccountPeers_Experimental(t *testing.T) { - t.Setenv(network_map.EnvNewNetworkMapBuilder, "true") - testUpdateAccountPeers(t) -} - func TestUpdateAccountPeers(t *testing.T) { testUpdateAccountPeers(t) } @@ -1600,7 +1590,6 @@ func Test_RegisterPeerRollbackOnFailure(t *testing.T) { } func Test_LoginPeer(t *testing.T) { - t.Setenv(network_map.EnvNewNetworkMapBuilder, "true") if runtime.GOOS == "windows" { t.Skip("The SQLite store is not properly supported by Windows yet") } diff --git a/management/server/route_test.go b/management/server/route_test.go index 91bd8b050..d0caf4b9b 100644 --- a/management/server/route_test.go +++ b/management/server/route_test.go @@ -2,10 +2,8 @@ package server import ( "context" - "fmt" "net" "net/netip" - "sort" "testing" "time" @@ -1840,11 +1838,6 @@ func TestAccount_getPeersRoutesFirewall(t *testing.T) { }, } - validatedPeers := make(map[string]struct{}) - for p := range account.Peers { - validatedPeers[p] = struct{}{} - } - t.Run("check applied policies for the route", func(t *testing.T) { route1 := account.Routes["route1"] policies := types.GetAllRoutePoliciesFromGroups(account, route1.AccessControlGroups) @@ -1858,116 +1851,6 @@ func TestAccount_getPeersRoutesFirewall(t *testing.T) { policies = types.GetAllRoutePoliciesFromGroups(account, route3.AccessControlGroups) assert.Len(t, policies, 0) }) - - t.Run("check peer routes firewall rules", func(t *testing.T) { - routesFirewallRules := account.GetPeerRoutesFirewallRules(context.Background(), "peerA", validatedPeers) - assert.Len(t, routesFirewallRules, 4) - - expectedRoutesFirewallRules := []*types.RouteFirewallRule{ - { - SourceRanges: []string{ - fmt.Sprintf(types.AllowedIPsFormat, peerCIp), - fmt.Sprintf(types.AllowedIPsFormat, peerHIp), - fmt.Sprintf(types.AllowedIPsFormat, peerBIp), - }, - Action: "accept", - Destination: "192.168.0.0/16", - Protocol: "all", - Port: 80, - RouteID: "route1:peerA", - }, - { - SourceRanges: []string{ - fmt.Sprintf(types.AllowedIPsFormat, peerCIp), - fmt.Sprintf(types.AllowedIPsFormat, peerHIp), - fmt.Sprintf(types.AllowedIPsFormat, peerBIp), - }, - Action: "accept", - Destination: "192.168.0.0/16", - Protocol: "all", - Port: 320, - RouteID: "route1:peerA", - }, - } - additionalFirewallRule := []*types.RouteFirewallRule{ - { - SourceRanges: []string{ - fmt.Sprintf(types.AllowedIPsFormat, peerJIp), - }, - Action: "accept", - Destination: "192.168.10.0/16", - Protocol: "tcp", - Port: 80, - RouteID: "route4:peerA", - }, - { - SourceRanges: []string{ - fmt.Sprintf(types.AllowedIPsFormat, peerKIp), - }, - Action: "accept", - Destination: "192.168.10.0/16", - Protocol: "all", - RouteID: "route4:peerA", - }, - } - - assert.ElementsMatch(t, orderRuleSourceRanges(routesFirewallRules), orderRuleSourceRanges(append(expectedRoutesFirewallRules, additionalFirewallRule...))) - - // peerD is also the routing peer for route1, should contain same routes firewall rules as peerA - routesFirewallRules = account.GetPeerRoutesFirewallRules(context.Background(), "peerD", validatedPeers) - assert.Len(t, routesFirewallRules, 2) - for _, rule := range expectedRoutesFirewallRules { - rule.RouteID = "route1:peerD" - } - assert.ElementsMatch(t, orderRuleSourceRanges(routesFirewallRules), orderRuleSourceRanges(expectedRoutesFirewallRules)) - - // peerE is a single routing peer for route 2 and route 3 - routesFirewallRules = account.GetPeerRoutesFirewallRules(context.Background(), "peerE", validatedPeers) - assert.Len(t, routesFirewallRules, 3) - - expectedRoutesFirewallRules = []*types.RouteFirewallRule{ - { - SourceRanges: []string{"100.65.250.202/32", "100.65.13.186/32"}, - Action: "accept", - Destination: existingNetwork.String(), - Protocol: "tcp", - PortRange: types.RulePortRange{Start: 80, End: 350}, - RouteID: "route2", - }, - { - SourceRanges: []string{"0.0.0.0/0"}, - Action: "accept", - Destination: "192.0.2.0/32", - Protocol: "all", - Domains: domain.List{"example.com"}, - IsDynamic: true, - RouteID: "route3", - }, - { - SourceRanges: []string{"::/0"}, - Action: "accept", - Destination: "192.0.2.0/32", - Protocol: "all", - Domains: domain.List{"example.com"}, - IsDynamic: true, - RouteID: "route3", - }, - } - assert.ElementsMatch(t, orderRuleSourceRanges(routesFirewallRules), orderRuleSourceRanges(expectedRoutesFirewallRules)) - - // peerC is part of route1 distribution groups but should not receive the routes firewall rules - routesFirewallRules = account.GetPeerRoutesFirewallRules(context.Background(), "peerC", validatedPeers) - assert.Len(t, routesFirewallRules, 0) - }) - -} - -// orderList is a helper function to sort a list of strings -func orderRuleSourceRanges(ruleList []*types.RouteFirewallRule) []*types.RouteFirewallRule { - for _, rule := range ruleList { - sort.Strings(rule.SourceRanges) - } - return ruleList } func TestRouteAccountPeersUpdate(t *testing.T) { @@ -2665,11 +2548,6 @@ func TestAccount_GetPeerNetworkResourceFirewallRules(t *testing.T) { }, } - validatedPeers := make(map[string]struct{}) - for p := range account.Peers { - validatedPeers[p] = struct{}{} - } - t.Run("validate applied policies for different network resources", func(t *testing.T) { // Test case: Resource1 is directly applied to the policy (policyResource1) policies := account.GetPoliciesForNetworkResource("resource1") @@ -2693,127 +2571,4 @@ func TestAccount_GetPeerNetworkResourceFirewallRules(t *testing.T) { policies = account.GetPoliciesForNetworkResource("resource6") assert.Len(t, policies, 1, "resource6 should have exactly 1 policy applied via access control groups") }) - - t.Run("validate routing peer firewall rules for network resources", func(t *testing.T) { - resourcePoliciesMap := account.GetResourcePoliciesMap() - resourceRoutersMap := account.GetResourceRoutersMap() - _, routes, sourcePeers := account.GetNetworkResourcesRoutesToSync(context.Background(), "peerA", resourcePoliciesMap, resourceRoutersMap) - firewallRules := account.GetPeerNetworkResourceFirewallRules(context.Background(), account.Peers["peerA"], validatedPeers, routes, resourcePoliciesMap) - assert.Len(t, firewallRules, 4) - assert.Len(t, sourcePeers, 5) - - expectedFirewallRules := []*types.RouteFirewallRule{ - { - SourceRanges: []string{ - fmt.Sprintf(types.AllowedIPsFormat, peerCIp), - fmt.Sprintf(types.AllowedIPsFormat, peerHIp), - fmt.Sprintf(types.AllowedIPsFormat, peerBIp), - }, - Action: "accept", - Destination: "192.168.0.0/16", - Protocol: "all", - Port: 80, - RouteID: "resource2:peerA", - }, - { - SourceRanges: []string{ - fmt.Sprintf(types.AllowedIPsFormat, peerCIp), - fmt.Sprintf(types.AllowedIPsFormat, peerHIp), - fmt.Sprintf(types.AllowedIPsFormat, peerBIp), - }, - Action: "accept", - Destination: "192.168.0.0/16", - Protocol: "all", - Port: 320, - RouteID: "resource2:peerA", - }, - } - - additionalFirewallRules := []*types.RouteFirewallRule{ - { - SourceRanges: []string{ - fmt.Sprintf(types.AllowedIPsFormat, peerJIp), - }, - Action: "accept", - Destination: "192.0.2.0/32", - Protocol: "tcp", - Port: 80, - Domains: domain.List{"example.com"}, - IsDynamic: true, - RouteID: "resource4:peerA", - }, - { - SourceRanges: []string{ - fmt.Sprintf(types.AllowedIPsFormat, peerKIp), - }, - Action: "accept", - Destination: "192.0.2.0/32", - Protocol: "all", - Domains: domain.List{"example.com"}, - IsDynamic: true, - RouteID: "resource4:peerA", - }, - } - assert.ElementsMatch(t, orderRuleSourceRanges(firewallRules), orderRuleSourceRanges(append(expectedFirewallRules, additionalFirewallRules...))) - - // peerD is also the routing peer for resource2 - _, routes, sourcePeers = account.GetNetworkResourcesRoutesToSync(context.Background(), "peerD", resourcePoliciesMap, resourceRoutersMap) - firewallRules = account.GetPeerNetworkResourceFirewallRules(context.Background(), account.Peers["peerD"], validatedPeers, routes, resourcePoliciesMap) - assert.Len(t, firewallRules, 2) - for _, rule := range expectedFirewallRules { - rule.RouteID = "resource2:peerD" - } - assert.ElementsMatch(t, orderRuleSourceRanges(firewallRules), orderRuleSourceRanges(expectedFirewallRules)) - assert.Len(t, sourcePeers, 3) - - // peerE is a single routing peer for resource1 and resource3 - // PeerE should only receive rules for resource1 since resource3 has no applied policy - _, routes, sourcePeers = account.GetNetworkResourcesRoutesToSync(context.Background(), "peerE", resourcePoliciesMap, resourceRoutersMap) - firewallRules = account.GetPeerNetworkResourceFirewallRules(context.Background(), account.Peers["peerE"], validatedPeers, routes, resourcePoliciesMap) - assert.Len(t, firewallRules, 1) - assert.Len(t, sourcePeers, 2) - - expectedFirewallRules = []*types.RouteFirewallRule{ - { - SourceRanges: []string{"100.65.250.202/32", "100.65.13.186/32"}, - Action: "accept", - Destination: "10.10.10.0/24", - Protocol: "tcp", - PortRange: types.RulePortRange{Start: 80, End: 350}, - RouteID: "resource1:peerE", - }, - } - assert.ElementsMatch(t, orderRuleSourceRanges(firewallRules), orderRuleSourceRanges(expectedFirewallRules)) - - // peerC is part of distribution groups for resource2 but should not receive the firewall rules - firewallRules = account.GetPeerRoutesFirewallRules(context.Background(), "peerC", validatedPeers) - assert.Len(t, firewallRules, 0) - - // peerL is the single routing peer for resource5 - _, routes, sourcePeers = account.GetNetworkResourcesRoutesToSync(context.Background(), "peerL", resourcePoliciesMap, resourceRoutersMap) - assert.Len(t, routes, 1) - firewallRules = account.GetPeerNetworkResourceFirewallRules(context.Background(), account.Peers["peerL"], validatedPeers, routes, resourcePoliciesMap) - assert.Len(t, firewallRules, 1) - assert.Len(t, sourcePeers, 1) - - expectedFirewallRules = []*types.RouteFirewallRule{ - { - SourceRanges: []string{"100.65.29.67/32"}, - Action: "accept", - Destination: "10.12.12.1/32", - Protocol: "tcp", - Port: 8080, - RouteID: "resource5:peerL", - }, - } - assert.ElementsMatch(t, orderRuleSourceRanges(firewallRules), orderRuleSourceRanges(expectedFirewallRules)) - - _, routes, sourcePeers = account.GetNetworkResourcesRoutesToSync(context.Background(), "peerM", resourcePoliciesMap, resourceRoutersMap) - assert.Len(t, routes, 1) - assert.Len(t, sourcePeers, 0) - - _, routes, sourcePeers = account.GetNetworkResourcesRoutesToSync(context.Background(), "peerN", resourcePoliciesMap, resourceRoutersMap) - assert.Len(t, routes, 1) - assert.Len(t, sourcePeers, 2) - }) } diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 0ff57b752..0a716d08d 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -1196,7 +1196,6 @@ func (s *SqlStore) getAccountGorm(ctx context.Context, accountID string) (*types account.NameServerGroups[ns.ID] = &ns } account.NameServerGroupsG = nil - account.InitOnce() return &account, nil } @@ -1635,7 +1634,6 @@ func (s *SqlStore) getAccount(ctx context.Context, accountID string) (*types.Acc if sExtraIntegratedValidatorGroups.Valid { _ = json.Unmarshal([]byte(sExtraIntegratedValidatorGroups.String), &account.Settings.Extra.IntegratedValidatorGroups) } - account.InitOnce() return &account, nil } diff --git a/management/server/types/account.go b/management/server/types/account.go index c448813db..e7c1e2dce 100644 --- a/management/server/types/account.go +++ b/management/server/types/account.go @@ -8,7 +8,6 @@ import ( "slices" "strconv" "strings" - "sync" "time" "github.com/hashicorp/go-multierror" @@ -27,7 +26,6 @@ import ( networkTypes "github.com/netbirdio/netbird/management/server/networks/types" nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/server/posture" - "github.com/netbirdio/netbird/management/server/telemetry" "github.com/netbirdio/netbird/management/server/util" "github.com/netbirdio/netbird/route" "github.com/netbirdio/netbird/shared/management/domain" @@ -110,16 +108,9 @@ type Account struct { NetworkResources []*resourceTypes.NetworkResource `gorm:"foreignKey:AccountID;references:id"` Onboarding AccountOnboarding `gorm:"foreignKey:AccountID;references:id;constraint:OnDelete:CASCADE"` - NetworkMapCache *NetworkMapBuilder `gorm:"-"` - nmapInitOnce *sync.Once `gorm:"-"` - ReverseProxyFreeDomainNonce string } -func (a *Account) InitOnce() { - a.nmapInitOnce = &sync.Once{} -} - // this class is used by gorm only type PrimaryAccountInfo struct { IsDomainPrimaryAccount bool @@ -155,108 +146,6 @@ func (o AccountOnboarding) IsEqual(onboarding AccountOnboarding) bool { o.SignupFormPending == onboarding.SignupFormPending } -// GetRoutesToSync returns the enabled routes for the peer ID and the routes -// from the ACL peers that have distribution groups associated with the peer ID. -// Please mind, that the returned route.Route objects will contain Peer.Key instead of Peer.ID. -func (a *Account) GetRoutesToSync(ctx context.Context, peerID string, aclPeers []*nbpeer.Peer, peerGroups LookupMap) []*route.Route { - routes, peerDisabledRoutes := a.getRoutingPeerRoutes(ctx, peerID) - peerRoutesMembership := make(LookupMap) - for _, r := range append(routes, peerDisabledRoutes...) { - peerRoutesMembership[string(r.GetHAUniqueID())] = struct{}{} - } - - for _, peer := range aclPeers { - activeRoutes, _ := a.getRoutingPeerRoutes(ctx, peer.ID) - groupFilteredRoutes := a.filterRoutesByGroups(activeRoutes, peerGroups) - filteredRoutes := a.filterRoutesFromPeersOfSameHAGroup(groupFilteredRoutes, peerRoutesMembership) - routes = append(routes, filteredRoutes...) - } - - return routes -} - -// filterRoutesFromPeersOfSameHAGroup filters and returns a list of routes that don't share the same HA route membership -func (a *Account) filterRoutesFromPeersOfSameHAGroup(routes []*route.Route, peerMemberships LookupMap) []*route.Route { - var filteredRoutes []*route.Route - for _, r := range routes { - _, found := peerMemberships[string(r.GetHAUniqueID())] - if !found { - filteredRoutes = append(filteredRoutes, r) - } - } - return filteredRoutes -} - -// filterRoutesByGroups returns a list with routes that have distribution groups in the group's map -func (a *Account) filterRoutesByGroups(routes []*route.Route, groupListMap LookupMap) []*route.Route { - var filteredRoutes []*route.Route - for _, r := range routes { - for _, groupID := range r.Groups { - _, found := groupListMap[groupID] - if found { - filteredRoutes = append(filteredRoutes, r) - break - } - } - } - return filteredRoutes -} - -// getRoutingPeerRoutes returns the enabled and disabled lists of routes that the given routing peer serves -// Please mind, that the returned route.Route objects will contain Peer.Key instead of Peer.ID. -// If the given is not a routing peer, then the lists are empty. -func (a *Account) getRoutingPeerRoutes(ctx context.Context, peerID string) (enabledRoutes []*route.Route, disabledRoutes []*route.Route) { - - peer := a.GetPeer(peerID) - if peer == nil { - log.WithContext(ctx).Errorf("peer %s that doesn't exist under account %s", peerID, a.Id) - return enabledRoutes, disabledRoutes - } - - seenRoute := make(map[route.ID]struct{}) - - takeRoute := func(r *route.Route, id string) { - if _, ok := seenRoute[r.ID]; ok { - return - } - seenRoute[r.ID] = struct{}{} - - if r.Enabled { - r.Peer = peer.Key - enabledRoutes = append(enabledRoutes, r) - return - } - disabledRoutes = append(disabledRoutes, r) - } - - for _, r := range a.Routes { - for _, groupID := range r.PeerGroups { - group := a.GetGroup(groupID) - if group == nil { - log.WithContext(ctx).Errorf("route %s has peers group %s that doesn't exist under account %s", r.ID, groupID, a.Id) - continue - } - for _, id := range group.Peers { - if id != peerID { - continue - } - - newPeerRoute := r.Copy() - newPeerRoute.Peer = id - newPeerRoute.PeerGroups = nil - newPeerRoute.ID = route.ID(string(r.ID) + ":" + id) // we have to provide unique route id when distribute network map - takeRoute(newPeerRoute, id) - break - } - } - if r.Peer == peerID { - takeRoute(r.Copy(), peerID) - } - } - - return enabledRoutes, disabledRoutes -} - // GetRoutesByPrefixOrDomains return list of routes by account and route prefix func (a *Account) GetRoutesByPrefixOrDomains(prefix netip.Prefix, domains domain.List) []*route.Route { var routes []*route.Route @@ -276,106 +165,6 @@ func (a *Account) GetGroup(groupID string) *Group { return a.Groups[groupID] } -// GetPeerNetworkMap returns the networkmap for the given peer ID. -func (a *Account) GetPeerNetworkMap( - ctx context.Context, - peerID string, - peersCustomZone nbdns.CustomZone, - accountZones []*zones.Zone, - validatedPeersMap map[string]struct{}, - resourcePolicies map[string][]*Policy, - routers map[string]map[string]*routerTypes.NetworkRouter, - metrics *telemetry.AccountManagerMetrics, - groupIDToUserIDs map[string][]string, -) *NetworkMap { - start := time.Now() - peer := a.Peers[peerID] - if peer == nil { - return &NetworkMap{ - Network: a.Network.Copy(), - } - } - - if _, ok := validatedPeersMap[peerID]; !ok { - return &NetworkMap{ - Network: a.Network.Copy(), - } - } - - peerGroups := a.GetPeerGroups(peerID) - - aclPeers, firewallRules, authorizedUsers, enableSSH := a.GetPeerConnectionResources(ctx, peer, validatedPeersMap, groupIDToUserIDs) - // exclude expired peers - var peersToConnect []*nbpeer.Peer - var expiredPeers []*nbpeer.Peer - for _, p := range aclPeers { - expired, _ := p.LoginExpired(a.Settings.PeerLoginExpiration) - if a.Settings.PeerLoginExpirationEnabled && expired { - expiredPeers = append(expiredPeers, p) - continue - } - peersToConnect = append(peersToConnect, p) - } - - routesUpdate := a.GetRoutesToSync(ctx, peerID, peersToConnect, peerGroups) - routesFirewallRules := a.GetPeerRoutesFirewallRules(ctx, peerID, validatedPeersMap) - isRouter, networkResourcesRoutes, sourcePeers := a.GetNetworkResourcesRoutesToSync(ctx, peerID, resourcePolicies, routers) - var networkResourcesFirewallRules []*RouteFirewallRule - if isRouter { - networkResourcesFirewallRules = a.GetPeerNetworkResourceFirewallRules(ctx, peer, validatedPeersMap, networkResourcesRoutes, resourcePolicies) - } - peersToConnectIncludingRouters := a.addNetworksRoutingPeers(networkResourcesRoutes, peer, peersToConnect, expiredPeers, isRouter, sourcePeers) - - dnsManagementStatus := a.getPeerDNSManagementStatus(peerID) - dnsUpdate := nbdns.Config{ - ServiceEnable: dnsManagementStatus, - } - - if dnsManagementStatus { - var zones []nbdns.CustomZone - - if peersCustomZone.Domain != "" { - records := filterZoneRecordsForPeers(peer, peersCustomZone, peersToConnectIncludingRouters, expiredPeers) - zones = append(zones, nbdns.CustomZone{ - Domain: peersCustomZone.Domain, - Records: records, - }) - } - - filteredAccountZones := filterPeerAppliedZones(ctx, accountZones, peerGroups) - zones = append(zones, filteredAccountZones...) - - dnsUpdate.CustomZones = zones - dnsUpdate.NameServerGroups = getPeerNSGroups(a, peerID) - } - - nm := &NetworkMap{ - Peers: peersToConnectIncludingRouters, - Network: a.Network.Copy(), - Routes: slices.Concat(networkResourcesRoutes, routesUpdate), - DNSConfig: dnsUpdate, - OfflinePeers: expiredPeers, - FirewallRules: firewallRules, - RoutesFirewallRules: slices.Concat(networkResourcesFirewallRules, routesFirewallRules), - AuthorizedUsers: authorizedUsers, - EnableSSH: enableSSH, - } - - if metrics != nil { - objectCount := int64(len(peersToConnectIncludingRouters) + len(expiredPeers) + len(routesUpdate) + len(networkResourcesRoutes) + len(firewallRules) + +len(networkResourcesFirewallRules) + len(routesFirewallRules)) - metrics.CountNetworkMapObjects(objectCount) - metrics.CountGetPeerNetworkMapDuration(time.Since(start)) - - if objectCount > 5000 { - log.WithContext(ctx).Tracef("account: %s has a total resource count of %d objects, "+ - "peers to connect: %d, expired peers: %d, routes: %d, firewall rules: %d, network resources routes: %d, network resources firewall rules: %d, routes firewall rules: %d", - a.Id, objectCount, len(peersToConnectIncludingRouters), len(expiredPeers), len(routesUpdate), len(firewallRules), len(networkResourcesRoutes), len(networkResourcesFirewallRules), len(routesFirewallRules)) - } - } - - return nm -} - func (a *Account) addNetworksRoutingPeers( networkResourcesRoutes []*route.Route, peer *nbpeer.Peer, @@ -421,39 +210,6 @@ func (a *Account) addNetworksRoutingPeers( return peersToConnect } -func getPeerNSGroups(account *Account, peerID string) []*nbdns.NameServerGroup { - groupList := account.GetPeerGroups(peerID) - - var peerNSGroups []*nbdns.NameServerGroup - - for _, nsGroup := range account.NameServerGroups { - if !nsGroup.Enabled { - continue - } - for _, gID := range nsGroup.Groups { - _, found := groupList[gID] - if found { - if !peerIsNameserver(account.GetPeer(peerID), nsGroup) { - peerNSGroups = append(peerNSGroups, nsGroup.Copy()) - break - } - } - } - } - - return peerNSGroups -} - -// peerIsNameserver returns true if the peer is a nameserver for a nsGroup -func peerIsNameserver(peer *nbpeer.Peer, nsGroup *nbdns.NameServerGroup) bool { - for _, ns := range nsGroup.NameServers { - if peer.IP.Equal(ns.IP.AsSlice()) { - return true - } - } - return false -} - func AddPeerLabelsToAccount(ctx context.Context, account *Account, peerLabels LookupMap) { for _, peer := range account.Peers { label, err := GetPeerHostLabel(peer.Name, peerLabels) @@ -800,19 +556,6 @@ func (a *Account) GetPeerGroupsList(peerID string) []string { return grps } -func (a *Account) getPeerDNSManagementStatus(peerID string) bool { - peerGroups := a.GetPeerGroups(peerID) - enabled := true - for _, groupID := range a.DNSSettings.DisabledManagementGroups { - _, found := peerGroups[groupID] - if found { - enabled = false - break - } - } - return enabled -} - func (a *Account) GetPeerGroups(peerID string) LookupMap { groupList := make(LookupMap) for groupID, group := range a.Groups { @@ -941,8 +684,6 @@ func (a *Account) Copy() *Account { NetworkResources: networkResources, Services: services, Onboarding: a.Onboarding, - NetworkMapCache: a.NetworkMapCache, - nmapInitOnce: a.nmapInitOnce, Domains: domains, } } @@ -1304,31 +1045,6 @@ func (a *Account) GetPostureChecks(postureChecksID string) *posture.Checks { return nil } -// GetPeerRoutesFirewallRules gets the routes firewall rules associated with a routing peer ID for the account. -func (a *Account) GetPeerRoutesFirewallRules(ctx context.Context, peerID string, validatedPeersMap map[string]struct{}) []*RouteFirewallRule { - routesFirewallRules := make([]*RouteFirewallRule, 0, len(a.Routes)) - - enabledRoutes, _ := a.getRoutingPeerRoutes(ctx, peerID) - for _, route := range enabledRoutes { - // If no access control groups are specified, accept all traffic. - if len(route.AccessControlGroups) == 0 { - defaultPermit := getDefaultPermit(route) - routesFirewallRules = append(routesFirewallRules, defaultPermit...) - continue - } - - distributionPeers := a.getDistributionGroupsPeers(route) - - for _, accessGroup := range route.AccessControlGroups { - policies := GetAllRoutePoliciesFromGroups(a, []string{accessGroup}) - rules := a.getRouteFirewallRules(ctx, peerID, policies, route, validatedPeersMap, distributionPeers) - routesFirewallRules = append(routesFirewallRules, rules...) - } - } - - return routesFirewallRules -} - func (a *Account) getRouteFirewallRules(ctx context.Context, peerID string, policies []*Policy, route *route.Route, validatedPeersMap map[string]struct{}, distributionPeers map[string]struct{}) []*RouteFirewallRule { var fwRules []*RouteFirewallRule for _, policy := range policies { @@ -1387,50 +1103,6 @@ func (a *Account) getRulePeers(rule *PolicyRule, postureChecks []string, peerID return distributionGroupPeers } -func (a *Account) getDistributionGroupsPeers(route *route.Route) map[string]struct{} { - distPeers := make(map[string]struct{}) - for _, id := range route.Groups { - group := a.Groups[id] - if group == nil { - continue - } - - for _, pID := range group.Peers { - distPeers[pID] = struct{}{} - } - } - return distPeers -} - -func getDefaultPermit(route *route.Route) []*RouteFirewallRule { - var rules []*RouteFirewallRule - - sources := []string{"0.0.0.0/0"} - if route.Network.Addr().Is6() { - sources = []string{"::/0"} - } - rule := RouteFirewallRule{ - SourceRanges: sources, - Action: string(PolicyTrafficActionAccept), - Destination: route.Network.String(), - Protocol: string(PolicyRuleProtocolALL), - Domains: route.Domains, - IsDynamic: route.IsDynamic(), - RouteID: route.ID, - } - - rules = append(rules, &rule) - - // dynamic routes always contain an IPv4 placeholder as destination, hence we must add IPv6 rules additionally - if route.IsDynamic() { - ruleV6 := rule - ruleV6.SourceRanges = []string{"::/0"} - rules = append(rules, &ruleV6) - } - - return rules -} - // GetAllRoutePoliciesFromGroups retrieves route policies associated with the specified access control groups // and returns a list of policies that have rules with destinations matching the specified groups. func GetAllRoutePoliciesFromGroups(account *Account, accessControlGroups []string) []*Policy { @@ -1508,65 +1180,6 @@ func (a *Account) GetResourcePoliciesMap() map[string][]*Policy { return resourcePolicies } -// GetNetworkResourcesRoutesToSync returns network routes for syncing with a specific peer and its ACL peers. -func (a *Account) GetNetworkResourcesRoutesToSync(ctx context.Context, peerID string, resourcePolicies map[string][]*Policy, routers map[string]map[string]*routerTypes.NetworkRouter) (bool, []*route.Route, map[string]struct{}) { - var isRoutingPeer bool - var routes []*route.Route - allSourcePeers := make(map[string]struct{}, len(a.Peers)) - - for _, resource := range a.NetworkResources { - if !resource.Enabled { - continue - } - - var addSourcePeers bool - - networkRoutingPeers, exists := routers[resource.NetworkID] - if exists { - if router, ok := networkRoutingPeers[peerID]; ok { - isRoutingPeer, addSourcePeers = true, true - routes = append(routes, a.getNetworkResourcesRoutes(resource, peerID, router, resourcePolicies)...) - } - } - - addedResourceRoute := false - for _, policy := range resourcePolicies[resource.ID] { - var peers []string - if policy.Rules[0].SourceResource.Type == ResourceTypePeer && policy.Rules[0].SourceResource.ID != "" { - peers = []string{policy.Rules[0].SourceResource.ID} - } else { - peers = a.getUniquePeerIDsFromGroupsIDs(ctx, policy.SourceGroups()) - } - if addSourcePeers { - for _, pID := range a.getPostureValidPeers(peers, policy.SourcePostureChecks) { - allSourcePeers[pID] = struct{}{} - } - } else if slices.Contains(peers, peerID) && a.validatePostureChecksOnPeer(ctx, policy.SourcePostureChecks, peerID) { - // add routes for the resource if the peer is in the distribution group - for peerId, router := range networkRoutingPeers { - routes = append(routes, a.getNetworkResourcesRoutes(resource, peerId, router, resourcePolicies)...) - } - addedResourceRoute = true - } - if addedResourceRoute { - break - } - } - } - - return isRoutingPeer, routes, allSourcePeers -} - -func (a *Account) getPostureValidPeers(inputPeers []string, postureChecksIDs []string) []string { - var dest []string - for _, peerID := range inputPeers { - if a.validatePostureChecksOnPeer(context.Background(), postureChecksIDs, peerID) { - dest = append(dest, peerID) - } - } - return dest -} - func (a *Account) getUniquePeerIDsFromGroupsIDs(ctx context.Context, groups []string) []string { peerIDs := make(map[string]struct{}, len(groups)) // we expect at least one peer per group as initial capacity for _, groupID := range groups { @@ -1658,22 +1271,6 @@ func (a *Account) GetPoliciesAppliedInNetwork(networkID string) []string { return result } -// getNetworkResourcesRoutes convert the network resources list to routes list. -func (a *Account) getNetworkResourcesRoutes(resource *resourceTypes.NetworkResource, peerId string, router *routerTypes.NetworkRouter, resourcePolicies map[string][]*Policy) []*route.Route { - resourceAppliedPolicies := resourcePolicies[resource.ID] - - var routes []*route.Route - // distribute the resource routes only if there is policy applied to it - if len(resourceAppliedPolicies) > 0 { - peer := a.GetPeer(peerId) - if peer != nil { - routes = append(routes, resource.ToRoute(peer, router)) - } - } - - return routes -} - func (a *Account) GetResourceRoutersMap() map[string]map[string]*routerTypes.NetworkRouter { routers := make(map[string]map[string]*routerTypes.NetworkRouter) diff --git a/management/server/types/account_test.go b/management/server/types/account_test.go index 00ba29b7f..9b1c9e31d 100644 --- a/management/server/types/account_test.go +++ b/management/server/types/account_test.go @@ -4,8 +4,6 @@ import ( "context" "fmt" "net" - "net/netip" - "slices" "testing" "github.com/miekg/dns" @@ -19,7 +17,6 @@ import ( routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" networkTypes "github.com/netbirdio/netbird/management/server/networks/types" nbpeer "github.com/netbirdio/netbird/management/server/peer" - "github.com/netbirdio/netbird/management/server/posture" "github.com/netbirdio/netbird/route" ) @@ -451,402 +448,6 @@ func Test_AddNetworksRoutingPeersHandlesNoMissingPeers(t *testing.T) { require.Len(t, result, 0) } -const ( - accID = "accountID" - network1ID = "network1ID" - group1ID = "group1" - accNetResourcePeer1ID = "peer1" - accNetResourcePeer2ID = "peer2" - accNetResourceRouter1ID = "router1" - accNetResource1ID = "resource1ID" - accNetResourceRestrictPostureCheckID = "restrictPostureCheck" - accNetResourceRelaxedPostureCheckID = "relaxedPostureCheck" - accNetResourceLockedPostureCheckID = "lockedPostureCheck" - accNetResourceLinuxPostureCheckID = "linuxPostureCheck" -) - -var ( - accNetResourcePeer1IP = net.IP{192, 168, 1, 1} - accNetResourcePeer2IP = net.IP{192, 168, 1, 2} - accNetResourceRouter1IP = net.IP{192, 168, 1, 3} - accNetResourceValidPeers = map[string]struct{}{accNetResourcePeer1ID: {}, accNetResourcePeer2ID: {}} -) - -func getBasicAccountsWithResource() *Account { - return &Account{ - Id: accID, - Peers: map[string]*nbpeer.Peer{ - accNetResourcePeer1ID: { - ID: accNetResourcePeer1ID, - AccountID: accID, - Key: "peer1Key", - IP: accNetResourcePeer1IP, - Meta: nbpeer.PeerSystemMeta{ - GoOS: "linux", - WtVersion: "0.35.1", - KernelVersion: "4.4.0", - }, - }, - accNetResourcePeer2ID: { - ID: accNetResourcePeer2ID, - AccountID: accID, - Key: "peer2Key", - IP: accNetResourcePeer2IP, - Meta: nbpeer.PeerSystemMeta{ - GoOS: "windows", - WtVersion: "0.34.1", - KernelVersion: "4.4.0", - }, - }, - accNetResourceRouter1ID: { - ID: accNetResourceRouter1ID, - AccountID: accID, - Key: "router1Key", - IP: accNetResourceRouter1IP, - Meta: nbpeer.PeerSystemMeta{ - GoOS: "linux", - WtVersion: "0.35.1", - KernelVersion: "4.4.0", - }, - }, - }, - Groups: map[string]*Group{ - group1ID: { - ID: group1ID, - Peers: []string{accNetResourcePeer1ID, accNetResourcePeer2ID}, - }, - }, - Networks: []*networkTypes.Network{ - { - ID: network1ID, - AccountID: accID, - Name: "network1", - }, - }, - NetworkRouters: []*routerTypes.NetworkRouter{ - { - ID: accNetResourceRouter1ID, - NetworkID: network1ID, - AccountID: accID, - Peer: accNetResourceRouter1ID, - PeerGroups: []string{}, - Masquerade: false, - Metric: 100, - Enabled: true, - }, - }, - NetworkResources: []*resourceTypes.NetworkResource{ - { - ID: accNetResource1ID, - AccountID: accID, - NetworkID: network1ID, - Address: "10.10.10.0/24", - Prefix: netip.MustParsePrefix("10.10.10.0/24"), - Type: resourceTypes.NetworkResourceType("subnet"), - Enabled: true, - }, - }, - Policies: []*Policy{ - { - ID: "policy1ID", - AccountID: accID, - Enabled: true, - Rules: []*PolicyRule{ - { - ID: "rule1ID", - Enabled: true, - Sources: []string{group1ID}, - DestinationResource: Resource{ - ID: accNetResource1ID, - Type: "Host", - }, - Protocol: PolicyRuleProtocolTCP, - Ports: []string{"80"}, - Action: PolicyTrafficActionAccept, - }, - }, - SourcePostureChecks: nil, - }, - }, - PostureChecks: []*posture.Checks{ - { - ID: accNetResourceRestrictPostureCheckID, - Name: accNetResourceRestrictPostureCheckID, - Checks: posture.ChecksDefinition{ - NBVersionCheck: &posture.NBVersionCheck{ - MinVersion: "0.35.0", - }, - }, - }, - { - ID: accNetResourceRelaxedPostureCheckID, - Name: accNetResourceRelaxedPostureCheckID, - Checks: posture.ChecksDefinition{ - NBVersionCheck: &posture.NBVersionCheck{ - MinVersion: "0.0.1", - }, - }, - }, - { - ID: accNetResourceLockedPostureCheckID, - Name: accNetResourceLockedPostureCheckID, - Checks: posture.ChecksDefinition{ - NBVersionCheck: &posture.NBVersionCheck{ - MinVersion: "7.7.7", - }, - }, - }, - { - ID: accNetResourceLinuxPostureCheckID, - Name: accNetResourceLinuxPostureCheckID, - Checks: posture.ChecksDefinition{ - OSVersionCheck: &posture.OSVersionCheck{ - Linux: &posture.MinKernelVersionCheck{ - MinKernelVersion: "0.0.0"}, - }, - }, - }, - }, - } -} - -func Test_NetworksNetMapGenWithNoPostureChecks(t *testing.T) { - account := getBasicAccountsWithResource() - - // all peers should match the policy - - // validate for peer1 - isRouter, networkResourcesRoutes, sourcePeers := account.GetNetworkResourcesRoutesToSync(context.Background(), accNetResourcePeer1ID, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap()) - assert.False(t, isRouter, "expected router status") - assert.Len(t, networkResourcesRoutes, 1, "expected network resource route don't match") - assert.Len(t, sourcePeers, 0, "expected source peers don't match") - - // validate for peer2 - isRouter, networkResourcesRoutes, sourcePeers = account.GetNetworkResourcesRoutesToSync(context.Background(), accNetResourcePeer2ID, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap()) - assert.False(t, isRouter, "expected router status") - assert.Len(t, networkResourcesRoutes, 1, "expected network resource route don't match") - assert.Len(t, sourcePeers, 0, "expected source peers don't match") - - // validate routes for router1 - isRouter, networkResourcesRoutes, sourcePeers = account.GetNetworkResourcesRoutesToSync(context.Background(), accNetResourceRouter1ID, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap()) - assert.True(t, isRouter, "should be router") - assert.Len(t, networkResourcesRoutes, 1, "expected network resource route don't match") - assert.Len(t, sourcePeers, 2, "expected source peers don't match") - assert.NotNil(t, sourcePeers[accNetResourcePeer1ID], "expected source peers don't match") - assert.NotNil(t, sourcePeers[accNetResourcePeer2ID], "expected source peers don't match") - - // validate rules for router1 - rules := account.GetPeerNetworkResourceFirewallRules(context.Background(), account.Peers[accNetResourceRouter1ID], accNetResourceValidPeers, networkResourcesRoutes, account.GetResourcePoliciesMap()) - assert.Len(t, rules, 1, "expected rules count don't match") - assert.Equal(t, uint16(80), rules[0].Port, "should have port 80") - assert.Equal(t, "tcp", rules[0].Protocol, "should have protocol tcp") - if !slices.Contains(rules[0].SourceRanges, accNetResourcePeer1IP.String()+"/32") { - t.Errorf("%s should have source range of peer1 %s", rules[0].SourceRanges, accNetResourcePeer1IP.String()) - } - if !slices.Contains(rules[0].SourceRanges, accNetResourcePeer2IP.String()+"/32") { - t.Errorf("%s should have source range of peer2 %s", rules[0].SourceRanges, accNetResourcePeer2IP.String()) - } -} - -func Test_NetworksNetMapGenWithPostureChecks(t *testing.T) { - account := getBasicAccountsWithResource() - - // should allow peer1 to match the policy - policy := account.Policies[0] - policy.SourcePostureChecks = []string{accNetResourceRestrictPostureCheckID} - - // validate for peer1 - isRouter, networkResourcesRoutes, sourcePeers := account.GetNetworkResourcesRoutesToSync(context.Background(), accNetResourcePeer1ID, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap()) - assert.False(t, isRouter, "expected router status") - assert.Len(t, networkResourcesRoutes, 1, "expected network resource route don't match") - assert.Len(t, sourcePeers, 0, "expected source peers don't match") - - // validate for peer2 - isRouter, networkResourcesRoutes, sourcePeers = account.GetNetworkResourcesRoutesToSync(context.Background(), accNetResourcePeer2ID, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap()) - assert.False(t, isRouter, "expected router status") - assert.Len(t, networkResourcesRoutes, 0, "expected network resource route don't match") - assert.Len(t, sourcePeers, 0, "expected source peers don't match") - - // validate routes for router1 - isRouter, networkResourcesRoutes, sourcePeers = account.GetNetworkResourcesRoutesToSync(context.Background(), accNetResourceRouter1ID, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap()) - assert.True(t, isRouter, "should be router") - assert.Len(t, networkResourcesRoutes, 1, "expected network resource route don't match") - assert.Len(t, sourcePeers, 1, "expected source peers don't match") - assert.NotNil(t, sourcePeers[accNetResourcePeer1ID], "expected source peers don't match") - - // validate rules for router1 - rules := account.GetPeerNetworkResourceFirewallRules(context.Background(), account.Peers[accNetResourceRouter1ID], accNetResourceValidPeers, networkResourcesRoutes, account.GetResourcePoliciesMap()) - assert.Len(t, rules, 1, "expected rules count don't match") - assert.Equal(t, uint16(80), rules[0].Port, "should have port 80") - assert.Equal(t, "tcp", rules[0].Protocol, "should have protocol tcp") - if !slices.Contains(rules[0].SourceRanges, accNetResourcePeer1IP.String()+"/32") { - t.Errorf("%s should have source range of peer1 %s", rules[0].SourceRanges, accNetResourcePeer1IP.String()) - } - if slices.Contains(rules[0].SourceRanges, accNetResourcePeer2IP.String()+"/32") { - t.Errorf("%s should not have source range of peer2 %s", rules[0].SourceRanges, accNetResourcePeer2IP.String()) - } -} - -func Test_NetworksNetMapGenWithNoMatchedPostureChecks(t *testing.T) { - account := getBasicAccountsWithResource() - - // should not match any peer - policy := account.Policies[0] - policy.SourcePostureChecks = []string{accNetResourceLockedPostureCheckID} - - // validate for peer1 - isRouter, networkResourcesRoutes, sourcePeers := account.GetNetworkResourcesRoutesToSync(context.Background(), accNetResourcePeer1ID, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap()) - assert.False(t, isRouter, "expected router status") - assert.Len(t, networkResourcesRoutes, 0, "expected network resource route don't match") - assert.Len(t, sourcePeers, 0, "expected source peers don't match") - - // validate for peer2 - isRouter, networkResourcesRoutes, sourcePeers = account.GetNetworkResourcesRoutesToSync(context.Background(), accNetResourcePeer2ID, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap()) - assert.False(t, isRouter, "expected router status") - assert.Len(t, networkResourcesRoutes, 0, "expected network resource route don't match") - assert.Len(t, sourcePeers, 0, "expected source peers don't match") - - // validate routes for router1 - isRouter, networkResourcesRoutes, sourcePeers = account.GetNetworkResourcesRoutesToSync(context.Background(), accNetResourceRouter1ID, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap()) - assert.True(t, isRouter, "should be router") - assert.Len(t, networkResourcesRoutes, 1, "expected network resource route don't match") - assert.Len(t, sourcePeers, 0, "expected source peers don't match") - - // validate rules for router1 - rules := account.GetPeerNetworkResourceFirewallRules(context.Background(), account.Peers[accNetResourceRouter1ID], accNetResourceValidPeers, networkResourcesRoutes, account.GetResourcePoliciesMap()) - assert.Len(t, rules, 0, "expected rules count don't match") -} - -func Test_NetworksNetMapGenWithTwoPoliciesAndPostureChecks(t *testing.T) { - account := getBasicAccountsWithResource() - - // should allow peer1 to match the policy - policy := account.Policies[0] - policy.SourcePostureChecks = []string{accNetResourceRestrictPostureCheckID} - - // should allow peer1 and peer2 to match the policy - newPolicy := &Policy{ - ID: "policy2ID", - AccountID: accID, - Enabled: true, - Rules: []*PolicyRule{ - { - ID: "policy2ID", - Enabled: true, - Sources: []string{group1ID}, - DestinationResource: Resource{ - ID: accNetResource1ID, - Type: "Host", - }, - Protocol: PolicyRuleProtocolTCP, - Ports: []string{"22"}, - Action: PolicyTrafficActionAccept, - }, - }, - SourcePostureChecks: []string{accNetResourceRelaxedPostureCheckID}, - } - - account.Policies = append(account.Policies, newPolicy) - - // validate for peer1 - isRouter, networkResourcesRoutes, sourcePeers := account.GetNetworkResourcesRoutesToSync(context.Background(), accNetResourcePeer1ID, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap()) - assert.False(t, isRouter, "expected router status") - assert.Len(t, networkResourcesRoutes, 1, "expected network resource route don't match") - assert.Len(t, sourcePeers, 0, "expected source peers don't match") - - // validate for peer2 - isRouter, networkResourcesRoutes, sourcePeers = account.GetNetworkResourcesRoutesToSync(context.Background(), accNetResourcePeer2ID, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap()) - assert.False(t, isRouter, "expected router status") - assert.Len(t, networkResourcesRoutes, 1, "expected network resource route don't match") - assert.Len(t, sourcePeers, 0, "expected source peers don't match") - - // validate routes for router1 - isRouter, networkResourcesRoutes, sourcePeers = account.GetNetworkResourcesRoutesToSync(context.Background(), accNetResourceRouter1ID, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap()) - assert.True(t, isRouter, "should be router") - assert.Len(t, networkResourcesRoutes, 1, "expected network resource route don't match") - assert.Len(t, sourcePeers, 2, "expected source peers don't match") - assert.NotNil(t, sourcePeers[accNetResourcePeer1ID], "expected source peers don't match") - assert.NotNil(t, sourcePeers[accNetResourcePeer2ID], "expected source peers don't match") - - // validate rules for router1 - rules := account.GetPeerNetworkResourceFirewallRules(context.Background(), account.Peers[accNetResourceRouter1ID], accNetResourceValidPeers, networkResourcesRoutes, account.GetResourcePoliciesMap()) - assert.Len(t, rules, 2, "expected rules count don't match") - assert.Equal(t, uint16(80), rules[0].Port, "should have port 80") - assert.Equal(t, "tcp", rules[0].Protocol, "should have protocol tcp") - if !slices.Contains(rules[0].SourceRanges, accNetResourcePeer1IP.String()+"/32") { - t.Errorf("%s should have source range of peer1 %s", rules[0].SourceRanges, accNetResourcePeer1IP.String()) - } - if slices.Contains(rules[0].SourceRanges, accNetResourcePeer2IP.String()+"/32") { - t.Errorf("%s should not have source range of peer2 %s", rules[0].SourceRanges, accNetResourcePeer2IP.String()) - } - - assert.Equal(t, uint16(22), rules[1].Port, "should have port 22") - assert.Equal(t, "tcp", rules[1].Protocol, "should have protocol tcp") - if !slices.Contains(rules[1].SourceRanges, accNetResourcePeer1IP.String()+"/32") { - t.Errorf("%s should have source range of peer1 %s", rules[1].SourceRanges, accNetResourcePeer1IP.String()) - } - if !slices.Contains(rules[1].SourceRanges, accNetResourcePeer2IP.String()+"/32") { - t.Errorf("%s should have source range of peer2 %s", rules[1].SourceRanges, accNetResourcePeer2IP.String()) - } -} - -func Test_NetworksNetMapGenWithTwoPostureChecks(t *testing.T) { - account := getBasicAccountsWithResource() - - // two posture checks should match only the peers that match both checks - policy := account.Policies[0] - policy.SourcePostureChecks = []string{accNetResourceRelaxedPostureCheckID, accNetResourceLinuxPostureCheckID} - - // validate for peer1 - isRouter, networkResourcesRoutes, sourcePeers := account.GetNetworkResourcesRoutesToSync(context.Background(), accNetResourcePeer1ID, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap()) - assert.False(t, isRouter, "expected router status") - assert.Len(t, networkResourcesRoutes, 1, "expected network resource route don't match") - assert.Len(t, sourcePeers, 0, "expected source peers don't match") - - // validate for peer2 - isRouter, networkResourcesRoutes, sourcePeers = account.GetNetworkResourcesRoutesToSync(context.Background(), accNetResourcePeer2ID, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap()) - assert.False(t, isRouter, "expected router status") - assert.Len(t, networkResourcesRoutes, 0, "expected network resource route don't match") - assert.Len(t, sourcePeers, 0, "expected source peers don't match") - - // validate routes for router1 - isRouter, networkResourcesRoutes, sourcePeers = account.GetNetworkResourcesRoutesToSync(context.Background(), accNetResourceRouter1ID, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap()) - assert.True(t, isRouter, "should be router") - assert.Len(t, networkResourcesRoutes, 1, "expected network resource route don't match") - assert.Len(t, sourcePeers, 1, "expected source peers don't match") - assert.NotNil(t, sourcePeers[accNetResourcePeer1ID], "expected source peers don't match") - - // validate rules for router1 - rules := account.GetPeerNetworkResourceFirewallRules(context.Background(), account.Peers[accNetResourceRouter1ID], accNetResourceValidPeers, networkResourcesRoutes, account.GetResourcePoliciesMap()) - assert.Len(t, rules, 1, "expected rules count don't match") - assert.Equal(t, uint16(80), rules[0].Port, "should have port 80") - assert.Equal(t, "tcp", rules[0].Protocol, "should have protocol tcp") - if !slices.Contains(rules[0].SourceRanges, accNetResourcePeer1IP.String()+"/32") { - t.Errorf("%s should have source range of peer1 %s", rules[0].SourceRanges, accNetResourcePeer1IP.String()) - } - if slices.Contains(rules[0].SourceRanges, accNetResourcePeer2IP.String()+"/32") { - t.Errorf("%s should not have source range of peer2 %s", rules[0].SourceRanges, accNetResourcePeer2IP.String()) - } -} - -func Test_NetworksNetMapGenShouldExcludeOtherRouters(t *testing.T) { - account := getBasicAccountsWithResource() - - account.Peers["router2Id"] = &nbpeer.Peer{Key: "router2Key", ID: "router2Id", AccountID: accID, IP: net.IP{192, 168, 1, 4}} - account.NetworkRouters = append(account.NetworkRouters, &routerTypes.NetworkRouter{ - ID: "router2Id", - NetworkID: network1ID, - AccountID: accID, - Peer: "router2Id", - }) - - // validate routes for router1 - isRouter, networkResourcesRoutes, sourcePeers := account.GetNetworkResourcesRoutesToSync(context.Background(), accNetResourceRouter1ID, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap()) - assert.True(t, isRouter, "should be router") - assert.Len(t, networkResourcesRoutes, 1, "expected network resource route don't match") - assert.Len(t, sourcePeers, 2, "expected source peers don't match") -} - func Test_ExpandPortsAndRanges_SSHRuleExpansion(t *testing.T) { tests := []struct { name string diff --git a/management/server/types/holder.go b/management/server/types/holder.go deleted file mode 100644 index de8ac8110..000000000 --- a/management/server/types/holder.go +++ /dev/null @@ -1,47 +0,0 @@ -package types - -import ( - "context" - "sync" -) - -type Holder struct { - mu sync.RWMutex - accounts map[string]*Account -} - -func NewHolder() *Holder { - return &Holder{ - accounts: make(map[string]*Account), - } -} - -func (h *Holder) GetAccount(id string) *Account { - h.mu.RLock() - defer h.mu.RUnlock() - return h.accounts[id] -} - -func (h *Holder) AddAccount(account *Account) { - h.mu.Lock() - defer h.mu.Unlock() - a := h.accounts[account.Id] - if a != nil && a.Network.CurrentSerial() >= account.Network.CurrentSerial() { - return - } - h.accounts[account.Id] = account -} - -func (h *Holder) LoadOrStoreFunc(ctx context.Context, id string, accGetter func(context.Context, string) (*Account, error)) (*Account, error) { - h.mu.Lock() - defer h.mu.Unlock() - if acc, ok := h.accounts[id]; ok { - return acc, nil - } - account, err := accGetter(ctx, id) - if err != nil { - return nil, err - } - h.accounts[id] = account - return account, nil -} diff --git a/management/server/types/networkmap.go b/management/server/types/networkmap.go deleted file mode 100644 index 68c988a93..000000000 --- a/management/server/types/networkmap.go +++ /dev/null @@ -1,67 +0,0 @@ -package types - -import ( - "context" - - nbdns "github.com/netbirdio/netbird/dns" - "github.com/netbirdio/netbird/management/internals/modules/zones" - nbpeer "github.com/netbirdio/netbird/management/server/peer" - "github.com/netbirdio/netbird/management/server/telemetry" -) - -func (a *Account) initNetworkMapBuilder(validatedPeers map[string]struct{}) { - if a.NetworkMapCache != nil { - return - } - a.nmapInitOnce.Do(func() { - a.NetworkMapCache = NewNetworkMapBuilder(a, validatedPeers) - }) -} - -func (a *Account) InitNetworkMapBuilderIfNeeded(validatedPeers map[string]struct{}) { - a.initNetworkMapBuilder(validatedPeers) -} - -func (a *Account) GetPeerNetworkMapExp( - ctx context.Context, - peerID string, - peersCustomZone nbdns.CustomZone, - accountZones []*zones.Zone, - validatedPeers map[string]struct{}, - metrics *telemetry.AccountManagerMetrics, -) *NetworkMap { - a.initNetworkMapBuilder(validatedPeers) - return a.NetworkMapCache.GetPeerNetworkMap(ctx, peerID, peersCustomZone, accountZones, validatedPeers, metrics) -} - -func (a *Account) OnPeerAddedUpdNetworkMapCache(peerId string) error { - if a.NetworkMapCache == nil { - return nil - } - return a.NetworkMapCache.OnPeerAddedIncremental(a, peerId) -} - -func (a *Account) OnPeersAddedUpdNetworkMapCache(peerIds ...string) { - if a.NetworkMapCache == nil { - return - } - a.NetworkMapCache.EnqueuePeersForIncrementalAdd(a, peerIds...) -} - -func (a *Account) OnPeerDeletedUpdNetworkMapCache(peerId string) error { - if a.NetworkMapCache == nil { - return nil - } - return a.NetworkMapCache.OnPeerDeleted(a, peerId) -} - -func (a *Account) UpdatePeerInNetworkMapCache(peer *nbpeer.Peer) { - if a.NetworkMapCache == nil { - return - } - a.NetworkMapCache.UpdatePeer(peer) -} - -func (a *Account) RecalculateNetworkMapCache(validatedPeers map[string]struct{}) { - a.initNetworkMapBuilder(validatedPeers) -} diff --git a/management/server/types/networkmap_comparison_test.go b/management/server/types/networkmap_comparison_test.go deleted file mode 100644 index c5844cca0..000000000 --- a/management/server/types/networkmap_comparison_test.go +++ /dev/null @@ -1,592 +0,0 @@ -package types - -import ( - "context" - "encoding/json" - "fmt" - "net" - "net/netip" - "os" - "path/filepath" - "sort" - "testing" - "time" - - "github.com/stretchr/testify/require" - - nbdns "github.com/netbirdio/netbird/dns" - resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" - routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" - networkTypes "github.com/netbirdio/netbird/management/server/networks/types" - nbpeer "github.com/netbirdio/netbird/management/server/peer" - "github.com/netbirdio/netbird/management/server/posture" - "github.com/netbirdio/netbird/route" -) - -func TestNetworkMapComponents_CompareWithLegacy(t *testing.T) { - account := createTestAccount() - ctx := context.Background() - - peerID := testingPeerID - validatedPeersMap := make(map[string]struct{}) - for i := range numPeers { - pid := fmt.Sprintf("peer-%d", i) - if pid == offlinePeerID { - continue - } - validatedPeersMap[pid] = struct{}{} - } - - peersCustomZone := nbdns.CustomZone{} - resourcePolicies := account.GetResourcePoliciesMap() - routers := account.GetResourceRoutersMap() - groupIDToUserIDs := account.GetActiveGroupUsers() - - legacyNetworkMap := account.GetPeerNetworkMap( - ctx, - peerID, - peersCustomZone, - nil, - validatedPeersMap, - resourcePolicies, - routers, - nil, - groupIDToUserIDs, - ) - - components := account.GetPeerNetworkMapComponents( - ctx, - peerID, - peersCustomZone, - nil, - validatedPeersMap, - resourcePolicies, - routers, - groupIDToUserIDs, - ) - - if components == nil { - t.Fatal("GetPeerNetworkMapComponents returned nil") - } - - newNetworkMap := CalculateNetworkMapFromComponents(ctx, components) - - if newNetworkMap == nil { - t.Fatal("CalculateNetworkMapFromComponents returned nil") - } - - compareNetworkMaps(t, legacyNetworkMap, newNetworkMap) -} - -func TestNetworkMapComponents_GoldenFileComparison(t *testing.T) { - account := createTestAccount() - ctx := context.Background() - - peerID := testingPeerID - validatedPeersMap := make(map[string]struct{}) - for i := range numPeers { - pid := fmt.Sprintf("peer-%d", i) - if pid == offlinePeerID { - continue - } - validatedPeersMap[pid] = struct{}{} - } - - peersCustomZone := nbdns.CustomZone{} - resourcePolicies := account.GetResourcePoliciesMap() - routers := account.GetResourceRoutersMap() - groupIDToUserIDs := account.GetActiveGroupUsers() - - legacyNetworkMap := account.GetPeerNetworkMap( - ctx, - peerID, - peersCustomZone, - nil, - validatedPeersMap, - resourcePolicies, - routers, - nil, - groupIDToUserIDs, - ) - - components := account.GetPeerNetworkMapComponents( - ctx, - peerID, - peersCustomZone, - nil, - validatedPeersMap, - resourcePolicies, - routers, - groupIDToUserIDs, - ) - - require.NotNil(t, components, "GetPeerNetworkMapComponents returned nil") - - newNetworkMap := CalculateNetworkMapFromComponents(ctx, components) - require.NotNil(t, newNetworkMap, "CalculateNetworkMapFromComponents returned nil") - - normalizeAndSortNetworkMap(legacyNetworkMap) - normalizeAndSortNetworkMap(newNetworkMap) - - componentsJSON, err := json.MarshalIndent(components, "", " ") - require.NoError(t, err, "error marshaling components to JSON") - - legacyJSON, err := json.MarshalIndent(legacyNetworkMap, "", " ") - require.NoError(t, err, "error marshaling legacy network map to JSON") - - newJSON, err := json.MarshalIndent(newNetworkMap, "", " ") - require.NoError(t, err, "error marshaling new network map to JSON") - - goldenDir := filepath.Join("testdata", "comparison") - err = os.MkdirAll(goldenDir, 0755) - require.NoError(t, err) - - legacyGoldenPath := filepath.Join(goldenDir, "legacy_networkmap.json") - err = os.WriteFile(legacyGoldenPath, legacyJSON, 0644) - require.NoError(t, err, "error writing legacy golden file") - - newGoldenPath := filepath.Join(goldenDir, "components_networkmap.json") - err = os.WriteFile(newGoldenPath, newJSON, 0644) - require.NoError(t, err, "error writing components golden file") - - componentsPath := filepath.Join(goldenDir, "components.json") - err = os.WriteFile(componentsPath, componentsJSON, 0644) - require.NoError(t, err, "error writing components golden file") - - require.JSONEq(t, string(legacyJSON), string(newJSON), - "NetworkMaps from legacy and components approaches do not match.\n"+ - "Legacy JSON saved to: %s\n"+ - "Components JSON saved to: %s", - legacyGoldenPath, newGoldenPath) - - t.Logf("✅ NetworkMaps are identical") - t.Logf(" Legacy NetworkMap: %s", legacyGoldenPath) - t.Logf(" Components NetworkMap: %s", newGoldenPath) -} - -func normalizeAndSortNetworkMap(nm *NetworkMap) { - if nm == nil { - return - } - - sort.Slice(nm.Peers, func(i, j int) bool { - return nm.Peers[i].ID < nm.Peers[j].ID - }) - - sort.Slice(nm.OfflinePeers, func(i, j int) bool { - return nm.OfflinePeers[i].ID < nm.OfflinePeers[j].ID - }) - - sort.Slice(nm.Routes, func(i, j int) bool { - return string(nm.Routes[i].ID) < string(nm.Routes[j].ID) - }) - - sort.Slice(nm.FirewallRules, func(i, j int) bool { - if nm.FirewallRules[i].PeerIP != nm.FirewallRules[j].PeerIP { - return nm.FirewallRules[i].PeerIP < nm.FirewallRules[j].PeerIP - } - if nm.FirewallRules[i].Direction != nm.FirewallRules[j].Direction { - return nm.FirewallRules[i].Direction < nm.FirewallRules[j].Direction - } - if nm.FirewallRules[i].Protocol != nm.FirewallRules[j].Protocol { - return nm.FirewallRules[i].Protocol < nm.FirewallRules[j].Protocol - } - if nm.FirewallRules[i].Port != nm.FirewallRules[j].Port { - return nm.FirewallRules[i].Port < nm.FirewallRules[j].Port - } - return nm.FirewallRules[i].PolicyID < nm.FirewallRules[j].PolicyID - }) - - for i := range nm.RoutesFirewallRules { - sort.Strings(nm.RoutesFirewallRules[i].SourceRanges) - } - - sort.Slice(nm.RoutesFirewallRules, func(i, j int) bool { - if nm.RoutesFirewallRules[i].Destination != nm.RoutesFirewallRules[j].Destination { - return nm.RoutesFirewallRules[i].Destination < nm.RoutesFirewallRules[j].Destination - } - - minLen := len(nm.RoutesFirewallRules[i].SourceRanges) - if len(nm.RoutesFirewallRules[j].SourceRanges) < minLen { - minLen = len(nm.RoutesFirewallRules[j].SourceRanges) - } - for k := 0; k < minLen; k++ { - if nm.RoutesFirewallRules[i].SourceRanges[k] != nm.RoutesFirewallRules[j].SourceRanges[k] { - return nm.RoutesFirewallRules[i].SourceRanges[k] < nm.RoutesFirewallRules[j].SourceRanges[k] - } - } - if len(nm.RoutesFirewallRules[i].SourceRanges) != len(nm.RoutesFirewallRules[j].SourceRanges) { - return len(nm.RoutesFirewallRules[i].SourceRanges) < len(nm.RoutesFirewallRules[j].SourceRanges) - } - - if string(nm.RoutesFirewallRules[i].RouteID) != string(nm.RoutesFirewallRules[j].RouteID) { - return string(nm.RoutesFirewallRules[i].RouteID) < string(nm.RoutesFirewallRules[j].RouteID) - } - - if nm.RoutesFirewallRules[i].PolicyID != nm.RoutesFirewallRules[j].PolicyID { - return nm.RoutesFirewallRules[i].PolicyID < nm.RoutesFirewallRules[j].PolicyID - } - - if nm.RoutesFirewallRules[i].Port != nm.RoutesFirewallRules[j].Port { - return nm.RoutesFirewallRules[i].Port < nm.RoutesFirewallRules[j].Port - } - - return nm.RoutesFirewallRules[i].Protocol < nm.RoutesFirewallRules[j].Protocol - }) - - if nm.DNSConfig.CustomZones != nil { - for i := range nm.DNSConfig.CustomZones { - sort.Slice(nm.DNSConfig.CustomZones[i].Records, func(a, b int) bool { - return nm.DNSConfig.CustomZones[i].Records[a].Name < nm.DNSConfig.CustomZones[i].Records[b].Name - }) - } - } - - if len(nm.DNSConfig.NameServerGroups) != 0 { - sort.Slice(nm.DNSConfig.NameServerGroups, func(a, b int) bool { - return nm.DNSConfig.NameServerGroups[a].Name < nm.DNSConfig.NameServerGroups[b].Name - }) - } -} - -func compareNetworkMaps(t *testing.T, legacy, current *NetworkMap) { - t.Helper() - - if legacy.Network.Serial != current.Network.Serial { - t.Errorf("Network Serial mismatch: legacy=%d, current=%d", legacy.Network.Serial, current.Network.Serial) - } - - if len(legacy.Peers) != len(current.Peers) { - t.Errorf("Peers count mismatch: legacy=%d, current=%d", len(legacy.Peers), len(current.Peers)) - } - - legacyPeerIDs := make(map[string]bool) - for _, p := range legacy.Peers { - legacyPeerIDs[p.ID] = true - } - - for _, p := range current.Peers { - if !legacyPeerIDs[p.ID] { - t.Errorf("Current NetworkMap contains peer %s not in legacy", p.ID) - } - } - - if len(legacy.OfflinePeers) != len(current.OfflinePeers) { - t.Errorf("OfflinePeers count mismatch: legacy=%d, current=%d", len(legacy.OfflinePeers), len(current.OfflinePeers)) - } - - if len(legacy.FirewallRules) != len(current.FirewallRules) { - t.Logf("FirewallRules count mismatch: legacy=%d, current=%d", len(legacy.FirewallRules), len(current.FirewallRules)) - } - - if len(legacy.Routes) != len(current.Routes) { - t.Logf("Routes count mismatch: legacy=%d, current=%d", len(legacy.Routes), len(current.Routes)) - } - - if len(legacy.RoutesFirewallRules) != len(current.RoutesFirewallRules) { - t.Logf("RoutesFirewallRules count mismatch: legacy=%d, current=%d", len(legacy.RoutesFirewallRules), len(current.RoutesFirewallRules)) - } - - if legacy.DNSConfig.ServiceEnable != current.DNSConfig.ServiceEnable { - t.Errorf("DNSConfig.ServiceEnable mismatch: legacy=%v, current=%v", legacy.DNSConfig.ServiceEnable, current.DNSConfig.ServiceEnable) - } -} - -const ( - numPeers = 100 - devGroupID = "group-dev" - opsGroupID = "group-ops" - allGroupID = "group-all" - routeID = route.ID("route-main") - routeHA1ID = route.ID("route-ha-1") - routeHA2ID = route.ID("route-ha-2") - policyIDDevOps = "policy-dev-ops" - policyIDAll = "policy-all" - policyIDPosture = "policy-posture" - policyIDDrop = "policy-drop" - postureCheckID = "posture-check-ver" - networkResourceID = "res-database" - networkID = "net-database" - networkRouterID = "router-database" - nameserverGroupID = "ns-group-main" - testingPeerID = "peer-60" - expiredPeerID = "peer-98" - offlinePeerID = "peer-99" - routingPeerID = "peer-95" - testAccountID = "account-comparison-test" -) - -func createTestAccount() *Account { - peers := make(map[string]*nbpeer.Peer) - devGroupPeers, opsGroupPeers, allGroupPeers := []string{}, []string{}, []string{} - - for i := range numPeers { - peerID := fmt.Sprintf("peer-%d", i) - ip := net.IP{100, 64, 0, byte(i + 1)} - wtVersion := "0.25.0" - if i%2 == 0 { - wtVersion = "0.40.0" - } - - p := &nbpeer.Peer{ - ID: peerID, IP: ip, Key: fmt.Sprintf("key-%s", peerID), DNSLabel: fmt.Sprintf("peer%d", i+1), - Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, - UserID: "user-admin", Meta: nbpeer.PeerSystemMeta{WtVersion: wtVersion, GoOS: "linux"}, - } - - if peerID == expiredPeerID { - p.LoginExpirationEnabled = true - pastTimestamp := time.Now().Add(-2 * time.Hour) - p.LastLogin = &pastTimestamp - } - - peers[peerID] = p - allGroupPeers = append(allGroupPeers, peerID) - if i < numPeers/2 { - devGroupPeers = append(devGroupPeers, peerID) - } else { - opsGroupPeers = append(opsGroupPeers, peerID) - } - } - - groups := map[string]*Group{ - allGroupID: {ID: allGroupID, Name: "All", Peers: allGroupPeers}, - devGroupID: {ID: devGroupID, Name: "Developers", Peers: devGroupPeers}, - opsGroupID: {ID: opsGroupID, Name: "Operations", Peers: opsGroupPeers}, - } - - policies := []*Policy{ - { - ID: policyIDAll, Name: "Default-Allow", Enabled: true, - Rules: []*PolicyRule{{ - ID: policyIDAll, Name: "Allow All", Enabled: true, Action: PolicyTrafficActionAccept, - Protocol: PolicyRuleProtocolALL, Bidirectional: true, - Sources: []string{allGroupID}, Destinations: []string{allGroupID}, - }}, - }, - { - ID: policyIDDevOps, Name: "Dev to Ops Web Access", Enabled: true, - Rules: []*PolicyRule{{ - ID: policyIDDevOps, Name: "Dev -> Ops (HTTP Range)", Enabled: true, Action: PolicyTrafficActionAccept, - Protocol: PolicyRuleProtocolTCP, Bidirectional: false, - PortRanges: []RulePortRange{{Start: 8080, End: 8090}}, - Sources: []string{devGroupID}, Destinations: []string{opsGroupID}, - }}, - }, - { - ID: policyIDDrop, Name: "Drop DB traffic", Enabled: true, - Rules: []*PolicyRule{{ - ID: policyIDDrop, Name: "Drop DB", Enabled: true, Action: PolicyTrafficActionDrop, - Protocol: PolicyRuleProtocolTCP, Ports: []string{"5432"}, Bidirectional: true, - Sources: []string{devGroupID}, Destinations: []string{opsGroupID}, - }}, - }, - { - ID: policyIDPosture, Name: "Posture Check for DB Resource", Enabled: true, - SourcePostureChecks: []string{postureCheckID}, - Rules: []*PolicyRule{{ - ID: policyIDPosture, Name: "Allow DB Access", Enabled: true, Action: PolicyTrafficActionAccept, - Protocol: PolicyRuleProtocolALL, Bidirectional: true, - Sources: []string{opsGroupID}, DestinationResource: Resource{ID: networkResourceID}, - }}, - }, - } - - routes := map[route.ID]*route.Route{ - routeID: { - ID: routeID, Network: netip.MustParsePrefix("192.168.10.0/24"), - Peer: peers["peer-75"].Key, - PeerID: "peer-75", - Description: "Route to internal resource", Enabled: true, - PeerGroups: []string{devGroupID, opsGroupID}, - Groups: []string{devGroupID, opsGroupID}, - AccessControlGroups: []string{devGroupID}, - }, - routeHA1ID: { - ID: routeHA1ID, Network: netip.MustParsePrefix("10.10.0.0/16"), - Peer: peers["peer-80"].Key, - PeerID: "peer-80", - Description: "HA Route 1", Enabled: true, Metric: 1000, - PeerGroups: []string{allGroupID}, - Groups: []string{allGroupID}, - AccessControlGroups: []string{allGroupID}, - }, - routeHA2ID: { - ID: routeHA2ID, Network: netip.MustParsePrefix("10.10.0.0/16"), - Peer: peers["peer-90"].Key, - PeerID: "peer-90", - Description: "HA Route 2", Enabled: true, Metric: 900, - PeerGroups: []string{devGroupID, opsGroupID}, - Groups: []string{devGroupID, opsGroupID}, - AccessControlGroups: []string{allGroupID}, - }, - } - - account := &Account{ - Id: testAccountID, Peers: peers, Groups: groups, Policies: policies, Routes: routes, - Network: &Network{ - Identifier: "net-comparison-test", Net: net.IPNet{IP: net.IP{100, 64, 0, 0}, Mask: net.CIDRMask(16, 32)}, Serial: 1, - }, - DNSSettings: DNSSettings{DisabledManagementGroups: []string{opsGroupID}}, - NameServerGroups: map[string]*nbdns.NameServerGroup{ - nameserverGroupID: { - ID: nameserverGroupID, Name: "Main NS", Enabled: true, Groups: []string{devGroupID}, - NameServers: []nbdns.NameServer{{IP: netip.MustParseAddr("8.8.8.8"), NSType: nbdns.UDPNameServerType, Port: 53}}, - }, - }, - PostureChecks: []*posture.Checks{ - {ID: postureCheckID, Name: "Check version", Checks: posture.ChecksDefinition{ - NBVersionCheck: &posture.NBVersionCheck{MinVersion: "0.26.0"}, - }}, - }, - NetworkResources: []*resourceTypes.NetworkResource{ - {ID: networkResourceID, NetworkID: networkID, AccountID: testAccountID, Enabled: true, Address: "db.netbird.cloud"}, - }, - Networks: []*networkTypes.Network{{ID: networkID, Name: "DB Network", AccountID: testAccountID}}, - NetworkRouters: []*routerTypes.NetworkRouter{ - {ID: networkRouterID, NetworkID: networkID, Peer: routingPeerID, Enabled: true, AccountID: testAccountID}, - }, - Settings: &Settings{PeerLoginExpirationEnabled: true, PeerLoginExpiration: 1 * time.Hour}, - } - - for _, p := range account.Policies { - p.AccountID = account.Id - } - for _, r := range account.Routes { - r.AccountID = account.Id - } - - return account -} - -func BenchmarkLegacyNetworkMap(b *testing.B) { - account := createTestAccount() - ctx := context.Background() - peerID := testingPeerID - validatedPeersMap := make(map[string]struct{}) - for i := range numPeers { - pid := fmt.Sprintf("peer-%d", i) - if pid != offlinePeerID { - validatedPeersMap[pid] = struct{}{} - } - } - - peersCustomZone := nbdns.CustomZone{} - resourcePolicies := account.GetResourcePoliciesMap() - routers := account.GetResourceRoutersMap() - groupIDToUserIDs := account.GetActiveGroupUsers() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = account.GetPeerNetworkMap( - ctx, - peerID, - peersCustomZone, - nil, - validatedPeersMap, - resourcePolicies, - routers, - nil, - groupIDToUserIDs, - ) - } -} - -func BenchmarkComponentsNetworkMap(b *testing.B) { - account := createTestAccount() - ctx := context.Background() - peerID := testingPeerID - validatedPeersMap := make(map[string]struct{}) - for i := range numPeers { - pid := fmt.Sprintf("peer-%d", i) - if pid != offlinePeerID { - validatedPeersMap[pid] = struct{}{} - } - } - - peersCustomZone := nbdns.CustomZone{} - resourcePolicies := account.GetResourcePoliciesMap() - routers := account.GetResourceRoutersMap() - groupIDToUserIDs := account.GetActiveGroupUsers() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - components := account.GetPeerNetworkMapComponents( - ctx, - peerID, - peersCustomZone, - nil, - validatedPeersMap, - resourcePolicies, - routers, - groupIDToUserIDs, - ) - _ = CalculateNetworkMapFromComponents(ctx, components) - } -} - -func BenchmarkComponentsCreation(b *testing.B) { - account := createTestAccount() - ctx := context.Background() - peerID := testingPeerID - validatedPeersMap := make(map[string]struct{}) - for i := range numPeers { - pid := fmt.Sprintf("peer-%d", i) - if pid != offlinePeerID { - validatedPeersMap[pid] = struct{}{} - } - } - - peersCustomZone := nbdns.CustomZone{} - resourcePolicies := account.GetResourcePoliciesMap() - routers := account.GetResourceRoutersMap() - groupIDToUserIDs := account.GetActiveGroupUsers() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = account.GetPeerNetworkMapComponents( - ctx, - peerID, - peersCustomZone, - nil, - validatedPeersMap, - resourcePolicies, - routers, - groupIDToUserIDs, - ) - } -} - -func BenchmarkCalculationFromComponents(b *testing.B) { - account := createTestAccount() - ctx := context.Background() - peerID := testingPeerID - validatedPeersMap := make(map[string]struct{}) - for i := range numPeers { - pid := fmt.Sprintf("peer-%d", i) - if pid != offlinePeerID { - validatedPeersMap[pid] = struct{}{} - } - } - - peersCustomZone := nbdns.CustomZone{} - resourcePolicies := account.GetResourcePoliciesMap() - routers := account.GetResourceRoutersMap() - groupIDToUserIDs := account.GetActiveGroupUsers() - - components := account.GetPeerNetworkMapComponents( - ctx, - peerID, - peersCustomZone, - nil, - validatedPeersMap, - resourcePolicies, - routers, - groupIDToUserIDs, - ) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = CalculateNetworkMapFromComponents(ctx, components) - } -} diff --git a/management/server/types/networkmap_components.go b/management/server/types/networkmap_components.go index 23d84a994..6f84c8d30 100644 --- a/management/server/types/networkmap_components.go +++ b/management/server/types/networkmap_components.go @@ -19,8 +19,6 @@ import ( "github.com/netbirdio/netbird/shared/management/domain" ) -const EnvNewNetworkMapCompacted = "NB_NETWORK_MAP_COMPACTED" - type NetworkMapComponents struct { PeerID string diff --git a/management/server/types/networkmap_components_test.go b/management/server/types/networkmap_components_test.go new file mode 100644 index 000000000..dde639ccb --- /dev/null +++ b/management/server/types/networkmap_components_test.go @@ -0,0 +1,787 @@ +package types_test + +import ( + "context" + "net" + "net/netip" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + nbdns "github.com/netbirdio/netbird/dns" + resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" + routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" + networkTypes "github.com/netbirdio/netbird/management/server/networks/types" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/posture" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/route" +) + +func networkMapFromComponents(t *testing.T, account *types.Account, peerID string, validatedPeers map[string]struct{}) *types.NetworkMap { + t.Helper() + return account.GetPeerNetworkMapFromComponents( + context.Background(), + peerID, + account.GetPeersCustomZone(context.Background(), "netbird.io"), + nil, + validatedPeers, + account.GetResourcePoliciesMap(), + account.GetResourceRoutersMap(), + nil, + account.GetActiveGroupUsers(), + ) +} + +func allPeersValidated(account *types.Account, excludePeerIDs ...string) map[string]struct{} { + excludeSet := make(map[string]struct{}, len(excludePeerIDs)) + for _, id := range excludePeerIDs { + excludeSet[id] = struct{}{} + } + validated := make(map[string]struct{}, len(account.Peers)) + for id := range account.Peers { + if _, excluded := excludeSet[id]; !excluded { + validated[id] = struct{}{} + } + } + return validated +} + +func peerIDs(peers []*nbpeer.Peer) []string { + ids := make([]string, len(peers)) + for i, p := range peers { + ids[i] = p.ID + } + return ids +} + +func TestNetworkMapComponents_RegularPeerConnectivity(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + + assert.NotNil(t, nm) + assert.Contains(t, peerIDs(nm.Peers), "peer-dst-1", "should see peer from destination group via bidirectional policy") + assert.Contains(t, peerIDs(nm.Peers), "peer-router-1", "should see router peer via resource policy") + assert.NotContains(t, peerIDs(nm.Peers), "peer-src-1", "should not see itself") + assert.Empty(t, nm.OfflinePeers, "no expired peers expected") +} + +func TestNetworkMapComponents_IntraGroupConnectivity(t *testing.T) { + account := createComponentTestAccount() + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-intra-src", Name: "Intra-source connectivity", Enabled: true, AccountID: account.Id, + Rules: []*types.PolicyRule{{ + ID: "rule-intra-src", Name: "src <-> src", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, + Bidirectional: true, + Sources: []string{"group-src"}, Destinations: []string{"group-src"}, + }}, + }) + validated := allPeersValidated(account) + + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + assert.Contains(t, peerIDs(nm.Peers), "peer-src-2", "should see peer from same group with intra-group policy") +} + +func TestNetworkMapComponents_FirewallRules(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + + require.NotEmpty(t, nm.FirewallRules, "firewall rules should be generated") + + var hasAcceptAll bool + for _, rule := range nm.FirewallRules { + if rule.Protocol == string(types.PolicyRuleProtocolALL) && rule.Action == string(types.PolicyTrafficActionAccept) { + hasAcceptAll = true + } + } + assert.True(t, hasAcceptAll, "should have an accept-all firewall rule from the base policy") +} + +func TestNetworkMapComponents_LoginExpiration(t *testing.T) { + account := createComponentTestAccount() + account.Settings.PeerLoginExpirationEnabled = true + account.Settings.PeerLoginExpiration = 1 * time.Hour + + expiredTime := time.Now().Add(-2 * time.Hour) + account.Peers["peer-dst-1"].LoginExpirationEnabled = true + account.Peers["peer-dst-1"].LastLogin = &expiredTime + + validated := allPeersValidated(account) + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + + assert.Contains(t, peerIDs(nm.OfflinePeers), "peer-dst-1", "expired peer should be in OfflinePeers") + assert.NotContains(t, peerIDs(nm.Peers), "peer-dst-1", "expired peer should NOT be in active Peers") +} + +func TestNetworkMapComponents_InvalidatedPeerExcluded(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account, "peer-dst-1") + + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + + assert.NotContains(t, peerIDs(nm.Peers), "peer-dst-1", "non-validated peer should be excluded") + assert.NotContains(t, peerIDs(nm.OfflinePeers), "peer-dst-1", "non-validated peer should not be in offline peers either") +} + +func TestNetworkMapComponents_NonValidatedTargetPeer(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account, "peer-src-1") + + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + + assert.Empty(t, nm.Peers, "non-validated target peer should get empty network map") + assert.Empty(t, nm.FirewallRules) +} + +func TestNetworkMapComponents_NetworkResourceRoutes_SourcePeer(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + + var hasResourceRoute bool + for _, r := range nm.Routes { + if r.Network.String() == "10.200.0.1/32" { + hasResourceRoute = true + break + } + } + assert.True(t, hasResourceRoute, "source peer should receive route to network resource via router") + assert.Contains(t, peerIDs(nm.Peers), "peer-router-1", "source peer should see the routing peer") +} + +func TestNetworkMapComponents_NetworkResourceRoutes_RouterPeer(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + nm := networkMapFromComponents(t, account, "peer-router-1", validated) + + var hasResourceRoute bool + for _, r := range nm.Routes { + if r.Network.String() == "10.200.0.1/32" { + hasResourceRoute = true + break + } + } + assert.True(t, hasResourceRoute, "router peer should receive network resource route") + assert.NotEmpty(t, nm.RoutesFirewallRules, "router peer should have route firewall rules for the resource") +} + +func TestNetworkMapComponents_NetworkResourceRoutes_UnrelatedPeer(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + nm := networkMapFromComponents(t, account, "peer-dst-1", validated) + + for _, r := range nm.Routes { + assert.NotEqual(t, "10.200.0.1/32", r.Network.String(), "unrelated peer should not receive network resource route") + } +} + +func TestNetworkMapComponents_NetworkResource_WithPostureCheck(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + account.PostureChecks = []*posture.Checks{ + {ID: "pc-version", Name: "Version check", Checks: posture.ChecksDefinition{ + NBVersionCheck: &posture.NBVersionCheck{MinVersion: "0.30.0"}, + }}, + } + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-posture-resource", Name: "Posture resource access", Enabled: true, AccountID: account.Id, + SourcePostureChecks: []string{"pc-version"}, + Rules: []*types.PolicyRule{{ + ID: "rule-posture-resource", Name: "Posture -> Resource", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, + Sources: []string{"group-src"}, + DestinationResource: types.Resource{ID: "resource-guarded"}, + }}, + }) + + account.NetworkResources = append(account.NetworkResources, &resourceTypes.NetworkResource{ + ID: "resource-guarded", NetworkID: "net-guarded", AccountID: account.Id, Enabled: true, + Type: resourceTypes.Host, Prefix: netip.MustParsePrefix("10.200.1.1/32"), Address: "10.200.1.1/32", + }) + account.Networks = append(account.Networks, &networkTypes.Network{ + ID: "net-guarded", Name: "Guarded Net", AccountID: account.Id, + }) + account.NetworkRouters = append(account.NetworkRouters, &routerTypes.NetworkRouter{ + ID: "router-guarded", NetworkID: "net-guarded", Peer: "peer-router-1", Enabled: true, AccountID: account.Id, + }) + + t.Run("peer passes posture check", func(t *testing.T) { + account.Peers["peer-src-1"].Meta.WtVersion = "0.35.0" + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + + var hasGuardedRoute bool + for _, r := range nm.Routes { + if r.Network.String() == "10.200.1.1/32" { + hasGuardedRoute = true + } + } + assert.True(t, hasGuardedRoute, "peer passing posture check should get guarded resource route") + }) + + t.Run("peer fails posture check", func(t *testing.T) { + account.Peers["peer-src-1"].Meta.WtVersion = "0.20.0" + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + + for _, r := range nm.Routes { + assert.NotEqual(t, "10.200.1.1/32", r.Network.String(), "peer failing posture check should NOT get guarded resource route") + } + }) +} + +func TestNetworkMapComponents_NetworkResource_MultiplePostureChecks(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + account.PostureChecks = []*posture.Checks{ + {ID: "pc-version", Name: "Version", Checks: posture.ChecksDefinition{ + NBVersionCheck: &posture.NBVersionCheck{MinVersion: "0.30.0"}, + }}, + {ID: "pc-os", Name: "OS check", Checks: posture.ChecksDefinition{ + OSVersionCheck: &posture.OSVersionCheck{Linux: &posture.MinKernelVersionCheck{MinKernelVersion: "5.0"}}, + }}, + } + + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-multi-posture", Name: "Multi posture", Enabled: true, AccountID: account.Id, + SourcePostureChecks: []string{"pc-version", "pc-os"}, + Rules: []*types.PolicyRule{{ + ID: "rule-multi-posture", Name: "Multi posture rule", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, + Sources: []string{"group-src"}, + DestinationResource: types.Resource{ID: "resource-strict"}, + }}, + }) + + account.NetworkResources = append(account.NetworkResources, &resourceTypes.NetworkResource{ + ID: "resource-strict", NetworkID: "net-strict", AccountID: account.Id, Enabled: true, + Type: resourceTypes.Host, Prefix: netip.MustParsePrefix("10.200.2.1/32"), Address: "10.200.2.1/32", + }) + account.Networks = append(account.Networks, &networkTypes.Network{ + ID: "net-strict", Name: "Strict Net", AccountID: account.Id, + }) + account.NetworkRouters = append(account.NetworkRouters, &routerTypes.NetworkRouter{ + ID: "router-strict", NetworkID: "net-strict", Peer: "peer-router-1", Enabled: true, AccountID: account.Id, + }) + + t.Run("passes both posture checks", func(t *testing.T) { + account.Peers["peer-src-1"].Meta.WtVersion = "0.35.0" + account.Peers["peer-src-1"].Meta.GoOS = "linux" + account.Peers["peer-src-1"].Meta.KernelVersion = "6.1.0" + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + + var found bool + for _, r := range nm.Routes { + if r.Network.String() == "10.200.2.1/32" { + found = true + } + } + assert.True(t, found, "peer passing both checks should get resource route") + }) + + t.Run("fails version posture check", func(t *testing.T) { + account.Peers["peer-src-1"].Meta.WtVersion = "0.20.0" + account.Peers["peer-src-1"].Meta.KernelVersion = "6.1.0" + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + + for _, r := range nm.Routes { + assert.NotEqual(t, "10.200.2.1/32", r.Network.String(), "peer failing version check should NOT get resource route") + } + }) + + t.Run("fails OS posture check", func(t *testing.T) { + account.Peers["peer-src-1"].Meta.WtVersion = "0.35.0" + account.Peers["peer-src-1"].Meta.KernelVersion = "4.0.0" + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + + for _, r := range nm.Routes { + assert.NotEqual(t, "10.200.2.1/32", r.Network.String(), "peer failing OS check should NOT get resource route") + } + }) +} + +func TestNetworkMapComponents_RouterPeerFirewallRules(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + nm := networkMapFromComponents(t, account, "peer-router-1", validated) + + var resourceFWRules []*types.RouteFirewallRule + for _, rule := range nm.RoutesFirewallRules { + if rule.Destination == "10.200.0.1/32" { + resourceFWRules = append(resourceFWRules, rule) + } + } + assert.NotEmpty(t, resourceFWRules, "router should have firewall rules for the network resource") + + var hasSourcePeerIP bool + for _, rule := range resourceFWRules { + for _, sr := range rule.SourceRanges { + if sr == account.Peers["peer-src-1"].IP.String()+"/32" || sr == account.Peers["peer-src-2"].IP.String()+"/32" { + hasSourcePeerIP = true + } + } + } + assert.True(t, hasSourcePeerIP, "resource firewall rules should include source peer IPs") +} + +func TestNetworkMapComponents_DNSManagement(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + t.Run("peer in DNS-enabled group", func(t *testing.T) { + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + assert.True(t, nm.DNSConfig.ServiceEnable, "peer in non-disabled group should have DNS enabled") + }) + + t.Run("peer in DNS-disabled group", func(t *testing.T) { + nm := networkMapFromComponents(t, account, "peer-dst-1", validated) + assert.False(t, nm.DNSConfig.ServiceEnable, "peer in DNS-disabled group should have DNS disabled") + }) +} + +func TestNetworkMapComponents_NameServerGroups(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + assert.True(t, nm.DNSConfig.ServiceEnable) + + var hasNSGroup bool + for _, ns := range nm.DNSConfig.NameServerGroups { + if ns.ID == "ns-main" { + hasNSGroup = true + } + } + assert.True(t, hasNSGroup, "peer in NS group should receive nameserver configuration") +} + +func TestNetworkMapComponents_RoutesWithHADeduplication(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + account.Routes["route-ha-1"] = &route.Route{ + ID: "route-ha-1", Network: netip.MustParsePrefix("172.16.0.0/16"), + Peer: account.Peers["peer-dst-1"].Key, PeerID: "peer-dst-1", + Enabled: true, Metric: 100, AccountID: account.Id, + Groups: []string{"group-src", "group-dst"}, PeerGroups: []string{"group-dst"}, + } + account.Routes["route-ha-2"] = &route.Route{ + ID: "route-ha-2", Network: netip.MustParsePrefix("172.16.0.0/16"), + Peer: account.Peers["peer-src-1"].Key, PeerID: "peer-src-1", + Enabled: true, Metric: 200, AccountID: account.Id, + Groups: []string{"group-src", "group-dst"}, PeerGroups: []string{"group-src"}, + } + + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + + haCount := 0 + for _, r := range nm.Routes { + if r.Network.String() == "172.16.0.0/16" { + haCount++ + } + } + assert.Equal(t, 1, haCount, "peer should only receive one route from HA group (not both, since it's a member of one)") +} + +func TestNetworkMapComponents_RoutesFirewallRulesForAccessControl(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + account.Routes["route-acl"] = &route.Route{ + ID: "route-acl", Network: netip.MustParsePrefix("192.168.100.0/24"), + Peer: account.Peers["peer-src-1"].Key, PeerID: "peer-src-1", + Enabled: true, Metric: 100, AccountID: account.Id, + Groups: []string{"group-dst"}, + PeerGroups: []string{"group-src"}, + AccessControlGroups: []string{"group-dst"}, + } + + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + + var hasFWRule bool + for _, rule := range nm.RoutesFirewallRules { + if rule.Destination == "192.168.100.0/24" { + hasFWRule = true + } + } + assert.True(t, hasFWRule, "routing peer should have firewall rules for route with access control groups") +} + +func TestNetworkMapComponents_RoutesDefaultPermit(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + account.Routes["route-open"] = &route.Route{ + ID: "route-open", Network: netip.MustParsePrefix("10.99.0.0/16"), + Peer: account.Peers["peer-src-1"].Key, PeerID: "peer-src-1", + Enabled: true, Metric: 100, AccountID: account.Id, + Groups: []string{"group-src"}, + PeerGroups: []string{"group-src"}, + } + + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + + var hasFWRule bool + for _, rule := range nm.RoutesFirewallRules { + if rule.Destination == "10.99.0.0/16" { + hasFWRule = true + } + } + assert.True(t, hasFWRule, "route without access control groups should have default permit firewall rules") +} + +func TestNetworkMapComponents_SSHAuthorizedUsers(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + account.Peers["peer-dst-1"].SSHEnabled = true + + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-ssh", Name: "SSH Access", Enabled: true, AccountID: account.Id, + Rules: []*types.PolicyRule{{ + ID: "rule-ssh", Name: "SSH to dst", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, + Bidirectional: true, + Sources: []string{"group-src"}, Destinations: []string{"group-dst"}, + }}, + }) + + nm := networkMapFromComponents(t, account, "peer-dst-1", validated) + assert.True(t, nm.EnableSSH, "SSH-enabled peer with matching policy should have EnableSSH") +} + +func TestNetworkMapComponents_DisabledPolicyIgnored(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + for _, p := range account.Policies { + p.Enabled = false + } + + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + assert.Empty(t, nm.Peers, "with all policies disabled, peer should see no other peers") + assert.Empty(t, nm.FirewallRules) +} + +func TestNetworkMapComponents_DisabledRouteIgnored(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + for _, r := range account.Routes { + r.Enabled = false + } + for _, r := range account.NetworkResources { + r.Enabled = false + } + + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + assert.Empty(t, nm.Routes, "disabled routes should not appear in network map") +} + +func TestNetworkMapComponents_DisabledNetworkResourceIgnored(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + for _, r := range account.NetworkResources { + r.Enabled = false + } + + nm := networkMapFromComponents(t, account, "peer-router-1", validated) + + for _, r := range nm.Routes { + assert.NotEqual(t, "10.200.0.1/32", r.Network.String(), "disabled resource should not generate routes") + } +} + +func TestNetworkMapComponents_BidirectionalPolicy(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + nmSrc := networkMapFromComponents(t, account, "peer-src-1", validated) + nmDst := networkMapFromComponents(t, account, "peer-dst-1", validated) + + assert.Contains(t, peerIDs(nmSrc.Peers), "peer-dst-1", "src should see dst via bidirectional policy") + assert.Contains(t, peerIDs(nmDst.Peers), "peer-src-1", "dst should see src via bidirectional policy") +} + +func TestNetworkMapComponents_DropPolicy(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-drop", Name: "Drop traffic", Enabled: true, AccountID: account.Id, + Rules: []*types.PolicyRule{{ + ID: "rule-drop", Name: "Drop src->dst", Enabled: true, + Action: types.PolicyTrafficActionDrop, Protocol: types.PolicyRuleProtocolTCP, + Ports: []string{"5432"}, + Sources: []string{"group-src"}, Destinations: []string{"group-dst"}, + }}, + }) + + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + + var hasDropRule bool + for _, rule := range nm.FirewallRules { + if rule.Action == string(types.PolicyTrafficActionDrop) && rule.Port == "5432" { + hasDropRule = true + } + } + assert.True(t, hasDropRule, "drop policy should generate drop firewall rule") +} + +func TestNetworkMapComponents_PortRangePolicy(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + account.Peers["peer-src-1"].Meta.WtVersion = "0.50.0" + + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-range", Name: "Port range", Enabled: true, AccountID: account.Id, + Rules: []*types.PolicyRule{{ + ID: "rule-range", Name: "Range rule", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP, + PortRanges: []types.RulePortRange{{Start: 8080, End: 8090}}, + Sources: []string{"group-src"}, Destinations: []string{"group-dst"}, + }}, + }) + + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + + var hasRangeRule bool + for _, rule := range nm.FirewallRules { + if rule.PortRange.Start == 8080 && rule.PortRange.End == 8090 { + hasRangeRule = true + } + } + assert.True(t, hasRangeRule, "port range policy should generate corresponding firewall rule") +} + +func TestNetworkMapComponents_MultipleNetworkResources(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + account.NetworkResources = append(account.NetworkResources, &resourceTypes.NetworkResource{ + ID: "resource-2", NetworkID: "net-1", AccountID: account.Id, Enabled: true, + Type: resourceTypes.Host, Prefix: netip.MustParsePrefix("10.200.0.2/32"), Address: "10.200.0.2/32", + }) + account.Groups["group-res2"] = &types.Group{ID: "group-res2", Name: "Resource 2 Group", Peers: []string{"peer-src-1", "peer-src-2"}, + Resources: []types.Resource{{ID: "resource-2"}}, + } + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-res2", Name: "Resource 2 Policy", Enabled: true, AccountID: account.Id, + Rules: []*types.PolicyRule{{ + ID: "rule-res2", Name: "Access Resource 2", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, + Sources: []string{"group-src"}, + DestinationResource: types.Resource{ID: "resource-2"}, + }}, + }) + + nm := networkMapFromComponents(t, account, "peer-router-1", validated) + + resourceRouteCount := 0 + for _, r := range nm.Routes { + if r.Network.String() == "10.200.0.1/32" || r.Network.String() == "10.200.0.2/32" { + resourceRouteCount++ + } + } + assert.Equal(t, 2, resourceRouteCount, "router should have routes for both network resources") +} + +func TestNetworkMapComponents_DomainNetworkResource(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + account.NetworkResources = append(account.NetworkResources, &resourceTypes.NetworkResource{ + ID: "resource-domain", NetworkID: "net-1", AccountID: account.Id, Enabled: true, + Type: resourceTypes.Domain, Domain: "api.example.com", Address: "api.example.com", + }) + account.Groups["group-res-domain"] = &types.Group{ + ID: "group-res-domain", Name: "Domain Resource Group", + Resources: []types.Resource{{ID: "resource-domain"}}, + } + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-domain", Name: "Domain resource policy", Enabled: true, AccountID: account.Id, + Rules: []*types.PolicyRule{{ + ID: "rule-domain", Name: "Access domain resource", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, + Sources: []string{"group-src"}, + DestinationResource: types.Resource{ID: "resource-domain"}, + }}, + }) + + nm := networkMapFromComponents(t, account, "peer-src-1", validated) + + var hasDomainRoute bool + for _, r := range nm.Routes { + if r.NetworkType == route.DomainNetwork && len(r.Domains) > 0 && r.Domains[0].SafeString() == "api.example.com" { + hasDomainRoute = true + } + } + assert.True(t, hasDomainRoute, "source peer should receive domain route for domain network resource") +} + +func TestNetworkMapComponents_NetworkEmpty(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + nm := networkMapFromComponents(t, account, "nonexistent-peer", validated) + + assert.NotNil(t, nm) + assert.Empty(t, nm.Peers) + assert.Empty(t, nm.FirewallRules) + assert.NotNil(t, nm.Network) +} + +func TestNetworkMapComponents_RouterExcludesOtherNetworkRoutes(t *testing.T) { + account := createComponentTestAccount() + validated := allPeersValidated(account) + + account.NetworkResources = append(account.NetworkResources, &resourceTypes.NetworkResource{ + ID: "resource-other", NetworkID: "net-other", AccountID: account.Id, Enabled: true, + Type: resourceTypes.Host, Prefix: netip.MustParsePrefix("10.200.99.1/32"), Address: "10.200.99.1/32", + }) + account.Networks = append(account.Networks, &networkTypes.Network{ + ID: "net-other", Name: "Other Net", AccountID: account.Id, + }) + account.NetworkRouters = append(account.NetworkRouters, &routerTypes.NetworkRouter{ + ID: "router-other", NetworkID: "net-other", Peer: "peer-dst-1", Enabled: true, AccountID: account.Id, + }) + account.Groups["group-res-other"] = &types.Group{ID: "group-res-other", Name: "Other resource group", + Resources: []types.Resource{{ID: "resource-other"}}, + } + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-other-resource", Name: "Other resource policy", Enabled: true, AccountID: account.Id, + Rules: []*types.PolicyRule{{ + ID: "rule-other", Name: "Other resource access", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, + Sources: []string{"group-src"}, + DestinationResource: types.Resource{ID: "resource-other"}, + }}, + }) + + nm := networkMapFromComponents(t, account, "peer-router-1", validated) + + for _, r := range nm.Routes { + assert.NotEqual(t, "10.200.99.1/32", r.Network.String(), "router-1 should NOT get routes for other network's resources") + } +} + +func createComponentTestAccount() *types.Account { + peers := map[string]*nbpeer.Peer{ + "peer-src-1": { + ID: "peer-src-1", IP: net.IP{100, 64, 0, 1}, Key: "key-src-1", DNSLabel: "src1", + Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, UserID: "user-1", + Meta: nbpeer.PeerSystemMeta{WtVersion: "0.35.0", GoOS: "linux"}, + }, + "peer-src-2": { + ID: "peer-src-2", IP: net.IP{100, 64, 0, 2}, Key: "key-src-2", DNSLabel: "src2", + Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, UserID: "user-1", + Meta: nbpeer.PeerSystemMeta{WtVersion: "0.35.0", GoOS: "linux"}, + }, + "peer-dst-1": { + ID: "peer-dst-1", IP: net.IP{100, 64, 0, 3}, Key: "key-dst-1", DNSLabel: "dst1", + Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, UserID: "user-2", + Meta: nbpeer.PeerSystemMeta{WtVersion: "0.35.0", GoOS: "linux"}, + }, + "peer-router-1": { + ID: "peer-router-1", IP: net.IP{100, 64, 0, 10}, Key: "key-router-1", DNSLabel: "router1", + Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, UserID: "user-1", + Meta: nbpeer.PeerSystemMeta{WtVersion: "0.35.0", GoOS: "linux"}, + }, + } + + groups := map[string]*types.Group{ + "group-src": {ID: "group-src", Name: "Sources", Peers: []string{"peer-src-1", "peer-src-2"}}, + "group-dst": {ID: "group-dst", Name: "Destinations", Peers: []string{"peer-dst-1"}}, + "group-all": {ID: "group-all", Name: "All", Peers: []string{"peer-src-1", "peer-src-2", "peer-dst-1", "peer-router-1"}}, + "group-res": { + ID: "group-res", Name: "Resource Group", + Resources: []types.Resource{{ID: "resource-1"}}, + }, + } + + policies := []*types.Policy{ + { + ID: "policy-base", Name: "Base connectivity", Enabled: true, + Rules: []*types.PolicyRule{{ + ID: "rule-base", Name: "Allow src <-> dst", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, + Bidirectional: true, + Sources: []string{"group-src"}, Destinations: []string{"group-dst"}, + }}, + }, + { + ID: "policy-resource", Name: "Network resource access", Enabled: true, + Rules: []*types.PolicyRule{{ + ID: "rule-resource", Name: "Source -> Resource", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, + Sources: []string{"group-src"}, + DestinationResource: types.Resource{ID: "resource-1"}, + }}, + }, + } + + routes := map[route.ID]*route.Route{ + "route-main": { + ID: "route-main", Network: netip.MustParsePrefix("192.168.10.0/24"), + Peer: peers["peer-dst-1"].Key, PeerID: "peer-dst-1", + Enabled: true, Metric: 100, + Groups: []string{"group-src", "group-dst"}, PeerGroups: []string{"group-dst"}, + }, + } + + users := map[string]*types.User{ + "user-1": {Id: "user-1", Role: types.UserRoleAdmin, IsServiceUser: false, AutoGroups: []string{"group-all"}}, + "user-2": {Id: "user-2", Role: types.UserRoleUser, IsServiceUser: false, AutoGroups: []string{"group-all"}}, + } + + account := &types.Account{ + Id: "account-components-test", Peers: peers, Groups: groups, Policies: policies, Routes: routes, + Users: users, + Network: &types.Network{ + Identifier: "net-test", Net: net.IPNet{IP: net.IP{100, 64, 0, 0}, Mask: net.CIDRMask(16, 32)}, Serial: 1, + }, + DNSSettings: types.DNSSettings{DisabledManagementGroups: []string{"group-dst"}}, + NameServerGroups: map[string]*nbdns.NameServerGroup{ + "ns-main": { + ID: "ns-main", Name: "Main NS", Enabled: true, Groups: []string{"group-src"}, + NameServers: []nbdns.NameServer{{IP: netip.MustParseAddr("8.8.8.8"), NSType: nbdns.UDPNameServerType, Port: 53}}, + }, + }, + PostureChecks: []*posture.Checks{}, + NetworkResources: []*resourceTypes.NetworkResource{ + { + ID: "resource-1", NetworkID: "net-1", AccountID: "account-components-test", Enabled: true, + Type: resourceTypes.Host, Prefix: netip.MustParsePrefix("10.200.0.1/32"), Address: "10.200.0.1/32", + }, + }, + Networks: []*networkTypes.Network{ + {ID: "net-1", Name: "Resource Net", AccountID: "account-components-test"}, + }, + NetworkRouters: []*routerTypes.NetworkRouter{ + {ID: "router-1", NetworkID: "net-1", Peer: "peer-router-1", Enabled: true, AccountID: "account-components-test"}, + }, + Settings: &types.Settings{PeerLoginExpirationEnabled: false, PeerLoginExpiration: 24 * time.Hour}, + } + + for _, p := range account.Policies { + p.AccountID = account.Id + } + for _, r := range account.Routes { + r.AccountID = account.Id + } + + return account +} diff --git a/management/server/types/networkmap_golden_test.go b/management/server/types/networkmap_golden_test.go deleted file mode 100644 index 53261f22d..000000000 --- a/management/server/types/networkmap_golden_test.go +++ /dev/null @@ -1,967 +0,0 @@ -package types_test - -import ( - "context" - "encoding/json" - "fmt" - "net" - "net/netip" - "os" - "path/filepath" - "slices" - "sort" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/netbirdio/netbird/dns" - "github.com/netbirdio/netbird/management/internals/modules/zones" - resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" - routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" - networkTypes "github.com/netbirdio/netbird/management/server/networks/types" - nbpeer "github.com/netbirdio/netbird/management/server/peer" - "github.com/netbirdio/netbird/management/server/posture" - "github.com/netbirdio/netbird/management/server/types" - "github.com/netbirdio/netbird/route" -) - -const ( - numPeers = 100 - devGroupID = "group-dev" - opsGroupID = "group-ops" - allGroupID = "group-all" - sshUsersGroupID = "group-ssh-users" - routeID = route.ID("route-main") - routeHA1ID = route.ID("route-ha-1") - routeHA2ID = route.ID("route-ha-2") - policyIDDevOps = "policy-dev-ops" - policyIDAll = "policy-all" - policyIDPosture = "policy-posture" - policyIDDrop = "policy-drop" - policyIDSSH = "policy-ssh" - postureCheckID = "posture-check-ver" - networkResourceID = "res-database" - networkID = "net-database" - networkRouterID = "router-database" - nameserverGroupID = "ns-group-main" - testingPeerID = "peer-60" // A peer from the "dev" group, should receive the most detailed map. - expiredPeerID = "peer-98" // This peer will be online but with an expired session. - offlinePeerID = "peer-99" // This peer will be completely offline. - routingPeerID = "peer-95" // This peer is used for routing, it has a route to the network. - testAccountID = "account-golden-test" - userAdminID = "user-admin" - userDevID = "user-dev" - userOpsID = "user-ops" -) - -func TestGetPeerNetworkMap_Golden(t *testing.T) { - account := createTestAccountWithEntities() - - ctx := context.Background() - validatedPeersMap := make(map[string]struct{}) - for i := range numPeers { - peerID := fmt.Sprintf("peer-%d", i) - if peerID == offlinePeerID { - continue - } - validatedPeersMap[peerID] = struct{}{} - } - - resourcePolicies := account.GetResourcePoliciesMap() - routers := account.GetResourceRoutersMap() - - legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, []*zones.Zone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) - normalizeAndSortNetworkMap(legacyNetworkMap) - legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") - require.NoError(t, err, "error marshaling legacy network map to JSON") - - builder := types.NewNetworkMapBuilder(account, validatedPeersMap) - newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil) - normalizeAndSortNetworkMap(newNetworkMap) - newJSON, err := json.MarshalIndent(toNetworkMapJSON(newNetworkMap), "", " ") - require.NoError(t, err, "error marshaling new network map to JSON") - - if string(legacyJSON) != string(newJSON) { - legacyFilePath := filepath.Join("testdata", "networkmap_golden.json") - newFilePath := filepath.Join("testdata", "networkmap_golden_new.json") - - err = os.MkdirAll(filepath.Dir(legacyFilePath), 0755) - require.NoError(t, err) - - err = os.WriteFile(legacyFilePath, legacyJSON, 0644) - require.NoError(t, err) - t.Logf("Saved legacy network map to %s", legacyFilePath) - - err = os.WriteFile(newFilePath, newJSON, 0644) - require.NoError(t, err) - t.Logf("Saved new network map to %s", newFilePath) - - require.JSONEq(t, string(legacyJSON), string(newJSON), "network maps from legacy and new builder do not match") - } -} - -func BenchmarkGetPeerNetworkMap(b *testing.B) { - account := createTestAccountWithEntities() - ctx := context.Background() - validatedPeersMap := make(map[string]struct{}) - var peerIDs []string - for i := range numPeers { - peerID := fmt.Sprintf("peer-%d", i) - validatedPeersMap[peerID] = struct{}{} - peerIDs = append(peerIDs, peerID) - } - - b.ResetTimer() - b.Run("old builder", func(b *testing.B) { - for range b.N { - for _, peerID := range peerIDs { - _ = account.GetPeerNetworkMap(ctx, peerID, dns.CustomZone{}, []*zones.Zone{}, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) - } - } - }) - b.ResetTimer() - b.Run("new builder", func(b *testing.B) { - for range b.N { - builder := types.NewNetworkMapBuilder(account, validatedPeersMap) - for _, peerID := range peerIDs { - _ = builder.GetPeerNetworkMap(ctx, peerID, dns.CustomZone{}, nil, validatedPeersMap, nil) - } - } - }) -} - -func TestGetPeerNetworkMap_Golden_WithNewPeer(t *testing.T) { - account := createTestAccountWithEntities() - - ctx := context.Background() - validatedPeersMap := make(map[string]struct{}) - for i := range numPeers { - peerID := fmt.Sprintf("peer-%d", i) - if peerID == offlinePeerID { - continue - } - validatedPeersMap[peerID] = struct{}{} - } - - builder := types.NewNetworkMapBuilder(account, validatedPeersMap) - - newPeerID := "peer-new-101" - newPeerIP := net.IP{100, 64, 1, 1} - newPeer := &nbpeer.Peer{ - ID: newPeerID, - IP: newPeerIP, - Key: fmt.Sprintf("key-%s", newPeerID), - DNSLabel: "peernew101", - Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, - UserID: "user-admin", - Meta: nbpeer.PeerSystemMeta{WtVersion: "0.26.0", GoOS: "linux"}, - LastLogin: func() *time.Time { t := time.Now(); return &t }(), - } - - account.Peers[newPeerID] = newPeer - - if devGroup, exists := account.Groups[devGroupID]; exists { - devGroup.Peers = append(devGroup.Peers, newPeerID) - } - - if allGroup, exists := account.Groups[allGroupID]; exists { - allGroup.Peers = append(allGroup.Peers, newPeerID) - } - - validatedPeersMap[newPeerID] = struct{}{} - - if account.Network != nil { - account.Network.Serial++ - } - - resourcePolicies := account.GetResourcePoliciesMap() - routers := account.GetResourceRoutersMap() - - legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, []*zones.Zone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) - normalizeAndSortNetworkMap(legacyNetworkMap) - legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") - require.NoError(t, err, "error marshaling legacy network map to JSON") - - err = builder.OnPeerAddedIncremental(account, newPeerID) - require.NoError(t, err, "error adding peer to cache") - - newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil) - normalizeAndSortNetworkMap(newNetworkMap) - newJSON, err := json.MarshalIndent(toNetworkMapJSON(newNetworkMap), "", " ") - require.NoError(t, err, "error marshaling new network map to JSON") - - if string(legacyJSON) != string(newJSON) { - legacyFilePath := filepath.Join("testdata", "networkmap_golden_with_new_peer.json") - newFilePath := filepath.Join("testdata", "networkmap_golden_new_with_onpeeradded.json") - - err = os.MkdirAll(filepath.Dir(legacyFilePath), 0755) - require.NoError(t, err) - - err = os.WriteFile(legacyFilePath, legacyJSON, 0644) - require.NoError(t, err) - t.Logf("Saved legacy network map to %s", legacyFilePath) - - err = os.WriteFile(newFilePath, newJSON, 0644) - require.NoError(t, err) - t.Logf("Saved new network map to %s", newFilePath) - - require.JSONEq(t, string(legacyJSON), string(newJSON), "network maps with new peer from legacy and new builder do not match") - } -} - -func BenchmarkGetPeerNetworkMap_AfterPeerAdded(b *testing.B) { - account := createTestAccountWithEntities() - ctx := context.Background() - validatedPeersMap := make(map[string]struct{}) - var peerIDs []string - for i := range numPeers { - peerID := fmt.Sprintf("peer-%d", i) - validatedPeersMap[peerID] = struct{}{} - peerIDs = append(peerIDs, peerID) - } - builder := types.NewNetworkMapBuilder(account, validatedPeersMap) - newPeerID := "peer-new-101" - newPeer := &nbpeer.Peer{ - ID: newPeerID, - IP: net.IP{100, 64, 1, 1}, - Key: fmt.Sprintf("key-%s", newPeerID), - DNSLabel: "peernew101", - Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, - UserID: "user-admin", - Meta: nbpeer.PeerSystemMeta{WtVersion: "0.26.0", GoOS: "linux"}, - } - - account.Peers[newPeerID] = newPeer - account.Groups[devGroupID].Peers = append(account.Groups[devGroupID].Peers, newPeerID) - account.Groups[allGroupID].Peers = append(account.Groups[allGroupID].Peers, newPeerID) - validatedPeersMap[newPeerID] = struct{}{} - - b.ResetTimer() - b.Run("old builder after add", func(b *testing.B) { - for i := 0; i < b.N; i++ { - for _, testingPeerID := range peerIDs { - _ = account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, []*zones.Zone{}, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) - } - } - }) - - b.ResetTimer() - b.Run("new builder after add", func(b *testing.B) { - for i := 0; i < b.N; i++ { - _ = builder.OnPeerAddedIncremental(account, newPeerID) - for _, testingPeerID := range peerIDs { - _ = builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil) - } - } - }) -} - -func TestGetPeerNetworkMap_Golden_WithNewRoutingPeer(t *testing.T) { - account := createTestAccountWithEntities() - - ctx := context.Background() - validatedPeersMap := make(map[string]struct{}) - for i := range numPeers { - peerID := fmt.Sprintf("peer-%d", i) - if peerID == offlinePeerID { - continue - } - validatedPeersMap[peerID] = struct{}{} - } - - builder := types.NewNetworkMapBuilder(account, validatedPeersMap) - - newRouterID := "peer-new-router-102" - newRouterIP := net.IP{100, 64, 1, 2} - newRouter := &nbpeer.Peer{ - ID: newRouterID, - IP: newRouterIP, - Key: fmt.Sprintf("key-%s", newRouterID), - DNSLabel: "newrouter102", - Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, - UserID: "user-admin", - Meta: nbpeer.PeerSystemMeta{WtVersion: "0.26.0", GoOS: "linux"}, - LastLogin: func() *time.Time { t := time.Now(); return &t }(), - } - - account.Peers[newRouterID] = newRouter - - if opsGroup, exists := account.Groups[opsGroupID]; exists { - opsGroup.Peers = append(opsGroup.Peers, newRouterID) - } - - if allGroup, exists := account.Groups[allGroupID]; exists { - allGroup.Peers = append(allGroup.Peers, newRouterID) - } - - newRoute := &route.Route{ - ID: route.ID("route-new-router"), - Network: netip.MustParsePrefix("172.16.0.0/24"), - Peer: newRouter.Key, - PeerID: newRouterID, - Description: "Route from new router", - Enabled: true, - PeerGroups: []string{opsGroupID}, - Groups: []string{devGroupID, opsGroupID}, - AccessControlGroups: []string{devGroupID}, - AccountID: account.Id, - } - account.Routes[newRoute.ID] = newRoute - - validatedPeersMap[newRouterID] = struct{}{} - - if account.Network != nil { - account.Network.Serial++ - } - - resourcePolicies := account.GetResourcePoliciesMap() - routers := account.GetResourceRoutersMap() - - legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, []*zones.Zone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) - normalizeAndSortNetworkMap(legacyNetworkMap) - legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") - require.NoError(t, err, "error marshaling legacy network map to JSON") - - err = builder.OnPeerAddedIncremental(account, newRouterID) - require.NoError(t, err, "error adding router to cache") - - newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil) - normalizeAndSortNetworkMap(newNetworkMap) - newJSON, err := json.MarshalIndent(toNetworkMapJSON(newNetworkMap), "", " ") - require.NoError(t, err, "error marshaling new network map to JSON") - - if string(legacyJSON) != string(newJSON) { - legacyFilePath := filepath.Join("testdata", "networkmap_golden_with_new_router.json") - newFilePath := filepath.Join("testdata", "networkmap_golden_new_with_onpeeradded_router.json") - - err = os.MkdirAll(filepath.Dir(legacyFilePath), 0755) - require.NoError(t, err) - - err = os.WriteFile(legacyFilePath, legacyJSON, 0644) - require.NoError(t, err) - t.Logf("Saved legacy network map to %s", legacyFilePath) - - err = os.WriteFile(newFilePath, newJSON, 0644) - require.NoError(t, err) - t.Logf("Saved new network map to %s", newFilePath) - - require.JSONEq(t, string(legacyJSON), string(newJSON), "network maps with new router from legacy and new builder do not match") - } -} - -func BenchmarkGetPeerNetworkMap_AfterRouterPeerAdded(b *testing.B) { - account := createTestAccountWithEntities() - ctx := context.Background() - validatedPeersMap := make(map[string]struct{}) - var peerIDs []string - for i := range numPeers { - peerID := fmt.Sprintf("peer-%d", i) - validatedPeersMap[peerID] = struct{}{} - peerIDs = append(peerIDs, peerID) - } - builder := types.NewNetworkMapBuilder(account, validatedPeersMap) - newRouterID := "peer-new-router-102" - newRouterIP := net.IP{100, 64, 1, 2} - newRouter := &nbpeer.Peer{ - ID: newRouterID, - IP: newRouterIP, - Key: fmt.Sprintf("key-%s", newRouterID), - DNSLabel: "newrouter102", - Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, - UserID: "user-admin", - Meta: nbpeer.PeerSystemMeta{WtVersion: "0.26.0", GoOS: "linux"}, - LastLogin: func() *time.Time { t := time.Now(); return &t }(), - } - - account.Peers[newRouterID] = newRouter - - if opsGroup, exists := account.Groups[opsGroupID]; exists { - opsGroup.Peers = append(opsGroup.Peers, newRouterID) - } - if allGroup, exists := account.Groups[allGroupID]; exists { - allGroup.Peers = append(allGroup.Peers, newRouterID) - } - - newRoute := &route.Route{ - ID: route.ID("route-new-router"), - Network: netip.MustParsePrefix("172.16.0.0/24"), - Peer: newRouter.Key, - PeerID: newRouterID, - Description: "Route from new router", - Enabled: true, - PeerGroups: []string{opsGroupID}, - Groups: []string{devGroupID, opsGroupID}, - AccessControlGroups: []string{devGroupID}, - AccountID: account.Id, - } - account.Routes[newRoute.ID] = newRoute - - validatedPeersMap[newRouterID] = struct{}{} - - b.ResetTimer() - b.Run("old builder after add", func(b *testing.B) { - for i := 0; i < b.N; i++ { - for _, testingPeerID := range peerIDs { - _ = account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, []*zones.Zone{}, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) - } - } - }) - - b.ResetTimer() - b.Run("new builder after add", func(b *testing.B) { - for i := 0; i < b.N; i++ { - _ = builder.OnPeerAddedIncremental(account, newRouterID) - for _, testingPeerID := range peerIDs { - _ = builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil) - } - } - }) -} - -func TestGetPeerNetworkMap_Golden_WithDeletedPeer(t *testing.T) { - account := createTestAccountWithEntities() - - ctx := context.Background() - validatedPeersMap := make(map[string]struct{}) - for i := range numPeers { - peerID := fmt.Sprintf("peer-%d", i) - if peerID == offlinePeerID { - continue - } - validatedPeersMap[peerID] = struct{}{} - } - - builder := types.NewNetworkMapBuilder(account, validatedPeersMap) - - deletedPeerID := "peer-25" - - delete(account.Peers, deletedPeerID) - - if devGroup, exists := account.Groups[devGroupID]; exists { - devGroup.Peers = slices.DeleteFunc(devGroup.Peers, func(id string) bool { - return id == deletedPeerID - }) - } - - if allGroup, exists := account.Groups[allGroupID]; exists { - allGroup.Peers = slices.DeleteFunc(allGroup.Peers, func(id string) bool { - return id == deletedPeerID - }) - } - - delete(validatedPeersMap, deletedPeerID) - - if account.Network != nil { - account.Network.Serial++ - } - - resourcePolicies := account.GetResourcePoliciesMap() - routers := account.GetResourceRoutersMap() - - legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, []*zones.Zone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) - normalizeAndSortNetworkMap(legacyNetworkMap) - legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") - require.NoError(t, err, "error marshaling legacy network map to JSON") - - err = builder.OnPeerDeleted(account, deletedPeerID) - require.NoError(t, err, "error deleting peer from cache") - - newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil) - normalizeAndSortNetworkMap(newNetworkMap) - newJSON, err := json.MarshalIndent(toNetworkMapJSON(newNetworkMap), "", " ") - require.NoError(t, err, "error marshaling new network map to JSON") - - if string(legacyJSON) != string(newJSON) { - legacyFilePath := filepath.Join("testdata", "networkmap_golden_with_deleted_peer.json") - newFilePath := filepath.Join("testdata", "networkmap_golden_new_with_onpeerdeleted.json") - - err = os.MkdirAll(filepath.Dir(legacyFilePath), 0755) - require.NoError(t, err) - - err = os.WriteFile(legacyFilePath, legacyJSON, 0644) - require.NoError(t, err) - t.Logf("Saved legacy network map to %s", legacyFilePath) - - err = os.WriteFile(newFilePath, newJSON, 0644) - require.NoError(t, err) - t.Logf("Saved new network map to %s", newFilePath) - - require.JSONEq(t, string(legacyJSON), string(newJSON), "network maps with deleted peer from legacy and new builder do not match") - } -} - -func TestGetPeerNetworkMap_Golden_WithDeletedRouterPeer(t *testing.T) { - account := createTestAccountWithEntities() - - ctx := context.Background() - validatedPeersMap := make(map[string]struct{}) - for i := range numPeers { - peerID := fmt.Sprintf("peer-%d", i) - if peerID == offlinePeerID { - continue - } - validatedPeersMap[peerID] = struct{}{} - } - - builder := types.NewNetworkMapBuilder(account, validatedPeersMap) - - deletedRouterID := "peer-75" - - var affectedRoute *route.Route - for _, r := range account.Routes { - if r.PeerID == deletedRouterID { - affectedRoute = r - break - } - } - require.NotNil(t, affectedRoute, "Router peer should have a route") - - for _, group := range account.Groups { - group.Peers = slices.DeleteFunc(group.Peers, func(id string) bool { - return id == deletedRouterID - }) - } - - for routeID, r := range account.Routes { - if r.Peer == account.Peers[deletedRouterID].Key || r.PeerID == deletedRouterID { - delete(account.Routes, routeID) - } - } - delete(account.Peers, deletedRouterID) - delete(validatedPeersMap, deletedRouterID) - - if account.Network != nil { - account.Network.Serial++ - } - - resourcePolicies := account.GetResourcePoliciesMap() - routers := account.GetResourceRoutersMap() - - legacyNetworkMap := account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, []*zones.Zone{}, validatedPeersMap, resourcePolicies, routers, nil, account.GetActiveGroupUsers()) - normalizeAndSortNetworkMap(legacyNetworkMap) - legacyJSON, err := json.MarshalIndent(toNetworkMapJSON(legacyNetworkMap), "", " ") - require.NoError(t, err, "error marshaling legacy network map to JSON") - - err = builder.OnPeerDeleted(account, deletedRouterID) - require.NoError(t, err, "error deleting routing peer from cache") - - newNetworkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil) - normalizeAndSortNetworkMap(newNetworkMap) - newJSON, err := json.MarshalIndent(toNetworkMapJSON(newNetworkMap), "", " ") - require.NoError(t, err, "error marshaling new network map to JSON") - - if string(legacyJSON) != string(newJSON) { - legacyFilePath := filepath.Join("testdata", "networkmap_golden_with_deleted_router_peer.json") - newFilePath := filepath.Join("testdata", "networkmap_golden_new_with_deleted_router.json") - - err = os.MkdirAll(filepath.Dir(legacyFilePath), 0755) - require.NoError(t, err) - - err = os.WriteFile(legacyFilePath, legacyJSON, 0644) - require.NoError(t, err) - t.Logf("Saved legacy network map to %s", legacyFilePath) - - err = os.WriteFile(newFilePath, newJSON, 0644) - require.NoError(t, err) - t.Logf("Saved new network map to %s", newFilePath) - - require.JSONEq(t, string(legacyJSON), string(newJSON), "network maps with deleted router from legacy and new builder do not match") - } -} - -func BenchmarkGetPeerNetworkMap_AfterPeerDeleted(b *testing.B) { - account := createTestAccountWithEntities() - ctx := context.Background() - validatedPeersMap := make(map[string]struct{}) - var peerIDs []string - for i := range numPeers { - peerID := fmt.Sprintf("peer-%d", i) - validatedPeersMap[peerID] = struct{}{} - peerIDs = append(peerIDs, peerID) - } - - deletedPeerID := "peer-25" - - delete(account.Peers, deletedPeerID) - account.Groups[devGroupID].Peers = slices.DeleteFunc(account.Groups[devGroupID].Peers, func(id string) bool { - return id == deletedPeerID - }) - account.Groups[allGroupID].Peers = slices.DeleteFunc(account.Groups[allGroupID].Peers, func(id string) bool { - return id == deletedPeerID - }) - delete(validatedPeersMap, deletedPeerID) - - builder := types.NewNetworkMapBuilder(account, validatedPeersMap) - - b.ResetTimer() - b.Run("old builder after delete", func(b *testing.B) { - for i := 0; i < b.N; i++ { - for _, testingPeerID := range peerIDs { - _ = account.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, []*zones.Zone{}, validatedPeersMap, nil, nil, nil, account.GetActiveGroupUsers()) - } - } - }) - - b.ResetTimer() - b.Run("new builder after delete", func(b *testing.B) { - for i := 0; i < b.N; i++ { - _ = builder.OnPeerDeleted(account, deletedPeerID) - for _, testingPeerID := range peerIDs { - _ = builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil) - } - } - }) -} - -func normalizeAndSortNetworkMap(networkMap *types.NetworkMap) { - for _, peer := range networkMap.Peers { - if peer.Status != nil { - peer.Status.LastSeen = time.Time{} - } - peer.LastLogin = &time.Time{} - } - for _, peer := range networkMap.OfflinePeers { - if peer.Status != nil { - peer.Status.LastSeen = time.Time{} - } - peer.LastLogin = &time.Time{} - } - - sort.Slice(networkMap.Peers, func(i, j int) bool { return networkMap.Peers[i].ID < networkMap.Peers[j].ID }) - sort.Slice(networkMap.OfflinePeers, func(i, j int) bool { return networkMap.OfflinePeers[i].ID < networkMap.OfflinePeers[j].ID }) - sort.Slice(networkMap.Routes, func(i, j int) bool { return networkMap.Routes[i].ID < networkMap.Routes[j].ID }) - - sort.Slice(networkMap.FirewallRules, func(i, j int) bool { - r1, r2 := networkMap.FirewallRules[i], networkMap.FirewallRules[j] - if r1.PeerIP != r2.PeerIP { - return r1.PeerIP < r2.PeerIP - } - if r1.Protocol != r2.Protocol { - return r1.Protocol < r2.Protocol - } - if r1.Direction != r2.Direction { - return r1.Direction < r2.Direction - } - if r1.Action != r2.Action { - return r1.Action < r2.Action - } - return r1.Port < r2.Port - }) - - sort.Slice(networkMap.RoutesFirewallRules, func(i, j int) bool { - r1, r2 := networkMap.RoutesFirewallRules[i], networkMap.RoutesFirewallRules[j] - if r1.RouteID != r2.RouteID { - return r1.RouteID < r2.RouteID - } - if r1.Action != r2.Action { - return r1.Action < r2.Action - } - if r1.Destination != r2.Destination { - return r1.Destination < r2.Destination - } - if len(r1.SourceRanges) > 0 && len(r2.SourceRanges) > 0 { - if r1.SourceRanges[0] != r2.SourceRanges[0] { - return r1.SourceRanges[0] < r2.SourceRanges[0] - } - } - return r1.Port < r2.Port - }) - - for _, ranges := range networkMap.RoutesFirewallRules { - sort.Slice(ranges.SourceRanges, func(i, j int) bool { - return ranges.SourceRanges[i] < ranges.SourceRanges[j] - }) - } -} - -type networkMapJSON struct { - Peers []*nbpeer.Peer `json:"Peers"` - Network *types.Network `json:"Network"` - Routes []*route.Route `json:"Routes"` - DNSConfig dns.Config `json:"DNSConfig"` - OfflinePeers []*nbpeer.Peer `json:"OfflinePeers"` - FirewallRules []*types.FirewallRule `json:"FirewallRules"` - RoutesFirewallRules []*types.RouteFirewallRule `json:"RoutesFirewallRules"` - ForwardingRules []*types.ForwardingRule `json:"ForwardingRules"` - AuthorizedUsers map[string][]string `json:"AuthorizedUsers,omitempty"` - EnableSSH bool `json:"EnableSSH"` -} - -func toNetworkMapJSON(nm *types.NetworkMap) *networkMapJSON { - result := &networkMapJSON{ - Peers: nm.Peers, - Network: nm.Network, - Routes: nm.Routes, - DNSConfig: nm.DNSConfig, - OfflinePeers: nm.OfflinePeers, - FirewallRules: nm.FirewallRules, - RoutesFirewallRules: nm.RoutesFirewallRules, - ForwardingRules: nm.ForwardingRules, - EnableSSH: nm.EnableSSH, - } - - if len(nm.AuthorizedUsers) > 0 { - result.AuthorizedUsers = make(map[string][]string) - localUsers := make([]string, 0, len(nm.AuthorizedUsers)) - for localUser := range nm.AuthorizedUsers { - localUsers = append(localUsers, localUser) - } - sort.Strings(localUsers) - - for _, localUser := range localUsers { - userIDs := nm.AuthorizedUsers[localUser] - sortedUserIDs := make([]string, 0, len(userIDs)) - for userID := range userIDs { - sortedUserIDs = append(sortedUserIDs, userID) - } - sort.Strings(sortedUserIDs) - result.AuthorizedUsers[localUser] = sortedUserIDs - } - } - - return result -} - -func createTestAccountWithEntities() *types.Account { - peers := make(map[string]*nbpeer.Peer) - devGroupPeers, opsGroupPeers, allGroupPeers := []string{}, []string{}, []string{} - - for i := range numPeers { - peerID := fmt.Sprintf("peer-%d", i) - ip := net.IP{100, 64, 0, byte(i + 1)} - wtVersion := "0.25.0" - if i%2 == 0 { - wtVersion = "0.40.0" - } - - p := &nbpeer.Peer{ - ID: peerID, IP: ip, Key: fmt.Sprintf("key-%s", peerID), DNSLabel: fmt.Sprintf("peer%d", i+1), - Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, - UserID: "user-admin", Meta: nbpeer.PeerSystemMeta{WtVersion: wtVersion, GoOS: "linux"}, - } - - if peerID == expiredPeerID { - p.LoginExpirationEnabled = true - pastTimestamp := time.Now().Add(-2 * time.Hour) - p.LastLogin = &pastTimestamp - } - - peers[peerID] = p - allGroupPeers = append(allGroupPeers, peerID) - if i < numPeers/2 { - devGroupPeers = append(devGroupPeers, peerID) - } else { - opsGroupPeers = append(opsGroupPeers, peerID) - } - - } - - groups := map[string]*types.Group{ - allGroupID: {ID: allGroupID, Name: "All", Peers: allGroupPeers}, - devGroupID: {ID: devGroupID, Name: "Developers", Peers: devGroupPeers}, - opsGroupID: {ID: opsGroupID, Name: "Operations", Peers: opsGroupPeers}, - sshUsersGroupID: {ID: sshUsersGroupID, Name: "SSH Users", Peers: []string{}}, - } - - policies := []*types.Policy{ - { - ID: policyIDAll, Name: "Default-Allow", Enabled: true, - Rules: []*types.PolicyRule{{ - ID: policyIDAll, Name: "Allow All", Enabled: true, Action: types.PolicyTrafficActionAccept, - Protocol: types.PolicyRuleProtocolALL, Bidirectional: true, - Sources: []string{allGroupID}, Destinations: []string{allGroupID}, - }}, - }, - { - ID: policyIDDevOps, Name: "Dev to Ops Web Access", Enabled: true, - Rules: []*types.PolicyRule{{ - ID: policyIDDevOps, Name: "Dev -> Ops (HTTP Range)", Enabled: true, Action: types.PolicyTrafficActionAccept, - Protocol: types.PolicyRuleProtocolTCP, Bidirectional: false, - PortRanges: []types.RulePortRange{{Start: 8080, End: 8090}}, - Sources: []string{devGroupID}, Destinations: []string{opsGroupID}, - }}, - }, - { - ID: policyIDDrop, Name: "Drop DB traffic", Enabled: true, - Rules: []*types.PolicyRule{{ - ID: policyIDDrop, Name: "Drop DB", Enabled: true, Action: types.PolicyTrafficActionDrop, - Protocol: types.PolicyRuleProtocolTCP, Ports: []string{"5432"}, Bidirectional: true, - Sources: []string{devGroupID}, Destinations: []string{opsGroupID}, - }}, - }, - { - ID: policyIDPosture, Name: "Posture Check for DB Resource", Enabled: true, - SourcePostureChecks: []string{postureCheckID}, - Rules: []*types.PolicyRule{{ - ID: policyIDPosture, Name: "Allow DB Access", Enabled: true, Action: types.PolicyTrafficActionAccept, - Protocol: types.PolicyRuleProtocolALL, Bidirectional: true, - Sources: []string{opsGroupID}, DestinationResource: types.Resource{ID: networkResourceID}, - }}, - }, - { - ID: policyIDSSH, Name: "SSH Access Policy", Enabled: true, - Rules: []*types.PolicyRule{{ - ID: policyIDSSH, Name: "Allow SSH to Ops", Enabled: true, Action: types.PolicyTrafficActionAccept, - Protocol: types.PolicyRuleProtocolNetbirdSSH, Bidirectional: false, - Sources: []string{devGroupID}, Destinations: []string{opsGroupID}, - AuthorizedGroups: map[string][]string{sshUsersGroupID: {"root", "admin"}}, - }}, - }, - } - - routes := map[route.ID]*route.Route{ - routeID: { - ID: routeID, Network: netip.MustParsePrefix("192.168.10.0/24"), - Peer: peers["peer-75"].Key, - PeerID: "peer-75", - Description: "Route to internal resource", Enabled: true, - PeerGroups: []string{devGroupID, opsGroupID}, - Groups: []string{devGroupID, opsGroupID}, - AccessControlGroups: []string{devGroupID}, - }, - routeHA1ID: { - ID: routeHA1ID, Network: netip.MustParsePrefix("10.10.0.0/16"), - Peer: peers["peer-80"].Key, - PeerID: "peer-80", - Description: "HA Route 1", Enabled: true, Metric: 1000, - PeerGroups: []string{allGroupID}, - Groups: []string{allGroupID}, - AccessControlGroups: []string{allGroupID}, - }, - routeHA2ID: { - ID: routeHA2ID, Network: netip.MustParsePrefix("10.10.0.0/16"), - Peer: peers["peer-90"].Key, - PeerID: "peer-90", - Description: "HA Route 2", Enabled: true, Metric: 900, - PeerGroups: []string{devGroupID, opsGroupID}, - Groups: []string{devGroupID, opsGroupID}, - AccessControlGroups: []string{allGroupID}, - }, - } - - users := map[string]*types.User{ - userAdminID: {Id: userAdminID, Role: types.UserRoleAdmin, IsServiceUser: false, AccountID: testAccountID, AutoGroups: []string{allGroupID}}, - userDevID: {Id: userDevID, Role: types.UserRoleUser, IsServiceUser: false, AccountID: testAccountID, AutoGroups: []string{sshUsersGroupID, devGroupID}}, - userOpsID: {Id: userOpsID, Role: types.UserRoleUser, IsServiceUser: false, AccountID: testAccountID, AutoGroups: []string{sshUsersGroupID, opsGroupID}}, - } - - account := &types.Account{ - Id: testAccountID, Peers: peers, Groups: groups, Policies: policies, Routes: routes, - Users: users, - Network: &types.Network{ - Identifier: "net-golden-test", Net: net.IPNet{IP: net.IP{100, 64, 0, 0}, Mask: net.CIDRMask(16, 32)}, Serial: 1, - }, - DNSSettings: types.DNSSettings{DisabledManagementGroups: []string{opsGroupID}}, - NameServerGroups: map[string]*dns.NameServerGroup{ - nameserverGroupID: { - ID: nameserverGroupID, Name: "Main NS", Enabled: true, Groups: []string{devGroupID}, - NameServers: []dns.NameServer{{IP: netip.MustParseAddr("8.8.8.8"), NSType: dns.UDPNameServerType, Port: 53}}, - }, - }, - PostureChecks: []*posture.Checks{ - {ID: postureCheckID, Name: "Check version", Checks: posture.ChecksDefinition{ - NBVersionCheck: &posture.NBVersionCheck{MinVersion: "0.26.0"}, - }}, - }, - NetworkResources: []*resourceTypes.NetworkResource{ - {ID: networkResourceID, NetworkID: networkID, AccountID: testAccountID, Enabled: true, Address: "db.netbird.cloud"}, - }, - Networks: []*networkTypes.Network{{ID: networkID, Name: "DB Network", AccountID: testAccountID}}, - NetworkRouters: []*routerTypes.NetworkRouter{ - {ID: networkRouterID, NetworkID: networkID, Peer: routingPeerID, Enabled: true, AccountID: testAccountID}, - }, - Settings: &types.Settings{PeerLoginExpirationEnabled: true, PeerLoginExpiration: 1 * time.Hour}, - } - - for _, p := range account.Policies { - p.AccountID = account.Id - } - for _, r := range account.Routes { - r.AccountID = account.Id - } - - return account -} - -func TestGetPeerNetworkMap_Golden_New_WithOnPeerAddedRouter_Batched(t *testing.T) { - account := createTestAccountWithEntities() - - ctx := context.Background() - validatedPeersMap := make(map[string]struct{}) - for i := range numPeers { - peerID := fmt.Sprintf("peer-%d", i) - if peerID == offlinePeerID { - continue - } - validatedPeersMap[peerID] = struct{}{} - } - - builder := types.NewNetworkMapBuilder(account, validatedPeersMap) - - newRouterID := "peer-new-router-102" - newRouterIP := net.IP{100, 64, 1, 2} - newRouter := &nbpeer.Peer{ - ID: newRouterID, - IP: newRouterIP, - Key: fmt.Sprintf("key-%s", newRouterID), - DNSLabel: "newrouter102", - Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, - UserID: "user-admin", - Meta: nbpeer.PeerSystemMeta{WtVersion: "0.26.0", GoOS: "linux"}, - LastLogin: func() *time.Time { t := time.Now(); return &t }(), - } - - account.Peers[newRouterID] = newRouter - - if opsGroup, exists := account.Groups[opsGroupID]; exists { - opsGroup.Peers = append(opsGroup.Peers, newRouterID) - } - if allGroup, exists := account.Groups[allGroupID]; exists { - allGroup.Peers = append(allGroup.Peers, newRouterID) - } - - newRoute := &route.Route{ - ID: route.ID("route-new-router"), - Network: netip.MustParsePrefix("172.16.0.0/24"), - Peer: newRouter.Key, - PeerID: newRouterID, - Description: "Route from new router", - Enabled: true, - PeerGroups: []string{opsGroupID}, - Groups: []string{devGroupID, opsGroupID}, - AccessControlGroups: []string{devGroupID}, - AccountID: account.Id, - } - account.Routes[newRoute.ID] = newRoute - - validatedPeersMap[newRouterID] = struct{}{} - - if account.Network != nil { - account.Network.Serial++ - } - - builder.EnqueuePeersForIncrementalAdd(account, newRouterID) - - time.Sleep(100 * time.Millisecond) - - networkMap := builder.GetPeerNetworkMap(ctx, testingPeerID, dns.CustomZone{}, nil, validatedPeersMap, nil) - - normalizeAndSortNetworkMap(networkMap) - - jsonData, err := json.MarshalIndent(networkMap, "", " ") - require.NoError(t, err, "error marshaling network map to JSON") - - goldenFilePath := filepath.Join("testdata", "networkmap_golden_new_with_onpeeradded_router.json") - - t.Log("Update golden file with OnPeerAdded router...") - err = os.MkdirAll(filepath.Dir(goldenFilePath), 0755) - require.NoError(t, err) - err = os.WriteFile(goldenFilePath, jsonData, 0644) - require.NoError(t, err) - - expectedJSON, err := os.ReadFile(goldenFilePath) - require.NoError(t, err, "error reading golden file") - - require.JSONEq(t, string(expectedJSON), string(jsonData), "network map from NEW builder with OnPeerAdded router does not match golden file") -} diff --git a/management/server/types/networkmapbuilder.go b/management/server/types/networkmapbuilder.go deleted file mode 100644 index 6448b8403..000000000 --- a/management/server/types/networkmapbuilder.go +++ /dev/null @@ -1,2317 +0,0 @@ -package types - -import ( - "context" - "fmt" - "slices" - "strconv" - "strings" - "sync" - "time" - - log "github.com/sirupsen/logrus" - "golang.org/x/exp/maps" - - "github.com/netbirdio/netbird/client/ssh/auth" - nbdns "github.com/netbirdio/netbird/dns" - "github.com/netbirdio/netbird/management/internals/modules/zones" - resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" - routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" - nbpeer "github.com/netbirdio/netbird/management/server/peer" - "github.com/netbirdio/netbird/management/server/telemetry" - "github.com/netbirdio/netbird/route" -) - -const ( - allPeers = "0.0.0.0" - allWildcard = "0.0.0.0/0" - v6AllWildcard = "::/0" - fw = "fw:" - rfw = "route-fw:" - - szAddPeerBatch = 10 - maxPeerAddRetries = 20 -) - -type NetworkMapCache struct { - globalRoutes map[route.ID]*route.Route - globalRules map[string]*FirewallRule //ruleId - globalRouteRules map[string]*RouteFirewallRule //ruleId - globalPeers map[string]*nbpeer.Peer - - groupToPeers map[string][]string - peerToGroups map[string][]string - policyToRules map[string][]*PolicyRule //policyId - groupToPolicies map[string][]*Policy - groupToRoutes map[string][]*route.Route - peerToRoutes map[string][]*route.Route - - peerACLs map[string]*PeerACLView - peerRoutes map[string]*PeerRoutesView - peerDNS map[string]*nbdns.Config - peerSSH map[string]*PeerSSHView - - groupIDToUserIDs map[string][]string - allowedUserIDs map[string]struct{} - - resourceRouters map[string]map[string]*routerTypes.NetworkRouter - resourcePolicies map[string][]*Policy - - globalResources map[string]*resourceTypes.NetworkResource // resourceId - - acgToRoutes map[string]map[route.ID]*RouteOwnerInfo // routeID -> owner info - noACGRoutes map[route.ID]*RouteOwnerInfo - - mu sync.RWMutex -} - -type RouteOwnerInfo struct { - PeerID string - RouteID route.ID -} - -type PeerACLView struct { - ConnectedPeerIDs []string - FirewallRuleIDs []string -} - -type PeerRoutesView struct { - OwnRouteIDs []route.ID - NetworkResourceIDs []route.ID - InheritedRouteIDs []route.ID - RouteFirewallRuleIDs []string -} - -type PeerSSHView struct { - EnableSSH bool - AuthorizedUsers map[string]map[string]struct{} -} - -type NetworkMapBuilder struct { - account *Account - cache *NetworkMapCache - validatedPeers map[string]struct{} - - apb addPeerBatch -} - -type addPeerBatch struct { - mu sync.Mutex - sg *sync.Cond - ids []string - la *Account - retryCount map[string]int -} - -func NewNetworkMapBuilder(account *Account, validatedPeers map[string]struct{}) *NetworkMapBuilder { - builder := &NetworkMapBuilder{ - cache: &NetworkMapCache{ - globalRoutes: make(map[route.ID]*route.Route), - globalRules: make(map[string]*FirewallRule), - globalRouteRules: make(map[string]*RouteFirewallRule), - globalPeers: make(map[string]*nbpeer.Peer), - groupToPeers: make(map[string][]string), - peerToGroups: make(map[string][]string), - policyToRules: make(map[string][]*PolicyRule), - groupToPolicies: make(map[string][]*Policy), - groupToRoutes: make(map[string][]*route.Route), - peerToRoutes: make(map[string][]*route.Route), - peerACLs: make(map[string]*PeerACLView), - peerRoutes: make(map[string]*PeerRoutesView), - peerDNS: make(map[string]*nbdns.Config), - peerSSH: make(map[string]*PeerSSHView), - groupIDToUserIDs: make(map[string][]string), - allowedUserIDs: make(map[string]struct{}), - globalResources: make(map[string]*resourceTypes.NetworkResource), - acgToRoutes: make(map[string]map[route.ID]*RouteOwnerInfo), - noACGRoutes: make(map[route.ID]*RouteOwnerInfo), - }, - validatedPeers: make(map[string]struct{}), - } - builder.apb.sg = sync.NewCond(&builder.apb.mu) - builder.apb.ids = make([]string, 0, szAddPeerBatch) - builder.apb.la = account - builder.apb.retryCount = make(map[string]int) - - maps.Copy(builder.validatedPeers, validatedPeers) - - builder.initialBuild(account) - - go builder.incAddPeerLoop() - return builder -} - -func (b *NetworkMapBuilder) initialBuild(account *Account) { - b.cache.mu.Lock() - defer b.cache.mu.Unlock() - - b.account = account - - start := time.Now() - - b.buildGlobalIndexes(account) - - resourceRouters := account.GetResourceRoutersMap() - resourcePolicies := account.GetResourcePoliciesMap() - b.cache.resourceRouters = resourceRouters - b.cache.resourcePolicies = resourcePolicies - - for peerID := range account.Peers { - b.buildPeerACLView(account, peerID) - b.buildPeerRoutesView(account, peerID) - b.buildPeerDNSView(account, peerID) - } - - log.Debugf("NetworkMapBuilder: Initial build completed in %v for account %s", time.Since(start), account.Id) -} - -func (b *NetworkMapBuilder) buildGlobalIndexes(account *Account) { - clear(b.cache.globalPeers) - clear(b.cache.groupToPeers) - clear(b.cache.peerToGroups) - clear(b.cache.policyToRules) - clear(b.cache.groupToPolicies) - clear(b.cache.globalRoutes) - clear(b.cache.globalRules) - clear(b.cache.globalRouteRules) - clear(b.cache.globalResources) - clear(b.cache.groupToRoutes) - clear(b.cache.peerToRoutes) - clear(b.cache.acgToRoutes) - clear(b.cache.noACGRoutes) - clear(b.cache.groupIDToUserIDs) - clear(b.cache.allowedUserIDs) - clear(b.cache.peerSSH) - - maps.Copy(b.cache.globalPeers, account.Peers) - - b.cache.groupIDToUserIDs = account.GetActiveGroupUsers() - b.cache.allowedUserIDs = b.buildAllowedUserIDs(account) - - for groupID, group := range account.Groups { - peersCopy := make([]string, len(group.Peers)) - copy(peersCopy, group.Peers) - b.cache.groupToPeers[groupID] = peersCopy - - for _, peerID := range group.Peers { - b.cache.peerToGroups[peerID] = append(b.cache.peerToGroups[peerID], groupID) - } - } - - for _, policy := range account.Policies { - if !policy.Enabled { - continue - } - - b.cache.policyToRules[policy.ID] = policy.Rules - - affectedGroups := make(map[string]struct{}) - for _, rule := range policy.Rules { - if !rule.Enabled { - continue - } - - for _, groupID := range rule.Sources { - affectedGroups[groupID] = struct{}{} - } - for _, groupID := range rule.Destinations { - affectedGroups[groupID] = struct{}{} - } - if rule.SourceResource.Type == ResourceTypePeer && rule.SourceResource.ID != "" { - groupId := rule.SourceResource.ID - affectedGroups[groupId] = struct{}{} - b.cache.peerToGroups[rule.SourceResource.ID] = append(b.cache.peerToGroups[rule.SourceResource.ID], groupId) - } - if rule.DestinationResource.Type == ResourceTypePeer && rule.DestinationResource.ID != "" { - groupId := rule.DestinationResource.ID - affectedGroups[groupId] = struct{}{} - b.cache.peerToGroups[rule.DestinationResource.ID] = append(b.cache.peerToGroups[rule.DestinationResource.ID], groupId) - } - } - - for groupID := range affectedGroups { - b.cache.groupToPolicies[groupID] = append(b.cache.groupToPolicies[groupID], policy) - } - } - - for _, resource := range account.NetworkResources { - if !resource.Enabled { - continue - } - b.cache.globalResources[resource.ID] = resource - } - - for _, r := range account.Routes { - if !r.Enabled { - continue - } - for _, groupID := range r.PeerGroups { - b.cache.groupToRoutes[groupID] = append(b.cache.groupToRoutes[groupID], r) - } - if r.Peer != "" { - if peer, ok := b.cache.globalPeers[r.Peer]; ok { - b.cache.peerToRoutes[peer.ID] = append(b.cache.peerToRoutes[peer.ID], r) - } - } - } -} - -func (b *NetworkMapBuilder) buildPeerACLView(account *Account, peerID string) { - peer := account.GetPeer(peerID) - if peer == nil { - return - } - - allPotentialPeers, firewallRules, authorizedUsers, sshEnabled := b.getPeerConnectionResources(account, peer, b.validatedPeers) - - isRouter, networkResourcesRoutes, sourcePeers := b.getNetworkResourcesForPeer(account, peer) - - var emptyExpiredPeers []*nbpeer.Peer - finalAllPeers := b.addNetworksRoutingPeers( - networkResourcesRoutes, - peer, - allPotentialPeers, - emptyExpiredPeers, - isRouter, - sourcePeers, - ) - - view := &PeerACLView{ - ConnectedPeerIDs: make([]string, 0, len(finalAllPeers)), - FirewallRuleIDs: make([]string, 0, len(firewallRules)), - } - - for _, p := range finalAllPeers { - view.ConnectedPeerIDs = append(view.ConnectedPeerIDs, p.ID) - } - - for _, rule := range firewallRules { - ruleID := b.generateFirewallRuleID(rule) - view.FirewallRuleIDs = append(view.FirewallRuleIDs, ruleID) - b.cache.globalRules[ruleID] = rule - } - - b.cache.peerACLs[peerID] = view - b.cache.peerSSH[peerID] = &PeerSSHView{ - EnableSSH: sshEnabled, - AuthorizedUsers: authorizedUsers, - } -} - -func (b *NetworkMapBuilder) getPeerConnectionResources(account *Account, peer *nbpeer.Peer, - validatedPeersMap map[string]struct{}, -) ([]*nbpeer.Peer, []*FirewallRule, map[string]map[string]struct{}, bool) { - peerID := peer.ID - ctx := context.Background() - - peerGroups := b.cache.peerToGroups[peerID] - peerGroupsMap := make(map[string]struct{}, len(peerGroups)) - for _, groupID := range peerGroups { - peerGroupsMap[groupID] = struct{}{} - } - - rulesExists := make(map[string]struct{}) - peersExists := make(map[string]struct{}) - fwRules := make([]*FirewallRule, 0) - peers := make([]*nbpeer.Peer, 0) - - authorizedUsers := make(map[string]map[string]struct{}) - sshEnabled := false - - for _, group := range peerGroups { - policies := b.cache.groupToPolicies[group] - for _, policy := range policies { - if isValid := account.validatePostureChecksOnPeer(ctx, policy.SourcePostureChecks, peerID); !isValid { - continue - } - rules := b.cache.policyToRules[policy.ID] - for _, rule := range rules { - var sourcePeers, destinationPeers []*nbpeer.Peer - var peerInSources, peerInDestinations bool - - if rule.SourceResource.Type == ResourceTypePeer && rule.SourceResource.ID != "" { - peerInSources = rule.SourceResource.ID == peerID - } else { - peerInSources = b.isPeerInGroupscached(rule.Sources, peerGroupsMap) - } - - if rule.DestinationResource.Type == ResourceTypePeer && rule.DestinationResource.ID != "" { - peerInDestinations = rule.DestinationResource.ID == peerID - } else { - peerInDestinations = b.isPeerInGroupscached(rule.Destinations, peerGroupsMap) - } - - if !peerInSources && !peerInDestinations { - continue - } - - if rule.SourceResource.Type == ResourceTypePeer && rule.SourceResource.ID != "" { - peer := account.GetPeer(rule.SourceResource.ID) - if peer != nil { - sourcePeers = []*nbpeer.Peer{peer} - } - } else { - sourcePeers = b.getPeersFromGroupscached(account, rule.Sources, peerID, policy.SourcePostureChecks, validatedPeersMap) - } - - if rule.DestinationResource.Type == ResourceTypePeer && rule.DestinationResource.ID != "" { - peer := account.GetPeer(rule.DestinationResource.ID) - if peer != nil { - destinationPeers = []*nbpeer.Peer{peer} - } - } else { - destinationPeers = b.getPeersFromGroupscached(account, rule.Destinations, peerID, nil, validatedPeersMap) - } - - if rule.Bidirectional { - if peerInSources { - b.generateResourcescached( - rule, destinationPeers, FirewallRuleDirectionIN, - peer, &peers, &fwRules, peersExists, rulesExists, - ) - } - if peerInDestinations { - b.generateResourcescached( - rule, sourcePeers, FirewallRuleDirectionOUT, - peer, &peers, &fwRules, peersExists, rulesExists, - ) - } - } - - if peerInSources { - b.generateResourcescached( - rule, destinationPeers, FirewallRuleDirectionOUT, - peer, &peers, &fwRules, peersExists, rulesExists, - ) - } - - if peerInDestinations { - b.generateResourcescached( - rule, sourcePeers, FirewallRuleDirectionIN, - peer, &peers, &fwRules, peersExists, rulesExists, - ) - - if rule.Protocol == PolicyRuleProtocolNetbirdSSH { - sshEnabled = true - switch { - case len(rule.AuthorizedGroups) > 0: - for groupID, localUsers := range rule.AuthorizedGroups { - userIDs, ok := b.cache.groupIDToUserIDs[groupID] - if !ok { - continue - } - - if len(localUsers) == 0 { - localUsers = []string{auth.Wildcard} - } - - for _, localUser := range localUsers { - if authorizedUsers[localUser] == nil { - authorizedUsers[localUser] = make(map[string]struct{}) - } - for _, userID := range userIDs { - authorizedUsers[localUser][userID] = struct{}{} - } - } - } - case rule.AuthorizedUser != "": - if authorizedUsers[auth.Wildcard] == nil { - authorizedUsers[auth.Wildcard] = make(map[string]struct{}) - } - authorizedUsers[auth.Wildcard][rule.AuthorizedUser] = struct{}{} - default: - authorizedUsers[auth.Wildcard] = maps.Clone(b.cache.allowedUserIDs) - } - } else if policyRuleImpliesLegacySSH(rule) && peer.SSHEnabled { - sshEnabled = true - authorizedUsers[auth.Wildcard] = maps.Clone(b.cache.allowedUserIDs) - } - } - } - } - } - - return peers, fwRules, authorizedUsers, sshEnabled -} - -func (b *NetworkMapBuilder) isPeerInGroupscached(groupIDs []string, peerGroupsMap map[string]struct{}) bool { - for _, groupID := range groupIDs { - if _, exists := peerGroupsMap[groupID]; exists { - return true - } - } - return false -} - -func (b *NetworkMapBuilder) getPeersFromGroupscached(account *Account, groupIDs []string, - excludePeerID string, postureChecksIDs []string, validatedPeersMap map[string]struct{}, -) []*nbpeer.Peer { - ctx := context.Background() - uniquePeers := make(map[string]*nbpeer.Peer) - - for _, groupID := range groupIDs { - peerIDs := b.cache.groupToPeers[groupID] - for _, peerID := range peerIDs { - if peerID == excludePeerID { - continue - } - - if _, ok := validatedPeersMap[peerID]; !ok { - continue - } - - peer := b.cache.globalPeers[peerID] - if peer == nil { - continue - } - - if len(postureChecksIDs) > 0 { - if !account.validatePostureChecksOnPeer(ctx, postureChecksIDs, peerID) { - continue - } - } - - uniquePeers[peerID] = peer - } - } - - result := make([]*nbpeer.Peer, 0, len(uniquePeers)) - for _, peer := range uniquePeers { - result = append(result, peer) - } - - return result -} - -func (b *NetworkMapBuilder) generateResourcescached( - rule *PolicyRule, groupPeers []*nbpeer.Peer, direction int, targetPeer *nbpeer.Peer, - peers *[]*nbpeer.Peer, rules *[]*FirewallRule, peersExists map[string]struct{}, rulesExists map[string]struct{}, -) { - for _, peer := range groupPeers { - if peer == nil { - continue - } - if _, ok := peersExists[peer.ID]; !ok { - *peers = append(*peers, peer) - peersExists[peer.ID] = struct{}{} - } - - fr := FirewallRule{ - PolicyID: rule.ID, - PeerIP: peer.IP.String(), - Direction: direction, - Action: string(rule.Action), - Protocol: firewallRuleProtocol(rule.Protocol), - } - - var s strings.Builder - s.WriteString(rule.ID) - s.WriteString(fr.PeerIP) - s.WriteString(strconv.Itoa(direction)) - s.WriteString(fr.Protocol) - s.WriteString(fr.Action) - s.WriteString(strings.Join(rule.Ports, ",")) - - ruleID := s.String() - - if _, ok := rulesExists[ruleID]; ok { - continue - } - rulesExists[ruleID] = struct{}{} - - if len(rule.Ports) == 0 && len(rule.PortRanges) == 0 { - *rules = append(*rules, &fr) - continue - } - - *rules = append(*rules, expandPortsAndRanges(fr, rule, targetPeer)...) - } -} - -func (b *NetworkMapBuilder) getNetworkResourcesForPeer(account *Account, peer *nbpeer.Peer) (bool, []*route.Route, map[string]struct{}) { - ctx := context.Background() - peerID := peer.ID - - var isRoutingPeer bool - var routes []*route.Route - allSourcePeers := make(map[string]struct{}) - - peerGroups := b.cache.peerToGroups[peerID] - peerGroupsMap := make(map[string]struct{}, len(peerGroups)) - for _, groupID := range peerGroups { - peerGroupsMap[groupID] = struct{}{} - } - - for _, resource := range b.cache.globalResources { - - networkRoutingPeers := b.cache.resourceRouters[resource.NetworkID] - resourcePolicies := b.cache.resourcePolicies[resource.ID] - if len(resourcePolicies) == 0 { - continue - } - - isRouterForThisResource := false - - if networkRoutingPeers != nil { - if router, ok := networkRoutingPeers[peerID]; ok && router.Enabled { - isRoutingPeer = true - isRouterForThisResource = true - if rt := b.createNetworkResourceRoutes(resource, peerID, router, resourcePolicies); rt != nil { - routes = append(routes, rt) - } - } - } - - hasAccessAsClient := false - if !isRouterForThisResource { - for _, policy := range resourcePolicies { - if b.isPeerInGroupscached(policy.SourceGroups(), peerGroupsMap) { - if account.validatePostureChecksOnPeer(ctx, policy.SourcePostureChecks, peerID) { - hasAccessAsClient = true - break - } - } - } - } - - if hasAccessAsClient && networkRoutingPeers != nil { - for routerPeerID, router := range networkRoutingPeers { - if router.Enabled { - if rt := b.createNetworkResourceRoutes(resource, routerPeerID, router, resourcePolicies); rt != nil { - routes = append(routes, rt) - } - } - } - } - - if isRouterForThisResource { - for _, policy := range resourcePolicies { - var peersWithAccess []*nbpeer.Peer - if policy.Rules[0].SourceResource.Type == ResourceTypePeer && policy.Rules[0].SourceResource.ID != "" { - peersWithAccess = []*nbpeer.Peer{peer} - } else { - peersWithAccess = b.getPeersFromGroupscached(account, policy.SourceGroups(), "", policy.SourcePostureChecks, b.validatedPeers) - } - for _, p := range peersWithAccess { - allSourcePeers[p.ID] = struct{}{} - } - } - } - } - - return isRoutingPeer, routes, allSourcePeers -} - -func (b *NetworkMapBuilder) createNetworkResourceRoutes( - resource *resourceTypes.NetworkResource, routerPeerID string, - router *routerTypes.NetworkRouter, resourcePolicies []*Policy, -) *route.Route { - if len(resourcePolicies) > 0 { - peer := b.cache.globalPeers[routerPeerID] - if peer != nil { - return resource.ToRoute(peer, router) - } - } - return nil -} - -func (b *NetworkMapBuilder) addNetworksRoutingPeers( - networkResourcesRoutes []*route.Route, peer *nbpeer.Peer, peersToConnect []*nbpeer.Peer, - expiredPeers []*nbpeer.Peer, isRouter bool, sourcePeers map[string]struct{}, -) []*nbpeer.Peer { - - networkRoutesPeers := make(map[string]struct{}, len(networkResourcesRoutes)) - for _, r := range networkResourcesRoutes { - networkRoutesPeers[r.PeerID] = struct{}{} - } - - delete(sourcePeers, peer.ID) - delete(networkRoutesPeers, peer.ID) - - for _, existingPeer := range peersToConnect { - delete(sourcePeers, existingPeer.ID) - delete(networkRoutesPeers, existingPeer.ID) - } - for _, expPeer := range expiredPeers { - delete(sourcePeers, expPeer.ID) - delete(networkRoutesPeers, expPeer.ID) - } - - missingPeers := make(map[string]struct{}, len(sourcePeers)+len(networkRoutesPeers)) - if isRouter { - for p := range sourcePeers { - missingPeers[p] = struct{}{} - } - } - for p := range networkRoutesPeers { - missingPeers[p] = struct{}{} - } - - for p := range missingPeers { - if missingPeer := b.cache.globalPeers[p]; missingPeer != nil { - peersToConnect = append(peersToConnect, missingPeer) - } - } - - return peersToConnect -} - -func (b *NetworkMapBuilder) buildPeerRoutesView(account *Account, peerID string) { - ctx := context.Background() - peer := account.GetPeer(peerID) - if peer == nil { - return - } - resourcePolicies := b.cache.resourcePolicies - - view := &PeerRoutesView{ - OwnRouteIDs: make([]route.ID, 0), - NetworkResourceIDs: make([]route.ID, 0), - RouteFirewallRuleIDs: make([]string, 0), - } - - enabledRoutes, disabledRoutes := b.getRoutingPeerRoutes(peerID) - for _, rt := range enabledRoutes { - if rt.PeerID != "" && rt.PeerID != peerID { - if b.cache.globalPeers[rt.PeerID] == nil { - continue - } - } - - view.OwnRouteIDs = append(view.OwnRouteIDs, rt.ID) - b.cache.globalRoutes[rt.ID] = rt - } - - aclView := b.cache.peerACLs[peerID] - if aclView != nil { - peerRoutesMembership := make(LookupMap) - for _, r := range append(enabledRoutes, disabledRoutes...) { - peerRoutesMembership[string(r.GetHAUniqueID())] = struct{}{} - } - - peerGroups := b.cache.peerToGroups[peerID] - peerGroupsMap := make(LookupMap) - for _, groupID := range peerGroups { - peerGroupsMap[groupID] = struct{}{} - } - - for _, aclPeerID := range aclView.ConnectedPeerIDs { - if aclPeerID == peerID { - continue - } - activeRoutes, _ := b.getRoutingPeerRoutes(aclPeerID) - groupFilteredRoutes := account.filterRoutesByGroups(activeRoutes, peerGroupsMap) - haFilteredRoutes := account.filterRoutesFromPeersOfSameHAGroup(groupFilteredRoutes, peerRoutesMembership) - - for _, inheritedRoute := range haFilteredRoutes { - view.InheritedRouteIDs = append(view.InheritedRouteIDs, inheritedRoute.ID) - b.cache.globalRoutes[inheritedRoute.ID] = inheritedRoute - } - } - } - - _, networkResourcesRoutes, _ := b.getNetworkResourcesForPeer(account, peer) - - for _, rt := range networkResourcesRoutes { - view.NetworkResourceIDs = append(view.NetworkResourceIDs, rt.ID) - b.cache.globalRoutes[rt.ID] = rt - } - - allRoutes := slices.Concat(enabledRoutes, networkResourcesRoutes) - b.updateACGIndexForPeer(peerID, allRoutes) - - routeFirewallRules := b.getPeerRoutesFirewallRules(account, peerID, b.validatedPeers) - for _, rule := range routeFirewallRules { - ruleID := b.generateRouteFirewallRuleID(rule) - view.RouteFirewallRuleIDs = append(view.RouteFirewallRuleIDs, ruleID) - b.cache.globalRouteRules[ruleID] = rule - } - - if len(networkResourcesRoutes) > 0 { - networkResourceFirewallRules := account.GetPeerNetworkResourceFirewallRules(ctx, peer, b.validatedPeers, networkResourcesRoutes, resourcePolicies) - for _, rule := range networkResourceFirewallRules { - ruleID := b.generateRouteFirewallRuleID(rule) - view.RouteFirewallRuleIDs = append(view.RouteFirewallRuleIDs, ruleID) - b.cache.globalRouteRules[ruleID] = rule - } - } - - b.cache.peerRoutes[peerID] = view -} - -func (b *NetworkMapBuilder) updateACGIndexForPeer(peerID string, routes []*route.Route) { - for acg, routeMap := range b.cache.acgToRoutes { - for routeID, info := range routeMap { - if info.PeerID == peerID { - delete(routeMap, routeID) - } - } - if len(routeMap) == 0 { - delete(b.cache.acgToRoutes, acg) - } - } - - for routeID, info := range b.cache.noACGRoutes { - if info.PeerID == peerID { - delete(b.cache.noACGRoutes, routeID) - } - } - - for _, rt := range routes { - if !rt.Enabled { - continue - } - - if len(rt.AccessControlGroups) == 0 { - b.cache.noACGRoutes[rt.ID] = &RouteOwnerInfo{ - PeerID: peerID, - RouteID: rt.ID, - } - } else { - for _, acg := range rt.AccessControlGroups { - if b.cache.acgToRoutes[acg] == nil { - b.cache.acgToRoutes[acg] = make(map[route.ID]*RouteOwnerInfo) - } - - b.cache.acgToRoutes[acg][rt.ID] = &RouteOwnerInfo{ - PeerID: peerID, - RouteID: rt.ID, - } - } - } - } -} - -func (b *NetworkMapBuilder) getRoutingPeerRoutes(peerID string) (enabledRoutes []*route.Route, disabledRoutes []*route.Route) { - peer := b.cache.globalPeers[peerID] - if peer == nil { - return enabledRoutes, disabledRoutes - } - - seenRoute := make(map[route.ID]struct{}) - - takeRoute := func(r *route.Route, id string) { - if _, ok := seenRoute[r.ID]; ok { - return - } - seenRoute[r.ID] = struct{}{} - - if r.Enabled { - // maybe here is some mess - here we store peer key (see comment below) - r.Peer = peer.Key - enabledRoutes = append(enabledRoutes, r) - return - } - disabledRoutes = append(disabledRoutes, r) - } - - peerGroups := b.cache.peerToGroups[peerID] - for _, groupID := range peerGroups { - groupRoutes := b.cache.groupToRoutes[groupID] - for _, r := range groupRoutes { - newPeerRoute := r.Copy() - // and here we store peer ID - this logic is taken from original account.getRoutingPeerRoutes - newPeerRoute.Peer = peerID - newPeerRoute.PeerGroups = nil - newPeerRoute.ID = route.ID(string(r.ID) + ":" + peerID) - takeRoute(newPeerRoute, peerID) - } - } - for _, r := range b.cache.peerToRoutes[peerID] { - takeRoute(r.Copy(), peerID) - } - return enabledRoutes, disabledRoutes -} - -func (b *NetworkMapBuilder) getPeerRoutesFirewallRules(account *Account, peerID string, validatedPeersMap map[string]struct{}) []*RouteFirewallRule { - routesFirewallRules := make([]*RouteFirewallRule, 0) - - enabledRoutes, _ := b.getRoutingPeerRoutes(peerID) - for _, route := range enabledRoutes { - if len(route.AccessControlGroups) == 0 { - defaultPermit := getDefaultPermit(route) - routesFirewallRules = append(routesFirewallRules, defaultPermit...) - continue - } - - distributionPeers := b.getDistributionGroupsPeers(route) - - for _, accessGroup := range route.AccessControlGroups { - policies := b.getAllRoutePoliciesFromGroups([]string{accessGroup}) - - rules := b.getRouteFirewallRules(peerID, policies, route, validatedPeersMap, distributionPeers, account) - routesFirewallRules = append(routesFirewallRules, rules...) - } - } - - return routesFirewallRules -} - -func (b *NetworkMapBuilder) getDistributionGroupsPeers(route *route.Route) map[string]struct{} { - distPeers := make(map[string]struct{}) - for _, id := range route.Groups { - groupPeers := b.cache.groupToPeers[id] - if groupPeers == nil { - continue - } - - for _, pID := range groupPeers { - distPeers[pID] = struct{}{} - } - } - return distPeers -} - -func (b *NetworkMapBuilder) getAllRoutePoliciesFromGroups(accessControlGroups []string) []*Policy { - routePolicies := make(map[string]*Policy) - - for _, groupID := range accessControlGroups { - candidatePolicies := b.cache.groupToPolicies[groupID] - - for _, policy := range candidatePolicies { - if _, found := routePolicies[policy.ID]; found { - continue - } - policyRules := b.cache.policyToRules[policy.ID] - for _, rule := range policyRules { - if slices.Contains(rule.Destinations, groupID) { - routePolicies[policy.ID] = policy - break - } - } - } - } - - return maps.Values(routePolicies) -} - -func (b *NetworkMapBuilder) getRouteFirewallRules( - peerID string, policies []*Policy, route *route.Route, validatedPeersMap map[string]struct{}, - distributionPeers map[string]struct{}, account *Account, -) []*RouteFirewallRule { - ctx := context.Background() - var fwRules []*RouteFirewallRule - for _, policy := range policies { - if !policy.Enabled { - continue - } - - for _, rule := range policy.Rules { - if !rule.Enabled { - continue - } - - rulePeers := b.getRulePeers(rule, policy.SourcePostureChecks, peerID, distributionPeers, validatedPeersMap, account) - - rules := generateRouteFirewallRules(ctx, route, rule, rulePeers, FirewallRuleDirectionIN) - fwRules = append(fwRules, rules...) - } - } - return fwRules -} - -func (b *NetworkMapBuilder) getRulePeers( - rule *PolicyRule, postureChecks []string, peerID string, distributionPeers map[string]struct{}, - validatedPeersMap map[string]struct{}, account *Account, -) []*nbpeer.Peer { - distPeersWithPolicy := make(map[string]struct{}) - - for _, id := range rule.Sources { - groupPeers := b.cache.groupToPeers[id] - if groupPeers == nil { - continue - } - - for _, pID := range groupPeers { - if pID == peerID { - continue - } - _, distPeer := distributionPeers[pID] - _, valid := validatedPeersMap[pID] - - if distPeer && valid && account.validatePostureChecksOnPeer(context.Background(), postureChecks, pID) { - distPeersWithPolicy[pID] = struct{}{} - } - } - } - - if rule.SourceResource.Type == ResourceTypePeer && rule.SourceResource.ID != "" { - _, distPeer := distributionPeers[rule.SourceResource.ID] - _, valid := validatedPeersMap[rule.SourceResource.ID] - if distPeer && valid && account.validatePostureChecksOnPeer(context.Background(), postureChecks, rule.SourceResource.ID) { - distPeersWithPolicy[rule.SourceResource.ID] = struct{}{} - } - } - - distributionGroupPeers := make([]*nbpeer.Peer, 0, len(distPeersWithPolicy)) - for pID := range distPeersWithPolicy { - peer := b.cache.globalPeers[pID] - if peer == nil { - continue - } - distributionGroupPeers = append(distributionGroupPeers, peer) - } - return distributionGroupPeers -} - -func (b *NetworkMapBuilder) buildPeerDNSView(account *Account, peerID string) { - peerGroups := b.cache.peerToGroups[peerID] - checkGroups := make(map[string]struct{}, len(peerGroups)) - for _, groupID := range peerGroups { - checkGroups[groupID] = struct{}{} - } - - dnsManagementStatus := b.getPeerDNSManagementStatus(account, checkGroups) - dnsConfig := &nbdns.Config{ - ServiceEnable: dnsManagementStatus, - } - - if dnsManagementStatus { - dnsConfig.NameServerGroups = b.getPeerNSGroups(account, peerID, checkGroups) - } - - b.cache.peerDNS[peerID] = dnsConfig -} - -func (b *NetworkMapBuilder) getPeerDNSManagementStatus(account *Account, checkGroups map[string]struct{}) bool { - - enabled := true - for _, groupID := range account.DNSSettings.DisabledManagementGroups { - _, found := checkGroups[groupID] - if found { - enabled = false - break - } - } - return enabled -} - -func (b *NetworkMapBuilder) getPeerNSGroups(account *Account, peerID string, checkGroups map[string]struct{}) []*nbdns.NameServerGroup { - var peerNSGroups []*nbdns.NameServerGroup - - for _, nsGroup := range account.NameServerGroups { - if !nsGroup.Enabled { - continue - } - for _, gID := range nsGroup.Groups { - _, found := checkGroups[gID] - if found { - peer := b.cache.globalPeers[peerID] - if !peerIsNameserver(peer, nsGroup) { - peerNSGroups = append(peerNSGroups, nsGroup.Copy()) - break - } - } - } - } - - return peerNSGroups -} - -func (b *NetworkMapBuilder) buildAllowedUserIDs(account *Account) map[string]struct{} { - users := make(map[string]struct{}) - for _, nbUser := range account.Users { - if !nbUser.IsBlocked() && !nbUser.IsServiceUser { - users[nbUser.Id] = struct{}{} - } - } - return users -} - -func firewallRuleProtocol(protocol PolicyRuleProtocolType) string { - if protocol == PolicyRuleProtocolNetbirdSSH { - return string(PolicyRuleProtocolTCP) - } - return string(protocol) -} - -// lock should be held -func (b *NetworkMapBuilder) updateAccountLocked(account *Account) *Account { - if account.Network.CurrentSerial() > b.account.Network.CurrentSerial() { - b.account = account - } - return b.account -} - -func (b *NetworkMapBuilder) GetPeerNetworkMap( - ctx context.Context, peerID string, peersCustomZone nbdns.CustomZone, accountZones []*zones.Zone, - validatedPeers map[string]struct{}, metrics *telemetry.AccountManagerMetrics, -) *NetworkMap { - start := time.Now() - - b.cache.mu.RLock() - defer b.cache.mu.RUnlock() - - account := b.account - - peer := account.GetPeer(peerID) - if peer == nil { - return &NetworkMap{Network: account.Network.Copy()} - } - - aclView := b.cache.peerACLs[peerID] - routesView := b.cache.peerRoutes[peerID] - dnsConfig := b.cache.peerDNS[peerID] - sshView := b.cache.peerSSH[peerID] - - if aclView == nil || routesView == nil || dnsConfig == nil { - return &NetworkMap{Network: account.Network.Copy()} - } - - nm := b.assembleNetworkMap(ctx, account, peer, aclView, routesView, dnsConfig, sshView, peersCustomZone, accountZones, validatedPeers) - - if metrics != nil { - objectCount := int64(len(nm.Peers) + len(nm.OfflinePeers) + len(nm.Routes) + len(nm.FirewallRules) + len(nm.RoutesFirewallRules)) - metrics.CountNetworkMapObjects(objectCount) - metrics.CountGetPeerNetworkMapDuration(time.Since(start)) - - if objectCount > 5000 { - log.WithContext(ctx).Tracef("account: %s has a total resource count of %d objects from cache", - account.Id, objectCount) - } - } - - return nm -} - -func (b *NetworkMapBuilder) assembleNetworkMap( - ctx context.Context, account *Account, peer *nbpeer.Peer, aclView *PeerACLView, routesView *PeerRoutesView, - dnsConfig *nbdns.Config, sshView *PeerSSHView, peersCustomZone nbdns.CustomZone, accountZones []*zones.Zone, validatedPeers map[string]struct{}, -) *NetworkMap { - - var peersToConnect []*nbpeer.Peer - var expiredPeers []*nbpeer.Peer - - for _, peerID := range aclView.ConnectedPeerIDs { - if _, ok := validatedPeers[peerID]; !ok { - continue - } - - peer := b.cache.globalPeers[peerID] - if peer == nil { - continue - } - - expired, _ := peer.LoginExpired(account.Settings.PeerLoginExpiration) - if account.Settings.PeerLoginExpirationEnabled && expired { - expiredPeers = append(expiredPeers, peer) - } else { - peersToConnect = append(peersToConnect, peer) - } - } - - var routes []*route.Route - allRouteIDs := slices.Concat(routesView.OwnRouteIDs, routesView.NetworkResourceIDs, routesView.InheritedRouteIDs) - - for _, routeID := range allRouteIDs { - if route := b.cache.globalRoutes[routeID]; route != nil { - routes = append(routes, route) - } - } - - var firewallRules []*FirewallRule - for _, ruleID := range aclView.FirewallRuleIDs { - if rule := b.cache.globalRules[ruleID]; rule != nil { - firewallRules = append(firewallRules, rule) - } else { - log.Debugf("NetworkMapBuilder: peer %s assembling network map has no fwrule %s in globalRules", peer.ID, ruleID) - } - } - - var routesFirewallRules []*RouteFirewallRule - for _, ruleID := range routesView.RouteFirewallRuleIDs { - if rule := b.cache.globalRouteRules[ruleID]; rule != nil { - routesFirewallRules = append(routesFirewallRules, rule) - } - } - - finalDNSConfig := *dnsConfig - if finalDNSConfig.ServiceEnable { - var zones []nbdns.CustomZone - - peerGroupsSlice := b.cache.peerToGroups[peer.ID] - peerGroups := make(LookupMap, len(peerGroupsSlice)) - for _, groupID := range peerGroupsSlice { - peerGroups[groupID] = struct{}{} - } - - if peersCustomZone.Domain != "" { - records := filterZoneRecordsForPeers(peer, peersCustomZone, peersToConnect, expiredPeers) - zones = append(zones, nbdns.CustomZone{ - Domain: peersCustomZone.Domain, - Records: records, - }) - } - - filteredAccountZones := filterPeerAppliedZones(ctx, accountZones, peerGroups) - zones = append(zones, filteredAccountZones...) - - finalDNSConfig.CustomZones = zones - } - - nm := &NetworkMap{ - Peers: peersToConnect, - Network: account.Network.Copy(), - Routes: routes, - DNSConfig: finalDNSConfig, - OfflinePeers: expiredPeers, - FirewallRules: firewallRules, - RoutesFirewallRules: routesFirewallRules, - } - - if sshView != nil { - nm.EnableSSH = sshView.EnableSSH - nm.AuthorizedUsers = sshView.AuthorizedUsers - } - - return nm -} - -func (b *NetworkMapBuilder) generateFirewallRuleID(rule *FirewallRule) string { - var s strings.Builder - s.WriteString(fw) - s.WriteString(rule.PolicyID) - s.WriteRune(':') - s.WriteString(rule.PeerIP) - s.WriteRune(':') - s.WriteString(strconv.Itoa(rule.Direction)) - s.WriteRune(':') - s.WriteString(rule.Protocol) - s.WriteRune(':') - s.WriteString(rule.Action) - s.WriteRune(':') - s.WriteString(rule.Port) - s.WriteRune(':') - s.WriteString(strconv.Itoa(int(rule.PortRange.Start))) - s.WriteRune('-') - s.WriteString(strconv.Itoa(int(rule.PortRange.End))) - return s.String() -} - -func (b *NetworkMapBuilder) generateRouteFirewallRuleID(rule *RouteFirewallRule) string { - var s strings.Builder - s.WriteString(rfw) - s.WriteString(string(rule.RouteID)) - s.WriteRune(':') - s.WriteString(rule.Destination) - s.WriteRune(':') - s.WriteString(rule.Action) - s.WriteRune(':') - s.WriteString(strings.Join(rule.SourceRanges, ",")) - s.WriteRune(':') - s.WriteString(rule.Protocol) - s.WriteRune(':') - s.WriteString(strconv.Itoa(int(rule.Port))) - return s.String() -} - -func (b *NetworkMapBuilder) isPeerInGroups(groupIDs []string, peerGroups []string) bool { - for _, groupID := range groupIDs { - if slices.Contains(peerGroups, groupID) { - return true - } - } - return false -} - -func (b *NetworkMapBuilder) isPeerRouter(account *Account, peerID string) bool { - for _, r := range account.Routes { - if !r.Enabled { - continue - } - - if r.PeerID == peerID { - return true - } - - if peer := b.cache.globalPeers[peerID]; peer != nil { - if r.Peer == peer.Key && r.PeerID == "" { - return true - } - } - } - - routers := account.GetResourceRoutersMap() - for _, networkRouters := range routers { - if router, exists := networkRouters[peerID]; exists && router.Enabled { - return true - } - } - - return false -} - -func (b *NetworkMapBuilder) incAddPeerLoop() { - for { - b.apb.mu.Lock() - if len(b.apb.ids) == 0 { - b.apb.sg.Wait() - } - b.addPeersIncrementally() - b.apb.mu.Unlock() - } -} - -// lock on b.apb level should be held -func (b *NetworkMapBuilder) addPeersIncrementally() { - peers := slices.Clone(b.apb.ids) - clear(b.apb.ids) - b.apb.ids = b.apb.ids[:0] - latestAcc := b.apb.la - b.apb.mu.Unlock() - - tt := time.Now() - b.cache.mu.Lock() - defer b.cache.mu.Unlock() - - account := b.updateAccountLocked(latestAcc) - - log.Debugf("NetworkMapBuilder: Starting incremental add of %d peers", len(peers)) - - allUpdates := make(map[string]*PeerUpdateDelta) - - for _, peerID := range peers { - peer := account.GetPeer(peerID) - if peer == nil { - b.apb.mu.Lock() - retries := b.apb.retryCount[peerID] - b.apb.mu.Unlock() - - if retries >= maxPeerAddRetries { - log.Errorf("NetworkMapBuilder: peer %s not found in account %s after %d retries, giving up", peerID, account.Id, retries) - b.apb.mu.Lock() - delete(b.apb.retryCount, peerID) - b.apb.mu.Unlock() - continue - } - - log.Warnf("NetworkMapBuilder: peer %s not found in account %s, retry %d/%d", peerID, account.Id, retries+1, maxPeerAddRetries) - b.apb.mu.Lock() - b.apb.retryCount[peerID] = retries + 1 - b.apb.mu.Unlock() - b.enqueuePeersForIncrementalAdd(latestAcc, peerID) - continue - } - - b.apb.mu.Lock() - delete(b.apb.retryCount, peerID) - b.apb.mu.Unlock() - - b.validatedPeers[peerID] = struct{}{} - b.cache.globalPeers[peerID] = peer - - peerGroups := b.updateIndexesForNewPeer(account, peerID) - b.buildPeerACLView(account, peerID) - b.buildPeerRoutesView(account, peerID) - b.buildPeerDNSView(account, peerID) - - peerDeltas := b.collectDeltasForNewPeer(account, peerID, peerGroups) - for affectedPeerID, delta := range peerDeltas { - if existing, ok := allUpdates[affectedPeerID]; ok { - existing.mergeFrom(delta) - continue - } - allUpdates[affectedPeerID] = delta - } - } - - for affectedPeerID, delta := range allUpdates { - b.applyDeltaToPeer(account, affectedPeerID, delta) - } - - log.Debugf("NetworkMapBuilder: Added %d peers to cache, affected %d peers, took %s", len(peers), len(allUpdates), time.Since(tt)) - - b.apb.mu.Lock() - if len(b.apb.ids) > 0 { - b.apb.sg.Signal() - } -} - -func (b *NetworkMapBuilder) enqueuePeersForIncrementalAdd(acc *Account, peerIDs ...string) { - b.apb.mu.Lock() - b.apb.ids = append(b.apb.ids, peerIDs...) - if b.apb.la != nil && acc.Network.CurrentSerial() > b.apb.la.Network.CurrentSerial() { - b.apb.la = acc - } - b.apb.sg.Signal() - b.apb.mu.Unlock() -} - -func (b *NetworkMapBuilder) EnqueuePeersForIncrementalAdd(acc *Account, peerIDs ...string) { - b.enqueuePeersForIncrementalAdd(acc, peerIDs...) -} - -type ViewDelta struct { - AddedPeerIDs []string - RemovedPeerIDs []string - AddedRuleIDs []string - RemovedRuleIDs []string -} - -func (b *NetworkMapBuilder) OnPeerAddedIncremental(acc *Account, peerID string) error { - tt := time.Now() - peer := acc.GetPeer(peerID) - if peer == nil { - return fmt.Errorf("NetworkMapBuilder: peer %s not found in account", peerID) - } - - b.cache.mu.Lock() - defer b.cache.mu.Unlock() - - account := b.updateAccountLocked(acc) - - log.Debugf("NetworkMapBuilder: Adding peer %s (IP: %s) to cache", peerID, peer.IP.String()) - - b.validatedPeers[peerID] = struct{}{} - - b.cache.globalPeers[peerID] = peer - - peerGroups := b.updateIndexesForNewPeer(account, peerID) - - b.buildPeerACLView(account, peerID) - b.buildPeerRoutesView(account, peerID) - b.buildPeerDNSView(account, peerID) - - log.Debugf("NetworkMapBuilder: Adding peer %s to cache, views took %s", peerID, time.Since(tt)) - - b.incrementalUpdateAffectedPeers(account, peerID, peerGroups) - - log.Debugf("NetworkMapBuilder: Added peer %s to cache, took %s", peerID, time.Since(tt)) - - return nil -} - -func (b *NetworkMapBuilder) updateIndexesForNewPeer(account *Account, peerID string) []string { - peerGroups := make([]string, 0) - - for groupID, group := range account.Groups { - if slices.Contains(group.Peers, peerID) { - if !slices.Contains(b.cache.groupToPeers[groupID], peerID) { - b.cache.groupToPeers[groupID] = append(b.cache.groupToPeers[groupID], peerID) - } - peerGroups = append(peerGroups, groupID) - } - } - - b.cache.peerToGroups[peerID] = peerGroups - - for _, r := range account.Routes { - if !r.Enabled || b.cache.globalRoutes[r.ID] != nil { - continue - } - for _, groupID := range r.PeerGroups { - if !slices.Contains(b.cache.groupToRoutes[groupID], r) { - b.cache.groupToRoutes[groupID] = append(b.cache.groupToRoutes[groupID], r) - } - } - if r.Peer != "" { - if peer, ok := b.cache.globalPeers[r.Peer]; ok { - if !slices.Contains(b.cache.peerToRoutes[peer.ID], r) { - b.cache.peerToRoutes[peer.ID] = append(b.cache.peerToRoutes[peer.ID], r) - } - } - } - b.cache.globalRoutes[r.ID] = r - } - - return peerGroups -} - -func (b *NetworkMapBuilder) incrementalUpdateAffectedPeers(account *Account, newPeerID string, peerGroups []string) { - updates := b.collectDeltasForNewPeer(account, newPeerID, peerGroups) - for affectedPeerID, delta := range updates { - b.applyDeltaToPeer(account, affectedPeerID, delta) - } -} - -func (b *NetworkMapBuilder) collectDeltasForNewPeer(account *Account, newPeerID string, peerGroups []string) map[string]*PeerUpdateDelta { - updates := b.calculateIncrementalUpdates(account, newPeerID, peerGroups) - - if b.isPeerRouter(account, newPeerID) { - affectedByRoutes := b.findPeersAffectedByNewRouter(account, newPeerID, peerGroups) - for affectedPeerID := range affectedByRoutes { - if affectedPeerID == newPeerID { - continue - } - if _, exists := updates[affectedPeerID]; !exists { - updates[affectedPeerID] = &PeerUpdateDelta{ - PeerID: affectedPeerID, - RebuildRoutesView: true, - } - } else { - updates[affectedPeerID].RebuildRoutesView = true - } - } - } - - return updates -} - -func (b *NetworkMapBuilder) findPeersAffectedByNewRouter(account *Account, newRouterID string, routerGroups []string) map[string]struct{} { - affected := make(map[string]struct{}) - enabledRoutes, _ := b.getRoutingPeerRoutes(newRouterID) - - for _, route := range enabledRoutes { - for _, distGroupID := range route.Groups { - if peers := b.cache.groupToPeers[distGroupID]; peers != nil { - for _, peerID := range peers { - if peerID != newRouterID { - affected[peerID] = struct{}{} - } - } - } - } - - for _, peerGroupID := range route.PeerGroups { - if peers := b.cache.groupToPeers[peerGroupID]; peers != nil { - for _, peerID := range peers { - if peerID != newRouterID { - affected[peerID] = struct{}{} - } - } - } - } - } - - for _, route := range account.Routes { - if !route.Enabled { - continue - } - - routerInPeerGroups := false - for _, peerGroupID := range route.PeerGroups { - if slices.Contains(routerGroups, peerGroupID) { - routerInPeerGroups = true - break - } - } - - if routerInPeerGroups { - for _, distGroupID := range route.Groups { - if peers := b.cache.groupToPeers[distGroupID]; peers != nil { - for _, peerID := range peers { - affected[peerID] = struct{}{} - } - } - } - } - } - - return affected -} - -func (b *NetworkMapBuilder) calculateIncrementalUpdates(account *Account, newPeerID string, peerGroups []string) map[string]*PeerUpdateDelta { - updates := make(map[string]*PeerUpdateDelta) - ctx := context.Background() - - groupAllLn := 0 - if allGroup, err := account.GetGroupAll(); err == nil { - groupAllLn = len(allGroup.Peers) - 1 - } - - newPeer := b.cache.globalPeers[newPeerID] - if newPeer == nil { - return updates - } - - for _, policy := range account.Policies { - if !policy.Enabled { - continue - } - - for _, rule := range policy.Rules { - if !rule.Enabled { - continue - } - var peerInSources, peerInDestinations bool - - if rule.SourceResource.Type == ResourceTypePeer && rule.SourceResource.ID == newPeerID { - peerInSources = true - } else { - peerInSources = b.isPeerInGroups(rule.Sources, peerGroups) - } - - if rule.DestinationResource.Type == ResourceTypePeer && rule.DestinationResource.ID == newPeerID { - peerInDestinations = true - } else { - peerInDestinations = b.isPeerInGroups(rule.Destinations, peerGroups) - } - - if peerInSources { - if len(rule.Destinations) > 0 { - b.addUpdateForPeersInGroups(updates, rule.Destinations, newPeerID, rule, FirewallRuleDirectionIN, groupAllLn) - } - if rule.DestinationResource.Type == ResourceTypePeer && rule.DestinationResource.ID != "" { - b.addUpdateForDirectPeerResource(updates, rule.DestinationResource.ID, newPeerID, rule, FirewallRuleDirectionIN) - } - } - - if peerInDestinations { - if len(rule.Sources) > 0 { - b.addUpdateForPeersInGroups(updates, rule.Sources, newPeerID, rule, FirewallRuleDirectionOUT, groupAllLn) - } - if rule.SourceResource.Type == ResourceTypePeer && rule.SourceResource.ID != "" { - b.addUpdateForDirectPeerResource(updates, rule.SourceResource.ID, newPeerID, rule, FirewallRuleDirectionOUT) - } - } - - if rule.Bidirectional { - if peerInSources { - if len(rule.Destinations) > 0 { - b.addUpdateForPeersInGroups(updates, rule.Destinations, newPeerID, rule, FirewallRuleDirectionOUT, groupAllLn) - } - if rule.DestinationResource.Type == ResourceTypePeer && rule.DestinationResource.ID != "" { - b.addUpdateForDirectPeerResource(updates, rule.DestinationResource.ID, newPeerID, rule, FirewallRuleDirectionOUT) - } - } - if peerInDestinations { - if len(rule.Sources) > 0 { - b.addUpdateForPeersInGroups(updates, rule.Sources, newPeerID, rule, FirewallRuleDirectionIN, groupAllLn) - } - if rule.SourceResource.Type == ResourceTypePeer && rule.SourceResource.ID != "" { - b.addUpdateForDirectPeerResource(updates, rule.SourceResource.ID, newPeerID, rule, FirewallRuleDirectionIN) - } - } - } - } - } - - b.calculateRouteFirewallUpdates(newPeerID, newPeer, peerGroups, updates) - - b.calculateNetworkResourceFirewallUpdates(ctx, account, newPeerID, newPeer, peerGroups, updates) - - b.calculateNewRouterNetworkResourceUpdates(ctx, account, newPeerID, updates) - - return updates -} - -func (b *NetworkMapBuilder) calculateNewRouterNetworkResourceUpdates( - ctx context.Context, account *Account, newPeerID string, - updates map[string]*PeerUpdateDelta, -) { - resourceRouters := b.cache.resourceRouters - - for networkID, routers := range resourceRouters { - router, isRouter := routers[newPeerID] - if !isRouter || !router.Enabled { - continue - } - - for _, resource := range b.cache.globalResources { - if resource.NetworkID != networkID { - continue - } - - policies := b.cache.resourcePolicies[resource.ID] - if len(policies) == 0 { - continue - } - - peersWithAccess := make(map[string]struct{}) - - for _, policy := range policies { - if !policy.Enabled { - continue - } - - sourceGroups := policy.SourceGroups() - for _, sourceGroup := range sourceGroups { - groupPeers := b.cache.groupToPeers[sourceGroup] - for _, peerID := range groupPeers { - if peerID == newPeerID { - continue - } - - if account.validatePostureChecksOnPeer(ctx, policy.SourcePostureChecks, peerID) { - peersWithAccess[peerID] = struct{}{} - } - } - } - } - - for peerID := range peersWithAccess { - delta := updates[peerID] - if delta == nil { - delta = &PeerUpdateDelta{ - PeerID: peerID, - } - updates[peerID] = delta - } - - if !slices.Contains(delta.AddConnectedPeers, newPeerID) { - delta.AddConnectedPeers = append(delta.AddConnectedPeers, newPeerID) - } - - delta.RebuildRoutesView = true - } - } - } -} - -func (b *NetworkMapBuilder) calculateRouteFirewallUpdates( - newPeerID string, newPeer *nbpeer.Peer, - peerGroups []string, updates map[string]*PeerUpdateDelta, -) { - processedPeerRoutes := make(map[string]map[route.ID]struct{}) - - for routeID, info := range b.cache.noACGRoutes { - if info.PeerID == newPeerID { - continue - } - - b.addRouteFirewallUpdate(updates, info.PeerID, string(routeID), newPeer.IP.String()) - - if processedPeerRoutes[info.PeerID] == nil { - processedPeerRoutes[info.PeerID] = make(map[route.ID]struct{}) - } - processedPeerRoutes[info.PeerID][routeID] = struct{}{} - } - - for _, acg := range peerGroups { - routeInfos := b.cache.acgToRoutes[acg] - if routeInfos == nil { - continue - } - - for routeID, info := range routeInfos { - if info.PeerID == newPeerID { - continue - } - - if processedRoutes, exists := processedPeerRoutes[info.PeerID]; exists { - if _, processed := processedRoutes[routeID]; processed { - continue - } - } - - b.addRouteFirewallUpdate(updates, info.PeerID, string(routeID), newPeer.IP.String()) - - if processedPeerRoutes[info.PeerID] == nil { - processedPeerRoutes[info.PeerID] = make(map[route.ID]struct{}) - } - processedPeerRoutes[info.PeerID][routeID] = struct{}{} - } - } -} - -func (b *NetworkMapBuilder) addRouteFirewallUpdate( - updates map[string]*PeerUpdateDelta, peerID string, - routeID string, sourceIP string, -) { - delta := updates[peerID] - if delta == nil { - delta = &PeerUpdateDelta{ - PeerID: peerID, - UpdateRouteFirewallRules: make([]*RouteFirewallRuleUpdate, 0), - } - updates[peerID] = delta - } - - for _, existing := range delta.UpdateRouteFirewallRules { - if existing.RuleID == routeID && existing.AddSourceIP == sourceIP { - return - } - } - - delta.UpdateRouteFirewallRules = append(delta.UpdateRouteFirewallRules, &RouteFirewallRuleUpdate{ - RuleID: routeID, - AddSourceIP: sourceIP, - }) -} - -func (b *NetworkMapBuilder) calculateNetworkResourceFirewallUpdates( - ctx context.Context, account *Account, newPeerID string, - newPeer *nbpeer.Peer, peerGroups []string, updates map[string]*PeerUpdateDelta, -) { - for _, resource := range b.cache.globalResources { - resourcePolicies := b.cache.resourcePolicies - resourceRouters := b.cache.resourceRouters - - policies := resourcePolicies[resource.ID] - peerHasAccess := false - - for _, policy := range policies { - if !policy.Enabled { - continue - } - - sourceGroups := policy.SourceGroups() - for _, sourceGroup := range sourceGroups { - if slices.Contains(peerGroups, sourceGroup) { - if account.validatePostureChecksOnPeer(ctx, policy.SourcePostureChecks, newPeerID) { - peerHasAccess = true - break - } - } - } - - if peerHasAccess { - break - } - } - - if !peerHasAccess { - continue - } - - networkRouters := resourceRouters[resource.NetworkID] - for routerPeerID, router := range networkRouters { - if !router.Enabled || routerPeerID == newPeerID { - continue - } - - delta := updates[routerPeerID] - if delta == nil { - delta = &PeerUpdateDelta{ - PeerID: routerPeerID, - } - updates[routerPeerID] = delta - } - - if !slices.Contains(delta.AddConnectedPeers, newPeerID) { - delta.AddConnectedPeers = append(delta.AddConnectedPeers, newPeerID) - } - - delta.RebuildRoutesView = true - } - } -} - -type PeerUpdateDelta struct { - PeerID string - AddConnectedPeers []string - AddFirewallRules []*FirewallRuleDelta - AddRoutes []route.ID - UpdateRouteFirewallRules []*RouteFirewallRuleUpdate - UpdateDNS bool - RebuildRoutesView bool -} - -func (d *PeerUpdateDelta) mergeFrom(other *PeerUpdateDelta) { - for _, peerID := range other.AddConnectedPeers { - if !slices.Contains(d.AddConnectedPeers, peerID) { - d.AddConnectedPeers = append(d.AddConnectedPeers, peerID) - } - } - - existingRuleIDs := make(map[string]struct{}, len(d.AddFirewallRules)) - for _, rule := range d.AddFirewallRules { - existingRuleIDs[rule.RuleID] = struct{}{} - } - for _, rule := range other.AddFirewallRules { - if _, exists := existingRuleIDs[rule.RuleID]; !exists { - d.AddFirewallRules = append(d.AddFirewallRules, rule) - existingRuleIDs[rule.RuleID] = struct{}{} - } - } - - for _, routeID := range other.AddRoutes { - if !slices.Contains(d.AddRoutes, routeID) { - d.AddRoutes = append(d.AddRoutes, routeID) - } - } - - existingRouteUpdates := make(map[string]map[string]struct{}) - for _, update := range d.UpdateRouteFirewallRules { - if existingRouteUpdates[update.RuleID] == nil { - existingRouteUpdates[update.RuleID] = make(map[string]struct{}) - } - existingRouteUpdates[update.RuleID][update.AddSourceIP] = struct{}{} - } - for _, update := range other.UpdateRouteFirewallRules { - if existingRouteUpdates[update.RuleID] == nil { - existingRouteUpdates[update.RuleID] = make(map[string]struct{}) - } - if _, exists := existingRouteUpdates[update.RuleID][update.AddSourceIP]; !exists { - d.UpdateRouteFirewallRules = append(d.UpdateRouteFirewallRules, update) - existingRouteUpdates[update.RuleID][update.AddSourceIP] = struct{}{} - } - } - - if other.UpdateDNS { - d.UpdateDNS = true - } - if other.RebuildRoutesView { - d.RebuildRoutesView = true - } -} - -type FirewallRuleDelta struct { - Rule *FirewallRule - RuleID string - Direction int -} - -type RouteFirewallRuleUpdate struct { - RuleID string - AddSourceIP string -} - -func (b *NetworkMapBuilder) addUpdateForPeersInGroups( - updates map[string]*PeerUpdateDelta, groupIDs []string, newPeerID string, - rule *PolicyRule, direction int, allGroupLn int, -) { - for _, groupID := range groupIDs { - peers := b.cache.groupToPeers[groupID] - cnt := 0 - for _, peerID := range peers { - if peerID == newPeerID { - continue - } - if _, ok := b.validatedPeers[peerID]; !ok { - continue - } - cnt++ - } - all := false - if allGroupLn > 0 && cnt == allGroupLn { - all = true - } - newPeer := b.cache.globalPeers[newPeerID] - fr := &FirewallRule{ - PolicyID: rule.ID, - PeerIP: newPeer.IP.String(), - Direction: direction, - Action: string(rule.Action), - Protocol: firewallRuleProtocol(rule.Protocol), - } - for _, peerID := range peers { - if peerID == newPeerID { - continue - } - if _, ok := b.validatedPeers[peerID]; !ok { - continue - } - targetPeer := b.cache.globalPeers[peerID] - if targetPeer == nil { - continue - } - - peerIPForRule := fr.PeerIP - if all { - peerIPForRule = allPeers - } - - b.addOrUpdateFirewallRuleInDelta(updates, peerID, newPeerID, rule, direction, fr, peerIPForRule, targetPeer) - } - } -} - -func (b *NetworkMapBuilder) addUpdateForDirectPeerResource( - updates map[string]*PeerUpdateDelta, targetPeerID string, newPeerID string, - rule *PolicyRule, direction int, -) { - if targetPeerID == newPeerID { - return - } - - if _, ok := b.validatedPeers[targetPeerID]; !ok { - return - } - - newPeer := b.cache.globalPeers[newPeerID] - if newPeer == nil { - return - } - - targetPeer := b.cache.globalPeers[targetPeerID] - if targetPeer == nil { - return - } - - fr := &FirewallRule{ - PolicyID: rule.ID, - PeerIP: newPeer.IP.String(), - Direction: direction, - Action: string(rule.Action), - Protocol: firewallRuleProtocol(rule.Protocol), - } - - b.addOrUpdateFirewallRuleInDelta(updates, targetPeerID, newPeerID, rule, direction, fr, fr.PeerIP, targetPeer) -} - -func (b *NetworkMapBuilder) addOrUpdateFirewallRuleInDelta( - updates map[string]*PeerUpdateDelta, targetPeerID string, newPeerID string, - rule *PolicyRule, direction int, baseRule *FirewallRule, peerIP string, targetPeer *nbpeer.Peer, -) { - delta := updates[targetPeerID] - if delta == nil { - delta = &PeerUpdateDelta{ - PeerID: targetPeerID, - AddConnectedPeers: []string{newPeerID}, - AddFirewallRules: make([]*FirewallRuleDelta, 0), - } - updates[targetPeerID] = delta - } else if !slices.Contains(delta.AddConnectedPeers, newPeerID) { - delta.AddConnectedPeers = append(delta.AddConnectedPeers, newPeerID) - } - - baseRule.PeerIP = peerIP - - if len(rule.Ports) > 0 || len(rule.PortRanges) > 0 { - expandedRules := expandPortsAndRanges(*baseRule, rule, targetPeer) - for _, expandedRule := range expandedRules { - ruleID := b.generateFirewallRuleID(expandedRule) - delta.AddFirewallRules = append(delta.AddFirewallRules, &FirewallRuleDelta{ - Rule: expandedRule, - RuleID: ruleID, - Direction: direction, - }) - } - } else { - ruleID := b.generateFirewallRuleID(baseRule) - delta.AddFirewallRules = append(delta.AddFirewallRules, &FirewallRuleDelta{ - Rule: baseRule, - RuleID: ruleID, - Direction: direction, - }) - } -} - -func (b *NetworkMapBuilder) applyDeltaToPeer(account *Account, peerID string, delta *PeerUpdateDelta) { - if len(delta.AddConnectedPeers) > 0 || len(delta.AddFirewallRules) > 0 { - if aclView := b.cache.peerACLs[peerID]; aclView != nil { - for _, connectedPeerID := range delta.AddConnectedPeers { - if !slices.Contains(aclView.ConnectedPeerIDs, connectedPeerID) { - aclView.ConnectedPeerIDs = append(aclView.ConnectedPeerIDs, connectedPeerID) - } - } - - for _, ruleDelta := range delta.AddFirewallRules { - b.cache.globalRules[ruleDelta.RuleID] = ruleDelta.Rule - - if !slices.Contains(aclView.FirewallRuleIDs, ruleDelta.RuleID) { - aclView.FirewallRuleIDs = append(aclView.FirewallRuleIDs, ruleDelta.RuleID) - } - } - } - } - - if delta.RebuildRoutesView { - b.buildPeerRoutesView(account, peerID) - } else if len(delta.UpdateRouteFirewallRules) > 0 { - if routesView := b.cache.peerRoutes[peerID]; routesView != nil { - b.updateRouteFirewallRules(routesView, delta.UpdateRouteFirewallRules) - } - } - - if delta.UpdateDNS { - b.buildPeerDNSView(account, peerID) - } -} - -func (b *NetworkMapBuilder) updateRouteFirewallRules(routesView *PeerRoutesView, updates []*RouteFirewallRuleUpdate) { - for _, update := range updates { - for _, ruleID := range routesView.RouteFirewallRuleIDs { - rule := b.cache.globalRouteRules[ruleID] - if rule == nil { - continue - } - - if string(rule.RouteID) == update.RuleID { - if hasWildcard := slices.Contains(rule.SourceRanges, allWildcard) || slices.Contains(rule.SourceRanges, v6AllWildcard); hasWildcard { - break - } - - sourceIP := update.AddSourceIP - - if strings.Contains(sourceIP, ":") { - sourceIP += "/128" // IPv6 - } else { - sourceIP += "/32" // IPv4 - } - - if !slices.Contains(rule.SourceRanges, sourceIP) { - rule.SourceRanges = append(rule.SourceRanges, sourceIP) - } - break - } - } - } -} - -func (b *NetworkMapBuilder) OnPeerDeleted(acc *Account, peerID string) error { - b.cache.mu.Lock() - defer b.cache.mu.Unlock() - - account := b.updateAccountLocked(acc) - - deletedPeer := b.cache.globalPeers[peerID] - if deletedPeer == nil { - return fmt.Errorf("peer %s not found in cache", peerID) - } - - deletedPeerKey := deletedPeer.Key - peerGroups := b.cache.peerToGroups[peerID] - peerIP := deletedPeer.IP.String() - - log.Debugf("NetworkMapBuilder: Deleting peer %s (IP: %s) from cache", peerID, peerIP) - - delete(b.validatedPeers, peerID) - - routesToDelete := []route.ID{} - - for routeID, r := range account.Routes { - if r.Peer != deletedPeerKey && r.PeerID != peerID { - continue - } - if len(r.PeerGroups) == 0 { - routesToDelete = append(routesToDelete, routeID) - continue - } - newPeerAssigned := false - for _, groupID := range r.PeerGroups { - candidatePeerIDs := b.cache.groupToPeers[groupID] - for _, candidatePeerID := range candidatePeerIDs { - if candidatePeerID == peerID { - continue - } - if candidatePeer := b.cache.globalPeers[candidatePeerID]; candidatePeer != nil { - r.Peer = candidatePeer.Key - r.PeerID = candidatePeerID - newPeerAssigned = true - break - } - } - if newPeerAssigned { - break - } - } - - if !newPeerAssigned { - routesToDelete = append(routesToDelete, routeID) - } - } - - for _, routeID := range routesToDelete { - delete(account.Routes, routeID) - } - - delete(b.cache.peerACLs, peerID) - delete(b.cache.peerRoutes, peerID) - delete(b.cache.peerDNS, peerID) - delete(b.cache.peerSSH, peerID) - - delete(b.cache.globalPeers, peerID) - - for acg, routeMap := range b.cache.acgToRoutes { - for routeID, info := range routeMap { - if info.PeerID == peerID { - delete(routeMap, routeID) - } - } - if len(routeMap) == 0 { - delete(b.cache.acgToRoutes, acg) - } - } - - for _, groupID := range peerGroups { - if peers := b.cache.groupToPeers[groupID]; peers != nil { - b.cache.groupToPeers[groupID] = slices.DeleteFunc(peers, func(id string) bool { - return id == peerID - }) - } - } - delete(b.cache.peerToGroups, peerID) - - affectedPeers := make(map[string]struct{}) - - for _, r := range account.Routes { - for _, groupID := range r.Groups { - if peers := b.cache.groupToPeers[groupID]; peers != nil { - for _, p := range peers { - affectedPeers[p] = struct{}{} - } - } - } - - for _, groupID := range r.PeerGroups { - if peers := b.cache.groupToPeers[groupID]; peers != nil { - for _, p := range peers { - affectedPeers[p] = struct{}{} - } - } - } - } - - for affectedPeerID := range affectedPeers { - if affectedPeerID == peerID { - continue - } - b.buildPeerRoutesView(account, affectedPeerID) - } - - peersToRebuildACL := make(map[string]struct{}) - peerDeletionUpdates := b.findPeersAffectedByDeletedPeerACL(peerID, peerIP, peerGroups, peersToRebuildACL) - for affectedPeerID, updates := range peerDeletionUpdates { - b.applyDeletionUpdates(affectedPeerID, updates) - } - - for affectedPeerID := range peersToRebuildACL { - b.buildPeerACLView(account, affectedPeerID) - } - - b.cleanupUnusedRules() - - log.Debugf("NetworkMapBuilder: Deleted peer %s, affected %d other peers", peerID, len(affectedPeers)) - - return nil -} - -func (b *NetworkMapBuilder) findPeersAffectedByDeletedPeerACL( - deletedPeerID string, - peerIP string, - peerGroups []string, - peersToRebuildACL map[string]struct{}, -) map[string]*PeerDeletionUpdate { - - affected := make(map[string]*PeerDeletionUpdate) - - for peerID, aclView := range b.cache.peerACLs { - if peerID == deletedPeerID { - continue - } - - if slices.Contains(aclView.ConnectedPeerIDs, deletedPeerID) { - peersToRebuildACL[peerID] = struct{}{} - if affected[peerID] == nil { - affected[peerID] = &PeerDeletionUpdate{ - RemovePeerID: deletedPeerID, - PeerIP: peerIP, - } - } - } - } - - affectedRouteOwners := make(map[string]struct{}) - - for _, groupID := range peerGroups { - if routeMap, ok := b.cache.acgToRoutes[groupID]; ok { - for _, info := range routeMap { - if info.PeerID != deletedPeerID { - affectedRouteOwners[info.PeerID] = struct{}{} - } - } - } - } - - for _, info := range b.cache.noACGRoutes { - if info.PeerID != deletedPeerID { - affectedRouteOwners[info.PeerID] = struct{}{} - } - } - - for ownerPeerID := range affectedRouteOwners { - if affected[ownerPeerID] == nil { - affected[ownerPeerID] = &PeerDeletionUpdate{ - RemovePeerID: deletedPeerID, - PeerIP: peerIP, - RemoveFromSourceRanges: true, - } - } else { - affected[ownerPeerID].RemoveFromSourceRanges = true - } - } - - return affected -} - -type PeerDeletionUpdate struct { - RemovePeerID string - RemoveFirewallRuleIDs []string - RemoveRouteIDs []route.ID - RemoveFromSourceRanges bool - PeerIP string -} - -func (b *NetworkMapBuilder) applyDeletionUpdates(peerID string, updates *PeerDeletionUpdate) { - if routesView := b.cache.peerRoutes[peerID]; routesView != nil { - if len(updates.RemoveRouteIDs) > 0 { - routesView.NetworkResourceIDs = slices.DeleteFunc(routesView.NetworkResourceIDs, func(routeID route.ID) bool { - return slices.Contains(updates.RemoveRouteIDs, routeID) - }) - } - - if updates.RemoveFromSourceRanges { - b.removeIPFromRouteFirewallRules(routesView, updates.PeerIP) - } - } -} - -func (b *NetworkMapBuilder) removeIPFromRouteFirewallRules(routesView *PeerRoutesView, peerIP string) { - sourceIPv4 := peerIP + "/32" - sourceIPv6 := peerIP + "/128" - - rulesToRemove := []string{} - - for _, ruleID := range routesView.RouteFirewallRuleIDs { - if rule := b.cache.globalRouteRules[ruleID]; rule != nil { - rule.SourceRanges = slices.DeleteFunc(rule.SourceRanges, func(source string) bool { - return source == sourceIPv4 || source == sourceIPv6 || source == peerIP - }) - - if len(rule.SourceRanges) == 0 { - rulesToRemove = append(rulesToRemove, ruleID) - } - } - } - - if len(rulesToRemove) > 0 { - routesView.RouteFirewallRuleIDs = slices.DeleteFunc(routesView.RouteFirewallRuleIDs, func(ruleID string) bool { - return slices.Contains(rulesToRemove, ruleID) - }) - } -} - -func (b *NetworkMapBuilder) cleanupUnusedRules() { - usedFirewallRules := make(map[string]struct{}) - usedRouteRules := make(map[string]struct{}) - usedRoutes := make(map[route.ID]struct{}) - - for _, aclView := range b.cache.peerACLs { - for _, ruleID := range aclView.FirewallRuleIDs { - usedFirewallRules[ruleID] = struct{}{} - } - } - - for _, routesView := range b.cache.peerRoutes { - for _, ruleID := range routesView.RouteFirewallRuleIDs { - usedRouteRules[ruleID] = struct{}{} - } - - for _, routeID := range routesView.OwnRouteIDs { - usedRoutes[routeID] = struct{}{} - } - for _, routeID := range routesView.NetworkResourceIDs { - usedRoutes[routeID] = struct{}{} - } - } - - for ruleID := range b.cache.globalRules { - if _, used := usedFirewallRules[ruleID]; !used { - delete(b.cache.globalRules, ruleID) - } - } - - for ruleID := range b.cache.globalRouteRules { - if _, used := usedRouteRules[ruleID]; !used { - delete(b.cache.globalRouteRules, ruleID) - } - } - - for routeID := range b.cache.globalRoutes { - if _, used := usedRoutes[routeID]; !used { - delete(b.cache.globalRoutes, routeID) - } - } -} - -func (b *NetworkMapBuilder) UpdatePeer(peer *nbpeer.Peer) { - b.cache.mu.Lock() - defer b.cache.mu.Unlock() - peerStored, ok := b.cache.globalPeers[peer.ID] - if !ok { - return - } - *peerStored = *peer -} From f8745723fcb8edd19e8c07bcc12c06fb97226b1e Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Tue, 28 Apr 2026 12:42:19 +0300 Subject: [PATCH 348/374] [management] Add Microsoft AD FS support for embedded Dex identity providers (#6008) --- idp/dex/config.go | 4 +++- idp/dex/connector.go | 6 ++++-- management/server/identity_provider.go | 4 +++- management/server/types/identity_provider.go | 5 ++++- shared/management/http/api/openapi.yml | 1 + shared/management/http/api/types.gen.go | 3 +++ 6 files changed, 18 insertions(+), 5 deletions(-) diff --git a/idp/dex/config.go b/idp/dex/config.go index 7f5300f14..e686233ad 100644 --- a/idp/dex/config.go +++ b/idp/dex/config.go @@ -193,7 +193,7 @@ func (c *Connector) ToStorageConnector() (storage.Connector, error) { // are stored with types that Dex can open. func mapConnectorToDex(connType string, config map[string]interface{}) (string, map[string]interface{}) { switch connType { - case "oidc", "zitadel", "entra", "okta", "pocketid", "authentik", "keycloak": + case "oidc", "zitadel", "entra", "okta", "pocketid", "authentik", "keycloak", "adfs": return "oidc", applyOIDCDefaults(connType, config) default: return connType, config @@ -218,6 +218,8 @@ func applyOIDCDefaults(connType string, config map[string]interface{}) map[strin setDefault(augmented, "claimMapping", map[string]string{"email": "preferred_username"}) case "okta", "pocketid": augmented["scopes"] = []string{"openid", "profile", "email", "groups"} + case "adfs": + augmented["scopes"] = []string{"openid", "profile", "email", "allatclaims"} } return augmented diff --git a/idp/dex/connector.go b/idp/dex/connector.go index ba2bb1f00..8aba92999 100644 --- a/idp/dex/connector.go +++ b/idp/dex/connector.go @@ -168,7 +168,7 @@ func (p *Provider) buildStorageConnector(cfg *ConnectorConfig) (storage.Connecto var err error switch cfg.Type { - case "oidc", "zitadel", "entra", "okta", "pocketid", "authentik", "keycloak": + case "oidc", "zitadel", "entra", "okta", "pocketid", "authentik", "keycloak", "adfs": dexType = "oidc" configData, err = buildOIDCConnectorConfig(cfg, redirectURI) case "google": @@ -220,6 +220,8 @@ func buildOIDCConnectorConfig(cfg *ConnectorConfig, redirectURI string) ([]byte, oidcConfig["scopes"] = []string{"openid", "profile", "email", "groups"} case "pocketid": oidcConfig["scopes"] = []string{"openid", "profile", "email", "groups"} + case "adfs": + oidcConfig["scopes"] = []string{"openid", "profile", "email", "allatclaims"} } return encodeConnectorConfig(oidcConfig) } @@ -283,7 +285,7 @@ func inferIdentityProviderType(dexType, connectorID string, _ map[string]interfa // inferOIDCProviderType infers the specific OIDC provider from connector ID func inferOIDCProviderType(connectorID string) string { connectorIDLower := strings.ToLower(connectorID) - for _, provider := range []string{"pocketid", "zitadel", "entra", "okta", "authentik", "keycloak"} { + for _, provider := range []string{"pocketid", "zitadel", "entra", "okta", "authentik", "keycloak", "adfs"} { if strings.Contains(connectorIDLower, provider) { return provider } diff --git a/management/server/identity_provider.go b/management/server/identity_provider.go index 8fd96c238..f965f36b8 100644 --- a/management/server/identity_provider.go +++ b/management/server/identity_provider.go @@ -274,7 +274,7 @@ func identityProviderToConnectorConfig(idpConfig *types.IdentityProvider) *dex.C } // generateIdentityProviderID generates a unique ID for an identity provider. -// For specific provider types (okta, zitadel, entra, google, pocketid, microsoft), +// For specific provider types (okta, zitadel, entra, google, pocketid, microsoft, adfs), // the ID is prefixed with the type name. Generic OIDC providers get no prefix. func generateIdentityProviderID(idpType types.IdentityProviderType) string { id := xid.New().String() @@ -296,6 +296,8 @@ func generateIdentityProviderID(idpType types.IdentityProviderType) string { return "authentik-" + id case types.IdentityProviderTypeKeycloak: return "keycloak-" + id + case types.IdentityProviderTypeADFS: + return "adfs-" + id default: // Generic OIDC - no prefix return id diff --git a/management/server/types/identity_provider.go b/management/server/types/identity_provider.go index c4498e4d4..0c1f9509c 100644 --- a/management/server/types/identity_provider.go +++ b/management/server/types/identity_provider.go @@ -39,6 +39,8 @@ const ( IdentityProviderTypeAuthentik IdentityProviderType = "authentik" // IdentityProviderTypeKeycloak is the Keycloak identity provider IdentityProviderTypeKeycloak IdentityProviderType = "keycloak" + // IdentityProviderTypeADFS is the Microsoft AD FS identity provider + IdentityProviderTypeADFS IdentityProviderType = "adfs" ) // IdentityProvider represents an identity provider configuration @@ -112,7 +114,8 @@ func (t IdentityProviderType) IsValid() bool { switch t { case IdentityProviderTypeOIDC, IdentityProviderTypeZitadel, IdentityProviderTypeEntra, IdentityProviderTypeGoogle, IdentityProviderTypeOkta, IdentityProviderTypePocketID, - IdentityProviderTypeMicrosoft, IdentityProviderTypeAuthentik, IdentityProviderTypeKeycloak: + IdentityProviderTypeMicrosoft, IdentityProviderTypeAuthentik, IdentityProviderTypeKeycloak, + IdentityProviderTypeADFS: return true } return false diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 0b855db67..b70f89499 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -2917,6 +2917,7 @@ components: - okta - pocketid - microsoft + - adfs example: oidc IdentityProvider: type: object diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index 0317b8183..d56cb9b74 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -518,6 +518,7 @@ const ( IdentityProviderTypeOkta IdentityProviderType = "okta" IdentityProviderTypePocketid IdentityProviderType = "pocketid" IdentityProviderTypeZitadel IdentityProviderType = "zitadel" + IdentityProviderTypeAdfs IdentityProviderType = "adfs" ) // Valid indicates whether the value is a known member of the IdentityProviderType enum. @@ -537,6 +538,8 @@ func (e IdentityProviderType) Valid() bool { return true case IdentityProviderTypeZitadel: return true + case IdentityProviderTypeAdfs: + return true default: return false } From 6f0eff3ba0696f26032fe4412e35def32e27eac6 Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Tue, 28 Apr 2026 14:48:28 +0300 Subject: [PATCH 349/374] [management] Handle single-string JWT group claim from IdPs (#6014) --- shared/auth/jwt/extractor.go | 10 ++++++++-- shared/auth/jwt/extractor_test.go | 9 +++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/shared/auth/jwt/extractor.go b/shared/auth/jwt/extractor.go index 5806d1f4d..d113f53d5 100644 --- a/shared/auth/jwt/extractor.go +++ b/shared/auth/jwt/extractor.go @@ -146,7 +146,11 @@ func (c *ClaimsExtractor) ToGroups(token *jwt.Token, claimName string) []string userJWTGroups := make([]string, 0) if claim, ok := claims[claimName]; ok { - if claimGroups, ok := claim.([]interface{}); ok { + switch claimGroups := claim.(type) { + case string: + // Some IdPs emit a single group claim as a string instead of an array. + userJWTGroups = append(userJWTGroups, claimGroups) + case []any: for _, g := range claimGroups { if group, ok := g.(string); ok { userJWTGroups = append(userJWTGroups, group) @@ -154,9 +158,11 @@ func (c *ClaimsExtractor) ToGroups(token *jwt.Token, claimName string) []string log.Debugf("JWT claim %q contains a non-string group (type: %T): %v", claimName, g, g) } } + default: + log.Debugf("JWT claim %q is not a string or string array (type: %T): %v", claimName, claim, claim) } } else { - log.Debugf("JWT claim %q is not a string array", claimName) + log.Debugf("JWT claim %q is missing", claimName) } return userJWTGroups diff --git a/shared/auth/jwt/extractor_test.go b/shared/auth/jwt/extractor_test.go index 45529770d..4f8fe0007 100644 --- a/shared/auth/jwt/extractor_test.go +++ b/shared/auth/jwt/extractor_test.go @@ -249,6 +249,15 @@ func TestClaimsExtractor_ToGroups(t *testing.T) { groupClaimName: "groups", expectedGroups: []string{}, }, + { + name: "extracts single group string from claim", + claims: jwt.MapClaims{ + "sub": "user-123", + "groups": "admin", + }, + groupClaimName: "groups", + expectedGroups: []string{"admin"}, + }, { name: "handles custom claim name", claims: jwt.MapClaims{ From 9c50819f20b466aa0a56a56e4677ce24a7b5222a Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 28 Apr 2026 15:04:41 +0200 Subject: [PATCH 350/374] Don't mark management disconnected on transient job stream errors (#6005) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The JOB stream is a separate channel from the SYNC stream. Server-side EOF or other transient errors on the JOB stream do not indicate that the management connection is unhealthy — the SYNC stream remains the authoritative state signal. Previously, a JOB stream EOF would call notifyDisconnected and the client would emit OnConnecting to the UI. The backoff retry would reconnect the JOB stream, but handleJobStream never calls notifyConnected on success, so the UI was stuck on "Connecting" until the next SYNC event or health check. Keep notifyDisconnected for codes.PermissionDenied since IsLoginRequired relies on managementError to detect expired auth. --- shared/management/client/grpc.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/shared/management/client/grpc.go b/shared/management/client/grpc.go index e9bea7ffb..2a51a777d 100644 --- a/shared/management/client/grpc.go +++ b/shared/management/client/grpc.go @@ -252,21 +252,19 @@ func (c *GrpcClient) handleJobStream( c.notifyDisconnected(err) return backoff.Permanent(err) // unrecoverable error, propagate to the upper layer case codes.Canceled: - log.Debugf("management connection context has been canceled, this usually indicates shutdown") + log.Debugf("job stream context has been canceled, this usually indicates shutdown") return err case codes.Unimplemented: log.Warn("Job feature is not supported by the current management server version. " + "Please update the management service to use this feature.") return nil default: - c.notifyDisconnected(err) - log.Warnf("disconnected from the Management service but will retry silently. Reason: %v", err) + log.Warnf("job stream disconnected, will retry silently. Reason: %v", err) return err } } else { // non-gRPC error - c.notifyDisconnected(err) - log.Warnf("disconnected from the Management service but will retry silently. Reason: %v", err) + log.Warnf("job stream disconnected, will retry silently. Reason: %v", err) return err } } From 8fc4265995e81dd6ff12e438b8b4232c7bd377bc Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Tue, 28 Apr 2026 15:04:48 +0200 Subject: [PATCH 351/374] [relay] evict foreign client cache on disconnect (#6015) * [relay] evict foreign client cache on disconnect When a foreign relay's TCP connection drops, the manager's onServerDisconnected handler only triggered reconnect logic for the home server; the disconnected foreign entry stayed in the relayClients cache. Subsequent OpenConn calls reused the closed client until the 60-second cleanup tick evicted it, breaking peer connectivity through that relay for up to a minute. Evict the foreign entry from the cache on disconnect so the next OpenConn dials a fresh client. Also: - Make the reconnect backoff cap configurable via WithMaxBackoffInterval ManagerOption; the previous hard-coded 60s constant forced TestAutoReconnect to sleep ~61s. Test now polls Ready() and finishes in ~2s. - Add NB_HOME_RELAY_SERVERS env var that overrides the relay URL list received from management, so a peer can be pinned to a specific home relay (used by the netbird-conn-lab Edge 4 reproducer). * [client] treat empty NB_HOME_RELAY_SERVERS as unset Returning (urls=[], ok=true) when the env var contained only separators or whitespace caused callers to wipe the mgmt-provided relay list, leaving the peer with no relays. Treat a parsed-empty result the same as an unset env. --- client/internal/connect.go | 4 +++ client/internal/engine.go | 7 ++++- client/internal/peer/env.go | 28 +++++++++++++++++++- shared/relay/client/guard.go | 30 +++++++++++++--------- shared/relay/client/manager.go | 40 +++++++++++++++++++++++++---- shared/relay/client/manager_test.go | 23 +++++++++++++++-- 6 files changed, 111 insertions(+), 21 deletions(-) diff --git a/client/internal/connect.go b/client/internal/connect.go index ac498f719..72e096a80 100644 --- a/client/internal/connect.go +++ b/client/internal/connect.go @@ -333,6 +333,10 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan c.statusRecorder.MarkSignalConnected() relayURLs, token := parseRelayInfo(loginResp) + if override, ok := peer.OverrideRelayURLs(); ok { + log.Infof("overriding relay URLs from %s: %v", peer.EnvKeyNBHomeRelayServers, override) + relayURLs = override + } peerConfig := loginResp.GetPeerConfig() engineConfig, err := createEngineConfig(myPrivateKey, c.config, peerConfig, logPath) diff --git a/client/internal/engine.go b/client/internal/engine.go index 8d7e02bd5..351e4bfe9 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -944,7 +944,12 @@ func (e *Engine) handleRelayUpdate(update *mgmProto.RelayConfig) error { return fmt.Errorf("update relay token: %w", err) } - e.relayManager.UpdateServerURLs(update.Urls) + urls := update.Urls + if override, ok := peer.OverrideRelayURLs(); ok { + log.Infof("overriding relay URLs from %s: %v", peer.EnvKeyNBHomeRelayServers, override) + urls = override + } + e.relayManager.UpdateServerURLs(urls) // Just in case the agent started with an MGM server where the relay was disabled but was later enabled. // We can ignore all errors because the guard will manage the reconnection retries. diff --git a/client/internal/peer/env.go b/client/internal/peer/env.go index b4ba9ad7b..ed6a3af53 100644 --- a/client/internal/peer/env.go +++ b/client/internal/peer/env.go @@ -7,7 +7,8 @@ import ( ) const ( - EnvKeyNBForceRelay = "NB_FORCE_RELAY" + EnvKeyNBForceRelay = "NB_FORCE_RELAY" + EnvKeyNBHomeRelayServers = "NB_HOME_RELAY_SERVERS" ) func IsForceRelayed() bool { @@ -16,3 +17,28 @@ func IsForceRelayed() bool { } return strings.EqualFold(os.Getenv(EnvKeyNBForceRelay), "true") } + +// OverrideRelayURLs returns the relay server URL list set in +// NB_HOME_RELAY_SERVERS (comma-separated) and a boolean indicating whether +// the override is active. When the env var is unset, the boolean is false +// and the caller should keep the list received from the management server. +// Intended for lab/debug scenarios where a peer must pin to a specific home +// relay regardless of what management offers. +func OverrideRelayURLs() ([]string, bool) { + raw := os.Getenv(EnvKeyNBHomeRelayServers) + if raw == "" { + return nil, false + } + parts := strings.Split(raw, ",") + urls := make([]string, 0, len(parts)) + for _, p := range parts { + p = strings.TrimSpace(p) + if p != "" { + urls = append(urls, p) + } + } + if len(urls) == 0 { + return nil, false + } + return urls, true +} diff --git a/shared/relay/client/guard.go b/shared/relay/client/guard.go index f4d3a8cce..d7892d0ce 100644 --- a/shared/relay/client/guard.go +++ b/shared/relay/client/guard.go @@ -8,10 +8,7 @@ import ( log "github.com/sirupsen/logrus" ) -const ( - // TODO: make it configurable, the manager should validate all configurable parameters - reconnectingTimeout = 60 * time.Second -) +const defaultMaxBackoffInterval = 60 * time.Second // Guard manage the reconnection tries to the Relay server in case of disconnection event. type Guard struct { @@ -19,14 +16,23 @@ type Guard struct { OnNewRelayClient chan *Client OnReconnected chan struct{} serverPicker *ServerPicker + + // maxBackoffInterval caps the exponential backoff between reconnect + // attempts. + maxBackoffInterval time.Duration } -// NewGuard creates a new guard for the relay client. -func NewGuard(sp *ServerPicker) *Guard { +// NewGuard creates a new guard for the relay client. A non-positive +// maxBackoffInterval falls back to defaultMaxBackoffInterval. +func NewGuard(sp *ServerPicker, maxBackoffInterval time.Duration) *Guard { + if maxBackoffInterval <= 0 { + maxBackoffInterval = defaultMaxBackoffInterval + } g := &Guard{ - OnNewRelayClient: make(chan *Client, 1), - OnReconnected: make(chan struct{}, 1), - serverPicker: sp, + OnNewRelayClient: make(chan *Client, 1), + OnReconnected: make(chan struct{}, 1), + serverPicker: sp, + maxBackoffInterval: maxBackoffInterval, } return g } @@ -49,7 +55,7 @@ func (g *Guard) StartReconnectTrys(ctx context.Context, relayClient *Client) { } // start a ticker to pick a new server - ticker := exponentTicker(ctx) + ticker := g.exponentTicker(ctx) defer ticker.Stop() for { @@ -125,11 +131,11 @@ func (g *Guard) notifyReconnected() { } } -func exponentTicker(ctx context.Context) *backoff.Ticker { +func (g *Guard) exponentTicker(ctx context.Context) *backoff.Ticker { bo := backoff.WithContext(&backoff.ExponentialBackOff{ InitialInterval: 2 * time.Second, Multiplier: 2, - MaxInterval: reconnectingTimeout, + MaxInterval: g.maxBackoffInterval, Clock: backoff.SystemClock, }, ctx) diff --git a/shared/relay/client/manager.go b/shared/relay/client/manager.go index 6220e7f6b..37104bfe7 100644 --- a/shared/relay/client/manager.go +++ b/shared/relay/client/manager.go @@ -39,6 +39,15 @@ func NewRelayTrack() *RelayTrack { type OnServerCloseListener func() +// ManagerOption configures a Manager at construction time. +type ManagerOption func(*Manager) + +// WithMaxBackoffInterval caps the exponential backoff between reconnect +// attempts to the home relay. A non-positive value keeps the default. +func WithMaxBackoffInterval(d time.Duration) ManagerOption { + return func(m *Manager) { m.maxBackoffInterval = d } +} + // Manager is a manager for the relay client instances. It establishes one persistent connection to the given relay URL // and automatically reconnect to them in case disconnection. // The manager also manage temporary relay connection. If a client wants to communicate with a client on a @@ -64,12 +73,13 @@ type Manager struct { onReconnectedListenerFn func() listenerLock sync.Mutex - mtu uint16 + mtu uint16 + maxBackoffInterval time.Duration } // NewManager creates a new manager instance. // The serverURL address can be empty. In this case, the manager will not serve. -func NewManager(ctx context.Context, serverURLs []string, peerID string, mtu uint16) *Manager { +func NewManager(ctx context.Context, serverURLs []string, peerID string, mtu uint16, opts ...ManagerOption) *Manager { tokenStore := &relayAuth.TokenStore{} m := &Manager{ @@ -86,8 +96,11 @@ func NewManager(ctx context.Context, serverURLs []string, peerID string, mtu uin relayClients: make(map[string]*RelayTrack), onDisconnectedListeners: make(map[string]*list.List), } + for _, opt := range opts { + opt(m) + } m.serverPicker.ServerURLs.Store(serverURLs) - m.reconnectGuard = NewGuard(m.serverPicker) + m.reconnectGuard = NewGuard(m.serverPicker, m.maxBackoffInterval) return m } @@ -290,19 +303,36 @@ func (m *Manager) onServerConnected() { go m.onReconnectedListenerFn() } -// onServerDisconnected start to reconnection for home server only +// onServerDisconnected handles relay disconnect events. For the home server it +// starts the reconnect guard. For foreign servers it evicts the now-dead client +// from the cache so the next OpenConn builds a fresh one instead of reusing a +// closed client. func (m *Manager) onServerDisconnected(serverAddress string) { m.relayClientMu.Lock() - if serverAddress == m.relayClient.connectionURL { + isHome := m.relayClient != nil && serverAddress == m.relayClient.connectionURL + if isHome { go func(client *Client) { m.reconnectGuard.StartReconnectTrys(m.ctx, client) }(m.relayClient) } m.relayClientMu.Unlock() + if !isHome { + m.evictForeignRelay(serverAddress) + } + m.notifyOnDisconnectListeners(serverAddress) } +func (m *Manager) evictForeignRelay(serverAddress string) { + m.relayClientsMutex.Lock() + defer m.relayClientsMutex.Unlock() + if _, ok := m.relayClients[serverAddress]; ok { + delete(m.relayClients, serverAddress) + log.Debugf("evicted disconnected foreign relay client: %s", serverAddress) + } +} + func (m *Manager) listenGuardEvent(ctx context.Context) { for { select { diff --git a/shared/relay/client/manager_test.go b/shared/relay/client/manager_test.go index fb91f7682..5bbcad886 100644 --- a/shared/relay/client/manager_test.go +++ b/shared/relay/client/manager_test.go @@ -2,6 +2,7 @@ package client import ( "context" + "fmt" "testing" "time" @@ -360,7 +361,8 @@ func TestAutoReconnect(t *testing.T) { t.Fatalf("failed to serve manager: %s", err) } - clientAlice := NewManager(mCtx, toURL(srvCfg), "alice", iface.DefaultMTU) + clientAlice := NewManager(mCtx, toURL(srvCfg), "alice", iface.DefaultMTU, + WithMaxBackoffInterval(2*time.Second)) err = clientAlice.Serve() if err != nil { t.Fatalf("failed to serve manager: %s", err) @@ -384,7 +386,9 @@ func TestAutoReconnect(t *testing.T) { } log.Infof("waiting for reconnection") - time.Sleep(reconnectingTimeout + 1*time.Second) + if err := waitForReady(ctx, clientAlice, 15*time.Second); err != nil { + t.Fatalf("manager did not reconnect: %s", err) + } log.Infof("reopent the connection") _, err = clientAlice.OpenConn(ctx, ra, "bob") @@ -393,6 +397,21 @@ func TestAutoReconnect(t *testing.T) { } } +func waitForReady(ctx context.Context, m *Manager, timeout time.Duration) error { + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + if m.Ready() { + return nil + } + select { + case <-time.After(100 * time.Millisecond): + case <-ctx.Done(): + return ctx.Err() + } + } + return fmt.Errorf("manager not ready within %s", timeout) +} + func TestNotifierDoubleAdd(t *testing.T) { ctx := context.Background() From 9417ce3b3af8102381d3245419b968f389c8c98d Mon Sep 17 00:00:00 2001 From: EL OUAZIZI Walid <156519881+WalidDevIO@users.noreply.github.com> Date: Tue, 28 Apr 2026 17:22:51 +0200 Subject: [PATCH 352/374] fix(getting-started): Infinite healthcheck loop with existing traefik (#5871) --- infrastructure_files/getting-started.sh | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh index 2a3f840b4..9d1b57258 100755 --- a/infrastructure_files/getting-started.sh +++ b/infrastructure_files/getting-started.sh @@ -231,7 +231,20 @@ get_upstream_host() { wait_management_proxy() { local proxy_container="${1:-traefik}" + local use_docker_logs=false set +e + + if [[ "$proxy_container" == "detect-traefik" ]]; then + proxy_container=$(docker ps --format "{{.ID}}\t{{.Image}}\t{{.Ports}}" \ + | awk -F'\t' '$2 ~ /traefik/ && $3 ~ /:(80|443)->/ {print $1; exit}') + + if [[ -z "$proxy_container" ]]; then + echo "Warning: could not auto-detect Traefik container, log output will be skipped on timeout." > /dev/stderr + else + use_docker_logs=true + fi + fi + echo -n "Waiting for NetBird server to become ready" counter=1 while true; do @@ -242,7 +255,13 @@ wait_management_proxy() { if [[ $counter -eq 60 ]]; then echo "" echo "Taking too long. Checking logs..." - $DOCKER_COMPOSE_COMMAND logs --tail=20 "$proxy_container" + if [[ -n "$proxy_container" ]]; then + if [[ "$use_docker_logs" == "true" ]]; then + docker logs --tail=20 "$proxy_container" + else + $DOCKER_COMPOSE_COMMAND logs --tail=20 "$proxy_container" + fi + fi $DOCKER_COMPOSE_COMMAND logs --tail=20 netbird-server fi echo -n " ." @@ -518,7 +537,7 @@ start_services_and_show_instructions() { $DOCKER_COMPOSE_COMMAND up -d sleep 3 - wait_management_direct + wait_management_proxy detect-traefik echo -e "$MSG_DONE" print_post_setup_instructions From db44848e2d28038118ca09b6ae87332f247b43e1 Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Tue, 28 Apr 2026 18:25:56 +0300 Subject: [PATCH 353/374] [management] Drop netmap calculation on peer read (#6006) --- management/server/peer.go | 81 +++------------------------------- management/server/peer_test.go | 22 ++------- 2 files changed, 9 insertions(+), 94 deletions(-) diff --git a/management/server/peer.go b/management/server/peer.go index a95ae17a3..07428539b 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -33,8 +33,8 @@ import ( const remoteJobsMinVer = "0.64.0" -// GetPeers returns a list of peers under the given account filtering out peers that do not belong to a user if -// the current user is not an admin. +// GetPeers returns peers visible to the user within an account. +// Users with "peers:read" see all peers. Otherwise, users see only their own peers, or none if restricted by account settings. func (am *DefaultAccountManager) GetPeers(ctx context.Context, accountID, userID, nameFilter, ipFilter string) ([]*nbpeer.Peer, error) { user, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthNone, userID) if err != nil { @@ -46,14 +46,8 @@ func (am *DefaultAccountManager) GetPeers(ctx context.Context, accountID, userID return nil, status.NewPermissionValidationError(err) } - accountPeers, err := am.Store.GetAccountPeers(ctx, store.LockingStrengthNone, accountID, nameFilter, ipFilter) - if err != nil { - return nil, err - } - - // @note if the user has permission to read peers it shows all account peers if allowed { - return accountPeers, nil + return am.Store.GetAccountPeers(ctx, store.LockingStrengthNone, accountID, nameFilter, ipFilter) } settings, err := am.Store.GetAccountSettings(ctx, store.LockingStrengthNone, accountID) @@ -65,41 +59,7 @@ func (am *DefaultAccountManager) GetPeers(ctx context.Context, accountID, userID return []*nbpeer.Peer{}, nil } - // @note if it does not have permission read peers then only display it's own peers - peers := make([]*nbpeer.Peer, 0) - peersMap := make(map[string]*nbpeer.Peer) - - for _, peer := range accountPeers { - if user.Id != peer.UserID { - continue - } - peers = append(peers, peer) - peersMap[peer.ID] = peer - } - - return am.getUserAccessiblePeers(ctx, accountID, peersMap, peers) -} - -func (am *DefaultAccountManager) getUserAccessiblePeers(ctx context.Context, accountID string, peersMap map[string]*nbpeer.Peer, peers []*nbpeer.Peer) ([]*nbpeer.Peer, error) { - account, err := am.requestBuffer.GetAccountWithBackpressure(ctx, accountID) - if err != nil { - return nil, err - } - - approvedPeersMap, err := am.integratedPeerValidator.GetValidatedPeers(ctx, accountID, maps.Values(account.Groups), maps.Values(account.Peers), account.Settings.Extra) - if err != nil { - return nil, err - } - - // fetch all the peers that have access to the user's peers - for _, peer := range peers { - aclPeers, _, _, _ := account.GetPeerConnectionResources(ctx, peer, approvedPeersMap, account.GetActiveGroupUsers()) - for _, p := range aclPeers { - peersMap[p.ID] = p - } - } - - return maps.Values(peersMap), nil + return am.Store.GetUserPeers(ctx, store.LockingStrengthNone, accountID, userID) } // MarkPeerConnected marks peer as connected (true) or disconnected (false) @@ -1230,7 +1190,8 @@ func peerLoginExpired(ctx context.Context, peer *nbpeer.Peer, settings *types.Se return false } -// GetPeer for a given accountID, peerID and userID error if not found. +// GetPeer returns a peer visible to the user within an account. +// Users with "peers:read" permission can access any peer. Otherwise, users can access only their own peer. func (am *DefaultAccountManager) GetPeer(ctx context.Context, accountID, peerID, userID string) (*nbpeer.Peer, error) { peer, err := am.Store.GetPeerByID(ctx, store.LockingStrengthNone, accountID, peerID) if err != nil { @@ -1255,36 +1216,6 @@ func (am *DefaultAccountManager) GetPeer(ctx context.Context, accountID, peerID, return peer, nil } - return am.checkIfUserOwnsPeer(ctx, accountID, userID, peer) -} - -func (am *DefaultAccountManager) checkIfUserOwnsPeer(ctx context.Context, accountID, userID string, peer *nbpeer.Peer) (*nbpeer.Peer, error) { - account, err := am.requestBuffer.GetAccountWithBackpressure(ctx, accountID) - if err != nil { - return nil, err - } - - approvedPeersMap, err := am.integratedPeerValidator.GetValidatedPeers(ctx, accountID, maps.Values(account.Groups), maps.Values(account.Peers), account.Settings.Extra) - if err != nil { - return nil, err - } - - // it is also possible that user doesn't own the peer but some of his peers have access to it, - // this is a valid case, show the peer as well. - userPeers, err := am.Store.GetUserPeers(ctx, store.LockingStrengthNone, accountID, userID) - if err != nil { - return nil, err - } - - for _, p := range userPeers { - aclPeers, _, _, _ := account.GetPeerConnectionResources(ctx, p, approvedPeersMap, account.GetActiveGroupUsers()) - for _, aclPeer := range aclPeers { - if aclPeer.ID == peer.ID { - return peer, nil - } - } - } - return nil, status.Errorf(status.Internal, "user %s has no access to peer %s under account %s", userID, peer.ID, accountID) } diff --git a/management/server/peer_test.go b/management/server/peer_test.go index 17202597a..dae676e77 100644 --- a/management/server/peer_test.go +++ b/management/server/peer_test.go @@ -559,25 +559,9 @@ func TestDefaultAccountManager_GetPeer(t *testing.T) { } assert.NotNil(t, peer) - // the user can see peer2 because peer1 of the user has access to peer2 due to the All group and the default rule 0 all-to-all access - peer, err = manager.GetPeer(context.Background(), accountID, peer2.ID, someUser) - if err != nil { - t.Fatal(err) - return - } - assert.NotNil(t, peer) - - // delete the all-to-all policy so that user's peer1 has no access to peer2 - for _, policy := range account.Policies { - err = manager.DeletePolicy(context.Background(), accountID, policy.ID, adminUser) - if err != nil { - t.Fatal(err) - return - } - } - - // at this point the user can't see the details of peer2 - peer, err = manager.GetPeer(context.Background(), accountID, peer2.ID, someUser) //nolint + // the user can NOT see peer2 because it is not owned by them. + // Regular users only see peers they directly own. + _, err = manager.GetPeer(context.Background(), accountID, peer2.ID, someUser) assert.Error(t, err) // admin users can always access all the peers From e5474e199fefc5dcceaf1bde67470321254a67a3 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 29 Apr 2026 03:54:06 +0900 Subject: [PATCH 354/374] [client] Use WinRT COM for Windows toasts (#6013) * Use WinRT COM for Windows toasts instead of fyne's PowerShell path * Quote autostart path and split HKCU registry into per-user component --- client/installer.nsis | 13 +++- client/netbird.wxs | 22 ++++++- client/ui/client_ui.go | 9 ++- client/ui/event/event.go | 19 ++++-- client/ui/event_handler.go | 17 +++-- client/ui/notifier/notifier.go | 27 ++++++++ client/ui/notifier/notifier_other.go | 9 +++ client/ui/notifier/notifier_windows.go | 88 ++++++++++++++++++++++++++ client/ui/profile.go | 6 +- go.mod | 1 + go.sum | 2 + 11 files changed, 188 insertions(+), 25 deletions(-) create mode 100644 client/ui/notifier/notifier.go create mode 100644 client/ui/notifier/notifier_other.go create mode 100644 client/ui/notifier/notifier_windows.go diff --git a/client/installer.nsis b/client/installer.nsis index 96d60a785..8b2b8ea39 100644 --- a/client/installer.nsis +++ b/client/installer.nsis @@ -200,6 +200,7 @@ Pop $0 !macroend Function .onInit +SetRegView 64 StrCpy $INSTDIR "${INSTALL_DIR}" ReadRegStr $R0 HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\$(^NAME)" "UninstallString" ${If} $R0 != "" @@ -214,6 +215,10 @@ ${If} $R0 != "" ${EndIf} FunctionEnd + +Function un.onInit +SetRegView 64 +FunctionEnd ###################################################################### Section -MainProgram ${INSTALL_TYPE} @@ -228,6 +233,7 @@ Section -MainProgram !else File /r "..\\dist\\netbird_windows_amd64\\" !endif + File "..\\client\\ui\\assets\\netbird.png" SectionEnd ###################################################################### @@ -247,9 +253,11 @@ WriteRegStr ${REG_ROOT} "${UI_REG_APP_PATH}" "" "$INSTDIR\${UI_APP_EXE}" ; Create autostart registry entry based on checkbox DetailPrint "Autostart enabled: $AutostartEnabled" ${If} $AutostartEnabled == "1" - WriteRegStr HKCU "${AUTOSTART_REG_KEY}" "${APP_NAME}" "$INSTDIR\${UI_APP_EXE}.exe" + WriteRegStr HKLM "${AUTOSTART_REG_KEY}" "${APP_NAME}" '"$INSTDIR\${UI_APP_EXE}.exe"' DetailPrint "Added autostart registry entry: $INSTDIR\${UI_APP_EXE}.exe" ${Else} + DeleteRegValue HKLM "${AUTOSTART_REG_KEY}" "${APP_NAME}" + ; Legacy: pre-HKLM installs wrote to HKCU; clean that up too. DeleteRegValue HKCU "${AUTOSTART_REG_KEY}" "${APP_NAME}" DetailPrint "Autostart not enabled by user" ${EndIf} @@ -283,6 +291,8 @@ ExecWait `taskkill /im ${UI_APP_EXE}.exe /f` ; Remove autostart registry entry DetailPrint "Removing autostart registry entry if exists..." +DeleteRegValue HKLM "${AUTOSTART_REG_KEY}" "${APP_NAME}" +; Legacy: pre-HKLM installs wrote to HKCU; clean that up too. DeleteRegValue HKCU "${AUTOSTART_REG_KEY}" "${APP_NAME}" ; Handle data deletion based on checkbox @@ -321,6 +331,7 @@ DetailPrint "Removing registry keys..." DeleteRegKey ${REG_ROOT} "${REG_APP_PATH}" DeleteRegKey ${REG_ROOT} "${UNINSTALL_PATH}" DeleteRegKey ${REG_ROOT} "${UI_REG_APP_PATH}" +DeleteRegKey HKCU "Software\Classes\AppUserModelId\${APP_NAME}" DetailPrint "Removing application directory from PATH..." EnVar::SetHKLM diff --git a/client/netbird.wxs b/client/netbird.wxs index 03221dd91..23aa250f4 100644 --- a/client/netbird.wxs +++ b/client/netbird.wxs @@ -18,10 +18,17 @@ - - + + + + + + + + + @@ -46,8 +53,19 @@ + + + + + + + + + + diff --git a/client/ui/client_ui.go b/client/ui/client_ui.go index c149b2152..0a4687eda 100644 --- a/client/ui/client_ui.go +++ b/client/ui/client_ui.go @@ -42,6 +42,7 @@ import ( "github.com/netbirdio/netbird/client/proto" "github.com/netbirdio/netbird/client/ui/desktop" "github.com/netbirdio/netbird/client/ui/event" + "github.com/netbirdio/netbird/client/ui/notifier" "github.com/netbirdio/netbird/client/ui/process" "github.com/netbirdio/netbird/util" @@ -260,6 +261,7 @@ type serviceClient struct { // application with main windows. app fyne.App + notifier notifier.Notifier wSettings fyne.Window showAdvancedSettings bool sendNotification bool @@ -364,6 +366,7 @@ func newServiceClient(args *newServiceClientArgs) *serviceClient { cancel: cancel, addr: args.addr, app: args.app, + notifier: notifier.New(args.app), logFile: args.logFile, sendNotification: false, @@ -892,7 +895,7 @@ func (s *serviceClient) updateStatus() error { if err != nil { log.Errorf("get service status: %v", err) if s.connected { - s.app.SendNotification(fyne.NewNotification("Error", "Connection to service lost")) + s.notifier.Send("Error", "Connection to service lost") } s.setDisconnectedStatus() return err @@ -1109,7 +1112,7 @@ func (s *serviceClient) onTrayReady() { } }() - s.eventManager = event.NewManager(s.app, s.addr) + s.eventManager = event.NewManager(s.notifier, s.addr) s.eventManager.SetNotificationsEnabled(s.mNotifications.Checked()) s.eventManager.AddHandler(func(event *proto.SystemEvent) { if event.Category == proto.SystemEvent_SYSTEM { @@ -1548,7 +1551,7 @@ func (s *serviceClient) onUpdateAvailable(newVersion string, enforced bool) { if enforced && s.lastNotifiedVersion != newVersion { s.lastNotifiedVersion = newVersion - s.app.SendNotification(fyne.NewNotification("Update available", "A new version "+newVersion+" is ready to install")) + s.notifier.Send("Update available", "A new version "+newVersion+" is ready to install") } } diff --git a/client/ui/event/event.go b/client/ui/event/event.go index b8ed09a5c..ea968f60a 100644 --- a/client/ui/event/event.go +++ b/client/ui/event/event.go @@ -8,7 +8,6 @@ import ( "sync" "time" - "fyne.io/fyne/v2" "github.com/cenkalti/backoff/v4" log "github.com/sirupsen/logrus" "google.golang.org/grpc" @@ -18,11 +17,17 @@ import ( "github.com/netbirdio/netbird/client/ui/desktop" ) +// Notifier sends desktop notifications. Defined here so the event package +// does not depend on fyne or the platform-specific notifier implementation. +type Notifier interface { + Send(title, body string) +} + type Handler func(*proto.SystemEvent) type Manager struct { - app fyne.App - addr string + notifier Notifier + addr string mu sync.Mutex ctx context.Context @@ -31,10 +36,10 @@ type Manager struct { handlers []Handler } -func NewManager(app fyne.App, addr string) *Manager { +func NewManager(notifier Notifier, addr string) *Manager { return &Manager{ - app: app, - addr: addr, + notifier: notifier, + addr: addr, } } @@ -114,7 +119,7 @@ func (e *Manager) handleEvent(event *proto.SystemEvent) { if id != "" { body += fmt.Sprintf(" ID: %s", id) } - e.app.SendNotification(fyne.NewNotification(title, body)) + e.notifier.Send(title, body) } for _, handler := range handlers { diff --git a/client/ui/event_handler.go b/client/ui/event_handler.go index 60a580dae..876fcef5f 100644 --- a/client/ui/event_handler.go +++ b/client/ui/event_handler.go @@ -9,7 +9,6 @@ import ( "os" "os/exec" - "fyne.io/fyne/v2" "fyne.io/systray" log "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" @@ -87,7 +86,7 @@ func (h *eventHandler) handleConnectClick() { if errors.Is(err, context.Canceled) || (ok && st.Code() == codes.Canceled) { log.Debugf("connect operation cancelled by user") } else { - h.client.app.SendNotification(fyne.NewNotification("Error", "Failed to connect")) + h.client.notifier.Send("Error", "Failed to connect") log.Errorf("connect failed: %v", err) } } @@ -112,7 +111,7 @@ func (h *eventHandler) handleDisconnectClick() { if err := h.client.menuDownClick(); err != nil { st, ok := status.FromError(err) if !errors.Is(err, context.Canceled) && !(ok && st.Code() == codes.Canceled) { - h.client.app.SendNotification(fyne.NewNotification("Error", "Failed to disconnect")) + h.client.notifier.Send("Error", "Failed to disconnect") log.Errorf("disconnect failed: %v", err) } else { log.Debugf("disconnect cancelled or already disconnecting") @@ -130,7 +129,7 @@ func (h *eventHandler) handleAllowSSHClick() { if err := h.updateConfigWithErr(); err != nil { h.toggleCheckbox(h.client.mAllowSSH) // revert checkbox state on error log.Errorf("failed to update config: %v", err) - h.client.app.SendNotification(fyne.NewNotification("Error", "Failed to update SSH settings")) + h.client.notifier.Send("Error", "Failed to update SSH settings") } } @@ -140,7 +139,7 @@ func (h *eventHandler) handleAutoConnectClick() { if err := h.updateConfigWithErr(); err != nil { h.toggleCheckbox(h.client.mAutoConnect) // revert checkbox state on error log.Errorf("failed to update config: %v", err) - h.client.app.SendNotification(fyne.NewNotification("Error", "Failed to update auto-connect settings")) + h.client.notifier.Send("Error", "Failed to update auto-connect settings") } } @@ -149,7 +148,7 @@ func (h *eventHandler) handleRosenpassClick() { if err := h.updateConfigWithErr(); err != nil { h.toggleCheckbox(h.client.mEnableRosenpass) // revert checkbox state on error log.Errorf("failed to update config: %v", err) - h.client.app.SendNotification(fyne.NewNotification("Error", "Failed to update Rosenpass settings")) + h.client.notifier.Send("Error", "Failed to update Rosenpass settings") } } @@ -158,7 +157,7 @@ func (h *eventHandler) handleLazyConnectionClick() { if err := h.updateConfigWithErr(); err != nil { h.toggleCheckbox(h.client.mLazyConnEnabled) // revert checkbox state on error log.Errorf("failed to update config: %v", err) - h.client.app.SendNotification(fyne.NewNotification("Error", "Failed to update lazy connection settings")) + h.client.notifier.Send("Error", "Failed to update lazy connection settings") } } @@ -167,7 +166,7 @@ func (h *eventHandler) handleBlockInboundClick() { if err := h.updateConfigWithErr(); err != nil { h.toggleCheckbox(h.client.mBlockInbound) // revert checkbox state on error log.Errorf("failed to update config: %v", err) - h.client.app.SendNotification(fyne.NewNotification("Error", "Failed to update block inbound settings")) + h.client.notifier.Send("Error", "Failed to update block inbound settings") } } @@ -176,7 +175,7 @@ func (h *eventHandler) handleNotificationsClick() { if err := h.updateConfigWithErr(); err != nil { h.toggleCheckbox(h.client.mNotifications) // revert checkbox state on error log.Errorf("failed to update config: %v", err) - h.client.app.SendNotification(fyne.NewNotification("Error", "Failed to update notifications settings")) + h.client.notifier.Send("Error", "Failed to update notifications settings") } else if h.client.eventManager != nil { h.client.eventManager.SetNotificationsEnabled(h.client.mNotifications.Checked()) } diff --git a/client/ui/notifier/notifier.go b/client/ui/notifier/notifier.go new file mode 100644 index 000000000..8d1cbe4c4 --- /dev/null +++ b/client/ui/notifier/notifier.go @@ -0,0 +1,27 @@ +// Package notifier sends desktop notifications. On Windows it uses the WinRT +// COM API directly via go-toast/v2 to avoid the PowerShell window flash that +// fyne's default implementation produces. On other platforms it delegates to +// fyne. +package notifier + +import "fyne.io/fyne/v2" + +// Notifier sends desktop notifications. +type Notifier interface { + Send(title, body string) +} + +// New returns a platform-specific Notifier. The fyne app is used as the +// fallback notifier on platforms where no native implementation is wired up, +// and on Windows when the COM path fails to initialize. +func New(app fyne.App) Notifier { + return newNotifier(app) +} + +type fyneNotifier struct { + app fyne.App +} + +func (f *fyneNotifier) Send(title, body string) { + f.app.SendNotification(fyne.NewNotification(title, body)) +} diff --git a/client/ui/notifier/notifier_other.go b/client/ui/notifier/notifier_other.go new file mode 100644 index 000000000..686d2885f --- /dev/null +++ b/client/ui/notifier/notifier_other.go @@ -0,0 +1,9 @@ +//go:build !windows + +package notifier + +import "fyne.io/fyne/v2" + +func newNotifier(app fyne.App) Notifier { + return &fyneNotifier{app: app} +} diff --git a/client/ui/notifier/notifier_windows.go b/client/ui/notifier/notifier_windows.go new file mode 100644 index 000000000..c7afb43ae --- /dev/null +++ b/client/ui/notifier/notifier_windows.go @@ -0,0 +1,88 @@ +package notifier + +import ( + "os" + "path/filepath" + "sync" + + "fyne.io/fyne/v2" + toast "git.sr.ht/~jackmordaunt/go-toast/v2" + "git.sr.ht/~jackmordaunt/go-toast/v2/wintoast" + log "github.com/sirupsen/logrus" +) + +const ( + // appID is the AppUserModelID shown in the Windows Action Center. It + // must match the System.AppUserModel.ID property set on the Start Menu + // shortcut by the MSI (see client/netbird.wxs); otherwise Windows + // groups toasts under a separate, unbranded entry. + appID = "NetBird" + + // appGUID identifies the COM activation callback class. Generated once + // for NetBird; do not change without coordinating an installer bump, + // since old registry entries pointing at the previous GUID would orphan. + appGUID = "{0E1B4DE7-E148-432B-9814-544F941826EC}" +) + +type comNotifier struct { + fallback *fyneNotifier + ready bool + iconPath string +} + +var ( + initOnce sync.Once + initErr error +) + +func newNotifier(app fyne.App) Notifier { + n := &comNotifier{ + fallback: &fyneNotifier{app: app}, + iconPath: resolveIcon(), + } + initOnce.Do(func() { + initErr = wintoast.SetAppData(wintoast.AppData{ + AppID: appID, + GUID: appGUID, + IconPath: n.iconPath, + }) + }) + if initErr != nil { + log.Warnf("toast: register app data failed, falling back to fyne notifications: %v", initErr) + return n.fallback + } + n.ready = true + return n +} + +func (n *comNotifier) Send(title, body string) { + if !n.ready { + n.fallback.Send(title, body) + return + } + notification := toast.Notification{ + AppID: appID, + Title: title, + Body: body, + Icon: n.iconPath, + } + if err := notification.Push(); err != nil { + log.Warnf("toast: push failed, using fyne fallback: %v", err) + n.fallback.Send(title, body) + } +} + +// resolveIcon returns an absolute path to the toast icon, or an empty string +// when no icon can be located. Windows requires a PNG/JPG for the +// AppUserModelId IconUri registry value; .ico is silently ignored. +func resolveIcon() string { + exe, err := os.Executable() + if err != nil { + return "" + } + candidate := filepath.Join(filepath.Dir(exe), "netbird.png") + if _, err := os.Stat(candidate); err == nil { + return candidate + } + return "" +} diff --git a/client/ui/profile.go b/client/ui/profile.go index 74189c9a0..7ee89e631 100644 --- a/client/ui/profile.go +++ b/client/ui/profile.go @@ -548,7 +548,7 @@ func (p *profileMenu) refresh() { if err != nil { log.Errorf("failed to switch profile: %v", err) // show notification dialog - p.app.SendNotification(fyne.NewNotification("Error", "Failed to switch profile")) + p.serviceClient.notifier.Send("Error", "Failed to switch profile") return } @@ -628,9 +628,9 @@ func (p *profileMenu) refresh() { } if err := p.eventHandler.logout(p.ctx); err != nil { log.Errorf("logout failed: %v", err) - p.app.SendNotification(fyne.NewNotification("Error", "Failed to deregister")) + p.serviceClient.notifier.Send("Error", "Failed to deregister") } else { - p.app.SendNotification(fyne.NewNotification("Success", "Deregistered successfully")) + p.serviceClient.notifier.Send("Success", "Deregistered successfully") } } } diff --git a/go.mod b/go.mod index 1b5861a37..1958a3278 100644 --- a/go.mod +++ b/go.mod @@ -30,6 +30,7 @@ require ( require ( fyne.io/fyne/v2 v2.7.0 fyne.io/systray v1.12.1-0.20260116214250-81f8e1a496f9 + git.sr.ht/~jackmordaunt/go-toast/v2 v2.0.3 github.com/awnumar/memguard v0.23.0 github.com/aws/aws-sdk-go-v2 v1.38.3 github.com/aws/aws-sdk-go-v2/config v1.31.6 diff --git a/go.sum b/go.sum index 3772946e1..2abf55142 100644 --- a/go.sum +++ b/go.sum @@ -15,6 +15,8 @@ fyne.io/fyne/v2 v2.7.0 h1:GvZSpE3X0liU/fqstInVvRsaboIVpIWQ4/sfjDGIGGQ= fyne.io/fyne/v2 v2.7.0/go.mod h1:xClVlrhxl7D+LT+BWYmcrW4Nf+dJTvkhnPgji7spAwE= fyne.io/systray v1.12.1-0.20260116214250-81f8e1a496f9 h1:829+77I4TaMrcg9B3wf+gHhdSgoCVEgH2czlPXPbfj4= fyne.io/systray v1.12.1-0.20260116214250-81f8e1a496f9/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs= +git.sr.ht/~jackmordaunt/go-toast/v2 v2.0.3 h1:N3IGoHHp9pb6mj1cbXbuaSXV/UMKwmbKLf53nQmtqMA= +git.sr.ht/~jackmordaunt/go-toast/v2 v2.0.3/go.mod h1:QtOLZGz8olr4qH2vWK0QH0w0O4T9fEIjMuWpKUsH7nc= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/AppsFlyer/go-sundheit v0.6.0 h1:d2hBvCjBSb2lUsEWGfPigr4MCOt04sxB+Rppl0yUMSk= From 407e9d304b839c934fa7b30f42d01d7777651f9e Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 29 Apr 2026 15:09:55 +0900 Subject: [PATCH 355/374] [client] Move macOS sleep detection into the daemon (purego) (#5926) --- client/internal/sleep/detector_darwin.go | 435 ++++++--- client/proto/daemon.pb.go | 1131 ++++++++++------------ client/proto/daemon.proto | 16 - client/proto/daemon_grpc.pb.go | 438 ++++----- client/server/server.go | 1 + client/server/sleep.go | 75 +- client/ui/client_ui.go | 60 -- go.mod | 2 +- 8 files changed, 1066 insertions(+), 1092 deletions(-) diff --git a/client/internal/sleep/detector_darwin.go b/client/internal/sleep/detector_darwin.go index 3d6747ed1..ef495bded 100644 --- a/client/internal/sleep/detector_darwin.go +++ b/client/internal/sleep/detector_darwin.go @@ -2,217 +2,358 @@ package sleep -/* -#cgo LDFLAGS: -framework IOKit -framework CoreFoundation -#include -#include -#include - -extern void sleepCallbackBridge(); -extern void poweredOnCallbackBridge(); -extern void suspendedCallbackBridge(); -extern void resumedCallbackBridge(); - - -// C global variables for IOKit state -static IONotificationPortRef g_notifyPortRef = NULL; -static io_object_t g_notifierObject = 0; -static io_object_t g_generalInterestNotifier = 0; -static io_connect_t g_rootPort = 0; -static CFRunLoopRef g_runLoop = NULL; - -static void sleepCallback(void* refCon, io_service_t service, natural_t messageType, void* messageArgument) { - switch (messageType) { - case kIOMessageSystemWillSleep: - sleepCallbackBridge(); - IOAllowPowerChange(g_rootPort, (long)messageArgument); - break; - case kIOMessageSystemHasPoweredOn: - poweredOnCallbackBridge(); - break; - case kIOMessageServiceIsSuspended: - suspendedCallbackBridge(); - break; - case kIOMessageServiceIsResumed: - resumedCallbackBridge(); - break; - default: - break; - } -} - -static void registerNotifications() { - g_rootPort = IORegisterForSystemPower( - NULL, - &g_notifyPortRef, - (IOServiceInterestCallback)sleepCallback, - &g_notifierObject - ); - - if (g_rootPort == 0) { - return; - } - - CFRunLoopAddSource(CFRunLoopGetCurrent(), - IONotificationPortGetRunLoopSource(g_notifyPortRef), - kCFRunLoopCommonModes); - - g_runLoop = CFRunLoopGetCurrent(); - CFRunLoopRun(); -} - -static void unregisterNotifications() { - CFRunLoopRemoveSource(g_runLoop, - IONotificationPortGetRunLoopSource(g_notifyPortRef), - kCFRunLoopCommonModes); - - IODeregisterForSystemPower(&g_notifierObject); - IOServiceClose(g_rootPort); - IONotificationPortDestroy(g_notifyPortRef); - CFRunLoopStop(g_runLoop); - - g_notifyPortRef = NULL; - g_notifierObject = 0; - g_rootPort = 0; - g_runLoop = NULL; -} - -*/ -import "C" - import ( - "context" "fmt" "runtime" "sync" "time" + "unsafe" + "github.com/ebitengine/purego" log "github.com/sirupsen/logrus" ) -var ( - serviceRegistry = make(map[*Detector]struct{}) - serviceRegistryMu sync.Mutex +// IOKit message types from IOKit/IOMessage.h. +const ( + kIOMessageCanSystemSleep uintptr = 0xe0000270 + kIOMessageSystemWillSleep uintptr = 0xe0000280 + kIOMessageSystemHasPoweredOn uintptr = 0xe0000300 ) -//export sleepCallbackBridge -func sleepCallbackBridge() { - log.Info("sleepCallbackBridge event triggered") +var ( + ioKit iokitFuncs + cf cfFuncs + cfCommonModes uintptr - serviceRegistryMu.Lock() - defer serviceRegistryMu.Unlock() + libInitOnce sync.Once + libInitErr error - for svc := range serviceRegistry { - svc.triggerCallback(EventTypeSleep) - } + // callbackThunk is the single C-callable trampoline registered with IOKit. + callbackThunk uintptr + + serviceRegistry = make(map[*Detector]struct{}) + serviceRegistryMu sync.Mutex + session *runLoopSession + + // lifecycleMu serializes Register/Deregister so a new registration can't + // start a second runloop while a previous teardown is still pending. + lifecycleMu sync.Mutex +) + +// iokitFuncs holds IOKit symbols resolved once at init. +type iokitFuncs struct { + IORegisterForSystemPower func(refcon uintptr, portRef *uintptr, callback uintptr, notifier *uintptr) uintptr + IODeregisterForSystemPower func(notifier *uintptr) int32 + IOAllowPowerChange func(kernelPort uintptr, notificationID uintptr) int32 + IOServiceClose func(connect uintptr) int32 + IONotificationPortGetRunLoopSource func(port uintptr) uintptr + IONotificationPortDestroy func(port uintptr) } -//export resumedCallbackBridge -func resumedCallbackBridge() { - log.Info("resumedCallbackBridge event triggered") +// cfFuncs holds CoreFoundation symbols resolved once at init. +type cfFuncs struct { + CFRunLoopGetCurrent func() uintptr + CFRunLoopRun func() + CFRunLoopStop func(rl uintptr) + CFRunLoopAddSource func(rl, source, mode uintptr) + CFRunLoopRemoveSource func(rl, source, mode uintptr) } -//export suspendedCallbackBridge -func suspendedCallbackBridge() { - log.Info("suspendedCallbackBridge event triggered") +// runLoopSession bundles the handles owned by one CFRunLoop lifetime. A nil +// session means no runloop is active and the next Register must start one. +type runLoopSession struct { + rl uintptr + port uintptr + notifier uintptr + rp uintptr } -//export poweredOnCallbackBridge -func poweredOnCallbackBridge() { - log.Info("poweredOnCallbackBridge event triggered") - serviceRegistryMu.Lock() - defer serviceRegistryMu.Unlock() - - for svc := range serviceRegistry { - svc.triggerCallback(EventTypeWakeUp) - } +// detectorSnapshot pins a detector's callback and done channel so dispatch +// runs with values valid at snapshot time, even if a concurrent +// Deregister/Register rewrites the detector's fields. +type detectorSnapshot struct { + detector *Detector + callback func(event EventType) + done <-chan struct{} } +// Detector delivers sleep and wake events to a registered callback. type Detector struct { callback func(event EventType) - ctx context.Context - cancel context.CancelFunc -} - -func NewDetector() (*Detector, error) { - return &Detector{}, nil + done chan struct{} } +// Register installs callback for power events. The first registration starts +// the CFRunLoop on a dedicated OS-locked thread and blocks until IOKit +// registration succeeds or fails; subsequent registrations just add to the +// dispatch set. func (d *Detector) Register(callback func(event EventType)) error { - serviceRegistryMu.Lock() - defer serviceRegistryMu.Unlock() + lifecycleMu.Lock() + defer lifecycleMu.Unlock() + serviceRegistryMu.Lock() if _, exists := serviceRegistry[d]; exists { + serviceRegistryMu.Unlock() return fmt.Errorf("detector service already registered") } - d.callback = callback + d.done = make(chan struct{}) + serviceRegistry[d] = struct{}{} + needSetup := session == nil + serviceRegistryMu.Unlock() - d.ctx, d.cancel = context.WithCancel(context.Background()) - - if len(serviceRegistry) > 0 { - serviceRegistry[d] = struct{}{} + if !needSetup { return nil } - serviceRegistry[d] = struct{}{} - - // CFRunLoop must run on a single fixed OS thread - go func() { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - C.registerNotifications() - }() + errCh := make(chan error, 1) + go runRunLoop(errCh) + if err := <-errCh; err != nil { + serviceRegistryMu.Lock() + delete(serviceRegistry, d) + close(d.done) + d.done = nil + serviceRegistryMu.Unlock() + return err + } log.Info("sleep detection service started on macOS") return nil } -// Deregister removes the detector. When the last detector is removed, IOKit registration is torn down -// and the runloop is stopped and cleaned up. +// Deregister removes the detector. When the last detector leaves, IOKit +// notifications are torn down and the runloop is stopped. func (d *Detector) Deregister() error { + lifecycleMu.Lock() + defer lifecycleMu.Unlock() + serviceRegistryMu.Lock() - defer serviceRegistryMu.Unlock() - _, exists := serviceRegistry[d] - if !exists { + if _, exists := serviceRegistry[d]; !exists { + serviceRegistryMu.Unlock() return nil } - - // cancel and remove this detector - d.cancel() + close(d.done) delete(serviceRegistry, d) - // If other Detectors still exist, leave IOKit running if len(serviceRegistry) > 0 { + serviceRegistryMu.Unlock() return nil } + sess := session + serviceRegistryMu.Unlock() log.Info("sleep detection service stopping (deregister)") - // Deregister IOKit notifications, stop runloop, and free resources - C.unregisterNotifications() + if sess == nil { + return nil + } + + if sess.rl != 0 && sess.port != 0 { + source := ioKit.IONotificationPortGetRunLoopSource(sess.port) + cf.CFRunLoopRemoveSource(sess.rl, source, cfCommonModes) + } + if sess.notifier != 0 { + n := sess.notifier + ioKit.IODeregisterForSystemPower(&n) + } + + // Clear session only after IODeregisterForSystemPower returns so any + // in-flight powerCallback can still look up session.rp to ack sleep. + serviceRegistryMu.Lock() + session = nil + serviceRegistryMu.Unlock() + + if sess.rp != 0 { + ioKit.IOServiceClose(sess.rp) + } + if sess.port != 0 { + ioKit.IONotificationPortDestroy(sess.port) + } + if sess.rl != 0 { + cf.CFRunLoopStop(sess.rl) + } return nil } -func (d *Detector) triggerCallback(event EventType) { - doneChan := make(chan struct{}) +func (d *Detector) triggerCallback(event EventType, cb func(event EventType), done <-chan struct{}) { + if cb == nil || done == nil { + return + } + select { + case <-done: + return + default: + } + + doneChan := make(chan struct{}) timeout := time.NewTimer(500 * time.Millisecond) defer timeout.Stop() - cb := d.callback - go func(callback func(event EventType)) { + go func() { + defer close(doneChan) + defer func() { + if r := recover(); r != nil { + log.Errorf("panic in sleep callback: %v", r) + } + }() log.Info("sleep detection event fired") - callback(event) - close(doneChan) - }(cb) + cb(event) + }() select { case <-doneChan: - case <-d.ctx.Done(): + case <-done: case <-timeout.C: - log.Warnf("sleep callback timed out") + log.Warn("sleep callback timed out") } } + +// NewDetector initializes IOKit/CoreFoundation bindings and returns a Detector. +func NewDetector() (*Detector, error) { + if err := initLibs(); err != nil { + return nil, err + } + return &Detector{}, nil +} + +func initLibs() error { + libInitOnce.Do(func() { + iokit, err := purego.Dlopen("/System/Library/Frameworks/IOKit.framework/IOKit", purego.RTLD_NOW|purego.RTLD_GLOBAL) + if err != nil { + libInitErr = fmt.Errorf("dlopen IOKit: %w", err) + return + } + cfLib, err := purego.Dlopen("/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation", purego.RTLD_NOW|purego.RTLD_GLOBAL) + if err != nil { + libInitErr = fmt.Errorf("dlopen CoreFoundation: %w", err) + return + } + + purego.RegisterLibFunc(&ioKit.IORegisterForSystemPower, iokit, "IORegisterForSystemPower") + purego.RegisterLibFunc(&ioKit.IODeregisterForSystemPower, iokit, "IODeregisterForSystemPower") + purego.RegisterLibFunc(&ioKit.IOAllowPowerChange, iokit, "IOAllowPowerChange") + purego.RegisterLibFunc(&ioKit.IOServiceClose, iokit, "IOServiceClose") + purego.RegisterLibFunc(&ioKit.IONotificationPortGetRunLoopSource, iokit, "IONotificationPortGetRunLoopSource") + purego.RegisterLibFunc(&ioKit.IONotificationPortDestroy, iokit, "IONotificationPortDestroy") + + purego.RegisterLibFunc(&cf.CFRunLoopGetCurrent, cfLib, "CFRunLoopGetCurrent") + purego.RegisterLibFunc(&cf.CFRunLoopRun, cfLib, "CFRunLoopRun") + purego.RegisterLibFunc(&cf.CFRunLoopStop, cfLib, "CFRunLoopStop") + purego.RegisterLibFunc(&cf.CFRunLoopAddSource, cfLib, "CFRunLoopAddSource") + purego.RegisterLibFunc(&cf.CFRunLoopRemoveSource, cfLib, "CFRunLoopRemoveSource") + + modeAddr, err := purego.Dlsym(cfLib, "kCFRunLoopCommonModes") + if err != nil { + libInitErr = fmt.Errorf("dlsym kCFRunLoopCommonModes: %w", err) + return + } + // Launder the uintptr-to-pointer conversion through a Go variable so + // go vet's unsafeptr analyzer doesn't flag a system-library global. + cfCommonModes = **(**uintptr)(unsafe.Pointer(&modeAddr)) + + // NewCallback slots are a finite, non-reclaimable resource, so register + // a single thunk that dispatches to the current Detector set. + callbackThunk = purego.NewCallback(powerCallback) + }) + return libInitErr +} + +// powerCallback is the IOServiceInterestCallback trampoline, invoked on the +// runloop thread. A Go panic crossing the purego boundary has undefined +// behavior, so contain it here. +func powerCallback(refcon, service, messageType, messageArgument uintptr) uintptr { + defer func() { + if r := recover(); r != nil { + log.Errorf("panic in sleep powerCallback: %v", r) + } + }() + switch messageType { + case kIOMessageCanSystemSleep: + // Not acknowledging forces a 30s IOKit timeout before idle sleep. + allowPowerChange(messageArgument) + case kIOMessageSystemWillSleep: + dispatchEvent(EventTypeSleep) + allowPowerChange(messageArgument) + case kIOMessageSystemHasPoweredOn: + dispatchEvent(EventTypeWakeUp) + } + return 0 +} + +func allowPowerChange(messageArgument uintptr) { + serviceRegistryMu.Lock() + var port uintptr + if session != nil { + port = session.rp + } + serviceRegistryMu.Unlock() + if port != 0 { + ioKit.IOAllowPowerChange(port, messageArgument) + } +} + +func dispatchEvent(event EventType) { + serviceRegistryMu.Lock() + snaps := make([]detectorSnapshot, 0, len(serviceRegistry)) + for d := range serviceRegistry { + snaps = append(snaps, detectorSnapshot{ + detector: d, + callback: d.callback, + done: d.done, + }) + } + serviceRegistryMu.Unlock() + + for _, s := range snaps { + s.detector.triggerCallback(event, s.callback, s.done) + } +} + +// runRunLoop owns the OS-locked thread that CFRunLoop is pinned to. Setup +// result is reported on errCh so Register can surface failures synchronously. +func runRunLoop(errCh chan<- error) { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + sess, err := setupSession() + if err == nil { + serviceRegistryMu.Lock() + session = sess + serviceRegistryMu.Unlock() + } + errCh <- err + if err != nil { + return + } + + defer func() { + if r := recover(); r != nil { + log.Errorf("panic in sleep runloop: %v", r) + } + }() + cf.CFRunLoopRun() +} + +// setupSession performs the IOKit registration on the current thread. Panics +// are converted to errors so runRunLoop never leaves errCh unsent. +func setupSession() (s *runLoopSession, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("panic during runloop setup: %v", r) + } + }() + + var portRef, notifier uintptr + rp := ioKit.IORegisterForSystemPower(0, &portRef, callbackThunk, ¬ifier) + if rp == 0 { + return nil, fmt.Errorf("IORegisterForSystemPower returned zero") + } + + rl := cf.CFRunLoopGetCurrent() + source := ioKit.IONotificationPortGetRunLoopSource(portRef) + cf.CFRunLoopAddSource(rl, source, cfCommonModes) + + return &runLoopSession{rl: rl, port: portRef, notifier: notifier, rp: rp}, nil +} diff --git a/client/proto/daemon.pb.go b/client/proto/daemon.pb.go index 6506307d3..31658d5a1 100644 --- a/client/proto/daemon.pb.go +++ b/client/proto/daemon.pb.go @@ -143,56 +143,6 @@ func (ExposeProtocol) EnumDescriptor() ([]byte, []int) { return file_daemon_proto_rawDescGZIP(), []int{1} } -// avoid collision with loglevel enum -type OSLifecycleRequest_CycleType int32 - -const ( - OSLifecycleRequest_UNKNOWN OSLifecycleRequest_CycleType = 0 - OSLifecycleRequest_SLEEP OSLifecycleRequest_CycleType = 1 - OSLifecycleRequest_WAKEUP OSLifecycleRequest_CycleType = 2 -) - -// Enum value maps for OSLifecycleRequest_CycleType. -var ( - OSLifecycleRequest_CycleType_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SLEEP", - 2: "WAKEUP", - } - OSLifecycleRequest_CycleType_value = map[string]int32{ - "UNKNOWN": 0, - "SLEEP": 1, - "WAKEUP": 2, - } -) - -func (x OSLifecycleRequest_CycleType) Enum() *OSLifecycleRequest_CycleType { - p := new(OSLifecycleRequest_CycleType) - *p = x - return p -} - -func (x OSLifecycleRequest_CycleType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (OSLifecycleRequest_CycleType) Descriptor() protoreflect.EnumDescriptor { - return file_daemon_proto_enumTypes[2].Descriptor() -} - -func (OSLifecycleRequest_CycleType) Type() protoreflect.EnumType { - return &file_daemon_proto_enumTypes[2] -} - -func (x OSLifecycleRequest_CycleType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use OSLifecycleRequest_CycleType.Descriptor instead. -func (OSLifecycleRequest_CycleType) EnumDescriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{1, 0} -} - type SystemEvent_Severity int32 const ( @@ -229,11 +179,11 @@ func (x SystemEvent_Severity) String() string { } func (SystemEvent_Severity) Descriptor() protoreflect.EnumDescriptor { - return file_daemon_proto_enumTypes[3].Descriptor() + return file_daemon_proto_enumTypes[2].Descriptor() } func (SystemEvent_Severity) Type() protoreflect.EnumType { - return &file_daemon_proto_enumTypes[3] + return &file_daemon_proto_enumTypes[2] } func (x SystemEvent_Severity) Number() protoreflect.EnumNumber { @@ -242,7 +192,7 @@ func (x SystemEvent_Severity) Number() protoreflect.EnumNumber { // Deprecated: Use SystemEvent_Severity.Descriptor instead. func (SystemEvent_Severity) EnumDescriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{53, 0} + return file_daemon_proto_rawDescGZIP(), []int{51, 0} } type SystemEvent_Category int32 @@ -284,11 +234,11 @@ func (x SystemEvent_Category) String() string { } func (SystemEvent_Category) Descriptor() protoreflect.EnumDescriptor { - return file_daemon_proto_enumTypes[4].Descriptor() + return file_daemon_proto_enumTypes[3].Descriptor() } func (SystemEvent_Category) Type() protoreflect.EnumType { - return &file_daemon_proto_enumTypes[4] + return &file_daemon_proto_enumTypes[3] } func (x SystemEvent_Category) Number() protoreflect.EnumNumber { @@ -297,7 +247,7 @@ func (x SystemEvent_Category) Number() protoreflect.EnumNumber { // Deprecated: Use SystemEvent_Category.Descriptor instead. func (SystemEvent_Category) EnumDescriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{53, 1} + return file_daemon_proto_rawDescGZIP(), []int{51, 1} } type EmptyRequest struct { @@ -336,86 +286,6 @@ func (*EmptyRequest) Descriptor() ([]byte, []int) { return file_daemon_proto_rawDescGZIP(), []int{0} } -type OSLifecycleRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Type OSLifecycleRequest_CycleType `protobuf:"varint,1,opt,name=type,proto3,enum=daemon.OSLifecycleRequest_CycleType" json:"type,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *OSLifecycleRequest) Reset() { - *x = OSLifecycleRequest{} - mi := &file_daemon_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *OSLifecycleRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OSLifecycleRequest) ProtoMessage() {} - -func (x *OSLifecycleRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OSLifecycleRequest.ProtoReflect.Descriptor instead. -func (*OSLifecycleRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{1} -} - -func (x *OSLifecycleRequest) GetType() OSLifecycleRequest_CycleType { - if x != nil { - return x.Type - } - return OSLifecycleRequest_UNKNOWN -} - -type OSLifecycleResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *OSLifecycleResponse) Reset() { - *x = OSLifecycleResponse{} - mi := &file_daemon_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *OSLifecycleResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OSLifecycleResponse) ProtoMessage() {} - -func (x *OSLifecycleResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OSLifecycleResponse.ProtoReflect.Descriptor instead. -func (*OSLifecycleResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{2} -} - type LoginRequest struct { state protoimpl.MessageState `protogen:"open.v1"` // setupKey netbird setup key. @@ -478,7 +348,7 @@ type LoginRequest struct { func (x *LoginRequest) Reset() { *x = LoginRequest{} - mi := &file_daemon_proto_msgTypes[3] + mi := &file_daemon_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -490,7 +360,7 @@ func (x *LoginRequest) String() string { func (*LoginRequest) ProtoMessage() {} func (x *LoginRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[3] + mi := &file_daemon_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -503,7 +373,7 @@ func (x *LoginRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use LoginRequest.ProtoReflect.Descriptor instead. func (*LoginRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{3} + return file_daemon_proto_rawDescGZIP(), []int{1} } func (x *LoginRequest) GetSetupKey() string { @@ -792,7 +662,7 @@ type LoginResponse struct { func (x *LoginResponse) Reset() { *x = LoginResponse{} - mi := &file_daemon_proto_msgTypes[4] + mi := &file_daemon_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -804,7 +674,7 @@ func (x *LoginResponse) String() string { func (*LoginResponse) ProtoMessage() {} func (x *LoginResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[4] + mi := &file_daemon_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -817,7 +687,7 @@ func (x *LoginResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use LoginResponse.ProtoReflect.Descriptor instead. func (*LoginResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{4} + return file_daemon_proto_rawDescGZIP(), []int{2} } func (x *LoginResponse) GetNeedsSSOLogin() bool { @@ -858,7 +728,7 @@ type WaitSSOLoginRequest struct { func (x *WaitSSOLoginRequest) Reset() { *x = WaitSSOLoginRequest{} - mi := &file_daemon_proto_msgTypes[5] + mi := &file_daemon_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -870,7 +740,7 @@ func (x *WaitSSOLoginRequest) String() string { func (*WaitSSOLoginRequest) ProtoMessage() {} func (x *WaitSSOLoginRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[5] + mi := &file_daemon_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -883,7 +753,7 @@ func (x *WaitSSOLoginRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WaitSSOLoginRequest.ProtoReflect.Descriptor instead. func (*WaitSSOLoginRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{5} + return file_daemon_proto_rawDescGZIP(), []int{3} } func (x *WaitSSOLoginRequest) GetUserCode() string { @@ -909,7 +779,7 @@ type WaitSSOLoginResponse struct { func (x *WaitSSOLoginResponse) Reset() { *x = WaitSSOLoginResponse{} - mi := &file_daemon_proto_msgTypes[6] + mi := &file_daemon_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -921,7 +791,7 @@ func (x *WaitSSOLoginResponse) String() string { func (*WaitSSOLoginResponse) ProtoMessage() {} func (x *WaitSSOLoginResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[6] + mi := &file_daemon_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -934,7 +804,7 @@ func (x *WaitSSOLoginResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WaitSSOLoginResponse.ProtoReflect.Descriptor instead. func (*WaitSSOLoginResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{6} + return file_daemon_proto_rawDescGZIP(), []int{4} } func (x *WaitSSOLoginResponse) GetEmail() string { @@ -954,7 +824,7 @@ type UpRequest struct { func (x *UpRequest) Reset() { *x = UpRequest{} - mi := &file_daemon_proto_msgTypes[7] + mi := &file_daemon_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -966,7 +836,7 @@ func (x *UpRequest) String() string { func (*UpRequest) ProtoMessage() {} func (x *UpRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[7] + mi := &file_daemon_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -979,7 +849,7 @@ func (x *UpRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpRequest.ProtoReflect.Descriptor instead. func (*UpRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{7} + return file_daemon_proto_rawDescGZIP(), []int{5} } func (x *UpRequest) GetProfileName() string { @@ -1004,7 +874,7 @@ type UpResponse struct { func (x *UpResponse) Reset() { *x = UpResponse{} - mi := &file_daemon_proto_msgTypes[8] + mi := &file_daemon_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1016,7 +886,7 @@ func (x *UpResponse) String() string { func (*UpResponse) ProtoMessage() {} func (x *UpResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[8] + mi := &file_daemon_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1029,7 +899,7 @@ func (x *UpResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use UpResponse.ProtoReflect.Descriptor instead. func (*UpResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{8} + return file_daemon_proto_rawDescGZIP(), []int{6} } type StatusRequest struct { @@ -1044,7 +914,7 @@ type StatusRequest struct { func (x *StatusRequest) Reset() { *x = StatusRequest{} - mi := &file_daemon_proto_msgTypes[9] + mi := &file_daemon_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1056,7 +926,7 @@ func (x *StatusRequest) String() string { func (*StatusRequest) ProtoMessage() {} func (x *StatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[9] + mi := &file_daemon_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1069,7 +939,7 @@ func (x *StatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StatusRequest.ProtoReflect.Descriptor instead. func (*StatusRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{9} + return file_daemon_proto_rawDescGZIP(), []int{7} } func (x *StatusRequest) GetGetFullPeerStatus() bool { @@ -1106,7 +976,7 @@ type StatusResponse struct { func (x *StatusResponse) Reset() { *x = StatusResponse{} - mi := &file_daemon_proto_msgTypes[10] + mi := &file_daemon_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1118,7 +988,7 @@ func (x *StatusResponse) String() string { func (*StatusResponse) ProtoMessage() {} func (x *StatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[10] + mi := &file_daemon_proto_msgTypes[8] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1131,7 +1001,7 @@ func (x *StatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead. func (*StatusResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{10} + return file_daemon_proto_rawDescGZIP(), []int{8} } func (x *StatusResponse) GetStatus() string { @@ -1163,7 +1033,7 @@ type DownRequest struct { func (x *DownRequest) Reset() { *x = DownRequest{} - mi := &file_daemon_proto_msgTypes[11] + mi := &file_daemon_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1175,7 +1045,7 @@ func (x *DownRequest) String() string { func (*DownRequest) ProtoMessage() {} func (x *DownRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[11] + mi := &file_daemon_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1188,7 +1058,7 @@ func (x *DownRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DownRequest.ProtoReflect.Descriptor instead. func (*DownRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{11} + return file_daemon_proto_rawDescGZIP(), []int{9} } type DownResponse struct { @@ -1199,7 +1069,7 @@ type DownResponse struct { func (x *DownResponse) Reset() { *x = DownResponse{} - mi := &file_daemon_proto_msgTypes[12] + mi := &file_daemon_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1211,7 +1081,7 @@ func (x *DownResponse) String() string { func (*DownResponse) ProtoMessage() {} func (x *DownResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[12] + mi := &file_daemon_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1224,7 +1094,7 @@ func (x *DownResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DownResponse.ProtoReflect.Descriptor instead. func (*DownResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{12} + return file_daemon_proto_rawDescGZIP(), []int{10} } type GetConfigRequest struct { @@ -1237,7 +1107,7 @@ type GetConfigRequest struct { func (x *GetConfigRequest) Reset() { *x = GetConfigRequest{} - mi := &file_daemon_proto_msgTypes[13] + mi := &file_daemon_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1249,7 +1119,7 @@ func (x *GetConfigRequest) String() string { func (*GetConfigRequest) ProtoMessage() {} func (x *GetConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[13] + mi := &file_daemon_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1262,7 +1132,7 @@ func (x *GetConfigRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetConfigRequest.ProtoReflect.Descriptor instead. func (*GetConfigRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{13} + return file_daemon_proto_rawDescGZIP(), []int{11} } func (x *GetConfigRequest) GetProfileName() string { @@ -1318,7 +1188,7 @@ type GetConfigResponse struct { func (x *GetConfigResponse) Reset() { *x = GetConfigResponse{} - mi := &file_daemon_proto_msgTypes[14] + mi := &file_daemon_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1330,7 +1200,7 @@ func (x *GetConfigResponse) String() string { func (*GetConfigResponse) ProtoMessage() {} func (x *GetConfigResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[14] + mi := &file_daemon_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1343,7 +1213,7 @@ func (x *GetConfigResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetConfigResponse.ProtoReflect.Descriptor instead. func (*GetConfigResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{14} + return file_daemon_proto_rawDescGZIP(), []int{12} } func (x *GetConfigResponse) GetManagementUrl() string { @@ -1555,7 +1425,7 @@ type PeerState struct { func (x *PeerState) Reset() { *x = PeerState{} - mi := &file_daemon_proto_msgTypes[15] + mi := &file_daemon_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1567,7 +1437,7 @@ func (x *PeerState) String() string { func (*PeerState) ProtoMessage() {} func (x *PeerState) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[15] + mi := &file_daemon_proto_msgTypes[13] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1580,7 +1450,7 @@ func (x *PeerState) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerState.ProtoReflect.Descriptor instead. func (*PeerState) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{15} + return file_daemon_proto_rawDescGZIP(), []int{13} } func (x *PeerState) GetIP() string { @@ -1725,7 +1595,7 @@ type LocalPeerState struct { func (x *LocalPeerState) Reset() { *x = LocalPeerState{} - mi := &file_daemon_proto_msgTypes[16] + mi := &file_daemon_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1737,7 +1607,7 @@ func (x *LocalPeerState) String() string { func (*LocalPeerState) ProtoMessage() {} func (x *LocalPeerState) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[16] + mi := &file_daemon_proto_msgTypes[14] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1750,7 +1620,7 @@ func (x *LocalPeerState) ProtoReflect() protoreflect.Message { // Deprecated: Use LocalPeerState.ProtoReflect.Descriptor instead. func (*LocalPeerState) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{16} + return file_daemon_proto_rawDescGZIP(), []int{14} } func (x *LocalPeerState) GetIP() string { @@ -1814,7 +1684,7 @@ type SignalState struct { func (x *SignalState) Reset() { *x = SignalState{} - mi := &file_daemon_proto_msgTypes[17] + mi := &file_daemon_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1826,7 +1696,7 @@ func (x *SignalState) String() string { func (*SignalState) ProtoMessage() {} func (x *SignalState) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[17] + mi := &file_daemon_proto_msgTypes[15] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1839,7 +1709,7 @@ func (x *SignalState) ProtoReflect() protoreflect.Message { // Deprecated: Use SignalState.ProtoReflect.Descriptor instead. func (*SignalState) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{17} + return file_daemon_proto_rawDescGZIP(), []int{15} } func (x *SignalState) GetURL() string { @@ -1875,7 +1745,7 @@ type ManagementState struct { func (x *ManagementState) Reset() { *x = ManagementState{} - mi := &file_daemon_proto_msgTypes[18] + mi := &file_daemon_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1887,7 +1757,7 @@ func (x *ManagementState) String() string { func (*ManagementState) ProtoMessage() {} func (x *ManagementState) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[18] + mi := &file_daemon_proto_msgTypes[16] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1900,7 +1770,7 @@ func (x *ManagementState) ProtoReflect() protoreflect.Message { // Deprecated: Use ManagementState.ProtoReflect.Descriptor instead. func (*ManagementState) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{18} + return file_daemon_proto_rawDescGZIP(), []int{16} } func (x *ManagementState) GetURL() string { @@ -1936,7 +1806,7 @@ type RelayState struct { func (x *RelayState) Reset() { *x = RelayState{} - mi := &file_daemon_proto_msgTypes[19] + mi := &file_daemon_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1948,7 +1818,7 @@ func (x *RelayState) String() string { func (*RelayState) ProtoMessage() {} func (x *RelayState) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[19] + mi := &file_daemon_proto_msgTypes[17] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1961,7 +1831,7 @@ func (x *RelayState) ProtoReflect() protoreflect.Message { // Deprecated: Use RelayState.ProtoReflect.Descriptor instead. func (*RelayState) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{19} + return file_daemon_proto_rawDescGZIP(), []int{17} } func (x *RelayState) GetURI() string { @@ -1997,7 +1867,7 @@ type NSGroupState struct { func (x *NSGroupState) Reset() { *x = NSGroupState{} - mi := &file_daemon_proto_msgTypes[20] + mi := &file_daemon_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2009,7 +1879,7 @@ func (x *NSGroupState) String() string { func (*NSGroupState) ProtoMessage() {} func (x *NSGroupState) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[20] + mi := &file_daemon_proto_msgTypes[18] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2022,7 +1892,7 @@ func (x *NSGroupState) ProtoReflect() protoreflect.Message { // Deprecated: Use NSGroupState.ProtoReflect.Descriptor instead. func (*NSGroupState) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{20} + return file_daemon_proto_rawDescGZIP(), []int{18} } func (x *NSGroupState) GetServers() []string { @@ -2067,7 +1937,7 @@ type SSHSessionInfo struct { func (x *SSHSessionInfo) Reset() { *x = SSHSessionInfo{} - mi := &file_daemon_proto_msgTypes[21] + mi := &file_daemon_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2079,7 +1949,7 @@ func (x *SSHSessionInfo) String() string { func (*SSHSessionInfo) ProtoMessage() {} func (x *SSHSessionInfo) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[21] + mi := &file_daemon_proto_msgTypes[19] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2092,7 +1962,7 @@ func (x *SSHSessionInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use SSHSessionInfo.ProtoReflect.Descriptor instead. func (*SSHSessionInfo) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{21} + return file_daemon_proto_rawDescGZIP(), []int{19} } func (x *SSHSessionInfo) GetUsername() string { @@ -2141,7 +2011,7 @@ type SSHServerState struct { func (x *SSHServerState) Reset() { *x = SSHServerState{} - mi := &file_daemon_proto_msgTypes[22] + mi := &file_daemon_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2153,7 +2023,7 @@ func (x *SSHServerState) String() string { func (*SSHServerState) ProtoMessage() {} func (x *SSHServerState) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[22] + mi := &file_daemon_proto_msgTypes[20] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2166,7 +2036,7 @@ func (x *SSHServerState) ProtoReflect() protoreflect.Message { // Deprecated: Use SSHServerState.ProtoReflect.Descriptor instead. func (*SSHServerState) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{22} + return file_daemon_proto_rawDescGZIP(), []int{20} } func (x *SSHServerState) GetEnabled() bool { @@ -2202,7 +2072,7 @@ type FullStatus struct { func (x *FullStatus) Reset() { *x = FullStatus{} - mi := &file_daemon_proto_msgTypes[23] + mi := &file_daemon_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2214,7 +2084,7 @@ func (x *FullStatus) String() string { func (*FullStatus) ProtoMessage() {} func (x *FullStatus) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[23] + mi := &file_daemon_proto_msgTypes[21] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2227,7 +2097,7 @@ func (x *FullStatus) ProtoReflect() protoreflect.Message { // Deprecated: Use FullStatus.ProtoReflect.Descriptor instead. func (*FullStatus) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{23} + return file_daemon_proto_rawDescGZIP(), []int{21} } func (x *FullStatus) GetManagementState() *ManagementState { @@ -2309,7 +2179,7 @@ type ListNetworksRequest struct { func (x *ListNetworksRequest) Reset() { *x = ListNetworksRequest{} - mi := &file_daemon_proto_msgTypes[24] + mi := &file_daemon_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2321,7 +2191,7 @@ func (x *ListNetworksRequest) String() string { func (*ListNetworksRequest) ProtoMessage() {} func (x *ListNetworksRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[24] + mi := &file_daemon_proto_msgTypes[22] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2334,7 +2204,7 @@ func (x *ListNetworksRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListNetworksRequest.ProtoReflect.Descriptor instead. func (*ListNetworksRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{24} + return file_daemon_proto_rawDescGZIP(), []int{22} } type ListNetworksResponse struct { @@ -2346,7 +2216,7 @@ type ListNetworksResponse struct { func (x *ListNetworksResponse) Reset() { *x = ListNetworksResponse{} - mi := &file_daemon_proto_msgTypes[25] + mi := &file_daemon_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2358,7 +2228,7 @@ func (x *ListNetworksResponse) String() string { func (*ListNetworksResponse) ProtoMessage() {} func (x *ListNetworksResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[25] + mi := &file_daemon_proto_msgTypes[23] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2371,7 +2241,7 @@ func (x *ListNetworksResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListNetworksResponse.ProtoReflect.Descriptor instead. func (*ListNetworksResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{25} + return file_daemon_proto_rawDescGZIP(), []int{23} } func (x *ListNetworksResponse) GetRoutes() []*Network { @@ -2392,7 +2262,7 @@ type SelectNetworksRequest struct { func (x *SelectNetworksRequest) Reset() { *x = SelectNetworksRequest{} - mi := &file_daemon_proto_msgTypes[26] + mi := &file_daemon_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2404,7 +2274,7 @@ func (x *SelectNetworksRequest) String() string { func (*SelectNetworksRequest) ProtoMessage() {} func (x *SelectNetworksRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[26] + mi := &file_daemon_proto_msgTypes[24] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2417,7 +2287,7 @@ func (x *SelectNetworksRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SelectNetworksRequest.ProtoReflect.Descriptor instead. func (*SelectNetworksRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{26} + return file_daemon_proto_rawDescGZIP(), []int{24} } func (x *SelectNetworksRequest) GetNetworkIDs() []string { @@ -2449,7 +2319,7 @@ type SelectNetworksResponse struct { func (x *SelectNetworksResponse) Reset() { *x = SelectNetworksResponse{} - mi := &file_daemon_proto_msgTypes[27] + mi := &file_daemon_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2461,7 +2331,7 @@ func (x *SelectNetworksResponse) String() string { func (*SelectNetworksResponse) ProtoMessage() {} func (x *SelectNetworksResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[27] + mi := &file_daemon_proto_msgTypes[25] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2474,7 +2344,7 @@ func (x *SelectNetworksResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SelectNetworksResponse.ProtoReflect.Descriptor instead. func (*SelectNetworksResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{27} + return file_daemon_proto_rawDescGZIP(), []int{25} } type IPList struct { @@ -2486,7 +2356,7 @@ type IPList struct { func (x *IPList) Reset() { *x = IPList{} - mi := &file_daemon_proto_msgTypes[28] + mi := &file_daemon_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2498,7 +2368,7 @@ func (x *IPList) String() string { func (*IPList) ProtoMessage() {} func (x *IPList) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[28] + mi := &file_daemon_proto_msgTypes[26] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2511,7 +2381,7 @@ func (x *IPList) ProtoReflect() protoreflect.Message { // Deprecated: Use IPList.ProtoReflect.Descriptor instead. func (*IPList) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{28} + return file_daemon_proto_rawDescGZIP(), []int{26} } func (x *IPList) GetIps() []string { @@ -2534,7 +2404,7 @@ type Network struct { func (x *Network) Reset() { *x = Network{} - mi := &file_daemon_proto_msgTypes[29] + mi := &file_daemon_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2546,7 +2416,7 @@ func (x *Network) String() string { func (*Network) ProtoMessage() {} func (x *Network) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[29] + mi := &file_daemon_proto_msgTypes[27] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2559,7 +2429,7 @@ func (x *Network) ProtoReflect() protoreflect.Message { // Deprecated: Use Network.ProtoReflect.Descriptor instead. func (*Network) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{29} + return file_daemon_proto_rawDescGZIP(), []int{27} } func (x *Network) GetID() string { @@ -2611,7 +2481,7 @@ type PortInfo struct { func (x *PortInfo) Reset() { *x = PortInfo{} - mi := &file_daemon_proto_msgTypes[30] + mi := &file_daemon_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2623,7 +2493,7 @@ func (x *PortInfo) String() string { func (*PortInfo) ProtoMessage() {} func (x *PortInfo) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[30] + mi := &file_daemon_proto_msgTypes[28] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2636,7 +2506,7 @@ func (x *PortInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use PortInfo.ProtoReflect.Descriptor instead. func (*PortInfo) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{30} + return file_daemon_proto_rawDescGZIP(), []int{28} } func (x *PortInfo) GetPortSelection() isPortInfo_PortSelection { @@ -2693,7 +2563,7 @@ type ForwardingRule struct { func (x *ForwardingRule) Reset() { *x = ForwardingRule{} - mi := &file_daemon_proto_msgTypes[31] + mi := &file_daemon_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2705,7 +2575,7 @@ func (x *ForwardingRule) String() string { func (*ForwardingRule) ProtoMessage() {} func (x *ForwardingRule) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[31] + mi := &file_daemon_proto_msgTypes[29] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2718,7 +2588,7 @@ func (x *ForwardingRule) ProtoReflect() protoreflect.Message { // Deprecated: Use ForwardingRule.ProtoReflect.Descriptor instead. func (*ForwardingRule) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{31} + return file_daemon_proto_rawDescGZIP(), []int{29} } func (x *ForwardingRule) GetProtocol() string { @@ -2765,7 +2635,7 @@ type ForwardingRulesResponse struct { func (x *ForwardingRulesResponse) Reset() { *x = ForwardingRulesResponse{} - mi := &file_daemon_proto_msgTypes[32] + mi := &file_daemon_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2777,7 +2647,7 @@ func (x *ForwardingRulesResponse) String() string { func (*ForwardingRulesResponse) ProtoMessage() {} func (x *ForwardingRulesResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[32] + mi := &file_daemon_proto_msgTypes[30] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2790,7 +2660,7 @@ func (x *ForwardingRulesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ForwardingRulesResponse.ProtoReflect.Descriptor instead. func (*ForwardingRulesResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{32} + return file_daemon_proto_rawDescGZIP(), []int{30} } func (x *ForwardingRulesResponse) GetRules() []*ForwardingRule { @@ -2813,7 +2683,7 @@ type DebugBundleRequest struct { func (x *DebugBundleRequest) Reset() { *x = DebugBundleRequest{} - mi := &file_daemon_proto_msgTypes[33] + mi := &file_daemon_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2825,7 +2695,7 @@ func (x *DebugBundleRequest) String() string { func (*DebugBundleRequest) ProtoMessage() {} func (x *DebugBundleRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[33] + mi := &file_daemon_proto_msgTypes[31] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2838,7 +2708,7 @@ func (x *DebugBundleRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DebugBundleRequest.ProtoReflect.Descriptor instead. func (*DebugBundleRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{33} + return file_daemon_proto_rawDescGZIP(), []int{31} } func (x *DebugBundleRequest) GetAnonymize() bool { @@ -2880,7 +2750,7 @@ type DebugBundleResponse struct { func (x *DebugBundleResponse) Reset() { *x = DebugBundleResponse{} - mi := &file_daemon_proto_msgTypes[34] + mi := &file_daemon_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2892,7 +2762,7 @@ func (x *DebugBundleResponse) String() string { func (*DebugBundleResponse) ProtoMessage() {} func (x *DebugBundleResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[34] + mi := &file_daemon_proto_msgTypes[32] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2905,7 +2775,7 @@ func (x *DebugBundleResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DebugBundleResponse.ProtoReflect.Descriptor instead. func (*DebugBundleResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{34} + return file_daemon_proto_rawDescGZIP(), []int{32} } func (x *DebugBundleResponse) GetPath() string { @@ -2937,7 +2807,7 @@ type GetLogLevelRequest struct { func (x *GetLogLevelRequest) Reset() { *x = GetLogLevelRequest{} - mi := &file_daemon_proto_msgTypes[35] + mi := &file_daemon_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2949,7 +2819,7 @@ func (x *GetLogLevelRequest) String() string { func (*GetLogLevelRequest) ProtoMessage() {} func (x *GetLogLevelRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[35] + mi := &file_daemon_proto_msgTypes[33] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2962,7 +2832,7 @@ func (x *GetLogLevelRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetLogLevelRequest.ProtoReflect.Descriptor instead. func (*GetLogLevelRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{35} + return file_daemon_proto_rawDescGZIP(), []int{33} } type GetLogLevelResponse struct { @@ -2974,7 +2844,7 @@ type GetLogLevelResponse struct { func (x *GetLogLevelResponse) Reset() { *x = GetLogLevelResponse{} - mi := &file_daemon_proto_msgTypes[36] + mi := &file_daemon_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2986,7 +2856,7 @@ func (x *GetLogLevelResponse) String() string { func (*GetLogLevelResponse) ProtoMessage() {} func (x *GetLogLevelResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[36] + mi := &file_daemon_proto_msgTypes[34] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2999,7 +2869,7 @@ func (x *GetLogLevelResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetLogLevelResponse.ProtoReflect.Descriptor instead. func (*GetLogLevelResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{36} + return file_daemon_proto_rawDescGZIP(), []int{34} } func (x *GetLogLevelResponse) GetLevel() LogLevel { @@ -3018,7 +2888,7 @@ type SetLogLevelRequest struct { func (x *SetLogLevelRequest) Reset() { *x = SetLogLevelRequest{} - mi := &file_daemon_proto_msgTypes[37] + mi := &file_daemon_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3030,7 +2900,7 @@ func (x *SetLogLevelRequest) String() string { func (*SetLogLevelRequest) ProtoMessage() {} func (x *SetLogLevelRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[37] + mi := &file_daemon_proto_msgTypes[35] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3043,7 +2913,7 @@ func (x *SetLogLevelRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SetLogLevelRequest.ProtoReflect.Descriptor instead. func (*SetLogLevelRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{37} + return file_daemon_proto_rawDescGZIP(), []int{35} } func (x *SetLogLevelRequest) GetLevel() LogLevel { @@ -3061,7 +2931,7 @@ type SetLogLevelResponse struct { func (x *SetLogLevelResponse) Reset() { *x = SetLogLevelResponse{} - mi := &file_daemon_proto_msgTypes[38] + mi := &file_daemon_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3073,7 +2943,7 @@ func (x *SetLogLevelResponse) String() string { func (*SetLogLevelResponse) ProtoMessage() {} func (x *SetLogLevelResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[38] + mi := &file_daemon_proto_msgTypes[36] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3086,7 +2956,7 @@ func (x *SetLogLevelResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SetLogLevelResponse.ProtoReflect.Descriptor instead. func (*SetLogLevelResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{38} + return file_daemon_proto_rawDescGZIP(), []int{36} } // State represents a daemon state entry @@ -3099,7 +2969,7 @@ type State struct { func (x *State) Reset() { *x = State{} - mi := &file_daemon_proto_msgTypes[39] + mi := &file_daemon_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3111,7 +2981,7 @@ func (x *State) String() string { func (*State) ProtoMessage() {} func (x *State) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[39] + mi := &file_daemon_proto_msgTypes[37] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3124,7 +2994,7 @@ func (x *State) ProtoReflect() protoreflect.Message { // Deprecated: Use State.ProtoReflect.Descriptor instead. func (*State) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{39} + return file_daemon_proto_rawDescGZIP(), []int{37} } func (x *State) GetName() string { @@ -3143,7 +3013,7 @@ type ListStatesRequest struct { func (x *ListStatesRequest) Reset() { *x = ListStatesRequest{} - mi := &file_daemon_proto_msgTypes[40] + mi := &file_daemon_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3155,7 +3025,7 @@ func (x *ListStatesRequest) String() string { func (*ListStatesRequest) ProtoMessage() {} func (x *ListStatesRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[40] + mi := &file_daemon_proto_msgTypes[38] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3168,7 +3038,7 @@ func (x *ListStatesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListStatesRequest.ProtoReflect.Descriptor instead. func (*ListStatesRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{40} + return file_daemon_proto_rawDescGZIP(), []int{38} } // ListStatesResponse contains a list of states @@ -3181,7 +3051,7 @@ type ListStatesResponse struct { func (x *ListStatesResponse) Reset() { *x = ListStatesResponse{} - mi := &file_daemon_proto_msgTypes[41] + mi := &file_daemon_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3193,7 +3063,7 @@ func (x *ListStatesResponse) String() string { func (*ListStatesResponse) ProtoMessage() {} func (x *ListStatesResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[41] + mi := &file_daemon_proto_msgTypes[39] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3206,7 +3076,7 @@ func (x *ListStatesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListStatesResponse.ProtoReflect.Descriptor instead. func (*ListStatesResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{41} + return file_daemon_proto_rawDescGZIP(), []int{39} } func (x *ListStatesResponse) GetStates() []*State { @@ -3227,7 +3097,7 @@ type CleanStateRequest struct { func (x *CleanStateRequest) Reset() { *x = CleanStateRequest{} - mi := &file_daemon_proto_msgTypes[42] + mi := &file_daemon_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3239,7 +3109,7 @@ func (x *CleanStateRequest) String() string { func (*CleanStateRequest) ProtoMessage() {} func (x *CleanStateRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[42] + mi := &file_daemon_proto_msgTypes[40] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3252,7 +3122,7 @@ func (x *CleanStateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CleanStateRequest.ProtoReflect.Descriptor instead. func (*CleanStateRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{42} + return file_daemon_proto_rawDescGZIP(), []int{40} } func (x *CleanStateRequest) GetStateName() string { @@ -3279,7 +3149,7 @@ type CleanStateResponse struct { func (x *CleanStateResponse) Reset() { *x = CleanStateResponse{} - mi := &file_daemon_proto_msgTypes[43] + mi := &file_daemon_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3291,7 +3161,7 @@ func (x *CleanStateResponse) String() string { func (*CleanStateResponse) ProtoMessage() {} func (x *CleanStateResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[43] + mi := &file_daemon_proto_msgTypes[41] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3304,7 +3174,7 @@ func (x *CleanStateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CleanStateResponse.ProtoReflect.Descriptor instead. func (*CleanStateResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{43} + return file_daemon_proto_rawDescGZIP(), []int{41} } func (x *CleanStateResponse) GetCleanedStates() int32 { @@ -3325,7 +3195,7 @@ type DeleteStateRequest struct { func (x *DeleteStateRequest) Reset() { *x = DeleteStateRequest{} - mi := &file_daemon_proto_msgTypes[44] + mi := &file_daemon_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3337,7 +3207,7 @@ func (x *DeleteStateRequest) String() string { func (*DeleteStateRequest) ProtoMessage() {} func (x *DeleteStateRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[44] + mi := &file_daemon_proto_msgTypes[42] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3350,7 +3220,7 @@ func (x *DeleteStateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteStateRequest.ProtoReflect.Descriptor instead. func (*DeleteStateRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{44} + return file_daemon_proto_rawDescGZIP(), []int{42} } func (x *DeleteStateRequest) GetStateName() string { @@ -3377,7 +3247,7 @@ type DeleteStateResponse struct { func (x *DeleteStateResponse) Reset() { *x = DeleteStateResponse{} - mi := &file_daemon_proto_msgTypes[45] + mi := &file_daemon_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3389,7 +3259,7 @@ func (x *DeleteStateResponse) String() string { func (*DeleteStateResponse) ProtoMessage() {} func (x *DeleteStateResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[45] + mi := &file_daemon_proto_msgTypes[43] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3402,7 +3272,7 @@ func (x *DeleteStateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteStateResponse.ProtoReflect.Descriptor instead. func (*DeleteStateResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{45} + return file_daemon_proto_rawDescGZIP(), []int{43} } func (x *DeleteStateResponse) GetDeletedStates() int32 { @@ -3421,7 +3291,7 @@ type SetSyncResponsePersistenceRequest struct { func (x *SetSyncResponsePersistenceRequest) Reset() { *x = SetSyncResponsePersistenceRequest{} - mi := &file_daemon_proto_msgTypes[46] + mi := &file_daemon_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3433,7 +3303,7 @@ func (x *SetSyncResponsePersistenceRequest) String() string { func (*SetSyncResponsePersistenceRequest) ProtoMessage() {} func (x *SetSyncResponsePersistenceRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[46] + mi := &file_daemon_proto_msgTypes[44] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3446,7 +3316,7 @@ func (x *SetSyncResponsePersistenceRequest) ProtoReflect() protoreflect.Message // Deprecated: Use SetSyncResponsePersistenceRequest.ProtoReflect.Descriptor instead. func (*SetSyncResponsePersistenceRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{46} + return file_daemon_proto_rawDescGZIP(), []int{44} } func (x *SetSyncResponsePersistenceRequest) GetEnabled() bool { @@ -3464,7 +3334,7 @@ type SetSyncResponsePersistenceResponse struct { func (x *SetSyncResponsePersistenceResponse) Reset() { *x = SetSyncResponsePersistenceResponse{} - mi := &file_daemon_proto_msgTypes[47] + mi := &file_daemon_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3476,7 +3346,7 @@ func (x *SetSyncResponsePersistenceResponse) String() string { func (*SetSyncResponsePersistenceResponse) ProtoMessage() {} func (x *SetSyncResponsePersistenceResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[47] + mi := &file_daemon_proto_msgTypes[45] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3489,7 +3359,7 @@ func (x *SetSyncResponsePersistenceResponse) ProtoReflect() protoreflect.Message // Deprecated: Use SetSyncResponsePersistenceResponse.ProtoReflect.Descriptor instead. func (*SetSyncResponsePersistenceResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{47} + return file_daemon_proto_rawDescGZIP(), []int{45} } type TCPFlags struct { @@ -3506,7 +3376,7 @@ type TCPFlags struct { func (x *TCPFlags) Reset() { *x = TCPFlags{} - mi := &file_daemon_proto_msgTypes[48] + mi := &file_daemon_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3518,7 +3388,7 @@ func (x *TCPFlags) String() string { func (*TCPFlags) ProtoMessage() {} func (x *TCPFlags) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[48] + mi := &file_daemon_proto_msgTypes[46] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3531,7 +3401,7 @@ func (x *TCPFlags) ProtoReflect() protoreflect.Message { // Deprecated: Use TCPFlags.ProtoReflect.Descriptor instead. func (*TCPFlags) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{48} + return file_daemon_proto_rawDescGZIP(), []int{46} } func (x *TCPFlags) GetSyn() bool { @@ -3593,7 +3463,7 @@ type TracePacketRequest struct { func (x *TracePacketRequest) Reset() { *x = TracePacketRequest{} - mi := &file_daemon_proto_msgTypes[49] + mi := &file_daemon_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3605,7 +3475,7 @@ func (x *TracePacketRequest) String() string { func (*TracePacketRequest) ProtoMessage() {} func (x *TracePacketRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[49] + mi := &file_daemon_proto_msgTypes[47] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3618,7 +3488,7 @@ func (x *TracePacketRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use TracePacketRequest.ProtoReflect.Descriptor instead. func (*TracePacketRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{49} + return file_daemon_proto_rawDescGZIP(), []int{47} } func (x *TracePacketRequest) GetSourceIp() string { @@ -3696,7 +3566,7 @@ type TraceStage struct { func (x *TraceStage) Reset() { *x = TraceStage{} - mi := &file_daemon_proto_msgTypes[50] + mi := &file_daemon_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3708,7 +3578,7 @@ func (x *TraceStage) String() string { func (*TraceStage) ProtoMessage() {} func (x *TraceStage) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[50] + mi := &file_daemon_proto_msgTypes[48] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3721,7 +3591,7 @@ func (x *TraceStage) ProtoReflect() protoreflect.Message { // Deprecated: Use TraceStage.ProtoReflect.Descriptor instead. func (*TraceStage) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{50} + return file_daemon_proto_rawDescGZIP(), []int{48} } func (x *TraceStage) GetName() string { @@ -3762,7 +3632,7 @@ type TracePacketResponse struct { func (x *TracePacketResponse) Reset() { *x = TracePacketResponse{} - mi := &file_daemon_proto_msgTypes[51] + mi := &file_daemon_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3774,7 +3644,7 @@ func (x *TracePacketResponse) String() string { func (*TracePacketResponse) ProtoMessage() {} func (x *TracePacketResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[51] + mi := &file_daemon_proto_msgTypes[49] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3787,7 +3657,7 @@ func (x *TracePacketResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use TracePacketResponse.ProtoReflect.Descriptor instead. func (*TracePacketResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{51} + return file_daemon_proto_rawDescGZIP(), []int{49} } func (x *TracePacketResponse) GetStages() []*TraceStage { @@ -3812,7 +3682,7 @@ type SubscribeRequest struct { func (x *SubscribeRequest) Reset() { *x = SubscribeRequest{} - mi := &file_daemon_proto_msgTypes[52] + mi := &file_daemon_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3824,7 +3694,7 @@ func (x *SubscribeRequest) String() string { func (*SubscribeRequest) ProtoMessage() {} func (x *SubscribeRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[52] + mi := &file_daemon_proto_msgTypes[50] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3837,7 +3707,7 @@ func (x *SubscribeRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SubscribeRequest.ProtoReflect.Descriptor instead. func (*SubscribeRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{52} + return file_daemon_proto_rawDescGZIP(), []int{50} } type SystemEvent struct { @@ -3855,7 +3725,7 @@ type SystemEvent struct { func (x *SystemEvent) Reset() { *x = SystemEvent{} - mi := &file_daemon_proto_msgTypes[53] + mi := &file_daemon_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3867,7 +3737,7 @@ func (x *SystemEvent) String() string { func (*SystemEvent) ProtoMessage() {} func (x *SystemEvent) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[53] + mi := &file_daemon_proto_msgTypes[51] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3880,7 +3750,7 @@ func (x *SystemEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use SystemEvent.ProtoReflect.Descriptor instead. func (*SystemEvent) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{53} + return file_daemon_proto_rawDescGZIP(), []int{51} } func (x *SystemEvent) GetId() string { @@ -3940,7 +3810,7 @@ type GetEventsRequest struct { func (x *GetEventsRequest) Reset() { *x = GetEventsRequest{} - mi := &file_daemon_proto_msgTypes[54] + mi := &file_daemon_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3952,7 +3822,7 @@ func (x *GetEventsRequest) String() string { func (*GetEventsRequest) ProtoMessage() {} func (x *GetEventsRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[54] + mi := &file_daemon_proto_msgTypes[52] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3965,7 +3835,7 @@ func (x *GetEventsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetEventsRequest.ProtoReflect.Descriptor instead. func (*GetEventsRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{54} + return file_daemon_proto_rawDescGZIP(), []int{52} } type GetEventsResponse struct { @@ -3977,7 +3847,7 @@ type GetEventsResponse struct { func (x *GetEventsResponse) Reset() { *x = GetEventsResponse{} - mi := &file_daemon_proto_msgTypes[55] + mi := &file_daemon_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3989,7 +3859,7 @@ func (x *GetEventsResponse) String() string { func (*GetEventsResponse) ProtoMessage() {} func (x *GetEventsResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[55] + mi := &file_daemon_proto_msgTypes[53] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4002,7 +3872,7 @@ func (x *GetEventsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetEventsResponse.ProtoReflect.Descriptor instead. func (*GetEventsResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{55} + return file_daemon_proto_rawDescGZIP(), []int{53} } func (x *GetEventsResponse) GetEvents() []*SystemEvent { @@ -4022,7 +3892,7 @@ type SwitchProfileRequest struct { func (x *SwitchProfileRequest) Reset() { *x = SwitchProfileRequest{} - mi := &file_daemon_proto_msgTypes[56] + mi := &file_daemon_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4034,7 +3904,7 @@ func (x *SwitchProfileRequest) String() string { func (*SwitchProfileRequest) ProtoMessage() {} func (x *SwitchProfileRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[56] + mi := &file_daemon_proto_msgTypes[54] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4047,7 +3917,7 @@ func (x *SwitchProfileRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SwitchProfileRequest.ProtoReflect.Descriptor instead. func (*SwitchProfileRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{56} + return file_daemon_proto_rawDescGZIP(), []int{54} } func (x *SwitchProfileRequest) GetProfileName() string { @@ -4072,7 +3942,7 @@ type SwitchProfileResponse struct { func (x *SwitchProfileResponse) Reset() { *x = SwitchProfileResponse{} - mi := &file_daemon_proto_msgTypes[57] + mi := &file_daemon_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4084,7 +3954,7 @@ func (x *SwitchProfileResponse) String() string { func (*SwitchProfileResponse) ProtoMessage() {} func (x *SwitchProfileResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[57] + mi := &file_daemon_proto_msgTypes[55] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4097,7 +3967,7 @@ func (x *SwitchProfileResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SwitchProfileResponse.ProtoReflect.Descriptor instead. func (*SwitchProfileResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{57} + return file_daemon_proto_rawDescGZIP(), []int{55} } type SetConfigRequest struct { @@ -4145,7 +4015,7 @@ type SetConfigRequest struct { func (x *SetConfigRequest) Reset() { *x = SetConfigRequest{} - mi := &file_daemon_proto_msgTypes[58] + mi := &file_daemon_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4157,7 +4027,7 @@ func (x *SetConfigRequest) String() string { func (*SetConfigRequest) ProtoMessage() {} func (x *SetConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[58] + mi := &file_daemon_proto_msgTypes[56] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4170,7 +4040,7 @@ func (x *SetConfigRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SetConfigRequest.ProtoReflect.Descriptor instead. func (*SetConfigRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{58} + return file_daemon_proto_rawDescGZIP(), []int{56} } func (x *SetConfigRequest) GetUsername() string { @@ -4419,7 +4289,7 @@ type SetConfigResponse struct { func (x *SetConfigResponse) Reset() { *x = SetConfigResponse{} - mi := &file_daemon_proto_msgTypes[59] + mi := &file_daemon_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4431,7 +4301,7 @@ func (x *SetConfigResponse) String() string { func (*SetConfigResponse) ProtoMessage() {} func (x *SetConfigResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[59] + mi := &file_daemon_proto_msgTypes[57] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4444,7 +4314,7 @@ func (x *SetConfigResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SetConfigResponse.ProtoReflect.Descriptor instead. func (*SetConfigResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{59} + return file_daemon_proto_rawDescGZIP(), []int{57} } type AddProfileRequest struct { @@ -4457,7 +4327,7 @@ type AddProfileRequest struct { func (x *AddProfileRequest) Reset() { *x = AddProfileRequest{} - mi := &file_daemon_proto_msgTypes[60] + mi := &file_daemon_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4469,7 +4339,7 @@ func (x *AddProfileRequest) String() string { func (*AddProfileRequest) ProtoMessage() {} func (x *AddProfileRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[60] + mi := &file_daemon_proto_msgTypes[58] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4482,7 +4352,7 @@ func (x *AddProfileRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AddProfileRequest.ProtoReflect.Descriptor instead. func (*AddProfileRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{60} + return file_daemon_proto_rawDescGZIP(), []int{58} } func (x *AddProfileRequest) GetUsername() string { @@ -4507,7 +4377,7 @@ type AddProfileResponse struct { func (x *AddProfileResponse) Reset() { *x = AddProfileResponse{} - mi := &file_daemon_proto_msgTypes[61] + mi := &file_daemon_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4519,7 +4389,7 @@ func (x *AddProfileResponse) String() string { func (*AddProfileResponse) ProtoMessage() {} func (x *AddProfileResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[61] + mi := &file_daemon_proto_msgTypes[59] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4532,7 +4402,7 @@ func (x *AddProfileResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use AddProfileResponse.ProtoReflect.Descriptor instead. func (*AddProfileResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{61} + return file_daemon_proto_rawDescGZIP(), []int{59} } type RemoveProfileRequest struct { @@ -4545,7 +4415,7 @@ type RemoveProfileRequest struct { func (x *RemoveProfileRequest) Reset() { *x = RemoveProfileRequest{} - mi := &file_daemon_proto_msgTypes[62] + mi := &file_daemon_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4557,7 +4427,7 @@ func (x *RemoveProfileRequest) String() string { func (*RemoveProfileRequest) ProtoMessage() {} func (x *RemoveProfileRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[62] + mi := &file_daemon_proto_msgTypes[60] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4570,7 +4440,7 @@ func (x *RemoveProfileRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoveProfileRequest.ProtoReflect.Descriptor instead. func (*RemoveProfileRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{62} + return file_daemon_proto_rawDescGZIP(), []int{60} } func (x *RemoveProfileRequest) GetUsername() string { @@ -4595,7 +4465,7 @@ type RemoveProfileResponse struct { func (x *RemoveProfileResponse) Reset() { *x = RemoveProfileResponse{} - mi := &file_daemon_proto_msgTypes[63] + mi := &file_daemon_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4607,7 +4477,7 @@ func (x *RemoveProfileResponse) String() string { func (*RemoveProfileResponse) ProtoMessage() {} func (x *RemoveProfileResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[63] + mi := &file_daemon_proto_msgTypes[61] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4620,7 +4490,7 @@ func (x *RemoveProfileResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoveProfileResponse.ProtoReflect.Descriptor instead. func (*RemoveProfileResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{63} + return file_daemon_proto_rawDescGZIP(), []int{61} } type ListProfilesRequest struct { @@ -4632,7 +4502,7 @@ type ListProfilesRequest struct { func (x *ListProfilesRequest) Reset() { *x = ListProfilesRequest{} - mi := &file_daemon_proto_msgTypes[64] + mi := &file_daemon_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4644,7 +4514,7 @@ func (x *ListProfilesRequest) String() string { func (*ListProfilesRequest) ProtoMessage() {} func (x *ListProfilesRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[64] + mi := &file_daemon_proto_msgTypes[62] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4657,7 +4527,7 @@ func (x *ListProfilesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListProfilesRequest.ProtoReflect.Descriptor instead. func (*ListProfilesRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{64} + return file_daemon_proto_rawDescGZIP(), []int{62} } func (x *ListProfilesRequest) GetUsername() string { @@ -4676,7 +4546,7 @@ type ListProfilesResponse struct { func (x *ListProfilesResponse) Reset() { *x = ListProfilesResponse{} - mi := &file_daemon_proto_msgTypes[65] + mi := &file_daemon_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4688,7 +4558,7 @@ func (x *ListProfilesResponse) String() string { func (*ListProfilesResponse) ProtoMessage() {} func (x *ListProfilesResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[65] + mi := &file_daemon_proto_msgTypes[63] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4701,7 +4571,7 @@ func (x *ListProfilesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListProfilesResponse.ProtoReflect.Descriptor instead. func (*ListProfilesResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{65} + return file_daemon_proto_rawDescGZIP(), []int{63} } func (x *ListProfilesResponse) GetProfiles() []*Profile { @@ -4721,7 +4591,7 @@ type Profile struct { func (x *Profile) Reset() { *x = Profile{} - mi := &file_daemon_proto_msgTypes[66] + mi := &file_daemon_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4733,7 +4603,7 @@ func (x *Profile) String() string { func (*Profile) ProtoMessage() {} func (x *Profile) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[66] + mi := &file_daemon_proto_msgTypes[64] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4746,7 +4616,7 @@ func (x *Profile) ProtoReflect() protoreflect.Message { // Deprecated: Use Profile.ProtoReflect.Descriptor instead. func (*Profile) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{66} + return file_daemon_proto_rawDescGZIP(), []int{64} } func (x *Profile) GetName() string { @@ -4771,7 +4641,7 @@ type GetActiveProfileRequest struct { func (x *GetActiveProfileRequest) Reset() { *x = GetActiveProfileRequest{} - mi := &file_daemon_proto_msgTypes[67] + mi := &file_daemon_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4783,7 +4653,7 @@ func (x *GetActiveProfileRequest) String() string { func (*GetActiveProfileRequest) ProtoMessage() {} func (x *GetActiveProfileRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[67] + mi := &file_daemon_proto_msgTypes[65] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4796,7 +4666,7 @@ func (x *GetActiveProfileRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetActiveProfileRequest.ProtoReflect.Descriptor instead. func (*GetActiveProfileRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{67} + return file_daemon_proto_rawDescGZIP(), []int{65} } type GetActiveProfileResponse struct { @@ -4809,7 +4679,7 @@ type GetActiveProfileResponse struct { func (x *GetActiveProfileResponse) Reset() { *x = GetActiveProfileResponse{} - mi := &file_daemon_proto_msgTypes[68] + mi := &file_daemon_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4821,7 +4691,7 @@ func (x *GetActiveProfileResponse) String() string { func (*GetActiveProfileResponse) ProtoMessage() {} func (x *GetActiveProfileResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[68] + mi := &file_daemon_proto_msgTypes[66] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4834,7 +4704,7 @@ func (x *GetActiveProfileResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetActiveProfileResponse.ProtoReflect.Descriptor instead. func (*GetActiveProfileResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{68} + return file_daemon_proto_rawDescGZIP(), []int{66} } func (x *GetActiveProfileResponse) GetProfileName() string { @@ -4861,7 +4731,7 @@ type LogoutRequest struct { func (x *LogoutRequest) Reset() { *x = LogoutRequest{} - mi := &file_daemon_proto_msgTypes[69] + mi := &file_daemon_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4873,7 +4743,7 @@ func (x *LogoutRequest) String() string { func (*LogoutRequest) ProtoMessage() {} func (x *LogoutRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[69] + mi := &file_daemon_proto_msgTypes[67] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4886,7 +4756,7 @@ func (x *LogoutRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use LogoutRequest.ProtoReflect.Descriptor instead. func (*LogoutRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{69} + return file_daemon_proto_rawDescGZIP(), []int{67} } func (x *LogoutRequest) GetProfileName() string { @@ -4911,7 +4781,7 @@ type LogoutResponse struct { func (x *LogoutResponse) Reset() { *x = LogoutResponse{} - mi := &file_daemon_proto_msgTypes[70] + mi := &file_daemon_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4923,7 +4793,7 @@ func (x *LogoutResponse) String() string { func (*LogoutResponse) ProtoMessage() {} func (x *LogoutResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[70] + mi := &file_daemon_proto_msgTypes[68] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4936,7 +4806,7 @@ func (x *LogoutResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use LogoutResponse.ProtoReflect.Descriptor instead. func (*LogoutResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{70} + return file_daemon_proto_rawDescGZIP(), []int{68} } type GetFeaturesRequest struct { @@ -4947,7 +4817,7 @@ type GetFeaturesRequest struct { func (x *GetFeaturesRequest) Reset() { *x = GetFeaturesRequest{} - mi := &file_daemon_proto_msgTypes[71] + mi := &file_daemon_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4959,7 +4829,7 @@ func (x *GetFeaturesRequest) String() string { func (*GetFeaturesRequest) ProtoMessage() {} func (x *GetFeaturesRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[71] + mi := &file_daemon_proto_msgTypes[69] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4972,7 +4842,7 @@ func (x *GetFeaturesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetFeaturesRequest.ProtoReflect.Descriptor instead. func (*GetFeaturesRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{71} + return file_daemon_proto_rawDescGZIP(), []int{69} } type GetFeaturesResponse struct { @@ -4986,7 +4856,7 @@ type GetFeaturesResponse struct { func (x *GetFeaturesResponse) Reset() { *x = GetFeaturesResponse{} - mi := &file_daemon_proto_msgTypes[72] + mi := &file_daemon_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4998,7 +4868,7 @@ func (x *GetFeaturesResponse) String() string { func (*GetFeaturesResponse) ProtoMessage() {} func (x *GetFeaturesResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[72] + mi := &file_daemon_proto_msgTypes[70] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5011,7 +4881,7 @@ func (x *GetFeaturesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetFeaturesResponse.ProtoReflect.Descriptor instead. func (*GetFeaturesResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{72} + return file_daemon_proto_rawDescGZIP(), []int{70} } func (x *GetFeaturesResponse) GetDisableProfiles() bool { @@ -5043,7 +4913,7 @@ type TriggerUpdateRequest struct { func (x *TriggerUpdateRequest) Reset() { *x = TriggerUpdateRequest{} - mi := &file_daemon_proto_msgTypes[73] + mi := &file_daemon_proto_msgTypes[71] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5055,7 +4925,7 @@ func (x *TriggerUpdateRequest) String() string { func (*TriggerUpdateRequest) ProtoMessage() {} func (x *TriggerUpdateRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[73] + mi := &file_daemon_proto_msgTypes[71] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5068,7 +4938,7 @@ func (x *TriggerUpdateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use TriggerUpdateRequest.ProtoReflect.Descriptor instead. func (*TriggerUpdateRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{73} + return file_daemon_proto_rawDescGZIP(), []int{71} } type TriggerUpdateResponse struct { @@ -5081,7 +4951,7 @@ type TriggerUpdateResponse struct { func (x *TriggerUpdateResponse) Reset() { *x = TriggerUpdateResponse{} - mi := &file_daemon_proto_msgTypes[74] + mi := &file_daemon_proto_msgTypes[72] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5093,7 +4963,7 @@ func (x *TriggerUpdateResponse) String() string { func (*TriggerUpdateResponse) ProtoMessage() {} func (x *TriggerUpdateResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[74] + mi := &file_daemon_proto_msgTypes[72] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5106,7 +4976,7 @@ func (x *TriggerUpdateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use TriggerUpdateResponse.ProtoReflect.Descriptor instead. func (*TriggerUpdateResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{74} + return file_daemon_proto_rawDescGZIP(), []int{72} } func (x *TriggerUpdateResponse) GetSuccess() bool { @@ -5134,7 +5004,7 @@ type GetPeerSSHHostKeyRequest struct { func (x *GetPeerSSHHostKeyRequest) Reset() { *x = GetPeerSSHHostKeyRequest{} - mi := &file_daemon_proto_msgTypes[75] + mi := &file_daemon_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5146,7 +5016,7 @@ func (x *GetPeerSSHHostKeyRequest) String() string { func (*GetPeerSSHHostKeyRequest) ProtoMessage() {} func (x *GetPeerSSHHostKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[75] + mi := &file_daemon_proto_msgTypes[73] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5159,7 +5029,7 @@ func (x *GetPeerSSHHostKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetPeerSSHHostKeyRequest.ProtoReflect.Descriptor instead. func (*GetPeerSSHHostKeyRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{75} + return file_daemon_proto_rawDescGZIP(), []int{73} } func (x *GetPeerSSHHostKeyRequest) GetPeerAddress() string { @@ -5186,7 +5056,7 @@ type GetPeerSSHHostKeyResponse struct { func (x *GetPeerSSHHostKeyResponse) Reset() { *x = GetPeerSSHHostKeyResponse{} - mi := &file_daemon_proto_msgTypes[76] + mi := &file_daemon_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5198,7 +5068,7 @@ func (x *GetPeerSSHHostKeyResponse) String() string { func (*GetPeerSSHHostKeyResponse) ProtoMessage() {} func (x *GetPeerSSHHostKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[76] + mi := &file_daemon_proto_msgTypes[74] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5211,7 +5081,7 @@ func (x *GetPeerSSHHostKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetPeerSSHHostKeyResponse.ProtoReflect.Descriptor instead. func (*GetPeerSSHHostKeyResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{76} + return file_daemon_proto_rawDescGZIP(), []int{74} } func (x *GetPeerSSHHostKeyResponse) GetSshHostKey() []byte { @@ -5253,7 +5123,7 @@ type RequestJWTAuthRequest struct { func (x *RequestJWTAuthRequest) Reset() { *x = RequestJWTAuthRequest{} - mi := &file_daemon_proto_msgTypes[77] + mi := &file_daemon_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5265,7 +5135,7 @@ func (x *RequestJWTAuthRequest) String() string { func (*RequestJWTAuthRequest) ProtoMessage() {} func (x *RequestJWTAuthRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[77] + mi := &file_daemon_proto_msgTypes[75] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5278,7 +5148,7 @@ func (x *RequestJWTAuthRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RequestJWTAuthRequest.ProtoReflect.Descriptor instead. func (*RequestJWTAuthRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{77} + return file_daemon_proto_rawDescGZIP(), []int{75} } func (x *RequestJWTAuthRequest) GetHint() string { @@ -5311,7 +5181,7 @@ type RequestJWTAuthResponse struct { func (x *RequestJWTAuthResponse) Reset() { *x = RequestJWTAuthResponse{} - mi := &file_daemon_proto_msgTypes[78] + mi := &file_daemon_proto_msgTypes[76] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5323,7 +5193,7 @@ func (x *RequestJWTAuthResponse) String() string { func (*RequestJWTAuthResponse) ProtoMessage() {} func (x *RequestJWTAuthResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[78] + mi := &file_daemon_proto_msgTypes[76] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5336,7 +5206,7 @@ func (x *RequestJWTAuthResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RequestJWTAuthResponse.ProtoReflect.Descriptor instead. func (*RequestJWTAuthResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{78} + return file_daemon_proto_rawDescGZIP(), []int{76} } func (x *RequestJWTAuthResponse) GetVerificationURI() string { @@ -5401,7 +5271,7 @@ type WaitJWTTokenRequest struct { func (x *WaitJWTTokenRequest) Reset() { *x = WaitJWTTokenRequest{} - mi := &file_daemon_proto_msgTypes[79] + mi := &file_daemon_proto_msgTypes[77] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5413,7 +5283,7 @@ func (x *WaitJWTTokenRequest) String() string { func (*WaitJWTTokenRequest) ProtoMessage() {} func (x *WaitJWTTokenRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[79] + mi := &file_daemon_proto_msgTypes[77] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5426,7 +5296,7 @@ func (x *WaitJWTTokenRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WaitJWTTokenRequest.ProtoReflect.Descriptor instead. func (*WaitJWTTokenRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{79} + return file_daemon_proto_rawDescGZIP(), []int{77} } func (x *WaitJWTTokenRequest) GetDeviceCode() string { @@ -5458,7 +5328,7 @@ type WaitJWTTokenResponse struct { func (x *WaitJWTTokenResponse) Reset() { *x = WaitJWTTokenResponse{} - mi := &file_daemon_proto_msgTypes[80] + mi := &file_daemon_proto_msgTypes[78] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5470,7 +5340,7 @@ func (x *WaitJWTTokenResponse) String() string { func (*WaitJWTTokenResponse) ProtoMessage() {} func (x *WaitJWTTokenResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[80] + mi := &file_daemon_proto_msgTypes[78] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5483,7 +5353,7 @@ func (x *WaitJWTTokenResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WaitJWTTokenResponse.ProtoReflect.Descriptor instead. func (*WaitJWTTokenResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{80} + return file_daemon_proto_rawDescGZIP(), []int{78} } func (x *WaitJWTTokenResponse) GetToken() string { @@ -5516,7 +5386,7 @@ type StartCPUProfileRequest struct { func (x *StartCPUProfileRequest) Reset() { *x = StartCPUProfileRequest{} - mi := &file_daemon_proto_msgTypes[81] + mi := &file_daemon_proto_msgTypes[79] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5528,7 +5398,7 @@ func (x *StartCPUProfileRequest) String() string { func (*StartCPUProfileRequest) ProtoMessage() {} func (x *StartCPUProfileRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[81] + mi := &file_daemon_proto_msgTypes[79] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5541,7 +5411,7 @@ func (x *StartCPUProfileRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StartCPUProfileRequest.ProtoReflect.Descriptor instead. func (*StartCPUProfileRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{81} + return file_daemon_proto_rawDescGZIP(), []int{79} } // StartCPUProfileResponse confirms CPU profiling has started @@ -5553,7 +5423,7 @@ type StartCPUProfileResponse struct { func (x *StartCPUProfileResponse) Reset() { *x = StartCPUProfileResponse{} - mi := &file_daemon_proto_msgTypes[82] + mi := &file_daemon_proto_msgTypes[80] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5565,7 +5435,7 @@ func (x *StartCPUProfileResponse) String() string { func (*StartCPUProfileResponse) ProtoMessage() {} func (x *StartCPUProfileResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[82] + mi := &file_daemon_proto_msgTypes[80] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5578,7 +5448,7 @@ func (x *StartCPUProfileResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StartCPUProfileResponse.ProtoReflect.Descriptor instead. func (*StartCPUProfileResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{82} + return file_daemon_proto_rawDescGZIP(), []int{80} } // StopCPUProfileRequest for stopping CPU profiling @@ -5590,7 +5460,7 @@ type StopCPUProfileRequest struct { func (x *StopCPUProfileRequest) Reset() { *x = StopCPUProfileRequest{} - mi := &file_daemon_proto_msgTypes[83] + mi := &file_daemon_proto_msgTypes[81] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5602,7 +5472,7 @@ func (x *StopCPUProfileRequest) String() string { func (*StopCPUProfileRequest) ProtoMessage() {} func (x *StopCPUProfileRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[83] + mi := &file_daemon_proto_msgTypes[81] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5615,7 +5485,7 @@ func (x *StopCPUProfileRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StopCPUProfileRequest.ProtoReflect.Descriptor instead. func (*StopCPUProfileRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{83} + return file_daemon_proto_rawDescGZIP(), []int{81} } // StopCPUProfileResponse confirms CPU profiling has stopped @@ -5627,7 +5497,7 @@ type StopCPUProfileResponse struct { func (x *StopCPUProfileResponse) Reset() { *x = StopCPUProfileResponse{} - mi := &file_daemon_proto_msgTypes[84] + mi := &file_daemon_proto_msgTypes[82] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5639,7 +5509,7 @@ func (x *StopCPUProfileResponse) String() string { func (*StopCPUProfileResponse) ProtoMessage() {} func (x *StopCPUProfileResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[84] + mi := &file_daemon_proto_msgTypes[82] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5652,7 +5522,7 @@ func (x *StopCPUProfileResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StopCPUProfileResponse.ProtoReflect.Descriptor instead. func (*StopCPUProfileResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{84} + return file_daemon_proto_rawDescGZIP(), []int{82} } type InstallerResultRequest struct { @@ -5663,7 +5533,7 @@ type InstallerResultRequest struct { func (x *InstallerResultRequest) Reset() { *x = InstallerResultRequest{} - mi := &file_daemon_proto_msgTypes[85] + mi := &file_daemon_proto_msgTypes[83] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5675,7 +5545,7 @@ func (x *InstallerResultRequest) String() string { func (*InstallerResultRequest) ProtoMessage() {} func (x *InstallerResultRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[85] + mi := &file_daemon_proto_msgTypes[83] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5688,7 +5558,7 @@ func (x *InstallerResultRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use InstallerResultRequest.ProtoReflect.Descriptor instead. func (*InstallerResultRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{85} + return file_daemon_proto_rawDescGZIP(), []int{83} } type InstallerResultResponse struct { @@ -5701,7 +5571,7 @@ type InstallerResultResponse struct { func (x *InstallerResultResponse) Reset() { *x = InstallerResultResponse{} - mi := &file_daemon_proto_msgTypes[86] + mi := &file_daemon_proto_msgTypes[84] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5713,7 +5583,7 @@ func (x *InstallerResultResponse) String() string { func (*InstallerResultResponse) ProtoMessage() {} func (x *InstallerResultResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[86] + mi := &file_daemon_proto_msgTypes[84] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5726,7 +5596,7 @@ func (x *InstallerResultResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use InstallerResultResponse.ProtoReflect.Descriptor instead. func (*InstallerResultResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{86} + return file_daemon_proto_rawDescGZIP(), []int{84} } func (x *InstallerResultResponse) GetSuccess() bool { @@ -5759,7 +5629,7 @@ type ExposeServiceRequest struct { func (x *ExposeServiceRequest) Reset() { *x = ExposeServiceRequest{} - mi := &file_daemon_proto_msgTypes[87] + mi := &file_daemon_proto_msgTypes[85] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5771,7 +5641,7 @@ func (x *ExposeServiceRequest) String() string { func (*ExposeServiceRequest) ProtoMessage() {} func (x *ExposeServiceRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[87] + mi := &file_daemon_proto_msgTypes[85] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5784,7 +5654,7 @@ func (x *ExposeServiceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ExposeServiceRequest.ProtoReflect.Descriptor instead. func (*ExposeServiceRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{87} + return file_daemon_proto_rawDescGZIP(), []int{85} } func (x *ExposeServiceRequest) GetPort() uint32 { @@ -5855,7 +5725,7 @@ type ExposeServiceEvent struct { func (x *ExposeServiceEvent) Reset() { *x = ExposeServiceEvent{} - mi := &file_daemon_proto_msgTypes[88] + mi := &file_daemon_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5867,7 +5737,7 @@ func (x *ExposeServiceEvent) String() string { func (*ExposeServiceEvent) ProtoMessage() {} func (x *ExposeServiceEvent) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[88] + mi := &file_daemon_proto_msgTypes[86] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5880,7 +5750,7 @@ func (x *ExposeServiceEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use ExposeServiceEvent.ProtoReflect.Descriptor instead. func (*ExposeServiceEvent) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{88} + return file_daemon_proto_rawDescGZIP(), []int{86} } func (x *ExposeServiceEvent) GetEvent() isExposeServiceEvent_Event { @@ -5921,7 +5791,7 @@ type ExposeServiceReady struct { func (x *ExposeServiceReady) Reset() { *x = ExposeServiceReady{} - mi := &file_daemon_proto_msgTypes[89] + mi := &file_daemon_proto_msgTypes[87] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5933,7 +5803,7 @@ func (x *ExposeServiceReady) String() string { func (*ExposeServiceReady) ProtoMessage() {} func (x *ExposeServiceReady) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[89] + mi := &file_daemon_proto_msgTypes[87] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5946,7 +5816,7 @@ func (x *ExposeServiceReady) ProtoReflect() protoreflect.Message { // Deprecated: Use ExposeServiceReady.ProtoReflect.Descriptor instead. func (*ExposeServiceReady) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{89} + return file_daemon_proto_rawDescGZIP(), []int{87} } func (x *ExposeServiceReady) GetServiceName() string { @@ -5987,7 +5857,7 @@ type PortInfo_Range struct { func (x *PortInfo_Range) Reset() { *x = PortInfo_Range{} - mi := &file_daemon_proto_msgTypes[91] + mi := &file_daemon_proto_msgTypes[89] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5999,7 +5869,7 @@ func (x *PortInfo_Range) String() string { func (*PortInfo_Range) ProtoMessage() {} func (x *PortInfo_Range) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[91] + mi := &file_daemon_proto_msgTypes[89] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6012,7 +5882,7 @@ func (x *PortInfo_Range) ProtoReflect() protoreflect.Message { // Deprecated: Use PortInfo_Range.ProtoReflect.Descriptor instead. func (*PortInfo_Range) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{30, 0} + return file_daemon_proto_rawDescGZIP(), []int{28, 0} } func (x *PortInfo_Range) GetStart() uint32 { @@ -6034,15 +5904,7 @@ var File_daemon_proto protoreflect.FileDescriptor const file_daemon_proto_rawDesc = "" + "\n" + "\fdaemon.proto\x12\x06daemon\x1a google/protobuf/descriptor.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\"\x0e\n" + - "\fEmptyRequest\"\x7f\n" + - "\x12OSLifecycleRequest\x128\n" + - "\x04type\x18\x01 \x01(\x0e2$.daemon.OSLifecycleRequest.CycleTypeR\x04type\"/\n" + - "\tCycleType\x12\v\n" + - "\aUNKNOWN\x10\x00\x12\t\n" + - "\x05SLEEP\x10\x01\x12\n" + - "\n" + - "\x06WAKEUP\x10\x02\"\x15\n" + - "\x13OSLifecycleResponse\"\xb6\x12\n" + + "\fEmptyRequest\"\xb6\x12\n" + "\fLoginRequest\x12\x1a\n" + "\bsetupKey\x18\x01 \x01(\tR\bsetupKey\x12&\n" + "\fpreSharedKey\x18\x02 \x01(\tB\x02\x18\x01R\fpreSharedKey\x12$\n" + @@ -6566,7 +6428,7 @@ const file_daemon_proto_rawDesc = "" + "\n" + "EXPOSE_UDP\x10\x03\x12\x0e\n" + "\n" + - "EXPOSE_TLS\x10\x042\xfc\x15\n" + + "EXPOSE_TLS\x10\x042\xac\x15\n" + "\rDaemonService\x126\n" + "\x05Login\x12\x14.daemon.LoginRequest\x1a\x15.daemon.LoginResponse\"\x00\x12K\n" + "\fWaitSSOLogin\x12\x1b.daemon.WaitSSOLoginRequest\x1a\x1c.daemon.WaitSSOLoginResponse\"\x00\x12-\n" + @@ -6604,8 +6466,7 @@ const file_daemon_proto_rawDesc = "" + "\x0eRequestJWTAuth\x12\x1d.daemon.RequestJWTAuthRequest\x1a\x1e.daemon.RequestJWTAuthResponse\"\x00\x12K\n" + "\fWaitJWTToken\x12\x1b.daemon.WaitJWTTokenRequest\x1a\x1c.daemon.WaitJWTTokenResponse\"\x00\x12T\n" + "\x0fStartCPUProfile\x12\x1e.daemon.StartCPUProfileRequest\x1a\x1f.daemon.StartCPUProfileResponse\"\x00\x12Q\n" + - "\x0eStopCPUProfile\x12\x1d.daemon.StopCPUProfileRequest\x1a\x1e.daemon.StopCPUProfileResponse\"\x00\x12N\n" + - "\x11NotifyOSLifecycle\x12\x1a.daemon.OSLifecycleRequest\x1a\x1b.daemon.OSLifecycleResponse\"\x00\x12W\n" + + "\x0eStopCPUProfile\x12\x1d.daemon.StopCPUProfileRequest\x1a\x1e.daemon.StopCPUProfileResponse\"\x00\x12W\n" + "\x12GetInstallerResult\x12\x1e.daemon.InstallerResultRequest\x1a\x1f.daemon.InstallerResultResponse\"\x00\x12M\n" + "\rExposeService\x12\x1c.daemon.ExposeServiceRequest\x1a\x1a.daemon.ExposeServiceEvent\"\x000\x01B\bZ\x06/protob\x06proto3" @@ -6621,226 +6482,220 @@ func file_daemon_proto_rawDescGZIP() []byte { return file_daemon_proto_rawDescData } -var file_daemon_proto_enumTypes = make([]protoimpl.EnumInfo, 5) -var file_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 93) +var file_daemon_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 91) var file_daemon_proto_goTypes = []any{ (LogLevel)(0), // 0: daemon.LogLevel (ExposeProtocol)(0), // 1: daemon.ExposeProtocol - (OSLifecycleRequest_CycleType)(0), // 2: daemon.OSLifecycleRequest.CycleType - (SystemEvent_Severity)(0), // 3: daemon.SystemEvent.Severity - (SystemEvent_Category)(0), // 4: daemon.SystemEvent.Category - (*EmptyRequest)(nil), // 5: daemon.EmptyRequest - (*OSLifecycleRequest)(nil), // 6: daemon.OSLifecycleRequest - (*OSLifecycleResponse)(nil), // 7: daemon.OSLifecycleResponse - (*LoginRequest)(nil), // 8: daemon.LoginRequest - (*LoginResponse)(nil), // 9: daemon.LoginResponse - (*WaitSSOLoginRequest)(nil), // 10: daemon.WaitSSOLoginRequest - (*WaitSSOLoginResponse)(nil), // 11: daemon.WaitSSOLoginResponse - (*UpRequest)(nil), // 12: daemon.UpRequest - (*UpResponse)(nil), // 13: daemon.UpResponse - (*StatusRequest)(nil), // 14: daemon.StatusRequest - (*StatusResponse)(nil), // 15: daemon.StatusResponse - (*DownRequest)(nil), // 16: daemon.DownRequest - (*DownResponse)(nil), // 17: daemon.DownResponse - (*GetConfigRequest)(nil), // 18: daemon.GetConfigRequest - (*GetConfigResponse)(nil), // 19: daemon.GetConfigResponse - (*PeerState)(nil), // 20: daemon.PeerState - (*LocalPeerState)(nil), // 21: daemon.LocalPeerState - (*SignalState)(nil), // 22: daemon.SignalState - (*ManagementState)(nil), // 23: daemon.ManagementState - (*RelayState)(nil), // 24: daemon.RelayState - (*NSGroupState)(nil), // 25: daemon.NSGroupState - (*SSHSessionInfo)(nil), // 26: daemon.SSHSessionInfo - (*SSHServerState)(nil), // 27: daemon.SSHServerState - (*FullStatus)(nil), // 28: daemon.FullStatus - (*ListNetworksRequest)(nil), // 29: daemon.ListNetworksRequest - (*ListNetworksResponse)(nil), // 30: daemon.ListNetworksResponse - (*SelectNetworksRequest)(nil), // 31: daemon.SelectNetworksRequest - (*SelectNetworksResponse)(nil), // 32: daemon.SelectNetworksResponse - (*IPList)(nil), // 33: daemon.IPList - (*Network)(nil), // 34: daemon.Network - (*PortInfo)(nil), // 35: daemon.PortInfo - (*ForwardingRule)(nil), // 36: daemon.ForwardingRule - (*ForwardingRulesResponse)(nil), // 37: daemon.ForwardingRulesResponse - (*DebugBundleRequest)(nil), // 38: daemon.DebugBundleRequest - (*DebugBundleResponse)(nil), // 39: daemon.DebugBundleResponse - (*GetLogLevelRequest)(nil), // 40: daemon.GetLogLevelRequest - (*GetLogLevelResponse)(nil), // 41: daemon.GetLogLevelResponse - (*SetLogLevelRequest)(nil), // 42: daemon.SetLogLevelRequest - (*SetLogLevelResponse)(nil), // 43: daemon.SetLogLevelResponse - (*State)(nil), // 44: daemon.State - (*ListStatesRequest)(nil), // 45: daemon.ListStatesRequest - (*ListStatesResponse)(nil), // 46: daemon.ListStatesResponse - (*CleanStateRequest)(nil), // 47: daemon.CleanStateRequest - (*CleanStateResponse)(nil), // 48: daemon.CleanStateResponse - (*DeleteStateRequest)(nil), // 49: daemon.DeleteStateRequest - (*DeleteStateResponse)(nil), // 50: daemon.DeleteStateResponse - (*SetSyncResponsePersistenceRequest)(nil), // 51: daemon.SetSyncResponsePersistenceRequest - (*SetSyncResponsePersistenceResponse)(nil), // 52: daemon.SetSyncResponsePersistenceResponse - (*TCPFlags)(nil), // 53: daemon.TCPFlags - (*TracePacketRequest)(nil), // 54: daemon.TracePacketRequest - (*TraceStage)(nil), // 55: daemon.TraceStage - (*TracePacketResponse)(nil), // 56: daemon.TracePacketResponse - (*SubscribeRequest)(nil), // 57: daemon.SubscribeRequest - (*SystemEvent)(nil), // 58: daemon.SystemEvent - (*GetEventsRequest)(nil), // 59: daemon.GetEventsRequest - (*GetEventsResponse)(nil), // 60: daemon.GetEventsResponse - (*SwitchProfileRequest)(nil), // 61: daemon.SwitchProfileRequest - (*SwitchProfileResponse)(nil), // 62: daemon.SwitchProfileResponse - (*SetConfigRequest)(nil), // 63: daemon.SetConfigRequest - (*SetConfigResponse)(nil), // 64: daemon.SetConfigResponse - (*AddProfileRequest)(nil), // 65: daemon.AddProfileRequest - (*AddProfileResponse)(nil), // 66: daemon.AddProfileResponse - (*RemoveProfileRequest)(nil), // 67: daemon.RemoveProfileRequest - (*RemoveProfileResponse)(nil), // 68: daemon.RemoveProfileResponse - (*ListProfilesRequest)(nil), // 69: daemon.ListProfilesRequest - (*ListProfilesResponse)(nil), // 70: daemon.ListProfilesResponse - (*Profile)(nil), // 71: daemon.Profile - (*GetActiveProfileRequest)(nil), // 72: daemon.GetActiveProfileRequest - (*GetActiveProfileResponse)(nil), // 73: daemon.GetActiveProfileResponse - (*LogoutRequest)(nil), // 74: daemon.LogoutRequest - (*LogoutResponse)(nil), // 75: daemon.LogoutResponse - (*GetFeaturesRequest)(nil), // 76: daemon.GetFeaturesRequest - (*GetFeaturesResponse)(nil), // 77: daemon.GetFeaturesResponse - (*TriggerUpdateRequest)(nil), // 78: daemon.TriggerUpdateRequest - (*TriggerUpdateResponse)(nil), // 79: daemon.TriggerUpdateResponse - (*GetPeerSSHHostKeyRequest)(nil), // 80: daemon.GetPeerSSHHostKeyRequest - (*GetPeerSSHHostKeyResponse)(nil), // 81: daemon.GetPeerSSHHostKeyResponse - (*RequestJWTAuthRequest)(nil), // 82: daemon.RequestJWTAuthRequest - (*RequestJWTAuthResponse)(nil), // 83: daemon.RequestJWTAuthResponse - (*WaitJWTTokenRequest)(nil), // 84: daemon.WaitJWTTokenRequest - (*WaitJWTTokenResponse)(nil), // 85: daemon.WaitJWTTokenResponse - (*StartCPUProfileRequest)(nil), // 86: daemon.StartCPUProfileRequest - (*StartCPUProfileResponse)(nil), // 87: daemon.StartCPUProfileResponse - (*StopCPUProfileRequest)(nil), // 88: daemon.StopCPUProfileRequest - (*StopCPUProfileResponse)(nil), // 89: daemon.StopCPUProfileResponse - (*InstallerResultRequest)(nil), // 90: daemon.InstallerResultRequest - (*InstallerResultResponse)(nil), // 91: daemon.InstallerResultResponse - (*ExposeServiceRequest)(nil), // 92: daemon.ExposeServiceRequest - (*ExposeServiceEvent)(nil), // 93: daemon.ExposeServiceEvent - (*ExposeServiceReady)(nil), // 94: daemon.ExposeServiceReady - nil, // 95: daemon.Network.ResolvedIPsEntry - (*PortInfo_Range)(nil), // 96: daemon.PortInfo.Range - nil, // 97: daemon.SystemEvent.MetadataEntry - (*durationpb.Duration)(nil), // 98: google.protobuf.Duration - (*timestamppb.Timestamp)(nil), // 99: google.protobuf.Timestamp + (SystemEvent_Severity)(0), // 2: daemon.SystemEvent.Severity + (SystemEvent_Category)(0), // 3: daemon.SystemEvent.Category + (*EmptyRequest)(nil), // 4: daemon.EmptyRequest + (*LoginRequest)(nil), // 5: daemon.LoginRequest + (*LoginResponse)(nil), // 6: daemon.LoginResponse + (*WaitSSOLoginRequest)(nil), // 7: daemon.WaitSSOLoginRequest + (*WaitSSOLoginResponse)(nil), // 8: daemon.WaitSSOLoginResponse + (*UpRequest)(nil), // 9: daemon.UpRequest + (*UpResponse)(nil), // 10: daemon.UpResponse + (*StatusRequest)(nil), // 11: daemon.StatusRequest + (*StatusResponse)(nil), // 12: daemon.StatusResponse + (*DownRequest)(nil), // 13: daemon.DownRequest + (*DownResponse)(nil), // 14: daemon.DownResponse + (*GetConfigRequest)(nil), // 15: daemon.GetConfigRequest + (*GetConfigResponse)(nil), // 16: daemon.GetConfigResponse + (*PeerState)(nil), // 17: daemon.PeerState + (*LocalPeerState)(nil), // 18: daemon.LocalPeerState + (*SignalState)(nil), // 19: daemon.SignalState + (*ManagementState)(nil), // 20: daemon.ManagementState + (*RelayState)(nil), // 21: daemon.RelayState + (*NSGroupState)(nil), // 22: daemon.NSGroupState + (*SSHSessionInfo)(nil), // 23: daemon.SSHSessionInfo + (*SSHServerState)(nil), // 24: daemon.SSHServerState + (*FullStatus)(nil), // 25: daemon.FullStatus + (*ListNetworksRequest)(nil), // 26: daemon.ListNetworksRequest + (*ListNetworksResponse)(nil), // 27: daemon.ListNetworksResponse + (*SelectNetworksRequest)(nil), // 28: daemon.SelectNetworksRequest + (*SelectNetworksResponse)(nil), // 29: daemon.SelectNetworksResponse + (*IPList)(nil), // 30: daemon.IPList + (*Network)(nil), // 31: daemon.Network + (*PortInfo)(nil), // 32: daemon.PortInfo + (*ForwardingRule)(nil), // 33: daemon.ForwardingRule + (*ForwardingRulesResponse)(nil), // 34: daemon.ForwardingRulesResponse + (*DebugBundleRequest)(nil), // 35: daemon.DebugBundleRequest + (*DebugBundleResponse)(nil), // 36: daemon.DebugBundleResponse + (*GetLogLevelRequest)(nil), // 37: daemon.GetLogLevelRequest + (*GetLogLevelResponse)(nil), // 38: daemon.GetLogLevelResponse + (*SetLogLevelRequest)(nil), // 39: daemon.SetLogLevelRequest + (*SetLogLevelResponse)(nil), // 40: daemon.SetLogLevelResponse + (*State)(nil), // 41: daemon.State + (*ListStatesRequest)(nil), // 42: daemon.ListStatesRequest + (*ListStatesResponse)(nil), // 43: daemon.ListStatesResponse + (*CleanStateRequest)(nil), // 44: daemon.CleanStateRequest + (*CleanStateResponse)(nil), // 45: daemon.CleanStateResponse + (*DeleteStateRequest)(nil), // 46: daemon.DeleteStateRequest + (*DeleteStateResponse)(nil), // 47: daemon.DeleteStateResponse + (*SetSyncResponsePersistenceRequest)(nil), // 48: daemon.SetSyncResponsePersistenceRequest + (*SetSyncResponsePersistenceResponse)(nil), // 49: daemon.SetSyncResponsePersistenceResponse + (*TCPFlags)(nil), // 50: daemon.TCPFlags + (*TracePacketRequest)(nil), // 51: daemon.TracePacketRequest + (*TraceStage)(nil), // 52: daemon.TraceStage + (*TracePacketResponse)(nil), // 53: daemon.TracePacketResponse + (*SubscribeRequest)(nil), // 54: daemon.SubscribeRequest + (*SystemEvent)(nil), // 55: daemon.SystemEvent + (*GetEventsRequest)(nil), // 56: daemon.GetEventsRequest + (*GetEventsResponse)(nil), // 57: daemon.GetEventsResponse + (*SwitchProfileRequest)(nil), // 58: daemon.SwitchProfileRequest + (*SwitchProfileResponse)(nil), // 59: daemon.SwitchProfileResponse + (*SetConfigRequest)(nil), // 60: daemon.SetConfigRequest + (*SetConfigResponse)(nil), // 61: daemon.SetConfigResponse + (*AddProfileRequest)(nil), // 62: daemon.AddProfileRequest + (*AddProfileResponse)(nil), // 63: daemon.AddProfileResponse + (*RemoveProfileRequest)(nil), // 64: daemon.RemoveProfileRequest + (*RemoveProfileResponse)(nil), // 65: daemon.RemoveProfileResponse + (*ListProfilesRequest)(nil), // 66: daemon.ListProfilesRequest + (*ListProfilesResponse)(nil), // 67: daemon.ListProfilesResponse + (*Profile)(nil), // 68: daemon.Profile + (*GetActiveProfileRequest)(nil), // 69: daemon.GetActiveProfileRequest + (*GetActiveProfileResponse)(nil), // 70: daemon.GetActiveProfileResponse + (*LogoutRequest)(nil), // 71: daemon.LogoutRequest + (*LogoutResponse)(nil), // 72: daemon.LogoutResponse + (*GetFeaturesRequest)(nil), // 73: daemon.GetFeaturesRequest + (*GetFeaturesResponse)(nil), // 74: daemon.GetFeaturesResponse + (*TriggerUpdateRequest)(nil), // 75: daemon.TriggerUpdateRequest + (*TriggerUpdateResponse)(nil), // 76: daemon.TriggerUpdateResponse + (*GetPeerSSHHostKeyRequest)(nil), // 77: daemon.GetPeerSSHHostKeyRequest + (*GetPeerSSHHostKeyResponse)(nil), // 78: daemon.GetPeerSSHHostKeyResponse + (*RequestJWTAuthRequest)(nil), // 79: daemon.RequestJWTAuthRequest + (*RequestJWTAuthResponse)(nil), // 80: daemon.RequestJWTAuthResponse + (*WaitJWTTokenRequest)(nil), // 81: daemon.WaitJWTTokenRequest + (*WaitJWTTokenResponse)(nil), // 82: daemon.WaitJWTTokenResponse + (*StartCPUProfileRequest)(nil), // 83: daemon.StartCPUProfileRequest + (*StartCPUProfileResponse)(nil), // 84: daemon.StartCPUProfileResponse + (*StopCPUProfileRequest)(nil), // 85: daemon.StopCPUProfileRequest + (*StopCPUProfileResponse)(nil), // 86: daemon.StopCPUProfileResponse + (*InstallerResultRequest)(nil), // 87: daemon.InstallerResultRequest + (*InstallerResultResponse)(nil), // 88: daemon.InstallerResultResponse + (*ExposeServiceRequest)(nil), // 89: daemon.ExposeServiceRequest + (*ExposeServiceEvent)(nil), // 90: daemon.ExposeServiceEvent + (*ExposeServiceReady)(nil), // 91: daemon.ExposeServiceReady + nil, // 92: daemon.Network.ResolvedIPsEntry + (*PortInfo_Range)(nil), // 93: daemon.PortInfo.Range + nil, // 94: daemon.SystemEvent.MetadataEntry + (*durationpb.Duration)(nil), // 95: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 96: google.protobuf.Timestamp } var file_daemon_proto_depIdxs = []int32{ - 2, // 0: daemon.OSLifecycleRequest.type:type_name -> daemon.OSLifecycleRequest.CycleType - 98, // 1: daemon.LoginRequest.dnsRouteInterval:type_name -> google.protobuf.Duration - 28, // 2: daemon.StatusResponse.fullStatus:type_name -> daemon.FullStatus - 99, // 3: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp - 99, // 4: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp - 98, // 5: daemon.PeerState.latency:type_name -> google.protobuf.Duration - 26, // 6: daemon.SSHServerState.sessions:type_name -> daemon.SSHSessionInfo - 23, // 7: daemon.FullStatus.managementState:type_name -> daemon.ManagementState - 22, // 8: daemon.FullStatus.signalState:type_name -> daemon.SignalState - 21, // 9: daemon.FullStatus.localPeerState:type_name -> daemon.LocalPeerState - 20, // 10: daemon.FullStatus.peers:type_name -> daemon.PeerState - 24, // 11: daemon.FullStatus.relays:type_name -> daemon.RelayState - 25, // 12: daemon.FullStatus.dns_servers:type_name -> daemon.NSGroupState - 58, // 13: daemon.FullStatus.events:type_name -> daemon.SystemEvent - 27, // 14: daemon.FullStatus.sshServerState:type_name -> daemon.SSHServerState - 34, // 15: daemon.ListNetworksResponse.routes:type_name -> daemon.Network - 95, // 16: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry - 96, // 17: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range - 35, // 18: daemon.ForwardingRule.destinationPort:type_name -> daemon.PortInfo - 35, // 19: daemon.ForwardingRule.translatedPort:type_name -> daemon.PortInfo - 36, // 20: daemon.ForwardingRulesResponse.rules:type_name -> daemon.ForwardingRule - 0, // 21: daemon.GetLogLevelResponse.level:type_name -> daemon.LogLevel - 0, // 22: daemon.SetLogLevelRequest.level:type_name -> daemon.LogLevel - 44, // 23: daemon.ListStatesResponse.states:type_name -> daemon.State - 53, // 24: daemon.TracePacketRequest.tcp_flags:type_name -> daemon.TCPFlags - 55, // 25: daemon.TracePacketResponse.stages:type_name -> daemon.TraceStage - 3, // 26: daemon.SystemEvent.severity:type_name -> daemon.SystemEvent.Severity - 4, // 27: daemon.SystemEvent.category:type_name -> daemon.SystemEvent.Category - 99, // 28: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp - 97, // 29: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry - 58, // 30: daemon.GetEventsResponse.events:type_name -> daemon.SystemEvent - 98, // 31: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration - 71, // 32: daemon.ListProfilesResponse.profiles:type_name -> daemon.Profile - 1, // 33: daemon.ExposeServiceRequest.protocol:type_name -> daemon.ExposeProtocol - 94, // 34: daemon.ExposeServiceEvent.ready:type_name -> daemon.ExposeServiceReady - 33, // 35: daemon.Network.ResolvedIPsEntry.value:type_name -> daemon.IPList - 8, // 36: daemon.DaemonService.Login:input_type -> daemon.LoginRequest - 10, // 37: daemon.DaemonService.WaitSSOLogin:input_type -> daemon.WaitSSOLoginRequest - 12, // 38: daemon.DaemonService.Up:input_type -> daemon.UpRequest - 14, // 39: daemon.DaemonService.Status:input_type -> daemon.StatusRequest - 16, // 40: daemon.DaemonService.Down:input_type -> daemon.DownRequest - 18, // 41: daemon.DaemonService.GetConfig:input_type -> daemon.GetConfigRequest - 29, // 42: daemon.DaemonService.ListNetworks:input_type -> daemon.ListNetworksRequest - 31, // 43: daemon.DaemonService.SelectNetworks:input_type -> daemon.SelectNetworksRequest - 31, // 44: daemon.DaemonService.DeselectNetworks:input_type -> daemon.SelectNetworksRequest - 5, // 45: daemon.DaemonService.ForwardingRules:input_type -> daemon.EmptyRequest - 38, // 46: daemon.DaemonService.DebugBundle:input_type -> daemon.DebugBundleRequest - 40, // 47: daemon.DaemonService.GetLogLevel:input_type -> daemon.GetLogLevelRequest - 42, // 48: daemon.DaemonService.SetLogLevel:input_type -> daemon.SetLogLevelRequest - 45, // 49: daemon.DaemonService.ListStates:input_type -> daemon.ListStatesRequest - 47, // 50: daemon.DaemonService.CleanState:input_type -> daemon.CleanStateRequest - 49, // 51: daemon.DaemonService.DeleteState:input_type -> daemon.DeleteStateRequest - 51, // 52: daemon.DaemonService.SetSyncResponsePersistence:input_type -> daemon.SetSyncResponsePersistenceRequest - 54, // 53: daemon.DaemonService.TracePacket:input_type -> daemon.TracePacketRequest - 57, // 54: daemon.DaemonService.SubscribeEvents:input_type -> daemon.SubscribeRequest - 59, // 55: daemon.DaemonService.GetEvents:input_type -> daemon.GetEventsRequest - 61, // 56: daemon.DaemonService.SwitchProfile:input_type -> daemon.SwitchProfileRequest - 63, // 57: daemon.DaemonService.SetConfig:input_type -> daemon.SetConfigRequest - 65, // 58: daemon.DaemonService.AddProfile:input_type -> daemon.AddProfileRequest - 67, // 59: daemon.DaemonService.RemoveProfile:input_type -> daemon.RemoveProfileRequest - 69, // 60: daemon.DaemonService.ListProfiles:input_type -> daemon.ListProfilesRequest - 72, // 61: daemon.DaemonService.GetActiveProfile:input_type -> daemon.GetActiveProfileRequest - 74, // 62: daemon.DaemonService.Logout:input_type -> daemon.LogoutRequest - 76, // 63: daemon.DaemonService.GetFeatures:input_type -> daemon.GetFeaturesRequest - 78, // 64: daemon.DaemonService.TriggerUpdate:input_type -> daemon.TriggerUpdateRequest - 80, // 65: daemon.DaemonService.GetPeerSSHHostKey:input_type -> daemon.GetPeerSSHHostKeyRequest - 82, // 66: daemon.DaemonService.RequestJWTAuth:input_type -> daemon.RequestJWTAuthRequest - 84, // 67: daemon.DaemonService.WaitJWTToken:input_type -> daemon.WaitJWTTokenRequest - 86, // 68: daemon.DaemonService.StartCPUProfile:input_type -> daemon.StartCPUProfileRequest - 88, // 69: daemon.DaemonService.StopCPUProfile:input_type -> daemon.StopCPUProfileRequest - 6, // 70: daemon.DaemonService.NotifyOSLifecycle:input_type -> daemon.OSLifecycleRequest - 90, // 71: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest - 92, // 72: daemon.DaemonService.ExposeService:input_type -> daemon.ExposeServiceRequest - 9, // 73: daemon.DaemonService.Login:output_type -> daemon.LoginResponse - 11, // 74: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse - 13, // 75: daemon.DaemonService.Up:output_type -> daemon.UpResponse - 15, // 76: daemon.DaemonService.Status:output_type -> daemon.StatusResponse - 17, // 77: daemon.DaemonService.Down:output_type -> daemon.DownResponse - 19, // 78: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse - 30, // 79: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse - 32, // 80: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse - 32, // 81: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse - 37, // 82: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse - 39, // 83: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse - 41, // 84: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse - 43, // 85: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse - 46, // 86: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse - 48, // 87: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse - 50, // 88: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse - 52, // 89: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse - 56, // 90: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse - 58, // 91: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent - 60, // 92: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse - 62, // 93: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse - 64, // 94: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse - 66, // 95: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse - 68, // 96: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse - 70, // 97: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse - 73, // 98: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse - 75, // 99: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse - 77, // 100: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse - 79, // 101: daemon.DaemonService.TriggerUpdate:output_type -> daemon.TriggerUpdateResponse - 81, // 102: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse - 83, // 103: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse - 85, // 104: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse - 87, // 105: daemon.DaemonService.StartCPUProfile:output_type -> daemon.StartCPUProfileResponse - 89, // 106: daemon.DaemonService.StopCPUProfile:output_type -> daemon.StopCPUProfileResponse - 7, // 107: daemon.DaemonService.NotifyOSLifecycle:output_type -> daemon.OSLifecycleResponse - 91, // 108: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse - 93, // 109: daemon.DaemonService.ExposeService:output_type -> daemon.ExposeServiceEvent - 73, // [73:110] is the sub-list for method output_type - 36, // [36:73] is the sub-list for method input_type - 36, // [36:36] is the sub-list for extension type_name - 36, // [36:36] is the sub-list for extension extendee - 0, // [0:36] is the sub-list for field type_name + 95, // 0: daemon.LoginRequest.dnsRouteInterval:type_name -> google.protobuf.Duration + 25, // 1: daemon.StatusResponse.fullStatus:type_name -> daemon.FullStatus + 96, // 2: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp + 96, // 3: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp + 95, // 4: daemon.PeerState.latency:type_name -> google.protobuf.Duration + 23, // 5: daemon.SSHServerState.sessions:type_name -> daemon.SSHSessionInfo + 20, // 6: daemon.FullStatus.managementState:type_name -> daemon.ManagementState + 19, // 7: daemon.FullStatus.signalState:type_name -> daemon.SignalState + 18, // 8: daemon.FullStatus.localPeerState:type_name -> daemon.LocalPeerState + 17, // 9: daemon.FullStatus.peers:type_name -> daemon.PeerState + 21, // 10: daemon.FullStatus.relays:type_name -> daemon.RelayState + 22, // 11: daemon.FullStatus.dns_servers:type_name -> daemon.NSGroupState + 55, // 12: daemon.FullStatus.events:type_name -> daemon.SystemEvent + 24, // 13: daemon.FullStatus.sshServerState:type_name -> daemon.SSHServerState + 31, // 14: daemon.ListNetworksResponse.routes:type_name -> daemon.Network + 92, // 15: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry + 93, // 16: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range + 32, // 17: daemon.ForwardingRule.destinationPort:type_name -> daemon.PortInfo + 32, // 18: daemon.ForwardingRule.translatedPort:type_name -> daemon.PortInfo + 33, // 19: daemon.ForwardingRulesResponse.rules:type_name -> daemon.ForwardingRule + 0, // 20: daemon.GetLogLevelResponse.level:type_name -> daemon.LogLevel + 0, // 21: daemon.SetLogLevelRequest.level:type_name -> daemon.LogLevel + 41, // 22: daemon.ListStatesResponse.states:type_name -> daemon.State + 50, // 23: daemon.TracePacketRequest.tcp_flags:type_name -> daemon.TCPFlags + 52, // 24: daemon.TracePacketResponse.stages:type_name -> daemon.TraceStage + 2, // 25: daemon.SystemEvent.severity:type_name -> daemon.SystemEvent.Severity + 3, // 26: daemon.SystemEvent.category:type_name -> daemon.SystemEvent.Category + 96, // 27: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp + 94, // 28: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry + 55, // 29: daemon.GetEventsResponse.events:type_name -> daemon.SystemEvent + 95, // 30: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration + 68, // 31: daemon.ListProfilesResponse.profiles:type_name -> daemon.Profile + 1, // 32: daemon.ExposeServiceRequest.protocol:type_name -> daemon.ExposeProtocol + 91, // 33: daemon.ExposeServiceEvent.ready:type_name -> daemon.ExposeServiceReady + 30, // 34: daemon.Network.ResolvedIPsEntry.value:type_name -> daemon.IPList + 5, // 35: daemon.DaemonService.Login:input_type -> daemon.LoginRequest + 7, // 36: daemon.DaemonService.WaitSSOLogin:input_type -> daemon.WaitSSOLoginRequest + 9, // 37: daemon.DaemonService.Up:input_type -> daemon.UpRequest + 11, // 38: daemon.DaemonService.Status:input_type -> daemon.StatusRequest + 13, // 39: daemon.DaemonService.Down:input_type -> daemon.DownRequest + 15, // 40: daemon.DaemonService.GetConfig:input_type -> daemon.GetConfigRequest + 26, // 41: daemon.DaemonService.ListNetworks:input_type -> daemon.ListNetworksRequest + 28, // 42: daemon.DaemonService.SelectNetworks:input_type -> daemon.SelectNetworksRequest + 28, // 43: daemon.DaemonService.DeselectNetworks:input_type -> daemon.SelectNetworksRequest + 4, // 44: daemon.DaemonService.ForwardingRules:input_type -> daemon.EmptyRequest + 35, // 45: daemon.DaemonService.DebugBundle:input_type -> daemon.DebugBundleRequest + 37, // 46: daemon.DaemonService.GetLogLevel:input_type -> daemon.GetLogLevelRequest + 39, // 47: daemon.DaemonService.SetLogLevel:input_type -> daemon.SetLogLevelRequest + 42, // 48: daemon.DaemonService.ListStates:input_type -> daemon.ListStatesRequest + 44, // 49: daemon.DaemonService.CleanState:input_type -> daemon.CleanStateRequest + 46, // 50: daemon.DaemonService.DeleteState:input_type -> daemon.DeleteStateRequest + 48, // 51: daemon.DaemonService.SetSyncResponsePersistence:input_type -> daemon.SetSyncResponsePersistenceRequest + 51, // 52: daemon.DaemonService.TracePacket:input_type -> daemon.TracePacketRequest + 54, // 53: daemon.DaemonService.SubscribeEvents:input_type -> daemon.SubscribeRequest + 56, // 54: daemon.DaemonService.GetEvents:input_type -> daemon.GetEventsRequest + 58, // 55: daemon.DaemonService.SwitchProfile:input_type -> daemon.SwitchProfileRequest + 60, // 56: daemon.DaemonService.SetConfig:input_type -> daemon.SetConfigRequest + 62, // 57: daemon.DaemonService.AddProfile:input_type -> daemon.AddProfileRequest + 64, // 58: daemon.DaemonService.RemoveProfile:input_type -> daemon.RemoveProfileRequest + 66, // 59: daemon.DaemonService.ListProfiles:input_type -> daemon.ListProfilesRequest + 69, // 60: daemon.DaemonService.GetActiveProfile:input_type -> daemon.GetActiveProfileRequest + 71, // 61: daemon.DaemonService.Logout:input_type -> daemon.LogoutRequest + 73, // 62: daemon.DaemonService.GetFeatures:input_type -> daemon.GetFeaturesRequest + 75, // 63: daemon.DaemonService.TriggerUpdate:input_type -> daemon.TriggerUpdateRequest + 77, // 64: daemon.DaemonService.GetPeerSSHHostKey:input_type -> daemon.GetPeerSSHHostKeyRequest + 79, // 65: daemon.DaemonService.RequestJWTAuth:input_type -> daemon.RequestJWTAuthRequest + 81, // 66: daemon.DaemonService.WaitJWTToken:input_type -> daemon.WaitJWTTokenRequest + 83, // 67: daemon.DaemonService.StartCPUProfile:input_type -> daemon.StartCPUProfileRequest + 85, // 68: daemon.DaemonService.StopCPUProfile:input_type -> daemon.StopCPUProfileRequest + 87, // 69: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest + 89, // 70: daemon.DaemonService.ExposeService:input_type -> daemon.ExposeServiceRequest + 6, // 71: daemon.DaemonService.Login:output_type -> daemon.LoginResponse + 8, // 72: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse + 10, // 73: daemon.DaemonService.Up:output_type -> daemon.UpResponse + 12, // 74: daemon.DaemonService.Status:output_type -> daemon.StatusResponse + 14, // 75: daemon.DaemonService.Down:output_type -> daemon.DownResponse + 16, // 76: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse + 27, // 77: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse + 29, // 78: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse + 29, // 79: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse + 34, // 80: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse + 36, // 81: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse + 38, // 82: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse + 40, // 83: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse + 43, // 84: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse + 45, // 85: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse + 47, // 86: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse + 49, // 87: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse + 53, // 88: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse + 55, // 89: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent + 57, // 90: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse + 59, // 91: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse + 61, // 92: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse + 63, // 93: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse + 65, // 94: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse + 67, // 95: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse + 70, // 96: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse + 72, // 97: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse + 74, // 98: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse + 76, // 99: daemon.DaemonService.TriggerUpdate:output_type -> daemon.TriggerUpdateResponse + 78, // 100: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse + 80, // 101: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse + 82, // 102: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse + 84, // 103: daemon.DaemonService.StartCPUProfile:output_type -> daemon.StartCPUProfileResponse + 86, // 104: daemon.DaemonService.StopCPUProfile:output_type -> daemon.StopCPUProfileResponse + 88, // 105: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse + 90, // 106: daemon.DaemonService.ExposeService:output_type -> daemon.ExposeServiceEvent + 71, // [71:107] is the sub-list for method output_type + 35, // [35:71] is the sub-list for method input_type + 35, // [35:35] is the sub-list for extension type_name + 35, // [35:35] is the sub-list for extension extendee + 0, // [0:35] is the sub-list for field type_name } func init() { file_daemon_proto_init() } @@ -6848,20 +6703,20 @@ func file_daemon_proto_init() { if File_daemon_proto != nil { return } - file_daemon_proto_msgTypes[3].OneofWrappers = []any{} + file_daemon_proto_msgTypes[1].OneofWrappers = []any{} + file_daemon_proto_msgTypes[5].OneofWrappers = []any{} file_daemon_proto_msgTypes[7].OneofWrappers = []any{} - file_daemon_proto_msgTypes[9].OneofWrappers = []any{} - file_daemon_proto_msgTypes[30].OneofWrappers = []any{ + file_daemon_proto_msgTypes[28].OneofWrappers = []any{ (*PortInfo_Port)(nil), (*PortInfo_Range_)(nil), } - file_daemon_proto_msgTypes[49].OneofWrappers = []any{} - file_daemon_proto_msgTypes[50].OneofWrappers = []any{} + file_daemon_proto_msgTypes[47].OneofWrappers = []any{} + file_daemon_proto_msgTypes[48].OneofWrappers = []any{} + file_daemon_proto_msgTypes[54].OneofWrappers = []any{} file_daemon_proto_msgTypes[56].OneofWrappers = []any{} - file_daemon_proto_msgTypes[58].OneofWrappers = []any{} - file_daemon_proto_msgTypes[69].OneofWrappers = []any{} - file_daemon_proto_msgTypes[77].OneofWrappers = []any{} - file_daemon_proto_msgTypes[88].OneofWrappers = []any{ + file_daemon_proto_msgTypes[67].OneofWrappers = []any{} + file_daemon_proto_msgTypes[75].OneofWrappers = []any{} + file_daemon_proto_msgTypes[86].OneofWrappers = []any{ (*ExposeServiceEvent_Ready)(nil), } type x struct{} @@ -6869,8 +6724,8 @@ func file_daemon_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_daemon_proto_rawDesc), len(file_daemon_proto_rawDesc)), - NumEnums: 5, - NumMessages: 93, + NumEnums: 4, + NumMessages: 91, NumExtensions: 0, NumServices: 1, }, diff --git a/client/proto/daemon.proto b/client/proto/daemon.proto index 19976660c..f4e5b8e4d 100644 --- a/client/proto/daemon.proto +++ b/client/proto/daemon.proto @@ -104,8 +104,6 @@ service DaemonService { // StopCPUProfile stops CPU profiling in the daemon rpc StopCPUProfile(StopCPUProfileRequest) returns (StopCPUProfileResponse) {} - rpc NotifyOSLifecycle(OSLifecycleRequest) returns(OSLifecycleResponse) {} - rpc GetInstallerResult(InstallerResultRequest) returns (InstallerResultResponse) {} // ExposeService exposes a local port via the NetBird reverse proxy @@ -114,20 +112,6 @@ service DaemonService { -message OSLifecycleRequest { - // avoid collision with loglevel enum - enum CycleType { - UNKNOWN = 0; - SLEEP = 1; - WAKEUP = 2; - } - - CycleType type = 1; -} - -message OSLifecycleResponse {} - - message LoginRequest { // setupKey netbird setup key. string setupKey = 1; diff --git a/client/proto/daemon_grpc.pb.go b/client/proto/daemon_grpc.pb.go index e5bd89597..026ee2361 100644 --- a/client/proto/daemon_grpc.pb.go +++ b/client/proto/daemon_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.6.1 +// - protoc v6.33.1 +// source: daemon.proto package proto @@ -11,8 +15,47 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + DaemonService_Login_FullMethodName = "/daemon.DaemonService/Login" + DaemonService_WaitSSOLogin_FullMethodName = "/daemon.DaemonService/WaitSSOLogin" + DaemonService_Up_FullMethodName = "/daemon.DaemonService/Up" + DaemonService_Status_FullMethodName = "/daemon.DaemonService/Status" + DaemonService_Down_FullMethodName = "/daemon.DaemonService/Down" + DaemonService_GetConfig_FullMethodName = "/daemon.DaemonService/GetConfig" + DaemonService_ListNetworks_FullMethodName = "/daemon.DaemonService/ListNetworks" + DaemonService_SelectNetworks_FullMethodName = "/daemon.DaemonService/SelectNetworks" + DaemonService_DeselectNetworks_FullMethodName = "/daemon.DaemonService/DeselectNetworks" + DaemonService_ForwardingRules_FullMethodName = "/daemon.DaemonService/ForwardingRules" + DaemonService_DebugBundle_FullMethodName = "/daemon.DaemonService/DebugBundle" + DaemonService_GetLogLevel_FullMethodName = "/daemon.DaemonService/GetLogLevel" + DaemonService_SetLogLevel_FullMethodName = "/daemon.DaemonService/SetLogLevel" + DaemonService_ListStates_FullMethodName = "/daemon.DaemonService/ListStates" + DaemonService_CleanState_FullMethodName = "/daemon.DaemonService/CleanState" + DaemonService_DeleteState_FullMethodName = "/daemon.DaemonService/DeleteState" + DaemonService_SetSyncResponsePersistence_FullMethodName = "/daemon.DaemonService/SetSyncResponsePersistence" + DaemonService_TracePacket_FullMethodName = "/daemon.DaemonService/TracePacket" + DaemonService_SubscribeEvents_FullMethodName = "/daemon.DaemonService/SubscribeEvents" + DaemonService_GetEvents_FullMethodName = "/daemon.DaemonService/GetEvents" + DaemonService_SwitchProfile_FullMethodName = "/daemon.DaemonService/SwitchProfile" + DaemonService_SetConfig_FullMethodName = "/daemon.DaemonService/SetConfig" + DaemonService_AddProfile_FullMethodName = "/daemon.DaemonService/AddProfile" + DaemonService_RemoveProfile_FullMethodName = "/daemon.DaemonService/RemoveProfile" + DaemonService_ListProfiles_FullMethodName = "/daemon.DaemonService/ListProfiles" + DaemonService_GetActiveProfile_FullMethodName = "/daemon.DaemonService/GetActiveProfile" + DaemonService_Logout_FullMethodName = "/daemon.DaemonService/Logout" + DaemonService_GetFeatures_FullMethodName = "/daemon.DaemonService/GetFeatures" + DaemonService_TriggerUpdate_FullMethodName = "/daemon.DaemonService/TriggerUpdate" + DaemonService_GetPeerSSHHostKey_FullMethodName = "/daemon.DaemonService/GetPeerSSHHostKey" + DaemonService_RequestJWTAuth_FullMethodName = "/daemon.DaemonService/RequestJWTAuth" + DaemonService_WaitJWTToken_FullMethodName = "/daemon.DaemonService/WaitJWTToken" + DaemonService_StartCPUProfile_FullMethodName = "/daemon.DaemonService/StartCPUProfile" + DaemonService_StopCPUProfile_FullMethodName = "/daemon.DaemonService/StopCPUProfile" + DaemonService_GetInstallerResult_FullMethodName = "/daemon.DaemonService/GetInstallerResult" + DaemonService_ExposeService_FullMethodName = "/daemon.DaemonService/ExposeService" +) // DaemonServiceClient is the client API for DaemonService service. // @@ -53,7 +96,7 @@ type DaemonServiceClient interface { // SetSyncResponsePersistence enables or disables sync response persistence SetSyncResponsePersistence(ctx context.Context, in *SetSyncResponsePersistenceRequest, opts ...grpc.CallOption) (*SetSyncResponsePersistenceResponse, error) TracePacket(ctx context.Context, in *TracePacketRequest, opts ...grpc.CallOption) (*TracePacketResponse, error) - SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (DaemonService_SubscribeEventsClient, error) + SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SystemEvent], error) GetEvents(ctx context.Context, in *GetEventsRequest, opts ...grpc.CallOption) (*GetEventsResponse, error) SwitchProfile(ctx context.Context, in *SwitchProfileRequest, opts ...grpc.CallOption) (*SwitchProfileResponse, error) SetConfig(ctx context.Context, in *SetConfigRequest, opts ...grpc.CallOption) (*SetConfigResponse, error) @@ -77,10 +120,9 @@ type DaemonServiceClient interface { StartCPUProfile(ctx context.Context, in *StartCPUProfileRequest, opts ...grpc.CallOption) (*StartCPUProfileResponse, error) // StopCPUProfile stops CPU profiling in the daemon StopCPUProfile(ctx context.Context, in *StopCPUProfileRequest, opts ...grpc.CallOption) (*StopCPUProfileResponse, error) - NotifyOSLifecycle(ctx context.Context, in *OSLifecycleRequest, opts ...grpc.CallOption) (*OSLifecycleResponse, error) GetInstallerResult(ctx context.Context, in *InstallerResultRequest, opts ...grpc.CallOption) (*InstallerResultResponse, error) // ExposeService exposes a local port via the NetBird reverse proxy - ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (DaemonService_ExposeServiceClient, error) + ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ExposeServiceEvent], error) } type daemonServiceClient struct { @@ -92,8 +134,9 @@ func NewDaemonServiceClient(cc grpc.ClientConnInterface) DaemonServiceClient { } func (c *daemonServiceClient) Login(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(LoginResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/Login", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_Login_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -101,8 +144,9 @@ func (c *daemonServiceClient) Login(ctx context.Context, in *LoginRequest, opts } func (c *daemonServiceClient) WaitSSOLogin(ctx context.Context, in *WaitSSOLoginRequest, opts ...grpc.CallOption) (*WaitSSOLoginResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(WaitSSOLoginResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/WaitSSOLogin", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_WaitSSOLogin_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -110,8 +154,9 @@ func (c *daemonServiceClient) WaitSSOLogin(ctx context.Context, in *WaitSSOLogin } func (c *daemonServiceClient) Up(ctx context.Context, in *UpRequest, opts ...grpc.CallOption) (*UpResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(UpResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/Up", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_Up_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -119,8 +164,9 @@ func (c *daemonServiceClient) Up(ctx context.Context, in *UpRequest, opts ...grp } func (c *daemonServiceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StatusResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/Status", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_Status_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -128,8 +174,9 @@ func (c *daemonServiceClient) Status(ctx context.Context, in *StatusRequest, opt } func (c *daemonServiceClient) Down(ctx context.Context, in *DownRequest, opts ...grpc.CallOption) (*DownResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(DownResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/Down", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_Down_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -137,8 +184,9 @@ func (c *daemonServiceClient) Down(ctx context.Context, in *DownRequest, opts .. } func (c *daemonServiceClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetConfigResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetConfig", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_GetConfig_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -146,8 +194,9 @@ func (c *daemonServiceClient) GetConfig(ctx context.Context, in *GetConfigReques } func (c *daemonServiceClient) ListNetworks(ctx context.Context, in *ListNetworksRequest, opts ...grpc.CallOption) (*ListNetworksResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ListNetworksResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/ListNetworks", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_ListNetworks_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -155,8 +204,9 @@ func (c *daemonServiceClient) ListNetworks(ctx context.Context, in *ListNetworks } func (c *daemonServiceClient) SelectNetworks(ctx context.Context, in *SelectNetworksRequest, opts ...grpc.CallOption) (*SelectNetworksResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SelectNetworksResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/SelectNetworks", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_SelectNetworks_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -164,8 +214,9 @@ func (c *daemonServiceClient) SelectNetworks(ctx context.Context, in *SelectNetw } func (c *daemonServiceClient) DeselectNetworks(ctx context.Context, in *SelectNetworksRequest, opts ...grpc.CallOption) (*SelectNetworksResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SelectNetworksResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/DeselectNetworks", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_DeselectNetworks_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -173,8 +224,9 @@ func (c *daemonServiceClient) DeselectNetworks(ctx context.Context, in *SelectNe } func (c *daemonServiceClient) ForwardingRules(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*ForwardingRulesResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ForwardingRulesResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/ForwardingRules", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_ForwardingRules_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -182,8 +234,9 @@ func (c *daemonServiceClient) ForwardingRules(ctx context.Context, in *EmptyRequ } func (c *daemonServiceClient) DebugBundle(ctx context.Context, in *DebugBundleRequest, opts ...grpc.CallOption) (*DebugBundleResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(DebugBundleResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/DebugBundle", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_DebugBundle_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -191,8 +244,9 @@ func (c *daemonServiceClient) DebugBundle(ctx context.Context, in *DebugBundleRe } func (c *daemonServiceClient) GetLogLevel(ctx context.Context, in *GetLogLevelRequest, opts ...grpc.CallOption) (*GetLogLevelResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetLogLevelResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetLogLevel", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_GetLogLevel_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -200,8 +254,9 @@ func (c *daemonServiceClient) GetLogLevel(ctx context.Context, in *GetLogLevelRe } func (c *daemonServiceClient) SetLogLevel(ctx context.Context, in *SetLogLevelRequest, opts ...grpc.CallOption) (*SetLogLevelResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SetLogLevelResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/SetLogLevel", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_SetLogLevel_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -209,8 +264,9 @@ func (c *daemonServiceClient) SetLogLevel(ctx context.Context, in *SetLogLevelRe } func (c *daemonServiceClient) ListStates(ctx context.Context, in *ListStatesRequest, opts ...grpc.CallOption) (*ListStatesResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ListStatesResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/ListStates", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_ListStates_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -218,8 +274,9 @@ func (c *daemonServiceClient) ListStates(ctx context.Context, in *ListStatesRequ } func (c *daemonServiceClient) CleanState(ctx context.Context, in *CleanStateRequest, opts ...grpc.CallOption) (*CleanStateResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(CleanStateResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/CleanState", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_CleanState_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -227,8 +284,9 @@ func (c *daemonServiceClient) CleanState(ctx context.Context, in *CleanStateRequ } func (c *daemonServiceClient) DeleteState(ctx context.Context, in *DeleteStateRequest, opts ...grpc.CallOption) (*DeleteStateResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(DeleteStateResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/DeleteState", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_DeleteState_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -236,8 +294,9 @@ func (c *daemonServiceClient) DeleteState(ctx context.Context, in *DeleteStateRe } func (c *daemonServiceClient) SetSyncResponsePersistence(ctx context.Context, in *SetSyncResponsePersistenceRequest, opts ...grpc.CallOption) (*SetSyncResponsePersistenceResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SetSyncResponsePersistenceResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/SetSyncResponsePersistence", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_SetSyncResponsePersistence_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -245,20 +304,22 @@ func (c *daemonServiceClient) SetSyncResponsePersistence(ctx context.Context, in } func (c *daemonServiceClient) TracePacket(ctx context.Context, in *TracePacketRequest, opts ...grpc.CallOption) (*TracePacketResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(TracePacketResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/TracePacket", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_TracePacket_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *daemonServiceClient) SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (DaemonService_SubscribeEventsClient, error) { - stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[0], "/daemon.DaemonService/SubscribeEvents", opts...) +func (c *daemonServiceClient) SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SystemEvent], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[0], DaemonService_SubscribeEvents_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &daemonServiceSubscribeEventsClient{stream} + x := &grpc.GenericClientStream[SubscribeRequest, SystemEvent]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -268,26 +329,13 @@ func (c *daemonServiceClient) SubscribeEvents(ctx context.Context, in *Subscribe return x, nil } -type DaemonService_SubscribeEventsClient interface { - Recv() (*SystemEvent, error) - grpc.ClientStream -} - -type daemonServiceSubscribeEventsClient struct { - grpc.ClientStream -} - -func (x *daemonServiceSubscribeEventsClient) Recv() (*SystemEvent, error) { - m := new(SystemEvent) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type DaemonService_SubscribeEventsClient = grpc.ServerStreamingClient[SystemEvent] func (c *daemonServiceClient) GetEvents(ctx context.Context, in *GetEventsRequest, opts ...grpc.CallOption) (*GetEventsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetEventsResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetEvents", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_GetEvents_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -295,8 +343,9 @@ func (c *daemonServiceClient) GetEvents(ctx context.Context, in *GetEventsReques } func (c *daemonServiceClient) SwitchProfile(ctx context.Context, in *SwitchProfileRequest, opts ...grpc.CallOption) (*SwitchProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SwitchProfileResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/SwitchProfile", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_SwitchProfile_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -304,8 +353,9 @@ func (c *daemonServiceClient) SwitchProfile(ctx context.Context, in *SwitchProfi } func (c *daemonServiceClient) SetConfig(ctx context.Context, in *SetConfigRequest, opts ...grpc.CallOption) (*SetConfigResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SetConfigResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/SetConfig", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_SetConfig_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -313,8 +363,9 @@ func (c *daemonServiceClient) SetConfig(ctx context.Context, in *SetConfigReques } func (c *daemonServiceClient) AddProfile(ctx context.Context, in *AddProfileRequest, opts ...grpc.CallOption) (*AddProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(AddProfileResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/AddProfile", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_AddProfile_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -322,8 +373,9 @@ func (c *daemonServiceClient) AddProfile(ctx context.Context, in *AddProfileRequ } func (c *daemonServiceClient) RemoveProfile(ctx context.Context, in *RemoveProfileRequest, opts ...grpc.CallOption) (*RemoveProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(RemoveProfileResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/RemoveProfile", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_RemoveProfile_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -331,8 +383,9 @@ func (c *daemonServiceClient) RemoveProfile(ctx context.Context, in *RemoveProfi } func (c *daemonServiceClient) ListProfiles(ctx context.Context, in *ListProfilesRequest, opts ...grpc.CallOption) (*ListProfilesResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ListProfilesResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/ListProfiles", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_ListProfiles_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -340,8 +393,9 @@ func (c *daemonServiceClient) ListProfiles(ctx context.Context, in *ListProfiles } func (c *daemonServiceClient) GetActiveProfile(ctx context.Context, in *GetActiveProfileRequest, opts ...grpc.CallOption) (*GetActiveProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetActiveProfileResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetActiveProfile", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_GetActiveProfile_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -349,8 +403,9 @@ func (c *daemonServiceClient) GetActiveProfile(ctx context.Context, in *GetActiv } func (c *daemonServiceClient) Logout(ctx context.Context, in *LogoutRequest, opts ...grpc.CallOption) (*LogoutResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(LogoutResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/Logout", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_Logout_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -358,8 +413,9 @@ func (c *daemonServiceClient) Logout(ctx context.Context, in *LogoutRequest, opt } func (c *daemonServiceClient) GetFeatures(ctx context.Context, in *GetFeaturesRequest, opts ...grpc.CallOption) (*GetFeaturesResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetFeaturesResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetFeatures", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_GetFeatures_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -367,8 +423,9 @@ func (c *daemonServiceClient) GetFeatures(ctx context.Context, in *GetFeaturesRe } func (c *daemonServiceClient) TriggerUpdate(ctx context.Context, in *TriggerUpdateRequest, opts ...grpc.CallOption) (*TriggerUpdateResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(TriggerUpdateResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/TriggerUpdate", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_TriggerUpdate_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -376,8 +433,9 @@ func (c *daemonServiceClient) TriggerUpdate(ctx context.Context, in *TriggerUpda } func (c *daemonServiceClient) GetPeerSSHHostKey(ctx context.Context, in *GetPeerSSHHostKeyRequest, opts ...grpc.CallOption) (*GetPeerSSHHostKeyResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetPeerSSHHostKeyResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetPeerSSHHostKey", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_GetPeerSSHHostKey_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -385,8 +443,9 @@ func (c *daemonServiceClient) GetPeerSSHHostKey(ctx context.Context, in *GetPeer } func (c *daemonServiceClient) RequestJWTAuth(ctx context.Context, in *RequestJWTAuthRequest, opts ...grpc.CallOption) (*RequestJWTAuthResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(RequestJWTAuthResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/RequestJWTAuth", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_RequestJWTAuth_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -394,8 +453,9 @@ func (c *daemonServiceClient) RequestJWTAuth(ctx context.Context, in *RequestJWT } func (c *daemonServiceClient) WaitJWTToken(ctx context.Context, in *WaitJWTTokenRequest, opts ...grpc.CallOption) (*WaitJWTTokenResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(WaitJWTTokenResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/WaitJWTToken", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_WaitJWTToken_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -403,8 +463,9 @@ func (c *daemonServiceClient) WaitJWTToken(ctx context.Context, in *WaitJWTToken } func (c *daemonServiceClient) StartCPUProfile(ctx context.Context, in *StartCPUProfileRequest, opts ...grpc.CallOption) (*StartCPUProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StartCPUProfileResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/StartCPUProfile", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_StartCPUProfile_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -412,17 +473,9 @@ func (c *daemonServiceClient) StartCPUProfile(ctx context.Context, in *StartCPUP } func (c *daemonServiceClient) StopCPUProfile(ctx context.Context, in *StopCPUProfileRequest, opts ...grpc.CallOption) (*StopCPUProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StopCPUProfileResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/StopCPUProfile", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *daemonServiceClient) NotifyOSLifecycle(ctx context.Context, in *OSLifecycleRequest, opts ...grpc.CallOption) (*OSLifecycleResponse, error) { - out := new(OSLifecycleResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/NotifyOSLifecycle", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_StopCPUProfile_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -430,20 +483,22 @@ func (c *daemonServiceClient) NotifyOSLifecycle(ctx context.Context, in *OSLifec } func (c *daemonServiceClient) GetInstallerResult(ctx context.Context, in *InstallerResultRequest, opts ...grpc.CallOption) (*InstallerResultResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(InstallerResultResponse) - err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetInstallerResult", in, out, opts...) + err := c.cc.Invoke(ctx, DaemonService_GetInstallerResult_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *daemonServiceClient) ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (DaemonService_ExposeServiceClient, error) { - stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[1], "/daemon.DaemonService/ExposeService", opts...) +func (c *daemonServiceClient) ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ExposeServiceEvent], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[1], DaemonService_ExposeService_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &daemonServiceExposeServiceClient{stream} + x := &grpc.GenericClientStream[ExposeServiceRequest, ExposeServiceEvent]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -453,26 +508,12 @@ func (c *daemonServiceClient) ExposeService(ctx context.Context, in *ExposeServi return x, nil } -type DaemonService_ExposeServiceClient interface { - Recv() (*ExposeServiceEvent, error) - grpc.ClientStream -} - -type daemonServiceExposeServiceClient struct { - grpc.ClientStream -} - -func (x *daemonServiceExposeServiceClient) Recv() (*ExposeServiceEvent, error) { - m := new(ExposeServiceEvent) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type DaemonService_ExposeServiceClient = grpc.ServerStreamingClient[ExposeServiceEvent] // DaemonServiceServer is the server API for DaemonService service. // All implementations must embed UnimplementedDaemonServiceServer -// for forward compatibility +// for forward compatibility. type DaemonServiceServer interface { // Login uses setup key to prepare configuration for the daemon. Login(context.Context, *LoginRequest) (*LoginResponse, error) @@ -509,7 +550,7 @@ type DaemonServiceServer interface { // SetSyncResponsePersistence enables or disables sync response persistence SetSyncResponsePersistence(context.Context, *SetSyncResponsePersistenceRequest) (*SetSyncResponsePersistenceResponse, error) TracePacket(context.Context, *TracePacketRequest) (*TracePacketResponse, error) - SubscribeEvents(*SubscribeRequest, DaemonService_SubscribeEventsServer) error + SubscribeEvents(*SubscribeRequest, grpc.ServerStreamingServer[SystemEvent]) error GetEvents(context.Context, *GetEventsRequest) (*GetEventsResponse, error) SwitchProfile(context.Context, *SwitchProfileRequest) (*SwitchProfileResponse, error) SetConfig(context.Context, *SetConfigRequest) (*SetConfigResponse, error) @@ -533,129 +574,129 @@ type DaemonServiceServer interface { StartCPUProfile(context.Context, *StartCPUProfileRequest) (*StartCPUProfileResponse, error) // StopCPUProfile stops CPU profiling in the daemon StopCPUProfile(context.Context, *StopCPUProfileRequest) (*StopCPUProfileResponse, error) - NotifyOSLifecycle(context.Context, *OSLifecycleRequest) (*OSLifecycleResponse, error) GetInstallerResult(context.Context, *InstallerResultRequest) (*InstallerResultResponse, error) // ExposeService exposes a local port via the NetBird reverse proxy - ExposeService(*ExposeServiceRequest, DaemonService_ExposeServiceServer) error + ExposeService(*ExposeServiceRequest, grpc.ServerStreamingServer[ExposeServiceEvent]) error mustEmbedUnimplementedDaemonServiceServer() } -// UnimplementedDaemonServiceServer must be embedded to have forward compatible implementations. -type UnimplementedDaemonServiceServer struct { -} +// UnimplementedDaemonServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedDaemonServiceServer struct{} func (UnimplementedDaemonServiceServer) Login(context.Context, *LoginRequest) (*LoginResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Login not implemented") + return nil, status.Error(codes.Unimplemented, "method Login not implemented") } func (UnimplementedDaemonServiceServer) WaitSSOLogin(context.Context, *WaitSSOLoginRequest) (*WaitSSOLoginResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method WaitSSOLogin not implemented") + return nil, status.Error(codes.Unimplemented, "method WaitSSOLogin not implemented") } func (UnimplementedDaemonServiceServer) Up(context.Context, *UpRequest) (*UpResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Up not implemented") + return nil, status.Error(codes.Unimplemented, "method Up not implemented") } func (UnimplementedDaemonServiceServer) Status(context.Context, *StatusRequest) (*StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") + return nil, status.Error(codes.Unimplemented, "method Status not implemented") } func (UnimplementedDaemonServiceServer) Down(context.Context, *DownRequest) (*DownResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Down not implemented") + return nil, status.Error(codes.Unimplemented, "method Down not implemented") } func (UnimplementedDaemonServiceServer) GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetConfig not implemented") + return nil, status.Error(codes.Unimplemented, "method GetConfig not implemented") } func (UnimplementedDaemonServiceServer) ListNetworks(context.Context, *ListNetworksRequest) (*ListNetworksResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListNetworks not implemented") + return nil, status.Error(codes.Unimplemented, "method ListNetworks not implemented") } func (UnimplementedDaemonServiceServer) SelectNetworks(context.Context, *SelectNetworksRequest) (*SelectNetworksResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SelectNetworks not implemented") + return nil, status.Error(codes.Unimplemented, "method SelectNetworks not implemented") } func (UnimplementedDaemonServiceServer) DeselectNetworks(context.Context, *SelectNetworksRequest) (*SelectNetworksResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeselectNetworks not implemented") + return nil, status.Error(codes.Unimplemented, "method DeselectNetworks not implemented") } func (UnimplementedDaemonServiceServer) ForwardingRules(context.Context, *EmptyRequest) (*ForwardingRulesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ForwardingRules not implemented") + return nil, status.Error(codes.Unimplemented, "method ForwardingRules not implemented") } func (UnimplementedDaemonServiceServer) DebugBundle(context.Context, *DebugBundleRequest) (*DebugBundleResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DebugBundle not implemented") + return nil, status.Error(codes.Unimplemented, "method DebugBundle not implemented") } func (UnimplementedDaemonServiceServer) GetLogLevel(context.Context, *GetLogLevelRequest) (*GetLogLevelResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetLogLevel not implemented") + return nil, status.Error(codes.Unimplemented, "method GetLogLevel not implemented") } func (UnimplementedDaemonServiceServer) SetLogLevel(context.Context, *SetLogLevelRequest) (*SetLogLevelResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetLogLevel not implemented") + return nil, status.Error(codes.Unimplemented, "method SetLogLevel not implemented") } func (UnimplementedDaemonServiceServer) ListStates(context.Context, *ListStatesRequest) (*ListStatesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListStates not implemented") + return nil, status.Error(codes.Unimplemented, "method ListStates not implemented") } func (UnimplementedDaemonServiceServer) CleanState(context.Context, *CleanStateRequest) (*CleanStateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CleanState not implemented") + return nil, status.Error(codes.Unimplemented, "method CleanState not implemented") } func (UnimplementedDaemonServiceServer) DeleteState(context.Context, *DeleteStateRequest) (*DeleteStateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteState not implemented") + return nil, status.Error(codes.Unimplemented, "method DeleteState not implemented") } func (UnimplementedDaemonServiceServer) SetSyncResponsePersistence(context.Context, *SetSyncResponsePersistenceRequest) (*SetSyncResponsePersistenceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetSyncResponsePersistence not implemented") + return nil, status.Error(codes.Unimplemented, "method SetSyncResponsePersistence not implemented") } func (UnimplementedDaemonServiceServer) TracePacket(context.Context, *TracePacketRequest) (*TracePacketResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method TracePacket not implemented") + return nil, status.Error(codes.Unimplemented, "method TracePacket not implemented") } -func (UnimplementedDaemonServiceServer) SubscribeEvents(*SubscribeRequest, DaemonService_SubscribeEventsServer) error { - return status.Errorf(codes.Unimplemented, "method SubscribeEvents not implemented") +func (UnimplementedDaemonServiceServer) SubscribeEvents(*SubscribeRequest, grpc.ServerStreamingServer[SystemEvent]) error { + return status.Error(codes.Unimplemented, "method SubscribeEvents not implemented") } func (UnimplementedDaemonServiceServer) GetEvents(context.Context, *GetEventsRequest) (*GetEventsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetEvents not implemented") + return nil, status.Error(codes.Unimplemented, "method GetEvents not implemented") } func (UnimplementedDaemonServiceServer) SwitchProfile(context.Context, *SwitchProfileRequest) (*SwitchProfileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SwitchProfile not implemented") + return nil, status.Error(codes.Unimplemented, "method SwitchProfile not implemented") } func (UnimplementedDaemonServiceServer) SetConfig(context.Context, *SetConfigRequest) (*SetConfigResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetConfig not implemented") + return nil, status.Error(codes.Unimplemented, "method SetConfig not implemented") } func (UnimplementedDaemonServiceServer) AddProfile(context.Context, *AddProfileRequest) (*AddProfileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddProfile not implemented") + return nil, status.Error(codes.Unimplemented, "method AddProfile not implemented") } func (UnimplementedDaemonServiceServer) RemoveProfile(context.Context, *RemoveProfileRequest) (*RemoveProfileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RemoveProfile not implemented") + return nil, status.Error(codes.Unimplemented, "method RemoveProfile not implemented") } func (UnimplementedDaemonServiceServer) ListProfiles(context.Context, *ListProfilesRequest) (*ListProfilesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListProfiles not implemented") + return nil, status.Error(codes.Unimplemented, "method ListProfiles not implemented") } func (UnimplementedDaemonServiceServer) GetActiveProfile(context.Context, *GetActiveProfileRequest) (*GetActiveProfileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetActiveProfile not implemented") + return nil, status.Error(codes.Unimplemented, "method GetActiveProfile not implemented") } func (UnimplementedDaemonServiceServer) Logout(context.Context, *LogoutRequest) (*LogoutResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Logout not implemented") + return nil, status.Error(codes.Unimplemented, "method Logout not implemented") } func (UnimplementedDaemonServiceServer) GetFeatures(context.Context, *GetFeaturesRequest) (*GetFeaturesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetFeatures not implemented") + return nil, status.Error(codes.Unimplemented, "method GetFeatures not implemented") } func (UnimplementedDaemonServiceServer) TriggerUpdate(context.Context, *TriggerUpdateRequest) (*TriggerUpdateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method TriggerUpdate not implemented") + return nil, status.Error(codes.Unimplemented, "method TriggerUpdate not implemented") } func (UnimplementedDaemonServiceServer) GetPeerSSHHostKey(context.Context, *GetPeerSSHHostKeyRequest) (*GetPeerSSHHostKeyResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPeerSSHHostKey not implemented") + return nil, status.Error(codes.Unimplemented, "method GetPeerSSHHostKey not implemented") } func (UnimplementedDaemonServiceServer) RequestJWTAuth(context.Context, *RequestJWTAuthRequest) (*RequestJWTAuthResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RequestJWTAuth not implemented") + return nil, status.Error(codes.Unimplemented, "method RequestJWTAuth not implemented") } func (UnimplementedDaemonServiceServer) WaitJWTToken(context.Context, *WaitJWTTokenRequest) (*WaitJWTTokenResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method WaitJWTToken not implemented") + return nil, status.Error(codes.Unimplemented, "method WaitJWTToken not implemented") } func (UnimplementedDaemonServiceServer) StartCPUProfile(context.Context, *StartCPUProfileRequest) (*StartCPUProfileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method StartCPUProfile not implemented") + return nil, status.Error(codes.Unimplemented, "method StartCPUProfile not implemented") } func (UnimplementedDaemonServiceServer) StopCPUProfile(context.Context, *StopCPUProfileRequest) (*StopCPUProfileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method StopCPUProfile not implemented") -} -func (UnimplementedDaemonServiceServer) NotifyOSLifecycle(context.Context, *OSLifecycleRequest) (*OSLifecycleResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NotifyOSLifecycle not implemented") + return nil, status.Error(codes.Unimplemented, "method StopCPUProfile not implemented") } func (UnimplementedDaemonServiceServer) GetInstallerResult(context.Context, *InstallerResultRequest) (*InstallerResultResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetInstallerResult not implemented") + return nil, status.Error(codes.Unimplemented, "method GetInstallerResult not implemented") } -func (UnimplementedDaemonServiceServer) ExposeService(*ExposeServiceRequest, DaemonService_ExposeServiceServer) error { - return status.Errorf(codes.Unimplemented, "method ExposeService not implemented") +func (UnimplementedDaemonServiceServer) ExposeService(*ExposeServiceRequest, grpc.ServerStreamingServer[ExposeServiceEvent]) error { + return status.Error(codes.Unimplemented, "method ExposeService not implemented") } func (UnimplementedDaemonServiceServer) mustEmbedUnimplementedDaemonServiceServer() {} +func (UnimplementedDaemonServiceServer) testEmbeddedByValue() {} // UnsafeDaemonServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to DaemonServiceServer will @@ -665,6 +706,13 @@ type UnsafeDaemonServiceServer interface { } func RegisterDaemonServiceServer(s grpc.ServiceRegistrar, srv DaemonServiceServer) { + // If the following call panics, it indicates UnimplementedDaemonServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&DaemonService_ServiceDesc, srv) } @@ -678,7 +726,7 @@ func _DaemonService_Login_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/Login", + FullMethod: DaemonService_Login_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Login(ctx, req.(*LoginRequest)) @@ -696,7 +744,7 @@ func _DaemonService_WaitSSOLogin_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/WaitSSOLogin", + FullMethod: DaemonService_WaitSSOLogin_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).WaitSSOLogin(ctx, req.(*WaitSSOLoginRequest)) @@ -714,7 +762,7 @@ func _DaemonService_Up_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/Up", + FullMethod: DaemonService_Up_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Up(ctx, req.(*UpRequest)) @@ -732,7 +780,7 @@ func _DaemonService_Status_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/Status", + FullMethod: DaemonService_Status_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Status(ctx, req.(*StatusRequest)) @@ -750,7 +798,7 @@ func _DaemonService_Down_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/Down", + FullMethod: DaemonService_Down_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Down(ctx, req.(*DownRequest)) @@ -768,7 +816,7 @@ func _DaemonService_GetConfig_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/GetConfig", + FullMethod: DaemonService_GetConfig_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetConfig(ctx, req.(*GetConfigRequest)) @@ -786,7 +834,7 @@ func _DaemonService_ListNetworks_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/ListNetworks", + FullMethod: DaemonService_ListNetworks_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).ListNetworks(ctx, req.(*ListNetworksRequest)) @@ -804,7 +852,7 @@ func _DaemonService_SelectNetworks_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/SelectNetworks", + FullMethod: DaemonService_SelectNetworks_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SelectNetworks(ctx, req.(*SelectNetworksRequest)) @@ -822,7 +870,7 @@ func _DaemonService_DeselectNetworks_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/DeselectNetworks", + FullMethod: DaemonService_DeselectNetworks_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).DeselectNetworks(ctx, req.(*SelectNetworksRequest)) @@ -840,7 +888,7 @@ func _DaemonService_ForwardingRules_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/ForwardingRules", + FullMethod: DaemonService_ForwardingRules_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).ForwardingRules(ctx, req.(*EmptyRequest)) @@ -858,7 +906,7 @@ func _DaemonService_DebugBundle_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/DebugBundle", + FullMethod: DaemonService_DebugBundle_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).DebugBundle(ctx, req.(*DebugBundleRequest)) @@ -876,7 +924,7 @@ func _DaemonService_GetLogLevel_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/GetLogLevel", + FullMethod: DaemonService_GetLogLevel_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetLogLevel(ctx, req.(*GetLogLevelRequest)) @@ -894,7 +942,7 @@ func _DaemonService_SetLogLevel_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/SetLogLevel", + FullMethod: DaemonService_SetLogLevel_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SetLogLevel(ctx, req.(*SetLogLevelRequest)) @@ -912,7 +960,7 @@ func _DaemonService_ListStates_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/ListStates", + FullMethod: DaemonService_ListStates_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).ListStates(ctx, req.(*ListStatesRequest)) @@ -930,7 +978,7 @@ func _DaemonService_CleanState_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/CleanState", + FullMethod: DaemonService_CleanState_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).CleanState(ctx, req.(*CleanStateRequest)) @@ -948,7 +996,7 @@ func _DaemonService_DeleteState_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/DeleteState", + FullMethod: DaemonService_DeleteState_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).DeleteState(ctx, req.(*DeleteStateRequest)) @@ -966,7 +1014,7 @@ func _DaemonService_SetSyncResponsePersistence_Handler(srv interface{}, ctx cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/SetSyncResponsePersistence", + FullMethod: DaemonService_SetSyncResponsePersistence_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SetSyncResponsePersistence(ctx, req.(*SetSyncResponsePersistenceRequest)) @@ -984,7 +1032,7 @@ func _DaemonService_TracePacket_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/TracePacket", + FullMethod: DaemonService_TracePacket_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).TracePacket(ctx, req.(*TracePacketRequest)) @@ -997,21 +1045,11 @@ func _DaemonService_SubscribeEvents_Handler(srv interface{}, stream grpc.ServerS if err := stream.RecvMsg(m); err != nil { return err } - return srv.(DaemonServiceServer).SubscribeEvents(m, &daemonServiceSubscribeEventsServer{stream}) + return srv.(DaemonServiceServer).SubscribeEvents(m, &grpc.GenericServerStream[SubscribeRequest, SystemEvent]{ServerStream: stream}) } -type DaemonService_SubscribeEventsServer interface { - Send(*SystemEvent) error - grpc.ServerStream -} - -type daemonServiceSubscribeEventsServer struct { - grpc.ServerStream -} - -func (x *daemonServiceSubscribeEventsServer) Send(m *SystemEvent) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type DaemonService_SubscribeEventsServer = grpc.ServerStreamingServer[SystemEvent] func _DaemonService_GetEvents_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetEventsRequest) @@ -1023,7 +1061,7 @@ func _DaemonService_GetEvents_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/GetEvents", + FullMethod: DaemonService_GetEvents_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetEvents(ctx, req.(*GetEventsRequest)) @@ -1041,7 +1079,7 @@ func _DaemonService_SwitchProfile_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/SwitchProfile", + FullMethod: DaemonService_SwitchProfile_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SwitchProfile(ctx, req.(*SwitchProfileRequest)) @@ -1059,7 +1097,7 @@ func _DaemonService_SetConfig_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/SetConfig", + FullMethod: DaemonService_SetConfig_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).SetConfig(ctx, req.(*SetConfigRequest)) @@ -1077,7 +1115,7 @@ func _DaemonService_AddProfile_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/AddProfile", + FullMethod: DaemonService_AddProfile_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).AddProfile(ctx, req.(*AddProfileRequest)) @@ -1095,7 +1133,7 @@ func _DaemonService_RemoveProfile_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/RemoveProfile", + FullMethod: DaemonService_RemoveProfile_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).RemoveProfile(ctx, req.(*RemoveProfileRequest)) @@ -1113,7 +1151,7 @@ func _DaemonService_ListProfiles_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/ListProfiles", + FullMethod: DaemonService_ListProfiles_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).ListProfiles(ctx, req.(*ListProfilesRequest)) @@ -1131,7 +1169,7 @@ func _DaemonService_GetActiveProfile_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/GetActiveProfile", + FullMethod: DaemonService_GetActiveProfile_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetActiveProfile(ctx, req.(*GetActiveProfileRequest)) @@ -1149,7 +1187,7 @@ func _DaemonService_Logout_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/Logout", + FullMethod: DaemonService_Logout_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).Logout(ctx, req.(*LogoutRequest)) @@ -1167,7 +1205,7 @@ func _DaemonService_GetFeatures_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/GetFeatures", + FullMethod: DaemonService_GetFeatures_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetFeatures(ctx, req.(*GetFeaturesRequest)) @@ -1185,7 +1223,7 @@ func _DaemonService_TriggerUpdate_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/TriggerUpdate", + FullMethod: DaemonService_TriggerUpdate_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).TriggerUpdate(ctx, req.(*TriggerUpdateRequest)) @@ -1203,7 +1241,7 @@ func _DaemonService_GetPeerSSHHostKey_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/GetPeerSSHHostKey", + FullMethod: DaemonService_GetPeerSSHHostKey_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetPeerSSHHostKey(ctx, req.(*GetPeerSSHHostKeyRequest)) @@ -1221,7 +1259,7 @@ func _DaemonService_RequestJWTAuth_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/RequestJWTAuth", + FullMethod: DaemonService_RequestJWTAuth_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).RequestJWTAuth(ctx, req.(*RequestJWTAuthRequest)) @@ -1239,7 +1277,7 @@ func _DaemonService_WaitJWTToken_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/WaitJWTToken", + FullMethod: DaemonService_WaitJWTToken_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).WaitJWTToken(ctx, req.(*WaitJWTTokenRequest)) @@ -1257,7 +1295,7 @@ func _DaemonService_StartCPUProfile_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/StartCPUProfile", + FullMethod: DaemonService_StartCPUProfile_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).StartCPUProfile(ctx, req.(*StartCPUProfileRequest)) @@ -1275,7 +1313,7 @@ func _DaemonService_StopCPUProfile_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/StopCPUProfile", + FullMethod: DaemonService_StopCPUProfile_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).StopCPUProfile(ctx, req.(*StopCPUProfileRequest)) @@ -1283,24 +1321,6 @@ func _DaemonService_StopCPUProfile_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } -func _DaemonService_NotifyOSLifecycle_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(OSLifecycleRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DaemonServiceServer).NotifyOSLifecycle(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/daemon.DaemonService/NotifyOSLifecycle", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DaemonServiceServer).NotifyOSLifecycle(ctx, req.(*OSLifecycleRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _DaemonService_GetInstallerResult_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(InstallerResultRequest) if err := dec(in); err != nil { @@ -1311,7 +1331,7 @@ func _DaemonService_GetInstallerResult_Handler(srv interface{}, ctx context.Cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/daemon.DaemonService/GetInstallerResult", + FullMethod: DaemonService_GetInstallerResult_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DaemonServiceServer).GetInstallerResult(ctx, req.(*InstallerResultRequest)) @@ -1324,21 +1344,11 @@ func _DaemonService_ExposeService_Handler(srv interface{}, stream grpc.ServerStr if err := stream.RecvMsg(m); err != nil { return err } - return srv.(DaemonServiceServer).ExposeService(m, &daemonServiceExposeServiceServer{stream}) + return srv.(DaemonServiceServer).ExposeService(m, &grpc.GenericServerStream[ExposeServiceRequest, ExposeServiceEvent]{ServerStream: stream}) } -type DaemonService_ExposeServiceServer interface { - Send(*ExposeServiceEvent) error - grpc.ServerStream -} - -type daemonServiceExposeServiceServer struct { - grpc.ServerStream -} - -func (x *daemonServiceExposeServiceServer) Send(m *ExposeServiceEvent) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type DaemonService_ExposeServiceServer = grpc.ServerStreamingServer[ExposeServiceEvent] // DaemonService_ServiceDesc is the grpc.ServiceDesc for DaemonService service. // It's only intended for direct use with grpc.RegisterService, @@ -1479,10 +1489,6 @@ var DaemonService_ServiceDesc = grpc.ServiceDesc{ MethodName: "StopCPUProfile", Handler: _DaemonService_StopCPUProfile_Handler, }, - { - MethodName: "NotifyOSLifecycle", - Handler: _DaemonService_NotifyOSLifecycle_Handler, - }, { MethodName: "GetInstallerResult", Handler: _DaemonService_GetInstallerResult_Handler, diff --git a/client/server/server.go b/client/server/server.go index 70e4c342f..e70b83bf8 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -120,6 +120,7 @@ func New(ctx context.Context, logFile string, configFile string, profilesDisable } agent := &serverAgent{s} s.sleepHandler = sleephandler.New(agent) + s.startSleepDetector() return s } diff --git a/client/server/sleep.go b/client/server/sleep.go index 7a83c75a6..877ad9690 100644 --- a/client/server/sleep.go +++ b/client/server/sleep.go @@ -2,13 +2,18 @@ package server import ( "context" + "os" + "strconv" log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/client/internal" + "github.com/netbirdio/netbird/client/internal/sleep" "github.com/netbirdio/netbird/client/proto" ) +const envDisableSleepDetector = "NB_DISABLE_SLEEP_DETECTOR" + // serverAgent adapts Server to the handler.Agent and handler.StatusChecker interfaces type serverAgent struct { s *Server @@ -28,19 +33,61 @@ func (a *serverAgent) Status() (internal.StatusType, error) { return internal.CtxGetState(a.s.rootCtx).Status() } -// NotifyOSLifecycle handles operating system lifecycle events by executing appropriate logic based on the request type. -func (s *Server) NotifyOSLifecycle(callerCtx context.Context, req *proto.OSLifecycleRequest) (*proto.OSLifecycleResponse, error) { - switch req.GetType() { - case proto.OSLifecycleRequest_WAKEUP: - if err := s.sleepHandler.HandleWakeUp(callerCtx); err != nil { - return &proto.OSLifecycleResponse{}, err - } - case proto.OSLifecycleRequest_SLEEP: - if err := s.sleepHandler.HandleSleep(callerCtx); err != nil { - return &proto.OSLifecycleResponse{}, err - } - default: - log.Errorf("unknown OSLifecycleRequest type: %v", req.GetType()) +// startSleepDetector starts the OS sleep/wake detector and forwards events to +// the sleep handler. On platforms without a supported detector the attempt +// logs a warning and returns. Setting NB_DISABLE_SLEEP_DETECTOR=true skips +// registration entirely. +func (s *Server) startSleepDetector() { + if sleepDetectorDisabled() { + log.Info("sleep detection disabled via " + envDisableSleepDetector) + return } - return &proto.OSLifecycleResponse{}, nil + + svc, err := sleep.New() + if err != nil { + log.Warnf("failed to initialize sleep detection: %v", err) + return + } + + err = svc.Register(func(event sleep.EventType) { + switch event { + case sleep.EventTypeSleep: + log.Info("handling sleep event") + if err := s.sleepHandler.HandleSleep(s.rootCtx); err != nil { + log.Errorf("failed to handle sleep event: %v", err) + } + case sleep.EventTypeWakeUp: + log.Info("handling wakeup event") + if err := s.sleepHandler.HandleWakeUp(s.rootCtx); err != nil { + log.Errorf("failed to handle wakeup event: %v", err) + } + } + }) + if err != nil { + log.Errorf("failed to register sleep detector: %v", err) + return + } + + log.Info("sleep detection service initialized") + + go func() { + <-s.rootCtx.Done() + log.Info("stopping sleep event listener") + if err := svc.Deregister(); err != nil { + log.Errorf("failed to deregister sleep detector: %v", err) + } + }() +} + +func sleepDetectorDisabled() bool { + val := os.Getenv(envDisableSleepDetector) + if val == "" { + return false + } + disabled, err := strconv.ParseBool(val) + if err != nil { + log.Warnf("failed to parse %s=%q: %v", envDisableSleepDetector, val, err) + return false + } + return disabled } diff --git a/client/ui/client_ui.go b/client/ui/client_ui.go index 0a4687eda..28f98ae59 100644 --- a/client/ui/client_ui.go +++ b/client/ui/client_ui.go @@ -38,7 +38,6 @@ import ( "github.com/netbirdio/netbird/client/iface" "github.com/netbirdio/netbird/client/internal" "github.com/netbirdio/netbird/client/internal/profilemanager" - "github.com/netbirdio/netbird/client/internal/sleep" "github.com/netbirdio/netbird/client/proto" "github.com/netbirdio/netbird/client/ui/desktop" "github.com/netbirdio/netbird/client/ui/event" @@ -1149,9 +1148,6 @@ func (s *serviceClient) onTrayReady() { go s.eventManager.Start(s.ctx) go s.eventHandler.listen(s.ctx) - - // Start sleep detection listener - go s.startSleepListener() } func (s *serviceClient) attachOutput(cmd *exec.Cmd) *os.File { @@ -1212,62 +1208,6 @@ func (s *serviceClient) getSrvClient(timeout time.Duration) (proto.DaemonService return s.conn, nil } -// startSleepListener initializes the sleep detection service and listens for sleep events -func (s *serviceClient) startSleepListener() { - sleepService, err := sleep.New() - if err != nil { - log.Warnf("%v", err) - return - } - - if err := sleepService.Register(s.handleSleepEvents); err != nil { - log.Errorf("failed to start sleep detection: %v", err) - return - } - - log.Info("sleep detection service initialized") - - // Cleanup on context cancellation - go func() { - <-s.ctx.Done() - log.Info("stopping sleep event listener") - if err := sleepService.Deregister(); err != nil { - log.Errorf("failed to deregister sleep detection: %v", err) - } - }() -} - -// handleSleepEvents sends a sleep notification to the daemon via gRPC -func (s *serviceClient) handleSleepEvents(event sleep.EventType) { - conn, err := s.getSrvClient(0) - if err != nil { - log.Errorf("failed to get daemon client for sleep notification: %v", err) - return - } - - req := &proto.OSLifecycleRequest{} - - switch event { - case sleep.EventTypeWakeUp: - log.Infof("handle wakeup event: %v", event) - req.Type = proto.OSLifecycleRequest_WAKEUP - case sleep.EventTypeSleep: - log.Infof("handle sleep event: %v", event) - req.Type = proto.OSLifecycleRequest_SLEEP - default: - log.Infof("unknown event: %v", event) - return - } - - _, err = conn.NotifyOSLifecycle(s.ctx, req) - if err != nil { - log.Errorf("failed to notify daemon about os lifecycle notification: %v", err) - return - } - - log.Info("successfully notified daemon about os lifecycle") -} - // setSettingsEnabled enables or disables the settings menu based on the provided state func (s *serviceClient) setSettingsEnabled(enabled bool) { if s.mSettings != nil { diff --git a/go.mod b/go.mod index 1958a3278..8e6a481d2 100644 --- a/go.mod +++ b/go.mod @@ -47,6 +47,7 @@ require ( github.com/crowdsecurity/go-cs-bouncer v0.0.21 github.com/dexidp/dex v0.0.0-00010101000000-000000000000 github.com/dexidp/dex/api/v2 v2.4.0 + github.com/ebitengine/purego v0.8.4 github.com/eko/gocache/lib/v4 v4.2.0 github.com/eko/gocache/store/go_cache/v4 v4.2.2 github.com/eko/gocache/store/redis/v4 v4.2.2 @@ -179,7 +180,6 @@ require ( github.com/docker/docker v28.0.1+incompatible // indirect github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/ebitengine/purego v0.8.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fredbi/uri v1.1.1 // indirect github.com/fyne-io/gl-js v0.2.0 // indirect From 28fe26637b3d94b45444cbe5fa9cab921257cf09 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 29 Apr 2026 18:01:07 +0900 Subject: [PATCH 356/374] [client] Fix Windows installer upgrade detection for pre-0.70.1 installs (#6025) --- client/installer.nsis | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/client/installer.nsis b/client/installer.nsis index 8b2b8ea39..6b8d3258e 100644 --- a/client/installer.nsis +++ b/client/installer.nsis @@ -200,9 +200,17 @@ Pop $0 !macroend Function .onInit -SetRegView 64 StrCpy $INSTDIR "${INSTALL_DIR}" + +; Pre-0.70.1 installers ran without SetRegView, so their uninstall keys live +; in the 32-bit view. Fall back to it so upgrades still find them. +SetRegView 64 ReadRegStr $R0 HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\$(^NAME)" "UninstallString" +${If} $R0 == "" + SetRegView 32 + ReadRegStr $R0 HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\$(^NAME)" "UninstallString" + SetRegView 64 +${EndIf} ${If} $R0 != "" # if silent install jump to uninstall step IfSilent uninstall From 7eba5dafd8bbba9e5a0c4e8bd34d14dfda565db9 Mon Sep 17 00:00:00 2001 From: Nicolas Frati Date: Wed, 29 Apr 2026 11:28:55 +0200 Subject: [PATCH 357/374] [misc] Add comment automation on release workflow for PRs (#6016) * feat: add comment automation on release workflow for PRs * update action permissions --- .github/workflows/release.yml | 156 ++++++++++++++++++++++++++++++++-- 1 file changed, 150 insertions(+), 6 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 826c05ff3..081bcafc4 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -115,6 +115,12 @@ jobs: release: runs-on: ubuntu-latest-m + outputs: + release_artifact_url: ${{ steps.upload_release.outputs.artifact-url }} + linux_packages_artifact_url: ${{ steps.upload_linux_packages.outputs.artifact-url }} + windows_packages_artifact_url: ${{ steps.upload_windows_packages.outputs.artifact-url }} + macos_packages_artifact_url: ${{ steps.upload_macos_packages.outputs.artifact-url }} + ghcr_images: ${{ steps.tag_and_push_images.outputs.images_markdown }} env: flags: "" steps: @@ -213,10 +219,13 @@ jobs: if: always() run: rm -f /tmp/gpg-rpm-signing-key.asc - name: Tag and push images (amd64 only) + id: tag_and_push_images if: | (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository) || (github.event_name == 'push' && github.ref == 'refs/heads/main') run: | + set -euo pipefail + resolve_tags() { if [[ "${{ github.event_name }}" == "pull_request" ]]; then echo "pr-${{ github.event.pull_request.number }}" @@ -225,6 +234,17 @@ jobs: fi } + ghcr_package_url() { + local image="$1" package encoded_package + package="${image#ghcr.io/}" + package="${package#*/}" + package="${package%%:*}" + encoded_package="${package//\//%2F}" + echo "https://github.com/orgs/netbirdio/packages/container/package/${encoded_package}" + } + + image_refs=() + tag_and_push() { local src="$1" img_name tag dst img_name="${src%%:*}" @@ -233,35 +253,56 @@ jobs: echo "Tagging ${src} -> ${dst}" docker tag "$src" "$dst" docker push "$dst" + image_refs+=("$dst") done } - export -f tag_and_push resolve_tags + cat > /tmp/goreleaser-artifacts.json <<'JSON' + ${{ steps.goreleaser.outputs.artifacts }} + JSON - echo '${{ steps.goreleaser.outputs.artifacts }}' | \ - jq -r '.[] | select(.type == "Docker Image") | select(.goarch == "amd64") | .name' | \ - grep '^ghcr.io/' | while read -r SRC; do - tag_and_push "$SRC" - done + mapfile -t src_images < <( + jq -r '.[] | select(.type == "Docker Image") | select(.goarch == "amd64") | .name | select(startswith("ghcr.io/"))' /tmp/goreleaser-artifacts.json + ) + + for src in "${src_images[@]}"; do + tag_and_push "$src" + done + + { + echo "images_markdown<> "$GITHUB_OUTPUT" - name: upload non tags for debug purposes + id: upload_release uses: actions/upload-artifact@v4 with: name: release path: dist/ retention-days: 7 - name: upload linux packages + id: upload_linux_packages uses: actions/upload-artifact@v4 with: name: linux-packages path: dist/netbird_linux** retention-days: 7 - name: upload windows packages + id: upload_windows_packages uses: actions/upload-artifact@v4 with: name: windows-packages path: dist/netbird_windows** retention-days: 7 - name: upload macos packages + id: upload_macos_packages uses: actions/upload-artifact@v4 with: name: macos-packages @@ -270,6 +311,8 @@ jobs: release_ui: runs-on: ubuntu-latest + outputs: + release_ui_artifact_url: ${{ steps.upload_release_ui.outputs.artifact-url }} steps: - name: Parse semver string id: semver_parser @@ -360,6 +403,7 @@ jobs: if: always() run: rm -f /tmp/gpg-rpm-signing-key.asc - name: upload non tags for debug purposes + id: upload_release_ui uses: actions/upload-artifact@v4 with: name: release-ui @@ -368,6 +412,8 @@ jobs: release_ui_darwin: runs-on: macos-latest + outputs: + release_ui_darwin_artifact_url: ${{ steps.upload_release_ui_darwin.outputs.artifact-url }} steps: - if: ${{ !startsWith(github.ref, 'refs/tags/v') }} run: echo "flags=--snapshot" >> $GITHUB_ENV @@ -402,12 +448,110 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: upload non tags for debug purposes + id: upload_release_ui_darwin uses: actions/upload-artifact@v4 with: name: release-ui-darwin path: dist/ retention-days: 3 + comment_release_artifacts: + name: Comment release artifacts + runs-on: ubuntu-latest + needs: [release, release_ui, release_ui_darwin] + if: ${{ always() && github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository }} + permissions: + contents: read + issues: write + pull-requests: write + steps: + - name: Create or update PR comment + uses: actions/github-script@v7 + env: + RELEASE_RESULT: ${{ needs.release.result }} + RELEASE_UI_RESULT: ${{ needs.release_ui.result }} + RELEASE_UI_DARWIN_RESULT: ${{ needs.release_ui_darwin.result }} + RELEASE_ARTIFACT_URL: ${{ needs.release.outputs.release_artifact_url }} + LINUX_PACKAGES_ARTIFACT_URL: ${{ needs.release.outputs.linux_packages_artifact_url }} + WINDOWS_PACKAGES_ARTIFACT_URL: ${{ needs.release.outputs.windows_packages_artifact_url }} + MACOS_PACKAGES_ARTIFACT_URL: ${{ needs.release.outputs.macos_packages_artifact_url }} + RELEASE_UI_ARTIFACT_URL: ${{ needs.release_ui.outputs.release_ui_artifact_url }} + RELEASE_UI_DARWIN_ARTIFACT_URL: ${{ needs.release_ui_darwin.outputs.release_ui_darwin_artifact_url }} + GHCR_IMAGES_MARKDOWN: ${{ needs.release.outputs.ghcr_images }} + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const marker = ''; + const { owner, repo } = context.repo; + const issue_number = context.payload.pull_request.number; + const runUrl = `${context.serverUrl}/${owner}/${repo}/actions/runs/${context.runId}`; + const shortSha = context.payload.pull_request.head.sha.slice(0, 7); + + const artifactCell = (url, result) => { + if (url) return `[Download](${url})`; + return result && result !== 'success' ? `_Not available (${result})_` : '_Not available_'; + }; + + const artifacts = [ + ['All release artifacts', process.env.RELEASE_ARTIFACT_URL, process.env.RELEASE_RESULT], + ['Linux packages', process.env.LINUX_PACKAGES_ARTIFACT_URL, process.env.RELEASE_RESULT], + ['Windows packages', process.env.WINDOWS_PACKAGES_ARTIFACT_URL, process.env.RELEASE_RESULT], + ['macOS packages', process.env.MACOS_PACKAGES_ARTIFACT_URL, process.env.RELEASE_RESULT], + ['UI artifacts', process.env.RELEASE_UI_ARTIFACT_URL, process.env.RELEASE_UI_RESULT], + ['UI macOS artifacts', process.env.RELEASE_UI_DARWIN_ARTIFACT_URL, process.env.RELEASE_UI_DARWIN_RESULT], + ]; + + const artifactRows = artifacts + .map(([name, url, result]) => `| ${name} | ${artifactCell(url, result)} |`) + .join('\n'); + + const ghcrImages = (process.env.GHCR_IMAGES_MARKDOWN || '').trim() || '_No GHCR images were pushed._'; + + const body = [ + marker, + '## Release artifacts', + '', + `Built for PR head \`${shortSha}\` in [workflow run #${process.env.GITHUB_RUN_NUMBER}](${runUrl}).`, + '', + '| Artifact | Link |', + '| --- | --- |', + artifactRows, + '', + '### GHCR images (amd64)', + ghcrImages, + '', + '_This comment is updated by the Release workflow. Artifact links expire according to the workflow retention policy._', + ].join('\n'); + + const comments = await github.paginate(github.rest.issues.listComments, { + owner, + repo, + issue_number, + per_page: 100, + }); + + const previous = comments.find(comment => + comment.user?.type === 'Bot' && comment.body?.includes(marker) + ); + + if (previous) { + await github.rest.issues.updateComment({ + owner, + repo, + comment_id: previous.id, + body, + }); + core.info(`Updated release artifacts comment ${previous.id}`); + } else { + const { data } = await github.rest.issues.createComment({ + owner, + repo, + issue_number, + body, + }); + core.info(`Created release artifacts comment ${data.id}`); + } + trigger_signer: runs-on: ubuntu-latest needs: [release, release_ui, release_ui_darwin] From ad93dcf9807e46ac648ff67b0ab994696b8cb6fc Mon Sep 17 00:00:00 2001 From: shuuri-labs <61762328+shuuri-labs@users.noreply.github.com> Date: Wed, 29 Apr 2026 13:14:46 +0200 Subject: [PATCH 358/374] [client] Enable UI autostart for silent and MSI installs (#6026) * fix(client): enable UI autostart for silent and MSI installs The MSI installer had no autostart logic and the EXE silent installer skipped the autostart page, leaving the registry entry unwritten. This caused the NetBird UI tray to not start at login after RMM deployments. Add an AUTOSTART property (default: 1) to the MSI that writes the HKLM Run key, and initialize AutostartEnabled in the NSIS .onInit so silent installs match the interactive default. * add real guid for NetBirdAutoStart component --- client/installer.nsis | 2 ++ client/netbird.wxs | 15 +++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/client/installer.nsis b/client/installer.nsis index 6b8d3258e..63bff1c5b 100644 --- a/client/installer.nsis +++ b/client/installer.nsis @@ -201,6 +201,8 @@ Pop $0 Function .onInit StrCpy $INSTDIR "${INSTALL_DIR}" +; Default autostart to enabled so silent installs (/S) match the interactive default +StrCpy $AutostartEnabled "1" ; Pre-0.70.1 installers ran without SetRegView, so their uninstall keys live ; in the 32-bit view. Fall back to it so upgrades still find them. diff --git a/client/netbird.wxs b/client/netbird.wxs index 23aa250f4..2849bc6b9 100644 --- a/client/netbird.wxs +++ b/client/netbird.wxs @@ -13,6 +13,9 @@ + + + @@ -63,9 +66,21 @@ + + + + AUTOSTART = "1" + + + + + + From df197d5001c19dbeedb6e4bb44f51a6d298b3422 Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Wed, 29 Apr 2026 15:04:27 +0300 Subject: [PATCH 359/374] [management] Prevent JWT reuse during peer login (#6002) --- client/cmd/testutil_test.go | 2 +- client/internal/engine_test.go | 2 +- client/server/server_test.go | 2 +- management/internals/server/boot.go | 2 +- management/internals/server/controllers.go | 7 ++ management/internals/shared/grpc/server.go | 37 +++++++++- management/server/auth/session.go | 61 ++++++++++++++++ management/server/auth/session_test.go | 82 ++++++++++++++++++++++ management/server/management_proto_test.go | 2 +- management/server/management_test.go | 1 + shared/management/client/client_test.go | 2 +- 11 files changed, 192 insertions(+), 8 deletions(-) create mode 100644 management/server/auth/session.go create mode 100644 management/server/auth/session_test.go diff --git a/client/cmd/testutil_test.go b/client/cmd/testutil_test.go index d7564c353..fd1007bb4 100644 --- a/client/cmd/testutil_test.go +++ b/client/cmd/testutil_test.go @@ -135,7 +135,7 @@ func startManagement(t *testing.T, config *config.Config, testFile string) (*grp if err != nil { t.Fatal(err) } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &mgmt.MockIntegratedValidator{}, networkMapController, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &mgmt.MockIntegratedValidator{}, networkMapController, nil, nil) if err != nil { t.Fatal(err) } diff --git a/client/internal/engine_test.go b/client/internal/engine_test.go index 9fa4e51b2..f4c5be70a 100644 --- a/client/internal/engine_test.go +++ b/client/internal/engine_test.go @@ -1671,7 +1671,7 @@ func startManagement(t *testing.T, dataDir, testFile string) (*grpc.Server, stri if err != nil { return nil, "", err } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil, nil) if err != nil { return nil, "", err } diff --git a/client/server/server_test.go b/client/server/server_test.go index 772997575..54ad47e55 100644 --- a/client/server/server_test.go +++ b/client/server/server_test.go @@ -335,7 +335,7 @@ func startManagement(t *testing.T, signalAddr string, counter *int) (*grpc.Serve if err != nil { return nil, "", err } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil, nil) if err != nil { return nil, "", err } diff --git a/management/internals/server/boot.go b/management/internals/server/boot.go index 2b40c0aad..f2ab0a2c4 100644 --- a/management/internals/server/boot.go +++ b/management/internals/server/boot.go @@ -173,7 +173,7 @@ func (s *BaseServer) GRPCServer() *grpc.Server { } gRPCAPIHandler := grpc.NewServer(gRPCOpts...) - srv, err := nbgrpc.NewServer(s.Config, s.AccountManager(), s.SettingsManager(), s.JobManager(), s.SecretsManager(), s.Metrics(), s.AuthManager(), s.IntegratedValidator(), s.NetworkMapController(), s.OAuthConfigProvider()) + srv, err := nbgrpc.NewServer(s.Config, s.AccountManager(), s.SettingsManager(), s.JobManager(), s.SecretsManager(), s.Metrics(), s.AuthManager(), s.IntegratedValidator(), s.NetworkMapController(), s.OAuthConfigProvider(), s.SessionStore()) if err != nil { log.Fatalf("failed to create management server: %v", err) } diff --git a/management/internals/server/controllers.go b/management/internals/server/controllers.go index 9a8e45d33..89bdf0abe 100644 --- a/management/internals/server/controllers.go +++ b/management/internals/server/controllers.go @@ -6,6 +6,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/netbirdio/management-integrations/integrations" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy" proxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy/manager" @@ -66,6 +67,12 @@ func (s *BaseServer) SecretsManager() grpc.SecretsManager { }) } +func (s *BaseServer) SessionStore() *auth.SessionStore { + return Create(s, func() *auth.SessionStore { + return auth.NewSessionStore(s.CacheStore()) + }) +} + func (s *BaseServer) AuthManager() auth.Manager { audiences := s.Config.GetAuthAudiences() audience := s.Config.HttpConfig.AuthAudience diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index 6e8358f02..0c1611e7f 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -14,6 +14,7 @@ import ( "sync/atomic" "time" + jwtv5 "github.com/golang-jwt/jwt/v5" pb "github.com/golang/protobuf/proto" // nolint "github.com/golang/protobuf/ptypes/timestamp" "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/realip" @@ -67,6 +68,7 @@ type Server struct { appMetrics telemetry.AppMetrics peerLocks sync.Map authManager auth.Manager + sessionStore *auth.SessionStore logBlockedPeers bool blockPeersWithSameConfig bool @@ -98,6 +100,7 @@ func NewServer( integratedPeerValidator integrated_validator.IntegratedValidator, networkMapController network_map.Controller, oAuthConfigProvider idp.OAuthConfigProvider, + sessionStore *auth.SessionStore, ) (*Server, error) { if appMetrics != nil { // update gauge based on number of connected peers which is equal to open gRPC streams @@ -140,6 +143,7 @@ func NewServer( integratedPeerValidator: integratedPeerValidator, networkMapController: networkMapController, oAuthConfigProvider: oAuthConfigProvider, + sessionStore: sessionStore, loginFilter: newLoginFilter(), @@ -535,7 +539,7 @@ func (s *Server) cancelPeerRoutinesWithoutLock(ctx context.Context, accountID st log.WithContext(ctx).Debugf("peer %s has been disconnected", peer.Key) } -func (s *Server) validateToken(ctx context.Context, jwtToken string) (string, error) { +func (s *Server) validateToken(ctx context.Context, peerKey, jwtToken string) (string, error) { if s.authManager == nil { return "", status.Errorf(codes.Internal, "missing auth manager") } @@ -545,6 +549,10 @@ func (s *Server) validateToken(ctx context.Context, jwtToken string) (string, er return "", status.Errorf(codes.InvalidArgument, "invalid jwt token, err: %v", err) } + if err := s.claimLoginToken(ctx, peerKey, jwtToken, token); err != nil { + return "", err + } + // we need to call this method because if user is new, we will automatically add it to existing or create a new account accountId, _, err := s.accountManager.GetAccountIDFromUserAuth(ctx, userAuth) if err != nil { @@ -828,6 +836,31 @@ func (s *Server) prepareLoginResponse(ctx context.Context, peer *nbpeer.Peer, ne return loginResp, nil } +func (s *Server) claimLoginToken(ctx context.Context, peerKey, jwtToken string, token *jwtv5.Token) error { + if s.sessionStore == nil || token == nil { + return nil + } + + exp, err := token.Claims.GetExpirationTime() + if err != nil || exp == nil { + log.WithContext(ctx).Warnf("JWT has no usable exp claim for peer %s", peerKey) + return status.Error(codes.Unauthenticated, "jwt token has no expiration") + } + + err = s.sessionStore.RegisterToken(ctx, jwtToken, exp.Time) + if err == nil { + return nil + } + + if errors.Is(err, auth.ErrTokenAlreadyUsed) || errors.Is(err, auth.ErrTokenExpired) { + log.WithContext(ctx).Warnf("%v for peer %s", err, peerKey) + return status.Error(codes.Unauthenticated, err.Error()) + } + + log.WithContext(ctx).Warnf("failed to claim JWT for peer %s: %v", peerKey, err) + return status.Error(codes.Unavailable, "failed to claim jwt token") +} + // processJwtToken validates the existence of a JWT token in the login request, and returns the corresponding user ID if // the token is valid. // @@ -838,7 +871,7 @@ func (s *Server) processJwtToken(ctx context.Context, loginReq *proto.LoginReque if loginReq.GetJwtToken() != "" { var err error for i := 0; i < 3; i++ { - userID, err = s.validateToken(ctx, loginReq.GetJwtToken()) + userID, err = s.validateToken(ctx, peerKey.String(), loginReq.GetJwtToken()) if err == nil { break } diff --git a/management/server/auth/session.go b/management/server/auth/session.go new file mode 100644 index 000000000..7621a1c10 --- /dev/null +++ b/management/server/auth/session.go @@ -0,0 +1,61 @@ +package auth + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "time" + + "github.com/eko/gocache/lib/v4/cache" + "github.com/eko/gocache/lib/v4/store" +) + +const ( + usedTokenKeyPrefix = "jwt-used:" + usedTokenMarker = "1" +) + +var ( + ErrTokenAlreadyUsed = errors.New("JWT already used") + ErrTokenExpired = errors.New("JWT expired") +) + +type SessionStore struct { + cache *cache.Cache[string] +} + +func NewSessionStore(cacheStore store.StoreInterface) *SessionStore { + return &SessionStore{cache: cache.New[string](cacheStore)} +} + +// RegisterToken records a JWT until its exp time and rejects reuse. +func (s *SessionStore) RegisterToken(ctx context.Context, token string, expiresAt time.Time) error { + ttl := time.Until(expiresAt) + if ttl <= 0 { + return ErrTokenExpired + } + + key := usedTokenKeyPrefix + hashToken(token) + _, err := s.cache.Get(ctx, key) + if err == nil { + return ErrTokenAlreadyUsed + } + + var notFound *store.NotFound + if !errors.As(err, ¬Found) { + return fmt.Errorf("failed to lookup used token entry: %w", err) + } + + if err := s.cache.Set(ctx, key, usedTokenMarker, store.WithExpiration(ttl)); err != nil { + return fmt.Errorf("failed to store used token entry: %w", err) + } + + return nil +} + +func hashToken(token string) string { + sum := sha256.Sum256([]byte(token)) + return hex.EncodeToString(sum[:]) +} diff --git a/management/server/auth/session_test.go b/management/server/auth/session_test.go new file mode 100644 index 000000000..3a7d85f4c --- /dev/null +++ b/management/server/auth/session_test.go @@ -0,0 +1,82 @@ +package auth + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + nbcache "github.com/netbirdio/netbird/management/server/cache" +) + +func newTestSessionStore(t *testing.T) *SessionStore { + t.Helper() + cacheStore, err := nbcache.NewStore(context.Background(), time.Hour, time.Hour, 100) + require.NoError(t, err) + return NewSessionStore(cacheStore) +} + +func TestSessionStore_FirstRegisterSucceeds(t *testing.T) { + s := newTestSessionStore(t) + ctx := context.Background() + + require.NoError(t, s.RegisterToken(ctx, "token", time.Now().Add(time.Hour))) +} + +func TestSessionStore_RegisterSameTokenTwiceIsRejected(t *testing.T) { + s := newTestSessionStore(t) + ctx := context.Background() + token := "token" + exp := time.Now().Add(time.Hour) + + require.NoError(t, s.RegisterToken(ctx, token, exp)) + + err := s.RegisterToken(ctx, token, exp) + require.Error(t, err) + assert.ErrorIs(t, err, ErrTokenAlreadyUsed) +} + +func TestSessionStore_RegisterDifferentTokensAreIndependent(t *testing.T) { + s := newTestSessionStore(t) + ctx := context.Background() + exp := time.Now().Add(time.Hour) + + require.NoError(t, s.RegisterToken(ctx, "tokenA", exp)) + require.NoError(t, s.RegisterToken(ctx, "tokenB", exp)) +} + +func TestSessionStore_RegisterWithPastExpiryIsRejected(t *testing.T) { + s := newTestSessionStore(t) + ctx := context.Background() + token := "token" + + err := s.RegisterToken(ctx, token, time.Now().Add(-time.Second)) + require.Error(t, err) + assert.ErrorIs(t, err, ErrTokenExpired) +} + +func TestSessionStore_EntryEvictsAtTTLAndAllowsReRegistration(t *testing.T) { + s := newTestSessionStore(t) + ctx := context.Background() + token := "token" + + require.NoError(t, s.RegisterToken(ctx, token, time.Now().Add(50*time.Millisecond))) + + err := s.RegisterToken(ctx, token, time.Now().Add(50*time.Millisecond)) + assert.ErrorIs(t, err, ErrTokenAlreadyUsed) + + time.Sleep(120 * time.Millisecond) + + require.NoError(t, s.RegisterToken(ctx, token, time.Now().Add(time.Hour))) +} + +func TestHashToken_StableAndDoesNotLeak(t *testing.T) { + a := hashToken("tokenA") + b := hashToken("tokenB") + assert.Equal(t, a, hashToken("tokenA"), "hash must be deterministic") + assert.NotEqual(t, a, b, "different tokens must hash differently") + assert.Len(t, a, 64, "sha256 hex must be 64 chars") + assert.NotContains(t, a, "tokenA", "raw token must not appear in hash") +} diff --git a/management/server/management_proto_test.go b/management/server/management_proto_test.go index 18d85315d..1b77ea335 100644 --- a/management/server/management_proto_test.go +++ b/management/server/management_proto_test.go @@ -391,7 +391,7 @@ func startManagementForTest(t *testing.T, testFile string, config *config.Config return nil, nil, "", cleanup, err } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, MockIntegratedValidator{}, networkMapController, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, MockIntegratedValidator{}, networkMapController, nil, nil) if err != nil { return nil, nil, "", cleanup, err } diff --git a/management/server/management_test.go b/management/server/management_test.go index 3ac28cd4a..f1d49193c 100644 --- a/management/server/management_test.go +++ b/management/server/management_test.go @@ -256,6 +256,7 @@ func startServer( server.MockIntegratedValidator{}, networkMapController, nil, + nil, ) if err != nil { t.Fatalf("failed creating management server: %v", err) diff --git a/shared/management/client/client_test.go b/shared/management/client/client_test.go index d9a1a7d65..a8e8172dc 100644 --- a/shared/management/client/client_test.go +++ b/shared/management/client/client_test.go @@ -138,7 +138,7 @@ func startManagement(t *testing.T) (*grpc.Server, net.Listener) { if err != nil { t.Fatal(err) } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, mgmt.MockIntegratedValidator{}, networkMapController, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, mgmt.MockIntegratedValidator{}, networkMapController, nil, nil) if err != nil { t.Fatal(err) } From 11ac2af2f5130899633b31ac575a683afea7e308 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 29 Apr 2026 23:07:33 +0900 Subject: [PATCH 360/374] Use BindListener for all userspace bind in lazyconn activity (#6028) --- .../lazyconn/activity/listener_bind_test.go | 13 ------------- client/internal/lazyconn/activity/manager.go | 12 ------------ 2 files changed, 25 deletions(-) diff --git a/client/internal/lazyconn/activity/listener_bind_test.go b/client/internal/lazyconn/activity/listener_bind_test.go index f86dd3877..1baaae6be 100644 --- a/client/internal/lazyconn/activity/listener_bind_test.go +++ b/client/internal/lazyconn/activity/listener_bind_test.go @@ -3,7 +3,6 @@ package activity import ( "net" "net/netip" - "runtime" "testing" "time" @@ -18,10 +17,6 @@ import ( peerid "github.com/netbirdio/netbird/client/internal/peer/id" ) -func isBindListenerPlatform() bool { - return runtime.GOOS == "windows" || runtime.GOOS == "js" -} - // mockEndpointManager implements device.EndpointManager for testing type mockEndpointManager struct { endpoints map[netip.Addr]net.Conn @@ -181,10 +176,6 @@ func TestBindListener_Close(t *testing.T) { } func TestManager_BindMode(t *testing.T) { - if !isBindListenerPlatform() { - t.Skip("BindListener only used on Windows/JS platforms") - } - mockEndpointMgr := newMockEndpointManager() mockIface := &MockWGIfaceBind{endpointMgr: mockEndpointMgr} @@ -226,10 +217,6 @@ func TestManager_BindMode(t *testing.T) { } func TestManager_BindMode_MultiplePeers(t *testing.T) { - if !isBindListenerPlatform() { - t.Skip("BindListener only used on Windows/JS platforms") - } - mockEndpointMgr := newMockEndpointManager() mockIface := &MockWGIfaceBind{endpointMgr: mockEndpointMgr} diff --git a/client/internal/lazyconn/activity/manager.go b/client/internal/lazyconn/activity/manager.go index 1c11378c8..cccc0669f 100644 --- a/client/internal/lazyconn/activity/manager.go +++ b/client/internal/lazyconn/activity/manager.go @@ -4,14 +4,12 @@ import ( "errors" "net" "net/netip" - "runtime" "sync" "time" log "github.com/sirupsen/logrus" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" - "github.com/netbirdio/netbird/client/iface/netstack" "github.com/netbirdio/netbird/client/iface/wgaddr" "github.com/netbirdio/netbird/client/internal/lazyconn" peerid "github.com/netbirdio/netbird/client/internal/peer/id" @@ -75,16 +73,6 @@ func (m *Manager) createListener(peerCfg lazyconn.PeerConfig) (listener, error) return NewUDPListener(m.wgIface, peerCfg) } - // BindListener is used on Windows, JS, and netstack platforms: - // - JS: Cannot listen to UDP sockets - // - Windows: IP_UNICAST_IF socket option forces packets out the interface the default - // gateway points to, preventing them from reaching the loopback interface. - // - Netstack: Allows multiple instances on the same host without port conflicts. - // BindListener bypasses these issues by passing data directly through the bind. - if runtime.GOOS != "windows" && runtime.GOOS != "js" && !netstack.IsEnabled() { - return NewUDPListener(m.wgIface, peerCfg) - } - provider, ok := m.wgIface.(bindProvider) if !ok { return nil, errors.New("interface claims userspace bind but doesn't implement bindProvider") From ed828b7af4e25e64d5f2fdccaaa1285964e27bd8 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Wed, 29 Apr 2026 23:08:47 +0900 Subject: [PATCH 361/374] Tolerate EEXIST when adding macOS scoped default routes (#6027) --- .../routemanager/systemops/systemops_darwin.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/client/internal/routemanager/systemops/systemops_darwin.go b/client/internal/routemanager/systemops/systemops_darwin.go index d6875ff95..3fcac4c6a 100644 --- a/client/internal/routemanager/systemops/systemops_darwin.go +++ b/client/internal/routemanager/systemops/systemops_darwin.go @@ -89,8 +89,16 @@ func (r *SysOps) installScopedDefaultFor(unspec netip.Addr) (bool, error) { return false, fmt.Errorf("unusable default nexthop for %s (no interface)", unspec) } + reused := false if err := r.addScopedDefault(unspec, nexthop); err != nil { - return false, fmt.Errorf("add scoped default on %s: %w", nexthop.Intf.Name, err) + if !errors.Is(err, unix.EEXIST) { + return false, fmt.Errorf("add scoped default on %s: %w", nexthop.Intf.Name, err) + } + // macOS installs its own RTF_IFSCOPE defaults for primary service + // selection on multi-NIC setups, so a route on this ifindex can + // already exist before we try. Binding to it via IP[V6]_BOUND_IF + // still produces the scoped lookup we need. + reused = true } af := unix.AF_INET @@ -102,7 +110,11 @@ func (r *SysOps) installScopedDefaultFor(unspec netip.Addr) (bool, error) { if nexthop.IP.IsValid() { via = nexthop.IP.String() } - log.Infof("installed scoped default route via %s on %s for %s", via, nexthop.Intf.Name, afOf(unspec)) + verb := "installed" + if reused { + verb = "reused existing" + } + log.Infof("%s scoped default route via %s on %s for %s", verb, via, nexthop.Intf.Name, afOf(unspec)) return true, nil } From 57945fc3286a4a7c7f06c688fb251e90e38bfbce Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Wed, 29 Apr 2026 17:19:22 +0200 Subject: [PATCH 362/374] [client] Trigger mobile submodule bump PRs on release tags (#6029) Trigger mobile submodule bump PRs on release tags --- .github/workflows/sync-tag.yml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/.github/workflows/sync-tag.yml b/.github/workflows/sync-tag.yml index 1cc553b12..a75d9a9d5 100644 --- a/.github/workflows/sync-tag.yml +++ b/.github/workflows/sync-tag.yml @@ -9,6 +9,8 @@ concurrency: group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || github.actor_id }} cancel-in-progress: true +# Receiving workflows (cloud sync-tag, mobile bump-netbird) expect the short +# tag form (e.g. v0.30.0), not refs/tags/v0.30.0 — github.ref_name, not github.ref. jobs: trigger_sync_tag: runs-on: ubuntu-latest @@ -20,4 +22,30 @@ jobs: ref: main repo: ${{ secrets.UPSTREAM_REPO }} token: ${{ secrets.NC_GITHUB_TOKEN }} + inputs: '{ "tag": "${{ github.ref_name }}" }' + + trigger_android_bump: + runs-on: ubuntu-latest + if: github.event.created && !github.event.deleted && startsWith(github.ref, 'refs/tags/v') && !contains(github.ref_name, '-') + steps: + - name: Trigger android-client submodule bump + uses: benc-uk/workflow-dispatch@7a027648b88c2413826b6ddd6c76114894dc5ec4 # v1.3.1 + with: + workflow: bump-netbird.yml + ref: main + repo: netbirdio/android-client + token: ${{ secrets.NC_GITHUB_TOKEN }} + inputs: '{ "tag": "${{ github.ref_name }}" }' + + trigger_ios_bump: + runs-on: ubuntu-latest + if: github.event.created && !github.event.deleted && startsWith(github.ref, 'refs/tags/v') && !contains(github.ref_name, '-') + steps: + - name: Trigger ios-client submodule bump + uses: benc-uk/workflow-dispatch@7a027648b88c2413826b6ddd6c76114894dc5ec4 # v1.3.1 + with: + workflow: bump-netbird.yml + ref: main + repo: netbirdio/ios-client + token: ${{ secrets.NC_GITHUB_TOKEN }} inputs: '{ "tag": "${{ github.ref_name }}" }' \ No newline at end of file From 3fc5a8d4a1fe308ff1068764a09b90b0859ab8fe Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Wed, 29 Apr 2026 23:44:38 +0200 Subject: [PATCH 363/374] [misc] fix MSI generation add installer tests (#6031) Add Windows installer build test workflow --- .github/workflows/release.yml | 149 +++++++++++++++++++++++++++++++++- client/netbird.wxs | 3 +- 2 files changed, 148 insertions(+), 4 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 081bcafc4..c1ae01a98 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -114,7 +114,7 @@ jobs: retention-days: 30 release: - runs-on: ubuntu-latest-m + runs-on: ubuntu-24.04-8-core outputs: release_artifact_url: ${{ steps.upload_release.outputs.artifact-url }} linux_packages_artifact_url: ${{ steps.upload_linux_packages.outputs.artifact-url }} @@ -455,6 +455,151 @@ jobs: path: dist/ retention-days: 3 + test_windows_installer: + name: "Windows Installer / Build Test" + runs-on: windows-2022 + needs: [release, release_ui] + strategy: + fail-fast: false + matrix: + include: + - arch: amd64 + wintun_arch: amd64 + - arch: arm64 + wintun_arch: arm64 + defaults: + run: + shell: powershell + env: + PackageWorkdir: netbird_windows_${{ matrix.arch }} + downloadPath: '${{ github.workspace }}\temp' + steps: + - name: Parse semver string + id: semver_parser + uses: booxmedialtd/ws-action-parse-semver@v1 + with: + input_string: ${{ (startsWith(github.ref, 'refs/tags/v') && github.ref) || 'refs/tags/v0.0.0' }} + version_extractor_regex: '\/v(.*)$' + + - name: Checkout + uses: actions/checkout@v4 + + - name: Add 7-Zip to PATH + run: echo "C:\Program Files\7-Zip" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + + - name: Download release artifacts + uses: actions/download-artifact@v4 + with: + name: release + path: release + + - name: Download UI release artifacts + uses: actions/download-artifact@v4 + with: + name: release-ui + path: release-ui + + - name: Stage binaries into dist + run: | + $workdir = "dist\${{ env.PackageWorkdir }}" + New-Item -ItemType Directory -Force -Path $workdir | Out-Null + $client = Get-ChildItem -Recurse -Path release -Filter "netbird_*_windows_${{ matrix.arch }}.tar.gz" | Select-Object -First 1 + $ui = Get-ChildItem -Recurse -Path release-ui -Filter "netbird-ui-windows_*_windows_${{ matrix.arch }}.tar.gz" | Select-Object -First 1 + if (-not $client) { Write-Host "::error::client tarball not found for ${{ matrix.arch }}"; exit 1 } + if (-not $ui) { Write-Host "::error::ui tarball not found for ${{ matrix.arch }}"; exit 1 } + Write-Host "Client: $($client.FullName)" + Write-Host "UI: $($ui.FullName)" + tar -zvxf $client.FullName -C $workdir + tar -zvxf $ui.FullName -C $workdir + Get-ChildItem $workdir + + - name: Download wintun + uses: carlosperate/download-file-action@v2 + id: download-wintun + with: + file-url: https://pkgs.netbird.io/wintun/wintun-0.14.1.zip + file-name: wintun.zip + location: ${{ env.downloadPath }} + sha256: '07c256185d6ee3652e09fa55c0b673e2624b565e02c4b9091c79ca7d2f24ef51' + + - name: Decompress wintun files + run: tar -zvxf "${{ steps.download-wintun.outputs.file-path }}" -C ${{ env.downloadPath }} + + - name: Move wintun.dll into dist + run: mv ${{ env.downloadPath }}\wintun\bin\${{ matrix.wintun_arch }}\wintun.dll ${{ github.workspace }}\dist\${{ env.PackageWorkdir }}\ + + - name: Download Mesa3D (amd64 only) + uses: carlosperate/download-file-action@v2 + id: download-mesa3d + if: matrix.arch == 'amd64' + with: + file-url: https://downloads.fdossena.com/Projects/Mesa3D/Builds/MesaForWindows-x64-20.1.8.7z + file-name: mesa3d.7z + location: ${{ env.downloadPath }} + sha256: '71c7cb64ec229a1d6b8d62fa08e1889ed2bd17c0eeede8689daf0f25cb31d6b9' + + - name: Extract Mesa3D driver (amd64 only) + if: matrix.arch == 'amd64' + run: 7z x -o"${{ env.downloadPath }}" "${{ env.downloadPath }}/mesa3d.7z" + + - name: Move opengl32.dll into dist (amd64 only) + if: matrix.arch == 'amd64' + run: mv ${{ env.downloadPath }}\opengl32.dll ${{ github.workspace }}\dist\${{ env.PackageWorkdir }}\ + + - name: Download EnVar plugin for NSIS + uses: carlosperate/download-file-action@v2 + with: + file-url: https://nsis.sourceforge.io/mediawiki/images/7/7f/EnVar_plugin.zip + file-name: envar_plugin.zip + location: ${{ github.workspace }} + + - name: Extract EnVar plugin + run: 7z x -o"${{ github.workspace }}/NSIS_Plugins" "${{ github.workspace }}/envar_plugin.zip" + + - name: Download ShellExecAsUser plugin for NSIS (amd64 only) + uses: carlosperate/download-file-action@v2 + if: matrix.arch == 'amd64' + with: + file-url: https://nsis.sourceforge.io/mediawiki/images/6/68/ShellExecAsUser_amd64-Unicode.7z + file-name: ShellExecAsUser_amd64-Unicode.7z + location: ${{ github.workspace }} + + - name: Extract ShellExecAsUser plugin (amd64 only) + if: matrix.arch == 'amd64' + run: 7z x -o"${{ github.workspace }}/NSIS_Plugins" "${{ github.workspace }}/ShellExecAsUser_amd64-Unicode.7z" + + - name: Build NSIS installer + uses: joncloud/makensis-action@v3.3 + with: + additional-plugin-paths: ${{ github.workspace }}/NSIS_Plugins/Plugins + script-file: client/installer.nsis + arguments: "/V4 /DARCH=${{ matrix.arch }}" + env: + APPVER: ${{ steps.semver_parser.outputs.major }}.${{ steps.semver_parser.outputs.minor }}.${{ steps.semver_parser.outputs.patch }}.${{ github.run_id }} + + - name: Rename NSIS installer + run: mv netbird-installer.exe netbird_installer_test_windows_${{ matrix.arch }}.exe + + - name: Install WiX + run: | + dotnet tool install --global wix --version 6.0.2 + wix extension add WixToolset.Util.wixext/6.0.2 + + - name: Build MSI installer + env: + NETBIRD_VERSION: "${{ steps.semver_parser.outputs.fullversion }}" + run: wix build -arch ${{ matrix.arch == 'amd64' && 'x64' || 'arm64' }} -ext WixToolset.Util.wixext -o netbird_installer_test_windows_${{ matrix.arch }}.msi .\client\netbird.wxs -d ProcessorArchitecture=${{ matrix.arch == 'amd64' && 'x64' || 'arm64' }} -d ArchSuffix=${{ matrix.arch }} + + - name: Upload installer artifacts + if: always() + uses: actions/upload-artifact@v4 + with: + name: windows-installer-test-${{ matrix.arch }} + path: | + netbird_installer_test_windows_${{ matrix.arch }}.exe + netbird_installer_test_windows_${{ matrix.arch }}.msi + retention-days: 3 + comment_release_artifacts: name: Comment release artifacts runs-on: ubuntu-latest @@ -554,7 +699,7 @@ jobs: trigger_signer: runs-on: ubuntu-latest - needs: [release, release_ui, release_ui_darwin] + needs: [release, release_ui, release_ui_darwin, test_windows_installer] if: startsWith(github.ref, 'refs/tags/') steps: - name: Trigger binaries sign pipelines diff --git a/client/netbird.wxs b/client/netbird.wxs index 2849bc6b9..6f18b63b5 100644 --- a/client/netbird.wxs +++ b/client/netbird.wxs @@ -68,8 +68,7 @@ - - AUTOSTART = "1" + From f29f5a09784380a3003ef3de5a2c7de4b5733657 Mon Sep 17 00:00:00 2001 From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com> Date: Thu, 30 Apr 2026 14:52:54 +0200 Subject: [PATCH 364/374] [management] add monitoring for nmap update source (#6036) --- .../network_map/controller/controller.go | 15 ++++++-- .../controllers/network_map/interface.go | 4 +- .../controllers/network_map/interface_mock.go | 16 ++++---- .../peers/ephemeral/manager/ephemeral_test.go | 4 +- management/internals/modules/peers/manager.go | 3 +- .../service/manager/l4_port_test.go | 2 +- .../reverseproxy/service/manager/manager.go | 17 +++++---- .../service/manager/manager_test.go | 10 ++--- .../modules/zones/manager/manager.go | 5 ++- .../modules/zones/records/manager/manager.go | 7 ++-- management/server/account.go | 4 +- management/server/account/manager.go | 4 +- management/server/account/manager_mock.go | 16 ++++---- management/server/dns.go | 2 +- management/server/group.go | 16 ++++---- management/server/mock_server/account_mock.go | 12 +++--- management/server/nameserver.go | 6 +-- management/server/networks/manager.go | 3 +- .../server/networks/resources/manager.go | 6 +-- management/server/networks/routers/manager.go | 7 ++-- management/server/peer.go | 8 ++-- management/server/peer_test.go | 4 +- management/server/policy.go | 8 +++- management/server/posture_checks.go | 7 +++- management/server/route.go | 6 +-- .../telemetry/accountmanager_metrics.go | 20 ++++++++++ management/server/types/update_reason.go | 37 +++++++++++++++++++ management/server/user.go | 2 +- 28 files changed, 165 insertions(+), 86 deletions(-) create mode 100644 management/server/types/update_reason.go diff --git a/management/internals/controllers/network_map/controller/controller.go b/management/internals/controllers/network_map/controller/controller.go index 4b47ecaa0..36de950e9 100644 --- a/management/internals/controllers/network_map/controller/controller.go +++ b/management/internals/controllers/network_map/controller/controller.go @@ -257,7 +257,10 @@ func (c *Controller) bufferSendUpdateAccountPeers(ctx context.Context, accountID // UpdatePeers updates all peers that belong to an account. // Should be called when changes have to be synced to peers. -func (c *Controller) UpdateAccountPeers(ctx context.Context, accountID string) error { +func (c *Controller) UpdateAccountPeers(ctx context.Context, accountID string, reason types.UpdateReason) error { + if c.accountManagerMetrics != nil { + c.accountManagerMetrics.CountUpdateAccountPeersTriggered(string(reason.Resource), string(reason.Operation)) + } return c.sendUpdateAccountPeers(ctx, accountID) } @@ -331,9 +334,13 @@ func (c *Controller) UpdateAccountPeer(ctx context.Context, accountId string, pe return nil } -func (c *Controller) BufferUpdateAccountPeers(ctx context.Context, accountID string) error { +func (c *Controller) BufferUpdateAccountPeers(ctx context.Context, accountID string, reason types.UpdateReason) error { log.WithContext(ctx).Tracef("buffer updating peers for account %s from %s", accountID, util.GetCallerName()) + if c.accountManagerMetrics != nil { + c.accountManagerMetrics.CountUpdateAccountPeersTriggered(string(reason.Resource), string(reason.Operation)) + } + bufUpd, _ := c.accountUpdateLocks.LoadOrStore(accountID, &bufferUpdate{}) b := bufUpd.(*bufferUpdate) @@ -348,14 +355,14 @@ func (c *Controller) BufferUpdateAccountPeers(ctx context.Context, accountID str go func() { defer b.mu.Unlock() - _ = c.UpdateAccountPeers(ctx, accountID) + _ = c.sendUpdateAccountPeers(ctx, accountID) if !b.update.Load() { return } b.update.Store(false) if b.next == nil { b.next = time.AfterFunc(time.Duration(c.updateAccountPeersBufferInterval.Load()), func() { - _ = c.UpdateAccountPeers(ctx, accountID) + _ = c.sendUpdateAccountPeers(ctx, accountID) }) return } diff --git a/management/internals/controllers/network_map/interface.go b/management/internals/controllers/network_map/interface.go index cfea2d3de..44d8f7d72 100644 --- a/management/internals/controllers/network_map/interface.go +++ b/management/internals/controllers/network_map/interface.go @@ -18,9 +18,9 @@ const ( ) type Controller interface { - UpdateAccountPeers(ctx context.Context, accountID string) error + UpdateAccountPeers(ctx context.Context, accountID string, reason types.UpdateReason) error UpdateAccountPeer(ctx context.Context, accountId string, peerId string) error - BufferUpdateAccountPeers(ctx context.Context, accountID string) error + BufferUpdateAccountPeers(ctx context.Context, accountID string, reason types.UpdateReason) error GetValidatedPeerWithMap(ctx context.Context, isRequiresApproval bool, accountID string, p *nbpeer.Peer) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, int64, error) GetDNSDomain(settings *types.Settings) string StartWarmup(context.Context) diff --git a/management/internals/controllers/network_map/interface_mock.go b/management/internals/controllers/network_map/interface_mock.go index 4e86d2973..073a75d3b 100644 --- a/management/internals/controllers/network_map/interface_mock.go +++ b/management/internals/controllers/network_map/interface_mock.go @@ -44,17 +44,17 @@ func (m *MockController) EXPECT() *MockControllerMockRecorder { } // BufferUpdateAccountPeers mocks base method. -func (m *MockController) BufferUpdateAccountPeers(ctx context.Context, accountID string) error { +func (m *MockController) BufferUpdateAccountPeers(ctx context.Context, accountID string, reason types.UpdateReason) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BufferUpdateAccountPeers", ctx, accountID) + ret := m.ctrl.Call(m, "BufferUpdateAccountPeers", ctx, accountID, reason) ret0, _ := ret[0].(error) return ret0 } // BufferUpdateAccountPeers indicates an expected call of BufferUpdateAccountPeers. -func (mr *MockControllerMockRecorder) BufferUpdateAccountPeers(ctx, accountID any) *gomock.Call { +func (mr *MockControllerMockRecorder) BufferUpdateAccountPeers(ctx, accountID, reason any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BufferUpdateAccountPeers", reflect.TypeOf((*MockController)(nil).BufferUpdateAccountPeers), ctx, accountID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BufferUpdateAccountPeers", reflect.TypeOf((*MockController)(nil).BufferUpdateAccountPeers), ctx, accountID, reason) } // CountStreams mocks base method. @@ -238,15 +238,15 @@ func (mr *MockControllerMockRecorder) UpdateAccountPeer(ctx, accountId, peerId a } // UpdateAccountPeers mocks base method. -func (m *MockController) UpdateAccountPeers(ctx context.Context, accountID string) error { +func (m *MockController) UpdateAccountPeers(ctx context.Context, accountID string, reason types.UpdateReason) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateAccountPeers", ctx, accountID) + ret := m.ctrl.Call(m, "UpdateAccountPeers", ctx, accountID, reason) ret0, _ := ret[0].(error) return ret0 } // UpdateAccountPeers indicates an expected call of UpdateAccountPeers. -func (mr *MockControllerMockRecorder) UpdateAccountPeers(ctx, accountID any) *gomock.Call { +func (mr *MockControllerMockRecorder) UpdateAccountPeers(ctx, accountID, reason any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccountPeers", reflect.TypeOf((*MockController)(nil).UpdateAccountPeers), ctx, accountID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccountPeers", reflect.TypeOf((*MockController)(nil).UpdateAccountPeers), ctx, accountID, reason) } diff --git a/management/internals/modules/peers/ephemeral/manager/ephemeral_test.go b/management/internals/modules/peers/ephemeral/manager/ephemeral_test.go index fc3010dd1..314e84501 100644 --- a/management/internals/modules/peers/ephemeral/manager/ephemeral_test.go +++ b/management/internals/modules/peers/ephemeral/manager/ephemeral_test.go @@ -62,7 +62,7 @@ func (a *MockAccountManager) GetDeletePeerCalls() int { return a.deletePeerCalls } -func (a *MockAccountManager) BufferUpdateAccountPeers(ctx context.Context, accountID string) { +func (a *MockAccountManager) BufferUpdateAccountPeers(ctx context.Context, accountID string, reason types.UpdateReason) { a.mu.Lock() defer a.mu.Unlock() if a.bufferUpdateCalls == nil { @@ -248,7 +248,7 @@ func TestCleanupSchedulingBehaviorIsBatched(t *testing.T) { return err } } - mockAM.BufferUpdateAccountPeers(ctx, accountID) + mockAM.BufferUpdateAccountPeers(ctx, accountID, types.UpdateReason{}) return nil }). Times(1) diff --git a/management/internals/modules/peers/manager.go b/management/internals/modules/peers/manager.go index d3f8f44ff..c913efb92 100644 --- a/management/internals/modules/peers/manager.go +++ b/management/internals/modules/peers/manager.go @@ -20,6 +20,7 @@ import ( "github.com/netbirdio/netbird/management/server/permissions/modules" "github.com/netbirdio/netbird/management/server/permissions/operations" "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/shared/management/status" ) @@ -178,7 +179,7 @@ func (m *managerImpl) DeletePeers(ctx context.Context, accountID string, peerIDs } } - m.accountManager.UpdateAccountPeers(ctx, accountID) + m.accountManager.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourcePeer, Operation: types.UpdateOperationDelete}) return nil } diff --git a/management/internals/modules/reverseproxy/service/manager/l4_port_test.go b/management/internals/modules/reverseproxy/service/manager/l4_port_test.go index 28461641d..fc91b8616 100644 --- a/management/internals/modules/reverseproxy/service/manager/l4_port_test.go +++ b/management/internals/modules/reverseproxy/service/manager/l4_port_test.go @@ -85,7 +85,7 @@ func setupL4Test(t *testing.T, customPortsSupported *bool) (*Manager, store.Stor accountMgr := &mock_server.MockAccountManager{ StoreEventFunc: func(_ context.Context, _, _, _ string, _ activity.ActivityDescriber, _ map[string]any) {}, - UpdateAccountPeersFunc: func(_ context.Context, _ string) {}, + UpdateAccountPeersFunc: func(_ context.Context, _ string, _ types.UpdateReason) {}, GetGroupByNameFunc: func(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) { return testStore.GetGroupByName(ctx, store.LockingStrengthNone, accountID, groupName) }, diff --git a/management/internals/modules/reverseproxy/service/manager/manager.go b/management/internals/modules/reverseproxy/service/manager/manager.go index ed9d4201b..0fb5f46ff 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager.go +++ b/management/internals/modules/reverseproxy/service/manager/manager.go @@ -25,6 +25,7 @@ import ( "github.com/netbirdio/netbird/management/server/permissions/modules" "github.com/netbirdio/netbird/management/server/permissions/operations" "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/shared/management/status" ) @@ -231,7 +232,7 @@ func (m *Manager) CreateService(ctx context.Context, accountID, userID string, s m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Create, "", m.proxyController.GetOIDCValidationConfig()), s.ProxyCluster) - m.accountManager.UpdateAccountPeers(ctx, accountID) + m.accountManager.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceService, Operation: types.UpdateOperationCreate}) return s, nil } @@ -515,7 +516,7 @@ func (m *Manager) UpdateService(ctx context.Context, accountID, userID string, s } m.sendServiceUpdateNotifications(ctx, accountID, service, updateInfo) - m.accountManager.UpdateAccountPeers(ctx, accountID) + m.accountManager.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceService, Operation: types.UpdateOperationUpdate}) return service, nil } @@ -819,7 +820,7 @@ func (m *Manager) DeleteService(ctx context.Context, accountID, userID, serviceI m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Delete, "", m.proxyController.GetOIDCValidationConfig()), s.ProxyCluster) - m.accountManager.UpdateAccountPeers(ctx, accountID) + m.accountManager.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceService, Operation: types.UpdateOperationDelete}) return nil } @@ -860,7 +861,7 @@ func (m *Manager) DeleteAllServices(ctx context.Context, accountID, userID strin m.proxyController.SendServiceUpdateToCluster(ctx, accountID, svc.ToProtoMapping(service.Delete, "", oidcCfg), svc.ProxyCluster) } - m.accountManager.UpdateAccountPeers(ctx, accountID) + m.accountManager.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceService, Operation: types.UpdateOperationDelete}) return nil } @@ -916,7 +917,7 @@ func (m *Manager) ReloadService(ctx context.Context, accountID, serviceID string m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Update, "", m.proxyController.GetOIDCValidationConfig()), s.ProxyCluster) - m.accountManager.UpdateAccountPeers(ctx, accountID) + m.accountManager.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceService, Operation: types.UpdateOperationUpdate}) return nil } @@ -1098,7 +1099,7 @@ func (m *Manager) CreateServiceFromPeer(ctx context.Context, accountID, peerID s } m.proxyController.SendServiceUpdateToCluster(ctx, accountID, svc.ToProtoMapping(service.Create, "", m.proxyController.GetOIDCValidationConfig()), svc.ProxyCluster) - m.accountManager.UpdateAccountPeers(ctx, accountID) + m.accountManager.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceService, Operation: types.UpdateOperationCreate}) serviceURL := "https://" + svc.Domain if service.IsL4Protocol(svc.Mode) { @@ -1210,7 +1211,7 @@ func (m *Manager) deletePeerService(ctx context.Context, accountID, peerID, serv m.proxyController.SendServiceUpdateToCluster(ctx, accountID, svc.ToProtoMapping(service.Delete, "", m.proxyController.GetOIDCValidationConfig()), svc.ProxyCluster) - m.accountManager.UpdateAccountPeers(ctx, accountID) + m.accountManager.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceService, Operation: types.UpdateOperationDelete}) return nil } @@ -1261,7 +1262,7 @@ func (m *Manager) deleteExpiredPeerService(ctx context.Context, accountID, peerI meta := addPeerInfoToEventMeta(svc.EventMeta(), peer) m.accountManager.StoreEvent(ctx, peerID, serviceID, accountID, activity.PeerServiceExposeExpired, meta) m.proxyController.SendServiceUpdateToCluster(ctx, accountID, svc.ToProtoMapping(service.Delete, "", m.proxyController.GetOIDCValidationConfig()), svc.ProxyCluster) - m.accountManager.UpdateAccountPeers(ctx, accountID) + m.accountManager.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceService, Operation: types.UpdateOperationDelete}) return nil } diff --git a/management/internals/modules/reverseproxy/service/manager/manager_test.go b/management/internals/modules/reverseproxy/service/manager/manager_test.go index 54ac8ab18..e9403849c 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/service/manager/manager_test.go @@ -447,7 +447,7 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) { StoreEventFunc: func(_ context.Context, _, _, _ string, activityID activity.ActivityDescriber, _ map[string]any) { storedActivity = activityID.(activity.Activity) }, - UpdateAccountPeersFunc: func(_ context.Context, _ string) {}, + UpdateAccountPeersFunc: func(_ context.Context, _ string, _ types.UpdateReason) {}, } mockStore.EXPECT(). @@ -549,7 +549,7 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) { StoreEventFunc: func(_ context.Context, _, _, _ string, activityID activity.ActivityDescriber, _ map[string]any) { storedActivity = activityID.(activity.Activity) }, - UpdateAccountPeersFunc: func(_ context.Context, _ string) {}, + UpdateAccountPeersFunc: func(_ context.Context, _ string, _ types.UpdateReason) {}, } mockStore.EXPECT(). @@ -593,7 +593,7 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) { StoreEventFunc: func(_ context.Context, _, _, _ string, _ activity.ActivityDescriber, meta map[string]any) { storedMeta = meta }, - UpdateAccountPeersFunc: func(_ context.Context, _ string) {}, + UpdateAccountPeersFunc: func(_ context.Context, _ string, _ types.UpdateReason) {}, } mockStore.EXPECT(). @@ -704,7 +704,7 @@ func setupIntegrationTest(t *testing.T) (*Manager, store.Store) { accountMgr := &mock_server.MockAccountManager{ StoreEventFunc: func(_ context.Context, _, _, _ string, _ activity.ActivityDescriber, _ map[string]any) {}, - UpdateAccountPeersFunc: func(_ context.Context, _ string) {}, + UpdateAccountPeersFunc: func(_ context.Context, _ string, _ types.UpdateReason) {}, GetGroupByNameFunc: func(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) { return testStore.GetGroupByName(ctx, store.LockingStrengthNone, accountID, groupName) }, @@ -1173,7 +1173,7 @@ func TestDeleteService_DeletesTargets(t *testing.T) { mockAcct.EXPECT(). StoreEvent(ctx, userID, service.ID, accountID, activity.ServiceDeleted, gomock.Any()) mockAcct.EXPECT(). - UpdateAccountPeers(ctx, accountID) + UpdateAccountPeers(ctx, accountID, gomock.Any()) err = mgr.DeleteService(ctx, accountID, userID, service.ID) require.NoError(t, err) diff --git a/management/internals/modules/zones/manager/manager.go b/management/internals/modules/zones/manager/manager.go index 8548dd48c..439671e65 100644 --- a/management/internals/modules/zones/manager/manager.go +++ b/management/internals/modules/zones/manager/manager.go @@ -11,6 +11,7 @@ import ( "github.com/netbirdio/netbird/management/server/permissions/modules" "github.com/netbirdio/netbird/management/server/permissions/operations" "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/shared/management/status" ) @@ -144,7 +145,7 @@ func (m *managerImpl) UpdateZone(ctx context.Context, accountID, userID string, m.accountManager.StoreEvent(ctx, userID, zone.ID, accountID, activity.DNSZoneUpdated, zone.EventMeta()) - go m.accountManager.UpdateAccountPeers(ctx, accountID) + go m.accountManager.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceZone, Operation: types.UpdateOperationUpdate}) return zone, nil } @@ -206,7 +207,7 @@ func (m *managerImpl) DeleteZone(ctx context.Context, accountID, userID, zoneID event() } - go m.accountManager.UpdateAccountPeers(ctx, accountID) + go m.accountManager.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceZone, Operation: types.UpdateOperationDelete}) return nil } diff --git a/management/internals/modules/zones/records/manager/manager.go b/management/internals/modules/zones/records/manager/manager.go index 5374a2ef2..7458b41db 100644 --- a/management/internals/modules/zones/records/manager/manager.go +++ b/management/internals/modules/zones/records/manager/manager.go @@ -13,6 +13,7 @@ import ( "github.com/netbirdio/netbird/management/server/permissions/modules" "github.com/netbirdio/netbird/management/server/permissions/operations" "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/shared/management/status" ) @@ -95,7 +96,7 @@ func (m *managerImpl) CreateRecord(ctx context.Context, accountID, userID, zoneI meta := record.EventMeta(zone.ID, zone.Name) m.accountManager.StoreEvent(ctx, userID, record.ID, accountID, activity.DNSRecordCreated, meta) - go m.accountManager.UpdateAccountPeers(ctx, accountID) + go m.accountManager.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceZoneRecord, Operation: types.UpdateOperationCreate}) return record, nil } @@ -154,7 +155,7 @@ func (m *managerImpl) UpdateRecord(ctx context.Context, accountID, userID, zoneI meta := record.EventMeta(zone.ID, zone.Name) m.accountManager.StoreEvent(ctx, userID, record.ID, accountID, activity.DNSRecordUpdated, meta) - go m.accountManager.UpdateAccountPeers(ctx, accountID) + go m.accountManager.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceZoneRecord, Operation: types.UpdateOperationUpdate}) return record, nil } @@ -201,7 +202,7 @@ func (m *managerImpl) DeleteRecord(ctx context.Context, accountID, userID, zoneI meta := record.EventMeta(zone.ID, zone.Name) m.accountManager.StoreEvent(ctx, userID, recordID, accountID, activity.DNSRecordDeleted, meta) - go m.accountManager.UpdateAccountPeers(ctx, accountID) + go m.accountManager.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceZoneRecord, Operation: types.UpdateOperationDelete}) return nil } diff --git a/management/server/account.go b/management/server/account.go index 7d53cef03..4b71ab486 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -400,7 +400,7 @@ func (am *DefaultAccountManager) UpdateAccountSettings(ctx context.Context, acco } if updateAccountPeers || extraSettingsChanged || groupChangesAffectPeers { - go am.UpdateAccountPeers(ctx, accountID) + go am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceAccountSettings, Operation: types.UpdateOperationUpdate}) } return newSettings, nil @@ -1581,7 +1581,7 @@ func (am *DefaultAccountManager) SyncUserJWTGroups(ctx context.Context, userAuth if removedGroupAffectsPeers || newGroupsAffectsPeers { log.WithContext(ctx).Tracef("user %s: JWT group membership changed, updating account peers", userAuth.UserId) - am.BufferUpdateAccountPeers(ctx, userAuth.AccountId) + am.BufferUpdateAccountPeers(ctx, userAuth.AccountId, types.UpdateReason{Resource: types.UpdateResourceUser, Operation: types.UpdateOperationUpdate}) } return nil diff --git a/management/server/account/manager.go b/management/server/account/manager.go index b4516d512..626ed222d 100644 --- a/management/server/account/manager.go +++ b/management/server/account/manager.go @@ -124,8 +124,8 @@ type Manager interface { GetAccountIDForPeerKey(ctx context.Context, peerKey string) (string, error) GetAccountSettings(ctx context.Context, accountID string, userID string) (*types.Settings, error) DeleteSetupKey(ctx context.Context, accountID, userID, keyID string) error - UpdateAccountPeers(ctx context.Context, accountID string) - BufferUpdateAccountPeers(ctx context.Context, accountID string) + UpdateAccountPeers(ctx context.Context, accountID string, reason types.UpdateReason) + BufferUpdateAccountPeers(ctx context.Context, accountID string, reason types.UpdateReason) BuildUserInfosForAccount(ctx context.Context, accountID, initiatorUserID string, accountUsers []*types.User) (map[string]*types.UserInfo, error) SyncUserJWTGroups(ctx context.Context, userAuth auth.UserAuth) error GetStore() store.Store diff --git a/management/server/account/manager_mock.go b/management/server/account/manager_mock.go index 36e5fe39f..8f3b22ecc 100644 --- a/management/server/account/manager_mock.go +++ b/management/server/account/manager_mock.go @@ -111,15 +111,15 @@ func (mr *MockManagerMockRecorder) ApproveUser(ctx, accountID, initiatorUserID, } // BufferUpdateAccountPeers mocks base method. -func (m *MockManager) BufferUpdateAccountPeers(ctx context.Context, accountID string) { +func (m *MockManager) BufferUpdateAccountPeers(ctx context.Context, accountID string, reason types.UpdateReason) { m.ctrl.T.Helper() - m.ctrl.Call(m, "BufferUpdateAccountPeers", ctx, accountID) + m.ctrl.Call(m, "BufferUpdateAccountPeers", ctx, accountID, reason) } // BufferUpdateAccountPeers indicates an expected call of BufferUpdateAccountPeers. -func (mr *MockManagerMockRecorder) BufferUpdateAccountPeers(ctx, accountID interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) BufferUpdateAccountPeers(ctx, accountID, reason interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BufferUpdateAccountPeers", reflect.TypeOf((*MockManager)(nil).BufferUpdateAccountPeers), ctx, accountID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BufferUpdateAccountPeers", reflect.TypeOf((*MockManager)(nil).BufferUpdateAccountPeers), ctx, accountID, reason) } // BuildUserInfosForAccount mocks base method. @@ -1597,15 +1597,15 @@ func (mr *MockManagerMockRecorder) UpdateAccountOnboarding(ctx, accountID, userI } // UpdateAccountPeers mocks base method. -func (m *MockManager) UpdateAccountPeers(ctx context.Context, accountID string) { +func (m *MockManager) UpdateAccountPeers(ctx context.Context, accountID string, reason types.UpdateReason) { m.ctrl.T.Helper() - m.ctrl.Call(m, "UpdateAccountPeers", ctx, accountID) + m.ctrl.Call(m, "UpdateAccountPeers", ctx, accountID, reason) } // UpdateAccountPeers indicates an expected call of UpdateAccountPeers. -func (mr *MockManagerMockRecorder) UpdateAccountPeers(ctx, accountID interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) UpdateAccountPeers(ctx, accountID, reason interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccountPeers", reflect.TypeOf((*MockManager)(nil).UpdateAccountPeers), ctx, accountID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccountPeers", reflect.TypeOf((*MockManager)(nil).UpdateAccountPeers), ctx, accountID, reason) } // UpdateAccountSettings mocks base method. diff --git a/management/server/dns.go b/management/server/dns.go index baf6debc3..c62fa5185 100644 --- a/management/server/dns.go +++ b/management/server/dns.go @@ -86,7 +86,7 @@ func (am *DefaultAccountManager) SaveDNSSettings(ctx context.Context, accountID } if updateAccountPeers { - am.UpdateAccountPeers(ctx, accountID) + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceDNSSettings, Operation: types.UpdateOperationUpdate}) } return nil diff --git a/management/server/group.go b/management/server/group.go index 7b5b9b86c..e1d05171e 100644 --- a/management/server/group.go +++ b/management/server/group.go @@ -117,7 +117,7 @@ func (am *DefaultAccountManager) CreateGroup(ctx context.Context, accountID, use } if updateAccountPeers { - am.UpdateAccountPeers(ctx, accountID) + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceGroup, Operation: types.UpdateOperationCreate}) } return nil @@ -185,7 +185,7 @@ func (am *DefaultAccountManager) UpdateGroup(ctx context.Context, accountID, use } if updateAccountPeers { - am.UpdateAccountPeers(ctx, accountID) + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceGroup, Operation: types.UpdateOperationUpdate}) } return nil @@ -253,7 +253,7 @@ func (am *DefaultAccountManager) CreateGroups(ctx context.Context, accountID, us } if updateAccountPeers { - am.UpdateAccountPeers(ctx, accountID) + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceGroup, Operation: types.UpdateOperationCreate}) } return globalErr @@ -321,7 +321,7 @@ func (am *DefaultAccountManager) UpdateGroups(ctx context.Context, accountID, us } if updateAccountPeers { - am.UpdateAccountPeers(ctx, accountID) + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceGroup, Operation: types.UpdateOperationUpdate}) } return globalErr @@ -493,7 +493,7 @@ func (am *DefaultAccountManager) GroupAddPeer(ctx context.Context, accountID, gr } if updateAccountPeers { - am.UpdateAccountPeers(ctx, accountID) + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceGroup, Operation: types.UpdateOperationUpdate}) } return nil @@ -531,7 +531,7 @@ func (am *DefaultAccountManager) GroupAddResource(ctx context.Context, accountID } if updateAccountPeers { - am.UpdateAccountPeers(ctx, accountID) + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceGroup, Operation: types.UpdateOperationUpdate}) } return nil @@ -559,7 +559,7 @@ func (am *DefaultAccountManager) GroupDeletePeer(ctx context.Context, accountID, } if updateAccountPeers { - am.UpdateAccountPeers(ctx, accountID) + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceGroup, Operation: types.UpdateOperationUpdate}) } return nil @@ -597,7 +597,7 @@ func (am *DefaultAccountManager) GroupDeleteResource(ctx context.Context, accoun } if updateAccountPeers { - am.UpdateAccountPeers(ctx, accountID) + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceGroup, Operation: types.UpdateOperationUpdate}) } return nil diff --git a/management/server/mock_server/account_mock.go b/management/server/mock_server/account_mock.go index ff369355e..ac4d0c6d6 100644 --- a/management/server/mock_server/account_mock.go +++ b/management/server/mock_server/account_mock.go @@ -128,8 +128,8 @@ type MockAccountManager struct { GetOrCreateAccountByPrivateDomainFunc func(ctx context.Context, initiatorId, domain string) (*types.Account, bool, error) AllowSyncFunc func(string, uint64) bool - UpdateAccountPeersFunc func(ctx context.Context, accountID string) - BufferUpdateAccountPeersFunc func(ctx context.Context, accountID string) + UpdateAccountPeersFunc func(ctx context.Context, accountID string, reason types.UpdateReason) + BufferUpdateAccountPeersFunc func(ctx context.Context, accountID string, reason types.UpdateReason) RecalculateNetworkMapCacheFunc func(ctx context.Context, accountId string) error GetIdentityProviderFunc func(ctx context.Context, accountID, idpID, userID string) (*types.IdentityProvider, error) @@ -200,15 +200,15 @@ func (am *MockAccountManager) UpdateGroups(ctx context.Context, accountID, userI return status.Errorf(codes.Unimplemented, "method UpdateGroups is not implemented") } -func (am *MockAccountManager) UpdateAccountPeers(ctx context.Context, accountID string) { +func (am *MockAccountManager) UpdateAccountPeers(ctx context.Context, accountID string, reason types.UpdateReason) { if am.UpdateAccountPeersFunc != nil { - am.UpdateAccountPeersFunc(ctx, accountID) + am.UpdateAccountPeersFunc(ctx, accountID, reason) } } -func (am *MockAccountManager) BufferUpdateAccountPeers(ctx context.Context, accountID string) { +func (am *MockAccountManager) BufferUpdateAccountPeers(ctx context.Context, accountID string, reason types.UpdateReason) { if am.BufferUpdateAccountPeersFunc != nil { - am.BufferUpdateAccountPeersFunc(ctx, accountID) + am.BufferUpdateAccountPeersFunc(ctx, accountID, reason) } } diff --git a/management/server/nameserver.go b/management/server/nameserver.go index 3d8c78912..5859bfb0d 100644 --- a/management/server/nameserver.go +++ b/management/server/nameserver.go @@ -82,7 +82,7 @@ func (am *DefaultAccountManager) CreateNameServerGroup(ctx context.Context, acco am.StoreEvent(ctx, userID, newNSGroup.ID, accountID, activity.NameserverGroupCreated, newNSGroup.EventMeta()) if updateAccountPeers { - am.UpdateAccountPeers(ctx, accountID) + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceNameServerGroup, Operation: types.UpdateOperationCreate}) } return newNSGroup.Copy(), nil @@ -133,7 +133,7 @@ func (am *DefaultAccountManager) SaveNameServerGroup(ctx context.Context, accoun am.StoreEvent(ctx, userID, nsGroupToSave.ID, accountID, activity.NameserverGroupUpdated, nsGroupToSave.EventMeta()) if updateAccountPeers { - am.UpdateAccountPeers(ctx, accountID) + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceNameServerGroup, Operation: types.UpdateOperationUpdate}) } return nil @@ -176,7 +176,7 @@ func (am *DefaultAccountManager) DeleteNameServerGroup(ctx context.Context, acco am.StoreEvent(ctx, userID, nsGroup.ID, accountID, activity.NameserverGroupDeleted, nsGroup.EventMeta()) if updateAccountPeers { - am.UpdateAccountPeers(ctx, accountID) + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceNameServerGroup, Operation: types.UpdateOperationDelete}) } return nil diff --git a/management/server/networks/manager.go b/management/server/networks/manager.go index b6706ca45..c96b60bb2 100644 --- a/management/server/networks/manager.go +++ b/management/server/networks/manager.go @@ -15,6 +15,7 @@ import ( "github.com/netbirdio/netbird/management/server/permissions/modules" "github.com/netbirdio/netbird/management/server/permissions/operations" "github.com/netbirdio/netbird/management/server/store" + serverTypes "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/shared/management/status" ) @@ -177,7 +178,7 @@ func (m *managerImpl) DeleteNetwork(ctx context.Context, accountID, userID, netw event() } - go m.accountManager.UpdateAccountPeers(ctx, accountID) + go m.accountManager.UpdateAccountPeers(ctx, accountID, serverTypes.UpdateReason{Resource: serverTypes.UpdateResourceNetwork, Operation: serverTypes.UpdateOperationDelete}) return nil } diff --git a/management/server/networks/resources/manager.go b/management/server/networks/resources/manager.go index 86f9b6579..5a0e26533 100644 --- a/management/server/networks/resources/manager.go +++ b/management/server/networks/resources/manager.go @@ -162,7 +162,7 @@ func (m *managerImpl) CreateResource(ctx context.Context, userID string, resourc event() } - go m.accountManager.UpdateAccountPeers(ctx, resource.AccountID) + go m.accountManager.UpdateAccountPeers(ctx, resource.AccountID, nbtypes.UpdateReason{Resource: nbtypes.UpdateResourceNetworkResource, Operation: nbtypes.UpdateOperationCreate}) return resource, nil } @@ -270,7 +270,7 @@ func (m *managerImpl) UpdateResource(ctx context.Context, userID string, resourc } }() - go m.accountManager.UpdateAccountPeers(ctx, resource.AccountID) + go m.accountManager.UpdateAccountPeers(ctx, resource.AccountID, nbtypes.UpdateReason{Resource: nbtypes.UpdateResourceNetworkResource, Operation: nbtypes.UpdateOperationUpdate}) return resource, nil } @@ -352,7 +352,7 @@ func (m *managerImpl) DeleteResource(ctx context.Context, accountID, userID, net event() } - go m.accountManager.UpdateAccountPeers(ctx, accountID) + go m.accountManager.UpdateAccountPeers(ctx, accountID, nbtypes.UpdateReason{Resource: nbtypes.UpdateResourceNetworkResource, Operation: nbtypes.UpdateOperationDelete}) return nil } diff --git a/management/server/networks/routers/manager.go b/management/server/networks/routers/manager.go index 82cac424a..c7c3f2ff4 100644 --- a/management/server/networks/routers/manager.go +++ b/management/server/networks/routers/manager.go @@ -15,6 +15,7 @@ import ( "github.com/netbirdio/netbird/management/server/permissions/modules" "github.com/netbirdio/netbird/management/server/permissions/operations" "github.com/netbirdio/netbird/management/server/store" + serverTypes "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/shared/management/status" ) @@ -119,7 +120,7 @@ func (m *managerImpl) CreateRouter(ctx context.Context, userID string, router *t m.accountManager.StoreEvent(ctx, userID, router.ID, router.AccountID, activity.NetworkRouterCreated, router.EventMeta(network)) - go m.accountManager.UpdateAccountPeers(ctx, router.AccountID) + go m.accountManager.UpdateAccountPeers(ctx, router.AccountID, serverTypes.UpdateReason{Resource: serverTypes.UpdateResourceNetworkRouter, Operation: serverTypes.UpdateOperationCreate}) return router, nil } @@ -183,7 +184,7 @@ func (m *managerImpl) UpdateRouter(ctx context.Context, userID string, router *t m.accountManager.StoreEvent(ctx, userID, router.ID, router.AccountID, activity.NetworkRouterUpdated, router.EventMeta(network)) - go m.accountManager.UpdateAccountPeers(ctx, router.AccountID) + go m.accountManager.UpdateAccountPeers(ctx, router.AccountID, serverTypes.UpdateReason{Resource: serverTypes.UpdateResourceNetworkRouter, Operation: serverTypes.UpdateOperationUpdate}) return router, nil } @@ -217,7 +218,7 @@ func (m *managerImpl) DeleteRouter(ctx context.Context, accountID, userID, netwo event() - go m.accountManager.UpdateAccountPeers(ctx, accountID) + go m.accountManager.UpdateAccountPeers(ctx, accountID, serverTypes.UpdateReason{Resource: serverTypes.UpdateResourceNetworkRouter, Operation: serverTypes.UpdateOperationDelete}) return nil } diff --git a/management/server/peer.go b/management/server/peer.go index 07428539b..d1c52002e 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -1221,12 +1221,12 @@ func (am *DefaultAccountManager) GetPeer(ctx context.Context, accountID, peerID, // UpdateAccountPeers updates all peers that belong to an account. // Should be called when changes have to be synced to peers. -func (am *DefaultAccountManager) UpdateAccountPeers(ctx context.Context, accountID string) { - _ = am.networkMapController.UpdateAccountPeers(ctx, accountID) +func (am *DefaultAccountManager) UpdateAccountPeers(ctx context.Context, accountID string, reason types.UpdateReason) { + _ = am.networkMapController.UpdateAccountPeers(ctx, accountID, reason) } -func (am *DefaultAccountManager) BufferUpdateAccountPeers(ctx context.Context, accountID string) { - _ = am.networkMapController.BufferUpdateAccountPeers(ctx, accountID) +func (am *DefaultAccountManager) BufferUpdateAccountPeers(ctx context.Context, accountID string, reason types.UpdateReason) { + _ = am.networkMapController.BufferUpdateAccountPeers(ctx, accountID, reason) } // UpdateAccountPeer updates a single peer that belongs to an account. diff --git a/management/server/peer_test.go b/management/server/peer_test.go index dae676e77..36809d354 100644 --- a/management/server/peer_test.go +++ b/management/server/peer_test.go @@ -975,7 +975,7 @@ func BenchmarkUpdateAccountPeers(b *testing.B) { start := time.Now() for i := 0; i < b.N; i++ { - manager.UpdateAccountPeers(ctx, account.Id) + manager.UpdateAccountPeers(ctx, account.Id, types.UpdateReason{}) } duration := time.Since(start) @@ -1033,7 +1033,7 @@ func testUpdateAccountPeers(t *testing.T) { peerChannels[peerID] = updateManager.CreateChannel(ctx, peerID) } - manager.UpdateAccountPeers(ctx, account.Id) + manager.UpdateAccountPeers(ctx, account.Id, types.UpdateReason{}) for _, channel := range peerChannels { update := <-channel diff --git a/management/server/policy.go b/management/server/policy.go index 48297ca11..40f3908e3 100644 --- a/management/server/policy.go +++ b/management/server/policy.go @@ -96,7 +96,11 @@ func (am *DefaultAccountManager) SavePolicy(ctx context.Context, accountID, user am.StoreEvent(ctx, userID, policy.ID, accountID, action, policy.EventMeta()) if updateAccountPeers { - am.UpdateAccountPeers(ctx, accountID) + policyOp := types.UpdateOperationCreate + if isUpdate { + policyOp = types.UpdateOperationUpdate + } + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourcePolicy, Operation: policyOp}) } return policy, nil @@ -139,7 +143,7 @@ func (am *DefaultAccountManager) DeletePolicy(ctx context.Context, accountID, po am.StoreEvent(ctx, userID, policyID, accountID, activity.PolicyRemoved, policy.EventMeta()) if updateAccountPeers { - am.UpdateAccountPeers(ctx, accountID) + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourcePolicy, Operation: types.UpdateOperationDelete}) } return nil diff --git a/management/server/posture_checks.go b/management/server/posture_checks.go index 9562487c0..1e3ce4b8a 100644 --- a/management/server/posture_checks.go +++ b/management/server/posture_checks.go @@ -11,6 +11,7 @@ import ( "github.com/netbirdio/netbird/management/server/permissions/operations" "github.com/netbirdio/netbird/management/server/posture" "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" "github.com/netbirdio/netbird/shared/management/status" ) @@ -76,7 +77,11 @@ func (am *DefaultAccountManager) SavePostureChecks(ctx context.Context, accountI am.StoreEvent(ctx, userID, postureChecks.ID, accountID, action, postureChecks.EventMeta()) if updateAccountPeers { - am.UpdateAccountPeers(ctx, accountID) + postureOp := types.UpdateOperationCreate + if isUpdate { + postureOp = types.UpdateOperationUpdate + } + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourcePostureCheck, Operation: postureOp}) } return postureChecks, nil diff --git a/management/server/route.go b/management/server/route.go index 2b4f11d05..a9561faf0 100644 --- a/management/server/route.go +++ b/management/server/route.go @@ -191,7 +191,7 @@ func (am *DefaultAccountManager) CreateRoute(ctx context.Context, accountID stri am.StoreEvent(ctx, userID, string(newRoute.ID), accountID, activity.RouteCreated, newRoute.EventMeta()) if updateAccountPeers { - am.UpdateAccountPeers(ctx, accountID) + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceRoute, Operation: types.UpdateOperationCreate}) } return newRoute, nil @@ -245,7 +245,7 @@ func (am *DefaultAccountManager) SaveRoute(ctx context.Context, accountID, userI am.StoreEvent(ctx, userID, string(routeToSave.ID), accountID, activity.RouteUpdated, routeToSave.EventMeta()) if oldRouteAffectsPeers || newRouteAffectsPeers { - am.UpdateAccountPeers(ctx, accountID) + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceRoute, Operation: types.UpdateOperationUpdate}) } return nil @@ -288,7 +288,7 @@ func (am *DefaultAccountManager) DeleteRoute(ctx context.Context, accountID stri am.StoreEvent(ctx, userID, string(route.ID), accountID, activity.RouteRemoved, route.EventMeta()) if updateAccountPeers { - am.UpdateAccountPeers(ctx, accountID) + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceRoute, Operation: types.UpdateOperationDelete}) } return nil diff --git a/management/server/telemetry/accountmanager_metrics.go b/management/server/telemetry/accountmanager_metrics.go index 3b1e078eb..518aae7eb 100644 --- a/management/server/telemetry/accountmanager_metrics.go +++ b/management/server/telemetry/accountmanager_metrics.go @@ -4,6 +4,7 @@ import ( "context" "time" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" ) @@ -11,6 +12,7 @@ import ( type AccountManagerMetrics struct { ctx context.Context updateAccountPeersDurationMs metric.Float64Histogram + updateAccountPeersCounter metric.Int64Counter getPeerNetworkMapDurationMs metric.Float64Histogram networkMapObjectCount metric.Int64Histogram peerMetaUpdateCount metric.Int64Counter @@ -48,6 +50,13 @@ func NewAccountManagerMetrics(ctx context.Context, meter metric.Meter) (*Account return nil, err } + updateAccountPeersCounter, err := meter.Int64Counter("management.account.update.account.peers.counter", + metric.WithUnit("1"), + metric.WithDescription("Number of account peers updates triggered, labeled by resource and operation")) + if err != nil { + return nil, err + } + peerMetaUpdateCount, err := meter.Int64Counter("management.account.peer.meta.update.counter", metric.WithUnit("1"), metric.WithDescription("Number of updates with new meta data from the peers")) @@ -59,6 +68,7 @@ func NewAccountManagerMetrics(ctx context.Context, meter metric.Meter) (*Account ctx: ctx, getPeerNetworkMapDurationMs: getPeerNetworkMapDurationMs, updateAccountPeersDurationMs: updateAccountPeersDurationMs, + updateAccountPeersCounter: updateAccountPeersCounter, networkMapObjectCount: networkMapObjectCount, peerMetaUpdateCount: peerMetaUpdateCount, }, nil @@ -80,6 +90,16 @@ func (metrics *AccountManagerMetrics) CountNetworkMapObjects(count int64) { metrics.networkMapObjectCount.Record(metrics.ctx, count) } +// CountUpdateAccountPeersTriggered increments the counter for account peers updates with resource and operation labels. +func (metrics *AccountManagerMetrics) CountUpdateAccountPeersTriggered(resource, operation string) { + metrics.updateAccountPeersCounter.Add(metrics.ctx, 1, + metric.WithAttributes( + attribute.String("resource", resource), + attribute.String("operation", operation), + ), + ) +} + // CountPeerMetUpdate counts the number of peer meta updates func (metrics *AccountManagerMetrics) CountPeerMetUpdate() { metrics.peerMetaUpdateCount.Add(metrics.ctx, 1) diff --git a/management/server/types/update_reason.go b/management/server/types/update_reason.go new file mode 100644 index 000000000..9d752da9a --- /dev/null +++ b/management/server/types/update_reason.go @@ -0,0 +1,37 @@ +package types + +// UpdateReason describes why an account peers update was triggered. +type UpdateReason struct { + Resource UpdateResource + Operation UpdateOperation +} + +// UpdateResource represents the kind of resource that triggered an account peers update. +type UpdateResource string + +const ( + UpdateResourceAccountSettings UpdateResource = "account_settings" + UpdateResourceDNSSettings UpdateResource = "dns_settings" + UpdateResourceGroup UpdateResource = "group" + UpdateResourceNameServerGroup UpdateResource = "nameserver_group" + UpdateResourcePolicy UpdateResource = "policy" + UpdateResourcePostureCheck UpdateResource = "posture_check" + UpdateResourceRoute UpdateResource = "route" + UpdateResourceUser UpdateResource = "user" + UpdateResourcePeer UpdateResource = "peer" + UpdateResourceNetwork UpdateResource = "network" + UpdateResourceNetworkResource UpdateResource = "network_resource" + UpdateResourceNetworkRouter UpdateResource = "network_router" + UpdateResourceService UpdateResource = "service" + UpdateResourceZone UpdateResource = "zone" + UpdateResourceZoneRecord UpdateResource = "zone_record" +) + +// UpdateOperation represents the kind of change that triggered the update. +type UpdateOperation string + +const ( + UpdateOperationCreate UpdateOperation = "create" + UpdateOperationUpdate UpdateOperation = "update" + UpdateOperationDelete UpdateOperation = "delete" +) diff --git a/management/server/user.go b/management/server/user.go index c1f984f2f..b1fb51195 100644 --- a/management/server/user.go +++ b/management/server/user.go @@ -675,7 +675,7 @@ func (am *DefaultAccountManager) SaveOrAddUsers(ctx context.Context, accountID, if err = am.Store.IncrementNetworkSerial(ctx, accountID); err != nil { return nil, fmt.Errorf("failed to increment network serial: %w", err) } - am.UpdateAccountPeers(ctx, accountID) + am.UpdateAccountPeers(ctx, accountID, types.UpdateReason{Resource: types.UpdateResourceUser, Operation: types.UpdateOperationUpdate}) } return updatedUsersInfo, globalErr From dcd1db42ef212d3c8e14a0be451681460fce0a7c Mon Sep 17 00:00:00 2001 From: Nicolas Frati Date: Thu, 30 Apr 2026 17:21:35 +0200 Subject: [PATCH 365/374] [management] Enable PAT creation during setup (#6003) * enable pat creation on setup * remove logic from handler towards setup service * fix lint issue * fix rollback on account id returning empty * fix coderabbit comments * fix setup PAT rollback behavior --- management/server/account/pat.go | 8 + management/server/http/handler.go | 6 +- .../handlers/instance/instance_handler.go | 31 +- .../instance/instance_handler_test.go | 254 +++++++++++++- management/server/instance/manager.go | 54 +++ management/server/instance/manager_test.go | 87 ++++- management/server/instance/setup_service.go | 216 ++++++++++++ .../server/instance/setup_service_test.go | 318 ++++++++++++++++++ management/server/user.go | 5 +- shared/management/http/api/openapi.yml | 28 +- shared/management/http/api/types.gen.go | 9 + 11 files changed, 997 insertions(+), 19 deletions(-) create mode 100644 management/server/account/pat.go create mode 100644 management/server/instance/setup_service.go create mode 100644 management/server/instance/setup_service_test.go diff --git a/management/server/account/pat.go b/management/server/account/pat.go new file mode 100644 index 000000000..8e5e3e3f9 --- /dev/null +++ b/management/server/account/pat.go @@ -0,0 +1,8 @@ +package account + +const ( + // PATMinExpireDays is the minimum allowed Personal Access Token expiration in days. + PATMinExpireDays = 1 + // PATMaxExpireDays is the maximum allowed Personal Access Token expiration in days. + PATMaxExpireDays = 365 +) diff --git a/management/server/http/handler.go b/management/server/http/handler.go index 56b2d8203..b9ea605d3 100644 --- a/management/server/http/handler.go +++ b/management/server/http/handler.go @@ -62,9 +62,7 @@ import ( "github.com/netbirdio/netbird/management/server/telemetry" ) -const ( - apiPrefix = "/api" -) +const apiPrefix = "/api" // NewAPIHandler creates the Management service HTTP API handler registering all the available endpoints. func NewAPIHandler(ctx context.Context, accountManager account.Manager, networksManager nbnetworks.Manager, resourceManager resources.Manager, routerManager routers.Manager, groupsManager nbgroups.Manager, LocationManager geolocation.Geolocation, authManager auth.Manager, appMetrics telemetry.AppMetrics, integratedValidator integrated_validator.IntegratedValidator, proxyController port_forwarding.Controller, permissionsManager permissions.Manager, peersManager nbpeers.Manager, settingsManager settings.Manager, zManager zones.Manager, rManager records.Manager, networkMapController network_map.Controller, idpManager idpmanager.Manager, serviceManager service.Manager, reverseProxyDomainManager *manager.Manager, reverseProxyAccessLogsManager accesslogs.Manager, proxyGRPCServer *nbgrpc.ProxyServiceServer, trustedHTTPProxies []netip.Prefix, rateLimiter *middleware.APIRateLimiter) (http.Handler, error) { @@ -141,7 +139,7 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks zonesManager.RegisterEndpoints(router, zManager) recordsManager.RegisterEndpoints(router, rManager) idp.AddEndpoints(accountManager, router) - instance.AddEndpoints(instanceManager, router) + instance.AddEndpoints(instanceManager, accountManager, router) instance.AddVersionEndpoint(instanceManager, router) if serviceManager != nil && reverseProxyDomainManager != nil { reverseproxymanager.RegisterEndpoints(serviceManager, *reverseProxyDomainManager, reverseProxyAccessLogsManager, permissionsManager, router) diff --git a/management/server/http/handlers/instance/instance_handler.go b/management/server/http/handlers/instance/instance_handler.go index cd9fae6b8..e98ce4d7c 100644 --- a/management/server/http/handlers/instance/instance_handler.go +++ b/management/server/http/handlers/instance/instance_handler.go @@ -7,6 +7,7 @@ import ( "github.com/gorilla/mux" log "github.com/sirupsen/logrus" + "github.com/netbirdio/netbird/management/server/account" nbinstance "github.com/netbirdio/netbird/management/server/instance" "github.com/netbirdio/netbird/shared/management/http/api" "github.com/netbirdio/netbird/shared/management/http/util" @@ -15,13 +16,15 @@ import ( // handler handles the instance setup HTTP endpoints type handler struct { instanceManager nbinstance.Manager + setupManager *nbinstance.SetupService } // AddEndpoints registers the instance setup endpoints. // These endpoints bypass authentication for initial setup. -func AddEndpoints(instanceManager nbinstance.Manager, router *mux.Router) { +func AddEndpoints(instanceManager nbinstance.Manager, accountManager account.Manager, router *mux.Router) { h := &handler{ instanceManager: instanceManager, + setupManager: nbinstance.NewSetupService(instanceManager, accountManager), } router.HandleFunc("/instance", h.getInstanceStatus).Methods("GET", "OPTIONS") @@ -55,24 +58,36 @@ func (h *handler) getInstanceStatus(w http.ResponseWriter, r *http.Request) { // setup creates the initial admin user for the instance. // This endpoint is unauthenticated but only works when setup is required. func (h *handler) setup(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + var req api.SetupRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { util.WriteErrorResponse("invalid request body", http.StatusBadRequest, w) return } - userData, err := h.instanceManager.CreateOwnerUser(r.Context(), req.Email, req.Password, req.Name) + result, err := h.setupManager.SetupOwner(ctx, req.Email, req.Password, req.Name, nbinstance.SetupOptions{ + CreatePAT: req.CreatePat != nil && *req.CreatePat, + PATExpireInDays: req.PatExpireIn, + }) if err != nil { - util.WriteError(r.Context(), err, w) + util.WriteError(ctx, err, w) return } - log.WithContext(r.Context()).Infof("instance setup completed: created user %s", req.Email) + log.WithContext(ctx).Infof("instance setup completed: created user %s", req.Email) - util.WriteJSONObject(r.Context(), w, api.SetupResponse{ - UserId: userData.ID, - Email: userData.Email, - }) + resp := api.SetupResponse{ + UserId: result.User.ID, + Email: result.User.Email, + } + + if result.PATPlainToken != "" { + resp.PersonalAccessToken = &result.PATPlainToken + } + + w.Header().Set("Cache-Control", "no-store") + util.WriteJSONObject(ctx, w, resp) } // getVersionInfo returns version information for NetBird components. diff --git a/management/server/http/handlers/instance/instance_handler_test.go b/management/server/http/handlers/instance/instance_handler_test.go index 470079c85..711e01964 100644 --- a/management/server/http/handlers/instance/instance_handler_test.go +++ b/management/server/http/handlers/instance/instance_handler_test.go @@ -10,12 +10,18 @@ import ( "net/mail" "testing" + "github.com/golang/mock/gomock" "github.com/gorilla/mux" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/netbirdio/netbird/management/server/account" "github.com/netbirdio/netbird/management/server/idp" nbinstance "github.com/netbirdio/netbird/management/server/instance" + "github.com/netbirdio/netbird/management/server/mock_server" + nbstore "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/auth" "github.com/netbirdio/netbird/shared/management/http/api" "github.com/netbirdio/netbird/shared/management/status" ) @@ -25,6 +31,7 @@ type mockInstanceManager struct { isSetupRequired bool isSetupRequiredFn func(ctx context.Context) (bool, error) createOwnerUserFn func(ctx context.Context, email, password, name string) (*idp.UserData, error) + rollbackSetupFn func(ctx context.Context, userID string) error getVersionInfoFn func(ctx context.Context) (*nbinstance.VersionInfo, error) } @@ -67,6 +74,13 @@ func (m *mockInstanceManager) CreateOwnerUser(ctx context.Context, email, passwo }, nil } +func (m *mockInstanceManager) RollbackSetup(ctx context.Context, userID string) error { + if m.rollbackSetupFn != nil { + return m.rollbackSetupFn(ctx, userID) + } + return nil +} + func (m *mockInstanceManager) GetVersionInfo(ctx context.Context) (*nbinstance.VersionInfo, error) { if m.getVersionInfoFn != nil { return m.getVersionInfoFn(ctx) @@ -82,8 +96,12 @@ func (m *mockInstanceManager) GetVersionInfo(ctx context.Context) (*nbinstance.V var _ nbinstance.Manager = (*mockInstanceManager)(nil) func setupTestRouter(manager nbinstance.Manager) *mux.Router { + return setupTestRouterWithPAT(manager, nil) +} + +func setupTestRouterWithPAT(manager nbinstance.Manager, accountManager account.Manager) *mux.Router { router := mux.NewRouter() - AddEndpoints(manager, router) + AddEndpoints(manager, accountManager, router) return router } @@ -161,6 +179,7 @@ func TestSetup_Success(t *testing.T) { router.ServeHTTP(rec, req) assert.Equal(t, http.StatusOK, rec.Code) + assert.Equal(t, "no-store", rec.Header().Get("Cache-Control")) var response api.SetupResponse err := json.NewDecoder(rec.Body).Decode(&response) @@ -293,6 +312,239 @@ func TestSetup_ManagerError(t *testing.T) { assert.Equal(t, http.StatusInternalServerError, rec.Code) } +func TestSetup_PAT_FeatureDisabled_IgnoresCreatePAT(t *testing.T) { + t.Setenv(nbinstance.SetupPATEnabledEnvKey, "false") + + manager := &mockInstanceManager{isSetupRequired: true} + // NB_SETUP_PAT_ENABLED=false: request fields must be silently ignored + router := setupTestRouterWithPAT(manager, &mock_server.MockAccountManager{}) + + body := `{"email": "admin@example.com", "password": "securepassword123", "name": "Admin", "create_pat": true}` + req := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBufferString(body)) + req.Header.Set("Content-Type", "application/json") + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + var response api.SetupResponse + require.NoError(t, json.NewDecoder(rec.Body).Decode(&response)) + assert.Nil(t, response.PersonalAccessToken) +} + +func TestSetup_PAT_FlagOmitted_NoPAT(t *testing.T) { + t.Setenv(nbinstance.SetupPATEnabledEnvKey, "true") + + manager := &mockInstanceManager{isSetupRequired: true} + router := setupTestRouterWithPAT(manager, &mock_server.MockAccountManager{}) + + body := `{"email": "admin@example.com", "password": "securepassword123", "name": "Admin"}` + req := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBufferString(body)) + req.Header.Set("Content-Type", "application/json") + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + var response api.SetupResponse + require.NoError(t, json.NewDecoder(rec.Body).Decode(&response)) + assert.Nil(t, response.PersonalAccessToken) +} + +func TestSetup_PAT_MissingExpireIn_DefaultsToOneDay(t *testing.T) { + t.Setenv(nbinstance.SetupPATEnabledEnvKey, "true") + + createCalled := false + manager := &mockInstanceManager{ + isSetupRequired: true, + createOwnerUserFn: func(ctx context.Context, email, password, name string) (*idp.UserData, error) { + createCalled = true + return &idp.UserData{ID: "u1", Email: email, Name: name}, nil + }, + } + accountMgr := &mock_server.MockAccountManager{ + GetAccountIDByUserIdFunc: func(_ context.Context, userAuth auth.UserAuth) (string, error) { + assert.Equal(t, "u1", userAuth.UserId) + return "acc-1", nil + }, + CreatePATFunc: func(_ context.Context, accountID, initiator, target, name string, expiresIn int) (*types.PersonalAccessTokenGenerated, error) { + assert.Equal(t, "acc-1", accountID) + assert.Equal(t, "u1", initiator) + assert.Equal(t, "u1", target) + assert.Equal(t, "setup-token", name) + assert.Equal(t, 1, expiresIn) + return &types.PersonalAccessTokenGenerated{PlainToken: "nbp_plain"}, nil + }, + } + router := setupTestRouterWithPAT(manager, accountMgr) + + body := `{"email": "admin@example.com", "password": "securepassword123", "name": "Admin", "create_pat": true}` + req := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBufferString(body)) + req.Header.Set("Content-Type", "application/json") + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + assert.Equal(t, "no-store", rec.Header().Get("Cache-Control")) + assert.True(t, createCalled) + var response api.SetupResponse + require.NoError(t, json.NewDecoder(rec.Body).Decode(&response)) + require.NotNil(t, response.PersonalAccessToken) + assert.Equal(t, "nbp_plain", *response.PersonalAccessToken) +} + +func TestSetup_PAT_ExpireOutOfRange(t *testing.T) { + t.Setenv(nbinstance.SetupPATEnabledEnvKey, "true") + + manager := &mockInstanceManager{isSetupRequired: true} + router := setupTestRouterWithPAT(manager, &mock_server.MockAccountManager{}) + + body := `{"email": "admin@example.com", "password": "securepassword123", "name": "Admin", "create_pat": true, "pat_expire_in": 0}` + req := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBufferString(body)) + req.Header.Set("Content-Type", "application/json") + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusUnprocessableEntity, rec.Code) +} + +func TestSetup_PAT_Success(t *testing.T) { + t.Setenv(nbinstance.SetupPATEnabledEnvKey, "true") + + manager := &mockInstanceManager{ + isSetupRequired: true, + createOwnerUserFn: func(ctx context.Context, email, password, name string) (*idp.UserData, error) { + return &idp.UserData{ID: "owner-id", Email: email, Name: name}, nil + }, + } + + gotAccountArgs := struct { + userID string + email string + }{} + accountMgr := &mock_server.MockAccountManager{ + GetAccountIDByUserIdFunc: func(_ context.Context, userAuth auth.UserAuth) (string, error) { + gotAccountArgs.userID = userAuth.UserId + gotAccountArgs.email = userAuth.Email + return "acc-1", nil + }, + CreatePATFunc: func(_ context.Context, accountID, initiator, target, name string, expiresIn int) (*types.PersonalAccessTokenGenerated, error) { + assert.Equal(t, "acc-1", accountID) + assert.Equal(t, "owner-id", initiator) + assert.Equal(t, "owner-id", target) + assert.Equal(t, "setup-token", name) + assert.Equal(t, 30, expiresIn) + return &types.PersonalAccessTokenGenerated{PlainToken: "nbp_plain"}, nil + }, + } + + router := setupTestRouterWithPAT(manager, accountMgr) + + body := `{"email": "admin@example.com", "password": "securepassword123", "name": "Admin", "create_pat": true, "pat_expire_in": 30}` + req := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBufferString(body)) + req.Header.Set("Content-Type", "application/json") + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + assert.Equal(t, "no-store", rec.Header().Get("Cache-Control")) + var response api.SetupResponse + require.NoError(t, json.NewDecoder(rec.Body).Decode(&response)) + assert.Equal(t, "owner-id", response.UserId) + require.NotNil(t, response.PersonalAccessToken) + assert.Equal(t, "nbp_plain", *response.PersonalAccessToken) + assert.Equal(t, "owner-id", gotAccountArgs.userID) + assert.Equal(t, "admin@example.com", gotAccountArgs.email) +} + +func TestSetup_PAT_AccountCreationFails_Rollback(t *testing.T) { + t.Setenv(nbinstance.SetupPATEnabledEnvKey, "true") + + ctrl := gomock.NewController(t) + accountStore := nbstore.NewMockStore(ctrl) + accountStore.EXPECT().GetAccountIDByUserID(gomock.Any(), nbstore.LockingStrengthNone, "owner-id").Return("", status.NewAccountNotFoundError("owner-id")) + + rolledBackFor := "" + manager := &mockInstanceManager{ + isSetupRequired: true, + createOwnerUserFn: func(ctx context.Context, email, password, name string) (*idp.UserData, error) { + return &idp.UserData{ID: "owner-id", Email: email, Name: name}, nil + }, + rollbackSetupFn: func(_ context.Context, userID string) error { + rolledBackFor = userID + return nil + }, + } + accountMgr := &mock_server.MockAccountManager{ + GetAccountIDByUserIdFunc: func(_ context.Context, _ auth.UserAuth) (string, error) { + return "", errors.New("db down") + }, + GetStoreFunc: func() nbstore.Store { + return accountStore + }, + } + + router := setupTestRouterWithPAT(manager, accountMgr) + + body := `{"email": "admin@example.com", "password": "securepassword123", "name": "Admin", "create_pat": true, "pat_expire_in": 30}` + req := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBufferString(body)) + req.Header.Set("Content-Type", "application/json") + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusInternalServerError, rec.Code) + assert.Equal(t, "owner-id", rolledBackFor, "RollbackSetup must be called with the created user id") +} + +func TestSetup_PAT_CreatePATFails_Rollback(t *testing.T) { + t.Setenv(nbinstance.SetupPATEnabledEnvKey, "true") + + ctrl := gomock.NewController(t) + accountStore := nbstore.NewMockStore(ctrl) + account := &types.Account{Id: "acc-1"} + accountStore.EXPECT().GetAccount(gomock.Any(), "acc-1").Return(account, nil) + accountStore.EXPECT().DeleteAccount(gomock.Any(), account).Return(nil) + + rolledBackFor := "" + manager := &mockInstanceManager{ + isSetupRequired: true, + createOwnerUserFn: func(ctx context.Context, email, password, name string) (*idp.UserData, error) { + return &idp.UserData{ID: "owner-id", Email: email, Name: name}, nil + }, + rollbackSetupFn: func(_ context.Context, userID string) error { + rolledBackFor = userID + return nil + }, + } + accountMgr := &mock_server.MockAccountManager{ + GetAccountIDByUserIdFunc: func(_ context.Context, _ auth.UserAuth) (string, error) { + return "acc-1", nil + }, + CreatePATFunc: func(_ context.Context, _, _, _, _ string, _ int) (*types.PersonalAccessTokenGenerated, error) { + return nil, status.Errorf(status.Internal, "token store unavailable") + }, + GetStoreFunc: func() nbstore.Store { + return accountStore + }, + } + + router := setupTestRouterWithPAT(manager, accountMgr) + + body := `{"email": "admin@example.com", "password": "securepassword123", "name": "Admin", "create_pat": true, "pat_expire_in": 30}` + req := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBufferString(body)) + req.Header.Set("Content-Type", "application/json") + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusInternalServerError, rec.Code) + assert.Equal(t, "owner-id", rolledBackFor, "RollbackSetup must be called when CreatePAT fails") +} + func TestGetVersionInfo_Success(t *testing.T) { manager := &mockInstanceManager{} router := mux.NewRouter() diff --git a/management/server/instance/manager.go b/management/server/instance/manager.go index 9579d7a35..2c355bb3b 100644 --- a/management/server/instance/manager.go +++ b/management/server/instance/manager.go @@ -12,6 +12,7 @@ import ( "sync" "time" + "github.com/dexidp/dex/storage" goversion "github.com/hashicorp/go-version" log "github.com/sirupsen/logrus" @@ -60,6 +61,13 @@ type Manager interface { // This should only be called when IsSetupRequired returns true. CreateOwnerUser(ctx context.Context, email, password, name string) (*idp.UserData, error) + // RollbackSetup reverses a successful CreateOwnerUser by deleting the user + // from the embedded IDP and reloading setupRequired from persistent state, so + // /api/setup can be retried only when no accounts or local users remain. Used + // when post-user steps (account or PAT creation) fail and the caller wants a + // clean slate. + RollbackSetup(ctx context.Context, userID string) error + // GetVersionInfo returns version information for NetBird components. GetVersionInfo(ctx context.Context) (*VersionInfo, error) } @@ -70,6 +78,7 @@ type instanceStore interface { type embeddedIdP interface { CreateUserWithPassword(ctx context.Context, email, password, name string) (*idp.UserData, error) + DeleteUser(ctx context.Context, userID string) error GetAllAccounts(ctx context.Context) (map[string][]*idp.UserData, error) } @@ -187,6 +196,51 @@ func (m *DefaultManager) CreateOwnerUser(ctx context.Context, email, password, n return userData, nil } +// RollbackSetup undoes a successful CreateOwnerUser: deletes the user from the +// embedded IDP and reloads setupRequired from persistent state. +func (m *DefaultManager) RollbackSetup(ctx context.Context, userID string) error { + if m.embeddedIdpManager == nil { + return errors.New("embedded IDP is not enabled") + } + + var deleteErr error + if err := m.embeddedIdpManager.DeleteUser(ctx, userID); err != nil { + if isNotFoundError(err) { + log.WithContext(ctx).Debugf("setup rollback user %s already deleted", userID) + } else { + deleteErr = fmt.Errorf("failed to delete user from embedded IdP: %w", err) + } + } + + if err := m.loadSetupRequired(ctx); err != nil { + reloadErr := fmt.Errorf("failed to reload setup state after rollback: %w", err) + if deleteErr != nil { + return errors.Join(deleteErr, reloadErr) + } + return reloadErr + } + + if deleteErr != nil { + return deleteErr + } + + log.WithContext(ctx).Infof("rolled back setup for user %s", userID) + return nil +} + +func isNotFoundError(err error) bool { + if err == nil { + return false + } + if errors.Is(err, storage.ErrNotFound) { + return true + } + if s, ok := status.FromError(err); ok { + return s.Type() == status.NotFound + } + return false +} + func (m *DefaultManager) checkSetupRequiredFromDB(ctx context.Context) error { numAccounts, err := m.store.GetAccountsCounter(ctx) if err != nil { diff --git a/management/server/instance/manager_test.go b/management/server/instance/manager_test.go index e3be9cfea..5ffb016de 100644 --- a/management/server/instance/manager_test.go +++ b/management/server/instance/manager_test.go @@ -10,16 +10,19 @@ import ( "testing" "time" + "github.com/dexidp/dex/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/shared/management/status" ) type mockIdP struct { - mu sync.Mutex - createUserFunc func(ctx context.Context, email, password, name string) (*idp.UserData, error) - users map[string][]*idp.UserData + mu sync.Mutex + createUserFunc func(ctx context.Context, email, password, name string) (*idp.UserData, error) + deleteUserFunc func(ctx context.Context, userID string) error + users map[string][]*idp.UserData getAllAccountsErr error } @@ -30,6 +33,13 @@ func (m *mockIdP) CreateUserWithPassword(ctx context.Context, email, password, n return &idp.UserData{ID: "test-user-id", Email: email, Name: name}, nil } +func (m *mockIdP) DeleteUser(ctx context.Context, userID string) error { + if m.deleteUserFunc != nil { + return m.deleteUserFunc(ctx, userID) + } + return nil +} + func (m *mockIdP) GetAllAccounts(_ context.Context) (map[string][]*idp.UserData, error) { m.mu.Lock() defer m.mu.Unlock() @@ -223,6 +233,77 @@ func TestIsSetupRequired_ReturnsFlag(t *testing.T) { assert.False(t, required) } +func TestRollbackSetup_UserAlreadyDeletedIsSuccess(t *testing.T) { + tests := []struct { + name string + err error + }{ + { + name: "management status not found", + err: status.NewUserNotFoundError("owner-id"), + }, + { + name: "dex storage not found", + err: fmt.Errorf("failed to get user for deletion: %w", storage.ErrNotFound), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + idpMock := &mockIdP{ + deleteUserFunc: func(_ context.Context, userID string) error { + assert.Equal(t, "owner-id", userID) + return tt.err + }, + } + mgr := newTestManager(idpMock, &mockStore{}) + mgr.setupRequired = false + + err := mgr.RollbackSetup(context.Background(), "owner-id") + require.NoError(t, err) + + required, err := mgr.IsSetupRequired(context.Background()) + require.NoError(t, err) + assert.True(t, required, "setup should be required when no accounts or local users remain") + }) + } +} + +func TestRollbackSetup_RecomputesSetupStateWhenAccountStillExists(t *testing.T) { + idpMock := &mockIdP{ + deleteUserFunc: func(_ context.Context, _ string) error { + return status.NewUserNotFoundError("owner-id") + }, + } + mgr := newTestManager(idpMock, &mockStore{accountsCount: 1}) + mgr.setupRequired = true + + err := mgr.RollbackSetup(context.Background(), "owner-id") + require.NoError(t, err) + + required, err := mgr.IsSetupRequired(context.Background()) + require.NoError(t, err) + assert.False(t, required, "setup should not be required while an account still exists") +} + +func TestRollbackSetup_ReturnsDeleteErrorButReloadsSetupState(t *testing.T) { + idpMock := &mockIdP{ + deleteUserFunc: func(_ context.Context, _ string) error { + return errors.New("idp unavailable") + }, + } + mgr := newTestManager(idpMock, &mockStore{}) + mgr.setupRequired = false + + err := mgr.RollbackSetup(context.Background(), "owner-id") + require.Error(t, err) + assert.Contains(t, err.Error(), "idp unavailable") + + required, err := mgr.IsSetupRequired(context.Background()) + require.NoError(t, err) + assert.True(t, required, "setup state should be reloaded even when user deletion fails") +} + func TestDefaultManager_ValidateSetupRequest(t *testing.T) { manager := &DefaultManager{setupRequired: true} diff --git a/management/server/instance/setup_service.go b/management/server/instance/setup_service.go new file mode 100644 index 000000000..92a4923be --- /dev/null +++ b/management/server/instance/setup_service.go @@ -0,0 +1,216 @@ +package instance + +import ( + "context" + "fmt" + "os" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/management/server/account" + "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/shared/auth" + "github.com/netbirdio/netbird/shared/management/status" +) + +const ( + setupPATTokenName = "setup-token" + + // SetupPATEnabledEnvKey enables setup-time Personal Access Token creation. + SetupPATEnabledEnvKey = "NB_SETUP_PAT_ENABLED" + + setupPATDefaultExpireDays = 1 +) + +// SetupOptions controls optional work performed during initial instance setup. +type SetupOptions struct { + // CreatePAT requests creation of a setup Personal Access Token. It is honored + // only when SetupPATEnabledEnvKey is set to "true". + CreatePAT bool + // PATExpireInDays defaults to 1 day when CreatePAT is requested and setup PAT + // creation is enabled. + PATExpireInDays *int +} + +// SetupResult contains resources created during initial instance setup. +type SetupResult struct { + User *idp.UserData + PATPlainToken string +} + +// SetupService orchestrates the initial setup use case across the instance and +// account bounded contexts and owns the compensation logic when a later step +// fails. +type SetupService struct { + instanceManager Manager + accountManager account.Manager + setupPATEnabled bool +} + +// NewSetupService creates a setup use-case service. +func NewSetupService(instanceManager Manager, accountManager account.Manager) *SetupService { + return &SetupService{ + instanceManager: instanceManager, + accountManager: accountManager, + setupPATEnabled: os.Getenv(SetupPATEnabledEnvKey) == "true", + } +} + +func normalizeSetupOptions(opts SetupOptions, setupPATEnabled bool) (SetupOptions, error) { + if !opts.CreatePAT { + return opts, nil + } + + if !setupPATEnabled { + opts.CreatePAT = false + opts.PATExpireInDays = nil + return opts, nil + } + + if opts.PATExpireInDays == nil { + defaultExpireInDays := setupPATDefaultExpireDays + opts.PATExpireInDays = &defaultExpireInDays + } + + if *opts.PATExpireInDays < account.PATMinExpireDays || *opts.PATExpireInDays > account.PATMaxExpireDays { + return opts, status.Errorf(status.InvalidArgument, "pat_expire_in must be between %d and %d", account.PATMinExpireDays, account.PATMaxExpireDays) + } + + return opts, nil +} + +// SetupOwner creates the initial owner user and, when requested and enabled by +// SetupPATEnabledEnvKey, provisions the account and a setup Personal Access +// Token. If account or PAT provisioning fails, created resources are rolled +// back so setup can be retried. If account rollback fails, user rollback is +// skipped to avoid leaving an account without its owner user. +func (m *SetupService) SetupOwner(ctx context.Context, email, password, name string, opts SetupOptions) (*SetupResult, error) { + opts, err := normalizeSetupOptions(opts, m.setupPATEnabled) + if err != nil { + return nil, err + } + + if opts.CreatePAT && m.accountManager == nil { + return nil, fmt.Errorf("account manager is required to create setup PAT") + } + + userData, err := m.instanceManager.CreateOwnerUser(ctx, email, password, name) + if err != nil { + return nil, err + } + + result := &SetupResult{User: userData} + if !opts.CreatePAT { + return result, nil + } + + userAuth := auth.UserAuth{ + UserId: userData.ID, + Email: userData.Email, + Name: userData.Name, + } + + accountID, err := m.accountManager.GetAccountIDByUserID(ctx, userAuth) + if err != nil { + err = fmt.Errorf("create account for setup user: %w", err) + if rollbackErr := m.rollbackSetup(ctx, userData.ID, "account provisioning failed", err, ""); rollbackErr != nil { + return nil, fmt.Errorf("%w; failed to roll back setup resources: %v", err, rollbackErr) + } + return nil, err + } + + pat, err := m.accountManager.CreatePAT(ctx, accountID, userData.ID, userData.ID, setupPATTokenName, *opts.PATExpireInDays) + if err != nil { + err = fmt.Errorf("create setup PAT: %w", err) + if rollbackErr := m.rollbackSetup(ctx, userData.ID, "setup PAT provisioning failed", err, accountID); rollbackErr != nil { + return nil, fmt.Errorf("%w; failed to roll back setup resources: %v", err, rollbackErr) + } + return nil, err + } + + result.PATPlainToken = pat.PlainToken + return result, nil +} + +func (m *SetupService) rollbackSetup(ctx context.Context, userID, reason string, origErr error, accountID string) error { + if accountID == "" { + resolvedAccountID, err := m.lookupSetupAccountIDForRollback(ctx, userID) + if err != nil { + rollbackErr := fmt.Errorf("resolve setup account for rollback: %w", err) + log.WithContext(ctx).Errorf("failed to resolve setup account for user %s after %s: original error: %v, rollback error: %v", userID, reason, origErr, rollbackErr) + return rollbackErr + } + accountID = resolvedAccountID + } + + if accountID != "" { + if err := m.rollbackSetupAccount(ctx, accountID); err != nil { + rollbackErr := fmt.Errorf("roll back setup account %s: %w", accountID, err) + log.WithContext(ctx).Errorf("failed to roll back setup account %s for user %s after %s: original error: %v, rollback error: %v", accountID, userID, reason, origErr, rollbackErr) + return rollbackErr + } + log.WithContext(ctx).Warnf("rolled back setup account %s for user %s after %s: %v", accountID, userID, reason, origErr) + } + + if err := m.instanceManager.RollbackSetup(ctx, userID); err != nil { + rollbackErr := fmt.Errorf("roll back setup user %s: %w", userID, err) + log.WithContext(ctx).Errorf("failed to roll back setup user %s after %s: original error: %v, rollback error: %v", userID, reason, origErr, rollbackErr) + return rollbackErr + } + log.WithContext(ctx).Warnf("rolled back setup user %s after %s: %v", userID, reason, origErr) + return nil +} + +func (m *SetupService) lookupSetupAccountIDForRollback(ctx context.Context, userID string) (string, error) { + if m.accountManager == nil { + return "", fmt.Errorf("account manager is required to resolve setup account") + } + + accountStore := m.accountManager.GetStore() + if accountStore == nil { + return "", fmt.Errorf("account store is unavailable") + } + + accountID, err := accountStore.GetAccountIDByUserID(ctx, store.LockingStrengthNone, userID) + if err != nil { + if isNotFoundError(err) { + return "", nil + } + return "", fmt.Errorf("get setup account ID for rollback: %w", err) + } + + return accountID, nil +} + +// rollbackSetupAccount removes only the setup-created account data from the +// store. It intentionally avoids accountManager.DeleteAccount because the normal +// account deletion path also deletes users from the IdP; embedded IdP cleanup is +// owned by instanceManager.RollbackSetup. +func (m *SetupService) rollbackSetupAccount(ctx context.Context, accountID string) error { + if m.accountManager == nil { + return fmt.Errorf("account manager is required to roll back setup account") + } + + accountStore := m.accountManager.GetStore() + if accountStore == nil { + return fmt.Errorf("account store is unavailable") + } + + account, err := accountStore.GetAccount(ctx, accountID) + if err != nil { + if isNotFoundError(err) { + return nil + } + return fmt.Errorf("get setup account for rollback: %w", err) + } + + if err := accountStore.DeleteAccount(ctx, account); err != nil { + if isNotFoundError(err) { + return nil + } + return fmt.Errorf("delete setup account for rollback: %w", err) + } + + return nil +} diff --git a/management/server/instance/setup_service_test.go b/management/server/instance/setup_service_test.go new file mode 100644 index 000000000..12ec7d0fa --- /dev/null +++ b/management/server/instance/setup_service_test.go @@ -0,0 +1,318 @@ +package instance + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/management/server/mock_server" + nbstore "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/auth" + "github.com/netbirdio/netbird/shared/management/status" +) + +type setupInstanceManagerMock struct { + createOwnerUserFn func(ctx context.Context, email, password, name string) (*idp.UserData, error) + rollbackSetupFn func(ctx context.Context, userID string) error +} + +func (m *setupInstanceManagerMock) IsSetupRequired(context.Context) (bool, error) { + return true, nil +} + +func (m *setupInstanceManagerMock) CreateOwnerUser(ctx context.Context, email, password, name string) (*idp.UserData, error) { + if m.createOwnerUserFn != nil { + return m.createOwnerUserFn(ctx, email, password, name) + } + return &idp.UserData{ID: "owner-id", Email: email, Name: name}, nil +} + +func (m *setupInstanceManagerMock) RollbackSetup(ctx context.Context, userID string) error { + if m.rollbackSetupFn != nil { + return m.rollbackSetupFn(ctx, userID) + } + return nil +} + +func (m *setupInstanceManagerMock) GetVersionInfo(context.Context) (*VersionInfo, error) { + return &VersionInfo{}, nil +} + +var _ Manager = (*setupInstanceManagerMock)(nil) + +func intPtr(v int) *int { + return &v +} + +func TestSetupOwner_PATFeatureDisabled_IgnoresCreatePAT(t *testing.T) { + t.Setenv(SetupPATEnabledEnvKey, "false") + + createCalls := 0 + setupManager := NewSetupService( + &setupInstanceManagerMock{ + createOwnerUserFn: func(_ context.Context, email, _, name string) (*idp.UserData, error) { + createCalls++ + return &idp.UserData{ID: "owner-id", Email: email, Name: name}, nil + }, + }, + &mock_server.MockAccountManager{}, + ) + + result, err := setupManager.SetupOwner(context.Background(), "admin@example.com", "securepassword123", "Admin", SetupOptions{ + CreatePAT: true, + }) + + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, "owner-id", result.User.ID) + assert.Empty(t, result.PATPlainToken) + assert.Equal(t, 1, createCalls) +} + +func TestSetupOwner_PATFeatureEnabled_MissingExpireDefaultsToOneDay(t *testing.T) { + t.Setenv(SetupPATEnabledEnvKey, "true") + + createCalled := false + setupManager := NewSetupService( + &setupInstanceManagerMock{ + createOwnerUserFn: func(_ context.Context, email, _, name string) (*idp.UserData, error) { + createCalled = true + return &idp.UserData{ID: "owner-id", Email: email, Name: name}, nil + }, + }, + &mock_server.MockAccountManager{ + GetAccountIDByUserIdFunc: func(_ context.Context, userAuth auth.UserAuth) (string, error) { + assert.Equal(t, "owner-id", userAuth.UserId) + return "acc-1", nil + }, + CreatePATFunc: func(_ context.Context, accountID, initiatorUserID, targetUserID, tokenName string, expiresIn int) (*types.PersonalAccessTokenGenerated, error) { + assert.Equal(t, "acc-1", accountID) + assert.Equal(t, "owner-id", initiatorUserID) + assert.Equal(t, "owner-id", targetUserID) + assert.Equal(t, setupPATTokenName, tokenName) + assert.Equal(t, setupPATDefaultExpireDays, expiresIn) + return &types.PersonalAccessTokenGenerated{PlainToken: "nbp_plain"}, nil + }, + }, + ) + + result, err := setupManager.SetupOwner(context.Background(), "admin@example.com", "securepassword123", "Admin", SetupOptions{ + CreatePAT: true, + }) + + require.NoError(t, err) + require.NotNil(t, result) + assert.True(t, createCalled) + assert.Equal(t, "nbp_plain", result.PATPlainToken) +} + +func TestSetupOwner_PATFeatureEnabled_MissingAccountManagerFailsBeforeCreateUser(t *testing.T) { + t.Setenv(SetupPATEnabledEnvKey, "true") + + createCalled := false + rollbackCalled := false + setupManager := NewSetupService( + &setupInstanceManagerMock{ + createOwnerUserFn: func(_ context.Context, email, _, name string) (*idp.UserData, error) { + createCalled = true + return &idp.UserData{ID: "owner-id", Email: email, Name: name}, nil + }, + rollbackSetupFn: func(_ context.Context, _ string) error { + rollbackCalled = true + return nil + }, + }, + nil, + ) + + result, err := setupManager.SetupOwner(context.Background(), "admin@example.com", "securepassword123", "Admin", SetupOptions{ + CreatePAT: true, + }) + + require.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), "account manager is required") + assert.False(t, createCalled) + assert.False(t, rollbackCalled) +} + +func TestSetupOwner_AccountProvisioningFails_RollsBackSideEffectAccountAndUser(t *testing.T) { + t.Setenv(SetupPATEnabledEnvKey, "true") + + ctrl := gomock.NewController(t) + accountStore := nbstore.NewMockStore(ctrl) + account := &types.Account{Id: "acc-1"} + accountStore.EXPECT().GetAccountIDByUserID(gomock.Any(), nbstore.LockingStrengthNone, "owner-id").Return("acc-1", nil) + accountStore.EXPECT().GetAccount(gomock.Any(), "acc-1").Return(account, nil) + accountStore.EXPECT().DeleteAccount(gomock.Any(), account).Return(nil) + + rolledBackFor := "" + rollbackCalls := 0 + setupManager := NewSetupService( + &setupInstanceManagerMock{ + rollbackSetupFn: func(_ context.Context, userID string) error { + rollbackCalls++ + rolledBackFor = userID + return nil + }, + }, + &mock_server.MockAccountManager{ + GetAccountIDByUserIdFunc: func(_ context.Context, userAuth auth.UserAuth) (string, error) { + assert.Equal(t, "owner-id", userAuth.UserId) + return "", errors.New("metadata update failed") + }, + GetStoreFunc: func() nbstore.Store { + return accountStore + }, + }, + ) + + result, err := setupManager.SetupOwner(context.Background(), "admin@example.com", "securepassword123", "Admin", SetupOptions{ + CreatePAT: true, + PATExpireInDays: intPtr(30), + }) + + require.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), "create account for setup user") + assert.Equal(t, "owner-id", rolledBackFor) + assert.Equal(t, 1, rollbackCalls) +} + +func TestSetupOwner_CreatePATFails_RollsBackSetupAccountAndUser(t *testing.T) { + t.Setenv(SetupPATEnabledEnvKey, "true") + + ctrl := gomock.NewController(t) + accountStore := nbstore.NewMockStore(ctrl) + account := &types.Account{Id: "acc-1"} + accountStore.EXPECT().GetAccount(gomock.Any(), "acc-1").Return(account, nil) + accountStore.EXPECT().DeleteAccount(gomock.Any(), account).Return(nil) + + rollbackCalls := 0 + setupManager := NewSetupService( + &setupInstanceManagerMock{ + rollbackSetupFn: func(_ context.Context, userID string) error { + rollbackCalls++ + assert.Equal(t, "owner-id", userID) + return nil + }, + }, + &mock_server.MockAccountManager{ + GetAccountIDByUserIdFunc: func(_ context.Context, userAuth auth.UserAuth) (string, error) { + assert.Equal(t, "owner-id", userAuth.UserId) + return "acc-1", nil + }, + CreatePATFunc: func(_ context.Context, accountID, initiatorUserID, targetUserID, tokenName string, expiresIn int) (*types.PersonalAccessTokenGenerated, error) { + assert.Equal(t, "acc-1", accountID) + assert.Equal(t, "owner-id", initiatorUserID) + assert.Equal(t, "owner-id", targetUserID) + assert.Equal(t, setupPATTokenName, tokenName) + assert.Equal(t, 30, expiresIn) + return nil, status.Errorf(status.Internal, "token store unavailable") + }, + GetStoreFunc: func() nbstore.Store { + return accountStore + }, + }, + ) + + result, err := setupManager.SetupOwner(context.Background(), "admin@example.com", "securepassword123", "Admin", SetupOptions{ + CreatePAT: true, + PATExpireInDays: intPtr(30), + }) + + require.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), "create setup PAT") + assert.Equal(t, 1, rollbackCalls) +} + +func TestSetupOwner_CreatePATFails_AccountAlreadyGoneStillRollsBackUser(t *testing.T) { + t.Setenv(SetupPATEnabledEnvKey, "true") + + ctrl := gomock.NewController(t) + accountStore := nbstore.NewMockStore(ctrl) + accountStore.EXPECT().GetAccount(gomock.Any(), "acc-1").Return(nil, status.NewAccountNotFoundError("acc-1")) + + rolledBackFor := "" + rollbackCalls := 0 + setupManager := NewSetupService( + &setupInstanceManagerMock{ + rollbackSetupFn: func(_ context.Context, userID string) error { + rollbackCalls++ + rolledBackFor = userID + return nil + }, + }, + &mock_server.MockAccountManager{ + GetAccountIDByUserIdFunc: func(_ context.Context, _ auth.UserAuth) (string, error) { + return "acc-1", nil + }, + CreatePATFunc: func(_ context.Context, _, _, _, _ string, _ int) (*types.PersonalAccessTokenGenerated, error) { + return nil, errors.New("token failure") + }, + GetStoreFunc: func() nbstore.Store { + return accountStore + }, + }, + ) + + result, err := setupManager.SetupOwner(context.Background(), "admin@example.com", "securepassword123", "Admin", SetupOptions{ + CreatePAT: true, + PATExpireInDays: intPtr(30), + }) + + require.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), "create setup PAT") + assert.Equal(t, "owner-id", rolledBackFor) + assert.Equal(t, 1, rollbackCalls) +} + +func TestSetupOwner_CreatePATFails_AccountRollbackFailureStopsBeforeUserRollback(t *testing.T) { + t.Setenv(SetupPATEnabledEnvKey, "true") + + ctrl := gomock.NewController(t) + accountStore := nbstore.NewMockStore(ctrl) + account := &types.Account{Id: "acc-1"} + accountStore.EXPECT().GetAccount(gomock.Any(), "acc-1").Return(account, nil) + accountStore.EXPECT().DeleteAccount(gomock.Any(), account).Return(errors.New("delete failed")) + + rollbackCalls := 0 + setupManager := NewSetupService( + &setupInstanceManagerMock{ + rollbackSetupFn: func(_ context.Context, userID string) error { + rollbackCalls++ + return nil + }, + }, + &mock_server.MockAccountManager{ + GetAccountIDByUserIdFunc: func(_ context.Context, _ auth.UserAuth) (string, error) { + return "acc-1", nil + }, + CreatePATFunc: func(_ context.Context, _, _, _, _ string, _ int) (*types.PersonalAccessTokenGenerated, error) { + return nil, errors.New("token failure") + }, + GetStoreFunc: func() nbstore.Store { + return accountStore + }, + }, + ) + + result, err := setupManager.SetupOwner(context.Background(), "admin@example.com", "securepassword123", "Admin", SetupOptions{ + CreatePAT: true, + PATExpireInDays: intPtr(30), + }) + + require.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), "create setup PAT") + assert.Contains(t, err.Error(), "failed to roll back setup resources") + assert.Equal(t, 0, rollbackCalls) +} diff --git a/management/server/user.go b/management/server/user.go index b1fb51195..43e0a9821 100644 --- a/management/server/user.go +++ b/management/server/user.go @@ -15,6 +15,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/idp/dex" + "github.com/netbirdio/netbird/management/server/account" "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/idp" nbpeer "github.com/netbirdio/netbird/management/server/peer" @@ -395,8 +396,8 @@ func (am *DefaultAccountManager) CreatePAT(ctx context.Context, accountID string return nil, status.Errorf(status.InvalidArgument, "token name can't be empty") } - if expiresIn < 1 || expiresIn > 365 { - return nil, status.Errorf(status.InvalidArgument, "expiration has to be between 1 and 365") + if expiresIn < account.PATMinExpireDays || expiresIn > account.PATMaxExpireDays { + return nil, status.Errorf(status.InvalidArgument, "expiration has to be between %d and %d", account.PATMinExpireDays, account.PATMaxExpireDays) } allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, initiatorUserID, modules.Pats, operations.Create) diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index b70f89499..c5fdbfbe0 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -3426,6 +3426,17 @@ components: description: Display name for the admin user (defaults to email if not provided) type: string example: Admin User + create_pat: + description: If true and the server has setup-time PAT issuance enabled (NB_SETUP_PAT_ENABLED=true), create a Personal Access Token for the new owner user and return it in the response. Ignored when the server feature is disabled. + type: boolean + example: true + pat_expire_in: + description: Expiration of the Personal Access Token in days. Applies only when create_pat is true and the server feature is enabled. Defaults to 1 day when omitted. + type: integer + minimum: 1 + maximum: 365 + default: 1 + example: 30 required: - email - password @@ -3442,6 +3453,12 @@ components: description: Email address of the created user type: string example: admin@example.com + personal_access_token: + description: Plain text Personal Access Token created during setup. Present only when create_pat was requested and the NB_SETUP_PAT_ENABLED feature was enabled on the server. + type: string + format: password + readOnly: true + example: nbp_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx required: - user_id - email @@ -4980,7 +4997,10 @@ paths: /api/setup: post: summary: Setup Instance - description: Creates the initial admin user for the instance. This endpoint does not require authentication but only works when setup is required (no accounts exist and embedded IDP is enabled). + description: | + Creates the initial admin user for the instance. This endpoint does not require authentication but only works when setup is required (no accounts exist and embedded IDP is enabled). + + When the management server is started with `NB_SETUP_PAT_ENABLED=true` and the request includes `create_pat: true`, the endpoint also provisions the NetBird account for the new owner user and returns the plain text Personal Access Token in `personal_access_token`. The optional `pat_expire_in` value applies only when `create_pat` is true and defaults to 1 day when omitted. If a post-user step fails, setup-created resources are rolled back when safe; if account cleanup fails, the owner user is left in place to avoid leaving an account without its admin user. tags: [ Instance ] security: [ ] requestBody: @@ -4993,6 +5013,12 @@ paths: responses: '200': description: Setup completed successfully + headers: + Cache-Control: + description: Always set to no-store because the response may contain a one-time plain text Personal Access Token. + schema: + type: string + example: no-store content: application/json: schema: diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index d56cb9b74..11cb8e46a 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -4297,6 +4297,9 @@ type SetupKeyRequest struct { // SetupRequest Request to set up the initial admin user type SetupRequest struct { + // CreatePat If true and the server has setup-time PAT issuance enabled (NB_SETUP_PAT_ENABLED=true), create a Personal Access Token for the new owner user and return it in the response. Ignored when the server feature is disabled. + CreatePat *bool `json:"create_pat,omitempty"` + // Email Email address for the admin user Email string `json:"email"` @@ -4305,6 +4308,9 @@ type SetupRequest struct { // Password Password for the admin user (minimum 8 characters) Password string `json:"password"` + + // PatExpireIn Expiration of the Personal Access Token in days. Applies only when create_pat is true and the server feature is enabled. Defaults to 1 day when omitted. + PatExpireIn *int `json:"pat_expire_in,omitempty"` } // SetupResponse Response after successful instance setup @@ -4312,6 +4318,9 @@ type SetupResponse struct { // Email Email address of the created user Email string `json:"email"` + // PersonalAccessToken Plain text Personal Access Token created during setup. Present only when create_pat was requested and the NB_SETUP_PAT_ENABLED feature was enabled on the server. + PersonalAccessToken *string `json:"personal_access_token,omitempty"` + // UserId The ID of the created user UserId string `json:"user_id"` } From c4b2da4c92520d006af448d90c6f533352b10769 Mon Sep 17 00:00:00 2001 From: Misha Bragin Date: Thu, 30 Apr 2026 18:36:50 +0200 Subject: [PATCH 366/374] [management] Add public connection ipv4 and ipv6 posture check (#6038) This change enables admins to configure posture checks for connecting public IPs of their peers. It changes the behavior of the check as well and now the evaluation is if the received network is part of the configured network. --- management/server/posture/network.go | 47 ++++- management/server/posture/network_test.go | 200 ++++++++++++++++++++++ shared/management/http/api/openapi.yml | 9 +- shared/management/http/api/types.gen.go | 6 +- 4 files changed, 247 insertions(+), 15 deletions(-) diff --git a/management/server/posture/network.go b/management/server/posture/network.go index f78744143..4b4b3ccaa 100644 --- a/management/server/posture/network.go +++ b/management/server/posture/network.go @@ -17,19 +17,48 @@ type PeerNetworkRangeCheck struct { var _ Check = (*PeerNetworkRangeCheck)(nil) +// prefixContains reports whether outer fully contains inner (equal counts as contained). +// Requires the same address family, that outer is no more specific than inner (its +// netmask is shorter or equal), and that inner's network address falls inside outer. +// This is stricter than netip.Prefix.Contains(Addr) — a peer's /24 NIC will not match a +// configured /32 rule, since the rule covers a single host but the NIC describes a whole +// subnet whose host bits are unknown. +func prefixContains(outer, inner netip.Prefix) bool { + outer = outer.Masked() + inner = inner.Masked() + return outer.Bits() <= inner.Bits() && + outer.Addr().BitLen() == inner.Addr().BitLen() && // same family + outer.Contains(inner.Addr()) +} + +// Check evaluates configured ranges against the peer's local network interface prefixes +// and its public connection IP (as a /32 or /128). A configured range matches when it +// fully contains one of those prefixes, so operators can target both private subnets +// and public CIDRs (e.g. 1.0.0.0/24, 2.2.2.2/32). Including the connection IP is what +// lets a public-range posture check work — peer.Meta.NetworkAddresses only carries +// local NIC addresses. func (p *PeerNetworkRangeCheck) Check(ctx context.Context, peer nbpeer.Peer) (bool, error) { - if len(peer.Meta.NetworkAddresses) == 0 { + peerPrefixes := make([]netip.Prefix, 0, len(peer.Meta.NetworkAddresses)+1) + for _, peerNetAddr := range peer.Meta.NetworkAddresses { + peerPrefixes = append(peerPrefixes, peerNetAddr.NetIP) + } + // Unmap collapses 4-in-6 forms (::ffff:a.b.c.d) so an IPv4 range matches. + if connIP := peer.Location.ConnectionIP; len(connIP) > 0 { + if addr, ok := netip.AddrFromSlice(connIP); ok { + addr = addr.Unmap() + peerPrefixes = append(peerPrefixes, netip.PrefixFrom(addr, addr.BitLen())) + } + } + + if len(peerPrefixes) == 0 { return false, fmt.Errorf("peer's does not contain peer network range addresses") } - maskedPrefixes := make([]netip.Prefix, 0, len(p.Ranges)) - for _, prefix := range p.Ranges { - maskedPrefixes = append(maskedPrefixes, prefix.Masked()) - } - - for _, peerNetAddr := range peer.Meta.NetworkAddresses { - peerMaskedPrefix := peerNetAddr.NetIP.Masked() - if slices.Contains(maskedPrefixes, peerMaskedPrefix) { + for _, peerPrefix := range peerPrefixes { + for _, rangePrefix := range p.Ranges { + if !prefixContains(rangePrefix, peerPrefix) { + continue + } switch p.Action { case CheckActionDeny: return false, nil diff --git a/management/server/posture/network_test.go b/management/server/posture/network_test.go index a841bbe08..4af394c62 100644 --- a/management/server/posture/network_test.go +++ b/management/server/posture/network_test.go @@ -2,6 +2,7 @@ package posture import ( "context" + "net" "net/netip" "testing" @@ -134,6 +135,205 @@ func TestPeerNetworkRangeCheck_Check(t *testing.T) { wantErr: true, isValid: false, }, + { + name: "Peer connection IP matches the denied /32", + check: PeerNetworkRangeCheck{ + Action: CheckActionDeny, + Ranges: []netip.Prefix{ + netip.MustParsePrefix("109.41.115.194/32"), + }, + }, + peer: nbpeer.Peer{ + Meta: nbpeer.PeerSystemMeta{ + NetworkAddresses: []nbpeer.NetworkAddress{ + {NetIP: netip.MustParsePrefix("192.168.0.123/24")}, + }, + }, + Location: nbpeer.Location{ConnectionIP: net.ParseIP("109.41.115.194")}, + }, + wantErr: false, + isValid: false, + }, + { + name: "Peer connection IP does not match the denied /32", + check: PeerNetworkRangeCheck{ + Action: CheckActionDeny, + Ranges: []netip.Prefix{ + netip.MustParsePrefix("109.41.115.194/32"), + }, + }, + peer: nbpeer.Peer{ + Meta: nbpeer.PeerSystemMeta{ + NetworkAddresses: []nbpeer.NetworkAddress{ + {NetIP: netip.MustParsePrefix("192.168.0.123/24")}, + }, + }, + Location: nbpeer.Location{ConnectionIP: net.ParseIP("8.8.8.8")}, + }, + wantErr: false, + isValid: true, + }, + { + name: "Peer connection IP matches the allowed /32 with no NetworkAddresses", + check: PeerNetworkRangeCheck{ + Action: CheckActionAllow, + Ranges: []netip.Prefix{ + netip.MustParsePrefix("109.41.115.194/32"), + }, + }, + peer: nbpeer.Peer{ + Location: nbpeer.Location{ConnectionIP: net.ParseIP("109.41.115.194")}, + }, + wantErr: false, + isValid: true, + }, + { + name: "IPv6 connection IP matches the denied /128", + check: PeerNetworkRangeCheck{ + Action: CheckActionDeny, + Ranges: []netip.Prefix{ + netip.MustParsePrefix("2001:db8::1/128"), + }, + }, + peer: nbpeer.Peer{ + Location: nbpeer.Location{ConnectionIP: net.ParseIP("2001:db8::1")}, + }, + wantErr: false, + isValid: false, + }, + { + name: "IPv6 connection IP does not match the denied /128", + check: PeerNetworkRangeCheck{ + Action: CheckActionDeny, + Ranges: []netip.Prefix{ + netip.MustParsePrefix("2001:db8::1/128"), + }, + }, + peer: nbpeer.Peer{ + Location: nbpeer.Location{ConnectionIP: net.ParseIP("2001:db8::2")}, + }, + wantErr: false, + isValid: true, + }, + { + name: "IPv4-mapped IPv6 connection IP matches IPv4 /32", + check: PeerNetworkRangeCheck{ + Action: CheckActionDeny, + Ranges: []netip.Prefix{ + netip.MustParsePrefix("109.41.115.194/32"), + }, + }, + peer: nbpeer.Peer{ + Location: nbpeer.Location{ConnectionIP: net.ParseIP("::ffff:109.41.115.194")}, + }, + wantErr: false, + isValid: false, + }, + { + name: "Connection IP falls inside an allowed /24 range", + check: PeerNetworkRangeCheck{ + Action: CheckActionAllow, + Ranges: []netip.Prefix{ + netip.MustParsePrefix("1.0.0.0/24"), + netip.MustParsePrefix("2.2.2.2/32"), + }, + }, + peer: nbpeer.Peer{ + Location: nbpeer.Location{ConnectionIP: net.ParseIP("1.0.0.55")}, + }, + wantErr: false, + isValid: true, + }, + { + name: "Connection IP falls inside an allowed /23 range", + check: PeerNetworkRangeCheck{ + Action: CheckActionAllow, + Ranges: []netip.Prefix{ + netip.MustParsePrefix("3.0.0.0/23"), + }, + }, + peer: nbpeer.Peer{ + Location: nbpeer.Location{ConnectionIP: net.ParseIP("3.0.1.200")}, + }, + wantErr: false, + isValid: true, + }, + { + name: "Connection IP outside the allowed /24 range", + check: PeerNetworkRangeCheck{ + Action: CheckActionAllow, + Ranges: []netip.Prefix{ + netip.MustParsePrefix("1.0.0.0/24"), + }, + }, + peer: nbpeer.Peer{ + Location: nbpeer.Location{ConnectionIP: net.ParseIP("1.0.1.5")}, + }, + wantErr: false, + isValid: false, + }, + { + name: "Connection IP inside a denied /24 range", + check: PeerNetworkRangeCheck{ + Action: CheckActionDeny, + Ranges: []netip.Prefix{ + netip.MustParsePrefix("1.0.0.0/24"), + }, + }, + peer: nbpeer.Peer{ + Location: nbpeer.Location{ConnectionIP: net.ParseIP("1.0.0.7")}, + }, + wantErr: false, + isValid: false, + }, + { + name: "Local NIC /24 does not match a /32 rule even if host bit lines up", + check: PeerNetworkRangeCheck{ + Action: CheckActionAllow, + Ranges: []netip.Prefix{ + netip.MustParsePrefix("192.168.0.5/32"), + }, + }, + peer: nbpeer.Peer{ + Meta: nbpeer.PeerSystemMeta{ + NetworkAddresses: []nbpeer.NetworkAddress{ + {NetIP: netip.MustParsePrefix("192.168.0.5/24")}, + }, + }, + }, + wantErr: false, + isValid: false, + }, + { + name: "Local NIC address inside an allowed /16 range", + check: PeerNetworkRangeCheck{ + Action: CheckActionAllow, + Ranges: []netip.Prefix{ + netip.MustParsePrefix("192.168.0.0/16"), + }, + }, + peer: nbpeer.Peer{ + Meta: nbpeer.PeerSystemMeta{ + NetworkAddresses: []nbpeer.NetworkAddress{ + {NetIP: netip.MustParsePrefix("192.168.5.7/24")}, + }, + }, + }, + wantErr: false, + isValid: true, + }, + { + name: "Empty NetworkAddresses and empty ConnectionIP still errors", + check: PeerNetworkRangeCheck{ + Action: CheckActionDeny, + Ranges: []netip.Prefix{ + netip.MustParsePrefix("109.41.115.194/32"), + }, + }, + peer: nbpeer.Peer{}, + wantErr: true, + isValid: false, + }, } for _, tt := range tests { diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index c5fdbfbe0..327e20614 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -1687,15 +1687,18 @@ components: - locations - action PeerNetworkRangeCheck: - description: Posture check for allow or deny access based on peer local network addresses + description: | + Posture check for allow or deny access based on the peer's IP addresses. A range matches when it + contains any of the peer's local network interface IPs or its public connection (NAT egress) IP, + so ranges may target private subnets, public CIDRs, or single hosts via a /32 or /128. type: object properties: ranges: - description: List of peer network ranges in CIDR notation + description: List of network ranges in CIDR notation, matched against the peer's local interface IPs and its public connection IP type: array items: type: string - example: [ "192.168.1.0/24", "10.0.0.0/8", "2001:db8:1234:1a00::/56" ] + example: [ "192.168.1.0/24", "10.0.0.0/8", "1.0.0.0/24", "2.2.2.2/32", "2001:db8:1234:1a00::/56" ] action: description: Action to take upon policy match type: string diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index 11cb8e46a..dc916f81a 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -1626,7 +1626,7 @@ type Checks struct { // OsVersionCheck Posture check for the version of operating system OsVersionCheck *OSVersionCheck `json:"os_version_check,omitempty"` - // PeerNetworkRangeCheck Posture check for allow or deny access based on peer local network addresses + // PeerNetworkRangeCheck Posture check for allow or deny access based on the peer's IP addresses. A range matches when it contains any of the peer's local network interface IPs or its public connection (NAT egress) IP, so ranges may target private subnets, public CIDRs, or single hosts via a /32 or /128. PeerNetworkRangeCheck *PeerNetworkRangeCheck `json:"peer_network_range_check,omitempty"` // ProcessCheck Posture Check for binaries exist and are running in the peer’s system @@ -3312,12 +3312,12 @@ type PeerMinimum struct { Name string `json:"name"` } -// PeerNetworkRangeCheck Posture check for allow or deny access based on peer local network addresses +// PeerNetworkRangeCheck Posture check for allow or deny access based on the peer's IP addresses. A range matches when it contains any of the peer's local network interface IPs or its public connection (NAT egress) IP, so ranges may target private subnets, public CIDRs, or single hosts via a /32 or /128. type PeerNetworkRangeCheck struct { // Action Action to take upon policy match Action PeerNetworkRangeCheckAction `json:"action"` - // Ranges List of peer network ranges in CIDR notation + // Ranges List of network ranges in CIDR notation, matched against the peer's local interface IPs and its public connection IP Ranges []string `json:"ranges"` } From 057d651d2e1f27c539a16c010c34e1ba88a117de Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Mon, 4 May 2026 18:28:56 +0900 Subject: [PATCH 367/374] [client, proxy] Add packet capture to debug bundle and CLI (#5891) --- client/Dockerfile | 1 + client/Dockerfile-rootless | 1 + client/cmd/capture.go | 196 ++++++ client/cmd/debug.go | 41 ++ client/cmd/root.go | 1 + client/cmd/service.go | 1 + client/cmd/service_controller.go | 2 +- client/cmd/service_installer.go | 4 + client/cmd/service_params.go | 6 + client/cmd/service_params_test.go | 1 + client/cmd/testutil_test.go | 2 +- client/embed/capture.go | 65 ++ client/embed/embed.go | 53 +- client/firewall/uspfilter/filter.go | 32 +- .../firewall/uspfilter/forwarder/endpoint.go | 17 +- .../firewall/uspfilter/forwarder/forwarder.go | 10 + client/firewall/uspfilter/forwarder/icmp.go | 4 + client/iface/device/device_filter.go | 54 +- client/iface/device/device_filter_test.go | 2 +- client/internal/debug/debug.go | 33 +- client/internal/engine.go | 65 ++ client/internal/lazyconn/manager/manager.go | 5 +- client/internal/netflow/store/memory.go | 4 +- .../internal/routeselector/routeselector.go | 13 +- client/proto/daemon.pb.go | 563 ++++++++++++---- client/proto/daemon.proto | 34 + client/proto/daemon_grpc.pb.go | 131 +++- client/server/capture.go | 365 ++++++++++ client/server/debug.go | 5 +- client/server/server.go | 10 +- client/server/server_test.go | 6 +- client/server/setconfig_test.go | 2 +- client/ui/debug.go | 187 ++--- client/wasm/cmd/main.go | 96 +++ client/wasm/internal/capture/capture.go | 176 +++++ management/server/account_test.go | 2 +- proxy/cmd/proxy/cmd/debug.go | 114 ++++ proxy/internal/debug/client.go | 70 ++ proxy/internal/debug/handler.go | 77 +++ util/capture/afpacket_linux.go | 199 ++++++ util/capture/afpacket_stub.go | 26 + util/capture/capture.go | 59 ++ util/capture/filter.go | 528 +++++++++++++++ util/capture/filter_test.go | 263 ++++++++ util/capture/pcap.go | 85 +++ util/capture/pcap_test.go | 68 ++ util/capture/session.go | 213 ++++++ util/capture/session_test.go | 144 ++++ util/capture/text.go | 638 ++++++++++++++++++ 49 files changed, 4421 insertions(+), 253 deletions(-) create mode 100644 client/cmd/capture.go create mode 100644 client/embed/capture.go create mode 100644 client/server/capture.go create mode 100644 client/wasm/internal/capture/capture.go create mode 100644 util/capture/afpacket_linux.go create mode 100644 util/capture/afpacket_stub.go create mode 100644 util/capture/capture.go create mode 100644 util/capture/filter.go create mode 100644 util/capture/filter_test.go create mode 100644 util/capture/pcap.go create mode 100644 util/capture/pcap_test.go create mode 100644 util/capture/session.go create mode 100644 util/capture/session_test.go create mode 100644 util/capture/text.go diff --git a/client/Dockerfile b/client/Dockerfile index 64d5ba04f..53e4555ef 100644 --- a/client/Dockerfile +++ b/client/Dockerfile @@ -17,6 +17,7 @@ ENV \ NETBIRD_BIN="/usr/local/bin/netbird" \ NB_LOG_FILE="console,/var/log/netbird/client.log" \ NB_DAEMON_ADDR="unix:///var/run/netbird.sock" \ + NB_ENABLE_CAPTURE="false" \ NB_ENTRYPOINT_SERVICE_TIMEOUT="30" ENTRYPOINT [ "/usr/local/bin/netbird-entrypoint.sh" ] diff --git a/client/Dockerfile-rootless b/client/Dockerfile-rootless index 69d00aaf2..706bf40de 100644 --- a/client/Dockerfile-rootless +++ b/client/Dockerfile-rootless @@ -23,6 +23,7 @@ ENV \ NB_DAEMON_ADDR="unix:///var/lib/netbird/netbird.sock" \ NB_LOG_FILE="console,/var/lib/netbird/client.log" \ NB_DISABLE_DNS="true" \ + NB_ENABLE_CAPTURE="false" \ NB_ENTRYPOINT_SERVICE_TIMEOUT="30" ENTRYPOINT [ "/usr/local/bin/netbird-entrypoint.sh" ] diff --git a/client/cmd/capture.go b/client/cmd/capture.go new file mode 100644 index 000000000..95caaa5cd --- /dev/null +++ b/client/cmd/capture.go @@ -0,0 +1,196 @@ +package cmd + +import ( + "context" + "fmt" + "io" + "os" + "os/signal" + "path/filepath" + "strings" + "syscall" + + "github.com/hashicorp/go-multierror" + "github.com/spf13/cobra" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" + + nberrors "github.com/netbirdio/netbird/client/errors" + "github.com/netbirdio/netbird/client/proto" + "github.com/netbirdio/netbird/util/capture" +) + +var captureCmd = &cobra.Command{ + Use: "capture", + Short: "Capture packets on the WireGuard interface", + Long: `Captures decrypted packets flowing through the WireGuard interface. + +Default output is human-readable text. Use --pcap or --output for pcap binary. +Requires --enable-capture to be set at service install or reconfigure time. + +Examples: + netbird debug capture + netbird debug capture host 100.64.0.1 and port 443 + netbird debug capture tcp + netbird debug capture icmp + netbird debug capture src host 10.0.0.1 and dst port 80 + netbird debug capture -o capture.pcap + netbird debug capture --pcap | tshark -r - + netbird debug capture --pcap | tcpdump -r - -n`, + Args: cobra.ArbitraryArgs, + RunE: runCapture, +} + +func init() { + debugCmd.AddCommand(captureCmd) + + captureCmd.Flags().Bool("pcap", false, "Force pcap binary output (default when --output is set)") + captureCmd.Flags().BoolP("verbose", "v", false, "Show seq/ack, TTL, window, total length") + captureCmd.Flags().Bool("ascii", false, "Print payload as ASCII after each packet (useful for HTTP)") + captureCmd.Flags().Uint32("snap-len", 0, "Max bytes per packet (0 = full)") + captureCmd.Flags().DurationP("duration", "d", 0, "Capture duration (0 = until interrupted)") + captureCmd.Flags().StringP("output", "o", "", "Write pcap to file instead of stdout") +} + +func runCapture(cmd *cobra.Command, args []string) error { + conn, err := getClient(cmd) + if err != nil { + return err + } + defer func() { + if err := conn.Close(); err != nil { + cmd.PrintErrf(errCloseConnection, err) + } + }() + + client := proto.NewDaemonServiceClient(conn) + + req, err := buildCaptureRequest(cmd, args) + if err != nil { + return err + } + + ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGINT, syscall.SIGTERM) + defer cancel() + + stream, err := client.StartCapture(ctx, req) + if err != nil { + return handleCaptureError(err) + } + + // First Recv is the empty acceptance message from the server. If the + // device is unavailable (kernel WG, not connected, capture disabled), + // the server returns an error instead. + if _, err := stream.Recv(); err != nil { + return handleCaptureError(err) + } + + out, cleanup, err := captureOutput(cmd) + if err != nil { + return err + } + + if req.TextOutput { + cmd.PrintErrf("Capturing packets... Press Ctrl+C to stop.\n") + } else { + cmd.PrintErrf("Capturing packets (pcap)... Press Ctrl+C to stop.\n") + } + + streamErr := streamCapture(ctx, cmd, stream, out) + cleanupErr := cleanup() + if streamErr != nil { + return streamErr + } + return cleanupErr +} + +func buildCaptureRequest(cmd *cobra.Command, args []string) (*proto.StartCaptureRequest, error) { + req := &proto.StartCaptureRequest{} + + if len(args) > 0 { + expr := strings.Join(args, " ") + if _, err := capture.ParseFilter(expr); err != nil { + return nil, fmt.Errorf("invalid filter: %w", err) + } + req.FilterExpr = expr + } + + if snap, _ := cmd.Flags().GetUint32("snap-len"); snap > 0 { + req.SnapLen = snap + } + if d, _ := cmd.Flags().GetDuration("duration"); d != 0 { + if d < 0 { + return nil, fmt.Errorf("duration must not be negative") + } + req.Duration = durationpb.New(d) + } + req.Verbose, _ = cmd.Flags().GetBool("verbose") + req.Ascii, _ = cmd.Flags().GetBool("ascii") + + outPath, _ := cmd.Flags().GetString("output") + forcePcap, _ := cmd.Flags().GetBool("pcap") + req.TextOutput = !forcePcap && outPath == "" + + return req, nil +} + +func streamCapture(ctx context.Context, cmd *cobra.Command, stream proto.DaemonService_StartCaptureClient, out io.Writer) error { + for { + pkt, err := stream.Recv() + if err != nil { + if ctx.Err() != nil { + cmd.PrintErrf("\nCapture stopped.\n") + return nil //nolint:nilerr // user interrupted + } + if err == io.EOF { + cmd.PrintErrf("\nCapture finished.\n") + return nil + } + return handleCaptureError(err) + } + if _, err := out.Write(pkt.GetData()); err != nil { + return fmt.Errorf("write output: %w", err) + } + } +} + +// captureOutput returns the writer for capture data and a cleanup function +// that finalizes the file. Errors from the cleanup must be propagated. +func captureOutput(cmd *cobra.Command) (io.Writer, func() error, error) { + outPath, _ := cmd.Flags().GetString("output") + if outPath == "" { + return os.Stdout, func() error { return nil }, nil + } + + f, err := os.CreateTemp(filepath.Dir(outPath), filepath.Base(outPath)+".*.tmp") + if err != nil { + return nil, nil, fmt.Errorf("create output file: %w", err) + } + tmpPath := f.Name() + return f, func() error { + var merr *multierror.Error + if err := f.Close(); err != nil { + merr = multierror.Append(merr, fmt.Errorf("close output file: %w", err)) + } + fi, statErr := os.Stat(tmpPath) + if statErr != nil || fi.Size() == 0 { + if rmErr := os.Remove(tmpPath); rmErr != nil && !os.IsNotExist(rmErr) { + merr = multierror.Append(merr, fmt.Errorf("remove empty output file: %w", rmErr)) + } + return nberrors.FormatErrorOrNil(merr) + } + if err := os.Rename(tmpPath, outPath); err != nil { + merr = multierror.Append(merr, fmt.Errorf("rename output file: %w", err)) + return nberrors.FormatErrorOrNil(merr) + } + cmd.PrintErrf("Wrote %s\n", outPath) + return nberrors.FormatErrorOrNil(merr) + }, nil +} + +func handleCaptureError(err error) error { + if s, ok := status.FromError(err); ok { + return fmt.Errorf("%s", s.Message()) + } + return err +} diff --git a/client/cmd/debug.go b/client/cmd/debug.go index e3d3afe5f..2a8cdc887 100644 --- a/client/cmd/debug.go +++ b/client/cmd/debug.go @@ -9,6 +9,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" "github.com/netbirdio/netbird/client/internal" "github.com/netbirdio/netbird/client/internal/debug" @@ -239,11 +240,50 @@ func runForDuration(cmd *cobra.Command, args []string) error { }() } + captureStarted := false + if wantCapture, _ := cmd.Flags().GetBool("capture"); wantCapture { + captureTimeout := duration + 30*time.Second + const maxBundleCapture = 10 * time.Minute + if captureTimeout > maxBundleCapture { + captureTimeout = maxBundleCapture + } + _, err := client.StartBundleCapture(cmd.Context(), &proto.StartBundleCaptureRequest{ + Timeout: durationpb.New(captureTimeout), + }) + if err != nil { + cmd.PrintErrf("Failed to start packet capture: %v\n", status.Convert(err).Message()) + } else { + captureStarted = true + cmd.Println("Packet capture started.") + // Safety: always stop on exit, even if the normal stop below runs too. + defer func() { + if captureStarted { + stopCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if _, err := client.StopBundleCapture(stopCtx, &proto.StopBundleCaptureRequest{}); err != nil { + cmd.PrintErrf("Failed to stop packet capture: %v\n", err) + } + } + }() + } + } + if waitErr := waitForDurationOrCancel(cmd.Context(), duration, cmd); waitErr != nil { return waitErr } cmd.Println("\nDuration completed") + if captureStarted { + stopCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if _, err := client.StopBundleCapture(stopCtx, &proto.StopBundleCaptureRequest{}); err != nil { + cmd.PrintErrf("Failed to stop packet capture: %v\n", err) + } else { + captureStarted = false + cmd.Println("Packet capture stopped.") + } + } + if cpuProfilingStarted { if _, err := client.StopCPUProfile(cmd.Context(), &proto.StopCPUProfileRequest{}); err != nil { cmd.PrintErrf("Failed to stop CPU profiling: %v\n", err) @@ -416,4 +456,5 @@ func init() { forCmd.Flags().BoolVarP(&systemInfoFlag, "system-info", "S", true, "Adds system information to the debug bundle") forCmd.Flags().BoolVarP(&uploadBundleFlag, "upload-bundle", "U", false, "Uploads the debug bundle to a server") forCmd.Flags().StringVar(&uploadBundleURLFlag, "upload-bundle-url", types.DefaultBundleURL, "Service URL to get an URL to upload the debug bundle") + forCmd.Flags().Bool("capture", false, "Capture packets during the debug duration and include in bundle") } diff --git a/client/cmd/root.go b/client/cmd/root.go index c872fe9f6..29d4328a1 100644 --- a/client/cmd/root.go +++ b/client/cmd/root.go @@ -75,6 +75,7 @@ var ( mtu uint16 profilesDisabled bool updateSettingsDisabled bool + captureEnabled bool networksDisabled bool rootCmd = &cobra.Command{ diff --git a/client/cmd/service.go b/client/cmd/service.go index f1123ce8c..56d8a8726 100644 --- a/client/cmd/service.go +++ b/client/cmd/service.go @@ -44,6 +44,7 @@ func init() { serviceCmd.AddCommand(runCmd, startCmd, stopCmd, restartCmd, svcStatusCmd, installCmd, uninstallCmd, reconfigureCmd, resetParamsCmd) serviceCmd.PersistentFlags().BoolVar(&profilesDisabled, "disable-profiles", false, "Disables profiles feature. If enabled, the client will not be able to change or edit any profile. To persist this setting, use: netbird service install --disable-profiles") serviceCmd.PersistentFlags().BoolVar(&updateSettingsDisabled, "disable-update-settings", false, "Disables update settings feature. If enabled, the client will not be able to change or edit any settings. To persist this setting, use: netbird service install --disable-update-settings") + serviceCmd.PersistentFlags().BoolVar(&captureEnabled, "enable-capture", false, "Enables packet capture via 'netbird debug capture'. To persist, use: netbird service install --enable-capture") serviceCmd.PersistentFlags().BoolVar(&networksDisabled, "disable-networks", false, "Disables network selection. If enabled, the client will not allow listing, selecting, or deselecting networks. To persist, use: netbird service install --disable-networks") rootCmd.PersistentFlags().StringVarP(&serviceName, "service", "s", defaultServiceName, "Netbird system service name") diff --git a/client/cmd/service_controller.go b/client/cmd/service_controller.go index 0943b6184..88121c067 100644 --- a/client/cmd/service_controller.go +++ b/client/cmd/service_controller.go @@ -61,7 +61,7 @@ func (p *program) Start(svc service.Service) error { } } - serverInstance := server.New(p.ctx, util.FindFirstLogPath(logFiles), configPath, profilesDisabled, updateSettingsDisabled, networksDisabled) + serverInstance := server.New(p.ctx, util.FindFirstLogPath(logFiles), configPath, profilesDisabled, updateSettingsDisabled, captureEnabled, networksDisabled) if err := serverInstance.Start(); err != nil { log.Fatalf("failed to start daemon: %v", err) } diff --git a/client/cmd/service_installer.go b/client/cmd/service_installer.go index 5ada6f633..2d45fa063 100644 --- a/client/cmd/service_installer.go +++ b/client/cmd/service_installer.go @@ -59,6 +59,10 @@ func buildServiceArguments() []string { args = append(args, "--disable-update-settings") } + if captureEnabled { + args = append(args, "--enable-capture") + } + if networksDisabled { args = append(args, "--disable-networks") } diff --git a/client/cmd/service_params.go b/client/cmd/service_params.go index 5a86aebc6..192e0ac60 100644 --- a/client/cmd/service_params.go +++ b/client/cmd/service_params.go @@ -28,6 +28,7 @@ type serviceParams struct { LogFiles []string `json:"log_files,omitempty"` DisableProfiles bool `json:"disable_profiles,omitempty"` DisableUpdateSettings bool `json:"disable_update_settings,omitempty"` + EnableCapture bool `json:"enable_capture,omitempty"` DisableNetworks bool `json:"disable_networks,omitempty"` ServiceEnvVars map[string]string `json:"service_env_vars,omitempty"` } @@ -79,6 +80,7 @@ func currentServiceParams() *serviceParams { LogFiles: logFiles, DisableProfiles: profilesDisabled, DisableUpdateSettings: updateSettingsDisabled, + EnableCapture: captureEnabled, DisableNetworks: networksDisabled, } @@ -144,6 +146,10 @@ func applyServiceParams(cmd *cobra.Command, params *serviceParams) { updateSettingsDisabled = params.DisableUpdateSettings } + if !serviceCmd.PersistentFlags().Changed("enable-capture") { + captureEnabled = params.EnableCapture + } + if !serviceCmd.PersistentFlags().Changed("disable-networks") { networksDisabled = params.DisableNetworks } diff --git a/client/cmd/service_params_test.go b/client/cmd/service_params_test.go index 7e04e5abe..f338c12f4 100644 --- a/client/cmd/service_params_test.go +++ b/client/cmd/service_params_test.go @@ -535,6 +535,7 @@ func fieldToGlobalVar(field string) string { "LogFiles": "logFiles", "DisableProfiles": "profilesDisabled", "DisableUpdateSettings": "updateSettingsDisabled", + "EnableCapture": "captureEnabled", "DisableNetworks": "networksDisabled", "ServiceEnvVars": "serviceEnvVars", } diff --git a/client/cmd/testutil_test.go b/client/cmd/testutil_test.go index fd1007bb4..c24965e8d 100644 --- a/client/cmd/testutil_test.go +++ b/client/cmd/testutil_test.go @@ -160,7 +160,7 @@ func startClientDaemon( s := grpc.NewServer() server := client.New(ctx, - "", "", false, false, false) + "", "", false, false, false, false) if err := server.Start(); err != nil { t.Fatal(err) } diff --git a/client/embed/capture.go b/client/embed/capture.go new file mode 100644 index 000000000..30f9b496f --- /dev/null +++ b/client/embed/capture.go @@ -0,0 +1,65 @@ +package embed + +import ( + "io" + + "github.com/netbirdio/netbird/client/internal" + "github.com/netbirdio/netbird/util/capture" +) + +// CaptureOptions configures a packet capture session. +type CaptureOptions struct { + // Output receives pcap-formatted data. Nil disables pcap output. + Output io.Writer + // TextOutput receives human-readable packet summaries. Nil disables text output. + TextOutput io.Writer + // Filter is a BPF-like filter expression (e.g. "host 10.0.0.1 and tcp port 443"). + // Empty captures all packets. + Filter string + // Verbose adds seq/ack, TTL, window, and total length to text output. + Verbose bool + // ASCII dumps transport payload as printable ASCII after each packet line. + ASCII bool +} + +// CaptureStats reports capture session counters. +type CaptureStats struct { + Packets int64 + Bytes int64 + Dropped int64 +} + +// CaptureSession represents an active packet capture. Call Stop to end the +// capture and flush buffered packets. +type CaptureSession struct { + sess *capture.Session + engine *internal.Engine +} + +// Stop ends the capture, flushes remaining packets, and detaches from the device. +// Safe to call multiple times. +func (cs *CaptureSession) Stop() { + if cs.engine != nil { + _ = cs.engine.SetCapture(nil) + cs.engine = nil + } + if cs.sess != nil { + cs.sess.Stop() + } +} + +// Stats returns current capture counters. +func (cs *CaptureSession) Stats() CaptureStats { + s := cs.sess.Stats() + return CaptureStats{ + Packets: s.Packets, + Bytes: s.Bytes, + Dropped: s.Dropped, + } +} + +// Done returns a channel that is closed when the capture's writer goroutine +// has fully exited and all buffered packets have been flushed. +func (cs *CaptureSession) Done() <-chan struct{} { + return cs.sess.Done() +} diff --git a/client/embed/embed.go b/client/embed/embed.go index 88f7e541c..baa1d94d6 100644 --- a/client/embed/embed.go +++ b/client/embed/embed.go @@ -24,6 +24,7 @@ import ( "github.com/netbirdio/netbird/client/system" "github.com/netbirdio/netbird/shared/management/domain" mgmProto "github.com/netbirdio/netbird/shared/management/proto" + "github.com/netbirdio/netbird/util/capture" ) var ( @@ -65,7 +66,7 @@ type Options struct { PrivateKey string // ManagementURL overrides the default management server URL ManagementURL string - // PreSharedKey is the pre-shared key for the WireGuard interface + // PreSharedKey is the pre-shared key for the tunnel interface PreSharedKey string // LogOutput is the output destination for logs (defaults to os.Stderr if nil) LogOutput io.Writer @@ -81,9 +82,9 @@ type Options struct { DisableClientRoutes bool // BlockInbound blocks all inbound connections from peers BlockInbound bool - // WireguardPort is the port for the WireGuard interface. Use 0 for a random port. + // WireguardPort is the port for the tunnel interface. Use 0 for a random port. WireguardPort *int - // MTU is the MTU for the WireGuard interface. + // MTU is the MTU for the tunnel interface. // Valid values are in the range 576..8192 bytes. // If non-nil, this value overrides any value stored in the config file. // If nil, the existing config MTU (if non-zero) is preserved; otherwise it defaults to 1280. @@ -469,6 +470,52 @@ func (c *Client) VerifySSHHostKey(peerAddress string, key []byte) error { return sshcommon.VerifyHostKey(storedKey, key, peerAddress) } +// StartCapture begins capturing packets on this client's tunnel device. +// Only one capture can be active at a time; starting a new one stops the previous. +// Call StopCapture (or CaptureSession.Stop) to end it. +func (c *Client) StartCapture(opts CaptureOptions) (*CaptureSession, error) { + engine, err := c.getEngine() + if err != nil { + return nil, err + } + + var matcher capture.Matcher + if opts.Filter != "" { + m, err := capture.ParseFilter(opts.Filter) + if err != nil { + return nil, fmt.Errorf("parse filter: %w", err) + } + matcher = m + } + + sess, err := capture.NewSession(capture.Options{ + Output: opts.Output, + TextOutput: opts.TextOutput, + Matcher: matcher, + Verbose: opts.Verbose, + ASCII: opts.ASCII, + }) + if err != nil { + return nil, fmt.Errorf("create capture session: %w", err) + } + + if err := engine.SetCapture(sess); err != nil { + sess.Stop() + return nil, fmt.Errorf("set capture: %w", err) + } + + return &CaptureSession{sess: sess, engine: engine}, nil +} + +// StopCapture stops the active capture session if one is running. +func (c *Client) StopCapture() error { + engine, err := c.getEngine() + if err != nil { + return err + } + return engine.SetCapture(nil) +} + // getEngine safely retrieves the engine from the client with proper locking. // Returns ErrClientNotStarted if the client is not started. // Returns ErrEngineNotStarted if the engine is not available. diff --git a/client/firewall/uspfilter/filter.go b/client/firewall/uspfilter/filter.go index 24b3d0167..3787e63a8 100644 --- a/client/firewall/uspfilter/filter.go +++ b/client/firewall/uspfilter/filter.go @@ -115,12 +115,13 @@ type Manager struct { localipmanager *localIPManager - udpTracker *conntrack.UDPTracker - icmpTracker *conntrack.ICMPTracker - tcpTracker *conntrack.TCPTracker - forwarder atomic.Pointer[forwarder.Forwarder] - logger *nblog.Logger - flowLogger nftypes.FlowLogger + udpTracker *conntrack.UDPTracker + icmpTracker *conntrack.ICMPTracker + tcpTracker *conntrack.TCPTracker + forwarder atomic.Pointer[forwarder.Forwarder] + pendingCapture atomic.Pointer[forwarder.PacketCapture] + logger *nblog.Logger + flowLogger nftypes.FlowLogger blockRule firewall.Rule @@ -351,6 +352,19 @@ func (m *Manager) determineRouting() error { return nil } +// SetPacketCapture sets or clears packet capture on the forwarder endpoint. +// This captures outbound response packets that bypass the FilteredDevice in netstack mode. +func (m *Manager) SetPacketCapture(pc forwarder.PacketCapture) { + if pc == nil { + m.pendingCapture.Store(nil) + } else { + m.pendingCapture.Store(&pc) + } + if fwder := m.forwarder.Load(); fwder != nil { + fwder.SetCapture(pc) + } +} + // initForwarder initializes the forwarder, it disables routing on errors func (m *Manager) initForwarder() error { if m.forwarder.Load() != nil { @@ -372,6 +386,11 @@ func (m *Manager) initForwarder() error { m.forwarder.Store(forwarder) + // Re-load after store: a concurrent SetPacketCapture may have seen forwarder as nil and only updated pendingCapture. + if pc := m.pendingCapture.Load(); pc != nil { + forwarder.SetCapture(*pc) + } + log.Debug("forwarder initialized") return nil @@ -614,6 +633,7 @@ func (m *Manager) resetState() { } if fwder := m.forwarder.Load(); fwder != nil { + fwder.SetCapture(nil) fwder.Stop() } diff --git a/client/firewall/uspfilter/forwarder/endpoint.go b/client/firewall/uspfilter/forwarder/endpoint.go index 692a24140..96ab89af8 100644 --- a/client/firewall/uspfilter/forwarder/endpoint.go +++ b/client/firewall/uspfilter/forwarder/endpoint.go @@ -12,12 +12,19 @@ import ( nblog "github.com/netbirdio/netbird/client/firewall/uspfilter/log" ) +// PacketCapture captures raw packets for debugging. Implementations must be +// safe for concurrent use and must not block. +type PacketCapture interface { + Offer(data []byte, outbound bool) +} + // endpoint implements stack.LinkEndpoint and handles integration with the wireguard device type endpoint struct { logger *nblog.Logger dispatcher stack.NetworkDispatcher device *wgdevice.Device mtu atomic.Uint32 + capture atomic.Pointer[PacketCapture] } func (e *endpoint) Attach(dispatcher stack.NetworkDispatcher) { @@ -54,13 +61,17 @@ func (e *endpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) continue } - // Send the packet through WireGuard + pktBytes := data.AsSlice() + address := netHeader.DestinationAddress() - err := e.device.CreateOutboundPacket(data.AsSlice(), address.AsSlice()) - if err != nil { + if err := e.device.CreateOutboundPacket(pktBytes, address.AsSlice()); err != nil { e.logger.Error1("CreateOutboundPacket: %v", err) continue } + + if pc := e.capture.Load(); pc != nil { + (*pc).Offer(pktBytes, true) + } written++ } diff --git a/client/firewall/uspfilter/forwarder/forwarder.go b/client/firewall/uspfilter/forwarder/forwarder.go index d17c3cd5c..925273f24 100644 --- a/client/firewall/uspfilter/forwarder/forwarder.go +++ b/client/firewall/uspfilter/forwarder/forwarder.go @@ -139,6 +139,16 @@ func New(iface common.IFaceMapper, logger *nblog.Logger, flowLogger nftypes.Flow return f, nil } +// SetCapture sets or clears the packet capture on the forwarder endpoint. +// This captures outbound packets that bypass the FilteredDevice (netstack forwarding). +func (f *Forwarder) SetCapture(pc PacketCapture) { + if pc == nil { + f.endpoint.capture.Store(nil) + return + } + f.endpoint.capture.Store(&pc) +} + func (f *Forwarder) InjectIncomingPacket(payload []byte) error { if len(payload) < header.IPv4MinimumSize { return fmt.Errorf("packet too small: %d bytes", len(payload)) diff --git a/client/firewall/uspfilter/forwarder/icmp.go b/client/firewall/uspfilter/forwarder/icmp.go index cb3db325d..217423901 100644 --- a/client/firewall/uspfilter/forwarder/icmp.go +++ b/client/firewall/uspfilter/forwarder/icmp.go @@ -270,5 +270,9 @@ func (f *Forwarder) injectICMPReply(id stack.TransportEndpointID, icmpPayload [] return 0 } + if pc := f.endpoint.capture.Load(); pc != nil { + (*pc).Offer(fullPacket, true) + } + return len(fullPacket) } diff --git a/client/iface/device/device_filter.go b/client/iface/device/device_filter.go index 4357d1916..fc1c65efa 100644 --- a/client/iface/device/device_filter.go +++ b/client/iface/device/device_filter.go @@ -3,6 +3,7 @@ package device import ( "net/netip" "sync" + "sync/atomic" "golang.zx2c4.com/wireguard/tun" ) @@ -28,11 +29,20 @@ type PacketFilter interface { SetTCPPacketHook(ip netip.Addr, dPort uint16, hook func(packet []byte) bool) } +// PacketCapture captures raw packets for debugging. Implementations must be +// safe for concurrent use and must not block. +type PacketCapture interface { + // Offer submits a packet for capture. outbound is true for packets + // leaving the host (Read path), false for packets arriving (Write path). + Offer(data []byte, outbound bool) +} + // FilteredDevice to override Read or Write of packets type FilteredDevice struct { tun.Device filter PacketFilter + capture atomic.Pointer[PacketCapture] mutex sync.RWMutex closeOnce sync.Once } @@ -63,20 +73,25 @@ func (d *FilteredDevice) Read(bufs [][]byte, sizes []int, offset int) (n int, er if n, err = d.Device.Read(bufs, sizes, offset); err != nil { return 0, err } + d.mutex.RLock() filter := d.filter d.mutex.RUnlock() - if filter == nil { - return + if filter != nil { + for i := 0; i < n; i++ { + if filter.FilterOutbound(bufs[i][offset:offset+sizes[i]], sizes[i]) { + bufs = append(bufs[:i], bufs[i+1:]...) + sizes = append(sizes[:i], sizes[i+1:]...) + n-- + i-- + } + } } - for i := 0; i < n; i++ { - if filter.FilterOutbound(bufs[i][offset:offset+sizes[i]], sizes[i]) { - bufs = append(bufs[:i], bufs[i+1:]...) - sizes = append(sizes[:i], sizes[i+1:]...) - n-- - i-- + if pc := d.capture.Load(); pc != nil { + for i := 0; i < n; i++ { + (*pc).Offer(bufs[i][offset:offset+sizes[i]], true) } } @@ -85,6 +100,13 @@ func (d *FilteredDevice) Read(bufs [][]byte, sizes []int, offset int) (n int, er // Write wraps write method with filtering feature func (d *FilteredDevice) Write(bufs [][]byte, offset int) (int, error) { + // Capture before filtering so dropped packets are still visible in captures. + if pc := d.capture.Load(); pc != nil { + for _, buf := range bufs { + (*pc).Offer(buf[offset:], false) + } + } + d.mutex.RLock() filter := d.filter d.mutex.RUnlock() @@ -96,9 +118,10 @@ func (d *FilteredDevice) Write(bufs [][]byte, offset int) (int, error) { filteredBufs := make([][]byte, 0, len(bufs)) dropped := 0 for _, buf := range bufs { - if !filter.FilterInbound(buf[offset:], len(buf)) { - filteredBufs = append(filteredBufs, buf) + if filter.FilterInbound(buf[offset:], len(buf)) { dropped++ + } else { + filteredBufs = append(filteredBufs, buf) } } @@ -113,3 +136,14 @@ func (d *FilteredDevice) SetFilter(filter PacketFilter) { d.filter = filter d.mutex.Unlock() } + +// SetCapture sets or clears the packet capture sink. Pass nil to disable. +// Uses atomic store so the hot path (Read/Write) is a single pointer load +// with no locking overhead when capture is off. +func (d *FilteredDevice) SetCapture(pc PacketCapture) { + if pc == nil { + d.capture.Store(nil) + return + } + d.capture.Store(&pc) +} diff --git a/client/iface/device/device_filter_test.go b/client/iface/device/device_filter_test.go index eef783542..8fb16ca8d 100644 --- a/client/iface/device/device_filter_test.go +++ b/client/iface/device/device_filter_test.go @@ -158,7 +158,7 @@ func TestDeviceWrapperRead(t *testing.T) { t.Errorf("unexpected error: %v", err) return } - if n != 0 { + if n != 1 { t.Errorf("expected n=1, got %d", n) return } diff --git a/client/internal/debug/debug.go b/client/internal/debug/debug.go index bddb9a69e..90560d028 100644 --- a/client/internal/debug/debug.go +++ b/client/internal/debug/debug.go @@ -61,6 +61,7 @@ allocs.prof: Allocations profiling information. threadcreate.prof: Thread creation profiling information. cpu.prof: CPU profiling information. stack_trace.txt: Complete stack traces of all goroutines at the time of bundle creation. +capture.pcap: Packet capture in pcap format. Only present when capture was running during bundle collection. Omitted from anonymized bundles because it contains raw decrypted packet data. Anonymization Process @@ -234,6 +235,7 @@ type BundleGenerator struct { logPath string tempDir string cpuProfile []byte + capturePath string refreshStatus func() // Optional callback to refresh status before bundle generation clientMetrics MetricsExporter @@ -257,7 +259,8 @@ type GeneratorDependencies struct { LogPath string TempDir string // Directory for temporary bundle zip files. If empty, os.TempDir() is used. CPUProfile []byte - RefreshStatus func() // Optional callback to refresh status before bundle generation + CapturePath string + RefreshStatus func() ClientMetrics MetricsExporter } @@ -277,6 +280,7 @@ func NewBundleGenerator(deps GeneratorDependencies, cfg BundleConfig) *BundleGen logPath: deps.LogPath, tempDir: deps.TempDir, cpuProfile: deps.CPUProfile, + capturePath: deps.CapturePath, refreshStatus: deps.RefreshStatus, clientMetrics: deps.ClientMetrics, @@ -346,6 +350,10 @@ func (g *BundleGenerator) createArchive() error { log.Errorf("failed to add CPU profile to debug bundle: %v", err) } + if err := g.addCaptureFile(); err != nil { + log.Errorf("failed to add capture file to debug bundle: %v", err) + } + if err := g.addStackTrace(); err != nil { log.Errorf("failed to add stack trace to debug bundle: %v", err) } @@ -669,6 +677,29 @@ func (g *BundleGenerator) addCPUProfile() error { return nil } +func (g *BundleGenerator) addCaptureFile() error { + if g.capturePath == "" { + return nil + } + + if g.anonymize { + log.Info("skipping capture file in anonymized bundle (contains raw packet data)") + return nil + } + + f, err := os.Open(g.capturePath) + if err != nil { + return fmt.Errorf("open capture file: %w", err) + } + defer f.Close() + + if err := g.addFileToZip(f, "capture.pcap"); err != nil { + return fmt.Errorf("add capture file to zip: %w", err) + } + + return nil +} + func (g *BundleGenerator) addStackTrace() error { buf := make([]byte, 5242880) // 5 MB buffer n := runtime.Stack(buf, true) diff --git a/client/internal/engine.go b/client/internal/engine.go index 351e4bfe9..8c9553e52 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -28,6 +28,7 @@ import ( "github.com/netbirdio/netbird/client/firewall" "github.com/netbirdio/netbird/client/firewall/firewalld" firewallManager "github.com/netbirdio/netbird/client/firewall/manager" + "github.com/netbirdio/netbird/client/firewall/uspfilter/forwarder" "github.com/netbirdio/netbird/client/iface" "github.com/netbirdio/netbird/client/iface/device" nbnetstack "github.com/netbirdio/netbird/client/iface/netstack" @@ -68,6 +69,7 @@ import ( signal "github.com/netbirdio/netbird/shared/signal/client" sProto "github.com/netbirdio/netbird/shared/signal/proto" "github.com/netbirdio/netbird/util" + "github.com/netbirdio/netbird/util/capture" ) // PeerConnectionTimeoutMax is a timeout of an initial connection attempt to a remote peer. @@ -218,6 +220,8 @@ type Engine struct { portForwardManager *portforward.Manager srWatcher *guard.SRWatcher + afpacketCapture *capture.AFPacketCapture + // Sync response persistence (protected by syncRespMux) syncRespMux sync.RWMutex persistSyncResponse bool @@ -1703,6 +1707,11 @@ func (e *Engine) parseNATExternalIPMappings() []string { } func (e *Engine) close() { + if e.afpacketCapture != nil { + e.afpacketCapture.Stop() + e.afpacketCapture = nil + } + log.Debugf("removing Netbird interface %s", e.config.WgIfaceName) if e.wgInterface != nil { @@ -2168,6 +2177,62 @@ func (e *Engine) Address() (netip.Addr, error) { return e.wgInterface.Address().IP, nil } +// SetCapture sets or clears packet capture on the WireGuard device. +// On userspace WireGuard, it taps the FilteredDevice directly. +// On kernel WireGuard (Linux), it falls back to AF_PACKET raw socket capture. +// Pass nil to disable capture. +func (e *Engine) SetCapture(pc device.PacketCapture) error { + e.syncMsgMux.Lock() + defer e.syncMsgMux.Unlock() + + intf := e.wgInterface + if intf == nil { + return errors.New("wireguard interface not initialized") + } + + if e.afpacketCapture != nil { + e.afpacketCapture.Stop() + e.afpacketCapture = nil + } + + dev := intf.GetDevice() + if dev != nil { + dev.SetCapture(pc) + e.setForwarderCapture(pc) + return nil + } + + // Kernel mode: no FilteredDevice. Use AF_PACKET on Linux. + if pc == nil { + return nil + } + sess, ok := pc.(*capture.Session) + if !ok { + return errors.New("filtered device not available and AF_PACKET requires *capture.Session") + } + + afc := capture.NewAFPacketCapture(intf.Name(), sess) + if err := afc.Start(); err != nil { + return fmt.Errorf("start AF_PACKET capture on %s: %w", intf.Name(), err) + } + e.afpacketCapture = afc + return nil +} + +// setForwarderCapture propagates capture to the USP filter's forwarder endpoint. +// This captures outbound response packets that bypass the FilteredDevice in netstack mode. +func (e *Engine) setForwarderCapture(pc device.PacketCapture) { + if e.firewall == nil { + return + } + type forwarderCapturer interface { + SetPacketCapture(pc forwarder.PacketCapture) + } + if fc, ok := e.firewall.(forwarderCapturer); ok { + fc.SetPacketCapture(pc) + } +} + func (e *Engine) updateForwardRules(rules []*mgmProto.ForwardingRule) ([]firewallManager.ForwardRule, error) { if e.firewall == nil { log.Warn("firewall is disabled, not updating forwarding rules") diff --git a/client/internal/lazyconn/manager/manager.go b/client/internal/lazyconn/manager/manager.go index b6b3c6091..fc47bda39 100644 --- a/client/internal/lazyconn/manager/manager.go +++ b/client/internal/lazyconn/manager/manager.go @@ -6,7 +6,6 @@ import ( "time" log "github.com/sirupsen/logrus" - "golang.org/x/exp/maps" "github.com/netbirdio/netbird/client/internal/lazyconn" "github.com/netbirdio/netbird/client/internal/lazyconn/activity" @@ -91,8 +90,8 @@ func (m *Manager) UpdateRouteHAMap(haMap route.HAMap) { m.routesMu.Lock() defer m.routesMu.Unlock() - maps.Clear(m.peerToHAGroups) - maps.Clear(m.haGroupToPeers) + clear(m.peerToHAGroups) + clear(m.haGroupToPeers) for haUniqueID, routes := range haMap { var peers []string diff --git a/client/internal/netflow/store/memory.go b/client/internal/netflow/store/memory.go index b695a0a12..a44505e96 100644 --- a/client/internal/netflow/store/memory.go +++ b/client/internal/netflow/store/memory.go @@ -3,8 +3,6 @@ package store import ( "sync" - "golang.org/x/exp/maps" - "github.com/google/uuid" "github.com/netbirdio/netbird/client/internal/netflow/types" @@ -30,7 +28,7 @@ func (m *Memory) StoreEvent(event *types.Event) { func (m *Memory) Close() { m.mux.Lock() defer m.mux.Unlock() - maps.Clear(m.events) + clear(m.events) } func (m *Memory) GetEvents() []*types.Event { diff --git a/client/internal/routeselector/routeselector.go b/client/internal/routeselector/routeselector.go index 61c8bbc79..30afc013b 100644 --- a/client/internal/routeselector/routeselector.go +++ b/client/internal/routeselector/routeselector.go @@ -7,7 +7,6 @@ import ( "sync" "github.com/hashicorp/go-multierror" - "golang.org/x/exp/maps" "github.com/netbirdio/netbird/client/errors" "github.com/netbirdio/netbird/route" @@ -44,8 +43,8 @@ func (rs *RouteSelector) SelectRoutes(routes []route.NetID, appendRoute bool, al if rs.selectedRoutes == nil { rs.selectedRoutes = map[route.NetID]struct{}{} } - maps.Clear(rs.deselectedRoutes) - maps.Clear(rs.selectedRoutes) + clear(rs.deselectedRoutes) + clear(rs.selectedRoutes) for _, r := range allRoutes { rs.deselectedRoutes[r] = struct{}{} } @@ -78,8 +77,8 @@ func (rs *RouteSelector) SelectAllRoutes() { if rs.selectedRoutes == nil { rs.selectedRoutes = map[route.NetID]struct{}{} } - maps.Clear(rs.deselectedRoutes) - maps.Clear(rs.selectedRoutes) + clear(rs.deselectedRoutes) + clear(rs.selectedRoutes) } // DeselectRoutes removes specific routes from the selection. @@ -116,8 +115,8 @@ func (rs *RouteSelector) DeselectAllRoutes() { if rs.selectedRoutes == nil { rs.selectedRoutes = map[route.NetID]struct{}{} } - maps.Clear(rs.deselectedRoutes) - maps.Clear(rs.selectedRoutes) + clear(rs.deselectedRoutes) + clear(rs.selectedRoutes) } // IsSelected checks if a specific route is selected. diff --git a/client/proto/daemon.pb.go b/client/proto/daemon.pb.go index 31658d5a1..11e7877f2 100644 --- a/client/proto/daemon.pb.go +++ b/client/proto/daemon.pb.go @@ -5847,6 +5847,288 @@ func (x *ExposeServiceReady) GetPortAutoAssigned() bool { return false } +type StartCaptureRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + TextOutput bool `protobuf:"varint,1,opt,name=text_output,json=textOutput,proto3" json:"text_output,omitempty"` + SnapLen uint32 `protobuf:"varint,2,opt,name=snap_len,json=snapLen,proto3" json:"snap_len,omitempty"` + Duration *durationpb.Duration `protobuf:"bytes,3,opt,name=duration,proto3" json:"duration,omitempty"` + FilterExpr string `protobuf:"bytes,4,opt,name=filter_expr,json=filterExpr,proto3" json:"filter_expr,omitempty"` + Verbose bool `protobuf:"varint,5,opt,name=verbose,proto3" json:"verbose,omitempty"` + Ascii bool `protobuf:"varint,6,opt,name=ascii,proto3" json:"ascii,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartCaptureRequest) Reset() { + *x = StartCaptureRequest{} + mi := &file_daemon_proto_msgTypes[88] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartCaptureRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartCaptureRequest) ProtoMessage() {} + +func (x *StartCaptureRequest) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[88] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartCaptureRequest.ProtoReflect.Descriptor instead. +func (*StartCaptureRequest) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{88} +} + +func (x *StartCaptureRequest) GetTextOutput() bool { + if x != nil { + return x.TextOutput + } + return false +} + +func (x *StartCaptureRequest) GetSnapLen() uint32 { + if x != nil { + return x.SnapLen + } + return 0 +} + +func (x *StartCaptureRequest) GetDuration() *durationpb.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *StartCaptureRequest) GetFilterExpr() string { + if x != nil { + return x.FilterExpr + } + return "" +} + +func (x *StartCaptureRequest) GetVerbose() bool { + if x != nil { + return x.Verbose + } + return false +} + +func (x *StartCaptureRequest) GetAscii() bool { + if x != nil { + return x.Ascii + } + return false +} + +type CapturePacket struct { + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CapturePacket) Reset() { + *x = CapturePacket{} + mi := &file_daemon_proto_msgTypes[89] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CapturePacket) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CapturePacket) ProtoMessage() {} + +func (x *CapturePacket) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[89] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CapturePacket.ProtoReflect.Descriptor instead. +func (*CapturePacket) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{89} +} + +func (x *CapturePacket) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +type StartBundleCaptureRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // timeout auto-stops the capture after this duration. + // Clamped to a server-side maximum (10 minutes). Zero or unset defaults to the maximum. + Timeout *durationpb.Duration `protobuf:"bytes,1,opt,name=timeout,proto3" json:"timeout,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartBundleCaptureRequest) Reset() { + *x = StartBundleCaptureRequest{} + mi := &file_daemon_proto_msgTypes[90] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartBundleCaptureRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartBundleCaptureRequest) ProtoMessage() {} + +func (x *StartBundleCaptureRequest) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[90] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartBundleCaptureRequest.ProtoReflect.Descriptor instead. +func (*StartBundleCaptureRequest) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{90} +} + +func (x *StartBundleCaptureRequest) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +type StartBundleCaptureResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartBundleCaptureResponse) Reset() { + *x = StartBundleCaptureResponse{} + mi := &file_daemon_proto_msgTypes[91] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartBundleCaptureResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartBundleCaptureResponse) ProtoMessage() {} + +func (x *StartBundleCaptureResponse) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[91] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartBundleCaptureResponse.ProtoReflect.Descriptor instead. +func (*StartBundleCaptureResponse) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{91} +} + +type StopBundleCaptureRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StopBundleCaptureRequest) Reset() { + *x = StopBundleCaptureRequest{} + mi := &file_daemon_proto_msgTypes[92] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StopBundleCaptureRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopBundleCaptureRequest) ProtoMessage() {} + +func (x *StopBundleCaptureRequest) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[92] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopBundleCaptureRequest.ProtoReflect.Descriptor instead. +func (*StopBundleCaptureRequest) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{92} +} + +type StopBundleCaptureResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StopBundleCaptureResponse) Reset() { + *x = StopBundleCaptureResponse{} + mi := &file_daemon_proto_msgTypes[93] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StopBundleCaptureResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopBundleCaptureResponse) ProtoMessage() {} + +func (x *StopBundleCaptureResponse) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[93] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopBundleCaptureResponse.ProtoReflect.Descriptor instead. +func (*StopBundleCaptureResponse) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{93} +} + type PortInfo_Range struct { state protoimpl.MessageState `protogen:"open.v1"` Start uint32 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` @@ -5857,7 +6139,7 @@ type PortInfo_Range struct { func (x *PortInfo_Range) Reset() { *x = PortInfo_Range{} - mi := &file_daemon_proto_msgTypes[89] + mi := &file_daemon_proto_msgTypes[95] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5869,7 +6151,7 @@ func (x *PortInfo_Range) String() string { func (*PortInfo_Range) ProtoMessage() {} func (x *PortInfo_Range) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[89] + mi := &file_daemon_proto_msgTypes[95] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6410,7 +6692,23 @@ const file_daemon_proto_rawDesc = "" + "\vservice_url\x18\x02 \x01(\tR\n" + "serviceUrl\x12\x16\n" + "\x06domain\x18\x03 \x01(\tR\x06domain\x12,\n" + - "\x12port_auto_assigned\x18\x04 \x01(\bR\x10portAutoAssigned*b\n" + + "\x12port_auto_assigned\x18\x04 \x01(\bR\x10portAutoAssigned\"\xd9\x01\n" + + "\x13StartCaptureRequest\x12\x1f\n" + + "\vtext_output\x18\x01 \x01(\bR\n" + + "textOutput\x12\x19\n" + + "\bsnap_len\x18\x02 \x01(\rR\asnapLen\x125\n" + + "\bduration\x18\x03 \x01(\v2\x19.google.protobuf.DurationR\bduration\x12\x1f\n" + + "\vfilter_expr\x18\x04 \x01(\tR\n" + + "filterExpr\x12\x18\n" + + "\averbose\x18\x05 \x01(\bR\averbose\x12\x14\n" + + "\x05ascii\x18\x06 \x01(\bR\x05ascii\"#\n" + + "\rCapturePacket\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04data\"P\n" + + "\x19StartBundleCaptureRequest\x123\n" + + "\atimeout\x18\x01 \x01(\v2\x19.google.protobuf.DurationR\atimeout\"\x1c\n" + + "\x1aStartBundleCaptureResponse\"\x1a\n" + + "\x18StopBundleCaptureRequest\"\x1b\n" + + "\x19StopBundleCaptureResponse*b\n" + "\bLogLevel\x12\v\n" + "\aUNKNOWN\x10\x00\x12\t\n" + "\x05PANIC\x10\x01\x12\t\n" + @@ -6428,7 +6726,7 @@ const file_daemon_proto_rawDesc = "" + "\n" + "EXPOSE_UDP\x10\x03\x12\x0e\n" + "\n" + - "EXPOSE_TLS\x10\x042\xac\x15\n" + + "EXPOSE_TLS\x10\x042\xaf\x17\n" + "\rDaemonService\x126\n" + "\x05Login\x12\x14.daemon.LoginRequest\x1a\x15.daemon.LoginResponse\"\x00\x12K\n" + "\fWaitSSOLogin\x12\x1b.daemon.WaitSSOLoginRequest\x1a\x1c.daemon.WaitSSOLoginResponse\"\x00\x12-\n" + @@ -6449,7 +6747,10 @@ const file_daemon_proto_rawDesc = "" + "CleanState\x12\x19.daemon.CleanStateRequest\x1a\x1a.daemon.CleanStateResponse\"\x00\x12H\n" + "\vDeleteState\x12\x1a.daemon.DeleteStateRequest\x1a\x1b.daemon.DeleteStateResponse\"\x00\x12u\n" + "\x1aSetSyncResponsePersistence\x12).daemon.SetSyncResponsePersistenceRequest\x1a*.daemon.SetSyncResponsePersistenceResponse\"\x00\x12H\n" + - "\vTracePacket\x12\x1a.daemon.TracePacketRequest\x1a\x1b.daemon.TracePacketResponse\"\x00\x12D\n" + + "\vTracePacket\x12\x1a.daemon.TracePacketRequest\x1a\x1b.daemon.TracePacketResponse\"\x00\x12F\n" + + "\fStartCapture\x12\x1b.daemon.StartCaptureRequest\x1a\x15.daemon.CapturePacket\"\x000\x01\x12]\n" + + "\x12StartBundleCapture\x12!.daemon.StartBundleCaptureRequest\x1a\".daemon.StartBundleCaptureResponse\"\x00\x12Z\n" + + "\x11StopBundleCapture\x12 .daemon.StopBundleCaptureRequest\x1a!.daemon.StopBundleCaptureResponse\"\x00\x12D\n" + "\x0fSubscribeEvents\x12\x18.daemon.SubscribeRequest\x1a\x13.daemon.SystemEvent\"\x000\x01\x12B\n" + "\tGetEvents\x12\x18.daemon.GetEventsRequest\x1a\x19.daemon.GetEventsResponse\"\x00\x12N\n" + "\rSwitchProfile\x12\x1c.daemon.SwitchProfileRequest\x1a\x1d.daemon.SwitchProfileResponse\"\x00\x12B\n" + @@ -6483,7 +6784,7 @@ func file_daemon_proto_rawDescGZIP() []byte { } var file_daemon_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 91) +var file_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 97) var file_daemon_proto_goTypes = []any{ (LogLevel)(0), // 0: daemon.LogLevel (ExposeProtocol)(0), // 1: daemon.ExposeProtocol @@ -6577,125 +6878,139 @@ var file_daemon_proto_goTypes = []any{ (*ExposeServiceRequest)(nil), // 89: daemon.ExposeServiceRequest (*ExposeServiceEvent)(nil), // 90: daemon.ExposeServiceEvent (*ExposeServiceReady)(nil), // 91: daemon.ExposeServiceReady - nil, // 92: daemon.Network.ResolvedIPsEntry - (*PortInfo_Range)(nil), // 93: daemon.PortInfo.Range - nil, // 94: daemon.SystemEvent.MetadataEntry - (*durationpb.Duration)(nil), // 95: google.protobuf.Duration - (*timestamppb.Timestamp)(nil), // 96: google.protobuf.Timestamp + (*StartCaptureRequest)(nil), // 92: daemon.StartCaptureRequest + (*CapturePacket)(nil), // 93: daemon.CapturePacket + (*StartBundleCaptureRequest)(nil), // 94: daemon.StartBundleCaptureRequest + (*StartBundleCaptureResponse)(nil), // 95: daemon.StartBundleCaptureResponse + (*StopBundleCaptureRequest)(nil), // 96: daemon.StopBundleCaptureRequest + (*StopBundleCaptureResponse)(nil), // 97: daemon.StopBundleCaptureResponse + nil, // 98: daemon.Network.ResolvedIPsEntry + (*PortInfo_Range)(nil), // 99: daemon.PortInfo.Range + nil, // 100: daemon.SystemEvent.MetadataEntry + (*durationpb.Duration)(nil), // 101: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 102: google.protobuf.Timestamp } var file_daemon_proto_depIdxs = []int32{ - 95, // 0: daemon.LoginRequest.dnsRouteInterval:type_name -> google.protobuf.Duration - 25, // 1: daemon.StatusResponse.fullStatus:type_name -> daemon.FullStatus - 96, // 2: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp - 96, // 3: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp - 95, // 4: daemon.PeerState.latency:type_name -> google.protobuf.Duration - 23, // 5: daemon.SSHServerState.sessions:type_name -> daemon.SSHSessionInfo - 20, // 6: daemon.FullStatus.managementState:type_name -> daemon.ManagementState - 19, // 7: daemon.FullStatus.signalState:type_name -> daemon.SignalState - 18, // 8: daemon.FullStatus.localPeerState:type_name -> daemon.LocalPeerState - 17, // 9: daemon.FullStatus.peers:type_name -> daemon.PeerState - 21, // 10: daemon.FullStatus.relays:type_name -> daemon.RelayState - 22, // 11: daemon.FullStatus.dns_servers:type_name -> daemon.NSGroupState - 55, // 12: daemon.FullStatus.events:type_name -> daemon.SystemEvent - 24, // 13: daemon.FullStatus.sshServerState:type_name -> daemon.SSHServerState - 31, // 14: daemon.ListNetworksResponse.routes:type_name -> daemon.Network - 92, // 15: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry - 93, // 16: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range - 32, // 17: daemon.ForwardingRule.destinationPort:type_name -> daemon.PortInfo - 32, // 18: daemon.ForwardingRule.translatedPort:type_name -> daemon.PortInfo - 33, // 19: daemon.ForwardingRulesResponse.rules:type_name -> daemon.ForwardingRule - 0, // 20: daemon.GetLogLevelResponse.level:type_name -> daemon.LogLevel - 0, // 21: daemon.SetLogLevelRequest.level:type_name -> daemon.LogLevel - 41, // 22: daemon.ListStatesResponse.states:type_name -> daemon.State - 50, // 23: daemon.TracePacketRequest.tcp_flags:type_name -> daemon.TCPFlags - 52, // 24: daemon.TracePacketResponse.stages:type_name -> daemon.TraceStage - 2, // 25: daemon.SystemEvent.severity:type_name -> daemon.SystemEvent.Severity - 3, // 26: daemon.SystemEvent.category:type_name -> daemon.SystemEvent.Category - 96, // 27: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp - 94, // 28: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry - 55, // 29: daemon.GetEventsResponse.events:type_name -> daemon.SystemEvent - 95, // 30: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration - 68, // 31: daemon.ListProfilesResponse.profiles:type_name -> daemon.Profile - 1, // 32: daemon.ExposeServiceRequest.protocol:type_name -> daemon.ExposeProtocol - 91, // 33: daemon.ExposeServiceEvent.ready:type_name -> daemon.ExposeServiceReady - 30, // 34: daemon.Network.ResolvedIPsEntry.value:type_name -> daemon.IPList - 5, // 35: daemon.DaemonService.Login:input_type -> daemon.LoginRequest - 7, // 36: daemon.DaemonService.WaitSSOLogin:input_type -> daemon.WaitSSOLoginRequest - 9, // 37: daemon.DaemonService.Up:input_type -> daemon.UpRequest - 11, // 38: daemon.DaemonService.Status:input_type -> daemon.StatusRequest - 13, // 39: daemon.DaemonService.Down:input_type -> daemon.DownRequest - 15, // 40: daemon.DaemonService.GetConfig:input_type -> daemon.GetConfigRequest - 26, // 41: daemon.DaemonService.ListNetworks:input_type -> daemon.ListNetworksRequest - 28, // 42: daemon.DaemonService.SelectNetworks:input_type -> daemon.SelectNetworksRequest - 28, // 43: daemon.DaemonService.DeselectNetworks:input_type -> daemon.SelectNetworksRequest - 4, // 44: daemon.DaemonService.ForwardingRules:input_type -> daemon.EmptyRequest - 35, // 45: daemon.DaemonService.DebugBundle:input_type -> daemon.DebugBundleRequest - 37, // 46: daemon.DaemonService.GetLogLevel:input_type -> daemon.GetLogLevelRequest - 39, // 47: daemon.DaemonService.SetLogLevel:input_type -> daemon.SetLogLevelRequest - 42, // 48: daemon.DaemonService.ListStates:input_type -> daemon.ListStatesRequest - 44, // 49: daemon.DaemonService.CleanState:input_type -> daemon.CleanStateRequest - 46, // 50: daemon.DaemonService.DeleteState:input_type -> daemon.DeleteStateRequest - 48, // 51: daemon.DaemonService.SetSyncResponsePersistence:input_type -> daemon.SetSyncResponsePersistenceRequest - 51, // 52: daemon.DaemonService.TracePacket:input_type -> daemon.TracePacketRequest - 54, // 53: daemon.DaemonService.SubscribeEvents:input_type -> daemon.SubscribeRequest - 56, // 54: daemon.DaemonService.GetEvents:input_type -> daemon.GetEventsRequest - 58, // 55: daemon.DaemonService.SwitchProfile:input_type -> daemon.SwitchProfileRequest - 60, // 56: daemon.DaemonService.SetConfig:input_type -> daemon.SetConfigRequest - 62, // 57: daemon.DaemonService.AddProfile:input_type -> daemon.AddProfileRequest - 64, // 58: daemon.DaemonService.RemoveProfile:input_type -> daemon.RemoveProfileRequest - 66, // 59: daemon.DaemonService.ListProfiles:input_type -> daemon.ListProfilesRequest - 69, // 60: daemon.DaemonService.GetActiveProfile:input_type -> daemon.GetActiveProfileRequest - 71, // 61: daemon.DaemonService.Logout:input_type -> daemon.LogoutRequest - 73, // 62: daemon.DaemonService.GetFeatures:input_type -> daemon.GetFeaturesRequest - 75, // 63: daemon.DaemonService.TriggerUpdate:input_type -> daemon.TriggerUpdateRequest - 77, // 64: daemon.DaemonService.GetPeerSSHHostKey:input_type -> daemon.GetPeerSSHHostKeyRequest - 79, // 65: daemon.DaemonService.RequestJWTAuth:input_type -> daemon.RequestJWTAuthRequest - 81, // 66: daemon.DaemonService.WaitJWTToken:input_type -> daemon.WaitJWTTokenRequest - 83, // 67: daemon.DaemonService.StartCPUProfile:input_type -> daemon.StartCPUProfileRequest - 85, // 68: daemon.DaemonService.StopCPUProfile:input_type -> daemon.StopCPUProfileRequest - 87, // 69: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest - 89, // 70: daemon.DaemonService.ExposeService:input_type -> daemon.ExposeServiceRequest - 6, // 71: daemon.DaemonService.Login:output_type -> daemon.LoginResponse - 8, // 72: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse - 10, // 73: daemon.DaemonService.Up:output_type -> daemon.UpResponse - 12, // 74: daemon.DaemonService.Status:output_type -> daemon.StatusResponse - 14, // 75: daemon.DaemonService.Down:output_type -> daemon.DownResponse - 16, // 76: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse - 27, // 77: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse - 29, // 78: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse - 29, // 79: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse - 34, // 80: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse - 36, // 81: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse - 38, // 82: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse - 40, // 83: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse - 43, // 84: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse - 45, // 85: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse - 47, // 86: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse - 49, // 87: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse - 53, // 88: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse - 55, // 89: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent - 57, // 90: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse - 59, // 91: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse - 61, // 92: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse - 63, // 93: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse - 65, // 94: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse - 67, // 95: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse - 70, // 96: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse - 72, // 97: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse - 74, // 98: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse - 76, // 99: daemon.DaemonService.TriggerUpdate:output_type -> daemon.TriggerUpdateResponse - 78, // 100: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse - 80, // 101: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse - 82, // 102: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse - 84, // 103: daemon.DaemonService.StartCPUProfile:output_type -> daemon.StartCPUProfileResponse - 86, // 104: daemon.DaemonService.StopCPUProfile:output_type -> daemon.StopCPUProfileResponse - 88, // 105: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse - 90, // 106: daemon.DaemonService.ExposeService:output_type -> daemon.ExposeServiceEvent - 71, // [71:107] is the sub-list for method output_type - 35, // [35:71] is the sub-list for method input_type - 35, // [35:35] is the sub-list for extension type_name - 35, // [35:35] is the sub-list for extension extendee - 0, // [0:35] is the sub-list for field type_name + 101, // 0: daemon.LoginRequest.dnsRouteInterval:type_name -> google.protobuf.Duration + 25, // 1: daemon.StatusResponse.fullStatus:type_name -> daemon.FullStatus + 102, // 2: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp + 102, // 3: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp + 101, // 4: daemon.PeerState.latency:type_name -> google.protobuf.Duration + 23, // 5: daemon.SSHServerState.sessions:type_name -> daemon.SSHSessionInfo + 20, // 6: daemon.FullStatus.managementState:type_name -> daemon.ManagementState + 19, // 7: daemon.FullStatus.signalState:type_name -> daemon.SignalState + 18, // 8: daemon.FullStatus.localPeerState:type_name -> daemon.LocalPeerState + 17, // 9: daemon.FullStatus.peers:type_name -> daemon.PeerState + 21, // 10: daemon.FullStatus.relays:type_name -> daemon.RelayState + 22, // 11: daemon.FullStatus.dns_servers:type_name -> daemon.NSGroupState + 55, // 12: daemon.FullStatus.events:type_name -> daemon.SystemEvent + 24, // 13: daemon.FullStatus.sshServerState:type_name -> daemon.SSHServerState + 31, // 14: daemon.ListNetworksResponse.routes:type_name -> daemon.Network + 98, // 15: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry + 99, // 16: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range + 32, // 17: daemon.ForwardingRule.destinationPort:type_name -> daemon.PortInfo + 32, // 18: daemon.ForwardingRule.translatedPort:type_name -> daemon.PortInfo + 33, // 19: daemon.ForwardingRulesResponse.rules:type_name -> daemon.ForwardingRule + 0, // 20: daemon.GetLogLevelResponse.level:type_name -> daemon.LogLevel + 0, // 21: daemon.SetLogLevelRequest.level:type_name -> daemon.LogLevel + 41, // 22: daemon.ListStatesResponse.states:type_name -> daemon.State + 50, // 23: daemon.TracePacketRequest.tcp_flags:type_name -> daemon.TCPFlags + 52, // 24: daemon.TracePacketResponse.stages:type_name -> daemon.TraceStage + 2, // 25: daemon.SystemEvent.severity:type_name -> daemon.SystemEvent.Severity + 3, // 26: daemon.SystemEvent.category:type_name -> daemon.SystemEvent.Category + 102, // 27: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp + 100, // 28: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry + 55, // 29: daemon.GetEventsResponse.events:type_name -> daemon.SystemEvent + 101, // 30: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration + 68, // 31: daemon.ListProfilesResponse.profiles:type_name -> daemon.Profile + 1, // 32: daemon.ExposeServiceRequest.protocol:type_name -> daemon.ExposeProtocol + 91, // 33: daemon.ExposeServiceEvent.ready:type_name -> daemon.ExposeServiceReady + 101, // 34: daemon.StartCaptureRequest.duration:type_name -> google.protobuf.Duration + 101, // 35: daemon.StartBundleCaptureRequest.timeout:type_name -> google.protobuf.Duration + 30, // 36: daemon.Network.ResolvedIPsEntry.value:type_name -> daemon.IPList + 5, // 37: daemon.DaemonService.Login:input_type -> daemon.LoginRequest + 7, // 38: daemon.DaemonService.WaitSSOLogin:input_type -> daemon.WaitSSOLoginRequest + 9, // 39: daemon.DaemonService.Up:input_type -> daemon.UpRequest + 11, // 40: daemon.DaemonService.Status:input_type -> daemon.StatusRequest + 13, // 41: daemon.DaemonService.Down:input_type -> daemon.DownRequest + 15, // 42: daemon.DaemonService.GetConfig:input_type -> daemon.GetConfigRequest + 26, // 43: daemon.DaemonService.ListNetworks:input_type -> daemon.ListNetworksRequest + 28, // 44: daemon.DaemonService.SelectNetworks:input_type -> daemon.SelectNetworksRequest + 28, // 45: daemon.DaemonService.DeselectNetworks:input_type -> daemon.SelectNetworksRequest + 4, // 46: daemon.DaemonService.ForwardingRules:input_type -> daemon.EmptyRequest + 35, // 47: daemon.DaemonService.DebugBundle:input_type -> daemon.DebugBundleRequest + 37, // 48: daemon.DaemonService.GetLogLevel:input_type -> daemon.GetLogLevelRequest + 39, // 49: daemon.DaemonService.SetLogLevel:input_type -> daemon.SetLogLevelRequest + 42, // 50: daemon.DaemonService.ListStates:input_type -> daemon.ListStatesRequest + 44, // 51: daemon.DaemonService.CleanState:input_type -> daemon.CleanStateRequest + 46, // 52: daemon.DaemonService.DeleteState:input_type -> daemon.DeleteStateRequest + 48, // 53: daemon.DaemonService.SetSyncResponsePersistence:input_type -> daemon.SetSyncResponsePersistenceRequest + 51, // 54: daemon.DaemonService.TracePacket:input_type -> daemon.TracePacketRequest + 92, // 55: daemon.DaemonService.StartCapture:input_type -> daemon.StartCaptureRequest + 94, // 56: daemon.DaemonService.StartBundleCapture:input_type -> daemon.StartBundleCaptureRequest + 96, // 57: daemon.DaemonService.StopBundleCapture:input_type -> daemon.StopBundleCaptureRequest + 54, // 58: daemon.DaemonService.SubscribeEvents:input_type -> daemon.SubscribeRequest + 56, // 59: daemon.DaemonService.GetEvents:input_type -> daemon.GetEventsRequest + 58, // 60: daemon.DaemonService.SwitchProfile:input_type -> daemon.SwitchProfileRequest + 60, // 61: daemon.DaemonService.SetConfig:input_type -> daemon.SetConfigRequest + 62, // 62: daemon.DaemonService.AddProfile:input_type -> daemon.AddProfileRequest + 64, // 63: daemon.DaemonService.RemoveProfile:input_type -> daemon.RemoveProfileRequest + 66, // 64: daemon.DaemonService.ListProfiles:input_type -> daemon.ListProfilesRequest + 69, // 65: daemon.DaemonService.GetActiveProfile:input_type -> daemon.GetActiveProfileRequest + 71, // 66: daemon.DaemonService.Logout:input_type -> daemon.LogoutRequest + 73, // 67: daemon.DaemonService.GetFeatures:input_type -> daemon.GetFeaturesRequest + 75, // 68: daemon.DaemonService.TriggerUpdate:input_type -> daemon.TriggerUpdateRequest + 77, // 69: daemon.DaemonService.GetPeerSSHHostKey:input_type -> daemon.GetPeerSSHHostKeyRequest + 79, // 70: daemon.DaemonService.RequestJWTAuth:input_type -> daemon.RequestJWTAuthRequest + 81, // 71: daemon.DaemonService.WaitJWTToken:input_type -> daemon.WaitJWTTokenRequest + 83, // 72: daemon.DaemonService.StartCPUProfile:input_type -> daemon.StartCPUProfileRequest + 85, // 73: daemon.DaemonService.StopCPUProfile:input_type -> daemon.StopCPUProfileRequest + 87, // 74: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest + 89, // 75: daemon.DaemonService.ExposeService:input_type -> daemon.ExposeServiceRequest + 6, // 76: daemon.DaemonService.Login:output_type -> daemon.LoginResponse + 8, // 77: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse + 10, // 78: daemon.DaemonService.Up:output_type -> daemon.UpResponse + 12, // 79: daemon.DaemonService.Status:output_type -> daemon.StatusResponse + 14, // 80: daemon.DaemonService.Down:output_type -> daemon.DownResponse + 16, // 81: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse + 27, // 82: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse + 29, // 83: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse + 29, // 84: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse + 34, // 85: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse + 36, // 86: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse + 38, // 87: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse + 40, // 88: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse + 43, // 89: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse + 45, // 90: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse + 47, // 91: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse + 49, // 92: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse + 53, // 93: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse + 93, // 94: daemon.DaemonService.StartCapture:output_type -> daemon.CapturePacket + 95, // 95: daemon.DaemonService.StartBundleCapture:output_type -> daemon.StartBundleCaptureResponse + 97, // 96: daemon.DaemonService.StopBundleCapture:output_type -> daemon.StopBundleCaptureResponse + 55, // 97: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent + 57, // 98: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse + 59, // 99: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse + 61, // 100: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse + 63, // 101: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse + 65, // 102: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse + 67, // 103: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse + 70, // 104: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse + 72, // 105: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse + 74, // 106: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse + 76, // 107: daemon.DaemonService.TriggerUpdate:output_type -> daemon.TriggerUpdateResponse + 78, // 108: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse + 80, // 109: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse + 82, // 110: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse + 84, // 111: daemon.DaemonService.StartCPUProfile:output_type -> daemon.StartCPUProfileResponse + 86, // 112: daemon.DaemonService.StopCPUProfile:output_type -> daemon.StopCPUProfileResponse + 88, // 113: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse + 90, // 114: daemon.DaemonService.ExposeService:output_type -> daemon.ExposeServiceEvent + 76, // [76:115] is the sub-list for method output_type + 37, // [37:76] is the sub-list for method input_type + 37, // [37:37] is the sub-list for extension type_name + 37, // [37:37] is the sub-list for extension extendee + 0, // [0:37] is the sub-list for field type_name } func init() { file_daemon_proto_init() } @@ -6725,7 +7040,7 @@ func file_daemon_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_daemon_proto_rawDesc), len(file_daemon_proto_rawDesc)), NumEnums: 4, - NumMessages: 91, + NumMessages: 97, NumExtensions: 0, NumServices: 1, }, diff --git a/client/proto/daemon.proto b/client/proto/daemon.proto index f4e5b8e4d..3fee9eca8 100644 --- a/client/proto/daemon.proto +++ b/client/proto/daemon.proto @@ -64,6 +64,17 @@ service DaemonService { rpc TracePacket(TracePacketRequest) returns (TracePacketResponse) {} + // StartCapture begins streaming packet capture on the WireGuard interface. + // Requires --enable-capture set at service install/reconfigure time. + rpc StartCapture(StartCaptureRequest) returns (stream CapturePacket) {} + + // StartBundleCapture begins capturing packets to a server-side temp file + // for inclusion in the next debug bundle. Auto-stops after the given timeout. + rpc StartBundleCapture(StartBundleCaptureRequest) returns (StartBundleCaptureResponse) {} + + // StopBundleCapture stops the running bundle capture. Idempotent. + rpc StopBundleCapture(StopBundleCaptureRequest) returns (StopBundleCaptureResponse) {} + rpc SubscribeEvents(SubscribeRequest) returns (stream SystemEvent) {} rpc GetEvents(GetEventsRequest) returns (GetEventsResponse) {} @@ -832,3 +843,26 @@ message ExposeServiceReady { string domain = 3; bool port_auto_assigned = 4; } + +message StartCaptureRequest { + bool text_output = 1; + uint32 snap_len = 2; + google.protobuf.Duration duration = 3; + string filter_expr = 4; + bool verbose = 5; + bool ascii = 6; +} + +message CapturePacket { + bytes data = 1; +} + +message StartBundleCaptureRequest { + // timeout auto-stops the capture after this duration. + // Clamped to a server-side maximum (10 minutes). Zero or unset defaults to the maximum. + google.protobuf.Duration timeout = 1; +} + +message StartBundleCaptureResponse {} +message StopBundleCaptureRequest {} +message StopBundleCaptureResponse {} diff --git a/client/proto/daemon_grpc.pb.go b/client/proto/daemon_grpc.pb.go index 026ee2361..66a8efcc3 100644 --- a/client/proto/daemon_grpc.pb.go +++ b/client/proto/daemon_grpc.pb.go @@ -37,6 +37,9 @@ const ( DaemonService_DeleteState_FullMethodName = "/daemon.DaemonService/DeleteState" DaemonService_SetSyncResponsePersistence_FullMethodName = "/daemon.DaemonService/SetSyncResponsePersistence" DaemonService_TracePacket_FullMethodName = "/daemon.DaemonService/TracePacket" + DaemonService_StartCapture_FullMethodName = "/daemon.DaemonService/StartCapture" + DaemonService_StartBundleCapture_FullMethodName = "/daemon.DaemonService/StartBundleCapture" + DaemonService_StopBundleCapture_FullMethodName = "/daemon.DaemonService/StopBundleCapture" DaemonService_SubscribeEvents_FullMethodName = "/daemon.DaemonService/SubscribeEvents" DaemonService_GetEvents_FullMethodName = "/daemon.DaemonService/GetEvents" DaemonService_SwitchProfile_FullMethodName = "/daemon.DaemonService/SwitchProfile" @@ -96,6 +99,14 @@ type DaemonServiceClient interface { // SetSyncResponsePersistence enables or disables sync response persistence SetSyncResponsePersistence(ctx context.Context, in *SetSyncResponsePersistenceRequest, opts ...grpc.CallOption) (*SetSyncResponsePersistenceResponse, error) TracePacket(ctx context.Context, in *TracePacketRequest, opts ...grpc.CallOption) (*TracePacketResponse, error) + // StartCapture begins streaming packet capture on the WireGuard interface. + // Requires --enable-capture set at service install/reconfigure time. + StartCapture(ctx context.Context, in *StartCaptureRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CapturePacket], error) + // StartBundleCapture begins capturing packets to a server-side temp file + // for inclusion in the next debug bundle. Auto-stops after the given timeout. + StartBundleCapture(ctx context.Context, in *StartBundleCaptureRequest, opts ...grpc.CallOption) (*StartBundleCaptureResponse, error) + // StopBundleCapture stops the running bundle capture. Idempotent. + StopBundleCapture(ctx context.Context, in *StopBundleCaptureRequest, opts ...grpc.CallOption) (*StopBundleCaptureResponse, error) SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SystemEvent], error) GetEvents(ctx context.Context, in *GetEventsRequest, opts ...grpc.CallOption) (*GetEventsResponse, error) SwitchProfile(ctx context.Context, in *SwitchProfileRequest, opts ...grpc.CallOption) (*SwitchProfileResponse, error) @@ -313,9 +324,48 @@ func (c *daemonServiceClient) TracePacket(ctx context.Context, in *TracePacketRe return out, nil } +func (c *daemonServiceClient) StartCapture(ctx context.Context, in *StartCaptureRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CapturePacket], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[0], DaemonService_StartCapture_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[StartCaptureRequest, CapturePacket]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type DaemonService_StartCaptureClient = grpc.ServerStreamingClient[CapturePacket] + +func (c *daemonServiceClient) StartBundleCapture(ctx context.Context, in *StartBundleCaptureRequest, opts ...grpc.CallOption) (*StartBundleCaptureResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(StartBundleCaptureResponse) + err := c.cc.Invoke(ctx, DaemonService_StartBundleCapture_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) StopBundleCapture(ctx context.Context, in *StopBundleCaptureRequest, opts ...grpc.CallOption) (*StopBundleCaptureResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(StopBundleCaptureResponse) + err := c.cc.Invoke(ctx, DaemonService_StopBundleCapture_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *daemonServiceClient) SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SystemEvent], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[0], DaemonService_SubscribeEvents_FullMethodName, cOpts...) + stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[1], DaemonService_SubscribeEvents_FullMethodName, cOpts...) if err != nil { return nil, err } @@ -494,7 +544,7 @@ func (c *daemonServiceClient) GetInstallerResult(ctx context.Context, in *Instal func (c *daemonServiceClient) ExposeService(ctx context.Context, in *ExposeServiceRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ExposeServiceEvent], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[1], DaemonService_ExposeService_FullMethodName, cOpts...) + stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[2], DaemonService_ExposeService_FullMethodName, cOpts...) if err != nil { return nil, err } @@ -550,6 +600,14 @@ type DaemonServiceServer interface { // SetSyncResponsePersistence enables or disables sync response persistence SetSyncResponsePersistence(context.Context, *SetSyncResponsePersistenceRequest) (*SetSyncResponsePersistenceResponse, error) TracePacket(context.Context, *TracePacketRequest) (*TracePacketResponse, error) + // StartCapture begins streaming packet capture on the WireGuard interface. + // Requires --enable-capture set at service install/reconfigure time. + StartCapture(*StartCaptureRequest, grpc.ServerStreamingServer[CapturePacket]) error + // StartBundleCapture begins capturing packets to a server-side temp file + // for inclusion in the next debug bundle. Auto-stops after the given timeout. + StartBundleCapture(context.Context, *StartBundleCaptureRequest) (*StartBundleCaptureResponse, error) + // StopBundleCapture stops the running bundle capture. Idempotent. + StopBundleCapture(context.Context, *StopBundleCaptureRequest) (*StopBundleCaptureResponse, error) SubscribeEvents(*SubscribeRequest, grpc.ServerStreamingServer[SystemEvent]) error GetEvents(context.Context, *GetEventsRequest) (*GetEventsResponse, error) SwitchProfile(context.Context, *SwitchProfileRequest) (*SwitchProfileResponse, error) @@ -641,6 +699,15 @@ func (UnimplementedDaemonServiceServer) SetSyncResponsePersistence(context.Conte func (UnimplementedDaemonServiceServer) TracePacket(context.Context, *TracePacketRequest) (*TracePacketResponse, error) { return nil, status.Error(codes.Unimplemented, "method TracePacket not implemented") } +func (UnimplementedDaemonServiceServer) StartCapture(*StartCaptureRequest, grpc.ServerStreamingServer[CapturePacket]) error { + return status.Error(codes.Unimplemented, "method StartCapture not implemented") +} +func (UnimplementedDaemonServiceServer) StartBundleCapture(context.Context, *StartBundleCaptureRequest) (*StartBundleCaptureResponse, error) { + return nil, status.Error(codes.Unimplemented, "method StartBundleCapture not implemented") +} +func (UnimplementedDaemonServiceServer) StopBundleCapture(context.Context, *StopBundleCaptureRequest) (*StopBundleCaptureResponse, error) { + return nil, status.Error(codes.Unimplemented, "method StopBundleCapture not implemented") +} func (UnimplementedDaemonServiceServer) SubscribeEvents(*SubscribeRequest, grpc.ServerStreamingServer[SystemEvent]) error { return status.Error(codes.Unimplemented, "method SubscribeEvents not implemented") } @@ -1040,6 +1107,53 @@ func _DaemonService_TracePacket_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } +func _DaemonService_StartCapture_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StartCaptureRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DaemonServiceServer).StartCapture(m, &grpc.GenericServerStream[StartCaptureRequest, CapturePacket]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type DaemonService_StartCaptureServer = grpc.ServerStreamingServer[CapturePacket] + +func _DaemonService_StartBundleCapture_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartBundleCaptureRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).StartBundleCapture(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_StartBundleCapture_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).StartBundleCapture(ctx, req.(*StartBundleCaptureRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_StopBundleCapture_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StopBundleCaptureRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).StopBundleCapture(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_StopBundleCapture_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).StopBundleCapture(ctx, req.(*StopBundleCaptureRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _DaemonService_SubscribeEvents_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(SubscribeRequest) if err := stream.RecvMsg(m); err != nil { @@ -1429,6 +1543,14 @@ var DaemonService_ServiceDesc = grpc.ServiceDesc{ MethodName: "TracePacket", Handler: _DaemonService_TracePacket_Handler, }, + { + MethodName: "StartBundleCapture", + Handler: _DaemonService_StartBundleCapture_Handler, + }, + { + MethodName: "StopBundleCapture", + Handler: _DaemonService_StopBundleCapture_Handler, + }, { MethodName: "GetEvents", Handler: _DaemonService_GetEvents_Handler, @@ -1495,6 +1617,11 @@ var DaemonService_ServiceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{ + { + StreamName: "StartCapture", + Handler: _DaemonService_StartCapture_Handler, + ServerStreams: true, + }, { StreamName: "SubscribeEvents", Handler: _DaemonService_SubscribeEvents_Handler, diff --git a/client/server/capture.go b/client/server/capture.go new file mode 100644 index 000000000..308c00338 --- /dev/null +++ b/client/server/capture.go @@ -0,0 +1,365 @@ +package server + +import ( + "context" + "io" + "os" + "sync" + "time" + + log "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/netbirdio/netbird/client/internal" + "github.com/netbirdio/netbird/client/proto" + "github.com/netbirdio/netbird/util/capture" +) + +const maxBundleCaptureDuration = 10 * time.Minute + +// bundleCapture holds the state of an in-progress capture destined for the +// debug bundle. The lifecycle is: +// +// StartBundleCapture → capture running, writing to temp file +// StopBundleCapture → capture stopped, temp file available +// DebugBundle → temp file included in zip, then cleaned up +type bundleCapture struct { + mu sync.Mutex + sess *capture.Session + file *os.File + engine *internal.Engine + cancel context.CancelFunc + stopped bool +} + +// stop halts the capture session and closes the pcap writer. Idempotent. +func (bc *bundleCapture) stop() { + bc.mu.Lock() + defer bc.mu.Unlock() + + if bc.stopped { + return + } + bc.stopped = true + + if bc.cancel != nil { + bc.cancel() + } + if bc.sess != nil { + bc.sess.Stop() + } +} + +// path returns the temp file path, or "" if no file exists. +func (bc *bundleCapture) path() string { + if bc.file == nil { + return "" + } + return bc.file.Name() +} + +// cleanup removes the temp file. +func (bc *bundleCapture) cleanup() { + if bc.file == nil { + return + } + name := bc.file.Name() + if err := bc.file.Close(); err != nil { + log.Debugf("close bundle capture file: %v", err) + } + if err := os.Remove(name); err != nil && !os.IsNotExist(err) { + log.Debugf("remove bundle capture file: %v", err) + } + bc.file = nil +} + +// StartCapture streams a pcap or text packet capture over gRPC. +// Gated by the --enable-capture service flag. +func (s *Server) StartCapture(req *proto.StartCaptureRequest, stream proto.DaemonService_StartCaptureServer) error { + if !s.captureEnabled { + return status.Error(codes.PermissionDenied, + "packet capture is disabled; reinstall or reconfigure the service with --enable-capture") + } + + if d := req.GetDuration(); d != nil && d.AsDuration() < 0 { + return status.Error(codes.InvalidArgument, "duration must not be negative") + } + + matcher, err := parseCaptureFilter(req) + if err != nil { + return status.Errorf(codes.InvalidArgument, "invalid filter: %v", err) + } + + pr, pw := io.Pipe() + + opts := capture.Options{ + Matcher: matcher, + SnapLen: req.GetSnapLen(), + Verbose: req.GetVerbose(), + ASCII: req.GetAscii(), + } + if req.GetTextOutput() { + opts.TextOutput = pw + } else { + opts.Output = pw + } + + sess, err := capture.NewSession(opts) + if err != nil { + pw.Close() + return status.Errorf(codes.Internal, "create capture session: %v", err) + } + + engine, err := s.claimCapture(sess) + if err != nil { + sess.Stop() + pw.Close() + return err + } + + if err := engine.SetCapture(sess); err != nil { + s.releaseCapture(sess) + sess.Stop() + pw.Close() + return status.Errorf(codes.Internal, "set capture: %v", err) + } + + // Send an empty initial message to signal that the capture was accepted. + // The client waits for this before printing the banner, so it must arrive + // before any packet data. + if err := stream.Send(&proto.CapturePacket{}); err != nil { + s.clearCaptureIfOwner(sess, engine) + sess.Stop() + pw.Close() + return status.Errorf(codes.Internal, "send initial message: %v", err) + } + + ctx := stream.Context() + if d := req.GetDuration(); d != nil { + if dur := d.AsDuration(); dur > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, dur) + defer cancel() + } + } + + go func() { + <-ctx.Done() + s.clearCaptureIfOwner(sess, engine) + sess.Stop() + pw.Close() + }() + defer pr.Close() + + log.Infof("packet capture started (text=%v, expr=%q)", req.GetTextOutput(), req.GetFilterExpr()) + defer func() { + stats := sess.Stats() + log.Infof("packet capture stopped: %d packets, %d bytes, %d dropped", + stats.Packets, stats.Bytes, stats.Dropped) + }() + + return streamToGRPC(pr, stream) +} + +func streamToGRPC(r io.Reader, stream proto.DaemonService_StartCaptureServer) error { + buf := make([]byte, 32*1024) + for { + n, readErr := r.Read(buf) + if n > 0 { + if err := stream.Send(&proto.CapturePacket{Data: buf[:n]}); err != nil { + log.Debugf("capture stream send: %v", err) + return nil //nolint:nilerr // client disconnected + } + } + if readErr != nil { + return nil //nolint:nilerr // pipe closed, capture stopped normally + } + } +} + +// StartBundleCapture begins capturing packets to a server-side temp file for +// inclusion in the next debug bundle. Not gated by --enable-capture since the +// output stays on the server (same trust level as CPU profiling). +// +// A timeout auto-stops the capture as a safety net if StopBundleCapture is +// never called (e.g. CLI crash). +func (s *Server) StartBundleCapture(_ context.Context, req *proto.StartBundleCaptureRequest) (*proto.StartBundleCaptureResponse, error) { + s.mutex.Lock() + defer s.mutex.Unlock() + + s.stopBundleCaptureLocked() + s.cleanupBundleCapture() + + if s.activeCapture != nil { + return nil, status.Error(codes.FailedPrecondition, "another capture is already running") + } + + engine, err := s.getCaptureEngineLocked() + if err != nil { + // Not fatal: kernel mode or not connected. Log and return success + // so the debug bundle still generates without capture data. + log.Warnf("packet capture unavailable, skipping: %v", err) + return &proto.StartBundleCaptureResponse{}, nil + } + + timeout := req.GetTimeout().AsDuration() + if timeout <= 0 || timeout > maxBundleCaptureDuration { + timeout = maxBundleCaptureDuration + } + + f, err := os.CreateTemp("", "netbird.capture.*.pcap") + if err != nil { + return nil, status.Errorf(codes.Internal, "create temp file: %v", err) + } + + sess, err := capture.NewSession(capture.Options{Output: f}) + if err != nil { + f.Close() + os.Remove(f.Name()) + return nil, status.Errorf(codes.Internal, "create capture session: %v", err) + } + + if err := engine.SetCapture(sess); err != nil { + sess.Stop() + f.Close() + os.Remove(f.Name()) + log.Warnf("packet capture unavailable (no filtered device), skipping: %v", err) + return &proto.StartBundleCaptureResponse{}, nil + } + s.activeCapture = sess + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + bc := &bundleCapture{ + sess: sess, + file: f, + engine: engine, + cancel: cancel, + } + + s.bundleCapture = bc + + go func() { + <-ctx.Done() + s.mutex.Lock() + if s.bundleCapture == bc { + s.stopBundleCaptureLocked() + } else { + bc.stop() + } + s.mutex.Unlock() + log.Infof("bundle capture auto-stopped after timeout") + }() + log.Infof("bundle capture started (timeout=%s, file=%s)", timeout, f.Name()) + + return &proto.StartBundleCaptureResponse{}, nil +} + +// StopBundleCapture stops the running bundle capture. Idempotent. +func (s *Server) StopBundleCapture(_ context.Context, _ *proto.StopBundleCaptureRequest) (*proto.StopBundleCaptureResponse, error) { + s.mutex.Lock() + defer s.mutex.Unlock() + + s.stopBundleCaptureLocked() + return &proto.StopBundleCaptureResponse{}, nil +} + +// stopBundleCaptureLocked stops the bundle capture if running. Must hold s.mutex. +func (s *Server) stopBundleCaptureLocked() { + if s.bundleCapture == nil { + return + } + bc := s.bundleCapture + if bc.engine != nil && s.activeCapture == bc.sess { + if err := bc.engine.SetCapture(nil); err != nil { + log.Debugf("clear bundle capture: %v", err) + } + s.activeCapture = nil + } + bc.stop() + + stats := bc.sess.Stats() + log.Infof("bundle capture stopped: %d packets, %d bytes, %d dropped", + stats.Packets, stats.Bytes, stats.Dropped) +} + +// bundleCapturePath returns the temp file path if a capture has been taken, +// stops any running capture, and returns "". Called from DebugBundle. +// Must hold s.mutex. +func (s *Server) bundleCapturePath() string { + if s.bundleCapture == nil { + return "" + } + + s.bundleCapture.stop() + return s.bundleCapture.path() +} + +// cleanupBundleCapture removes the temp file and clears state. Must hold s.mutex. +func (s *Server) cleanupBundleCapture() { + if s.bundleCapture == nil { + return + } + s.bundleCapture.cleanup() + s.bundleCapture = nil +} + +// claimCapture reserves the engine's capture slot for sess. Returns +// FailedPrecondition if another capture is already active. +func (s *Server) claimCapture(sess *capture.Session) (*internal.Engine, error) { + s.mutex.Lock() + defer s.mutex.Unlock() + + if s.activeCapture != nil { + return nil, status.Error(codes.FailedPrecondition, "another capture is already running") + } + engine, err := s.getCaptureEngineLocked() + if err != nil { + return nil, err + } + s.activeCapture = sess + return engine, nil +} + +// releaseCapture clears the active-capture owner if it still matches sess. +func (s *Server) releaseCapture(sess *capture.Session) { + s.mutex.Lock() + defer s.mutex.Unlock() + if s.activeCapture == sess { + s.activeCapture = nil + } +} + +// clearCaptureIfOwner clears engine's capture slot only if sess still owns it. +func (s *Server) clearCaptureIfOwner(sess *capture.Session, engine *internal.Engine) { + s.mutex.Lock() + defer s.mutex.Unlock() + if s.activeCapture != sess { + return + } + if err := engine.SetCapture(nil); err != nil { + log.Debugf("clear capture: %v", err) + } + s.activeCapture = nil +} + +func (s *Server) getCaptureEngineLocked() (*internal.Engine, error) { + if s.connectClient == nil { + return nil, status.Error(codes.FailedPrecondition, "client not connected") + } + engine := s.connectClient.Engine() + if engine == nil { + return nil, status.Error(codes.FailedPrecondition, "engine not initialized") + } + return engine, nil +} + +// parseCaptureFilter returns a Matcher from the request. +// Returns nil (match all) when no filter expression is set. +func parseCaptureFilter(req *proto.StartCaptureRequest) (capture.Matcher, error) { + expr := req.GetFilterExpr() + if expr == "" { + return nil, nil //nolint:nilnil // nil Matcher means "match all" + } + return capture.ParseFilter(expr) +} diff --git a/client/server/debug.go b/client/server/debug.go index 81708e576..33247db5f 100644 --- a/client/server/debug.go +++ b/client/server/debug.go @@ -43,7 +43,9 @@ func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) ( }() } - // Prepare refresh callback for health probes + capturePath := s.bundleCapturePath() + defer s.cleanupBundleCapture() + var refreshStatus func() if s.connectClient != nil { engine := s.connectClient.Engine() @@ -62,6 +64,7 @@ func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) ( SyncResponse: syncResponse, LogPath: s.logFile, CPUProfile: cpuProfileData, + CapturePath: capturePath, RefreshStatus: refreshStatus, ClientMetrics: clientMetrics, }, diff --git a/client/server/server.go b/client/server/server.go index e70b83bf8..648ffa8ce 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -33,6 +33,7 @@ import ( "github.com/netbirdio/netbird/client/internal/statemanager" "github.com/netbirdio/netbird/client/internal/updater" "github.com/netbirdio/netbird/client/proto" + "github.com/netbirdio/netbird/util/capture" "github.com/netbirdio/netbird/version" ) @@ -89,7 +90,11 @@ type Server struct { profileManager *profilemanager.ServiceManager profilesDisabled bool updateSettingsDisabled bool - networksDisabled bool + captureEnabled bool + bundleCapture *bundleCapture + // activeCapture is the session currently installed on the engine; guarded by s.mutex. + activeCapture *capture.Session + networksDisabled bool sleepHandler *sleephandler.SleepHandler @@ -106,7 +111,7 @@ type oauthAuthFlow struct { } // New server instance constructor. -func New(ctx context.Context, logFile string, configFile string, profilesDisabled bool, updateSettingsDisabled bool, networksDisabled bool) *Server { +func New(ctx context.Context, logFile string, configFile string, profilesDisabled bool, updateSettingsDisabled bool, captureEnabled bool, networksDisabled bool) *Server { s := &Server{ rootCtx: ctx, logFile: logFile, @@ -115,6 +120,7 @@ func New(ctx context.Context, logFile string, configFile string, profilesDisable profileManager: profilemanager.NewServiceManager(configFile), profilesDisabled: profilesDisabled, updateSettingsDisabled: updateSettingsDisabled, + captureEnabled: captureEnabled, networksDisabled: networksDisabled, jwtCache: newJWTCache(), } diff --git a/client/server/server_test.go b/client/server/server_test.go index 54ad47e55..641cd85fe 100644 --- a/client/server/server_test.go +++ b/client/server/server_test.go @@ -104,7 +104,7 @@ func TestConnectWithRetryRuns(t *testing.T) { t.Fatalf("failed to set active profile state: %v", err) } - s := New(ctx, "debug", "", false, false, false) + s := New(ctx, "debug", "", false, false, false, false) s.config = config @@ -165,7 +165,7 @@ func TestServer_Up(t *testing.T) { t.Fatalf("failed to set active profile state: %v", err) } - s := New(ctx, "console", "", false, false, false) + s := New(ctx, "console", "", false, false, false, false) err = s.Start() require.NoError(t, err) @@ -235,7 +235,7 @@ func TestServer_SubcribeEvents(t *testing.T) { t.Fatalf("failed to set active profile state: %v", err) } - s := New(ctx, "console", "", false, false, false) + s := New(ctx, "console", "", false, false, false, false) err = s.Start() require.NoError(t, err) diff --git a/client/server/setconfig_test.go b/client/server/setconfig_test.go index 7f6847c43..b90b5653d 100644 --- a/client/server/setconfig_test.go +++ b/client/server/setconfig_test.go @@ -53,7 +53,7 @@ func TestSetConfig_AllFieldsSaved(t *testing.T) { require.NoError(t, err) ctx := context.Background() - s := New(ctx, "console", "", false, false, false) + s := New(ctx, "console", "", false, false, false, false) rosenpassEnabled := true rosenpassPermissive := true diff --git a/client/ui/debug.go b/client/ui/debug.go index 4ebe4d675..cf5ac1a75 100644 --- a/client/ui/debug.go +++ b/client/ui/debug.go @@ -16,6 +16,7 @@ import ( "fyne.io/fyne/v2/widget" log "github.com/sirupsen/logrus" "github.com/skratchdot/open-golang/open" + "google.golang.org/protobuf/types/known/durationpb" "github.com/netbirdio/netbird/client/internal" "github.com/netbirdio/netbird/client/proto" @@ -38,6 +39,7 @@ type debugCollectionParams struct { upload bool uploadURL string enablePersistence bool + capture bool } // UI components for progress tracking @@ -51,25 +53,58 @@ type progressUI struct { func (s *serviceClient) showDebugUI() { w := s.app.NewWindow("NetBird Debug") w.SetOnClosed(s.cancel) - w.Resize(fyne.NewSize(600, 500)) w.SetFixedSize(true) anonymizeCheck := widget.NewCheck("Anonymize sensitive information (public IPs, domains, ...)", nil) systemInfoCheck := widget.NewCheck("Include system information (routes, interfaces, ...)", nil) systemInfoCheck.SetChecked(true) + captureCheck := widget.NewCheck("Include packet capture", nil) uploadCheck := widget.NewCheck("Upload bundle automatically after creation", nil) uploadCheck.SetChecked(true) - uploadURLLabel := widget.NewLabel("Debug upload URL:") + uploadURLContainer, uploadURL := s.buildUploadSection(uploadCheck) + + debugModeContainer, runForDurationCheck, durationInput, noteLabel := s.buildDurationSection() + + statusLabel := widget.NewLabel("") + statusLabel.Hide() + progressBar := widget.NewProgressBar() + progressBar.Hide() + createButton := widget.NewButton("Create Debug Bundle", nil) + + uiControls := []fyne.Disableable{ + anonymizeCheck, systemInfoCheck, captureCheck, + uploadCheck, uploadURL, runForDurationCheck, durationInput, createButton, + } + + createButton.OnTapped = s.getCreateHandler( + statusLabel, progressBar, uploadCheck, uploadURL, + anonymizeCheck, systemInfoCheck, captureCheck, + runForDurationCheck, durationInput, uiControls, w, + ) + + content := container.NewVBox( + widget.NewLabel("Create a debug bundle to help troubleshoot issues with NetBird"), + widget.NewLabel(""), + anonymizeCheck, systemInfoCheck, captureCheck, + uploadCheck, uploadURLContainer, + widget.NewLabel(""), + debugModeContainer, noteLabel, + widget.NewLabel(""), + statusLabel, progressBar, createButton, + ) + + w.SetContent(container.NewPadded(content)) + w.Show() +} + +func (s *serviceClient) buildUploadSection(uploadCheck *widget.Check) (*fyne.Container, *widget.Entry) { uploadURL := widget.NewEntry() uploadURL.SetText(uptypes.DefaultBundleURL) uploadURL.SetPlaceHolder("Enter upload URL") - uploadURLContainer := container.NewVBox( - uploadURLLabel, - uploadURL, - ) + uploadURLContainer := container.NewVBox(widget.NewLabel("Debug upload URL:"), uploadURL) uploadCheck.OnChanged = func(checked bool) { if checked { @@ -78,13 +113,14 @@ func (s *serviceClient) showDebugUI() { uploadURLContainer.Hide() } } + return uploadURLContainer, uploadURL +} - debugModeContainer := container.NewHBox() +func (s *serviceClient) buildDurationSection() (*fyne.Container, *widget.Check, *widget.Entry, *widget.Label) { runForDurationCheck := widget.NewCheck("Run with trace logs before creating bundle", nil) runForDurationCheck.SetChecked(true) forLabel := widget.NewLabel("for") - durationInput := widget.NewEntry() durationInput.SetText("1") minutesLabel := widget.NewLabel("minute") @@ -108,63 +144,8 @@ func (s *serviceClient) showDebugUI() { } } - debugModeContainer.Add(runForDurationCheck) - debugModeContainer.Add(forLabel) - debugModeContainer.Add(durationInput) - debugModeContainer.Add(minutesLabel) - - statusLabel := widget.NewLabel("") - statusLabel.Hide() - - progressBar := widget.NewProgressBar() - progressBar.Hide() - - createButton := widget.NewButton("Create Debug Bundle", nil) - - // UI controls that should be disabled during debug collection - uiControls := []fyne.Disableable{ - anonymizeCheck, - systemInfoCheck, - uploadCheck, - uploadURL, - runForDurationCheck, - durationInput, - createButton, - } - - createButton.OnTapped = s.getCreateHandler( - statusLabel, - progressBar, - uploadCheck, - uploadURL, - anonymizeCheck, - systemInfoCheck, - runForDurationCheck, - durationInput, - uiControls, - w, - ) - - content := container.NewVBox( - widget.NewLabel("Create a debug bundle to help troubleshoot issues with NetBird"), - widget.NewLabel(""), - anonymizeCheck, - systemInfoCheck, - uploadCheck, - uploadURLContainer, - widget.NewLabel(""), - debugModeContainer, - noteLabel, - widget.NewLabel(""), - statusLabel, - progressBar, - createButton, - ) - - paddedContent := container.NewPadded(content) - w.SetContent(paddedContent) - - w.Show() + modeContainer := container.NewHBox(runForDurationCheck, forLabel, durationInput, minutesLabel) + return modeContainer, runForDurationCheck, durationInput, noteLabel } func validateMinute(s string, minutesLabel *widget.Label) error { @@ -200,6 +181,7 @@ func (s *serviceClient) getCreateHandler( uploadURL *widget.Entry, anonymizeCheck *widget.Check, systemInfoCheck *widget.Check, + captureCheck *widget.Check, runForDurationCheck *widget.Check, duration *widget.Entry, uiControls []fyne.Disableable, @@ -222,6 +204,7 @@ func (s *serviceClient) getCreateHandler( params := &debugCollectionParams{ anonymize: anonymizeCheck.Checked, systemInfo: systemInfoCheck.Checked, + capture: captureCheck.Checked, upload: uploadCheck.Checked, uploadURL: url, enablePersistence: true, @@ -253,10 +236,7 @@ func (s *serviceClient) getCreateHandler( statusLabel.SetText("Creating debug bundle...") go s.handleDebugCreation( - anonymizeCheck.Checked, - systemInfoCheck.Checked, - uploadCheck.Checked, - url, + params, statusLabel, uiControls, w, @@ -371,7 +351,7 @@ func startProgressTracker(ctx context.Context, wg *sync.WaitGroup, duration time func (s *serviceClient) configureServiceForDebug( conn proto.DaemonServiceClient, state *debugInitialState, - enablePersistence bool, + params *debugCollectionParams, ) { if state.wasDown { if _, err := conn.Up(s.ctx, &proto.UpRequest{}); err != nil { @@ -397,7 +377,7 @@ func (s *serviceClient) configureServiceForDebug( time.Sleep(time.Second) } - if enablePersistence { + if params.enablePersistence { if _, err := conn.SetSyncResponsePersistence(s.ctx, &proto.SetSyncResponsePersistenceRequest{ Enabled: true, }); err != nil { @@ -417,6 +397,26 @@ func (s *serviceClient) configureServiceForDebug( if _, err := conn.StartCPUProfile(s.ctx, &proto.StartCPUProfileRequest{}); err != nil { log.Warnf("failed to start CPU profiling: %v", err) } + + s.startBundleCaptureIfEnabled(conn, params) +} + +func (s *serviceClient) startBundleCaptureIfEnabled(conn proto.DaemonServiceClient, params *debugCollectionParams) { + if !params.capture { + return + } + + const maxCapture = 10 * time.Minute + timeout := params.duration + 30*time.Second + if timeout > maxCapture { + timeout = maxCapture + log.Warnf("packet capture clamped to %s (server maximum)", maxCapture) + } + if _, err := conn.StartBundleCapture(s.ctx, &proto.StartBundleCaptureRequest{ + Timeout: durationpb.New(timeout), + }); err != nil { + log.Warnf("failed to start bundle capture: %v", err) + } } func (s *serviceClient) collectDebugData( @@ -430,7 +430,7 @@ func (s *serviceClient) collectDebugData( var wg sync.WaitGroup startProgressTracker(ctx, &wg, params.duration, progress) - s.configureServiceForDebug(conn, state, params.enablePersistence) + s.configureServiceForDebug(conn, state, params) wg.Wait() progress.progressBar.Hide() @@ -440,6 +440,14 @@ func (s *serviceClient) collectDebugData( log.Warnf("failed to stop CPU profiling: %v", err) } + if params.capture { + stopCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if _, err := conn.StopBundleCapture(stopCtx, &proto.StopBundleCaptureRequest{}); err != nil { + log.Warnf("failed to stop bundle capture: %v", err) + } + } + return nil } @@ -520,18 +528,37 @@ func handleError(progress *progressUI, errMsg string) { } func (s *serviceClient) handleDebugCreation( - anonymize bool, - systemInfo bool, - upload bool, - uploadURL string, + params *debugCollectionParams, statusLabel *widget.Label, uiControls []fyne.Disableable, w fyne.Window, ) { - log.Infof("Creating debug bundle (Anonymized: %v, System Info: %v, Upload Attempt: %v)...", - anonymize, systemInfo, upload) + conn, err := s.getSrvClient(failFastTimeout) + if err != nil { + log.Errorf("Failed to get client for debug: %v", err) + statusLabel.SetText(fmt.Sprintf("Error: %v", err)) + enableUIControls(uiControls) + return + } - resp, err := s.createDebugBundle(anonymize, systemInfo, uploadURL) + if params.capture { + if _, err := conn.StartBundleCapture(s.ctx, &proto.StartBundleCaptureRequest{ + Timeout: durationpb.New(30 * time.Second), + }); err != nil { + log.Warnf("failed to start bundle capture: %v", err) + } else { + defer func() { + stopCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if _, err := conn.StopBundleCapture(stopCtx, &proto.StopBundleCaptureRequest{}); err != nil { + log.Warnf("failed to stop bundle capture: %v", err) + } + }() + time.Sleep(2 * time.Second) + } + } + + resp, err := s.createDebugBundle(params.anonymize, params.systemInfo, params.uploadURL) if err != nil { log.Errorf("Failed to create debug bundle: %v", err) statusLabel.SetText(fmt.Sprintf("Error creating bundle: %v", err)) @@ -543,7 +570,7 @@ func (s *serviceClient) handleDebugCreation( uploadFailureReason := resp.GetUploadFailureReason() uploadedKey := resp.GetUploadedKey() - if upload { + if params.upload { if uploadFailureReason != "" { showUploadFailedDialog(w, localPath, uploadFailureReason) } else { diff --git a/client/wasm/cmd/main.go b/client/wasm/cmd/main.go index d8e50ab6d..cb512f132 100644 --- a/client/wasm/cmd/main.go +++ b/client/wasm/cmd/main.go @@ -5,6 +5,7 @@ package main import ( "context" "fmt" + "sync" "syscall/js" "time" @@ -14,6 +15,7 @@ import ( netbird "github.com/netbirdio/netbird/client/embed" sshdetection "github.com/netbirdio/netbird/client/ssh/detection" nbstatus "github.com/netbirdio/netbird/client/status" + wasmcapture "github.com/netbirdio/netbird/client/wasm/internal/capture" "github.com/netbirdio/netbird/client/wasm/internal/http" "github.com/netbirdio/netbird/client/wasm/internal/rdp" "github.com/netbirdio/netbird/client/wasm/internal/ssh" @@ -459,6 +461,95 @@ func createSetLogLevelMethod(client *netbird.Client) js.Func { }) } +// createStartCaptureMethod creates the programmable packet capture method. +// Returns a JS interface with onpacket callback and stop() method. +// +// Usage from JavaScript: +// +// const cap = await client.startCapture({ filter: "tcp port 443", verbose: true }) +// cap.onpacket = (line) => console.log(line) +// const stats = cap.stop() +func createStartCaptureMethod(client *netbird.Client) js.Func { + return js.FuncOf(func(_ js.Value, args []js.Value) any { + var opts js.Value + if len(args) > 0 { + opts = args[0] + } + + return createPromise(func(resolve, reject js.Value) { + iface, err := wasmcapture.Start(client, opts) + if err != nil { + reject.Invoke(js.ValueOf(fmt.Sprintf("start capture: %v", err))) + return + } + resolve.Invoke(iface) + }) + }) +} + +// captureMethods returns capture() and stopCapture() that share state for +// the console-log shortcut. capture() logs packets to the browser console +// and stopCapture() ends it, like Ctrl+C on the CLI. +// +// Usage from browser devtools console: +// +// await client.capture() // capture all packets +// await client.capture("tcp") // capture with filter +// await client.capture({filter: "host 10.0.0.1", verbose: true}) +// client.stopCapture() // stop and print stats +func captureMethods(client *netbird.Client) (startFn, stopFn js.Func) { + var mu sync.Mutex + var active *wasmcapture.Handle + + startFn = js.FuncOf(func(_ js.Value, args []js.Value) any { + var opts js.Value + if len(args) > 0 { + opts = args[0] + } + + return createPromise(func(resolve, reject js.Value) { + mu.Lock() + defer mu.Unlock() + + if active != nil { + active.Stop() + active = nil + } + + h, err := wasmcapture.StartConsole(client, opts) + if err != nil { + reject.Invoke(js.ValueOf(fmt.Sprintf("start capture: %v", err))) + return + } + active = h + + console := js.Global().Get("console") + console.Call("log", "[capture] started, call client.stopCapture() to stop") + resolve.Invoke(js.Undefined()) + }) + }) + + stopFn = js.FuncOf(func(_ js.Value, _ []js.Value) any { + mu.Lock() + defer mu.Unlock() + + if active == nil { + js.Global().Get("console").Call("log", "[capture] no active capture") + return js.Undefined() + } + + stats := active.Stop() + active = nil + + console := js.Global().Get("console") + console.Call("log", fmt.Sprintf("[capture] stopped: %d packets, %d bytes, %d dropped", + stats.Packets, stats.Bytes, stats.Dropped)) + return js.Undefined() + }) + + return startFn, stopFn +} + // createPromise is a helper to create JavaScript promises func createPromise(handler func(resolve, reject js.Value)) js.Value { return js.Global().Get("Promise").New(js.FuncOf(func(_ js.Value, promiseArgs []js.Value) any { @@ -521,6 +612,11 @@ func createClientObject(client *netbird.Client) js.Value { obj["statusDetail"] = createStatusDetailMethod(client) obj["getSyncResponse"] = createGetSyncResponseMethod(client) obj["setLogLevel"] = createSetLogLevelMethod(client) + obj["startCapture"] = createStartCaptureMethod(client) + + capStart, capStop := captureMethods(client) + obj["capture"] = capStart + obj["stopCapture"] = capStop return js.ValueOf(obj) } diff --git a/client/wasm/internal/capture/capture.go b/client/wasm/internal/capture/capture.go new file mode 100644 index 000000000..53e43c45e --- /dev/null +++ b/client/wasm/internal/capture/capture.go @@ -0,0 +1,176 @@ +//go:build js + +// Package capture bridges the util/capture package to JavaScript via syscall/js. +package capture + +import ( + "strings" + "sync" + "syscall/js" + + netbird "github.com/netbirdio/netbird/client/embed" +) + +// Handle holds a running capture session so it can be stopped later. +type Handle struct { + cs *netbird.CaptureSession + stopFn js.Func + stopped bool +} + +// Stop ends the capture and returns stats. +func (h *Handle) Stop() netbird.CaptureStats { + if h.stopped { + return h.cs.Stats() + } + h.stopped = true + h.stopFn.Release() + + h.cs.Stop() + return h.cs.Stats() +} + +func statsToJS(s netbird.CaptureStats) js.Value { + obj := js.Global().Get("Object").Call("create", js.Null()) + obj.Set("packets", js.ValueOf(s.Packets)) + obj.Set("bytes", js.ValueOf(s.Bytes)) + obj.Set("dropped", js.ValueOf(s.Dropped)) + return obj +} + +// parseOpts extracts filter/verbose/ascii from a JS options value. +func parseOpts(jsOpts js.Value) (filter string, verbose, ascii bool) { + if jsOpts.IsNull() || jsOpts.IsUndefined() { + return + } + if jsOpts.Type() == js.TypeString { + filter = jsOpts.String() + return + } + if jsOpts.Type() != js.TypeObject { + return + } + if f := jsOpts.Get("filter"); !f.IsUndefined() && !f.IsNull() { + filter = f.String() + } + if v := jsOpts.Get("verbose"); !v.IsUndefined() { + verbose = v.Truthy() + } + if a := jsOpts.Get("ascii"); !a.IsUndefined() { + ascii = a.Truthy() + } + return +} + +// Start creates a capture session and returns a JS interface for streaming text +// output. The returned object exposes: +// +// onpacket(callback) - set callback(string) for each text line +// stop() - stop capture and return stats { packets, bytes, dropped } +// +// Options: { filter: string, verbose: bool, ascii: bool } or just a filter string. +func Start(client *netbird.Client, jsOpts js.Value) (js.Value, error) { + filter, verbose, ascii := parseOpts(jsOpts) + + cb := &jsCallbackWriter{} + + cs, err := client.StartCapture(netbird.CaptureOptions{ + TextOutput: cb, + Filter: filter, + Verbose: verbose, + ASCII: ascii, + }) + if err != nil { + return js.Undefined(), err + } + + handle := &Handle{cs: cs} + + iface := js.Global().Get("Object").Call("create", js.Null()) + handle.stopFn = js.FuncOf(func(_ js.Value, _ []js.Value) any { + return statsToJS(handle.Stop()) + }) + iface.Set("stop", handle.stopFn) + iface.Set("onpacket", js.Undefined()) + cb.setInterface(iface) + + return iface, nil +} + +// StartConsole starts a capture that logs every packet line to console.log. +// Returns a Handle so the caller can stop it later. +func StartConsole(client *netbird.Client, jsOpts js.Value) (*Handle, error) { + filter, verbose, ascii := parseOpts(jsOpts) + + cb := &jsCallbackWriter{} + + cs, err := client.StartCapture(netbird.CaptureOptions{ + TextOutput: cb, + Filter: filter, + Verbose: verbose, + ASCII: ascii, + }) + if err != nil { + return nil, err + } + + handle := &Handle{cs: cs} + handle.stopFn = js.FuncOf(func(_ js.Value, _ []js.Value) any { + return statsToJS(handle.Stop()) + }) + + iface := js.Global().Get("Object").Call("create", js.Null()) + console := js.Global().Get("console") + iface.Set("onpacket", console.Get("log").Call("bind", console, js.ValueOf("[capture]"))) + cb.setInterface(iface) + + return handle, nil +} + +// jsCallbackWriter is an io.Writer that buffers text until a newline, then +// invokes the JS onpacket callback with each complete line. +type jsCallbackWriter struct { + mu sync.Mutex + iface js.Value + buf strings.Builder +} + +func (w *jsCallbackWriter) setInterface(iface js.Value) { + w.mu.Lock() + defer w.mu.Unlock() + w.iface = iface +} + +func (w *jsCallbackWriter) Write(p []byte) (int, error) { + w.mu.Lock() + w.buf.Write(p) + + var lines []string + for { + str := w.buf.String() + idx := strings.IndexByte(str, '\n') + if idx < 0 { + break + } + lines = append(lines, str[:idx]) + w.buf.Reset() + if idx+1 < len(str) { + w.buf.WriteString(str[idx+1:]) + } + } + + iface := w.iface + w.mu.Unlock() + + if iface.IsUndefined() { + return len(p), nil + } + cb := iface.Get("onpacket") + if cb.IsUndefined() || cb.IsNull() { + return len(p), nil + } + for _, line := range lines { + cb.Invoke(js.ValueOf(line)) + } + return len(p), nil +} diff --git a/management/server/account_test.go b/management/server/account_test.go index 756c42421..e259856e3 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -1761,7 +1761,7 @@ func hasNilField(x interface{}) error { if f := rv.Field(i); f.IsValid() { k := f.Kind() switch k { - case reflect.Ptr: + case reflect.Pointer: if f.IsNil() { return fmt.Errorf("field %s is nil", f.String()) } diff --git a/proxy/cmd/proxy/cmd/debug.go b/proxy/cmd/proxy/cmd/debug.go index 59f7a6b65..1b1664490 100644 --- a/proxy/cmd/proxy/cmd/debug.go +++ b/proxy/cmd/proxy/cmd/debug.go @@ -2,7 +2,12 @@ package cmd import ( "fmt" + "os" + "os/signal" + "path/filepath" "strconv" + "strings" + "syscall" "github.com/spf13/cobra" @@ -99,6 +104,27 @@ var debugStopCmd = &cobra.Command{ SilenceUsage: true, } +var debugCaptureCmd = &cobra.Command{ + Use: "capture [filter expression]", + Short: "Capture packets on a client's WireGuard interface", + Long: `Captures decrypted packets flowing through a client's WireGuard interface. + +Default output is human-readable text. Use --pcap or --output for pcap binary. +Filter arguments after the account ID use BPF-like syntax. + +Examples: + netbird-proxy debug capture + netbird-proxy debug capture --duration 1m host 10.0.0.1 + netbird-proxy debug capture host 10.0.0.1 and tcp port 443 + netbird-proxy debug capture not port 22 + netbird-proxy debug capture -o capture.pcap + netbird-proxy debug capture --pcap | tcpdump -r - -n + netbird-proxy debug capture --pcap | tshark -r -`, + Args: cobra.MinimumNArgs(1), + RunE: runDebugCapture, + SilenceUsage: true, +} + func init() { debugCmd.PersistentFlags().StringVar(&debugAddr, "addr", envStringOrDefault("NB_PROXY_DEBUG_ADDRESS", "localhost:8444"), "Debug endpoint address") debugCmd.PersistentFlags().BoolVar(&jsonOutput, "json", false, "Output JSON instead of pretty format") @@ -110,6 +136,12 @@ func init() { debugPingCmd.Flags().StringVar(&pingTimeout, "timeout", "", "Ping timeout (e.g., 10s)") + debugCaptureCmd.Flags().DurationP("duration", "d", 0, "Capture duration (0 = server default)") + debugCaptureCmd.Flags().Bool("pcap", false, "Force pcap binary output (default when --output is set)") + debugCaptureCmd.Flags().BoolP("verbose", "v", false, "Show seq/ack, TTL, window, total length (text mode)") + debugCaptureCmd.Flags().Bool("ascii", false, "Print payload as ASCII after each packet (text mode)") + debugCaptureCmd.Flags().StringP("output", "o", "", "Write pcap to file instead of stdout") + debugCmd.AddCommand(debugHealthCmd) debugCmd.AddCommand(debugClientsCmd) debugCmd.AddCommand(debugStatusCmd) @@ -119,6 +151,7 @@ func init() { debugCmd.AddCommand(debugLogCmd) debugCmd.AddCommand(debugStartCmd) debugCmd.AddCommand(debugStopCmd) + debugCmd.AddCommand(debugCaptureCmd) rootCmd.AddCommand(debugCmd) } @@ -171,3 +204,84 @@ func runDebugStart(cmd *cobra.Command, args []string) error { func runDebugStop(cmd *cobra.Command, args []string) error { return getDebugClient(cmd).StopClient(cmd.Context(), args[0]) } + +func runDebugCapture(cmd *cobra.Command, args []string) error { + duration, _ := cmd.Flags().GetDuration("duration") + forcePcap, _ := cmd.Flags().GetBool("pcap") + verbose, _ := cmd.Flags().GetBool("verbose") + ascii, _ := cmd.Flags().GetBool("ascii") + outPath, _ := cmd.Flags().GetString("output") + + // Default to text. Use pcap when --pcap is set or --output is given. + wantText := !forcePcap && outPath == "" + + var filterExpr string + if len(args) > 1 { + filterExpr = strings.Join(args[1:], " ") + } + + ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGINT, syscall.SIGTERM) + defer cancel() + + out, cleanup, err := captureOutputWriter(cmd, outPath) + if err != nil { + return err + } + defer cleanup() + + if wantText { + cmd.PrintErrln("Capturing packets... Press Ctrl+C to stop.") + } else { + cmd.PrintErrln("Capturing packets (pcap)... Press Ctrl+C to stop.") + } + + var durationStr string + if duration > 0 { + durationStr = duration.String() + } + + err = getDebugClient(cmd).Capture(ctx, debug.CaptureOptions{ + AccountID: args[0], + Duration: durationStr, + FilterExpr: filterExpr, + Text: wantText, + Verbose: verbose, + ASCII: ascii, + Output: out, + }) + if err != nil { + return err + } + + cmd.PrintErrln("\nCapture finished.") + return nil +} + +// captureOutputWriter returns the writer and cleanup function for capture output. +func captureOutputWriter(cmd *cobra.Command, outPath string) (out *os.File, cleanup func(), err error) { + if outPath != "" { + f, err := os.CreateTemp(filepath.Dir(outPath), filepath.Base(outPath)+".*.tmp") + if err != nil { + return nil, nil, fmt.Errorf("create output file: %w", err) + } + tmpPath := f.Name() + return f, func() { + if err := f.Close(); err != nil { + cmd.PrintErrf("close output file: %v\n", err) + } + if fi, err := os.Stat(tmpPath); err == nil && fi.Size() > 0 { + if err := os.Rename(tmpPath, outPath); err != nil { + cmd.PrintErrf("rename output file: %v\n", err) + } else { + cmd.PrintErrf("Wrote %s\n", outPath) + } + } else { + os.Remove(tmpPath) + } + }, nil + } + + return os.Stdout, func() { + // no cleanup needed for stdout + }, nil +} diff --git a/proxy/internal/debug/client.go b/proxy/internal/debug/client.go index 01b0bc8e6..e01149522 100644 --- a/proxy/internal/debug/client.go +++ b/proxy/internal/debug/client.go @@ -310,6 +310,76 @@ func (c *Client) printError(data map[string]any) { } } +// CaptureOptions configures a capture request. +type CaptureOptions struct { + AccountID string + Duration string + FilterExpr string + Text bool + Verbose bool + ASCII bool + Output io.Writer +} + +// Capture streams a packet capture from the debug endpoint. The response body +// (pcap or text) is written directly to opts.Output until the server closes the +// connection or the context is cancelled. +func (c *Client) Capture(ctx context.Context, opts CaptureOptions) error { + if opts.AccountID == "" { + return fmt.Errorf("account ID is required") + } + if opts.Output == nil { + return fmt.Errorf("output writer is required") + } + + params := url.Values{} + if opts.Duration != "" { + params.Set("duration", opts.Duration) + } + if opts.FilterExpr != "" { + params.Set("filter", opts.FilterExpr) + } + if opts.Text { + params.Set("format", "text") + } + if opts.Verbose { + params.Set("verbose", "true") + } + if opts.ASCII { + params.Set("ascii", "true") + } + + path := fmt.Sprintf("/debug/clients/%s/capture", url.PathEscape(opts.AccountID)) + if len(params) > 0 { + path += "?" + params.Encode() + } + + fullURL := c.baseURL + path + req, err := http.NewRequestWithContext(ctx, http.MethodGet, fullURL, nil) + if err != nil { + return fmt.Errorf("create request: %w", err) + } + + // Use a separate client without timeout since captures stream for their full duration. + httpClient := &http.Client{} + resp, err := httpClient.Do(req) + if err != nil { + return fmt.Errorf("request failed: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode >= 400 { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("server error (%d): %s", resp.StatusCode, strings.TrimSpace(string(body))) + } + + _, err = io.Copy(opts.Output, resp.Body) + if err != nil && ctx.Err() != nil { + return nil + } + return err +} + func (c *Client) fetchAndPrint(ctx context.Context, path string, printer func(map[string]any)) error { data, raw, err := c.fetch(ctx, path) if err != nil { diff --git a/proxy/internal/debug/handler.go b/proxy/internal/debug/handler.go index c507cfad9..6cd124554 100644 --- a/proxy/internal/debug/handler.go +++ b/proxy/internal/debug/handler.go @@ -174,6 +174,8 @@ func (h *Handler) handleClientRoutes(w http.ResponseWriter, r *http.Request, pat h.handleClientStart(w, r, accountID) case "stop": h.handleClientStop(w, r, accountID) + case "capture": + h.handleCapture(w, r, accountID) default: return false } @@ -632,6 +634,81 @@ func (h *Handler) handleClientStop(w http.ResponseWriter, r *http.Request, accou }) } +const maxCaptureDuration = 30 * time.Minute + +// handleCapture streams a pcap or text packet capture for the given client. +// +// Query params: +// +// duration: capture duration (0 or absent = max, capped at 30m) +// format: "text" for human-readable output (default: pcap) +// filter: BPF-like filter expression (e.g. "host 10.0.0.1 and tcp port 443") +func (h *Handler) handleCapture(w http.ResponseWriter, r *http.Request, accountID types.AccountID) { + client, ok := h.provider.GetClient(accountID) + if !ok { + http.Error(w, "client not found", http.StatusNotFound) + return + } + + duration := maxCaptureDuration + if durationStr := r.URL.Query().Get("duration"); durationStr != "" { + d, err := time.ParseDuration(durationStr) + if err != nil { + http.Error(w, "invalid duration: "+err.Error(), http.StatusBadRequest) + return + } + if d < 0 { + http.Error(w, "duration must not be negative", http.StatusBadRequest) + return + } + if d > 0 { + duration = min(d, maxCaptureDuration) + } + } + + filter := r.URL.Query().Get("filter") + wantText := r.URL.Query().Get("format") == "text" + verbose := r.URL.Query().Get("verbose") == "true" + ascii := r.URL.Query().Get("ascii") == "true" + + opts := nbembed.CaptureOptions{Filter: filter, Verbose: verbose, ASCII: ascii} + if wantText { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + opts.TextOutput = w + } else { + w.Header().Set("Content-Type", "application/vnd.tcpdump.pcap") + w.Header().Set("Content-Disposition", + fmt.Sprintf("attachment; filename=capture-%s.pcap", accountID)) + opts.Output = w + } + + cs, err := client.StartCapture(opts) + if err != nil { + http.Error(w, "start capture: "+err.Error(), http.StatusServiceUnavailable) + return + } + defer cs.Stop() + + // Flush headers after setup succeeds so errors above can still set status codes. + if f, ok := w.(http.Flusher); ok { + f.Flush() + } + + timer := time.NewTimer(duration) + defer timer.Stop() + + select { + case <-r.Context().Done(): + case <-timer.C: + } + + cs.Stop() + + stats := cs.Stats() + h.logger.Infof("capture for %s finished: %d packets, %d bytes, %d dropped", + accountID, stats.Packets, stats.Bytes, stats.Dropped) +} + func (h *Handler) handleHealth(w http.ResponseWriter, r *http.Request, wantJSON bool) { if !wantJSON { http.Redirect(w, r, "/debug", http.StatusSeeOther) diff --git a/util/capture/afpacket_linux.go b/util/capture/afpacket_linux.go new file mode 100644 index 000000000..bf59e806a --- /dev/null +++ b/util/capture/afpacket_linux.go @@ -0,0 +1,199 @@ +package capture + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + "sync" + "sync/atomic" + + log "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// htons converts a uint16 from host to network (big-endian) byte order. +func htons(v uint16) uint16 { + var buf [2]byte + binary.BigEndian.PutUint16(buf[:], v) + return binary.NativeEndian.Uint16(buf[:]) +} + +// AFPacketCapture reads raw packets from a network interface using an +// AF_PACKET socket. This is the kernel-mode fallback when FilteredDevice is +// not available (kernel WireGuard). Linux only. +// +// It implements device.PacketCapture so it can be set on a Session, but it +// drives its own read loop rather than being called from FilteredDevice. +// Call Start to begin and Stop to end. +type AFPacketCapture struct { + ifaceName string + sess *Session + fd int + mu sync.Mutex + stopped chan struct{} + started atomic.Bool + closed atomic.Bool +} + +// NewAFPacketCapture creates a capture bound to the given interface. +// The session receives packets via Offer. +func NewAFPacketCapture(ifaceName string, sess *Session) *AFPacketCapture { + return &AFPacketCapture{ + ifaceName: ifaceName, + sess: sess, + fd: -1, + stopped: make(chan struct{}), + } +} + +// Start opens the AF_PACKET socket and begins reading packets. +// Packets are fed to the session via Offer. Returns immediately; +// the read loop runs in a goroutine. +func (c *AFPacketCapture) Start() error { + if c.sess == nil { + return errors.New("nil capture session") + } + if !c.started.CompareAndSwap(false, true) { + return errors.New("capture already started") + } + if c.closed.Load() { + c.started.Store(false) + return errors.New("cannot restart stopped capture") + } + + iface, err := net.InterfaceByName(c.ifaceName) + if err != nil { + c.started.Store(false) + return fmt.Errorf("interface %s: %w", c.ifaceName, err) + } + + fd, err := unix.Socket(unix.AF_PACKET, unix.SOCK_DGRAM|unix.SOCK_NONBLOCK|unix.SOCK_CLOEXEC, int(htons(unix.ETH_P_ALL))) + if err != nil { + c.started.Store(false) + return fmt.Errorf("create AF_PACKET socket: %w", err) + } + + addr := &unix.SockaddrLinklayer{ + Protocol: htons(unix.ETH_P_ALL), + Ifindex: iface.Index, + } + if err := unix.Bind(fd, addr); err != nil { + unix.Close(fd) + c.started.Store(false) + return fmt.Errorf("bind to %s: %w", c.ifaceName, err) + } + + c.mu.Lock() + c.fd = fd + c.mu.Unlock() + + go c.readLoop(fd) + return nil +} + +// Stop closes the socket and waits for the read loop to exit. Idempotent. +func (c *AFPacketCapture) Stop() { + if !c.closed.CompareAndSwap(false, true) { + if c.started.Load() { + <-c.stopped + } + return + } + + c.mu.Lock() + fd := c.fd + c.fd = -1 + c.mu.Unlock() + + if fd >= 0 { + unix.Close(fd) + } + + if c.started.Load() { + <-c.stopped + } +} + +func (c *AFPacketCapture) readLoop(fd int) { + defer close(c.stopped) + + buf := make([]byte, 65536) + pollFds := []unix.PollFd{{Fd: int32(fd), Events: unix.POLLIN}} + + for { + if c.closed.Load() { + return + } + + ok, err := c.pollOnce(pollFds) + if err != nil { + return + } + if !ok { + continue + } + + c.recvAndOffer(fd, buf) + } +} + +// pollOnce waits for data on the fd. Returns true if data is ready, false for timeout/retry. +// Returns an error to signal the loop should exit. +func (c *AFPacketCapture) pollOnce(pollFds []unix.PollFd) (bool, error) { + n, err := unix.Poll(pollFds, 200) + if err != nil { + if errors.Is(err, unix.EINTR) { + return false, nil + } + if c.closed.Load() { + return false, errors.New("closed") + } + log.Debugf("af_packet poll: %v", err) + return false, err + } + if n == 0 { + return false, nil + } + if pollFds[0].Revents&(unix.POLLERR|unix.POLLHUP|unix.POLLNVAL) != 0 { + return false, errors.New("fd error") + } + return true, nil +} + +func (c *AFPacketCapture) recvAndOffer(fd int, buf []byte) { + nr, from, err := unix.Recvfrom(fd, buf, 0) + if err != nil { + if errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EINTR) { + return + } + if !c.closed.Load() { + log.Debugf("af_packet recvfrom: %v", err) + } + return + } + if nr < 1 { + return + } + + ver := buf[0] >> 4 + if ver != 4 && ver != 6 { + return + } + + // The kernel sets Pkttype on AF_PACKET sockets: + // PACKET_HOST(0) = addressed to us (inbound) + // PACKET_OUTGOING(4) = sent by us (outbound) + outbound := false + if sa, ok := from.(*unix.SockaddrLinklayer); ok { + outbound = sa.Pkttype == unix.PACKET_OUTGOING + } + c.sess.Offer(buf[:nr], outbound) +} + +// Offer satisfies device.PacketCapture but is unused: the AFPacketCapture +// drives its own read loop. This exists only so the type signature is +// compatible if someone tries to set it as a PacketCapture. +func (c *AFPacketCapture) Offer([]byte, bool) { + // unused: AFPacketCapture drives its own read loop +} diff --git a/util/capture/afpacket_stub.go b/util/capture/afpacket_stub.go new file mode 100644 index 000000000..bde368e88 --- /dev/null +++ b/util/capture/afpacket_stub.go @@ -0,0 +1,26 @@ +//go:build !linux + +package capture + +import "errors" + +// AFPacketCapture is not available on this platform. +type AFPacketCapture struct{} + +// NewAFPacketCapture returns nil on non-Linux platforms. +func NewAFPacketCapture(string, *Session) *AFPacketCapture { return nil } + +// Start returns an error on non-Linux platforms. +func (c *AFPacketCapture) Start() error { + return errors.New("AF_PACKET capture is only supported on Linux") +} + +// Stop is a no-op on non-Linux platforms. +func (c *AFPacketCapture) Stop() { + // no-op on non-Linux platforms +} + +// Offer is a no-op on non-Linux platforms. +func (c *AFPacketCapture) Offer([]byte, bool) { + // no-op on non-Linux platforms +} diff --git a/util/capture/capture.go b/util/capture/capture.go new file mode 100644 index 000000000..0d92a4311 --- /dev/null +++ b/util/capture/capture.go @@ -0,0 +1,59 @@ +// Package capture provides userspace packet capture in pcap format. +// +// It taps decrypted WireGuard packets flowing through the FilteredDevice and +// writes them as pcap (readable by tcpdump, tshark, Wireshark) or as +// human-readable one-line-per-packet text. +package capture + +import "io" + +// Direction indicates whether a packet is entering or leaving the host. +type Direction uint8 + +const ( + // Inbound is a packet arriving from the network (FilteredDevice.Write path). + Inbound Direction = iota + // Outbound is a packet leaving the host (FilteredDevice.Read path). + Outbound +) + +// String returns "IN" or "OUT". +func (d Direction) String() string { + if d == Outbound { + return "OUT" + } + return "IN" +} + +const ( + protoICMP = 1 + protoTCP = 6 + protoUDP = 17 + protoICMPv6 = 58 +) + +// Options configures a capture session. +type Options struct { + // Output receives pcap-formatted data. Nil disables pcap output. + Output io.Writer + // TextOutput receives human-readable packet summaries. Nil disables text output. + TextOutput io.Writer + // Matcher selects which packets to capture. Nil captures all. + // Use ParseFilter("host 10.0.0.1 and tcp") or &Filter{...}. + Matcher Matcher + // Verbose adds seq/ack, TTL, window, total length to text output. + Verbose bool + // ASCII dumps transport payload as printable ASCII after each packet line. + ASCII bool + // SnapLen is the maximum bytes captured per packet. 0 means 65535. + SnapLen uint32 + // BufSize is the internal channel buffer size. 0 means 256. + BufSize int +} + +// Stats reports capture session counters. +type Stats struct { + Packets int64 + Bytes int64 + Dropped int64 +} diff --git a/util/capture/filter.go b/util/capture/filter.go new file mode 100644 index 000000000..d463450b8 --- /dev/null +++ b/util/capture/filter.go @@ -0,0 +1,528 @@ +package capture + +import ( + "encoding/binary" + "fmt" + "net/netip" + "strconv" + "strings" +) + +// Matcher tests whether a raw packet should be captured. +type Matcher interface { + Match(data []byte) bool +} + +// Filter selects packets by flat AND'd criteria. Useful for structured APIs +// (query params, proto fields). Implements Matcher. +type Filter struct { + SrcIP netip.Addr + DstIP netip.Addr + Host netip.Addr + SrcPort uint16 + DstPort uint16 + Port uint16 + Proto uint8 +} + +// IsEmpty returns true if the filter has no criteria set. +func (f *Filter) IsEmpty() bool { + return !f.SrcIP.IsValid() && !f.DstIP.IsValid() && !f.Host.IsValid() && + f.SrcPort == 0 && f.DstPort == 0 && f.Port == 0 && f.Proto == 0 +} + +// Match implements Matcher. All non-zero fields must match (AND). +func (f *Filter) Match(data []byte) bool { + if f.IsEmpty() { + return true + } + info, ok := parsePacketInfo(data) + if !ok { + return false + } + if f.Host.IsValid() && info.srcIP != f.Host && info.dstIP != f.Host { + return false + } + if f.SrcIP.IsValid() && info.srcIP != f.SrcIP { + return false + } + if f.DstIP.IsValid() && info.dstIP != f.DstIP { + return false + } + if f.Proto != 0 && info.proto != f.Proto { + return false + } + if f.Port != 0 && info.srcPort != f.Port && info.dstPort != f.Port { + return false + } + if f.SrcPort != 0 && info.srcPort != f.SrcPort { + return false + } + if f.DstPort != 0 && info.dstPort != f.DstPort { + return false + } + return true +} + +// exprNode evaluates a filter condition against pre-parsed packet info. +type exprNode func(info *packetInfo) bool + +// exprMatcher wraps an expression tree. Parses the packet once, then walks the tree. +type exprMatcher struct { + root exprNode +} + +func (m *exprMatcher) Match(data []byte) bool { + info, ok := parsePacketInfo(data) + if !ok { + return false + } + return m.root(&info) +} + +func nodeAnd(a, b exprNode) exprNode { + return func(info *packetInfo) bool { return a(info) && b(info) } +} + +func nodeOr(a, b exprNode) exprNode { + return func(info *packetInfo) bool { return a(info) || b(info) } +} + +func nodeNot(n exprNode) exprNode { + return func(info *packetInfo) bool { return !n(info) } +} + +func nodeHost(addr netip.Addr) exprNode { + return func(info *packetInfo) bool { return info.srcIP == addr || info.dstIP == addr } +} + +func nodeSrcHost(addr netip.Addr) exprNode { + return func(info *packetInfo) bool { return info.srcIP == addr } +} + +func nodeDstHost(addr netip.Addr) exprNode { + return func(info *packetInfo) bool { return info.dstIP == addr } +} + +func nodePort(port uint16) exprNode { + return func(info *packetInfo) bool { return info.srcPort == port || info.dstPort == port } +} + +func nodeSrcPort(port uint16) exprNode { + return func(info *packetInfo) bool { return info.srcPort == port } +} + +func nodeDstPort(port uint16) exprNode { + return func(info *packetInfo) bool { return info.dstPort == port } +} + +func nodeProto(proto uint8) exprNode { + return func(info *packetInfo) bool { return info.proto == proto } +} + +func nodeFamily(family uint8) exprNode { + return func(info *packetInfo) bool { return info.family == family } +} + +func nodeNet(prefix netip.Prefix) exprNode { + return func(info *packetInfo) bool { return prefix.Contains(info.srcIP) || prefix.Contains(info.dstIP) } +} + +func nodeSrcNet(prefix netip.Prefix) exprNode { + return func(info *packetInfo) bool { return prefix.Contains(info.srcIP) } +} + +func nodeDstNet(prefix netip.Prefix) exprNode { + return func(info *packetInfo) bool { return prefix.Contains(info.dstIP) } +} + +// packetInfo holds parsed header fields for filtering and display. +type packetInfo struct { + family uint8 + srcIP netip.Addr + dstIP netip.Addr + proto uint8 + srcPort uint16 + dstPort uint16 + hdrLen int +} + +func parsePacketInfo(data []byte) (packetInfo, bool) { + if len(data) < 1 { + return packetInfo{}, false + } + switch data[0] >> 4 { + case 4: + return parseIPv4Info(data) + case 6: + return parseIPv6Info(data) + default: + return packetInfo{}, false + } +} + +func parseIPv4Info(data []byte) (packetInfo, bool) { + if len(data) < 20 { + return packetInfo{}, false + } + ihl := int(data[0]&0x0f) * 4 + if ihl < 20 || len(data) < ihl { + return packetInfo{}, false + } + info := packetInfo{ + family: 4, + srcIP: netip.AddrFrom4([4]byte{data[12], data[13], data[14], data[15]}), + dstIP: netip.AddrFrom4([4]byte{data[16], data[17], data[18], data[19]}), + proto: data[9], + hdrLen: ihl, + } + if (info.proto == protoTCP || info.proto == protoUDP) && len(data) >= ihl+4 { + info.srcPort = binary.BigEndian.Uint16(data[ihl:]) + info.dstPort = binary.BigEndian.Uint16(data[ihl+2:]) + } + return info, true +} + +// parseIPv6Info parses the fixed IPv6 header. It reads the Next Header field +// directly, so packets with extension headers (hop-by-hop, routing, fragment, +// etc.) will report the extension type as the protocol rather than the final +// transport protocol. This is acceptable for a debug capture tool. +func parseIPv6Info(data []byte) (packetInfo, bool) { + if len(data) < 40 { + return packetInfo{}, false + } + var src, dst [16]byte + copy(src[:], data[8:24]) + copy(dst[:], data[24:40]) + info := packetInfo{ + family: 6, + srcIP: netip.AddrFrom16(src), + dstIP: netip.AddrFrom16(dst), + proto: data[6], + hdrLen: 40, + } + if (info.proto == protoTCP || info.proto == protoUDP) && len(data) >= 44 { + info.srcPort = binary.BigEndian.Uint16(data[40:]) + info.dstPort = binary.BigEndian.Uint16(data[42:]) + } + return info, true +} + +// ParseFilter parses a BPF-like filter expression and returns a Matcher. +// Returns nil Matcher for an empty expression (match all). +// +// Grammar (mirrors common tcpdump BPF syntax): +// +// orExpr = andExpr ("or" andExpr)* +// andExpr = unary ("and" unary)* +// unary = "not" unary | "(" orExpr ")" | term +// +// term = "host" IP | "src" target | "dst" target +// | "port" NUM | "net" PREFIX +// | "tcp" | "udp" | "icmp" | "icmp6" +// | "ip" | "ip6" | "proto" NUM +// target = "host" IP | "port" NUM | "net" PREFIX | IP +// +// Examples: +// +// host 10.0.0.1 and tcp port 443 +// not port 22 +// (host 10.0.0.1 or host 10.0.0.2) and tcp +// ip6 and icmp6 +// net 10.0.0.0/24 +// src host 10.0.0.1 or dst port 80 +func ParseFilter(expr string) (Matcher, error) { + tokens := tokenize(expr) + if len(tokens) == 0 { + return nil, nil //nolint:nilnil // nil Matcher means "match all" + } + + p := &parser{tokens: tokens} + node, err := p.parseOr() + if err != nil { + return nil, err + } + if p.pos < len(p.tokens) { + return nil, fmt.Errorf("unexpected token %q at position %d", p.tokens[p.pos], p.pos) + } + return &exprMatcher{root: node}, nil +} + +func tokenize(expr string) []string { + expr = strings.TrimSpace(expr) + if expr == "" { + return nil + } + // Split on whitespace but keep parens as separate tokens. + var tokens []string + for _, field := range strings.Fields(expr) { + tokens = append(tokens, splitParens(field)...) + } + return tokens +} + +// splitParens splits "(foo)" into "(", "foo", ")". +func splitParens(s string) []string { + var out []string + for strings.HasPrefix(s, "(") { + out = append(out, "(") + s = s[1:] + } + var trail []string + for strings.HasSuffix(s, ")") { + trail = append(trail, ")") + s = s[:len(s)-1] + } + if s != "" { + out = append(out, s) + } + out = append(out, trail...) + return out +} + +type parser struct { + tokens []string + pos int +} + +func (p *parser) peek() string { + if p.pos >= len(p.tokens) { + return "" + } + return strings.ToLower(p.tokens[p.pos]) +} + +func (p *parser) next() string { + tok := p.peek() + if tok != "" { + p.pos++ + } + return tok +} + +func (p *parser) expect(tok string) error { + got := p.next() + if got != tok { + return fmt.Errorf("expected %q, got %q", tok, got) + } + return nil +} + +func (p *parser) parseOr() (exprNode, error) { + left, err := p.parseAnd() + if err != nil { + return nil, err + } + for p.peek() == "or" { + p.next() + right, err := p.parseAnd() + if err != nil { + return nil, err + } + left = nodeOr(left, right) + } + return left, nil +} + +func (p *parser) parseAnd() (exprNode, error) { + left, err := p.parseUnary() + if err != nil { + return nil, err + } + for { + tok := p.peek() + if tok == "and" { + p.next() + right, err := p.parseUnary() + if err != nil { + return nil, err + } + left = nodeAnd(left, right) + continue + } + // Implicit AND: two atoms without "and" between them. + // Only if the next token starts an atom (not "or", ")", or EOF). + if tok != "" && tok != "or" && tok != ")" { + right, err := p.parseUnary() + if err != nil { + return nil, err + } + left = nodeAnd(left, right) + continue + } + break + } + return left, nil +} + +func (p *parser) parseUnary() (exprNode, error) { + switch p.peek() { + case "not": + p.next() + inner, err := p.parseUnary() + if err != nil { + return nil, err + } + return nodeNot(inner), nil + case "(": + p.next() + inner, err := p.parseOr() + if err != nil { + return nil, err + } + if err := p.expect(")"); err != nil { + return nil, fmt.Errorf("unclosed parenthesis") + } + return inner, nil + default: + return p.parseAtom() + } +} + +func (p *parser) parseAtom() (exprNode, error) { + tok := p.next() + if tok == "" { + return nil, fmt.Errorf("unexpected end of expression") + } + + switch tok { + case "host": + addr, err := p.parseAddr() + if err != nil { + return nil, fmt.Errorf("host: %w", err) + } + return nodeHost(addr), nil + + case "port": + port, err := p.parsePort() + if err != nil { + return nil, fmt.Errorf("port: %w", err) + } + return nodePort(port), nil + + case "net": + prefix, err := p.parsePrefix() + if err != nil { + return nil, fmt.Errorf("net: %w", err) + } + return nodeNet(prefix), nil + + case "src": + return p.parseDirTarget(true) + + case "dst": + return p.parseDirTarget(false) + + case "tcp": + return nodeProto(protoTCP), nil + case "udp": + return nodeProto(protoUDP), nil + case "icmp": + return nodeProto(protoICMP), nil + case "icmp6": + return nodeProto(protoICMPv6), nil + case "ip": + return nodeFamily(4), nil + case "ip6": + return nodeFamily(6), nil + + case "proto": + raw := p.next() + if raw == "" { + return nil, fmt.Errorf("proto: missing number") + } + n, err := strconv.Atoi(raw) + if err != nil || n < 0 || n > 255 { + return nil, fmt.Errorf("proto: invalid number %q", raw) + } + return nodeProto(uint8(n)), nil + + default: + return nil, fmt.Errorf("unknown filter keyword %q", tok) + } +} + +func (p *parser) parseDirTarget(isSrc bool) (exprNode, error) { + tok := p.peek() + switch tok { + case "host": + p.next() + addr, err := p.parseAddr() + if err != nil { + return nil, err + } + if isSrc { + return nodeSrcHost(addr), nil + } + return nodeDstHost(addr), nil + + case "port": + p.next() + port, err := p.parsePort() + if err != nil { + return nil, err + } + if isSrc { + return nodeSrcPort(port), nil + } + return nodeDstPort(port), nil + + case "net": + p.next() + prefix, err := p.parsePrefix() + if err != nil { + return nil, err + } + if isSrc { + return nodeSrcNet(prefix), nil + } + return nodeDstNet(prefix), nil + + default: + // Try as bare IP: "src 10.0.0.1" + addr, err := p.parseAddr() + if err != nil { + return nil, fmt.Errorf("expected host, port, net, or IP after src/dst, got %q", tok) + } + if isSrc { + return nodeSrcHost(addr), nil + } + return nodeDstHost(addr), nil + } +} + +func (p *parser) parseAddr() (netip.Addr, error) { + raw := p.next() + if raw == "" { + return netip.Addr{}, fmt.Errorf("missing IP address") + } + addr, err := netip.ParseAddr(raw) + if err != nil { + return netip.Addr{}, fmt.Errorf("invalid IP %q", raw) + } + return addr.Unmap(), nil +} + +func (p *parser) parsePort() (uint16, error) { + raw := p.next() + if raw == "" { + return 0, fmt.Errorf("missing port number") + } + n, err := strconv.Atoi(raw) + if err != nil || n < 1 || n > 65535 { + return 0, fmt.Errorf("invalid port %q", raw) + } + return uint16(n), nil +} + +func (p *parser) parsePrefix() (netip.Prefix, error) { + raw := p.next() + if raw == "" { + return netip.Prefix{}, fmt.Errorf("missing network prefix") + } + prefix, err := netip.ParsePrefix(raw) + if err != nil { + return netip.Prefix{}, fmt.Errorf("invalid prefix %q", raw) + } + return prefix, nil +} diff --git a/util/capture/filter_test.go b/util/capture/filter_test.go new file mode 100644 index 000000000..d5fd17566 --- /dev/null +++ b/util/capture/filter_test.go @@ -0,0 +1,263 @@ +package capture + +import ( + "net/netip" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// buildIPv4Packet creates a minimal IPv4+TCP/UDP packet for filter testing. +func buildIPv4Packet(t *testing.T, srcIP, dstIP netip.Addr, proto uint8, srcPort, dstPort uint16) []byte { + t.Helper() + + hdrLen := 20 + pkt := make([]byte, hdrLen+20) + pkt[0] = 0x45 + pkt[9] = proto + + src := srcIP.As4() + dst := dstIP.As4() + copy(pkt[12:16], src[:]) + copy(pkt[16:20], dst[:]) + + pkt[20] = byte(srcPort >> 8) + pkt[21] = byte(srcPort) + pkt[22] = byte(dstPort >> 8) + pkt[23] = byte(dstPort) + + return pkt +} + +// buildIPv6Packet creates a minimal IPv6+TCP/UDP packet for filter testing. +func buildIPv6Packet(t *testing.T, srcIP, dstIP netip.Addr, proto uint8, srcPort, dstPort uint16) []byte { + t.Helper() + + pkt := make([]byte, 44) // 40 header + 4 ports + pkt[0] = 0x60 // version 6 + pkt[6] = proto // next header + + src := srcIP.As16() + dst := dstIP.As16() + copy(pkt[8:24], src[:]) + copy(pkt[24:40], dst[:]) + + pkt[40] = byte(srcPort >> 8) + pkt[41] = byte(srcPort) + pkt[42] = byte(dstPort >> 8) + pkt[43] = byte(dstPort) + + return pkt +} + +// ---- Filter struct tests ---- + +func TestFilter_Empty(t *testing.T) { + f := Filter{} + assert.True(t, f.IsEmpty()) + assert.True(t, f.Match(buildIPv4Packet(t, + netip.MustParseAddr("10.0.0.1"), + netip.MustParseAddr("10.0.0.2"), + protoTCP, 12345, 443))) +} + +func TestFilter_Host(t *testing.T) { + f := Filter{Host: netip.MustParseAddr("10.0.0.1")} + assert.True(t, f.Match(buildIPv4Packet(t, netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2"), protoTCP, 1234, 80))) + assert.True(t, f.Match(buildIPv4Packet(t, netip.MustParseAddr("10.0.0.2"), netip.MustParseAddr("10.0.0.1"), protoTCP, 1234, 80))) + assert.False(t, f.Match(buildIPv4Packet(t, netip.MustParseAddr("10.0.0.2"), netip.MustParseAddr("10.0.0.3"), protoTCP, 1234, 80))) +} + +func TestFilter_InvalidPacket(t *testing.T) { + f := Filter{Host: netip.MustParseAddr("10.0.0.1")} + assert.False(t, f.Match(nil)) + assert.False(t, f.Match([]byte{})) + assert.False(t, f.Match([]byte{0x00})) +} + +func TestParsePacketInfo_IPv4(t *testing.T) { + pkt := buildIPv4Packet(t, netip.MustParseAddr("192.168.1.1"), netip.MustParseAddr("10.0.0.1"), protoTCP, 54321, 80) + info, ok := parsePacketInfo(pkt) + require.True(t, ok) + assert.Equal(t, uint8(4), info.family) + assert.Equal(t, netip.MustParseAddr("192.168.1.1"), info.srcIP) + assert.Equal(t, netip.MustParseAddr("10.0.0.1"), info.dstIP) + assert.Equal(t, uint8(protoTCP), info.proto) + assert.Equal(t, uint16(54321), info.srcPort) + assert.Equal(t, uint16(80), info.dstPort) +} + +func TestParsePacketInfo_IPv6(t *testing.T) { + pkt := buildIPv6Packet(t, netip.MustParseAddr("fd00::1"), netip.MustParseAddr("fd00::2"), protoUDP, 1234, 53) + info, ok := parsePacketInfo(pkt) + require.True(t, ok) + assert.Equal(t, uint8(6), info.family) + assert.Equal(t, netip.MustParseAddr("fd00::1"), info.srcIP) + assert.Equal(t, netip.MustParseAddr("fd00::2"), info.dstIP) + assert.Equal(t, uint8(protoUDP), info.proto) + assert.Equal(t, uint16(1234), info.srcPort) + assert.Equal(t, uint16(53), info.dstPort) +} + +// ---- ParseFilter expression tests ---- + +func matchV4(t *testing.T, m Matcher, srcIP, dstIP string, proto uint8, srcPort, dstPort uint16) bool { + t.Helper() + return m.Match(buildIPv4Packet(t, netip.MustParseAddr(srcIP), netip.MustParseAddr(dstIP), proto, srcPort, dstPort)) +} + +func matchV6(t *testing.T, m Matcher, srcIP, dstIP string, proto uint8, srcPort, dstPort uint16) bool { + t.Helper() + return m.Match(buildIPv6Packet(t, netip.MustParseAddr(srcIP), netip.MustParseAddr(dstIP), proto, srcPort, dstPort)) +} + +func TestParseFilter_Empty(t *testing.T) { + m, err := ParseFilter("") + require.NoError(t, err) + assert.Nil(t, m, "empty expression should return nil matcher") +} + +func TestParseFilter_Atoms(t *testing.T) { + tests := []struct { + expr string + match bool + }{ + {"tcp", true}, + {"udp", false}, + {"host 10.0.0.1", true}, + {"host 10.0.0.99", false}, + {"port 443", true}, + {"port 80", false}, + {"src host 10.0.0.1", true}, + {"dst host 10.0.0.2", true}, + {"dst host 10.0.0.1", false}, + {"src port 12345", true}, + {"dst port 443", true}, + {"dst port 80", false}, + {"proto 6", true}, + {"proto 17", false}, + } + + pkt := buildIPv4Packet(t, netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2"), protoTCP, 12345, 443) + + for _, tt := range tests { + t.Run(tt.expr, func(t *testing.T) { + m, err := ParseFilter(tt.expr) + require.NoError(t, err) + assert.Equal(t, tt.match, m.Match(pkt)) + }) + } +} + +func TestParseFilter_And(t *testing.T) { + m, err := ParseFilter("host 10.0.0.1 and tcp port 443") + require.NoError(t, err) + assert.True(t, matchV4(t, m, "10.0.0.1", "10.0.0.2", protoTCP, 55555, 443)) + assert.False(t, matchV4(t, m, "10.0.0.1", "10.0.0.2", protoUDP, 55555, 443), "wrong proto") + assert.False(t, matchV4(t, m, "10.0.0.3", "10.0.0.2", protoTCP, 55555, 443), "wrong host") + assert.False(t, matchV4(t, m, "10.0.0.1", "10.0.0.2", protoTCP, 55555, 80), "wrong port") +} + +func TestParseFilter_ImplicitAnd(t *testing.T) { + // "tcp port 443" = implicit AND between tcp and port 443 + m, err := ParseFilter("tcp port 443") + require.NoError(t, err) + assert.True(t, matchV4(t, m, "10.0.0.1", "10.0.0.2", protoTCP, 1, 443)) + assert.False(t, matchV4(t, m, "10.0.0.1", "10.0.0.2", protoUDP, 1, 443)) +} + +func TestParseFilter_Or(t *testing.T) { + m, err := ParseFilter("port 80 or port 443") + require.NoError(t, err) + assert.True(t, matchV4(t, m, "1.2.3.4", "5.6.7.8", protoTCP, 1, 80)) + assert.True(t, matchV4(t, m, "1.2.3.4", "5.6.7.8", protoTCP, 1, 443)) + assert.False(t, matchV4(t, m, "1.2.3.4", "5.6.7.8", protoTCP, 1, 8080)) +} + +func TestParseFilter_Not(t *testing.T) { + m, err := ParseFilter("not port 22") + require.NoError(t, err) + assert.True(t, matchV4(t, m, "10.0.0.1", "10.0.0.2", protoTCP, 1, 443)) + assert.False(t, matchV4(t, m, "10.0.0.1", "10.0.0.2", protoTCP, 1, 22)) + assert.False(t, matchV4(t, m, "10.0.0.1", "10.0.0.2", protoTCP, 22, 80)) +} + +func TestParseFilter_Parens(t *testing.T) { + m, err := ParseFilter("(port 80 or port 443) and tcp") + require.NoError(t, err) + assert.True(t, matchV4(t, m, "1.2.3.4", "5.6.7.8", protoTCP, 1, 443)) + assert.False(t, matchV4(t, m, "1.2.3.4", "5.6.7.8", protoUDP, 1, 443), "wrong proto") + assert.False(t, matchV4(t, m, "1.2.3.4", "5.6.7.8", protoTCP, 1, 8080), "wrong port") +} + +func TestParseFilter_Family(t *testing.T) { + mV4, err := ParseFilter("ip") + require.NoError(t, err) + assert.True(t, matchV4(t, mV4, "10.0.0.1", "10.0.0.2", protoTCP, 1, 80)) + assert.False(t, matchV6(t, mV4, "fd00::1", "fd00::2", protoTCP, 1, 80)) + + mV6, err := ParseFilter("ip6") + require.NoError(t, err) + assert.False(t, matchV4(t, mV6, "10.0.0.1", "10.0.0.2", protoTCP, 1, 80)) + assert.True(t, matchV6(t, mV6, "fd00::1", "fd00::2", protoTCP, 1, 80)) +} + +func TestParseFilter_Net(t *testing.T) { + m, err := ParseFilter("net 10.0.0.0/24") + require.NoError(t, err) + assert.True(t, matchV4(t, m, "10.0.0.1", "192.168.1.1", protoTCP, 1, 80), "src in net") + assert.True(t, matchV4(t, m, "192.168.1.1", "10.0.0.200", protoTCP, 1, 80), "dst in net") + assert.False(t, matchV4(t, m, "10.0.1.1", "192.168.1.1", protoTCP, 1, 80), "neither in net") +} + +func TestParseFilter_SrcDstNet(t *testing.T) { + m, err := ParseFilter("src net 10.0.0.0/8 and dst net 192.168.0.0/16") + require.NoError(t, err) + assert.True(t, matchV4(t, m, "10.1.2.3", "192.168.1.1", protoTCP, 1, 80)) + assert.False(t, matchV4(t, m, "192.168.1.1", "10.1.2.3", protoTCP, 1, 80), "reversed") +} + +func TestParseFilter_Complex(t *testing.T) { + // Real-world: capture HTTP(S) traffic to/from specific host, excluding SSH + m, err := ParseFilter("host 10.0.0.1 and (port 80 or port 443) and not port 22") + require.NoError(t, err) + assert.True(t, matchV4(t, m, "10.0.0.1", "10.0.0.2", protoTCP, 55555, 443)) + assert.True(t, matchV4(t, m, "10.0.0.2", "10.0.0.1", protoTCP, 55555, 80)) + assert.False(t, matchV4(t, m, "10.0.0.1", "10.0.0.2", protoTCP, 22, 443), "port 22 excluded") + assert.False(t, matchV4(t, m, "10.0.0.3", "10.0.0.2", protoTCP, 55555, 443), "wrong host") +} + +func TestParseFilter_IPv6Combined(t *testing.T) { + m, err := ParseFilter("ip6 and icmp6") + require.NoError(t, err) + assert.True(t, matchV6(t, m, "fd00::1", "fd00::2", protoICMPv6, 0, 0)) + assert.False(t, matchV4(t, m, "10.0.0.1", "10.0.0.2", protoICMP, 0, 0), "wrong family") + assert.False(t, matchV6(t, m, "fd00::1", "fd00::2", protoTCP, 1, 80), "wrong proto") +} + +func TestParseFilter_CaseInsensitive(t *testing.T) { + m, err := ParseFilter("HOST 10.0.0.1 AND TCP PORT 443") + require.NoError(t, err) + assert.True(t, matchV4(t, m, "10.0.0.1", "10.0.0.2", protoTCP, 1, 443)) +} + +func TestParseFilter_Errors(t *testing.T) { + bad := []string{ + "badkeyword", + "host", + "port abc", + "port 99999", + "net invalid", + "(", + "(port 80", + "not", + "src", + } + for _, expr := range bad { + t.Run(expr, func(t *testing.T) { + _, err := ParseFilter(expr) + assert.Error(t, err, "should fail for %q", expr) + }) + } +} diff --git a/util/capture/pcap.go b/util/capture/pcap.go new file mode 100644 index 000000000..0a9057045 --- /dev/null +++ b/util/capture/pcap.go @@ -0,0 +1,85 @@ +package capture + +import ( + "encoding/binary" + "io" + "time" +) + +const ( + pcapMagic = 0xa1b2c3d4 + pcapVersionMaj = 2 + pcapVersionMin = 4 + // linkTypeRaw is LINKTYPE_RAW: raw IPv4/IPv6 packets without link-layer header. + linkTypeRaw = 101 + defaultSnapLen = 65535 +) + +// PcapWriter writes packets in pcap format to an underlying writer. +// The global header is written lazily on the first WritePacket call so that +// the writer can be used with unbuffered io.Pipes without deadlocking. +// It is not safe for concurrent use; callers must serialize access. +type PcapWriter struct { + w io.Writer + snapLen uint32 + headerWritten bool +} + +// NewPcapWriter creates a pcap writer. The global header is deferred until the +// first WritePacket call. +func NewPcapWriter(w io.Writer, snapLen uint32) *PcapWriter { + if snapLen == 0 { + snapLen = defaultSnapLen + } + return &PcapWriter{w: w, snapLen: snapLen} +} + +// writeGlobalHeader writes the 24-byte pcap file header. +func (pw *PcapWriter) writeGlobalHeader() error { + var hdr [24]byte + binary.LittleEndian.PutUint32(hdr[0:4], pcapMagic) + binary.LittleEndian.PutUint16(hdr[4:6], pcapVersionMaj) + binary.LittleEndian.PutUint16(hdr[6:8], pcapVersionMin) + binary.LittleEndian.PutUint32(hdr[16:20], pw.snapLen) + binary.LittleEndian.PutUint32(hdr[20:24], linkTypeRaw) + + _, err := pw.w.Write(hdr[:]) + return err +} + +// WriteHeader writes the pcap global header. Safe to call multiple times. +func (pw *PcapWriter) WriteHeader() error { + if pw.headerWritten { + return nil + } + if err := pw.writeGlobalHeader(); err != nil { + return err + } + pw.headerWritten = true + return nil +} + +// WritePacket writes a single packet record, preceded by the global header +// on the first call. +func (pw *PcapWriter) WritePacket(ts time.Time, data []byte) error { + if err := pw.WriteHeader(); err != nil { + return err + } + + origLen := uint32(len(data)) + if origLen > pw.snapLen { + data = data[:pw.snapLen] + } + + var hdr [16]byte + binary.LittleEndian.PutUint32(hdr[0:4], uint32(ts.Unix())) + binary.LittleEndian.PutUint32(hdr[4:8], uint32(ts.Nanosecond()/1000)) + binary.LittleEndian.PutUint32(hdr[8:12], uint32(len(data))) + binary.LittleEndian.PutUint32(hdr[12:16], origLen) + + if _, err := pw.w.Write(hdr[:]); err != nil { + return err + } + _, err := pw.w.Write(data) + return err +} diff --git a/util/capture/pcap_test.go b/util/capture/pcap_test.go new file mode 100644 index 000000000..c3d21ef4a --- /dev/null +++ b/util/capture/pcap_test.go @@ -0,0 +1,68 @@ +package capture + +import ( + "bytes" + "encoding/binary" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPcapWriter_GlobalHeader(t *testing.T) { + var buf bytes.Buffer + pw := NewPcapWriter(&buf, 0) + + // Header is lazy, so write a dummy packet to trigger it. + err := pw.WritePacket(time.Now(), []byte{0x45, 0, 0, 20, 0, 0, 0, 0, 64, 1, 0, 0, 10, 0, 0, 1, 10, 0, 0, 2}) + require.NoError(t, err) + + data := buf.Bytes() + require.GreaterOrEqual(t, len(data), 24, "should contain global header") + + assert.Equal(t, uint32(pcapMagic), binary.LittleEndian.Uint32(data[0:4]), "magic number") + assert.Equal(t, uint16(pcapVersionMaj), binary.LittleEndian.Uint16(data[4:6]), "version major") + assert.Equal(t, uint16(pcapVersionMin), binary.LittleEndian.Uint16(data[6:8]), "version minor") + assert.Equal(t, uint32(defaultSnapLen), binary.LittleEndian.Uint32(data[16:20]), "snap length") + assert.Equal(t, uint32(linkTypeRaw), binary.LittleEndian.Uint32(data[20:24]), "link type") +} + +func TestPcapWriter_WritePacket(t *testing.T) { + var buf bytes.Buffer + pw := NewPcapWriter(&buf, 100) + + ts := time.Date(2025, 6, 15, 12, 30, 45, 123456000, time.UTC) + payload := make([]byte, 50) + for i := range payload { + payload[i] = byte(i) + } + + err := pw.WritePacket(ts, payload) + require.NoError(t, err) + + data := buf.Bytes()[24:] // skip global header + require.Len(t, data, 16+50, "packet header + payload") + + assert.Equal(t, uint32(ts.Unix()), binary.LittleEndian.Uint32(data[0:4]), "timestamp seconds") + assert.Equal(t, uint32(123456), binary.LittleEndian.Uint32(data[4:8]), "timestamp microseconds") + assert.Equal(t, uint32(50), binary.LittleEndian.Uint32(data[8:12]), "included length") + assert.Equal(t, uint32(50), binary.LittleEndian.Uint32(data[12:16]), "original length") + assert.Equal(t, payload, data[16:], "packet data") +} + +func TestPcapWriter_SnapLen(t *testing.T) { + var buf bytes.Buffer + pw := NewPcapWriter(&buf, 10) + + ts := time.Now() + payload := make([]byte, 50) + + err := pw.WritePacket(ts, payload) + require.NoError(t, err) + + data := buf.Bytes()[24:] + assert.Equal(t, uint32(10), binary.LittleEndian.Uint32(data[8:12]), "included length should be truncated") + assert.Equal(t, uint32(50), binary.LittleEndian.Uint32(data[12:16]), "original length preserved") + assert.Len(t, data[16:], 10, "only snap_len bytes written") +} diff --git a/util/capture/session.go b/util/capture/session.go new file mode 100644 index 000000000..09806e10c --- /dev/null +++ b/util/capture/session.go @@ -0,0 +1,213 @@ +package capture + +import ( + "fmt" + "sync" + "sync/atomic" + "time" +) + +const defaultBufSize = 256 + +type packetEntry struct { + ts time.Time + data []byte + dir Direction +} + +// Session manages an active packet capture. Packets are offered via Offer, +// buffered in a channel, and written to configured sinks by a background +// goroutine. This keeps the hot path (FilteredDevice.Read/Write) non-blocking. +// +// The caller must call Stop when done to flush remaining packets and release +// resources. +type Session struct { + pcapW *PcapWriter + textW *TextWriter + matcher Matcher + snapLen uint32 + flushFn func() + + ch chan packetEntry + done chan struct{} + stopped chan struct{} + + closeOnce sync.Once + closed atomic.Bool + packets atomic.Int64 + bytes atomic.Int64 + dropped atomic.Int64 + started time.Time +} + +// NewSession creates and starts a capture session. At least one of +// Options.Output or Options.TextOutput must be non-nil. +func NewSession(opts Options) (*Session, error) { + if opts.Output == nil && opts.TextOutput == nil { + return nil, fmt.Errorf("at least one output sink required") + } + + snapLen := opts.SnapLen + if snapLen == 0 { + snapLen = defaultSnapLen + } + + bufSize := opts.BufSize + if bufSize <= 0 { + bufSize = defaultBufSize + } + + s := &Session{ + matcher: opts.Matcher, + snapLen: snapLen, + ch: make(chan packetEntry, bufSize), + done: make(chan struct{}), + stopped: make(chan struct{}), + started: time.Now(), + } + + if opts.Output != nil { + s.pcapW = NewPcapWriter(opts.Output, snapLen) + } + if opts.TextOutput != nil { + s.textW = NewTextWriter(opts.TextOutput, opts.Verbose, opts.ASCII) + } + + s.flushFn = buildFlushFn(opts.Output, opts.TextOutput) + + go s.run() + return s, nil +} + +// Offer submits a packet for capture. It returns immediately and never blocks +// the caller. If the internal buffer is full the packet is dropped silently. +// +// outbound should be true for packets leaving the host (FilteredDevice.Read +// path) and false for packets arriving (FilteredDevice.Write path). +// +// Offer satisfies the device.PacketCapture interface. +func (s *Session) Offer(data []byte, outbound bool) { + if s.closed.Load() { + return + } + + if s.matcher != nil && !s.matcher.Match(data) { + return + } + + captureLen := len(data) + if s.snapLen > 0 && uint32(captureLen) > s.snapLen { + captureLen = int(s.snapLen) + } + + copied := make([]byte, captureLen) + copy(copied, data) + + dir := Inbound + if outbound { + dir = Outbound + } + + select { + case s.ch <- packetEntry{ts: time.Now(), data: copied, dir: dir}: + s.packets.Add(1) + s.bytes.Add(int64(len(data))) + default: + s.dropped.Add(1) + } +} + +// Stop signals the session to stop accepting packets, drains any buffered +// packets to the sinks, and waits for the writer goroutine to exit. +// It is safe to call multiple times. +func (s *Session) Stop() { + s.closeOnce.Do(func() { + s.closed.Store(true) + close(s.done) + }) + <-s.stopped +} + +// Done returns a channel that is closed when the session's writer goroutine +// has fully exited and all buffered packets have been flushed. +func (s *Session) Done() <-chan struct{} { + return s.stopped +} + +// Stats returns current capture counters. +func (s *Session) Stats() Stats { + return Stats{ + Packets: s.packets.Load(), + Bytes: s.bytes.Load(), + Dropped: s.dropped.Load(), + } +} + +func (s *Session) run() { + defer close(s.stopped) + + for { + select { + case pkt := <-s.ch: + s.write(pkt) + case <-s.done: + s.drain() + return + } + } +} + +func (s *Session) drain() { + for { + select { + case pkt := <-s.ch: + s.write(pkt) + default: + return + } + } +} + +func (s *Session) write(pkt packetEntry) { + if s.pcapW != nil { + // Best-effort: if the writer fails (broken pipe etc.), discard silently. + _ = s.pcapW.WritePacket(pkt.ts, pkt.data) + } + if s.textW != nil { + _ = s.textW.WritePacket(pkt.ts, pkt.data, pkt.dir) + } + s.flushFn() +} + +// buildFlushFn returns a function that flushes all writers that support it. +// This covers http.Flusher and similar streaming writers. +func buildFlushFn(writers ...any) func() { + type flusher interface { + Flush() + } + + var fns []func() + for _, w := range writers { + if w == nil { + continue + } + if f, ok := w.(flusher); ok { + fns = append(fns, f.Flush) + } + } + + switch len(fns) { + case 0: + return func() { + // no writers to flush + } + case 1: + return fns[0] + default: + return func() { + for _, fn := range fns { + fn() + } + } + } +} diff --git a/util/capture/session_test.go b/util/capture/session_test.go new file mode 100644 index 000000000..ab27686c6 --- /dev/null +++ b/util/capture/session_test.go @@ -0,0 +1,144 @@ +package capture + +import ( + "bytes" + "encoding/binary" + "net/netip" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSession_PcapOutput(t *testing.T) { + var buf bytes.Buffer + sess, err := NewSession(Options{ + Output: &buf, + BufSize: 16, + }) + require.NoError(t, err) + + pkt := buildIPv4Packet(t, + netip.MustParseAddr("10.0.0.1"), + netip.MustParseAddr("10.0.0.2"), + protoTCP, 12345, 443) + + sess.Offer(pkt, true) + sess.Stop() + + data := buf.Bytes() + require.Greater(t, len(data), 24, "should have global header + at least one packet") + + // Verify global header + assert.Equal(t, uint32(pcapMagic), binary.LittleEndian.Uint32(data[0:4])) + assert.Equal(t, uint32(linkTypeRaw), binary.LittleEndian.Uint32(data[20:24])) + + // Verify packet record + pktData := data[24:] + inclLen := binary.LittleEndian.Uint32(pktData[8:12]) + assert.Equal(t, uint32(len(pkt)), inclLen) + + stats := sess.Stats() + assert.Equal(t, int64(1), stats.Packets) + assert.Equal(t, int64(len(pkt)), stats.Bytes) + assert.Equal(t, int64(0), stats.Dropped) +} + +func TestSession_TextOutput(t *testing.T) { + var buf bytes.Buffer + sess, err := NewSession(Options{ + TextOutput: &buf, + BufSize: 16, + }) + require.NoError(t, err) + + pkt := buildIPv4Packet(t, + netip.MustParseAddr("10.0.0.1"), + netip.MustParseAddr("10.0.0.2"), + protoTCP, 12345, 443) + + sess.Offer(pkt, false) + sess.Stop() + + output := buf.String() + assert.Contains(t, output, "TCP") + assert.Contains(t, output, "10.0.0.1") + assert.Contains(t, output, "10.0.0.2") + assert.Contains(t, output, "443") + assert.Contains(t, output, "[IN TCP]") +} + +func TestSession_Filter(t *testing.T) { + var buf bytes.Buffer + sess, err := NewSession(Options{ + Output: &buf, + Matcher: &Filter{Port: 443}, + }) + require.NoError(t, err) + + pktMatch := buildIPv4Packet(t, + netip.MustParseAddr("10.0.0.1"), + netip.MustParseAddr("10.0.0.2"), + protoTCP, 12345, 443) + pktNoMatch := buildIPv4Packet(t, + netip.MustParseAddr("10.0.0.1"), + netip.MustParseAddr("10.0.0.2"), + protoTCP, 12345, 80) + + sess.Offer(pktMatch, true) + sess.Offer(pktNoMatch, true) + sess.Stop() + + stats := sess.Stats() + assert.Equal(t, int64(1), stats.Packets, "only matching packet should be captured") +} + +func TestSession_StopIdempotent(t *testing.T) { + var buf bytes.Buffer + sess, err := NewSession(Options{Output: &buf}) + require.NoError(t, err) + + sess.Stop() + sess.Stop() // should not panic or deadlock +} + +func TestSession_OfferAfterStop(t *testing.T) { + var buf bytes.Buffer + sess, err := NewSession(Options{Output: &buf}) + require.NoError(t, err) + sess.Stop() + + pkt := buildIPv4Packet(t, + netip.MustParseAddr("10.0.0.1"), + netip.MustParseAddr("10.0.0.2"), + protoTCP, 12345, 443) + sess.Offer(pkt, true) // should not panic + + assert.Equal(t, int64(0), sess.Stats().Packets) +} + +func TestSession_Done(t *testing.T) { + var buf bytes.Buffer + sess, err := NewSession(Options{Output: &buf}) + require.NoError(t, err) + + select { + case <-sess.Done(): + t.Fatal("Done should not be closed before Stop") + default: + } + + sess.Stop() + + select { + case <-sess.Done(): + case <-time.After(time.Second): + t.Fatal("Done should be closed after Stop") + } +} + +func TestSession_RequiresOutput(t *testing.T) { + _, err := NewSession(Options{}) + assert.Error(t, err) +} diff --git a/util/capture/text.go b/util/capture/text.go new file mode 100644 index 000000000..b44bd0cad --- /dev/null +++ b/util/capture/text.go @@ -0,0 +1,638 @@ +package capture + +import ( + "encoding/binary" + "fmt" + "io" + "net/netip" + "strings" + "time" + + "github.com/google/gopacket" + "github.com/google/gopacket/layers" +) + +// TextWriter writes human-readable one-line-per-packet summaries. +// It is not safe for concurrent use; callers must serialize access. +type TextWriter struct { + w io.Writer + verbose bool + ascii bool + flows map[dirKey]uint32 +} + +type dirKey struct { + src netip.AddrPort + dst netip.AddrPort +} + +// NewTextWriter creates a text formatter that writes to w. +func NewTextWriter(w io.Writer, verbose, ascii bool) *TextWriter { + return &TextWriter{ + w: w, + verbose: verbose, + ascii: ascii, + flows: make(map[dirKey]uint32), + } +} + +// tag formats the fixed-width "[DIR PROTO]" prefix with right-aligned protocol. +func tag(dir Direction, proto string) string { + return fmt.Sprintf("[%-3s %4s]", dir, proto) +} + +// WritePacket formats and writes a single packet line. +func (tw *TextWriter) WritePacket(ts time.Time, data []byte, dir Direction) error { + ts = ts.Local() + info, ok := parsePacketInfo(data) + if !ok { + _, err := fmt.Fprintf(tw.w, "%s [%-3s ?] ??? len=%d\n", + ts.Format("15:04:05.000000"), dir, len(data)) + return err + } + + timeStr := ts.Format("15:04:05.000000") + + var err error + switch info.proto { + case protoTCP: + err = tw.writeTCP(timeStr, dir, &info, data) + case protoUDP: + err = tw.writeUDP(timeStr, dir, &info, data) + case protoICMP: + err = tw.writeICMPv4(timeStr, dir, &info, data) + case protoICMPv6: + err = tw.writeICMPv6(timeStr, dir, &info, data) + default: + var verbose string + if tw.verbose { + verbose = tw.verboseIP(data, info.family) + } + _, err = fmt.Fprintf(tw.w, "%s %s %s > %s length %d%s\n", + timeStr, tag(dir, fmt.Sprintf("P%d", info.proto)), + info.srcIP, info.dstIP, len(data)-info.hdrLen, verbose) + } + return err +} + +func (tw *TextWriter) writeTCP(timeStr string, dir Direction, info *packetInfo, data []byte) error { + tcp := &layers.TCP{} + if err := tcp.DecodeFromBytes(data[info.hdrLen:], gopacket.NilDecodeFeedback); err != nil { + return tw.writeFallback(timeStr, dir, "TCP", info, data) + } + + flags := tcpFlagsStr(tcp) + plen := len(tcp.Payload) + + // Protocol annotation + var annotation string + if plen > 0 { + annotation = annotatePayload(tcp.Payload) + } + + if !tw.verbose { + _, err := fmt.Fprintf(tw.w, "%s %s %s:%d > %s:%d [%s] length %d%s\n", + timeStr, tag(dir, "TCP"), + info.srcIP, info.srcPort, info.dstIP, info.dstPort, + flags, plen, annotation) + if err != nil { + return err + } + if tw.ascii && plen > 0 { + return tw.writeASCII(tcp.Payload) + } + return nil + } + + relSeq, relAck := tw.relativeSeqAck(info, tcp.Seq, tcp.Ack) + + var seqStr string + if plen > 0 { + seqStr = fmt.Sprintf(", seq %d:%d", relSeq, relSeq+uint32(plen)) + } else { + seqStr = fmt.Sprintf(", seq %d", relSeq) + } + + var ackStr string + if tcp.ACK { + ackStr = fmt.Sprintf(", ack %d", relAck) + } + + var opts string + if s := formatTCPOptions(tcp.Options); s != "" { + opts = ", options [" + s + "]" + } + + verbose := tw.verboseIP(data, info.family) + + _, err := fmt.Fprintf(tw.w, "%s %s %s:%d > %s:%d [%s]%s%s, win %d%s, length %d%s%s\n", + timeStr, tag(dir, "TCP"), + info.srcIP, info.srcPort, info.dstIP, info.dstPort, + flags, seqStr, ackStr, tcp.Window, opts, plen, annotation, verbose) + if err != nil { + return err + } + if tw.ascii && plen > 0 { + return tw.writeASCII(tcp.Payload) + } + return nil +} + +func (tw *TextWriter) writeUDP(timeStr string, dir Direction, info *packetInfo, data []byte) error { + udp := &layers.UDP{} + if err := udp.DecodeFromBytes(data[info.hdrLen:], gopacket.NilDecodeFeedback); err != nil { + return tw.writeFallback(timeStr, dir, "UDP", info, data) + } + + plen := len(udp.Payload) + + // DNS replaces the entire line format + if plen > 0 && isDNSPort(info.srcPort, info.dstPort) { + if s := formatDNSPayload(udp.Payload); s != "" { + var verbose string + if tw.verbose { + verbose = tw.verboseIP(data, info.family) + } + _, err := fmt.Fprintf(tw.w, "%s %s %s:%d > %s:%d %s%s\n", + timeStr, tag(dir, "UDP"), + info.srcIP, info.srcPort, info.dstIP, info.dstPort, + s, verbose) + return err + } + } + + var verbose string + if tw.verbose { + verbose = tw.verboseIP(data, info.family) + } + _, err := fmt.Fprintf(tw.w, "%s %s %s:%d > %s:%d length %d%s\n", + timeStr, tag(dir, "UDP"), + info.srcIP, info.srcPort, info.dstIP, info.dstPort, + plen, verbose) + if err != nil { + return err + } + if tw.ascii && plen > 0 { + return tw.writeASCII(udp.Payload) + } + return nil +} + +func (tw *TextWriter) writeICMPv4(timeStr string, dir Direction, info *packetInfo, data []byte) error { + icmp := &layers.ICMPv4{} + if err := icmp.DecodeFromBytes(data[info.hdrLen:], gopacket.NilDecodeFeedback); err != nil { + return tw.writeFallback(timeStr, dir, "ICMP", info, data) + } + + var detail string + if icmp.TypeCode.Type() == layers.ICMPv4TypeEchoRequest || icmp.TypeCode.Type() == layers.ICMPv4TypeEchoReply { + detail = fmt.Sprintf("%s, id %d, seq %d", icmp.TypeCode.String(), icmp.Id, icmp.Seq) + } else { + detail = icmp.TypeCode.String() + } + + var verbose string + if tw.verbose { + verbose = tw.verboseIP(data, info.family) + } + _, err := fmt.Fprintf(tw.w, "%s %s %s > %s %s, length %d%s\n", + timeStr, tag(dir, "ICMP"), info.srcIP, info.dstIP, detail, len(data)-info.hdrLen, verbose) + return err +} + +func (tw *TextWriter) writeICMPv6(timeStr string, dir Direction, info *packetInfo, data []byte) error { + icmp := &layers.ICMPv6{} + if err := icmp.DecodeFromBytes(data[info.hdrLen:], gopacket.NilDecodeFeedback); err != nil { + return tw.writeFallback(timeStr, dir, "ICMP", info, data) + } + + var verbose string + if tw.verbose { + verbose = tw.verboseIP(data, info.family) + } + _, err := fmt.Fprintf(tw.w, "%s %s %s > %s %s, length %d%s\n", + timeStr, tag(dir, "ICMP"), info.srcIP, info.dstIP, icmp.TypeCode.String(), len(data)-info.hdrLen, verbose) + return err +} + +func (tw *TextWriter) writeFallback(timeStr string, dir Direction, proto string, info *packetInfo, data []byte) error { + _, err := fmt.Fprintf(tw.w, "%s %s %s:%d > %s:%d length %d\n", + timeStr, tag(dir, proto), + info.srcIP, info.srcPort, info.dstIP, info.dstPort, + len(data)-info.hdrLen) + return err +} + +func (tw *TextWriter) verboseIP(data []byte, family uint8) string { + return fmt.Sprintf(", ttl %d, id %d, iplen %d", + ipTTL(data, family), ipID(data, family), len(data)) +} + +// relativeSeqAck returns seq/ack relative to the first seen value per direction. +func (tw *TextWriter) relativeSeqAck(info *packetInfo, seq, ack uint32) (relSeq, relAck uint32) { + fwd := dirKey{ + src: netip.AddrPortFrom(info.srcIP, info.srcPort), + dst: netip.AddrPortFrom(info.dstIP, info.dstPort), + } + rev := dirKey{ + src: netip.AddrPortFrom(info.dstIP, info.dstPort), + dst: netip.AddrPortFrom(info.srcIP, info.srcPort), + } + + if isn, ok := tw.flows[fwd]; ok { + relSeq = seq - isn + } else { + tw.flows[fwd] = seq + } + + if isn, ok := tw.flows[rev]; ok { + relAck = ack - isn + } else { + relAck = ack + } + + return relSeq, relAck +} + +// writeASCII prints payload bytes as printable ASCII. +func (tw *TextWriter) writeASCII(payload []byte) error { + if len(payload) == 0 { + return nil + } + buf := make([]byte, len(payload)) + for i, b := range payload { + switch { + case b >= 0x20 && b < 0x7f: + buf[i] = b + case b == '\n' || b == '\r' || b == '\t': + buf[i] = b + default: + buf[i] = '.' + } + } + _, err := fmt.Fprintf(tw.w, "%s\n", buf) + return err +} + +// --- TCP helpers --- + +func ipTTL(data []byte, family uint8) uint8 { + if family == 4 && len(data) > 8 { + return data[8] + } + if family == 6 && len(data) > 7 { + return data[7] + } + return 0 +} + +func ipID(data []byte, family uint8) uint16 { + if family == 4 && len(data) >= 6 { + return binary.BigEndian.Uint16(data[4:6]) + } + return 0 +} + +func tcpFlagsStr(tcp *layers.TCP) string { + var buf [6]byte + n := 0 + if tcp.SYN { + buf[n] = 'S' + n++ + } + if tcp.FIN { + buf[n] = 'F' + n++ + } + if tcp.RST { + buf[n] = 'R' + n++ + } + if tcp.PSH { + buf[n] = 'P' + n++ + } + if tcp.ACK { + buf[n] = '.' + n++ + } + if tcp.URG { + buf[n] = 'U' + n++ + } + if n == 0 { + return "none" + } + return string(buf[:n]) +} + +func formatTCPOptions(opts []layers.TCPOption) string { + var parts []string + for _, opt := range opts { + switch opt.OptionType { + case layers.TCPOptionKindEndList: + return strings.Join(parts, ",") + case layers.TCPOptionKindNop: + parts = append(parts, "nop") + case layers.TCPOptionKindMSS: + if len(opt.OptionData) == 2 { + parts = append(parts, fmt.Sprintf("mss %d", binary.BigEndian.Uint16(opt.OptionData))) + } + case layers.TCPOptionKindWindowScale: + if len(opt.OptionData) == 1 { + parts = append(parts, fmt.Sprintf("wscale %d", opt.OptionData[0])) + } + case layers.TCPOptionKindSACKPermitted: + parts = append(parts, "sackOK") + case layers.TCPOptionKindSACK: + blocks := len(opt.OptionData) / 8 + parts = append(parts, fmt.Sprintf("sack %d", blocks)) + case layers.TCPOptionKindTimestamps: + if len(opt.OptionData) == 8 { + tsval := binary.BigEndian.Uint32(opt.OptionData[0:4]) + tsecr := binary.BigEndian.Uint32(opt.OptionData[4:8]) + parts = append(parts, fmt.Sprintf("TS val %d ecr %d", tsval, tsecr)) + } + } + } + return strings.Join(parts, ",") +} + +// --- Protocol annotation --- + +// annotatePayload returns a protocol annotation string for known application protocols. +func annotatePayload(payload []byte) string { + if len(payload) < 4 { + return "" + } + + s := string(payload) + + // SSH banner: "SSH-2.0-OpenSSH_9.6\r\n" + if strings.HasPrefix(s, "SSH-") { + if end := strings.IndexByte(s, '\r'); end > 0 && end < 256 { + return ": " + s[:end] + } + } + + // TLS records + if ann := annotateTLS(payload); ann != "" { + return ": " + ann + } + + // HTTP request or response + for _, method := range [...]string{"GET ", "POST ", "PUT ", "DELETE ", "HEAD ", "PATCH ", "OPTIONS ", "CONNECT "} { + if strings.HasPrefix(s, method) { + if end := strings.IndexByte(s, '\r'); end > 0 && end < 200 { + return ": " + s[:end] + } + } + } + if strings.HasPrefix(s, "HTTP/") { + if end := strings.IndexByte(s, '\r'); end > 0 && end < 200 { + return ": " + s[:end] + } + } + + return "" +} + +// annotateTLS returns a description for TLS handshake and alert records. +func annotateTLS(data []byte) string { + if len(data) < 6 { + return "" + } + + switch data[0] { + case 0x16: + return annotateTLSHandshake(data) + case 0x15: + return annotateTLSAlert(data) + } + return "" +} + +func annotateTLSHandshake(data []byte) string { + if len(data) < 10 { + return "" + } + switch data[5] { + case 0x01: + if sni := extractSNI(data); sni != "" { + return "TLS ClientHello SNI=" + sni + } + return "TLS ClientHello" + case 0x02: + return "TLS ServerHello" + } + return "" +} + +func annotateTLSAlert(data []byte) string { + if len(data) < 7 { + return "" + } + severity := "warning" + if data[5] == 2 { + severity = "fatal" + } + return fmt.Sprintf("TLS Alert %s %s", severity, tlsAlertDesc(data[6])) +} + +func tlsAlertDesc(code byte) string { + switch code { + case 0: + return "close_notify" + case 10: + return "unexpected_message" + case 40: + return "handshake_failure" + case 42: + return "bad_certificate" + case 43: + return "unsupported_certificate" + case 44: + return "certificate_revoked" + case 45: + return "certificate_expired" + case 48: + return "unknown_ca" + case 49: + return "access_denied" + case 50: + return "decode_error" + case 70: + return "protocol_version" + case 80: + return "internal_error" + case 86: + return "inappropriate_fallback" + case 90: + return "user_canceled" + case 112: + return "unrecognized_name" + default: + return fmt.Sprintf("alert(%d)", code) + } +} + +// extractSNI parses a TLS ClientHello and returns the SNI server name. +func extractSNI(data []byte) string { + if len(data) < 6 || data[0] != 0x16 { + return "" + } + recordLen := int(binary.BigEndian.Uint16(data[3:5])) + handshake := data[5:] + if len(handshake) > recordLen { + handshake = handshake[:recordLen] + } + + if len(handshake) < 4 || handshake[0] != 0x01 { + return "" + } + hsLen := int(handshake[1])<<16 | int(handshake[2])<<8 | int(handshake[3]) + body := handshake[4:] + if len(body) > hsLen { + body = body[:hsLen] + } + + extPos := clientHelloExtensionsOffset(body) + if extPos < 0 { + return "" + } + return findSNIExtension(body, extPos) +} + +// clientHelloExtensionsOffset returns the byte offset where extensions begin +// within the ClientHello body, or -1 if the body is too short. +func clientHelloExtensionsOffset(body []byte) int { + if len(body) < 38 { + return -1 + } + pos := 34 + + if pos >= len(body) { + return -1 + } + pos += 1 + int(body[pos]) // session ID + + if pos+2 > len(body) { + return -1 + } + pos += 2 + int(binary.BigEndian.Uint16(body[pos:pos+2])) // cipher suites + + if pos >= len(body) { + return -1 + } + pos += 1 + int(body[pos]) // compression methods + + if pos+2 > len(body) { + return -1 + } + return pos +} + +func findSNIExtension(body []byte, pos int) string { + extLen := int(binary.BigEndian.Uint16(body[pos : pos+2])) + pos += 2 + extEnd := pos + extLen + if extEnd > len(body) { + extEnd = len(body) + } + + for pos+4 <= extEnd { + extType := binary.BigEndian.Uint16(body[pos : pos+2]) + eLen := int(binary.BigEndian.Uint16(body[pos+2 : pos+4])) + pos += 4 + if pos+eLen > extEnd { + break + } + if extType == 0 && eLen >= 5 { + nameLen := int(binary.BigEndian.Uint16(body[pos+3 : pos+5])) + if pos+5+nameLen <= extEnd { + return string(body[pos+5 : pos+5+nameLen]) + } + } + pos += eLen + } + return "" +} + +func isDNSPort(src, dst uint16) bool { + return src == 53 || dst == 53 || src == 5353 || dst == 5353 +} + +// formatDNSPayload parses DNS and returns a tcpdump-style summary. +func formatDNSPayload(payload []byte) string { + d := &layers.DNS{} + if err := d.DecodeFromBytes(payload, gopacket.NilDecodeFeedback); err != nil { + return "" + } + + rd := "" + if d.RD { + rd = "+" + } + + if !d.QR { + return formatDNSQuery(d, rd, len(payload)) + } + return formatDNSResponse(d, rd, len(payload)) +} + +func formatDNSQuery(d *layers.DNS, rd string, plen int) string { + if len(d.Questions) == 0 { + return fmt.Sprintf("%04x%s (%d)", d.ID, rd, plen) + } + q := d.Questions[0] + return fmt.Sprintf("%04x%s %s? %s. (%d)", d.ID, rd, q.Type, q.Name, plen) +} + +func formatDNSResponse(d *layers.DNS, rd string, plen int) string { + anCount := d.ANCount + nsCount := d.NSCount + arCount := d.ARCount + + if d.ResponseCode != layers.DNSResponseCodeNoErr { + return fmt.Sprintf("%04x %d/%d/%d %s (%d)", d.ID, anCount, nsCount, arCount, d.ResponseCode, plen) + } + + if anCount > 0 && len(d.Answers) > 0 { + rr := d.Answers[0] + if rdata := shortRData(&rr); rdata != "" { + return fmt.Sprintf("%04x %d/%d/%d %s %s (%d)", d.ID, anCount, nsCount, arCount, rr.Type, rdata, plen) + } + } + + return fmt.Sprintf("%04x %d/%d/%d (%d)", d.ID, anCount, nsCount, arCount, plen) +} + +func shortRData(rr *layers.DNSResourceRecord) string { + switch rr.Type { + case layers.DNSTypeA, layers.DNSTypeAAAA: + if rr.IP != nil { + return rr.IP.String() + } + case layers.DNSTypeCNAME: + if len(rr.CNAME) > 0 { + return string(rr.CNAME) + "." + } + case layers.DNSTypePTR: + if len(rr.PTR) > 0 { + return string(rr.PTR) + "." + } + case layers.DNSTypeNS: + if len(rr.NS) > 0 { + return string(rr.NS) + "." + } + case layers.DNSTypeMX: + return fmt.Sprintf("%d %s.", rr.MX.Preference, rr.MX.Name) + case layers.DNSTypeTXT: + if len(rr.TXTs) > 0 { + return fmt.Sprintf("%q", string(rr.TXTs[0])) + } + case layers.DNSTypeSRV: + return fmt.Sprintf("%d %d %d %s.", rr.SRV.Priority, rr.SRV.Weight, rr.SRV.Port, rr.SRV.Name) + } + return "" +} From 50b58a682868851a3666a99662aeb00d7fbb3846 Mon Sep 17 00:00:00 2001 From: Viktor Liu <17948409+lixmal@users.noreply.github.com> Date: Mon, 4 May 2026 18:40:25 +0900 Subject: [PATCH 368/374] [client, relay] Advertise relay server IP via signal for foreign-relay fallback dial (#6004) --- client/internal/engine.go | 18 ++ client/internal/peer/handshaker.go | 8 +- client/internal/peer/signaler.go | 20 +- client/internal/peer/status.go | 2 +- client/internal/peer/worker_relay.go | 11 +- shared/relay/client/client.go | 116 +++++++- shared/relay/client/client_serverip_test.go | 280 ++++++++++++++++++ shared/relay/client/dialer/quic/quic.go | 15 +- shared/relay/client/dialer/race_dialer.go | 17 +- .../relay/client/dialer/race_dialer_test.go | 2 +- shared/relay/client/dialer/ws/conn.go | 16 +- .../client/dialer/ws/dialopts_generic.go | 10 +- shared/relay/client/dialer/ws/dialopts_js.go | 10 +- shared/relay/client/dialer/ws/ws.go | 21 +- shared/relay/client/manager.go | 37 ++- shared/relay/client/manager_serverip_test.go | 144 +++++++++ shared/relay/client/manager_test.go | 19 +- shared/signal/client/client.go | 69 +++-- shared/signal/proto/signalexchange.pb.go | 88 +++--- shared/signal/proto/signalexchange.proto | 10 +- 20 files changed, 789 insertions(+), 124 deletions(-) create mode 100644 shared/relay/client/client_serverip_test.go create mode 100644 shared/relay/client/manager_serverip_test.go diff --git a/client/internal/engine.go b/client/internal/engine.go index 8c9553e52..7f19e2d28 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -2454,6 +2454,8 @@ func convertToOfferAnswer(msg *sProto.Message) (*peer.OfferAnswer, error) { } } + relayIP := decodeRelayIP(msg.GetBody().GetRelayServerIP()) + offerAnswer := peer.OfferAnswer{ IceCredentials: peer.IceCredentials{ UFrag: remoteCred.UFrag, @@ -2464,7 +2466,23 @@ func convertToOfferAnswer(msg *sProto.Message) (*peer.OfferAnswer, error) { RosenpassPubKey: rosenpassPubKey, RosenpassAddr: rosenpassAddr, RelaySrvAddress: msg.GetBody().GetRelayServerAddress(), + RelaySrvIP: relayIP, SessionID: sessionID, } return &offerAnswer, nil } + +// decodeRelayIP decodes the proto relayServerIP bytes (4 or 16) into a +// netip.Addr. Returns the zero value for empty input and logs a warning +// for malformed payloads. +func decodeRelayIP(b []byte) netip.Addr { + if len(b) == 0 { + return netip.Addr{} + } + ip, ok := netip.AddrFromSlice(b) + if !ok { + log.Warnf("invalid relayServerIP in signal message (%d bytes), ignoring", len(b)) + return netip.Addr{} + } + return ip.Unmap() +} diff --git a/client/internal/peer/handshaker.go b/client/internal/peer/handshaker.go index 741dfce60..1d44096b6 100644 --- a/client/internal/peer/handshaker.go +++ b/client/internal/peer/handshaker.go @@ -3,6 +3,7 @@ package peer import ( "context" "errors" + "net/netip" "sync" "sync/atomic" @@ -40,6 +41,10 @@ type OfferAnswer struct { // relay server address RelaySrvAddress string + // RelaySrvIP is the IP the remote peer is connected to on its + // relay server. Used as a dial target if DNS for RelaySrvAddress + // fails. Zero value if the peer did not advertise an IP. + RelaySrvIP netip.Addr // SessionID is the unique identifier of the session, used to discard old messages SessionID *ICESessionID } @@ -217,8 +222,9 @@ func (h *Handshaker) buildOfferAnswer() OfferAnswer { answer.SessionID = &sid } - if addr, err := h.relay.RelayInstanceAddress(); err == nil { + if addr, ip, err := h.relay.RelayInstanceAddress(); err == nil { answer.RelaySrvAddress = addr + answer.RelaySrvIP = ip } return answer diff --git a/client/internal/peer/signaler.go b/client/internal/peer/signaler.go index f6eb87cca..5e437d96b 100644 --- a/client/internal/peer/signaler.go +++ b/client/internal/peer/signaler.go @@ -54,19 +54,19 @@ func (s *Signaler) signalOfferAnswer(offerAnswer OfferAnswer, remoteKey string, log.Warnf("failed to get session ID bytes: %v", err) } } - msg, err := signal.MarshalCredential( - s.wgPrivateKey, - offerAnswer.WgListenPort, - remoteKey, - &signal.Credential{ + msg, err := signal.MarshalCredential(s.wgPrivateKey, remoteKey, signal.CredentialPayload{ + Type: bodyType, + WgListenPort: offerAnswer.WgListenPort, + Credential: &signal.Credential{ UFrag: offerAnswer.IceCredentials.UFrag, Pwd: offerAnswer.IceCredentials.Pwd, }, - bodyType, - offerAnswer.RosenpassPubKey, - offerAnswer.RosenpassAddr, - offerAnswer.RelaySrvAddress, - sessionIDBytes) + RosenpassPubKey: offerAnswer.RosenpassPubKey, + RosenpassAddr: offerAnswer.RosenpassAddr, + RelaySrvAddress: offerAnswer.RelaySrvAddress, + RelaySrvIP: offerAnswer.RelaySrvIP, + SessionID: sessionIDBytes, + }) if err != nil { return err } diff --git a/client/internal/peer/status.go b/client/internal/peer/status.go index abedc208e..7bd19b0e1 100644 --- a/client/internal/peer/status.go +++ b/client/internal/peer/status.go @@ -919,7 +919,7 @@ func (d *Status) GetRelayStates() []relay.ProbeResult { // if the server connection is not established then we will use the general address // in case of connection we will use the instance specific address - instanceAddr, err := d.relayMgr.RelayInstanceAddress() + instanceAddr, _, err := d.relayMgr.RelayInstanceAddress() if err != nil { // TODO add their status for _, r := range d.relayMgr.ServerURLs() { diff --git a/client/internal/peer/worker_relay.go b/client/internal/peer/worker_relay.go index 06309fbaf..0402992c9 100644 --- a/client/internal/peer/worker_relay.go +++ b/client/internal/peer/worker_relay.go @@ -4,6 +4,7 @@ import ( "context" "errors" "net" + "net/netip" "sync" "sync/atomic" @@ -53,15 +54,19 @@ func (w *WorkerRelay) OnNewOffer(remoteOfferAnswer *OfferAnswer) { w.relaySupportedOnRemotePeer.Store(true) // the relayManager will return with error in case if the connection has lost with relay server - currentRelayAddress, err := w.relayManager.RelayInstanceAddress() + currentRelayAddress, _, err := w.relayManager.RelayInstanceAddress() if err != nil { w.log.Errorf("failed to handle new offer: %s", err) return } srv := w.preferredRelayServer(currentRelayAddress, remoteOfferAnswer.RelaySrvAddress) + var serverIP netip.Addr + if srv == remoteOfferAnswer.RelaySrvAddress { + serverIP = remoteOfferAnswer.RelaySrvIP + } - relayedConn, err := w.relayManager.OpenConn(w.peerCtx, srv, w.config.Key) + relayedConn, err := w.relayManager.OpenConn(w.peerCtx, srv, w.config.Key, serverIP) if err != nil { if errors.Is(err, relayClient.ErrConnAlreadyExists) { w.log.Debugf("handled offer by reusing existing relay connection") @@ -90,7 +95,7 @@ func (w *WorkerRelay) OnNewOffer(remoteOfferAnswer *OfferAnswer) { }) } -func (w *WorkerRelay) RelayInstanceAddress() (string, error) { +func (w *WorkerRelay) RelayInstanceAddress() (string, netip.Addr, error) { return w.relayManager.RelayInstanceAddress() } diff --git a/shared/relay/client/client.go b/shared/relay/client/client.go index b10b05617..1800bddb2 100644 --- a/shared/relay/client/client.go +++ b/shared/relay/client/client.go @@ -2,8 +2,12 @@ package client import ( "context" + "errors" "fmt" "net" + "net/netip" + "net/url" + "strings" "sync" "time" @@ -146,6 +150,7 @@ func (cc *connContainer) close() { type Client struct { log *log.Entry connectionURL string + serverIP netip.Addr authTokenStore *auth.TokenStore hashedID messages.PeerID @@ -170,13 +175,22 @@ type Client struct { } // NewClient creates a new client for the relay server. The client is not connected to the server until the Connect +// is called. func NewClient(serverURL string, authTokenStore *auth.TokenStore, peerID string, mtu uint16) *Client { + return NewClientWithServerIP(serverURL, netip.Addr{}, authTokenStore, peerID, mtu) +} + +// NewClientWithServerIP creates a new client for the relay server with a known server IP. serverIP, when valid, is +// dialed directly first; the FQDN is only attempted if the IP-based dial fails. TLS verification still uses the +// FQDN from serverURL via SNI. +func NewClientWithServerIP(serverURL string, serverIP netip.Addr, authTokenStore *auth.TokenStore, peerID string, mtu uint16) *Client { hashedID := messages.HashID(peerID) relayLog := log.WithFields(log.Fields{"relay": serverURL}) c := &Client{ log: relayLog, connectionURL: serverURL, + serverIP: serverIP, authTokenStore: authTokenStore, hashedID: hashedID, mtu: mtu, @@ -304,6 +318,23 @@ func (c *Client) ServerInstanceURL() (string, error) { return c.instanceURL.String(), nil } +// ConnectedIP returns the IP address of the live relay-server connection, +// extracted from the underlying socket's RemoteAddr. Zero value if not +// connected or if the address is not an IP literal. +func (c *Client) ConnectedIP() netip.Addr { + c.mu.Lock() + conn := c.relayConn + c.mu.Unlock() + if conn == nil { + return netip.Addr{} + } + addr := conn.RemoteAddr() + if addr == nil { + return netip.Addr{} + } + return extractIPLiteral(addr.String()) +} + // SetOnDisconnectListener sets a function that will be called when the connection to the relay server is closed. func (c *Client) SetOnDisconnectListener(fn func(string)) { c.listenerMutex.Lock() @@ -332,10 +363,23 @@ func (c *Client) Close() error { func (c *Client) connect(ctx context.Context) (*RelayAddr, error) { dialers := c.getDialers() - rd := dialer.NewRaceDial(c.log, dialer.DefaultConnectionTimeout, c.connectionURL, dialers...) - conn, err := rd.Dial(ctx) - if err != nil { - return nil, err + var conn net.Conn + if c.serverIP.IsValid() { + var err error + conn, err = c.dialRaceDirect(ctx, dialers) + if err != nil { + c.log.Infof("dial via server IP %s failed, falling back to FQDN: %v", c.serverIP, err) + conn = nil + } + } + + if conn == nil { + rd := dialer.NewRaceDial(c.log, dialer.DefaultConnectionTimeout, c.connectionURL, dialers...) + var err error + conn, err = rd.Dial(ctx) + if err != nil { + return nil, fmt.Errorf("dial via FQDN: %w", err) + } } c.relayConn = conn @@ -351,6 +395,52 @@ func (c *Client) connect(ctx context.Context) (*RelayAddr, error) { return instanceURL, nil } +// dialRaceDirect dials c.serverIP, preserving the original FQDN as the TLS ServerName for SNI. +func (c *Client) dialRaceDirect(ctx context.Context, dialers []dialer.DialeFn) (net.Conn, error) { + directURL, serverName, err := substituteHost(c.connectionURL, c.serverIP) + if err != nil { + return nil, fmt.Errorf("substitute host: %w", err) + } + + c.log.Debugf("dialing via server IP %s (SNI=%s)", c.serverIP, serverName) + + rd := dialer.NewRaceDial(c.log, dialer.DefaultConnectionTimeout, directURL, dialers...). + WithServerName(serverName) + return rd.Dial(ctx) +} + +// substituteHost replaces the host portion of a rel/rels URL with ip, +// preserving the scheme and port. Returns the rewritten URL and the +// original host to use as the TLS ServerName, or empty if the original +// host is itself an IP literal (SNI requires a DNS name). +func substituteHost(serverURL string, ip netip.Addr) (string, string, error) { + u, err := url.Parse(serverURL) + if err != nil { + return "", "", fmt.Errorf("parse %q: %w", serverURL, err) + } + if u.Scheme == "" || u.Host == "" { + return "", "", fmt.Errorf("invalid relay URL %q", serverURL) + } + if !ip.IsValid() { + return "", "", errors.New("invalid server IP") + } + origHost := u.Hostname() + if _, err := netip.ParseAddr(origHost); err == nil { + origHost = "" + } + ip = ip.Unmap() + newHost := ip.String() + if ip.Is6() { + newHost = "[" + newHost + "]" + } + if port := u.Port(); port != "" { + u.Host = newHost + ":" + port + } else { + u.Host = newHost + } + return u.String(), origHost, nil +} + func (c *Client) handShake(ctx context.Context) (*RelayAddr, error) { msg, err := messages.MarshalAuthMsg(c.hashedID, c.authTokenStore.TokenBinary()) if err != nil { @@ -716,3 +806,21 @@ func (c *Client) handlePeersWentOfflineMsg(buf []byte) { } c.stateSubscription.OnPeersWentOffline(peersID) } + +// extractIPLiteral returns the IP from address forms produced by the relay +// dialers (URL or host:port). Zero value if the host is not an IP. +func extractIPLiteral(s string) netip.Addr { + if u, err := url.Parse(s); err == nil && u.Host != "" { + s = u.Host + } + host, _, err := net.SplitHostPort(s) + if err != nil { + host = s + } + host = strings.Trim(host, "[]") + ip, err := netip.ParseAddr(host) + if err != nil { + return netip.Addr{} + } + return ip.Unmap() +} diff --git a/shared/relay/client/client_serverip_test.go b/shared/relay/client/client_serverip_test.go new file mode 100644 index 000000000..7e699e37d --- /dev/null +++ b/shared/relay/client/client_serverip_test.go @@ -0,0 +1,280 @@ +package client + +import ( + "context" + "fmt" + "net" + "net/netip" + "testing" + "time" + + "go.opentelemetry.io/otel" + + "github.com/netbirdio/netbird/client/iface" + "github.com/netbirdio/netbird/relay/server" + "github.com/netbirdio/netbird/shared/relay/auth/allow" +) + +// TestClient_ServerIPRecoversFromUnresolvableFQDN verifies that when the +// primary FQDN-based dial fails (unresolvable .invalid host), Connect +// recovers via the server IP and SNI still uses the FQDN. +func TestClient_ServerIPRecoversFromUnresolvableFQDN(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + listenAddr, port := freeAddr(t) + srvCfg := server.Config{ + Meter: otel.Meter(""), + ExposedAddress: fmt.Sprintf("rel://test-unresolvable-host.invalid:%d", port), + TLSSupport: false, + AuthValidator: &allow.Auth{}, + } + srv, err := server.NewServer(srvCfg) + if err != nil { + t.Fatalf("create server: %s", err) + } + + errChan := make(chan error, 1) + go func() { + if err := srv.Listen(server.ListenerConfig{Address: listenAddr}); err != nil { + errChan <- err + } + }() + t.Cleanup(func() { + if err := srv.Shutdown(context.Background()); err != nil { + t.Errorf("shutdown server: %s", err) + } + }) + if err := waitForServerToStart(errChan); err != nil { + t.Fatalf("server failed to start: %s", err) + } + + t.Run("no server IP, primary fails", func(t *testing.T) { + c := NewClient(srvCfg.ExposedAddress, hmacTokenStore, "alice-noip", iface.DefaultMTU) + err := c.Connect(ctx) + if err == nil { + _ = c.Close() + t.Fatalf("expected connect to fail without server IP, got nil") + } + }) + + t.Run("server IP recovers", func(t *testing.T) { + c := NewClientWithServerIP(srvCfg.ExposedAddress, netip.MustParseAddr("127.0.0.1"), hmacTokenStore, "alice-with-ip", iface.DefaultMTU) + if err := c.Connect(ctx); err != nil { + t.Fatalf("connect with server IP: %s", err) + } + t.Cleanup(func() { _ = c.Close() }) + + if !c.Ready() { + t.Fatalf("client not ready after connect") + } + if got := c.ConnectedIP(); got.String() != "127.0.0.1" { + t.Fatalf("ConnectedIP = %q, want 127.0.0.1", got) + } + }) +} + +// TestClient_ConnectedIPAfterFQDNDial verifies ConnectedIP returns the +// resolved IP after a successful FQDN-based dial. The underlying socket's +// RemoteAddr must be exposed through the dialer wrappers; if it returns +// the dial-time URL instead, ConnectedIP returns empty and the dial +// IP we advertise to peers is empty too. +func TestClient_ConnectedIPAfterFQDNDial(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + listenAddr, port := freeAddr(t) + srvCfg := server.Config{ + Meter: otel.Meter(""), + ExposedAddress: fmt.Sprintf("rel://localhost:%d", port), + TLSSupport: false, + AuthValidator: &allow.Auth{}, + } + srv, err := server.NewServer(srvCfg) + if err != nil { + t.Fatalf("create server: %s", err) + } + errChan := make(chan error, 1) + go func() { + if err := srv.Listen(server.ListenerConfig{Address: listenAddr}); err != nil { + errChan <- err + } + }() + t.Cleanup(func() { _ = srv.Shutdown(context.Background()) }) + if err := waitForServerToStart(errChan); err != nil { + t.Fatalf("server failed to start: %s", err) + } + + c := NewClient(srvCfg.ExposedAddress, hmacTokenStore, "alice-fqdn", iface.DefaultMTU) + if err := c.Connect(ctx); err != nil { + t.Fatalf("connect: %s", err) + } + t.Cleanup(func() { _ = c.Close() }) + + got := c.ConnectedIP().String() + if got != "127.0.0.1" && got != "::1" { + t.Fatalf("ConnectedIP after FQDN dial = %q, want 127.0.0.1 or ::1", got) + } +} + +func TestSubstituteHost(t *testing.T) { + tests := []struct { + name string + serverURL string + ip string + wantURL string + wantServerName string + wantErr bool + }{ + { + name: "rels with port", + serverURL: "rels://relay.netbird.io:443", + ip: "10.0.0.5", + wantURL: "rels://10.0.0.5:443", + wantServerName: "relay.netbird.io", + }, + { + name: "rel with port", + serverURL: "rel://relay.example.com:80", + ip: "192.0.2.1", + wantURL: "rel://192.0.2.1:80", + wantServerName: "relay.example.com", + }, + { + name: "ipv6 server IP bracketed", + serverURL: "rels://relay.example.com:443", + ip: "2001:db8::1", + wantURL: "rels://[2001:db8::1]:443", + wantServerName: "relay.example.com", + }, + { + name: "no port", + serverURL: "rels://relay.example.com", + ip: "10.0.0.5", + wantURL: "rels://10.0.0.5", + wantServerName: "relay.example.com", + }, + { + name: "ipv6 server with port returns empty SNI", + serverURL: "rels://[2001:db8::5]:443", + ip: "10.0.0.5", + wantURL: "rels://10.0.0.5:443", + wantServerName: "", + }, + { + name: "ipv4 server with port returns empty SNI", + serverURL: "rels://10.0.0.5:443", + ip: "10.0.0.6", + wantURL: "rels://10.0.0.6:443", + wantServerName: "", + }, + { + name: "ipv6 server IP no port", + serverURL: "rels://relay.example.com", + ip: "2001:db8::1", + wantURL: "rels://[2001:db8::1]", + wantServerName: "relay.example.com", + }, + { + name: "missing scheme", + serverURL: "relay.example.com:443", + ip: "10.0.0.5", + wantErr: true, + }, + { + name: "empty", + serverURL: "", + ip: "10.0.0.5", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ip netip.Addr + if tt.ip != "" { + ip = netip.MustParseAddr(tt.ip) + } + gotURL, gotName, err := substituteHost(tt.serverURL, ip) + if tt.wantErr { + if err == nil { + t.Fatalf("expected error, got nil") + } + return + } + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if gotURL != tt.wantURL { + t.Errorf("URL = %q, want %q", gotURL, tt.wantURL) + } + if gotName != tt.wantServerName { + t.Errorf("ServerName = %q, want %q", gotName, tt.wantServerName) + } + }) + } +} + +func TestClient_ConnectedIPEmptyWhenNotConnected(t *testing.T) { + c := NewClient("rel://example.invalid:80", hmacTokenStore, "x", iface.DefaultMTU) + if got := c.ConnectedIP(); got.IsValid() { + t.Fatalf("ConnectedIP on disconnected client = %q, want zero", got) + } +} + +// staticAddr is a net.Addr that returns a fixed string. Used to verify +// ConnectedIP parses RemoteAddr correctly. +type staticAddr struct{ s string } + +func (a staticAddr) Network() string { return "tcp" } +func (a staticAddr) String() string { return a.s } + +type stubConn struct { + net.Conn + remote net.Addr +} + +func (s stubConn) RemoteAddr() net.Addr { return s.remote } + +func TestClient_ConnectedIPParsesRemoteAddr(t *testing.T) { + tests := []struct { + name string + s string + want string + }{ + {"hostport ipv4", "127.0.0.1:50301", "127.0.0.1"}, + {"hostport ipv6 bracketed", "[::1]:50301", "::1"}, + {"url with ipv4", "rel://127.0.0.1:50301", "127.0.0.1"}, + {"url with ipv6", "rels://[2001:db8::1]:443", "2001:db8::1"}, + {"fqdn url returns empty", "rel://relay.example.com:50301", ""}, + {"fqdn hostport returns empty", "relay.example.com:50301", ""}, + {"plain ipv4 no port", "10.0.0.1", "10.0.0.1"}, + {"empty", "", ""}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Client{relayConn: stubConn{remote: staticAddr{s: tt.s}}} + got := c.ConnectedIP() + var gotStr string + if got.IsValid() { + gotStr = got.String() + } + if gotStr != tt.want { + t.Errorf("ConnectedIP(%q) = %q, want %q", tt.s, gotStr, tt.want) + } + }) + } +} + +// freeAddr returns a 127.0.0.1 address with an OS-assigned port. The +// listener is closed before returning, so the port is briefly free for +// the caller to bind. Avoids hardcoded ports that can collide. +func freeAddr(t *testing.T) (string, int) { + t.Helper() + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("get free port: %s", err) + } + addr := l.Addr().(*net.TCPAddr) + _ = l.Close() + return addr.String(), addr.Port +} diff --git a/shared/relay/client/dialer/quic/quic.go b/shared/relay/client/dialer/quic/quic.go index 2d7b00a80..602803b19 100644 --- a/shared/relay/client/dialer/quic/quic.go +++ b/shared/relay/client/dialer/quic/quic.go @@ -23,7 +23,7 @@ func (d Dialer) Protocol() string { return Network } -func (d Dialer) Dial(ctx context.Context, address string) (net.Conn, error) { +func (d Dialer) Dial(ctx context.Context, address, serverName string) (net.Conn, error) { quicURL, err := prepareURL(address) if err != nil { return nil, err @@ -32,11 +32,14 @@ func (d Dialer) Dial(ctx context.Context, address string) (net.Conn, error) { // Get the base TLS config tlsClientConfig := quictls.ClientQUICTLSConfig() - // Set ServerName to hostname if not an IP address - host, _, splitErr := net.SplitHostPort(quicURL) - if splitErr == nil && net.ParseIP(host) == nil { - // It's a hostname, not an IP - modify directly - tlsClientConfig.ServerName = host + switch { + case serverName != "" && net.ParseIP(serverName) == nil: + tlsClientConfig.ServerName = serverName + default: + host, _, splitErr := net.SplitHostPort(quicURL) + if splitErr == nil && net.ParseIP(host) == nil { + tlsClientConfig.ServerName = host + } } quicConfig := &quic.Config{ diff --git a/shared/relay/client/dialer/race_dialer.go b/shared/relay/client/dialer/race_dialer.go index 34359d17e..15208b858 100644 --- a/shared/relay/client/dialer/race_dialer.go +++ b/shared/relay/client/dialer/race_dialer.go @@ -14,7 +14,9 @@ const ( ) type DialeFn interface { - Dial(ctx context.Context, address string) (net.Conn, error) + // Dial connects to address. serverName, when non-empty, overrides the TLS + // ServerName used for SNI/cert validation. Empty means derive from address. + Dial(ctx context.Context, address, serverName string) (net.Conn, error) Protocol() string } @@ -27,6 +29,7 @@ type dialResult struct { type RaceDial struct { log *log.Entry serverURL string + serverName string dialerFns []DialeFn connectionTimeout time.Duration } @@ -40,6 +43,16 @@ func NewRaceDial(log *log.Entry, connectionTimeout time.Duration, serverURL stri } } +// WithServerName sets a TLS SNI/cert validation override. Used when serverURL +// contains an IP literal but the cert is issued for a different hostname. +// +// Mutates the receiver and is not safe for concurrent reconfiguration; a +// RaceDial is intended to be constructed per dial and discarded. +func (r *RaceDial) WithServerName(serverName string) *RaceDial { + r.serverName = serverName + return r +} + func (r *RaceDial) Dial(ctx context.Context) (net.Conn, error) { connChan := make(chan dialResult, len(r.dialerFns)) winnerConn := make(chan net.Conn, 1) @@ -64,7 +77,7 @@ func (r *RaceDial) dial(dfn DialeFn, abortCtx context.Context, connChan chan dia defer cancel() r.log.Infof("dialing Relay server via %s", dfn.Protocol()) - conn, err := dfn.Dial(ctx, r.serverURL) + conn, err := dfn.Dial(ctx, r.serverURL, r.serverName) connChan <- dialResult{Conn: conn, Protocol: dfn.Protocol(), Err: err} } diff --git a/shared/relay/client/dialer/race_dialer_test.go b/shared/relay/client/dialer/race_dialer_test.go index aa18df578..a53edc00e 100644 --- a/shared/relay/client/dialer/race_dialer_test.go +++ b/shared/relay/client/dialer/race_dialer_test.go @@ -28,7 +28,7 @@ type MockDialer struct { protocolStr string } -func (m *MockDialer) Dial(ctx context.Context, address string) (net.Conn, error) { +func (m *MockDialer) Dial(ctx context.Context, address, _ string) (net.Conn, error) { return m.dialFunc(ctx, address) } diff --git a/shared/relay/client/dialer/ws/conn.go b/shared/relay/client/dialer/ws/conn.go index d5b719f51..9497fab89 100644 --- a/shared/relay/client/dialer/ws/conn.go +++ b/shared/relay/client/dialer/ws/conn.go @@ -12,14 +12,24 @@ import ( type Conn struct { ctx context.Context *websocket.Conn - remoteAddr WebsocketAddr + remoteAddr net.Addr } -func NewConn(wsConn *websocket.Conn, serverAddress string) net.Conn { +// NewConn builds a relay ws.Conn. underlying is the raw TCP/TLS conn captured +// from the http transport's DialContext; when set, RemoteAddr returns its +// peer address (an IP literal). When nil (e.g. wasm), RemoteAddr falls back +// to the dial-time URL. +func NewConn(wsConn *websocket.Conn, serverAddress string, underlying net.Conn) net.Conn { + var addr net.Addr = WebsocketAddr{serverAddress} + if underlying != nil { + if ra := underlying.RemoteAddr(); ra != nil { + addr = ra + } + } return &Conn{ ctx: context.Background(), Conn: wsConn, - remoteAddr: WebsocketAddr{serverAddress}, + remoteAddr: addr, } } diff --git a/shared/relay/client/dialer/ws/dialopts_generic.go b/shared/relay/client/dialer/ws/dialopts_generic.go index 9dfe698d0..8008d89d3 100644 --- a/shared/relay/client/dialer/ws/dialopts_generic.go +++ b/shared/relay/client/dialer/ws/dialopts_generic.go @@ -2,10 +2,14 @@ package ws -import "github.com/coder/websocket" +import ( + "net" -func createDialOptions() *websocket.DialOptions { + "github.com/coder/websocket" +) + +func createDialOptions(serverName string, underlyingOut *net.Conn) *websocket.DialOptions { return &websocket.DialOptions{ - HTTPClient: httpClientNbDialer(), + HTTPClient: httpClientNbDialer(serverName, underlyingOut), } } diff --git a/shared/relay/client/dialer/ws/dialopts_js.go b/shared/relay/client/dialer/ws/dialopts_js.go index 7eac27531..5b11fe765 100644 --- a/shared/relay/client/dialer/ws/dialopts_js.go +++ b/shared/relay/client/dialer/ws/dialopts_js.go @@ -2,9 +2,13 @@ package ws -import "github.com/coder/websocket" +import ( + "net" -func createDialOptions() *websocket.DialOptions { - // WASM version doesn't support HTTPClient + "github.com/coder/websocket" +) + +func createDialOptions(_ string, _ *net.Conn) *websocket.DialOptions { + // WASM version doesn't support HTTPClient or custom TLS config. return &websocket.DialOptions{} } diff --git a/shared/relay/client/dialer/ws/ws.go b/shared/relay/client/dialer/ws/ws.go index 37b189e05..301486514 100644 --- a/shared/relay/client/dialer/ws/ws.go +++ b/shared/relay/client/dialer/ws/ws.go @@ -26,13 +26,14 @@ func (d Dialer) Protocol() string { return "WS" } -func (d Dialer) Dial(ctx context.Context, address string) (net.Conn, error) { +func (d Dialer) Dial(ctx context.Context, address, serverName string) (net.Conn, error) { wsURL, err := prepareURL(address) if err != nil { return nil, err } - opts := createDialOptions() + var underlying net.Conn + opts := createDialOptions(serverName, &underlying) parsedURL, err := url.Parse(wsURL) if err != nil { @@ -52,7 +53,7 @@ func (d Dialer) Dial(ctx context.Context, address string) (net.Conn, error) { _ = resp.Body.Close() } - conn := NewConn(wsConn, address) + conn := NewConn(wsConn, address, underlying) return conn, nil } @@ -64,7 +65,10 @@ func prepareURL(address string) (string, error) { return strings.Replace(address, "rel", "ws", 1), nil } -func httpClientNbDialer() *http.Client { +// httpClientNbDialer builds the http client used by the websocket library. +// underlyingOut, when non-nil, is populated with the raw conn from the +// transport's DialContext so the caller can read its RemoteAddr. +func httpClientNbDialer(serverName string, underlyingOut *net.Conn) *http.Client { customDialer := nbnet.NewDialer() certPool, err := x509.SystemCertPool() @@ -75,10 +79,15 @@ func httpClientNbDialer() *http.Client { customTransport := &http.Transport{ DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { - return customDialer.DialContext(ctx, network, addr) + c, err := customDialer.DialContext(ctx, network, addr) + if err == nil && underlyingOut != nil { + *underlyingOut = c + } + return c, err }, TLSClientConfig: &tls.Config{ - RootCAs: certPool, + RootCAs: certPool, + ServerName: serverName, }, } diff --git a/shared/relay/client/manager.go b/shared/relay/client/manager.go index 37104bfe7..3858b3c83 100644 --- a/shared/relay/client/manager.go +++ b/shared/relay/client/manager.go @@ -5,6 +5,7 @@ import ( "context" "fmt" "net" + "net/netip" "reflect" "sync" "time" @@ -75,6 +76,9 @@ type Manager struct { mtu uint16 maxBackoffInterval time.Duration + + cleanupInterval time.Duration + keepUnusedServerTime time.Duration } // NewManager creates a new manager instance. @@ -95,6 +99,8 @@ func NewManager(ctx context.Context, serverURLs []string, peerID string, mtu uin }, relayClients: make(map[string]*RelayTrack), onDisconnectedListeners: make(map[string]*list.List), + cleanupInterval: relayCleanupInterval, + keepUnusedServerTime: keepUnusedServerTime, } for _, opt := range opts { opt(m) @@ -130,7 +136,10 @@ func (m *Manager) Serve() error { // OpenConn opens a connection to the given peer key. If the peer is on the same relay server, the connection will be // established via the relay server. If the peer is on a different relay server, the manager will establish a new // connection to the relay server. It returns back with a net.Conn what represent the remote peer connection. -func (m *Manager) OpenConn(ctx context.Context, serverAddress, peerKey string) (net.Conn, error) { +// +// serverIP, when valid and serverAddress is foreign, is used as a dial target if the FQDN-based dial fails. +// Ignored for the local home-server path. TLS verification still uses the FQDN via SNI. +func (m *Manager) OpenConn(ctx context.Context, serverAddress, peerKey string, serverIP netip.Addr) (net.Conn, error) { m.relayClientMu.RLock() defer m.relayClientMu.RUnlock() @@ -151,7 +160,7 @@ func (m *Manager) OpenConn(ctx context.Context, serverAddress, peerKey string) ( netConn, err = m.relayClient.OpenConn(ctx, peerKey) } else { log.Debugf("open peer connection via foreign server: %s", serverAddress) - netConn, err = m.openConnVia(ctx, serverAddress, peerKey) + netConn, err = m.openConnVia(ctx, serverAddress, peerKey, serverIP) } if err != nil { return nil, err @@ -203,16 +212,22 @@ func (m *Manager) AddCloseListener(serverAddress string, onClosedListener OnServ return nil } -// RelayInstanceAddress returns the address of the permanent relay server. It could change if the network connection is -// lost. This address will be sent to the target peer to choose the common relay server for the communication. -func (m *Manager) RelayInstanceAddress() (string, error) { +// RelayInstanceAddress returns the address and resolved IP of the permanent relay server. It could change if the +// network connection is lost. The address is sent to the target peer to choose the common relay server for the +// communication; the IP is sent alongside so remote peers can dial directly without their own DNS lookup. Both +// values are read under the same lock so they cannot diverge across a reconnection. +func (m *Manager) RelayInstanceAddress() (string, netip.Addr, error) { m.relayClientMu.RLock() defer m.relayClientMu.RUnlock() if m.relayClient == nil { - return "", ErrRelayClientNotConnected + return "", netip.Addr{}, ErrRelayClientNotConnected } - return m.relayClient.ServerInstanceURL() + addr, err := m.relayClient.ServerInstanceURL() + if err != nil { + return "", netip.Addr{}, err + } + return addr, m.relayClient.ConnectedIP(), nil } // ServerURLs returns the addresses of the relay servers. @@ -236,7 +251,7 @@ func (m *Manager) UpdateToken(token *relayAuth.Token) error { return m.tokenStore.UpdateToken(token) } -func (m *Manager) openConnVia(ctx context.Context, serverAddress, peerKey string) (net.Conn, error) { +func (m *Manager) openConnVia(ctx context.Context, serverAddress, peerKey string, serverIP netip.Addr) (net.Conn, error) { // check if already has a connection to the desired relay server m.relayClientsMutex.RLock() rt, ok := m.relayClients[serverAddress] @@ -271,7 +286,7 @@ func (m *Manager) openConnVia(ctx context.Context, serverAddress, peerKey string m.relayClients[serverAddress] = rt m.relayClientsMutex.Unlock() - relayClient := NewClient(serverAddress, m.tokenStore, m.peerID, m.mtu) + relayClient := NewClientWithServerIP(serverAddress, serverIP, m.tokenStore, m.peerID, m.mtu) err := relayClient.Connect(m.ctx) if err != nil { rt.err = err @@ -364,7 +379,7 @@ func (m *Manager) isForeignServer(address string) (bool, error) { } func (m *Manager) startCleanupLoop() { - ticker := time.NewTicker(relayCleanupInterval) + ticker := time.NewTicker(m.cleanupInterval) defer ticker.Stop() for { select { @@ -389,7 +404,7 @@ func (m *Manager) cleanUpUnusedRelays() { continue } - if time.Since(rt.created) <= keepUnusedServerTime { + if time.Since(rt.created) <= m.keepUnusedServerTime { rt.Unlock() continue } diff --git a/shared/relay/client/manager_serverip_test.go b/shared/relay/client/manager_serverip_test.go new file mode 100644 index 000000000..a354beade --- /dev/null +++ b/shared/relay/client/manager_serverip_test.go @@ -0,0 +1,144 @@ +package client + +import ( + "context" + "io" + "net/netip" + "testing" + "time" + + "github.com/netbirdio/netbird/client/iface" + "github.com/netbirdio/netbird/relay/server" +) + +// TestManager_ForeignRelayServerIP exercises the foreign-relay path +// end-to-end through Manager.OpenConn. Alice and Bob register on different +// relay servers; Alice dials Bob's foreign relay using an unresolvable +// FQDN. Without a server IP the dial fails; with Bob's advertised IP it +// recovers and a payload round-trips between the peers. +func TestManager_ForeignRelayServerIP(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + // Alice's home relay + homeCfg := server.ListenerConfig{Address: "127.0.0.1:52401"} + homeSrv, err := server.NewServer(newManagerTestServerConfig(homeCfg.Address)) + if err != nil { + t.Fatalf("create home server: %s", err) + } + homeErr := make(chan error, 1) + go func() { + if err := homeSrv.Listen(homeCfg); err != nil { + homeErr <- err + } + }() + t.Cleanup(func() { _ = homeSrv.Shutdown(context.Background()) }) + if err := waitForServerToStart(homeErr); err != nil { + t.Fatalf("home server: %s", err) + } + + // Bob's foreign relay + foreignCfg := server.ListenerConfig{Address: "127.0.0.1:52402"} + foreignSrv, err := server.NewServer(newManagerTestServerConfig(foreignCfg.Address)) + if err != nil { + t.Fatalf("create foreign server: %s", err) + } + foreignErr := make(chan error, 1) + go func() { + if err := foreignSrv.Listen(foreignCfg); err != nil { + foreignErr <- err + } + }() + t.Cleanup(func() { _ = foreignSrv.Shutdown(context.Background()) }) + if err := waitForServerToStart(foreignErr); err != nil { + t.Fatalf("foreign server: %s", err) + } + + mCtx, mCancel := context.WithCancel(ctx) + t.Cleanup(mCancel) + + mgrAlice := NewManager(mCtx, toURL(homeCfg), "alice", iface.DefaultMTU) + if err := mgrAlice.Serve(); err != nil { + t.Fatalf("alice manager serve: %s", err) + } + + mgrBob := NewManager(mCtx, toURL(foreignCfg), "bob", iface.DefaultMTU) + if err := mgrBob.Serve(); err != nil { + t.Fatalf("bob manager serve: %s", err) + } + + // Bob's real relay URL and the IP that would ride along in signal as relayServerIP. + bobRealAddr, bobAdvertisedIP, err := mgrBob.RelayInstanceAddress() + if err != nil { + t.Fatalf("bob relay address: %s", err) + } + if !bobAdvertisedIP.IsValid() { + t.Fatalf("expected valid RelayInstanceIP for bob, got zero") + } + + // .invalid is reserved (RFC 2606), so DNS resolution always fails. + const brokenFQDN = "rel://relay-bob-instance.invalid:52402" + if brokenFQDN == bobRealAddr { + t.Fatalf("broken FQDN must differ from bob's real address (%s)", bobRealAddr) + } + + t.Run("no server IP, dial fails", func(t *testing.T) { + dialCtx, dialCancel := context.WithTimeout(ctx, 5*time.Second) + defer dialCancel() + _, err := mgrAlice.OpenConn(dialCtx, brokenFQDN, "bob", netip.Addr{}) + if err == nil { + t.Fatalf("expected OpenConn to fail without server IP, got success") + } + }) + + t.Run("server IP recovers", func(t *testing.T) { + // Bob waits for Alice's incoming peer connection on his side. + bobSideCh := make(chan error, 1) + go func() { + conn, err := mgrBob.OpenConn(ctx, bobRealAddr, "alice", netip.Addr{}) + if err != nil { + bobSideCh <- err + return + } + buf := make([]byte, 1024) + n, err := conn.Read(buf) + if err != nil { + bobSideCh <- err + return + } + if _, err := conn.Write(buf[:n]); err != nil { + bobSideCh <- err + return + } + bobSideCh <- nil + }() + + aliceConn, err := mgrAlice.OpenConn(ctx, brokenFQDN, "bob", bobAdvertisedIP) + if err != nil { + t.Fatalf("alice OpenConn with server IP: %s", err) + } + t.Cleanup(func() { _ = aliceConn.Close() }) + + payload := []byte("alice-to-bob") + if _, err := aliceConn.Write(payload); err != nil { + t.Fatalf("alice write: %s", err) + } + + buf := make([]byte, len(payload)) + if _, err := io.ReadFull(aliceConn, buf); err != nil { + t.Fatalf("alice read echo: %s", err) + } + if string(buf) != string(payload) { + t.Fatalf("echo mismatch: got %q want %q", buf, payload) + } + + select { + case err := <-bobSideCh: + if err != nil { + t.Fatalf("bob side: %s", err) + } + case <-time.After(5 * time.Second): + t.Fatalf("timed out waiting for bob side") + } + }) +} diff --git a/shared/relay/client/manager_test.go b/shared/relay/client/manager_test.go index 5bbcad886..9e964f688 100644 --- a/shared/relay/client/manager_test.go +++ b/shared/relay/client/manager_test.go @@ -3,6 +3,7 @@ package client import ( "context" "fmt" + "net/netip" "testing" "time" @@ -101,15 +102,15 @@ func TestForeignConn(t *testing.T) { if err := clientBob.Serve(); err != nil { t.Fatalf("failed to serve manager: %s", err) } - bobsSrvAddr, err := clientBob.RelayInstanceAddress() + bobsSrvAddr, _, err := clientBob.RelayInstanceAddress() if err != nil { t.Fatalf("failed to get relay address: %s", err) } - connAliceToBob, err := clientAlice.OpenConn(ctx, bobsSrvAddr, "bob") + connAliceToBob, err := clientAlice.OpenConn(ctx, bobsSrvAddr, "bob", netip.Addr{}) if err != nil { t.Fatalf("failed to bind channel: %s", err) } - connBobToAlice, err := clientBob.OpenConn(ctx, bobsSrvAddr, "alice") + connBobToAlice, err := clientBob.OpenConn(ctx, bobsSrvAddr, "alice", netip.Addr{}) if err != nil { t.Fatalf("failed to bind channel: %s", err) } @@ -209,7 +210,7 @@ func TestForeginConnClose(t *testing.T) { if err != nil { t.Fatalf("failed to serve manager: %s", err) } - conn, err := mgr.OpenConn(ctx, toURL(srvCfg2)[0], "bob") + conn, err := mgr.OpenConn(ctx, toURL(srvCfg2)[0], "bob", netip.Addr{}) if err != nil { t.Fatalf("failed to bind channel: %s", err) } @@ -301,7 +302,7 @@ func TestForeignAutoClose(t *testing.T) { } t.Log("open connection to another peer") - if _, err = mgr.OpenConn(ctx, foreignServerURL, "anotherpeer"); err == nil { + if _, err = mgr.OpenConn(ctx, foreignServerURL, "anotherpeer", netip.Addr{}); err == nil { t.Fatalf("should have failed to open connection to another peer") } @@ -367,11 +368,11 @@ func TestAutoReconnect(t *testing.T) { if err != nil { t.Fatalf("failed to serve manager: %s", err) } - ra, err := clientAlice.RelayInstanceAddress() + ra, _, err := clientAlice.RelayInstanceAddress() if err != nil { t.Errorf("failed to get relay address: %s", err) } - conn, err := clientAlice.OpenConn(ctx, ra, "bob") + conn, err := clientAlice.OpenConn(ctx, ra, "bob", netip.Addr{}) if err != nil { t.Errorf("failed to bind channel: %s", err) } @@ -391,7 +392,7 @@ func TestAutoReconnect(t *testing.T) { } log.Infof("reopent the connection") - _, err = clientAlice.OpenConn(ctx, ra, "bob") + _, err = clientAlice.OpenConn(ctx, ra, "bob", netip.Addr{}) if err != nil { t.Errorf("failed to open channel: %s", err) } @@ -453,7 +454,7 @@ func TestNotifierDoubleAdd(t *testing.T) { t.Fatalf("failed to serve manager: %s", err) } - conn1, err := clientAlice.OpenConn(ctx, clientAlice.ServerURLs()[0], "bob") + conn1, err := clientAlice.OpenConn(ctx, clientAlice.ServerURLs()[0], "bob", netip.Addr{}) if err != nil { t.Fatalf("failed to bind channel: %s", err) } diff --git a/shared/signal/client/client.go b/shared/signal/client/client.go index 5347c80e9..9dc6ccd37 100644 --- a/shared/signal/client/client.go +++ b/shared/signal/client/client.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "net/netip" "strings" "github.com/netbirdio/netbird/shared/signal/proto" @@ -14,17 +15,17 @@ import ( // A set of tools to exchange connection details (Wireguard endpoints) with the remote peer. -// Status is the status of the client -type Status string - -const StreamConnected Status = "Connected" -const StreamDisconnected Status = "Disconnected" - const ( + StreamConnected Status = "Connected" + StreamDisconnected Status = "Disconnected" + // DirectCheck indicates support to direct mode checks DirectCheck uint32 = 1 ) +// Status is the status of the client +type Status string + type Client interface { io.Closer StreamConnected() bool @@ -38,6 +39,24 @@ type Client interface { SetOnReconnectedListener(func()) } +// Credential is an instance of a GrpcClient's Credential +type Credential struct { + UFrag string + Pwd string +} + +// CredentialPayload bundles the fields of a signal Body for MarshalCredential. +type CredentialPayload struct { + Type proto.Body_Type + WgListenPort int + Credential *Credential + RosenpassPubKey []byte + RosenpassAddr string + RelaySrvAddress string + RelaySrvIP netip.Addr + SessionID []byte +} + // UnMarshalCredential parses the credentials from the message and returns a Credential instance func UnMarshalCredential(msg *proto.Message) (*Credential, error) { @@ -52,27 +71,27 @@ func UnMarshalCredential(msg *proto.Message) (*Credential, error) { } // MarshalCredential marshal a Credential instance and returns a Message object -func MarshalCredential(myKey wgtypes.Key, myPort int, remoteKey string, credential *Credential, t proto.Body_Type, rosenpassPubKey []byte, rosenpassAddr string, relaySrvAddress string, sessionID []byte) (*proto.Message, error) { +func MarshalCredential(myKey wgtypes.Key, remoteKey string, p CredentialPayload) (*proto.Message, error) { + body := &proto.Body{ + Type: p.Type, + Payload: fmt.Sprintf("%s:%s", p.Credential.UFrag, p.Credential.Pwd), + WgListenPort: uint32(p.WgListenPort), + NetBirdVersion: version.NetbirdVersion(), + RosenpassConfig: &proto.RosenpassConfig{ + RosenpassPubKey: p.RosenpassPubKey, + RosenpassServerAddr: p.RosenpassAddr, + }, + SessionId: p.SessionID, + } + if p.RelaySrvAddress != "" { + body.RelayServerAddress = &p.RelaySrvAddress + } + if p.RelaySrvIP.IsValid() { + body.RelayServerIP = p.RelaySrvIP.Unmap().AsSlice() + } return &proto.Message{ Key: myKey.PublicKey().String(), RemoteKey: remoteKey, - Body: &proto.Body{ - Type: t, - Payload: fmt.Sprintf("%s:%s", credential.UFrag, credential.Pwd), - WgListenPort: uint32(myPort), - NetBirdVersion: version.NetbirdVersion(), - RosenpassConfig: &proto.RosenpassConfig{ - RosenpassPubKey: rosenpassPubKey, - RosenpassServerAddr: rosenpassAddr, - }, - RelayServerAddress: relaySrvAddress, - SessionId: sessionID, - }, + Body: body, }, nil } - -// Credential is an instance of a GrpcClient's Credential -type Credential struct { - UFrag string - Pwd string -} diff --git a/shared/signal/proto/signalexchange.pb.go b/shared/signal/proto/signalexchange.pb.go index d9c61a846..0c80fb489 100644 --- a/shared/signal/proto/signalexchange.pb.go +++ b/shared/signal/proto/signalexchange.pb.go @@ -229,8 +229,13 @@ type Body struct { // RosenpassConfig is a Rosenpass config of the remote peer our peer tries to connect to RosenpassConfig *RosenpassConfig `protobuf:"bytes,7,opt,name=rosenpassConfig,proto3" json:"rosenpassConfig,omitempty"` // relayServerAddress is url of the relay server - RelayServerAddress string `protobuf:"bytes,8,opt,name=relayServerAddress,proto3" json:"relayServerAddress,omitempty"` - SessionId []byte `protobuf:"bytes,10,opt,name=sessionId,proto3,oneof" json:"sessionId,omitempty"` + RelayServerAddress *string `protobuf:"bytes,8,opt,name=relayServerAddress,proto3,oneof" json:"relayServerAddress,omitempty"` + SessionId []byte `protobuf:"bytes,10,opt,name=sessionId,proto3,oneof" json:"sessionId,omitempty"` + // relayServerIP is the IP the sender is connected to on its relay server, + // encoded as 4 bytes (IPv4) or 16 bytes (IPv6). Receivers may use it as a + // fallback dial target when DNS resolution of relayServerAddress fails. + // SNI/TLS verification still uses relayServerAddress. + RelayServerIP []byte `protobuf:"bytes,11,opt,name=relayServerIP,proto3,oneof" json:"relayServerIP,omitempty"` } func (x *Body) Reset() { @@ -315,8 +320,8 @@ func (x *Body) GetRosenpassConfig() *RosenpassConfig { } func (x *Body) GetRelayServerAddress() string { - if x != nil { - return x.RelayServerAddress + if x != nil && x.RelayServerAddress != nil { + return *x.RelayServerAddress } return "" } @@ -328,6 +333,13 @@ func (x *Body) GetSessionId() []byte { return nil } +func (x *Body) GetRelayServerIP() []byte { + if x != nil { + return x.RelayServerIP + } + return nil +} + // Mode indicates a connection mode type Mode struct { state protoimpl.MessageState @@ -451,7 +463,7 @@ var file_signalexchange_proto_rawDesc = []byte{ 0x52, 0x09, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x28, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x65, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, - 0x04, 0x62, 0x6f, 0x64, 0x79, 0x22, 0xe4, 0x03, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2d, + 0x04, 0x62, 0x6f, 0x64, 0x79, 0x22, 0xc3, 0x04, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x65, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, @@ -471,40 +483,46 @@ var file_signalexchange_proto_rawDesc = []byte{ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x65, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x53, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x33, 0x0a, 0x12, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x09, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x49, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x09, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x88, 0x01, 0x01, 0x22, 0x43, 0x0a, 0x04, 0x54, 0x79, 0x70, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x12, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x88, 0x01, 0x01, 0x12, 0x21, 0x0a, 0x09, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, + 0x52, 0x09, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x29, + 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x50, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x02, 0x52, 0x0d, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x49, 0x50, 0x88, 0x01, 0x01, 0x22, 0x43, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x4f, 0x46, 0x46, 0x45, 0x52, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x4e, 0x53, 0x57, 0x45, 0x52, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x44, 0x49, 0x44, 0x41, 0x54, 0x45, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4d, 0x4f, 0x44, 0x45, 0x10, - 0x04, 0x12, 0x0b, 0x0a, 0x07, 0x47, 0x4f, 0x5f, 0x49, 0x44, 0x4c, 0x45, 0x10, 0x05, 0x42, 0x0c, - 0x0a, 0x0a, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x2e, 0x0a, 0x04, - 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x06, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x88, 0x01, - 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x22, 0x6d, 0x0a, 0x0f, - 0x52, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x28, 0x0a, 0x0f, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x50, 0x75, 0x62, 0x4b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, - 0x61, 0x73, 0x73, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x30, 0x0a, 0x13, 0x72, 0x6f, 0x73, - 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, - 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x32, 0xb9, 0x01, 0x0a, 0x0e, - 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x45, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x4c, - 0x0a, 0x04, 0x53, 0x65, 0x6e, 0x64, 0x12, 0x20, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x65, - 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, - 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x20, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x61, - 0x6c, 0x65, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x59, 0x0a, 0x0d, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x20, 0x2e, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x65, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x45, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, - 0x20, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x65, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x04, 0x12, 0x0b, 0x0a, 0x07, 0x47, 0x4f, 0x5f, 0x49, 0x44, 0x4c, 0x45, 0x10, 0x05, 0x42, 0x15, + 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x49, 0x50, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x2e, 0x0a, 0x04, 0x4d, + 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x06, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x88, 0x01, 0x01, + 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x22, 0x6d, 0x0a, 0x0f, 0x52, + 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, + 0x0a, 0x0f, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x50, 0x75, 0x62, 0x4b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, + 0x73, 0x73, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x30, 0x0a, 0x13, 0x72, 0x6f, 0x73, 0x65, + 0x6e, 0x70, 0x61, 0x73, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x32, 0xb9, 0x01, 0x0a, 0x0e, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x45, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x4c, 0x0a, + 0x04, 0x53, 0x65, 0x6e, 0x64, 0x12, 0x20, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x65, 0x78, + 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x20, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, + 0x65, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x59, 0x0a, 0x0d, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x20, 0x2e, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x65, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x45, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x20, + 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x65, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/shared/signal/proto/signalexchange.proto b/shared/signal/proto/signalexchange.proto index 0a33ad78b..96a4001e3 100644 --- a/shared/signal/proto/signalexchange.proto +++ b/shared/signal/proto/signalexchange.proto @@ -63,9 +63,17 @@ message Body { RosenpassConfig rosenpassConfig = 7; // relayServerAddress is url of the relay server - string relayServerAddress = 8; + optional string relayServerAddress = 8; + + reserved 9; optional bytes sessionId = 10; + + // relayServerIP is the IP the sender is connected to on its relay server, + // encoded as 4 bytes (IPv4) or 16 bytes (IPv6). Receivers may use it as a + // fallback dial target when DNS resolution of relayServerAddress fails. + // SNI/TLS verification still uses relayServerAddress. + optional bytes relayServerIP = 11; } // Mode indicates a connection mode From 6262b0d841a5a4c1bd758d45332a6dba51cb09dd Mon Sep 17 00:00:00 2001 From: Bethuel Mmbaga Date: Mon, 4 May 2026 12:47:13 +0300 Subject: [PATCH 369/374] [management] Track pending approval in peer event metadata (#6040) --- management/server/peer.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/management/server/peer.go b/management/server/peer.go index d1c52002e..25c6ecd8c 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -818,6 +818,9 @@ func (am *DefaultAccountManager) AddPeer(ctx context.Context, accountID, setupKe if !addedByUser { opEvent.Meta["setup_key_name"] = peerAddConfig.SetupKeyName } + if newPeer.Status != nil && newPeer.Status.RequiresApproval { + opEvent.Meta["pending_approval"] = true + } if !temporary { am.StoreEvent(ctx, opEvent.InitiatorID, opEvent.TargetID, opEvent.AccountID, opEvent.Activity, opEvent.Meta) From a21f6ecb0a5d7ba45b4bf570a7af62ba1f66447d Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Mon, 4 May 2026 11:59:01 +0200 Subject: [PATCH 370/374] [client] release Status.mux before invoking notifier callbacks (#6039) The Status recorder used to fire notifier callbacks while holding d.mux: - notifyPeerListChanged / notifyPeerStateChangeListeners ran from inside the locked section of every Update*/AddPeerStateRoute/etc. - notifyAddressChanged ran from UpdateLocalPeerState and CleanLocalPeerState while d.mux was held. - onConnectionChanged was registered with a defer above defer d.mux.Unlock, so it executed before the mutex was released in the Mark*Connected/ Disconnected helpers. - notifyPeerStateChangeListeners did a blocking channel send under d.mux, so a slow subscriber stalled every other d.mux holder. A listener that re-enters the recorder (e.g. calls GetFullStatus from within a callback) deadlocks against d.mux, and any callback that takes longer than expected stalls every other state query for its duration. Capture the values needed for notification under the lock, release d.mux, then call the notifier. Build per-peer router-state snapshots inside the lock and dispatch them via dispatchRouterPeers afterwards. The router-peer channel send stays blocking, but now happens outside d.mux so a slow consumer cannot stall any other d.mux holder, and no peer state transitions are silently dropped. The notifier itself is unchanged: its internal state was already protected by its own locks, and the field d.notifier is set once in NewRecorder and never reassigned, so reading it without d.mux is safe. Also fix a pre-existing race in Test_notifier_RemoveListener / Test_notifier_SetListener: setListener spawns a goroutine that writes listener.peers, but the tests read listener.peers without waiting for it. --- client/internal/peer/notifier_test.go | 17 ++ client/internal/peer/status.go | 229 +++++++++++++++++--------- 2 files changed, 170 insertions(+), 76 deletions(-) diff --git a/client/internal/peer/notifier_test.go b/client/internal/peer/notifier_test.go index bbdc00e13..0b7722b0c 100644 --- a/client/internal/peer/notifier_test.go +++ b/client/internal/peer/notifier_test.go @@ -8,6 +8,7 @@ import ( type mocListener struct { lastState int wg sync.WaitGroup + peersWg sync.WaitGroup peers int } @@ -33,6 +34,7 @@ func (l *mocListener) OnAddressChanged(host, addr string) { } func (l *mocListener) OnPeersListChanged(size int) { l.peers = size + l.peersWg.Done() } func (l *mocListener) setWaiter() { @@ -43,6 +45,14 @@ func (l *mocListener) wait() { l.wg.Wait() } +func (l *mocListener) setPeersWaiter() { + l.peersWg.Add(1) +} + +func (l *mocListener) waitPeers() { + l.peersWg.Wait() +} + func Test_notifier_serverState(t *testing.T) { type scenario struct { @@ -72,11 +82,13 @@ func Test_notifier_serverState(t *testing.T) { func Test_notifier_SetListener(t *testing.T) { listener := &mocListener{} listener.setWaiter() + listener.setPeersWaiter() n := newNotifier() n.lastNotification = stateConnecting n.setListener(listener) listener.wait() + listener.waitPeers() if listener.lastState != n.lastNotification { t.Errorf("invalid state: %d, expected: %d", listener.lastState, n.lastNotification) } @@ -85,9 +97,14 @@ func Test_notifier_SetListener(t *testing.T) { func Test_notifier_RemoveListener(t *testing.T) { listener := &mocListener{} listener.setWaiter() + listener.setPeersWaiter() n := newNotifier() n.lastNotification = stateConnecting n.setListener(listener) + // setListener replays cached state on a goroutine; wait for both the state + // and peers callbacks to finish so we don't race on listener.peers. + listener.wait() + listener.waitPeers() n.removeListener() n.peerListChanged(1) diff --git a/client/internal/peer/status.go b/client/internal/peer/status.go index 7bd19b0e1..e8e61f660 100644 --- a/client/internal/peer/status.go +++ b/client/internal/peer/status.go @@ -320,10 +320,10 @@ func (d *Status) RemovePeer(peerPubKey string) error { // UpdatePeerState updates peer status func (d *Status) UpdatePeerState(receivedState State) error { d.mux.Lock() - defer d.mux.Unlock() peerState, ok := d.peers[receivedState.PubKey] if !ok { + d.mux.Unlock() return errors.New("peer doesn't exist") } @@ -343,23 +343,29 @@ func (d *Status) UpdatePeerState(receivedState State) error { d.peers[receivedState.PubKey] = peerState - if hasConnStatusChanged(oldState, receivedState.ConnStatus) { - d.notifyPeerListChanged() - } - + notifyList := hasConnStatusChanged(oldState, receivedState.ConnStatus) // when we close the connection we will not notify the router manager - if receivedState.ConnStatus == StatusIdle { - d.notifyPeerStateChangeListeners(receivedState.PubKey) + notifyRouter := receivedState.ConnStatus == StatusIdle + routerSnapshot := d.snapshotRouterPeersLocked(receivedState.PubKey, notifyRouter) + numPeers := d.numOfPeers() + + d.mux.Unlock() + + if notifyList { + d.notifier.peerListChanged(numPeers) + } + if notifyRouter { + d.dispatchRouterPeers(receivedState.PubKey, routerSnapshot) } return nil } func (d *Status) AddPeerStateRoute(peer string, route string, resourceId route.ResID) error { d.mux.Lock() - defer d.mux.Unlock() peerState, ok := d.peers[peer] if !ok { + d.mux.Unlock() return errors.New("peer doesn't exist") } @@ -371,17 +377,20 @@ func (d *Status) AddPeerStateRoute(peer string, route string, resourceId route.R d.routeIDLookup.AddRemoteRouteID(resourceId, pref) } + numPeers := d.numOfPeers() + d.mux.Unlock() + // todo: consider to make sense of this notification or not - d.notifyPeerListChanged() + d.notifier.peerListChanged(numPeers) return nil } func (d *Status) RemovePeerStateRoute(peer string, route string) error { d.mux.Lock() - defer d.mux.Unlock() peerState, ok := d.peers[peer] if !ok { + d.mux.Unlock() return errors.New("peer doesn't exist") } @@ -393,8 +402,11 @@ func (d *Status) RemovePeerStateRoute(peer string, route string) error { d.routeIDLookup.RemoveRemoteRouteID(pref) } + numPeers := d.numOfPeers() + d.mux.Unlock() + // todo: consider to make sense of this notification or not - d.notifyPeerListChanged() + d.notifier.peerListChanged(numPeers) return nil } @@ -410,10 +422,10 @@ func (d *Status) CheckRoutes(ip netip.Addr) ([]byte, bool) { func (d *Status) UpdatePeerICEState(receivedState State) error { d.mux.Lock() - defer d.mux.Unlock() peerState, ok := d.peers[receivedState.PubKey] if !ok { + d.mux.Unlock() return errors.New("peer doesn't exist") } @@ -431,22 +443,28 @@ func (d *Status) UpdatePeerICEState(receivedState State) error { d.peers[receivedState.PubKey] = peerState - if hasConnStatusChanged(oldState, receivedState.ConnStatus) { - d.notifyPeerListChanged() - } + notifyList := hasConnStatusChanged(oldState, receivedState.ConnStatus) + notifyRouter := hasStatusOrRelayedChange(oldState, receivedState.ConnStatus, oldIsRelayed, receivedState.Relayed) + routerSnapshot := d.snapshotRouterPeersLocked(receivedState.PubKey, notifyRouter) + numPeers := d.numOfPeers() - if hasStatusOrRelayedChange(oldState, receivedState.ConnStatus, oldIsRelayed, receivedState.Relayed) { - d.notifyPeerStateChangeListeners(receivedState.PubKey) + d.mux.Unlock() + + if notifyList { + d.notifier.peerListChanged(numPeers) + } + if notifyRouter { + d.dispatchRouterPeers(receivedState.PubKey, routerSnapshot) } return nil } func (d *Status) UpdatePeerRelayedState(receivedState State) error { d.mux.Lock() - defer d.mux.Unlock() peerState, ok := d.peers[receivedState.PubKey] if !ok { + d.mux.Unlock() return errors.New("peer doesn't exist") } @@ -461,22 +479,28 @@ func (d *Status) UpdatePeerRelayedState(receivedState State) error { d.peers[receivedState.PubKey] = peerState - if hasConnStatusChanged(oldState, receivedState.ConnStatus) { - d.notifyPeerListChanged() - } + notifyList := hasConnStatusChanged(oldState, receivedState.ConnStatus) + notifyRouter := hasStatusOrRelayedChange(oldState, receivedState.ConnStatus, oldIsRelayed, receivedState.Relayed) + routerSnapshot := d.snapshotRouterPeersLocked(receivedState.PubKey, notifyRouter) + numPeers := d.numOfPeers() - if hasStatusOrRelayedChange(oldState, receivedState.ConnStatus, oldIsRelayed, receivedState.Relayed) { - d.notifyPeerStateChangeListeners(receivedState.PubKey) + d.mux.Unlock() + + if notifyList { + d.notifier.peerListChanged(numPeers) + } + if notifyRouter { + d.dispatchRouterPeers(receivedState.PubKey, routerSnapshot) } return nil } func (d *Status) UpdatePeerRelayedStateToDisconnected(receivedState State) error { d.mux.Lock() - defer d.mux.Unlock() peerState, ok := d.peers[receivedState.PubKey] if !ok { + d.mux.Unlock() return errors.New("peer doesn't exist") } @@ -490,22 +514,28 @@ func (d *Status) UpdatePeerRelayedStateToDisconnected(receivedState State) error d.peers[receivedState.PubKey] = peerState - if hasConnStatusChanged(oldState, receivedState.ConnStatus) { - d.notifyPeerListChanged() - } + notifyList := hasConnStatusChanged(oldState, receivedState.ConnStatus) + notifyRouter := hasStatusOrRelayedChange(oldState, receivedState.ConnStatus, oldIsRelayed, receivedState.Relayed) + routerSnapshot := d.snapshotRouterPeersLocked(receivedState.PubKey, notifyRouter) + numPeers := d.numOfPeers() - if hasStatusOrRelayedChange(oldState, receivedState.ConnStatus, oldIsRelayed, receivedState.Relayed) { - d.notifyPeerStateChangeListeners(receivedState.PubKey) + d.mux.Unlock() + + if notifyList { + d.notifier.peerListChanged(numPeers) + } + if notifyRouter { + d.dispatchRouterPeers(receivedState.PubKey, routerSnapshot) } return nil } func (d *Status) UpdatePeerICEStateToDisconnected(receivedState State) error { d.mux.Lock() - defer d.mux.Unlock() peerState, ok := d.peers[receivedState.PubKey] if !ok { + d.mux.Unlock() return errors.New("peer doesn't exist") } @@ -522,12 +552,18 @@ func (d *Status) UpdatePeerICEStateToDisconnected(receivedState State) error { d.peers[receivedState.PubKey] = peerState - if hasConnStatusChanged(oldState, receivedState.ConnStatus) { - d.notifyPeerListChanged() - } + notifyList := hasConnStatusChanged(oldState, receivedState.ConnStatus) + notifyRouter := hasStatusOrRelayedChange(oldState, receivedState.ConnStatus, oldIsRelayed, receivedState.Relayed) + routerSnapshot := d.snapshotRouterPeersLocked(receivedState.PubKey, notifyRouter) + numPeers := d.numOfPeers() - if hasStatusOrRelayedChange(oldState, receivedState.ConnStatus, oldIsRelayed, receivedState.Relayed) { - d.notifyPeerStateChangeListeners(receivedState.PubKey) + d.mux.Unlock() + + if notifyList { + d.notifier.peerListChanged(numPeers) + } + if notifyRouter { + d.dispatchRouterPeers(receivedState.PubKey, routerSnapshot) } return nil } @@ -594,17 +630,33 @@ func (d *Status) UpdatePeerSSHHostKey(peerPubKey string, sshHostKey []byte) erro // FinishPeerListModifications this event invoke the notification func (d *Status) FinishPeerListModifications() { d.mux.Lock() - defer d.mux.Unlock() if !d.peerListChangedForNotification { + d.mux.Unlock() return } d.peerListChangedForNotification = false - d.notifyPeerListChanged() + numPeers := d.numOfPeers() + // snapshot per-peer router state to deliver after the lock is released + type routerDispatch struct { + peerID string + snapshot map[string]RouterState + } + dispatches := make([]routerDispatch, 0, len(d.peers)) for key := range d.peers { - d.notifyPeerStateChangeListeners(key) + snapshot := d.snapshotRouterPeersLocked(key, true) + if snapshot != nil { + dispatches = append(dispatches, routerDispatch{peerID: key, snapshot: snapshot}) + } + } + + d.mux.Unlock() + + d.notifier.peerListChanged(numPeers) + for _, rd := range dispatches { + d.dispatchRouterPeers(rd.peerID, rd.snapshot) } } @@ -655,10 +707,12 @@ func (d *Status) GetLocalPeerState() LocalPeerState { // UpdateLocalPeerState updates local peer status func (d *Status) UpdateLocalPeerState(localPeerState LocalPeerState) { d.mux.Lock() - defer d.mux.Unlock() - d.localPeer = localPeerState - d.notifyAddressChanged() + fqdn := d.localPeer.FQDN + ip := d.localPeer.IP + d.mux.Unlock() + + d.notifier.localAddressChanged(fqdn, ip) } // AddLocalPeerStateRoute adds a route to the local peer state @@ -721,30 +775,36 @@ func (d *Status) CleanLocalPeerStateRoutes() { // CleanLocalPeerState cleans local peer status func (d *Status) CleanLocalPeerState() { d.mux.Lock() - defer d.mux.Unlock() - d.localPeer = LocalPeerState{} - d.notifyAddressChanged() + fqdn := d.localPeer.FQDN + ip := d.localPeer.IP + d.mux.Unlock() + + d.notifier.localAddressChanged(fqdn, ip) } // MarkManagementDisconnected sets ManagementState to disconnected func (d *Status) MarkManagementDisconnected(err error) { d.mux.Lock() - defer d.mux.Unlock() - defer d.onConnectionChanged() - d.managementState = false d.managementError = err + mgm := d.managementState + sig := d.signalState + d.mux.Unlock() + + d.notifier.updateServerStates(mgm, sig) } // MarkManagementConnected sets ManagementState to connected func (d *Status) MarkManagementConnected() { d.mux.Lock() - defer d.mux.Unlock() - defer d.onConnectionChanged() - d.managementState = true d.managementError = nil + mgm := d.managementState + sig := d.signalState + d.mux.Unlock() + + d.notifier.updateServerStates(mgm, sig) } // UpdateSignalAddress update the address of the signal server @@ -778,21 +838,25 @@ func (d *Status) UpdateLazyConnection(enabled bool) { // MarkSignalDisconnected sets SignalState to disconnected func (d *Status) MarkSignalDisconnected(err error) { d.mux.Lock() - defer d.mux.Unlock() - defer d.onConnectionChanged() - d.signalState = false d.signalError = err + mgm := d.managementState + sig := d.signalState + d.mux.Unlock() + + d.notifier.updateServerStates(mgm, sig) } // MarkSignalConnected sets SignalState to connected func (d *Status) MarkSignalConnected() { d.mux.Lock() - defer d.mux.Unlock() - defer d.onConnectionChanged() - d.signalState = true d.signalError = nil + mgm := d.managementState + sig := d.signalState + d.mux.Unlock() + + d.notifier.updateServerStates(mgm, sig) } func (d *Status) UpdateRelayStates(relayResults []relay.ProbeResult) { @@ -1012,18 +1076,17 @@ func (d *Status) RemoveConnectionListener() { d.notifier.removeListener() } -func (d *Status) onConnectionChanged() { - d.notifier.updateServerStates(d.managementState, d.signalState) -} - -// notifyPeerStateChangeListeners notifies route manager about the change in peer state -func (d *Status) notifyPeerStateChangeListeners(peerID string) { - subs, ok := d.changeNotify[peerID] - if !ok { - return +// snapshotRouterPeersLocked builds the RouterState map for a peer's subscribers. +// Caller MUST hold d.mux. Returns nil when there are no subscribers for peerID +// or when notify is false. The snapshot is consumed later by dispatchRouterPeers +// outside the lock so the channel send cannot stall any d.mux holder. +func (d *Status) snapshotRouterPeersLocked(peerID string, notify bool) map[string]RouterState { + if !notify { + return nil + } + if _, ok := d.changeNotify[peerID]; !ok { + return nil } - - // collect the relevant data for router peers routerPeers := make(map[string]RouterState, len(d.changeNotify)) for pid := range d.changeNotify { s, ok := d.peers[pid] @@ -1031,13 +1094,35 @@ func (d *Status) notifyPeerStateChangeListeners(peerID string) { log.Warnf("router peer not found in peers list: %s", pid) continue } - routerPeers[pid] = RouterState{ Status: s.ConnStatus, Relayed: s.Relayed, Latency: s.Latency, } } + return routerPeers +} + +// dispatchRouterPeers delivers a previously snapshotted router-state map to +// the peer's subscribers. Caller MUST NOT hold d.mux. The method takes a +// fresh, short read of d.changeNotify under the lock to grab subscriber +// channels, then sends outside the lock so a slow consumer cannot block other +// d.mux holders. The send itself stays blocking (only short-circuited by the +// subscriber's context) so peer state transitions are not silently dropped. +func (d *Status) dispatchRouterPeers(peerID string, routerPeers map[string]RouterState) { + if routerPeers == nil { + return + } + + d.mux.Lock() + subsMap, ok := d.changeNotify[peerID] + subs := make([]*StatusChangeSubscription, 0, len(subsMap)) + if ok { + for _, sub := range subsMap { + subs = append(subs, sub) + } + } + d.mux.Unlock() for _, sub := range subs { select { @@ -1047,14 +1132,6 @@ func (d *Status) notifyPeerStateChangeListeners(peerID string) { } } -func (d *Status) notifyPeerListChanged() { - d.notifier.peerListChanged(d.numOfPeers()) -} - -func (d *Status) notifyAddressChanged() { - d.notifier.localAddressChanged(d.localPeer.FQDN, d.localPeer.IP) -} - func (d *Status) numOfPeers() int { return len(d.peers) + len(d.offlinePeers) } From a547fc74edd71268767d258a6ebe8513fa65f467 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Mon, 4 May 2026 11:59:25 +0200 Subject: [PATCH 371/374] [client] Use ctx.Err() instead of gRPC codes.Canceled to detect shutdown (#6019) Detecting shutdown by inspecting the gRPC status code conflates a local context cancellation with a server- or proxy-sent codes.Canceled. When the latter occurs (e.g. an intermediary proxy resets the stream), the retry loop silently terminates and the client never reconnects. Switch to ctx.Err() in the signal Receive loop and management Sync/Job handlers, and stop matching gRPC Canceled/DeadlineExceeded in the flow client's isContextDone helper. With this change, a server-sent Canceled is treated as a transient error and the backoff retry loop continues. --- flow/client/client.go | 15 +++++------- shared/management/client/grpc.go | 39 ++++++++++++-------------------- shared/signal/client/grpc.go | 2 +- 3 files changed, 21 insertions(+), 35 deletions(-) diff --git a/flow/client/client.go b/flow/client/client.go index 8ad637974..180a4b441 100644 --- a/flow/client/client.go +++ b/flow/client/client.go @@ -13,11 +13,9 @@ import ( "github.com/cenkalti/backoff/v4" log "github.com/sirupsen/logrus" "google.golang.org/grpc" - "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/status" nbgrpc "github.com/netbirdio/netbird/client/grpc" "github.com/netbirdio/netbird/flow/proto" @@ -301,12 +299,11 @@ func defaultBackoff(ctx context.Context, interval time.Duration) backoff.BackOff }, ctx) } +// isContextDone reports whether the local context has been canceled or has +// exceeded its deadline. It deliberately does not inspect gRPC status codes: +// a server- or proxy-sent codes.Canceled / codes.DeadlineExceeded must not +// short-circuit our retry loop, since retrying is the correct response when +// the local context is still alive. func isContextDone(err error) bool { - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return true - } - if s, ok := status.FromError(err); ok { - return s.Code() == codes.Canceled || s.Code() == codes.DeadlineExceeded - } - return false + return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) } diff --git a/shared/management/client/grpc.go b/shared/management/client/grpc.go index 2a51a777d..80625fe06 100644 --- a/shared/management/client/grpc.go +++ b/shared/management/client/grpc.go @@ -246,27 +246,23 @@ func (c *GrpcClient) handleJobStream( for { jobReq, err := c.receiveJobRequest(ctx, stream, serverPubKey) if err != nil { + if ctx.Err() != nil { + log.Debugf("job stream context has been canceled, this usually indicates shutdown") + return nil + } if s, ok := gstatus.FromError(err); ok { switch s.Code() { case codes.PermissionDenied: c.notifyDisconnected(err) return backoff.Permanent(err) // unrecoverable error, propagate to the upper layer - case codes.Canceled: - log.Debugf("job stream context has been canceled, this usually indicates shutdown") - return err case codes.Unimplemented: log.Warn("Job feature is not supported by the current management server version. " + "Please update the management service to use this feature.") return nil - default: - log.Warnf("job stream disconnected, will retry silently. Reason: %v", err) - return err } - } else { - // non-gRPC error - log.Warnf("job stream disconnected, will retry silently. Reason: %v", err) - return err } + log.Warnf("job stream disconnected, will retry silently. Reason: %v", err) + return err } if jobReq == nil || len(jobReq.ID) == 0 { @@ -381,22 +377,15 @@ func (c *GrpcClient) handleSyncStream(ctx context.Context, serverPubKey wgtypes. err = c.receiveUpdatesEvents(stream, serverPubKey, msgHandler) if err != nil { c.notifyDisconnected(err) - if s, ok := gstatus.FromError(err); ok { - switch s.Code() { - case codes.PermissionDenied: - return backoff.Permanent(err) // unrecoverable error, propagate to the upper layer - case codes.Canceled: - log.Debugf("management connection context has been canceled, this usually indicates shutdown") - return nil - default: - log.Warnf("disconnected from the Management service but will retry silently. Reason: %v", err) - return err - } - } else { - // non-gRPC error - log.Warnf("disconnected from the Management service but will retry silently. Reason: %v", err) - return err + if ctx.Err() != nil { + log.Debugf("management connection context has been canceled, this usually indicates shutdown") + return nil } + if s, ok := gstatus.FromError(err); ok && s.Code() == codes.PermissionDenied { + return backoff.Permanent(err) // unrecoverable error, propagate to the upper layer + } + log.Warnf("disconnected from the Management service but will retry silently. Reason: %v", err) + return err } return nil diff --git a/shared/signal/client/grpc.go b/shared/signal/client/grpc.go index d0f598dd7..b245b2296 100644 --- a/shared/signal/client/grpc.go +++ b/shared/signal/client/grpc.go @@ -167,7 +167,7 @@ func (c *GrpcClient) Receive(ctx context.Context, msgHandler func(msg *proto.Mes // start receiving messages from the Signal stream (from other peers through signal) err = c.receive(stream) if err != nil { - if s, ok := status.FromError(err); ok && s.Code() == codes.Canceled { + if ctx.Err() != nil { log.Debugf("signal connection context has been canceled, this usually indicates shutdown") return nil } From 4268a5cfb7046bf713feac4b862539d20769d944 Mon Sep 17 00:00:00 2001 From: Lauri Tirkkonen Date: Tue, 5 May 2026 01:24:52 +0900 Subject: [PATCH 372/374] [client] Use atomic write/rename pattern for ssh config --- client/ssh/config/manager.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/client/ssh/config/manager.go b/client/ssh/config/manager.go index 6e584b2c3..5d69fd35c 100644 --- a/client/ssh/config/manager.go +++ b/client/ssh/config/manager.go @@ -224,15 +224,20 @@ func (m *Manager) buildHostPatterns(peer PeerSSHInfo) []string { func (m *Manager) writeSSHConfig(sshConfig string) error { sshConfigPath := filepath.Join(m.sshConfigDir, m.sshConfigFile) + sshConfigPathTmp := sshConfigPath + ".tmp" if err := os.MkdirAll(m.sshConfigDir, 0755); err != nil { return fmt.Errorf("create SSH config directory %s: %w", m.sshConfigDir, err) } - if err := writeFileWithTimeout(sshConfigPath, []byte(sshConfig), 0644); err != nil { + if err := writeFileWithTimeout(sshConfigPathTmp, []byte(sshConfig), 0644); err != nil { return fmt.Errorf("write SSH config file %s: %w", sshConfigPath, err) } + if err := os.Rename(sshConfigPathTmp, sshConfigPath); err != nil { + return fmt.Errorf("rename ssh config %s -> %s: %w", sshConfigPathTmp, sshConfigPath, err) + } + log.Infof("Created NetBird SSH client config: %s", sshConfigPath) return nil } From bde632c3b2f4fbf1252f0b37209f000661f466d9 Mon Sep 17 00:00:00 2001 From: alexsavio Date: Mon, 4 May 2026 18:49:39 +0200 Subject: [PATCH 373/374] [client] Replace WG interface monitor polling with netlink subscription on Linux (#5857) --- client/internal/wg_iface_monitor.go | 31 +---- client/internal/wg_iface_monitor_linux.go | 134 ++++++++++++++++++++++ client/internal/wg_iface_monitor_other.go | 56 +++++++++ 3 files changed, 195 insertions(+), 26 deletions(-) create mode 100644 client/internal/wg_iface_monitor_linux.go create mode 100644 client/internal/wg_iface_monitor_other.go diff --git a/client/internal/wg_iface_monitor.go b/client/internal/wg_iface_monitor.go index a870c1145..2a2fa2366 100644 --- a/client/internal/wg_iface_monitor.go +++ b/client/internal/wg_iface_monitor.go @@ -6,7 +6,6 @@ import ( "fmt" "net" "runtime" - "time" log "github.com/sirupsen/logrus" @@ -28,6 +27,10 @@ func NewWGIfaceMonitor() *WGIfaceMonitor { // Start begins monitoring the WireGuard interface. // It relies on the provided context cancellation to stop. +// +// On Linux the watcher is event-driven (RTNLGRP_LINK netlink subscription) +// to avoid the allocation churn of repeatedly dumping the kernel link +// table; on other platforms it falls back to a low-frequency poll. func (m *WGIfaceMonitor) Start(ctx context.Context, ifaceName string) (shouldRestart bool, err error) { defer close(m.done) @@ -56,31 +59,7 @@ func (m *WGIfaceMonitor) Start(ctx context.Context, ifaceName string) (shouldRes log.Infof("Interface monitor: watching %s (index: %d)", ifaceName, expectedIndex) - ticker := time.NewTicker(2 * time.Second) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - log.Infof("Interface monitor: stopped for %s", ifaceName) - return false, fmt.Errorf("wg interface monitor stopped: %v", ctx.Err()) - case <-ticker.C: - currentIndex, err := getInterfaceIndex(ifaceName) - if err != nil { - // Interface was deleted - log.Infof("Interface monitor: %s deleted", ifaceName) - return true, fmt.Errorf("interface %s deleted: %w", ifaceName, err) - } - - // Check if interface index changed (interface was recreated) - if currentIndex != expectedIndex { - log.Infof("Interface monitor: %s recreated (index changed from %d to %d), restarting engine", - ifaceName, expectedIndex, currentIndex) - return true, nil - } - } - } - + return watchInterface(ctx, ifaceName, expectedIndex) } // getInterfaceIndex returns the index of a network interface by name. diff --git a/client/internal/wg_iface_monitor_linux.go b/client/internal/wg_iface_monitor_linux.go new file mode 100644 index 000000000..2662b99d6 --- /dev/null +++ b/client/internal/wg_iface_monitor_linux.go @@ -0,0 +1,134 @@ +//go:build linux + +package internal + +import ( + "context" + "fmt" + "syscall" + + log "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" +) + +// watchInterface uses an RTNLGRP_LINK netlink subscription to detect +// deletion or recreation of the WireGuard interface. +// +// The previous implementation polled net.InterfaceByName every 2 s, which +// on Linux issues syscall.NetlinkRIB(RTM_GETLINK, ...) and dumps the +// entire kernel link table on every call. On hosts with many veth +// interfaces (containers, bridges) the resulting allocation churn was on +// the order of ~1 GB/day from this single ticker, which on small ARM +// hosts manifested as a slow RSS climb (see netbirdio/netbird#3678). +// +// The event-driven version below allocates only when the kernel actually +// publishes a link event for the tracked interface — typically zero +// allocations between events. +func watchInterface(ctx context.Context, ifaceName string, expectedIndex int) (bool, error) { + done := make(chan struct{}) + defer close(done) + + // Buffer the channel to absorb event bursts (e.g. when many veth + // pairs are created/destroyed at once by container runtimes). + linkChan := make(chan netlink.LinkUpdate, 32) + if err := netlink.LinkSubscribe(linkChan, done); err != nil { + // Return shouldRestart=true so the engine recovers monitoring + // via triggerClientRestart instead of silently losing it for + // the rest of the process lifetime. + return true, fmt.Errorf("subscribe to link updates: %w", err) + } + + // Race window: the interface could have been deleted (or recreated) + // between the initial getInterfaceIndex() in Start and LinkSubscribe + // completing its handshake with the kernel. Re-check explicitly so we + // do not block forever waiting for an event that already fired. + if currentIndex, err := getInterfaceIndex(ifaceName); err != nil { + log.Infof("Interface monitor: %s deleted before subscription completed", ifaceName) + return true, fmt.Errorf("interface %s deleted: %w", ifaceName, err) + } else if currentIndex != expectedIndex { + log.Infof("Interface monitor: %s recreated (index changed from %d to %d) before subscription completed", + ifaceName, expectedIndex, currentIndex) + return true, nil + } + + for { + select { + case <-ctx.Done(): + log.Infof("Interface monitor: stopped for %s", ifaceName) + return false, fmt.Errorf("wg interface monitor stopped: %w", ctx.Err()) + + case update, ok := <-linkChan: + if !ok { + // The vishvananda/netlink subscription goroutine closes + // the channel on receive errors. Signal the engine to + // restart so monitoring is re-established instead of + // silently ending. + log.Warnf("Interface monitor: link subscription channel closed unexpectedly for %s", ifaceName) + return true, fmt.Errorf("link subscription channel closed unexpectedly") + } + if restart, err := inspectLinkEvent(update, ifaceName, expectedIndex); restart { + return true, err + } + } + } +} + +// inspectLinkEvent classifies a single netlink link update against the +// tracked WireGuard interface. It returns (true, err) when the engine +// should restart monitoring; (false, nil) means the event is unrelated +// and the caller should keep waiting. +// +// The error component, when non-nil, describes the kernel-side reason +// (deletion or rename); the recreation case returns (true, nil) since +// no error condition is reported. +func inspectLinkEvent(update netlink.LinkUpdate, ifaceName string, expectedIndex int) (bool, error) { + eventIndex := int(update.Index) + eventName := "" + if attrs := update.Attrs(); attrs != nil { + eventName = attrs.Name + } + + switch update.Header.Type { + case syscall.RTM_DELLINK: + return inspectDelLink(eventIndex, ifaceName, expectedIndex) + case syscall.RTM_NEWLINK: + return inspectNewLink(eventIndex, eventName, ifaceName, expectedIndex) + } + return false, nil +} + +// inspectDelLink reports a restart when an RTM_DELLINK arrives for the +// tracked interface index. +func inspectDelLink(eventIndex int, ifaceName string, expectedIndex int) (bool, error) { + if eventIndex != expectedIndex { + return false, nil + } + log.Infof("Interface monitor: %s deleted", ifaceName) + return true, fmt.Errorf("interface %s deleted", ifaceName) +} + +// inspectNewLink reports a restart when an RTM_NEWLINK either: +// +// 1. Introduces a link with our name at a different index (recreation +// after a delete), or +// +// 2. Reports a link still at our index but with a different name +// (in-place rename). The previous polling implementation caught +// this implicitly because net.InterfaceByName(ifaceName) would +// start failing; the event-driven version has to test it. +// +// Same name + same index is just a flag/state change on the existing +// interface and is ignored. +func inspectNewLink(eventIndex int, eventName, ifaceName string, expectedIndex int) (bool, error) { + if eventName == ifaceName && eventIndex != expectedIndex { + log.Infof("Interface monitor: %s recreated (index changed from %d to %d), restarting engine", + ifaceName, expectedIndex, eventIndex) + return true, nil + } + if eventIndex == expectedIndex && eventName != "" && eventName != ifaceName { + log.Infof("Interface monitor: %s renamed to %s (index %d), restarting engine", + ifaceName, eventName, expectedIndex) + return true, fmt.Errorf("interface %s renamed to %s", ifaceName, eventName) + } + return false, nil +} diff --git a/client/internal/wg_iface_monitor_other.go b/client/internal/wg_iface_monitor_other.go new file mode 100644 index 000000000..afebbf4df --- /dev/null +++ b/client/internal/wg_iface_monitor_other.go @@ -0,0 +1,56 @@ +//go:build !linux + +package internal + +import ( + "context" + "fmt" + "time" + + log "github.com/sirupsen/logrus" +) + +// watchInterface polls net.InterfaceByName at a fixed interval to detect +// deletion or recreation of the WireGuard interface. +// +// This is the fallback used on non-Linux desktop and server platforms +// (darwin, windows, freebsd). It is also compiled on android and ios so +// the package builds on every supported GOOS, but it is never reached +// at runtime there because Start() in wg_iface_monitor.go exits early +// on mobile platforms. +// +// The Linux build (see wg_iface_monitor_linux.go) uses an event-driven +// RTNLGRP_LINK netlink subscription instead, because on Linux +// net.InterfaceByName issues syscall.NetlinkRIB(RTM_GETLINK, ...) which +// dumps the entire kernel link table on every call and produces +// significant allocation churn (netbirdio/netbird#3678). +// +// Windows is also reported in #3678 as affected by RSS climb. A future +// follow-up could implement an event-driven watcher there using +// NotifyIpInterfaceChange from iphlpapi. +func watchInterface(ctx context.Context, ifaceName string, expectedIndex int) (bool, error) { + ticker := time.NewTicker(2 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + log.Infof("Interface monitor: stopped for %s", ifaceName) + return false, fmt.Errorf("wg interface monitor stopped: %w", ctx.Err()) + case <-ticker.C: + currentIndex, err := getInterfaceIndex(ifaceName) + if err != nil { + // Interface was deleted + log.Infof("Interface monitor: %s deleted", ifaceName) + return true, fmt.Errorf("interface %s deleted: %w", ifaceName, err) + } + + // Check if interface index changed (interface was recreated) + if currentIndex != expectedIndex { + log.Infof("Interface monitor: %s recreated (index changed from %d to %d), restarting engine", + ifaceName, expectedIndex, currentIndex) + return true, nil + } + } + } +} From 104990dfdd5d9eae1760da5f57ba47a2df7052a6 Mon Sep 17 00:00:00 2001 From: JungwooShin <166088609+typhoon1217@users.noreply.github.com> Date: Tue, 5 May 2026 01:59:29 +0900 Subject: [PATCH 374/374] [client] Display QR code for device auth login URL (#5415) --- client/cmd/login.go | 14 +++++++++++--- client/cmd/qr.go | 25 +++++++++++++++++++++++++ client/cmd/qr_test.go | 26 ++++++++++++++++++++++++++ client/cmd/up.go | 5 +++++ go.mod | 2 ++ go.sum | 4 ++++ 6 files changed, 73 insertions(+), 3 deletions(-) create mode 100644 client/cmd/qr.go create mode 100644 client/cmd/qr_test.go diff --git a/client/cmd/login.go b/client/cmd/login.go index 4521a67c9..bd37e30f1 100644 --- a/client/cmd/login.go +++ b/client/cmd/login.go @@ -10,6 +10,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "golang.org/x/term" "google.golang.org/grpc/codes" gstatus "google.golang.org/grpc/status" @@ -23,6 +24,7 @@ import ( func init() { loginCmd.PersistentFlags().BoolVar(&noBrowser, noBrowserFlag, false, noBrowserDesc) + loginCmd.PersistentFlags().BoolVar(&showQR, showQRFlag, false, showQRDesc) loginCmd.PersistentFlags().StringVar(&profileName, profileNameFlag, "", profileNameDesc) loginCmd.PersistentFlags().StringVarP(&configPath, "config", "c", "", "(DEPRECATED) Netbird config file location") } @@ -256,7 +258,7 @@ func doForegroundLogin(ctx context.Context, cmd *cobra.Command, setupKey string, } func handleSSOLogin(ctx context.Context, cmd *cobra.Command, loginResp *proto.LoginResponse, client proto.DaemonServiceClient, pm *profilemanager.ProfileManager) error { - openURL(cmd, loginResp.VerificationURIComplete, loginResp.UserCode, noBrowser) + openURL(cmd, loginResp.VerificationURIComplete, loginResp.UserCode, noBrowser, showQR) resp, err := client.WaitSSOLogin(ctx, &proto.WaitSSOLoginRequest{UserCode: loginResp.UserCode, Hostname: hostName}) if err != nil { @@ -324,7 +326,7 @@ func foregroundGetTokenInfo(ctx context.Context, cmd *cobra.Command, config *pro return nil, fmt.Errorf("getting a request OAuth flow info failed: %v", err) } - openURL(cmd, flowInfo.VerificationURIComplete, flowInfo.UserCode, noBrowser) + openURL(cmd, flowInfo.VerificationURIComplete, flowInfo.UserCode, noBrowser, showQR) tokenInfo, err := oAuthFlow.WaitToken(context.TODO(), flowInfo) if err != nil { @@ -334,7 +336,7 @@ func foregroundGetTokenInfo(ctx context.Context, cmd *cobra.Command, config *pro return &tokenInfo, nil } -func openURL(cmd *cobra.Command, verificationURIComplete, userCode string, noBrowser bool) { +func openURL(cmd *cobra.Command, verificationURIComplete, userCode string, noBrowser, showQR bool) { var codeMsg string if userCode != "" && !strings.Contains(verificationURIComplete, userCode) { codeMsg = fmt.Sprintf("and enter the code %s to authenticate.", userCode) @@ -348,6 +350,12 @@ func openURL(cmd *cobra.Command, verificationURIComplete, userCode string, noBro verificationURIComplete + " " + codeMsg) } + if showQR { + if f, ok := cmd.OutOrStdout().(*os.File); ok && term.IsTerminal(int(f.Fd())) { + printQRCode(f, verificationURIComplete) + } + } + cmd.Println("") if !noBrowser { diff --git a/client/cmd/qr.go b/client/cmd/qr.go new file mode 100644 index 000000000..8b2c489ff --- /dev/null +++ b/client/cmd/qr.go @@ -0,0 +1,25 @@ +package cmd + +import ( + "io" + + "github.com/mdp/qrterminal/v3" +) + +// printQRCode prints a QR code for the given URL to the writer. +// Called only when the user explicitly requests QR output via --qr. +func printQRCode(w io.Writer, url string) { + if url == "" { + return + } + qrterminal.GenerateWithConfig(url, qrterminal.Config{ + Level: qrterminal.M, + Writer: w, + HalfBlocks: true, + BlackChar: qrterminal.BLACK_BLACK, + WhiteChar: qrterminal.WHITE_WHITE, + BlackWhiteChar: qrterminal.BLACK_WHITE, + WhiteBlackChar: qrterminal.WHITE_BLACK, + QuietZone: qrterminal.QUIET_ZONE, + }) +} diff --git a/client/cmd/qr_test.go b/client/cmd/qr_test.go new file mode 100644 index 000000000..d12705b9e --- /dev/null +++ b/client/cmd/qr_test.go @@ -0,0 +1,26 @@ +package cmd + +import ( + "bytes" + "testing" +) + +func TestPrintQRCode_EmptyURL(t *testing.T) { + var buf bytes.Buffer + + printQRCode(&buf, "") + + if buf.Len() != 0 { + t.Error("expected no output for empty URL") + } +} + +func TestPrintQRCode_WritesOutput(t *testing.T) { + var buf bytes.Buffer + + printQRCode(&buf, "https://example.com/auth") + + if buf.Len() == 0 { + t.Error("expected QR code output for non-empty URL") + } +} diff --git a/client/cmd/up.go b/client/cmd/up.go index f5766522a..f4136cb23 100644 --- a/client/cmd/up.go +++ b/client/cmd/up.go @@ -39,6 +39,9 @@ const ( noBrowserFlag = "no-browser" noBrowserDesc = "do not open the browser for SSO login" + showQRFlag = "qr" + showQRDesc = "show QR code for the SSO login URL (useful for headless machines without browser access)" + profileNameFlag = "profile" profileNameDesc = "profile name to use for the login. If not specified, the last used profile will be used." ) @@ -48,6 +51,7 @@ var ( dnsLabels []string dnsLabelsValidated domain.List noBrowser bool + showQR bool profileName string configPath string @@ -80,6 +84,7 @@ func init() { ) upCmd.PersistentFlags().BoolVar(&noBrowser, noBrowserFlag, false, noBrowserDesc) + upCmd.PersistentFlags().BoolVar(&showQR, showQRFlag, false, showQRDesc) upCmd.PersistentFlags().StringVar(&profileName, profileNameFlag, "", profileNameDesc) upCmd.PersistentFlags().StringVarP(&configPath, "config", "c", "", "(DEPRECATED) NetBird config file location. ") diff --git a/go.mod b/go.mod index 8e6a481d2..e82e6b10d 100644 --- a/go.mod +++ b/go.mod @@ -71,6 +71,7 @@ require ( github.com/libp2p/go-netroute v0.2.1 github.com/lrh3321/ipset-go v0.0.0-20250619021614-54a0a98ace81 github.com/mdlayher/socket v0.5.1 + github.com/mdp/qrterminal/v3 v3.2.1 github.com/miekg/dns v1.1.59 github.com/mitchellh/hashstructure/v2 v2.0.2 github.com/netbirdio/management-integrations/integrations v0.0.0-20260416123949-2355d972be42 @@ -308,6 +309,7 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + rsc.io/qr v0.2.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 2abf55142..a71f47d8d 100644 --- a/go.sum +++ b/go.sum @@ -415,6 +415,8 @@ github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0 github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o= github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos= github.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ= +github.com/mdp/qrterminal/v3 v3.2.1 h1:6+yQjiiOsSuXT5n9/m60E54vdgFsw0zhADHhHLrFet4= +github.com/mdp/qrterminal/v3 v3.2.1/go.mod h1:jOTmXvnBsMy5xqLniO0R++Jmjs2sTm9dFSuQ5kpz/SU= github.com/mholt/acmez/v2 v2.0.1 h1:3/3N0u1pLjMK4sNEAFSI+bcvzbPhRpY383sy1kLHJ6k= github.com/mholt/acmez/v2 v2.0.1/go.mod h1:fX4c9r5jYwMyMsC+7tkYRxHibkOTgta5DIFGoe67e1U= github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= @@ -915,3 +917,5 @@ gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= gvisor.dev/gvisor v0.0.0-20260219192049-0f2374377e89 h1:mGJaeA61P8dEHTqdvAgc70ZIV3QoUoJcXCRyyjO26OA= gvisor.dev/gvisor v0.0.0-20260219192049-0f2374377e89/go.mod h1:QkHjoMIBaYtpVufgwv3keYAbln78mBoCuShZrPrer1Q= +rsc.io/qr v0.2.0 h1:6vBLea5/NRMVTz8V66gipeLycZMl/+UlFmk8DvqQ6WY= +rsc.io/qr v0.2.0/go.mod h1:IF+uZjkb9fqyeF/4tlBoynqmQxUoPfWEKh921coOuXs=
    Account IDDomainsServices Age Status
    {{.AccountID}}{{.Domains}}{{.Services}} {{.Age}} {{.Status}}
  • YkcmBK%&UtRej3lY2>A>}$5sjk-yG+@aKn{^gazvCd>b*Fk1>gL3 zGc@9{#{|s2eMzqENLhpv*|>;+@S)*K4X?wt(0@XTB)JFr1cwI(j-4P58D<&g+7dgK z1fa}L;2bGVK~}i zBGC7tEfB1WUu2RlE6dHvDJd(-%gWEm&c2ael$)Ike$qRcOkxS?=HyC+0)tH_bNjb%`N;N{`A(;R2?_gDT(`8 z*njptStH8h;^L7a^`Q9u=YRiUJSQQYBFnR{&`o!7$i%IWCu_(W$+;qqYe>YJ;T8Tq zem*2zJpT~?N|G0%cWUdeU8^!Tl-1Q$wKo>j)Yg(qsHa@s)W%DAlc}>)tm8UONnY+6 z|G+?v+TB0cHy|h=EX)&WjL1mgtKHp1%$qvQofwpz9k#iQN-n!qR*H5JWaH(wsZx?6>TX;lub^1Cpn3LJ{T3`Xs<8N=YeJP9IOYn zu#q3QnK1@^ykH`b(}$`oC)e=}g(3;t#v(^d9t(-A=fc8RIrcd~igM&A=cd=E@}v;W z7Y_0tYe}gA3{VC_xG1Se?>htZM$<^olk|Ho4BAAYJcA+=wT)Noa|g{{V=q@LvNGgp z!Q&Q3zN1hDIYy~D_mpbaDC8l1tOcCtB*r^;znOz?2(MK{!F6A!gIzPP&-#M;Bq@hG z;Y&_vSEOBtMZsFmnRpbgpj)=^z1 zoP_k?UbEL_BI@$}bt*bK=RtJzNFK@;w?0SvNe3slzgh)}1$SiSuAX5xC3V`n_Yo#4!@YDj@^V5V%N@0f?h( z9&u$c&EW&p?xsDKYMxZuQY^$Nera^?$>R{zAL^EA#pW{(Jc3!?QEA3j^wZe1fk>u_A)k~K$uiwZi zG+r&r$;rBrh5)eAva%Ag0i&&~qQ1*LVswD0t*O!A6Rx z;Y?_fHQY1YaW2P@Xsxr>^8v_`zbz%@w?yPbpJ3&!b-Dy=9WUn>ig%=>>`a6-4Yf8} z>yl!PMeW#1SPf06rm?A^v6#2E;&7CN&`sq9s(%kQNFN#6#?QMt@^YE4l<$MhzFSVkfa;O{`et z`oRC_h>>E%+^#JPp1FEDYb>E}LR#t}I@V7AZy=_iR`i5{$I%v3Fpj5S90!$`U&+bM zymt922Jz+0OP4ANDoTx2#l?u-%q3fOdSyes$#0TJb6L?K@Md&=H420(>qS%d*o>pSy7J%2)5b_wA&p@Cgqrcz)6I z&tQ~kNvKYLKlc`wzy%=lUN&AgkYC|x?k&Up)~nVIJ_zJ}@QUKytR>dVNkK+qYkgf) zXIE{_mu4rKbGMW8Gb9npJsUD0Pq~cKf>?_8D7Qk<{rGG z@!q+Wj`w*(>3H8CO2@lAn~rydpMAVLLJr?bHG{VVnTJV$~)B-_H&|UxUVnk{&A{T;B7Ggw>yge_(9Y^TxarcgfY~byg zBL*;pZ;#F&%$U9{>x7hLofyDCcNoz>7y&Ty_AC{56bQHP0&pZDM&6#s;*JvI_FV#w zL`YxW_)BlzICbh&Rb6K4#fv9ToxYHol5!X6JqX@-`}sw0EP4L*KfGUFSQ7U5Ywr$7 zy#H^!;kJD3FQ+pOZ~f1zH~#YWu#!s~2PFUh&#;A9EY8nv=rWrnVo`UO#d4QYQZW_EUZTH4VomoDa~!%TKPBQw9s z)L_iGUT_IgU#C>N$VK8)#_~$5L3y>bvc=<(iC$vZGok`3ex zxj@c|wXmGU(@hWZq_2MbdDGXcf1x3^O%T4&mwmVh9RKpGFn1r}IB3g$hD1!ckvtCf z5$5^#h0|8%e$2i1)l8V;Uc!k#f998p^{ang9gmH1bj!p8FtDYNbJ!=no@~ZSe-qyK zlXUT#_aNGJ_ggPpOL-SQRG7^}d4Fr0^{Rmm{AZEvKAq0|ST2Qg_i=B19As*!t7~j- zR&|=qRZTFxwKO)=6kf`#Xlku8VS>;pn_EoQ?t4T0eMJW4=s;hWy5CZ2y19>olzewr zYkmEsXm_{JP*~x@Lr0E@(6|Hz(#TLh3M|RYNbhbfucg4~u_Ilcn?5$2>#?TYYI_kh z+bL|OiWg?8IR1UAv|!>r%H}G0&s^njuFPQ+Ca}5A!0?R>A2$H}NQQr%;UgJ-2g9oe zfOltjABNw+@U$uM7%@F~C-5R&o7;243M)%-P61dBn*)=yWI`O>oHmp$Ck=W~!L_jx zWN?j}2RgR6W2*b!Zg1JDOF{@OvQ>!^$iq}$79!MeT$V5nDCTwz8yFqR!&@kQ%cgIB zHiUg0Of3mL>)@@Mu?~Ko)@bO%a8WIzorUEa{X=#h*+j2?f$)#8mGE4Y`V^k{HPDz# zIApUHk)bVhl`!_#x0%{4V&poov^2D|n33p9B{H?Q)mIc16;(7M(mfpc5H#4`F2i>^ z8=6}>%ob!pXr|}AU>`rugOPEjR>RQpMg=|dMQLfTNk4u1bV_Tt3+SOm?AVuOqIiT$Bg=u*Ex^wOr)8Pz zN-J6k%e?cA`1SDi6EpeuZCQC<7k`tdoDolteK8D8%gU$cyj;0><-*AmCr(^Q%ScO0 zJ$d->;Zql{T+O8EcnZ>SAY3?-8Z@@Go6JaRpm0;WC}k2XGu8BL7o>)Orf%lt&IS?A zXVDKaHPtuQmX(y0R5!P`HB}TKKYmedV~0qAQ(kbYBsL{T+Z)?jJG%6~P9Cn#G7&-A z0vEZs3rpn2Rx=bm7u!w+hvO}h<;7?dw&LOZc)EQ~REm1yQh^fgrk4jZyoKR4eejoi z@Ki6Sdr9b8)*ff56@3fd_*l$$0nC}r2CXQW;>A!M*XcN!41aivk`TE5T~JsG3ZH_) z`{2Ldga4+qnY!ejfj(#sjfUoXl8Z%M1LDbYowOE*EQ&b8$dC|lr9{%*Qad0n)lP0| z?pC_HDH9)_dH(8+TDj=bmcs+$++rP>znt8olaG6N@#4kLKmFt&8F`p%n74y?MbMrT!{TICMdgbE9)le$?f-B$kN;TQuJi`dD1Kx4w&n45k1#KEIDfA9QxWVa`jry zK*q^F9SQkVUP*@+=MnP1kC}%V&ktcdzmf6$RyJl%_wc;KG1tZL5e%Ql@YfhVe*k!- z$wJJVsEpxvGrWr7M+^WT&+xMu{&9w1&G4xMz}GXp2gC1V_*{nX9svGbhPUOiKF#pi z3|}$;yc@%p_j!IL!^iaC`?m!IQ<n`!&ux{)>@n=u{s|4V(^U(2d+fK?&4r~VG{E({;tr~S_{{HVTo zswdoLKlGz`JwvCk(u){+Ag%AqN^fU)mTW;(#qc-!mVT?pUCo#7|+)ZAhD|DVLW z-d2Aan?%&C!!n+hL1z zl?yY0aH6VKlm}%UGVPjh95i;%?Ypb?_AtW_K@1sy8yS8p!=D}i-d27f!`sSFW%!B# z;DcHD7KYcb@=F>1au41iFP96vES&4QYo@@(Tn&=ZmsPV7ho<^u`5pr&q;S(a%{506lNgbhr#B@7~h+&(39M5xW zmH_dtbn-8J8+(6Be93X#VeikFRPq+?u=fYMm(Su3(Wa9kNGtB_(WZ1&NNM%n`y)P; zEoAM+9+2!HL?Y0-iSDD3rps`TNK@=wrZK083H1vl>EIgb^XWg*l)yIXpjI>g$3ta|IMcb*6#c|WcE{BuLkkr|4{mi%)Uua>qn#*weZAKq~P zAd&1N2N0Y0E%$&b9+u~?xjA$0C-v|lOSl-)LUK5FZVIuGB5fR|3!S8_&>HBgPoPPS z_=S=m8?B0k31ka>Ap^xC1aI^lH>LO`rNCI2UzB+~9u~FN6uYk%Wzsdgr znTg)!)~}=aWI*D9vX=zWA@)om2q3m^3N4N?tBkC4;Mzpq! z3U`${YrMRDy4u&T-?e?`-b4F#9zK(dz4$uGkO*sDu^vGk_4!5C@F;h8cSFyC9>UBt z7X39A{WSyqH5UDa6UvBRIig1D2;W^-ZTan2S({0zax%Awk?Sr+XK;6Ncb56a#>PfP zMFqHd`MKd#jwHYF<7FI6o4uGq{?I&u^!6 z$BrHAZoIHNDFOZhypsHkMxs%5n!21^yL|&%Wn4pjUALqKsq;>vz%G+jYO1WPL}sUU zb6ayOP9sV3(`Y(8edJ1~0DlrcCw5|_M(q?lDjGTUB7iV;Sj3Gu9R-JFibzWtjnKZ` zTTf@0o8J1W6hX6$#uGv_%tUQ*DP=KlD$u-}T~NjFqZs~IhUXaGsRut1{c{ic=OOgZ zJt&_>y$5w=Tt@V`gFZT`Ti$OU1zFCHSR5N$zVDmWt5>f%(R_4kMZP5>dZb5}WY=0m z?29i>eP7joXL;;Ut9R_#GkMIMMbADp&eNCktK7dL5&5tG#J|P=N%Zacx8SnXV!tm5 zi^rzOcS(ffo(n22uPQAlu&Nd99VVqqm$zTDgs-cu=@KDlEQ;N;>uBv>-*p|MkAJl$Qx#*K)KK{WdEe!G$Bg%6+|pGP~=^LZaYJ3fqd9NPPw zNr&x;lNpm^1LzS8a=#JD{qw&|?_(yB{t+=-Gx?!#d(qc${t;fwKgdr+%JARIKHatN z@Q}I9jpiPJ$g1h*d`ZWhG_?2el6M$ty$_e{zt6<-?)E)#{39Wr znr5G|<7V7p6!tz_@(v@i|2a)aOmLL0EyC&BN%|I{vyEZ&eeX2^9mC?Y*}|Ftdi>cS zVdD()mOOTaysNYC7o$61plymnZ=TZl6zIGL9{MYI=qd0JJ?nQQ%~dV@49wHBd|+DQ z$T{ao<+(ZI9vYygsN=pnuj4{m2dOLuZBUiG@XEF0%l-?6k()mseqXUDIM|&oeg^Ll z8VRS9@H0T|arwZsN0qUtsHm#3(>O2<@{@sTkZ+^+|AyYDk*;r}_uoeEPnO9vZUI3m zVri}_uc&H*M@j0BnZ{hvs)jwIxx1q(FE_Wi!Ntkl%}pkkH{=)MOb$+s)wLd)pG+Sr zQK%F$3H1$Hd7iYkHZ<{`Uc&jhoY>N;R3e^U2|WfoIjlsYG)&&GAv%8cxFD&*#ltTk zILKGy9PH<*j&80Zp)up5y!<NwXLsI z_Y2M(nHk0f4aPc8EBm9HTE6-T(dHZjF``e4rq+lZ)gJPw!>WWTzW!Dcs{P5kk zwDiyqK3M(rH)~IwJc}(+m#^GNzjWgG>a{!5h)TI}jJv^}G4SCTe?}k{5 zKTkj$KVFiLv&g8wBXgIyxjAOBPPzDv4?g%{=|`NY*)aL|@x6O~-g@}Vh3k2R1$j48 z&zw&FW!v7psRh^e@BQt}_4AkWs;a9?G7cQty=TwyW5+To%afCjpWYAqgI>h*W__ro zv%S-7>4tv=JwcqE-CVia8XPrY?&iBC&K(^H`&Z&@V=E3Tv6F* z<_X_gQB;2AJmMnD+N!TzNy)D+uCb_`oh2=2&!=Zx%`Pfx>yR~;v5JRIUn+!+eFxOPq10wOp>FJ65n;rS{-= z>(;IR@pM~lR!OckCU(4w`+-f5J@lU~&}j{B>fo^GiE(jpXU?1{kcTBgPs7eBxuB~> zPG2~G-bl53)QsZY2B)zAZoh{PR8c+~Z%F zw#BBe>5ytODbQ%FBj&R5if&m;Sw&HE9fn4ev8wvuiNZ`|mNK}tH+AyZn_&XCTUr}i za0eBhOA6FzN+mdu~E-iQpe)Ti56oK;g( zgI!d2i!ruT3Hn_P@V&fvotoSq?YH{J1NhM&RkpD}z6!*>h-uV(mAhCjmaRSf^&0PsN! zAHnd+46k8$a}VB;Rnn=7S|vlza)`G)GL~8;MXA&(sdTqnCHuD!J0+mmZ96+c>B9>< zJH$s`Kn2*|4&+g%W+=M1Ls@}}+`hNt)>!Z9RWyQghESL1g}SUyr4gJ>?)C`I>EM7_ zko!+!1llvrh7}qMjQRO@8i#qhPaKWIFw18b=S>fWF1~x!>eb(Uf2^TAEkC_8B05&? zyld@)vwvKV%^T8UUzKxgt;0b{PRC`tUyz5K#3V z&Y)T@=;czqcER(=(I)k+%yX`p;p)W*h&&Dh}8`3wt49%T~pWNKsqQbl)oQGCXoS$2imy0~V*;%=HxjA$f znwaNBoKB6&va8F&V>^(W8plTHA^GU9K|a3u*v4(!fBsZR80q%OxnDbLlw&7N59b>s z`!|g@S0^B75%d1|yVn;j!D-eme{c$#Yu>)JKSNwye{N+0O$Ea*(sM(G&|gEivAnFZs-m(QlUQkGO_|YH zR8U-5TV7LZtgftYs&B&PQEX@vbDWjesp&ZlQVBK$wbnObKG8#J(qBWk#@oxs&)?r4 zzwSQ%UK)3IwTEAzw;#5a`UMRM48U3#d);oO($>)2n~Gp3n1+6dN54FXewl`TnTCD| zDJh{R(>b1ta&*fbje(FXjrD9qMBY!Iu3Y)WzxI^noxh%1?;jk|E!w&&e%d!*!{H&G zHZ3K^A^&^LsV2{!JUM&X6K^efJuxEs-X}XRtX>W~`bVOFXZ$UWd@KFp^H-OU4`D<- zKXc|Cxjw{LOmluao}9MeIUMkOh?|?Mo2O@`Le5IPv_9~I*|adr?< z0DDJ{UxJo>3oT3Ic9x)JY242E#^UUf#*(Zvm#f&h2dRYlVPY7n?E+;yq({OLA2ntxd|9RKOrynTYviF3-@71IT9p!&bbfc zYZq;_;Yb91=#--GYm-qZ~{-_Cu-!wIpXXn>8RT-}q6xNx|?Y_ZvSwDThKO^nB zu?#-9u~uvK=`C9hl%Z9{gm-clcO!EYPRBv3%fjR0BB8igWiIViZJb0iG&V@0lv)kr zJv1RvJ|m~hogEbvHge=BPx%n9>7z%*PMk7&*ytIv6DNm^o;Y#L*x1LO9TV8x;5BLN zxCzln4(ij{S(p6jr(5&jfHQS;NL5@HX>)gX66<|jqNYv_!zdTaRn6r%2heTQy~E@x zuJbPW0P0f(y(fUp-SsRVKmm0nYDIoD9VwH;4s{nX!{;&l8HV>^_}6>zQgHRH5wLou zA$SN-Y~#~1FxRVaLDo1dTf)lD?^CungTLBScKAPiE)lWR^*N++0L5 z?5ey#rD0c>q&la#op3k+(QFYz{IzE!Cl_?lk;_@F-R(Frv%!oIe~a(Tg$wWT5*fx< zD8iy5Ls~DUHp-{X89R1Tq_i<}-==lLV*@Q!nftf=wC!?A<}Y9W@cpVCIo&~#(O-S@ z^F?x+mepQadSc_IeJ3v!hy(q`ErRG<2!n-80$C{cj=VQv+C3u_RmDdkDmSNQWtE#m z1{ZHd@%f7ve*PhuFK$eR+u*PKhthAZ*2`xtg#lcB;LzzZdDIhcy#DgT6Bo??C;{^C zS8`CgI5wgq^$fls`TNTgq4~Nh6jphdt0rpJ+!@nHNwRm%pO2nQGleXkZ4;K8AvhsEMfZ6?5*DOom6cXHN4OVV`sSM>$8Z$knX9?RqU;-) z8JA8bpFVRYxwA?puC17BxF;TIj#xH#Tjp>mc-0+bjBg2PJnKE)j%M)3FXdJi_=#YnGxcxxOVEzy}c1yW_bq$<76buUo-^1~(?HNcg( z=oL#F+S*#7zqRRI!3!rELmq$p@%rgYmo5!YOHysy_*1U;2QNgcC;ovR7pwF5==bAV zCeC%LUif@$(A{8y#*d^hTXpF~ldj96JaE(T-_MbQJgAs|3Ke^!Pg$oz@2R!~Ts{Y86K|{nvetu9f z33htwmdP7mAJ^5Ygn7vYXk>)INgg4Ulm_b&r12o$=#e1w2t*=&MMRl?qZa0XvU23M zB`{XNV1b-N$}vHk4{rN5dK%pI1f~d>Bgmf(l0!E>TKpkS?Gqo_fbILpuY$A>aJbPs zgR-`6XoQOz^RkYW{|ndY+=g}ohcAN)c8D#i)Zi|6wcbB|Cvg{N-tsH;S4;Kft&AM zwt{8+31slA3?9kgEj?vBnyL8xbfy||io@#s@;o|Ig}c$2ig&ipRF05i_tUlXy#n$r z0eQlWuB9uS?Q3aA$d>(dH)Q%LE|hTR=h5AeGu-HI$hpq;-H;=}H6y__bp0_BTr(P6 z<5kkuQkIclkda+hm06g7_1e`d**S>m5d1Z;wo-r1O=j{MHGPux_|?*klp}|JDZBdB zf4*Dh9fLp;k>uG}y$q(VH=dW=DQcX%pIj#pu8aJC8!6k6d zl4aZy62}EHzshXNfq1mNQ+N*z4jLNf?H&;rJUnLjh_F!NqZ3>wG6>9y z8*w)oRqP_RAo1Xn8h`K3pvhBadH8#4)SlK(q+AxfBQn^w8d|5`WZ^rEsG6$0d|^cW zr}v2JDDH1WH7M>dqOM&x78;8(GvFfZHKK-%9uXN4lD?}=FvAd`sQrY*XX#MfG11DliA}3d~$O0uu=K? z_3LTrIR&s0lpY;S$6jp1OU}x{v5q_DxphNB9;hBVa?62ytPiEEl$IZI` zAofgLfPn+b6L0R+DVo!N)M~XqT&&WGIh?+2kTx5Wf88Y{3;Kk7A$fO<<>Ds^7=8=+ z$HY-FQNB8)Ns(aoaSzvV!ycnfQ{hx`1a&+=Q78765t+e%`D05iv=vzzJxf2hc(w0k zx>-81UXML8it^*@*WbCF!%|X&{V3oO$-l-~IUl3(uzc>n<-@!@UFnYshao!X zgyCqej}?`zFm&9ZFC4Bc$8Q%_#XjIUOw?Mi4=!g|mb@DDux-9}h%aXNK!!ia@bwIj zDUdS5o+$*Y?k_(kHUg ze`KY%Gq|d!bVUf;i>e4=`C=3yLOw?-gYNOM(er`mGM zB@Wv1j_Xug?#iXwvhSF?Y0KT)xhR~1Eg-7}q&1i3F&B@q=P`FwX2W)>E$3b0pe=ub zR8@qF$fery;xTq@+1_5aJ*}<1>)F#*veuHZr|s%_T8H?)<9rmu+s1h>yrY_T39S_= zJo8H7nH557dE9I*hb7x2tS76vO~O{Pnh!+674H%xd>~r~9VEg5O7-MxDmf%HAsW+w z9F{+x;e8lh%kXCyzGeV;Tlt=S%CBU2^#JfvR(>nP*Rk?BhHvY^J8bz~luskwD4(v( zqI|0On)0c~a66xlfqaM~Y=t!mddei|DFNkh@HkLVl6A{&*XEQtrE1xMw92}=dTM)k zmVb(W%jJh%XIlM-gn#wbSF1LX9G${>{(NT#%7`){wzPe8u(htSI*}wJ1;k~t%jJ(# zRp-`yyYiLSUztB02ceQ;wx7ND&WcBXvAEO-Ykq%9Ub4>MNXrXwb=SB$P%=UQbD&-@ z?g>I0jzSyuKHt~1qU>6FT4ur3?40z1oGhBXQr?Wb#8y#P^D@p?4MUUk$dLoTo%$i3 zCc@xC{{bJC7F&Gu3D#z-g@2gahf_u$wYFOE^%V0=Vj=5c3fw^cEqY<$pW!6oOGrNF zqm3h(I(<^54~KCgu^1MR89YBZa48< zLv>SI-OYcx`1`By&ptEeZE{z&R~SvX!cHXaZ7@zI9Y`vcO*D;OSc};C@s_h3IDRkX zN0-&$N8$lAxFDx18jytu@7GQc^E3BcGvH_z->!zd&qQ zv&N9XFTj0>uAjdd63|WSh+g#Y!%O3lEO>nq*YN*P_a5+3RoffrIn#T3@4b*ffY4h) z02NTNf?W~4R;*yX*PKiMyH^BMnu$)rqq-<~r=OE44f|NVaN z&B&BEYoC4gUVH7eSNYb!d`w;fbr$Hs+o^5z3vb>9?8NP`$MO-j`4yhA!gt=Ceb=(r zR;NZ&sZ^1u)!TE*gjG-9J0*%L#spu%)#)X47gwVXb%h?#BHm&E9&xasrXOkMy;^N&Pfzb4)z{Ob1$JGW*xE{@ z>+aTxtgM-vJ}ZSCjlF(;`pyp0n1IWbJ34Rx5x^OQHCF0ihh_~_9~x`zs;uZ}QahO& zi?}x?=H3XlG4p379o>rb9QxN^5otzuN3+&{v-BOpZ`zYaemjc^SQAT2!EeT}od_0c z&-iw${2tQ6G`0l|+rpr^g><%sB=R;w3-xRZTDFBqa|;@_g(?eLuw`3FV_OI^w;*9# zXttmQ5!(WnZ9!&k!JciQ`bGM;o%<>|V3rA~tjCjDxO6u+|v$6vG zF;~lo1&H%*Xz0`Q3gWrq>YlzR!(;N(zwbMG^xUa4$F~K?f4>%q8xp6~f`W{UKhK}L zn1__VSQwH~vt~SOx~~Y=ZrpzC*iRb)C;SRS2c2catDk-JzUkBNee~JY@m%$6U+eim z3;csRfkNm&=;q%uFJ}JSYcW(Jj6vtAN>O28$fTGB4?puRi+s`@8-=vISo*$QjQGR% zmqy)xfArG#K4j>Vix|$@PuCJ0!YA;2o3m!!+ISxgkb(#G5<$PFN7Gc_*Mb-ePVYdM z+CUq0LW!0O+e$3WAHZF9cJ?+js5?6a46Wm16eMDSy}eK@8Ho4k0+x%We-J421G@h1 z?gs6^psq(V(BIqN-`lO$(vX(1G>;6Gz;}|rf26N~G(v{5qqe@UuR+y0U_z+9q{?!}_oVUy#&8ICT5TTkQ>be8(2-*cOu57Q)RfSg|d%-e|$>r@VQEk<-WTzogpS-j)rf~}Z4MFyv2$}%U1c1gyRi`R0F9A$zRYPq9 zf{2VxOdcaNh@=xI5-4_x7tisXKsIRkv16AqB?24y13P!Dr;akQsFNmar|e>4Y_#nR z^=v$c?i!d&X~2S8=}+E{qLkcM=8sLC3n6}m$^*w;0m)8A=rwWv^UueO^J1u-@itw+ zN$JnSaM)ZV2>WU$@|QvvuXDR?Snoak95yS#r1R-*Y4yo7_E|qIyit0)|@%>|RD#`NU+g zM!V{RXbk;#)H4kAjB){@evMCqmdXdf`kZI)hQCrzdsFq)B^1E=6Nw5ooLQ4Fu?mL! zC>{*|(LZVpgMBfEe;Hmzl0*zV7Su(hLwkE`du@-pOO4V_y*(Wbog_0PogqQeJVqjd z@0;T21~jd(AL^72j(AwRx_Wxx;pXBBuzWik6e~3S8o?J>vk1uG8h;&2IOvXRz$D?V zFhXF#e!m^+T6X|I*AE&R=1`}ez)%TXG;l$B|hvNi5fo!57%J6_#1 zi$eD5*JrWQc;_5Tc3BC#%ht0kyaZWj)HY1}bT&JbWDG+v2N76vv$5tvu;xM_SAdd_ z$L+G^JVeoLvPH0IhrVYx`SK|cc%q_uZ2&_blG>NAvQ37EOx$ghQXS8{@4_tpk+a#mLw)uv!%>YxI zPZ`^^V%z+Lv`I!0yfDwU@#t!CLU~t+7pdq z>re60s_L4`s;0(P%rkZ~PCVJhJGzJ{E>5S$0AhbYv1`nnVC)RQE- zVj-c8rq;8xKNxcrIzr9y6$AlxCivu0@QE44-3_3NJW*eLeSKMZWkVhIrd#+%prhck zMBu!b7Kz1T9(A3M9A}RiJ7wDV$_lB?^lr|)@Y=?&S#<% zsYPA9_JX{`xzrD^R{hL>!e%h*PRw^JZ0|>JGs5MeU0q%MxAUYMa=H`Gr6bj+v7xS< zu+ZS*3gywyn-Tju zz~u`ci<#MtJWbXvshfH0iN_c-Yh*0vbW)3hmYX@MmgNWi4SpbU=?=SOx^b6WLv~4H zTx6(umz)I}f~4>m=8{CN%>o~Llvk7&V2Aa{9op zr_UEtxi=cv3yHyB`on=^yT6@BuzvCUx8WnsfUZv|{?GGs$NAbDT5H^E!F*wpJK?UyY)+DFdt( z>hdbppq0(MhbDRlhs~HVAu{l;XQJiyp8f$rK|U_xZq!`rVo;4r&g1oV(bi%FOlDth zY*gCI*Hk|c=z27&Iqe; zc6PGobL6%TQV~L2_lBt*&U?39VR7#UB%)nTD)>YKREcJbHO>g_(# zSk6UE4>lVCTYjhy1AuRG^c8PzxB#nRnfK_My=JkTlPs;8rA=jNomko#BhVgUX**fk zt1PVtOFQ>EtrUDW6c6oWJ%|~~Q5nlshY?l)szNL`H3f~ZaA;08Xo!VFn=uQGF@Vha zn=vrmNAGa=G4py~h93jV?qhh`qwE-T+%g94mW^K#OPj>fX0YQopQU|iggzItv{!GT zUBJ@5G6HQTOPj#bUSY3$A4~hrb=u+Qt?N% zT5moh(AKiFa+Wrer441T>wcZqtep|-qX2t^0NN8#PV2W3yL#9;8vfOkXroo#jp%i= zCYbfQ;d@-kUbTn4YBhV+vFugF*RMJ}tpiI-v$P2;Z7oa78G+U$5%nysNg@g@8|6Iq zx)PSwBoPtV$UvIDKEidyEUk$%-eCKjXW9MMu(T+wNv`C}(wZc3!3ftq&(fMCi5E+& zV6QuM1lnwt)`q?AUY0hMrS-Z_OE|-Pb|v{cX3p5YjZUOsfy*W2^!{kt9p7MEjU^4x zffLLz8#hmWc~15a*6G|2iyBS zZ11nIy`O%)_rufHv9wU^$cTrrw5GKvyH0D?o8G@!Z@PLp;m>pDQ*v^(y_r{X@>M0- zxfcq1TT?UBl2cN03UV%J#qz zWQ>WRw^yVv)HTI%yL)YHlsvUsF6oUXI90rF^6AjmOyz)iE4=CQjYtYn2216EGpA4W zmP+!I8V9@ED=Qo8)wzEaD+D?XEZ+SbyO8nY#)gnhQm)lX6oyU>VjafLa&l5arA#kE zaR5C>*ULdjq^(5P)!CyA7P$L+`ve3z)AfCTM{_`YvYoH2slmgQ#HhNud5(?n_x1=H zgE^Bc6neG6)gB%I{y;&2l4oVrqfs!j!G0YOKg41eCtZE3$x>%Nx9~Bu#GXRV;tZxN zp1~O7C7Qgrk(UX~-|1ifiw72?69Cdp&)ins`uC1vi zLAND^Wfe85+QOVGdD&SR8HgLm%gIHgGD$8k%c^JwWLR}pVnSk4T3TANXL4R0d?tnY z`T1pKDM{H`ATKOD`{$*U%Ng0Fu#GWc0~!g1%o$WPEGxt5v|p_ut`$91m5Pk4%jBs4I0^LJk!%xm+AUN~!(m~J@n-FI$IvVq2eOXm_wTC{*r zaSE9>ZQ7*qkuxG(Q8dCzAlJ9%3GYMV*n0A`fA3z=;q|{BJag{cxjd2Qn8)%@sVNN; z3cpelmuf|rOiRn%cQ0JH(6tR2r=K4e-yJaVU;p~od^swp5G)R@z`BWBFI`|h?jHODz{@hh*q;`2kaVT$2>X@aWzc%_rm!x z%b$Pmxe$+@zm8|u>x43EzekWKwOF2?R@_vdck$vyRiD#}1z4dw;pW(fS3Jcpj29>E zf9JiYsi};cUa#&@_yz=rj0qnL1iq4@YT4vxpM7?kpjUltE_gua&Dl;=`5%OLGj;pV4ZEbFAtgor6ZtJExd(;|DTU&c)cNg%YnwpxM z8`bqC1qB7QYMzj%#|Y~@b-+VuY-t0KNn@i{-=hY3Tbo$Q(bAfpCaMnnCJ^^hs0{8E z80hB*v<0D5OuS+o$}19xD>G-$iH?ejj*gxi9VHS9xN#w3VRx@0boT7ov!d^co*Wh! z7#a$~Ab(UspEz-*Oe2s=AXzj$R~zEzNRX%{8|amYCHPM&l{+~)+Pm0WNrWP62R~~Y z2M;e9-O|eUcH@bFn&aZ{C?eC(cMJ#)@B+pFDwo^R5{cXj#ULdT0JS(O5CmW~V_c+% zuQM{vCoz;1sJUh?1TuD9%PO}ME7?zwe$FVzE~!2R%zr2+2J}Cb$0dx*Q8V zcsqar#ak_U_IKyMVwQFWOWViNs#x0I>$Jm5N+Z~@hR8?MreQ#Xmx(4xNp?+wbU|u( zJq(FOqMJf;`v*&|m6q43N(z#T`g_u@U4rzua5+6cI~^6hQqHDj;ls6S1*pS{q{_N7 zy9KKRHpyhrbL4h5BG?;#yEb4m8}^1L zCh$5_zJjZFGYrSauy%K2;qAd@xd5McP+PDR4`9>$g7@xAufP8KTq+ipp1dgfm?7uK!A%( z;N$A*9T*rm0XdT7;fYKNepeSy1QZo{SQxJtT-cmnoRM7(TSR|Xr@9tWM8`qE9lIBr zZQ(cD6O4xi)pj}`8es971aAhF7Yx zvq!-t6b%=@g`5(u4*vGnQE&_ODue$lV;Qb{lA+=Mif^I}s}LEq>L0LAxT>vxwtAvRj(>6EYGgv zYSOP=y^?gTu(l{A@k&xkLPBN%YAz#3vanR8szx1q=r2bOoi8fRN~d;{1yY^Y)5Am@ zK3rJX+Xx|Yt)RA>=5e`Ah1G495I9+-)s21JBZ>V3u*Yi#wF8j&{r!nYl+&xbOHQ4< zbTl}4|K|0(@*4B5o!Wo!@WtZtMoD1Uct0seoq6=&zLV*}!Q;Z3Pwv>TY4=W~SRdH) z%ML0DVulWX?KS1t4I6S1iVE>f$t2W<4F}8hUJtHZxpI+{UUhr}YR)?wFS`8Mr=NcM z?lEwaE}(t{O79==C+x)_9HFl!l4SP1)GrA2`U8LYk^j+~@4x%@yYQ^=BIy&KBB$(Q z-q%UGajRnB)7l09`BveI#H17FMGK#a0oL$Nn5mVVmmdSv$hT0DexrVl1E=m$OC^5 zIiPrQ*L_1(DW~^dK2_8Tw5W=T(wfT$(>Z+4i4oy;{am}?2@&Ih?C9S1ww9*$!HUMp z%9_T;Zs=k5j{KJ9o<;$z;m0O-+}wAizX&s#A@fQYzu}sWB5glo8YM4ReG7 zcrClgoSmH9 zP!mh8<64J#IQmQt39_|8`P%VB#7Pt#9gHvXwnGH{5OOVb2)V?bt#IVFv|_{7>bYEf zzZKQm9VZc5scI@KEAtB|E7tmGJ_n1uQA^GH#yPlsJ8e%v>e&;ihxhSB?BRV3k$be` z44^C&5(`TpYhjsTwy@lZ(sw)YoJ}@TzHFqt6SEAcFOUc zyY`&ER#Ybq88<0FN_8aR9J~Lfjo5wT#vJ0FQ+Md)QtZQfOLaa=u@5hC)>R!O`|!6o zvK|0$|BMx|%d`*wihcMO?9T_N4L0wud}rmWD?f=aoc?NE+;`XX;cu{7_ixk&&NDy| z0IzGhrBbp3ixqKv2N$Y)z~l#b)z46|#k~J+$Tx%_tZf^77pbYueJxGce>=2|$yxma zG<-7sogF$_JD}kSG~K=0!NIN;AY1|j!aYPTSGorUNu9AtPYL&w>9stbTd0?{8x)UF zKQA{0PZ=z1>UQvPaz$#NPneIkUE~$y7d$4!B_Lu#XpryN2tO|`A74KoA8H!mOp&a! zQz;7x5eBoM=~%5bY|funeqPUXq=X zRgjl-=3HV*MtX8a9%N!+NpW^gdeX(q>4nwRMaUs%{Iy+8Re7l6m3scrwoPX*Bxm51 zSk}eU}UJIR5k4C&5K%xuzI$#fm1 zrR`D|TSdRBs*~sB$n7k;n%|{T4R}RPacIb|K)FQ5-|OMktL}4yhGp&6d2mY+mxB|t zV%AIVJ{Jy0T>rqB*Y4#Psp3N%?Co%zaQF9h5R0rm{M_9TR^;gB;bdoLXJhM(Y!EL; zKR+kAoSMZ5wc0^Ft*{ZtxeFC`Ha?1hPPEyh=Zoy%I)bZ6+tN_i0!@~c56dvKPh(~Y zk~7hYo+4V&-jd>dqnO<_7S3+CmCdOWqnP2?(6M7Fi)Y#Oxw68?|Jh6{|JhWBPc0~z zJc{X#rL(i+ESz%AibQD?_4{aMUTN{v8+V3Pn0X~;{%6em`kz}WrNhzrh8 zpF98Osei^(!u@g)PT@Qbso(iN?ha4Mk4L`!M?z(qV9~PsAAV%%z4zVo>@%-Czu09I zTu1Fd1e*NB@#Du2?caas=+UG5|9jX{!81+&*ZPe=U+Qk{Y;VYDZ0_?Lg&?AOqYQ62 z{4oglNPR7=g%y`hpbq0`WRuzYaBHMU4fYcp@I0^utbyf!1q$G@-(AR?7$WfR>dcNWDs3A8D&xQxNoyr9f40}@s<;6sXxj>4=Ru0TI;Xom$vl_<(e+3{O4(55e5N=qtE+<7P`?HW-O zlJJ^~M->b;h=(wAG!&&nF(h#zO8btE+!|h+Ork=@^PwH? zO4UdriIrwN-F)FIrO){S7_L*#?*HwZ|8C#9dHY8nZT)%I_VvHab}BoybLY--&zOJD z!=dOe<1?1vNg*mk4K+>|$CYpz8c0+~Tery^6U)6%)(D#^8y2$WaI@6{qdI6WtEs8! z;Mm&Vy>wboSOmgCCWkCsKHt$RFbI|tPdhmdIPLAkIf!BClQ=t*u#l{a_4P`7YwO7& zFvb8ZQox5XMk}^=bV682xE;!rxcHBquxN&-kDq5ykhi;I_bJo)?}N|21P{DTnb6-;pg&$af9cZio43NEf9_hI#XR9577OY2c5?b< zk=-rh5#fniC%ZAbzWE{s(OB=|`R}mU_22HWoO7(i;)aapp3699#&oxgmH1u-za857 z38~Zb73ZzTUm-f@djJ1CJ89G`=GfI0vfRsS`Kx9m_ffOl)Jc;f#z)SXG2`JEo}V>q zW^|{KKZ*=P#T-VR?L-&rmatr4eh> zJv{t5kC{7hyt|ukfaPIxT%(1?`6dgE<#BY6gZq|&`&NSc9tZb5 z2JQ<>%Fn-Oxr`&gUKZ|sVQq85zqee9(eP1Nv;_wAhtKW*9a(BxHu= z(g-QomTrn+ZW39L*9S4z-(s%+i@Bb2H6<-QJ>wD{S76Xe zxp?LB<>d5??9{YtNf+S1P8^93L^l+mIIdpH;V}Lvu4SN*gbp7FpM%ek!W;w2npmVB z@l^^hJ9{K?d?5Yys{^?MlV;4-=KcjR_jv|6t(y{M_$cD|nWPg34U=YuNW00FQZTIU4V`=OR`42zAK!lTkA!y zpLy!X>}it%P##UDX`@EHm(H0wX~OvN(As9qm^Nj~kyxTvgr zBxzw57Ke=EYsqOaQf6giYpsw;#4?F!q_uBe`p#(H^3^Q{*NDJ1#0r5-68uAq8~pQx z2(ZZZ_$Im3E7gbkR z)fVId^R%=CR)L&~s_H6u0xJ;qlbM{H`FH#%yV;0vn(?rfaUoO1!0&Xxd6#$X0(lJZ zIDDy#)5abF$2)?)4D6Jp5DwWU|Rqis5xIM#x85t5X}e5QRQlK7Q*hgW;)~wk|D~tL5|KXrah5<`%s7=KcQi zmV+cX>jT5PLMBmSH|D^86aind%X31b*oLJ%EJ^EcWYW~cKasF;@ z?k<7Buvs`cd*k8h?&j`+ASvI#z>x5Wv46K#WH(z*S{TL+dniZK)>Yd~f(tcJtZ7=H z)eovWyLz>8);gV~yX(}L*}*P$PCh=Kfqu4JaZ-{qYQrcLaw}Uqdk0S+XIt+v9)GvW zWH%dNTo_45uL0YAUrTdYQ=B5Cqq`d&L*)-cPitGh(pulwVbL;VEi5D(d1w|)+7)a! zv2=b5a$llyiS3JO#>YZ$>^lqHTOJhN zq_otOl=RF@qp$EvGOT%(gFLlP4|^70IN2{gtckWqu&te3ZY$}5QS93B!?Wgp_k8kfP5V}P~M zKQU33eDJjomQj-!#)%^k8+v<85sYV2N=j~xV3d8nw^;A}7#e&i@}9BrVQnRdb1f?? z$;&EhXhQY8!tC7K{Ni%tS!QQu=9N_=jJC0zE2kvxJW9nNrB1@B%&m{}tVzo+ZyTV2 zw%*xLQIUxeDNinwQwD?<2H!W+D?TiU)6nNQX2E?k{Qcp{^$GBIk;$Z1_6T5fKr~{I zpPRM7ZcKzSHTr*Xc@>pkhP1_4|8!~g`kD8>C?K^kRv8_UsZWWg+o%(QA zVl9HWl%wJo&Wi_oqNrW}jBVD8l5N7v5m?kzkPSrRqJpfnB9w%xtH{Ml$tJPQWmsrg zfI2R(P&FIGT7fe|)i7%;W}S%XGpnmd$3AN;XCGGb46o<96aI$K)`5Qz-BId|&wZOEU-sb?u6$NK_8Pv4`W zUq#eT3~l{)vsB4mxs$!JX|#gaD?fMr%EQmngj+^yBirkImezIzTCzD{mL}dZS}KS|b zm_*KQzAAEddo||fvzq|&^!DtyzHM|6MfVfz0@`;HIj3q>1nX87W5&82-WRALObI32 zpKvAd1-=GQ?n&e?G~rCV3kYkPe1UiBcds!wZ;bKkSB+N}s7P*}dyM&%JUoAxR&fwJ z$EH=>$d2@lJ!E*=R(6hsENuWg!>0X*e|@Bfr!8P<3FZ}8EP$JL?L*p1lBl zFflVZF#*a$a&BH~Ms8M0BF0vjn3$QJn3&rd=Y<5LzP_HWuBIjwGS>D(Rja<5kapz~ z)Y)@?e))5v(y=!Ej}s?O{CVNRUzdteZiMUVDe?7EZH*z7qVfF1X5X<&8#qC1+S=>R zZ1;&!V8)O{q@nF_az+5EwzY1cUzM2|8ag4uuJzANn>Ky*L$>|IWzd6{J}}3nPZ{Vq zW-*d*o_lGjtCj>P$*YT3imJu#+(hq1j z5*hSCYoCai1(O2;va%`-;~)ChTq>SCWviD4a;kR6$HyN?J(V3Frjb~qCnAZpy-BA7D=-*@LMvw(n9`BuPeqSLl=}QzOTX88c>_x3{~yPjKX{2xnU_ zcgKJL!lywsr+4nVS`7ufS0_T*KAP6)2722X)Hue~CGFaJfke}CgziozFZErR>qVIB zWti)E6peoz%2r_$He;MeXv=`MuC~1)|BoFT<6-TVZTJQ`9L1GIrD_G0$lzegTjqFUY?5(7%d}t>z#MseRsu+za&()Ds98OIdx?QK}&Z_ zP1U)J4gU8&gCJ-n@1e!7J^%4Mm_mM|E+VL4142xmW$gO;dVAF${~LwHSH1lGA9c84 zT1z5~Oiy}8Cd$Wi9(U_Q7d4gHPcVi=YEQxptYuO=1^?snrIKN0_(5t*p7VAg{9M>b~#R5{J=i(lxQ$b{@Hwm6BFxfbr-(#+SHBMw%9+ zG1#vW^zQiW@R@|&`#xL0t)xt^X}q}Qn;65}s3-QZ;oqFS2lvlIQ2BeP(eZ@gc{&!2 zKgz@kSFAwNo<;K&jjUFsT|G5@taVSb+!g?7V-pP9$n?#4F%zWo_>D5{tgP)3C}?SO-L68L}2n zUM#CAt?09+KC^5p{f}HwtIDfddIxLjE~ghcxlErJU?{kDaQo>$R80-Xj;;Uxr@yH6 zqnbI0K)011lLK4^EAsmcsuopg@u|P6JQgf_U;HSO;N$sx|*iOj$W$Qk{O#i#oob1QgN*^?~<#X z6s3`DIDp?N+#R6IJE#iP3Y)i=Uvb}9x$@=tR2-m@ey5Uw#)|mt(5|ji>AD+h2h=UyH5`M0 zFS98wd0+|Z^mq+jZ&I87p6g_OpTZ{DH0I}VdG;KKXCs&i=rdP zg-1kAY4_R@04j(^w z__TSsl@uYdo)Zf^VT-2Gd&r8b?a^v_`^+m&*Bq@sqEC#a{yVw}9J1z?BL5+lxBR*1 zZd-Ozzi3)OwZ~HT{*(C|vh)hllFExR%}cMcdQv1Z94wkY{~@bS<{1L2KCB_XSC z(v$%Wpn5Hzy*r(!DnFyS6z#Jfr*^t^6%CkPaW7)-Xw^cb2^pm=@u`qnw#4IU_Nm0u zz@tQ51^jnk;ln3v8QW+ofu0N$K{2rk5o^nQvp0{N2+e0n95wNoL*XifG9KQ??Rf2{ zJ|?=o0Uz}sykQMABR~-x4&u*=16)>E3|ioEPgIe%{7n~|0W zBV9>edh+i-A4xB`c>Yw<)yrwAmoBDcXJ=ncxt5U)|5XVN6H}msM?MD!8?^+qEDuzPqcTrcnd+ji*W3ypiFG0rDA5GV^5{&vl4MBQmXOh0|<` zP9RFQ!ZZ4#7$~&HC|St4*GFtb2stC@PrawLk%N| znrKgb_~D9~0+l&pmLI#joErb3a++0LEzktAFXUNyjvF^^+>+PswX+$32cm;xWjplS}zN4#X@dM)k?qI_W54nF1hE6VV@NgF4 zd{S}cT5A_$*Dtr>dw4k8h(#o(mSgAR72u(q7Qk}~x3TrIw?TG>qie*xXO~6#JA>LM zeB!vdQDF}D-Y5aYnHK6~#YI^OscYDzg)t`n-wN*g1>8q+R=0rrHiP@-oJ+iR zEh#baG>)m?Z`_q|{?zf&@uTz>hMY4ru5olMD!z$F=M0T>939t6Z(-ZvVjurEPIlb7 zU6mHLU%X`!(UZ-81C6B#-2}#==QsuNzKiFH{$ZU9V%sv8&~!A%+&#JZh?)(YBla-Mo&X$V!9@GBx8{!r25?q52)rQ}LpPDwrB zycxb>pu4@ZFV3~8p3k?rYxU~YPfZrKUjro2!45Bf|8wVDoTTw`Z&@>3=5+%KdzYCH z-yMy-kX5|x)wUDgMnNiI1mVeBF-~>?mZ}fLpZCk3Tl&cSaKNJ@;8o$^6!5eI7 zI7f_4_ZXkAaCbCWFOM-5kx+`f0FCt6W6mN&d_ZqU9bC^<)paP9p`*H68k>z^MJ+Ar zp1}bams(n(GVu`@KYq1b?N=tH?H9r8#;d<4)41k}JFM;UCONLg~> zV0UkSU$2I**J}IK-NaGO;|h3ufmjWQ9t5wT%&jL)4|by!Jwh934woxN)n97~XQ00k zr>8EIdf}_pgM&KSwBLPz8GR2kx(PE%auin+ZR^KUwG)c2%V2RD=uc!q(0PXgUu>QD=fuES>ggRzBf=eOc!Y9l3b^#EFEWYuT3aK|Z74`&$(pIB?+T zNmSKvahV(iBNA@b66Wyi5Tb^F^Vmntl`L(>^pRm#7-~e|iUoLa9v9_K%Jyb8_(@K@bizm^Ht@U6@8X6ABP#oy;7zFwu6I5cPA9yv@NK#H5zV=1UE*4 z8}9`-MuHn7!Hq6Cr3gpMO*r@GAAcM>otStnKP^2cGdnkr+Qm?*N+?5Di8lZ--;5wF z2LdHOU+Hp)fEMgNbKvmVgbSBaGLn}pdooPae)SO5$xus_VlOWlfho|-gBih;#Sj@U zJfI{n@&uA#5g2DPwG02&0diw2wZjM+bci}6$i8x}*={`S{6EA}?=hsbKxb!JeH^E= zjlUOO9mKLRpqqDWOub09paKeUWlK&^ zG^^Rg#o5Y`DT|gp5_9*G2jCMh4lT42BEj_gfi2 zVq$=?+1X>2(s3@$88uxJH$!br_I&CvHipyG2czH&eo@`Z=P+0jz`@fWe~7HHe=TRY zfQP>$x9|=W1qBr;!`nL=+B@2kuatGPcMSAvTJOX`Rh4ywizEav9=UiVCxPJtK>k*) zl9;#(d7C_CxP!ReARJJ2_XCMVVkJjz;_#d%6g>G9=2iksc|&Lza=>OzWw^kYzm@wW zChjw~ZREgZ-~xhP@d>y9&?QFniqL|R+S;m;k}N>R9X^;)Sd^KTU0PS1lXorw)gw<{ zxNt4+T547XLbj;i7~7#|qEtqbn3$D`GTb+>?E)pj>LbEiPW`&^`#ryH*|PKC-W|Ul z{xu|c?}0z}C8Z=MrDmlpS^7k{pdEDo563=<%aX1;)@4%BvtO=GEdlr!1lzr8q6t%Pvm(>lC)6!(p8{Wr^lWf18 zm~pd}X8P^xel#3#ix6;t5U`vhrcYh`*rW6AdEmjA`3oPob}3mp8V)gD*DM_zARR2_ zm|H}}Tvk*FKls06qgy1101{*v%%+jpYI<9HeOr5L(&f_jwsvTaEu&$xTO^DFB#fnO zH~n^TGa5F$MfM0FdyGkeBeUgoIrId$lGqMO7T_{S$gy>`#U+5uC@O;8ueerKS5sMr z>UqWGwWPF2aS6)Im6lgx-H~{}Qe@%BK{goTpgqS4xdNd`gbc7a4!ppOh#u_g2b|cT zo}u(>$3nyS<@+BGZr{4)r{6ZXM@$qp7MweLct>TuILJR@&-N{AzfI_J@(yrbzh%n> z74;_*PwC@41|`Ag&ri7r-s$B_mXIxvV~C0}G@V+%etnjI6vK^(bmJ)LTmEY%u~R{erdjt04L8$!b~Ht8)>!{}c5af4;aN^=fhq?1aA}M{A?tmE}+R*5+r- z1IXA>)V8=pZH}eh!5x*Bw#IQfZq-v()BE$|I93WZL#r3h$MyCYpGT1^9ma6NX86XB zg${#9E3iMCn;I)BFiiE04UJ9p73CFGRn3j~V^h7Vs-n83xuuCz#xagl9HP?WggWRp zC`GBqXwn8oWDo(DQ;4x-VE*LBkF~OKadL8TcXK0PL{`=c8+#`gXID2UyErwVES{^A zlY>2wIq}iWg$lVt%h>uv20q#a#wLfMIBb5yWRn&d11`9sMW&oTpORhQ(9+i0(^gws zl37%go1dGV4gJHZmw3JbuV_-q5);*JCY5aN{mIGqFPt(jI&#AJ@Ic?-Uw+(lxsek( zm*|m)mEx+Z9^($&UC0QMkKxF<<)D%-%!&pC^C2nldCNN6OY$?XB_$D)>=Eh=^(BAB zUD66{5>%*?Yh~rbRI=Y=bt35t|(Ru`H=2eW={a7=^ZyD6qLeo5Dv}yK>>|S*Y6=&z%Ai%ISopbak_F6fwH-&UT7H zIdm0uqiSH#2Vv9-k*2q$AbF=U#_%Rem_BJ(Ci&{%l{SIE?yIjzX;z=Assa3-?karv zF`)Xs^wP`JATA^FiGDcwuyLRduXPL=;}tQ($HUdxLChVL2y|Tyl=aMK|8qAuzm#gD z>QK#5fGRJ?sEypm!UMgcDIKMYjiN5Ye`948630_m>kof1pT&q^UunHIjxP~b9*zO< z7?}=C2>np#+90_Vbof?P8^GkWwc7q(&A{NG-iEIS76hWoIeeM5Lhj@Q%+**QD=Si~ z8-hcN=oui-4pMx4=hFi{QU~K)N!A9ncX2hf#W{_90goGW*L)uXSKu&xVuY`~!d_u5 z<@0InU_TryHcYIoy*)0e?d|UCX;m8^9A2EXt+wBbwldD5wJDnLHO%7&n8yv6$Ja2A zuVNk}n*cc5OxGpfEwG^`q zqeSPOXIZ>v*SRJ#%WIfVTdBf21=4g8DEz7GWgIUt=*M2Gw24NE;iO z+-;wLL>iHV>1LMxt)%rGF#{f-hbjtYggdPL@8B+mW-m-Y2F@TCfq?)Sf*8ShB1 z+hwepUCN$03)nNKCwu06s!6_YDZyJ0aIYBmL~146xrM*Y9L_ z+5whU&HlQFrPZ+C1S8NISlUkZ*VAvgZs&DcDeielW*UWh!Y3Jq3POCcH)(*4S}vb~ zM%buj9_a+Cy5R@(AQ}VoH>)9tF(YQ5{!Muh2n#|TM>I%xrVy5&v68U7CWQpb4+bMp zp5?q@d&AW=8+&6Qy%8I)+l`OqdSh>Z%*y3C8vja@-nbik)A@JSuCeUM*nzGgw*?`|Dzs*2ERwBhZ>^crIdT?_+5b*>CCBX>X1L!K|99y9|;gyf&7* zh}=|3lw?v9%aF@Pui=me#rvbndjHtmm!s-)cd zdK?EC&b&ZL91jjC-%zE(&OIn#a?}KCJ4c0w7d3|b-|4P#JSaaC?>1=`ce>}MlVne` z&ZL{*I9o}Cjng$6;Z;6k@M@Iv!;L=A(5MBtMJ%O{=jj!JR24op#q$ija_W72^pEFh z?F_~raN>Dfe*pk6Vkr*0gZ=ZK49~lk?R~@3TDIGe5g;*a(wp|pSg!8mG2>phE|%w= z(nW2?w#Y4Cd9QK5!{2!D%WlNqcy#a>>LJsf8Hknw&{70i@&NyMfPeUdea8=Wbw=Z) z_B8$L-e|)L!;7&|hNVobSglS^PfSctSF7XsuA+XAFjo>Q_SyGyajpZvXe@~#7O!9b z`s)V6>#wh0AI~q63O`SzJ3!XP$-g2P>Nu?VeoE&E)on@$9_@4$`WH~ z-dGM7%*pB^*u#u5z0YAJW@049V z23I_T4ATt{(m($kO}=4}DTZ;@)DZsDg9||6uqCZ zZ)j*zb9hqtd(WRg-_fTJ2@ZiBnYUz)o3>${Pc8DOYf0(aN>o^`sHjv`*Hu=YJPk`) zO|4PuLLm;G!2nChI*J#E2#;uk%pi-!EPUo;YimOs(1@9&)xHFoPgBHeNPZiG;>}SDJ8Zn7F^4T*s+75 zJ)Gj{`8Dy>oAa^Wj~LQnk7=a7Fe~r$sRyGe2&P!114J2C)2mi7baHY${r%*4>$Wbz z(G$I0gvZizE6Ona9LKOQhW7G`r#%AWt)1Nh;L0L(P@ENZ&MwqghND*~7?@q+DIL3l z@4_7|#vQ$cJDP_(nu|N~9qO0J&pwxYB|W>gIJ3CCFfD`H!?+FgKbY>CLZw`C;|@b> zMeP1B!;dz+WLPaw1_k}{3pX5~scQ=Qb0vDCZ6`kah`aJLPqZ+>M#K`{mtA)HdVwx+tdvb|e)?4+g} z_T1)-yvlOw0%Jea(0}u=``TN}6?&dcxTllvw9sHXTNig1_dwWhoosAeT&$4oM2$0f zljJvflWw1x;klg;_QFao0{7r;BACLsuCegR@*ayfaz>PZQEy+}#+YXPKBEPpj3uvN z$vueNAi-iFXQjg(=^I#jHY2T4h#(?C4Ut;5gE=C^-M7q z>KS|WJLwh^_mQ*sL?h|@M$&EP$w|C-tobDF4{i&k(XoGoOAM4BZ${ z|LGCW;g?*BLjAzRFPoyF@g1NuGEl#g=cgl4iP#TN`cWlB8#a!R~)cKR8GG$kF? zH1fMzs%psk#2R5}lVq@Vv=h5IJ34!MIy!p0IXaN_$?581Xf8LN;<7X8H=M*N$Vo`K znvp{`(~6?>OlmJ9zHROOe^%(QGi=T$kYS$8HVl3#;#g4gd>wxWf>WUMuQM7ixJgM+ zKGyM@5HLdi$ajW^7UzI*l&cG9oykoSPM&Q#s1pB(mJr; zJVv0USy~)k9ZpZ&(aP8(|C(rLK+P{`a+$Xth-s)qXd(z3G4$4-=#lor&QMgt`r zMKmKusG8A`%oDG3xX>2h6S8B=_n*hce)i?|Z3j)mgowyhU_IQSxG# zfKH*D&UQHEH^K384yw(U)N{lz#Evbut1EOlBVPWB8Nzp+LyZc+IZcg?w4Dt+TkX}R z!RKl;eFjEsAV`nsvsSatfiJXoumT=Z; z;&6pxRE-nx`S{194FF=a5ns)yY^bfuIr87wd9ay34c+l6!y9~cRejo_?J-EWdJ^S* zme9(*bM;yPyFJV)Ey2dWfKe=2vgE#*L7r0F5D&q1(eNd1lR8DA*x6IFLxm;9#p|Ld zgUF@1psKREKVtcO>?^fYH`PqlQ#z@ai+kk#k3Rd-tFJG+e=aSd1+;(@Hin9#Z5bg@ z?$2Jjkl7kX7jKUS7}b2~*|R_G-tgUq-_B>M44?V-R9021>grp&Iy!s0JK8nvb@lai z&8;o%?aeqpb~QA@$}WUqd{Af5>f#i7UB4#4ZxBwE!G1(MFcfE~03NmQ5I#|QwtVk0&ow11lG(mr6+$qTZ{}^V3WGLT- z8MzBH5>SDz)zz2cQI5dX>Jl;)b@fd^-m5e^iA}s|!U1&{9tZSO+YXmdPcLcw)*nt+ z@S_&oE3CK*1kU>n&eo#2YZET#od4s`vl%Al9Sv~jI{r&j+ENQH%mYjRh6s~ys938v z9({1hqPrI_u**CCZJ*wE(E>!IRKm$s3}(O1=#2Z{!fk#UP+wbBSy@3`x((#q(%jJ4 zsH&_et*)x6Gv*VUmVpU~(qVXH%ufRxp}AXIIiu*kBXX8)f%xlWXQhxr$G0c$TvyW? z8KIdQdm+(V&30*_H-TH_(25JR0_TV~472I`u#CQ>AEvLutazU8!6ed@42sa-YXS|Y zjGBNOE1X2Es8X@tjK-qj&oVp;>S zI&Jz@)i_7z=2tf6rX}qY#xFS(oFC#8dp_GxjQ==CtZL9*_-GYAk>hqM^^u51s zJK910%5Vi8f1N@_5oGNNnGKu1{dxds9K5F`skvA0!Ad%eP(HFb`~qeAJH&8pB@%ab>rK`zaz>NkZi!Za zv#v2ykL#wR$}tTFB`2*9GW%urvlSl}nji7mPRVuaN9Da(l#}CjbE!7tCll|ZJ;{*v zMChk6+poF}Ii$vxZLHDSr}#`-8wXE-u{F1;$SNeCZf#A0-jNd|$s^=CyCO>6*qXHj zxRT^k6t#Rjr7ZT(N!!phfN4L&JR`OehS2?lgFwCzo9*Y;p zc-k52?^wLXr2O3a4Z}D(!Bo;d83>@#6Maw@1bcFDu)e-aN-SZSmWq%H~ z`I`RETQT$u8YV`P*uHe(LK^-L{2`hSr~PSn+L3mqohdzxEpkS3{(NJfWK2|46n`J7 z%!`m=tKxjbEw^d~;bUi?zY{->sp#)R=3Z5C?xjEfJc~dDU>Wug3=ULqxhIQwdaXci zU7lRprIzZ_seX)JH==Fg1Oi(ZZ*M1Y9B075R}N)gObIOz4E70qyyY?iSBHm0B9n?; z9l8V}Az%LgF!$YoQB~>N_fBugr1yk`R1!iBy`>R40wPv)#V)S8x^`D{XChcu0V#^0 zB1jcP>Ai%OMo1^UOxmPOdM`8IbLJ$GGAa1mKfa7+CLzx~_nz~f_q_eJv62|%mX3N| z88e0x*45QY6cQEk3f9Ub77iZn7Ipp5I^hX-^7USRVs_BT*Izf-8~OTI>PG(^KFq1TUA6>b}PJ-e~InI3c@}c!o#qVxbhKe@b|$zK$q`Wd#4Ot|GRO!8yUI|&p{YEy<&g&610RU>v*qb?OCuC8uvGgB~P{2?LiD6yU0Ft4$r(Zrbi!p^Q|ZdPK{Cot+) z7EoRp`l(P%PnC86Htt?Uv~CNMocEjbxUpE=nX>G%s0CKzcG z9bHnAejO@y%UuOlneE zdPc_WyyWPsSLe@Lv~aA2u<-oJqf;h^j4+g3I(T3wQ_ZO?8){oRA=?^SwS`rrM|*R# zO44P&=&iTjdSRvmC$1WWPmfO@9|b$wa!kedU_{>{hL2d(x8eH^WG!sQp?4Cm!|1?w z2}j>$CR$vQeyhqMV9|d_0O60!Z=!<4IKBIXS3~i~+nGHu!+*)CtP;{HNu{NZ%)Q4o z-FVkkwsY+6&%$B!f5Q0s7rW&*lwl4>8!`Hef3iPbj|wbAH}Jaha`Xz%PfE_qDM45u z`9d<328MF-d&r+frDdd8CzHUb_$|$?-72BJy{WmIT`TG8B<)(;I=VPw#vOv*fQV0h%RAdTsrIoF%wVAmwdeGUK7?V{9ha+`3Wiki<0B7=1 zZf4F&@PQOqk}5c{oP5G>DgFHilt8l0kAM>10VRA4`kPT&QVq+#+`P4f1c>I9KOkk? z70T>@P-a*RhfFCm#?D^$(2UUe3#QKun}79uwBP|L=dO5W9^#pYPD=8SfbODZflHue zOLIvWs(qxQxDGMY{*MXluE=H{BAfp<#my+vWEW`)&z`yk?@M(RBIX~A0`CfO<{`v+ zC?q+mKJC=Z|(SiBzjkbGY=8Y4@$G7!W!XTBAig;}38Bw<@Y!gJvu+^*8gaYD#hKtX7ch=;+Qad_vdzXm#M#tF!0LpEq;N$WdO7E=aVuwN*NgfT@4iPdm=$Y89g-+cuS3cgY?Zh1WB&vFCBYH7j>)2Nx;h5LLA&Jsoe9JyRH7c;Wy0{oE zreTYz>Flhnt0+R0EtpfUZd|M>0`ICSFN5z(V`Nn_xsj zz=?%W?crA zA7J^%E?JV4^nfI=#&W16z=bi1c>N#gV9np9gRz;Jg@q4D3-DA9g%-x5PBuax9*7>+ zSUng$^jdiDuGgA}{S7%c5PR{nNY>uCQ}A;M=ooKpeHIAQXgx&-QFLuDnx*LWK6J1B z;vS0u_8DX&;scpf&B9-yBTfV21xX+RFAOEU77UW>IS#ZuAGACUv^);9?9hLP7=|~U z59ZK+_7`J1*DK_K(z%!c(zYD>Ptsycr)0q$(q94=aym*n%%haSSg#3C5 z3}P<~p+CcfbA!b5qsrj}rQ{i10J6Va00XS4Ut@DkZEZ(;yQQZ`XJ>~2w(9;8@uFi%SN6R-S2iIbp`o=L)feIu z5;M|L2Uyui5y?)5xfKZ^5~S=4`d!&aLT881nH4@4s~aOj0&f2+8!1}%Evhv9c7qc^ z^u#qyudF1YxXxhJU0#|JYFyou{I~q9v=6v1!AZV?*0Ywp*pJ@gNRAw`ZUfFd6KpD#0}Bk(nccG2u>5d0lC&~=*`*H&(a1>nvA4Nv6v^fFn4e=_v(LX#i$pB z-Wu-i9LZ`!hOOzPHCh8m_f8IbmQ-bU6MObe?AghQ$*BF55sxo4f=EK$&V+=d^o*?3 zw3J&hvH0qL5BE4S>`gl|`sJ)v4k61T=8Qjh^-`fY@a5Ov87;0o9|9{{u;-Xpe=pZ{ zYKw|XTl=Slq4+$+JWxpSu;%VP)W-DoA#f}aZ4zA7~{jcv`sw7R+Hzh(Cd5US<+-_o#yC;jo zxyLqlr_3SllR46;%rTbA915DMqN7qrYoF9%8rw~=dWxMzu@Z`H9|T)Tv5gd4OtIb+ z8$Jluhhj%kY%0ZGrP%yIu+0=ZnPN*QRz|UHec1l2t!=xB<#%cWMB$#CAW=3EghkH1 zmfv3FJ#sV~u$$OvA0x=V%$#4diKVv2c`!?D^=@LRt!rRmsm*5bYA~O60P{;(F zQ_iJ1my(mxbB>)jeeU#$Lno6jT|9d{H#(p`mIl+9AaYTosUUL`QzyEQ)fO~wk+YXh2aq|zwq3%&jd4@B9WG)n#+c; zqmc0@!OIHSexunC^<4c$eX71$--uVCK23j9e=*9ru&~2M(ON}H63Wc%?DS=Id}VgN zR;Q~(Sz(o=w$mvv&_`TTtCm>B`co(TbEn&-V#Kz{;cd|W}XyPX$71W=@43etB3owIT$r%U@DJslO zjk|X4J+m)z>~h-pQboe%j;I!?rJbX-R3++YsI6_*3qm2puS4ne7siN6cZSlc z?E>|$xuS`|%sRDKL#3t=5eh|>ORSy8rP|Bg3(I2R=;iC~He$kr38Q`7{XK{K1_lQD zx!F3Sj2@~wXuu>n<$u@)FIDc#Wy%XCPQUTgPN@2%S<# z^7I7GH&`G*o<#*U60XoY`oBSAZ{P}RaD~@#g||Rsd=yGK5P}L-AC1|)d-wkRzyA6w z-mVRW@_ynh&m<%!qPtjJoJJEJO?(_@hC+-JJ8N}ic^T;zP8L7y+H)i(JHK@(G#BP$Xmidy z@zm}eJ9eypK$@P|SW{&aG6w-mq){y5nCx4JLggAxB4~jzh!_IZR$ZvwAsPzR^WBDx zdODavZsh!{0pU<}k_@ruT+B zrEfP%-$yBZ$I-pf&_|u7v56GxNwL)w+eoqUL9oRXTSBo{D0U>pJ~0T^hGJ(^>~j=* zmSWon!Ct0VD~i2DvE39KFbH-I#m=DEG>Yw{Sj$1MB@|nF*YgHY>|=dcB4pS#0*>{k{?ATjAU3v z@>h~cgMbi1W)1HGGHsDeSyUwQHIO75fggC(j%|9)C(`HWnfvv04zlTUNc*0{G`5gp zed!$Z%zXvLt{engO|b@w&8Fk>DR!{AucFvyigl(~ImPk@8Ml^V{VDb;#ad9TVi0W4 zxRw-~L9sRzI~bk*U+(#Ctj}G~s~qH>P42BT^q!mk57@RoY_BYRZ_Z@H79xcQrn8X3 zqsJ3-v{@yQ!kr>|rEt?!)x(9H=`8nfAsxNr!A|!ay1Ka(dxp+b5XC;%cgLp3{r?I( z_TGCo6?+)TFK)mC(@fWrKs+?fbiERd2d9~?Bh7Fj+7;^wCrm(JrmN{WNJQ0o2vMFi zF~1tQefX)7(U3PeGZSz9*FJQoST~A2O0jVi+b{^WXI$sI#??`*Z69_wxakhaR3{-* z{S0on>n@pU)&t5^H()i);tG03K*v&n<16ON8v zW2aA_9^~R8sHmtYNWs0PYBXJr>i@IU=9rUJ#kaL}bhI~Dy4l&v+R6$vg}LSBS)hXd zt2Ad1OK>YBGJabdO6T!Wc~>MtcdoWp^{l@o=D~W{)Qs+C4Yl>=_|)8j|5{o)+FL+w zgUNsXCbS-f!i+eaVG~5@EM!_j0m8dNq3)FnXFaG~_{h?Q^XJbE4-K6#C2a1(#ml4* zKfL1c$6k2uaqNVF1jf<*X;Y~1TmjXpT%&2i3>vy1Z|Zrx+QJ?!t6vWOlaYhx8aen> zBL~+QIrz+84lcotB+<3Rj-tShgnR?}1nGubxHST{&I_6n@pd3IT2C1H8iB#00m>38 zyFptbx%Yim4I!>{e-zsmVkt`}-^P<~mE>ClH;_D2Lr;J?^aS{vo&Y`P`(S5fk4z_~ zSVxLYr<{1O^ZhCvS4y!xG6`Y6;G7P9Cl7hbdvoYi6uE#RE9j`lDRQv$j-}Ye6q`)3 zmnl}zH>zn~-$t?1?i#n}Y5y8`fID76k+F2tXgaFvAfxu^-|tfFPKs4htn(mPJ-uTa zifyOZN{W^AVTl!eIcSp9RwHqSBnsN^W@23IQ1t9A9KTSbDIAJs?PuwEc?>2!{N zbNu+5Ls6d{t1IWue^7;aJSP`+b|U!af$Tnh=teRmt+8k%}TfcZ)!GZ-egU z0yP&X4^fv?-q0#nv~`#(n@bCd3i7kEi!3awbJMf3b23Z1t?ks2(&*^eSPcsCcXX(% zS}Qeqx2|4^hU}1(UsBT!J6sgU`|iNk!G?eFHtt4QgI66l?CRiIyEr*n3)_>634*&0T85{q5WG?Vb!>PSUA;`}U!w zXKYT){(VPxYU<=3NQRWPRV5zXwQE-la-dX-&6|^mO<7MGno3kQ2EP|ygk^bme23*s z7?z(LC#aA6T8#kH2xcF%6R-X7^6q2468!sRWGK!=n&B}@ih2!r>=1cyw?u{2i zX^lt0+7m4<%Y&I8)guZEwZ-M}N0S9o&&iV~k8)Q^GGj_xt1Crv2X9!4odgKGhCtR0 zH;>Aztn6wke7;O~L#3`A6XZWZE@u=9en(wRC2C@J2*ieNg?faAh25}Ww$0TdXFn9q zvlHosTAjIzy@Q97le>?P&p5PUC0}mL6i#LB+8#_E582Qwr$%kFURB#CiU@gc=8X_qK^7uT~#>UCh)3Loy zHf$ssGC0Y2(&~CEcV`!GH#aYTfB%Uf3Gy|Z8P4&EMg0M2;zQ8H`=E(GK@%Tw|0CvNs;0ZKzQ*66 zxoP(o;T|#tI&+ry7v~r%Jx;s74)+5vD_S4^&R$VNBh`VtXSLK$V?`2XBYBqK;>Od> znY`@+gB-}lV?nWw6k9>DVxvQz_iG>4w6un1j2v+I7D~P$;DG~za@t3*tBB10B6bzY zTr{4gR}y10qj&$2o{(*`Gx^??UGwt(E!pE2I7WSWTa86@;KqfMh zH2qeHM77O084T@gS4X#suMnep2t7AZ;ZIS~+Y?am*&kKP2;;dqeeA@M^PZh;Yt@EQ zShYMW+xbt;2pBbfy4^5eKO74lj@H696qaL22PFi&kM+iM=ovFdd(!#u777h>N`*qq z=Zi_xzb@3Kw#C8ei(Y=C7R?Vt?=&kbN3T)i76f}ig!e^>9|ug312TWemynr+i`Lc2 z#5gazB|@1(f~&W5IXI{lD4Ei7e!u;u++a>$E5&CShwc+tV{zx?lyKdfKBb=Obdeg6PtV1i2yl`?|9 z|LLnO`R)^*`S8WZXN`_{>E(~!eQ;Xomks_6BAVtq*3U0s?AU2=&Oi0^qM-59$36gA znP7uMrnG6p-JD$%R>P)^^d2*3@~InTKzI8vlhedvZ+7 z&4kzs(b4 zEhVo2YTz(Zsk^q8R8JvwxCO|zVH%3l;=pyZ%~@Fpu(4@uyj_A$Ee&Nwc^P0nWpzdA zrCn+t|omw`{Q=G1;rR@N!C< zt&^K&YkBUe?>2n3m2@~&l3Lx*K6~qEZ#@=HE|nYVTgaWHZr-f6S-pCU0Mh>Nnr??B z;82fwbm#2>w^0C^{kVf zif=BhtyLrIsIgT>Vv2thyyb7m*wicjZ!a(mUrpE;2ye{Tpjp$VEq~#;88fDb1X-9j z=NFZBDlNkw9`Eiqa@w?UK|XVy2{E&A^YSKKP1;ylU4wEC&YUYP)J+P-&`L4jKn<*g z6znbKT1AE0#>{M-uK>zH7hh@skE?Crn^`E-lPxTUxjT3S1O$gHnHuOl$z`Oyxs|1v z+-dCmsSXZauEU3W472c=JJ;3$K|A1jHbS16m91QY27IpO=61vD>nBg{tvJ)rz>~A( z)!=;{k`W`+g8F)eq=8ciMEq{T0?RA;4%YNU3k1!L2F;Ov2mzqE0MMLmY0j-%H?Hs6 zxqI`v4I8&^{w)Um^hmt`OQf^LkX+VF7vqp1kdT#|NGhG}+xyFJ7j8y>`_<0AhSKs% zWIC&q<>mD-z#AKC)zlQ_S4Y`eSU92O+o-UxhgLkca>c__-K=dKn1%RLG|a;h(IvD% z1FnwB0yv_HuDl4du>`a6CT4>~T#+8%-Ws&pDa*;ty?W$Wc6Lq%>Q9)Q9hwZ`@>enh zq~rT$%lqB}M*Ym*eFcsBnNC{fXdX`O`^p^go*mj$-&X=@pt<+Hw>nZkYjIz-Bom9{ zeQ*7weirM#dP@DR-o5r?nLDifyed#GL`E~vB;LAd**05}+iwhUa9PNOx% zO$-{=SyFt1c2~cD$QZ-rdqv)7ow|dg|mL;})o^YllCG*ICyIPnF$pryiMz z^toS!S$GMvuo!!Q^!)I-dIe^o3m4A+w&}a`=P#VOaIK$dp_@R?!fn&j`@z&a3AS8q zs%u+>iFnhn8j0hN7*?Q6)GBm)c-!!S;fwxfu{SyBa9@W96*4?c%&XuD%aWS>MKA_l zhhgvtY(;yRi^wHB!|Y~0L&3*>XPxc_*{YZN`ZcJ;{jY-$%R^ZzRB~||I|^Bzaxv4- z!t`bX5swn)6RjCkh`qdLseDo`MtF@FKWS?4^!anAO$fEi?Pu+=T4JK$voHb&xXSSM_wu>3T4`a@Ym@(`BqVk`L!d5 zF69Rq-IKi(P zzsMBec#Dm)84>93;4sRjDmkgN+CE5n^S8ab5ZoOpf$ok_8U6zfQ__I+3p=+Q*i2-dKkc=CO} zV@XB?Es$!gg(F+o>&Z7AUt~~VSR@uB($JSYGA)Ip`4q$V)Hb_{TZm*xDI6KL0X~LP ziLdug?#-|mx1MBJ%qDk2GAwAtRbwrjhK0Qu79_)A3U*ZpcGVQ@swvo2$Z*Jtk4sKT ziHpn5&92KWEiNd~C1>YUC0#-t>PTw{&++M~^N0xAyu7^PqGXtLIaZ^Ia`yEtyu5Gs z-hGGTs)~}djU&g5cQ8BpOL)kkgG>y7=rS#__wXG*b?Q{qgAxp%_S$Q&k?`&)>l-%~ zE_m_NkKS7mG&=lgVZx@bKii#8WbR*un>N1lA+sDz1mG@h@`*ir_wJoFYgT76!s|&l z+NU@cWzwCCibNJDxoz#_C@<5thz)ooSduET8d2sFBjc1X=(|}4E!#FaE zFcx>~YisMe+VbrkZ0vBf+1NTc+f0~f%~((%l%n>=va&M5po=wVQN^+C?R4fRWA-Lu zs8Gz_M9khq%$`$ePD)OPt_0N zMJrnL;XmSB3W2h{OE#hQkNrCn%qPvCyCj4`u!;0P&n;N6;k$pYCK(zuEn%JF$#GJc z&F0OUH*=(um}8pppV|pEMXJulCf>Sv^J=;FN;Y~2W+j}7Psqx`iK2oQEJg1*e97f< zoEB*7&BK3cC!`)Yb)soEdbK$^30+)WU7ZyjR_2awj2%#@45u`cAvOoy59DSRQVwbL zj5}_NXeh5?)>dQIdgJw9$E;1r&W9haqN=(q^JYe3Lj0Ap$F8SWmZe0WI(7VT%%KCj zcKm#1|M5SMUA&Q;c;x6WzZ}0DpO}gQ-`p5hZtv{rE0drXTSFVKv#Pu}KYIU(Yf$Zu zZ`$z9SNmgNzl^_hAr1m+k-$~SH$+XyEMsjQ9Go1tZaJD!Wf$n}%5M|E3}D05Rm7e? z^2dP#!-ut0-Q4}%&->4wi~fGi4||SXOhAJ})o6W3SEVlF?E3ZVf78gE$4{O-)~uqo z1Ct|Z>acPMd>UT8#SX1i331nNwK`acc~Rndu0?TZ7oQdaH{TDIFF!T1IV(9fyH=x# zJF(|dr}H!6@Ca^)aIuNmD9k8m@tpTgCbuI-^7W*kFE-F zmI~C7L5(`Jw5zPD(;D~+v?la$7E~wQ$f@dJh3@9I_QQs`dyJ7-3Tw)8b>{Y?r};Wt zDW!~@8!wZY%M5xOYkyMU!$HXtcXTsCv74nHlie&4=ql3E5bM-!rr;?=V!mG6j_O0| z33g5{wq^>MbhvMjt+%VvewhDoD}%tn&(md8fX&F!K@&zg2Dm!=1bREWj}8c(?B(DZ z;LnWZ#<#b(RB7_FGHz=Lqu}Z4J5c-7Y=oo@yVfmix)*A;LDB7&+Dk<|fuSt1M!;{U zJ#)SVMQs8_k!ZzD*oT`yQL`#5>g!tTDk|d6o=Yy)kk)SxMq}bWBC{k`sF5RyyvYL} zVbGQB@6es(qRw-e*ZyC3tRaahpH7eZ;>+#Zb{_eg)QFqX-m0rJc8c4!^?Pzte}g~; z%cL2F1@obkZG~VH1(D_pP~17@8YphN^gsXjU(>IDS()pKg$q}#TD^MpYj3>STw780 zcPM(6rZ^|J01X;=mKL2Al@&T|5%WMaF76}pS;W+3VPWhzXFxIl^qL1FdtndV&)QQg z;p_R9*0vo2hvCk)!)p&e*iI1lZG~A*PLi7BWUo=+_hSM_$}R1k9&B?6@7x`0u{$>OeXj*^Y^yIx%21Gox61Q#~*?XpUj9l z5mPSr95eiH?TCY%W3n8+UJ-kHov z^ADz5mZYXN{SWs7`#54<{>zD{hCTAan{U4UZZH$c26?Jn0+uaq`CI#;wyD0dyt24X z#H*@;3Rz#9|M&L8C2m%Qwz>f($hww}I#kJPu5Zx&&HZ3&Zy^RJH&-gm%q`3m=4Oh& zw;$Y5>=UI2{u|W#V4L91J#epGjb<6fL4tq+qN2pDYlkkU+u%1*2d6Pb~7|AqK)~HN2b~s+J}6IkEGJH9eH%BjX9FX z@6*PF;I{WiS8paRc=X8TZ~=UilW$`=szs34rg^Zi6(q#R#bjg@Bo#3mxPY}kpM_H_F*6_MVsTlm zt_C}gFB<7coX1TiB?Y-TIT@Mh$w-)pJFE_Lbhd8EjybUPhwZzsCLQ0j{)_!LwB|NH zZ2LVvr_RjT$w8{E>FLe4rt z=BFoIk4s`5Lm>XXFjg(RaVp1m#V7L-*K`_Z4l!JQiHdIPjd4yV1Ru?EtIY|8NE^c( zw0L=}qb}`sRs%c3xitT_*|axE+0!2)nU6W%<}(#4Z+&g{^>bI^Q`)U36BpaK$0CN| zdF1XBLuO8z6rilQ3Ssx;d39h}MRB#z%6rV{(UZJn9bKKBJf+j zOC4zC=_Hn#lgj+wKDN+%oZS4JZCve@3Of&T3F-<91qPl}CYSI!tb|zAfVqL@P&hjp zo4ZI3VwXS!<9buowVYZUxMFLBT?%-D_O1>*NuN%%5;X8SXn;gGlK8ikpn*|ksBl(FK&`(fMnpU*bD1WVrAhX0`8>8AqKXKSzj z8Ekl4_(L{NWPUu@@DiJHKG^WSVt=ED-x%ArR#2?*66CM zsH`7IP*vS`WI7#?NkI zKj`j?8U_6q{Tu9@q5dNO*~8rpYOpi0*P)Svje|XU2-w@ei(zZ$L~12Wc%X|Lb>-JV z7w>^Cz64#6+;*ZXPt3^&WuSCpE>YIA(RAbX?W}Bk%FZS2%L?*PCKN3>N>SvIbT%tN zGYCy?E_0M)Sk6cx=)u8*DFhwOy3obGy}e82z~gmwMTA2ykYpbhT z$aqWg~B-Q>V71x}hxP^5x{jGL?g!%l3^RnLRgCt5|bULun=7 zZ`!Q)KX^YxEqVXrg9i^@@AiM`Auy2bu)Xgm_6#;)~bWSliIebhR70JK8%8>WTJtXg_6UVdH>46OK+Ut}v}Q6Y6nvu(l)ZL2Rt8 z3DtqBEUjRDK`xZFgFT9u8%G1>qEUz*+nr^20yJsw8BM^Kkk9yEfO~-{%s^Hf-D!zE zhWHFaKw}Z0u|Uum@i5rJ1K6zK@kQP3`0MPP(z;FyGlXS};oXSN%*?M*N!pq^;KVVn z%7)MIb;GATaZ5uO+MPdxc=eCjZ@&nGcK}UW49^O;{FzjxMXdK?E|PuiH5GewYp~%{ z*_Q7&Z`2Fr(wg+k>JUUZujN%RWpfb(e~Y<|QqI4^%XJ)F;b+lXGXn+M8pz9=xX2L0 zyM}jF?Cdv4umpLnXV#0P25>gzD77t*?d;Mb)uK$J?UuURwH2erDWk-wZEY$uTSr%s z0ISg1+-l|QsJ5LO&J)?WjqnT%4qLin&f70PG{(`HnTtQJu2z}NK$m;8pKm2z9m`(3 z74a$(yA`P!^$_OiAiko`{&Klkqw8nl99W}^DC?M&)zS0i2uBl| znn_7+Tw-SC!S4^GhGXh{47P?q!+&_^lM9AC#n$4Ve?ffu`p?m=@TYtv$jKSgb1pfT zl;0-FIClC%D8d+iV73b0nq<#r4s|Ik4IjRbBYEjasWg082Rt^dNT(lQaeQoSZOTd< zLdKiBgsymeioJtbKdWWk+1&}+^qgB+Id7IN0vuWy6U*l_Gh(Vsz@j%_SvYsqQ!l*y z=z>xGtfw_Ql4#M(UpE+nCwX8#q>PCLHtqxBVVa8dU4-==i}fYlFzrzwg5klp*xB8I zErWUy-8=!frvZc_7ORxq9pIlRDvTY4ZPnF{z3`IZ1D=HJg;%f_o-w>`zV^EfUvJ&~ z&E5+!MGdutirH6QQ7I1}{ej~+)f()4AZzom3BHG`zk;opj}V|FCIiRhKCH_I9GTm} z{5FW*T}22r ztN=WZHFO}g6S*{zuwH{D2_+&S_yY=;qO&EBRolyD5EqmRbD3BsHkXUdrD8rgCPX~| zKH4RKe@JjH;BXM|du+KRn)F@F8IfH{H0cV=*@&td)E;bvdq7)RU9GDrM<*&>Lt_iH z(rTPVWn~RjMcS6;>o>qpaPYw7B7plF_rdZ=pD2V)qKyWtl9PMqWoGhXk+h>0o+)nB zR-}g-788f7M795x%)&l-|AtUZv7=tYrYM39OQc_{`|(TlXKS}MG=?A<;x$&>Le}H` zNM;9zD;Hn76k>=lL@3fOBB=j-zBs_kt#MPZA&jlt5MofGt;(rWrxNq31zjB5wVGpA zqZ!Cr!3wjgmcrT0d9bE9lv?|l`NsU6f4%YQORHZP*OdB)y=%y;;V_sPkl@qK#Ioh( zAuyg@V=KT?zhfdHxTJCJ21BdEqc?AMy9EaayGgq>IqA1fe4jiy$P1JyZ_%NpD}(-Z zq(S66NU$(NB{&HQ1x0@-b2B18n8Q0QmnoFCl90Fp#w|bbz_)^&QA_A>vRsAsCJ|=y0mi)8i{pz|Ww7Oi;jA zpn$JH0kgD?s6g3VUr|z`$uH1WL5MA_s@7E$mlW5uceHo5wxBp%V@)+Y#Px(2%Bw4D znC{M&rpES~`g(0~i-1taRgUv)uP!lGfVy_^(j^%3Q!{dlDx0z6I$1GfT@WMooQgP> zGBfq0yB6}IaKG^K@HxJbZme>(4e$)qmn2`<^W!gmV;Tx#wuB)%V>QU=Bl*7T8ikkd zsN!S4e60$F@cXjiBhlgb^fH~ck%ZH&GCXVX*^x^pa+8k#wDsEq2M?c5?xUz<95+I* z=XH@V5x&&IVh*_BLym^x!%#`KpSaO9apFWDb5UglOxCRh7dgudry=Nk3+MbblBb?t z!x#x{A&StS0-=0vd~Jd^AcfJhBUpcQRC)CI74QX|Wh&T-?2GJb_I37Eb_x2f*nl&j zeI9g$Z(*gqiq}OZp1H*A61@^6)K*rN&jtm?;lhVu!P?Hg`R{pP9~u+}ld`_-V!R$= zXW*wk3T1hm2-VCD*wrdQ(SfKaY$|XU!#jeX3s~EyNbdx^EdM=8UXhfPlvyXURN$HA zc8;8d`*J+>BlDkpe8GYROC!QO1-(=}%US|1A(C0y+dI0tSSuZzUF9{;a2#&kvs+v2fAs>9ZCro-r*9 z3Ey+)KQ!ITHFV{wRnPz9=~q5{|AiM<|MQg>!w>~IdeX$OB@17Ca(dX)&piI{lBLhQ zfXBeTc#acccj4l!%2{gX=GI9{aka2c4!9MWTu$n3*_o4D;WgH?oaMMcVD_CvVm0%C*Tk*|!TzAieW>Z92`(B=E;btF|_#va%!{ zPS@DPn2X1b9lNbnvxT=p4RZ{S8J?1BY8z9|UDLMeb*FwL6pWyZNGfV=+N5F^y%fxF zvR7Yz^`Az$Ct#MEZ7~Z)&e103cX`sa^Qm>U1(nUiSG+{x7JozBB9*eBZhWa?&u$1u zMe(TGSpMcpRxt$MVc6w{> zIkt5$`hEl}rVWYurevuQ2&12};-(>sPNKHwpKt3&S_;7y_=?rF4plr7B>wlmi33YS zj(irvglUnLk)S7-DY?fnERly`0F0L>G8+Y@uPdkU(!3i1sz}1H@?jM{`)uHz?eLk79qCxUP9k{2^qbFkX(Z9 zyxwQHH&S90xdfxfz9Dzrh)qB&l;%oep|qacYoY89{cIlMEEpGxCWK-{A5^laW!X8m zH`1a%^zb~?vSf}Cw1=^r{3cS*(qGA~!ef2EwczXOZDjPs7CDT8ZKK+T_r+1`J zucFUx{@1e;EpH6gk3>n2z!i*I-sQ{1#rbLJnYZGTlWrY6bTay;rhqvNEe~J!YI(N5 z9_!Zm?cBWKNX*6UU$6Ntp6I+`e5{SyP*zUD(VznAhdNoWY2>&+gOaLT5=877m`S zzEfu`SQ7HjRrCC3hqs={x`FIrb@hawn#7PrvPIl_#sWR=cueM(J<4dB= zyzRL0__2%qOp*zW&cv)qtr&s1xhAntx`qU-EHFIhc(hU6-&~r|-b_!fSdk$d}7+~a9MDCc3$*qBrie4Lkn8b`NGeX}pLVpz#KGqz1Cm%C~Y&uM8@>WK%9_XI- zF5zLr#e|1F6?!KRGx@{{`oz`ri96{Nx6vn-{Pn~^*s&zGJs3N-ucL;p5uI*wl1b6n z^wbnY`oyK&DzB(PEnkZcbPsHcj&80-w2DsGf@C3neLd5mc9=SOw6}-f&hIzHRPqBt zLzl$H&I_NsU?zev1$sTdDU%b&{`uvXUv5AAhninhg!(zc)SGc}H;hetNSDoZ!WZT$ z3NlkdAYmUCmE@q~aIznZ6121Yq#retX}g_STt*VHvM zHq>=?Hn!yFl87*Y2zs~#ZM$p~3Wc?`OwCs+8IjtdIKSCkse}dD#@q}(99AZ)>r&_| zQ0o`TPEGY~oS232&D_e`Le00a>FDaw$Ay?9;(ZchjtD)Ib3%p50L|7D0%A#hXBEaZ z5|b0&$S&yJLwBM`ClzD&4KYQA-hnK`ZBXQVj5i14t;BdUF+j#+lKY{)+-H*NHVNv6^uO}NI!WZ_e_w}C45V=ld`kYQ_<8fB{lY9 z3>x>v#-${srr;|TP5R(b%FfQpX3iJ`6ZLvxcx&s_qdOLTrZ65lh$a%&(Eb%+H*Pbze??MBbR2r+yo%osffbya2V9TE}} zJS}LHQNBp@adNh|G*?Q&wB-sk;FHT>Z6kjmLfLwAD|`IA6C)eS z#f*D!I@T`?>-P}WZz|SrD%Q`X8wMQ z7QYD6Dl0At$I>J)X;2e6!SjBdO*J8~I>bi`I%-fWH5h&#{-kl^CJnGtT<`=P)nz(3 zLs2V-NBk2afl?`!HPlrxt(>G0T+uW)bUAfcFFt)N9xdN=HMNx`#c;peE-bvBqDi~R zoJ0G=)KtYld*e?2Chp;ikvw0@-~1_mODFs-l6RHz#@s#*XBwMMv7;&0oMLqp+dK%i zr_z5N#gPNmq36nl_j+Xulerr3ED+d{D@j6`m|a}aFLY`sOXk5X(Z#a5I=H0)nyPJz-eATST2wr3a zx8$(#4BO(wj%NmzbsLObBx>qeNUYVyE)p&|L}Go!saIk(z4v=&nWaxVgU*zYK5cd1 z)AnKq+`mzG@87@Bcg~g6oyELSYm44ESCfs8+dv){(jC$Ym|V;OnJ&{>36TOxnJuF{ z=S%Oqhi8@3`!)=6-|5UPvROwP{~9f>iF|~gbxZ+yYi9iLcBdqH_fz{)v_D0s_oA;- zbnYPNp0R!J8M~aK4JOgOvM-Thjdw1l=ukSgiek(9u)Xt5EJJtA_bn2)HaQ2h$u!`h z>5Lcj%(!3QjE@Fw6WwGs@FY8J9A?};v#_*81KlJWF&aqKONqaHBQ_?H*~6jCUuR8p zw62x30q$(oMMu}Pe`e3R1bDeaE&1-x+hxrjvm=(oUA}g+6n$AppipHZC%JZc_cz~s zv-|Qj2&GzsKrNi+rc1olqn4ax4v0Q|baZ!7c2bzJozzk08{VVH9)jd)>+8?4Gtj_u_y~er@_*7bvBZ%k)p%o{wmI_HauGE@i zt;C<%D+EGw3#Glit+`ZgE`hCGZYH;}LnRae&h<6{CqWG!ssCPHq zeIBJ_E{s%V?C+_ZyQA;2@#D3UI%9)9kFN?-3sZ5_(BCj zp;%yUWnn3YaKtCVk;w&dCbE$xJEx?qRHv&dDJUw*zm4Rq;-bpxUeTzZg&Bxf#L$&- zqg{b|a^MJ2V7ctE*mv*tyH~Y}V(Tfkn67GHC!>L}l@wcd7uJW48__pz?{2-v_Q6)~ zAvTD5+85{@qYc8?7wBP^fjRmNI_gq7>L2uu)99$peWMCNhxhB&pkNin83?YYKP4#* zG|(J1(h=np*+EC_rXzOuVf%Yd;U2=3Bn{w7ENjf`Yd3aXlDhPsn5Ore$x*xR9o00p z!PFh=DOTTi$EL9+#~t$1wi%yxn(=8h#;2t{rUZjLtuMV}e>%shbdLMFVhy}LJ)Fsx zVtY7KIepr{);6MtGd&B=M07S{{TP0S^`oBy;09?Hhc9aUFv+Ke-(mUa=Uhm=)9}#` zpAz0{`55fPU>^m)TGwOxkPy?yz+7wi9oCP24w7>2zorh7;dfX*`Z-e&)YfD87!n7& zTjoe-ZV+ZQ8d;3+YblJ6g`agyIr()fBIfA+?A>Gc`h?nUkUcgnm7M$<%!$#AT2ACr z!I*n@+1(@i(vkh?$f>;}U!^1G_l?}USMEml+_&#N_gzlWsD99M-*=+#ly;0yEvD#T zI<|^pwfAA~=G%+uewjyUzKQOa=Am(ia*7>Ku|4NSF~!cik2~D`#Lg5;xi3oTMADIu zQ*`Nrp;dRIYx+e?u~#E`sB1vT;Z@^(2Q%w?&@INXgY`L)_u=oJvl5D*N}u~Yz4L?g z&O1$F?_4Y6*kXzeCfL62#shPw9$w5+Dr?FTc>}EVYYISawId`%v-r~ zo^5+{bd++`s2#D_lhS2ULF=4DhahjO#9#b+yQ=E;?l0D`r!#_?KP0cc_~ep@A9-YH zMhI+tX_180s6r6nwvPYC?AZwZU&m}+qiDQchTzj?O+^FW%!OkbaUF5>$z8inR*Bar z#S#mJg^f!^O$*N;RUnoh|5%@#lapL8USqs>KU4R9>EyK=C<*fCsdI6KHOaVlCYobo z4~3yr%L{mYz#fXluhhyzho)KC$wkSjw@WsS8fAQh_nsVKYdhk}_tu0UB=5AOz|%iy z*7RvpX9hE;c^PMo8%#xl%--a0IC~ahWdAa}v?kKr#f*=rEK3t0gn78zUMcK$ZynP?2GIirUg39^UsID``*Y#urEcidrzSTn&h?T zR?G{V89cKZNd%HrPd)kYlJLpnQbU+a%uQKIQwuATdySqs|6$|Kf1gvKnY-}a7gw)- z{S$mbcXqc|H#7PY`ZDjXenSa-!AVWm&IGni?7$5W4#P6H^fxS`=Sm>#epHZA43x zoSeFXi@$7q+(-xP3U(rMhF!TblsUqVW>;{b_|1f#-yB8j%N6P{{SW$62ASa%cKh}) z#P^x$&*-MEbt$Q{b8+?d8y_4IjB@LzehWn~R1~92n}7> zLkQU^rCnV*dLN z#rslx3dKiLyzQX)^S$_y6fdXvDvBR9C_cXzKc3=?DPBVHqwm1ONOqUqv8OW}QvgCG z9C2bbWIQ(OeInrrlfe&}+1O^i9MSaCQ+|gHG>MLWj*fnSjy~>=(PvkKG6M*P}+ptUZ73+&i}!`$3s**j7EjI>j!!OR}XYyViWc*KaQZQ{9bH}4xk;x#l zt|P_g z-si2mc$V_HJLTZNJRM!emI6rP-AVXb1=d4F*xg9n65dLT#eDj=SBfY)lcGt)fe}5N zqIG@fUW)*cu!LA!68*se8^r+`h4Vv2KDv=_bzIMGoCkWC2YMiJNArPM0*q&2)~$@( ztkm?>^xH+1<%PG?np>J$T44%ms4T0f&OW;L50d|ea*zC$TI5=&Y`Y9HJ#*vA4N?ge zLF*STpKC>@Qnlw&o6^Ga2F6T2%*8)+$x_!V!4t-MTZtr1t<|@$U+GwORYNNAhTIa_fElsq(HBsQhqMBa*{L{YgqjyO<@Hd+a(~IF*|(kxX1)@=8{yTK zn_`db1F2_SQ>VnfmOuW{Q2fC@<{-POG8k1$J(v%0?N2^g{q{#6fBdnEr|=jRY?u{E z0_?wA8)o>3ES%weA%>~Sg*bQ&d^Jq%*(oITJftEdBpBM-x&C?w0s(@97r@=ea(Q^j%rP!OW8A0Bi{#+Es(gSt!NL zSh_4V<(|{AUX!t2z1GKRSTC2Zn_lLq6_cyUNK4AjuGMN%&;EA&Vr+V8s{}%tQ=_h4 zuV~Lu_R7aLkdKR7(cx6mp?2v^xO%g+LqBfRFEQ7WlEngEYs8_=KcC8OpQ)(@ljS6PE_1*h<{3 zXt=GZLvmVywt-c+Sah@e%FI#-mEv}x&_-_N--?^6i~jS^=sGdSbL(zg>*Cz3fVb95 ziJ6x>c@%0Yn{}+bQ#AIWU~klc5Z8(=ZRPgU8h~rMm6LO;5xDNW5*1=^h9KWIQA@pz z@lmi@ZK-^li!0OfXcbn3$f%^oFv*wx2UcYCIpjj-qo@*!m0#b#=Tuz6^^*fFQUVtk zO)3WAtF^P4gm2WAZ?4Xgp zvzc$OA}9fb*H^MfJ(&JhSzLPXOnQg^q8CCTlWb)^7ku#S!>is8B}>U94w(eL=OXzV z4rey1*p`|I6sUX)Whu$)J*4SH4V5}ZE39SB#d#%JH{-Px)~;fKqmSi41cP=kj)KPA zWK>>C%gRi}TOQ-g^hZOZo3z@3#OTXOO;v?DbGDxC8b)c2c;pDPat7QEx zF^L-6FWbAne$3Lxp9!(bs|ZCL1@bvWuRQbEQ_rnx4@Zmz5<;0`W+(rJg%Got1DxQ* z`t>JFQ@n(4`zGs=))*H^}f;X*4)!6niv!yjX?6IPkx!AGX_CB zP_!FGLxZE}EQ&tehwioa^jb2dJrVj^qu!ry)cYR?n8>-!MDK_1SPBU(d_b<;vue(C z^uAT=qND%Cl})c&1-}o&y(9KmNgDb_>_z`?>7>U-Gw2!CJNn&pGK=2(40`X<-g}qPxz6mn_u1fUB!lP? z@U>|Si~o_F_hSN*uN?pTM^fSWlUJ_AUc7kV$f@i3H6>X|SB~vmk1LCBr6*rK5nFxq zKGT<|ApYE$GiPq7T?&=<<}F2e`wvaEwv)?Hnxka%sEFyFj5hNYZc5z#&wu{s->d$C zc-8lw8Sm*i{+ahUUZ9g$#TeAEhY3D>eA&WRL$S4PGM9;^EM&p5#dGEcGk=OQ4*h;Z zGW%)tJBuXb%hZM;z3x3!<^O>9?S)Q`7|`D0uRD7d(*2u;mpIi>Un{Sf18O$b+k31MQ2uZi1b9=AHQ7z1 zpE!~2CGKorzRT!i>`VrhV=v}&?z0f6OdTA4I!|;0yx;884SM6d?_6QRc~~2Afk<8| z!AG*Qu*e-mCy^MDNP;(Bs5ds9#7p0Ig;n$lF7ygPy;ta_S1A4K3Z|*7lv0@;rLy_G zR2ECA?B9J<=8YNf#`vQ!1KyxAZ%~1J|TMnYoTh2L+)6TQSt3$spXou#F& zBp1qRl;zYpfm@T)N^7lcJiMO2a`_nS)RAUO4Tjn_*2TrpTx&2ajpAj-q=z8Y|8wz& zD;Ga9!$I^*C{&4E%+W~ZQ~dRh27`Ox)aA>kjYK}`k5QJ@Rhi{AkkKpn4NaYj_I7lT zjWla#*{;Ugv@}s0a$eh`ETv+pRAFwew6HT*NF*W&l3*guT3Ob`ePTesWDjSSZSB#* zhT+*f@N9v2wq6UjqOA1yLvb;iw_LjX^*UG=UO|7muFT|QWZt0xT9nw`a@Di*CT{yJ z1gUp(`QPqlu0bZ*_sItzaQtUfYu84xO}r4;1()(O>znnlzc+g~ zX`1eqZn`LYj}8hV!!0U^BH-)5t*?r+H!1XWfTA)42O#1rLsT|YHiZ^SDQ)R)lD6rd zY11ZY#{YYATSQ-9{QZ9aiTh$lJJa-aa#D$Yk^9QH$}PF-VnV zxOxS!PCc3p;jUY&-4DhGQJ%9d?ws)AG~6J$w&EUE(=B$o7I;gG@#&#JB34FY1ky1A zLs^JEtqmp=3b;8824wHnc}9X01ZWBQ@1xOerGj*_DWk_nM1`pyTfz>@JokKos{Y`y z^XQl}+OE;Eqa!4vlXbpjv`$dp(`)Lxb(m(8VWJj6zx@Wr|1}x7RnEDHXzi^-Hj^DP z)YF)2J*U%@nd%Hph1WG2GJ1A(f z8-RHD=+lj9kcJN<3+xN#F83t&NcTARSa&2aAjTr){YgM+EO5`nZSL#mxmSQ+m%$Q~tu+&>_b znY{N?hdGfdmoB>Sx{r@%n#_)VK!)}^kr~JVHi_FgFyL?^bcly|B_d$}=|28s`?g>HLTs=# z02BqkvNZre;1)5$bsf}xjKm|^BhNl}@8mm|y}TsZ=hh{2i)3Mu{-w*F9V!uX>#`Y= zG$G-Uz>q=Rs^oj`j*E*O5%wp^!{YiYFwO)_00pL~TO?7)q$zjINt<{7{ZBpf=CTJD zJf41Y8d%B17Pe4Knmkmth5ECk`m_Di6N^Zn_N*3cduk%2GB`lpAQu6<7fLaae{Fka zTN{U32ePfJ*w&G3YuVqlzA@Cggl%2Pww}SZe)w-%mkhNo{zL0zw)LW$t^aIw9+J?_ z5EI1etiC|Q>Ri8oXye1}(8g)iVmh7W&7rnKn*2~(N8I%eNiF}7%2fViQjl-LsDP%k*$1ytzTVV(Xg?7U z%0b+AU?v%tFJG}}tk{tK>#x7+f{>|UFd1v=+S=Re4W_1gV2#%s_4Q5l4OAYQArnKc z7Q=Z((x1|?QYM~?XPa>aVlj~^016NY zxD>G<4$6HmjG7ll%?G3Ag;Db&@<4B>u4-t&0i2+{tPz$pAKf`_9Q33Ndey2moXNwU zhKZNEcQ4U1QVHVN8ewBYbBn=%@C$;ovPS5q3X4JS6Y;BFKj__6qbK5B4@FOE^rS{l z;)bTO%8DkVkvhcaxHg+eq@xwSX>d5MbWh_3U#kGutzvX^Ru+Lp|Cf%opHBl1bkgx$ z>!L-N>SjhalcdLxB11Z{xy$8twp%O+_0<8!%WC!f$?59CpE`&B#PS-+gI$WdA@zdp z$K4QnbR0}$z2-Kk0uAlGR?7g%d4ZwWY_@TL2Tofpz3mOaRcq)(B@JDq!y!~kCCKIA z@vNQZo_@Qdx3|aK+wT-0rA;Ok2@ut&iyNK3ds9P~2!P;fv7@iG1V|WpB}StyY&yW= zQi2_IC%!>&>(*0L6~nzadGbYuPY-mpby~FoH{VO65+j-ZJ@l4 z{j7^at+B$Qa$<5UyOw_~?;PT}@`_4}imsf?&dxrcf32`g4-7W_4P+|mB6&Ppm!;1J zKx>CcrI3pSsGTKO$i!Sb8EK0Z1^rMVum`VRJD8fbn>%~j2|`_KPk*1eNl(zm+Pke> zF$R=BK-Dp;`F&lM-u?ltzZ6w31soJ@@sbO<1EhQiAt9722l+Y_^Bao!9fkP~#r%e1 ze&y%S<>uvFxPrPFd6y|2v=%7T3ZYOTWN`eE3wz62<<>fDkg(NFm|d8aMUBtS9>0pA zR_W;crc~-I^#gsylnRCY6Z!g(vXG z6R7Y6DzaYmK#TXR&0~y~tWEKW<8vSLtj-SnfvnDD%RWwXzk)~x&k9vAh|(Y{RNUB9 zQ|(!!m+(cbQL#{DZu6{CKm6h5DkaE91bJ^Ydc7CDlG^sePyZGw)SuqH>xX~mbn#A| zJ^@jtuWW89-uroW_UfHQZ5>tm!e2b9;EPO#%(7rg5Z`WfbJ{NKsV8&t!mz?6@=)7>1`MeY>?5biQ$TjKm(z*q7Q^=dC$!3Y7o`1;FBmZB2fVaiCi z`K7tT+;iOT3%>pSlMkUPX419&sW1#bFZkC%eQV)q9K4@%zsS8%yI^b@K=!JrwVWrH zy+WYDj#K;T)Uk7@5~`6Z2gLeMfI_OIL+AkRiw{y0m<+^5NO&%3)U@QC`>xbhrl2H4 zIvHuamji?GQzi}S>rxq{*PYP%$EHG5JN_iYzhD>@O$21@i(Jr5<4>%bDYw4iLAq0j2w|Paly!u|40w>QTeEZ!-oY>Nf~K zhvba~@HPU8`ia=+18So{Dwl8ExbaW}9X$V8yzUI(G#%Nf>g-hMhEs96v{r)w0mhWmP0}o(T@$QA z;92S7z10$$rtCdtp+MmWV8qCn;WjUk1PEE`pfHJAC6UU-F!lKQB9rAmN%914>i{9? z8gTJAEJ=3%n~IqwwlspqGh|B(H33kyq`0OY&}B^xCfqb$B_%bD4W_Cp6BH&!f?X9l z1YaVTav3pCA|}6xCA_TA$S>D#*tFw7P}ImVqY}sdxNZBHc6!sE*5JT|9Xk@Jd^9a+ za*=QIoec$y_X#Ji1L?b~<|NFzTv2%Fn{9g_3cOx_?-9xMw^AGS z?mYbd`-iFVAaG>sCmZnkV$%;t81dfHV5nT_!KHh%LZLP_cXc-!0RjvVP~1CSU0sbe zdP8%w0h$vd!I>2L-9TTD#mQhB?kB(W_c^jcl@Sr)N#hfuv2f&G%82OU<7Z4($yFM) zEIL{SUO-bPaCHX;to=Y%68nVvlF{@H_sQ~D6B5HH)NJRWEE1QR9YzCL0}+D+1s5+s zr;<->FAWXGdgv4l4fRHRpsbpq%cHkP^D8J5Of&1&>Qn>* zzQkK!CK&e~iG(;tU7)_;-aD3vH&5jb;p?w09t9?QF zPJfc&ph5{Fa6?AW@(?Z8g;mES(7d&u<_};cdJ?n&lSOkBGv9j~G{HqYuFGn(ySO|) zk58jGO9rsWV&emN&~2SsF+M0>KzyuH$?*OCgS=%tEuSa%4)XUOv?GE0WtcYw=1l^* zCc(UMFWO;QBz^%-#vRZ*pTy5J;0f2bUxl!*!*5MGuAh>+fuGzuu0{oeBjP*;cWcRi zp~&DnLlhsw&%aZw!fz}^;9U(;?8i?-6lI_&!97ZDxkpYmIfns127Z5Tg+DN=GpJ@A zS9O_s3O`-eaihAa7X0L+z>i1F#Yn z_}!rT5=(VXIF3BGyVKV_R7VX`z0-52!#z|v&yhPlJ4o|O(0rVwsg6L&pg+?5r|UuWA(pBn+!I1YEZ##iO~!PH8#&pfcwREo6EV{Po|$gd zaUQvbd)$w|Ht4vcx{&jgL3!)AG2uYY%Aoph;YBjnf8g%RrPOMC2XkE!%4n8M23lnN-0BS0CKb0}a-I;0b1yr#6Vf^ml?@aE>{23TR z4})i>Q9rsnsGl=f9?#@OLiRPVO3<&Uu(+ zb#mvaKnDKB!|a}$pOZ$oosIv#_1=G{{<{0cL4Iec-|2o}kmCt;?sLg3%d@n5!OLy^Tq@?~&rV1HQfq}ZEr zDZCDW*bJZ)ngEbB2 z0WD|1T(3*0t;QilZvc+H2_@880cc-S2@STYrmhhq*^25u4Ry8kh8le>Qj_@ZE`syI zD1irTbGSUoy--V>Hb8LT?|le8LEQijOk=tPrI$nwbr5(hD2f*n9O&x<$Z>^Cu2lQ^ z`fCDUVDt0E+09#}q=tjAm4G4t6=8wR)(>=A=NfG99v->_eDD;X}Q=4^jAz|)j>aq=Y9$DYb80u z9-{Ag9oARRo1rol5=UoupL&ccCJ;p%9D;Nz6BFY3Z~CA2;Om*{n)789C52ayohm7) z>l*&*E7MxJ&&+{sXdMpRN2nd(y^jdi9sS08sQ{(fg+A6Y8MAQ~eiOj?Z+YJDDl>UD zJVo=7`mN=}d`4x?#?7(+`2xPUGBcvRtsONB+5x28X14SV*n7>rh>o<8d}k!dSo->@ zdJKxqlXPvjQ*J&EmM$lUPtH^n$tkv5?Y7K_@UZa6i16^Ru<)>msOb2($neOxxP%dj z2?=q4ABu}lAQdHVA%g+|jsd$H;KBesy-CJ1cYmqj721(7;nDC`6P$C!BGVN$W!y#R9;z)@)_kdwFZNy&Q(L5zOk~}0JCltDkd0eYmGHE zV8G? z`RT4EQ_0DlA7ymS0mja2Fm*hDYS0hB6a2XQ6`}U4ZQH*4b`u^z#=sI13@gmt+z-CZ z>C$oxKWt3L_wPeCiP|k-6isKgeD>w%h{|2JW&Nfc#7f?CCw(6PHD}O^=_T|ce2$^o zs8fQsqhg0gB|UH-4(uINJe`Cjgj(dk*eNRoEEiz(9D{v@;DPS}C;o=ve+NC&S~`l_ z%uOCK!dJ_SnLJ_QJ!w$M0_aReG48P!ADMU8qD6~l&wu2F$HwVmDq5OMrl#g517vY| zc|}=yMRjdOJw^%#_U1+tOdU0_Y2Y}?_YP3HP88X63+w-{>msCi*DRMs{E%cBk7!n_iH~(Rcp7=HVVKYaak%sz4xL%MRgt9zUm9@rZKvZZUd&%Jtcgm1}W)9_X zY4|IZzG}Z9FS(z_CpbDL#8-tApC1M+0WI3wVU1^70CCFY=x=Ll8`OOLFh+hDqbQ6K z@iF*8zDkYtO{PJQ#vV8%e0&&UIh8*5NM*;MX9E>MUtzefP_Un-y6<*B!%L3--O1q% zd+s9I;lSshw?k?&8I6M;k3uGsSW~53v06Up^_aj2P~ESAx@l7-el3C_OZ-}JdwKLq zUcAE(80l+xCZP{ihU*eKfqD+trj`o;m0H6q=Ux9@GaU;GGeChvK8*#1v3Rb)qU+a) zHNLR$(!~oog{X0Kp|Hr4l0+)%@@z1>qfja=t*~;#_B>#<>6AetJ5TNda^E#5s7`z(>qaI&{D8G@{=ud}aSyTrink_jvLs#TZv?%t&n6?G`Qr2K*2 z-kE?YD+2Pn0Rx!^^VvR3A$=ts(mxmI4^>nrq&`F*F{8q{&V6;Z#sQN7v3#=5nx*cg z9EbPP6zUsYU}sk?++cXMv{qLY7TRnbMtut@TSMjIDNvCJW>9pK2^|O4dcH`g3yh3K z{T8wdxVR9ta755CmTz!C@@*z+aNo<($4vcvo_+OHVS8SE;*LONp~ zVdFy2(`ASY6;qL9fB6aSxYg|b!UqLDDDW|6e2f_%V}|1blgUt}?;|@IKO^m&&s_NYtYb&3I8&odxA2nk}&czp?e@%IU#RYZ zeo)?38gB)%w|rDy{())<@J0iv5o}8+J~#t#I0y{5rR7a-Nx(Qx!Z^l)TVla2J|0xb z>xISn`K4v~xp`Nv6%|weW-|CV5+GpH``2fb3ARf)Y_y*P28EY#ljHasU z(&|<~SX#PGP30u%#Q-+V;GIl>eZ_^9m-{9x+>f<_k8!eki#=^Qksuc z+6sn~eY$S7&}ZzzrFTvUwa=oVfx{tKilfeV{N;0}B&YYyq$#Q!^(!`UUVRHI8ev_t z@W15!3$=|}gN^S)YBm3f2VZ^lfyanA`;g(>6T@E})YIE%aXS0V19obly{Wd&*wE7n zwHrqqvW@Yf31O4T_)zvwBcYOE#fqY~3m2!Ukh*41rj=AnAN|kTi8y~%43-uc_T%k~V(LD&a zFq_-E9SR?X0yRYrx6{7!Ud3yUn$vxJt+K02iCPSuMOX3*%6e6*5%cc9=k7U)@~9~K zP#VsVP-6HSZ`kcYPtT;b1A%=Hl_^Nywd>6{sSrkmM|048xC`MyDqrYczv(uL?c`~BP-6^>2V-Yen+l0L!#AP_%%4YpcZM(hI9M9ob52&^hovf=61xq z3>ONW{cZu`S8O((h-Y;Og^0c3+TC0S)?}Ak;OcWLyOhc@N`QN85vaIvq~u0P?q$%+A{r%;rRtHvdr$3)iP^k5e)_%ILh{CFxX0=opqzJ2+-@78?}5L^0XER+nQSCfA_lBh>g z*4I#LGS$dN>~Qq-I9)wvaA3FD<$y|>MKnON`KUu9pMQE}Ow6d!G10(|h>9II5_<+L zcCZI}z7rV8?(8G*L`is}ad@JkIA6b3WKdyeudFODy-{LpXl!V0Y^)}SB}7(XdzX;- zOuR@X$t*ty9Xv8VcJr1^pX|736!|4aaAwT-=_dqW^QKLk8Y^QYQtOQ~XU-Hf{A2#K zEPj7`$%Tu#M7`Y!BW;HK{`)4j{jzVbe#@4ZUIHKr@ABpArandzlJe-XWy_L%UAdRD z{3@zp;DhtBrLCj0t6pEDuc)bMY9cy;3m!17gvNf37pcjZH;y{XPxrDY&M(T3=I*K>*xxs}ZVKO$|^X8sKo(V<->~ zfThl$_#i^`a1bWql@C=@NDR=yrTM?^*!9E236p1zmUI=I-gDr5(cUlPri~8Se-5W; z-h~UfRTf6pCyGx=Nf{fob>FEA&olJ%p2#J^h%xgXU;HRew(q&$6n}TUux-Yh@4mNu zdMfnz&xOD3IbOVQ8WqLxLPG;ZJ&f$izdwdXvb(6&D>8H)L#-PMXbL81&r%t=FGMGZ z0%0Bh}pZN~+9O$k2tJU?>k)1z{C-29k;V%i@pQxt{BbsPI>56O{935KJ_V`WpYAl|}w#QJz!9*K`l6{%xmD z&|Ve~q%&tPAWhk0D*KrwhZ0^_+uClbYi~C|_7S@Q`IjYE#w5r^(Ft-PQu?7dGB|CL ze}9t3w`QwWL}n1)AzBd0atbLbLLp;g zEo@Loa|G{PFD$F7MW9`6b$Kb6M<>LX!*b$uXG(|!b9ll>XkEz2;J^{bIE|daEpM>Wq}Bu|8J-+5mAadIceTzvYy-Ix;06eDJ~ciRYVc^xf!Vs>Ir zz+F$Tcp^>KuxAh9m#tE(;lLN4e7G&I)(5a!g92Rm=9_OmvtZJ21&;TSTh;ME0ve*WopXyMu9;*tPO{Rp%0G=l#$gK+r zjSTXQ8VyhdxjcCKqM)GB<6{El_$F848wP9vhus0(?aFo)!!VYb_)3 z*OIV5W)Wr3=W*nHduTI!(R~M#L6ay_l9Gh5+Dqi(PedXs%ZSxwEM{UFW@5*Nez0vI>V-#6IimOw=jnzYKOjK#ac_TAOOj-PmH{ zgS(+p#9Aq!0=yrCDdQWWB$va3X3{B`}!TiL+MZymr={% zWcu1?iGTSyQGq2sJ!N`m)7h-7C2tgGe=-xB z+=~GGuviXUkK6EmJXqt8FEbodIM~iIwF~o`vywg=2k?Sbq>HV zhCAr$fPnNKim{5YJ&o>>!RA z-Tec=mGSlS5db)ii&Vlm1VwR0QmMdYMewvh?(ViDELub%5VF@s!BAtfxWobn&*|jp z61Y89c|^qLpRe6;!emW)bosLnjSaJ${N}SSBK@3g-Q0l&6jg7u7oU!q3d)SoFMb#T z5KF&j-M;VfwidgGFH0jj6r~j|jzh?szPL#%wpkIv=ZT5d@xwYVT_`mWyWCM~Gw+4j zQ~XY)A)M?L_cQ!&I=upC0(4^w^}QlDC*LRti<|xEBMZ|Ld7-ITSNCO5F>sTF@VuA9wY>e%p-ZNItyn5^nJv`5^z?nYgezB(hIs?F zD}8mz$=(0ny?5vCGr2`D0#%fk7GKRf@ypNqfBNOriR|-dsdY>o4`Gj1i^a|nVX*8y zw3h&bXQjl@UxXCAd_ojfxAPoaF5JG-h`12BG$=46)K}b(8m=8iLodfmJ=Q`N)}a?8EgH`4_WyA$oG_u3vx6>sAaKHcZu?fAHgU6ged2dLMT` z$N2<@L*e-e6V8K=UCjS34C544~XOQH5_VU$&8`TiE)g>@jlORs+|1dv60tuiZ3V)B?tsQ3T?l89@ zrKqmO+-K=-tSJZRbX!+H2Mbfmv%?Gq+X+qo&+`T zuoyIc7O}IT3FTDlNo8&>mxTAJVA6wO29{YDexf-NZ&@a>r~5}tsBgLQ!*^d_&#mli z2$(Qt%$TuW6`y?;7EXjPg0pha76I~)jSaG(94n3rW5R<2&=!Yl?#~-%JeQ7n`W4>O zZ0g_A6@m_~sPR&Mp+(N|9L?#fhEznwAI!ihF(VB@?szL!A1&&Gwzzxu?p;TUJ84Aq z%QJ(GhUQ8=RJ`IAQ(eiK{Q6qswQCKnt-WpiZftZJDws*2@59E*!B1{iUq^2y#w;_q zt+}t;t@PG(^tHFd2?YUuzG9yV98Q0|nM<&cRIRP8wT5P5Q-Rp-b#}FwiSLa)@Bj5_ z*8o=m&_xoTmq>Sxm$yBz260UT7aOd(PlMWyS8-c@`p$9yH9QW1g`aR?xY7Ff2^Rg zPb|tk6hun2Feo)c+3Z?jQ?VZW9=CWYbB3C_#;M@mFd}6$`Hs?Tb7q|`oytg zzaBVn;OObHCd5mdsxF*7a_HBe4jnpjBr zo(+*>w|FvcS()DI-~d#f?rX)FyRD<9y1dfVBl4C>#40amm&stL>Cj(3SJc$(Kpks5 zKtBkJw8IeY4LRP8@RK%<7stVah*vA6e12_d^DQ!c9oFs^tlbNk`*m2mYq55daXRMn zD2LNwg-L;6YayV<#iPtkO^v3;wri_jzECpH_4vrG@HB3 zBs`|i&cS(57Z*0=nP(oGIeOgKF-h^T9>yl7J-qC`2_Zf~fdLAtyNzf3`LlH&Y$*?% z`A#~F)dvwnaf;fhdG3y|@_+C6>gUVXs(HM?sNn$$nIAQYQ9B%_k_-8*P8$h$!Xow> z7AV*9g<`J5N*ugU7I+V69L8OPtQD~SV4W-Mk+;QF?d>yQwZ9g5@9vWG^yyx@i^FOYNcG#`&%hr6)q;5+XIq z&{2~oPaYTL9Tpt~KDC%BiA)H6o+P@h;^er;5Osv)!2L{pE?=JLZQ8qj{rZ2`NPUxE zoQ-{K-QZ64HMNt&?XzYk=UmJ=eda>$rQCd2w2)$2dZW1D;+YFY)zvkIn)2e@yi#oO zwN+(h#RZr1@-F3^&n<)@^+u&0N3^oC%Im!A1$i)iCqqkdbGYsS2ZwlTEx;;JKJBQ+Xt z|>qP>^$M&yLOOKUuef2(jKtR6k2%6jS-o@<+j{@q5jtlKktC z-HlLXh=ZBqL<9_BN}nmWJfuKD^*Zp7Lbru|NFEPbjRX&^iw}(*Icjvs!j+eIef^7p z8ov0Ax8Hwv@mOWqFW>D=oe(0G_$r;9UEOw(FL@@hGGWTOb5o~Iq4y3Q%l0-?0SgD& zP5Ago0Zp5#+O>R`Y6N0_rr6q7TTpQ0@Q#UnjhZeyAPS=|Fi+P15*A0gcw$#)b%dh921SZI_GuMj^UDh6mJXESHym;=+>2sIz zsBMHJ+H!7LuXZyf=j>@e+H#+ocE#2?`7i@K*C}0}eMW3(y1( zPl}hJ)^S)=s7fs0OOz_aaWKJkC-#aELRQ>PJZ|<5hm3=HH(Rhdxw#xIU+KfO_6}-@ ze9ZKJMzUNY&g)|G@elIx1NmrWSe-HoEl9-O6!1nQMN<*r4V9&*qtyf(0&yOhs@rQ| z4-tD51w4=0tfO;sbmFB`dw$K>urvGYHG;Lvfe-p^?w(CJj;25oF4YbN+{IT zRLY)EZ>t4gMTh`&=4fgLx$xL9R3RK&tQNciBSqqxiFIQIMrs8{YE)fYld-O@o`n3F zOt2{-BeE4TzO}JdUtLvGTVtrh=GagV9SjjS6;&`}C((Q^>+tj#4H3}mbYP{?E?XaT z1~(0r29|9arDFwL0;Zyi^qcX(NcphH;NalM(bJ~f_1vnLpMPM%ijWDTlzjsZ8`UpN zdlQQxOZr%Bh_ZO=hINNpC|@QiVS!gWMT_Jp@k9`FP;Fav=R^02l`7D-LY)xrLvrlOMTx%v9@KmC04`1dmr@Hz#N)swu}xE8$j z(u+$bO%Uf3R2-vn%9}F*mQA8jH=)WxG85YR(HS^a9RqY1#7C=a(yUpFCe`ik^d2{D zR8)+b_MEU}D<;MUlx*F)^|ClL;ZZ=&=tddJs_SYE0E#fc><{z5USCsLUR6<9Q-?!g zV{N0U4*yewf?EwRNVm7bgq;LwN(_Q7w}beP5k$)6aa;~JM+nGRKJ6ADu#`i?ieX1a z9sF*(QQl%4QM^?Ei~#(M!V5};Tq1_AN1@jE`TME;G-`q?q|#{o0)i0Hijemt_W1XFTWdKO&rG`M6WYK7ssfFQ8@ zi=DX@H!5?B^3LzydEnZ$>!{RPT4}hr@5dAQxu+@*|NPC`jVBTJ#mKEKb>-I(8E;Km zkeqB}G)C_bJABnQY8u^R#nM;CZK>;W59E}t zlZ>4ZE$2vkPHz5u?e_eRilTa9u;kOyd~56F>>{8TGccpk*fDti@d2HkT6)UcB!2Nz z2?7!B4dt8mqsVI-f&yo|S5nzpo>ZFqYjLJR)>~C_qtrOy9qh~N?HQShoq4VBw<>*E z6=(Eav#>+|>d~(cQ0oQ%oTqH7s=7dcX?F?(uaxxp1XC}gS6e?V6li&hsHwAN={U-! zooN7!!<)CEN<8MDY1rxUMmUF62cKU-y(M#Gbp!001k@y8VzjHnT<;vJq(oZf(QnQIw5cCj6S5!V z^WS+)H`Al<=2E-q2SvJA|6#%2fLoBM!qXP7cuaFSaZ*i12bbIK^wRkH%gxno1GeVM zNr}HrOr<`fKHxnu!H*h;*^+tze+Z@u)5$M%x*3eO`_zw@$q~g4#9f?P;^$9wGn6|O z>d8F?*tSPVTP@pCa63C&cJm$VGUgM zWVCQiCCo9H!}nh(kQ*ly2!9X?gh2=e!XbnLVUgQYAVW?l5H2AU2%8WJgiiC z>yUdOyhHARFwf9EfM@?46OXwd?1OKn5dI+)2m=ucgo6kL!a}#FFfnum+T&7!=D2jAwdaqwU$4Pz+XZafH`%s(*|xvSm_Kk)7@H4( zXSWSLJK}mgu=@D_7nZY%WJ6b7hsjt}Lgt5|&q@f7TiF zhI-^fHJECRP(dpztMw#?lmoR$3m`a36DZ@-4g^<1ebUOUwU-+xDA7(g1@%TN=OWJ% zhewfs=dkr#b(3Qg;=`miV91;~XN-L0z65i@sWWFzpS^hg%;6uu*^%E6lsbC>=W4I9 zjfzd5JvUJ?aCP_h>p$I_M=~rX_{d3$8CMk;AL}bezGs8kiSS7z^fh0sr#}2}PmLpP z`t<3ja!@A?Pka6K*B?z%Zu;cI4|T#zdyie~^_j8~DV56i-g#|F+MN5Bzx2i%ufOuz zd&^&*2|K`g?Ac$a=S0+JAKq8wn2;7HD%|(Ow>QKIPoyJ&V>R;gb|Dk@1*BjiYvCb! zXPeg7T#0UcP-*z)EijwWRP7-6+H)Z#Eui%cp# zw123uEy#dH@EPjW;>@Ce!rxlo<#6#-{@y@)(8`5=(S5id0hgv=Bhbp7c32(B$;QDG zibcA~N-w3KzmJbE3I6~pphKn!@DK3yRs~b7E)CK)RU~WEOQP~e0;f{t9|YTZU_gMs zFEx^M$>nQ-)j_%Yd!23`YQ2&!Lv4M|T675$ZZBLII8{mjS3|mV^T?Ojvl7W0hyf3h zNN|!j5CjThQj{uReABnhD^0u!iF6ZOwIh#QxQklFUPeJRra~V)W?CSIU}g z&fb1)7;KF~k;K~7#}_LSQ>O&^Nhq6*uax`vB5EJtb~fDvr^q)rI7lOQKlRk2c_}H= z0^2KpI^%0AI+cCM6a+}PCUjL@+f{2H`}mVDPE}h^ZTx)QG5Bh|L?XFD!sW~5Egk(_ zmA_aQ21Wj{^QT1 z$F_t0)5YCmM$LU{<@3+4eC~w@AHX8akiGNP^6AO)-0!~Gd`X#-js^A;9B5~#tPI2v zF$$q~U?{htp{2zL$Ah5*2l$sg?{^fN_8q=h+SKI?8J(z6@TpXSAj*AXidPu{;l!J} zv)ykYK%;dNOn?N0GqAe40|k|8YfYRWABk8kv$eFeSL9u75=bouQv;xv$k)oNYJtSp z+}J_#6cs&)Q>rN_7(Xr{#7pWU*M{MA1h0b9S3b~hE1EUgR0v9eFFV*-4`1_HsiDlltSWteUB$3h6 z8ypbTHI1D;W`V@ho88vO_fm?4mJ=I)tpCs6SY4l3iAfGC@r@HJ@k`*ai9esdQCHVU zdGm}1_Fv4ucCDb`=#M*g|MKr$zwG|5T_WwRHdGg!JoL+fA3oc0rTh0& z3^U&0uymQ_aN+m&BOI*T+C{?6I?a|4(SW0_uH@nsePJF>kEedznwJmvZB=6%qC@I* z6YHA!!GWPu=1-KKx=?xjr*$7>es{5s9}vFd`_t9Zq~wXQ32`cO{pCGdw+suF^cU^l z`PEN(2Tu`WYq;CmCCBN=+k4WaNipu469`=Y?vfq|PL#K|&?N|vakL6}oW?AA`Q?}I zjCKI6&s(R6kr^)LUTNmgO^+>1O8rL~#B(O~5r0l%0H@1D3<4SQ=jSvDIfm2c&((Uz z&3GP$48i;PH#&VLzLko=bshhyDa#%MiYF6}xbxshUqsSXRaN!)#>_!COJ|Lb^9ELj zWT2xLzG9uctGxIZ+}A;)J18hfqMIn7ds|u?FaCV0nI}?HR;0ZM70RISps>ggWy#UQ zd4{T*HYe=`dv9p4+HC_;56y+OmCuaFYEt`H^mBPo5~MPJ z-@wTDgqWn6bBFuJj!7OndTfx(OHMtE2bB6Fla{=|!QI`+^XBt8{Y|~Tey~FHx0dDC zShNaYAt;o%A-Tf=u|1e^|JOQR1^5KI$^=XvE?=@psKfDW_{|?3vYOI+;LBCuOC9*~ z9q{EU@a5F~r%DjHWD=uPHu(U3?6F?ZF29V_*9$Y(Mj z!l1aQvHE&GIQ3ddMMYIzd!Iu$t*nuUP-I_kh5XE=vh2+uyf3d_6dVKBUWn9P`|IY- zn~&!oJ|jpR6BdR{TubYX6MJ^<-1*aQS4!b$ViIkw_4PeGmQgjem-m1nc3rWCC*AoE zu*||}K?7k;Z*Ly0l5%OSkS|wzM+^^KNe!DH6qGeCp>N_FB3ffJ^j`j*gCs2=>OzP7#T`{QdldeJt0i!aZCo zbF>+a=3a1VQ%idvVN+Cg=eBh8_G8QEND;w{Knsy>+T^5R8jU7u`a?^XFOh~$KJs%k4Q9zJ^f@Szh$r9YC=?;)T6`2DVf zS2h$~--FnxeZO70np0YO@qAv+iQU__e~p~yXMtvbnqCmBPDzNx+zEbd?zWjBT*1R4 zjl8#~AA*&H*wrI(zWiF=)*nxSZ7*N1$vIP4ryFair9;CuZTj|TaZl{X@!?#%QgZSO z?dLyyx9fD(Nr|hOj*L&zbQ{kfN*WQ^Uvu@BZ@=A&=1qS;YZr6qN4?-ujZ z_dfVCNlee8)@WXs^|aX%+II2Wi7xfjrHFtwdPPlnE)5RewJ1KcEG?aE-mzStxbd@= zg8HoCe!Y;a;8{&XumGU~5~N)P4b&VY^0c_TO9zL9sH3T0qRW_I?`Sl^Gv=nGO6Z=v zZiiJpbCWa~fG~S^T@0`29 zpQc%VZ6Rc`q-oH5L{E+>WqWQEho;olae8QTX|N z@$u%v7dv7SM#*{w)R9lYMG@<^ef9M(`9~;Atu;DfjM!9qxh8$pV8UT|EOVll6to1sgmzysOK>2b3IF5v=wC>d**vbK!MT zFK!*B4P7!dsyIKVQ#Aj9CFygXeE*%do|v)Zfrr)l6Gwk7wZ;A;6^!?h@Op8*BOq?l zGUCM3Q4II-dC2-j@4zb*&O{e;6XYzT`9=owtTvX>Y*mMFV|y#mJF=7zb0|a17g&vF z*4MYT&?*s9+7vvqv05`__Lz|H@L0;Y4SY&kqPI^<%6$)|$Bh~_ z`i>Q9-@s7|9)I%5Cl}ojpw_HfHZyhRq&x1KJ2S<{ckF_DC#TK7dmgfQhyaYIMN057 zIE}KlvYe(;X#xVIET?g38ScV6RH+OPS84gZRe2X{-Gg3(Z@_E2Au zn*tBGZM@I6Z`-+f9wMM-;XpJ8KG+3_FqndWDF8HD{(lvT^hW3qqZrkcDe)l@!YyBY znGIc}ydq}Al>eKYOarLxadPf>bLAUzfwF>AA7XGifLCyXG6Lh@LY4i0iBc{RrKq&P zW^OAo4|h=MQ(evQ23YN1VDEYJ|0HCSeJH0;wD>WJ6p5C@hWTlN{iy$y*cIQDyD}!( z&o6P5Z%EX{N7AtzLW7|e{O=^O@TMr{&lsMVJpC=?lO!)#FlWxak1Tm;=JYZDC;5zc z!&!_fHg?=_F6A?M>0L<9j)_tS$NqoF?7!B8ud$kNAFBz+vzl-ls|j;%YQlfr_Gh-O zm2I2IwvA%jhW}05HnuIY0Eo_QXWL$3+lKy4+jh3?bhd3R+qR5tOO6Y-^8(egZR7vY z@8I+PUB83(Jd15x!S-9u_8apz{Z_O6KFPK{z_yKH+eY1NOQOV|fUG0?Dv1(b0$G=Y zB#~OU(Hk4^0h3u%#f2lk9?Z_ZRMFU6kBYh#)kvy=|JFdR zD+2_-<#El~AFa`SeS>xuA32!^uRqtXsJdIHt~h!!$3A-cvb!dY9y|FT&%E#o%qa{7 zfUqptYvH+jzg&l~x%EF3_yGppZ0Tv%Uo9vsE-g9oQ3{Zww5tzx$BgQ?RqR9l*c;r! z=H>U!hSB6l*cyKTvieV`*?Cv&WSlU*#J2jc3~C8Xbw&04I)#&xIomI9-?)C`x7#-V zYfU=c6-|U@obvH5!2c2itIURduTyaLL&HTJ_aN zL`@*Uh2&y3n;MXV(M-@1XjH0!$=wYYtbqZE2B5qGt_wy-WEKu^7%^PuWE_QjPG*wY z-+%bHu|fVg7pUbtj*x4*bU546*unMpRjK^cA^tcB`1vTX4F~uJP?PZ#kx||{wMY}7 zmWz<*=AtA%K#J}m#N`Fe?cH7NT_m1cXtfObKqr732wu$saKkuo!#Hq5!1YQ~WpT;L z-;N(W`si}D`9(ON-ngL)*uNt-_Q!q4 z_Z_~JcO@^cAore!AB{x}-F`#~Q}^h^!-mO_;mdK!V;RB3`LH)Xd9RK*ymtYOWgZHJ ze~HQ20*gczKDXiLJ%~`*FSwp_vROU)xfE)122v*pglKnnb)%Nk-N9k#-`36q)F64k zLG9B85J;2827p|1I3gZ$6}vh+Fy@Vo#Q52#6L)tHAh6MntTRSn=|m_EAe{h{rBucZ zlWF27PE1XZN~Fj^Mns=XgBV4sODAr#isdkw^N?pMaQ6|^fY#eZvb14@AZqTyBjd6a zcZ`b~{o}#~$=gYdTDC1hX34h`*|ueD+xEX{dzNjhWZNdOZIx_W+26Evv2E{T+d9~` zN7=UGziI1c+djy)t!LYwXWOET)oq`*k8PXwhkgg2_wV{0yyqos+bXu-3v9otzv;Jv z?bpS&Jhoz6+(eb+U+S2b|Dl9zYq$9VF(4nF@yqPncGt!OhYITt|1hL?qO&g{{9|@o`UcW zp+J}iS2^JxTxEoPZhsGif$&Wf7how679tb~4-pE4iEd9}Xf6mJp+zTQBtn635}`m? z>Gl)|GvS*~!cBw%VJAX?@Drgx7;2EhU+YypAOJDrM0;F1(Hs{!TI2G&#P~0~M8q8F zoya-TI}voGcOvOX??lwyw)ercz+rnI%l0m2d+$PHR!;tx-iMx@v?jfi=A?Jhp7c%# z4E8<`yW3sZ-JZtoHV?ZSsW=x+f&~$$*wR#QfYZoW4|w>h=GK7tv>0>7Z3Ft475igSk!9y*j)f3VTEwk_vFnhYm*enyPBg;07yK-fm z&PQ&myqI&Nq|PZ1opSdBv*xBo$C$JvLi_{w+nfWv}bMYd!~`@f^LNpdgu=6n9 zN>f_R&BofIvWlj*()^qqzt#h428V6E7KD(SP{c68$s{_ZXJlYN)Q=y1kZUT~v;Es& zyQ!ZTzNjzjC_}xU!oDJAHhuZ&M>bZX6FlgaU!JoJG3~#(BIWj=Ik&?6_=5V3_d4|s z!Q1d(!O%-zOoyM2TpxlR(ZijmQ*O;_YiYB%qIaG8CWnoJN#lrlyd5dh_pNc)F?cj`i%xczifyxPIdt&`x7TC;A91VBP-%$J2_b!Cu`$mWq%{v&&fJD*bHY-=ojSbmeNPG)@+0d!&hW|DWBlf|Fg$ z$#!wFhdEj6-_$GQWRw0<@A%XHT}zGM^Ua*BiK~~W5^i1fH}$r2_4aVG*_`YJPImTn zWrY|+T?64M+i~v%S9yZ@NY#^n z_(X}->s-%>p&Gs zeD{uX*@dkmeXV5;{gUXp3w;7L%9=()*LNffI9zb|eb2tUW=%gis&aTn?Ar0gN8fyd z*;gXr_13<&I@v{3t#_-qRNg6&@w+NV(^oIY{`eqhs!QQb-H-j<`#2%`7QgvKzBJK$ zprO$_Ff49*;`|f@pCHNv9LKzo{G9|G_7%dJCG90xy_V`T^B-9R@AY~V_MTWt@-W2_ zsf5e=dHyFZF6)=rC-hO37TXYCC3EX>_a~nG{BgqH$6+q6yRYxk2>|k}B6N+T~lt~_HA7wAevbB#mtR4yA zzLt+?hLDIda$2?yefJGkd+s%s47t9t8VpaFUU{aVdVVPBba63kBE;8@q& zQ&-p8R$X0lIls29r^`@TSyfPw1IV;eVEklI^g0T7P9+`I;7@5OE3Zz+&Q^4nme3gDS3se@<6K4K|2Q$9k;)YDhqHg{6Y^7|io zhVgp*>6aD+Isg^CXwjlbYtvEUp4kTXjMujBe%yCp=WoYOojh^;Lglbn=BPgM^_I`U z!>ao5Y{7=_6a0FE?|43X;|(zQ=de1W_jIAU@{P*8w> zP}tng{m5e+uZpuJh#zM@^oyW3&w_rJI?c1H}V;I2siWXsZQb1RZdQSP;%Hp-M}sbJ^MFbV9pED)Ie16;XJv zD~}bQ{OZ&9zo`+suV*}*>o-H`M8v8J4JB5Ud*&BkefBY|Dk?M-D81A(I>BB1n&y&a zu&8#yeM1tBzH)JtZlvDlz0hJzMgGx#x@iP>k@Q*$D6Nc`m|Ok*3!h4d>xW$HfTjp@ zi|X&k+_o0(yS142*7|Hbmv#JnZvLi>jk|Vc9sQuXvdP%nbM_qYt~55!aJSyx$RMy%W8w7Z74^P-+ z6#PWM=pG)o%?Rc_!3li=?RhiW^Cq-s1metUO7gCpKXdxb<>E5SNOgX7ZQg~8KxDm| zUs+q4a|HkpKz_&wGu0NtJ9f3Ws-5JKwp0%e0tckCzNw`FP zH}0vk2i=M6py18NZfbbrs5l92+r6um+bLPwrcbC*%F(9nV(|2-wTx^ikX&D?H8FtDDOI)ZF4mg_>AqHeN zoSZ0^Q-F5EE@}kh&?Xc*`R=+1rA!qL4GoKljf?S>^bC82dize9mJky;DLl+a>lYdp z8WBUBn#xLhz$UoiAcajBwXK~ZbOg2+~9MF48C}3 z{M3b;#M=;x|6lfW4ks(*Wc@kWa86czO;!>@0lg5|>meMHp(F%eL>hJo%17O_$64A= z;iq;YPs{wEqWEd@HTheiYqvrEQRv#O&^5vn8?|@u-e2|>SAt%lp|bEwab@ZGqh}HM zQ(oNASW#YDRepKfwx71|*g^eBRsnsGqrau904LRF_HOw!`w|d>OG{f2>tiPKz73Hr zQK9~UezGn~8{qG&rn^VGix2LdK6Cz@Nn$#G^JgFI$}jFM-}}`!+knc=^M-Z?%M@ag zRm$-Sv-r%Q?0u*KBIYKPg^&ekO0+-1x7HY zXnl}vbd2Y)SS+d_%b~3mG%%riTiP9-Dl$`wgtR_t_MEv%iC%%e2-Nip^in7^VUvA* zRWb>9fs|T5jW+DjNAJ2}X$s&tG5}bjgAO}|!b~p_0z?BmoI@zDq1ox{kA!%)pu|{C z>qzh2g5G-uy+_zgSEBbKiz<&~GMeth=G;LUwlc6@1m0>MKZ>wN=+QS6;bdu?!9L^^ZFBkzKSFF_0>%>-Y1m zA}a68Pu|;cwYX9`Z_%=Ncafu~^75H8r_Wb{wTlr5`1aw^OuuKIdHLm4;bTQV;8=fK z$ynGuFTeckv$s$4@m7lUa(`zdEQor$-*b1}%bTAL8|@&Xeos=n`OhW}0io$lET^x? zH(cV${H85ldHdbB&!QeEE4gNp8AIFp1!Vr%F_eU)&gfwj}yQng!dSL@*;@@S$%S8ra#eH z%a~Z>g_J6O4OyTA28ypokb5?Rv;*?O| zAg_SvX^C;+L5Mwv(KtLz%VnAV2=nM}AK)pqC{U%~4Ix;wre@H?M>;w@!BI^xMG1cS zjp)TV^kN)(QQKfSXU^pcs)un?D0*tHTp?Z7(19%2Asb(zlb)>| z8&OLv8@6stMK;}2ypMm}ya+b!@7wp?WsD4Q!GFf9o(Gp<>oIwcM9Ef zqAAYSuDmu>y$j=J4_HP@scdQ+qUaCeH{bGJn1_|}XM)V6igPtDPP);^^x|g-<1)EG z2+otT4xM%w`Gi3HKsu&WHaaw_@=uBp!*CIf0LmRlKyFs6HYh-ZxOU#yuui*Rl32z^ z#Il>4uf{ii`n1I}ZdeL5J|Um42R6`DcV^rT>f)+sWt|Kz!lb zTJo)id<$Uy*PJY%c9QA$4oKmx5psC5LlSSAYZ<>2wUND@km7c-nM5*^lVgcoCMQK- zle+#YFwE6>J14n|t5L?uj{S{nCnu}sWM^}-g`BMUZ)9^hSuH0!my-n+6?sy@-^hwN z*}FMeDJT2u|Jn0OIN5tS*?vy;Bq!VVH}$fd?9#u~JN~?X*Usbj{0Jx8&eeO0t5^Cr z^%}W)2@oAx=Q1aIm6J{Q8`<%8mT|ITPFBRpdjCas!dvi%hhx-LFRalFPCA>DmU7a6 zWT0PPvvRn*4&r2+IN6cw)l58Ci1vi9V!|uq&&jTI$$~PKFn#0E4|CFg=+=Mnatvi~ zch07d-?_I-K7|H=7SXU2sx0F#&pzQpn;1<Ohb9WwiP4+>|7*F82Kf!Z9i06I)^J=`Is;aW2F#l?9VQGGDxxoyqvlccwT{~s~ zj%Z_ZV_8mNeNA=#<@`dh&s0-;VFTx0y?p7?rOQ>lJotK%mM>Ebz{2V6?CQ6W<${q6 zbyVjQ9_A8#*s3*;+%+#UL<7U#H!=Z9*?s{*A(O(@{U^Wr{G(r*#evhGUIYjF7s#H^ zl-!yWYHzG5MRq~Pl*QAO*v47MND>ad_(IPBmBaSL!zrGYx?c$|@QGf@lvU3^|NLW1 zqV)n_iCgsRHvkN1kWQdi;JOX3WLhf=8L6qPsA_S#-|;kATEBy@`2h79efO+IbR5lx zU9^t(#>+6$zsASCa6_NQ%4-HwO9Y;C>(4bXb=fhiT%mK9d5Aqc+?6{0j}`W`#gv}p zZ@8{^`O6hXcnn;5+W?hOu>yTOkPb{OCS&Iy7#a}gZN~9NZI^At-=EjfMpyv*hkB40 z)z)qXuxwWs04XwL2oS~tvtt8&7F__}+5)ydr-M(!E$>cjCTD+VTNm=EN9|61e7Mpl zA~AW!g0uy5QqyKljrMhOmrq`Lk3|^~6B!v57dJa~W@L19Vsc#ay!rFyQMaS`jagkN zo^LZ28Fc>bmCb!4L%od+-6Or`K~j82VR1z(il@b#4Sol7~3s;M-WF0@1b^7R$>?`Fp73Jl1wZPxI3PK`AK4AstLs1yW^mVXoQ~YVE@Wr1vk^GAkiDIKlC1*b!u`SzPiE)r#_sc9 zQs+p{=FdLdQ{35^ivN8wBZa*UHA&qBqc)hwfG$EN7o&ldwY%AE+QTrD*RBe+75})= zRa1b|Wga{Hmji%jgx1o*bPz5z=)|gl`_W4s5{L9QtLo7IFP!`t$;KdgJ*KGM8b|a5#)d`N`lA{M5uXqXyj}k zK_Bkz8lV&!5SG&V=#cOL@R|fggx;7AYO{M*q%RA<`Hs8qzVr5#E0zZNsMJW^2@VZf zdLytFi3w(LO2s;Vi5ptVZby+?xdW^-_-gHTDzqL{XD4SV5Vr7i^#1MW{h8?fndtq% zj=s*u+S=mM%8JtJ!tz=G4I@>dq^ha8wz!H|&uY8ra&~rMr=3g!_RhlW?8^pgCS6sf zca!@~3h|5Dw&~05xr1Q;5qz1~Y936-ka-ZE&}Z1!MeF7A_3OcVh{m**?qj6;vi6b9 z4c-{Rz9Nf2uwH02=AAvAlZJH1&!}&B|C*&rrCO+S{O6{GPFYN`2wfZjrV?Pg0TGxA zmuEh@>Jr27`pPdeyvxEz$!6Pi@g5bD-;{K}7g$?f)6&t@G0@XH&}HfCXzw=nT89UL z{|x|estCe@fZ2>RQ43lsFm7rS8Q$z*@q2-4xx&lOGYqgu3E?whCQpftn3+0%Mr?$? zKej(NToVhH6ES)4xESXfdIBx`5?b~nwCq!8*{9L6(>gj@u(hnMt#x5w5NJGOb3=J` zO;Z!tdJJ{dwIxNR6_r(uh+8r>)Ya7+JFqomB5XaTx&mxhu9P*k_Q7}B+f`N7)r$|H z7pZ_re5s_-)NKV`P`Am{W7GQytkNKVf8UVUZ99Mb^79Y(nzT2syeYRRuP^Y{#c;uW z4?A}Y4%t3Z@nsqxZ@<{tNrBpkWdG=dAYgw35zNaUY*izSRuSkUto!V<&rVmLJzISm zA8JH?!3xI1N=ZfjUg*NfH{N{n&Cvp#L<#oKiju;z+9fD?19DY&VUj&ey)Rf5X|AbB z!!MA&DtY?`W#OglqPp@Wa1(AuFA;~}7An*2iHDOz)7HH5+VhK`zl=~Q;k9?NjKEww zC{H6{*tqEVh&|%XAOA^^qOdfyGD5ny3t7II)J==1N_sjy9eAU+=#wrL)Bstfw!Esb z2H>Hc4aFCa96E9)>&$_jU%$U$!>3!m{PxGKpKsctz?mJzEHixHfSQY#N1=+WUJE3mhQ0b=LO6t>s2wpO?4rwKs7I8q(xsgAG){U3Crx;tT_%xNw4Lk1w@+o za0vVCTDZxiMErj}xhE(8Iw#-7)&AZ@?W5PN{f{RXbJA=6RC@+}?a5u0XH0OgOxUk~ zNOSj|LSq!d(Hv3+Q5{kSr#qw!qCD3rgYd|Y-v{XeQU=icKsPd z14${Q8?RH!pPq&&5h;ae5h;bJ5xEmik4PCrk^XlXM3qPxM3+bz+@1WnMgHwR2wOT~ zjr`Mna8EK*&beQsdnw%*fUeEG3num9+)O_f4M@Stt)=3>2MZ9;vssYmGB zy%Y}2E!1|zc>hf8#G3zs`0ZzIyy>jN@@e!5vEklDpFD#;A?L?4 z%gY;Ev3YLo8Wp$GX!O|Df?h{&V>u-^U5z?xB`N2kh_rl{<<;(miR8v zT&3>V59lU=q+Co30_V<6lmpgq2$q~3dqO173=Iz9gN{W@rEdep+Z9)9v4FFpVC)6YG<=CL)8-I*LKys-Jx556lH z4tj2pO9A#%n|Q0JxeSPoG*iRNzu#Lc^H9oojOOAuUv2v0^RJIrR~MW=efo5j#c8wS zybCnKf=9!w#h<0I&yq{9rGXdxKnnG~Zf0L!O9Rfl>TAjhiAx_}hU%iig8ZVgvf^e+ z#dp9)7#pN{AhI4C05S>UFWLv}fb3i+2FbS_Jl_cRVwn(t4)!C5Vi*~Z?!?RjmjZ&+ z-Q@(t6Vz8?fo^7EVz7r2A#)-Ybj9vSlhjU43RbxL#!gR+MGCqo5b1Ddl=i9da0kLhA zQKFQ4X37z4fq~eTy)7dnLl@rE*az&_;m&$vFHR9bW!W*%3W~I$;jXTBGeL70YR5dG zlVJiAc`C(-pl~osWam;s@H)tmAZ&H=@zA<(%>)^hmmdJ$|iaJ^>^tT5(&!RPJT`fz%vmH0h;@F#RL+hsmb*> zHru;mFSQoA4uGc2pj%qTXL>)>M!2j4QJXhvb8%UhSE9!!Y7}Lclw^UPJ<@~S`8 z+}Fp@s9pHsCpX>j*E^v7{BTG??!v7F2}OUp6C#7VakG!Ay#8`W%pZ3(v4*a)50_OlS-X;A@kn^@9U#w&v(F5^rk?wiZzp*?$}ql1Au$oN@pSi~oEa#`6%zsE znn6GV1GTWT6TU5Ak_>es2BaG$$Z=<5mae9{>}(qVhZ!+`&dI6kwCOxG6way>r^heZ z{pGuW=GX|zve(7_sZIO#e?k~7#2YqjK2ex+fZ=c0fD|b5^Xso4IIveIad-heUSw&# zv9>Py9SmH8zXFXiS{^cYJk4fP$ZWP)&O%+9W_=U$BrZ8VaX zo;SAEk-|R86W5w=gd65gHiWHVmpXG1`P7QE)u-79dFH-JDeMzi`F(|B>1}DK0MVeS z5et0-&^m{DdWrYX47_?K&f#zi2ni8F_j^0q%_hXao6YsrRe-c9IDh`a6-@TU)du)* z;3)>bj6;{eii4Aq_k4SQ=AAr`b!a9DjIx^lIKH+9KNi%1+cUOIa1tBcSY z8>siF_xP_Yrye3s#>j`(u6;8NQQWxP-?%wJ2GEAEu&}mEKcpe35SQlbW5=#^5xdGh zIARmIX%%Xqqv;Y_+KP_+o}E+I0_2l1TjRyj(sRwA#~o;IDk{2oA-BfZ z#EL)O%I-2TrC{O&g5L7HydhGB94Tqw zn07csIEx+`p*0#0{@BWb}%y6MzEh@q$ zgrRY=E6mQWtjxwTvOD4S`+km!0$M%t9QiT3LDz>lr;4hAyC;T?S(ifnDlFN)_mP~O zbeLadaB;d(zmh~eCe_*F?9rvtfpp+Hd1q5?88Cu7`appJ_7#S%uh-F`q3h%@aW$Z} z@l+v80{aUxLUgdi*KwYdiFuyv*T_82?WKzvxxJF&S=X2ygUX-fOLAg9vO+m+}uyURR=VE+8a3eCAX%+LAVn$%GXRLot(^2tqi?? zzl#;VtKHDm-_?ZW7Cf1maL&7?4X2Zk!;^5@fbalM1$a~w@t;0E40v~G{3q82Dd@8s z(PzX8OTiN?!V?5HHgv-23g2v7Pb)C&fMMNO=X$F8LXbe9c@*GMZvb8bkMHAWXQkpy znaKpMGxC&pM7rQ)3j3Hx#+GllY}ved>o=z>9bN$u-N=M>J^bDPf`su;J|6raqfW;Z z(A4j64)eojm7dLNaK@&myV`;@1i#fj{qCi}+xZefVc$_dV0X2f`d%94Jycs=E)BWX zOpS}E3rOYX8Yq)d?~jX%(+<}+w09NmQEJ6qHK+FPceNU6vf!51ntDSkA~EYK8UT~n zY-nm}Z|VS-c$)=;Cg^2F00ytO6wHflEdqMPnkfqipcX6$3tO-tAV5zmRagQ8URx21 z-IRS)qnI4&rB;W*5JO`t0$@y0QyDduI>fy?MLlKZ?b48#08dY(vXJKgqwUE~|3ByE zW~dCNCiZxNVF7TUu+xh_{!r#b(J9k1AMd|63$uw)04*Ix+ z=p&_P%~T_uOx4o$4=p0i4eK|P_Z$hOJaSDfMPLw)i#u>Yry`Bo+c`4QZRqOlKoko( zV8{@04H?Xc(5PWy0RcLdr>9>~fHE*5Q0WVFGJJu4DU(*Ih^m4V1o$NG?mA9iBk{~6 zUw8(dc_KqcH4Kt*cDS>rf2h5EkR9q9=CXvdv&VIEeB_BNo3@vg-3V;?bcF71c6Q+! zWq(5Y?kaW#mi$l#QAX5b6OT_6P}oK}TPiA+fHvk7^#R*KmCzAX0o??a^PSWoj7D-s z_Yd;=^Pp6Ta7sqgZgLq12ejFv}crWC2VS}14P{!66^~4TMee{Y(@ZV z%{I$A&2V!`hl65y{=0W4EZYC!`+EzEFt=rhHf-4N$+vs<>jbaA-q~eYCtSM5vQ7hFH7r6-C;Sgq zD_<(10Vrxh1^Dvt4GtiIY=J(qbsCX4z+dAg_)S>5hICFvb|sI zh+lN+_ifnvUA}zt&ES;9fDt^FHGO*42h@wHFo`}Eyq4g9>l!jTNM3y^4!#caD&gkD z-;NIy{*tq2mwW)C`k^5+5dO&?v9r@)z|yDVBZ{5(fzlm82VS0PUmxrj{r%NyY^pFU z1Vh5kPT>a`vnh!(O7Y1aJRG3-HkbZ!xlg0W1CH8?U|g+7pj3qDNu|Tn!Nj zk4}@J@x;*rLzAJiqpjgDjgrgAz#@m%$P@}O8#6UNK7QUTMid70&1-EpvFebhwm-%dF?vYA&ifhAIH*h%ZWwRG=D`~= zdAO0~g_c*M<^3S7gytx5Z&G*%>WXXNAvi+N+R*Q$MWv@srQLbxM|cTLn9Q+HCkD)B z`ovGfuHHdh1rITq+$LQLB|tDhQxgz9Gs*T`>gA=7X?@`?n2cY*UEu0D+^Iykh z4~7K^Zp3#)DPoAQ0QVW;|*GsxZ zz5)Jzo{k<8&~Ym}pPkFj!}ldvSe|6-*t^*W1Rowhd+BFl=e{m=SgwBi)hGMPy1OSL zI_~5mI&7|p4x3HjCS{agef8Cz8o`u19>v!kQv@}8zFIGt7u1Vr=ib1%x6wZIOnMFd zj_X=O&!T;)Tj^Nn_a#Jd{*38+FI7tO=!n}%8Yzjz|Bd>Y_x9YmsSxx*tk3P^5gjxl zI#`&D2<{k1HWaM%5)J-F;-X!b;I~1M*UA)%Ssy)Y!YRF>rKjI6*Q%7BNV6WMJc0fJ z_DFWf)Bu216QVh=Dh-m1V?=WZT+tk?z&Xak%L^bQg|lyvP-+bJITfDb_0baG-~zMQ zORCa%B7c|WE9G8FsgQO8Sxzhmr(i&UMk-aQupQ+>IV578k67XaKbD;!8ENe;W^8zd z%`J9qkoS6*mMlh(lhcD+(Bn(c8OkHVZm9e2JzpOMT|7rz* zvs+sc<0Vw7+ys4=Oi^oDK~+E7&d8gwP07w~rh1w?MrA6Uhq+Ir>g~%vpNqY&`RZ>+ z0CkC=(J|P%^fw(#5WI6c&J9+>wtWTb;-+&j;GSSSfCj4B^u^Y#Tleid)HLz3)D~XFuJ#maFYl%!YZqf{`VC?OGLVP% zF8_%*@H6UUZ@%^R+i%~N7_3&h!Iz;&8XM^8SWAEheEk5C7J9k+X+3#Dk<`;ytC1=+ z!9Kp>!C_u{lpa*3x@;g_;>;lk*|n3(iu0UxMy3kn1+P2|+%ST(&;PaV|> zrmdJ9>+?U?#3t$jnNpk-!JelNU}-o+eipcvUtT3h&;QpNZr~;)9w)sQ((rX>@Jw*f z;LRNm_*dzFpXH8_KU>S$3t5$(o_ST42seWSauSnVr*`P(W&>c@?#>Re*AvxZbLzhd9+r*Z3I z6#g9a+)lLp`=sGa2<5|@JB)U&?L~i@r$uN#5f)w86?Da6x`Co6*U-Zht-gnbd%688 z&7)|qDB6#!Hvp0$XnC@qzUD!wEv_-v6je7@V@C~LkrMAH_gW{6+V(Nu3lNBJ(ftB4 zxxS5sYbW;k8#a*Fddjtr&Z5UmshAee(QI2PCdf;44(|OU;%Y#~7x#U=96a*PhE8M) zbagdh|G>~_8obuaJ$-@bGXQFKQt$q{yN`mI;P`b3C_=&XO((ZLBF5MynA@D}ETjt|?J!o<5DV5%sQc zWs(vKuJdkgvkbL&bauA`!58s+CIFvTW8uzc#AG5Ku~9s|1X@K8bi*U1^Om-@7alxx zINxZ*_Sm<(^x&_1f4wS(o{BGCyjaxKiWn{O=c9%qgx8Mm@n)k{2x`W1vI((7ko4v~bPq-Wl^33stISo*StLU;a z!ZkJ*%k(?^(--r$q{1h>8vDT2#H^J(cD=?EwNav;88;(akOa>j2HVlc{QKrtRHR~q zm@By>zPrt+o*9b%)=ikZ|FtFkiQ~>GPl-ib-t)`(>S}BrJ&ad&?*MwY0~!V+Su%9JW_9V>M3xw_o+Kggc3sO7t3mAQx>VRL zI5eTYp=HQt1va0ZS%>$%uE9-oH5I*bCwhg%M=nIK%tx;T4-exg-e_!UHkFmaCDhP5 z+|g9g*kHu*z}T=EXYwATXR@oR{!8x&MV5V8RaMJfAX+!Ex4;WO7mH{L-j@JSay23; z)4<-ekiAi$%f3?dZ7P^7A4V5F%x%?m*VS-(2sc|yU8J+JQZcLU-~}e%N_@6IU@_ey zcr`7Y{Qhg|cj;WOvcjvjAPMy}gW*LT(*4)Y3Kofif)L8)?-MjNCO9N?Qn1>~Ds)czNOc!h>w@7eqxZxZW@bb_zrN5)$L%qQ%nQ(z5byKGuO*v*m-Og?WV?kw|sN z(m>Vdt~)4Pz4zZ+AGY&#eU;{`b}wxLTi)T)*{MVW@vX zLfrrV)}NT=|7=b0<#OsAvyLxGAt*xpkuqu--VhOnsh)2WU{>Ph_!NvAlG#OOGvZ;L zL&i<^S;CAwY8^U%-ZqGta@2vjz81_bP2?SZiVOuNctrRjT@~|Mzhx{F>mD`O)ip@W zi{Sj@=j?M*m-^IIU%GPX=&|FaWtjT6T{;*(bHxhmtg0Z78ULV3j(qf!Cr?(^G_T`N zKXq#QdiST^ndVthbj5;gK>X8b)HXO)H}h8oaBIL$;mRk%+{Zs>U|3{~PycQY%+umN zfZn?@!FwJ5((>h()(7*2gz+~&ub9WnEvc%k%+GOz(^Z&z;Mz@4_14AYSa9WB)|qoT#Z~$FRn)f(6DbdhPog=b$X4v>%m~vj%Da^P>E>SuxVLM= zM>RJ$HR(jTXRn%hQXDG3!@e1$`}WxB-q5Hxg|R;SyN@>g6fUYccP97fPv3t3!;Yij z$QOSeYy2@LS|SOVY&-Jxx!m88yRo}goJhj+@1DQt@tb5#9dJb0K}Vnf6!V1(I$@-+ zzNoe{4SVFzsEoj;R-`D4_U_(s*sKYgf9Dg=ojZ22OfWh9k!PQM_L0T!ksb6V-p5GS zmk0#fX{%pd`zkn1GqFp+CAbUR=?BsfIEqW0xnbi+A8s$`$&_-tyq+FUPu-NT>9fK` zIAYdJj`p9LG$kr>+U%$qvnJV%jr_RzNx{)c)2Ag(i}jy8*&ipCOr)`zf8&}+D@bR> zp7ODdR>b4ebj8*~Bf%;}&^W-eXG69DP3xwFM@0mvlwdVTiVg@+O9ix4s|`(@y)e~3 zbZTO3L||x4bW}*7RKS-YJ|@6!9|7Eb_o!F}^$7B^o9h7w-aRVScxe@)Od)u|K|{>I zBgLHo-;W+&g&u!(yvOfBk54t%)|#7|032{BAGz&CdFM|Y-uv^wyzzFXaI5xB#{u@r0vCo12*sr=_>8u7qstOKO?{FJZL~bQqgk8lVHU=FWDLRR@fU zy?YBzoGL1BXOSc-6Isl~NB80D(1nsZdth>MviD%S@%(o>o$f4k5$>xwIXQEf5Twu8 ztPXUq1Lu))F_PtfJl`vxazFb1iDiN8<&DIjo=OO4J??r=*s~%51UoGj{G z7vui=Blpf2&IYZ)snPhAi?I%=9+FINp^%WR zBQ?X>R%b%=es}kn+(+Z@F>VdcM6VN%KRNlIKnTt7l~M-8%~(cydHfy`Qo8BECM0?Y zMuEY`sF2BAU0H@`Ae`}(<0uZRAVW7d>-@eBNiw(Y+4)0$zG1KDzO!H?CMM?SQE;}jSoJjMXZ;W#5uy^u69?X}hM#`iccG{nG*t#u zd-r&}&1pubwY67O)*6sAfU~3gQhcZ`t7^4g3;mfiFQ#9dwDOL-7f(`H^Dss2$f~Y2 zG*nhqw^;e(!8sNtNDBAWZa~C zEAEywN{@)i{qzejFU9G?mpD|?BLrnT7N;Ks|4I-4Q*6&0jQna^TG|v*Px<~WTekde zVB9?jX^Pz0(~MJbit{H?cJ$ok#rQ@t`3o6BGtwjQBuL z{vfn$fTvK&K*4MWceSPTg zH-`#qYKsq$#bx#%&eY?K*4$olBB!IvSVg`n>&zVoKG{)n&BH3mDl-TsMNJ7T0}2!E z?8Jega?_rl_w3lZdDk`P>p4bqo2j69)GKOQLPEmn(^*xtW@J#c(B+cVye?=ux@-5& zlQpgPz06L&f9;DeJpba_|8Uf%G8*?g9=$m-fSm!~(#^up@2 zxajEkTh^{xwf3=Fm)&^7G|k{O_wPfDX4hzYtEsHUf+@z&&wr%GRCVCvjV`A!r|ZNH zxEC$_()g-yH?ae*W$l$zl@Z_y1emyz^$>+xR9cvmot4+sX|~Iw?L#PQeUl)=lZA#}O>7JtYLeVs5fOJBkFo5B8MS4EsifsXza% z-ZHxLvuR7QqksVbKfJ>QW6>m=fU$TuQCzzl2Ep&S=e|B-n;as+`aesj^>>UTS465r_Gv8%_Dx@QJYgB z%nTAKv|bwNXg6$tdb8at2sVLAJlfrb=wh6J@VIlYb?EI6(c72N+v_l%)}yx%kLU_T`qZ6C%*%+lmH z1EY362PQMv*V)}aV%Ph7dU^->`zoF7?C9mMKHd0z{!rMmTb3+Yo;X(VE6J24JKH%p z7PvQ<(eL>A#E~nfPFy(G?iSd6?!txaGg&82p3XW`(k)aIs*ZU|Z}Hd|j|X<{Xu9k0 zM<0Fkdy{}4e)lV{ymChfZ7<#N6-baafAu44$Gy}S6(d=Y4tzp2?y9P& z=<%BsdfyF*F((Do}vmLq6t5>fKD9<8^WnbFv-`fHw%cSDN7fU zLAxx72qxS7fMWEX@@zEdNdTZal&A%eVf&Lfz`67e3-WS*xbS|gqI@fo-nuM4W&y^j4 z4CJGPV0*m_x9K{(^lBl#00$zrkJ3m{9komNlpO!<=eG_oJvV>f59FMQSkKStZk4oUC(t?Nz>?){~FH^%&B?b~uS65AC zB|{JNF_U`A(FQsB0D~_mDypigttq*51(D%RV}fD8l-O)S@r+4&ESkJ6XOG{@E@PL% z1$G}B?>e@7fF;*G@IBpz8GE5`N_Dh{9Q1fcK>>=RiabPQFjeLx_af(mmi!L9UUDNf8*o-Oo!{X$aQUVe>8Sd zLdyIpFyiN~czP*3y3l!YQD%huuID`z$V&!_fgqyRciTvf#;CwnC(GTz9)U@b$? zt-hwBtP@vjYvbcX=%RgF2=)#x_xE?#I73#w`R1FqCohZBCxaDMB$CR+{t`ZLx@5@D zmT7SS=PAU5&leNbWig~==oe3~fP^`*GnXae2v!^u6RN=;Gnx@fWW1)b*32MdenmqM zcAlWPYin#QLN7F2sh1N`aM`8dc5M@XVAc^(hSt!>DHUpK?Gt}dZo+rc6GPOX*fY_E>haw;s zF+LK*AQuKXWk+*)8z7u33Q9^ZUCu42rp~xh6-Gvi^P!2@m0T^u%lM8D`~AFQveUkvIsr>dj-z*g!kWn=0dj|JtufEI{mq~Q>e=s)C2fy zl0!@%Yrrc1GP&sYcj=YYm8LpFeS@*HwPkRix3dFvX97#Oe<%|nIo6Ruy;3Cc5KCB0 zZrvR!xxlFeX=o-NDQMn@>Ym=HaL{Az)%9>zLM2zs1GObv%W{c7{K2XR6* zBc!PbTk)~hs;aBo;S2jd$IvdBGH>AxX_4Ll{p0a*;++8fesrc^ps*qD%z^K|KWpl+ zB7qyQnF%wsjk)zCJ!^Bot1Hq&iog8$y`OU~8O|R)dAev=JNv;W9^CY$NiqfI*>)_5 zzfn5gJx{Di!>P({>J!lmFF*g_Qjj@|$t_4E8lW1HGn6mki{9h9Hn`mwY%M*XwP(k% z+}Z&&|D~NfbrVz)Zz+z{2on}8T(KxZt_YJ-65TAza6Wud-NuUME`hVDB0o2;;LtBQ zSym8C^!K(@*LU?JUbvyL8#ypOLETlmzdl+~QrcujIK~Wt-3}P-jte_?9y6o0`4sEs z&UXlHz_cRSIig7^DU+cePLbTwK1>TdBjN%DQlV2fOX0I3UF{hX=^ZAEMf&-9 z$pvG*?JX@WHCK)vsH`=%!>usFlZt4mTCGN!lh>^`&588W#frse&X}zpm_Yr!MMG?V z*^&LpGiD`dM%ym_j6m`O<%Y3<=rGOa2WtBIVbSX%k+UG-IqU=nv(u=!_0g1hE@-+G zwY78?jQZcMp51!f9vLjr`9(#=#Du%G<0P-5-r|{f|4Yx^|LK>%l{-{P&!uCZ@-&G+isu#mWIQI$U+3q>EB2*nx(Ie!0KKbF9@kDDw3Y4%S|&8ftB|ht8Po z-BD!#h@dW#rbF(0?bTZX;9yk-4E*{{jerl&kjH|j);b!F#&~HV+Pw zYXtDI#xyjOidU-26ynIy(M0oz`Z^_@e{+6?ZR-+9bKpW`M2CJd_tBLNnw6uao2FaDC zep`1{w-}8k!~wJ(K7u>JRty7Mz@sz$+Ar+CYQoGa67a2~ z7R%@;T%6W1m}_<#DD$XV7b}(egN!!ZS4Qy%%CZ+fm@2ni`0@K4KVKd43e?a|l7yMl zPnEqok{SBCKSBR$&W7t&oD+EY;fPo#GS*qR*y&gOaqy6?jL_`Ud! z;M#V+1!%5|;Qsj$7vpx@o=O5^I%X>TeN!3^0CDj$c4sX{WXw8{)~>@Ad0WOJ*B}2# zeIR(|p@(iwV)#{2QB}HFqp`ULgsw<8sVc1N?*KPpOGj6y$yjvi(8XpWnhwxUE#^k} z!#?>KaiL;_DFMjY$!7X_&%13#09wojFTDgNj|2n+V)&%t-eve;schB7I%%rO(As7} zz@&#l#6va&h}c1(*w=(EwCKV~ZKQNo)>L1kbdU+a!t?t4h;!%4^wEc&l_wHX0#?qHz%cgyceDu(k-QVR3Gq$MiDPoiEe(+^Q}i#ta;+j2cMrm z+b?e6)bPj~Zo;Pi0+0ufQ#tsa4c*v7U8EXe#&#lDO~YgeHf~&=3<7tSNyk#U0iCX= z*72WQzHuYNGj!WBB|-@fIzIUDut6V*Eb;a>q$QhhE{%8`I6G_W8|%O_VQOh@Z8NnZ z!V(`~Bk%>lSk`$UHbo>9i;x8are^`m>ZGg_3FBkPpaFoS7kJr`FO5|Lc_jEkj*L5Y zgMfx4nbbI{(0F_Mc*6+I;9;wyL&g?NWmjn=C5vU(iI6Qr${M$?NLC;@N52hyN^piq zR-m5&uIg6!Vw((2pfhj8y|)^Xw`gj{DHXHsUgv=1RkqQv1$@H!P99&_0hn+j`_+eQ(_WvgM1U^HAe{B76Lf-za>?SXPJt|2;#Ux{T zJcA;Y)Z)d1!O#Y#Th^;7)B&28N6I>*^J{L#8VwCabZ%pV zm=nvC6DhHXa)>09QzRWDhzmAqN1YR zYEFN*2oR}HkV`KbZM^X9#xLtqv4vem)#(DwhUOMftRQW?!Q9c_&}3@s?1FkB%hb>e z8^)b@maqpx))>@joy2K(+Q*zDBTikQ25PQ=YAY2U8gE~Pr^ed{DO5gQBoZ6JvfI+N$cR8Uznv{=$FY zH(0(%LZAtpHyGaTEI_`nAB6dVJatCOf+QMIG1xL@lDP{@tVfksP$wugu>Y&7*SdpasK)qFMB&!#RS8K)1%Uw8qn+p{K3SzWPoYrY2m1TUAsK zPDv3m#=b3JHg1(lG6imOYS@xVjp-B%lH7eG6f*Dohuvak&sen>0kplCz3ku&-K2T( z#Ya*?SKe~>?Wqx3txr_io%h^Lr8AT_Bl4!qW-3^p4npAN2r;^ey;-0ycZR>d7}>PH z<8=`I`Yzo*oGBQ!IK`q&%0m}zG&Gu!$lhdVFf`TGH4s+X-o9=G5*dw62DFjEK-#L= zfS4)FE$(O=w2%#r0;B{+LIXcRg4vwak*NkIQ8Yqn)yNT5s+B6WN~!S;3<&i1Mi8n> zMScU@7ood2F;P5aS3~yO|=E;tR35>V?<=;wuyBt#H2l)8~L7?}4ty!{ySL zxJ2Qou+aS5C!cTn{DUtKrLe*LpMLrvJC*$hn}B8KpNvQMv(I)@DKL51m__%!`LBOv zN>njP@#**9e?N|i-+E2&c1ya#ug7PKyyM_Upk94jc4mN{BKMhd`->W(z2Lg~kI1jMNX&tl7}mSXJLtS6hYP z&yspjP-2!gG&LAnyINX}uv~CuS(WO>-DBvT!Rt8Ps1Q5duenA_CrmFii zt!rtmt23Eu>r4g`e^0W%5mkU>Vc;z9`FhH{gES8UR`T|j#>Hy6&JT+{rgacU!8OT8 zP^!ZY?20kW7d#&0jhSc6;ed~6%z=0Y%#s4AEdB@@LJ5VLGN+yFXKhv-j5DyHojULQ z(|b>x9E_ha&!fKf==S8~OW%IFZTnA~etN}Oi$(QjEDTCk32)(P=Mm>2XSTD-X>gu* zp24SFXR+(b!}m5;z)r?;63ZqzkKymGI?c{D=K#EjE0LvsH=D-#IBo1CHogRj?0D z{MeN*lC?ny_F;tZo!T>y zbIfn{4NI2IjZKX3;QxQDy$4`aRrWvr-kV-ClSwb6cR~W87fA?31zc>byx&Z^9XAk zW$t@s+`@i}oyZkKby;mnq8 zB?Br}EwyP=CKG@Ee`aG%w9@@z!z)Fk?Mm1BW@WDI+Zf?ZK{R2E@Ufx)#1^9X*#1|P6 z5fu{?4Nj7TgfZZZNgfj)7abKA5*i&H8y_1D402RdR77-m#HfJa;E=GG#N?1*Utb^8 zBmr3o9ssHP!Taw=&HUvqTo|pT0Z#W#xP~aw+np@1`C}L(4bp*|a8el8xCe}D*X(bD z8pV$?S&kbw0VngVH{qVy(lE27V`h64x+x9Kkp|rq+S}XF4Cn(fuTZV82hH8r+tu9E z)6?2YUeJK50Knx*i=i={G{;pk7g0*63_MicGMQ%Ah8_ElR?t6v(bXZII3dN?W=GAL zj|ING{ki`919{X|WtRWcCGP@t0{2VMj43G(y#N0D)H6=!Gp~z8ud5h|fL`$Wvgu9GX01{P_j{HOGSFc`;K#9EX z+}d=^o}~8Da~V9~bDK8fp$Z)u(!nDDc7Lr_r`4!I$2&NrF`3nB1Mnn3`WSF~@bLno zXhoOPPAgd{B4mLF<}{ls6wFNI6Iq6sQFMbGX9NPP2Y8vhKp6YS2XM^_B!4*-EuV&# zznRtQ)rjomhK8=L>s9p)b#<-qDOVvSnFUUHYZfXHW7bj9EN51llkXfwJ9^W=#qkcB ze-?6KgPd{nNwo3%^x>v?LE!4jW4-G&H2F6Kf z7&0Y(^E+3IkbD5L$|8(0H(5^nk%r&ZP~Y9t*xcON+|<(9*wEF{(EAePiKlW#kIn&tCdL_qP%gHrGzy}yEDfyDqv?h7)95?!eBz0_s5TdW%o3^#R1Fo> z^R$=_VeWeM^;D!7$4-YTovWM&{EqtxHZStba8WfV(`bL%UzO(A@7TZ0zo);cv8TVe zf#iU8^>(*_+YPta)P&I`#3F`AC@v_{UlNIm9v)ui9}p<^2n>=+uxOHj#7yQF=pmEB zn}}4F-`&C0rq7{GpGTX1iZ*=?tz!?5YpKb72etrHtN4TZ53vyufOJL<4ME9q(+g*s2c0`wzYTnbXJ%C zk+-d_b?w*PUEe{u-SoK8%pZH)cyO*TVgCMMs_da56KO$n@B)29>L+i z^14}_<)sc*)wN$dZ{#F@kmx!mmbzL6Lo;cvPVk`o2ViGt+#Vn%R1y420} z?L8e`EyLYIy){S#`;;dQ<5s0T8Tuuu8+n(om1sO((3B#%Aa& zl(1z`($6^JC80wl=NJuKF95O>^kG=ml8eZG=@*4T3Eo#ON40j*h0Z zl!5csyZ7F6=LdfWSI z4|F&eaY%CHOa%jJz!DYNi+0#r?sKW6xDXm%1QsgD@etZx_RIQ>FjU>Tev9gKdB%xx z31nsUHJFU$*Gj8PO1hi6J6d|%2>!Jm2JuRPddTcQNulj*EmH}kR*UvJi!TyOvjm=@ zp`KhnKkk?@a3hGP+_mU$_dW8&Ba0V**x!$$k>bPqcb_QDy--|QyaA*WJpL=M@Jq*y zE6pScWs?E*dXxV8>vY8CXToTUQKe(n^QGBTF8u}~#>W7;ilBY71U-G^+1GRUa4PkM zZ+TaLbIs+FtL2s7efZ(V8HsVz7CrIo3(r2CN_~}qg~iId=r*?ut-M`$E z*IhIy4Gc+E3A7xpG|>)vZ*5kTObS*ODRFrF`>?sbA<(`8A%M&@c!bS^p`n3(0l`!# z8c;rYR{GnhLg98zbV9%|}JDsHTzrk5`q*oaKJmKFzN}lWoBH5>)Y~B%O_Sqj? zI6rkqwf066?8tbPX=~J^vrT}$N_)%zMZSDQ{ zhk0W4n^e(!VsAx7Ef(~ZMdu3&E}T1;e{}cW6Xy#qS7QC#(GFs??CcY#&zw7#S9rY! zjH`()p`k5owrOY>1xqk!og8H4h#A7lsUIPZKWbZa;&6(jN zoy{EsdT_cnRMvvCuvhB@a>J8F4T2@AxeeembRxwlS$V0cc}k9cXjo^mX9a?I*J98i z;yW~`H4TG;OF-H6dO{C_VsNf?8JSbwglv2a*?1AM@eE{R9+iQ`=>lj)@;;wZX2j|c z+3X%NV~P-o#9{FePk06R7Q6|Y^po;cAu#W@0pW^puuaL)u;NMp-;^QjU4e6AE8!A z=0z&Hv)5!j`TD=#Or@ffpP+J~)ZbT}$uV_bK6dQ8InXhW-Vm3US?gPbcgz6`i%Vdr zZPaI+7v|4@VnHPQth2$uK@0-Czn|(%kGoiofLK#?@%jARoSc){2M-@PnuB^K2x@n9 z)n3d!dHl%!M|Uef$4{KqL@Q(QV5F z_jsOR;P%CUMbJYCRo=ENyaQRtf-HOkS$GGs@DB9I^tRrf-kz=wxaqqAGi=536ONpY z!Jz?YlTP^Z>k$klb%^>ArNEz{Vh}|qC=0}IE-K^g7}J<%9N@LG+u;WRAGO1d_yrF* zNIsWEAukON3*5%eEci#3GVl?rrk`W1!$T|Y%Hi{OMGMIUFYbt&cwT;NrzkJvMvOrmI)$>M%Hq zOKK?=9Sf3HM6BRMeI|~oTz^xyY^%W~MU3&!WFv1JAqS?nvRUQrTTpo7R9C`-kKT9e z*nN*b{=}?c52=6TEclS`b3V)d2wUueVCP2uqO*NIV;lO9C}hQO6JfF4Rqt2 zS8KZlh74?9FlzYDICkvt;lsnJk4(bj-FH8Hc-**@X^t~nNb%#xh^8h1t||n$WS60j3z;V5b;YxR47^Ae}|R zRt(&JsxfSIkCVplg-$1y6I?cyr!20?Xu-;r;R;;^VaZW!uD>u!krVq;3{Jtu?EJr- z)V(GpypYk5$uW@05XdCKY0H5@X(#HnvSMHm@s2Kp_Ln1EgZMR~HY$0;&`?9&z);iJ z;BDD^zl9&=yprECcWz5vYHHp4O8WixmDKwxaddQKcw~5VbWX*;m;5W0`jT~e3w588 zcLD4PCD!Fno|I*c}<~D@iOD-2)>uzt4(K5W_hY#-F zvf{ZHEpFYno~Ywz}rAlTaQ^;a8F_51tvTX(@V^@dUzqz5Y&Xpic#48d6R zh>Gg$*4iNjup~)AD$8(hPgIOorWCb-YRb#Y>-vlswN*t*R(r>gNg3WzR|3>`aZQ_= z6eI7bMvAu0Hqg}8*FuURV?C^~sf3Lq-ZBrS_58uL0ONiD0QAz;dwPSS0>Y@PJ9dCz ztFjSs{xtijf2+M#lLOq2f!N*-3QRyzMz`NYobnWkfq zC-Qf!7Z)j&fvx9`uLr_r)fa2O1N&HxJ!LT9{x?4S@WZ#?{1;al2w@e;INFpbKZJN? zd|;3ahVt|KR;^lfTB2mt=3Qt57mN-xt&CpdK#bdqPIT9AZnTFH;Nl`bd+YwsRG~8+ zwe?NLsCf^*{NBI+_0r?1o?!XtZqX#a^N_Hlph*762X_L_^BP`Pg6I@_ES}?F$2>^C zMZZfwPd`V$O22|KRu|bH-M{e8zuyT6?Kx@~W$YeDZ0H~KBXlIpU!hVd@QFyCw|Md5 z$6tCU9c~a;c8T!Gq^Qo?Q=3rxOht$+-(`CBsO)Pu4)sLInM^5*EB1f^Gb&~@GWAmC zE7PdW@cBL*p=8?IQNh`1?CY~&Evwi3Qgu3tZvElss?#-e0^*|^R5J@3Fn_dks7(Y_ z!0_mS`g&v$YEih(g!QkPn5lZCEj2a_GD2b(vz$gXQE?q6t9_)Ytu6NY`P_{gHto+V zDDFZOr=_UiyQLcs@7Z~=6x4V9ZRG_hCU*>FnW$<=yU52c9Lb|mL0%+RMN?lN=r1Kq z?@0foXcd<$^YyE*9mte&J%dp7ZcL1i05T9iNy!Qh^5kcR3Bk2(;Yk?13Bn$m0+IqO z(AbQPFbRO3(a9F8X7Ym+04_u(O`bV(+LY;O)8Yb=>K&InW%}d^r_Y`~b>^M-%uio9Cp~>u{DjFf#*YdQk5#Z7L0-Yh6Jq1WlBSJNm>i*r5s`@# zCZYK4gvp8V@uL$HQBOYo_wPqwLds+p!YH@i4_oZ%;{NoJBQPFx<9?L#YLP!~MCR}7 z$Aw3WFDA9OMN(>L06`OjmEm#RD?*Y5NN~d>i{vI7KvI2U8>+W;w6}M*wKUe%)pvK* z)q;h;wyw6J0fFNhe5yzMyc2!Z-qrvg9zky*bdPptIUOn%<{U>RFk*nRX2HZk(F%mY zxExi|cb|zBySK!R8JnmpJ9_W{_z<`Jv~kv<>eLNk;yi+wi;QPjKSukuba z+OncLd(aDnqhS}dg)S(#9}sfF*>e(;_ZGQ3j9YuHSQ8Zb zH?WCiU?x)$_VTJ9XF;#w$sFd6Q?Z_S;=ZU%_LvznrUhp*Ttk(TDcy~dD6U^nS5%|n zkHc(<7?9Rr#A*oD9Dt017GXd|NMeV?bH~txnnAAvW$6{11f1>Qm$q2!G@~NRCnl3j z*K8&Wg}k^9gv}98QY9f5*Owq;JpqpOLv2iCbs}L3k3fRKCdD%GF%X5sGbyQJTwc+4n36# zF6~*a)PiN4yMk@D;LxSPEy%mjY7qwRgF*sMWngF++yMO}MiZvJ8eAEQ7){W1tKzP*E0hQJPqzbT0uADApF5)BP35zm@ zULLqi;S(g$b(QV?V&x~mpV9S~_J6hFljY07va=aGc}#u=S&&O6dMNpF1%uGH(?h-- za6?{yS7X0?^vqXh0kQZ!wTatQTVsq(yLUEL&bu)@z#PqVbs^rEg@g3wt;xv9Sd-VQ zHn!g%!JGq6`mKJYw5x&i&bN4x=3;#?jWfYLe& z1o`{}g94>eNUe__Bp3lI8IUkHce2^ZPVi|u$;QoJ!a<86GUQ_B=b*(nXt7M{Yvs}m zri@yJH~Jo2ZJ}~$COq=kjKx=khlyyqe_+3V-lFLeux2J`f|l;ChDOiINw{q+tmvXJe;fKmFa7-Af$_+HAR>QVB(;%Cq^t8uH;VfP_e$^T9y-f zHFX=xA<=9#Dv~O`C~VT`wL{fsj&A)dYo))RsQcn(Hz4w5=X31kxz%rB+Eg+hP+OGX z?j*0w&=o5vFb_1fq5q+N0Wh(rJo50vXfMy5+WmI&1cS>=?NY^{RQG4ai^wYt4h!eQ?y3I(5T@w(A&|~JBTDx z8?w_z`iD±)A*UftV6K-K68$R~MahH?p`9pWkm#4Z3hKm$S`U~E|IBuNnuRRStB zC=}ja3Moj7ycJk!BV!RjVqYJ4th~JesPqM10_7htmBs#iU|-jPU44QByOgc#z?gb& z9@uSP2q6PP$N+HruDWSL$POz$0KU=;imhUGPQy^opi)(=2__sqgW9ZORpnyLWl%O1 z>tr*P==$V77Rk8zJC5}ljzuzV9>cK`YdXYKSLbk_3I0}@mq_FbEUv)P+gaDw#ukW# zw*LBh3iT%hq7iEYECyCXWMqR%A`sZvJg12^T7(L&1N9=!=0U!c6cES+)wP+#XjO?~ zaen^z@#ebxGp9}!l!8C#+_B@wPPNLu5ZY6wr%j3Q*|$DE?XI-+h5r1LsJH_DyG6=@nR{q+u= zdZ@CfuCMRniQ<8QzUIrlSk)Dn^ZhS~%2&4~C9IAPA0H)W%ox{u!k96CL1MnT zJz;Tchq;iJ*^rheAuV$uEwdplVY(4@Pgm!_h~8kujA7C0j39LP;F$D7t;lw38$#i# z5Sxv|qs*?n9iyFx!p{ba-A;4C{{d7CWi_b6YKEm)?TLnJ^0$2T)!HBPE>xO*g2H{Q z{ndxoty#71FkxS2BL-x(WqMo9F!-qzE4J6$BOd{S_Jg5x(~cD?!Ts@SphO2^@Be5v z<_r`LfpvOP_@CDn`sw;R)IO8{J_ljY!5a&yjR36}l8i!@M5xS1JKQ5zfxhrZU-+XhfONnM+}oEgS5-7tU1#IY zGFb+L$OvfEht3aqyujf3<07a7%1rtrz(UqjJYb#>7~oZ9XLnc`dR+xPF(D;_{@F&rHxw-H?YT| zhR>I%QuaCZ8T0YuGtx;RbZR^GX$G}G3BmY|yC9^mDElYn4fCI~bkDhSb^1)6W1#B# z<+H~R&q8hsW`wNp?k;Vw20*yJhL*V~qbl`Xv*I~yd->Z*%v7y(#fWsI`$QT&4o z!7v(GKEc7D53<>GYNM03^fscneOCC`BwwpT;U$vZ@%u1aS>jQK?Pcjz% zR;0Pi#+_mH*&9nKNhNCr#|&agPxV z5r&nl6TSj(xzu4}%-Z20qa%x*DD9{~uHj*r%IkbyUX;*#`uM?fBVmkJivLjApk8#1-Y z^;|A}XtHcOmZcT$1#KkQA}*~No!I%yhP)C`*|_3@LjvnF0Re`Mp@ zjk^je3NK&AYUbkE6Icu#J9+xdnG?sd&*dIFe(=!Av$=&u7Yokkoz1y;^*Vs@mr=H2 zJe%EjcI(!H5wNx)P!7y4=oiR3lyaJ(TzFB_rp1dPafT7wh@B11f^{U~a}#YXgYb{p zxeU?Z-Q8NS&DssR9weA_Yi%BWo}L1fLbN+Ho!#xj3~d1$a(llC952c+jv25X4OS=y zgAr~m*Xe9#I|ckL$0IB$X%s(GM2mbuw!jy`FJN8jmaqs&SUe;w84?x&35$S)$uAc< zOm

    Wp5w2-fb{UFDmINb^#iv&?cqb16a#&HJSe%>GI{b?bKjuW&$k_E z?CK%NQ3U(}TqR!&upMwIG}~Q|;2zxDy|}lRaBn0#^8wsjw4Br5&{S75K%*YI+%q67 zr2F)l+}#G_{^JL)c7b&O{v!`xESZ&_vbf2U1JR%qU+m!no3U;C7fNV`>S{X@s=Ykn z%+2ByREGQc>Wkp#*@7QzL70dXv;CHNi-H%P?K$C|d!8YvB0Tx8PWCfh*t&J= z8lYnU&n2|H&j?FP2jIH%50CcaXGIqkUaPLCscUIy9UReVy02fXIeWfd9T730R$r`c z(_!+p_g*DD+n&QUg@s*M3ey(jAW3J|vZVz0?WCIE9PLJBG<#NbeK$W)A(dIxT`f%w z^%bS%j<{q+gwZhbo_i*YWM8NU^sK28^HZy8HwsKp`sHWp^wCqfCu5T&k&?z zkZ>Hsh-BZ$6kLoxB%G3Opbr1B10u z(T$zW4)3w!#!r|q{*JkT7G^*zXWmX<3R?AJgG(B%UjD0LP=~C^sip%%qrDAX*Gevo ziQb!c;qyKmFKSbD_Jw!s`cE((9>Dks0xqz8qdgJ zzW|PTxDN}=rkVzR%GBUQ)P{ZNp_z{JrJXvBwx=21%nV-)o7?Yd*If4+dirDZ^jGNV z*KW*p6PojKEBia!YsxCyySrK&yOEgDQq$O6SzJ=p(AL_}P+wc$g6J@)2O3)OxuL$U ztqTkUDVXXKLM$4M74sSL4Oj$c;(*5;8CyWbTWsK=((6q&q!r=86>#iW?5V^jk5^N{ zell%UVcEHF0TKgtPbV@~oZb3)264T<07kl#Z@-CJ)4cU>6|6@_J z1sraZqWP^?HZNVS68$aS+Kjrv4SL^%`-n?rJ++pTSJ35?^2P#8X}hr)`VLdwHfYh~ zm>`MMY$vr^^x~s4c}+FfJ7r_i-<*xVAHiR9P)y|C)Z4fWX=p;MACJytcV5WO{wnf3hMoRV zL4wX64bWO;)vanP#FOTJ9U8Ir11Ib`W}gof`e5ztq2Jb z0V?7gDJg5WAwZ8g@Dd5ZeZXSbO%{eP^-x9fz+H+UzCDfMNre(URhCc^)mIXGK2ASMSrsd z@h`nNHaMqj6>;1!A0yZc4;o6163VUDSveKY%tuL(3~GuJg7N}Uwo2sbWeGx}V+RJN zO-UlO5{%9j)y)zFOJ$?NVxtqNJMksbNfGLGuM=0|YL??_w&H45;%X4npk_9=A^rj1 zY$wv9@eV*QDg1{FI>bim8j#2d>;&+F-JNZ9Rrsp84emwYD)E0(@uIPjX~J@MCaQnI zwFzt{LL~?cU{ql7h42{AI|$ojnP)Ttbq3Bh7T{Nini<^_DTtheYL(+HhFDh;U(P8RjaCwpr9Y~jhU)9B?68?{6Bfts zF~+`R{y}|&Yvc%cY&(z(dY#!~&m7|bKan~4K>VlG0GaIVQN=r%`26Tps($dna*UBsFh{L+#Oiq49YZKPzO^tO` z)m7I@$|~z>!OUIP)Y4FmxCinkD{5-0YMDkX7-j;>!e&b(a6s7++QIbzO5yScvxHKf z)eb^b37^H~5UMovAtR6wS0@`wY2gQOJ^!vF!LOn9)AVzY_nz(#SxEJv25oGX9`As>y$ zV>=8cBwn&I{mB9s$`syK!1;J^(&7Ic5f6jeZ5tEqPU_Q>EDxgHUxREK`AN-Wmfql>UvG=Ak#aG*CZ*QKyrMB+Up6|Z?;>SEviexN-ycaE7w(LyY2Oqrl zKw4Vb^rXlD9`0G&t@D^ZCyFX6YOqC4e(pc;c0M&NM$l5EVm~^;RMSN8p?j#W6-(xF z$^ieodg}1L?4nCo`n_WpemDnmw-L`Dq3c(QUz?po*PYEdR%q}Y|BrM`gx?ZkQRZ2! zlR@6+KP^4|#dJ_RlILe3bNB!gz?4vTHkmoNaZ_hib(g_rd-~~x3G~2qM9E$B(wucA zYT?YMRTHevk(SC94MiKo@rkivVqIr*dEw=vvK~jTP8J^t{vVyjBJ%e4_4MGHhMFoW zsz4X2C7Rsc-_~XV#Lp>6Ne-dGXROxhdj_p0qa@Tz+c#w8!;b<6GjLsj`4K}~uUAb7 zjR*u5SZGsl1wOtCmSW1hsi;UsLy+Kdl86MDccA*Q*dvoKR{%)2i+V5x0dPp4t)ru( z=xA>4#cJTahU|QU9!d!TK&wt?b%lGarePFRgp02JedhLAe`{fX1eZGncN-@?#uqtS0+=r>}4$~wwx&eu8Y zHd(NEunjd*k0>*Qt5w_E!S=y0rjcbbk8nR{ztx||LfrwoN=1qp2w(ZPg-vHB47$E74DCx$6lHkCqa4VpG<|Bt&%`lJ)*-g!?* zU*D8i;vwBEoj55ZM}Q{z9!gMCk1 zS?nzKG-3U9_*)KQQTeUMYxj*C$~k%<$27|#+`Q@JwNz>?>%+GeKa~zTpUqHH>scS( zOFa);Gbr_*dq9%5#^qGmM19VCK0R{a(xpqWzpQ><;I@UvrzD1nmqs2udf>;kTMp!1 zIDI7Na`Bbx1wZxmU8~rCU|_hry|)$2 z+OfB}zp1qrJl>6$5WP|Id1ZNL&Q@tzT=$BL-~b6 z;5U6A+?$>cPPF2kt_)bz#P1=IaU?(cJ$`n1u&(y~@C-q5?!4jf;4^6miJrLW@NoM& zdPgtwP!AWJR}YOF>mgSNJb(6d-0*X>v}AcCkL3iX&mw+~z8((erk}&LIAeh~(0v!g zMkZ`J#6~6&!o|3YIF5Y?kXZvq%wqNFM!Ngij1(mheLd_fMQB)jgN=)=8g~^Be^$vzw zwQ6_$h+xX2k3Kq?J6yYal}b2og6T?GdG+}%$KOe%vYGVsN9JJ}w;gSG1Z%d_0M&de zdj8Q_epZ9V88-Dn1et%pv(@QoEyulJ$LS04Lfr7KS{9#;Z>BhFD~n_v3G4FaE;fU{|VevTvSnm>Oa>?>zZp( zA-?!>VL=gyEXu18DJN_u|y5au-{UKFLyjlu{#W>K`q6|{2 z2O5ERtgLU=vEYPtYKMk85U%OeXh)m^_bQ6?^h4-rQd5O^LtH&=raeG(#W!iKRxgpz zS7EUa15wqjrftaIZR{GcP#*AJd2&flv)Qy|WEk*Jaym5uST^(x8Yni8=g^{PA}ot8 zt#-u0Q^gH8TrMcQ+#33n%1bw7;)$_V8iiV>w8(V+`>)shbfIQ2?9K&?QbVXF=mT05 z5c~HuNV`=Tbp3iah|D-LZxK@Uz&{ryHeTAcZCjy57PI&r6+LA>>DQysIQ!tfI!!fE zmt~?$Gwz5s)zy}qzfjl{5H%CaoTadUR#QsBlXFMY9nH1f!W*3|<_$DeY5kSrBS&&C zoIig)H?OG8Oc7iCP4TCvYRob4GRN8)&U^|ugetD08sz{h&hFe~am0_0HdWV^Ub%X; zq`a!={AKiW6(E&$C~sa~O8VCX`gD%E_~4Jqphq8>J1!w1AmYm9_f~VfsV(r45~-CDBdS>m~`zZrqJAvno$^9gM(b5 zmwRY=M__1;jtr9!dyAHM%7bEJVt`74L)P8DgrAAzmoGv8-mHeIj3OE2W(tgO975nL zpa2VD2|ay5ARuBWlcpc&cmripMu9FAB{_X$0+@F;%}{U8kO5&m%|LfgzY#$liKnlR zf^Sg87^I_NUrn75=gn1@=HiiG+M%Tal4hVLNQBVZa``mMvQ*=UFs$_hJ$)KWrawpK zAK)z^A-?ZUHn0?_@Zdi`FT&P;c7@81+j?T>&Ye4UpRCsTr!88v=-vf)gh)mTw<2JN zNSE_N(eZx%#E0Md&wu`t>N+UT(?7rm1!QFe&*TH#udA|Xag*g14&w#Y*)k*Z=4^nk4^@K8uF6bVa&6)-xW;{ZLUg)pO7 z7!o`bVM7~04GO$H1)2W99U72Sgz4U_8&>OKSb?+#7NO)u{Lc%$N2)eQqW63;I(?ys zRf67Lz0RiBM&Q<$xdVU_f8M4;YuAR3pZM(j>G(Q>{`_;*&rv|`&ht;tefw=xy93WN zdq?g?@(WiKRP}Q}5SAW?ob`JE^f z#f-zjvly*w+v?iJV!te=iERI1KM&V$?XK+=*Y+;gex$|zXukm0Z^yc}XIxvSYd_1M z?C0b9ZGvlCF*nhI$ZCvLgkS^j0C){n1K)OghE!eee=C&0Ctej|AIk%-G5-#y38g5%r zutYuKb8b&t1o3JOk$+8NxJ@4JUsf9lHVaR9+o~h3t(Bs0Q+*^bz*XF~3UM1lx35RS z(bIA0>8a@HDD-p;(IYZLM_=Ut7=6N%Mma}NN)`k6S0<4c(JIkXl6)A?C_Q1T*bhv%<|m7~IP zeQC1;WX(jde`PFr5hWy3@VzB8m!0g#XVZw`;Ya^vFu8pGzpIl{T;pbnYun-4f@B*y zsqIh3P@wC#ZjF}f`fPCRhXCFm>Lj-7w^G;Ez27+3e%^nwAM2L=+`7!YpU*G*vC)$^ zBmacT3AjWgMyj%?WEEAYqJB_Oq4*D9Ma7U$ZhcGkVY{9zY(dRO_L+yDN%7i-7Z zyj(5F$FBwW4Ii?CDG@J-=Dc)O2Gy;irLn3EKuc8gwl(-7OGR^OymzYTm@vHG$RHv| zxQSK#`3Q>^sOa&5Dq5bPqNFG>p`y2bp(3TUKUUE!hlyw&BDn&R5YJlhz$R4{-Znp2zUAnUgz=3$Llg)|F`T2FxIjVXJ@dL zA`p?qOafdsi|M62Zpy%8xZlUnV$Y++9z%;Zbp; z6Go3tfc1^P#>d5lg^f;#icuxhfvg;55Y)pOjX@6t>QHw}okpV@(G8>euO5y$vlc#X zR5utL)TK+`Y3^vnaO+uo&iI!)b|4%~I`oY@LRa73_l@8)m6opa{Q92NVU z?Hj&A_R~jES;q$CLK7VjHg#H(m$Bec$Y9tG7i+Rs2Et z0q$xKNErnS_H7UcuQ{ANE|@i_^k~1@9h`k&-(H2n(SEi3p0`|f6Kmi7_wU2~qSP4v z)d8a1)P#@K^@Pc|iMX(J;w6<2&R9#o`eMxiyfGS$ax7y4$1|EK2r8ij1 zXr%JldVAF-C+Z2YSR}s}2w$`!T2iYwnhY!&K|`Av42?#B;z6s1e@5a~cJeIhVLn&I zC-ee0j*S9sHm%M=^L{zD+3e(dNZFuqb{*R-C%@a@Rdw(tle1s=}9y(N$k9J+}XJ zLCrO4*6S~R@b3@aG79O=rov0tv?(t>`rIpTfX$Eh!Cju}M;o+n!C)U`1&>QUjOgTB z&bO8PQ7HlKcR>W)X0g{>_mp34cPJEl_Z`d*?kaAh_U^m?0ae22SP;Y^u^_5<-A3$q7gjYFU1$gD*^?z#-)H{LBaBp$t_?+U>D>|}u z@5lQcEuBhbsG!*XTV4IEr}@x2q`LDB3>d|T$jZP&#UPxbM5g2ojWzkdH5QpA8SIPo z9?|Z2VMbucsj_Z7ez}+1{oQY7n18!2{de1l==Phl*w5GQfp#I+{)esfyYqX9Yu@y7 zZHHakFxPym`DNZDXC$q4>ukbA;YMO?5F(;`!(INpUVdMrLTX} zCvGdk*R@BpYa8OazrjB_-~YN_sOubV+w9i+`?KqGw~KqfM%OXi67XmHxv$f`&U4>i zr0f3Fe{z5S>-pUG=k7oEb-LU07D@eGd9k?e$-Sb?c72X>U8mPyxX!v;+C_I;`@t6k zO|{0g9pyUTe_iLz5%u#J{$)HM)04@^o>Y&D`Q0(`n|*&76J%fYulD_~aq*jd-Tgt% z4qO3KL*Bb@p7+1T#-HpY z&GoCZ-a2BAkt0A3e|OCM=3alfUW%n+h}V)DRx$2&`m=E}3ct=`+}@~5uFny!aWnGE zcw2^#el5|!{wk75kCiSxVfJt^$qpH3KzvoD7z>u2+3qj z!jwS3vNHpCVu8$42uA}~B;_EO#gQUsYs{=rhQ-hFtFtIjbwOR#P%n%Q4UKiQmhJg` z)vDzyzl4<*8&KCldwEC&#aNR2kLH|i;Q0Eaz5VvvPtHsn4UCVH0C}^*pP3w_5F%&JD>^&i(VSP(!J)JnQQ39e$5Y}%kb{?wSk5j)1+q9(huTnXMJYu*Uhd5rZn;i# zjHUZNPbUm}cz($Fe&?Brx=N)_^Zrezu2ks|nHjl$E-&Zs!6T&>?`iMN0ZfiOHv&od zDg7>LT`cE3U@0$dwW(%yfrY%e%QvL|>gIjTot>x>)UDADvIK+uUENZ4dv6c;;!%^K zw>{@XzmDneLD;scqSr=iE#{_cJsky19zf21#J{fd8yjTlD?U^(I1Chq$;tFwD7C-` zpS!1)vJsTwP;1A9V-V``Uzf3rO`T4{()$fu z4C@F~6`iw;$3{JDDM?YyU`?8I#~oQTpQmK;c_=nWkfv^2b}Y`AfHRURnbA07G|ni? zMP|y`bA^`*a&xmmwr~pEBJU^xvlSl#N?tSA3%hYxXH&@&Q@8Bw5l$lyaxc7{*nHWLP2mP zrBjNL0~9gu$&WtzXi=<(UPTWMDJ5n-EK*X%1!z%t5U|V@0|CdAkXVC?Q1_~z9}28uD9NK{P>2;- z=u`$^UOCF8Y1A3|1M)u&(kYjMeUk91+^2MB6G!6+;W)x1v{e+^Dhh2SKUK}X$*z+zw^o1n^EX+gQ zH@@i18Buq)Nk5#)8b<1pJLV<89g;k2VJHjlG+qpqfzHnW>}n}#qV>w9u0}&My$!^q zhJ2C1>;gO!jYTr9m*5C5;|LjOqa`@P5*#5ZZhT_$n-! zi=Px9HF#xL=JHQ3P`OHl`2mmCo)Q1>fUyswmgGDIi-nRV9z3t$$@fnl6E*1p@cULA zS#wj6*Bq%}t5D}f=IpAfufAT=;W=(DTr=NO+d0D;R>af??uH)C2Se#;1kk>rwgDP; z0GV`$xDO|Lcn8jXoz(Kmagk0CPvCK-P#)_CyrrzK>_Em%h5AqS4;L?XJ2QQRTs10g z_3MPOa=l(Yk$^5AnJCw*l1?8wd2HXlb$bsUJb0#5jT~G%Woa+Day}>f`00YnMR}*r z=bb)r;^f)$mr9C@F95ojC@=2hdhj&;6o6Y+t0E;WEk!|)*G6NXnI{ow8sq@+iaf}W z67gwOlD==C#}Y^*!5xV!-l2g$V6wD=uN;L-0>&iBMuu%R7Kjb_vH#8cYJ|CONE_Q|=9?e4ZT(^0 z#vi^z9_IRuKknGReG|w*(a2pK31_&M6b?WmkH68#sOQ*c;)wa$205E8l*myBRxS~; zRY^a7x#?tn&N8eKzCBaz9DmRJQ~9O!ttOeTx10)&hz$0P?#N%id0V41^v?7>Fr#eh-)zzog8FG2?TuF zZb8`wJ84n%_yCK>ECmdl#DoWnZ>Ru(%6)leolzPISFBmD0mMwBH#=2H2@@wwj^+f1 zg@#5%j1p2@p|^KPM0i-jgp~1otzI@dIWjUZDmvOHBnGt;MpL26#A`kQGB!IZCcs;S zGZ;W_$MJxY@%9&Rzz=Ve23<26QNIv0SwL{#Km|MiiFpSSL$UxLfYg#arW73#=76}= z0(7at>T(#Fe&m4)6xa?UZ&imcbj78W71ys^DL9{ht`M_J25ZD%b(JFel5v|er$}>;D;`foO9yRBq-_Fuf z|KO-7AI?y7by<&J%G|fpuxi`GQ4dR|-22MikcP{ks=5FMma|kNIIT)Bp~;D1&~pJE zk`b+^XH4?=)a2xyJClF41V|ayju?UVhD3}2V64X$B^bycp{1A|X9% z*B-B~Dk$x!KJv}FZ_c3@Br(swkSK{i!K$vdcXgO(PD76G;7ql zS=h-*?U8(}nEcq^?@#m?yik12JMh#DPgQW_y>&w-vT%n{F+1*H=S+B%erMtYpe?%6d zuv{zd?HypLxZ8JN%6blkMKht|KY_GnaNl@t`}VCcVOUNMpQGe*MSKV4Ay?AFT9v1O zC-(9W5As&{_ymMT`pYG-24%h;BAJg5pDpzZiH-E}iHlKrMn{JS`Xo&pKRO|H?1Utr zz-Vfrl7(6$w35pb@)+YFh_9F%{(5r9vAE;0H|{u=+;M$HNpVRjiWFZfExK?%@8UJw z@drwow!5=mV`e$ET9e(ZZEwRR#ruh+a<8D6gnirAqM+tCdr@TsizuXmhR8++2CnzW zeYnGYDt_VNAHMl+!`9=4vyjTShFZ#e@E$Z;2DL5&%3r1(Kot_cA786=2wB?73LJPq z2f`+^eyCq>vct@$4HQQy^9hRx_VwThe0*eBBSl5~s60cX$0Q|IgrlAY0A_1kL`U70Z9%Foxwe`>xg>etuEO3lklz2!nBL4hRJ`18dg z1Io_2amA8plOr)P|9sUf6tCgkxOBIEB`q0%YY)7&C95jWo~y3PCCkNsDS2270BNSt zs5s&BUQtT83$~R(eUD~byY^)S^_(hB&2aAOb2UT`GBT(!$_!U2PSTdk1c=Wu1A`on zOX0&hw=NTS`$mO&dM71O_h1fCClsZHW4HxLC=U1mA#LE|^c6 zkaAtLy9Ego^v#;dgbDfyTy&*Ywl;(051~~&2;wThtDcEq}^q~WrH*7r8z2SmR=pl-XjEfE)IBOs;~T3D$Ip~lF% zAGqMbmevR&hQAh(PXv*KCtido%`ZACA~ZB45b5uJUSL>}q3D38e^_KxWO#g#+;en1 zw8H-qlfNkygj@b+sVFMBhO&WIuU6GmBNL9mvr9{=tE!McS5{tGSzcOpwY0dn^jc+U z`Lz<*n13xQpZHS+*Eb+|^q80l5!Nfm4jubp{Wt3)JUJfnu*h&p?XE)yjvf7e!|nph zp~6G!VG8`UxIB1k#7~Gj>*_A#Ud*p;3VVAF%$*I8jD0|_pA^(;^zBWTFV_p6oeKk< z@KvmZq->{tV4m=9gg1t^_<(XA0`a5re+t+qn)M_ZPOl#T%Zo;bMFgHOkH8C}15QCh zR&nw!TvtM z9PTKnx4#y&cX@D5Lra{6ZmWSjWJ61wf|i(B({BXFZI{_2z?1J8JvKHAQ!ec!N&FZWRx<*>{Y@}RiU zaibI-VlO|CZq9_F^73TNHa@b1{lxX#zuw;jFLy{h5XCUagnTPalEy)-2`C*4X%UX% z4dpBk;Hbg%CiIZ-{IS|cJ|x9*nvs))+874nnZbx7yNb4N-+r_#zPhle zs8tx*-B1zW%z5tl=hx1-Nz=UR>n|=0P$`Jw3`V7EhK-F5Nc24oyYk;Y)`JlL_$hbY zm4>`azq_XkMc#ZTLt*ig`24z^Cr+Fw+Q%N3z6O{twv$lkKR@2OBug0l+ux2` z>}_K~YOtxK^h$ki+KS&S1tZs^=usCcuN|lI)sH^;yQiOj_Sq+H zU$!7i6_YULmOJlybi>9c7Gn)OjlXa4mES+Q_V&Bi+_U!4`_?@4w1Qv2gCRxp!@Rb_vBoO!^z=gDKA|rZ*dI_t7qMH*If_X znP4m^(C01i`+`9yc3FpGC^GYA91_XIiOP(}9(!!j&@rQ{>d>J>XX@P2q+8aY7Z2S3 znA1>k>EVZOi|su3^_O3MdD_P9=FEDST_A8)Rsy!X^w_a0W^2`jvhr?)Iw0vRE-9{Z z>T4P;HoSSY{>o{9xtuDrc_?#9RY^r-!=-bVsu3*I&=TLx^LPfVwN*8)#?F>Ah3Bu7 zo<4TIwxRiI2?WCFN+oa59U+&}kl=g#6E;4zDj7EUHv8}}5(Hq^l!;KRCx{?^44WL< zgKzTFb4C8V7`{+Ixjm@dBNFq51C&T96f&aNF|m1!kEcnG6S50#iVAoTY#tnJw-2>f zoW69~PIGKN^RW}hFElsTmR+r=XsR!{Sbq8P`O=FehPtW?XNpfAIeqzxq0i&>`2)dt znlsq%C^wqBBWv`v#U-WZPMiP;R8?L16{yR)1)c%FC|(7KIcu{yYUSE#KCHuscOnbG zWHQ;^LL~>Qi7zGw7ME`1W{PfqbxMx~*d4Gl^bHZsGREz5BP7mC@pv48GC>X|92k}- zt9W6FRD}6WJa?Ea_#ej7q&Xo`l*Ev5A$3w}R)^grA4wwu9o2&ux9dh^_tCS7$B>*I zig6@o!)hKndmA|$QGeRdYvetX3*bWQ$p5e$LaxD#w1D}i7U72EqTCguboAxOVB1q>B9+PjRS@}VvACA@QlK=N@f=|$l?>w%TMTa zN=J!C=rbPeC0Up$xJC+vldu9qzNOdPjzk*nf#P#Ng52~y7}Kj;Q_P*UzmF3%2a?{UcNk?RWvj_`lyY2@4SY(jgQ*syXR1I@r7f{ zmK|fcCzdTc!77YE9d$IdB9jWa9`(%*pboKIU>o~K9#J$~qmD>SiIB?F8eV(~pUSxL z3*nL8fS$aHo@_!-NT&G)^kjzFjOacBj6l-KO^`TEX7DkB|4c^mrkT8f&rJr4#bC19 z@ENM2!~OLRaHdhJK#T_*rcC7NsE9Xv~K7FCA z_GEP+dD`q-CPg?(J|g9&o<{MZSHxe`CsBtb+QXlJYC1!dFOwS~oKU@XBH09WsS!KJ z$zzaEkF`RlefQmM1rAQu{m(!5{F*eXVduNI4Kz)a4Sae)cI1nJJEk}EvC~&BojV0f z;?#M|R^9#J!y7kkdief3*W9ye#gZjCIg6JsyZzq#)~>ntsa0!!f9LY&URj_Wvotks zF~P2VceH!jNt1vH#2GeKlvfyssEDLVT`~po!|qZP%+MAvZ8E<1zGxb zpvT$m7QAOOwt{8^gvAcxqJ{z_QIiAJ1Z)=S9LXW_2ar1-$YVHME@-6#-Y9tuxCu~I zr$leb+rc29Cqf68%g%rR3IJ*%ZOUb`Xf@oSsAh{`K?TAEWm2R|M&TQ%m9D}s3IpJM znL!o~40^8aVw6Y&Q~z$?TbAOx>4EzK5!GO&gD@^ht< zI;h){g9w0kxq3bIJG|*=Gm*k;TNsT75W-`0Tf$sNyjbK)k=I|TxzTC|YWWuCy>~o5 zxY}{0BI+!sx)#L$5N|u%4SGOxZ(-o|jRchT$hAlw5z*gCO^0Z-DjKa4TAN#J&3d~P zna=1*9w?SiIYNcNgAVZ3^PDyZChWaBB87bsTLap823myHTK$jH}*|>ecvorqY2*2EsQey%P*;) zbX(|VJu&jWnTrhJEsV)vvxUf{gi)CquM1-;N1|tuBUF-VX*>tL^{D?syp2Y?-N-Wg ziZT?WPV2=53of$E7^Tez^jB`|U;i4*ashI|;g}4CoMDs_26rg1wsBFKP!Jx?U%NUn zWs(rik*f~~g#&O-k6ayA3gW6By*h=-2lo;uh-5qR1WVYzxOyZ;MXH6Lk_V6s1dfE4 zDaP2ZVaTh(IdckGW=3)m34-A+I&oqV%kUXoFhV!7W+H@>%Tf14aCm_*jsM@aodMl{ zxr-4iZ$1LBu00@To|q@YdkXP8&%j7c$C!`$=oLqf0XpyW*$bC1oi8dXJbeslw=##r zjIuB&Rp{u1wY&pyJL0L&kr|WEymn_)&t*g&zZ`sxlegQ!hI|0L-^BG@1-Ner$)2~v zCY}6Sz(ccl1aAx8K_DdYLmt<`kN+@AXWfrV>GuN%a}|Kw5i`T{pi-VEkH<))VqU(Y ztn_@*`3vXI6r6&^v#9tAX;4f|Vp>K<642_Sq+&@_46B%YGo12SncDb-l(f{eF%zh4 zI4v|u&|;H9CFsP`Xib=d=Azekq1UU?>sv6!)4-pJp$hlsF5o`r&z(Pa;o|uVq<{)) zx)m0k#|`rkwT=mi#S6p}6VOsD-~d+i`w)hd^Ws8~Vhg3?-6^u3{WGD(f5>$QyuyL1 z@>087eDkE~i3t7;Q)c5$e8ZQcl=-aIm+}xOBc422Nw-5Bb9fY!+53WPLqFvIeW4%n znmikQd>AD*Q&4bFgL)>n2RCxw{p-hMa|uz83cf83QnTl%Y1{ep=Pd#C95&(L@l&Ty zojQB&T+!)*Q>Ts}LQKJl6UR@Xb7#+BL3s@{2f&Okn5WhA#aEud1pNO?U&^B?KVd8{sP@ptX~6 zrJHdjV#AEra;Ru*c#xM-oT>v*{^ZYWCbBfv(!X);eH2d7x3C=MMLvDk(&JZOdF05Y z6?68Vrf$(G$B!SQqT1?_c)WD!any9Kt{%@ulv_MNy&W`_=YZiKpi3_2fcYO_N=l|o zA#~&kgw)V5Dy5@D&YRKt)>dLQGawg(A$esKVBNlQoT3apX$Xgtgntd}W8l@=6A2CJ& zsvU9wl{)g-&-+rO7UQeEZuf>DV#NS%Aq#j;@F{4PP%CpGRdKirv8t>O?9XG2C^<&H z!^i;!4DycbyJVFlo_(TcFc@V9v*tNEuHo|n z;qyk9*l_!Q{XB#u>acBu&%=LmzmRTmEsO2f&-?G)kK_kjuaoFN2P6^b$ohc$;)MELj!yLIscP($w4n7-9({XqiY)P?5@v9Jl8U!8&a!S zCd$kB6c|iswf< z`lXjPXH(zO<3(XZ^cfw!ZQEuhL7p&;TCAfB3pX>h$8G(fJl)o|In>5cjpv5Az0p30 zX@AQo?LVj2hv*g;rd#VM?FpADelcSht3}@e3{Q%SUzS#i5evxqfx@hwh+n*sm_zNL zJ^`Z@=|~i30m~STb|-ZaMh#-uU?LSUYAS2wRhUmi+MhF+NE~K_HWY`6F(SP3N8RR} z;P}8kT@F=3zlX(%lMO2nSPX2k3@s)jLhMImgo&nzv@NV}7-7ztT%0~z_XeV0#|IwB z&GxsD=eWuTx!Iii@SMudrr`5ihNoO@HuYQlCs&i=YNL7(St-QiYV?`h>|i#;kYU}H z+=b+V{x@(*A$dhE$yuM9%}v28Dmw>)3%*Tl!7E(%VJh@A+*b*%O=x=kxHGLbYb$6< zbGe(bVADoPXxv#HJ#iw&{S!JWDVy4f3l{0%pi)Kkdr(QQ$qXy z7cECuDl+#*S6#+z4f7CBNAXz3r`=LWOmFw#mt`)TduSn$l%TUxKVgA2gn7rOL+uqt zNdTw8JE2!UuQ4Jmj}qYe*H)=fz6nVg0Yl&|u=hAv4Y4eQcxQxl!dDSBar=zqiXOKi4V>WBUufx{>(Wx}FA``7h0>>nQV5qzAI0%!@2?t5nK@|Aa*yw#a@eM&yzl@2#F}z87Lo77Ty&+I3#505^ z2?faPA(A**i!f&p1r2t8h)j-_uJ;rVEA&e`I)Te_gRx{(1E^ZS=F+av$kLoQu-^MHwlv3% zZwzB}>7m;2djG$DJwoqawvQ8G0|g$|5o7?ah!4w8I47Ue-iQW%j?E{Z)rx#CCo&WYau-UC$WeK;;D>@q6`!i%f)J(t<4v}r-;Ifkfh%VwV2#P0gjbje~;!P zmBo82VHO;@2g9RiqPIoIM2CXz^Pz1Mi`3v?4t0Ut#h1dD-^7QVaP%&47(wN_b{8VA zla)#cx{S)jdFaDt!rRa*0}Z-d653B^l6qz1$V2w%PWU^YM@3X~q4p#5kUFV!)S9ZRnLkSo~+xu1J zbWH_aWvBq(YBT8#dNfmSKs%*0?e%hj=Sa$&4hLAl?s2M*(pB4!E&|BqwBV%R1aAI6 z&}wmP>=$1Q>~cPN>2hUFc5o_nkd-}B+wn@}z_SmpyywnUuRe{Y^PJA-pTCq%ea9@b z&YVAGCbE90F{}(!u^JGPZnHQ6MU8GqgF%MzM{r@Z=WqoaR;H3G)H0<;rHMf%Qxv-M z%Ws6JKH8&k*Y^k=YBhjS%y79_T0%YYdVw-BjQ*fkZkCS@?fxp3S%gSmQXB4F-i)-5 z&?5#NiXAwJzVK@*FJF4nxr>#cLyxR8_5cx_Q*F&AvRAf_L7$wL#*hrAnN#M^v@S#c zSQ)xxw!w&EG?-AAFVrW%fcZHB^a%ZmU;#v+R;gqPcpah@%9v1}Zu~~5PilIcf)O32-+_L?U$iV3qkvZ;NfI_i`7=EH<}S< zh9bzQ>rvZ4rm4f}Xf=dZNVyMjFFYP#r|OllQdhTO()O_qtXq(4YTq zVemi65BFf>P%@kR!n#Ul?C_P^rfzBC(kCBzVCJ}}9;3BAYTR8fKKINL5J0`Qx_a%G z+bf46(xy(C>TABZ-Q!WtS^c|(h|zn8dYgOyjKpC!*-o-OG3()^v{>N9G}*E67_Bhl zw%g2vR&p37QLPUV`Iv5WaB#q<6Y+#nF=97@ZXeAiYho30u}BdqRm3H$Rq^o>p;#)5 zlp>icK0bzQGb2|R>Kj6lU0M>!!4>>N13p|qDi_luQcx18O>hDU2aL+{QnCtK%=%V~ zt%Yy_(x1FEL3%_T79T53mF+p5B>wJLLi)E~fo|euSFS7@^TQ8g8vXu8R!SDV#-7(U zH8;NW3dq4D>m8ryh`Bues+luau~L)KhHA4myHRf^OPv_2caY*E4V%kFaYC}tDdZ}( zQl=r?apR|94tWV}hAxHl8!q)SIOHX8$ZUa7ds9MM1TMv({v{`9Z)!Jpx(9+hfEN{> zYj)JPo;zJoaQ1ZJ*@ENSKltX;9S8THI8}7=B>E?(Wg@`11Dq6vhle~KS5ICVcNlhG zj)>+)N#VK$@DIrt^ZPscup$cqUC;0(Ty}O+iZ+TPwiF#IZn8yccYXQg(Td8xq-43r z{Ns1285tVB%z5GDmDV<8`VU|4I9^@ZlbROow;kF8vT?+%Uy|w_p`VOhg+TEC>Fd9r z7Xafav(E|qratC3Q~Qy7QyGkgY{egHk9lArAtRM5czx--6)RROyN{JhqQ|d%c*EkU z=@V~CjEKkqkL3wB<=i}R=HexfJo5@ErSWd!$#eRkQth@i8k$N^o}4#L-QC<`?j2Ml zj-5LXBP19|Ub$}FgO8(P)%%ElB0qWKUU}v26y%3^tj(wyU>PWIM5m{xPg=6({eR&CpVh1=Do(T*XD= zl1D(3+1Wvh143T+K%3Lj){eoy+3V~Eoh3ZfMC0-~9F+{NcsKaXZ>~XfOmwWe=ssusEF_e2fAH-{nMw!N)ptQX3U&D zW7d?Zv*+IP$es7!wP?xQnN#M>B~v5VGsuJAjvqvkZXwdiWqD~n0tFNp2>Ltw(I6o= z$m8(@T&_}roqvc=BZH-H6y5z6@ZT-qzgxh6#4~pb_%G3Bb+kF0MEJA7d0=R53e^p7 zG$Q?;NJjBo)ZOwS6$WWAU?t>0V=jTAa2F02zN-|;Ku^E@QsLE>HhJovua7oZJGrT2 z)L#8hyTB~*lLwFQ-1*I}DyzO#-&AtSJ8RYLs}?0nPaVyx4NsFcu774P=~MhUeYaPSxq1x3#ptm7n>%g-ia(UC+-Wzxt{ zpsq?Cmq4Qb;+4uMwF;z;C&k^!*?gP}cMBH>h8jy75`#w8AR_ONYQUqqf(|sx*P-h$ z(L?cq5aNKF<=tfH+o;7;hP6S7wLyTod5FRUN9V%m`G#QPtm$dHiNKi4ojtp_IA2h5-sF4j zx40ft{KF4RmK3vsit>xaC^Z0?398f^0)+rIp*7%q&Md!QN6iY`9*E^u3i=bDvK04{ z;$8@B67XSs&ZYVMe5TA4O;crf@yaXtoRza&I#R!qe)G+IhRZ>5Iu2fVJoy~J*kL~n zzX**G(MU(6t7=S`TCuPNH^XDBsR+%JiXrqQlKbcom zHIL=Iyz;(3sNuenob&SWly&Q;o4|v0nX#qcrr0L|Y|dL=lfcNfGVaY$mnrfb0Su!Y;`r1(S#YJ)bH0 z>1eq~oD^u_6TIkeJx>QBOiDmCK@3Qn5@1=DXp;&7M<&CdCt>YI}j}O$2TaeG>N7Bm2$_^m-n8JrBK}fnLu* zuVXFd8i(!jWhmMY?6BCufiWU?H%b|CJ$`w_;9!K@@8JeGbhlf?%EbP< zVQuF?dgGk^Tq>blnmDNW5TZ zd0=F&lIpP}_v%H^>_yP*InZo@@#h1!4;%>W#4qxN5Q2eMIPk`m@|LEC`kyMz5L8TM z`>oDe8&FD(S50;rp^;k=<3fyzv@QujPgIaIGz9sL!$BUs+Y9XvEX?#<5E8~=x8Bsv z9lLDZLmO9*lX>fo@7(dl{+c>dHlYef#$PB`zv% z$wLo4bmxpH%64KqfRBhA#LzrhLS{xr27>3sG8KRw7KX3=M;2*2ih-xavP~_8% z4^Ct>I8k?^7okvg17cNClM&AVlxyomE?h^4#pp0J3-sW!whl1;b`&%~(7HsB7YiAk z8X~%&XRzN*`JtKeMN&jE1wCwXWF*k1`kR|Op15~zDpmB!2Y=aF?}=LYfWF$qQ>zuy ztz0v20aSgK{4h`5lHfi2(L!Lc;D`R;EW0^6e(}0>PoV(g18}i_iwa&JeD&1{+usGb zxqmP6CCqz#M8V@fKA&ek_i-+(O@=dwFgHwYp-1unZJw*ah{5hp)5)n(ZfkS>M;~|0 zS^wy7?~ZIc{zEsa_@-@t=N-XAFTcEQVs4VPxfT3h{Mk_(C-K&e8)vJLot!D2DJXxp z*gf%q7Z(@|WO`W0Z2QI0Twh-gTX9`=11gHx+8nL+HY=XhSJxWqt7=I3cB|gjCUAiE z%?()OxATFjOAYo9qdoz{P(leG)DuaFe29@8>>Hv2G@ng~r?diYs3-sQz~javv|2u9 z!8|R~URm3(@H;Gh+}QDHGQb-|C*`YM#h>av#iZo(J?5exFOsAqkCzvg2S$11cOyLV zI9X#zU>uArXd9jMh9jGy1}(dwbPuAJxXonLvN@&wQd4XJlri#H(j*+IXAo=x3i@1W)Ku#dPpG zPQhwSBBZe_haSl4j3iIEDS*X{b^CvtQx?b(G$@lH=1w8Qe;FPKcaTFCNx*r-&qI&D z$}$3g`lzFoRFZCY?5}c800o-)VQKo0SYb9`g}DdZ`p1zSYOb~24%?+!@2IUCZfk3A zce>mDEyrA2RHAy(GL?*^?qOX-^YNq?!o+`LrHPt5cewdVd8I=zXqP1{xpmc~wCeN! zhI69Gwx`fem^W`8xI02Sd;R+LoUQ+IZHbcl7{tw+%z?Nh+r_WG+;*&fT+UWSl=~uh z;+?O{U4mIyK(Kt_mF<=?#f?z>+VyQ8p_ zuyZRbx`0m7+v`HLAE&Fmy-i;s3$HIybHI!eIfh^3mOf5#oym?MgmKOgo{$XmrIMfiHrQ^`C3mSZx&F=N0n)l{|aJ3n}#vZMXjF_-K3 zaf(+}#;3}vki(Ei?R|#}OpA0KmCn+Op2()&V>YBOe`e959N0TIOUI9Y<{2uGv&c;? znoT`}uP^vBws5MP)mUs0ydlh)a67XV>ojI*D`fBU_!9TekgS1kszD;PDwMFIR!2n< zhf=f>^NYA1e*RKOvVRzTdmMdx7JYjJeR~9bn`J?Q4Ma0+7om03=&)Mq%yuL0h;Uhw z1#Y$&4MsC`xmHX9*pN+_Ja9pr&}}Bw(BVnI+63igC{O4oTN(`891fz@X)d#c^1n?7 zIJ#-I)op`JidGvbz`LB_kau{0Q}(_0_`KrGC3mm6Z^_J=Icx5_|MrzDLyj+c)eG6w$HJ#4-1YLz8LzxTy`sxP zDL&MI$AYO6xSb%oXONgN!M1Hs);dwvS0YDgQfc4~YQfvHh$suApb6>-hQwgeo6MbN zJ(7|@F=8&XSd2*XGPNL=$!;;*iT(hQ(FU}d&9S&Dq$C_r%OZ$)3lWW^e37taG~2s{ z;=Vl#>!Vz$R4JumC^0fw6mFgr6^W!2P$DJ@f|*i|!B$A4fMNnHM{odKq$*;b+bqEi zAc>CLKqm^_0F(>g0PuO_1|g1^3XULI7{o6F2y*;!sCckByfFqq4?^Dq-?i#tplk(d zP%}jHJOmJsa)caGA`%HtdAMkPJedB>nF9xo5vJ!+Wi_SekDRKh`}*_0Y^Ap1l=Sq* zMqPLY@X}-Z`eO6w#>RYl(V`{b|6J;I)Yp9vaSF^LG6m+YfG_YZ{N$0z0bGM}q-*~O zeeg$Isl0sBq+b4O%-1Y3FeAF;#dV z0FA^Zyn-b&S^?Pu>$j536{v>*T2jZvs5OvdfAy5GG%^MBngDvu2fe0%UQz2i8LsB3lf>_ z4niM{pALVUVW1)Oc?TQ;|@Y4Jj z^Z|&{OJu7rY~Rl;7hN*{>0k57tekE506t%5Td^w zO9WkV3|OrVRh3l^N(~Ks+?X-&YL5FQ@nPSxf}aRanGl-;pE036M1SOif^_e4{2V4v zj%AgDg_RhlU|z#-{z{xA`QMBmvQlalP9{f4D}#DT{(}svK}%C(lVvatmX0{8v#67jkDo7>sSceg zYqa$F`YerQr&a34&p!QZPotVjabW8*8X)=U$spLBD9+MsFgZFqyE+^;6Ewskn0CQB z*ae~q6F~+*W-RfEg>V6AnoSXlB4tt0KzY+3*G3_eBqVA`0S@^8q)}>3LK0$S|IH;r zeE%e<{S^5ADe(Q1;QOb*_tOvz0H@nvPhU5ge!!`5b_{e>6+UY3f~rwYdT2P*L{z7THJ1Z~@}IVS^_eO$McRF_Vi4#x@na`wPJHt5 z-}X_3x+xHMdpoaOXwWy?U8+fI)~vaE$@F9)F7NJa3nmQ>Wo1PSZQFKYcPB&QrewdFHBhH-cF2xz$Bhv08z*lBPTLR{n_4lyz^ zQW_JNkd~Pel{IIIR-s7B)Mj7;1cwzuAzY6#9#=uUk1Bu=OmVroLbZ^(yLvj>T^Q%K zJ`WQY3HJ+6>aOlZ9aD!3n2GInxE$JDu5KXRjaZnLk{Jq~Sqh$63Z7XCo=IsjIq|JF zOKVegMMXt*Lo*<*fQ4FTaJIo}XSE`uI=Q!Z07i%a9Ggf^^b7(8)76DyZ%~zld{1Y4 z51Y~vRARwU1YM060giX6so4=dFK6la2!HRn&p!U!u5zkLmpnM=4FH=3W8K?39PkZp z-MYJRShE-yDT`u;8+UJIQxtR$uw=R^dCDXJ2~SQ<^j8&}I96!krQg2svB!Ti9|>W} zh!g+>A`l$$2Lun%z4y-1h=Yv<1qBTOv1ZP_tZ1;)j&0rU8t~G(Xqxs9(HwxjQNez@ z16b287iH5Wi^Y5*d}E-vu#xk{5=o?p>qUhdfQfTOG6|caQcDZWBTTnwrer2tyg6dm89Yd>#ao zEe&)}1Kky&?H{5V?7&bU5L;NLV-e|19Or&N+~z>O)@t|eb@38n6deth_Ut=Q3oQel zsC3HJfZo=1aMM{uvwqn!)DShBd9l-OUA8cpdK6zV8iyp>?3`?^)MEtDgAoEaOc&S` zOfV4IApEhwL&V@=Jo#90S)}$yMLTk==^n=KI6VQ+G=*M{U zLv1u7Vhq+2fI2{cL5Cp97^$~)6ePT`I;3MbxEYUT)d%)!wFeF!ICyB^-XHeu)n<;B z^fw*GvOtYzF`x`)gFvQZ#@+JZx(Da6>P5F13_00LmSitkzGTrdeQkA51o9>xBZ0{R z52_piy|_!qG+*AkXYYAdOf2C&!e@FW~IVgSnd@l914oLZjTe{TulQ2crOQ`K$&Eb7%MQ`^ZDkq&>(w#94~x-gQsd$h z6lE==62ECC<5``Vq<^nEgzY|X!(d6+L2exhC(At5nkdiIPNQ(@L)dMu3>j~>jNpmf)M`oY%swjaUbG0ILd0htLwD?MGeRb*aveM9TYMZc9>sC>?8)<`wUt*Y@iE@P<7NDv zIaYY)av7eLL|+P>M;@+_PsxEi8R+)?tyIEW65>08P=~S)G>^;j6scM|lBMX)(jX1h zN+6(o?jhjFe6#?oE1wD~@c{Xc957nL2h`VW2;{W1|`4Mv;^bdPbG9I!mnj@wI9*!GvVuLh;AVbn5 zPfw@A(gX3+|50cSIE1-Wfw^SDTsn-obcoC);L8Vx0A1%r7!$Tsf4i}%>_TDDnZnBz z)wRuam6c^Bh^4BuxcUMDkwuSXt_IF#M|)@QfS2-m2Q+OcF$g#0P)~>5Vl+0^*AY3Q z-VC{63S=GBDDc4g6b=W2dW8Tf9s?X^gr(t0NG{?b^3d<|Q7{1c5cn{@MnX~}-q#czjbZg*4R))jyWx)*>4 zd8nTK0{HWm;OgL${4JmFE$g7((g}+%{`kIb%ljXEke?lVS^6F1$lA`JIC;{XIdkqN zez5PTe{lZeDP-Rx(RlhZOY!zNnXlJSUhHctJ%0SSa7u=R6YRA~mM>pE4esR`_@B{L zW99e+<8O(q#y75Zu@oQ--=@9>vi5%JOKJ=6Pp__DvSvBn`T}T{dJI?%RSE=dUCh1N zH$_DVqQ=iz@bGhwuNoUY+*o616HkAbm_pu^zp#4V#H3_(u*>I9%)DjAL$4#CfH!vh zqKz-zM(I#q<|9NFoS-fM7N-ES>Jz-)!o2jm`(}@!P;^wsU$f@c`SVwzq+#xYKio5K z;eBiFn-@*@)|^5if)nR29D6_aokQm@oWQH%cAwP2yRMOF3H%a@LvxLRD=WNja!7$z`0(AU#t zclGqrGO^d)WooP{K3!mG8*ujxz@VsaY5;UcQBg^yrn$B5;`y?x)s6Lf?C~94cC%}s z*JTBw&jJ7lN)TMh$Lt#5@(_Ior3=d=SQ^5n$Lsft_yn69q34AC1blqQ3s5Ai;vfZutXJ~HF3)1X;a3frzC3; zq>MSY-`vkAg^YqOJ@U=Ae|+)D50@%Bc<@_bud9QI?sLxC*l`v zQzveGc@rxiGd?ONNXI12{>`S@8=qcz=aZxa4d9chcO`$gWBSA>Ux4Dul4n1-1O~!y zu&;bg0TVRhm3t-!^|j`@D&)d#(TU^Zr!7(z{`Iea{phbwmQjz*%#;N3=;0xqNER3W z;YY`fR%gWOg}5+&_{%zqRHD+4*r}~8unL5{gl1%6!88C<3~C!0TIy;W8(JZgS^%_* zln{tmO^wYhO--!`1e1`pAHg{Y1@;DMHe>~1HQ<5nVUlD73#5=NlzfzUp*cY>I~8I& z1SMjF)TrW+_>+*JiGf)X@4;Xrk5)sPih=l-6sL-kMFODqCp4f=Abm#Q74Ulrwx1(ND9~@Egj+#MoGXrpCp`YSl1yM-$}K>%T&W zGsRWtbu4;K5U62y!XL7W%obOBi{54>F!v*mF6-mBGbaOA};-TxQm+ zmtUSWE1&zs1N(R70589pGhsfK)o<|8{{29--=8nEn3|hOS`ajzTn3sZ=6Jp^Dq0kw z)+iOQ`&@f7G=mqSEep|>6==&sv?T{^Nv;QUOnFOrGfYs8jrFzFS1z4zDy^xiEGxsl zTt~I)5<5F>&CS4ygANS9XQ(FTb|kRr^(_tdwvJ9V+2ZifZ5>ytTN9ISnK_o%Tz+83 z$y1K5jw46@zU`YrKt?2jA%|HV^pEZPo?8(MB~c)F`H!!?_S#cV|8Tj(%f@J>`U*>X zv)xcvbNGZIap~G;U)!*5)4G=yQy&7n^a!@FH#xUb59<^hlzd} z`4%}2nO?%n>;U*7-vSXpI5-PH<17p@?M zAz1@V<>X{3b@x1Vp2UynDID(pf@*K{h<>gGrKf|^Q$gub?M`NDYj1(ekJ!pA1|v~Y zIV|G5bv!Q6*oAC%@G1ItWs8FggS!JB02`JU?X3AyyV86~!pkaD*u%Wyr8f`o-tr*}NJ(d;&du5Z zytEp;G}dmzTnQZ(YzrnBkjY`O!fJ%@p?3HwjCy@@ixHW=`Ft$*5PgGr8nT)|UPUAr zDh5#y<)FCpIF#4T#=4Z56l!kZ7Zo=m1pEk*P$U#OnR!@=-vR9G$5bBwjx0&-cUwTd zSGj*V2lZSmGW2SQBM6rd=8>Hl5-*h#d?ok_^WFhVF#XYmp)ABVnHN?iahkUQNr8=x zfaNCwwPZrYJW$Ys+(;x!O;CVN#v3if z9{v_%Sd!q#gin+B zH4!_D1OLP#kW(auDjX9Nt5H!}G#P#E^Rt65fO&JVFy~@v zf8k925&0V5SMDE|tiMql@Fm|vHJ zUx`;|Ir_33{JI?cn%dZ8?{qmD%Bp_cfBewCoqG=MKeF#w>6xQP4jn$<1joDy!K7Mv zm7hd=zXo|p@s#Zlo7xLwImMIk z2V8xf?H%ja+HFpod458-%PGiO^}@@qK8zq}t;f^b&m^T1XI5lnObiUOJuc7ksX{s5=clAm!juUK38`bpre$VKPKnYaC#NJQsss!#61%t-uJ=BlOav** z+}Id^85ZmVKb6~ujl_o_f&iP^Z#BExY^~P*VQX7gTa(_^;q2$O7~0z$_SVY!#-?%% zgtoie2R*a}Xu_!NMG6-T4*I*>+6MbwFd*C9fISM2z$%QuDvZD?jKC_4z$%PDMq#V5 z-ePyP+ngqo{&>NGZ@>L^=dMFn&m4eGaICnpv8AP@mimV-&0@JyQv(~UqrKgVkvMwf zTw!SgMxnm6ycEN5siwhbXEXNw^v#z$cORapNL7 z>JEMN)mP`>pi7fVqmq)8N@Cb?U_9(?!$W>P#^Qp5lZ?TbEFEq+#Ad92@`=YcZ5;6Y z?y=uK`PgsnSh;%j>IXJ%%82fTF?Kj^(w!SNY*<9C)}?t!`KkVXrBW)5#i(dgqcsX1 zMr3vzuMun+kw$>y1MKb9=Cg-e{_6))s|mej~I})`l4fp z4t)RJ?w?Mats)lR`jWzfyLTNaIDH1V6UjDPb9qI{xpU`E|Mc~R!c#ad*Vh{n4rDX5 za^3AVHpNPVw-g+g?egJ65s4RFnY4S}+>jk#kng zn6Y^A;u*0#E6mbYt{y4smPfHEW0hF7{KE8Ux88lb-RV}Q&wTL37hk;N=9s|X@Y=gp z&ZF+qB_m@*ERtaqWbT&Y`Z}0fng${?x7>2e)XCYCRK5W=h3V*}L_!|p=g2fM(fpu5 zrjFzd_3?a^Ix9_%)i6bu?DIt@#lRx$;kg@&;bEa^MC1yw$@effr_lXAHpScyUhE$v zR&WD6Nj8hkh&URPwR51;-0HLu+dI4yJzYZs*!bEy+|mIT3@NBKG=u^2_BA;BhX+UI z#Y)VJm6#W|VP34nyjY2Ok>1dN!R=@_R3G^9B#hhp5A8d)|9I)yqsVwHxKOFLSVA&l zD)L~>b+yoDi@*S6dR)Cq)~06QYGPJguBmIbv*|pJ5Wy~rNC{VotcFi_BqpcD@vfK| z2th-Q=XbpS{`)^-eM*%|l~UTStqH$SclCh62kyLlv3G`8eE;25YtrLHZmaw z1j)2W?`k3a%0+n#$CrayafGpfqDJ40=5t zu}mp39bnxd`EdTAAJZXypv?w14;&|MYgdQMWFjQ@>07M`2)DHAA=oxnQ&)AV-Q7dp zP(_8#kOLCjKQM&JKx#o@GN1s%@CZg5sn~W8#)QN*j#g|NbFtil2)mBv^QRC0_~Va7 z#(^PM)oJ7do;g=$2GvT6FCW=|=vWD+M5f(tYHD;g6&4m=xL9=R^tp3auU6I8lwCM; z=1g<9$%QH8>g4v@*)h%`5kkOad{0*`CQ!Mp4}s@#Q>RW%iWoeHl20Z2PG#2NZwrz4 zlo=hZ5b}fp+-W#Mrh>x=Au-*;#L3y&*^4HMojAR+v7slL9g~_QCazauTlh9!(H|s6w2jr`T*sR+De!gZbTg7W|W_aWFq1KSEh@^OTS-l z!bo*GdWM2J)$nki3z1G3EptO3ar=p4;}l+YOn2>NeJ>AS`+X?7)!~G*ySl}NyYvjU zw{T~;v?=YwwD@Es^r*#dF%qX&w zkr_qwoF$l1A+Kb7OjcHOOoWsN>!P#Zn;knpIehf!*(<1BV~9R{h~}cAb237&+=#a3 z^ZVK)j-yA9p1yecSk3uMMkg#zWi`EhTyOpN+uvuu*s*)x?tEPi z%*}sfK7HpO{%#6>8eMWyQgTu;D!N{(tequ+uxGRYP-e!A%;+j_?2eoBeywl%6^qTHh-4*q9q3drj)<>hlT|Ls&n0 z-Jbe}O32NwPD4u>7Ce|k3JSb-lwdQ~RuO}88AA5k*eQm7X>6=&|Ir5HAo6N&(o%dL znPN(rlrPt))SrI-WpTMJ0{ON>P3L#ODsu?1YLk+aV?@I}-L~rLYG*`xdJJS|m69JE zK>xbz2*#kK$)`_O_A)8A-g@iwXxe(3jY>-r5BGZRUzMF2-QV5SSZ^dtWBgEeZ~H(` zckd&=SvP;Cmcr@sn-`LG@jmK4-6SWXCwTG*G1SgzkHu<2)H?(vpBM{elvENE!=s$j zre#G2?8H9a?pID@r$`xy<;ZO55GSGfQT*5xK5Acr>jlEp2?^Sav14vZj*d-)HX5&1 zNnwhV5(5opAT3rX{n)YKK1<=@K6Yz&KgqNKHXl;+MP4+ef0!6EQ7*a7$xi92Dz^{! zwDq-jV=uG&7|~F>1G8!ng>y{^l5H~RhhT8R4uc>kJp`b7h<=ki9!5$#dxF^0201`{ z$IMZx#e6r$wZ|W5YvT@hL?W)wO(s1h5GqAsy@EvfO#y$9`q86Nele|fL_Sz9mt8n} zXy@nKc6|TUSBK7CA#PvQQL?cW6cl36i;IgKj-qoAG0w6vUw@UP{(Q&wFZO-EbLY3; zexEt^Wfdehy%S-0`2r=IY* z2O`tvK1^-WsUX2AQMCj3>M9h#?(c`lQ&iN##&D6-=MO|n8I+8HOBZ}0iGb%NN~a3t z-e3Vl*9Q9W;2DP1%g4qTP1x{lbui?6>g!DcQ~Lcx&B2aKhM4g*W)-fs zR)j0VlmR;=eDel)_hqO?=dP=3Xlz1=T3vm0-AKP?pyBF7(}0hQ5~?2jpzr@Z-PTuuw$>#S*z^ zXnZ1qJjTsh_TU3+=Ti^qG%+!f9vfI4yKVv?E#!$42{lrqd~BQ_R+2tbooCqT_IY3} z=OHE+)=g{{L7fIv3GHTJ{s9-F9Z&<80j#+qnvLr~?59U8wA(sNJ$>k&)nT?e5gTfy zDs-B*Hld6UP9Whmin_YS7DBNaIQ~Z{mJW)I1H}l&QaUJ>4vNX`wuVE8YMNaT9SWOj zPf>exkxdm91qBtAWO6tvuQ*uVfo<xT|0lpH@rib`X=p(F>IS~Jc3mtx+NXz#rsb z9pRWduA-_F71k<=q(%_m?*g^%0=4b}wK826JM4e8C1=ZuOU~~5^0W8edh^d)-+TA( zpMQVo(DBp9kDtBVNFbvK_+19(Q%lPguvl4VXWa$-zlbH~(glc97cMv)rNwxHMTgBw z((Kyt-8Xx7e}DAAiGzD~?fq{1_ODY?Go>9hd%xca0hZrh`Q6uFA^I=_b1#A+ep7FZ z+)rxz`us{|{M0+{xMNm|Qkh7eOixy_Sp&ZNHa@uFkw+f7>!H=_pL+VYPe1?sQ+}UY zJtq5+jhj$vTbViM;SHORlbV5fI^c=K9F!OxJcLz8M~M3S9hFdys%`!K)+#)yarLuV z{=hKYCIJqw$31{0JVGjn5W?HZ0d@o)VLAeSAuMeXx(v+e?yg?!D5J6WRfG>_1aF zx&xq#C$S_K6<;iLI0`R@cYvKcl4HN!x#Oz?`*wf#{r9`F#@;OHYuN?Msw!QHQQ!vr zXorU{QYw?CV+WWU+5vnIKJxTqYaU#;iR=I#H{2yffO;@h^rHm=pUs{g;G-RTXBh>Y5+v@_}th5V9hp|Ks8%!OXq;Q zjqCs>g9G9I2nHfMKwF!|jM%`=o*_`B*LmFz@T&+Zr7c8%Y`l1Q=gysbPF55hKpN4Z z!b*~t*#h%!yv^29Q+E|ST3S_$Y;9LVN$Ew9>S{}q!Nn$8DWdlUU1g~HP*`aiNJg5I zw99x10U(7~YLPY=5f~(6=H`gx@)IY{8{8@=-gC8#{REqsMe-atk*Ra>)}mQsRh?B; zRo>(&YhQTbh1;m5y75ESm(4o=JhHj#sfGr!xzizN0( zp$`ECTyCTgbq6@G+zE8?!Jywe1mGYK!JdLt$${||pAB^(2boQ5>xX#R?`|iGmDAdb z%2Zyj(_**49@K@@m3Y+K9q zs5RW>X!pXthV*JB)PrBzoBF*^{`QH-h@}aY)7Cxq_#@9g{@BKinW{dd$H3mS8oJwJ z*f`Q$F(LUBrOLGmwH#dN=^h+({nFk9+y1bxGZ+gvavUh0LHh(eO00-WN=lN8_+&UD zs0>)I0{@4-_W+NpxcbKL-QIV#Dy!a$dzEc$TqGNJY+ArTsHQ{Ug%BW+yrA_J5E38> zAr#YWW3Vx9;4WLPvL&nc-lbJrX{GIb@AsSC71&@qU zW={DXUt?nnDiH!UN(6r!4k9-2$DqCmCv9L7rVS6ZLqu2CL+9Jo)l^wgUswvwt%{6@{4AjNur(DBy~p=N z?~#y@k`jv{2?=UH%m{fl`1Kw{7dTr*^d6$yu?Nj&)zJ3KZr1RMM%dXGAy_W&a{TgmbWYCj;D zp`N>f-h=pKPJuKf(Rq_0O^H8d*f8+-n(K}nJhk_`14oYTE9!>mJfu})iIkKS;J}u5 z^1@j#nDZwFn6QMXt=qSKi81_Q^X~6z8wukLQ?+;B-aQ8kY8&B<)KGlp z;K8r9Z`<<8w|kEj0;;R4;r!u)+rQp+zDh5S5a4aHT1UadLC_@lUsH z+ph53_$!X~s@e0a>l_!F~ zZCFg&_19lNC5&h70!UTokVC9uX3t%k>?RhUdmgxHg%sXnLY0)YTa2QVjN6`h_@M_M zK*;{I+a7&z<3qP)ZFu~-=U><`hk9B&({8tNW0MnN(x%LuIaAIB*c&uxK7|H+pW}3U zT$T~Arx9AH0(59ZteUplnb~qaUj)D^KI`OB8K)`iP^!2#hczeST?y$O6oNpIC^{8fLbx zsA_m{Xs}-o^BErq4x_^eQ5YWWYBhAU4-5_s8T#x!4M>x0;JiNEqR%#~@F zE1_zcI@HG#C__y?Rv?ZD=fDMz2`woxm>`^n208~!W=nT_{?ViP4CPu1@VY;;FRT8v z<>R;hwCUs14MVK^-A}xu*o^)`8;T&ivP0PEoREh`MJl`lhU{zu6V7}7`IjENb^Tp; zK6qnloPy@vy^FhyUK*%EDtic5eA}+ZWr8A32M|dsugO zOHY1&H&8U-T}7;CBs`9Zh))8-&Zi%LaHL8PmGAalF)@kBG1$0;At#&LU}!CdhPAYd zSjoWaRE(6%g{||cr=PuZ*+iaJ^z4%ozJrxv+ZJZC2?QEV(rmEm={U++07NIYF;GIl zrwG0e%7$!3k<-&ctX_7z8~)o>r8u#`EZf%&47cw3rrOq# z{swH>!Ymg3FgeO#qBy}q9Egy$jz>QvgA&98IvJEm1|^a~31!K!uKZ$Y*@aX2N8Wqq z1eL3mlM}m%7a2)I@0dTpD3diGojFxh0i5DS&3pfVe}o+DarcRKr?c(&9(0*eTIlE2 zuU)!eN!F4q@oTSAuW039ifgkcN+cOsH=>h_QsSnG#X2M|A~GqB4mIh5zl;>9gl6O3oZP5WX93)OyQUS4-xe15?Oe z(5)Y~dVOCaWRe4N5S-oTd7o5Q=e?iJzQjI78w^?OQ@or5)z#l+V+lRQC37PiS!X}s zzx?YB8-D%!Z1^5-rqoROfaS3p7GFPWcILcAi>ceDXG}~9naBfgZc3qhn;WPv`T&BGl1Q(2YBnD+&h(WFZb! zMY@=DkHvruFW=7N*sMZcWPD6~Xqa3Pr%*tBE947kA7805;f$7w@#^je_X`yd_29N~ z6oFDY+&LWSDKwHUb^21$f-;Pp9kMZsSs2B|7)3w2qPetgbhrVo$qgk1d7o}AE7-Z~ zLK&0>Cok5z$=PAx8+U$wV;fH{w|6x{m-;QVY~TNN)22Uvhgr(yIwf8>>PH^jckoDe zYfXvb?_1vaI09DjoNn6X@lXq0*uHs-sq@zMz)%X6@*fv z7jCeO(FoLZ`laivyc%G)Xn3)_s4 z61Az=!iHbBVnuei(8a{H7u9z+G}gf!)_3mYk)sX8C(f0Yw{;#kUsKZqgK~`H;4Dzm zmpp6M47Yt~XvRK=qoH=od+!~pX}pA{1hI1Yr8E^cBq~R$&YY_l?k+2;sx3No;_%yV z9XNQV%O_C5V^F;|qP%3y*HpD*>(j-=iJGfx8#+c@iJA{i=3lJr z9cpU~c~_hJ&yzZn02$F^A}XdK? z{rlT)UNG3ruD;XZNW7`=6$3%|@mLNCbk)u65MiksFNPd{bNd`DZmXAAd~2DSf~RGje|Zet41&FhZr^rkj$( zzCD}`N1D61I}gKSNvdvMxpJk!Fb&ojH6s+O&YY#>H?7R3_ELMfx37Y(U1~`vDK6G% zdb5`t4DI{cUEHX+gLDEUFR_YqWS>5N8xnLJz~s@HC^t8s{^-4Tc3^Twsnr6H zWe7HWqlc$Z@893*iCwW`MVzN)KOS|0D{$^$*&^mjFS^64@2FQ6&^sWo#J-Q}li+|}tyAhel;YtU92 zp}H1*0Z(p2f`x>uQ@NUIgwP&h_l2O|fQ~l#k~ROsKB@L%aX|~zjhgVb zvhs530QO1SKRG$*^}<*M9~118l#yzMB1|KccpR2Lzejd3>J_VQ`qdqGU}hpP5Bns( zmxn-1pNy5oB){?k^$_++XU}~9^;chEho=q=#XgA#ZxB9oo2_ju90ws)<*u|zN>hJ7 zPQwtb*e5mAgNKzj)H*wAE*6&nPD1W58+icz0D~BY>N41eEc9UQd!?rQ+MIa$^647+Bj9!-+oK|MH@RX+zKEt0x#27bNX~GctL+lRY~m# zAIFgaCNfT8Ri>n;PY7}MoIaU3T|8Xy)y{8r9jel2vk$Qkar251SQ@L&tJDj_qB52& zS(2z$+fkKMDho@PlKK6?gK8$y%864t;oQoqQdg|*(or!>@B8(y?_NJyKtLZibMLu{ zTBeP)ESnU|AJa8LQ3#`>5KKW}HTAVN_pDyI(!#`BZOBh4k;;AMQEbzD3??zC7ojlL zSgr807}dtQyvC8?5j_b*=8MH#OusRs!#UPofrxJx6Yp{iG;s_i1DLN5V7}fD3O@h}Pirh| z9yJbhl~$KuyjWa`K!VmLL{e4P)Dl}mQ)ShKi$z5cMJ4&CPM)vAq@JSFIS^s%gtnxs z5Bj=(99wj~#7^B^U*FtIoJ2-PM}`bzJwTn-G1EE>!tk(v{&i1@J}NfJ*&?SahQg!c zsGf^Ew?{{Eduon+_Qe-FcJ2D)!;e1RQwkS>DPX6bv9UTFPipl7z~sPoW0EFkXJ;n^ z{cjY5V}qvM?QvS|v|P&a*kY86I=M<}8F(?)qb2EdRR zp;$uXA;dD4s@r7;dv|YALLX~4kD1Lj8z8+A2*OP3sc1DDo7;x_Ac=dMicI#h7NA@Y zm|X@e4FgWyb&d56Jw3J6mDP=%Si@5gjX#QnprG@JKWNmDFZ!f==fQ ziHPuGsE0-g_|*Wh8V!qQst)t$Jw`XT9z5UULH?Exl`*DKBB8Z5O^qsHo@y1g@?l!?2I0Y#Kz0~*qF3f zE)x?^8!wa8)zWV#Iw^mp|0P<~;zw0lrM8 z98ju2VMZ7&NiS}bz5jj|`zrOWmX8B5wjx^P%qpGk&O58>>Sn6L^@Kr(c=*H1XY-%` z?aR+8Ax8fA5>D(&J-FCcUnR!H=`A{V9$A;Lglm7U7(<2rNA#z;KF}>Z6z9gij#>Sy9a?%%nPux&# zK;$@FTy}KqL|#ict+zipf_R5ywOVK$ z9&W%+yTcxunVC6nagt7V!wmv~n^Zq$_DR)D^o`d=NtFNn>eF{Fjg1PQvu53fdzJDR z9$SC^gZKVnIa2t3fB>mY_-HvV+y~LQNtPF2jfhXnvt-f^!$)oeez^)uZ_5h zs^Af4fr}s?b_fAqA`QnGJ|RS57~xCi&h^^tL(&N|rT|GSfxr|dLmlWG9VI3r*z>@{ z1{>jU;&z`rSxgK>b{9YxnAD@EDv@*HOy1GMd3ne8e}AT-wYvUT9-z59_WtF~%~<~5 z9Uzpm*-WEAoP;KD%n0GC8Sy zAK{pK1GG#C!SGU87JL|13;>SnCAxgbDue_e+G~)RURFOU3|A`+rA0?Jzxg^>YHups zv-$mh{Oz5;N^KjNAiO8b8hRQrfvt&+!oXZ!)&RulVnT!X-z2)V#zbNK{R zw05f3Yc-gWj|Fnk?qOL#8R>dzh+`J~7{M_;ULs)#+67pK)yYh+tFCWBphZ(%U3Ga$ zDd40^PoKn&p?2?{edwWwCj+72=S-Xj*^zhg#FR!R? zYpz3Fcu940JMmZT!k#t-TT!dQV8w16<1>t&^ux=lAI3+lT+Bjp8D!GBMud^!YTwA* zq7$P-b;rNk@#WXw9IdSGq9USVUA?u3w|?>IaRiK{sMX;TYw!8*b+CpGBeJf&6@j(t zW5+Ib8-?&Xi>6Gi#~8`{Obx|@UzFv>TS-LYTNqYt#M4i|@ceTRu3otyS*?;L&0T)) z)6YJCFZG-@1-5XDC^S7ot&WY2O@NsyHA>{LWM|KgQxdC`o>r!2Gbvt<$850+B_oXm z`;OGRMSQ6gUXdDpl*|}xc@e8L;pSe(8Qe1T5LJ`@mSCp3*BdR3D>E!XO=FU=b>a^%V zzurER3#jgFw1pAJrDss=#ED4=IiX#&7HSe?n-ik} z7{|e0q(c-s;5ZBS`|)$ci{Kh0iuPs9l@~BqUIf>eUsT?X@kRi=aR3Xxxvc2?vAuhC z@7}s?=dPVQzW)AP(aF90_MIxK?(XiYxp)q|3W3j9y;am-wexhk?w)}$)96TFZ(IKm z;6wEz!~MV(M9>>xw7W?lF6<)c89@6GdNOSAKQTDU%VKM3-^EZ z^^R|M?b`Fr_U+pbR@4|%GbV9+3;zD@Uk*2z5|Y!XzLL*2f0z#koq4cL^0DANUKi_j zn24%E4}VwPI%yUnuQ5Wl8<#G zqlCk0vl(4Z2Rz#X$l$PoAl4KpN(dGs2wRww;2@|B>LIXvSW~p~U{FDHl%0rZ9?@+; zGXmrxhl{WYn#yr$x0y35jtPCuN)DIXRi_;2+P^aBw zu@56@kAtuxixFjw2W5!jtX_u%uws)-E)5EKyO$wktyHMF%6p9>i| zM;MLp&cdp8`d#Mn2OL1s${Nbg6r5`{6dn7!)x&f4HFb4$G*#6#z|PiETU%FKg@9>9 zXNTMEdcE7LO7%y=@g?v!6$4y{5kb0^v``C~yWD=wm`+V^loO`yYPx*{7d=_NgQy7Ne!4T|GdT?*m_S_*5z>hY2y6 zJ$&Iq4?gn9V;k?imvR~rnWL14CIWyUP9_eCk3>+ZhzSu1M2gT5sf+}SsxjlC)l<5l z1#mjYkPL#w!GyzOLID2XRNn|!wzkfW#)i&e#FiH#o{;R2w za3l{-DkPlWe--+dt}Zz1>pI(zig5@=PX#ODd%OfamIs7;z%r^5Q<4(n;x%7x-MVf2 zj&F7&I9{PqsKpMzg0>7&QcbLeQCY|O2lS)vjrZSs_njN=x`$e+RXF%QIwUPFE-gjP zcNrb<6|n*30yq+W#7zWriM0yTfZl8c#I_N>VxvYAqcRU$tQMS;NA!r^8qfnwPT?3e z<7nh`nh;rH`@vIjT#I_yzM-MvVqsHrQ4!IiLZ%pvSpAGRQL|;wzE3yr#Apa3J3BKo z3ko6`vB`1AnmZo<^-Z9@(3g_Z+PZwXkAYT4EENhQQldizo}LX3vtx`{uLr^yQi*}O zLRcVRbwpdQv4837udC7ZH8xN?(GIs8@ZVOnBSIvMik5%yId+%A$by2|vpYM{4z7}G z^obuMA*4dz^5w0qDJecC2WYT-juzrng?8w1ZZ(>=Ft#f#gSY`}fJ&XLDZ z2%gt+$y*s@Kg1;AX}O3|A7*%8@91d9i(b`o`AQ}K66%v2u>|Xk=+gbYw%ef1M1T_% zq(UN6a4fMhbgV2J=sz#fX%SiMa~vQoYlX?@&L!XFa@~b!|Ilmzg?vbR0LS$i^|wIp zxf?cct1~jHF-AP(W#GD90KO6;CJl>f*uaQ-I<{=<>Dju??;;|UbGh;mp)e%aGCo!t z$qU9u%lK#+AN|)NbS5{L4DMItE`wZ(wE@0!$Xsh<|lz9U~8JAeUc2tv{3J&XCI^x$}`r*)>amGQknK zpWLzJQgzJ|yU8P;k;`w%#X>H>pjG}ce2_fsf5-TU2(Cmc;xdm1^U38&auJaWI&v-g z`l)r6JoqTNsLAF0HGA?;@`#@RE6AOYT$-<0;v@2ifByPsV;cGOe|t6_B9(3m6}O;;K7-;rmE$mM2o(UA)V{_-&wT*iU?BSNWSOzzr$GA5U|>>Kh3VRytzj@vkL+49r3{NI$hyk-7jN9%)Q zA42Z-{iJ1=mpDKk!BA*n$;8FKw2of0#9zpBedMx?Tt>-d;F=};ts+bk#wJ{(*EDSFc&d|DHe@|9iOpGJgn`kz!qmu~Q*Ay`&V4 zZ>X_ioBrEdZo%z`+BX9;dpw?U z9_sobo{|`ISusxlBR&AiMNnExc&uJO>_Tc$j>jX`hQg%iLau1oG7!Iy5CfXxIhg21 zGZ2eMtG<8#pC5c(P}0p8%dBn1Uv2sHDC9#V^x#9CQ2TTbjXK%Ad#m+SYHF&WzjQAX zoh*Ypq&g$(?mO>Xn>jVg>#|E?XRf>Xwk%A0ZG=z}g&31CIec)JE=`iVppEk=lb15l zatXqAxKedea!Q<9EJIiZ0+4_KD8f=i%&9iaPShwqA$-UAfIWgD&PH+gO!Sxm2MRq< zZ~(!Lcyk=%%s_2`z7%Uq!LmLM@+{B-ocDm34wn?D!R#!@9XKCkf%aLT{SweC3$*w9 zZCsUJLLNYFgEZ4ziDt;T>}2AbEO37}S08!*gO9$sP}<3t$ZhRKUv2&T@agu(JS^W> zLOUI3*D`4Gu)BAc>1ntMaQdru+bk_@O#D^pH#0VIE}>!cRcI&{Ya*C2#U;vkSCZECz&TnyES z27nH^N?ImWjaaO;4kf%Tpa91Mi&wxV4t&;RqgJebBHI zg&=fufz~fU>z4)RIG6)D9=5p8yayUWd)Eec}dwrFbZKylM^i*=>-s$ z5jtw*Ozpz#WcBSeZTyMAcFc{8Q97hC35h9T979#v^vU6#_QS`IpDgb65O3+%xaX^p zC^IMe=(%RZ3kks>*s(Dvt!jHLV`eIP3euYA#`hX{?qTTlj4okfa&3LLH+t^8>lV(5 z@jdcL=0xlia-tVch>@}`GoG>0w2~}xp#UKG9a&43I)}tU_08hlRcjB%Q%~4WBq6|43;sHEHUMC`U}l2rq5o)G&6W zq;TQ{4QuK+U2wjlvhw`#6DON-QkV?otkY<$gkkhzivhY~L`!y9#4vIKqEy#RhQn$b z8`SrA_8D1bs(?4z(KCNRYB<%=+F%e2TNMuIh=(a@#-!G^L9Zxs_Uz2bDJj$E&6^XA z1G{ztPZb|O9R{mu2_k50z;Qw-3)Hp-w_6ZF1`GKqFcrhpqTHz?uFu8lDHgP zq1!m@<3gJ$;&!wdMu@u9O(T~!vJ6SULqa^Bh6XfedIuaF3h2+2 zgGhCAYeT~boUDXG4l>Rvf$XOh%ft%gJ85ts$$#GDp=VWR{DJc_Y&YVNdp7TaXv^=@8Tl&p6pX}uFoVLwdu@6-wRo2uj zSWsS5lSGO7YHD&r`i#2tNZSp|Zdu{QEV+3tpi~w8Y4P!;r5R~y{gg8tyf?&Vlcyv{ zQFK}w;>_YABES>?qO28RO9OaOz#+Z?L1-Kt0rE(D$CznE4;g^02YjJcq#~J^_>@Qx zz-0ZY^dOlFluqZ$yG3Hs6NNyj81IR{ADMHA^oW+PHXJzc$xa^MZr{2c{g9D<_rfg_qX1K2u%nm z6<-~pA*~-Auvmkw7x>#Q4z_)HaORKW7%oTKlP#lwVr**GjoF~%X5HH&u%6V&$jC4RoiNE29UiG@+ZW$o=t3@n z{)UR8GeZ8>Jr@oh$lv|(ho5}%*=k$4dF!ukT~000CepMtIw@%)LKIVw z;AMEYyWSih-_bET%7KT0CI%^~lEr)6#kOI-3bLLDbJoiW8#?BQ8DrOj^IZ({X6+(~QWp;Koj=&^|4XF_5qaFx2*q&)>OdAh;k>q$DJTxImFbFD8 zKLLG#`r?k&K%2;zmyF}GJ&l1)2I0TLjZDD^|I7S^|Q8@=q|!!_Mb7|c0? zO$CRUsVOmHhA~*REN%{`OnftXZ{qT6(m{X%ok!FI=-~#e8ax zmZUiZRynpo@!{vxb#UMI>Ne~mQz**$FR5cG&cPqk@uVYjawahzaHoAdrEQM7` z?9@hE%{G1L(;oT@A-weC+6V2alX@X&gqfVUNC}@X+^r&*EIGO%V8o zn)CP8)b#ekAG5Zseb`MMKHMms-YO-*+1CwWoq*Io!T)Sq~{M7Uq z4(pLcCC|=Ux;&H0)F#0s%n41HH76cG$Eoo1nv{&(ZS&_(ik3j|_+*-l`Amw7=L6-0 zw5jpjH{aXkeBj1%TpS_98=v-Ajb75Z^A<1^ANL(>M{GV7U8SF zGAUMITUaff7Y*F_j1;O1xxwJp1SF%*&c0!!c(B?mR4b`TEcUqj%Cs6T*9$kj5u*>_ zIyADuW9tG>3mY96i7<>rI7T8IBSGv}#Ao za`;J!iI6ThJ`@-0Ml<3T!?`wxkC^QE_|9B$M0|qV86OvcQ)dnbvJpr8T(JPLyFeHg zktaE@>T4+^we-)0|3V9%MGHt)*Jsg!|H53DYcyD~skNZ3aH4j&y$-mQJ8gCc{Mp9< zlml66vzrlPW3eN5n$=^s!sWzGm}BFUc>40~+rBv4 zG#nlogLF>&_UtZ(ra2RSD`p&=f#ubYOdEA|ZMyz31HzF8N5>rOsZ$L-Bzdp7x9U_` zS+_-qpnJZtwTuxYg!*85WZ`jVvzWNyiF21N&Ax8VoLS3mSUx`^Re`7{PuPST*Wa`# zQ93qipw(&F%P`euX|=Ni;0b_=hu=9MF5;sALM2nmm0?N=z_4d0sl_-ci6Rnb!|#H& z!e_+B3rS(-@`WtSsEW`qKFEY+h-4>xE5dFGa)^*aGUK>0**u_+Uo(Kz3W&?b=?e+Z zgvdfkA(00UxT;VdIKjSIoEZNvr;uP#;<{F0$7ONE~H>S}s zxY%Indx4i{qa0SV!)XBm3A}7DQQ@;eU=Q&qIS)`nS`mDG0KMQtqJjvH2;4*@6Cnp8 zO?hE(h8%Z$9Gifh;l)8g0xbrQCqe>+z&ySuC_U~AN{{<6i?k_lzaJSHwey6408gD7 z4?f`-8RV&iNL?b5_$Z_o5VKkiva)C)JW_Fb07N4Y?}mCxN*Xc6XsLijDn?uXF!Gt8 zvy{ilLS_jm?7{1FRA|_@U+pi~aV0WoVtN|TpS{h67mt7Q`PMJgQ8PDr;p!F3uS-t|a~sC2bZBZ;cGfHikI9H3L~?;C>(;HCha`WX z4D=FmO=9|#jBqZ_2s3A4PtA}TU@w6Oywb}BqxAyG$b zqU01$f^-E+g}`NkFD8*55SYNmt0lh9NObBskcF@=cTaAOjNwhhs33=+HwKANC$lOLNkt~Wy?rJ`F;7DFs-4?5(sDcfRjM9Yh4Cb!< zyhAum!XH#b%=U2LZyNSQrD-Do`++xc?fs3lv9Ywd3xE-A245_boRECo>Xj?z&ACM( zoH0de0rswu3R5S|Td;6?LI_|UxR4z}c(EtQIU@tw2yiYeUaLnDo6y^9=ZE5a86mXy zG6_5vFn5I(k1Tz`9c#Fwb~xF1cy8OMVa(z6K5XQ%dwL`qxamtI!)JA}7 z0@pDlgsgKg3^DdlIK+>S0FfKx`l<2gH#iEyRgfbTFvMHJ2B#heBaW4yEDIG}tf#uQ z!iZC+B0|oe4`IYZrkNARTvL*vO%s|@Q%xh2CXFzdsZN)c3r1no3d0f59imj?Su*q# z>v97Fog`zV;pa2nic1mCl!-Jnz`!IRvQjLSa=b2z;-9Sy7eeL6JsTN$mZA95Qb(Os zl$JZeV3>ekNfZw3xLo9aQ865!$IU^KI=@s3NxZy&ME?e9jH=>Ls9WN4k7cTVT;Mu|Ch~UgXk#Cb$D3eNMWVgaV z%i@A63j14+mVpA!hddwt_y5#;DGl!r>D6`AmKE2vwYD}?Tr92ahDWjzn_?^usew2CI(LL z<5AbL{ofrqa(Mr~-Eh3z4NJuBS}C*u7C`F)JjCWRiQWjs`rBHm-5xi8-6M}Yl9ed4Gqi}W71PKN9wbkE#ok*JyBNA?^M zB4UX<$>*CCct<8EGY`AF#gNOH*xk*=J0ee=I9CFn#Il;|a*`wQVr5ljLtRY)Abui% zHD)r8VYg=_yu?j1bpn9j!bFBp^%s!5u?Y!rU9zP37*}V}{%=3;K;x)?WU;q|z0uj& zf~*cE6dS6_+q`-6@jAmrMkozk`@r4zJ^VPVJ>u7Xb3=w)tyz4>V@s*G5p40gKa~4z zZA2TzPv(R!zU7t)7?FfIHzGmbJ=s%COr*s=M#2XAI*~*OzTZPV-90VsNK*61iN$XuA6=kCRxGU3>PPEN`l>IeT#T=Fhej`fX7WOERa$Mn>X* zp@FIo(0$n8+60>U0a~SkR>ZS!GH8_wT1^J6R8`2ph5SzKbqyU&4V9&^-zx~TrJkUj zLS*6xqmuIMHV^pjtdz55CBr@(lLQyHf3@rE08h)`vSp~bs+bX5>4=$2Z&J*~_9%@HJeRbVHZ*?<3l;BdNfNB#cx@dFY z2P1B8D&6<-Ki_$$?Br7R0VHAjBj=lXbn!Z^;!e1t&y7W7dQnl>qC4*lt`I)@CPLqc zjgF7L@zFOeR%b3p1Y;OAmBBuJxt8i?f_CFet-4n$xBv2TepjSxmxIL=?}Mqt|2`N; z$onM33VEqj|L5=1`^!}X%Vh-0`AE5upbhfM{cHD^GX={@1Lgb%(1>f6v-!(41k0rc z%9;Q5`_K#YukP}44pI&;^{@FWw^QgZmme&LL`#G&XrdN6vj0T64*&ZO2j90O@IDQB zU&S@wH|8&g6juK6n-eIfB<1?ASq?U4(4{h1E;CS$CFNSLSuQ}&mS8zWpq$^YL2=D; zfpNbWEH^7q&Ocv~8fhd%ab4<#@$=OH zrxp}!3l@wD6ts|n?rZiA1V_29%gdQaInOoA!9fJ&4g||VbwFlk5-FE_&2lIG<;J-L z&UmDp-$yJpSS}G%A#$7KdHz9e@7sU)@S%NszeTQ;Z?7%4Gg1xUar)cR*?v7LJJHkRDq#)>_EO#R9nUiin^NkrSznY+W)HHW&y( z<}&i05u`;M?rDXj>mX7anKmSdFXr%(bR#IW(PkpGRcJHu$R$#nfADzTVL0k=)1d!G zr0u+dIwG}2z=>mceU{f&1w{5yk?1JH7n{riBC@S1DOQFb)0Zb9B70Ft$f6*x76#{v z1aBZX!V*8P{s6vw)9B7BXMyizI(2$0dn1LFOcw11aKr6}4oXi|jhcH+N9^D1E zD^dwhOU-0xD=K*ht}_^x<60JSI(5TYG9G=Xw^Li;*LbOJXk z(U@GWdoK0g7~R|)PC0eE_VpajX?Ux>?&o*1Ahl${zTkvF5GpRC=8xA%Ja)(&zI;?{m_Rwe zCqj1{ve-M(-EdMN*PZ$rm4!sq{sx*KrT$2&8+n#`6hDby9qH@OYU7T6Mdb$D_G?rX z5~ynK^Qh|kAl;&<8UB{dhcDny=yqAlxPlW1q#|_t3UvE*pr%Eypr&I%s>MJ#7oeKr z(wctWD%);&A_Y45B3gwlh`)mmqOL8$4lc$VCLMefik(YZ_4B%f`S7+wUHMt;J*Z0^ zsB0R*C&D@-qZ#oa^?Op6?Z3zjxqPl&*2D1?aaWLHSP?#7kYZ#-j8p7(xHY0atcai0 zclnCw$YSq8n^u$h4rj61s1J;rtO%f?zmbzg7Cc!I8hDQ3w>_lRibtt+|FV^2ojvkn zE7{~rTKShiugj;x%Zs#fKKc0zdVMJ$P+d~rzF>W~Be*2c>+Gx5M|{q5D9qqz(Z2Ph zeVD=2VG8~E1yuayb7#qT zeG0Tr1wNnzmqH?KeEYd`Hv0s718BEM`zH8vHoKCQWBy>qOnrj`4vo~um@)=*{EZx5 z`R(^pFB2!f(;JphvA7|$+=qG(q28}h@5iWj7wS#;?!dR(zWVHo-G>f+cl6Au!h)mw zzT0!I==6pChYoG~bo&>F9zq^zoM71L$b>qHox;vXs>r4I+j4e2(uV#DIi{Dh3-S8` z_GV8Pc#jA zBa#ynX0q)DHaR zeeO%oKmY7XAn(0TeMz083h^nSj`3q-A9{#-O)LE3i*L`Am$#WW3CwNf<)?Oip`ENN zFF(9x%a#M>Tvb!FA^wGPPWoD+S_>VriHZwC8UYSvunQI??bJF!w(kDSV zl$bVi>FOI+tXQ>f)tq&!moLv+H#a49;>4*5FFn2yz~B$v^Wf93u0tzw{LmV?2;|N*LU3E_aXgOD~O4Sk5YyT8JUl(l!nK~#KvYQL*1KF z?Mi7%EOJUaN6q~()OEJ^jOwk>J$i(r<`(?kFskQz-b`3`%es}TSFgS4)|+m;?XC?Q zZePD<{puS5tiSrE4Y%ET>&-Xck8}I``2I807jFYp;5|_DJ>V05fXt+u@%ac@O5efv z+xXkx{hzn-f08x)pZNTP`UmgXXaDdAEODJyh(z$fXB7%I$pldP+H7bHewS>L$>D7P zquZ7L%k%#?;44dqP!tWI1Dc%aN#X;t7y6g0?6v>#Dhp#ZtOC?-<6UN4{ zsmNhG37L#zST%lEvFYq&HU(=l)c*t@T5KVnk6d=iGCgtiWxC`SEz@f(W8N=V$FKcw ztz%!Yug_=q+3;=hnSGL|6Fpj9HbaRJpunP8@hrzg=9 z@JXjbDJx~7y1{SG;B%6y!M%|Vp~LZsqJ=y^r^!Ot)fITnl7ENST!|M+|1-=;5M)O8 zQ^%-pz(sdbBv0;F)aT%#`>504p=Uvf0_p;I=>??iF2P^U;d2sq2dS;pHt^mZ$i4d+ zFUZbDuFB4q{33RC4R*)-h5YWd|0%zt@{p-H5B-C46=uW<>J+|@ff^V5v^Yy0hx|E? z{~g1Xd=BIP1^E9deDbOMAit!Q|F`_|-{7Xtft$VrZu$hprSAbZeeC}UZu*dK$T#fk z^|kogef7RZpU!8%XUJzniaZB#(ZbyTe(wY4A-fLNH|T$&1stdi&l@h`#_#{kQKi-+JFHU!pJBm*h+LrTH>^i+pQ*xA^|- z`#bpi`@V;K_xf)2#reX0(Y{dMvP-zV_#nmXU#Hii2NGOb*Fra3Uv*o`7EwWksbUEg$p<@VcgGAazvS5ACUz z3gTYiY(v2l%CBWtp?(>pF(pK<1m8itV1br1jf!M)IL=K_q^v|S19Cgg&!Tpsewmgt zK*<=I=g6YIzzg$1hk0w|7o7G~fYkiVE)_`fZ{)8-E8ad?U+OgI7Epdq*{#cYLko=&4ZrcYPMsi!SlC93JIjcy#f-)MWA`H=v=v z+B=^0HQ*hO{p20u%ieKj{2g~>QLT6fz$*jqs9p{y(I4NzM?VPO!*~?w50+ysblJIx zB7K`!f~JwnecGT;X&lNBJdrq1cHBFOyZxIzI{-XF=O9G&6?A!qR(SO2i4)|T;qH{D zzXz2#-PjDJCI0!CRu~$}vgDiLKJOcQ2HHD1X+jnS;vv)s5ZsZG+`AVqe{v~w8Aqw_q=F%1 z#EOUSoe>t3_3TpULEfacg%|gSOrIXofB6qe4csK7$p~amBq2v)MSy6xr~whhfjjvn zSH#{(-D4ITFD!NHpKa`0e&36QC}no9By zUhV!8pNF5%+$utH67iiy!pgPO-^k^!<1=@BwqHJKeq;=P5H#%ZewY$}@VkGVllkfS!=wz@A|8je+JE`6%+KBgAGBPKgM`>~ zWP$j1{6R}4UNUokP6IM?sn>DI3Lqc&CvL{I+XkDu7R-*6@mtbgA-}@_{A1gG{+^%G z8f3nN)@h*JQq1?PL@qTYk*4M)auAsWTPtz%!v)Sj9L4zy=g(iLtFNo8|L(|^-Jgk} z`wV_9WA7(0^6dRNA;{xOmWO}Z5*Ip}lf^zm|Lu*XYy!?bbA(gY-_Vw4E-hOMmGSfJ z8rAZ+-s8Q?1J~m_b{wbtHQ=peo#Bhrzb}60lbp-}w*v|$TxZFb!wEoJf@+JefkCk-F$_lj52=^3;Y;=x>Ls}Ap?1V- zptU4ogS<-xNLD1w90V#=|De+(_6@-a=*KUGL;_8{f3}f%^Yb~7Plg4UPPo$k$##wq z8YcQ5%KV%Ykum{}HB9c#lFQ@462ullte3<_MXanu`f{NrZD!!>N`WKKSbU^t{C>-> zyHXi2F)D_^GlP8$MH7k6RjHyPg7<;8kJn194Ww4GXMi@$?qs+PBtLKtN1pQuLtMRS zhIZ3g&^|oNaQh-7`|z)c(ZacKN!FB%Adi|~4x$Da>87`483|AQc z_iYT9h!cjM5}6G>%T|U<1PH@TUA=w+!zE&a;b!T0%i#nIVG)oNQJ_L3s1SqphM~P- zXzw!iHJBJ)VqfNN-}U7-Mi>-eM;ZD;9ns`Jz;(yJ^G`+?lwEk?L(gSXqf`TZ9bSBd zI>-ovlFPve1=FvSrmns@jS&Xr)(9iKYtoGSV|Byv3?Lf)QfoXui}Z5b2KmF7?A`a> zeujJK$dN+~%-G?pG=_@^ ziSf+b$*6ZuA`jnODjB^@rZ_4kg(cu$23Sp4RVonCOWE%ii&Ew zyFF11rQsL`jkb}#`l`47(KtrS5z85wmhzj25*bZlVRaWlY%D2}BhYJ7ES#Y;X3lgJ zGD>)8i~==Sk0>Yr;hM*kG6be6VBQD;vNqiBWy+MKB;>tB*hq3pa!N*eiV_KX$B+mX z&W%XG=J=u75^q^+%^v!kuEtGlz4K+r*%^p>Qsu(-n|T(frVjW;qmJhx!dlFUCL@>lF} ziOVmZDY(E$1R4p4C*ZlpcmhV!)6>=2)zjI5X^tvrga*GQfj77+BoyH%DmZxr{+7eT ztPJo3E`HMZCKkNYS_XTlJ8|{jLK@(m|MruY%D`Yn+b^y1M~?JU?h<^un>;=1(jqw= zto-q(bAZ`~2ky{%FAbMpJNjr%!eLrW_#v!DDh$ze}nxq^d z_ zZP!GKU754fH0^cg&Yf#$*G!nrfOvgCiSF<3Lf9S9T7jN{ul{~=N8~N}0jyrQ43q4b zLxUp-65B+J#bEaizv$A&foLQq11pL}JQAGJK}Te%z?YBq(#%X131bP-XhOom5z7%3 z>Hk88G28*w6>*)BQ4tyqkfkG|5F8p6M@{@U-{7C2Ld*;yW`+Awj$pjKm-&Km#*io%?@$154DT4k zI~vIE2LkN}^nLJ+m+p}f34rxUgbRmW*hNEmAZ?{@r0D!ex`3jqzMzd5j~==|u!6GD zSCXY}8R(RalG&hBdV4!i?>oCXJHa1;D`0W9<0tk5t^QvLAT|DsFa|9}Z$fM>3+QXy zk>FU0vx#EgPA>w^dQC$Ic32(FI6t+uojrHvG;)1a)|6j_yLVe_aZyPHwN3l_l}e}G zbI(1$TDxMw%vm$%EL^_!mgioDv3ldeh1sjtuD@gLs%487-g)P3>p_e6(E#q18vqw? z08t1s73oeB~4 zsu0Z95RYArpDIMy`hUg05Qh+g?fKOz2vC>gRlEn(B~g1nAhlre0H}M6l1YaK`*Z+7 z#(L`M5sN&-U0vNkh1?VcAAb=5V!{;&H1mS%c#L|F4cX9;hhh_TVvTPHOq~|^aXiAl z2=|CJaDPZ*BWRms4eY~n-~zFM-N?SoK8fv8D9jPf*o|$XewEEW%Dxo3wODUyt1l@i zC?Fe#j3h~D=6%_S75JFi%h1mu{lMqYgb}OJc39Tmg~fdn^(w4JZdyu9txI87)KS~% z|N7kuY~25fLf^vrbc|}j=IHl1P}M|D<55n#n>%^Zyk*OlAzfTyO^u7gMQ~F!6OX~@ z?&+>OpLe1ig9Gs6j*hP0-qWW`ntFP>z}PoM0XQ891dcHqNnS&`jx5)z5QKYin0Smt zSYe$zA#=v{%cdg00CA@i7R{NJnaI&~gi#5kbl!yy3+oBf$U#52e;|yTN2}5oFXsiN z#s5DV|Jl*V93PFuA4elJLKYZHi+<$t(TK#_h{xJU#@dJk=h1-kNV@vkstbDi^u6%? z6bUG|h|GTl!$ol1zATnc+htkoJkB=AY4YD+ojZ5^^(Pq4)6|RER3UWea@+qQ?>*q7 ztg^rH`%KSFdasa#gx;%2O9Ch;C}LULs;qtOt0Isr>Xh+@IE7Nl4p5Snxd z3F&Pny%*9m&;NVwok=5;uVnHOoeS@7XgE$7R$As226@EGnA57gAj`@EZ;M$Xg{E z_+j79ya>MnTQ>N%FssOl!RkRs`nhWn1V!rTfK*19CMdTkA1i-Vr*?Yf#3{?AdYw$q zUE}rZ4OZ(7!$1FgILCP*j;Ob?qwHh#99{j?IF=&$bYdj^!)&_Ka4vmgNXW+0RjW#6 zFG9Y=T2YjrZ>=oM$4K#@7Sy*QWK;)YuPCw?H6wIHsGp~oUnqo&;q*jQv`N+d1Yeg1 zZBa={VV14W`qL1dHk0L1JP4k|9oLFeti_P_fpv6 z#YitHG_eMsMobH}sy#LJAZAp|tFy+AQ@1rMnu(Zkll&+mdS!=BgTGG5H{(h6kTg;n~<+*IL(z2+^DCCLHd|F~mHVtk zkoCYt=3vd@`6In0O-;QvtYKb->vIp@&QbLwJiUO}-9wd+;z_=IPhDyhjWUWrszOPPwSKQo;;EoXd^G55^Dd@S2})E2$nNb|8lm1O*U zi@&#T%P)s~pas)!{+PbL;zozAlI9u83^%d&%vs%5QG%py#brqBmzPWPjJCO^2@{!f z=9F|RRj;8{N1d&@l0m;VP^(U5 z@>b;_Ka6d_R#jGN6;CK^Jm3Qy5KJ@ZK&_%Z(RJvn>(N&f50q#A{+++FE38>*#||F& z;@cxf_W!mc_4q-Ei7ANZ8-@@7P%)+K{PEP${mIGJvVHrKxBvLr$6L1H2O0kD&29hK_Y zm-S>uWeFR*^5*%y_M=yU_LQ-7j&t=o2*2S&Am^Er(EZgk96Gdb*UwuvZ#_saIB5{= z_io$r@n=77Pu{n$%!)9uM^F8@GX?zt+It>Oc=GYP|Fn4SyoJlIyX~`@1enOZ&o-zZ zy>sEs3G6#;;>T-pp-W7*SQbjlrRmtu|0q3q_af-c-v-h5u;1Osr9F249k<+c+ue_# zU)Hh?BGj|Wv$_}F`|OjGXTFWc!u5!>$|nIL#{}hm?MLr_@Xe;R3HWxQa-%8?7U~OO zvppBIU9DaB_6M)7e~~sru}Y{^wgmm4OZxio!MEPt;OL>X5w%#Bs~g+xMa9T7cqj$o zZ<<;>{Ip2CRa*-w*AgVDyuxFKn+(iu&&o#>%+^Mumco&)jX1LJ_k%k>fA_sFH~)~5 zlApb6_vV-W_4%g5dIZVZgBFI&pMUepv8<#r1;N5|4PvR zPY(KX-lrVgr%vtuZhP|nV`t7ZR3Xb!UeUp$siZjq?D6~XiHXRo^X>2eUmvf~@US2+Qv&Dr_3PKa^6p2UZhHTX z7ssV7!c?|ESqTgG$EA0kTL`mzll`du$0TV^SiEwh@`(PcAAdiw``hoIi&w5wmV33` z``ZCqN9(}@yS9Gw%{F3pXfzVA9l@XOiF@vyJ@cBUu`7^v1Si0dFiA>Z*{iqqLNBJUCy~(d=PVEB2_72+GuUt}xqH zZce%jsM9dYB%yd2;j5*6-c#7PDiKm-`=5k+qc=@&{z?CgP7m`qx~p+2bN3Ey|W0O1NWc@7AUja)5g8{#`~K- z{pj6S&`E1q)FO}>3Y;T{2Qt8=;o>_?Dvyo-8 zy-;>x1aRZa_3U}=Q}^8d(3-~**eB?U5-E5Gx<>c(H8W@5d(RV&PFrgZB8nqKmWLaZ zA*04ai_+0rlwE-{8qWWUy&m$$xi*A285xC$M2HpKK@phOMj$Lyn?7V@Y+xATNE8;U zr10tELWZB)@$tTW3i1Emy_>&HXCVQjCx@$4<+d)no3Cf|hya2W{Qm%Y<8$=JN9c_Q z&>Ih9_dK(|cA~hXydq^^Mr9=>X+!efoTJHmtUO;=Z>$nCv7?Wb`G?4xn%kqi=p`UnlZ6)`ho zyvI%It%wnOR^znz)-La+dXguq>WcY(wnsrT2HTJ6?R= zrEz0?HTv3W5;W}Rs%!Nc-!XBQR*Un7|9d&r)L)SNU*J^p|Gk`A)h)>W&v5GG|7}j4 zh&lCQ%&7^OQzv3horpO#pyFJGwan(!9hFoSW>pkphV~N@P(KZlq=TULu?})jFIAG1 zI&~r2t>i$;`Gn1t{e(!=Pp8Dz3&Cir=5@^HtGm@p(ws93m8EPdOHuA+hkL|{^Hog& zS*LLZUEowW<)&FH({c$@Au{z-I|Wo$=^$b_l}|c1zfhwdBi&C38vRsIV$Jz~iazC_ z4`LrUlyetEpQ+%V>m56!ncyG3Cxw8C!?x;5YkOlE4lZyaSCoD-J1spMAJ99A@U7I; z<>b7e%gHHg#d|pjhcbV8@7rw2*xFcGTE2hYs#~vFG&gembqVY_Xt0hmSOmfe^0KAU zENM7g6W_#{@cY;c?1VNl!*Us16_4Wm=lITyz0YpQV6oCHcA3m76QC4pW3Oo*y6^74 zKDroNhmXLJpR$kH5s!tT5i^!9`Rikk-FL@bGg+JjBS*8rycm9JS73Yffbt9khnHSW zzPweEAM+HPH~$Qo{t9KJ9D(I5I|q4A^G>GcAyzy>16!*b>YAIX zZB~9xpoiCm&j>^fehwjqqDO=vf|^~9@bdODr53asbrHiu{rm#FJUr}8EuN}U8IAj5e7*zUKZGM36MRp8XWQg> zX#`s<1w69|JIznfH=nTgv=2UT-(T-vxC|=FCOCX$V&8cH2RGy@`3Y2)U$6`_b;A;7 zR$9@MS?mY)4diB-y&^sOU;>^j!vTK+YsG!D^x&hH;7Dg3jDkLaR{95)#XF+d2G%R8 zt1O~kKwq3We)90q)SO&WLnj&#jjlE~4+dRp-EhgvEp5ROxf}sAih$*DBZETKYMqCN z$;Zzxdg2)008R1Xy&lvRXukWAY}14>lP?;9_^Bp?VZ=p~$Baixj3e$2Nnr`b!Ce>! z_n|MAU>q#LJQ`hWD<~`}L3fnYwNy7EC~R@D;E(L={NiJ&`6ckWHzS8K;;n?BD>^$P zBmFh#cf7+@;~yDGU&@|JRauAM>^<*&`1wz}vW{;~z`iX(nc$rOUGV$2T{YRerB0>u z2}HCPvugc%9Q%f&Tf*UaIU5{$J>TTbuq$vd{7v&E%KLJu{WpA2aP)RNe!LzJ%~nRq zIwkPIKMgN^C;@%-DSI(^^_puhzjXPZZW{66Z2zZMUUkj2S1*ek9ua}y5oS7oTaT`~ z+HwOtFBUQdN|58QN-spmv`9v{1Z3kJ?rLm7vvJ7bg}9!T_^krZ{Empm3+3p-qLSj& zf|AUPGsg}d+Pm)vvWX&CYgSG!Ok}dkZJDPL{=%$=PGhSjWb0Oh-+t}Y_r5~(HduIW z+rIs)_g;Ms-))tnM~)gdHa2$LxN#R>nlNTUq^>kGE@8BP=v}L>m^)?quwhZL>`GCN zWC=kO_$gzeV|^M=?u!VPLL&AZsq&1C9y29e5~GRYU2j4Uet{mm8$EcxkS;J~<>wcb zoysVJ(*dkO!@pPhKyQK0%f5^2SOWyV`4Tq=Q!u2 zx4}8^k}Q6k?zncEe@Bzf7hI)RDax>iE=flM|lEL+x>!ddGbJByXOPU}pU@H^Z%4?(r z(kNC0p>RD$6Dd&OE)FH?I6$sI32RW&Y$*9t?R(g_NuwsZJ+kD|rI#;>4}eaEZ{hN*LT+a_S>*0>gD4VNfuRHxag#lYec2p)HWH}93! zZdi;X2rt-7F*rqhEv6ZoN%Bmbnq$~kRoEJ;Y#8^>>9?fVN>gE8 z&ROTwY|TqMc^a0|IoLE{La-p*BLaoej36cT%*~126Lyjttlk{cMAFQWenEfv^X0QA zO$`r=jKM3wgoC)5S3kVQ%QSNKjLKHu$ndGtC&BrC>_v;>MvaS5S<~jlBZP(-v0zgU zS9-)mjh-AP2Na*o$j@mEm^lx6&~n?n30>Lw87GTnW7y=;Q8C^PCsL63m1}TUfg=;a zk?X;ct3ZHN;K<4SY&CAOEO3tJ1=mFaYe zzAzioXrQIu;FatX3Q1G(yW(W-TnT6&Jx)2G%Ap?I+X{t>mr<$lG#Og54j#$N`u+1)Ufl5Z zkNcLw;rk#CE=l(*V|1UEJf&#OpAl@d3$utgtc>ZU?wAy$Yt9D)fsJMc>zXrgdf8V; zvX;G-07cr<%9Fb1H-7Qq)*a?}L@W#M_;~UA&v1(>U}c>=lJ$H%=~57vAmkDL-b#C_h)NZX}aVD@y@!`>HQG5x`kW^hdvTqDOgL z^qxb|Nq$~8Uo*+Awy~_BnM{2sPmICG*WWKBECgn0;lslG{X0r(>T5h_E%Ng7Lx452 zZ=YjP;*<6cUVM88B1L0%=zmVC1I+^9 z0v&uYf_%YMdYi4LCO;bn>sm?O)@szt`p{o&ekt{c>#7}d_VbeyZ;wNEhTYjZj<%n;uB_19s}EUZKIpM)7~ZaB+mTsQK;AH z$>)|)TAB_Uqddw4uSX#DuGUVt+||W-{u&W)-Hn)~`WxDgPq-@P3^<@6PPt#T_YxeZ z-l|rv#c!xyWx8%VT>Mlgud7BV^1S@oQW(L&7+l|`H{x_tqsVUKr_7ojF)3~`a*XP` zS~?|lS*IiZqLY@+=!g{cMpIm9g_Z0&P1BBa_BA5K1wOop;V=W=6xfPz1hr1%jf5mI zmUTkQGhi7dc#@Zqj@Tts#-ayNMiP}#WD{lh`VUb?PC*VYV^>`=`vPU$5qt;ASdTJn z1)>ap@1e@T78co;)@dp#w!Zx97{rWf>uz`ynj+2I|J|D#ioG3l>Oq>#iN{eh&*@e7Po$eee_4N-o`S42V zv*N*_g?)KWxR>PO>(+>G)ZbXB6N838y| z=&yL?GWLn;HFO*H45N`+ZojNT_a(4H%H1pp^=`lq&~q4-*=TKnwd|}79h#^zvKF+~ z+b>w}kB$|eMa|^lJ0=p5%_G&!2d(vwR2lS4ujZa=dYCdax1MV$Fl%7rtSA=dUx&1m z1>1l4=18%9%*1Jap}+hzffZO1J?6{}Qf#|+?XoFBb2Bs7vP3VH&I5@7VHMckW-p40 zD3%kE&k#}1BM}iT$QKdv*TTuRwY9merOu4N`_LL$bXrkyA~KS~o{;+>a84lW@EBkj z4J>^lB{nQl1)ZOcF0gScPEIlz$yq^yFA@HX$Zmp^Ca}|yx{!0Ev#kXov)xoi*s;Tn z4}Zu5gDixq&SoginAxD#d#PAkeN($$ry-9ytwsl9GW;d4b3+;2@lUNa!06fA)63f@ z(hLu>L=++@xB|G%M&AtwZYw~+OTjOrN^2XcaiWZ2QktIwcY@NIhPslPMhXsZD=fp; zuv4q3M&DR;YAvFuy6Y1|kPeWwz#^!<9k$L4Nmz6`qX%$S+U3{PR?Pyq$IA4ly?=gq z$DTLI<^X@v8++u}^=9+iDV^bS?@54~3xB$M=4q_^-ZIN>1{e@%&}>BS+VsK;`de48 zzIl>imj2|6hgv7y_}HlYvu9uylwVR&mS0+d{3ZDrXVMEYQqwXsGmDDz@HJFSIhkbY zu0xI%*4)&V7=oNS%nb%aK+m0)O>8Th8(R=MLnn`d1mb*+UNAROTX{OY$)~H#)~F5{ zKf=p50B#L|m?(!?UI?i=^q%Ft!&NAz-J0_K!7>s%I_wOY3gLXjJ=+q#Ve1)lxF>E{+_+A zJRm)js4ghY#1p!9ruGhUi{$TknPks512tZRW;yLCBO$hNUjVw}m0bv$z$2tMKQB8w zD=QN#d=hv4SnGurkd>fTr_*YXH@IEK24#b`Lz?v9<4?q)R~BoP)Yo2kK2CWS`b;O# z`t=)9sv0Ifv3m6rae>~)H*g@$S8=@CRNhwEEvM_0_7g!7I-w9E#qZ0`0 zVp$@y({9xc)ELG-WZCw7Ni?-NnkP03`k*UZ%7~fhbg>w{M?g+Qh4ju;9w_l&f`?| zT?L0$3_;;|Xz0F{=FfK)az2IO%=lkX*l6^I$KwA0g*D(|4gbSD(r@Nzt*}VGF@0ws zT9y1GHCU0leXKB-L;BTXCsk08oMb@8F)XB5YV=Lb$cWU`T=AS<$F#JjXm!5c`16gS zZ5KU^U6;3aq>z6pJ9Id32*+sZ&JInFl@(^t$pbu<4HCSIsmuguIn>GuRVlprWmca6 zg|mlpy{v{cvu}d(7nHD96@~4F%sdiszEsbCklm=LjQp}fUX;{|6lq8vjb!3TLyD0u ztNB-aT$9BTt!{LlOBPGBF|MO90T$aTN(x-vsaRD9(`k)rTkJ>8oM zS7|pmKT{J_ud;`dR4=hdlJM@nB-JZ;rz28DZ|XwWbse=U=2Hc4J^8mAyPl=5Mtq8S zM>bPK;Scdj5-u;%1@G=-uO~%OlM8W7;_A+E#+h@Hs_!L=2;*b&3UGBNg7t&pAH}GB z3iKln%|}7Mr$E1_K)(fqgLq+P%!h|rKJ+%w?O+(^!Dt0W?Dz^MyYvjb7oWx4PVXfk zyCDF6XK>7fLI>PLVFlDGhHqpO`M2}`xb*w90n^k}Y~{+;v;T7Ey*JJbwIAI0?!P`g zGHKF@Q^nz7g-B><7&Rs{ZPKJu_#9ejY;HBuXW3cXyYFo;RfjLV^Nu?&k6^{y-;+%v z@^ho6oH{inHYz(`HpS*=^G_q9a*+}VliFGYqg7ERPq%h!+Non1)$O`r6DCd?>0epx z7hq~?G-3JatgZF&5ucmfkTTj{U+dvTRibSQn>Z=jOWl}@P$cJ>umSeOt00CMPq64F}4i3!Izz9I?2;t|LENuLF2iJlVa zwGl;xu)sDE-^eBhVX+lP44A&377%}k7uHc&Q^Q4z+{QhztRl@2Y#_y&inF9DKGCe0Dwf?0WDSoLQYO!DoVxT=NyN6^!se^`xP= zC>mTeG}kx_8;VIrI+$c=rhp*A(Hn#ZdNo_{0QKu&ea@TB37^!rPy2p?=dSZ5dU&WF zr4EI2B|3D_a(Vx)ME_lj{<{JFw-Wug68$#@y_ZkyOA|pJrUm5fL1^K!QmB6Oa&wD{ zt>ogJnVpRWB$CIUU6I3xMKRo1YbVpW84TEo8gMuR5dhiGWb;To17BpE-ZGpYkYBgL z>Mpi{F`-X+O-@ku5>6K4psC+P+Aa657GmsowmFop;pO}#`!Su?;v*Ghjzq%=F*n(T*nFYej zBQp7`QV$|a2FDZevkWUPYT5qLi{^iQ7pJT=eE5R-jc+_dXMsqj6v^> zLGJ}{>^bM*ofUnCZUm+l^0$>F5I{i=Qzj)Pf)sdR7YsEUPGFR^f8-HV#~q-!-*SE~ zNCc|!>M&cOA9WxL^5%L6UP9qqNCWBNSE`O>MmA56zgy4Uk1Hj`A` zPhumF$MKOG78NNx9)p<*a=VeY9s;V$2tSh2%94_NYeBNv4f(58DxC_+aba>(m4I`f zhonnY*UvhXV9$i_n7530-EsC7RpwoH#Ysz4ciokVPcqw}a@(64r#z|6stA5^#vSqo z&GZL+($iskuTD?*c|bPO32qid-lAeuf(SWCYHCMvbECJn2M(aU0(}D_dwJ(ZJ-01s*d@}*J?4;Bt{SNnd%LCsVhtz>cbEtWX;wVCgM>7C5B zB18d#YNxS!m2V4-g!C9$SKv753WRdPJK0cBQD0qav$9hbbt|Jhx~;OoJ2Wa{xW6Bp z((|3*yJ+A=)^@XiS2XyJ`)E4wf(=AQX4ctm^eCZfXD3c>dt)gbt-J(bwI6|z?o6DE z|C3_H^+#H}a^)9atXzq-CcV*@d|P17k7N2BntLvv`oQ^3(SG#NHP?Lf5h9<-UXDgp zRaTXimXvq5O%KZyLaJ5vax@Vk)q{fqL%Lhq^91h)vQWAS?OlQPPDOjCVw?r#ALP`LR<4A?zool} zZIa@ifDo2BY@>Ab<>Vr{MSb(-U#Dy?|9MuhOd)ya*}@k{a^pGNJO@q zag~E?R-#>v)mB?mSz20I3Hp^)Rf2+^FzEq*bVB1m33xj!L`O2e^7kWJ4)gOL4riR8 zurSawI6Mp#bp^pGn+3{NAv!Ugn`D8qS)l9!>`ZAAE5N){bCYfusnH99R? z6`etyRW~s#k1b_IS;r3?{AKjCX`@5D z4V^75u$KxuvCANxtF5c=H2H>&8b9{pW667tWoK4eS|Ml97+lykR%yg?v z`{BKJ-q^d#J9g==HI^emmSCR%e#yoUZ&*F-h- z!)f^q*oQO~rQ7Z4MNN2nt|;yB&-e)sESWWKxCgYC?!(5aRAYy^V~j1CF$M1>)3C9# zR|4Vy9{lORnIhU$oU^4L#K)aS(m7-Vwq9c|=GCCbiGRV**hA4an44mNU9DCaxqw~y zZ{TfguxPi1C-DL18DLc_$qmHIMc9kjY$YfGi!A+MwPHlzp`2{mq~Qf+5n_Z8ye%i& zit{zu17ZYN5Nt;rC?#@+MM7dF(GwS9bK-M~DThNrusNkOh6({@gh@DdM1Dukvi9fb zXNt~Qz%PKPK}$M>VJ=l*V}tz-qFMP-7FrLeM(`V=SK-1Hd|qDofNYk2 z{{DU>mv~PD1olM!{(&d~y8`;b&krDY$lU}PhBgbHcu#?$5QW0sz-Z9BHXNMfw__44sUTHBcJZ}^-@-o=rQRp04dY=W3xgv_fI zwbiIy*3OwVW7eE`vdPD8_t^s_#l0iQ*5G0ML-HuqefQoT&vr_XQcLVNFLI!BGthY$ zW+6z|(u0%<%!KWHUO{#)PKso$wY0p{S}2>SAF(aj7M%LyP*^2U@6Xp03Wt`_@ z-DkGeVw`dnkRhlBO>4Ky`cCQNqWDf&bE*uw@8gu)rR_-IJk{{v67N;)28)@EClkSK z_&;9OTMI$h5+tTGYd}?<3W4Ul1N?jfSZwzuujJ%Sf^1@1cLj|G_ws>HB?N7VX7>c+JW3z(PUw6Q()>c~j+izuM zCr%u}yX>^%d-m)}J$brRw#4jcxQN9Ji=8lbWN6^<@Uc@SPyOkanX~6ijVfZ}r_Ks) z&G}}_<`g`)&%BFycm#MWr8c-uR?yM4uWwXTpvO-?6?FwKUAi=kmHdR~B8_G2WNtxr zUQvZD_3){qzaN=9bN{}B`}TyjA3uDsSUdizYp$P<=fW=z$H_)RlXh5iKtKRuWCR5@ zAyaKt`;sN$nrb+sHmO3F$fL2F%*-wD3UpVgb$%gUqozkkkDfGX%=mG$MuiNEjU5#` zBFw{N6iw~~jgX(bvD3?llLBiN9BSL!F`_UXw03GSx>ZVxT^@~ZFw&ZetyL8z`Nd6b z#TB&`*?Cpfbdz6-UcKR#Z($ADm z_GabyarNre796iyaH@;U>lI~X%97wfWm^fv77O{v;?KhEc){Ixgl1b{l0Z4h>=qP|!YV80v7jIe@Wp-{_Qn{2-8d(CI0~i;2w$_KV5%U=12Urr zrba&si3EK6{K)a0iqUXcB;s&HcJE7rF=OCGat+-j9UVL3bWx=a%FoQT7UmQ}n5Cu^ zWWi3k6k=?$POq}VVnj!h7({P`8-T|ffDoN-!iiH;LKp%VGruJKilWzgu3!7o=HIQI z6R&@K%`=bQFiBQV%E>?guLPIjA6ZMYEZ|4_vl=44Prf7HXWDal>5|2ryl=|`jiXqcv=;4CC;Ki zK(=1@=oJx??T44B$Abz>t-1L&IyNO57U1Z|nhE7ZvaZdJ%CCfNS6?t*5g6z1)%jx(0V+0c|21EKql))Wum+Pc_c0> zEllpAuhBtYz1og4{hZ|RS-cf?0+655x^n2hm1xC8QZ#4?0gz}x_Lzo{4A$oEZRF2 zG)w>uV?o1MrtmA$*^$B*_`0s)7Wnu1|XT%hYP^v>P8=lcWml%8;M;T)q0P#bHwUuJ<_$maZjYHO0&9d3CvN z&fN(t2~6~o?jN^K3F_GYE^QD2*%|Sa{5&6;x|e?}k%p~+`!HuV)uUI0uSGC(DJDe* zV)?Kwh4o`$PF5DMECFKB@<5%U!9cU9+hDszyCPrz0L0SsamN{vw@+Z?`45TJY$WIr zi`6Waso6--gYwpsS1DwYlG41ha0tuJ$}1*cqa^qCb{*C(1w7m?>EtBo*T29gNU8vc|Ji|#m2Iodfj`vN1mGeY91bX=9$Dq z_Dn>gHR2g-BKKm&zA-DasJs9!XeCA2dAvTHkGC_Ogqc7GO8`I2oY;dI-F!p5P2K^4 zz9vsB1WE3FpBk!F*;tg(rUKMuBkf_5J2eJv!Yqvgc{xe{eM9RGwf$2#?L2`~s86Lu z8-Xx225m}*Q^@Moc<6!XbthkHy|X=mZBo*e#nQKr;~{Dc+Vspb&oCTK3n~^DW#y4u z9cYQ zKCh~#pt7W}q|jDbMq;FidWzb_JIZJJ^z`&Cb5@<2IR$=WQDe)I7hE<`KT(T#H)+25 z#$t(EsD#?wopSV_nx z>IZ5U?<^jsE!b!Xq*et`S7G1W_Yu)1in!>5@#ul^=*W|ZL8gMj4lI}qO{oRvffg3N z4uM56cjCH^sY!LRPF)=mQoT+*rbre9MfQ%wPS_&KjX}ATaefqN6$4tufL7i)h!2{R z4WE-7tU=IU?X~D}nQyp+SDrsCxNLKlbt_#NgL2XW!4>zkl{@ zi!}Rj3wvDlzK#83=CXTN9L~69WjVX$Qg$=)pRiRs7cbt42)8>a8Y=$F-Zn(kv=!U( z@@!ZfpxD}n+!9QS%DTENQdb+2#LI9I48%yqCd(gF85CVH=Rd>et#Gu*c`h1`_JpH7 z?nNaqCM$$w1Lf*-FxBR4|mzU7t(zYMiOK9UdZ-A6u$|oJFy%N zs&`{qh~bu>nMKEa2)L@&b|BF*#5_=4c0cmn3r6LIH;j$z-a+hJl)44r)T&EkXp;LJT4|H!U+z=07N`-Ag@A_$#k{s?z-P zWuS=^#nN(|xvrP|{BSknmgK&o?%`4MZ%W8ayWv(YcFmJEV%KuLR9s96e{oB4x8@f^ z0c0h90BPM?I#{a$_dKYm-Tk}*eEt0Whz&s5-amEsK8<|h=5D&eAfC2R&@L6Q9TwPl zX@M)pS$0p~{WjCQ{Jfv)_0I&tX3cU26ZUSTWzF&0WvnwEZZYdr*7gt!`!kq!o>{Yz z;Z&g;aZct!m~nA3AFvo(;G2wH7$MHd?1Ol&oXjzOf9v4~ikvjQH{O{!so6PcX$ARd z>BtjcR&{jHaKQdpCto66B>kP)q`$#)1-^VpXBW@fB@c5Pj>pyR+1YLu$qmV!|0a_& z>cw!!x^huGJiIcc3W@I9@L(lMeB_aZ7Pb(gFPgMQb}zM67uzZ;3koYM*irnf)zWas zZogf25Au%)@DB^~_6ZAxL2RGjx+zbR@R;5$NXtwu$jg8QRAv?!(ug@k+tH!r9Gw7f zs~42zZpLQwn{Qa;H({4(ypsL6V9%ZfbN26_1H`RtEq23o-iTra9Oq( zF^OzkwQA!pix>YQyH}Q07ZjDjda1Mo#>7U<935J1hv4t9pa^f@U|1&w1+r0nf9uAY zW((r)Zhk4IEh_e?`59TnFEkD@TnKa7eO`=WY&qv{Y)O0*@`3Tn9~ogwSYy}rto~!A z$>i2t=M%@?lvV@RMBuuL5uSvv?4C_~dTf0Q%SzZG;DfPe?k0B%9ga;%a3CAq z_cx9!&D0Zs>jdE1JyYjrrDOM%m79Au9if~vF>iQknHEMRi0BI~5p2z5GuwiE$=bBN zFK(==BK^kWnlmXceF)yY6Az!!&N^{o7Cn5?$&(k!(yl3S%;KT=UpAwv?6D_|!>^5F zcUxd#@d{>YslFapv#z?DZJj)MtLzP@By6$Z`c+$6r9b=&C1S4u~RG_)jlbJ&G?3 zZg?5pj0Qa%oncdp=BPDHWwB_f%_`|RanhJb(S4x^^#YmTGYB(8y+H@jqDvm@iXLqq zldrtz$!DHjbNk{5UE|3g-rWF+;92#$cXk%`AyuT4R7{p%{$zJivuWxz5B~k{e}DAW zOGf&4rGERmEKhMoLX%XLb#!m?u9Rar)y-1im|1c0@pGrdzznMoX`{MH%jV6SH7-=& z*;thNJ5*VR&sIpD0V5_|G*9*;K^=pIh>rjCf>DDIWQr04)zG3U!oTZz=JR_ z`9~aB^?@r!yCJ(u|2pDvGA|n*zIu9SfM?|%crkLzV#6z69n3AP@ScA+re&c=iU%i< zmFG-w0!0Y82%IonOE2_2e`&N@{6oJZgq)UmR7G=?1S3I+*YFUP$h8ltBGD?mB&D-U zeo;qlI~?|S0?>Gpt`H;<2dOdFaf)PtEs9NrZ4EN$b5L znyp6;?*8qB)etsocKqTg{h;Q}n>WI*Ivd@Sl3ndLVjfx1uH!htK?N^#3lUl^0S?kW z_S*`4EERH<(?~pcJJ~j9)PERG%o9-MfMOb^1^uv3!G9IZB2<>7q$H5Xf{Q{IyT#!b zpmIDU3aR(OoBT7jZ&w z?t;TIK-h4)w9uuFE=O>2%>fRY#^NE>Zh_@U(Z0;?x`=m(llyWVhg=J==01(9Be21^ zb+n-9Fsd;_fJTTKt$k`dpU)xDQeV^(WTb$M5|FFC9nAs`*S7*2JT z2}lQhdPay2^wC8^nTwID!^ky6V&Q{+EgXhejG0-8ycXMa3AqSH!!QTm;N*I6PIuG_ zz-|t3jLXU)ARTi-XfH^|TySOx$S6VGFoH}KEgdbWTRa3LETE`gnh6rxS-e+G6OcoV z%l~BN>YFVBry5k}Jk$wQD^?w<)zLH9PvH~s{J^6m5A|Trd^0!8fQ*v=A=G;iRzlGD^N!t#fJgl3+w9ZisD1*E^IvwR51dI3qDqBaAZ*_dbzUUuj2)s*>^=yRuX`T~$oT@&X9&m{0&))3=~$&5 zv>j@#b?hl<{Zj1^b=pLoO+(ZP_bcPGB(HVWUhcB?^PwGUX`lldbrZCs?g4fyf2{7iK=lWJIc4X%3#3|u9g}&wR?Gf? z?94UA7c4+eJUM#w$v+}Qt0$zUPUx+4xBy9d#;m=2XZ-@U)uf8+FZa5XtzFl0LGty0m0gIC-NW(ck2ub=Vyt#>o(I?3z_pG|Am2Llz@N|a zj(OsOMhxxj`|hoH?bzAFvf8n+8hEr2B<*>jp~QdiJ4O{ox`3o9Dw0pW$m})v`W7u_ zH5Gw4b{)gS)NsBGLQcp%_3?JDU7Luh5SYq+r=;!~jqv;%&x*XyhJ}mkYd++pAt{{^ zFNt&o!7kXz=MGp?#$zPy~b^;T+K=>$w|`8T!CM+srC!dWU^Nd99qQe3y47fRAM^QX&nx(q&|I9BuNxpj2m1@xawXl8fn zEzOLR)*)>Z2m`u-+G|i6Fg%gjq;I1KN(Q||bOQ#EONA92yG4veg^Gtf@Ll;tX zxvqn=Auo9^J#42-^?_|Ca_=q5Q9fR`~CnpUdPNBZzoMvl8R=}T_tu;7X#cUmm1d5Pc zSu2js!&nhAlC+DtM;F=iye_hPcISOY5-UPQLs+FcaK)G$0%{nZAgBxu6~&>jwdp(0 z=_7m3jl4COJ_*fCR?h1Z!@oDp`oi#)1Pnkh83cox&L3NSVFs^fDd@HYR9gVL#iP`C z?8rwb8V#~0;-%wPeqeXrnH{&|z?j1>+SR$8nuN@u4)3%iQ1&V3yaPNn$&W5h@w6l( z_5nZdBu@?ZSzVH=<2-I?Hu#a=e_g;3+=Fffmv*2_A+FJOsyEL$_f!8#uM&(?&gxl> z!3o_`R^QR{N1i`_#}-Y@sG`dmx?oJ0Stc${es-~o#8Y&r%K5Z=q&YF_yC>u=81Q*b zuklag*)jehT9VWSfpY|C3UGvUhR8`6^@gDRodY5YZy;qcEsJEs@vi%M(v|A*W8d>E zA696A09uPo?zn}+MjW?#_7UC6xr9n}^@lY^lHA+Hih99@F<<(mW*=CD#;REbUrT{2 z!MmaEz7lsUf|ai|OX)Cmxrg4w!Q$vgD#MN0=w?9fY(afz{Y1?kIk*ePko@U(+-MVP zR%uu&eWhnB`$6_?bt9tM;W1wpOSfniMK@2DNjHUs;5JaJ=`0(Ov|tF2#|qg2x{YE} z>E_0cSRjxS1qOl?dIJ8CSydn)r?cq<*@+O4xCOxOaWL2p_9Z=*#w>J;XOGiO2Sa1r zv}_`7uIuf9*%2NqMSMuyma+%wrW2nuu+U&(cjR@}q0m$U$Kn z*{Ae$J8PiZa`pt>JXjXpx)3gOuzK6!<$=da*;{m5?rbm5%Pz5HgTj`xe-Tg_T*Pp@ zoIORi2xg+2H{67K>OG$)$FN-dF6{zOUO{he0Z+a`fC0>lZbr5Qx4~%rZ+fhWCDAR8 zt)`m?)6)&!2)Gd}dDBeAc$f-)c6xaSBGo|=`3obIS#bsYovKRmVM<#d2Zo1+goIMe zhG4|)9}_(+I5-%7&!KYQ(gki6O-<*TyOe;aIX7IhB3jA)W#g3d*~rKu(;maTW9d}89gFgYACI!>eT2$fuGtFLWpXlg>ry}ZKG%KG~H znudm2IglI@f?T-4R zcT`dd0R&)W4JR+C$Z}!j`Bxdq5nvo{VNc1>Kh+~%qpb3w&y$4xjyp0tRrR~ zQ?-=vVx)>vS$#%_g5s6GviDFx7B5SBDoIv*X;GTwUo5MA+)$o0Dc^zDH7M~Glz2Z% zr09nfw`An*l>PhmA2{^e&TTt(rtIIpYgbCj!6S!%`(fKIc&=rhJ$?MxzJrI3qt@Fj zu~qPBE2}{lh|E`|B_$hLuj#?z4<0k+-psGSX$+FNP)Dz3Qe|wROkt{T(pl(;hCfm+gBOp|9xA>z_a6z(A-dBjZeSNRVeP zzF*BYv(1J_u9^blq$$@vMxLT^>^0rekch~Ue@0x_ee|+GP1W$;F~S%+Fc5xGfj(~FK`?f0%H9IS)4#B=vbq)yezoOgh4QG}%=EOhGl+j+ zD=J9OL%ekaK{%C~UtW@zvU}&xzaBVrF!kh#{fHwPJ7MC;;lo2ljT$v96h3%9Q>RUv zX3WcN2^urWYHet2gy~pgZLQVOv*eRtLeH9dttzUj&(Uj`D%hDgWgLPdoW|RBkOtul zFnZM!m$KV&3!TIlOd87rmQwMlqm@T>5Pw6h@V&HNS zaJdS&IPK0tl9TtO?Ay10|Ni9NyOV$UY1_7)yAK|KW9B{-{)8pC6}~J8V&ADKo#3hR zx^o!1ZHSc6CWn6T#mRPhPn<~k z^wUp|H~24Fw8+bvyk(ah&t792OgBy$J8ArgVXZ9ma^yC71t!=@hO1``uWxSCB=7;F zE7`ei;|JR}CBP;ATIE{J`kgy>zJ4i8b8l2G;(trvb^ny~MVcJi($d`2*x1m}fJikh zozNcJTkGl(Vzm)rqJt?3D=bA}2dD`R4Gr=(!Sh>>I2L+2G%YRl^yyP4Po7Lmx8-NS zTt748x1n1@^;9_E8Vnag^gbLG*r(VnfM9X;4a!GGmrljmiPTsu>BX!^Y zltagl9Xq`5x8HvK?bqF)<&y#xgp7n=7@-pAP07L?s4Tq5s{~&1#U(X$Zh_OTx$?50 zx+9-{^5H>$mS!>D(i{oY|ovOzJ-y6x`2 z-1Xq26d~hP!tOG`6>Ow0FP>RxksCbHdJ#n4zMsxrZ4} zpGK~jbYwR@eQM94WBU&rIDR5MEj{gQI{Tc%wKNMjxc29il%cN>g`=TO(30YXUJY6< z0xd7b>~}F{ztHa~!OV`G|K74?%a{M&w(Z~l-nwd2>xPgL3 zPd`wTeE-M&>H3Sq+;y_yx#WP0Mgg;R*MVlL{L0t;Wk+$WF z1I114a#%$vmhFOq3S_sfEvu}pswyo-{+p`irb?Dgko6T6#h@wXdXU!vxwim29U_YyP1D*BAqH~WjMRBuR?a2uLbJ*U7++HJ*$Z9I`M9fy^AvE@U zB~FR8*VqqUNFUpeTxcJw(Z~M*H{2k3u)XXJaKd|R6FbW)B{yl3G**gXEr?0pwv^hm#j3>F<7248ReAne*gwYg$f*S`$5 zf9vG4EA8J(F8V_))BReI_vSv~zqEUZ#ckgSfGP5ZeiQykdQzL3^>50q=dID^#< zGpqCT>?ey^nja{e)eS!E9gBs<;5^mC7dW292LryhyHS>2mV>uGcIJ+^iyUQ|1}M`4 z*|WRIeLY2b_bzfmPmyuXBKd4!j1)3@Z#kKtxCZ`&XiRiY{Eh`e$#{+)BnmxH!>t`? zIdpX{OE8EMj*Aj*a+VM%O4xS+C3uPwOriwQ9#rlrN~rHCflh&Spf#O7>p*L#Esp*t z2?-E<-oaR8z929;-MQ6DIZU#2=MT+r!Nu2g~iyN7yC3y&KIyerJr$%Of^TEaX-MVNQ&AtdE;;k>Q>cr*Op1ZR&U3pK&n*9WpVyj|qrY?ri{a)P(T zfZ~AtE)>NZ!3`I|5ze-_gdZc|7YKMl1@I07GPfRhSG0&eL9|$$L$ugDmS~|@h!%ly zPFlFEIeI(Cd;x2uDmRInffdjvJ*gKQQmA7Dp4W@vg&cm2;DW6}QC1@qWu?@oL%;{@ zqWbMDcu05;0e_Kz|3Sca3HWn8@UAGQ+ewt0Uq_Vt!ILQ0dWa~e z3LcVjeu8q1$Que=bGfof)hg6S?ri3em}>=mmw+JGFdr2DN>nQtdxSQ6H!>iIBPwXCFA}*mnl>4u*2uvAP#e?VbUD{0YNF%z(xcwh?b> zZ69C3tav-*3^-EE1LwpHc7jG85p*NnN>GHOEiP#hEZ}PeJRkXHRil9a?gHS43Hb2> zo|=M+D+PRR54+V30cA+`3m~eA`bZuhur3bblK)0 zfpfFK*(7kT5I9%&;Or9K{V?aB$lYv)MzX#Xh+M!(Za>1?5+vI4syIYAE)EeMr$dAxW&$4pKSIDC6!5JAKBNcU6&Ls( zAzH+6S{$8Dw5Z!gva9SWh16nHPv>2*f(Bdc|AZSrXNC;Yl6CxKt3!_*WB3KIb zFVB%y5vZYZ+I_Mvtv}(*W%Pk zg20bY9(2TTelKXT2+%nY-%~0AzD2-i6MUk8KHCHBst*j`P#-iNBfP&&CA>Sb3Gd)3 z&OV^*T>a1b64^JPDb^IM-5l%R1lC=m-lm>J~)pOEuJrM zzC%!Ko51;CPn(@J_m_Coy(FGMhs5K~C7znv{&k-x^Z9P$Ja!CWT_E#~g2cv89F1ZP zub^9BE<3LTqbNZmO7L)&;3G;X>?y$|{@aLh#uq_3=7IW&cJ4JqyPBFI@%Iz*;vs%y z3CD{#XaF!gyhvm+tExyG8UlZlC_xb=P*f9MLZB$YdI2RkXjs60_%CS~BuerIx zXb7p7)6j#{uy#Ni_8D=G9*kkf3IBRfU@grQeC1L|dJ+_L)qifh|0W-&x*GvY{dbP9 zUAnRT`|mu7XG{_2zo|mv`Ar<}7trxO&9&VUk2@r${^zJP-r0#bWipi@(QyTx!6K+f z9D9zy+Yp}xH-Co15YYmcrR;H*LZ@lGl$p*_T16>&1DDd9j_jbbln!Sp)uNO{QHH6f z3}QxT8{B%nd`tVUb-+IBckZog`puhJY;*^?HhWPYKvj0yeB0*2&-f zSMbUYXnvZ!3;iJ-i|4ab81=@Ssm|UYNT)u@&`YAJ{}75uXpRENgaY@ebT*IsXsTKY zm?MqtJF@!M-1s5Y9G*-yA5Nf}qeRVfoHY+j=W--o1xd$o(-+l#HlIHEbCVOOA?D!| z(b%HzpL?&;FeEF zYQ{>CCTHM+o>#0j~&nT@Sn~s&#%w+&-_Ks8;+0QBB47 zUfNJ6)%v$u*h(ovu80!KntWk-B+;kDA?%Ly0&)uQi3Ye|K}5iPFHvC~)=9N=e6;0!&T+0@WRJ!T#Ky3b#482V8pj4e#m3<)>i7O_ zq0)zapHsaWEws06P)5pn+=@07 z2YNM$eSAKO!6zMxz|9}vgU$Pu0=7oLdJ5P80h>1jY`uUTCtwMEROlgKP2I4rxUcz5 zqTZA=qTc>#)MthYqF(SNPU^XYpXsS~Cm87#w#4suqK^_*qo%7^M4vf=~@1>D4m4T_onI zfhllA*ndU}`_D#U|Jf<*KO;$K=QGB@u;~IeRKRW#u(bmAuS39A3)qnYHebMo2w2Z< zSXb0*_!m)c{4Yd3>r=!lJ|?1GP`7?#VCpqcwSs!%scJzzD^)M3=R+t6>ID%R-PAKi z_DF8d^@eL|F3triW@#H_r7J)!331uU2sTlezJC@r9!yGv&q5^1hX6k8pMW=e-@ z%}yJgLHfQy%&|rRIl0d!=}kTHD^IwO*p$paqKPX)sB4XRZaJ5?rd%EwU?$eZcly{#~8^+%z{=w37mIyx7St8P4CbMo62i$ zK#~Be*|U*qR(zcNMAn?WHK%U0`XcTJ1)0UH+QnCXEcpDB9a*X{JZ6Ep$=sV(;DO-_ zobb~HyiLGs1^mQb@U)9`uHbHvR}F}fP0AE25FqR?roDrdvj({14DJ+-Ynd%X{b)en zU1VR{>0n<7{4=;YKb-L?wktkIBT+eSReXvk{0FAIT|j#Y=n5zF7y)e^0=k}Cj9}Hz zMqS+gha2?=h%@+X=7z6b*Ih24#|h|m)Xky81@!4|Xji>n#e2Po_xjBaeyiiXZg+Fl z>wIKNR`lAvM4m7Np2jVgEvFl$9l*_m@XrG8-hI~V1Q;43Q!g!bsS*@xPrYxx=R)K93b8P8H%+xY5l8tH87 zzLckXactyb5d*EI50SA{ZO5(IwO|+2o)bFMQCoQz- zgWl^F$;*kbQ^i`vy=4^IdnMBTK9MY)`izghnv@2DBF7XCH&N`_6d@TL?uK=Rsg_Ij z1}+&$e9zX#B?Gr`C#KFBp>M5?qE?fr)mzlsDQYe4u61D8`av*0G6bxnwGN*mM{B(< zw6#Kyu|UW{ytP6OQgyf1NvHm+R_8$)$qQ1^qpaAmc1Wevr>e>QDb;u84Hd1uic>0% zZ8r;A{UmVyxx2kCdGWusx%zMIcVY^-?kNoEB*14|5jhCA5@GpdV!Gb@IAf%cJxON? z{Rbb4o?8IU%o5p#UuRhx&t8?@hr$E$9ty(!Ap6R}-!3UIGMi(dhfOA`?qK)P?U;~r zikZ!cflGK_VSf@N>`y)x_9tz^{$xy#{fSHX|JLSXd;`bdF+^?LZBCWPUZ!R!thqn_ zz4?ce(0?n3PesUfET9Xp1o9Qgz}H>)xxp*0XFopReCW4>mryS}HVZk^L8rj2&-A1c z6Yw6GmB@Rlw-1(Cwa?;mo6=96l#%?3XPF6;@_96UL4X_NxV|BFaOB8$8R&kVP=+@+&r$xlN z0P>RJ1myK6%?a*pq&mU9f^;Xi8%TMwuJ8V@?|j`MC{}`u06?LbOf6|A6F|2JHV3!f zJFvd6WYLVrn#7D%!>*@K4g)|#w?36HC*C2?J1ls#a|PyE9gywUfo)bu?||Z=^q##> z4lk0CFrTW2=`QYHpJiv9EueAOyWhH3b?FV_V(C{DL{oqSi&6FxA*22OSbGn^rpmT` z{G5~BjHYS2cj>0=y_=Q|A_yWbhKPt;UlkY9lT#ECMNv^u6p)SVA$yd)N-3o~ZPLA) z?j}ug{@0VV$m^@G-}nE1Nt3Re@jUna+~c~(Lxeyw<3}Kh|27L^YBTvZ)e7 zx|WgbK{f0DJKfywlKs^W<82Lc*|23!8x$bqC>t3(8?p5Hr*U=B|^{BQUDww&S4pn9n&!H0+k@+NSBZAl z>=7=bJHQaA63yTkA5tYseaX=kV2%AKICzm`;Vz<&4O}-=iJDz8RdpAKc_F^0aCun* zSdM7DaqY?x0($^?qRr+MX3&?0Wb8?o3>VQ)M_r}%f_wL~$vTvm{E&jK^(e4xb2LH; zGMUEx$+Q}Zj;x~|SPM~D3uFhUFsuc#gOmHUt5;IbpTBq!mpFX*1gSg2Rmi@n4Q9On zC2@@_)V`|D#+rtv`oSS5#Kx_JM7&aI}luCbw^*4)}^X9d<4FhMdZvxFXM zy+ABhyRT4Z<>X$!el<1y-hJ#Ho0U!NV@2w~z`&lqdJ*Q$5Dl;%=-4zrMDl>>{ zD!@Q!4@yA$lKV}dRwFc0<-8wE-M=_gqL6D+HvH6ylP6PUoNJbd#30&A1u77~N$#>3 zx=%>^6yjuvNjT4j9c0MaXx@wn3`I^w(wWNss2U+2O}Fe(5ZQyhG#u3^6`4omi{T{P z#!o=F{q)@A1V%~z)>w>uo==jg{Ruq&#^Z0U`y#udlKu+8pq^JKjU7lCN_zIj@jW-L z9Xk%Py~|=h8^J*e3?dsearc3Q*!2gHi-MEU_YmU%p;BlKqoWF`o3w9#9c84hU|I|; zJXQfhcS}>mK&r^b0iudZYkga(0G)rutju`RP?DZekcUct>Ty=^IMuJ83+0@F`*{NQ zLn@GGV4h~+exiE}?9+ef&_{<28Z;=zr>!8*P+3+(`}+5YkM9!`5)cv<7dU7@SPvLC z5qAdqcQ$u=_+ZBfJnC+rJ9oPd41f|!Z)s;shbrO@8Hp-(XuMXY9y?~VuAj2`<~kjA zCs^+7F72o^-8gpm=!vVpZQrr)@YVr=tA9ld)6+6bWvXoP3-<2y#G7xvIl)sPXl>KW zCEB3jkB{gZHF%b#Ya&!?tp`<}m7jg@cB!WS__uT@>f0_dH!6ordvP-LGfK72Qm3gc zNRQq|?V}E$tnVQ07NMgusNc{+bRv+MBblms%Wo2XiTGiy7TsU0q=fKtFTZZfCqg z1H9#O5q2fCpoZPQcTZ)r*-=%~+6>x945=x&`3J$;mSS5jjzwIn^Kf(cSf(UCR-yy=V6{Aze^u$^E z<19(widb+(PjH3jv2&;vh6p=#>iF@K$Bv%3d5uW3`K(?*if;8%a|3S5!vlLUYcFLx z1`P;Vv%MY_VflAXo_S=@HnK&GfR?tKolLA3n(yDfc2!5MrdHDq2dASpQELQ4JH`Gw zh@E{x-NO0LzMTll9RP?|MdM~cv7cGt{{8!lqn#3n6~zUKAd+<0cK%wb~JSl`^*Xe!Q0%P2thuh=D! z)Ox9JPl%|pvewqF*>GYSp?`QEh~^U0*(n2vw_%`865j)>pZw;ugJDVR$K=d~Mc}ko zF`vYKdARZ{8Bhu#3(c!JmwclbqF$VcAOtSenZUo+CE>794M$ADeJm}m>AEW+&iSXV zP82nsOF=iQ6lxb<61-yYa^At;embn{(~5^bO{8bHmDFu-(c>p^q&F7r83$7xl9=AT zme}l&n_?0EW8>)P9{B#S1peD|gbn@kITZ50IfolY`ET5e`yg{h?t=nne8JJXzUExB z_25ooB5+sTGexkW|LN>AJkjo${Z^viUHXM~SV18>nSHsrYxap2j>etEVTFWagGE+Mb8m-e0jzf(e%77Fn#iJ#{ztewY+CJZ}z@y9Q8ybM5C#yuUMgpRPpca{5H zyXHq;r*`Z(#nGmAWPmYmE_`1HL$sOhJ3t3%_6?@p#LDyL&(9+-COn+sl%<8mmE@(6 z_wb>zSZU$$Dl~sNXS?h3_TMQszzP_*}zbFb?Nmz-Ye8i&L=CX9Lnr@t%#BGRg#$p61u-Rslcbqtd#bn{_66?}~ z<8jP*%)ldY9v{46XH9vd!%?4mXiulbQr*;0UR-JM z>yZuFbB+y($?(VQqXXLdp@lRzbVy%g)E z_6^c1?HvbB@;)&G2lVbUX6j@es?%376X2kR6o!Wvl9y8H=@%Rv9IjRmNlHpeKuPcr zU1DMnB^UbBx(%B)ZTNZXH*3HCdhOS1)~s3mC4N}@4Zgnl=KFQufQT5HXI-5qd7U|P z_UyTHq`yHb9v4zCT)2SmXU=e8SFYKt_2!6hnN%W?N?-vnLi`|=;Y%h{DrBg?2+0Ty z%^)w02JJrr12Kc)y?XVEjfsqmjERpA)VOGdgt-U%)N3*STFk$Mtfkwx&Yt5QtRzlC z7SNE`ZHGBY_ln^}ruKK{$-{KiFW7!U(!D%530W4E#pSh}qzhJLvBY`YUjKlCdMGtjNtH*QSIVEw#cpqtMKST$ZD&c(PgG@0@^KjuRB-6cpnf z>kbAgtV$G}e!Iyfc^Qlh88dkO{VqhX?VS=z-F0<$e44T*@~=6H9kX#>JUD@(^3ogVI623-POT%S<`{BbhHot4u%jGgy9GPtJ;^cTl&*38z4i^yCG z3(pO2OUYXvc?;-Tx7|mO7>9`JUhBV;k8pTPfF+L~Z<4p3Y?m$!##MrGm1rD88u&ku z-GtY6?cDGqa>Bbf&LzJ&WjEJF`+fH-aIOc^+Q|%}+V*!|@^~rN-{S| zM4ZSq_=zMLgAs1TniaUZqv@y%^d45w-a%@b^aOcwQH% z1Xba%u}HTwfu)}M2tUCtgrP$UVQEU>D5u8ZZ}Ki$5gXypmL?-aP81T6k#_^%7&@4y z2nCSN0{{zM8Iai~2OX%fUFRT{6`}XPQI8l!AeX^OzWwCKM)HQAl8G%P!ZH6_>E3;> z1bOeaXK=6Ey6&}!+$+N%aO%3#Jm;OdA?W+yPLZ7La}!64ymi#C)LJ^XK}VgTc7RWQ zWl_pSS4ni#7kHx2jFiUSe~ef`Nm&$83;BT7)&NdMGcqYHa91KI>HMorDz&-zg79+; zW-%W37K(d|!o4Y4T3YHFn%eE14fRG7r?{MU`SR`T?Aw>moH%iYLnkG{dF}cQxI>|@ z)uKdqjSrd6s12+_TcXwasMS7N;o-wVZXDX8{6zgo52OcCH-&Qr%yVz(Xc5(hrU(hF zLK&e{dbG89D790kYB`ls>gK5l>Ok(2#v2=lvI#8{_eWy;zbQSo zxu$mQy2Gx9E(#mE$#j&Jb=XP+155vgFHq$V*5v>av$)9=mRuezAtXOydjGVpBPiFp z4#QE(mgchZ^g9<)Gc#}B!Ag;L>+8+G{IZ!-j_J4eRG+YodqR6{I)w^Cd2CKjEO}v% zN178AG`;bZIFPuNDMCwbk6~bD7R$Cvy?QW=2dAvBtZizlHMiAz`M05NRj#S6)sPpK z4A05RDgykJ?n$}<*g)_s^B95z@@{?T-mjm#i++SV|GWwib30E!ad09TB(96`7Lm8e zu5}R8MfJWIZ49W6UBD>K_3PJDFI~Nrk>1=?T6QPx`~{pv{mF_?KKXLx$}d0Z9y_P` z(a`VM@9PHnc#rP;+sWRcKkde4)O;GxV<%mP@16zDZOQ=k;uoj-v@~0>*X)WMe3U77C=qtu+?mV!K(IGqXAQJu#bQD+;E!V z{rmUnJ4kescE|JtS%VX?`}5sWFi^N!R>zOL?+l zA4vK8S$ERAIf8Hl;erRVq6``K+sST@AlyK>;K5wT9I82E9^weh0hoa>!Gqx|Yp&f! zTd}(4=DNmql>f^K6FeBSa>#wJ+1(65IDv4$gP}TQ+y9soj1O}{_ejGZ9_hWztosi} zx{Z}@*}8ShA0yp+#!K|5?#xXxTBug}VUg(YA0wSSLK*n*NTGM-k9c}M^2b2QCcwnz zDK0RULm|uUot^eS20EaB-+m7U8oA~}m$b{$rQ`x(YBIw4S zWj{?KNUu7NBMD(xEc|O^cHSayGrMI&D2^l^PSitlq&Rr!$l=SEj{UxQ(^_KkzRJqa zUAS8)G0wvrb|Z}}z(ZqVE4 zmGq{#A$P@(e#EtZjg^!3y**!ly(f8b%3(u?4vPtoijR+q>xROy_kCBD7ZZ22teUOhYlS+d>Hu2{sa4A+Tr_=BSaTC^+8xmgHF9St>&OnAI8zij5X3wM$R9XyAsxDa389(_lQ7%?CqK!ApWg?G^`F2iK%^c5XB zA|fv?NYGH-)Lvhaow4fM+(rfpLFN|~@#^zo*>x{JPWvQK-!aQkk{fd4#{JTUhK8!N zH1*)YgL{RbMPJ;&ft@$pXU=pdFD}g7P+#9rUt3#SRf8WI%=j{!Tbj*znK^}o?Xa13 zh(e)M!bbuG1ZND}Lj%6ScZH%YJ-v;*xG)X==Ap)C=?4*o7!mKxGDFbIll*qy74G&RWp7R?@aJQRFG3<|#Sb0Z>?)b7AfY#>jTZb* z9dUhK9dXOSnJ?l*K-9ZB;?}tWTz&r`!WiVLiT88HhpB81cU}ot8sGZ-Lq}XsS4Z4% z$UJ=7>I#uJxkBX8&hn(cj}JzMj#aqI(T%Q> zhf$;){V!*mLu1^D>oS-7jo>-2f4}Q*ls+KcrUK;PJ*2e zW+Y+%Y2=5&;7VstT#GB2xBox%#0`L7fXfJ}6!Oj2l}yzLBLKg}JM+^Cox*YLa7cfm zAqiR+4jQY__4Cf{+vozBaq+^1tG78-3>rTA`J+o8I?Cc3r-}{5j`2dVU~Hd#=gznK za;i5+jZU1h5J8E?#)V7Xdy5n171!B$RwB(?{d&B&?Ra1ON=c>lnG{#08Vr>cMm zgB+`CY($U0wgygBQc+RS)QJAHRaK4n#;M9Gstbz@x!6a&p%Jb8IMqFa!BAOn?b@~M z?5u)&_a3r|AJ%=uCWfwquY^>zV~{-rwuA&S;}2Q_ZaKM|cvyzY7MCEx~V1OO=^f(Xug(4jNzm+0p9K9zu5(UCrcz%)W}KeQDHtcN?!iO6Z!eDuJ1 zqd-&A=Qs>B4a0o6A3t&O?D_L&&Ky5^>-O!d7r<5Tu+EUA8X-w&2=xl}q0@JAi}G^{ zzTGn-I1HZ0Nu^wcdGsf!eKu=Mt%d)Mjd>aM6rMCJ!bK+h>CO2J55*Y7@-9 z18lNn%a$#>)~?vUUx!wOKJ?1lE7aI{9_F5q>g;SbqeG@M77FA9kr9E_YBsM>*Voo` zTJ20rV?$YKNkbh-lq`XlR?&gm(o^lOxqdhX-R-G%1q!eU2>ARM=-d^9J^=E+2s{Os zB0*>moDo52m!%Ym+3AJZ8G+e}=$@SusbH#W$4;C*d+OAg6C~XDHcMuO)=ThC@d|~4 zQ(wzxh7Ji|ySu5rrlK(8EVKsc*t>>WN3EbfW)lgGs4s;dFMN9T<3M&-3YUhK)YFK5 zQ0qjoQznjkX1TMWEtX{xnHoGNQ)^trq}Q%ULSxBy$dglxY>1Av+B+#BadsuJ`Gn{> zD|2Hd_~s@jP?p-<&{$bsZmu&s1Y)|e3AnAElIkTe2BZPxW?f_bTmJmm(S$2M#jC)d zU&CbtcSlg(1P*};&g$asL71t*n5oH_sX>@2?6yhymz5P73-Zts7pykg3$IKW zx)Vl%Ubb}UmMy>TO}&>>&&odgY}Hr$?ont9$_e-E9UlB*0<}SUKF57PzuxraT_lMm zb>~?ABjD{%^RxM7f*s3|$$~ij0)Bzyz4zXG@sSa+?jXf_@)UpY=%}FTsw&RkXsjr! zsxg(6m6w;57>zY1(iPNTFjm7f(Mv5Bbi@^+U#eavkw`?qb4DE^9Pm;)MIwj8LSwf$G_$o>IL)42X-#&UrR2m9 zV_ymPI5hf+y}u-(>*-_sWPY9?=TZ`?=_=3nH&!E8`a5MwyJTF-Ji zL>5@_qQ34L?U)asx&-B^MhmL%n9AGB##1|}Q`A|s0=)%+=z;ra zoRM}o=?wN#q(Dso?wmhD6B@PJ-9ziGQmEX$RZ1^kG`#ln^M=Lnw?{dt_zI-{)DwoS8wx+gD2n2yZAW-rm8nz2ZY)-pkvHG7fzAwQp#+ygqx+=PS7%uNo8hWu9L(7av}| zGG+CS^ZC-q$Zvkwc@eUNRoGkW>YMG*q$-WKLLbJ{0zv3(w6%Yxe;{MHapT6AF=OJz zHTQR*qwnq_n~kPZJVyGKr={P?YLH4AuU@&{5Hb3BA`&;C1^VSnCS_Q{Yf029%*c1t zC!z)87dWEzZxO`Ndq z_K6ceCm}!LRq|A&97wAV=-mFvsD6#&~jz#)^jyNZm`vcmYRHLeR)N@ zNG7Vw&Mhb`tt~O!$tZ8IiA2KACUaGlxuy}VqwSro%`t|G`5ZLe9mi1e%H+YSk``qIBBQy=izx!-$bfB=Q(cV^?ot{NNXK(9v7m)2>|4X|RiXP8>IGc$5+jCMy$q z#mt+`e8eu~;@2C;3@=36knb#5hT9(?t4#$bVPXQsJqlnLA;NtNeATT zUF8oqV92)%WMY|8i`-YRwrQJQumAo^RYz#jai6&4XD%s~q| zswfeW@#EANf(fBbzb5jp18jOu_*FwcQ;UwD#m^G_o-ut0iSB-_T>ji+vmc+(-`}IX zARF!#Gj801B&dxo?zU8cNUMIgs{ZY0{lo=t?Kv@A4|+T-_y~!3AK<5f85~=s!0UyG4fz zs7atdc9r+>rZH6PZ$-xH95Q|&=k(6Q)+Z$!@{t@q_H12zH{$e7oBaw@Vwo|9XG1 zJ!-Kth?DH3zYvK{4gq7%%PlG^FI>ce`rrYTVe;g}z{aA~AHW>jOd@odP8>T(+u3Be zc|P^r_7ysGLw{fJ>*=AvI`sJGgrTzI))iv05rJ<&w zxwW;esiCH#!XUylFrd21Vxgg-vC%n*DMAb^*x%3B%f~l3Ff7nFFf=?oB0SJns~z*7 zpBFhl@1oKIQ0Yai(iE)H1(3=MuuA(Fi}Le}N=iWxm~Oen#W1=H^PyG>a`Vb7OAPr1 zB>YVbaZ^c2aRopIHQi}}8bZn;ws^o2g-I)=g)%YVdNIOqA}MOroL&SzWD{`mrlL~W zz*)1N2(@OdA@1rEY>1`Wka>LT?tSMntKu>*@A+lxZB<+ddieY>XUt@j`mvHFix(C*7alV zrCo3HAN@W_Xxk=kYZr(1d*Xw!PSKtMH*ydC>Q4#$8}#`xNz_U_zoc0Ca|6741<(zq z+*MVvReE}9fw5|UwyNFXM#!kh|Lp~wtGU~^__taC`giyiPAn~lv?xku zdQ;DlZ#P`9*I??UF3+O-XcAi>NRzA1HWf#|Pd;xYAgtbn2Hrzb7f`3iF!7&s9Zl?w z?kIV;y`d+-p8)KHBxLFqCyz%2NP}?&`4)y0Tzt!Q zHT>{%R(=v0_8hL9g4MAO_x&91`#G$R0ol2^c?AX|dE`J;H&2r8y5|1s+vqtKL&Avh3PsDcS`Yx8tNo9cVi z6=HFPp+-+Dg7nxvgMOkY1=%drhZHAS`be~<$yi*F?GLYp8N=Y5PjfB*9v=AnzH3yE5)>0c|Dr8>vfI}$9n4flmS&0 zmF1=Ir17Y%tgfys1t4ImD2MhaE-oprsI01lt5#$x#!54rE6a;a)o7$6kfAtM?*~PL z*=K|>C`CdU-QI4)emYoT4xVK|l~0+LhR&1$e#Gncgmr^tcwc`%AFUQ^$H!acuGVNg zv1ySXQsmv;J&1cA8tUuq?%_}M{a;<-KM|6Ls)*^r-x1BHJuZ^>;>YM;&^5<{?nG^` z0`Jb1I08I513WngJUIe9IRZQxR8U@3wEyI(6Tf5Epi?K09^JG1;L%e^4?A`2&;jae zmX?Z~;SJz2lo7slE9BlPT+Mc9bL{!NXprj+!|Yo+h?DBw)tv)jW2>x9J8deb~5}a z6EtKR8Xg7Feki7FhtK^ZJ%4@@IAc1sm-DNCS~C65e^ zR;47lpcE=uJbQGc15gha)Y(|yf!%)sf)Kd&(1e7<#s&Czf$#l-!y~XMJ>Ajil!_v_ zQFMwM@neKA7YU>iq-bfU?QXU@;4rP;16o?#RD5%JQK6xrq`^Y9<4`#(f!emarUwxx zt<>6F4bQ*J7D~XJ&%&HPi8)WeoF`z;!wgvWL&VlOA2LV9C>u&z>=2 zNVre?o+I0~Unzh%6xi%;N{+gl$S>!U`Q?Hv>gBhRAeUC)rDv82g?iD*mno^(&WXl^ z&=zY`B2?XO>Zs_|3-A2WJ4TZdR)U2_)?8(Esi~;QP-rN^^c6wSl~$HlVD~S8 zAy`F#gOo^462pD$w8L$*SW>VP3B#-i^TaYDE6Uej>*4OMR4Luv@j?(#>q~GhFDmN4 z9`EG*Bv4@?sPG)9kOV3ufePV-0>(V-Zbwp=ydY^SZ4c}b zUtK{G+n-^ik=}_YOL&cc11sSJ6xrBpR=vATpq4YV)X(qdpMO4FrW%_-CDY57v%+SY zZa`jLeLcxp*}&Al^u~Dji+f=^?xyy@emw2=LiEVDCVO>SS`?4#s86Y%nEHHkBDD)O zN9(XIQdlq$<s8Oj02&B6+aT%p~&Y-{NlhlH&ksBI8*$bb~QFd`h{<5!lNfblhz zu}oQ?Hxh|K3x=5+$-<=P=vMOQ0nm`HKR?@p>uoYsRS`oGG6u?y5WA{^Sdq@Zfzinq zK_#om-${ELFM`*&=Z3T$`^kolQ>A|K4t4j?AjvML15`UfZpJ4e=#+|g^_#rr0kpT%e5 zE8_@8jORhxn|_`aBibSoSOPpFN9s5oewEVC%pHrOq#CN0xEQ~GhZ z!3C~Z3;%&?;|*hp-Da=8b?Umg-_%}Q4=i6JESSo2;&v4pVa=37T~$^iI0?E|*VI&j ztegjr`4RBIM{H#6TC51lNE|@wfRr*^4=i9c*;&!s6Xw234THrSD$>{Ahiua6=Z{U# zK+AtPiIbLbpyfEwawKRO2U^B~mOh5E(y~GW+!&Z(hWxyILjj>KbsCVC9p!~$g$GcD z6v_@0HCAvlH4}Y!mowZq>n1z5Hd_c}Y$pFE%#0`)8PoXbOnp5TqBkpcI8MYZ`*@iS zlJOe`__Lnr$UXY0V)2=4M?9WLH1E7&@SI2Rmamv)ufMIMZc(?G&`_+Rl`Jbvy2GV# zNxZk?dqfC6q;BdNagU@=eM<5R@{3F0zQR&R_@%6*l*})R3s{-J4iVDQ0B{LT@25VV za)n&u>8@~7DAk@`-d>(sGKo~ge|*w8FRL&wyD%?XF)ynyFRL&wW8l*j6Hgh62gWCH zp`aXSjqM4H?H2wE@|cDjz(j4+KthpN6Sl(B;k1H z)9rAfc=U-ACrf=IXCzRZ|3^}~Qywv5WN$o%ju{-`*4#?H_4b4q-;$G@=k#f>ytDZE z1@mSlP3RZXr+2T2{v*Z^EWMB_r|PM#J)W7PKKk3QZF48}syeW9({0KpVeb5S3txYB z@RZ&`p-2menfT^YW8ti4QUz2Qb%I_BcKC#qj2ky@NNjYh7pL&;6W?q2I3&kg`z~@h zI*R2Bdw1rcmSa|A<9WDP?73LG^3_B3+eWkFf%!CvjY%D2| zxXEQgE&|>Mg3_=w0cyd6mq>s}!A=zm5l**K0K$mDs#ie{5?G9~!b)dQ$~6Z5yNWpX zP{$q~@bf)AA-8>e{UNQ@3O6^U#={eP?@HZO?r>9(G^JI0sN_VhqfQ1mfUnTc%YzF4 ze{d(xH837)U=G&6Y^;IqthXM<{DLB&LEvhb)v&z?qZ{&J2V(^qOz??|U=OPa;0RTX zX@J)&gm_;e0oF8a6m2`y>5A8=~$SwU%(i~Q#dm^IzpqD#m;)d(&RsJ;>1*zO63&wkM}U1 zJ9GZzl`@ZBQ%Q~E&#I;ddB0KP7Jjhw?KhuJ7?i6+G3r)93*u(1Z0_87z09V@v|l*v zu8ZeV;ZN7Emv$md%QTwq-Gw$oUEy4=hq0^_J0-)W0r^31BjJLQGMK{|#a3+q6MaufTw?ZUGaLI+h-4FRcd&@DkF2R%${)Pwc7;>&w&A9sZ^VjN^Yg zi_7xz0%iO`nGjIM3(|?yGRpxzW#t&Mjiv&a!pT6+bp3o z=FEr}l$9->J9UUJJ;meq6)RTkuj~}{n7~b;3hCGBNpv4P2he?Cg=avcU8Ob%mrR-U z#N6rBdAb)ppB_es)8ptV_*n)gS`HVw1Hvv3!RvL*vUgq~u_gG;G{o}8*3xQuxRTdv z$}(y~Vjf-mWdDHgf4x~zbfPTcbpp-cDO#%Y%KT;(7gJu>P*;u3WC@SLqca(c%WGY6RsWhjXz{c~%arkpjH(^YNd&ylen@$chx%d8!khjT>+4>}bOSYLrz% z)Y_n3frY@`w{oJ?k@})rVD6>i8-$}hq9)9qIb(pteD~PFBfn?yR%O456DPzf?Rgi@ zpUuZ<6CBknB}#DAB_{$J8OHZY7BPaxJI8() z=TS45b?Vp-T!N#gX~)8F+)GUb(Z5i<6cN}Y*LF<^~UAo^Tg35y6@90TAeCAle8=x9udSs?e6 z2oA9u4aD9J52zj@4AhBU25xmBt-RjX!8b9rNI1 z#9)it;dqXO6ZwHy|HF;SZClo&5=XKjGcTvQqw?1TevbU^xlom<+?;#z*mkV@MMQLR z^elgUxL2=%ckroBk-Ju_RSH(F`sv4?ZZ}soE8`Or6YtJ?>+v zR6+hPOrp}LR+^%6k&m*I`VEY+iCQUoZ&A8F23?3ftgMU$D+2o$cbpvt45}co8Z^6g~C?Q4>D}tRky^UPJQ-tls z?Y0J2gqi3TWP{@Mhi+k!ex1U?n8urqyrC?SMaO;D5k#gC?3Cmn5Sc=HYs6fwZ_T}# zn_rfHH}guKy(9h3)$^&BuU*TzdFAHq^h>FCvk;-albug(X0ZlJ?l(8H!KtaaxviDR z+`E3SxP?IzgYxWRbJ^Xyce9I1Dw}Ow%(=T)_WgEjYfQ}6wJSHKSKPaKbnEt=2T$cz z$Yc8qige=}Z|vT_dG9r7J60g*{qoCP;CJ8MqcHwfvqLGTzW8Fd!4dYvJMX+R!=EqR z{{=T5&g#!#kYDh=o!UxYyEY!*e+5Xo86eU=>RXTZUi;T;FE4(*KXv?zPxas2-TitZXgxM&3C4wpf@b1iS5q_6L$NwvV z+Q`K?veRp=wbjPzmPVeG(X;`;r0n)OD{t*+r3EbwO&#rR;Ef_yWU~eJR4Uov(W9fI z<-S9Q4js}fSmCeVQ6d`ox9>`6`86jH0ey;jbo8pihKvV;S!qY}D5nX0joQ^dwftq?tRmDu*7iB)L4nd(a9DYhZuL*Sz7N1*C= zpz3F!>PMjJN1*B$f+)cffU^aqkPm@D+%57a=sf({`A)($fFlYF`H0-Wb}cO_g3E^g z8jvbS2~)%bD+3?ufy(IpXc3C45w1bh4N)7Q%aolc5k%rysQ}O?%ym1jr)X|)-S9qw z%JV<}xb?=JbC<7IDmsGZKEL4A_uhVbSX8*Wt>m%k!y|imi!JRZ*8RNc$Mgc-mYx`OmnD6}^qHQoXI|B{T|c=Gav zQyF!s=eiTgJnx?jquqzdHj*^H@Mw;oCw>DgJoBMI#;o&pTBK(O)*wS|?e zT>10)`}cF}^gcF8SdmQzPE&=%#?JcSgAZPcDGDk0X(dMmP8uvV=DhRvlIg?y`pOhi zH{bZ7W9Pj*_rfKs->79{fnXnit#X>$%Djf+DIL7`PlT^d9yQ936IpHYR)JVA6gFJH z4ygJA@)WJV+bZv~7=AiQdgvN5`To$Dy+QuMGGHTM5{$moP=b&{W$7ga@fL|vAj4*W zlvoMiC8oSUG8IxSL03>wtMo*K!B-n0@Ba9|f7)qhkQ~XcAuYB;T96#cH?VStz&$m= zlE6e4!F9;Zg?kTt5>`h>#(nr2Bv%aP6Fl2Igm>{pq(+emsmCm`c(LmT3DXnoR_{)9 z6~Sci00`-@vtk+%M_3yy^cVmh_;?0igkN-&I@n8@XFF2vB+(VTum zB0Z%n?LcJ1BW}ph0I5_WbCU;!2S<%!>5O$4GtA_5@Q*Do;GW!3oky2H|e+wP6H zFusV`Kv6{W|Ih0=`STI*=hNWN*TA2TfIlArf5sIODFDc|kbqE#GD43La0MnYK>UIOa@X#xI?5s_FIBlt{POG{|+o-LOP+ae~-m^y8AxV`G$ zhD0J9_(dAz1*(Fvcy}B^ zmq;#v8vBf51urF`9*Y%PYD?xkM{;NI6h0r#P^ZR%)8BN-4%uNk+|BLkRf(rcFGlJ$ zqxDm!#9`=0g6U8K^WEft-jg%ol)05>tfhVkI-MEbBQBEhn=i)+e@Vd@m*a;hCWc3!Me z_;_igmIkI#oCqJ`7R3dXUj^;DH`b|z6=b&@4o-hM<2cXDwl}x ziJ7`2{dkJ(-u-M#!UrTH{|a>n*&=3Wl26^AdSb!T&n43&vlDs<^)*u(dG^&82Q-YgN9|SKY2H+prk_*qSNU@9J_9IKr3ie zNKW|dvm?b^pDGetF#sZf$A}cHT9Qg)B2#E03#zabB_%cHCQC_kRc&2E1N94w97jGy zOJgO=N{9U=P2*upl@--cEbg zmmqmzLGm_p@%_6Q6`dL{#9>DL*V8jINBZcc4hEKPd%LBjD-U4-_+vHr;}`J90`SKI z!XJjrdl~nT$WL--Gw&DV69+jrD-%`7fJO2R#ykRx00<{oY+e?YvqmnmTL=OIYy>Bv z?Z9&MKA@6BrjRQXBy|Gt4v`yTsSN3mqA4&P#=t4=jc0%7Gl_o`*5OCe!&N3rN8Y~m z+*sHbvk~B$%P&*yICZzd{`0{sz#jLGYynQP1UBR%IvFaeFJO!>d}nO_3V0aDS?sv{ z?ahyl^2NM*4taDQv21^KTC=OfinTGgWBppi+#d9ng z?#!njp@-8wp}#Vz_aRN(y~cbvf%*}hJQ|_0B!JhlsSSenpXlNC2*L%cAYE2K@+2ce zkNmtErodXfRwXCUfLQn-pEXk!Em^W;@!~QfNP?$)y|)PWRg$~K(e)@UD@CfkskEZHwiXy$C2UVQxI zK(rR#s=>pW)=p%U16~2}0_%ns0F>1G*nwEIx3spjw2`0>0zi;f){gd0u8(J6P;gLi zNJwZ%aByIN)(fdfS|9(wKp<|uFlg1@1b{(O41tfm{ei*!FE8-lB$v~(P9ySa5P2&j zZ%JLAwH}=LEjV)%IP+srRS(XT-OWf(OH0eoJ)DfPxB;L1b{6dlQ`4`flEzinGw?>YUZUAc$~X-=iudwhY#&LdGO$g(}#C&+k0@=J~XOfhg)0QI_i!0vvad@OlBLI zi{h*2ZeO_s@B8ej)tgdNQ`0k2Q!}x%c*(6RMo7vt7lFb987?g0l&3G{)zs8z`iE6i zFeNA>TI75#57~X#=rM5MK#%%{@{>P@^;MOooj7{*=&1`AHlDb2{*uWw2|(UF;JzP2 z{k(=`DAI`XO?nkL_;Ge5+Pu`2TOD;7*Ww2y4E3x#wQk+IFMi0-4wwVZd1A&GKg5&T zX-|~XN+(R{8xR_O?a=Z2NEj`z7%+SS7ZmPpIDY(iL$8VPlAxq{ufP8KjNzfBzd^bF znoGyL4D(oi?R0@x+?09q=FNX{v<|%dgYezQXAYk`nYx2#4xTqrzj7FoFdx|QG04Jt zmh}<)L=79&J1`)(pjr?=bpf&P9trHo-q>YIDSu7fdkqk1i@B}?v?1vuNFY&e%*~1H z+gt)Dw59E<6hQLa2(5%vWi_{|2Ttv!gy$`UQdCPJ^B8+uaduh$bz{4m8nR#NrnXmA zciPoLJKt*V6v`O8h(&*MHaKWNTzGI$5Rk$!Jo-YCgbxXC=1Gt%`_Lt$-nXO85f<-9 ziA2GXGPO$BBLpHxfn+EVCk+h=>(Qe}VAM?FPy9S!`K)2_@$td%(R{*V2aWFM=NTI0 z6BQMd`0Ui)evUR-wA#C8&pr{OM-S>ZB58syJ|+;ovh~9xn!pfWm8{+7uryS(;RehZ z2e#jA$4x6dqQU@e)}`({a<>+DpqAA&bk=CJm;nK8wm{rDHSxpUFi6#>2y@1?A>@H* z5{pteu>K2d3M#vIxcXz%bghH#OjnX`O8R<{nrcB?z4O2n(p8%j;W8ySc!j{t9Y@on z$tK-ns2X;K>&kCqCB280v>Io4A7^+QXBgUPZ?A_VR1f&88O?1PP|SfJz*9}A&T*i( zro1Ynx~c$@J|`ERbzM7z77_$-Sz___hhV`UNPZhmN*qn}L*ogRJ6fBXNmd$t4{d5v zuUP~_+Sbx&Zn4o^WIN^CXY|OCgQGN6=g(!WF-7onpnHA#jjLxa=I8MP7c6nX zzLJdxN&l9owYyH+JFl%>wer(nE)*zYBEH&i@G8~`a3hgTENE>gUBuGw0}W5{sF^)` z_S8sg-BGa6ZzfbRiBseQg6h)lT;H(f+_~KslfaB0LT-F2m^W26gTzzLA=q+@`c3rK zi~&@BdLr^Rc2eIn3$gx?@kzTCU&|~haTRGSgaxx$q69NslgG^&3-iEL*Y}n1m1)xw z0#bS)SLr_bxul~C@#g)^%JRzc{It78Fdp;MPw(Eb``E!l7eVxAS+~T*(L)FJAD*b2 zVqv{3mWDCIh7Ic(J$PWklqpmC(#6H?5s6%c)!c&WSBget$||J&{oG_?3l?)b?`WTL zRk-y=hX?W`--q^mUH!%WbGg-3m3bF;Zn)GW6e&_CK@D$!a-9LS`U=utpNE0CT=2uM zqz~)s{On}xe=vZdrVbof{N8(}gR57sK3vw`UfBSSZpgWR=i0>!sdq}-91+mquq5*B zBL6{&Nuvfw3OJGfBV(Rxbd2R=F%!q(H*6NZyR?$TFVnN5u`)O9`t`JO0XN84Qc`Xz&N3onokXgPPLq!~tcLTy zoh)u^hF@yL2P%M4R94i~qqe84g5)QuZ7P+Wgb^j7aSTxC{T* zXe0#FWXRG^83csFL!lJ8d-(vgL~^>fr^a38Y_Jm;5#kXV2yj!54`{TWS}%>g!R8+u z>5XggWEm_w|#** zUIK1=9o(iodoiORzcep1^WwRUKYTJ4sT-tJ=3V|n;pb~sfAPf=V1?)SXOaB11d?_Z z(u&>$54?n*m%_^@&yT(<4sEB0SQccqMsds+AM@{C~in)VfLYg2h|_n0kTez|Iu4p&+%|7`nj2R5gC zwd>fC!|TT(Ec-I@1qT3J>&NWbsh^G{-{;Zxw+~|cQHbutiA5kV0IBf7&flKqUrbKm z$FeMK_%VThiH%*g>MJ}pU#k$t4;s|3Z@9l4?BMR}+3&emUw!q-0X~kKm(E{F&o|a} ztPrWq_io;D25WW$A6pHNEsff&c;m^(CeC|%@#3k&V#mBa7XHUpss@Sj9?X-^Et`&T z&Rwbl&Wn}~CeAXEllXGH!qU_1AmS@`NG12P8Z(cZ|Py#lWhDT1;L|ZGCg4P!)>Gz;Iu_2^qrY6JYxgI&l_c2Bw;BCpXA zD6a(@wA$Q+Dw(*`WNf4aU3L@6eMt-z;`Jc+MWu9VK`bH=2oGgRem>lSl2Yflvy^ZJ zzC6?jXF5MX)|&Hkr1JlJVg8-Fqk|IxQGJFC&fME~6VP|?GwLxlGkv=AQ_N$JF+x14 zzd8e0eTERdAvdHYLd8AHPZQGk~sRg0yrqV@Kn-%lMpeDLVmaHp0hw#Z&q0x_6XrkVL3g65KU-+gyT zd%Ikrm&H6bQzPmHkEepADmeM53BFl}e*68vEm2$&ne_E4hg}prXw+Q7Z$zOS6bxg?oCcP*JT^ z$RSUC(3g|o!oJ8F)u2#FLvpGq6xjXaX`EV{AiYn4Ci6g(ZlpH~QVfIuu#N2BiZWwi zem)6&;dTk+OQM&#xh6pBEDCc-ZOfl%lOn4aZg!iE>!(`-E;C|&n`^zb~{Vj z=FL;Jb(*U?GEbd8d-%}d8k@V@Nkd8V}om37U zoiNEi=g7VjhteeRemhCsZSj-rZ+q9Wv0YMrGEeJL zAP@MYDSp#t%zSiGl1?}Dhm9N7Z`{0n*REZMF5a$S3$hT>O8;Q_vSrIj zeE~})OP4KsWm2yIQAdjozpj+O`}o)>N7YMDc1S=pPfuyPxmg%F%H6$hxQ|ajkXGg< z(ktZ}H#cNT&7M6b49N;9(I|RCg=nd21j#AZ4J_G-jvF>mms5klkS8#-kcZ}g zI;T_A+wYv*_`|k?`?qdAkc|`qO9!cDc|x|Us>Ofw*hl9?W#|z7drZ(&51APf9gQ~j z1_^cUY$bG}*lM?5LYdK*>(5brN5)Xm(JAWD`Q}NpUR(CzD@*2#9obhS*64tj@05M` z^zim8=Wkyt4M00EYPF!a5(3k$q-4pGm)?3~zTDET7ur3B44K&#c9Fd@P(FW@s1#6I zN_erM&>5mfm>$i05S2%`tpwr-BDA2eyd28mGV2EU1F%?xs(yx3gO<<{VyFtC2(@55 zpAxQ7s+_TQ#M`ybAUk4co+`N<0#u>$_ExDNE&u5x|3-Hpa*Mb_5o9u_CeS@@5g>>VRZrv=z5hSbz5}qSs_p;Wo1N}9-FrdH z-czBAB?8J8#Q`EH;%2^%+$1e13cd;=3Ni!)L=;evB~Vs@Qo1+YO*h@UP5!^qhIagO4{ z+^6TAI*Wc#S5+-p^5QTn=UKD+>J&D}6rF|@3=TWxzh-F)Q!80nnSqTChb7eU5|TVg5{@tW>5wCpewQiuEx2eFKenHU?y@39O$ zG81ECCdNhx20W=0(9Z$8fg2Y6fkl@lq|)rW1trih3LsR)NExw}B0kPg9z+Ig2K9{- zptuqVkkTeX({#4Cb($em>iU8bx5uBD)n8&fKvAYQJrRlJ%98+BIT}B<^zx29`_ju( zN}WcBVg30@@EenK?zS{^(%Q9apB(_zNsi#88mOY+ekFHOOHxW1=muUhuaNAyU1_gZ z2M!x)m6Ms)85jl%i7(ksS$%`qYuJiNXdDIv2{v)d&qtZp;<|_ie4Bd7pvYl1RYe7t zK}&9T*mIy~Wy)EsScg);1%UcQlIpU`)B_1teLW%vvlZbm#$zPY+^rJyAy@*a8Ibh> z$O|MD{f0&=D#*#mF%}o(LKP{%pt=j|&$bKm5;GHsg&+lHy%R9DNcI7Eu?LW)r-^+dyrf?I}<&aJ@gNtZ>8B=&`Rtv!faU3KRx1qpDD}oIVq~KA4SD0 zj(?u(%{a{-xSqINdKSm0KcvAA($F8$;0Cke;5FS)6t=9q3G~*s9pr zl9If|uvF&J@8;aECbtBd82tK67T=IOGmz8|*483ktwtTUk+1+{)J?@7o|DvLJ!8wlUaw zc-Y(9xP>7D%F(lzFBW@)q^U_R2WF$&V7Y?1xPs-lf;Vvmb8!W8aRs4iX~^b5G6Ujh zNGKKa6Em@@)69=P6;%?VZpZ<$9;Sdyq7?kZ1>Hn=B)UPe0a-!R(6Ycu@n5}k<1Te; zPMCQn$F~4n%I%CHOotxJ`f>zcSHm1#=c0}I?z>axQ(EjaKD`1IgDSI$=Ob8j|vwddWt@`XWf*sxJXI1@Wt<~?A>%2li1dV_!_;jD(0@k*PKVeGKf z)b#SpuT6z-V>|3k@`oHj&cm;O@cfKVK5|_7!YC`I?ANQeJAiFfjjr<`=90w28_yCE zsLa6=EXjW4Ks&-dcLg$W<;rFdoS6DX^(s8Nb06J&L;C4MtE!MFgH(vpQiS5+vIelc z9J&B4BA6RQRTdzdf>OAqrAUeBZWH`OHGn5<9o-=H;50$NTGWP!;)~aZdU_(Piqr^% zxR8wssD(T57I1f9Rl!syhVJ3`uy404dlsqzvWg<(FQcjmclP&V0yHU7d*pEkt zMj%<>Df7RqKfhE`-5jecOz`n&EuI25$WrrgI3gyQ7a+#;EX>3e@{e|$xPFT=+i>i? zh?4Hj;fKOeEqibEYQg43nU$l10(r4$_||s4XsJHb-aE+{)_T+oUuz!A;qgY4Zs90L82IY2q#UhW{UaCHwrv?w4jFeuQ^ z$H(2>6O11J9n~0& z2ed;v>&a;mVNuM;3W3hL#Wty@kMweK)3^tZocL}esH?8xkuMu(>)EgOW7Dt>(I`I$ zNBCCu4EF-}972cR0NnK{XN#!!o5BPUbF&`b1M-GnKGRPpYX^K!Dn~L76lC!^SSFAmX4xBV4~gVu4_s zGFq$I9f2~Eag)h-uucKSXs}1XTPkmdxk5@Z2gIBXByVQJ0AR%(9g#}n;R2{8v=%~Q z!t8c+cZ4IEWnAxE5RV`0#+9D0fXer zbu#EXSl)GZBA8ta>l30xBG&^w%Wc^@5x&p9$U<-fu5TGUpu29|ijRcFPF~XY)!TlF zyLR!)jpSS3eL58~HXC1h#5@6+osYngIS6ye5^xr5`gr3+xH0=6L1`d9dd|FD{%vMK z`6ZC)1By6`U$^d^shZqf2sY=U>P-Xr?;ESSyhe`j;@9!K_sEf+UDZuM%yu`RCQi;# zd~f$%&E&V%=_&Sn2Jz-LPRgk{Th5WSK=MxV@4o%cM53U=GM9ig1v&1LH(!3`&F3It z2}qVcgWb-tEICYzHKB zCn~R!$VoprQ_v9EW1`3w_UhATZXcbZp}3&ZCBzLb;&p=DJ=CS5prlbJ$G`gYnuN@G z3ydtPvEbB3O4=WT1&DxxScyOw1D`x>85lJrs>o&rB!ku|{8!9=KspqB@q|n+f|CJN z674W9&2V@@W-+(J7YbJ94$d=gcqL ze0DJ@*LCKC+0TvPYtxY<^QxoS$*Cpp3JOu4{q<~0jnYX1sF8P8)^A@!ioUohYZytW z(~1C;rye{#Zs5n4Pie?3lWW5I~Md04rG~moNH1 znM9TV3^M7SAWP`cL346aQoykZ0ByGp78BTSFt5Ow!Xsdaq&%|uIGr2_X_CebRCc)J zhbfpfDEHoq^egeMF+VO9zD;W|O@{LE3?e1ZLj!#S^UX`v}VofmnM3y2kp=! zk7GceW_JPEdY1baw-WbPxsj{_EW8K!#lEd`piLeD%s3WHTPeUSzhSL92UX@4?zgey z9?U5Fu#Lq`a+F*0ayVzl*udaMwvRRu6ZScgGOXm4xp>@_iwl%mpIY??@pC%;(Q-xyi1m9=iJ2`?GoE|?QIP$NRt{f!->s<;#LV}#qB2oxw zdL{Q*0fArS20{!0wOAG$bzTLgKcNObN|-HC1ly8FXlvt0daxcU>g`X6xhui@%n z!_|*WONTL?4Ye&R6SY>L-+@%Ss4$*1^u#-NFnn$&#NUcfjlTu9QILuzB_$`J3JbWD zw6-!CCT!6k2fdVf711RD>#_m83g#jSC%{O<5E0}Ax`QJ{A8y)jI>X;Q@#QpAlj)wR z3VN&!L?OZSqv@0>+jQ6556btb(ruC+1)5j)* zX_;x6S%Xi)O;b%Dnl6~)Ot(xKrn9Dtcy$SnQd21sqDKg7a@ICCpFgP8Ld9H*!MB!c zs@L)LDd)p6P@m;i3;-jbY0bL#-+%vQI2wLtKih0gBHg+xCfZ;yY~QL62Qo5@fAQ^c zkFyexm^1=*+lh6%7dMWZ#0>#hGM4R-?Q8S?mO;B?jX09`m%O=43 zpt}w(lutN6jgEI6IYHnGn?PLNrqki%ekl}eeC=_v3gx@+;sT!V)sL*Igu_S-c$8OF zS3!#`E-uK+ZvstnO&yGj`ufHOfFY}!np+xC*chm%d$rXVpULRWPCzKcX%6UB19rFM6YZe(MUm2~)#Ee6}GO|v3|8_Ms`PQw3q-$p{ z-H6-2@0aZC-VI|P+P?q(jKH{p_g}hr?Zj{L$qOT4OYFsJE$~a{fopp+Y_M%m0KKo76# z>ERjZ;ThD!nFaYUFkx_JW&?8%f^`Z86~MH}g1wNRpPPdjypSesh^SzNnUSApRAGzzqR z8n77v#VR}c>!oiufzf5Q^z(!HH(>@C1#b92L8P)(UCwK2sHtF`U7TPqQ*_l-oR5x) zzTDDh$IYNLMbJvVetgT7j#r5v;autueyh&LfnePAaqWbPbCD0w;z= ztHSKFzS#Cf0^7RP&>Oivuo_`SmQ$sN(h?&@lL|n4RR+Dhu&|^Q3r#+)EwIZ4D=canHc`at6}g2tb@eo3fwQng5rf2UYH*i;=~(zkoU4D zhd97_t@kXW>_?rO@-yK?Z*;n>VfnaswvDDL3CJ^LfN1b&mc%LEn73DfZ@-G6y#= zMT5ybz|U7*k@5YGZ`@qWm1S3VZTsw-qd#w1|Ix=gPF^3?I~f&ipa)jv3B7*W2dvJ& z&y>ptj2JzlpTCGF@^l_N`&HnHB0X5r%`4Ya^2=KVwj6(YY2e+sJJ(-^wfsHC*B*ZBC27d@e4l)}(X1v-SMxy`OyY$+b$` zz9UD6Jv!W9jp=+PJ@mtr=6m(^o#rl?ZTPf#^T1~kJ}N9M{PCw3y)tvnJ1;I?w&=NO z)22W1;)_d{Eq!U(!pDqdFRx+YXmXE!At7M{P+9`jJUrd8;06YU3u+Y6GByc1+lm>9d9}STOa8 zr{_+aFmck%d2{E^m@(t&dEqV|v*v^6SKvJQ3B4>o73A**M3Z$#OJf_NEMAlStS}rq z+B!QpPG#@z72qVW8vON0V+^G6D@fzFkj5BDV+^EmJO#aJGRF;Z9YhO=85lkyayi01 z3SW1T5mObkVfq$AcKOAW@QjcpD7E25lH}w8%>fEDa*vv0LSa*(ss<0h4V_At85rR1 zZV`L~{SKZ}7<(v|B)O;wjp-!3W0JF}bphR_wJka6%Z zzuj!^e|qzal64ze)^ZR)~4EDCu0GA4)hQ&!J+vCyp@lc zSMd9wEh~_|aVNX1mD3NGc9rEO-X1-AjD6KbOfwrUmN|{Re7V}U|C8^(#+q9k2ZoOu z(7*qHu!#N+wUuqpuZZmDReAZcUNd9roM)eV=9#Cag-;lP{v0uK{HQTl8q#3!m9Z`U z&&&@#fBYaHKH%P|V~4IV=b_W*&Y3-T%AhGjL;4RGG-}rCK;nK0G0tT9?0bG0Bok~z zia~=0jT{#2Z>P7i4-6hMd?03!icsVcie4YKLA@-v{Myv169QUqe*xw)t-Z{~M$%e= zRimi9nNwQH89tS7xzXZ3Vd_*Qq~mj1Qp7D_C(u4Y#aCWI-b|o45a7wn#!?L@Abkrl z=EA$gCIGf4Ob+lFNlJ@YtN_YFa{O~Kp%Nh0&}V=VVLpSqxSNpNBBThqDq;d~7hze4 z2@XO_n2&^pkRUZ3&IDSCaB~u~$cTg(J|gmbs?{o_pxUafh~f)&6Mfa5>fMtw$q8)V z@FGc^9TC?0Kc0oSu5E;zr$f$fK+ZQp&Nq^rBeft0m=@eDFW@>S2A0D7yKqp5IG3W5 z!ouuqU^C%dNApk%p+)gbzS3NqTr?l)^>A@yA}${zhlYa)FH-nnW80EV4Y+p)aebn& zOf<2$Nbv+tY#V~H0Qzo8N|P9Zk9`0APZyKhJv3D}29FyhjX!(rr|C21h5Ne?sJ*#i zPj-7noyP=EZ}J_!t8gfXRe$%^7BG!--zG-DuZmEM^t0=k#)?RcjtTIh&gHZ*VdmEr z#}e!sPknWGZ#V*QGqotYr)+DeZ$ZvPG74v@P3O5Nc2_kWzAK5jp{?)INj;{{adsVy zF_XIv+l@NC0xCMNxe|m`^0+9|d2^7}2{1jj)U`F;+NRY`#`^T6c?`P_AINsg zF}Rod>StocCaB@Z$ZyyI6?!u~AKMY7iN+@+)?45k#a0efRJGfqFA`|<0H*VkY^VH# z#S`s}3UfGS_=AoPKHgql?oK`$wZ_NY$J5Hu!#fJulh;Le%EzkLCV883DkZ^TFNMQJ zdr2F{_G3nb87y}Q40Om$DAAEFsw(|nuhW0~`GWbc5J(7j{<+{G(fF56R&poPp^vu? zHkQBzKS$vvt@?wf7JR;ID|Y$5R6o8a#f=bbVy+vU|Db@&%P%a3t3^Zw<>lt*8;wZS zMtA^oAx(#vKJiQr*boEmaoez0=m&5cF@7cH?)_#lfSE47yLIZ2 zuw}EMUtR7QnUuu-4PaEC_KeK+(6DIC(#V9-+cPqkL}BWf zTN;fuTPG{L`N#0-^}INIY(H4I;PbEljp6e{IYY{J&sa<>)xpVV8H#&9U+@fQy2b0~ zp6{g}4@JUQX0(hTQRR4W1VI6SdO^@ADF0 zq!tX~9zob+2zrC~go7A|2hW_6Q!>(15}_-jWHv}D)8Q&ULA-rjheTlAIyC-<35bUd zMeunx)Y#eFzHJfEQiIKJDWXdgckBoVfFkDr%&Nd`SSYaVI&Rgf4O0PdC`1c?iTT70 z9ydM4Uny3=A322=0gCqVadGq!JlPxQfeDk1))BUB)dr_~H5HXLbx{3Kdj~|9Koqgt z@Jb7uyc-nQI%KeJaPskX_we?EV&?7T3EoQpi*UUUf5$T06MLl=*Ni=XXRqv^pO0v9 zAq;BrUI18v036T=fBh)}a>V{F(SZP-fvwmi@RC1$z$AvrFl)C#abE(1HyS>nMdnwy zZ;#AEhW-}_1P_L7Ou75hkYI!20Z6;q4hO?hn7!+vwmffsmD_)GGXDD|yx2qWU1AEJ zVSb<23Gfvq3+w>34m*H9177nZ**<}d)Ukc|Gl;JZuV9YckNE9vAPn^I5#_O5T=FK^ zr&~bf+X-_ijTun@f+p~lK$-04WQhMh_}x!`|Lz`qw+N)#zp}5mm9RqJ6+jyaW>Pjt z$A%c#Ac4P3A6&iv0lhZ#FDyi`HXo2Wa;KG)A^{drUJ&~H)u#ydf%gN_#d!P~@S5-b z(+8~E(BHuUu~_2q11}U9@WB{`ATHv4fAuNCet$Z`UmemS6Yh|S@qSV^)K9|PF%H}r zZ*2v$fD=)WaQ|6R*#ZI?WM&nd6;z0;BiMxhGKl&DS%(#Wb5nFA!gLD>CdO_0kW^aW zO(cS!Exxft5NuZT6&+#gz__t=MKJ2D;vP(+;4`c8Vx#+rDGL|QTlykcSqxAu1?k#6 zBU#1vg^k#)twpQT>3%7^v3c_iy%kjspwM`ES;197g`f!Hie{AGvZ8tb6u)+JwGV#q zAC_yR*+B`pS3&M&xJJsI7T86>z>y%zF*HBmQ-S#j2Fso<*fR)w24PPZ z?CF9%ZEoBwC`e1o&ySB!Pe&an2{02~8^aM(`HIVqBR|;$?$+%{({aWL9$@bpamOkT3b+;wW_eNsY&!> z0f5R&H`?^>WpD2ZsA*7;hX)%eSU&w@RKJdMe29}aC)E<6#j+xL0rGRxQ`1sYfFR3& zZbxuI(sQY<0cePLo{=`|iC#CG+FM#W%>Yo7bq@+}V0S@UDv|4n_sI*BUoaxJAsEMl zFv34jZasATT->c&CqJA9>9pn;>pr#haFiHZ5KwA17@0j#w7+#6t}4F->W5a-95 zF`VgsNc!#zudI0c33dwJ-KmHwEkX!)G0+?u_<*fBXM8aQ}TCOuTZVEICSlM zA}g&?=A(kUNMxm*^7iT(zT1Ax0Z5?Kd+~kO;;W;bGq!@2wzS$3fWeF=wTuJIo-AunWdrCfp#Zo(#M#Rddd2!C zn$r#%4=+zIaI#?H24qNtqXSL^sKq}$q_~dFhJ;d$gZm7UA}eirW;R>|WEy9KhE&Y` zfG&=4Tma1iDZ^=*xv9yiSwtVJb?A^aU>pG+ib@>Ntzg>+%Zi9i+&;dUUfsrYi24ecDY2ZxKCbgeenf z^)Z0VfYw3%@!=EaQ8FOu)QLS|=C=h&&CL^gqA+7D67TPD+fTsCHMI%+;p^2Hj+;DR ziaJpe_Q=TL!-qaH?$L4M!@?s2W$EWH$2AOma(N`%&5-VciTfo$HO^o%KQ37rh2maX z^rQK6ZkX2Q-S<|%v0}-h7ZxpAy5g<(1m5?F6;mJ!mso*#zl7yr1r+H6Sa6SXPuUtm zD^Z*Y3Md8rz)35nj6f45QxMuq6$n$xORDcxmz4q#taXrg+F}+Ixfc=AK`+Y%YoKjq z=zO(G0a`{N4fyC#(C^qmi=@z=NV`L?s15>Xoha1j<>{vJ^6_$Y1}n-xJ*L>_|1+Kj zZa)!E1GU?NrvWn)@L&()X%yrRl2LG{2Tnsa^nbR~*Mf;^d>6qk`%;52YC z{25OJbgzh~0VKeJr*Q)D+Mn?>e|;d!to%O4s79gL`-_LuR98H^zLJhO*=rf5{QKN zO*iefLkACjcl5%Y{Q3?Yz-3U>q>14^4JG>l1(bfgZ}0Apqwr0tHU0#GdY_*L+Sq%; z2K~n;jZ&YW;Qo(J3-jaowQHBmo&}Xv~!Pb6#4zmgfT^pBUda z$WKvp{3G24z0=Ho-KBsM>gp1!@9{bsCH)wwUr(d_o?sUlRE0EpiA-lx6F&(noi_oXIiFhw8 zDvP2>cSDecwf(?0H`J9-7+w0@Hy^uD(oA zl_h;cQ#Igy{&39uu-;9S={$?0RN52(2Jm%xDhxD{N=!BQj!|%V2#F|3}dj`d$?%G;`k>= zv-D;Jdh;dp=F{lS2=rzIdNY9P>6zgO==@8P)mVFUBJ_hLQ2s^umKk9@110i4W_{iY1wfrU z5-+4A>_I2K2n$Ld!BV)XtjuXPtrUpS%%AAvu>G21NLa*@_oJx6ao@{s>B9l&I*KtA z1rOF)xVyHo4cwY_lc2qCKonpTR&=T!r?p-&d$g(o>}D+?Q|5mV#tvzrLg4(j6($tw zG@MM$cb8@qzA!GtomU$V!mqXSus(q5?;v^)YXS-#1@_Th@btn1jA2cTo$MM8K}laa zL&FOM7Ov9E8%8;(-|#g z30Ya;S+8lAx%Ba~cXqV3S7QEBb$9jd#R9P#y!JK~@foP=R$5rWP*ujH*R(K&okFUH zv`e~M+FBYLt*x1?=eu0W)$F@>lUjI&q^W|Q2&a!9gUuynK4_c!_LmCuSGd76l_+zt zfF_kDmi!{m2ghbCL+cETymp+@xUOHHHjUK^lJmodpWi4McksYPJI3KUa(vz7&ru@%`fyx ziI2OLoL*)uOpiVG-VZ+vduIOhsL{iF4GDYdtyBJIuEbrw_`|VNzg@cp7l)vWM8wYr zp`Cw8zBzjI>qyL})6L$dE)2dF1TPOtSB&uWolXR}H^ANz0WZ}}wn_SgukYA5BCw`r zvlasyE0Xia^!f$UE8lub$nJ+`CuP z3XVLjEG`ZtXK@BOr%|rGn?dX(9%{-P{K?NVN)LNyoW{x-VyNXy={KQ;b3xY@MF`bJvEkZymE#~JK!w|ghJY6Y%zmM2xMX%XU>F# z3@-ZYv(J7hZZYnTMnPs+->Vw^fM@3DUg3>X)Lp-1k&6_uM(u_Gn1_wIX+F2>B z{cWrr9lhDx0%B`zjV7jX(DBXnT>?s-*Ye?bS*EKr>w`gS=>lM;J!0LYe<2Zw z-ar?_`iy`a-oQS<#P{D0BF^`6^aJ^-<>&_)`ay<%5aB8Yi;Fr8?Twf9^mItD^qnpC zal$^f*vA(8*kT_UWi|n_M6Ny_>+!$LtN2PPUegKX)0LL4N;|1V&0XUvBycy6tDR5s{m{SN0V^$FbM|hZT8!S(oJP{C_i2Gw) zQv;<`zl{=Or^1_o>XdEX_Q+_qUf@r$CpYjvu%|bK0dyuA7QwFJev#97ghPcq#&HVJ zlymK&5qS3<+r>9DvoQLRQs%uud~$||0BRmZh4b@E^MM8|%cGzM5R%*m@d+lQBUCpE z*4o)Q+X3L`YD>Y3KfWQh*)Uw=I9%gsT;niY<1k#KClDND|A97u(z_s+cpygvjylK# zMtQUh0U*$y=@dw|#7-b>kbgXO!S(YEb>Yyv<~Uxf96oMRsNI+AtCG&1Jo)p6&7k#vO026!M%1$inMG=II1*j6-a5mNaHwABP$ z_5`$504ZrSU*Sds>{*uVH+%?_nxfEIu$8q)9YT9yNMlfgHd%`E=VO?ReHOkn##JGDkQ?T)fJ><8s8+QpbhFZYoGS* zmNpNY5H*)_@CC_RE0MO~r=0r4yt(sytq@+cQd?OmZ3$>2_kKdDT>SjdAwvfa(g%<_ z4)6-y5Ira$W;QZEi-0wSdWttNHnr#xv^8l&oItslkPL}bfc;NX7>^>lR9^zEjT_yC=_*S3{!b19C?5|UI590Yb zkG=8w8;hRR%ZH7Zws8Yrp3DrOG5-`G>lhdptKmP`;5=)@*fC?nr_X-{3(v4VA&(9k zFnARBFC32Z{Pj+%a3{BNC@x29pAyej-33SQ)pOUcUr(>q2h+L%iXC9;VE*F|KBuB0 zj0sfMEX4~1LcuF>6$Ls3TLe5IeHgN$G@=qK0`E+!lY_~r8$JSkFfAV_J#C>$hCS~D z5Eaq@G!7_Cgcp?6<-~&j(Xqt7a>J+@>?dUd{Upo{jqWB!b!ZWX!aypURnLkK!OA_S z1umXO3Kl%3)sz(^T*#8xnLBxDr&&HQ_{$v<7!s_Dm=IShv|8bASetXNRk?!yKT1vXW~wvcp(X6p9!{TE(yXh7Dtlc)NCO z3_sd;8LCxDjvQfjq%pl0j@gY}q%!vhP02zP#m!`~JS~{qv>>*ZtoW&%6ADc;0q^w?u2XYOH7{ z=v109Ah9vi;VfrPtN(vS&`%2n0g5V~Z0RYEo?HcsPV(1#+@!CF3QQoqilwKZ|GGyV zeMQuEg6Y+#^wj=e_c%pgxlB(p>B)hftb6wO+m^`vS!{`V1liFNerSmfOB2-nzInJM z9@5X)FZ3eV;3dvbfO%=K>_Z;;g^{r3-u`%jWOJmwnc;bWy{7IpY~L53e!TDW&D%NCLEP{t!itZ> z(Xs|w_A0XhIbcTEQJ&^!%xhKA8+RT!c0S9d5A;B!uN@KOM>ZmW`W3zS3NJoBBJfA9 zR_b`0N9}Z6)m1%r=|}|9Pk`-{Eqiy>qzQvS0s?i+9X4VYCxdBt9RZRJ+)i#AlCoR@ zTu1?sXgAw}l5m5H&ag0UnCLQ_h&h#VrRfeES)li*`1bqzFotU4fjZva-BHH{KC2HZ zET9U~NNg@MBBiRXuCAu0x}~+XxxTixrmDQOqO7=(^j5h5cawPg;{_NGcs<9#GsR2w zK@RrdP@#fut}cH5{(gRb-rioldiM_Y_k}aj&Dq`6$&rot2L}?zFX`rT^dt4XoN;)- z?}Y}r8_OYqwOYbd7hFwJDH6s|4-k1mG9&_uxl{>>aF|gRn8(Z6}E`POQBC-VrAnNLilA+H*`~5nprnNB~QnJeS)7@Xi zY})tp@nhe9y5XxYU~0S~JwS6MC?5o6`t<4POu~gMASu9Ix+sLDqWBsW5qMv5Aj{m% zf(Cl%qzWZ#2iZhLTK@VlwwYfu|Mf7IC$&>~j6+t(21H}sfZbn=w7u~tv2&RnlDzS# zr|T^Enrt_NV%{)p^=B>GkU@DU8ii-Nq*?%jP%=l4ZH zsagmjf80Dx8l7PZToZ|u%j3B3mVGdDWD8iirJ!tAD0GOrN!Jggxa>XXL~yoN*CA7} zv9<=qG)hbFL8x#;J<-Zagm3XyM9?G%uH@li-JR|2NZvw1jzrAMl1V9553o*AW7Hv! z*}Q%r1!#SsD#7*YPW!`YFJ*?QF|n)D`LN*RKZ+ zq>r#MAUh+^n9}a!h13UG@tu>m;56J2!P#Qup5~@ZiC{+&Be&H`#$3BLMjs9r>t4yA zK^+!ykTGqUfn@HOv0J2VUlI#cIRRbsuCe1IFmO$p{m37F)NaXreEpp_K)_!Q|~ zLDt>WP|@Dq)`;Aps-~I-NqMCKM*!P(Mk91S&|IFqpqPvvJ!FVq zXxQWtBOe(&WW=c8kipjWlcPXEL*EAD&+koUks@M?nu|80Doww$MZsBCSqVNsr26H+ zxm{LSQ4N!_)R=d-vMLq!!>t@tD=LK+^$Sb@Fu{|>(A6mw6ug;oqs_cPG@LR)0?Img zMx3QoA38KN1b+0Uio%50*w~p*E*Q}*Pu}z8_D4s5|6|w-r>_(T4e$TaWfw`KoK@ne zrjFV&+)w~OP;?D_4!2=lf~2AXYPMv_n{O@@xHne`Y?WRfcjnxs+JR9~P|puzf%%4g zFV9N6+2Y@44!J1*&A#GGZULY8ks>p8e*ZxK$34REjhBZ_9TNx_hgNm;Trr(};b|m^ z#Q*RSZa>7sNwy4J$?S7M;{EyOo&x(Eo^i~7@90BeIcDdSlvaZw2t8A1gxU_i7|?hE zV$_Jb;a%00WoR)9+-OxG?IM#=D(fPScFY)U9O_rKx3f+`0(v_*#;UrYAnQZH2Ne+D z=)@`AHO^KNnJuU(HI5z}3khwqnh73_kQ|r3j&9a+sPZ_ft5_1r#REr8NJWBeTx!Cf zj+!g5+#bKkV)XA~^sfQ^yBK3)F~-DTq#`#pR)YZ!Hb@<+Bs4d-ApgT?gxBD1aYgR6i=WZm(k*$v!@PBl+tJv9t$B!RBz(s9t zE`sv1Y}uRBoxIC_{qe_P!}jgFc4FdFvu0enI(PM*?DjEJBll$Yb60`65B&nm{%)UW~#LfdZtUT!@#q*H^iPPkko>8U4rE8F2t& zAj)Pfofj#sA_UoCMO)FDX_4WtjtK{N{UhnZl~0Vq0NjEfKYF^5uCMqUNy7Ie3E#&X zq#yD8N9=FqVKD61bR@r4RKbk_(-nD9cR`<7Pyp|8Z2^qgqT=+lD&)yFmE`B-K&=xk z?=BftPr#@lxsZ@tNK7C*0BVgrC>#}{a18EfH`&;Bb=NdWOs3}Qdx(D2b$3eb?L9oK zoyQKTs&sV9YaWXg$>ECGskW)L5h^!?#KVPtLgPw8l1@UBo?uYk?CQGt@J~D-DTF*A zHuMLP2jg?W6(q*^a>>C|1Q!Al!|sX!K;*)}c+bj3P7%B+g$3Dn!EcKfNFM`{Xi7@P zUATZ@Ht1Y|3m|m}@Z)Z(U8cYi#NlFqqv)dI9bE?dwszcL)iT)5lvspG1HMzA#J1`? zXU?6wa{af8yFXRWU-Z&otK#CzKX3c;>mM&&x`(=I?_rW&P3o-1a3`n3&Ps7N<;N_i zp`#-l@6`SHi^x(vN4BvhI2b94I#(;t0MK-~y56`E*E;N}HLF)+8E@>ew$l4n+>Zb4 z@HfoVkbZ7EN;0hfKEKI1JS@zr0%?324XVw$OjJj%c>T3km(LyM+dr)1+Qn1J_5J<( zJ{<{cDAirs;IVw#m^sT|d-J*3&rh{2Dk{6z?LBFzeM^0%$yWIsy1 zZ`@!n2s#@_FZ-@4Nc+fQA2kSY% zz*t;b0D31RuoQtzrlzK%82)SEf_1I{6`(&5UIVHJHTT-F2%EdnXLh68*i(UI za7su~nY4=|w~>H{emsgH14n@HB`9Sy8erqCQC`$ZjBO(A1iH1W#u2Uv2b9-#adNhY zvkEyYj$oki_I7uI;~R+M|NmdNtY9+8?SERJC&|vbO;5w=Ni;9-S5N+1TW`i?(bn@t z5E!;zE+&R}GV=0RHf{2eSsZQo&HL4H9<-aaTp?6rc)GQGE?lTMfEy0*Bfa0nKBG-B ztH$O*d;g*RVd&@3*AOGa224a+CeYlkz z#8%`7Kz&0SQzB(7ou-VH(xwnd{fEz1hAWjZ4iCgjaGf~COnMqXPoi9m5G5D8L&tcyeUkxuLYK0iw9KD& zxCwF(Z0g_>z$S=|$L2w~czEw(WU%6EO@K#Y6T_y{rUN zM5*=^TZaGHa>_=w+p@d-{+Uxxsr@WAtkRETN&vIShP9=O$JPI2xq7UkxOVuoG;|kHg-t7*QrIX#l+d%k4X&ReQfkNxMWmGYbuE8Ijmrj0 z6L^L&BwrV0V3H7{l+CZ-A3lEkw^L_+-Wg)ph7`~>YS;LTj5~MEUi|HH8nEq3)8v&XFrRJE37BNG-3gZz(TtuCJF19E!B5I#6v$qd~>p4nwe& zZDv|SQxp6eFw9LI-R+Hy<*1_F-PGFAj^bi1jm=#gY)pBRwRIDdQgg}!h6e==3y{;n zsJZJ(%G%n>OY1v&<}R|0h#8tN(qtPw4TC~@h;Aeo#3Py#@TBY;i72txFbU_!C+soTQ#AK%e z@v7R{9#o5+2H$##H3qemZ?A^mrlz6jZdz7Zw~dXJ+FEk2=x$*F(T{$se9ED`w6?Am zMpkbqlU*j6EfxRcc)5*@qPw}dS>Yr%$)TjcuCjyGFU8ONX|4L-d-49-2cK`5aRypu z`4TpcW}ITnyKd%!cYi4y!#c^$O^E~3@twrvTUV|$G*%n2k_zsK=f=i%Qigt8Ctgq~&UU;2 z?3NO+ySlogZS$KQ`~yPv?D+SdZ-34+Rz-qQ;(4T+y(2YzwD-vI37|Lag^ZRlz?i-x zIhu0uTS{kKWPX`DfODTM2z+^Jt{~6Z4t|M)Ie2jhFTTzZ_}mnuj>Sd-y7~(4YuROk zY=T>HVtQO8CfJ|h@BCc4=Goyx{O7=vCgl|1Ji7%4=7;cYtm2;Jp68zC#v&e=jIVwU zU1lvRF@41ZZZXsm5B#Y8h`9o{1PM{VGP4-&ttEH|3Q^0ux0eX~+~IaQnJjef+)%l} z7s)6H!NS1_YyzxTklavl*YYx=&O)+mw;)SS$~6*sAR>Ph8Ky*ar3^2K8&n}f*Rj3P58}I(sPR{8ZptBRo2#Oslj8%u?yc5{O`mK0=A}KKl$xO z!v;I2_O$CiMZu*#A6ZK?%r982-+ii7VHIOo|25koNUn_;b1m9Of?AU>mUyY!-hS}H zl`B_1;lV0@2H^K_E;x}o43elv`wtIi``A9<;=%wnUN&Xy*s+t~xDepU&Ow&p4XN6( z*JDd3p;vQ}o@56)sxGXuTO{v4J$_gusH)lsc!%ZnXF=jpS?QveOHmII@o$~1@UX$K zQ5ON;6Wpb{^7&pbO%%bco8gAtKz+~8_mS%R!a4-O5zug@2)gH{r{xycb#ym^I+DCq zB_);js!Gb6JIw9Ch*ZNng=o9ppF#;7kK$@5=SOZJ5Z1MEyh(06#~C)NJRZp$sLqL4O8_CsDXT>EybNLU z%6rDb>{8;m<*x$}X_pd@VV{&(3ChG|uq>qrd~ED>gZIp)A%`xFm^j(jzFT63X3%Xf zFTHu=^ni(%p4tuFKpV!{>A6L(5)6EcymxOqf8hNhVB&uPZ|=*B^!T@dKcgBH#$d{<0uZ;Ot)6unyj3$I^)?MnxZy^XuKyQiyC9_Zx&fT5Q4(%ZRMOQjfnGCZIb*tzIgXwTY48YzvY zSJ9Re(Uud@mcb2G#d#H_Id`&93G&uGV=2I}C3zL7t%gkO($ebsisHNFn7S=(>KYr% zQj?NW*+XrckkHe%e6Kh;Fo-@|LPB=$ZZ0nGPl3SK`}jyVMQSPW9&OT ze(q6Le-z*E2KMm`KDo#(8b=<%pSS=Fp-i;YS23DcvpJSM)Im4X#10C+c_S$;wVNvSs4VwWt}k^%y~BKq259~g?cMkY12#`$zERF z=7e}oChfGbQ={Oblbwx+x0{EnLKf(0Z|SudO^1z52YaaNl=foRiSytb^xSLcxwp`B z_dWF9GHDmk(Cw|=sCkYt3U5s-7!>g{UA&j0NvF(B?)Gp`%Yb#HRhW`6!fKODg344{ z3I(XSwA3WnH#f89*x1_U=GxfU=G(EcxApS!;)3Hx_wAy|WR-cHR^G|+#lOaW83_m5 zi&*Ivb4|73l%vZzds24K)_prO&!I-iW^~e7_Oqg)t=4M%g2&&Cgg%*p`7Vo7 zw-Unc78h&|V_DL7=RGxF5e~5Nr`(7z5fO#nyoz7!<&Fu-&b^nQ>Q#Mw7j9>odkGQ{ zdF~P<4!d?a2;8pg0=ur)hL|~w4-+c(AoHoCO(L6*Yb-@Ya!WjG z1$sUjJ--z_f1fpOcxGnW%?qb5+)7Hgaq~`MR%U7nskrDqRr-UIn~Nzhug7OScBHz10!7_v$yl~)M#AD`Cne)=4NfBakg$UVFtGg>hI9p+9wpA z2d!0I9jfDYBD;y}{6n@n+ZqhYPOOQrbN(z~!;1=y9d;hpjm1^f#**^FJSa8AMWt2M z<<%4fczkMkWf1}pg%{7Hl$12stBk*0mhguAqsd3k?-pQ<}$V|8#3stJ2G3E04 ziGk)7U;ifE@<^vEIL}zM@dP<)q$1 zFc7!vcaqW*E?v3=ySTmy2K-0Lb7$fB|LwOc=k(6G^)~L#j;6dD$^pyYT>V&(hO;03 z&blzB?K`utpRKzckE|NCtD8#U>LP7vZR6}ca<1&$zV*b7+XZL4pG?mKnz z4O!QJ*|7?A51jT&|UqW-wd2CTvP*MGtnnK=REUY(u5 z8ES?Gv?IeuHZ*Ej^PxT4(=<{~&s3PqYt3&uoa`*_bPAfbbjkA(UUqg)P6HMb6R4hG z5&f+Dy&B0buxp4nZInIs*wE3VMuvlHG)cC!W7jwE7zrA?;K7g1T7=sWS;qeT`v(sm z%%>l)6t;gy=b~1%=Vycb2GYTI)KugX_(L)HnF#22xNDVe{%&rbMx&>jetdm7^011F z${SMCk;0vxns7TAr>7^nr@5`I19eo8d8k)WW_UqTTUv+iAQIU~RBl^pvUg3{NL_|Ikq^PZT7&-=t4nu|nS=*y_nw@o5 z*Mx!IO^u@hSMaZ~N6{9I^=2y-g{~+ZA>O*X%+W5bbuFE0m=<6$R)MvX%EBW{h0`nB z5zhktr=`6|XOuyDDH`ywkNOz-dbi*y%;KX)O(lU%7J77}zqZ~diVkLY-^t+RkF#fK z6KDCbo!pM%>U@9S2Ytp`z6(EtZ-+qJh}I$jQOlmE_fGH$VbhKf-GAG=*kbj1L*1vn zU({Nxe{bju*jt5m`eX0^w4%EKyo2$43G?lH^dgf{`UiG2feSoZhx`u9-hbUv+o@%8 z#FpAjFB|CzNI8qY;P1ToIoqjhsy*Pj(HNju*Lxr=m4G; zU%M*`$o|n@gtC123<^a5HGE~%WgHB606VaE37y!3{`LFq^>934C$&!kJ&AFqQhF6B zSmFuyW#d7QJjupRI&higBvY}U_%mYk!VCNUajyS)tyFmyCwch(wf4P#twEM+<^JPZ zAO3Aei(HyTxr7{wQrTsZ%Fh2mDg!N^op8~!gPduAq3O(ju=sXr9`fx_D^olD5n)nV z_O7G7b(THbEPJZ{W6y`r*@PnM_#M%8MPp|uy{m*ag^iL;#D;uAlutrjEff>`-Fq)F zwPR#-_QKPOc*ilAj$3oqBRDi{|Rn8dfS6uvmM-Y zxO&ud=0g+!h+}&s6LQ*f1ryK)Q*i}z(FRe>QZL27s5CRJ7$NyPDK}G7^X`_WXWmIo z%Q6;L6oU3UF+Uf@1~oZlWjQRXAU5_^A^2n43b81cmw^U3Kkmwvxcp{=qQ8%Jb7!@w zHO#!o`KSGd57k#1lgA%8w)eYn#>VE3V`r`vgqas>*tLr|=o>2GW6KXavlyLT^~?Pj|r>8Zb_gs}t4R}p#$4IMM2FcPZWK0pAEDD%96 zLk7&88U9!p+mEZ(333r-GXJ2Um^#T*F87!;Rp5>-5fs0it5>P&&;4TXudWE`3la^r zGmo5M7`}bf8dRf^2WqTkPU_AgN9H3Ss?sh)2DT#DsuB(Zt6=bxv{?*BIP~K!OPlF!a1pnDsCF>I?6Me>m~A>BECJ8)d}*1Awv?_56=pUV4sN#7EJQN zLFVid__G&Kj}dq=e)EO1ma!j!z91~xJoH5b`XUT-m2XaQbxBfg5y11=V9dX9?RsKz zW_B8IoT+Iyf4e}OK<&Z7?Sd<+PUolec&ly@PjgyUo)O+Ez3(Uc`#3f#QHR6BMbT0O zW`gN6M?1@F&wcUlO}nqbW`f=0?fr@1DpknL1j{0y)SzeJlbYe$RR*7wK8$U2Uj5S* zPrewnSc=SEQ&z58yzj`>8r5)eN1cGZ^&7x_FOFa{uvTv7Y;5rS|5$quz$nV?{eNal zb~l^eNl!?j_g+&dB1LRqSBe#}i(<|01Q1YcpopSeMHKKVwu=ZzlPUsILr5hB2!v1q zr0)JdXEp&W_g?S!_g_e|ZD!ti-}9dLyyraUIe42?N;aB2-lkcOW^dDnd9IfCN%hay zwYai^19?^OR*_e8^u*aZ)mVE@s;gCiqO5MHRjM5^{v{=Bw*=l%-{es`caCib^|Dkv zN?KfSsE;?5gGguyj|}nliHNqrKu4KlvQ1jHZ;O(!`NqWf5{ZBZPV*PnkxjNOoWKXy zk!QygPOpd4>n-W^aC$wQUWaqv+n#?kAmC^oi#H@7miTx|&Dy(l`?dIog*z=2VtvQ1xnF)*1NfN6$|R+EsuEG6FIOqWCemNHNBq`j!9q5j0N+M4nTf>Ta& zef^m;Cr)JB0|L6Xk8jnzOL7XPh0|PENV$Xa9%&%RjTg(r>@>n2r}AS^A__`NON#Q# za&t-3hCD4V?_fb$;labK`dOxgP}0R6;^DJiB5dWZQ!M0dRc_w3KPNvr17GxW?kQfN zss8c7CyUeIIoM=5nl9K^6vhJ%^CSCqZQiuip@vwU)>CWi6yvwbpdHy>>9^jLHfa3X z3<>6u5dXy&gE8(u2K4h1e-9fvY`QZY@A6Mfxo-&*-fk*)h4vipP?!JcOjCX`-%z2r z?m!idQ!Zpj9LI7jKY^3vB-@>nBtf4$QGD!7C9&KrQ{l-vgV}*k)M>SmJ!J#|?9Pq| zNN(M)XSjcKbc=w1l+<=9De>WflwoGkG4Ht{y>BZyqS9&L2#3*2scnwQ+(6IWO3zKE z=cM97*n=W7~#;I<%^hV7Ev0P(cV zZufFss-{e)uX4Vg2$NO4bRM-OV58a#ijTEHOEsOZ@oky-_SzxxP-PEqd z=~WBgo|hEiI(^R6cnq4Aibl@putcfB4G@~51IL}F z+8V`b$u=icZQGnvltE-_mGZvni6Pwrf07L~Z13i`a%yb7pqhGDS>m^2>#h<@LffHt z-ge8-j<`x3=A;pK4Ih60V8{wSMKQ|1X@(xkrolL8M(h7>b3b98|NZyd%PD?r_falf z%1)&S2jFlaGCuI@zdrqJ>GIFM-nJ(vH?Qz;&cVGqxBalRZI0wY#rlc7asEn|9WBtl_$>ILt~=CLJ0-f zyuBMQUZ_5IrcSxwa73Osbd&tkQlaPRLl)*I&F;?=;ys-$l<{Xtp8$7-hTW zj7o}X$XV7n_1k21tDc$HZ>acLj;fw(ega1HA=Vf%-8Y$((nn|kRB>Cr5wh^krjWWmh_~5~hN^!FHriWJ&LCK*4 zAheq&$T2^VS5fa3RtIH#mI+{CZLQPRSZ6u?{U@KSJY|^!o!iBgI%>5+ED|f@XUsisy z@S{bauEdyQ2E?b-{pG}OLvqVQyWTnF z`R8Rpo#~0im~Ly~b?(bdPn5>}yq9yv^j8<}It_mdBqe5(mYUeTSCUtq!4T2DOUrIO z+O|sV*g3vK*Mx?WLSx7DZgH)cIuy zU5X20U2RkeY-Xn>wMvc-3XF`1O>UjsxpV8J_*U)WlTuS#wTVh-*SURiOafI1;$yI1 zqZ=A7Q6ur#d4rw!OJo>V5O|7c2TQ|b|@Y8?R?-BJ~Qq&`FDx$1+xN@foP*m}uiNy?p4~h4h_`{#v-|e%-v0BJ-G@s`azSI_ z2Rl}L1Q+EI)(uBe!?}v$1G^c^^y2SftcQ++n zJ2=f=v19SlFH4Wr2jY;3ZehKsoiW(LR!DEjQset;M#qkbL`hU)-PDqkQ!&z z5-FGz7atQB9~T>&km7q10B=rVO;Fdno_h4Ix95L*$Q{xR8lXj}cKNr+GNDuWnSJY5 zznHFMQ!)F=ValzJU@3zm*h-dg!lm}m=ukUmRMp<4Z#tZV5Y*>9B4K{zixNxnL~(}8 zzZsSnr%ZY7p6;P0t^2vtB%z$SKw!>w@!Ww8yH7Ow*&C{k969YB5$Ib}T6D%37HT+? zv*p0q($d=4u02AI9VlRHX%A#|jZ02Wj*AKpbR@;yH09}0QOHL?)Y&iJKWsEG$bS7F zcxqgGL8R2ReR!n#a9MVDJ!IP=GKza{LB2Jv%czzKAy$ijbYfH_6r!P#(a`uq*&p5_ zHZc~^dKgSBiH>06s0!(n7H$)Q`+Aq^7YLlg;ujp4;8!Pc!)lk=8XSUz1^6_OVsq+3 zbByo7CYcfz$_J6D2a&0+MdhbKr{^8~b=RKUzpW+g7cQJCFQPtC5vTuqDkG@O+%_ft z>1R_U-!(%B59Gmbzs}&u?VszR`hvw=3 ztYNzPd7HhgzJWp~8T7Zcjs5J|OR##9#ZiCh?B7#8U85plHj=t0i3w4ETMf0kbctkV zxe&;vzo(KsHi7Waip8IZ{9Ly9OLO(O@4kJxlF)CVuFdw}e#^jBvp~1nzpyo7JR-QXoSlHw%OQB`$Xjyk!))L8XiDU<^+qOGr`$37+ z96XR;v~w4lFpDIz(h`T2FpSib-&m@pyNwic=&%U9q0i?MSlPGkyFnkk0vK7L-tuHa z-Qe}v2B|Htq-lfIhiGtX66uzz3ek_D3S>o?^ovN53n6K%7uNhST)KnIl$JM0jd|rq zE%5F!7psj`5~(Ax?wdGm^YB*Avr05t@;G|$Hhs6AuY;k^^IVfQvu|7 z!-Z^v)SFkJ<`BQjRPOR!?s6%2IhDJd%3b#NWc44$;VfzXMN}TAzA%8CJW37goMGfi z2*0GH|K}VA0?A);jVP>9d=$zM;N`+w0b?O>bSKoo)T~8YL#7ptN0{t-@`bSDBJ>`BX%=yW>f(vI#kDseKU6{A^+wWF}LD4 zuPaC0a(C;HOWPKa<_(ddVOq{gIrdKejkdn|&mWEjhQ*v?yYqy@eyQfvu`}mu$#p-u zBpoQoGwRYZXR8KduvZA4?m>fdccKW&+ui-_GgEF!%=`9W&0~*U->H4UwCU3q?K#u( zF->`FkSqx>?PkG~{=!3d_VKT;scy^|N`zngq9;NGHIGkeif zd0T6JospGg7mk4`IJ9q-O9Z#=e<39t$UY$pyBU)8eYRpRc|2%9kR-gCN<2+fYc;;7o(o?*DvYU4|zi!(^zDhAx1uArCb8!hmiPuCPujO^T5a$t^c{_cb6uZlD2Maw(? z+L~8ma778Y>>l@>wwa&rkBn;H#dP5PLA)M|B#w4ZwfuPK;IC`Rigoz>(4i;wqcZz; zNQn;ia+)Fse6#1g)A!8o?{2x}yN^iX{^vJ6uFv33or=S|mv3bKflZq>ZCtmSgwFYj z!>6}DbwV01HX41~-t^dDa$5e0B3XkPnP<6eU~9|y>Lct`vz6CuPYr49Re!R`-1){q zx-`y#9m8q4rE_OLwI57BJ~w>x*++{$TJ-+w(?9(FsMECb<5sOhDFu<#YSye)VRvP?ta;}_Tw!M! zRX<0I+>b>^1ACm{Lm)yxVC63889HD3ZE5sb&qO4Eu+gTAerp zy@EE;Fn6Gr32G4=8AxTKz%W?bph*N8D!3aYP2>yuR*I}hB8#vpBANJY{|}ea1f{RLnU-CKZL+@iib_FufKMuR93SqR^w{7x0u%$hG1D zQZfrW!088GORvZT!K+aKj7B${+~|8Z-VLXBLtTT>=xRVgLLOo;W~UQ03pBJaeLECd z=AQ0MXtsM(UvVs4Z}YeQx+jw=x>Hb|&s*kv@a?9Jo7R+E1NYldop*mArGah%2lBLW z=38&i5d>r#Fp%OId&X<)iudIwk7L7K=+s9$@ zwJUa;)87;rTUlAYh0nh{9g@(yjh3#?RcES=-oqeLilDlB7d1sq<+lMw?yn8cKYia2 zi^^0zcvhd0$&_PRM``}Q%{COD0G6{n33lc}nr{G8J(?wT>% zx2GVmdND5ZB=K{#mAxfJ|WmSw)T``*M$%33DyldycV;6lDeVB86+&eYgwkn%Pr$c-Aad&b7teF7t3 z0Efsmlsqb`uS4dFTm}RN2SWB435BGvz61yHluyXC;z4f9|K(~Pi%R%JeNNF=) zPwS!rXeW3`cp0sSvl6}@xNdn`0Au*!9HAVBml%6XV;4Zq7(2&liYi25unRZ3*$26q zoed4s)jj`8r>bWMpEWsqjZtCYLGGHv`(Xin6mjeVn#cr2{20@`^?P^JUF#l1HHXeF zx9fzfl7DC1W~zR-!O5vOG$m#TLG@2XoACp(!^_VHlF^p8318!W(;S1L^q=W~a=WXy zZB6Gy_gSXTm_9wb&TZ79qenlLQGKrBq_5dkV+rUpYRZe_2VXNfBiN_isB6-$!FXDL zU-Vs0IvuNUzB0q`!u2e)a~ztjq-LjJLl)tgHB}Xt)EFUq^`gb5SI-P7RC zZe4V=^r-X>EJl(Uc@#gd^d0L3zfoc2=ChQRmMA~dS#IbAcr;AIs{|z*B)>+L?V`#W zVlqu{9S{@}L?4%;jFcd+~<{nvBpI!I%5CChU$qpP8V zG<48F5<6iXDucL>s$6Vy^8qD-N~FMK`r*Qa4!jsq$xV*vwum{itEL%Ne0UHjQS&Tx%pcp)oze$kR;Yxd=r)LP*Cxm5V;_n&_B&b(J&UGT$BSKD5_hqkUR z`gOq|bk;<;sKP4VVM{g5b&Z zk)mc6yN}&!Q0Bb;&oaAD*NK$2nb^&z#VOe6eQ(Rze=f++P!Fd@?fA)Ql$Ln(n${g2 zO;cVCeCp?CMzV?9ASn+gi%PR^*Vp$WeRa%M~EBy!;OUkkF<=H!64)2u+`TI?gF2M z!5Kj^4x>kk3idV&sBUQk2RD0>`iWHf`q<$x4Db)pH$fp05n;h0h$IUo0M;NjMxnBD zN(HjuCVE>;Hw-w4FcjkY|Bp@Z=qo`9UyDSKLZbU4(fyI=7?w*aRAQ4I6*fv?M9+mU z{kOe`a}OMVX2_fDd#q6S@UYQ*MH?%s>O%nq_#QOn5SuJ-=c(#S^87E>pRWMyXq?+{ z_^mkt_m8QXREO=B|#JbW#S(I)T18vt}a zC;z-ZIdtq&r*V_xwr`KqVpvnC*@(RZdm4He0`YR_#j8#qD?fRX%uR20&zRf>N@=>C zz6=$B4|jE~lzmdQ7?ue9Hdruz!a5lQ|2ILB7ICrRVNp>~>HWupbnB3!!J=kBaM zj!hP*%6~ZavYiU-LM;6XC@Uq&+gbw6KFmPqq%^rS$3PE{>ld3%rr^?FL< zzi+e2bL>r?V+%dUHh7K=`O9N%%p(uFrqP38c$|_&T<{a+Vg3lDNYBtQ7%yWtS~3e$ zn1vmfg()1J!YmAIXsoKNJb9X!+$D-vUu{C*fV+|k&E4W_0h~-!Z2)J9?CjD4ce4B{7h$qGRydUhBDFN zP|XI1H6XrY$BrG^W8?TS)q4z8ta%n$${glHoseH7#yu-;!I7dO z9e>AaNgZ7`HURXKn2C)Jcn)F3G)vk9vk*nF8F2Ts+B+NF8k$NpNzpH zBTtiB^pf!6yC3!Xu)ZR%3Lesy5OjFeC>Luu-^FUBhR%BX?frF1(xgd~5|z3GZ+Gv$ zeY=+2vr{N$T0>BW?tN~WJbBWj8~YAS@71Mq+jgyzJNNFJHfUU(*JNTzj^8(~XRl5U zQ_eQ1svYL|US(x5FrjC{s6_tV8cA$P8UMx8n z?rJtWGNV*Pf~VAMVmA-BKr%FjQqK|>0E-5m0o-g*2?Pg-xeMDs5Kw_oCHcRfC`)F> zUytE`$_BSFhJV5XsVfovM`SAG0G#1Z2jvd=vjcP?e|qTiL$2q;(Sk>kY+`Rl(i@q{ zQdV*>)@!l@Iy(O1tqo`fL zcWJ7wJa;hedTWyTbM@W-KK`k5(+S{D$eIvgXz8xvrfa^ccl=d2F70~PmcIZ9)7wmt{SN%6VjCkEYX=T%993 z8}ZmDG6H>~r{9{t|9j1NM!s`aekIGVH{=(*hrg4=-ygAFzM_ktL7r`pU#r5zv7yX`0Ew4QRk$tiBJk0(~Le9I*OK6f4z*O3O>v;Q|wK1%}x)>F=4;R;`uMyd$z7z?poJ)&S1r%b9d( zHE<@8*9RGqM90i5GpC#T9p}D_7Y0t5PC6iU64YzuL(V&Qe$$6uoO|^3%Cqu5XZPmp z&HCjs-jTEO?dxPz<98^Z@|RQ6$VPd`w1efBziKO`ck_&r@oVNSG}A`UcrzM?YBs0Y zh9G6UbgVu+@mKY(+?Ox+CHPLkD>~pNThB?MBe+M+q7Bi!@JUyDs3F|TXO8J4i^>+S>qzyzchX6<9FdjgHalL)G99V^bTcP>l;;v2oeYc45zaY@ z*XHw=)^1Q{$g{wp-hE)z;Te4s%y|Q8q42%}4B>|{xYuq!$>A+i>+MvAKFxaK*RN291|G@7s8%%kpMtyu9- zuBl)B&s{eTkh{iV+B5x`@nX}kiL?oe!P);QD5bp zJ$RXkL3WRFM%hJQ-6fqLn|S;bu1TJqIUCVJNprmNpY(*j`sxfUkH~%L&ssIzS95Rt ze#H{W+yOu?+i({af3svp{Nb2C_h7X$h8_%|*-u;H<#7jluJb8kf1h+|{aL)&lw;=9V zL1&7ek8O9h5rQli{vS>GVURn*ZNaOGAJ+2EW9Q4Na=&^n1K>`qJHfQ>$0e_24dtVo z*@F%xA^Cb7vX7aj&s*{hH)e4}S5$1mr~3Myl)RJ&o_lU;W^8PP54(CxU0#yuqEo$8 zsVOfFR$Hkp)L81e>?XhXvB&QnKbST2OSYsX?1fvXk3KpQ+xlO4O)JPSwyJjCi$?ZN zqnCbG-cX-Z($pCaL#tMq&5qHVl2QtGlQbwB)~UWLaU4}~(ZgCW& zq-uir?eR)b=Lnw|PY5F6ohTMI{6J5CK-A#rAv{RgIFww5X^N_~)XSJl4LlwYz*F1T zUtD24!JB}0AyDY@|Klli&2T;ACTPMbjNA2$o8XXI<-w;awQIqy9;Jx1K631_DO9ez z;*Y>aemM8244&~4fkr2hUzfpT)PnG8=|#s)RODt%smfIEAbmF?u`2&i(b2PRYk=9$ zbo}7YUw!gX|B<1`4^g@BQT*i(6Y_Y;d-c8pCC1n0_i2w4!T4>ETJJR1SyOTie$HU8 zpy=p9&pi9=#Dv_GUtVQMoo}sjC>SqeGL&Nw=`QtouuFoa-gUOxJEhC0=hBJ4ylyVL zsKj?C(D)(Aa7&brmDfygNte#-zzebg_b;>ODzEr2l7?P5ERx_P1I(YDV zAq@e$Z2oX}UP=Nn9o^rpJdSr_vO}4ywJIg-U4~PMhy={+81)RG0NVJA>l^#N_~^0n z0vSW2jd-3@804w`tp(vj((7wC&|Qc+)x69O#jLdof&5ge&=O_w^QB0Y3?~`nRMpnK z+texx3J=o@ApE@(dUQ-8V=H{9Js8_W#x{|$^*@|@D6a?{z>(eia!6hN%Au0AX*S?& z$2YBBsMqA?$Gg+j_3)Ipceha2>>k7l(c8U-tj4=oCm7LBr8I^)TA4dn3C5VZQ~8IM z0TJyR%5L=zYdYma6V#WU9Hc(3rYb*BTGWa%oZ(QOOv4Fc&Q(T?P)blDmCBQv|FQBD zrA353%Fb4TZ6XyyHQOC#7oi%QzpCA=`G<$K2my~C7Mc(f5f=^P#UH=nxz~8^H9g*> z42w4^@!V^?VjvDmA~3!={Wse*zg*n({4b)UCo1Sii1cxCv60l>eg*}dF~s`a*B`z% zh|$ z*KFFfzdpWa&z|x1`!&Dv;-jVfFQSqrI$;aE_js<@3+gUqm*N+O-<$sdRK;Y%r~L7A zPoIR*CoznFJmY`4<`am5q9YQQSnaT=Mxf{QX0v8I=(<1{{6Xg8bE<3!S~@HzLoY1Gk`P2B-t$2q`I|K%&z686rW_mvm!tf<|w>cWN9tIi+YuyfaueLokS zUALm*_=*+f2Uc!bcVNfL14qAITvYV==lMIoSVexTB|7@}asS||O6nz71q0MBFZU0u zHiIO;Z6(_Hn zJ~PT^T4$UlXrH5y6_%7xL4+Wbye=s%Dk?1{=!VYbf05@kQdGho$s+8Ux);HYhXFnG zm#>&AxKlb4-{u(I^#!>XYoZlmzkw`%@ zQjmfaL?Q)|NP+$Eks=C_0V~{(O#{xQ`JqW=mZ@s>DvUV8lGRJoDI1@{T#Rr~C6)vR z{#(piI*{_$)pkz~hQnzu6Yn2Nk=g}m9q+J7mcQqr*^iYKQKk$^2g0LL+X9d!@(2&Jb5h(>)dRQn=m?VZyE@eF`BLH7&EE3?3m0W6@~=WWie^C z&~Ro_U}(6)Ji7d@-kvtJN3hjhXiqcWb+CXkpiTp;A~jribqI#PtN^@85{;7b)VRIi zeAoQ86~b(q^<6v9^d1+>xOrz| zMrJSX4%5`fvdD)81E#)0L&iIl5bY1U#3>y6C8PBtqcw$Nry%dW@yQ-IOkOgk;E^MT zat`L@9ie9G;X`m3><5BQInbPg2Y%bP7brT>8puX|-Mw=c5j?njdZO;cuHYBumlQ}f z0t6#&u3}mM@2~(n1ztp4(d}x$AmgD>EDSSEpbn2Qfy^j;r`zf}XZZf>*CmbOF3=BY zlwE(sFnz(Q2`qY#u{-QasXK{Qd%=o71VimP?0~zeS()Vab)9roxvDu^XP^-G!~q}% z4?Hk>=l2Ugc>lX!OY7?Eii6}k)qaV5Cq)~o4#5sjQ75ZWO0{}}`keawOOuuL@L@Cn zjA*B}#uA6yNO=|uy-1Bw`>S`TudB1v8CV|eRBxCwE-CeDZ}m2Hs=7@5O#PVh3xm~U zU0=YLIXH)a3K+Vy$;Y`SFv-$}3K>11c*! zal8VmL+TbEJ8=p!eNw?FYI?llWL4#9Dc*46#Hp&P^JlBiovu6yk!4TMuzJRqNUhYj z0lUI{!LSB3RSQZU)E_ZOYByj`@=!=H!pQ6$85R*09u^r*1(?v#77>vlA)$dGA>klU zqoX4u&~D)o;F)7$rP4!-7SVBWEfbOwV`HM=&HTS#)ibABA>)GZ>=u6|<`MA2=1t32 zuivz3;#W%fy?cwvu*8e79r&4 zv;3~~nJg9fxikECF8^Zh;d5bahK1*Jdyuu%@5SQ4j;Ya`59O`;bnc>+v-PY*Fb`i+ z5YpvmKi%fGO!jW``M1Dla+}k3e@I&Xx^opnpUkH+*~2Vi7xQMwlcNUptSaBNPwm{b zRqgJbC6(R!51o{$gBs@HB$(8X0(=QxUV(8*b*BLP!#R@iZ^8kd={h{%H`o@)>|KrM|7Z5ahndE+1d5kqKWeU$iH2ni7&Y5ZYs?~!L z=!KkKgPitY1bQH+kw@^run))sHo*hKnuq=dkj*TAL1a=o#q$ID#;M4EEpqFJ^FQAK zs_V?TQ-<1eLH=j=?Af_(>-G&>iOmF@89wBOapT54_{4KlpMCmK|8Uh|u7w)(1YVZg zfqR5cz4x};2X#z|@@wJiQ+}a!mu>@xjT|=eW|rycjx2Sa<66o}JcdI+>K;Ev(Bg6Z z@1urd_vP#BG+gkr0o>q$iOYlSQ)wwa)GU92Gs0l*`C-X&N*yl3gQ0jtgppftLd5*x z7z4a_zS$Ye@?qI|}- zV7B*&KAld&OK0xF%y#(rw1~P;uxHPn!m6lHYjz-|Cjilv6qX7c8Lttu8aNqOR%LS^v*L5}sCc0fy0FQ^k=9$^SlA68xl$XKf`r%dv%suAG#edo-Zn{K2?1DeATJ4V`W8E)g=D= z*Vh}3myEUs>!nL13fqiYV9S8Or0BqimT}RNOdXd52IJ4)x-!cot|;**GovjLf2SVr zSDQFEJ+Y^6)S(WihpgeWaFAR%S|ShTYYb7g9eMr$i_j;0h7d>*JUA=APkDn)>RarZ>T_Ayll@sXHy=3JX^7K?wXX? zokES|Wf5yKRw;ISwuv>PXhRkPg{ZPHBgz8Z&&jFik*2&6=yqS*w(93~%in$H-EX%38au=diP|i$ zjJo`TMb) zYt}1EPEn%GH2U2B4x`8z@34POWVf839EOVrJ7`MC$js3r2lndFyG@7wL%a1Ke#0H3 z@4Ag;VV2`vD@8iRj5)gT6)UAS#Zn1MyefwB*Ri^OEC< z0)mv}rdRqEpoWLx+@xf1g#irO0A@*y`1i|Uku1ekBWN|58mziSr6m;EUh9YggoAac z;ty_utf9zRM#S~cmi$PX`$_xTd7;rU$&D-Du0Kx9LkJ09V#odhdCXHWa;Kju-}lz1 z%z;N8QSZP1{(^OvEJGf;_uhLeD^J^6ST2>EXY>c`*fBtB(Xp$tWyP1vKUsO)Zi|m^ zlX=sPx7<81B{q7?0jc(Tw*c>=NU6>Yv0vD??A>?Yjfsf~_qt%OJq}@NPNz;eT8u<- za&s{r@X-mLP$ZYdxF|0_=MWti#XVqRx^ezDfePhLxF6ZlXfc7*Y4xT~s}Ja1VM!I) zWKjm^3^&S?_QA>I_k{-V7Iie}~`J|NNc89)QMW5j0aDnslYFu^g_OE?T z0bw+5+r(0>P_SySxu(X~w}$BKV8vInx9=Hy_as;RgE#fZ!*4sA*`>?bvt7Dmp4IFO zS4l|$J~pYyYjYzacq|NHB1tEu2CpC9F{!(I^(U9x7*892=iOpgfRP2LNl;B$R)Sni z9>9s3u~n2gI?a8H!QLhoZ~~}?dWS7`Z`C99{%wnFZ)w9Qsq!8^<>UN2iAnnw-t$_P zk{2Denojmzm6e+ndnZbQ7Gntsnn>V8Syp02Seqa^H9Ph_CJ*o+!3SOaiDza@kN&B( zG_>dlGx|uL-dZ>{U7V0gEOel<$J8U@`|~^w!otQnlCrEn87rT3ba6XF*VU ziH+F%nBTO#skN26?42q**_F+jorV^bF}KS~_11EB10A9?YIdPK31zB>hUl%m^2wFf z{%QPCJb-@*Qi*36&!K1h*=bovT&P?A>uKvxkg+}r;7G<_S=&7RWb-ioLCJldzB$Bu zNXDJ|_!oP|znJky^ORv~p_=vaXY6JC**MXqC|e2Ai2D;?;pI;*>zQzLWhAc$fU$F zP&IqwsYPpcpRWzswUt~2#o{oYv{+8^M6>%wkIv}bxpVZ0+m#1k_H-EIE?kHcicdRm zYBX6flb_ulp&C8+kSCfQl(VESZ=QT@l)Az*hc8&Xm>c&=)CWQ5wX7G#5Q>M0q6 zrvl;^JdPTg~PMuC18ENG%UYZF=RowxHdbabC?x4ujRiJw5GEJ4a|nyji#@ z4Qa|k&7fB5u1eK7L(9T1aTc1J6PnRgBhk6@aPaC9B{rh?EB^u~q=({5|FqUMAK`_b zG_w+U?9q$zy%KikbhQyDWvskbn|JG=(3M!OlE5*c923gwN@# z(+0CWd&<4QorojwF5~huVjsg2tNi3tZMJEHNjqAjtXD^Zg zs;Z0B-s<_Gb!l8_Fs3Lsy@5! z!={-OX|Y7=Gb!5MyZfluO1}WKur6|8F<_!viw(08$>o(diI0WT)^(UUT zNe+5*zqMerS|CMs*(~cnqM;q+SWuKiKiMzU7f)|CRG-@Vi{7-zg#7Y06{k;!Xm)gR z*sVbkVYiOdo8~I^Pr8`oZOzl{vi;S6L@$%whyUMmpR(uGe?%=`^+A^4pu{%IM7jqe zMVD=}3zzEgt=7WZ12+5gnd;MLD=RDO8fr*EItQg3Orpn5oIH*9@9de=Cn4ECb?QXL zsgoy9o~}54O7q@d8ITYY*|>krVpzarN<8cT_S*I9VN%`q+i%;pZQr0Qc0`REcYXJk z!57QR%P$7E?0)^Yn3#mbq&98Zq$I}2Xc*(V9P7VCIT?*!Pe1;^gomDcNf0G-lxe2t z9+~_!s0K|r8&Fr*XoNT0+t}!10ZZVe#Psgfs~0~#d-Ukhy+`++z0wEv=-GqnD1CbO z&H#%Wb>>XP(fmWZw{PFR`%wPTiZg^|%JO$Z0e`5lLW_wAYk}js1xdw0zCJ#|E#ks` z>~@PiG(I6A0d|$Bu3g)urluxP#{jPC)OKCFcW>V+5l>Zos}9|-(oAB9{~2Y}NA|gf zMI!SJe~gECF&527c_AZI>QDd=zsdmt(0yjB1t`U(y!x7@A=_|@>MPkE>9TO-AJMoib`#gA zEYhmMp9C+W&N@T~d%bYAzxYe>*%G=*N>llW^h&P*|3Mi z)xvWo+R|d<&fiL*7xqr>*RNlsNvVUQXaCxNP(|@`O^Is#_!E=vzWEl`*FJ+sUFTyD zh)GWB*neDhYTMzYGfD9pEM>SygH@|)4O=H zKO&>bswRkW6K-aIpsIe#$bE zwWuiSuJ!5eG46@_-#c`eGV6MOhuVLfLm8(jL-AuRBGI%Qb8ivprt2~E9&wHn|VIyG4JQdX$JJYS)DA!pOE zjJIy>IonrUCTs)QLy&I%{^mRnwlI7BGq2GNM8d_DwZpBu9_T?w7F%R6xI&CRVNhvOV@Il$C z6dyZUz7>42+ihm65*Qv7mfG}6pZ|f#li-qNCr@ccehiB3z=8by{RfH)(V^KShG*O6 z&Fk6oyRDQbUdY*Zr46BQ~hiX|`8qb!hj_(nyOW zag&>(yrQL}?&hoK&ZR5sDb2p|Ffdw8Qd#-YY!%R`>Q~>v*S&DCX|~A7pdkJFZJOWu zeGBByFSh`u!sa{IbivSUGhb=j_QP+J{JD3{_)BKyFSOu2S|E4-o$`_Khuz%0X3Si@ zI#aWqthjvlnm)XmEh_Tz{Wrb!zhD1@Z+30;f4Kgy`t{S-u1){{;rdrb{__3L z;Qqf(^xXf}fdjYd_wO0~@05kce|fH-xpuAG|CQ0deE(O*|9AJ_tmnh7nwz$QB8Vjb zAmOIA;J?D%A$q&Ib ziqdRHi%QE$>?+4~Yr&m|LqoKC5NX_GX@w~%SH0^Q2f^@&j*$4$Wekr*kL98J*x0tC za|Er|P>;)iB@sS7qQpQwwBW>0NV@iIrx?q=Bh^Zal-501c>}v|`+oJ7ojZ1H*_Jlw znqkBHcId1H?y4!Jq(A`ipOR?zK5DS>jFo zxf97tjur13u22w5dHI+#+$$@#SU18YgW~q47cp(vF1X!JsAGr?Xc%d%tVfE|R zw_oR&V2jgWwAvi%Z@+0?zG1CKJoX~=g--RELFu@!UJvpQ?|$u&q3M144I0p?&$5ik0L$V>2I?y6jL4xTl2>V6gztLa$d}q&yB?s!|fzp;#TtIj34zGmYBY zb&#bCfhoFPeZL;$yuxo0vCK6*q__o5Bt5d=iKr9eR~fVm#$m(>9pxX za7aInGdi1};X3K=vkpgmIX)8ij&A1f3_Z_zo^9i%m@BS!Ft^|+i`7~4qCGAS`S~b1WuRn(CKY#>H zL4w90L1Va7TWCzMq40U+eX9=qvhRHZsqY4ZvaeUKeaij!XSWEh+xoU6YsRca2fX?}{rGpE?mlrYddTp> zBYL(N40%qGGJ#}|Unr^M<&}AcR3J;ym)=V~W1kX19gc z1WS`$T}W7DR8$ZSXu*u|h)9f&0>~8>79STG1yf}d$?1RhtRCGVmRNrz@6PzkyFrVW z;s*)`&kB4GZH=>Mzkx*>l%<@=4i4gNw#yQalt22e+1~b=!5L-S_iQaKE^r8*>0wi_ z|A&jd_3a(jSh=Cpua)^!-im#V*^0Hjz3k*^?=IIoltC)SY-NTzY0yZ*8rzxbOO-E_ ztxA<@hOX#3br?Xs)`3?}dj5qpB`ffm7Y6lh-#V$=z@ax!dVcDow>rQ?-ty|}3*Y~6zBX+*pWjYS z!^_|ppL9QHxsxlnCgfK@~Y@X+JAbkJsDs*-h_$dAG0uzCc5 z>7e-!xcS}*_uMgd)Ue^5`u7{quba5>*n0a-x;@?3YBR)j=z7B)1KOr)Szd4xyQUTj!NI3a)dUSp8|Zh=Rk&f}hTo3ZYg9;_hA~J_fUcd$px!~I+eBW!V0ibPw}$fa z9{x?>XQG6G?XSJ{_PlrBa?E_?jX6#^xfcP4(`TJwfu@oYlgY21t>}f)Y{kW#t{X(LnqMqNK~PoKaSwgDhrZlH6c!*tB=bTChw4fN*&?N-Fi*=u{&9pmF$^#Y z(fqir#pZP|88rX99~ghf9XAgj*eiX=kde1ucW)f870& z>8*M1E}Y(Ux*8{zkIAkkCPX+))UhZ$?ocBWv?vU71VogijxQC{7Iy=Yz~bUk4wZ<5 z%gH_gTb+g9q03971NVUz1v@%Y;RlXGhn(v37hxfvBrxPZAF4}7Dm~kgp6y4^rqZ*i z^lXTPd?k^Rsfq?iKuDn|(q`bmLO>n(X;7jno5pM)SG^Wwb_%m^iHc%qCieAdw#;EfU4%cECtv6#M{QEM^9aInzbY z^`lq(1OD_ik4$B9=MuNLkvnhBu8o6Kt)%EE*-_{zPznMoVH?GD842kR(Nv5Bqah$2 z9U^WJXs^CdVQi>c+<-eCzHj_JcicUqb=y|Yzxcpi!wIhV_(XQ>Gi<`tpoS~6{fn5fsrM0tER7*9S4SOr8e4uKkS6+Q3D@!VEykMF>YxcXdWVN}Mx%Lu8 z5$7)d@|*8Jd2Ln(ef1RZi+S(NnFNVm$pWJ1SWt5@jKy6+C2#D5FKFOKb%ezNRT%jGd6}u@l@* zf5MWq%*Ec%K&Oi`Z6vYz#WW==J~{Q7(PPG3KYG-#8%Cs!x%;kLhYe2cFz(5z4~^{K z9`H;}tY$cFG6}kr}rdd#LLTIat0Tc zq;!t6K)2sxS1vHLvj(rIR4q}=ABKvDtP@8V3syu{cb4>~xnIZvob6fJRjLQ#@{{>b z-U;I&>!nfE68%{gWGxrb4+^j|9R5-P7Cpd3(A9wde?N<7MuadUy2QgJ9d86npz>p| zdy6G1u4p>h8?S<3tYk#8&CF2Q6!-)M+;?A6x9&rF_3GHAT?RYOY|1{oH17ITO))uG zEM1VI8f^(o(ry_U8F8R6-kK}AV7&E>`SVvSn7d&9g2B8LcD;$xp~`Hr2o={fwf?-r ze7-tAFTY&##t9&ZDCwANGqWD+(3`!z;GE<(o8QwVOz_1cxR3j|k0yLEs29&&7`YMJ zqYJib2;pW3ND-1^R!BrjZmw{;s1+izGz0Xpk`xj4@FQ(o_3C>~m&B;J$VZ-f_JMIX zw{J6Gz=*D`ViTjEn)=d%6UKJLIhpO{BGcYzigWn3=*qpuC@zW%84Ym`a|^lm7$Q4N z>X%3W8sUDOzmK>dQ>QuEavC(vIgBmZ4g6eBM?8phWih>hVRK%eJ6CkvaHLu=ubjWCd|zohz!hx5Lj0ZqLWpX2%a=vstaOWVA9?&o|)B%&yl+O$qeFh9BK;{4z|2f3D%QREnM%wgfX}S)OH9;Dbt+;2{X`HSE zplt|fe$&&+7+RUq)MqhBK@5@`q)V;{ZnNalc}=1_JE)=VhMRoNp<#(_Q!HMI$z55q0F4?qp-G-IxH*VRoC1>5b^-bctcFo!~C9{Hb*%i@k zmR-8ENqA>VTRj`EX5poCn?LllyEp9~OuKKQcYD)ry&}DsVLX_2d15U#y_7DOl~ZF6 z!8ZYoi5aiyyC_Xoj%@Q|ow^R4@InUG?LU<-LT(>>`|bBU(kbVNrR{)Q@4q)GAu=kq zgBG)YKaBULy0ZQIcWvIhdH??XHM~?S_wWBrUZR7hm=-L2Z}#*I4BV&PPnec2|KK$Y z{^#5e80IgSDenTEHN7%z$uj&oFY}d$wHUmk7!g)$U7bB7SnA%&OPj5(&QD%yF=EE? zV49bTDc|%`hFI1`?ntySpgK|3n!Zs2)kPXe3^FNmpKc7nkt++Iyc98zrSRV$=;__p z=-s*W?p%8JHTvu|q+%dur5Jd+DTH-IYYHI2mcB)p^~Hgf%8u2uG8dKb9cerEI=k4S zqn4wHlMIF|8q!J!P_StFUmo=UXWe)*c$-y7JQ@5kOgGl?4%-shmpHAGQwnK4)n*6Y zaQ&EJJzLgY7(A?ZQbbL|g&`wCC?tNt=pWPj&Km}H={I=vb=S4?i|aLf)W{LTGrG0& zfdsW}@||~09N4FA?Zujl7ChXk9Rt}Nv~RfD?@?ztCLRxWm3TaU%Wb^u@W4)QbXA|L zafW&6Tjoo$We)OenfD*D*U_fM3l}W-$JaZ5ef`Z(zgxX^+b@fk?D-|X;ArmV9Ur~D zWciO@eEG#!-~6y^^_L6h&6_)S&inuRVaLwxufIKS{yX!QEnBy3+iuI=?W@pN{<*=Zl~?6f08&tIZV41tW8 zZkS`MGf+p5m7yRQ7u~M%ctNeuycK;O%H z!ITo6tvgTnt;vF_%vSW8z#WVt@sk*g+Aj`xI<`*j}4iv-^qHGaKJ#C=Lqr&!` zm++AU`0^dG(folsgPicQ_@NI(hW~$PjBW+ZM9$wv&X*zQGm-O|BIk6LZeWUr5;Qc-`o+&twuudV_zjTJ9NpZE#pvi%hL`LtX3@?_QIX8>>ai_vRWK z&}6z>Pf^WY4q1BPx(r}Tzj5E0tM~Nk3_PqaLc*)P{pDBb_%@#Qf90JImw)lqj90!{ zv1!AmpO$_p`_t#WXU+Lw&YN$%J@13>W)0zhshD(mDObzBw6uokQLV#lRTC4V{T(K@ zb>c3FNzeu&_4?LG)-n-%9BL#%l#1M6CHTCMr9o_4`3j$)pZHZ+XcBZ}iDm{I#@Nv( z8ku&U2U}WpQ+D3IEcK`~jg`eLdk?n%S2-UBBJo7k$UY#dCPI&?5Q4`_WM1Esc-)8* zaKlR>HT&c%eBv($@TN(?n8E+=4e@C9aY+1hBz`s$KMsi>hs1Xl)DucV{^Kgu#a{ZQ z$sz3#ev$sG9n!c6FKcn}NxPeT()tEWH2GCNY16g0*87ajgvb3|me@CpFFu&jKLdav zgh-Z$?i!Fja6o#$K}^KCeEzyJ*LugOK?C~?AA8s2VQiJ&=4&&Q#|W3q)=%)R`ov9| z;Sih0KYOfenmyLW>H;MD_a18~$BeJ9T=Mo{wy5IPoWK@hsycUGCgfe9mfSBOg0H{* z+I)<^$@+(r-H%x2zq9nSWs6^bQ%XhNN2>JfX<38VbRftg)T(oVX1L(x<@mGj`h2tR z+F}j(lkd8-V8iH6`OliEi>T<6CI_}hP(@O)j?gPIDu~xrT5lRYY_-XUt#9*14PWKM z?kq4fQMS6i^@yIn{CgbP9sv{yyxNidzg)m0P1hq$w;)Y-BTdaLUMbL_;}FVY%@+iL zt~5oOkSBRhEOG9)AYYUHvcpS6g{I@wmPw_jXQK)g$;*SS15-dt7~ML(f9p0~+PClC zXV}==?znkKpQP|Mna|(WYDA{Du_2|?@Ru@ibUXPsTbZh*46z(qv#%<`R}4INP&fs! zVtNef>ObZ+v#1ygbWt~GH2qb5Xs$e2o8PL@Ym8O7oBw@>I9Ly1n^!x-Mr7&b?z z+WgHil(GEFaSRnr`TKE{HSd2KOViY_TgOFeZT{wXS`q>-{hcul`O}!bGi}-%e;C*3 ze><)v=j%>tZT{xi${7FUaD@JOd}TG!+WgHima+byPV5=$iH!AqjP=Wm^+cxKM6`b= z5knD9+y}Z&LNn=tipCeEFArucpBw#KvTV#*T|f!UNZ`!DXv@ys8DIdc!?Xo# z;xgzwN)FY~sC63NFQcDtRO__s9)04eC!=EGdk!Bta?IFUZ@X#a$U%dz9ZHB{Nc+@l z?|JB!P6N8JmvO~voq$95AKtZQ&6>OubuL$x!VbN@u?}}8{C+Nr-?7t|>VI0hB`3Z! zAoO6Rrf98Zo&DzA`AZkgc*7y}J)c6QKX3kY>9Q|Ant}Vs^xA8$y^0^{Wy6d)6nvb5 zRpLOWzigQH>3dRcYzpdqvSr%*yk)|LvP%rR?V)F5e3d4*lP+pn0 z@#!q@ubHnI(58beP9lgwu}9CkR7aT6*EgYm|Ned#?R5l5ekWXn0tzK&KGMoeZTgnk3Nv?^-B!?oH?H_d;e=sqeApX1)V*6ipHKx zIS~*aA0Ob2^7{YSd(XhAs;!UvOvy~zr1t_LgoGYK2L+N)r1vJGSG*SN_1aJ$CMTgN z3IZb71yn>pKv0lgRGOhffDl3|Nk}J!l$rOp&IC~5dhheRAKowTnPle7nVECW-h1t} zSN*RBof?5fnTCm0B)&&9A$%Fxd92PN5z4F(lT#zWZ^EKQxpB5D4aAmU%$udN+0_V) z$#7)RWW?==_JaAsH$W5}m^6p+ME>mwv_6-b>7n#_D1CmTf}%yU#YjE?v+-hviopn3 zbPmuO52oH?~}dC~czuXlXC?1!Qgd{pdQ?Qhi) zQf1MME)v8KMP_GlG6Y-Q9E-zNC<+;no<27YYxiX4o?N*nSMJHopqilr17nS2$?$-) zaf(XJc8X!){$CfXsCy{}dP_c{&G?kGGq`XD?+7FR2C^qK9A_hoJhq8qHs)>eH3bd2XU!Xu1YB%TlK3=DFdg z(M*4GH;zLZ1&M?*h5mhktKX1N+S4$c!HCBcc+z&^x^jdOgS5ry3!4)888_&86_zTZ zrY24$yEA3sPW&OGKvQCiQne&m6VV^zB7^#jdj4OpKhrnB^}z@G#Q>2S*|STparX=y zO%X(jeDM?*_^*FG{rpoeKBl_H#+H?pn?t(cqfACT3kYt}BD!6>jpDX>x$)HE_&~Mc0K04y=0F1#oDh|tzNhG zn{U1ugbn%);L-@b;PEO6jQPa&)m$ZxKQq~{zPk8oadBaBadv)s`pM(zl}@Gxo7JhV zt};D6j}RFs0EUKFQ=?>wj1d4wS*TzpV#|i*F-LIfT*}izLXkr?**n=?a-u~hVMEt4 zAf%G*%Gz$LvlAl29Dvc`>lEI>uDI5D5tY>fRcS^Y*q;S`BjA5=Vr{&8GTwq4)Q|D* z$$aX`d9(bWbQw8jaL>-M z6cp?^wD*MjUi{aqkByixWb%XxP!xg&5bTO&&1Ucp_VX;b>>k+ep27DFR|>ACD2ssb za+#Yv>$7jRZ29@XMyS{_f8K(3=FXeHXyvT=%h#+?gHx+3E0F%L7L+;#`1c5RD;K<+ z(@KxduE`;grx=6?Z^CnIXoz`!o=(m^YH?fJ2;-sVKw-_;QNM~tM2}3SJO~<95W7Hu7Hkl~d zmsMx_PuMbsIW@{q2Kw%RU|f zeLhWWR}Y~3I)mmXW}QL6V-JObUpJ)8NkVh(%;PRR^=~MjsWJ6>@6guWI<@ejsFjC{ zOIsYv?V1NvUOt~*V#h7qSg+StQO+<=jrnl#mn#>(w*&;04Zr-lfA)LJ)^6LmXWw^+ ze%rZY=iXJzKmYu*BdM#t`|?9IMpsrMWm2;*)YR705mhV5)}?^Wni?Bi@if)b>n<*? zE>3EU*zROlG`SasA&i0{%E^akz(m+&a$LNmI`bR;kZM)bvIdzD+J+ejN=p{2d!rfy zUa1IX;s|1_ct>0XBj*<=4mbwYAstve|GU#^;~Bwt-ta2kib{g6pnk|+Ryc?s7lv4# zL`^j4l0Eekr{0&?txH@~r%s(ZhXUkjHF{9m4h{3}KCB}xJXFs^OO}4PbnTCOx9tDr zm%TfFIiOlCdh)o4Y)Tis@2W&3G1(wms8!ZLXco7ESSf8N<&E}aQna)+kd~s1GTZ<1 zxntFkx(m%C99diBnu2Ff)|O063CuN1xgCg4BmfnrqgaAlb#LD)YlLil;G=Ai`!GqdI zCr)_s-hKmm`_)-}%pos@`S~Hp`v-+abV~Tg^V45O`>EGa=2*(Sx%#?YDX2i#`TmE| z!+Q1Z9qrm!Te!n!`*|B);a~PwiKXQ97bFeMpZm!dpRf3I;rv+xSz8c@nWnFy;Jm#G z1?OcZ>ObsHJHP+Y%AXD%J8^zny!|Ejk5+#5=@(yoymZl$ufP8=Q43XjN}U<2b9S?s z6)+vkv$Gp4?z)HweW|emoVKiLi+c;gHafMBX&nXSQp5>?~r=1L%qh$X7WXp~{?;brW&Zm8Rm;Nuk`o<&O?(NzpC^)=B>uzyQAgk3*fylVa0bw6(W{PXwvYo__9?T@=A zzqjz`-8<(kcsF^L8cQH~b!7t=bZ-XeWSh2a!s?4CCr+bEb;O&Oms82}-03r?&)BGg zS5i)+=o1($(@2;rv7L0^B~2q1NrYoSZF58f5sl?sQe$Kx6ah^_bNOQv*Ah!6sxhl1 zA0xi9ClPHTov5*deZqB#l60tFh#f@hiaIPp3CFxJz(X&5uCXo8|HCyj%9sNGsDYo^ zA`Hl*I{x(?+RIqVVyQP{w~!rP^o0MJ9S6)D;hm&7bF4k|0rm5QeH3dba|(bmUM0Xr zXp4=7N1oV259TT~QuOGi2SLn^@Cb=9WT3@zDt)CW10>cksox``dq=i>bo!Ku;|8{S z`l+#fqTSs`jvdgyZ-?k7pBUD?g^TO(QGNT8rvLo2WA2La@ESdCQ2#!0ecyWP!I9m< z!ykHNf?@oq_)!T8aFcGYOdB2-_peu;eEgqd#!FhqQn%M09@V4Aj5nWo`mxDl5|ky% zBG;FvjqK9xwHZ%7{_q5~pO25ew5(L`;}cA+$xuH(eQ8Ol-p>#4*Psx0_r}7)!bW#@ z@Z(t#ee^|zMFh%%66;5*qVDR|t2&=ppI~yjLcP3feAVXV1vs-`z|9}Zii)@J4?hf3 zl@ZYHbPV}VYew5QvFZp!E}}J$=i0n$O`11<;j)hwzW@Ha@4UTu{`?O=`fT~~&p!WX z>EcE2q|ADo&z3G+v~d2sd9O5`=NW$c8fSS^WE{dlUPgF%j$n{6#Hx-*xS2{=$TTg4 zUBcMMb9^X5&LEPHM)R$Ec}zm9l!_oDIW~^xk-Wc`P-*$y{rp~jH_1L;)$BczS*FRJtVldzX2z(DmW4HnSBY%WM0+Nf z@FR^U%`^$2a8j{eG(D`sp_1f;OebcO7@#?#w0ErREbrKxu`V+Z;J5KA@lZ0)*hEt0 ziM{{LnYEcD6>i2eliD+rJh_~wqPMs!X5<3q3Q;|JgS$3GsKqCf;nF-MX277XwK+vC zVwyF$yVTSuExTl2zSe!{m7Lm4Xa7qVyR>cABDP=5^@2VURg)AMot}9$B(m$VV_5-F z8R=KZb7^SkXsqtryJvLXiS7Z;naBK8CFanfV`q-#Uc6jZtCSX(+VV+2EIX7+e8;*q zTlehxVdcurKWzPe^R}((ckWsjQ!`IG^YT1*tzW2oCq3XjPivErHU-lrciLp6O-7g( zU8&2+v?}_q1$+NTWsSah^UqXsB%5#nRWTo0x9;mz`*s~t-_d1dT6I2NO_q%q&enr- z#IXv+aJCp^d4JfX@CDXcmK`GTSbyN6z^>xCF=#q23+b!`Wu-F9>y5|n9o)|&v!GoeOXi6W8-7J5U zc_ESjnZFW6CpHHULQwwvHLVW?Vf}`!s0qDDY(+3tB(O{1%0RbHm4gOs-o8O~>XUik zKna1HPHu|H)77H#8XXO)mH|QI@=RH34TuBY+TP8x`!PadiMn}qg+~&_8bzX*rR%Lg`QmD zW^^cD(^h<%;VP>iwkdvp2^)}*2RZDXshRD1_U$|J(FX=8Gx5&OQL@N1JfqAq4(Z_U z=`AD10JVvUifYxWecQGHwZEsP9zAhNRVHX5wr}wKG}_i9Fc=A|9|OagObp~WSkh#K zzK677eMU`YG-Tl3a~0tQ>lTf^Y$d^!d9S*Px`K5J@yYwfF-Mz5(x%6t#1trTB$PN3 zN^A#kmW)2(B3TZYO4Z`i|=r%XuzUSOm9{ZGB}?6Xfi^1$RNBl^WB5Roz8{f)=(>qfPaz}AVQ{z3Ti zET_KRM-S~QnMg=xzq%Ff9HO%~3LoW!BLJW(}}c~W$C!)ysS7tOPw!Ri8A zfs{lpn%xfYF)_7z`C2?2Ae+LdWv+p%1L`c?T8s&-q5((E(?%S&Rdb*okecE;C6z=b; zxMdwXaVn3xJusG8*3>kT|EsyHnN}hMvY{I@pa1{1LNUJA^`K(wua}qAQZ%5hp{}OdF|PX< z&8=G**G*e@GRBPSuO$YZy>RdEXOH}{cJ->C_v}ANZn!KME(#;cxV8!QWQ-Ztj-fE< z7TpKj)jTpfx^;)RxQR|hCxhTo5r;>v!fUE-W96v%2SlJ6Zes!lsdi|4P~~ zbqah8i*?Sy{qT0?^xcssFQ%}*;OI@?$4l0OlA zeSL~X@i%&=*B^W4b}IN}30HZiF zUdfT}`750`y61LUnQ%}1{h)QzJ%6Pat$T!KeE&={{$9EuC0e@YuXIEAI5>jzk4SVq zn);{nohfI>p0758>~tr}*|TTEow(q^LkTQo&p|hqJJOBa?t3Se*gh&=NzpwI>3n%d z(rMMI_D-bJx8Ky^)Jwk;=`=K4yA$bTU8u-Y@~m3!_MdrWy%g#Do8GyddNyoc_ALcJ z??gRX@7zv3$4?}%XunGD_)G8HMmt*X+)g_K?oJ?7O(9>fDCF*;3_2c_NZT&JY8jL6A~ii)aAc2K@cOTfA; zDp9JfZEoj8AAPiJ+0vy;mn~cN0o#%fmMmGq>qj4{p&@}j{^8ww-qkw7-z}i{!o^GF z6~(nqPF0y(H|njH^6S;b1*J+v+l+JBO0Kod?X1eh#ji#C_U)tG-LFxh?&#hvTej>w zapJ0r+B`nKfB(Tl2M-=PBChw~sjs~J(%3g2fB32Ar%xL*+~_gx$v1`|{LUtjOC{{@ z3o0n)gNg7lgOFxzb31qQ_6}}|VBFTX2x%rc(PVMZiwlf%kx+aae_ zmjY2!T2)*^Ew7xM!jowk*_6{Muh8o&3j~wzcQ)h$V2~&asDft{`cc-ug2L|t8;PHqr=z$PSXGVIRBNM z|NC(+EdrCXxRi35IVGhfxtA&`{(hYQI(h$ooVzlg2Syk{zAz|lkRk?0XjLPz8zR1l zCmQ*Fq>`)T@h{J((FRXz&&S!z0f}uLuwgg0{#wu?_#ZR9z4#$6MbT-+5Xrm2y1R7` z=*9!wO$kWlEoRjcL#+G2mlG&EYu7zH#cN=A&bFL^j^}OLzWGMcPQrblwaO+ZaXGqi zYh7I1E&<_`AozgX#qs68PmNb*di|E?6X%oXc-HG9BbAZ1I9n(FCs;Z|EYo5QjenVHn{-o`K&^wklcjRA_bRFL(2s#~6br8H2uz!DA8n za1tA&-O_UTmv`J49Vsya`Y_DMt}d>alr3cWVg%lG0o=C_46+3uvGbI~wIa_Hq_>cR2`wp2nZQ8VZ6B63Mri?WJ$JUz7 zE&8fT`xwv4>bmAVMm_!bqy21ohkid^?GZg7zE8lIK0V_H-rcQLL|Rr%ztL0gn>gvQ zmtIIvwkaF+&nk~wV^dR$6#p(=y7((aAWhxcw^)3Gn>A||>E+dCJP*_+PYHwtR4jtOGZq=$)>-O!#Pun^ zj5}8f3aV9|&YvsK)vlz|fjRE&+9E&+;7Zr8H#S{KX#U^ea@Pr+1FdZZ4vwehO%yMv z(qT}1XDD9k7j)AyKL0@RZdCkB{XO;6nbXIQ9d|o-_Vg+0|7p+oZE<7B!yMA*x0UTi zrfoJ4ZniaB{yi+r4S6tS+&TMp%W$Hsc+H@&zO7hNqY zFDs-(Q%SMgAL&F1G+rwxE^V}gM@L2Y)fED3S84DI4-Irv12=8jahOWpm(LyEvFVTd zP5mJ`xIVNlg4W&0!F5Zeh+{F&p&Po$}C)G|DC>LhTuG)u*>NOSb10Uv_TZy4Nc zOBRS(GTEgG!#lVnPD@l)Ac8z$-nQ*>pFV*N+uX7)ojZU2a?X|F;;Z0S1IUVL&tMQD zSKXYimsei1S%A}x3{u^8?AUei1Xnt7aMzBe`wQUy0-y|u?z@2$>YQD8B{%bYIj4MRKXCulM5RipR_5uO@88E& zTX5aHywD)k`RJh|r%zq_c`rvpyh1_(n>7mzQJpXR^26J6Q@0w7s`GU)r9gqJzJBXo zVi-5^1xUZNiqN|fIqX=!5Z}zM1l<}9nA2T$?c6SRwtdGg#p^9xvFmu>z1Lam4Q zw(Y;1RK4nnhRVx3M$pu@w7k6H8gvWyi9Ot_YX=XRGVPIPUVI@@*{|%yI{uRCb@5`M z-peaAA|j$0CNzD~Mb+hc6=8Mt4TOle1c!u$HK%N0XfVnB!vD2ZQ0H1%THjDvSyo(p zz1Ec|yld6fRn^yPt2Hho>&e};qc`oin|9odeivF?QID^tx+p*E+$m5tujJ%l6unwj zT3u6Hq=ue8ee{<-+c&LSw{Fw+J--}1ee~%5-9p`mgSs!%NnztDk>hu%vEN?A;5<=iqR0;o*Jt=TpJsVQ~a*0ZeaipCE5% zvq|q{5t&yF1-OG+lO@8GbaPtG-h$zP^P)r~+6^^=5Yu)iTlS zWAbc%-Pm@*^G{Eis8W5UCKnVrq;Nywg)G$qL|kRT)xyH7#buRnS4(lRV&+QH;}Q&I z@oMCLORH+Q1zSVCYH4h|Qo!;AT3XGuGHT3eqc6O*6L&7T=fYd1s<3OjcI`WKXy36@ zr%th|fJ8XKEcnx@L+g&#ovu4kcas0q`pEiz^-tG7SO2&=RCiq;rgzbs_3n(^dEF)6 z2kEY>12K)SBRBaAFj!X@ADc90+;{>3QxELfdoXqPo@4o{D=DRm7A;=5V8Md-RaYG5 z^#TKcpImi4b_{3F(cceKTJq1I{9o?7VOPU14SO5*@xQatr!lti{>BFz@BRPaz6BAw z4YN(q3vMDpJBS`XJI-~WTrOvttE;a!P!Q<4;&kHhW%s`05}EyLoYp&x(Rw_}BbuRTrxn}EtZqsA+#&ZABKoq{8}4ZJsjgtS!E zjK?)cvStCcG#ja)WH#d|1(Dg)>*f)yhoiXDwopc2C_}1sN_4PqNeOl5ON#S~OG@y3 zXBQR~p1TS=gk}v&<81vkA{TwL&;62GT5;{d?>RZEHZNPW?4wV%DPG}-EhjfH3D@MDAhrfSI z)PBD}zyzGoqd|*LF?tzI29G9NS9{vrn>M$n&4i=yk0&7`834x5+}+FSQ^SR%Y9VS{EdeX9LhO29Cy}er2o6^JvB$xB)m0!>@DoE z_D5A;SJ$%alB#;8NMJ9j*FSSG(2cXA~;#UV0C} za7^wYUe&7agMBUTuC|P!Pfi#+cIy4ppMQD!(~rw{uT@+=eOB@DHyh{(FQ2N0>L!k0 zq7QE9B!cr~q7QHYDbyV^;d>_O7h+QtzP<-Pu&RKLqS1@ssRe*2Qb8>4-e0G8{6x>YNvEv8Gx=lmMU zm{03Au2J1mi;7a`7ZuHySBiiDW{WEffiEXVbrWA*T`4KL*E^~@XA@dMXJx&rE2~yh*c`vO zk#XS1FK%QUm~T`LAajL4B?4yks(Hq17LVTTEMcm-p|G%FXkp<{)%?M-OBt(vxO7G} z>mwrcnGq40s+rhZf(j4-XPQ{zh$*$HXgCBfN+!mwV~(p!<=+7jMrCxw&C0*NKAO-C zE|X-*-$<(~x15IAGB&R_b$T#5L9-RYO#_H0@|E8hN^^0^vbRaL?A zUtlo0ntY?8qFTpx?b-z-=Pi)Ia!ypwEz{1xE-Vw;?vRV zB}Q8O)x;D?uryUxy?yWWJx9+?r{|=GI@CX%@s88FhLu7(i!e~>uS*hGA))c- z($A%xOOsu~)_|ZrcaCpmsc|(mwNmI+s=X3V%vMvaZB;durNkRwtCV+DEIG*Aby~En z8aJ}0tj;gGP21>ZLC(c_clYbs%qO%(bE@hkju;%Fr0@Oh_v*l&V;+96Pe{#$OIhyi zM?9{E{f+;Ft4tSq&61Q!eM zISGK{$!e=31di*gYs<3J(y}VAt@?Ju&I1RIU(Crq_xlGQpRElZanC^a{NIRff8Ax~ z_6%EGyO@$gUxQtuK0-;deXJ6jl;x})Ut<0K5M^T#0^|4Mm`DEDp{*pT)Nwegr1Q9- z>?R92T-Tc_=e>0iILw#>zbg;Z^Ai)1viqxX+F-I~%J32|DkDtziUiwU z%FNHnzmk7Nc8RV8yf#0dZ)K=)0`~)yuT%sk>w(RMrHjqY#X^`L@}~Ai7GDC!wFqc6 zP9p|ZVj&M=MukF%#0EH45` zDYes}fDYa%CcM>MgGY@Ubx(Y3Gw* z{Iqt>+RZ9Pi6>0oulxGLdGF2YPY9e8M1D;-ckVD2;eG^5Od>$yAH0}lZ0Ms3rveJc}&Dye8x0W7$oIi{qp zojb?1{@1I|jOg06YmeS!`VHxS&xq-woA5Bc_2wIIKBop}2@`-&h?{^y5WOYZO-Lt; zsCS8EOY#!I8aF;YY%o&B$Yc!m(dsS(B?1+YmJ>iY03}L=S^WDSX=_J2MzkB8q9>f9 z9V5~XPJxU>tczd*GID3mrJbP=a~km!fYPw$F~|~)slBSk@K&$CbWeW*R~9HwDhqTI zUZV!AS$|iMZJ4vIqiVR;w(T`CwTi;R+Ia%q<{1{|8KdeB?iyk@QObCX-Iv!sc6WkL z66_{(##gH|KR%PWS~VDt9Wz>;3i3~#DlV`(zD@HOos`FnU-%atxblv60(AUm<2R z5G9~0l*^vWit(wH%ZAAsyS@~rp>IgDi`Vv0Wji92Q zWW9*7dYF9_V(Vc05Y^JYL%UX?)K~Wni;V8lS+!iwfZM_MkZsshi)LtsLy#witzETv zdpNMfkZ;&-`&?^_4{rrQ3 z@OcRegcErE-S4#C6$wvJ;T6u|uah`6-BglC> zs=FAwRR03N(w|LZr+wC|6zCM6|7*{v*AQxa@D?Ium!ZIX9I zdyV4TQcEa`c1JPOJWWpa>Z&S2mTOCkOG>K%3&wQy+wPw?VanRHbMLQ5Pph6S{e5sv z`1+X&c7OlFPiM`lXJSP~Ng)O*Qnt@rHMC7sJxeP~$j`#;LZptf*?vv+bZy=|$QMIY z^XB0p{@(7cs^|53L2JV>rx3N{WN1{$d7$zK>>U23S)NqSib?{u3X03hYie+NSJkMV z7?&{8;+@5FP=uNJFFKj%1roES>160$ZB)gTykFD<1OF&y7}+bDej^C=EhE5ojJR1V zNNP_x1a7m0yz;mtariQg6_3$lM)ye=6&=&5eXFQp{ky0hX8+K#YkK|lvQU4g@OGi9 z;xX%kS?@1%UsRY}tpZ2g{tJI&8nA*#iH9!#BPx&c{o+XV# z5JK1-^f8!wTg$z@&Akn#J%bf}%Sc5+EV&t@IITn_;Jsr{KG3#Em;^V?CP^*Y4hf++ z`nAkZE&Cut@&N((;-}U?O|s%W0kRH(1{G6GS;{Mji>sLht1(V3$Ncb}vYApD8xa|I zV{3g)u_6L~%x5Q*0vwqc6wcVee0hg7#azV4v;1G;vm?rRLSA!~%e*VWth|~tY{$uY znOhH}+>|@lEhQv&oAQm?qqN!;8%AuexKJ`TtIP8*T)0pX(mip=;307?j?db6Yae{` zv&ztJeYMY2r9YW20>?eWtiA_e`3Hi|_lEstr+43-JA0OO078vC01(zE&3RWa1VFk9 znl*pPC!c)0?1T4Htcw@p-g*~RaNav_&;LN}k$n;Q?cAl@A_V`k;_MufL9ZgH5MO;E z%kf$I<-FpXpB3j`PUo|8nQA2LOcYxgWKwcvLj}y095i7K@}DJHhk)X-9$5o5HG=od zJvqRpMT;~>!rnv)7TO@$MK;t%f0hnHLfIFh`ESmn@jc1$_Gec7F~{4Sk$&MELA{x( zl@Yf>JF01Vvrx}YGl;{T!@&y|tU5x>aA+BlX8-nu*rp$`NAp2A5W(5msyQnsH=mdZ zIRQcvCsEBlSdj75dEGjsUH`^vF7c$dc%2fV%HI)HIixW$2i>;wS)atuT(Mlpb{Xn6yRkzO3A^uSV zA1J(>Uf`+vM@D+pknDBr;Nj!Pj~_m8EF&YQ&L>h;z8S!}9!PNTDEn0Xk3S&vK45>| zb$y8`Z|g>Pzb@TsRsYn~Tyxv*-S6reMZ|Jcm%F-mZ*9y;RsFNIGSMvimoB60Ai`c@ z%Tu-VH&%Yp6xi59{6Y5c_w{hYn2s3WiJdJ7c89$>DB$0I-_b9w5f0mt8~xIRwZMcG z7JzuF7|L`WjjCa}#j+e*Qe8e4M_q1iGM7z}(BNA;e>Wa(sxm;8ur1&t*c{jG!FUW} zJce-H9*jp1#v_E}Z1~B=3wRLEoIHK{#EBC)-p--_oWG!o+@9V*Ai)Cg9oQM-Ch~ z`1{@qwl;0rbm-EPEZ6QV!SS{-#@o&APo>~C_OX`ajj2b@ojo4a!rfl51+?&owbYy; z_Pf=PbX$NYioDJe92wct%T#(A_4CrHgZuXFJ*|e2v(0SG&ZQP>VKK#5vD{u|6(c}9 zSA!@>_K`t@6~dD>`a-a>t0AOpGx>b+m4t?bhlb)o4-X5$WkOi?-~7zco3Zq!7$w|mVS1)`21&y z7`{hfmRtJC0KzW1+XK)`CK%T`_36lB|MABke)8?6O`E>`WXZ=LfBjowlD^=Vm8w6^ zt-LGwFgC*1B|G49?iHN(swYw>HICSjKSi)k!tm6n`m=Zk1O>vsh)>s`DEx!K1O9hk zcl3Dr33>#}hD&e`VfWnzyEA+ZqgsHIb1u zHk-F4EX+cUhR!hwwoF@*dDL~&1LgNT;HtX0rKY->O!>gN7FVinWmI%1FD|JlF0UxT zm`b*Sq#X(;QQab%dwI7AZx$KeBC^?EK6Biy#M&2{F8AR4*J(FGwIi$vTCU3N-Gnjm8xEvsHcgl-qFAWX`rf?*6C@Ts&8srynT|Ymxle# zId#-pv=ylUF*~bzOW|)#=WIw&zSB)n_1FbCXKALYudJ+8b@i*`?f0sB%t%e=6#wiP z?sGWzdDB08{Meba%h+Vjvs7T1xpe%*(cj6VJ^Xv>@e}GyGn~oR(9qZ*SeWo-Abc7n z&zlV&xcSVZ!xEv}@bWr0RAZ8bQWMC)=C0|FLbcrISV8g-EE5 zwhG%3+gjTUw+)*rd^F`JFm5q@X}?4!yh2-7Q=z!JQ~`X^#0M%C+L;sgtE@ zXerSZmoS{I3-r&73{1da^`_3dZh)=aR!P@Su)nDPYQ-o0VTFnI2q@3p z+>GvybgiJfuNGL{3rrDod2@3)U0#BJyR5uI0s>HTO7rrIuVR+T%_o#8Hz(_o>TW|0sKk0EA8ICcBu~DOiQvhm2FVgeE}ARbRIP9u5PWvr~-zUQ0MF$)~vao35MUMU5|T44I4Ui z?9@jdoBq&5ib#*1aNh$DP8cwFU}v>6iVjf}y85beR7iqgas?md(z%P7N@E#$P}F_N zyHb1&8~U}%Mt2_wG1y0KzHrg1Z@xKn`1hm7j-NRG`@vri98Nt>EYG&@S1f*e?wr}` z0EG7$+T(vL>YO>VX3tx&aM3$Umwxhz+9@=YiaQbBfyCQ}HftWyEIg)Thjtx0Mx)&N zwQSjpIv`=8L4kqbz__CsGb$-`AS*w0HZXS~05FUq2WTmvm~1scUF^0bCh`T++#MY# zk_?Ct_*g`&5vIX7BiZP!F4_xPnSp)!SKuA#2f?71mEx?!q$ zMMK!WpdGuyRNaOn0}&qvBLlx>|Iq$V)!fH)#**%9!J(C#2T5TSRLurpj|o`9jfD}0 z>@;82l$*!2d$|9&A}S#79^sVBYD)2jdaYfl81p0-S2 zXI30Ps8VZGZ!#6APC1!~WksswzXDF==Y_O|eyzRJuH?+T2#7E$fe{A$e5800u%&?t zWTs^xg6fn6YBJ8zyO%wKiCv{!yx!Aux~e4DTiZM8PGpF9|A4AwSoH-3U{qyZ$rAID z-YdvQUbQR88h<0+uL*8*=H)cQg03(Lb_?+f|-%vo%Z zx9%$eM^SHG+Xv#6w{=7M7v3>$c+fI0EGxMrYCEKPgufOHu3XuQ!IEXc1=FM-#j#1T z|HIK+bI72PA0IGhJ*C!1dA)c?Nm z|D1uguJ>|HP(2$OK}DK9d+wSw->oGavO!e_vxtr%qC#wy9kGtQ1Q7Dfl%%BevmdKi z`p8po+<*UGy&{}cmu%b(x%gkM6GGkpq9MwC$W- zRCFZ|T|Hkw5Olg(ist|f?Xz`*l+pGv`jWE8F@%yRH3Bs5JjZERwPrb6FbTV&HM=w&&_-WkQTlTS@yr;p6-X?KZ&v#5 z-Mco<&8#|h;aQ@N!*ulvMyvH0wH8x& zvxTlT1bnwHUKgwj&_(LH>F&}^(hb#lW4&~Zii>U@>=!+J{8KN!_~Kh6py@kCxw0qvNUk^v)sIVQkrP_YCrLljN#060)-0eM7-!o_S{rJ=V)0u}iE&qhfoG+H2 zPCui1rTnyY`;N5pOP8-(zxLpl%a#Jx^8|DDc?oQ(b_$LRP_C8Q%25F8u4zFqyR*pu zZIO6p^WoIgefxe~yL!tHTjaZL9uaNZC?!{~+eoM^E0G`=t&aqcLG&t#IcdUUP~5cq z?hNDyL_)2Qw1l#tgo0P-hyVqTr3jjHrx^DAjHKs#)*t`*GQd_ZNKr zxjNrX%W)Rl7K_M;GZETuikWkF!(L$|ltIKJog1DR@KG)R z0|8EyD_R!_eh>^RYrOJqVCNvW@{;QwEZbqtE}T7o%_}0Vd)I_vgT*uzTwQ%=_r@Q7 zSo76aU#(lc=U6rVU(hT3f_#ZJjJH4H^Tq0oE0(YP_PbSIEMK{A$NCkYd^CR`CO@cO zA{Oi;)nHfG$S8gxH*Hj^G9~m_wyW8tsvG_rkT!C&G52v}5HYBEn&{-_%}@OO{P`W= zQ-A!#(dMzVc`Q9WmYyC9HI9WEqot2C9X*|aJb@^|5n^jNCdgCT3m>b|h$S|(GGS0! zPAlc95g|lUX2nGy;{%B|Rij5tdg7_)hV{QIzIW%&UHU!v+N+Na@6#tXEG(?!h>4@e zj~~}B*3Ub-_b}CvmX>|h8euF_$2@y;Rv7??IwdGe&p0eAoSLG87QBjzYrbaJY zv|!=;?|u0B=d0GOUOs2>+@$vw&0Y5Cr^{C?{d`Gs^1KD#efQml)nBhrqobmlHH-GJ z^iRC2ZD?IZc}0VlmaE#DB z`ACZpP@^FRib0ATWppw~djTs$SAY_j_DE5D;Do59Mhkff;15JbaFEnsW1m1Fj%hhu z^1{de{TUr&HjFVF#h6WI%!YC0VT@T6(let3`^By&A~qw%8{TT0c7zcF&sjE*4F__vXboEwto%3y_kL=g4pJ&CDLx+zXJ6qR)?2|GG3Vs$# z?Bn>^rrZDJoU&rawjFDiBqMb^Y=6c(nLLeoi$5maZ~0sxDd)aBj}s%Y%56536_wQ5 zmEiDDmx_8M^dxf#!M?bD?P?V5WrX1W*jYs;W_WlbF#vFGz9^= zRCF*ns_)2V#k~?GIC+9j?Cql&t7TkxBOzU~i6G3t{a2@QjLwjCf5D<10GBh)*tV%3Rh+=(VFk#ZP2k(Du z;(x4a7+^G3UAtzs1P9gG3`FxGf!6$Ol|xV`U4A6yXT!!%<}br&`4%HP!=55#g#NeX z4+Ctrs+x+*{EO+xHubd)b;^GzhZrm27D!l(HN-7j#(yY}2*Rp(CeRgSIc7~p0H%20 zzblp)E1?+@mqTp~)Eu_|R6Oy2c>xD)+|x9B1!x0{o@78AcgAa}B-=%Ha4jp>mqs|P_ z)jE}y`uVw)k&IUCVQfhur*`2rE=i;+7(LugI#su9X#(@?E%)RdKW*Ch-P+|J&q}h+ z1Uud{>AiXLXU$#s{wLotD>wbTUd~ogA5I;wN>hu-7A+N90SiHHfyFh+R8wVBygYPj z3mTL~fDQ?3g%Y^{j<>^j0D2(7ZU_?DG*A-d#JDUr13gkSD?LZ3#N%K`lsl&6H1!Ax zYYPpLKnAei9ku~4un(~qAandTr_toSHq43kj6*x-gyf&LVNUpJLI|8v>^Fed>`I&ybLa)^T_B zACi#JcgmD|$Byb37u&7-G>h9bwL?*@J0TK?Tb?dnPF1Y7lCWQOt-->SVuTrN_j;1~ zE3uJNhAhSH>r_mr2fkmjc&c z3N=(NM2wx`ylCzB-+q;{b=z#!E4jGQ4kNZ=4XeW*$k;EYMQs_I&0Wkp3fi@eq3 zL_j6=#ZqDggD6J}))c152I;~xnvTk%uN~Fo0eM2hf19)|I+%{Z=z%us*sM(eb8}(^ zh{#Wi%M@*ZO&Uq+rsJqA{@PJZE|91Hd`ia%Co;lA7~x@za3VL5$Ot!w!xP&8iwEvn zL}_^v;9SUCLd`CUbiuu{!czbnP(5Mp4ZH|ru_9Qiz7O2p5IJn(sKJB#_v{eWp=;M( z{qGvnAMfKk=&G~KlM>pNS60$L|C0k)3&WpbOa^=@w zuUs}yFgE|iI8k+C{>SS#|Mc^h+!MH&a;n-SFdK}{y2uv(R-+WAa#XdVRj_S9-K7w= zh|2OLK)p;F3CYXm3__)>yFdlnXd>*~$cu0jD`q(=6iebV;FwYmTSQ}d62w$ylhniZ z{f}pG^!dQ1J{OPgK>B>3tg2cMJ7jmr1Jkp}#1iAc9@cPfkcp&Ep{J$S9mRS@-xF&Z zS`FP!Qti1jaj}<7))1q~*x&*hq@4ovU=&9<10K&qi>SmFK z0{$i%SYkqysUc_Yp1oOCUE2gTQs;X1q^_(gH@)zxuDay9sj;G}zV_sqj2g4^#fv8o zY}>l#Fg88dtEsL|VznERd|QN{KX$n;?cDK0oue@qd)Vr7^G7dsR&_(HDqHieg= zF7|L^Kuu6TwRnk?k-O3AE)%HHRCgI_f?-5zy)4xr1Zm$3Yp;mB%tQDWM=eqx`ahmT zW0rVBc(}|_?Y-kDBKgc__fDSF zvRRlnCKMM>gVHeWdVPJz9=!*T96o$#|FL5ysiv?n5A6PSYk&qM-At*kue$DJ1W*G* z{J{fP3~KZJA1q$@!J17}tlhV3*H63lzQ2bR_WM=Kmw&Wm$%kJqU8GvW|H$+U_3$2` zhN^38gE8ENcvPv)iH^Ln0B9-&ove8)L?jS;q9NL1AhDV(20}O5d&g0{$2`4VLb?xpS~XVxnjNfFftTO8yHN%H z^uGQ3a#7PBwLha<`ku%jGjwa$CV)Nnnr;rT&dzEqO2AK0~$Y zi;AcRFK5x}!5dpfwG33bThYoy;1F4l7mZi5%iUrvbCP5U4Mvw>bqvNq8fSngMR_Kd zqDetx_s-vU@BzVe>p(wsr=Qxw2in31{2@mA2l-m0Pi^9G@l2+^#+HQNCTsi2t}`Bt zHuW8+gb#fd?{bpMtC1bX21E?L?~#eB5@}UdCMnI5ll z+@EfEk8XO@X~9=-zrA=4?q>V5%2J|)tV*XOFvFF!WZ7?@><>ZFW&~jo_ci9kxN$So z9pO%dO(tvm$^J}Ek!EE+3S^~&WC$a^?VB2fNbSg_w0u7;pG(UfwIc=MSX!NxNx@bj z5(!5VR>2=LlWveeatN7SVIO>kA|mfJ@-|9col7A73)mk_jUhryWSIqNL%1aV0 z-HWi;LBvl@z&tS>1I0Le5Cb^+iv3mRxeJ!8`gYaAxk-Z%VD2N_VLXrf?T_nHW)C17 zWfY#+XYBuSN=ccm&Ut45i3k%wVVq`v$myN8Kl=EyB_pukS@F0};33$(FF+tqVkmtY z;&_3Q>22J7Vd{aN#_M(0jb8rj2Y@6&ovBEz+G>Nbs-75AXJ@y%dW!e?OU+f2Ns9M* znT@_M97oYrN=pg=xTZX2QfFA^mPB>QGRGfNj^UN4e34@{riE#Q1TPs_8T8D{MwV=r zj3Cr5v}JA7Fl@4~(`qK#L?x4>KtJL}YciX7!CCm@Ycg#cvG=L{sFxzeePuy$7yN4x znkW}RW(}-wFG3CNMIpUDacq8l9pCz*pyr&Sf)>0|wafA9HdxyA&;{;>ppT)J9 zr=PrcV9(etJqApg{%rRyVa|=LD{f)!dJdZ~a`529o*i0=2siK!B3vsqq_JEhXd!N+ zT?qZEM%-?HC)V}H0RJ$G`3ytKq7Pt5Zz0S*Z+|Fs<;sme?>Tkq3-`FF5f_$d*gQvIK2iloWJ4IgP#ilB*)RnEJT#;cToXKy zMsQG?LtvyXkrXiflfc+TD93`f>EUTXF@b!3Yk$^Jj4t6EQ1e$5!Sz1PewSr6V z$JyA+9V@g0tF!$}FH9ZYuWR?7gCBU}m7YDKJTa}?+*@?+^}-9`VGlesW^}^n0h1?B zwCU^gt-AjMwk&i~P+@>hToqx0SFeF~?&GGd;@qoa70+N5FHjR%wVxsPG3nipmoB9! zxX{Pbo~ypwvFiZgsr$BV{Wdvi>y~)?tDZ^kFSgE_ot%`sapRUBKL6|;HArJrzU~H? zlgT3xn(+6~m=>$N2yfadZ()@WQ`<9@FrUC(=n6(XJBJVjqD;}wq#y=7CsRZ!VN&dp ziYp{Qm>_4t&&<9<4bo|asT`pCNCQ;(0S8!v)n^$LeH7mb!Hc3WXoWRkA({#E$5))*F-q6DhNsMSnf`{Khl=+N+La{05&ksyS@$f!c7_Bk#o9pVu$g`Rf42@*z@Kdf8_fPj`4^Qd<=S!XLIB z1cB?|77%Lp9$obaD%>0;%GUYew*i#YMvoSQ0V@#Q6ph z4K40pfHfpn(bL;M7}$}2`+Z0M^^Z`9^D**oP$op^lm{Y^x(KH*V0p(S3yF)IFT-o_ zYeC#r%ZL_{k*#peuon@5ooc49U3!MkwQuo#>9Dv15v`k{~pyo($(f_VIgdPS5{g^mBGb>+N%Hs!~yf4LqOE8;BoxAYuC9N&g0@u>~m0? z)zqGO`lv~JV7b#zP= z;!mp>-nNP1U8|YFe2cG(dK4xfw87R~FK2~_>)G9Gc zKWBeQzkGkHWPiS8n15*NE-;mY3kpo-k;aDf+VWGzKvO|Mut~+bs0Epci4Crujw=E8 z*i2L<5o^gLq&uWf?AR9t9v(j9;qLB^kr|9WG`Bl{)zMq6ntGr$J{=V)WV9>S@jneC1@ycvxZ}$TZ`}IRs&&iv?Au(BajCMPHZAS4>YbT>qOLgCRo~EHa?35P zJCmNtF=5C&A^71!!A(plaR`Y9CYz+q?1S)w5a#evVhIzEF=f90`X!AXq=us46gbnu zrZ^@g-aa0a^mEo^-AVSx@cIR>&s&q@l}~3X&&un(naXIZ1MSYl8UBh!p`WSjvL@@@ zmAyRcm4EWwU2*3*K#8y>JNnk?55L37syNZNR%JI|8l*kHN6=KVcEBn>((YEUStN5b zoV#tpW>H*DpSeJgZDuC0m;fv*b+ZS8AQDT;&uN7!*7oK&vq#)%q6OoGaZ&_CB-z`B@_V7AoN$IUDFZAy-PI#!8;3! zxQX}g1dJ3c&v@cOTINPwBaWQ}yN^9a_bD1=UwdCYXN|Xmvvx2c;oxj#psuH`B|_~i zvYdX@ySs89Q>=zQn>Y8FZ7j*Xl9{SAG$t9F`3HG*ls(5)@7U8D{?O9~s0jU^O#)AxNe;?dUUdjTn=+yu3XaC(4ZKN$u4qLyc7O%<)&kf zc1e_nL;y*|;SFRYi%gHp1TLkMy~Q2Oq`0q^f0AtR3uxV=Tj#G<|M24vKPdUunFhP7 zr!ff}Ft=n&byd-o^6Ez(7&md+BizqSL)pbViib7SCtGB?Um`FG)P1?FnZyU`l9_19 z!aPvu5nfn-xVc-OXni9Y1#)+i?Rg`kz#YDUk%ySs3+c)ZD{x9=Fu3H9Vj{ba6m+ej~X{i5f|SHpIf+0{-F< z@~}04l_HKLP4wgD)3bpUVSZXzSdjD@>+L2NRx|>u4Nhh;DX?7GXFFBbRb4)^LP|hM z0bO8OyuZ&gRi+*I{c>(YV^Mn6$$3dm31Vyl+9BRP+G_9*aMxMQEyg{b7xv^6Pb9f! zre1#T)t8@n=9&Kfvx5iqA2@vU@S#Hob?lrtsLmdFE&cMv+G^)cqn{A_!zV`1`|k5p zFgsYC-2%KJxv60pLjZxhzOS4mWAvMs` zcEya;ZWYnn6%RQ-r|;wK(~M`+RxbQ_F#Y>7MvMn}PR3|4)&7k6z5Uh2dgaoW12Kb4 z!9sG1__txYQ;fj~tE<0%0v-P5bC**ylgv*%@nm@3hwOdody<^Ikz5{0EY5B z-HT)=FCYl;pGuO~8y!1#=`dhm*EU_d4U2sK`A9~=YBjsLmC@JruJpU}%mWMo1Mw9j z;_4s9@EDqDW6+#xZ5N50$YHt@?v#_6heHKpnndS7&u|Ji1R#1_r%0x*RaCF)I+2_p zv)RqPr9c}V9vA~5QGQ+l9%81z&PIg0ub0x|Z-4FR`+vUzb=6j&HOdpWtw7F4fsVUe#Z4Z>L8vv#dP)Hulq7;l_OTG z-pz!!u5+hAL-ENIoEdC>NMEUTY986j-=yTQ6f|00NUAt?w4l+G@;=VuRToDzCzjNI zu#y#LDfc2?<%nCyG_ELpF4^RI+Inl%uahcQXT!>KwG81d3E%+RQ-L2dzi;P+^G87QD&ByamgKlJM)hv z;QYDsB=DRgPT>573mF$l{lxVlIX&mnG`sw4?h_v*!=FSaQ~XM#FzPhgC1MsSUys+| zDYC>akbnH6&Xu)(BhNL;3UZhCDaVyd2t3~_dpJ_4rI)e_}RGM5kX*w3fi z$Zg)MY~a_MM8|+0N;rB*S)`lsx@ssaY}qnJZ#0ULpT#yKBT3h*m({tdQQ&QJu2fmg zRhE`)wkq`*+fdtFGJpoy2H9TXF~~OCw%K;pw!!v2k4x-tw!Lp#XnU6c(v!CBwr^}7 z*$#1hxow4QrELe_*=E~k`-T7gw!O9#+Z_Jq+kUWpZTs1F-j=C?j#5;V!h&0Abt=CpqLoYTmR2H-la)$vCcAQBBVk|223@>3NK{xYufD~eL(2H7Dubvvc|0SN{YQ{F z*=$atLZ>yjV-9nCbJXP4IZSVYr9jq#f^X+#K-DW$&B^bxcTk6O=MYVyXm=kI!eRv3 zfuy7ZBH!8}`i9symIN@;7y(=O+1wq*XZ z6d?ibLbcu3j5T#R_x9V#vX8b1AIGsz`MX)Husj}C$jW4-g>SJbWiWk~b4q10;f0XH zAky*8Q4cCjkJ7w4OQWb$6~cq%%{Er&C`~nXtgmtl=DE&-{QOh_@thAUH^*@^x{E@< zZkv-5awZj}!V07?IKw_%&h_h;>?n)uQn$`N>54Z8!~`aRRanKP{t=Hby0PZC4x3=+ zE?-XOoLD?7W!KT~rNSy?DaTZ(#s|9{z+-%nB#Opv6z)lx@4Clshs;TVjbbY@-&<|X zmil@SLI{AW$l6+nsMX*m$Qv43LE@pyylUwvIluSkBd0QR_J5mzXeFd!a@W9E;DlvbL)j++1A~Hs+Hj_m)1_LFLP|A{LCs@ObIPDYTw*ckIp2)a8DMVB zq=+|!8D6+}@xpmz?>m*oz7V|0y#X75cB2W+D1C*?;hN~X@#w+*`}X{{?@UphM?iH$ zlhxMNRCspZCd_!=L43yzI4&;|7e!th?}H2)NE#a%Eb?zWiEY7T&t;EHjZoHHI+>A^ zdF;%!CjZ2RkG{;}1n+vFfFA<#JmDQ}b;2|c+U4;6Y^>l7+BdMO zxAS230YHSonp48IZBzf%T##S5cMtnS0Iv4PAt2!w_sKr zK5VlcKAf!X-+$ZsVTt?qL*t@ByOKM3GM;_S4scofq?D;<_HgHbT8S{N@{0Vt@^WlU z(~w$;h$SCC1K5!<@zbDMDV2R2{{bl}T*YoEuADlZs|TF=84eSPS31^J&aXdbaXdB6 zV3gmG*dMy1-8!{iGikl?urnUvtE({><_W9?|HK3+I3IA2n>j5bwbx85HPkTrEj>** z?nx^tc^AXc62x9>xQ6I9Y(0_eK^e=<1-HE(UQAws$H7b{m#N6=i4Ejl`~A+F5kucx z3iMcRUM`A$R^aOEVP`0}!Asg$PoOB7AP6h*Cou^cl!Y~b?PV)lCx88w$DVn45?0j> zP{sDJU6`I`33_+;P#Hoj+}$Tkm^fkLxKPnhgt)l^X$ypumEvOX=Iha9S`R`?CYl_qM+qk0krksmHw1*5cRdX}URpZ%W>06F zK7T$VGu8D-uN)8*d(V(Qn^Q<{@;~#=UF`XRk)Q9)b?&0jhn~bScjN#f$1*e}=@$(g zUOvpj8221GDpzS#6?NW|A6+z9ci+=6qunwqkgvhBqVpF8?=#ol#QN3Ve$Dx=z1j8Q z_k1tA6@`#YNis;< z$S2v`9zt@wa{?G{c_lLlEJCt{*v|q}h0aU4G3@j$BFBYmY&u z*%xv4`#HG92E?(ixK_sL?<1*q(_jR2%O>TQAKF_sujE1yu4bPH=QV;_E?p$qJaXhv zYRZ8F2U1cG9ohEfm)k@#xD=e!BTkCQoh#OCsw-UKL^%L{RVaRCs?olUEgeHFz#LG!$IFjD(m)WtEi`B`}Q(Nd6F% z1LXn2N3`Xwt?i;A+#7rIeuKs(Cd9>fLEk0$tEi~5LPj}et*VIh@QCbe+b;Qw@07pT z|5^UB&R@LnRUi4In%+_VKK)+aP}ePgQmy|d`O|jF-7#b&D}6}mSa-FL z^ieA?=6^@&!%hn`-dXgd=8^int96q8`_{Q|G4ss-Vsr+K-tou(a&$U%w$AVb3cY;s zzZjpDX_-y88=rS)WCMTJs)}X z4D0ZUU9j-}2OeDX_|iw&0zsLOnwnY{ID_&F&^rM$eB*MZ+S@;TR6>F%E3n%SQ*z-4 z?X%t2pK^ZeMb_?Q3=fVwqGD_}_i%Su4F1m66J$}I`-cHzx-2wY|Da^Qx_i!Ntn ziT!K<9M#w$M+4{}5+rRwcOZbFb ziG3wy|K30L?ROk85*-&hRs%&RDNCk>P<{*eJpP?{ESGmXp9MKfsBPmx`oWUWSy-=S zL8i?!1#fVDYjaJ7 zg%6I*&nX!^cIQc`FUWEdGHl$qaU+aI8E8(fpF45lL{>pTs}EIB^q19?*IBE}va{BF zaK*yA=$zEP;h|6eJ;-v-7zLM*F+KfaaZODPh?QMqVqyjdB6@t_$dRq-5)F>O1x$jq zH2~$8SKzaBwhwgASIKSV{U4O~@Dt>6?WXy~`xA`(c+K z!q+W&q#{Vd=+RphNlQyReDp-t*;J3->d49!D_5?2d)-PYzgLe+LN}1PcBv(suSM;cbjt@LU3vku_|U*TaFTit0M=p_3n7ylB+oso`(DxnwY6 z)5*hc>YYKV_h&wm=amM7k{7)^|6CEOi)VEu$d)V>H<2Zznzgno6|NQa4F#UP^_cFw zGA#H`t4XMzpRdnQdssM{+SFTv>HT?H9Cj=E_rGhJqrFL^A+M(r?X7?WDIpucphLv}O-a3LCD0}&*$p_r^($Z);L!DKVQ9TDNb7L4Lm2C{6pm-5 zr5-(Y^5lu5P{9rzI*fe>CwpwH9kkOtw;c1QLM1x*4B%Y=FiAab#|$PAr=3Hmhhr4wVZTm(HF!bM@?T&=VbIn_(2} z*~8`K)mGiWs&S*LmS76-s}X-x0;Gdt*T$MSFdHGiPb;TErW<8jV|6L^-zpky(Bp0* zDFs$l5Gh4O1o^n@;W^<|t{%pah@c>UZv#wP7kA$vQGLAFHPqzCF8r}M5dhT(0B3zf zXWn@rW#8UC`wksGdhGaN(Kvtpcvov-DuSzz);I+w&IdeINlP^XeU&08G*quWy>#i5 zk3F$$3QI;>5@KtAAB@ExQShN++4Il7`mcW_Vz91-{UnM;w7OEGlwyi3RrPiip&eVm zq|wyM+(b9bdy?`k%_x!y!Wl+Ij~X98ea4I#Q{&?%B~6N*FkaCPqeLzP9Z)MsC;`r7 zoDtz?Aaw*;6l^QKi`vPc)?#jS0|*Fgclz4ehIWO9zIiyy+Zt-o3mk`D0%KjW)Uw18 zLDn{Tth%l3Yo(>RIi+RS*d9UTT3S}9_l+F+_4}iz?}Ot<1S9zh*(2Y74~B)05=*d9EkOt0XMaY7+Q4NzKbG%E3d^B+dYWGcU#aA9z#j^^*Zt3#;qv zKK*!4##O7%b??5TS$o~v0wd}@H8-xO{qgfIG2l-Lt8PhM)Hi$8BX?|LfN)WCnvD7Lh<7gF#k#JtRcc0Ixl|)mm*y4rsPQc|!IA zF9u-s$L1&!OzR=j&RVrvsYI41Pfz5G@$vHX5gm81 zGbmXN77N{>;M|1^=Pq2yDJ(982TBaURy#xiu`KXwW1mm24-JhO9UUFLV8MbpQ^o?f zB=tAx@3gMPnjF@36j?WL^AMiOaipeqg{QOo-PBfMNp*#m+5G~k)5o`3H(YuJ0g)Cc zhTW5&ay>83K8suNi)!(hY(_tc_Bn*{u+Ks6Nx*;5#ZW8gNTtahi5&R=<*fT&HK?I2 zWZtrwu?ZMBcw*O6xK*;| zQGkmDI)nxEV2w@J@_brM4jw1+MYwtmAMWL7nXdW0@2uNQ)9h(FOr(Po9gyNrYD>DJ z3)iDyUHF_0IEB&+;)ulHIDIr7+W+#J^@Mf2?$!@_XAokN+<@9%-+*H^U<#cN`3lJHEUP!{g<; zc+l^@cq_x>Z^!gaw=z7A@iHd%Vt7;yjvP3M{pWh1s3KKgqIZ<~gZQmz*QNLVj(q12 z-)x5?4_GKrc4dnSo5!k+>|4R%Pvrt#;*mV$19Kc))Ur|^pHfi~!W4tqU~+;kkO2pH-Lbabwv-=!_~u)bvaqWZkDw2JWZSz+QE21XChj%dX-j7*X8C|28aAY> zlmb(1DI3_klp(0g1k{Cg7KWhDhoCjNYk7i!w^^=dRMpj6tYw$aZ~SCGBmjY_dabvU zffZND)G(S`9eJtN{m_}0Y92_?#(~y;6yj4J21Fwk-vaG}?T?y)J_6HDU11ecQ`MYI z*}iq_){RnXDupF|$M-wd#Mzg)+&B5LCzd{Z-?Xu#$3$9Kg@CBr{!#ti1+CYh@rlS! z6)WYZ@*Jh+?QP8wVLk?(zQHdvJbdoL8Fc%mCUx5l3(_$d2u)k-*HX zn`I?Ar&E9b{ZMAAu~ln#!qM&T=H{$Y^6l2Tnwq-04vj_urfNk` zD;Z8zImx-Eo4IF?A50Zju3o(=xhwGDC4TdN{+$S7?N`xmOZjck2mLh~OX;=D<44nx z$&twSTB?h(PVVS#HiLh2RaHfkHZ*p@<4-PmI%5!R;k}u_V~Pc^QZb_ ztl~bDd^5nuRo+wu?MCYrG?aR@cf%nA>7&v4^%&3@=BZm9wIJw?`(6GUg0WlMk zl3sY_DX8ZDXjhr5kB5e9m+M_>t`t}e0Y1E{yM=+8@p?EqW^~-Nd4K!c!r5`-$KN&Q z;is3q_`(}+CPBse5Ng+IvW?7a0^O?-%OujVTjhGavcs;_xw@!sZU4B=_Me9Kk3;*% zq5T7@E!ZJavKC}XDu)%+V6)4haH(6$OTO808qM#As4K0Tr>m;Hxy5c*I?1uS)T{Mf zjuhM5TI-6=rT+5mcfxWZ36Tf?0=Vb}K)jyf-~VuRq7E{fYq_JL_mCro$?2;IC z0PYkxVC>8}GvkMc_ZByCR ztPeikhdDrjO(>b%CTK4gwA$e`s=INi%t=(f`_sweC-W{-j+;rC0mdNp$AeJb=OXpT zSbKmy(*Adv0|eL7tA)t^S-OAse?I!?qjy&2mL_}Jb55T<^W%p5#y;}+KNiiKIxc3^ zKsypr3qa``^@2thIC2aaMPs5lTe9b9^3y4Yg@(Gh7#jUULnH3Jf2MR_PkF1|oq78^ z`-1Sx)ibnTvd5A*1c3#Qoc{8{dGj86_^B6PUHaTo2-A1Cb4RPy3MgW*!37ZGenz{} zvCa-hJCKKvthM2gyy0jE?*R8HsP)r_uMshu&>cikA&BBMZmytS=rRtiypamVmzkBiPE&_-E=YQ zY-V~v%8nlnU&+Zkc(uG;9yQDz8j@#IlM8F9;nMaVy4J=Ux<$u(h+jp;%^H>UMw9!X z{-HxchD{hhWW*>aQMkftEx#$dkp9R1JQe(3YByG#Z^H*emP3q}vAZRwAeG9o-uY4G)SZm+&^!_o?ZJbACYhMMh} z#Yio7=6K59eY3G{5bSReOuvO_zjaDave|) z_-7T(rRR_I%6I60ao3k$Z{K|~x2m~0ATV^`@G%o7#zse)Kp4thWd3xu-+&6x7=zx)iIcf8S8s$hvL>Q}KWlM*=*;;nctqEzG}JaK^v+`khPc`lTJ6{o z1{Q{MWbM_g_9o0CUWL(U_~cYwWBtZ$AFo`o_S^5beEb766u~)v-_Oj5WLpYnUDp_# zg4*44i+ph}KTEESxA73kF0Vbk_`Z9iM zqrR=Wy2HiIMcqfz_FQkU)RkX5eQ?(=l(y@B$l7D@6_b7XsC_a2_F8iyf|?#f^z8y} zJ=CmDK|?l{s?sZ&nT5#GeDln$g7)q28HJ6kBQ$K#uuji9Qchkftg3NR*~+hE96OQzeahJeHaHwqn+`#<6yz4yA*fi{R9$6h z?IUO&aMz#1JChZ{oQeS5;u7vLZa9Q0f(zt4I6ddgx!~>1sX@ya&5ecl4dOx|Xc&?t zXq7qVvaZ$1go)R?Ai@a7s*DU)1i*4Gh%2XEc*K)SyYb+p zBFYa&zwqDW;MfaUTBCmTr6u>@A3kv6lDFcqavyTJKRi4@t7>XIeEhoHc-Ot-Dm4o zZxk_O`^)7nzP`RLKn^!U^TB>npRb||Vx(!Cz6c-{`c39H2uoo4gK`a?Bv<_{HRKV+ zOo7Z4#4K&y7xumcZhpbhS~O3DqooIQu^>5m@0U;!^IT=QVGv0OMRat|&` z09iPaFl!%NxM0ronX?&hoP-%-kDynnI%-Qxvq4^;U0Pb(p#sprD=;M3=&v`p%fPXw z&@&G~A00i!iLRpcmN^(RfE3#F2Z&U{}l*qf|iP_VRt@b?vZMEM-fdr=D24P8R zZWW^3hHZVcW*p7nsq9h)HBRksSoZK7e%$v6$U5-zCdkBP2PQfgRZiSN`41 z`U#NeDm_%6 zs_fiMJ0ku$rro`tYrR}tdg(}-tCOg$HyZ2HjK(zjEh-S=T3Aqw7&2hM>?x*0q= zzxBvP5}siWo`K|If{AxCNP`IbfK(=45IF-lbn(i*iZr`OgOrnfDaU{~0&r}$V^7gW zI)b&e4IuytQ1LIYOt1SNxG!l~M8wa#3;QQN_2#S3B?Wa9msNO+?n{>r!Yx5pj-&tG zOPiW5?%$Xou`w-^eYEo|u#5}|+@D6^(^&0>e{bCU>E_Li^_O>kMmjkm)uuTm|CrU$ z+He3p0f=nm?u#CpjCJ8dEK$d#_jib0d*dOX5AxyUE4f)(f*?PD z^lz4z7!c+mSHSZG+69dUN{s>mF65k*f?$xd-535m5PSfX@9pc`&l4Tp4-J8rbQx`r zc3nx=I;4=~r%MV6T_oksiXr&{>H`BY2y*bCnLa@T?-FDgJ|!m;(w%e3gjDNrD6PO=gzfE96lg zWkuzp6qHln8g8PX93XkMX7jp@Yfoff%ee~ofW~+D=-7#)hR4PZ^A&O9f)$I^lo)w zW_ri*VU&JFqZF8#Isx$mJdgb%iXO0G%Lpk9;fOK{Y*MdrNNw1+71>=qjrf=2^H}Nk1Lr;e^!@YMQbC8!n3C-5Yzz}w zc}}5dc4;wq%ajl~mCoJU6VM^uEuLq+8e_1*+HQB!IAH^y1qr$m%~A^JV2# zqU%`Sfsm0qw|2ygnlOGsc!S6c%XjbIv+qRO z@w6j9fAQsJ5%Zd`+*u!F3JMICDAjnS=!70C#JeDmh7S}ipy@Q~?!tLB@!8>R(C5<$ zGCBcJQS2Sk+(%=U#(l5VbBPE=>V7kM=V>HD`RhJgg|pSY&R($sd(YU1UJ+*x=09fP z0yNU)xS~fRz0K-;uVZz7PFkI7`>f9YSJ6yVv)h!sKxBI{^*&{Wp~C(tmE2W3%5U$9 z9_)u6q^w9@CeUt5J8g1=aOUNMf&{ZWQ-wr7e0NavLo-CxS6*IUz0dRSi;Q3YK5XJQ z6`y^+b`A4Epo+=lIeo!`>7pFrqy_mlUc zQ*$>_KTHGB6CO0#(Fa{KHO=l_(O#H|n*=CJ(2R-%HeCdKmSrK`5NS=n(JF{F4+~y* zAKs>2wu@|!7{}f-V~$|2NbS-hp@uV}~4tdd5*evs^-g%&;XvcTo?f4Fg9j*X*L|Aaw>5d?6XdMUzbT4)8eP-VlyJd_3l5L|k(S_DT z8YMtfNG;r@&(is1T}k?ElE@-N7AVp>dWiZ3*!nP$f#?qzCb2zrOYhuL))bK`MSdlf z)!e5n8Z9KV0q7gbapi83lmq~ANMdC?!&p}hnDETP!LiRgh_qT$rd%p};DIucaqAK3 z`6j<&wTR?Z_SelMwEU$@eAyp=l!-EuSy=~|<1y`1gUBU;MW7t8qf#AdwxdY;K-7_9 z1(CUeAr~##PT=%Y5oH2qIILa^Ct(|SJM#bh%(ij!k(ot(ynL{JXnDFg*`5e((+!&w zo^VZ0v`3gRv#A36H&q2%n!`mE*kcIDjodFBkLi-wQq*96A?heS=9iwoI-V#DPc#%y zM0+SokwGEv=)ls&Vdx?@f++}EPFb8Nd%3MyT4|HHc0-(fu41J|nZM_QczC*A zR|wx_S`ZOzM{uNGaMqvOvu@qG-_BKI_qaynl-}`R0C-=nGfcYg*#ux4-es%h4^19S z03+y2lzwUYW3RpT+9T7WVRFWZW**KdKnvs?LTjMN3QQ$H8P&pZOQc@X8k_vzokTM> z`H4rM$L~TtiFIZS>gmzY&`?!VXKio2SqXS+d07#xm=e+BSmv=~r?az9A4^HuzweJd zd-f3MPjuk@XjP&W0(ieSWvL#YY=T2d?qjp~d=d|~s7=luMIN;Jd579QNMT9XH~W-r z=0=eI9#!pAyiA~UOSHg_{2F82_D`a^{gqmuOK7Oo;KSsChXN@g_?8w#s%R(%8a<4l4Us>xb8|el z2cB2#-da()2^&;zsLjhaY}<9{;QpQHo_hq3wx+hW=8}@;wxdTc-E0Rqp#A2hqn%@k z#$oRrTgfg!HeJF@Z6ZAwPns;Cczh(93Wppe;;OljBf?)KqJ0z4zCjdE@K=Z$K(Rz) zSeVJ%%|!y$=-vATg#`v0Jqc8!bMp=qx#FJ?p{7l~+!P!$e281=shtR`dKxYLlK#De zXU?9^NIwlYV)o?=>_Z_DuCO?~|6z=bbT2=b^4aRutGA^WHv2|`#Vzj|!ecIEU&#}> z4keb}fCq;J0MFo}(fRa`9tE{wTzG%XX8wVJLxx60Fq6RUkH-*v$M61Gv+ln8p1Tu) zOB_EoX8hz?3m49qoG^6g(20|#EEJ_G#*H5ZOay>6NfNRq5zrc7YvKW}89$M#r~yzK zJEdfB@PZ2vt~|K)h!Bt_R#A#+|0~1|9)>_S^#I)%pijAL|~=9z1gT;?+W!FU2_*GL9ZRbLQ;XtSeWpASCNd=cpQw@$oq3 zpC?U@Y=w`Tcz;;Jcvk$7&w~6H)IHrOWIu$Jo|k~OfY}npHY4+N+R3yt=gwxF%1HH- zfVKp)1;~~VwtcY4Eb2~BTSdrS@#D2sXLtVc`wg#=_syB<*HMsi8?l`iWSbS!zYlWQU8xYk}RVxF= zkB)Z1YCQB#P~E`5;juFpFJ3$|b~u94dOnMONK#oVVRs321K2M3;_?gfkwF#K!3jSJ zyNkIK8X}flXXn0X=qTiB^+USWe*L_C@xdP$T>y21g97R*tH99R0Q^^j)nxPw4Db)` z9~Kr8&O8Omt%7yPx1Ya%KjQMC&%`<6hbip_pu{z`V65r=xwD@YowH|O#7`>Cm=ey? zg+57uPEWz1_uPuJo``XKN1Qc9g|L)6@Y+m z)!!LsEyd2<5ocXdUS0tV-yLz*QcR8iop9Dz*<9cyxFg8ASF8Ic6HM!$#M?r_JqoU(r$3eT?E;rK(@HPIe zQo-H@(Pv?y2yY2%Tcw6&i-iM}JoyyejlY;lzaT4!qIBI*Dp^4k6D+%$laKg(2_(Z6 zTjR0Yv0s#3nK|={$bI)!JUI=`Oo@Q$f7D=nduC8)c?%M;{BHdvele3$M|-dDoy z7;yidXz7;vI)IlQk&X&i_9e&)1lf?RtRW&-Wy31RzXaka*_DWhE283bM%H=uxgZ0X z54eX;X9R};dFlApr-Z)o*eyYUQ0BTzD6~Ldca)&0vN_pUW6$jokBw-}uG=>>u|u8T z92&xHb#8=ILFWe93i+>OOtoOHDI&=Ep%+9JEXaf7^Yi0HL^(cXe_59D+s}I_v|^bV z;n9e=l&2rkXbwT=E-ua|M&EY$L~%3d+@K|)zi_v3uaV_PwtA*M(BDDw{X z{t~jdBwvP)z>Z6{(~ZvgijW4xPe%;Vgftk57WE?klp|nSB81B!iniSAS4A&KFyeL? z5G?TV!nAE~?T~{CRrH!YDQ?cZ`(`F4#z(o-0ePBttrFYDexjG*#OaF#WktEUSs7<9 z1smlpwJlC^S0kS06>sx@R;^gMcEi6v*qN4EAKL(AqgNGD&}*F|rp{UX z(Cpda5x#-Wt<~4hR*HBsj9(nELq+6yB}MSk6&IF@UZj1&(nBhB(#yzy1;RH_hk$ns zBOC|pP$826G8$p}_i*#Vc)#N{j<)O?qg^9NXM*h{2R$5X6=dx)S-YrETR-@keJ03a z($izWXrOvw3D|CA=Y|eFCt{;RuEf?U&TE&6PzOk98#08)<=x|xGE>ssbtdqMIqu$K z#hmQ8JEykB1Hv*NE|r-*Ma)TH(>jYlbx~z*{{dd!#yU&z^6FE9Cra={2{gw@jAh^z zsV5$XQ^yXU6ghuwoc$$k6|9J5LL5us?`F@5+_ie7x8k0KwkEeM?(Y&wNI+bXQq4Un<($>gwT=k)1nrJVR7?u`4x*(f~)CnX6nT za;5DwgZ^$txhUpQzUFV|!J(!`Ocjb58VxwIS?;GqU_XszamBRiD5WFee35=zsr%vfcj2JP{50V!V71ew~ z^sTL_M)(Z=?#Um}(p9Yw3ya6uBfqK_xxdp%Y^}h~xF31rH4hG=+qQhJ}U= z2tz(Z>1F_u*MZ>@k>n0p^T8M=Yk9Bm`wQ=O>;|g z8+LGP?T7|VME;8pJFIqRIp3cB%i08d{ippk`RB*0z^BwvBk22smer*R%v4Rz{b$XZ z1?LX;5_YlQVIMkMtv(@DB1PAP2wrtkWSL4`vh%+wy6V_h}TJ!o7#GhioF%h}{3EGd!Cp%8>jpx77Z)6ot z_xKZzKOinHpi^V@LyHVWUyny$6Jy>G6AxDh!b!4ML4! zHVXpM@m?*g>KhgDoi1!MI2GLf)EIXA3Wm1OHZ@P1{=nkLo_%RqPY8s-#omqJAhgEr z+q8yWFO!MR)_Uw(wY0ajSk7jZJF5}d0QxsIo)YR7m=>j{l`LJ3*IDcBfDhKTw>4OP zK3T5QQxF5pIK9af8|#n+TE`Zm-Y=rww2mz>@jl*I-mxlh@G(>EdkiygHLO3pkwk;! z90Y<0Wk4KKpm(a{wPF4Gb?erzU%U1lyw`5nz}Uc|^7>}$S6dHXD&~BQY3W(lZWhk!)z*R|Mg=#Wqm48Q1@kjX z4Jf%B@TsRYrP;_>)!1HBSy@qDS5t*}6~K-wt1Wf)b)eX-&oIJ|MVx*VNf9+-)VOiO zhsV#DGI`SU>EogtJ!eACnJ|V4^BIhu3&t3BDXu^=w47_jMMT^HnRiZ2-IJ2CCpGnu zC&z5NJG31@J}0FBzwF;p6(vitV$i%5W9po&#<^X4SNHSsk2 zot;KgofDkP%8Gq`i_6NcOT$u^o|3xjO4ivE>HCUr9>ZGr79e9XPyxbEsgtyQ-8H3Z z@0oYs`R&+;Ckr>@jK>6!LSpw1Bw1?z*jWG0v3F}tBKp)MFaOF_7INpBM9(^W_P?mf zB~}UmpSRTMcdSXu<}Jk7I`P(;ICa+~5p+i3aId{!j9I2{*VY|^VM+DFAF76v; z<=x%a$5Jlko;;P7b}A$5a_&{M)xQL(`6H-A(DD_6Q0xv|Y>$@blAox~Uafzwvk38tqv1Ox=wd5HKJn zK0ba-P>>pN;>FpgPoKVAR>lR0w=^%sK)<@|^*AP|UU`1;gB}5o{A(JP^pAnAq4>ow z2xtYmy=t}tZJjEZy4xDK{uXR_f&1G#BqYSt4>5&7!NIaBG5GYMBfsZfJfD6#9oksh zNoZk55O$iD2JH;UtjyEtY^7l8ZhmY%V*$#2RefZ Z45&OeSD3Au{DRD*>1T?{E}>lZ{{Y=nJdywa literal 0 HcmV?d00001 diff --git a/proxy/web/dist/assets/Inter-VariableFont_opsz_wght.ttf b/proxy/web/dist/assets/Inter-VariableFont_opsz_wght.ttf new file mode 100644 index 0000000000000000000000000000000000000000..e31b51e3e9388ae61767c692885e5d77ff7b5346 GIT binary patch literal 874708 zcmd?ScU)A*`aeEr&hFXW!@9Jk2#AP^*iaF%D>m$1V`A*RYizM3(HL8z(Zs~W7-Niy z#*(Nps3;axR4kxkLji?F5OC?R^qt>(b{9*MdvEUjeD3%2$B%hE@8_A`o|!XqcIH3? zLWm1MK?e2g-XnBm=r8XRq6;I$r*qFi1Bb5P{QDlFa9&C1hzUK14)1z*+lJYM9QPq) z?3caX=rSZawsRXo0*VM}e5^n03FAEsgoyAb`vwke7U=fQ^aH0{@_iv&YZt|LJ%6L~ud=lRlXOf6eO0j)?yr{C}D{e!)C& z2ZWyluAVw;>6C*_KXxTT*?K~2nx-QBJg0**i8SLWk+SoqO&&ifFnZ)~2>%q}gQvm4 zb+z{>*f+vnZ`$mIOR|n+JtSo0FN8D=pEY;lc)RvWg zIpb$fp84}OS2UK95KZ*FxeFHFoH?eH2+KkUSnW0l0|R)>xE?ktEXM8$v>O zG@DHuW4ET}b~o8?itpx@Et~&j@#S$8r0cjbYknJR-=Ua@)^rVv*L}v50ROJst!ftX zOM+D-)Rv1vSzGvwC<#%vRK&n%yOq-zvXOp|ULi^mu9PT}31v>?2L_wvZ^+R8LwSK1 zv(`tQM88BcAPx#LMi~d@gI3_>azK1a^h86nbLTBsMs#rFp&;)XzO-4MSrA>yG3PDzxz6rUNO+F1d*eY|)6ImX;T-%ZjEEDT|eGP1U(dD@u-IC-m==Qz8tIF zU%H^*1fBvAeNGhY0*}KItTA_wB3@+U?SS2V;4h$*yg&2=BZf2~l((mV(aGCTfdBM` z<7WvWNXM51kai@L3?XC446>N4BAdu|a)2Bs7f395MADHK>L;|dw6diIgm!$~S=@=x z-jajWy#s;1+T1%L?`pXdaepCqMk%>FVxaHPYc{z#L9f~57QEL`eqK+?dz+$HYH1+F~2^{*X0%kxlZVEs&5W|IXXOY>|pB|2$> zO%};0NwUcb;-NI!WF-k!9=FL7+;`Yy6&a@NWs}uJtqifr8sek$vB`FjE%ihhw6DlD z1Yhy7EsTx?i~DSH&6j0ECEUf&tnTPfEoy10i=Zd2;vAdoLcGKwHrbUl68&wm8wsHq zHrbujqfs`wCgfc<*#q)>HrbPSP+L0)UXX3|5^51A8f**WO&qA7O|A`hy-oHZO@!*a z>JWF~md)Ll_zH(?a$OQ2thCAXh!RJ(1P7b!51ljH0CrduzTy(C<%dax0{>$|eURuX#4PHPV@4 zliNVQ2iWAc2p?*b+d-}_e|yN)ZPfvCfGtc%$kpxE39`4%y)$H2n;e3&$u_wQB!ba8 zm&_*P$sE#~ECioLrjv=NeH)Z-A$X9DH@EN>Hun~!wap%E9?dMs_wh%50$sb*&Sn4nbxz9q% zO%Z<~;>;&Y5qdmAk4H)`Q|}4acP;j5kQTsi5yFOAYQGS%d0d|JLX=`Y(wj_%!#>Ya z=BAdiE`aQV6kbn<*J(PmGC@MRmtXB;tnP6AOvr7UNbJwMg1qEbzb-3Z8ZmN zu+S2EA@bqzUk$@+GSSw5T$`+Yy#T#19jQ;W^w2*F(;Z=``kMyl0V*yGSY7585J|ZVtO2y021fkxr#O8J2rSu^! zP%=(FuaxZdl5^eXr5TSEgjE~IqsREzdv!#Nhb}KWw2tw(ET4J{uGO zoQ!rJV(HV_&=+2+E~tgIMHax$>r@@SH)N|m`e01=wv@}NEwB1i*NwL)p96VXTx;fI zX7sW2i_eP`1CUB}noUuT$ri2U^XU9PwQCT{+Z?6^X>Zf2rg%nh6NH(1B77#myiCvM z59$5A`0WvEDCrMs|4LeaEZZOLub2E~8Td>)5A!IW*{pNB51u!?4#8*(uDQI=UxxP~ zgAjHu)|Qh|BR*SqL9g(cleEO_7UF%zAa7yP8AmcmxxP&%f;ou_r))PUx^0bQ?VRa zDOO>nNfZ)LtxyAHg$#61)C2k{{DF-XO@IN47Qi4yYhYUi*7AyeihjTWiXp&ZirK)q zin+jd6!U=#706xjuHpmWO2yZ}ZxjcChZMg7k0~w#uPIW1X^LmSOhpbbPmu>KP?(9J z6qG2B(oSgy)G2j92c;v>S?L0FQ+fgGE9(OrDH{QsD4PLWC{a#jure6fM%feF;Wchwsa4eASD2wNjXGR5f%1Vs$r^Oz}c#Gz>ihX9M$Kl&w*d4z5pIjT>@TF zMFVfDZUS$sZUZw^CSZxG1p8@G4Xsd5RUn+0s3mt+L{I$XoaSQ2JNhAr9nGu+H26xnogQdzz|IrU^h)SU{6g? zU|&sNV1EtTTr*HJ5I9&f82FY3ZLXQEnFn01Spob|vj(_MgF0$9YxV<=YM>{YYnmIt zo0>S_1I+_qf+iLCOorad1Lb#!C@++k12@Qzfdz5_uvErd4?AKqT&neIA?9BX1O zA_j7?KMOD)5+;(9!!5!F@*7-OfASOgaEe9P!-dx7Jfw4^D;F@W?rX}Ekv%m>y0dr|g@!mVh$Yj$gj8+CYX^(VszF3X@HS+ZH}pG2|uX0<~o_Uju_j-em zCi@FJvQEs0WGxM~@SLSx!CS)K$-;A&b!H)~2}yc4h;?U;NOD0v7RnlvxpW}<%1IDb z)MI_j$wKG%{aGJ2m~5LJWZ~b~c;w_J7W;V%e~+xHY6tsT^03T}^<#eI)^cAqfCZ6d zlV`Gl%%8+9scqr+mw2+laKE&;1{=z{l4I|Q7M`@Qf(`*p>`I5wGZk!8>arW#j=0P$)sA410tB)t^E!;5Kfpb9`t+jA@v>LoM>55@HYc|yQVTD8yPS<Rug59<$LQ&@=LH)aN#3e~;6 zq%E7vR*~O^k6?@8KBRvywuH4M7w5ZL`12V$@Q&oo_S>*bsbSRr6=svX%#ApcqVp(mr zgdE(e5z#qazAmZ7{Djc2OB~pnB<`DHH|9(dw`X)>?MdQ}0@R$`-BExBA@Ms39N27b z(}F$RQCNdbBB|femPoqYcV-8qneiRkl$(VhR}X zuu(tuw&3>YnTwUVvI)fWDBYU{lcxBHCChrbLyhs;etSvo` z?1CZWDQG2IiBhk`m|sagV(+u}$=7T;rpsfjFY8Orux_jyiDqpugD-R-@E%^QBuo_Ls4_l7Q z4apv0eX?##s?dO}x8RpwUQoM{PqzN-;VyLkZk1Ui*S`;|lu69)<7Fyx8z@4)TEsLX|Iy8gAtX!1$ymZGQR!`P%84fzU~O$u(_ zY@i`q@2nOxNYcFxRUYKt{VkOh9?=bx7YgXYwp;)Nc7)fi1iQMk?GGm?b3!e zs#f~lzbvk5wELH3RernAEw1w2eQvqA?oZ*A*sX?jo}c*nMB9epU%cmWH+)}s8~^arW7I+62~FgNCy#em zcQ~1rTD$kj%v8_br;;0#C{KACLpyL76mhjjWygrvf%W!AJP8)lB8*jQTAhA;)UV0u zba$_^(^)(H8gpoJI@`25=yXB5-0r7MfzSJ$Hnn@+jYId- z@N7yGWz)0iHMLG>wU4{@IrrVj+Ue&a+SE>u3?JpyB=Y>*a+An-Euf+qTA zd;OE>xGqJ_qaU{`bBRu`Q`tP)SX(ki7c?qp7A?tb+FVU&C>LF=s!`G4s$Affebp|c zM0G8xjWX?;(MOSe{Y20F`q$5ouB>(aMpvJh>*;Rhyz3ct#q8_3&Sl=$3!RH=ae(9k zC~?mBzV6Je{&k9K+%P=Jt9K(~AWOMn^vr8|!#J=anFFi?E1usl4u+IBuq5S1k-yO= z=3-0MASS7vu0hP>Mowul$-%jvF@}+4>X=kJ!7(P&v!qT;wr8Q9g9`^A4i>itG1;{s z<@7?}oIZej&s-M{bz({a@|`*8V=4l&^|43C>r!Hq#^=xzK7Q5xwx>+(NzwD+9q+2JONw;rB`_-*~E8a)#a4W-8_PqUV z>$-t)nWA5DTxJWu^ti(QMPgi0Ku(Rg(hfN_?u2&~Gwxhz6I1ify=JbtcOEvTPwymn zX}s?gx6jwzDejoBy;IUYzs8-i_SqKD-6`w9rH*Ox>NT zj+xp!tbJyUJ6iW_`#U;^eEU024u}DOVT8s)pLVushfbM+GLEV>sTtV_C=@~qJF-cvu7>%FvR@JP!9WIlWDdC#PW zZ$-Q3G6%bR7pI(p?{&?PqX(6zfrsJ-X8q)<*&9-j5QxJac)J(BqlYqbIGNyFN;71uFnjYYuJU zk=8oh<jy!kt#PRSEZ+rMV>B<1rq37P}-o+X|29OIoc2qZ?8&)=YEfP&2{k3CMOv(XtyMT&{hgTS8vVl$r_o&MCDM z3M;bfClpq3XqxSkP!gJEpHNYOa21^Zs#XBE;8QLM1IyDEBwiSi=8~A)P_5)3C8jh~ zD-sQjU`=oRRG*mT^wcLY%L`Jr)+i?yNKb2X@JTdD$v%lCp-*dY&?T12PdyS#>%=*6 za89fUP1Yq=Qbf}wX9Okc8p6#f`Dswn#Sy7`4lYSCeIccFOmR$l?vm=3lnAV;PiOFErZh&{?vW)q`gUe zlGZfI15oDjq()kbv% zT@#ZXlHE)+C)qz5Qs~BHhZLmSCdJ_Tq;*PVzhsSJU5HCx!`VSKn;N2a_r0s2;p-f9`DWs((hCWoLCASh(X+rbo{nGjL zm43TfhC_NnEeFnSB1s z)Vy`&N>)na%IxR&nMU{gLCdFw&!0bcsl%bc^E`K@z47uWIm#G4UfJ75q=zYWlz+`S5 zq&FC|@A(&3W+%40UyyAWl+qykx#mt0hvMv9&CTMR*hWr@oV)$9PUbuql2eqE7?Ksv z;bcy7a6(Z|a!C5goHSpxBKO>iE{?g8-LBQn&64jG=04ZlF5pm@n-QfYFXOU~QpW%N=$;gFnX>?I`URXua4pI4RXP&ZGglq&N)jA}*xGq)@=2bPag z<>%+ufMia#ua|E&*!vcQdnCIToc2g`FL>gaEEWhQIbIxU74n5wq0!Jdw=g$R=~{@) za|`o4lvzcWf>?P`iCR%mT%2oXS6o~Skd1ae#d40Fw<$^v^*6>B&9asncH(6 z2m6w&zVLYN_RPMdFi~(SQKj3dOHX$+t4nWpHa9PQ;-hvgEi6>kD6KHcg=P6ltyor3 z0jMfB)h-jt3%of%sst#@b846Km0)>Zres%M>|H69^JBpG8kk3@*tXxR+Q)LO2<#hz1 zveL)mCEyRSm8CjIs#cpzsds#W9}P6zb`sNSrBX$sj{$ZzNl1`SE!_vu{W}%a+BIDm6xh3C55W8OqJ8;dY-DF`}HCf zW12Fx;2oZ?VM0-%Mp?6@Sl(+0^p_70H@b7EDWB+!gjG5k!jzD7Zz?9KHiaBpn192j^t9?$SVqg}$=P{TBP zeU6=-V{x(eYD*zs+rfB6UE{W2p{B-d|3a0H&rLe1{Pzm`XH##)*yoQ1`0FY%9PW0` z%X3H`2jJ@~2Yn6WGsp0br5OM*!_mLobjyhe6; zJ!o@%JaiUX8x;>dKD9#+l{V-xf5hv(F_Y*qiC}W+us2p8XgYPoMP*EhbB!Jw{(~zL z*yZY!=;%~4rjL`ehq^{?l4o0&gaohjCQu=HH^h5;dm9Ws zF5XY>`o{FBDc5zclNjf3FvwB<`ar`Gf9~73UQ(s5sj2R@bAj9w z@E)1JGuUxt(vm$FecI1ytjPH5ZWOkqDPNN@q!Q)FB4wEC{qXd z>c-afVs8evzgg76^{!M^aPK4krVo-DHjV$$zg4`^UmveG#3nw{n-giWKKX{vu;gUf z$B_BdJGHb{t@JNFywa0BDluX9gri0G9^??Fe@K*&rA$51YKw@jJ|*@AG6`rH2dW@F zdPI=HgAbF@qkqI`E)`W(B~lIdY*qb}1+odXf?9Khmi7Jy9X7qry6kK{isF`;>E>2y zSLIf2SK*Oqa;s}^OoFq?-#dP9u&L|gc8xDz?qCQsbue{^@7ORt1=&Op4_m8R-o$K^ z+dny_4>Vq20=YoVB+A<;-$)LEf^_gs)tSrjDAXF`$r0W=M!igodVQ>=wz9WeRVJ1R zRjgQ1R#A>=83Pi%DnoA(%-b7n?2o?GN0C7P*m%5*xAvvwJ@YC?LpC2B)&Khg3!5G> zg-$y9&5~P@aJ#1U^NkHO$hYq|eR?n7PKf_Kv`uB_qU$SJwqD9-MOxbZt-Z9Dh$I+vI8a3*`8}UctjTl`=2I-Fs z8uZ)1fyevw3GdY_s)Hd`Z@3vR-?huadLbTd2hdQjaz;6Mo#7^tu$&Wvd{*jEv_=?DO zjvyFho$ixH@$vrg7-uG9dcJ*@nHCdW1yNRBzEN4FsCpzaq7yRtb3~9B%@Zj#Q_)OS z_z{axXS3+c6e>ZXs;5lBiP%avk{LkadJqiY2vP^R^8>7kAL-uML%%(aGsvlpQ`@DP z(!4WrGpW&3Ov@!R74WKGfr(w)-r4N(+|^tw)hD?w^Eb4*-Xmi0;URlQ|2XZG<$xtm zOI*3~)BHoJkx5B*sYzKnqp={hKvQH_%f41EztkQ*MvunfVV)~S1DYQVOU-y>l<19? zBbSzU&=Dxz1@i@Rp;Npmo)|DfiX<~ZIe1a=N=+(PLCN$+|M(tc@Zk}u_(cUjuHh8# zT+18O&&k(h8`(`xk?SM_zmD<4wmd(&EA({m9q+sPWSt=}$1BGtr=7{QTsAYwBnR0J ze=2_5^Gk#w%~-^!wyuBsjy-w~8a)Z`Y%T9me~frwC}y(GJD^2ikHMpH&dQ?HA8**W zKjPBmxO+*DQ`0jtjoBsTm1aq;vD4Z+>fLJj_|^?*)vibA;1Q#zEnI|>;GJLy%&ULZ zM`x{WR$q?Q?k`<K+sSHq3gBX6&i($)?I6__0`r@+pE2toV4BP`ailN`~roN zE}#qKg&hB(X-XGpLl9P_cGtRTwc6TRA9W422M_P$$bH~n^KTeYtI|qZq7}4MtI(=> z3i;kk-pAAiv{b@Wc`$tA6(6z4{ z$)5JMx%8oL;H=HRXWl>@t9dLnwU}notEQ0zBfH8g!2{(cQsAG=2q{q8MG92c)Yj0} zQF~hbxr_ASpN+zSw*Fr-&8gJ{(Eo|?qug|{pdW2OYt!2DR*tV3Yp4JoO4i!KLUxkX z)JdkR;nE1Eu?4yG@0+_$I!fsvI-F8%2ZbZOt8jGEY1>j?>MIZ7_=*`%$I@V%X+zu4 zUNjgdcc#5?tM&_~^XNRa9ml_6=2NTTakOeBoi9(5r`g=*tF^S}pUjI?52@j+26cF8f?ts>u4)ynL0xR7?hC2fmeLs!sjJo)WKKN?Lhiw&ht(ps@5Esu|Lh{wk|oK;R(LB0V+nLUk?WN|DN@^oI@im_GJ`JSa z)LYb0lc*Kj(pl7Zue+)p)D8~L z4h~umZ7r>z+EwkKttEBVhHC3OX{?-X)OJ-nshykzCl%)~8Y{{o49p@`<}K0#(tLv?g_?&XSte zrxU3cwen816YWSl=wzKnXZPRF!KsGWT5KkYqL)}#tcN46jbR)_cd;Fs*2ltw#WrFq z8cSo<3JHp=Q~mofw5_y_9qhHuwB58pPFig*xrVm4gQK>AgPk@&>+ev*L8}ecHqd%G zNlu~@bx=4E2hl-rplV6$sg~rPw2iEjme9r8j?(829#VVh18Iq@k;l@Q>oRwmX2J98~T)#eZohI6g=i~Ty+ z1kBHvx3Sl*-dodmMK|nExv}j_ZM#6e=ff`VRJ@LKL>-!<>kg&_K z?r`|t#=4)uPDQL;y>oqSe}Yv$o;MtAYkI8HeQi(f7wcckD!HdTo3DNOy4JFSeYw)) zYgRwodhx|7(Yp3~eGMSJitz}tVSyobfvGm#=XC9^2HMMP(_}bb|nnzoSD!M?d zDLdd9$6KhO)PSzj_QdYGk?d)CKD*%A%Ad4q^bn8D*AbRftbN4MNl@#hEfxuL{Y#xz zJIRZWZUQ!;RXJ z8?A=!mK?5uex+TCiTdeG@|X!}~MiW|w!VhyPk#=>wqO`1-Z(RZYo zY8O#s@#i|a7H?-9Xeb?E?N_*?N2Nw$9kGUW^<`ak$=;GTt&kSsH2)IXLh`|SsT4cf zkNFy2ena-eN(*{y(}KyCy_7{0q}sF_Esz7O@4s+Aqz`qq?SRiqb>yLRrCdwyD|Mk) zu@)xKTCOoJTxT5g^c>7S{tn5>PTO8;s%@`zc2a8FNsYAav`#eNGNPN&WVM}xMlI3@ z@(iu)q|mmOMrm7X9WiEVOOvFAa;Vf%_Qe_Ue)3yVU%46mmM-PmCry^U)Ec@}8Y+#1 zX+xLF?s5xxraY70mWN4$@wVikJc*u>dP}|0#-G#ca(}v6?L;4mILJq@;g-=^dRE#< z?>RU*IMJWw2~vNlzdV74NmJ$VbTgg%!t9_wP@}vYc??m%(Cq(xL2Sy6yi|Ti?j?Ij zpGcqJ3951MlGo$;n*P6^-+w1XZ71B>x+k}m8)1Ymk=kPh7{kXlAMaeo+F%T?jog#3 z9PkQVl9o%$iDWroDp|^8Cu?XM?k6_K#)#if(cwn6PTSFP0GNpU3k6uV!tevZcD5O1 zr9(|VgV1#ObLk6}8R^dw3IJ)kF%&}t`V`2*2Y45E{y zk8$T^9Sx$ju=|)NkCuDO?}{xYNN{Obm4QbS&gyEI#*4bo`&ZT`+%{~`C)zlHbtwFs&4G1>yI8bAEXRgCq|RuJ3; zu4?@D{A;&gDh9!oD2^zOm`AZMsf_y?4(89zO2JcTCbTt2n3v#YOtxTej$mzthqzAg zvH1b3BaFwLlF!X)=I3MuuCJt-Gg!DViallH%vtP|@B!||3^12h$)Yb=OV%{zGUG;9^$$~XA+3e zwXmfKBwg7XLI-vi2j9E1S%Mdf!zuY+*kke)8-!~KwJg7XSblZjy=ZRBdI(x_RB0Z_ zI^ZHsabxovxGa%Ga`om(>?0I^kHL`;E1^+$tIt^$J&{*k$_D_igpyU4PMC+q0PZsB5;Gkd=_>K{fXu@8(anXN=Gukt}l4Z$6xagij5EfeOj zA&De}MS?<-39h7Bh5`R^2)`<^2Dmi5=2!$y=yxR_?XCe%$+o;7(WEWOx|BoUOUP>D zZFIP69W2nx9~T*buh=VF62<^GVpFBKkqGbqWPInFTT6^ ziTRk|J7=uffU6dlQ%8b#6KXwaZ7yIsp_6MoQ?j|j%yr|LjB69Ale@8+xQHN*8EfJF z=e5H9sM93e^F8K?dnc)*x`1~h*C%-~FBU9}IU2(1uogm#Pu~XbEwudTZRQ6>dobA1 z!dY((^T+*~;<0Adi0R4m(QQ~0sLu7bJy^=~g*8VyOASWwsU+T^JMQ^}k(3pU zSs>isPR?RMxR3KhYQkDE2U60fDd$4%uJtW^`mhS9kRN%_%ih9S2riUBfs@8OVC||N z(>Ft>unxGNGWbRo3&9}SwrnKs`8bdRLqfrulgz$1I2RndMX?^Z*YaVHh0wbJg3q8) zxDU~dlz5M2{h3VWj~R#iIwILOu`}-JxR8{keK;31?_eMye80}^*xR_YHhXdxHrAX> z_RJs7#^M^-f-w`=WLBH(n*9du&&(ou8SZQvem#tN?1)EDwYz}S?&6z)h&BKtKH*XM|&jt$ey)L*Y7D}=oH04}qohh=V=8s8#RZk54 zx5??@!Qi7v%sqSX=_KjyS+>l4MA#&SvSq9%ZafTN%Tb?w!O(wvjp&Ox-S8{sYxi=@ z4+X!qjY3#0q2AgXPPl6FWNrKnwt^hxjvj*iS>;_8LY{4@EMh}R@fX?Cac$(mR;dpz z)@DH{n@*M<3YDFoWPp=%hM|J zM7sRfJ1ud^i2d5i6L+Xi{n|&3D#rX;$J0E4t~wMKXf_F7fw5tN$Dz0Y^O4} zrX7t9z}=`(oCOH;ISUnZoH>%T-(#AxY2^3cpW36G6T)sZ!L6ztVX@8GBBAH8>owVO zl6fpQmt=xnZ)`p*tUrFO5mO6GIMWL6apoaR;;f0#owF$OyM-6sc_Fc}Qwj_;5pFteF#JayR*m#yC8te!BAvqr+3oOKoSoJ|l`aW;u; z;%pJ=cFM~hm0r(n6Up?5OZ8Et;haq)+ag$f++}JHn>kK+?{ri>q&bDN31sH!@(lJK zvDnsNXnoD^>@V!v#RgQ6uIkwEle{V=c(--xB(x zqH&+poz4c5sy%1gvSDPyBd5!_1ozoyV+ZyY`C(r`V-z_3!cX0CJ#%yMkKND+JEAMk zfo;ES@6=XMd(Q$ph*HC9r_(-~aK0*5u>;-_+VgA`fi2A56jye&g;=b{{-{ zuPOQd;Jte_$>D`QMZ=d^V1#->1%~OIr019n=Qz(-*3k>CFNo3^YX~1u)Q~$kWa#l zH+;ywV;`2~lC8(qTyISFANv^i;Mg~qx;Sdd<>MQ3a>(lAyRJ1Nr;h&wynOtq#U%{# zq43qf!{O_rYhmKq80}AvhJOiM8@@Z*Ulwur0{2G{c zvh~HjBj`+%z~xF_Q4OZCad(|e=pk=3Vv0j}jJtU6r@ zdDm%@D3fo_e1EYnW|RFF>td$(>VhxXdv+V}%d@+IYtJ44eqzDZXAeP6JZr`TGXLxl z$hXdYQr3<{oeL{*B$p%CNBWQ_kw4(W8|xzvMCyQt;IcCEC~#BcsmNr(DKZZ7s>sA- zANFP>!4&e71>-o9k-!0h&f+pB@(uV-h#Uj=Bj+?RV97oEFwrjN{&Xg`kkaH!88V$*>{AmkA!Fo>$7W@_c{ktq;$Pkdszz8&xpnqhFWRJF&WGaaN!8mq(Qj z`}q76=Sv&YyXU5FKDDsw$Yw0>`+mM|$wR--zh0Ev_w$p>9a?TF_b?6k;^cB&i!bAQ z*xlOt^R&7*x1Jo<;KtUQ-BpQSt((&6#@DBY_{DC!AC%F3o9py!mu*2KQhI&!uziD` z+Ye9Dg>8@Y%!&N=yQPlfzP%FKDCygWt<$>faO-=#+jqyhxVHQ5Sa;XfJ5LU+6T9G-(Ua0zVogfi#-nSitO$l%%RP$sD<$%y9^zYLw02~f7*PP4EQ0ehgyaj#yR*zxDz<~Sbu`Pfj$+W@EAKOg5%zs|#&`@)CTirW|6 z&mmyn)3JBk?2jK**m!@T{hihaid^G@57d+5IviZJ_*~P25$|}mI(TJ7OwV6##3cs) zx^rQ{#b0+9918q3rD>Agq1gU$frlQ2#Q7fHxaeHhBdZrnU61Tqe5?79#9lX=A3YFy zx5d%3lYeS()G7X}Ccl68ZcNkPe_s&OH0*~Nss~{|yptUic63TX^RVzYZ#afsZW7ll zEOy3?CSeaI)}mp?AXtl?i?zqXCrRPQ&d#HIkDc#)z45Wz?*i^DKgf>Zbaue8%mvr$ zAIp6oZpFm_!TEaQ%mk1oCzef;zrkgmCXGZ~I-7d?gZa2tsJQWud=W*)M61Zh}0Lm*b`$eo-bg6#C z5AOhe1CKrvoD90?v;GTzngF>G`D@5tk-Iis=G)z=J&FzC!y;^-pgc6;Pt1Kd6WH0%Lr-Tz|Bna&~4JkIoOcCp#nZL==;o()?9I6V!JM=vxw z+bQIN@42I8zID%?d+S2|bLah3P0w9@>wKNa9|k|E6M1|Yrn1vxB7-6$r$M?f2@o{_ z5IqiXZ6x5vQ$Q@)rii>X0McD=z@rer<8~Z6M0!Ui2SLi>vtm{~fN3>{<&o;ha&p-x zvQiBz;l55z0B<~gBAbjoTYUb*chA+na6r0DFWd?|>vSQ}KW@l{g5tBv3*{tkNK}HZ zaAZ_rpVK!kJ)M4zUjAaNx%~3i3p{);hfRvky?lPGSr;7@7(Vvu*?JfJuIBdtz2>!@ z6O4PWotSg3-?jJ|$K$R!_=fq#oSHylV$O^sw_>>zcgEMb@J`(9)LM7qXGJ*NeKhNr zVRw_<%ZA1ucXlNYPfS1e?9mP8@0EDg>#oD&gR`$0pL{bczQz-S*MrLBUuNujnjGF) ze>3^M@2;%mc>m)~lM`LxmKd<>1|A1_BstG3XHFRrZuS)!_r@L&GcN~T zcgp%=siD5{$>>v!bMxwcZp=@5|HmZLFW&yDl2mMpN^gd!OYx^xm;Kh=^>O9FnND>o zWBVx-Rfbt7uT_ocf%^a(gK-Qx*}brerMMSV2q*jL?rW237MAONpZHagi=kU~Ov|sm zV=62sd-*96yw#D))um23?in8G-Ugp!zhwX9CdmQGfyqJ1ZIZ1E>ktxxN6pJWtQcNe z;1>cL$7=VNF7Qx>2)z_Xz}JwV|8x4COi+TPEHXhw!E-<+P;V=}Qa+7E=HXv0_uq`K zDkMR#lClE{`vd8vVWh(gw|KHl`LAi7CCp_KCOt-6@V}%KqL*8cFiwh_|4(Tx&rg|w zwrmJ$fi}k1AXRHX?|_z~Ek9zbx&NQabrR(~$(|`JoUBxqqCNfwNjicPWB;0z1t?bm z*=yrmevPDHlnri@gVkMSAYp1-7?mB$-VZd4Oi+IY+6Vd)v;%IrR+9Fz)jS+e<6nj4 zVW=PK+mCa04Rn(pC9dGbaJS=rkmEpkATuZ*Zusq&d}VX*4Q_ya7U&xqM0SFQiJi#= zxL*@jk_qB^v~d(D5flx&1*&T$YkElc5nFAIV=afcj*R-xh?Tz@Pl>+A7w0BO=z9r$ zkFUT@ka*wUuu^qcMSIpk0bO`u|C~%xaC%_lkd@=mAAdp8aip~uezUFqinVCZKM^y( zaJxVP6+aL?zC{YZW^qDvFwUilQUMssTu3E+z^+NkslOFX9!n_2#^!>{?O6ca_B5#CS z2MQqWN?)?@-=<#>w-NN>pHVXsqHG{UzZRwl@p3?%|2K)}@h4*CuRdQe?*#oLQZ6EG zU(k1uIdQi+c&>(#HWtq1e@1T-iTkTmXeZ7Gp|5`eZG#Rxx4Ln6NnuW}4ufw_F8fT#TX%?=qg*gEJe@ByO7RjKv?nr;a^XmxsIB>|)Eb!H!EYjJ+5k`t2 zdORzpal-S!!mI6n;_s0miVL6!v~L7i57JoZ6zm|?cNX`Z_mjg*wR}8 zdpaoWCGmXN5#;k8JwrNke|p(UkWbi%$4Td9QA~Q3Q2y8K#EaW)Whz1(p3g&W7f0a# z4K!ybsHHxZFuiEFg};w5sWv)eSzAm%-y8zXA$oBLs2=);uQ`Iqdb+JzW~s2(r9rI9 zl71rEnf_zq{qQO&%ekFhv9$Rg{9X;mW$6IfsdPg48_wUU9ooD3Ekkd1G>?j-IXR;>4dfI_fdc@M<*VA$)Z zvDZ;0SV{VUeeo~D@bqdUudzt;0@gg>D2HCz7<&kmMR6L>n@vdX5NWHrPueQ-$@@ya z2N@2+{y|x3UE3?KTplqy?&&VvNhb3EN;uN23#hcK$%L&N`(pe??yfm*uU+^^Ka1q8^Zo^r=?GS z!MxND^8}~0Hr|g+fjmvQ1-#ntuedYf{vF-^pM=4C0_*dLU9s`Q;44A%@H~nJ?R$ma zg?t}=L#>=Yx7gdDd!R7vIf51V|0&pmHhg^!#+p8Zy}TLeR%%7YNi;DHN`lK+cc>i+Ab7vjw3wAh~g~%zZ3S2mNoPg z;;l#}b3||O^B9*qNI#65qCy0BCY*S-jujKE_@3QAF-s5TUtnEU^;Vb^zEyU-J#$<`&9LDhz zj9(M>NbA50!69S)xeTXPKj7uJg!>upAZ2s136ugl1NsWI6m%5yHRw2Kn@z3`_de`A z%vny<`fdpK_eeF(5`72*%>xawa{TWqtNkzCmCc0#RwCzY>2l8Lu8qRLFTO(D&sHu$ zg8U!EBbQNTQccPR7;l)1X@9Z{y0?pStS@#c+{h4h02u;#2;?aW9k`B6q0h-9&^VC6 zM$5sMi_^(xpylZIWw5^*<~2Y3Hw9n3r&>Y{SWoR2B-)Tfa#=h?K2z{_+0Dr~e$bYa zVx>?`L6FDbxv>Sb1G4)c!~e6e)D`vOgmYCGGjZZtvdcna$Pyt3WpO2gU>_tzlPTgS zWQr1VuL^a2)h^_cP2vNxi4MTLHvnyvg*MuOG$FsB8CE}w9kS?5CV>XwnKB79NZFSh z0%e0@ZDauFbP*Ky3UU9RL3=^}Nt|SHFK!g(kogw6gubI>KHeK%R1PDP@J?c1HDO#1 zQN)rV$}d6bn46xFF?i;V5%-W);t!;om`DZ)TgjXFUmoKWabylCOtBH`hW9ZyIFa2- zXT*ypr>K#oD>aw{mLZL4(Enf(DtiWV0BoPr16C$D_IVvIw!g;G_x(WL4z0cjxKKDNNta;wlSP^;LC+ zUenmfb3QL)pX2b?5-O6n*_;#Kb$*!wbzmN}wmQowtBZ``JpKO);Nj!8{@QCe9kDQgDEB~KglpVGrhfgZNPcByXJgh_n2GD~0l_uwh|;p()!4CQ%u z%phn317ZCCku<)PTFwrsA$oI`oGz`O(@h2l!{&R{T~}EBc3ix+U)wpP5UDw=VwsTBZ*)|F5Yzs|J4GDD*A9Uw(qf|MNCV zapJYAn)b;!-QWB2 z_iM~py0yP1*qiy1zg}lq(%Y;TX|vwLx($tdCT)Ixf?uzG7FWjRG_PlOAXQJ$8 z-NEc(-MUY8=l)Z$r}Q7~4+VQj^l{AJL+oJRhJ6(~AhD-n^d9Vg2IpJ%FK&~AH9@l< za4f+c>-T0o-Cqx1e`722xanpt$9n7x)^oST@ny3n?XNqV`c13>Pcv)2!J4gEXZ5+c zzIt;#)2w?Mck+Wh7^WHiX&q6|XP-K;J{Zm`|7oso*4m6aYg4zbX+?tdt6SE+BEkAn zpo2Tkro4JF`N~7t{B>&P_=9e2#a^cyoBa>-VbQ*Z2}v^I0-aQdBu|llCCS`YjXm;QA2O(~_eZ=~o63ZYKR4{hpcE zJGE$+cd_5RM>@L?GpEVGTqHH|rDpEYNE#V;+G%p6sWh=amyR~!6YX!l{jqe5JjI-% z5o;@rrHPqmM6y%&&2cw2q0yUJn=st)&;3|~Xl5VB4jRZ*zpa|^@i_YU=iqx|8Scl2 z4V!f-I~Qfm%9`&e<~jJW@1gHK##pbfKB?lbpPDsPpBsOe{@CxUP5)~8cGHiV^xGzyBtRqDW68>o!Xx4tsx@T-2Ya(VHB?zDBL(TdqX(pTYY1U-@b=O&PRx8GA5u+w?NrnMnQZ#&-G6ys?s58}`>@O@D$v`j{Pl+KF;HtS>V5pJAZR zni>7!KiTPq9^XvV!<$knx=%_a{Z5_C!Ohgw94Q<9kbZEupHKfD?Q(y`9=clWRk9v7 z!093bu&YA$b}8gNE2U#~(PJIo?S9Ucy$Vv<>_6yc- z4od-p^~#Owl(mtuZ}NMDc~2jsD{QwJ?V-63GIqR(dtql?KuY*BX{vUtFFcOlB{G>X zL;UMMNH=Y$>c@YXHnZPA8f|nQ@_Gxz-N%~Yr&q&|^8|aQcXE`uS*077@Yja5uJn{+ zru`-Lds6x^2V*{F_-hE~op2a-!{&rsj%!s=U4exAb-q_Z#{C-8qmUKIhGbX*#PwG< zVEV>CNXWJAFcP@#yRGBgk?&u_WLPTlXGXXWxe;*fua>X~xdcoFp7N9eYBg(B3G_Uv zr#HfJI1kmBFM^qa8n{0VP$ki$nZGjce@ioU=d*Fn)V*u@Vr!U+#x+PX|@v zTy0J|aIJR2tqy({z=6aW`I^ShF|K>axL+fW*WL%xx>fQ0H$PF>40?_x-Zi=-e{5meTs3&Uqv+cMb5-=owEMb1}L}hcY(#a1^GV$$8hUm+8=Q(0i(as zas2D}`6~-!lmfvRg=a_#8OZO31V8G2TxSgg8vEx;w6j~FzuE5VC_nexI3wvF2U;^( zQv#zq)qy^rdn z$!2_YV+~=Os4kMpNk?D(pj0yVRh#`mA&>N9-A5}59B?%ncw7S351IgE=IW6 z7B~ohiX;sJeybye4K3h1;CsrHkQeA@Qa%ZtU?|YHrCblxMk)oFp*Yk8!l&vBjO$aq z0UyFQa8V?66mmgXcpRRGQLqr!!ag`Dl14#hC=U2bLwV9{g#&O#BrQ5mn+-|<&hHo_Nho;MDY*DT!MvXIv-PeE^(0q?>!Uz@XQjTg zQr}tW$FgDrS-*x0BH273KiSAnHu96L4GaYImks@8+YX1}vdBH?@16`$1gb$Z=mq0p z383qHz65lg9lzPLLunx1?CqcrP~Pm6B|BxwPPuc?&E?1n*liB%HV1Z_1G~*ZJ?D56 z&{vLc;a8EINpLSb2v0x<7z%UX9ry%(f~z9A?t;SbFgye3G8g*KH4o5HF7%iiJ?2JF zx$^^cn43DxO&#XOE^_|>e~8>$85+au@E%}y_hNT>u)93iT^{T%PXp)(!(cY7guNnp zNjER)=A{nvQipjdcivCoJD}|Os>8F;8zum0ggMMjAfrL09M zYf;Ks6ulIE3fjU;fUb&_1>!D7+{Jc_6vtkR*MR15Ql!Kr!0t+7cO|jAlGt6zI?x6N z!gN>zdqhgr1oCx%en6M^qs#lzWoZl0W$BWDE=#w9{xAjJfsaHUcoH_i5x62!CJhvT zO3(BVXmnS2^-kZUtbB9BA? zn|lPicm%t61Uq_U6f6Yl_K|&XQly#!{8htWHT+e39$tcJ@Fq~F)xL#aMXD#kz3?F5 zw>o~S4~AK=3W&2haaJdPHPAgjP zg_2MUT0wunUoHIA!e6bQ;17}7=&*KPcnGk$+MNI!syz?h1^m{=Z|$ohkKF}@;bCYD z=lA5?&bK>t&(B5W0TybTP5>97K}!(pHd^$Ax$J5cxamy0}64}KMC z&{yP1?kP`Fzfbmq$*>Hzz(M$h|4|kR3_`W$r|oJ5lCNbAfVnq8y!V$O^@wI&1^(lbwGP>5>HZ!h`TM zbcZo82e7HGMd5(R3w7X2I43N%0BLq>4}*ZdqdVp8L0joD5@y0m*a=5~dh3}63IOHl z*$DaodFxdYYQnS7Q=~U>_a^S%l&3do_NF|&DNpaofWCX9@7~1O`?5%%RFDVCL46=U zeaKIr>98E|+vgzsBGNYoxdFd@@!Pi>jD|(9j;TIn?)NHu2>alqNPh*H0loDnkNsOf zUqJ8uUl$p05Bv+T!2#%Qz}Ij=?`ot_IlCVCr@V z`WZrbhl~U2W(fX32h6z;32*f-gm8knb7fdj|QQLB3~@?-}HK z2KtzZoy^2eX7&K|I`f*ytfqhs&vv0FU^la0gZ1z^oMvgvf~-&yY65wgL%!#X0P-@2 zH0GuU>R~SRGM6~#lEz%>V(w8Ooq5>GywxJ}(eZqAJij751(az%@yve(eiM0xGQC21 zU-F;1Nd_nZ=w!){@TbV@wEx%1-|J)G9FMvb1I4Z2-}?%o>nO)M!mT5o^=SYdug4zNlg@h5Sw9je*ZTKiFPvbxO&~jzhbGVmXhR#4VLx0D z*_aCQKouaajXmHDzr2$dXv3SHfDSMiW&rx%v;(MvP1L~$DdAoy3y;I|fZq>h!77o> z=ymgEkq@!S56R<)lJ&lYUTLW?So(@tc0C#1g?neNCO3+5>VzHU11{NZ^vG^AhI(J z6oY!u6(+(`AfBDX^RYmC9(9U97nlI+;A@dj2=~cwm@l$B2aw0z_}Psee2Tt5%?BM} zHT){FCkfEuo{~@to`&wQM&vW}_SwJS31|hRy|*EB2GZC|8hek!4YqeP0_|oWcD9ea z?z<+kKRxsYYhBBk@&$GNL%g4ckP%ZUaX}zQGp0 z;rl__*+F!1@U+Oc*vhvjL=N2p_XGXdAg2#<9w<9LVqS(ohF}6giO_W{R97-IMRaw<4#eh@3`G zrW&a zLm$~X!7@?zgQsq&Gxd_MvCVa-$J z&kThDUFEL}EucHR43s55I?cZxK85e$qA0e_RDrvJaug^F*iwOKpbHF!`EXKH!Ky(1 z3zB}pv!V)hfW4v$li$KsfIQt-1hB39-WT;R>XNlfRiq|RhebvM8Fisk|GSClx4 zVnaobz%QbTMF3kVRshg>vD(lK2w!XnOa*jYjQES~0_wlmIZ<>Cs(3me-r}X91~h@; zun3Z2JCIHZ4frcj4cYw-Ogbl_Z{$rJy170qnEn1~@GK)>x_BoU1}-QTOAo z^kbqPSPS34AEL@+hO*EY&{rAsQHDH}Sq)o6mHih`KV=D57Co2k1TVrkmXP)C)p$I8XvOHr&#sw%|&a3xqEs;U6~AGr(Wi>ijbReJ!c18t*P zTfoMvjR0)5+UxKh?0~P}IQ%ZEx(k`1Fgyg`|k2Mrk2VK{B1+d#X*F@E&&g)Ktjc{F5 zJ>q|S8SEETpS;yC3h3bp>hOsNVUwr^glRzj8vG^d$wu%Rd?u=4I;al)U=^GY^;8ac z3dX|$QH>~TBkG`W1t1@d--ZtWoisi#stIW}Axx78pf0q9mtY3qx5*Cp9)1_~bZW>C z72zr90wdrRcn>~<<8VV%(~M9Qs>8F;8z#W(@Bw@U=S4l^LJoKU>Oxz131-0Cumiq_ z-$gY`4f&xWJSFPcqEKB_^Ym~ZVC&7BLJt@Vi(vzxhvw+NMFg@zDX0ytVE{~p<**eF z!6i{GQ$QXl4-KFr41>9l47=edToctQJ=_OXp(*r$v9K66zyUbJmUslRK`E#WtziI6 zh2^jn4#6c+&!vDoP#zjUM;HckMYW+$+Z2aaf&RG7=Wtq7Tl(U*S)nA*w%fLX{xAjJ zgpc4`_*GQ9B)AtIgvSBBw?psku+4V#A?@(n4m*B62awkD#P|Gr@EIHz)t)rk4+Q$r z4o?E%JJ8>=7O6UX2!!o$QB=n$YO;1PHRdcruMop-0N=}w(>r_Op%XFaI19@JTnC!hlig;_wHJw68F z>}dgM_j~~A0(H}qy6yQ2P=`IK!=A?94N<+Q-(L3v_1miz^oJ?%27CzLz(rBLQ$v2J z2(-Q4U0?#d4j;f*a8^_w((aQLibHjvJbfrnAIj6`Rag(7!)Z}{@!OYt_Qh{s{Puko zdIRzHCEmU};5eY8es@7(co?3B?l1<3v)@PXE&M8~e-h+^2cRxI2mN6ZP^SKrsXuw` ze_qr87byP#{0_kHfVS`w%z(Fn^aqgsfE%J-q>f&sj$S1F7aId*dXe;BTnNPZB6@le zJq;wzf%gD99rzfug1$id1Ig>awQyS0OBQ5>2LV4XanE~+{(De9psyZ8KR>7)^nr=6 z6uAEnqJJ37J!SC2@C3XIU&94aL%2r{DFpN_Ll|ESp?wbFJ}_iGd=8{L1YHhI3FL8T zQ6Qb6V_`mU4;p$@)UbPj{11B`(A}^%VIzD7*G0YD61oBR#g`YsIw0)Jr$i0ckOfLW z4QK}3Ux$;%@Wp`N;d@1mP>>c1LS=Xw$m@s&Kwd|X*O3pwlh6S+!2waD$kQnFIqEl2 zqtV-F@;Ca7s4<+!Oabyb<|Fu4)L3*k7Tt{{U*jHy^`gd;r|}y^O^5>ZHZdj>)}6O(_7xF_pTXIt$(qHLWnL0P-{)|I@LV>0FzE9nGMOGb_NyqGsg= z^e}6>sM(AaW_O1%fSs`3s^*}FIVVKTCBC`DHLp0J=lRb7Wq743ppyl}vEWrv3-5s* zq823q*I%74YVnJ(4}K8!S|RvG)RJs)AGClkM7{nL5bsjrUAjlq8|3ee5r7_+q5EaD zk!8C9+hQG4y_p)YjW2D}dM0e12h;Z{@! z{H&n7Z`TH{t*iv-dDU!K23z1Dp!3x+KxeFdD%L&~YoBU$Hy90z0NYu;A5MvSM?)6C z?>qGY_vE_J7G47Kll(U90P>RjyQp{D0Qz223#gy>>cACI@4o?SfqcJz9BznOn-TIu zMR*Fjzz87TwQs>Tz{b{6wskI0H|xp*d02-&*S!qH&3dR>KNx6N8<012g^@55mIC?O z_&i(_wP^xS#t&?GN7Uv=0Nehsy{IkdWeYaG1smV;CQ$!dz5&wzCT@r3u(un`hGf_ZUy9nt^?h95$Mt=qfH?Qx1$lw;?tcV`Yrk>-Nz~^a6a~uoIXe2B zcs@TV>Og9s9u7PUBY^T8!2Z6-3H<>1%Lq{3uWXVL+#cX`_eH&-c&3W>H6IA4iDiNL3gHpNe9wQ~f|* zfB0F{k52*p+K;=Hq;raVoazLBiaLEa!W#+Jb0)5fmEZ*+e;20#b#alhU;ItfFU8>_ zI05AGSN#6k7&^cXQI}Febs+qu*{}f)i~8+Bm?i3R9w0xLY0sA@!8`DSsNYiqb@BT^ zSO(`sUC9T1;jE}Xo(A&pXFb6F{v_?IBZ0bQO;TN3C+aWi;jf;eu9tvqqHffNX0TYa znD^IJf!XjqoQE5tRca^=&0rnu7Oib~7S@Zl(nB{m2xs8B=tvsK2_=9yBgDzGvYKBZ z(GlXcd3l%J3UIfV!zS1(+KIqZ@I3T~rSLxN0^(;aP`kwIHipTt6L5Dg32*m>Oi&0O z2jcg7!U*^dNSFU?*U`J6EVPH$;eu%MTJu<2plnHvpdD-h%AcYdj1ip@KPd^9vMX#5 zo$6kw0QG>lQjwoj9|7s4eiW7fc~AX0{46>Rc}r6O&_^2blQtJT1P!4JjDlsb3-F(g za;78Rbi?5d_z(_>z6*WdbszMGRq%!A^u=H-%!TDZ{?b#H^!QJI2Cj*|I|bYi^`SGo zBRWG2n!{^=4l~w-mjRt-LRXoxKuKr{gvm4=Ho|A3GiL*2}`a>}O1>ekLF?d#WlET?2(#rlTiCj{vUZ`TuB z%$pWHiSp*bBuODDC6%O>G?G@*$z76OGD;@NELkM0WRrU&yX26Zl1p;Sy^=@rNeN+#*{WBu z;(80m5_$#4l6p4BQhGSY`*k;trFAoo59miZmeHj-mesji^(tOWr{Y*#{mHQeU3ahI zCDne8rPQX@9iMNj*0k>2rIT9Ordx|vYI4Uetvaf){&Bc}9MrjIr*5iWmu{^)t8QKS zqB?dbZ>mlA=R3Di&AWFmR$MjYSVGn2SW;EtSW1=Qc)u#bv9!v=@d1^EV;PmEd(W2L zRjhl@uH99n2eI)lInQ`T>= zGWk+9q>R4w2vP=LGE0Pa`x5&+D!m_T8vn>%5plEYjO|vG%Ko>s{xQivrtps`Ii|#R z@5XjBW4HHUv$?R>yx3|%VNZ)V*kL1%`=zX(P9vNO;u8Fs5tB2rXJNJ(VvyEcCE z&&Su)t6eW&#r+jmR-9hGO!;i(b=llyV`XDyc9vOMX5<6A9_aW$hEj)0?J2dQ)WA~Z zA$zHTC9lIFcpoN1d#D8IOARcs_kj#0R+Ly!{8Y}RIZNcsl;d*trgz8EH@K^Gx@+l9 zrTa47*avo{E0A_{nwe?Zq)C_hLh3y!cBUwuG%$80);wCmlSsKp3hSV?)aqqb)z|cJ z-A!FryVXopKsmg`ll!%wbH;w+xQq(edoxEaSl`F{k&Ez-@?yHUE}={EuG#x}Yw`oSjCpUheo&X!59tcL z!Mu{Ltnc+^db7OQriRp2&0ed{th2~HI-AZWIlMYvJ<4!1y=?UCGi0XBVzeqFv9a>hyHRIZNEp zj0kR~@+fzOQ*v6)$XPkZc;R;}7{$uCf#Qmt2<{M(Ii`OGT8e z9OWucMZFpx_i*k>+xUp=kgtgUkerlsUPdpYn$#XLAFEgBrlpY-v{qZ| ztk0~EtYqs$>mBPo>l5o;YmfD@^{KVS`oLOmZL~I7Ypq?@25X=7zO~odZEd!;SX-^_ z)(&f@war`Xz2+_PUiX%IZ+OeRH$Cq1)cPcADz!e{S|t&0h&TE+9p{nM`nY~-95ln&#q=Sop zbmwiwi`~iYUOz;6yN*-Y>BbwWUv)>i+iwpMO&7f@nkJeynmU>)nm&3r8qoGm`(qJ% zx4n;VVUE-hIqTWHGuiVZp5;Zon3v?G@KSoIy)?YdJr!^DzRSDrRrhXqcY7JU^jJeC zFSC~gd#K^n^d9wUdGANX%NkW)Ht!xUyO$%Xy_{Yy?_RHfm(R=X<@X9kEw7MQ*t;(p z@rrmwy?=Sdyy9L7ucTMTE9I5;9`Nq>9`wq3<-GFVLtX{1qF33g$QtIrq;drQP8dk4L5yuIE&Z@2e}_nG&p_l38|+wbji7C39Y zW8MkxxOXU;(mU;K^3Hgld*6EBd5694y(8Wa-jCi--cj#o?-%ciciFq@UGjeO{_uYF ze)rCL=e+aY1@EGF&HIabKFe4$HFxB!w3#aMu+-uH+DKYS8+k$c$QYR{Q~ZAEHU1az zb=v(-*-zU)>Gvj=c>pA;(x|(5ll46+yUM5X^G53uijj+Is2ZzhRcqB(4N=2cj2x*Z zt0`)#nyzN3SJVRamU>sMQSYhu)q3@T+N`##UFtKnSM5`us{`tgI;@VUU(|2vhIVw4 zPQkPz6+Lp1u=lN|tLu8YseVSc)tz-;J&1mN0{!_6{ia^7-_omevVK=@*4y-FdY}GA zAEH&C)Ti|sdVtHO^;?z|u~J%Tth81}tFTqXDrJ?m>RQiOEv%MSE332B#p-IkV0E*) zTZ643)=+DB;*K-Xnrtnw7Fvs}SFI)1GV4ui6@A)zdMwj-eL#P;jb7^u>nrP!^__LZ z`oa3yI&Gbc=!hMe8(9;1&wj>kW}mG5_96Q_`+NI{{iFSpebkxnyyqNr zzIA?ZjylJkv(b-Y>0)=sio{;#o#Z28V|Xw5^4NQ^wXt=vqp{<$6S0%A)3GzLv$1or z3rQ+TCuK^?oRlT$9)3kCY{lG1YPieXH{Ip#TkZ3;0)azAl*yPvvy+|S&-?mlj&El}FvDDy!aVpcuoH|}GMZyZ5@qTvjZ?o_MNMcRRVA-7aod_XW3`+uiNq_H=u>z1==;U$>vz-yPt- z=niyWatFDC-68H!cbNOKJKP;XZ##&zFCWv)YhkHc(A+hHa{`-Nb&H(a$4{O&(TJ5_D(|Gy6tKDd+|L&+iKCk$XZ1|t%E&tuo@GX5_BRi;} zDq$UU4{K>mSWk#9!G8|UaAQd_f-JX&UBmE&X%mN`M@ zVx5y^9u_*CUnhFic~xG)R+sSWH%nQWU5L%TDT|!9owvn|wMWWgW5MzoHat#V$BrlR zTe{mBgTG-cT9#qcpU86VqkH5ne|ERRpF_Rv&kk2ILVa4^@n;{&%wR{$8h>uJh53Da z7OYtNV@?1O=LU113^qFpOxf<^7|fHI={gylj0WS6)#}RXs%2JjnB=fN7-Q6@q!gI- z789-#M||POT*-3h6|P++q(!g6niBO;8JRZi&+~#*%?ydY$f@MK;EZw>`F)a^y*y|? z=2UdLIwPHh^fauuhxr^N577hFvFrMMLw#znf&C;k*w}7qx3b&VZSB_fb9OuXdAp3VushpbZW%vywO_Ei+1>3Pc2B#P-P`VC_qF@k{p|tvK>H4*)Q9} z?Gg4!dz3xee$gIdkG03y<0((tzn6y|ucpN8IrcnBvR|=ZkyQ3Vdy%BJ7u#>(f0eyT zGBUE*$osJvxk+X-a+9o#-F}pN>{E`#4+KP=n3QqOI~Sy!bJe-Zy4dX{{-^OvO@AC1 z9|g{_=bAL_d8EJCe$8HLzhN)2Uk^t#XY8|#YR-q_noIU?_GKsPk8l34|747FEga?W z6HJcf#7u6;iK)TIXdi-x8&k?>&_dG9+~vJ4g`K=kJ}19Zz$xeyatb^5IsbBsI7OXe zPI0G%Q_?Br-0zfj9&pMyWu0=)gHCzpA*TYhTiJQo@7HV4&T2WeX=invdd}lcedkH% z38w)zXe`!H8u>m}Br>s{jcxjSAM7z9&0-eHPEW#({o91wG+&xzPhnhVT;nlm<|kIX z-@WO^s-)JVHG5>CFVt}wS`ndHQZyPh*?LMwihpHrXH8o9z$nE%rzDR(qSh z-QK~>#*DoGjb$?S&f(^CbGf-0gXeMcy7}DvZc+C>_g`)iw}4yFE#wwPk1^|e>wE0h zv`gi;%Lr}cRk3Lm$@DR6><#?1%lG#8au=h1eu0x#7?oU%@iS6Krr&!cnuV2$quwrW zCF=xDxzm+kokEzC>|y@1fU$c!w}D%M^iyzeDP(7`BausyA0vAr??)Cy#zzK3+C}QK zewmXM%*)ncR-;yOZyIQ|wrW_ZSX(-X_7=0E*`$&pqs9-_ObSSD}<{#$Nh%Z z6l;)hGb?)RU5aC7V2S&H_U{8WW0Betr7v$tkJ-p>${N2Vu5EZm3pN|c@bH&d`vzZW3|CG+fI&u{Cu3cTgG_7m%=NX)F!=)D?AZl&Ixy|mgv>E(37hraXGv)R%5l=?6&(-=-xvQ z!W|nn=Y(tgzj2BYOEc9>-(tbKKK?QhM>QjbzPTf74e>j($vY)dtoo{R@sBPL`ZiY% zu$q6seE2P~AQt1kffP6JxqO@J64G0uj@;%p8Q(k!66An&G^GaNZxGKCuyjqzo#P+w zJ4$tirM`QFl{7x=3n-3CP4rzYRY_Hnk$kTN{d_mg2#}t@j0060-$e;`1orrpV&982 zl&T6bON+;mfQX9i^40<37!;3rjrjTu)r%o5?%-(B?|^aE~K5dmL4C zYss(M=$9ov)^D%J>2cD5b-C62T6MC1kN2Rg)th80J>@o;&3)!$S-@T9GkKLYy2G-R zS;0A3!@kfp*`}{s4P?93&}yRYv6@=FRBrBu!_`A(R-kH|S%Ip@J@K;YXkCpsYEtA0 zJCmAcXR)*BR&K%fFB0^zS3N2$|D_rtC?qsd;9-N~SXRIVo#dw;Cmf)L8YZYN}qt zih8IE>QA-F`rZ0NeN2YxtKD{1yNu4nn6s%aZBMW#=titn&DM?W74{0<%E{tn(XE|z z&U*cvv(ee8+d3aOn{_*9yR%)lcXm3T=nl>v=L_A%x#8TX1~QG&Fbl0r-#arJG*Vgq_P3zwP=Xxq-;Rl{NmA8h!|6?h)aY zWd92HL_f@PGo^(oC$B=||hrQm_YKN2*FWo;t}uY$@2kEy#HOihn$vaN)ThT+Hz_@kCs9 zo)Qw{b|gy<%a^hXT`reJ?2C?P+F)RNQnQIJSPobULj2XwHZY8%g^PSx6 zMWk{g&Y#TGk2!~&{q*>ooMgtj3s`v?>kMJVwX@UOX~Mj^8e`HD%+a$u=^3Aiec3+4 z3dGl}LvOX$*(;fm&9x`9-Z7Ask@oB~HDp($ie1JoV&~xwnZ|Y^*CM|}PDBnz4n%fE zHnS_WJhCVxy;GI?B@!`{9xk{*x2_S10_hPWWG)@K3Mn`(K^#zdGT6Rl@(Ogn!za z?~hq<;v6q8>r`$|`U&(yh(OvDN*hBdIh0n1QV`Z?D<0P9E0BV)MrVOr5Z34|?q)O> zNI_Vmy}&I9Ycv?R1!0XA<8DTeffR%_x(wWcutuM8H>1%&3c?z#25v!EquIbM2y3(( zcQg78q#&%(ao`q&4fKqr5@C%L4{M}&SR=*58Yv#uNbz*hZ5TGtvvCW;26{GbLD)dg z_zJ@s8#VbeZb8_MVY-2y&D9{?K+mKQhBX!&k0sEvxf-M!=$V{^=^EROhY0j+t_I}| z^laRMbOSw8iZHCPYwbQar4Y;(9hx5H`>= zr47RddNyuB*g((5EeIRv8U2J|13eqJAZ(y#;}(P^4}Kk?(J*YFXX6%x4fJf>g0O*} zxfX^E^laRMuz{Y9TM#zTGqHtX13eqJAZ(y#;}#D~s);-sDXwQD#r15YcvvIF^=zad zY@lay9EJ__Y}|sdfu4<95H`>=WemdxdNyuB*g((5EeIRv8Lfn213eqJAZ(y#;}(P^ z|9%~z$1rT5XX6%x4fJf>g0O*}aS6i)dNyuB*g((5EgqJTiFAz=*Rzr0dNxu#tdZh+ zHc}8a&@-uqVFNuIw;*hwXX6%x4fISt!?1y#jav{l(6ez1!UlS#WMSAq&&DkX8|c}% z1z`g{ql++Xpl9P2gbnm;+=8%_-LE6G7lsY=Y}|sdfu4=qDrRL%)LfqP8p%^TJyd(u zoRzEEJPTGv72$cFEIdcy@Qmp%a)Mdj0of&+S?e7}cC*7nyv+5>15md)@ z8TBRm$JHDyzqIjl@O_oWaYFnSSc8-U(x6EQO4?f^>)31k@`|S zUr%M6K1lb{9T}xJ(sguI#^}X(LOPpH$73_sSrI2C9aXVP)_aeL^4B2bf80)@yjuZ;_s<$LnFtBD%8j*pxX$4W7j=#S9{+ z&cJhnT3ul+?x;G*vxVEq;cA{Fo6nxiXjWc(5qC?~h!wA@tYDPliL*Q^n@Z0LRs=ik zBi-CQ*x(hH{}^JfT}BVoJ(#I9*9~=T<|$=(8Zi&Elr-8=*LYU(1kWrUV8>@O&owSr zi+HAeyc(wZsjjLG`LC~Ps0yS}Naa)+NP+iBvJ!Tb9ihFlT{h7YR2?C%Wp zQ@__wJ;|g_PRv~9ihtgeIB!gxlN0CFiSsJXDf||5+CQ%*y!pP0b6)I#o)8~#$;1_x zOl)z<#21%LjB&}tX(W2WPzu7*6NYX?-rZdZ#?aXoJI`eokZ{00%pfTQO5~Jnc z`C0C#THtoS)^#z@I^X^oW{Iq24F<9GX{|z76@9%*4$Z-*^8FYpU}n!KfvyVVx8 zm=Q=vdfYN9C;Iw@r_#)S7A<0Be-LZQEg4>K)QJ?;$Mqjy(!g+)&UxNx?{siFI-OWG?!wb|-JBl&6SsYN)^ULIBF{Pwat5=i zJk%NP40B$l1clknt->1YlZ>QBusS=Rr-MG@*`y=v#oCMyYgv!;bW&UPVJETk`Xx`~ z9JQ|f+uWO{*W$Xlx!+*+$czO>qGn|E6O!UTPygY%J|0}xN5ZzwYsqPW`TTxCtIS*> zCiL#dhwyzM9-!4Bmd{d zdnyBanyYydXg<5gqv@5*4$K?u^>py1rJ?j%C@l`9S3~LbP+Ag7i$ZB(D7_L&^FwJ~ zD9sHe_AC=Q;QtpB(t=Rp>AZy7%ut#UO8iHB;_9?eni@(|LdpDELA;ccLTO?sO$a5P z&P@0l7fNG8iQgeeTpb-sqe5w9D2)iE;i2?$C=Cmxp`kP+lm>^=pip`#lm>p3 z%YaboA4>f~sc$It2_^osArZD$DD@1b9--7dl)8n|3!&6Cl=%IGM0`9Ch7`6eYYUd; zw>-C#f5lF;2+OnwYbotUc&>r=cxx#A_5bUwf_}j5=(mbvq4ZNI{TNC=gwoHUbTpKX zgc47$CF1)oln#Z`x1n?}l)ee2uS4l@D18-5Uxv~bp>!aWJ`bh+p~Qb$Ci1d3l=v@> zgxj7_`ZSbwhtemZv@4W84yB!;v?G+ZhtjrC+8Rn9h0>N#`Y@C>htdb3v?-J}hSG*m zS|3X5LTPO%y&p>Nh0>Z(dN-7kL+PDRS{+I&L+R~MS`kW&0TP;A9!hT}q=){;{*US{ z=4QA3*V%3VXJ-D#ENqkJiM>RdT;}KNwl>Ll)U-)|9O#!r(Ne>(skcg%R7X;MnQCjQ zWvTk4s+Mw4%3dkUr;Mc-o?<|X`YAFcT~A8(_rfQ#2VOpUEV`Wk4Nd01Q@irtuPOLX zy)ryqo6XhEUPe0cJ>)(1RGyvB#NP0s$n40(NUz9!{@!jw_H?!0tOv3#GnVVMVS;4n=5A{ z5qyn{&NN=5z0x3B(ny{IK2uZ_0l{gQHa@7cFAO&!OukNwDD=3!slu00>?Af zd0Zx0=S-Z|S&k>IlemoIIcaiX{mF5m^Ydw*0kKy3?pP`L!$Qd))=5|_ zlYHf(p$WPDnk{ZJkK7&>qqXSK)W#6QH)+E2HoA43a2 zhUOfvSkK~e%4&u@-g?H5t*IYF+z#scKI`}{x7J-*T+Ui$IG(W{z|SPBH1bL7e%~jy zZk6+;^#8TmlR{Qu@|WEzL|hlGf*j9VriF~P@^L(C<>h$A%0rx|tlY@cty~<(TSYle zvWnn-*2;`L%gV{|l9hww1S>o4r>%_0W2{WbXRLdW$648sPgq$wp5(4xm8Yi4Ft-oe zpmi6%u3PDRjitrcSt|`LW3ALCgp~?;ypJEuS5ctvj^^eO$JA8sqh zNj#H^HuXl_PiyW&&Kb>}CpJcJ;5g3wx|ef8uj6t6W3uDf%brfGL*gs~(V>Cf?u?uW}H-52?+rah8=7mm|) zXO3gFX_XT+?U6jTMINL5R;m3~sh`LF-pE~avZ1Y5aNWcg*;0?#&KF$4>kN4tNSrlGa(XIK`SALw4sD`(mG-jvETnv zUn_+)Z7nISPHAG)v=ej`Bd$|AHS&2)tB9S|rUi`G3ZIW^S`k`H<>wMhI~S&;dqDcc&;3Mso%JAR$cP_{>pK@`i0|I zb<{)}eY+hicz3jNMazv{SG3lo^lCjv z{}=jVSJYbKJEcrp91s8{OKKIziE1Us3F>XGp61OTCSPyiGDa=u zI8M=KDIaZ?JboaSbDArgIG@4weX(L=aDUI7Nrz{2`F=vZ>z_~G*)OoBZSuO%uh|8} zcviiF%UCs!<9Icf<1{rN_v6%ToqwoV`hlb@YN{V{iXU>4A9At@!~X}E@Dn(WQ{!oi=$EzkBC#lA`pXI+D=r5G%H7=<~IZjYDaX+o9AdgWG zBmXbn-UG~vqHEW#)YVfx3`mZ%_Y5%PoHIzyNrL1Y3?LZ;3L*-K0xDUN3?c}Kk`*w5 z!@uXB}Sk|{c-d}?}{(YTK~(P2-kmX7I747-$jc*%1RujJz)eHq#QlHQC1jF z25AuoF`WThKq-m+`0ICq?kU&tZ3W6o?8WqTfjbLQx*OB0u$jnJ_V+szEC^3qu`II9 zQml7FEW!M1Vlk$Z#Ue~!|K~iyyEh#T&sFRjR~6QK3-gy@hqNVKTT}3MPFv#4#iYp6 z>}a+5M~m(M`0g&AisZvUY`b(B%WBE|5?@2j|r zlBNEWS0?C5d>>v1ojb0d*mNjSN=thze+_R{l%l`+aROH#mNk$3ovy*xQT~*gR+yhH znqZ214!<+veT?=<*W3;GbhO9FQwDFQbVfBJ+fn`o_}f(YiKk&Nd&HNhC;b?GL>B*V zJ-5kjGh%!}+B@tP`I`~@y#_y;(wPm@H!#u>r63OTGw_t7JdKT{z+u}wxHr<#HKvo0 z7Ah4V^Yn&B=jCB~6;>a){!yz;7Wf^!QXNdep5saJ_dF53f^&%64=~ap=YEdJ@2m7( zWF+?`Ed81rYnM#n63_1W4a;Oi0PK#6HK=TJ22f9oW%5Wa2nII!C6eN2WaI9ehPlZH1c(A zaF3#31rK78j#wRwmb+LNis@pqc6=R^z_+>KKY-ZFU=bC(pijNI^6CtQ1wS| zpU&v((h@B~bS7R z_bjHu!&?!)iho*I`!ZU?XM6W;8*e0jjYAvvEbpFu#2bM%e)ooB*(+%Ip5y&y+j_&W z>{k!Z0emHo=K#Kz$8!K*^$bFXEd(Su|lrprA% zUGN2eZ|wIaZ<%fF^~2m)Z>eqN^+h^gL_0mc2eU1`UidWzE&OkLi){-JPayAn%t=g> zW4^`oT+9hfPsbd?^i<4or0oUt3YcMO#0d{Aj;qikqp8;x^P|vXV47{>wZZ(e=s$qb zSul2sXP?2>E^pX|UTe&c#ONwhY<-OE;-TGgDf)HP^;%+n82TN&X6tw@Fh2yn5MH$p zd(AOF2raG?Y)#zJ6xuuKxiZjJ$8>MpB>%*AG4J( z|C7Vh2Q8-+G5@2(69Mh06)^2<%VXNd;yYoqua?D_D$4)C#o$*jwDXq1-+J28Sa!?d zY2knG=xNczmcsl^N6(Dzwj|c=W=q8MLB4jyeV}*@J+ZskVwiTeMKSGUi(uN(7RI!L zErjW#wjieMZ2@fC4x@75`Q@iMJeB+#4);_4y2HK1|BlA;_OIcSROMfdd4TDa82Uyp zdbd`u%Gx&D|2TqUga15wV@&QpMsB2W8*TQ#j@-!OHp(~upN`wuBRXp1RT{C8$7}p! zw8noPtC2@))C2!9Qloy6M{4}PGfpE$QTca00~%33gvcG}|N00nF$ZEB#vH^*8-E=W z=iiOF@f1dk#8{F4WgMJIe~cj+^Bs?e^S?6^M`WzY|D%y6$?L+i?Z1yT`3~OK|I;{= zng1MPl17+(_-|uO@(7bt|DTR9`N#N@2P0!k^2m~Zj4S!iqe}LOj4Ao#U&fRi9UW7W z#xVZ>KB8o!|1ydajiA&QBPo%^&@tq&w+t+k{9KMRo;6LPN z^fURH{VaY~KbxODGAic($(WcT@@9&6tmdb2kEFL$^sJ@(C4OsoAM~3>D;Lda)C~Ny zlUXBEy00cM=#MBcB1Qy^T15T!B6z!@`512nG(Y3zh9)s~AG84DO@S6jiZGT&94N|o-$Q9!2H+u9uPDx-Kevc{fm;I0>D)>(-UrZ9pbVDNNbod1czMj% zgjN8RF<%E-g~7MoLeP4^I}feK;5%IbUnPQ<2Bi_^0eYkg_-_%s>Cjq?w+Q-h6nf{a z9fg+B@rXvCZWJ1$qF$74(E3p(LK{SR9g5NFBP@W@_QY49F(U(L6Gmo$Hf7KkNYI$; zk#X0lOqw&2^0EaZDQztoITDI7-wAxOD>L5_i9%=pN-@su7)H;`{bG3I<^+<6+i z1EXvaawimH))R6!^if70f_7l!2`EO1A|#b>C&t4V6{zcs;H`mnVKAbCz`Jz>8v~{M z0@j0eXDscr2ZOQO1nLVSC@P0ujG{d2%_vHHA4c_sMp5H2-;YsmLi;o7UFc&BJR^Y` zjR^341nM{Kl8I=qjz^MDsfsCd!KgH;5&_Rr*G91ikDu*GAru-htXiC>GMn4T5 z&geIwBN&XDB~TL*K_7$CdVofg!nj!x^hGF*q5|~q(B~MBY-|+ct%5$!cnhIq6TrI- zozGY*JK6?Nzi71WM)2}L7cpK4x|s1+LYFXJJaj4Jt%fdRENx5n1njra<%~@SeV@U* zp%5z=?+TRm3#@@sx`B;_(td$G3nhC3HY=3M4|qR8*Mg6*oUYH08B6)Lj?vSh>lvF1 zx`FX7KsPcr6}pMB4?{OIcyktF3*%jde!|#o&`%jlf8Wa3?NIs~@Mb~j_ve6Y=8GtF zj$cO6(66GzK);Ss9l9e5+0{<48*~SIz&=2~_eY_14=|3-@gO6ML&=_w;5w}jJ<6ai zS)evE0^V~3>Ng{JKR{0~vM%%_IE`(o+|Dp)a}oF+DuVH$WS2mXfF?6&8xkUgk-tOF zGiW~&;sS#fC4uirBA|^)2r2`hvOzB~Dh$2MD7qJttz5%8bRB=ksD9AvAPvi@+-@>x z5fvE0EP|o?-z`R4D4i#4N9TtA%%DA5pq?lKTBwD%!+1YJ?=oo77UCE18@8PZrE>%N zP3S!ay*3c|ol%XT4;b|9h`KrWjq{a~L5mxzX&8EvOV~sN>`x-U2&%i19;32AaSjAM zD7n4S=GI3A$$^YD)yP*HSE(2z39Z=O3Y- zg2Gk^x|gFBAc7hM&BLI+I}(QkZ3aT&dY2&vwc(LCCK7g}W?(*nL0!3!iHt%y%DfC} ze}v4(s9Dhb4BZQ55`#8yAqy~cZ<7TX^&YekL)WJ)%&7IyA`D%xvM7VrdjvLT=-Q=g zl|b9RkR=$pj%7(kZHJa(=$b~GO9ZtRT843S9%UJ|7g{a~m3eta(Y6($JOr)CppSx( zl^E9&S~&{keHBJsfmV$|(_8bO;e=$C?E z+)-$|77Ti=ARu=X+D9u!lO45=G6MPtqv`lJ5o1$~S`e=H#>t;A|T=>vKWl+r}30hA7)--Zs1@*(so zMpN1bMIk#M%xFs6kSHHPhcfzI=&&ezpu-vTeiM?)i8uvDGU#(B&?3y7hCUPJ7W7%h zNa%A>et?c*(EASoexv*drMv*>8;HQY3`Ua^lJX8HvTL#lK)wR%n zt`PKP#-Z%xD-6BYqwPBa`4Dmfqnbh|GV~5EUu7ci_meOWTu$g~j3Qg3^Z~@xLl9hs z-YFxt1^aUGm?wJzE;p2H2%zsDg5feAT_aT9K#)yNV@zQv-HU-*44uKy`-Pmzh;N{? zqGX58X3&otL2(&%9{M)pT0zMsfGY|mJ0L0mDtn+~q4T1UUCfVC54wQSxu6RfR{*+* zagRVLtpt_%5=J+JE@fO1=&~r3-uDj3pFfIn68bTtFG1HuxeHy-=zGu&j0-_GGA;qS32a8* z)`4zeyc*C?7_T<;Q^s|KZe?6!=r+bZ3Ed7p$9^dPzhK-D=vNH+g6LBdLG^;}U|c@v z&L}ib_5su}=q|?5x$llr3%ZBVS)h9vksi8_Q5w3RL2EvOyE5uC=)ovWp@$fq2&MH1 z%F81R`lKRQYZS^?N)ymzU&o_R9+T|?^k+rD)+m(U-!hu)?o<@Y`_l~i!Xj`hqh5xd zWl#etVGx9P(Vn zGp;!_#8BM>`GFDMhbAztIFz;_I)c26D*=W55e~MbC@%onh@$fc8fB{rFzA0FRKX~e ze}x#T6HC=prSMZP5YyC0D3o6B^gcoF2&G$qAJa3I!+me z?g6SSqpv~BMZvYI$}^hmq5`9zgI0{P16m1G#<_QelDz_|XH``hsvlIxGmg%wHsiWO>oB4ov@S#Ubw$5{qw}uMP(7Av5T!q~Aw#|> z`OFc%fi`C7eM(VUfu{Uy%Fw%vYR2ex(B=%?|H$tp^krzvC}ekZ{wOO)c}e>sD2-Ge zz_o?azS>856bc;WWd{ITbCge=Kxe$~4TF-M0W%Ux`3}qoXtyXkp_E6&Tc8KyC=Yr@ zc?a5yG3TMZ8AoN?C(06NU(gRM1eABgCP3u`%xvi6QT9Qfh(dYtWRz6sfG8KC0~tp) z{1h03d=t>YQK;O9Fgkz^WwZ~a^9P#l4Z|5t`AO#wG?gdi3vkF^^)%xskI4>zZV!Ey zaWqdh4K(Hb3yem-E3yNiJ3z;P7cozHGnUbm7cVicDs&v<$QE8<9NGSOMn4aoz-T)D zL`Khmz6vHG&6Jm9XFwl@zRu_q(8-Lx4V}X1-=I?&LwWcn<7z>v?18HdoyNF^(CLgL zTc%?J*AzOFah0I67#};O=QQESj^;3i%H-`RbD?BUz){)KZ{i?$H_BP)yeRje^BGMx zuz+z?c4TM3;o4J+qV$9=j?x>tlyRM*%NR#x_a381L6=8)1NuJW$WB&(mDoS+^8?1! zhpqyv0quJYgZ>AC>h1{#yP>)d!j^!3#MlbZj~Pd0x{h&AL)SA7wx~8Rmh5CBW2-|q zfz3jAC!t%I*y7Mn81EbCr%WuRXDbt14!VtrEeqWaJ`*CA()Ky{62Y|#Lcd~y0?@A+ zdm6ffv2>m8WMWH0>3F-b?K$WkCbm3uFB4k^x{rx11>MiY7KR>RVv9l#GO)*rZPbtC>6V!)Z zWrDiUYv4PqPsh0qZeX7BKaKGyzi%=D<>&X{4%VTvzRLtu*1v!USPr|=g0VP`mW;*m zwPMgyQ)sM2SR7Lu#v&b5Pfei5r=a^RfqtTb>aYm(_!RUWK%h6Lp!+g`{-A>D%Lw!$ z74+^)Se&2E%mi;jvogV3(A*6AH41unCoImN-fswtJfL?PdVfJ5S)q$D!AvO1Ls!D` zS`4(4&)>H452=I20rfu}Iv0y+!~$2=Wp1ViRg8HNO6Ly@ogbA4Ft|_a z4;j-2x)!X%@_|tL+Xl>&Eo=mvFi-innQ?oeTNp!W`2>6lTiOTR${0%5wkTDh+rekp z52f*Q@D=9im|rty2lNjvV16g~2I~O5i!oFtyBUM@={-@ZK`BkZP)b>mmDyw_o0gi?IFj$r9O~orv6{R&)Gj0?V zD@AAu^%$xPG#2>S_6jtHk?Elh1fU*>jZzbuj&ZODvSUJ4hSD~`je};0QUUr9BT7Lt zM!~tzwM~cy(9Dc`4w@xOacEXXybR42r35s46xwGFhT2k0PDWON=894pnmbB4XdZ^z zg-kp{ZO$gdP@T641KiKt7%1+WCO@bMP&S04^uSgk(76|k0^2r)Krv7Y&^eR@bdHo( zK)$!3G?fLgZ9{1yDF4ed^qy@f?})2_j!ozH9iZa@w+u?>Nu&WfHUV2R)uP;nR*!NQ zT7#iyg`u(}U`LU(BE7mL<{xIrH#4=P+=td-$i7Y8C=a0Z7=0c}`vo4^Ae}St$nF|~ zMws`ZjX@L4lWmbL0*`F18EAp|hoF>>R>-TdP_kn{@5lz%1K}1!DSf~r8)?hXv(B^w z?XmuD=%Wn1TbT}^Bi1K7?Zl}0(9Voo4(-CY_n=)Fx{ggZ#-r=0JL6t~_F&xm(4LGN z5ADTxanRn3n*i;@xE0X8jGGAU$GDZy{)~GS`WWLrfIiN+Nzf-4w+i|s<6eUfVBBix zK*qfeeTs2wpo17U8A`Se+=tL1QOE{|GMdVn@{~Y%nBj~^*VYKeO@WSN+*;_lovpfEz|iEHvpY8aEqXHp1?~7 zrE>$+rer96KsSX_nuwy{jVPC(Z!+!$=v$0i3Y`{((m$Q?GDBxXDG8k!lpk~q;68?~WE|xU9Rs*^&{d41d|J)8_0TnpqdfbN zaT}m(8F>^+`2su%rDG6P0PPoe*`c&Ag6?OuJ@7o}#wcVDn;6eRH%FoS+!jVrT0V)= z9QrBa<$!Kw+zjY8#?ijFgU@h|Goha|j*jz1l(Nt-quhjk#W*VSucQ0~CHn?~@^xpF z-=NMNRHAHn!`Nxucy8lx%ysq6v0-chvanD2P7{JjrP5T9IK9u$a9NFEkQHnu-i$dr9J0qz)?=hZ$-e=qb z=mW;VX1$UOr6YY!#;YtuJhma+0B9WJo`U9P+#qNj#wJ0L*MuzqZ3r4+9(EhwjIl+b zEf|Yq#-qFm3%iYP%~)Jt@$DE}4%!|(ighYMJ2AEjv?pV`LVGc`AG9~mNy(7L;rpfeQLZ2TcCr*(0C z#UsCiF3{slfa^H^1h|alJ)p2dBG?Os4HEd)QN+Ur3_^~f`ub472;_YzmQgt0 zP&!6D3WYro3g;V2&xj7t42(j#gdSoghw77=iK(WoA@&D4iz|PeQXYRNo!S z#)tvX>JX_Sle-bl!~^O?lOrp?-X! zCJfc*g_<(5EwmYCirm9t`cv$QjUnj2;5*&&Zk3#~3{n`Zz;< zi$YH@dMxxwM$U&0VDwASfeiII3Q_t1)merpO@R6zg(w|>>MuidY#_Hl>3D$Z-a^9| z`3ZD5Lv?VW5sds4I+CF}xe%2TkXxZtHh}8sLeDaC8}vDb>g__K80u#edY;iopf50T z7j!g3^-!TP4E41My~yZqp_CUu?t{|#1APig=M3b2D4i$J$Dw2&KpudSO#rIL3XvTE zc@Rov52!vX^eQ6{Lnkph8TuL{Db24lIt4nJp?)->DUALOI+c+>LEm7g9xC)EBY%dz z#ZbLeXc{ALL&=5!)lr4WZh%@1C0hYh_Z*tVXgcm}Mqh`{VWlu*%-N48P(2a~34Bf;~yK-nVBk0=N!caSNh^{3daE*mN zWvIP5w3QLVpxYQV3c8&U!=axs>Urqrj2Hp^f}!^6(3gxD3H^$pcI(jBjCdNlgHdCk zI~j2d`VB+vg(153fH)4_%~1PcXb&ShK)JhdW50A#i65&E(4|X0qSQQqBH?r7J7oAe#fDcj4lWLmXY0|rx;xxdYX|v zpl29e0eY5^J)!3qYF`Q^GqM*ng`qa5(0NAEwR3^dm7y0IxgDCyP`gCv5+kV$FEi9e z5xT-iD$lD7wO53$G1P}KME4FrZ5JWBo`Iw?y1`HzMktMuR7N)$YR?FL&qyl&TMV^t zgnnSCe_!ZFM$`U&VkDjS&y1#h-)1D`!5v1^aqcqI2Qc&tLv{b5Ul~Vb`Wxf=LVsr* zmFYdk^@HAL9F^$<#`PC;8H%uv`(8UJ?hD}zSdb6;5Xg$;aZ>`^0qp(A!vrYrQ@~)?XmtQXa`23+`_P9LSBJ( zVI;~e+>Mbaw{UlWbE0~SFzxFp%x8uUVyHefJeYB?&F~P$yZ{}_I7%Dt1q9Xig@-fl z40HrT^;_YQj5`aZya3b=8h(ax$xz&P2&%^kKgT$fV|Wxp^&jEq83$Vqj{)PcKAqD9 zFhvM~atTibGqHRUbQYM0<)1+3gJoEL6#5=mjrly#HQ-~+BQL`1!3NAf3Ec=bW80$8 zEsQ`pg=t?vRE2)Z1eC6=jKj4R-o^x@pxc>%@`uh11W0!n*HReQ2i0GOVfW!3n4baN z32=`SFF|*K-I%BHrE&rS+l2Qr!D#3{CU_CLp9yH&1K<$$xfFVsap$1Nz;P^J4m|-* zVV{&{Dl-s_fl@hvfb5gD0l_$EGQc%Nb!=fO1K{vGOxxo=LUoGaOW-o*agB$s0NfKK z2w!I;UJN=cR_M-4_47FK> ze`ly2D146zCPMEs!3#np2oQ_9g&(000c;n9pqUu&BWPyETMNwsa4mbVm4s|e5D(4H z1lVCh4v-V;pbQgo0i?@AnIz-^*tduBO9(UGK`3pLh%{JeUdCGw&Bu5s_XL~+5m364 z81F2!0OKLA5(+W_&LN>BV{buAfzsGNrJ)Qchcq05mIoCv4_ip6#02S~l|dCOM;Rtm zWdb_yYK-?Lv^wL>hSp#L%BPx4KzUP(2`FD5W`YdR+Ds4!t;2X5p>-LH>ms2Z=Snv|or{j!cEbSZDQo<`(j$Gru+i79rO*xc7(pk*xt~$7&`z;c?}$$>vYC;fl_(E7HoIu9LDy6z75{N z^8QdNKMC{-D;X~@^aI8#2weqM!)8iDKV-ZD(6x+L9QqOX80!>)u4AY!Hi7IIc=@55 z7>};w&5V}-{elAM8LuLA8{<`ho?-&p-)Sb;06hcF3gJHiy}|@^obMTo zo34?&T?gv|g&J`#qOBkvN6BK-;qJ0N^YR~bgYUK4TuA#5fn?1r!hp)DD^1KJ9- z#_~PTM?f3QBVQBSf{vK~4B82F#r#faH^w5b61#)Zu;Paa1B_4Qd5H0;ypAwFl_%N9Ijr9odJ&{z zzBiQe0{G3L*BBr7rMxI7!lKOcqMQhu4~p|AsP9Q$lrdpZ{&_PpHVj4i5#B9mF2?4C z(!PL=hr$*J3%kjSYnbp-pbZ)CXDH5}@NPnp*1UZ%e;tav%!_>V--XU(`~}cij6WZ* z;+pYWK@H=>Uh?5Q2){KHl1f_F#P2OMyO&-w29)Bm62t6wJ){rJ&gvzcjQRper-Y9(E{gLbeBAGf z;<_nH>(qkgVf@FT$VbBOFGR6XjKzJR*l0%LIK>|@7y(n1$iaAXgjhO{37!#R8P*}( z2qBi^9!vzWLL>{uuLo5C%fw75&NI0v=C47E0pyd)2Ss@hY9q8MqqaeD4utv|ihBy7 zc0pS(3iqmHTyM#pu^joEjBA6?ZK1ek2#xD58RtyskKidSWf$%!FaHjWICTkSbhge+XEXwmw=_1N1i1w1MguT_l@M`UwC<1KXY|jnBSea2f{Rex>xlr{q#(bD*!s|PAU&32A1N7_*apyy05%+4 zg=PoH*Wfxd5#+^u8Zd=D+cVC+|sQWR8#o#Wh7DgoT9e4JNG6;K`XOQAJDJIsFt zMcJfu#5~S91!a>0`|&Bwy%<{_`UT_dfF1%^*AGFFCq!&RDDvh!{vM;D$mjDXFnz2x82u|0`AO*UP@f5YfFfTBALW)t`vT%? zDDsw&i=lAi_ZFh&0bw%>dADT*;bWBGyR9P#A2Efqu0k2c zC`tq+;{~x2QK!F9ZPas!kUbTn``%P{w9x?~eP_^_byl5Q=hKCBFazGggnhs5WOuM^)WzFBL zyW$VUACJEre>MK+_y?i4Ll44!I72vVI6ho3TrylSTr*rR+$`Ka+#%d4+$G#C+%No8 z`1$bo@Rab(@VxM{@api#;V;97!^gv?!Z*V|h3_ZCCKOC)o6tU?Tf*Rk$qDZytWH>$ zusLB{!r6o?3EwBmM4f08U1H|M?1_033nUgzERk3yv1($o#BPbB6JJT3m^eLgcH;Yq zA11C#+>&@8@lfKC#B+(MiI)?v9eFANxPHwCtXUqi9xSzfocVM z6zEf6e!i~A2Az~b;PLChDcWMlai#&do_y@HX#mm)&@U2Xqn2?byY6?JWl z6WvP>)6eR0u!PBakv^@z*T3lB%@{NPZW+GinLj;&;a%jz1B9CH`9c z?;*6tg;B#DekhzHoDeP)E)}j6t`%+=ZuzGrbPx9r4+@V7PY6#9&kD~EzZYH;ULXEC zd@Ot-d?x%u_;vzn+Y(AfEMZW>u!N}z^AkQy*qHE1!l{TQz*kN5B9@RXF$7B}@~0)V zOzfWc5-eeA;*7-QiE9!+O57Z^gtLhkqn6;o5;FeH5_;#Io=?LPlK!-WeXxY?`Pah| z=ED-!Caq7}oU}dZ%cNa@T7vg4mM{jE@N?7>GW}aiP|4|%izPQnZkgN#mheRKi^&s` z-%OsFyf}Gh^6BIsVF@lJT}sB3>?w&6ODLLBDWyhA`;<-*OE{DQQ#il#{K+(#=F-wd zEFnIv04$+l+7MX6gtVz?bJFIfElWF`b|URu)DrLn`qzK-9*eupSbfSL?Z4nZqj%xg z6Mhdwh-+i<+@*i_aBu(nzXxC6{`LBR>picxy>ecNE4T+-eh>sqIp7r}j_jliD+7V(NgD#TV|KPe^_2{N(empMUNAq|_%b;$7+Dx#SfW ze@n@qTK?j&iyLUGi-+kw;o^5fT*7@DdwJo~Tcm&eclFTKwA3T1_wd)dS1Zw9&RV(uWsr^#>T+D+bWCSr69$dJ6 zVa0_HE=;+ArwEo{Zx>5mIC5eCg*`|?;R}V%Z#X|UWo~jN`s*M6Ql30_Fy+~l1}XJZ zg5=A|7nA2C$0z4HcjMfpt=@O@#pRRlI!qMl>R6IVm*S9@Jc0W_F zUc&T*vx#`}@dYLn zn3(@iev{O+aK8eH1(FIp#A(SgFU!uXPh@>9d(P~+gvj%G&Ov!LLpSDWhUu$9Jd{6{ zqALpwTK>8E)};5~p<}%3VHh`iB}%;-`KJH)N1p8;|NO>&bE+-yd-@an@BJVA`*_6s z>dO7`cT(zI=dMJ##(!~8cOOqtL%#zR8z~?D+kcwIe*5>o#9m5=N171na)2tJVLH4& zK)0sbp6;i(ba6%E%EVQOt47P{UmVuKv=S@L)4w=8U*lSXcG2G*S^qkXYl^>vzy6D> z6;~fi8~(8@uI?Xmq;XXcb+JZV-MISyyxm{dimX}cpO^gW?>L-cTzv4zkw!$=S4FBEU#3;VIy%k<=yU&~A?V?eutwXHr zQy4k_G@@w#yRog$MaIrg!|3@K{Z}aQuxKROh;Cw{cwH*fsxF#{ z?xKh2DSCcDP)!xU4T5;QgtNtnK|E>&o}#3Pd>gKz?g{JwiPse^*B3La3>1 zh{DlKJ*FO4PpE!~AH4<96h1bO;Mtm6=h5*xhtBT(sB`L>dX}E8-_%!BBb|y^5t&6g zc~8WtETXAuE}E$pqPc1*TBuf{o$BOrsCJ^adQ$XJ14LgnQ1nwziT-Mkn5w3WH`EOA zrkW|eCz1WZMs1K;E;-K0t4ym;+ui7hP)gk$iI_X-YB^1F3UufB1`CWvZRiarF42(QOC$eIwTwGuxz3erWMdWNf+~slA^)qsj>mZlN<$9V)a)sqdeM#Qc z_gu&&xH_(`SHr9AI=aU0NjKCDbHg!$QdiZ><&*i;F_$E3X~ZT~S{zoN$)-9{eyr!o zb$Y&BuNTM-dZFB?7pa#;29-`cq%w#aDnU$E)5J72TV_<>%4RyRY_9XUuzOe?#`rs3 zR1Yy-%@M0yF=&h&ZA?7f00>vW3nsTk0e^ zPIr?p>+W)sUMx53CGt%@P`;~QbcqF4o=Im12f zp3sN&3HTbn8R2TV+ODB%=9;=nzHvRwP&L;LL}Zk+>Yi@x8o4HjNz&VOc8}^&>J3-V z^>n>leZ0#qv&GyqwuG(Vo^?;Vk+!O>?w$0$jVtOd(=@cy_Z4Fcgx*-`Ug+D zSKRw3oF-jp%7WLEQo zSb?uQJ}|$UCSDfP%**E9bRWC7+&VYSt#{Mi26@zzriz!{RP#Jj9TAyps5Pd!m&46) z8{JGd%WX1c%}qDk{3PDTHz6y{?{1FUEDM=C>UU9E#fvg3Bx>Pnp9yN5Y2miGx7{c1 z9rvl5>$aMfUQSunZIjj9b~!{hki*<(a=824z3aZvcl3{lF7y+;y`SAY_oaK@EphYR zSEiV`ERT7@EpT7k0=AGXY)jhGww$eGDlj<&> z#5Vy0(EooR`uabmMv6h|X)#Gn6*JV^VhQ@;FGZjGW$0hOP|XwXq2K&2^pW4G*2zrj zl+3J7%Pi`Qj91^ukh(6z>V`~EX);MGSwL%9P#alBKP1cQjIx~0B+Ki}vaK#6+v%dR zy)Gsn)y3u0y19Htw~(*s9&)_yDJSS&a-!}n-_lRXX?l>Ht_RB*dWf8x9Fwv6TM7+s^61a^>VpQzc0VhN8~PjRPNQ^%6(z1NyW)sL#m5 zI$0jkDe|oTPX1(!{MmT&wy|openp?v*HuZLUz<8WhY%qcGjh37hOhn)n#QjT~2n_bxif78{SGB3E( z=2dsbEOBRb9hYoAb1CMCyI`I)1JHM>m?$nvimJi$?qV=1cr_Sr-ZIn7bTh-u6fcQ! zVw2b`z6?eOF9t7}rDmCV&z%d#1Y?77=5zCf`O=*?tIZnomHAqJC$GyJh*0%>@N)1< zFu^376gf~1GPzAglRubc6^ zChx3wCdg=S+5Pr_cgQ>J9r2EN$GsCldhe2VImqI@>CFx@2iby$f(${Nx6zLG-u32t zGrXCIsXo=_rdSw3_8@DJBgh#v4{`;$gFLpTEpF@EhPIJyVw>7#_7U60wzVy6OIrr9 zjE2A~8;%%A&mcb1D8x>B0THBLM#QKwh{n_oae{i=0d6oN2z5uKp`M6C)CYdtV?i^o zgjdR|Ad+$Y!(|><+uf?zNxVt@bnfx&6X^X}_{x+nx3syUXsjn*-lH z7Q_S&F*|Y~T1QUA=g94M_j~!>Y(C>`*t_Mw>(BES`1Ac%{v&>CzpdZN@9cN=yZB}O zihg;&a)dH|1-~3>9EjR}U7H`isE^nmSrG#ygP&b(j7Fq8pxf$Bh=IlNzIs@Ssg~2W zvaJy(=~2W@>V#-YeG%(vAR<7GwJ*W1eHF2yUPm0MHxOfL8sbmQK$NIOh#<8T(WTx; zfem`$Upi&`9J%&{M*5gv2FZ2v9%FX zu6t~c7}N{Lq9zyKQ{>C*!FgB4*Wi#1awva7FA$U`L#7?nGwvw&oBeIQbE8EHT@=@7Ac9flD zXW2z|mEB}_*+ce}y<~6MNA{KdWPkaXd|W;upOgb|#Xcnm$-#06-uj2h;i9k{0l)HT zQA9o?pOw#vyK4V7sOHyq53f%YldHr&xmvD~ABz3t zBO>12NAhF2PW&p@%MFNsw@Gf6TjVG5Q$)ktCb!GaBd+-{kM|p1dy~z`)KciDyA6rQxl5 zsQ2+HB%CM>pAwZZDxHc$G`|e$A(c^OQkfCkFDv}n>?()K3Ewt1#IOD}( zaR8oW2;OdjN>q7OK9yf3AtGQwRY(<9MO0B$OchrpR7q7zl}4<S0w|)lqd-Jyl;dPz_Zh)fn%-WmOZ^6uxwG)dHSyIe5b5RcpL; zwoz?SZw+5Wbx<8uCqyOeA}Xk^cvJXITvXjv4|oN=RBzQs^@RuCUsQ~G<4>vqYM^>b z4N`;E5H%EU6qWEsaZDT$N8wA4P$SjT>KXMcT1Q5y=TR>{T8&XJM&1|4sh3epKK?IP z`s-@4nj$Kr73B?41@-4|scC9Dq9x80Rn;stTg_2#t9L{-HCKG6-W6NaJT+e}i11X$xk|@4&$u}aU|6-bzIDc z*L)HY9#0`c)fxCu=MZNxMV*JQb`jp%C3RU{LEOb_iegvYP-$o#`d-~aRLCFIPwHpH zW4xmf&q~x&zpCHV@9G{RMm`W}qCWX};wf=oE4)kLn>_7dT&)ID!sm;D#~C2dW;z|G z)1!@v;$US$OH&q|RWuYsbv7|r3=%_hcGQaGL|js?AqnZQPSA-uug<6Q>m*%37euR6 zVO>NQMTTqo^%_@*6oC*4_h z5yQnWF#<6&yXo$_2U^8?>E02)wV&<}-}!O@8-o~@WA#gDPk9+{bmI}7b0S*VCZToZb+oTcLHw^b zP%rcrB6m*LGotlGb3{x1HllgX)$i(gdcJ5So<^(OLbS##)=TtKy-dHSmm}im3cXT) zpjYYDs7D!vZv__UHR4(EjCf9esMqR`^v8M~VuNnb8}%l=S#Lq4&`|uRGA4QbWTkt|`jkE`9zomS zS$z)iM^i)_eO_M>ZS_T+iio6_^%Z?pU(??qKIsjerf;I<=oX@t{s^DqXVDH`{vG%i zzlh&ODxRpn>EHD|#4~+hgpp#RQKG%k#u(37ZXRN`4 zdfWZeM;~H_M(S5apoZmXw75Pix|rw8DD%8|f&6!I8D9NZ^OCq>#+jGRD`vczU?!SZ z%_Q@hdEHDlQ_NKJhI#WJZ_4EHn>nZncn7Vw@0xjLzFA-vnnh-@S;BA4X1RIatS~Dh zbpiCo{NbN90_)8N)B|iXo6Q#UiTTuQHQUT~^BKQi)0?%~VRlC940fA6X0O>Nu9^Mj zfH`OmnZxFYIckoXgJ-rRsUe-pm( zE%O7s`k&0t@a*rHyXF`8$G@51%{_A;EzrV~@a{GId(X2GAKyj1{B&L%`Sa}Sdzrl~ zURE!gm)*P8m*6GhEjXW--%G+>gjvYBlbo7HBs*&|*?E}L7d7aP!L#}glmb>b`WC0ZTg(SjDX2{zH@ zwfSs*#Az)cGKh>~g)J!Jsis7P#22=REh@H(ZDP52UxZP&H%&|z31W&!FK&wO#YXX# zSSEVdV(Q>4i&TKhb-c~@ZVg3)Ov&3vMM>5+)% z*)Hlsb&PmXT~M#n&32D^m%VKt+t>CZACu~O;9(7j_*jGNV0fxSBfjbgc&krG{MF~8 z9_#3cw>lPn*tm%AIv)PmM0jMAB0kw5~XycThkKSqq=^@xSM(QdMv(Z=&h#An@RxBty= z-SMaAx(8nEKJw@7LHN9f?Gby_9<#^o342of6p2WATKp_-qyG0S>VlIawZRwcMYJPb zvX>Fb{3_awzP8uwcc|mNVbknQw11rwb?o z?9cYLy<_j%U+k~;Hw;JmyS-=c+XuezrLTPL8{hliiGB%RJExxuo^~ET{@+GzE#w#W zi}*$TVt#SIgkRDx<(Kx$z~e3lZ@U8g?n>~xtN2ynWmorW_%;1n{=@LQ>-cs3dVYPs zf#1+?gcjc>epA00+I?I2E#bYlhX39M{(C#Wz5l4+!S5LH=E`Mv!< zeqX|FRR0bCP5&)_nm^s2;m`DE`Lq2w{@eaL z{@h6Y!hF;)ER6W=OaAcO{rCM9{!0G?f0e)5UjrX~t^bk#vA@n=?{Dxo`kVaC{ucie z|5Jafzs=w7f98Mgf8l@Wf8~Ge@9=l}-}t-y-ToebufNaV?;r3F`iK0({t^GEf6PDb zpYTum-}w`j`5Eo2U)Ag*NjaBlQ8d{X70$ z|Cj&8+MB>xQ62l^x9{4wdlrV(B`$z4E|J`AHZ{V`+}nVnAd9G|ILn0@7-q&<*n&b_ z(8#c8+!OaiNnB!-m}nB@C7Kwsyu`%i8Izd2L{ZF(Pm?@^nLGdQsjAc6GYsnQ^Z9>1 zFx{v6be%eNs_N9KI;U^!K;nk%gp8^{4(6%p1%tW}ms$ z+-7b!cbGStH<@2HZ#Hi+Z#8c-Z#VBS?=-(+e$~9oyqkXg`9AtZ=dYPxHy<<~GIyHa zFu!R&Y(8Q>YCdLu%Y5AYw)uqlq`Axdj`@`NUGsb9)8;eg_st)eKg2H@KWpwW|HJ%| z`D61r^Lg_p=1X7)#wW!m$4|hoxx5pf8ZSp?^`!X8 z@l)cTjh~8K;pgHB#9>p2!Ddu$FJ2X|j@KZwcY6Gc_~%uA51F@G(ZXU9)^WzJU&s!8fKfXAALHt7GQkNhXcZtd`#xILEP&TePo=3cQ zdAudws&aeCwXTY{$2;Pk@yp{~l6PGlUxO^&I^^oU5Z{0t!CakdV5u-~cd7_wr>hs9ry|33ak{15Rz#{U$5GydoJ zU*i8A|0^c7_{$Bj=@&AecBmRE;f8!s-KaBq~{;&AISgqCyYo*m@t+Lv!4y)6;TxH38IkR=vdg}{_@n3;R{6@s^zlbRQ z)z&o>t-sE?9ufK*kT>nKwp!beL*0Q4>P^;{t(z%_dYg5-b%%AQ^%d)@)?L=!);-p} z)_vCf)&thptgl-SS`S$}t#4T0v>vt|u^zP^v%Y0LZhhN&!g|u$Wqrqb%K9#HR8L#a zxEU)iU-g{zy!8|7r`8MB&#V`%pHtTASJr=8zqWp3y=48?`knQ%^@{bX^_ulx*6Y^q ztv9ScSbyYPGUbE+YW)+P@S^KSz ztOM3TtKS;13f7<jy=uB z_IdVvdx5>sUSywdFSakRFSIYRm)IBEm)MtbT-$E6o9t#gZ!fc#+bwphigMd+_A0yG z?yx)U%k3_^+wQS@?bY@gd#zR5)!M$ip{chgZ_{nZ@{acW%2@5HhNiBL_GoQmSAKOq zRx9A%_SQtQJ{zNJHpkaog088AzNWaGs#Moxj^W8%Ej`DTp5vO~=e1I=wpz-ya;=vu zwUf-%$@_Kkex1BuR~e~o>uhPT=xMIlIRLlF-@_x0vS1s*VOZ(N*ezmk;t=q4U z&S_ZH*xaDVNhEWrSZ$7miprU+^gki|Na!;0Gm*-q`Mv5S*H5Oxb@?7aR$XP+H??*( z^{!ghmS1btH+S?jG&SYhdsJPF8Ig{zau<`;j5F;}R}#tU+Gw_^0TLqBxo5eA44H5; z$DB##lBr0xv7w6zNjfPryPn=&8q0~G&nKVArl0_a?T|1Rh z&k3^zIT^l#%qFG@!KrF{mPZ{?oJtDcQ^N0*rb)&zRU4kAyO*i2V$QJrNMtm}*p4KU zPGu<9(_rGiZ#YCkoRkblN`@mP!;umlOv!MhLmQJOmQ`*q66-YX1 z=~P-em6lGWrBhjXKP&HNg(+EKo~UgmQ5Bj!Eoslzt-HdK(*jge<-+PAU9*>WHLT97 z8?t|p&e@(QDKK#guL!RcNCXxgrYQ}gJKI$V-AuhBcVM7HB9o|OEVu(}?6bV?vnFL&0Mb<~ z5E&7OWLnHdj)x|f77LdajZ2fpB{DoYCo(L@5X0)o*$thY4VZzHb3lzG({<4~t_4Zv z7&opOw+J7L2__E`6F3a|Ig3j!BQ#`)1}Jhicup_1kah?KrqufAxxR)o7149uhN0<# zFeBz7Bj$pckj!OjBj>hsbgT^J+LsgF3|nO|k4eg~bxve3rz(q=$q5^HicX}fDud@T zNy+-U$lR6=*!;PohjU4?is!cU!gK2CUDejm+r#|H$^@6=UO<8fp5kc{+Q~YeVk4AD zvGKuG;CgT}tY^85FswotRw08{FVE}cIXfQ7TvncI&w%F)z=dC;kGZV8&mLSd#~vK6 zDSK{rTSIpXvwrTXR^Zd)LALyRSJ2PGVWE>AUIK9q@)JXl_xU1%t&OM`cQpKEAcW}m$onT@C$#XkJY*8h14FS z*Y1@gT@n>YL8ljq^C(x+t5PWpq0y-j{i+bXst|pukO^1#0`6BAP zb&)&wF?ls2t2IJ>4UG^?Q6_v5G%IS0}<-C+=Mxi3nV3 z5fN>hYN?F(nDm?$`dTJASIfRZO8nepmM5@OO7Pcen=0+pvM8n!=~x3xajd};)nu-w z)@tz9v~=r#oJd9*$b%=d&jvS<5yPF)a-IY7yk|x-*@{pDe2NAFsT=KF2uoy!2UFa; zqoTCEU?IZE9LXVo1uj}a^G!X^<@;ZJ#d){hl6W`j6m;YMxJlhrb46(S@U zJmMT%LM%7go}|QcNLE+dO&)iwreVU%3wlf^B)o}CCK+q?o4g7!Pom1HR5IG^&H`wS zwJ9g-{+z7$b4d}gBvT7FjWt|EE?Hk0XN77Z6XE3$3wyb5c$W`(A(2d_qsu*(B^~KPDj8lr zt-HxmnwzRj;Fg7mS%|K%CCRXtgeqitaTRH(sfe_wF52|BjhqFG?v>k+3b!b_T0FX# z;G{4%B@9hzYGj~fA(^Y;)-rWMkwRckYIvdypb>>Od$kqNh7bQUhsbpY$#rqZ5J!`Sl~*p9n&kZ)nKhV8ebrt8KTmPE2JVW$wd}}(BbO1aW%HKI z>4}G9*H937&J#aYNFsCW;G!)3l1N659bCZ0+_IMfhg4XbOWPg3l~aeToaE|XIkDg6 z$e1|121YQB32?-tb%eoAm8==6(%}xRwHc1PV1-l_>BwtI$W*ajWjK6{)qspqg}$=B zWqA3WNMXr;xhPPVUy(A|NEg{j`pZv>x|X$ewbv;eG&^$clOrjXj|NRMJkxJQpcvN9d*@pqF1 z6V2$W2uh_0T9uGjQx)prsZ_P5?<(PRnT$?AsiY1i0i~XMD_y6sQU{Q7RpQpcH-oDe z?Ip_?=_S2Ws+ACjJf*S_qKCbthml@d_Vo_c#v#)TFA9=6_RMhvOh2@2!Qcj-dFIB3Yt;X@QO|Dk!EKW z;pgIBPf|4hs>B&q6vJ)q)r&3nJsph8RcU`1eRZEoNDuv%U>qm}9@i-S4^n5RAuUt({be(@8Ekfc*vpRMp>8%XMYG4dWNU&aKgOhR6k#)MecuJ82 zJZ0x3gQWvpd6nx7uYuh)v5Vh0cDvqWq?+k)tN>bTqi5(#oDc@!Oq2svVI9FC(V9m%OWx~WP-nu@KP%+6Lnfv%IRz-N_`fli?z>icmeEYOOcVlP=*6T2s4YfoUB5axoKS&u`I($ z69j~bp@;(WY@gvU5&~y3#O#YFGMp~LRr}s7Wf_hgAcUjgGKoq8H^c5S-V?Z9K}cLS z!#+Kpvwh2mo1Nk83gEFb@(wdT>C{z)8=(S`0sU*jrCTi?LxDD&NOwNf8Mc_w7n97#DlvY2-y)a!`<;Yh03kp;I?l?ty1i6Q(q z@!!YvlrSsBQ@0~YN=H_LjwCA`SrIz2-@%c{l_NnC{(6dX%9Fx57M?sXyADQ)l7|LU%>}XUr|hI5^@zJCYW1 z#3nnO9zrBUXp@wWBP%FJ0=kZ5ARO_h9mzyEl7VotRalRytqj4nt)VVAYA@o(egNV* ztRLz6RHU2CWluwIte1shtXzlmt#lZ$NXouZl-!9UD*;DxD2}WM9LX*?;w3x0_(jk` ztBzPq$&onX@jIM3Lgs;N46aq7&X!ga|Cg&gYY0?FI+Rtn=&z3=NfS8Yu{x42aKv+U zByHe`2kS`sz!6W@kzltY`)M4GkiwRWGIO{JW#O)@1030B<46S7;pkb?;dMH$QePt0 zj#CvSy%p2WB9h3kh~O$ln#CHXUW~QQC`#rr!(xp3#86zt=(4+jvP=;y)|eUu4`vi031eL78a6$&2UZ(^~Bh7ZVY9iU%N*<+z4CA`<#P9J!zkle<%x`Y#zW5X`clb zWqDux{0v6~;qeIH*a=67i1>_aHk|A2qP!Uj^fIp1(awA~jEFJ|DAK#wO+d8ZS`%(- z?a~iv;8BU>1rLcqD5r{>IuHwN>h0>vw>Pa5RAf0&i=-1*nlo{wZsDq*Yx0O~fYWdR zE6=ex9xjxb9ZA`Im8`3`*5|uArqSx#ghQtSCCOM%3$zff^0JOzEGq#qyIa@NO}xET zSE3htmPOTDxb&>)z{TWOdzQuMU3G26-UB>W81TFsJL1~aO}=$`OAl^hgt2E?vEXA` zY{p7$T0#yg7YSB^+l-R#iv$Z5%%%>^n5Z7_%y+eRG^^%_xwv62PisPeR^jW;BiNw$ zt_ld==G)uy%M@9~ROk{SiyE;=&+A82(2q(~X=NTkOM0(Viu!Sxhg8}e?ZYgfG7r73 z5w*V#f$E|EKV5L1Cw^v!~iZRp8&sXi-E{_@_2HWW$)uFIyr zvD-U(^nI+etydv2SGBg&jlHV3t*5oKZ5@E|=GN7%&BR$%O;;(wW{9qiwHifn0|F5o z&&4}-LpNw??Ous{B1aF7CP$acJX&@4hY&2lVs1uCqg>JjP(<|Wx=f+2>-IzdCDI)! zmU(>=$QXae1UA~&I{}SyyC?v&yA$$+4pTfnQJaukYy+*vEh~SfHlAPGgqa^f7J-Pt zVJnIaeWK=UwNtrdHA#|4A}Tie=3`$dF7B|~MhbeKSn8ZKxWDa*9#0tr3qgDR5H zV+@r>t8%P?uhz2G<-J|`W^9V!4kRnAj?R30V{cnqzDN3$F6rrNZD?QKmLCEZOU4F0 zRZ&}O`-Is>m=;K>u~M%QD%<&0?lYTFay_qQDyz^mSeT~5n4vWIE7bc7NI0N>^eRZA zLhca||I`6(0aT%6s9u#F zqEwcrQN=^lDemf$gp3-5O13Mv>L6I5lO=3O56JL^pW!Q?6 z2%(R}QEad>(REa+bZ=^-t<&0jS9L|(=^_$WXXaOR_N>E<9c`G_4X-!a$d@L*H1h?M zp?bEAFUX#$QVU;@*HNVvd|AmCe9)V%lU#?RRO#Cg1m<+_3{M) zTvb}b7fidVgfNS`tfxzy=WGB<-8=sjjt9jwm*a*fL^U}p1TvMX+ftrY)h*SL$sWZL zp7y;5Y6e&J*bBf-F}VY}g2i(>;pbUh-HNAxyAt*pbakKgQT6=RN0osN$l!f{PkeT-2!IqDB=LHLAF%QN=}#DlTeNaZ#g+ ziyBp26gzQIql$|fRb14l;-W?s7d5K5s8Pj5jVdl`RB=(Gii;XmT-2yGcI#1XbVs!j znACm2^cK6fRL#3pqmV8gTCQT5QWlVTrtW+^Q$^w#!!^%TNqDAig=cOZ=9wxo z&wLLQ&s33krf!I5ZUy0)DhtolE%Qv>G0)uU%rmOZJfmCY8CCaqriy-^sj}u7y~T{7 zYTm7%XX=qE(*hAG4w+~;PLDFL+mgI+M44A!Nsf4+tlwjoEXfN~zG?bNR%`*H)C6RH*{u2 zzNsfd!4r70yjOi1wOx#;F?_?;CeA7Ahi;rDkxs#XPOzO7>o7Fbw((QT8w0RNU}UYWfU!rTN$X5sY8ikTy`D5t@qRwPJQk)K?G z^+}g-YS%xI0M>ZX#>LwKhVHFizVun9v-<|4$ zo!*PsyYDY>3sQ|~w3^$OH!f{z$v3SmqT9xXwziI*wMc%qHY^AK=-S&!16+YaQB>|? zIjP_PQ>uakWT^_?a)vSwX=QD~Rw+I#hOiB0rQEM+Y3;$8WN2)@xv^s{W9HBvRz_?Y z>Nr-Ps!OJenp+!Ib*S7tP(bs&!qT<+8KKnAsIYWxsJoX(zfu&lvN9QI$wT*8SUI+e z-D7NPD#iBHX&c0uXq-!`9n*=X^Bv1t+m+gO_aGq6nA4neNu>n`YzW~_b8;n>=6paZ z%}EnnMRO$pCy^`EV>s*-zt4FC?15yvovzQ7V1t1g5fGp=nb;Q~W}I^|D6{2BN;nRu z9|&ENgX*ayZ}P{L(bjR=B2~fpQZQSVsl2rXJ8n2YC&{x^Mus)RXXj96P0mPnu;Go` zljIhjvsuq@;0|SOH^XP^QW+7K45u|v=Kf?j{);jXO@`yVD6?2e{1#T1Sh{xfU%iKVut}%*yRo^RR?2h^%B^*3)-?+S28E-`Tq}9=w@f)#bhiGb$8!sab zY?f#g7rbZ`mvxwlOBN$lE-VdbN=$XCrn-b5=~Xd3u1#079qjGYZHuLXN!E~3WocoW zb(w3hRpnOMGKyIgkY}Crdt?1pN(1BAlBcFs%w*UqCwJIqCwJH}CwDpb&T;Q%>N^{U zAN75FmU}nby*tajJKMdR4aDf%o7uxXXW@M2kAafyS`Qmu&M-1;4oPI*UG$5JBa&TW*1mLK^Q zL-2TtApln~r0i1+{fes?x{9Y50?~@0-*6Q}0Di^LuSo1oimVt?a1}$!q8Pd=uj-`t zvasXQsAY(plSWG2QCdede{}p+w?q>?R7T1^mC-e{$_%MNDkBiT%4m>JWdt--Wdx|a z0PL?$(W;?D7a;#wc=5AzrN2O1P~}Oc!uO=YS1pyS_C4VZtr{C`drg;7OR@*nS2yFs zg5`(z1pc7Ez+@1;nD3OnYI-qwQUSq3l`m275-%a5t06`T&BwDc9ol4m0y0`d*aQ&* z0=YFxspviCH*{>2(2jGFNC=gnV+h)fqg8EcGyPBkT!>;jw!Km}sq*gC# zsL11!g)zf3Dx|pc`mKcf1V?inI+z_VBcGU(szW{dsy%9LfYSRmS(|^qr2Wa z89%o&{&zA=2J?U=R1n~-u0l{UcL+mdC_Ea}c}`zT^?sh_hP33b#u_CpRrpD*@KD34 z^JA;@^HQCN5XrzcRDU~_er%O~Y`k;5s$|ijJxXRtABCVQ4&-7sAha67ar0L>{j$M8Q&#PJSz20!}%Jk z;Xao6#Fi{g)hJIQ8K5kYIrdFENI;2@92?t)hZs7n673A~_6;yCr1Rc9H{Y0g?Dfb|T2qI5O zy5N5ykO1z!Yx+0|1ua4L+?R(;>H>!Dx zih^ICv3FX(lgwdfv{%5^W?zB%Q{wJY_C2fiQ;v<`-dojv4plS0l68j+8SI0f{fG%E z6s6uyUmr?*6-7N{v0j7$-=+Ym3}JfJ;Q${(b*8{ybjkNd+Ivo8?G=->JKlX8Pqm#R4^54@ zSDD7#EAozc_gxxy|5LT8ou1OHijUv;3*KPk#I8@qGJYA$_&p?i*TIJ%=YJ>XlS}Lh z_f?Zgf+=uV%Bg4c=?z~+|J0YsdIAUPJfXryEKkl+@I)I0|2rak(yS-T3GA_^Mu%+e zB*^eRkpw&v=|lBxA}#9_5qGJ^7e~CQ90iZ3*uv<4;S2NXCe9?Y<)fnD5ptd@x ztpij6i6hml3a4`0?#?MHa!?;TQ*=pHmQtBVra(sfkO_zeCMEJP`f%L!4JjDv^F5q; zpDd@bM@Ds&aHjD^7q8%R{Sb@ugcJo&bnqn+Mo_HjAPm@jAqNzwIL3V_i+;4|Kgmg~ z9(rPz-YeFL5Gs`+wB>HnAjky7V^vJFwVPy1-~?%FqI#p~ z+!TP*4gvIXjUzSXZURSF9pTCFcL=+`J@*~&$?$jF^w1&gcu(Y=qcm-N9LL|#NRRYe zTwA|OZ#A)!95uotVa$<~oEq}d;h#uLBvZ1+r^$*I{668sH-TsvxMJFqszlP~^(2rN z^s31kp9MDQbEpz>w6#i2MY`bG5uo&8ku^SJku|<9Fd1ZL-8H`Ghb$RKSWj2G3wpht z_8@uzUw!BjzZ&T{+E|4Bj!=T?VM}~@!@tC@9&w3JsXBZNkJ1-79Gk>P@wz%1DAxdR zota}(Op#`s_oK))9#BL&k(3?8m=je*6=lYkO4bKwpT(g1s_+~riWF*tb7zMa>XS;z z>Wq0lHrsY1`x?A(zPYNYp;M;`Kn#V=;e}G-5?3ZN#RpH}I8&G%6EJ0Waiuf>t~owA zR)f_|47-`IfvX4m8|kXB#~T|O*U`Rkv#GlWiHW9`I34Uz55q7EtBKf@{^Ub-rIqh) zY3u0dbRWr%XXJgjDj$fz;a}!UiVsGBAHpv=wo@U;4lA-*bGh_e5M)Z0BY)M?&<2at z3I3D~E#gS&FbHVzzcQ=>o&v(SlUaL(I$Z+82E;`xRqD094aY`Xo8o+=0pLiR`Yr^$ zkgUZ~V72d`52-Mcii|oK(1tH#wA*y577UeR+Bk>HBbDUM*621RFd!y0LU>LI2#g>l zC~&}C#Dq_;V&uACr&IO*dPL}+X^4?6d% z&IvcDb8^^b)D*_1m8LLMr%63aC2)X+r}AXDv$ZoHQ-8oNORgfXwN;K|%~w)_4Nmzk zLR*omu;^4Vfh=N;_cS9P;>d8u4M9BSL`DQABj44^@HZN89*bo?!zXr7X3k{fyIUDK z+9xTXgrt6OrIZ=2f?t2AiBH`DPP&0BD{4l*yOiP6FNuskq|M1}Y{_TA&hQAJEbnp3 z4o3#0%qcM(O<)5dsV|f%rKOx);u8E@xypO;U9k+G*Z^NxA~KFV=S(E_*2;Ul^8@u* z(lVl88Ihn2pTWja1l>=ehf`qax4fs%Q1dB7z*$l=eCiNDt8DifUML$&bO1p&UQqRzkR zX7sf(O6ZbdoP-jXeB{KDbK;I1p?2g5wIfHU9r;j!!-xH0<*BQ1HHj4{Q_DDc$D)vy zFKlJ&9J33bVQ7al$2ahxErO757^hp6qm(<$DO?Z97_LW2Aikvx5LZ;G89|tbZBtEP z!!EV}@qN!4Y(mNNTgzIP^{gwRDmZeE4L;4IxK*?zfMNpRYZSfRu&|29IQq;xZ<5Rz zlu4;^r5@ud`EEJql4PX>oP-uvQVv{c@Np%j!j<#JNgg|tiF3FT=Wx|#QQBd13`dBJ z1UBSQ{n{En1d7igaD7-EqC3H68cP-$VO$e(ssZzPk>975>fCICzeS=}Is(^dMkmnk{Ym6FfKrSt?ZpN~t) zcLlJhB77-1RGgAS#wq>%MEx}o{nN3Kc)1Cn#3NaMBh`IKJ5#M{-@+ra!QV@ zrR3aUO7uP@dY=+Ik;-QHqeEE9H?_+xw$*m$7{%% z3U4cS5_?@}hZt?`myI#*ZZr9mQCiN5rDdW@i(yX7gq0QpotB9!ErvQR6Ifadc3LK~ zv>5KROlWC2HlCHQ-DbtgWaUGWS@|qVR*rLK<%5!0`8-NiJ}jA)&!lAK1Cv?#TuN3x zG?|sprex(XWmXPTX5}ztR;*Z7z6Y3<4@6|;kY!dr6p_`2p1<7(LoSA0oX!lNV+NdE zsEmBRI>TFXYAe|lN~FbgO6#w$$XB!+cENBYl9UcdY{jZde;oO^og@8n{w0P8u=}8 zlhX1TjbC!%W?L!oWx9vBbk;Hj%kt8w47v2%SpzxoM24L3C6UXY)s2Z z#k8DMOv_2dw0v7E%_b_5)>15PNLo%VrsY#CY5Cq%T0YyBmJddyWu1_gbwXM`@Rb%f zB`t1CTD}00<~^B-H1F`iRpwUN;e%)jjYd{>8X=&f$|{y3aGGflzvR)%%E9Zb9K6oT zx74z7@H)#olTcs3Pg007&&gT&Olwx8Co58u6=})Jq3Ns~n$F6h>8yNwD68p`uR3Ps zvnW~l8fI3cEGtr$73s>dnM4{%`XL8>vvSZkD+hhE;&WtW>6aC$%!<@!WnqxeW>-Go zm68**DREO%OkX0Es~f9B75=zQ5rthODpZ8oPrhT))-+C69}-G&k$9mOR2?x$QcUp? zY7BuJ_eC$xGa|5e#D;sl_dV)+n?|_=Q_RL!%OkJ1smvops?jt7g`L*h6z32umZn97 z8S|~GR6+&XSfGmhOxT1Cp-nBQr@2O(oUtZV6>(Gz4i*YS9!deH6k3~?`<@mvKpvMi z!LRCSXl!k7?p3(T)Xpyc4FI=>+DQRV0F9^`qwojYg?ZVg00?~!kN&BEp+{^nF-C=7 zRP`b%3Gy=6dM(8>93bo&r673P*?>i-s;>cvEOd1!fUUk-AnzBo!QE8uje1b33&Kyg zER~B{WONB8lieL^#VyIpW7M~h6wNISZOh~pY~^iVPUGVP+6pKYu+^ktISv9!v515K zU#C+oa6wn}oGtYXu`-|Ur6A^7a9rJ#vV^z+Q=4QN{Dpjb_q|O;iVUiY2fhO2bEKHz z2*BQ^(M&J>(uZmrn1(jr)Q*!&;5W5HH8jMNeGMU8QfIs;RsL6fVE_B$yxK|_4#nX| zJqC|g;XAA`Npou=CEs#Q>2D-TI3*4G>@HNja!NH%YE9uPZ^Ak$7*)PRD z5%n!fRlxU2v2|4nII@|df;!!(;(<$*_YZfrq^+R~o0HX{S=D5zob{y*ph&|IRs?6J zsU~do4jdW$uW=1E8OEjUv?&e?5_n2ZZW;l(gsi!*gcO^vgm38jO33>8O8Bn7uY_!z zuY|8`QArtS%+-~UdbU`4sESEub;*=3Y}M+Ux%y_2zG=`md41ENZ`N7zI=)+0L??tr z&r2{OuoOr_Ems0>YI!NVE|UikKb9i(mg6_hD*~qnW(ESr#IlLTxQP{$t12drONYuPmc=Glqzzmrk4smi z$4#y%S5L=HQ6*JnTp6AdjGqWJw0~e;@bQ9a%y{+H!k*V&GiJ=#vu7}1%-DmQ87K_~ z_G~Kb*)(IuCS%5?J;sb3I|_R`3zwO1?hnkEv9WfBu?e+N$53wtHlfa@8E@XZX~w-X zW?W_*f7y&TXP_|SGW-h!1DQY|a$e*m>QEqUq>ahO)T!m^Xf$Sw0g;*DF>eGh+aAN+vEV|`%U7zpTg%|B?k{TU4}%!%9* z)Ah@_ejsKq4&CCt7n`Zyi|z~jm#-bK{q4r*8Lsd}Uefi4;qmHk^V4g({IK2*tnl#r z;BOhf^58KSPxReDqYwUoZr9px9P5Lxap8TMe*^EUfp+1Q#(mu1!jd8IKI2y(Ihuc7 zyVi}ycRl>(s~V4ef-z}C_>{xIrw)NX>EbEZa7B;XZaI%LW?nP2)4VNA3v3ELYN%sTe^s;tC!eFPw~8*Z(;X2n8~Ob%1WeowAex z!iJ@NE~AR){!yK3VN?S# z)T$>r18q_N=&JY6ruSu(@!UVkQ!R|LdOvu#`@Vm)@xFQjX_HY$?|k+l{#WC5JT){= zF+mKQ9uA!vhK>|ZF0oaZTuSj<-*|iCI82f9f3i$3zX&%DWP*1OGzMSo@4jTOna z2f{Cak}-i}An$>6*~HZN@#Dr~nj3d)P+ccanV71q$V{C&d13|b)a~SnyMm*>`tzo2 zN3FBvlI3qaUbxG+^sz6`*>YLoc4OAOtL9vHW8sO&ODmsTb=ehXjV=jaaDQ*-uGXEK z8_vFR&b`yS&uQ2U!l~hyGRL)waG0?^B5+q>i6Y1czm3f9uEG{S9MjJO1Ru*C(Dm(_ z!S@P341@z`K>6N8$87xI_ep_tATXu^2fg5JOp1;hH|EoI^Nc(1ym(3dj6Z&IZ^IXz zb^EOk)y`X2PT~mtjLT$A)PulIAQX%5NOAk zj2@p(of)i3mxMy4&lX;O_+jIeXD|QRy(`D=9(7K~=GmKHyK>`e*UsMDan7jSV^-e# zv$0Pbi{JaZvG|#~jo;}$r)S=Y*B3r|c6Z^U>rR^6JEv>crh3RD+QcuE-vW88fM}P2 zQK|70#$n9CtTN*hs0crE^R|gKrK655UU=@nL!W}8(13BrhN~mtTY|x4rm+0rKR$^< zaA|#Ef#M2=c{Yj5Ey3{MzZ6_?i{zZ((Y?VVNQ{QTFrQ9H!Bx=0II^OG=Ma1*C|0=JC$!CN03SYb@~6pY#(o3nq{H$RCxkeNfW~>E!<#S{kl={^#=<-Dp`N6VOtXQ4Swrz4f?e9aBZ`KM&^(REF{s}7I=;=mFh29Fowf^ zX$z+K01?yhP1=I_;jmzR0v|I3{W}J zEEMfzUW8{8FP5rSqVEL$ta&j6zSp>ZBslu;nh&lV4Z?#(5j)GfV9jUKbjJAi1#b1# zSJN5S-yS&NgDZ7rc%N>!FeltcW5DoRgWEJW{B&B|hS1~U$713*AAYr%U_AQ+zh`*i z(lEH@!f!EpJ^bW^Q+<13;5HAB2e-bb;e}a4@N5nINn~QVACAT0MuDSUm;W$Fs$Gnx z%MZ++e!BXME&Lww)$Jb^Eh4J~SBoy97wxQf>237rwND8A$%W7M!KWMsK6MCOEoPV> zdKRD1cLIwQUeAJ{@{mu zWc3kw04o>;zlh*hjDADvKTNBPr3T(b{VUy7#@h@RztUU93<~@ys_(60_6hucf_tl& z4+Z{Bf|Gwccnvx_00jpjy9X*dCCQ`FhGq=L*L#r9PaJy7gK3#FW&4V{!5$ z#gyy{T{}<{yn5gYH8{bY0~-Rt!M6*G!m47ankhmRV<5WEIsa^d9-*F+3>TZ~m~Qw`bUFA*F1j7>%l$l3%3MVKs4 z62g)GF9d%&a7FNHmV(00;5DeL+=jK7NZ|?}-uZ$PJOR?AX4%lVpB8?%bEk3Qv#Wo3 z|H`q?O_ zHG7sH^|_VbF)lc+uqOEV!w&c5zuCTJ$rCp%9&7Zz^x(jlA!B}Zm(0vLX%1Tg$i2_X0g62SEjl>n}Ps01*aC4ju1 z$eFMSwlY=ld+>iU{_ffxD%rie|AWX&0~>x&*q;yI!|qaHPc+~%HfEm@M&pVY@N<)|E9HIy zVw(mwZ|a}s4%NN)szrj=o|=gU-!m8D{bQ7RPn}u;?VT)L7)!H_OgGVJ$Cv+f1$qHw zduCm~^s=k7yT9=Jv7K++|AT=Qz}XhOx!-Kxp3QD=4_-BJ<=5V5*W*spC&mS3@IAU1 zjg<%P8}2AXZzo6LzTu8S^j@0C?;GwY#NJ@|a7Q8bPk|3}6k=}*e3+vU+beLNqoC-F zolfD-`-}mSV>jFhxC(b_I0e3xV-fj)9E?DP#CMgS~8OfaORZd~e`0)apxsFZAHn*13LY3&m}7j$r88>X`duYXw}Q$W z$-yHmR^MT$*(TH=!V@$+?9q1%Pg1LY0X!R1i#o=$f3SmEZ3F(DUVZDvfJyK>3QLTe zU20&U1IH29?jGD9#1bPI4gQ0znc|`i>ca#`D7sM(}r} z(9aJy7~xS2A8s(hCklL+!3d8P_%MSJ{*1tf8H`W|8I0$L8w|je!O(Cv7>60n!NZL{ z3`}q;8JOn}H83M_PSx^rcw|mO!La@bodV9Q=^eITIqUd4?qVZe_`l$P7WNzF&hS+` zcOJY_E&Ql5sn)wn<*Y4(%W2pNPO8Q5zp_|q_y zSBoXWg2mNT4?77P!}kV$IT9QZlVf~vrCMD7y}@UFaFqaH_`X4oPz-Aqi?r!nzc44% z?$K%8YIKsYa((5U5FQBm$n^up+5Y`9Yf;%|z`Te{5 zy1V*z|KHfXugsfw{r2tGhfh5C+ADj35pRZ{Rby;eXjgf-uhN()_*%y!B@D93y8z^DNFW>g@Jfsi?QccBT`B~oP=mFfJ7YU76=4P0Xt#_kS< zg1;O1+v?zE?O9oyH{`=@!_ z0{B7f0&Fb(8(IYh6KGE38aD@7N^}*6Lilu`E1y?=+;VEZ0 z+7aIX?QHSC2bNzgLov$_hk^V#!Ba38mnjw+M?#mP_R5qJVTf6HIz#=z^1`K&{%AMUKMikIeYE<**J;K?H{-2SDUxLz8=8R3NfkCSO)krD>MaKTPYbC8%eb;#Mv2i; zJaNjT;*(1z;jgj6DE+<>3K^rueeTo~OA1eZz3^Dc)Ke2jjt+)`!N^Pf3nw=9bSI}> z-n(p4=n-rgS}<+(Rhy^hwqM(RLjR+o1(R1^edFxvtFHY*vj0&YwdiIVh!c1qg7<3~ z1{@aKAUM{I0xv94#QWf17sl<0Wj(lkMuY>MkP50FRd{rJs5fF+WO4zR1AZ!5r_^Ln zh3~)-m2wJ#BX0&TJt8}I>d|pCxce^yf1PsnWiuy7Ohn=OXHMz4X~Fq-T{A6o6M8=H z^eY~|vf~GRC-y%k^0`2{j|Sp05QX;wgSw}Y1;hb{?+q4^yYUeR%fD68}<7#t2LN5y=Ss*0HiNx*s6&HO@u~ORw9`lghoMz z)Xsy+G%Z9HJR6CJC(pd>tf>Qk{mbs489%!G+}woVrEiAjpV;?I&;`IK{1vm9f2|ew~keaiqNrr}wu?xrMosGV+S7wOo@00Cl%2qB=ULnJgp?XMUZclVP z$)c)X&Uj$4&rp_Iw@=!G=R?YnEmbaD8oPo_kCHW*o@h+bY4t_bmKu&tehm@mQtVss z#h^KVTJ9!uc_Tn*6tc*VJc11X&l)Eqsr$;a*S@-Oz2f+068ewdyVot``AhK-}Yx$yk*`pfRR z;$!+1S+OB%Mn~^{YSE&iQAG!?()saE?xq@j?#??#mvl#U--GWBvVXvAapBP9R$qN3 zEsSR$oEPQY4zIu6*fJ74`l@l_NN`vv!H-Ro7=4Z3z97in`FaJX$x^jj&TutZj@&-- zmyi3}fj7?dVlyNy)R|tjsZ!u?2fpXk_u%$%#wZU?a}D(yme9v1ufFwGkOBmP|5X?N zyM8?8JsNHwGX(C|w-@^Pq}E_U=l3)mewvR@pocB+@Nu{?81d-$@L+Rh=ZNqrhk;KW z0!KcR>9NZ-T+!pok=mSz@l;xU2wiIJptg_r_($(C{7|_hxHY67G(&QIS1nch(fdIc z*Z10mx_Wepx(;g}@)lM7fgCqtBhX5f|3dkvpwvxz0{)0vjTyCuZ2jY;C*Zrdz5x%| zTa69A7ki)K!8rt%)ffV^rofvCE|EIa|EIv`5nNVdfcFdh0)lhWbCNP!YHOv)#q|ZO zN3|J3{SZ9n$>NcJg5`-6^#_9CkFhtQ>U;0)-}Py%%;C64Qt9I|OR1V$Seu3zU8>hX zqv5^hE0eKC8PY}XsW}6MYcR2^jrW0xy1$5dOp^U#6UQODK_L(o1_@1guJ97JjZJ&b zIKJ@aj@Q?(e|?8>{46h<{(WQaAOCF3eP-vj!oas5D-3Mk>CdcVW(;F5VqgL}$Fg0} z+Z~AQf$)e6kNtVosy81i+-6+1dPi;TjcW=oMqb+Z%C**TSzY%t~@e=V{rgDCkCkP5M?P0EQ8#gd_yS+on}niTzIJv7>Sb5?n2^x zIV%mFt95D%V!FhIaaPna-fa(Q`754CKs8txthU;4>|C-48s{U`7K81Xh< zdi}J*Q?FOw`H2#)2q`Rz)`AeVpQsXq;H1W6>=8wdMQv29qHSgTEW$(_5m+aizcgH(bMTHsn{bWhQPwu$>#g48QL-}nt@0z`!_}EdU*d}mb z+P*JbfBAnJpZVs4#`s@!Ufc7x125iX6hHWYQFPDkg?~TtRN>z{2A;lV!^00}lEL7u z;fTn|UXoE-rRYtqLkDgm*6E(4t~B~qZ?CJ{0Z9$sS-AZb15vUyzrOa`Uvz&Xw{h`> zSI<81a`2vk1{t-zB($4EXm4hr#mX3sLTDNOwsAeXd=PS9eQY}Vt1IL_I412=d~h{s z6CP`S;B6mVDFws(4BhS$f4fkYZ_9E7aB><{eYL^{TsaK^y-?iZe-H2<;yJ=Jg?sVd z5eN$$qYR1xA7e_@;S^0VG4#iQJzc>Q3(uWc=yA2+P<}>l-d}V??AA-$_Jd0!uvRq< z%0X6XNwrqS&KKHU zU0#kfC00V%csqW=WZHQfMVxN@gb9c+U{Msj@xH%rj{n$fykl|xzDv*BJa5;o7p~vC zpKcdnAwT{;<8BgWuK%8{Zy#^0Amt*sd9MpUGVp=W z2|VPnD*Q_Lfk$~{x*Z-1cs}a+A!Ada;yhJn!m80&s5Pk?8Vp4&R)S5~oJCga*5Hkg ze6VrRH8-AdOVu?E^L8{``&98Wk*ogl3#VNZf}bH*u3t&qq?43 z-?IIZ>QN_^RxCZ&Sv7sls>`2jY5C!mH}6iT$Del5thsCJDpoDL;`=QUXQ_s4PEzwM zwq#EnhgB7C&Yz3}aR3{?l)RE(=JysU8Q z?#RhIcOG~pas>+d5q!Kn-VEY<+hb1~(AT_tCxP=O- ztGP|T7P@fT&AVqWu#Yb-{e1s_6GID&uGwImTX-UKYA|?bI1+uC`3t%m2R|2nA^HH! z5HeO1ltq=se;qu~HxNBQum2|2v%fEL&>IvUI7qN!CZZ~Xdm}SI77i6*@0w7h$db(t z;o!l?3m@&+z4Y<*>%VpB?j41LN8IrK9idnIt5-gB*<}x{4872Q(jD*Lpk_~C`ZpHy zma@SHL`~N5I0-NU+VJLGYS}*Utzpq~l8_lpcS=nE!q4A7dv^ zDm@DKqxU^mX!*ln;K*s?PaJ>pF@q`~JNve;%%A_2+h&Ju^4}xe|M>;L}698P5H|^q}ytaGi#F zNc~ML8zNn0O1$1M%2Vf@H0PvqKW9w+{lFW+#!zSBxbwc;*>%e~#-ICl4BU-*0TJy; zUt})?AJZ%51#rnQ&J1!w60>XY{u2heUNgRCoR$3IO=qPlFD)<;!%niPCx;LPAD z^B2Y@-7(>)83XGE?g}=E$eujG1u=K89yc-j>cu6DqxYu17GpSWPrF`6%A%QHyV%l+Es>w+kG$a1Vt&L zpmRQ0&3vM)@KASIJnkt3vsvIL6WrYf5&N#%?qIwYyn^7o-C;P3 zwi_uQiw+r@0=8&Geyz4Upg-8x1-o>8f#vqFJ;SW(ADml_C3p{8_TdI?11I2%A@Trw zIROC)tmDQRNF|kHMKy8URlz$?U6vkq@iP|>{3~2E(3x0SG4A5;UK}jh9QsrJb#-S( zLp!hUPtM$yQ+Hr4p(MF{Uy%m6+)ABcxH2gOhkW?qN!?DHz$!`5V z;|7wm^>BB6aJaj-l3R#?9SV7gyMfNbaX?at4?Ik#DL#%L z9=&!;;0Gjx!g>g$>Nkc0r>^e7P+;4wF=<*@Ely}4j*TcURC&kYBhLBa>Yk0aT-*^l zuK)FLQUB`~-?Tb^@ud}?`P@lkjy$6Q*L_v{Oax3F8+4Wn1!2XG~Y1GK5OZPN7Y`QJ)`BQd3Uy7wl+7( zDlVCHQq*b4q|Pp{S=OC6dfr`E4;*)U`GV?u&b=c0mEv)u?2uW0_KefcJrR;aG*Vpp zph!*?acUSGyRqPE==!lHNCMTjCkC*;Rly5OLT|cwkY!Z$Y4WZiNg;f`_a^E06pwuH zvxdOWblaKihr8|IRAAsbk1ij4s|OF4`S70*;B$4u>81J~L+=#*-Y|Nn4uMa1=`Hue zUAm&tcYoz_F&dQGQESU@Jm|k%=-aP&(2&Lq9hMPN91pko;66@AHI|t5mT}rh9Kyc7 z9V5ZvWFm~LM>_gB7ytgi4IVu5TTNwjUyf=Q&JR!asIe~!!cB9reAUJBuv+_#0N)Y7 z4sQ*Q_38Td(HgGsM>U>ui|Qkb?Y1-U{t#|d8buzyh~`E#`rb(@XMOkJ2jLUKYgsc5 zic!E-LYTyjlT0WHsw^;8%Hz^-7$!w=2>;BG#8-k3iDou!{CuYMobxW9RR+!c)?%%h z*G+HQ!kSsv3C#?z)v8%kI`QOat}&CGQ(lwrA=M22`Sug%Lp3+#ST*BQ=S=shCRiF( zs-{`vijq1)=~Ky}!UWR>g^PX&FkNqS3S0@>7FI|4Ub$j^_-eh z7kzQ|{F^QdR?J^pd-~O{eC6zu7S33FA&i%?6uc9&HhJaBfeSvjVEPHnndpZ~B!lpU zG5zf`p3xkOeyBJm@V&uDJh{-O{!xJG%7ACq|8)dDS_~`ex1STfSo3%vsxx*k**E__h)1n|blEPaeGg(c>GU}8#X-Mvg-T2z0Y0_qN!0L4*EuOb|(=x431%j`=jeCOBSU1u)J?G9CPz57mu>M z%3V@%!!EMCgwOZhB>kT1v=4sP5I8Kav@_WcciT~xcaKMx55Cn0Kg);z1eKpv{TN0s z)&CfJr|9>F(K~er9F|w;E%(D+x?p)@E*F)uP;FRV%Y(v72=whz4+_i6n88sQA=LwA z>s+{x)5`J+{4JwXBBKSMcl){Mdt&m@5g0M)zGwwP6{zc+|im za;TH#2o_VYJZ${V!=fyyz;_sMqjjsw@-o~$TEhq4AJW@Om9f|Lm1U*+u&nTdm1W%< zz7iglR;8)slab2BwmS+(s}DUy$<6Vr($`m9<%}8o*shW>*-fkZUS7BM<%e3g&UCh} zSl%~t*486tUvk`|*dqF0$1a)GhxG0fh5l{#UH)uu&-dF~p5Cxw_e#)WajT%EPRYLA zFnDF~b6Cx2YC?gd0$3D`DTkA-@)(~;)4@wZxx$Y=maa(69d{h~)+Z4sM68ayNh|@Y zDUm`1sF?*;W<;h8KR_A$>Pp3=uYHr4wDpyBYhSs4<&N32wy$d4Hv6pYg^hP^n^;|1 zdR)=MbHT7<9|>Rj#4$@|Z7mEu{_VoSZFhG6XwB-~ovlB-^2+~M73?>fyrZ&rfqlfL z=npmfAn4x+sP9MJ4ME3X`r!AFNx2(tzYh+#|6y|b??%*p*x+N4{V4{&WY@?SI>}>2J56w`0xu6CN!}&Ahm( zbBE*X=&ZhUcB<%+@r&2qbi@uL{K&VA$c~fecF#_pGOOa4-rb$;KkPZSYR;+2vwIhu z0x54nN2fu`LjkxGV^mOKlAbh1ZudLE7jGD-G^T!H>)33Ept*6wry&B}UK=?c@sSEF zU&^PdRGfSRXTpR}A}F*YH+{`*lRj5G?$ol1sYlEzoc@WVg+B9xN4^>f-5d(lWq;ZK z?N6dmnFz%uGEHaElyr+)0V}u~Ffz>q$8xoOBsj7pyu#-C$`CW2zQfetrt7PAUHs~! z6KI#FSYJCd{{x?lAn6>2CVEKV*j~;i)Y;g$Mzu)6I1aFYGAx@@@nDHVRu@-E`9S>?sWedab z{EaJmaL}M@BhwdtY}-vw%vxA)BqfdoB$iA zX*}eMOju zP;TI(k7XKWg9GC=5g*4WY~=qCgLxcwqc|$+fT#e*j1RbErj}2Q;;2M<73I~DyZv+& zhF|E~GJV(lNv-*FzBzu|%V```aqstcE1`sH(s^9#4mI(tU))7 z2U0U#%=)RlMqIljVbO}cMpb&S4l7A+jq*jneyoj!Ubb5+;sLO zH!q&E^M=*NhJhuxISb#n{E75jZ20WKMT5r#j71v^DkkC z&I)#fPccXuZZA=vNFAZoZT?nWDzPLx=+#!s9mc*t@Lk4&S=@&mv-koJJ1s1Uc6(vB zi^YRl-(x7OgdfM&!1u+fEce5a8RiVI@>o1RD$fCJVsYwn0v@p+U!SpDTOHqfF)Zcg zd9>0ZiTV8A`G?XP!^f(=@6~qu5VNNo20nENTrGWxb9T9gD=J++V{^CSKP|C+)RMo0 z_YNJeVN#_Q%S64>hC^#JZ=kZx2&&bES5wGeRy7ntQ#(Vd^j9qWq3>X&(u}{{p;R{1 z+&`dI_Ri&^vfm4@-ccw#@^Im!VLE%oBc!v7HqJu3d!eGg!VENuw;hoi9FtE$?p<=z zc{3J{O_iKFb-|qQhYeTNjf>wIDOz;i!3ya0SiJcP-VD*THj4qxJjvo=W>0 zLvUPr=bZ=dKLpd=z@%nD7gOjEObZKl&ifRYKvU=j>J9GHHmV`?z}%1fA7D8;G3ID% ze93}2g|5(Qq9z=EAQ)8r3O#q{o#-1V3Y~UXJZihn-5Sq?;stXq4rUKUgTbbb#l8a( zxSmBQFxPlO!QqrDrVBh|8s8>S(u{}(H$SC>WjYTI-rtP(qxOpF3MyFfHfkxs6s$BL z)Ec7t1iz138;f1M$53ko51BzcCd2H6&!UxI1^ZIvRWv(RsC3F?%D52!*y+Ts#E|S> z#CJkzT^MZI@SU}jZ#rV;y2j0qmW|1Mv2F7!8&p{Di_Vy`Z(a3=t-hFE!I|Fj`Ad&K zw`$W3ZytX^{l(N1u?_Im&yXObnF8S8_HQ!mj>Z#7|$D5SHQE(zt$tA7`AYw9n zL9aZ&|5s{5w|Wk8;mA)<`7m$!zzd(gBN!~zkt#5GYVamCC^$U*rNOfq9zKh#48dV# zFqR|0w+A=_3_Pf>W)ZI6XNdJ7IFgitf0fZPBA#s{Lqp1D=I$CGzs1fk`?yU3 z6OLCgVQoa^0+(>d$TPXC&PMLcZC1LX=>5guNNR46bO*|aI1-EUCRXK5V<1+K>DqpN zxRRhYbVxr7+3BBA5}q9jEuO-&(=IL_Tl?2PV#rALq|PvAY=|exJA;rN@^xbq(gLZH zK@%2OUES63Le}C}BTubP^3R?`S)Jj5%h;Y1{(6yiq;O_MsXIl4ixwb!>BN3RAW$j#4Um)1 z2zgLO(q>A|`zp4DC9g)XWuL62Ktbw!C*3;LjI_PcXJTzZ6mYbBjvVDip{qW^IuKzF zMOJgdG!U$Bwb4?9{ChnzK@&h%>Kdh0Un3oVAvEd{!KP`P>d`{gr`|fbQ3SyfI246A zD6`&NMv!qq;R8+&3*6kNu!k~*x3j=Lep^8;{XFk*Onz!mj(2r+o8G%}5)VhY&g?gR zYxtS9zgb1!*d?zlcF&9ABk|?**s#~v%a1cx4&067d_y*GmS2%4-+*wI%Y{;BDMH~z zaH0jStKe!|za$?LA)fCt>|#1tJj%`7x}C;yu${~Nz=8Qm{#lbw$q~Ydo!O|Ly%Opo z2tt`g?B$ans@8X9Wm$r z5t09KCyk&Invy%@Er2yGb*A7DE~Q9{QODE1JqIsR;AZK(Jn3VSYMPvtv~T1MHcC5f zFwG^~x9%7(kN7WL`2tx<`&KTy)+@i>?5n@MfG2s6P3pl)KZ)DWI>m%tIsAok%R%K7 zK3|;V+NBVaK388XP>O(9@kDs#)l$T>Jya{~oe6+D2vk}loTLyBV0RpL!UVpe<8aup zJ2?Cl;2zx8R^UI#yE*(*z&mk^ox{~X%YShAe~^i)y8#;q#8#pey#1TgfWMU@Km+|( z3LG<{FuLJa(1?BO<^i|g(V;*-gDPL$uT%V^=nJbez|}Z>Nj*7>x<(qHG>(BBvmXF& z&F4L(mWOq)gi-!8z+W;czrGF=BH(`_SJfs1uEdf7=4Ushx&uQo6Fm7vR!tt!o=WlmXeor-fs7QHoK zOWX)J$a7bYU_lsfZ1Y<>TG&ANlp_W^Pq9g*3wn0<^#o%C&fDU@e_9Yw!YOuPB(Da$ zkbH-zcinD1oHtHz_#x^5r;an~NE2T*#+oSI1goKg5n?Qzpy$k?unx}A8)CA+24HNm zlrzzx78DjnUR*i@>1gfp)?0$h)vMdYzT39de!30KP|uSMD05U1;|r*0hR>B_L3+~{ zOE3L)K6f*=7LdKcf;F3-;~uysfc+}B8#Zp_K$n*fIZTQlg5J8PP6}E!VGkQOsNa~F zfhXM3S6b==`;f-{m@s5#T8>sg! zjZg!+VRnW3A!ofB(f_SMNZiy!x09foJUx#`clPx++GkdBX|>Qs4$c-VS?M?I7ws=v zTPdWksMECZT5)liz0g~;d1@8uanD>~)LYw4xHApsBT+E5T`-S={V(8~B7!TEO= zWKY4x2#1oHLNfuZSDw`n!%W-12D(vLE!&!zQEfk>bC*^<+q*Ygy-fP@S&)y5ByG`{ z13@c%NLjrdg#SX|!DCjGC6}Pv6lerNXXaD3Jc3SFpa|wvx0ru}fx`e5iNknw0j=ou zCnBwJNgTI5Icr#p4e?rrHK~puVFA zc}yJC&!<#d(GY*?xH5eI#c*hh!w2|FeE2xrFnsxtV@m&s{wl-Qkq;kAE5pZ28-@?& zo=IsMzKMq6dx;Mpm8anY+&Fw@b{IY%Ln}0_e*8NvhA+zq$M7lli&Sa05@o zgm298RbDOhmLh6B_bVDh9U~>uwmY^u=>P~V;Phu5Ar{7!i~g_jW3099tX$8uGMn~R z^U9{ptca}6pYFY2RQ2-6@k`vLh^I4BMsH3FPMZ*XC`a2`_+?p~%R&#WgHRWqI3{(P zcATO~c(ZLNlK1(A>x&_xnqXYTKE3(RSk+s^u~%OJNs@z~fn)^H2HKPke_$!4`(+dh zpdG;^c|oEsK#wO;r#yY>R zsp+n^=kqxI)9fdWYZS}6x5-PRr;TL;=M)&au^}03l<1AmVbRbh_1u6EcaR5-c4DM# zJ*P^Yh=Vz|&_NExw^@$!$w1BkzsJTkRvtl_SaIsP&ZM6EW*1H1kOZZEEOYer&oHfD zzqSW{1*UN0S{?gpCiuYji%bkkf7F9%sl)51JcWqFrGM}Qf^zHrR=*kQT0c=yr^3m08X zQi8wGHQo$45=5v4+8S;~gyv8#-W;m_If|e`bstM9Qh~l~i}4IrBrltz3WaaDo7rGc zqkhV<9xxL0fkus^KC6FlNJzgP2ZBSVy*Wh**LtoS;$O**48Lby^3_MT%5ZT3>&-yeG_n=dtA>O8tgeM{luQLt{%e2FsiiY4BjBg zH^D21zwgL-&6KB1;if!j0$2Khg*x>wNA;&ph&=!AVq}S99oXg2a6doaEp&#vSa=cP zdTj6E;$+#egODlu`S+SI(y2p-j;eGq=*5Lj+R~wWhoPOPCwJ`L(V<@iV&0Xb>;J4J#Rs*2 zvD_xKqp)n_M(v^(Qj5>gwh_&**$&)vf*{0-aB^bR5{Jp5AEr24?uw(!dXeD_D?`3C zgg=fp9eLmqE)Iu}uRF|T+Ks23eIaPmSIbiWvylbJ2g6o-PFf8YXImj%t3K%{^u!V8 z^i5BiRACF&1k?^hA2$QgUGRmt=Lh=y^uRS63GY!lq zKas9oeb6ObOjsYhKfyfNU<>ZojG61)uOt{>u+O``g$YWpdtnUvsVvTYF8s^3@zZ5u{ASiTS@ zVxe(cv)R8O^&E9y1ji6n7YKnqNZHgWLy2&V_SZiIDNCnK0;%WzV~}!_fz(X}DRbwC z@QJjL9G38JSSx$4x}bjKOe#Oxvg@uGE9bV&#`W~if&=z&$Q*&45}Hj+nyhtI7I^&o zHaa(AgLUQeHJZ9W`|3R3|+p!!$-3Z8}jha15)vWo%WN+a-Ix^F;#1;C}%!sBBcbN{TZ>bp+5v)iRs`go)OxbX)r9@A^Qo}A$(Zi z|EuP6EZnyy!=-oMBFI?y*-%<-UL?VusQZqF)&SM0AJjp=KtH7cew()T_>im3pYT(4 zaIl~>X(!$ye1b}gZ~>dNNIpdF0w&k6@Ci5GsXX}*0Vm`AGPO+(U=y}d9Kb{;Y&wkN zYy~=DduJ$RH&ZeslCza|FdSJqyYSU#pM51CWZvsDGK*`4Zt~4*Y=ji?){gBpWwBx5 zakVb&sjPm!J_wwWq6{I%O4TCOS5;e_nXz6z#imfbo!HR#<#*tYbcqWOi!H0!zT+(+ zhS$f;=GXlsehmH6PYI*w<*eKj3IDxmt~_06p%pUaDY?;vj!UYRTD{snHzuhdAn4qd zjq_hmS+aX}hrPD(^J8+SM<3i$y6Elp6IVs`2@8#J8|h=&zh_x;)P^9pWqy4}M~AtO z^|tNZvusK9)~V>ze8>&qU_sZYliXN1>w+Gft+Dw>Q~e8~#fpkH9t;Xa5wpkC*ftY_qicc;`4TmiWPo;0uZLS9}oRA0VD zdsb*{SJU4B?_gG-!xQyzfu{N)pi~cE&6)I%n;rC?nZ<(j-viqruFFyvFGPt4^?T3G z{5U<|9&yIm7G-Nhljd;gaO&&xH*Pr>7*x1)?h2bDmWfr#E8kwUe*3}b>A5lS^KHfO zlr2FUqLRyc_P6vI=^is}szc-|x2fA^F5Y6_+tz!md)Uk|Xdc=|N>kk>$ulTm%6w@3 zeD*=^_*S-NEUFh>&CYy#X;ys6$@y1T?c2AsVB5+SGd$hfdu zty^E+mV9nr<=XhgrH8zdCd^r%JMY_NMZe4e$-=dNQ6SlQM-Uf=4 z|3m3Jn|Jz$!%?SG`V8;mJ}1^a`b2VcWq-FG^QOk7y2s6R@BebY?67$WlKr#)NcP`; z=UtxR9qlopzmu=m)U@d_>%0bc+hH{%*n4E?O;`n+= zxlvJ{=#O&DKpP05+L{B^V>~RJZ0tLE*p*=R*tU(CKRf5`)=b)BZoYL5zO|Brg;!Av z?VDoNWvTdyd-Za(wV_IA6^4)tC74N9?*l~VB~G!jIbqXn_t|bQv+xGZ$l(ajn-Uf? ztLP1tTGg;gATIj~OLR>tYRjYnS{kum*Bq+$m~3GWygg-=;3fN3YyH|hqh6BDQeiTA=s$18Nm=#`~RBqF3Y2;<$Z#e)jxTZ*&gxBS_fe` zkGr<2`w2Hh#+i*Zn^4ai*)ZZ%F9dv-#^@18h6≷9WK0CApUWn4bB=meh;stxvU@ zINy8LhJb)gG2<7xIgOpM$=_+^=Ahs;{?&eiT#6@pm6lImvv$$>XVV3*_M4bg`6^Rw zoLVv8ahh}Nxs2=!alL(<`wxl^8xuRmt#F<+>e*XUCx*HEB#c=fA6*tK-saf%k^d6b zLXLSNC|aJ1x-B)Hmak6tb$Zp2Tj$I1sCdamLh$|u2zn%(+VCvY4s%B#K7y(g!*RSzEBxo5%JzGG*`4;T<1HMTdoakSIp zR*f0EGEUfEyUV%X_U!c8i`EBp?;fyzQLU^@Dk_RBBOi|51=kogRc6nu1n2tw5O%Fy zs|{+b4+gT5FOUdG+Yh=;DjGO$R4H)7CRlwqj4CWZ_4V-8gfb8a{MSQ`wP5?+F`%~8?F?7=1RfZ)Hu*ZLqID-CxowmC$4%_%Cu=IqfW)WV-73|w8jKR^yKr8#U8(_#9yJKW8f(jral{V@?D#n~5-j zvbY;jQ8(hWbEODSfVazW|45!47%qQ*mHQ=)pbSvKlDyrny{x%26sXyN};Yfh#E0>%9pbjM&N5K%?-V1V0lvxi*w~3iWUVYk}S-@IbC$l=_JimGZ>`bUTPK9$ZA` zAA(UGvF(J#tHMXlj+7o59y!h$n(^#4CP#i$Fx;E*{)YY{#_zUfZTLNpukCQ5*&K-BVr^S zLfEn;D6M%Tq$DX0F7ECy;^1n;W;D0l;POI(aqLpli3VUz@4VPYLCmRxgY)42{eu_! z_jy-->#6*G7yBVY`}Yrt_wWBM8+A!`gLf%GyKBgR7W=yd1+Y!pgTlK)kC8)K?(aT5 zL{1faYu_aqLBcQ+FIaPy&bBrXEoAIA;aa{TEgWd+Jl*r9Zd~Aoae@2oifr8@GShFc zyBxnm)wF681l;}ke0fZtPR8j zk51jVw#P)#$;sijE9_y9s0{F}>hTU$o7UjlXFH6lU!~IV?JzX(F$FeD&U|zNZM4w?$=2BQ!oTcp z)hqJ&FIcL>$Zjn03)w}m(|*O$h3~atE^de|?F#%2T55x8usf$BZ=^{o95{Ha#re)t zv=#a~*}V1T4@Y^-HPn10do~0o6T0gd>S#{WpAi{sI5_k&DlRBA+%WNL5 z!zNAjUY<2)O~S-sE(1Gu9ni_8Z@2uUWJfRc*6s&CIoDsF9YQb95?xOx(DS>tfX znkk$lR)H6ptuiTZgs;=X;g-k98Oj&3AzaonMYIHwv>Cz0AU2%KKlG_z>Hp`A(=jpjFR?enVG}3OCbU8WY&87RtE)7H? zRSbMJYQ+HximV5r5YZ~RXdP=*AJJVhAE8zBO-1iTx=n^oQQ3EAcEf}o*M+Xno{EyZ zvL#hq33k_Ap9;@6`Lw_Sq&sn`1jQNjbZp{sHv}aYvQhtVv77iZ?k%i(kz3ucc_HDf z5XY2DgAzYOF4ID(cWpm$W$jR@ePyMv6F)_jl}hJTWAO>PMuE=Mebv~S+DGU=PQ&!Y zy+I$LR30kU;xD*Wz}3jsZ4C>Ks_hO(elO(qW7Z6rf+;^&ck4V>*>CFPjvc+H43N_$ zZ|$M5(y?uO_=JsMW92u-#82Gjy0+I-Q%_Dr^y>#+pSXbU6;Jp;zIf&g8~?%T57_uK zXXJ|?tX{qQK~~m--K#4eW@SChKQrbS8+Pp)b2@rde)rln`Npw**$;M>-_OpzU%vA} zw)~Yk`0v9Qez1o+6n*c&)1hNA3`0Pd0~yYKT+2nxGccp7{A}PWf9}oO^XDr;XTxtE z%767{n6}{TSzL0Tb?{kbP*CNwgKUMo_QH>I=KOepB|jYq%3|YKbqoyChbY_K2~oe; z4B>~evFO%7Zo69yZwGb6GZ0u?wX?OzWbrAl1kXJA$A%4moQw|Ho5JF?KkYl4{OS7Q zPm|B=ubk9l@56Ol>(0f-o~zrs?&01ZlPX#N{;c`Cd+%pt+~0d${@(!k7K9iM8(evR z*Q}S+*6b!Uq5kH9e}dQ0)uB>be(k*4JVUuU6raAO)WhJ|Mm@$G2?PZ$1aY|T>QFPY zW*v3#cn&w-RG`+ur*OFL>QIz#t%FB$xbX&qnHn$!@S%m0+I(g7b){dLnRK&(0nTqW zsM~-dy(piU)D~WG+*448)~USQPHoz+Ud?|HNAdb}{lk0H{?Yr1{JpN9a=T`x{d6!# zKg~4Fe~_~D_1Eh!zL(1Q_qu-Ld((c?`<48?uK##%+JAaqz~AfWAh$z*jdZ|!@oWBG zM-TqqNDt$C<;I6JxgBvJ=4+gfCcFh$qQsfFnsp!ezBlaQVeN477R23nV;Zq!FH!*3 zJW;Cz&#HrvhKok83$~HFqL3@5;M26o@<_o}zFu_Tko?IT#g9(T6l!j{mKk2>y z?wYNCmzMs$bMZHA>V%AcV>kBs2&dL&aJx*w2*x52;k$dTK+oZ|YplG1Z`-6LK{<{0AyFBYkoMHpcgoitUh4>8#&c+!tyCAHd=HWG`c6N{Xl!j-l_AT zUJip_8fLHh=-KrAb6JCY7mmzM={zA|&Z=N(|INaLPwLF`OGI49@fy@HtsQP74JPB2c(EWUJYA)ciNHPlT@JkwA zQ~L%yA8z+pHqzo}-j&_hw_nPM+Kuue<+v>FDQYfU)6Nq%Yn^KU7B*_4Os*{1Fo19NP$}A!nq$>U)2lcS`xMKpVF+IMRm=G?CxFdC&I)Z<>p@? z`%8+jMmtbzCcL5zQ|O9#Kf)QpM3Vy1R*$m8J?EzV*V+_1xnm+o5!m-Vt4jzv1Jmykk$>V?nFv(}}_x3>zL z_B7M{P$hS}P|0wRJNmIu@k*im?DXL$x%=67H8t0@Q3}GfH?>iUkg4d1csrVVXd0uT z!@!BHMZ4(U?jeI)`iWa=v>#7sF(|ZPPUVQ5W#UB`1^sG2&wVp<42ah7UgK(9ZDdJj z8CziO1>%ksz+Gqv7L-KEC&41!sUg0yhSgv}}Q1|^mn+9#(l|S3P zoAr4&=LYMI5&8T^TH?;n7cc&NXX1`87BBuHxh8%G>vH7^>#}2qeCNs)`Oc1G@$YZi z^nSeZuUw?g`8yN+r~d9ozx4ebBP~ULMG^{4U(N+BhqUV0+1fv%RhfD1QY$ ziv3=n65xTRh5mG!nuq~q zL1Juy3k}KnZ>WqbbzN;LkVsmy{dQ7KQ0dInFJr>KlWS|_{?cJ_9aZbcjxSg}&d=-I zNFR3TX_d;Shu(Nq|B0$wkQyfHDnHdVf^A^7cVi*>fZiKeU_Zcns81&_{&52;5p$n@R&rgOd7hf zn^u3yrQKN5SwaB*ZCxiEm7`*6q_BTl*^R5H5K2i-o_75A^__s=8D;%TXDG}NkgL0t z1X&hnA*P(N$J&%RN{Kb{_2d%Vf}O+)<@h7G!~MIXOs+j}n=SlQ)z53?v?*C`2bbK) z&HW$+$v$=wuhvyc?-Q>!Pv^4XVRnma2*8_>_&P|K3ZXeXBrI9LcclBtWYUd{evM07 zo#&T_P1Ft>M_4O5;&K#>jMxU0z-{&YE-t=S5zj{`Z&L2(nVFyGPRgSZvcw2!7YJ2W zdQwctI<>@SDMm>C`_e`EDMl!1a$)P9qTLvmegk%uTb*N5e?GxFzCG`l{O&ke4q%#l`YZR`&}`^qgO{ z!b5WM@f_E7|9;9Wkd0xcRU%da96Jo8K5J=XGKh`hJD24DmPiY0Ek{Jw6s{#iR{G`Y z5V@Irk@Y5Qt&TA#YJJngynLpvxyUs6`qoMQPRMuPo(HBRt86(83bl$13gzy@g}??b zTwIpMw*`)Nlcv+8!zYel<``g%T#fQfB~+M??!)j`C{ zY`enB0z9Y?Y&zUEUOfPRb)krNx+k6K#R;>Ots~S9qJ!Htdi+#;Tf1+NFkPTGMJ!Ku%kn|N=a z1HCu&*NDG?9!XxXb=MH0Q^{7el-3s?8)HG1}`S0p+ z7j3TSTKjpXVQdUgeQVhCpGgx_IehT`&=H+Tqf)vmXj5gOtZ_1|)UnH{KiQ`wNYllg zQYu87^2OIyt+={q{u?V+f18)|US3dsgryDZ8n)W2C~aLpK+?Fvl(gar**MzuYueek z*(a7RD)DaSqzQbrXyv%}dW-sP&wbOvjLh12ts_AFBdY zz1>BeZLz@Swy)_vzR!tjD=E=rE5AriWQXKX)I=)Fl~fsRpYSj~>ZdiuQtS0168SRM!cXu4#uk(=py{&B`)*PR^_s3te2DtjjR$lG-w=h4MF4Hkgk9AT*|5bkIsr5<6zh`hUwHrU8B zAPgq5Qyj7lFfLn>lwY!J(HfU37XQU{j_>8H%-7OKSGlZ9Ua&GHWrYFXcpsmrxtCX; zlm9F-pgXzbW=>M|?0WdpSptdh6>yf|XMSy_fw1~C$%G4Si7U)4D*66+D8Hr``I3qG zTMI(_jH>xIC)zmA^3T{bCeb%<$BKP3dZyfr+K~rBE{4RHe^{RVSZ{8{Q~LO%fkmiiMu zP2h!7=G@Dz!z^(Bm-DJz)-Fsd&QDrVrbDZ2MM}!b1kH{UowmlylRNGQpHVFTJ6 zU{;T(zxReum-=p)lQV3FZ&$Hr=!~fj^6O$dd0j;MX#dh^ z$Mr?>cb9thwTL{CHhcg1mHQ_|dUPBqsa3-6HL~5Z?de`q7Yt2VaRmsF{-9HSvF~_m=%XHIBLy0JYor6!OiWC4(|=o!8r*83 zw`!sWAN$6>smB@IHsRl>f1YFb#sF2?fxR+^Rv&4Y$}?3Dpr?3}LvH8OA`qg1H6Xtj z5Xq(6B4k~#ux{Iqk|GGp%kO1m+$&f9tvGzR0zY4gX6>t)*B#e8%&WRuK7&8xGu69s zCAyTyB-TN`C)ei5wemf|dh=$I6H152BL~QUFdPGDEI*(aTl=;lKVaS4_^^*A3Pgjc z43B`f{FA~|R3RrO!Lgm#9$m*VgeSsy3VTvYBhfGxUm#;PFHNkbl+GAT(1Zri%x+Z2 zJN2L%^Q=1T-~oQ10X(}73p!nSXU@hPL-Q3T<-3}|b9CizQ9Yc66kJx`qFO!$w37*R z6~A{lh_^{Bqm<3>r!Q;lJ1$14Tf#Bf@U+vj>as;y!gmyMb*z2293dMQ^7rJQP%l8}0Aas+F{(S*ziR zGip73y<4}Gwrg7BfgO&&8pFdwfxuFz`$tgsjz)FgleWPet#$U2vo-m4bw?h#8OW_B zuJ_%ohxT9all<4Dv<;hGd=HdPPd*(fFAzKB-~D~p<{zFMJAZ8NbW;3p^z38t@!8pH zLkC(|TaH{hGhxMOwaQ{^akExwIrElR6s}0mBi)Z{2kV|lM>$Vh(mq*3zP2YL8`wzw zGMDS}U)s3U7F1&sW(Cqvrf$)U&KQbUw{C@jL&}~E3FXwEg$0-1(^Eewpvx(h{2;Cc@v%i-xOp-{{NOkf3JYZU#Bup)u`WMI!o`#&uP>bQ z+Vb>^v`fXCdNX)rtnyJXgFPqtId#+ ziL-EoDh#Pjc?wNpC%&E$B=9mhOJP^SeG8RBwxo%7*Fwm1TNRo8#)9qgovR&QlYiS7 zuzlg636aC1E);gydSkZpTVfGV-{CXvFts%c&LJiNaSF=R>i6-eufue&{+i}W~d;Ym(L6sRgO@9Lc@*xP9 zkNufl9kMUEI^~tn>g+#{S65pVvNo3mYwg3$S7*+=x|#mEjNZzL8uKTAahd&1-3&4` z#OO(Y&z@)siW(A(@K^>%QwIvxlv2vM_{j-Q*IiP5(-Yzs9*bG9ba_DP= zCij~BBu$0g)RTvo(zJ|u84F}&LU5$WKkRHe>YX?0=kEreU3_7d z<(n6@?)NR_wWZ||N0+4@4v#sxbirY|po>BRx`GC6%{t(2O^P*wt%jr#I?*=-8&SY5 z*%aN&*|pQ3CC7|U@tL-72@V<04vmfu#qX}O6PL`E??@4QS4SsWHTPIHB{bjnl|c8< zsIp*BcR!FDO^Ly1N(FK&@kha088?^guSN1B(t7k>wGc=lj)XzG4dJDPLx*@V)>i(F z7X?Q&mxHM)aLSJ)Nw9!25Y>$+rB38K>n&fDF7ZA=9vhER>K)c&XwQ!3_QHpUs%qa~ zDp<<7?b|xb7uh|#pr!kku+z1kh*26(0JtT7J1J|?f$>=9;Q9)5x8(kO zGOWe*b`gk*=>oEV4;9uVZzs)Crm;~SS%>5+>wRY(S>))wbi#~1bLDu{U2so(|6O*g ze0=IFvl7mxS#7WkO!f@P@kYRj(pIwEUx$7+ABSI+aS(m+TZzlrz|n?#gvUK#=qV`8 zf@0FktJy#{>LYmzTlkUuoqT-mu3hzeAi8R=pcX51{wLJJL&YfQgr;u;39MN0)Ou79 zZV!Xy$*G&`YOR-6UK(v$ zy^amO;5}3@A1(ZW9&qg?*`Qes9)Ks|D%s$AkW+-1c$Rb+(H8@R6$E^*N2pmR(+$@OEbA+X)LVFVlXbS^5O|C7@X*iw}Tu zH-$!qlvfToT4zTJ4F6jy(j;u}GTOGag{RHL{xAOnFiKTL3!zR$XKA5Ss)H~@hsIO5q7k)wSxcX7k`6(qj5skxxptEc62N^y* z1dzP0q@P7C8#8CYf;lh)#-F}*>-4jj>VCrPzSa8!Ci{i#Kf0-`iXROAv9eNzZxv^+ z1>FDp6ZgRwL)e^o2pi4UN(fso3Sr}6^lD2qFANAQO%1g@+;XT_#0X`X=p5!TwB-@o z(3Ii;F8|lH&jeNbenCmzLk3KC?gF<6mW-X9Jo^ptTH@DNuLIbVJ=8;FUJ8W|$PmCr zGZys+T2fiT>e_C&NnC!YH`~H?DGJ9lqxzx}cX#Cg^w)@cM5YjldxZ4O*2@qN0t^Bd zeFzeBmmJv*J?M^EC1wpKwj+;N6F;Pcf|*6kTRun$1=$7dfcM#mYvLP*go1S5o3wyj z!;kyDq|YQ{vg)@pI0z6at(cVTeg7FN-A5<-`!62d(Phn|`6uV4e3-|aU_r_?6@HuN zhp(70VMTc6yD3Yr=M;aJt*S6spYk2`6OK1cDZO;fV1R}z`<8m8uUa~6b?&Tv(eeXk zv;FtAeZw}&BWM~x*sxtieDpe>3by3ajU(2rjZ@9lPYmb>xylA&Ty%V)ABzT#ISQb& zRRZYro`itnlu+9vEr)u9IV;>TBEoBE%fmKPQcLB@U<>_t^z~cfHKd=XQ)eYte;4P; z1BQ4f1@!}4P)-1}ueLz@V`0S1rs~13n+G0iL4er*N$e*ai`fS9#}0!R8)|apr~dXFwlio zBORG45S`Z$chXW1d)AIdfxr}K5`CvjOicmu@HMgQQ)kL&E)dNA?f!c9$EE$z3EHS) z_OJKXXUM72a!foLigS}`rbbwYl#hQ1he;~(RWdmFOWM3Tb?HSNn zejVK_5Gvzm?eeKD0%I+$gBw_V7&M~T5KyKUxv3rb#y3|w_401okrGly-BuSZB7~%l z-u+8n^EaDCZ=EuGcSJy*e|7ac`9BqJxU*6^*Xnd`REhu0+~J!>m#hlOdi13x?@8LF zxYsj+7dTBw_4AxQ60W$KjhPqX;;I%eNO4)M5)Ricvihy6z^IwQY%+UB%;`3yK^G8mXRMX~Dn)T~UKWNhmV5S)^95+`b#3xCDs zO_-23M?O&_w5t3fKK_fN+P1>9N{>u`{|qlW%)9J*TH3oQYY^8hnhgW;|G{Vx8}tI| zn(ofZ56)>D;)O}#N#}9(h!eav(m>bRNlv2s#M*b9q(ra&vlGXqT~0{2oHl-ObRX}@ zHxjC3$~tdTwfMUDW=Vj~^byWJXnrKv2KT<= zFXb*hLWK2#5yBVr-JSJ%)(#VzrN-V_>g-tSKa@Sv{#RnnV%JaJ`HFns@rRQ?E{gf` zq>A0U<*2n8DlI;8+W0U<9VoVC8JdoS7=^={dHyMvBr7X#M># zVQU-hqj(`-j6ZNdm_u>)wSQRF;{5Vg2zr0Bxb_IJ?~kq|>uQB_h}Q{HSd!A^|ItN| zc1$zx$xY4BCpS+rr`A~AvzOa4$4m0uEAsi4^7*&qc<~#ULX{O2;^b$3(h2&(S@Y6o z3sJMwKr>**d2!u9GVeqWMZQf(kukKxQX|}V| zk)F<}Z0EE0BuDzu{`m2qqn&GB9<*|K@}_r_K32I5aP#aKdw8zjw#5_D2MrrAeMmsG z_UT@E;9jju^}z|*y*oP292pcfGd{aA0DT{bzCYFVU4KrWMjdQ~DacNL`4T^Gg8Q*m zPoEA`-FP}|%eO*Adsjb)mzp;dYVK%%u=kGguv0Zti6=paw@OR3I9*ihVZXFAD0Jtl zfnq7>;y)_m)yzfb3wzfdg+fcKb5N!7xfwHrGB@jW3vZV~x^=<+kbhechYq#~Z>RQs zTfTMR0PB4*`Mo{yR#$8%&t2uW^3%+$FV|#Viho&l6GoTIzwoK}t#fP&KYZYxQ#Ic+ zF|Z$t~7>D|!tu=+onP^aT4bg!eih~@|E$yW! zVeBjX-|*)Tba>@`o|*M|u9x9Jhq~~|IgR_1bCSmW?-;$>(;w`}m;b(4Bmcd+kDqd| zLr0)EP9WF@|4bkSBjK&oK`(%ZLYfys;Ad4Vc-M*)GgA&3t2LNz^w)DZEBofUQBurv z;|$S7P&FUZYM}k}OjqIL$(C(HlE(MUptQGnVY| z8;&4xw>Os>oR4voL+1yk>h1Isz zq1hgRX=#DQPj~re+}s?{+QDvH&wf_(ugHH^_MZ^@dL)@W~>5S&__Sr9fZ(X+a` zrA2V6$D9og=Iv(W1w?EOJMB7aP(r%O>}ifFQ+q)>QTRL~-ecu-ZHyFgZgAM>#E?N@ zqo-7@)%F&?4vp@+9;j4fU_QlipcT0{>2#vCf(izi3}|v$&>*Poss-26CoTkp$;&UW zlnZRt^ni;ePD>FdtKQb`kgwc-G!QuMKhjNf%^IOa^a(v!h1*5OeH% z2;O)qm1-}alpk-}%vu~d$y#o{F!xMK!r29L-`yZjRrga1JH+9&*Tvyj5bPh!j<7zj zU0~K_fV$WWB#hHbC=%X%Fz~1 z2A!t1H!(Qt?}bsWAcMUo1g2^q2yJ(~JIi&^c=v+vIk%6?J6Xih4U-F^uqWW@pEG%N ziF}eP#dE5U;;R-!WlU<b;_ts@0sq&knuR=m#6nM>mJdC8Bp<5YkGSs(HLO+9 zdijr<3kaIW3ONcc;dK~1o%*k;_4o1M%>mmPnuv16K?_Gr%JdJ(a6O&&QEu*gNh3>F$>)S)YlIHd^V8jY6UVq^1h0B$ z(W3K70b4H@%KKW9VN5OH#0lKSV~E*X#F_nGSAv#xIR*4PyrxTV9vE+QT-DpO)A`@i zOYh{WE~!HbLLy3n1GX(%wroo7r17Z%lXFAFGt#OrRu){FpPLxD+GEvStC*9?9&?<> zC;J4XyYF*J^qI79jB~74(#e>F)AKTq_h{KBdUHhDrA3hURN#6FRBKLKEtbezgNx45 zb%y&e94%6X!X-X(2Qd*@lTNWg+ofi|*2*m_ja7lz)QU8-u#kSgF$7TZyePWMfd#w1@YED(+ zgt$?`8zZ8&OzGdlMz~Wc`#21lwEFb?_1~@l*0j=`jaEgvNdfpGsnQFyja#s83+-N@ zeI&@JeA@Bfkidyo97ep0O>sxg+ltSE`@wAWUKIUQ00Y+OPF3QQguL)`Tf@B9U(d>T zE3@6X4uwk-_f2itJ7KL)_N}btceYF~4h|@q?wT}pNMNy*YXxg}^C}zi@q(C>Ni%cu zv{hrL4a)qcxZwNMmD9_@XKb446rM7EY8eIjkqv+>A|wdN;YcPUdj~l$B=JpcysB%< z$`en&0pSSyWn2sfnYk$V(>(9&RL`5Iu~vr7h`iz&vNVmYf<6|OmEBu0c~#JY?Btud zD{rQ(m>00hcjdj!vx@zKOT!{cgMCY5{L(yJ(*pw2UEP*jAsQ%R_xv`^I-N^SteOK# zC!g!otj+x05zEfU?;D@w7nJ4Yl@;WdG+wyqlIZQdaLky6-rk8W7{pFtS|U+waDDc<=<|;CExsbfpBqnc*=y(P2tnZ z!a#lA`a(&tNVzx#PqT?HMTrk9_*qi)g=0Xg{2c<^Dkn zx<*7v;DI^ejnmYNI)3W8^phIqfSI*l#UW{K%?-@QMtSar`T;zY)zIm5=LP4~|aE@Nj}Ru*W# z?`pB|-HhzF7n>*=4JGv>MWd)0GE-88fY-Gb#HI3zYOZ8z+v;=-tF5K54;u0T=o`P4 zj}WERA+1UckqsuorSPW^y!O+yuv8ZCp4_^fmCu~f`PUNCKUovH(%XAwXlSmtcdiwr zXWlO-*zejteck#jey?Ed=gap77R`uQ=kHe-7G4}k4MUO!U_^PtOgS~kj~AP8n!!V^ zXm25iH!L4}k3HfIufCp;{%L;5DsS&qA=L0H)m^1!xsC9d(sIFPyyfsBf4_Azl$MLo zlfm^c0WXx_xXm^VWo)tfihv6dGuvX(_f@*fv3$bYOu zC%;1{)3`2eK{F!hev0LAvj0xpDxXj$(25G<6p9J%0zOFuc52laJKDJ23~Y<}vr7kh zUWWIlQKecnj?wc;yieiJ678J@TD@)JALGlG_(i>SP7Fg-Mq*9k z#HvB(7jt5GyHe+2DC3!kJ()kxm$$M~vk$Omd9CioQyC?S>mK7iFNPAgf1@w>v(y*c z*?2~|+jYP3@S9?@NRvX#l>S`g+-ZlJX)+IBkY+&TZ*t^NIYo$%eS*+8D2sejF z7MuLToQ;tKL{D~_)zz^c1|sbory~-RIgxh#NzZk49w1@?CnBwe67bxeKksNu&wLBr` z^P*XQX^|9-Zys0>PBnO5$e$m6NY59|-0(gH?|=Ti{_`*V`4)Rg3KN&${jZPc{VlVB zQWzGBYSn-FbG2EFv1qs+-xI{*0^Z)zO+cCLZ9jI z)L!1l_oam#^04kh4!LOdAZNf5A_GQ*4sx72 zLOA+`_Fexhuj%_1ChVE&IBlf-X_&WrX5h3}awZ>fozl6d*MF=QytX8|C~d-Cs}a7# zJi-PB3?C7@(6#oGuz%*(&C_OW37t|h&2?#Xzpq@A+`JZz^~%4Ly0KS{D_|`#HLW?m~iQt3Zs8cRuN2Fw3otBQ4rggs2 zs;cE2XFpzzTt;i091-~rl#`_r1j7eF*rK4lXyT{^UMi0Eic3Iyysv^bC@Ab~k-s^#FVJ?2CA2e6gvXvB zw0nEbo!Wxk^aVwUvNV-|dIvc5?CI;k!;+hN%$d^vHc z_c5Qvqn#Fo9?M!OMDAs?nRoh5`LKMdG*|vj{_x}%>@vhI3UyfM>KNXR{4mI+rR22H zL`_@hI6A@in0L~I+zk2t88(Eq&Msx%EO=+Sd`jN7=NozKC#d5N>PSExSZP`!*%$f5 z@35jf0Bc;g{{1T*meUuZ01duU7j*@3!t0$-DlB}qWlL>4-3QDp9d$O>)#-%rS!@ZV z-$AYN{jKVDTc6OSxB4Q+I&}rvY55(I3M+N$J`l}{QRlCy^Z#h16#xHbq!7km7!i13 zBjt<#zl;=6Elp0+j8m1Hd6`YaNc8BbxGN|PHLXn*A@8N!L9CDGu|*n4*j0e>Bg=ZOzhQY2nSAbCaQ&MmDdJX`SR9sK#u|3llGz*Sjo z|HEfL&vVWJR16V8Qv?xkM3fnsWRiKFK~zLU98(a)AxE5W!g)3awDg*pO*W^QnOUKk zWw+E@U9;?Vy;+K!hxfbo^Bg$fvF`o+{%@ZIp0(FrYwfkCwbx#IZ+uVZ-@Ewt9DG~y z?>+o`9ln3z-_Nw9f62f1@$c&9Onn&cao212V`#k&-!azK(j7i`sqxItxR{CgIqMmI zi})>k@cWkEn%?U!6?AV3{cX!{Uv-!A5D?tx!FFukEW^u%O~XA~w`z2nDopcPu;1z6 zDOuY?6vq?I>$DOkPmQ(E-c!Te{p>~j)2hFPn{%)3FDTf5b?%%m4~!ah;LABxr)SPQ zT~+yhMaBCFtT|U*eQwr;hg;P@x{f*W-O{DsJvVyvbNK!JkNjRx*kcrp4as|4i+=Kyc&0&Hy zXK-D2{{h_m5ZYl@_4!WT~&BScFVgrALhu))1i z_}Q4p!X%G{=04AZwghUF0xKQ6F({XYbrBbcb!4NEJCdd^W2}Y{mQ$CNRhN^yJ}YZI z>tg&X7f_90l2M6OZ<7`Esn~H?%d{brn>1}&68>>=IqS2t*XMxT;;G35Po4^ayjioD zEF3fzN=!C*eE)Rfl&Jk8d}(BV0dfz_X++zMtDKGAZ8=|MPs^!l75oDq3vhI1yX->QMet* zR6cMB-H_O3XrXkWG5g4IOKH&f^wV{ z3#E?0d4nKmoO{I`>@08L8e0qidv~tP5qAiFh7Zl802_r&R=_2tXavUv%mjxR4dN1Gy6(A_e8zPA zrgXy7A{wbJZ!T`}lInBBybbFsJj<0wSQFe`%i$n-g_CBQHR47#RxJ){zC& zh~YiZM4A}O#rl%d{E{hGVl054rC#;jT!)6y9k1Gn7RQb63sPPDZm2 zRDr9D11|hJT%xZ9oST3|UrRfcdW8Yq)RGQ3r7QW^F=+tyqbMKwb~N0#vsd#mJEoNc zcMktbNs&a#@z@k9!FknFL?s|7j1-kNsC+wa)jjg{Z&tfarv8(K=+)wTYycFO)uizmp4hK|H*h+cUMYdj^)`A8` zASdMl%?;}bD)J9(Ggk$&OHmyDPY(Yl!9n&Iydi~B5uGc9Z`zLe*|{^jW8!hcFf58M z3KsL(*JtD-9eY~IN62`-1N2=AIzu!g{7LvJ?P^L9KGEw9L&f8Qh1~Z&mTA?qBg;G^ z_r``+H zn$z(6EJuUj4Co*TyWOLLmCg33e-TV=4wE( zxS~-*m!BmqCmud5eAbb$Pa;Hj#Ir|L=Cinmn)e3bxsEU1hT@^Hl1tHHl!(+;zA zYz|bX_=LKL#s~+*^koS#RjJQtXFw~&zsgp8WFgt>{np>BZ*jJ+~ub66J|MpWvIBG)F#pdH!vOiWiqinc+kEL=K<*kLaJw%0`4wK18tZ&Lg{}MDaSdB>Ab6S3KSwwr{cObL3oliK+5uz2tBRRrB zL%y6V%-^yF*=K-;F#`q+=hiILCXLyeen-|r(!htGBV)*xE$jni7Bp$N3wEYc{2a8$z2Os>TM14Tj&sK=;;3uz0Vr%6YDXiSD!|o3LtULu5 zTdqx#SNL~mX0O@ZU2!4s)8+T<*lY4gyL-z&4ZN`8t{pB&k~GCEPx=)@ys{!fv;|*T z|H*2|&)v%&2Bm6^Dp40?Uc1-`&sAO8o1ee;Qq}BBd-L-4UYcEfuCnr6_3RHTD?hxq zc+pbXi#^dN$uA()_4J^7UH~LGyy=TrB2MP)fd@)D; zE!?U)Q=YqZY}}ZURk1OZBS*)N-JV-<#!%HR8z*(lBqJwarc`h*lgkjpAC*Rl&_^(c zx?lU>luDacai(&@vy-ilKT(kxJ8R^~S+SW*t&dyJJU*fFOog2FUzG}&x!b8!l`*kZ zRI07Hr-XQrfNt|<90L}vw} zena`py2UYpfzgHq^6LLiWGy=WDQPJ>unzC4(*mv=#1_41p~0g9G3?N9n$pDdpNsY% z7<*g3IGRl#+ca&gY+wh)Q-Xs$n=Lck5*(C&^u8)PlaYbGmL|R|P1j!Go`+#{ln9Gl zeoTrlQ0WYjyBoJ&5vND99WKhoKPZeMiM$jkc#v9;F{L?P!NUXk4GT()7g)}u{Bb#h z1EOH|x%CT5O`m*r^q!xJ_U$jay?c!Ka(3!~etr8mx>{IuN(l@sOiS(GcYu9QC%sjt z^sv};Ns+TvW(=^>;?oqXXsIW4NVLT7}H- z>Zwzgu4R8Q`rtk*HgqMMS|-;qOL^U7Hia|VnRbPWzz)j!^AZx~8P%@vlISr7V^z98 zQkA4k<#Ja(f+66en_f>{j}@5KZNXUI9f1i6fxCAX)i4$^VZ@`gOl&g}LJC3>GUkmf z7%|=-)m@0{eo1>-<=^iS?k%eob?j0?f8mPOwPnvDD{JP@F0Hg0+aY{-TtZUPuyE_q zohCmjgtWY7`SPNCFW-WK{IMS1nd2~#EfRx87wtDn7({!g$|Ihx%cHgx6l@z+ur)t_ zYk^nBuwfZq-kHOOWs1Rh+X_Z)&(GgJs$g5*W&&q;d1ZJ51w0~zWnzC=E-sp3yiIfO z3+8s1UP)Cae*@EsC1BXdRSgxwh_C_m@wgudGlT`#i_gWNj~0_)6*vT_~4++l=l`c zK0SHz>BS3APhOk6I5~Mq3jM!089Z`?Tdb4b9>X_9{-G_I8qP(}B*Tz(>+9nq+!7s{ zZi(}J`?~n}xVU*^Rv#nW628@0AS537$m!4H`-~yTeRR!xw&{9Qq&~HFPu9qhqogZx z7tvusNL=01hJUl)5_9QYq^aZ%OEc?7#lp68; z4Ki2D4N@0a4Ix-f(kRw)8V(1r-CdP!Qa?ZL!9$;Fy^Xq?y;g~a)2o-?*y2@Sv=U@; zQ#Q=(1+N+V#&vt9`^2fEW(O2(88i*;c=!bf`z zpAas320IoVv+kW2)jMdgeA6i-#4*HEZirhoc0m55)MURnZ`%WIC9{HppGuIl0W*@< z9Nf31|JVsxu|DzMcJ=NPW(0;#i_z)(CglvHo33lSbWaQKA06ENfNjt85&cGn^+ffS zYQEFkN}UiY8KOx;ff3I|gEQY;Gdd{wbd7>wU$|wv&lg@)GDd)i6y;c`)|dZA>(_HC zG`+Lt_@^fg>NTXRja#qKmu63YdGh3!rzb6)GSFeLjg6=MfGJCoo_cM{l-H_QNLg6a zEbrb!?X10BJZ8s)m4#@NSV(DTLe-FdLps_Gb@Z+r8Cn`5bz}pg20ZEA-F{HdkZ~aP z)T>hl#|*IdvA6T@)<1Uelvkgc{>o%|3i5{za}4Tb8}2-KL~vTd2 z6MWhGpBk<#n|IcAnS5a(i~C*r8haQvhJXAf+R!T^BDP8{Y8|CD_{*MN60zr@l8@WR zP{L)8CP~v%4Ew4++nt-c`?IQ9pX|xY+w;jRTv=51!lblylY%F{I4nh)p78K!-tNyT zD?izjo4e14a+q=*qOS-iujjdf_ ziwI|*Csul}A@aFSHEfgo)JhC_x3?JnW;nWWqmUt_Z!vT;IDk~NSSh@VMWqwp@#hXa z>M(8Q4j4f#bBB))dX6%6_)Sf20FR+gB%rNwKza`3`4RL zAJW`i$l$S224*tCXyQ^PG}=cH3nzB9diIFE%Y@>TrK!nFS7qkrW~Sxmi`8+1riz2) z4+f=0)J~mPJ95c{)U=Y~jI~h z3uwAL)gxv}Nm@qngw)gtN-;zP9nr>Io0%2Mb$VV0Md9cBY2?FMU-stoec9Q&%0_R` zH*6HTr{-t)rw%I12}-fq+Hj1J))8`-5mQw<)Sc7;v%7| z?psW+bfg>)Pnb+5Hnw1Qu%>WuT-Sb`oVpGfyiXni^i`#0opp01UEf}YNy4Q0bTM3p zuukm2#r04GcK1`R`66qiW!l8h#`kJ<2mIXnNGheCb!Y4sGdhR8BxiBo_2j z{lmghpvmGQ)?fDz^nEjSN5zpWH-CROcYl9qW0GN$@TO;_}(J zu{%h%u3`Y&nz+s@jz-Q-?P2TVo#pPwDQWK*X2oUqUD(G-_@;%L$jhTgc@zU1;U-?L zUJTENVvU4HJ6UH(W^q=2XMp_O5$1SjCX0}UesV`X$K;FRpr*^LQ?)!>Q3`Vc5%Ow6 z4^V;bSkq;3kdh>=fOpA_D&Grl8&dsuvsV_u>TxY}8>9B@Y$|qju(p3X-aETUfX@Rr$GaQo4VJr)Q>L+GJFJg|J&30P1|Cf?`zTk&jVm&bU6= z)}#ejJZsgZL>Z&f3(LCdkEjC`h$L4T2dTKh^558)mXV73%x=-r$OC7|9_=a2gokK( zRBu;Spo}G>%WSQlm7i4y5;VB+n9_a=nsMeJcC_${i?;Vx8wE^D)W#< zx6;r~y*hOo)H$J;s_&`bU`g7n)yDYCdr@0y7)>vtdC&nCnwq(-g`rKojDwFA#(`B` zNb6Yb(+BK#bsQW%AS`~E|NKeC2bay+@Z^?`{rvm-CVB=gDIK4_UaTCI>D;@YtxHdb z-d5dgb0YG`r^oen=xy!Xt6LwdZZ>HnLJDZv<0Q^v7j$#f(TT!5$k4-xr`Ngh8!r|f z@o?(Y%fjhNKj+c2nXf!iH^=ZwZ+phH!W;`L#FJDaS(x+1w>%Mk*dvdzET6gK_WSjQ#wd187yV+SeTH1B% zSV~M9{g&lstO#su^jn#gzG8TzVNl&+08!tB(4Tx&M!}J^>{MM*~M)olD@Lo>~j}pU_s1 zCtSL?bYFi`e%C&F!pMH%!trXMqws{IT_0g8=`JR5b^vsjJ~UxKC$N};qMQc{IE=9Y z=7YJ=4jU_WQ`kItvk-tyzbbLjXtq#WQzoy}(u={P5I~Tkdlebr+(ip+*?%!LG}sp} zesJ!@i4V@cIJ%@{^ympCV%4=XXRm$z{+VlQYM0imS-W(}YK)zxv}i}?G1T;ACbp*- zJc7-NGDCJiFQK423N{YP{SQWH)lhcP`}nc{{LSLpCp1tC+ZJw}3#1|-eGepRdP+3E z3wdE}n4gtRZ0v*Ydt0DlV*%U3`5|==zd-yLxzUT3HBTu;5RtBEGW2iL}Y#kU8kbNsL8>q1pd&5 zYUsHuZd^qI<=t;)JQTz(6z>_0nAg(Sl%grcSwG3C!b17Pr25fg4v19&6G!$R7Ut&_ z_tXh(ZMEFwZXGZqK4DrAS`V5$TF)^oAIJ!3Id-V^A!p%uwzDc85px0ZP3=C(qDF9f zS3YX7qUQPGLO4|4n!C+eB4ggE2jv|?u>23*PM+296LKC4Vm7iCMVkZV_5)w%N0e(k ztf+6bnc2B9o3q$+LSaNo>6DayDLr=k;5x$sCtdf_5n0QU&xlnalOsI?lEcE2dv$kX z&(vOX_6V;?nzLK&9-gSh-gKdevMXUU0F;%q$-LLnW5{XB8lsj=4$FV8a@5)^%fDHr zjffmIMBi&wM#kcVcXub1mL~gTc!=5}v8rFe_NNxVH+jUA$N`=ss?r0Z!e_?K-LoV; zI5@4}hgT2FBwL9=6D5Y8bGOCmtGuYY}3{W^XAh+?+Eu)_+vM z*!qd76DK7nPby7|EF2XTH440fCxm%s1X}8?OTt6O1_TrZhnHCEEdn!!hD`{rj`H@7 zjP&-7Vr5}NhJ*xrdW_&&neW~j|ERb|u>N5u3#iUIWQ0T% zcdF{XNG#eLIC@ z4$WFd^_NgwoPhe%y%sV%cf{;axq(el>MwI1)n8CbN)WHV4Z<(%OWk+8LsH~5f{H;I zldzlHh5-q~cE=_54)@)io+*8v^Y)8Ti{+mxN3O4*I13Sda994qHmX@^Af#7$ky)ot z@(XuNjN9!MKOi$*_+{dubt5a8-QuVh-_EJNUxAFfknuD!QkzlZf_E{uMT>-A#NX!| zbYxu=17)R(qK5evFIvRhgkR?Z-no4ozS-UAn#@4O0(DWHMmr{07*j( zpIxr00jX8e5#c*+05{gL{1U!19MT4?p)1lhOZ|ka+BXym#wdx+Lb%~AAzbTOTPxqM zr7i1d`2*oy76Iq4t48xM1&|8)yf}c#fWXY>kPP75T!tUT{v3j>VU9BeIOjPeM1_n($asoi0bZexK*ChWIOzF5Tw^0t zND(04b4a)fnGOkiaXJwyWCAFAb4a8K#GmJiQX%7!D}_U%AAyWiA#*^dAIFJNAtbXW zrySb?`9RcioH!MdhLYalkoXpyb7C;h1@|4-#whtL=+I_HVhf}})C!h>B&m=MatQlU z@CGDVg(Ly8kwf4<=X7=fa+*U@RY)-)D>($tHjcBR*+clAjRKu?6|$=Nq41q%1t1wJ zWOH+sa21iLdR-<#G)$h3Fo^ElTgtCJl&4FYXPYz>6OCS*)cl9=p6H{Ppe_4R7zo>wr19PP9Qre<~%$Y&4AzA51Er(qL`Q_}mk$VTIN!7&E^W2t zz^pK5hk-#)1`U|;Zii)M@;{!m4xSz!T^fiMlMkhx2NP~~`f;;UgqBbxFgx>E2sEkUi>EKrw z)MsEqa7dy2_%R(^O2VScqYu=EW~PL>jToraXIu76-L-J@y9GfdBV32go9p8rS(QBN z;Nq;%pe)x=_rM}mfLSQ3Ev+1=v=D%so80nk-!F?3pN{hkJG@Vp`*tZBIAigyihziS zfWW9I%Mr8FLzWjYjXcYxUv>Gcx&<-8Bch{2LSxZPFenO7g9myOPWJL;>U@?h&k+3O zL&A;@HMNE@=*~1UyaHA9hAJ@4sIvUYqea$qgiEQbyA1D)+4-!B@VK3Ee(mD z6`i~)um7a+5qTT428H(%wV9D&=}A^W6T{uS{imn<1VznGN?#l&vK~phmTY~mfNdNY z>EbgsJ0zIi4aO)h%o2UoeFGBO;*NEwmcJ+HhB}W(`k2EjKM# z00K*!UtrIR$IThBOWmUe)rBSW3K?1#o6I(3pL;!Gnfz@QdeWn6F8~F5x}xHB7k6(GcnZNdV*mhpbQ`@j^Yr zWe&husX}4_ImjWaRLD4?URcL-)vAzU(0Q9fR+~U@$0+EmQ6c%rb)G}kJ_1>%Ldu05 z!cmU1UWJg%Z*j_XCJ>xO0ObuTWF#m*%^@32a9-iLHnl)@g3h?8=a z8esv;5p%e|&7_Wvtwva2+HBY=mL{f{HXA_DY*1hG*$!5U(=K}59#CEj$QpK>#ffCh z?*(KfAh1Aig<*rnU8zAFnO`Qr;8Rj`rYY`93NUKLD;xtOsXj-04S9JB!S7|_6B>ln z<3$>1fM_)Tq1$9ay_LplmBu*G$mTS7>)@2GOz(``0OW4(BrV~lIQr252qkk=t6$56%D3-9yw9y?i?qmr_Mw79cCaXvg644rL!(ovmp;{EKvc zJg~#)#SxK<3&$)Bk61W5AtWd%enhC$RRt~@(F$zp%$FBpX!^>uq_h?3=_~MSW%}ZY z5lP`?W#LH?6PuhLn}mLZRf(QTVfmp+DN;3~$yom@N&hOH+c1BkiT|ynqO+OeKN$Xh zDE{KVP>dxhrsJ-5ig`-&v<6N7uL}QH>3^x5|EJBwtn$nPf1mLGi$?!K*XWi`&A;g0 zL8o);0P~l3+kf(|<|p25sziE5bECcvJEU$pcO_j^x^;k5+ccl2-_y@DlkO!I@bu{B zf9fY{e?op&9Ehb!!I#r_mnxg4BR{1_nMwCk(+^9(GJ~!gZZNrpe1hgV=~uA`---** zg+giJrw-mwac=j z!YvZbN|`EdYMsS?+IuMD&s@~Y+9*NBgP}UzPyBnF^iVj&@iCPkfBWxh+>~FT!`1y( z%1}E%Iw<(-cH$exQ5$|RsCZEwq$J%Ryr}SDpwE~sDpq8o_?2g5q$mCv=Bt04?DKAk z)`7zauKs(C^bS@#G!}N+Td1AR*nJv;wtn_5&I8$JZJ#i87(o zE$TI~A{r9G@$n6!u8r^&;z15a5wtj3E9e-kH{f~WJ5}E(xS<0hi`88Qdxo@xdkPNF z{ih7>QaT~ZeiUW^XE~xVmqlC$HZ@|hoP&#C=wr(pY}{OZu) z0C5V7@Wo96PCh|s0Ow&#?bABY=D9Nh{pcl4M0YFC4I$dXQM9)gvkc`zZTTgy#f!Xq z4oFY$F9{Fj{^{uhdb*D)9_4;Xs6}dn*Yt_*PL|e|^_HD2oO;Ejr@J*Mkz~t)X|quz3hVyc)qz2#dYK- z%{nQWECqKoFFzdmP%;{>3r=#`L2akT{~VM`mF$L_hU=_XeLXAW0h*wKQvEknEL{J% zc2K{-3>BP2hU%}Xx^edl5&bB*7(N?{gF+~-ySpEg63Q=eCHEwSJd`DY^xu0S-xhH^xRfVkzae@oQ&TE*I)_v+jy>5o!Bsd~7p0-eRPDD_kR-LyYqAx5LoDA2&_>U(nTo2w~pkHwCQ6db1 zzf7BrgS=?%e%MjNyvAdT>=D@ph2y0$JDWyvyL=_)Xnoj#zLDX*ge$WQ1AF;+I&{!? zw9=gsx7S-eVcEfcsJEkG;4I;aV_3Ay0ME(=^L*tiXzhOod3vD}5LSOQM!q6$Kf?z2&RbCFIlv`4%+WAlmhicw_fY!|mQPsG zs`&3{uz}JjB9G%%N<%?-sXMfZ_Xlv%zK@MIu&IO%;NrVPjW8iW;)~mxO1S(Eo<6;V z&u1A1^a_vctH{p=fB}|SP!DH70fZO}+#xyomGT#j3XO{RZ7GoWY(P^9sZNowQ61RyD-S++@}MrP zwS!P9#>h5<9OnQ%P=|XQ2de|dUiZ73DF>@h58nIypk>XUbKrB2_9m7*XwSBc=_vpW zR}=GqOp4*M7WMk`kDXk^LZOyw-h0tvFJ*N6S?bC)uFOg_`QX{In=tc1-xBY_p^t!t za`Qj-f6&hSv^NbHE-`sMRqEa{iYk+rfP}WQUe|{|n^YQRY_w-BUB4lcN8gK#6c{X6 zgo8Gs+z<;0o+9x)uXQvywi%(@G{1T{`b3MS`8TwHuDvaTQ_NJar#$t3g z62{G)MoppmTQ%4!1Wn#%TRIA7I&GWujR#ch7S{dM-lk|3x!NdQ?QM$CBqi*)NjeRM zXSMC(d<#wq(ozDlT=?K?zJrV3=B~@gSeKiSu=3l*i@#mjmH$fZhgH$8(_gcg=Y~+yO!OFS=nfW|s5XY!0z)fgAyZa*@OI zIkcT`G=I~LmR`Vsqp^j3V9&m(VDrRovzwk0c2&tM#V(aiGjSi(8qMFdE2IPHb=d2m zYs7J1816urEiRfpyJ`MxapB))i}PnUE#f~wwxoHV{y2ImtZ-XrR-%t<%EKw3PbmfF z_i-KVmD79VCE@|BjXfTa*;>>bt=pulr4-nex=jy0)_I*0_lXCP_72kit)$u8uy=Gr z@i;{jo!V<%zceE6mmL5MAUfe6m75Q~^U_);qw=8*{X5`|i| zN&!5`A=;>-9DI0+OY`7stze*I5Unx+)858XgjkIgQpCeIxo`&f(}hLQSQt3}#%1Rh zSP^gI)-~2pSU|djiC>9dHMs>OCkNnuN|%>9Uewh-D1S5AFCZla5O#vmD-p(n`lw92 zhS~P6JRI^C&7G^aH+M`MvbWNLv&9^>_GkLdQg451-z>LBPv@gLVTIT6L7d)Vvw>Sk zjh|>uKs%!(S6q#NJa@vV59`bJPsv+o z0~^ooHOE78sB5bJf5)h!*1z%GPIHWeggQC;F^s5NIaug)r%*PHrD&mR@^0!UUKMUN zd5WKkbX=}AUA+y^va|N4oFk{<>ZA+!jc`+3K$<3}u;b#V3S8TfpzK(a2Vt@2fyHEdIp{53dYcVWYyI5y%1>8I zfj-n!zNFXR!&2PhqzLR z3PRv&Dw_hur3ELNGxT5S=EFzQ8}2&eC5L{ve<2j@gFGZ^OoO@=Kv5~QHO?OeqNBrf zcMyNfm=<8P&rC7nFkI*{Kq)$JQ*Q32y!?&1xf}C`r4JpNK8*jgTf+iNSWxi|c1k`i zpDAI1P4XH;Q85cFHmqQ$a^Jb3v%*V`CX8U{u0#i|C%+7lI64T6tx!8sCHvfA1UJhRHS|Xdojd5|AXWL?#a2O z-vYNVU5SD5+Re=uheoiL=Rdo3!~1}UM5438nEClnovxd`Ty!#yn%QEHHtq#4zb27z@Nt;d#q_m`UK- zW6YL0+hT6Dl{^ezePy^R9Qd?B*k}0aDw{8t3pKcKw#HB`&oES@Gq&c#+|inoX+>9O zQFPXd?P(akViu*$T7G@vc=m*1E9{fNmraPfKvz82#m?ZROlHpEk&gBM2;ik|9gVibd zJ(UsNCITNJLsl@vFH9A5uOt5!Dsh96Du9?`h!uJpZYV?zKQoDWuoEPdoMhO@-jc(Y zonWuAlR_86FDStM=5O>JaZA&11i)o$#%pG3D$%ag$R)T&O*=f?x+Z-eH}=qRIvNHz z7P>h8#9|(o6sW_2Enc>59UPny4$(SV*iwWC5?x>vbR!)K-r4IiGuLGwKCJx4g_^8q z*1DXWby@gXU$6YePx(7sv^gYU@ZrI6gYo}ikN6?{FVVT)BYyDUcn|zk|EeDxZ@A2Q z%HQ>2S?jVn#mx0N*=w`1)@Ik&E5D5tv*~6~g09Qj!UiP_MqZD>34_FK^&WA92E}=J zBn%pq;Gz8e@EyV+KEOa2YELwZ^P+=xmi9WCmhi^F+rxrp2w6hb47sQ1fWHjS&0sgt zD9*D3z@rr%+BJ`(;n<%@`yPH|%%B~%cW={-mQ2brbCz(N6Q@NV(@HZ8&k@P*gyZ;& z*wFoLC0LH$M}V~!haRzt)6y7h@5=WfXrTBnPDP6z2LB?VL?|t4y78CiqaB3vV+>n` z(lH(W>vPi7C(3#8J3vveH_(i^PgQzDc-0W|P!>iAyV%!;cMapT4fXYnm+Q5I)UHI? z_~m$52lFh*YIll#gK(5kxvyaydslcB*p!DNe6X*jJH-6!=nIFsl*|A-|d$GJeZx3#oYS^8ZyQe`r zu)?rt2K-@CeSP`I@?FBXP+oq4S#Pgwz@hlv4Zz%!52?8b`o5S-`#^v{<1E7TZuk?P z@p3$S31#~ee)3v&lF?;_ZMZtO+Rk=;lnblW2M5TAwjr+a-H*%5KW5fDsv2Ii-|WD?XFdLs6JYUAh9vWK$`?`V%tH&{%YCfuJ+2m869Yj{I`>nY)nr<%6XPa6I} zmabTkU@vtfvM`b1ds3ETjCh>VP8EJI^k{l>s_@6OX$H%w!u^K}rDZ~x;hl$g&xY;@ zU_L|*k5SW)W{nUoM1ml8QuK7W53TUX_tav2^UehpCw1RFcu%{vu~h0}&`lTa8!V;^ zjd(nmOWLGRdy30Rp>f~pim3q1PvlA&?mK5YPO^qf*a%yw$>CMs{Y^S)*TZp~lSbdOH5*-Sj69ULm6NcA+Rek4D2s^9lnn#d zKUxLUGECEoN+b_CL7Zr46b?1Ti7z`qJJKr;W2Np~$WypGqFCD1A|%O2dK3?m9u0N} zaG_xg^CC4iZ52zKwz8rFw5)HJ4^nE8izJn1vzHock?E}ih%hFN95^8FrmUzwG)T!s zXWd$Tww8jLkGP%!pOXeb*dUMJFOL^CNV)7KIo{A^>sEP_bmL)f;W%y)2bJaq$pte- zOCvyODLb*}jbNG_j}o`i@c>;L5ShSxZJvHffmW2;xo&GLSElwk%XjAHwdZ~zEO!alj+)-8Dy z5Sr)Tg(!k{nRQ7;KNeAK0j^w%QX`iMy&-=ruloqM zx5yvgoICd>>TLR7i+5!14MP0r0uiBXMC8IpMC8U?$R1|cXLZ5C1GbYR$xg0Jb>Xag zbmbh_xS-l%YxRN!)qkW~oON(?boe8%d+*+ceN7gdIkarf!oN8>Se)ZY{tk{XSyko0 zWSz)2UR8$pF5}-JP$LM}%)&>AFdIzh8S!I<#b)6d!avQzM~fIG&Cppv(IIBw zs|AD)n1Mg@82J0rTyyx$N*rVs-dXsMS$KCb*(|&-yK5FcNI)>jqjItl!UD5!oVqj% zpKo5DYysQ_=IE~$WV7%cLZeyuVL=JcXjkue{_%oY{Eygfv+xGtvRU|5;i_5quOh6e zNA;r}F2cw(0}m2kHw(vQZ)V}46rE&-PMC1fEPRA$Z5AFOb~Ou+G~XVyQDQf<_|cD{ zGg4FnBiiX-8zX*XmQJj2-z+>%xMCI_F9w-~Cp-qeUMe+ysm)QtB=|IY(_$UZ}7ZJ1%~UraTB zHqOUt!y4ZaGPZ7Q{QR%xqdy(+&FJCZyu6}Gd7q?p5%)>`k5ieXTO#gUV)*6%P|(_` zQsUJ6+x}k{@-8L=bCvxc;g&hL-a){j&PSD{@BSEg55er@q1Ul5`Q}W!#;5PV_M3%! z3YHpwGkk{%q9zn@5K={Kf{%YZ#S@;g&-)7;51;qB5k>{Ln+ARDdrcAo++5>A4ltI5e z{3;%xF-QMb5mxh~d@+eC#z8we+91(vy=a5Q9%ku;szyV5I$^@+X7P++rxi)4Ez^3cWDw;@%bdmA1H2}ME`W9q4nW& z=7X;07aT3q;Y018tUTHa`)}b}2!-ueb;%>*TC6_CneI=;5e+o4J$S~vG|HUy;r}4X zUEv$E>iiE$_^A5z4uYq-YQ%J89^OMRJ3M1L`jk&Q?KFewXuDasCwewxR@e|$?+ zfGs@9q3E;(?X5zko_HGm`~MW-!o&7ZdX#63RV#@*Rt#7N)J_&J7o>HSKkNLta+ zKitPRA}?jpUMh|J340HbA9_7J8V?;*2!9?&ff%31fiqwUaF64i6bB7_7|L6${GKR& z#M2AG&ve&{XcDu}W`<4-O^*&vit%zCXzLp1zfpY>NdBZ((b_S&#qmLbaUp#qt&}=G z!cqLU=|e$cgIK9GXx#L0`$0Mbdy(sNFJ4)y)ZPRf>q?TE;Lyfiv4Ncdz5_n+s2_!5 zx7;Iu-4)t*Vao>j$GO@LboGi!3XV<>ov1Yw^oksx3if?MVuuICkIxyqrijye1+;GR zD;jA#=FxMky1%z0CB9PN7@?Ai3k-@c&K=xM65(Ra#tWq&F~a&IRhc~ zD=`XSxX`#;t!2TiT%)(R!OlO32Ceq#tJ#xn)3FPas*hIl>n*Np#xsi8@+P>{B{ zO<6141Rs19ILSf%mdnw~|0EoJ=Q#cdYEh7D6~{j*XGnC|*G*%DZ{YIM(STBpAFJRC z_6k1aN#Jnm=|pFyk&ZTj(=o#5aympmmg@zRGxV}rb5$+(G9?{y#d3O-PUY@n#_8w!YdYa=?IjT6EkA@aE1REoLZ3u@yqw?Kh7Jfs}82M^L zI9~-HaN^tiE&QA#Ii2?vI`YeeufQ$7fSd~auKYTu^8$Kqg$e!*4cYz}c z?eJI4-Bvi~qqa}BRe^(^$t(_f+c}?dg16ukAHccXO1>gvKW95sxp#AVMaakdc`ICz z8yhNNoF1$Y(7CQr@J|w)+AmY!cwe2|2HFkJ37C&fm3}ebnYAB z?dabSM5BJSzw>&1Rw zD)mPNA8^hWww$zv)A>~`S1rd!e~{L2d?UO@!EYJ=xD|?iqKtn9IEXlq=xA;$@-$E2 zbU2)kTMGPc^W;{z3H}Yu1qCi=hy#hQN(XSxHxKaZIPU4j9)dpRLycN*%*Sl{Y7JR^ z7>|tU%|D3Od3h+m&f@xKWFzJgRCiCMs7bmaCn@GKZRRq1dri#GF5mIr&9_#iluFY=PPso zk0zZ6CBUb-5VJBb^SBm1(VP#Zqs%guj<~c!sTAWB?nI4B$Ek5Rr_vQO(iQy@z*UWE z_OcU1M~;@_IQ2S}>v*EmjK#Xti)vQD9s{*ZJorjIXmdOG4a{5wCoT2jbQJs>Y&_?S z_Qmo!9PSQCe*@>Fnwv4334>dpM13dc%HjHP;7Ztfi@P%BRZ!o8d9am=8&M$*X&CWn zzK7$ixWYos7Ye)|2?TZTFt)j0^D~r(bEC2-HT4CT z;s#6PWkJbs=QB=a3)eaPBQ{K^fOMN6T`uRLz;ECpKa%Soa)!`}Mt_d)Zo%Pv*6pn5 z3}WIH6r{tFr1A*Mjr?`R(hO6$k-vm}c;h+?S{QG{H(h@boa0lAE2fr3`yjk!y`}JB zQbvDarH_g6D)wHbWYGBMoU3X-YLt}_G;Xr8gfxoAsV0Db3DgPSsBxUwevi0J zZoVa6()U5?1kUAOOmP(|aFR)Z|EgKVxpO$@uE1}wp$Z(Mn8^wjx){pKHksfp_~1@( zUUr_Z`DXK7p+fgH_|OWMTK#Lx3UYOA2fv|lRNyF0>xvkq)GXwDb~oQu`kJ^NbWm@K zt_VtfM}^+iDw;*VlnPZScq)BH!)cgc|E6HKqw#wyl(z_l-+i-`Z?Fr+G zm__ksJW2`|IIn*%jz1sm;|`b62p`Px`OLs)02koXtaww+H`Q$VYRx-s(`m*begfr3 z8LdcaWgbF5R-yMhw?JEPsVvjDY+MFDZx9WFE4=y0OERk?vO4B!Xe?a9=M#)sgs(~B z^U3b!`|23^FAg`3kr-1=JV%5dN@e3? z;260%ocOf*i-?ahi|)qZb)_9B^U!r(Y7XVSK!LXJGs-LkTwV|Dv5kEOluc+D`wXDG z&nT4J_Zfw-^>;&AX)SAG5}{1AupRT|+_TOvB@>TNps z7IMBLRrnN+?+y7caX5M%$9Lm!v?=Ka4j-<<6>kUl>I(GH8aF8taCgo7l4_kYOP-Hg zrLpb$?(yQd-g1CxF{L3pA<;N*95F z=ilW-qS5>z;6wGjTj1X|qqdE-O{n~xs3;swpq#b>B^sQg-r`%$E>7bFUai=P(L`^7 zb(1&?{zgc|b|UChL=Yvex;t_E)-8g{|Kd=@f!MZQgomT6E+=|=c-pSMJ#~!yud&I2 z0bOp%51v{GIXE2&9EmbYL-gr2drpbNFSLcD!u-Kk!FRX#h~^Q>C&3bhmuxTOT8WzB zhxlyqOpP2qB}Vs_w(`AMi{36P8ylAFa``rUx5J7}+5P*?$&McHKeRM`{_#=c zPt2HobhvwLX>JEO^wupZl90f5;%Z`ENFf12sD(tNS{DdGE?+#%;^GINv=_x`da<27 z$4tGw+Sb#f=k43_zZO2l^tZYM1SX6BYmAE4Qaob;&z!+1gms&U zpm2=cpF;06k1It&(<8eBYp|M^6}Vz|Y{nWaubGMjJU&~iBMX#)!ndzU`i3>Op67tL zu+s&Nhx7%zt?$F@mM%L&aJ846$LR<1PutEQslWUMTXyCQ5R0Xsg&%OZm;%eO<>H6> zcM);RG3v=~-I^DFKJwGoXLOq9pSP6nu@08$R3J^UZjA5)Tzz^AxMJY^6j@(XbgiDk zp82_ima3)}+!xg?FUDO;CNta9l$#1JvTE+U^l#1?N zOX1KtNt;yJ;#9*GC!%q0!2-E^8wwLqu=_*-3VThrOLVrF1_*pTB$?(lZGX{O_t2E4 zyHPUS81*D*YF}BiW&x1|(q~$o7=T(*k4BomKB}c?kGn?7&`e}?ZM!K;RKzy&KqG@m z5233K+fBE9Ejy8usSO;y35~u5eiHa5+rEIKRRbSzQ%64FV~ucA2Rq=6M!2a18gRuR zC}f`ht!6x+sb~Y5y^wBZ^H|Jtre*<7j=AjZK475D z)0f6&8@A^}`*=O^*_{$R(YMa;gCFh;ya5LpPPO#W#P!Ot>6TTRvDxd zO$KpdziQ7a6^5x~f&zaMO{Bv_ELH_g75H!0o`gQY-y3%?{zVP|rO1~lM@cUOd2bw4Z_xoq@|rISBgwfM^=(ko)-(uCB? z@c1R!vu97;k(yCADsxqq_8sk#e{`MpPIcktqRt)r9G_qI^7P^p6|p6L!BgWBXM`S% zo|Bq5YlPo~k+V(|Lsas={~J2dNu7S9hDs~0`4F-n35nfmoX>FfBLxie?Sl7JaO4>m z7P*9oIB-G7EhORY((j%dEvF?SpX6sxfp?J{qt!tctmFOgj!VnA zNxV<%gUY}Z)vX(t(wk#_-HP7RPk*6w-b>@=URztves21^)X#A5pkZy+?rSyA$c=j< zinnG~UU+(K!^|D*e#axL3^%Oxd*!>&FZ=FL!NTuQa?Xu*_ADK#l~}D|cIa`q3LH{6 zC{noU=(HT|*>}Me?hwL`gfJfSB>DVcr#vG+e5U5w?yPl&1BQFjfTnBgr>E9_J#XC0 zbIM+rrjOYC{epr+-z|Hdb>4$p$a=3j(owe9F|%Rq(-$f;w-%!TE|L?ZI;gw{UF~A) z+`;iW!0YAWfCDBm7omM2GTTA5!Hca<&qant&ni_I>Hhtt+5< zA6M!7!Q0-Sw&B*kj4wvLu{wW?$Hj{Sw`Z?@bIcdn^}lQ>dwUaG`RT9BNo0mIkz>8b zzQTlOs-^k$KhN_)#I4ZL@G1MxFyE>lkItNVfQc`S_AOd+zgD*|_at3UO3T)BG>a(P ziXBREzj_zc5%pO*rzO&}7q;%WCbVUYa>nr`x{ZQXPR^h0YPy)mHf_H#r~1YY!}YgV z(fVf-Ysboy#Fw+#`-YQ~KUlu{;&dFRyR>(CW_1KLH;f9;KpmJ<_^1GC>LZ5Avll+f z8wPawk+5>h-PKo))vZ68p4e%ib&m+=6|Zglt?Sfl2gUnM7WF%K)(g)TB?;0Y3#(U; z7_tw1jT_={L*_Q^7%Uh?+@ayDSl#YM+4XvKD`kX*?6?pTr;6;Re)Sdm`Ox>3DO<_{ z

    s4ToVzzbdvnbS5gPVjeiI(+NtNy7mq)^rsP1mqy3Sn%D9xu@W#K(+eEpkqZQcH zqy6lHst|2Y4Vr=jU2OdzKGm}FMk7_Sud83TcCZjZ`^m-Ax`K?CHkLh;chN6v$WwDX z4O0tP)~g#Q@6G-c50H(SIo$RO>0zS>yDx0{WkuG9$E5BL|2cEM=Y)OP1js&rR0YIL zosD{%0rxy^goIcqJ3W-j`&Snn2v!DLy$z`sLm3?%lUUr<($lQxsXWiZRrxbl#yl~6 zf5i*Ta>EAsfxJiCaKBbx{$TdsoDToQte5y#JU7iJd3_OEF~om`{Nv}|r{!lcywHRq zkCAqOrwu>3PH)s=)YPHZ@-1mF>G z^wPA#y!Tep7c(hcisU+g`$Xmm-p1KOylquH^c^>+ToiDA=-ec5^Nn2Ih_#9JDCP|kQo z+fu+2Wm2U)+PhxpVb~~-zaw-%D1Cni_mY195Yc}$Dk0AaTO7PR#oMczX}HsE+M%eCOV~y9kH{ z5JW`!(xff0NUuwk-aAMKX#&zkR76EVEWwVbSYnBaMola)y&7X;G|^~!G3A+NqVApj zow;`xvAy@cpU?mE&wIFgXXnnGnK^U%nep=Ky?fTQ`B0TxbaQaR0-xw++IHWp4cC_T zeYBup_uIvDcB1KDZ81M+Si0?l&aRF_>A{_u)tfS5dMbg}Lbfh^n;@9Z^$hzDGNQ$> z5!GUdYPrW>EHsV!0zLmL+hT}cM(;Q*X8%C5^0sAXZ_6v)l9IDE*DpQYFEHI!PPx)U z_dh@x^i@TLoW6eFC~I3@?$(^b?WuWNv(LoG#KV7XVU{pj_5XX>o=orTpdT078cGoPZJSqckS7|!SqW_%H6 z^ryfO2TpCK5&|V*NeE@+Zx+{=!V#*jL=2$FZA(1aRh_#)jZ}2WbtfM?K1nyYv$FS- zmgXz_%GSUwx)mk8Vd1L{!*gaGVseg~WQFx&_UJLJShKVCd_%)a^Gi>+wVkUVDR8bjV z66)H&L^9JZd#+1ZsI$2j6W3|%ZR!|V02iiMnOH4a=Ne|>H@~taDs3g5)ajEoclEhC z_mu;ICG7{0y`e?*i52P9QitqLOMBlP8={w#SWdS+&|VU_c9pF5T<_dWSh^f6r|$v4 zgu0Q7LwR0G;-2|cv{0`W+4>=QtKgi?=GPiVu9lTw+uij_75$oMQDs%LlVdafxh^qp zPw|Gk1L*d^-y6%0*XFFKJ(P*3WgM!3iIL+TMi2TiC7cie*MI{B3CmXX!yS+h*UMYL zp?tJzpL3U)8}NIEUctJd)7qi&X2b6;_;l0e>-80HteB#$ZJ(8yXc6MoR+!)FJzuVC zV^p%lANTB>5)?UO1=8KGH^TOKB zDPU!SAf{hoK_mhaKE#nxD}A9#jTmDDB60*Ow=A%i3Bi+qaL0-LD{5sz*6d09knzMBQfH&tnnuQfhTcm)*)nO# z<^{!7&*#tEky+G!Fx`6Y9OvXk;baY;$M@DFuc+qG=^O1+mdEHYO%rti{C3>Lv=Q6B zFhU@}xPKf8o8WC^{v&IkamiBECOoHL7Em(2aP0-u61bsI?#9 zY}>d6QE^QnK1tR;H~-?2?(OIlU{^`F1I3HJGA{&v8-fW zRzZn1lhcWHx=`eU*X6MmDUlhw%M#Y*1(wA7>Q9w;&Grn~9l0b$UJ~i9r|%gE(*UMc z;bLH{P9PSLY2amQUPza~@la~ArQVKuuw7MR6*akOPx5G18oJQ1Dn9Yd;QGp!7B1LX zG<}yrMP=c#jDr1x1J!RXN#4`Ml8# z8b1AulGHq(;goT(x+Xb)Ptk_I);g{IYr_cpDJ6j+sQaLS4_NZUfHnP(q8%+~TW&n7 zGb9}z{_~mrfg}yymv6w8%lGEy$Z#uZ!l(ojYj@sCK&Ft^bx+JrbieGLkdxqEaemRd zw@W^tqpD@4HFoR)Wabj&;Tc_%2$$qt^^23a2INQOtxqi7lNp&}nMkK@+5{7-8XVXI zz?&!#Yvl{(v)>am2O#bNGVziuI9&)5t09o^S5)&fjcBdZ6c=Z1GWRaJP$hg-qph;K z)!Dg~xlG?0+4%QBc0b{*)mba*4y9#h9jZZp;ECuD(u->OW^$~Yn129G$M96m#`WRQ z9_}oX5EqvxnD|s#&6JCj#mI)g*Sf%>l>iH0UVAXj#d%Rv9Q!#XVLyd_y5UrjD?pkJ zh*%|a=XWM&YTy|K42+S1z^6Z1Amo4j{NOLGg?WJm9v931we-a+r||K&Dry_>V!Zs# z->X9}ZnA6NnVX*HnB-NtJ@v({D}Q?a?Na-5Yp?=elJ0bDJJMXxGmdRqb8HhPSGj=A z0*u-YYJwrq5OONiM8GJrDS(a;F&K_;N*JYE*kAr0LVa4eET5XjI&)rHNA&}AF7`TUmESwn3tC|uTxC~sqAf_Q)`Y!8?4jGup zQ|C-|oDyjp)1GgRDoo^Qo>Ltshs}#6?V;$ZXY^ z=uwxc2#<-<(Pp-Yb>IOVta!aJ@4ZsC3_u#iA+YBLUlO)^~%P6#N+Vlmd%556< zWzg^47ch4_5LynfRDlIx2Ea0dWY{Bpm2^&|An*L$t$i=QP`xF`KJF0GfBki&cPP$2XLI$DvuhA)N{q|0&i#_99BKV% zNy@JB4aZ+T^DD+#n@(mle|@5SWTgDW*UcFxH)SE}*E27_Fj&4TWywdaz#h0A4e$Y% z)K%LVpQ@a~9?W@F6gWS$Y@v%=b6LYejaP*g|5`SDt)l9aEgi4dqs%#(?zIOqJPMB2 z4b-+@s2pSmJz$3ylluk-aXqsPkYFl^K_DIKsyDNWb~sXC?jq6-Y(c(r9ZDA#ZLsC5 zEnB~AZvJv>{h6hqm#iYIBOd^|n`TY-m6zBXi49ifg{F&+$5j|W_Wrh*D^LPjwoZAoR@a1>$Q6szQN!9)n z+)(J+Iz5lW!i4(*(FH~nP#bBy_*$1Hyl)N2)hyD3LFog65zz-IE;+`Yh_)UoQ4?fM z3;Gc$S4@uV212Hm$f-N5$N9@rznOBGct&GL(LyOz`yP7jfN7xF1U(O<#Ch+%_u7G3 zfu zhf0tjNDg$AU_EEZgIinOQTJ#VbqFI9^QJki2QyO7Z4TSeKKCuuk-M;>x+)=8AAg9h z>QKo?e=xA5=(Gb(WzQGc#I+_&j?HS!%&GULypA4)o;^UkvE+#6bQtI}jFA>*uk`-< zS^)}NdNMcnN` z*#cpvc_9+;mt&E@wH~v8@kW7X8o;Dr0o!_i?ZCaQhFic8c5l>h^Gh#nHXJ;Au0MS+ zJ2$K5c=>aG4@lPjv$6a{Rc2n+U^-n>@cO#V?-drlyJ^Gg^S1V{h5y#|ui0x?Ra8-L zv6(42v3TS0n3&@myH4aYn=R@q3ajklBtqDJqO08gHwOyfC4g9a#ET7}I|FMa;{c-| zhl^tHY>=(QoleHZ;&238Uxy}x9J7nN^V+=VYQ^g-ayOI*YELdbxuoZnvdhS$sc_NC zX)~90&1-aHT`!$yP0C*FS#qLOM=PLWLtfA8<(J$W3l}dn)?2x#u*rjR)7!0F8`1kt zm+!$pyH+bN=v5ytKhS=??Lg_Vn+(41NSW##RIcv&^Pq3nJG~KWm8bNoj+Gv0yWaME z`EfEdoF}>p1XKtnUK=<=GYW<_MJ9zayVXR_;2EHktBQamJbMOc*_4C2iroJMy>RG1 z4j(M3vp>9a|1y2y*<+}F_V{5V^nxt*J`9hz-vzp3auHx07tzISXMF=;>%Em1+Rt8( zJ+{u%aY1%YRY^_5rAyS=l^2SxU95SbaPf+oy1~Ad``8zxOCspZ0v%Jtd;`)Y!>K6w zoQR-0I?#&H2ckkrEeLT1<}A!t{6K56236G073m7in9-4u5!zniw1IwmWCYc724W3D zeCS5uF<1*tpnduPvMJDLU^qxuDu&<=FdWeEm3$Lh&=UHorHEdcqq#c4Cs&;LJe8ci z{rRlCK9sydWAy38^snf(71*DO#6I2RAM_e~l6yd}F2DZ?y08rUQsFqDhx~&su&1~O z^y)I0TL70j57r7orUho4+xekcL`LRd$A_;ToE0)lOIK=?lKTa_m)U#toQ7zGVG477 zHgAThl!Il^9Hvgh)ZB!@3m~MKG2y0?k4Aw;h^L4sSB#0m4z-`8Y)%MWhlhvAlyTF> z+k26Sx4Qlh_Ffycz4vINSJ=Rpo5IWYHw6%TZ=Fx%Blh06O6Kf9Mt{5+J?Qb=oA(EU zIu3)qH>+xpu&4rX^j!eD%|N&d;TgKR5~Qo<4`4VjATmFPZeVeQD~hCWISjh}ZRRLm zdBHy|%|8$w<n;B4b&;~ZevyfSbq6dDG140vVEvkPsg zEc2WZAM3GaqJxu1PSN6w$aT#T5iRQ?G8Px*csMys+~W}!H^Z~6vLV#2FoRsi4%dOv zp8$MXL_D_w+pXFy1p+B3v*54+3$I##fYar{7&W`~T;;_%`M&=7G6x4^56kJ!E#3v` z(<{#p9?W@nH?@b&1RvDFgU%fbP-}>{rpAs*Gqcu2<0ZS_&4CV2fk!9o%1ASROG>f60OXtiAw1cX)maIIRKmUDv<3(y85)XQ}7NpdN&};5r7GfL` z)Bf7Z@KyKsIB)%OXQOmPvh2!`bIIYdm`;9^F&2Pms+$O;AeaHg<@6qM@~$;n^#vMu z_2bEs*)y!HeT?2Wq{4`5Mddqp3>U7u63y;FW*asD#uBP!z)2coTZ>R8vbE@5c8mQe z-8@RI1#0v@QOwAn%KEYjz5xva&!{e#gdtu)rb}KT#vrgf5xPVGGNJXSY)3juL(3Tk zS|*y)W>^|3(9{!%qL7|}otxF4+yAz5vo|nAG=%`kH+mIkrO&oc%_;Gw$^qQ)Id%1o zKI#2^>4e0j^{q{p)-^SIv!?(E5N7BR00Ml@TqFlXm7t}#pB}&i^x2&|6~y&g1-fu3 zgcT8M*?6U%3wlu5ZF%91AOpHkccidte`e#3IBGtQNk~i2TbM0RGjEh^MLqKl)tD|h zm0x0$lFB(*K(B_Be<08XDUpyQp&-Ep_@_X438t6RcTvHx@X~!} z;U%aFAx@CpbqS(*7Q6*EhzSb>*NBf@Ir^|1*aw&Oh4hGx0x`+yIp@!%HC7Q)zrtXEp{$b3ZtEEYPrO;mx)q%d_h6T(`H7s(O zI#Gg%LT(Xc=xQ7v>Fiq`swesK^lA?JrfdTJNvdA)1t15kG~*AP*RLgD}(ty1MT zi0l&RU+e+KQd(e3VibsSXa$$N0SW*QnhK&g&?)fwLYxta{ejbEO(=psLLY@#_{Z7F zbE~@}mR;-UxV9{!yL#^Av-%Z7Z`Pj2R}O&N?tJx|L-X&)pQFP^Mo!Vu4Y9V%@x2S@ z@%`mCaShRQ-DpS9X_Wf&FDUg)VEYM_tMc>1%#r+VfQSzpfZj+nh6T7=Il_$y;ihMu zVLzZ|-k>bs!mrJoE3@>7HNx-9DVB0#KO}!dyrJt8{yKNzUbLbY-Gsw}`7@uRxTeJ* zAChfM8h@e#P~r4Z-8rV{G<%UUxla0ceMYP&-v>4oTJW-yB71yS(f7|k>`F-Zg@^=7 zlWt64(@6*mAe)SfIDsxmFLzi$J2WT(2s?Xt4B`|$v(=@}U40fMfj_=}oP7b(7vPNJtMwow1t zy&H%=Y!i)DDnN)y)f%`F@di8_7yyBw;-f%OfALU#Sa}wpGF(2uqI0w9qm?i3Wq-Vh z{$)%+yT;yrI%s6RsFfolY~xdbqy-%~fc{e40Ftn~zz=qUIxOd*o?)g7!6}@|1gJ?| zOK=q-V5A3$_TcOa7P=&c8_>l4G_qg)SzFt!jqCch^>nYsS5~gOi}c$*>y<_ZdPW&1 z2Dita-e#BD60mD?-~7Wz*5lvG@SnoVIR_i+59f8PDz03_{`tngmY*;7cX11IoY-xg zxg@o#GpH%PtA~02;6ZJON1dY06ChCvM z2uXD0Nf*T>ERp_2no!T0fga#}AI6HSza3xTf;X%g5Obm2mw{9pT45Ex<|BYPd`BdIQgH$ae~4EQ>j@4#a%A>tW2%;-F5r^49JZd`Ttm71EF zO8uR$UuCaTf!FZ^J1^H+wkA?<2Yiousk@sEr2|*fCipdZURHpAR=ynhC~o&EPN5d! zL-tP3^vB)MKLnZzoxn_QNkU7lIO(4!tdGRK0$PI{phF44en(wwzPcxOH=22}^1`NK z-E+FNJ2N}pX=r?>J!_~|H!8Aw6wT2%xgzJda_AG}w%4bAq#~omf8lo{d%tb;Z%)e^ zZVg=lV>k_q@>D(mz9o|kcYh~KXZ%K$IES;{5-B)^rPTB-##p&7gzsRzF!1JRp9=R1tt z5!eDSNp!DxTiR%4R(8c`MP^p{Xxf(I`CAf^N8(n!-2GLQf)(b~*5*+U#y**<{ka7P z%F7Or>#>b8U(IL`1vtF@w_9(q%`?sx& zzS*?!LbJ{Z?cn**`Rfzn*Uc+jGwp=Vg0ocT@G!c`DFrJxAZG9LNVq<2sM0mvv-ot! zqI0FCnZ-MaN(`nqQj78~ofg$@n-i^;(!6u*ER1#2u+a$S%_ zlbJ;3;JYEzo>2X#b|>`ax;uS;$H_$W&o*_Rav%0NbaHDszQ*@SlpeHn%NE==uJ^Fe zhrQ=P?~c&>c#cR2IM8~1650wDJz$7rkWVn5yx%@Xp&$J=8q$)0UrJ~VMVZH6#TA!P zvA)j~l=CT`hn=W9!+*a0_MgMU^P63qo2!PYK=wR(mD<9tu(YFU*&_50+%h00Qy`84 zAPhMtrX>hHyp0#Ma$0KT@ia)D&JNpSo0n7=BHv%H9Ot2G3t{JvhS53Z09xf`RHW}S z8Rc99(447%_csj}yX!IZx1Wx|$4%Giks%!TR7fd7287e}gLz0M2mpLk+e60G4b>Nj zsZi-19II4W+<6u$g)|*?UK4twW>l2>xH=4VCC(UQC) z&5`N(McIza_*^5LBkxL2?~*4*&rKXkoI5vRD6%Izr@`GhuroKeH#*qQFL+3NN}Y>D zc0SG#Zh=$&M9kaV&L#-mLw2?h&{-f7-Tr_ug&Y{xAX5(X;U_2s<|g-7)LI5 z9jwEo9A$Flz_f_=3Q~IwDcABvoA}nDJ~?wh#8f}>Y)zh_UcR74L9_%tfGUBs3#oEk zyHqY$AMEEMp6>j}mG`03Tsbf^W<4N?wUFWaqd~HVb{+}rQ9p|)bzZXOs)u>Wq?FG~ z2A1?C_coW8%$1W-i!@h_%S(onF8I7;(NFcHCqPl4prj2#L7^_!0++2!#=)QHL9&&h zE>}wGa;2&qX3bR(bC}6*d=9hbs>|xu5m!pa2dzKMVHSZ+^3fb-=(`_kQT$m8Fe}_8 zv^>icBPn<(4J?37$vBn?(IlLFF0Q5P6ghCi6wbZ8$|Ke;$39H9<|58UCqCTmKG!za zR_-x^?sj)$O=`(Xs_4_Cfsl}aq)*v(%h;6<5>^KWu11FI3>P;4Y*oy^w*)|yQD2^fNT~Ki{;hqQ_7Q`SaL4-_!p1# zqH<4k_8z-b6YHI$X{vL>kL7Hn|%kzl%sI!?s^4D zpynhTkPta$$D4y-e?L9TGKDwLEvg#IRj|N@dk2fm#HL_T>3ey3@0U=|Js5`OxD0$)e)W{VwdpV=svuVo%p{!E zkPCjLW+x0Zr5GI4c8$pKtm&7_`)g!bVXoST3{n>M#}9tn+4-%ZePTnD%j{^GN&VTX znsW^^-NWr&A{NBj_2UOe4q`<=K{zm`0|f$GC|Cl3;1b>htVvHqk0Bn2h`eshX&`Fd zm@4p?JJ$pLq+DNv@%;-^3=V0#hGofkXgqTwT(u8QOKloJ2f2z+8T<4j=HL+|?6-?u z5aD7U?mn~OTus&4dJ|dnY?r8pM0)^9Jqb?%39#fpO^K-rB-{TwHE8b`NP+)#mOw58 z`u!$Q0@yaXW7}psZOg8r8rW5k&?4B0lXrr+0WuYi;UZduPh-o z$l3QrS5*4m;I6!a=aQ11E9lz@xZ4fT_QQ6T0;~qahG#hzz{J_mEynEZgp2bAH&sx* zAH^)6U>+W3{v!TqOZv9rEoZ7LrK9RX6y9`}j-DvII6DMo*DRxh( z-JbO7aaKO?ens8Il}L`yEZfxAN4;E{6z) zXc9A%Dg#AgJ=IFe$CN(;CAaZJZuhN6-wBm+4?C41`Yq8J8}thJmnx)Rw>{(+AK%IPImzi=+i z3~6dW@59)dse|69LEw-dES&|2q!2#^IA)+PBEj!85yPNlEETC___8uzyn4ZjE{XO`{RN+G-uziZ#akx}+Pu8l3JTi1z1y(0 z)XQAQpzZ?FKb1kJ@4UVuT5qy>Km$6GyDe*`^K3)Cy%@s3NAy>H(KV8}AyvQTSZV38 z8vWD_BYzBmE_j-T!!-{!ZtI5mI8 z-r6&=_U@o$@Q*bkCHv<|@()#@&vx(Tr3)eAbh9MF3N-&f-5lwMYz_K5uDn)N4h9bu zOW<|9PE~m?Uk;~{%;4&S>;^K&ABIp{`6iUwit|9uA*EuiDEtFgo0n-oZscSd((5u+ zE94v*cb#-PU=vZJPSCzvlJEWZa z&O~!_1@DO$j{Po%TZ!OzXx|Oq1Dh+Dkigl(Ka2h*^~f`MU&yZk)SCb&>8Q$~eNs+- z7eB9lPb>I|-=QA)oz$D~;TRkwUr7Z&T-3nBF$drnvyKBC@GSbCYmZpV;eBBP_db+Q zm^fAr?UQm2jxFl;xzu|Rn$A6|g&noE9_E2N(^y3EqWSsd&d3!ZOYRwT4OAhb;e?7~ ze-e!Cy-RAf!K|0?rO=zTW-kREDC!uxL;8kn&)>Osp_VZ98MP#QE$)<{FZj3Uuhege z<~;V67|sNTwxMS`5Wak2i;L*XoV*Ut!dOz9Jaesq>5srbf5z1Z-Anu(|2vcejLahP zsoBdc=AT8JmZaG$f*lO)L;VS#{MY*MKB-UI(*P}s^rw+O2K~e*pueSp-W{Ny!a;wF zgC2A*;T5j8TYPKq8`m0>$F&B(i(h}@cLF{9PI?z{226lPC5pmoB@pbTzTn5%Wx4rvFQpCur^cA^RK16|w0`uJWA=irs-bXG6+CT3fIM(&U-~j-`BmSV`Be=>2 z?CXHUy5JQh_h^#fVq^e`b;Q1fEI@bBqr#;-*fq5C2ecbIu*-!?py#knN2!Cf=>Eq- z2gM(3=21A-5($jZZ;T%uNMw9uL7YS&h;te6CKThh+b}3?L1cVGlDGa|)57kN{DQs9 z3(;@pA#*G&y+TZbnsQ|xxy{f@I9`ozie889DlmYXL1l3FV)%_4OVL#P>mN6Sou75# zU+>+IS+>;-g8lm=)YIPU!4^dZzxxzDdn##tp%! z4}VjDe?Y6ka4k*orS)T_y>cj}**7S!mdEO%nmqg^S|5Td7f_z;TlHuie5LIl7J9L7 z@P#<>Vc}R!z=9n1b6T$40c!y=G@>C-QgB-R1o1GD@O@SX`LlY6u&Q}Cn_fRc=>vJ`_A!2M}54! z{QSJUeAJ)l2t1Jzznk5(agX9V+NekI4T^IK4t8-248RReK|xMV!NK$O$o9`_Cw>JlFRF{0W{J)s+wzl3x%UH+Kn17pqft(*4vM zkR1oOp!gG@@q;F%qAAeiELb=XSS+!%rG+^OjPbD41x@0SPF&wWHt+l9l;E>>?%)rK zs-oXQM(;kQW6@+3w0mn%PiZ^y6r-ID64x6#b{*shp3hiIpqu$Y9! z(Xe591P%0c`mHf~AW-jjpk=U$0LiW^4$+A(u=|UtRD63KeH1>6(UpFjK;;#)JE;TY zXT`rusN7;WH%Wv|sSm`*!0L4v_=;F~7&&lQZ9~~oC*g`Hk?`ms!SY9*1(XuwJbEX* z!S6;AsRw`bPDM7oOOcJ0J9Z$nV+Z`2Cb&X}K@bGl>CiI79zyt$uI|H}X0V*8(q_5X zH;F{bXV{9+m67-GtwmjX92enRsMbHjMXaNpv&m)JtjSZS8B8`eoxL?(Yof>{lip70 zaB(HS<6mjh2e$U{b0(SE8eebqjCR%4H#E|nnVq=s%5;0HDRbiOVF~Ji6>k;#k&r_~ zL#7^t!dS_K+@BPlfdwFoaQyknBVXcMawK^R--<%!rw@LEtfOfgJb1A4AijXszy~Vd z*L48Y&GG{sFhgI^>(Oz-VTGS2K+BN$pM#Dn&%qoBge2WVk7 zuE@xt3lh+&EL4_kPLDSuCdL5dTGk<>5hZIb#TSt)(Oa&K;EU%dvpu;x3vCK_=8mlX zd96+VFRRhJ;OYj-N`=wBLQ@rGC5L|z;;M@_$qyaHpZ4LaN1qR;!q7DIDl4Hr#lHA9 zyp%^J(mp~d|B@~&r4XF(LSY_tgNx?0M26@!dQBU$J$?vxBD>az=O2FwSy@56s=Jky zKJ9~VV=w9kYl+@K)95v!hmPZ`IO6E@5v@;vM-%@JxN~T#_YjbL32TOITf?6R_+CT$ z9DF}vA9!n6@H{98GkIhq^FnytFxr8?-h$Tj^?i-%FvtXu!6(FZy_}lm;ri;yJ$zikOO66m{eo}o@S92 zm_Xn;fT~7eU>g)`f*QcS1HBL6TzZ`zpMk#0dGkiuXAzs5_6psjG5~fj4Z}JCw2V1kmysFhN8`R7G78Bqs_YuOEVX zXdYS@99$O~S{o8l8|oSw>gpOQXYcry`1_Uk`j+_lm-yBNIXMOeIXaPq$Gd{NR1{?l zboxg@o`R^Smzg{CN^ll|m&J!n_}H`NpEhJa7UOU@yfKS}ya)}rh$4=V>q|I*I7&{v zu-to|uQbGcj;*c73a=tRyFf30iLKXe?Wk(s8CEm=XN#ih0^kdgN1~Z_bVH<~fFxK% z0*D#?+;gZOeprIr|Lp5~ycRK+TR|-pxCPfdRu5aC8%lzTfN-QW3w3MoLpN^W8yjDV zGzJ7i-Pl+)8Ys^k+ETfVYfB4;4#+~}XfybuHsK=OP1M0H_{ENn@94(*IVEZRYB=Fw3Y?5%HUlwN%M{UlgO2 zg+;iv5Op)li_xk=T#NcjajgI;|3vesa9T{nR9f;d=N$w=Yl#MfY9>x|_0%-iG&2q~ zk4Sf$N=deaCRBTBQrjnLhg1<%EQZKN7vUMsqboq13PnshkT=T32N5B28ju%vDb*-W zi_D6PD6|09k(0ZtxL{`<{{7S`X02arO?LMDO(Av21CUH1PQx5L z8YBmZ6Ij6^06s?xcP^TnGw+bxFR4CNe(BtX=W^Qo8b8M0j4b!%8glfbBLNoc839T3dqH<4jJEsE%p`CZFVwN^5(gzW$B2)3+v`)vhm%s0|FN zqpjG3PN%6p_5qc|9;D{2Yq-?2apec~r(a%Jke{^aUuM_Aqo6-IQ2!8U(C18NZA>!H+ZMx$`-$#DRnDZS5W1I#7F2 zUaMGhxmZi5M(Sf{KRa1xa%yC9QP%>YBbVd8vYe+yfZk*qSBJhg=zKJk(?*VY90XUO~QwOj; zg0E!WARwRpAfry!v8<|z>7=9{ww}huGCL;^QKzt*8ID;Lp;xoJDSnpE>?lV)r@0PJ zrqQ(mT5$ndKQDd@z|`dyhX5)jR$XoeAoGfX86YPMs+I`P>8{^j+}2WBSw2A|&9Rrt zCuzz(y#xG(M$}=&?a4z;+U?uAT0-M%y{mkDYPx`kcq;1S-*v?D{7JV^)em54yGgKKP zDo6n=dJlTP$YBw&bP#bAv};`h_K0=S`q!IY@2@atj=h6&2tMOskO~zTW{80o2;&B+ zkk+-3h(WPqcjn~nJaBk>QSr7z`$eA}IDmh}w+|fxK^_jIV*}XFaE-DXrj>w6#wb&d zk(*2QwWUSxPBMi-dZ4TVo#{#e(A5#DeS$b_i>ks zromvr8>V{XLi5?GI#iq&TqNs6?)DZ^G^Yo(7X;?JS0f*LCmUf^VS!7!4-gbL)fY!t zcO7O}nFxz82`G0MbnqDYk~}0_>J(i--~WNo6xlcs($-O!+tvb9&EC#FT~mKrQ3N4t zs4}sUP@&dFoPn%e{e3;d0-Zvqhs~%Eo-A;1QW3m(M~>D>Z6e$v2(811LYrg8D3mlL zJ5et>VV0%bCRo65!yvdshT$d%`acYWcSb#xB#n(BuazHzUAorT7%CbDf*WJT5v>0V zI}o$vMe--88CigRN&{{e7zvyKZi0xC$5!D#F%cFoTmN-)ih8>9S~av*@SMcnj&PFn zaE_BSqkQ&S-JfH#`}M3~vvKUiGFZh<4g+z0Rs1gTaB>H%aVjstI5j5~dvFAc=Vbt= zH5*``zO5jdvIiU1uVwjt0ndA5eB zEIrUCE;GfoY#<9z{9&Jjfs!=8|K6*WP-69;dq$r4xDAT^uK+lmlbwf+FiKy;=SAc= zKyz*#9=huxWtJp5NL1VvC?I5}JWjpYW8RBw1I!a`!sI$l(JiULmHsOWLv!54V%Hq| z;DAY5;r_uve)Kgb*A!cGPhYgRh5a_M*zr>2qFC*=?SU<+GvZrAD}y|1yaRx)$3TLX zVg#1rf>=Xf3X}4XZ>SCN{3#4FccP8}gF3x%>z4Y3t>RWq4?jN$JVde-H~)#Ph|*od z?CUA-s907JXK&>gX&WDutf!q&`TGoC81!2}1m6)~1S&dqu7{8oOJp)#$OL0!y3zGt zS5C>@WOgj>p7^5T8`}6!{6}9)?NMN+R0iHHKx`dzjJ?Y-A0SRsEZDlGO9_Jk(nmqt z>%<~x8^lMo+=tiU>qgx|)2b@;9msY`07D+o)d^5f1eviqP?M*`7Cd6Jn0P?*6V9?ZklCEb)~4}+triD7C0p!-pH>j=Cx1A;XLM3m=MX97P7 z9@P_xMUX(QLs`gp?<+wbSVQ7}Wj}j)hqbet`I2rlp|mk3OiY1MVuIMsH6q@OnIP`b z5jhE0&3CqvSYjh8DL2t8QIFDr1ECly(KgheJwojB0t#2|pE6yjwPcPis zS86HI$MIil{S4QTD$W2NZh)NuUQh%fzZ@+fVgQecM8f4FJ9)muF(RzvA&+Qn%bwTW zE|N-Z>^8;xzbr|fps zu@!2TIE8I5U)(W2EPb{}?3yMC44pDDA~4wB|LvA(lQkLbsk04z+?jnF8#I>ei)f9S zmC_PWnHoMLGOr{>BRUamTXnN$GMU=Kqy!=}DE866;D3pSA=JYOz#uct`5vstol{|< z%a@L!8+h^o;ebZ7i=*lnuV{2D4RsQwMg;{&xck``b}ySZ+uqYN$ki^cJElpLTAwz* z(JC>-*gD$ZJKP6tEK1AHk975z?ID}Xx>N5a7dr_w?i=7&&?zuJ6Vcrk93`QGL_xj) zT(d|e20{o5h^`Qc!U=^I9AD5?|HeJh>L*oAUVP;7X6!v~U1^o5$+xK%Jo%bzV;!oS zms?lu=q8&@CDzsE7c8hlvt@1$FD*duP*;Z>WUh{H^0lfQ-QAto^L3~;uV6tf{Nm=o zp0BSfD5$GJvpw7#j@O{Nym<@i9O3&&Equ>kP(y0jGk?aHmBrim_~F+Q%gPdMy?xlP z;>t;h4|^$oenq0h-ygq}SW%H^;}^i(jJO1PM#6e8P*OO*r^5?th>;fZtA!a`AL7vDL*+`GDSg^#0sZq2GR2jO;0 zN6WJL)|1^`;0tW!0--+66lH*&00!?a6&qlUk@H{(*MGZh%>wv+7=GWZ`W+aLM2a_$ zB=|5Fgg4&~S+E9Ra2*c=NTa-MkU#-wXF}n(hK@*MV+z6@Q+qaa#a1VT3%7Se*G{s@ z)&=#JP8J{>u7`mV-G*=?Ywq|dc%3`Ymfzz9f7RrSi-=KaeO3^F!=5;9*o}T^SR1r( zu19Hb6XRn$+iQk@u!l3eB*`RLaNdQPIj{t$9xMm(g z^01ln>SmqB_B;j)+}y*D=}mPrPeMbQc?6oxTsw~d!?h!Txsi+0VQ#<< z_9QsEJpPI~!pNDniFx7txW%n`#&tg9fkxXOy)6Xbf27|h;eUF+X!*EBp8Cv=ACB^g zj%lPPG-P5#e}h`gh4`tu=5W@G$13hSx12rBAtBE2N13a>;Gbblg@#-q`9bw}n!6cE z)iNal;SNZYppCgl>%h%Oq$k1|$F)Abr>9@TLJhje0Zv^&-J4g$h8sMP;1&$S=Q4=ygIGdtr++0hqitaCi$p)eIN;0# zp*}qV`3z(n&m0Bq{5ZTq{l~$Az273#M-AYB0(n(;+|Ya(JXVjmzrZl!jto?n{7h3P)j-90 z%mZpKG)RnsoKb>T3b};0W${a5mZpb>rZ1hlgn1CPI5s3TH6*r^Tqb76?4dkGTj90; z=6P}nZwd=*3XfO_ie-d*M1;FrL8OnDWy9u`+IJ--^r4c!melikFf(Nq&4p5iVWfQ{?9wov#U&ufmKs7xoa)}I4JGpa7o*wWa4*X$4o9Rjgb>JZ9(f$`=sHeVc~3{ z;gRBEX72d6mMqE1+|1EZ5CHY5xO%qnG+$`J7_aPr$IG^m3h zc=j;qI9>QXM=eC0^rLUhZ5o`O(%PEh;N|7OWT48_)cPD0;Ara)Q`^m~#XpL6z$@UY zCK32#_IUMYe83p~ad&WJbkOdR-Zi^$s-LHqpO25HpK!Hza*9{e=FONTabB=;fW$`Z zulN@wA2S5++@TMAWU#LzV=dBe6 z&-y{(hq|&d7B9-m=`1&Pw{Z3McXbQ+8Fgjmc6McFc9omBnYji9xw-`5-_l!J($kun z(v02ALuWgNg*m3Tw5O;FL6MWl8V9jDzji4+I_ATxA|Qab)3Lg zj>9Ae;Zo`iiHL_Huhu2K0 zoAi7~HU0yCg@5=vJG`8^Y3x@Sq;F!PKgZvys9n#*M9+&G%r+RzMNZFqEOeH7W4Ccd z|AFl61O4SQciZ-y{Jdk)=O=rZZ8Q8l%*{RgX82SkySgTW0fmiWKc*AMwmWC?1GWKM zTqvf#yg#40K8jlBVe?UBSB$^T&c?r?>0Y;PxdDa^356PiB3%I&tRsYzScNy>8cFUG z0>Y+};2pR|l3eifvCx*nsw)k7w+{&B6x_{skz8d6ck>PM_ZX5UMUhWw5}G1Sl1_1y zC{mfucS`P?B%PA`CP@wWq!kpiRq4Y8lJKGm1joTqM7%x*#B)4JXIH;^4Bs6H@Pfck zPp<$f+RxL+$Ir{tPb7d#9c}+{4Gea6^z$2V^@mWIfB*sg;5%sC92(bB$wA{928#e1 z-#B)Lc1Z+j06zTcKX5BzzB;y4sNozO>}($xC?c0fZg2|ncXbSaqt#gr_G6r>+)H+t zkhi+1&TZq*Z{y5qO=26SEo)C9WZU_MY}mK2GreFz?B^y^g+)@PByHonjd> zbG4C?x0R{+6zvHUG$$Gv8XIX%m>|{`#)w@UXBcW~!2c#1OfxXipFCmW1cOOHH9uxf zD(8u2@Qj)^5K1I6q4@S(+!*s>3__8j8B4xeGG)p7C4l2O%vI$?m=j_wprr=D&pki; zItZ;KcViB~hESY>3I(bP?qG3J|V~Ut|=a8Ebbhf)Ngq0V8{u_k6qu$y-fiHtLzZ z^b+B(M4;Cd>Wjf8;%?fUCI@k-$roQ%jRWDo7a z zX~Ph|ZdhLrh7HBXc)D^g1YD8;FR7SAN1k+n5s8;U#1eL1?>yV^>DGQe=CjA=^-Ex@;Ub%*zekqre2zp#^3y{e>cc7tzCcvO2- z`@X=+WuX$^#c3%a0STc2GoAH}riTg#oQe|W)(7tDt=Ul#IJH7AWL0Z^QH+0hu!d&r zR6W=>@6ngI;Dkw>UqLG7H37*qV$7Gwh`Oa38gC@!6X{6bBwS&0vS^kxEk$H*Wn~fZ z%+Q6yGel-iECWf|kf-|&fh=g{;W&8To$bf8dF%yTu zQfJ0Z@zW3Z?zdHk?)_Rb2_$S;-tM04jAD;g80TIx>FGMs4YJkC4h;5eHd zDx4=`N|4arMKxnPIZu%WC(Xl*sX~dk;f#e6d2n-u{Ci3!Iu+KY6?fZuxXW}Iey))J zV{t`Z!7dRX<$8ik(-j(5ANEU}nvWgw3&;IR*eNCDLB(OR`02@;D4Xc+O{sD7W(Ajo z3)@i6H~3{P@Pzy`7KMUBVETcf5*PhmkQ@xaQ56i!Jx+Fzjr&fH%;6A{7o`bB1Y+^@ zSTt5-4~s}*xc;<(e{8I*ZCoBkr-?MXNlby8x1GI1iaI(?oU4lS0!nIUVk(ia-;N7c zn;BwmB>O$d#bkWA8Uw!mVf>N`cq=XmeH_5!fv5iy{@$^%f_r?dtZaU>v$C+Wv#_#r zcr1MEN#N#v@bzj3sMZyH$jNA!DZ9|<>se~)ms#&{jSG;(mz3s-y6}W7sue+(Ko3Dv)RkDjSh~<__ z@%}QeLQdtE%vs@EJEad0 z5fWM^BaWqUda422tnqQ;_3Me3MY~0p6F>bFzA)e2?d)Ax-`DleKTG=S7xZ`D1r_0A zVJL-#AA#leC(e@lGINKT0v1M*=m$L#$$l4}qqyfE^8}!sB*8x$>0VycB`1aJf>V~- zh-Jwa;ZShuJ4HSG2+{petwmi;9T`Ej;M&wn)Xb3)RL#``(>fTuM?l&6FQ#=!D4?G- z7bk*3#5`%qPEqMcDsvyK1aI!6lwUCSC{58{oI>)5F_~zlYJ>7E_7l=5Z&OB!&Q355 za$s^V{znf08R(N>qGNoEV3{9pa39z5|1bd%xc6ksF>saqvnBvmXgA#T?jg2^Z@h8` z&{>VMppW+`5d9MU{DeIow9VyDnB*zJS8yx54FIj049?*An7@a_xKPi!4<3I`RN2Sx zgO4EVrk-rol$Zi;KLcd$yh&oXe{x=Qc}xD2Q$v-jZTpaZrYD+Dk7!&40|cNxIVn_#TTie&~dz-Jud2;r_cb4DpWH>AZM<=I(9Z%$wES8@=9O7%21Ec5LsxYpC9<(1q;Ayb4BC?J@9ACJTyCcpxtRJ;+B?% zhL$dkTPbpi>q(Bx&W=p(i38HK0P9m%ltHjI|9}AruFq5S;t+gEnF~9TK+wriaw9Wfo$Os^wB2;R)K!%d12kGCCk!^i!+xMK2w@*%djP-B+0*@ zSLPUer*lAM_E%|odva#HF%1Qq=*n}Q#*QFW_gqV-?{XmdQ0bt{p@iW1zrLlOJq=Ajajdmr7=)d0sCK2-o$Z;*{=P z%WiWP;`P#n6|Q1;3i%b@&5&?>%)>KK=1P%Zb4~o?^UsIm{G{t`fyR_xzxwvqYgBa! zp4|0MCW32+tC-y9NyrC6>`Neg&WyEw!OuUx{gz}?VBesgS+3$YSA6@eqkptOm#8Xo z9DU)G>t6JyP#5a`7$y5UBh=A$Vf4I^p9J(&wUM_L49{_$36B;>%bForo^WB2{J1L# zf3v_|uh{whhl27ZDD1^cBLvPu8hFSeypl=6D;Xa5yL#45x}FtXZlq@2B$MX(gNoi= ztu%u|ft!hV{s`Uaeo}qJV{`mZcF^EH*77sym6o3=4`WxV_^V%`Ad_4_b!E9%gSq8G z#CPof$Dqo^A8M$xw* zi4T5#Yt}tx)D5AJOztOOL?gJsc|W3 zsmP~B_=9Vg_%d$d`1utr%NWJ=08A*!L-E|GYn!{;J>GMpIE&{Gy0q>m-K#t|ihnib z+gu*=Y`-0}WQog-k3HkTzx)?$@xAoH|Kp>Cr$&r$RX!k8$)AoAWKa)z{slGDa%H#Uq+Uq8h;7fJ{PZ_{&_C4SCpxdU7*1 z4MSnC*ouEOXWfQ^cHZhLd)KbL@Td0Q>>?A^NoYRnXeLLHNk%)pg(e!A>~FuvkjMt~ z+a(wf^uXpJHluHMrH21Y_KSuHSL)4`%Z3e?_U!gc#}JUSw=7r=ibemZKt86x5bG*q z3cl6uN~OrUu;;Q7#a9@-L9dh&aiEH}->X-W|77yZ=k|W+dNn062|q`YQX;6X?*wCf zdZs=;1Fx^J0N}6UYZ#31S9@ssFKYYo7dDdMzYO3NHX{9N+fo2w0w2?eWa$SXB2x^6U@fzlp2VRFox>Ro=yAxx(_7ssT?FRSn6` z9#U2GMDmqZgZ!=V@sjMj?#eECyij=Nl64>=87&`?oZUWY`GLa$`&Ke7S}Dt#(j-=>?s)SGr?}CZdfI z>ztgWb_k3W;i#GhUZH-x65CaKAJZqUBqU!!_go?%fozo*3Ki(N*+2|`sIqmfl@VOk zOv_yjVi)%wG@eIEx9>KgkFA!53a{eN+F0b<)V|fiPiF+n&VwjzHOlA-!I+JOVd-QK z9{$u*!7~u2W%;2);4o76+F<{_` zcV0Fl!8GYp;n$vV1pn_p&p2Y5exQAFybZ&M%;%pn6=&DqvmS+*gzcPt@JAJFt0TGD z^B|i)|KiK_eBabku{yZ#B)aPP$|bk2YwUa59mTy%XFEykIm~*eEV%-Z#CY& zq+-r*T>aOtr(fEOXN?|}`Pjhvf4(>Bp>^Y5yRg_b19CnCITvy{V@o!d^B5TmFVMR~ z&TLbXAk9~P;u0RNQiZH(@HGKXl;0M2<-qCjL+S3J2#XSzi>oC)S9b7=6061K)zt{7 z6`z;7N;mrgY7y}Kj2Q^15|#^nWKRl+qiPAZidA2zK*(Zj$^8de&fc8i8y@)1(Bk_t zgoqbFO<(Tni3nyuvo`|U!Y(~lC}5n~8eRoxBk4R(m}%8pEUd~^#gh&a_LF5?3V24C z=$X{mpz|2fF8ygzkHDU`pQPj$pI_`6q^kJ8^P;=PEIacWp2CnuVTG)U>v(I0w@lv0 z;@)NR5vV#rDM@;4VAdn0(e}GyZ+BMSmG#KIF{WX{ij;zh27@I%aqxo%LlQU*bblmgsqy?0Oib6I7fqJ& zrDhTD(=ocd-yP!~i=;zEGd`+;-5*{<=)Rub(ehUbn-EBdkT2X#U+4XzMe-lJ+ZHzp z<14zGx(3{$Ge^?bTi&0!R8bZPG^Gf-j}Lq)W8`u8JX>Ful`dZnb-xaN>F(F0^HMz) zKHx8V**n43^&?y^?2SfWq3WUeZ#T|cuXn&pI&0&;HUT3 z44k=i$;^R;xs#u*ot*2xYB2ACJBK_n|I?RWJ@MKvZqLj$w6Z1XU~3W1E97c8*NlIX6A6i;mx)r$EAz>nqTZeknP_p17xS@u;7f=WgA7Nr7B44MkT z`wZwpPcgE`JlZmC+Bu=zg^%u?&k_De!o`PK`^hFC;pcA?AI1ci^B*u$cAEQqAwKn3GW+b9iH)%@X(+B zTz;S;Ecll6{K)w9J{3KB^vo=b&S5(;%;Ey+J_*YeAr)k8mg2+(^X7R268!_HH-LyQ znrgmiLL#CjS0SYFJ>LLkHvaWD!6PYNC?=0%K` zhwcqUuOcv)SBf}6kb#gJGzcTM+^FGOiIB6qcqjaQ{jqH+=Sa?O_$P8uj+~vhABDer zDAwWR4!^4${;Y@2@;-!>pYg^Syghfru7q5B4vRfHSBLzt2{Q`Za3;H!d9LqoM5xG9 zs5#619TD<#NEY};=B~(d<~HV?$=jY=l{*utciwE~a%bn2<=5j{pIe_-lDjBxW!?zp za#wKInA_?Qx51kR`4#DTe)knj%x^9@k-s8;FWl<4Q{l?|P5GM&0t*5`IgGpy z^Q%4u#{8{?-C9#L=QD@0fiKI6=f-luHOfK6$_6H%$DCEm@bWwplIrjek0D!s?y8ER ztru4XQ}$jd*nMEifh&di=HteHa>Fj=R%3*i&6!wXGxO->CjP~NXb1DPdqqj$MD^t$2M{lD| zY4Kq(AarU>18^P~ddAQ%`c#8i_Su3kLUcwt3^WJg|>0RDoow~uf zlN)J&9W9%?s-rQ{@A|dr9Z&UR#Vp4Qo9q3R<9+N8`IU!WZ)*4Q%Tu~*E0?*4lt)}1 z;-R>U%JuFYQ;XWdEB$(zHY!8z`f{_of4Ti)>cv;?56V+7rg*O0&E;%#bj2Uj2lwC0 z3oegPvHD_K`2gQFPvciwpH8`($~XJs@N@6-!EU@;a+ea~`i4=;hq+6qT77Zc4V6qg{IDd3kl#y`X%AdwuyB|2XbP+OFJT<;UBuzz~h0ex@zB=?!} z3LlM~-2=<3{IA=^QRUU`(*Ng)d=)cvJDUwJx8zcilzwY#c( z&ZXl+cFhl4d8@^^j+QUHIGz7h8}{!@E-k0qYccHp-JiFm=_qplxqKOmd(l|p*7B9U z^!{{@jru|^KfAtnRNs~cZ6elHcz@c+3FmE*zn)qmetmaC`!&xKw(miF}-OwRu8 zrmrpSXi44n@@*Z7>*nom`!Ug`?rcBga;p2i^4*tHFQ)kG&cz{@U!mRI$EdKKgO}-e zL-~=*Q(R2Bl0ICCesSu5@71OE)6v{6>&KePPjxiywZwDpoO*&C9Zyq!w&QVscueS} z3+0nBf7AKbr~C7%L5%yg+DdzAz$@5SQumgrc*@=ObJtJp({^=rgq8%9*Q!> zyZqX8|1ncXx%l@T9ZmC}k9TeJM*n9;KVI3dQbuRnIP=FU&-K{Ak5#t+rhMFoD(AG7 z=gM;}7RTK*)!7#JMx)A$#(p=N`;F%J@0G=kEiIc*`v3lavJH3h)O2?5{?Do}lEtB^ z?aY4sV_b~?FUZb+%y9AD{olXxJzMhVL)-I}|HQb!eSGSWb|v0>>LoKkPb~M|c4Plr zi5K?||JJOoiMjiG-)MD&-PDm+i-qy^*sF!#=rH$zsS`TRb81<~<6eJEch%HKuRry_ zKc%~N>hynCF7CZkXa2i*|F`kI(eDEP+wSjr%X4vmKXulne4rz9FQs+#+TAd99?~!J zkAJo2QU38R##5JHeC@lMy2^+C7tYsr?;X9fscSkK^S>pY`(Ks+w)YP=(%vt&k^dIy zwO@`a%>&wxeQi{KIr!c~TpNeWV}IW>WB+lh|G3`2ESGcjym#z~EIJaWJ^sG@{max| zynhD`&m1>)l<&2?C+jHB8y9ctmK(?G+Bl(o*nXuHm{ae%QrQ0--8^;Ae{Y$m?*H#C zTgOXwX}|8jHi2<*Z~cB=?)l#z=f{lUKJdeHbG1x8l=;Rd#8o*=jj`r z&-L&_InEI*4fpTQu7{5=&87XO`zv1v^zC?s(sp&_XM_H6eAqpyqLU9_-?^gemD64S zdlHnZ_m%nH_z?G*iZIky%=K}usJk?syRV1B)=aRqNZ0ziY<APT^rMiGe_ls3a=%wm($ToSckbj#;YwI>6hB#cO3c8FD^}WW!tP2FN=H0T8&G?J9_Ptr*Azuw+rtW z+sDHri%N~}+IMyz^dacXGal^Qk9Muk^Ddny`{eKbds`k?L{XQAxTjTiyPSICDDH;J zmHyn^eyc#;zw-~dcI^JL{WR`YD`-EQQ-j;W+O6%TIX}{il z(59qbn)cFm(LWrvmF0hZHSBJxECBxjSBvxiTbO%i<(M0B&+apogKq>MLMn&d7(U;} zrEVNn)ZdlRXUv#jq zzhOCE10x}geMd*oy#$|lF#D(`D0Q5ofiLRr(A~+tOOqz!2m}RxXRUmxoKU~j-lUta z!#4nx&#`j{-;hehY%C4gr|EEDB%jtB2gdnn_<%E!m80y>aWw}ueDNRi4lop&8nii90O-oJ7OBwTh;8m-8PB%;Uyl%Gc1>J9SYjlme1G-b-K8Ssl&|3nMhG*Alc>9os{xoeQ zj-lQ&+P$=L?=cjveVs^HiHQ2o$k`H z_|q!o1=5X`T6voE22TMW@FTWfIInmAIc+@X_;{oBD5QaZP2*oB3Ltg(3ejTn2YgfM zeeyYZN6-n+ko`ifuv8c(yega!CJ85nbHYaHTj@LD16h>^3Oi^f%@jj4F0Cy7L>sKN zh@sj_?NTvSyG^@8d;tG5*5F={rm(!N_|B4&>t0~3P>S9EvQ`m^CZV2A(tg~}Wj>LV zb?!sfk;>cLZ)84bi*X*HtN@R}%Ioll5n0yaT%uaIpU8bD^NCC|+<#A9M04DyQkT;g zz;%^^cXFv~2vweTr=%`aR=68emm!}9Ybf$zek1c)?Yh^e*3kXj|BU(SS>|j0$o(DM z|B(6QCW;zN9ZB=JKO6p7r2Zha45^v_8S@nl=o1u_6h3M{bprT-4t~tv!Tk@J4^4`c zwX_dSrWv#!&7y^LHEr-{8~r=|LZM1erBHcVS**OOysoTM-c}k_{12w;R8!PcHBC)d zGt^<~S=Hsy%z!-7MW`X!DF=mTnNP#HpTKvCOmx*P}}%RXJ}wO@be5q=6?qcV;DZ@h{^EtnBg-Y|8xvG zK6k4GxW7>X@eW_4yIh~sk6ifg5*7k(3?U{`PHqkT9Qp$qt1U=730gAS-K4w#9(CZM zpd{<*HXNumaG0!~$N2*CQKXmfN6F8-C)3}!3Cf;C&$yTJGO37PkNEYVzM;rGzx7C0 zk97EUEWRkelzzzLF4YL`r5e$_R2zt+Gwxi4)*Xk=9j68EDsW#4?zP~Pqfpm+qQQMU zNV9M>;Mj&dces<7oq~H3xF-=Ueck;AG-xvJXEN?*GD=hrZJ7*RnG8*ttoac4u0`4^ z`T=6>!1+%&@8)+_rMBRF!Cj@g5G#Pm88Ls2n6-%cYs9QY%v$9526Fs0V%H+}uMxYJ z1ffh-h@C^XLt4;uq{yL%arX+OpyCK{H=|yfmCnGc-Ah@0fJ+WIa^3K4yAC7DsIXH50-8r-EQ$r~ua8;S-p(BkL> z+#Tnhko;}r4{8$bAsL6K6mOsu&?a)9^b+jnHX2UjsevYWttf})!jh`!6LdO#l2+51 z^cngreV)EZm%_T%(YI+MeTS~6zo(n&Hu?eGPX9=E&_B`Lt(JF~enF4ZFX;*T6+J~y z({Jc^^c+1;Tc{f!lf^gmV43(oXGNoE6`c~GbW#G9&PsRX7Gh=pfjQcE+F~j4s=-ek(u~j>0t`Hl@Vku9`BP*o`q{-wr zQn@shG)a$1v&jb8C>zNxd7wOyd?bG%e?gjQ2n``0(_3gSd{Zs5&RFs7wXT|pOfS2By|$`Qhi*lA}7>U>MC+lU9GMrU#n}>HRP1~uDX%@ zOFf~UAm6B`)idObCO{KFzS9J1x{!04Zkk}?(!^=Zf}k0s86>EhyEH=ujpkm>eL^Qq zh2{yNvu3eosnA37y5@DEr{=es-wL;A-qE}x^wO-?tQT(8%34|It<`Be2|v|FY9oa{ z+Ei_-&{sQM`=k)2eL?%85TSiV`-%{)eN&77jny`48wI^~y>^2Tr`@RCC>XTcwL1i( zc9*tUuxL+fPYVe;StkpLf;}ntQfDtu3U;oz9%rBKTw!mv*V^msi|i}yYwa8D+w9G) zoNAmGIs5*$Yx_z2Ifv{Bbc8tiIP{JLN4oup{iL(zQmV7YQQ%zDzHcDNiar^eaU*~=N>G~z#Yvz#T)Af|isC8E7|N6Q&8% z$Y;U|;eB#I*emQME@7YW6>$sa#oqYvXJ0WxaEL?1A;J*Wk_dN;zZKUAQ^mF7R-sb- zqqtL;CGHYG66Q$3Qm!ypxZFf^KT3zCGomV;lg^1fVG26&7TG4- z#9negxu1Be{Fb~`>`en`fM}#$Xcy7MS{u=<1S>s6ixRGci#FD(hzUxZ5+^1q7R4gk z6^G&w9ZHIlB0819%3v{xwJ>6`QmT}SDauG?q?oFVRmO^G%6MhGn9f=oF+-J9SQm}dVt;j|x>6j#S|Rav z^-c9n{5!=v>O0~c>Uwp(I7t1S`aAJX^*!}Haj-_xXvDi%TO{7A>7nT<-mmGSi4sR? zESe;7oF-F~B~I4lX>J$GS-T`o*9_N;6st9lXeNu#YNl&y#5tN5G%t$tHFcT=;zG?b z&1&&g&6}Dx#RkpWns>zCYTnhnE3VaS(rgmn(!8hHBCgZCulYc1)O@b_TwJg1qwOPZ z(Au;K;=8QX5`U+CSo^TJNjq2jlK6Y=FSWlE-_tJAE)ut}R!jT?Yqi9!+TUw8i`%q+ z(EdT(u05zdB>qwRPwhX&o!VpCW8#O}o3-C+zZE~$p3|Na zcWYa;E#jZGZmnC~qr+4{{3M`XKtFLWwC*V!82^$Ck_QbOM26wZmiM8@DngI-2Qr_$ z5B*ysbjBZLd?SQF3(qqx6ov{_LXB__<`XXqqghXe`NXTx$w#4)-w6#|BX>a~^Muo^ zZxeemy%fX6`{5$QpTR{k9TlU*r^RQ*Jn=bkwpc9A5toX$vz`y(E5tS8Na*hS;v-CV z#V48WBK98fu=tF4L~If3MVFK-z9AJ#L&Wc-d!&()%KAvD3+p4LQ1p>=r9RN?Lz0te zw)8a9Y-ygcE%Hw({z+YqX4WFd z&_Eg}$1V9$EI-U_L;g8zW4t_3ouE#ZC#h4^O1T17Q7u0XOZbiajQYCzru?k>mimr7 z2R5)p{-t_UJtHrM_Uq&ZO(#v1+^C7s43hs0EgwmV<^jzcG*GifvzFe)^q3B1dQ9(S zdQ3~19@G1n9@G1w$H(YM%oID((b^Dg2%W6$sqIP2pxNPc3hR$)CDU*EDAR8`O`EID zrH^ax*4|C4pzDv&C$vv$pQJOiPidc`PibG&zC^2;-qUBaFKb_>HQM>w`Se-sLhV9Y z3rko+pVO|@uBEe?J$RU~KcNe>pK3p)uV_EhenuC< zPX11R#q5MG)qbJryGOM_UydeaWm9T_qLRW4BQ@IVyfDP;w=5X!b1?|rhPD1a8i>cxW zxI3ZAPl8$LKGo%)#_cCRAFDuad0%$Yz9z(7E(0LuxcsWr1xja#hk|)WNva%)+^ zt>xD=2-dO^e>iZnyoz?G-R0L|Ilbi7+*;m*wHV~J+*%r8ElKh_G=-+g?_!RSD{rFt zG++KbEvCitX4;<)kl&+s(Yxe7(7Wl~@>XtF@58RrMv+^FjS@}TufOc2@ zr2L5nE1xQ#(jFKu%QS>pDZLd|`ZVpWKBLx9hx&s00!>n1R9~dYu+l%!6!m>|8@*fo zQ2mhJqkg1*M29geh1E&0$#JmB59m(Ke$9Txq&ccNrkI)4DK=(xN*1i{7A0HTTN|q6 zYQwZ)N&&MyrI6X4Qp9XeDbc2DGnD??e%gM@?aU69fy@q-J79+sltFmH^SClZ`-Ju> zZ%9rv0@tNxMe-mQn%R+p0`sqh{qfW^c-DW^c+|*xTQg zmza$y3z>~6zhX9~EP{=Ft1M>Lr7UOGrMw2~Qj`_Cv%0g&DrQ~E>jC)z`O4~mI|BwQ zZ-{@f%=@9vaM*CdaMmaqI~jwGp~e^=Kf^u`YA1Y;VEl!aKwS&n_bGq#u%Q;?~r zDa;gavYRql3R973Fi&~Va>JZyfoZsDtZ9;InrWtK4&!TD(8k@g9C+Ceai#`qulAkk zxV4w5$+X3^)3lexFdbqP%jzHIOlQ35@kaqa$TeCT&BOx#dYsj1U3hKIth1c9oVJ!Q zXYS@XU%!7f4v@-bk2Cl7p=&!^hggT0qnLkv&a#6!v)OZQ-!~_N4sEnXbSRH3bB@($ z9$+479%&wLE;mm%*P83hi_9y{Yt0+Y+sw`8{pKU)ljd{gb7(IPT4YP0CB)LlqPHYi z(k%s+L6%a>7|TS;5;nDS0`}WsnuO|8F1+Q&iEw@t# zTu%O4jI@`@+_DEP3gl!t2CXZxX0^qDK8>@utQyd}@^X!XG>y1o@mS0f9+$OgBXE>i ztH8eo$6V_|+K^cBL!Ciq=w|3`h%%TB$%Y)m03SaC@t}6YXC!iG7-|^Fkh70ryt7Z6 zvmdg@Uyn1C+v5$}4ATv@hC0I{!%D+i!$!k4AE(4w%o&>PW7~0)?3cO3`iq@mf7`X; zq~RPM-vt^&jD3uHV}dc=aKvyjvHntOV!h)WbH)PCxqaWh95Gho7-TFpjxkO&K5DEs z&Nj~DDVMPj$2oh^_d8>wakFuUagXt!@tENn-YJJGadG%caR_A z61OvFI^j9T^lHyJi0Q29Y~t?3-DZ*b*W(;@i3jY%&7JJSnKK95hbJCzbi3S{L(QR% zBOYgOus4`vd{=E}=a?(G#G{Ev{lm;w&y_jVKG;6kImo`i-jYHxobA93|k25bZ zFL4li6LaQO=2aeaiND2uMQ2`T-)Y}z-o*S%++O=-F0sYq%-enF+Rm(Q&AZ!b_*#)& zF(0ria6$GU^HJveoMWZO*?Tf)KIJ*L@0(jd54+4|TE^mmsbcAA3A4mo?3PSRk!7%D zxFzT^%2>xxd!}WQquepxGR-p6F%m7)w(D@sj%#yXd%3FAPCBm1S>`b3NQN`Fx|NPa zmIapOj#~Si=+hRAFf2{>v5p+e7S_{Sc3SpY4q1*zyDVp{#HzD)vktKKwnkaa)?{lA z@KEbW>v(Iqb-K0IT4!BkU1?ow-DurrZMN=@(O8dIPg>96p>~2T&=z9rW77kr+X`%h zY^Am_wu!b!ZPm8fwt2Q?w$-*q`?SOgd!}u(ZHH}-?V#gn^mJsj)JF7La1eG%({JUtKVQBXeb*x{nS1TA_@ zuhv$YwXt9wnagwi9iw18)N^w`q^X*@Kvkd2I^z``Eo{rnkKzhWX1CMTw z-i|0}Zn7f>63gK-JLeb*n?x?4Opod1s6{-M8_RE{V{J@V^nP6(8)31{pfQ=91Ya34 z55Wjl?@Vx}I}4nHVtS#?AEW#>=a6BAp_d`TV9eQ)bBH;^+?*!(PL8t-C59nh`Ut}~ zjd7gjdG=T%7FNEI&?w5um49`#aQo@Ws_FEA}PH5ATd&hw-I=e_JXLGIAp zp{6rtV%FsjHFv|&n^8Q@Y&IvGb1s?-Mw!isa|tzfXvti2tz``SIvn1)BIbhWxbn^z zOXk9zEt$(^hivxP-nwq$U{*GQpU0&gD4bKW##&aghPmAF%*QOP2IpODmR7RM=L#2C zS6J7;-(cNp-Bq~2=WL>NpY^Quu=RxXtW9Lp+(oucw$ZtZa2{rxkXvgDwRHjx#t~|Z zu~~UYD)ZZ%EzdTPU)9eZKr@*au*dYPf&0KCG<=PONcLAp1ToO_JqvBhJ>Po!SILU7+cuD96KjWDrzoj zPMC&cCPO&SNm$@Pxkqx3B%Db&p0GTjA(13BC2V07&~|#xKzkDo`LH+ac*2?7BZ&lu zu4G|iw~~c| zXp0h8Caz7~n79q0`xB28?axb3JehdTF53h1(p%kZdr00Gd!M{9d9&?$dxAYZZ=$^b z#~}9qdG=EKGWZCCKM}{H_G-`n>Un(oZ2LT*G4^Hl&GyyyM*HTXDEp3rQ2U;OP@seM zWA@VpF?Lr$j6+itRTSmu>gWYOf@33)(~-rn$2m&!mNDlT;uyis{Y8&+)HudD${baW z8g`wxnYp4U&)G58b8hvIIXgKPIz`8dyd91;&S1xeykm~7j$Mv@dB=Rt8O)I5u;YZ| zET`?rJLVLfot(kNjm}U!dWgZ1ha(mKz@nkdIfpq%J15{=;hf=|<*aws=lAls!luF| z=aT$h&Q;EJ&P_OP_t2Ay`o}nTCuPDvz;IHL^QiNbvn5GM3Sw7DJ(I$c;u*qW$8{!- z!AZlD#=@G*Hz{xmrk_RLY zEhz&U$^15#JU+RcT_N`LuhJ!hcCl+8Zu4cn2j2Xv$>sY>mdax8s$%IOpr zuqJ&$NmZLm?Zt3vR~$g8MjR384W4jzRZ^9|Ce@jmmA@u`S87S>kkk>W<5J5~tMYf{ z?<(n)T9Z1rq*sBMdIHD7)D@{~QV*wY0Nz@FmwBnXaO?wxg`VYWi;sdH%$#qPF~dc5 zxr@@4V7+l_+j`@)8ELc9>it(BdnrrOR;8^=+f-DSwmofk+U}xlX$Oi<<}T`QO*@L? zRM9!+(pu7$^dPvN>0#;dMRn=+^vsNr=|x4&B^~;u4^AKM7jZ}lT>9AbvFz;UMwDEe z8&?9CK8g7kyYy-4Gd+~_IUdI(e{oHD>IBr~zWOPtxi(ikh`IEp^rqtK^ex4u={w=} zrXOOiXgXZE&lS%u=~cX%x%A_3XEI2JE~8uVpp4$d^NNx)q6!-_%sGcLk_)G?u}x8P zMh+Y4WDLz1kTIZW8_pv$re}=LD2HF0QJ1kOV`awLjEx!FGMY2?XB^2mnQ<;t&J2VL z$?TJ<$DbjmXBK1*$}G(slQ}W-(ah@1*_rb)m%**hY|Px8xg&E==E2NknWr;d{WSf$ z_UqLz0?yda*)OYKNxvceM)VukudH8HznXq?`z`FZqTiZ+8{oF~+tqJhzr+1b^gEj+ zW_8L6&I--S%Nm#!lV#0H#hKxOS;MkMXHCeeD4dgXD0flTjI3E%^;t`@R%NZr+LX0D zdvMn7tOHp`vrc8TWGmT0**&wvvg5Pu*_qiz*@F#1*~4*+#W5*+TK3HBIoS)cmuEL* zH)U_h-kH5O`%uoL?Bm&Ia!8IYr&~_%ocJ7jPG(L~&fuKkIb(As8G>`B;h2eIPR;`S zuWSQGX*1c#1mlV2g=iU>Lp#S>lf8u)d15@f6C=>QIF9F>$tAhElC8Pj3P$Jl#(2=2 zo1B}IJ0N#x!Gw~zxg$&FGETYUbIVJ1Q2g+--$8axn(T z-G6bfo_nNZVeZM?b9r)JU|vXGpFBPK;{^1=1qHh?@*0F=Oy0!2N70+I9(Er3ztwq- z=+$=Q?a4ccUg~t7D_@h}6@7~_KcasOe52={3i9%u{e$zf3Y+pv3Kg`%Lp<$m{)qf> zy!}MmnP0_@{F?l^g?sZCUfjCjYEAxz{HKlqU2ve_Xu+w1 zmO`a4sIX^YSYdpjy)d(|h+8OcI|~OF4lf*AIO*#3Rd^ikOc5#46?H4>jnP$9k-50B zD7h%7XaM@>kwxQSZ_|rs7u6Nj;;1WHRJ4*=Q_)(Ck~S7?;~3+klDS1kicS`tE0&7` zi$jY06zhu&u77Z-4gE-fBYJhAxE;_Cil@$BMx#miu~jm4XbcNFg_K3IH=^^V1^ z5=}|hl3pbdu-AwZV~MlBSmG?nLK`;(Z5V5f%D7Z&FfLz+QQ8_dL%|q$U&-N;6D4Oc zhUtV}IkbOFe{28L{&`q)EL35E2ML9*A)Po5thEiapf$e_qX1D}xF;Hx~oLMX6?U_mZ?hE?tdkwjpDmn}%F0}c`n@;Dy;zu__2 zU0{hX{G0ea29hY?Ad!`yl5X&IB!N?AGoNEf1z1bCltj-xcIPo=_<B+Fb>OGLl zCrFIonALt~UP5RArxd~uAToUHOM)LHKF9qR;OqFk;|z>{4u_AwOXvPi;LE&J0_QJs zi3N#apa*a)@_G*v_Hqi#s}sLR!plZTG$^f@e+NB=xIHD zg~ZWc(O(e@T}+n|D_u^P6FXf&R}cq%ovtQM>{9qGNybiv^(2+POW!3~Y_9^z#%_fV zNiOy&>?ZxOSK%``~}Me$MtLkaFx! z=uRrIpXe4+siY|-vDo>yil&ydAf#Z*gPQ=V65lNHK~%3QKisZ)MQ zRw=J2zap!ZrOGn$TV=Jfn!Jg@xca_EjaL{!&m+ z>8PhP(u1a>wtkA*$|8w0n`VQ8-6DvYhdo^uwi*nS0$PALg|rZLP=tCl@p{$rde!oJ z)uKk9#15uvS`971{u1D4QO6Rm;}FzwJqe>P)0asIok!;Z&!_WASGs^MfR?-h4d}`> z;AUvRV&Elo3Fz1jgB@nepc7iI6I!kl*ds(=Cu!Ijwi@@+KpU{HXdQhAc{R}{5`%p# z>w%emNYIaMBoI4VKEQ4u>}r8_?w~tB`4jyUC_AA&I<7qdTzgcmJrvq=h){YMI@Ad| zbcFPw|D^vUed$qp6!>#!RbOb;SGdPh^b~Tzei-2IXbXHdbz={fporMvg&i>@nC*!n zU9c=t2YP@KK%%fa20JXFh3F@+OQtj8FkS2dUF?aawYNYct(NGS%T1bS@ddK?Zt#x60XOerJ1l_^R&@Kj|g zaD`F&bZ0w$umkQkSc1SULBlOU150ocjfx#W>$ zmZ<7?>N!HOGw3`C#O|OL5`Y~-7l2)`nE-AxI@rv1(uwUEBC6(3nw^AdK77i||3t?jDq3VgHnX5enHw9eepD7Q3;TN?TiQqL_-5=V)n zNM~+uR&H+r+}`52y>;gHCVAUGwl7HHHimtiu(9sk#zOsUEQ;G$2=+7fLui|&#j_Qc zpfF1d<(3x3EiJ^)(tgU@OMzRLNN3Qepkd6$D1Dkf4a_V}q&2h#nAsbpv*~QuCu>=| zajWait*#rly58L0x^PSD!fmVzWj3bzS(L;rDwh5hEv^8&Y64GYQF`9)3f!)!pIvq0 zc6F1VU3K!at4`dmG_b2f*o*jg%Jx7qySjRGZPPj0q#m+O%k^avG?N zhrS9lk%xu=jpvB*8_Po%0F7n{2eFOd=vyTiZ#El|??Rv*w$(%uhXW0>Ed%Xy`mt?@ zZ8>O^er-zzZ%MqDHroauXNjBzN=MG1DI!8=fcC8-*?QWlkoGljiMJJbQJ5_m=qb=b zZ37sh=WM~)p^g-Cscooj0cZ<>2HWPa63C_2eYTmPeGaq-cZ?DsG}ShVN|JsD6pwEYNK#jz80!Hq zim>iRS(=blx&_X73grP>0C2SeBrT?1NI9(prSAVcIUgz79KFyiC^7-EL$x*Kzm!7Zhh3U4Y|d;Pg%BDb^&z)+GN=Zw7~t1WwSL0DX>4?veB9h zgnP5#PI2eIAp%xt?(@<%G8+4CfhUyCM2dPV#!%?@h-EHMu>>h*TUK~!wLHa4gyKM1 zx+8Rkr3UhY)GXDEM!p4_&XD*x5N|ESIY1Sh76HUt3*il*iJS{70oNh%mq7D)JmhW} z&XDvV&@fIL3N(Zv;X5D?=j|wSfd!T)VL!hm#{!GTxMHJaoF&gAE6V^&7DJF^j>U=4 zIHb+v{6+y~F+^EO(_2w;E3z}BtV5`kqn$uTjv!Bqo}X5Lx|jX-A^P22%=nxox7 zCmE8#)qI>Il+ApUBjjm@h9VRtG9P3}dIV^HE85GDh&nQZ6A z#*oqlXbVTMY4awIAOrITj?fC2QAU(`0?^u4)WDGRSI|~5B-aA1U`Sad9yKrHXf4np zhSZZl3mC$V$fM?ZPCE@Wm!lS-*$gQU0f7@z%mBZc4AFGqym^MD7I{EU<|-bq2lc%|k48kPCFrJc!2|DMnf5nbD`n z{XqkFhJ+>NJPRyYgf5%27)|(xTxw1?SW(i!VieYek4J9N;>%{cWf9OU*>1L)nRfnO znr@zEWWCTga(A=dY)0Bp(Bh5gg`^(xTjm(E5l8_|Z)Ckt08o^1I7{nlHfI`P3*tBK zQKploGc2vE*>uW?UPwaEU^-zs&C-H)(#TrY0HEVW*0O#iJzzR&Isy5?R!zrPnblti z7Sj>aamHDy@Px`g6StX;8PKw7^oVza$Aev(4lzVe0G(t=6DA%-N`@kVw)1#@7S5Xv zo6tV!%G@SE88({)Z)Fn$hgdrLO#Pts|b7C66Lk9vC0pUw0+^=R-5w&3pY(=_Oq%b5$ zB2?rk4Lz5OBiy_797kw!tY;XKzze;p522R98Eu2{D5ot2I?NGdYdpw9M+5EWG&Vxm z%n=(QY-EU!5Slo$fwqn#HbPj#5gQ?JP2eL0t_e~-Xv77&-N^e!O=E?p@Ih)b6vE?w-GC$K2SSr5de zD?xr1Pm1wCJcSHNSa=E<^0e?2GVa5|Q^@OpjxjEHibyO+TG1iS8TVy5z>pLUw2#wX zLvGa^!8c52NDe_LxC4a(ZDfc-`@ACr$i#wtKpPIUh9hXb1*HJzPk?xBv2m4UIiuk# zA;b{R(c3^V9HD*;5geUIJYFC2%b+1|aQ+gg8%NL=1LOso6{r(KQZclg>zX`(ka#X3 zu?UDuh*@>~nRv7?Y5>q#mO{J-dGOqXJkYp=giN3#JTx1~BcW2cH2y#|T3+%yS7Q9W z_=BkP`_O)$LRbzl4{BYG9-E1(s|3E&JZj9jzo_N35h~hUB{t zx{4vpwn=;#LsBqk{C)-KPdu(5jrYWtJ*Ls zPj~}J6OFN;uv(gK8X1lLkDQaH$DNC2v#*fHuWp9L_m! zj>7mKU$U4Uw=rrHP%uza6nZWJy?ords8v9(OVf>eqFDV71Z_nW8~@J(ZClhVPrh;U zqGo``xYR{KUxYcX=D5%(rY}2zdPSjs5#9g_i5dp9+SP2F6g3bfI0u?8ij9IBE_{Jc zUUXum0!(1=Dt^jsq(tGG?SBC-nl_X=8pbxaS~j7ASz2ZD9$>wGi= z*B92K>t}M>e4rT|!DjR;IeHUZxP7ozO3y6-PxZVlA?rJD8QRK^TMru_8n}*Av}XEr zU#K@ND~rc^fD8k9=q#X;R#Xr-5+!9N$l)}!2l`Bo=0dYuIGRF8>~W5I0v+Y(X@nkT zNKOZBAE(U$+QSjb9NWxMHPB9uCL#qShuqMf#%|(hI%u9dN4%cXIE`^$z)=lQJx92X zMR|}Gp|Q9ZFRJyS_%e>3LJH)AP=>}s(h{DQlGw4FRs}SgA#pQ82Xn+qI)owNW+2{f zh-VR+%0tly#5)<1z6MI*G?X&d%uyUrJV&U7*cgsb=kaK5z^?$bUJQ}buK3uH*xo>B zSz~*~hN1+ezEBaPn>a30DF?JbmO?^_Vs)HfIFP~-T0K}V3zZ0oK^cL9Kx<@3K>HQ9 zmZM27Ys^802;#)-XGo$zdt1?NhQv)s!8MWfgE5>ln+wN4HVDlI+Q^WQ2-L)B_X4eB zh|Qy8)^J)h&}trUD$q)fAcL6YJQR8rvxK9EfEF?&$;daD(?)~F>r$)&;&mxbaOK9# zj6wS%OaiTj#|wi_9^+^z&=H1Mi>^P!X?2LlbzFQKXe18}1scvnPaxhVjpC}p3344LNjvNtd7!?_;ikHVq2_e3qh-i zJ!@rcAwyQHFEqx=+QQB1{MgXAO&m>$Jqn?KruihcQyiNeZr1FH72^^-DCTTzBhdTm z_}I0vO`Ntec3rH8b8gH&JT<}ipEgHxeFx{Vn5|ayAe@GxdJuXwAUTpYM?sf?(tu8L z6bp2cBMZ=Rh9uk<`aB=v@}===ZtMUv=8ix$vFVnB4AJJ;WEM|+Qk@cOk4QOZq}D@G%9Ah zehx#LPog)*ZsTZA^x9aA3&csZ*<+8=UubhAmy7f_ATAerD-f3pwE=nLavRV=R#F-b zOR;7KQKHgQ`Kr{FG=^^mi)1ZAF|9SB-Q#Gbxs0PJF`;_s8mHBTA5T;h1ytw9$vmI*xL&(?Le5&Z;$0(~Qf#Xp)KP)mfz7#3gxFEsGj%4n&IHv^j!n zle7xRqfK3axYXoVP~_TMfjehBs@?5nO6`qt+b3r6zv^w2R4rqa9q5 zy`b$`3<-AwrE_#MlO$%w=zW0j6$%eB9W)%}XjF6zT1^%@B{~BA9*<|*Y=rDt3R9yA zIteu2v^pjVclj_l%RINqKmfSQ-@+=m9Z4SmaXXUU0`lDXyFea&2?FAGE-}RKoaZsf zxRK|PYbr26ro#IO<@YA)f%v^4UYSX6gq;gpAp4DM?!ZwKpF4mS8Da?Lv{6y3qIYmK zC29qF7#=U;oB?{pQbe3IrgAht;s_fpOQUIXIC>?Z@j$Z}!nbXj!+FnuZ`3r0S92Qb zBYZkX>wwTlBc2whq7{`f#Pg^%9Oo!EVtPEY%W}j+e~diBfzZeB(6We;tQMqcxFZ>J z3m{mA+Ck1Q9>`;5FGCu84UFH*>XgXc z(U6)XtD_=!MXlwiCUS?t=|SNqFiT^hHIW-JD`jX(WK;A^FKwNnD@VEUz2h++Mrc`l zH})KlrHI!V6ozPXJPH65q}GJj#-H+{>6l9~T5fnbn;r1b@t7SlG(UVK8!ZD(i5wE$ zlOfF~;R7NEakM8qCvpG}t%)p(hfX5isK~s?GhSL2W_XyZ)8?>XmaiNN)RiGR42ZY) z6zx@*j?;z%DIARf5*d;{q0N0=90jRmVSD38AZ-(9Tu;SX$o@!t2xMQYPKh|g<{e@! z?Ck(gu?%P*Lvm-JJq(G@B1LoD5f6%sj~nYnVR5Vs;EZ!>2JXpM%zDwe| zaWubgeT3dai-^LIl!r!y={@{LMTADMzVeIshBqFd$D-(0K}WikT?TnnH|d{J_9AFjVSfdViv}hdC^=pwiM%);P6R3_A{i8 zh^Y==1Z1J-!bkU5#%T%RBf{%F@nXjGSiosNiz$upgpPw8S+w8$7e?{VVk$1#0+Fg=Z@Ux#<=!8B)(TpAwKV-t@j#>hROuJNJ)#&wnKy(T2O3Aug&hq($!Q5;$HESK;>9csKFVo7iR@IcERU7J&;y{zm?5mk5g$P5OM*9W^mW*r zVAQnmB5akn#o{96-LRUlB?uj*1c%HCW^x_{evbw-xiH$a;9(vVHa?i?K@L)k4VwUT zk6aoW#W~-Dd`AQqF&h0iba!wGL*OzfcmPMip__t(y=Yx<7)Ozzt76!_-9x_)Gj~75 zkovRG`f#QP_oyR6XN9%!c)?*kJ)w#iTG9QShZZ`aJ7gu)D?w3Jak!A77Q z(9Q-&0L=#NSa2^!Q{U=)q&wsy)B_#r4*S5H{ov@K-K)K5K=-*ENqx6>U*M%BcSo(V zczri?XZL$A{Wv zRuFMdDQ-j&sWpgu6mY>55w}zkkswkRE-G~)iik@Qky=o+R;^Xs5fLl3RLSrCd~Q&* zwqNb{|9kyj|JQlVJ5Ofj%$b?znVILDd+uDWae015i*!l8my$Zal!sQyx3TluJX&t~ zIzt5|H=^4v8o(%rcW zly76%g52GyH{~u;>ON)l_N2?Ky<$f05|h$SE}K?SCuu_2mF*_EoT?!cIJ#OIcUbDy?f=XYLU9+ zSPeRpFDGXcQU=$^%q^yrQAp{z38aFw))n!J97!2_?W*9qXsdPmFWXZxQby^b_FR{~ z_x3t5XBN`6L3R5rIWH-$V6Up2F(xtB_0Ku2C9YS_xE85PPPZ1RT~1#~`|MSq6$kyc zzc;5-Dz*KRoHCPQpS53rk+x=UZjm-;(=V3Vep>e1Ez-H! zuS(jd?X#NOt-RB+7ZcZq@)qT^LpqGOW3%adzcTK1oRf{c_yb6-&%v6AE9iJpHaa({ zeeY~EBdKRLnnCj0cgwD)l#!rci0fv?opx`vdyly3X+t``*K*V3O7kKMzU1yq9EVI8phEiMz-o zFNr@?)o}vS&BRT%IP(rW_EzeB#EqB4eRl`l*#uUXF&5`$B8^h&jYz{x@?S=(l*D~S z%G>14p$)~0l#e_AcHNYU@7P8y_qLK+r_?`k4YkNS4V~|uWwXF(^nKf`1xO`9b@`WB z+iV12CB<3T3sOPHMAk(nkuRP#jeN7ow<&84662%z$ShkCvB~1ftg$A=HWv5ILg#*e z=Fa&lnK!vQ+>&(@sa-jDL6%w5Ye@65%$g=6HCW01Fr*u@CKLAwXIzkV3et4quE;tb zsXKAUTgkyOL3PpVS(U_{OzPw;8)cSSmemvK)L7fZy7KXq$4p%$Kg>MZa`mY<1u501 zS0JfRz4Mu~2dIbe3d;L_Bk37{$CHHeUWy}Mc@LA^XJT)acP-~??kBOgicTzNOuIM7 zJ}K|gt{pY-+lcFBwUm|sh|EWAtjy=1Na&AsJ zd72BeipZs_m8Kcp?+3i;rmhG}0a&IjQ%4nPWu-h*yqm1?3y}-S-bVun|NK4&*g_&iWkh*xC zOShJ?HoA|x{Ytl4$?QqJt8`0QHPTw|fYJ|2w_3jt_fgp}>le3Q!K-DraK_`D@p{=D zq~8&@w(JU|&BU!Q!=BxhNNdZ^K^lVeY}rJlxk#&3@+SBB(v_u8x8z%8CA;@g%B->h z)-U9HpsY7>w-9$xSy!ax#4ReTK%$kU3(AU+UZGa=N|z%&gVa!ZH_{78HQORd-Hk!F`#uU&vNuhe>NE>c73BBbk)rj}aUu8M6a9WP5{UMoFO61H1< ztVyvW$v3u?w#6nQ9a(B^yMWXY%EwqJtuiU?d{U1nrEPxqxK~<9x*z9mF_jwUX-}!1 z_V|ay4k#T^%6Q{@4jb|{|MA}ThSGyeza-Ae)A)#uLDKl(9qtYIi&htAxY7zqw0i zB%Mv%Ic-0pysMEWD=r)93`tj#?*d8O+qadrVmBeBv}M;Ft5jyBwnv&2e;rBMik-u` zseTzqTz}=eoH(r?dJdjW9KZWiXRWa-rCN=C+7>AHEaLKZpVv0k)|sRRisOv72&r>( zw@V^5ck4ION0O*p$(xcWujDmJJOe4&Bnc}lN%cF=KuXqIoIQWfU5*8%q`OOAl~kF# zpzVMb>829KuE!Iw+p5;8_uXN8HE%ebt<79_Jk=nql8%tsWR6qVeYdhjX7Vc_VsC% zZF`iorQ~b4?yQm=NkdBNN_ryAA?~7LoBM7eU%O)FG@gM}=FC?r&o**yuu^zZQJFKR zq^w1{qL^9L|BTd&bd43HZY#0gHR-)#<_DfdRF-Ti-qIqiE2h_Yc2HS7shGLXo5AWo zK_$~0#aoMQ?t7YZhZWo0_cSFBP{}+AC?1eDmorT2oi@cJ&h45usYR+tV;=Fk1=Ynx zX={-d#OsUmii=H(6&0tq%|kjhwjnw)&GcJIs@LW#lVU|}zAV{+#65EMinIcgVjHrT zrCAS|v^Xu(q@X%`o?Vwa)ERBo#di{S5;b@vzKt0k4Yql@*w#RkRH}DN0)iirH-vZ*!gi@yPEjjKpkT%EB zKGMcEuf@%B_-vugrubyy3gWM~+0cgGkhG?a^#=88(}(XcAqCZKdc+?-yT>*pcJd7>d>5o`_U$S>;SEJQV%Bn#?0bJmcsp;$k44&- zxHtJ83KDTI#cgcyyra!IvDaFpiLpma;=0u_Yq?2dd5g_@ZAfB$5w*gndp+@V5$lPh z6-5ikcWGK>qN}CaSSapTG@rPw#8nm1SN<#X_!ULg8y6s5UQ|aMqoU}HqNzxXg{&P# zW+iJmqixXyBt~+ZE=5-7)kx!tMv?C{q%lR*+3YWCZBZXmkEMR+#%Q_UA8ASqUApKp z%?YgK*1HpeElAi^)?0zKoW5!^Hh2|jG}4j5dL*8_wHXmSjg(BQEPSo-Eo&Qb;{sZ4 zrDWY5ET-gsu?>Z5f`v$|6NOI)4P2M<3ReZQiTe}MW5Eoh%Cs4Uj|3Nx%JcWa<*Ctw zBwO=VQC^PX0;Eih9xJaw5^GGMMzUE%;hccpb?-r{56sp&BVENix%L!cV&RNJ`jnbZ z%<5H0pL)!(g{g7Qr&5K}RNiAq=cc5zS%qg5GCsV=iJPRjXZ+_2Cl*d7?sQ5%Bj|~A zDbi^Hw&tCWG|`^RdFRIq3ablGLHZSO;{&rbtKW$%0=_$#^0XFt9fRu3L4{*UJszpQ zPmlX~NPT?k@r$TI-@@@GamK-_!F|N_DjY!^Yd~QS#Z4!!YvC~B{)p76up1KoN1?n9 zkCm^mo$@`5WQ&hk*sQ{ig+)kHiOW+e>q06OOPtzzWueW9ucS>a_=f{zNY9BgfB!FvT}In13rNwije5icm%?A>irP+jnr&s<{nErpIZFC`Uh z@X(Ccm3-^H%aOVxt@WmH#y|K~gGt^?NN;fNW8PY>L65iY<*h>MPrl{eIL;WY)O(S5 zDpqidhplBvQ}yZ2dSj?qzKEY<9#i zD>%&ykwzm;aCaenf>iDP%~FHvj7NB5lh1Ey<&o+&^qXyBP^9$(ZCazxanBO>|$R_PJb* zi;;?sGLDzD0%@E{mYS~~_p^{TEA>pIw@mVXL%v=nIiJM+{0**I>;aU#w>uT715$-M zDK&mG*>hc;?FMil%yrsJ?g0~Sqj;S78}VkCuJ6t#oNC1^fCWxl zlN}zDan?BTT$t^Iir-H$%I|EF{DRTDMXAc~tdTrK90s!-SM(H9DfSb)imSzSBDZi{ z-D;lDcit23(I&gKM(-qXGW7NRq=Y+Hsh=zUE~C>O#`LZEgvVsgxvdp{us9p$IRnMM zFxPQJt6`y&Cb$azkG zm~gDjY{z;g+p+#HP&oxossz(}uIg5((sI?ex!S8CSKn95b#lm)=M;<94|z(>b2{r> z>+M{9n=M~GpR0S2ycX?SYJtirZ~|oJbkVGc`CBww%5RB}At$ttN4A*fbW@%-qFH91 za|p~;D{>u;S?4{)`4p1nU1Q_!LvYj48+v+^7ZTd8#k=Qzc`Z1h%% zw<|_7h9|4=hAMusxIomb;tfO2b~;*&^S9brZ7NJf{P4UW;XRm%KmBmPKW26y&$q zbEci;YD{@`k{=cy5*Hbrly%w2%4@L-J1bW@@|^&=K=#kK8)|>65?9j$F{)eTyeT~k!l1)^;ht~#-8n|@m*PO!s$~wbnIYk+ z-(0OzZWA%BHCH6ul(nB_d7SmiSs;0<(M|QkBZ~P-F;7UY6}u?r4)K1)TwwHWlYEVM zB+PIE$EtBv9rCtIv zHD_i>%bBTJ#k*9gk=u z&^%lq%PCOL7f5$`S`G8mKM5z*S0`{*fn)kAaI81-v>#NS*6YHSx@n$q?j$u|eV(iQ z30)=8vQF)#bIm66w8G|TXQyn{A-m=3HpcTb2j)qGc}}1h(|Ddr&TGlpj+lHWP<$aw zNKQCrQ3YE23MA*L6?u+nHn(N|>8AXjh&PKi2PT}WV1~|0IL|5ORHHXa@<{P5qdP|O z46%=RohUsz*P869PrUJx4-!X<(v)|(PdH|V$3QMUTaZx_EZPY=HZ0b2y|7wW8@F;a($NE&dv^&xgtKA`&Vm;n+Hoso5am zoT5~n>!d8D{B$l#bdfzD(>W4in2^Yx!)s(t_V|e`nU7;pD(<$9ghbb8Nz`LQc4g z#k0jTjLz%gU&M{#YvNMax~1Q&J{j_AqLlCCq_lINa{gX?Kpbjxw}_vKTNN)^Uqnwh zzf_Fs;2bS^jB?Hvr77=9$pgi8VydsQO?KWCH;8`{-w^*OzA7#gRf2XqOlY@{1e+aM z3G>ATqU_nZMe^;?myN~Lw+ZKGiXRTMTkb?sGmLCc_EVtVF3?$ddj627JG(@S->VtJ zy?_#OQ={IKjkyP^mfUh zDaO{gjMQxAOjXPw;v#XSD4lvq$#cXr#Z*6ZmYlN5k0hTi{sCsB>hH?Zyona?9cy$g zHBa`R*Rn=@t@u|I-wOIIW2CLcJ0FOeb5i$E&KHUqZS>9&JyG`Koh5mIxLPzzNO)sm zrc)`oMto8{LQKtIDN7!q7_;rxsa3$Uc_wU`!DNl@cI7!5W;#P89|bdXPmz`CRZl(R zEmi7Z@pn1r>zUtsiu^7kNp75@M zv6Kyamnr9qN^MfAddV$NyxCiJ>MG8~is=VqP9XU`qodw%drDT_+$ECDwlh}2lMlL%HKcI*H}!7 z-%tDA|CJF(foReXAL(ljdSLI>*x8%kYU;cTf9YGJ%5Y*h@!z-m-=v)TZ{mMYAK|h+bMxQp zI;<`!>3iJWo*(!3d-m}!w)H=yGb=93ofa-j9~bUSUz1f8?#v5p>?GFP>_DFr8?6?p zF><8^JHnm2+eH~$cFWNYTJ>XUVP&Z0;WX0=C5ZO7CA^-JT8-ga(qm z`CHB~4eoiEZd3E2=E2kqXtucfFukDT-CCwsm!;BdbnYIXj8$@w=R0Rf6FR>|vl^HG z*%~NynU&OH?K>mtEz1g1dgTf&)_l4E*ekSvI z_npn-M~>rcXe}MF%?WHzfsvEEPvAM;RyZvlgO@mga(7bhPSpRTk==C8Y9|P?nu|tu z4GwKij@(-{J2Bda8Xf5b)~*q+2Qg~fH;5apjU(RXI<1=LjqK*-Hs3L_msi|;_Q*b7 zQ}eSU2avMbaYk(OuY~JjA2laOY={TVO(QltF{Nx$$^%MyBz_(#e~V8uIx+W6{(kAc zCH_@>+nBbidD)0v_}|YtgGX$z7LJ%>EgW%^wQ$5d`iOWZuG)69+K!lQwH+~Awe7`K z>PI}yRUn_k?A2T|Vv%zoyxZvwmpKQ+hnz#;V@@Bq+KIdKIDd(Io00yh9xw&pcMeKF) z4G}#IKhg9sd~({C785=%t`q-YbTYX1;NhFOw()UsrMOPCn)k4p58q%lAHL3NK74~} z-j~{NRye`vwD#U?t{c9<|BLvR_*e05WALH)kuk;xu#iV?5ZfAdcX+MI;T&W1tdaZN zS&lPwr`y};{a#!n{z2RzGImLQNqpJpryJQD8HT4CqeG3TkLr<|@M5Y%Zy$I>WVdJeAA-yLh3qnh1C^O*P$z{u8_Jy>RPqZ>N<3V)ph6!tLxAe+(FNBMpw5*%m%7HG8?GcW;RfDyxG7|&un0*$5RPxv}&cz2vxnzQmabC1!}Njo}8PlkeT{3?A~L_kj4I z_^|khxI%0aSrf_mgt$s%eI({-@fq=1@pG7M~HH6`vPhFj5OhEsQ6Nr-<{6PJyl4gYQNc9V%Z$ z7si?5mEtV%e(`bf9dVoZnfST5L;PCYWkeT7Ukr@s!k8hpHlhniU#w}KS2@moPFyQK z-&|jLq`OXhL0m8XL3~mCqcP}g`GbSR(Z;Z^*w0eK{^DWA=vs@9J{Gr#pNLzH)Pi+$ z@Dy7&;fCfFgEzSw#h1jF#ZBTXMq674U&PvKGHdJL%WZ8PJT)9(G2uXQnK7CvHi+}Y zJH&0`XX19FQ|P|h+;qfJ_fO($;-AIW#WzH}4f6a&#OFZ9=YaSe5DyS;7T*=$6F(NW zh@Tomd^vcyc!V)xeuT`AM=Yf-`OP(l&vkDV=ZUw8^Tpf61>zmzLa|ZAKjd8eLwJ`l zz(0Y9iG##3#wk z8L5l$FwuIUe=x>KT_ANa_B2u#<8<+QBfrvec60UNV;C>Snc|h=Eb)HvagkMmJln+2 z#LvYY;@9FXBjd&Bi-CAvbJ3uO@ckfThg8Onv9*!01DztXmqC}9y+G^*VlNPTf!GVg zULf`Yu@{KFKW7l^$Ky2QNMK{L#123=xS1F@PxmzdQ+tY*+9 zPLZvr11H$pJ8-qFy#rU%UrmE|qSfUV`pfv4__?@4{90uFA9M@- zW%R|sNFNz9#MVao2+~LS&D8@oVLQfo;%(x5@pf^6c!#)9Y!vSl7mIfpZQKu(H$89( zc4RS!iG##3Ml8tK(})Ebr;FDcRip6(#!(~VJn=ShzIeO1K)gd-C^m}RJ8>5GPVg=x zwJ;tg4id*0v1UkJj6IFi#W-EOUgYlUux(BovylP4%{m~~0kMt&z0qC6fRE6fai(~s zI7_@=d|Z4-+$Me|elC73?lPh^qb~+Vv}Vi@TZ>rPfWGLC_*A_Pn{V}k)C*Ft!{$@3 zWdC*4%ZUE~@gE@m1H^xT_zw{O0pdSE{0E5t0P!3koMc}Q*8ZpSX}jrt6p){D{i)a*ebJih^<3x9b)ScTZh;>#MU9U4zYEJtwU@b zV(SoFKWr7XGh*uyTR&`-**e75A+`>!b{S^zhplo-%nR@TnvKMMZ~Az)$auC8&lcj@ z!mpZ}`u8`V8gd=jY&4%5x}qm?Wn#E8WL!hWHN-dW|C;&6{a-WR7~&g4X1#u!%{T7< zn)$|%8KQrG^Sk@^H$S=mYfcCAllx6IKdA4Q<|p@?YTgjU8-jR45N`p&&jK#6O34QvIfym)>ux(}5N^{g%-JoNda%o6zOQA> z5L=6dB2T7>Z(~gR!Wf<=>gu7c9_s25{nz*HXn;6SREwe$B~KC6Khf_@b`GYu$_`yY zZyC{apT*vzlAjaTitmbOsL#E2&(!Au_Gh>JpNZSW&yDn-ahk|4iXmShUMOB9UMyZB zUMkj!mx-5))5RI$Oz{dM{byV*zAx(P^dIr`pD{yhEf$JJVjFRe(d?bdI z)=5Qk5Zyy`4>QFqF(GD)IbyDuC+3RNpvav(Wga9REFL2E5xEa1 zwXfJu>@OZBV(|xW@;8b*jG_8JRR4$S|4{uOs{cdve{`1RjII{%H9BJ*uetu7c=8FYlp;#og5sSqVv8`AtmWkzJJF&f3 zA$AZuihGHBi~ES3#Li+DabK~kxS!Zf>@M~Y_ZNGL2Z+7I1I6CrLE^#UAz~l#P_eJb zoi07hoi5}~7Y;Dm>VME!f1~6bMq9fNT5fAMRBzi#ebDhdWv)4BDR&w$O*Ki=b<=dM zP`V4HyHL6drMpnN3#Gg0EGs{{TD;fjoaEiv+|+xuc_O{X`S=vQSNr%B@H%m>$Q>0i zcn1*g0Ny0tEH;S05pNN>|056gfABVOzR3L_F$=^y#D!v`xJXoe{Kb;F3nc#%@wejL z;yvQM;(g*$F)1z+@!>cZuZnizRYANeh*t&ist#OXUKPZvf_PO2t}w3((gP5$3gT5k zyef!hAew=ARlUcVSJiu*c~uav3X;FK?xlLqHUARM6!9;S@h_p(ZLay3@EsBV5*hyz z;$K4iOGphNHS9gt{7Y!%&o%$D_gwQYA^s)AzwA9Xyd16$r;B#=)uCN|b$Eqnbyyu< zC0;FBeO8+f*n6D$fDkPoxWaruh@aYfu6csJ$F&+{Ii2gglIE9tHF&$kW~1+kz8Hux zF)pTwp%{s+#0;^um?>t72{BvD5p%^nakw}_93}otJW@PL94j6zjuVd;PY_QOPZF!e zpNr$gUx*XLlf_fS2aLgFu}-{9yj+|v&JbscSBO`NSBY1Pv&3t}dhyrdZ1Gxgj(D9o zSG-=lLA+7CNxWHHDkjBc;{D?9#D~O(#YeUNRHt{oYhxm8#OYtA#PVt}OSK`;km@F+Oi;BsjVzQ`MmQrOi zFC!}BlTpoQSmPsow**K%l^GRJY?8x^zpDDD?DU{hph0B z6&|v}Lsoc*hXwJiAifpEw}SXq5Z?;oTS0s)h;Ie)tsuS?94?L!M~OcZj}(s*$BIXb zs^s&;Y2x{! zdL~pqglc=Jwuir0>T||OHHlP{NOl;h%xIBP?-cJ6mx#X=?-A9{k;;$MpV3Cezhrdo zveDLcg*_M8Z@oPifDel36B&I%Gzn=dq`r_+A*DjJ2dOpW3J{IK=fxL9Ml)AwHri;0 zz8HuxF)q^gq|#zYi(xA^i{nK`Gcgmylf_d+da~;Z8_lpzWHcjRE>0I`h%?12#4E+C z#H&R{GiO~R){DOuXN%X0bHwY!x#IQW4I-nN5^fT27MF@iahZtr$-`)d4~b|W8SO(x zGsFfUHUO~!$Y_R)W{5pN>;YmAaJ7gk1pqP!?sQ8Ce!(Po4H#Z)2JmS&ofqWZ8_3>8>kmOvBHO zA-djgy*+z^=og-;_(|efqI7Meoc!mBjB;c~Ih?1n)Ni4B$;LYI>KPmB$j=#VtV7k) z#yYY}wy}ofD&NLB`8SF$8F^0e0HLqrIAb^t<=*#1{=0xIM4Dt<2h%YGsXF%v(Y)%+2p+DoaemmyyINtyytx2%y2&9xYCVyE_#)l#m*sD zyOnOO^Mreedx`U@d#ihwv(0_peaZRS-Q<4irn&$0irw~JTd&L==ymYMxkq@%ddIob zyc4|f?gidy-b}a7yVASbUEp2g)w_+}9Pc{!PH(<{xNmyzdY`**dtb1d$!Fd!zD)HGHbRWKU-^-r&PFU5exB#~1%4ZTbF##*^0NG4 z{*hike~drIJHj99AL9-7kMmFQhWgX|^Sz_}3;hedWBiN#>%C+7o%f~Q+5R&B0q<&m zqrcIs_uup1_kQi~@OQAUY#4;zoFF|&_pS>vf-G-tuy4@SyD8`vbYoxM{ezy~Z-QRI z!QQRGp~0cvf}npez`G+D6b$he1;c~k-rd2N;3)5&;8($~y!(QCf_uEB!F|E~UNU$v zSmP}Z-VHwR)&?I3pLpwoPlHdrKL(!%&EAHX7t8Vf63dI_^P4Dzu_A9fzdm2;{Vi4= zEBAK9Dq-Ztf%)6``vkOXKY|>AbY(}j7{{uik%TV!}~fmDK^R5Wxpfu zHQO)9`wsiuUFqvrf6IRN-2ZF%Vfdl{cEsyv{yR}Tiv0JYR#ArkQB)ii`&*)Y zqJ8{NqW)2Te`|Djbh!U%G&&mNZ;OtLj`O!iXGN3!&!Z{P6#t9p!l=&Q5lxS#`(H&f zqbvQdqpPD?ffHRH-57Y$%~3-Ti*AkP1!>V8(H%h)HAYK<^yr@G-XIYzkA4^AL@T1l zg1qR7=$W7}`hE0VP!_!(eHgTlwnW>5j=aabBiOgqpjLx|uKeEm&|p93J$n&s$&S*$OM^<}ZXEY_FB`m$JG7VFDmeOat8i}ic!x(A5|i-(AP#6v|{tS^i8WwE|2 z)|bWlvRHqRavm-oAr2NR#UWyqI8>B{`?6kN)*Hxr16gk%>kYkVYRfvh)>^#-!uK-L?`dIMQ+AnOfey@9MZko5+#-aytH$a;eZb;aL{ zYs8PmW@9WN=8Lk5nDid1MWI?0szsq%6skp`S`?~9q1qB^bc7lmp+-lj(GmX2N)FEz zYsH(yo5cq4H{va#tTcR7^5f!4VIk_loz4OU0zPOyq4$`h>SJA#YaJ%_xp33+1^J}IsipAw%IpAnxG*NV@Je3H&t ze-t-}8^ulHE8?r-pTrNukBn}Xm=LqY95GkS6AQ#bu}Ewq7K>zd&_Y(IO_YpgZoy9KVzG7E#Ke3zGUF;$5FZL7<5POLSiU)}Yi-(AP#6!itVn4CJ zc$hdqJWre^o-bY?$|l{5BwsAniI<6&i_=Bftt-2AWw);E)|K75vRikSa>{z$ddasK zy+Ps;;$X2-93obULq(P5sT_Xg+|E@w-mfKJE6x{h7Z->cDeR|d`886!MUpilyt^ge zBi<`&M0gqzo<@YH5#h62#J`JQivJLIivJY9GWsLMsiHK_H#{s}z3NLxzWUZz-}>rX zUw!M(R!;S>KgZ-CO$@~hv5UB`C|hMuS<5L~4SGwKWd^dqK(-eQRQ%zhY#@;KgDS-g z6NejP8b>je6H_^{K8ly7;w2`>n~iC*^E6#0O?IB9Yo+O0X(uX=`Z-PgoTh$GQ$MGv zhtswx&u5}~IE+jV(?zv6Y%Muc%n}o#W|&YjOsKgf%vXGY*hVZCRp+p+?^DbJqI!(S zH1tE{ik|3;F)=R6HX`*>q&X_mxQ@d1n@*43u z@qO_F@k23HPw6p|?pjq@e5;{G_g3>h*pGRTh}|J$caSXNr@=v&6~b+2R!O zm*P3%uf%i3T5+n#=%+O1dw9NxPLOAaGez`3%$4F*;??3c;vFKELLMvyE)wq)7mIg^ zOT^!bcZ>Ik_lnpT=Q4s}QbZ5P_lwva@`K`X@ps}w;=|%2;-lgUu}OSPd|X^9J|V6W zpA=V%Pl->9&xp^8YsKfqb>bhz4dO;|llY4Gs`w}I0})FsTxQ+}WNv`W4KPQ{74yUb zu}~}$+lb5)Npm>mYuy}~rM?6&QEA|uni-(B=#Ph^y;`!nQqHNN<4_bY(SSMa4UM@}- zWw+*ikW+SR-UqVm*1QkoS)#1hyboe-F`D-Qj}QlomEsVwN*pSxH1j^luX4=$KxSrw z*NXGS+rSerNc>pbB7P!n6+adKCVnCA5dSWIDgHy; zDgIOZ%IG_yD~=SWin0XrO30($Hm?L(eQsU}vijV-66D#UdfvPeVy+YCir0%bh&PHi zi8qT4;%`J*f`6;zdE#xNwCPKmzO?B}o4&N^OPjv5=}VjbVx4uDD9!rPtS`;_(yTAd z`qHc~&HB=;FU|V*+qvdj!BET)yNLUW8j0pxkw+uZd@JNbM2#==t%w;Y9xlql&9@@H zN*pE*H^zK%FHxnLhedoJQS~$rt8kflScS{X!-Be&d05E0qIp=zCyF15AB(ad^RP&j z<(P+s{F%7JXdV_!7u9O>u!zYNv&4j$Eov4u4~sncVu9F3EEZK~^RUQMDwc`mVmqEKyb$&X;_< zD60!)b)l@z{4(k$`w1VA{E^Z8GU$q)D4j+z$#GG3Wquhs)j#H)ArBVSQuEG;xk$WJ ztP?L2RsTpeH}8y`_2REZjjU*m}#Z+x&1LmC-E^9SNtP+PBy)^UDGDn$@ z1`ijpUu0?uv0^wx+I8m$-&k)ZPCy8f?lf|>eDdI21bHrbX=ZdxBRFS!m`mid(^F=g+j5WcTBHBQ{ zQoKsMTD(TQLtH3gHRN0*-YG5??-G}YzZLHm?-B16u{F+R4uDCKxeEDy5gSB)P+Ttl zPJBpwSbRi$R9qo8iI0hoiz~$^#8u*x;%f0J@oDiH@mX=L_`JAI{G+%*+$e4mUlCsw z|0I4OVvm`l%twRFE|A#;=7_mso>(9jibY}@kH5|Nn(xl}9@%f)tLd$B_7Aa)e@ z689GO5j%;U#V+E$VpnlLv76Xk>>=(i_7o2gdx-~%2Z;xZhlqW|L&d&gKe4}fm^eT@ zPn;&6FJ2(ZCe25q)fbC(;$`CH;&f4VYd#t|Ww+*|AcBj%lvM`OgiGvpf-^BQcxI%0a&GHBFPLXkyXx2W6cZ!T=;e&Xm z$Y{1bh;I-XWy$98aV^=fd3?x!7GD?N5dR{+CH_@>TYN{{EWRhcFMc3?C~8D_A4}dM zej;uaKNbHbej)A<|1N$h{zKd;{!{!)+y$$6CkpaTl+hOhF($^vG%*w-v6Yx1wiYwR zEYU`86>m5iZRA$*hNIC&ZWZrD8ApgC;V6HU;_>1M;)&u(Vzu~l zalH5oae{cVc#1d`j`B5v%(q3Bt(tF(EQ>YY7P(&hwK!Y6R-7Z6T~(QHm^sRPLukD* zD(E8aD{2NX-;j9C0OlJaYrNZYH{=20K=E+#2vH-;d_(dK6RqDy1(RV_P$ym{UM@}- zXNWegtAZ;;YhzVlZLA8cja9s%X|y(01=hx@z}i?9SR1PXYhzVlZLA8e6X%N8i#Lcj ziZ_Wji%Z3%xJTq)Xktm2(6l>i61y8W%lKF$1L>|ajW>LxJ~>_{9HNzCVnCAP`r(!s=&rk z74Lc(Z5&kvHjb(S8%I^a*GAs;G6rlB+M74NOg5{i;*Bq3D>x{YX(jXCm&Ig@8W+6x zMNHa9aFjg*g;wWL=AlEY?I`omq1AR2?}l0a&qb@}DD&1~x|kuh7BfY=;;1ko+I2_S z^H6Bl9cA7+ED+m>#bSxrRxB0E#B#Bn*k0@`b`kd#yNYAPqeL60qe2^}qe2^}qj;;0 zKH;r4<0<0lik~Rfh-Zjr!m4nRc$PR>oGRK_t_r7#=Zo{;pzt<0%KUeDySPBSLtH2} ziuZ{Rh_XZ8k+U*oX}lw6^6$mxL}|)={LE42<3rQ_sK^&><{TBpMYDuayi-RByi;eC zcFfZ!ez16?;?-BYYiD^bRQyHarDC0UnP}sBlzIJdmRK+TTD0*vinsNwoIAz4#3kZy z#e2jjM75N+^(_AyI4ZI>j^fQdi+M?WUojtuABw3SkY(C^L*^)Zo{Q(1a5vZ4ub47H zv3G2`JFR$ncbO1D>LCe2($I8K?(0p#oJXfwS#h@+E}39Z*jCkaE?mOQB87GPL7j7{v2IB z$B82cY8~%kB8Te3K=F=#6Dd=_JQFz`N%fSWPk1L<~I*UDYk7{%oyyw4u+BT$~S&ixM0^_NxEOeCb{GJfe#1?U^5BZ_nPIby4}f`D@x0 z`Ppjne|tm?KjtW%(X&r9!;ZwOKPLZw%9Ea%I-=R>nbGWwoqO(i+G_Hi|Fy_wPl?RM ztvlr$iA5%`PmAf*x>M_iBqa()MG2)?njPd$rN9;6NDHHHkJ-rm$+;%n&Y7vR+S>K7 z`xcGlJVbnpyvN@})R5aYx9xvw;r~93p4me$ZPrDdbXaXt()XBN`Ar*kx8*A$s_R%6p&Yc&=+-#rwUnK8ru{<|YR(^4Ylo0LU;%Z}sNBmAPMqU@%qqRX(12{wAe z*IF&Mc_*sKTHosF@HNu}+}%4ie>7xF(EnA@Pd?I*4c`mjiWRnSMrk+kbat z42Z_i>n%0O9~O1`F}-Q|x#mwiTF=!Xf+h_jj-#WC7b_@va%!is4UE7^1S z+UBI?Z*EX3yAa!FiMAJU^RtR4HMpm_7P224yAMC)j3&ou{62hi+b`1bY`0zePC9!G zb49Lw4br8}4M78?T3j9H)*|gk+y~9|oWb|rIjc^46ermyD~a?cQaf8ICV79P?pE>` zO0H#R5w7OF?TpqH{oS=-H_g$hG>f-SFxa=4{qJ0MwxiS<$LH#mwkxt~z;6sYC(+^t z$|a3lb>yleriowreVmqYw?$vPzZrR z{z>?nx&{H?#^2-oq+@yN$Wuq2I`Y(MSL9@HUvq7+y!lXe{HkXMlCquWnA@pc%CLxRdN;AtPM-4e@NUb5YhSVBTYuGvY`z5FPrr!F-3EAhk5ltu2 zIX2zg#LmWz>}%W@t|R<`{t4B~XE)dH)}!@wjQu`hRn7J4@sMwiB>f`FF`H|o?(B0$ z+iSQ&oq8qWDm7fChO2zjS6r!vE7fqN8m?61MC=*S@SR?IhrVEM)p)S$i*NLm{vUJg z*{7{+r&`6-4J^h`J?9UvF-HdZtKhVj=lVt zzG(e}wdwuCHDd3u!KziJ`sO(_@B-mmi#LsY+mqkgcvBjCgs@**6TA9;Ux)17*fInB zi$2j@6Pjy6b4_TjiF)|`6wEUjmE$9nd5)SSS+j`h*?EhRTl zaswqdP;vt$H&Ai|B{xuV10^?5aswqdP;vt$H&Ai|CEIG&K*5UHlG2lu zo}~07r6(ypN$KpBYCEOcKB=}#s_l_#JEUrL{0o2gSRq}!2zN69JGT8$Z8ucg3)TG; zD`UqR+yB&dKefG2ZRb;`@W)sk`vN7|4=5S1-{emk9c{idQh6%+&0N{r8T>EvCA)!p zJkMphJ)F`a4@@V=3@&#(QbD8R^PWD z^zg$r^j}}a{I(x`m15fRzqZspeTjc-wI%=6?C{-r`Uh9A@6Y&}HQKuA%^7Yh0&Ah$ znvltwXe+_@XW{S7c>lHSm2<@;S4?unBv(vw#iaLBXTv`;*QY$5XDhF3Ad) zWQ9wrz2)+=&SqVlL6}KkuPNK@D#rd?w%?WQcJ&=khmmW$s@k4cw&RugownPR?R8~4 zUD-ZY_y|q$rOrvV&z0?RWqVw4_3wB_wnvrI{zvsJ>$&atW&3^EZeNV%%XMu-!B!kTD1X4!sOPQ`ad|G)E+?q_848IQdSU~dB0djRY#GMIfuQfoJ#hj@G% zqBWfL%I@zI;1j|0ZVvmKv&OSe?KYr{i_h)>7 z*9qne`~~|kf8ei#w+SCLn_p0iR>q|LgIRs2Y-0?zF(!NgdA)38Ot_K1FE=OC?cSlg z%N`Z(R|NK_aM=^ZWk(bq(R15JK=61O)Z@9l$5VPQPGIi}k53{zKJ4<+39Sejgw}*i z0()3^2?D!Vct3~!|%>Uf?_223H z8|dd73D492|49eE*v&u5Uj9jUEnx$5@J7N*gqI1M2(J)cWu4(poRzqNmAHYGxPg_p zft9#{eaahHi5s|AZE)Wv;NP7@!6fkKc8>`;X=Yigo_E65H2O~ zQIYL%>t9ZoPMATMNw|VfjqW2XC%jLfOxw*ihIO(Ns}sXAV_01btFs+!V{;sCrkmek z?^VZkv1Q*?$3C#K{cCOa+Gqe_Ab~pBuC?s2>O{21_N#UJutwIh-q+%9)#7i};&0XB zZ`I;&)#7i};&0XBZ`I;&)#A<7;&0WW-zN0ignpaQV=a2DMUS=Uu@*hnqQ_eFSc@KO z(PJ%otVNHt=&=?()}qH+^jM1?YtdsZdaT9wueIkh=&^|vz7~J37Jsf5oi^dq)#A_9 z;?LEhU-Q0d(Q7Swt;N5q#lNdXuTA)Pwdl4M-PWSdCiK~aKAX^IlXEC7{mGuCzk&Oh zmk2KtutFaz^sz!8EA-zd@T9>1knj-!tMsu-{}aMi0@mrX7rM_*=>B#Bd!hS(BYZ*F zN%$w$Yfnlg zcc*AOQQAI~A^w2vL1{ZshWG@w`($`E0pGy(l;nF>>@e=~_J{2*&Ns0JyR2X?Z%w$o zGvV^agu8%n2Z47b+(rWL6S#{ByiveAd86$OyKh$1<^0cYsp;w9zpk#MH7~3r{kyBI zJ@xzUD$9Q4_~oOu+OFfTt+?z(Zab0NKIFCwx$Qx2JCFx`oWa3igh2$}pU|qzZsch^ zGYR)6^d#5{{R?L_Pg6#Jvr@A&xvkdGKJx2FN7MOxJ*)R%4;^{v$a|E)81@*$-n#_+ zB@che+fKk|@{c1-Bb-mTfN&wIjz+(1(vceDvX?4s4;YQlSj4+uQp zXq5HS0$;>qj-~M>Xq5HS0$;>qj-~M>Xq5HS0$; z>qj-~M-A&o4eLjZ%acf#Cz0+81fHe3Jdt!?B>a)EfhQgt2`>>|CTt?SLU^^gk~OE2 zHK&p_r;;_Nk~OE2HK&p_r;;_Nk~OE2HK&p_r;;_Nk~OE2HK&p_r;;_Nk~OE&-Nw`8 z&j{NIpA-H@_=2#5@OJ{gHs}6>u#;!d_N2daa}Cd?oAkuL7k>}pe}4aiwW^x6swNmr z+-Uyt3n0Oz+)MBqAgo!{tXVZ-KSF;3zkY*%QjdR9&pKAgI#$U#Rv8X%t_v#(%k(6t znl-H&f2E#vt%h~2nsu#`b*&Pgr5>N99-pP2HLiv=u7)+PhBdARpQRq3r5>N9-l;`1 zKl#}bnyEuGb!esz%_PxG63ryhOcKo`(M%G}B+)<(8mK`7HE5s)4b-548Z=OY25Qhi z4H~FH12t%%1`X7pff_VWg9d8QKn)tGVSR04eQjcWZSpq}ULpL6!1EcO`(yug!W)D) z3GWa#6PT@7f16l;n^=FFSbv*Xf16l;n^=FFSbv*Xf16l;n^=FFSbv*Xf16l;n^=FF zSbv*Xf16l;o6u?vTCGEqb!hV2=W3(*pJ!{V!A-2eP4NPrh8Gfw2yO5%Tb{Y0**Y|v zM6*dWn?$2EXtV~6*05eTh4qAA6J`@waap^YSi758yX(=gX}1pT)}!4Tv|GbE-o!fI z#5&%@I^Kka>(Fo=8m>dbb__NE=uYTC;F+AmGdbq~LNCIBgx&;vcYen*$*))@`3=jY z!>@Td`0)-s%dX~0_B2mA1F*Uucna}fT6qIj-hh=iFb5==1Cq=EN#=kgb3l?gAjuq% zWDZC&2PByTlFR`~c2ZCJ#ItjH63eN_avHE4dv3a?<=C^+`q(nOw>A7-M_AwN@EMWA zGm7rq?RMvGw>!J6C(~#*JFMIDO1snQ&gXaae12EYXGsp9B{}T7o@Cec|HIz7z}Zyo z5B#_HUi+NAXU3Su`m;$ zXQXbqa)lIEa$Q&U|GU@BF=uAZJP1Ah&V0W6{MK*1erxT0_FliW_g?clYp%b}itDdC z>}OV6k39Nja^85I^M>!0HFMTNir0*LP{KVZ(MQ?M_%i;H_5k0HFgo;)wgt$HRynd} za%9cq$ePL8gBQ`u-Qi;B0hhp~a2fQ35cGn}VXCZ>hxmRN9)W4_C`^aP0AEp_fEn;4 zz2~2cTFEM-BXTFW06GKK8(42#2;Bf13~Vs4!N3Ls8w_kPu))9v0~-u%FtEYs4Oc)P zxDxupRnQNvhW;=BlHeK`2-m_OxDE!x^)Li(fT3_B41=3sIM92>2)G4C!mTh0{sg1p zHW&jF;4V1KxqRa#^1KYMz^lNXF_{2+MzIYYm(p8K_S1}e{#aMI|W6@QNMOQHvUB$UN ziF0)l=jtTR#YvotlQt9w|epyMZk2jkCsyA1)Ql8O%8q>cMGHA5Mn` z5I(P;h1?RT*J%a(QXT4cI72!7Vja#=Uvm*0v0B|gRXVs!7oUm*qPJARo-{4$gHWhTj1*2Q0n50`8* zel{8J;>#uD%OzVTGNU<5feir&f&(rv{cqk7Ea1ta!1*ixYLl3wVE`y$M zIrN4rpbzwetAR1JH2{*}8W;%I!XUT~2E(J+HXS(E;9Dl+TPEXMCgWQs<69=baphKz!xJzAo`eOk5MGBx@CLjMT*VbvafJ_~@L|*vSPIMF zZ}2|+9hSofumV1Wm9Pp{!#A)Q81X1ZJZc9px=}yCPS^$ihQ06~*a!awMmNeI-QX#x zpBce$obbORl3Cq;DQnv=wK+Ce*M6zZalo4POIgu=sofSj@I1o>d|wFNk!e43?6=w2 z%{45JYgioDu(n*o+HwtR%Z%+rW^5-iV>^)<+llyQbL?-u_4sFV>}`B+_qqmbu4}vT z&wkYN>us6Ioybh?M6PjhT;t+cJAbJ&9Bu~o7i;D(b#8~Ta0g7{ddT$)Uq2aNKN(*? z8DBpcUq2aNKN(*?*`ZHaO@AqC=`Up^{iUpFCYMt$ob1v!K7EJjoG z1FP#Vd`rfA3^c`p1 zZcLC@Skw0k677vy^+ma@)o_OZ)4WRSNXkHukw4pUgh@z^D*-=<3ZN?oo6Jo*6(uTF>?hg{w`p} z-yGvz^KN^03K!Sw>=cse_8o=k9b5%#x$$UX|R^7}O^-91CR7l;-8o;-xk*xSTO5J6yRXj6h zZcww-Eb|LBTg^7VRL`h+=0DVYRtWxv6@oXJJ6R$4TkB+22==VHwrK~gbL?2VoOPa^ zU?*A~>`Hbe>jJx~UDfJrSGTKMUHo-|t*-t$!B#iBf!)Bm$X_AY>Tb8S+gcafo$bz6 z54)>

    >Hp*`91&WTj>HKem$WwRVa%(9X0ot-xZ|kqlm8|l+oK=2ruvR%YvC3~MtNcF9s=bf0%I{9+Iac{SpjY|z zoHv~}mEo`Qt4x2DUu8KfoR5^_ukWkkoll(m0wkfRelXs*|l9;RdpTL zRn`2pepL-O-Yu_cxmDaM>SVW?TV2(0>$&w*U4Qjob*jJmFYE8J`mZ|8?dkSX_5Ia< zRYQOEU)9K8{Z}<%_20YH8SdTgJ*ow(|K6w0cJF8P-&XD;?jx#=JDoLv+xpK&s`K?4 zz#a4&z@1nF_+@p0UIVzhyVzZHG_PIs3Y6jLdtk{TS-ET*}-Uat*&gRzj+2tuqy5Mm{Q5ObIz=IuhPK@egEf{=8B z-f#uK5)hWlX(JOKU^3;tR0Id~rC!aR5Z=EIBd z61)trz^kwTUW0{z{~?Qj{}3sQf&UK43Xvh!hYYbgWQa3xh?OBjtP2?u{!gSV1^!2* z@Z2S7<1DI(az)br9 zX4(fZ(>{Qi_5sYa4`8N!05k0am}wutO#1+{6EohOp$l|{3!xia1k}M>_95P}5AlY5 zi1+J5yj>qM=fE@YEO1QmPJM_s>O;IwAJQ}T97nuKAL2dw5O2|kOpYVopbzo>e2BN_ z3(xHP&nx;f`_b>4a}4Qc7z@tq=RVIEdH+28#EpL+o#!0$X7;=I&&owUdBc14A(LZ@ zcj`mDQ6J)c`jGi0`~$v*jqp#{1mD1B*aF|eR@esH;XBv?-@^~E6ZXJQuowOV`{2K@ zAN~gi^qqw+tj-v+#sK@Dx5PudBOc-n@euEahj=?Y#Jk}kRy_)_=26I^zgh1nq=Mi; z95jN)!12fX-yz=q4)N}Hh&R9eyA}g@_dCR$ivfybjCFxR+`$;2F5sK}%&I^k-un*m z)^|u<483@AnCA^_j(yf!46znah?RgsHv5ZJfI@jsDTP=CD8w2-Ayxni1u2)+fBZWk z1A_herayx8N09yq(jP(kf;WmoyiXho(iglx9OCWakb~{4^Alo~pO8}?5`f<@?+b@` zTR6nK!Xe%i4ms7JI#56B`Gi=_C*;(Ilc5fr0u6zFVEvvDtM`PsBQt#mL*~Oc$;@(^S_SP$6K1oQ!0EaX7&n~>8)R%TYsLy>y_Ep8}vSh)i>&{TEqAX)-ej{4U`2^*m`7<xnQSr=L;g8?1hx73!rgbQszVNYF zI9?>@@uIpyngJ)Peq!%^IxQ{B-v}7QtI{7<7u+uKGEAyT|leOI_dRVVsi3rY>-pak0o7Q{k<`# zfo|jajQ)|ia^)Nqsq?mFaXrcVRLp5g8EwKr{arh!Wf5J01o;7PT}}tCO~dmwxspD1 zinO6{s@#3zeUpPoy1n#Y;m-gRv_Gsae0+rUzxnScfAbdpG6TD0 zMZcCGVX6Lk?g+CUm28p6nSXx!we2}7jq(dAqyOZO(aQcwnOgR=M=9l`^yl+SN|}%^ zAE}gb?~&&G<@uuYUze0=zo^_gDGR-?QqrYP%6h*XeXXsN@)g&C8HdWPlQR2|oG0k9 z`RggqOU0Ce^9YnzByD&+RwpHYOi(A~EuHS9lqLGROUiQpJNLS;HF~_DuRR=N__nLD z@he!H8=Jq?Y5X>n^3iQ0jW;t*UvEoEUV3z1pB+j#Aa4w#`|pr8Pe|E}?T6Z5-0y}l zR+MW#MZ}~Wh@5ZGW4qd^br`E08uR7XnDekN>oJ5sR(d@}pCiNV*XbxPKmFlIsivRW zo1f~0bLsP*KOUS2bD0yVofBB4l6q(inx8teLB4z#68%>+ zb!z=cUF7+_fWCzDo_Aep3vW?sd-f&IeP-nL*Da|{BHM8=Gc6{6d``&?Qu}*JX`8&& zIjO9A5%r_Y|gZM#Oulq4~{>~4}w$!2en@zZny}79)!aBbU${ekKmlIRR zX+38@U4O(r@#4_=eg^%S`$?S~mI{CSZ8*3!b$VgFA3kIQxwXG0X$ihw+cDFZ^3&vo zqHn|HgwyLjo1QucUutk<4v+o|hkbv~{}^`aE4dPN<@IZ>-$$77lK=5$r}Dg4t{mPL z4(oIga}MEjT3_gVu74+aZH-JBh(zd*L*5-2z~ zbR<eFi_ESmMr-E(B z9a}}(!<=oQ{_|rC&f|vXyb9*~V~2_x3;dB=9sSr&&5Ufb9_QVdx*<~M%ifZ-9))#< z!`{r?`93eg+wo?mZY-QGGF%5GM^>^gBZep(P#P)D{Ao~3Y=I+}^mg4QrF5)+MJF>e+rYW4KU|gY)Kc5`+ zQ=C0f6PHQpjZWQCri8~ZQNjMp)_za)>Pm`>o+DE0C1+KQ4F6)e%aNCWFSg^zbN@Oy zkEH*?&zKyJ#nG>0hf~7QO?xD5IJy=bUTH_tf3)$3+*&gCglqHmX6_Hmg}=R&EL2&6 z!uqm=y|1#EWr*_8=e_*;qogQNmEVRElO2`FV%u<7r5}F(g~tir($syCRfYF?I6gAY zm%URNwAD%F?H}YFX{vC%P7^U+h#EgJPrHyZ3e_JieQ~1I6gj_FN}m+S7bOtwxf4HG!MP9S#r!$hLh~L`ii@HudEO{WhTpxaaNqrs z21WWZ(aYhPfO%n|MRy@Dx@FjTB>QCfLvUBfBD*P3ux zBqDBgt~T=dyVCZz_j+32@ce_{PepyFyfP2tS=Gq;^X)8~RN^z!OEM2#Z6dE@k#gZ$ zyxn>Bug{zx8RzANy}UBLfoZ+`wukkRX$!~^*C$`^_g{hd-%FrDIwOg^Pn+zGVet?1 zU1&f3(sp_4vziwzBjOpnqLGI*-cQ+!4h0TZD8|o?ZqMNs{>MrmK6ezpCKtHhPLsW* zX-Qt&v?*S%w85qIK;iSPcUyMe86`4jb`w7ou0JxSuwjZ9PSt^x2Cj*kG#e|LeBj5A6ft9_W}1i z=H!Q>Oa46(@5Agv&t-&DmF4*Te(BD!LZ!y@TQPHOFXO#Wd|x_|NVUw?ONP*&NEFder+@k!^@fU6Ne7 zT}QZ|N~$z3?r{1qT%5lyfVU*Q74KEI&PxBE&5zc z_&HZUHvf~}JS-owwMBZpTpjhd4aZWa|9m;`godwA`7uSMlGZkeOzpS9tC!x$*X3Ue z(Z&zNc1F z5xMJDq_g5>#_rsAggMq`rVotF<3AT(c)o!&L;N(w9q&3%;rBIar{5bcw?IBUE~oys z;aJM$*Q(#Uj?m@D7L@!p96XRds-QME#9ADAVxZ`u2bOP{OHKIqNO?o)D_!;g&| z2Oaz>*Ph6IS6qp?Z743bpx#SLpIuOMq@mnCJF?u_2M;S>cwM0`=}Qjf8+u48UI?cy z@V+L=!{tTZr^rvcoVDhGj7)ySa%uPb!flA`v&hP#^`Ebsl)n5> zvT&dHib(lTf;^$5xWZ|pgrmKe6D8RRRWUtqy!x;3*smCKil_@*lZwQiP?V(yBG;@b zDZR(LnMb;xN-FpF5a)fIzSg(XTbr&ORmSD(kEPw-H|boZ!=EEtH%I4Y-JIM~%Jv+1 zN$ci>tttLHL(j{F<0F0Iu&$)<$T3Pu>B<%txo%GJIS%=VQIhlt#m|DTD7Bt@7}*4UutW8e2?^%u$RS zZ9H5|(&MEr+;_jEUhnmc_TI{j&c3f3?niH3M(gmt3V;8iI+zD2{X_i`^A~zug05V% z^WJSMDZk%_lHw!dyyRle0g;KyAm{d384~(QBG#cQxj!=sy+c;U@=qkA2XZ)Cj^vZk z^GI^#=E>;eeP3`59dA>{h{*EzEln>FUY{{2G8~pUo<@h|BmFL1m(Su+dkN(y`HGv-9b zd#f|%MTQUh&$pdjQ0K>&>G`|7eBt}?Wyup+ccknk=f0oduRU4#i;N4)rIis*7y0d9$Gxl!{^JyL$y+lPNv(``eEHC|qE?1}R>r$7V?|l6`K6^P zEk4pVU*=!j7ds~(nYOfR_TknWS$1K$@V$e=@ns1g>faI)N7~HqZB007$@5Mb>%!@d z=l8*gu|-Ms9*w>$sr=tkT-3F6F);ESHodM_8P;2l)-L}ZS5bZbddo%Q{wSKGyQa?J z&cz(-Ui9rLi%lhub&s``$ESR6QKkylRMI^;-tmbEr#qhC2buAVeAd6Xx+Bl!#ijW@ z>v`uYA#8u{n%(~Mg_$+OI^+hv%sb_E^L4*e3O~ye{w_;-hx^=PS<3wXm!{Nr>dMe1 zjMrO+la)4p3#W_x=2&VI87@iwe{rr{l6izadPzO^3*%a3cKC%Q9JlP8!+uf!9r`~$ zzmfkdR8q(N7Jd&$`hSH=YFBA--nPt#y@>x$QCj-r6<^ZWCensuyWcWrMb>rv<;-Vu z*ObeA_L$e*rfm0okGUPcMg^rk!}M#|bfV5EJy3LA+bD@jR7IXI%HmU&SlBU${4Lsa z44Qe|iaZ+Q^5bUg@4TWvx=;Um-aX#v%$44h%+zojyp@@s_~EdQ?*)EbI2=xQINycy z5?-8F=DTGr2fN-cF0J2&;^K~#zHpnu_4xf4mZNhrdfs{hNucpQYao{-yRrUq|iNTi5NEw&z6t-&+r4 zwL4;qgIV_;ao!>sv!)hN6;Wgt`wU}|q=gj6^8ZK`s_I0rOg)g*jWy&iEmP7HiH<`L zWc5A{m7J)eNDpN7E226AMR9Wtkwy7)fJgG|LSz{yCo;R%Xzw_7I2$ytZ-{CTkEO*&*9qu!?ztpDxcfW_GSp&~vYf zk5<`bd3P*YUEaW~QHAy1u&l9#b$^ubFa16JQTpNdv?FW6@u{a^E#9W}rOe3`O#lBr z^c(SW!|#fD{{QpNd*{pBR&?)#b;bQ}+3^3QD4O$^X~H)BvQmEkxxEQl)0o%O>xmsa zkTtuw7I}TLo-Z!$g#Os~fVU)TVS!rOWxZ7(Ts!Nf0^#3aphz2jO&bgM->+%QF|OO& zlC`A7DuP+dON>8)Nbl3E)kl!6Ko;-Q9R1vfRLokNACbkIL3y86VIlmT6>(=hPggPP zt8i>_-yD~li;FuR`oe2r{o3{%k2W9EI(lYpKc?mX(F^~b_h00^%n2Xyw|lTg?x)|b zmA_5PyiM7ek(PNkX6^DuXYGyDl~s;5uNX~N*7ANo>9P*|B3rv;J34=t>=uVrPIx`J zP_|hhU%0#i%Dg~Hb#%#&&(BiII#JKdf`2V0p zs0xbyJ1}K=6S;7E9?bo}a0Ls?3;AV6rVsa@A6?QrHzmaV-mVMbdnI{2@~eLxpZ+WE zU9#-PhU`I+;e7e< zpQrOa%^nevUyp8b!`b8V^;|19WlxL@`!fIkaI#d)PA;rJ((qCJ|Acu)M84x0eeJTM z@3|h0aYS*?b{tmU49K2&Sm~oAF6$VCeK{ve_5v!>9Jl}XS)k(Ib)ew?`}|!uCBk^` zpAms$AbKAbJ@@m!QddN;Dtk?I{cl^V*Saj}7v3&=-EX@EMXf75=TbC9&f`UOzn;dM zpS|JNtNldHg?r$rTsI3BdKAJ(XCBsH-#2^XQ84E>Rm740Kegl4h_bG4biC~Ob?V7c z-j1A@f_3L4NK8)UL%IezHGP?MdUn#=nA4!Fp)4zPf2n4*8h5vx2=@_l~5_ChqkU# zPOqb7CG$glqn1@ECn>7FBxR+Xk%yuy&HyVZOVDLsK3clh47|G%C5 zPt@c7^_*wJ>2gy=-_zl0rGzzf#S|v|4XpDaDsBCBBqs@B5rD3n%fmZQ|dta|-PGEhYu>M3iw+FmW*ThnM3->R+P=ynn8*dc5mB8t=m%diJFH`bI}=e=&KzmFt@yahbo7jDLhS{6@O+ zu*~qjJFMh??8L5^6GalF6jNz+j6?D7-^@|MdMQ_vC+ER(F(q$&<*m&{xOM>xF z9qZd!MaPQxbye1TfsqAroNh&B-7|5zuK2vVd}|YA3EQCVJzF1p98+i&@!u4U`$UW(Io#Y;ytqK__9@7Z{L49Dv<38r6{NE5d4`uMSR&tn}LC$}x~ zo{l|5Z`*6Ve`J=^_KR*yIeo;&<=OTiu`#+WiK39>r6yZ)uuafyNifrW+4!0*$2;3{ zx+mkcoFIQfQ$-)=O7GuzxeQIbbY~lz_shBz*%ie$UV5A*HNw;#+o$O-05t$60>oQWKp+b8C`$mMmP#OSkSj6Rm)CHEJth;_7u z@p*f>9vacHj_#!d;~!{TU0S?!(Xlq$px$4OK7L|zEi|*xCzzRR%jx}FKCeI5YyD$- zo540F?>IRhxx8*;Z0^xz`oEeOeax5Bd4sxFW3+8ST}z_wpBUYrqR&q8ayGf#yrcJV zWXbz|wL%VvVr%NYiqZWOlXoQeX8>2%Zs;C}k!EO4(!Y)b^GW0wsmC_K9K_bqeNtZA zvGELeVt%V^HUm|*p}0lxVgVj^I9}9a<10<``Xa0uyx)9b2OSb-G}kg8CmlB z+27ZuF2~e0np&Tz>k?^9Y$YwnNONS-`&8*;+s-?}{bR4Zt}9j)a)Q~At*K=ZU*??0 z*42Gw>phZSEhj=9FKZifo)I}TSK?5ir%{V z4DaZkPcT;!n_zt9Z_RpaMIXuWx`Y_<&tufdHePbCFa9~u(fv?f{PSMCuD4uXY44!n zY^1mTH6pm-)mlzqTQ2v^VEE@ZS6@|KU2lSsh9*AmI@?6Y`fZ8H>zQ2H zzYf~E1XCX~G5TB`qkkiD`g~<;+0HG~d|Q`LpKZK8t}5w0DEgX`D1N`W+Conpkw{LlVG;c+b6Wfzfy}nuGs&`w(ccc+mm3nLKCZd$&~5r=_g71oUD-< z@`-Gh_l+1MQ@%E`%`1#o%q#h8WM0KzWAkeBIlw$>8WUUjpU zs*6<*YqjdBZnf5^KdC=i+tn;J%lb~uRKQf9`d-afZ&*L7H`OM~Q=9EX)y%GF zS5--Nb-Sjz&OXgPO%1Wnu$!qH>=t$lHOy{hw^BFR=i2SmaQl3_gBodfwl7qp?2GIk zYK%S99;(LK!|Y+|PJ6gLT#dJHv2Rg-wkO&TstNWJ_7iHVJ=1<#J!n5;KcgPCpR?zw z>GphkzM5gbWWS`Iv|qCqs+sm8`wcbAe#c&_X4`+W|E8X|Kd@J*x%L`6MZI8W+L`J# z`wROYYN7qL{k3|--ehl9i|sA;Hubi>!``djv;P~g)XG305T{ZC@qzLxJ5VW5L#+?g z4>VR^2hIpISN{r}6=<)v1v&;gshAdDFviCTPoj2`$&U?;# z_I_uXv)ullv%>i(AkN25N+95*IT?X+&NgRzAi+8491J8f;EW4YaO2%dfjVw=w|by~ zTidN2Xy~5qo*roAwsczt8oTY?_JJnu1?~lbGu$q2mq1gur`s#g%)QFJD$v61@AeOz z<=*U$2()xZx+4Rv+&kTS0_V8*xl;oj+=twU0$trl+(!Zzx{ta~1iHCTx-$cpxKFvW z1DCnaxz7bc?p*ifKri=IcR}DPcd@%TaJBoE`);7WyTn}oJ6JnahG!NVr+bOnF;O^LNu@?mIpdJtatHFA{26kuJivA;e>LSn{tS7D->2gDxf=N+ex+6Tm99npOj424Bm+59 zvXHaobL21ht0J4_Uu?JXXUI1Gs_@I+E(X7CMyN)4qat!8qmq<2DjR1bw=!BuP2(J+ z19C@Wj40!FE=_UoMX;I^Md(;IOcrwMG2TMnTyc8VZJFRnQxiz$f@RE&3BRCGv6b2iMb5(s~Pj zBcHF%NA93HB6m`qkS|c3k-Mlal+#spMZQp7NZxLWU!3Z$E@s<9^*|F+AxiG0dXejL zb+edigc>21x<%b0SE-R|q_k7Fs#_&ajZ&kewttO59<3%I-=*%78`VVhB8NM--3 z!)LG!WtSO%(3 z)IX)1+N3r~jQU1>BbTYoY71$;Ro{|+tJ*4!)i$+VPFCNk@5r@7{hQd`YB#Yzs-KA6 ztA0jvP#r|$DNjsWY(q}AO`Fl7&7U~7vXz8v+YU(34%!@Pc8nb(0Xx=?m6Pl^8=t|B zx8tR;UEVHFY=WIYIVagCAt&025@%PiIVSB&c4hQc>}sU1ZdaEWyM|pudf7GYnsSzZ zeU(c7^;PQmS61Y6?KW)N+HIwc-Og?&UAfAhFBjPz><)6CeSv)e`p$M|8Dw{{yU6AC zh4zJLy4l@Gd69h)DZAU-S9QWdFtf3-T;`7V=Z} zQ_{+wZO@i-Y_4p`T-}kMv!6qL-hLi=u05AFzF@yV?JwFdvVGZpneA)#YjVB4&|XNZ z7uk#CQu_`24eEWzUP8S~?WNMw9|55Gz+Qo7jlD)%*dN;;%Nh13_NUU+UTd#K^O^k_ znss)HH1Nj<$czulZK{S75-wl|Z0i@ilU*x%aUN=JLEy%l+zy$yMXy^~h& zvj0uW-S%$k`qBOod5`@Q`n~pE0_|jI;Jmu;L5-)5*rv9xJ{}CZV!x=_JMJMameEX5Ii7f1b+_xjC?S7P|jckW=aFca+q0gT!(o&C)UBQbK;yt~PC>5gG(g|bX@Y!)(_AVzXF4sUymOY*R$Ql@(@ttR=Q-y~U8jT7 zQ7Sr}oX!&ObaA>$bw+z32|B%;%gNi@=_jUhwR5#Jar!&`kq0=}pdaWAl#`rmok4P{ zbDc97`FiJisp$-HhDgAVL8Ql#F-{1JC8b#p?TbSLTWfO9Ip7zY-cv*Kkdw=%-5XP*e-My($d$R*JE>2&E3joWEEoIZU@7m9gGtlY zZ7Vey4WBQj+rjNfUPi;{d%BDb+>jfR3*26AFEosdiDhKW_Gb5HvE30aeeREs(cI~d zC(T{%-E1ehld$t1_a17w&%IA7x|7|>=%=`hxZDTa2gp0sor;|7GVbz6(o)Nv=1wEm zbay)2$K1!r`?&jr#51;jk~A~jnUwRC`xLRW-Py>Dv)RsbU%-a>?tE-`(R~^DRrgi4 z3)}_N!pK`<7)v+XCe1tUU(qaem!bK)%bs#Sa6d#|<*q_r?XH%S-H+Ul(5!Jk zre4PAq|b0OiT%R;0{yox`@;Rs{Q;YIx{SfxUG8oyVeBr3KX#Wo{@7ir`D1r+7`u0q zO0gHkG8Sb-S%(pPJ4W#5F^&)FaeO7l@gb=#z4%j%=r2d^E&Y)PNRm{RYZ%`L8Qo7| z`v7D5mW=6Vu)52W{8f=xWdSGI*Z35+XEJ+i+@kL;^3vS&XTe_?!INssRHD!3uG;KhOk!0B^@!bI22LwI|R-djc+=KsK>C_yEvO<*83vZzw-ooXS(;J^5#=6qFk`nq_ebM;- zgQ@+880|kKX#b%S{=*&AIL;bJnmesKk;hx(7jZX3-(Ds$EU3(Yx@h-lh{LR+CB-YwuZNbiO@i$_%zfn*78?n{_ z%VTWKk4sKd2EIpS=87Cf%)al@JkR%tRk0Gx^F4g8qY++5BdMest0tuMeUJ0C?@?3x z9_{fxT1ZuO79L2YJm16jI^rU{j`Q(1IK!%rcpB$xPs3213wakw%t(d3i))yd>M8BD zkI@JpgR7(JjgL`Y`xuq9k8y+cF$QTLBSCu?hV~_DXkVg+_9bd)U!sQgC2X}oEs#4S zJPHF};w}7yd@sV#Uc@chi#SPp5wZ9WnX*V_C~sBZ=|R8L;j@wkS=yt{E%+i1L>kYke=EDxlMZ@H~I5| zjM14FWXrrDTi*jo)E-C$?SUj}52TLvKoYeFQbBtliP{6HqkWIA+V{9Z`yO4j?{S6p zJvwRM;{xq_bke@Z1-9>dbke@Z1={y$W6!haQSW?vK3nD*+4{c680~vBw0+;Bx%NF0 z@jc$dXIO#5)J&@Mg136cFAnmjV(pq~UXK4?lwe~=o`7@Mw z9h>k&nrc5JQTrk1YCoix_Cqe$en>Cvhg`1xkP6xlNz{Hw9qorCYCoic_Cpf2A5ua4 zA&J@#siXanMD2%Ezz?zTMtnb{v-U$eYCoio_Cxw=KctQJL;7kzq_g%zI%+?pt@cA2 zYCq&=Jdo4zPZ|Uo$fbdX_#tO&KV*pZLqd2UO&Q-e3pA4(1I_V3F2(m~NgChl2x+fl zr1m;SX|Lntc>>OVx@;a7t7Z7 zL;7exMa7W+{WZx&LroEAB+8e2+{gA=T+H&_J=mxnX z%G_-o82jb~DhQFy#_5?(|iUP?>trPS5_NiC;_Q$wmcHJw_F^}}9DLuu?Z!dI!H zeU*yZS1E_DaxNZ18>fw&>9oafakSskNc$}fwBJ%w`z>+WZ#h-_Ehc^oBMs+D{1!v| zE%mhDa*Fm_OzpS0+HYxs-*P>^P}pO+0nf+xSWG;YG331+k0nNXEC~@Fi={o5SnaW# zu059e+GB~;9!q8Iu~^z;aq(E5mU_-}_$>kLw*{)l_Wc><&+bbl?bVdiUQI=RcAs%Mv-_0k`!fmJpQ*0>8C&}^W`sYJ5aG|5+MkKR zpP3-#@MrGA^1JbB8f&knw)SeuX|JZHd%t@>wfJ65ZM>SP_#50Az!Kk|X~dm@N2R^? zXKHJIrkwU?;&2h!H=43mNU zCkLP_5I%Xm$SLoOoH`Atqux3m8X|9f^ru$@>^OZ2Q10optpWYifPQH}+J>!REW8W* zcxZ`mBf^acHzM4)C5#68xG{a&gg$RF3Kqi-_F*F!39rC*k*0*34gnAmhph?EguGKY1IiP0rj>f{#-|-?F`5k zX;%>_vmJfVE>+|_>N~He$oa!zzDTDLun>L_xu7Xv;|17w0XBBV#?I8)dA&%Ns?Zl6 zhjk)d;{ltxZWQT8zHXG)?In?m*caWG0JdC=T^CdD#T!I=Gza!ck1s_oLB0fgFUb_S zv;j}g>SRvAfa38{bD6h{#!2T;+!5H8;=u5qQhrlwCs}ceED(dYQ1KnXNd?j*q zOCav*`6B)61MTn6KJ34nNhWlHNw7lX9>Vt!zK8HVewecEC44X8dkNo1J@<8hi9mi#kjaE6 z6P~;r_KV!#1~BOUB|vxz;VH?mR^)*=!1f0SKY(phW1t<-H&ee9d9Xgvjt6OHGX0xO z8;Ed!FAU@lQn{u&2_|2h>2zf1Vtu0R{!T>|?>-fIK2<2~B( z-X4)9tzZni1G_|))q(9Ie`^FoME>3rsPFICx}4*0d0)WxPl%Ul7`)B!{J$xj70tuFW*@l57||r7jPV86ZTho z$)RmI*qXCOWW9pkFb$~dbL#%QGho-}%K$q!P~V0T@CvXmzo-iXVK%H6`LZHV=9kl9 zoyb38M7}2N*VMO>`ZrSk<^<>gQ(%e67LMyJ{`X#yZz=!VN$@`G7unhhMgi^Jx?N-& zeYb54EQTLMwl@Oy(RSLj{VS30sN*~8_>MMwM;ms~h8^tx9hA9)GIvns_mufPWqwbY zJJIi4A@Xn7gWY?`x91a)pXiIdv}fNek^SREehvVA@iTq#Gkx(hwjRXRgV=KLCD_2| zYBqey{2TLeHuoKZbHs>w7xsw}+XhAh&m_d|5F@TW3>71Or5NQC#7HDhBF|V<90-(C zc|4G}@*Xj&G>73ZA2x|mwKm|#RGk6oVpJo4wH`19R=@!^Ai&0&8^owpQH@Gz_sqciq)?h2EDI=UnP>ATRE zUDk`ymG*U|EnR6#S8TnI^cRx;LegLOKI|8xTX&!>7j**KaM4;Zx(9%8_X$A0i@O2s z??J!y*eJ#&HK8wH|0VR%rIlf;7(GY8JlHHos2(K2Ofh;56XWs@Fcnsd(c6UfVqDQ$ zj6Tnc(YFnZgC($AjH_Az{dd)Jz{Y+p#kjgIjD+{a=pQFW5^YQB1=E4PxF#O3^BUTI z4Sh9`z8FXw1`eZ#Q(CdwXuK#URe*{yxV_){b357_>v zOfg1N*61EE1y;a8F>b^5+bCnqGBL)|r(-7pw%s9O+=;z+V(Xm?fbz#v_xOQ;ZR4rq z&sAYCJPTinF@f-e-Y^Z;ig6cqa*a0bnh2DC*IqFu(kBz?--)ll7BTL|=DSD1Lf9_G zr21mq*AwXD`>=U(06GCSPNtocDdT?1xSular;Ph4V+v(FKwnLzzNyqVl{O?d1=2iB zcv^Qcrc>5qwEr>M{}^RGL77id-mH3JJk?5!+55zJy0sW{+KcgQoEXm~it#+(bBUXa zeRHX6E_KbLu6ao?3n=r21~6ER`PBI$X45`;3YBMd|r&VY4>0Iit+9^F_vKSQsVyBMvUb{#rWV}F;--Y@nL1?12bTQ7%S=X zm4sJ54(r8OwONdhW&!dV(yqA_9)>kyd~8BH7%#>ra{&84jTd7rWv#0tM#@-t3wDW- z+60EdeApyLT5aeDq)kf~Bb|Kd*qA;UR=`0qGFrnJSOh!7$ZQ1kO(w@j=2v25)db4m zKkym+XDcI{dUB{|J;%p-j*s>G#Q3}=jDS~QqZk{=vtc022HNyRMYt5E!fG-8fertd z0xQM%iuQfg0mj3-ut$uqTfj)brmwMSBYnPc5K!Jf`--uN@;71ACT!ZA1arjLG6d$s z7BRjh{kMd_od@*ow$3mKmILK~M>#ua$M;Re*x6HzU8}|Tw}MVUx_^`I-}KGyb}$y+ z0rLIW04Vpz`9QgQ>Wc9b>3>=w#$NL8rJlWn|3h2%#RFykm*e1nwD-VUK%GBR=g-4| zzWRBK7zgW#;dKJ;wn=rKV_YYu$|`^BunTP9UT z0r!Qg60WvG%<4^HpqMp?tFcSWT3yAg-3EpOvoUo{F;7Vsvo2-S#r{)Qi&+m{y$)iY z)=td&lVGoyr?(Wd!APL&hM$Ppm@=Cz7qcmL;4_;oVt}$+kiI3HGe^ueoy2U{90rMr zuWO!{Eav%?(QyV)XD4jx)E#J7rxkEe%nMq>XjlZ;-x>QmlczIvblxLomsT(qmcRiq zyOO>u>ASuIyT!b)C6N9?(s!>c=EbI%my8kfvbAFNWJa#n5SS@uZ~EuT5n}dx7by4Y z=0Km|N19h}60<*dV*B?KGpUJ~*YJJK48Z1r*gO!M2V(O;Y#xZs*R}!bxRyGvrK~}% z02>D_0c^akJxm1B59Xb!!5v@{ye~YyDdv!=V%|Xd8{UFlOes<4Q0yE!2R4X#V`bWw2jNd_407(!W6Z7fJI{ zU%-}EDvJ5)G*~OFFU&N?yQTQ24j%3nr#%h(5hTPf!9 z_F}H6E#`-H#az`w%+(9UToWhe$Jp~J`)}<6F+bz`v!P%rQiuo;V-Bwl1?TOG`%=LU^6Bq*Isk5OVr!E((9&J7?4lWg|K7Chz zlvoXB1NJwpBUWSjt;v^SHSI4}Gtx9~4aA>G-DgtInbgsedRtW%>)icfwV{kQLxDbQ zlPOkP_CZ^0YCA!!cJ0JE?+3BkQ%3tqut}`*vG089JbxkV5vv3FJJ9Y9gga7C$1P&v zGgzJ0i*>;?pq-t0E3k7b7!I=_Rje-bVHe8nLf!ZZRu^<#1%?6gg`~d_d+`x?nFZ+o zZZlw?SQlZ}MKfW8Slt`Iz3`=27uN>rxR^S7!~=OQA@3zUVLD`sb!kl)1aHA^u`X)? zXoX6>Add@0lppz3+;3ALZN+4+O-TO8E~S5bGi8eK=XHY1r{7_D>%v)??&( zoIZJ+zL-I}CrLAtx@L_Q>#03r&E7879QyYecozA&MPkkEFV;Njm`{JqUnbUzJH&b! z8(&Tr>y;T|y^2k*Vc+Yc#9Gt_kQY<#TeR_=20$6_RTXP#O|h1}CDz}t>HWd5T&%yN z`}=rcUoCG617SL>guP;YK>iQ<0eL>a<`3vA&V|;Boe{_Cqic!124e_u|A4}4nUheng{g98VB0IaF_$B zVlke#K5hv^VK$)qgf@Lj-n9>l^%>=SMq5(In|e^Jbn4BZ{!H>^%@iwVvRI!BOcLvh ztz!LyKL48Xzox#8l(TWPSpTG5o9e<6vA#jKnX>;?9q5C9k^f)IV7FLX8bA`ze_Pf7 z^?i$N-wuOkfikyZ&(=;rpKPW5TQ`ffjk2~;);7x8UK=LD`*1+4@2Km$X+XLit>9ju zobQ_eZTX(I{XqZyFbEclwUc)4>;vH2Pu2Ar~r3C1DC>NSOPzY;{Lq~ZW6`Qy2>GsGY9sH za!KcI5EVn7Sl+bY=|mN~2BZzh?JTIzNeNnY%iK;`{rzDH2 zN4WlSQKwU0!#Gim9v9W5j;J$8-@LD=7KB@3drR7NHuas|3ns!s_)=6W+SdvjTMYry zwxX<7J4BsR9ccGC_X4_e4v1<^y4H(Coy+&R;{kme1*B_({cX{=r9ax$g_W>b)Ok~Z z_MEq0)cM%lfpR-ybI1O$L{ul*e8B`!U8uk7`=Yv0?nQlpdM}zMs(S~ZEf>!h)nl2c zp3Q;sLfwHnF5fPy_h_Jx`qUJ4Wdi&lsxS6jHA7UtVepQqt4V+LYEhhnRsVER1Nw?`JX z)b+h#GQ11eHUyi7TnZCmF>DrfLuH`PZWsp(;44u>v3qDIpe;jb>(KS0Zj6C;Faogi zM(iGj&BI#3P?!bSJq#Of!p57h@g{7%2^(+PD{6QXKsS7bsGGNn8i6ghQ2s5i0CkRx zhxRZMD1T(C@TeTL0s7+BIe>1I2`ym=%!E(ifT%w;g(R2;D`1bP(e=X3}agPvwWFb)Iw5Bi=mH>4=+6IV!^exyUYI;i`Je~B@ zN>>AERxL(SMK8&yUvw;vatth<}3kCn)a;%6o$HW>DUYaX|bG;-4h`N%B9r0Jeyl ziS0AT1N(9&w*Q6p{be$&6dt97&M*P!lUWBuJw@L>H3rzHPwf(gU!-P_0K(kwQcqL< z(<6aCdwP$kIn9B-nzImo5cNz;z@BH8i+Wa|Cp-&Zih8ap^oHq>E{Zum^*sCJ`6;kM z)Z9e46zJQz*f%c#oq&BdkMi)d)C;YE`d*;E7Y_a(b#DS6MVb5$SI>0!%;Zi8ApsI5 z9N|7h+hARXJAs4yt z`#z`t->07$5+DP^?tb3S`^!+%J;PKzS3ULAQ&mqjiR&x?l(#OC#IHR7Xy4ap-`8m* zt`7pt1E9S1$4UIAFW>>dDnJs68}R!LX#WN)U^f8c_bupui~7L-CT;{jY@7yI0yskA zcc|}oXwP?V1MvTFgNU0Z1D*%`2tfP49|Cw3uojR-;^r{GgMg0#sM8PVgC9_*A5e!M zP=_CV0J8vV0Hq{ui2y7Dd=Jr`dov04Q@S@V7ny_y};8#BFH9HuT{({QtH? zByLBYw@(8s0sI80C2_}Cz%zg!0re#QJPz^Q~>BfhZA?(N!$|!cm;sZd#XuH7y)=35D&;E5xSkYcL886iTj=f>?H9Q^wlqS z15!!+bv>Yv!~?*^zE*@zCLY9h2UAEq)En?10JIL3lXw{4A9)P0hQy<}BqojnJOx+< zs3q|j>Us=)aqI}7oy6m4=W*2QIB1_31y}^Y|DQ-E@#GUEo|*_)Okz@hzyl9Qxqg4gktc0qqpfOIZg%z0QXL<^xdHd6bok zaZN>gQ!y?VaJ>)*KpoRiUK-k$J{IsSi5U+8J|-~}Q;a@7i=c6D1gM`N)k&(08oAj$}h<$u^eqE2fd0660063u_lPbI{daCpBoxUY+g@d zE9%sWa#~SNJKAV}oFrNaNFYhKiX=n`OES)s7_&(7cnz?VB(HrW`MeFl)&DmD#DMu9 zCn?ZEQcs+54PHc2?~^3;SpwKVQs_jI!giBnCctEp!a;k$e3AwY0pPb05hM+nM$%A} zF+7Q+QJ<28J+1_sp)|Ibq+39D0)9I&jikxIpPE6^ZL>*=dX}W=sN0NGlHh}qVAGTC zI7HH{6q4@r1H44iUHHvCfh5h@MACh8N&2mkqA_Z#9zr`FE+lEu`v8>vNMFDMfK>pL^(bgQih4b|7y!DDW|QB%=p`U~j)4R}u<0HCj)K|7xT{b#lTno0WmIKbn8cmU`;i>xHi z&I17FS@g|wXcyL9={eNpxnz=_$NxY7AW2IGlC(4kfc{*HIxI~i>BZgv;J%38zKGwx zyn&=wKPJhFHpHSXap;HTH8=&>Z9KhRvlO(;V1EB72z5+-9w3D|_3ckddUqH}?}7IFUL<`GL(+$+--n?8Ad zX%og^Qw$)Mr0*91(1y)B0Hq}TFa+={U>g9Re?%L8d(FmA9 z(y@soA!b23F&6L?U;`kZq?40LN{Rs-05p>XzmSx?lB6?Z0BF-$+@Hh!Ib2hQ0MHjH zNhF;Q0?Y)UFH!>mpn1Uyu%4te3rXqtKK(~PB}p0RgUr5wSU@gGSy2G|?qYAiDIrZq%`r{-utRblpWi;ZxX)#I72_&@v&%T&snn$v*jb!~~k|mtelM_fb z%mus;$RgSJILRh6;8T)4XOrw@C4{;I-XUUO@o-t{3ow zSCQO%D&SGTMi<%dwt!+l9!6hQK$kCXh^Sd#zr8UTIrIOsir`aXejp9HW zg0{z^KVv5X(DvAm0ibE~0-#A0}NPc%Q$?t*w`?Vy0fU;MC&W9UG{%0WILBP9!Qj%9c zPV&Fd&VS+m{*7_?7-RMEev&^yn?FJMpCps~sR)=w^5;uQ{^EH6>acbc0QFtFljJW4 z0)Y2r7Rm9cB!6|5nbEImf`!kjsgw1QSxEp%Rqi?p9qC+bsk zbJHT#Y4(#=i~5Qn%Xqi-s4Z#fXsE5PZ!WGVtD)jzgMq40uiCOURc$G#r50T9tJZ+t zMo(W)PhYR*c6~!ba4>mkzdCpR{2Bb}Tt&q>8*jabnFzVaaKJ&2D{`oz^*TY&37%Vp zX=K%%MRS%}g%RkU^SIHm@^EPd!DZK>pyw3QIEBAlK_T~Q3i6e87|SWx8HHF!1&;9w zTwv(h__=JsO&q(Lmp|HBejO)(qvqFbK?A37fK%{wQW(i8SUM>fz(ZH|q7|oEx3a$S z;pin!^er5{@lp$n7z3jj`54iDqgmrtHU#K>jRLM32`WJs2z?2kH4;LjmoS=-ff3~! zF^aGPry*hj>H+>kF~rbyqz7P@{T|<73WAe`_aSP$&FSW zYXc*9lV?rN;!pSHz)|4f(LJfzmtMLr%>BT z!8w*)C5sg&zqPT{rnD7j;~lg5SB~Dn(6w33B{!cF4hgRDB04dM1jE0CfkZxRK7iE& ztAq>^8oY#<>qw+*UiWrh_esveD|zAVoppD~1(lq_9!|mNq%e+Cxc&b&g`2Kwg%>0O zvZo+3gC1Apbk^ibgB^ot6{ZE*toj3tl@uCiv+8^~ODkl=_ahlwE0lp-3uA4CVy?I1 zoaB95aYf%cW>_yb`u6|-6#P3|(S6^Z>C(5goqY>m6`Lu%Z`IDe4Y<5-C5)GZ@nRBO z!gxs-F9m}Yr=U~g6h;EK&=?fdBd&|o2M$|(y3VxUZpD0xq$>4afcFeV65)M@f zDhX%31cPI)p+pa0vz5mKC3>Jlef4PiO0m*lo8Iq5`UZC;HeJh6v>*xh$87r6_o)PT zfi``>FBFD5KPPX18cxGrqQ|E4!XNFy5DlTQ_OVxe@u6iMt7Nm?oS%sFFD{_xZrqpIba${GFyp?^rhmAc=HM>Crk`V`1-NT+jDEKizhD&S z;a9hHq1a~AJ;x}{#`h_pQ;u3$$-|}}Wv0ovGdTIle6?PtEhBDgB>$~Wr0I6(rK@#2 znXQD8l!N)zOp)qB%&$sJ76bg5 zAXXrVwVq%UndEe?`)oe}icAnIn23U3gF_M{S(Z&7GKq^~1)}&8QPC#qkpI*Nn+&$g z_5(#fQ1ruS`GJcW#YpvTHO3*08QDm6uR~bjN9<>!iv?*QqD!zxXE?-K^63 zt!wydpirBQp0m)0T8(F;hAB32&SECO6$o%~)JP`5l}i%5`&~pM-$fK~RnZKN(2Ns( zndr14bCh!z5sKChL2E~$UZLQ+P_)+fa=FR0BZJef0}Ia;CLJqNoUW3QajpzG7}u)} zIIz!jHQQ(kn89hXiSxZ&<}t+#|HssqK#xoZ6Y2Agx+ZB(c`=>6ayY8am z2Z&mytIyH~N~O;M{}w!kMN<)(=pFj2Vo?_fb9YW3~0q=mZm9* znx)B~wzI)|1SRA0HSJ}WDw^%o=A3f!D*WjjKb_;}aQurLzfUK=Q@Tqizf(6(WV-Nn z_V!oywwJxlR(QYPi+*SMss9Fyx#)M~up%)#yEMN#Hr&Hor}OvK>-7!!IVpuXxS_hj zl$183hbW3w4JmX{v3YxVsU9YKqprElv!`rVOwG;BHC}@DaS-`g)TGV-PnzxenuGiF&V7LVq5$^}Kg5dRRdw6wM=Kd+{; zQR%UK`SPEuJV#8PJo(6xBL$|ObC)h%YU{CaWB)OCJ@n8+3ucWQcQ>vN-7)133&je# z|B6!o!d|-9cV$Jw^cbtlKDa7MeV)C9HQD%$mHMI>-#)zt^)`7Rv59wxuc$=P8us#D zbmuLUmX^oCYQBhueu{5m}H>oGmN=py! zI9Xm^e(_i-riRVCv0i9w7ELlq&7wF%HFC6-yQ=2oI`Ci9q;R?LSEnEZ9$;_s;552HVFq}uaa(pU9F_&8<*#4^P zUdjI=vq}ul6>v;wOJ8P_@Ho$G5*6)&!zSUhXIzOn>7p=?ggZ3M-)fjYo)>WN>3q8| zq00`-jOY2F%u9MsSSfWVR_d^6+U&?&A)EAbnrJb;n{3nPX3@{M)7$hXgDDYrm5%oR z865o@iL?&1o&ZPx6&&q{e$GqH&OV=(pP%nOckA|z>J=*)42Iq#_PNLD-oe4adFyxX z{Pwd|t5)saJy(56U8u&W7VUKpR=K%&%~S7kd~-V(^%Hf3VeRUpN00ve#p+0P1U~IJ znG8;UAM5Q1z!Qp2TrezFdf|l^o_tVrb9(42D=RS_b_&lv8$;jFSM(`+;nRIoO3la$ zKSX;C6auK7+L0fge;y+EBN3s$FpD;b58rajE%#0*8?6&&;fwY3mbhTrv}ubXX}vHD zYe2rDTNj@e8>2p~y`EH;(z+X*RktK3Ctobh%`GbiBj3Ob(^-;BVx^Xrmcp|Z_hy^E zwY7DtMV&py&#x|c?AWn4F!^+o+cRUOva+(u`pIriL``3iz=urJzFZbxWWqO>z4c{p znysQ;;j;L~G3KHSL;%rZ@H&&;_mDt?+beRhamuM^M`*FpI_DYh3}NKa2s&2JYy zJiYx)Mj81PtCT_Q?M5$ZXn{1R)`q&eCcy}4(pE>_a(#V$lU?@oH0jlL+kmPj&)&Uz zdp1=GLS;(QA0AqGNAIeP&F{SP&iCn6z3*7?&>xb{Rm8$hp`bu&ZizJ%pUcK-G3@tI zO#V`=_{dN?`|l|A74|a3pYQ1CFb@lk^Ol;kjvqgs(=PRkyl>u|NcCCu6)FA<`Hy`$ zing$q^vX@X_M|T()mPQ$<+%_3^^X&107_|sMaVN&YAQUsb?fTs>NDzmb&mFWPWLN~ zpN{Mto3+J@npG*Y4-%=cR9LXlPPJqit|`yB@2Y z&|F?#QBhTueKGBPR|G9(T7@e zpyzFpB`NoO3Fm-kFrF`9Jm153K7;Xm3gbD!-{0TI6TU%NzL;5q3|wxH?=5xq_NJz$ zb`n(dxN5I+bG&b!GiOe4;hA3#9^AiU$AP4zBzMQ(S3jn=R^Xrh_{bxV+&xBrgA5U+ zKE+;+DVu`s*Kky7fQenhD>3|NW0!f7-Wm z=gyqGQ`@(1cbkiD@z#kLE0I(IGeOXKyUA4(ii(O-(~I=Awf3H}P*qjsHkbLfWfj%b z)HGAd+0&;_A3oCN7TfXl7vUyY>$wHTWbX9N#X?G0$GP+Wj9c?z-sxqCIud^ki{w%haH=B|Jr+;@KAdUq%JvKtff z{!!T;PyA!jkf8wt|adGnN)ZdpqJruvQt>W~|(jH`TUb zB{%8VTGb(lw!uPcK}t$WK`SJRuDAKUcR%<#MF@<1_~D160)_KmuX^`Ab8mLei7nwR z&15vh8bU^dW6?iQ0**$C~;Hmh7+ zQd(MCR;PrjSR=i>1A6+1iwTU%S$&|cTl;xvC$HBeXnPL4l>!~+p`Z%)yKcfy& z33%xRQGlM|lb)HLlbf5HnUS8Ak&>NYm7AN}R6#N>r#>Zlc?~`mY=LKlnaaxAt1n`x zFXY6>j~mAZ)C2qHFVl5HAH=pfmeI0zV0#o&QquEe47Mi^Xs;JM_coQ4k=LN{Luizq7_q_aq-2Bq){Z`p%lmxY{ z8I=m{($K`GE6z6obA%{t6n5KEAYjgI<-M#d;$5VrC|KBNOFkq^G20XTxIJb;Ab34eKI8 z9~q7BG6l9^!|vVReeuypA4RKAt54$fvf-m6#l^)Z*S&YA`ggqI)FtYl)CaZKL+YQ| zW{jO?x2Q`l+k<16Ek*b2SGw+- zv>}iL!e})&zw6u|^>^%#E{$HFZbq-`j>z>$y3`2j6EuxLS$V zKeJ(Y4ZEHVqkjUkVT{! zjq6?2w=_XOl9t43A)JJhIzb{KV!v>hga?N?qoTSiRX8q2LT%tCQ6AS`p*>wQ7T7j$qA8dp3~6TvUC z!}Kd>ho%Ik1fI3(jRxOlMIN_5{%HKEOsjtB?(xeNVdGr3B;5(S-}GQ8R*Qt$+hEL9 ze+De2L=+xPc$V z@#k~=Xpa9g$8U54zmnreaQx3Weh0@N<_3OWjz5j#f64KEIlf0Hes`R2)lH$ZY@C*I zDCYVXPu`Nv6{oy{Rw7U}1z*ybF}6{?59`Iv4$H8o zLtZf}Gm9W)D~eU$w-TMEuth~bmhtRz$dMU3#}xvT_Q-?UinHZcgR_` zwoK9L@VQopfEKp4c;Dw-TU_>A8}Iiqyx))UelO?!9_U8DhjDx#j<0fj$Bb`v10NmF z;-G6d{u+)ymY4sB|Eu_;uc|+byk;IU}K z13-$$`90u`?&ys3WBz!vS)_0?_xSsfU^L()qV{LVcC1|rU%SYE#IPSXj~lmte|-GW zqw(>lPNA^nhNVk)?;byX`SLh}>_Bw2s(~P1))c<18T=gUn$mS$bFQX6x?4@_x7+vG z_hB_{ZmGe#_)VWlfs=f&nl6t?T&l>64}Emvqcd1Y;VSrP7M4>*4k`5&eVee58kkT|~b< ziQdF-2HW&U!VuQMV)Wqcr-yNe2)*EP^2c4EO$UFxg{-*m8l%^h8j}?I!%6gS{05Hx zePQ$o?hqF8a0)^vwD3F!H4$|*(?iO&;^>~$A{}OGv@GgW7U!UbX!Ix_!igH)o({T4HM%(tx>jAe)*q;ClNsZ4buTd680*bzQ>|fC1(QZqzoeX= z#vN!@nJLOalj*Kb9%8yH8*dA()GcOx&kmE!LJ_|QTC>d}_y|KlE&`t_ZF&!|2>VHt zZjHuq<}MuMqau+69Qyl2^zB5<_&Jch6H(Gca6q5)=Tq9L0!C710?ACz%+40YloT(o zloZ;gSY*vv=Pe*CT)g{4qlfyYN5xv(GjjJil?H!&oLpcI@ub zJ`$qwc)&Mc2UuZz;Tc*KQCr(W&NlZcDJea$?+oe>t5i{5K_0BV?WLs|8KtF^je6(_ zetuFFN!1)Xv2aq=)RdIO_372is}T?I_4A8}h!{K|SoZU~`R0g-=xCf(e@^jLLwbjX zh7K}USN9L`@$oUlN?BRHzIAmDOS4P8q~|tGFPMftfKL56zj&f1_+$iO$4>oPW1jM3 z_J(YXd2`MMMcNy2&yagC=F3O?s0g?J#LO_80T^@oRIR4vidD?VW5_V(M&DkF^v9@s z#@>T5?_cBX81vs^%ok$J{|fG!hcRc-D*Y>~VEo7~s%vdS(kNsT?O|+hDT2E*JuSDg zqQz)5iDIn2sS?#5+}>`06;E?`wl&n$)Z1m4Nn2~lTY}lNrM*>$GjDd2+-@6M(d-3_ zk#}2z&{2|n?4c+Act@}LuP%Mu8wm zqaJh){BMJ)bfd$5YFghgZ20gkTZZ-Xiu3kt$$;Op#IxssNw?pATO=$|FNhx>>F7H- z8fH?>@hQGE+P7kTq`FjHYFbfQIb+6*iG8tJDKNa*4Te~`t*)%7=-@jshH+;9_z~KvUd=?aDC7(IG zW5@R0yZ7zecQlzkR|Xr6ZFm-+=KIup`uchaP25*m*8$!#iL%`;in^wDg!D94H-X(7 zOY)0qn%mnlQ?p8HD%(wPe)@O{9Tizw6}h>Mjg6`f!d4NRn{P=wWX!Ck^4W} zD_a>&>S$;XVRUZiw$Ljze4J|3HdcPwfZyn)P@^mp*aSPu^mgDBy|pqiReiu$_GD-&we4(0+DbbQX$xXnA0?!5EP9yuoy z_U+rVWy{`U$Bx|qFZl{L_0dXw{IN$Lee|B2gd1kADD@BQ)%(-DmH`&a*kEJ-+itt< zKV~>x(L2Xtg_fEa^*Qw)j2qRZx)tvf{U>}lY1_7KKkwdk_?KUPDK1S(NJzK=zMSAI z;Bm)By=vzI+}CYBom5v>mtR!pYqxuZ2YACye*=80YpH0AH5q)F7|u>P-*O{->{6zR z*_uqv<+jWqF8S*@vJXct<;WQf`P#OX|FGqk>TcmJ_u<7S@#2}(LeDjK z*8R$!yM#ZUiSsV6xbD*P^0;2bpGCqIvc%`WfdjuDPR&hDuBw_b z15vx~&U?dihF!bTPMxn%#B{a}#r69o(e)y9A#v;0C`9q#<@x5uUw3Z(dDpJJdl5W7 z{F>9;*T>ttwnZ-)={~kB#f{1;Zti|b@|Ih4b?NDt#p$VO)kQ@)ISmbnEp*Q3-rcNA zzKC&ct48>#_#j)K9Ga5L)hSO~oocn!$xB!w|~PTeRM{%4C-cWl}1qV?ag>VLU%UB$tpuUx+VBaWVOLkroJ z96s&J#q35o-qnic{tR#_UFAH-x>n2N@n#4MTs$y|aT$ThcD*>Ce2(u2=?08V96y8O z+ugtq;P^v0{*N5LlH+G};&;EY1w&+`)OwEW!;y*S#EVbj z#W!}=-DTfT|j2Ev6TY+6q|gQpF)3x`3PEqg_!Hq+Jm=R z<>emdt#0Hc+pk~pi5<+h5?m>mNm%bA;E^JAM})l?gJ5AQFPYi0!^|QLHg{*sPD(nI zd?fkM=L+W!lhJAI&>7c-^<6o3?bwwE^v2IQyL9V?dJG#gwy87t>xtel}2E~MIZRfug! zO-)U|n0b*IeAtKFTv&QAMps#SgpLlIAW3nd)2H7)XYTE@X3auOXYHVgw=!gwu%9K=?^PPF@Z(_^=OIS+MjR>BiE7 zC{mly))p7q(o&gPPy}k&51FLaVrKGTA8Ko1>k010)+2a%AyBi_p}{!ipVM#1v||>J zxrxo55K6-Zrdcal*ymzOW)~y%$A$UKo!FP}3-Zw8_}R;xHP3O@%;c=u!B|tnW&9Nm z{uz>i0497Ja6@ z-jaVmDk*8y@Sbt~TAN{1%qYgDrns10i(ACj8raH=%;?oYt%__k`1?{@n+=So_%xdW zd-duSXll;QrNBUgtzTG}2`tE>xfu6Bj}7cMVDRt}BSuV}I%RM#oGz2w+uPcywA2Rn zQ+xspDgp*oL%^6Z)Y{T+<1@Q^>-Y}lDyn2}KJ4uXdxH;28z<($yV6FlgN@#zGA~Tb zCEpP{ao_1XI)P8zcf|ZE+;`-QJE!l6QSIvO(N6ipcVtojhF@{-5v<1!@6r9Z=`g6AnEwnp9lXpm z5O-a3IxzmZjAGXjcTj8t#T^>OL%>E(hrn{mz#Vcrl$sG4*2$4qdbfN?aCo<3I=x%$ zAY7++t1mur?-pb>yjuc0;XEH-I{jO$-T(W!BZ2DG-rsA@VdgEk(hINOzL8Hi?JWP1 z)G&B!*jaw@4pR!9w6=&dmu^O=KCyX9Dob+eS4>|(K>>U48b++aSMKr6% z&wF$)GuQ1gjdmn`K%Z*UPdmh~li2*bI!iz&^+(hPH0pgE)W;!f0P_p?*QiI*Inqv( zW1qs$zptXtw0WqJA3B0f#4Bp=cUkRURk_@l;GjQIovG1}aL_+WuWOvi^6#)L1Xg-e zn~@s*rdBp3|10`6^n*rU)#wxQ>Dxfxf=}rEDx^;}IO#h_ll8tcN}~&ZOt6WGX>5in ze1_^~vtD0=kA*gI&=@vH70gl5%a=`31(Q@;Il85K8IL%rYO_>_B>xsV1biIb5A4V0 zsdjX~w1Q1kc648(+Pyy1!1Ww<#+m zEqXguS)qq-QHRZ!uyItZq=oS&;(xU;>ha17zK&iQ$EoRb$S`YB7c0=qQn6RVWfY_~ zE{U{?UB=@a?R9pwM(+Kqqf~~FNu$`M8S%-uc+vRl?z7rBzMbQza{LC4Z|Z`NeqcFv zoRJ$^b|oC`XJ>j#glqTR3(Y$99ZjeJ8fd)(_;R zPv`iHIQ~XndbJz)NgUsw<0o+ZYK}j!6TkbJ)Peeg+wS4W!#VOkj-1AjwOQ!0?si`G zP>#Nzqw6@j*jctqd^daECH{=d`tBWG{xM#@;70jX96y5Be-Fn`=lH*M13#1F_u=@Z z9RDK6_vyrUHD@q^VXcVJ=FBc_&Pa*OTH!g~X|0ejcduUk*D{qop1r-w-ZWi)kis?i z?lHEp-=(s*QS41iSTWv#;aYF`htgH&pMg)EIzQ9YxX#ZsOs?~f#GO;;cR6NmNRV_hFsk7J;NioyK>Lq%rMuvXLuRE;(LbGxI_N7huOmP z4(|D`fzgA`>}{rK$85OPu7T|nSPU1BISbS%I(H3a8cn`yV0nB%6}yIs*QCmK4J>wz zQRTY^F9%g^*C4PQbWFczyM_ljRqPsY9zmPA-Pe`x8g6k=<+}zy2UWgnXmU_xyM|eu zDs~McyPw)UDjQmK1W?H*U&{K$I8=?H~kN?UlB7UH!-P z(drcSeZXk3*+|A_Mb}r~HyXJ^{tkFST)vc+A__e#H7got%QgX4L?UEr5waF7V#i(5 za?|R{>rRNqW@<)QuNWlWI^xzy$470Zc|kj2+Gba~ABzZ}0~U2Ov!@HcLeFA(8j>RD@kS zKtbq1Liq{i85k4k^bB;x?|NE&8^a$=tVu6G&!cFDj~~aP22B16x=1K{*J(+avn3VI zmZUpdlE7P1*Vz)6{ZZ~>N?T0B0& z2(P|ugxfo*u)g&$UzH!5vN;j(Nl#8C5|7I?Qk>C;B!mPwDdu64wCOOWU z6gz8D%iA-evnFiDb;q$_d3Cg6#lSjRGuKJuD5p`=NyBALb~8*$?fn zIH+oxl>Ivg)h!y;$quUTgX$kR)mzN;d^dIN)OnuN>bk~3Rnw&GKRBpZcFXYSEFrO{+dJo@zca_FQQ%{DW9$PU?X%`rztU1&WPmEj?y?h|Z; zYH$xd3f4cCKLdJHu$g{`yRLc^8}rLFv#w))i6YaaKGJF!3Vdi%0p&@JE~9E2^?V{K4C}Y$?O=~j!N$fPU-Bk zuDi%fxd$Omu%Tmu8t;&=aSEge-Xax;KhkAS-;iF?3oROoRcj(_X}!=(!_9)Hhk(op z$j`+7c@z|7X67>Y>cwqVgWcZNf>?38LOl$Fy{D|CONn-xV09CFJo=6O-D`{E_c z>#qpIdd7K_ASF+EVPSQ3Rfg4|(@6$N2m2xQ@DBV)2~*s5LaTgQy8E17WSi-cy>3U(C>F+(0#q`#gydGOc6iV$dw)s^pQRfL$tICONm}Fw1z2}5Qitz2uSPLZxb4&5%!^qbY zR9sw?Ur?M=US7uh_(i3qIVIHZ)c(A&?V8O%a!%xC(7?CaDQTw$BHAU6Zcf30CZ@4()i&+;==`#SS8 zuq-P>z+<<7$5@t?LEy1~)YPP;)YOwFPo6(^^w{}RCr=`1>McQ4*45+o6sM#&>zE@%tU|mWHT)#Gczsa43hJnOG?VXNAOgvXG@D-hg@a`*~5qlXEell zvxtT@3ulB@oo;LCFj$bFFXg4<=%dq0Y@~XtdaJ3YTGOch<(X%eo>GLdo^hC4{9sI+ zE>D6czr-dL7&W~wlo)0 zo7b4>B+M(!>=RC}pH<&65CvO>Aa*!EdIn0j(!e;Y@WBEbx-8DB*Kb56tdxOUsSy={ z_TrcwuIe&qKg;9tu(K|=pf2AgGeJ=`7lMM<#T9PFegO+pOREsBwc{<4gaOIIVG{12 ziv$IoxSnR5F~6b@iuYqBA$_h<^Z3JCr5|#VhM>X~HX$RK#Xsuj;@#>h%8KQ5Dd#y| zus*Y#E(>{1mz^x9i$KF2vzqCPOv17VtxHeI((T^|d9Z{fG-&M)>pDzH+1Q!jBc2od zF}u#a4`+b6e|jM(ECYpAps)~fft`OCot=F+Ir&^l%9*oCr;?ISr|;j718nT?-07s` zl$4`sNlB-ZlTK!3Wo98O6YQ3;o~32g_MlTcsMJSh>5gJi@In29~SJsdG&{@ zzx?56Wa|9>yYIe(S0Of9^-xX7r~ezh^7h+t(a5el9N&1@VuVnk7uC^0dQVuoG=@H; z(@+J&g;0h4Pk$^e=jmVC|GYsvsZ0nILYZ{32z@zW!^;6HA<6Kv!l@b?Bcus?g#}3N^p3D( zNer^@*9s2_`-C)Gw7tS!s}55CqTiC3xH%f<9md&Jdxe7URZ=IaOY}b_CT@&|v7E>n)X)Wu>JR<<&LSl~o0HJB}%`zp|?8%F@y%rdL*0*XHKt<)V_5ge z9$5C*BhcT+!=o3@kotIgdISXY2wo#R3KVK(tlNWN7 z3auc{`Pik_I%KUT!@{_OT+m$6$UvRNWQ-3=>Y{OU>rrjy_!BvP9>+Iw{Kigv=RCeL z3b$b`lSj8`QMlQQnWo{H#l%rdm{ZemO2NzH98Z9+QNxFMu@iW)b-Y+TD^{Dm66TMD z`NL#y=L(|v#57OXVzzW-v9~bgTKe+;7Gn}>H4nAAAGMl=?- z{HMpBc`+K#UeaDrOf2-Z*V}Ks^VOjrHm>{f%P(mVo1%BCw_)*qUik3CXc)6^Rm0W6 zwrC*)iKjbgC*nDy>09U(1B71M6r{KwuYdOK*Xuuz|Kx)YK6viA&z@R%-=j-jWlH($ zh-1Xdij7I^9l}$>9l{_;)C+i>6}*J|g{N$H*po27g4BL$FT6rklReLVG7hb*DXD0% zi%pn9RaGr5#g!Mca#Mf_f`-G~FECZtXF_in2iG|1eiPag_lh&N4~w$OXjgpmv}>q240xwQ2B+HSbtnZHb1 z!JEIh*X(f@9ypvCnJUG+2Z*{D4mU?m4&_ql$8Rbt#%H zB_>CO@9*+Mr$M@(` z{mwPmsl#>EL+ZX}^M3Xq0eZ-`a5nF~FR*#v5#pTpt|UgEubI~HgogQmhPm|uQyPjw zoVyEGn0a3_ok2H9fX=WvT#!f@d4Z`6_k=)Y(0Y45X68RIGvhEb=VNBh$IKjgbsmFj z&eW^p-Ca#xY%Q+Lbl~1pzB&oQjm&MMmJ?wwEG>@1@ls?*;cl$C?oD`?0rfW`2u5C= zT;ZC6;p%h?t|Z0CtJ5!BQ*vCLkinHGxr{?z?aCp0_Uz6rJag#Kq20T8@BQ_cU#}y} zZ-7Ide){h(z4G_xpMSovurTc2NB{1wu>bEk#DB$#Rp0IX8hJLJc;bnn1t;VGKV}ZS zUd&3XXl-qU1|)Q-9oJbkZh$!?sWrc*4v9Np?}K($f1PE;edgfugRSf=PxOMWD?7CH zj3`|RX{je}^ zQxAX5S}3B?*Z6>1pRG3NK_a#E}z5c`uwc@h1PvSIR5B>cW_=D;>68 zOl!fyyNjf9TD{T24c3t4>z-9-(!>cUyD{X6WN3+ePF zyN2|JE6Y*S>1gpiXz@Z&nSu2>h6M7cD#^v^^4yZD#*Ww?9gS5r?UE$5*Hl&JBM(hJ z%RSStz1`$Z_0143NZIXeYR3c4k&cKxal@pij;4BBcx8hL)+vvM%EILR`}ZdoRt8MH z>#n<|22>vV0*PlnJIaEPMWY8bwcz0=O=L14Djg5h5I2@k5Hf7XvX?k71W9MubJPPo z?Ah3$XWKo3#^JczxFCUmT$w8ge{T15cW$ZzP`e`8C}n zIuwJmPCUoV3%bKJi9MKlvn~AedGsqvY9xZj{%o1ps2Tiq9f zpB@1}h1S(I$U!}^o#uZ!)!uSF7AsW5auIe%+fcI^OeTsQb>-LPxp6f$wSr#{Q%p<@ z9yyfJRGaY6jn`$vT!rQ>rzwiz=DTnX^1YFB<|XUe&v71PbHnZF2iqA$o#sYm>GG59_H&YKi2?U za(^`E-hP|`*K!72&l&JwC-=IKNXbOS%1K3vzb{=@Z4)`8sO>vH_(U)KLt zUWM(v3h8dNU(fM}@Fr~H_=h?E=*#h$#jLBfO|z8vGW=MMeUxM8aBO#a--nmp$niZm zeksR4dwJ>IS{Va4{&0@Jh2vLp{46){wc4{>OPV)+Gsmyt_&ILi|F`wO7Jj}PC+I#{YldV9u-0r2V%D0P&zZF*C*NtUamlweyyl_2=IOlV z$9T<)-Ke?B@v}Jo29EE~@o)aWitlq({hdCy%kv@2zK|VOVOkJ;ZUu~a6&i@!9~cAk ze8?V*iFrQc5sZ;L^C7z&k*Gq==IPI#J!!@B>tom@3CH$uX=I0duD6nL{C*sNEyrKa z@ejIzU&HZ3Iet3FKgRKk-M|mz_-!1Y1wLtdq*nivo%k+!xlrR}?FpEhV>Dh~qw(@K zCogv|$5%&PlH=`c6^7j2%a&nWN3wMo*ZbK*%;k9Zd#G%-Y0~0zg4o^X><#W~ZS`O~ zqAO$P^y{=toD^fzy@5bs^)bb&MS*>xv}FmkE6mXfrnEu8T_O+ zVmYH(q@aRt5qIdICff87W^KHfPqq74&fjTgZdp7iJHPUPb|Q8hJ_(2v{ES5kB84?S zzmfnOyW{*yA2YI1^VPR|sxvjJ%(=^`&e5oja8TW@QEhNg-P;+<8D&O5+O?=Ys!@$` zQ2mWYb&P}Rw;I(F2i0#u)e+5!W6j6^W2$GgSV@*anw{YJ5>y@WoJKPpa8Si#e6wtT_N=lm>;F?)vPW7z-PiT%$^637R8 z%k1lIc(N7S^*7anrcPbA?z%gg3`Ktgj~BjyeN0e!`TY4euyF}e^br{u5jVJ98N~YG zx_gt)(Gl0(jSNDvRlny@@Vabvgeixp-9&t_D<9NJ!G^D`fbiO&{ z>8D4}ia|`=H`*)K5HsM(Cl4Nc^2td0H-h9JeN?;ZAAFE*LQR_**%Rj+&lVH^Sw=nn z8#(T@hCbPsMvhl^Ao@S^s~=C!vl{4^Afk%9Ol0` zRz;Hmz7DS*Lf^iP62UJ{Vm*TB_M^ats`|Eiqy)m3^-cA)4S3FST!^1vm3I*G{Dhk2 z+ixFr^8h@baKP9RNI4Y&jJk^2mNGmv9S;no4g~fT6%{%33s-Y=^ifS8?8%>wbpo}8 zK9$F#%m#qw_4GlN9N+ARd7Ixfr8;%8G8s zC{)~MUNmAv{+9RI-;a5lHkB7pcz8G!R(||3LaKzc7z?~5!QGEn$onL&+OcEmxY=-S z-7$1zP*DE1m5QRQ++G;T93WPlIidcMhzNQX%C^gKb5_0kv13*5ZtM`p!2Vo}pBY## z$B(Ve5262_LI2G{|J{ZD8`Sk_d@lR(sM7UWeBJZCH>};4p2@d+_iUtCiB|hxdPTuD zHw~|s)aS(a@Z87uqSfbN@5V#ulot7hg<4r zD39jAz^1-B{=wq!2r^3*EaHi$q~(eyln(n}*sh<8E-#AHYTb02kc@E}9KG0elv^ zqvfr*zLq!iv9y6=+GDd9yP@eR!kQzBV6MLbWlwi9$|sWIXM)dEFf?@NXDbmT{j&t~`x# zeH!Dc3`H)uUi!A?>O$1MsHUY&4GQ$|Xw0j|;?Z2*Sd$BdC9_O4;A5<*BqbG*RV@;> z3b9Z|q}XtsPHk(m3vgf9Rh8=NE2=sIdSK<1g$|*;9?$F$6qBGijqD_vY!lb69WrFp z;69ScKM2w!xTmjozlc#oh8XSs!-fs(sgv_JzWL^x-{d4zZ(4 zoKj4ue)#y~kJqebrJJ9Be)Z}#pX~eP5DSGpo^<-yFMHRlS-pB-@wa%&=ns|sXT-o; z_PzFs3yAJ}e)a0rh^v?%dGC`iys%&_g1uwCii%{^z{6W^t*dK^)wd$l5}PL4EIt$3 zei5k#XS0{METw(qs}|af=YmHpQcR2f{EvV9W9dsmORa6!6Qj&=`QSUtbLAmtCG;Zuj=~*4G~T6)^}<3My-;Br~%z83{dcT&F2FHoU2(N%HZQP+(t^ z2?h4-p|4;Mf^E?X^zjMlub3uI9zA;Wh+(S1ho4!S3|?bdK<@yrUCIKQo$cAuh6l3L zm9(d&;mJ@4u#m_%C8ew^Cx=R)n3#NwIvrFj_S6)~=wZUG!cKRDcG0+UbL%Ti(6~%Y z%7mpk+?1(_mQSYknAJv5~bLw#XkPE8r|F_zcl zl$M@2ckw*3h}i;~YMT+7(9(iVrnctBCfq8}C0SfzMytN5smUni_3t0j&j%gp+b<*{ zBG@v$4+Q|Trm(c3>f*(VSgD(vN-ySs0dVxFJTGeT z7RNu$@tfSh_vHBD9Df(b&*AuYx`E%D;}7Kc`#8Rl%UUxYosTmSB zeE*Cwy+@5I+=@V8CTJKK~p^zogt?53f<>u4gD6iKNeI zO+a#D!cRX<95eH=#~zzLXy~+Q*+0G>g?%huy4SblM&l7oZ>X`5>hm5%qF5=oIK!*5 z^4e4OQX8)^aakK(P22%o5x?g2H3Q$(^yT)l81(*J^#1SA`!VSK81#Ps?6kDh^9UME zNjVR%b7l_yva`~VO)lkpGBXjJr5}{o9#~QF4}1N#wsxG860l7+#6li2uo=eH?)mJi zufBQ>52I3lgH*@6c1ewRjMU9yPA&4+wid9pXrX$ikXL6>AA$;w@a`Tx2AX>sdfXHh z1v!~2gx~!%)}>gXSFczhB*bRi7%_bIlTSYR$X_h!{P1MNEKO2w z#L0>0D3!|S3q*E*L7S}>!BdI#>(QgJs;asqH5ItibY;1pMd1h-Ua>;Iehh5=vkME0 zOAyXkT3S+2P?QHzcQHG+5b5zsODdUBpqx&ja#+pq4^K>zBvFT!w?l%)LV_}|_4mWM z2>d~72K!d0uLP5kI1q!QFmotl{FdJ=@pt*@VZ z=+LaF`Ae28xqrfxIdf_c{2M_;|2|kBNgvb4(yDhKe+rMv(;qn!6LW2@ceLf2Jio#v z&xdoKzs7>aHlWw$_{HeqzoCa;Lk~ZJ9)1!%JgzGL-08GahZ9a!H8tU}2p+z5l!SAf z2M;Etrl+Q67vv=+9Y1{d@Y>H)O8X4+MRZ_c0lIChPSEGL1poW~ZQn8~$XN$;e3?4G))SAs?0=8*x9ze(;oTT;>Ez2((Nq(=3f#scm2iqG~8!~di1JHVSd zvT*NJm)zvuZMj##nC3tL8&d*=gj5nzNpG7@3d!#3S`NvkZW3UDG*TcWA@m-KacDLe z_udP*cgsz(^!~XcxqywumiNAAC6;Xb&&-`UbLPycljqL8ZGN!Q&R@>RG*dH68(Xim z_X*lFQ&QkJb8t^u5?m}hbh@^>%SatvT-w&wg)Qal*O0e$!yY2-6_dQ2lbTL%+jguD zGfI+3Dc|*aTPW;F&X3*yaYqKwVY4L8y$1o$BHw>D7#5XPk1Lmw}|beFF+9UIr?s z`8=nRv}NPqdnmq`;{T+0cZz>*2yd35hWm|@TJd}h14kYV!<(AISqgU}@as!aH`x1d ze+!b*lkdHj4n2tW-9`I$7{6~6G9Z}<{3!ekg%41;*U*!XB>_p#6A8%cGz$snEH8ER z9VQZx^CM;nh|C#@KPRJBs7%$HA#jpW$1-25X~!E#$GFD`*!08f%NS6^P~T?h(LC#< z-aY@P6?E*brip*oH1TEp=n;#k9%^HBISllD-YkcCktyKgRE1~BG*CGVJmxSyl1No& zG95#5m>AD>t>uGHdw!Z|W1`77KB4j>9F^b+KYRf*@!oKLSXfTRew_2Uowlhew@zFsj;fcs4X zmk@y)N}PjYVsD-mDXXtM_Z_(9)(Zs%wS5AOqgY#jAct*V?(4{}{X?VC{NAFR5{IMU zpEhTc)4N<|Jo56(FW)o&u}9*~)&}dhqO910eY;+GZDlYf52fN5xxbwZm3=T4TE z9E!)~o@aTYgl*f-wI*1#7pCm|{@3HlS$)2X7ola!EVqJjf_(~ZaReue9xICr_R{eg1q=X+cJ2cE6j}6_zx?d%kUk+; zV6ZB1Rz!eb@PtW`fgxal(YMckczIM*)SO%5d~Mu(r$o-W4b4!V1I=;EMt~TLC)&LMYk(;fKc>%$#thieVlAEfHCs+^dN+>FB`k z3Vg#p60;KWCbCCnfN#J3?(0pPHhqh)_21^@A-jx5BvXo^AYl3vU|B)gn5k1^=700e zV&fv?Vu+EeX{7|bKW1Fa{_#hwak1zJp-}h(J}C0?dg|wQ{RA_|57;|bIum4TTD97) z_O@nif*nZURGaFqyY6b4vu4ejfUHDc#10j^y!qyIkDHzZ-MlcRbNckotj|CHJgbvs zQ!0rAG#^W#CdnJw^H&m3M_T&Jp{D5Bo=i`|Jm#O|C0XX#1QEA9-uSTb3G!k$<|GC} zSZip&dG7BQ2(t`4R22&H6aN+A4=xznLApF3kA0}u1}WFGknkIYb#dvb}wd`}(=*~Hi4$vn;a zF>K1+te9bMQL^U{;aKRbJ8LXr4L`KG`15iI@WPJJM+O;k&xC07A4~8d5pv-#yVG$Hs zP--!6YwT=^F&IZCzJ!G61Snaul=+!;W3rH2N66^$%3>EHsC_N#MeHHme0n28<=QnM zzE%AH&L}PH8SLW|5(u*Q@rgh=5PyHhmE(;a67aq&IkC`SL5s!oX@uZ;Cu5EGV*d>I zBL|a*hl`7o6V!CmsXapiZrai{vz%kg%Q>dpB)6IO38`&QDq;-VCw0X|HNUBaQsNVE z<(ID`rPGjB7D4GAgw7kg^O;5bD&l~o04YGEJK$A}_Ag6tNlh2=~+rBPxroQ|0% z)rQO#L-*n`+^2E9PWpXMUVeUNenv)JU4A}vjE6ZTQAumHEYXkZ`=Jz;afA^(9XWO<-o5I3CsX$#$9~emA1BLL0bwI7*kdJ_4)@VfYjVO&e z57`&+61?@uEAtTeyph=`e&?RNyubY|j{I79;snUtil~BT&wlQeciwpiyg)60HW<7B zIZ|LUN4Ap3H12W#3_kh7bRH;&k#wV*X>2VlwJ<0 zS>2oJaY}$c1%^!Zb#Mq6P^lUkl*;~oY$hd9&USXj%!CLYH4)0$GL9`PLpZx>eVlQc z_^L#V3uez5d`!u(a%?Hk7WtBh88LYmSMW>?mlQBH#wJ+5C3V=`h#VUm9S7$_%)b0w zpPAPsnt2^sB0>XdKl0p;Bq-_SWZoZUJFRDGT`9>og-N;! z!7~t>{(L+`D4v1X^sSMSgya$9)3=DMa!Jad^n$XooSYrJZXD%{QI?x=y*ywK9du%1 z;@0fTy}`HKa?9bvhmT!?3l>Jp8V5L8OZqozwfdX%7EU-|WCa>=ZT|70pjhE5uiAa_ z#+PZljRdv4oqCYqL?n?2t-U!m>Yn+`=gfZo^*MV_6esky7D_b32gi!7T2WQ2TgxXM zE3uMDq>RkQRw2W-aP@RW?kP7MYkuCIZjSuf<>BoE8A;UNuOHBHGChnS7^p$d33UVd zeyVwo!F(*jd_0Qzn1cBr5`u$0SyGlya$}wq`=T83HEeG7CI}FTGJJykpLinZ(MKPx zsj2C89AmB=IOeeg1eeH2aLE|6wt%;a+)Iye1u|OXoEU!|3pi#Aiq~c5l39>|;Lr{{#GpoE=@)j^7lQ zVm&nT=FtydcYft!DVow9$wx?^KbJl~T*u6mV{T#i+VL&a(-wT`{C!06rnssVL-?^I zPHa4ASDG%efecA^g(F!|LZn>V=<%dnJa`DhHuK0@9w{p!byM`w=DI0k^?7**$$XJ} zk%g4|eio5(CzTK>_kn1$lso3rnr6G1K5Z?1S{a=!{m|2niT9?nHJjomQT!o_?;OI9 zHF6LeX3Lj<=1=%%w&Q0@J~&%r_MA_9cA`BWrajlwo|QvAkBJZ6%ky{{F&UE7+I;Dq z7G?wa$%l!&noh2vv@nWy;qm+#!MAXhw$OZS3)fIuK5fB?j^HpIK|RGQEx`9vd&&i!6~?i~oFeo{av)ouf!RC%PCQo|sR#6lh+_Ld09BQtP5+Tij$OhB`) z7$$`zB@piglY}NOD8RiVzY+ETq5~kWzuWwNGK+y!1i!uw+fe~iW_|Cy@DCkTUXw3e z*xw)Itb(@#G|JnNm)P|}L2;{8X&Ro53XMvR#iTUQJdFOt+F zBsU}qRVv4jDpZ{2`tpP~!yvb#h?#`>@@541pJnW-D)LH8N~-gUOG~PAvq{E9X@{0% zLeO@sW!-FJjc>}ES__l1k8b@X@FT=-sFC_IVIC4Nw4mzQ!GrURiwrFW{Ap_ceB2FR zM77h6%%`IJm)-M9bsSU5xu*)!u z%Q1@~6%{!MS1&9?jDJy4d45T8NpUgK-yzj-US4HMQ8E52EY3rCP=Xxc>bQvsd)wQ% zVV^ZprJJ<0)TmGbVw%bP%7G zex0@(F6o`R?w;=6zRoVKR@o*#ko7-t)mEGDF#@KIVZY*k9eEhN~+L-yXhK*}Nc52a&KXScs zRJ(5G&BLjTKZ8xxppIkWL1zlk8SiWoMbp9YZ{tYtrb=Qxv<02Hg-f)B6!JDs3#M9P z^|Xa>a|_+Hg?nzTd+2_;6z)f8ft;;H@S+&CXd)} zq^=Y^07w*9poWoSBKa-X!f&}2Y&kj$3xFq{4&>&!0DEo$_MBfuX=!Z)}9W z$=x;=jvPFGVZT14xNoPKC31%W$fw6P^Ua6eSgDu z`}S@5*ZcU)u_4Df`5TYkd)v&JOYeH*jYOd~DoVM4l_T6?EAu^)*KQKW&Y3kmIwPJ* zX7=G#AuX6Sd&vWjz4S&rw%vLp)#+5nGP!8|wr!kz&D;0I-G2ML``?D%SQQww8jT}S^od&F>nmtPE+H|!Ay&_*s{!tNLz-fD(;@tC_z zMq4;fTbN*O!H%}jG}MCG9yolTl6p|Tk)Fj2jxh3}n~7!ZN#AwQMB>eZZ^xWv_3C`q zYC#8LX8?-={x9T15b(m zahWM+?N~d(?AcgQTTXZ{zBuu{XBwIQ-gx8F#%GN87;j_#ds8z0UhDOaz>EY(&?B?m?<>BG!?F|p!iKG9q zn$XRdlyBngddv+7@899=rV}cIw(#$qP^QzNg0^s-(}D1Ind+K4L2Mu?NC0JYG-)A@ z*#V-%PbcW1AfPARb!WkJy1390T<3Hda~@5ntb(?1ol|y9d62Jr=(v}KJZNh=oZR@s z=?XcVuD^<=nu{{p`&95RUP@@Er(@I5={UMHOZGm}c0gOiQ^7N4yy<+-8p4NR1){Lm zW?`>|VXuXey#`)FzDDwsB(R^W_=#l0hOVyBc*+NyxH~qs`)a;IUDBPLuFqb?y?gie zUd>|oFzn`7d9WVlS93nfF9f@k!*} zr5LmQXhf|TzJ5HdCMQ@&BHkBb4UnPx6_U3tmL+vz5b>_FBd)fjlB6>#6XTOP;(bgy zw0JL(QC%J7E;vVG=KqR*5a)iyhuMfvh;w%&G3g}22C+7hCZ-FY5a%ADA_>`ex*R;ZQR@*y?^$kz=;c&2L~TM9EUnB zFNps6{f=*8)O$m?e?OB0?DkdnFNuF?HGEb%`F-zwx$BS5{;?L)FI!U+YkXC<_EhEN zyraLhv@B(QV35O`6)0>!5#{WD6>5d zfBZT7>|OIAeSbs3i4joj-q+XIp>URqdOEtg?Cm|B2DGfTU!=1$Bf)sObF2ErLgF>U z$rTa4(gFUt65XAgw*T_Yzkd69^ZMT2!-w6xqT?n+goP%N%EW5^{0r(qvt47Q3Q5qSu1xMOK5N)B|+=4%C zfiF8eP76}nLOE@r)!f1y+Jd_UW3ae|vCdu>n)N4##o612IUpB9UkSMwUQ9Kgy<=hL zR1lAhg#=5~96a`%g!p6}Of~yt+yiLU*z6bOi=I5tcxMT)g+Egl4KuA!? z_2^pyXJG{7u$+<=A@m47JEPOP_uRR0OKy!rV!i$Q_c!@Q$5}xClFz8`>n-WK@(u1DWnVSDy$`{y^`!EFV>YNw9x-Ey71CN;WypmtV} zic|Wdn1QX7x$fR0oRHME9C>5xCwWSE1<&LhQh@`xWlejpo6`OHA-Wf%g==Mw3LVN=EM#TcGePs^25SLL2NiCmmXOfmO?;1}>x$%@Z^*!jy>A1yHcoqzoeG1c#| z{f0#OqYFa4Tx@l9moG!uVcXgk!Rv7Y(}oM}5D{rl-TFr=jW!`RDAq z{{C9HbGEkDRWaK*SA{~*)6rauDjPL5Rh5?uGZE^TdHyK$Lhnkqz%^jc*5n?}#b7Xs z6>zW7=`SM=r3n!#&F%fHP&2DpXAQrQrj#6MyB;5$`g4;1+_ULRCsFy?b2Y{LVLY(6 zca=3&UCQakzbZVZ%<#MT`6r)z^4$pt4l02FNmEk}oGL0WMk47+v5&m^>Z_}tSso_W zffF@WisLw!B};)!N!0$N;!s_%|Z_Y&`Y3Y(Z{q$UU?~$aG z^sLORf-cwSd!E+JG8$Tu;p^>&|#m@DeRWX&<6NN+of?X zNHU^DG8S!T%RqlmPoGZ5utt2)(EV?gzlK}d)Pl*NDxB*=A{eR#EG8Fkebi`XgMO*p zJvugmk2h2B8wgbMnE~nMT1&P zzG#4^g)7fSBQ!0X*c}bgv~bP{g~G@A)PsmT#lJU_|9eQM!KWX6&b4&T^Xbf0(wUn= zXDSL{QcLl zGp?S4(feIY=kF#uf4k|rRZwil;QWn_wW8QsiZxKI4aE+OgEdfW2gUYLtZAq9kAr0? z)|+ChDb|(ts~yCerC_4}7;arLZsRTJ2U%Fqvu`2tv%3S4pM`VG^7H6VYZ|c!?X`mT z>Op($9qe^W1UV|RVN1KPE1z{9xa@(jH2U|k11@wudDOOMWbr=WRO0j+v`v=7y zq1e9v3YO?45o>_|hh@nZwvq>4s}}l^`NI1jeuZPQh$B!LArtkyDPn|q*p7^Kc=jWtd zJa_bHrAV9p+tyQ1mgeWDXPo)t^hxs1*(Y}FIC=CCzD}G$pEn&ooOCfGx468bysW$e zwXxbEcXa4vaSQ!Taz}D9vCbbpc{2Iz+0w$C>^!&w!68?3^R~q>1$k*2g!E4;L4^IlAM_{F# zv2<>*UyzrbP3Fn(At!#kX?K2)7&j?rU)@R0<>0}>Uc*Z-E$KV>%Wpe(?>%s$+$i8!rKG=?uAbC(;wYrp1$-I1 zkC@F+l>CeJxqAVi(6r$7Gs9Ue_4bry|MuH&DFc{)_ugKCmBLok-Po#aZRphctUzLu z2WCxQdRJtE(0j6bH4(_{ZF)CtO74~|ik&fe;-raT*7fPXZGu4N6o3dOUu%gy+tGwC zEhF%Vjt&j?X1b-5qU2Z-2+pp3@St~0U)87~1 z?TCwC#FFJ_R#vd{!J;G* zOT-en4Zh(^-;{qS9hR)JOYKuQ7nw|~|U zUCd88k#sgK2jPUdsY%JHh_p^VfA&;zN^bVK?c4U8I$K;pDp>T@RN(?uS#=;=0B8>S#*ClkVbOK0BzDr}IOR?)D#n26lP*2O{R_oUd%%T_G4*350{CwDJ>U90W1 z)lbiHWIN9xzDnwl2wLgzty65zclMO<+vB%@` zlyB%X&I&$=yIMoR6)VKLL|V0HAaHHJXvw6w5qG&uKkk~x%Am?TEqE$WppjQ)XX5gf zk$G7Vk34918y6ZMwcc`}d*-9~x=EX|Yi%4Xmk+ym2RhfC%5SMUvu*F6sppz)i6?sK zhRUw2B*HJUaO9p zE^q{a*WGchh{Ta~Wu1&=)Ds@`0qxp_hT6f?2sb({2GCJaQd&LYB02Gqht8#+zH~*e zk;|MNacP{e;;q%O%t4RIb8lHVZ_ac?y-c}z!GgK7=EmPVJ!0bQdGqdhjc6LzO?-9H zEI;=!tZk295U!4%9*%CvTM`g%x@yk8Z-M9O{aREOmsZT_)f9$+ufg-0f~~nLEH`9tW4X6I|vFaGB7o>_SjgLB^$@x1|=PrIA9j zMQ69}xRiPcd?796QfV2OPH72-WQRgsT{U?mGNZ1$urN89bL#BOtrqF^owZe{Op}>e z(^{2xS+5E0=(R#fnpJm8ahqqbxA(@4US7c-ofSEk#rUG9EMs+X+ej0hj7kB^b%b&J?_uaei zZrQT^$1gWW8cu<=Bn?EOa@Kb!Z%tm`3La+Wa}tH3uT$XVk|4BIw6j) zwFoAE{XFAe$V;@jj0t!?mia~#YHQPjdFWL-EO=mcKt#m!>5-8Eb63VXxcgyy26#FM z+YuqxDMm7I8<9xc#@NciF0)S8*K3??Y^H{atgQt)fy@BHY3~tOE3Gx56Fuy3z}dJ* zExy^!-rL*9$IHthXz@~KH+Q%&API_8YHx3kXz7WZO-Bb@nJ%N8jHq847N!xkx7*4) zIBTI;pzntAw!Tr|>Ug~q%7apj05b|QGhhPnhOjX5W{nQ6M{UO_Rz$HS6sx3Ir*W`{ zDAq`^`4sC+vF?M|(QnJg>Hwzt4G~-$@i7ppj*<*CLiHO;2Q)J-h*zf4;RyMS5c|+?~ zHneVYhSqJK1?y(nSVmvB|2lTw)pIarGV%rDjZ**)I#a^LU9mTp) ztbQD9EycD|Y&XT4_FLaL*anLAqSy+G^{4%I4PwXAa@6rPoFkK1XgM+^ucfrtAlhrsV6UUklqVgrl48>-wuEAf$HAshtS!aLDAq`^hHM0tb4g?fLvDOruH4gR+#pYA& zB#M<%to=CH0*X~p>|u&MO|iZIL#%0)0%*T`Xuqduzd_^l+eF9QM6owhtZ5(B3}Wvz z$vm$QmwD2)y27(?l0=G))Iwcf#-($xeV$Fr_%r#!g$t*WGjcOiQ&Tgtk+M@P5>@4P zc51u|3%lzf_@w65bird+e>tzJ?J^{u{IaUrPDnhNm1$cyZ{JRGo^Jjr{YqBK0hn|S zq%`%pA-$4oZ&T?p2r5U5Fjk~;pYZ(i8n5;1b4ZkScPC?`fW-4liNS9LB%V9m4b{6o zfpg_&sMEa{nt*Sa?aX%e{P~5vGT~e1du9)_lUb*H^Vt{i^^VKst?$48{!cY73+Kbb zoAFB)L#$9}+pVlL!q!%WU4q!fOVHV85`mr%V9LKTi$Lr1nTXej2oxmEKT!l< zW|Ezwz3^C;M5c;~iSbiNoPv>aCd64X0Fk~MG3#z*TEeH<*l4`!>rZDw4wBn4ZCZ#x zN^3!P8zdlu48jmZAZdMrjjxM~Qt9UA>($>R@d<;iDa0#a(j=1BD=NU(2Oa>ven>(f z?&wfB^z~`10|LZ-kb{uzTh2lRam)IkL5)(gbKt9&|!?;YNt+^_{+P-=KR^4q7M zl7tJ!i_H{?PD~yhD?t-0AqT5L6Qlz4{g8tri!0L7ET;?jBS?@m*Am)@tPcI+3(M() zd7i_677>YX%6X&lzd$Lm#?>&auD)S{3A;Ed&pX5=FDKLtwGpa~c(TbdEv9KsV9ozjr??RJ zp#pMODlRC*-6;F_Ur!L{@$HmHbainPlamVycrI}vzowHF355*>RV@`@C7C5v^}WKZ zz8)wudvr*Ihh)EfS;*|$n7<2^8AC&N?D%PCVOvSo@tr$&9?xpixgsZ#OJ7^{Y5bgA z0nKH@;jJ4sZd~`#hZ~@}L>?(ZATPY2iCnji=OR5_DA~!ZTeq{!;Cm<7!ctem<^AjY zBb@mTAA1y)3l>NCgR5+04lu{?;M>7l_JXS{BB_t@({E5e9R~YQi(Y|V@r_s}QSMRu zQ(|J`kC#1eB_?YyA-w(}S%TRRFL`t^{13=4HenOJ1ZC$o@>1ib$rF*awZ^)nq`Mb> z-2%J*7Q%LZVtx|6=-Zo(oS-YnOO$Y-Hx&K+BuM@2LIT^@J|9Vo-b6BAyaXS7ma2&{ zr0vgWEF)D7Nhtxe;r3X|~M>k4bd zboBMvx(J(^p<~cP17qk5ZthNyOYJUKW6Zhv!6th(k+_Iww@d~7ziP0e4=dfmIx*m8 zj)a@x{dU66@NP%=8LOQRk9D4!$CB8Zw-CFOH!rd6;UzXdHzKhar<*0Vv5?MNh+o$% z9(fm!OmQP#UG>w=UR`6Hssp8|NP+raK2=G^@&Cc83VQv&b*diAYe%Zo|K6!8Z)|Zu z&L5HsE67*hB2z_xKEr&1CQn8>t?1|=KNb8R{Q{U6N|){J8tbqy>3FB=a?l}({(2Hz zak)u56q1%rYTp!Rr{$HGW}Z2llAf8FR#Xg)P;p^e#>LYoPMo?}Se(Or!*Tut?N1fqtY4I=X*RS7uA^EF= z{lO}k#>sP_9BjCJz;n)Xe_QjHUv{3UvGRLxK3Lfi{`IZRgMNz2Q@{T7?CLeo%x2;^ zB!7eS!Wt7KEth`!c=I5xbsqU;IX}&9~J8Q1!-2+4I8w)BRw{rgdy*ic%N)Em~`#0y4P|4QX z2AvHlJk#43{Nxg0VRd4Nvlj}yDBZoBtrbcye02M1S$HN3K{tz8{my?wocgFGGW znVFnKukYd(YR{?3<9MYZLPi=f8|c zJk}%F0rHq~f}dqVB;+2LZ+rzIpl^_ueEp7({|z=V!~OB6AA0^4V`y=vd?-TP2DV%NI;ma&oPIIMl&rA678#<>A9b;e+3=KT*4T{Heq6>$fV-9t82EeYSmTpwFkJlXNpJLGFtqDw zLsb;5qg#8vA?6)E z1U8O*;tP(>G+BFY9ATE~M%afkv-vV7SsB~6p3O^5O-e#4*0WoG%ea_kUmMtM1; zfwEHQN?;c;+lT65AJP!(P}MN&(B(F29cu02twTL6MeSZlU$}9jn_IAFXIai=iI(B&tn007{AGHo0pw^+Z-5b9Aaoe}wzWeUiUvK;Vi%k=ae?ke9 zY?!!eKC{JS9r{l22*l_i>yVw|E2 zU2(~9b8{3+U>%Z5p|u!h9qJPY1ZYrc&_OZGI#ky%XdP0hXvU6fG$T1wGZITRBL!45 z@&wh4cnxVrUd8$HD$bXWL8*f`Ja!wZ z-dS!0_ui6n0Yc-2b7#*(!F4`m%Mhtu&nO-PLX#?7CaY8v;-^Jn<}Ejp3yAu}bx3_O zL}}yM%!$62R6PC*C~Z*RdnP+O$#PBxFEmTfx5(0o6E|AUu~--b2Av?T$D;!67c6-9 zhS^aZx!K#iUUGo{h1_MQqplIHRNSEy=NC$1^=yVaH zlj%IPe(cUz1no?Wh>Fmk{t9xc=weQ{YIQtR#^m*(>V}Du3+(f_@6siMPh4D_uVSFO zBroI8rmUN$1tKa*(c8uh@-YjK%()XIL+P10Y2pOJ((BKL)VW?BM2Z7@^Y-$DzRMTRKOQcQ z_Aah&9zK44em>*TzideQyZ(5FnBaAeTe>qyw7dj6gTyZoJ+n+jGLQ>!i>ZU~{$3pX z9UtW4U>DfV;de8MY>0PTA{-9tnTeiSp&F^DE;s3^L#Un_M915_fsN-i)vn`esylg2 zwJWc0F6Z^lGk8t4<29PgPSTj`^zK&%|`%;q^89`jMXh2+{yOJe;Pws1bOAF(UkSmfG#a=JO!rW|~T_$Zn0 z$E5a&n%%<08s((JB<4N5K@lH|>aO_NjUh>s*y*O9NhW~T&;qeieTwyZ2Au3!v>-lm zgyT-0jDw}*P4>qhi971kA>(l`GxNgAaGYyaYHD)w>9*cpW=KG~-gQk}Q6UuO<}N_~ zfJaF!U=oQMYAJJqL?ztu)~l~_!j&I<`e}ki=bZTU*Q=`j`qu?a1yjL(%zlhyLS;9^ zV%SG8i&>nSd2k*o9KUFMkz>})!<_q-=-~71fJMWx1bLC2B=R5BlkBgX@NC3wP zU}xeS3|4UOvy5d-4G37i+}Zh#JM8W6zkfXTVCD`;_aXG}MqIB$jZ7qBEMf-yUM9If zNX&^@BZ0jBjGEa3g%I=*a-w@kS_cYUs}{`n;Ykv|aa#czU8`oM$1?v^6S6gFCN`q2 zuh3Q!==5Kp)AeX;?!n&=oJq|^imjqc>Cl|R!4zM`$k&Ls+^nn%$;sD0bghPI77lY> zN^4S|S?*x#MsmIDJ+mTD7X7 zVej5~&|f}p{QH#kkjoF`qMVDPr=eFU5!1UO_?U&%gfqTW|4oPGPkDDjem;;uRg|zuB6$)K%g51_yb;DoAw zfdJ0368%7LM@P3lL5afmmhG##vlh$_adU8oLLnm1K`2a3_4oJiu(h>8IYJj7KQ{;e zNY!<>o7FH~j2owD>qZp^VQ*7oX+wfdSX(EG;UFQusIR4^Pop&SqF$TXllM9Y^57o4 zHtIL$JPsx`o7bdK3u6m#I&n-RJ{D+kLUBwyK0e`(Q;NF9dPU)+LjCYz_Mf}46Cc7( zybC*#&$V&lOfI%xS@yY-icEMpB_$=NrJXr-?ou`+;;eJ1rgSmobkf<=C(tbyXwcS` zW)4Pu)YdjNpyUFL4q<3?NKA-rVpx}mSn=GXoqBBz{{8S_uLmETF7CX12q}HP_%W^1 z{ie9s!$-1OP=c5Xl-o)WSY-~z$ja(=y3HKAkwQZ^lAA<{u8O`&C=@R<&a)nPbmhvG z1qH@`{cC@V`}_sWT2qCnUm1Mob zCNp&FSv~A`GD z<`)qeHPhgT)`Z1*t z7xK|6-eedu-t76Gh|F*wcrm%@5*seD0Imcto`5H>xB)77&q6Agpnt((n^WM_nKNg` z1UrH;T16b7=85(H1}?uuo${W}CfEP<9$ zgdLd_6IC>fM->{&$zsCHnN?LcKpMpQ_aD=SFO_8jDQ)~lY2zZLjkZDB7+ntbp;#A+ zEuz>iiuD@@TSc)A6nm6nCsOQ+aj;Gldn?60NU_H#Ry!&dv4D`MA*Voi=4E#`iakvJ zI)Ng?Mny^oku4M%MUls7uMQL|9|!vh#okY`ODJ{<#RiRo{ZD3U`iPnO2kp0*_A42u z-*SqbK*xNDVjC%T)p=DXQgNVt!NFs!bDg^#;q6p7@wts%7+ zj8W#=3nMIfDzhc8mJlQ) z{Od&_Zz1;D2J$V6e5)~iy3RWPCtTh#>0vm1-59sM7jg%YJRbxPxEI`(mprSAFA#C_ zV&Ul|dIv1ahBsMZRvLUxE)~{P7vY+V-dg4Sy)83Ci{&=rpMnOvqk~-Z!`|deT$hRCEb2~)N zrIS3hxx}h{-+hzg;7zj0_T71|O}|2*T>E>6Z_KK{qpG?%C%;aCCBv~T6!!P(6TF>* zgX~nQl9H@}B`BPEi?lVoXGcOp!sq+aGAq0Eny}t(J#MTLeWTvk2``J9n#Rt~#w(3| zyfb}mO^+>8ns@$EgApVUf*5~=h!G(Ww!NSLzWZXafwgjQaBz}oZN04F%WJ6Fk=%ru zabY%MC4^2d7pF!@7hG|2BE=xA5tlC)iYT>)VbdG)!6|3b2z?I15T^Dw6te6+tVfgLc+M=8^{sbe0y z`OicLjNwXMMSH)fqNw9?A#wC21F&&IRH~?_yQu+waec-_2jW&2K7Glp%kR5?)#B;6 zr7%;`rmLHEf=noAs7EM!);=0ZITcSzGHuSolTN~uPQjB}ZQZKVp}NHBUHdkGkFS>f zykqmu6NvL*NN!liWa!SAtDl)KJ|gSxCgShjZ3q#6%J`J%!?kNa|9l=%J{ey`Lr-vO z_JtR?YbGL&c@Jt9DWQjnt6^>#}St#Zj5}`ZUFzd=E%iJ#`lRrDItp`g6Xs+A<|GOfGzoL!f@&mZ_Z;|j_M zqhWS&YDY>qCz7)>`OU&W51Xp70n4j8efZFxy(bU-`s0uDP>c{Sw!1sdu-UL#O=>nM z>GtXEdj4g8`!FwX%nKSBG->5*_^efki2oG1H{XOo=AX1!A+-3-OtYF zdXL~cE+im-+5L|^`{G>>Jof^Mk<1p0LqbReV4i9eVkrt>lP$(HaST)#q^jr;cQ;n_)c_S&_`jXTCHIe=!Ix*O`NYuf8eb8|s6b#-xui-wD8=EPVN zWK9Vt;*Q7v_Qo?qgu}+hqGAqHs%D-Xe|j*?f|Eb~eDKIuU;niIFqlqDi&Dw_H0JbB z`nh@G(x{k)JomA;U$KG-8Dn~`C)F951P)jGY1kWhO9i##treNM@n&G2!W)@eVr3vT z6^Kn{Q1)ieVtt>-U$7tH!@Jyj#LR=#@N59a;YhPE&tdK-<{i?;aCuY=Di(u^K^*UJ ziGl`ZGrspSuX2ef7C^q;&Lv(u>Y?iJWz?K97hE6X)PbM#owHR*Y&|Y+4q(QnUd{HA z5w`}U10@~5Cv2F8p5GJprlM>9xrOA-fbK^=w;4^XEB!$K6@S69`0xZl6o7x?%^Lg= zzYgG;9BqMkN|`M?BdlTUX@H+4_OJ(y%dnGQ+@g&1%WUsJXKo?fB1j^ItaL;~@c9*fJez;+9E#1JJbz7cg>SiO3Wd|Gr(~@i z&@Q?-W@jHf0p0nP`jRY?ZlS(2BO^K4+g_o_&MrXsQ69;)&`^+?nyr~psVZf=yjM7(*jmQKA$94y)?a0IIV}hCx!1ERV>4(%H@3pvikcFMhx6YqHCl~)!y zcXk$6Lr7?<>vuC{ZQvA(X1X(L)~xjH&RS=RRupe8>~_6tjb?_ps_!NY712TxD*6g~Z#><&{g(emCby@x0A_A*vcQH$n+nN5pwYHbF8 z)v}?XoTGR^&&0>y6sEEl8jVIRqndR~yh*3K4pjCvsO&dT*_YUzUto961xfswbn4)- zV}GpwVjC#xMyXC}%IWbfH}`6h+%K?VyhVN@o@eqAm`rj+wS1mDX3w-B_~D; z7$n|unT6yn9DDMqR9zm%JsS6P{smUbzN*$-=$VW7P->+I>%r_bUr zPQ3`V!-cGh)*fPdsU~smm%HG!72G+X^z`(!(RUBDvKB_maU!u8R&NJ#&(FjWH;k{z5jk+J2o-c(9;G{yS+yz5KCkz7NzSqkVNk~#%yOv3exVddAkQW~Z>wFdL)^)b}f`D$*@oPTSPm7Kb8BzpaC#J%)Zq zEORs{)v_g?VdoVYA*o0^e)8n8y?c*k?#s4p&GR{1aiCrP@4x@P_n^0@ySqDlAHHV9 zmJot>qtr$&wGF)O`R5;spK|+SOXB}6%6OcU)umMQ<6>s$o{x~LcWFj>!H;EiH%>A$ zqNap6$n0!Ake_eX>o2GpwyaC!H`if}K-crzBG-xjxwC6^H zlTRg|ICk(4_6Da| z<;$15c9k6ZfobQM2Zm3}zypxGauIWVWf9H#t<;5wy4GCJ5SF1RQ((vv}M($m>mJr^by24I2#tkplN} z9d=k#OJjWl$_y~w8d-Zgsgu&**UL$?&DfnyjT-3wluCbpd(Vl}X9Ypr#NZ@0*7k0U zC&#pDWKB(^q?(nj40#y3piOLUxSqwi3)6x_&lS&#Hx7fvn>9Men+Poq-J`Cy4)D1+ z8n^PfIMOM+k;0uRd=`PzdnEDAei-ZY7S@UQW)q8bb(kg6aCKE{MaUIdYR#9As$YnfG4l%)xsddrG??zw-}V=J(dI1&V7tZ#mP9HN5X zQDg9Y>l-(J{q@&-(m5gOgp62sg=yUrKgv5OIxwZ3;4_myTSM@CJ1qP z`O{Bhq7YO5YE$l{WINg&pJkdb&$8D23IhB0D^NAi`|vAQIh?0c-PSwj+Y z_1?C8wMXo8N9=P;S3lzNeK>-BSVm$oSF;k!Np1vN5reIeNT6{^aEQa)W#TY*gTo-P2@{?{v{ZR{ zD3(%)HTxXNx$~*4 zUI&S~zu%CYY|!>3hF4Te5P}c{bLB3mr%B3^O`CRo`^`7-DgE|~tvO;}=-(fu@Gt2*w=JA2278>3a{UdwWS~VSWZt6hlqf(%jzBPJ}##MusaINqR1Y$Y$jTjir;b#!V=+ zvg+$Yfzr$6-NfMF?(S@Brx0Qx7^Oz$V-v!k;~lXwT_=w9pua+kgx zJK;9$gpm4rqfsnWkW-^y+1EcXK=LEkcJyj-hO{*`H`HVC%gc#DD4@NaxPU_5#8x?e z{N$-WPaHy}wZkVXjz(MLg(t3*O$8}G%s2iO`6Bq2;+v!b z1FnA`e~e4I+OM(_b4n)l?v==E{me7ZG9=rw%x~V^ra9id7L?*0F<0g3f($9n4*ovQ za!#n6_2^$0!A4rlbedi#nGK@1;(UF5;}|`oSI07^VehcBn~)&Fyzl;a0ZsW!zq1B? zf>(u8QKECh~{jix&k5#A2tJ(NRHeSQ%R@*w>K% z0_F;PPVImyRdnIHnn>DdtyUmPuxhF4^))S{(&Rt%0@jh(9lpdmzJPUn9_u)@`U(3Nm9wwjp1H_TRNl2vj zjnN0EFvoT4v>lqMj!I9TM?5%qco`j%dS8!b>OYcU zhqZ*5Sc$<(($^=-`W%`H7h79n)wqm_1T4JQjfwBr%2%u} zq4&Z(dN1^$_riOI?uGv|yn*)bPx~*R{R?RS(xLu`TSbRQesyjoS=FYrv+xnDe3SSH z+7z?!($jmIz4XQ$xs3MQM0@U}J@?X{?Z)r<&#ffp%)OmeF>LXhEFu&KirJG4>*LAZ zOl2MOn6HVF+e-4e-9g422SI`#^?_oN(d~+-Iis5xT!r`tiosO|t-v@c6FCeQ-`*tO zipe)m?s}QXO?7Tnq=utfP{zDS)dl=dxZ9bu21I{ODnpWy66>)-#R>%^9co41K_;1F z#6*5V6Yn$Ui9+SY1O{PfaQPv>5tD49Xz6{m*#b`V4zO0^Z6Cy1VfL<#92p=rY&a$aAtI_qG(bHR%Wgs=yoVwT*!*uV;1AeNG{!Mg=w3I| z4Y8!iu^L4C3zq@5?;sn(u#Ksp8}_CbLF2!X=hc-58^3;74O5qfzkv zqFjQote+o^9X^U&`QNf+G%9{H?bp*rO{6p91Q1pvjn2#o`cvKD%#3cAnLx1;`qz4j zy+VI-9|zk`u|@Q+ODNVv-M5c}4WQVYDfVv^yOaLZJPtOUVrBHNizv2({^U0f*0cwg z(Z9Z(Vo9xJ%-qF6tl5`&WZfwQR`KBTu6vHw=z^^nP0?)|>uyA;p%`pY9w7TSBo8^sglpTS14xG&tMhl z+_dk?(FpYn7D@TvmT1(-8jQvU>lh5*cjSzb(}--g{~({alizpcWZ$vURGcx`eWBQW z^G))+FSvjYI9~{Mtq;&X49nOehAOwLOIB5-YpJZnxJ1dx$BKGz(`#!h&C}RU+P?kg zU(dG*1f9K;CP4(^1Upk&5=0+-@WFRGHB8Dp{*t*>@p`BrC#SL`C51eLAe`+=n1{~s zV(*F8*p@e-zRbnLWo2iwa5caK3+4yQ6GXU*1cYmB?L9m^qvk}}+S)7Z?U``$uQS~f zM7Y{by~Q;0tDoD8RDgPQ@^fVeF4$>`p2HB}8bA(6#EN=fGj;UaN6~F%YV~?%+`wC_!+r&08B}@zd zYG&&3ITK14yx1@J*O%;(Sj57-aXvFr}cv#zcY*F56l4Nv*@UIXqoJuuuZkM-2o zy)TopV)*i6HKgSzZeLc>ko3MnF2)*(gkt2QkmD+fN?z7VHxtZUbSw+{yB-i=Sb-8_TUHY~Tv?gORu?4-zyJOVNu2RnmkJq=xOUyo|gIR~P#nj4p2p&!K44i~{XI^&oYK~1wNo4;uJyF@vVAy-0 zy_pEjmof@Up}-NiL`HI~uWusj9h?XW+TYQ|-Ia*awoY#D%mhwgu(jpb0od*b=pKv5 z6WxL*dIC=rizk|cCsIK}b(L-A>L+x+S}SjQVkSA6BMZ5yDrQRj!$KqRX}(GN)X<3Sz8+ z`K&PC%Mw6HV24GZ6TD5u@*{@pB>6VM#2*zPM>ERo+r*3X9lx=2K;0!2T1KEGT_?cA zy$Nv2o5#dYq<9&{8z{bp;$4UEW1aB2--u*mm%!p|&wYbPHsX`y_AH!WzCDkH4ET-Q zVJGm&H+kgmC&>-Ad4l-{8;F?-2faoP-+{Tg^(EycQ0C|6W+M0q6DKHWY2oCRl|%!a zTwa%~5mZ*T!KGE;t`x)>R~lEc6%|OvDmv~Fj)xRvJp3@owmy;l!#h#X+FB6HY#02Q zSQQ6@{!TU{BbNC;)V&8>R9E%~{@(N%%Fu?6QUnnJyJDx=YfLoVB&P0~x+$jYZsyGp zO=7wx(U{_@F-?>KHzKu#{;p*hz z=;5JKjdoEvkQFFE@F}8dq8K4vfj)Sm5y%wzZ7b~mvtotvmMLc4(z9QK5fd*AcK$75 z;yS^uvV}JXAPtMS=_k8BU4HN#gB)71 z60GT8>#6qPNI#7l2DY2R8t7mDL1D%8leiC-}lVk60wkPQ0xOI%p2?G7Kpbzb%dlQBq#5$2}vzAzIgK0o+4%)=-D6Kb z^Yr5R;Y>IAPT=N7AUG+i9Gtm)DXCV&X8#Zdm2!dN?Aago!_T+p+>Npi?HbC8i;Bzm z6q>S%ipq+HvT7y7&ZSE+F_)vyoxOB1`bvz3QFuA|dLbsu&9>`QB;rnQm@FeR3r~!&r}XUnqLTc)-0XBf4;d7oxThvUYGC=;vO}eaJd;@B3Dk7gJ`gs;ofFD1xV|%Zm&0ic3n%s}VfO zdp5B`hpMr*)z{I%mh!oiqY6}VK@g0Cjg^Ist(}82c!PIkVto&J6aMr{_BNtvn(VVu z<9Xlu0&9&#iw%Z(>`GW@zhduWV;Q#V0^0(@L5*Z8huf8p^SipySY#AirBw7d5z#~8 zZ`UQHxVeT(ty=oWbBNR;Mm`8ZL!Zndk=BFe$_UyCG~s|}kSUXbqFc}{v74#F_gCD}y|1Z356a15T7v6G#>q?GY^p8kd1PTSwwhEM10=c~b*A1ss znY>A-H`;f|iOLdBOKcPhJgovxD|G8-3P1#dR*=vJsl1VY&salmLiQ(PuO)xbhuOA$L>YZk7Y?CguOwpcNL^GoSJI64+QmCckgI*#xGC-pper^Em&`xQ2{( zBOFC)vGa5Da7Df%nVZl`!B>tmz}@&QyMzg4>Eu{j2Q5Fff?~$umbOZtot0(Xg*S-5Zf9!Bi_TtaV`31Up^$g9bZA(8?$ouFbiOLeYsdG-#8J`jSAPEpdD<@sxO2er z^W3QA%a=?aWyhCKa~sp#Y&ZA0=bl^aXFzMF<%6-2z{bT!BXSLyzwVuIQz!f4;TCTa}n$W@O2UMu>k+Fl=bJYajc591nV2{s$%poZ&-2`7cQjKcPPe(hK7oN z4M*%UB3qT&=?JB%>yZQn&%0obB?Ai@dw{5h!tB(`fBp3r-nJPK2GZSCAQT=?#~Z{$ zXU1gLHOYH!Fl|6#XO~na)<`6%$L{GQ*9f`{V(pzg90)M{bhOL7J>j^epAbWOTx@f}XK z*X<#)y}F3U?x%||j#K>-fR z=||Nr(WRy7SD~d|FE3BI939QX#>FtP*W#{SX3%Hqr~Ui(9sKL$>6DbL%+#AnnMug5 zffSe`X-_(GZbnW@PfuHEX?+J?bxl2-lnp@PjSVv$IBCOF zyn7%dI0^=>dr<@8KiI3jMo8Np<|k$!`g#Aw?1i#-in)OQFEcSrG`Ym6py565agyu) z8@UiOU0^$TNd%*&J0B4BA*Zp4k1uIQ6j96Xzh`&iwXsRV{1AyIH8|hndX@e5Fvolx zi2@V!i&xlv@lmcWx1mge!hS}KxfE7aH&v8ZS63#d_itd8e|kk8#( zA@XAcoV>fEhw1L@=+ragZEON;echZ~oK<6FJ`UD4WCOLaL9;o9g*C#T>>+Jz?Np>< z2jdB3Sz5|v2)VNl2ynB)tEH7SCq?~GnM|&*G`+L1rX2JZc^G6)kfZ$pu1lPh|H;@mA zy@Zp35#r*m#m1Q4#axb>9UOkos#W(s{K!3E1h7NTU2(~P2wMRnY)8yC7Dz3I?MCLp zf+r# zI}QlB6(HpH?)dY5qeOZY{7EWN5{cyQMweH=SVF`HFOx*rVBJ#l^2_pa@^h}ll8kw( z8fU?HmYth_R2|qPE=JdIZZFm}sms{7;tI<;x2eE?zW$>XeDjF7^&iZtf~~ zsxhA|Z*_-2f>=uFa0mf!@-oTVS|Wfo0r|>ysF^FWQi?dKAsvM%zmKWqq_**R_w%*n zs4&7$_JXQ*U42DKd38!fJ}T12Ugo1a`AA&JbISv|u!O>?9Fo5EK&S$I2o;pbpQ%&b zN{)Fo5)zw!@hSW0)Tr^bwI#(lW#uKsW!;_i zP1Uf1kRLEWWd-aAMWo+cRYiFzSOgrQ5h~ye(ui0~OA!=rRwETlL20QN&x3?Oyb}qc zAcNYJ-D0_1X3^{1YiVz-v?ekLBF9l>3;9E`@RZgNJg8GlWKc^>dk2)s`t z&772U%V1Dqd{CUsU599NIvkr&8z!6EsVqJNuwb`liW-V$!s` zj}p(qq*@kkfLdaUC%%PvqH*?mnlw&Rl=vMvfrDok5wSaHHW8l{IC%EZuvvj4N(mY^ zD{$~+pP{k>2T$f1X;vWdJP~UVshhu_kW)r0(S$N@5_?fL`Kppn-X>8M_dz=$SzVhT z8Aw*w8bJDAi$~QXBwhZ#>pSQO=lDjkgX5i=y%lVq+1nf){L7qHk%0Yc(SQAQ@$%*9 zWMX<8oG#Qr-WO0T8koh`m(?X&YlG4%%W1AA)X}$LMA^re;`$PC){xcEE|$VLJc6_? zYFY1#7w$Bh#77dk+FM{0z`54e*9UsCpv`Rp-Qy1LVnN)dkr@^-YewU{TZS|T(i zWlEsnLE$r#t2cG(wD$Hc92Y@ecV{af^Q4s;pb$AZd3$masBlQm9GtZD6iIrEsSDf7(YIxLV^4lq0ZQ>Rf@!QSw!bp4$Qe!8a!P!Lo%KUanfC!@m_}J!O z8z02ax`)?|ZHW1--QcxsA+sC#1mrq)cLISMHKem*D48G9*{-7k0_it|Ghjw+*EQDb zT3XvlMpCzVrW(pCp*6E%gvh1vwh`%}kjr4(wNzMGNWq&zkyvU$@>a}q+|a`f&VIZd z$|PA{zE!g1=T#IH6-yPQ_acs|ykB(iJ!8P}mCpe;vv>PjdQJ!mnly9P1bRrwHqT81 zPNsKce)sJe7JfFxSFWVi_Lkvru)wI$-{od8;EXv`#y8n4ic7Kz3i3}LzeW!Syn}Xd zg{SMvB%1asp;&dX9HjJ^c`?6yd%TAFa5XQ-*l&C?Cx_4{HV?`is~G& zb`W)=mTDa~eOiY}uats=OffHg`ilM*eT8T*yvEWo3~V(Wx0%8g({cSM?80GS$5Plp z3Y$V?c_KXQv20=|Epr{tPFk{`*h#CJS=dQ)-XwO?pllX)(iN^IJ1Ozw z5bvj{=L{wWeRBUkxG5eX(R9tOOf+5vZ$1F!znpxNoy+VaudZ%PBxj7Xn}NHKnwzUY zQB{1CSg;x@ zQCuP;qag~V*Fp_1T8tXA=#f?TtiAvK`;(JlWmTjnAO7~&pNI*wi_^%s540Lh)JI!6 zt@Pd3H@*G#rkCH1QevG)eYtP%d;HIuHA!J8%l00-X;UOrL*qrPvzoa~B1LGMK z`<)Su4faxIoz5dNGSa=d zuHqC544y1!(n%dW3nZFCap~$5S;*1I_s48e;Chr5e001h`^S&9cJBHD5UoJ(K6CB* z_3PKpbT8Nuj?_V1!W{+f_eVnSKFMFZ1oxvx6e=4d!I(8@#Aw-uw_zxg^r`4k4@m>O zO=x>)cwO{sy~N|uQ07yuS6?q~Ju#C=dUZl6c21c{S4`|G#idLoH(Dk`t2dDWmJb7J zCK-@@>tLmCs6pagO{3o0L83aseeBe!Q^&e{jw6ZC6UTYFkknL_D;O1} zp!Rg8TYxQyxB|WomOz53;gbXI zJ@5oQP|`J1*e(j|IRLh204(vQ5?JD!rKdUrg>3*V{195=Ya*r21a=r2C8F>J+jocO zz(ddA4*$j-NMEvN!9$Z16I1_uf6F2K{PN2$2YxviKP*#y&)LMq$6vn=b8^h7Q_+`5 z?)|WAHo3mOwywUu4hAfw)gMl{Sv@S@<#NI`0=RZ%<1;hef@aO0yL?zalzVtsD3sDh zs5;@KYZ&u|W2B9f(_L}rERH#`84bBD{kpKIPTU2jPHt+dcbFRrz1<19%uQLYau(rd%j_i4fRv`04 zbdskK(LQ`{3m9<)?B*TF%E3;(o(zrGFfukPsAL6*SrVY<;w`y3rsyUTt78SZpcZZ> z2&1_<>j~1Zore*u;Z;D;#%JY(qRaz99{<6-bPO4Q`_Cy3vM3JZ!{DHZ!j7f*J5OP& zDD0!dz}8V%1BK0|u)>kyzLvtaQdl<%Ye`{&ydfoeBZUp1u$L(;qQQt{wj2i5IA`PY zUZk-06jnJ5?0+-opZtkPhd@&@9jEs-43ENk4hJPLc7!cL*EPyGK0cH-@GCOK+% z<|mgDmZW)-rGzbMo@6v(O`0beNZ6BhP3D91z&r7Ib$kJ`}0?qX&Zhh@>s6qA=R z%sjl9mn2@a`sgSMdxXNqQCQtDu!$7box;{q*g6X9*ay24IQSY`(jU;0zJWB{0ckk@ zuC=6u8;Lh=Bqk;$rKF%)^$i6=r*0%9T)TE1a~MQ@nl`kS4#zzyPXj|Ss>S zvxuT4=;*M5kxWEtzQ~2b)^_duyH?zE%}veCO-+p`vZkyt>C8lcdgM` zT3V5UKh{=OO6YVpGCtnK)>dh$oqyM=UQmGF7;n!}Zf-WVuA{yE6#fC@gC=>3*V}ZtDAgUqMiw2?_;;@P1j8IP$M0{sQG(^%62;bqpeO3iA?sTW6 zY6lmE0-fa7RPw8m{EFo0NqX@8pn=NtMO3D5p)%dLV-L6MmQh$4g(ZD!Ajr}%Dqe=$ z{jLlKYeivQhS^IxDeP=IZWSH3jKW$C18Y2U220O;}r*fnsEsQTC zw!io`HMO=5dyuWl)r<^3G#-gHR^j1qRDJrb(>SDSO08^45 zeU%wE+MTy0I=gz6mrv+Cip2LXj&+VBp(!ljRk@^ zA05{vN&62Xs5thMPohNAV`HcPgJb@ob#paEp(4zc&X7!x{>a;NF|+i%j`edR5!K0O z%rl>gAD_E^>WrdLW~XTCQ zQhf@`8g%LDb@er{o>o`YG^D4eqLsoO95{c?~ z-KgosNHl2t4P;Uuh>6kzVrBS5{7HnU>C7R#u(M$DzDYh;(sEFd%nBo|iC>J+`gO<> z;&ERMS%UhnnVB(Zot;THVv!COlaScjn}>jkq)XALT6r-#DeWeVX-O$)tf%&kj?T(z ztAb7ymtKmV?S)ez`T% z*b2M^OoI;$Jz*>bZH;iY>Scm%fu$CeaUC6fJi2NnqsF03X`WMme{1cYFXE#fdv2{xQ7N8r|7(|jDf z67eIY`vnrM-PSfj;s+wsvUsEn;Ay<|$hqJ(Z~>{0{4==VRdB(};DQjyumKeoM#>Sp zx#_o8UKlBNgkbSkpZM{s9Xqyf{qQSzzD6z!?r79ALau>4LiOw=gB5T1UkDB}CtSHx|TA`EHyd6*J6 zMyE?FhKy!gO4Iq)YHe9r(b33FsVL5eU-)KOV{v*Jw!)=)M7AAPPlPn5Ua6JSTVXb?20Vuou>Tw<3CFDF85$Ryx&Wo z`^(2#&wqaZ{r>-avGdq9%qcx5;m@zO?cM+FxBm<_T*7`Etq*=85_0n_fBhhM3<%>8 zfYa0pDc`9RxT7bUQqj@XKO{JKZ1^MLw$>zI zrbB7B@S!zV=c!nc=K{q6^FL4*z{z2 zY=4by0&Im^f4@MCwqE!CUTGTg&`GeUo*))MCK*LUB3W})DPeO z3Gpy;nksLXdm|-s2sZS}<9oM%_0<<2yuW?-?%g9N`WZxP5%mz8oPxwDzF*`1xa@?4 z=;-)((#G+QmW_M3(WE>_Lj&6vr`#`9DtW2GIY8KypxuQ@xRf^V!X@NrIfTfL9Xr0* zyydg)+rRiva|bVF+Uj!}-TXW}cI_jA=1rk05ze1s*bSBbFsi3l4dDfIEg6a9rg zxIVcH^VBqxW z)0aQ~=#1&p=S;G;;w6=h-GYbb`ivhxeR^P^&-{l%?OeRjQOn28PDlinP7FV~2cjMg zN@aifk$%yY4AG@^u(F!!FY1?HXk$s_SBOFiRs(}gnidg+glxxPD#*~YagMXChsUT< z$OH0Svh0qwjlrA;DPz*q)E^h!FWI_?WD`N$$(y-80asN+1RUkGAV@gUEqNh0Z9F90 zcxZO2!ot+l__$ww{%zNnUvB$m=N~w+r8A#%YTIZW!bvpkh4XRO!HikCH=5u6 z`5TwM*t+$T%LNICNkG!qoJQK!)dTw_!-gSaagwz=Au@8++O>~Ac<(GPlE%0U ze`;eR#U9WjwjoFB>Vk43Mo6aoYG8z9ge?a~mIEWcx7WQfgCUv_;NHK`pxEAlG2h!u z_7sH8B|+Fyq!W zap=(5vsYp(tIE(a!qDDP9E$)x+!2w00`!s7D1<_ikJeUI-V&`1L;mhd_6-fK=qHdO zwzQO|oQNw!oqT>-NF<~c7h?z`ZR5onjpnNpH;}}4kU3}%-7medyQQPCQR3k&EJi## zzYYQd~Gjgt7z8rn+YJMkb1Mus>#YKtWF|pVN z6&vkFdpks_X3w59Y04CLsT|Gns&yT<0qBs;uY<}g9hQWlctA;fjnWJ?{XJ=yx!HuLJq1h3b6nL7Lc{QXok{_u9K;1 z#kK1Qc|-owl^Dk1Ff3G~D$Znql`WcL4_>->_%LF8If2KN4UxpUiA%#o$>cIIyk#!|Cvt zB^DzwzVhZ*0b*h=UA}NR8rdB2@dwlv4QTRC)>&I?3*!(8XY8a=c8a>THl3x1&9udl zk<5J@r%cF3m$3$2XK!Z<(ploRlRnV=>CxMWg;~IYuMrCh6sE;!Qj8~*L#pLwW}@O$ zs#+Fv}9;Tq!rYPdKB*;m1_oyLx8K0|i6Y)&n6758A)VwpUWi2??AOb9ki@rbg< zu^_`~!-i*G`R6}hdgjG9B1{RjPR900VP|I6d+uHD0^$m;zPB@lcks(VSU%&pDW#QA2Sc6p9$)W|1Y zh8GgUAqJLB-V#$LqJ~1&QpivOsiAN^eQ<-1nL{BvC}chza~g$xdKlOV6!swsdyv8w zQ&@*#U^6M~Yzmu0VVf!JbHl(^P}q4CwvfWQP*|rvSaZ=$-JcUry4Tk%$q0Cz&x~y# zo^-=9lPBF2v2$CUEh`MGW*kvw#fv;LEyV(#jGAbY#xh%%Fbi6W#UR8?NGTS&-X*z|ZGk1IaR|T4u^Jm8hSW;~fO%V{eqrr5EGj>ZkDYU)w1 z0IkjHYAQHkVRUq%7UYrYf{bTX3IC=|;%Mu7jcK$43~mlW8s6goPx z&cK(d%gUNZlExb8X-X?pnzx0pwy?rDp7>8oIgof+9s!=nnPWZhv>td0N=?6+QkZum zHu*Y=6`r`9N;<$@Kl|6=3(*IDIsfPJlkl8fJ$&TIAAkID^fI%JS#k1&DJKerB`TC`E@!1V}jeSQ^+{H5= zfBf;~3d`{`X3Pzp;bkGxssrmv$|`Fr+6AOwgp;JM2wk?T4T>@2$M}u&542zqt<|P< z^1wds=LjG5c!k2jSs65j9JoE)I=l6T!jh^cg1O4N#B>AzHP+Y4#1bi54LK{cL2eHA zDw}cReZ}af<>c*dX@@~Pe8)dY#E`olEDOq>P5w^ta6BNiR;j96DXQlGHVUqcWOJV%M%K7ZQ_dfje^Upu~>c_ie z8%&@qv8j=_0qxqkgHGTsIEk+e8xG9i5 z_&XSF)7$SbT`f&_!+G-?>!{>Yc(_Y(!s~2pVD63==kn$v8{S-GK!iGu1b4-q^AD6k z#X&}oeUHS|*TciV_2^yl>s+*lE6Iy-fh6$r_4Z;Umdd;5;5Wd*Bq#d^aPTH@@Y{rg zOG^>3(f5OrPo;Os!O)Tg>_?37uGluGq$o2b`^LrO{a;?pZ67^<_1#K4#m-N*ehJ6( z2Vdl8YlNjocckBq{F6Mka{k-}%h5aWGiEmv&-`xj<{ay+OP4eMC;TcMH+jk9FTecq z8=;Jvo$8};4p_6&`>r?^8*kZwpVGVJSfNxZ7JL*azdJVd@Ngl=mj0i>yJS;z1Q!ZE zKx3M~>vzSTbcZDxHi?^*fA2JAxMS$J4#SLFL}6ViY#N32qp;3> zuqI9V_N;~cZA7nkI>|z>Pk)E#_2aCGUhg!?q}LCA0=h^?b)chG(NQII)PTNGZ-?bO z%?Qp>SQ`ra5rviVu%XO*>ON@GiGtLC?XTRMPac?_D4fVc^Ct>nI(cwgu3; zFp@xLsYoPhDJ?6^&B;kmOSy@|M$8}DKt~sG_SIj${PN3vr(=IdNs_bK?Jn-$eRuF$ zPLmB{gzTF;WgZ?LJ=w{>?Z%v)RBlAmb@iS&ZRX5LvYNAS@f=TUZ0pr1TgwX4<748I z^iE;W0-useVh-v{`%T`Yt?$$fWmA1?8Xe6fy63_MMMhoEZTt#*K*mi`eC034$pIB{zl6g@pw~ z5xkiQ7j$x3W>yYtRe5A$1mFXzDOL39>{OOf>4$qIV~BB==JEy(`$=&Z~-`t{de zZwcq?wZCo!tFtC*OH9m`i*VW>2sJ#cjEUinbRmnAEjfSS0BFUj1wo?~8ViY?o3yF9 zr>UZ{rd{KJ=(iK)t}9or1mSY!GoHk0>cNRoj07N8*@^e8V0JRs`Ky?z!Bx)`^4B%! zY&-2YY>4oMjnRMEh7HRCy0Hz}&s-Z0-w-Z@M}CVD>sop=7Q*(TW}&T>gNUhV(1e43 za9Q3zQG`Pfj=&Cim%a}-JV7q@!+lXeNfGq*riLcCd+KXYey+5#vZSPjHG2&gTLkky*+x3tx(WIGFN-xJY%6WOCjJa4;%m?#nojeaMsl2Hr)oRTz1*z%jhfY;gv;u(bY5_hvq%i)o)oBNqUpZz1$IiCGk`)EI3YyA`OLK0- zTur@Msd094-~9y$`*Tu8br;)KR#Ic|o)bpw3R*_`=9?gGvH!EHK-vHB*D)rMDdDf1 zu&95F8bjoI-(#KCxG7WT&l*3v3WQ21>4Nv6ofI*W`>%hKS;#SU99!1{_fK+WJ%fed zI&t>=189x_VY+)!TET9f=mIQk3VVvcIP5YALsUWvk! z4Jd_QS%%_*6%}PDHCR#t9kaNsyebvJKL(Vh)^ns3s>Zfgj}kCaR2C$}*8`c17fKP> zIHC@|`)ChX1XV6hj!sT0m7T4A4HvB^`8eSB%wKazuo+p=J``&LN z44bexR~a^lzxwlLb2EZ}S8{6h`R6rON4`;iwd2cedWnU+KK-ILjD($XO!;DznkCn7 zD0clZVr{-)zLUHb7A!0;M~Lor4(-j}Fub8*7vR*yTRBLDY>AvSfVK(Ns#vzWy8_WO zMTOJQrlB$Xd#tI{LbZD$pNN^HK<$|u(b~{%Wt!mLCu~>yA<3Bfa<-Lnverm-; zm5Nz}Kd!CSSk3iODwWpO?F^K48&7(kB|g-rfT8Dsp%uW;3Sh_=1#+xy3r^iAO~0`F z&%B%pGZ=GXd0#-zQ9HXMS0SFngJDpXot%9!mJUZ-^TFvo7lJR4V23x zZDEmbM+))~^zc=w94gD4=J?xrPG7gs#Z763K6c%`L^60SuC87(72fN1W)OP$&Ye5a z!8?4z{mU1xTm8(viv!Kj&yEcYhRtlviv~kzFt8wJ%+WI#^qdEJk|=>mpyx!w@i$?o ztcNO+j%1OX<0s;}yW`_wjYHUZYbk7ZRgm>vZQWv{pkPNy>lf(f>M`B4N) z8gDe*V|Yriv#l+RtaG?8R-<5aYx-4Av1glh=dN$R5n0+j&oR$yMH6}|Dk>tFEBv*G z{r9^>VyKxf+2X!bSpHFl7X5G-#wiDM@{Y2tX&QNUV^rNp-#(^^iQ3aJE}I zz%~!PO5j>qOVC(>eXFdrw6eCLskWxNw5YzcJHEgc+lXEyFbGgU1ck1oA_Gc@2r>oH zaX8JKENbsZschTHd>8@Z_G~4)V}OBW(?^9GHp##L=9e8Ge)!=ooi2=o;ISrLGkZAP zvh?DGFvCK_Ld(>1yLRn5n=ctZW|R)@vIw^E^Du+b@R}4AA>#AvMLozt;3)ywvPHDk z3e;F&q{d!s(3~}l(l0&#%JXx@x%;hDGoFaV-atE=UuP>S!pNWY(J7VlG#ENu?oUYQ z^+3V>QSzSRoV3K_J8p(d^#MD|+Zzn%yl5~{&DB~#hz1d3Z);~~t&oZp7M6A@nO%2) z&{p4T5cKF-J!wea+uPa0vKq0T(K9HR6fl;MZhSz%grMNy06!luzp?(oll+3kkFRxw zF-!qxogK#V-Toxam$!hgLa9rkZpJ~Dj?;2i|a);v6D*9Q)W=HJJ%Vim- zf84X<*s)_5ZuYUnQH~pjeX|$pt3W8Xv6=EXB-kn!ebGVa+TR|JZ9_bigRHIw7R>g7 z^PKIu(@+~ezI5Rta&)i9rqsi?iM6_%NoG!9N%D#viVPjaXFVD*gs_3^##fgEpU0SF zpnD;^k-r{cXR>Z!mLmd$2rWcl!$~HSIS(!JJhNBw_o*W6^nJYc4S4N6cKx$U5RsqA zbTU1x6C1%_A*|HIV61@D+hGhMu}~u}`ZX$QtHxAh=Z6Bu{vqOt#%1%`42v3Ar%Npz ztnt7WjxL;y=Q=#`>LvHDTefW3nustTk%n4J@d4>=>{d}T6yhn__UMq3xcJcu{Dp%cdf zA^zS9T{JO9FO)tF;YZF^Ac8_6V#N|{tqLLT%uA*DkKDTi6^oWGi<~`Y;gXP<^A|2( zwiM2b@d}~;s>dFCZ2d#){cfDx}~BXUf|Ky zLUxDQSS?{#Ekqv*B)da(b!ll5ye6g9kZN*iZ)I^ z($db(50>u0pt1JSx-RDcUq3%y6_Uv`Ry8FhT|Hgxyz*61q2=tu!zaU`Hd-O+YB6M# zbvaF*%xgjp4jQXVxlpKd&Ag7)SV(sR;vZZB+^hy}rU5t8fEyR0`Iymum<~?XF_^}~ zt2Vs&9+~ba>3Hg?j=|I#s=$m;jS=1W=%X7$t1d1?RWYNkAX!Ez8%#xUAu5R(wM0^i zsE5H+k$y`a=^HyBPfgGT-a>lX30`l;n-6&<7#6y@ASGUAgG$t%M{HE2`U|m9k-j=I zPjb{2uoDUZ9j0HSuZCqRjCBPi!kAvhiXFosdQ990PpnOMjz58pA8i_cwP}1M9lsx5 z;dv)KKY^eE7|Ebhn{J^u-9k=pAtFxIm^3CvGyZ4Oq%)+SqI zvbC9mUQ+;nH>0A^VGy-E^cgGS@orvZ@_3trb}c|%J?0>R_F|YCGPamur&_V7t@qHP zzS?@`o_+>>`s1djcQHM^fj+%u|I?G0)gVxCzER%r=dYNU%1TmtGd>18>rcNNK7F~c zkoj})^OtJhzPdWnoG>mc>EI9V?T9azq8L>aWXvGPfH3HpYC&XVU(dIXEnj@;-Phh- z=VOC9n&_uF%t@=OD05h8gr5T$w)BPAey@>JREs_a4-6V%wDoGV&h|r zQ5T;H#~+*1%vS>!Bfq@eiEtv?+5>GM#-D=ysc2Tqtl&xj6NPz4;^ zJ#_iaGIZH_$*{_0d!x|K3<^hdvQi?c?@Idq_qToj?kk%n^^~0+7RPscoI-7hqI{ca zUKTDBR?W9_keMOE$c|GXA>)-Eb01x^DB_-FPppg>YlcW8I!^V=FcK#;P=;Lxzd}^i zgA?CVqa_w;myFNT!7cntI9^DUksNEt)a6d%i8>Tig28M2C(O_%U|#sQ!4tx(2SIB1 z643Y!U^+IjPn%+th$jS#l!wF+9sJ0G$#h&9f#d=CQHk8T6^_J3@p4Q5Cy`qMZwg#U zfiWvGX+!_GL~iuW8snO(XLJ*AHYkc6!>}oQzg#N`kypa-TjkYYkUoEs_?%@tX+ls=**@{v7e?wtOGNz$7PD;dN;`)GH6QC6+l|fO?8A-%m z34v9QQZIZpaGfqECk^S|A{fb~GIaQ{hwJ<5Hj!r&qZa%P!BfzH7!W_wRIA*Da8PCbwrY&7+hIo@FJ0ed%oY_q->H5Xy zi7BSU3`Mvb^S%ZVPr@5Nz5o6Yl=aEUfyIdam?Lrl_n{do`&3qrGyRw&xIfBcy(g>f zkjaLidm!kr+!Nj|Pa#eUpo1stsNyZ<(83V<3z@>gk}iVJLsgiLu%*;wbYqB(xq%8( zi3nm!Kck4&cj2_$sgLgNL`79{gzx1TUw)Mw%V|}H(7Nn76~)E%9yrM@STJ*pw5{yw z;r;vf?9pmuCocR3Bl@owPH>`Yzaz(6BPqO&5DHtFMkKREzDN{<$T8E(h?3kJ;?aOZ z`a-gTa3N)!(8j8x7A}mYCYgWal4Vc68^NDJNPu%Fk~cN1C$h%hN5DC`wybi!y+Inh|9t}3OKr33+fas?16kXTzgx+(^}2i*zgfD&O?5i77FW`Pp3 zup-=HZ|JtPCk3N`;np@C$~@^RD;pbSGHVq1b*L{duQ#^_mz0Jway1JenocqH_WIiWuAVfkh>ZIF{6+j@mA3 zsD`IF;f%?XXAHsa{F79dRAK%wp0JFNREkHN9Z7wCC7wj4>+aSKF3{Zn`b#W6eJmdC zLyTKfT1r9@i;7|s3sNsIXE>Q=AaR+j98Xec4&e@vIxq^<0j-N)2%mh1d&VEcHk#6q%U$Y{}`md?9BL*IUvaeV7 z;Lu|!bO41;GeKYG%`<}hVW86}v_FL=`wTyJ1%*aEqd{hVs}{=7oYIC3r_dS-ThRw= z!u9R8o32E{my>hAm&9`OHQ`K20pZM^zAod$(n?Mh#Pb}C9h+nuIHlx~dQv!5WM!r# zCh*6KE0?Z;0vemn`snDo7QW?TOI>tyedl{lohoCS?n`x@5_GB~eT%N3Y=87gmZ zvMcBIqAu3nb622HRvJWF@vPA`I8T^5-fx#YGuyTpqeK|vwLcWBMUDluMhD*jj-C57 zX)W+Ge{E-tfOr@GR7da9_XtZUNo$BORWcHmjVO6@qAy(O`mOD*l;SChWy zq!df&iNacDZEua4ArX}_zCTD0S5gFXjlcG@ zuYko)aWbL^4UK;ONtm`Kstcj|mx#C&a3-@+kS8Ik&|q|MBZ^R2PA*l1O7lyrN@3B5 z+FRFP(u3}v9s{)kV}_7XD>UYg(PR%l;BxdROt%JLh%Y zVnC(0$I#My;68VV&e*umwe`(d4DK81ENmcf#jq3?qwHYC#? zC->~L#_%U_ggt8ew>bmJd&0*Y?0UIcmi53kIh39|vhfF>`#_DtICrB)Vd$It;IJbb z*BpIy{7yH8-|1-PnHAmX$m6Qb*JmE6)u&Kw8@2kvVP-sVm$am?#$8g_2fF~gLF^X9 z!aEa^iP$Z?Z?RjL5&toytrUK!*D< z?^@V5vT3CZgifQ-{uJ8M1YJR)5#e*23?|UGYB~Ja5(*tkp*0k?>JHd}d-1@NnT~S2 z@nqI7n@qDF2s=>gFQBkSt$%6XYB9kMeB#^Au~8%|6qGWaW6OqtzDcSh1I{0TsX!_Kl`W*Eg@B^QF;7Rxlow@NO+&d`jt+cw;E*eT;jk`$Ippg9* zhk>V#*>qInsiUC}*2LMj@0QKvq{OnkG@Z@o&EniZj!7OkCh^h~E<(lwQf)kV%L7u) z15!1gUGQ&@?^4<01i zqx6G)88tN-`##vro=gvA{*pa(-|FS7mM)l|ABI4ZR5dy(ahfnhuYM|gX~BY*!VoyJ zbF-x`qpYseAgr!yWh~q|Mu+=IYyaB2_pe&%W~Ef7l-s&`Ro1r~dgYdkE&0c$n>jf* zo28rix%&?`_sfUQT}6EF*^3viXI7?R?#QKOuN@>|ZcpQSi#>P^Uul&G4~C1aEaWLS z6Vi7E2lJ1x>EUrI)wqW@Z4P77nUk{2X|v`>gf3XRI+QsiNI#7QMjinXaxV)%KYbc; zS+5wL-K@5Dx0ZD48s%a}SDB&XAE8r4Fs16;tW?O6$I{j~&f68fFERfJoi-=~IzidF zc_@k2jCo^SbYohOXe;=k@!1l$$4d!rM71*$-EWs@bvubf^XJbn^_Vx2Xf8T(hH0}k z?W3fw$djP_bD;bZP<|OGKPDmb`qk^Cg30BWnBRXtbnw8TKYlxSaQANq4j=vNaH_$PItXDt|CBv#OJ=(Fq+qwvis|;rd_oafU_g-o4?5mHM0dUpey4 z4Z~9Yo8|gM{jb^x_8B$|q3qk(r=E&Hlx;oYjh=~Oe>#CecCxjrB10yRA3wG>lzCsa zY|)bN`IEfeQo@)EOoF1+!p_4#c;WKE^bZhY}yc*l`XZ~cAa z#^>L`JM9w0hptDRqt%AVxK`oaB{`RyeLg3c=}FHJ5eFDoc2 zt*kDuBBv=->ht$1n2R`FBKLVxtt-rp*7}xb{mmfrndmlVR_O6rzaPDN^K@)!oz`Bm&BH;c zt6{sUG7=x;8G&8LPG?TCk0L4gFoMSibP=!>gB0 z4)RhdD|tRJydnN@=a*l8p)m%SiMM?9<(J#v!zb+$1S|aq$^!TA6>305okppNoE5#@t1ln?%O z3m*&)Z)1WVOW~6#d^Cl392Q<_f_I_tEQPP8@Qa3pFEGJRq3|UXUPj?3+yalo(Ez_T zL>_UAGr2$jk%kjVr*}4!?@?)S6S9z*g+*YJhJ!zSf@$;_boA47^k3=dQ*If3E=G^Q zDq4zFGzZc&4ARsiFF!voB?E2Dv$D#va!G_~URG&QQAWn~j1&?hpv*!|h3iGgN$)7S z9vxj-!AV;Su3o-;wV*Xh*o_*Pp@#LUbHD%gQ$uw_{_Oof|NPVZl=7yIKaQVD2sJ#V zM(nVfjhoC#cbzV4X(>Csi(_YHaMIV;cv)L}t$964m>v_G63XnCJ-&9$3V+{V-=YYZ zFHlpNIUvjN4fG$k;+`e{N`6)}2a6b^`zmrr!J64}Xx4Ju|7PNY(e)YqcAxVYNFmt#9RIqCH(8xIeg zs_Ri6vhF5ZPq~umvJ_V}q?a>_%9P$ND^+!!RPW6+V&H@8IjQfcg%3TnXcU0g{=f-N zpZ}DU<$6=_>G{)915SMd^~r7;sbLLFV4sA{!-Ig=12HAMl;Z9Ktr!EL#S}V|LK6!o z5A98%Yx|(Zpe?Zq5bcWSbYjq!18fur$S7fd6)w^0R?=-@JtcTodSc2=snkLodhpx5C$gI*meA&;(rw$f ze7bu#Vlh7WPeNW^!tM_^K}vbo7FmwiZ_*66mTN}kIU;oC^cmC2BAJU!0j@ac&G*dd z!J{C`z?Rf!Bt7Ln5WEyFMu5>y<`dy73l^YG=ckyTAZ-3<4dOxEbxN7Fuv@IlC~4^I z71y=2Ggj`FsJT;&wW|4p(YpFH_BK2AnQ2Cnr${xZZ*?4WeX*(rG)XPOGCjtv&xJ zSftier%lD3=~{gp6d^fFAA%wz3zyVj3BH(-nS)v)Nr{OG`}ZD;y%uw3C>mvOfzd5G z5@FHS)z*TShKjuCp{N!dw)T-H7rEKXMGfdq+t8G8>BLZU6oAKq=gvbSVBpvVILU0I z1yq>#759x|%u!fUembVod(E>E$YLNVe4h(8J+p}vC-@G<8sbMx|E-)_@Zq6y4cpPU z06~DS@MZd5L%-YiJ{&1y1Y6o!G#1wurY2WZx%t^B+`MgvVwDS=+F8_)j&ROYcsB5o zXK-cAF;8%7b!F9#7NUq1V{SY-&iFa9O2}8J54oz#*aF zkoDk@x!{mcaEK>K#T|+#c5>Pw7p~`zKZb=3Mf4mL&fTy&C3M1q9eU$3(65&62=xj1=Foz-nHNEi!8OI652izdK~ zp#8tD>d|ynE9t5x`ry}C#|Q)GPpYMTt9o#IPzi;m)+4sf1U;2PyWRn<@VvEZgo;8h zAY&6%FAPa5s5vHxrr;+C_)rq5fx?fYbMG*X?o7es?-+fc_K-)RgD7;83HlU;P8tT< zlR|q?=n4~bGKD_U2W{?zU&f#CGx-z#14b`pH-Ex!m78?h!Et_rj_gK9_B4&$N=Hub z8+mZ(CJNn2q3<(68>RS}KIp-9rkzAb%HegUugO~-`6V~$OoNYjk&b9fM{F^TD53MM z?Hkbq{lDdrk?vZi(FgL#B0Bdubna5q+=X=RH~Z$kz^H|+yG09Oe}Izzrpu?tlJhPR zn^scFxr^toWmT5ur(Qk%<2Kw`nwXY!@#xjsBX=0MBn1g)Q62WG*1f1#(ovX{@bke? zv4u>Lnv$5lD|mj0H(QmKh)GF1UwHnlSDttr!K9lWo8sd$<*`kiAjnm+C5$=4d?9@0 z*~i}rM=`7$%tcw@-0=JEU;o5o8$y}GlJpZN~G(&!}i=YL<;s0wq#u zcTYn{OFDFnPP_T55HQ}!35r?EE$%fcz0k?FzJu-RY01xWc2HT_%X+1TtdsFX!boj9 z8|yl1u2=)kYLc(-BqiX4o^C`5qU)cmBAKT#*Tl0w*Tm;2JC8xkv^(=Y_g)yhxoG30 z7Mv%VKVB32P5`Eu>szTyBn~SjH&6@W2)r8WE);iw__N?41vT_W#A|iy9mpAiMA`Cp zSWE9*1TjkfwpJlu`weaj&WFZTepFh8xW6o=p z=%`G8Jc}2eB@lao$zp9;Sop^u@$ox%o;&x!2RO6;jZ@2eGH>2QZd-LzQ!VGU4URN_aNehOpYO9+!-QBNTnK-ez8ObW0xRI!#36Ifft5il=+1MaA(;*^4 zp;)$zc^LobhgbzpSYVBEkCYb)ClFqgVg9#tuVmD0B|me*d9yJ=@>)Z>daU173=u>3EcF0ja5H3nh+AEwM128?=b_= zA{oEGjK}616H% zPtV}cesqrUMb9ZTE|cGgi9(y*@6A zBwU1y-Zu=JgztT}3oGIQBwKCYzS6MF5N=qEMD8G5^O65eN=ZCsScg3DxrV>1`?-#N zOrsIGcy#BMY6L(q!f_R!TueEG(lFaTDs74a5BB3ZoZy)`FMeY*tC;cP8e~&evsSqL zkz8HOBr$ss6YvfSfc%AOKK~DG?*SM^)wPe`nVsz=o8FU6NJ1J!AoP+#KtNG!6bmY1 z<+H$R!}ex&gIKVOpx8i)2!hgk4TKsJ0_kPbdvCkRmfv%CHVD25U-|!cU}ux;xifR` zx#ymH%5yq!t9QcX(?mxDocq^E(4bMI&iX(2R)R`@w=gIeoG*Y7^anB&khzazJK4hy zar1v%QBLZwDc*1uSvF}vzT$dMHp)EGR4L@GyqyxnGJ zmxcc|ej~;Wj~$nM&m#*xJ)d7ZZ}iCcaTCW+8Zk^48vEBYVahamP7|Q>km%JIS-0TG zXb-g1)5BsiP|ioh!?oxjs`*bu*GND49I@jceu%{DCFtxYU8%w2)4zt@DG&rCbPq^f z7T|t_laqnrhJf%s-Wd}Uvth%AAHVwQho67`aoY_uLl77O*16`dLK@^c0m9f1B*OxN zLp5*Syoq7)_dNP&g6H+a=$I~|dGjzZ0;ak0uc{c=4^*-+Wzqt}C@c&5hu9Gi%r$?Nw;PXOzKY?Vy6By$s<_x z9X&Qc`!`=4QBm62go&ZFNvN^mpuTFR8B%SJ3}i@AKkOJCa<{Q6jM1@J;TWw8c$LZ? z3RppP1(Ay}w}_VI1mYB;_)3fR@M`2-0Bj1Gzjju{5#k@cdUbSt<(ciM8uaJ+%KG|B zlP?OWc=c4)ll8p86V*a0dwaT7z)1c5_umg2``kw$h222Nfh3Ac!yWJsPFm~)gVHWe z3ikI8Run>$EK~&h!~AFP96e^t2%WMAR_yYpo?7_GC}qW=9Xoayh(9uwjMde(bx2sE zxD3_RRWus10x-AGEDWfM$x;bIp|OxlFX8YJ<~@T8k|k(qOn^VRX|-;i(YzYp1XdKj z#Q+AM{MENKS6Btf?#LA;3@1Xzu%d!o<7OD2MOW9ycao)gG8WcYW~)o0{Yky3w!xS~ z%zBY`gmyG3(>q4V9sNxG-9c!{9TLPHyeT(UT)vu9URX$W^9F<%%47ycHi|*s(MGIxR6haC zwE-B7CezJ+B|I0+g_?`Ic?NfL7w%>n?nVR4Z(D1z6?7jFo*SmbKbWkOV@sf(!PmaZ<7m9P{LO)etV-Y!G<`0;DkwsgyZn5O7%skw|O zq{}reZWCtCoH@a*<@{PC-~4ozmS4{pJ2r#oWD3%YJRWl|kxL29U3`z*T2H(lNX7z# zgPZM38h}yup22I&mPLycHX}?yMw`Mbe#VR$@m`9C?~?^AEbjXTMJ)CWJLw^MI~KKg zMBzn6;i#F~jamnKu~G$7HA$v$^yC9}EIFM5!|^P~v6RgqW0PRo{KF8A*w!o*8wD4k z&f2+?KrTLcF8fkCp!m`+WnV4^ixpo+j*lT&CNp;ul?0$5u(5WUWm>I5L~K3i#%?PS zDYb@hY6|Q$J4C;^v#A=`2GvcSEfr9aDq1>wDS9;9ORD%~TsIn0=sgu^r<-WC8W}Nn zpq+LTk-2CE{ZorE(_+m0F=kqfnU*k83FSq+Bu7sjId$|@n#4?`8tC{ESrgYPYwD6L zCx=ForG#WoA-~!S$oKYK@-ulok^Gd(-n|>_^<=8V)M4&0bu>t%M9uVj8vuhzHweQE5|mU{X@!oQoJvF zYiqCjFgu&o?Be6f&;sshj79^mtaRzD5qI1%;@&s#8Ou<4`7~QGoVjyg%X}YN<21Y4 zR?VC{H_cXKSKIG~Ciy=8GRN*^D;AZPQ{;H%nrJUmO`V^=x~1T0UIKu$*B3wfEOk}v zB}S&vE6E;gpC(QHdHrV}LA6R_E?r83Cib}aqvMw@9sh{#02YcaT^cto86Y#2`SoVX9%orB~7B_WW-yfVs|4~NmV*uvH2 z7oERcr-=dXk*50Mg`)gwS3)MJUBKs-X}P8_|!Y^UYMM``2LZxF`>bp@v*Vw0ove+Vw1+d zszWw784Uteq^!bUBk3+igWB4>{E$H*AtBDa3AIDa1}8iZPM88tm;z3S0s-bA-cbq~ zGY0Qvg~dih9~+Ac%Ze!)dO>k%X;Bd%D!=eVI5U9$7$gH)tpGs;gcvQTZHO8&I0#}W zXz-9-2h-uqNDf0!>Y= z{^px+_S6u+886^9!;jSL`Np-i)zuJGlzRz6=u&Rc>_-31Yma`dGq6J2 z4N>?JGZpVX-afv*zIYe;xKgK=#??opP(yI56@Uk8O!o=X@$$&TqD_o@{l6A0=0<5VwplN z;^iC)#n2z*-16YC@4ox)+wV8+^p6+@w?_PLzx|dY57=?4FDxu}-MUzk3F1pteb}*! z#iG8dOT0Y&3{BbmYWtb=;*qrf+Q zE&qQLpydHR9 zaunu*r?S7bGUx2clP7nt`RJpJ47q&xgAYD9QreRDD>gG*uUsJ|uwpDE)p?lW_Dpvii`he+Ba@I~vNbIRYF>qr$_Qh5MP-#^g499MTIE41B}(*_xA%~^Ko}J*R=phfos2Xv zZiu%xuh9khyUMM=lC>&a{eyIA!W>dz4yiES*R7?5-QI>3?Ky0nQw;3tY4}X5mM{kL zCQPwd0xsgGI)ljDg_Pox-5?t3Le~Z&bvZNC7?xw>>+J-AwHwcu{sj)w>IgR{MF`tjy4s}Hs?Fq9Pen825Zw+E1j+Wh*qB# zT75!CCf~Bv7agreI9pxdXcfT&cyg*V(N^=Et$u`7?-N=bs=F3@Iq>{9-s`Pe{ms#; z-Px)htxgwOjnG}Q)KU(j<3oG(KSz7CLDUt5;Gt40(k(me76`2mj3e#$ z0WA3IIAgSd>*}ZTfX6%#O>w7feup1w08l!P&IBGaft#E}rvi_ua074L(B=)iG0__fu#}W3 z@os&6`}Zf&gi(+j!w*6#=XIP!uRZpMLUY?OnTPRQEl)xmX{iFzdO`zS2u58R1$KB0v|E;l4mEvPe=$8h-8gGxHj<7j~zqjmGw_=K2~`x6f(ri=$iA z4@J{Cx5anE1Mm&dKKA1{2v6^NjM5kI1R!x3*ZGW3or-I}i9veHalFf>ab`W2ztOnr zs-D7BO$9Qf@TZsXwMA*6O(=TR(%g*n*havpba%J6w03tx_3G{I?g2P!PY+$I<>o$` zHr&?-sBq3k@nWIe631^$ede@Koh~FaG$cf)(*^57!=s|YLkC4g!9X7q6BP|xI1oJV z`dcGV_xf!&G~yUHYA+dBtp-+8QFa@7AFB{Fyfmue1>!RUVYHHJcuW|6f`-SqkPG+( z4X@UPxZ%^l`bWKYe@7jp6O=Knq@G~r2wHp*Gn`REetbq}JclR7iPk!HdNJ<7UA+u$ zdIQq!T18hQf~)dSU*c*(Q3>o*(5Op`D{=?6+{M^xmJ}D!O`N2qot9&^>!miU&DwA3 zXyG+Bhc%UA?^^o%`fUbsY??g`yfYt=+n0m&V9ce{kw)E!>+_4i-F z&+_H>U#&_<*xxJ_mQn6l8Gg!5hEp3BxnR| z>|xkf9Iiuz?fgo1qx0Xvg&93Fl>8tX9~a zAD(f~+(nPZ8zSI9tgEN2R$E<(^0L^ERaBKW3~bS9#XoFE=)O_n<|3nlRWF4&MWIlQ z8?S+Q|cf342N%ViOHDQkd zW+5YW@YL!=L{!3vR(%{M`(stM^{nr>apQd1^-xgt@})~re&qb+lBRxMh7mbh#3Uyt8(g*Y zDozw9x`V~_w%(8Botu5voik?RgH&}QH8u5wN(7eH#HNayv1604 zyjY_leNE)XeV2!GZCAmGf(T*y5*Hh(J)iX;fLVIDnA-GHnI%j0iS`$mM5kq+Sdw67 ze^M{5GE)0K4+B3y3aE*neY$%m&G$?eEac-)M5zjm`{}~ zU;e}4tn4#9JPc{l<;#~YJ+tN44F*|mE{q+PK2z^x$mWBz)&rwZ2xjworoKJ}$jlcU z$tih|)hLaV?65X|BYJh3mj!!vYIqdf5^z_u7T5XMbs7AcnjABww6wOipdd37m9k4p zfO%fu0KXboPbvZGwVr{A5oShIYmg{p20xvS8qo3T@4tBQ#EIx=Yk4_foUookofn6M z=sabhffZ325@IwG6t8yfKEg2$wC?06^kO*pKOBAIo{bznV|mBb?4rDb=g#JztH>dn z`7}g=Fjl~a;UpHPQN?tDq@zPVFX7WG(_ptqw|m$u_L=rp$TSmt>52B)py}k9++12I zl9@FtEeX)pW4MJ2yNh#XK+LRw!)_OIjJe2c0*FUAd6!`!V!j8J3(t%h!xL|w`R=8HHFkA3N5sOHL?WJkx4%El-Do^>renYHYQd$PjJExk^Rx2E7JR9f z2N)PMOvjfTD-P8N>H2VE6xtcY^OPxjKc=jq!~`c})QrBbgrpk^Umgd9#UP|zGsjl%#~2G};3 z0CUMs{zKSd((|$g{|vi*$){t>)~`prq#ht^Jc5pxgoHF^>sG3j>zRqbEj)zE zF-MrQLma;Whwa2y@4L_O)jfOCaDj#f1*3LRGji0Q=WqdrNrUMvBBH}Jur<)t91(%u z(=%EvPb548FT3!35xCoE-0cuNUj*(p0(a|Hke^>b?-*s2axRy_rc8G5qW*r4(=!$e zPpl56kSkZOMj4%r8*@>#eUAMh$?DN(H*O?rc~M^8gAeK%BuHU@!aVt8ntb~F`O~*< zednF9qsf>hr^sb)|LAw#!IGT8i=KXZ<3>I6@WVW|O3bQNX>MTuTKY6KC}Go54r4B~ zeduHsiym0wJgVMN7I5=MMN3-4*b7B#f?U1*{2(m;<6DmLn1CmofG3=ZC#1PR6YzwA zd3pJH7>V3G#Ifh)8*A(HjmEsZ;>!G7R9xM~yFzcL!UdKt;7PXBG)p9UIv*H`BrR~& zs%<+qeY<+~_EN^K)J4W|+v4LlZ6bGle-7`bdDrKl_AwfDaQvi6r!z{sMm_hOo@s66 zndhEMgT*IpyB$kupMQ;Mop zjqUlne*Re(Qg>*jBL@e}FcI20QajQD>+9?4>tUCzudb}DuQL|p)zR{-RV8%|_4UX@ zcZHfwJ-l$-T3FW2&%LitPbUb|*OwLugSW3Q?4G`!?(V)mTDcVUe|&vC)INSF%o=w~ zT1T%&qE}OSGp;VGu7}>gAypFH>QVN(-`b_CNe^TG~?T z7XLsz*S9zWOq@DJ@aJIex-w%84sboPiJhbATjoL4kVgFPK;(NDiV(CmHKIqTL`4pW zivf2eLsZN%4=A<%fanZ_Q{HRb_;GQA0)oi|hZGI57i1a4Wo1%@l)I%YFw)6iOy9id z+t2i^$|(zOz9TJ%=zx@Yms(*`*;T}yp32Bsdh#SAaiVf$6ndBlo+pE%|#tv8o?)qUT-o32dGb@iybjyY(O86G7$DpnU-dmYC% z@-q~CiZ!3cJ^tXo{`C+Y@{f3S)?o26zs}BXlf`1{uv<8`rxQg(yF0+Geel*J6HN?t zMJBcIGD(N=3UXbmBs|Np45eyrt88ooRZBDHYm&HorOx(8|A`-!%# zO>6Mfv|WKy;YyoE6Eh7^Rnuai9XFzs^&9jMZ*0_PhP|FlHqb-*`mhZf-gpC6E>~f8X3OKDcL}F?09L9j-*@DTmM9S1LvoXmKgS06rJ8oQf`9EQ|v02`Oen z^i`-UX)VdatE82Dw{`+(+TB=8HvR{4P5EKk*Fm=P2M?v|p;khY@(TI2?~3F> z+qOrpPKeX|x+#?`;mLBI{on&4${qUV;Y1YOS@+vYT3h=~@;(%%hhf$E3G?E8%#Ir_ z(bV14)7aC^kQO~kV_?PM6}lJ33>q6cc1+6T$*?c;B!p+ij>QCll!$T4;bFM@81Qlo zc-gC@l=^=lISb($MuDlaii-SvvX7?@PX_yPT0R9%9TB_StG&+y{knSf>a~Tv?*9JX z=1G&bZG+pFn=oN)xQdr6TaO<Nb<6sJHOlu)wXdTSfOg*o-Hdw8OGAOI&y{%F7==>w2+60 zAy(URub3cINP@N!5gMw~y0}>28}{)5I^2z4U3Ss-w=uub)|XGV(ZN;2LyFT1Au6rT zfysg`|8VDs4`gx3bvMNPG9~*M4PgdXFf76POuZ(d2Bz{1(?J;bNW2T&HqhfEukUdh z2$r7@J3&clG1V-fGZ~Gv$FF-*R(R6(x=e&{AxbAv)_5r+<3hvw_3`nv$J5g@3cGlD zw`)Q|LY&`sJJXko0|!lf;DHBd!4DkLg2Ik5FTecs2!{$Nf_EOhK62z8K?+`8x$Vav ze>~E_jvl$(i^hG`)nQ*zSy4q51gKEB7xemdqYrmG9eu0Z6RRJ754#xjfmrW|2q5dD z*Khdh+rwFBuaCoA8HY7#3Z8u|xQ*h$xw&F~U=BE6pk!HBR5+%+d z+y-1wkgv3pS6GA+4Y6t!VAUronyuLo2P9}rKO9i2+7xzt5 z9Xon^mJBv$^GtRbPs*J8FzTy9r(yOAo(3?T6tU-cF`UGhTY9x5XJ;yF#$K9>U+=Zt zOrwu~NW~Z|*ORSNh~A!0*4n!P{QE{KjakBfy~Nm1(OB7hk=nVXdWkkd7A_4OlFEjV z5Fyg;<}#A*^)OM+kPmU95%0p!&9G(Q*MVfTbH-3zWGwEO#<){mWGu!#AS)|BKd01K z3h=vYr!mm zN%MVth7Ju6uD~mChMs)LJp;SYVQ7Qq7>q^#(@Jy0(EmO~HFY)U@9J8FWI#!- z#Dmn9mlhFVfZVj}eNL)6F6`N}=X^U#zO7%cibSNxm|)dOlEq7J+T}`Lqjb`vk3Kq4 zVs!3A6~=?_zyJPE8Ae|S(|_(~pX$Q=LI|Lu0))FXG*pyeDJm$dsD_$z^Dg%Bjqs)> zLC>+X=_23-V_TmMwWnb0mEN>#9c$lU^vXQ+?29@P8H`?eP{+8t5m&Goot|_9=a5Pw zH{^kq6y+5b7hOG{lV4U=oSOr?9_=HnLb3tgBt(aONzka2sVNH=o;!IWJ+szspEgtm zUXc!&_R1?ppoi_gym-lyARiU?5B*5wc>a?7tekzRunHj& zgM98jKcM(@?~&>N*be=xj(qgd6HgrZb}~%B^mv!saHN+e_e>#7Hi5p&Wmem>=K#y9|lEOAID)- zDE7vE;8lvBo&a9eK@D)c{+VZ&-<)aPQ(Q(2R-E35+FS2yDj|^Ibm_}wzkqz8c_3}0>#1g52Jq7BAHStv6-OT zOEjdT57BNi2EjhveFRo`GXTJ(7S3wr45`s=?VO+Pi_gE_luO)`o_zAjBzJOk(^sE= zq4Q%~TZy?Eyj^WRdp0f(dp2I`9UTn5$=l$JZ{pTmC6Nn~p-$nDJ`#-(FXPoZz1(bq zZ5cxeBiHbP5ZA?h)LzH?G*{;dCm{q|A4o;pk zXU-H907`dlUrDje>FSv$c{h%{B!5gBS+vo z{DP0-IM!4v)`gTwlsazsC2Nd?SBxzE+}u~#yQT) zty|~c9}m$KfB*a!sQ=or*B~FrUzk6sZSUT_ZIk9t#jf=dBc%u2R;j7v1*A0|Idb>1 z|5-qL6zwpo8UHte+G%8>=)V!!9)&FomM^XLzY*Q;FfiOM#QiUcnuzhH)$*uBrCN8O z+}*vfz6ueJm34(KF3t8HJWfk{soaKIiZmX_4VEq%UrHqjKC-M#BTs|X+2H+UXlPd6 zp&x$u;ZXkNs;VLtGE!n=RfS(tWG0;sDFvxC)WQv++)qsim6gJS4*h#{RA`_&4W0-C za@W{D{+K%zG1-5@Yed`UM$}m>FGf&e-l@#0s;a!4nnqh!Qx5`ncsD52G|-f4 z$oOcpnL2vYs4f}EHZ_?$MJUP0b(!1R;#?FcO7G%5itX#Ycp0h#?*LE^r^DB;?jN~Le1vq}9`|exPo^~kCs2JtQ z$a2zVl5#XbX$K_uo7YQl3lFY*_CPNcVuE`|SWmL+M6zGuPgIar)SiY6LtqjwWoAvk;k4^V$EB*uN zCVv#S`AvWHiN~|9HuK_R#|96k5*$}2CbM2zchv}{kAB1{ySW9C8$f`4IGXhY>-3r(^qpSooau zV~6(d+xzFvUAqsRETZX7wM8cn?b*HakG=a296o;GLQak#zmb`Pbq$gI5(KKs?KiA` zsGQl|Yh|4ByUF6%0-GR*n_F9v^8zjcPK#2hRhgU0QKY`P`tq484Gm}u@|){H!*YFa ze{WBJe{UO&w_wzykApaO*FvYN$g96zet!!2y$16+9pnEg=JRsQ=Y-bQ_Ac1u5S7u{ z(bm!gf!om7+}_m=HN#j@-zwx9RWx@YAQ#I{XFI+F?xwG%*0$EBdJZLa;dV^G<3Y2v zAo9Rb4XBUCSNGFr7CEw$u+Fo@(u*c~di(lmyf3U0fV}Dj(l1_J78c(%LA)ymYDR@u%wTh0zk6taTF*h@={3U!2Hk*r$cI!q!<*jo z{c=}%f8dU=L0_=!nC?q~jc1A#@Dkeovo*oa4armR}%)+vgK?)p4F)6`03kBm1h^{_H zt|Hi^i)>-285?0{ZraeO*ulQ8fpM@>j2Pq=5)q)%bKPA?oq|}SX;N>pr^#@td(Z@` zKgieWg+tt`w!pOen^EPP^vYBu@_dSfV0!#W*0Z7>Q(8iH_N5EwE?hvUz{Sk0%U7U6 z#f_g%8YatpGatSPL*v%3+$*?Xcz{*M$M`JDw3E|=H>L9f?82DwO zVG)rS?C7Y$gLE=@jo2XGP>F(c!xB!-KLjDWP{ z-P%z)==z%xbF7}TTcI5B!EJ3FeWahm0tt0V%=ALu_rkaA0w)xtwts-r!n6c)<5SFy z-@va+FgISt+!)REw_!T8bab|Lp^lTXwA9_rP2Am9TZyS&-Ofqe0(E}K){t`j;On;H zQ{CN1tNQy%BOmF=%IoR2;5nO4|4E&B`>&YMAN?IfEykDoSQQf7#dZs?I14B=D^2hy z-)Ycn-hLdB1gH0HUb|`~^1x}_=X|j>G_?QN+NlV!y3an@aXe`Mw|Mgfn{SXuqW$08 z`#=1#t0x)ZSvV9Qtp0MneUPhQRQyVAbldN z+7lJ^orr}<0eg- zgq>HO#V}g!6@p}_LBT;Gfm(3ph!L>js{%rzhf*Cwr&ij#+Iz*ERH5z%Z!zkSfPlfU z4#9*H$#K1%M4?39mBnM+z0_lQXRH(>BnFsGKSoSq>uPWB?q@h&VQ%TcFk!IlE{4%f zop#t|IHV|eY4EF#4oD|6OI+aBba&;t&FwYDY7th+HZksCixF#F@?ok3J3_eFZ!^ z7d-k9cr?1dmsT-Us}T!Y(@kOa@G{p{6=a^zE(0iSZEYLM0W(%?e99?X4m@Lw^BKD= z*FPgVqr=e|hUkFsC|JcGfBfj0H7BeA$%~+tJvY&#?dbZSHcuL(Q_DOw7CM#09thXv z<=U8WCr^$aKaOl2*nn-RC%&_t%qE>b6dsl8;%2?L55XidE=_8;R6+{v+)2MdQ;uJX z=`Z{w86XKb#2;ogtCC+zLR{ESF6U34yjhv zAF~M@@<-orGR-rR&ra3WHg;O{aJ*87PFnQt{h8>dUw%Pw=;edEf8UHSw=H|mUdYKW z%sYGf^qI3~kL}*Ref!S6`w*@No*0<4SDlkq#a%yXF}qQI4YS)2&2;p^vE}LM?(6F! zk@j_BE$ijnT)>W@94l3O`+G|1?1qIF;m!e-2e%?k)qJIex~lqHEGiZw-wAjLz+ba^ zv5Pxv>h4Puv~Mv+`#&Q*I~gsf>`lU_E845vh}e$$$EHWFGCH_!lIVNgfi)U@W8UKzk(I)0?(|e)=#$I z<2YU-X@(?nlAJWSmXuV~Akd<=wyvqI!&cRWqNq&2RnLLVO??Jt+&F`472F5drNVC^f+n1do_bCvL|rGNr;-Vy z3#N8rhv|-Goc=$-n8(1)fcOPAxU8m<=KA4PR#90~TUA?C21_GSn<~nRVdmQ(%!ozU zWHYoOgrc%YZ^y~b^!3whcY8k}F6yE84O9B-pv{C}VZUopB1sK?^MyMn%?4J3H`3Bf zR)zn4z}1}LuIN9B^tdy2n8Aj|-Rsf}TRl`LDeNikmb^UI>2qc=%@r?R#Y^R_QuXB4Gpo;$SRr`?xP$r7); z;*)>uIB_vEy(r_vpKHE5fQiPd%}rHB+0cf%Bc~@Ml=GhDZb5y(@V#1G-P8@8^78LH z%TJsr7jQrR#e91?Elwn&-gkGdzpA*d&w3{J6M1BaytBEdcssPS-^)-;oeheRf0lc$ zr?co>5h9mRJl4+I5e4lT=Goe+XU4sqM1CfpE8u^h6(FkGjy)62H%YO-Ko03SrSig_ z+%%1QAN91Bb}7Bx?2UC{NtlkGNDqS>+d}VqF~tD|Ln9%+m%jW!R3Ad)Pf=XZ_0q7s zQhQ)HS&WPJnOV6M?FEgVGR44Z>(?U^h#uV6h0tf$jbDc3~YeP0mb0+lkELQ3q2tT3H~ z-a4BG^dUQ4yYGh+?d|O*yWJ5OaQAzU8zwt6)zf4n^MK3{CwHX`;1LiqbmnufEr~iF zJEpYSq87vZjxb!8zUnShOIgvF*ux=jzj1$r1U?vEtx(&#x?yyvOJ8j;Oyb?_M}Iv} zrLhH=7?_aJ_e`+!#7?0%maz+myE>)p&G*wS7GMoU*SDQ4sKHV?gU)yHnQ>OtYE0oN#R-peHdtzDq~S-9h~0_{Dwp*lP>*(H1Bl(H1Bp(H1Bt(H1BxIa|0n4U>Qubat5f-ka}pJ>x4gPjS~0@ahX_}<3s>*I z-PH%Umr~QKQ*wHBN>8s&8*pBI5qNJ2WY$;EAYRn5*DS6f|P*=B{Z#c@nsMiu$!qu)yU zqbE+Bh@h%+d2rI}ufIM)yXuP-9~q?hO_3;U+P{)v8^oc0@2R`)n)f0clCQk@`XkRy z#_vAF_Wf(OSV<;!OJ%a;jYK&Hg-Z5mi?<8bU+8?HhcMxS4ZST|my!YG9e z8+QJDUR@tWuL_0ZNg`30T6;LTCMI!Qpr4YM%@R*C%Y1 zZgh!^3^ARh8!tu$KHDDtv6r)*OJ-yEySm%6C-=}!%cReH}c^WWMS@d9IM$! z$Sgdo#ML(#;Sde=#{A;C$~Nq=7dwu(H7kGLb2_iK%^Gk=thcL#B!P4l_hy1`$FIpW zZr}bUx2eN7@l8PW8pc#smeWZ?(}>}BE-mfza+As2Tr3FSyL$3`om6YXr`qbOiZbkY z9nc*xgx1x--oR_RkSA1p>C(uNu|XP{t5P3|U65U5(|W5dy&YGNovy6uCQ@l%W4#jw zuD-EPY8d10fgl8}D_j%yUKT;PgTfeJUnY1kW|%H4#AojFQ|Mgt^l)>5Jq0^GFPbIb zu43p99M*sTC7F>K=+|ln=5X1MPV06?YcC}a;cN&UIEXI-g+@Do|2nk>N%h6(KOu{Ib+T1!SGqb?hgMel~Em zj`Mn*xlI-1@8;$Ie~n15k*HmUKeBl7;_0Eimo8m$ zGYpPUmP3?RSnaisJvQ>r=iYnoz0rQMj_Trnzw_d>T9|mTkW{)yCBH($1@x;#by&W zcvx~phS8A`e&8#g=sO<)a8mFn#IW8yCfZ%*GV_6Z#?OnAY2bMYjf@V7Nw{;6XXLO+ za3HwDtV`zM2^HSHUT$uP{>E*#w27u=BAjua;C z0P@n!^|?ZD2voO`7&e?%3trg4csTszjVlWJ-0R>-s#VY^idUUd@H&oMu_7ZA40-5} zpTGM2^X2bx&sX3}x$yv);M?@Jwua8X*itKk|D-K;!F7xo z=Orq0GG<%b$a}$;<3sJ4d-lBi(!60#zN}FBMNfb7<(K#DIb(^t_b;w&Ya>%|+3UGd z<6z2UNn2YzO!xy#sTGNE*|I;bW6Lp8YYW&i>(JR|_?!q03KjLs0|Udt-9=aT?aqWf zvdzZX#9m=xfdR4}tGT1A$0`aBM?TvqnM~{DVXmqW_|nZMkQyN5u-^7x&z5q(AOKg| zuVc(HV}>H)%E6eAKdz1jUrxItC=4h}PfoeR$(MuT#)d>pe&UJv_@R-};~bp%_~Yai zp7*+*GaDR?DOYK=QXVK;UGx2fyHj}_7`UCt}bKf4n^$-DPwXa9m2*Vh9-{Q2jfe^37^JA3nX zu0wL@4!WXe2M9qm0}N(jZ; zs$#HW=D9T+4nbg^&nZ8D`bwq2qpXSz)P4W+@8@cjBSwty>u-}t4j@tSosds9Y}jzP z;DAKZCJPG-B_-!~ZH@3aP4)f#3`fu=fJb*LoWoOr8rv_}xgx|Gj>6w!yim-7nBn#ll-1B!%rP4Braf=!`^+~ioEz+wK5)*RMGt{3$ z+?#?%hxC>1!d>mSQqW}8OTuOKa5U6_zyKQLo}j~4M3R`93M*yG`*0$V~dR#(=7 z8FMq+JA0^Qy}N;PQ$tz~(me{ia!75>#x85Y12t$Ou|Eb<=D3MU*fOHsP{L={wVes4(ZnfjQ4Z-4oB(VY!#ZRy`)R1X;C0;~D@$=kXu5P;WMZ9!~XTR$iI zi`lq1kEYyQy0!T%_>HN9_x3x)<9So=WCw}{*43`(-@&g<<#>#e-VTtWm0E!L( zYVBoZ-s7jmV-fI@$CZ^aQjOMCAyr#wCTt68!$>>=0|Q3gd$%A6`}?7Axq#&YecS|& ztEe)2cxkWaI6Gieg8byy@!Xg(iHMqKQ$C?h{Wu@!(`{SJ&4b zg=ic(bz1Vo30iIJjC)5X-hKc5_b+&E8U&#}3Vt#bjfrRb8~%yqG`9zbNj$^t0C3C1_qyB3~Y1%2a->EP4odVMEv2JH9xKTWQP4Ac={iJwfugd$S2zGw2y<=|KWd> zjk?>)#zgxayz97e(Sf1T@4o-`P(?*WL1Dy@asNAkIhkBRmCdchz`ng`5zVo0BKMS9;{*Ub`i z-5x>LjS+O+CPCL_uIairr~Tt*v|$2m$Zcq=1llO!x=!qQ1lCa<+w-Qh6g@#ehPFw#Dr)%AD>)JC3xw;2--h;#Ks#EXy(;vw z^J#sq(T;|!dqM}yQk{s5#?#K#F-R+hiA+K*W!GLb#0IIQoXD?~=4n<|Rc7z|efxp^ z=~pW%kvs)xf|7h-)UkR`RxEC*?h^MPmsTo8KCR8x2OLu}1c^JNbnbD(BA`2 zmcUZN697^zS1lQoz3V6VM8EiA(QcO{kB z^q#iM#M!*%3x36|+vSWIRTcMsnPR)ys(r zM|^0MeL?>56Gsm1If&qBfF4}Ad^InZY~T@Yp*V3OJNp!r)H7$#@sf*I5k;1pXYg6K zE;4fSue&!MIdUxh?Af!(+V<`$-n$O?DrA;H79317jBV;jUNZU~n1P?Z-;e~e>_%XL z%z&MD4FH+epnumRDsG#A6e~u3J@L?)GzV^DDtUtf}CM5)Bb(vjp1e+t8l78Ldp972k%|CeS7e*R=@OH43!m+tBt3v?B%D zVu7|wpw-@n_NLc$J})o|ZrN87;ku9AaNWbgb>+9AtrTeO0_{fOp3e)k<8MQIUZCX! z+A4uoD$owP4Q+)$J4>L=yP=;g*Jy9ls#}()BeI)MH$5DLHdUl zD2v>z1~rfDCVQ#`SShfmo1K|G-OB%+ae;IY+Nf1s%OQm2^0(sHnT;)tz#K5w1Q$xH=D4Jcy*h2cdG%PVY4<`)c>!8VEVPsvI>yt3Dkvm#o>a$wmR2z{ z8j;?OS5;LZO)tx>I*>}2Du5iasEZ*2*oReQwqYF(uWhHLP5k8TwWX)vy4iKAv=-r^ z6DGLU9)`c>%fnl?1O@_^VUSGLZ(?n7F}9{+xs5gT!#|Ths3daHqD7GQs7(N;mX!d6(dS5&55-Se=YKdRi~7dg4iccwa>4M~*yi5FQd)oOl%%*U*%(hT_vm z!nUzMC4{$)J|KNiq-B$EM6^3zh`6UXFEW`#Y(a_b9*bBFkBSFiOhKXGQM1g3EOG$K zAQjFPiC9)qFSLwakyS3O@$;(@)=TPhjDmJS<3tCrw_H1&&>h9qS?5o$+g604<}NZ> zOV5cDMb&7cyS>*YGq}1$!9WBzv$5BBWbaQq+sJxeBxyUemnR<~XjIC7{mmDj_7RzZ zdt3=sWICZ<-i^qjq?M`-i28=o#lA!SO@SW#?%|m`m!u$goE|F>!|S4E4X$erV(j5; zI=toR<=mF?oq)y@^=adjygXuzzf7XkNB}JDYNgI3s0(g#Prp!Ae>(}MkQZ)1qzLzi z^G_yk?*#s(!xC`wIai41!}RjR1K?DPTf%7+E{0P$TDz5YU=e5&1lnH&T9H5tgu7eP zN(I^`fwo_uH4C(|8)&H&_xflllt`wudVzA3KzT->Y`!VwUpt@6f2Y(7bm|-E{vMx| z%1VU09U)v(EL<~Rpk;1DYZquE1lnqWwpgI`xee`2uj_nP`E6)5!gcSt;kvtp>$={C zwo#y^Yz;oxF5L4;fp+q3XmbSGZh^L0ptTFMF}I;@5NHv@Nv~_Xp`TH&^OhWSNuX5- zw8a8#yKr6If6(5DRWwMTo#dcRB&n$a_3r|8(|=Ik$d5{aI#IZCkwDum(B5tiIk}@l zpq(JlI_E*#ZLWJjxUN{B^%iKS{{6ZOAmtvw9HQBd4`97{fLfjFu58(N#iTt@*Isnt z*skAx*}wJk6#F7%{rzTt$G*g6+3K~w?>l-SKey=gxii^CaId|?rv`2J?XK#ui>%%7 zAvKuQH8Ot)XpsWq$4lCI_BXmZJdI;1^ZVJf$f3jB+(>mnxmCq>mmb;l)1|D6ei^B{ zQq;tRj!k5c-H}bp@6y5yVceX#i(Y;D>Goc%Nmgz6uN%Jl-xKbG4ek30;uK^q4ePlp<}t5T$Ft<0~rSj;kow~9#Z{*j{wxoX`|Z>+mnY&Ex5Vu?=;Ql%-nWjbr{xkO`k3p_av^ewIWRwPd)BZ--`zJZry;-yx7G}3+9zhoJnafwM z)MWqi%P-Y~620LVBVrg!E*G~O+)ce#kDMy*vC7>>J@CNT(CE7!U9gDveCCBElfy`M zcJ}1SlVw$h1}r8c*REZ=Y10pzcN{u&==en=&=g4V0kpbiXI0gYKgPu=$3Oe*v#~Bi zhV+htt94k%Su-3Rh~Y_P8~Q~COKCdB=#DKzDb<@(djv5;AB|KmaNf1_uZD2fz-iqxd4a(6F#D zgBqW3^;>jWdH%9~pp5n}z!&E0c z^VX|0VB-V!m71C)SY);_y}b@(5zKTzU{IB+y**)33g$IEmXS1xi;5*c%cB)+@ZXSV ze^vSsP{5GlM_pYmAFu!A;O>tpfLL?$(WB&7biX!S+1eU0%ugy-s3i=_7q$|H2Q<2C zVHMm4P&w<$s%6UV#*0l&2%EE4R>IKdVYA5;wY5V65ZB|PZSS&?+FHQzl5RoYq)6v8 zF*~0_Z_Y$--i_W2hn82In{_@N&aP7zGTTkX=W=ti&SW4d?9}P(tAO%6mwg2gda0pR zg%_bpUdXR#rL(Onud@?*osFgC^+wEx+>-LDc8eh#TB(nJ5`zo81jI*427vU2b04e&s&GkQ!;L0Xgm6T!17Bn7hy$b@>8 zy||MBOel#< z2w)n+J>`rEecfv(K0E?dB;YHln(d}#9wSG9<&as+VL`$q?Bhf>PGvxiU9B#{KOh8= zMB#pl)^^uW#4Scgqr_)yENaMNRs`u#c`Ma5Pw9(Rw7%Xxh;ZzXsoHVRDh<=xgsIun z4-{^v4f5uCbDaa$kQJdbp{e-cv2kj`UE(KK znU2CKv8brgN$DB&F~&mhQyslFqxJ!h;-`pf^s~Ti_k*6IY|a9=%>uVYBG&HLUoYkq zq05VMF6Ne%WS=^H_EL8C#lj-YvZ8{En>PFcVM^9gsh<{PZLcZF#4hmYmepU-kXx(^ z)vYzfbyU#z0c|T1;wV7b%s2uFnPzim{?4Dnhm063Z>~K6;e4RBSzBD@fppiS;dkqxyrcU0vKb{TL3qDIwc9Dk+#`Q-Si%hBubJ^+4Mp- z2?-=26o(!0m!K z);zU3_>Pq~Ah=7Uju3bN4B-PvHb|y52ATYQy*OWWttTi1j$YM>lq7K{C!=>0M(+`f z-f$Jlg!B0o;=uj%Q^=JOJ9-c(wu48{pFg^LH%Q$MokC}GG48giODFIjaCV>x9!|nERNVO^=&;72yd9W+uABmAH{xpyaeRa z9LqAEzCm-?Bab}&^zE^pybXx=f0O5lz2oVpAANM`q|hLp+^RA8NG4Xb&-BC-Pn&Mb zgwI?Yt}mF|Ox2$fx?3EYPpeI)#EfO2ky|#MdWb{9I^{)rAjV7>F^t)!2CrcSVd})e z-eK@IY)T{lr}*KA&D)Qht3$eJLJziU;5M6jq|#mDu-WT+7{XPiV0(I4lq!X5z)N*= za1$V>PNOu36PX=w%K{95EF1JPHyi>WakaH0SxR9^h>9a9rO=3w@#7DLATp0fSk!2! z2`MRH>IyZ*B71qHiAv$H+4c3&8b5P5fEI1d-L!uo$wk!C-7RfxWo6ykU_WP{XUOJG z!YC5H#)TNgahQYSFb9prHf-r`cTW$NoyOW`N8^cO7oiKNwA$9P)2GRhRkb3^v)9FF zEs7&HqNw(-TQ^itoB4LoEIMUWYNOwUEioY3(k8(E^{ zy%u9XA+&G`8er7Kv*auH!7#oXfnxIxl-cpvlwSZ0j4080>`7# zgp8dyA!F)-dC&~N(x{7SLn2&T+pLl3Fs*igW`=r6ieCCptky=(%E6n4AJ38><>W^M zC(e#*ugR?FKU^w$@T$=ci?K)a;2F_YT$6{dMr=4K#)QN~&7`GSY{!#!X0a7`e*Wi5 z-X~shiP-Xc@rr(8ThAY~Z4uiV#I`fUwim^=^?%TI$mf4`+aaI7><`-V;&tbX*Hwzw ztroBA`h&KEV%r&F+a|GXt=Lxo2W_u;-J$Obmj3_Q&vNm)_g!^eFffn-_WOgjbzsShKeo2#XG)1yyM@ldN=IP9b69@&%E>g&5R+XV2y$FrQn}#>=eXK95G%ou6Odj^&#~ zqMcrMSA7wX$Ed1pjX}=BlG&r8g0MFP$EAc52YXmlLIP8>9eBKhuJDw(&!nSd6b|K6 z(?bdBFnO2^|{e*Qx_7HrBr;HS2zRXRn0UahxSzb^95o{Pvoaw3Go!pu`ue==9p z$@#gcXvA8Rky%oQS~pmWGQ*1`U;FbS;OI$Lc=A3?V_H8>hbB~wD|?bU)ksc+|#OPp#m{@2C8K- zFq^mXv-#=5F$bu%hxryfpI|>fcI?>aX+UZ|tZZ@o0zG%1qopMc|Mf6%n@eyhrHXaB zNL0h^WK~Me0M%+^8Jl=FbmM35PGrkAy)9hI^A*#F(wX5H!kXyoK3Ll(h9-Pvs@wC(vEv&x9qcQkXEiIyUFcYJnhS6Vw(I+^d zsTlo;s(J^c7_$lGxeJO2u^Y^0D1cQ}n6f`}T%fDwR8G#t29m4c9BjCllXI#D^*TyQ zta@!kf*9@jrAfPPYb885nM3pCasEZwI;|F7jE|rqvjdm5a>}j0kh%i@{PCBq9J}?> z0L!kGy6P{(mQEKk``1ezAL&X%p`wk4)nQ;rW+km;ca z9nVRoN6Oxa2n+B>kH^HsjEo5z8yf?wcDt3{=^&ND@txf5sa1HUR(+;@+ttGt7`0a zTX9KAaY-r4xK)&w@68H!)>Y@9%sE{MKWS@8>tN$~SS}~d*Eu=qU~65wlL}*;GE6g5 zY|@7B)+3hrt8Y9=NHLc3sgKPa@;!V4we_6<#vjhGpfS_ON9e*vX-%Vv#x7@BW0*`y zqujf8M5wG1CpcDm^l0fZ{8A}11}QoJ0k>2Z9)uB$TX@3_H;iH}3cV8eTyIw zS-GUOljc}QMXxqv2r*Bm0w^R&9mdaRXuAsSa3nQ1N)%EvwK#(+q9@anL9KS9b;9`q z!nRV3G=y>_a5h(7JhprH?!$)<|NPy@VCa7LqmR~q`57Q*AAj}z;nPQc*}wD0-7pNE z#)W-e%GK-Sl+M2XK`%@qp8Ssar~rc03l-k&@`3%|ENw1F{i`OcUabRcS%+G0q2ZAb zlAr*Uc?3!fMg$v(6sdIJD<){J&=F%t2dN_hK`k~KR~hK>qRh0}Ad#^+wSw-Ai0Gy9 zi;`8Mu(}k+uD53ZQwepfGWkr}Tzn1B3aEF7(1~8LI?LZNa?tKJpaWND(Z#|S!ru(8 z_@BO&-&*&GttW`BCyTAu54UdngVy=iYF#kgTJs03tqlpmmGb`f0Is z^}qEwd!_a7Q6r@GC8=GU#lVNUm*6v!iM-&ooe0dDEQww0JG`SIB6AJg%kR!Xgyvf3 z5aouPg9y(5J_i{zxPZ0Hou0}q-2}{)1)0Iz=~!cmhj(JmH*Nc zWI-f%KvqO@2V_YkcOb5bct=AP#i2X+A8Sjjc>S5;_0JDqzY~qc6_t3L|GgJ~Cs(9B zxjs1oxxRP?a(!}&q3dTru880AF33a%=2-^O5-KaE(T^gZFm8p+D}X=fc-+d>h);PkWCO4emt! ze13_qlh;G*zFs(9$x21oj*}LA`uR53&78XBr!}9vXSG`2`Q#sGx;W`Qfv3LzSD{t$ zY!XPvt6F_sbK#@>Cnz1^@}|B~y#DNcbLY;z{=TvEp;cd^Y}6oVt`)RG2qIT<9AlU? z4}5%|!6mA`;V+A3rQLYrjnii>e>nrx0SFs0FD*(5utwL`BGig(^2!DWVaUWc2LZXw zZbP{^?B31wM7fjl5*DT*r-oNe@T}p7oZ(od9E#NH=)geG5o)wr4XUp~3pNCj^jvJt zs2T@FC@hFd_@h$@I@`k-jVCc0t1%i6VKg4bXiP!f5NH#&y2iH7&PHM`+w6^JebeLg$OyUk83w4?XnIO|i0?>`yKJA%gq4=;E1z{?M4{u<&%?tu|3>P0MHA@#M45z4XGf&%W^N zqxk&t$-y1FaIx$L?bO#ZM8%F;Ey+OPIZiDxjEyb)YD>AFzur&61^&AJlMg@qU>g!N zk3-)o>G!!jFfHDs-em8OabA8uoqv=Z>}OSyNZ3kmT0pZ(2!m}ECHd%P0wuFmm0Y@b z?lSJZ0VzQqb0A1AyZX>&P9$-YY}}?6%EqmgqGmC)F$<#7k+f{Eva-AnnwT10!J;An z*u&MpwyEVPmMv#3Q&15F|Eu$3y(rbDBEOEB6s6L}jT5WUtxq0r$XB1TOZ zjl{Ot1aJW2GN@|c@|j15s0sUhSah^ef_nj9u?I}Gtarczg%OXwUblo5qn6i0h89AG zZjWN{D#4ScVr|GyKxne0-j2B(7Lm|4hZcsBe!TfCPYC>^)i=Wr)lwa@x?8%I$iWgEMut?#*$dPeq>CjpTHRGhfNTZSYluA@PLbc7` zP6V4u>E{o$jW{l(nm1Xk)tK8uRef2(2xH2MY_&`-T>ebShEc`T3ubCTDurZ*Imjh7 zGxewwOu5WVP>9Y*)r-}Mf}+IA;Ch_P5GBDm5&tAc91;)U-y?~%QrJB>uQVr*oLRcG zgq&N_iL*=ov0&)@bQm_3myl*T)C7=DTr7+3beW|d5RT}K68NXgBDP?7w^I$>ze>=^ zS}LeovdfGgiyBCHZR-XHBZ)<3F~)XMusDl}bd!=ORLzhTK`;ta&=0raJft>~fKech z2F$arZlnV@basM~969*_WKcz%23EGaztvWeljCyHG$+U3xw#cBE=#DvFd{Z(X6o$u zn>T;{d1T}}@BAY<`NF}2U%_iAm#Hdqk*&MTOog5kH`1^$yva`2XI9b1|uc@ry zlt{t~mJ+(HEnT1l-HZwyPo|@a!n@R$u<3UJ2{0Fbtpne|2cQt%Nxg)91Ao(f1SBB* z%9zbBJx^AWP~2@(Lw$WiJ%Za^jg37$_|Xmk37A6VJj%)V8KPnVI+dYJANkwmiZ#jr zv+#OR^W5!rksu6eV#x6WgUC=4d`H;(o!4`qYvThhvdY znPMDY#n1Ph$KGt`$CGy?4hP7Z@5eTcnoFcs@$uurll(e!CV5(^B0}PPi8>~k)!)Ae zAg=;+poa3$w^IYW55MCbNq_&$RKZhn9;Y#KsMjkz$*%_vkf$}+PwHp%SEIg5pav`& zYT#tlYX2ZA8VzVpsKiRj72{V@S=Q^d1}hjvpvlhS{0iCG-)ryyKVOxP!il!NC)iNZInn zBXjC&YJ;pWze19j@k9nZEab3Ao_Xd~g5$%%Jhd3vY;gM}-1F?SucQ;YE$Tzn%JFJ| z(y* zReXPKZD|Skk}hdz7DMPzJLHV5BG9+ zXJ<>h7d6cZKHrkefZ&?(*M+MIlg1$L@6Qh)0L}O#l26jJRB9DA0aOXsVi!=!DJ_@8 zvK>X|&h--A4hEA%g4xP=oY+)ogTWsap524}z0FSE)sHj)W@!I4VQr3pe8fXOLLna^ zQ8cBH!{tU?Y>eF2Tx}~lSGYS%)<)$N7w1r#A5&7MeNWKopO5(wb^Nfzlu#ugEFQy; zS)EF4N9tPHeYv@r7?@4)ovEnpB!P`f^R@fhEopQF9kE)2;-jU|44a#~;NC|G5W2F` zLPtlhmZN$g9yB8hGw4vsJuwSBTLc#+Xq&?;I&#~{mu#r!jb&T=C~gL?H8Emt zM5hgB#@|soDv=wq9wIRpV)0!QafS$d*DJNVOR5Vl71&56Y|tIba&oGwa&l@oM7XU| z@=Y(brXxQ*lfRxF_-?}n%(>@KYWv(d4&G=wGt-JZJ|c&k=w~XxS53f`pLAAH8juO8pz9pTLlgdA$TlcT8K@x!o|4BP&TO!zB@TSlKyG>~X*UI3_ z@8S%ZnD(|VuWvvaJ1zg{;c6;sR%WKqA*44z;hl8zYx7{md?y@Ru@PxAO zy3$wG8R#n&={NL}l{0J`H*TDn;4Z)!>_ zMU$tt4leiR=EkblUUcOdF2aO~8?5yC-1YS=JuryM*lLrBnms!PtmY<@l@18N&KL3A zP2*tVu-*`LbQG)}Qyef|=yt6MEOZF28IbuR@wt7n*1W z;lpe4Z}G`up{^fY0{M89tjT#;lP};Gs>ywm3461K^E&FnvUHggykm^YCyJH6C{n5I_W028b*cHT4hz zBLUxd62391sXCkq6F?vzZg8CRxH?)o++hF5byQI30VetV{Oj-k@WXAZ(if(tMme ze?K#EKRrEeH!9Ym(wCf&0RcPCVDp;&`zabAm3qAdej=6Wje02y-&-6<_2S=u&ebva z)}zQc<6B>;p^in^>%+S2?Ewo-Z%;Q&HK?6g3MlYB&9z_*<5)0?)%C76^!L~E`T(uY zw0ZOR@$27vXX}N_Borz`LCp8R-m=ZYzVJeGWAAECHrQB_hk7Gv)JAF}mZEfQeH*Fo z*vDqiryk?7G6aoq9X7v*qHS2FfsvB*m)5N|;7(v8A|AysgM$)=0nQq_aTfLznI z%b-*MN4-jZoFzXJx!=|e(mPjQuTZ>Rynsg#R*miC`Yq!1!mm_}Cp)zct;2A=kSGZi z9z~OteCVN&Q&RC)xxJ#a$W~b0EYERN3&<=*$44^`Z5Vaxh7YnN4@^E}O+&W%yL22t zaGUsCYByc{A|}$~^wD|N-<_qmuA$l1Qe)4eJrb)D8!j=^@IbWH5FDZl4~%1asTQgD z&1R$biItRrdmFZrA}Wz@XRDDZ`?2!Fjs}H)sZcn%u&}xso3eEs_MKVRfz z7aht&lP4c~OEo#NtDvCPt?oq?@pNi8IoOxt;#MGTO?GIq``|66xERCZ*WK-;UwP^I z=b!)UeVpvR`0jZ}kIs85q87Q3w(9+R&%mt){6T#ks(Ume#~#?t8jPpk0K6CyQg;m+ zw~Y4;075T$Dzbxs?0KLUAv~{3SDsF}9t_g2m1Tf#N3~1;|C4oy;eK z559UnwL_cwxClLVua=7Xj>_YBR00~@La$X^!2=Ue z#8N(upD7$^`2AtNO$0?8IdbGP5fstZaPZ@gKR$p!{xB$FnFxw-37`lN-2oJ#M-iaS z07WdlACIK~MQr|Lt>U^79nh&dMqGC*9ZFB5AEI9uj)(AjD0Lm4%`mdR7moc@Gd^=2 zass{}2?IF3kUVnTb!m7r6nL3sS3nUK8ImmsJ|Z296At#4AX(4P1Wzm+?9V;2pj(h; zuy*tn0I^S@BhFccug9m=1H&8O@9VKw!}npUBYOz!MdaK95y1+G2%Z%X5s0Viw7$+B z_<{JIPM=mMUprc%7N|khAQM%9l}e$~EA(m>JP#RI=>dcNk+d+uDd+(ufsnlnZq92-4sV$Omd!C&K zfBhH8Jbnj?5f0{ovB++*_`UKf5_^_Tj1CG^`Ey6nnh(rJoED7cfymGH2eB^jY5Yq52^F#%o!aCTzSUhV5hODMnu5p8aHOb z7(I21mD+2O#xBShof7sx_q77*1X((QDUy;)@5R0*@S^*H`xS5Jqe_#wq73ihWS9C) zyDg%Ql8=Q0yN}j8(YhMHP|ZZQ4WY*UTibtM%l44A&mxBfFNdgm$%_ptJ6^O@JYEO_ zBpI?a7P3UNM3T8aHj1W#N(nRp)3B6q5+mkOQB#ApdoLG=EE!0Q-G*351(q1Q9crUB zsLL%vX?C@hJ32dy&sI*KzH=uwRwgCIuaooZJP!!_!Q+$1C8exoAH;3|ZHd^aoBZy5 zFd0tNn2G5-canlg|I7mI4|Czdz&PMyE7HmHFtX0SpU2y3t1URzgP z4Y%KEqH-b^+>dy<5pnT)eZI9?4Fx+()WXT$sR<5*p+UHJ0TK^R@Pncyq)|mmWkWv8 z-}n4)+7oegqoV>4*Fn#3L(jiO{%#VLs$i(yV*km0w_?lCe==Mi%0FGPm#(7dQRQ?W zMeFWB{X4quG>ttsFaZu#q31)Q{(HsxA)6ddzM|^Fssh_CHphj7V@GWWdXFWgnm1)h z;AAvYxpSdT)KQ!HMt}w1$pX<9d74qXzNW?AdXA(y%TT*M#6QAd8cUK|!v0t7`U0rw z_?DH_`{Y~K;#&*y*9sRa-ZovyLm_^}WJt{{NKG=nbuw0<;ELLUix8Si1sE;V=5}DD z99VYtaB^(u9Vo=;G=s?pI=9p30SeI)ym{-_pL~kP*IS9bzqJW@JWX4XkHpE5l(+v} zUQw%60mVDJjKUnWFd@m;Bee6iaAYypCl495%47|q6q@7^>N!rnYSpUSQ%42R)*+9R zbjbuAWYrQ}=jiT%1a)@{=4*?M_}E(NFdO7#bsccgj8y^I;JaJl8?pokw6qpLYV%uL zh@Dz@3fv2)>H=VE$_YD>t)as$e7wKrGEk?YuXQ^5gk*v(LZ3`RpAdvvI95^jfUlzk z?%tLT&?9wq4I(qbOAQXdaUvLA*X|ppzkmJug<#3M1721DY=(~O`8()>f^@+yyMz4* zSv()5;UmkTRoqQXGQ|Ve>NySt4dZ8lZ~L90eexY<#Vo?q2+K?{D;#qxd{#Wh+B$5a z{nu_3z7J2q5+mjqTx3$I%nfXvTgIcXqve`?8II3N<+hq!aH{6k*hs_DgFpWGkQv zHq!q7ty@b<77JuhE5Pb`Gr!=mcy7F3z#n$tmC-~MlX&kB9dDgM9R_pH$OoZ|njz8~Jxeyzcqgd+tC_qp!XJ$sxhCsogCGKpZYYSUz;Gymw z1Wu0)BDJ$?&;}5$t>}y&)^{%!yOx%IC&%VctvxU-BhQ~NP|uq;PmNep0Z6tFUoIdk z{-HzT#@%=$3P6-_S-{(_%dYW9LiT|J2a3yVs~J+rel4Z`>t9X%SY!K5QmN^-p^yP` z&xRI%>53Ib7J?w~}ojIM@I+A}ow}gtVlc zs~>KDOdlW6EKF%{Z>gxC-+>;y8>`L}Sat3|58i=QC-KthoD-KSs;f&cpGOiOlpy?h z{@ev5=bt-z_|&DdIRLHaoQ1w#Onu3537Qef6KEel<)Lf3qtj4P=4O zLuuU2jaDe4;$3^b$T_Z+`R7XcmDC+g?6)2G|t-FbvjGp$SJcEW$oSG zu#^cSCrt*Hdh(=^<0nKjpkqi(oi;LQ@{}o4CMTH^5=>MOda1UQS@dAM6NN_QI*q%z z0r51NF8+MiKx6=d`(D~T=mO3jWzFMb;$uyL`iZE!JQ5*8d`W~5V^XHgm}v}8NE#Cp zHUj+9Bf_Pe1eH=alLyrvT3g!&pL&wiz8#sIX(B7>Gm~GvAD0|C}9XqyUA3A*&8{gSn zY8@PVIXM_kyPb;|PErtuSjbT5BpC`MN4LRg0Y}l6EhkQ#JXzVrk{YdDm8W)W*|KG4 zZe^`z(xgdhdu7pPj^lnTMEuag%*f51!9^e$L9n}BBo_fW;mAZ7OEMAeg3Z10ZHwTa zL8Vdcs#Q*BW*kYll*eVR8V=g2UR{+bKMG!?qw>txGEwQWMmUa3UZd`Wm3C<1!b5^_ zjZ|H!IFtefG>g=Oj?L*}>H$j<5AH(Zfn}&yV6-^_R_W|=GFo_SLV`3*U@*3(5ChZG z1)Zt8j~e7GBZ(#bo1M)Ii3dX30le2j)`8aGZy7D1n{_wdm@*{^DrC}>l(AEh?mBgB z%Jfw7n4Us1#pB`Bn71R-q(*reAT3mo2E)-4FTg1A-(RRBf@Dr2i z;$`^EE?;&yDuPM8q~a6!bJ&nzGij$q%1~+~4;ewQc5I}MY@0;p~sCIvrk+q z6#!IqjX<7&^Aq-k#pr4(DK3ZKuN)zm^ZEFtv;euyoCK>RaY~MyIa3lm=l0ugpB*e| z`*q8fAAUG)vm@vRX0bukpCax_)U6qzggfcy;zm9ZgzrhxuC8`Fypa`kja`&dNxksG z8rAaUNhS~Gu`glHy#(z~NG6_qashyJA7Fvv@XkizurN0yEm$D9E*Wf2FLRnR$ZEo} zRJiT4&pz8x!>Rp^a1LpF?e@GgXDGqDzh>moqc-3l0S&8!+s$S}p=7$VjuhmqEG+?K z2ChteS1ZFRlu}ggHAF^6>&=W(qhKw-s;n6q9-d+%RbV|*T>#2Mu2?ZWEGnE7NLGzW z1)T$w#PvZyFZkiApjMg@w4mUpRxK>EyoV)>^212!~ZTnzP^A zm~upW5?+ht>`JnH+Y{5 z9237_rpo1pvrVb=lSwt1kF1(yCVd>y{nR~^#wSm@`Kjg0pStVDn{K*k{se7MkX9Rt zEPvn<0)H5+Z7C@0$1(&(d7!MWB>UaB%{01Iu-D11zG7&NC%gLXWLLkEe|8Ppg-hp7 z96o&PTxog1$&;5a3%mO8;{I>k3nY;rGm)N3B!-r;dh+P`C^YNK1zWNeVWC1H4nqFlcgQX;Q6noBXo>I{rdc1 zWm{p+!M{HH(!;Yvw|#at_VeKcGqaq~F}(BkI`hy*&SrPYLYHTVTRhTaUcsJ!*F+A3oU~V`}?3X{=r2|X)T5AoVO%iP0ir( z@^V9TWa8w>lM|z(4JxIajgJct)K8c&bt?9Acvc4meufc*pTVFUY{N28>F@-N!1f*> zA8c!F?I!UBi5OS0V5Hx~NT0y`v0$WEW2EPl6(K-jE5Cg9Y)KjX=;zNBOVpiMvoD=L zec~7%ySMG#xP8~jBPWj^-M{ZhUTHqqUe2C5oqGz(>$&_g3i>rSlXVH)8^Uhh>4d7R z4Acy|N$nq(+bQ$&wBxtI0i+?~rM=tfCESEvU42fx=e34w{RL_VRAO=L173P-t!mnB zOQ2SdY&iD$T>kDMm;7A*kr5w#`pdx!r^^rQJ#w_kKc@Tm@#7~>96NjnDs8DdklN32 z0Xl=A6CfLLEZe>pHJg5};^L#1J@d>n%c5yd!KP0?gJb67uPG1iq~hY`K0vEr127Dw zGpenAcgK@Vs*dpK)1R3S=4z1=;v4uEKA|?r|8BzCLC=5o7HR|4MC-``jbR(gZGVN& z67T(YSZ6!%-Y3*rSvA?0#fqV5I=VhwIYDENfe4~&e@Oc_}0@HmGUGwko`%3k)`a^C@yn^2jNXCWA68V#4fEqocwI-I6gV zSip-}h4PYegnk--kZcA3bFP)JUgXTNa^E0m zr-tuA227Q(wizMu(NQclE{dT7A@P9}?WY_HZNlDD!g1y4>g;mVmoxC-VLx!W5DOFn zUQnfJMp@q0sIn0ZG{h&`TEtZpQcmq0|ap*m46Ze ztuC-vkZXd+muEG2y=sJRc ze-QI?ynv8&54uZjozNeh?e+n8D|Wl`a;Rsv3VTyKNh-A$m((<%vUyE)DRqF8x3*Td zww5(Ea5CLg5s7)? zhKb3MYSuE|4Wbjb+vk@15pG4l%$9tFuET5VIwt`gI;qBe+z9?ni;q&;;i%zQq@wLk#RP+QA0ojZ_sHkqyb5J zij)E{3xN`PUn%xqu=DayQ$JXBrNwoXHP!aUmWGC2FvkP6VMgqu%V`G7uCvFglPUb= zfR$_g+uDo@Mh{r7nUPBSeP#w-ZySnVJ%exbJiZZOVt5+g=xKbTX%{Yl>aMQva@oaG zhc9*v3;>Gi=iftJ0y6{r;3egN_&aJZT{;K16Q{b$SPnsX#i0B;giqfNBP+=Z-c&%V7i*yeWtxR2cgFeN8sO@5@oYRNhHn& z48cbDl)gGu)1(+X^Sab&aYh-Y7K{BP`IT2*d0?6;Qd)gJXV->rkJU8~N-PRE8xoV$ zC5KPKtzmk7MM?s*ABniz^Dngn5uH=uHqE~OiAOhV=3_E5u)Az02mAcum8q|%VPE@) z49e_^+0+D1PHq4N0vF6Dh*z*kY#iFs)~OaoI`eW4ZT@E4>C!GU+i_~sCJ-cvH3eSN z?yZoIpEKvi%y^xDj1m)IT3_G!^Oq}II_kTelCI0A3EX$j&*zVJB6Na!1C0(WYvq-l zoxK?1$mr&xjUWGvFi2TBiiR?*$HQ{TttU2ZI@pM=mMP?VolK&Z%NdGnhSHeS)Mzww z@l0n!4^OEg0!@ZIHaglPB%=Kc`(ov$n*Ag$1jq zo<_L!OMl(*Q-!U*8OuGdr21(eZ`ZF+Ox&|4 zFpxa4$Ye2%V-Sm=$86#hn|7$xJEC5CDGI)mq%~-_{55m#mIb=44@ke=*q==vKT`R zzzzEuj6p#fkd%v0D)yS675yW3p$~{p*or>53o^bEeUOAwRjtkTnmR{yo3qPS+uv4I zXDcqPvG=JAa2%oTv)vBpMh#-#KmSbq%E1P5c6wO3)Ew4$eA}4@tezS*1LFW3yQl%| zbh$CPec&I*ZCa85RhTv@HYP~Y)l!<9lQ!4bS+nnpueN9BOGb@17<$9NUU%ZeiE@_# zw4}fY#J#t6?UwUWIpHQ{jU#8=N*sh^B)ON*?fd3E3bc?v<-v@o;TVBSUR7IL(%xE; z-x;1T=PzmS@UCNz2Z2G705oo32=J2}62@3k3P1*2Xm$uhO;uTWS!sD`>HaKQ>gA9kJ8ni_uY8oEpryFfah_}oH1h(pI8k2@;rQ&C#g!R5Y#7!z*0j#-jbC{8!$!QetU78 zH4{U-4zmFV^YG%g-{$C=ZnHwFc>A5V-g;|yjn#;{rM0#i#OaX^Q*Ezz)PtF*;ba!= zX22~glQM45II_?`dCG{uOrTDW$R-%*`~!kuAsK^%o^BO{+F8pYIhH$@vkF2@!PYhyn=qWC=HfF zJiWO$m4CvLM*Jv}-Mjfr)SD}UvHU*8Ns>x-1D&RNa)M7T4+Mx#pQ4^|MEn>v-zK#Ieo5)_`u3jeRI#NG`@@2-GkFN0)@_9!vXZhA+fT^x@v(9<)D)$JLRYQC(w7Fc=mGW(giGGY z;2+>u^09m@y<^8h{5BpO*I~jDg5MwFACdrtl*-TM)PaFe0{1j1$KC~Ssa2-*SF)H_rsTK^DjPzcf{C&}<>nifAldYjmHx1#SJgk%y;h+sK|I1-R5 z!kAUsDv@SiUIOn^NjZvoRl~Usx*ZHC;C)yi2%3WDf_cy|QM`<-Y6=-m`P{^(H0Yu& zSXU^NE%jRbtNa4`e(etE>lqbmWWAR^8d?d0KDz8>^wFcbx{0up2SKV}#Qmz%)2k0h zM;~@R`g{hABLSwz-V#W)WfS^!a=gyG`8jBZS(S$B?Ke{{i+08&b#PpOI`}qq{QPB0 zUdY6%Fd!T!j4v!%k{PF7cJs0&Y4HF##-}a0V;MD<13*s}OgS7>q%{-vw3z&VJ!>uU zjeLGCXo|(-YibSM)C)4rexFQcrnHvPWUC~TwboXP#C%61yg7}I+Pc~rc;f47YcZ2+ zYHG2VP{%odo`cZ~=pz@|H3wZhB|+jl=_=mBkoPSVV;LO~0Qjd)rw`NxB2ORMV|aLo zG0+gGhpWZ_*FJdwUZDrb@Xwu5uo*Kjx+F?S&?v(^3Ne*XIY9E^S$@pE7pW1YQkpRa?34GWWv|yraEENY?R)1b9+Da zcTQTHon1?PZDyGE(;p5IL7NqRvJQKD&9+Svjy`qB8dD2pud1e|x(fPU86>v`{}3pv zYd~USL#@543S%P$+Z<1MEXZdkd$U=Nfg+?49y3Ygj0r^Yi&l>rq(aA5?oo za5yRj2L=G$0L172JcT&N{4ojv7==)bfr52jKT^n=fHz)hT%SsDCez5?)u{e_b?RyW!jZMj*Dvl|4evJn3e zU5IYf(4XR!hp~cwSQ&qZW=i9)_Z~^n{#qysEEEN@(vXL?wyLZexulM|QhT|*{1o^Pz-7)$;oE@SO;Mn> z?fePR$ju$^y;tv$!|GMkHwrtFQYPEFl{~3$IbRZ`+5HU zCse8@ECdfCWZ?@+3Q8Dy8jn`J66iUr#UJ z@9p>Y{M&zveM|DYNX#n@Ju?bDGYU$Ip$snj;^LZ`i+QL3S&X$4fcO0zLvo<3tVAxy zK+z0uO~aM~s%`AsfJo3%X*y5y0el$!!3UTu^uhD#e6a5fACIXshGX_7C-1e=i|$CJ zT4;@iqZe0NF$Co64|N8*gOpJUpf2)mq!^~73ocWIl80`1Hl3b9FQ9LyfBmbODof{| z;O~d&N*_>xuOF`Hrl#uZu6B~pjy0^k9Udr#q&-<#pVJ9hq`kD&5Cqc7&`^K>=n-My z?83?fBP z?uV3(|MOPd_U(52_U(V(l9T(SQeSmLL$zi6pSR`)2Mq?;gMZ$llV9K0cRl~(kG$po zX4R>PRTn8d|Npk^o3KKk<(o?HUAb988E^q;o!==^++ zLD`ExZRyGFZnqs}8;CA|l zTx*m+<>N=W+k5o#_|8~=ixvyrxuWW`W_hzMnlS94^Rv)P^XHD~6 z;HNRDnNgO;&-Pug8sKddA5BhgUw3csU}IYg3aobECc9hQFozu7*0uiKu7EIw${#j^ zdeDP#fbhSC#l`vfUvcIdND75NE(b_WAhLbNN?bb*&NtGJedD+ke?OAMgAXVHgoyox)F&e!l}Q)D5Zp4fL*E zY5WaWUSE-ZhR*rnt9g7lERj&*SkAv9*>b5o1N^X4u?+qVy92{1UVQPj=Wd9INC-BM zVnfD-$Rroe_)l5-!pfEP^;+#`pLtLRvA@@C9N}s^kd>8nfKVBDEw@kww1%EXFQ=E# zH^b+*9=m7>43ri+kd7w@l3se*#{gSc4uBofn_hcOSp4vsj*O@RK!sj=<+X*-j}L%8 zb~-&rICS`rjr4tg&7ddHadZN$$x5X^z;X;bM)r>*%^e*fVP?NR1Z6ymSVdjM52^fL zcRR| z1^Ae&sSgl{)GouK01%e zf^*?bz?=kFc@|KtZ!V;tq!&UuC_*F*DOA(9(N9_y`Y!pp`5-=$55^I}D}A+C3VzoM zT}=q1u>+kbe$?S~HiEIlUeqA+Mw4F9tGxEpZQ6+F6pOOkPZN;jm3Vu*t)z_0&_l21 zd97}svmc5n3{{z44GsSO?Hz=stDy(;onj=qpg@Jk*+3$|W%l+qC!sE7&CEbMn7I<8Y_=#6 z>8PkaC#)#S4U|Q%5Bc+IS2v}RLI-kskf;b}s#4~$Bt=FR7PbvS5W_U?KHA%qclO@D zJ-9Hq_uSX0ee|zWZ2@zZFCSk}0Pd{PCaRNT$k~x|p`e^Dv%Iy!LDSN4cc%|o&C5cl z<}K->)vWxXZ%kt1&Yfc-)mA+~5K~o7QlPU*WgIzu{`@&yFS-znvsFQ+er@g zm1O_69|S0ra&3D%&>z!Glz#Y{NGFGRD0BB968P)8nQ)KXEF8~Cer4wn9}!@lRJQWv zP}r7BmW(p8)}+$XpMTD~OxWW}&!7A?=jfrs*_*fSJ8`NMu2V;SRbKw7ef#zvJb3iz z(bIX<*Ic44D$0g(qkWW$R2%g&z!IqG0eJseE!E@2_$pZhvEfJ^@F*-v2M%;~^>=~; zL(<<=*U&ZSaaWd9HFdRnlt310RkXLQs;W&Wy~Il>hGUMVrXGbeazLWfm`zBJhqJA} zM>x5s-9=7LdHecU);Z`I+E-Q%&DUoj4J#oHBzBaHFC#oWZ7@4+HEkY>g_{?J%vG$% zL1>~&P;kUgrO`q37#^G1+Bj5+Qx8dltFNbLfL1^n+&z>=25A`Z$y6#O!+R`==gzr2 zNxf;^}OfH!V0x^&OIcP$L-sK|yqY%lK!TX^Ta_gpG$Gea0w6RHthof?Ee+gNCW zyKM}XRMCTS|7fbK%e^(5zOVpSUE0p>XCi0XHRzVtSJkrRtCChJ4Lz%nRq~GOJ z`U!yM3c?_LZM?tl(znETdro%i-H$&w6*ll2>_8u-R+*&(^{035{&X%lL{x>gTK$fE}V ztZwV*uvM28S2gk)$bpJ#ww0AvR<=3vato+de1m?RSZNM(qiB=cGbjoG<&@w|sADgc z+KZdSDEtbH{wk~m&tmjfVDtq{UQ0`0;GZ7((Iia$CkH=4p*VW<&kp?1D)9ekP@K_yM3h)D-Hv)4eS<^_4l>va=^l$j<&@ z=h4fRMW^%b!cq|0RY{hD?8>gNMR%{dJFl!qChZy&7L36zsjR0AMT|mgR^ADPZq=Qc zUww7)Vp&-gNTkk{QVgANWqG)76jgX-dDv6};_ukxF!Ru2B5BO8v`|LXg}h@YyT&Y7 zv}p6@MT_RmOV%?IO>8<+&hFx$XTL3I341!7+DZ=g>9Cgke-WtoGJbhrl-}Fj)>20< zT3FlC-iK_A4iB^gZS=LhMi-k8=bjE%| zandDI`+EmG!wy;a_$8K@S|ID{tLp3Fh6Mtvy0)dIwgLsO5t0);wKewI!$!vI;SoiX zu@VYu`rx2Xg38a}7Ke6-oDDO&Iz}pyU;=@dR4OC-JfVPrM~DRKa1~q$QLjI78c|-f zke39=%V@|;aFm4762BkC;t~>x%x1gPSX{Hu%;0KB%LUh(@E9>-)27cp8$B9fV1;eo zJX=|6YS~MuRrnG|y<`DGZpib66O0>|GA^YEDKQ_?hnGr`pKSBx@URSWGMz4}S?BUIGgT;gD)?+J;vQWTONV1Uy9k*+i`{IF(}5MazOM8l zh+A`yodQrBd-bIg$9_G1_T0JL6DN+JI9~(}yej|KL#NK>{(3sU4E}Cg)foUr;9^Dj zXa-}t?fVU1ehhN_PyY4w@iW`j|M0`NUwj62^OH|DY(h|d{kNNNw@-0gcps>p0PU5so0Ieg&251^^rcA^k4%nUfs$nlcwgF{Gn`2MY%;JUk?p9kxX9GU#> zpe1|~L8M9iXhey|^DoQReGl~Ocb55(ZsNGGnqzyuK;ZKq?|+Kk z*zcX#7PjO`=;u%W{RJ5phD}?glXg#x3jxi6R;W&%cYG@Xl?OE(Q+o7-9XaTKTzfHS z4hWKpT7su9y7#XSEt@x50;p7bE1dx>g8Y%ly2|r`D_&ZFIP2qh?w~4$j?Ma~9=`LrKwLqCpF28~Rak45R;Ctf2AmpJ_HNse=r9{39N zOF(1vBLAGEsShRjyaVlxae9ea!z#5P;2V)J7KsTH7juLG@E!V&k=*~oa*=aRfjZ4r zR?*M}o=cP&X(uw?(COjb1AXv0fH>OI(+tmmy^~h*fH@4b*CXgkeKiu~H_c>>)kd0%{KXqg`>ci|h2!0kVeSGr%{TUhQW4bTy+rNK*YU=*|yY}w;;@Orth{qO!EKtjjP73D)?7+CZN;u|%l%^daEsTky`;c>PK!?6Mt`jJ)$bG-iR; z2u(}b36t+nrM}Ig0&#Vah8BUf5(HnM_}U?#U}5gQ`_?!!J9gHr>mtk&*+3B|x$wj4 zp-)h`mhh&AItTnGHFb^6w&s?WCX`!h#(@xJ(^)XpP(GK-D?xn`AW<2KS?a?ZJ}Nvk z*kFVsL~jTQ(}jhHhlYlShv7gNGBokOwi5P~85p~z(2?)Oo-&*b6k)rB-Qj#uQ6ZR| zi*4mt%r9JmOCuNT;Ag=x#33n@#wHBn0GSymFU$?_ALE_XRD34FRZ+Z>91AS+h2so;hCWSa({EtWdWQoW zRF%VMbZi^yVza>1E6|Nf7e#nH5hGVe*gCOJAZH5Yz?zzD4n$YFJK?$j58GfLl8-qh z`L3vSjjw42_b$FP=%^vwt0Qz#Bk@2wP(%c>gpdn5A|?vJd;kMR#ZlA#M@w-(H=*yR zVBXBfyfI-vH&L`!gBb($RG21E3N#4)R+kqOPVu5*+ny{Xh&VAn@DbWfg|7=V7-`$Z z{a+xOYcLoo+r_<~u3NWuEg&FYbDY9|%6$NcKQPUoBNRgTN|au|7ICm_SyOY1WX#;# zNVNWI#8|$OAzjXuICps_9KXc+14v6!UyqMkahj&X?`36Wy??ZcQ`FYMGuY8wSG!h& zXl>2^kGl7OkE%@Dho5uiOwD9UGMV&b5>g?A&;b$|4q6-Ceuxs;|1NJ0}ys z!e3X#)wLqhMVf`)0wDwl2_d~sdheOZB$@KRo--LB%Bt+X-}n2znanwpGv_?L-uHE1 zcYQNul5c4STNV694nkcHi4AZtP{V5dU^e9w5EQ5xJ>oZa&c(>NhjmSioCBkTP45TL zW3svAO`Mp;h&+p54Glg$c=%M3@Z94lu~$%HNq8=aF#dY?G7BXFAuq-kVWdIP=_@J# z)+q*7$6QrXTwPUGf>Ecg{(v5R2FoY_z2OK4l3@`eh=BV4~;7EO=m* zU>_~6s1!|FaOXUdhG~8;K5ImGFL?ibs%tc%OxEyBR22QB3gAVf|AYw!Ijc{Eap>eR zQBi6$2EgX9(bN9mrkxh1Y4V0iUt3$>U~U4y$6VdmR8ZjE*2WoPvmS_znGSjbmU zc20*&kgpOwP_cSV0H8e@t$%=8>*tRlM(Gvz@9r1I5(R39@}4O$mJIWr5z*)Xh}4XP zGl}ZQQB!`BY!XeDf#AqUHkd?(C!kw2Lai@4U?yXQZ|)XC_wE$+`xk^oB{x)#Co3so z7-;h9x-idrv#eH)sn=3hUD!~KFQm7juy9DGfXJ@XQV1#}QF&BUxyf6hbSdOcD{C9_ z@{>4uIpk8^N*x|&u~HeZQ6P+ItP)84Nt5~tj)2xB6E3(fX5h>5dcRFUnmKdkj1Sao z+LW~5w&dhwg>4h5;n#X=0=0o07-Kr?YP&nEyziPdYYujLB}|_Vqw2Hua=*J@d+jxo zKXpyDb7y4I9gjZx=|JU3o{&o;`6HQ0KO$s+Q70d@0T@ZE49kaIp=>E$3UH+z~~Umq(ert$mEu#>rWU z9FDi7JS~(!)`|Y%OqGbFG75$PI6{5(M2)3o7agNPha2>Yz9szI1wlNyNw5 ziJ(D)0J0SUXm0ZE>0)}ihD2hu%+fjt2MY*OT3=9rEeT|38l9FJsk-%GD^TZuL7jVe zv8n!4S6pnfnawOrdEMvVUjHBbrmCNol+@6`afc3h&Nt8u$RwT;wTdPYFb{)$IPJGJ zi%|&)qenwgq4J#n%O>){jWv;JO-*Odn$5?KdCoV`Tt4_sEk#sUr`Pl1#6;mdCNc2_ zTFnQ)t--iQiFs%-^Uz|JU_MxYQDPoiOvJFHAEPRq2HN}DI(lK_EhoJl&E;T9C~qd^ zJY02(Wn|ESvBEJ(z?g5f4gq6`+r&U%T1^qfT~wg&)meOUBlehI9WQR7?91|&;xn6J zTel^h#3h+GN)w+#9(`uOMTM zAON{g+|<#>VFMwyc60-)A{y%MXsyP4Q{9S;g+m&`kpV0Et95{gr7%#?DoJlA7C0yd zrB-E%=?P{~of zoV&OBqU-}v_JJ6ohphsMBqJj)uPis0C!1kJWVL$r13vRt*I%WuHk>daN+3mk9b%AA zi8O5vX1Bdk=yk#ONFL&0=yb2^UdE(OgFRtKM{8Ht36R94iU2H)pqF}jxkDjlMd zAI}uep}?yQo`7nJ{6a&66zCHmTjRX?D`E1J7Lk~!w)RuZTwvVfRMKnEtY zP8&z{0iCtE7m7-2;T+Xm2@MO=k0}SRx&do1Dd8vC?S6BXFQ4v3(<<^-N>Lt|VTPP`^ z+9d8bP_1<;TvGU`u+Vb!cC=d@Y1~+~`CJaAH2Bj0^*bl2@<4u;=^O)Y4NW8VSq<88 zDX>lXkbEpTpIIy(Gyk=@$_ce>WgCJm)uU@!}^bf7_SBdyVS; zsk+MJ$B#oi8{BbmPa1HD!uxOB7Y7e!x0}3rD$Sj+b@SHwt3+v2is6;#RCb=ZzNEYw zmha6q*pU|&LE{3hiiWPb-0U-1<*m(q-T1AtprD5CmfFq0(@f&H*5jKto#}(ArOeUW zHvnlLMi#L`#SLIQk;KfH5wApRvpH4F0DO6ne%PwPm64^mStfFbq>ipO3Iph~!-qn@ zr41t0wgHiUuv#gSibWDxKh_j5OBig&xvdvu{d|OEpt7>Qo7Qw!Q#%zdB05&8lA(Qw z)sf@Ih00RBSf3bgi9+V(Y;U#z9!Xg&N1=UDcK0N-uTf}U5e9~6j~oG<4UB3s&}j7c z3y3;q+5}z}6%{vO!Zb2<(i*A3Fsr#)_t8fJ@=xlGPO)|^`G{x6{RmMOq-v5qKmIt5 zsPor|%geVdSz=ESiXZSacp0?Y1+=4){Do%@E(Ndx2ssIS933RB32U@%ZBCs|(bk5} z2h_a3KMCR)wVu)<1?%-{qSdW|{WL(Em^g5hCk?g)z<#cii-fRP32+3%xow>(BK&UT zjINR?BAjZt;vC%-mvTV@)$ZVA=-3)Rz4=(?g)Kcj+YjtJ-!4~BLnQ!VNF`k0YT|FY zDTZSETrxF;2SGu#TmHd&OQl6c4r)!V)2gIQTHM*6A{eOJ2arf8%#GLrvR|ml%i{%|^ykrx4!Y`sM@xk zo_=(In|(`;o|pssg>}jom)vaV0z{%O?v_Q*!IFW=**k|0hoXmGeDR4}up#{xCX!o# z`NKXH?|0Z0AS(x1?c~LaA9{7znX2sRn}Qu@Hf`F3ebVcgo99qVA+L)U+aS;K4~>lS zm!`$#UoI*uH&?=Scu6xrOYM~xDvzEhYLAF$Y46CoP+ZpwcV^Cxa$o{^->%}bXG{B~ zQYmBWZlH94-JFW|9s;7Ng2KleTYCmvX>qOnKH&krTB)t0p&HBC;`{hXaF=%hugSX(Yp!jpmtwfjQ9K8x<*ZO786hse|1; zyVCs+G=(Oom zCqODWVFLEeMp$j6{+35loFLEn?nk;`O;ta+aDu)8+!rljv*tejKN(x+ zKm@^a_dNOJBY&sJ&J7Uo{0r7y^qs~&Kz`2~g}O|=;rdSa{`gUx`Ja3!GUUG z?Epb!18`{4_;}rDr*q+)ITPFW?LA$IHil89k2eNj0>0t;e-`z>3iZDp_5bYfoH)7k z@DEuKeHWa+SOx`oA%Ij3MVE_xGXgmW0`0=HpcTy)~xy9+Y={FoW4{63Wc?6zs#(#8K6{*mYTD^Oj~R6S`g7q z*SZx0VdH-{7Yl<;@O6H!&@q1T9k2|eBp_R`1o|A(*YC(-$Veba!TSxbhv!E3fSIAw zF!R2@&%%Vq(IT zLlQsetNf()p4!r)(#qOSsB$ODWF4jT3SSK+aAJJJ!zI{06KhUR75EN-BawqhT&-3q z3u^XA#;zH2kU~7mEL*#J&m7ANDbq07+&>aJ4j)l8?)E#$(g&jV;4y!FA`}bF0+lO~TCGp4O=#*9P~izj*ktS19$@^C*t=sjXb zlZChlk`;PUn6K9&C99E=uaJ_pNXcrXWOi94g$_1b!AFL(xzgRfPaHUK@*>(f+`9`# zc~s1Yg_{@i>*mr~QeuI4wlxDnA4D6E=22hqB+mJJLEC>y|EmR3F0YMpg;e|(LMzVQO ztzH&I&V2NRq@>{9!b9tgM&tTJg}ot3rrCvgNY{m&{KCScP$XQ;D-<}x+v$LWN~wU$ z30g_{mr*fzUB;Qd;2Sm{x}c&$l+ri+Y&36Zf*^%*aVp}XhH5b9^ybiVS!^Z5GORi= zTtVy%J^_Z2fpwU`acn=v-F~Ob;S!r>yR0^J5WC3F&xX4!R+pCoD@A!r%MkU$#Yx?! zM6gwdqF)Smw7>-mHly%zj**ga!o&z=F<>KN#?H84=nahESZV|*b-@FN1gI?nwnuD@ z1@|#%EFxeK1tm2B|;+m#L z(9TdjXLVJvjkKdU;5rJPbE)IpG8khNVO%R39?gfKi|hjOj0I;lynP zRh$YcXt9_@33&r@ji12mBQ}L(SGvODK6z&^bfBn5wQ`_)NR5^(u1e3?{mxwXAny0! zvBIJz&ez*fmz|OE{U>0R`5L_8JU^a+g=;2coJZnbd+nLKArhTA9yCb~!gSYlsb(&S zCRtgfHpAqVt6&MV;^t^Yb(V>n8Bm*@M}^#<>i#klTiVlS^Dm_z{{9$RMuR5i4zNXf z{84@2{qmBDA>FxF`a66!i8-T3EB^y6JLIl-EdwW+?CpOdS zs32=wr(I^T4A_VIyE;J~)z#5$v06-%z12SO;wo3gg@9H$%sX`U?a85Ng)FBaf;&B_ zFk;iOUaF8vydVbjl3?JT$Z^nJ4>_?7I)}B&8H%uMACe5%T=pSWApir`(Vz5a5B~;gNzk%4{9Peo(0MYHdE{8z+dN{PhDVNm!tBC_Zzo1^|Wh{;B=RwrcKzalD& zwkU_ne}lM8WVgZT%5RaGMWCi4P*VvQ2O>~Yqfk?t>gwj4GWdN`1}WRB^7FweWK_Is z+EOd0^S{lq2F9k6o~jO|QU+swS6UkE|BQ;KUtG52cfZ5_qN;w45>n0c2hRXL1nyF7 zD9}rcWGQc)4-a`ayT-T@T+>|BIOq!MtZOt?RYl$P@WlN6mhbbc+uA@pXH>lKw`HOI z)RUpiYmLTcYqNC1tDD}@-qylkrwp2+z`)V)C`{KFzz`41dpxb~apYwY@^TyU64h04 zxwN(*`)omTH*m7OVC~tlqpogfC_n$$u>!1<8=Bi;x~8|=LvCBXJXwod#KGa?C&tCa zjnZOuA#aTzH##xFpyTYOsG1g0(5QFU{wuwTjfjtrx1Gt5O8@vrfB#*(-hA_|4`9vx z9W5(fE0x*pHQ!>#`9s@4)a`H#(E`0Cb|}2;aS)n0*+C9wauZ12Sx8*ICZB6o-9Ap0 z3yS$;?ClKmQFH-D7Y7)DP@G$>Q-R|V!8?Unx>rCw6yXtF;3o)T*S`m~?ntS@h$)aA?gVCV2p_Gv- zez@gDptjUUj!c?%{J2a8*>tJ-494*@(JY#OEDap}%A9%`<1t zoHJ`ms06zeumCAvTo5NM#Tu_f95oMtziU$DUAl<4+AQ0@`9IjJg}k{Z`+PwO)>ip> zm*Iu`(xv=@%egr?XJ3RlFU0dA=RoK!5Az|LHy@FUm&L6OYMi?Q_dG+_nO+-L1) zy-aa^ja?X&TB_@PCfv3#(NReGBg(4VTk8w4r!TB;?S%hfsl5~Bf>npF?~t|D%!hP# zwpAdi!U}Vr2s#!Q_QkbOSk!g(^tt52yAN8k)N9cnQM31YYIcBC(mvAF*Y5-BKkwc? z*l?QbItTj%7gc?OF3@!AmDJx7Vq=CZUGCUADWI^Tt;5QI&CrS1fHH;enVvqU)D+iQ zo`tGxo}B9MumIIB^$$Zu`;yMGJ-`p{DeDx`^?Cw}9PlDW89bhHtPSxYmoGQM$)H&4 zuf*K*(o6S8E3IcXZ{D0~)5PBYl8Kpm+XCS7+$-?iAm=!%kzcxX;W)O|4F9qh8UtqB z3mLA!*W~ruy)z6pKre0KLai!g?Io3gyz;y6j-5urj;Eii8H8~xI6{=`4j0>EAWx1R zEI+yqG)Ja5(8i$-_wO3C>7#>s@{4oM(FT>5dp;X@%be`o%lSCNy(p>_Ja-~#-GPmK z$b%0qoB)Y-xJD&|cUy}~2_o%?hzNhBLhiDd;+ie(D4t%s?ff?%e6-7q2T{ZJt{%{v z`+)tlr=uEc(Q2Ve?e_LIvBKSb{6?YsjEOXO(_Iq-pXZ_ANDtA#YNOx1Dp`OUrMkJL zs8On$OF>_ausf^})?=EobJ)U)2JMUrhFdDe-cPHxr60gos+*IJ)Xmue@#p~KbdH9z@CT4t=TIl6q@*OO9j)hn_~D0htq29n5XlJb+_p**NwsrJBT419 zH%vbE%ex^j{5H=XL#K?}qU|MEHwwCU?$Z*x;i0)yBM)~|4;g4^bob{1_W(q%EczU9 zVcata2gK zC7nh|`lynwvyUh>DpLRu!PtRt6w2&A%OD#DvYzAV^;86c^#%-AuF=scHKeNt8=;;q zqEQa(==@O!VWcAg$fVCHa7;|W-uCBWqw7TKQ+Zwl?&O0b*9~l25Bcm2AnuW_o z=ACy;upx9S>%6yZTd?587hx}U^k^piRVV|~6`dXYP}2Sm*q%&QjMS}h>+QUwqb(L6 z9~g0Ydpm^7S9N#S(H9XoX|eHXY>#lP7LIXvERC(C$A7D5BCJ}tH&i&D7mo45bF{dB z>A4``-tofmsBm-%&kg>{b9&+4(ZVrHI0gvMNq*(I2;ttz!tu0l^b($LegEwBDAe&c~__GH}rAO zuH%>cdGvArDdykM*FF1=U+nODYxAJ`hB`fvc24fM_xxy!$`@O-U+w*#y~i()03qTK zFSt1WwNb#c_4vi1z};`6QD5<>FUqqQjrxKjmQatg)W>dhJ7`nF62hF_P*u5D);

    Bd zVhl(~kj})&kdO#H{;wJiLwuc_Cle#GN(*g2J}2-wYjR7UX*to-;gItyzwrKO=v z)5tV@TJaH5w;cB_$6de2z02_wMJZybCA{LIvB;tu*ZA0&xQK}8IM4N%Sb8x6GVz!g z)0k3hL+YNa@?$V3*-gTFuoeH}PtMcx;YuYr^n9gIzzB{;NdN6d!Pd>5Y-oE;(hEA8u z){wdT);s6T^Ysml3RCUe@bSm<0h%|B`TX+_VV3spu>trJj|QG}T9P&|By!*wy$HBN zo;HbIj1KwybNCvUwPV$EYi-cPFeqF90>bNe_a#jX6Sec|+KY{$2fy99)7RHomz%fv z&%zEx^zA+Od~1R+q3|lEK1Zvtk}WDJEvDj4&-GHeMJb}LiSzPIGJgyUA_(9aUItca z&;`r2{=6TygkXHK+nbv^A+q40&Z0(|rZLzzp(xw2S+Lo#VFcu(zuVT|*F`Vlx9)xl zG<0&djt;Td1kulxu={P$*TC(t^lI49hhxXffwxfz``_1fL9si@R7wjM&-4W=hRWfr z!Lor~0aHamSy=(3 zu9RNN?HIdq9R3t?<&bbnr)mse2AN5INi-V4*^<#70Go89R22N$&4Y z)m?T-K>2XgxQi}(iAV%rO3wby=4Nad96+Brp|RvR(-@sbfqEB-9M}{<-inv6PA;d~ zbrHzGb@0Q5m)cvdv)gqhK$Mt)o*ugd+)-D zr|~(4&%eLh3%`0klkwf+xm)^gMkAs&U3Wa9v)RwaZK(;jr%}SbpN&bL@e7DBVi`tNt6srpCw~WEPa~Pm{|Hes0ZEozi!L{LTv8%&j{ZQLd>?SF<W8JYz|F7q6 zy#D?l%OCYuaP8WV71|3gRKWW`tp;HPqpjN0pN6rUWVl@ly^1lhv_Ft06T+ga=AZP4 zpFMlEN5IaIrG4V+vp?w-KYR9S>xRM|JAXXe=!y3yJ>$Q5mad@bs`5&H-SJ*+<)r!F z#QMMfTA$%-bFjwhIaHO=F&BoYiC#1QGG;adQF$38_Pn< z)Qa_O#_5tGbM^64u(SgxrLYj{xa!){(sC>nYuek}8o_c}RMaz2R#nfoP*}0v?v!>_ zwu;n&1|36L!hvq^GK8?agdHrRo2ydj{;`1vmpHoEC5nq!uGoBE!W@Tf-^4l039IdDn0(yKmYm9FF#pZv-pjDU?y08ant^jzYp|88LzOy{Hp(2AlMrwJ8Z^bT4(8SQ8mh<~E zJG-#3hXb+3(-#N%=)ogf3+*<#=R{pz=|A z0eBFf2;L+YlpvJGmRfx$_y=m#L!~UlfDnt=H?oaHbMLxdJs-V4$<#&qoP( z3dp$i96O+i^mBAKn%i8|3&0Q&S&<|L8hS1K2x-1gO zrDJBNYjAjdt3@lPoVYr)@?lO~B0Zaxn|n5kd<@F=tgQ5Om~*^MJ`%%*+#gm^6d<}r zbP=v}0S#lHB@a*-C#dDtNc(!*+AMvf8n+A#tdYjW7z}Z-X);dl+jr>L>C>l=9ooNl@8133qj-rYdwJzvSTpo?p&q(=t#*?fVhW0w#!EI~ z)`o92$rnDa0OE?h%)OWFTEXN4Io&gdvd&8$nS$b-0^rn}!$2t*3|<32T1IWP6iH%DDX-S2m&Kh>(w%%y>zZTKOaE3+_L=h=+EcLR-W9) zEANxb!-4|<#|;P$^XR#RnHIUDBMoKwRTvO5!_INN1S4o-#Bh9wUpACoVxO}D}1S-diM)_z@oj!c@#7UIT$rHzbqB%x*UY*HtnORwxr%$D) zr=L2VnKhzxW+r{}r6xK1^9qqYKHd%)iYV6h6yQAU8~`e_C65ZF^XNR7xeC5)$Xi0` zh%78?5!is)PmkB8vbD!QrJ@@xEHmKi865>apP@^(!ri-!3l8S``v<{`FM@4wze#&O z@BF2U#VDoXi>IM2JWW33WnM>*dNnrMEM2LhE{mBg+&qnD-ptz2!Tf#twabb5yChieG(^6S_qPa zaie^?ODk$Snwy4vbYZw285|G{!)-$pwxm&pQ0R?Ap`sp}Gbw1OcS!3G(m}l1vyUIo zZpXe9?CE;hP@hc|l{2FCsj#;VRBC5IN)kC}KJeBYu!+1Lrjpwl%~?O}-o5+4#?LoE z=Ts*F9fqVX9s0&}(0BU|+^ec?>I)3hk9`0xo)-GTrq!CNmg+*M-!~&EX3D)Fq09g7 zy}4kg#9Q>L6KY1Wp#(N9ke%nxi z@MTJNNgkX4mD_{|sbcvX;k_Dlx}R5QD`+kSw)eyl=;Dr?I1PmL zX|kOkTk=(|eW>o(WB_0v=q70qLXE91` z$ECYzYwLiu-EeE8Vf?JUA`GFm9=+OH_Y19cySue^jBKq>APs55t@RPwS^@8Ct8cgr zgimX~Y;^L?QxnEdFvui!T(^ON8W|j$t~e%m`po^_C%RXi%t5 zX)W&b^6EIZ<*m2g+H$VLOK45z@^(MWfV~Z6hc|8Axqshx8D&K^fa$d6r{`b-X>Lc+ zwl`DimpMNkZl%O2p{z@{a6Nr?NY+l7QR9_?to`Z0wz}E*BhdqJrRp(`8E4 zN%}6slW$9BcUy68aYa*YeRES|U#c!Y{frsW_O+s|xAxhA6tvUEnxzlYa9nh3ke?UE z7Ar{f#A>v4zd*Sd^xAfHNLGJ;O?@Nu`Hl58{lij=C<6hTo!e3_(ZG@@0|Q5jlnG0i zMHp7m(E-j>OHXTSPYn;yAY3tbTJ2;a5X^8*55k$7V%fSQ?;6*rWCJaA7pb{>N&3Ys z^jgJ~1BV*m*8L_~!)dgP(?A2M173OQ&iK*OCWMS${N$5QCVTS?MkbLmI1&czyyVHJ zAOF)U0j~=V8pW?;klOc;xvod)Tl9~8bdbU#Meprw!+MIDbk}1P|FDl75D!{a)9-%f zHnN$V6W*ER2-ro7!RdjwLZ2HoedS!Nbmp#{2mSkY;hlIBawErK>WO?>|>16#Or z9w0YDWUgnpf2OBnunJOp_x1Sr#7>y8C@E>%wj_%sXxh@h{q1i{rv+I|8nr~q04mRj zQBuNN4U1ArUQB&Cwe|M4Rj-jTGD;jP8!l7NjS7iG4kA*ZiIhMj$^C+2QE8Z?gz$qx}tqp z!JhyxyAo~!E%lZA(Wxc>{P5F{-e`g@4%{>1RJ%Q4@$+-Z2Kp8~zc|5eBDW6%YImSo z($P1~!uvRRn|uDD@hVvgIfMEE|Ag|{dxFs~2e`&2jHW5*$z7ON)-Vr)25K{XOJ9F@ z7N*Qu=%1*i-0y+;UnSn2UgaDOk53+C`3IsWX3YvT1$Fl+RUI8A*{8qH&0V@Q*UPIR zbDPOz+LqbiWm0r@!X(>4S6J1&%w@y6uFK{kXvh%64}eWpqDW6axO3~LpKjfG&|Utc zG4@jLVTl+!JH+4r(lnel{1{geXfn^C7+ zx2>fc5SJ9Jv^L=LDaSm$9vGcn_k%W#xqlpX9WRl$c}^584whoaA?3O7X=A3{VPx8U z-4>KSh{GRo_#uv;`uh-XJ~AASq_0DWDcSW3Q*|{q$>j4we4_hq2jv5+8y|I7Ft$z~i zl5EM=5^6X%4x295OQ~EN+Xi}bDJTqdk-Ee?7+;Xf3ZoOvoiDQC)y(rw-86fQl z()OSE;)`!K;UN!Y} ztwIZ}wyvhRxt{s}q^}xyG{TGe2|)oC@HeJC9Sav;^yTA+HD4@MT!#fhlHlj#zO42M z45Jrg;awyy4n#fwf&SFfDk!_ll~!>sJBQ*_FIu}r``oz; z^vcCc`R8(T&k3#KMl`5nHy6l4v8@aD8#T&)elPrmZTo8Ts8R3|93CDjE7-hwBfYfu zynXZ0qnm|RaU(iZpcj-Y6jgwlUMepssS15!K7|De?@eU8yriU@{#2z{u|QbEZ1()h zJ`q}lXBoL1N1;9ZNBY&V*4}P-T)>I~fXdDuY=>aN+15rEv3)(=y;iHh>(SHM1}+dQ z+QW@$SjUD1>(p3wD5Y4$`3FIO7#bR;(+QqZ)Vg5MPtz+w{<<)Z3qyPO4|J?YFiIT9 zC{c{|a2TV+5sVVEYpwQ9tQWz_ZR@ZHV5_dPc8UvYwYuOStrCV!0&Y}`Wu-S7CTu5z z#*Q5uBZxsZWag+6W71rJ|$p9lqp3zyANqhEdX)zPxxdeGCfs)4Sq4G6gn4mKAXe3^@Ap`}`4;A31a z->86rnzCk?vUQj2T=UM69>f+k^X|LhZ`Cnm1+W0cb9fFIz5kpfkrZvi7X8$ov?Hpy z%a<>o6<~L=4M)LV`sJaj`LS}>`|lsBYix>n{PD-1V4zx65nPO<%kK zK5Vimu^!%M$h-O{UijT(e}Db;RZDJ}6OWZ%NMiE52OoL#(G<8Kr0;k9pLy_}2OfCf zzWbK{Vfll?TulNJ>Wk<`#;9d z2DHAYp|ejr5d;bM-WE`C_)rNlNl*_|)M_?&%3voN8zY5@wKC$-M<1Qp^L<6rMaUiw zU+UuYH!TUowfpXQw6^s8sb$ON`!*f<4Avoss+$LrmYHVv6=vk3N#$k0a<{hxa)}y= zW*}@3`{L78McF{G*B9j!Rh>JQo{rzlJ+>;KunU@+OD^XX!J49|IJj}B$7*RT&Ocv# zuCf4&E4aHpdlACy^aEhoo9*MnS#2^k=;1AvZb!n(KiwXN-ClJ$M6D2}vYb{S0w*5K zypSUm@E-Q0r>Dv6xCEP1(25cbHkS%+z)2K<$%++$qk~i7*gh~eL~fcLr5u18K#+3D z#eQ5R_VUHuy~X0rLkIRBGuM_EV0bat6<&gUZdOiCPDS3u6Og!^$gQgGWzb5oOb7-D zI#Ac$+SVh>%mqR4*{pLHvM*svnQ;^=_1O{$84_s>K`aWnrrH0lWm6d_D!%*b;K4Gu z?f_e0fDHR15eRsD8vC%b)!hB}DVAmcR4n}2xg)1ObnQXByTp^dO~&JNRLFGkuE6Q~upkQ11WR=l-+U zZu}`K^YQ3eujC9fl#sEL$PS(8k*NZD80*Q|fBqCL52~&7$U}JyXJi27V1SI%gxHSp z#EO46@NnVCqjrg>4m>tz5qLTRbr3QAC8NkOe1z9fyvAu6eN`SPX%!9WQj>^2$p3R}QxY^sn*=+K#{AT?U5sl-83ThR@hX z%%3-J-uzn@En0%nbjhM;Z@c^DyKj5;<>Zi< zTVg_zZz1sAG8{uvQW(5Om?2F!Ll+;G)T!1&l!b#y zrhhf3q51V^sbeJMo|XYU44(=QQUMswVamT&{Px=zJy-Kj7$q5T?g4DWRR_@KEXCt z6ZTL@K07;;u+aly?0bQPt*$!3!W0l>mFI<`$hEPDQwWQJiNzk5 z)pfxU0?=ndKK`2&ut@#SuxTFn+VC7f^=PpMw;qk6*d#E_!MMhpFfzcGAU7i6%|x$E zh5_BmvM;D)=ZoR*peJF!h+a9zwbZ$X$0IyrNM-_tQyzklXCUO8(CUU+dEucL{-^MQ z)(HpYi4*A<8`6(M({|iw^n#}X_@xrD#APSmN(@P~MM6k{7m1=#u5gu5dh}PtC9M~I zlbm32Wum_n1anMzH2&kpx8?W|l{YH}0SMy2n1=Hm;Hh8gdSLi78?KgSw`>hx%8P6_yoZ`T?Zi*lL86O4OaJR$aQZYE@_LIY{2m z)eZ^5b9Ts`>_dkZEI_J@TZvy>0!_Dxkmu%*2GYR1^pbG)es1|K$;rvLEPqL*dWohR zFj-Kv!s8K_oUf@jr>{mir7HY_{DLSL)=v{DT)0|ODx{n0plRQ6*OZvn z2~y5N*Gg*&2RPdPXY-!U)fE<^5o{v_$c~pxoCxCw%P3}1gYu&yiUl%q4n+V_&>WC-lepnX zNgwD72yhcE!}|>ylj>(N?fH1)H8F{GhP3aliH45oL_rx@)1csFXf$w3cTF_sOGHv4B^*o&VlFv=t z+BKhlhD43xfleWpP2#*fNbPpwLK@80mO)!q39^ zf_V^O6oT;^VT7S(u1SXy^`b(Fd6>aj#RY_MGon}Q!8wQ&B8&o|!1WH0-wMEzV9&!?Nix{#NVth@BUX$N z=wN6s5$-TN+6AovE=duF5@B#BE*8_$d7ft$FJ6t{m@-AO>si_=F$AxM*%_S`tG;Krr&TM?Z>1 zlDV-+#3-~703YVK5-8U(4J|Ir}XhmFaiNc=~V=V02x-QdVyOT&GX2w&ZKrM-!kBOQZ= zM|$@%4dn>ef=KYoOesndBTyO`KF6)b!<(3}kYut2A)e;J6uO$0B60Av5OMndW*K_M zCQlg(V^b9z=E5;9;qYe?cN^C32f%^ci$9aN;PfL);1>}46SB`Fu7;adxMKx-J2?5p zT1rhCJN)0(Hci)y*X}G(M<~BnhX)d;+4&|0fzgoXU=T=#Y)2SR7J9}4&ko1acLmEr z83K3_2wNRcHMHk8p!hKw8ceJPOibvxfYa=u|GQIRKyB)giYU}(7;$?{R(kM(kPA|L z;ETK$&{-4#(bU-kY#Q#Waa#U(KNFF_)`9^>S)n3dr&5rGDeI{^5OK*I!L8OM$J1@ zQ3q)ApPEu>9Z>tuRAidkccvnHMvr|b9o?v9=Fh@lf>|L9!X%T=+<`FSH-$hLra=gV zu@w#1AEM!TfYr|ICZ7a$)bf9s4h6yIkB+@9*)Tza&C`b+T zCv(MS)FXo&A#NK>fL4Ndg4iKi{J8z(f-VFaV6jsN`V3fxzYC%+he{2%Ve z=>ijv=M9(2|9*bxUWJaW8uS>t$D(7a^g!`}T^Dy@yfli;kbS46&V^6eKe?VlWBWV& zYzhM;X`2hx%n=L@QUfuJR*WX2al0Wo*^owcLx3`3k^835s%RWqv`h81aTFgYzPRh+ z)i}f)5q3v9f1Lm%O~XQuZuO`?lrxCMC@~PlXvHdQ<(3M!kwA~>V+U`87;#7q1 zUJSXGIl)TFyhxIn80tA@G(9AH#-9J?*&xIlgAyEtc!N-aK`245rluNfPN_%@5>z=i zXy+hHa}Ba4QC!^m^(nA}r|hWDi`K8lYLJa7M$3CgG~i&w@TW(;p7ivrT9pExpAC3_ zB3iWyPP@n!Ok2|Bd;CMy{;%hmyv*<;gE6eBp#c(9xn07+0-j+cP80Y)sn=-gJDNm~ z_(iAt;fg&;E z7(jxJq}b$jXGX@hufN`wk#Q%|DBF_vy1zQqf6sEH)#OzVlh=w0*ud5cNpwnR3hkW4 zIM`uEon%9>xGi`hK5G$kcnIn>_FIT zQtLGu`iJZwLwaQAr*{aY?#WT?wK<}>L6%TJkSw@!G&ID3atZlCu52a=%?+|d{Q^*y zp!FOsAV*?kDA4jf@**k7JD>Gb^A3}m<_1}U1KmMWJ zOU-46Ijg-N{y3wykJ^#VrQdvJD&g{;YhF5sTU@!~9O0@O%=HjYsRFP+0?{hqLm<_s3)%=&Ah>Disa>vHy%n@bBz z&E~8Scg+BgxX=wx;0~NCovz~b3iCGaMd>2K`#`oFCi*9Lt{?ZL! zoo|%^Fez(2|JC3?Ov=h(#(2@nl$ZgNvJv`_+QwRXsLxP3IXhvg+!Sn0MClKWhp)dc zJ@`eB{LNkZRN6qb3chH!R1Tn_K4U!%3S(HQx#X~9h#4f*LrlQ;0pCY%&6Z>@+!g02 zB@zrvkL5lyhk2nnnHWWqsf&pOBT$36F+(DDVP%Kq9mX5I#y{HgzfjI8h?7S-zm9TF zL7XXsi7+r6##>=?ip4BMcpQqGmZ$rL?*?;a1w4P1m6n4yxdfvsRAE)s_`eLgF_49q z9+rY;bZ7t+;MD009)+zvOz!P21OuCTv51gq0g?W!eW)YrM4q`rAOFMGJ1>;h42H*g zTZ(sYbpO_Q_SmK2-_DoTIz|DdU$T26nj9<5TsIeE!z$NbV^(iJ@WT(^@7u7xKnL2# zNLv$7?2XJL;q{a{fzzo}-pYaYvCj? z-m6Ey^2*9Y0J;aeYA#;9SZX<11b&;SsSECg3EJY>v*WB#l%M!v`$r#rwCSkHV6Lf! zBf0v9I)JhpF-2g?uf~MeP!F|QE#{02DVS)*;QVD%!S?S2i!=sE*yhoa1UzHNK@1un zU#&*%r`P%T`TJuIfjSLJP(QsN^lQGDRHA=12zR@SK^vHjHZU7)AO>w9hA=)_)HSVn zdK6TmB9WHPIOZyIZ8c>PMm3inHflhe0geUmAfk?uI@D`uz^DLtFhRXuGIs{V|FCIS zc1b-GnV_~5@1k`VJihx!KtXq0#w=e|e(o5PeD~e=E--*RIK+31iG;Q_mAC8R^3T!J zV=gYZ?{_Qj8k008@#e*MKKLXp`QVeK;oypRK{jL;c@q@I=_@psG9Xvuq?^sLKy%t1oQU;7-s-%z># zujK2O+XdZ~Azz*UK?&YqTLM;t#?U2JYy<6okbhQj7KJ z0i#TWduGpb0z?okq_jp~V{>2Sp2?d*khq0c6E~bd8BL&e)99l_juj(Pi1q7Pqs&1Z zjM2a@A0zZ4}ZLig-hjwon4rMG8nUSCyBUD=N$A%8bsd z@bkt)D3vBYy>sVjm7Mw&TftR+cjR3NE}r9ICV0<1!YxadqpIemOITAiQ9*T)Y%Z^Vy6 zea9oc@ks9sq&FVvjYoQY;UgYvqe^o%+&}?CW3H%xCdnufVSNNjeqQ3h;?2p5(tO87 zHs}}V8zRT$lm#>$XbLFHffmdtnK`n@*_S=@%>-MWGZ@+oGOhWr7<&k$;_Bi5>t9A&9_QON{zXyw$=Q^J|VAsmb4|)VFjkNeG)~Qg?`^ zPiRpnni8jrI#&I0{LExxVpgnxg_U^P_&=^9Px1aawOQMvMpc&FH#Kzp_|U2Ml~j(3 z+MZRLV+yRU?y54|Y}GYgfcsQmuC6(MzP9dibu~G~`&(sdms4byh`MbGg{@m8v5TB8 zwajV?^kMuZK(%V+dd5c?D%Y$0{M3Q6P$dcH{hb;{Y?WD64jGt!yJQX({t+`8=dff7 zV+>hl;K;)U5xLXA;==?75kDyq7s3LBp75e;PoXacl~F#aL#`GInQFGU?LLLG*qzlRe>ts({; z-GB}MiBu43)nRp8XD-Jsq5K4|9He}+VhQAT_8}4+BrGzA^M|K|Z#V>?MEq4Md)cMR zWht?$@7J&YUKM-KlO5(-yNV=Jt&cHP8$56M@~2;WZP}>Gt!vh--?;H`1J4+RUjzJ6 zUP53PABRI_nLrw7WPr1^y8M$3&V?%%I!pKLXzS{CHmwVI(cfBMUWGPu_Ok<_w>|dQ zG`&7JCgw%_LbD(w-klL-!JT+_MhF$1DZLb4@Ii|uxX$pCpM?>Emee0mQg{F7C57GD zh}V^pa$nUWYoR^8y**r5r0(Ngr9HYko_+QXT~Fz;Es;sl9DW%twKU&`!y7k#ux8Dg ztz^_PI8l0P`SN*SQgoMFP#|`BLWoz2O(;DYeBgjwt6#b#hCQ_55F4}N4^iQM>_Af` zExW*&7*Md^^w?v!H5~Ztv&^QZI`q+c>;F>eO-Jd`eM$;S?>>wbD-29XFjDFMbDu^u zy5Op!^75kUoNiziYr1Pkwm{(d{D97f%$Xwpd&r3Pi2aYy2x%G6;Qf$q)r|6U=PDg- zx`=u6B6JzU<*0_==){|EhL}$_?cs-6ck%h@Jat3^pf8q|oR0Hur;=ADwEhgXK$#6U z(L&Q3k5Sawbut!oxx25gx4o(rj!mobMmEbJn9cR| z!!QwSg z&ppR8DJi@tC51ePFlWC*a(R+#3aG7btgET5uZ7#anwol0>0r*M6MhD661{`91z4d1 zeEs|>0gA7$UJrDWp9T{>{gCKuZW8jg0(tur^7af7krl|>3ao8z!nW89xd%N#5K>u( zQwK!}Q`lEac^PIY{DT|HVc1h%Rstaj$^fG1LpX4&rU18brJl43mm+ z_f6t>3*LhGNri>*I0{=w1&jHE75UoXcPBDJY-fs#ira1UKbu9VEGz4CU9to;^{7+; zfRys+#T*~m1$lAjAW>_5wQ3;j@si77B^nbf7j?kgr+biL){t-CVn&DX-2L}2NYbQ= zd?O>JsodCvoA1A$SNkMB^uPo6&z~}B7GxQK6AMI*%;LW!laJ)TPrUo(DN|l~1zUhv zxZ>uxAO!3q8ECGruI{r^QR_f=cMGx)uLyFbMk|p>oomSKw{F5l-3(a=Jwf zaDns z-ioGwh-8zTbBBsI|8qUhMwJHeTvTr$>QW`-ncn3NmZZ$ZN_P!OGqNcpWj?;qcP4DC z_ZgYoBomk0?n{z*Z|ZQ;Tnp!`4GqmrXJPuwrgCsZhCb(w_O93XYUn92Anw|qgw{R* zc}qlVpNZB!0ePE%)*eQ?t@~A3QHu5~s8!$xv$U+DgaVXPQ8UOQyBa$>K*5=}`M`>b zoSfR)oE*KnrBUao_wwrMnvmoq<0ms|sp9|3+k3!ARc8I;cY4pXBs1xgLMVpNK|rJz z6&ot*u4}=9uDb53>$~;$GP&5+eODLzx{8R1ND=7-LMI`RUMIaxZ<*f9|NBgctE~Ea z-{0r|`%U0xlH7aeKIb{lc~1GBb4p5FE`{P{&Ma!8KrrzQtzh1~<;&;KU%q_y(xpfh z{*Zs)#@{^h$RGYtejfr6%s*{hOiBYC*|Y?m2rD>EO^HN9!^@oW)R0s<^adUd@buJc zc*F>{NrhjA^=6}C#6;lH6>tJ!n2DER7ZQZ}8~J3NkG&MiOz=S}9HU5AXJ#l>1V{aE zSEMMj9F$oB%B%oouKG=>$uMjNh@ArhAu|=P9>_b$1-!8KdwUUs!m+1!Oz6Ey`Uoeq zDc7nND;=`n6|UJ=Z4vYuwVK982BWEoNWreIraqDj-PhFH+jQ>Sc~s@8X-bK&y>AL| z%OI02fum8&Nv1rlFLRlb)~bfMQ?#1p%Wbx@G9nspx#f;!kR639?zru?J2q^1^s&bt zf8Y-4aUv(*6rg~OI!2j+{l`6`=;qCMc{0qRAz~$goX7>m%K!7L?Y!G7u0#&g# z&7kzHp!5px;0o~Ilxry6o8m{1wg+1bbTmTf0Yd(+F;Z`lVXnzD0?CXQ4zbPOlDcq= z)OtP2$e{mGdpn;Owl;#wb^54Sh|YghC+M{IubF7Vf?Ft5stIs3XD7^v4vfIry!l&# z7mgA9j_c8Sa>@E9pL=Td!kgDW0p04cJ8r*m_R}vufhuF5_+PT%4`(Ccgc&dx`r#F} zQ3{w6QzuOlGUzGSP+p&6W{}!|O$-_|A^Z@T4$~Ovx5%)@;usV#<9Ok~-aO?R(x<2R z8??^A9tZ7|P+m^vLp4VGAG$}1_LD&SRgi6~G~ZT3P%1!0xHlk!##BS1peV5Or9!r_ z@38}d#rvy>~e2 zMBVYac*ZSjm(MO%@cD}3*~`}&f@Kf==Ap76?t)8GSEr6-VpFr55lc0F<`$vuK@m4_ zrH8AYy;3dBIDERX8|`?#`tHWl8JXRCKl*5|Et67U-x(r`A>07if56Mb=b0d+7W*SA z4v@}$QE&}Kwhy^N3BHwqn-ySKMez@vH?|u*4T?PjZhZ#a`ZT!p8F1_DQB-sZhds_w z2O_9Y6$T9kpSQE>k|DuxAbY*HcWBUMbC}={9}L8;0c70T03OK_=ff@0{BG*gqoIr`OXwMim%tW!LXp{-vKF$&d(G4>mZ@)a~PE@FcGRlmw0JLKcC**MR5V42` z6Y(I2p3UQNf$Jg%Au^ZbIr)WUMe>q)vx}6n9Kf*fXLd{}5)(Pbn)jORKA+oWvRceGyWQpS_^$duR*_W(o>>K+A)Rej zfoBT42M9{Th_<{?BJA;Eu2Ho- z5evNi_U`VmdIcIyEmwuRcfU;+rX^4^+cKI+D=0_*`tpJ_XD!kVE;-Z6*FE~^qwC74 zTx|hRHAvb-=O_FZ3Pr+u?pdG~CA*Q*(v=XY7u-V^`s_xN$z-&l&k|l#)R#c6P?$>k z0H_-swOS~nwg3er87vNIcgcbijw=$2(}b)jscsmFv4m1FU8v5IA`e!IdCtdVBrD*n(o|Ezsa;QE>@-v=`&- z9p#qLl)S%x<;t`KmU%2~0#?BUY6H$08j4Dbmo8m8K^h-|AA9^mc!LH=A%y#|@j`?# zD84KM;s(LHdM!Kko3V@*HJ-E_Pf9XF)Zh>`V7ekEiP92T3Slf7#A<^gMohuivHMnV zpx&{W@M-VetI?3c?uWkKx0k%vBn|a@vE->T8YSX66%4La%bBtW!OTT8m3h-mgM&+# zu3ouh_04EI+uz?usmgAqo*+mvW+W1?V8lkXoZicO_Ux&qMG#|nbXZh+gA&S11qTXk zEJ2B6*yy1}LHW+O_?_RUc9e9@SnWrW*_I8Or>C$P@OY(ES{{n$5=04;+tAY5(t-!& z0yX^{r?|MJxH!5r`K#n-)Jw1O7Y@``*Y*so@=z78Qi^)yD_^Q5*~ph_@xGw}Fgivg zvgKblE6hWAfufmIWDKoH5GWc@GZ)mnio-yhoq;Wi$>I*cJ=HU~w}RqUq_CVULS7a% zYXv$}|0iml;>g@N`?Jl|OBIyj=uwqsGQeb$H7YP6dK3`oUd3PVSll5vy9|T-DyT>W zR}P0zhHNHzKekJ}f+skD72gCZyn^}sJ68PSAzjU-eoNB;X}FzwrR+&`F!eUBh%BK#$bI-au>AC83$C97nHM&7o;;9W zoESOx&O7hC^_6aE^7`a+ncEK?I<({6x8K=a+tJa{W7*d)hZQI|jVxB-e8D9Y9k`si zo@&sdWI}{(*o!MK`>ZzUB0W#a5^B_uQHR+YbzzzNBvTeV`Q($67;#TWWu|X5;_B=5 zaV0P#qi9xYt#3(lM@6}df5|8)#b;TduG8_=Hh%!wolFH*sNR!PMEaXHV6rbCx{%%rnodpB9$cPjCHV&&l&A z(+gMrY~9T(mo22Rl_;k!T=wKL)Lp?B@AvmE%oQ*{LOG|GlK;V|*s=o2e|INuP2QAT zDcH2L4FcD!Yx-gtDV30Xp7-vaGu<}t!!O-L{f&AHRcrnTD`q>jliG_p|2yZ2pD)W| z{th@d?(bF?DVRRfGIT+B9Laop8rlc_e0uSfOILLLZ8awvF^bfyp&s9K1HJv^dqcyC zlhqx~^{I}?V}Hl-^pP{CuaFCBYwHx>{&xHg(igOB)M-uTye>|n5j7MX6mA8qqk>gf zz(nzV#>^1YPw*o$>@1?#3A>t-+3&0ex@dIsP#02%>cl-}+x9q9xO^mJ!uH|(u$ zU5~Dtm|Vm)GtjHk0j}6Th=n!KMbuOv7j=E4@xb5g3L|=j)VWeKp;j&L9NHHVck=Z^5k?DN6TX|;z@Fg z&C*8Qz=*lK_S~ucpX}LtbjK@X^6@p}l^sWSZ~Ne{A6K5cZ19)|yBY_X*tppvAH2Ww zY-3{DikUe|nPz6Tr}4s9A9y_j7mjXO1EyJv2;c9}o%omF#rerQxGx{QFyNtH*Rn3v z9DEb$ChUqg-}xJW*uNG31M*Slq0%NVSg_#UWz?%+$oHAQd*%h=$$5X~pYOu2C#O-K z&KkBN|M>ay$N#qJFUQVbJb|BT>{P}L>nZhOoUwf2 z^b9IAa-j;pzhI#(7x4YWMWdE4o3?Nz{=PU1^_o#G3LCN!xE#IFWVS-VwVF*9ixC#D z(QGgqiP4Ka8_`ijagB!I6yC%{><0rBwvqG^TxKwt&35Q`7Ms;%)Q=#4d@-uQAQA&O z50tfU*#I`)zF|UdDOP;P)130Tl!Cq9vhyjLH4PzXa5!ER- zCpR|>C80CZmB?}bzw)rD^}7-4_eHGVKVtoE#QNQc1v9H3I96gl!ka|=Nh$Lkz9hI! z2$^B&W09s7ZeOan*YHq3)Gjf+eK3k(S8`!lR!qY)2FmbiTMGbq3x>^k3Qi4kSV+j08!<;e|WU*gwK)LU{FE?g+> zsHQ&EavYBH@4D|_4(29bo0)&+i7E=^QkTqa$E5h29=*fmc>M9()dp|vhlr?bwa%LM z#D6?a%SuWm(uAaZ?rqQiVewPX-F7z${f#;4US+><`@HGt6>LUY{@Q0(qNdGQP1CQa z_k>R@&haAn+R-ZqtG}id<>bs+t~`eV=|^-Oi1#ezT!80{Sj)(9 zh93famf}5Ss`OWyR6&*g&p&2NYbpl!%+sLb9Q3wga8EI~N72>Q*N2u!UEST#lHjlE zAc;v46bPiaT;%S-r-l@I;%P`J%lN)Fd1rE3auKwaTT%r-vjM1&;D2tCBTOV-d5LNb zB;FeNwN@-jjh?93cykLwP90}Fz$k<|$Y$I|Z9;e1O^n+ZZ*GEQOTM&;k%z23Ia8}J znNT;%3OmP$4y52lL#5%^icMm~1{<%Q(3fwY(YqA26@~!}VImFTYv_P|l6i7tH;e_iNe1I_uy$ zCVEA~hs@886)|HcV@Tp5*>N}G?lIy8b=S4G zHp7H(YNak~bDT~iDv$?k$u8R?y5se1=d%H|lC!MD^N0pQ6wE~Ytq%UT)u!8 zclkZP{M}2>{qDIH*sHdUeK8+`EtTVTk2-$)+m~K?>6u4%^@7QiX|I#lq2d3vz)j}? zV%+Z<@uYC~2D1$Ytd$)b?)|M|$G`C~N>b6r~Xb~*TRBJRS_a>q4no%K&18jg-*jF`_ ze$}SbfU0vq)j6Q522|C6s?vVLNLPOk#24foqB7^Th;>1ifKDc0v5?cXXBG8o(t^gX zmy+`2yGZZo{N~)brAcv8ya{{VlH~O?H4#TGj?~9!iiabOlpluox>Zyd8*mRQ!fkeWqapmONyGUQd!a6Eq_Z?uZBD z@gOZ#h$JdS2G%MTH43ZbUGw)b&LLymfOU8?*5TDLD&&-qz!Wg4Fk{IJVNs%T;mcq) zAUe>g>+C=CiZH_DqShavJ|SSj%$ZXn#9_cA|Hy0nUHQ|?pgC`$-V#4u&bOa@S6i`Z zhn`YsS;AgVZUxL+im^F$M6 z$XE!jv`5#2V1mVL8OB(+i3l1m0KbHi{bZBHmxwPy=paj-rOqW?wUjalH0 zt2hbVctD4;4qXF1gWY|d9i4q7s=-7bQ!O(>@m^zz<@?SqN&ZrNVz+c;fj#`;2X6?|dcp1$+v zF&=oB@Bqg02*$Dje7*sE&aisu$wBi#HD0a#20tTV@9G+{cA~~vRn^(*5d-;4y}pO& z%695Y?Np0JKQPn_t-A$fW?{UYJh5%x$@6$!s5YDX`bI1^+mNmaO{m-Yd-XPY8cU3@ zb0nx>!yL^H_0;b9Os3$Eb~hh}XHzPbOIRIVtn3V)zY2cMlL$qj45f@8wAx2Uoi27B z3dWUBEX-oS+Z1Lp6pH-(Jn7}jvAh}AO`bGCp0Heo{t&B}l8pFBBMu9v@cO$Rh`Jrf z0J!&OKl>ScRlabU2nwMoyJXeIjT@I!cWI{vgZ>y>4h1ZY&$6Q4p26mEQgD8-l1iV? z<+224%@TOv@HJWj{G3_zG(Ji{Ck0Y|CLT0PAWS+U(Lk8VjSC@EAcrO>BxpgJm8ph* zgvF9z0Zv8v?EpN%p$NqahT!M7A{WC7#m3{qjshhI4o=kmjYlJaP}moxry0z6_TgcN z8JHihZ`AIy2U!BjZz177=%_>8CQ@?E++yp5zerEDXs7yoL9Y#vX?PJ#egvArEJT=* zp_+*H!@nHy`HaQ@s=u>&thmo<9K%bl!Thhm{I9|MufhDU!Tc9h_3L}|dd#Z6-)1;< z`tYt@yAD;=RieSh@j4y02_2-8Mz61HX@TltHWdMn?BW6T+JKLJio;`b|t;a+c z9X$HQ$De+hle2Z}Cp#+-96yBn95`m;VpHLmE^XVktp;4Jkw`L%iWCY1;!sY^sNS5E zr%MhWHc?q#nJ(adHdR&_%ga0D5~O0p-Y*$Wdk^{F9R| zkrfseDkK7ELn7!j8bpR963j~k;eH&2(zrXoOeDN9HrSmV3nb{GuDW(a8tiuDP?;J# zO{B!6+2{H9`$&~Yg3GSjmiG~ zt=G7^-H2<9?bYW_9Y3(=>mz4sI&yMet4?DVE-)I0yHJe`o9xMbpVw5M zJ$33-ZEGLu@fn8)nIXH8E;L5OaD0m+#=*-6zRE4kP8w>qY}ry{NMse}?KyP0A1YP> z01|vg!~@E^WH;P0do~JkG_zF6s>op}OXsb<=PtK9sh)J(3opEI z+oa5x(|O13YvyA06!`s+Izla&xzC&CbqOaoW5PcAmB)Z@u-_;W55XmnF@5LUY#;E% zrNR{7_mVk~NMvC9K#J{IF(23-%?Ln!y(vJM(2^OV39V<71JIJKBLf`}LY-r5A4__A zdO=5TJWkjjx+2)#>lp>x8$B`W82#5m?yQB}xdrrJ3%Rova;M~U6QRGc?ZVjuP&JLl zFZS#?Qr*-KfoE(ubMnC6eaH4A8@|YBG!3@ZSA+3SUuKU%ni6Qcn-7aL&ERTX}Xx9@}bIxp)S( zQd%MYqUq^3yuK|&m=4L)07N*i{XgqrKXm#Div!tHvDRb4aV^wqp z?97q64p2KA6u>FY{h56xD$wZjQ*N%~aH%_>;zq*i1EdIZhn42x)38!D4--qw8{`WiUsRJTV?QBe_aTy zdzBv_!`L6g*dN8%AH&!m!`P>Hbo5!hT*}#b30vRUBgeXQ!$dxG=nn5Wwtqi@Cr2(_ zId%N_@v19*kPf|obC*LU5vg5H5GoYb=^CMKIvu*RL|VWVP*oKjwT)V>hMty-7cc61 z2Cek;p=f%J>YK`r;Rx*O;%tV14YaPDO9@!A9JTtBPd@pkvE2n#!qZM9#}SC^@`3^- zBWN8#7||&zDN#e0Ql~Sl2JoNa*M?t7DzK;Sjhu=y&j5&$x7uG z5h){=t5iIu&oX;-+oB-j~geS}YDP5h8J~*+Nfu z_uCSFSJ2_HfZYQLM!;eO|Bo2`m{f~}kUBhwUBG4-B+{q@HFwIPStVUovNH!+&B`t(KV}i#fvi?$G!zjEIZS9r5jRsV99stjEj1IoFcZ8mj+RmpeaLKX>S#E3Y}cos zehS#<@r$i`_-HeZkxihgij)PdsX1?kGK8J@JgwTgH795L_HEk^?D`UCe^FeFSj(lY z2-#&65Os&e;$X&tOs+yvJO^#MXVI#so(hLIYo z5b!YpVm8<3v+D3^eW=w55uHdVMO=Xp(K>|cWFi5FNh!4%DU>&Z;ls!R0US`77wd;s z4Gr1tW^-4s#UHewYNyTKC%&7z9zZ8@(A3AjG#OZVA&YeDS>=2zweT5Rt;^u7iC~9_Aa-9lU z(-A)!)DV{pu`&@f6o#Lgp%G|iEV*yV2BFFqi%mz&RS}~Eao{D04$Wd}2`ni|OCyOy z1mHgjL??$Yk*A{*Tk1v#p(4S@hJc;XU@@8N1=bKe8#-NQCpBj4kQ@b)i+nximE?6? zk2Ric1$&28CDEuf0e27CYLLf?qX}7U>Z+=4D=8EMrm{H6s89`hY*aESo*mY4>X3?h zO1Eu0(jODB(CaRcarvvSs-#X~enA21)DMheyv=?pI|OUvrd;GOas{E#bPcT6g6WG< z(RDHPuvU#WG9D9FAtLj+!dOgENGLG9P!Xfm1Tx|s&_$y*ARo~99Iq5e5`b6G!sWs2 zg2O#_K%gbU6jfyDbhZ;C3-b9!qXVr9&BRM+Hba51kWE2twF=P398gogpCbpEKqo?} z&hh#yC7{(5tbr+@6`Ai6&`M@Bb{#*~(Mt@2)4jSA)Lv~`PjhosRddP?Ft^m3Y4(?2 zeklJ7tW)m$MDI_(h@b`YHK?m;%a0y)I@Kz0zCtmtUqQ1Uc;G3{AD2*n=cY9K=uLnE zGhd`$)TW(ZzFehpIKlBgUrVu%MvYCu@CX-ZKPJ=T3;Gb8O+(B(kq{Ae1uD8ELd%FQ zj=pC^Dc6s5wZS=)7J$#+>T(gb4*9JidW;K5MjffSLu#-HHL2QEC^G_*>Miw5lwM_S z&8cK63>5RJ@CK$&L1BvV=L90r%prR{GX#f6$_ua__qiMQxeNEX8x*=56q*QuHEb|< zHC$SMulX7zQe)h!|yLNtc@W{SnhxY8;^(Blae0mxu40Y3*JvfShienU^P+nqgVU7;U#j1GG(1n31hPnM$|$$^8=Y3wH8 zlZPPdOR<}W!^DL2Bt)VZQR!SBc$aswMhh_Y(FMdSE zJDoowkH~nH>PKXJB>p2Z9?IYk%lJ{tkH~l{B;F6p_+{YeW#H(g;OJ%G=w;yO zg3FyIYzU^#%T;@K?%cVz>hiID`}Q3;b-800BA=){c}C-)uC2Ki%v#&rrW-Wc?Om-+ z)u2{&b8DxaE-;N*p;1%YIc$vQ+DsfcaEkHc9vGvwRHHUeCX>KZMr=^h??)m#al)KT zD~BaoE=`(F(9B{PqauqTE1t6qzgsq^I03!dC@Nm~5Dd}P)Cz4Lb~x0UC-$ct?#oDE z@nSVEHiWSGP@I>!m@Z()qnP|?j4e(>S^YFITLf<83)l$?dUpZ(*<%Ed@?&3uJAK`B~t6DJ(4bzPBYDe)7qUk3BW+XL<0M zUv7NrscXG0b``R@K<}%57C2u3TKm4YB>_Mi+Z7r2v*c1E-}ScG?TE_iuZ;Uyy6U?0LqpekTd??1P7|Cu=4XNPB@n>x{>|GmJ_Fyg`(fxG z`=;t!>%a2A30VKv+WlrTnkhhbk6ZsECKH*H@7n!ab93`DIp13U?DV|s?5)?@{RIVu zg(}#7px_)!ur3~?uWb%guw0N z)_<6Rf|vhp_m@G<IRnLB^Cp#t<7bo%lI>btjLXKX~}?@ydg>WQRPYRehVkJZq5jx!tJF9=d67!PGR1w=rHlR+e0{j*TjN%8g zotuFF)i6A+b3A}iKY&p`gi$|$Q9p!Hm$h~eIQ{mX`b#HaNq%##h4?v|&Ye2FbNkmP z&R(d)efnySAK0;D$AM~y*(nBtE7;Lbkoy41VBI%1Tp=BN_kL7)EahvjscCF!Y3}Ii z8!^yj?VilconIb2UvGgiN%<0ryu7@4-?0z=$z=x3y{A6=?6afQhdzV+{rnuYQ#(Z_ zi`hCGt8u&Gh@iN1;)Gl|yn9ZjR8u+=;qyFx0BKG={bnRL(`7Smm?m|4l#>gjP&JhW z3O>jpE6r8RdwS#I!YRw(VqZ4@*88C9u3tnwsZ|Aor{9e!CIJ|)5+!_Y7q(|2IPOP5 zSGXn(aN_z{%FLXiN%=~CEJ&A07|^xCRGKOmudFmgGQwfDBsVX&pde2vL?6)@L!6<8 z&o)Pi`jk@`3|?B1T1X584v$1}JbqNHiTXT4j?{bwgK@ZA#C$e_wq=g$%FdtPTgFNt zWfvVNhQP=UhbQ+&^-H0S$LIAU8_w$TVqa2uy*E6Gj&;zjeWOkSAhBA_(9jW+ zx?etI27#P*rz7ZhIRLwZh#%7ni5F!$=6c+VQjWP!S!O^mq$xZ=#Ktm^7Y#28L#w=U z#pwb`tf(U3@%pU&DKAQT0-@FSc5i?8olWmmwGJf`fB!p4_e}m#tAxX|?nqVD$&(Ug zmO71P9`mCxe)F5(+<()`b+!iY52IJm);YWF1t5m77W-h*A!Qv$`zuoDMWTgBk zRIEZCg`?ar2-x+YW+((dibAVQB+TA;0BxF+VXrUY52pMm8GaBW$`y+wp){2Q4Y1j4 ztj%7n(&b8vcmpA?#}x#AfDt2W{UKdn4`idfIX!~K23 zRaL`iLSPzpz<9)KaF|wQ<>X?8Vs)SI55si+;tO?dVUdDZ&oUW5j&8rbm*JGD8*(}o z3XDQVtAG;)t6+`aIXS0g|pyUod!e8=VLOF@2(P}F#$0u zn0!*n9R_rECcp#4dPc58%HbBr9iv_-kmya!K#P#3dRQ1RG0p%0P;_uKO_R<$nYZY1c?|a=H zh4V#!LTP|a)E~4G$hWmCCJTiVm#)QFX{jX67K$ViRjw+#XrhWL(MllILP2YPe@ZEi z&{93J8~aC0R~mqttM5QJT?u5FbHo%1*{x2DWo%~e!YI~b6t`g%w_y~w5zZd)At|w| z7Y1bK$wMD~^ub5(z5Brj^pfQNlDzxjk;6wTkDsWlJXzHR_I+Mki~)LGE)K`y^x~bv z!{H1%J!H-=w^x6?fB(Ky^_R``1WnEdEzONBy6(<4**=?DZ?wBTqmw_|nu`@^Ht9M$ z+S@)u$Xg9!RQ%VGwzgw`ufV%EkGHi|zKwSZ#Zm--U*`P!{`>F0?^jEK^g%e7o?vr7 zK6}>8^78AZO-zhaV`kbBhc&9_vjFGkm!cWr?nH36^S;f;M^n_T@kpwc`OZfvl764w^ zX&F*!j$E3}WOI3Z7TPEYy)c`KF)PDzrB~-_ z3Fc}E=4u({Y6<3Q3Fa!_)!OLsbT?k9Zmz#{{`BWNYOd_qUensqU0q+>>ct+DYu*)w zEvKg>5|+J32Y2Oec(%U&dqu@Z-+&)Cxx;i0DHu6Efw@4f$CdUMZ!@z|MD zwVSE;w7H543!Kg~jXo~d+*EtU>Ad~|CSw{Ojc3Z2!sZkT#M~Edz4^{N%4W@*cfIn@ zzkU9Xin((pm9D#Q<(lX4h%ak%eK(y~spedlfElYPo1;=)yw&IHvuUPJpKQS{ou8SN zr&Oe=^Cx7@nO{D=ybO_y^65pn^3sel zO44}|I={cQ&d{T4sjV66Z@*l9uCk&1fTAWeYW z)4vXdI$J*9vgOSt-S}=!XiE?`%RyU$xVaIu%@ua!i%fN=&(^uE4VM}k&Yh|{`p(}D z9l0%3`>&%3E8B5j#ldv4*=lyM{gS+^qL&tDBni zCgX_wE&8>8obGqC0UpUwSJz&=aHU(H1_^LN=kxU(g9I3jDiCkZp_PfUiJH>#iHdOI z`IRf?%$_%U<{a5FN{ePX|Hb{&9VIzz3J(xobYuC%iABl~^0PBJE(ClgX>%CiKqQLP5?E59!05U23Z945_3H)R4}>?ioqO;{t51PMaYRaM>YOEPZCO zRgX!Rd{DJWGlu5R)i8nTM|B*VZSP*4Zu|D;=J(#C06R8^VMA6`?ML36nNAghB!Dm` ziz5;(&DghZN%C>VfhxYDd-ZB(`SIh_w`KQO;yU52OQ`LPTkho+Tvq9q`OpjoWr1)t zs(1#2a*tXJ(QzV21qjEoHpAG!^1YS0n=_YTZiea`ca@f zo%LVQG`*|dh@==}{XuAEho}#=fOv9aF6f2s7*DZ(e{U>z)v8r_v7Y_39EAm;mBM=p z=jqrNdj+1igkXa4#h$|FPkCrLSU`)eny<@z5tc|HVnx;#8cv)rXx36$X#%LI!Zana zZ_?9ba3aalBqRz2tbzzm*N6{)_Mt|hmX}0LQNQ08c{!^YO_l~bnlEF3SDM>l`L;J- zhSGQi;7aIuz&4QW66Fd^CSNE9_hu~QH;v8iI7;p2@2r`;AEQ^maO@)C`5&X1-fqK+ z`7ydx_6?r^?En$`VcKyRE-%enF@aEP#ba0L#eZxCD8`?%;vuqZ@WpxX=2;8YLnytj z&GDtEry%t6tzBEl0trtTSems-hEPQ3{U{+NYT1tyQ_G6NX+h`x7)h@Tcbx)RbAF7h z9(QzH9%X_TNzJP)(DrK0D@t(s^u>#Ht*ygQ?|D$EP`-n#nAcu|^I3>q)Zb`%@WbA8 z(;Ab3Cfj8NPoa=P8c@H|^5Ax@tURMo&}fG#m8PYqM=3etYzWJ91RPWVJ$n|3Y@MC$ z$j{*sKS_cK4@t@=T13_`o}>s*@~xiJ+S*aujp&Y2)>l_w4@ADi{P{ zW+IbNE93yr1vuH8ZvyWlQLI{X``zo;g9WkD6|_`FrE?q?arywUAcGxapd-n){c(e-q!YN1p7~(>5aFv?B0#$ zYq@57Z)aQAAnFud`d3IWzfMJH|+IdQWadk$JpCFa?|S7*gpdS zo=}25-wE%i5mgvQy+XKiGi8CEKme7&MzMd!-445h#2MnG5hb|K`Fb@Ql431o|vGilOnM0%#=ioxyhPWtqnaFgns&WI4R zCr)?+q@xyJ#0>f9X-Of&jerJ^jqF+x2Qcn9MSSdRh8IbrF%DahhG;N57!+Vjk#Q)o zHVYLzff03+osN`pC5WVGBnWvPt&l}Ay?&3&i>|jANd@mnqv?-)=DXchxo4knN@W=+RxfoX(v)@o_bnfzu68&k78N zusky}jyboElF~S4#4;h=+)j(l6AU;EbYYK!o2h#LofF+j%(A)G=bJuVq4@MuEVoZF zuirFcUJ-sJ^V){AkxrWk``^Iol`9Q~g)Ga6!4>91@D)zZ0@UE~CvPl8#^;b?kwk*E z7=G}Fn49y60n`-6iUy`>3*zxn?5Qs7yfKPo0E@#Av=N3tA{H=0ZhxFE z1PCu280xp1tq_=&LD=c2`QFtf67lSS01VmSDk}&A`KQ;DEEk6r-eY|iQn^AQ#udAH z*b+(t5j^IbP-Epy)Qm-ON&e|X4LYhpf5N|%8YS#)IN#ic+_c)d_k>a;R&&H=smzQ* z-*Wln`uf;MAHDo?EEWvD@(T5;Ryw2EV7TwT=Jxg(iVPbO(^v0BLb~X3mNYL_KKmlj zc)xo>s8Q_Lp|SZ40tF?EW?omwo?BR0s<}}ZgKEJ{M1cCjXT)L#)Z6XpIduyCbvxVI z5Y&;H6QNLMW-t;lhd2~?_v${Qxw_A2fWU>WNbG4ufyw8l#v$m9afYzj)YQKxkebaH zg-m8b*&O+;J)e9+h7bt+yI5cS-I_tFTOr?{QR+R_I!oc z2_R_-QTxT0OokxuY{-ZPoTGIwSpwM8!8TsFjg541O>a~p+4A>2H6viF?&=dq_DRy; zfB(R?Prv$P%Nu|B>tFx68y%?%k@oJl+dF|@83-zoOF8exLW5z&3NANFZtn836m<4& z3$pm4-~Y#kTjwEJf6=;o9^R-FKL7mvk3I3w?^goX{SK15HjyvJQ_z$uw1qxj0&eJ1 zh)9jdi;5U^7p)4#sZ)A=AO)JFD7&0!UEhp0B=p;>gsqH zv+^)z<#Ei)!yMf0?RZxWjcqNY$TQ1CWXau0J19=f7-s0Pz`u*?! z@cc3)w7x<8MfsmEzWCz$#qbshr!2YeH@|!FR}WDyY182zjBw#%#D{S_Hk%F}>XSp- z3<6C;!I7A6$&!454_knZBgt7pm#1Ya+1S095{X!uSEy2m%GQ<5wuKTeEm?gF)MZq!frwmA<_-)Bz-WILP(eNSy-58Zg~Ec zQ*MXu%#r=OkWPK5u8*{9_x4=`sHhsHQ~`y~5c>5O55V8=vX5Y=>m9Pi#ERp`d)?@B zKAG?7Jx&W3%*n>2iiEB;w-S40Gxb{LA2)9N<*$DAt4D5LI1!s~>B4o$ioTQjwYJFT zbNKkm(uoQM9O?)@6&4p}@k73)OG~5?+&&^LSxT2A*-6X=n>X5Bd+0=WOu!T3CVWN; zsV`sw+!c(Y&Qwt><{b@j)v8EjXvjhq69W230{}`9d9_I>7~u$A(%I04$!a*&V-tP7 z`-}Z&A=av^FC+N~3SDzk1JL!2NWv+CTc7Rs4%USb{`c8HID_8kiD1z?UBu(=b2&%H zW+r9dXxOlAVBbL6qoyya?^aftOotBjp<6i5jCBQan`thG01OyRPTE|>;s2`PC{`Gj zD}W)==@N-GYo4W6X?ec0=guhc!%L74FX1^}f_!)h&oRHIzK^VEh+-s+jda!3TsVrwvKt67g8DjeuBHm{ z@>B4C4-K`{oc;Riug}yFRA3|ZrgomeFgyY@BFf3=`%DP=VfVJ1kYnI*x<}0@*$Zs_ z)$Be4N^4l{h(X{C%P2j+Zj4S z$(g6cS`@Ba1nbgg*xuYNO2?iYFsTJ%~{piQ>YAAuoT@q)9yQ z&;^|?AS^&malX*6qvy{rQ-ZvF5sybuM(JhqS3msRbI(2b?6Xfj^ZfJAKe}dq9(L%0 z`S&0f?Ed-1P-cqe--Xb`I)HlSq0S9z1~HlF{K{eT(jZtQs4>StSOzaD?3*{QKoUkU zC@j&;D=6T32eHBiy}SZ?K1z&$HN#L=z1ZRsP-9Ub3#thxfewH?CdubUsVb<#cp+gO z9|1;Q}X3S~}a2_ysU(^UUv;lmUvX2_u z`i2p2Xl-n`bne`x>S3pyV7c^d=jyJU!#;GTuA#Q-Sk;9~%`o&0UD$}GB7+g=_-;t) zW}q=@@wj>zLpFoOX6PRT)C~rcndFpWD}wPvPiq{aGSnY^u)lE_3zI7lrIBiKnViA% z7Y}`eo#@ND4l5^(7aeFmfixYm6-`Y~=R)kYL5x+|q|$V#2pKX_%rc}m4v+ZL^7GT{ z>IP%{62LujIF34c+6_~rN$NMxt)DY74Wganwb@9GU#?V@ef`g|4gF&I+*0J?3G?UO z4%u}Z_MxdB52eh?ng)#Ani3#2h*kFjv$FP4=|)W7x={N2_yr9WGe!*^mOam zdpqGM=;$C;B(P0@Z;s-pL0G1Juv&a^7GNbo??|^EG0HG12w|rJ2S+0z&s826-;Hj+ zx*OdBSyR&0--+$#%rUF(%ArHGgF{2aNJA_N(M3eD3b+K3SwSc1p9^)clPx~BG8D4ZQ%2`10>N2NmjUk zyrWARyIQ~;I?I)l?}Xy)un|3%p+~fK=pbr3I&|2JIXVhV?eMutJrPFS z?`I`E))8|cU^de7oSbwa?;jsw@oe4t51uRs8G=$i(~dGRHef~~q)vpCmb=}71oIaU zKK9sSzqtPw%!nK5HB!DPdqQz>j);jxwTtivjo|tIHpcx@o!JM%+Ahf5->m$gm?s#^o6qum`EA*Xs^MfXf~vj=BiP*ZH*oe$o57rVH3CD36gTa~6@djd8W#kQk% zX)_xi;GxEN;V15`Z_&5!WJle8+#A&z{&ARnVb&n-&Ga)Lx{JNCx0oQ$_-u}xZ3}wH zy-{eaneFy^ue~?P85e`k(65t|O5b2L{oj3N+r0JSYKkyI?E;}3xBjNDkvHq<~l7D9V628RPvyY=-MYuo8(2~9qV<^}_$jS8k zDdww;SD82VQ_E2EMnQedv?0*NVsMxaJH^o(sTe*L9qrt8Tf=eFiXNi z_rR2InS+@meMC6;9ISu*M=%oEr<=8K<>GJ`IpmQ;i+1xSk{SQQSG-S7sU(MWsD_$q3oF#`R$U#XC7k}Ui=C|_!N?xVz2JPm}c#qyN|2}{JIzQDq zrixtj@Ywkn^uM3~ule3fPEAb++QO0!n>d{R$tOyELTbLReIo7;aTGyjyP_yE0%#%2$l&aK2Ht`a=_64|D`WY?wVqjKah7NSo%rSAVB&LaU#oSjDwpI-YZ|0macU;d`1{u^@h zUy*~H9QIxFWZ%EyFgYb9u^%JvxL1GtmSg^1#{ZR^VkC#uS{c3i<4;_%gPig;IXp@Z zVsgOe{=^lSw5wO7o+^#}h}G~DSCDCk3mJ!$Y`l@Y`)O(V-?`%ZGm;YTpW)8Lxr+Pe zlEap3XYu>=#UyKCoZ;{iIb@RqNbwURNzKS9a^A1Wfk_Tu{NxoMkyC2OVHG({A%|=E z@%vBpCOM^&98%&nPTuYR$i_VH6p8;wpYg9NQfunk zryA!IE=9B;CmF(PIG~ygdCMhllYhQuBtLKs;a^mtA=hyJyVnqTa`kiA-@As?IU>Ev zNM_a5I!6>6x$~!ifBL=l)lKZ!8~;VUp?y`$TD_WDt$mffYSq%Eh#S0`dVA-cc>BKg z)iHTHj;|s0cSx?@Y>YFD!Gv80XAdmEB!drcCZ7?7`wArxwP~@KkP5-f3{!%b*PGN5 zGm)W1LxHTm2rS!h-x(OTr~15bmwEeY;F_y}Dg+#O#M%Q7NuM>G+_|&f%0q1mzO{ZQ zomDD@d7oCg7_EaAl`7z@QIsx3?$ms0tyY^Mkt$&zE2Uy;(IO2>!43_@rG<;=EH2tr z2T<-7rksS!gBgh$!?0&K2=uAo>;h6Yif%bfwpc8nNDMYW@x^qO)e0Yj&5AUq0h<-> zVt}aNI<{EQ z1x}n~@CFr82kOvib7CLXzgDvs2w1jP(RxojVu*A>8{2f9wcS6JEQf>XrR zbq)^2^UWsxWne~f5Y@N#gIBCM)_8mvXF)d%T{$K02>R1hY(3wBVk5?wMaC z#h|1`b8o%%);W{X)iWTwwOL3h2Hi5Hd}`UUVks(Y^hTt`%LHOoCcuFlnD-o^3@8bh zw+I&rg#vgP1wjFdsAOt$;PInCz9=e9puQz3!v#y9;_^^mM~sS3PLLCTY>;i#ISLQ_ zs59kHH7CLYRKHe9BIO7TA{3hd8U!vv+**s+eKf>mk9nDu;NDDdFYz)f!Es7(oB);K zN9JgN4>6LZ(cqv zP^qL+F`sXT=3K!7Wo1?A!?FMb6a+|{`x=(j?AdLceNAD>=`PnW_E)u*R8 zoC(*loXAWZMiJqUw;Z%dBo&n96=_+-A_E!Opm7rA#kJ1;pOLkzH8U!+`c{rgM4C) zn-1q=PaqE!{O78jt5#iqJ=WkR_2OJ<5J(iKGs~K>poXe1o*QzJh*Ycx73MGvh?F6< z+<|a{KS`H|qa$_AC3)P09WKjG5*;(a6KU~zGO`-4wCLU83TM*1 zNyGxgX&G#^-VvwE;E0yVl;{dYaE+8yTgGBJ;Rv)qqOnn_6K&(!Jc>*F$>M?n6%W&i zyk15eX{$`6Y0%~J5T_tJGcQ{VFBU&9LoUxCjdas;W=$x}&d)_Dc?HBeTI)cnvn5oN zxlcX!QF#IvN{?&2V*DM&Tn!kHvCVu3I-1Go6t&c?yC%a^ z3P#m=s=UJ73{H$L>M;bl;*Z`wtcxSi;%T@vJUkhI-yJ*t@sF+B5NJHq1Sg#aT{=WN*1==c_^EhpFfg0XW^nUweaO4zD&Z52^AS)7T6{~mydvO zRv=JXnw_1Vg=tpuAl}p2Y^&qBg)Ne(BeILsk4r>J>W|D%#(i3IVnlw?SCOf+b zjU=$GO`4iLb7nM(Nf+OB*G&^3OcW|Q9ci2)o6U|1KxDQb&h8Fm{%QrxfmTp3#9&{g zeKA(t1gy9Tp#69SO8M}xgu_vEcT1%r2vRni3TPQ~%e4?Rg;S=?rDjimMJ|z)4Gjr) z?fUp*4u{D^=Gh)?TG8d!*2Rmf>*|USl!Vw7==CFIWxRRwR;|*)iFwma*bmao1^M~S z%{Z8;2;|OYc`_+N>jpl*tPE`jP+AiRYep!k37|Zz1Q3yUN03S(*hi|=Gua`=pfgs8@K9Z~BNG3N+C6Zsn zzxrxy9N0YeXvJh4S4UwsZ!N+c^*U<_h1hSUf`=mfJ_noM?gB$L-Z z|JA4WdN%`28fNF`>rvroXb3ek$DW>>dUnCpXCIGpti?Qh8*>MOy6(z_3++}9@%P8t zPG73osn_p6ak#b%PEW)Q91dzwt4Jn=g)?S=kpe(ALv%UMZqrKmARwu|Bozxes2W<* zHW*-WKK|%%+bALm=AP=)l|uIJ-Cuw9>HdQUzuJy8xUZ{WixwfhFB}MTAaL965KWjc zA)P(i+B#)QW1|jHn|`wujkV~K>(oJ`ebM~sQ}V&%rPp6qJ||*ay*dl)=9b?+{|IW5 ztzAeh))q0DveFqd%4W=%QIsu*4%B-Ri@{_%bH;$Iw;WBLlFOt^1VTP4cF3fB(&-|D zBMpc6sx3~1%nYmmd+@cJCVxi4>MK3wW7F?9@>sC0B9MW3qd4i zBp9&uwjv;Nsn(E4IO2VMs*DU#%V(dTf-=zF0hbJtq+F<&6~ZPG_aRx=oxTkB3Wc71 zb`d0J-0P3TQAj?;QNI93J&4uu3(U%c;HU?|QB&P*4TGSyq2J~AIxK#4sXz+KAWC(U z;FHblhVR;oOjVNwiWoJd6~|&3Q#5gh7lM$DV!mluvJ|4VSebxjxMO}lI-XRZoSMh( zr>C|w>M5Myu5YQ{{qCC)rlY-b-`<^{e6jPh58pwj$W{Y3v%V{p+y1$$s-F5vdmZLJ zEz(jRn7CG&W*!ENek!9pEGQY zWEJ0Z)5@85Kl0Ems76$z;z1u^DzmP;VacjV)CBBAf{eTz2~iKEQ3UQl)tZ3;J2nE5 zLJdkxRf`w|gB0T^rqWZ>3UUaLUXUddDGMg#D#fzQSdzn%7ix+iAoJ5ykk8o)36~K^ zyfIgM9h>bLv9aVr5F!Z4Q05}A;miqb~V&w>mtKzDHl z7kPGJi^a)lKVN4d%4`bGnTYd6>?BnA5Ft!77-glSVg`c)j^|?gTf1h>4Okv>c9@5y zEFfhx5+a3!FS$C~t1#O)VYXLcw!f`ECq}jhESZ=zL!6QB6HAe0qUOs0;*s&8ba|Lm zh8CM9iw_`xUyH~g`IA`4#V!hvHV7vX@B%IS^ZnO8GifAIZ+YMMzTcgb>^bMmu4}Ko z_TFo)y;ehM`iD3k7L=CO-hPgx*JUlrS6^jRwqYc;m7n@bhUT<^Ixx3>@c4$Nn5!>? zk?+Ydp_olVW5!ONJb5geWy6ub`W8z_FeH~#r;dlndE9U>=jhRK11*4qC4AUuIjR97 z`Nl@Ch=Q!tt-E225rD00X~jr_U>D1`238FV?-Go7n@7#J1spwEguZEOZ-t>ztVe(& zjCN3!F^pb!PcIR_3L$SJ1NEal*MBNrdgr*@+)0E>O>a!N|! z^~3~6CfMTDHo!43A7=r8d;lOH0LTYmoWZFQGs52q+2|^VpE#@NB}w4+r5HU&jIdaC z?tBXjCaZQ-TwGe(h!kVlkt3z0At9H*^`KRI;q2J~1J0(1p;1vb4Q4FNCsq;XpKPo- z#fa0ffPuvy_-7R>&YiQdMs@Y>)@XSGHR#Z2sUxkdM&=U{0p~>Z7}gE67&!yG^A%Wf z)L=3Ou$t<~(m^9X6Wv$`LkDNivl81x*zapsbVi-V(Nxc^>}w#ks%vtPwSA?%6+81* zCuwgePC8X7Y4uo;>9vsjO|EDq5)l-FBlPH3kLHT315ktB_T6{i6Pbpc*+gJ{eKkz}RFqV}G+ZXA0@lv-1l5M>`c`G*# z(l+kdQ_`RVV(A)6_Q;dN0u(f(3ED6WZB+ciVkTd9*=3WaPMwl8fByU_F<}x{xF3Uq zqm#nSph8f-pEhmU2sY7X8?W~?8B91DYl0K)5FmYszrU}^49PK+r}Qi~c3_|$fP_b^ z*pLuEE%)T9^$(FJL!S}v>tP723Xr62EV(8ydU%jKDpUn_eh#2^xmDSsV zu?v<2SVo7cuz^&zTcNb07+}=WhI7A?7MumBKjAGxPf0>vd=71sRMnRWXTs$japVr0ESAkUZX&Jrv)ctiMnW& zb}pNM@kobaG=i$2x{8w-y}aNX0*wOM_fqkk^jU5No|C@+ZNPJ`&%)2jmK2m%HZ<44 z+Jg?qVeqLtS7i>;(703TpoayK?|ESM**eT9Bih@c$=6ObYXCKb=Sw4`^d1@}X(~=+ z3;x~^9>P#i0N$Cf=8;F8Ds1-){_4}6X=P2IwH{hgZvlpBO49O=e}h@&`)qh(@nGk3 zGvH-?{Siznw&5-4lSAMW{nP+MXIMFVY1*nXPo|&RyW@CMSP0v@H?3NU1nzk>7w(lu z`AO$1s%K0ZVw7MXH>c+K?tNK+WJP7IX~=j?ESC&|?qq3EezQ4p{P^)Pm{~@&wV9)a z3>gWY$TbV6Pag%c>~GeaETK`c5niI;w4Q$6M!%?m zVZl1Mcma^wJ^Wgl%$Qw#btb>Cfss+c-a0^rrk4@ubL?M53?Y*#G}IG44nri3ZO*n< zzaaxL9Yc`?=l5y})=dsrnMH3ZKse?Hx0$})jWH8O5Z^|Nv#GEQF9=Nyuxs1kkhD(n z7UIAZgA!VCNW2YwRF?Mr_djE|UG325J+w|oyGE;=tsyI2E!EI1sBLU%cXr?Fbh9#R z@{ho@;vcGGCP9ia>d9n+(@fiBo!K`gCdQ8@8BuI&YH4csumx6D*5Is3ITTZ2`>Cm} z4%;CdKsU(Ib>$hDSPI(T(`(Akn_(LOy{xUvh5B!9odGKY1Ev{0_NV#h*qB>wfv34a z8<;uWshC%;y6egauZ0T+)HXMuGd%|m!*n%lAed3$?aZ?2 z2M!z-1F9Tj3xt_h@coL<)>>dwrCuS8HZQT^oE+U$Pc}>K$2|Sw#|>)*n-A@RZVJW~ zRKL)=CYoo^T{^Mu#1>oNm@xyeF!a)DHJCl&;v00zpoPw1bU}3~=9a?T3kSEp`OYux z7tSNs%8D9n?Q1GZp>|e6GY#x0ofjlsvki?70MH`CLN(5Y2F$`(cVHH7aLw|5k=^wm z{2~RHK7m#P$vzewn6E@d(+C|TrV0Al`(%OU$H09M?4KQ87aOdJsg0{ZYBy50Y89o3^vd_tr zY3Zl@gFqw(2Kb*kc@lgxZ%lp|OYpF4@PZEmCdtsD4vHK+I2r^X6yFiPU>kb3H#WoW zy0fKS<7LK#ZNM2%i`i%hivebZMTQx?ED*1HX|yH}qcPAI*aEUEsq=WxR&lMQ*7IaR z8pj0dDuAd}&D5&)GJBom1MNFzFiAD%pd%FSOXtCY!*^}}3N8C)7q!Y#U)g5#fO){Q za*fg8X_0+O3NwLmg(WeAp`|rsNDQ?~Fq+^%2g;Xl%Yp5%Ap5i#yoe&VwF|_*)wfft zq~L*G?RAZk#9BbO%xJqoN0wQcA&h%`3kpN4yp2#7(|G$Cjb?9yM$*ETK3^euxv(HA z5?s+p>}@&Lg7qRvUw!-O-xHK;HElIe`_X7(GPY!CgiWs$0Bz>RV^tA9_m*d$efAc} z6xP~2nwkPe%!NwT+z|md3L_YG$p#&ayg&yP{5cPp5WzWEKR+*%9*!N%g_T(ARl*=m zW@Zj7ox+HjUf{>35UoqS$fJV=IPL`;hjtFrZrJ+W*O1h*8Tga6p%eF}LzE^p)Ydl0 zn)5z8->FGBg=^sY3X$4`osw| z`q*Iz+P%ogVHH`oq^jWv5j-Mj$@z(H>{zJs!oaI&(;fWi<1coXYi!#0->=L%aY(jl z{l_FFCEZ|4!YK({;x%U8cHNJ_aBWbuP5Y-m4J`lhN7<5_o1KNRkp}bJ85#LSXsZ_; z&I<^j4I}@Ojk|Y$_d1Su+jj3xQtqHjeBpbvaJ0>9@suf3CWPax^v^&0PhY&))iNzw zrbo;4Xc=E+p=CBmxl%}fhmDYJkV3ISd~rE-Mi)8X#i?2-)kZbdY&BINy~^u1)k%s1 z2?v!0kp`8;wSVgRO;t##4ydW%bAnRo=@sh0$FsKAc9l|;CN-6pnyQRmK^)sps&b05 zLroQ=rt+p&t$m~t8NraNSZz^L!Pz0z5YR|*apHTgUn6C^UQIJVO~aKo(6oN)_m8Er zQ(4>8vSzAf`O~Xo{gzcgQNB=9#j2^esz7tUsRAffp_*!znyQ*!L1U_)c4bkNBWfzx zZ6^qFXrRD;Qk77ulWMAoYN~pARori?|H*bKRGk8~hQVqLYw1;A_@rfoPh7PmS4|b7 zros^Eu8DW?m~>JUd$+Q9DyT*D1H&IF)wgP@7PYKsdIhDce0Q)_R;qf;A6TiPNB@Q{2To&t1nr~M13Q98kU}FK zF)1;Nx~J(q_2h1h&zdz<%cWF)u(0Pj6D58CSCT?mXRfq&&OT@m}O~wX2So;a*a6jfX*CnOL~Q`rP} zZvEEp7FYeoSreQzT31*Z>bD9xe(b8>eCRMz{T5QrDiqy0Qm+scp;?VyDNj)DM=Nio z9Csur(~x61&ru9|lqImgs!;TXo)>ce0l2jAk(Y`o;((iw*PA>qaVTN-DvF~|U{));61Q4tfOqYSj&{07NCL%IM~J&G_y~E! z)wfGwOO4=m8(ZS8pF6*9BZS#u4-oa5Liue=P-dZTt9g6bOt^=&Ca^En-t|vlZ{cYx z<$CNcjOHE~W9m6Z;O$KsJAj0hN3)o1D>eqK((gPG)mrA zTVDGo_5kIU{V2P>x0e4au*VD7Z2{$?GaW`?w~pYzTY*7v z_%>ptRfvBKz_cdN!lZDeRdagjBhVU#D&67Ru7iiqRUFH|M!s8+??=e@E#&(J@*Vi~ zH=n_I#Jit<{qxQqtgw2|tKL2_nP9VLr>2_s;G7%0GV_nxsJWo)@Dbtio zUDpg{Jl>2|E^}R@@ia+^aa~imljJ9rCsEfDn;xc`T1{CMj$}_qMV1LC#EOfnOj(ZP z7)O?=s#vS_2#a$VLn9+2LyeBOFb}xg^@wxuXGffeW>x67N8nMZurUAdx38yQrzEd< zyf$VpwPRn0 zkhMwXZQIhB-3uBkUUrtgZPmbvjGbT~ZrGJs(H;Vh&(M&ziZcY{GZhZ-^a7kfdEIk& z;`HJTcU_L9#mnpy_7gkdx^}aV@g`Zi51&h6g6U=UA^X8~eX0!)zx{S}wf?>L*6%)g zG^Z|ESC@12XzG{m*@nYP#x^K%e0TI{36#TphmISUbmLuj-F0KqxN(Dhw0GSFFHQrn zyA2yR?((}<4I4j{1d)RW4I4je9?WOYzAS0{f?4=193M%noB_-3y&bk%@4n-{WeZsf zPW!BIU8`X@aT$97@6_u>k7c)FqkqpW*I&<7HNUj!!@|M?%|1E>4xn{D=0Ng38*BDy zN{(srF~>ros;#lQrj&bZF0H9|@VHSp@|uZEOm{#Dmrz2GW% zDPMfm6<05Sj^&jLuD$^#e6P9Y78scLZ~VXDx?X{Q)mP!@_*I~}dc6YENA%`p*G0d* z$zF3`nui~L>M0CzASe`bAaOG2lA+S$1cwxrsmF7&C;XJuLypq>uNwZVs*?)Op-jLT zF6d9e;3KpNX?^sougmooYRLpNyVi20`Jw!U-7^j12A8`ba?WD zCKJCD%d9Q~bw}R=b>?pxsQnCNP3W(Vka3Z zcymM%)XRAYPdjHj*SM}9x{!k9|2!!e_&-Q1?(Rt|n4SFs-`XF$uFY`UZD;j@w=mFk zSy&z39B12H*Ks`62%hT28Z?~NB#=1aVrnz<_fVVOsFBCNi5%%%Ps~JbJz6= zTaP#U0g?l*>oD7gH^=Z!y-wii!wyHyA81?z4N{rD2-?iwM9}&nc$(ix@tyH7P>aQ*9^aGv{TCmiNr)vSzFxvp|{ zjyVJa=4+$tGGmG95Pa}Xz5I}(gq?L=MXm`;81-+QuwwXx)z6Ifzhy$Zg_Q%^<*dMU z5!}mJ5&qA+t}<54-;^@-qTkN3O84cS5hwq9X2jnn%H4Y5C-8w>rGsrjuP0)1m`i7aEGe|sbw>qA@Kag$#-M-_N6=b ziUejw?yKCnZ%%?5Md=OJ|Nerm|y_E1r=T9i%`feq}bt@s)UBa9Mb_6Ay zcb9N-4)nacl%PWi7oFw?f05)opCL$4kLQo1=ix574_Wn$08}FYRk!_T!UrH@U6UrtaS5>90$DD!|QciW(Gq$u<+OHZ)s`_Y5Kpx6Yq7uC!p z1JA+1^e;n2BeS5QJoNyx*!0!a0(79EeQvBO^$Qs`8$d+tZ4fH_di2^qNs&pmFxfZq}vnNcRKHL-nUK;o}37sf)70pwT&us|1;0? z)it$M71igetIl7@_G}kKXyB>;sB8!vVYVAXhC#qK%+L(Yv}QvH)V)G{1SmHMK2cDP zje-yDL7O-4I(!B?7iSLd+T7Q@Iu;4i7hxLNPEk!V&>+U`UtEDI+i<7peztoH#koKM zmw6wjI~)n;kR}|ss6P{w2N4H!i-&Ztnu1tlOX*%tcPt2`h*TI6Fx*zUg3dHl5=rx* z(t)-S=0w_3#AdSeB6zX{!9a$1kIAfcMYz z3<+&#{eCaJ2SloP8iO_kM~W<@E1rr_z7cT{))P8qf~Y%i`oNLzcE2lRWXu9X`F32l zSZ5(ztHddSHfmn^kC{11$^zv%WxnT};vEt6zvI(ScQE0JMTzWFc1Y`UZq{A#aY@(` z!UntN0pBsD^e;w7i%?zT#YozgiiR3vp^ZIB7`)cnS$tb{V~wG;LRVPmWio-&&3m8l z<)Yq)ER}lU848652u6D-=qkm&cr@>zDEla=>bSelg<~X@aZ9sE;zsk9WY0gHB||kH zr+}o9)3+XS-@p0!jgc}7v#~H)vKPQwF4Sr;7fKeNkU-bHT6cG@ZVOC2*Nq#$ST9RN zOl8SG>ea7hiD;*cW5w`Z%AS-ZqL#AMo_)qCOGG1OsU&uGCEkECs^hNLe82wY^UvM+ zY6iUYt~=im{)u2!eBjP^$h?K|?tEvKYUgy#HwaJwcantxSNs75e?UQ40Z%;7XslmS z_IlSGuuT?rpGs5`l|M+$173Jp*1O0*nsNIBiC7+G3X@UnCib1IcM<<)S#KIO$!o~G ztA@&Y7wNB-^*0V5vox%vxIDQ%agJ z`nYiPec{+JxReov$&-bsfkNC+p`wc8O(@cjkJR8FHu}((;gNzT)bs>slEP#s2(?E} zZca{KK_1RlE#4J~dEngC3i_!}h&coDK161}@T+^eyq$T3kL?CSduId#i6BS}Hq)YI%s#*Z`svNu> z0&6HPXw%7cI!6)=Xm^}khelmKOv_;^z`8~O_OCQTOS(e1iWuy`#8!`41+~WG zhj6?TGklOpCc`dhbg~h`n1Uj^9S2sBUcks$2_we7K5);76SwYH%&GyNsiVA8Q_TwE zDr0G}7$Ls}6_)>jm|bTXS4JOo&9alm^rRQA*mjCNu4@u2Q3rQyD?3ELU2g*%-R}uw ztq_Nz{+Ro^&y?Nk(@beQ0|Px$Kyv5aRz3j8;}j}>#;I@{6ZxFbl*u4aTBJ0uO zeqx@a=sXR;t-@v|Fyp~U4gMvZ(iibB41{$7`f#0WU@5G+(a6M~L|=k9z@C+VCO<0^ z9-+zlA9o6Uh72szS;&lxt)!~wNum!N2p2GTMa5=v;$D()R2NF%apT6Fy!FN#Pn@{q z{V%?N+;Qu+pAOLH=RG?&e~nXL8^77~eG!&1~!dnvFednavG5&!E{%gF$eCi#U#jRgIde`>tQ>Xqo zG(P^?Yj3~(=EaL}yyA*$Zd`gB4AVaF;QhDXJPYUhC(c^>yr7s%(MaQoOQ3~ai1-*qY{x1NU)WpH7iohan?ab`-oOo|?|nYsgc&(OxrahV z_Y6vX*i^b^XLpUeX*!ALVUG!Ac)?K*%BVmYr|EeUc1HMWgkGImV%PN}t?Jw}$rQWb z%>*S!Sipn>fx=!Uq`fcH0AGrQG8Om3&{mp5CPqq_6Gi+B79_z(r_<9H%tULvVO|>! ztMVYE%rCI#=ly{7LJCuonUKte?BBoda9T!A0nDcqWS>eqb^xx9cJI#3h1JlLhttwe z=jI+dbodxd>BIWot31&YyZh`@-Y4kHr^<0erkIKEBbN zKdJ4U9Ld-XOv9EiiA_U0rvcM^eK4ugXck~jlfJMQ@qyjb9Qc`l_|>k_G{YlMs?8f# zAv`q!?NU`+BRn>>fs^>vJZGvi6Qac1l;z6J%2mpAWw;V)o3BLTdpe$P#{0XK$%^Dm zbN*Ouc@VJSy zX3c`{r2}bc4pCAZttaIOw5F)I=-93wFkw;C?4(EV!>$9Ti;9Xt4U>c0+S}?bV0I_x z8gxp^>}#_4cz~kF5#AOrzqCT%@na?>499^5e1<1X8Z$1UH7m^zb5?S&^rsIXVq z7xoK#_`L$K{nLv4dV{?`hke`khX8xOKpbPFeygvl?hbq6dV~V+qk#A1%On_hZ_2V~ zWx)7+dIpL&z(x>AFXkm#axlkd3CaMFsR8UWnSNQgXV28Bn*p6xTTtoEIEcz7Ca}#bT-7!WWm)fqguGXF>#b$7H`TqovK;HU zqH?uJY5+NG6<}dktH|Eeir~;-EbIJ&EfzGz+#|;Sm+f(n+Z(#mOqFE1X(m=gKKA0` zOn96kva)mM@#128suh;Tn(G_es-*S+ji*VYv02%*^CA5h!P?mYP`lOaBlb1>i9u7y zHjhD12=`y(-D>tOD-?zb3GA8`R`wNfW>@8Lfw{xm6o@1jqv=6xyrpzb-GiTkC+k4lPRE> z!6#c`C0lghWDsa2DCWet;ZjSZq8SNNni65p$<3{3k;J?_Y88F*@&iO@8PVOBKwS#2hv*H944 z6qAeWX7S8iWLK9$uE{JGjSgpLBjG_%rN%myGe|UYFlgk6t~8R!RVh%i5ag~8LAF(*utlPpCI#>shoQ*LQgN1#J@@z6Yl%OmC;a|WG z5p5T<7}eOS@1&AMma}LafGlU`QK3YZVJ5GGN}6;Av;hY^4RAi~#i?ZX7#&oy&lV8@ zE-&hue69Jn#|n(T4bgcnfq!HSr}Z5 zKKdj2XcF)S8!{FFGxzwwMO<#?i6cio`wD(t_WX$Z!(gXCAv+9fWZdo9zUjn~eW|HM z<$L$0Zr$|B-#_1iA7q%kYiyk7{M30+iB_IaUQq5+{s8^b@yZC(;i%*_FTM8W-~PH9 z^zB)?tf+ai=kn#tAG!OFH!Z#6zaBp|d^S7C>FFD6t#J3BW`Rr9AdC~PNU_br>z^1j z^)z??;elmvZ}b5>!-|E#EeR|||EERcCf;@DLr;^_BV1N&kVpoUr2LsK-Scm}e&Eo0 zp!A=G%gU3KP^iQ%)vjCp(mQXgOj4GC4o{Qi5J9QRDQbj)FmZ4cXq$&uQ!9f-FCC0; zCc__&!>KjFjfyDdXW^`|!DC=dxVI>z*uwVh`*3?*sDIqp2uZ3eg{3loOZ1>nZy4P{ z>u^{!n6{gFd9~-7G*Wnwv%TneW>qPRTXgdbm6t$$a25LCdh`KV*rWA*#1YuzKX!2U zuI!9shYnTO!eQG_yT18iJ2*T?j=1_@_mi4TseAX97p11|J91*v_C4qq^nuqQ zoNSyqY0U7!aib>8c&{Omy~5UNmfwE;C6^~*l4=%0G=~lyLhWp`ge!z`tbuJ3{&e?j ztlHn;GuuJW;in$|uRE6B^vAm&LFcUS3VZFf*WO#d<-6jtgCD$pr#1-+j{vRi^N%(5-|)gp9SsWX#nRk?aDXad1>{!BxNZ;9V(Px2GHwL)*m|X`Wu+zc&Dt0 zfXT!1igUG1_TtJ57k>E}7DgJHJ$$9})ldrt&}~72=p7z2(8GX@c2++8b~M+SV1>Zp zSP_v5r)C>JdFS14)^FLfJ1b+?t`A>)ZT%;|=ptHMQ_-%F88dEP6vs-dGLILvJA=X{ zP1CvBygb_YyXGku%on?0P9B~kIB<__|KginKOH=Jw6^-tp<@}@yLKNuhQ0vIlLL@k zF?z#ipTLnoGI@tbg?O%%0L(*4LI`Vysr$<>x&HR$#5;e*yWzbV6Gp`i9zJH$O!Nmw z_0_9azxd|+AAj)ntLuiJnhh|{gue4V$`itWubQP?p?E;(v?)ah3*#Wwz54d%A2xja zIROkH_txI|`UeNgioe^k{yp>up*Y)%upHPfwljpM-)##<;($TX zUcN?WdvS#*hL0W^5W+Gxe6-uWmvYe>vVsV21}dJDGtk3uxc1|o!^e+*_1TdlxUalW zz$p-5E(dsqM1^~sU~>W(0EZhj-bOOfxPmphNQlX{`mcX`^R<`OBu5>dr;GrhxeNs7 zJ`kQKlxXKc=cnYijH9}96SPG=lq-Z)|2-Q&-UG_!8out;w?Fv!{Wo7k=d8e9 zfm0Cu(+}Nw*Tiv){sihGYza()3c(rnf#%U=5B%ZoSzsWpB5DK~1IW@|T&vj{?VsR%bYE#FyB38$os04A{EH1TR3ApoK`(aYUd&Os7ac3{IbBxb%f%DyhfLao!zloz9fx}Nbkbx=vcX@k6>x>tzB02oh*=ec|5I`x^2Vxf}Srddn`IT zSR;IU9?+FF(z(n}4(hfN?c|t<^7x^aA@N;z?R@=@btp`4HP?4rqjqpo;<326q=E(c zx%TtKOVOBJ>r+2?pRc|&VVEyG&sEc&!P!uK9=@7}C0ttVq6`1Q%+>$FOmzeL{}^VD z#EdlsG;JG-u>nVZ*Ccf&2)koUL^x*jL-ONY2#dr+{m5cQwoSry~ zZBR~w3ABMtl>OAbO%FCu$9_PtGU8!%VF?Kn56)7Sz_rHlBEaAA@ z{CUDeVIa%GIVmf9n|%tEk0Vg_n$I%vy&cbQWeyy>PnU`wvSFRSn^3rnDH<7E@A2J0MUu(A-#sRxv&l}CgZ zUP@AKQi8xrNCGP%NV!RP>4mwfjFs5}#2n1I`jGIa9nU2gcBY#iCvfysENQPNS z!@@m=OolvujH?Cju+) z&@95z1AX>7TWP#`tdC(}?AR%+4c0POuZD0m+%``MSB607wE%tcICia}&Qs1^nlI4T zU(9pvavp=YcRX~h9>>oM5N(KT8@PCJdHKT+KYaW3H;jsn9zL4g7_TfIGI;o?k%JYb zzML(#K{*%lXmrjsw#C9V!Ys&*53-|BJI-V0p!jXZJ|UT1gRzz^m?7+RLNliTzaB*_ zMVKW=5pl@RhrLvL=EGrJzIaiBpWuwgMVIMO<9`jkOK{AvoO%ot+vfAH?BCJ!GO=pPot z7OJ@jwqQXBA2Td^uqJ(PM6eJNvG?#=U@#G|(<1Dt0d9Nmy&7{P8FIwXGds3A6OJ+{&1^|t6CqejOM8G0idu|yaXKWOmi z#W&mmJbCzz8y1fqJSct)2IJ3sSy ziEIoTJv1s-O4}12CWMdbLzXUXfX8At%EokVRx=xQ!4qJW7Hp>cMqLoupY6FnRpsk_$awt% zlw;p~^UX0Opsp@p=#-f=XHFR!(5KAZ3W=gBc3WXjXJU+hY&uneL`W`E)>dlodSG+D3l_5zUfp2_!>!Ywjy_A-|B!i0N-u) zl;{)g*nm6Fx^QPH;Z82J=uYk1zMTe0ZeCFt#&KB@6i4#%bI3nx9wa;XY~TJ1;^pQ7 zcdW3suCh65+6YrL8c|V_{eeE@Vg&x}+hOEUWr%222<=zShsskN_5gdaAX@>>^?Dr2 zJi&7Pjuy1R@Mv4X(UT{S?s{+S+O_ZPIy#T#;i7MLoc9T8s_~DnlbM474 zr15#<=e&j(ELA$#8W2dXOc_-KtT@}I(-st9ZB>|i26p?-f4<`0bk#OknE41Z9|_j0(hs)(%aAbs(C|o=WDBqn*IEIeT~WCP-KMV z@bFNwz8Mkf=hs|RReR2oaFw^8ACSiC+f9f`h~jAFxjR~&s!eNEs@Wl3(*kE;85vin z4kU5iU{(%FJkZs|tqn^aRqpkl*#8?~BKFdk=)_)o^7q93ci@N~_}5jdPt_iVX{6yC z_gEFapz!X2FPMJ!QC~KwYwSML`l-=K0U2o<)U@Z+mmdE_TG!gX&vN6`BD&YvSAE&2 zmg|f5`eL*Np^tnq@_a#~_4YG4_=9;&Em`o_=w-eCkLA8Q%3*s_J1p(f6GLy5^%)OD zXWko)H^O)Ga1c(Iw>d$1MAqBOit&dtMcQ^rFSWJEdP@*o2>C#{=O{6ZQH2#Jo0kXJTkVadCYzgd8AMtg~e(f zzW%?GN3NYFz?GUGYk!38)s@oi!M7ui=kcb{AgHGkq$p_MZr*nT6jt@C`r}E0soMchgpp!+wB}s`=GT2%e62mfU zS%MP8mdPUEl)&1>+0S!mC9~uNXelNGDT)iL3eTX?$)dx<0hI9f4f8dk@#?c$dn%cH z#t|lVmYkP!s*E}VF&!Q#Ib&O*5(N{o)G^7z>eUIX0_Dzrn96-#?WJVl>8D|r3dI%{ zSDm5$LbY`bDAw0M%t-yFKJ)&kbD;hxmHbA~IZ$Y|GCkm4SeQ=hR-w_9Lg1e;Awkht z0l?%$Wi&gQqCr;_7bgfYR_c%BM0QjeouWstUzWpDK^?B5~>v#T=DI^j#ABJ2)PHSBu3v?M0=h zOQ~Sd#2V33i(jBVkV@vyyrl%+VUZdZ6RE-%R?7`~rkA+(x*i6GFVvo|v0EXJ#PW*y z(~At)eb2aH9655NG4iMHMnDNG**tYxkWvh962(f;v@>T`Fxd-!R!kb!2H9VmePBe1 zoNV^+^o1>M*ry8e_4M#qfpd?|&2SNN!OGR*lXY6P)MU{qDEt++#prVlYN$aUXy}9s z3lQw7HvVskgi{V+B>DC8M++=y0W|ONFL;@mKk+bJf07v+`I6F-VtY<@R(4LR)zbkV zks7T|iwd^FfVn@cYXf#xE3*n#MmHKM0Pm#ob-|BIk0V45Jgu4!a!}K%`7pD^;^|@X z@U-{`1P1s=_V~4zR%kD31=&?Ip%o^yg6#6arlAIF9q?i7rCF&~(E-(LK`RQ&Rtu|_ z@w?=wg{PAxdsZoa)xiV3#S|FH?>JN@BER8GgMSHpY7zg?j_3O?HORB)7J-uipcKKn zGCtXuc~?R;+Z&6#)^>?DVp@&V-s+Io_&8a+G;Z0lWlxTi+K)g;`}`4^N2R+izdX;Q zUb^G;ls4<2%0o*zrVm8`d9944`V$ppZ7GGH7Fn8Y%{ z&lj!{xEeQ|&rts~zPx5G2$gX7K)|lZ9;gO~464MKbbQjJ9W++ z%P3VBV5ndc_U=tou2rt(2`>VdS|=BvvN@PgCqagHL>&hZ`i%K@1&ap>qxZ-772-vG-apOq=#U0}G-sH1=ra*hy*l7-evo+E|Zs zu&V^$nC^6k)>lM#0)b6*(gSNk&=YVpwEO@-Y6|G6BxrH`N7OgMM-2SYRIXz1OMs8X zb>S8h&nvD3P~@{%BGo+%t-wh4IuvzsufU9im*w_MVh^R@L_3Z_QthBoq|_@5M5<;A zjFN)*FP0@LCP}#s1a!A(K_MyJhynXX=DB1dAD&Ay3p%}$3}@gyRUELDvQwDO6d_qp{Diqch<`Uiu!x`cYXb>_0PMXQF~c@ zi9M7ezQ7(y!QK5S;)}S`abSI5Nno;$8s+=3FK)f~+xINbb-uEqm9qE%docyU3l!k) ze)dX=TIXzg39oZ6-@MMy3Q2+c->yLv0mkBCAy=2Zegj}~k2L&I9b@wWzoo#C2Y?~- z0l)c7h#ACu_^gDpVDwEN{?+Y%5Dpa<7J#dh2gVNPpwThI?)RxyBX;7HSb+~Jqo61P z46)(MX6n3W?5~00^Kd$5UN|e>`u5vzZ!I1*>cols*r;6DBPupOmp=`{lN2>5KUaRy z`^~qu6tgg%B#aerc}w=d`=~J|PK=3-%FdTPV)L{46Vl`ZO{hE_)&_JUD>+zUbveh6 zAIqs@p3$R6kB;`NuJ-dY*VUM1kD5BOnV#S>*ISklc%!h9qoU2ME*CBWa_g8mdX(&e z_jQO>U5Ds+GMimbh&~7`MS=&142K4toy1L-djJK@9{@xFU~EPX4-O5Rsq>xwRROY507-2ynd?^oPz} zEdpE@f&L5w)aWmtn8{#a74hfd;*!d9RaivWi|tvbGqcWQk@Qy9(2x+50|liW&T|9? z5tXCGA;EW*L8I3RHc@f5Vc#p*BmrkFlO^Ih)1&XT_x<_drE{b`>)E2oExSH@CFSKW zWOr_QcWxlFhV{E!$1k|=>HDuwV4oPSfAFb$7ENP&*Y}pE@Y!FW9N_utw+E$}i`i#C zI3{0f-&2 z$3-iD+JVJ^dPIVHxOTXD9m83Cl7?W9Tx!SqU)!pP&~IUDlEA@UCQJ}7Wm}V+KP#)D zIQ=|JNpLoaXYRTyLAXr3>#j3+lA#Xy&2ei18<+qhJ$8}&yk^`3J{cL%0+uo|d>)W> z;AUjmOOdlyfE`n&vv_)$EZ9M~p4C1Kh*bCev@Z>ag#W}y0os#PHQ27VXC6yS12g_> zn`B^+FOvzP*~+!f&z;%IG3VzAY(4f=>OVUPak?-b=FS2Hy=vp0?A$NJd2V|-oa2G% ze8MI*GpGz;Rl~3riHe8-uXs%7OAhxyz-l63H3_hC+2Jwage}d2blTcTRmx32lb!js z5mf@dw`mN_1A8foK~t<~o#(D0j{e*M4S==u?5-!irA41DS_FgI)^5odtO&Upzz(r5 zgg;DA#B$K1h(#c4qY0+47(9ajrM(UA z4ts#^k~AAU1h$C~L?xRA%p-UMDO6jVhRsT1R14}EsBHjG zG0*UN(*vHU?{LfUlVVYw={4Pf2|y`Dab|H9{B9S=K)YrLxKfZ_TKalMdNAGQ%sngnFk^_hJgW=2m z2!hBF6Mp{r4);Pjcw%62$SktAPjwqUPIH!-_-oe?k2sjzG?SS{+0SdA{3e)6kbrj z#TQb)OHdvb{8MI%g6Dv^ORrwEVZ-)}2G)?_HV6brsOA#DY2Fswd+I%sq5N3vM~9^nmy zY8p)&n~GVOHitg`{+Nm}7hAk=;bS&?diwkO;gmW=YyQ~jL&WB5_VmKfW}36TJi&PS z$IDg69PK)Z@7ay-pMy5piQB5h3?%9dvVH?|wk>o*dq=YfZ$K?SlJz&U_$AJGC{{qU z4QmPMq=YqJ))xazcH*{bG08X`FmvON23Tm@>wd#2JDs14M9Fm4*nNJ^gPfLilrC$h zP8>gR>U0?f!=28KDzJxb3=63p;{EsDpU8Fy*fuzwH{aZ)IuApF`f%4uc|bx=wNfi4 zYm3S%%8CkP586t0)K1psp=0xMTzlbOA%i(o6$-eUqF>nNWE(hSw%|b9mIUPvS?^pj zlHEL#Aj4L}*GxPqC2isCZ_Mrl88=wL09Pdl?l&AVcLOr_0W$XiGWUWG-ivh7YrR-4Z$kBt%&F9mgn=oR;sNqrdJ`*QSyd>5yd)u~cS(M0{%DlZThNjB1=g*(J zpoxI5=#fLBLIgNBZV?4v-vI+6ybm2}ix@L}*w9!XX9b+WjR}XoPkt6UJQuoFWX+^V z2^=%!%(24-N(A|OJ$?H0$i^i zGlOdYN%5<%zi^?V>5@x~wK?{Z(uzhy*d<(-AD}ZB;TGHIWe6k_Ax4u@4KJ)MqORQp zhpExg(Xkj>_#rUZOu}w62OivLH@37iH-p#O)C?4?%(CZHf+bj6&)~Zk>+9Cm3(X81 z)5hkO`c`>7jarMPglw1;7FL%-Go`Nn{Mq`mO_Gr{R1^`Lv!t{jtE>V)l%K3Rot>Rs z%p%cC^>AWZWhaXnMP=mLv_)z&Fq|B6w&tO9_!}-QZEOI!&`a(0=So$H9jzk6F2t-N8)7w);q_+tU zstKkEnntX;wlS4kVdX^&eZXZXkJ$w0R+}JRy2`3nAhEMDDLURD1`yuGuGh0MNd`SRs=&JK~KQ8}4Ixrd=;e>Iowd-D5PhnYM9`?8Rl$I3cXXWIk>RO!e>B=0KSHK$=+8kgn z2Kjnf{0Z>_1AM(rp61AlBdO3C2m{_l0-F-Z>-uOq zb^EQto*0?e#ntlsto&3o+vI9?OFJBdc4@P>$vd*g&uW`P(B?5{^E9+M1Z@ssuzIEe zCP)zWP#Zk3U(CV22>gYtRGWm&j>9JDof2bfy~=jJy#`&s3tc}4U7rGlqvP@#uVu^b zm>tZ@ckyoDS-ygrTc0RQ3Tn?8$1AWkUn(AT^kwrpA0>oth=^wY`7?CFT)qKK!9 zl4Z5HqO7cGdPhg;rhc^gcif8v& zztX1c{L+F_6oR6PvUnlX6toB=nd#6b{DUP8q_v6BRf7eK0AYm z%rZGef3JB>$r|&$C9~NYWe>da>|qp(nt~QVF2f+nQrlEgkd>bW5CV|7rMx(53R(mK z4CueBO#wb8Z<9}ecc58s4&-g>9*ehWkgK0YqMrt#p9Z0yU^}R=Fsr;gtJI!T2qsP; z4s?>hNyl5nJIQC(E(nVWMF}mBE_z zx?~-EdJP8lB`EQTO}#}ev^YThFGfT7V#d%A11MRu=9hGM|Ta(P~Y{1Pk-3Fwy!KAa7^CJ}SV*D%|)Myd`8sa~o6 zq=5YfY={J_&Zg!?yL^)QleR(kT|VDs7_;r!xdfz~oksBMA@?EyjPV#d*1k^s3tP9& zcm>a|tK^&>RVF7R9T5Qq{ov24SbA<^)3abzv z5gwzHXy^=t;Ez?2ltNpHEY(bTZ09Q&2%N(H1nsQW=`F=Z>rqc-$^iAu#z=cXR#D~r@#mV zM%bcaxVERfI-?s(Htw_wyQyhq(gXez?v8EA){>ekAj zi2$anFly9F$yKLKjOs)6g6Yf`UtF0;RRBa&Y$hQ0jyZF7yocJz`fM($q`GLK#(=E! z9+*b4K0;kwZ+KmZ>I?v8g#(zqQ5}2s>D;XR%-pQ4KyR&}7noDSxO>Ap#b4g2MxO}3 zya6wGMs2$G*hGOHpTvZdm%z82E@WZ#@{omhOE0n)Ye0N7)ok=;X0(@J4^U8ERC*A! zrd4TCnAiq3VL*sqpii)0;IJ-le^dRk?e6+n4e%-;!Y(Uh9-DwHJ?b~0x_Vo5ZOwK} zJC~g7Qa{431;=TGti@p%P)K+EtnmJ-fvgct28DFjughC2+066+{EX;F56}f;BxK<_ zD3NF&VZqcbaUAerEb^h5ac-pGW8wK= z6(tetk5-yCFXZWjsT*WWD|qB)Gow53h;c(ZF{i~lgvJqmqksDH4w!P|BhNf?nHmmfc$iS)s!UZ{_^W!B(r406MvW%$g1}KeXYx?f?@67_g1pNX@7VEXHQ)`hzLN3xZk4z zod`fDg6u8A03n}*&_-&!m}25k(WsIX(FVD!p&e2%R8nbYGJ)Nx&=Ct~P}#(1o_Xfk ziDf|tQ8D?TY-QpK7&P{l<>W&lWfSn@GZV@}4x#w9NapQxK8H93Ip=+1v7w(4+Yuj6 zv3}KA#7D@g_KTe^rb9zf)`6%7Ww-a6>>r3I+Tz1b3uM)_V#+il?KXjRG?TtqA%~VWcQt6ckc0 zk%Bk`eW#c}QQ9dmP~b}erR_U~mZCT*;CB|fE9y4|kD{S?0tJy2)c2c$HdH1-$Hlc6!c$4EJg9Bpn`%h3Q7?4-SSBkWgrDj6kMR-FoM2Q@TT&DexS(F z6l8ZvLD<$EOORp(A~jGN^ET ziUAb4i~^1_$NNpeqwo|Nl%kn}{zo8rzte3`mPlR&M=C+fdc;M z*>4JtdJc+A;|@-15Geho;3V(@#Y?9kih}%pQ^Zr0p%k2^po2p6-42oBas2z4?g-I2 z#qK)=uNCj}{S=uafBz$j_xWUs%*)WzUH{{p!-{tbwO1YNcHH${D_#cg!QGT%I0Zzp zewH#azmnkBSfIj%PsF*>@R@6fcT`zf-`85jDT> z6dY~&Sk0pp{IS-RLW6Y-X{UD4Po)KPB(^I7SX|ez5x6gefoF$0L+K(rYyVp3vP0v)uDqpQ$L`5TvT(sVEQF)pw_I&xU%eNwrw1VdsyZIlT^* zS5UEb>|51!z*$#&h)r@SFj@b_aH(LfspTCTen`YAmZ`dCCt7p?XE{b=PI z+GD-%H)X#=*K^J9r*j-0-*ep`7@)44v5@Yz_LtUQRd2Xx10Z=cT)DB^E+D1nEMszyG zi#mfzzl<31%Nv0l^3U-77CYOq^*Jj`Lr=6xYN;)s<5_m5WMT5En?R$eJQQF_JOY3tS zfvkhMAYi0cV2EH!Mp*S5#t~xDI4Zbk9MRaLm8-7X{syE2E7t_&Pt@6*j?lF0raZql z{>A&JXv&vQ2I+LiS7m2t_Yh`@4 z-R-JVpZzfyu-C%mhmJ*~CooPBR`%?V?k(YK$?4SWl@#zt3f$1W7+MC3XQF`C#OlsN z({BpCjHE>fE2e-iQTm^mc^B}RnPv>NjOKpJ;MqGVGEc_afr+4>c7W_#8C6_ev~s*U z)^7^l3A~^tN`_8SGw2&uJbNCbQDnX*AsW_qiWrK=0oFoy|AO7-#n^~NA)(h%Du5Hv z`u=eQ!`#Z|P>@N%2nyUYpL@3Kj#Yf?M;!(?kq9{1F2P;j?dHoI-tI$`g0Fx2Uxo5& zoTJDWDBwlb^;-t-Kfdbu{ku&JBiB%OuVH#`IbiE*G+*^xLXQN$zFW=_FNES1QNSPj zhwguNoB#jpHh-Z$=cI_1%6(%ACz>45Gdi#&x8D?;l1!q=G?%F(>U_T`I1+OFqnSqK z#PvU-c=nvC?WYu;6wnChyB(aCOs050Q^0o~{g3l86ps&GzT2cG{K`I?m+?>UHd|0o z-|gT{<#g>;st|_(rr&;2aKK6w`CSV5V?e(te*bQhH=5UlLzfsze3tIJ9eg(DG?DlD zNJ>G(x9=4HW4q00s>_EI#8IH@w^kgSG=!L)0{%$Dh1#LPs_y0`BZQxf?;Cj|x%>&+dYr|BoUC1uwX}J3H4rc8ydETZ*dIekB=ap~ zzd+W~F0wBGOH+34oYFgNwX|RC3$A?YxE8WRZ!W58Z$qs}z9MxyxrAw zS6lu^)A7;P=k5e#`9HfwH(~0;%Zu0Nh{G4?z3(nB-jPv!l?#HUW)IreXK!G#Vtwuv z^}zj})`6^|4-8|#(cXF0##h&}X*c3J!OqA<;ckj%#OQUw zNtEEU8n5*5^4vWo`AF`-`ZB;;&x&N`i~HVE5-XEx^)tmKO?rGqswrF8HhKzTxAdE~RR!rOmQ1lnK?xdP zTK1#*HR4y&ZlP#paB+&SiR?i&5r2aAUB7AD;e`f?&a;o`YZQBqz9iU=;$H;5zS~>J zKA~tW@NJ5(scboYnc;mMU+pXuUwx--WiL^*GWI%sO=VBemsa%%DX`1?O`FGFq1d_5 z8^hOV_7r^$V_x(Xf|GikCXKoP3oM`ZF_qnb@WH>aX74P_#NI z_uy-U+D~Q>UOZ`8k}K_4jKi@Q4`YFqV=)Jf1%3qP=48X%U1nx3%$#sju6Z~~aQciK z7#C!c)s>M<4=DGZwV@I!Lojy)x?OIT0x^#VR<WR$4f$KH~~wsSl$;m;|uoMebWhA)I=&O)m&if_yFEPe(mW~u!h#CBUSo;pZsH&|0`=)0m z$xJ4_GLS$*=!hU7m_kudRK&V#!?vz%MP0jQW`bZZpRNlQiinDZj`ZGpOK+3jd!5W= z-v4*rok_t&_WSlf5Ar6HIrp7=&bjBFcJGx1@7j9+`w1V~xq0*6y}Ph*<#KlR`7>DE zf{uhVH8s@MG}P5Ki3&wsbwfjOR{FVf>3PL$*uLf&Pd{|`a9IHk#m#P0hJ={#8W~l+ zR~EBi!Gai>4ZC*QfVuK~y`uK`*Gbsw6_@W9JMwVaM`d1K_PJBGv&)mjH`ofcLi5}$ zBODILh+CdZU@NSN>>RTw=H7JEO*m8Y6BHU}OV<|miRPhOZ+(eguP3sV?40V!o3Nnd zrYC3Mbj}UZ^==B$T6ww5KjAiP-GuTVX3^iDI20Wp|JY-aj~+Q<_|Rd)hhw+hb~_57?d>fUTvD{)tLHp=4fv`t0X>vK zr3kwqEfdU(6UDp55h#EB7AgVSJDdhF?tyHidldioElh#TUkBNm4cYS8qM>{C?js30 zcyQ02{rh)q-MV$h?jzWz^U!{v^{P3f0~-T4I-RKD>H;0B8nIP?(~hlD?bfi>tIi5! z^~oKMimW}KfByNwQtv6rx5lQk%jS^WvBvzbI@uTNUokQ%Uxk;k zt9`KbT-f+))R&K5z$+`O))A7Bkl`<{3?Qa+Q*G!AAR}Z$Nurc3vVZ}Z|G9L zaO()v&W(Q{3AJj=**+7b_do^)z!6vZe31$wI1uIL}ytw?F8ZK>g)i5 z-afuqPJx%Vx1h#RItm{mmcLO?bi&u$+a$4YLQChBS2Z;?VQ@8ea5_;YGZ}QE^&~Nz zPJY-MkH5N=n10XxLlCtiySHxLzkkocgL^?KgGQs0%T(BjA}}mWfdi9)$?EjjTa%}F zmmY+S?a8WeI7sO9!r4_I6?>e{MZ$*n>IcSyJ|k|$5+R9TUT?qf!ap8+_~l2(8bPZE z{IWqH?Vtn@D0xtQem?qxJ(kBj09w(JR!;{lJDT1O3Kbvw8PwWdU43cQyK7dxzr<>2 zZK|i!soJ`_nwnZ08d|CiSZvni>cnOd)n#Q)Xb+&M)6j_sa&Ijtq%#EQX!|J~_UxsV zh}F?pT~h%HRh46rY_1by$=TV~j>QpN=id!#EeEwe0JUa-T2GmT!KN#z|!>fV;F6!B$gq44YY>=kT``zD0$kK~PIcTe7vwKb?<_<*ye* zUDb4n0peKcRIvGUs+x2?j!ox>x;jD!W>5lj+JG$5_OR$b;$kB9)L$=u1sks2$bMvd zSUxImaj`mCd1?GYLa-QFr_*04 zB`VXEQgY?*j9p`j48vl|kimoE;^It0hnq~{^ooeW$#MgU(@68Uj)BB!z(2U<%(LUA z$HO2$0h;1fvUtoGOIU4(d!m`Wj$5@E$q+NdmyPbRb@Z4$u(qw+*73|lC$*bVIst3m4;EAge$n7_Q4jmvjB>mbxv>eJpqb71(>7aswRyOwO?~e+NA#ReaV;Y?Ndr7m~Ja}ib%oAZs|DrkrINizSZ1$=p+}FkMaRqQ1KSd@fKn|EqL=5 z>i>flWV{9H6r2d&A>crr!HqiN#sS(?vO+ZAAvR9Lf5{3F&_ECqbjt)xaDE^$R)`4t zdLfqyP2R!`jz7U_OnfC1!MZINA+P`h00oA!BFSRe&mzo=1!acNN}pC?H9YrVJkO@A zeD7xi9?u?Agl7xeBIwD0#gnU?o+o=K*IRBm?dv7lkK&<(-&iztw}tz1Wvs^2>jB|~ zy%PLf4*v<(VFBL74$@7<;yv&KVeZ;Xn1?hF<{2`=TsTOWd&PS&_ei%6eeRKNLnH>9 zTe>@LV0mUqy5+0BVXPei>7c>q8Z$b zXyOVf`~xi`IXs~Q2toxnLIXEK;R*@5l8XQbEpEsoT6`BmwD7SLEwp!dXfYt>LE9wE zZ(z;ztd;!c#iK+Ta>e_OpFd#kXOrNEb1rV;0i`-71(a+QP`V1dcDn>G@-V|`?zy+| z=!uKya20rg!%yV!8#%m-!?*Ro4 z9-RYV^gBM2`n`8F`VDvQq?zig1&y&+Y`Qfa_mOBe*USsBC`slRaaXG=9(61;jy&-Jt0hrL$)t z7Koz0)D+=gf3>1wIWDGpI zrB@kw07qXYOY&3vG*Lm7FUe1ohYACZy!IcYC*Q`Tz~l}dnAGsVWG4kCdwAgSU3*gz z;CS+FLy3wz2Gf%@S@h)2P|uS+H0-PYRQ?#;Zv9s}5@80e|2A%BL3m60Pg{fG%k|&L ze1vEJ1N|4lwLmMH1KrM|Dcnl97AQj#R~yB_9KM0WpWyJ796skifcNL{)EzvSx`G>Z z12^izRcHxt&~juh(QICmp_|BOPaPZP>G6P153iJ@ZzvCK z@8Nf!qo?2fOP`h&m%2wVc7Z#}N3eYF zLqLF!VfiK?!N;(p42b-34126VFOMd)9hdqNECt&W#5YoSZ}O#ZJ2pK}p@92U^nil5 zF@y)~kMI^8JYXNu13!=)=#CRTOoxdcCnS2bY$tjcAN+HA=oFZ$-1IQf!zGO-KS2m^ zdbAJ{oE}C(q=z2;O9p+JBqNIVO@;?1m zb9f{UJaikV-*&0r(Ne!Zk@~Hb`mO5j_kfU_h+@%3qSz;yL^18Zh+?XdSEX1a$9WpZ zc?{>(?;PjmKjM6)6(RQWdjj2lk5Te_it4X?wr9+%J!9T&eVt(+K+Xj6+m4dJY56=} zMYq0^_Jzh))DQB>aLDI_Zp8g zGd+D2o}Nbd>$%NeL(b#X@aHXvyal7Dg<#%7$$!v-n}#;FiE#KYXc){}_&?JycoWeO zzOO_>e~E_0SEpeBpFxMgp#%PnpujssLlaNIog!nH({LaThVgm-GCjQ+(8R$o6>;#y z$tyYd2hYd9SkiREdCGGW`T1mXNa^r=N{91jdqdA&nR_JrogGdP$50ExAt~689;nfS zCDckMJ43esgoCcu%I}_5&U#vrd0J`btx#nDYOUZ%8jee+rB}ScTX!c?`YT` zyaFaQ0>7XWQ7xiq4SFMK;4?%~??ewpb%3X+Q(wFEk{l^yGbWxCxiXq7IWA@4%^xd>ydCzWWbAOxCHrH zNxN?YKZNufi5me%a09dabh0F@J>$a@U84M$${m@~tP}zm;z$A5S_mQ}yOqt5?mpyL z3#}7{_#XKFe3ZtO^z^~|>FHY}>o_8WWWh-;X20}OLcf!l@(odU1mcjm9VXFBk@=E^ zY++N}l%^gN&RTevy>Yg=agO6SCy2ArPYLTp3G3jiV%;ub9WG&ABw;;X!rIV{b^j7q zR1f2wOK{aV92~$Uc;{|57H>&{M;l<)bI0m#FX>aa1otm-9o!e_L{sQ~1^{q#*UR`x zzYQ#LgbKb*Xy8UD;6{(fjUN5yQNenN0^_QPZ@b5m)EFe+-fOIfZ?v-#X*D(cbFyQ| z7Eciz&>nB1ZD==aGD*+MWPhidmQi_+5DlgBKHdzBcO(0jzJD!uYckj@`psJ^Ac1#- zOsxbvpcSS0-few05?nKTnQk2{hi-ZtXy(>~z2W=Va3Y6~W)gfR!CN@Ax(E8ovE~^q z?cbBMPiiEpmAp(;)8vq}+rvFn>z~gqKC}n(;7vZS*^%+zgsar-s5xvE|F)gOcJkl6 zt^(W1Vcj9877p9aVO8C*{mVM`kR=sO+Ilb{&SgEn2#s)Amr)0}tfR6c4sYGE-h1}Y zKypAPtpC&@Q^GBWG;U4+El42gt&YQt(8})`lT6ix| zFj^2RV(%Z1>Rkm(e;iQ2+KGj~*}w z&C9KI#h*zM{4FF2ng4_&K;P~I9QJLsuu*v9riG-#Fp93UEPeOG9!(S%iqnPJERp?7 zSRiacWM{u%N5m&eX!)bo{bOuz=t+9I?OFbGcWh6LgD7%nkAIHc6{U<4r-c|pwBQl8 zq0>P>$sc951eTH_(LkDMCFozREn>^^TyPu-)V|6X*WInb;HP2pA`=|Kc3E9kcXn%yMn?jN?5!`5@y zY!0jAuwGYzrEcI6)CJrKecT9j+z9Qyob%Wo9(~yOZ{pmDbfVtj2}C{L3ZkC(3=j1N zqI=`F5~LK5J3K{#ERrCndm#JoYYoS@mE#-E@%85T8oTlBAJ)ic%rG(=Qe>6Vw~@ku zp_3w8LjmYMzdfmV)7;U_QF_G3^At1s7NKQGBq2c-`eL27@Dg$^$WhJku zRPnq@e4mQS-cdU>M*bg|YT7persN3ybkJ%fVym~5@m7v2pbQ*__;ZF|f zhV4zo-ea}se~60VJBW%|uanHg_>#;7jPuBhhk||mjv{(2ryw>V+<~_-AZ8Xlo(JG! z2n9~R079diep*xio}Dm>lBy}fHW)$kAU8rNl-}ngMEB0*3*W| z(}tS2fkHA*8$^+Q{4qglf;oT$Xu?{615riqLsU`D_E1F$XlY#q^sZ+%wXpAL_LPhu zp_dUPk`eSLveQg(DKZ_0baL~niyKhRZrFi1sFv&}QL>-(lKo_t>?h$W_S1j%)c!|( zQ-2t{IL2Mw821nB;INK9y;XA9mTuUAdh3#UJ5lOwi_}};3Tx2=d8KvWI!BT|F~`l- zFNv!{0ddt%(|aHO^p1K_9f-uA+a-*p5h)qSTRF}+V$03lfu7vBjGjDFdU9g|`+X!Ywx13lZ2)AWK5Xk!Ew%1>nvm;o%|-9k@}ld2lzfAKuIe~rGo29)O3mtriY zb8da*Uc!{ltuKQQLJ8dZ3J62qyxaQfzdan2>uE2&r@j21_NMo=m(-^{k34w1WIoTe zNP-|Mp3iKmknaJao9)>+jte&Y7QI8VQn1N#9>>yVLg&? zr8QS0WxB^oQIShhRKzEUMt<064>tyabV+gY!ID2xME-~%J4ujT-P(%gpx!!z=!Eif z>f%Ih(w)Ti!1%3}+wm^;IDR84(KGv-`+R?Kdr%>659%?=gDR0csILAzsNS}OAoX?B z8j`%sw+UmH6q9xh_4Kv3_tazeU-qWb*Yz=IMm{e@JkU-|x+xSgGj^!^%EpU6n zRgL6-AA90TDoB+cnhh8Tq-RJ+;snco(yfC>xz)7Lq{o)g& z_CR0BJueflPLqIzaj8U$bn;~d4MoVopCm%v%Ac(SAO6gW5XmNXhf)L<>E>Sl6< z-pQ?Lw-w>)hV?SR`ZDo-W_nrE!0UV96-1tPBi_VV1<`F&=ipcPj6x)s*msufJ3jxj z=l73$UwguhyW5@+yK(Oywy!R>TdT~p_%bYd!qd) z>Dfsa!C_XCz&?^-B~Q)XZ997~?mI7DVNY1e=YVLs?TNB(oXN`dNM@g`=YLx_=gwgL zpKGup3Z-~qm^2T{c2Rbms!2bln+)5_xz7;4DShfVu82tRi8b z3SXNsQUjQhKxN0IY!KuS-A&;0pY!P6;UX!B3i?% zh}wA-QEX2Y(SY#(ZRO4&UInV=^?!xD3bc^>!>xa;0_`n7l5SFOk@|lUhpWK6nE?ZZ zR(QgNAe#y+!EpPdcIzWUUc3UZkt$$QWH!l&m9_HM4BCj$BZC#UBbQ+>71C?1FC;*tAo z9=Z2tCDgg*?#MmCUYS*-#hSn3Yr;CI7kCqoPDq)=7%mq=w}s#TvyD87Ud|5C)9lhZ zRS2_-e(RJR@!pn6-xazVo~&sgi`?P~yi4Uo8a4tA5G2`pp4nQ%x?{f--U5aFDZYh! z2L-rrub~JR?pg|Q^%MK`NCDv{<%>!<3KiV48O$x4FgD5k^#1#-;Ulkw`#^Q}LJECbEM=so*dn8y+XEnz-hjRI`kf5||K+T8;h ziC1z?8VMa=&`1#WqM>K-R6z&kNs36p*IkEDAnDKMEj73AYG8K&cL!g;Yv<(*8c9D8 z50v1+|F4<4jr)`HxCeZaYyVD8$=Gh~-#^`JIkr7o!nITFyN_Xn#kUaA+f1fi{&*T$_vYqRc6Y>^JsX z0!E&!wW|r%pzKoK1{8@&<1`wr3_~j;(aKCo4_OP4_kr6D3BVnm2ViQ^PP42B-M(5UOJ{Vh@t zuvHF~X<^`gV6_k)7|t)?BRmQF^jgqh3U>bHg_ju^5n@wI&ItEqZ1w9 z3i9>pD7T&1x_OE=R-{xRl@B!&s%f80q zxAk0b1)VOD;p`rW&hKWeUtF*s&cLcVzdj{dd_uZjRD6BpNV#b7(W>f;%ZjU;z2f}X z%P*(K;*7!*r+0sjb>7SU_LDg#Yt?zD zqh#-;PG@I(d%JP`v}xmw9eA}jXPhXpyRtWCF_kJc7HcFc9O1OiR_u}mMg{w+bY60m zx)Yy=jU4XV+S*!yC6G{%F_Wsw8oHd>*=5zOST?Q=4G(uTVhq=m;e;C>UuAR2Bw}dy zY)w`>`3s)8mg^#KNf)sMu8Pd0s z+vv4v<3_xf^oT`HtBEa|hI-)Q=Z8bv0uE<%;YirEYwNHFL2>5cwRlg%R4!=R8zix{ zC>qkv96y$bQ-wYdTrRAA{|$>zXIUi0rbxtTn~Rdz7sM)3m|)UxW(5SKC|1%hW@UV& z5=>7NUl3maC#?Z$R|n|S)g{B)QJG5Jh4)Yb7P;P_Qdu+xKM>dHl*z0CI9C-XI^y_M zP`3uhu;M+m1dbgfbz`t-LPkIVm6o=)y0r#*4;qY6kN2SEp0zaM$y>;!Dr0G^({D{= z>bZT2hcJ=ToK1R13tCdU9TumiJuJP-Ko!R~K+_x?7`Tpqi|gQGBOmX-D*!|9q< zNx=#~{e(Bruh?xEe{oucxH#NB`t?uS7(DpOHoUzDXoG0gThkLBL@L6A2{;SSN{Dbu z^~R)!QvWN8x6))xGX@SV#~nLL=o;J>Ui;Km(G{-}ahwl2}hh0oo z@yg%fFA3wvCb4}G^&-tf*$-vQh75saO{ZzXti+L$m9>?%?ajJ$#rFK|`7bBp*p&CN z!(TxGR#{)d8CwNyl_<8+eZF$#=k(uKtMzr`9u3?hfqUctxJ!rw`PNQ#18^tA=|7<^ zA#Y_8;>r6c@_s=9c?a^q{rE0mkK*#*aqR2=tQ;&=mYzQWWe8pJ}egr{en}) z=?Y-Ky$5~ZUX4CvryVbl(0AkS3;enMAv@QT2cT$}60&XvnyFA>chOJyi>YSJsra$i z%rzbYAZ)ft!RDIy9^eUd(hm_*qNH@g38#h$!wj5rrK@8-~&U z>C*0qVz9%tp1hza-wAa?w+STf)=~fnT?AXkijg8MeH!{Q!6y~St?A)}eQ4<;) z7c+F?4O0_YDqAeOktLW1Pyc;<{6nLnq6QlblarH^6QbZrfG zbE&Ano-Vbubxln+f73|*;&NeozMu~m+^{lWL%L4J20pomtSIvD^!Sc9zNv?_q$H6| z=K8Buj$Lo^tx(k4Kex(f<7G{O?JE3LrnSm3hi|~2no82Ga%_R%f=1eqC_V&bBgYA5 z*<2RM;_FS19*fT^lbRNCw_9aW!_qupl}W7%O(od4#_6OsVf9&M)F7wupS6gM6s##y zi}U8qvvMoaD#HdM_>@}2$-dn!;>>Axi!@%yFYLVwuo=}e7#>aZ;}!amO+Uiiqs0S3 z^IFdc+fH9e#SfZ{@Rmj#il*FiiUChC;32)G74KbODgk#OWEGbGpN2y3eqE{5D zNJ_w85|m`@YI8k<|ED5xc_^EyNMagW^Hn4?+w+-<~hQ@F8z|zzIU`JujN06E*g%GtwE0l;5wGg5vda)U9zR;72FMX&?o(mvzd&n&D zXhSc~{BL!i2g}~kDB{0_CHzca2^o-(%;rdjb?43KlscN_hJe2AQ|e&Jh#hvb99CzC znF(fW(t^zuQL*KhAkJ4>eQ{nfEWi0?6oxAFO{-QlS$n(?ZSV}XhJ7h!u^(`+G_y)N zzE;grG&tB1o0Djm2B&wK6=Ha6Yq(YVR1kc8JfAu>8Vnh-o1%c%P~bHJc$qM&hcSVv z>~{F5ZFDT9944Cgk+VmRoXN;Ib7aqs9eb=`sr-oFzy1ZMNfX%CrqeB10`7VT6bA$Z z8w|k#ip`r9RxNVcq#Nmggc#4+pxn)X=#VE zBqE~3EeT|g^_B#9ZP5SI!I$ME2SNgggo56p1^0N1FfmYppR*VR3JeAew+%?%)%NPN zWBD0b`_(jotyr;&`CND1mMw8{Yt{@Jxnd{eOE)|-b2$CSCYd-%xyN*o)jk!BWBTNw zj#>92p{*H`KP2BzhW!O$($Ky@EiHAy5gk}+sx#Kq80kOutrV?ZMMXhDTJ7}d>UrR) zY#2tE?xM+bk^bw)jni|!ku3J^sS;S5F(#$Xa-p|SXKnNYXY}$h8V0B^^p)(DF!VJH zrDmvV^l(Zh(`994T};ohQLp#w-dX(4M@yG}L_KG^ugk{&{^i8r>$b$L$<>KxMVZ4P z%N(9LoO#_gKJe*VLfX;YM3`6>oou8`<>#lO{xTh$GNvu9Lyuc*I-GJD92FH@W2`YU zohc+VaI6qr6r`PQ(#=VjGb=7`7EPF}Mq(F@Rpi*$?)P+2LwuC_=TmdyY`ce_#7p9#ho4p- z?V93v4?l^K#6S---7@=s8UYs!nWg#g%AP8>Z9lwiw`=cA2(!t_=x$c*7nOkUjC8GqsQ`{3CW6qWKEMSU#{YyDPZ*y z{S3L-LMgv6ANwffX3%WC->_x#=1rS6uV23b*N#0q_Q3LOtZ8Lpedgv3ILf302Y$9W zeMgKh%d^d&fA(xr(%!wu@C*tGi5@$3>eTV!;Yw^cn1A8ev16C8Rbhy7%N8a5x3WRr z9Sv=rjTPD1pM7?r!midRltF{SAAKzD=bwiUPfNSw4(p(kC(}ym>+37do;4y=FlrEv zavd>YLdQw{9e3#IztvDvT~kw4UWEu^O=GUjR&vzf?c?L?>*MEVz;(Xud>ivOqL&yz zlAfTG(W#?GZ=JWdR;|{0X?=dpvxzU;As z_7k5w&8exl9i{i(daA#fmX>PRxZNNnzBGIzjdV{kh-?a)(fWtIhGz&|u+(6BX!ugJ z%X}#sj{5Bd>+#S%@thP{2;Z5t6vFj4d{J78xS5)%=Q*bsY3WAqXZOwfMDjJ~AyVE> zI#UAM&RnDhqOYWy)4n@Wch;;=Rf!lcNqk#b%G!-jLBmhj?~(YzF+hNo)>qqP6e zxD?J;D73-I!;>b`3I!_akbW~!qVi8T9LX~tE+*x6M)Qn^w91uwKm;5ZXMb36c#?)o z#}r;U*TQSyL%VC>`>s6a4&R&gG^A20p`F9rCA5fAQfG!;WpCDSL}BnX8SHD^Mshy2@S>N&RP-Xz zYlw-lA<(`trzJ7no$M8>%@t}M~v18W(FWQhWEDrzTe1n1%op7QW z#*L$RB8=JRr%g|q`v?Nejg5~y`N|@zZ(d#{ZHnlVmlqow>s@U1Z3DO=0HNCn6T(g6Bwo_WYBl%lk$kgY9HJZ?V>6BkmsnQjk(yb5nqPfSwdiAlcYf~}9G zo8qjL1ksn`lAj3EDJ~g9=~N~Upf))5JU+Q6uBqzX2JPVHi&Rb@XiGP&51vncAo<&# zy<73Xw~zrr7!6sQ9uyMUM~H1AeE3O90#He$%~0WS6FvfV@#7c?4W5WwG+G{pmdB#y z!Qd%by!wqBH%TF9iZ~zI@5x;uqKkN~96?*dV#9_FhtH*_r(?Fk#;BEtK)Mvg3EH*SYHZV*$Esc%D{{|2!McvZUQragK{&LryR-(uiA$T&HdGI}%h=OYl zvf2_8h0hizW2e!V=&~BF2)~0;iq6hXFRfXPn^s4m6+>e~6R6bK*aC{NhDH#}!Za2& z+zm}QA30$aMRsKmB2MGgdA z7A4$D7ozE56DC}`w07-8V=YdJuMe2GcH+zjo__l2naEyrnk`;0zr1?&cN-{Fh%QPv(biuRGj@A zWj@(C`L_J*qQau=tjz447Om6iP`U(xIqZxX*Zp|B4wBMv>c{!>y}Z`Vpa0{sWO0FX zEs`()k^N}4sGDqACw@Z!G?BJX)(W9li!eymjvyIkJcCdz!g+Nv|po;`cHu&T1M zs@it>^5qO$rKnS~Hix|d_Sgic)1}sX0e2@8n`D$`Y{@usB)b#aE~}l0rGVs!rl`Gr zDN$CvUjn zhLNi3v>&ir?79M{QxH=9t`8|Yefo4-om$<3dqd3hN0ZR|)$Cj4h07horrbM&S*Wf6 zmsxr5^!x9h{?rWob+>eVsl4yn@ahwXldzZV3bs^sAIk8|I=OFffR9QctGDGhSrxTs zcI@~z8C$zPLzi#rhO>3y>eOK>%pi7!uT}|l1e?L$(%hv%0<-1lnl<~X71oJOEe`ypvlf|0+#3oC zONwkIIhm)=m()7J9cNQRWo1KkGk7I-wO80mt4azm{gy+7TPtf_I*nH4Gt4Nn4Aw-B z88bvrIe3k)8ZFVo+G{Hs+MwYYOY`&XDtEkv{2sDAC??;>??L(0I&-SDrOHzBd?Z)M z*KK*we!C9>Y7wCBfn?qZ$&9Y9t{~A=Dl;-Oi)*UeW%T%t^5Tp$r!Eu}6v8&Dvs>h? zRq&DGkc8E!U6hLLblNdhs(e_p1KMBdY(^m1!62RXRtCYe#w*muNa)hIxP8l)Z!A!Z zn2#jOZ86TwweK(abeKug(%@_@##kSR(+Rh_{^Cz#~I<=nYA*nu(&(!$jV9&eLWc(g{}`|Hz^h#6SJ~WJUsza>hxpbp zR1cgYzcEgsaCO*a_~}w9)F4o2GxJe`U=D?kUXRfY!BXoOWXc*Jpi^rhOZD|ZLx)BM z`Dr>Zr8<0rjUmzTL)A*HUszO_2JF{`GKbk5>FVsTD-@I;_rkbtsdV~K{ZC7W!)_Pl za*M*pAZzbPm2}t?6Jz(8P!j@;9uIEEW5JNWG?Oab@f;Z1I9*p}^n;ea;xoUar_QEb zJasPZ{K;b{Po6$|;rv?fu8s~SSBPqO!lKKJm@;GfsQ0YjWXK{Gy!z^^3;ZiXLlfCn zwpAV!WV1>8)%$#N4mAWVOs(|t31k6g$ac|Mi^cNgzTC#hq&ai)64-ab>#v)AAImQ* zEy&7lkWnUwDXfg8Cqn11RlGD(mGM!Ml#qQyvApYs#>PZa0+=1<%_A-IDYJM#f@Dk5 zm{5It_IZ>M%Vy4eI2ox{X-{O$TggMZ4z66eGBQcZjXntb{0SeItJN787o`V(T*e7j z|F*W~#=7#V#@h0lCTyzSP*+`(>nm5N5$^7C3U-FI-!e0XBlC}sj~zNBJZk9RaMQ5i z!-mBS8DyH)?>D(UG+^y-=~{ty&y|BFn8|UtOM&tZmt5A6 zot0l&mj9R;%mT}N^XAM+ifqi?_a%6_7H$~xo;dZI07p~q@!h+3|L|@iHsgL>zI*p| z*Co=Xf2JXsxSmZf&S8%fs za8N*SBnq8OLl8Kc_Q&sV3SSQjKLrXe28FK&g|BBqBy1m~zd3m+Iz=zK$;lFxMHE0S z7Ukt;+HASmnHgCqFUrf#&dDvn%wpRaAvl~O3gj@buyn;GBJ|fKLZmzKDYUk>wzf53 z(k(a7Rpjl&F%mfY;2!g!_OiV5+t#dEv*B=lWajBDDB#QX4XLdi_16b)yY03Su$m>hqzy{53Deej$)F)a0$!j9H*`BW>@hK61{ zTtMDZe+}T?L-Ev)DyKs;_?mm)ped8C@7Oxwxvf+qaBzxS)q20WaB^sj@;Me>52cWViASfg>*hlN*9~2ZA;D*XA2?G3o*-2K>8&sCoiu6Gng$SBNOT(C%>>TD+~OAC}-gW z9QsQ^L4Iy_*7@_Wi}KSE#*w!ROtglGNL@gT9qN<7 z=EdJKv^!0t>EpU1UG|6_5r!!&rX**th~U|VVCO8)ut8hj-@u&dN>!d;++-2F zBP|JRt#Efis_^EUa0@gq-4Yw z_^q<_qSlQB`4$UbAA2^tM58Q@+NA$D^_R;%%`XrP>QrNz1_IGN==x`cn;mCTMX ze{rgKt*`-IkVb|2ljE&S*4imrg*K-p(beHv#=cCQ0H3+Mw79sCs9RZCQC3o1T2xR_ zUJf0CVNnj3k^TZEl$4b>G*na+6;)vy6uFnk1ZxP03(CutptV}zCA7CYF<5B0;9z)B zTqB0rjQV$F$_x? z{ufW+W2v`i-+R4Y*>*w6gyDzjXY&GK1w*kHHX{7W^JK{5t&qnBkjKf8$H|b# zhzwh4&d=MnZ`-tK)7Ia&ZP>7O?Qfe=g8BQ#-+pGFnFWnfiaj`-I0-P-3)PYihZDIr z!75+o8}q?U;x$m63voolFzK2KpZpDB&6*?})es=Qq5SujEn9y4I1%5u1`?HE7GlfH zYE&xkIhtMLOoj9R?(WR0P~lE*uyAUONFjg-M^kNkbVyJLN)bbYL!zQ^ z#zA-hlGu@^XhaZ%4u_e}VNohDwWNB%*j6eP4E(f4=$cwZhsIu2Qc}_$eqB?OM$=J_ zbdRmJ!vP;(=+K#Ain_X-m|;@Az*yg|mdeu7Qg`en8FZcpI{yW9P6nNmLFd6R3m{*a znOV8Hc{J8(?ka@#EyyK}keQ7DF$t5)tas7ycDR%(#46OV@GJ_s67yH8@@!fo%36|8 zSc5C%tFxM0@mer@`i!YlCq?{#!*N}a6o`R4Ir9=^m?PwFMx(wNDzy? zS`%F6XMz_GgBOp27c;?&ndE5^A8ZJn(0OAS>Df3R>Lnpuv+TRIW`ba`vcKAOL^Yq{^ZN8Wz>ZL^|T5bB$)h~_536<;oE zc>1MT(4Qz@m#!V`sP47Nlb)U(4Ezk!pQZzwzLhm(wIrb?`*Zd=21cqGW+!u545G`K zbJ>Ff ziJd=hgma6k=idgmo(H!))$?Nt3NkWar4WY-_!!E}%Fe#bKLe8V%EU)GIj2ut%tz>i zb-fSLnANadFp`>@m!kAW!+)vFEwch^DR!vqtlPA6>uJ&Mgtwe5J}O-==+|cNIbGkd zW#{h~%TaW&_bm`J$eEL++ zUkpv%f1xS&uj2hygmDtVsr9;;A@T7+UV%C??339V>3UE9{6FT7hz^U34buAA8&Tw* zj4&Hrsj6q5nKdLhjKS(Rv)S(pZKY;e={S^kr_d$b5o{JvB`37@{rKKtsbdNYD=T3V z6JrZ${zJ04ysWH-e+GG{7d|R4Ep2J9E5fQTCVwA9GOJ-Iqj$bOOHpW~;lBjOgjvV< z`GtlW@dA^PL?-r!1P290@XwG`rWbt_7#I^9Wn{yz)}S;y%>wVA2Ja~*G7BSh7L8Pl zM;lx!&inkF?5y+**wJ};7`rfw)6>&3kWaK4h0YF#2v?VdUN=CWvz}ecdk362n|={1U^&8^x!P zkJ)RsYDfLHclVyc(Dv|K|1pyjo$U|?3y%s)l~v_z4-bi-o?uNBw74F6D4AvA+J;Ej ztB8xfumB;qQ>=}30U&@?u_9a-Sv7OO&JGtwOV=o2x^ROKfCSG{L}6PK;TEh?zA-ZU z#EB1PV2*v1n>UKZKc6nFwx}!5ojB1jcJ3%^G)AhejE`H4+|si0s!G&378hew!{x84 zs;)$O)oK(~8l~Xs!qHlGj0&31T^2PR`s;$@YK_KFH3ad2i=i7B6dV?cQUZT}3~LyP zFcU*jkax9K_$a1T4>Q3@%Cn3BCu!9~a9(L?S&1z-&juSTFDC~}28g#K`YI;Y-=uPdt%`bv>tLCKE={ zyJoX8SzL!0Y?A2f`cj61RJ7BQI zNeLCm0Y!e$C@v^jOgr?0UhX$Cd0YXsAQWC-PPAXvb-2hGV}R z_#VIk_f--F6~x!ID-Z10`+Ei=Xcs@5MROi5 zoS28}0pUIH=4G?`#EFw<(rmSzR_NNI%(Ew_PoM5pv@IF-pmb@s+SJo`?ATcv8jDIe zOXwRK<}$=4OdpNc^y#tM=2o`or5j^IN_JYA|LmC$KJmZ<4?KL&%-Q1+&5SdRm^pg} zqDuwR^}&$YGe!98;?0u>=l}A{FJ+;jH{AcoBab|{aPrO3A^3Xyjn6+ngMIJ*`cJ|m z5P`*JHA0csj~hES#HtG(GkWaxGof8NMnB%3_7k(ai)8DU?1s(fdE^UwtkTjOH?oRtU?afZrc3n@fV2sf{?vak@oxD6%<#dwQh zIu$`3;~jF~Wrv{CWaa=Vx&&3MW-g^tbaWE`1Pe|@2SZ^hib@ve2Xze1tJjBy`uV{@ z$LV0+dc7a&bo~8{n1l^@>mM2#6#Zu{>d5`~G|9(u93=K`NbH@E*f@;kX(l?O9fFFk zNK%R}e~4jNAK)$yEa`hdT?vdl`&0Lcny{nDkYh$%X0$o|LLOml-XO3 zwM8&I;devmtT$q|9*b+dI8nN;7a#KaXw7a!$xog2tn`V$-gHfnFxR+!@#4k5mN>ei zZ?(>4^}-@yzA!-;DP3}Q2~){2wnFhdV&3y^V?PU_!d=3(xEA1RV+Fl*VYgwpvj-60 zOOY*FgoNlymV*iY6|?G_*5Xp-ARnu-^iuVpxVa1O8Xx}U>SIMYJ4&OU<>B;Cl`l;X zO#A#jYg~CFfS>)gy&n%8bVSu2+`jn~@Edyx&^ zyZPsjFlS+D+!X(0c8INFv)KvXH=l`*9O-=-!I4Y4k;!i)qZpsACHi?IM+_Z0bi~L} zIPiJgDE0!PR4HcVOn>%BVWCA9b=xzuFg5LGHt9OWmKmP8eGq%&%~xJ{0@KM8FTDKv zo7SMR(%S0sTB!NbGP2+*tE)=E;WH@EQbJuSVS(%9YOPi)r%a%VA>I*{P?f;e!`QQ>VHfSZ`K_<=GSrZT=pSM z-x!k%NvBzbd4ZO}#`p>6GOO$kjS*2BA7SYS$cvnAEOQJ=N=mx0;CZCF5)tHnkG-6# z#+09~4G;hOO)L$wj1#u^X0}$ka8zyZ%L|9b-SzMjw+(}_5#U-W1WvGC!r7BxWij*SAURhC!01`auGV-lTO3P^wqbwAwLO?>KFi^5#mPiX% zULqjo7FcdB7izjdTnBNGl!2{ z&Pt>DiVQT2qeY9;nw!m02M=atwN)HHe*9#5NsEA`#ii**^(8oxI3vHPyxD1uJ8L_$ zcKMn$adF>$xBUC_WfxCug^+ALQPUYZX3Uu2_L{ss_!p`ox;@lYy#7P$EEH<}j4;Y7c+@ZlgyY9&!6y73zpcQhzp>8^ zFaP~nyj~9}TZ_7i?@K~%oB;_~DP30CEH=Ze)N0!ru~u4MU$6DGD1#zo&2}x%u00ja z@^L1)R2-rBB7=>10cCO4I8FNQjAm3o*s80Wvv-|WsG?A*JxHVQi^lo(Ljx3!W(4Ap zNDd;x#7*$=u|`x@9=-?@!^@X7v?{#44c@YLoHyU-(#RY#EclHrEcJ?j!G~yaWN=f7 zE69Z9j)MY1hYdp=Pi$OhU;q?DU@-WlY-;lIZEUo94W@GDI;{jURN?JC7QA^4u0qsn^P_d@w!FM)lLz~1T1!fH ztj1C4W!~PS=FFLMO(OKdJ8X&Sg_}nB)!+bG-?vPIf#R?^^G5G9|9ZaEtVvz`-SK@} z57{OVccs&s;X9h%nt{{<|LtSV3lFi~dylU{>AH96`ClzNN8WPNFf?Mava!ckuleQh zz5|!q3{jLD%Q$%Ow{O1v=Iiw*H!fSX?iI_OXyMu+BT;|BzZl5-Ki0W|4C!f9l{`+6L*8U5( zZ!4~k@*6yQ`fax%ie(Cu2~krfM_QQ63X&J0vI>FcqC#7730y%K-~}kiLx{Dws4yS& zr%#Fwr62$nEr(Gh!#Ytq_ZcyKXP3i)W&R480-T0%pu|r$AiK%d@5^q30rF=I3=A~- z8TG2t2v;IauUmv~ni7|ShXFINZHkUz)0#T0QUTBu?C=iKHPIeB&NU|Hr z4bIKZDK4@}5nD`GxonqN1y2YjxtJ=K++1HIv*Z+4z@$VOGi!LZICAn2%YQnR-AP52 zokH{F9m~J`eo|~h`k7CXagMZf$v-~JR+&{xm##=lJ6&W6?hwKY92)T5Cu;ccTj5GP z8CMXVxqPWLY(hxdnJ1t7+ilm63qe#~6Fhp#?1!Iw@=TiF*gI!pfhI4K+r&PV-NT+Y zlRPe#-#%?xs8wlq>RJ_QMD=S=ocK8j!DC#?Wfxj?W1hhAJJx98TxnS;I4IeAbSur3_w4HCfaL)6Y1O*{w;3Is1C1CRW}fS^E} zs4j(NF>M5}p;v8%^CSU0`2u?3JLrW3NJ#2d9EDV8HmDSa= z3}-*bq0n9*mEmPOx%n&0JWeoJa{t5cPApfkuu`0_^3fx>Lt@hgb(#?clhW-1_g`xJHC&L zn)NKW{q)SJ`0>GKj_upG@1kgB2|}>$si$sCy7$SyJpINSZ@hU2dlxyYLo62s4=VSi z5F&&b!kxk_VYYCcFb=LjoDeL#|NaN&Vfo$$d^uZC3&Y%(0V_JYSUF2)n`Li4fc7nB zbCP6mELPk(W>UDf8Oqn9H^fhx6sgsyy>zWCflsh#7&wqZQ8Ur zoi&Qbi1Lx;FDXepf(9)+{ah0oPFK)7eSkQ#f5w zdP?f+eUa%83=E0F@!%BP)nkc!aBzS@KkRDoTw>-xV#q#Nh*3NT5;F((!B9vrW&pT; zP>KkB;4RF|Y*_!8F7S?CIg|xcizps}D0i7vQv6vZw;+;>$Q4A)4Cg!5udt*}*!s)i za>HD_#_0;qd`?L*ocV3gXi($Tnt62N4|r`k182Z&GWhRYhsgY?h)%y79tBU;TlVk2 z_F8EbXJ=zWfXbEXH%iwmj2Ry3)l5h<2aOrCfFeM!`qa-pTUS>fasA^nz&w6gR{SoIuSchUgdcE`(7GMxO z0;plUqgNn{{SR77q1p;1IQ2){Pg<}3Em?dE=|>IySZn6C-+h4i8+iK)czYjsdoOr9 z3SN9>4prslW#R34`1D!u=Rr)A;~<2Rmz{CpC~dOOteQSY2X`Xl0%8-iq6AAkJu(P}?z z?Jy~(Ja^BpTelUI7H`~$rAfNOM=GF@n~Kh(1S%06@fW^m!Ip+$Uv&(L8>1~cuwuoE zgPH%B2%%D||M@D$@l(%OlMsxrW=GU-&Q-(biFlqeZadi#VI;!|V(fo>7e4U#LWC1< zzhMl5IzG3~!G=34*cNsWKHhfx+|Djg1MpnVnq~Kd*)2mGI<%;a9d_;Hi_3OiNY5?q z(hf>YOpJ2jps+Bzxg1WA5H=HKIs_Io4BJ9Q28Kl;P8$ZoVxidsBc;+0_Xz zUl6(w8H8ffs^Q{d(ajw3W?XZ{WO0Ug6aLQ-Z$ZlDRm~=HtTHx#y@YZhbEPjm;=SbS z&FR%&?}TZ&^J_~IR@>kbEYR|!;g5e9eEX_@BZ6bKci}OoR^2j*>f8J$-LjCXuDF@E zM7hw%_lBG9PQoX+@^ixv$TlJwe*@EG>8 z4*o6Hf%jj_-k2FZm#xGfP;Y{3Io^LOd7$63ZxQoZgo4M&ppWPN?Ww1pda|4fPNL?1 z{Bs^q%KQ1_SuhZ<7Y9k#H1RpbvSnX@6gHQZ zoegMp$W@3Gh@u?9NPBy0n;pTDfB#mB8MMG_;Qy=80#maN?>`4gh9_|8QjRU_;u$!6XR}I*k*v+NW#<+a z<(xuc?!nW$&Ry8Qt1@rbf#XN_Y+1j3*Ou)F`k7+_hlM*FOe3=u*CD=MQ`=>`a2he4 z69*4}zGg4V$j@e!)H^Kdnkt2tjAh#_a=W7obvNyGttz-X@$qLr`e;j8yYiZQ@R}Xj zesSZvb>qi{INHjvSKIlE#p{lyT}+RTcyIB?%f5Iw2}SIw;>)s>FCi_^=95C=BiidK z@4UskZp%k#hfU6InDXL_FW!E`T?s)hj0$_JQ?6i7JaI?T>;=^qGh3m2Rq8wLd&24! z-kNqEI^&*u#!;2?GT%3nf@{{jgV(Q3gRXn{ZLAT|UCyY`4!iEw`|i7M?wCY4o~smZ z-#zz{N9I7q9A@W`8Qcqd@dy03u`IR^TEu3KQYb>=hx?D2DYT~x^N`=VS=T(;u>s9~ z@%x3e(hf^7%AuW1@!4npI?%;#yfZ`z9cc{sZS`l?snt%g8IcDBm71HY%c|{Nh@-WY zm$dt`y!7K|tHh2rN_nfC_R``OdrrB%xu&k2DV=u2O*>O_a-}F&cf9h&pv5_2F{1w+r^X%+k6*Uo0MMZ7!;A@4I7yr%q4_nWS~c z>>##>pjCkEQTlP5Z=q1{p3gkhg;e85tMK^RT7?9cnj*UdNR;{&0v0ztpoQtvvGi_4 z&)K4jFcx0NSa=UDEJ6#5@Z3plZ4DKa#Hp_Xb?VA1>zkYEY7qaaZ*6Hu7Dw5Zce)tG zF(_Td1X)nof+%N4TPsX}Ye^|0`i*X*!pMm>yGSXclmlOBN5MI*VG=H#IC1dcal0J; zTvL5Rt5dN0WZ63WqNAgueC!v#|KSkwQpTMuBdak7!OQi#&YjcFnl&rxFs0>`3l|0k z(gMB5#mBYg@3x@aYtzO9*gzvz6uVl>vB(X!dPTwR%rRke5!=?L9sCxtkX41DQ|`qS zXn*_FSIxRd+q(pxz+iuc9ffv2*G-LRJa_I~P2`j@abu?>A_n)pA~a}7(ql=qF&+EB z@Zw+Z8`*@3y)8CL5Kf#ZDVZhJ`cvp-4T@H*DuifWe45HOSD9n5K|x8A!Y>k%GX37W z6bCd4*aEJ=34eL+x#wnw>g%>ITei%H!Yr`(%(@h~xH%V(AKG~^qtZHQ!>^lmBMY>5 z`}Xa>Z`=O=*!%7Pr^@W@_uky}DVg4Tqr=cUQ&B*gh@z+{f(nWi6Bbu8SRA zQPEYFVnd1`Fhd*4z%adMdhdNElYGy)85CCCpZoj%`TqEFA;~1Sc-S-+lkDPe1+iyK?uGl=J7$ox6B3HTCS#Z~pbKe|_`A4=Bio-;U)AK`J~1 zg#iBp0pNM1jF=b|7>*1cIa@nmFWNph$yCk&C}{42&zYFqmHI*PL!>{pcJ?;RL(q+zeY_*j53HwR-bycB)_I3p`4sF;&;?p%6$W|2(gE#c&pOY_KPP>qtauj1IH8tZjhzGLrE?x%V;-PD{z(953&ql{| zGY5eZ6?gBwN+t5dY>Bo+_x<~u8jiywvGe<^7T*yOZ+-C1dCU$`C6`+ivd-p;)grfX zBf?HD^>^QW_Z<;^jfXI9_LU;VLSfQI`_)5+fBJTE^6As@Y$rD0mSoG9dq0$bRq`ik zds}|@1h49Ir-`$07fk1eG3yZF=QL#(GRw>Dh;(9vdrawwF>paUY*_+kojvF1Z^Dxc z7R30O!Y*CBgbSsfIeGHZrL3&l+M=TCXHubx=3Y%X_}NznfBJsk5ylHnEdz)v0Pjh(G#PKn(D#mbSW!>R%#_)mm9vTtont=hOGC0`LQDf<$^_K5$ ztgLCQE$>o7KZa+fzp0@cmT#}8jgqnD02%2UIqNF6-4EqZRDKlQ-X^`g8*@Hiob+(W;gB>cW z$V#hgZf>ZqswOCi&f?_c+SiAnArRgO52GKVOl9?OsOmgmLE~#zS0#g+hJ?4(G(iWe z4N8L!_L+ok#EVYj1ShV}FFeF8#2;^*umcQl=;*QztN*YFFG7;&EL~kN1Z_92WEn8} z3u-WPAbtQivJ49{zV7VktuL?d9ssbzz8V@Z*0x}*nIIt^!B~3)W6j~%$#nc6Xe+0V zf4Gy9tY~`%NZr@pg4Sc43wYquz}jp;L#{%2^(A!kQ?_Si27^IATSDVJaf8;CfBhrO zqIsgoH(oy{Mi!Tnj)53QO=YRMv^e(~rr^(?Bpo~t@spmBm4%kaI@gq>`Fn~SXpGN3 z`|P8SVr*-LEg*S=hv#MawoefM|1c3laV<3du?QTE;y!Bs68bC~`%+a56D+`*uKC{&u8V7(Q;?=rN(bS_}YZZ@1``>(;G{^|D?(bL#B1 zg0l7`xvnK0QHMAP9&Fsk1+mb=c4_apBZB> zckDnWn?po&^rX0`0BzDJ2oLlHRD#wjUWYb`@*HSIAhOTdcl@;R6Q@j`?x%zFPy|Jd z1OR80r-)KXBJb$yFGdPFL8eVy`+HCznT24iMPRJCW30gv zbVTs}riTMSRp#Py`}5CCFfkzs7%bVPt26y$ArpUncPCq95*FbkJPf84G8gB6T#L0m zaMQcz9KIu8HUEuZ!NwebEHjrC*`Je{#l$R04vrokI#);gbEy3@`<=>hr@!@gNUjbL zV2W%uJ%QX$1pU6<-;szT?!<2+)O;eYn}~T7T3iHgEF?!+334{_%t);%E-FOqgS;KZ zgwBFPS_gp)|0t4`$e$yF2g{^Hf6)VOPn%Jz`}}GU+I`7LzJEVK5m!6)AwkuWd+d8W z_J6%!N5A_4I4OYXRzmuum$)u@87^Ywi!Z)7sk>XHF{p>$xx`gF_MwL#t`=FfQ9C`} zEeE)_pG*AW@2AAFFPv1oDr9o}eQVaNS$qGS`026iBs(dKh=5C41u0Yd^=F;MKA=T0w?c^N~?%Hd|m5><>KXLYq18CPY9cH#l|L>Xxs@J8z( z{%jVDO>#kFpa6lO1W1M+oQMR9P**n{p)}KJ(|#n;koS>eQ+4zWeS6|M={yy?gh5`*Ui!l`wk$>iQPS_#ObJweuu2knL7?1nIcw{Txys2cG4!Q9=1E2kp%-0?*Qs@i>wqWJ@5A zZe*8)7L}F#B7lmE5qHg_JXF{Y5K%zal$AjqsW{R~5&N?x>ELT6N!~v|vlgoyXVQIs z`SO9U_mk*3mTRG45@m+2R{zv58%aVcFfEo~w@7k^GzyA}+Q0umk&>M}RflClfxz2Y zq%drM`y`8=F@!}$nRM}H^W^yDYaw@*Cr%&b%Bx~2%&vYUKB$x=R9O&caX@&B1cPTq zO?f%?d+wQswEexv0CAl(3I2DbILp>EL1ZU`RW*PV69O-0;NngoEB~C~<==wQbTJdY~H0f?%GoysD!*jg`DMk$v6NY-r%nk7c<;UU z{`S`HW)8thzVZC|uLlrsAv9%vq*ij2z_%u`sQ);4XF`}AnAr+j8M82 zR)9Q$ol~k0jH!a+vPzg+c_cRJ>vDvj90dp`yA3X2YA3m3@PPOaxx-i!^<{foU$@wq7_+szbG1Yke=k?0Ufni`Xo{4S>o|=CAihNii z081+nKhTnd)cSaMM2x7ono*Eh$&exk`UKf$SE`+j8h}Cs+>kbU0Wj@I0FuLo^BVnl zJTpq$8{AVD65ze;v;VERXZUa*id#|T8Rxxzqy6eLk7DuOo7vpljj{_uV@-YLmucEz zE-}$;9rf;3*#aNc72^;H2TIGrLQzflc1rreI#yX$TmU1en$}JFx(F^QM+-!p-2r8R z@J@1t)EKZ>;owKxQa}-_zi|jc3Y4RTWdvS7KvNKC#>(a4>ZFCq;|1$S`r462{kikm zrQILN$;ykIfI7baK>ZG`ZzUH63E2R$5U%)3JWJys1mhqK<6tDlK>)@<0PfY9;7RG{ zva^eeinCJF*|+cwit6O#%*>{yWR_WO56?U!h97Nj7Fo zak0ovIAw~JOoj^=5;zBL8W^D7WnTfw6~|6+-*60eKn-@@yLZ6?kvo)cawgbMb5C6z zV)&gX{GN_49UR&~@8$1ka`yEFxGOZy*VWI}FDi(I;7dX1>JmAJ0lV(G7QbORegoNX z%kdkQ<2QI-{^(#?P3FIjWcKKe)wC8|KJ~-i&yVi=TLQ-6UADbI)<5T*^!}&&kDR=m zn_Y13;)V1A7-}zzVHG*0t?ZyrXGNP$Hh@|fHFdp?RVqIx-YV0;OY5m@Pu)x0E|B`R zjokiJ(+F?xefvg5ySlQ;$nb!#_V_yf9#ufEv1#k{YzzLsrgTxjKcb3^jfFb#lx*iu zKVh~#XM4;vqE6kFnbXPI2DCD5b9rvH#nP+NcsYQo!EeaKFc%%*+C80(@`2vwa?}hP zs&tU`p=Q8OzR^tbGXzN0ig4SZ8JguJkPS0MCX$u$5a_u#Zh`EFBRryTx69k9; zh|#m=#znch)pvDTTu#2T=Tb&S9TxNMuGTTbJRO~ZuN>N<>d0ZPKjyWzb@aL@p_?i+ zvY0TPySGa-fwua)26}ZPBO;<5d+pNy(hicGsk@O z+V$%>xf$7cxoKA~UAl55?Rs|h*Ln{riw}t*Hh3u@@$73PW3rf|M`Pj;fV%@ks`^jQ zAb9dEafmDhCuw72Rn=Sx(CK6Iu>}Bq9V>13+BOKLcmDMqM%oji2;l!$AHwywx1=}d zE-(TrUwP>zQMF3x=mBvU`S9n`3-h4&Uq_ zcGQ=b7hbs5-c3OqM<1hSt+}>#A$oT01-2;h3+hE7E&N>77Wg0L@4rgq>Y4 zVxB&_5TNUPy#su`yhB4GBErH#1ATp`{?Y!D?4e9+J^JS@^v__XwZETamq=(CfnucS zDS9UDkrVuiOAs-KI{@A$G9OEd2`*DuoDOH;w_wTZw*mkFNVL+@hYU9|w)*-+AgCA< zE@yYkWbts;JdD8TX6?6G&3$kObXb}$@25l?`ut&-pPl*FFT8deTqd+F0eEIV5z)yG zA3kyfJPL55XhqqUq}Sh$1z!Dl(Ej8qOqy=1OCw@EJntQ|{MeD_p+|3ebZDbX%fT&% zSj^JPF!Pu&b@KSAs08>=t^yLD%KoOAKPixW2f-5)fR|2Y1@x6XDr(I1sSnSG2k*$> zW?%{23T_2Fc;8D2{|<FcoFZaO_-q(0rn{S5Uxa zAPZfg5W%9v{9IiSAcwmIetNP(S%8A*qi;Qa=;S0~yP) zDFvi$pdSH%#0`5py98e&d^Kf001$i5q@%bHIjnkT0(c=h4n>fn z{1&SL+7Ip=naB!QaL`fkFgW20tCbS^5aWini=>EGvFU9KZGqSV0FHWDc1!zlV!s zKh_bXehi3{_)VME&-XHa3S3QIRYOA?IhZ@@Eq)Ux`tzH3-hbjmKTCZFQiTWF8XBsy zvD>F+ulegY8Rtp^rIpiiPS~6|IZADP&pzhYu3bBucw?|#LAJzu%p`kZ-IGr~hf#I9kaaLs_Zu;X}n zSK^>qA-^wj*s!~Y8IIy9c)@z~BI7A|bO9mOxpm5?Na| zsDXm$18ENtz!2Glj)iy-92c14Hd{ZN;z61eCDH=(qa!7X=p_iwM^HWwcMsx}z}NU5 zxm~`<2C(OCxFh3M?<`q&M1TW~CoaSrV0sK*3hoZ+S~i0dzL2h`kRkR5l%!pEBKwAX zUiV`(->`4ODKLRdT)R;Tt1BNa71E>&q(>3KO<~};!TlqdmXa2Yt5dp%2{O)R!ha)! zS^1c4z2m3p70pq|0$Vn$x#$dKD7p4Ix^x$xIdqgBmnsHaGyrb}3jp{k3JNj3YTKkRy$iGFi`NqoHFy;ty#nOrgIs!gtS#CWjdg{? z8r{=l;QD$+5ZD`dg+?i=s!x3M+O}=GPSl9X&}ADoy!2?I?^Z_W(aH0G&_BT%xHxX( z;1-8jjXH4r6C?S%pc&G35)=O#eF*gBF$fIW(%4~qa{}L7XQ$Xc?ujR2i9-Y;;4ip? z$)gP1sL3LC;ktn@IDSdwTikE>82Gz9T1Cz&b#NH)2kM=-`oOE++}hfV6pk`;O#xc)d&*_Hok!wTpM%qvj@66 z8w#%#7hS(zUT9{^fSTHZLle=EC>bYh?+Sl;!3)!Y^%@<_HHhulLCNL|Okt2A@{3$E zoi}*o?I>g2kY7HwkJnlFb>HIG9l)=fgb{R1DNNLtQDMX zE-1*)FUSUN2tk9)tjz1z;LAfy>GBmoc``E(s^G1qwXx4;klEnWG?J$Ws0kzA2QO}{ zb+2_Vf9%-D^R4~Z`k?AW+B~h*)>Lb{wZvM1h3!7uX8yyEKVD&7hE)GnS+`hU zwQjU-us&~n*1F#MB)+3>>G@vkcIz4IIqPL>uJxKV%UWp7!HGZSs3EUoXXlB% zdOdQ^HbGC>#C5hA_=dC-u{2+}bwf>x4V!@CSql^V2lfN^PB!?AEL)OHCewTG7+EY~ z%=q=|XWYR`v6-O~*P$|)D>UF}Y#xX>?Up^2IAg{N-k)2`J%Mc(_Y-%GtKcfR7Oss~ zARwaQ{pnkJ=KZ)j?lO0j+k@?WZYO8t{>p8}k!$ra{2WoQ*s+5-J10J#h#)F$27sZr zb3q;k-hF(c=vdy4so2+JG68P%W4Xu$xfoPR)ypsA2P_OS2G^M@k*QW$S6e}W7t|L3 zprNgew9fVx#Ge|{RnYQj*jML)#5E6JA9sq;`1|<>28RH$ zh(}m(aG<|`^nX0uKCTvETv3MUMkZtPFs>Ft--yqj1fK?yB*RcDwH!Cp)z&9Nn6|wFc}mIGd%F=Y@9wws!Itamy>4V}gUP}C z&Ru_xhiH2i9=OMZ55B+H)+V`Y6kT07;s?WAgiIfT?!6K{y%gHiLzsBjAo}is;CW{o zRENO7{e9f zOI4Lb@FL0)mJWM-^Bl;$bJz~ULf!$$*csYj4S$SF4Y{At5VV1fTIoQXrRp(Ex>vBO zy?M#N^|mG;!i&*K8^7(%i^d2w9sqNB1W$=}>f_bwu(1AqeB|V!^FuF9pEVr11q&5L z$FPV{2aQ_G_jh)?x;Y!2ipe8a3 z{rocI$0o>+QRwGU_MB1-Pg?G_f< z1L;5z25=B_h&+IhiaU375j5Vhwn@ki9u3BWMR*qiwK)LFvp=-r`)&8y*7G0y_+tY8 z^n{{(zipEYV1~B{Klm5hZNko-7ca6HYb9Jm!^K!~$*EI{I@(&BI=Sv1gHDk5HP)WQ zEIQFWZ1rQ0EkY0oyO>xEk_8@mejy1#)((hq59@$C$&Cx-BBafgHDZDLTDIxoWy=yc z73a*V@b_*WVT^d#P^Vc5>k*!R{=WNgrdLI-xlz=<^G;(Gz|#ew2JbZra%VrUSaP3W zQ_plMfGhV3d*>aKXN?(|0LVl{+G#0RU!cm=K&q!gIb{73<_vZQW+@^CwtmsmTL<;h z%hwl4r!Js$fy&|yVL$Zu_qNZ>Y3Q-}n3;<(Gbf?PCShj!R@Gch`Q*#@cEU2>`QDeG zfBwM-yT5|c@YU{5&<%i-)iot20~poZ(bdsw&?F~gcJ^8=m?K8n#Zh;jnFAws6&OGk z{<7U;Tg8=>#G!lcwXG6%!;sodA=HO4>(+=|_i0fHWY@9F*RvZ;DkzZOxOkR6rh5%Su79TjM_(c&vlCFA%~*|IdyTld zp0#bk{NF6-|Ni&C@3;*293B}6o*x2=H{MCryo#ST3KCD zUS4F!b)5KQ>%of>t|KQG*}@>uY8GV#B{K>q%aB+)m;vds=U-o3K&M_le1=dC**|^( z)a}#I{?`a48DD>4h?ivMO51uaX-5J)a#S&Mh3yIGlU-e(+AuC&g-oj}79+p;{4G3= zRx3tLoHB8Q9o6CF?Ge3NLUpk8i)YTK6;^eLAdF;B7=;8IdSHztCC|Zo_zx}?p8M0% zmX38bAG)GFIV*)VabC#Nd6V1Ay#v=p0jq~QY617UwB5%|;a*QG&aBHR$1g}wkJ@AJoH#5&J)DM5ni4a4Y>*l_h8O6+(J#^; zvJ$YM;9(D}dFGjCHa&6QB4Cvsd2YkY>(<3XG9-bgT-0uOan-6-YhL-whF4z&de|hq z{CI!=E5Nhs*=qEP--sbYVn%~?TpJYR=i?s`5EeRQ3?NqV4{uyNapcI6qfv%3JaWje z$S@aCHfiR}NyhQ9lOUP#)wDz(;7o>09O)l`myr{v&KMRsEq3(WxT#20m=!;3@`MQ! z65>b57W);Kyj^x1(R4kK?52JuK=pS2J5L3DvZ;qD@9#6LUo zh$1k%AhRi7Y%l^76$ue78~n{HMGtWRlY zW6H5BSVYeq=mx>fI$NByZ45l!I0?>&I5zfyM<0E3PIPoYEwq6ux%J>z7XY|FaXaP+ z`OxlyD}}bjwwHO-z8016saz|l>gJ3SxbB+5YgeXBnX0Qf30br4WR>Hzl$3^`5p(~t zmUZ>Gjfk6t#vc_EH^Qm0zUQHJ@goB2Q&Nn~Ju!CPop;@J*V6g1iIYcz^Jesv*u+=k z7%@lXhQ68@>x8U!=gC8=f#A*a@E9`>`z~KJdH%%V*mvThb?XwKEl4}FPx%$|A+AiK za?F@9lg5V!I~%pmA(7)JjfNnpihL~I?#p1?G>?T>o|`vsc34*`U{@RTI)#%1>mrH#maymumjk(~z`6^^G>e2+jlz6nFSvVqJ4rk>AgaNqL|%hb6>!FR zxP|`N^VwIN%@}WKkcj6X5jR63ZiYmhMNSDj_yDi;773#OtdtOm82oU^rbNChLVq_s z@MF;qb~3pl2k$?6B5L2|di z;@`i2|97Xa_k={j9pXD&xP1JZ{fieb8lx4w)4%@uvZqx(at*(oghTI(6lj^rWFkWtHIj?``4T4_YPS8tLkE)-D@iQPv zwbm0{0xMF_CoRXS>KXC)>_N@`AUkiHYFjLaW%5C0i&3fSAxAhrKb{qF3Dy%ft)n3W zC+X?$QDuIBon~W%Pi5yJIQ6t~kB>0kN%$;mJAlgl2vtx13g_BR{6&B7z`%N2`s_6Y z+OBu_od9Nj&QKAaeXMwCLA0%OU?7$_Gu%)*4D=PQfj%z2zV4oWURqb*fOvL6+P+f1 zbZ@k#sfi!G6zrb+*?DNRUqApMJ6hoy9`2HxR$-t7I8_yq(C*%O-@S46+v6XK9Gir1 z42p`hT$k{0SL|jLxyXkGzgOLR-_B2Q$iJ$Lvj`7k2XNq(-Xf_MXf25{4b-q@@X8V) z>`em#G6=E=!<-27${os)&JJ8I;59XH`n-k_ZnbT3B3FE$wO^ot0cYBF83Ysenj>Tp z{ucTr2d>RHi#$DcWC&jAEs}9jhE1YcgI~`Tg-D1jmfX)b9?n3MAjMCj(sOeF-rUK_ z8A*O#-Ynw3yt;j!h?pnWF;6aHo`{$yKxVO7=rl}-n||~yv*4yl@COsh@#YzYiGqCZ z-=7qNGsyAQ8Dx|N1$l0dg*N?);(s#{{>)6nTtTsKQLSPB<>1#mKH|TbV}dltjF5qW zaR>%r(Er≠1AU|K%)`OS25Z{|o%e%0D+5{>04U9*Q*1!q^8U;HJTG%Y?XTKK#Ml za=dwRVa|a2;rHjq;AC>Vbut-eVfX_?aAQK?^_Dqt)0Fsw>E(Fy{KB;PZ*M^I&_!V0 z#0Cp&A@rGF@osj1t7L{swk7XkCMw_jyi$UanDdeCepDcU1SfI>1EL_YoQcfC!MQ*0 z{C-47x9`id+49<89@nMrAuD~YJSr(E3dQDpw2l;YlY6_kKu)BjBsYgHgY$d7HYFvA z0S;pE9=S9Q9vf`j`ln-#nDlGvPaQ zp2!D}`t44386!tq{T5%lp8&O`^fZ#5is(rznxxhx?z13BrEo~4LGClHMl$Y6pWt~- zMoz}H-2BYKZ1eYec^}fDn8G+4*80Q9mb~4z8oaNsd;)^K zJ^kH%bzqGe{Luc(h`Ujb`$XnEuwN2)BO6&#LaGERf|52=SO7F|0U{*7|0z2z(elbb z-GP55Q1tRL;_1EV112+Z#+|ca<=zX0(}Y0#v$nO|r(eyrEw{Z3`m)imo@UtQBL8(0 zthvAZH~Yd*d7YBD*%Sl&68}ti&rf1|L^jF5_TZmMdT&}wuwnS@^59r9A@Y_7asv?# zrH`@9Hz4BnIikFU@K_u{!2N|kzc2R!JQXiXMZHrEY>a`8H6g2%PXWakJA2~;dT$yD zV-t&U1=2F0V^&lWw-O>SzyB!;aRDdJ1`J@IX1odS`GJ4?fDM>Ny138^>yG>jsG05_ z03n061Pmp=_bChiGpDqViD4KMaTpU5F(w8pyD$$drW7zDofFc49%eHZMiGXINdu(- z5ix{n(vUbuO?wE0Fl`7*rvb0 z0kmda1l0w3i$xe?*iJ26IlhzTu6X#V=U&-Fd{vZL1>KB_jxQBOmr^Qr)YGLGp#>c( zO?~stRHFwZI)(8D)gjD(d;sYzLXk3Qpv@qDgs8@!E;J9aUbG`fI=#UeFgJ*3(bFgDzaeFWIxgYE$N0MsoF83ANY@8 zkzKCxii_r~l1tfpVNsZ7Ti>>jS43*=XEW9~C4 zy3F;ZopsqTFHMR_ou5mD??ifUzt&=0i+snMajjq23G+cwf(Lld3heBJMY&m78R^%v zD1cu;469fM!A^Mi7j{BFQvIR$0}?1l3<74(Aags>Mk5e^A9x3cB7BZ87`BPFd(~S% zK6LsF9!K7v2VZiSwbR;f)7ZRip@Z8<+cMR*oj(F_b2JG~#m-yV34<0dTROwHxAB1` zb8nv=JJ0p#mMvQjRQ6jV78>thE!^|)Tnv@aG65hOPLpC{`&p6X&qCmi}GPiXJ= z(dc)oF!N%tjE%+^8~qDEphJDBbhD3~$-YtVy{@}n*c%cO+*6qR;fEillyv&S8u#faPW$G=9dGnC zFa5_~cYbyf3=#S`YXcW9}hoTgMy!f$WeM ztUaP^Z%XZe5t`l`+~T{C!RS>nA}TB_Z0LxQdm%atHKk*ifAr&5TX082T6 zMcM-$f8HbhV0U}}%trtGf7HSMi>QMQ=o!je{SXp!17^+!$n_amLf~<( zp!&S9o#}xTRk&K<7zd7tU=s9@_K++Hs+dlTBK9eQo10K=ShtZnfn)>#vJjO)sw7YN zMJ`E0FdMw553z$p6vY{M2w0;_PCgrnwDf5q2tS1n7tWpgFdj~Ain*-|)W7@n-hb^s zbUvr53-U$gF?`0%S(99=s=gAp!5I#Qp6Bh;N)QiZpgy1fwr%jsmAt`^SGf2^jh``R>0OU*+{p7GaSLWd1qZ7uzTK+dW^_sPZ!5*G zuIq9hN$~K|7f#Y1l1)dvo{}#FL}e`We|=raj<_9TbT!84Ll~pC zV~pO8F&c@{NWYLUc$A(;@Hz-|fO+t5kl!>X59A{NRX{yM3L4xsv$=ozgb5>bxQ|}* zWwSioYeMv}aZe^t!o0MZ0EBr4l35SnVsoWPWgzf}K9+o9{BzNj#A~|=A^sijeZtfR zxkLZ8%H}>(B>KM$jX};VO5h{XU`X?Ftk0Opc6QR*YQ)C@;UH(5j_^Q8CkDa=Q$tH6umZ- zY9ro=5&$5FUV%KoFmp4b!W-a}P^u;GEm+fYkBNdDcWOf&NvIJ6Scc>ROm$*|=CZS6 zQPJT6;p+>fnV-yqbxrxqAiR*amuvXmIeBzU+-jtuqnWXN#x5BV&jlU{%0nWS+f<3QU?(&28#is9 zjX5lFU*P!-_E(3^PbZFKG!tc0?_U#(4}X9)`i5u{-YW%E!QjEGG`zL3IJflSnUUVS zrs|&xTuLrV(Eit)6NP?)e~KFT+Tm62Cq73v9V)W{E(jl^Cm;iL&FZ@l+y)r-bhm%(>UP?>L&B@KmuBbS4hgoE_$$oHQv8 z#&{vvv1IcrD&`xxo>Q@?i&~<{U3+(8{A^^j>bdog#ehv|(IO({+lq$Cv7aN>`jc&l zun?hQKn>Q@c3U=9eKWu&b<= z2=lE*_b#S%)+jwa)G9|+cQ0tAVa3P?_vNo91Am*Ktv zaP9JyjDoWAq71Z$RJ#adXL{%4%(ua$6N^6e^4cwSlS_h8i+Nz4`=AMeq zPWYYaQC`(ZpH^3Z$hpQ|5s4Z=n=rF~^)gU1KQD^IE5j8E{s8p>;MO>j2Z&GyQ`y9i3-kgjiakV>){*{8WIF74vVx%}>q_|?FXfaaUf;nUm zLOAh)1?<61d}?y4$a7uWD5SKRe;$0~Tm?K6UtxTLV*3Tmn-@?@zF#I-c|fd1h)EnP z!y4x3=&F}h!VHw8F`;el3>MfO!M~twJW-2462tIWpt+%*VTQp}@}R76`~%r9J;CRJ zf2Ak%u%M7>i7+e`-K0B)S@ig{+>cVF26$hRWFRB|5igOU zY?GKIePn<`#?DkjS0@SRFnDz*$R@!m9kE52hCJDe5TEWHMr2D`*;9DG^LwMfBTMt5 zxJA$e22wqVSfDGs22m@aINvCoZxYTIieD3o^LgZ#mcY9L^$d``0=&=5D=S3`CXW%c z&HnVE-`O}R26{W!(L#aI=LIj1l-TMR+cWYv-$Z1FY}?p?ku1`4!1I!=lx zp5oknW1(*yf#b) zJ@V-MZPJs6r?U$@4^Vkf%+Ukyy}X>9S=3E?NG(m=1T*m)r{FgZauYDWVuX8OTb*?B3$%ifE)=@gq&&g{US;p@%$YVZVEX)Jk!|b}N4&T4`o5&*Cu> zJUt*%=(Oa6u|Lrta!gE2M3OB4uIJ?;c?L#J-cbR(XvkvMK=orb3+u-a40Ux?H5C@B ze42|3T4WBsC$2W@LdH*yj*i~5=Zj+*SAIGH+EHcbJG;Oa|96x{o@CqTQV|{=9@f!~ zj9y!1%8z}0U;i}$2~)J8D2&m$q5_Rj(AC-5)l!_EUHLTWCP|tXURV~;-QA5vb0NwU z2%FYE4L57BYTiN+1^8=SoM=tcs;FL}i!x}DO7+M?kT#>o7^RBeK)~Gk1KrH1+eDe{ zSa+(CF9JKP`n-TFPytECXb8p$3}TRlf*Xj~-tmy+4Z!8#<3%!v3pF_TWicG}DmoB{ za@-aLH6xA|Va7ma@ciIBjKmmS6>R5jh{QacA=UGd8Xm%hZcZuV5yp0v1}faw6db0g zMsd+kGJ(m^{iJ$wN|EA2(6y0Mq(X1>)@Gjl5{#9busz4=lj0~H)Ap+KJMZi}`1Y2h zq$CWGswRcHgfIl(5N>Mf54VrR^l9y@>8W9wgdVdBIIiPNV{nU0{B%Xd6~ zzE6j?Kv}W!8W=mrwqe|#{TVAzPK(jCoCY@{n-tun@IqPnS($|mU=or!VMC?@B6jX> z?B;fn180hcag&BQQSMSsvnv7#u5J;x9BiP2?XuSytvwp8Jt|nnoYC6Oj05ryj!zjJ zj!J^ilopm8mti;~Ge&glt;}w8b{-fIgkFmxBI3gjZR~xKp)mN)q+y9j2B}shp-QSS-SZ|PlfRQ7EzWX&(8e~~}#<#PL1~vr*{M9M3 zN%mG#;!bX?ST~N>^V!f0u%FC+@F(_z>+A|8)$9`@0q>%1IixUtrXsNQ4}Sra`u z(bENb@*Z5}X?6bX5vjC`#77iP?~>^$@;@E{HcOGU&{G&ay+u!;ZI;^P_pc${)fw9F zE_%|@lVb80Ab3jK!?sssJCJ>mXzid4vE=)08`WE1 z+xhAK6ZtOV;R_fdB9%t5Z#xX}59rMYc=OkNBERoUoq>0l;fOq`Gf1b|w=a$vXp`T$ zdIkLbcVeJp146r!HQBv^+%{=@nDfGCSxBqd#omywUmabvkHv7~(HMGhHXI7C54r@m z5B_b(^3$`P_Te4~Gw{wnC`=Rf08~~=N~&sU0V5rV*BTq@Q4GqQty3^04=U|X3OTe_ z11D#w!t^_>h`H?vZZIz&&*0#oApgk7$N+yoRDkl(P5kX?j5v+Gmo(_58}-zQFIB@h zCJW$LiKu4-Mp>#OxeVxCs1+ki&!!q>zO5`5?_@?j} zcM`(x8K-T#KG^cc9)wi(ys>51$Ji zru;G&nRQ_!eGGz9#d>-TeD(FBtrD#Yw`NTYE0+0qjafbyG1@&W6Qt6srEMg55B9L_ zvZX_uoR`3`zRFG^$74S}`v)Suc2DwKc)thvAK!s4_!^Bkejf%9J18oK?cP1idSXuk ztiJ~&YV11%Q;szpP&~UgdbeWp;z?b-Sm1=7E~`RefLQ0Zj;1Vtrb;BIB7>(5FmEu_ z68(53u;h)XWF%R|4H#JsNRUTMUl4tHiv_uY{nWrceK0hr$(3*xW1tz4K}s1uP!6_^ zri_6=q_6rQxhE9hN0El*q?b)ScW$aN7GA&4zy;EG zidZxh0Z)G(#N+rbLHix$QerO++ZJ-xKuB?|WYr$)_MRP!NY%2 zG~Q?$Kj@3UG>vS}p7G=DRTyP5n-fi&msM{6*mdyYk@U*a7~7-r>S{3UZx>&uW^!dm zey5o*Iw2W}gKn2>$dL9Uv5+6x^30WqV`qY&$iV$&MFLV>lCbHSULV7*iLX<0&6MT+ zTXXr*&=X*ZTRLPYSBpS#TX$!_#%u;>T(9`LLtACvO-<$(8L87vnKE{4@X(msCr*u- zI%USx$YIe=PP1pTsnXt=H#M8II^K;|yBn?cG+OO$$e_C+gN8ystg0x;zjieXj-Qf} z+=BdKNFAvXwK=O6m8J{fp+l?-qf4(ABtHlI1A?gJZ6t(jJjz2T;bN2_*$7Z7xgavZ zI253Jf1$gf^vdberx!0?jEtw0_uqei`t$<_Mm%)nyQ{j05wCs2^8jwt;;hy_P!?2I zH}_dZrDfa-JjPo@S#2%lFt2|0*#|`K*%w7%ofH7KpKl(WkN_v^J`}gYrU2$dGhrq` z?Vt~WQCv|Wi63QM^0FZl#@+54gI!*pH*e}x_($~W1INor9F!jisQvQ(?Rf0JERqa- zT+eojvVfgC{Y17CRUeVVu--^blaKl|Ev-oH0MHG|b!89~QjKYIeGY0!SJcB>U0KO4 z;oKIuduwx5WdZU6OC{G|6^IOB{nFY~R77+=b@k@Xs;Zo7;?V$^%T$KR)#uX}T%r2v4O2v>r5?**=(ITuOZ~wtaFbUfMYKEcr2jd|c@!ZVp5~w4vU7=r< z!A8eBA~uCP9cjGa{LHVsR#MVbOwlJWW1jhgabW^F*V+S`?SCH}(eR@x9V`P)t>|zx62fmtZP_yf2SyzYqI44|*90*J*RAds^ha<#81?eO8lCzFp?RvzyG zH-dlP)ygwRKltFo11TvjCyXNI-$oDpyXi$%phCNA&eR1>pD{yuuJXXKOR-3fyGOpQ zJ}e9p$>62+4f3PJz0}kTJ!6-@^wLYcy*;f~C#^B4;qsMJ`~JbKEg45Y0tR*KSA|`k z@Gxm>G3nghQPz%mCTil>qDIA&_e>58o7k3;l2W5d3>$_qMVK?_BEC<|pL+LWk3F^^ z=Fa)LQgnZvFv_zBGfF!mj70mY#5jq^BP2pU63DjUiaY0K>09otG+5 z#N&M>JST8cAvFc1H0#PsQuWS&WZ7ggOE2jn3?6QT1|f-+yoIa>5v?Brra2I1xI25e zdHVRkbpnhE9JsFTNJv3}OMk!+eYdYVX2QYoFWqTLevL%V8riey&> zBO(a9stEJ(G8t4wET3LVSm6x|Q;ji7w(-q2s-82$T-iu_5_Jqt;E5CLr@wbYg5#wE z*;%*R$G05MH?Ta~tyela#`ct+t)ajDp(02x)q_W?gPFuyNj_KeFCyXN-$_IVDpcwDFlvH>sO!fwEeBWzN<=wOxF z?_G}H{u?hkT-a#k@9-3Zk_~t(8QOTVUr&K!h-Z`T7$Bt^cg$7(2+tZ9`W*+S?Km9C zon%~d6W7hSf+nuv)_c8yX0%s^aAEayTO;AVMN2P3OP?n1c@=$V{>{DK>IsnUHJ9#k z6g^2}1EWM5O*A&t|JJkAxZEYlYr>_WkME{SJ_n=mBf6Rc6b!tSAPfjKimEwL>(V)> zmcrI!oh-DFU8QtODCKx|I_j6BhvA$tc;E@gGvrB-LR(cqC}iz@OVPN zQp81rz8j49KsP=Ep}$EaMh|_A?P+Nc+$L_(^Y>XW@V4Qd%u0qCUcDXI0&$D|x6H@6 z?!&pB#JTRksGrX`76FRbVg&0-GICQd=j9d_);1KCLEXtJEvl)=2QKUC<=jlH9*SPM z9F<@MQQe1#roX?Vi>^=PWEN4a=<4pRsX;(fVNy0gHmmFFdtz*NIbJ&YOG_*`-dbdxW5(zbIW_FvFW9F5rhXugnK^CZ*lWZoMx0@0 zW$v^YbC<7t`o(y-j^6=gR6E9amZ+%m_pf3Lz7*A)HZF@@xG?VCjhjU7$9W>ZW!n}4 zfGx-N3~$)7#U#2mHFfv(^$0d64b_=$s_*OML?^zhzomm}uPD?5G8h>OWq&`E`98Ej zOACk-JDOV*@DR(yh)G3Ykpunzp_{*+rHKF`2%IUL?ze*dK@a z^EFuX%%!O6Lnv~sQqJ{O5NbKMyq8z19L5BA_=h+v<#JXjDtmeW5V5p(8~iQZZSC!S zv=sKXSyT+QWV-C<$<&pgD5siU&sz z9X={z#XVQAvQE(l#E#6`)6xcQzA~pAKK7W68#gWw=}AKwJ}H@8^DT3< zUCqh84p@`fA`ebncHd$$vrQ)lT*kf@U%deu&#GE2*5ptJ3!62|G;Gh$ zxlqE)B`809`qYWzKOQ@A>U<#~lxvF4pEz>l(8vEe_|M(n9RrOnh$K}i&N3i4s0{wR z0_Zfs0@Y06B=mqI#aWGkp>cNx+gQITs8!}js4c#iYOmL~lon?f)G`qZV+TF(-oG5H3Pv0u z_*Y4OLr2i_zyp!6<-MyF`(%%_WzyXuw|YGw=d^L_SO42z{N0mW406TLyYC(%H+h5l3J;iAc2B<) zij-6UFCaGS>Z&T5n(FIozzEXO-p8@)`R41Q7K6-=P=tnOltNiz6u_tOB|w%zn@3a{ zSQ}8}Aw~+%V^C;lh_~M`!QI;l`j-J=I7CMY{-==t`UX)xlRW@CD24AqztURz%Agcp zN>UgI=OXf~T?f5&RaIF*W)}D`5Sm8DNH+R8=Xyp}HY#CWxdiX50RNjgnW8uWJq=Yw zBn!`;Jay_aN?~SXgKHTzrFtQCG=j+&wapI^1M&B%@GTW&ojdf|c0-bbrsmWSA15Fc zY>BizrP=jEmZQVgq^)qSiL!H3r=Cj+kSPR|(gYqzqtlIBfofWde5^G;00Md$7^FUv zEG8xz<>}C&>I)ZMeQeIOY10znHxL2zF9Fk3kq{U$bp*_p>$ZKXc0T5w%>0Xm*hlxZtHsOUDlupkN$U z0fZ^J@oEaQ3H@DC)!5Z<>uhdps79^Jii(=L>Y~=3fu7z@sMaz>U^!#33ItzP4wY}Z zdb=sefkzK7FgYY9^_Nm*9A>e&xsmr6wNQ~s?cwF3Q0d$`nWH!HNr8wh&|j$*fIoJF zj%7b9h=5UjfN=&-wP#?UqKzuEb@N)POwk5?sK;XW7!iX_EP8wfdi(~1jel)@MM)`^ z4>&ihDTHImb!lZN`}ZMC!aU@-+6xQ*QX~= zoG9z=9vDC!_wMexqeqX9ntgiYZnB$VIA@S--ZS}UWaOieDxfuJ`OEaEsb~F0aN<*%xl1=;3!L4ZjI7_} zuU2)rQDNi(tXWz|9h%r5hOwkIFCOA%vxCE)ELLw4^jLgfP>E^;niN>9IZ_UxrgR0M+eeH&75y&hAJT6YV) zTU=BWp7p=q&QIIhnaz%eu6djbgWS1rL2~tTGiKZgk$1>03_)r@@svYA5`70h>o?M0 z``9VkxRo>G;D0^Mg<_&mwClqyZpk)VGP|Wu3Zy;>zj!e{E7NRlXlQM%twp+59>z;< z`XvmT3s@r3uB7MZ5Vn)`Z*g-3ML4^qgIuK!3RfvR33l!8KQM6lGMrKt2L~mnG;|IQ zzUU~WJj_F9@2D+qeU6TO?3UhA|Jqx3p|_qyZ#|FRx(mHU{%?aTKVX5*N~O>F&0Mc%drYKxs~IU?A!l^oV+;^%_miYgSQhtpJ#^v$MS1D(X5rS!Z%`b7yCB za&qUD`Nn0{;!EcpkH?ubX8qf}UoLuC%&9G>Q!{D2V=-rSycwkQ_N z6$^zud$w;!cdo>>Dzf2@O3AJe)fK`p$gpWc`3g~1TFRnCSx8irAFPQNMD~J_hUDZ2 zkO#Gc$oAL|3alGGi`F7JT93ZVqm9#OVv!hii5qW=VR^#FM;~9Rj0Jf06D}r3@|L3; z&*74h-Viy?fKsa?>%aX3k0b4oq73wTpNKM-Pd;%GxlhiE?7Y$7>L3#a06v7K2a7@m zSh89z;)$^Y|6{4)G11=7(No5;De{rl$RD*Di9+uuVQHGiaGT7EyGuMl!@v(K@J+GcDcMx zl(n}5^dRePZwIKQy;5YAb{Y2sdfk9t--%vdgI-^QULTJI>B8|Jj-5|CfUv7nwHJKh zsd$hIQ&NIbff*RN?X_vB_4xf>@R97_4=oD6qNWBt-P%-t>mcyPH15RMV80y<0%-Yu z-a-(!m-$2A?TmxMj}2H&ffavu;;ne#^&g{6O$rY%J2}0*vB-;EuXGDCzWruKYH2Jq zox6muzIyXb42{P`y{0Qa`8bLho=tAOWptQSpyHXpemg{j88c?gTzngrz+!F~axgw- zrL+l97!qEdjSpVKY_8zQ_bZd60VzXyd27~8m=Fu-a~HQ__-%-3eF=}~PMl;MQlc+% zPd*uos4cP6x8s&N^M3sF{deARaT+I$p6>=nk4g23v-1b2Df90`Xgo|0pCMz0cq3ia z*B96>rQAgq5CCT{dDClay}TS8JUpB_teEz?@R4ra-NS~VR+V1U(%RnL+uv{FD{mQ6 zCRINTv)_)Z@c{UBJNoOoDjhK?OA+`et*k(W>4pZ7Gp?HE5&c{D9F5&EIrO&P0vm{efl&k-PVrYe(QGC@t=+#KlAg?XO0_Z6|^`( zA9QTa_n8G%X%?a{-ZR&)Uq92^(Gj{Krkm2sTdnr-R$)GI&OZ4m(7MkSl$87=U!0x& z3~7UTt~j4}An9`HhnmDikn+*4!Hq3#_HVS*CV$r%Y-zX?0aYTffn{OMd@SGrKg-rn96vsWU6DOu-L*i}I!DY{vU26hv0p@I ze~FdR;HpvgmgHCB(%>g7fqjy#yg!D$uJne2Tvg@AX0_I!z01mL+cHUI%g#(o%gBNf zk&|)xaz=3pz01rx3}7~NDIM(Fn#nG z0BF>+yQQmNgB3C;C`b(yNH^^Ou79HnupwIKx_a#rQjSskTW|BX_H)Yd%r4tg64k?5VB6dXtQ7oY1qln$iPC{3zh#e3S6%YX%NbfCxKziBq5C~}` zr0#ydGqa%x`uM#6Zw4~6vpX~Qo_p@Oryo~RGAB#UlhWOtkvIaEuVXp+r@Kp$7Zg7W zlmuX85=dt8^x2Pj(m@52_|3cTlzr=q%Sh>WaV8`#SX8x5G+- zEDLDndbIaBIysK^uBL|eCOtE$nvr3~8iLN>R9nBhE~h(NL(gx= zT{vV*nh1qEE1^&IH}n#Bhm}(9>-Qaf?b6ZLaJjGC9eur3vrnCr@UmROOJK2tl^!x) zwEP#W443sW36ef0j-^roS2TH(tdY5Ln?{D9OmMns&Ctqs*U;S-`OYT!PVF6c-rnYR z85`VLI=!xm^`^rW@J={5v0*sS=ydqO-V%4gl9lZ(xEWe@_iMNt$EE_@j^hUrE~T9X&__?HdEgKLp5= z1UCv#3MnapZv_0n&u5%Eb0#aRG%FYU;`zKRIGJ%C<@r2v%FAhWgsBD~lvU$&zAHJ=wtj-^}VdKVchn_F1ZTj(- zou?6mnatJ+>ge?JXm%n-(0sNFoNno^&jfBjhM<`}5pmxk6KAJzRaNoqEB2K#{m9Yt zacqNj%7YWfb&BW_QJB~vnQ3z(x^?P2?!Hm?$FX(jwM9@<^aFcjqo5fx!cVRC9Wh4W zetJgG{QO6)PFMTK&nY35m7O~~X|+z~7QKhBvzuGE6JlIFbkY6>-c6^hHlqqF>?6%8 zI7DkA^yDa_k3b5DWX&yY`|w(Hr(Rpa$P1OVbG$3-aoE1Y?na zv53c51Z8Fx7M@1r3}Trto;j0t`gD5w`7@_alDiV31mW$hY>tR%7Q9H|bk3o0Lfs)K zoN}$@b$mJ6oZ= zHw5{fiHsWz`0LOdDP+yjV)2INsDOn(6Hl5WF{r30E32vq%5^m0Aw6SX$f*Hyta z5&3~@s<4U#7>kQhQHm5q?G6Qzq#|m<@}M>v#UPVOqcM9Sy|l^8*Ino2?!~K}-Mx^Q zpF9%1KqZ_E0RaYC8KkfnNg1T6CuLBhLXRR1^7?N8+zo)c8E`iM?gqxGRp8RFsjPfD zpMD7DyWl86)%*1N)9Z7R6>~14=GUg7RCsDA{=2}UF_pQ(dZ)6sRkl^uCo5WOE(t1A zY-DWYV=&vg)C=-z))6;EetR8pL*AJv|C5thaN)v5626es`fOG~#@X|vvf)F_YFn!S z7Zi*3dK(-{T9}4QQRkexm|s-pp8yT;9Q$lFc^ZAYem&LDJ*!DiMJzOEz;lA?rSoZr z4;~db53NOeWp@ixZfC zC%U7`qQD)3QpcJd){==BA<{Fw2)Iqe2vH9BsQQ{Km&(ctib{YxPF~<6J)P<=Bk2JW z*;QPwu2)h@MS}u*w}`ejyHbZrpSJ7B-mEsT`d09fTJAawcP$Kp1=Nww+BPu?7D=UV z@Uo>Vzs=x05~fa_8t=iSf4gGovS43Dbv4&giWyMaf;lZ{yt@P=nkxepbtP9|{<90L zGnjJ%p+)m}7q|~up>OA#o0wLUq6)mS=jX%c5@5Xgh9&d&M|#w5NRSIQs5aEOg-v)r z9$)lR+`%o?tzM4di+AtO7xFh4iv;^|+$Lt@$Qe>r^-c{-5&G4tZN)4R9*^wUq< z_rR(hq1E#C>kSPpN@q-J?WJ z#Z*{ug?BN#8Jv;M1F1UeFT=6!QVk5`g72k_B9%(5gD%UYXl5=v9$4Q5<7ngcRtvAg z^BQGZ*TEA~)WEIY8l$Q?1R&~auxfGp!nmW1JFtwq#yHg#u0ozee|KkJ=}s5v&UMnA z1=5{^Zrpiy-c{oBQTGp|3kCfIFdO(2(!smZJqlpn?a>W?2#6=Z4H74pOE+}HQ}n{` zDf*H8mB}*~q960RfI!1CMD~{!WOLf<}BpGw&;Qq7JQR&`Ethk1#`74 z&ga!?A=f=YS)Dd%d-v!P**QGO=ht}XE4Gk>54=0c?juFvTG$cj<&ZmXiSp&7B zsUruLt^Q#zmb|pAvfB2PsAIbi=5ZcN=Zhb1K9<&sh`-jfW1By;%wK9&8ie3*X|n~6V|&Aih}m( z9o8!*M2U)oUE}(Nb?*ag!+|uf@+;ST2RvQ=7GtiI{+vo{xfM;X+#*G`(s}S*v%)(q% zbA9IaH4)uoqjXi}JHgKEEU(f=#dMEYyFIh62?O10{P#-yO#Z^#Spz4hXE zTMn1#Lr{9-O9#*MrD0NtQ{n#a*FQh|t>*?~_}$=?C`H3#sYaPaXoEQ;C&ARHg9JEmcL_|1SxK>#ex+zY`B|B8Y#p=n;F3)Bl-{#JU-0^H~ z;8t+!6!&x%5|qluRLFAV?Hw*CtLh4vOz|S|=ANK3QeUP+2%xsb?j{g7&}xR+688;6RBOZ#UW}a($?%ux=w6HjW+J+84!!-%W<)ll?*` zT++T2MO<2%z~aOtt~bmOo=hq%3=_l%v~mNktYT}RId1I{upmolIt;aff_=dI8QL%9u2cPb*OVUriN9Ay`wQ4<0{-Dvc%;Ts!=P(B=` zJo{J@wW4CpCvY@CWWN1%URb-&+G=s(Z1rU&C%0|=@ypfUE)yU)Pqj~SUa`Lwu{CC{ z__t4?>9EgHEIzt!7}#n!$YXzByY1kaCghgPg|zazpugU9^-?VmudS_YT|AO<&f(S< zS~}aD0-e2}T~0{KK8?v<82QMYIj=t&nZNzdrdhM@@71+w%iOth*PN*hoh7o@hanPu ziR|RUzcXXH4DM^5T+$0q1+g0fNh{G7#~shw2jNWVbp zb5sNk;5g)nkdngT8-=~82{~k?RPR{q+M2{6$V&x7mZyiDyVHoIkf@O-dFdc!7&bF9 zs<^vTUVQx0+uZ}<^>EFXT;z-<7WsdFvpj2O0+%V$bUtu-CUAKsaJdh>U{SwPs<{fe zD4U8SBG6REY(ixT#VMyVJhn1Xbp*K@^5CiN+{P1@VngJk!h*B_fQf}tnxeoSA#AHn zp`uV!B~Q5xX%qd{e!2a8`K6kASfcB#M|OUlif!fQJ?YI5Ivo)ax;1+%J9UbTxVjJd zisVAQ6SbDZh1QH?KSA;C{9BK{LlYAd2lng|;Gwbb0SVhKG*}E(C$>HE$hLI|PX1xr zfcp|;Vbf`-xsxgrw(2Lw`9d!{lVO?ekr!_$Kk8w zVNttAbXvHu6KXO@5zESFI%UJ6OISF`N!Y#asQZW!BNF0DqK^3D}OJ};x9 ziU>EAagmTuLP}(nqzbAPs4IfxAH{HPhCilcC2vKdvv!+>w}Fsx7D~U{(xRX;3o0ar zgkajz-UgpoX%qW+d3&J3Cgr9>5<;^lRbxZ|s*HVvZc++aN(4;=9yLeYOh$4P`oFwO z#;{j`VV?lQeguZS3JiM{7}h^64bgI7U9nw&4pUA&l{k$L8HI+BHFBfs#Uf5N1h zL3PXMEIor9zjAZn5txkS-eQD-5UFN?Q*9`8(}pU*yeTxoOC|8g#)RN*qpht?sdOM6 z!kz76JhEOj32e&<)I98lOn7qk;nDV2_<0*opFS;@0iJ1pUAtiQ=imRfd;d=BNQ6KS zmTG$UL&m)Okcj#vrLbrblA&~w_6hb`n$*ol4j+ct6=!Qi!(MyMV(i*4*yGA(2tG&3 zs;UCr)fV2|IU>wm;9;fFsx3yfdmxI1AHfecU-1a+(^-t?mSb0|N5h-n%ymKJbBMHS z;El1Htx}*$t6TxNiPfS^1(ZQwx~l! zKXm-~jdH)LnK9}%tx4=(SBDr-xG7+}LTFlA3MRYqQblcjWoaRDg;it5R3Yk~%Bj&= z%bng9Z+Ex%aNfRs`(=>CmX`X9XTBc}2i9J=OnZ0xM1IwA zKytKw6!y_}S6eeJELdQeU1N_z_IjHjvVh?j`t?{vX%|@viWgz}hd)jgW-jm_DikG1 zmHd>w5BGX$ynUGc84V)b=BA*8f#~ly>Fq)Af@ux#ba7I<^cy$({prXa7VoAG!%8f# zyPvXoidSBHPs+Zkc$ETtBA))22gnK1tfr$l_h`2l{I~a6S`c*#jAzB!vzXgY9XXlT!HyVBs{s%7&& z_MQ4jn{`}=i+CDj1;My0PtM%lsh~c1Go7k0ee`U*s z$g0;=tCCTCawV3QFoQmFOLL1w-B!zEj)0-Sq4 zjdB`K_jV4d{&9(@ot&H7?q}9fvXI%cSqW-m{uI&UCWlYi&NniryG``;>J@*ueZ3}H{D>{DQ@X4H7Lr~%RLYJP= zQQg9u(=&KJ?|d77JvS{azrmu`2X*sv)AI3PO%|wT^!MvlyUyVvr`p)m&2PXAq%RYP zLWHnlGU6|+BBf;*D~uSZO(|HmB8is+u3=4;Hd;7=j)aI%Fe2S+DFfyS?nvN^lEuV$ zx7m5*N=1GjJIc_)BNhy|%`Rg0!qr4@#fl1l6gw?Da4JHWiCP@$K^c8rk-!%?iIlzW zMrQ*M6}B;QvnGcmBk&Uot+df<3KXU*o>*OxCzb(Q|DPHlVRASyd3Z2tO9m@hI50UJ zP5~xLN(TQJ*=vSm36q4sNY;;Zv6)$U*%$ZFcZ@^G4T3;Hl#~d!HB}}jSJu{oh*#Iw z(fQFNxS)-9?C-0OFJF!n39ALCMT>ysMH$yrA-)#F>#xsweWdEz=F~+;2M<0fG9&7@ zO&r-1bUCo+2s$5w*|kH;Ku49WHX&0s60xIfy)K!}6r5gsk@)GVMkrD(jzM->pIP1* z`p}C($BzezCTS?GMWx92T2WqHcKHApZcso32SKL}3enCni~^vI?Dtd~-pk$7FZ9me z$Q;StZOK=qhz7-YwwA2NE0InB2b~lkitq}uiQnqsaJvOPn_3VSS`l_M#8L{V(1IZ7>MqrlD*-9lmDo(?Kb@r{Hj_SI3=^ zBiZu3X8GQ7`QCfvdn5kKz12VfylI5&F)&Z|7%)NV#zmuK!T$V!Hr zoaFwtA>+wE!I_uueL%jqK)$z4zSn%$dv%x%a(rsL>?`n!><+RH$r)3VxeQ3VuTle$ zW!drOFwCV$tO{MRDo`f4NURF3*PDQNpdpuQt~NF`L0!6A5RgJzYr?_uKPEalin-y1>91B-U-b-q^d}08ty`39}<7*fc@lR0=_B3PvU19^Dnn6xZzYn5I}}@(W8Y{(1adI?@dq-Q0Ap6%|nK6VLDM z;U>Crl+6w9weHLW7u$9X3}MP{-TEX7x?w}$34c%DE}gqM^cE`gOog72--rr5Q=w-9 zdXgfbt@tyI{v4z~VS;RTB3L{Cx)Kk%;?U{1xk6E)L=IAr3{V#iaUlv*g<7#8GGgF=8lN<+b9Y53lMT9 zA!H`pMsVoCCeh&p@=)M#4~V5aKPW1eveHphY)3>~GU>`@Cdb)B$!{fds}u*T=rIY4 z?m}EqoFMQ2$#izlOk7(hIJ{b@C3kpzNiCUyw{e-ci-;i{&*C^0HRA+ENNP<7h3V-1 z?RGyYs~I`FQQm4`m6VI^Mpmkm0Zg??qKjUT6TgfK}Z^as>h-VpiZ?d9Ver+|w zYh_B_5gU9)${ax>P&fDns}prv!wXG5s;H z{}Y7a_3$X*(My6b9s_95xw82n`{1t-?!XfTAbgVoNoo9A1TqpsNW`CsLW&Hxj^E!- zu_QeDpK|(1DFdZD6!d)Qg*#k<%7mG8mQQ~I9e3QFe`uF-2RC`+x2a0! zN-28)JVN&oKsnqi&s)7?uIT{*TAPsGhP>#0*^a@O3C!VxPgG*A1Do<+8VeN4rEC0& zH0c`8b?OYSp?MO;p%9C$JQQlH|XHMz+%x z{sfj19Jx-cf5(g>*ix2>+v&nS0Sa|Q@Tb6A@+kr=E(k367U)vH4Coip1@hLT>Sg=z zP=wV_;gZn?e|&I`=kDP=(3cIuTtR{~-v0d$6!kA-4`mr8zdwS|4I0UPvML^R%p6h|< zx}b%W13DScO{C}ARdxl4#V%oCvXY)n*$F5XqdzWC2%z)=ZUzS$!e!x_nqD$*5q@zW zeWDjleF`vLARxVA$4GREY zSV%xcMR&U|zw_sBmWm^gocKZeILH2weI_p~|CZ>7M)0q!JOr)fk~5LHk?m8o&0=xfJVDW^(=dl}QuRP+Wd7MT4x!NSTV`f%4PI)i&W-foMP?Gi z5HJaqI0^RT+y)6zRy0AN#8y^{R7mhUcZUT{hKJ3eH726KNA4cyHjNTlK-i<1Hc^A_qI|;NDR@6YugGtZ|RfVKA z%7oDn<&@!sMoHe4*PB{xq#(k&Y;lKdi=ufLNF~zfNj5(e7eZ*NcR<$Z5_|{lzD}2VXh84So|(%e#K@{`Bl}<=nYZQOM7$+-_2~TU0hfWcD?s#oYzTChtW- zohSXXBQsZFSd*9jE->&fs7aECk`)`AUm6&yBC$mk+-KZd6odOQoXVWLC%#Dr5Zc!w z%lD-5z05)JV3Lv*Sy`C;9|-YmFI&&O+S#H~nqWQD85Mor*74N_Gu`4=OMg!{}y(=BBu}B>l;2q$H;ETWhLc(J5y(RDJQRr!h@2yuh z&=dk5v~77vgATo?WY5OxVMd%5sp_F8b7(#xKOuT4&K{@S^6d&7-&Zd9^pjL#rKhU? zEGRy^aX}*TD#TG4$mBS?n_%wTxnKOy{{8#MB@XD^xx42zP_=q=MICCPQ;?Vk!?SK&mD)80?V5siQSPHDXx9|9s}~g7NPwQ7S6G;rhk^{y zaFe#)@woufd_e(Z`#d zt3-W8wL5&D%gT(dRrpj^<%&;^@09eN7&$H`W?VP(#OKZC88gh?o}V}=F>%u1h$(ZT zqTYNnV(^?PGw!=@#+cr(z*&Gp>Q*d03%TB7=+i5pR-Xu(^z=@hE)@p^iTdz>;!El2 zk&%~*gXmKbeR6!Kr0>LD&?X}>eNk~iL2)r^LO{JtW+2DsLTC^Q3!zUaF0o=*h}ns0B28L4AozaBl8Kqq{pQNI-k+3dOSHvn$jOuB41`hx@*V z2hlQUY5tez%M{EPSP%g$hy)h+0tg11)WBv(rEBcqY~rUE@U6;F((NM-V3mce~s|q-oVzU1RaIF%bTb# zFSVjZ7xD~U0lNdELdPcRz04*Qo+&%lP6m2f9rh8&V<$a`*}Y27nib7uB6}&0E!Wf>EA@EzeKti<>4TLS)4yg}` zy0Quwd!SYZ89)k370=l;yr}c^Hn|v~NXAsZc~ypAC%~^eS`&lTbOQW30e4jpWcR7r(_)gf75bVo5Xm`8z5v^1JMy?uP3 z{Jisb67(tRi*&~%HFRicutL2iFrz3yI%SEX5C^L~JA{vQDAj|*R)Lkvu_9{ROLVR0K6Wj$aXc-3vZ_E@}aYcS7t1)4y^{x}~6;hj9s?{+=h9tIm}h3&j;vuz9Z z^EkKL!@)v4CG_o`mKM`HCN0gS0o6tuT*9FL4DT1B(`Z8ajYpc@@uzlt{`u!SP8C6O zT;%voIQ>RsBM?2%!iAzxawv}DaeVLn7vZy;&8~6Lv?HuP8!lV#Bj3$s{4SAuiu**` zpTc=4%Gv$RbtFQ%pIgLj;!Xi-*tk;%k%LQ};DyD$4NxN~03%0sFIe=z0EGK`di!~M`udrS9!QMj zZUjB@3kX1P6b?S#zJ9@>fqpUn?pe~zAxaSnZ0rMU?64!4Htg85VdtJhhYs!8`RmrL zTYm*6PZqfGh(s?g&dW+i{)qIfykb!Yr3X+86m~!x7RE$GlU)R=` znX_I-WI|@_F}66iiSKJN^_`Ol_oOc&eSEIi9UP4Gr>ZY=IWG50oF%(nDJW`bZh&pC zp&6bRd^;i>jVDfMwPj^hwGB|DH`G>@iLQIsor3cse9Y4`v%p|vAtj{I$tgG(8UC&} zL*8hb4?azF{Qs5H1y0y799)+k8| zd**We`QJ7lKm~$l?9&vVgA}4H&-GT%Ok~VMop<~5s^yuU4RtC}H@L6fntiz@cJRbR z1ko*p^zoH)_6SfyBkI)UWik&ShZoDtt85TmOG_a1VI?jtECmfLr~2APX?lVLf^mV! zfaTT|6||}9y}Q}f3<~H)MYHexl{Av7yHPQ@42ex5Vp2?tq*M&;kl{&Ecod`mFh<`Y zDVU55wi9XCqywigT4hjmKx$vI3!x*yNd^;RT-wL+2wZ#GYwo^{Kc-zNJYov#t~zS( z66$iWp#12jwZA05PySK+!-}OK!UFGZs_FKsBfYuv%iU_64Dcbo8$cO10Y3E$*)N#b z8`w8)Qzs0wwNzC3jv6<@ucoq?b&GrE(90U79ZhsnxDQ;VmUFN|m&P z2bk1%z}{$;Vc!9I^oZyp0bYhXGn!<$cYrIcdE}@6@phSU~Pm5qC*WU|0tf zO*W}j$yf=buPjiZ$XZrEK>r>6 zmW<3<KXol-4iXv82h>XG8=1aEF{K|hGA2GCdR7`l-m8LGSeTI&@ckH-{Xy{xt`lHdX zbRn4!_F33pk@i{kms#O_6ytC%g-Zg26OkU{E&9Fz#djL5@OMAbg9=D;$dhlo!|;m&^%4~@Zemw5+v z{J8IYb;TLq9(|oL6$ReSrK;=WRK(#EN|(>i>|Eu9tXqSJ&rSqlti>!`rk^!(aIdP% z*?D{n=AvM5^7LycOGoCWylOuWZHikdB=}1tz?Y(m7l=YJ=AvK#!6K}ng_MJx(9I07 z+=rxcvllpA@V=Ofcl_#RKPd-U(ybKdwAFh0yfynYQhMcNThE_H48Ku@`fwJ?= z*QDdGw~KMMAGn=6$J%~?Th31VbGGkn?Kpb@L-Cw_C(ggaIyy~ICMKRg58JHrV(Os9 zi7b_++84w9X;A7#F;1c3UVnWo{(b<}+*KTxam&~hh^1HAYupD44c4`{1?5*?jTkZ7 zK9D7{U+v$sP4@ow{`fl(=f8qka;>*o>#M9~^>6|#EwfhDBME}BwN|K5(LqvsN5G-;+fU8J=<*=a9-NbE?WvXMINo*B2A2){!DaiH&Uj7B*&^nl{oA3R<-AOhO zs^CaA5Qm)&oHua@d;xG|Z!(C5z!y{$R2)~lrF%u-W5Z*^A5G?}^#WfAG~SOox*QxT zLA^tyQ;A|s!c*VCQ=h?8CjkzV0EZrV1z3%8ka8ZfEj;$34wcG%(Un@hvTpku5lhz@fFZ z&U*HIQSfxTeYDe=y?eK;TD5A^-u>5y+wHIQtb(t5eXH0bN%P_ZUQ@##Ft+sXV?r2F zBO>>wjT|!~K0dyG|9+^n)IG-aiHT3lMB<)D2gCXu2hT038Us!u5azGfYu&=5`i`F9 z=f~VS_3nEQ_^;@`G2U%{eh($}3=enH>ct+#$b5uML?u{8A>=}`foxMMIq#9NT-xvp z$*-!km>ot<>lCK8gi6aB7!o)Zfy79>HB>?hC1@{l`OwPGi9OtD?KLB_JcL|0^BJj7 zsO-x}R(5H_FC>>Kc=JX4FVB`Rf$VcVgXI_)hs>hlRXi2&gPUglo)l*OZOM*`{a^V8 z*wV_qS`HSn`SUjwAsaY9zV1S|(SpXK+l40&$ipF$!Pk4-vpD=c4UnJJ)F?iHH z1IJ8!Y{F#Os{Sz$vZ&}l3XsvwEi~ok%(%PNYY>c6Dd2Oa+&Z14Hh0*bsj_m1O-YrL zY0U>xl7ylVRo^W%H9@DK4EFU67TL2%R-tU^2J5x*Y z(8-^qHxNHM3Q(an(IL6IyU~EqMv@FR43)yIqPz&gW$&SMSTeSFsI=Zy&%J83e)Sd5e3sxxv82bY zYx+-l=9y=zs;)ZuXs(xC$6yaRabk$bd;){(_iX%O)0VRe|G+^1P6-b@@W6wKU4jGC z(&8!E8M%o0!MkI0fWG0_+K)f}7@okMe1pEF0=SqN9i1t}Wb@Rp1CpYP?z~HAW*@Kmq8RG0os-`A`p$T@&BxVrxJv)0mJd^W$ za`+JFy>+#TF)_8ZF)@j?q8_9P6&y&bgdx`1F%DAH50q>W(@~Xy^g&Sg-1?0)s?^3{ zwDC554r%M4uftzxLXx#M_$qQr5He928tUW}XMarL6dEcWAOPqB|Ezwg)!5alTP}_Q zZ@rwdj3f7HjdYG9$iGF!b!(-i;+DzWJ)6NdC(Ztcx)ZnpN0@rR)) z7iPHpeA0zNS0;}?$eMQa#pX>Xujuxm;`__V^7yme9peu-e2l+G=bkek_T+{Pe^MI% zLV5fPG5#>@mB5Gv<+L>Zi1?!MCnYPYn|i=71tqjJ{x`po#{X6i7)M3+jvd`qEDEd}0MVa-80)0h+zeqaD&-w=#=HzEtf9LsD zg5fuAKR9paBlqKx{Ntu~Yo6!7pN(horA6h|qkNl+U}$#pGDXSf^0UbYhvXRIuR5&v zum-&*aJ{@CwLpQUh+|)f7Sf?{%6Yg{Cvf~F9JeAbC9PC(W!Ov_THXh&M`m#9gIYHYN zi7=gNliGJRJ<5qAy`IzXa zMJ?kv=%+%$aqsriXVR|LHymB>z;R!V##h4eKtun*69PN;dVIVC$Fmz6vLzh1g7GZE z{4OqX;5ewIO~P@L5h{MnZxn`>alGT3e`*^_Rgm>9kLXV_te}`KXq!#c*VP?9eEe`- z-SNZV8FZ$yya3NG9#^ka5LJHg!Eq0oOyeG8&w?t48w+iXxmu@iqU2aYVGb)MvXcz{ z40wUINt7JxGt6G@M0RfdMsAzWEpYPzuJ`~~^i&&J`cG{W5a4KYJnZmQG_J1h>S4KQ zo&kAfT`I0z^$_(Bg8rgq9uLY*vlSY{8^fKnxuPDevP%D{X$W(KUPSIE6&jZQQ_F7o zLYgVTXjek8taTe4tYAZfIim|kPX*O1m{MYM_+$gGZfiyYZRG7ma7mjBw1~CUl@(Vm zQ%%Rq%-s#YqK#Z!!5K+6Y03;?VPS@{r3ojyVA+v#I-v!DqN1& z(yK^4A*rTuRJg^&#Ky+N^r9^~I;L0e-Z7KhYAW+G&Ky6Jfr4y#&>5!ZRl?+`*BLNi z2A$pj3!Oq^&}$*3X!XtrJTM^Hh+k}M*T|?&p@=OF?GzQ+HMUQmE>WQf{tJ!j(&uJo z$GdyD3@6!7@gZS$+h4Wrq~q_ni#*!Yzy!FhF2)Te6kSBQ`%86NHwe!@2Tolm ztFNzjiWoF#kRQ*QiV>;!$6C|?+$1u;(C1&6{`iFZ$Hv{$FLBf;g;MM0=M&nqZ||-l zY=+&~q@#DwGK*NKKOGU*pmwMP4TnJ!=5Dg`-fQknN z;@#Y3&NSlo@8llSIz@hGBqRKFt%oKHbPyG18QYhLcXnCY+T0wQ{1d% zs(gY}xZ=ng;XJVX2E$J}e zumceRn%zIt_EuOH+L^lQ*zw+$>K*F(uj_N_bC}xC<6)Pl1OB5nK5y)6?90^P{h>EK zq=qvwC%iPo18B z(fSJ%5(2-f-_Ma?b6*#rZMWC4AGkvgL7nruz*iPsww{0@N#K3m{7hZwOv20m_EZbk z!sMq~pnJRdR0}lk$?{Vz0H6F+I+LE-p-ZE-{zaE2w^gCUB$pejNIZGVbCFXqkvUqc zXa_dP&85@-{mFxm-uC3(@&CU)dFWrB{EuxW{3Txj&s*?Umv7C@24Tr2Ujan;LAY$Q zi@J3GvUQgOvo`>vH6b}$Yl7-FWr@1rx`bJSuLjOU)P<%tXi7|V%w#`NS7a?I1Ia8S zF9C#fLVV;oQRimz@C5PnWbhKWaaD$0lw%%sK|7*A9il+z-DRm<3MEB+FolkyMTifD z_d*IXVIj&@qvAw&-@vpJIptY}1%;9v$1KfA9XIhlU!jrl%c7)^pdwv9C@HgG)fWZQFOym3(uuTEweKV5hLi zvND9bAPf>+;ZdrD=TcBX3O3>9R=WtMillcbcoA{56s(B8-SIOCJ3`RIF~PDnDFi(X zp@>7`UnEVE#J_+GLEIjA)jX*FN%4wIc>9yFLM!YRNO)MRAb|1xdNvm*!VPaisRXv@0IBx!IQ{!*q|WV6?wcX+Z$1PT4GByOcL)pD+?1+B z;szUn;*l@|wbG$NPT@Yx6ct(P#9^om@rbm)?ELA4#>UH%NU(j~r-^Vfkaq5~4=L%( zR1o8L1R;p4nG%c#F#iDoPA)z^n>YV*-f2J*+&>?gQg?3U%9T*^r>OcP`C!fX5#Uib zvu3WFv_~O(|C<7vMD?maQs9QsOhlfYpV<+|zMZ9@;KvWiaS&StE^-1qTG)3Mm;8+j z*PXt~aUmfguDRbXa-^qGetclU`w7UMfjuR>wT}8+4Ju7XX=}O$NVK%J04#83133q0V`q_HRfmXvT+8=38zReC47McTMFd~a9j$ZwY}u#j7yfef~8 z-ma_`ziF4BA&poEPeN4mud7LRe&%n<$~KH6@3hc+^}1x`I`W*l`gCACTg~PsE61{I z##_sZDvrVNo)$FafrI?COO_E*&fl9u5Zwuu=G%3^9g{`TqMcES@T%r1-!VfxIVWD6UKxPRae$jo>jsqW4Gw-R5=(77t9SBLD8mc7JOh%ME(*_Kl1j4tmEw&>>K16hDfinF^Wvr8!kXG%dvn>3ngzu_%u zJ4+FI_9E|L`fBZVP5-L>E}e3>wx=+sL6Hh1a#KVOy8m@@@x{))e*e9!m6fvnG?lRL z$9XH~#`WRGNP8cgzwt&qxTd|}nfeMr-+i~YxY-noJPom?=Hk8Ity%L+c8#V>TwGii zO-=SMYv!rzp6JpgBI4d;=9Qv$u7|4&(kK4*WQyFkX!Pv~^i4qDqJfXmjPvq@VH`pe z+7H2rq)_?`uX9hRLO{1+3MfS20oeoc;ap)Yzz?A}GN7g~L@Kh9(SIm4Sl4&oeP{Z8 zgF=E{dG+ZD10gbpg$K`mJ?XaQQ z_=Z+^wrI8btJej$_5gR3F4U+I&~J9;-3@4(pwM7}@RVQybq*zdk}M5HRMNUoDfQlf zO4gE#39MZN9maV=!Gl!-#s-QjAd7>Wk-YBy?)UQejKKJe#riRYDcJ~&&j_p^A*6Ga zHW~}cc0kK1or05~{09YAF&XkC8E7~QEmoWW=9cJ*pmmtq;Q(mtKk|{ulP5p<(7pE$ zfn?jSpNxYaxIA-zA5c_HQ18KyKRu{xgqW;Gc1#;R1jeLee-0PlQ!QViktjLo96~TMoiQ>F zodYvUqk*xY!NA`_QB-3#&^F)b>pN@K^k*JP>WvD8y+=Iyj40}xtI@COW-7&`61g>N z5@AB7{S~cc-jcQJ)-G8fB4!moG5QNMV&%F z3B!PKw|pmKRA00kEIGu$o0Z%A2y-PQLZfLTaN~ep$vy z#)qc?fhIrm@JNuKks}{|B@qc4)KriD3$epY)o@T2j$3PFXLgblGqU%onlN0WRF9pN@hQKOIGRUxgjxIe4i^K|RV< zt3Fzm0Iz~M_UR(irZOdj##%vDs|RWI6}48VnGyX|RG0z+U{`3N>;~!sC*;p^>pP?M z326O9w7xT_OJ~NxmmFF%nGHdqN#GO~0nVL334AC3h{;(3$Wl-M8UrsWq~^i$fC&c= z8W5+zu5nOrjURt+Ulfj?@!XK!;qYX+-lPtW9y0YAceM}Hi`=y<*BW&e4}VwJSfE0z zd*ijd;~*txs;WvsN)!tgi4+Mz`@8&t1&dcLNdTMjg8g+}O6sRS{`lk8&*n;54rke) zRV?{<89lMJP2IvaLgp4&2fNU9MNXNItQtOq=o5O1YcCi zumzuRnod@c*-{uc*Jx8fkBw;r1G>a2NK-20?YG0iW{;cz2#%XLvhRZrKm1VA;HUx5 zzWnl(d%F68Rc-PT6%{;>I38Q04F)wg|H5l_+n&9-*3eEvhYk&Aww%2p`A|N?eeww< z&3g}Jf-TraLR0-7Xv<5$m^pAZ!#c^WSV@3>k!F}?$%@Z+VEar|1cPX*)Ec!~!CTz3 z^MgA3X*xyqxhE|b9_2L(wSSZtLKMQW20@essmq6>v}6o~%Q;Oq^iq++>~oC@8&O^u z6BNJ?Ac-{PLTI+ptdllpE{R2Wp#tT|r&6{w$ z!UxM`bV5QxSCMh65F(5Tr(L>w^^c#f1QU@YQLE{|$JxjRz67HQGf67fiiul)4W{;T z(h(Gu$Y_TARitQ!e!$H`#wW)Wxd)Nx!8r6_I(p#n3pRTIDX0;cT;wG~NBr|ZS@L{o z;(?lw!T?BIoS!N^USUzwTcGG?;T|3{XLgP3*Kc5tZcz5moH^s^`vyhQ=dNAx`K6a$ ze&*h|D3Pf}joepth+t^%ioSMI^T@%h(1Zyp>2nV_gG+k7vG7z{g0q+Gv$(l1SH7(P zGL4dQ0No4XJOfU1@7a@qSg&&Puv)#Vup}oZA&&F`(C$azxBDuyh^%P=r!ojC0|BTk zl~bc=0hJTYtrR_q;OHPtN)Ri|PIRm=uP9X-Fq=h9<7Trpd$_^rL7I^uVWf=!<79CA z>#wABnPQ8>0Ml^5)QB-RGES=j2tmtug@(WgTV8>M9I6H8+Sc~K14i(aMlWBrIyBTz zrvh+NRO3d*$H%(~%xx}n{qxT?YYO)5J9u(0)mb}qan~Q{kEpgC`~#Uw$^@pEtD!!? zrdMc@a)oH9u`uMh${(dxd7)KZ(W=g970URsD8$A(0Z-8`TDLi5JOcr>A>9Z%OqO&I zC8!!0w|lqXqDM}+9b3NLw(ZAVCu;P1QCSfYQ6V@V08f16C{Lk|)+0bwyfM*g)?s(_25Sb>Nsa)LOMnRDa4M)A_xQ7M>`V24 z*q5Jk8aN7^)o)@CmGR3}_NDzJyuGI<<=fhKUAlV3~vVwBr zX^K=(6P@G%Ao&7xT#{!I*{WgNg(uTZTeThw7N{-OEG&c69+2F5T+*9y>^nu=pafW< zxH(A)NJsV)+axMm4z#qOB8i2yh-2*WaMSE=PXKO@M0Utn1ZsrZ&Gtx<*M=f zoU|gmbcoPlUBL=Z?E$r$2L~OZURXn*6Y97qjX&`TH2$F&|6BM3@{Yr-(@JQGi7nx! z!ou8awuuu_#aHEG_Tl;PP(QsED?Mng-9{>#dU_|?1L<2x- zK+`cJ=~zw&B0B_F@&Oc6C0-juU47%MT~y8L)_v%s@H_vQA*+|iBacja^7)sd(=W2{ zz7I^B78()|9MnS$xNw1_va+zQa5gtKFqSeAO^Iw&|eE~M1 z2XxG$1J0#U1b0BaAz=xBT5g;v#Q=~Su+bnlbkxy-NDqxNxt!kpCK)Ri0-`Gc(UpMc zLO^sOAUYJU!$zy83`v{=wty!zz_=o9#Adrf2R zH1NKJ*of#MqeqYKZt&_B57vIv(Aa1fNWo!Yk3T-CPme(SyiTKW_v{*joNHotMH@vJ z-H_YsYY`=HlqB}_?DkekVsEakWypSbQ;4jqYd3@U^JlgKaDEqa^sQ(UTm!%O`sXue zmaSO3^>+}rwcngc18_199{F_HhK;{(`Uzn&+fVM^wg$TFkC%M*%g-pmx?;sgAFcXq z+t2vB)yBaa_Mc>x%FH-Yq?IdkSbk<`VYf8xmr!+OIt^p#h71p_dV{qKMF?bn{X z_n{{sagFUYWzKADb7oCR0^^e~e9Fs-(B9JivF7Ec#^bht@nWZc!fF*oOrf9|VRefD zpN_kWw*<;?fd$}cs0akPk~2j3=*4tLT*QKA~ryG8O3F!mj(@kMeM!297tEBMFD4pg$jK6k2nPv z3`BQ;-4O34f8U1Kz}cIKjWY*c(~3&s0sePfL3BGIIlfWg%m0V>OY0xiv!8?Yc_G&4 zfxza0j0=EPM?z>6-P;O?(xF=@CA-%?% z|9RpCXo)SfbI&Bf_yZc|T6sI^h0gm9m8nfaRY_$EUqj`RUpK5slFErA&IerA``pJL zCqjuj37q^}Y5}!imaSer3?E3#FvI?$=HpcxHf&h42$ch%c~{J{h)Gy`-vXA>{x17i zR5Ym7f@|pQ1Xo|xc!%zk7#3M~>TfNEFfKu_utR+cib|J&P{fqda)wYrgatwPkqIT{ zJvd^2lRLz7Ns|f%s;o(sRH-CSy8h#>GTkEI_s22PPhzBR_`cIyApDTBLkR~QNMM{B z!okS9PF93Lct@@lTqd75injz04FXMr8{uyQT&6=6<>6%P+tD^ur~{ zp8mr?h5-wN!Ye*(5^XHSr)e+6)lI{?0V!Ms(s z0^R>_fnHqSa#=Lr1@Kr79WW(!4#J=edz>OV<__?KC;gjWOVn&C#$YJ|!Cn67kowHNK6O`)Em)QkU^rf08SPx|c zC>RQgb!^bOw~NsehmRcU*LCp3DQ{y72@4-^??Vqye){RBr%j&n80wCWn>cRl&;gIX z_~N7ehQ^|zgL7{&x&`*ytW$sd@kf4Di_KPThj7^1aP`WS8YsG%&ZYcHL+hU1M>B&v zAsdYTN@d1wQ8RzZich}#)G~h<@SikSFCwgY?b@~LKb?!rGQ7AL8!0K~E}c6J8ZQ_j z@b@i+_}MbbEJ{CrRkPx=EqnIt*}YYaMl4jlJ^~uk9zl>C%z>U-HOx&~5_r`uwM_=I ztEOAGnBHkQP<>rfYlFLq(NO6EZ15zcjlf0(qGLtuxF&rot)e7@BQ(Gc{R63TEcnj} zWT{Ans)@m@W~b070F=NB`Xff0Txhj3NcpE^r8G|ERSzp3Hd-U4ZzcW@rl|kz1rmk~ z0)~)XiE{c30)`9%h6FphLBmhzP<5G9O`y-zM=bXRFaaY=Ma zOn}9CczjY!%m7q5b$(|;d|y{o9KG5jao9`oY#y7ZN=WF@!!<-qhLf2~QIo~X$zTW@ zI(Sg5XJdOg^eShqJ$k@<9L64}PnIs6KY!uk6)V2qngI3Ef9&t*R_r+V#~-_P?fV4L z;O`K7r26o~jT=ARN-dgea;vSqf<|8Rh`gi@4h}Xti3$Y-ieLwN(?HN*oxeybCiD#- zNdA*_3-)=Ka0LFEs|q{p%nFXASuwc7Py`N}V6KFb#@!u-Wbi@Kqr)@+iT|$mN)RTE zj~^iH2MCkK#|sda{O4#m6|!us`>^Qt!dzTuPYRTuD8mA280V!BN!lA|2aZy+F)Pk0#d#q%XkIU?7<5h$TL zE=z6F2H(uVs0~Iw#bD4{7_pTk+Kclz{xObc+_9CgRNxu}WN{u-lFTd4lkTSohK}s8 zI`jz{D`Dma%#;WMu|mrtDBW8c$0x%@Sh@fE% ziE=I%4?N2Q@Nt1;08En&3>YPmwJ*~So7vgpwa5IrbcqK&OIAMlqzXE^)5{p#S{|{_ z&@5lL_3+M}^XJFYzyV%4r=@K(@jkiCWs0v z^I6O`!Wl?&7=B>ewr$!(F?#Qaid^QmSZ~dz$wIe7#{ja#E!LY*y;M?rfa8`gNpJ6m zw@(4oW&vvbFn;~OoB0umgKm)ywBXZ$R4NbvIE5drEk&T7#33X17QI8FmvkLo$FPQo z6oh!=UMw?YiDEs5dwLGMAL*WFj8Us6Pae|`*zMk}+klZtV-f}pN78*Fh?Je{)mQPG z7oHNGdiJcTsnz-QiHnO%@icpQc!h*Ubne=%dyk%NN;hMh-VUZ17L;mzTLARjiup@F z9FE1|P07agD)_89%0q{iFNdp{xA^tfUw^v%qlMz4MJrY=p1*eM=FOY8{P5j(-^Js{ z@A85mSS%F80)u%|RW`bm;@Q$vgSf^km#eOnmz3w`UOb;$uTZwaq|(w>pPN}G`qAQ$ zkHr{t2r>kXmAw4Yb@**_2+niEii>~vV}tPoqu6G*AsYz4j(aAgp1#W7mUSBm5%8t*VD&}dM8FVZi0%PTtu4og0G0B~Y7o~+%L>K!(A zd{W=&ZWx8=gx-(bH{->ZAMev|;3JPTqW!)DJ%RVChGzzn?qzlTDjMpR~sMI-HUW)-1{JBkq@)A=B7|`$?!dev z%!ITc|8Sp77ZQQtRC{$IV3r6BPsD_bq{W{$NpXipNW2pGSb$ENb(lB6>canzy!Q@| zs@ndC_c_xiGnve!O%jq!LJGZv4gx83=}ov|!7HfOUa#d|&Ey0@KtNFt1qGxiND%}< z=^#xAJ&+K>r1#$2%7*~TL;m0J?dI$LXUr0)I_(Tg4wRX?& zzCArDe~V6|i5Nb7Sfrn4b4i-5ngA?Yns~krO_VJ}e6{+=Ez7V1SiASk*|SSN-@NxY z?zrRp*`r5~p4_(SyY=fYU);L=+tosRcxiD#VL{d%7y=qADvE%LnO|maY*IAab@-|Y zEeHL-mKP#iGL5JT098mDf$VG=!eaeJc6b-nq;_{8V+(TG&_=S20|*uiBV1C8sHQ?f zX;E5%EK6fm6lX|x1qX${zJ*A+ zkBo{}9*aUiL^MNTaoC!nG8E{d-2e7UsqdoEch92lXoieN-$gSnI7nnsG%?EYAh83f_h6f( zH;xNJDbZz6v|*$XFQG%~Nt)B~RS=2`h76Vm%?OULV*_&*5Ana}H2>hR@bIqv2ThnB z6Etv4^tAUs_+U=YY0**0ztXSEfB~`7(RVXreZfKCyEFa~L4oPRSEEaas7pO``oQ_~ z=Zlpl`e^}5ZQH}5-g@W-|5c5ht=FM^LG z02^Jru;Gi%P}3jyWy6Ng<6s#kY@64#pMJIO*s)_PR;>8=V?hCj&4!w$ga};_EOt`TES^-90=e zPmUWtq~{~iEME7~ys3`_2S4-7w5j8VP9c!qRc;^5o7|_*+wVL-@98PCVhFa!?V}fG z3>x&oe_na-YA*pPM&#vM}6aqCEC8lgSUx0s&rLJmA52FE4)p zA_sVQ$mo-Y2j!^r_41-@h%zrPF`=!O7yTyl>gg4L9|Ju+>`0htN1`@_5cnXUy>uad z^6@j7D}MQGV$N|^d^3phMawq zVQHQXJtx$dqP}wuKFxre!g$9h?Bj3-{Z8M_#J5x6;qWM~n+Ug3`u-{Bbx+`0U~D<2 z35y`TLv~~>3FHF8ZDw-uxJCSrRSwL!u!8eK5Y{1Cz%M4@1q!!pa(5sGL<5}u7SfFQ zDbSaKFy(jxmzbm#sC*Ct2p<9nFc^fS5iqu%D9#Mz{X+Rzx`7^@-Ysxz5XhfQ=*gv%I<;$5qVd-i4V7txDJ><2+1`mwLz1rVLZM*Cxu*mc0FDGBl zyOU8~YlrM>F97;p`S}aU$rpC~bnwK{y&E^~-FtAy-ro-j+YM4%z%vSCnySAXu%=X5 zz%XN!QazW8^^>E1KB%7#^^=3wVPT5V!gJ|1s{)^iTW*SqX1{P9!+{F{M#)s?ckkZv z!^xxPg(Y03&B~b&hfb*ZR$z)>c5d6Y^B1Uj-1K;u;5-)(l9A9CPkku%3ZdK#z}=tq}+k5nVOC!$1=~V1NX<7kh&WncmS(>>K!B4y3Lg2i$RWd!+jI;* zkH=?nKYbHCc?!<&@8}G)2TEQ$3*`MFA(;y@zO0NF-~`~QWeqJ5E^2|dNFIZ!*}PkK zNMv0t7Oe`-yK2EV4?-`vAlMe#U;)@UmYB$%6cVEy&kBjOwQ?B*QX<;CmCG<@ zfzk|c8+`>fUx0g%__-Y4iC1*NohdFh92C$6_w9l^YwDYt>X8f#FRQA)3MVBBL{!^`RAYEDz^9F$%BH%4RHmYXbw*|tI91CAfK7+k`==oPO=8S&XAn}(91MF zM!`)hiS(clj@P<-`gCv)g+9X)jYJDoMWLZOzOkGE0AHa-AQN6zmD-2{BcjHPm6e!( zODgNo4Iqa4%KS<}Q(se~hT5>6$C>qwIB2FK(%sSK#7(gQ){K+l|Ns9{OR@!aMqiSz zpwkuz9it{ep(;}k z&YY1t(no`9TF{Z5{S4?->PV?^AtK7;EI2_K7!@7XB`R85-pK1hLyL;4$a$hJK~cxl zYD~F6=$8Q?hI%o-vL5}1UaYGuL625qN^Yj#>+8#F6BM=OI8W?D>Nj!2c%gTx4+*z_ z5NbC7wIkeq#P+nKR5yuIFI>+)3m9(i2dVYvpN}7JN3jZtVks}nAHcDCKAy78ct1$I zo}N$%7~Jm>Fcpf|v>{@kgpcn}QtD^!_sF@vK0Le~NxMlT?ar!=%-!n!AX!&eHv{Xt z4Qabcq+L>QyAlZzJn6aZ$-Ak^$;o@6bq1n!`lEGvp><%VY}fZ1>ie^oZKr_KhPJuC z|HZZ;8ql?)fd`uAesXX&jaV=YJ9weukOx^PoMgd1=OzrGaKg{s?-7XDGHr=OLquYd z*LySMOL&k_#CB;*EE*yfnWF+TEVau01k>6oL^D7J5PhnNW}+TWGgodLJck4&52Kmm z$A4}|Gfpmd`h1LIu;XP=%;1MojKksiAjP=dqnOdrv1}s}$Zl*)FX7?!52Kg9y(h-9 zi4Uch=H}{$(Mz_iwgApC64*;ScGyHIRwT|(6hk`HzD|mv@zj=Eh!pe3xgCdrIr=b) zp_X}&Yb0HbfwSq`XqZ@PmL#!uw8;b1a<5HhS)};p=b4Pnu<(d=<`@)IG@ppFkXUZ|BpC(30R!q9 z>uT!o=cp?Me=Kc6$T@(?ny}T@28c5zCQRpi`SqGLUwyR(9)8$aSD@bd1oi3DC&XuLw8+32X#)gq zLI8q=TU*B2KCKPo50`5<4gNTdNux0VrcxneGKyQ)YW>9t9rHP+1I!9Ky&eiFS}Vcj z#UFveg;|8dT$q`e3JBxW^vgHWGV=rntJK%o;`ccUb=eq?|b{abE|cTcxQUGyQ!XoC!@A zVoFj|uinDJt*bdXf8W9PkJrEKV2Q5_fmM?GyWtwr z%|36Dyp*1`aEQE-sw_^)t;5pGCn0H@j+ew_{KOS`KOGuobs}>->)yPs?RSkE66Rj2ZSbHynHNp zOxD35dJP*kcIMoFUAyKdb#UwyL<+eW0U@M^(|m0pLB965UQ$cYoju~Vr~jP^S* zeX($m136i6W^!^Suh;XpE?&IVAeT4PRM*s0m6umhQkdGtMq6-Zk;-6D6=k~KO zzMhuu-8***kL(uAq~0dIffuONi?!+vn4+`_i6=^}vI7SW9TH$U;4U(N&01+$T7es3HVXo@78avyl@$KbKqzo}e=LRKgzK@9XAh%ACKL#bh|>U?w0JbI~0 zi_0sD3Q9^ixpP{0zSwPRYnfIsA+3U21zOtEWW{ zE+LgQkjhGWIl3QgV7a-rlxx>iDJjXsdkk_uB zJ9=1}nOq1z6(>LW+?A3C%jREonvgJvB}Ntvz|)Ud}Y6fns~ z39`qJ9qSki==w@l$<3ZEAoWF-*_@S|dk4OJM~>`Ey?pu189@!*x0hFd*&HgUZ_DKe zlNK-Dds0A9Yhy!0V_jYO-<74HATtwr!pjBqq2CW3I(6j8sY57>dU{IA!C8X_32H|Y z%%)9EJdy%EK6`d76@;zeJ03fRr-tCUxw!%GU_5{3%;n3e`;HtD)czj+{=P`IhydT) zC-*L1oCH8pLEXR_8fxq6Y8#}o&S1#CApWjZoQOS!rUHz#J#3wZ~gA1Ac2@@rrIbggSqPHCkW7K%H`Xq>$F!V9s`o_)!_ zU2A;W}XtXBZU@ud!S!>n-I13KIV6U*xac(gK`;C||E@o8Bu%W{ML+k7B3mgc* zB6+g_gsVbNZ*VtyBTJV7sY59D5;WLQ(3ymQl9K$q;_@;q+RMs|^YTlos!ECr;crt| zTv8?Vk_EN4VBA|ky%z8Z3xj_chujkyKu;)vla8L%A2*v6qKD``iEfi@_jIUKZsWMp}n$cvJh zClXU?4iYlvVh|A3I+_-;+J&B(nMpU{`*ZV5dOkwXKSv#E5U(@)-{xy!jWEl@kwquj@YlQbF) zmShX$9 ^q}(zjMGi)7yP~$;P+J(yndgmEC|BiOQ%=LqSzMlZ^UA8#zX)(9(d*0s zotdpzuGZ`I%@wu^lc4-`H%K)nWjsub6CESPozlU~g8_0PRP4Q=tM=4t%d<*rnpn}9 zqg&zVylO>mxmJibrXl!k&u5Pf!>V=WqtQ`426j!vsX<*j2f8s+m!YwvM~{vk<|fw) zoMUwe_xt**?tag%xbs;L<~Of}855*L%B1xU{>M+-QubPzCmKv^D%mjbWj zc=dF0j)!1tF&U^g9gu2R6A3bP@V{mXvKo!12AY!Zp!d=Br9w#94GrMZD*Ry!dq-I zf;N@mA$A!7n~Fzbn_6(c5f(X{Vv**ju1@r1Orw~cME1kEr=^LQx|BDCOk85f2KX_Y zQ}AS%qr4_gojf-Bk**$^XJ4E*b>u|gSg(s$?PQCi}?F;m#0Bvo08fGW=?CeZ9O=nWh z5;8!E@fIj9w6G{T&+B!V{H01#HFaJh5HGMt$^4`vCqwaEF^inoQp5$`MoXnt;PAP~x9&D-bSFg? z3=^4($k9T;uLo}`kOeKWH{vD+^^fw-heh=CZ&0c1+4EJby@wI>PA9!Y~OczEF8 znLzZLiToQsRaBPerl#FEcl5})n>Utz`q|pen|2&HdSu_u7#Ol&R&PF;gK(vslbZ-z zgud8#GF$v&10c?Xd4Lo?$xZ?FeS^5KgNA>lxbI@SVbbiSJ^F=Cho^LH5bAS z68B(E4KJX};$FthdLtIfHtfpRXT?Cghyhwej?{Z_Bu1*vjP%Uxyu1uA4RRZ*)EbRe zt2Y<`0H{)dM7@wpO6)tM$sGwijqWBdUtii=Y0Pm!F$9dfMz6Q=bMhL7SgCxB2O*_k^COPoy`YiYBylkiS_|1oh=b z#EJd6W9OM{bRzwREUZ=6j~zR<|EHgR`bEI<_i5$Ut>3R%wgOW>In#W=ee%hpV$MlI zN+TDR=ddb!0|*^+??3f@oI36yr#|nPD@69_7OsLaq6|UcKq{97Lt)T2Dk`ee+wZ4=s+5Kh+^aLh{J!fmFrZt&AanA+wyk{GWq4pa62!^yMDc- zz^ceAxPHB$MDQ#qu-VWSqBm$^UVdR-VPQVXWrrmYR&YBS$N4UJ`uT})_oAbyzpvTf z7i!vvpDMLO0NR0KP&%L;pp?f8$o!($DfnMj76K!z8gOYiAAn&a(ME?EGde^>4DxyO z(HVpAlb2UeW{F#7kS8)OpIEr?#Ia+Wzy0KsA2%c0vpp=8X}7JTx+3uPZg+kYnij!)Fj#S+4kU+i=Qu)`3|@*7ykTneXOWzeM-H4)=;Q& zhZ0--M%)1#rYc&#Y{lY*iGWn;0oanc{NlZZv8Y@h$G?eYj(&pyJ{2t}L>93M#ls(O(7u28wQ;@UADU_lH!#=a01#MqBGl%hE1l*c?! zf)1b)KhOzjVZtpyg@?Npc$ZA6ku_;#)s4y!S^ z%Ay?7U(iJ$E(j#01zDv~g($U9R6t$f`~i2Xt#7Jr;2lcjsr7<}f*Go-kyEgws=B(q zwz9Iawq7u}6c8?b#NSEpogt16$4iD6hgYRjdbglPp{Rc-#;QwW;V$;BEuGxV$z@AG zB`D~D9GmFKee#rX!$wXHi|iX=35^@nU2s=;29(z#a-lT9TM^LNPjFwfdeMq?pZ~OZ z+nyixeD~cqf;)=NLm^zn;~*n1LvXi=JrA#6(P1t_phyEm9EfhdI6}N6Q31SqMW;FA z{l81&HVmbI1^w|l`Xda~8RkT(=9QRuDDQ$y!7F7R)#8pi z!R05aPp{71LPJA4c8=;(SFaB0JOr-PLplf4Kk?+_Go}GKW#Wv-pA797HFm~xu?(?~ zY@zJA86!F(uRA!lzqdDq$mk7TatXqfR(}4C&^zlk z*15N{N-$fOWMvm&jh&a30UyQ;@rU%bKjdbmt=;xq&fEAD7~ zxc0)2n#_+FdgNLSQ{)5Zh~tn}eB{OHk5+Gu-XN_b^6#ak+_6#I7np+b#h?z}z5fdg z;Y5UG+`et)!ox|1c~q8=aC`$L0DjTY4Zj2iTDhz&L7tJFmxXJ=OdWEgm#?RXbliSU zA)l@`a|$B8ECSmju=C=emd+)cKIyV3E{eQB$foa|Mu2^kH!U6VIr6!u-MUG#f;SNg zWI8keZ8(CUhirV_Jtn>W{_FEAl^y%`?Gh5yef<2&ytHhM;2j$qEC~7zi@z8 zhxHlKzkj4WJ4x`)&HzyognK4LUnF_Y&MzQ&!JEoO<&!oXL99aZKV(;=z(MG0Ob~yO z?A^;uw%xxxUuqSt<(@@N3*M!*kkAVsIzZLL;|YnJL&xbia*kpyhtW{Sw+?u*w;r|f zV3%&-UOyzV5#T8ZSH&hhjdVt%K_lZpBcbSnP{zTN8TSH5BqJC8WSk+5Wb2fhH?B}F zcJzsXW+klHKuqWaa8JfQ90^{0NYoCkXP+6~n%P%dj=Zn?ez}|HId=YgF<45h<6e4I z@Q>}*p$6mP=9NFso;{m%;I8{PX1g6KMcVDCoQOOY2q4b-nNM_0!beZtN&r*2%k z5+3Qn6&+qN0=7-bPke;npJw-Q$DGaS{lE@P%CcmHRM~EvKY8+GvS7$20XMIpsEqQE zqpJ+!^pAD~38BAQ>;^9qXMOzwf+@Q=I_H5iq!uEtQ_8J97*y)?I%VFskjnQ~3euV0 zg|Go}&X$&Z?K+;SP$(60+VHM`RFZ(PrVzX<=a28=AJ}EkBVE;%^I!e<%M*I&3T*j; z0lNJ;v!_m-I(yFakt2sjcqH(~PQwKyzis!OmV7Mw-f%p@$8Q%&b}UHN?3gLvDI4C; zT`+v}&FZy30y6T)wX47RX3Lp^1is+R7Qs6|J0~wc4+z~E5GNoM+Oi9w`xcPdwy_BY zo#tk|H#OUVC}3}H61>gG^lSD-0#GC53kZrHUjBg{{Qv57sXfS}+k*BW-MP!7+wIB~ zo9)uItCy}_x^xZtB+!4WK+h=@F>DLlqT1)@vBPiQ4o|_YCM>MRZZD{U4Bu1WdiRd8 z7upL|lN#LTHcXo9Cb+pJCAle;c_on5s|4U=*=!Xhr4`_#B?4h$5Tdfk@dUTvU=NRw zz@U)8kdVOpev?W|h=e+n)`ch!+Gg>wj<@Xj_BqgNVH4u{v;$uVG6GH&;$zuPZVs;% zWT2vx_^C2G8EXz~@X^^F1O11GAVZkWd1(&!v`AQdzGw@Qx+urb>VEB7cO%am1r@0d z20im!9ALhzPUCv8>V4Ftryt^f-PT;6>5g2rRV^gYO?>>y__L?a`yh z)OTZGIyxo<_%w*E+ykxLqhC@|zv9bi>5F1ZD+O7HfXQ!-KnT=1xTBn7U+`~D4j?o2 zN1UOB_QhyQnh`U2i2 zMusK;GLR0jSs`q(-lHBJ(yueLP~N~a;j})`G5V0RS7gtABN00~b1nk#=g!2@WH`Ld znfOTT@a{ryi18&QwJr5k<(LQyv9v@~M5YaScvy2mZeC7CW?o@wReeK4RYi-t2?Xa4 zg~dTo!3Toksd<4P=RE@+E3)~$0%r)S@0&XuHy=OQLEwJ) zb0ny8G;DVNaeV1`K~N1SM+!EzuY7@5vV;rE)c`HeAg5tvubw>t2FCe)fZ!kPB z#87AncQsptU=~=ql`kkr7ZAM!V&dJKYEt^D)PBUCB|io0Ohu3cYzCDtuHy+DxJGV*dMJlG-g zF#7t4M`E1_IfSyL(u&xL9Ktk|+v!g7P2`|P^Ce=9BIAS+teo=zB-3JcPJZA^8h216f%Faqqo)-eK1CWbqGI0ha2F8i9E&XiVJLBY8K0XNOg&MGWGGH$dfAl#8m zP5>M!OtAib-ah7gU$&Mj_`!0e-iE6vv~>5%6~mgjAi4I@e;v(Az!A-WR4ovZ`5ob!GP>Q`1w{!%62Hc z7cE>UFxlDjC)SR`T6wCY1NJw-&1vO~ROG;^J1ZFAw+F3q$e=+nkV0a}{|0txT1w^Q zqX=NfkeOv_voDf)!NtRT?>n(g32lWk$&>cciME1xHLwQg4G^ye5ghXw)O}VZ(#|wP zTwvVOFOm-VTQb(jV`j{M@?|k zM&RjC=Y;`#m<+;zj^r+X1IHb&fI%qypb&)`vNfg`XzHxMW`_ELV`|9Hm^GlDYyCO1&6a|DU=sLP4XvQs4GLe4^9!lZyW#n9 zcz!gVPit(^N?w$MrPrY^^BN(mT)%v`u&hgGH^H;Cw5mEk=OzpGYOaR}CpTCOrNT6% ztULqiq zP1Zn2U?N-?w{YutdxpKyo+9p9_7Z!8L+Q|h-t>-vf@gB_$^E|~8|cOT-+#Mi&6@Qa zZ=@v)9>Tuge*5i8@~ZE4?%a9y+f}OpNANPt^skb6zEcc8aTFT(7-B?ZLdCwy@ z=tTU7T;GE^>(q&TySDH9c|V=&=5FcInH3k-H^cY6yp-(oVjs~OfmSrMMlhg_XpP`T zVWJ2am-MI0xlRLP;LfpBb;&kzz{H7Ae8tPc!oU>e>0#;V@Q~ZN<>O^1e_8YWiWT2| zCoFf%%!K?wiVRYjE_b6~cf=D*+S27x&%mb=-d1uJ1DX!TQ-c{Y(=O5*9At>D#P20`f=k@EaBd=+)1B2ZZ@Ln$^LGz3kDeDEn@uM@ZCw{;^}mMYClB zvQeEs0!8*un>TOXwe5IPwR6h!@vI^Z&p$oaZQ1_qx9hk3u;sgro6elvzjfpKFBcHF zTLQ*yf@7-Sr$y3tTtr$X1*=KF35^tOg;`~SyBI%+5u8K%kW3Vg29SgR00Gxsj29$- zEmD6V9=QF_V#`ON4W^+DrlAd-5ie$6+ypCUFeOsyJ9lafUr{f37djZy4`QAbB?WYm zFQlXvGzTY#Bn@~CJRBxJ$WAO`#+;X4dTH$N;Uk73bYaM2uf6%~ql5Z&g_~HvDN`m+ zo;-Qvpbol-fs=%YTdCP4EzEJ_+V%9JO1N1xm1^LO(NtA<^}54RSCtQGtsy@Hvr5Fu z`4oD-wVO6?`gHY@gk_&DT)h@vGa$@GAAkJuhVRyI-n9N(0THpm!5!W8aj`?X z1>2#7YGFvaVf65|bna19*yI}(7Z(@X$r2`@Ueu(}d(k@~n36aM;z4XH7&8WUGp0)! zMmhK$F+AQ^RQ(wZqy+?u3&;xEKnhs?%PYjz9*x$X3_5=dbUqpsHyVT#hNi>=(Ub(l zayu0&4II)2Ah2}GIYMuO73n0vlWElr&5sp^SZpGA3;~}x1mm;`)37p{HD%IZi^Vc{ z`kY6fra9v?4_LOJ7}pak#=eg}`pl8c4mmRhFN63N!tL|Q3 z3bV_Vn~lv7V-}3U+U@1mbPw;U7km%<>W-E1R$7F=XtV;);?q^%BFE8p3l@B`c=6Ju z0;GA!6@sBLFT1c70%BmGwyFivZ-NT_9RL%YLkL3yi?u~&5D;!I6eVbB+5lGub*luv zi`sG@V-lpv03!|gYG_NKN@IBh$t;Zekv1b@VZFS4pg_SPZE!V$Mpgoc{vU56@qs?* zm3bHgA7BjhL9g_2S-uJXR$Mz^FhCoJf1n35?0q8rV+>I5w9g15igFL6g$bD0IlL=` z^2i}Wr@*7{>4|Z(=3|@p+@t~BdII0p_|dF!gDoI;L*Rf>v)}t~=b-+Ak6d0;U9B;g z&5dx&fU1?bGROEBjlH6snPbkv4t@>m3dkkVj`uZt_kq{!{r<}}pNzoFPkSOE%=}-b zl<~3KYw9X%3ewUrlQ-5iH?qG^F=K{NRUA4PJ`d)Z5#`kTlg*fc_9KoRjGqTH&OG!I zaRd=6)dY;72^c|f=&Lx4pb)ZJh!_ue!*IcXazPA0Dxsf%)CBQZ0h~?;uSjrVXD5&w zAu_XI0qXtalXAIsP}~scA-awo)C0V#Pmh1hkE8et?2A?ZoE{epxDTXp&_YfE&Hz`N z(ZQZzQZh)dM~>dMvk(lRuu$90qk{M6pPxGQ+pc|we%rtQ@c!*TBS3cRp0Ae2L5>l3 z)zUAP?KyE`*Bac+ZnrR{DzIyC`mF>Q_LWK`n+aC(MQ{?hlGMLfq=3rNxR3@3MhC_Z zO>I<1NI>)x(6+FGKw3L!=9&^*7}E*Bq@v_H7>^-jt`PAh@P?6ru|v}#s14XYG+tmp z{2y-7%Dt%VN4Mc#AtLj-gH|TYw?aNuBKFAx)hCW^C?z6I@%)u!F5BJ~m2D&YSx zwu!3>x2V}8Vi!D~pFXBr$MKIpH7TYydZtU%5CI@gzZ;IQ9oGJ@#Ouhons^P%d2gI>ZHPQ@_ zIuxvm=dFaT4*`MW6R5&q8DiEo@F`5-SPwXv7EK=FY$38Npj7@ZZX@=9D;Hx|a4M%P zOZm1a{WqSj#&R73p^Yiv~VgSAwxXy#Jnd00TMcdba3|$-3kP36@@UkP!9C> z?O^HB?~#}xLk7KWPv9p zKM@0xkRPOjj4bLjq9rVEVND{C2Ty{EyLVtj#)h`1NbOA8#<3Wck7881w2i@{9hYbp z;tUv|$OCESl?EkrELQXb3|foi;1QxY0eZnG1uOJ?GS=Q9ZW7G6V<*S;fp1fvzHxKn zq?l0liRy`w0Me+e3>TEOSIhh;0RU4aFpbsF$elQz+cv$OCSmb`oh#!ZkS&OIe5hWz zboX@vKk7rW0X{?Q zBsqcD9_m8k8jhjuArS#CH;d$8=xD<>3GQH@Ab|j5(3LU*{0<#Nf*W`rv_gM|i&JkBtelq~AJz z{CFDU!()VyjvdR&0<=cZp3$pMI)HMJUlL7BQW86qx7lm-$w(C8={%{v(x3!prQtO&--vEbS%lP3jx8P$&3%G&0#+S)15OGozZ zA3I{oj0t0BPniM*cn5b?f|+BF41MI0M?#T#xrR~&$mQowpF4LhS0=FFQ>zho{nOrK zdrzG`eF|89t52Oeb^7OR-)~&~#phpsyLvSY^5BeSCxT)jkCBcTZs4H!QG#9f8o@wf zBAOB8K=a4ly9=QGhVYJQ&jI=)C~J&xv=5PHo8Kg=>w#A21F9Q|aqUEsXMWjl&Y^iNr_$+5xVtk`_R` z!C#_yoap`foKVx?Db3zvUY`8cJC@Fqy*x+Fer>wILal650t-rD{S#SVE8Cr@7FK+I zVCR|xJAm=`fnymOW4>a==EY0bEhf+R=NXiw=dG+)0=tmFZY9!jw?uC|8Px^@AlO9| z27iQa1p$-_fudj((Q%?Tm=dZC$&6qYB{v>IyL#|Usn$b55t}eBPGDRN#kd&CxS&9g zJRGE;u~2Yu;KUfgc$8rSJD1D&K=4KyC|MLEBeEF|Lz)d* zUy5r=D2SJ_WvUNfePZg=sZYEbgRtUdZqNR6=7`?m;o-eU%>1X6!Xwf6^6U|jk^g@G z#aXdkyLRo;t@D3B0tmYHqmM=o2HrsQq^IFPD#$;0^NE-KEhQaHO59~ zA*=3Bgj`pTtR7{Q2wC}%M934M+Zq4_y9sb2p+u_l@jd|>y8$%Is246>3B}9Gr3*)4 z!jUqn&W6YDYjE+EGA2I-@4z<{2qQ>bgdECGz%O_P+zsa{KKbNpF=O(79aizW_i+6i zxH)-!)BXy46g#)gORM(+EA`PU;ODHkKa+BOGqQnrQxYbnm`PdNHj{E7OdiB2?ofb{ z)JX*Oh=D`wID^)_;A$AukcZGM8tNeQ6?GTm0^K2OfP3gE7D;5kw3;HGD`r>z`->&2 z8UU(#161`NP!$xOcsVgvkuQ)aiewV}#~Ub%xP1U|vy2Rsn#w1xw}_rldelS|UZ9V# za-1X44TXfd0vBVdV3DuA_RlH9`}glZe9Avx8$D)FCtqlJd^-&q(<*FW(bf0i!iN7} z5sOOzapu=;CxEaxq>M#+Y{$Rl@vx4;>40Do=LT%pu;;)L7)OsE*t22XLS#%>vT)rl z$|$sJ9nvI#@nL^o_1U^DPiEijsXgRPnf`AY}?n+`BQ1AwlS7>z%;e|w2<6mDU^(0B$|3FX9 zLr*#-ZcmdEeTek|iFa6Zk^Pf1L4$XPU^xMIj?#f?X4-(A=fL){g1XF(NRGGPojk~`bG%J_vyW41V+(%QXf*s}Gv!>4a0-#UHxx2=hZ2M$C#-ZLb8_PO=r#qg8b zvuE$lHER+DbK5cI>=bdl-IxD2NH9T{mj&-%1Wgf4YB19!3DJ&;0-G+NjESP;;_{~>Po-9Wx?5>* zqf4Ox3DHw2)0TGY2Km!TTpAri{lZIb2(SWrr%vqet8p`R=%6c2o%qs_E0!V`kX8w+Np0 zHL$(wPc%20J&dF56%a7iz!d+}&tF|FNJ`p50lC=uElEi+IQUSu^1{Bu0y1P56eue~ z<;`jJ71!my%7OwvrQivvj^rrPkHaPeB>^}pN(A8$Lm4p~gv;eS}5v?ml>>43fqXi`4nsAF@GsYT>Ru_p%Y6ct&h~ncv z8uCsw+sl0zL!!|_wb6Hme^$_JU!!0&zcfA{I41{q{=k(%#@z8QnFZs%qep-EdCSq` z`wH&dsV%C%d#6A!+S0BzmgH#p=4Pc^PFZ7enoTf5Tt;mrp*fqFSeAfv*R*g*R8p03 zERd8;76(yQCj3~~njbhxs-Y4!d;lK?@F0NUh;~dC|I;xO|KYQAl>%3};sWl&@i;+e zKTH2aSPwvXh`f)kc;Gp73O`ExzYJ~-_g_O^3I?>k2U_3hrJxX1S-d>?_AT4~c-ELO z0M~Y z`zr3ehm*D7-kls90PtDr@Zn<4;oc))Vm;3++#GJ-&;)rYZk@!n$P(m1xOERYl3J&e z1yLXCX@Gf3wqO5w!gAvL+VfK1YFrW|u~f#<+4k?CB3Cc)jl`z=l9LN!4@ z0}p|c%eP|P@Vta?q9S|~zot>M5%;Kb!Pt<`BGb9BPD*_{)Z26}z!Oj`%<+CEpq$k5 zCg7ZO4@I^PK%J<8brR;Mng|aYK7v`S0l=PV00@mi)c_cpHphnEz2O=nwyFv}rb5ku zq=$DY*4PRhFa$j#U$m7)3f5@0$BNizEEcTnxb)_VfF`U9iV6usyM<1ypv(lIiefJi@Zh@uT_P@T zg;n8RkBerz)JKm7#Kh!yPGIC+AUy<=j@AXXl96#n>%v=UX#(1BTgVmCkh4gup{S^& zjsyo*S5nl?%d4BS9jGRhzo(P4CQj52>ThhQNo$YWwrE`)j+$s){e9f8QtRq%)Ivn_ z5c?VzJ9}D5J^fhcceSow7Hi_7E2@dB!z(IMwXW_)O+0~GhX!rC^E~x`JCqxfhrh`M zHspHug13U$&-YXK@V}vOS{k%c_+48Y3g7VEclRhfXyEn(|9c7#?DhT=e@)@V=aTB~ zr|`hPqHyh|tS&^}Sy{!6YG|g^jm2Q9_l`yK7Hdry85Z;^VaE`CO1(;ginM9=AK3QO z4_kNa+P*E?@rw5N(IdYdJVY;sq-(NkXDdI2NuN)VeM`QS6SciAhaLU0kzP{&I!Cii znY4b_vRxcfw$0-{zD0Ms*4^Igp8@nDT5WxvOpEyCz5@sK>D{mY8(Pkhk&~N6FX~|u z=-cn6E}it4@vuM%fZ8@H2HR9qO^|yBu^$7Zx-zGljRqRX z468@pfcY;iJ%gs+w41h++s6#Q?A;52eAlks*olJ${uM@D_rW{P0`v*kK^=Py88Rd) zAV2|E-1IAF&YZcHkx_3Z9w!9o0Os0Qk)N9S`R7;i8&qn!!Y4T3*?FDr9MuL20q4)( z$cEY;0o*1m9J>c20I+M{zV+utA8n%t3G_5xsI;^?;ysFsii;6LO5hU}*?LpyX^*yO zfg&q3iLk%U%fuBe$GeAh3=0blCfO}4yt8I9pt87579(;~2EjEo#L_X;5<+C+Y`aj< z3B`g90-c0{3y!srhSEtSBBMcx3@#RcaOJDXw{6D^EQH`X%^5DZbV5@9Hz0)I7LEM`A%?_ONTju+C8%STvd zYytjZwxGi^^e^)M(zURQ`Z$;@}@mK5#fwe)M*TYEujWLCFsv zJbK~}x0y5Zk8eM2R@xD*uKwut=U;f?h38-YXcRPitDGP*tDxii=%Y8@c;iF(z`Ub@Jw_a;utr5Y=kD-je%T}K~a*2w^N*V1N24+I4Vq90oE)VA;5yJNw^h4J9;y{xJO^gzPc zlWo>UAVJ!z+>O*|$5=f1IV0^~Rq0xtnB*S9hX z@{gTi9##MepfH5Krzy(y^y?h?*pqC86%cSJ)IAhFlHVT~S<#N!M5;fCq`QF*d(gkG z&N0H{8j{6b2r3HDR3hQOBbqhZF+Uz7keRUz6vv}D^O0_uZ2J>Yd(tfr#}&a?0?|s4 zG(_S%>9esa60JeBMiMP&=x3{>XB6Q5g-swnwICLuXmFg+{O8|u=gjF8+5fTEDHilc zcG!SVA#SR=x>KiaHW<2&e_RL#Py{YP$|CbX;P&)JhSf&tvqx`y9^i{Izb{+8YQ<;K zj{m9?6E|-7{FBcXqt)Lx;&{UnbU>q*iJiSIa|^_FP1qtzGch3AWAM6p8T42NDKgY<$B(RL3h0vaP)Y0?3$ z!^oCMZyqeu`Zt$Ld@lm69csBJ2S=c_-HHorx1cM+%mtet$_~0QY}oJvkni?EIZ1Cn zY!HCuh9QR?f0|#vsZQJdRYl(QrJ7TmCNNOLT|Xts!{R9lZq8d+pUUSxGh>DVFraFc zD);93^EY!z8o^{q8{V{fpCDhguBFzl^3wL}5n}G%-5k=RA2XghbNSY-3m2faJ9G#+ zjgGTLR)d$9p4HXKp$wE`V$pj^+@G)sT&n`YCQzY1R+K-QEdl#qYGp6evfzNUEbzj* z^L>21$O=`2)E=%iD$y$fz6sj^?(zZ_6>Ok@xK)sQJi3z5v@T8^(Y;vM{uWsd;;yM*i4Ey6iT&fExv_u z;oi3gT5OqrXcYlf0p|j$2mmP)07&pM0{0^TBokH?(kHQ;6oWkyPbFVVioqUi;Yfs` zKA?`^XyuE{vg8AP=l1QJDXF(b2!&J%z=C5;3r1HnaB>=&noW>@=t7ct=z5wffLo@S zHG5|F$e(Zc#Jm#2K4u@wpNTft`sliF%xva@Ccao%v2O*Xl*P^^RWQoh@*Kq&xFrgU zmVUZ?c|rb_t;VCP<>rV$3b_T*%tf3h85sR+L05AQb$;Y8!$ce;mke0((qgX9U(8syvBCiAso% z%5mL2J#I8xfp}ctBG4xG60HHqU!0;V3EvOaXQm}19)%1+3Kp~ubOiLG6cHh4-$#P6 z|NOV38tg>SslZa7cKCp(*w|ivNOwv+ z6;@K{*#uxY(qs=i0Q|LdQ2oP2h_tL;5SZHiCba>{7!T@N{vPv}8O0!h`OXn935>Gj|< zW^Zb0YH4Q8R{hPJ$u|(J2%YWCTPZgM#b$?y3aLuJdibZ!Kd|jqJ;xQAow$%1!iAi~ ziMWueh)J)$^7P~EMXMfSOpX&7vKYva3YA0e300zsSGbwHj8J6gNzsNuhoJ=&BMRvO z%4l7qRi)9pspU!*VnsINOPAr_j*h>RdgFQe98y0MWT2SD=`wnV4#abwA3iC$8{`i_S zfH6w{;g~Ei)0h`ue5seG%&oDJ(|T)=a1^NZF>EH?GTse-OB61J$<3?Yt<7mnqCLW$r5chCno~{!k}9z- zkyC02&<=UG;4zO#I;8eN?a-FGV+NRujvPAl>knUlnJBpzEc;ae$i+9P9x6E+fuE02 zOo6z+=6IRUD+J`GM_*rrs@v_3jxqgXMoyRz9V^HJIz)66YpCtkcM9tE(hGrp9Ry7_ ztUXAt1#ftSp8;AERpU4ff#o5^7M$SQd@I$2VzR0Vkd7{kvh!Erdk>B z1=>CFz8Z}Z*lz-I%9*U}9r1ak&iR^jxfESZ$jhY5^|V0WNA!lw+jng;yQ8^9)@M$i zIWnKk{7R<=nZe=qeaCXg`^+r2c+Y}+90_}1v2vM9$4mlTK7>2DjNL02we)?qK4e6S zc8rH#1`3rZ%@Pmirgc^}6gADVvPba-zx=Cs3$(I*cmoLyyvI$cz1z)TP>}-m36mBO zNCEuA$^`bO!1fC4uE5~+N}1e9YNU7-@{5WF|i?<(xfB}=4FjK=06|IiRq-lmoYIIWC6aujaRQ; zZNkzFo7va5S^9({UtfW_-`%?9!rf9IpS!Sg-%Z~d1Ge;vZq?Rb@8X9oTQ_fk%z#^j zAfz{xt`KiQA=LOJ-NKlYg>b$cpb*j=m_ycDO!*`gP-Tdf(3>X^U+=FUZ}Rp;&>x~& zylIm3CM#~H>A`EAed>}eiPl1R6)GdyJ|3Y25SSG!AtFC+qV}pJ!xa zk{{1=QSkDFxvc0CQ6B*D|qzT_uqei{-b@dOzbPf^Oyy! zYG_{IPzkdG`j>{X&LN`mK}An=`M*3%s$~z2p!j)>|Oaa<^&<^=fa2mBw#Rj~KVylc(S zM1R0-``XPb%L`FSzDK@j41?Mb3K+4V&j0ceKulZ1)^JhmGWSmw!`87+xYu4A0k__D z>?}LWMH}>CdSh*^F(9DVz>y;d_F|ND+hpocXVohphcxpF3dhngxz$Ti0imIxfrmG< zPA@vv>UCOyKe-)vu)tdM*uSB;c-z+iVfuR8A;c4ra(uGYtGTwhxjrYSzWLOt6%zazavHWZ1oE(e5^I>QLK~ zK?ocQ55a;FT{3A>47Dm=HBheC%fH)SZ9FCgY!H7a6N=VULVsIX8m1# zP&=S?A8!UJ23_g;lh`5eEdsC`+zEc&(;MLH9)MmqS-kUryiUODW#t6BUINhd!jdW` zwODTpH2!F@e=yJ?wHR6%bwQJna4M-9W$tzDl#Mob_jc+=1_cKykLd>2NIFLuR}U@N zlZvnJ=^hoL?lIou*X7H9ZL5G}9Krr2LlyDA)kJb1v1sTLf=|`fo&r>4^3{tc;XiTm z;?-nZK}ku<)${xJ?>~Pvr9_}+MWI|ONnY2#tx^)=j~+>u?kOf&2>DC@opuuIli+l0 zM~|Mma4jtxn$PUCYZp!(1-{O;>rm+<$HOIQUZx1&XVI>6L5YNbJ6Fu6f;Mv67|=}4 z&8w|7#bq8i=GE4u(rQt%1d9z#qqzOnY8?s|EiVtIe+nF(d7#Ivs&kFOlIi6E%S)?O zN=(Qt-M#&Dh`8Z1CJ(x2o@r;DsY|XKo3taz64Yv-X>Xqi9zJ(Cy}a7mV={ool8#96 z(_U?DGOaeP;2Hzr4 zfIu?@0zW@5v&~jsR#uMXne*K_3TW+2yap30-v8$eCE}>T`D7>SjZuRPB^WhQQAtRF zeuL-T+jeZa?U;MpF5R^8uZ?6B1Mqi7vb_q$-oG}InazcTe{Ce2;ORL5S>@Um74Gxy zZM&qP;Iw<&&Vcc_oedD)Sv+rOXB-Q)SPbp#Vy!k~vWfS`IG~jwt$oc5YhRI75>JM? z8X&+8a9?SLepd^`lX^@mnBW=wG+%ZsvzI#*RzCeVBn^Ixc7Ek=HfyxK+-m65yLTt| zYWQ!}x_6#5X;LSFaj(7~1O3x-t2(Gl9|RHi>0-fb4)tWW;NWf-u42V_{lXuI4<9}y zXpt%m5#>}3cK`?|2<{M_#fQeEtI#O&6u77t4&sfRK=o0HaY>lrEY)X{nA1P!t3X z+?>fqM1Kzm@tBQanBjRV>_<3_Mh_{nd7UfuNL|+v%{CZfOcak{3W|;nlIk1otnVPy zw=3$~1NBAM$;B*7G$6I!{yso!^9l?$TeM<@j~`Enk7wRFB~aCG-+m>hAvpNlxy$K4 z`}E!@*nh@6zknM#E1G>D@2qxFn`#%Q z1ALaJRL1BiQS=h)Q@0LtFY*gtysQ% z*|OzJ5Q)BY3F~ajtEj8ny!nEyIw0WOxl5@zwu0)atP5K=Ro68a1CF$?oDYsjy>cB* zgZ%RDN~^r^#EBDyR)3|tDLA+bC7l|C?0K+HIV#N1hvVqzK^22OhVl~-SU>Axe`$5`M0iR=%Q;pvz)GCRA{&EG#9 zzKr4i{%)1orKQ!4kem663V&F*Ltv9PckI}aD-}EpGNqh|q_w6n%9Yn5KTUN7fjQMS z*$m~C#C~>-MyjFZnyoP z&aMN#sdDYV=bWr&v`O1^PdX{P?A_9`EeeV{eQu&U7z2LYpSv|2b*W;QD>v&HGDEa&pf5p7(jz^FHJMz|ljG30#B2 zh>>F_j!B(*%Pq5JX3m&9rEk=s86b6q^#$6&gRts}XjEQ)wFFxTZC>7<0|)lx<-xQ_ zG@E@rv5k^O{rfQ1Y%P>Y2uSE~43kEg;1%UaSiJaogqQw=?U9J2wOT7fLxG^~8d|=d zphx?v>W(p!sKpMz=>*5EBeMasuP6M`r@vhPg8nDpJRo%IeIln$jqKxO-E|m@I*<%A ziJ>of>d@3&Qv)xPKdi&3d56yYc^wd`DTC#)3tS|2N@=V}{8cSD^hamgV8K{jr9%`_sHkCD2Yc1f_s>{o(D$z8?{-!ZQ z*`Jdmiek=svpLw)M*D(v$pbrrgFC8D>^+Ez0=LuMSRRqB5)xbQRzr<%VIQ&=(ZQd` zWbqH6lD~=1ok&~zCXK-UmNY2atE#Fn5nON13HH$9^wA4NNAr&z%RhDbmn&#G*j~Gv z)+6R6DQ~c+jdy?Y!PfV-e)7qe|JkwqbGoCyu_F=JDIx;i-XmiH@g&hH1$3GnBV%{O zxcre|4-yEJ9GZXnMo1s05Kv3;xoT_c8(>$~(g^e5`f5vnt4ANgg!1aEuWj782?>&L z57m5Rx{zRH%gOwZ@ECmvp!@?9Gv+utY#?Bahk#L01IMIeD-j#3X=-Y!yaLH|k=5#m zf>y+92m~!&{P3dN?p!i|)`SUdZ6jtaUj5*z`=5RBuhZBO=y5hk<08gGO@JuhfVhZj zX$i$_5fcV6&VT^}7*N|!AKFuL0S>Xp^Yf4WbPQNF`KMnBaM4tE(_Fy@cu0Ee=#isG zj~sznU*1pnMz3SXkO!z6dni?iza-i9PriV;S5Ob%DaB{u-=~+lFo;=4p~iQ$#(2ux80WGL&X7-p^A!>+3kSA%kiO7p~W*&$$jTseV6oIZrpNrJS!dP zyGkvPA(j+NPk^2b^*5eD7Zj8KKnb4Mv0=jo_LB&WoUQwMPhzwj z8aeob`$fiw%wGjW8g{R!j!d1DF=cG(l+59gX37&*02Kg%ucZ6HIYEX19fC_AM@s$x zK)?sGxSRiT(U7bsZoyN!tta4_2kQw?PQIWRNGm9^QM&VNc?==Zpi@8*lGt(T9Vuc@o6J$ibdnBiQec}M2EY?&{^{#aCMBGA#I2N|KBS8WBmjbHhYxI6;y3OJ9{X=q0X;|a8*^g`|6 zdW!1cbq#2@l|Ti!M%Cg3AZzekHx^+)SUP*VZ%D9@9G44U>^}csh>Z}%mo{=R)Ccx$ zAP@w6F`@m*%~F4u1WGRir5B<<^n1I5uH@2G0#P>(uIGd18jlJ<5IhplYC%_EvnUmO zHsB{XIi`QOW;=5sxR6~u^LKIMZ0MdStp=oEE))l(i+e$_n|$4iKc5c3#e1E%J1@b| zXbyKtqPW^<%mjh|_ITOZvTWs=HETnvA6k+ua(qy(JhP@is~ag$V_fc6%du< z;ObnEzoVa(mzPp4p{pd4^I*5K0W;w2Av2n6Msx?2$=``&hKU*)A{pe$<~}$UBKT8zA>y)g?Y#R^I3fgLsqaKqO1^&4^{~h z9+4h8Zt17gcYlA)=_1)?1${W_g958U-0jjbdDs0n6qG?1NE93eK9eZ8EdmZd-cZ>V!Q`a)mm-YkvrtpxMFWcdEt0&; zbMaj8{|DW&(XT|Mvx9?j9*>P+1UP{vHLqnmSP9Y&PK|g+-{7-{ZNmUK#LnUDdiFIt z$gZmiuZ zh;kn+iPx@WbK}R0%8s`By4qT62fq1&Pl+S>a^xb-a?Ef%jWh$_zjIsxi104Q z*N#%h5BT+l<1M5MNc$ZBcD(C2gYzFEZNn8`Ilgxsa2%G>ddGT44zBzjXAj|;QZqCI z_4Vs{fX9gPR>U>3ShBhLdQoZADQvWfDpgxQIez0g4m7v0!lKAy~>K%d-GuFr3!# zhKf5F)-!=Y!I(^Ap~J_EMmzl*{C~~X)nAC4rT9U^(O*)~UsBLt0(j0^+J-G*2kq$^ zdV8@o7fjFR~KR zx1{8gAL<;At^=QxoWFSSycxV^WE?Ct2*SwU5Tpg#1xpk$bbal1Ym-c+>S?yxopL$g zOfH{ksXA9zS5#74ckzO?6zeZoD&{M~gw5LKwAq?#t-c{iNr)swI#$BD^hMptBZ+V> zA?;?c(M-plD-Yc9m!i<6L3lp7Ws>YOfDa0>Wu?((*|Y(7JdJ04TNEtqQ7SVxd}z8e z)ERCfb%mgA@6?)QpJ%_m2kXWtV5AL|T{id6ZKT&GyEpIgZQ_3Ug;#VlQ#zcVGWiQb z@D0@3z;|eviGv?Uw*trNYF^#R`g3w?pETshb+<1ZL{l*s!Iz@ zM;H4}%;vhgbA@>lsbaQdzLrDlw#J8@#1#sK z`_~}XFR3J&+sS{-2>Q8kBWcJ8?H#P$RTLK+D+=%Yi@oD2hBF#r9n;M!(3susa%eUa zAUV^zviANtetiE-tdEhQZ+KdYi9$yw>kxAVOvD1qOLvZU+%R#4a0_SYtcqPj5mjQY zC_s{?=|8$?L}QHn4o4tA&lu~n3#_fJm3WReL<)w`2HIiBn*{p}3Q}wE7iI13c8A)m zJA1XAb=RCd27A%s{rjg$2*~|fC-=@&06+Tdt2m%Sc zm|9dr0IJpj$3WB8B+9f}nVdHVaG1j#N+*!e;5aTgd;-*1^VBMpk19GE(n~w8RH-ht zK}#;R8FBO@LCJA3uBcoiCC?%#lxVw*gTQOgoPmDl@}*0cul#)a6D<_V*WdvP#ELQt zV546@a^UQVW5=)5_eLhP*sKjr4aEg}He&@f#`1QsQI}wAceT{6AS5@yPb8RG24#MVsZlZ1O~8Di8vo zSq{LrGIS|Gr(p?z4g0W>cznSwliB-dt3m!WpU~_@b~2E@yNv-Xm&-2i<_~ut7G=pF zX6&mZUrLsr!9I2FWn0(^^wpQLWtO_qx&jkZWXtp+x}Zc@&boXl70B1k{gDj?^s{I! zo~zppHjYylaMyvu@F)kZb8rEzR58h1YyXU*!4qllL^sV#-~j=`4>W+4@V;DV;%=~M z>;l`uJ!C>K0pbKrL~w9|badsFh^H8Wya@HhZ7pCZDf*?Tg3Re8Rt~%IA4ET_k%WFn zR+@OhU6%Dw*vBWGPiWsSu7Ozr?CqL*dN5iZ6A@nZwI9*^t<9nd@+#A7=IJnfXk<-z zf5m=?FVgNW8{^lxaln}+;^SL+0TM9r~_A$mehv{~3 z8vBgvK?t@5M0{km7y%|Vc2 z##GK2zjW!+OqlgrrS!hA)Ohx+s0~e9v}DO$cinZ*y-V3$qGDQJUS4xV7Ad$XO%u6} z&TNIs7?A)$w%MSU1ohGfRiEAt9aifJMe<3>)OKD2f^Hl1AUHVIzY#5J{M z%e$Lv3(qsmcD!5!gDwb8L8fqa0o{~v=KkosRO1BHm@Mj3(Kiy%Lld!X8-6;ncYod? z@bZVqlXU+9CW918}V!H*VjHQJfcRet|{EN;~R6tZai8hi1&4I`{6y zi|>B!xz%gT;lELdc}sgGr&mzqrbqYh(~z|{bhC!$#^&n)V)O4)l({T?QA0?6BRX(l zi`|Zips5L3%C4?z$PUV&t!rzpt2al$^aTzc(3(MQMtDt#0pTSCc8xhuGYM@p0MrZu zHAAI!txxe!goN;cX`z_oco9xct&TiJUfz~>-`&FaAIl+TStO5{Wj)Y?z@v|GAAX~} zmt7f8?+oC%PeEE>;%*0zyq!;@G&xJPX>*yqj#cQp2!q(sQ^K~bLZDz z{FEmT3YxND0i37%id8hWm6wG&Vz4(2N==0wuk_r%zbm!THuNlgNVYdB`i66!JKU>5 zzwGoBE=hAfOrK!(ak(#jH2`Cj+DNtgT6hw!KOkk4M4a{_&`59#uG^wIgJ_k1PY zGfleZN4k6teV|gF`jADy&3=B!QF0hTG3V*4nm+crt&ijwk^jv-Zf>9Grequzl_oKV zvthFN4;ch<5XEteaS{fj5<%KT7Sp#gLH{DZ1mddQQes-%(V-~P(eAHhhYrHu+8ybr zuq_?ZtS8O=k&cuU>1gY&Udyk;9E~%Ed51YdES}_K-M_1cn3X`U?COtkME26ey?FcL z$-dsXeLEJ?#iD?q8L-Ch4$OTEerYZqg^N?C!|ZG`=E-fMP#qFdZI;I|`A`@Qg580< zKt8ovJ{`MA0k|FjilrUc@0PN2=k_nWO9_m9>+Akf)T%VLnJk4orI=&`hpPKasZN|; ze{(5AQJ2Z63-RA>bUovRLQK0YwS~t4ty9-zx%~56|JVh70$M(u)+jfyibn6~^_zB_ zI(4cH(d+L(MBxl6J%Zq9DuAy}YU5qmMqyPIKPt zd*{?;%a+|Sd&-C*V+J~=62 zQXu=fb_AR4Qf7hi6B9Z-5JoDXwb(Y!CxW~Z&* zYJohe;<~8swY9g@mYzSpecQHeZ@*5F1)?KtR;%^8JbXmPt+%dR`M{dTo__wXfBox| ztMAU5bH~gGwvuv)N=Y4?niQ%*=qf>N2#Xs$&Wn)I%O?*XK72?@Qd|rz^D zkqa~D%%1zq3s1wISKZku?`dspXSx8LsPDG*IOQsZpn@NRLK~(Bhz>Ic-8&Ai2@@v) zj%n)D>GPJ{|LD_CKfUh7>0k}sgk$Y1><#vc2^(4D)!tTH)nJq9)Y*Q#0)`{%-%vgc zl&5%6vq1SYP(BTmk1DMMpW4;dPz@0_MB3JdHqyp-HTLd^w!3Wvq0e$V8HG{(5NL+^uEQ2Y&sq4g9m|-HV!g|=S|SU zMX1#+6rHH5{(3j~LPw|d%5kqg%Z)ZMR;_(@2DVhMB7_vBP0XZx=7E)u ztQgH!Q9e;A6SEd%rKiS4Mh-|!P8l-Bs|BLQtI$X~|BxscgeN7%MTYo87SkRTAD3|J zUGpFSjp*#u^mKK>GuTh3RKf>G7vSsFxufO}!%)x#CqR9Zk(rr+7tAN-WGz~;YRU2? zYzp1q-d26Ju(+z(*3;9{VuR|>i|EoUL*suUXij>1ve_F7nj0_ug7M!_QIt>c0WFq` z=ic7@Ev9@q9FloOuc#@l=)n$GXsx+yRhbp9?!@@7!uVf^@jqQk6s%+=0$8pz>J9=2-f!*%`@N83VadnQ#Pz;9en>X&7x_tTaJLgUrHgwz|k=2W$5Ev8b zFRBA$r9k4NM|0gd#>raLR+*X-0|!&vfRvPhw=Z2F=GxHb$jM}sjwEGO{3 zG;k=}|&T6Cw+k+Z|z(JrckVhq^41cZk>6|S{!4CpWubQl9V zi~$|UHaXO&PEJlviVM?^oR&5ey6(u~1A+~gue7s~Vz9rzTB-H*vA2%PNFQwQ>TxQG z9+xb2we!%`7J(+Sy{K|OW=RiKHjBa3)vO)z0OR4(#!~NI7FxC+L18|9@6ApA5ktnO zrKOD>9IN*W)s|so;;yKa?6er%*$KWmcE*EGOo;%>t5;779T5#4y}7+trRpR(-RN^mld8pw*QA8X1O(g z{YP%Y%hO@9@d$KOlR;eac)!OrXp0Mh4bb6~$yD;Ls&j|E25r>EogaMg!S+2TuQhgc zMMlOX3>prU#gym(8LxF-q`^is9W!OZ&?rBsnp8AhdJ{IP;T$Xpe!fytQPMHP^SC^rD-=5q77ncNI+GPc?v88t&wb#;B44L-0SGBkO=ZVQqN#G@_9 zi-7!U<8c`u6Q&WhCFPY4Kdz-|*P%AHOY~F0V9%fF5kJAeI%y0W8roYrweM_P|NYf7 zKYq7q^G|G(C~9xCwzB+P7_6_*VCCil5O&>62{2(B!Wq3Si2^q9f18Vjr_1BB?qmZ} zAqN;k=L#Ac>ka4b2fgq z{e#UruuT)Smc9RB;VO(}tyW`?%uWn zlh;E8Zc2&Y^3T^Y5QOGAa5Ia!SFt?zbvA25OG`uLl{06`Z#Zw%ocVuuwsAAFZpoZ5 zEG1>+q{$Pd=LDmXZK*q^q;FRbI2y+Tfd@ z=xlCmv)|O&f;btZNR%pAFBid0;_mcxwjo?17mk2Qeo_kJ)LacY(;IyKEjZCHZzJiC9%Yd9M;GIgw_UY>X=b%) zERV}7l60vIu2;r{Yj#Sxg8V13O@8W#&O&_wEvcbtL$LJU=QYnJz)T^liWi41L%Un4N`ana7RK`s; zr2MFUa#sx>G_!a!Y&lFUQDny?-!Mzx4wYs&1{y4qNW^_* ze~+b|GWqOzON~uXvr77=bgY7p@C9R_d0M-cA9fB$qmlZmiJLqbV)wq5qn`i!`CPx= zmrs^6%azaExvqTn?|0>c{5<)P?Nao=C@lEP98m+Z-z1p-u_sSpNF_$ zA3u*hic>JkeaQs5I>Z&@`8~4rh>rwMFNG*{_OctEAJ=cVc?v3pv{^To0+@|1Q8;(< z*eTosN?brlvvT(7+&92CcUvL!P$a#EJa;6EM_60)}8oVOc}{)gvFg+tA!z*-&3q zUeg^Mb^gRDNJxT1L*qxl3wmTk1W-G%YlBVT#q#nS@RLjjVB==Zx~JAGoi$>__j|(T zu9=M;(!1GHJ$>3Rdi3bQQ4q7Ij2zi@!mRc7 zTgZb02rT;%%LBr61FW^ZDgo0iU2;-#a$;N}*x2NOLt=CY{n66Wj3^us^SQmDh!%u4 zaFb;j22e%Cm144Lf=Qv;QrgqK=zeM8xqV=ZcV zdxBYu9a~$M&mF7b>CPU29?mau!sLk)7Y$N7%vv%BsA_E>WHMMIsyn-?ip#2QI2t=U e%~}iuoh~$xLRo=ZP+Q8*9y@uqqUH)pW&aByuM@uj literal 0 HcmV?d00001 diff --git a/proxy/web/dist/assets/favicon.ico b/proxy/web/dist/assets/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..50bb809661773efc9811f9e092610530bc1edc9c GIT binary patch literal 15086 zcmeI3dyHIF9mmhI(DG=ztAgSKb`AJ|5PD}!G*Pk0!vsPUNKj&+yRB`hEtD4eu++M< zJ4n%Jjgb1@&Yj2J z_{U80-QRtjbAI37?>z1~zjM}@KGSa;#~_Ezx8GsRdyO%}!@>QaF@NFNAX4wwpK8o~ zBzQMz$YMr;o_oIwebM{d^FYr7JrDf9d%($-E(IIGhAv=ZYMXmYdZe&4DZ8Dln}+ux zQ0^GJ+;VD7aT@-s65~I1xDVcAV74RvpLNUWwZ(FJWkEQQ8ZDfcmb&yVuzq<_XPqoB<4DO$<@w?^j z%aOuk&YHsFWA@a|MIU+pOKgiS@*KkR`&?~FS>2SX+NZ4ZdY(VU%DX){?48$ z&5K6quT#OCrZXGxfy4aHxwn*~+_M+j26TK3{L^86kY3XT|5Jv;_nI;_uHCti{3-t$ zkO$tJI(0YgKV};VO?NA8hwq)kI1rTid*nM8!Uom@AExX-Tkf9JTp@gp7TYfRk=l;s zFS*y5INsk)8{qyv$~{=a-PXYl>Y&|zyqdPNyN0{?9|o6oi$7((2mBgT`>1lQZ$R8R zPhDHne4|0R-{bj`PHn*N7g5g78tAC#X8cIw%g{Dn$9XQzs$kp+_lv%dH+RCH{1<}f zn{v19NOOorTtl9FzOVG#iZ3em{1n~iwqt`c;SQ1a*|^-jbQ(|B#;gO4nXE0Fl>cAe z?`V}jZRSnnQ~PUH{=D}ZOPbDw=)fs0XW%suW`i?PJd<}{04F=)``}i_ zV8u^=4!Xr(<%~O-6LYaU?ym-0!K2_u;E&)HPylLA8iT9HB3aAKkLyG4v)EyPG(^2=42_-@9bPB;_O)yYuUzcnUf{02D{{ zV;7y(#}6oH!PlDAZOOAE>n=s_N5K=|Pe8V)Pqo^%G@jm5Uw%^etglW(|M}o5K>sS; z)gtaD{VhrH?dj=xpyz>}2kJdg_I}2+ER7i`m&ungiv!v3$(*sW&%4hXE73vvN$CR9 z#*77Wh2)GeRu1WY))*@Xg7=;z-(uZUPQPS6kU7pd={^%k8OlAyc#`5#Xbjm=WDMCo znU@?gCRZ(4Wn?&WWtgGl%n%k*M7fM*p3Dbwl7m%xJ`+eTz5AA=&S?#U%fZ|^twC;j zzn%voJiy%H46qG63>2Sej!fxc*5$XSKR&f6qUhMK(5ZEk)79M5QMB@qc(pQ#;utC4>8 z#Tv!t4-m8O$~hc#Yi~g7_L_E}7@^`m4P&|^1xDn~f6l~|)<%1(^`}15M7JMLK73y2LUe|%x<9JT z@f{}q7G}GB7J3c=or<%63A_eu+DMtQdl}*&yS4Wcty|^EwoTPOAWp9DTox$q6RET2 zz2+U8inBVysj;EvU358rAT9q{;s3ba4I;nub!BT_bQnC%KHqj?@QM?+qF-tEI$3`o zpKHZ)6~p{8(Agi=(E*@u&Oz`SpfO0XbMlo&!KJ`E(}P?GTH!DK{{kPhKCm)~37<(l z4sy*L%zY#B4WwOy4BaQgjzzpzjO++##SgNz<9&2-4|rcqc^b2h#pI#*04zkMew1H}tk!2K5{Jo6xR~UcP7#5LBr7%@8e_Awn`k7-rUk!Y~5i zmkc47oIrRYVEZ(j2<>$i(zl<^Vr|Hmt1{#pEeoEeSuApK5z3=ipSUC5b+qkK=VXv!f|x}393 z$HL};%u}CQ{*~hm{~re3XM_FB7iQ<>*F0jUJI&m6Wf%wYRn9`N{~Y+upNDBKfFFZ> z>0gret<`04zXW^@%*}DUwWOU(Nq6P^iRNItZGNuRX6^@?(^u+F^8lU651;@1m9)!M z267zKe<$@V|2J!^msl$uZ7_fNI%(8qwHE#axC3kln}N<7JInTv-=3i@1 z@g{3WBS{*yiEF_(f!ej=AHqxEfkyf2pOaSmy6RW*m&IB2=~}(_W1z<5S=r}T5N93o ZF-=T$666(A1sU~VzAEK$kOO5O{{yZNCTIWv literal 0 HcmV?d00001 diff --git a/proxy/web/dist/assets/index.js b/proxy/web/dist/assets/index.js new file mode 100644 index 000000000..9ce3e4394 --- /dev/null +++ b/proxy/web/dist/assets/index.js @@ -0,0 +1,9 @@ +(function(){const v=document.createElement("link").relList;if(v&&v.supports&&v.supports("modulepreload"))return;for(const _ of document.querySelectorAll('link[rel="modulepreload"]'))f(_);new MutationObserver(_=>{for(const O of _)if(O.type==="childList")for(const D of O.addedNodes)D.tagName==="LINK"&&D.rel==="modulepreload"&&f(D)}).observe(document,{childList:!0,subtree:!0});function S(_){const O={};return _.integrity&&(O.integrity=_.integrity),_.referrerPolicy&&(O.referrerPolicy=_.referrerPolicy),_.crossOrigin==="use-credentials"?O.credentials="include":_.crossOrigin==="anonymous"?O.credentials="omit":O.credentials="same-origin",O}function f(_){if(_.ep)return;_.ep=!0;const O=S(_);fetch(_.href,O)}})();var Sf={exports:{}},Du={};var Yd;function jm(){if(Yd)return Du;Yd=1;var r=Symbol.for("react.transitional.element"),v=Symbol.for("react.fragment");function S(f,_,O){var D=null;if(O!==void 0&&(D=""+O),_.key!==void 0&&(D=""+_.key),"key"in _){O={};for(var U in _)U!=="key"&&(O[U]=_[U])}else O=_;return _=O.ref,{$$typeof:r,type:f,key:D,ref:_!==void 0?_:null,props:O}}return Du.Fragment=v,Du.jsx=S,Du.jsxs=S,Du}var Gd;function Rm(){return Gd||(Gd=1,Sf.exports=jm()),Sf.exports}var A=Rm(),xf={exports:{}},K={};var Xd;function Hm(){if(Xd)return K;Xd=1;var r=Symbol.for("react.transitional.element"),v=Symbol.for("react.portal"),S=Symbol.for("react.fragment"),f=Symbol.for("react.strict_mode"),_=Symbol.for("react.profiler"),O=Symbol.for("react.consumer"),D=Symbol.for("react.context"),U=Symbol.for("react.forward_ref"),N=Symbol.for("react.suspense"),p=Symbol.for("react.memo"),R=Symbol.for("react.lazy"),H=Symbol.for("react.activity"),V=Symbol.iterator;function st(s){return s===null||typeof s!="object"?null:(s=V&&s[V]||s["@@iterator"],typeof s=="function"?s:null)}var ct={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},G=Object.assign,Q={};function L(s,M,j){this.props=s,this.context=M,this.refs=Q,this.updater=j||ct}L.prototype.isReactComponent={},L.prototype.setState=function(s,M){if(typeof s!="object"&&typeof s!="function"&&s!=null)throw Error("takes an object of state variables to update or a function which returns an object of state variables.");this.updater.enqueueSetState(this,s,M,"setState")},L.prototype.forceUpdate=function(s){this.updater.enqueueForceUpdate(this,s,"forceUpdate")};function gt(){}gt.prototype=L.prototype;function zt(s,M,j){this.props=s,this.context=M,this.refs=Q,this.updater=j||ct}var _t=zt.prototype=new gt;_t.constructor=zt,G(_t,L.prototype),_t.isPureReactComponent=!0;var it=Array.isArray;function Ot(){}var J={H:null,A:null,T:null,S:null},Rt=Object.prototype.hasOwnProperty;function It(s,M,j){var q=j.ref;return{$$typeof:r,type:s,key:M,ref:q!==void 0?q:null,props:j}}function jl(s,M){return It(s.type,M,s.props)}function Pt(s){return typeof s=="object"&&s!==null&&s.$$typeof===r}function I(s){var M={"=":"=0",":":"=2"};return"$"+s.replace(/[=:]/g,function(j){return M[j]})}var Rl=/\/+/g;function tl(s,M){return typeof s=="object"&&s!==null&&s.key!=null?I(""+s.key):M.toString(36)}function ll(s){switch(s.status){case"fulfilled":return s.value;case"rejected":throw s.reason;default:switch(typeof s.status=="string"?s.then(Ot,Ot):(s.status="pending",s.then(function(M){s.status==="pending"&&(s.status="fulfilled",s.value=M)},function(M){s.status==="pending"&&(s.status="rejected",s.reason=M)})),s.status){case"fulfilled":return s.value;case"rejected":throw s.reason}}throw s}function x(s,M,j,q,k){var P=typeof s;(P==="undefined"||P==="boolean")&&(s=null);var yt=!1;if(s===null)yt=!0;else switch(P){case"bigint":case"string":case"number":yt=!0;break;case"object":switch(s.$$typeof){case r:case v:yt=!0;break;case R:return yt=s._init,x(yt(s._payload),M,j,q,k)}}if(yt)return k=k(s),yt=q===""?"."+tl(s,0):q,it(k)?(j="",yt!=null&&(j=yt.replace(Rl,"$&/")+"/"),x(k,M,j,"",function(qa){return qa})):k!=null&&(Pt(k)&&(k=jl(k,j+(k.key==null||s&&s.key===k.key?"":(""+k.key).replace(Rl,"$&/")+"/")+yt)),M.push(k)),1;yt=0;var Wt=q===""?".":q+":";if(it(s))for(var Ut=0;Ut>>1,dt=x[nt];if(0<_(dt,C))x[nt]=C,x[Z]=dt,Z=nt;else break t}}function S(x){return x.length===0?null:x[0]}function f(x){if(x.length===0)return null;var C=x[0],Z=x.pop();if(Z!==C){x[0]=Z;t:for(var nt=0,dt=x.length,s=dt>>>1;nt_(j,Z))q_(k,j)?(x[nt]=k,x[q]=Z,nt=q):(x[nt]=j,x[M]=Z,nt=M);else if(q_(k,Z))x[nt]=k,x[q]=Z,nt=q;else break t}}return C}function _(x,C){var Z=x.sortIndex-C.sortIndex;return Z!==0?Z:x.id-C.id}if(r.unstable_now=void 0,typeof performance=="object"&&typeof performance.now=="function"){var O=performance;r.unstable_now=function(){return O.now()}}else{var D=Date,U=D.now();r.unstable_now=function(){return D.now()-U}}var N=[],p=[],R=1,H=null,V=3,st=!1,ct=!1,G=!1,Q=!1,L=typeof setTimeout=="function"?setTimeout:null,gt=typeof clearTimeout=="function"?clearTimeout:null,zt=typeof setImmediate<"u"?setImmediate:null;function _t(x){for(var C=S(p);C!==null;){if(C.callback===null)f(p);else if(C.startTime<=x)f(p),C.sortIndex=C.expirationTime,v(N,C);else break;C=S(p)}}function it(x){if(G=!1,_t(x),!ct)if(S(N)!==null)ct=!0,Ot||(Ot=!0,I());else{var C=S(p);C!==null&&ll(it,C.startTime-x)}}var Ot=!1,J=-1,Rt=5,It=-1;function jl(){return Q?!0:!(r.unstable_now()-Itx&&jl());){var nt=H.callback;if(typeof nt=="function"){H.callback=null,V=H.priorityLevel;var dt=nt(H.expirationTime<=x);if(x=r.unstable_now(),typeof dt=="function"){H.callback=dt,_t(x),C=!0;break l}H===S(N)&&f(N),_t(x)}else f(N);H=S(N)}if(H!==null)C=!0;else{var s=S(p);s!==null&&ll(it,s.startTime-x),C=!1}}break t}finally{H=null,V=Z,st=!1}C=void 0}}finally{C?I():Ot=!1}}}var I;if(typeof zt=="function")I=function(){zt(Pt)};else if(typeof MessageChannel<"u"){var Rl=new MessageChannel,tl=Rl.port2;Rl.port1.onmessage=Pt,I=function(){tl.postMessage(null)}}else I=function(){L(Pt,0)};function ll(x,C){J=L(function(){x(r.unstable_now())},C)}r.unstable_IdlePriority=5,r.unstable_ImmediatePriority=1,r.unstable_LowPriority=4,r.unstable_NormalPriority=3,r.unstable_Profiling=null,r.unstable_UserBlockingPriority=2,r.unstable_cancelCallback=function(x){x.callback=null},r.unstable_forceFrameRate=function(x){0>x||125nt?(x.sortIndex=Z,v(p,x),S(N)===null&&x===S(p)&&(G?(gt(J),J=-1):G=!0,ll(it,Z-nt))):(x.sortIndex=dt,v(N,x),ct||st||(ct=!0,Ot||(Ot=!0,I()))),x},r.unstable_shouldYield=jl,r.unstable_wrapCallback=function(x){var C=V;return function(){var Z=V;V=C;try{return x.apply(this,arguments)}finally{V=Z}}}})(Ef)),Ef}var wd;function qm(){return wd||(wd=1,Tf.exports=Bm()),Tf.exports}var Af={exports:{}},kt={};var Ld;function Ym(){if(Ld)return kt;Ld=1;var r=Rf();function v(N){var p="https://react.dev/errors/"+N;if(1"u"||typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE!="function"))try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(r)}catch(v){console.error(v)}}return r(),Af.exports=Ym(),Af.exports}var Kd;function Xm(){if(Kd)return Uu;Kd=1;var r=qm(),v=Rf(),S=Gm();function f(t){var l="https://react.dev/errors/"+t;if(1dt||(t.current=nt[dt],nt[dt]=null,dt--)}function j(t,l){dt++,nt[dt]=t.current,t.current=l}var q=s(null),k=s(null),P=s(null),yt=s(null);function Wt(t,l){switch(j(P,l),j(k,t),j(q,null),l.nodeType){case 9:case 11:t=(t=l.documentElement)&&(t=t.namespaceURI)?cd(t):0;break;default:if(t=l.tagName,l=l.namespaceURI)l=cd(l),t=fd(l,t);else switch(t){case"svg":t=1;break;case"math":t=2;break;default:t=0}}M(q),j(q,t)}function Ut(){M(q),M(k),M(P)}function qa(t){t.memoizedState!==null&&j(yt,t);var l=q.current,e=fd(l,t.type);l!==e&&(j(k,t),j(q,e))}function Hu(t){k.current===t&&(M(q),M(k)),yt.current===t&&(M(yt),Mu._currentValue=Z)}var li,Bf;function Ue(t){if(li===void 0)try{throw Error()}catch(e){var l=e.stack.trim().match(/\n( *(at )?)/);li=l&&l[1]||"",Bf=-1)":-1u||o[a]!==h[u]){var z=` +`+o[a].replace(" at new "," at ");return t.displayName&&z.includes("")&&(z=z.replace("",t.displayName)),z}while(1<=a&&0<=u);break}}}finally{ei=!1,Error.prepareStackTrace=e}return(e=t?t.displayName||t.name:"")?Ue(e):""}function o0(t,l){switch(t.tag){case 26:case 27:case 5:return Ue(t.type);case 16:return Ue("Lazy");case 13:return t.child!==l&&l!==null?Ue("Suspense Fallback"):Ue("Suspense");case 19:return Ue("SuspenseList");case 0:case 15:return ai(t.type,!1);case 11:return ai(t.type.render,!1);case 1:return ai(t.type,!0);case 31:return Ue("Activity");default:return""}}function qf(t){try{var l="",e=null;do l+=o0(t,e),e=t,t=t.return;while(t);return l}catch(a){return` +Error generating stack: `+a.message+` +`+a.stack}}var ui=Object.prototype.hasOwnProperty,ni=r.unstable_scheduleCallback,ii=r.unstable_cancelCallback,s0=r.unstable_shouldYield,d0=r.unstable_requestPaint,rl=r.unstable_now,y0=r.unstable_getCurrentPriorityLevel,Yf=r.unstable_ImmediatePriority,Gf=r.unstable_UserBlockingPriority,Bu=r.unstable_NormalPriority,m0=r.unstable_LowPriority,Xf=r.unstable_IdlePriority,h0=r.log,g0=r.unstable_setDisableYieldValue,Ya=null,ol=null;function ue(t){if(typeof h0=="function"&&g0(t),ol&&typeof ol.setStrictMode=="function")try{ol.setStrictMode(Ya,t)}catch{}}var sl=Math.clz32?Math.clz32:p0,v0=Math.log,b0=Math.LN2;function p0(t){return t>>>=0,t===0?32:31-(v0(t)/b0|0)|0}var qu=256,Yu=262144,Gu=4194304;function Ce(t){var l=t&42;if(l!==0)return l;switch(t&-t){case 1:return 1;case 2:return 2;case 4:return 4;case 8:return 8;case 16:return 16;case 32:return 32;case 64:return 64;case 128:return 128;case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:return t&261888;case 262144:case 524288:case 1048576:case 2097152:return t&3932160;case 4194304:case 8388608:case 16777216:case 33554432:return t&62914560;case 67108864:return 67108864;case 134217728:return 134217728;case 268435456:return 268435456;case 536870912:return 536870912;case 1073741824:return 0;default:return t}}function Xu(t,l,e){var a=t.pendingLanes;if(a===0)return 0;var u=0,n=t.suspendedLanes,i=t.pingedLanes;t=t.warmLanes;var c=a&134217727;return c!==0?(a=c&~n,a!==0?u=Ce(a):(i&=c,i!==0?u=Ce(i):e||(e=c&~t,e!==0&&(u=Ce(e))))):(c=a&~n,c!==0?u=Ce(c):i!==0?u=Ce(i):e||(e=a&~t,e!==0&&(u=Ce(e)))),u===0?0:l!==0&&l!==u&&(l&n)===0&&(n=u&-u,e=l&-l,n>=e||n===32&&(e&4194048)!==0)?l:u}function Ga(t,l){return(t.pendingLanes&~(t.suspendedLanes&~t.pingedLanes)&l)===0}function S0(t,l){switch(t){case 1:case 2:case 4:case 8:case 64:return l+250;case 16:case 32:case 128:case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return l+5e3;case 4194304:case 8388608:case 16777216:case 33554432:return-1;case 67108864:case 134217728:case 268435456:case 536870912:case 1073741824:return-1;default:return-1}}function Qf(){var t=Gu;return Gu<<=1,(Gu&62914560)===0&&(Gu=4194304),t}function ci(t){for(var l=[],e=0;31>e;e++)l.push(t);return l}function Xa(t,l){t.pendingLanes|=l,l!==268435456&&(t.suspendedLanes=0,t.pingedLanes=0,t.warmLanes=0)}function x0(t,l,e,a,u,n){var i=t.pendingLanes;t.pendingLanes=e,t.suspendedLanes=0,t.pingedLanes=0,t.warmLanes=0,t.expiredLanes&=e,t.entangledLanes&=e,t.errorRecoveryDisabledLanes&=e,t.shellSuspendCounter=0;var c=t.entanglements,o=t.expirationTimes,h=t.hiddenUpdates;for(e=i&~e;0"u")return null;try{return t.activeElement||t.body}catch{return t.body}}var _0=/[\n"\\]/g;function Sl(t){return t.replace(_0,function(l){return"\\"+l.charCodeAt(0).toString(16)+" "})}function yi(t,l,e,a,u,n,i,c){t.name="",i!=null&&typeof i!="function"&&typeof i!="symbol"&&typeof i!="boolean"?t.type=i:t.removeAttribute("type"),l!=null?i==="number"?(l===0&&t.value===""||t.value!=l)&&(t.value=""+pl(l)):t.value!==""+pl(l)&&(t.value=""+pl(l)):i!=="submit"&&i!=="reset"||t.removeAttribute("value"),l!=null?mi(t,i,pl(l)):e!=null?mi(t,i,pl(e)):a!=null&&t.removeAttribute("value"),u==null&&n!=null&&(t.defaultChecked=!!n),u!=null&&(t.checked=u&&typeof u!="function"&&typeof u!="symbol"),c!=null&&typeof c!="function"&&typeof c!="symbol"&&typeof c!="boolean"?t.name=""+pl(c):t.removeAttribute("name")}function tr(t,l,e,a,u,n,i,c){if(n!=null&&typeof n!="function"&&typeof n!="symbol"&&typeof n!="boolean"&&(t.type=n),l!=null||e!=null){if(!(n!=="submit"&&n!=="reset"||l!=null)){di(t);return}e=e!=null?""+pl(e):"",l=l!=null?""+pl(l):e,c||l===t.value||(t.value=l),t.defaultValue=l}a=a??u,a=typeof a!="function"&&typeof a!="symbol"&&!!a,t.checked=c?t.checked:!!a,t.defaultChecked=!!a,i!=null&&typeof i!="function"&&typeof i!="symbol"&&typeof i!="boolean"&&(t.name=i),di(t)}function mi(t,l,e){l==="number"&&wu(t.ownerDocument)===t||t.defaultValue===""+e||(t.defaultValue=""+e)}function ea(t,l,e,a){if(t=t.options,l){l={};for(var u=0;u"u"||typeof window.document>"u"||typeof window.document.createElement>"u"),pi=!1;if(Ql)try{var La={};Object.defineProperty(La,"passive",{get:function(){pi=!0}}),window.addEventListener("test",La,La),window.removeEventListener("test",La,La)}catch{pi=!1}var ie=null,Si=null,Vu=null;function cr(){if(Vu)return Vu;var t,l=Si,e=l.length,a,u="value"in ie?ie.value:ie.textContent,n=u.length;for(t=0;t=Ja),yr=" ",mr=!1;function hr(t,l){switch(t){case"keyup":return ly.indexOf(l.keyCode)!==-1;case"keydown":return l.keyCode!==229;case"keypress":case"mousedown":case"focusout":return!0;default:return!1}}function gr(t){return t=t.detail,typeof t=="object"&&"data"in t?t.data:null}var ia=!1;function ay(t,l){switch(t){case"compositionend":return gr(l);case"keypress":return l.which!==32?null:(mr=!0,yr);case"textInput":return t=l.data,t===yr&&mr?null:t;default:return null}}function uy(t,l){if(ia)return t==="compositionend"||!Ai&&hr(t,l)?(t=cr(),Vu=Si=ie=null,ia=!1,t):null;switch(t){case"paste":return null;case"keypress":if(!(l.ctrlKey||l.altKey||l.metaKey)||l.ctrlKey&&l.altKey){if(l.char&&1=l)return{node:e,offset:l-t};t=a}t:{for(;e;){if(e.nextSibling){e=e.nextSibling;break t}e=e.parentNode}e=void 0}e=Er(e)}}function Mr(t,l){return t&&l?t===l?!0:t&&t.nodeType===3?!1:l&&l.nodeType===3?Mr(t,l.parentNode):"contains"in t?t.contains(l):t.compareDocumentPosition?!!(t.compareDocumentPosition(l)&16):!1:!1}function _r(t){t=t!=null&&t.ownerDocument!=null&&t.ownerDocument.defaultView!=null?t.ownerDocument.defaultView:window;for(var l=wu(t.document);l instanceof t.HTMLIFrameElement;){try{var e=typeof l.contentWindow.location.href=="string"}catch{e=!1}if(e)t=l.contentWindow;else break;l=wu(t.document)}return l}function Oi(t){var l=t&&t.nodeName&&t.nodeName.toLowerCase();return l&&(l==="input"&&(t.type==="text"||t.type==="search"||t.type==="tel"||t.type==="url"||t.type==="password")||l==="textarea"||t.contentEditable==="true")}var dy=Ql&&"documentMode"in document&&11>=document.documentMode,ca=null,Ni=null,Fa=null,Di=!1;function Or(t,l,e){var a=e.window===e?e.document:e.nodeType===9?e:e.ownerDocument;Di||ca==null||ca!==wu(a)||(a=ca,"selectionStart"in a&&Oi(a)?a={start:a.selectionStart,end:a.selectionEnd}:(a=(a.ownerDocument&&a.ownerDocument.defaultView||window).getSelection(),a={anchorNode:a.anchorNode,anchorOffset:a.anchorOffset,focusNode:a.focusNode,focusOffset:a.focusOffset}),Fa&&$a(Fa,a)||(Fa=a,a=Gn(Ni,"onSelect"),0>=i,u-=i,Hl=1<<32-sl(l)+u|e<$?(at=Y,Y=null):at=Y.sibling;var rt=g(y,Y,m[$],T);if(rt===null){Y===null&&(Y=at);break}t&&Y&&rt.alternate===null&&l(y,Y),d=n(rt,d,$),ft===null?X=rt:ft.sibling=rt,ft=rt,Y=at}if($===m.length)return e(y,Y),ut&&wl(y,$),X;if(Y===null){for(;$$?(at=Y,Y=null):at=Y.sibling;var Oe=g(y,Y,rt.value,T);if(Oe===null){Y===null&&(Y=at);break}t&&Y&&Oe.alternate===null&&l(y,Y),d=n(Oe,d,$),ft===null?X=Oe:ft.sibling=Oe,ft=Oe,Y=at}if(rt.done)return e(y,Y),ut&&wl(y,$),X;if(Y===null){for(;!rt.done;$++,rt=m.next())rt=E(y,rt.value,T),rt!==null&&(d=n(rt,d,$),ft===null?X=rt:ft.sibling=rt,ft=rt);return ut&&wl(y,$),X}for(Y=a(Y);!rt.done;$++,rt=m.next())rt=b(Y,y,$,rt.value,T),rt!==null&&(t&&rt.alternate!==null&&Y.delete(rt.key===null?$:rt.key),d=n(rt,d,$),ft===null?X=rt:ft.sibling=rt,ft=rt);return t&&Y.forEach(function(Cm){return l(y,Cm)}),ut&&wl(y,$),X}function pt(y,d,m,T){if(typeof m=="object"&&m!==null&&m.type===G&&m.key===null&&(m=m.props.children),typeof m=="object"&&m!==null){switch(m.$$typeof){case st:t:{for(var X=m.key;d!==null;){if(d.key===X){if(X=m.type,X===G){if(d.tag===7){e(y,d.sibling),T=u(d,m.props.children),T.return=y,y=T;break t}}else if(d.elementType===X||typeof X=="object"&&X!==null&&X.$$typeof===Rt&&we(X)===d.type){e(y,d.sibling),T=u(d,m.props),au(T,m),T.return=y,y=T;break t}e(y,d);break}else l(y,d);d=d.sibling}m.type===G?(T=Ye(m.props.children,y.mode,T,m.key),T.return=y,y=T):(T=ln(m.type,m.key,m.props,null,y.mode,T),au(T,m),T.return=y,y=T)}return i(y);case ct:t:{for(X=m.key;d!==null;){if(d.key===X)if(d.tag===4&&d.stateNode.containerInfo===m.containerInfo&&d.stateNode.implementation===m.implementation){e(y,d.sibling),T=u(d,m.children||[]),T.return=y,y=T;break t}else{e(y,d);break}else l(y,d);d=d.sibling}T=qi(m,y.mode,T),T.return=y,y=T}return i(y);case Rt:return m=we(m),pt(y,d,m,T)}if(ll(m))return B(y,d,m,T);if(I(m)){if(X=I(m),typeof X!="function")throw Error(f(150));return m=X.call(m),w(y,d,m,T)}if(typeof m.then=="function")return pt(y,d,rn(m),T);if(m.$$typeof===zt)return pt(y,d,un(y,m),T);on(y,m)}return typeof m=="string"&&m!==""||typeof m=="number"||typeof m=="bigint"?(m=""+m,d!==null&&d.tag===6?(e(y,d.sibling),T=u(d,m),T.return=y,y=T):(e(y,d),T=Bi(m,y.mode,T),T.return=y,y=T),i(y)):e(y,d)}return function(y,d,m,T){try{eu=0;var X=pt(y,d,m,T);return ba=null,X}catch(Y){if(Y===va||Y===cn)throw Y;var ft=yl(29,Y,null,y.mode);return ft.lanes=T,ft.return=y,ft}}}var Ve=Fr(!0),Ir=Fr(!1),se=!1;function Wi(t){t.updateQueue={baseState:t.memoizedState,firstBaseUpdate:null,lastBaseUpdate:null,shared:{pending:null,lanes:0,hiddenCallbacks:null},callbacks:null}}function $i(t,l){t=t.updateQueue,l.updateQueue===t&&(l.updateQueue={baseState:t.baseState,firstBaseUpdate:t.firstBaseUpdate,lastBaseUpdate:t.lastBaseUpdate,shared:t.shared,callbacks:null})}function de(t){return{lane:t,tag:0,payload:null,callback:null,next:null}}function ye(t,l,e){var a=t.updateQueue;if(a===null)return null;if(a=a.shared,(ot&2)!==0){var u=a.pending;return u===null?l.next=l:(l.next=u.next,u.next=l),a.pending=l,l=tn(t),Hr(t,null,e),l}return Pu(t,a,l,e),tn(t)}function uu(t,l,e){if(l=l.updateQueue,l!==null&&(l=l.shared,(e&4194048)!==0)){var a=l.lanes;a&=t.pendingLanes,e|=a,l.lanes=e,wf(t,e)}}function Fi(t,l){var e=t.updateQueue,a=t.alternate;if(a!==null&&(a=a.updateQueue,e===a)){var u=null,n=null;if(e=e.firstBaseUpdate,e!==null){do{var i={lane:e.lane,tag:e.tag,payload:e.payload,callback:null,next:null};n===null?u=n=i:n=n.next=i,e=e.next}while(e!==null);n===null?u=n=l:n=n.next=l}else u=n=l;e={baseState:a.baseState,firstBaseUpdate:u,lastBaseUpdate:n,shared:a.shared,callbacks:a.callbacks},t.updateQueue=e;return}t=e.lastBaseUpdate,t===null?e.firstBaseUpdate=l:t.next=l,e.lastBaseUpdate=l}var Ii=!1;function nu(){if(Ii){var t=ga;if(t!==null)throw t}}function iu(t,l,e,a){Ii=!1;var u=t.updateQueue;se=!1;var n=u.firstBaseUpdate,i=u.lastBaseUpdate,c=u.shared.pending;if(c!==null){u.shared.pending=null;var o=c,h=o.next;o.next=null,i===null?n=h:i.next=h,i=o;var z=t.alternate;z!==null&&(z=z.updateQueue,c=z.lastBaseUpdate,c!==i&&(c===null?z.firstBaseUpdate=h:c.next=h,z.lastBaseUpdate=o))}if(n!==null){var E=u.baseState;i=0,z=h=o=null,c=n;do{var g=c.lane&-536870913,b=g!==c.lane;if(b?(et&g)===g:(a&g)===g){g!==0&&g===ha&&(Ii=!0),z!==null&&(z=z.next={lane:0,tag:c.tag,payload:c.payload,callback:null,next:null});t:{var B=t,w=c;g=l;var pt=e;switch(w.tag){case 1:if(B=w.payload,typeof B=="function"){E=B.call(pt,E,g);break t}E=B;break t;case 3:B.flags=B.flags&-65537|128;case 0:if(B=w.payload,g=typeof B=="function"?B.call(pt,E,g):B,g==null)break t;E=H({},E,g);break t;case 2:se=!0}}g=c.callback,g!==null&&(t.flags|=64,b&&(t.flags|=8192),b=u.callbacks,b===null?u.callbacks=[g]:b.push(g))}else b={lane:g,tag:c.tag,payload:c.payload,callback:c.callback,next:null},z===null?(h=z=b,o=E):z=z.next=b,i|=g;if(c=c.next,c===null){if(c=u.shared.pending,c===null)break;b=c,c=b.next,b.next=null,u.lastBaseUpdate=b,u.shared.pending=null}}while(!0);z===null&&(o=E),u.baseState=o,u.firstBaseUpdate=h,u.lastBaseUpdate=z,n===null&&(u.shared.lanes=0),be|=i,t.lanes=i,t.memoizedState=E}}function Pr(t,l){if(typeof t!="function")throw Error(f(191,t));t.call(l)}function to(t,l){var e=t.callbacks;if(e!==null)for(t.callbacks=null,t=0;tn?n:8;var i=x.T,c={};x.T=c,vc(t,!1,l,e);try{var o=u(),h=x.S;if(h!==null&&h(c,o),o!==null&&typeof o=="object"&&typeof o.then=="function"){var z=xy(o,a);ru(t,l,z,bl(t))}else ru(t,l,a,bl(t))}catch(E){ru(t,l,{then:function(){},status:"rejected",reason:E},bl())}finally{C.p=n,i!==null&&c.types!==null&&(i.types=c.types),x.T=i}}function _y(){}function hc(t,l,e,a){if(t.tag!==5)throw Error(f(476));var u=jo(t).queue;Co(t,u,l,Z,e===null?_y:function(){return Ro(t),e(a)})}function jo(t){var l=t.memoizedState;if(l!==null)return l;l={memoizedState:Z,baseState:Z,baseQueue:null,queue:{pending:null,lanes:0,dispatch:null,lastRenderedReducer:Jl,lastRenderedState:Z},next:null};var e={};return l.next={memoizedState:e,baseState:e,baseQueue:null,queue:{pending:null,lanes:0,dispatch:null,lastRenderedReducer:Jl,lastRenderedState:e},next:null},t.memoizedState=l,t=t.alternate,t!==null&&(t.memoizedState=l),l}function Ro(t){var l=jo(t);l.next===null&&(l=t.alternate.memoizedState),ru(t,l.next.queue,{},bl())}function gc(){return Vt(Mu)}function Ho(){return jt().memoizedState}function Bo(){return jt().memoizedState}function Oy(t){for(var l=t.return;l!==null;){switch(l.tag){case 24:case 3:var e=bl();t=de(e);var a=ye(l,t,e);a!==null&&(fl(a,l,e),uu(a,l,e)),l={cache:Vi()},t.payload=l;return}l=l.return}}function Ny(t,l,e){var a=bl();e={lane:a,revertLane:0,gesture:null,action:e,hasEagerState:!1,eagerState:null,next:null},Sn(t)?Yo(l,e):(e=Ri(t,l,e,a),e!==null&&(fl(e,t,a),Go(e,l,a)))}function qo(t,l,e){var a=bl();ru(t,l,e,a)}function ru(t,l,e,a){var u={lane:a,revertLane:0,gesture:null,action:e,hasEagerState:!1,eagerState:null,next:null};if(Sn(t))Yo(l,u);else{var n=t.alternate;if(t.lanes===0&&(n===null||n.lanes===0)&&(n=l.lastRenderedReducer,n!==null))try{var i=l.lastRenderedState,c=n(i,e);if(u.hasEagerState=!0,u.eagerState=c,dl(c,i))return Pu(t,l,u,0),St===null&&Iu(),!1}catch{}if(e=Ri(t,l,u,a),e!==null)return fl(e,t,a),Go(e,l,a),!0}return!1}function vc(t,l,e,a){if(a={lane:2,revertLane:Wc(),gesture:null,action:a,hasEagerState:!1,eagerState:null,next:null},Sn(t)){if(l)throw Error(f(479))}else l=Ri(t,e,a,2),l!==null&&fl(l,t,2)}function Sn(t){var l=t.alternate;return t===W||l!==null&&l===W}function Yo(t,l){Sa=yn=!0;var e=t.pending;e===null?l.next=l:(l.next=e.next,e.next=l),t.pending=l}function Go(t,l,e){if((e&4194048)!==0){var a=l.lanes;a&=t.pendingLanes,e|=a,l.lanes=e,wf(t,e)}}var ou={readContext:Vt,use:gn,useCallback:Nt,useContext:Nt,useEffect:Nt,useImperativeHandle:Nt,useLayoutEffect:Nt,useInsertionEffect:Nt,useMemo:Nt,useReducer:Nt,useRef:Nt,useState:Nt,useDebugValue:Nt,useDeferredValue:Nt,useTransition:Nt,useSyncExternalStore:Nt,useId:Nt,useHostTransitionStatus:Nt,useFormState:Nt,useActionState:Nt,useOptimistic:Nt,useMemoCache:Nt,useCacheRefresh:Nt};ou.useEffectEvent=Nt;var Xo={readContext:Vt,use:gn,useCallback:function(t,l){return $t().memoizedState=[t,l===void 0?null:l],t},useContext:Vt,useEffect:To,useImperativeHandle:function(t,l,e){e=e!=null?e.concat([t]):null,bn(4194308,4,_o.bind(null,l,t),e)},useLayoutEffect:function(t,l){return bn(4194308,4,t,l)},useInsertionEffect:function(t,l){bn(4,2,t,l)},useMemo:function(t,l){var e=$t();l=l===void 0?null:l;var a=t();if(Ke){ue(!0);try{t()}finally{ue(!1)}}return e.memoizedState=[a,l],a},useReducer:function(t,l,e){var a=$t();if(e!==void 0){var u=e(l);if(Ke){ue(!0);try{e(l)}finally{ue(!1)}}}else u=l;return a.memoizedState=a.baseState=u,t={pending:null,lanes:0,dispatch:null,lastRenderedReducer:t,lastRenderedState:u},a.queue=t,t=t.dispatch=Ny.bind(null,W,t),[a.memoizedState,t]},useRef:function(t){var l=$t();return t={current:t},l.memoizedState=t},useState:function(t){t=oc(t);var l=t.queue,e=qo.bind(null,W,l);return l.dispatch=e,[t.memoizedState,e]},useDebugValue:yc,useDeferredValue:function(t,l){var e=$t();return mc(e,t,l)},useTransition:function(){var t=oc(!1);return t=Co.bind(null,W,t.queue,!0,!1),$t().memoizedState=t,[!1,t]},useSyncExternalStore:function(t,l,e){var a=W,u=$t();if(ut){if(e===void 0)throw Error(f(407));e=e()}else{if(e=l(),St===null)throw Error(f(349));(et&127)!==0||io(a,l,e)}u.memoizedState=e;var n={value:e,getSnapshot:l};return u.queue=n,To(fo.bind(null,a,n,t),[t]),a.flags|=2048,za(9,{destroy:void 0},co.bind(null,a,n,e,l),null),e},useId:function(){var t=$t(),l=St.identifierPrefix;if(ut){var e=Bl,a=Hl;e=(a&~(1<<32-sl(a)-1)).toString(32)+e,l="_"+l+"R_"+e,e=mn++,0<\/script>",n=n.removeChild(n.firstChild);break;case"select":n=typeof a.is=="string"?i.createElement("select",{is:a.is}):i.createElement("select"),a.multiple?n.multiple=!0:a.size&&(n.size=a.size);break;default:n=typeof a.is=="string"?i.createElement(u,{is:a.is}):i.createElement(u)}}n[wt]=l,n[el]=a;t:for(i=l.child;i!==null;){if(i.tag===5||i.tag===6)n.appendChild(i.stateNode);else if(i.tag!==4&&i.tag!==27&&i.child!==null){i.child.return=i,i=i.child;continue}if(i===l)break t;for(;i.sibling===null;){if(i.return===null||i.return===l)break t;i=i.return}i.sibling.return=i.return,i=i.sibling}l.stateNode=n;t:switch(Jt(n,u,a),u){case"button":case"input":case"select":case"textarea":a=!!a.autoFocus;break t;case"img":a=!0;break t;default:a=!1}a&&Wl(l)}}return Et(l),Uc(l,l.type,t===null?null:t.memoizedProps,l.pendingProps,e),null;case 6:if(t&&l.stateNode!=null)t.memoizedProps!==a&&Wl(l);else{if(typeof a!="string"&&l.stateNode===null)throw Error(f(166));if(t=P.current,ya(l)){if(t=l.stateNode,e=l.memoizedProps,a=null,u=Lt,u!==null)switch(u.tag){case 27:case 5:a=u.memoizedProps}t[wt]=l,t=!!(t.nodeValue===e||a!==null&&a.suppressHydrationWarning===!0||nd(t.nodeValue,e)),t||re(l,!0)}else t=Xn(t).createTextNode(a),t[wt]=l,l.stateNode=t}return Et(l),null;case 31:if(e=l.memoizedState,t===null||t.memoizedState!==null){if(a=ya(l),e!==null){if(t===null){if(!a)throw Error(f(318));if(t=l.memoizedState,t=t!==null?t.dehydrated:null,!t)throw Error(f(557));t[wt]=l}else Ge(),(l.flags&128)===0&&(l.memoizedState=null),l.flags|=4;Et(l),t=!1}else e=Qi(),t!==null&&t.memoizedState!==null&&(t.memoizedState.hydrationErrors=e),t=!0;if(!t)return l.flags&256?(hl(l),l):(hl(l),null);if((l.flags&128)!==0)throw Error(f(558))}return Et(l),null;case 13:if(a=l.memoizedState,t===null||t.memoizedState!==null&&t.memoizedState.dehydrated!==null){if(u=ya(l),a!==null&&a.dehydrated!==null){if(t===null){if(!u)throw Error(f(318));if(u=l.memoizedState,u=u!==null?u.dehydrated:null,!u)throw Error(f(317));u[wt]=l}else Ge(),(l.flags&128)===0&&(l.memoizedState=null),l.flags|=4;Et(l),u=!1}else u=Qi(),t!==null&&t.memoizedState!==null&&(t.memoizedState.hydrationErrors=u),u=!0;if(!u)return l.flags&256?(hl(l),l):(hl(l),null)}return hl(l),(l.flags&128)!==0?(l.lanes=e,l):(e=a!==null,t=t!==null&&t.memoizedState!==null,e&&(a=l.child,u=null,a.alternate!==null&&a.alternate.memoizedState!==null&&a.alternate.memoizedState.cachePool!==null&&(u=a.alternate.memoizedState.cachePool.pool),n=null,a.memoizedState!==null&&a.memoizedState.cachePool!==null&&(n=a.memoizedState.cachePool.pool),n!==u&&(a.flags|=2048)),e!==t&&e&&(l.child.flags|=8192),An(l,l.updateQueue),Et(l),null);case 4:return Ut(),t===null&&Pc(l.stateNode.containerInfo),Et(l),null;case 10:return Vl(l.type),Et(l),null;case 19:if(M(Ct),a=l.memoizedState,a===null)return Et(l),null;if(u=(l.flags&128)!==0,n=a.rendering,n===null)if(u)du(a,!1);else{if(Dt!==0||t!==null&&(t.flags&128)!==0)for(t=l.child;t!==null;){if(n=dn(t),n!==null){for(l.flags|=128,du(a,!1),t=n.updateQueue,l.updateQueue=t,An(l,t),l.subtreeFlags=0,t=e,e=l.child;e!==null;)Br(e,t),e=e.sibling;return j(Ct,Ct.current&1|2),ut&&wl(l,a.treeForkCount),l.child}t=t.sibling}a.tail!==null&&rl()>Dn&&(l.flags|=128,u=!0,du(a,!1),l.lanes=4194304)}else{if(!u)if(t=dn(n),t!==null){if(l.flags|=128,u=!0,t=t.updateQueue,l.updateQueue=t,An(l,t),du(a,!0),a.tail===null&&a.tailMode==="hidden"&&!n.alternate&&!ut)return Et(l),null}else 2*rl()-a.renderingStartTime>Dn&&e!==536870912&&(l.flags|=128,u=!0,du(a,!1),l.lanes=4194304);a.isBackwards?(n.sibling=l.child,l.child=n):(t=a.last,t!==null?t.sibling=n:l.child=n,a.last=n)}return a.tail!==null?(t=a.tail,a.rendering=t,a.tail=t.sibling,a.renderingStartTime=rl(),t.sibling=null,e=Ct.current,j(Ct,u?e&1|2:e&1),ut&&wl(l,a.treeForkCount),t):(Et(l),null);case 22:case 23:return hl(l),tc(),a=l.memoizedState!==null,t!==null?t.memoizedState!==null!==a&&(l.flags|=8192):a&&(l.flags|=8192),a?(e&536870912)!==0&&(l.flags&128)===0&&(Et(l),l.subtreeFlags&6&&(l.flags|=8192)):Et(l),e=l.updateQueue,e!==null&&An(l,e.retryQueue),e=null,t!==null&&t.memoizedState!==null&&t.memoizedState.cachePool!==null&&(e=t.memoizedState.cachePool.pool),a=null,l.memoizedState!==null&&l.memoizedState.cachePool!==null&&(a=l.memoizedState.cachePool.pool),a!==e&&(l.flags|=2048),t!==null&&M(Ze),null;case 24:return e=null,t!==null&&(e=t.memoizedState.cache),l.memoizedState.cache!==e&&(l.flags|=2048),Vl(Ht),Et(l),null;case 25:return null;case 30:return null}throw Error(f(156,l.tag))}function Ry(t,l){switch(Gi(l),l.tag){case 1:return t=l.flags,t&65536?(l.flags=t&-65537|128,l):null;case 3:return Vl(Ht),Ut(),t=l.flags,(t&65536)!==0&&(t&128)===0?(l.flags=t&-65537|128,l):null;case 26:case 27:case 5:return Hu(l),null;case 31:if(l.memoizedState!==null){if(hl(l),l.alternate===null)throw Error(f(340));Ge()}return t=l.flags,t&65536?(l.flags=t&-65537|128,l):null;case 13:if(hl(l),t=l.memoizedState,t!==null&&t.dehydrated!==null){if(l.alternate===null)throw Error(f(340));Ge()}return t=l.flags,t&65536?(l.flags=t&-65537|128,l):null;case 19:return M(Ct),null;case 4:return Ut(),null;case 10:return Vl(l.type),null;case 22:case 23:return hl(l),tc(),t!==null&&M(Ze),t=l.flags,t&65536?(l.flags=t&-65537|128,l):null;case 24:return Vl(Ht),null;case 25:return null;default:return null}}function os(t,l){switch(Gi(l),l.tag){case 3:Vl(Ht),Ut();break;case 26:case 27:case 5:Hu(l);break;case 4:Ut();break;case 31:l.memoizedState!==null&&hl(l);break;case 13:hl(l);break;case 19:M(Ct);break;case 10:Vl(l.type);break;case 22:case 23:hl(l),tc(),t!==null&&M(Ze);break;case 24:Vl(Ht)}}function yu(t,l){try{var e=l.updateQueue,a=e!==null?e.lastEffect:null;if(a!==null){var u=a.next;e=u;do{if((e.tag&t)===t){a=void 0;var n=e.create,i=e.inst;a=n(),i.destroy=a}e=e.next}while(e!==u)}}catch(c){ht(l,l.return,c)}}function ge(t,l,e){try{var a=l.updateQueue,u=a!==null?a.lastEffect:null;if(u!==null){var n=u.next;a=n;do{if((a.tag&t)===t){var i=a.inst,c=i.destroy;if(c!==void 0){i.destroy=void 0,u=l;var o=e,h=c;try{h()}catch(z){ht(u,o,z)}}}a=a.next}while(a!==n)}}catch(z){ht(l,l.return,z)}}function ss(t){var l=t.updateQueue;if(l!==null){var e=t.stateNode;try{to(l,e)}catch(a){ht(t,t.return,a)}}}function ds(t,l,e){e.props=Je(t.type,t.memoizedProps),e.state=t.memoizedState;try{e.componentWillUnmount()}catch(a){ht(t,l,a)}}function mu(t,l){try{var e=t.ref;if(e!==null){switch(t.tag){case 26:case 27:case 5:var a=t.stateNode;break;case 30:a=t.stateNode;break;default:a=t.stateNode}typeof e=="function"?t.refCleanup=e(a):e.current=a}}catch(u){ht(t,l,u)}}function ql(t,l){var e=t.ref,a=t.refCleanup;if(e!==null)if(typeof a=="function")try{a()}catch(u){ht(t,l,u)}finally{t.refCleanup=null,t=t.alternate,t!=null&&(t.refCleanup=null)}else if(typeof e=="function")try{e(null)}catch(u){ht(t,l,u)}else e.current=null}function ys(t){var l=t.type,e=t.memoizedProps,a=t.stateNode;try{t:switch(l){case"button":case"input":case"select":case"textarea":e.autoFocus&&a.focus();break t;case"img":e.src?a.src=e.src:e.srcSet&&(a.srcset=e.srcSet)}}catch(u){ht(t,t.return,u)}}function Cc(t,l,e){try{var a=t.stateNode;em(a,t.type,e,l),a[el]=l}catch(u){ht(t,t.return,u)}}function ms(t){return t.tag===5||t.tag===3||t.tag===26||t.tag===27&&Te(t.type)||t.tag===4}function jc(t){t:for(;;){for(;t.sibling===null;){if(t.return===null||ms(t.return))return null;t=t.return}for(t.sibling.return=t.return,t=t.sibling;t.tag!==5&&t.tag!==6&&t.tag!==18;){if(t.tag===27&&Te(t.type)||t.flags&2||t.child===null||t.tag===4)continue t;t.child.return=t,t=t.child}if(!(t.flags&2))return t.stateNode}}function Rc(t,l,e){var a=t.tag;if(a===5||a===6)t=t.stateNode,l?(e.nodeType===9?e.body:e.nodeName==="HTML"?e.ownerDocument.body:e).insertBefore(t,l):(l=e.nodeType===9?e.body:e.nodeName==="HTML"?e.ownerDocument.body:e,l.appendChild(t),e=e._reactRootContainer,e!=null||l.onclick!==null||(l.onclick=Xl));else if(a!==4&&(a===27&&Te(t.type)&&(e=t.stateNode,l=null),t=t.child,t!==null))for(Rc(t,l,e),t=t.sibling;t!==null;)Rc(t,l,e),t=t.sibling}function Mn(t,l,e){var a=t.tag;if(a===5||a===6)t=t.stateNode,l?e.insertBefore(t,l):e.appendChild(t);else if(a!==4&&(a===27&&Te(t.type)&&(e=t.stateNode),t=t.child,t!==null))for(Mn(t,l,e),t=t.sibling;t!==null;)Mn(t,l,e),t=t.sibling}function hs(t){var l=t.stateNode,e=t.memoizedProps;try{for(var a=t.type,u=l.attributes;u.length;)l.removeAttributeNode(u[0]);Jt(l,a,e),l[wt]=t,l[el]=e}catch(n){ht(t,t.return,n)}}var $l=!1,Yt=!1,Hc=!1,gs=typeof WeakSet=="function"?WeakSet:Set,Qt=null;function Hy(t,l){if(t=t.containerInfo,ef=Jn,t=_r(t),Oi(t)){if("selectionStart"in t)var e={start:t.selectionStart,end:t.selectionEnd};else t:{e=(e=t.ownerDocument)&&e.defaultView||window;var a=e.getSelection&&e.getSelection();if(a&&a.rangeCount!==0){e=a.anchorNode;var u=a.anchorOffset,n=a.focusNode;a=a.focusOffset;try{e.nodeType,n.nodeType}catch{e=null;break t}var i=0,c=-1,o=-1,h=0,z=0,E=t,g=null;l:for(;;){for(var b;E!==e||u!==0&&E.nodeType!==3||(c=i+u),E!==n||a!==0&&E.nodeType!==3||(o=i+a),E.nodeType===3&&(i+=E.nodeValue.length),(b=E.firstChild)!==null;)g=E,E=b;for(;;){if(E===t)break l;if(g===e&&++h===u&&(c=i),g===n&&++z===a&&(o=i),(b=E.nextSibling)!==null)break;E=g,g=E.parentNode}E=b}e=c===-1||o===-1?null:{start:c,end:o}}else e=null}e=e||{start:0,end:0}}else e=null;for(af={focusedElem:t,selectionRange:e},Jn=!1,Qt=l;Qt!==null;)if(l=Qt,t=l.child,(l.subtreeFlags&1028)!==0&&t!==null)t.return=l,Qt=t;else for(;Qt!==null;){switch(l=Qt,n=l.alternate,t=l.flags,l.tag){case 0:if((t&4)!==0&&(t=l.updateQueue,t=t!==null?t.events:null,t!==null))for(e=0;e title"))),Jt(n,a,e),n[wt]=t,Xt(n),a=n;break t;case"link":var i=zd("link","href",u).get(a+(e.href||""));if(i){for(var c=0;cpt&&(i=pt,pt=w,w=i);var y=Ar(c,w),d=Ar(c,pt);if(y&&d&&(b.rangeCount!==1||b.anchorNode!==y.node||b.anchorOffset!==y.offset||b.focusNode!==d.node||b.focusOffset!==d.offset)){var m=E.createRange();m.setStart(y.node,y.offset),b.removeAllRanges(),w>pt?(b.addRange(m),b.extend(d.node,d.offset)):(m.setEnd(d.node,d.offset),b.addRange(m))}}}}for(E=[],b=c;b=b.parentNode;)b.nodeType===1&&E.push({element:b,left:b.scrollLeft,top:b.scrollTop});for(typeof c.focus=="function"&&c.focus(),c=0;ce?32:e,x.T=null,e=Zc,Zc=null;var n=Se,i=le;if(Gt=0,_a=Se=null,le=0,(ot&6)!==0)throw Error(f(331));var c=ot;if(ot|=4,_s(n.current),Es(n,n.current,i,e),ot=c,Su(0,!1),ol&&typeof ol.onPostCommitFiberRoot=="function")try{ol.onPostCommitFiberRoot(Ya,n)}catch{}return!0}finally{C.p=u,x.T=a,Vs(t,l)}}function Js(t,l,e){l=zl(e,l),l=xc(t.stateNode,l,2),t=ye(t,l,2),t!==null&&(Xa(t,2),Yl(t))}function ht(t,l,e){if(t.tag===3)Js(t,t,e);else for(;l!==null;){if(l.tag===3){Js(l,t,e);break}else if(l.tag===1){var a=l.stateNode;if(typeof l.type.getDerivedStateFromError=="function"||typeof a.componentDidCatch=="function"&&(pe===null||!pe.has(a))){t=zl(e,t),e=ko(2),a=ye(l,e,2),a!==null&&(Wo(e,a,l,t),Xa(a,2),Yl(a));break}}l=l.return}}function Kc(t,l,e){var a=t.pingCache;if(a===null){a=t.pingCache=new Yy;var u=new Set;a.set(l,u)}else u=a.get(l),u===void 0&&(u=new Set,a.set(l,u));u.has(e)||(Yc=!0,u.add(e),t=wy.bind(null,t,l,e),l.then(t,t))}function wy(t,l,e){var a=t.pingCache;a!==null&&a.delete(l),t.pingedLanes|=t.suspendedLanes&e,t.warmLanes&=~e,St===t&&(et&e)===e&&(Dt===4||Dt===3&&(et&62914560)===et&&300>rl()-Nn?(ot&2)===0&&Oa(t,0):Gc|=e,Ma===et&&(Ma=0)),Yl(t)}function ks(t,l){l===0&&(l=Qf()),t=qe(t,l),t!==null&&(Xa(t,l),Yl(t))}function Ly(t){var l=t.memoizedState,e=0;l!==null&&(e=l.retryLane),ks(t,e)}function Vy(t,l){var e=0;switch(t.tag){case 31:case 13:var a=t.stateNode,u=t.memoizedState;u!==null&&(e=u.retryLane);break;case 19:a=t.stateNode;break;case 22:a=t.stateNode._retryCache;break;default:throw Error(f(314))}a!==null&&a.delete(l),ks(t,e)}function Ky(t,l){return ni(t,l)}var Bn=null,Da=null,Jc=!1,qn=!1,kc=!1,ze=0;function Yl(t){t!==Da&&t.next===null&&(Da===null?Bn=Da=t:Da=Da.next=t),qn=!0,Jc||(Jc=!0,ky())}function Su(t,l){if(!kc&&qn){kc=!0;do for(var e=!1,a=Bn;a!==null;){if(t!==0){var u=a.pendingLanes;if(u===0)var n=0;else{var i=a.suspendedLanes,c=a.pingedLanes;n=(1<<31-sl(42|t)+1)-1,n&=u&~(i&~c),n=n&201326741?n&201326741|1:n?n|2:0}n!==0&&(e=!0,Is(a,n))}else n=et,n=Xu(a,a===St?n:0,a.cancelPendingCommit!==null||a.timeoutHandle!==-1),(n&3)===0||Ga(a,n)||(e=!0,Is(a,n));a=a.next}while(e);kc=!1}}function Jy(){Ws()}function Ws(){qn=Jc=!1;var t=0;ze!==0&&um()&&(t=ze);for(var l=rl(),e=null,a=Bn;a!==null;){var u=a.next,n=$s(a,l);n===0?(a.next=null,e===null?Bn=u:e.next=u,u===null&&(Da=e)):(e=a,(t!==0||(n&3)!==0)&&(qn=!0)),a=u}Gt!==0&&Gt!==5||Su(t),ze!==0&&(ze=0)}function $s(t,l){for(var e=t.suspendedLanes,a=t.pingedLanes,u=t.expirationTimes,n=t.pendingLanes&-62914561;0c)break;var z=o.transferSize,E=o.initiatorType;z&&id(E)&&(o=o.responseEnd,i+=z*(o"u"?null:document;function bd(t,l,e){var a=Ua;if(a&&typeof l=="string"&&l){var u=Sl(l);u='link[rel="'+t+'"][href="'+u+'"]',typeof e=="string"&&(u+='[crossorigin="'+e+'"]'),vd.has(u)||(vd.add(u),t={rel:t,crossOrigin:e,href:l},a.querySelector(u)===null&&(l=a.createElement("link"),Jt(l,"link",t),Xt(l),a.head.appendChild(l)))}}function ym(t){ee.D(t),bd("dns-prefetch",t,null)}function mm(t,l){ee.C(t,l),bd("preconnect",t,l)}function hm(t,l,e){ee.L(t,l,e);var a=Ua;if(a&&t&&l){var u='link[rel="preload"][as="'+Sl(l)+'"]';l==="image"&&e&&e.imageSrcSet?(u+='[imagesrcset="'+Sl(e.imageSrcSet)+'"]',typeof e.imageSizes=="string"&&(u+='[imagesizes="'+Sl(e.imageSizes)+'"]')):u+='[href="'+Sl(t)+'"]';var n=u;switch(l){case"style":n=Ca(t);break;case"script":n=ja(t)}Ol.has(n)||(t=H({rel:"preload",href:l==="image"&&e&&e.imageSrcSet?void 0:t,as:l},e),Ol.set(n,t),a.querySelector(u)!==null||l==="style"&&a.querySelector(Eu(n))||l==="script"&&a.querySelector(Au(n))||(l=a.createElement("link"),Jt(l,"link",t),Xt(l),a.head.appendChild(l)))}}function gm(t,l){ee.m(t,l);var e=Ua;if(e&&t){var a=l&&typeof l.as=="string"?l.as:"script",u='link[rel="modulepreload"][as="'+Sl(a)+'"][href="'+Sl(t)+'"]',n=u;switch(a){case"audioworklet":case"paintworklet":case"serviceworker":case"sharedworker":case"worker":case"script":n=ja(t)}if(!Ol.has(n)&&(t=H({rel:"modulepreload",href:t},l),Ol.set(n,t),e.querySelector(u)===null)){switch(a){case"audioworklet":case"paintworklet":case"serviceworker":case"sharedworker":case"worker":case"script":if(e.querySelector(Au(n)))return}a=e.createElement("link"),Jt(a,"link",t),Xt(a),e.head.appendChild(a)}}}function vm(t,l,e){ee.S(t,l,e);var a=Ua;if(a&&t){var u=ta(a).hoistableStyles,n=Ca(t);l=l||"default";var i=u.get(n);if(!i){var c={loading:0,preload:null};if(i=a.querySelector(Eu(n)))c.loading=5;else{t=H({rel:"stylesheet",href:t,"data-precedence":l},e),(e=Ol.get(n))&&sf(t,e);var o=i=a.createElement("link");Xt(o),Jt(o,"link",t),o._p=new Promise(function(h,z){o.onload=h,o.onerror=z}),o.addEventListener("load",function(){c.loading|=1}),o.addEventListener("error",function(){c.loading|=2}),c.loading|=4,Zn(i,l,a)}i={type:"stylesheet",instance:i,count:1,state:c},u.set(n,i)}}}function bm(t,l){ee.X(t,l);var e=Ua;if(e&&t){var a=ta(e).hoistableScripts,u=ja(t),n=a.get(u);n||(n=e.querySelector(Au(u)),n||(t=H({src:t,async:!0},l),(l=Ol.get(u))&&df(t,l),n=e.createElement("script"),Xt(n),Jt(n,"link",t),e.head.appendChild(n)),n={type:"script",instance:n,count:1,state:null},a.set(u,n))}}function pm(t,l){ee.M(t,l);var e=Ua;if(e&&t){var a=ta(e).hoistableScripts,u=ja(t),n=a.get(u);n||(n=e.querySelector(Au(u)),n||(t=H({src:t,async:!0,type:"module"},l),(l=Ol.get(u))&&df(t,l),n=e.createElement("script"),Xt(n),Jt(n,"link",t),e.head.appendChild(n)),n={type:"script",instance:n,count:1,state:null},a.set(u,n))}}function pd(t,l,e,a){var u=(u=P.current)?Qn(u):null;if(!u)throw Error(f(446));switch(t){case"meta":case"title":return null;case"style":return typeof e.precedence=="string"&&typeof e.href=="string"?(l=Ca(e.href),e=ta(u).hoistableStyles,a=e.get(l),a||(a={type:"style",instance:null,count:0,state:null},e.set(l,a)),a):{type:"void",instance:null,count:0,state:null};case"link":if(e.rel==="stylesheet"&&typeof e.href=="string"&&typeof e.precedence=="string"){t=Ca(e.href);var n=ta(u).hoistableStyles,i=n.get(t);if(i||(u=u.ownerDocument||u,i={type:"stylesheet",instance:null,count:0,state:{loading:0,preload:null}},n.set(t,i),(n=u.querySelector(Eu(t)))&&!n._p&&(i.instance=n,i.state.loading=5),Ol.has(t)||(e={rel:"preload",as:"style",href:e.href,crossOrigin:e.crossOrigin,integrity:e.integrity,media:e.media,hrefLang:e.hrefLang,referrerPolicy:e.referrerPolicy},Ol.set(t,e),n||Sm(u,t,e,i.state))),l&&a===null)throw Error(f(528,""));return i}if(l&&a!==null)throw Error(f(529,""));return null;case"script":return l=e.async,e=e.src,typeof e=="string"&&l&&typeof l!="function"&&typeof l!="symbol"?(l=ja(e),e=ta(u).hoistableScripts,a=e.get(l),a||(a={type:"script",instance:null,count:0,state:null},e.set(l,a)),a):{type:"void",instance:null,count:0,state:null};default:throw Error(f(444,t))}}function Ca(t){return'href="'+Sl(t)+'"'}function Eu(t){return'link[rel="stylesheet"]['+t+"]"}function Sd(t){return H({},t,{"data-precedence":t.precedence,precedence:null})}function Sm(t,l,e,a){t.querySelector('link[rel="preload"][as="style"]['+l+"]")?a.loading=1:(l=t.createElement("link"),a.preload=l,l.addEventListener("load",function(){return a.loading|=1}),l.addEventListener("error",function(){return a.loading|=2}),Jt(l,"link",e),Xt(l),t.head.appendChild(l))}function ja(t){return'[src="'+Sl(t)+'"]'}function Au(t){return"script[async]"+t}function xd(t,l,e){if(l.count++,l.instance===null)switch(l.type){case"style":var a=t.querySelector('style[data-href~="'+Sl(e.href)+'"]');if(a)return l.instance=a,Xt(a),a;var u=H({},e,{"data-href":e.href,"data-precedence":e.precedence,href:null,precedence:null});return a=(t.ownerDocument||t).createElement("style"),Xt(a),Jt(a,"style",u),Zn(a,e.precedence,t),l.instance=a;case"stylesheet":u=Ca(e.href);var n=t.querySelector(Eu(u));if(n)return l.state.loading|=4,l.instance=n,Xt(n),n;a=Sd(e),(u=Ol.get(u))&&sf(a,u),n=(t.ownerDocument||t).createElement("link"),Xt(n);var i=n;return i._p=new Promise(function(c,o){i.onload=c,i.onerror=o}),Jt(n,"link",a),l.state.loading|=4,Zn(n,e.precedence,t),l.instance=n;case"script":return n=ja(e.src),(u=t.querySelector(Au(n)))?(l.instance=u,Xt(u),u):(a=e,(u=Ol.get(n))&&(a=H({},e),df(a,u)),t=t.ownerDocument||t,u=t.createElement("script"),Xt(u),Jt(u,"link",a),t.head.appendChild(u),l.instance=u);case"void":return null;default:throw Error(f(443,l.type))}else l.type==="stylesheet"&&(l.state.loading&4)===0&&(a=l.instance,l.state.loading|=4,Zn(a,e.precedence,t));return l.instance}function Zn(t,l,e){for(var a=e.querySelectorAll('link[rel="stylesheet"][data-precedence],style[data-precedence]'),u=a.length?a[a.length-1]:null,n=u,i=0;i title"):null)}function xm(t,l,e){if(e===1||l.itemProp!=null)return!1;switch(t){case"meta":case"title":return!0;case"style":if(typeof l.precedence!="string"||typeof l.href!="string"||l.href==="")break;return!0;case"link":if(typeof l.rel!="string"||typeof l.href!="string"||l.href===""||l.onLoad||l.onError)break;return l.rel==="stylesheet"?(t=l.disabled,typeof l.precedence=="string"&&t==null):!0;case"script":if(l.async&&typeof l.async!="function"&&typeof l.async!="symbol"&&!l.onLoad&&!l.onError&&l.src&&typeof l.src=="string")return!0}return!1}function Ed(t){return!(t.type==="stylesheet"&&(t.state.loading&3)===0)}function zm(t,l,e,a){if(e.type==="stylesheet"&&(typeof a.media!="string"||matchMedia(a.media).matches!==!1)&&(e.state.loading&4)===0){if(e.instance===null){var u=Ca(a.href),n=l.querySelector(Eu(u));if(n){l=n._p,l!==null&&typeof l=="object"&&typeof l.then=="function"&&(t.count++,t=Ln.bind(t),l.then(t,t)),e.state.loading|=4,e.instance=n,Xt(n);return}n=l.ownerDocument||l,a=Sd(a),(u=Ol.get(u))&&sf(a,u),n=n.createElement("link"),Xt(n);var i=n;i._p=new Promise(function(c,o){i.onload=c,i.onerror=o}),Jt(n,"link",a),e.instance=n}t.stylesheets===null&&(t.stylesheets=new Map),t.stylesheets.set(e,l),(l=e.state.preload)&&(e.state.loading&3)===0&&(t.count++,e=Ln.bind(t),l.addEventListener("load",e),l.addEventListener("error",e))}}var yf=0;function Tm(t,l){return t.stylesheets&&t.count===0&&Kn(t,t.stylesheets),0yf?50:800)+l);return t.unsuspend=e,function(){t.unsuspend=null,clearTimeout(a),clearTimeout(u)}}:null}function Ln(){if(this.count--,this.count===0&&(this.imgCount===0||!this.waitingForImages)){if(this.stylesheets)Kn(this,this.stylesheets);else if(this.unsuspend){var t=this.unsuspend;this.unsuspend=null,t()}}}var Vn=null;function Kn(t,l){t.stylesheets=null,t.unsuspend!==null&&(t.count++,Vn=new Map,l.forEach(Em,t),Vn=null,Ln.call(t))}function Em(t,l){if(!(l.state.loading&4)){var e=Vn.get(t);if(e)var a=e.get(null);else{e=new Map,Vn.set(t,e);for(var u=t.querySelectorAll("link[data-precedence],style[data-precedence]"),n=0;n"u"||typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE!="function"))try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(r)}catch(v){console.error(v)}}return r(),zf.exports=Xm(),zf.exports}var Zm=Qm();const wm=r=>r.replace(/([a-z0-9])([A-Z])/g,"$1-$2").toLowerCase(),Pd=(...r)=>r.filter((v,S,f)=>!!v&&v.trim()!==""&&f.indexOf(v)===S).join(" ").trim();var Lm={xmlns:"http://www.w3.org/2000/svg",width:24,height:24,viewBox:"0 0 24 24",fill:"none",stroke:"currentColor",strokeWidth:2,strokeLinecap:"round",strokeLinejoin:"round"};const Vm=xt.forwardRef(({color:r="currentColor",size:v=24,strokeWidth:S=2,absoluteStrokeWidth:f,className:_="",children:O,iconNode:D,...U},N)=>xt.createElement("svg",{ref:N,...Lm,width:v,height:v,stroke:r,strokeWidth:f?Number(S)*24/Number(v):S,className:Pd("lucide",_),...U},[...D.map(([p,R])=>xt.createElement(p,R)),...Array.isArray(O)?O:[O]]));const Nl=(r,v)=>{const S=xt.forwardRef(({className:f,..._},O)=>xt.createElement(Vm,{ref:O,iconNode:v,className:Pd(`lucide-${wm(r)}`,f),..._}));return S.displayName=`${r}`,S};const Km=Nl("Binary",[["rect",{x:"14",y:"14",width:"4",height:"6",rx:"2",key:"p02svl"}],["rect",{x:"6",y:"4",width:"4",height:"6",rx:"2",key:"xm4xkj"}],["path",{d:"M6 20h4",key:"1i6q5t"}],["path",{d:"M14 10h4",key:"ru81e7"}],["path",{d:"M6 14h2v6",key:"16z9wg"}],["path",{d:"M14 4h2v6",key:"1idq9u"}]]);const Jm=Nl("BookText",[["path",{d:"M4 19.5v-15A2.5 2.5 0 0 1 6.5 2H19a1 1 0 0 1 1 1v18a1 1 0 0 1-1 1H6.5a1 1 0 0 1 0-5H20",key:"k3hazp"}],["path",{d:"M8 11h8",key:"vwpz6n"}],["path",{d:"M8 7h6",key:"1f0q6e"}]]);const km=Nl("EyeOff",[["path",{d:"M10.733 5.076a10.744 10.744 0 0 1 11.205 6.575 1 1 0 0 1 0 .696 10.747 10.747 0 0 1-1.444 2.49",key:"ct8e1f"}],["path",{d:"M14.084 14.158a3 3 0 0 1-4.242-4.242",key:"151rxh"}],["path",{d:"M17.479 17.499a10.75 10.75 0 0 1-15.417-5.151 1 1 0 0 1 0-.696 10.75 10.75 0 0 1 4.446-5.143",key:"13bj9a"}],["path",{d:"m2 2 20 20",key:"1ooewy"}]]);const Wm=Nl("Eye",[["path",{d:"M2.062 12.348a1 1 0 0 1 0-.696 10.75 10.75 0 0 1 19.876 0 1 1 0 0 1 0 .696 10.75 10.75 0 0 1-19.876 0",key:"1nclc0"}],["circle",{cx:"12",cy:"12",r:"3",key:"1v7zrd"}]]);const $m=Nl("Globe",[["circle",{cx:"12",cy:"12",r:"10",key:"1mglay"}],["path",{d:"M12 2a14.5 14.5 0 0 0 0 20 14.5 14.5 0 0 0 0-20",key:"13o1zl"}],["path",{d:"M2 12h20",key:"9i4pu4"}]]);const kd=Nl("LoaderCircle",[["path",{d:"M21 12a9 9 0 1 1-6.219-8.56",key:"13zald"}]]);const Fm=Nl("Lock",[["rect",{width:"18",height:"11",x:"3",y:"11",rx:"2",ry:"2",key:"1w4ew1"}],["path",{d:"M7 11V7a5 5 0 0 1 10 0v4",key:"fwvmzm"}]]);const Im=Nl("LogIn",[["path",{d:"M15 3h4a2 2 0 0 1 2 2v14a2 2 0 0 1-2 2h-4",key:"u53s6r"}],["polyline",{points:"10 17 15 12 10 7",key:"1ail0h"}],["line",{x1:"15",x2:"3",y1:"12",y2:"12",key:"v6grx8"}]]);const Pm=Nl("RotateCw",[["path",{d:"M21 12a9 9 0 1 1-9-9c2.52 0 4.93 1 6.74 2.74L21 8",key:"1p45f6"}],["path",{d:"M21 3v5h-5",key:"1q7to0"}]]);const th=Nl("User",[["path",{d:"M19 21v-2a4 4 0 0 0-4-4H9a4 4 0 0 0-4 4v2",key:"975kel"}],["circle",{cx:"12",cy:"7",r:"4",key:"17ys0d"}]]);const lh=Nl("Waypoints",[["circle",{cx:"12",cy:"4.5",r:"2.5",key:"r5ysbb"}],["path",{d:"m10.2 6.3-3.9 3.9",key:"1nzqf6"}],["circle",{cx:"4.5",cy:"12",r:"2.5",key:"jydg6v"}],["path",{d:"M7 12h10",key:"b7w52i"}],["circle",{cx:"19.5",cy:"12",r:"2.5",key:"1piiel"}],["path",{d:"m13.8 17.7 3.9-3.9",key:"1wyg1y"}],["circle",{cx:"12",cy:"19.5",r:"2.5",key:"13o1pw"}]]);const eh=Nl("X",[["path",{d:"M18 6 6 18",key:"1bl5f8"}],["path",{d:"m6 6 12 12",key:"d8bk6v"}]]);function t0(){return globalThis.__DATA__??{}}function l0(r){var v,S,f="";if(typeof r=="string"||typeof r=="number")f+=r;else if(typeof r=="object")if(Array.isArray(r)){var _=r.length;for(v=0;v<_;v++)r[v]&&(S=l0(r[v]))&&(f&&(f+=" "),f+=S)}else for(S in r)r[S]&&(f&&(f+=" "),f+=S);return f}function ah(){for(var r,v,S=0,f="",_=arguments.length;S<_;S++)(r=arguments[S])&&(v=l0(r))&&(f&&(f+=" "),f+=v);return f}const Hf="-",uh=r=>{const v=ih(r),{conflictingClassGroups:S,conflictingClassGroupModifiers:f}=r;return{getClassGroupId:D=>{const U=D.split(Hf);return U[0]===""&&U.length!==1&&U.shift(),e0(U,v)||nh(D)},getConflictingClassGroupIds:(D,U)=>{const N=S[D]||[];return U&&f[D]?[...N,...f[D]]:N}}},e0=(r,v)=>{if(r.length===0)return v.classGroupId;const S=r[0],f=v.nextPart.get(S),_=f?e0(r.slice(1),f):void 0;if(_)return _;if(v.validators.length===0)return;const O=r.join(Hf);return v.validators.find(({validator:D})=>D(O))?.classGroupId},Wd=/^\[(.+)\]$/,nh=r=>{if(Wd.test(r)){const v=Wd.exec(r)[1],S=v?.substring(0,v.indexOf(":"));if(S)return"arbitrary.."+S}},ih=r=>{const{theme:v,prefix:S}=r,f={nextPart:new Map,validators:[]};return fh(Object.entries(r.classGroups),S).forEach(([O,D])=>{Df(D,f,O,v)}),f},Df=(r,v,S,f)=>{r.forEach(_=>{if(typeof _=="string"){const O=_===""?v:$d(v,_);O.classGroupId=S;return}if(typeof _=="function"){if(ch(_)){Df(_(f),v,S,f);return}v.validators.push({validator:_,classGroupId:S});return}Object.entries(_).forEach(([O,D])=>{Df(D,$d(v,O),S,f)})})},$d=(r,v)=>{let S=r;return v.split(Hf).forEach(f=>{S.nextPart.has(f)||S.nextPart.set(f,{nextPart:new Map,validators:[]}),S=S.nextPart.get(f)}),S},ch=r=>r.isThemeGetter,fh=(r,v)=>v?r.map(([S,f])=>{const _=f.map(O=>typeof O=="string"?v+O:typeof O=="object"?Object.fromEntries(Object.entries(O).map(([D,U])=>[v+D,U])):O);return[S,_]}):r,rh=r=>{if(r<1)return{get:()=>{},set:()=>{}};let v=0,S=new Map,f=new Map;const _=(O,D)=>{S.set(O,D),v++,v>r&&(v=0,f=S,S=new Map)};return{get(O){let D=S.get(O);if(D!==void 0)return D;if((D=f.get(O))!==void 0)return _(O,D),D},set(O,D){S.has(O)?S.set(O,D):_(O,D)}}},a0="!",oh=r=>{const{separator:v,experimentalParseClassName:S}=r,f=v.length===1,_=v[0],O=v.length,D=U=>{const N=[];let p=0,R=0,H;for(let Q=0;QR?H-R:void 0;return{modifiers:N,hasImportantModifier:st,baseClassName:ct,maybePostfixModifierPosition:G}};return S?U=>S({className:U,parseClassName:D}):D},sh=r=>{if(r.length<=1)return r;const v=[];let S=[];return r.forEach(f=>{f[0]==="["?(v.push(...S.sort(),f),S=[]):S.push(f)}),v.push(...S.sort()),v},dh=r=>({cache:rh(r.cacheSize),parseClassName:oh(r),...uh(r)}),yh=/\s+/,mh=(r,v)=>{const{parseClassName:S,getClassGroupId:f,getConflictingClassGroupIds:_}=v,O=[],D=r.trim().split(yh);let U="";for(let N=D.length-1;N>=0;N-=1){const p=D[N],{modifiers:R,hasImportantModifier:H,baseClassName:V,maybePostfixModifierPosition:st}=S(p);let ct=!!st,G=f(ct?V.substring(0,st):V);if(!G){if(!ct){U=p+(U.length>0?" "+U:U);continue}if(G=f(V),!G){U=p+(U.length>0?" "+U:U);continue}ct=!1}const Q=sh(R).join(":"),L=H?Q+a0:Q,gt=L+G;if(O.includes(gt))continue;O.push(gt);const zt=_(G,ct);for(let _t=0;_t0?" "+U:U)}return U};function hh(){let r=0,v,S,f="";for(;r{if(typeof r=="string")return r;let v,S="";for(let f=0;fH(R),r());return S=dh(p),f=S.cache.get,_=S.cache.set,O=U,U(N)}function U(N){const p=f(N);if(p)return p;const R=mh(N,S);return _(N,R),R}return function(){return O(hh.apply(null,arguments))}}const At=r=>{const v=S=>S[r]||[];return v.isThemeGetter=!0,v},n0=/^\[(?:([a-z-]+):)?(.+)\]$/i,vh=/^\d+\/\d+$/,bh=new Set(["px","full","screen"]),ph=/^(\d+(\.\d+)?)?(xs|sm|md|lg|xl)$/,Sh=/\d+(%|px|r?em|[sdl]?v([hwib]|min|max)|pt|pc|in|cm|mm|cap|ch|ex|r?lh|cq(w|h|i|b|min|max))|\b(calc|min|max|clamp)\(.+\)|^0$/,xh=/^(rgba?|hsla?|hwb|(ok)?(lab|lch)|color-mix)\(.+\)$/,zh=/^(inset_)?-?((\d+)?\.?(\d+)[a-z]+|0)_-?((\d+)?\.?(\d+)[a-z]+|0)/,Th=/^(url|image|image-set|cross-fade|element|(repeating-)?(linear|radial|conic)-gradient)\(.+\)$/,ae=r=>Ha(r)||bh.has(r)||vh.test(r),Ne=r=>Ba(r,"length",Uh),Ha=r=>!!r&&!Number.isNaN(Number(r)),Mf=r=>Ba(r,"number",Ha),Cu=r=>!!r&&Number.isInteger(Number(r)),Eh=r=>r.endsWith("%")&&Ha(r.slice(0,-1)),F=r=>n0.test(r),De=r=>ph.test(r),Ah=new Set(["length","size","percentage"]),Mh=r=>Ba(r,Ah,i0),_h=r=>Ba(r,"position",i0),Oh=new Set(["image","url"]),Nh=r=>Ba(r,Oh,jh),Dh=r=>Ba(r,"",Ch),ju=()=>!0,Ba=(r,v,S)=>{const f=n0.exec(r);return f?f[1]?typeof v=="string"?f[1]===v:v.has(f[1]):S(f[2]):!1},Uh=r=>Sh.test(r)&&!xh.test(r),i0=()=>!1,Ch=r=>zh.test(r),jh=r=>Th.test(r),Rh=()=>{const r=At("colors"),v=At("spacing"),S=At("blur"),f=At("brightness"),_=At("borderColor"),O=At("borderRadius"),D=At("borderSpacing"),U=At("borderWidth"),N=At("contrast"),p=At("grayscale"),R=At("hueRotate"),H=At("invert"),V=At("gap"),st=At("gradientColorStops"),ct=At("gradientColorStopPositions"),G=At("inset"),Q=At("margin"),L=At("opacity"),gt=At("padding"),zt=At("saturate"),_t=At("scale"),it=At("sepia"),Ot=At("skew"),J=At("space"),Rt=At("translate"),It=()=>["auto","contain","none"],jl=()=>["auto","hidden","clip","visible","scroll"],Pt=()=>["auto",F,v],I=()=>[F,v],Rl=()=>["",ae,Ne],tl=()=>["auto",Ha,F],ll=()=>["bottom","center","left","left-bottom","left-top","right","right-bottom","right-top","top"],x=()=>["solid","dashed","dotted","double","none"],C=()=>["normal","multiply","screen","overlay","darken","lighten","color-dodge","color-burn","hard-light","soft-light","difference","exclusion","hue","saturation","color","luminosity"],Z=()=>["start","end","center","between","around","evenly","stretch"],nt=()=>["","0",F],dt=()=>["auto","avoid","all","avoid-page","page","left","right","column"],s=()=>[Ha,F];return{cacheSize:500,separator:":",theme:{colors:[ju],spacing:[ae,Ne],blur:["none","",De,F],brightness:s(),borderColor:[r],borderRadius:["none","","full",De,F],borderSpacing:I(),borderWidth:Rl(),contrast:s(),grayscale:nt(),hueRotate:s(),invert:nt(),gap:I(),gradientColorStops:[r],gradientColorStopPositions:[Eh,Ne],inset:Pt(),margin:Pt(),opacity:s(),padding:I(),saturate:s(),scale:s(),sepia:nt(),skew:s(),space:I(),translate:I()},classGroups:{aspect:[{aspect:["auto","square","video",F]}],container:["container"],columns:[{columns:[De]}],"break-after":[{"break-after":dt()}],"break-before":[{"break-before":dt()}],"break-inside":[{"break-inside":["auto","avoid","avoid-page","avoid-column"]}],"box-decoration":[{"box-decoration":["slice","clone"]}],box:[{box:["border","content"]}],display:["block","inline-block","inline","flex","inline-flex","table","inline-table","table-caption","table-cell","table-column","table-column-group","table-footer-group","table-header-group","table-row-group","table-row","flow-root","grid","inline-grid","contents","list-item","hidden"],float:[{float:["right","left","none","start","end"]}],clear:[{clear:["left","right","both","none","start","end"]}],isolation:["isolate","isolation-auto"],"object-fit":[{object:["contain","cover","fill","none","scale-down"]}],"object-position":[{object:[...ll(),F]}],overflow:[{overflow:jl()}],"overflow-x":[{"overflow-x":jl()}],"overflow-y":[{"overflow-y":jl()}],overscroll:[{overscroll:It()}],"overscroll-x":[{"overscroll-x":It()}],"overscroll-y":[{"overscroll-y":It()}],position:["static","fixed","absolute","relative","sticky"],inset:[{inset:[G]}],"inset-x":[{"inset-x":[G]}],"inset-y":[{"inset-y":[G]}],start:[{start:[G]}],end:[{end:[G]}],top:[{top:[G]}],right:[{right:[G]}],bottom:[{bottom:[G]}],left:[{left:[G]}],visibility:["visible","invisible","collapse"],z:[{z:["auto",Cu,F]}],basis:[{basis:Pt()}],"flex-direction":[{flex:["row","row-reverse","col","col-reverse"]}],"flex-wrap":[{flex:["wrap","wrap-reverse","nowrap"]}],flex:[{flex:["1","auto","initial","none",F]}],grow:[{grow:nt()}],shrink:[{shrink:nt()}],order:[{order:["first","last","none",Cu,F]}],"grid-cols":[{"grid-cols":[ju]}],"col-start-end":[{col:["auto",{span:["full",Cu,F]},F]}],"col-start":[{"col-start":tl()}],"col-end":[{"col-end":tl()}],"grid-rows":[{"grid-rows":[ju]}],"row-start-end":[{row:["auto",{span:[Cu,F]},F]}],"row-start":[{"row-start":tl()}],"row-end":[{"row-end":tl()}],"grid-flow":[{"grid-flow":["row","col","dense","row-dense","col-dense"]}],"auto-cols":[{"auto-cols":["auto","min","max","fr",F]}],"auto-rows":[{"auto-rows":["auto","min","max","fr",F]}],gap:[{gap:[V]}],"gap-x":[{"gap-x":[V]}],"gap-y":[{"gap-y":[V]}],"justify-content":[{justify:["normal",...Z()]}],"justify-items":[{"justify-items":["start","end","center","stretch"]}],"justify-self":[{"justify-self":["auto","start","end","center","stretch"]}],"align-content":[{content:["normal",...Z(),"baseline"]}],"align-items":[{items:["start","end","center","baseline","stretch"]}],"align-self":[{self:["auto","start","end","center","stretch","baseline"]}],"place-content":[{"place-content":[...Z(),"baseline"]}],"place-items":[{"place-items":["start","end","center","baseline","stretch"]}],"place-self":[{"place-self":["auto","start","end","center","stretch"]}],p:[{p:[gt]}],px:[{px:[gt]}],py:[{py:[gt]}],ps:[{ps:[gt]}],pe:[{pe:[gt]}],pt:[{pt:[gt]}],pr:[{pr:[gt]}],pb:[{pb:[gt]}],pl:[{pl:[gt]}],m:[{m:[Q]}],mx:[{mx:[Q]}],my:[{my:[Q]}],ms:[{ms:[Q]}],me:[{me:[Q]}],mt:[{mt:[Q]}],mr:[{mr:[Q]}],mb:[{mb:[Q]}],ml:[{ml:[Q]}],"space-x":[{"space-x":[J]}],"space-x-reverse":["space-x-reverse"],"space-y":[{"space-y":[J]}],"space-y-reverse":["space-y-reverse"],w:[{w:["auto","min","max","fit","svw","lvw","dvw",F,v]}],"min-w":[{"min-w":[F,v,"min","max","fit"]}],"max-w":[{"max-w":[F,v,"none","full","min","max","fit","prose",{screen:[De]},De]}],h:[{h:[F,v,"auto","min","max","fit","svh","lvh","dvh"]}],"min-h":[{"min-h":[F,v,"min","max","fit","svh","lvh","dvh"]}],"max-h":[{"max-h":[F,v,"min","max","fit","svh","lvh","dvh"]}],size:[{size:[F,v,"auto","min","max","fit"]}],"font-size":[{text:["base",De,Ne]}],"font-smoothing":["antialiased","subpixel-antialiased"],"font-style":["italic","not-italic"],"font-weight":[{font:["thin","extralight","light","normal","medium","semibold","bold","extrabold","black",Mf]}],"font-family":[{font:[ju]}],"fvn-normal":["normal-nums"],"fvn-ordinal":["ordinal"],"fvn-slashed-zero":["slashed-zero"],"fvn-figure":["lining-nums","oldstyle-nums"],"fvn-spacing":["proportional-nums","tabular-nums"],"fvn-fraction":["diagonal-fractions","stacked-fractions"],tracking:[{tracking:["tighter","tight","normal","wide","wider","widest",F]}],"line-clamp":[{"line-clamp":["none",Ha,Mf]}],leading:[{leading:["none","tight","snug","normal","relaxed","loose",ae,F]}],"list-image":[{"list-image":["none",F]}],"list-style-type":[{list:["none","disc","decimal",F]}],"list-style-position":[{list:["inside","outside"]}],"placeholder-color":[{placeholder:[r]}],"placeholder-opacity":[{"placeholder-opacity":[L]}],"text-alignment":[{text:["left","center","right","justify","start","end"]}],"text-color":[{text:[r]}],"text-opacity":[{"text-opacity":[L]}],"text-decoration":["underline","overline","line-through","no-underline"],"text-decoration-style":[{decoration:[...x(),"wavy"]}],"text-decoration-thickness":[{decoration:["auto","from-font",ae,Ne]}],"underline-offset":[{"underline-offset":["auto",ae,F]}],"text-decoration-color":[{decoration:[r]}],"text-transform":["uppercase","lowercase","capitalize","normal-case"],"text-overflow":["truncate","text-ellipsis","text-clip"],"text-wrap":[{text:["wrap","nowrap","balance","pretty"]}],indent:[{indent:I()}],"vertical-align":[{align:["baseline","top","middle","bottom","text-top","text-bottom","sub","super",F]}],whitespace:[{whitespace:["normal","nowrap","pre","pre-line","pre-wrap","break-spaces"]}],break:[{break:["normal","words","all","keep"]}],hyphens:[{hyphens:["none","manual","auto"]}],content:[{content:["none",F]}],"bg-attachment":[{bg:["fixed","local","scroll"]}],"bg-clip":[{"bg-clip":["border","padding","content","text"]}],"bg-opacity":[{"bg-opacity":[L]}],"bg-origin":[{"bg-origin":["border","padding","content"]}],"bg-position":[{bg:[...ll(),_h]}],"bg-repeat":[{bg:["no-repeat",{repeat:["","x","y","round","space"]}]}],"bg-size":[{bg:["auto","cover","contain",Mh]}],"bg-image":[{bg:["none",{"gradient-to":["t","tr","r","br","b","bl","l","tl"]},Nh]}],"bg-color":[{bg:[r]}],"gradient-from-pos":[{from:[ct]}],"gradient-via-pos":[{via:[ct]}],"gradient-to-pos":[{to:[ct]}],"gradient-from":[{from:[st]}],"gradient-via":[{via:[st]}],"gradient-to":[{to:[st]}],rounded:[{rounded:[O]}],"rounded-s":[{"rounded-s":[O]}],"rounded-e":[{"rounded-e":[O]}],"rounded-t":[{"rounded-t":[O]}],"rounded-r":[{"rounded-r":[O]}],"rounded-b":[{"rounded-b":[O]}],"rounded-l":[{"rounded-l":[O]}],"rounded-ss":[{"rounded-ss":[O]}],"rounded-se":[{"rounded-se":[O]}],"rounded-ee":[{"rounded-ee":[O]}],"rounded-es":[{"rounded-es":[O]}],"rounded-tl":[{"rounded-tl":[O]}],"rounded-tr":[{"rounded-tr":[O]}],"rounded-br":[{"rounded-br":[O]}],"rounded-bl":[{"rounded-bl":[O]}],"border-w":[{border:[U]}],"border-w-x":[{"border-x":[U]}],"border-w-y":[{"border-y":[U]}],"border-w-s":[{"border-s":[U]}],"border-w-e":[{"border-e":[U]}],"border-w-t":[{"border-t":[U]}],"border-w-r":[{"border-r":[U]}],"border-w-b":[{"border-b":[U]}],"border-w-l":[{"border-l":[U]}],"border-opacity":[{"border-opacity":[L]}],"border-style":[{border:[...x(),"hidden"]}],"divide-x":[{"divide-x":[U]}],"divide-x-reverse":["divide-x-reverse"],"divide-y":[{"divide-y":[U]}],"divide-y-reverse":["divide-y-reverse"],"divide-opacity":[{"divide-opacity":[L]}],"divide-style":[{divide:x()}],"border-color":[{border:[_]}],"border-color-x":[{"border-x":[_]}],"border-color-y":[{"border-y":[_]}],"border-color-s":[{"border-s":[_]}],"border-color-e":[{"border-e":[_]}],"border-color-t":[{"border-t":[_]}],"border-color-r":[{"border-r":[_]}],"border-color-b":[{"border-b":[_]}],"border-color-l":[{"border-l":[_]}],"divide-color":[{divide:[_]}],"outline-style":[{outline:["",...x()]}],"outline-offset":[{"outline-offset":[ae,F]}],"outline-w":[{outline:[ae,Ne]}],"outline-color":[{outline:[r]}],"ring-w":[{ring:Rl()}],"ring-w-inset":["ring-inset"],"ring-color":[{ring:[r]}],"ring-opacity":[{"ring-opacity":[L]}],"ring-offset-w":[{"ring-offset":[ae,Ne]}],"ring-offset-color":[{"ring-offset":[r]}],shadow:[{shadow:["","inner","none",De,Dh]}],"shadow-color":[{shadow:[ju]}],opacity:[{opacity:[L]}],"mix-blend":[{"mix-blend":[...C(),"plus-lighter","plus-darker"]}],"bg-blend":[{"bg-blend":C()}],filter:[{filter:["","none"]}],blur:[{blur:[S]}],brightness:[{brightness:[f]}],contrast:[{contrast:[N]}],"drop-shadow":[{"drop-shadow":["","none",De,F]}],grayscale:[{grayscale:[p]}],"hue-rotate":[{"hue-rotate":[R]}],invert:[{invert:[H]}],saturate:[{saturate:[zt]}],sepia:[{sepia:[it]}],"backdrop-filter":[{"backdrop-filter":["","none"]}],"backdrop-blur":[{"backdrop-blur":[S]}],"backdrop-brightness":[{"backdrop-brightness":[f]}],"backdrop-contrast":[{"backdrop-contrast":[N]}],"backdrop-grayscale":[{"backdrop-grayscale":[p]}],"backdrop-hue-rotate":[{"backdrop-hue-rotate":[R]}],"backdrop-invert":[{"backdrop-invert":[H]}],"backdrop-opacity":[{"backdrop-opacity":[L]}],"backdrop-saturate":[{"backdrop-saturate":[zt]}],"backdrop-sepia":[{"backdrop-sepia":[it]}],"border-collapse":[{border:["collapse","separate"]}],"border-spacing":[{"border-spacing":[D]}],"border-spacing-x":[{"border-spacing-x":[D]}],"border-spacing-y":[{"border-spacing-y":[D]}],"table-layout":[{table:["auto","fixed"]}],caption:[{caption:["top","bottom"]}],transition:[{transition:["none","all","","colors","opacity","shadow","transform",F]}],duration:[{duration:s()}],ease:[{ease:["linear","in","out","in-out",F]}],delay:[{delay:s()}],animate:[{animate:["none","spin","ping","pulse","bounce",F]}],transform:[{transform:["","gpu","none"]}],scale:[{scale:[_t]}],"scale-x":[{"scale-x":[_t]}],"scale-y":[{"scale-y":[_t]}],rotate:[{rotate:[Cu,F]}],"translate-x":[{"translate-x":[Rt]}],"translate-y":[{"translate-y":[Rt]}],"skew-x":[{"skew-x":[Ot]}],"skew-y":[{"skew-y":[Ot]}],"transform-origin":[{origin:["center","top","top-right","right","bottom-right","bottom","bottom-left","left","top-left",F]}],accent:[{accent:["auto",r]}],appearance:[{appearance:["none","auto"]}],cursor:[{cursor:["auto","default","pointer","wait","text","move","help","not-allowed","none","context-menu","progress","cell","crosshair","vertical-text","alias","copy","no-drop","grab","grabbing","all-scroll","col-resize","row-resize","n-resize","e-resize","s-resize","w-resize","ne-resize","nw-resize","se-resize","sw-resize","ew-resize","ns-resize","nesw-resize","nwse-resize","zoom-in","zoom-out",F]}],"caret-color":[{caret:[r]}],"pointer-events":[{"pointer-events":["none","auto"]}],resize:[{resize:["none","y","x",""]}],"scroll-behavior":[{scroll:["auto","smooth"]}],"scroll-m":[{"scroll-m":I()}],"scroll-mx":[{"scroll-mx":I()}],"scroll-my":[{"scroll-my":I()}],"scroll-ms":[{"scroll-ms":I()}],"scroll-me":[{"scroll-me":I()}],"scroll-mt":[{"scroll-mt":I()}],"scroll-mr":[{"scroll-mr":I()}],"scroll-mb":[{"scroll-mb":I()}],"scroll-ml":[{"scroll-ml":I()}],"scroll-p":[{"scroll-p":I()}],"scroll-px":[{"scroll-px":I()}],"scroll-py":[{"scroll-py":I()}],"scroll-ps":[{"scroll-ps":I()}],"scroll-pe":[{"scroll-pe":I()}],"scroll-pt":[{"scroll-pt":I()}],"scroll-pr":[{"scroll-pr":I()}],"scroll-pb":[{"scroll-pb":I()}],"scroll-pl":[{"scroll-pl":I()}],"snap-align":[{snap:["start","end","center","align-none"]}],"snap-stop":[{snap:["normal","always"]}],"snap-type":[{snap:["none","x","y","both"]}],"snap-strictness":[{snap:["mandatory","proximity"]}],touch:[{touch:["auto","none","manipulation"]}],"touch-x":[{"touch-pan":["x","left","right"]}],"touch-y":[{"touch-pan":["y","up","down"]}],"touch-pz":["touch-pinch-zoom"],select:[{select:["none","text","all","auto"]}],"will-change":[{"will-change":["auto","scroll","contents","transform",F]}],fill:[{fill:[r,"none"]}],"stroke-w":[{stroke:[ae,Ne,Mf]}],stroke:[{stroke:[r,"none"]}],sr:["sr-only","not-sr-only"],"forced-color-adjust":[{"forced-color-adjust":["auto","none"]}]},conflictingClassGroups:{overflow:["overflow-x","overflow-y"],overscroll:["overscroll-x","overscroll-y"],inset:["inset-x","inset-y","start","end","top","right","bottom","left"],"inset-x":["right","left"],"inset-y":["top","bottom"],flex:["basis","grow","shrink"],gap:["gap-x","gap-y"],p:["px","py","ps","pe","pt","pr","pb","pl"],px:["pr","pl"],py:["pt","pb"],m:["mx","my","ms","me","mt","mr","mb","ml"],mx:["mr","ml"],my:["mt","mb"],size:["w","h"],"font-size":["leading"],"fvn-normal":["fvn-ordinal","fvn-slashed-zero","fvn-figure","fvn-spacing","fvn-fraction"],"fvn-ordinal":["fvn-normal"],"fvn-slashed-zero":["fvn-normal"],"fvn-figure":["fvn-normal"],"fvn-spacing":["fvn-normal"],"fvn-fraction":["fvn-normal"],"line-clamp":["display","overflow"],rounded:["rounded-s","rounded-e","rounded-t","rounded-r","rounded-b","rounded-l","rounded-ss","rounded-se","rounded-ee","rounded-es","rounded-tl","rounded-tr","rounded-br","rounded-bl"],"rounded-s":["rounded-ss","rounded-es"],"rounded-e":["rounded-se","rounded-ee"],"rounded-t":["rounded-tl","rounded-tr"],"rounded-r":["rounded-tr","rounded-br"],"rounded-b":["rounded-br","rounded-bl"],"rounded-l":["rounded-tl","rounded-bl"],"border-spacing":["border-spacing-x","border-spacing-y"],"border-w":["border-w-s","border-w-e","border-w-t","border-w-r","border-w-b","border-w-l"],"border-w-x":["border-w-r","border-w-l"],"border-w-y":["border-w-t","border-w-b"],"border-color":["border-color-s","border-color-e","border-color-t","border-color-r","border-color-b","border-color-l"],"border-color-x":["border-color-r","border-color-l"],"border-color-y":["border-color-t","border-color-b"],"scroll-m":["scroll-mx","scroll-my","scroll-ms","scroll-me","scroll-mt","scroll-mr","scroll-mb","scroll-ml"],"scroll-mx":["scroll-mr","scroll-ml"],"scroll-my":["scroll-mt","scroll-mb"],"scroll-p":["scroll-px","scroll-py","scroll-ps","scroll-pe","scroll-pt","scroll-pr","scroll-pb","scroll-pl"],"scroll-px":["scroll-pr","scroll-pl"],"scroll-py":["scroll-pt","scroll-pb"],touch:["touch-x","touch-y","touch-pz"],"touch-x":["touch"],"touch-y":["touch"],"touch-pz":["touch"]},conflictingClassGroupModifiers:{"font-size":["leading"]}}},Hh=gh(Rh);function Zt(...r){return Hh(ah(r))}const Bh=["relative cursor-pointer","text-sm focus:z-10 focus:ring-2 font-medium focus:outline-none whitespace-nowrap shadow-sm","inline-flex gap-2 items-center justify-center transition-colors focus:ring-offset-1","disabled:opacity-40 disabled:cursor-not-allowed disabled:text-nb-gray-300 ring-offset-neutral-950/50"],qh={default:["bg-white hover:text-black focus:ring-zinc-200/50 hover:bg-gray-100 border-gray-200 text-gray-900","dark:focus:ring-zinc-800/50 dark:bg-nb-gray dark:text-gray-400 dark:border-gray-700/30 dark:hover:text-white dark:hover:bg-zinc-800/50"],primary:["dark:focus:ring-netbird-600/50 dark:ring-offset-neutral-950/50 enabled:dark:bg-netbird disabled:dark:bg-nb-gray-910 dark:text-gray-100 enabled:dark:hover:text-white enabled:dark:hover:bg-netbird-500/80","enabled:bg-netbird enabled:text-white enabled:focus:ring-netbird-400/50 enabled:hover:bg-netbird-500"],secondary:["bg-white hover:text-black focus:ring-zinc-200/50 hover:bg-gray-100 border-gray-200 text-gray-900","dark:ring-offset-neutral-950/50 dark:focus:ring-neutral-500/20","dark:bg-nb-gray-920 dark:text-gray-400 dark:border-gray-700/40 dark:hover:text-white dark:hover:bg-nb-gray-910"],secondaryLighter:["bg-white hover:text-black focus:ring-zinc-200/50 hover:bg-gray-100 border-gray-200 text-gray-900","dark:ring-offset-neutral-950/50 dark:focus:ring-neutral-500/20","dark:bg-nb-gray-900/70 dark:text-gray-400 dark:border-gray-700/70 dark:hover:text-white dark:hover:bg-nb-gray-800/60"],input:["bg-white hover:text-black focus:ring-zinc-200/50 hover:bg-gray-100 border-neutral-200 text-gray-900","dark:ring-offset-neutral-950/50 dark:focus:ring-neutral-500/20","dark:bg-nb-gray-900 dark:text-gray-400 dark:border-nb-gray-700 dark:hover:bg-nb-gray-900/80"],dropdown:["bg-white hover:text-black focus:ring-zinc-200/50 hover:bg-gray-100 border-neutral-200 text-gray-900","dark:ring-offset-neutral-950/50 dark:focus:ring-neutral-500/20","dark:bg-nb-gray-900/40 dark:text-gray-400 dark:border-nb-gray-900 dark:hover:bg-nb-gray-900/50"],dotted:["bg-white hover:text-black focus:ring-zinc-200/50 hover:bg-gray-100 border-gray-200 text-gray-900 border-dashed","dark:ring-offset-neutral-950/50 dark:focus:ring-neutral-500/20","dark:bg-nb-gray-900/30 dark:text-gray-400 dark:border-gray-500/40 dark:hover:text-white dark:hover:bg-zinc-800/50"],tertiary:["bg-white hover:text-black focus:ring-zinc-200/50 hover:bg-gray-100 border-gray-200 text-gray-900","dark:focus:ring-zinc-800/50 dark:bg-white dark:text-gray-800 dark:border-gray-700/40 dark:hover:bg-neutral-200 disabled:dark:bg-nb-gray-920 disabled:dark:text-nb-gray-300"],white:["focus:ring-white/50 bg-white text-gray-800 border-white outline-none hover:bg-neutral-200 disabled:dark:bg-nb-gray-920 disabled:dark:text-nb-gray-300","disabled:dark:bg-nb-gray-900 disabled:dark:text-nb-gray-300 disabled:dark:border-nb-gray-900"],outline:["bg-white hover:text-black focus:ring-zinc-200/50 hover:bg-gray-100 border-gray-200 text-gray-900","dark:focus:ring-zinc-800/50 dark:bg-transparent dark:text-netbird dark:border-netbird dark:hover:bg-nb-gray-900/30"],"danger-outline":["enabled:dark:focus:ring-red-800/20 enabled:dark:focus:bg-red-950/40 enabled:hover:dark:bg-red-950/50 enabled:dark:hover:border-red-800/50 dark:bg-transparent dark:text-red-500"],"danger-text":["dark:bg-transparent dark:text-red-500 dark:hover:text-red-600 dark:border-transparent !px-0 !shadow-none !py-0 focus:ring-red-500/30 dark:ring-offset-neutral-950/50"],"default-outline":["dark:ring-offset-nb-gray-950/50 dark:focus:ring-nb-gray-500/20","dark:bg-transparent dark:text-nb-gray-400 dark:border-transparent dark:hover:text-white dark:hover:bg-nb-gray-900/30 dark:hover:border-nb-gray-800/50","data-[state=open]:dark:text-white data-[state=open]:dark:bg-nb-gray-900/30 data-[state=open]:dark:border-nb-gray-800/50"],danger:["dark:focus:ring-red-700/20 dark:focus:bg-red-700 hover:dark:bg-red-700 dark:hover:border-red-800/50 dark:bg-red-600 dark:text-red-100"]},Yh={xs:"text-xs py-2 px-4",xs2:"text-[0.78rem] py-2 px-4",sm:"text-sm py-2.5 px-4",md:"text-sm py-2.5 px-4",lg:"text-base py-2.5 px-4"},Gh={0:"border",1:"border border-transparent",2:"border border-t-0 border-b-0"},Ru=xt.forwardRef(({variant:r="default",rounded:v=!0,border:S=1,size:f="md",stopPropagation:_=!0,className:O,onClick:D,children:U,...N},p)=>A.jsx("button",{type:"button",...N,ref:p,className:Zt(Bh,qh[r],Yh[f],Gh[S?1:0],v&&"rounded-md",O),onClick:R=>{_&&R.stopPropagation(),D?.(R)},children:U}));Ru.displayName="Button";const Xh={default:["bg-nb-gray-900 placeholder:text-neutral-400/70 border-nb-gray-700","ring-offset-neutral-950/50 focus-visible:ring-neutral-500/20"],darker:["bg-nb-gray-920 placeholder:text-neutral-400/70 border-nb-gray-800","ring-offset-neutral-950/50 focus-visible:ring-neutral-500/20"],error:["bg-nb-gray-900 placeholder:text-neutral-400/70 border-red-500 text-red-500","ring-offset-red-500/10 focus-visible:ring-red-500/10"]},Qh={default:"bg-nb-gray-900 border-nb-gray-700 text-nb-gray-300",error:"bg-nb-gray-900 border-red-500 text-nb-gray-300 text-red-500"},c0=xt.forwardRef(({className:r,type:v,customSuffix:S,customPrefix:f,icon:_,maxWidthClass:O="",error:D,variant:U="default",prefixClassName:N,showPasswordToggle:p=!1,...R},H)=>{const[V,st]=xt.useState(!1),ct=v==="password",G=ct&&V?"text":v,L=(ct&&p?A.jsx("button",{type:"button",onClick:()=>st(!V),className:"hover:text-white transition-all","aria-label":"Toggle password visibility",children:V?A.jsx(km,{size:18}):A.jsx(Wm,{size:18})}):null)||S,gt=D?"error":U;return A.jsxs(A.Fragment,{children:[A.jsxs("div",{className:Zt("flex relative h-[42px]",O),children:[f&&A.jsx("div",{className:Zt(Qh[D?"error":"default"],"flex h-[42px] w-auto rounded-l-md px-3 py-2 text-sm","border items-center whitespace-nowrap",R.disabled&&"opacity-40",N),children:f}),A.jsx("div",{className:Zt("absolute left-0 top-0 h-full flex items-center text-xs text-nb-gray-300 pl-3 leading-[0]",R.disabled&&"opacity-40"),children:_}),A.jsx("input",{type:G,ref:H,...R,className:Zt(Xh[gt],"flex h-[42px] w-full rounded-md px-3 py-2 text-sm","file:bg-transparent file:text-sm file:font-medium file:border-0","focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-offset-2","disabled:cursor-not-allowed disabled:opacity-40","border",f&&"!border-l-0 !rounded-l-none",L&&"!pr-16",_&&"!pl-10",r)}),A.jsx("div",{className:Zt("absolute right-0 top-0 h-full flex items-center text-xs text-nb-gray-300 pr-4 leading-[0] select-none",R.disabled&&"opacity-30"),children:L})]}),D&&A.jsx("p",{className:"text-xs text-red-500 mt-2",children:D})]})});c0.displayName="Input";const Zh=xt.forwardRef(function({value:v,onChange:S,length:f=6,disabled:_=!1,className:O,autoFocus:D=!1},U){const N=xt.useRef([]);xt.useImperativeHandle(U,()=>({focus:()=>{N.current[0]?.focus()}}));const p=v.split("").concat(new Array(f).fill("")).slice(0,f),R=Array.from({length:f},(G,Q)=>`pin-${Q}`),H=(G,Q)=>{if(!/^\d*$/.test(Q))return;const L=[...p];L[G]=Q.slice(-1);const gt=L.join("").replaceAll(/\s/g,"");S(gt),Q&&G{Q.key==="Backspace"&&!p[G]&&G>0&&N.current[G-1]?.focus(),Q.key==="ArrowLeft"&&G>0&&N.current[G-1]?.focus(),Q.key==="ArrowRight"&&G{G.preventDefault();const Q=G.clipboardData.getData("text").replaceAll(/\D/g,"").slice(0,f);S(Q);const L=Math.min(Q.length,f-1);N.current[L]?.focus()},ct=G=>{G.target.select()};return A.jsx("div",{className:Zt("flex gap-2 w-full min-w-0",O),children:p.map((G,Q)=>A.jsx("input",{id:R[Q],ref:L=>{N.current[Q]=L},type:"text",inputMode:"numeric",maxLength:1,value:G,onChange:L=>H(Q,L.target.value),onKeyDown:L=>V(Q,L),onPaste:st,onFocus:ct,disabled:_,autoFocus:D&&Q===0,className:Zt("flex-1 min-w-0 h-[42px] text-center text-sm rounded-md","dark:bg-nb-gray-900 border dark:border-nb-gray-700","dark:placeholder:text-neutral-400/70","focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-offset-2","ring-offset-neutral-200/20 dark:ring-offset-neutral-950/50 dark:focus-visible:ring-neutral-500/20","disabled:cursor-not-allowed disabled:opacity-40")},R[Q]))})}),f0=xt.createContext({value:"",onChange:()=>{}}),r0=()=>xt.useContext(f0);function $e({value:r,defaultValue:v,onChange:S,children:f}){const[_,O]=xt.useState(v??""),D=r??_,U=xt.useCallback(p=>{r===void 0&&O(p),S?.(p)},[r,S]),N=xt.useMemo(()=>({value:D,onChange:U}),[D,U]);return A.jsx(f0.Provider,{value:N,children:A.jsx("div",{children:typeof f=="function"?f({value:D,onChange:U}):f})})}function wh({children:r,className:v}){return A.jsx("div",{role:"tablist",className:Zt("bg-nb-gray-930/70 p-1.5 flex justify-center gap-1 border-nb-gray-900",v),children:r})}function Lh({children:r,value:v,disabled:S=!1,className:f,selected:_,onClick:O}){const D=r0(),U=_??v===D.value;let N="";U?N="bg-nb-gray-900 text-white":S||(N="text-nb-gray-400 hover:bg-nb-gray-900/50");const p=()=>{D.onChange(v),O?.()};return A.jsx("button",{role:"tab",type:"button",disabled:S,"aria-selected":U,onClick:p,className:Zt("px-4 py-2 text-sm rounded-md w-full transition-all cursor-pointer",S&&"opacity-30 cursor-not-allowed",N,f),children:A.jsx("div",{className:"flex items-center w-full justify-center gap-2",children:r})})}function Vh({children:r,value:v,className:S,visible:f}){const _=r0();return f??v===_.value?A.jsx("div",{role:"tabpanel",className:Zt("bg-nb-gray-930/70 px-4 pt-4 pb-5 rounded-b-md border border-t-0 border-nb-gray-900",S),children:r}):null}$e.List=wh;$e.Trigger=Lh;$e.Content=Vh;const Kh="/__netbird__/assets/netbird-full.svg",Jh="data:image/svg+xml,%3csvg%20width='31'%20height='23'%20viewBox='0%200%2031%2023'%20fill='none'%20xmlns='http://www.w3.org/2000/svg'%3e%3cpath%20d='M21.4631%200.523438C17.8173%200.857913%2016.0028%202.95675%2015.3171%204.01871L4.66406%2022.4734H17.5163L30.1929%200.523438H21.4631Z'%20fill='%23F68330'/%3e%3cpath%20d='M17.5265%2022.4737L0%203.88525C0%203.88525%2019.8177%20-1.44128%2021.7493%2015.1738L17.5265%2022.4737Z'%20fill='%23F68330'/%3e%3cpath%20d='M14.9236%204.70563L9.54688%2014.0208L17.5158%2022.4747L21.7385%2015.158C21.0696%209.44682%2018.2851%206.32784%2014.9236%204.69727'%20fill='%23F05252'/%3e%3c/svg%3e",ti={small:{desktop:14,mobile:20},default:{desktop:22,mobile:30},large:{desktop:24,mobile:40}},kh=({size:r="default",mobile:v=!0})=>A.jsxs(A.Fragment,{children:[A.jsx("img",{src:Kh,height:ti[r].desktop,style:{height:ti[r].desktop},alt:"NetBird Logo",className:Zt(v&&"hidden md:block","group-hover:opacity-80 transition-all")}),v&&A.jsx("img",{src:Jh,width:ti[r].mobile,style:{width:ti[r].mobile},alt:"NetBird Logo",className:Zt(v&&"md:hidden ml-4")})]});function Uf(){return A.jsxs("a",{href:"https://netbird.io?utm_source=netbird-proxy&utm_medium=web&utm_campaign=powered_by",target:"_blank",rel:"noopener noreferrer",className:"flex items-center justify-center mt-8 gap-2 group cursor-pointer",children:[A.jsx("span",{className:"text-sm text-nb-gray-400 font-light text-center group-hover:opacity-80 transition-all",children:"Powered by"}),A.jsx(kh,{size:"small",mobile:!1})]})}const Wh=({className:r})=>A.jsx("div",{className:Zt("h-full w-full absolute left-0 top-0 rounded-md overflow-hidden z-0 pointer-events-none",r),children:A.jsx("div",{className:"bg-linear-to-b from-nb-gray-900/10 via-transparent to-transparent w-full h-full rounded-md"})}),Fd=({children:r,className:v})=>A.jsxs("div",{className:Zt("px-6 sm:px-10 py-10 pt-8","bg-nb-gray-940 border border-nb-gray-910 rounded-lg relative",v),children:[A.jsx(Wh,{}),r]});function Cf({children:r,className:v}){return A.jsx("h1",{className:Zt("text-xl! text-center z-10 relative",v),children:r})}function jf({children:r,className:v}){return A.jsx("div",{className:Zt("text-sm text-nb-gray-300 font-light mt-2 block text-center z-10 relative",v),children:r})}const $h=()=>A.jsxs("div",{className:"flex items-center justify-center relative my-4",children:[A.jsx("span",{className:"bg-nb-gray-940 relative z-10 px-4 text-xs text-nb-gray-400 font-medium",children:"OR"}),A.jsx("span",{className:"h-px bg-nb-gray-900 w-full absolute z-0"})]}),Fh=({error:r})=>A.jsx("div",{className:"text-red-400 bg-red-800/20 border border-red-800/50 rounded-lg px-4 py-3 whitespace-break-spaces text-sm",children:r});function Id({className:r,htmlFor:v,...S}){return A.jsx("label",{htmlFor:v,className:Zt("text-sm font-medium tracking-wider leading-none","peer-disabled:cursor-not-allowed peer-disabled:opacity-70","mb-2.5 inline-block text-nb-gray-200","flex items-center gap-2 select-none",r),...S})}const _f=t0(),Ft=_f.methods&&Object.keys(_f.methods).length>0?_f.methods:{password:"password",pin:"pin",oidc:"/auth/oidc"};function Ih(){xt.useEffect(()=>{document.title="Authentication Required - NetBird Service"},[]);const[r,v]=xt.useState(null),[S,f]=xt.useState(null),[_,O]=xt.useState(""),[D,U]=xt.useState(""),N=xt.useRef(null),p=xt.useRef(null),[R,H]=xt.useState(Ft.password?"password":"pin"),V=(it,Ot)=>{v(Ot),f(null),it==="password"?(U(""),setTimeout(()=>N.current?.focus(),200)):(O(""),setTimeout(()=>p.current?.focus(),200))},st=(it,Ot)=>{v(null),f(it);const J=new FormData;it==="password"?J.append(Ft.password,Ot):J.append(Ft.pin,Ot),fetch(globalThis.location.href,{method:"POST",body:J,redirect:"manual"}).then(Rt=>{Rt.type==="opaqueredirect"||Rt.status===0?(f("redirect"),globalThis.location.reload()):V(it,"Authentication failed. Please try again.")}).catch(()=>{V(it,"An error occurred. Please try again.")})},ct=it=>{O(it),it.length===6&&st("pin",it)},G=_.length===6,Q=D.length>0,L=S!==null||R==="password"&&!Q||R==="pin"&&!G,gt=Ft.password||Ft.pin,zt=Ft.password&&Ft.pin,_t=R==="password"?"Sign in":"Submit";return S==="redirect"?A.jsxs("main",{className:"mt-20",children:[A.jsxs(Fd,{className:"max-w-105 mx-auto",children:[A.jsx(Cf,{children:"Authenticated"}),A.jsx(jf,{children:"Loading service..."}),A.jsx("div",{className:"flex justify-center mt-7",children:A.jsx(kd,{className:"animate-spin",size:24})})]}),A.jsx(Uf,{})]}):A.jsxs("main",{className:"mt-20",children:[A.jsxs(Fd,{className:"max-w-105 mx-auto",children:[A.jsx(Cf,{children:"Authentication Required"}),A.jsx(jf,{children:"The service you are trying to access is protected. Please authenticate to continue."}),A.jsxs("div",{className:"flex flex-col gap-4 mt-7 z-10 relative",children:[r&&A.jsx(Fh,{error:r}),Ft.oidc&&A.jsxs(Ru,{variant:"primary",className:"w-full",onClick:()=>{globalThis.location.href=Ft.oidc},children:[A.jsx(Im,{size:16}),"Sign in with SSO"]}),Ft.oidc&>&&A.jsx($h,{}),gt&&A.jsxs("form",{onSubmit:it=>{it.preventDefault(),st(R,R==="password"?D:_)},children:[zt&&A.jsx($e,{value:R,onChange:it=>{H(it),setTimeout(()=>{it==="password"?N.current?.focus():p.current?.focus()},0)},children:A.jsxs($e.List,{className:"rounded-lg border mb-4",children:[A.jsxs($e.Trigger,{value:"password",children:[A.jsx(Fm,{size:14}),"Password"]}),A.jsxs($e.Trigger,{value:"pin",children:[A.jsx(Km,{size:14}),"PIN"]})]})}),A.jsxs("div",{className:"mb-4",children:[Ft.password&&(R==="password"||!Ft.pin)&&A.jsxs(A.Fragment,{children:[!zt&&A.jsx(Id,{htmlFor:"password",children:"Password"}),A.jsx(c0,{ref:N,type:"password",id:"password",placeholder:"Enter password",disabled:S!==null,showPasswordToggle:!0,autoFocus:!0,value:D,onChange:it=>U(it.target.value)})]}),Ft.pin&&(R==="pin"||!Ft.password)&&A.jsxs(A.Fragment,{children:[!zt&&A.jsx(Id,{htmlFor:"pin-0",children:"Enter PIN Code"}),A.jsx(Zh,{ref:p,value:_,onChange:ct,disabled:S!==null,autoFocus:!Ft.password})]})]}),A.jsx(Ru,{type:"submit",disabled:L,variant:"secondary",className:"w-full",children:S===null?_t:A.jsxs(A.Fragment,{children:[A.jsx(kd,{className:"animate-spin",size:16}),"Verifying..."]})})]})]})]}),A.jsx(Uf,{})]})}function Ph({success:r=!0}){return r?A.jsx("div",{className:"flex-1 flex items-center justify-center h-12 w-full px-5",children:A.jsx("div",{className:"w-full border-t-2 border-dashed border-green-500"})}):A.jsxs("div",{className:"flex-1 flex items-center justify-center h-12 min-w-10 px-5 relative",children:[A.jsx("div",{className:"w-full border-t-2 border-dashed border-nb-gray-900"}),A.jsx("div",{className:"absolute inset-0 flex items-center justify-center",children:A.jsx("div",{className:"w-8 h-8 rounded-full flex items-center justify-center",children:A.jsx(eh,{size:18,className:"text-netbird"})})})]})}function Of({icon:r,label:v,detail:S,success:f=!0,line:_=!0}){return A.jsxs(A.Fragment,{children:[_&&A.jsx(Ph,{success:f}),A.jsxs("div",{className:"flex flex-col items-center gap-2",children:[A.jsx("div",{className:"w-14 h-14 rounded-md flex items-center justify-center from-nb-gray-940 to-nb-gray-930/70 bg-gradient-to-br border border-nb-gray-910",children:A.jsx(r,{size:20,className:"text-nb-gray-200"})}),A.jsx("span",{className:"text-sm text-nb-gray-200 font-normal mt-1",children:v}),A.jsx("span",{className:`text-xs font-medium uppercase ${f?"text-green-500":"text-netbird"}`,children:f?"Connected":"Unreachable"}),S&&A.jsx("span",{className:"text-xs text-nb-gray-400 truncate text-center",children:S})]})]})}function tg({code:r,title:v,message:S,proxy:f=!0,destination:_=!0,requestId:O,simple:D=!1,retryUrl:U}){xt.useEffect(()=>{document.title=`${v} - NetBird Service`},[v]);const[N]=xt.useState(()=>new Date().toISOString());return A.jsxs("main",{className:"flex flex-col items-center mt-24 px-4 max-w-3xl mx-auto",children:[A.jsxs("div",{className:"text-sm text-netbird font-normal font-mono mb-3 z-10 relative",children:["Error ",r]}),A.jsx(Cf,{className:"text-3xl!",children:v}),A.jsx(jf,{className:"mt-2 mb-8 max-w-md",children:S}),!D&&A.jsxs("div",{className:"hidden sm:flex items-start justify-center w-full mt-6 mb-16 z-10 relative",children:[A.jsx(Of,{icon:th,label:"You",line:!1}),A.jsx(Of,{icon:lh,label:"Proxy",success:f}),A.jsx(Of,{icon:$m,label:"Destination",success:_})]}),A.jsxs("div",{className:"flex gap-3 justify-center items-center mb-6 z-10 relative",children:[A.jsxs(Ru,{variant:"primary",onClick:()=>{U?globalThis.location.href=U:globalThis.location.reload()},children:[A.jsx(Pm,{size:16}),"Refresh Page"]}),A.jsxs(Ru,{variant:"secondary",onClick:()=>globalThis.open("https://docs.netbird.io","_blank","noopener,noreferrer"),children:[A.jsx(Jm,{size:16}),"Documentation"]})]}),A.jsxs("div",{className:"text-center text-xs text-nb-gray-300 uppercase z-10 relative font-mono flex flex-col sm:flex-row gap-2 sm:gap-10 mt-4 mb-3",children:[A.jsxs("div",{children:[A.jsx("span",{className:"text-nb-gray-400",children:"REQUEST-ID:"})," ",O]}),A.jsxs("div",{children:[A.jsx("span",{className:"text-nb-gray-400",children:"TIMESTAMP:"})," ",N]})]}),A.jsx(Uf,{})]})}const Nf=t0();Zm.createRoot(document.getElementById("root")).render(A.jsx(xt.StrictMode,{children:Nf.page==="error"&&Nf.error?A.jsx(tg,{...Nf.error}):A.jsx(Ih,{})})); diff --git a/proxy/web/dist/assets/netbird-full.svg b/proxy/web/dist/assets/netbird-full.svg new file mode 100644 index 000000000..f925d5761 --- /dev/null +++ b/proxy/web/dist/assets/netbird-full.svg @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + diff --git a/proxy/web/dist/assets/style.css b/proxy/web/dist/assets/style.css new file mode 100644 index 000000000..95a00c303 --- /dev/null +++ b/proxy/web/dist/assets/style.css @@ -0,0 +1 @@ +@layer properties{@supports (((-webkit-hyphens:none)) and (not (margin-trim:inline))) or ((-moz-orient:inline) and (not (color:rgb(from red r g b)))){*,:before,:after,::backdrop{--tw-rotate-x:initial;--tw-rotate-y:initial;--tw-rotate-z:initial;--tw-skew-x:initial;--tw-skew-y:initial;--tw-pan-x:initial;--tw-pan-y:initial;--tw-pinch-zoom:initial;--tw-space-y-reverse:0;--tw-space-x-reverse:0;--tw-divide-x-reverse:0;--tw-border-style:solid;--tw-divide-y-reverse:0;--tw-gradient-position:initial;--tw-gradient-from:#0000;--tw-gradient-via:#0000;--tw-gradient-to:#0000;--tw-gradient-stops:initial;--tw-gradient-via-stops:initial;--tw-gradient-from-position:0%;--tw-gradient-via-position:50%;--tw-gradient-to-position:100%;--tw-leading:initial;--tw-font-weight:initial;--tw-tracking:initial;--tw-ordinal:initial;--tw-slashed-zero:initial;--tw-numeric-figure:initial;--tw-numeric-spacing:initial;--tw-numeric-fraction:initial;--tw-shadow:0 0 #0000;--tw-shadow-color:initial;--tw-shadow-alpha:100%;--tw-inset-shadow:0 0 #0000;--tw-inset-shadow-color:initial;--tw-inset-shadow-alpha:100%;--tw-ring-color:initial;--tw-ring-shadow:0 0 #0000;--tw-inset-ring-color:initial;--tw-inset-ring-shadow:0 0 #0000;--tw-ring-inset:initial;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-offset-shadow:0 0 #0000;--tw-outline-style:solid;--tw-blur:initial;--tw-brightness:initial;--tw-contrast:initial;--tw-grayscale:initial;--tw-hue-rotate:initial;--tw-invert:initial;--tw-opacity:initial;--tw-saturate:initial;--tw-sepia:initial;--tw-drop-shadow:initial;--tw-drop-shadow-color:initial;--tw-drop-shadow-alpha:100%;--tw-drop-shadow-size:initial;--tw-backdrop-blur:initial;--tw-backdrop-brightness:initial;--tw-backdrop-contrast:initial;--tw-backdrop-grayscale:initial;--tw-backdrop-hue-rotate:initial;--tw-backdrop-invert:initial;--tw-backdrop-opacity:initial;--tw-backdrop-saturate:initial;--tw-backdrop-sepia:initial}}}@layer theme{:root,:host{--font-sans:ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-mono:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;--color-red-100:#fde8e8;--color-red-400:#f98080;--color-red-500:#f05252;--color-red-600:#e02424;--color-red-700:#c81e1e;--color-red-800:#9b1c1c;--color-red-950:oklch(25.8% .092 26.042);--color-green-500:#0e9f6e;--color-gray-100:#f3f4f6;--color-gray-200:#e5e7eb;--color-gray-400:#9ca3af;--color-gray-500:#6b7280;--color-gray-700:#374151;--color-gray-800:#1f2937;--color-gray-900:#111827;--color-zinc-50:oklch(98.5% 0 0);--color-zinc-200:oklch(92% .004 286.32);--color-zinc-800:oklch(27.4% .006 286.033);--color-neutral-200:oklch(92.2% 0 0);--color-neutral-400:oklch(70.8% 0 0);--color-neutral-500:oklch(55.6% 0 0);--color-neutral-950:oklch(14.5% 0 0);--color-black:#000;--color-white:#fff;--spacing:.25rem;--container-md:28rem;--container-3xl:48rem;--text-xs:.75rem;--text-xs--line-height:calc(1/.75);--text-sm:.875rem;--text-sm--line-height:calc(1.25/.875);--text-base:1rem;--text-base--line-height: 1.5 ;--text-xl:1.25rem;--text-xl--line-height:calc(1.75/1.25);--text-2xl:1.5rem;--text-2xl--line-height:calc(2/1.5);--text-3xl:1.875rem;--text-3xl--line-height: 1.2 ;--font-weight-light:300;--font-weight-normal:400;--font-weight-medium:500;--tracking-wide:.025em;--tracking-wider:.05em;--radius-md:.375rem;--radius-lg:.5rem;--animate-spin:spin 1s linear infinite;--default-transition-duration:.15s;--default-transition-timing-function:cubic-bezier(.4,0,.2,1);--default-font-family:var(--font-sans);--default-mono-font-family:var(--font-mono);--color-nb-gray:#181a1d;--color-nb-gray-100:#e4e7e9;--color-nb-gray-200:#cbd2d6;--color-nb-gray-300:#aab4bd;--color-nb-gray-400:#7c8994;--color-nb-gray-500:#616e79;--color-nb-gray-700:#474e57;--color-nb-gray-800:#3f444b;--color-nb-gray-900:#32363d;--color-nb-gray-910:#2b2f33;--color-nb-gray-920:#25282d;--color-nb-gray-930:#25282c;--color-nb-gray-940:#1c1e21;--color-nb-gray-950:#181a1d;--color-netbird:#f68330;--color-netbird-400:#f68330;--color-netbird-500:#f46d1b;--color-netbird-600:#e55311}}@layer base{*,:after,:before,::backdrop{box-sizing:border-box;border:0 solid;margin:0;padding:0}::file-selector-button{box-sizing:border-box;border:0 solid;margin:0;padding:0}html,:host{-webkit-text-size-adjust:100%;tab-size:4;line-height:1.5;font-family:var(--default-font-family,ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji");font-feature-settings:var(--default-font-feature-settings,normal);font-variation-settings:var(--default-font-variation-settings,normal);-webkit-tap-highlight-color:transparent}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;-webkit-text-decoration:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:var(--default-mono-font-family,ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace);font-feature-settings:var(--default-mono-font-feature-settings,normal);font-variation-settings:var(--default-mono-font-variation-settings,normal);font-size:1em}small{font-size:80%}sub,sup{vertical-align:baseline;font-size:75%;line-height:0;position:relative}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}:-moz-focusring{outline:auto}progress{vertical-align:baseline}summary{display:list-item}ol,ul,menu{list-style:none}img,svg,video,canvas,audio,iframe,embed,object{vertical-align:middle;display:block}img,video{max-width:100%;height:auto}button,input,select,optgroup,textarea{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}::file-selector-button{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}:where(select:is([multiple],[size])) optgroup{font-weight:bolder}:where(select:is([multiple],[size])) optgroup option{padding-inline-start:20px}::file-selector-button{margin-inline-end:4px}::placeholder{opacity:1}@supports (not ((-webkit-appearance:-apple-pay-button))) or (contain-intrinsic-size:1px){::placeholder{color:currentColor}@supports (color:color-mix(in lab,red,red)){::placeholder{color:color-mix(in oklab,currentcolor 50%,transparent)}}}textarea{resize:vertical}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-date-and-time-value{min-height:1lh;text-align:inherit}::-webkit-datetime-edit{display:inline-flex}::-webkit-datetime-edit-fields-wrapper{padding:0}::-webkit-datetime-edit{padding-block:0}::-webkit-datetime-edit-year-field{padding-block:0}::-webkit-datetime-edit-month-field{padding-block:0}::-webkit-datetime-edit-day-field{padding-block:0}::-webkit-datetime-edit-hour-field{padding-block:0}::-webkit-datetime-edit-minute-field{padding-block:0}::-webkit-datetime-edit-second-field{padding-block:0}::-webkit-datetime-edit-millisecond-field{padding-block:0}::-webkit-datetime-edit-meridiem-field{padding-block:0}::-webkit-calendar-picker-indicator{line-height:1}:-moz-ui-invalid{box-shadow:none}button,input:where([type=button],[type=reset],[type=submit]){appearance:button}::file-selector-button{appearance:button}::-webkit-inner-spin-button{height:auto}::-webkit-outer-spin-button{height:auto}[hidden]:where(:not([hidden=until-found])){display:none!important}}@layer components;@layer utilities{.pointer-events-none{pointer-events:none}.collapse{visibility:collapse}.invisible{visibility:hidden}.visible{visibility:visible}.sr-only{clip-path:inset(50%);white-space:nowrap;border-width:0;width:1px;height:1px;margin:-1px;padding:0;position:absolute;overflow:hidden}.not-sr-only{clip-path:none;white-space:normal;width:auto;height:auto;margin:0;padding:0;position:static;overflow:visible}.absolute{position:absolute}.fixed{position:fixed}.relative{position:relative}.static{position:static}.sticky{position:sticky}.inset-0{inset:calc(var(--spacing)*0)}.top-0{top:calc(var(--spacing)*0)}.right-0{right:calc(var(--spacing)*0)}.left-0{left:calc(var(--spacing)*0)}.isolate{isolation:isolate}.isolation-auto{isolation:auto}.z-0{z-index:0}.z-10{z-index:10}.container{width:100%}@media(min-width:40rem){.container{max-width:40rem}}@media(min-width:48rem){.container{max-width:48rem}}@media(min-width:64rem){.container{max-width:64rem}}@media(min-width:80rem){.container{max-width:80rem}}@media(min-width:96rem){.container{max-width:96rem}}.mx-auto{margin-inline:auto}.my-4{margin-block:calc(var(--spacing)*4)}.mt-1{margin-top:calc(var(--spacing)*1)}.mt-2{margin-top:calc(var(--spacing)*2)}.mt-4{margin-top:calc(var(--spacing)*4)}.mt-6{margin-top:calc(var(--spacing)*6)}.mt-7{margin-top:calc(var(--spacing)*7)}.mt-8{margin-top:calc(var(--spacing)*8)}.mt-20{margin-top:calc(var(--spacing)*20)}.mt-24{margin-top:calc(var(--spacing)*24)}.mb-2\.5{margin-bottom:calc(var(--spacing)*2.5)}.mb-3{margin-bottom:calc(var(--spacing)*3)}.mb-4{margin-bottom:calc(var(--spacing)*4)}.mb-6{margin-bottom:calc(var(--spacing)*6)}.mb-8{margin-bottom:calc(var(--spacing)*8)}.mb-16{margin-bottom:calc(var(--spacing)*16)}.ml-4{margin-left:calc(var(--spacing)*4)}.block{display:block}.contents{display:contents}.flex{display:flex}.flow-root{display:flow-root}.grid{display:grid}.hidden{display:none}.inline{display:inline}.inline-block{display:inline-block}.inline-flex{display:inline-flex}.inline-grid{display:inline-grid}.inline-table{display:inline-table}.list-item{display:list-item}.table{display:table}.table-caption{display:table-caption}.table-cell{display:table-cell}.table-column{display:table-column}.table-column-group{display:table-column-group}.table-footer-group{display:table-footer-group}.table-header-group{display:table-header-group}.table-row{display:table-row}.table-row-group{display:table-row-group}.h-8{height:calc(var(--spacing)*8)}.h-12{height:calc(var(--spacing)*12)}.h-14{height:calc(var(--spacing)*14)}.h-\[42px\]{height:42px}.h-full{height:100%}.h-px{height:1px}.w-8{width:calc(var(--spacing)*8)}.w-14{width:calc(var(--spacing)*14)}.w-auto{width:auto}.w-full{width:100%}.max-w-3xl{max-width:var(--container-3xl)}.max-w-105{max-width:calc(var(--spacing)*105)}.max-w-md{max-width:var(--container-md)}.min-w-0{min-width:calc(var(--spacing)*0)}.min-w-10{min-width:calc(var(--spacing)*10)}.flex-1{flex:1}.shrink{flex-shrink:1}.grow{flex-grow:1}.border-collapse{border-collapse:collapse}.transform{transform:var(--tw-rotate-x,)var(--tw-rotate-y,)var(--tw-rotate-z,)var(--tw-skew-x,)var(--tw-skew-y,)}.animate-spin{animation:var(--animate-spin)}.cursor-not-allowed{cursor:not-allowed}.cursor-pointer{cursor:pointer}.touch-pinch-zoom{--tw-pinch-zoom:pinch-zoom;touch-action:var(--tw-pan-x,)var(--tw-pan-y,)var(--tw-pinch-zoom,)}.resize{resize:both}.flex-col{flex-direction:column}.flex-wrap{flex-wrap:wrap}.items-center{align-items:center}.items-start{align-items:flex-start}.justify-center{justify-content:center}.gap-1{gap:calc(var(--spacing)*1)}.gap-2{gap:calc(var(--spacing)*2)}.gap-3{gap:calc(var(--spacing)*3)}.gap-4{gap:calc(var(--spacing)*4)}:where(.space-y-reverse>:not(:last-child)){--tw-space-y-reverse:1}:where(.space-x-reverse>:not(:last-child)){--tw-space-x-reverse:1}:where(.divide-x>:not(:last-child)){--tw-divide-x-reverse:0;border-inline-style:var(--tw-border-style);border-inline-start-width:calc(1px*var(--tw-divide-x-reverse));border-inline-end-width:calc(1px*calc(1 - var(--tw-divide-x-reverse)))}:where(.divide-y>:not(:last-child)){--tw-divide-y-reverse:0;border-bottom-style:var(--tw-border-style);border-top-style:var(--tw-border-style);border-top-width:calc(1px*var(--tw-divide-y-reverse));border-bottom-width:calc(1px*calc(1 - var(--tw-divide-y-reverse)))}:where(.divide-y-reverse>:not(:last-child)){--tw-divide-y-reverse:1}.truncate{text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.overflow-hidden{overflow:hidden}.rounded{border-radius:.25rem}.rounded-full{border-radius:3.40282e38px}.rounded-lg{border-radius:var(--radius-lg)}.rounded-md{border-radius:var(--radius-md)}.rounded-s{border-start-start-radius:.25rem;border-end-start-radius:.25rem}.rounded-ss{border-start-start-radius:.25rem}.rounded-e{border-start-end-radius:.25rem;border-end-end-radius:.25rem}.rounded-se{border-start-end-radius:.25rem}.rounded-ee{border-end-end-radius:.25rem}.rounded-es{border-end-start-radius:.25rem}.rounded-t{border-top-left-radius:.25rem;border-top-right-radius:.25rem}.\!rounded-l-none{border-top-left-radius:0!important;border-bottom-left-radius:0!important}.rounded-l{border-top-left-radius:.25rem;border-bottom-left-radius:.25rem}.rounded-l-md{border-top-left-radius:var(--radius-md);border-bottom-left-radius:var(--radius-md)}.rounded-tl{border-top-left-radius:.25rem}.rounded-r{border-top-right-radius:.25rem;border-bottom-right-radius:.25rem}.rounded-tr{border-top-right-radius:.25rem}.rounded-b{border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.rounded-b-md{border-bottom-right-radius:var(--radius-md);border-bottom-left-radius:var(--radius-md)}.rounded-br{border-bottom-right-radius:.25rem}.rounded-bl{border-bottom-left-radius:.25rem}.border{border-style:var(--tw-border-style);border-width:1px}.border-x{border-inline-style:var(--tw-border-style);border-inline-width:1px}.border-y{border-block-style:var(--tw-border-style);border-block-width:1px}.border-s{border-inline-start-style:var(--tw-border-style);border-inline-start-width:1px}.border-e{border-inline-end-style:var(--tw-border-style);border-inline-end-width:1px}.border-t{border-top-style:var(--tw-border-style);border-top-width:1px}.border-t-0{border-top-style:var(--tw-border-style);border-top-width:0}.border-t-2{border-top-style:var(--tw-border-style);border-top-width:2px}.border-r{border-right-style:var(--tw-border-style);border-right-width:1px}.border-b{border-bottom-style:var(--tw-border-style);border-bottom-width:1px}.border-b-0{border-bottom-style:var(--tw-border-style);border-bottom-width:0}.\!border-l-0{border-left-style:var(--tw-border-style)!important;border-left-width:0!important}.border-l{border-left-style:var(--tw-border-style);border-left-width:1px}.border-dashed{--tw-border-style:dashed;border-style:dashed}.border-gray-200{border-color:var(--color-gray-200)}.border-green-500{border-color:var(--color-green-500)}.border-nb-gray-700{border-color:var(--color-nb-gray-700)}.border-nb-gray-800{border-color:var(--color-nb-gray-800)}.border-nb-gray-900{border-color:var(--color-nb-gray-900)}.border-nb-gray-910{border-color:var(--color-nb-gray-910)}.border-neutral-200{border-color:var(--color-neutral-200)}.border-red-500{border-color:var(--color-red-500)}.border-red-800\/50{border-color:#9b1c1c80}@supports (color:color-mix(in lab,red,red)){.border-red-800\/50{border-color:color-mix(in oklab,var(--color-red-800)50%,transparent)}}.border-transparent{border-color:#0000}.border-white{border-color:var(--color-white)}.bg-nb-gray-900{background-color:var(--color-nb-gray-900)}.bg-nb-gray-920{background-color:var(--color-nb-gray-920)}.bg-nb-gray-930\/70{background-color:#25282cb3}@supports (color:color-mix(in lab,red,red)){.bg-nb-gray-930\/70{background-color:color-mix(in oklab,var(--color-nb-gray-930)70%,transparent)}}.bg-nb-gray-940{background-color:var(--color-nb-gray-940)}.bg-red-800\/20{background-color:#9b1c1c33}@supports (color:color-mix(in lab,red,red)){.bg-red-800\/20{background-color:color-mix(in oklab,var(--color-red-800)20%,transparent)}}.bg-white{background-color:var(--color-white)}.bg-linear-to-b{--tw-gradient-position:to bottom}@supports (background-image:linear-gradient(in lab,red,red)){.bg-linear-to-b{--tw-gradient-position:to bottom in oklab}}.bg-linear-to-b{background-image:linear-gradient(var(--tw-gradient-stops))}.bg-gradient-to-br{--tw-gradient-position:to bottom right in oklab;background-image:linear-gradient(var(--tw-gradient-stops))}.from-nb-gray-900\/10{--tw-gradient-from:#32363d1a}@supports (color:color-mix(in lab,red,red)){.from-nb-gray-900\/10{--tw-gradient-from:color-mix(in oklab,var(--color-nb-gray-900)10%,transparent)}}.from-nb-gray-900\/10{--tw-gradient-stops:var(--tw-gradient-via-stops,var(--tw-gradient-position),var(--tw-gradient-from)var(--tw-gradient-from-position),var(--tw-gradient-to)var(--tw-gradient-to-position))}.from-nb-gray-940{--tw-gradient-from:var(--color-nb-gray-940);--tw-gradient-stops:var(--tw-gradient-via-stops,var(--tw-gradient-position),var(--tw-gradient-from)var(--tw-gradient-from-position),var(--tw-gradient-to)var(--tw-gradient-to-position))}.via-transparent{--tw-gradient-via:transparent;--tw-gradient-via-stops:var(--tw-gradient-position),var(--tw-gradient-from)var(--tw-gradient-from-position),var(--tw-gradient-via)var(--tw-gradient-via-position),var(--tw-gradient-to)var(--tw-gradient-to-position);--tw-gradient-stops:var(--tw-gradient-via-stops)}.to-nb-gray-930\/70{--tw-gradient-to:#25282cb3}@supports (color:color-mix(in lab,red,red)){.to-nb-gray-930\/70{--tw-gradient-to:color-mix(in oklab,var(--color-nb-gray-930)70%,transparent)}}.to-nb-gray-930\/70{--tw-gradient-stops:var(--tw-gradient-via-stops,var(--tw-gradient-position),var(--tw-gradient-from)var(--tw-gradient-from-position),var(--tw-gradient-to)var(--tw-gradient-to-position))}.to-transparent{--tw-gradient-to:transparent;--tw-gradient-stops:var(--tw-gradient-via-stops,var(--tw-gradient-position),var(--tw-gradient-from)var(--tw-gradient-from-position),var(--tw-gradient-to)var(--tw-gradient-to-position))}.bg-repeat{background-repeat:repeat}.p-1\.5{padding:calc(var(--spacing)*1.5)}.\!px-0{padding-inline:calc(var(--spacing)*0)!important}.px-3{padding-inline:calc(var(--spacing)*3)}.px-4{padding-inline:calc(var(--spacing)*4)}.px-5{padding-inline:calc(var(--spacing)*5)}.px-6{padding-inline:calc(var(--spacing)*6)}.\!py-0{padding-block:calc(var(--spacing)*0)!important}.py-2{padding-block:calc(var(--spacing)*2)}.py-2\.5{padding-block:calc(var(--spacing)*2.5)}.py-3{padding-block:calc(var(--spacing)*3)}.py-10{padding-block:calc(var(--spacing)*10)}.pt-4{padding-top:calc(var(--spacing)*4)}.pt-8{padding-top:calc(var(--spacing)*8)}.\!pr-16{padding-right:calc(var(--spacing)*16)!important}.pr-4{padding-right:calc(var(--spacing)*4)}.pb-5{padding-bottom:calc(var(--spacing)*5)}.\!pl-10{padding-left:calc(var(--spacing)*10)!important}.pl-3{padding-left:calc(var(--spacing)*3)}.text-center{text-align:center}.font-mono{font-family:var(--font-mono)}.text-3xl\!{font-size:var(--text-3xl)!important;line-height:var(--tw-leading,var(--text-3xl--line-height))!important}.text-base{font-size:var(--text-base);line-height:var(--tw-leading,var(--text-base--line-height))}.text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.text-xl\!{font-size:var(--text-xl)!important;line-height:var(--tw-leading,var(--text-xl--line-height))!important}.text-xs{font-size:var(--text-xs);line-height:var(--tw-leading,var(--text-xs--line-height))}.text-\[\.8rem\]{font-size:.8rem}.text-\[0\.78rem\]{font-size:.78rem}.leading-\[0\]{--tw-leading:0;line-height:0}.leading-none{--tw-leading:1;line-height:1}.font-light{--tw-font-weight:var(--font-weight-light);font-weight:var(--font-weight-light)}.font-medium{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.font-normal{--tw-font-weight:var(--font-weight-normal);font-weight:var(--font-weight-normal)}.tracking-wide{--tw-tracking:var(--tracking-wide);letter-spacing:var(--tracking-wide)}.tracking-wider{--tw-tracking:var(--tracking-wider);letter-spacing:var(--tracking-wider)}.text-wrap{text-wrap:wrap}.text-clip{text-overflow:clip}.text-ellipsis{text-overflow:ellipsis}.whitespace-break-spaces{white-space:break-spaces}.whitespace-nowrap{white-space:nowrap}.text-gray-800{color:var(--color-gray-800)}.text-gray-900{color:var(--color-gray-900)}.text-green-500{color:var(--color-green-500)}.text-nb-gray-200{color:var(--color-nb-gray-200)}.text-nb-gray-300{color:var(--color-nb-gray-300)}.text-nb-gray-400{color:var(--color-nb-gray-400)}.text-netbird{color:var(--color-netbird)}.text-red-400{color:var(--color-red-400)}.text-red-500{color:var(--color-red-500)}.text-white{color:var(--color-white)}.capitalize{text-transform:capitalize}.lowercase{text-transform:lowercase}.normal-case{text-transform:none}.uppercase{text-transform:uppercase}.italic{font-style:italic}.not-italic{font-style:normal}.diagonal-fractions{--tw-numeric-fraction:diagonal-fractions;font-variant-numeric:var(--tw-ordinal,)var(--tw-slashed-zero,)var(--tw-numeric-figure,)var(--tw-numeric-spacing,)var(--tw-numeric-fraction,)}.lining-nums{--tw-numeric-figure:lining-nums;font-variant-numeric:var(--tw-ordinal,)var(--tw-slashed-zero,)var(--tw-numeric-figure,)var(--tw-numeric-spacing,)var(--tw-numeric-fraction,)}.oldstyle-nums{--tw-numeric-figure:oldstyle-nums;font-variant-numeric:var(--tw-ordinal,)var(--tw-slashed-zero,)var(--tw-numeric-figure,)var(--tw-numeric-spacing,)var(--tw-numeric-fraction,)}.ordinal{--tw-ordinal:ordinal;font-variant-numeric:var(--tw-ordinal,)var(--tw-slashed-zero,)var(--tw-numeric-figure,)var(--tw-numeric-spacing,)var(--tw-numeric-fraction,)}.proportional-nums{--tw-numeric-spacing:proportional-nums;font-variant-numeric:var(--tw-ordinal,)var(--tw-slashed-zero,)var(--tw-numeric-figure,)var(--tw-numeric-spacing,)var(--tw-numeric-fraction,)}.slashed-zero{--tw-slashed-zero:slashed-zero;font-variant-numeric:var(--tw-ordinal,)var(--tw-slashed-zero,)var(--tw-numeric-figure,)var(--tw-numeric-spacing,)var(--tw-numeric-fraction,)}.stacked-fractions{--tw-numeric-fraction:stacked-fractions;font-variant-numeric:var(--tw-ordinal,)var(--tw-slashed-zero,)var(--tw-numeric-figure,)var(--tw-numeric-spacing,)var(--tw-numeric-fraction,)}.tabular-nums{--tw-numeric-spacing:tabular-nums;font-variant-numeric:var(--tw-ordinal,)var(--tw-slashed-zero,)var(--tw-numeric-figure,)var(--tw-numeric-spacing,)var(--tw-numeric-fraction,)}.normal-nums{font-variant-numeric:normal}.line-through{text-decoration-line:line-through}.no-underline{text-decoration-line:none}.overline{text-decoration-line:overline}.underline{text-decoration-line:underline}.antialiased{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.subpixel-antialiased{-webkit-font-smoothing:auto;-moz-osx-font-smoothing:auto}.opacity-30{opacity:.3}.opacity-40{opacity:.4}.\!shadow-none{--tw-shadow:0 0 #0000!important;box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)!important}.shadow,.shadow-sm{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.ring-offset-neutral-200\/20{--tw-ring-offset-color:#e5e5e533}@supports (color:color-mix(in lab,red,red)){.ring-offset-neutral-200\/20{--tw-ring-offset-color:color-mix(in oklab,var(--color-neutral-200)20%,transparent)}}.ring-offset-neutral-950\/50{--tw-ring-offset-color:#0a0a0a80}@supports (color:color-mix(in lab,red,red)){.ring-offset-neutral-950\/50{--tw-ring-offset-color:color-mix(in oklab,var(--color-neutral-950)50%,transparent)}}.ring-offset-red-500\/10{--tw-ring-offset-color:#f052521a}@supports (color:color-mix(in lab,red,red)){.ring-offset-red-500\/10{--tw-ring-offset-color:color-mix(in oklab,var(--color-red-500)10%,transparent)}}.outline{outline-style:var(--tw-outline-style);outline-width:1px}.blur{--tw-blur:blur(8px);filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.drop-shadow{--tw-drop-shadow-size:drop-shadow(0 1px 2px var(--tw-drop-shadow-color,#0000001a))drop-shadow(0 1px 1px var(--tw-drop-shadow-color,#0000000f));--tw-drop-shadow:drop-shadow(0 1px 2px #0000001a)drop-shadow(0 1px 1px #0000000f);filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.grayscale{--tw-grayscale:grayscale(100%);filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.invert{--tw-invert:invert(100%);filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.sepia{--tw-sepia:sepia(100%);filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.backdrop-blur{--tw-backdrop-blur:blur(8px);-webkit-backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,);backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,)}.backdrop-grayscale{--tw-backdrop-grayscale:grayscale(100%);-webkit-backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,);backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,)}.backdrop-invert{--tw-backdrop-invert:invert(100%);-webkit-backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,);backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,)}.backdrop-sepia{--tw-backdrop-sepia:sepia(100%);-webkit-backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,);backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,)}.backdrop-filter{-webkit-backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,);backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,)}.transition-all{transition-property:all;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-colors{transition-property:color,background-color,border-color,outline-color,text-decoration-color,fill,stroke,--tw-gradient-from,--tw-gradient-via,--tw-gradient-to;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.outline-none{--tw-outline-style:none;outline-style:none}.select-none{-webkit-user-select:none;user-select:none}:where(.divide-x-reverse>:not(:last-child)){--tw-divide-x-reverse:1}.ring-inset{--tw-ring-inset:inset}@media(hover:hover){.group-hover\:opacity-80:is(:where(.group):hover *){opacity:.8}}.peer-disabled\:cursor-not-allowed:is(:where(.peer):disabled~*){cursor:not-allowed}.peer-disabled\:opacity-70:is(:where(.peer):disabled~*){opacity:.7}.file\:border-0::file-selector-button{border-style:var(--tw-border-style);border-width:0}.file\:bg-transparent::file-selector-button{background-color:#0000}.file\:text-sm::file-selector-button{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.file\:font-medium::file-selector-button{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.placeholder\:text-neutral-400\/70::placeholder{color:#a1a1a1b3}@supports (color:color-mix(in lab,red,red)){.placeholder\:text-neutral-400\/70::placeholder{color:color-mix(in oklab,var(--color-neutral-400)70%,transparent)}}@media(hover:hover){.hover\:bg-gray-100:hover{background-color:var(--color-gray-100)}.hover\:bg-nb-gray-900\/50:hover{background-color:#32363d80}@supports (color:color-mix(in lab,red,red)){.hover\:bg-nb-gray-900\/50:hover{background-color:color-mix(in oklab,var(--color-nb-gray-900)50%,transparent)}}.hover\:bg-neutral-200:hover{background-color:var(--color-neutral-200)}.hover\:text-black:hover{color:var(--color-black)}.hover\:text-white:hover{color:var(--color-white)}}.focus\:z-10:focus{z-index:10}.focus\:ring-2:focus{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus\:ring-red-500\/30:focus{--tw-ring-color:#f052524d}@supports (color:color-mix(in lab,red,red)){.focus\:ring-red-500\/30:focus{--tw-ring-color:color-mix(in oklab,var(--color-red-500)30%,transparent)}}.focus\:ring-white\/50:focus{--tw-ring-color:#ffffff80}@supports (color:color-mix(in lab,red,red)){.focus\:ring-white\/50:focus{--tw-ring-color:color-mix(in oklab,var(--color-white)50%,transparent)}}.focus\:ring-zinc-200\/50:focus{--tw-ring-color:#e4e4e780}@supports (color:color-mix(in lab,red,red)){.focus\:ring-zinc-200\/50:focus{--tw-ring-color:color-mix(in oklab,var(--color-zinc-200)50%,transparent)}}.focus\:ring-offset-1:focus{--tw-ring-offset-width:1px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus\:outline-none:focus{--tw-outline-style:none;outline-style:none}.focus-visible\:ring-2:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-neutral-500\/20:focus-visible{--tw-ring-color:#73737333}@supports (color:color-mix(in lab,red,red)){.focus-visible\:ring-neutral-500\/20:focus-visible{--tw-ring-color:color-mix(in oklab,var(--color-neutral-500)20%,transparent)}}.focus-visible\:ring-red-500\/10:focus-visible{--tw-ring-color:#f052521a}@supports (color:color-mix(in lab,red,red)){.focus-visible\:ring-red-500\/10:focus-visible{--tw-ring-color:color-mix(in oklab,var(--color-red-500)10%,transparent)}}.focus-visible\:ring-offset-2:focus-visible{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus-visible\:outline-none:focus-visible{--tw-outline-style:none;outline-style:none}.enabled\:bg-netbird:enabled{background-color:var(--color-netbird)}.enabled\:text-white:enabled{color:var(--color-white)}@media(hover:hover){.enabled\:hover\:bg-netbird-500:enabled:hover{background-color:var(--color-netbird-500)}}.enabled\:focus\:ring-netbird-400\/50:enabled:focus{--tw-ring-color:#f6833080}@supports (color:color-mix(in lab,red,red)){.enabled\:focus\:ring-netbird-400\/50:enabled:focus{--tw-ring-color:color-mix(in oklab,var(--color-netbird-400)50%,transparent)}}.disabled\:cursor-not-allowed:disabled{cursor:not-allowed}.disabled\:text-nb-gray-300:disabled{color:var(--color-nb-gray-300)}.disabled\:opacity-40:disabled{opacity:.4}@media(min-width:40rem){.sm\:flex{display:flex}.sm\:flex-row{flex-direction:row}.sm\:gap-10{gap:calc(var(--spacing)*10)}.sm\:px-10{padding-inline:calc(var(--spacing)*10)}}@media(min-width:48rem){.md\:block{display:block}.md\:hidden{display:none}}.dark\:border-gray-500\/40:where(.dark,.dark *){border-color:#6b728066}@supports (color:color-mix(in lab,red,red)){.dark\:border-gray-500\/40:where(.dark,.dark *){border-color:color-mix(in oklab,var(--color-gray-500)40%,transparent)}}.dark\:border-gray-700\/30:where(.dark,.dark *){border-color:#3741514d}@supports (color:color-mix(in lab,red,red)){.dark\:border-gray-700\/30:where(.dark,.dark *){border-color:color-mix(in oklab,var(--color-gray-700)30%,transparent)}}.dark\:border-gray-700\/40:where(.dark,.dark *){border-color:#37415166}@supports (color:color-mix(in lab,red,red)){.dark\:border-gray-700\/40:where(.dark,.dark *){border-color:color-mix(in oklab,var(--color-gray-700)40%,transparent)}}.dark\:border-gray-700\/70:where(.dark,.dark *){border-color:#374151b3}@supports (color:color-mix(in lab,red,red)){.dark\:border-gray-700\/70:where(.dark,.dark *){border-color:color-mix(in oklab,var(--color-gray-700)70%,transparent)}}.dark\:border-nb-gray-700:where(.dark,.dark *){border-color:var(--color-nb-gray-700)}.dark\:border-nb-gray-900:where(.dark,.dark *){border-color:var(--color-nb-gray-900)}.dark\:border-netbird:where(.dark,.dark *){border-color:var(--color-netbird)}.dark\:border-transparent:where(.dark,.dark *){border-color:#0000}.dark\:bg-nb-gray:where(.dark,.dark *){background-color:var(--color-nb-gray)}.dark\:bg-nb-gray-900:where(.dark,.dark *){background-color:var(--color-nb-gray-900)}.dark\:bg-nb-gray-900\/30:where(.dark,.dark *){background-color:#32363d4d}@supports (color:color-mix(in lab,red,red)){.dark\:bg-nb-gray-900\/30:where(.dark,.dark *){background-color:color-mix(in oklab,var(--color-nb-gray-900)30%,transparent)}}.dark\:bg-nb-gray-900\/40:where(.dark,.dark *){background-color:#32363d66}@supports (color:color-mix(in lab,red,red)){.dark\:bg-nb-gray-900\/40:where(.dark,.dark *){background-color:color-mix(in oklab,var(--color-nb-gray-900)40%,transparent)}}.dark\:bg-nb-gray-900\/70:where(.dark,.dark *){background-color:#32363db3}@supports (color:color-mix(in lab,red,red)){.dark\:bg-nb-gray-900\/70:where(.dark,.dark *){background-color:color-mix(in oklab,var(--color-nb-gray-900)70%,transparent)}}.dark\:bg-nb-gray-920:where(.dark,.dark *){background-color:var(--color-nb-gray-920)}.dark\:bg-red-600:where(.dark,.dark *){background-color:var(--color-red-600)}.dark\:bg-transparent:where(.dark,.dark *){background-color:#0000}.dark\:bg-white:where(.dark,.dark *){background-color:var(--color-white)}.dark\:text-gray-100:where(.dark,.dark *){color:var(--color-gray-100)}.dark\:text-gray-400:where(.dark,.dark *){color:var(--color-gray-400)}.dark\:text-gray-800:where(.dark,.dark *){color:var(--color-gray-800)}.dark\:text-nb-gray-400:where(.dark,.dark *){color:var(--color-nb-gray-400)}.dark\:text-netbird:where(.dark,.dark *){color:var(--color-netbird)}.dark\:text-red-100:where(.dark,.dark *){color:var(--color-red-100)}.dark\:text-red-500:where(.dark,.dark *){color:var(--color-red-500)}.dark\:ring-offset-nb-gray-950\/50:where(.dark,.dark *){--tw-ring-offset-color:#181a1d80}@supports (color:color-mix(in lab,red,red)){.dark\:ring-offset-nb-gray-950\/50:where(.dark,.dark *){--tw-ring-offset-color:color-mix(in oklab,var(--color-nb-gray-950)50%,transparent)}}.dark\:ring-offset-neutral-950\/50:where(.dark,.dark *){--tw-ring-offset-color:#0a0a0a80}@supports (color:color-mix(in lab,red,red)){.dark\:ring-offset-neutral-950\/50:where(.dark,.dark *){--tw-ring-offset-color:color-mix(in oklab,var(--color-neutral-950)50%,transparent)}}.dark\:placeholder\:text-neutral-400\/70:where(.dark,.dark *)::placeholder{color:#a1a1a1b3}@supports (color:color-mix(in lab,red,red)){.dark\:placeholder\:text-neutral-400\/70:where(.dark,.dark *)::placeholder{color:color-mix(in oklab,var(--color-neutral-400)70%,transparent)}}@media(hover:hover){.dark\:hover\:border-nb-gray-800\/50:where(.dark,.dark *):hover{border-color:#3f444b80}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:border-nb-gray-800\/50:where(.dark,.dark *):hover{border-color:color-mix(in oklab,var(--color-nb-gray-800)50%,transparent)}}.dark\:hover\:border-red-800\/50:where(.dark,.dark *):hover{border-color:#9b1c1c80}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:border-red-800\/50:where(.dark,.dark *):hover{border-color:color-mix(in oklab,var(--color-red-800)50%,transparent)}}.dark\:hover\:bg-nb-gray-800\/60:where(.dark,.dark *):hover{background-color:#3f444b99}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-nb-gray-800\/60:where(.dark,.dark *):hover{background-color:color-mix(in oklab,var(--color-nb-gray-800)60%,transparent)}}.dark\:hover\:bg-nb-gray-900\/30:where(.dark,.dark *):hover{background-color:#32363d4d}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-nb-gray-900\/30:where(.dark,.dark *):hover{background-color:color-mix(in oklab,var(--color-nb-gray-900)30%,transparent)}}.dark\:hover\:bg-nb-gray-900\/50:where(.dark,.dark *):hover{background-color:#32363d80}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-nb-gray-900\/50:where(.dark,.dark *):hover{background-color:color-mix(in oklab,var(--color-nb-gray-900)50%,transparent)}}.dark\:hover\:bg-nb-gray-900\/80:where(.dark,.dark *):hover{background-color:#32363dcc}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-nb-gray-900\/80:where(.dark,.dark *):hover{background-color:color-mix(in oklab,var(--color-nb-gray-900)80%,transparent)}}.dark\:hover\:bg-nb-gray-910:where(.dark,.dark *):hover{background-color:var(--color-nb-gray-910)}.dark\:hover\:bg-neutral-200:where(.dark,.dark *):hover{background-color:var(--color-neutral-200)}.dark\:hover\:bg-zinc-800\/50:where(.dark,.dark *):hover{background-color:#27272a80}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-zinc-800\/50:where(.dark,.dark *):hover{background-color:color-mix(in oklab,var(--color-zinc-800)50%,transparent)}}.hover\:dark\:bg-red-700:hover:where(.dark,.dark *){background-color:var(--color-red-700)}.dark\:hover\:text-red-600:where(.dark,.dark *):hover{color:var(--color-red-600)}.dark\:hover\:text-white:where(.dark,.dark *):hover{color:var(--color-white)}}.dark\:focus\:bg-red-700:where(.dark,.dark *):focus{background-color:var(--color-red-700)}.dark\:focus\:ring-nb-gray-500\/20:where(.dark,.dark *):focus{--tw-ring-color:#616e7933}@supports (color:color-mix(in lab,red,red)){.dark\:focus\:ring-nb-gray-500\/20:where(.dark,.dark *):focus{--tw-ring-color:color-mix(in oklab,var(--color-nb-gray-500)20%,transparent)}}.dark\:focus\:ring-netbird-600\/50:where(.dark,.dark *):focus{--tw-ring-color:#e5531180}@supports (color:color-mix(in lab,red,red)){.dark\:focus\:ring-netbird-600\/50:where(.dark,.dark *):focus{--tw-ring-color:color-mix(in oklab,var(--color-netbird-600)50%,transparent)}}.dark\:focus\:ring-neutral-500\/20:where(.dark,.dark *):focus{--tw-ring-color:#73737333}@supports (color:color-mix(in lab,red,red)){.dark\:focus\:ring-neutral-500\/20:where(.dark,.dark *):focus{--tw-ring-color:color-mix(in oklab,var(--color-neutral-500)20%,transparent)}}.dark\:focus\:ring-red-700\/20:where(.dark,.dark *):focus{--tw-ring-color:#c81e1e33}@supports (color:color-mix(in lab,red,red)){.dark\:focus\:ring-red-700\/20:where(.dark,.dark *):focus{--tw-ring-color:color-mix(in oklab,var(--color-red-700)20%,transparent)}}.dark\:focus\:ring-zinc-800\/50:where(.dark,.dark *):focus{--tw-ring-color:#27272a80}@supports (color:color-mix(in lab,red,red)){.dark\:focus\:ring-zinc-800\/50:where(.dark,.dark *):focus{--tw-ring-color:color-mix(in oklab,var(--color-zinc-800)50%,transparent)}}.dark\:focus-visible\:ring-neutral-500\/20:where(.dark,.dark *):focus-visible{--tw-ring-color:#73737333}@supports (color:color-mix(in lab,red,red)){.dark\:focus-visible\:ring-neutral-500\/20:where(.dark,.dark *):focus-visible{--tw-ring-color:color-mix(in oklab,var(--color-neutral-500)20%,transparent)}}.enabled\:dark\:bg-netbird:enabled:where(.dark,.dark *){background-color:var(--color-netbird)}@media(hover:hover){.enabled\:dark\:hover\:border-red-800\/50:enabled:where(.dark,.dark *):hover{border-color:#9b1c1c80}@supports (color:color-mix(in lab,red,red)){.enabled\:dark\:hover\:border-red-800\/50:enabled:where(.dark,.dark *):hover{border-color:color-mix(in oklab,var(--color-red-800)50%,transparent)}}.enabled\:dark\:hover\:bg-netbird-500\/80:enabled:where(.dark,.dark *):hover{background-color:#f46d1bcc}@supports (color:color-mix(in lab,red,red)){.enabled\:dark\:hover\:bg-netbird-500\/80:enabled:where(.dark,.dark *):hover{background-color:color-mix(in oklab,var(--color-netbird-500)80%,transparent)}}.enabled\:hover\:dark\:bg-red-950\/50:enabled:hover:where(.dark,.dark *){background-color:#46080980}@supports (color:color-mix(in lab,red,red)){.enabled\:hover\:dark\:bg-red-950\/50:enabled:hover:where(.dark,.dark *){background-color:color-mix(in oklab,var(--color-red-950)50%,transparent)}}.enabled\:dark\:hover\:text-white:enabled:where(.dark,.dark *):hover{color:var(--color-white)}}.enabled\:dark\:focus\:bg-red-950\/40:enabled:where(.dark,.dark *):focus{background-color:#46080966}@supports (color:color-mix(in lab,red,red)){.enabled\:dark\:focus\:bg-red-950\/40:enabled:where(.dark,.dark *):focus{background-color:color-mix(in oklab,var(--color-red-950)40%,transparent)}}.enabled\:dark\:focus\:ring-red-800\/20:enabled:where(.dark,.dark *):focus{--tw-ring-color:#9b1c1c33}@supports (color:color-mix(in lab,red,red)){.enabled\:dark\:focus\:ring-red-800\/20:enabled:where(.dark,.dark *):focus{--tw-ring-color:color-mix(in oklab,var(--color-red-800)20%,transparent)}}.disabled\:dark\:border-nb-gray-900:disabled:where(.dark,.dark *){border-color:var(--color-nb-gray-900)}.disabled\:dark\:bg-nb-gray-900:disabled:where(.dark,.dark *){background-color:var(--color-nb-gray-900)}.disabled\:dark\:bg-nb-gray-910:disabled:where(.dark,.dark *){background-color:var(--color-nb-gray-910)}.disabled\:dark\:bg-nb-gray-920:disabled:where(.dark,.dark *){background-color:var(--color-nb-gray-920)}.disabled\:dark\:text-nb-gray-300:disabled:where(.dark,.dark *){color:var(--color-nb-gray-300)}.data-\[state\=open\]\:dark\:border-nb-gray-800\/50[data-state=open]:where(.dark,.dark *){border-color:#3f444b80}@supports (color:color-mix(in lab,red,red)){.data-\[state\=open\]\:dark\:border-nb-gray-800\/50[data-state=open]:where(.dark,.dark *){border-color:color-mix(in oklab,var(--color-nb-gray-800)50%,transparent)}}.data-\[state\=open\]\:dark\:bg-nb-gray-900\/30[data-state=open]:where(.dark,.dark *){background-color:#32363d4d}@supports (color:color-mix(in lab,red,red)){.data-\[state\=open\]\:dark\:bg-nb-gray-900\/30[data-state=open]:where(.dark,.dark *){background-color:color-mix(in oklab,var(--color-nb-gray-900)30%,transparent)}}.data-\[state\=open\]\:dark\:text-white[data-state=open]:where(.dark,.dark *){color:var(--color-white)}}@font-face{font-family:Inter;font-style:normal;font-weight:100 900;font-display:swap;src:url(/__netbird__/assets/Inter-VariableFont_opsz_wght.ttf)format("truetype")}@font-face{font-family:Inter;font-style:italic;font-weight:100 900;font-display:swap;src:url(/__netbird__/assets/Inter-Italic-VariableFont_opsz_wght.ttf)format("truetype")}:root{--nb-bg:#18191d;--nb-card-bg:#1b1f22;--nb-border:#32363d80;--nb-text:#e4e7e9;--nb-text-muted:#a7b1b9cc;--nb-primary:#f68330;--nb-primary-hover:#e5722a;--nb-input-bg:#3f444b80;--nb-input-border:#3f444bcc;--nb-error-bg:#991b1b33;--nb-error-border:#991b1b80;--nb-error-text:#f87171}html{color-scheme:dark;background-color:var(--color-nb-gray)}html.dark,:root{color-scheme:dark}body{font-family:Inter,ui-sans-serif,system-ui,sans-serif,Apple Color Emoji,Segoe UI Emoji}h1{margin-block:calc(var(--spacing)*1);font-size:var(--text-2xl);line-height:var(--tw-leading,var(--text-2xl--line-height));--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium);color:var(--color-gray-700)}h1:where(.dark,.dark *){color:var(--color-nb-gray-100)}h2{margin-block:calc(var(--spacing)*1);font-size:var(--text-xl);line-height:var(--tw-leading,var(--text-xl--line-height));--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium);color:var(--color-gray-700)}h2:where(.dark,.dark *){color:var(--color-nb-gray-100)}p{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height));--tw-font-weight:var(--font-weight-light);font-weight:var(--font-weight-light);--tw-tracking:var(--tracking-wide);letter-spacing:var(--tracking-wide);color:var(--color-gray-700)}p:where(.dark,.dark *){color:var(--color-zinc-50)}[placeholder]{text-overflow:ellipsis}@property --tw-rotate-x{syntax:"*";inherits:false}@property --tw-rotate-y{syntax:"*";inherits:false}@property --tw-rotate-z{syntax:"*";inherits:false}@property --tw-skew-x{syntax:"*";inherits:false}@property --tw-skew-y{syntax:"*";inherits:false}@property --tw-pan-x{syntax:"*";inherits:false}@property --tw-pan-y{syntax:"*";inherits:false}@property --tw-pinch-zoom{syntax:"*";inherits:false}@property --tw-space-y-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-space-x-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-divide-x-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-border-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-divide-y-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-gradient-position{syntax:"*";inherits:false}@property --tw-gradient-from{syntax:"";inherits:false;initial-value:#0000}@property --tw-gradient-via{syntax:"";inherits:false;initial-value:#0000}@property --tw-gradient-to{syntax:"";inherits:false;initial-value:#0000}@property --tw-gradient-stops{syntax:"*";inherits:false}@property --tw-gradient-via-stops{syntax:"*";inherits:false}@property --tw-gradient-from-position{syntax:"";inherits:false;initial-value:0%}@property --tw-gradient-via-position{syntax:"";inherits:false;initial-value:50%}@property --tw-gradient-to-position{syntax:"";inherits:false;initial-value:100%}@property --tw-leading{syntax:"*";inherits:false}@property --tw-font-weight{syntax:"*";inherits:false}@property --tw-tracking{syntax:"*";inherits:false}@property --tw-ordinal{syntax:"*";inherits:false}@property --tw-slashed-zero{syntax:"*";inherits:false}@property --tw-numeric-figure{syntax:"*";inherits:false}@property --tw-numeric-spacing{syntax:"*";inherits:false}@property --tw-numeric-fraction{syntax:"*";inherits:false}@property --tw-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-shadow-color{syntax:"*";inherits:false}@property --tw-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-inset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-shadow-color{syntax:"*";inherits:false}@property --tw-inset-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-ring-color{syntax:"*";inherits:false}@property --tw-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-ring-color{syntax:"*";inherits:false}@property --tw-inset-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-ring-inset{syntax:"*";inherits:false}@property --tw-ring-offset-width{syntax:"";inherits:false;initial-value:0}@property --tw-ring-offset-color{syntax:"*";inherits:false;initial-value:#fff}@property --tw-ring-offset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-outline-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-blur{syntax:"*";inherits:false}@property --tw-brightness{syntax:"*";inherits:false}@property --tw-contrast{syntax:"*";inherits:false}@property --tw-grayscale{syntax:"*";inherits:false}@property --tw-hue-rotate{syntax:"*";inherits:false}@property --tw-invert{syntax:"*";inherits:false}@property --tw-opacity{syntax:"*";inherits:false}@property --tw-saturate{syntax:"*";inherits:false}@property --tw-sepia{syntax:"*";inherits:false}@property --tw-drop-shadow{syntax:"*";inherits:false}@property --tw-drop-shadow-color{syntax:"*";inherits:false}@property --tw-drop-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-drop-shadow-size{syntax:"*";inherits:false}@property --tw-backdrop-blur{syntax:"*";inherits:false}@property --tw-backdrop-brightness{syntax:"*";inherits:false}@property --tw-backdrop-contrast{syntax:"*";inherits:false}@property --tw-backdrop-grayscale{syntax:"*";inherits:false}@property --tw-backdrop-hue-rotate{syntax:"*";inherits:false}@property --tw-backdrop-invert{syntax:"*";inherits:false}@property --tw-backdrop-opacity{syntax:"*";inherits:false}@property --tw-backdrop-saturate{syntax:"*";inherits:false}@property --tw-backdrop-sepia{syntax:"*";inherits:false}@keyframes spin{to{transform:rotate(360deg)}} diff --git a/proxy/web/dist/index.html b/proxy/web/dist/index.html new file mode 100644 index 000000000..ea253a77d --- /dev/null +++ b/proxy/web/dist/index.html @@ -0,0 +1,19 @@ + + + + + + + NetBird Service + + + + + + + +

    + + diff --git a/proxy/web/dist/robots.txt b/proxy/web/dist/robots.txt new file mode 100644 index 000000000..1f53798bb --- /dev/null +++ b/proxy/web/dist/robots.txt @@ -0,0 +1,2 @@ +User-agent: * +Disallow: / diff --git a/proxy/web/eslint.config.js b/proxy/web/eslint.config.js new file mode 100644 index 000000000..5e6b472f5 --- /dev/null +++ b/proxy/web/eslint.config.js @@ -0,0 +1,23 @@ +import js from '@eslint/js' +import globals from 'globals' +import reactHooks from 'eslint-plugin-react-hooks' +import reactRefresh from 'eslint-plugin-react-refresh' +import tseslint from 'typescript-eslint' +import { defineConfig, globalIgnores } from 'eslint/config' + +export default defineConfig([ + globalIgnores(['dist']), + { + files: ['**/*.{ts,tsx}'], + extends: [ + js.configs.recommended, + tseslint.configs.recommended, + reactHooks.configs.flat.recommended, + reactRefresh.configs.vite, + ], + languageOptions: { + ecmaVersion: 2020, + globals: globals.browser, + }, + }, +]) diff --git a/proxy/web/index.html b/proxy/web/index.html new file mode 100644 index 000000000..e41f24f38 --- /dev/null +++ b/proxy/web/index.html @@ -0,0 +1,18 @@ + + + + + + + NetBird Service + + + + + +
    + + + diff --git a/proxy/web/package-lock.json b/proxy/web/package-lock.json new file mode 100644 index 000000000..d16196d77 --- /dev/null +++ b/proxy/web/package-lock.json @@ -0,0 +1,3952 @@ +{ + "name": "web", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "web", + "version": "0.0.0", + "dependencies": { + "clsx": "^2.1.1", + "lucide-react": "^0.468.0", + "react": "^19.2.0", + "react-dom": "^19.2.0", + "tailwind-merge": "^2.6.0" + }, + "devDependencies": { + "@eslint/js": "^9.39.1", + "@tailwindcss/vite": "^4.1.18", + "@types/node": "^24.10.1", + "@types/react": "^19.2.5", + "@types/react-dom": "^19.2.3", + "@vitejs/plugin-react": "^5.1.1", + "eslint": "^9.39.1", + "eslint-plugin-react-hooks": "^7.0.1", + "eslint-plugin-react-refresh": "^0.4.24", + "globals": "^16.5.0", + "tailwindcss": "^4.1.18", + "tsx": "^4.21.0", + "typescript": "~5.9.3", + "typescript-eslint": "^8.46.4", + "vite": "^7.2.4" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz", + "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.29.0.tgz", + "integrity": "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.29.0.tgz", + "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helpers": "^7.28.6", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/traverse": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.29.0.tgz", + "integrity": "sha512-vSH118/wwM/pLR38g/Sgk05sNtro6TlTJKuiMXDaZqPUfjTFcudpCOt00IhOfj+1BFAX+UFAlzCU+6WXr3GLFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz", + "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.28.6", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz", + "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz", + "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-validator-identifier": "^7.28.5", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz", + "integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.6.tgz", + "integrity": "sha512-xOBvwq86HHdB7WUDTfKfT/Vuxh7gElQ+Sfti2Cy6yIWNW05P8iUslOVcZ4/sKbE+/jQaukQAdz/gf3724kYdqw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.0.tgz", + "integrity": "sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.29.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz", + "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz", + "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/template": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.28.6.tgz", + "integrity": "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.28.6", + "@babel/parser": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.29.0.tgz", + "integrity": "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.2.tgz", + "integrity": "sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.2.tgz", + "integrity": "sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.2.tgz", + "integrity": "sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.2.tgz", + "integrity": "sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.2.tgz", + "integrity": "sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.2.tgz", + "integrity": "sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.2.tgz", + "integrity": "sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.2.tgz", + "integrity": "sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.2.tgz", + "integrity": "sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.2.tgz", + "integrity": "sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.2.tgz", + "integrity": "sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.2.tgz", + "integrity": "sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.2.tgz", + "integrity": "sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.2.tgz", + "integrity": "sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.2.tgz", + "integrity": "sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.2.tgz", + "integrity": "sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.2.tgz", + "integrity": "sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.2.tgz", + "integrity": "sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.2.tgz", + "integrity": "sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.2.tgz", + "integrity": "sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.2.tgz", + "integrity": "sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.2.tgz", + "integrity": "sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.2.tgz", + "integrity": "sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.2.tgz", + "integrity": "sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.2.tgz", + "integrity": "sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.2.tgz", + "integrity": "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", + "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.2", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/config-array": { + "version": "0.21.1", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.1.tgz", + "integrity": "sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/object-schema": "^2.1.7", + "debug": "^4.3.1", + "minimatch": "^3.1.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/config-helpers": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.4.2.tgz", + "integrity": "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.17.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/core": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.17.0.tgz", + "integrity": "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.3.tgz", + "integrity": "sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^10.0.1", + "globals": "^14.0.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.1", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/globals": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@eslint/js": { + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.2.tgz", + "integrity": "sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + } + }, + "node_modules/@eslint/object-schema": { + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.7.tgz", + "integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.4.1.tgz", + "integrity": "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.17.0", + "levn": "^0.4.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", + "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.4.0" + }, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/retry": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-rc.2", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-rc.2.tgz", + "integrity": "sha512-izyXV/v+cHiRfozX62W9htOAvwMo4/bXKDrQ+vom1L1qRuexPock/7VZDAhnpHCLNejd3NJ6hiab+tO0D44Rgw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.57.1.tgz", + "integrity": "sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.57.1.tgz", + "integrity": "sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.57.1.tgz", + "integrity": "sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.57.1.tgz", + "integrity": "sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.57.1.tgz", + "integrity": "sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.57.1.tgz", + "integrity": "sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.57.1.tgz", + "integrity": "sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.57.1.tgz", + "integrity": "sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.57.1.tgz", + "integrity": "sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.57.1.tgz", + "integrity": "sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.57.1.tgz", + "integrity": "sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.57.1.tgz", + "integrity": "sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.57.1.tgz", + "integrity": "sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.57.1.tgz", + "integrity": "sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.57.1.tgz", + "integrity": "sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.57.1.tgz", + "integrity": "sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.57.1.tgz", + "integrity": "sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.57.1.tgz", + "integrity": "sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.57.1.tgz", + "integrity": "sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.57.1.tgz", + "integrity": "sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.57.1.tgz", + "integrity": "sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.57.1.tgz", + "integrity": "sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.57.1.tgz", + "integrity": "sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.57.1.tgz", + "integrity": "sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.57.1.tgz", + "integrity": "sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@tailwindcss/node": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.1.18.tgz", + "integrity": "sha512-DoR7U1P7iYhw16qJ49fgXUlry1t4CpXeErJHnQ44JgTSKMaZUdf17cfn5mHchfJ4KRBZRFA/Coo+MUF5+gOaCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/remapping": "^2.3.4", + "enhanced-resolve": "^5.18.3", + "jiti": "^2.6.1", + "lightningcss": "1.30.2", + "magic-string": "^0.30.21", + "source-map-js": "^1.2.1", + "tailwindcss": "4.1.18" + } + }, + "node_modules/@tailwindcss/oxide": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.1.18.tgz", + "integrity": "sha512-EgCR5tTS5bUSKQgzeMClT6iCY3ToqE1y+ZB0AKldj809QXk1Y+3jB0upOYZrn9aGIzPtUsP7sX4QQ4XtjBB95A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10" + }, + "optionalDependencies": { + "@tailwindcss/oxide-android-arm64": "4.1.18", + "@tailwindcss/oxide-darwin-arm64": "4.1.18", + "@tailwindcss/oxide-darwin-x64": "4.1.18", + "@tailwindcss/oxide-freebsd-x64": "4.1.18", + "@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.18", + "@tailwindcss/oxide-linux-arm64-gnu": "4.1.18", + "@tailwindcss/oxide-linux-arm64-musl": "4.1.18", + "@tailwindcss/oxide-linux-x64-gnu": "4.1.18", + "@tailwindcss/oxide-linux-x64-musl": "4.1.18", + "@tailwindcss/oxide-wasm32-wasi": "4.1.18", + "@tailwindcss/oxide-win32-arm64-msvc": "4.1.18", + "@tailwindcss/oxide-win32-x64-msvc": "4.1.18" + } + }, + "node_modules/@tailwindcss/oxide-android-arm64": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.1.18.tgz", + "integrity": "sha512-dJHz7+Ugr9U/diKJA0W6N/6/cjI+ZTAoxPf9Iz9BFRF2GzEX8IvXxFIi/dZBloVJX/MZGvRuFA9rqwdiIEZQ0Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-darwin-arm64": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.1.18.tgz", + "integrity": "sha512-Gc2q4Qhs660bhjyBSKgq6BYvwDz4G+BuyJ5H1xfhmDR3D8HnHCmT/BSkvSL0vQLy/nkMLY20PQ2OoYMO15Jd0A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-darwin-x64": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.1.18.tgz", + "integrity": "sha512-FL5oxr2xQsFrc3X9o1fjHKBYBMD1QZNyc1Xzw/h5Qu4XnEBi3dZn96HcHm41c/euGV+GRiXFfh2hUCyKi/e+yw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-freebsd-x64": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.1.18.tgz", + "integrity": "sha512-Fj+RHgu5bDodmV1dM9yAxlfJwkkWvLiRjbhuO2LEtwtlYlBgiAT4x/j5wQr1tC3SANAgD+0YcmWVrj8R9trVMA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm-gnueabihf": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.1.18.tgz", + "integrity": "sha512-Fp+Wzk/Ws4dZn+LV2Nqx3IilnhH51YZoRaYHQsVq3RQvEl+71VGKFpkfHrLM/Li+kt5c0DJe/bHXK1eHgDmdiA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm64-gnu": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.1.18.tgz", + "integrity": "sha512-S0n3jboLysNbh55Vrt7pk9wgpyTTPD0fdQeh7wQfMqLPM/Hrxi+dVsLsPrycQjGKEQk85Kgbx+6+QnYNiHalnw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm64-musl": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.1.18.tgz", + "integrity": "sha512-1px92582HkPQlaaCkdRcio71p8bc8i/ap5807tPRDK/uw953cauQBT8c5tVGkOwrHMfc2Yh6UuxaH4vtTjGvHg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-x64-gnu": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.1.18.tgz", + "integrity": "sha512-v3gyT0ivkfBLoZGF9LyHmts0Isc8jHZyVcbzio6Wpzifg/+5ZJpDiRiUhDLkcr7f/r38SWNe7ucxmGW3j3Kb/g==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-x64-musl": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.1.18.tgz", + "integrity": "sha512-bhJ2y2OQNlcRwwgOAGMY0xTFStt4/wyU6pvI6LSuZpRgKQwxTec0/3Scu91O8ir7qCR3AuepQKLU/kX99FouqQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.1.18.tgz", + "integrity": "sha512-LffYTvPjODiP6PT16oNeUQJzNVyJl1cjIebq/rWWBF+3eDst5JGEFSc5cWxyRCJ0Mxl+KyIkqRxk1XPEs9x8TA==", + "bundleDependencies": [ + "@napi-rs/wasm-runtime", + "@emnapi/core", + "@emnapi/runtime", + "@tybys/wasm-util", + "@emnapi/wasi-threads", + "tslib" + ], + "cpu": [ + "wasm32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/core": "^1.7.1", + "@emnapi/runtime": "^1.7.1", + "@emnapi/wasi-threads": "^1.1.0", + "@napi-rs/wasm-runtime": "^1.1.0", + "@tybys/wasm-util": "^0.10.1", + "tslib": "^2.4.0" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@tailwindcss/oxide-win32-arm64-msvc": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.18.tgz", + "integrity": "sha512-HjSA7mr9HmC8fu6bdsZvZ+dhjyGCLdotjVOgLA2vEqxEBZaQo9YTX4kwgEvPCpRh8o4uWc4J/wEoFzhEmjvPbA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-win32-x64-msvc": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.1.18.tgz", + "integrity": "sha512-bJWbyYpUlqamC8dpR7pfjA0I7vdF6t5VpUGMWRkXVE3AXgIZjYUYAK7II1GNaxR8J1SSrSrppRar8G++JekE3Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/vite": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/vite/-/vite-4.1.18.tgz", + "integrity": "sha512-jVA+/UpKL1vRLg6Hkao5jldawNmRo7mQYrZtNHMIVpLfLhDml5nMRUo/8MwoX2vNXvnaXNNMedrMfMugAVX1nA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tailwindcss/node": "4.1.18", + "@tailwindcss/oxide": "4.1.18", + "tailwindcss": "4.1.18" + }, + "peerDependencies": { + "vite": "^5.2.0 || ^6 || ^7" + } + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "24.10.10", + "resolved": "https://registry.npmjs.org/@types/node/-/node-24.10.10.tgz", + "integrity": "sha512-+0/4J266CBGPUq/ELg7QUHhN25WYjE0wYTPSQJn1xeu8DOlIOPxXxrNGiLmfAWl7HMMgWFWXpt9IDjMWrF5Iow==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "undici-types": "~7.16.0" + } + }, + "node_modules/@types/react": { + "version": "19.2.10", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.10.tgz", + "integrity": "sha512-WPigyYuGhgZ/cTPRXB2EwUw+XvsRA3GqHlsP4qteqrnnjDrApbS7MxcGr/hke5iUoeB7E/gQtrs9I37zAJ0Vjw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.3.tgz", + "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@types/react": "^19.2.0" + } + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.54.0.tgz", + "integrity": "sha512-hAAP5io/7csFStuOmR782YmTthKBJ9ND3WVL60hcOjvtGFb+HJxH4O5huAcmcZ9v9G8P+JETiZ/G1B8MALnWZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.12.2", + "@typescript-eslint/scope-manager": "8.54.0", + "@typescript-eslint/type-utils": "8.54.0", + "@typescript-eslint/utils": "8.54.0", + "@typescript-eslint/visitor-keys": "8.54.0", + "ignore": "^7.0.5", + "natural-compare": "^1.4.0", + "ts-api-utils": "^2.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^8.54.0", + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/ignore": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", + "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.54.0.tgz", + "integrity": "sha512-BtE0k6cjwjLZoZixN0t5AKP0kSzlGu7FctRXYuPAm//aaiZhmfq1JwdYpYr1brzEspYyFeF+8XF5j2VK6oalrA==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@typescript-eslint/scope-manager": "8.54.0", + "@typescript-eslint/types": "8.54.0", + "@typescript-eslint/typescript-estree": "8.54.0", + "@typescript-eslint/visitor-keys": "8.54.0", + "debug": "^4.4.3" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/project-service": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.54.0.tgz", + "integrity": "sha512-YPf+rvJ1s7MyiWM4uTRhE4DvBXrEV+d8oC3P9Y2eT7S+HBS0clybdMIPnhiATi9vZOYDc7OQ1L/i6ga6NFYK/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/tsconfig-utils": "^8.54.0", + "@typescript-eslint/types": "^8.54.0", + "debug": "^4.4.3" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.54.0.tgz", + "integrity": "sha512-27rYVQku26j/PbHYcVfRPonmOlVI6gihHtXFbTdB5sb6qA0wdAQAbyXFVarQ5t4HRojIz64IV90YtsjQSSGlQg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.54.0", + "@typescript-eslint/visitor-keys": "8.54.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/tsconfig-utils": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.54.0.tgz", + "integrity": "sha512-dRgOyT2hPk/JwxNMZDsIXDgyl9axdJI3ogZ2XWhBPsnZUv+hPesa5iuhdYt2gzwA9t8RE5ytOJ6xB0moV0Ujvw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.54.0.tgz", + "integrity": "sha512-hiLguxJWHjjwL6xMBwD903ciAwd7DmK30Y9Axs/etOkftC3ZNN9K44IuRD/EB08amu+Zw6W37x9RecLkOo3pMA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.54.0", + "@typescript-eslint/typescript-estree": "8.54.0", + "@typescript-eslint/utils": "8.54.0", + "debug": "^4.4.3", + "ts-api-utils": "^2.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/types": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.54.0.tgz", + "integrity": "sha512-PDUI9R1BVjqu7AUDsRBbKMtwmjWcn4J3le+5LpcFgWULN3LvHC5rkc9gCVxbrsrGmO1jfPybN5s6h4Jy+OnkAA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.54.0.tgz", + "integrity": "sha512-BUwcskRaPvTk6fzVWgDPdUndLjB87KYDrN5EYGetnktoeAvPtO4ONHlAZDnj5VFnUANg0Sjm7j4usBlnoVMHwA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/project-service": "8.54.0", + "@typescript-eslint/tsconfig-utils": "8.54.0", + "@typescript-eslint/types": "8.54.0", + "@typescript-eslint/visitor-keys": "8.54.0", + "debug": "^4.4.3", + "minimatch": "^9.0.5", + "semver": "^7.7.3", + "tinyglobby": "^0.2.15", + "ts-api-utils": "^2.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.54.0.tgz", + "integrity": "sha512-9Cnda8GS57AQakvRyG0PTejJNlA2xhvyNtEVIMlDWOOeEyBkYWhGPnfrIAnqxLMTSTo6q8g12XVjjev5l1NvMA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.9.1", + "@typescript-eslint/scope-manager": "8.54.0", + "@typescript-eslint/types": "8.54.0", + "@typescript-eslint/typescript-estree": "8.54.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.54.0.tgz", + "integrity": "sha512-VFlhGSl4opC0bprJiItPQ1RfUhGDIBokcPwaFH4yiBCaNPeld/9VeXbiPO1cLyorQi1G1vL+ecBk1x8o1axORA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.54.0", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@vitejs/plugin-react": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-5.1.3.tgz", + "integrity": "sha512-NVUnA6gQCl8jfoYqKqQU5Clv0aPw14KkZYCsX6T9Lfu9slI0LOU10OTwFHS/WmptsMMpshNd/1tuWsHQ2Uk+cg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.29.0", + "@babel/plugin-transform-react-jsx-self": "^7.27.1", + "@babel/plugin-transform-react-jsx-source": "^7.27.1", + "@rolldown/pluginutils": "1.0.0-rc.2", + "@types/babel__core": "^7.20.5", + "react-refresh": "^0.18.0" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "peerDependencies": { + "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "dev": true, + "license": "MIT", + "peer": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/baseline-browser-mapping": { + "version": "2.9.19", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.19.tgz", + "integrity": "sha512-ipDqC8FrAl/76p2SSWKSI+H9tFwm7vYqXQrItCuiVPt26Km0jS+NzSsBWAaBusvSbQcfJG+JitdMm+wZAgTYqg==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.js" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/browserslist": { + "version": "4.28.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "peer": true, + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001767", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001767.tgz", + "integrity": "sha512-34+zUAMhSH+r+9eKmYG+k2Rpt8XttfE4yXAjoZvkAPs15xcYQhyBYdalJ65BzivAvGRMViEjy6oKr/S91loekQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/detect-libc": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz", + "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=8" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.286", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.286.tgz", + "integrity": "sha512-9tfDXhJ4RKFNerfjdCcZfufu49vg620741MNs26a9+bhLThdB+plgMeou98CAaHu/WATj2iHOOHTp1hWtABj2A==", + "dev": true, + "license": "ISC" + }, + "node_modules/enhanced-resolve": { + "version": "5.18.4", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.4.tgz", + "integrity": "sha512-LgQMM4WXU3QI+SYgEc2liRgznaD5ojbmY3sb8LxyguVkIg5FxdpTkvk72te2R38/TGKxH634oLxXRGY6d7AP+Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/esbuild": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.2.tgz", + "integrity": "sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.27.2", + "@esbuild/android-arm": "0.27.2", + "@esbuild/android-arm64": "0.27.2", + "@esbuild/android-x64": "0.27.2", + "@esbuild/darwin-arm64": "0.27.2", + "@esbuild/darwin-x64": "0.27.2", + "@esbuild/freebsd-arm64": "0.27.2", + "@esbuild/freebsd-x64": "0.27.2", + "@esbuild/linux-arm": "0.27.2", + "@esbuild/linux-arm64": "0.27.2", + "@esbuild/linux-ia32": "0.27.2", + "@esbuild/linux-loong64": "0.27.2", + "@esbuild/linux-mips64el": "0.27.2", + "@esbuild/linux-ppc64": "0.27.2", + "@esbuild/linux-riscv64": "0.27.2", + "@esbuild/linux-s390x": "0.27.2", + "@esbuild/linux-x64": "0.27.2", + "@esbuild/netbsd-arm64": "0.27.2", + "@esbuild/netbsd-x64": "0.27.2", + "@esbuild/openbsd-arm64": "0.27.2", + "@esbuild/openbsd-x64": "0.27.2", + "@esbuild/openharmony-arm64": "0.27.2", + "@esbuild/sunos-x64": "0.27.2", + "@esbuild/win32-arm64": "0.27.2", + "@esbuild/win32-ia32": "0.27.2", + "@esbuild/win32-x64": "0.27.2" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.39.2.tgz", + "integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.8.0", + "@eslint-community/regexpp": "^4.12.1", + "@eslint/config-array": "^0.21.1", + "@eslint/config-helpers": "^0.4.2", + "@eslint/core": "^0.17.0", + "@eslint/eslintrc": "^3.3.1", + "@eslint/js": "9.39.2", + "@eslint/plugin-kit": "^0.4.1", + "@humanfs/node": "^0.16.6", + "@humanwhocodes/module-importer": "^1.0.1", + "@humanwhocodes/retry": "^0.4.2", + "@types/estree": "^1.0.6", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.6", + "debug": "^4.3.2", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^8.4.0", + "eslint-visitor-keys": "^4.2.1", + "espree": "^10.4.0", + "esquery": "^1.5.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^8.0.0", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + }, + "peerDependencies": { + "jiti": "*" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + } + } + }, + "node_modules/eslint-plugin-react-hooks": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-7.0.1.tgz", + "integrity": "sha512-O0d0m04evaNzEPoSW+59Mezf8Qt0InfgGIBJnpC0h3NH/WjUAR7BIKUfysC6todmtiZ/A0oUVS8Gce0WhBrHsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.24.4", + "@babel/parser": "^7.24.4", + "hermes-parser": "^0.25.1", + "zod": "^3.25.0 || ^4.0.0", + "zod-validation-error": "^3.5.0 || ^4.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0" + } + }, + "node_modules/eslint-plugin-react-refresh": { + "version": "0.4.26", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.26.tgz", + "integrity": "sha512-1RETEylht2O6FM/MvgnyvT+8K21wLqDNg4qD51Zj3guhjt433XbnnkVttHMyaVyAFD03QSV4LPS5iE3VQmO7XQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "eslint": ">=8.40" + } + }, + "node_modules/eslint-scope": { + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", + "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/espree": { + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", + "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.15.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", + "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/file-entry-cache": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", + "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^4.0.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", + "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.4" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-tsconfig": { + "version": "4.13.1", + "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.13.1.tgz", + "integrity": "sha512-EoY1N2xCn44xU6750Sx7OjOIT59FkmstNc3X6y5xpz7D5cBtZRe/3pSlTkDJgqsOk3WwZPkWfonhhUJfttQo3w==", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve-pkg-maps": "^1.0.0" + }, + "funding": { + "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/globals": { + "version": "16.5.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-16.5.0.tgz", + "integrity": "sha512-c/c15i26VrJ4IRt5Z89DnIzCGDn9EcebibhAOjw5ibqEHsE1wLUgkPn9RDmNcUKyU87GeaL633nyJ+pplFR2ZQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/hermes-estree": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/hermes-estree/-/hermes-estree-0.25.1.tgz", + "integrity": "sha512-0wUoCcLp+5Ev5pDW2OriHC2MJCbwLwuRx+gAqMTOkGKJJiBCLjtrvy4PWUGn6MIVefecRpzoOZ/UV6iGdOr+Cw==", + "dev": true, + "license": "MIT" + }, + "node_modules/hermes-parser": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/hermes-parser/-/hermes-parser-0.25.1.tgz", + "integrity": "sha512-6pEjquH3rqaI6cYAXYPcz9MS4rY6R4ngRgrgfDshRptUZIc3lw0MCIJIGDj9++mfySOuPTHB4nrSW99BCvOPIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "hermes-estree": "0.25.1" + } + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/jiti": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.6.1.tgz", + "integrity": "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==", + "dev": true, + "license": "MIT", + "bin": { + "jiti": "lib/jiti-cli.mjs" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lightningcss": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.30.2.tgz", + "integrity": "sha512-utfs7Pr5uJyyvDETitgsaqSyjCb2qNRAtuqUeWIAKztsOYdcACf2KtARYXg2pSvhkt+9NfoaNY7fxjl6nuMjIQ==", + "dev": true, + "license": "MPL-2.0", + "dependencies": { + "detect-libc": "^2.0.3" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "lightningcss-android-arm64": "1.30.2", + "lightningcss-darwin-arm64": "1.30.2", + "lightningcss-darwin-x64": "1.30.2", + "lightningcss-freebsd-x64": "1.30.2", + "lightningcss-linux-arm-gnueabihf": "1.30.2", + "lightningcss-linux-arm64-gnu": "1.30.2", + "lightningcss-linux-arm64-musl": "1.30.2", + "lightningcss-linux-x64-gnu": "1.30.2", + "lightningcss-linux-x64-musl": "1.30.2", + "lightningcss-win32-arm64-msvc": "1.30.2", + "lightningcss-win32-x64-msvc": "1.30.2" + } + }, + "node_modules/lightningcss-android-arm64": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-android-arm64/-/lightningcss-android-arm64-1.30.2.tgz", + "integrity": "sha512-BH9sEdOCahSgmkVhBLeU7Hc9DWeZ1Eb6wNS6Da8igvUwAe0sqROHddIlvU06q3WyXVEOYDZ6ykBZQnjTbmo4+A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-arm64": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.30.2.tgz", + "integrity": "sha512-ylTcDJBN3Hp21TdhRT5zBOIi73P6/W0qwvlFEk22fkdXchtNTOU4Qc37SkzV+EKYxLouZ6M4LG9NfZ1qkhhBWA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-x64": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.30.2.tgz", + "integrity": "sha512-oBZgKchomuDYxr7ilwLcyms6BCyLn0z8J0+ZZmfpjwg9fRVZIR5/GMXd7r9RH94iDhld3UmSjBM6nXWM2TfZTQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-freebsd-x64": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.30.2.tgz", + "integrity": "sha512-c2bH6xTrf4BDpK8MoGG4Bd6zAMZDAXS569UxCAGcA7IKbHNMlhGQ89eRmvpIUGfKWNVdbhSbkQaWhEoMGmGslA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm-gnueabihf": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.30.2.tgz", + "integrity": "sha512-eVdpxh4wYcm0PofJIZVuYuLiqBIakQ9uFZmipf6LF/HRj5Bgm0eb3qL/mr1smyXIS1twwOxNWndd8z0E374hiA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-gnu": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.30.2.tgz", + "integrity": "sha512-UK65WJAbwIJbiBFXpxrbTNArtfuznvxAJw4Q2ZGlU8kPeDIWEX1dg3rn2veBVUylA2Ezg89ktszWbaQnxD/e3A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-musl": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.30.2.tgz", + "integrity": "sha512-5Vh9dGeblpTxWHpOx8iauV02popZDsCYMPIgiuw97OJ5uaDsL86cnqSFs5LZkG3ghHoX5isLgWzMs+eD1YzrnA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-gnu": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.30.2.tgz", + "integrity": "sha512-Cfd46gdmj1vQ+lR6VRTTadNHu6ALuw2pKR9lYq4FnhvgBc4zWY1EtZcAc6EffShbb1MFrIPfLDXD6Xprbnni4w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-musl": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.30.2.tgz", + "integrity": "sha512-XJaLUUFXb6/QG2lGIW6aIk6jKdtjtcffUT0NKvIqhSBY3hh9Ch+1LCeH80dR9q9LBjG3ewbDjnumefsLsP6aiA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-arm64-msvc": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.30.2.tgz", + "integrity": "sha512-FZn+vaj7zLv//D/192WFFVA0RgHawIcHqLX9xuWiQt7P0PtdFEVaxgF9rjM/IRYHQXNnk61/H/gb2Ei+kUQ4xQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-x64-msvc": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.30.2.tgz", + "integrity": "sha512-5g1yc73p+iAkid5phb4oVFMB45417DkRevRbt/El/gKXJk4jid+vPFF/AXbxn05Aky8PapwzZrdJShv5C0avjw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/lucide-react": { + "version": "0.468.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.468.0.tgz", + "integrity": "sha512-6koYRhnM2N0GGZIdXzSeiNwguv1gt/FAjZOiPl76roBi3xKEXa4WmfpxgQwTTL4KipXjefrnf3oV4IsYhi4JFA==", + "license": "ISC", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0-rc" + } + }, + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-releases": { + "version": "2.0.27", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", + "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/react": { + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react/-/react-19.2.4.tgz", + "integrity": "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.4.tgz", + "integrity": "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ==", + "license": "MIT", + "dependencies": { + "scheduler": "^0.27.0" + }, + "peerDependencies": { + "react": "^19.2.4" + } + }, + "node_modules/react-refresh": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.18.0.tgz", + "integrity": "sha512-QgT5//D3jfjJb6Gsjxv0Slpj23ip+HtOpnNgnb2S5zU3CB26G/IDPGoy4RJB42wzFE46DRsstbW6tKHoKbhAxw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve-pkg-maps": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", + "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" + } + }, + "node_modules/rollup": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.57.1.tgz", + "integrity": "sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.57.1", + "@rollup/rollup-android-arm64": "4.57.1", + "@rollup/rollup-darwin-arm64": "4.57.1", + "@rollup/rollup-darwin-x64": "4.57.1", + "@rollup/rollup-freebsd-arm64": "4.57.1", + "@rollup/rollup-freebsd-x64": "4.57.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.57.1", + "@rollup/rollup-linux-arm-musleabihf": "4.57.1", + "@rollup/rollup-linux-arm64-gnu": "4.57.1", + "@rollup/rollup-linux-arm64-musl": "4.57.1", + "@rollup/rollup-linux-loong64-gnu": "4.57.1", + "@rollup/rollup-linux-loong64-musl": "4.57.1", + "@rollup/rollup-linux-ppc64-gnu": "4.57.1", + "@rollup/rollup-linux-ppc64-musl": "4.57.1", + "@rollup/rollup-linux-riscv64-gnu": "4.57.1", + "@rollup/rollup-linux-riscv64-musl": "4.57.1", + "@rollup/rollup-linux-s390x-gnu": "4.57.1", + "@rollup/rollup-linux-x64-gnu": "4.57.1", + "@rollup/rollup-linux-x64-musl": "4.57.1", + "@rollup/rollup-openbsd-x64": "4.57.1", + "@rollup/rollup-openharmony-arm64": "4.57.1", + "@rollup/rollup-win32-arm64-msvc": "4.57.1", + "@rollup/rollup-win32-ia32-msvc": "4.57.1", + "@rollup/rollup-win32-x64-gnu": "4.57.1", + "@rollup/rollup-win32-x64-msvc": "4.57.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/scheduler": { + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", + "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==", + "license": "MIT" + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/tailwind-merge": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-2.6.1.tgz", + "integrity": "sha512-Oo6tHdpZsGpkKG88HJ8RR1rg/RdnEkQEfMoEk2x1XRI3F1AxeU+ijRXpiVUF4UbLfcxxRGw6TbUINKYdWVsQTQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/dcastil" + } + }, + "node_modules/tailwindcss": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.18.tgz", + "integrity": "sha512-4+Z+0yiYyEtUVCScyfHCxOYP06L5Ne+JiHhY2IjR2KWMIWhJOYZKLSGZaP5HkZ8+bY0cxfzwDE5uOmzFXyIwxw==", + "dev": true, + "license": "MIT" + }, + "node_modules/tapable": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.3.0.tgz", + "integrity": "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/ts-api-utils": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.4.0.tgz", + "integrity": "sha512-3TaVTaAv2gTiMB35i3FiGJaRfwb3Pyn/j3m/bfAvGe8FB7CF6u+LMYqYlDh7reQf7UNvoTvdfAqHGmPGOSsPmA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" + } + }, + "node_modules/tsx": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.21.0.tgz", + "integrity": "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "esbuild": "~0.27.0", + "get-tsconfig": "^4.7.5" + }, + "bin": { + "tsx": "dist/cli.mjs" + }, + "engines": { + "node": ">=18.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + } + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/typescript-eslint": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.54.0.tgz", + "integrity": "sha512-CKsJ+g53QpsNPqbzUsfKVgd3Lny4yKZ1pP4qN3jdMOg/sisIDLGyDMezycquXLE5JsEU0wp3dGNdzig0/fmSVQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/eslint-plugin": "8.54.0", + "@typescript-eslint/parser": "8.54.0", + "@typescript-eslint/typescript-estree": "8.54.0", + "@typescript-eslint/utils": "8.54.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/undici-types": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.16.0.tgz", + "integrity": "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==", + "dev": true, + "license": "MIT" + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/vite": { + "version": "7.3.1", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz", + "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "esbuild": "^0.27.0", + "fdir": "^6.5.0", + "picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rollup": "^4.43.0", + "tinyglobby": "^0.2.15" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "lightningcss": "^1.21.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zod": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz", + "integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==", + "dev": true, + "license": "MIT", + "peer": true, + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/zod-validation-error": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/zod-validation-error/-/zod-validation-error-4.0.2.tgz", + "integrity": "sha512-Q6/nZLe6jxuU80qb/4uJ4t5v2VEZ44lzQjPDhYJNztRQ4wyWc6VF3D3Kb/fAuPetZQnhS3hnajCf9CsWesghLQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.0.0" + }, + "peerDependencies": { + "zod": "^3.25.0 || ^4.0.0" + } + } + } +} diff --git a/proxy/web/package.json b/proxy/web/package.json new file mode 100644 index 000000000..97ec1ec0d --- /dev/null +++ b/proxy/web/package.json @@ -0,0 +1,36 @@ +{ + "name": "web", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc && vite build", + "lint": "eslint .", + "preview": "vite preview" + }, + "dependencies": { + "clsx": "^2.1.1", + "lucide-react": "^0.468.0", + "react": "^19.2.0", + "react-dom": "^19.2.0", + "tailwind-merge": "^2.6.0" + }, + "devDependencies": { + "@eslint/js": "^9.39.1", + "@tailwindcss/vite": "^4.1.18", + "@types/node": "^24.10.1", + "@types/react": "^19.2.5", + "@types/react-dom": "^19.2.3", + "@vitejs/plugin-react": "^5.1.1", + "eslint": "^9.39.1", + "eslint-plugin-react-hooks": "^7.0.1", + "eslint-plugin-react-refresh": "^0.4.24", + "globals": "^16.5.0", + "tailwindcss": "^4.1.18", + "tsx": "^4.21.0", + "typescript": "~5.9.3", + "typescript-eslint": "^8.46.4", + "vite": "^7.2.4" + } +} diff --git a/proxy/web/public/robots.txt b/proxy/web/public/robots.txt new file mode 100644 index 000000000..1f53798bb --- /dev/null +++ b/proxy/web/public/robots.txt @@ -0,0 +1,2 @@ +User-agent: * +Disallow: / diff --git a/proxy/web/src/App.tsx b/proxy/web/src/App.tsx new file mode 100644 index 000000000..ab453aa3e --- /dev/null +++ b/proxy/web/src/App.tsx @@ -0,0 +1,227 @@ +import { useState, useRef, useEffect } from "react"; +import {Loader2, Lock, Binary, LogIn} from "lucide-react"; +import { getData, type Data } from "@/data"; +import Button from "@/components/Button"; +import { Input } from "@/components/Input"; +import PinCodeInput, { type PinCodeInputRef } from "@/components/PinCodeInput"; +import { SegmentedTabs } from "@/components/SegmentedTabs"; +import { PoweredByNetBird } from "@/components/PoweredByNetBird"; +import { Card } from "@/components/Card"; +import { Title } from "@/components/Title"; +import { Description } from "@/components/Description"; +import { Separator } from "@/components/Separator"; +import { ErrorMessage } from "@/components/ErrorMessage"; +import { Label } from "@/components/Label"; + +const data = getData(); + +// For testing, show all methods if none are configured +const methods: NonNullable = + data.methods && Object.keys(data.methods).length > 0 + ? data.methods + : { password:"password", pin: "pin", oidc: "/auth/oidc" }; + +function App() { + useEffect(() => { + document.title = "Authentication Required - NetBird Service"; + }, []); + + const [error, setError] = useState(null); + const [submitting, setSubmitting] = useState(null); + const [pin, setPin] = useState(""); + const [password, setPassword] = useState(""); + const passwordRef = useRef(null); + const pinRef = useRef(null); + const [activeTab, setActiveTab] = useState<"password" | "pin">( + methods.password ? "password" : "pin" + ); + + const handleAuthError = (method: "password" | "pin", message: string) => { + setError(message); + setSubmitting(null); + if (method === "password") { + setPassword(""); + setTimeout(() => passwordRef.current?.focus(), 200); + } else { + setPin(""); + setTimeout(() => pinRef.current?.focus(), 200); + } + }; + + const submitCredentials = (method: "password" | "pin", value: string) => { + setError(null); + setSubmitting(method); + + const formData = new FormData(); + if (method === "password") { + formData.append(methods.password!, value); + } else { + formData.append(methods.pin!, value); + } + + fetch(globalThis.location.href, { + method: "POST", + body: formData, + redirect: "manual", + }) + .then((res) => { + if (res.type === "opaqueredirect" || res.status === 0) { + setSubmitting("redirect"); + globalThis.location.reload(); + } else { + handleAuthError(method, "Authentication failed. Please try again."); + } + }) + .catch(() => { + handleAuthError(method, "An error occurred. Please try again."); + }); + }; + + const handlePinChange = (value: string) => { + setPin(value); + if (value.length === 6) { + submitCredentials("pin", value); + } + }; + + const isPinComplete = pin.length === 6; + const isPasswordEntered = password.length > 0; + const isButtonDisabled = submitting !== null || + (activeTab === "password" && !isPasswordEntered) || + (activeTab === "pin" && !isPinComplete); + + const hasCredentialAuth = methods.password || methods.pin; + const hasBothCredentials = methods.password && methods.pin; + const buttonLabel = activeTab === "password" ? "Sign in" : "Submit"; + + if (submitting === "redirect") { + return ( +
    + + Authenticated + Loading service... +
    + +
    +
    + +
    + ); + } + + return ( +
    + + Authentication Required + + The service you are trying to access is protected. Please authenticate to continue. + + +
    + {error && } + + {/* SSO Button */} + {methods.oidc && ( + + )} + + {/* Separator */} + {methods.oidc && hasCredentialAuth && } + + {/* Credential Authentication */} + {hasCredentialAuth && ( +
    { + e.preventDefault(); + submitCredentials(activeTab, activeTab === "password" ? password : pin); + }}> + {hasBothCredentials && ( + { + setActiveTab(v as "password" | "pin"); + setTimeout(() => { + if (v === "password") { + passwordRef.current?.focus(); + } else { + pinRef.current?.focus(); + } + }, 0); + }} + > + + + + Password + + + + PIN + + + + )} + +
    + {methods.password && (activeTab === "password" || !methods.pin) && ( + <> + {!hasBothCredentials && } + setPassword(e.target.value)} + /> + + )} + {methods.pin && (activeTab === "pin" || !methods.password) && ( + <> + {!hasBothCredentials && } + + + )} +
    + + +
    + )} +
    +
    + + +
    + ); +} + +export default App; diff --git a/proxy/web/src/ErrorPage.tsx b/proxy/web/src/ErrorPage.tsx new file mode 100644 index 000000000..c3120d9a1 --- /dev/null +++ b/proxy/web/src/ErrorPage.tsx @@ -0,0 +1,73 @@ +import { useEffect, useState } from "react"; +import {BookText, RotateCw, Globe, UserIcon, WaypointsIcon} from "lucide-react"; +import { Title } from "@/components/Title"; +import { Description } from "@/components/Description"; +import Button from "@/components/Button"; +import { PoweredByNetBird } from "@/components/PoweredByNetBird"; +import { StatusCard } from "@/components/StatusCard"; +import type { ErrorData } from "@/data"; + +export function ErrorPage({ code, title, message, proxy = true, destination = true, requestId, simple = false, retryUrl }: Readonly) { + useEffect(() => { + document.title = `${title} - NetBird Service`; + }, [title]); + + const [timestamp] = useState(() => new Date().toISOString()); + + return ( +
    + {/* Error Code */} +
    + Error {code} +
    + + {/* Title */} + {title} + + {/* Description */} + {message} + + {/* Status Cards - hidden in simple mode */} + {!simple && ( +
    + + + +
    + )} + + {/* Buttons */} +
    + + +
    + + {/* Request Info */} +
    +
    + REQUEST-ID: {requestId} +
    +
    + TIMESTAMP: {timestamp} +
    +
    + + +
    + ); +} diff --git a/proxy/web/src/assets/favicon.ico b/proxy/web/src/assets/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..50bb809661773efc9811f9e092610530bc1edc9c GIT binary patch literal 15086 zcmeI3dyHIF9mmhI(DG=ztAgSKb`AJ|5PD}!G*Pk0!vsPUNKj&+yRB`hEtD4eu++M< zJ4n%Jjgb1@&Yj2J z_{U80-QRtjbAI37?>z1~zjM}@KGSa;#~_Ezx8GsRdyO%}!@>QaF@NFNAX4wwpK8o~ zBzQMz$YMr;o_oIwebM{d^FYr7JrDf9d%($-E(IIGhAv=ZYMXmYdZe&4DZ8Dln}+ux zQ0^GJ+;VD7aT@-s65~I1xDVcAV74RvpLNUWwZ(FJWkEQQ8ZDfcmb&yVuzq<_XPqoB<4DO$<@w?^j z%aOuk&YHsFWA@a|MIU+pOKgiS@*KkR`&?~FS>2SX+NZ4ZdY(VU%DX){?48$ z&5K6quT#OCrZXGxfy4aHxwn*~+_M+j26TK3{L^86kY3XT|5Jv;_nI;_uHCti{3-t$ zkO$tJI(0YgKV};VO?NA8hwq)kI1rTid*nM8!Uom@AExX-Tkf9JTp@gp7TYfRk=l;s zFS*y5INsk)8{qyv$~{=a-PXYl>Y&|zyqdPNyN0{?9|o6oi$7((2mBgT`>1lQZ$R8R zPhDHne4|0R-{bj`PHn*N7g5g78tAC#X8cIw%g{Dn$9XQzs$kp+_lv%dH+RCH{1<}f zn{v19NOOorTtl9FzOVG#iZ3em{1n~iwqt`c;SQ1a*|^-jbQ(|B#;gO4nXE0Fl>cAe z?`V}jZRSnnQ~PUH{=D}ZOPbDw=)fs0XW%suW`i?PJd<}{04F=)``}i_ zV8u^=4!Xr(<%~O-6LYaU?ym-0!K2_u;E&)HPylLA8iT9HB3aAKkLyG4v)EyPG(^2=42_-@9bPB;_O)yYuUzcnUf{02D{{ zV;7y(#}6oH!PlDAZOOAE>n=s_N5K=|Pe8V)Pqo^%G@jm5Uw%^etglW(|M}o5K>sS; z)gtaD{VhrH?dj=xpyz>}2kJdg_I}2+ER7i`m&ungiv!v3$(*sW&%4hXE73vvN$CR9 z#*77Wh2)GeRu1WY))*@Xg7=;z-(uZUPQPS6kU7pd={^%k8OlAyc#`5#Xbjm=WDMCo znU@?gCRZ(4Wn?&WWtgGl%n%k*M7fM*p3Dbwl7m%xJ`+eTz5AA=&S?#U%fZ|^twC;j zzn%voJiy%H46qG63>2Sej!fxc*5$XSKR&f6qUhMK(5ZEk)79M5QMB@qc(pQ#;utC4>8 z#Tv!t4-m8O$~hc#Yi~g7_L_E}7@^`m4P&|^1xDn~f6l~|)<%1(^`}15M7JMLK73y2LUe|%x<9JT z@f{}q7G}GB7J3c=or<%63A_eu+DMtQdl}*&yS4Wcty|^EwoTPOAWp9DTox$q6RET2 zz2+U8inBVysj;EvU358rAT9q{;s3ba4I;nub!BT_bQnC%KHqj?@QM?+qF-tEI$3`o zpKHZ)6~p{8(Agi=(E*@u&Oz`SpfO0XbMlo&!KJ`E(}P?GTH!DK{{kPhKCm)~37<(l z4sy*L%zY#B4WwOy4BaQgjzzpzjO++##SgNz<9&2-4|rcqc^b2h#pI#*04zkMew1H}tk!2K5{Jo6xR~UcP7#5LBr7%@8e_Awn`k7-rUk!Y~5i zmkc47oIrRYVEZ(j2<>$i(zl<^Vr|Hmt1{#pEeoEeSuApK5z3=ipSUC5b+qkK=VXv!f|x}393 z$HL};%u}CQ{*~hm{~re3XM_FB7iQ<>*F0jUJI&m6Wf%wYRn9`N{~Y+upNDBKfFFZ> z>0gret<`04zXW^@%*}DUwWOU(Nq6P^iRNItZGNuRX6^@?(^u+F^8lU651;@1m9)!M z267zKe<$@V|2J!^msl$uZ7_fNI%(8qwHE#axC3kln}N<7JInTv-=3i@1 z@g{3WBS{*yiEF_(f!ej=AHqxEfkyf2pOaSmy6RW*m&IB2=~}(_W1z<5S=r}T5N93o ZF-=T$666(A1sU~VzAEK$kOO5O{{yZNCTIWv literal 0 HcmV?d00001 diff --git a/proxy/web/src/assets/fonts/Inter-Italic-VariableFont_opsz,wght.ttf b/proxy/web/src/assets/fonts/Inter-Italic-VariableFont_opsz,wght.ttf new file mode 100644 index 0000000000000000000000000000000000000000..43ed4f5ee6cb01173b448af26edb9d7459f9d365 GIT binary patch literal 904532 zcmd>{cU)9Q|L?!2?AZb=#ia_kq9S(1uBh01H)>3x(O47PV~M7lX8MzuW@3yn8jU8# zSYqrQ3o0lAvUJ#`vvgS6?wBZ>?RoWbJb4!?zLtNyJy4;^eoFh#$dAoGg<& zTymK}Z;>nggOtqI;ve%PM0v^Xg$~)+;A%Wuy>{f?h!R4s2q11pMU3Lroi3R;A&fm{L;~u z0%*!bRD^J!kb+3jTa1zOX$Ny@6CXn#^53CXNFm^Mp2RHTWiR?(b|eciW9HAq$4%Cu35YOMbaXSp$CYtY1#4(gx*FuNPy)) zffYuU7@2#SsZpBH_)k9Lm$+n^K+pOcIxM#M>Ofoa=9T+o#n^M z2wOud^jW`Q-5M4GcRgHwqRA{`f+MRRX8`J1jX z*&O~T1pUUb6d+m8i1HP7kDS_c=bk4ikUos-zK7Wbhf=cs&=E!qbtRs)CpVNFZOGyJ ziVX`_qFs0jq3$$@#?TC!PtVfx^alNtcF<4sJ6)kzN})WYh5B*B9IfnV0WOk_J4ZNi z!yP#^g^wWEt22Bg@^02gk>5joG)l?BaR)bs^_o*3OXxMHK92Po%8z<-<5{oq=oRO+ z2^@OPsZV4*=hP>)h%;HxKMbGZuoTu}NCt76{zVEBOJ7vO`-xKP)H%{9MW@b_t5oOI z1?n!{a_S=aNynVJg8E4>J9P=+i=DcXW=S4ST}3L1^##jIO?pLvQ+I)W)2X|*(A~HY zalbQMOZ~*PPTik;#Cc6RD&Z%NaOxbniv66r4srBO-J1eMXI(fS>MW!5H!>I>A@9xxFQ-Gjz>VXu%H#qem^5UI!;eyG7H#o!FApE*h*Hc%%Ij^?l z$G_nW51|nLX{X+fy7S>qy*&l;K2E&@g>vOiJ(Rq-45!`^`W>epM&Vp@`hCfVd&?Q# zk3zUbPQ5?*aTA>SfJc3bc_4u6=?ou4!JL;cgRb?$k#>|In$Agx*}vQP7?C!;OZ%!5L=^^p#G1EXL7Xr#_A}G{vcp z$9Rl%>Ju=3_HpVHF*e&f^+`x4(5X+x*wi@nDbR>UAdiLg3_VAqX*2R(2j2)kt!hJPbMXFy*Djis`VCLwegEk`S3sNFiW*g}V>ef_8J*`KJvUXqV zXpNERw^h(te{Mj?2KsZzQiLo-j7KSqMZ8T8|8i*Sacv{g8{_ED4b%;3tZ}5i0XeNh zX_nAq@UL~0tDB=d>!ItB!XMLN91ngInOKc&bhIn08PnIG9hRVV z{&gydn#agr}B{m+`;YufrOjgg;j@E2h2R(&4O2uqt71CT!|1PITEz3sy zGOz)6%t0>ABmK{#lC?K0#dzf1WWO!PB%2#Hptsgzz0m{znRT(AW4*%SH;r5t!tAO$ zLf0YJrV+9p(*drJj1BWDI?alKnAXHX>MQ7GgEkWTOkwkF392j+2G(je#Xj zcf{O)HqoPe|2r-8=ewAdtpeR4m!O?zIC^>&EP|D4IBL<Ej*c zYObSVA}GkLKq;w`w1hwkQ7gqQOo}>qh+? z|NG#H@C>dj!vB`#Xqj5eyJw((CxgK)V*fGkmUli%k?R%*}C<3O0g34L%o<4 zvi^G%Ur*BzcMaCeOHe*G=MF~;GaF-e%UU|@U;49Y3@t~zMX2vHh`9>=)7<_&kaP36 z>4cT4;8;!LK`rbDd04%E1%APmU$LU&I6v-v$Pc-%A-~}akSSa}WFwEe`7ZoY$mRT7 zkX!k!kniyaAP@1sLLTLhL7wEVLEaDs5+_6mV<5)~vmqZB=0QFwJPA2p*a*2vxC(hq z$bifi@*xX_LdasF7_vfCV128^3YQaIMK?%y(F@XB^n(l#yFhjmyFvC4dqegWr$9~< zr$NpTXF@(EB42T?xBzmYxEb6n}*LS-b*yUAzf-OS}siC&od>iaDjAFctCn7P(p>T!WYtC5eONq&_jkP!XP^< zIzx6>bcgJz=mj}IF#vLqVi4q5#W={x3e-aJs^WFXcNFhJ)+*|VlXwaH3`vsOLbjJe zA;TrqTIw$igB&i+fP74v1NjVg44kw=dKPk(vk_PO5Z_#FMSWWSNZ|+ zC+R21!_r~MW76-C=cIFxH>JBINZC>@amv2RzK~Ot%ORgtJ_or@xejuJ5@!?250v{M z4=4{p9#I~FJf=Jb8Ldo$G$^r(SLUcj6Q>%dLXN7Fs#B0>RhMv1a8(6sQQcDAf=pJy z22=$q>;P1isw(I;DzvkzUeyR`!?uc3^J?sR)UImme$^gow7J?x?E~qr_J?e(Mw_dH z)xnS<>JZ3KHQGuYrVfMbtVa8*`>6Xsj#E#BoUMjUs-ILp4Y^3Y2=ZAqY+L=28ttRr zr-p^7e^O&~sE?^pBlYj<3y@dT*~Do&YbKGPnW~u$xm0r+@{Z;Xq(PI6s~MUM;x#3j z5`>g$N)b|~DTCgi!CptxsId|U!-0jns9j*;E}kx)keysQId-0GU&+o9*dE_oik5Ck zx1~E$4BG4*@>-@@u34dZR`Z-@rDm(0XVa0A3)U3 z3a)?y;8oCvDC9Iz+bZw{ksfC&dYrVi=|~iO0&K&f1Wp?y#Fs|kROKhk7bq9@W$awU z6K4i3PYt@s`{e%g1jWnH5QAwE@;WM&U!*hgQ2BXi|A2M|XA1@jCW9O!M{+0SujJR{ zLU|%xmb2w&a35iBL~?tka5!bKmpOJ7Bpc;d$)7UmYkQsi3U!jdGh%ZALPN1ny2+=k+4>sW`9 zNLxoj31=b09LfWFCsOW@IQQx8k@5h!8+M6r^_2(90@c&^ljRZi46Z%B-&r1Mzsb#_ zgM;Le@<7^6?@e$h-!o;b97-qY?Y0i(EWI5hk3+~QdMijCFZaP=%$x4=M0*tX7=7IZ z$~Epa+7=>Dk~L`M-5upwvP4<*i60atr=fS*QlMQ`ckEwY;OX-^;dU0Fc|^l1ku61w31 zKzYBsAN~C$Ohk@CSN$|iPLk=E)1cg=9dh1n(%`-Uv^$ty zlr!lodE;*Cjptko>qNJG&dqwEuRR-2|aI7B@?vG_7hB5{?5W(;bzX4GxLNBIXyrF4&FU>rkzb(K>_geaSw{Aa9U|(ZPX(<;}2$ z?=~pl4xs3jR(fClg|a`pg#9@?y=*#1V`q<}xUdgYD)~9O^@(DZ{2b@?iD!4Y12<=z z$y*-6J-#j3TYiiiv90G&*^?XgOqY((Y4BLe>wBS}ypeLh7dpwy=-QqN zUVehs>@)S1*VFgESbX8^Va_HxdbFsDIme2s?3LVz6M<1^;>6Sb7v$A+@&ftDld1G_ ziV42c(UnSh1n~yU*c7O!MTc2} zBg}Y{AmBeHeDulFm{)c~&jDL7csJ4(481M%ru+&DyIo#^%AJrWVh%r#$!8?p#C(Nb z#`M*n8e|DWyT3i!9?k8rFSRe_zOgT{FX67*huDYE1oXTD7J#Xq$)OI!MC`(;flirKNfV;8#j!*Q#E5+FI~*R3{6{NcLIPI3E= zH*$1!|K)}%syn#d*M;6W_>1pI`tE3qtfJixnR(`;&hGT)_52)}KECm7u9a@wI3lPi z1yV)7-MDObqjzs4X}i$-(Vtf5P(t+A_1&lnvJIz(BsvzoD^*F?ASL=V`nG#V`t??H zqlUKK-ddAR$8Y~uucVJ+zYLmBAKv@A=qvgvE=FigpWja{FQG5u-x3Pw>-cvI6X~4? zZ{>VXpBPTny2+~z#;`z2PyN6aK#8fJS{vzD>W(5m+Ml`y@@(oJgv6!pQ^eC7X+IQ& zQcT)GEihIj4U=hg)s|{XXs}mT(P`^3PEAMc z=g@jP)c5l5&x`Mg|UVDoyOR(F*^&xykRR~f(aHHi-<_UZ67=1rF{YsJqm+GhdwzOWm%JyYt1x9xOgAPrxQFeD+2|g& z|G?v|-1Z+_Z|}DM;3j*g{pZ%(JM2HVSq|NQbDSl3|Lv9Cy6=yRsO|jY!40-fKOUcv z>h&XUL2z={qgSdPCb5PR}bubMyEb-a;dfTfm3s=?S8^-oqvj%7INX|>CuFyDtuDfAn5Qw}@lLpwNQjJ2%SrjvqKRAPiz) zCnmZb|7Bj#o#Q7*D*GM3I)dwY{OU-En3rh%@ygN@ce=R^K5?&8+u{?c z{l%OU#?BS|iOQj6BTv*0EggBHabk)6gk3rs`TGyEI%od=;|uu#zh4jY>HGVQQ18LN z=Zq?`{%#pmJnH0)88xmaV`hfzIhj6E=ymFRh<3~=7IrFrl=r<;hAE|Wr&7G!dYnq{ zuhE~%=&#nF${A&DI8`*Ga-G+talJ z6g|&;^`dY5nVa3cx}CY(S=;N3VM>YROvbUW@H0ibAcD8{hIm(>#rM~u}^JQ)8x}UEf%7tBs>aOj4A*z?Q+l8B> z^5Z_tT?Se6!&%2Q4@9K4-aF&#Rp`?vH@Iq-D8^kO{{>9&>tJ+*V`9ed` z#e{y^+>6D*r7jmsTEkm1jW4)Z@(iDUk?U{mdeJS{=5Z;;uIPIyWtP!=39XiSDXX6< z^OAXdR^LnJ30ZwEl?K)aUb2K2PrCg380o>~><*3om$T;x*_X5D2|1Tdfz}R}3+JhG zE|&+_`d==eDd$|S=vW~dYZ^3Ip7qp}BH76;XbUHS5b!68>tW@Q&$DHtfU zyHYriZ+FEUU~O}yrhQeXEA>n?w6E-RMV2}4YW&DX&#NheJW{W+?!H>kS8ji`u)V$N zYHh2crYc-uMq1S4ex2|`M=QV3PbLcg>hI6^5jj*^}zuu1zxqiJr-}d^= zk!|l>PwcKKyeZN6cgS8uxk18IAs zv~?xKxO&^ooE|FE&DQd$wZCX&99Bq2K)(~B2 zZI}{W*r6dPy12a}C%QbiJSe&%xGX5Tc5Fr9t@t5sez%g^XgqIadDMs9%IPXq-zwKt zi?@{3y3*S*%iYD>@nPC_x8nzRW!}#7wU^&6Y^@2rZ5@_9^tM|^p~IbDRw}#Qx$4!c z_nljzAsz0-trWW5N$_oGdndDxBKJ$GjBcfHVi9w zjcFKOu8CLW^A znSeH=#3h#5Mr@BWW#M;EjIIp5dtshUe>b^bdBEMww5pDG^SxC5cMAqu^>>T)nxea9 zqf6CyYkKB*+^rp5t-fm?TBN?~>SAes?{p6-?%ur-)m`ox^sb}sC2#Z(x|h2UqGVV; zf3IXjsdTSoWU1m_$*5BCUJdTOS2MO+dC!Uj?pcSWLW?X?-g8w}cDTo^B<`#J9S6mI z_nfzD+`&mzZQ@S8Q0*I+(WfdX4wj>eGj%TTiZgX7@Qf?e+biOVrxm%zm9$0_7?LW^ zIygxcXX{?1ifbF4;d6iQGEcYrsSC;j?x((5?s`9Mh}r9Yp%7Ygzpz(m-u>c$TE+d+ zz$%~nwIi$4@h4xX^o~C@L@0>A-%B>eC-&r9$EQY^z2dV56??}U2Nd|km$XI{X1nps zcHPWRxYgy>1`^$Bs25D(gv`X^)#EcQ;w?#HPUvPTwqCYYEvr(eEL zLe9VTI{V)Que-9%<=fSnH9+eNSP4K9Bz)bXkVMu}dgVZ(ws$g8jJ@!Pc|6MW5NwaO@zi^Y|RUXf)TZDhT>_t8bhg~x~-vX zxKeK@n_#pX$|f=~$!Imyw=%UdG>p&I7__E*hLHtFS$l?&+_MQQ7dV{L8LjI;fEWMo`jVROld zT4wdkxc+QiZN`nMVo}D8X<}gpv*!$E&lz#gTB|Z1bT9DBNEw}`%t#wj?3s}_B+osg z)U(<EYfA>Pb}1B76<0} zXO?)@cxRThl6^AkCg!of2OuJ>&7yOn3WKT zJ#xLXl6&RqvXa?_*8lvvLO)w#v%uUC=r!Z!9k456<<>${(7e%_;~g^vf#H=k?1f9FIH- zryvhA3o8jKfS8?Z&#LQNsLb-nKonmKUz)|nR@P`u%A%|>A7*)EpL?>^&Fss|tqs|i zS6J(_Z@yMvoy`iKo!q*{JKHok*F8J8M{Zy?D@Jxc6GcHex@@y|Zcw(_H@9`R*+17m zyQEcao9t>e!m8cit?q8tW!H|uy>)Vq&Uoh8x^m;0RdrRy^FcLMG__)*9Oz%|1E)#?tP{vQo;?8|x%^8(i}>#zxmXjnT^Ef|#4%!>D@l z&P}6Raejd5#9CXW>BM?ljVXF%on(qxUZgU`FfDdvtz?QD#2HP==n7Nvz&t-wT5zSO zDWiR^n<-15=Vi)jhYO~UYHbQO$*LU87RsEM71dHs>X;NoPU_ec zF(+eiEuWL;ksFv(F(KQUQ=`rf%#jt@9y#i{H*VyF6`mZQdwpJ(S8iIZ$tAbcJx9zf z^~zD^R<|?ga=D5e-(0_T3A$W;x%BqVol*Q)@j6(jtpa7i9I6a|=wZ()$;f zeAD|CHjlN%ke! z%+qv6bII`$a_^F~u8k!n<-P`MNx7e)u|%jfxRhSqR9IJfb#q~DX>4aNvDD;|Qd?>Y zGIT7>afMc?eo$MM?q_XPmKBoPp)A|?LAUY;y>qL|Gg}u3Au_C2eUW|mY{XX+B`tMgmeYO9OfD%`3|T<%v@ zv)ig+2hiN&>Z>bNxKOJSE33JN%FflQ+62#o@x!2_TLa^Iv*c~-BWiBo? zH8n0SmMG1bFiWf@0zx+?++y^)Z?+h{@0VF}ePW9(`Mw3RrO4Y*W-+5rEZCLnAaoW> zx%O_Er9vB9W~m-}w%CHsGXjmqVnLr92_T6DNtdt+9;wKlbFwM%VL zK)Fk8ndV-7ZG~HrTx%<86;i9rb5Yf$wk>R|D{=KGt*fe1y47*Dr9pKrm2I^3_j^dL z^|g(S?dn}BgWVd~=C+}REAww)^=+uv2o()%n+p+8-yrC3yEd?GZsXC>oT~BqSgvQ| zjWvPAjWOXiFYECo?w-~gYg!jsvjd__tZWZ!t?hV6VdZ+dCtHPt;Cd?#OY5x~`KH?H zQW_X+JHABgVLLlSR@&|jlY7`Q^;#dBIpDU*RxaHvv(+2?ax$mt!do>d^zR`Jh%a~tH_Qn$TO1sT&3AS@g z@b;QuIlUv!8OzZUa>ei}QLb)vJzuU4vMJ^Idd^L*cjM~ihRWWha$||MLYCdR3K@Og zMizV&jk2;qZs2|$+JWN=f^L|&619s5SE?=LxpEhcCs!Vzv2itAl^<6lR<-6V6&ej^ zsnn=BOO-~&S*kTk&Z0)-TA!;Xt|7mJAJ@>(tC)!ruCWeL8m$_UvlXiSIGa^~Cf2v+ zL|a*FPFw1t zG`aDOb!sKwXjKcmtx)9)q4wi#jqOF=W^2dub{j5eOEs-{e=l_zA8bes<-?6}UP9E^ zD>=fA$4>t&qy+jF3Tf65UZ~4gss)ycVB>`VW^Kzsgz#c@iON;1ml~_Z`W}~TqEKg3h*AxQPlcLPO0j#D zwNde4L0!EfX+cAcg59R5E>XHD#D+$tl=_6lB5}L|ouCjJlpHVGRlhz~5};~O@b#)* zWf~640TZt-YIinCU8~ObP}gem-PH>J+8WK?Crevt_AV&((d;*dhiQyDa}>r;7$!oE z1wx0hC$~GpX*g?+heoNaFL8`EjV9P))VL|D8#Owt*EN_iEgGF$WtAqF4ao5Qu{Sm0 z;n4;x;=|1@ag)x*yWC$JQ|FTTba|OeL!H*@;^J9TaE8I#a+%9*k9Cf=|%N~$gzRgLywr`sUMeY|SRR_2$YZ@xu!?cYB9_I$zNZ_qR~THQ(pnmQ0bv_Gv|BsZ<|o)d1^>q0fbQC86R4&@>F#Bt-+JU^9*aS-18g| z5?|o8Z?VzEYu^&1#w$LwPT8v7Pnp$x_or;9#S% zO?r5dv5k9{pHiP5UYMF}*BnjG0h7i!nmSp0^saF2Y=1;^J=Esf)0fEre-2tRX+tzo9a`q@f|) z3`(^&-+V3aSKyWG(n^Pi0iFb;!`3j^VlanW48{m^lo20CVmY!8aO35}&LfLA@4Kog zIbsYCJ~epUxKkU)jXPrq)|@epi8y<9^yu>gZJo~#vUR;AT;ZY&8e%n#vP5X2KkVN= z_HOv#yVbpX5+qw?;+tXJlF~bMGwcoPYcPlD3_4?MT8hq|btlUs>yCa_R+dI@EX)Zu z)ddCReI5{)mla^mE6wyMd{eigup%_SxVV$IITl065~efP=`@vUZ7txc`=6<-DUkCp zP}stQ8a|iiHspEg8}mHc*lbm8Y&uNg6+GEdKD*k7qr{|Al6A=>73#>WcJK0C=D~4 z4Rpke*{!m00?RD8{#Fkk>*+b!6pzn97{g-89A>2QNMa^PurX@DC!i6Ng5e)ywkS?_ z6D1p2S!EnwTwvDbu<+RZ!DH@sxmTX5jkue2CZNt}jwmwXyww;StjWn3Tc3A%O^u_1 z1~XY;g%%^aU1v1%zYcAW!#c-Qq6yZ;>I`Gdp#hn8!j~|bU)a67dGiMwH||)vbmtk( z&c(rdpL*(tC!hRjw&94ujB$Kqn(oN7X-B6{{e8lOQ{%=(MHpjs#=8bhf=dw=u?DnX zm?hb0NaMJykTA?yC{b?hk43yttnn+>`I#}ASoi2kU8M?7-)h%_8jaRsFkoEPs?{=w zS5>a$7?hk#5t_oax}g;e$bd%#Y+8qZ0UQ_PYHptv7G|_`N-Y?2)c{KwS6R7Y`*!N! ztOe^gVd%RC+Pn9uz4R2u-`SE@XKC~$Vdw+uY%qiwF#as&ybAXk(l(O9di{>oCfZU2 znY3IX&ctO-lKFB04`p7k^Gd?2Prj91XeEnEe59bURFS!1rbTIMC! zc432uj2=7fsl|Bd>v$2n_iU1}M%HM9yY~toJ^d+srqE%u?|ty$!L!${-%m`>Fy$2# znoDXMtaeGIc5!w0)cFSKL)vxk(|6G5G1KQfwS2=y6c29$Bk{lGzvgcrHH9@@WwJT^ zQ3zfpM$(Vs5x8&5N&n9|K}rRX;weWeqGTxS1MEb5}^Npl$Dz5lE9VfiVFDMso-g;F0% zR<5QTG;l51CinnM#yZ$fbI*oFkmPd;Ym)a|8(H?(~e;}R%iRd2;=5MJxu2-U5 zl_Ub@{tMDTdX0tSds&WaN+ZotIpZnaC`LJ%3YD`dS+$LFR0rU0hr0{)s^D4yPvs77 z2aCtj`0wJfID9DTJE_TKIzDgPjSrO{bLhq<4|D(22WfByh>U@j}+Je%*8ZgSquaTFQ~{aXZ5 zC_ZIB@&7R+TIR1DNgZ1v=F{+pWYCC*It$N}t>R~xlRhIar_1#JntWVqhSeLo)RE>Z zUTzWId>yXpBk-Qn6&!Im@f)}fWaA^fIqW~~ZsH5&5xiBN$$xLJ<3E-=^H0HrF0Fw3 zI$+nCi#XC*d8SxE{(p){me5n~#v_fU z>+%kMEz{+EC*$DyI-c*(P$++xy6|i8UG16FOK6X0K6~!>kyH6g4m}KhAioFZvh;+0 z)LO~Ya3{~gWiD;im!$Q{IU7#Gfd`mbE+XBw_(g?KTr(TB)(@p?=De^_UE z{h1h);bD3He*Q`m#%&x;RJX@H`IrZ?0CWEUy9O}U1^h+^@DN1#M{tk9U!k@9Wwg5_ z2^* zjFCUWH;3StOZe}pweuRg9`s0;Z{zw7hyQhYoPV7<1LgwR?$jqjKLlQ4e%QsDhtxnn zCfo3=_IMc9^7{V?o#pXo&eB@gJ~T=E9(J@| zPQse)L&b3F$@VB%lXO;UXp_=~DkMGWr8?~EWULp5VZAsGYlb(mPQ_@P)^hz--X+(xQccCZ0rkm&^^T|z}y`)SLs7We4}lS5^<&X zu@7Z?yk6Mf6%k&#K~JOws$jUGayGujH%FBLJ&FFI%%*kt7Ts_~4eX_cW-9hjEWS@? z6c5l?#Wbu>|1Th3lWh)LJA`>+BK30c9z{CbNi^j)N`WD2;%vKQ4YFpK@?oJ|%TVd4c#lNWk^yoI3NL zK>BBCw&FFcO*>-_+6rzog=38ELOFIZSNZ~JusmMC{bBUDA{Tlo@&U?aa;&l|{GU*N z=@Q)K@@?@k8lhYO{Zq7A1dUgW#d!IE+{7C4Mw*|oeH+%zui~2oD@7xD3BmHK;$=Kv zzoQVvY&^3b!ycq7{ib%qIG#?66lZ9PayNysxQ??n4_YpTAuc=rX+9H+$DEJ7ol6@E zkuoU~<9M#(9u2~`O6MqIVc!Yzxrb+IF0@t{LC3^wJWCmD?&wBS#q%^v%%tbVDwOLz zT7>WkVl^$nw|>_O;gpK<&veAwhj>5G5$qMe0&jz-!3r=9Yz6Ou-@$V52562q6Z%xA zzlIN$6LGFG3`Bwj%rEFES7;-f;LZ~C4)<%JE&5zfH3+YQyBOijJ&5pXMNe)DVELa0 z&w;g0euT^T8Jq#zobDR92LZ~-jfI=gq|-Za8CRTKhnolvIo)^RMuFYnCdy0A;LL8Y z=uOx7$!NnJG!cU70$+r>7h{Yw%yI=oc#b|5 zo|Vg$Cy_=m#Uf5F++{qzlZ|V8glF;NWG88Z!jmEtcktUlM)@#*kmm87s1Mf23BnQj zMC5Uf+>@55;4&~S>Oq~jh3gLV=;JpqcAh|3DdO)zd@sb`gt%U~&hP|d!9c|Q25~n) zSMtj!-N90>y>JF3QMwZ~!T~o(Na6-@(R4ugfDR}o(*dPB9pIvI=>Gw}{4^Pd^<|ud zI1)cuE@W*bb$lak}jKr{HsN75oOia1sZXkpN8Kg45j&_ZKI*!hZQ(d`thLP(&MqYZ$LQ z9mCn(HN`CYNI9L(H6!Lwh8RoVE54u%MIPpieAY&CK821bF_-9bg=aL6XLzmS2m3q(i97{Q^a0=-vA?w9ce)Zt!=EBQ z!G)CY2f*J~@TbQFElm>wXcC7p&za?TE{VEwNpdP&51>W*uQ8CvHRN#?d2Ga26Sp9b z*OC594CL_@(tH)!_3Uj0i+9IXF{|jwYzC>HF9(+lPhJOovB~{_N0s1C{e+>V7SX<&6 zd$uT3x&E48x&Cf%a{V#hXQ?DQtGo`9>5%dS>~cTt7V5b+{Ab)?zBA`5p5?xg=5yZ& zQ!!>-xTl49ZjPX#M&T=J#QebVi|D8@mD4Dm;lANJ$o1%pXnY6r0uk*d$FB@>_@zN^ z4tSa?lXHlyF-#el*6&VTxrgI5XE@$_BgZ2w7uR#8zH|Za$3B<( za-nd)W%p>Q>0ahahp>)6L=UizSPl10{8mFUsKfmQh+Ba43z~JL?}_w1k-jIws&SoB zfOH&jnTtG*Ag{4-FCgy}Wh<&w`fyLH@OcQ852>-ve?$2gjdJ=`5B+^)lX4mzcFbMV zXc2VA;$~FKX-f-USA9rP4!Tnzo@MzCp5#<2#xOEGgrjrP2u_ej&~<4z-I3sS zAXN{Vk2;h#qdr4-zFJJ`&PifA z?Q#hwF9)vj7k_Y@!~cJAS^fV%#Ie?8?=hY4veJ1cjbk+(QQb)H4Db7vq|JIG1e~!-#FdJg$EbP38?bjUk!p?VaHsd(k!dVVG zw_*K*Hr~Yc$`AL+tbMRQWqVn+cV%ZuI7f(V&YSOxH5twzn*GehSYd1Q*`QOii}m_! zz6*_H`x=Cu>~~n47g4?uz`KRJ6yduRbGW_D2(NQ4c=GNA_-zvh-Qfb9k)GxD zauuA1w1C^o-XpMgDrmbZmN&Z}W!j2$(RV@wCj#vCxm_SlJOcL!+-Cn-_|I}_SXX=k zX5e#{PcZ(*{BZ?+BOo6(e_VCWAEVJ%*y&;IiQm7%FKsjKf;jl^f;i5O{pu~sRYcNU z&0X{nK3jrc#+uAr^wT7TE8f*wXqneVNTeiUW4`XH?)p7(mLT=+5tP9;W#H{XQfTJIM#M- zPG<8co5R_B$Y+Z{o~TkKgy2%jSROYv_}B8m{hy`xju}jWb>a&NnsR(g%+7 zlIHuJ^Cde6MV@om_+)3Vj`P(P;}!K?fU{P|7#;KeX2;Bw}qoLhJ@V&f(ZOl4E>ho@H&~Ja2!Goinkq?>M_cpa08T!18FG zFI&tBjyddYoO547+u#|iNI-d69pSP*XYVq^WAu_^9(k3Xa=2p<#_YidXIeUX5_4jt z;ud|0K6@U0(UG11I?i2@C+|G_bmZ+kbChtF=vd=posT~ImbD-I%#XAV?f5gwz@AY* zQ>;3UVqI#Xe}gdASKr8K>dTl1x5Dpy7PIFlA42^EBgzwx_CwmRt9KOF=^fQV`b;we zbM{mgCL5%wa*ire&SCEWVW)&KI)gn^uooYqh@m~wHu{up3@`?~uphjFakX6d0ej7F zkfr1#`loDcMpw#c`{8C(Fy=^=z2!n@5Eg+!duWWd&e z@SQwg*hOjVy)GZZ%|-eK*s~Gsm4Y_-3M_@bh|k3yFN-9;6YZ4%pY6HwMme0$8(9ms$--p#j{cMCh^SYf+- z2m4+F(pfkuV2{R}aXJ~Q4 zVz?_Qjvs^b6aYK-;7&qRrPsRQc&!%qxK1@m|8;4IkQzJMEEbzbDggq9| zhkC&FsU?Vl{2O&B2f`bFLpe4>XB+@%A8P`_(Kc2+(!oC~t9Kn>_po>yz{`kp4i^ss zRK_|9{(Vrt0xUjg%trlh;@TDXk*5`TacD~p{lQ#PBWxpB20llexqu}X2|x0)W&u0W z!LQ;qvUCb??H#xSKsa=4pKW6S(sT5~NVsPJ%3!^Yd#*KwGo7X5u#H}D5zh+yVEx{R z_Os%XtqrVBb;3Pf;4`^C%uZozDx7PE@|k$=(UCGh7~hfW3VXdGe8qVQU(;O{k9&^p z2=SQXn{59e_Z<6OJ?o2n1Q$tiby+xV0GXi@1MdU(Ghf`UdA9qxmm6KR!&3#;=Oa6MD-(L%+k1k)!!B z_M5yQpXA5d(H{*-u%~gvSy!SR_v7rZ7-wf_bE^veG_VV8jo%7wjDZ^kK4&(J^~)|JkRXZCShD)>{>6wWqtYgL<)luvZ3tZ;-gz0CNVn z73>1P1B^Q^4<|mTHy;f8fhk}qcolpJ4uk6;16XhZs0E>5D3}dagRNjcxBwo2QalkP zfOuj*fH5g90-M1n-~hM;44@oO74(xr4+elK;4p{+3BZhBniN3@hy+iAEnqje1hTN= zAm9gP0hB|Dawt&_WgbqoFs4N|@n9i%0ib<+(Qdw}0PW+4_VH^627{U4Ya)N#>yLZ= zaj*YofVT8U`TK=%?2YG-n5D8G%pw(b2*adzEu^mUOlZW}EK1w+AXfVgcCx6M%y4YENkk=`A21n4V0`bv+!(xb2R-+`0h zF31Nmex=nH^Z;YQ0i9bN8XN`D0A=fl>tQHcSSK(XU`%#e4sL)Yx zI`|qK1<@cI)Dw001f9Te@Hlt@Yy$_uWsnRih=qqrUXfWzOcmkLQekO_t1{gaL7&{Rd4-ps-5g0oW z7&{TyK?cCs8G^AhBoqt<7&}8&gRNi}_#MQ8JYXjp>I1riFjyVEu0?cD55{>l(u=}yF`>{`h&EOM&cw-T7EQ?o1G)@cJ zgFyf`F%C8`?hUX5zy`*_2F4jd1JQWczNTY8t@L- zPBaO5ObP{ui6)~glTnt*xNq_SqA8sK+GHxqFcmgF6*fM#o@g3uWg4zc8x9tNe}LCP z4bgNr@EiW-*K$xoH1irr2e>v1*JgD9L%?HT713j`)5kCw%vOOgfP81e7UoO`D*)1+ zgE2VgS8xMlf?A@vsMlPiH5X~kMOt&81Bg2papxWdaiEasaSr%`9srI1IPBr^O#pR& z{6_$LcsvoHtn*fbc>EmpIDj%d84eKVNyK?F3FHGCewx}FbOW%7r;x||DaL1aX%l?oz~EinvP=cj-#-7T5#MfP0{TXc+<6-m>ms0)U+?dl7sJ(C*8w zgAAhOXxrr-0qVE>DX<=V0MM?>VVlb>L@O487r-`f5TK1#pdDA#5Is8-Am3;2gCe5m zcn|=>!Al?tWP@6wmB?!)`f=qy0K!(m23NrbSHT8X?FXpmss{jhuGWH3@C-oSt3L+& zh}Lukqrj739e5w0?rW}q6i`Vx4hA9M5WYSJyZQ&xTlYDDe;vxRE{$k?4}dzaze%)V zJdlYtz79~wji}?s%ODw45IwI3ZNWe=9iaTr#}I8oe{Pxx76RDWW>3%w3;-jAvZ z+JGToCV-qr9mY2civff;x+OrKkH&SQaorfCW2^%9fRo@JcuN%0lW1I40C|ni08r+} zF9r9BCLm4|)&PVtu`Fl|;D6#cun6n`$G~myooEu$GYPVpgs>)eBbpLS6q*V60LU&B zvJ1ueP{=NH1%Nz4&w@upVH!X@!jR^v1wbOvG{}A03~-BRdM~0GB|r#3-!KDlpJ@-= zK}mr8o7o*8ZZi?LnTY=^l*3slhqF))XF)!*VgO`0>jlwl732o}z+`|l&v62X+Z>el zaFn%hl=pCy_wecfVTB{CaL76wVTB{CaMay!_?=4t`7*ZAe9I+Y11Ne=2Mc6_CAcKX-mxT+!R&a!9Q7~u%E&!x& zF=VkAvRDjREUp1MfJAT$yd!MS0;54PK)x)U2EGz4L*6e#-Y-MmFT*{S^#@^K6*vHp z_sfu`Wgm%_qwFou3y`Mei@|Pi0^9{(h*ls@D+U6Dz2X|SVIXZQA=j1X0OX1wX%*zU z$`2sFR;>d`0P$Hp3hV>8*BZoq&1-ymv?)LxSc`bBLp;|Zp6e=smH_uzhx@EU*F2M7c>zY%p~QxQ-TK*pPKelyN*_5q#2 z9-=Lf@0K>8510%f+pRsp1aO{c8_LEul#OjD8{0M!ZFc}DYui!Qw#NXJ>mA-;2hq;l z0O{XV4y*)_(=NzqcQc|r@UsVg_u{&}y})J=PqeQifLtQ&0LoS*@*%Q5K-r2M2Nr={ z;5fKLw4VX^+g|~+2jjph5C`sn&qPs<0O^jZ2D$>2k*Fm^2arDpP#zCLJ_j#>XGDjl z0_6MQQs6pKG-MKe65In{iDEJV$SbB2XbBz>#g+yQ0pt`5ImIIESX>v2>tbKj;Kde&WLcWEF1#kY7C7 zkOb791jsre2-E^7TM5Ge%0R+K0KW;>z$>C7{fUk`1B8F{F%hZ{CAtDXu!-oHJ3u;) zjRbSR1`rFbf|o=|8ps2R1H>~4X-OIjBESxC6eNSUM91yHGNKb+U>4CyPf!Ll0?6}| z6Twoj7n}n3!8f8)nL$wif2Rh4d0;QN1l|#y&J2nIr1dnyIgM~muO&K@1so(g>kI0G zaRBl=3)!ASJv-MIApG-1KwS_9(2iX|oxjiotO5@J{9i;^7xRI#pf>0Xb`o964$%Hw zY6WobOSt|r|+sBB*<3~h)AWcuY0LbSl?)@|tT*b0(Z!n4IInw{D5#jOar*)!xnG|%b*nj()%$2Aif{>gJa-#fV6#LAO|Q0s({7-ar-l> z@%elJ90$1WGp_sm8EeY+AQvbMkk4OggVq3P{xTZO0L#EO5DiX)Wbhp5Sl@L9c>(VI zwHjy+kdCjo_AAo(brnE)F#a3(3R!-|eZS(qzw*t3wP3{eyB7!sW55EeNf!X+K{N0K zYv0p};mf;>27*K2J~5UHR0U&+VMUtpHN+%*$4N>ASHMH?ff&|lnd}1aEhKp;;j2`j z8t4M%0DR{N|LQU9hF~f-CaVZn#rL(;ZUAAacY&3dRuxPG8v(xLq$PoC;4#2Ahy=b` zgePN|@Bj6T=Igcz)CDDOjpFqbri8|IRX4;!#%Sj zjoA@)_NByfAg(zPpIqg^OmLW3?%bd)xJN8cArL{#O$CsJ+ZAH&H307IQ48RDj~m3W z2FUUb2MdUKB7Dzw0Dki22Z(RJYs9=z-n zyvu^4#C#C8&kztvtdJMjL#(h5XhO^v@$#KdtO)+EDS|MIOa>@}MJ^FTH^lsqZoldP z-{|s#eEg6He#iqqq}gvTNCb$RAM(KO8!><6fxjC-9{3{<{BaNeZUFh^EnCy7L?IqlO0R0=UT7v-m)keH)FC|t7 z@vHNUSY61rF4A8wGe92HgIoQ@#2TO;HHZR9;2J=FG=R(-Laq&6K><(#R0qvKH!uu@ zf`wo+K$&ZJ20*5b5Whz7-w5%-IvQ()ve5{6-3aF!eIVA@9YCIq7Xaj6la>H-ZL*43 zQ{-V&gxgF8o?rynO00P;VlB!Owwi%?;0}0AtW`k(|E+cdgx$I=u{Nk*Z4ho-l>N3< z0OZmR;k7Fbju2}P`L>TD)*&P41K_{I6Ji|$K`*ctARV1@f+hg@-3f8;oE@ONcZR*o zY+_v{kO|}k2)irN+qEY^UUXdo_JEV%9{5VE8~k>Ith-eLEx|xA4Xgo&z(w$sSa%8J z0D+(;=mUDP0$gH1arYA5DTt?=fno1%npX%!TtdC8EbZI@D^f2@`BQ!Ay^O4#ty9m5dP5b z#D--B#X(ch7fb;wz%4C zmjTMiM3j+<`9N9F81x1c0Pa0;7dQrz0n#%G>6wJ|OezWLgYIA)fZs`o=Op-@1izEs z6Pug?Al%6acQV4AjBqDU0?R-&!2KuV{!?)ODLH^Y_zfWKQwD=+U=26~&I8CP6mkm9 z2H-aoenZ=X;b0Ef1meIQ@R?YcBghYM|1gyEFr+^W_YYeF5N6m#@RZn8ggG@UC<3a1 z)?gsO{ihaS6yCXmtv$ug*@SfNl4d5PgP_O5p4Vse(Zh;TP!X1DY2nJ06;vPO3ECuM#!qIPq zqaOh7V(W?n7kJ&Jx@B8)y$;--LYM1pk}hXR{lCthb;pY&lG9Yj5y7v28s-6tV4H!DC`OkggpG z#CCQfw#$#$Zlq<83E=#mp#bsRI}AjF)5P{cHv5ndkw|;wXkz;d1IRQgC%`pPtBD=( z1<11l$fE;@%Yl!?4mtvq--ELN?#>WyMtAuj^vO0o%I^qf-rz6!tJ8+%YQKb7Q{3JF92=^GuMiS(jGz3fs z7&9dy50Wkcl(nR9#ExeMg+OJ1x_P_@Ks=8x1lz$8fUu8$Aa=qGJU~T&_?$$!JXs6u zC3eaOR01sl%KoX5;3TorD5Ixw{TY;*GlpzK`O3UIFr zH^Dn%7fk^7x>yP{0Np?cI0O*xr78er?-I(>r5ONWU)l#yHZDCUb~y_u3{Vy?!{6n> zU=o-Q;P>)AfN(CqCUzx1KzLW+?+W}~!TBo>h+UOHCJ+cZf`I_>y*d}HA$H9P%mGNB z!S5*Rzt<*q-3PP*C}YL$umayfu9myG;NMjj`B zB6drFq5$!{?FCQ{ZbKHg_kc&lQu2VNU^+kcyT<|Q;@!8z?jis0^#O}P0)PzfLoW9lf*BwZq!4?6GVq`T=mchiIARYu=m4$& zo!FxV#2zE9kHd*!`HTHg4y*t#h&^cpP7r(Q0wBkymx(*xp!3n>f}4b?R{JDhSBf*ar|aV0-E4?YlA1*ikM13ZDFUL~&O0?ok$ zupOKOZ-`^av7;6Cx(9v}ojK6%Wb zAHZ=Qlz}|(p9eC@V*%$t3V21_4Ki}W7n9u@g8|?mad(sv_d(z=aSz7yBQp%yJedCnrazOir22#m&{y!*fZq{oQ$0wkfIc4<+W> zY<;D_Q?6pP7zyp<4&b8d4UHKUrBA2e6u`~SgpKVNJTd@)}vLJj?& zs!DvZxg7kes=GO>Iiop;Ij8EN<~ICGS&TC{@A+>i$t=u@naucKz$}|J;|izTX4NqH zQodBmXXv8hPbFuUv&n@;Makc${L)P0sF%r^SN@A?!T)R3jJHtjrB-RGQ@ZVk%fDlG zl71-o#;0jA%2~|?%=yed=0akg;qo(7d7~6;Q-#G8DVRIqObe;o->5pGoK!<9_fJ%5 zlRaPXXC*uHhG{Nccz@pIKUDp3jZI08a#5QqA^)oUWhmhx=D;j1O3J1GL3NT#o6F0k zRS$C}b1pTHabAotT;wwUsQ4mY`2Ua!=BcVUH&i2=UYwWZ`TkZF<%TM3=wh(VZgWvH zh!8acUjr9Er~sR8*#2WB6*5=g!YLPT%Y`}ElvOHZ%4)7)4&c6jqdM{qyiS^`#cS~q zypG&KZkF!$<6Ip+gU?VM4gKFxv-m726Q5v$t&S#rlH$Zgj7xT`&A(gKUK|N>^4``#PNM5o662h zrP)%C|6RQlu1i(f|HbwG@^XA#DM-r6gVIznUJSYJ_(Mrmr7AqbkE37AXHre>!d>Jp zhWv0?jn6|P8Aye_YcYoY%$ zC6D~6{?U)@n5s+}|EgrKRK>kzC#VoPqs=tzf3JLHLpgCDlk#_phw(O|s?>_hJkn(1 zaeSs!M3{tx?|NxwH**%VyV*m{CM%NtKe!a(`$Q&T2G#jS-cG76+X*+3U8*e?k_+&W zyg5SMEc;4z<#FD zzsR@p0sjp(T6W~&oN;E7pmUQfvKdFesK&hJf2gkW^8d9`J-MB{ikC=J6?k8<;2-n# zA6?|U!dcGmApK`bwBhA=SE;R`r8#k1-M}&KH==@F^mhZrH}Dm3AB6COi6LGLcll4qGRnX-UPCMT1VoKwytuTFDGJL2c% zx_q@9AnNiM{!I$Oy+WiAnK_WmWX3=8575)x|LkfEYUNCV%o)s%W=9-JW(T>Fxr*7# zDb3_N%$3zFY8EH%WHwAFn<+U7JD~|7RDSZmqomPt8@ajYBDdxD_!g|*MgXfSMeyzU8H(aA1O>;DNU2cN(#Pn zh>UW;YkvOoE6hP=Z>JpQVuG2g3NB=kMPv{zW&H(Ts%C*}ZX&)!DrU+cD$2#AR&q)|4`3oI_vdFN zZ~mUY|2zLb{H0!nE2H=vxuM)Zt}i$JVM6!J7bo`jk!rVj1(g<+N*_8l8czddl03z7iK53vzk-QWiDd2hwhv{ul zW6TXO?#69ZGKJo@m0dx;>E!UE3@$I~;2o_yMXE9Xd5w-Xz zeod|?YKRW91I9P*Vm^PUx`+Xi#uv&}_#=LU$Meg)O{$U$@ko@>zI*{j!v4Zt@)c!K z{@e43@?buY&y9#nu;55&bz1PtJL4fQQ2SWD*2|4xKc-2qOj~Km6gYe zMPf3qF6YM>sRYj_<{Kk-FwtrU#2&3|kqLH6RU_;=K&0T?R{kiF%a+#O}4 zA4rzH#czD22ol}o>ihx5zYJqlqr_x0%8rA?|A0Dbto%4-F_)B!noF9qJK32_$o}RM z=4@D5=_|SN%2Gzv(aBj=uw*mLYTS*!CanX}MlIzHIAmd&9jcCDl zqGxfC28rVQHGjh&^T+ZM$xhZpWqw5Tl^e^AMPKCAAkl~KYeBjpuSaSv?2 z`QQ6VL!br=C!vVU!d_k^FLKH&Tpb)mbJ0w~9_ar!e&(|Lvt%bKi4yz^Pl7tlpNLVS zzhvTfQO6;DytRtt8BQKzhSWwH#Y5!zVu^_0Rk$0*`7=Zh(OT|}5yLpSGFp$`(in4T zuFHMoKE@G76TNZ7(XwntC|&<7FTPteLmy(N3VuTBD0MJplV&=(%RWwd4B1KJNs~k6 zP}0cAMopRk-7H*rEtpIiNs@6)j(C@hYA#I~NWnFO;Hr|0YmCn_n#BsE^}_d{%~Ccg z8-Gjq&qZpEe{Ae=tqCZ2GuH^QP%M=f%00w#d4s%0UhkkfWN^^r={!YrG}fwJUKsDI zalxOv4*1i|bdSjH94>8u6ir9UiXu8|5mq7cM0kh=t}*^ilE2o2o#dVJPHQQ>8Z*;Q z(yVRu2%bT|sAsUY({tk8#|rsaJLwZ>8%?I%)~>o8#o?9?)^MvG%fX7UlGa0dN%p&5 zpTew%tt06)J!g*A>$*E@uh*tI*84hV1t^ldtPicvX)JA_2i7P0LDojUtGBV)M!?p< z`WfHFcBd&cMK@c|vTn4L8tA#KS6CfdLWOh>y$!3Ox1nLWr|w7(X(jd0^I0#jx-^Y) z!FG`irwNo<&!>mdRQOx17uQV`g;470rS+_MTkS$>qL}`PTX*WCsC)>g=?Hbi0*Pkmhx8J7 zB}^}TjnikJA7bB2*OLxX7rnLq_{t?ZuaDKcq_|UpK2jfbKOZe4Ss(KvjFyw4k9rvj zQ_@Gi3}fZtUY{1zcRE7Z@ve_&da&-3*BvP;uiq;lOWk4aFK?mWZ1G7i2iE@9t<1gG zQ0r9d4klVwgt?OiG%RU7rm~SbO%=6<%;#4o{?3|cu7~RCt2C0R_*^y;yz-W~CZ z4fcXrjYc;3!I(+BHgS8O& z)nM*mGhmLeexcZ=B~ed0vaT7OP%5iYXi^=*zl^r_MNVyD^nx^TZ{0(BGphB>cObPu5qA>XL3PxN6_XR)t2w)MKBkEJThN9kxYW+XSU>W66a(I-c3|E*&W0n&m7 z8#SsOs}k}eP9H#tA>|9{S!mn1QC-l)+#HwuQFo@@6a9pqi|$V>>Sg_%c}?;uk5G5R zFpegB+l~yQGu;cl@(Pz92rKeQFUmZpMrP0p(aLF!i|N17qnR0s>)DxU_P0w2?9%KK z-}MIcX7;OIdIfqg$NCWhnlbNwEj_1YUAdTEN-u(z1?37fg`S_x}U-f`2_;(8Od zbd$10FTjd!z7nJlWzJi&eztbtBeq^Ds%K@Lw?4F@IMm$gQAaP!Xj`!y$m}!Q+;ixS z>BY7WfqG-saNFA))*xPe`-K2pHy(zbo7FOmMlAdG^M$ROn6~4*zx6Ad0t4S&9%&dw z>Hf}he)?z{v(xo5DtiB2XNw>yeRrLyOi$?Pu2bIDFKqGdGrqbL>t+}(tc77zWe%xE zM#Jbyr+44;N4vRX_gR0G1B>D4Ob_;)F0AKg84Safb=Z5V5acq*FuKv6z4smTY^>wH zlRkP~dbiIbKp)Ji?K_zbW#o^2uV3nd4U0VCt((~>!x%sl4Py-TgJF$jllC7kXnoH{ z8^(9G3P%fzjWvcbmSSO`!3>E?DgdA1hT+Sq8pa5kXc!usa^P5g zJr^?{IOYPw;lMFhJuho(IJT!A2c#!@dp6EE>dWdHMt@2+jErp5!9*{;BFk(T9jKFG zbfTGtF_JnSOe}y7J~IapcWP8Ac6yV~KOq>(O}QXegbq#1%yE&?wGAsYY(SA4SFO zaMfGTx}!}^dJmQ%@v|Qa-<{)Vp_YRe>{D}i0*_le^}7_hh56(h2}Wy`o{BNWM%h}QjI6|-x>j@`=XNI7MSodqSB1u?ftCf8<6y)^RI;EDe_t0}5+a#?!}+M* zl)}URD5W>2l^cD%A+VcpZ!` z=H|S4^$cV2{Dx?W(~RZ&U%DasJNCZ0idwca^3@d>JCa}TfDxVidXwRJ>(zD`yKcSO z1_AAe`d~E3Q=+FggpvI6{$^zBjtkE>p-}C-{A2?p^T&!ix%Ck&aK+nqINGs4{22yB z)92r-3ggQB_c>u4-+!A>=axRnzKu3P6vpt$7_gv0rjU@$Uc0P2Lnx#?#g0pUu!OdZ zyL&5`rjM5rXVUdaQ4b$eNLUmrL7S#8IgpDDoVjGlLW-RI^!O_pAHFJgIEBtza#QTpBD869*wx}>*&Kc) zh*oX3T=Au;TN3e5@QN*$FK442TW(w~#`10TzSNNRZeRMWAT8a#^;$6+zjJl&SCqJG z-nr_Ov}@G^M?A;A{agS&+I8w21_isP=37a7_C&~zv~SP((}n5F-q2UCXy)GarwY;3 zz0Xd$(dB*PKH~+A`zD?Ap-ub3pkLj$=;ceBZl-In+tc`cdrlUj#K`F{UZ9m*dBU3x zMy`dP7`YoRlOoTY^rmh5C*a!gQK3l%&|FQ2zAI`j^vO}Hp+`m$^n?Qwj^(Ev2PQ#3 za9|4b2?xTU&o~ePJ@jCBq8Cj$xB~X^2RFjL;-H>v!dnU=j^?9rhc+F}$3`Fe3j6fK z>yLQS#KRk*huZXshquB$*`_ZzY;VoT`bN)B$V-c&S3nPo-UR(j^xjW`wTu3g)QHx_ zM8xN%c`>WwJ!opoX6O@Rw#R$0_A#H2HKdy{Ry1IJVj95xT^=xIM9XXkHvU;-D9fGsWbg`SBZKbJMAWnb&X9 z-Xp6EJY@XH-VYsV*0GC^{3tGI?T0t0s?jeBP|C^G?>*?+$@neVXyK_hm!8qK)2rU) zrI6F>p8C@LGY22%rHHdPZXAN)&Z%dgf5Nl)O@DLhF*ZB|jqT zt;r)i$dckVc_!X_FvsfyMOliun|Mdi(T7X1(Ju5!=yMmg_UU=bLh;XHADAfg z)$H%>Sgkj^9n2K+E_wCu&~L52PB9;DnoM-KXk&+^3HIRyb) zQc@_f+`-xB7#@+f4KC9ruj68y+T9u#Ut4K5KCX`3c*2?i+51d*JNQ}6iQ7kb?4NwJ z<(DQ?wvT@76c#@CLya)m`Ca*`ah)?IPL1!9@#wUNm0mucxnSt)8nX@#`o42^#Nb!8 z=1ds$d%-zTqn!Qc#P&;k7yc#thdT2PG|hiv-l0bMPs~3w!rp)WnX20L1si(jJ-*;_ z;FGEgt_1#3H6pZ2*;5gR8hV{sc(_5nvkR}4%wK)c;rgEE7F{j&N2NtB-JfS!$ohce3sJ7 zVk+baSQb+aTEc+eD=s@z>_MevUtU6E#qL#Fez1Z2+2yg7aukP_v-omL#T+FKt@LtB zrR*h_pQIt+#}W?%SKcd~QeoAav9G*V?W^Qk ze3hlrZNJsV;p!K?Zs%EZr~K_wYqQuVmxFe@+**%1hsv(o(dvf%y4OQ*WH+=N>)s5# z4sF==?CTA;^=}4W&$(e|C;L4ccJ+4Nu_3ad^SKQP4NleCkW|Cjf5ZLOmrHI885CD! zpV8_HE zTE!hV%U|%=IZ2yeWar8O=lpiws(!+6*N#pf19$z={EWwL=W7dm_N?yzyy%`??N1lp zv%l$Sr#<^Sil{w@swMdCT{iS|;k~;CpFXj7@1RqK_8uB|vf$oC-wTEJp6P+Zr-2uW z>|5Pay}IvEw*YwD0^-*seD_QDEPVoDY5XJsEf+-@cb0&U^2BIT|NeCTJ$} z$!w8p$DHtvJlq6YbU(H(($Xl&JMu)I69pnq7dsIUd9Ke%pUC^|PIyIr_y8{->YVW2 zKdsf#y!$tdK3;IYr9Z9RpU}YByx+KD|EWH3yEFt|uKAt_+J7S-4j&c0QFQ;?w<&|8 zrnOG=jGEd1O@^qABa#Y4ZEXf^Zx3iuC7xxAIyelrv!2f~MVj~{ZcW4LuK#OhI^QpeE@g2=>vGJYk^TZx+9_Jo=A!odM z>~*)#)notAarl`fxy3Rbn>*I0P3#BD0yngi+xr;W5NLZoxqDdl4z;*iB1hoxU_WS< zZidzaT6`;LM>;@DY6tDqeQ4Lq8(LLpw@MjWSxauqonf%u4S;sv1KMNb#1l_wFL_)+ z%V&pUSuCH69(!r|!V>dXzNr=u3$4Xjx?pHV!wt>dk})G}x#1Q!ZfI3ClaUUna4gPGJTO75t)Q+Ft>WF82OuOiS_){%n+~Uvs$7YIu5ER4XUlF?P zFLbd%!WQM2lyFvxUkIs~Mt=(gXV-I^qJ0xA|vf=6R zBLR_Zj$iAs|KjnNwfAH@5uG>Q^JHSt$W~_#x7E*|J=k79bM{aN>*=#Mz4h`J&-c2Y z`{MPU+nQXw(_?GnOV~=v)^aZygV?KDqVA{)Nl!&HYY2y}iBNs#Uim%Vj-#`)~i;Y#%28O>rBf( z-w)}Ov+VtGopO|Yc(?MR$j6sGKfQQ-!y&uFIn?K$i7n>SuFKfHaLf5!U{S3Tc8|8S3Tvrngk#g|X# zD~gYwK7A76^Y$v)lRxjQ;+p(*Sr5hc+t!}W?%$^eNz(VTwYdDfSrtjr?__)PO}AP( z({;?|b#kkfLa?EOrVn_r$ZM-32J1MF2N$gJxuW>01GF1jw6Dtt`{ZozzB=scwCKI# zoA-<20vF~!{RQizSf+-OVOZ5K;*-N0*T>l(=DzEe;+0Y$rBI4*N3~RKNDl~GVgSG3{ za-(hy>ehB67ecbg%XXD*#T)|=6>Ikb>+L42(VMZV?|>D4Cvv8YlnMT`VsMiU>tH#s zLY#|oQyy|7ck-aTSbNWhkn;bBA!hkM9%ANyBShqbEuQ)Q?Ueu5=`PT|eXl|MR!2j> z(6Rl%&iquzj=k~!X~!Xc{(K|!06q_TAP<3Fl=p&Oj5mWG#4AHD&i$a5;5ng}`8XFUq1i_3GXgj{^4T+na6fGN64&HoaHhj=k7qn;v4*NAwxodjK2M zcR;5;tXDrAur31;E7opc_dZ=%i-80E{8>Hd0jw(YKvo8NQ5FEb7%Kohh~19k|*4r%Q7;?ok_V*awvUGu-fR9o~%e^Jlo#gj>#^POzzvhVhS`QTI(aW>gru zOukfv0w@r6ZgXwc*rDEuy6x(g3Vs-TBzSY_TBZF;yOanjk-J3h;x~&QF1|GAR!~Tg zci^kQyMgh6;equ)0LT~^9`FjB0_(vT&hA1DM?z4Jt{$x z%rD?S=*(!9r?VE!i^+JM+is-9NR0rhh9?;DekeT0m*&RFjWXcc?${5}Q|g7CVSVr~ zdq1f^{(&7W#vsiS{;h0*W%70sp31@g$PSRs&#SQk5IbLc@!kT@wZYyEJNE~OGQar9 ziIN$C|K=9qU#lhfw{98!%UVGzMFE7(X$IllR@yu5-Je2r!54zOl~Ssg+EneUhH8T} zBNak%Q~cF@Y7@1OIz{WL8H+}#DdVVZ9>$U~J)`IJf?lF4evSJ1mfq2ObXXth6Md#H z^c9~@`HoMapd(`p+h!$fUR3akD~$@Zqgt4($pvI6S;{n ze5u|&{hp8}>`)S(vP_RhGb$*}m6k|5rM1$0X|*&@S}1Lm7Dzj!&C+%$LRu*;m6l5@ zkY8J*WzueGk+e(NCasdzNb95x(ne{Mv|fxAAz~bMxMOb&cF0T?Q*5~~KpKo(7%GL6 zEINvw$hV(&^(04rj-N;FU&LPH%lrzz%CBLM<8|y5xhX~nJe!5QiQ`95h!c4t>`DAA zDcE6B6Ya#$a16N@R?4bH)OPAHb&fVv+lRA4$*&Ywebu(=P<6J}U)%AQD+(!PR3EjK zI!K+N4bk@6uJBZfs@`f#b)Y(3>!WS`%N0IKX|;gbT6CuswTS2ih|vBI!T!83S2edB4$N7_T}k@ko7K)bI!(VlA0wCCCj?UnXgd!xP7-fHi} z@1lUXu6@uxYVT1RK5JjJuP6@%g}3m*9^tv-hW1U|)V^z0O&27R1+(pgH1@Ab*uiQN zw}d_Rl4lUNu~*7TI0$Ev5j)E>i)`2{nH75-T|`ciUE~nCL~iT=a>LUt?%18;De?&~ z?0+tdor6V$pYRs}*hN%S6vG~x;-aJ|B}$7DVwgB8%80U}oG6c7L>2IiQYGwbsVu6B zBO*mq7r%)+Mowd=%Uz=^h+3kyxF_m}y4c~=L^Q%K!^WbixG$Pv-$)DbK(rQZuv@wn z_Rl;N?L`OF`A0@ufEJ;n=q5UeF1DRYJ+U{c$3K=;(M$9eeMDc;U-T0Luy1OR7%YZ} zWnzU`E_RDuVvpD-_KGcHtJomci_K!A*eN!NZDO4|PF*bGM1qJH`^96Ch@D5r#C8!W zqQn7lP#hA6MYM?v>=27( z7M93Pu(RwOJI^lSD>*mWEtbOGu=h;oD)tSV@!W?4c9s_4{&-3#m{-R>s203E+S7h$ zM~Ct0*r7KQJO1Zm&*3V*9(#;-^Mlw~mc&o+Q`jeS8TH(#<@d3h>V<8G(kIC*IZ9cu zFU}8p!YX3NV`Zr>cKX)GuJnf3`P%`zTsmQ|S87Y&AA5qwN#msn(nRd`oF+||!qIju zMayKgQ7h3dtw-w=DMd-qQj8QQ#Y>6ODe0_qPkM;m_Yv|!C0Hq^lvgS!6_o+X5aqBE zt;8r6B~D3DjwnafvFbwgka}2+SC6Vm>KVJ`cF*lzVW(X?Q+rbfQx{WL(-c#82^gbjNhh^uYAc^vLws^u#`+eJ1;Y_TKhB_P+Lb#yY))T|m{fY1(vchBi~1rOnpn zXyMviZJst?TcAZ~3$;bsVr_}GR9mJk*H&mNwN=_`ZH=~8Tc@qpHfS5QP1aDZm!nwisFtY3YYEyBo9&qK z)c=7L1W@3&k8}np^SOqqejbR~d2EN&}ktMN{>;k*SUMV@1 zTuN^AYwn7N;;H0QywKJbPzs{=^HB<+EjFY&T?to~D<_px%30;SazVMMTvDznzbiMD zTgq)EMY*GPQy;0%)X(Y{^{c9DvgWL1{6%8(sG3$stE<)1>T3oLYqAV zeZw$qxHdu?2{lR^t&P#fY9ZQqZNguU#%WWuP;H_%Ntt)0=% zYUi}`+6C>Rc1gReUD2*;x3t^ZO)Xiwr`^?3v^(1G+I2NVU8G&p@J<5E0p`*&v}(qV zuJJ4!?Z^qV9xu>J)J3b%%jg@?Gj3OQC_9y1%5K!<1Y7-0vU~MQEP7xp{J$t2l>HJY zV|cb3qsD(2hy61tp%&#NUj>SapwxCFKh*X0&tCCxKohqf2R?1+M%?M+RZs6RJ*CXJGX6qnxL;4+zc8R#QDuNBxxmr6@*4`7mbj#@MI{MlLNe zLh6I@%Rr2irl?`+bc~ja+S3R1#%Oi6{yK8PyBt>j-MILFDqTCr)baSgDuaJF-v6JD zT7PM?N-94joHt#@jnbvuD_zc$(k1<~O-XN!G2SYOk&Q84vCTyOW~6~Pgg}Nf@b}X! zyfMT0D&<_f#(q9#6i016`f>afb&^hDKj0ZUOXui3`uK}^z}C>8SmD(O(}GT z?$SNHKlcH49X_JR^aop%JXsMch2cpYqsN^g&6H+Iv!yvw zIQjykCosxak@OPgkzU3crkApw>E&!qu}#xY}spr`uaSP7GrkIE+lxZ;5)^lM-FN6dtb z*|8_)a1z{$0%LW-xUM{O_<|qf4BpZ5=QB8mcQufN8jM*xQttsf(r=qv{kW?!D?z)U zx~mP;9_j>J`(VsUaw@*6n_6G(u8v2oUvImgkrGZJ_)@7aWawv;Vjya2F(pVTj&fN^ zsjO5}sw-8Ls>*Ll4W+hHN2#gQQtB%8ei_TuR~je{l}1Wq1z%TInkmhd7D`K{mC{;i ztF%+vD;<=MN++eW(naa2bW^%3J(QkG8>N@hTj`_p#r>WCF+FH~yvd}DQpS+I5~75V zgEC&3K#s~JWh(rKE8&zCz5H@=HAYdC%@{@D17`_J0_8&Ae;?mTexf|366!hiJe5*! zsyFG^S+|$iY%dDWozOQn3G-A6`!6qpGT= z3hcBtsrGojF9Y7v;HWyO&UgnzCOnyuMRhTr6;ZRP+0`7zlRRo}H4k#vL-n+k|AJ^` z3#ogx`sgz?1iuj;EGa;vZ-LstKi)_u#JFiK)T!TbhjZk z+W=({##n}HYDgUO11Yr){pp4^IJ|KO@7($63+o!Z$KSQ2hBVNAbi-`jSUF*6CAy;Y zs|a7FYuG34jTjoWAsGCqCnaDvuEOex?VQIi=YkNTv1dGe6~-uGc=iXYjhHo5AeC3; zRS}zRygQkVwHR`+h2w;0)nf3CUJJiT*`#7paeV)`v{Z#k+QO=gc=e~M(jI9KHIw#9 zQPf;Ih&P3|#%j_P>LJ~bZqjgDTsj~wM$I+a2YeI^I^*HcIquj4K8$*5)brFaXAo>z zFlU*|=kbMj|Hcx$Ib#)`i(ZeXs5kM=d<&j^-p+T}<}&+uBz!Z-$_P2N?)Je~yHjlC z(Gc7^Pm#0PN`^VW@+?Y2XE8z5Y;5a+JJMo+*!#yWwfVQnH(DmOe5b_P&aA{4 z*liL|Ju|{|&1B4(C*$quhM9T=Bct~8f$t6`OS&t^cRkzS2;*3}mML|W#t4PWMX4&> zQ4UN-*+>n8n?{*N;m8}VhWR7pw9;g}zuo3$tfE%rIO6v!>tshV@t|&})Xv>SE z#Vo0m#fr8>n&$FT(RjR#-dM~15x%N^`k|;3a@G3FImKu>ZLMc@oVokU85y%wD2)D@ zlJtwaq;6I><9{Ef>`Zq>e3dCiJk*&#l`S5|vur=((GR0Ro3h2@)i381tYz60W{ym` z^vfApEoCTMJP!WiE}<>-u*G9_x~mb7Fk3v(y#Dyv;t`bY2Q82C_=hs$p}PKZPQk3w zrfl)}_RAR=a{wq?Jbsmvq%2kz+v0H|-M0~sNr;E74Um3@Z{+ACWg2pHxw4To)Yv%6 zh+2CewdpB(GdIjuov@~g^2y$@ces+_o3rV5gl{XMm?>Z5-*7G-;{}^7{Y(st!P7yE zZv-&Y+&Np%PrwB=f~`-rr=OY5cjC+tgk-a&yAR?E;f}Ixm}#zzqipf&2&D~cW7yN( zd%)cfVN^v6_|vr!Tv4_;%ru`U4O}G!t39a-_2cIm_Kalq%uw9xPwtm+$4l%IH+r&E z86X@MgZss>Pk(aT4EOu#E)45fj4gz}h3o>Z#4{;arLrmXJB*=*GmFh#W_=8WwRi@d zv5huYi8W)*C=|!dK)B6~v&u$G0G+WhHWx-6U=IMjQjb|H*gatX5nIM;z{Pkb#tW)^ znlGbg_=Z}Z5&g6=USt?O+)g`ZvpKW*4eq>rs2~KD-Y; z`PPq*qF#8;ZY~YS+So#xiGF4UK8=MnGJKA#}nA^DS>~K3et5vbzO{QkcvyCnX6P*s={)it)JclI0l z%B!rilq@}BQ}Mj-XEtB{Du3lI)d%VW-U@52k9cc4C%bCAv)x#`)qI)VM!Q%Zhm_jc z`WLeGFOsc)k!}5pX6s+D*1x#f`WJUx>1JFF8QEi2BaAgA%y5ojo$>(I zmcr;J{^wc4TCsKP1slX(u`et}x+Yy`$EC+I{x6lkDcQIy#)V~hkkU`-$D3h232!b@ zW+}6HN9C*Xm3LB?s7rZgb-B8ncU4!at9UncgSvtDP&cVtc~5nR8p->rDQXHIs%6q# z_%JPpmWz+l@@hVOtmdl)@=z^E3+6MiLVg=>>UnMFz?a)Ku^Yn=+pV$Nz#pItXsp{$ zmDZ~uNjRgjIzJSiq_X{!XmeLsP^!Y(EyFlB)wj&t429CeP^yfd(Qe?CT^REjEsyat zTA~n}KE}8+{tNo4Qu{N*moXZiS*1q2Ir?1&8;A}V%7REna4VsF?G z8zP8eLu`nMy?5+1!Cnz|X`Vw?t-9j(tLVPXvT>&A}yKY*a3cHPM#JF@%vy<7%bmS`J81aq@tWy|%kZ$K>KCFO< zk8I}FyT>O77w{`0~)+-U|9AD`Tn`$;JE%fqqX%uiezj$@Sl z2*(&@M^l}afkuUboN2_Wl!UFw(jiKW=qo)w^`I;H(UCjW*4S({u~Z=xn?zrTjS4}8 zRp>$fJPkiD;%Qyd%{^{_$`Q!{+NXJ3yXVt3E$6x|W8+!Q7%4gO+)Z9Ytmzp}Ot*|w zmP4Gcc}Pxt!tZ0#JLkakqBpTReHxPM!8VjV*`uN7PD+d8_uVs}E}2g&t`L@dk#OvX z=Vd7+?`OYOr_U*R4YAVhrQs~clTfo)rDyf_rlQHjCe=-<8_#GwwDHi&>81r!C?-{2 zQ+avi`IV<-wJ`DtD>ergG!Tj+MpA=G!L$GS^c1`lK1omUyYt^lw_ROf~$?S@@en4S#bM{^n4_Pt3y8{-yEg z3(8c(Pt3wk%)*b)!jI3wkI%x7&-On)3qL*!KQ0SDE(<>{3s0+?9%oz@eq0uQY!-fO z7M{A6hU2+7Q$1gLt_sr2f_;dQyKXMKCYD{}%dT-{R~}carEzcOc)2T&D>lph^0;C* z^%KkGt~{<-FZaviiUo7OJg!(#{lt#BD~~I-%>DAXVo&uGi{`F8u2?nq%j1e=bH6;U zSXceTzPT%pD>lyk^0>L3L3 z3000OWmM89etF!9<>TgdmS6MZ=62=?<+xH}buYP{<=6bUxt&Q#`M6ScHAZe{`87Y^ z+|J^cA2+u%XHkwTC0X~9+gX0ikDJ?>v(1hxE^pm)c5P8dqFuE6%YTH@CC+ z<#BU6i(ei$w=-5L$Ib05etF#7&f=HH&FzdG%W-o%i(ei$x3l==adSK4Q;wV4S^V<2 zxt+x?kDJ?>eU#(ob{4-pZfdO#V?PW+gbebxVfDHU)Dyz6;lzm)#bGkCr}hF+|LnXl6`TFzbH_;fptt>+x&FVXz( zvFYEONv7)0S)_lH%1qUt6h@zNb(GpazU&%TcICgy{xlqxDZBDtWv}`x=U#T{uf)uL z6_@@huKd@0&uQ6CeV3Zl-a?mH(RWInA%^ zSIMu=%s%VEOp{HyU$17h_(t5V|HO>7CETmO&glBn+^J7-6B#kO)QxbbFoxCN$;#6^ zagW}Gm04T6HCzQFAK%d{I>)|bpSBO%X||2&%su~CZW(KNzR3vvv+iN;_a`w|?sD$- zPouB!DDL%pF*>#lclsS!k+r!?oUuR9_BQ1SqMEgt8#!lxq8Dz7{gByy zuaLqAnddc;QJPDc^>qsS?oZ!pPrDO67@IO5tetIbo6~m{P)<)XCl+}5o#*v0*v{eT z=eXfKsq|xQ>^*r->B@?7?RZLIAwByWvj>+mi|}p6fTlCkaGJf9zTVOHLVJcCY6p=1 z-nNI`j$?GP)wTskaLj#fV0OcY%z${s@FFg&7EPw#_Zs>aX_sq&1#YNN&ugpGA6Sw6BM8+GNH5cjL75FAV=b9;eMy zZRr7Poc2A*Ie5N)J%+QPH`3mko|*LTGAi-s5Yo>Y9G~?jDC%(fiNob%(-IXEXW z+w0`$6lQ##9-R@L8J!iK9i79xxf}kp4=ffi_k)`M_y5zRxhHLk?aMS1ke0J4;E4{Y{TgdpeS_1VW`FleC-&Ukys{a}9#*|+D)#;78FdCW8 za=MJZsnOAu(U@p#G%mV28c%Q4wb6t$Y+5GW^nVjxLQWv;)e`fsM`VmJ9knDCjPa8D zgUb7}ZYx^NJblisM+^TI<8O|ztC=m>l@~j%`gvhW@F3<`jY!| zchQ4h&9qd>zSIBu#Tj}p`AhbsD)}M*Wcd>bC%@#bRlf!4Z~E&h_p0AzO)LE`@&EkS zsxVPGjPB>Jf3FHx_)|Cy`FEdHVWq76`M(_fU;S2vkN*-{VoHpE@?8}t`Ik5v>!1Bs z#glnR|N38!_wRnuxHcS}#&hXRv!b!->@=IsN?^Q&u@gcN#-;PMtn`!P!~ASLS{hqR zwlf_mm;cmeNNaH}?ZuhWdJs?Mjs5FqQ1qGuA=IFfaKmNC=m-%dJn^oftwAyJ23@T(i z;wApFym|Q#zq=Xv?~K&ub=@+P(J|&GdvSIxexbhzyQh0f|6+a2Kl`z7$(N?-R*x+2 z6H=S)7}nnHx9Z4YgW1;2WlrgnobhDlMvZl&{`cMvg_iq>w>cD{IY9A*>!H%A(atWr7_H3wn5CDw&MUi^ zub27pow>|)TG@4K*~J`=?AMdauHj|ZNoCiGW!JE>YiQZUe9dex%yr6Kd`B#E4Jo_$ zu3YAKOxZQK>>5;d4J^9`lwC)cT}PE&N0wdv%dR8Ju6|_~-#W{V%lqt^>#(xx(6Z~0 zvg_co>!7lW`C3_AzSowy`jlOKpDpv-uk6~l?BYvp*{}PQUA@Y#y;E1YX89K=S$1DX8 zlwG`Ik@?+McHLWc-BWf=E4%J4yQY?1ca>dJ%C0-huE}NB9c9<;W!G(G*R5sOEoIlF zvg_uuYhu}TQ`vQ6*>ywNb$!`&UD-9E?7Ft>x~A+JUv^zxcJVfFW}&Oft}$iTm1P(A z09m*z%C5^Z*S7y_Z#j+T8SO9cg#Gy~u=EYEa_us5zpr)jvNTFb2L& zyeyu^yRHvc+{)XZeJdIjJ}h))mQd@0W3Jc4Yz%pBcy)M2*ov{?`N8$UnBbIPy>x81 z4`aH{O?T(eFLOP2);;YuyqR%-cI~2OhF$r#=y0i{VaL>!&Nv8|!H^6HDwx4Ale3Vy zGBOWz0Gy*qd6n=?zI6Ynt;O)LKz zVUG24(vW;-GMeMxVEdYX9iJh7Ub@e@Y-jmd_zd#22FU>b5!+#YCjKDvA!rZ9pLQV` z$Wz#+R;a4T=TUc%UmQbAOP0 z?k~Z1P5mggwfo@+kmej3|Zl-a-JiCM!BOhXP} zZ%zHtY^(fHgq-V-#D5_7SuPps`{OgiAAx)DKkR|)T4g7{Pa3ON8mp&-^n0hV_Dc7# zC)+`O4}50+!ya}{kGo^KhwkYfcEBh0-SC<1w@>%BUAnhz(>-W8=#qxqF!lL!-mQ(# zT)!6E*L*9&4Dl^-&+;wOkd$>+@|Scp|DWZa>Es)cnhkv=`}*7`Y(Mon0IUk`_wdvK+*?A1OvE9#+)DiCxSwzja6jXI&HaIUkoz6? zOy=fFZpF;X+K!n<8?UN^?mPC;lvVG^DfbPb=DM$mH_&}0G2EB92fGH`Gu#(!XF6&u zW!tB1QU$XIC5F`5fo`$&#uh`$eIo|w!}`rA1>t|`fKcRTxw z-EG94?QTuu-NAMUvsA^J6Y-zpxDH`Qt~}WECbomzjrh!TH?W;mw!sAURO=?=H`m=l zs9}F^Md-nhv|q`#(oy5t8}*OS)IZMqGPZ-A)agNgZ5I2v|b^8%(sFQjZJMJa1$f0asbBEwR#2t)# zuA@G3{9bH_xxLvAbW$sianwiB*aPn(?Q!&xt^o#NRD;V0t|3#Y`vGdYUb0waAE#2!Jd_BY!;%n=6L=hNd%$7im62=_4iAYnrL0PbV${cMl1_wnl- zdoSD9>^=Alu+!KMvUlS%(@teO%RXm(G>2cFMPI}BapRK#_&+AfaUMd%Osr*j|XwYSsQTWWYN3wm*_9x5`dj#%TwqF{OmdhTUy4GPFxX!%K_}2_G zoj0TI=U!+M{oMa0x9z^^aeJjH+?zekwzNj%=ALW^TiPv-co6<`c{hUg!qPI4%d|}7 z?{4_dvE6YGush;@jZsCh)ULQ^+FjVrvIk23^49^W&Z6(0d?x(^_>$wl6w}Jsu`X8V*hWo@Eb7b*A};>#*I7v^BMxvaPb4aP+x$W70FwZj`2^3+};o zL)QWg(@r+0LTO zNUi(ZlLaM+t1TblKKI^oG_LpTjao&xakytt4kf14 ztz)=5m*aEhtWhJrEg7|0HDdc;@7>J_sU41I+teJ(9>iv1lffKmHmlIfnVyLM5Z1ww zRMUQvj)824@hl)mq|T5IsatbcFGtQ#>ee75b!#S13u2Sw(h@t9=Ladlv&^Y!$$O1F zN=u|UIsJPUt*DgLzuS)MKB=SraPHKf$|`xAkhE~O!14RA{o8fgQ2X%fnf@+ya1i+~ zso4kjaI**7@^zZ~G1;Tkxw$;k$sV0tWwJLMMpuFigEnl%D)r1UAHqr=D8IK(btdPKwg;^Ar`B(k{yj_H zluWH}#SW&i`Oke3JK3FCqb6UW=B`z1)X-PooG+oS>aRaO>#-lloDJ!-e_ea+nTzo& zJ@&{1C^^)GJ3#U1B@I+|>`&gqzJ>Txi6?7F@!mBdSGSWO@P9B5{~N!;FY|T&T|bvMlV4yq{-eB~Je8I0Ze&LOXx6bfpEWK{Va32> zcw@OQGxK}%wb@;KH)iK=z});6e8ue#=F%)d$Ok>W@B>IT1X6DN%dp>8; z_phtl{G0KmCn>G6>Wz#tHI&DhwqvAeZ^oMX^KQX^8*f^*-j$5X$;gwes4(^)R?m4x z-g;vF2I-^wH!CZY*Vl1{k1Fn^XYT!q<<$J^qU+g?h|XiY>GQBJ+a=*)Y!`=zvRxF? zeuniS?I-WaAH*9s0~ifIBU~7579Pl7M>EEKdbl9kG^9Oc#fCok4P@l|Oy1ntJlr3@ zkHY=fei-h{_JgoD+xJ86EyC`F4=e8CuN^Ax=db6(ZWU8_U+3EBoN)UJN**KV=Z4!A zKB%~pl-A3NdWE`*FWD}t_>}G9iqF`7T(N@f{EE-nF05!^yP)C=_Bk=64d-3{UD#d{ z?#zCV3P(koggtN_8D1Q19PUaw`!i0@`(V*V;ZFQ@1S9-sg%?F#LR$0i#fpd7K40+& z+vyb#v3;iE0k*s!L)s2!R=}x|tU(bn;&>%9GS&|_$9)(x22PIF3pc}k2=fO{iaLdx z;yyUKEIKh-H{1mGfzc(rrQIRi7>jO!`<;rV zxOa^OIh$lD3Q@M69uDT}I)t#f};ZOYjR>fle+KF*qt~ZRla=j_MS+R)i z8x{5VbdR|96ke~W!@WbqHG?tQrWLzj*X_ffqb3zQ;J;nOb%+sMu0xFE%5`Y#i0dCC zyIhSJ;pJ+?NN<6-TSQU#Bm0|Iv5@WDiUn+6tN57hoQnBuXIFg0c2>oQY-d({K+VhA zbb4*K34i>r>nL{o_iHE?ek-q^Xe(U!6W@_uwSHo{dZHcszrK26zINhE>AH!sa?a`H zH5327Vq&^pqThSfdWpV&wqD}@E2|~4mdZcP2IwSn5B_)7ajCdFSy3^K^)~*zGR{A( zywQtw8(FdOKdgo`q`Z>jE4nI9wDP~N$C0l1_)C%^@$tt0u-ap@@=A}g z&f^Y$S?N*Nc^vbYu3MCl_+0xa;~mVl)kb0Y4ln2Iq$fB5j8|#^5)~$(Kpez z(Rb1J(GSs&(ND5A<^R#jlr(_pJt%imJ+IE(?I?@1jQs6+&M=JBG?HLFhsu_rwo$6- zR$9<%C3+pLQ9^kGyp0l0L)$9h2D>6jre@ zMpgoc@C{VfWPtE#R93zQW^NnCt5R5#%CHi43UiwbBV~r3eiODvWp(V$a95+U^2nCB zC5>At;dE5iMhDijGK}h^@J6oTsU#(whi<2YbI|RT@D#M05?+YzkRf-s-7~~*#|&0m z=NVAh58XNAaCDc9OVC|2u0Z8Idf^t3_`=JuyJ95|_fTw8bWep@KSov@Pgfn6^X;iv zvE|;1m9+Iz?5QX#o(sIqZs^BIu_I7cOc$)AN74=U29y=o(^c1}pnViO17*c_!HNwJ zRP3ARL5h7JWyL7LeuW;QgmT57-~bB5wSM$c5j^U<@E@JjS-g|*-eqp~UCd*~!3dIP;hkr~j8;-&<@ zqqix+O7wOml=5?j68wQqR>Cppok}Qirzp`==v_+q1v*vXF3|8L=ag_JI!y_eqW3D{ z3iLiDdJdI*g763Q0VR9~eGsr&Bz5FrB@(+mqIj|Eqe>{{N`8azB~E76o|5dmtLjohu##SrziW^&_FfMIu8-;!zW7{h3bhMo!ZK17I80|K;y&`R! z?Vz|z(2k1KPrI(-u0<&ag49>rSz(;s$hADZp1+Q+uP};l>;_6&hd0Cx^wJsIMUgtq z=t_!{G)taAg_N016n8JWX@=zaW(so*jNLqAHB|Bsn2lg;S0$D-ZkZvr-%4SIgRxs@ zNIpoJ0Omg!yKM&fVYgG53t{Z`3Zpf~c2k%&VdUN=Me>0*NMKGy+BQl1l-gh?6qvtZ z?9Lftk6jdIcNj*!6tPVYg}EQb?w%p%w};|BMWxONyMdHZU9+7GR{X2P`udbz>EvfgB0fZ7{&mV(dZ$Hmoyxj zaV2_~;^ln$W{g1(SG=5GzYM7hM<`y-wSUHq=#h$-uu_MGn?ULPnk^2jKtzu@P6Ea$&*D3A^^m@g;h~A*Y>!CL)afC`4 z6~sOh6}JMtIYY|Dq>P=>TNL*zdaDv!^fo2l0KGj!^7{_Oi~nRL=7jB?8Is;9itmHo zrBwWgPF3Po=-rAx7@d|O$Gk_0H%0HwkYnDLA^Veh0$$4U14E=tCJFpbsnl zRrHYz$;U?(zW{wqsrVUvT&WP7h~1y0ju+9Vl(2+8t%PmSXOws|^jRgYMW0jRUD4_A z0{iKSO8!B-7y7ay&w|XJNpU-&l2)kr1D%l}$CB~^PSQI|iKV>F&gh2DQCM@)Fn1?~ zu~K8_D$b+x6!RAPy5c1N-^kb#eN*ucm2iUC@@>VBK;Ov_yS}ShP@{Q0Ql zhal?evl16`i6I{K;Nq%3@vA^G^Z;>ETrG9<6RP`s4M2BqQ`^h+gf zjeey#vF+DN#W!e%ISlu=itCPkr!cp{*zXm0Ci;US&x`g)B`%;+)`g8g$~QPUrlbQ5 z1xWfA1lJP%O)=Y}zbmc=m9i)t4J#GD4sqCpV6cy)?MR`mb9f4j*&8WyDdNT!>2uFu zG|cFYMvC-9q>F=*cDFB7DDuqaNNdV%s2l_2`AwdeQf@~n6KT2F8*Qw3IZhKr?q8&Q z2wsj!xe??##Rx+rp=D9>Pmm%QCrkv70>qWIb9rWvoIn`L~4Zm#%4(JeBjpmNSzQp`cF@q&@G zh>bw{2jy8VMbf^lV#R*jDRQl2mTQXN9qp$0I#kL8#FFOjN-X={Q87ECJ1NqpJNX;p zN^}=R`mvH4N9|Q*zW-as(!1lrZ=wV7AVf!k9 z#6LXaF0^09aP$Zz7Mu6aI2%1u30_2xQsOnxqccXL17ILr0E05FM+Ymx8R#(?_o2sT zyo3(Pcm+K!<7M=CC8q4U6JRLR!>|m=_Y*TdM^DOFj>`E6w8wH?5xm&*6vcN!PgUX# z(bJTeaxc%=f)_hWx*?W&aHit7M9)#Yl(BOazZE(H&Li$F==qA5awB$uxCMHl5=$CJ zDsfBnV#P~djZ(bi=Ov1l^j->=u^%bFQqI6Xj9#JmC(zM~e-FJ<@$=C!iWeJ>RpNEf zaY|f;UaiD!(eX;$5xquOjhD8&^wh__A^ECQa|p>ka9FtiL24O zVH$Sc7QIJ_YtVZ^%CF?r{R%S;jC(+d<$UFQAZm&}q(m)I$tQ@VZa$*K`=XC3ac}f7 zC2Ea6u0%yt^7To&Y(egOgyco^X(g1jKcgfspwBAFEL3a?$xL)QJdgV&RKlSw5M-Rx zH%Kh{iW15GUR5FqKSN2xPxdvN@RFW6O2XgnH6@X-bCpE)H&002Pl-iKOdI zg+*tLdrL_Y^lc@nLf=u65PerkD$w_oq=3G!M6%Bh;6w64&h;ZDlJw74BC*rQN+kA{ zG6zXJbfJ<+U8qyy-_Uv`S{+@aM6J-p3UgwNTcRYj=u#yqqRW(|8vR5`+Mvsoqz3&| zNlNHv@Hy#`yjTHW;BJIAD4~?cFO?)hzk(kL(**rVNn-S8ScyOFk2gw0*^oXXAtFEJ z8AD)BrjdJSAtFEJzFCOKXSr7snA@4&XUM$x_+f45vD7*f0CGFc9jT_{d$zPfA zEjmUCB#l=oGOq5&Dy9cI4z7kzVZ34_{56U^llW^Db00b(gYxIEQw;UVU$02NxW7S> z@oj&jV%J4)Qly{NPs~_>-mKUG=%ftFw7*43`><}qEi}OGiu7&xI}~|#^pi8bMCCV- z=UqPq?gHw8pQ@OTP{}ip=Sx3L2{uRXQG$KZdzFCl;pO}xkbIH+fIw_RJ1+#Ap%20% zxc5dMg~xFBLm!7HaG#D!Ie=Kw^^_7wem)J<=~&YGtP)&-K9|uNl`{7{`;k0<0Vpql z>Y72_*gRDzVr@@&ID7-+U#Oy!=>+#l~O3*MxZp{YH`J3IDAk*GB)HB7Ls@ zdnFeCAC$zPKPyJ^?ia;&LVr_&vFPuxl4DVJ0^$U;b!H^$GB!cI5+8sPC}n*#RHQFZ z`nKe}Wvnl#P;3)41`-prhsum>`jj5Fd^<&1i|PrkF11>KU@XH59We z+DwW2pv^O6e`_k{BD6(@>|6FL$o{CSf{djEYbi2*60Dse$5|&s_DA^>WLzkqtOzpZ z926Dl^A1WtdlnypwuN>e$Ej7~eyG?*kmGd7kbQTA^?)`i5L<5u;wR@1^6VXK1d=~5 z0p%k_%J*i9Jflb*PML*n0djtn@jz@1@%gBnr!Wt;&UhQ$CgVMH+l&v;?G(AT1lwmU zK)Wek(zSyk&%QzTjD_fqiqw^0r;IvuXT`sW?vk+>m3$SJLJ!ytmc#C_2Ye2DW_*V3 z1$)ES&~&=mfr^| z@x|yMC6=&*m3S0-j1o({W0m+4bchnmK8{o3OVQ(%SoU{<5?_W6RbtuqFeSbmJu#yN zdXnO6(cu{zqbFzlik_mxlCD#g_zLtiC6+XvuK3RA2qiun6}v-xD|)^X|Bi~?u|Xo` zK+c)-OQc-L`GAah267IR^JjM?Y}B_4uar^J$$>lGP;4{peK1HCb$8G2L3o9HCPu8H1~ zA>~l)1?Fz_wu}$a+ZA&UdPl}b=;Vy`&^t4}N2g?LfZmnyBRW-yk3;1c5R3h$De>{B z90Ovp<-JNQ3KS1L-d)9pV4QPco_Pe5=;J1hvzxR6VVrxSjxhS8O_m`GG0et zR^s94D;aW*QntW|U1wy>M`tQV>^dvsV^qoz#3!S3lz2S)nj-TZ(sDl+eIMp2@|+h) zI>2m%zM)9}ZXo3eWPWELWeB9NH+Wky-B3v@NS|)-u41JAy{AZjSb97;W*7KCiKQM$ z`GfcrRLUI0QYYj*AwCtA^g%oUU7*CLp$nB*>PlUPq`yAn7j%(gw?-E$@%88uMdm}K z*O;a3{|4MrKEO)eNtpn7j!LgR%kh^y`c#Q;L_brk*!*)Pz6o8S#HXWQDDgzJLGfaX zFO~G(@hjZm#Wr6naxWQtqj<5?w~E|X2Hz?18R+*)d^7rk5+8{EsKg`CpOpAa^k*fW zgo=%X#_(D>g}%CUaW5WD`N#Al%^m6$drq;3e3|4p`6!nKX5GD_S7vf(OT#sN+iec zuSBG`>PRJ$V{%QcqRd8;Hu6}ADp2etM3v}MN>oChQ)23T)pU4~>&`VObzI;rLQ_Q@ zucF>2*Q3-~A=v_*4R7MU8KrJjVZWp+`ks>9iM|h%!(d zxeNUbR^ood7^-TD!6rqk$T&~YDW*H>71sd`6hodAL&bGO$s57!gpxOcjI|W8i(try zVywt`NHI|iwl88YLB=kM1Qo&s>nRT;%18UxfVB9WZqwK3q|@zi(M5f_S;gCzS81WiWOUKtw_IV zaT~>oUAI-F54E_RV#Q`sZb14Si`^6}_LK4f(g#_Tat~H)DP&(tN149eu_N}m1BUH{FMEIJsp*OftS3M_+Uq(QjWokZKS?|y%?1` z2GUnpJXEox(8CljHtVa%oTlR8iWmF!Q)JFj@d(AsIrUfU)##Cmm-9PHvE$LB6))#H zK(W`L0~IglJxH1ZY(yv(@qS#63af+8?90MadhGKF;D9ip5%*!cCxDNwKe|xytS*|o#HaZK97E)_$$!mik1BQRFSct;%ADLy!~8}v7q7##Y#SZp~$#Uu|ctt=U*x^ zMpXPtv6BB^D>7bG{6?{IuHPy$K2ZElv2xzuD>7D4{6Vo|gC7+cHz@w3$Q;1p&x(vC z6n|0TM(D3fyaW215;sPFSK{vIA4=Q=U8%%78mWe+L^~nr<7vJyXzo}vUJ(NmT93G_6@w?j`?;-}Cv6zMN3ovFl6qh~47&r}lILHsN_ z0xl-~Vw+KLr7>m`bPQaJ{}6Nn+>F2MXA<0rznseyxR3Mw9lakO#=i$Dzj02|w_TER zc#^oCQAr;d@~I^I0<$&xjFKFPN}3>k4$Vjoz&%|_jz+0hLed|70jM9+Ut6NwmtMs! zcA5dSapnS)wyi|FX2foD6hoZSYf5r3N_`NLL(zFk(ieRl-ejNHsPvW+b3Uba;XV8> zMc;>y*#8prVTHLrD%Yrkc~PuB30_ZFDtQoxEe;)oUo>)o3#%9EUas>T)<9ZK1@ZtGcBU zbNuR7uohwNLf3|MaNm!%h9Yk2T6IYYA4Ew+bq)U5tGbO6UW>L>!t2p?N{r2_Yn71l zRoz|*u~l^kCC0whlu;pi6YT;U;g&RP44ZPShtSPn3*3*PU6q*jp?XW$3jbHpt(BPk zsoq8jPeQj5nM!P97`Ch$)64T~XcUNM{MfHwKcpbWv5)DFkR>E`9 zU6k-VbXVv>+D=7xQ^J$c-Ib8Gs(KG4B=4*DguMuV1KLxGzd`p_;;+zNun+nEE!ta2 z)$aiqPNhYN+fwG$A+j0I$Vh)Pb7V(;4k}>bf1oUJydLW7H;yR zn)*R`l{vpA(C{R4;SDjebpB#(ShhiFcSZR(Tm{{+=rpm^XkiS zAB|oCqj4XBj!~j5(5sY4@_eikNj{8IBC+4qN-TCAuSAmn*8q7K?SNjdM3Qe}_Z#t- zyqcsW9no8quqk@060VNkri5#uw<}?S-l2re(aB2K7`;;oBXo)qwnXnz!Wf;ZglnRA zD^AijO$j}Ej}lg(_riUYnKjS{l(2w4sDzTg55dEPq3x)CLFs2}Rv0OO`s2(MKLc}@LU^^k=oNBO_5G_KRDM@>jv#B{;up{hlz0(Jof6`m(2HOsZb|1QN^}!C zMu|Q_uTr9)QSwHJR-ogQ=oggy6Qb`?@=1t(L#aD8*WnhMNct$R(eLQ}@BsPS9eqeC z?0`z1K|#{?h*Ic=QpReY!hcKjbtU=?eN%}fzus1&A5qFq%|gQOiY|erxOYb7e4wx) zD)FJv#h5lu30I<_5(Q|ac*3=5szerDUEycbW(_4IEp66TBFbVL%8wBKfl?NPNRG3) z625@;P{Man@=FLgzcxp~(YRkjv1J?VSRgOkT&on$M<*zS^SBk~ctT-glynP)O;OH8 zC~SgK_k_YmXsuEpEp6LF2mH4|NuyApT(%vd6gERIRtj67mnns==v<|+JvvV*Y-dcn zwo1Z&+i_0q2C>h<=%YZmLQfQ%3I%?vodmby#$L5km5_2+`=C;2j#5s90_RZ6euP2` z^f9G??Q1C~Lh4U>sg*J#{8F_=H4&=Dnm`-t%Cq2z~9z=rLQPzvj! zM=OO+D0UMH>#!sb=Oz>=n;keep+MQ}&_^k>LyuAl;#GjzwE75ZF z2POIf{ZWa;?#tN;Ez@k!N(s+2X7bHSvY#<`5=MykG3G9gC&bH) zsWVEUJxZOdtHk44v=Lw%^BGDy1a}>}r{X4|w9|sS6Qvvr?rwB%#XW%bfdEXD5UnJ-H zAUuToNc3SPqAb-tqC|2|kHTYwISzeXiH4$20QEFF5v7jSQ75C5(HTH~MyI3mm58>k z?qemo5M2NZ2{RI{Q=&`IdRT=2W$0p9ihDFF`3;fS?OULHCm*mpNu(qkr#^-Re~wd6 z8K`fHn`6|k4wTP?{nfXGR=C+$eGRn1y&P?)B$S!@P6}(cn)=SL1!b6XtjE6fTjD1D z^;^NVxXI&s@~^%R?#bu@a4_!a=poP-_sb~u6`~gCi%LlT)xQa12g-Ln=Po2GP;9Vh zIB}Pt)Q3f{@^0O!D7IbnK5kLYe-Ytj+(_&s`TiV45tHQHV&Ci)cZonj)s|GVbpo<30Wx@Xb1lIIDm#;Ei?r-t$`H%e% z{+HnN;HF?@xM7p+tC-PIwNBNJRl8N~Rn@Dich!Mahg2P1bz{|}s;8=ERlQmDUeza6 zpH_WewX%3aab>AcYFb*eR8{Iw+OV`mY5UU7rM*fAlnyE#QaY@3cxhnigwolii%VCQ zt}WeMy0dg&>EY5#rMF7&l|CwcS^BQ@M|EX&hwA;S52!x8dRX=7>Km)?tA3>V$?E5- z7gR5={;I~-_?oCDu4z`&vZhT<`tZNs*$+IDE$ zx$TIy7q`8v?Ywqv+YM?ruia<0x7JRreW>=a+GlE?uYI+4cI~{{Wwl?jEOpd=oAyVv zKf3*-4$V5W?9ivfQ5{Bg__D(<>o)7uwR6Z11-&2K0)Bn@jcG;e6xP}{I`!>$d(8ZKiw$a zRi9P;R%8sVM89-twNk56b*W>iOQ~zATd7B>SGgSYD-9|QEsZFRDvc>kC`~F&DcxUs zwDd~p-O~G|k4xW_eyFC8ta`(=91N`06`+!aybb9r5uc)9DJXZgEjuK9Jsnhb$itHsymEwaBSUq zb))LW)?Hh7d)`E}n?4&wSo^{dyntglJSLFf9e_1o1SP=82T4&JP%AS^m_(TqhO zG}wl?p;1~6sv6o;4tg}4NI4kQFotq)6XoFThJ{%F5RMjou#|gAH8(P`b(DftiOHnf<@I!cV9Gm(G`m>Uv$~hy_V2!Ety_- z_mcVb?UuG!vj36^5^KrBa?M{d$Czd8kG&kejO)W+|9yJTr!$v6vXnM>=?9-SliwE2 zUi!q+#}^*FHH1n*M9KQJ7>;+?Y$$9eB_AU z{T}bozIwd$rL-9V4eg$(4QjcQsNDe`;0}G+y6>+0_PTGa`{ugy)_rx|SJr*LJ=fFr zgW3;ne@wfF+AghKf8EOVuJ*U~ruK79OY;%U?^<)uHIHlYOAD@2tp~2PPwT_i+NO2i z)?w?zj9D$@{)jT*0yo{gA2bU7h+4CT_=&7`J(WD)s?a+>BT;z{%>Na76!t9i!LMIo zWTBz(6)QIXQSp+$Gw$YvZl>CSv!-JN-T0pF^b~H~72H-~Vphiv6W^ zF%6&n)%Z`juqwts`x3u<8kegSl6EQdW|zXnBB~N=5TX~xx(CHUbcyCYS*&u?0T%De5yUC z{7s1&cD8-fzHh%`-TbRqIsemeS@gueS*ia&e+l~h@Jd$Qe=m9?93K46J;@cpAK{qr z_voqci}1&AX1Fw55j_|F5WW+&i)y3C{3+qD;Y-mgtl$1qI4hbI-5A{xo^C?!j0z?& zo_nNi%^s$oIl}ZeN1EAYj^TSS72n(S`3A+V+&ArLyN7Sto$OS5H{Tw($A0LS`vcr+ z_BZO#+OE>I;QJUG@b!W{+@7w7yMeDd-{i&zo4QZjZ|--#fAF*WDSX@g;!pFZb2UB5 z&vc#qD}2SFnQ3HyH;r9$v!~n8?B%+co^B(vx7*nCaobdU>$WgQxxLKMuBRE`_BI1u zFEhyPW5&2s%vJ7GGuEAE#<|nYZSGQYm%GHwb=R7CZlZbJ-E7`)x0pBGxQgH1y|&Ul zWLI-f#+~ijZl+zw&9bfC?246chOKcg*bUqlc0<=-ySOjy7VcBKn_FpjXZhkiytSwJ zb?te6TYJ9W&R*cRx1;?Y_Da8}eAC)q<$Ky2eLs7XKO%PiaC@&GZtwGF+WY-k_5pvk zebAp{AM)qgIsQrek>?v=_G`b$e&ZM0Z~YQ?ylZE6srcH}#-5e4dYj$d`sPrxf5mrh zD^|KXqv9vlvbr=h=E@*(7n%3XOxw~u;a-f_izByu zTbT{aK5jkR#!mLT`?@$`MUb8CtE~BPy4%e*a#QWOep7q7-@-2TGu%4yYVqo>ecXkw zacmi{8Lt(u9qb?U2@VVnaCf=kaT|Ab@M63McaRtGJaK(+hP#h%n!n;+j@NhfZjq~V zpS$Jm2jAXziZ_fm@?GPN{g!@fz6!F7-_`FHw}?B%E%{!^A^y~O=Xe+Yuz!N*jQPGk zUMH@K+r}N^4so++X}nFapF1bsjc=Ab=N9-);&yR+x1-xR-Xh+_ALdSstK)6s?cy4D zu^SaNj`xe2MlIw0<9*}aT+@o-li^dzi{aGdg=AJTldsgioVdg%L1Lq4qL-3alP8m> zlBbi|$&BQci5DJ9P&&nAtM#^Jq5#OmQKl9oxUWUXZFWSyjSG>fmG@trQd;dV?ikZ;cnNsf)? zB}0>8@gMO@yInjzzB`^4-xI!K8%Iw@FVn64vN_A#Z$7k3{kOckTw{9pPwCe_)BbGk z^Pig!Y%8;kUDs~!SJ(mm3wxGtuy@)e{!9BF_vK%kkL)tO;<9hhKR7G6HrOoKJLnbc z6Z8)H1xN7gc1AoUxIDfqo*G=?TL+_qD}!;t)xr4SnqZ25DVQ4E9h@CZ@KwQe!L<1H z`2P5Tpb%69ad21sV0>SETYPW)Q2eml)IDNPh#yIe+1GXo#>RKVljA$hNON(pNqj)C zzMWu}n@??%pncFGsE!Y|TgHd;)@2vo2HiF8!@Hgr1x5g*e$L9t=`2G0; z&OPqY;LG4EcV2v9e1U%}SP~x*UljKb8sa14k?~RS#qrVcsCYnpNjxyVG#(UR77vau z4_f%zVB_G|_?Y;L_*maLK0CfO{+;g%^-A_hdMEoPdnG;Xw!z%^oZt(4hF_ois#oKK z;`4&e!FR!G!R&ZQJleJh-UylnZ`w6^qvmejfVn65C|ED}KG-1mF+MK7GCn>Y6Q2-Y z6%UQa+INCQ!8*ZDK~=DfZ}NN+l-yOphQZJAuy|a2Vti73bMdHHTqGJ)?VjZ3aiZb)uSZb~L5Tks90Et9R1t&=;G zDZIDP+^uE0^WMRZykoEv?-TTLo0tQ5PvAheojJ&DZw_|d%pqlwGsRtLX1NJwhP%eD;hwh5+%vYhd)D$L zSX*?j*^--UtKB?X>lWMgZi(&SmfDTo*LD;4jos9JYd3S>+5LT`?c*ES1AJq9pl@PN z^BdaJeHS~@cefY&9qlN;lfA_6Y{&Ut_G-V69q)VFYy7_UTECyY*&k^q`J?PD{%Cuv zA7G#I7u%=(DEo}R#6IgUwa@v>>`ecNo#h|3ulc9!T>rG4=O4GP`)BMM{#pB$f8M_B zU$6`O9Q&PLYQOi(><|7EH^vY2&-vHfnr>*ky}dr}X4Z5~Obcd^wsfIsbD}%Moa7EQ!P3afh2zT|aZ0JHoun zEXMcTWP6ZrY7h3S*+cy5_E5iuJQ?siuidmH;;uhSot?UfyroxFqGhrJnIlQ+lcF?w-J_#&JWJyAJH zxnFredEA@n9pW8koND~hIL&>)o8=wq%{Hzwt~Rc5AHtpD=NQ)-*QsBrU#s8X?x^!*7}3B zy*1HFTNPHN6}J*rIc^!+A2$#kh}(%~;^v}5ajVg6-0d_McRAJLo}>(J@X1;GxO?Gl zpBmf&RA*&zZ_pHL4{vL;)EsPXVtVEfv)HuE;ihX2)qGPo9ka|VH*K@TjG0B|Ak#Dr zQ#FT~8t!5r<(==H3+--i>n`g~>wWWEb(wm)`ET<-=D)1#t(&b|aC`gZ)|J*(*45TE z*0t7k)(zHQtsAYItV`Vw-K_hO`!R0W_!;+Wtiz2O>+KplYfrKa{Yy(R*VxC|OYIZv zyV<+iL+lauFngr@mD|JZq4p;7H`X3!nf9jE=eVuoN8H-+FY71m zVr_-IEADo^Qr{W3Q_)RawYVYcXWWgHv{Jais2VpK?TmYlCgT>Sskob|!D_@!P4jV! zQ#)>W>cZ_$y|@`_5$?Pu&~?dR<0?HBCT_KWsQ_RID^ z>{skp?KSpm_6MZmSss+d}&7ON%Dtp=%s)iSkQ9ik3ZhpEHW5zw|qs-x7=@(!EL)h*O9kn+Yt z0~@D~S0|`jLuS}k-46QL_Uc4+2Q{uHRB_8qTCGsdRx8zvTBVFrtJNLVoz$JxUDRFG z-IVd_B&AX56F1=~8=(J)+W~!Lg4Bklz<#)=x|h1Qx{o?l-B;Nf8dH<LKc(>S5|^^>DRbouf9WjcSuRS8Y~X)K+z#I$v#57hqO) zsGaIUwF{DckJ_tjqaFd>aj~+kxvHuB+y{4-dbWCwdak^!=K}Rl>V@h>>czM#?o##7>SgL*)XQ;m+!gAT>Q(C1 z&=anO1a-Z7L!j9ye^75xo>y;GZ&PnqUJ!a9ZkxMPy-U4YS*PBk-izDk?pGgBA5{OQ zK7_mI9#&VWkEoBTkExHVPpD6-f5)wfPphk;=RT`Gr#`Q~puVWS1fBO~^&jdh>Z|H& z(0<>*J$7$F8+u25SA7q++I^sYsD7k=tbU??s(z+^u706@DfpJMz2scqs{c|Zs{dC1 zqke~5@P1H#RM)CMK_1)zx8nVxu2=iiyt)Ad_MxVN-^Vo_8mS4pzpXh~6B5uHJ?N)J zS~2d@E7b;RgS9fP9QQd6h0Z!$8=-9i{dJT!T1hI+%B|2UH&tF%UWE3z88q50v@zP2 zxc6}@Z5;07o1ks2ZKG|gZKwS~+aB8P4q9AG;HJJ5WWoxqQp*TULEBN=N!wZ51-JO^ zrcKgnv|32(SuLme+GJ?PQ?xxG-=?IVytlTGHWeCe8X9edHci_fzKQ9uO3%;^(hk;U zYO`=B;Gs&Tb{N*GUzCrv!?k*Cj+Eq@w7FWd)}mx0GqPRl&^on+S{Lr7>`|(qmA$I` zLwN=I@)6o1ZLzjQI})CcqqSpT^*&BpsvWPLAT^hhp}|0R&`#4%*OqC^wKKFcAxTuj zLvoI?Bdp`+Y3FO=UZx9`op6id#kfZi+P$)~cA4@OH0=kq%fXed(5}?3f=A^V*wU}V zEtWUne#0B3oOuhhqTAqY-Ua&0oyw=$UE=0N?OxoPc|X2imWB5BAZ|o_NLvZ*W|j5` zw75sL$Dl_&0Zr=fkbj=ip2mHJ&p<;Iw?RFxy#Q}@yY`Z{Mtd2O(Lc0Tv{$v)loi_R z8t#cwa@t$k+uA$YySU%*edV8uFElZwL&<9&LE=%gPjKtuXUb$%g=Y4J_9Zm5ueEP* z58}7*0EqjgzSF+fe$akYc89F6R#~WYLT>m8whP?r$##qlx}vMPrt9z=neZ9dx}&@B zA;sW5DAr5hNgAXN*2|PB@F)#|PidGwTpyutqK|}kX|%G3zNx;MzPa+VzJ)#pcSDZV zx6;SyGR{aEhV`zed{6Z8||GdWp5ML!icWS#~; z+A?@h&VUc)ESy1@1}*OxNn{(>$m8);x5kH^*eCW>RtNXxS8`_{XXc? z59kjnx8tVHhm`&Gm5?4+>5u4-DhKF~>5nVZ^(XWv^}pjT&!_d(`ZM~o`g6G1^9B7y z{Uv>k{xW3$SM*nv1EGt*4lUqKWxetV*1)&n0en|~5BGq6pns^GrhlZ&&_C8c!Oftb z!4md`{-ypE?g{-y|EK<~&_r=-=y&?}`Va6LuEiaqKkMuCUzCHC;~{tT;a1TN%5m^V zE;STGH8evv48t@m);2vO2F^Zxrl>}Dx=!iQJHD%WbAD0V(e<{W=t|_ zj9O)uQD8iyE% z8iyIPg_f;+2Hm>>UvK%wXfoy+%|?sSYRog{8*Rn{quuB*I*o-!m(g92XN6vF90h&- z7~@#uIQZ3$H%>54G)^*3Hclbg);Qf*W-K?(kQ%#?YtIe!_6v=RpsinGTx$Fop4q<` zmm4dLD~u~i3KsIOalLVa)a-9EZZ>W)ZdJZCZZmE-?lA5&?lSH+?lJB)?lbN;9xxs> z{$@O6tTY}rRtXu|c#QS!zZ*{(PebE=M(Ete^U%3pga*6Dcp19)E5@tPzh5`rFy4d~ z`?m3p@viZn@xJka@uBgN@v-p<^w!U$CjO<=#lJEBDYS0V$e|PeVEkyTg?{_9vCjC# zSPwgH9+q4M5~&7R)qr$rK~{B?OCi5{QerKJliWXLA>pDQnDHv(C(# zx&JTs+;u`5=z_M;gPr3xw3$CCCo9*&k}fobCFYUlQPKu~EVPHE&>v2Ko%^JIcj7HG zmz!rOYhjx|ODR`|C_~M&m0`+oWrTT-vWYTM8KsPd?fyJ@bKakRb$8z7<_hx)^Gfq7 z^J?=N^IG#d^LnZC+z2ng%~IzP+Rq)V;oL(yj*gDZYrctt5N|10nQz0&^^W#7Rp&liSn)TFXcYvJmos2-YSOPRto#aU|2xP zts$_246}yAHn@p$wNk5`pgannNU=3iIbB(1jj~27%dJhV&8*F#?~Q>5dztbVYfEda z@<(ecSkBH>E>bR5E>Unhr8Q2u-x?2%aBHa(ZpT{T4pJjbLc<~bFvFV8PEyy|71ozY zp$?dX7C0H!m?^kv78V)kLHkHeXg_Nj^vMIHMmYmI<-t;`JcRYi!=+ByU^QAz)?BOE zYJt`?4|-FZ)SNn?J1vyjQxEG;i>)Qrk=9Yx(bh56vAD%}sdYSbsuQ7Aoh zpz;ft+HtV`jExlC%7E37N}X_wcB`sK~gJ#Q6S zw{-_J)4Qy@t$VC{t^2I|tp}9vtp}~YSq~{cC_lnV`!FoFk4T&Clxfr|D5%_^@8;xyj1Tgb=FJDnbsP37T!?aRNk^)R^C?L!G^PcDDNuoTd!EJ zTCZ8JTW?rzT5nlzE4uZL^{(}v^}h9i^`Z5V^|AGd_37`qRT3KUI_no}z13&sH+loA zYkPLgF0za561&tMWDmB>;Bg-UEqEC8;Stb>N7|$8(e|eHX7=Xx7WNo>OM9%nl|9ZL zZ%?qdwzsjjwYRhXU~g|vw0E%Mb^l$!P_fv#;YvzOav*k{^j*=O75K&w8_KHt8;{*!&7eUW{!eTjXk{b&0! z`!Dw8_6qw7`%3#N`)d0d`&#=t`+EBZ`>*zm_D%N9_AU0U_HFj<_8s<}_FeYf_C5B! z_I>vK_5=2V_TTJ>?3MPz_A2`k`%(Kb=xiiEW>I`#+J0qM; zoRQ8bXSB1avzfEGvxPIp+0q&7Y~_q|#yb<7t(|S0ZJq6$KRDYv6P+ELxRY>_PRdC; z6;7p-ajKkZXGdozXJ=;@XIE!8XOdIn)H-!e*2y`(GdaBDcrRyfN8E0_ud|;s&Dq~M zz?tqG=*)2B4ad$b=MZ_v@oeXCdB1Uk)95rgbDd_V#c6fsIrE)1XMxl1bU2;PLZ{2= zc6yv%=Llz!vzTvBKH3pCA|K~0b&hvVa87hia!z(maZYvq=$z)9?ksbbJ7+j&I%hd& zJLfp(I_Ej(I~O>AaxQc(!i~U}IF~wqb}n=N;#}^maISE!bgpu)cCK-*b*^)+cW%H< z#5X!O;Wpx1oLimSoZFo{oI9PnoV%TSoO_-7oco;zoClr1IS)B2orj%OxRv-(=P~DT z=LzRY=kLx_&eP6n=Nac&=Q-zj=LP3Q=Ot&2^Rn{~=N0Ew=QZbb=MCpg=Pl=L=N;!= z=RN0r=L6?M=OgE1=M(2s=QHPX=L_dc=PT!H=Nspr&bQ9LoPRt2alUiDcYbhwbk@R) z`Lnam`Ndi9^f`HFgR3ZCz-Ot!=V`d6Yq_@Tz>DX(F<3~8VI3_MUN^VQEq8~&=Qhk8 z?v4u%+abH}?A;3wM#p0w@Y7v0{S2v1wwO}I%n<)-1~ ztb~`WN_hUYz5k8W+@Q<{>H!{zi5AVhTcrQBOxmYN@6Fu-o907mB zV)zt}g#X}Z_)Cs;kApAac=rVNM0j3KhOgyR_*qVak7b#=9A1?(;Q=`t-j8$P;W*#D z06vQg;jg&Zy~MrL{WH81e}Ok*g?k13E?2=rB|T5>^}?e?el&QfZWVqj_^aTla_@HU zaqo5SbMJQ_a36I4=04=EbRUM#?Gg7;_c8Zz_X+n&_wVjg?$hpS_ZjzD_c`}@_XYPw z_a%3Y`?C8F_Z9b5_cix*_YL<=_bvBr_Z|0L_dWN0_XF5FVeNE3aX)oGb3b>#fW7xC z_iOhXSbx8TefQt6^1{YT7GBttVPSTEao4+j!t3Y3tjIn{6COhwenuA_$C&UTdL>?| zH^>|8m3igf5N~LIZz8;so5A0>1-y=1dSl^_9OsSqCU{$W+j!f0+j)QRw)ZA_J9u#~ z;U&G4m-Z^W%82J>XKxp8S8q3Ol2_x^dUamb%Xz*x+1uTl;_cz>>Fp)_R}pX40p4`) zKv>@o@(zZjeHLu(hkA#3vpr!)pW`)njb4*C*K77#yjFNO=fk(Tz-#w9gm=^H^18hq zuh%=mTjVYFmUu^cM|nqk$9Ttj$9YS={@8Qz)R zS>D;+Io`SO9-Z%95c-k|o}|mY72XxzmEKj})!sGUwbGw-gZEeOM(-x?X73j7R_`|N zcJB`FPVX-7ZtouNUhh8de)jSTPwm6-@IC@>@MF>!{3JZUPr)y|8Xn+hz305=y%)R} zy_dW--pk%UyjQ$ez1O_gy*Io!y|=u#y?4BKz4yHLy$`$(y^p+)y-&PPz0bVQy)V2k zy|28ly>GmKdf$5g^8W4p$NSFv-uuD((Oc{N3qzaI7p=9vc!H8XFcH9vczcBsMZODmFT{X>7CD=CLhe zV`5vz#>Tdajf;(sO^9tB+a|VcY`fSWV%x_i#&(FsV~JQYmWrig6|u@#CRP=zj_nxR zDYmn)M8|fEO^VgTYGZY=Y%CY^W0PaM$EL*gi0v8ME4FuRpV-vczOnsc(_;I_4)AKa zTHBlJ8+v=1V)C`4xud;lzFo7RzM-q5-Kv?>)pSIYUBlqs_SSf!E^EtYHb>7~Tt1WW z;F+XwvQj(~If5s0H8LJg8INb0;%hivO%;c0_*utKSx+KY%lT_Le=X;)tu$-eI$P>J z`M0N|y`x)Z^y5^ZAFt*x73{}rIZPG!aoM4SAE%1^xKHu5oWGXy*UJ3yWQxnFmF2|K z3F>${8$79jeii4h;`~*duZruh;`*z&{wl7&DyYBC+N*xSoW^?5oOmLav}u z<^IPRAMqe8@flC1QKa4czU)doQB`AQ8|pzJoSb@=i}R2%CUb12kYg1QKcPlf@6n84coeCHoae;)N;!dTwQz`CLiaV9% z{8`SQWu#;oc|2{?@r*uYVj?yrs68Z>TtGn8nGjY7>YCErRewa2c#-@M)H$U$#G%e2YG|@50u*%%CzO%C)E07QlWF?7It+iLkK@vGC8_%F@CO#q) zEFO|fATR{+L@v2Bw;?UtfQg*d_U@H6qo-caEpArV5Ui&M_KjiH@f-mD)bkq(ohN+8il?^pLUQWrUC>tF+e7%t@&cEmUVwrmJW0zUW+%<@Br&0Ql9&&k3=ak; zP4g_5W`tER!YX*s>NviR<4N#HY~GYT58c#78?pT&qY@qeJd(wG|Zc_LK+ znVc$klq-mo(Jfu(O^XmtMWr>ZKsZU3)RGBCR0SiZ zBB+WHQy~$XOeB15+F}~Rbe*-oEI79Rz_}Ml8|#duDfQ{UZWA3&I(hNp7+0rkD+ zbV0+`bXroTGbvA(q-;(XNsneRE)%t>*ujPJ8Nb}es+@JOtdEKh#4De> z#8V)NIX#_)M}EqARmy>WZS*U6epT?is^Ixl!3!?q$-F+_hT4cUa;(JWSb5DQ z7+(o$Eo2TNSH@qWuF`B=($U;vGTu+LR*`do|?kCv7XUbCxafd zkewqP2099lA?rdp)`4;?MCVws&XFPw+F=^0Wg4sFKGZRW>zI}TO&~?}r)hnRrz?n7 z(%FpJBnG)?c4vK8&&0X21y|JPPE6p}YU0NdSuO!N%$nQO-aIj3LY-^wj&;jNPglqM zrV{yHzJ;0ZEJR>tv4 zL_{b=S7=(KX~{Dt;-bf>MUySuf&vf^8KeCgL2DynvMoC4X{Cns2%pEqCxs4{ zC2D}r;+f9~_A@L1XHrH-Fl*CfM1Z)TF*}+9N=RpDUZu$v3elFws3LgMd`puq8Baog zUC0xtD>{+VS+h&>N%@yzin`{ucDD#V3FEdTlw>C7bTPSET@i|@$XHzkih)8v$!Sr= zNHC?ae3oPRELY(hU7{-xM2-;8?r4)zzSS+Oh;Y?eT6!CNJ)hYl6TpkyMji=fD0?X3*#BDu=I8j{N-f>>byvU(+FG<)T| z6H_bB(q&SZ)OjBE%6Vw^N*$)R|7`5H%#ehe2n?yD6l3|3ItZRphk))Pag8U>nhcLY zhQ}Ziz&dt$ikLd+*_cO z07Drzk%<<=BGemb4q?8)vXskUK*+EgSe8U@Ll|3@&_a~s^s=c<8n1FWRANi~2i4{R zGgZzH2P+p2YAzhq90s+t-*9k+K&*J9(F;n=$P#F{M2%uFMPnoZgo(ncGia%Kgdk$# z*crxc#&;J5crX{qdB3QDIcpK=gNwom4(*Rei+waXw(I8DN0TGp45ac3y&=>BSyxK? zu_cAli8BQ2J^OHS)z#LLs8+~C;&ZiuA%uN3X)-*38A>97@+oH;o+)!l&|SnzH5gD5 zPSQ{vAj%8Jov{S$#yB#V*hv!b;0ub|M-nfvj+DLkj>P&N_4ovSg=!{u2$+-W7c8ik z-AYxb%>!DX5^4vvYWudD`?oePsMq&xY1Q}bCe=8_`ke3AdQ%rPHAijmWcPr5o@R{C z<_w?Ni_c~azmi7LXY+>7?8YafV8Ukyp&wLLL)HVIR7k{=6~Jdz)2|_GfzK+d@7Fo3 zWs|Cv;)OcJYF{dwwq&0f>CElZq$>m6#3|Bj8H&@ih59@C;;cqOvq8!mvjKa9%{&8<{3?8Tl9HxcG?NIyqdHW*Kub3Pk< ze4g??`;&dPVEF7;_Sur*vwztq#3%gPj4=mOz-$)N!fb46gD2(NWSK?|I}w?-e=QeovnjZacs|>Wd}bFu+mC!^8$MADL~BMm%?*Tk z@Yu8Jvr6x?ht($!Yr?O~m_35g7_DGWgTU&6Eu?>$pNtE(ELc% zCCzThE_>>G?Oq~=A^hyux6%Q?A`VAJksSd(YXUwy1bo&Ad^VN)EG7G-{KDuEOdaN! zZ1DG4;`hl70yDbgF?eS5&X!gOzh>by)X{3QLvVFZ{*APVZQedhtUlYmeU@B(wt@RB z!TM|q_gRwl*>dai<`AFEecDm11`x#5l zTW0M6qD*qX%Jbe;^*waKmiH`N^SzbX-4qvJmEW5_|K$r(84fl-tKtVM0fOcM>IP>%1eIx;iFG?c0GsCk!PCyQc-S5K{2mj$WvhfH>W)i{*>k-Wnv5 z8GGhhA{QP#i#qUdC~MDLTjmwdRyfiTFG@gsH@0`Ri
    =9V73*s{c)xy4)_wIxQS z$~rx64iXn}sS+NeBISD#mm&jBLkCt&BzHTTx>`FLMRgLncp)rLY=D7Ql-Jz^V}roE z2*7yT)ZW%KS2U|w2D-SBMRTB}Hw95L5JZ)T(EKJCEoHt^4hGSK3x#r>Ws#B@G`LWF zkSJ0IgUX{7F>FY`+8F4zEa_~KNG{rbc-9sRM}$ucNc6eNJy%TT>7BDOJ+b)mq=)+}6|& z%$$rEda|OXCKe&)7|gUNR9046s6>WiO$)-f7!^tB3N_2HFiiu6DH$m5--c*{x@Z6n z2lP*7MU!NpcwsMQhuF5-1@U{33}M6qfZ5sAx}YgWTd(9h^_y0DxU@2kgd%-OB#{WB zO`UB`L9ECYPx6A9tgR^m$CVz5`o^X%kC>Q5zUWsnRq56X(r;^Ol`9XnnB-hJ|78aP z4j=+0{pZ!-Urj2nFr$k5O(!>;OI~3j75AG(vB(rE=|6u44V*ls17;5R1ak(_Osh8m zeS{Xi1R2Y)uOhCaj}oKUE##oiMx+wP% z;eOml0Qz$uS+pcO@y28)Ux`S?s~puiG^%)rQN=@yDjuRMc!*KOLyRgOVpQ=Eql$+Z zRXoI~;vq&A4>78Eh*8Bupc4-)kPHdf_(sZQf{c~e zXvpn}$4lxP8?nc_8*^q}Q$vp_jVF*~Y2)=oS$8q4#*htL8_1^|M22>jcq$3`L2SeY zvgWwbp&;I*43c6?aw?v7rN2bPI1okVG_`dsf`SF*OTIR62FSOx^785(XXM7oG7F)X zHDTHzpFAm!u|=qFu5#G+sk_!Mk*6rh56)F3@`m<46BIio;nK7c#5a!_Lv z+uAyM7Q^}7 zTHg%($!Bk;9N-F^@R8vzqLT_TFeNLMGqUxrOxvlMD+IIKAAWbEw z$mxE%; z0|HMb_60B-CtnQ0#CZ~Ij>A@dSzLmQ>d6G{SjUr!ttGcbvV#1jKsKwXw6z60Zpc8# z&a-5ihc!(H!4RgIoaXLe!<(#+om+?}ww@-#9l}&^nhuI3(@ZXDa%&(={YjJk7hxKj zG}(C(CbDAtEy7$s*>Mr(`e}zh!dySugAwNXX`^p4P3Ctz32$jKzaz};U=m4_`5kZ? zo-~=?5w0pDl|{(46N%Cr2T5fG(nkPbi6aaT4h@k(ia}CVjbs^2;%Ed?LdKIY$HHF0 zykY6%TFbEF&wCd`-X4( zg>TcsxBbJn1H!lI;oE`X+l=t-pz!VB@NH)BRzze;otD;TMnFtzSp+X;?TSKVjOJuT z(#{>^k<`g~SUcvB6z;-GiD)S*Ix)DM!o*-eI59XJnHWKqaAE{W3KN4N!Ndr1gcE}S z(TNdcVcQ!AX<~37oERMDi4i0vt&V%oiXDXp&4uM$&PYx>D3~JweggYzP?KEfF=aR$ znKD6&V9NAMA*T#OqEjXSMWzgc`cD}K3Ml~ltCLb|CZrN(<-BBc;?z65vu2)W*`tx#ytG*zg|hDN!P4 zOHwmS3TIp+xwy#4dDKE=U9=0SXwQk7xqO^ME#yf@v6YVINmDZj2SlM<01lpt0!pU# zLkJH=p$+12Vx*?JC{Alatw6w}7F2#umrVMODjn-2cEv+(I z8tq)qluZA1j}loqAGtx9LV4H-DJQ3lK|oHR1XJXBG6B?tei8`!NFMKTWA3mlaej&?;VtEN5E!r7fj%6@H5Mw^+WW(qU4m`Sbu zs--63z)OG%agc=qeELNOgmIe*9lJE6jY?5f;Xw>0L|T%9Kr}--ick2_f^wlmMJYKQ z9RsHRl8#@s9TUHRXmK_nQeIsY>$!e4MU&AY$W5(?HnpNqG`K*tw905xE2B-VjCK!u z%nRMCj1~>!K_n(x4>p{On3BRy=^!DfG&&uzUpf*~nBGWMn5YOu=QH+B2YC`=XLKQe zt<8}D;in|rr5uT?inbgZ!3(*nq8O^8@=BKcYRJGJqUeWBNEA_8*y$T75})`GkwTb} zgelH#NeHn6aWIa8;W?m4dZIz*FtP-E3<1Rvgh~(__#*|1egDxQ_KC~jpu%Vr;^lZ0 zf(f~UJk0#p%~AzgM4&)^ z2t@NReQ>n}dW&Npw5+t>_DcyFxNlJcH-U>!d<-X`M`^VVTa8a#6qZ{A3OL0!!)Q#D zk6;6$ETAOxnag0{K@gN%5R%2K*fAua{pICMaTAQ{Xk9mDXA6W#h+P6~eh{H@a_0yu zG|W?tGPS@*H(gTa9G}YsoqoZck&_HbLIKn2Y9_&GsPf@x%r+&^7xp&)X=_+}_69a_KvUBXL}4le*7j4wV~ zQ^7|AE3#RqnfuKJq)VFNW9q4I14rrveg^js;@IZ^2-*_OG9UqB0im)J+1NaB0tAc= z1sBa1?$P!(9PMmvaOv0qz-F8Hc7x22sKIevu{WO%l29QPX|Z{~t-hzVJtj}S0-=1A z8fRr`q!P3t8r_yo29SvyA;e230Y*?d5pW2d$%IarV976@#ITFp*}0 zN%M!C()1k#oS7n8Pt&Oygb6cg{?JpJkKeJICeH2|Jf&L;PcA?BG7z1N160fadpxz(_(pTp#~!tnuO5bT~pSh}5rkSnnS`IeuY zk3VFUrc)Kb3sFSc=XmlKV*e}Wqum)uN0gT436^FGO4B)M92W@s$?YLG75dHjf^*Jv zQV?*W)HIz81Rgjaoyo;<3!1=brpPo?c$)U-0}q^@_uaGG4*d`M%lU&3){z@gh}VP< zd>2MA&NQF#Nz)l$A!IY0+ecsdK>xWsqIlqk>Q6C#**l7}LX6H!}WAi9AI7J6vA+d7-`0|N3qZU*XJS#0(T*AOAQb_S9owBvQ(}hnZv_qJq7f8^agh<)Ym9N5& z$ZuFvC>L=Pdhc0;jUr8yYi{e@o+U*x366zh z6HenGsuguHpjZO%#faW+a9Dw398ad*F$uy9!g5mMDSM13`=t3SN`fXO;F4(Zl#>Hb zIrwlLeF>Tt~J$PHY+~ ziyUD*<9zY}>v>UBrsm{SZJfRrA~YR_2gPzcojJqCXfBp_Ud4ImRlK621Y50pS{v%c zdIdJ@kaKGG>|%Op7hlS#pI?k2fP1&01LFW*Em!mi>#a%_!2#UVRo^BSy!oZk00ex| zHp~;^mq~J~$CKoC$CKt|l8;>_`8#FFV1eiFl_mM?Ka?t2UXl;~Cix(6GWcL$@Wqbc zJ7atdJIUW@N(LWOVt$z9`JN2g7sx_9|C4-jILXJ#l6*EW$@4zR^FGOZBAHFohk>BU zH?;FBw!Q?f*!U8>Vs}jNid{2uQwI*N68lVMbEHF7R}{8eC$Jwy?&6Z$?|Jh|xFd`| zHAYid6C%jZwN_}o9AKP~|SfFNw-my;D-T25w^%jK~E z0CNCMDE@L+igx}t)J6zgxq>axmZj}@uxCUe6&5qN7Yk&6g|a9 z&Qp9^GR1;qiccY?m`+oC+AzhZ4O4u|FvX_~Q+)a`#it2Ve3~%DrwLR1{jU_UsCX)% zVirSEe7Z2jpG!&cH=$DeX|)u8)G5V!LW=c-6n}&(#bQc|#g-I*r6EQ8CgUmEg@Y%r zt-K2d))YA!S>6o^g(_}g$UD{ zoaOI=W|?}jOf^}imMkA^&ho+LEFWyn@&|&lLA&@ff?57_NtQpqm}M%bSy0XM3 z;p*Xj@L}96AI8n{Vcaas99dTVvP_j(rur-^2JwLH@<+Cke9AS+Vrr7w7f-U-m8AZ~ zll&RKWbj2XR&0`dyfVqupJeejS(kI#AWn$o%O#sgu+k)1LQJw)ljIFqXd-`9u4cB_ zF2Mo%YU(ih+?@Qn7?#4>u({w^SmY*6ZG~-a3BG;T#bwY(ok}{Jg5AbKH=-2+URB`%5CMG64;#HW zfCElR85k1SgrWwrB5B`Z^P4y=By{)LVx<;K55x{aDok8MgX{!h8&+6o!RFQ0P|{Z4 zg-wFu@S5n%(2W|ailv=Owgsh!m;L{Dsc-HVg{eFIyW)RcLK*!{9lapHyG)}-Uf+lD322b;$s3&V6^yEhaHgi&Drg8V+{l*aB@F4Q35Fh z&s!t}7eFLb*Doc!645x+DK18!Qs6C!gz%*v8FJ-cymyKwgR4{mwZ*ct0Vx+t1O*|7 zzt>X}yi5&VrUx(e!An!{(hb2Sg;*$XSRwiVxS<8*2ZR$lmW#A;81A;#Q(1PIj!xDy`rqaVv``tg^6 zU|k&o5ECE3;QU1a8xs+wL4ffYhBv8YmDXEa(Ix4&KO!iXb4)VqjdnloEx6cKw)~ z>cB5(wD688CYmABA|evN^xc?gdn3T!hB9ieya7ejElQFHV!16PV5#U0{5j3CjRC_i z3tnamgv}Q5#k5zAyITr|JYVeckA_8B4i6IX60r;@767={$iXp4wo5D#+>lsMA$ZH+ z;C*&CK9|-52r(X*&M}MSxwRmoWVS3wW`NGnAGHnbIN*nQBylKvA3zFhFMv=n0z(b$ z!wNGdnl%EB<{w%}E$Eu-i6o8|U5@Al_4KhSi5*loBnlJXglPg0cR^m4Kiqi1o}}nX zF>ZYvin|>r;C}21WhZ5_qNro=&mNN*o5_r^Q@TE;+#Z{e&#`t2>iyWv1QClX_Ye|E z>~h3Q7{xGDpV7Ckx>&ny{oS5A$$RbVSbkOP^^3MuCvAK9s-dgy-cFsg-Kt@eVv~lh z+G4wyI%(Xbkzd7DjausES9wbZ#V3_c>K&&}iY*;^qsV^tptD8(&O!OZo%erICQUl7 zW|F#e1MZ4nwNzato=Yb!UA1)5`zw}CS}|$T;p!HLPkMh60+SBM|BANZHAOMLH`mAx zD`Rov`&fOvHX&tMwpxx(Y@-i(Tit%u4aHjz-)g@l+vayXag^3K%3QN?5@C;x>E5_U0;Asy!jRHGo^s;hyvY{PYmM8aKah4F0rs{Knv3 z3vQ1uq+b`5<9(x^qbNbW&N~4-w!Qi?!Sj0=KiNV036vh~?b9zP2_eJ#EVn{=FTRV?Co>5rJP7)Ejfv zSrPc60PZzw4#E5WC5AeL4_2>We9YX8w6*k143 zLV8nk?g-P@1@Hm&%W)o9Kk#S?IuGy^;??qg{`zklHsIb~#oP+F%wjgg6~*!nO1~XK z-wvS%j}vJ>MuJ-0dpkkV)O|2_EN`YO;BVmdV1_qI_$((#zdOSZknlso^ivr=OTrJ8 z=}$&muC&LB@|9a8{IFT8Q9i-TBk4I7O*37VmF(xgqVV$Dr8(!N` zYi6u7!A%7(RaAAXs}1BYW@dfrTK&0S{EhM%>&&0-viJJAYoq*zF}x+ZMRMY^cSR@845x-g<28w)HEY8KvEVmFBv9`Stn1*O?R6y-`?aa9gV2&%2;?w)L!jVldAE z7krZ7PaEG9;BvkSxUS3q6@y=+J)45^l~nndW5;Ch zCSJ#mnXy*${`}ySvXX<3o$Mb|TV8g;m$&BcP!GQSmO(|;#~hMBZ=_l`^~}cNLG#be z-)yc~aBs(~qxUTLj9J$m(Rpjrb>}tiafH93cG13v9*<#^4X_1+t`QyZn$@R8LzNj| zJhYH7TscxOV!)S0;nuSoPL>S%Kh%-1oEJ9SlYdAtlnXJUgV4bkuBYBcNhwNsrm_+r zDI2?`W!vSyr@I#xjkxVEhmJm^ZjYj(@&A{u8#DJf|B~x!cHjM%YkwCOFv*} zTiw(B$XQd>m3ro|qjz8a?xoY_+|;?(ImgWz{qTfS?tF2`*=q5Fx2b!p?w@Oqy}M_x zo_)7j`g;C{)0eNR-)7&Qy}IsNS_ck^ig6qB1)$grh`$`DOAZ@ugC2lBUHzX;F|E3x zx1Kg8Q!=7-i`a|ODl0X7eBq?=j z+J+wlTp&-*1qrvF)}EInHUN&b^_mD=kR+AUtWA%=#hfB|Ymi>l8QhC{3z_doIWs{YKi>nqeDDBNq=aF0w6LNvjeAub#+3Jfx~ z^QX*p+;s#Mt<>K!p9r{-yUrRWQUgAL;G!4S?g*=rhrcQDEEbS(QJb+=Up~4oxvyBu z=IpP%CKs?xuFHGZ5k`zRB}Qh8x~$de<^e|f!JpQ4-54B`b!Y@GL>E~O6s_42xL6pe z{AZPaMBri)Q~G&oRRk_3J;9sRCkPIO27L?R3$?QXoJ8A+5skJdEFTKWVUhBMf+EX@ zf--{3KdfK*ooa8Pd@0ydd)6th6v~Of&kNxB-TRf(vf&Y?jOHlZdTv8K!%=UDe=sdk zFUB;)2iD(cznc}NG_13Qd^QxJI|}Wxg(4^03&r-{0B#;rXfG6`O#`^`W(1y#z`u#W z{RsR`sUQ*lCI@iQ&PYEbT?jcsv=_{G#)ewsP04&qnfXR2^htf%wd-%me>IR$B}-i? zw>_7Z|? zmrHo4B-v*({2mDpl_dK(hF>G$p^{{u!tk3UoP_6V(atk5gn2vqxz1SFUCo^uJ33F*tz_4 zL!>Sh%Zi>6GErrwJf&OK*e%Da;mAKYa?s|n19MJ%Wpe0^XN<|HM!y2{hh45i@KDF* zUpeMPQ-$zs+mJIteiI!&dA+-n1+zQ=aLCq}Chp}Sq`lCi`f1loW%qZony$?IdIn!p8$xinjr#e@h$?OLAs7D9wgYx9Zlu)7qX@$KJE}$+MIiWduG?eKZ0W?4Hs$ zYf}lHFBiiSz^$j$FC{q-ET?tjk?RU?nlZ@lr6YryvNA(Pyk(IbScCzjE!$h3o&_X|CxzT03doMD5zXnHWDK zf0)RFvS7x&x&v#=#ly};ceRa z>$Phyn0#)#c68t3tKXPU%a|M?ON<=C&tYV~L!}d7P9Gqb*eNNOTrp5Cv3Hho!xaPN z5@!*?2g)VRnG7EwmpI2Te1KfyoW$^mT%y8kX1^wlw^yheB&AE^tvXo3g)uLHOCz3o z3E)em9CL+wy-eS*-+$|`oEcJzA-sH%FYDKTVPu>q%VGHp@lia8_-+}`a$3HuA3hLo zkkL6bg7(9Uh=Gm@3SU^tkki zNw_yR|6Rb2^c|(hPw=<%KT9}PNbPAV#}{3G;ol=*)1IDh7FDB6nc8};?}vaHX@e!R zBlzkKcLt?vKSbb9D{BI{9xK418s8AY%cUa2Rq<$j^UVAYZBbm+;Aj2?jB=|svVLZz40Sv>qo#EF=LJk!f<35HDU>HVBGK?n%G7P{4 z;RkRgc(i{bqbUep-^l2zU4So_4CV=SsccWfHva=}{qVa{Ji^NW=B7UZyh8{WOWLMX zebmva`*42kXzhj3`JWzEozd4DCtQF1FUMXFf)iOjkkbnyD^~lt8?F{ZFX3Ow=|%9j zH~ck(qY99afMa?cBBs}de6y&Sp|)%(nbtHSh8$O$IlF;_V>0uR%BZOU41&#Zp!Y2I+BoM4y`W{aEoJDsB|OyZtveY0jf962!n&W~-%B{J2pH-j zIU&B(CQEp@f&wlkL;xo@m>9>6;$IM`WQYC6OF0aR@De#8z6>n~aCl-|AY+(YOIGrw z+8}F|whtOH!GgpH@Cg2jj9|^V zW9K!)Z(n))*!(v`?!BROTy0gldehk4w&lx=t$%s_A77$YlxHpy6rp-(4Wwg@EO-L0 zp`HN#g0?M`Wtm>ouUR7fz%aSe%vH$eLUkS@BvW`}uuDdYfx4}aQ8TK##faUGI<`hD zR<*wJSl?M|AD&)abg1@GzVR6KU+Q0<@s}RCv-&~bf$I9#%r$u}|L~!GtF<4`Uw;Iu zn*p^0#iO}m3AUkjfd2_N(6LH(L&9N*JWvqiKseNn;|LC6K2i==pj&Ar$S;e+VOoAb z8qna6w*@+MbV~~D<&Zy9@M5TAu|GyvNA=}L-43z#MQz%reTS-#f1F?ZW=AO5F49ix zo7kslSN9zv`>@KqL7>(slPG`Lz`~B9JqUgH*8EC%MTN?fQY$Nk%>nyvP>HP{^yA7U zW!mj^=Ps*m*m`blzD)i6(|p%k`$4!JtDUf0McKsVo0jaH-);R1+D$Lz#|532V-?CP z`bVhPWSz=}HuaWv$7skbuYz|<_WqGXBJ4)ObrT2)`ErpDNGJGH>b?Qe^^dF(0X(+5 zkS=8hXGPNkxicBLfQ#88%b7rM)DsF3`DM|3fH!fz-J@^_DK`@wvoF%F*sjW{8$L(< z5VZ8XK(M+A#CX{CKcH3}W}Rp00*c;E(5^R5k?3zWPL=2f3E5j9*B=CS#*}ZS4~P2) zvqD-^Z(zOpRcZO}Ui8pA>LtNgQ1TCmmnoAv!CUb*xe`MUP>IYwA&JM;- z!YxrwP@klgjf6?oH6bDiZ4<<)3a2Y(Hpq0WDsiF-oV=;7DoIq29BP&gaV}iB_N{#9 zCKb8#=+fbXZGnKvn=QQXpaU;GDXyQ3?(bW9)Ga40dElh2*WW_*o8Jh5Qw9Ff-{;hw z!wLM2!~?;f)*j>jmW%d;>7P=u#)RbvekRj{5nQs-a#pKSE|3i38;MU!->mHs;4@l2 zX!SeB=dvgqG@1`+Gz!N`?+eBuY_S3kT_4`2U)y3gj>(N!X8#*%f33p*Hmccl5H@Ke zrh|N`hJ*F(sgo|AzW@0>WA*#{QAL%c zuGQ+b1E@m6VQ+sp0vDW$(mzWZ>6ltz>mfM&Y@afn3j_XTr zTl%^hTeke+dSNKXa}F9NEWR%aaaygB4VtU`OLmyxV!BE=rt9t!-o0Us`4zUo1?itx z9z{M~lw-}buEg3lPVS+|B!#a%1M{wMN=(!un<##>Px|>oci1_z_px)!^v(aeS;?FO zC)6BvY5v;(+EH`f(S~IiWh?KQKD}sA(Yh1F4uRj_aWSIQ!PV+*k&YMOPirr13=S(J zQ5YdgP!g5%tlG5E^z(Sf52Y`Zf8TGUZw<=N?ss#7mSAHy{FTi7QxLkzbMg0)#lfO103Msz4_-*WPMy2aa?T6j5cr~aYF0K#zC8fnUYk}Ilw*|?+5?a6 zEdktoF#^y12Hful$Cg&c=i~q`+9NpzjgO=kG2;vEvS!NooS1kJTthnR2anKY6yC2N zGv&HUw7kZ6~LhxN({9BCI#;C}T>JX)eZ80Y6Ov9ADUntD7@E&Ke&KzT*h4O_y-i_yFIT;cXJm8Xw@>F?^PUv&IK_lHqeC zoHRZc^lO99|H^vwsue5N-}QU> z&2QmR(x=!m#P}w9{SAgJ({;#kDddZUB_l@|bS4=&Ru*kDk4Hvnd024?L(&efI-(>H{mz z&HsG+E%~3%TM^w!AoMk{A#u1AwrRIzVRt1qKnV#ufAh0@4jX#TkGJG69Ieh?a@mZM zl5J01oL~Kxx#pNxPd@pLl!UuTbPy9Jv{(Wz7?Fg-6avd9 zIVxlebm>4*p=1TyAp5sNpQ~ES^XvMK`7iC!SLWjfsxPj;=vNIxMHhnDgvK8eYxhR? zUG}dqy7g-Of2lOG=jvD02KbO72KtTg8)(J_!W5z|Qcso>025EH%sp}f*w`!p5mikM zA6`Co!UVhEj{nUCS^M~&6SjEto@2-S^S@36bXP3z7to|*^3yE?b6D)P zg8T&AdS$rCNN&MfvY&}b zlVLJNm0_TVDPU|+AqP@5hh_XqWfZ^LUm?`d-TP9Vja0CTazJ6Z|RlbP_2sA0p|Y z2#t{o*o^*gEd28#aIx^qaM0qs--#*WCcSwh$H$ke36&1UvfFL4L6qF*0 z(gX!W6zmOqLu0{)JsM+)m{?Jh7^Bf>dNFy^OpkA3@NT}B0V`pbC9y##bfSNl+mTzcAtVx-8xWvkG;H=?? zYfid4?EG`xy3eNAoL29y99KQw(kgCt_tia@l=ZiB*wo;bb8dE0VRt0CM~&|79Mb93 zwmEfqIVVaAX7n$N7@pbPwykYw+SFVjPvrsqP(#l!johhmA~d!jy6di#K`2)r&( ze@@VET6#f24u<2j%ymBURl0*+LDklwiz4 zPh6X6seAE-low|^zTvq~?^gQejNCP8J?+}s_Q)I_HK}iC_K@Nexq0X3Y(Cp1#Hsg~ z=<+q)3zKG@%az@e!@;X_d~{Q);~*y}?a%7bE#K z?~ZeFthu-5uKV3J=N+9^tvTgF{uD&}Nek^x34`9aQ6t*hWSw3x?{d-pRSV{?uj1My z=1ryh=;JU!L7I#U%@(^5pc=RE<0Rd(xDl9KSG;hR9#$ zh8n+Mpbf^9X@iS8h(Gv%U+BG59JoV6dBgWsJ6LB%_HY~DFF(fH(&3?z!ZVV)ksAM) zDbW>*{+8kj*94YHil7`#<2Q7(l)tsVq4O}~w^{b6`e)N3Nf?9ADRGHvedAB+&p-^I z#Z(boJqBe&f7J-wz7zkOi9#2 z{G|rp>bsSgDLT}=0vCfh+djaNvUdS?K>*8slvSP7yV}g9*5F5I_@w2L)PvSC$HAlh zEe#3AV{>2(M$;;2`nKe3Y!1K5oo0;G3=UnkRsp9*J?jU?`R58aJ#k@HhpXPjs-iV!!{C?=ZY4`&nMGkNe~bnDS`JNMRO4^- z3OhA+Xeh04=j%z@R$AiX(Ys%3vh33ot%Vuq?$M`jYqH=?eesuB=B-Fza9pqm4c;(0 zdehc~gsq#R`6}h_h+Z5^|7hK^k0v&ZkPtSDV$1jrMHn&EC2V;y5hYx6ja%0u{3?G; z0jFv}>L*(^wWWUOz9hkO5SF4oHV}kP+yp0IQ-Uk8Z@-Kc!nRiiuV%VjL1wI7?wUR zob+w@Wo5%DQqh0(=>Gl2jP5U1B3jKxWJb>Wi~>2Fief0k>5L?Jwu&jp#xIaPNHH|P zKM;mf1p|0e6Zl8`c?mAf2%87(M`EuJ@qHUCxvmL+@uYsDIQiXA{JJ|2>zmz|l>Mw9 zI*}XC7m2Oo*B6z%5KF#kSgYRwAwW->HtHWC1lWxUwnfUF{W&S(3R8o7L(@jk|N1HE zLiUHPSvN4c%fzl?tBG~lTtk~S+{gOGR1jn5V**Ma&9{dO)(X59}NCw$6S_bOLr zSXs3K4#gjJRO9T=EUVKWUaovp&eD+*}5i(x9YqjZ{Tcr$|Ai$5w~Hl+i{q zALFMqC5rkOuRnEO*k+ZhQ%utEEkfhh>NcQ5D_GhHK*LTWKH30h{=0_HJOnG}1`j^a zqf*#+U}=3iyA)9Fg>a}97X5zsNh zb5v8<6+%`HNo{?ie)r2h=|grE z)Xkc8I%mq6snaeNW6jy_99H6}!j9u&KB;WfUME2($Y-knf~|o`=d{-nJeni{A!Qpw2divQGBB1V2apWRyE#g$JA*9dJ7K4Mm*;TvTUHA!7odvqm6GUOwSwm3C%Jk8#DZmc#c)Pp-DM zNuQXvCc$#BbUsYtl2LOvrn-LT7H>3x%}dUw7s9rDX2*qcU^q_KQ6D` zUpHV|cGz`wE211;dLbE&5m>ngKwER4lpQ>O-2daE!`Y_^f zBMK_|+Sq#?c~F~X<2GV(-ptzZu~i@Jn6M?Ud-?o{nxD#5z_b>*9k_E0Ua7kYLCq(5IgpBAVp?{ z^AWE4-=2w@y4RL1kB&YAKj1M4WLdBNnn_1bC1FA(!Z2xRzAM4M$oNVwXko)~eJ7QL zT03~`tvVmqF3hXRb#!buN8j4X;pKI0LTy}ay{zJs zTRC~VM5lWl=4&iHQ=?Xi`eVn%`qkA_U!Pu9R|J>hoJ)ng=Y@skmF!w$=k5zH7$~Ya zZ=fi&NP8V1)DSLZ16a-@o{c&xn{{Mfx49T*2tEiFyLSP;?;Kdv(_ zvDz+qJuh+FSE~|NO$xU-ruOTb*du$}q#oT%QhVo&4Abpzle=J}OYytQ7eBf(t$jji zbYT0)5Rc3q=hG9H#(DRO?a+H@X?%O=6MIa04<^l1rh`FFDAPS{WP3rgqW%$?`W)kn z8QRi^E6MWbK!Z;Gp4uJuUMS`L02ZIqGJmzR^RrOmYyI1-+t{X~m5XCX+wNVg0`%vf z13TXN+~I9%egn@BjJVP8>a&mpLuG(gSRXJHXoc;kh+fQ~aJp_x<1o19D*3S`IP&@G z6mV?oaz8l)`_kk6z`&8)({YXc(+UF^mm2BHap;ah`!n=o_#10X@uNAr2F8|_ms*+d zW6D;hQqhs$Ui&0={Rfp(%6nN`rk#9n)Jx}*xGZYo=Im}2vnOoM>0a(~@7AJ*cVbSUq<8`LPRV9x%pKXEEWUrzfa6%$BOT4BRWHxY$6*@ClYy z@~g_$H2!`Tc7krjT*tmA`n4JyFGX7Qr=EqbICB16QXp>V(TT-b$@-_G4>V!3s$Yi| zv{MDCpan^D{RTXr!&v7(cuc+N$2_0?&hKoZ<{F}(Vb4?2%F9dxf$e}Z&MD7B&O*J5 zD<7e6^(-P{19{~1Ou|7%)JS1~->8QwlC7^|fqMi522clDpg-F!aC&+k1UpWh>2KNd zWM9k3?pQM5-KNZQ1(*k;L~v)n)brO*a)#+bk?32+O< z*t){A6-q+cc~$G9g^lw}*&5`+tAvIx+ys@A-j?HLxjpLtEYdFEJ6qN)Z}`-*=DD%RTOJ?@E|FdVr|7spv z9+SSKz{aY3_3|Z1{`P)v-@wCf@`bR6aI2hQc(OjQpkmz7j?>LWrt>}gB z+H6!N>8w2tr&u-v+=6hBJDHp@ihlBxqZ;9G3up|j{WLgoOTFXBeNpNK?qY(3 zq!@vlL1GwRh>Z%VKs-G^8p=AbLyWQDpfwlm*w){^gLSx7RHF4~4!L^^0wevp+70y@ zk!JUq{rIh7n4m>ct~PFz+PiE|lBHR-AY@K>^q;~FF;EJL7-S9x;b|rZ4gNiKDBjXV zKhfpj(+3nXs;d6^_9=sk4>72(a!?o#Po5ul+(q2xB6|OSj@sA`qrT4QiK#wynl!&; zT=U_qT`hH}*eFvq-IX8W+KjY>+ee5@TJ_>BTT~pI>DuQS6MLd|{nN)JUtm}!?~Zy5h^dn<F_c_2F=vV~{H0bV#maaHA+6)79I z@WVH8-H^BEAIWvuYS(k}xaHTJ{cT+nD)XkF%pShs{Y8ld1AWaMy^daV+4g;(oK0Ds zD`t<|oY!NH7&&-SpCqrC&?U=07koyAWz{X6_xD?6!Gi`lsZZ3;U3(&lD;V>_$+US} z#%wy3K6@i%0XWKshF-KdV@rny@aw-oyQTBPz<)iHW=Dxf2mal}pzP?x2D5`NQoXT= zH@QiTF%n$z{BsgKSA{ih0iI7YhmCN$g-Y#nNVWowD3b)Yh?eu=*rGF)EVa*Txqlpl zzcTg1Hgwt^8~QY?f@*BzJz=C!Xz=P7Y+y%UWMmrjF^o*9JQ&C6mb{slxc$rJ6=(BL zx9XD>F|N9x=eikb8^&9=6W?+r=bT+Oer;2DZDr-7x2C4=pAwedHFe9m%(WYbR;E~2 zEm;L0Ge?J1w->&}MPJ#S@dSScHi(w7;P3j_cCJYG=?TY}r`4!%J1u+~&&G?ZKQwA> zGINd<@95uf|EXvaS}0hc>3Gf{6Qb2{2C!YiNsB)u4>FlBrS?JXq}JVLS0xtLM7Wfl zEOja=jaxB}+o2y1N9XLk*`euYSL=IouPs~~xflZcGYl5=iM5o%phbVsm6u17q15a7 zspR#9JAh(f1E&|97trX24T}Z)o`4n27vpGIG*@eH%j8%y&ss$P#L6e2+ThGpIwPJa zY~*m9cnP6EA-W~x$%o6AW9=xav1;k)j|{tJj!mA?DNypFo9J}=v4ej5DYa)_4t9&V zKD)GLu*HQm=_Fl&hVxXW6dtmWV-h?)+_1bl zmP}`_!%Cxi?!bq&skYsZFD%(M(8cvtC&ww%S`T}FUdh%0&Jot5!p$9ZT33h8 ze*Lo}-4RhKm_A~~INK?;qL{%Q-?Av<+)A=Hxj)Boo3wmjY;lY@Y3_!|%1q#b;V||{ zHa1Qf;0ny**~2qL=te2@UNPS>`Z-Yfs8#p~3vXK&^EPdn^X$x1(RvWt5so!Z%{@ud z4RD%a!1MwVoElfNi~*(@6!5$zaJnZ@Jwl_&{iXIW>rMKF!iTZLhtv-}hDq(SMX7|^ zKry^Y`~1d>|3p)K;D}N514m464q36U-WpyVi+il>Rp zA^f1zwWo!Q`rT4{z6PI2?X}!`a*y?cjez@%ovvv=xI86pV{iyJ_Z>UdNboPj-Ntdx zu|pGsqkIgCk3hfnMb#z~J^>6LTy@v8EJXS##Y@K41V4sRVvVzqzXdI9+n6umOwO=< zRvPPCBZ7Jti z^Mc>$$iJYn(_@r1&k02$a zCkXW_Q~Q1s zI+l*_KQ*-**)54&;{*)#8=KW}QBoCOGX|rSKEalmQ5uXIZ-jrPfafXT%_h?F^g!W( zproFSc?pxIH2ecUDGeztv46vgn2(K6gIscJ5H*w*dco5as6UAGCDOs)hwJo}evgCL zS-z9n!to1;b-NK^&>LKB%3|s`YcKBrsqZkd-dM`N*<|NV3UZrG5KQTb551HSu%$ zTU(7;moX%#zk@~p+Ay=={;j+Fsx{rZkq2oHUwyzOg_S2omq!Q36%G_vlcgP_yhFT# z`g5O4<281GobAGRt?BJl;BGRlVDmMP_^`QzJ~O1#md@^}K^rYHz18na+{p77ULwC% zeV%OLrbuv(6*Ondf^ezCjg8eJ*3Y;w8qM2Z$UM#WVU7p7h+2y%TBQ%m^&|AYsWsL6 zKTzJKaP9|sN0Id&D8XqON+V0lZHy9ED8HwHK14)4~!W#!h>)7#42b%@!La^ds(#Ml9Xuvu%4sRlGY!4;oT&Y>Ttiu#(8QXiICYgSlC!{vD&uw4vR|4VSCUkmgdBEsHB15VIh$ zUEfYa26t*7TcxjQJ`qrX7bN89Dqt833&2_OW zTgw!zr2>w%MD{Aim$e0KNXkouCXaIeff|CMpw^;~*8OyC4Y}_sWHj{IB54=R7NLZm zfbqFj&f0P|W^d8cxRj*@i8UOf2A(+@WD_uLS;VN4UYt{#iXEdSY>af8RXfMJLu$_v zbK)##)XuZbP6=O-VN)t(JW{7k>p66GoMqzlu;iB)=pDHK#b>r1TpXzn=G9~K2h19y zA3_hmxHe5Ou~GG5Y|!*H{@l0;+nXKE>)rnawoOi+FQuXK|k$--a)MgP!B8bJmzza6$ zZj7gTKI|k8)ofuV&{vW?$w_kHT)^pRhrw~WSSIcGxCI2hFu@7(w_K|W zGeSK`EjdEC%`JI8B-DghcJM*;4kCRch3=2kACixOXx<3ZPs;s~Zpk71V?*;lG}|)u z=`G{ZWE&L*w}hCC0-dXbf*e<&XlUS0c!FODCarB-bDE%jQc6&-egX#cp#(WkUFiQr z1TxeGK!D~}P#P$7@#FjK6KEoE&)j+&K7JC8Ps^bYdbf;qqd)az8OVkWq^U}2dF7@x zQy*l+6Hsd6)oXF#z)?8$2yrwYN*HG}Ut<~bQx2nfRA6?>d7_SD3MQA}Zxh&$297kf zM_!iX=7z8c%6_h?E-KnnA!7aDOpq=2pQ`NNg*w4MysO1Vevch>(I}|QfqB%k$>X! zFXHX?1Kcb%)^?WpLWX|+`2BIc^NR;@b8#4$KX(7f&XdX`^>ZNvG&);|)wH9LKPgQ? zvJ@2gve>GmgjtrPxc#e@@LVVD{Ax+g_WT|_O472o<@cyCANzwpGzCL zbOg;%oO2j7XX7w;$pg#@Y4T=tB^Z?H1_ssK$ON=eZ2UmDA*=wsrM90CdICdRHBonD z_0GD1ETM*KVQgCfBO0?|T0;GZE;uzkt-U?(UE|)WYeKKl7a!1^!j>n)-!J3T0&icp zqt`Hh+xkD{T*9Z%jbY+}g40k-3%wz?L2Fz!=7wSTBe&s_ z5u-wST|r*AudPk3jZllu*(NBJDTxNkoJmt3^vc6hQ{T+3#%N=H$Jom(ZF|Fr79EYy zLPgt0bBrS@pz9?l+>Z*nK!%f;Og39P;Q%k1)kUw;a{o%o$mjlpdw14(_-N+nvf={!ckk@#*4lEWF6ZBfpRn20 z$2G(wt<=rOEwJ4D9#s@z3PvKsl1ggDNZJ5%s#cmN-BL!-P=++9CSz-@`9HKm zk`eB0WZV?8qeb{=TDzl#_G9F`Iue7K`bjk~09D8u6Fg0YO`(N;(wnq5DjD-E;%RDc z6nKsK$AYXxnYyQ?%>uCK@;z^34Apc=!l3l|2d)kGU5W3LwIx@+VS|3k^XSOO$jccd z*R&sK3y^IiYXxoy9|bPQ#X9UwzhVcpU=_&}wa0uUX9%(e*rgX*EdtS}g45hvZu;MQ zkl$QIT`1?|+Hj@nospg%`|fY5b*;_ZQ{?2>ZBb^eyO2>Wp4%iI+p@I&YX*3^qm?75*|yX_RgiECrlhGvTG{MTYw&up4pfgeOgM6@jik zU3eC@uqYVqs4(-vlGfx`zgE+oNW+3hmHDYr5>tR&EWzhTi)f)*D!=~^4ahFbx&;` zWM5x0QTee96j0bY8({QAeP z1UxIho|)BrH(|0|TU{$fw&jdE4K=BccvuD>pWx8L*2}V=&xqxzR`9zV9+|_DlbqS$ zX~Xp4&`h@CNY;ss>?>{J=Vj`F1x$29+M`+f^iQ(nFDt)Zg?sa)ET_~GW{IWDlOsamjvH!9d?i`{$ zx5SaGA6xo+pHw+S||MeojgA;kbhYVmxa7 zWW9sr#^8QOFZT4rBq>PK3Jjd(vg&M_cPDSHhizB?KDp60v$)^Z?T_Sma**TNq$TT} zK>?@?z6ZF2r-=qwNWP`T6e15tPH1WUnE!?bw2XwM{DN%-Cs2L_(?o3Ha8~WWG|pDk z(>EoX{LxTt8+N$G}$}lCulM4_%2%={#Gz%+hyi$mO$dT@*4#@0^sgZPb== z8CkgvD2gX)q@IQC=-B}QGv;ok#wL2iHHfbCL3V^vLdc-gCw4ZhU1Mugutq%TN|L`k zR8B z%$;%GmTs=mNT)OYM4XenQFI{7oRA}CCFA1fjW9C{i#t&@*THt)8>PDry7OwX&nh`L zqAb?Td}zk*syPmp3G?^6gjJ+yt+bwLBf2JZ={`gk?6;#LeRbTzY;#8oufAy=hj;BA z?i}Q|WBQO8DV@QAA40WI{OnW_EaGbP)FXxjA*8m(zd`xyk52}fv#2wDH{W~J&UrkiF(&+s%=PooK>(_yJZ|7ud7hR27r_YBJP z=;%MIPgq*G*9J2E|1hhA?@bOOot#R229X|l(WKzoznK)|l(7<%NNTUrLq!df$fj_O zdG2&7zx|+Eu&|IY)*AWv&{I+`g4);%q+M>UQ?^FX7C}qnm{hK%UUEpF1c&@8d(Tl> zu@fzD5iGS`4ec%3k_Co-gjU*MBfOnzzNsIkH)#LR1GFC&5tql2#0NGK{hX*e;_o|P zS9t4Xl?zX*mj06iUy{uPJCWLoRDyASJRDeYxMaqWL|ff~eV0a#HMg8Sbz)V(e&1ym zN*A0OWve?_b2Ka0!fM9&DHYy)dVEEA;`+>@tpUNh*xp03Mx_R1E(%Yn9$&o4Kgc<1 zKvdeqUO*%nZoKulg#%O1p!^voRB+{V4mLbX>@Xn0C!ca$@zMO*r&AmpcTVP3ZE$y3 z`SHSio_k|wC0IJlt(@EgOt$L=8?q+G;b5JjHyOPm4O9lV> z+k*dwE7`+#%o>_GB)mhn*r?=+)CsF&y0qTr)M?bPKFPxpCJh@9(>Hd|q^!kIt0|3t zawpWe*x#tFfO8Bf zJ99JJ@Gip_hT94mTecAMN1Y-fUA0_|#wKh*CXQLF8h_&nI;6pGFgi-5B zkWfQ5J<|^_&e>DDNAJo-yE;70@lJO0pEuw(D!n;?Rr(2qqI2=YEH+r+Vl<((zH$W7 zP#{qbBg#T%#F%j%_?})Y;O}g|F~Y<1#fHr@j%NxdwS^_)_Z2xhhAhe&SQg*PtaIN! z#U%pTWfJK_o@9O39v2Co*QlyHQj&oNC-vGLVf8vD-{ zf56LIoN6=(vw;yt7HPh@NN;cvPr3L%Cel?^%|*H$M4EQ>G?8xJd%MR)x~1WoiFC8E zL8j4I1dNU8<;sh&lq~|L#|N`TU~nvgC0&F@5sP4Ik3|ThOU8)77D4JIN7aEX0&4gw zTGK@s%7}d!i(u*nhONl1l{L=9A}HWkgzan*C^iCH1gZaQ@d_4!sHUJA4NXsWYNk7I z`eMn-NU}Ivv~{5KwAvEK!m_v(<83FOJbk2tOYDc%Y3EkcoaA}yBPx-PU%#8!Y@dgj zupVjXgf9U-5ISMhU{D%5k)U!|6;O4&dK8k>e4q|nDtHR(*@O!J&|yp)jc<()UQW@+ zw~QA5!;Cf`zcq4XgJnzj;moOrbA-;=A4dSDca$-CT3MDRKUt&X1uD!!S&FqqcHN4H z4xc~TuBdiRtDt_KZhp(AJKD9iA2p?@Lx+ip0rqX@QID8@O#Z^Y0~Y3R`x|zKw%PWC; z)S3i}}~=x+=qvhACv|7e?qp1@wC( zv_b*>#soEvhfqCOsb}cIZ8OLunv^rQA#*Mr@JIL zl{N(~cQTBl(y)*`oA1gPwAq+Bs^O+SjF@W?B1tq*P#w?U)7a`OR=EjW!4*n)(|(ww z(tcP11YJ!;DIlr9k@b#E0@y%*6v1jN2V29OG1(qQiNZ!Mt~wR%Tv}UV+qQ62^85tb zlG>@x%RY0M_{M7PoIZBk?0)@bkK<10!&knUjbT`KY!d5DlVH;Gn=C!9&80^%m?S%# zd5c3b(TRP&agEH~3?Cy~HisKTrwB1~kOO35_+#N~*rGLUZ(uhGN>wFoZBUgA%9uog zO3YmZc2M@5Eq-V0Sz>yrwZ!zz`;eGk8jlK-)K(Iv<~`@AVvRlH1yI9WjOs+{#R!)e z2WteltjxWipv)CZ)kKwVXmP=enq2ebMUy#ruZdYG6B&7x!NIG>2{mbNkOYP}o2b;@ zAW_nUD7h%yAPIE(l3rs(l=T{gmNU&kp<1KnU~uN1l3Gi;ltG!qOA8@Md=Ye`Vu>wK zUV%xr)Hl_ztZ$=+mHIX=C*4yGBBWp+NyijHQz`m}rV3;7iyjdw$Y52DQF)jqYhc5r zbru%?d2YUggVmr8qe4mA4gEIcW#r}D$jrQ+%WbOPjg1uQpZ`tzD$&vM#DT0lu`l5s znZ7FSy}?ajsmz;luhTE<^Mnjw$;M?`=7dDs-W2!sU~*uCnNldM9aSfW4ke^`1JQ}j&VT93tl5i4T$WIgNcL68S z1(<3gcOFiqIFWyhI8gzVaLOe+8J|eF8sVtrey~OORqkyCoVsMBesZ`t365+#P}>A2 zJ(xm}`loy-wV%X6>w^||z(YBpg}X{B*6b{=528#c)(i(3V+^ea=jS56c`dhX_tqIb zGQ(f_SpUG0#9p5n)UEZ{Zt3ASxrNKQiJv7^g@;!qeWqW(SiihJVb#Eas}kyAHM238 zd*>osha3`0ah2gzg;F?F=zB&eW}snEuJJ|Kd^-htK7gIcvz0;M6{a4Xc!cmBB|i($ zrq0E*&iP zdnJt^FX#_)?{IB;cd^{>Jvv#O!3}SC2c)@+(v(!ZVDs^z&Q8>sWa3|p#m>LFD7(<~ z#Gp?1AaFWK;L>mC!#Yo0G9oxaoU~1_vWkgI7bZyZcO!?}^r${%YENrSpY_k!^RhJk zZxN87Q1u=4*D8Azy!Cj$81EFCoWgkX;z@|`UqyNKiOd`v5awuQ?P=C0vYmZQef-m~ z6EaryN)7W-3#&Qe*4by_BdaH3E8)ec!CsA_hu|mEL>TVW(9vbx)5hk#Dl6+_pE#b8 zI#eJib*P{75@Q;PYDPE=-_0gCQ`U@{n(N7wnWWZKOjs|7x<;8=)3Ts4d=kM;8)JK8 z@7OnYo8U~WjQvotlKVHd$9~$~)SjtG37Z>KMM|w1YXfTdn{8fFulcI8CioNPLdAvk zX`cP<<9)zXPo`oG{Vr5#%%t-QyGNmIqUM>eMyPshiNrlOxQks%mc|7~s{fLgr+yn} z)c( zCuQgl?{i=Rvmku9)9_e{^|EO3!z&w{eQkTRjSM2%FZ2(h$!Tt%xGl6`#E61WGC`jz z4nZH#7eaURVFB-~u>_Ql*2kIqgVQ5)GQ>si$8iPjm3)B3fi(@^SnkyEA429Gri^NN_!xnOkcP^SaIg%gGkceSux;N-@}9Vzr?RE5588f%5KKtUt^ z)n=jNhM~==)+}&r7}`Wc#@LLA5Z=R-u_fZNoj@XP4hd1fsTfJ{Jk`P$;kgDlYz@%Q z*gi+)z;-zwq{AxUxKYy@HVCI022R+AyCKqx6L5|YH7e{-A3Y82u@C{Gd}PqYhjqlA z%-eM}$H z7@fXK#;dfF1i|x6z0dg9*
    7Z`k6&FZ5{G=^?my95}#j$G_DF4oLJ+e}XS!sx%mZ znd;P^+^@Uc@F$}Qu3oigWFf3v*=n>zdKiQNVF@iB^QHs%AlEw(78nwbkaA4(eSEa- z+{wMRu@QZotn3HI`itkCgcuL~p|o}VESv_9>_jx?;cyvH^oR`0vKY+PSZ@i$nrO{!#^-~3@RQ|>%D7y;w z;19t!ka@A_B^uv00u3-ZQ9FbC{ib+(k&n+WWbCaaw}|f|Zzu_e^UtC`IeRlTub)@W3N34)HmX^MP+LqGY z2RHCTd)TAI6hwl~S*yhY=;-T)2EV_(GwpNL55Q|tl{+r><8L? zS_E)_4|Z?BGg7ZLHquvT0mrKjM!4}+2hC>GF{JZFH6*mp+DcX9&!T;kc?Gt*!1rgL z%k#tM%K6dHhp^A(d5ZQ;<{4m)d8)LnE(i;lD*X(fE9Z;v3tQOt(W62LjRi`V!C#lCJU-xKxg)_t?CJ9ps*={DB#z(;Kl{IqP#{dMd9*}C%ky@4Mc zuuK)Nf4D>Z=Td(DCDIDWG(d1RtAC;bnZ}h7QKE+?9f6iIn(DY}O#UAGmU-JY zR;|ri`FHn^8sB<3>Djv^!#}+BAhoBpcZjn%YAn}}d&$pRt=?~E!||CVVk?dxn{8%& zNUiqv(~s1@A<2Q3$B==7mP}&wl;ne{yv?jrDih}{(5c-!ge8PMh?x!}+8-Lv_o+pVL&N?~Ftrk%y@_^Xm&Ra_^Pujn_ zvV57;qgso}@i3EA^cq~Kw7Xiz9*%7-@339TyJFWGw#~wx{!vU4{s$ZaT8)Bz6JXKj z8_ItljiTqO55$WaUwmdub9)dxz;1)BiN;Cjo0s@c!)ZE1+x(|+bomz2M_4(XJ zefNfcxDEOO%n5H5v2Ftdn8H}+4>W6;Jdc3W>ImdTqXsi#-9dl#K@4w6sgL40(!joW z$hdCOEiT~)eL8vbjyU6Uyl|tR&Mnh-)T_9?`ZUlv1>dB_%X!d-?m2R)0SR%Q3)@w= zOI&=vZZb(Dm%q?Al7LUdMsdp|Aw-`eW}~-ku3GP^cjOl8XVc!q4;la2va?5Mh_{Ld z2ZlN^7NDz0F+<|=dv2c)S7O)6bH}}E*Hv$}_IdS9{qg(f^Y(GMcN=csE9PqUidV(E zFFjbapWHb4`gi(F3Gs$^^_iHi8Ib-SNZZpbObRqnL&Jwh^vBFSXvo+o|KIt|59)5~ z-@oEByx@hL1HE@_;4e_)qv6vj3&w)` znvE3-Q8N06m>*!Ddv0X5cYs*?=FB$jTHAH+mRS&?|3u%hZC2IPgTcMSg}GvQ`Hc$` zCe~fAAm{4+h7=6|xo0%KsF|Q?$1X-d*)_F2=+3XSJy>#!WP7}c&9lVY`xY5|eZdW4 zRovS9eewLY^pfqrtaovZy|SaE`nL_PF7a=7Ck}PuZ+5$tsQZHZ?u$C{mF_X_L=~^@T2u4rT`)Et$$a=<61HEY2asD>HrUbV z3GOF)+>xSa)Va*66`*5o&8Ax^9>eN}*$-GbZq6@xt#ki)8M4C#kAMF~a!m{^SQ?fT zex}bbJ>D*254PRHve7;R1lUYeLWD3TA@W$zkP`eo@}I^RWPUd@;y}*wQTp{6D) zZ4kZrri{ai@eGH1M}PyrfBbs~#UA9U0S8*boY4k5on|(EvxoMhF~VlO_UCA?YS2!2 zbKCWJ_aSvd?E5XtobzLzSHz}+zkOZzwHW9j9QNQ>O?-c!7)OpxUNQia4D9DiE3b4qtc9xN>TR)F9mkw*hhA;=F%WFIp-Ek*xyZezBw%#1=g>Oc)HSJ zQo)rat8Nxy><^+AjZU65h_Vz$xDR{-1b;fR>ay?)eo6ioJn{aZu=6E@opB($_}jdWe#hkwH8$q#C?_!75<)fGbTrwj_Eccez|-QsX# zaa7z5lhmIYM{2~p14HOVMZ>$~*zA3&`w#~iC=;4F#N|%CuzVP)<9m;v7hU<^L#@3N zFI7gbnw9BU7r5-z4~j1mi;}>A@5I->cIRq69Kz?GEFLy(M9>nVy>O9emITF3i!D7l zXV4k(;IF@mFP`O2gDnM`e5SnxH0VK5dc>WcS;6m+?`NJ{x!dkyru%zTxC)FD*Lj!rA@A1C62`s?J&&&9Ut_E8H{7j8N@VyxIn(~9}ggBQ+SG}h1&Sqsuj^6a4wWHHc#Fn9EV_*Y3_ z;2jcuvH1MT;p75ayT$)KP2$?YzNk$%v|?@mFmU@EAn-hPs>#H9KGg39z9VVyx(}0VO}# zNg}^)1dErdY}7|kH&*^UFmqt2ij(AV}e$m(BXEOpkt3DvY8qdn# z4|#iy`r^<=;{RZulYQ6nwA78+I$eiZ<2ueu?KUmaD=0nRaoE7<>-Q`7-e0!lZuL&F z;c{j8S(5VeapG|$|JYLT(S=ju!&OVc~skZ*@3dC9;jK2 z83n1}fK?kOK%Zy=H~MHL{f={kJrKv0W=daLCdWyNC4};Mb>jV^K+iSr6V1j;|I29O zUSH+T-|g+%>%hvT_g?b$PPn*e_=>Vj-!}u6z4~cG<#9_3O|P(Ej}F@xF{=Of7;(Fj ze?)xeojv<4HvUpsDb`;A;g_|GpBi0oV*U`Jdu-%DyZyWA!_tWTq1Y?Ml~NyXA%h`9 zr2`EP-5k3Prx0C~YM+1Ue`!AM!eoyI9m8nZy_X=nb&E!*?IDcc6axHSwd@f3OoyuibuR*wFe6oDO`^+qaDeH1zQR z!{gQ8axg2Q5{4iZI9<~Lzj@Ir@Pq`iXJTZ2a}I&h3WeLD#V2xvk@~3~+zS1` z+`8k>rM*VAPkaC7sM&olh)@zlYw@_4h0KNy^6Eq-C(3jMoW*X}D8j>e!&T7caKlwl z2G@=@;*%lQJ7^9cO9nGrOzOupFT>T*7@YcJ=@nEqU-$$_0h_mReoy$^IL{}1Zk+EE zJ~z($37;G1|AfztbfBLb>0rX&NRKCc{v^5>Zrn3^(FhQui{XyEL@fgqO`|oTCe8Mj zxJRNUXKDfTTR)G!)SDKhDKnx^8-hq~~j6$5!=$~nBX zhLm%Jj&VPg?Hj(z1bAF{gRKiq8voJ>OR-)Ys&Me+>zQZsV-~!aw#{Z%^y0)h#c7LE zd${zPo)$H8Sa!h@-MXsnePb5AG_If~uUk;^@;4lYEEttEeVAWT*R-&KqoOO;ZwyPB zIk01L|DMssgK~Q%FB{vn=cMX!6K-ADlw!adqNNci)|{#ru;rCxHdJS)h6d$!=6S>I zMQSF=spk^C>6NZi6$er@r_@1d@e67!%{u0esI=NPZ~JfuGrP67xox_|(<^ZirKoVe z@7{n}Co1OEjcEs?i z?Dk{hLxA5f;5PvHxyft*i$}6BC^9YLhYAbyw7|k*b7Q+{rvL9<`%hAN_J;Co^Z^lx-w_($x-%J6V5oSAr_~XzMh*gEIuF1 zFuudss(9cxGJNQ`aVIL~zq>AW`1-sNGX}vL0R7a{fS(;U-XU0i8u`?QqOz)FS0Wlo zA#%n0=b@?aM?$;@f7+FFBHEzNZk;SNUf~V<{!er!UkoUWm{`4M)cP^j!VD{&X#2c` zH!up@(Ioaxksn5t*q-im5_~j)fz(3#F{)iH!c$f2jBqDK|7oh@MmVQ{rz_fX3USL) z{lvtrxA=_^1~nE8;yLgyuGv7zx0LSmNsF_Geg$Qbn+3ux#-u;q>P?(a0L ze>cb8f=12lS|<9=+*%$zrl8C8`ENq>TLRY-kZaMuiY{3++>BBuj=TDEd!R;Rx75`*cP7JZ3>i_+*f1WBYssd@LDigvTl7 zl||s`NB^xvdm+v^Za}LGaOP^w4YO1Oz}2{o7{L_gI~Y8YwM{5unff-{)C!@i@rkz@Rh z5h{%X8z_e!ZYIMNxTgLEN{rPnl@4?4L{E>MaJmF`MU8rU6mZ~pEoaYMFmBtgM*3O! zy+YKxcD)l3WEu3DsGqy{({@hn%J=U1tesQ)IhC$FadoLB(cgxDMTWk5QhfF6@bAS} zkL@C!L{&*t#8dpPQB^56h(B_U8#h9JXaUemQnCzi2!#L{uh+2xpleSqvvTX@+1kp& zX^2^61)tq;Wmp&rfNCs2lo-^q^NH3Mi{Z zF1I$+BB!l8GWL^Nr*x&QDYU%;I@*UqBLLlDglf&lH0HArY0=UB3V3Sc7zQ5+_*V*e zS|i3lKeYW71w37DPe;AaL`T9U<6{FZ4;0X`pU6W?pchFMy8=phO9o@8BXweHHF#`EGQy-gx6=M_+DBv4DY#(ktuYRcOU}HR zASCQx9ByO3^5R%A-;MWhUipjI=%ib_?t8a~r7!2&IBwiOdii`OZ}FQ;er@f>zA|&x zmUvG`hdxs?iWl_NsO`3_HM5#IZ*qA}|FV%4D@rQgWD@sKZN@lVM|X?n-0lz7r&7k> znw#ygq;{T>>l;=cJ8|@?t1#b{@_z@tYS=(Q*rc&AqqkZ4%k0u1t=v)#lSD_8vUmiW z8Rbncd>alkH0sNI0>W<3z4tf0a3#UrWAA@g=1;?OzT?OS>G|IB{Olb$c&2zJQcZ6i zC1mw7{gGPneidF()*t8E7hRk_?My+=shP7cP6Ge1p&2LBkUCY{Z!x}V4j)ZUGI*+l zD)@)Bw+Io>Hl$v((_RxXhMd^xlfuo;kLG0gfYH4Lcu~P>%;zA{}m{V`Lk>ufds9`f#g&lgA zov7IPa5d9>f5HrBMMxMgP@@H*T!RtGjb5|TxkOy+##gzCRg*0{MM>9-tv9XU4({5e zpBO!a-7SVPVF~#xHxsFc^55&INK5xEG?xz>pye!Q7p{^;iKhqIKzRmf!ux!b;=oVQ@~T@*L|C}PgDJ;#rEmS_EZJR{bvxU9Yg;L_*l}% z1gBeo+&+sSYG-J#fE%upF??FQFkzyk);v>n&eRLtSy}JOD;nigHOga-SE1+`W9e~!AF}|c*^bjb{T;DnxVs5A;pMMJdZ*N;xQ1?=T08Y* zxhsD6J}mz4*nQHjR{YpuIkCRXIqH9{zMP(Zxtjj=y7=cRu|#YB@IKaHP~#sOXGmKY z>4;9rPD_GH;%2O>3qeO9N=`h%ueeCIx9mWKhg;doKG%kBoa*9{T3oo)z3tJq?h!@P zH+#4Qjp=@^V}hTn$GV#pnM$Vh& z(z~CRP}U$M=MD1q3+~XS!@>&w@A?%z#<-W$I=WFX5^27UztO{oEgr+l(uk@bAqGzq zCp#dGCJQI5y_M#v@r*Cs@m^*4FkelLb^Ml!sV|l%`>5NkKRv$uq{4+0{RB>VQ^e?Z+Oh9A25Y>fYuZ4OKfbj+DzT!3Ozs-TZDw$&|o-C27^YH^nx)ZjAma`L&XQ>ny#KY<7~SuC#;~S zwRQ0;CE}}O;)qf!>$uT)jFYq;ztGYmCrkWR$kaW@*-;cc=+p z0w!vb)`e<(#Dnt|cYZ{CM#t60C6n`?=zk^SJ`-<`99}zoq}Y$^#rcc=_QG;% zC3oSL$w$Ump3=-1GpI1i%(}cd+A(_kDocyeBa_xxAGMobmp^$^xQ*SqG%~Z$-gePd zt0D7KdJXTC&@FL7WmeqK^rVO}OJWjMrc9V=XXdaXKVxlzw3_rDHqCbNp|hoaArOJT zbh6S)pZn73+QpV!^&vi>;d2z-`h;)GMEs(_9-R{4i$NVBW|u%UzR$|DG6oSSMJ=W5 z3}cQQ-r$OxLjH3{|1+6zN4!%#93CYPa!3E2{sQ?>45oY|Hn4B%5buC#?IYQ-+Ca!0 zXhIoDPHVGpBd*g=&kC`#pMR;;ZTNyw77jLTFnpOsn*hP&c z{XYGht~RRi$?XIbWJ9}XdE>lD|Awy!f1}+8dCkEe?%-IlZEB|M6_9V<%PY>9&+fEkk&$HJB%`Q4Q<;8rRecQQ? zr0+@DoIOg^YuAePM-Ph)Yd4j@KDp@1oH;|4Li>a3$4yxJn&~DM%Ztfqc}e zkKKmq`xKZ$f}CVT8#&3sZL}UVWrEclsgcXkO@r*8esmZ z%r&wZiS~daSyY05Nao{nj-FP8v1m`r*)W3uvLzYFKv;)AIRDo#x9M}Tuj7pTf|D5@ z{mSPpJbm&s$+&TYWQ*0arw6s(@91qWGN!`#J1`FIFF^QS9zUJoC69xhNrJyeVny6q zrQEDZM0==B493#x=+FAv9c0H2>A%yDUWCewqMX2=l3pIfhCsg*Akg3dCa;diPl=ld z;Mjtir?IhJ`~NZb9Z*qSTf=kiy;B%qC^K}VN)x2_-h1!8iS#DD3n&PR4GVTrBR1?E zqb6z+lc+Ijs%bB#nDR{37-cU1KKITbro6mw{pIl@vL}6jNqXkR>8-<0t zDB}cx{tRe?SH<7di}&5s^Ht+n0-69}&oF>*@c@W5-R94}u`~FZb-B!Q&Th5g_L7?UC+4kPFadNXlZd64h~*UUe|y>{1pka;_5-3sD;B)d(^7LO#zCjId8}SH% z_)g!O{1-232t2$0tc4!hpoh}`{>E`;%%6aRtMEVFIu5+xn}B|Ez&vwzC}voIQG$IB z>L^Aia5BcORAiOx?$D|gC`yOk_wi{R_12qf(fP@Sx6sh*} zRu9Kc;#-#F$OYz0&WC}2JF7A5iV9SN$s8UAeJ%NEeIbClyH@wli>R;X5(P<8u{+xG{83(Yq$bw;Tm>W=C@m3|VA2V|% z-q*|Rk5g2O-cU$?tBW)@e^R2O5KzDSbNN|Af4)U4y(O%0dCVxPe|N3>@};HRGWqcX z0|SjAqpFmx9r<;0f6!B6jzS9z<}jPYwB96PX<4vLPl?qf>K0u!?(-6TB`=tk_iv=P z!^+WPG$;6|nG;tZ!Kf=6FVt3_Z!mC+p6d|Rm}Cu02kyb(M1bD%9IPEbhT~=RH_x<| zvcb!=m|D}eX;7+QCDNoP{YM8_~-4NFIU&T)LM6DDAyhqJot7~zrbXXK~L_#kSSX!lzDQ4{x;RvDzt9tE9JR+KVDclnqF{?e)(hV z(~nmBRirK0US#LmUR<%qdXF#t5PkRhSm6?BbLAV$*C6th+Ni;t+`eRs0S(7wKmVAj z0~eDL`(ktA?G|qY`mRB@z$FmC)^O)cU7~?YU;@Oy1PJhJ_*xOC89s}s;a{(8Rzexh z+iIh9!!5nFXe$`&GGb40>2}jOlPj3jXU-*goO@@t`ARv!JLR_QV?er5Xn@j7LtfR5Pl(UUro?LV(x{Xvb&S?ckl^n0DZ1JrMiOqLW zwNiDTdvuZp z;*30E=6XgpBv^wuT?sw-n&|<{Aw$d~67z9(<2pZ z?D)cc0w91HsE{neaI|6AF9E3G^(m?d4umeBzPr_1)s)t@90CW?88QDSPFJcAwBY zt}dzCmNTb7FI3r5FQr^no$}l`sawA}j;pYfM-`-!Uu~>v+Lf%LxP#*5l-bD5D`Ga{k-3Eb74a9iEM$Xz zqICzF-soz1rBS*c;tdY+FcMAxo{Oe84J(bw5J48ce&ID@jo#Budel1jKZ`p4G=@69 zXAd%DA}Zi2R>5s%ilR7i9Pma!j2{_0O{Vw+mbq!M2*q{}mM)Qud6@N@l2I|apPw1D z4R<0wbFgA;gM{_J^gpZV3sO|pF0Vdu_rfF8uxG+YQ_^#fhxU=0-o9~VihPly{eV}( zT#5nd>VaV4et~(Uw{ND5`%3B5N@k1HDbndzG< zD`|!}Msx?O5;1$?d&9zelkrMvdb7LtdLvS`)CqJ*@UsZ8SFuv}j>~ZhY{_v+Tomc) z6lZH0ZfoOiqG+p~-5H*o+{KIGHstqUKGS3<0C|&P;PLD+gExoRCg6BcgBEp7>&c`xbxr%p zTcqS8H20-_%9dKQ6I$ocp2R1hSvUe}nL11Z$%Zh*xp0zBoE(C&mjZdc4#<7LsC8~xkuoUL*k6APqI zcG0%Gq<+Wu%}I^Z*LBMGO3BTu>KRLb#Q+qV=PAPkxFX0G18{Z_&;9I$d^p|tx5X^IyE*aw@t*fqX}ZuM`h>T>R>#D* z3@h~ND)1RSieiy)Ygo_m%J8~G>+bjIM@w(tt-ji?Hnwl+$6J#DC(EV+xhHr-@Qcs9 z6&0i@OtHzo>le!mL6i^mv-G8W!h0w4|Hi(_rPVjk>{InGuZ)8@IJex6@S#sOCl-#^ zMz3C2sDII-|M*Sb(Blby=;H~BANqR{4mCT1VvxcX98wkC5N`#&d$@Gyr2?z?1}3CR zhOQF~x`0EIag*}Mx98C{-7O-7!4>)6bV!b;|Dq>gf#)89WiG*P)M%P{0^K=d6w=g$ zD^}o;ke|UUzbK3MVgk{)PFYIUvolv^DO(+HJ8me{nYS+uHIcNGxPRU?#|vp+Br(ng=-(k4 z>d6wTxW;I*?qnxed)N~?sOtnw>F0no`B91WuQEml6K_V=jMkMZ=@OUVzox%FRjg0X zy?fy<<-3LccCBO+eSakA=bI z1JoNk40bz(>h!|Q% z(?^fcPgX_b7&6{R7n|$PRhOLYXuntq%h!LZm)8hJGm>$xfPIC9e3{4tzvM6e(vq7M znN%zhana-#$4SGghSRMYIaz*PxhgfMCQcbDYR_97R@R}dq*JjvK6j~)R_m!YkyEz) zf>m73lU{C@^t?2Pc)zPWtT#&v_e)OLXL~m!*=KpD9a|=~BtA)=XFUwPN(U-v4+5St z_G-Y|M>b3wv#E?$6AKBz&+ts)I)&gU5i%bkoCsw(E(&RaRF&;*hYW!=8kvDN^k90`$ez1I_MO^D~Ec3d&?OFjKj@m zQ{D6eI6Bj&LaE0L>D2-yl#Ife$$3z|#&afJMpYzduF<>5xrM$)EBr0>D4=pj`lkIrm*jV=m`TT!H}S}{x?WN7aM_g2;Enyii}v5?Bz(^PwS z{+=~OkxikaATsCl0NS2)iWNysVePuXI)Nnt3=`~M_WQr;CZS{9Vtoic(hQpxn?vxC z?)#tgnA?wd^b`E~9m1s9U;i)w=EhwJ9me67VCP_*cH|3=@f(&0akG?~Fdw@fWNCQ{ zDZKE)^$<(T6ZCJ(s*iLS8k%(1R2=CtGBoXxoIt5R|AJCao}dr>{E*&vd~79BK0kq! zRxP7{x%49a5WCAAgH3J;Q!KL%nIbnc((<>74~2Av34#>~%&7_e@Y)ZM)dyC?4(Oi| zu-0%l7q)Y<>u_4@NMa)y-$#oGo7W$usO(r3o4l$(bW(HVv%dZd84{sO!R7^YH$fcI z8zgkHmbQn))p&<=q%3}|w)T9ZPhCv$s%12NB zhU_*|f(Mhd(jNM4loLV0-4QUzWEkWos0O_F|6p^+=;m86H|el1jAKk*4;Nj`6P@Ar z)Msq3QBn2i&I;P*bm%szsxMvDA#h$BGw^bnj`r?{63`|UE=pQgP`EC|+EkPHd=DLC z?$Go3@QTm6q4Q19>Wk1SBY8l#-LRH#hQ0qU+a^r(>5uf0&VSbfxX@?M6W#yZjY{ao zHJD)&`J1sA4Xi68lbg!coBY%f&2VO6+-o!mmz)!eolT5e54_jab-s+4P^ho1yU=f- zS5-`O zogXZW`7n@IxZgaR5jQ7B!RzW~%4otcJEdhnFnke@#0p0_UBF#e>Uyb+cu`PWzu@8$ z10|=>3>5??*b_WeE~;=b+X`mD$~%w(Npr4 zl6Lv(WL;j|nkLhozfoTbO?}L_dK3pR`&|}x-4sB;D{8`&yzW2`L%F=xpK%}ST3 zz^>#>*zO{m6$5K^(948$a2DCFEq$AM;ckJ3#^zrnxyL#pOPsx1v*HI+M$l=cJ(BU5N{5<5;|GUW)v zW!^>05Gt5@ESk%FfoASbpuw0u{&nfgDOyVY9rMtx>(cdNz4U7D!PC)NTHC&d}IDX~%Q*}D9LyE*Y-*~=-~iUI9Kbk2369C}dY!}j3YYTN^8IVELv5%i>) z`&HK`t3pRkHPI@>d$KOP-~PugYyc#@16>efq81=A-9vM~{~ z>_VYU{-(RDmtHKi&fWOYin6WMA%+V(nXhh1$Af(hNQ49*F6#PW{{s3U{rK+Uv4$5v zyPJ08yzC3c-3Q}#XGpaoK8nFx2N+U(a`)+y8+FsBPw&yuXXiH@s`*gt3iOr7H890~ z3}w*w*O+G`CXff5Myb6}EstxAaC$$aUt^w4n9!X;c>adTKYJEVKLsNOEk5KoOUBPw zp@K;_k85s#pL^l?b08c%t_7H#Nsb&>ButnUTyDi~(|^DiYGgL*Z~qZ*vQ7Fw#U3c^ zF!r9`tPBpNKR>@?M@MJwSs0sq&oWnKz_WZi@j&FnW5;8~*S+w#Ho^EATydy73~uIz z!1L5Q^hT5ccVF?j`{~C|XvpC=^$W^GAmMP&|AMUHKVXrVZXDf;7@RO9`t#;DeO0_R&vpbgchaA` z=Y*I|&YPRNuH=ix5|n2ioa;e)=4@?M>8KT(sRop9B|459 z93%S8LUP=ZL1!}>vCUgs{8iKZNorlwLZt7J8*HA3N*lf?UXyQ2hNDvm=40bD4!Z&| z4!?jdjxjjx{sqt1!>KT`g0~Fg*bOATsb8i-i3HwKjAH{V`4vHWIg=iu%BF`AzcbIF z_?feBx_nn6PXBc(j5x)lca!jpVhJnqG4U46>M$F-f%1v9l#ujfAU8P>Gz71=v7z3i z3Zcz6)o`CG;JK4|zIF>g*K$Irx2a}Gzy7oQc@y(|gL&SA^r$stJ*2<&6Hed5v8C34 zEX?EHWS$Rjil`xK4m`ij}v_oH>xF;WSh-^2M)7%@bxhF!#m0JwF`zPiJC zi~5So1<0Ear@RZ70mudha&7A03}gxCZBXojB+b1JtoRV*!H}a2tYKbA^NK9R#P6OmNN66;k zd0EYASvBkBb87$EOJo)IW@1;tUnQhfk<7wqb|o>9Vqx<>8=$a~QG7Jyvk zOi{b3+klJjNRVPVUO-VUazA^Hmt=S?Mmqtgs}`Jma?=i$3;102)MaeISS<`&1^ux8 zYN}Xu{??-34I_OO6r*Ci3jKu&MnQ84%XesB{}Klk)tZBq^&0|Jg+avu^pVneN{S9O z@tG$#SBnp77nB&=rhOr)z1WnutTkYl#3RuyDA^&+!=+4;jsnIqW& z3-YZ$4sMAIE%%IE`cmWim#X#W#zn#~UIAO`epphVTtbZh4{Agu(r@&LH|R#n|oZGyBqE)^hn^w^t6e=KobQb9U)HyJMkYYT7&Z9ZSNAUv@r6a|%C4n;UQO8=XY_{OYU&=3l!#xbNKZCWBDyoj z`x|Diqn+x(gWXQPOXkN{xFY0Gp4xsSr~E|iqSt+$BU3}w@x=mkf_#<1DfO6cA@isT zz!}@a|BN;8$QD0`Io!i}3D83&mr_6PrfkU>-8Hy}5n||JM$&W-BNY{r9bu&ll86`8 zgO)CI_8FKTSLtG3k(z)M-(`RgYsG( zgn0s6+SRp8&56(TI>+-4m+q_6))aVq9L{ki@^_;7D5`xseULu2YBBu-ee?Lo=n}X; zyaU{V6L1Sq4T-C9CBV$2$z0IIx31qYPg9%g?tH}6hp#vL%@^qVN5JP?(l?AEP}25x z`Xs%1*9WxEhtQog=#CC_2UJaBWDL3yCw>}oMk{nar|Hji=x_MJ8-IU*-#aSLh<%tq z^$(5*vh2i=4t-Te`m7M~-i8g6>av1p>L)mpkFzb1@VXjPFx#oI)1Z} zr@rwqe#5*x!v1H4*cN03NzO2gVFR@AKjM@|{}(wWh|MP+J?qea$tivP zzs4zH;!QX5=X1AlBCxM5GMUXv;-I_6))lx$1k1Rlpryy(tmh2eq04$igCY-`V<=Wg zXDu`!Nt1t2h>(4s(r4Zbo^5&%2~qBy8_4{azmA$)-loQ=N)IHMy`^FCPERe(W!IVu zJH$fK_G+PRU_?Qnp2pn4e;+(RfA-0+fq}zTq%aY;bCIFge&vq1g1!W?#H=zSVNt3` zMLVBvZ2JzSee^C${C7v+J^G6cW$&M?wu*N0o!9%Xf?4K}Vg9}Z<}iq54jJaJ z1k$aT^e*NZ0rPj6W&W7woshnldG2MNIq=M5o)mY_;ttmoK_Mtp!o8|j}X-_WD7XHB{x)e-t<<~NoZ(dPB*>G_P!eEWtC z93IC8tca(muVLNemm=7JPRkj~yO`gA8-lPFiv&scIn@ucaMQvcE_}rBA$r@d_Pc6o z^j!S9z3ug79-5kqFWoQ5*gKT3s+Dx4x%vI|nwklRi5-RC7t^nf4efopsNnk|Vr=PN zN#21k`})4vpO?Gui~jyE59I9(@7|l=d#9`OcJIPF3rUrZ8y!#kZ?s+O=(yHKd~&1X z1`w@7Q$`@tXu~-Idgh-Lg^@?>HeUAYylZFRjxj-WnT&G!9`!!EbpQrLRB9k*CckLaFbG@2Atg*L%8F zkN93*4*a^N%L(-LAG76PnL(X64K8p5ZW-G69jTLR$YIPxG1}Z|%?zQJHQ=YBQn@KP zb4h@TsA3aR&)SfgzBD0wBr|PU0$|`V6kW#@#kRKTt%?~-(4~Peh*i-Liz-U%^%1F5 zjizTT@>3Dj?g)tMQ5NOJ#CD5B*+@NgbwgWb~lst3M6^yn?=s7Uw7@`>epi!KZdoS!!;eK1NUGTsbXP){P%ql|rI%|MGQ zh7WKxBElHzAq?m+T{J(P-mHhtzH`e|bNMH9>AB_S_Kp(jqaR=IyD*=`;b;$#Ejdg! zFoVF?#>NuC`O+832I&M$#ZF=^84bc2k2|c6MKg5}9c-EBHOzA}ag?GVe2K?hD^C}& z>Fe+_QU_&oO25gobYK=WJngC<^o@w4C!chCs>`EGAEAT_Ed_Dd|KI zgMK$~hoC!odNJ8;9;f z61kMA;xtp_G}|7F-4nRi0+5MuEaGVb1;-Q%zS*;hO0-V#&Xs{YNHwg_ckI{OW!+l#gh?xXccwwj+cOdXtP~YAGPF!1x)NgPua1E6%f;EN4;hSET_JRh(6% zF6EC!STpR4hR_jsZN;ZIXiMb13cZisr!8qq$fpGK|16si984jf5+q%@fr4n`>1@z6 zR0lCo{(!32m^pY&b|nZDr{Wm+RR(?)!}0nArOpH>brcxAJvhejEf9aNCOK&^`II7f z#E3B==t3V|p${nO3+V&E!E-i3kppZI-~q6%4E|H=CMjc*fFjbWS!4krpl4q~DMEcE zlyZfh4aBw!v^AfCc8CNr$q-3glnc#q`PAIW1U>Tb60h#C>tu> zIz=Cc3QmDOYA5KUuvf&i``?rvIiNQQ?%V@<#PzW;Iu=e15|q)l6u4M0S+Udr+`rt@ zlkD9o9o=dAtQbV_+$6TkiV~nTD*}zp9JUo43x?Swf;+lzlvdOyU(qijU!C+s%g4t7 zlUG6GwE{F=vJ;(JF>5kSkJt)n1iTlpmO2C4FQ&IB%Quiu&G9pF z3Fq_DVlBK4c!E?H*KnaAk=U|-zjQp+!^F)f*4HhBcw`=6=@gda?S$*cGr1r1Zmf3+ zIC%mah(9n^W+En!NwDeXCY?qUiw4gp;QqDH4G}cOQ53D!@d+^mR)D#J6NTF z1!I%tlYg7CUM+cXM)1-I7dMlD6y=>NNqw0?ZShtiHvWmCy=qB|GlK`C96U^!w8X_( z{w=9iA=ds$B4T;PhD5IjXXBjY@(qbT@m3}o>2!D*z#<(?vQx@7CV8Y-8KtMfq^t$C z;Y*-41Wx7ed$?9yx*|HVHq_SF+%HXer&`I1n5gPt+dvD?bQN^EeSNTBsI7TsTI-si zpdeergnZbUmqE4oj!fC8$EfK}0OVCx*$Ay+#oank8$OaL8?_i^BZgd*DH~bH+n{Vb zFH<(MrQU`-=Vi)97IFuajVEQwMiz1pIEH;Bt88Q;7mzk}hk{o)fwEDbvlm*w2g=61 zGG!y1X9Sdumu1REhHu~;1ZCsLGG!y1=O8f`)UGc;J@3iR`!A6f6-0c)Xyq^~M3PB! zrSo;k;4tKcRJPN=?o16#Ws*3u7?wV8TyA89-ejg8>^*{W5%rV!U3OedlMp^MnJt8N z&0cV9K?!|OhZO3dezb?!4X=rn7&xSmlpQ zUFn;M%K6Ef7(zY=x`v0l48lAP!QH*{2iUQ>!9J$<(&yP&O}bes@X&8Lrf`b?p-8b1$S^Sn9mPy?N% z!!#Za-1NLQ__GGQx02#4N15CJ(1l?$qZJNKW90n55WTS8PE*s&!buBQT3IM9V~55Z zyD3TZw_NFWlSn$xecsY~rO#a=>6-YwAp5}bJQYdmiMFf*EAWq#NG+o`i+0LDswUyq zLSbmS*=5mo9Y2Ax``Lfb6L(<^CC zhMm2=DUIK3^WxMuyjx%lGz7nn5a_>56p_1hK&QbSQ?6*lqXR=KegPf6$``KK#lj_f z+bNfcz296qnn@fX8jxCmPVn+*dO%7 zx}{HFsq4A7X6Yn+eL>uBxzdoap&&9ptR*76IV>-#U{glp6=_Ss;co{Hqkzfbf(6b8QtFiWw{NUSfp78vdSc`)2s$-4Ji zN;V`BM~LF&h(%e7%1#v#DeH8P7z~{+X*rrAz4DKPX};2cP{ziB$O07F5+2cl$Fng5 zp~fqdga4BeVa1jQ-WFIK0wv9i#UuXbS!wG=k!$-U;xp^t)4Z6Htso3e87#N@ht8vH zlE0>*et1#1r)Q|Nn_m9U{pE5#At$&H*aWHxAtMcHLZ3nW2A4_36%;lf{>Cf-xo)_y z@LnPPB7HqGKONP;zsc&vyu{NYv|EXsAhhXLv_yKJ&}KjQ{o&|Jax(0?9povl5NISe ziaO%3Kzuy2H=tl^C>(u^nqM9b2s=+p3qoRfJnZjsd6~-Td|NN~06uTF zgJ(dvP^mjEEmM@vxAbuK;!-9So^fGH$_uZj=cR97!}RvT){Ojg@>FV~wS|eP(QJWG zG1@;q459dBtJ#L8`i6X=a#B!KEX8*F*${vTW7IHKGa!rj+6f(IIw5e)2~6EAPPD*& zi%FgnpIDcPBGvAD-4{vI;`8D$QbhGCpQ)6j7es%}peLDLl`Kcf$H~cG1H!zMp_y&u z1aA(^fC8{DI6Mu4gT>&QX37V*H$EP?DWD(luLZmqRz7XZL~bhYX-}w~HF+Dkq%F+L z|HfE^>{Xi4tL3yOQlRfwA`Ltr$y*ZN#lu3o>8#G^=uY{}ux)bnXUw|ApJpB7n_aUE z#RHc>xYZE+LD06fg+=$^U2F7>%)E3|n_iHf4IekrCXY--RmEWUZ7SOeYgJVg;(78PW>J7nbL}8rbJ{LQf&A2OFlW@=3kd?I|Cuc)u=7t>SBuB?2XQvb=rxemB zYeROZ#1e80OtGf08{|46yaQNm5RY}C#~uQv zSig%Kb#s#&l2A`dw~w-F%UDWV6(1=^rFv%isc*7bv?1E1p}<}xdrYigveGGXL5M(I zl$LK0U#}=kBbIp8CFT{+RZte&P{GkgP6igyIdgRU>N2Ib>m8H&T~h4J71b3fp@*-h z5A+X0@5m?6ziAw>ytGVbC8x`*E7D}=zAz2VB{w=d_l0X}3|^m~)DoqvY>=0fh`$YU ziGiw1J&P_^R$gAzbGdS5+@iR+#qs!WQ5>|?OWa3y_*!rhU{6n2ax?9Sy}jFeLo99h z0zWe&J1*CkxKC@ANOZ-Nw8-$rdUB&wAi%<(06?FpM_l z9wWlIhkij19T|NsR#S8RkK3j99-VzeSd!FO`<=dlJ8jbUv7#7iiJ)&$&cLPka}wG0 zArF!Thd>Jp#x^~}|4|+=|ldhV6WeWqViMx)|PGVOn)gx3Yq{ zpW&2eoQY5vSszAuIw-5wAFrtzj8xjL9#x#*q9ia2Fr>X>jD^CO`XFkKy^27k^<0~N zZjvHj;%G%nXQf9g^30B*eKzI*VU0FK+VoSrAQeJ?i4}2BRXD&9^Ej zNsVxca@H8LD5>-Gs|n}vXUC>H;`bN~sp}*LT7~-Pydc(33a||G*N3H0#QB16LMelG z+#g@1gW&9k<3I}!23xKHpKLHDAcHYlvNJ`>127>Uq_8fo+-Vm|9v`YOfY20ISHdi2 zVX2k&Y&DUsN$~mB>O*-7{K)2!~>Xh>RB?L$}lm-+eH8~W+%d6+o(_?GzjmG~Ms7?_5Nbo`7A zoD6--VHGR{M}?!X3Je*mEyP8$W8t${U~a)?Z9lL(wV22XmiYr>VM`E~l1FR>QOh!7 zhr)ybs~Rr6<2^@cwD;l2<||Qpl-A8|U3vr~eQ=FSfk9+I)bRO|L&D#qSlpk$P*72OMuund_1xhBe z@@0awh=?}y(z6i-se~tqK9`j4sGX%(Xa$zA9LryKBnMQ#!4Hk1`&(l=!ZyoKB#(zuTS0^4o8WCNYB4(;S&p7<;sN0qh+vws(~0`$5K8+T)|2VHw(bzV2ds?ttJ!)ZRqNo{G}mi z{rigjNKg83DRH0pc0_tmI=*%-kxV3wNHwL}P|-oMjoici1*ohMOOIxl8|6n7dv*bp zK}VC?UDd>8!?F2UN__En@qTp${RXw93T075QhJF<6i&8{%~A>fz=GFrG5?V|sxvz(o&N9>_etD05*5wS&}mwTUZohz_-la#32ZnKM8(uUVKi zR_2oFZen1k7hCHb7&6PzV75oLD~#wYjObMukp%lC&tNFDSPn~ez%zG#FfF2qh=$*lI)PrXK0n;g<6C!opSdrHaav`Lt?4X)v zL)Q8nShB}MVA+!%SM8)2IPRkm78VNc#H5E|aqf*w@+^jcLR9S<7zjgz>d<}Oy}xFP z#*4>G3@z>Xe19`5TRz{9>?JCs>xlD?CZ=xi2<^Ig9<2EV{Gd-DJMYls9nywP+s;Zdx zN2DeRB>JgkZWRrx!Q%l6evSdT0-vz}fro2guD}=LXcsp#Dz8v6C~kIRK0Ochy4d+S z(#y~y2gg818V7LU@eRVlf&h8kFL~q7nni2850s2*@XUKN6RI2(1IB$7LW3hqsc-2G zM8ya%G{L7dh;Bv=uHNA=2@^nB7DIJxjHQWuSenS%-d56|Mr&jB-Ppbha@a;@DGD}`-yGj2Bw=pATf?+kVaVk5KJq6ZprsA->1pf ztIrLs54B3}NiWJ(5_)_zO&_7MxK_J(=h`fv#dTrv*7QDTy>z&rj= zn+!eI9H~3*AvCN?V`*4PzEGgCkB*xrU$8P0oB~-AmS)pQt35}(U^2Gqd9d72){ zG!4@Vc9v1Vj4`8a02=>BT1%)hLfeEv0Vz=kSv$5T&nL;$RT=GrDu_TMiXQHEAz-Sq zcI-*dCy4ybWV2nQLaNE74cbREZA^%(6?1Ad#AhzSvkpJWHS1`l$jGo0S;-iFfaDJJ_HbjXYTyBHg|N`DVg>I5 z#iN>(#Q@pSRfJa3!)^fqZm3$iZX_ZZ>bec9|1#7C%nla6fma3^22~=|bLeo2suI^^ zmW#>73AzuXoIdZ}5f;l9ptEO{rDT=V53Un=24s)-v0ko7fa20On!~eih2ozH=f--$XUg z&TUS~8G2m%v*QW+CTbxD0oH9mYnN_BZDrOac7e&(Wpw`t-O@d`$YE~j+-_*2nb=6a zCTqhD2864B4nII#P0X9axkjG$CIaPS$KZWpq^Y5+>8oex_*wBa(I$b!kv7C5lyo&s zh-$!GcW_J##$1wZ^jsxqVHb=d6n{@kUC&d{4R2~Tx8AEA0pvNsR) zaQn;YrQCj4OQ*>e@;lxcm@llAnznZR!Kp;~_%373mWk$LJB>73&gAEHYHG&i<#oY- zvgPiT#pen?dS%f>;fkTY{uL|x7B2^yJhNVGOZ%Ie!Bnb6{C(}gKIp@MzieX8EFxP6 zM*S;H@G&?AFvAbj)|Bt&MjY!uSS|QT_Z~hCh>blPIw0RKknaV^hu0+=3ucPd+a#(j zQ_zafvDQLy>rK}zfn4pBskLNTy!YfCD0Uc-dI->ib!oB?TkL1Up35LVsu0Hf!xGfv z8ez#*ND2unQWz88ST(X;jb{+llzjl9lUihNJ5o0HbTM%C^K-2m>?<`6iq7_~;V*;g zCjk2sP`wRyzU@Btb3NSVhL(Hx6$ED~@{|i>LMnZr zALpPScK|ywRn;?{D)hsQ4XShl=^0c41W~j;M&T$~5D{OgAjmID6(a$n6SN4RFiUvV zTceeg>JO1E9%W%xj{a_-KRU)8YNIDDL>_ff(KX(%;*JAKPeNON2+HMx8?zI69Nxg# zViZLmMTCbIeQlN~e`h}>=qHR`rpEvm-$D69-bjt+!hqj`0iT9W$XMwa1BTgU0(#}=A&ePKeQhExwh(_8xKmfpN2R%+ZdayT-PGjj@Rf@hN^8+Per7 zZN&Ja1Gg#HgvsqKveOq@oA&0yCkmku(v@u?)@*;VIcIS&agd4#2rdm0@h#RTC%1Sg ziK_M`$2JLtS#d5|9we1awpeCwTCnI+Wk6N%9Q%NlB#)55`pBg2@Wn|!K8cnAmR?EF znPYIYb;97~vJs;-$C4>?q_JIamFo~e5c?);b(Bak!XG&tpoeT#gEH-VR!cei?mYxFfHcZK$u`CIzLoB3J!#4||mCC8y%*sI`p zz!0dn2o*%@X%U;Xqz_>4ZQ(9kE-e7^aae{gL(uetog1IPfUz0MDrPq@N~&pm0lI)? znU)_3vTSRtP!L?=jl$v;g{7mpxkKSX#kwfunhYX)-O-eUMu9?pTwIfaAbUk{8Mt$Y zgp>#biDZjUiNAfarvhJ89Ozr%;Zf)lSS;cTJV7^8;?ox5>>M2I>>Ps1{q5|0ee7)m z0Hv>iyz~<9f_#3Vw_tl>WIh?AgLNy+`$lE1vzUqiYQM|Ivr5MJ29*Sh_?GLFl3MYs z9$66HmSm)+*?!L2DchSMlVjoL?YMVfwB8sM1w6 zD8r$5l^D;kW2!q}m>LZe4KvI;>)VgC${rG z$9e*M9|8EcLnCmEo0}s-C8ErCTW*I$iElA`tG^9(JME?8AwBf>ZQ;XXB~91|w2JtW zP~zRi@wy=Mfm^$U6U;Dlv?{w^qG@8c+suNes@arFeA)MPM;{V51s;7lr}t466g@{g zAO?6}FiQ*DJiw50ZZ9&E^mg@X5D&;lT~ctyMVvm!mMF{S1_lu@toLc)JjiSh@t})% z(lsdr^!-fUCFk=LU>)LP1;`T6Kt4K^vm%>5s6}LJk#`AO>8+b9FuVT?b%315^j^9^N5)^J~x)r0H6ABi>*q$ORgNd#+i?T3cw$3qYjHT4z!2JsoZ ziW$Z`GxaFRAYanAh|5ep%FiIkm~etpA{iuD`alLzd6tKGn<=IG3<9y< zxX)_OAXE$r28jAI2yuyNT`YtA0^>T*KqNBAMd<^m8Dm332KgBvpE3~5XZ4^TP$=Zl zl0iO&dRmyav}KTc^s8tEdK2>K$RJPXYs5#$2OzpEgrfvwpJX69_#5+w}>wph>a}I{pnJ3WqCNT4u2;^Fm_uRL<9Qs8KD3W zI~l}+_D1g$jsUTj)iXxBpb)|uAPzFfue2{j(>nviQ3kmHkYxniIF_~&1_9F19<+)-A2hkurk;g%;iT!8b^rOYpZ~LAq$V?|HQ@;^! zlP;|8B`=>tJZ0h0ATC_!d$#OVG0t#t~o zYn{dV6XHPsP&?#{f_zuWxtJq%5`zX06tEzH153QSR-m`YSwqn#H(tA^pV)zd{Cdm- zEGBP5A#kxIwgOt9_fCJu4|CN~grkoUCGdB4Pf}Q?KGxc`O3c;wDobi3P`*YL(zOos zG;sCub*$*@E!6Xlsf?;oz|>h4aJ~;Jw`NfD-*Lkrotnq?gei=YF>apeLdBS{0QErc zY(7{w+%g2~m2w7S+ZL-S7Du4$c#--A1;@MqBRfB5_kaa~&UW6eHbF)1eT6}pN&@Bl z*wRJN2{q`%>wrFBg<*c4>x3V{@GKTACyeGK^o9y{ph#E0Oa*~&Y?As%h#u7-jMeEE z1=&jil$G+=C^+N>nav6Ga89UrnZK}%{>D_rt1&XH!V3-ukkg3O&=A81yG>tC!!ogo z5;R_G7)pPS&`k~ck)BH7swG`*g!4Iiov^Fx1s5WTVT0v$Fl;cc1EhcO2*sQ<)X=ns zUW3qkxV*akKoTx5{Sb=paz!Q#7i>B8h&T*g0B!sq#`*^?7!D<5nb7R6xRLMa;#)3M zjPTVD(d}J;-qxV^z#B;OJxiLzlJi2ZVn17tAV=GGb8i(!u(U8<62OEfFC6~@Z*jUu$I&yzl=FybNtev7eW|s8H`r~5rXW9 zVZmd@mr=9+k@rUM0wZ6M(8mQSuBKmi*J$-Ct(S-jhP=G@NH2sD|E8ibQ3?LzG1cXUhf z<%xVi&=F>1Sx#T31f6qXTv`}oCGbIDdYsuwpg=PjUSyxP6dYMP<9x!E)yA9R+XIA( zWdSH6L!_9yIwhb;AP5ccnXe!S?Dk9tg|Dkynu3Clf_I5uM4hrgR2-7g=9%vmQY7Ll zH%59F`_x2wx`ao#J4d7HNCyXhe@BN1K%wr`ufzceaga^qG@~mEAp09O*C4jZpV~@z z*u~Xo@r?(==2fVRibk^Ri(-t#8fBX-Eu)<%uCKrUJOx33Qb0|xo$KZy-{|Evsf)t6 zqzk%1{e5Sv@WXI2hSw^ za5ZxEkhBJrEf9+eR(Se=x<0_$y--Ohh;;R7O7(5@rLUj`x>h~)NgW~0$$^21aY4b! z&|V6sn^;BcWm!4wlgzM$-0bdb`lf`ikaQE4()ZtM#fs^86i+NgcksD_m2t#U4e4DC zbZ7Bms3w|ogV;pi8$@A6$nQ^W)*wV0(%0~RbZg;4da}D4s!5wViXMbamL^rwwMLwhQl9)e}_HWbcg3-h7yGgf+TXr`l3GHcGT zu{mEw+>{qryKm6H5g9KDJa;v{@nH)zY%}!;slcnRfw;_!ELdmjCEy2J+Pm|4fx=*i zIqo60R$kE5UHV^0je}jjIV8r*>fKiX@(K-UK+%zSY^)1@wua=wfZp zKvpcZBjeSO3S~VD+aki8++CE!i++uqoms#oNIgeS5U~=%>Q+KsqA7Ev2sapjbO7E` zAn3Vifp!wAFcm3lnmGvg$_ot6nW&V-d>G?rH&--qJF^5Fau>pJ6~Mg<$9@^7mKb{u zQZ9qMLW~jN*uKfDkU`D?#GQK^AeAymBQZuQGxb!-AaKtDsl!03Wsn|#crf+U$RO=d z3Net{KS37AARFNFKSL%@oeVMn_2@G-*UKPn#1`TMCQrk3o-q=e&H21W8KfCX88M}r zWRRuACQ^_4Ipk@UL3TrbDDFo9X^}x*A5xH!h%|JKfplUB%-05@8?{3~6)YpH{bNZioFiz)IwHECEhJ7f zT84Vbc)XB6YfnbQLIMaZB$%`gv>6RRWqe*Yr;=C+kT!G#HIrDb=mE%5fTRIU{|!)L z!A>KjloG*A5DTR6%4eiluqlwEioDL`SUB|?KLU_v!7dZ{E+xTx7;;ooqih{hzwz(| zGV%|W9nd*2vY|bftWhB$@}qK51-XhO9n7Gx}m4*nL3z7h3g>hF?H0E zpUL|1h_~u5`%y>!i^rH+Cg0501hM-I~pvv8TQfO9sC zrH(oz>&I`r*1zmWy$l~403Y^D4rXC9V|mKdQBN&q>wwYXW&LNIHd0>Gt#tlnjwV?v z-Ovi&EEc>!VyuaRmm?vL8*deh6T0ag>||yP0rL(L<)EO=Op>AHVW&Y}%iD z+AQlw+SG4?BY%+tO5hxTxiapj@XbYTQ}=lP0qQh&f#F171aN!pn|$ znIa2|8JG|K+f;m+G2xcqI7eAol({;$pkaWOTfuvZ75)L^6EMC2Rj7!&91( zen`LMr3tYKOOukAujEB#L9xN5rNOa5Ws}B#n^epQO`zSFx~Fh*#>f&ld7OFwm!tcC zFm_TAkoKRj_xqgx3*;>N2ec_*v?&e#0c{I73ptDaS5f!>17baibY1e%WEYeEkRSOM z=@N)t!|CHwU-PTD-$8la)MH#f9B!CM2q?~Ett{Q|FVeMT>7!hNFy(1-c)Zu49Dxj} zA1MJmGmItpn1ON=jIPJlj9sl-`@!qfkI0<<5Sc@SsTcJl9NoC!8M491_7Z_cM4_glHPm=Z|vZ8OQp}-`rRSPtc<~ICuyI36o1^JfQj+ z4_u8<2ZxXl2Zzu7`-(>w@gBPB~1EJYB%vNZwowEF!c+ey%kLPGQcMuwl?)8?>A=HU>Bar z!OR>gmb9rcz&IZ zMg`zB%o8qvE}*FkG+3jjmdY9Hz*T3@1EiTA1m;RU{jlgTuQWRG`j1=!N$Br6XnKYr z4V&Q6o2y>WWETLe6PSMT5{#2wCy;CmAJVqvYA|g!{(7@~q0n$Y%ruU1MOd}HQCs@1 zHupAGdn5UL0F#8Br>2dfjAtx%8X$RQL7w(vePdpicwU@^uZz8ouAx%9xP)u0W31z( zpWUEqtZOl+paw?%1)atDVJZSHv@W6V{T|9eli|`k(q&WC5K96cq2Snvg$Fn3GLg_N z#l6HfTuC$>=BuiR0#ee7mDN>2V0gNk zs)2WR6hGaBUWOPrt<+5=1*Mx(Q#X|slx#{JuE;D%FRMr`$SH;Oe1GbDg+Oq^H3zp8 z*yo%Gas{&v??ca9AWT3GA0)wl9P$G-;J(DYMeslr!;9lR@WK3*|Aa6HAri_9|BY5N z`If;wAUC;R5#|UONE|IqBoK9w2VdK}n2H6ThS}hDilroc0W0G*3K0krxS@fuPk}cb z9|CjC&B-@JIadpZ-!0IUv>$oBVDLmbHA%bM_eJMj9*Eo=U`H6(+jPHL>gv4_aW*x) zu_MnAb@DpQ%w7^Kt$GnR%e#;(sog0u^ zTEy>QfOuT+Xl`X~g|)GDYKj#_=s%^c6638jO%hV$jgAmQ#8AU6^Q5H6Sw;#Xg)s#c zfzhnUr2oU(cYsxOH2vQ_=iCBFFBiCgAWg7Kvw?t$poj=aQ;HQ(R76Cvg2t|>*id6b z#jc6H5H)Yq#NLvqDJE~KNz^1JHn{ij|LvZ80ri@E-}C?SMBvPsGc&t8J3BkOJDccp zOX6U1=YPvV>Fo02r~fgB`dYgRxvLDeU5+vk2v0as_a!gO)BSBT*!9s&vOvLAaLcFT z3RBS!z3EQkuphqsvrLRmlUunh1@a0r;R@}BoZmy|o^P!NdkqGGS;YwrQ>S~h7 z_WOd4$>0AYIx0`i3-yP}8OG=k0xeNIKpnw$dHAB;L(q%Zy*pTrQ@7=OeX)P{4}A-? z@J~+iuM=Ga&VN9BOLd#^1+Ctj$GIEoUuB8d>#I*r`?Y~4+bFlnBAI_JYVjM9x2bq!SyGAPtr z$SAo{+-$K2$UOT|qw+rc;)F)IWWk$ckmO57-Me*m;xZTNwmSNI+t?7LnWag`bNsq$Q!5iQb%#JVU8`d5bGPs) zPyhClN~Z^lpQ8h;1dG)Y3vW*(-J~0fmBg1Wx#BO{%!01Eqlt5KcT2&wTX#?Gp`m=7 zXG}lemJz%5MT?(n_;u%qfAI9uN$veTqr%;E{>9v9Zh;-tW+qmqH06sAG8ciT8BTr;h2Ym|N3v2lwhwaU zJ}cJwyGHi(l1f4RQHl*z=@2D>ZHAN=%P*bEvdc_pc;^;d8Y3v;TV4mbt zUeR6ME4p1f_w8n-X}{4GHR(~W8#MD?{}*zq&i;d(&%^)!m%Yh#%!WT947~NQAKVLC_jDvaaf8<~qW8m=g zSW^zBH$Qx4`Ju_&f961BS{OzJr^AEI2Pju4|PS3-|O3#CXnZ*Ej zJ7D&#Ko?3|u&aAWc^P=HLm3KJ@Ze?cbS(}eO%w9i?zOchl%rmQ|N29U2&9 zOYGfyM0QbA{?*moC0?a{(S(Oy1Xd_21Ijg9_fpYY*_`&LCJg9BkW$4#8=6$8p{#0d ztM1!*2i$u`bXBwZd?3|k+OVN*o(`dPJr8xADAn}?rP>8GExpsdq?OPhfgieIV?CpS zUTFq&!^R52AVqt>>b62AYx{cdhc8KwK+#jFY(d5IpF9)(o3xGj7M9&+rNwGu7hcLz zJM8>>q1NN=T`6fGVyEOxc(7=6_jMJ&;P2)YKGFhg6oKxF(;7 z3DnP)8Pc=KPwe@D{yD{Ni+l>+|6+JnJYvtk8lFvg_WT=t#u7k=;yL^Nq2bxol084t zKM%6IBuA=a#bfsU-?CKTPsQ`^hG*c@=ReppVQq&v#aRlrm1Sn7)Bus&KB&Ra`mAS0Qx>k@dtoYv~=je@_x9 zUyFNsw1XjBBQKPKc$n~vU9vccT;W3~Q~AQWGXmYm28UOfz*qwl%)LE`O4!S7tQF+E z7)Yh47U&%mje-DAZo=UvP9}^v;Ip~}Ceh35a-u5*YNK30RhtM*vRX@FkJgvTrz_`5 zxiXq#9K6@C0?Xt)Z$bLX|aIwOxkWap54M|7QZa|pnpi` z`1sBt{mDT>ug2Mr>=xf5y9M@m$n^;?Y4Qr1@DjO^1U_MlSbMOq#(C=#;Rw z|0RB*F1bTATQ(4z8xu?Jh(B)GAU?h^5rr^h%g|pd@Nd~L_~?I0v7r96J={= z;xV7~;D^4D-@^awWbaVamGlbgMqhd(y$D07{Zp8Gwr&c=s}|-$^x;5NgNg;#Y-JuT zLg;ZSSQw;@S9p5Uo9F2XXAdm+W7j{4ec-dxc1eLCD;#FK({}NAt(>pz7+|tfZtkL0 z1hhBVC0IYe%gK`qwX*S(s(nLvZF6(~V68%94b?ZWqW1;2pqhsK2LHk^UEayD_7l^8 zXDc{XXs7(&^0s2Ulm4AI7h54ly_GkSmDW3|S}0DU24p6@szqIe_KGltd!TL4-{9%l z&&K!q+@B_vigeLNTuol^Ek!GFGkL6Y6IDo-_cz}b@JjMb-b_p;yUBiSTQPy`C%eUD z={KHJ$xgB#4cnBe%tYD1H&VAtJDbZg@YnX>ZtLW_Ht5@BkkgyVDam1rm>xTAXTr<9 zedW?mZxfC_=zo!4#WmzP$r1gC*G=(DZWe@lN`{E-h}#wMD?UKnK>lQp^sV?C($vwn z?TIVik}i_ub@)%)o=PnyJbhYMH>D9gg_co*Vn%tcK1n4gW*~^u9I-~4<)a<4(}c*n zN*Q*r3W9-B8nan4gN8%d4h}R5M$>z-X&*JccxGl8o)I=16QOPT+;9uag6)IT=R{ao z6mIVskRE1Y>fN_P$MjGWRf|4p(dB)J^8hj?Nj%_4PCAHbVr?-|XfFY-n>&DvP1KzN z+;N4d1(9Xlj+;L1sj*z<$oq|0#K(DHKKV$ucq zQ^gO-Nipi)NIZ}(<>iFF7B9fmH6_+JzP8(r%k zD1Sk;+m@bDmJ{mi1_xt~#eP!nn&9qtA2`9-!@_ayk>Q9aEn%PUg1cAQexH z)2$?@#mIRF$q90pv(Y_9VQT8{E9XG7y2_$uNw{Zwj10kXxwL-_9)Z+D)O1H4BGh3V z`wJluYyg++1fmu+=ZytNg9{;^QUVLx089Xs(%&9zm%jcHS@Y-{PrB$zOYmvxtS}3U zF&opW+f}E`4zsizw`pMNtnL;r#8qQ4c2jCvS*V4@xQ!|F^{g;Ui?JJ1Q)h)*I*Z?H z0FG^&7OrxsY=@twJ-;}rG3yc4IxQSmbo0F{+oVRQ%z8$)O$P==GYju^*u}i37(-mz zi9fU;1DBq9YRMF`tnC?%pruw1zGRQzop(S z;ztNkk-~A@N%D7zn9n;Fa>=^gh2%Tv%muPd(TuqwP803 zi~Pz{xZ@gKbe*W-BFKNpR~p?JFFX(r4ztm%dBAzgY3U<&QXE)J*)FIGZ$xWr|O;1s{Vbr6V*_I<~ z=W=#)b)^G2;j(63?VjFFPMd!h@2{tSI#=6wnsaW_FyP-9&&Re? z5ykumrs7g^n9!7@l=WQ$YdQ}{F=?JTWebE43GAwZ@M*Uo%rNR5r2x$6Z^t~ zVcF1xF7Qg3Ik2~d*)Qrw({2#L!q~ zQd}?>TUR5V9?v}=udSp%K@ZwsLVa@q9#=D((h&YR0%V{^)ta)&d?p{ujnZz`{+YoS z5B?1#xv6yng$3Luv3VUf)zEYqB#4sJnXn)AGA@$q35u!EG@6QoPBUo?uEF<5Jcgw= zO?{^L;n~Zt<&|e^bNM_*S^IPlKM|XGn6z4n;CLo0hrB@FC}v^d3=5+yT+N$s3GL;d zx38VXkL3!!!9L;poCz<}Vf9J&h3t;e`b@;1xKxR+bjR;}6WD%WW4@T0`O11QU?DlF!DVr-m+0_qu>a8zvSA#%&b z3L<_@KM_xy+eS~$KrM0c`&O+&*ibhNJjdbNNsZsq4CwffZrzlZK-XTpIJfPbnFa*7 zHB__ej?nLTPypY)uzr^s<&_f;Dlavk{55d}n&*d)w|}C2so{@m9LbTn!m+v-!HG&N zt3Z~OrNV{=iK+9)zMD=3*3MbBt%B5QgeVO*|Db}km3*$Yk__Fp4c}hDx0f2fg~W1w zafSYnxOS#Sh;}7&mu_8#v*Wa$wrvwP((h#gK|zI5g+*fe0*{nwwwY;jvX4XZRV!Xon9SowDJ@{+M@SG>G%H-;dCrLwX4ABphuRkDy|5T}|J zEyDJQ&&1squZhKCmH6{15BIRWI7*xRZ$j>w@gur=(bdaj3~Bdu$;_WHw^(rfqPSZ8 z^sC9ZNoAF;%It`$Et^BpEGi-JyxDx880Py#Ki?+`hpAKY?MS}e>M5$qDWmMfm3Awq zJRhyP=;SVtV!_e*d1t2hIU#S9q0C`1QBo-+T-HEsBqURZky(X%Lpu-wU4?p$ui_y&WDKsf=4@r1-o#hh0FLtlhEb67p+gWGa&A&N-+)JOkBq`& zxDuoABwR6Mj7}Ec!YF(J4pO#VGzHTo7Z7&J3=Y4n=R!Ddk=m#a|+ssyFc0bH?S| z$t^QV=M&DTK42(1VHE$CWD4Kldzbs#DE=SP(EB<*h9aoro8^_e^6X^B8~AR#vr&9^ zuGT2rgZs=VyakU4j1j(GyqQtBw{d;IP=uJF5jwtaq0{m$_29>!GfKyw``swK75Bg> zyfxp^D7+0118qdkLLu2GyqKS96u$T^?Nz~lNQa^~+Uqi0BHTjp2K)+FZxnw0E%-Oz zf`5zRW3ANS`Q8{Jf8{lhRfu{gY_v-r4y}AtleTEM>z03)4Cp~U;FvEkd}h34 zl#UI9m+IlG8Gc{DdA7&bAdfWvh|o2$>WCP+#z`` z;As-}Xo}ql(3K3DD?wv1r>=hpIh^Xp(=(yAbXzy_YQAM86SZ6r9jJBqAs($S&TM$`UDs3}ON*h=VUfsw-rn0C zH#iDzo&Bx5bVoLxPQks*Ow+~B+=i5ArR2tjhW6_2PFlC+1EK;Q`JWPkgX7}51ktO1 zpYyl)mvWuVM|q#s+I=`Rs2{@d#{>cAGkuibV1k7bAR>wyqWD}C@E!3$cXmQB{RwqU z;UQfZ$F+0|9T;SRP?RFbE1;W+Nm_WX9+p-EOXRx2Zaqc}P;he3(CDsR;zp!pl@F6) zdkHQr>kn3xBwY!4z0#57%FQh-$p}g0$&yI&LW{7NE?xUkl0!k#0wfbb61t6ztqoO1 z1#GBG@^P0mRN%1Q;bA5wi9z}b?Bd5Ca~)cqm73G9bLZIZ9;A&_X;>r!QNf%Mh5ddj zS-SpXXrfp6@ny#Oi6+M3#^o4nOgOY-d+6Fsy{@%qy4F#=&IbbG3tj72=SbU7_9J~$ ze@9Hhe*J~8A!u5eujwcr5{(Aaik|JGE1vxiWQ#uU(8?##y{A&H~2Kf0V_%*~tf{RJ~ zdCFH$r>1^C;|o0%cHo$)52*uvjE6Ez^r$|{QjEUOr3ZvMJw_O^b4Y=R|50SyC4AgsNGBomz0G0WQ#qfbKm7+Y!c^SPjI0E?dDIEh` zHlN9w34H9;1-z9We+;8f;ombl@WUCMd`2e|bl5zA2ESODf+RYCd(k@KdIJvIDEmPF zKE8oZAmc;dqs%H=3d+T+0ZOIKm$*}ELb0A2Eghp$pD2d&Gn8e3=StLxA2|RzPGXc) zquc~Nd<3HdJ1oS}mc=RomG+bd`!o$ zrS-%()%U{j+NX>YwJ3lxdQoB~HxNs*K1c?KIyVcbcdXxH_=dH3lNlAhX~)Wfvh%2O zeDxudu9}?YCPKPDAzkB43w193m*P`6_Y<}L4BuCRvr)Gn>z$ydSj%MlhS96x=1X-C ztF~4s#CLi)k$Nrgg?A4FQihj`eblg zM+_g_DV&u?`YyHp32&?T3h(Lsm}>pc#1!-Z$9=nv@ETcP2@d^foKK^a^}`vTarI9G zZ0!b}LeN3Gu|CD1wD0JlR~1y&acG|@LZ%){c}gQE#h%>+`?`cpDK~-MY=lyc&{M{7 z8Y7fzWHFL-X!!y1WMEH}+R^~0v=wy?R6Z}JRNy%X)l5FLEYlNzMrA7sww0Cfm{eDE zBmw^GaCq(#9DQahqa)$hkPehi{SW9fIgHL(&_P|@0+rXGs97S#Odg5JLN&uaFJXUA zv0sH!Y*=CjH^@%0DZB@h-2*&>7!`zt8JvYDWgaMN5R*|4msv6ysdYO*;d-lABejwE z0yf)|+3Y-tj_ew<*(voecyDHhXbeZGswQWs9g=kfoyOP>)t44b9>BSunH{3|z7m|- zA<5fKgZ}-^^iLWaPT`gfs+SbLvI)FK)|$Zwf?i`phw|yg_!Qu@Irc4~g(X`r=_P|Q zosyuEUKXG(N?2V`DAh|G`3FymE$O9%E$QX!G$g%bP^Om>uB4Z*Q?8LkOOzY*a+|(3 z^>8XR(@Tkpq?ajZ!?(;pjn3>Ki~K)yycF6?<&j-yo)A1j<*y9RqJrqF$C>Sq2iN@+ zE_c`C4`Oie)z7Rt>ES;!IGYJkd@@cCmu5&=;9JG$x75RTG5qbIGlszt?4k85OJi_o zfV`B!UG#7kQJ|3k#WQ7;%t!DB+*fuU*B7(zNN@TbTE?hnD4PV^8A{un8wq^$ub=h( zOM$yCQ5%=^T>i^?sVDB2|AfJ*UkB`Wfh~iTm+YBvRe~~mMm?Oy(aFd3bAf%q#;6dy z>&fW+DSdI1(%A?)Sq%E41bwQfE{7#S$!LjuxfT51_)TkM8Iwti8mVM8qSV7HSWSi3 zzmR+sWI<#2dLM;p9p&}AUgB6d?1F3sUyMj%GJLR+IO@mKZK(mu zWS?Js9y|ctPyC(1Ef5E7%%w+91o>IHv<)>iv0CEuYb)E?+2Zd*`giGU^FVw#z6=~F zy~hGLg9ELZYT0}~1aL}Eg70_*4t#|S{3U!}RW;X7dM~WQZ;qzE;_T2u?9?1R!hzd> zlzZR3azy+AqLOd;zqIC}2Sny%`_aViNTA_CgSP}RP(%RPF;04FQp#MKl`rW+rxj@BrmI#>JA4c_bl5iHC5p5GM>J{#|H}ZICt)hXx^W zg^nlgj&cZ~dBp>WtxI;AZL42>mN=N0dbV}h{;Th=;_qeSiSmI>=Pv#EpD9^X4}Ykk zdO)0358zW|_c$8?X@S7b3)>|r+r5Gn-k`&f1bm@TRX?Pd_7FpQ-oc*f`!66CD*U0; zKt-&9D4qld9-}A0N6PLIQP0Urae+DHy@!Dt#PLo@=`RE8VqNIcmpw-7!-Y8+_~ zD%t|q*y0~eib=M5obw>l@Iy?exo<4a;lGb)z8fRo(Tl-RQ*!*{7flcTkXW4aIH%i$ zAM%^{M2$}oB-eA}6sluT8J?KQE%*;{2AOhiPBj5H7vUdoGzkqcgDBtPU(wr=(J%=~ z=&LDd5QwI1=_X#LnFc1Tc$Az}Fa*J^BFvC_YU;QCh(oXtE)Te+i2OY{a> zp?7iEAHR7Y>9@QHzfr-;|Hu(LrkIK^j9>zTVxrlt#8mmo`fy64{-l^ne{)Lw28NI% zrs}JW$`4CRRmva@o*|9Y7>Jii<--!Np@g(58%o(&QrZ&r!!dP22m+I!_eH8hH8M<6 z>gx^83g9_MB{*|d>i-gxWOwUn!D{qFJT)IMN2n5*5517kB&lstthsQnE!h{$DL^;d zB;4T^D_cm-00TiU9aL`m`=xj?6*Vo~A%5Z=GVk0uw3r|~n5}yo_u!> zzm84VbBGgv-gBD!1s^G72M(BsPl=gHB2%zPMK*sywFqKfmW3fU$&-y}`v@i7{7W}k zH`EQ?_e(Z{0<9PEI8C`EX-@=AyoBO>rI;nmRNVokt4%0P2PG<#RijiPv=p?exxEdd zW#|bjhZ)4v?V-I#Pxlm~d%X!=-zE~$G*1SS$5&j;5EJ#(6#c1e;+ZCtMleb@n@}2{ zc-l}_n&(MRNfNyM~b?p zlzsJ-`IVHiLQh$Kth~IG5=PYzk}K$h%y2Hzcxov_QF-!~UM+?gNGo3L*rcDn0D3;q zygSU|F4C=Qy5y^RK;T|LBgZ$u*RX-h08atD$&xSd_cDCIUkSrMASnj;E8+16FeOjl!px-py`+!Vi_z}8wO_zMZw*!UO|3|m3NgzU@dO)WyG9J*zX#t>K6iR6{ z4H`TkSkeKFk05*F)Ed}7Q0!1Zn@)Kj5K|^eFWtH(Q&eD&q}V=yHck)##e9j>f1_I` z)7B56X9l2LO;h)JSUCHIn5ZmfdfxJ}E=KL8Mg;{}-F!5X#xMy6tuQv&?YK*c;~6^x zsW*XVXxa{fIU(TJ`Fs2wYM4}ySR*~6v01=x0*=@;tm6*ZP7Gc6f}4Qf^!lHBbajOa zL}=axoJz##O;1L1&0ckYzb%g^&l*|DFNsgWL(UT;yAN3GQ>Qy&@oTM&5>tB8K zE&zY8Vfcq|_>RZI6@-=op}&(W5MH|ZnZhmTS}jhFAX6932SL*}(bL-0=kfGNea0_x z(IGW=!PaX@4i2lHY!Ua6g5i}}&1@I05s@BthuC%0+1ax`F4*3GW^{ff_(2a%f2RCo zY(b9vAwSP52_a1RJ_)XYw`SJuCn^Q}5P4hqVMz)PdM<|^y~W`e9B#6uhsEg?!9u9+ z)6Lfh+B>ZNyNV?B)WvJ~d17*kxVv(*wV7|>@Npl{n*GrjQu%)EOU`Q5nCPi}A%`)3 zL>3G8_YAzdK=75AHNES{v&lidX5sTP2CmGpKI>TZm)*FtQ!-Z%wQSbny&0p977RZ) zF=k{)=-58}3cGKQnmV9gaYRT?@8W~QA+sB#nuR)2@Q*QrAhH}f<%6`Ccp|jLuI{BihF)LDE{!t zD2Ty{(Q-tC=msEqft7UMkd)cPaQ_=UNhaNLZF|-D%i#gmlELMh=)DvW|G>_Rue9Gd@TO7 zW$u+(?HpWdh+{P;#;FB=H|>{Xjnnw@KQ=qsMN~~4eP*7z$Lb$T2k-b{-T`8<8J(c{ zqCL$-)%wEQ6*DgtCsz)Gu67e+g@Ljnu3j~262^Y3f2!hgpQlmLW zTrL({JIr1*cu9)c9lK4>Rxkg2q_g7z5qEKtO_LLIJsp}K6@L;}e&%|JG`~Gd?GQ)? zyc<+FZ`;V(%evrFg~GQ?Z1%6ue8(9CxHHu3SjFZk=?qVLI6PCoY1rQtaj zmWmMqS$_2-+9A!`uIvMk{4Pn}#ztNMK2k$pQ z@>QQ+dv-oO(`o9}6=RnNYxZB;Z{r@@A#YKFZf2(AjO#1&D}r41-`H>K(XVx0dEy&KNVgE%e~DV>F=@wK$rH{y zI;TwtJ;=(E9nk@o-eQiNEI51io%qRJ61-EhhqU|sT_-ae`@W^=d22^`MaJ$QbM(;= zH&fS(;uR6S;KgEP(aVy3nw9s7?V|&u%EyrTK>yc*pjxp!rT{@Poh+BNWK%=dFaqR^@+^}tJ_rP85 z!R}+es3M=$wq*+H1vP5NC^ly^adyU>8awnkx@pWyO*zmI083ost3O)qtG7HrgHc4} z0%nNzPwt@O+6NOI)Q82tHaV{Uwb0q=2r;X4eD}JedgBJi^*7Y&sttab>+>uvqSG~F ztK;n)4~g29e&f!}3$?YM_t98Q))*_Rm}UILAs1r>*Q!mKQcN zchZT`Yo0G(x%}Dc!(uSG1ZIem$Q4XKO=L0;-Z#L6!*z6m(+%@#;w28#t^AF*+%Ek1 z2>DchtkcMEQJaTP#^V!%jdUXAuz5rl4PQYaKLk$(Q!{o2Agfo{wqP&hKSTmI$@a1E z5SNrPd2;a$H`iV#>HHn-=qUcnTIWIU4$0h*HGE_0kaq{2^oj~h7~mZp93O|uz(68b zp%F3k1t1R_)krmteA9-*CC(wzAQ0S^M>7&S4A6Y9DcfC_d|Ns0z@pCLxQbJ;_Ufv? zDq`kGr1<3ZUi`j;g=h4@zP3x9NlvSxLE{$0SXT32?|D$%ePpQE-(<|GnM=-%baLKq z?OstjAg7CyViQ@?qU*@2bgd_vjIGCGO;EvB*g07wqP6RhhTnlU7PY*-p7gk|GJr|S ze52PBxo`tVPw#GF<#O=EL02osghly_&cwUAZv1h5-sW7~^*eQR&W4fpc79VGqcfZD z6EY7qAJlu1Sby-aShr+L!I|QsQ)6>aPMh|@Xz1K=)Xf%fca?>rnXuQC)eiFlJVP9q zdLaED>cm^AA0g>xONS68+hZvu%OxIp-PWg#m@&MoeLH6}>+Q2$e0&6@Q+TjvU}$@f zNFjTbeW1Hn_kLp%gHuA3a%WGMX=T=CF`0hxi)PP{O`FOmm3GaT_i%O4qKe3zX}d{F zC$I5G7Nm_0Xq{2&+1ev<`M8*}94{9?vb$td#Ii-5$DLjhmx(FU3#`ujK=u}pT<_*G zBcpF)+(4^uOz1i}5nh;R@sZuCPm8M0hC3!Wt^9M_?DzUxe5A_FAG;{lD#_A1M|>EV z9+JI%7eS<`Y^nC@Ws}|yXp#%7N(3^D#hAHGlgt0PMOXw3d zT5i1Bwjt<>s!~f%C-;Z0qboUX+Nqb@;CH%N^UfRoShfD6evZ8LqHmVkG;8M7zh7_H z?!2{I{_r7-ldXB1_-?k|PPr@7#raz-Bl@}yI&yBj-EQ5Ya_x5eNtfr@a7bN|kdWZV zd)gHq%E>uYXg7IZYo&b1W+;41^nMKlWxh8QN>XuThc*lq{RO+wAwk0;k1dmx3$PxGNAv~ZXeD0$+mr{Z|hK%X-LPQtY-J^a<}Ds z_w?;x73&&5*m`(eMP@~D!la1a(Z5c@^~Ii+3T2npf#H2(&=!5g}IMg_$W@Qe;gpff(UP=t*s3T`B7jHA!;vqCXE)^3tf2nxi|>20qN*bZ!( z>LMtR*(#*{X6~xJ9jcpVL-JJyc}vEfUUyEu%-@`vdX=OT{xk8xrpVDL<2Ma&7d$0r z)Xr8r!iZV*!V7tsJHO`so6z5xpF&KD;M3l@oteU|r(@p*8JP=*$GB8z0=p-75AQc- z&zZ!8IdK{B?cQ0Nf3bXps5@UkQsSofvRdq&JTJ~dm~3u~qS%X*g-vvYMebk!9}(Af zN)XhzuLPkJBH2wgOz~oduo$?(Xa{NBXXZz(^wry8RuH<5K~qs;mP8NOqlZzQotnIK zMB8TO*_A_wtRC4PtaKXB)&g*;z2nfUDsmbBCtr{Y9yt z8rI(${lyJM`HSLgOl%xRjmdmhecoZ!?FqZjg}Ir!Eq%DN=xOGi!JKdYp3HWcd-BI8 zWmk`=e72o8m$g6JKS!^7e%4|3tc& zXeu7skMh|5)A~;~-sT3~npXti1|1L2{_r zb&#wx&kRv};c1sNArF8r?2e;@vJH zuybrdB5n=76BZxRF+8j9kd;Zf+XuxYd;9j_!`H4wbtAwl45vGoP1t(AS%Az|_cP38 z03Gn4h*GtCN=a**h|^FnwrF=Qy@PAJ9j7B4E!E4P>@3?KZ+>1mEI+?I-qO<2E;l*h zMgsp$_3Gyp8RgF_^A3+4v~cX+jEszZ^K+UV(MD)c-yQ4@s{V!a!2Pc=x1wcV>2p!@U_95XZ4!ryZQj?O- ztBGxhFX1;JsVyf<6QdC-qp=oh>|rd_l4XB&2r|qbNZgUfxYpT>ba9 zxapa0u{V1IQG4y?pRI^ng`*wG6?59g_`}U@xtZ{lEu%Y#Asktu zb{uznY|F_zQu!;jGU3?_LgqoRftbsALof|Bs6;6_LbOIguu!FdglxL*r4B*jSZ3K< zz$3;R$?K(#Fu&dP+_PB@7N&jU<5#*hU*5ds&+m;})XUt`uJ|(X-RXLs@a6t?mNAoe zjXO}$viVYXJNJan@na$_9J*f@@~fxboHcNJe#Oz6laEC)qw;84$qz?HRacKb^8MuW zW0ivm_xNPZ#O0Brwfzf)l4G~|xL;lL79qQEOywHAF@4N;5tW0ej za`TFoX+CmZd}~hC=}2c2$2GsdJM&Nq85A-&xNu);$go2N72{`|9lb`kHh9^yH97R2 z%h_U)Ktq?dGCwJ>N}Weco|x%D#WZB+l00t<u15jyz_{dn8|~~EbwQi;eBFC28Y>`;A^+;?^g}(*G&@2c!2Rle{5feOneF|cIRF5hvb%o(P;@)32#np>y z$&r)umfDx?F0tqmXYH*vbLuVbbmSr(#jNHIcKepM2(fgsax)*0V&dR#vAL92>U>iM z2*NrOGyaQ;iZ*9vm&}z5t2nNG(Cjm){X*C=7om%OVDzO6Q%V|WXDd^*qhJAq3d8I- z9UV#Wd%L)Ot|E!>oYCdbK3D6>*VjZZDG0H+WIlRG(L0uw8TmL| zBx25v9@f@VFU%HwPaNJzyhkoGx5ytMp5_W;$!2lf;LSNBw`B{N+UU^(lE;Gh91#CT zo{L2BlC!5*1NeHN+UV-h8!#|7H8^?*U{RO-?1JSEM-K0^=oCxtOj+5?v{%BRH4|;E zW?!7VZ-@16m48Z~;j7I|;xe|bnrvr1wjqF>?1>jb)H3|M~wAHg2x!x+$S(AHdwZ5DeJlwk`Jbta$QeDQ&967O8`^@7=(yPG%jUw*M{-oEhPc`N(j!%H`J z^d@a{Qid%|;sWiq)i3X}_)=Nu-oIy+BU!v%!J)i;Q@$K9CqDP!eL<{k%emX_CbIHb zPxk<&YQ4 zD=DhUrtEK23Ag%-;JwygYDHF%(#<_AUM5t(Ik1-f5_ovhApr z_9F+PS`hGVbMr>+E&ePk(agWzYf_>eGrvrn8#ns!n1%}3{S=E^4Hc3!r(fC^Z3bF>}v(TZS)wqqyR+Qp2`n`Iw+v(GK= zgM}Xs&#qZKe*dttITMQ3ET3E}hQd3JgxwFuu3?2h7t#b-IQr}X=E+d!%o+m%H92$^ z8Q@6`IUESNyGHwyE5BSjajcqOnwO%{s$3U7JNlx!x|(mLZMTZQSdFuYEVQqp;UvLS zeh_VC3L9$=m%tsv0v4;W>qiM7S(FSny?SQ9qy`Auae@xCT5W91I#BOux4{}a-ls%A-=GA^q&dw1- zxo9zbKK((?>W;Gq^3fb>k5Svh5O`o45Iq`4DcWyBXkA3!IPdzy-sTa_ef)!x3#@)~ z*6nZ>cD#RDrKna7&+4{r9ctAb@!1ZjMTIcnKIpMcmUPl<*!cuH-qh{IKVg$b>+ej~ zJUjY~^Lughg}h@`)n;0GtOBK|yr6LUA4cU9Q;N3qa*(3(X^6@@7uOK7y~D)wmAOu4 z_U%UXNSvNvsgKI9x-+}{Qzvd`R)fk_G;B9P-W3Cj77C#-LlFfEiH@pWoMW7 zv@G!@U;L^oZaKajZhZ@JpNC*oyVJXOKteoXTX6$UW^Blk{_nJvzhC>ufC*v$2R_=O z+l@Y2>A_cgwB>~}&gAQTwA>xwh3G%-eaxqUG;b+GEFva6lBd#;9$f|CB_9?GZ?LE# z)P2MH2;Ok9!{!d~Q>x6Ioh_7tTWrN-C%fM(?~ORSVQmsFHk=9l5?v5;V{k(O z;!RJSroR{KIQ-gh>z0E&oI>;ZT8Ef=wRQ>d=`^MD(7wD`C&$HCD)(kxsp7Wl1{Kd} zvv+U1=@ZG+o?T5uTn~2A%Rp^h@{uMC?NZOOAw{f&*@*7-q z>s=<%`Dh7#aLZxVuF{z({Rbt%r}78*3&sp?lPzNS*qTD4ZS3kW#Mat~N)L^BSFHi4UG@+=g?l@$@4p4!IEZDukHWOiY6KT0o zT&CeGG-Aa@{&;n@HX2d&S_B4UWIo;6f{8M_6Iq%sQnyndB}8LUV~2GevSY&LD0_ut z`SJAPCGBmkxzWyI9A_0ds7pq;x!ijA;2uL-4{yJZ%pbO|z41;` zf(tD5F#Rng>=5m%#aUmF8A6d*_9>aco!u=u?%>yovpyp;mn@NcF0GA|A7uZ&G(6E^ zfm_U9meYG^;3$Jo0HM1m^aHSGh?-U=EMLuM#QL^qA+DW!B7aHpK&3LI@5fF*WR-Mi z)+}p{kU-M4-}3Id>5=VQcVACpOL|9@#8BcShN}c|3)+WlL!`w;DOd}0${NTpg9<71 zP`7dEKiImxP1@8bY&iKv6VTB)Jj|@wsuRnVZVq<-=G@#5%$iNgEI2S+5bw%WLf6r~ z`C{#wmd$YoE+<=uf-F;7U}&d0Ky{U^UFat_`o2p?W~K!S^6RQo>zcLdAFk=xUhQJq z($3B|zk_YoqJc%lE(1@++jQ&JDIri{r=85Nh#uWdu2Ssg6fU|tVf=d|7FUY`xicyu zG#6|WxEb6uRz%y^i%6#ho(Zg2nU<_r_Fnb5r{*qgQjnuuk@j0^n4#x%>!BR7L&q_E z-gD6fe0lmgw&vA-W?0@9?$OK$Cg>D6lXa2n%1)o8h-c}i2Ds^MdJ-BMXC{%-jA)}m#x{R8?9Ne;_3Zt2 zd_RWKb)Z1F|C#j{7m z1npvSAE}}Rs%nbIdY{dn^?FLbk%xZ3p>tKP+4IxS`O6t6GIZm4`!6L%YA?n}EO3^4 zBF;=@)@HoI4|iHNGL+L9r~(_g;3PX?XC%F?MA~}5X5`>YXX3ESxKJX1=TT0jC+6$O zPwt_N2z4y~zLNa1`on&X-A;$9r_K|d=Iv33k^^NmW!=Te_eq#vl#+|Qr%M-8HT-G~ z8N2LaUd|^gko3A>enrK6+_z<>eaemDM`(YJP2#P!>$wk+^IAzOL3L6u6W!0^gVC-X z9b;fMjLAiZU+XvI<0`v3F8g6M`K9v1zK)?EbW=~8BRbC6r|zaBsJQML{(a4>g5HIo zX_BBTKFP_uxQq(AaM`kjbPp~jhzqN$b;U+S#-JjG3p14y5zh&dg`q1M0vZj|CDb3X zQ_%?&Y7;R>8h@Y>9y%qaV;_RZAp!PKqY#onS}(bjjdi)-SFW&IzHz;^UC{#ZR`9aA z$8PrT{<=6ICe}pNGSV%l``9k#_McVyC(cPTw_CDyDe1iI>I(5cx!-c~TbcVNP1u(; zWAU(p>AJr^`C#6MBfAB4jB0B+*ClOM%KYiwCnuN96Rz&v+YGz1t(&1E{s}$!lbIN4 zFZ(q-n=zBok(r{)GAR|%A~D(Y96F{xl?#7ff&H}jJ{~LQ`(c9g2O&)h$YRMfiHXcK z`B2jY(^J54^-qESop`qC213)@+yI6TzAQC{f?y;Dp5fVtK4a+ZDmx^u2RuSNtC&Oi z3D6AC{ghtm4mfm&rP@)@)I5@j!@*5zQY{(YC~g@usVeaQkr^KBC&Tx*1WX$7{SClN zp?f}p3xnSl!WkS#`fxB52^pk=p{65oh*xhyL7&1U`Xk9PMn4|T@qP-IyE8f?W$6qq z9rUF5Y+(p=Sjs2a4V1f-(UJI$lvOc&Y7m|&i(fy`X*!Zb;dl#YFySzn7+GmYpk7A+ z)*|2+;sVwHtZwN2L3oDe9Rv%9Iuq`OTFkm#<<-W^-lJ8|xUR(NTV0Bp9J_tNy_Kgu zZe;BfB%b6>&7G_3#YZgR-NbXfvIj?mXAJ2{x@mu1tUXSqgr%j0g{G&4iS$ykg^+y~ zZsGKz6K`)CKGd6JwP#xzWMc}?6dhhEjsI}-NvZw@1X!&~Oxr>PT!3$zs!7xe>XqzYmUO{I;t(8-xRL6Bs<#xU(WEa>-p}-Z-Ok74;6v%n7vVB4f7@H&%H6o zVsR1^rgb@syV=Y*GSezGGjp2N%tJG*QU(v6ZUuN+F%#JJ5Z?xHAcJ8CXGI+*YE7 zQG}hu?pnGtneV!*{f2s7rC+na9?lW@_U&dwIJfEUqw>>La^u!-7L#$2NZ-1ur_nw6 z^@08S2d?LuhqZqyWQs?$kKnJNedJpiBYJ3yj&F<^bED~92)0I<3g3V!k=1TGarA_Z z6M5k@xGDSA-F;Opb!)hB?`|=$r?WHi&ieJ@v?hYMg>`5YM1UXx5Txlai%BXS&XjZ~ zHzxELW)URSkF$tC8OB*b<43GNBRxO8>g#-1W7DoMQLIUtoob^YgR%}~m55*5(rLPx zk_-2Ae~a%pbAM>6{hFn}{7(p$g~-C8$ofc7!=y<6hX=donhm(pB74Koi!&#-`U8a_#H*mPhoxoy zo-ut!mzkT79Ie$n99@b(Mv0ZqB)4R1A1lkLCkq~`v;{i`SXoZ0De04zZ)%#B(boB+ zhQoehX4I^-v{_M!vHcP^C-&=?u({9tjLeBa?ITLFvKGho2n*}6S$W)W5D->lHdd=% zAeE*<*v+g35*7$QGHX!>E1^(aX2;+?QOsl@Nr40PnANtW<+CPvysmZ}9Y0179A z7(5wM4a~+drRdAxw+ZM1e+BSqY7T`E&z=|PpMB^vo6P>Ar8{)ZA(b6XIV8N4=yJU& zK25TrkZjo{1|KD5RhDf8zbt5wkMN_!kI!KAU)doBy6joO8BzSDxgwo0gQv+ zvae}sfx8r&m0Dpsay=~rv$HfIjFehHab;oyl;KjV3|vNmB^+o-Wuu@82S9;AnH0}~ zt4~8R>#)=#WA)A86>KUGlXIPirIS~JeeQ9O4k*Dt7fW^i`TgP-f3+mn7*rnU@h zo)wtU^E!Dpcdls4&0ffjxR+GXvu8!pJ>7~qx`lNKOClncB-CL9j>R#BKFqqiVg!cU zu6~{&H(RCh8539*`=%%6kDPkNwRS9e> zn0ahpl>8B2LTw0^O>pNJV*KtNVN+*Ba^^2}wx+uB=cfmDYL>}$noXLELzj|2$nMKg zrBk|*|LT&77BgUx6VGJmE{SoYNHq6srIw~epdQs0c%*Dh#a*reZZtw-?C6%jEXiIiT(ZN5hV_6 zDd)G0FX}hp$jr!-;;u&z#a-0pFwD92j;;IyE`}QixLzD|!|hNACE#WZPE{BiP&kDN zx-Ghu4&9Ju^id$U19c6i;{82gBt{4fQ5X)`-XOKDUUBH0>-brd*6o~}+(u8c8fItL zETYZ85R!3D_bxxf-*f$dBvFxbJu~}i4!5SR3iOy#LnZXpg7rgMEp)DdKpVzaUxb$F zaSa8c2$vHUjp|`~SneI3)~R6Y2pe19X$2ue;zRAWd*;2fW^VYu`JkZZ?JV9!T$`_mJKjDTF{MA%s8zp_e4I5CQ2` z5kisPL_kEWD0VJ3^jZ-W6&1T;Lm}C7zW%YL#F95-h05Hm~q=0Vxp%HPpBK5YO~+7e&I%PxF{y3 zh+fiwX=hj5e(hhgg34y*`2}T1x>j$0V&tG@#qRM%{S)$MmIeVaLvffYE33L825IT< z6b@qo#7rtqwMtfI5=4llIAEjC7(F$IY(7^lR$UM&ABfJ69Sw zk(82iuwzoFl?$+I(zc>t?gXQ<0P5)e&}r(#vx<-hCII^7WpobhGLfJHI<%gc7zZ6) zuWwV#Z?`>E;^egD`_0>)O!71HJxa_T7_l_nLQ}cBIcw*L!9|VzOG`y{Soz>p6ASGN z{uP?HqSn%B)Vi7#gDo8=v-eJTWy7YA=iN;bzsviajC(m{c4c}?sbkP)cc%=`fWoSS zA1%xub|7QX=(`>oys2jGU1;AUfcIf^d)m%)C>l)LsHy=N8`)IoRS`NJb27snw(%** z^*Hw1ENjMm+OhH5H@VGpyyKmP%TE^DN52wdKe2hjU5+!Y;xZCPTCjc6_AYXah>B*- z_L^I&S9k2Up0<6^>0{dLJ71YF{p<>oEj_kq>+1E)-#1Q+<#A@Dr>H9V+;%dCGjbPt|od-ufxNzu!CxPZh45L-b6k?|cfzXot^yQ{> zVrZLKp7M4!Mt0~M+3(JGSl7<6Dk-t>v2bzBcFCKP<76>{S(Z07+sUHpt2vgX11-G! zxH#m{Ru{y4Qd`WiJ4MrwVGGN(Z~qoLY+)G_dQxjQwvgp^YSh4^QSybQHC<&AtVmMm za$zi9Kznqcx-N&lEG7AREN4*Go#=)(N<>4x!p=El5?PDWp9I%XlKT;&O3kFxb3MtN7i=T#mzOV%FS3y*ZMIv>xNpgW*!qI->7)cJ`OXV zA~p{XV=MQ*zr4W4(ld1yxqrmAQLbU3j<$!SRco=vVbzBV_Yc2oh~3oV_4UW6+6}pD zIC_^wIf-zS&K@|G>>$9%b3ci6lU|#&s;G}( z=`^1ByJ$aj-nSnsJp4BPv~K^ndushBAF3wj_wL0+L(iitQm97QJAn?YCATZ|PdGXT zqqvvp-F0+dIwqWA#eKBiT_>TzHe^hu;ywrNf~MAq(m4fieO#;t#YJ63&1LD!F6xSg zR<;%FN|kPTG@H8XwlM-W^Of+_eSO!4O2YGp6)H-;EI24>-mAlf8Hy*7uS(X~TVMqp z*0BCcXP!q1dQ2c22IcnYx4WNc?b65E)XFkKvK}@3)Z+o(3{xivm{0XlF~2?uP^{{1 zW)AzRa8@z1p-Kls1?Uw}XfaelNTqRTE_96yd2ei6nwx{$(uc>^Z%T2sce(x1;+iSW zZq6xHBd5Febx9}tv`;uPI22|4`Fob!^H%NHw+=2j@ZN;GH|#mFXWgm;CwAXO(zl`L zdWfJDvMGX!lytjAtDZM38%Ajl?R3XpUDO?{+4EeXQgnc$w(9g3$r*h-DiWi+-wNc6 z5)R+udK!*?)A+ouA;8__;ZAqJr|#5WM41%l>W z2m5G!DE@LUq5O2Al)c6->SD2AhIFunO+8|X>;w93MwWt-+k(C?un#CK1HSo7G&KEI zpedqEO&xn)fd)2B-JeRLNhkzhJxWrN`8qRe`}(J+Ik-AUY`uNky`iu9?Ypw;>cLl~R}bFy9)|JVQX4-VM24qg3FFX{ zGBT{&(-9URuEBY6VZ|})kB@uo>q>VQ_a{lf6MJ5*GC50{?q2fDY)2P+kJT$H@0k3< z($|)fwA)guqYshrR}Q^MA`Z8Fu=cL|7Jael(2B3t#FQTHc!TYaQS1)nJ^&hKdi<<8 zN{%={t*oenU>rr#$Okq%By2l&{@?F14{7`EDrTNw+M9fE$4}bf_hUO+ z9J)z<;G)m*JxJ+QbZ;m|r@tD%*K~K-0$iM%Qd_DkVR7WW{@V zCuF*(&#sD%t%AuimPxH)JIAjyRJ^3l65O0x9iPw(@s=NC%fUHwYB z3~^h%Y9SxZ`y&@!>snmE7E>jYqR^qEF3jhYb6`zO z73l-37Nljg4o7!7h9eGMV)K=QQ${oSaDin6nSlL91pW!71D_ZC*{zr;MYQuWJOwP+ zS;oD?y{X`7g9;0Rs7-e&5#`twdmc>4=wQb0y5;}ZyA#)`?1l#Ens(MUW?4s?I&(l` zlv>PBj1MQ1)!B)O5o&c_VtiPeIx8Uo_DOm1iD6_C^^aDIc}ejR7nZQD%Q`Nx&dWMJ zB?H5{H*lDBWVjNi098c3Hx%k9#XX#mGfsP=lhcueX;cr&KB1Ga+`>X%clYe_-K@lC<-(^x=?}loP}v z;zp?pmZW6Sz=6SqA=TLf%NG{>X4V56;w&w}Y9!3S{8#npxlq;Aw_ij1I?Oon)$B4D zc`9eqQ1dcM5TT7xHXTDN4xaIw*qU3s;jsr--x&MaM2P?Aww zoi(s@VIg|b9MvrL5__g|#84SV1eOkY^w8~BI*vH8gRV(^W|JTGuDx@ebapUvau9iI zom9qDkJIjA4s{%12X%a3$BY_>Y8WQDvk!~Mk@2;365`jAU}e(rX9AelnO9W;e@7dg zbh>?`vZ|zNHUlx}(4|u7bkORoi*v_FXZ9s}m*gEg2-&d%A>LQL#a`eKD+2+bL%fA$ z1~6G=LBY~>g_MfzWc;WJdZYuj++!GNxw-kiR<=%#HhqUyHofH> z;%k{u?2ozyLCw9-CqpBRp@tVB6eK>eJiUM*FS3==6(Ww5u6%ivcuCq}*zsSGu8tj z>{yF_k~_IwJJ^<(ll|H%PUK@8sD>J&5+@gO+EI!p_fQGm zOw|T-4oZ8_rjP2ROYq54nOig4$ToXvG}B+IBbyhmL$BFPjxzBe#r!anYhtGXLSJy}*~dX(cJ@V{_Ql5?wG*6}DfZfWM}b|w{PUG7(byhAHXk#T z1N2ldTcLAuF6mb>@Lrqi$1;NPYwHFu@`B7cTv9K}Xd7oGZ-qVzE zx0a3)d#Rt<6QsX%uSf5yteDH*H&Jv>Pf7_Ch ze`%zH!84{mDSbM9a>zhL+N!-w_R0@pXmn9{Tf>TDbO6eV>Nruses*?UxT)#jd0E-x z@GPGf5mBmE4+sk{RhuMeFDKQdB-bS+)g`CYB~4Ec3rk503C%!<+^V|5oMK#|;C@k6 z-2rZxG;x<%2|6+;lLlabBl$eTUZE@7p#pz{4u!%CDkz`Od7qMUnJ45C)Ygbs2weRf zpcB<%4q)OJn-xt-j!X`T4hTq9FE=lqk{p;AmEaeUx!0^nfhqx~8UoVfG17>eQa7hcQ@e#^ zG0(>{56O2cAWcyeXjYVf_rS%lw$fO;#Kf%$%+Z9c3DUzYEtjNiq?TSt<7LTm(!>#Z!X;z=_c}{8;3I%6My>$aY8RllZOi_pV(_hDry~ zm%-UGKk3a-l`b2JM3|p0zy9Mc7pc-sXIQ#_KR{xBVtzCZ$ls#oOT+U0q~ZN^a69@aVcU9!)xiZ0(Oq!0vz4&l zaksM;0@Wtb!H5z7lmbmJCofjqBRw&T_JvjNjI*K~rKi%yx6LtjS5V^}ty@5r6iGyQB_cKj&4wZG5pUq3$I-oiOLr@&@`h3$~RqnsaombrH7q2g_`c1qVy z)om$lJGZPdZPW~{FnFZ*7NjXaQ+rvpik6|$A<&G3!s8KaO8C)yOz?z+$X4( zdB<6M#Oc|?Cl53EPTF=SX0wC#kTWyp&Y7>S*tp`g87CiWoLIW5a!ci!K~mUVcd@hE zf8;ErA$Q(M?orI;Gch!M3VOpFV<)t#5asD=0%ND~J5QA#qtXXz)UhSO=>tv0*b>&f z+h=<#_v=0FChR*^>A`tH%B`cl>-}|V=BHw`#oR%8g76V|WlGQKYeXT6>GA@>!3%Vq zNC{=V?Yf=Pz^Ig5IuNM#3=U2ZT1;1u9W}|ymlMS03=?BMH8zdsy@UFug|POc^UDWo zOxHE?-dBSAdwZDVjviO@iG8k*bKDQ*5pHgt&OVldJhF!tlnhf5Z6&u`o(fzj(_mB+ zC~#^pLy^#7L0h?$J9KhDAI_$7PR9HyTeURQg7_1Y;G~2C3)VC-BQo19!DAY?J7-RT z?Z^e`J7$Cq7GlyXhS?Sj@^a7i$wtnvamOXCcmVwkBs`t`JN0+yLDKnQONMEL2rn-W zfe-Ta@ew@UGi&3264!N#U0JPy@n z7MU{^;TiN{9?9+A(qh)KZ5wV|CRY`jl@1?OWL8=U_(M=JbA1g(nVRbH{HuI(ruCR+)5@+fYo}SM~kb~NDAbT1&PFz58q=(ZJOHsCEKzAI_ z)wL;zn{}*h52+3{#FFAhjQ6nM9NKQ1TBBaThC~D;`-rvSQYQ+i;=(#6y7%^c=s z&e+{Ku3?C&sZDmhb6k;=B}t7QK+7nEcV>=ud}W!}Qi_tgZBAC%w8Y72u~U1M;sP`- zYvbjI4bxbuK4spnw-;{DTe#!h*~0llhotYMuZ|o6kAfXEJCm2fMp_M*8PP<Rh*JF z7k$>ERhWEkC38xzP^JlkPEtQcYp+_Xy#p~d%r{-7WNoVX)QD`J^9l;6u`NjZ?#^dG0~!~6N%Z2BZw+T>2A{1F;qr(&Uk*ZQ*OFn!WJ zPCZp&K&H#;0+t;x81O@h7-&$#tf3nUK?7)iO=xm2b{^=HQ>w}KY8+-!bzAtDX|{|- zMRR&?SwB&*%^UBSSzvEVqVh6IF^7b)=RzVYg52X$$i6w+&&tP!ytZSuS^0vTthr?_ zgXU&T%!r*Dk%#%DbU4N`KNQm#QLS=U@Y16pf)dgrtZ!c&X zFV5xqCl?f(o0*0WjL5PypKDE+K!?rT!P_UbGF7%)M;-4SsJB04EVZRffanj30_4Ap`_?_ffx@vndl(CoZ? z6=9uJchY)CK(rtP`z$O$R!tg4o0|+RR*M!^VsuJYfs>~fzMOGCqeiaM7A@E)=y!r(5R{KSvM*{ zz2so0{kFerMy^Lmjish!TyRQ$v5DZ}=8;@8Y5fGDtkABnWmS26MG}bTK3ukKRAldarL$~_0Nnh99);{7UbsE&(+)|FR!d3w!D9s zXI~_K4mq1Fo>NHZuBH4+N0CM>uCZJ^$jZ~%UE^Tx5v%5oGu1V4=*L zl0o14Y5VNj2jK^6sll2&Ee$HPBquWZTT6?rmG(MJimM@~Cr?Iv2V}eo8Mg#cs~bXY zJYwjq9`r=Bfe^CMem1VY4>2vCU65VqjP{UKW0zL$Dv-pKfaG9v#xjgJ&dKDhta*{) zZGxQ=f-FL~gLgKohb}G5om13r=)p#n=i1poK}Mj7CNolG!L%tB);niLjgB>qEGSGb z(3qu0Cr=qY$D+@sG5yVg5^&O0e0abJ^XA2k7NZsjL#9y^3bpJBY(l_$! z9`g6TxLM`i147X&FhB-^V_{cyeCfZwTz0u z3(h!E@pjV3!Z2m&Fg-BGL0YuP_DPoj-tL8YYi1yTKW`@&=>o8HmWw>|Vs?q`)BJ@O zGH0v-9*2R)H60$<3c{eFqnLpC>B}70<#=CYURG6Z@GuK*nP`1*VR3yy8ozxBk`Ifq z^sSlc9qOgxv?^}5$`;fgo%N{p9ej|NBJBi`(K$j~$M}M62hLKZ?E|(6BNh)MYiG}K z;;e?W7Q~K@(KIpPp;(He;WQReAz{g;jplbFVhDgvEjAoaTLma9_IHTIb&xG9SJ??X8 z#NDYdkDkJ&A0X&*4vZULn46hL#tVE%q>r^v(|B|9Mcn7P-roKCH53+=l>Ft`|K>ht zASP5)?6 zTgzUL*pE2=tp{E>1Wt$9Bm=PPF{8k4fZ0|BS~tWLXpaCjcd-&hPUVi*Rk)2_JFSe% zI>{~=SrTzwJ9w~{gQSdeK(=>3OA49kp8R#i+)MK9*R_O-UX$<2@^#6xs+GJW`VKh*ZmVex8pahP&vJdHkn!NGgHV`f-brt$SC@ufN<-Vuw8 za6B8#cR)9)B<`wm^d3!*&c;S*7^ipRL%Udu@C3EGEIb^DluCg{A7pHA*}s|{9-fsI z9-eJ@lCmCkKu8{U8NJRP=%E3iS+~Op}hEMPc(tFW%Y3NyjpuzCz&+qrfO2lhJilur6p*=LztD{YE3Uu>G66*4edTx>pO9oJsi zTAWqkmmN}kTLBvR{oFq0ZDAW|Q=Npi7d0f=k~cBR)NI(ITs+N8hb@eX9;{ZUL_`fX zF-hX~WzNXTo|c(8Ejw#QW=no_bWUzmQ~^-Cf;x%;YSdDVE+5n_VzI{#Igc$JX<=f! z(q`p=0vB5^?(%Xw>qhtZf?(vmh3sS6+0VdZVN%jF0I^KFyJ_aQ)cW{5OAq$5N7L6W ziYUryt2}w@;ia?ed#W&gF|9WnYW0SNFR@|7KWkWIUEeStR}+45?fO;zMLvb)qvD`J zx(Z{4R?yC{n*gEGIp>HX*dkD?qE%yTm>_t^mt~GpTZ}3(x0o|msv7U+?cMiMSOr&5 zQdtflFW@Nq(WoH+p?1t*X&{~lK~3S|ZSUwgmS$6iA1#`p5xo6fBW!&=s-(i&s+`5K zmf_XOzW!FGw^@0>xbj;lXQx3qi;c+Xsf*ac=CC|lKEORndkBjiVJbUT+|D*~k6wF< zPld0o{ETXmU`c&#;Jc{Zq?!9wu)KO)7zp0+EcdefxKIRN-L^yMMutV59cQ6+wYMYN z>=IJ9rgATn+oT1AZE2CT^i7uAPiQAf39!}_g_vtEUCqP{-YhxDljQqhsQRyt!&lpY z$b=NA(4Rr>FT9WMyCW0h=%?-OI&0&aLN}$f2!bqa`kmh4HfcdrYkRIh0~G- zC6oMQ4~c1`rJ~J{E5jCDPhrAsV^=v$!$oE&CieV zO9@({&YPro1o&nMOHFedlHv;Tq61R+W9s<9p?0ozVP5K(if|9lkY7z>%R_BlY(u<- zxFHc6-g_}9es}G8$zI;o+2|ku;bVOqK={XM zL&heJlTO$YoAB)pQnvOynR;d3zcW|QBYdtbcU<6P$TvPj!(uy{nZw|$f8#2pU--|J z@kLPRAkmk0t61rQp<~P~)|6ZL=a{I&gScU2(%{M&RU|1iC>7#xtu41+UMcJVUjktR zR2K>}uG^K`*oJZWUQJw1VX4Vpi>kY-)-IR&tFz-0Vqlb*fRk-kCzlOQESa0LRnms0 zr-kC8O{SEY;d4~*ipS`Q@{qtN}qk2dy0#*XG(r|gbW$*(4+A~Le@lWT&LpN_embo zzvbsTnP!#lEDF8QRvG5>LL9-tAlRYe+-^SYCrk?_Cyi{d)Sx+6O`Ib}M0>{ja6)LL zSEBDF_zkFfoZvEgk zw<5kep1Qe5_Y2MRh(?QPL$P4lbX5n3LU-g*Ed*#)Vd zt(MyuQL{>=x_SoDE^s^9YHa*PiFb@ie#)9sDCe<%MZ>~(_D^%$>sYgF@33L_ZE5t_ z8{YQp=iJVyArs?WlcPL5qLW+`CJuo)Wc$Z_ylN}-)^s$6AZM#n`zR;=b-F+NwKLKt zd7&JJs$DI8eia+Lbv(me;70JjbxFg_7{heoU1E|?ZHsN+zP9nm&SCCuHiSQ`Fta_3zuAzz zj@3)|jTmuk^GuJuk(;0UtYzMt2b;OOU6NxwJ!6tw2TUFu5nc}2QpXAHo6J+41B5a- zV2;6{0C7-7pKFu)a7T}mdxlCm$B8&q`efut=?CJJ@ZpD1Akh4H-XvJCR!qgt@yZkD zB`Z&UCY~@I!+FWje-V1p?3?=2aFsMD7oj_2_H%klFV2^3(;v|bpu&Y400l4#6ph|Q zD&SAaI8vE^LU*(>mg6c%EAvm#qm_}VS==`H6~zWgM-pLIKfQQXnG1~P1u68-%wR4;NH8yg1-WWO8O`Xj1Chh}5i*kmNL^P=^%uAO%xB z(PcEdP*coqDcoz46Q3NeRu{x2hf4SUdPRCvnyb!AOrRFi2?^o6X=oY+n`uIiO5PNb zmI@m%9EsPlLHnlmJ^29L@|f4bw_xdNIc0-L-CjeWdSa*v3yb4eC?ngkA*>-HD_Sjt zCk7J|k{BomnC4g}J*{t6T@=qpri8*dA&}#u(;}>`J#*bxJ3A-)_VchbHPM)uJL6}n zHWAJFfxJtgo3j%>OiWC1YKEJwwVA1jy(Na-q1;3A&w`7N5MUj6Ka4YCMyymEd!qL9 zYQbgE#YI+&HY`F5RdMggn@~tR`Y9Xf0CM5bq0fBDDn|~5ddDV&NJWS@^&w(CFe_$gSY>L zEelTb?@_t0jfc{D^s?nxkE7b=s10RnR@&?}i6|~AGGPgyk(AKZ6kkz+{@H#8F}LYr zvQYZKwnIlMg1>#XWMMz;JV!=ShZhtUo9wrFf_ph3sV%-@a6(fXaQuQhz_#<>Q6|HN z7ey$iVtw9Y-MP=TjoR>6Id%&F-DSs=*Ir9;{tkt>lyjB)@gFI3yTVR(iQDO$lvy*o zCt*5@|EMQnibfr$eN*D)HHKN8n*GvC#4MuCZ@r_H6wSYD-dQWH<#g#QV!vDZf;i5c z37JkugybZ8%FASnr@g=6n;v}^y_F}xgxO8**8AM%RSL*l3DDKq}U+?14D*J zRHR>XbaHq15Aq7DO7`{jbOZyT<+%AWJ*?Chm8#5%P;2Ulb(Ez}K?Q-o2v$ky8V>gM z(g8Ogo_CC}3@Hj!YicWUGAu+#S2tfl9q3|}Jyc`bke8QXYSNc`->08TB0H1>6vbwY z%`f)#bLki8$ySmPIXOd0lfz+3tIgqBPgn6CDT%B~kIGF190E)@0eGubyIKnVXS1($qX5wO^RMsar08M`-<^fzy-s-8p&N_!R2~+w2u{ zYsL&r$;~h^DYmvnzba#2RSZ}9=;Y`+*2GVD3YuT6VkV>;1+J6m!IXt5SG$F%)uV<8 z?*2UQ>s9ebrY%QZ1dsmVDI}LzI@z5pvgkE>4EwOkwFb5v9kbWwRDOk+Oi=tg!~q{cbYxYMosovNbmYZ zr{tQmrtxXvx$g1qGb+j&Ld;n6p$oX(+h&DPqk!RqsZoGOrf(KC78ur=iIRT|`kC?2 z?I>n7iuSOFhUar?OR{eHSgYI1EZv=4&FxJ+F~-7}MEg2ZJ!VXorl$>%0#R6Ykj7H^ z3)n)cj-o@e9$(}@yrC-=w0KEB$C%`Wc=>v`+WGP^Cb^-m0shW*zVR!74c4(b1#TA|%Jb!Q0k61Ye$HFLa}9$7%K_dAuSe zrkm4)b#q|RnhbVPV1K(?adn)2z3TOLy(L$3BP_WN)MXWRS#m{iA;c_6x^mx*?YY?Y z=h;(|zg{;=O`|Y^L>lyulwL$@9N_^fZkcjYcyPb?DBXQtFVNtER$Q4c(}K@qw0o&m$qE5nXj@V}ZI{yKJ20;tFdo z+rAombN4t!gV$NF{k1!5#tK{=;e*2Uejp#QdVA$xKa@$m022qnfB^x_0)=zu)t_k(q zvb-F`!71dsB};x8tntr?@Z#r@s!yfI6>}we>$F`Wnb21&W{|UB3`m7&{S`YH-JqhI z+Kk>oSs1ljsyzN36G`pIVw8uUs~`mVc=@&S&=tE2LVquB??7r;5|r3!Itk)#8DV40 zr$uGJC_hj!og9;^7y!)@tKI$jySe%MySfJjy1V=MXg`OMiI=C&ma<=(mviio<>7t% zDn=%rS&A(srwzwIF>U%>Hbn$iekJ{5h zgr}D`&xiPVqIAQ2ecgCI(9h4qCHwZ_wa%e7^QY+i4q@p?l^u&CF#<Im`D#3{m%&|k#Z z!QozB;a(aDLm(XqM^2A%TjjUJxyUKg;mG8wxN9@DAF7h&$2!%T;FdwPLxkajGq>q# zD#Zw}XyV6vr4{XmZOPlj-i(p1NCRz|6VJW3Z*G+K;sZDoH-Mc#)pMYk$FzA*N}rQ= zr1u{{DgMS?l4Z;$)Uq)a+P`jWxb~~auM)pI)9|?W-q6nzzIuMbDG%<_mDW{FGuJNs z?YBkiX3kjGdPSBQme1m{_%|S#ds%gu$`42s&oB=|3`-YzY*1XbLNI#@E`so4wF+Wj zP>W0*yGlox)58{Q(3F@uH!0rbwYIZuI8=eT)G-qQu+bHQE;9tB){{!mWp2`Rw+(Ht zwJ~n{_me3!EZD6eDASM1Z_|Y3l~Z9iHqt@ym%ce-TZKR!`!8OEyf>KrhiAWgE;Ois)5*#@#HlK6QLz0p!W=J({uh_As41nIYD& z_PJ*zn+#vH$v{t*j+&Y}Vwo!g$F=T8tdOiiBQALe#n*^NyYh(i!^e zXFHH7)Lw~wdM}ADYN)odGG)_BJ)FIdZ}POVp7P+F^=q}?Jev9_|7~La>bkLMEoE&N z&x~z&Xz~H=E@?4U@<_*+Q%VJ3A&^o5FLMjXJvhk$6+ov~a!=;wbiqj)N9c(W3DQbt zp6o=Mj!wL6Sh#h)gpD+pO|a;*w#*_ZOD%>4aYM=Yp(CeNQwduV0VsoHO3*B!k}7c9 zwyLc%-MU=mfONNJZ_O^3>IuZe9do$jsvu=n6tNtF?xO63(|S|{bQ`5?V8!p{hfXDW ziInrFu8Ou>N5bjU3Ye(1j%1UCR2Q{m*b>%lgYZHTZin{R3wl(Q$k$b$c(j#MfZx;x{EpY%@b84@DB&iL0!DW%h9`PB|DEiDT7 zfvvf!oULPdWzhjuKVx;b7kCL}hi8$JnB}DGjaA1JEG?iFPVrkwY{_g}wvK7g-X-Vl zDjTvrN1or*gmSoFew_~h4@t`b>sP#I@WS+vVf>r0sZ$ML_A}?$V5M^a(FFmFDRTl#G$OvH(UEo4grClMuLx_v2RGKsk_|~Z>QsLPu zwIcR?bNc7OEPyMFNw{?1WONfj1WMK0|Du1`zlqKLz63y0M2{pHa8 zK<_X)$k{OuQKg)X&f=?Uv{yRW<;<|^+L+9`ih#tbD4d>Dn~+&II51=!IhYht5fvI5 zog5oq5f&O62VAuB8{Es>N{S~f*~$^zj1m2ZaaC9~K{x;4W?-WV3@wq!g9om3oF2vW z+5X_8^sFfMk@nqCuN=eJR>Wb;MCML3TDq^lr91)AhcOXQAr7LTl}@vD$;l=BppFk& z52X7u$s^zLM-=le7*Gl5x?sJOjF9L@mfJ6Bu?@F!GXr9rr*SMnO`5j{Y27-o99N{8Lwn|5*)#9-X@+SCf2RII1Y2z?Vf1qsRAM3t{{ zkhiGu=4|96XySKqSDAN&Uw{on7RI>HVV(>z(rc1JNA25=G*O*)lXSGU>a$a-08n(+ z#vqKdL&5c)9j2cFb`SM%R~m6QQDhh~dQW4?oiB{+t;kT{-tqyqgKeWUI&aYVVTjQw z2&meR^CI@H4V&L`a^BW<_ggNNZp+?Wz4e`HC#S=bw6*fs;f-4#IJNiKH%$vK-F4T; zEhrf(#gs%C%Ik%GC;=a%3N|hsootg~daG_K@my-qLmQqDln&h9eehcWJ*8>#kQ^m973*wgb1}4GtTg^mp>!yeMkU0@`Z)S>LWob% z$r;r++)}XuPg&zcM+cpTR%cZ-?x6?nu8U1K(Ts?VOV^l;V5JU*`=B^3Zs5SUxMIZq zM&MZs@tVF`I|V*;Yj@*yyyQaRv}iNa^7%PMQ)A3bE9MV~sL*Hz#83tL0D;eK&Pkt` zn$wh%IVq(jJ1V*W3iNz5M7q_{5llK5w>;>eg^PQreb=w2-e*2}{}h&|y~PIsiC?)7 z*<93)CAKuC9oJyQsu%%aR0nB6&1dYX=KV@flK|iDs`w9$3pe_8KQHfp?u>qy5Sc=mz zES8Qj8|Y~DHO7-pO$6X%f#PleeXUFw*aYSr^=Fj6M2I6ix9UAnr^JzVJiVu4DL^Z- z7fXSCRV`E?d#re>sp#Y1H^SQ2W0X`_GdgEctYt)1l3#$e29^Stm4@Mrol)#sN?OwP zoXlH8_{>;vT}B5n)$^?TOy2d_BirgshM10CcX0pe;cA#4C_rI;Ft(<2X7$*G4-@Xe zGtvhyt*FY&3G?+gz`qyp12BJJcX|cB0L?t#1$@bXjLq7%)7GV`N+Ix&A)s=%V^UQ1yA=-GYDrVlxqKGzh;x`o-%5~!#h znLjtmGGKU`Pk@!CRVe{#bMYqODNCxix$vP%5FLe~x49TUki-R!ACo2U?m>op*H-1O zE_-e0vWX*ty@K@FmCnF?o@e~2l?)XIBx71I&hE(CCQGxLHQP5#vYYQXe%acV!8Er9 zLymDHbLuN=794L`_uS&jocw6LJqK#(VdNEz51ns;qc8Rw=)`Y&QMt2*h$UV&QGFwk z$)Qq3LT&Qd4Qa{x3{srP_hk3);xu~YmNoU(^KEMvt(rTiJ5ITy^YW`#Pnf%FTv}|p z0Vk*;-{EoaJenuA257GaZeZT7VZR@cN4Y$sdI>&o!8&uebWD`?YQKll7%uHEU)>EB ze!sLsdJ~EOWuY0bv!$miCP5WpotaYjy07*}Mw_NYq~&XBOmn6dRE;H*Ycw%wk!7Z% z&GILwO&K{oE+Zo@Yhv=!dczCG!>?f%HCzZ)=o2cz?b6&c1BcF!vsi3inU&RKzSv^i zCLy$Bd3pZGk@@AzOMv_Hk{uH*r-P_7kv6lVK$l2ZhkcndiM7vAL%)H zIb57OEM!c3?c1gP(h~VjrOQ)0FLYN*>{CXhefxl|1FY=HHrv<%n-iq|WSq3+ixo&W4Keyo1~d+i(29nux0?qBR^L*4&5b1$tR6-n1rJJ8`(K9pH+|3ie;zU?WXG%rbpiYTQyCr2ibd)y$J-%|5 zYrso^8q3%)b1y3wmmI0+zQe!%npo^d_}!uiam&-%AGOc})7Hf6mp(Y?Fo^At+JXgJ zCsSw9TJ(3%E3?d&e)jQW$vDX&pxe_ka{OJ+eZQuib zf`*kGq>o`3rG&lOhQjQ@XQNWpd|v9OJtrSU0xxv*y?O?%{dVnhs;6b_z@k|qy??bI!UReE zq!KK&5H$_$bFR)oPu(l6l)jhef#rtot=qZ;!`fY?cWGzUTWuUaq26k|CSgFUw0OMr zT@xnMS=DRelUk*f6K}tL;(*k74NK?)$!sTY$O3Z!1Hvoq$sNa$v#+H(X^@;qU9PR9 zO{u+JIwJ?jlXZE;lWC8TOk^iTH~>NiXC5ed{oJ`@xC=Xn?ygk6d$%OxJS0ZDoqJb) zKV=N?f#@1ZhfBsE6IYOPC z5ElideOz=5)!#yG|5g7?Y$8U@vIxh0!!8qpAt9qn%&AojwZ$~1-O0n96fMay;*kWE zftXD;uu14?=|VMoTD=qFP#~p8f!AKnowXFs;*IXD2EiXB_Px`Q2ai2+e&_GelFlbi zNJ8fHOl%C3IV~ftZa{N-M0jo{H9=)nhd`~0>JM%*tzE|+I=}vR(ozSUIH7(2#bkD_hi)b%`_ZMusO!Uyl9DEfJ6c+N3v0xK>Qwl7+tnXij?f+eHa<|_P4dm zF!YpRTP!S{%qqM=6QiF3p|4dNso4BCM%#Sh~AUM{e4tlsS%M zl|yGxBy>$0B;7CHi-e#x&>qAzb&#u*-WD1$ETMoDRlTAw7r|>|9LG?sJ!tRIpXHn# zM0LuzR9hEcjntC%CqY4$cMz`Ak!X1HFsZRSXKP8*Gp5eBmTMUWi|c|$n^1C@X|k!*Y9 z-45v-3=k7T1u;7@AygPhMCmFC3rf5EufGC>GN?}9;G zyR+a1a0ffNZ-!=r-7cme8}3FRV{bU98lkthw^B2t@pg{_gN+xropA>>i3U&UT@sRf zVmh~-%)!xKgxGVhF&RRWVw~TpOCk1lnuH?nG2zLkcHC>XB~KpSFFR>sYBPpd^br*> zPCx{Wssv}K9L7e3)z}JlDs~lZg*J-I%q}cfO&>FR`b1S@-Sn{ys>X?n}-S+!BQADuLMdV^}`q=rcis&xuv0?9Q)Qz70^$kEhx>Ysb{&x86W-H(y_ zA5)#cvj*2=xCFFGqIwP&J(Yx2oyI2EZ^NCTUnE)e1w*0hazXE~6GJEl7tI$Bm^@&J zs1*uJ=dSk!nMde9rvzavQ3fQN83b8$w)3bc70EhXhygI`<$*Q0pUXOp`$t(1;r>*~ z4N975=!lPQZrNfJX7Bu<_J?5#@_y2WEYQa6_{?kRGEVVFQ(?>RqHjzv~+T z@noOQW^c&}&OVfLG3TM|>DhDeW|=dO{<9b6q~h`&!=Ayft{z|6%?AMdkTo zer{gh+(+`Ab6@QCKb04k`yBpw;_Wy5&*yH)eJ;-s`*7~3osg)T4%(Ld6}da{{|^5b z5&IEb_{wAQEOYng{oEPjcjE=3ngEe^?Gl9#_sZ$O_ZupesWvayTPrur#ybvIr;Tj7kcxj?()f5 zpZ=bIZ+T`JdDhJ*EAP4au)h~lKGq1v@9+7^XPXz_2$G!Ld{b@05LXsv zePwvR@pIN?gS)Jnb*1zDV$*TM``__2^ozd()}MhP-*@w|{`0ubPQH2U{(+DqXLYFh zHtJ@)e*paNf&9Dr-tVPxBR{$IMy@w@`_H$B8{=`kYa^vU{=%;ji+=vfuMsT5t^efx z-Q&osW|vSGd1#|^cb~2=xvNiabxg}eY;%c7aFln;`*Qb zdLu-8z5i%?kUwec(Px{Fc8}3W>8t;PzsCH3I#UoDcK+nfx4-ssEA#+ONj7R}hLnx* zGPo&Wjqo*g@9kY_KYIE|iI4WAJRfU(NcX1yhV%^Ma)}Hbrt}MSYpjr~n!roze)4$W z*-GD=3Uw~m-<7ls@V7ONkWV(&7>AOlbbZO*jURP=>E7g-jrF>>W=De`EgSh<<7D|t z<4l7aeadBxbBw>Qd-rc_HFlF*8<+H?59zmZMK^Y%9HlQB?=ZyG<=ODn^)#MKde)U( zrVB^gf&67xoLhO5-)dB9ts5=j*0|jO!`Qty9BEkTf+5BqKQSimH4a7Fg5f7$Y&>Lm z@9p_}c=Q(MzrnpHEp3=H3}r0q`^v4(&2n7h<2RE(S?O=RheSVmrgz`&KJv4TXS%y| z|1#vMyX%d=(8tbFAM~9!m1kmCIF#A>uCE)v_0}GGbH6c^f5x?EpS_h-jK?mtjZ3!@ z_h!S&XPdsD*Ug64g=_rg*24e!(DK>FAJHy_71~yBp2=Po(g7!TjgT z;_t##2Z!O4%9H4L6hT|0`h(vZb!} zpyR3kr|1ItnJAx{`!9MPh$SpV0Y1ee+|x!VjIfApoh3om|JnX8JV#$ z?P^Td8*u3@J*sQ^qq6uT{~N(JPS+Tp8-@L++#qv&fxFWUbD)0PX8-u^rRo; z@4KMM-!uzdU%IzjneXM9&1OC0-~4y8_3wxMbNV9S)c5z{{{6o4m1b|1syXo1AvbUA z8AtZgxfpCmZe*X-Gmi21b#^uF3^TLh`fC18+7g4JY5%Tz*sGvFC;1*K>R`xcBzF`SlvTLHHf_ky1;CjW}cZNn@W|dza7a!pq;@IG%j@#_ru*?Yn*b zkABG8W@rA7B4Cs_=JJ~)6=cDZ~3v=YV=o&-u(Z6beGT0_Wi5*maojV z{i`r^@n<{#J#cOY%53+Zq5eu)80aViOuz~@y#LQUXGi?cr2E%WHKry08l?aIv5jeo z|NXi7>p76WzRCIRuP1fu5J(c;HHK)W;;z!Yn_4L@(-ar%;-X?SA(}Akz@kce1lKcg z!CB_QS>aXjocaq*qXy@>3ol~D3iby}I)n?{P0}e`XK=la3!0y#ONh@2F9@%wRO*xJ z$5p&$oMr;H(E48UJvKBlPkJcnsiYT^-b}iX^hMH-Ngc^TvSqS!vUhR_O^tsXC*PeC z-&OBdA5ddo7L^?!y(qjSyezyzp#fH%rd~5%GeOg!nW$N!S*^KK^Qh)qK)*)!O-)9* zBx`VCyie|n3)FTp#+hWuEt9ixmEanNs}|Q}T+O%^;#!3Zvg_oXxRA4CPyw5L_?OV-Y2Wb9x{ULBj1sk;H zoz|;-GN~SC7}EPGdKcfNclCbden7b&qIZ=!4{2ypIv=3ir{SK9*e@iNBR0LCQtn^E zeIWM0rgzYDNdo~7zJN#X2bB9EdPf`bWxwKC-i`O+~WF@`x4$9qG zxw|O$waR@P?vL>u^j-Lf-o?Jky`OS-SMDCl-BYaz`U*K0 z(ayd>EBl5Il3M|N51_9CoNWG*bW){8KF0GtxYpyk2WcLW6KS6W^h7{URGIK=kmlpS z>Iqz@a9u*W@5-~#I%c75%u*jh-c1m$)?T#R_@airiqcpky^C35!_u5a;v1z`3F0D+VkjZukdI2SNYfYH~6>sxB2t@d;I(S2mD9;Mg9~1GxWsI`7ih{ z`LFn|`EU4d`S1D5{1v`Km?bO~?i0Qez7`pgRr;o@=!QP|zW9N70X=h{zHe$YrWk4S z@E?THc9d!vM%-PhBdTcCeb_NAoivh0)ljmUY*tm0t=NWo9LC%Os>w_c6Rn!Y#Hc)kl&1EZ8huKPYnCc{3#ZFT_!C7%u*!?|{i&VXi zQ#@W)y@8W4uBzVTIykNBEn0S}cX(?Y2>UMYfKlSSpcd4s_XLfgQN1si3+Ad11V?PX zdqId7;#D6Bi9(|4V*!()>Y|V*p>IZ zMT#_~5s@OLNYjXjlu}A*jEIp^L`ow?M2eJBO1YGBDIy|r5hF!PDMgADF(M*TiWvXD zway?~?fu_c_Gi|7v-acc$6D*`Gjqr(4drJr?~b(wOj^)J@HDE+OkTmPmEut}R#vTQfoZdS5wU$HeQIksuG z=}Mk$mTi_&Xj^Q1SaI1NwLPj7*_PRUtQ6ZGvpuG`ZBN>sQarYw+n!PUwwG)#DIwcm zY=2S0N+eeO&nY}TRvq2@c~W>*bZ>ZHcz(D&ye!-qULW2P-WlHaQA_KhpGe`Z8-It- zgfE1zMzly$BqNd)DT;)`$HQl$>;73AT^A{j9{TJQsfk|stQ1T9tQ6fG-5eQ1^7Eva zGrGe}k*4c(<2^DZGBbMYU#G~N=;`R`$im1HdjCW^AN^E{$>^1sE0!KT6j?!Q6}=K! z6WJKq7TFy+5IGV#895iZ6zPdtqbbpTNO@6DG#0IlHblooo1@dBtF6D$Lq3 z9P9kYR@YpQ>+^}(izR+E8|L_KjKb`v*}J_l8FR*5*IPtAC-nCmA}EPy@- zViRIzcvZ(nBUXGJuR-xx5yQUhIWY-4vscBP;*W|=v?@apF`iQ9Ds#o3m37MV;)HTk zIVyUTW6Im&L*;$hUrCcu8C9YbJ(QU2kdG?k<&Wf3$^^MV{zCbt{H5Hid`tdaEl|F# zx>T3)lp0V2$~twX`fcTD^i z_7r~9xm)X&soF*DqI6K4k!kwh^(!*nz(1KmQAOTtBpFGvui-Ep@)o17(O32}`s064 zzSRgDVcDM|j2vJLF^1r`GKU+(Wu{SU)XFTQ(P)&}6m4XVaoTuY7FvRqpd4svur$ab ziaD~_GS)IymRKfOCP=rX$rm@(`E~0k>sUG6dWZFH`EBb=Ym1y~z1MoL{I2yo)*s6G*2UJv@*!)7b*Wrz zU2a`2AF)1eeOz`}S6P25AGNNwu8}{o?y>HX%dH<+Kah{v>^8exK~YOSZmYIc%ayio z+Gfa~*k;;h%AeZqx7{y0DQd|lC~C>oww1P@$ThZ3Tc=!W`>pMF@+sSH+itnu_MGiG z`E%Rzw&&#r+kV@A`Hby=?SR~9>#}vpU)cU&`-9wMd)4-;{H5)f?U>wbJ8nBJf0gjZ zgg?qHu)6Qyg=-QqR8+$P$A}4H7|vMFh&r6Bo)eFY=V5=Fm75fYcu&cI6~0easEk+U zDQ(K_xMFxrnM~&}TroTeo16oS{D-oME%K1emW9eCoYjWn?_o$Oa=4rxj+u0^PH^5dG1l!0IOXmzf4vuzd=@uQooVUq1^Loft*jaD^IIMYE-_ima0|q zT{UkTwt5q+`B^oetXXY=HJ?y_puVR54Zm&-n^rs2cQjpnR7=y+)peQ^ zzaYMz>|6c0R-hHA8#I^ZQlHU^wPJOn=FvRrFX+swZqkBUgZfMDYuYUJMe+pouiDSG z-RgVVVeQXaZ|xLPAMyNo4R zXj$YxS}xpYfR+yr8KMm}h8m-^8sn7lx;CDCKx={zG-_Y9e8DnS`wDJs^d{>EHlcr& z&bay=WWo9*vS9sATb3yg^w>_hO$F|wF zS^u7Gt8J^kz_!h{P5(YTVW+;3JV9@>?X~UIAGEz_dr@z<9kd^?b?J+3M{Gy* zM{Iwx{Ymezov^*8KT6iG{}`6vf!73%yU&Q*#V?d}u^v`FS4m;}zK`vD0o!*6+xH5# z?}M=CLghSJw!D=sJHVD5WXrxyPKV7-V9TD!mOT@ey$*kqO+U&u{bRQ2$JnOVkxi=! zWYcOdvT5~ZvS~GkZQ9Q^9b}uXhfSYQSFkm2QQtuNE!*@Cw&~w#7R{!1X^C2I^$*%j zT8jEATlt@0lNsutwSKVm6Kv~$Wm`WDTOX*t&bIzH*t#Fr`DEqY+Gy>InxTD38>?Bh zaoSh41hRWAMVqZXh&m5z9a<^*fL2aEfYN)lecDuQzxI;$4ee#5Z)vZ?h0MC;hY^>-}}7 z_BeL`1==dzrMtAB>Lq%K)~S1Rul9r<(IeW=^q3yg*04uC36ILuo-%TbJndQbtG)26 z(b`MKmyFxA*Nm?lQ?=8^H;iv;XK=;+J?#x#buZH1G*%ibwe!X*W0m%{@r3b&cER|W zu||8xSZ}P?-ZeHF8?{Tu4r7P*9(Kg9YnRzS--mxr(XLshTJF~SSY}#g>Ni_jEG>Fp z_~%;v7Rx%z(|Xvl$+Ah0S~goY>oM|AeURm}bvQh&8veGy`Wx#u!)ATf`kc{=yv|4> zuQP6i*IhRHTmNBw-*8$#w0>x0lkXWh(Foe6+NK&Y?2cy`gW!!180EI_*cKRdwufv>jM4DDCycSQFE;KZ ze>0|#zZuiuZ##`|kdGOy5A9ce>l@cDV!YGIzCmw0pw!_S}gkeMWxF|2dL-vOnhD>Ym}A z?Oxzs>|X9(?cU(t`cX@J{YdUzB>#2tEwA6p)-%H+6 z-tqo%{&keRlg#(eFaNg|ywfqpJTrM`U8jHZeY1bFcfKF;b0&`;$=goyUnTD{l<4%X zhYij9KOJS2cZ$LLz3S6^NxlqUmaoVc@|F8)d}Dk~zA3&b@cK&M zOy3;eLf;bK3f~&vM&CByZr=gl5#LGQIo~B;&-Dnw@7E*2b+7%T-+bD_O`E;$rysY; z>(+F`I(WrDTIu+@d~7w$`ML5LGY~^8pt))AFGc-TA1g5qInB^cW8c|}GHv*aN>RCW zT$0AJ1Fu8=W3bfIc%AoO(U*r0{->#=vt)hAmXe(%`%1b>j+dOd-d@QvlRh*5XgHoq zE)d1qO0LG*Zlq{=WZCCQZY{FRJ;t5n&TwbBi`*f1xx2w0h=w+Ng$Ssb>|}JRbR2 zip~4yk7QBg`y`Dp!W1iEV^PVeSNtzoi zdegyYKApTyuPbulvy(RvUGZ5deEB~~-m=&=^+9ST`lusw`R`1qGshOfl(IoG#>wn#NkDdNkDN+@w`gopqzxkKhdg1gFaNg|d}AY{d=n$1%;cNun~HVMpyX>onrrgMrEgI*>t82dN3nUb6Q)Ct;-%<1Z zv!(A8N_0o&|8w$P_D}K)B!|DhzrgSFm-?&xqx|Fj37^tTif)Xo_fL-=i0<~!^3RWM zi>`=yK1YiEoB#eV^ENK_ZzX>_rRWl*h1XNe8tw5f^LIwiMK1W)`?vUaM)pNl`1jE{ z+~4It?my$d;J+Hs0!e|4KvtkA5DJtBY64>dO@S$anSnWhg@Gl36@fK@je%`}-GKvv zBY~5FbAd~N9(=u%66_bu3wnaFU}dl&I4;;6oEB^i&I`5$mj+h_*9A8RcLeta4+W0} zPe+c24@TAp&j+uBWGFF|9&(0Up+KlCR2>=}nh=^Cnh}~ES`bg~x^`hNp&G!gIrm!X4q2;kDsS;qBo)*d=sD zj>qQV>~;p{m8-EGhy_XTp)B}A==wf~-+2!cYrC=2i0m`>3fIr~)Q)+6y>Yhu=$vMr z&En@SI!}H2taSY>L+2LroWkdduIqb(kDnzd#z#+b{JtKm5pm5ZdOb!WUSd3);xX)# zaVat?9xHCd2#x}u;n(bSk?G`Tale8G#ZE`szzY69Ep!*jinSWi}k}R4?Y@$mi%ZOTAhY@o`?PD(%7oly4YsS)85!2MEldm4@a!= zRJ(V$_qq>_SU6%0B~JkVGt(OPF=F>=_j&h~xL$e^nHGA|k(?eEl|%W_o(Z1Go*ACm zo&}!8p5-Ivpas&GLC*11e=48ywt7~3Hh8v<*yh;F(_Z8sX=PUJ9)zwh) zjq;86O~U(h-z?w!>VDPzeC@tvzD~Ta$7>5|%;ekW>+&7DB2*U|8=6QO zyjrL%jxx7?T4+(IBeXKK7PP6lxo%2mJ6?N22SZ0gr|PCu&%Ag3&da7Pmy$;`Rw}-33qwp>F`0ymsKsVid2h9r4zs~Wz_V6<3 zJMmiI*b?5-*iyZhz8epB(f8cddmHD{H{FeMDb=sy@4-Wnne<(F{VM!j#ByOI6e*9? zM8-s#=o{w9oXEoZRU>6&Nn{0mBaHvKES~BvMYfG}MRwE6{Em2JAaV|`OLa*2uDG)9 zQq)QuO~DJ_853Vm^!;(&rI9Xthumy_i#&1yCHgLz-Wx`n3EwEkzgecgN6w~%di)+; zXTEKhM^yhysbIfD)kIy&gopl;CRx;d0;!0_pVrJ}LBvAZ+@$x+(B zvAb?zX#w?^Qe&jlhrdfpt4c>**OiWMtZUd+I;nJeV_j)i>Ao6!X?y7`@O-@5OP7^) zHr9dGlf02ix0LRrzfgK#X&3U3<8|gn{wE3TUl^QGdUbHdU~O;`WY*v;*u)Y_gNu+t zgUdlRgU1YR8a$e$KMasZM>ca zA2Dh0$(pHy&y_VmUMj0Biw*86vzDdQOr=!T&wK~vm3gjnJTF#O2|emHjxC#4Hg2e+ zthsC&xOHek+3^6m9Y8*~0IseDiQ!H<d-SoFATj}p;aVRWK?8T6jg*O z$}4Ir##A&_OsSYzF{ff7(vpf56>BOsR&1--U2&k|NX5yDa}}2=dWKntr3~wblsC*X zEH3xuJ3^-ih~D9)yhL;SF46hhoH+<~yiNmK3Zy7#!_@dz*!&eSpJABjd?ZfvBKZyTt z^wjX~;g_pKRYH}cs()2Mm9MI_s;X*K)%dDORnx0xRn4zzuUb~siT~eoOV!S*eN|mm z$E(g%U8uS`LK~4ZB4b3>h@ugp5#=LlMvNKJg#Qn8CU#ggw5z~gU`pLgGfE=HA#NZt zY{b5D;fN*JJ+8oOBW8T}hy#r)M;xh57;zGNxl1E@s;$*2)%|K6jZ>@huscFa)t>5D z@6BRbagN$Ba_h)lBXO1) zd3NN9#>b4>6t~*e7r0!(hx%#Dbml_t-_0(JIQ*cJktM|aiDjO!$kE?IMYg~PE z{WS8R`c~|0=GC_`V^7mKwSHawX2h|*^@r+@)t|0EUw@@>Zi8${WG{9$xEcZtWewHX zTTEz}+%N;ax}afk!}5mJ4I3J^(%GUdTrVkg~#oz)_`qQIVYQ{(nVI)fi=JcYCH(v_N3<)yfJ_p7AbWGKdh=22^2gBh5_U-ZN7axPp+O36+2E4+pN65a2n$Oc(hwx= zVlt${xps}lVwcw-Ej%u}Tm;7dP0W(|oXBJB%_Fj_mC$Iy0cqE025cfkEbfHFtUy}1 z4|cT*+?#uD6B=_3BxZ=^t&mn8OK)x=AwE|$cKqMUkO^F}x2RxE5u{zc4btHL*hCsQ zh5Km}VanBLq|zLhLMxRdGDxrNgtT#=Z9GOB_fzNglZ6Ypc=iU;D$-ffy4y8ck=|ku zWD@r?iD%NTz7LH>+ze?G#gKMo6J&2*U%NVnxi{oZaVw%#=q(x`6Zq)3sSs>e@z4N{ zLTnRWNaRD}|BPl%Wl2ZD{~yhIS_{d2wyUGT3EV3w=va1nfi+|ay?As&^aTqZ9gYC* z&9kcV{MdMnY;56nt)6qKt|sWXHW7hTdAtccE}h3>SIJH^?q@HaE4y$*W7lF_D~~w{ z*;|mbt6zlFxrLipCh@A>BsA#pf7C&0+0M1S02KTQnKTlH2kP`^)~D~j}aFjt>GU;n=F>p#$cAj0|& z^&g6ezF2=)L~-wZhZuzW?^lY!`YL^u7)E#Ci%Q&sze!Z#?)zVh2Hb(aQ#9(k^j%`K z{;d9-_@e&2zF&;R-T1GH@wf~Bw73&@-oGVg&>i>UUfgqkS=^_;kEd5=8&1P5<`@AZ zDB6uL7|mj_ai=j=bm9)4Z-}+V4C5a0l+j{*TRd&tYs?ZqH|E0Fo-q~}-xr&Vhm42B zX5(SwVeuAVhitW~h2w^>L!Mr1hoAF%Gt$4DjKUQc6R>&#(=$Tlj0a&M8%ypig z2MwNnK*>TplN6wPpr9G34@8+Fy$G{kj1~3qiduO^t-PXEtmPy;k8_uP7oM)c^Ag}` zSY4G@Hv_ACpUBZ?>$63Me!qS{c#b{?mhpi8fJoKnV(n9S?NhP#^TFTKzX$ySeF2`3 z`aZ0|%GO|IYp}u^7K<`G+4L~R@`(NjZ0^VU3LNWk{c%{*O4x_WmZ8Ejo)$@XLSsEF z5>IJ}L_DYQ3^W_{jnMo8c4K3^>BV+qVY|^`H_r-Pe-74@2Xz+{&7*BYJbb7`^B;zTMH=*y=yF~$>_IL~9 zdRu=Rd;wON&Q_QVD|{a#x&~Ve!WJ`yVPqNEA|Lmc<%pq1u8{{#zEL1DjY7i(9%u{{ zgYi69k%$?^MhP@-Sg@ZhI2RT?7SG7tX51$F8{>>GgU1`=!M7W?gC`ghM5*x=<0~R+ zG{M#-Y#mS7;n}b$qJ-{T6Mfn4%h~Rou=^I_!qX|=77s?KNk4U29aUi2rts%MdyUg`lj_w zag(*%+ARvLZ&}|0pSPX|zYQO|iG56GA5+=KH2Bz$*~e5_C9A|u>}f&vv_$r_685y4 zIObW{yHxfrJbMW5N@MSGvUioUcNMaCW#CD?9OROZMcBvObcZ1{G~9X3YdIM>UZmRgUQo${T}@ucqTQ?qBslTjup(Od0t{>J9gUP$3{wm_P>0?qqq5lO; zo~G-s>92vw-=uz8e;rI-m&#seXRp)Q>vGuZEbMhT>~*Q^b$0f;0qk|EA@QVGfPK%# zzL)(m-y6ujcMJQTjeW0-eQyx^o{fDkWZ}&vhP{g_f$*B5)xtdK$Shv${uL^uO67d9@v{b(8?ZY zum@V%10B|9tj`FA{m`&(vThOvdt(B7V=jARK6_&>dt*L(V;*~>!QQB|H>yJInQkl% zT@$oBc}9rP6(Z#;pi4wOk6ZeNE--DjoCs!3YhJU=YIr@s=~6)pljslOB2fqtp- z?|N5gTbSxT3fh8RtJwO<(57$#bSn%sv>}Wu6IqW|)`hy_v?jD0G#tDKBDnTA2Qf+!>)c@p98b zGetU&)XMu^gRim-fejY^(Uk^f0@AMC4r%Z%wzsGPYrIF-xJ+-}@Aei^=&f8=Eeg#AO$9B$3ad(9PgfA% z41sb$O=w#cgF)j%W1*9vv7tKXeg+yHsz6V3KKuv1xgUdEI@+ip%HbK_i}-G<3G@Pp zbB)0tzK2|Ro$dwIVWw4FQ;1+S^;G>LsFFwzfy$X;pi-s+)QALkAQxqV0n({8AP?sj zgNm5EpaLSX4U|J9X?C5g`w0lE2wfH^g9&W~9Yo51gX~OWK#5F2kd@1!A3?N)x=|1S zgq*-Nrc0nJOh49|0+;*~(B4YuIL^thfB0UYJ3wA*f$l7q`vr8T1Mpg9J#=hg%1a=& zF#0tFI*Hkh`0Y zF$%=?XW&{fu$^=U)n)rL9s#la8L+7U+n?46Lch^J8qr!J<#rHiVU#mKtC;Qstsqjj zfR-`62ONCcd$i-6d6wSgeET@7{`z%uH=)T!O$J|VoqfR=%^QSDrfg}`PX<+Lw@hWd;FdLwHW0R=vI3%r}Dpqj(eQf zfLfvJ^kPnx`#~$co6w%F-sNBJUnNwFAAMdyeYR97e%!-^NT`;pv&~%XmvWzfg?BD; z52D;MF4qUd)~VxfKU=5uix_VkW=pj;sxRW+f~~0WlJcH^zJEVv3+m8YZY3V>YssD_M(+({sm}-jy8pAXtW-axGrqNKhZzgTvgob zO=E+n_}O-OjZE9^hHfm^fW`Vp`^RBkXhdU3ryK^s!%_DkQ*Gb#L)MEZx& zl@n=~6&-iGRba+{q<-07hI~~!_falv$jpVUtu}KF%&}Qlfp7T{D`6Y(XFseKtx(+n zw@kextA~A(tbKx*xQfb^irg#k32=E3M*;9l>m( z1`)5NS|x<<5bN9^LX$$`A_yb&USvYd_MT^I24S60_t&5^ zAJHi$^v!#MNO>7_j7UuoBD{eK^Bi7Bq&0xnFx?62WWt<;S2E#f;pIf?M$poaXbF+L z1-f=7tZ#S`Qz4Evlj$kYbSBt=cWM~kqwWDsCY|~}AdCR>X9YDe4FHX2N&<~#f^~XF z6RGn+=q+dgsD_C4X4rXyWHof9OlZN&RxEv>0P7H`y==u=8K{VKI>z8FU|I;uA<|%B zUMG=y0K_e;?}0K%hy9xHI+&;xyEh#=8z_Zz;vfk924U{Jm}|^Eti)?@4O}mHr3(>L z^yvwEdb}EPvGQJIf?9e4o@-3!gz#M98V_hqo=dJh$bAF43#1eO0CjV@DCjH`*3xsD ziOQX1`X=Z&k=h@0lt^OFBYyrp-A2?jTbB2Hhy84?uN9T%(%HVOQ^|bR9-} z@F;T!g|lCL2PNQ*9=1Q_5ArcjLGf9%*8|EaJ^{L@&i53#a2}MsK?Sa*&|#16$#J!T z-T^sX^WwUGu2#?>=rUZ>K);uddmJ9R^11|Fx(nwN1-m3qimL(Z`&-$Gdm#@%cN99! z6$2?C;qu^UzgFkFuetKfy6!8kejr?bxG%YIH7icYweAb zu&2viP`CnA1U+p+gZ2}#S8||W8SHro&Jb*WbTv`3lXB^-R>Gb~S7W{^u7RjfvVll1 z1Fd5UA-9An3?2KHJ{CGB>%IbAJ5vm_h{=!KBqr1?F}=qpN=sUOF0?!nI`$qN-d!@C z%OMVxu=mjZw1mBf`iwmv^#w=s!b?G=$ZhbHgUUfQtn-1m_f*$w_8A^p!qHp9SW6H) zP@@vLONi9JLYK!n>OmG0Mp)9nIn5g$jOmwsaCd_2~Agg(0$ zGNDY-RU&17&xoSSMTm7uC3M^?{6dc?y2QC?ujm5T*aqrm8U#Wg(eilEX{NoPlT44m z>e`v^2DK9DoygtCy8A(Un9!r5T}(K~7wsTY&VaTRAzEl?rHE}v=>ox0a5P-^6m4d* zfHpF{4qDH3(et9UM6??zTFtq1j8#kuXa&=85L+18Ur`6^V4WpwFA_1LXhBh%Icr7c zT+wmoQ#rL1G?z$F|7H^@uY!1#3eCYx&c%_6rgJX(Ry39A6lgM$3VSYU=5nz1q9)e; z)N*I>6(7z$>XVkX*u*ki&!}rHZN=xx5T8j`d}eTGTz9Gr=Ph-ejuh`J#uX`AnO?lV_z>t1*7d~)iV>;R6_ydj2TQG> zcD-xhQSQrXOKb72vh=uaKkFzLCJ?Wd_8SndmX-kG)nek+G7KDT z+8~_ij1-J$%HS&4#y-8v^fL7hjALT)WE>+1U2~L1bP?s6qSWWNkvrZ;b3nSWr5&Jm zL8AwwS8^I`yn*|2E9%w|sSU`jDy7+|McoSDB+wnum6g(L+=jly1|@;Mirf&+DyrIy zTwiG0}G z0JMup-v(kE$M5!ax!_CC?F4OMdJeRSNc|ts1|ls0TE{x{s%USi54m5pv=(hG#a@7V zTC};e6qKxYnZAG#-CMMtJ}c0MK)06bcB9#2GM0IBmB1@#YtEKItknXYCo#E}nz8Ks;;OWf0Gr;RW%m8Py=3 zH6sk-Su@H&JZt#Iu?xE^nrEU;BDobdd5K7Q7sPQ*xs0Ax#STOFcT3yAQ?bn;=mwq$ zj|W|_+&Sv&R z;Ii06jPfb;en$+}Dc6A(MNh_c3+NMd`2c;I<7teG@hvzG?octD;Q0rJqa@Xl8F71``0fz564)E z5oLrCZPgUy`lyDAs8Ucug#NV-6w$?K1=jAm7TgcI8D4RP%l!qEO)w&;>@6`XOVk2LN@s~&>?)n zVbX!fHqdF**oXa>s?ssoPT>n}t^@c)M^#T)M!2@p_g3l|ORH;h%v1_WlTQ@#kj^dQ6m(NpxktjFGO*ObS2?Z&^?#~my2T9-DoebXeB5Yb+foGeBafd z>07ALH*^uXGePONE;lJ9G#T_w)J-ay2pWhQ30&iTlrusU|7Iaq7FB@$Y-ua(p-%(U zW0uy!tKlS2mt{oZWqj|!xh{K%R*TA=4N?3%0Xj|JTd5~4j~AXSgw?U`6d#w$ouKcn z)K@Kc79MkzfR-X69pN6}ykFSm>WkSujB*EksLQlJguZBhfNpQd33?fQ*&UkCbZ_C# zfrvRcci&sMeIWb;)LOVTGzs)LYHSXkXI)$2M&B~fk5O*@K$?RU$UPK13tEcft}I;5 zxvhn(LIqesoON*(id8j!09wX`Ggx5<5q@p9t8g**nObQJ?n2#%F)s`06K3sMHhi)dm1u|(mKJ8Q0zD9cC)C^jMQI_Vy$%WHEH|tjm zr}Uw<9BBy^PAVj8x*6pr_kpdU+#Q9@1F82^W8ACnD8WcCxR!pwELU(Py$96W$S=4Q+z!(8 zcM2|~9{>%t+)>a?pPYc23r@Lcyj1rz=UNO|a5(*NTz4oPkwMNidKVm^D|-1Q&_24N z$FErSF4#j?^z!SL%>}y(FsH2BL09zh8_3;8SM;2_yA_pG|;;AF{pb*f2CkeI((n*FevEs!{6jq(8_d-O~F2*V7dEH zoR<3F`wI3E1xx6&0#Iu~hkqJs3`UK`>5HIy471UehW;r(#W9wrVRn^H(3~`kjdioq zHh`AmxGib0V%E(_+Z3m1Y19hUn36`vSdDp}gm2sU&W3`C={ca4n8^ugm}ih$FfMH& zhjo>p3 zRrw{(g8}j*R22njdN+;gkwEwoY9){ucf(=tLyLNUvW%?ZZ~w7 z@~=V%yDunm%rGgx+ffY?`YZWo9kk}K`~1@mvWO%jKmVlP3W9&+A9rA`)C$WT`6u#^ zGBxKP&OZjC8b=&-HbBeyT@E{lj(aelY?pEmp26`Fgc2ck8B+z!pI8Xv;tD^xl8g)0P!Xo4sKpRu%#dYgbF)wlq zdcW3J18P9at5Z8b-$dP2o(Z7mP-8{v0?;1NvQ#QZ)R7tk{T?+Ir_!uVL5;T5lUOa> z6O+F%72{Bz(5K)R+SY*1SbFEr%EvfBiTN%0?V#tOn~~qjl$bv=e-_4u`+xFVQWMbL zTWDoQ>JsR%g89=@VD02l`BPG0?W~)Vf@3g^PuU29=jD%0*#W{GpZTM`)E?;?QsBv4 zt|ny_T7i}1SEb;%e2j|JqIhmu3hbH7#Zo54>xNP=yWEN|1+zxE1uIi9yL^lse1~AR z=S;~1{SOALtFt zsYppgD=!;r-nA6TB=fE$Uj(6*yh|w=Hr=UqrX2YMaUoxBNj3N_9qAByYFC1YNw zFL}q3;mKU?NOC9WnEpy$SMo~GDMRKROkR%T-fUR&_UEB4&OIaY4ip^#jkC1o?JGHr zazDUv_u$h$>Q#E)uH>1Z|H09AB$L%0f^J)KL7cWE=b$cjL3x|VleO=HHZY9?@l}k5 z-EbaX#c2CMos?@}Z zUe-<5K#1dc{cpO0a_3O4uYCd4MY;5wu;yfmc_}whw1rpXCEY~pwgTl6>=U5F9cp=o zeKhEYAZf2gZbWO!?Xj0Z_af-3JpkQPpv!g_Xe8(&_X>Bd<({{vqg)&4oE?^^{Tg(} zPL{X}bc%b`4mxqudFW2*{c?}xoqzuk!%iRq+r97UqFb5v5EQW4=E@Fi8Rp{pC zz`~S=p_`Yx0`zs6l`|)2K4=Mab8`_v#7?CrXLhaz+AsY%tvPc*zlUyC&IKH8EXuXe zah0W@8GPJ!(6n5{4P`rMN^Uu58E8^&2-F`mF}Dab88jg`3+?SyUd$PnbG?=Exkab*!`bk0@i?g2H>ag_%_H92QM-vCwR90wgluPSoJgMJ4p%NYgQ1B&HTft~|} za!NtJQj2mTIT#yLfJUUOh0c?MImK_U7v&V?U{00IpaSZZ%+|Yda>zQFoJ_d0E2kfk z{wOd110{HX!%AMeV(L^X^`y{pePCd9SuGfYv?DJJ+9 zt|h1(cKX?ei7dlGhgesS+yg|~HRyJ;4iU4@P9_&@eiG$LF}P+#b{)0q6ooy~;V$ZlnFgJv=nf_j*!6^_qF zAJ9bBQMn0BxVFz8$AofyGMEs3vTJxIv0B+xq|-NmDmZtL-j#lt3D@!20Ve9Rhl!3` z#FT?_JZlE7)U$a#@QcD-*?l<|bDzG0NP8DLUTLEM#A|6}B6rR8R`mFUhdz-WtBc`kEs;t$y)p1&teIA6!pLUXK+>YEO zS<9fKadc$S-r{f2EzRBw8b-OQXE6eJEZU!ipg$Rt# zP^z**If&@==|`52`l7u6T}e(}oLpJ(3hhC?%fVizb|AMO{2RRoWniZ16NjoS2Rzs$ zdln+F+5}x<))Ek{n;8#iHnc26DSXCQl_jz$@;9O7IXr9lTp;tJWhZJ7owuw6VSO{t zS!flA&RA$p#~JOJr!o;m)xLU0=7~JSUF=J`GV#d}+WQc>M>3CtevjOvd5F7owVip` zQb)NccgTX(XF6cPoYM7v=03|Q&<@LYGxuaq2VFtAT^7uNf_2N>k)08zZJFqwf_2Me z`@=q`D|4%H1v;Y52Ic@YMr3Zx!dU~KR&-^qH%`XeTWcIaU$B?TTy5-%_j#4E8-%!# zxx(0na#f&Z2Ids>N@j<#74%=wEykU4s&c|Op4papElvwFv6kG^`38*~`=-pfmcAhD zvomK~5S7)}P`A|>7pIxV7|_pGb5QDpiWRAa}j71$dfr21fR|<%4|U^ zWuSu0snlmsPUb|=3G~^iQxA@U`sr91`6wtuF9*>|JMhiFX+w6s2s(T^otdc9$h$#S zJp;Kv0O9FZ&=1iv9@xP+Xw9W|)ojJtqg{Xw$8cWNx}n>otOna1l~$)?Az0@bh%5#70m z3Gv>!i-?|0adJc_+i-5v$X=|VEx1=ymG^@-;r>i~uA)zIZg6e`eHXOOxe>>O*E;zu zN#B?_*K*7E8tu;2OuMX=&Q;DeDEDpXI$4*ZzUW+ud|XkXui6kdHgSjP!W! zLCz)Lr(Uhlrr@}L!ZDUPvC?uLbjzKqK)>N!td@EYXqkr9QYTv4oE>~z>`R=B`M8fj z*XG30=<{r6JL_(TZjo~{biW2IaJGRQ$X&>}*qJ-$GkpOxmq^==aLgY{z5-pfJODZZs+5>#iBF@P<@m0fKjU$hsy&&Kya-Itg;h3D7+b>L*8o za5d)4kQKnP&c0fch=g-wK&51ha>v@@tmcNXl&H za+WW#=0_~=BdNZ@@+?X5J(9{-m}jzVflT6-lgwW&5cjbDn=FTrRPZ$-WHZY}kiGbK z5A6I41NdKANiPDNyNqQa%XXGDcDwR(lJW_b9VFGam|tPOljZ#^UnePoB&C%#B_y?* zSWk1P36(WG8)^lye3o;+LsFsgX*}m?A_wdcG-D3og-jPokg1|Sq+N-Tlr#gWBAGZo z7Fvrm{zZs1);pLT{HqJeLPKsc|C&Tv{5ikpp|Q)SNs2?v>$yxd^GcHHmq{x3vYzKh zoMO%QA$#%Da&{$!**v<9a#gky#ZRmagG}SGrwM~KI!W`vQW8il2=?lMY*zpOY)kk3B<~;SkEgc8ktjB&*N3T2u>Aj z+k)6G|3XsZF)CjrRvoP28CL3um4_j1@l}2SoE)Fqo02$?R(eo{py%OaLE##ElAG(kN~6Et22mvjg_=Sr4r zhYDF{D$l%w?Zm-7NfR3MY5a6!8fzTvO%B%J|Di>0dR)(TtNsxhyGnhw3yKEG>`e~- z#TW-$rh{+0bMTs`vS+0V8oNXIsFw2Ikf~fNjqT7OXm0Tf9+1f#H&TU#Oa7H5=^cV* zBb7bEE+&v(Zih^c_hB&SlARsGyP6prWW|A&;&EWC|5|MdPr(_FwbRn zvm6DPz#eT^zsApgHJ$aVSik8Lo<`gHgA`kz=>>WcKI5(S3H`TXA@KQhTg&B za`17J*-Dd5YmwB?RE`*Stq<6)>?Nu4nI&#HH0KVs3J1@;gCnhj`;aWCWU3%9w##=| zzRi-`QO*#vr>6-!*uf=}c|PsZJXez5F3H-{c%&)jD3o=iSIKAX@>e7kj#o-w)?{+- z4@vTjrSrJ#lA?!QyuoECCfL>Qu$%{(%)gbCEGkK(oF^&Cv+N2*47)NFQWqLzvKS6& zmn9^{n~+K7IY?@xQGN=U5cjx|V4I+LWmk7X_7WWZl=n!ZXpqV5%XZ~S)=VU+Qv6Ic zEkXX0G^YLYlhA4GM`@hvV0OgUg8Vd@%dYV|PB&oKs+|80!$FA;S z$@ZomWj;(&bFoZi%|(_xGb)c!UC)}|vb;c2t6=^vOSU)lHDXCCYggG0)mNFHC#lg$ z?Hb##N|v7#k4`j8c69@5l9}1w)UOcBF(eh!O1Cl(V#yIzS;NeIQ|KtY_;q{+V-MB&iOC#4exNJQvVx%eW_+Gm}KQvW_K1OS`g< zIm+xKsj;2QFN2dgZrkOUEw{R9Kg(WB8!M6uZ!XVrwX!ayH>_! zrm!Z+OrC4u^GK>WcI89XSF@Z2nJRLL+L3#=K=@{26VPc5e1%B8FsNK)Zh zlDtMr18aIX_Y2HlV_C|Y@hr)1lg;BQG!J%#^l5w^Pm7Y=yGC6KAYL2Rxdmrskw)fBEv>*C-zlgwv?WZo}1 z*vB35UXj12any3ecQv$L4nDIxc%IYay9C0UVOP9mxNgZUX|jvjI@xOaS)b(Qq$y_`$FY*%kmr2k#I086C zA8>Nq>d02@lE*7aZiUi zgI&#J*^6a0WUu&M?j^9*9ECcHa|>D4vTP)&c}SX9wW1E};2o?Z?!S4g$%jl9vQU1X49hQ4o9%lI*OY%XxOkjSOWf#kfEdNMSvV}{YD`{g*Gs_7qzru13%llbQ zV`*o37t5(6H7E0AlJV<(>aBz4Cz&ImUGjLP;F2`bRMWcTH(7Hx%bQuUe~IrB3mR!} z^L%Q?O#Up$9@nsMnvqRVJh2NNwV-vki-|1fkyO0Q4=|5q*~XHOtGdC-@pWHFda;7# z_gV7Fis{7ix2)k~EOSXJ4>Geoh!>du%JKlqvn+SA`~Wf`zIzXoMg~}ZkEBL6Y?t#%3XVleC$Yjk zS4_>Ftht9}3rk*ch33kxEMU2klBWnl4o8eAF->{ zezF+Kau{jkH%Ka%SpJ12&$+UNnOjqML`pJi%y`116aP&b!F9z6aAG`C9bx^mEcdcB z`}{Vz7hezB1@*@o=gVA%{Y#j>!yY02PI~2skZGpX$=#%p6o>7Sk0SRFi?>+Pd7*ba zmOcsYB{bHsuZbqGU92Xl%w&F!<G;#+?^(sp~=PJD7l2>2(8<$}V zRA@f!Y6eNAKTGyLr5`i6ce=EzISXPo0uQOM$q>{bj=Y`ui zcMVH!hp!6lVmxc;EMr#|vwVm(%z~{>{D3uVOJWuC0+#o&d>k^J{auS+xrd=i=4(2; z;8BZdT#|iRjA8x~Np&D(s^AgH&8&B@j6tUHrvYi4>tJ@6`_K3ly&2`UQF)~pGS%D{ zDYc~0QXrG#S7G&}k?b$>c4i+*h1Wu5E0wvd$zf^s;djhSSWafibrtphvG*qMQB~*v z|J{bU6T-fVhzN+NC@$c_CMpSQQ^zmGl@T{2uBZgmTANbERwFL8HAO@vZl&r__cB(k z0wO{ZLReJXaYIGi*E0Y2^O+1JAz{(>_x*lg;UmOM z59TVIX~UZpww7t5^ZKSvW)xgbOsksp=oU-kd?Xf#(ivBp=FBA~mT8kfZRJKRjenrg zwNca3ze<$;`;S>TkUn^?SlE*m`DZI!XmqN@$Hhy;O-6UU!VinqYb~88#i>Sq)PkO7 z3FprWuhG^aqH5|aP*^SOJge{zqDm6%ZedsXIGS6WxVB3FofQi2Eo#njFH`stqx&oI zbfdS}=qpcut-@`@yT!iZt74{o4kz4Jb6wjSd$OIjKB#!>ucnU0w8{J);htw(og2kG zqkpQxDo>CZtqH}eeS%?xn`hw52AgSRY>_#O@ulbf>q`Gu zaX!p-q|d%;8XT+mdB$K*aiC&s)HK&QOUocr&Oa$$E$OS>{LdA0mna?fA65A8#$Y$t zOxHql9h*0ri!J>srFlRxR+1K)nVZRuYU#gVTLYVmn(GX!WzgGVJoS-RY+=8N7*!he zcyN|tMncyK6doZ)MCp(JwZhMdQ$&p~Kd`X(fv8dD%~n`_c0D-rEYV6RX9Q;!EOTM*mBr&RbhLs+XhjUZa=k zyPgWqGzLc~e5=BmH~g;@qm|LOvbA#hTa15$;@?pCTG9HYl_Smf-qY4oj2hQ1-ANX9 z*D3xrv5#WZR_+;!v372ivGCnzEGC$scv&J50=I+Ib4AmE3Z| zt#sX^rIU&O1u=mWz?QCN0Ow+bA22%7Sl7zwXvJ*lq!n-TUQ73C@mQmGhd5Qd(ij}5 zuyXSjfH>{%;ktMzlGwrJIS7=D5==#yiUBWXAYzgxh57I@e~EmhNAn7F}R5!5&8EW$_lH8^Si3mUsIpUV7*((bgixysGeC;-8J4^-fDq zW%I@={I#et<*v4{tFhp$Rrq{uJ;vz#UOXL!nU%%Ol%!bkrk^cqe%m_8!rpMjw1Vz_ zGn}EQMU!yCf6MzmyzAC`=S&IDw|CBzhTh@KaAs_5Sk=(BZN&|~zxlLnZQp9!;&95= z@7RL&#jyn)I_l4Y`5VdBVTXA+v$oo|!IC$SYF7Qa|L4BP#&*4-?ULBojs;C;@oGH7`vzml6bFYYnTk52^p0=mJzY#hh? zB5col8*SOpwph1@VjGQ1)Y+1VU1M)6Lnc(8+4}6@XI1!9--$L|!$eLH-j_3_`B&k6 zZSFViZ~23bZ`z#L*d(Ks7=B@*_kEk|HG#6$v|;Qn>b4^#B7IFu^i+S$tf75M)JBAw zI^N zoMWidG3`$7ygrkn)wFEd;X`xw$=Qc`!^}IjE_@?4@IUd+9@9Q;j*G3^@(!oO7GyNu z^u=_d+x$9fk*yE4Rs&0)-GiR0vwB#+*34$iVww3%t)88m)Q1{XHNDB0*ESoO*|Etu z)kxdCXEtrkXHBnWM&(wTKu!rAA+!CyklInoFUZaIx1-hULu<_1y7u9HUFNjgAl;at z5k#F-FUmyiEVnPLeTbWM;6r>{K<+kYXSL0aU@7=s>zJ9cXSSVhdcWmud)L&sDz+|X zisQy6rH>u`C&vvwOphLYyyHgpa-7j)`M!Y9F^=n%Iq}h#>vQ2~{;R=zg!mIlw;-Jw z-QRKDm{T(PG~)k6x&w%Rk?rK8{dy`sZ*&j-XXoN{>FCSZD=~VS<9W9^d7}^W=D@*h zJDGhB@yEe|ju*5bWq#0|{QEkddw05U^kDZMBinXi+dh%i>G`9(IG*zYd$bQfH}Z7G zu1>qry_@U?dp7A}VeU)3J3W4M7xso$(@{@2fmcfUzFs-(?^VDdk#}Lf6Ikj|8!h#y zjg)`fsIRooHBR7fOm`ghMbJ5&9JN6u>k;!PQAf5=uTgjT?^4>pf6r)j95t8YG)>PM z^_ABqednkjyiVy0Mtk0@^rF!rTgn_~)D!+hcyr`M@|+oYC!HE~zZ21x+1hfMwiGux zo-I!{Il<^e+}HSg#(hJ4Q+&%9eLX#I)FOVDe8I^ZHPdQ1>UgW+sD#yU)ETr9@jPBl zX-9r#r5!caN;_(-m3HJ09HnGb2}glW6KBtK;iz++e()ma5O|4m7`(zc9A4@4hu1hw z+*#~D$-T`;`y@tPPS158b#~B-7MVsX6^)ugURI*eN;Gnjm1yKrE78a*Yx9w7R4#tc z@opj2aP9>*TBkVhYHAL3IY{V=kJ^~1N|q`DBp|2dr>|}`HX)N z=ZO!CkBEtq5sFNGp!$ zWvw{;AZx`Dy{r{S^s-hQL9YI*>D2H;d`203U1Y2gW~@QR8hl$^C!#OJFrFaeY4{%LdrUPnw1q&R!CVPWgR|E`AxU74xeUa9X`#< zI(!<}*sgKn!zS4LoBuxTN|<&v{zaT8J}f>WJ}N#YerTj!A?*rjSLn2`ejm2P`W-SV zA-xajeOTs99=6tWfG|1$(Sc#@O$UZOVLCAEA=3fE=)kaUrUS#4m<|kE!kIEUI&8Yl z2>CylrVg8K^Fsc7(^taiD@0!*`U=rkh`vJf6>_xvXKV(^U*w^?!=~GD@|T(h51Vcp z44Lcle=t4fSnVRmq?7r5BWH>e#EIfL;!Wagae=s8tP-om)#7X7>*AZ@I`JK2laIuY z#SO;r0i)B-yNS}2dVdga7E8rHi?@oiMAk%7-XY#8vOW@Xw|I|uuXw-sppmC>V0ffB zRLm2n8l86T;`FTi9_|a`67j`!Nq!f1sklrm7c0a{ak(+bvGhSl@gQTko7ly+hP#Vh zjj=@*AA4TZ5o0={)86!R=uxyneCRW@g7IdtRQ$7et2j%%O}sw1>$nCN~{)Fi?4~Vi*JhS z#CMG7g7IT}QkHh?(E_4>m@zVU8LfwieqM&3c0g@qDAok~%tBd^5jGk&C)kHrl}v=%xY zO<#^2ZTbSy7l^(<^aY|X5PgB@3q)Ta`U24xh`vDd1)?tyeSzo;L|-8Ka^z^UV~;%6 zwC2dsrZo_)IdZgV4Mb~>9PMBJFtp*zM|;%y>* zM9l5t9path9I;HiTf9fS*J$H@$Rr!_L#Cid#GpsU?&85lG|1S-hz1!G;`v6EX#5d9 zD3NiNc$+v|yj{FQyi=SbmWiAPu@~n-@LnUOFm@HYiw7IgW=L6#ZH$z~m=MnwIqw?$ zl(UQJ$Ps~Q2c&Hw+Hpic-<2Nm1bt_mAWjs|5pNP_iwnf%VwG4ez9zmdzA3H~-!al_ z#*f7fM$@t*a_Bq4nQ{$2*UAMc7o=Q+&!t?+K{F|r5z7H$IY2B2h~)sW93Ykh#BzXG z4iL)$VlzN&28hi7u^AvX!=Raz%ZSYYu^9%r;~Fxq2j!YoJm@~NiXm1p#3~+ipIOBNA2O?W(0yhVLoDt=xn^+>$~6ml(0xvK zvycZ4H49|GBD0VO4mCRjVuwKN5QrTDu|pts2*eJ7*dY)*1Y(Ck>=1|@0G1rD*mAr?5q0*6@O1BaU3GH|Hb;RA=7^#ZY8Al3`SGKbhO1BaS@ zK5(ehof9Tn?)@4f%}-n4zbuFRyAAyEZ!>45^oc+=ShP-5ARUSo#Gr3+n)Hl z6~0HjSG-@u=4b1J3S)~8m_iLWmf8OUJDUAJu(jF$gt5y9OruV0wX=YM``B3kR_^tSzF}lha4ik0UP{$2*+|b%)UTjyf zvp7opjd+8RKcc$Y>C*oWTE$3@9$rG*Sa`1ZS8Md7HmAFEzHquT; z{xcVzB%Ume6BFVo;&|~?@ig&t@eJ`yae{c3k#;ivQCuwQxU`eSe<*GczY)I`zccbL zuSVMA@Y%E(VcN{7GSFrgRvBnB3xDb4^}mOfBOL22juL+(-e7b_aAnXrwA%5-Ktwf( zX(C3&P()XW&k>u7xneW1x!6K%DYg<@i*3ZVVmpy@PIBuYb`*CJJBjo+Tj_7Oo7hF% zUF<6EA$AkHi#^0W#h&6`VlQ!TaUXGCvA4LNxWCv(JU~29JV-oP>?`&Y4-pR)4-*d; z`-=m_f#M)>uy}+x#OSX!M%Rh&i1aNn?}_h=9~fi9MaCZCtBuaFj+ZVu6kicyAV$O{ zVpI&pn3y9r6?4UAVso*D*ivjIwiernZN+wCd$EJqQQSrBBbOV(~ZPH1W6MmEu*R%HvR5Sys)60?aQHW9=og0ujn1t2yN#3q8+M36p#^hw_(W)tR{U(@Q*l&*6g>Zt1T}T+a5L&6`m|X}Lh}eaMu?rz~A;d0(lnqigX#FzB z>_SLM`pq%B5MmcX?81I?!qZ_@c!p?4uL|wxRpD8pm7yvu6wel|JXL19^_yU}8>D~x zE-~8;Vqf-~V-{S$2|1j3_N#Jk@^($HJorKHb@2`HZE>CWj`*(lp7_4_fw*4$Nc>pb zAbuil6h9R|6F(Qf5Wh6~y~X{+{lz}w0pfvTU$LKfhsUvp{SXh|L1ASs)e) z#6p2sC=d$;Vxd4R6nLa~lz6l_RLm3qU<`gPo+@Io6LY$VwL@m29P zQTi50-vVh@AngjIU3en1+@wE&G$)YeL{!J<3fmg3Hio0cpNR#cdNw>x;p4><#1loe zVW<`h)k>jSDOAgZYLif93stt*2-_?6Yq40ICjM5uQk*WTFJj6$rZ$f~sQ9GOxzEPj zz6PQ z0pAhd72gwSb>eAtNUOt-#E-=d;wK`#!PZYj`h@W3B0WQxp4oSSjcK@_xWCv(JU~29 z>?<;+Ny(Uohl&Hlfg)p?c*Zn5LL4G8rinR9JX#zo=83fDz6)$jL&h{bO*~yZLp)QQ zAf6>Mrb)?|hKy;*n1<(y=ZWWw7l@O@3&o4Xi^a(zW14*#({QSIiFmzugLtEe4v>a1 z4bex44nTAOqDK%-fM^0l6Cm0O(N>67K(qp)6|hXaOGHD6K|>%K0vC#l#OK9jV!2o$ zR*K8TDsi>AMr2Hr4`Uj>BCZt~)5I{Qq4dqhG+}9%jcLNtF1w1mXvP0bED+V>Hr7dhyvSH5%vgu?0(+^2ZNw9!Hnb5> zSnXybp0G-8BYxinHsay0#bR-q_*?Nxak{9UvJp>u^@xpl!bu}nDCQw(HD|rYIs=?B z&dGecjCY>%d#A+7ckXl+IV1V4e5o^ue>7d;Om^ArAL z?{I&L{|E0F|9OA0cfP;cU+qouKkz^BF7!Y2KlCp0KlV3z7YDgPGjD3pB52`V8nh1D zc$Wq3f?d4JgU&%`?>E8j!5$uWA_cv?tAgG^Z~XWj9Q5@{f-%7uZ$?lOlz7(%GlHA& zhIe!D7w?b3qTmJZuffuw+)DY-FiHwSj^43Ov7WtX? zYUG&6G5GB~GctjH0GJq==)D;^CvpzH+Aoh>?!6tkB65YdE;21L&3h+uW#lUF-N@CE ztG)LlzmNRh`yg_Ci-iFA6$O7+^$kNDCZ)0S6q{{m= zQXQ$rSNZD5YVV84YmwKzFC%Y8-t@kXtc$F}7vj5-_q}iV2c?g_A0itg8~N{ruOr`k zo0@cJ(gClHhc+3@e=8i{v@q0!f)ulxDYbs}MY8~ePDjX4>=wBU<56|Hr@1};A z_;-ig!|vZ3UK3vH-ydEV&hQ@$Zw_zv=Y_X~xB7nzXNR}@kA-u>IsOyjUEy8+-@^yO z2mJYV$D^MLUkP9JpA6p&*ZEI}?}qRB&xK!xU-}DUUd;0s#deA9;y)kj66@kGj_nuQ z&wn8{JT}5#5*rm8&&C$` z-^8AeE%U#REsre^yqq;TYXU##qnwX|AZJ6)h9KfR<9?q$y~kpY`v|)~V)qetpKtdO zc7Mj(SMj~Y{lxvnKH>r5f#N~p!D3&rpLmFPsCbxoxY%DDAPy7#3AC5;!)yI zF;C1Fhl#_*5#mU3lsH=anOGqHTs%gMi^q!pC5{n)A&wP~6OR{95Kk0O5>FP#i3#x( zalCk{c$#>+c!qeUI6=HvoGccJQ^cv_CE}&xW#TWz%SG<;?7qnRTH$ZRZ^iG#OrQRs z@V~@OV%q3CqAPl$F9u>nY$8U*P>hK=VpB0!Y$i4rTZk>iR$^?U>>dx(3AJ;lAmUgF-OwAz=(`qJ1y8XHJs18HnZKpGoJV*_byaI{iNZv*LVAiWKww}JFFklqH;+dz67NN)q_ zZ6Li3ZdN|`iGLB7if@RYieHJ+hd@0Zsz#w|6sks{Y80wQp=uPWMxp8wYFvaG7oo;Q zsBsaFv7EzSh+{>SCoEO?&&HVgE~dVVU9XrwiMNQe#M{K#;vDfYqg&{3U)k>S9Dcup z{C)?y#|(0h8O#@liNnP);xELp;&I~f;tAr3;z{Dk;y5uOo+6GHPZduSPZ!S+&lD$! zXNeQVLh)?z9PwQ7Jn?+-0&$Xfp?Hyau{c>Q5~qmVM@LOB5ib>gDPAdFB~BNw7Jn!H zUc6SkQ=B7~iFb*2i`>^o{@m9G?-Tzb&K3VEa=#y2`Rx^Q*B?xZ^aJ6)i4Tbni;swp zijRqpi%*Du7w3y9@k#L?;#1<&;xpnu#b?Fm#0BC)agn%0d{JB~E*GoB72+y!wYWxn zNqk>iZ**ITEk!&H64P32BeoUWiyg#{;x1w*aaXakxSQBT++FM{?jd#)yNf-J(bf_IX#urQ+Yg<$5VN{ z>$L9-@p|zF@ka4+@d@$o;(Srs?n&D{X}c$F_ny&K>A5F8_nuRX^xRvh@bltg@da^- z_@cN}Tqc%_6=J2hQk4FCs})`&z9haZz9Oy_Up4y2iN}i*MYXDbw!&&#Uv2BFZGE+^ ze}T5Dh5ZW^eqa1R{MZ;Y6>~+6uR!A~(D({ED87@ZQ4?sq1R5`aMoJ(Z2-N$*-b%Tz z*xMLU4iV)L*;O&>qsR{yZt{jPs!utERoh0@wo$chRBanot45b= zFU>GfwQ6*|Vm=gA)940;KM^;IpNgM}pNn6LUx{Cf--+LgDs%K-3U3nA#?TR6(Gzn- zwQZ=j4Vx)OGeD>rAk>Hsk5K#&akw}_94U?xHO51Y@$fRm{8GGJ{FQix_&f1BQMwvR zmqO`MC|wF)(^j>7_@=^diSHR>EkyM~Y(Is^ipPm3iYJLDi>HdGi)V^wiPF;8MGBXR zcZtu68dEWishG+cTcUXNW6nnw&QX84e>S_L!(6jF;L+kxF;C1Fhl#WwX~u}Z5Ggw` zlpQj9;0Yr2AWS_V^?>8Vgm{WLUZj3(Jx!#RgwGJEE8z(uH6~1rA>$IBEuJHuE1oAZ zF4@Ysgp5nbxP-J5yjYwp7Ku~DsUliH8ngh?|L{ujDv|k{n5#v!fbj3dYsEXoIU>Em z*1N>J#e2ki#rwp+h;zliiua2Th!2WMah~`$@gebH@e%P+@iFmn@d@$o;(ReBJ}Le~ zd`f&;d`A4Ii0)8wbO$aF7mABSG>G^Y#ib(JL`;>qLR=-T7T1U`iSLW+jb?YimSQWh zwb({%E4CLqh#kdU#7^R_VrOwTv5UC7$k-zPJ;ZKecd>`Kr`S{6OY9}?E$$=kEA|!{ zqvW=~*hf4-JWxDHJXq{2_7e{k4-*d;`-=m_f#M)>u*fXMQHF?9#Y@CX#mhwLq}d&$ z{FPWN{zjZ8{#KOUn%zN4>8;rvgr&D;cM!ftl=hn4LCj2}*&R@=WOfH(wUXH#gw;xB zcMvWV&k@fRH3rS@V5>%-*&T#cPP01*tDI(c5LP+O?jWpkdMcaQAjGI_W`hvEPMjfL zFWw;DD3*zji%*Du7w3!8f3rl$NBVD;2;pZ$X}(z^#7O7O1|j^sxLABaTq3?GE)|!F z9ER;>+SI;#%=l@lEk9quC?ycyXer);4>Dc(u9NBZSrFW{(iQ zKvc_{JwnVy;>DuoJzw*lKSeRpG+&zLOVfO5nlDZBrD?u2&A&qF)tkP0(^qf$>P=t0 z>8m$=^`@`h^rdyaw9Z%0`s!Ipctx45s^+h|r5R1RiU5mwpEs_HP;tf~%k z&8mXR->fRaI*M6Uga?T)ic3Xli&<4{m7bVYMYu{_Ev^?o6jf8Rsz~#RxKaF6{7n2@ z{8Ic%{961@{9aU<&8i}wO=8+;RuyzbPs|b3wq{kawV9~-&8#Y728&0CL&V|Y2yvu1 zN<3XWLp)QQAf6>ktHVNt&lb-SrHy8baV%+LC~XX-jiIzLls1OatMEFd)R;9}jJ?*1 zuZpjU>Myg!*!q_Eo~U*+i>$-kn0nAGGQwlUVyFRR7J(;-)P^v%fz$?$6BFVo;&_o-vh_5Px)P=*AvK2dB&6PudP8~=(vy&$ zgy)LqiRX(Kh|EuHy-=jJ2s1xH<|oMf1eu@UR1u9Jj7C6o0bVIyB{I_xbG3*@5dOV* zt$3$6N2E{KdY5>&c#n9mc%S$eajy7R@qY0E@j)>u&J+J8J|sRYJ|aFUJ|;dcJ|X^H zoG+%tC&hn=Pl->9&xrpN(I85W2Ehg5LUECZ9ufbdxKu=^h^Z1+h^xfaBD%)bm&EtQ z^+vPHU`w%;*jj8OwiVlp9mI~}E@CHfSFy9Wo7hF%U1Wrj{~lsDvAftq+*9l+?j`mT z_ZIgN_Z54Kj8t;lU+g0uARZ_lBpxjG75j;Yiie4Zi~Yp`;y`hbI9Oz!<0wPKsp2K# zrQ&6xbkgiHQvOOT7Jnm76Mrj8Z_O?vrS#V9GQ!eZv&#sVh%=4csgl>cz$`MtY9F)6 z2&;X}A|tH!F^i0Fp?HpXuBg#x78zSL?#v=1tP+|JXcie^mB(x`!q?H0k?jd#)dx(3A8ee7=vhTj4wc8l(YvQ=v*JM0RJY76P zJX4$?o+Vlx3%IYzXmu>$z9ys9u^_NI76ewug23un5Lg`x0;^*|aFKYiI9V(br-)O< zOT_EN8^jw$8-oSh*JQj!EER1G7I0sa(MDnc_ca-9Bo=UAlks-ZMr8r_H5uoKW#V1p z-J*@q0`6-vE)*Au&x>4Pvl8-TK9a@>kn6(bA%wxDe%F;&Gyl=z{PB@kDVd93Gl}jxk#vUMBuhyj=X1 zc!l^o@jCGqSipT^#@V8DlKaFgMj9BJzKt<^ADTXlv1_mJHSrDcP4O*J8p3^Lmcx4% zV^%-xD()v*tqaWdC*JBcCT4XR6FW&fSv*y=Q9s6Pe`q6pjM@J1BJtO-AXY3+6MrjS zDNYw>imD@btXcXpIL6Kx;Dd@uiq9(MIdOrgy4%?UDW#Kk#(+iF(s#H`&T|47Iynl5 zVr$r1e&=J(Ai};pUz3(_GyUI>blSpZ@*VFg z=cv=1n3%$zL-h%F5%+|F(^O&GHzu#mp3{P`BR{sj(tFCm6`Sc_D_o};X`=dvA>RoI zb4;-(Y#AI3TRFy-fgKAk;;@-&(O#u(=j^BW-mtZDZml$}l}~FYSMgTb)=m@1AArNC zV=WL>8}b*ngt?IrHk0q;P@a4}<%3sq;ywA4Z>Cm_)f~mzzrB-be=eEOVl5S8$MV(k zq5co4eNBs5+d581(bBjoTcEAEig#5bPy0q|X!$o&pN38kg}cI-O5&*&_&_5jR4sx` z5AHXF{&0f9In)0;-eLKUy<-Qod@HuD>5|yG@Y(R$RPnRm0&oTcHDym#=ZKQ&!!Rqc0J-fDL9n_JCpHM>Qx-Fvi~+&Ra> z9oBYO+iLcfdhB>8w|VoH&8lLvYrmV`pMAHutnDQ&F3Y^zl(x96O{u-JFTRZ(AIrS^^VvW5p4xS2+Nt)pOjtVcWbNECPwhlY?*|d|r zW79I9KZ*{2*yzOWulrWq{M-ZVv$1zMi_4l$3a6|7)3YsOGeB%;ZgcxC-=g*Wlx{B$kGMC5CG1<$(7X9-IqhSM z8+$kXDBOg0)bw-PufpmbX+xFtCXTvO?`8+o>iuWYVsuyU{?^xMgv7MNLQQAHhIZ(t zmf{<6=&RoSjkNsGu$1>~KJSW!c2{uta`^Hd8$0z6=Z154?P}lb&7PUh&N=VKket!H z-_7Z-cOoaT%ek@Eu^UXfp4g%?e7WU5J#*P(Ugq6uNKP-_vvS8;c_}|7Wl#3AypT!C zuhzPjna}aR%09Vc>(1`6Rni8nC0piTdS3IZ&8nF>Q?q345nIzgH7zjT+QV9;`LUa4 zh)jEC_NkkCXw4Y28RgjqdbdXRGg@wXOiRzN8GYqV?>F7#XqKntzSwdwZL-g78(NEm zPu6QC+L^U;F)PxLwoT}zYHPt9bU&=NR|2Y=nt66yYY*Gc z-a2+cY-rAO^?bFJwpOV!HMa5%wU%dFL2mQh=9U8`qyL+B!h7wz7+Egmr5o6wlV6BwYmYM25%PTbwxF6XV!rFF1MTqtNlp z!>c`2jR#zg$g!`*!|nKVX>cjIT3j*v77^E+xEInT?7@@U>{TqkxJf)TB?-O6)*Q>l zLf%7^-EuyboQtsPI2v~|#dSnKTM9fl#xvV2-n?qyD;Ljec)%?r*FwkV==tV}S0(T% z`8aA^O1^9(RWYfGiAgy={vqS$5t6oe+wrq?;v3s61zuKDk*CrF_?8JBt8GE#FX^Jl z1L>mZTI4m#8u&aQ%ss-(iF=RZ!(yXE(-pb9)Pc@61?{Eyt;WwG9O9i zAxUZa;H%eKuQb3blsx;YwQAb5)QQlhN!qz6xDKIhgU{JB!uHL%M? z$hFQbjMtJTZ?LVYo14zHKJ)SHLA{G@%uw&r$Rfforu#%z&>mUsGv9<;^kqldqFIBb zIi%qd;k7DHrE0FWnM)5mh-}e#>ya%jxkblQQT$urDJ+F2+4_C>BspYefUWwIo=eel zDS9qN&!zb165E^BsG|itmBS9#mn$ATPn*XRw0YJWo9o_MW#~jXhBL2zgM6DV^`Af% zF{&<2Czv}-hx-KAZ~Bt?sE=m58T8mw%)bAW#wO5+5~np9Ul!n@B={AAFOmRHBmsU% z0=$p}=yz}xg6EL{zas%&M}lh*JdW6!5Z*?c+=1LlyR>bvOq0_|yrcB_QDti=Q46G zBj+-5E+gkMaxNq1GIA~>=Q46GBj+-5E+gkMaxNq1GCU%dk$V}rmyvrJxtEc9nQ~8( zdy?FfBYC9lEfSdP9rKwyLi`^h3is^rMercH0ggr4x1e z$(mZ*mS|LKcl}YbL*040!4<6jjIUXvvzwF7b(EyXIcuV=1oda(dNbaS*1g?1 zVv-{!IbxC{COKl#+u7OhN#^>D#glAo#iqVVR=6Z9T#^+osrv3NI}5)}=C#RuHkl_+ z^Vbx?x0HEoGJj2VES&{>$8VE)Z8D!tvOBSV&0CZCYBEnv*a)d6*E{2yFrv**lX+>X zZyA}FC1;NvYFXwGS~ul$1V(p?(Vg(Soa%}h@pYzZa&Pts9-5H;Le1z-g zUIb}^@M{5gYdKyF$w8VTxkxjlInn}YiL^pmBW;kjNIRrG(gEp+?1FScIwQLwU69?8 zuE-upH?G;gi@e9>{QZ#qkv_-)$brZ~6!O3wYS&j;Ijn8*mYZEg(6c0ZmPF5z=vfjy zOQL7nUR}1|?j?2Fy@)oRg4{#fZ>#+eGB2g(qtrZ~1 zDY6VHM=Fp?1W(&OezyG;2%fh6mB=ateb704J?k`kJ|8?szke?aNtSAOF5!!ii;>Cn zZ7s_+b{*euL^xBA&BY2IQfIc9o=%x>Nb?N2`3&qLRvWxS`uK+QSw+k*qw6i<+b@|7fX_=oy^O6{{)|-b!^N$#^-kWd4@N9&2-#i{VM|zaTqcq-3 zWG?bo1dZ@m`8`&CZw12Y@1KC2h@6C+jEqAP$SKHpwj&V;g5X(6Jl3kx3(YR zn$K@%TU|TCu4~q{Bk=XjnMYjq!~=YqO@W_p^YU#zzRkn8`S&*O-odU;UeFckjvS1b z-GQg?C}%ZcYorZgHpo$2ojn@K+9G)UHhUz-8H{=TjwSd!AN%BI|C>gJ|DUoOcVHA{ zZA*=&GnqLPtPcs+hXm_Gg7qQ6`jB9KNU%O6SRWFs4+++X1nWbB^&!FfP{{gF$of#| za@~=C%PDl1A>~K~Qi&`_s*;rsT7x*;rsT7x z*;rsT7x*;rsT7x*`{088DihPEAj(mZ9iF}28jeLWA zi+q&Gw$QoA2s$IgWUBaqe;#^Fx?DX{^dZn0NDW+G7>6IkClB8FX^h%Om zNzyAxdL>Eg7t;ELw0lf1cg|vPltzSs% z7t;ELte+{?&lKxt%6}Yr0-2Ab5YBP^e;`jGPa_MEg$VOB>uZYjHO2axVtq}qzNT1T zQ>?Ek*4Gs4Yl`(X#rm3JeNC~xrdVH7tgk87*A(k(iry@wH;d`PVtR1Pb+Lo^&b2Yt z*c5AQD)Kew6W<`;BHv+K)m$^9M~msvBt4p>N0anqAw5}0PZqKsr^54)^AWD`ur{Yy zn^UaKDc0r^dREuk=+#1cwU9MB#TuPrjZU#fr|8*YdbXIJEv9FSoj*9N=#GyBNCatu zM3E4QAy|P9XW0&C*$!vf4rkd;3#28|3gMcT!!<3ZErO-vv`0E1SdGpu2E&?CDFIiPV{Yrv#=;Eqj9Co5lQ97NIU#)#2+`JRl~8~O}ozEldHM0tNFwt zMVm^|rc$)2)LG|@pk+rQqma?a&yWJ-=g2Wg961*GFXS3$p>rLdGmz_%8;~23n~*;s zHzR*U{)F5@i`l$$h_lc=lJHT;(a2CF56MS{A(Yak3@&AGDT7NHT*}~52A49pl)m)*M^Xsa#N2-%Pa#hu&mdd_ai2w=Llz(lkwwV!$YNIA7my|C z1lM;LVr4DFvRa5`wGhi{A(qubEUSfBRtuSz6U@sA=H&$QZi0C?!MvMb-c2y?CYW~< z%)1HZ-30S)f_XQ=yqjR&O)&2!n0FJ*yZCcM__r+P-30S)f_XRLf5=GsM5Ac{qbU(I zcP8^sOl3hkKHGB++mX0k__Pt$Io+L=)qbxi4SKS@7xDY)JU5{^+J57Fj`aLK%3Pg@ zzQBn22>F5$;v)ePL7E^@Bt&9Jb`Eb#xE;cA!wyJCgky)D5JqaqY{)#H;2QTr=JW*f zc!GI6!91P_uVEWwB7BJvW+Sh?voMAZYV@H~G4x3z5S?LuPcXkHnBNo3?+Hd^f)SZu zL?#%K3Fm!0u&2D~{1(I+f`?VlIavwkWF_9)gx4YOAXxO?d&v992grKlBjjU*GYan$ zWFzt^@)`0u@&)oGt6gtoKV*NT4{`u;;G>j|)41J1+(&cuQV$XUolgmb{)Y~&n-)j42w4p^N7R_9<6av{Ph z9k5CVtkOXdG6mthH@F00g$}MqZa`S616JzbPY7pg0cUFgXKMjxYr(C^EaWz1HgY?1 z2XZIESzJ(t+=bkY+=JYUEI<|_ST+HcO|T3pM=Fp?WI0lWtVY%#FCi}@uOMp?&IE$j z5OgJ2hoB#LPfy`HJ%#7=RDdqvH9dvT^i-rXvIqAb^x(4>vM*uk&-L)42<2wo%4OZk zW!)OUx;21xYXDbQi@Ca5%+=LmuC5ky7CJk!IK755(bbN3R?-Ag!kFX7z1 zgmd>2&fQBmcP|NPYy7pR@YbHfS9=Oi?J4}U+wa-~SPKVmWw)3syTz=D1GutV%$40@ zJh-Ru-=1QP%w>(t#fy6iAMUAGF)|JLEpjC?9ijj5+MdE^dkT;3saO)>{D89@?hJAs zMFw(pZv=N74B&awYq`VcuiSAkhC6^>;Cb5>PPKD_o8xwOPI8C1L!Bb|ioML8>R#c@ z#aHZo&O`28_n&y3d>)UnYur^__kCBsV)w;YY(IB@?@&C(9*M`;!`-7i-tN)fP%jT3 zvgS87Uw&gJ$Zu?+{KlT+P4*_c=ixo}D))T6$KL4{d1d&Joq-S8XWW_Iv);4rU-2Hh z$i3fN?7iSV;4Sl(xk+z1{$%ILlk6jSl6}v8T%Kf~lqcDz-|W~T%>K@PrUbMY{HnLNy15ws85d)VbcCwZ9d>RlOh z3%YyLgFS=2z2C{t>~+CGK>;3Sj|t-Vll`w?y!SwGX>h5R3Vs><(t9%aRq!kCA3n?spBo3cm8Y z2j2$Y`aOd0gYW%4%?qvH)4b67y&|zl%-`F*(E9sCI!8MD`$qPR?CJN8>=o(d?-w~M za+cpGa&F{Y|A5G($Rz*3$mGam|DedFkxTu9BfpIN((j9J*&F?Sky|3O{6ivhB4z%- z$X_CJ{lSq3BJ=zqkw+ts`a>g6MxOHXBF{vg@rOsAi!AU*M4pc<@kg2eS^wve6_FMG zF_AYSZ}{=ZJCS$&V@nVfH)!7m-boP5uedw$Zl!iP2r7yZR^LH+HFia%Sj97(VEKEFZEP@FBa_|0H}Pe8c}Vd>c=)pUIQ# z7vTrtdjG5Nv+y(j8}7sX(*HJ0hiUw}ns3=3hKJelL2m4{*qK4I*m*Jj1u}MV?BbxEd6*5_n}^w;BOYe240bUO zvq9I`b+PM$ZuptKDd=u~W`n)V&up-d`I!w4jLnVB4f@8Cv1HIMwlKCRI3%_t_F`~Y zY-4O=&_5?9r)e-io@WQS5AbY%oG1I^Jl7xR>RX&EPH~58QO+=AI5GzL1u_;n4mln< z0XY#l2{{=Vha`|wknzZ=$Z5#w$Qj6)$OPmpWFk_CoQ<4=oQs@?oR3_9OhPV1EwM1R$P>ulk@-jp zc@p^t@)Ytk@(l7%UPP86%aJN%1+ofRjjTalLf%K#a~-q=(h_Nf zv_{$>ZIO1)IJZ5Y9gvR5E=VV2SEMtt8`1^Y9qEeffpkNWFKT- zq&Ko3vOm%XIRH5jIS4rz>5KG3>~4)i`8*6c9O;h?Kn5a%kip0i$Pi>IatU%Nav4G& z^0a!KXVv38bQE`ggG@tyi_o_`pC0Gw^f=F^$9Xb6&U5K;yamPGYmpLUCUPrnFbla2 znT_0z+=1MQ%t82Fm}|u2xJEpVYsBMthCS|4Z=PR|^YnV0XV>FAxgO`a^*B$h$Gvlq za}jisr`6*;s~+b`^|(j*cuGCaGwN}kP>=I`dYq@zTW>Y826+j28F>X+i@b_(XAXXZ;yiC1_a`E>vwt>1Tl184+^4O5+S};7kX^ao75(O#`#5~-#PO^X=i2+Y z0R0Z==YT%u`RRDDFVY)1m~C8>$FEKtuR8I_t_Xb<`N4_T+&vV>pH3WaI`L>%ggm3< z$@Tkjc+!dUWNw`2a^pOe8|RtaI8Wrpc^)^;)41^{Z4h0Le27r)=mz8yWFzt^@)`0u z@+I;W@-^}u@;$=wqyIuSA!&XKa1a;qkQ{_Iz!Od!KR9uIM;OQN2;)M=A--_pc*2SE zG;N$`Y2!7&E$Cj&oivv$ zZ&K&`PyVu^|4+^$n?YVBcM@#pHE!7?rKsI**us2SnW$PhK?oO21Xb9W$9%x@tv`E?vPLF^-ddWkpNE-0iacSGs%^ zXM4pBZwW$iMBybz~>~iS@xU?gZNUwWDRNyJz*4b-$DUPi?>XRp-~w)4r$YRZY&u zqtlnApQ|d)e$RfU=T}Y7hV8TVKDFbuEmc*rC9dkGEn)V%C-cqPtp1B*Y*oIU9XIL2 zmfcZ5-`d~P{C`kmFX~>a+>NEGA752VD>K^~YCqf3P@T8rwp{(KHt4ea8l_*e-0seD z(rcFgiKn4zUdv}m-wKz{NguoXKDAHNpX8k` zS^kWL?bT2}WVS3{l&u4eAE zebTd*ag}k|{pkw`_o$ke&8PNfrrb68OWDs$pAX;L5Q-Ht%ij$Gw>iz3R7rLuqQqrHd%;nby(Q0OYnhAd+9Ns37C26M?&dVTmMyU`=_>X4#xj0q=lh!F zUu{maIk0StooM@futNK2bKCOP*<|UD%9|+MW!YPs!`Y1ue5T8nVM8w4(7-0m6Tc1E_eXZ8|Dm6^{0}YHzoS@r zEAIbyWK%m2TaRjg|L+KAbuGIN)h@&Tg!0uc&klum;{3_hl%0E(_t?4gKQZ+*ZhWuv zD(_h5>rQ_u@3Pao{9ATtpxyGy_xZ_LcP$^Dey#jmr)&8|mRDv?ET9 z%G0%+dr-2ewtQNg>aZoM!BBSJbZN!24JO!`$eQ-ryvw*6JF|AXZQfXZ#pb%##5Fc< zu!kF1^Vun<{7<#7H<%5c{;K>Mr%So)=go&wU}d#^XgxMNB3YiXL2I`i`PJQLN57TN z%BD=;Q$EMS>Zf8p%eWrWrTji@n+La?BVM_b-F@r&SovzF&xX^~AsgP-r`2IoX?c~U zv2&gV%kasza>#5`d(A3;ZTl%n5wDv{`a%2EjotpY^!SRN>5__r($7_N()rifEiPB`YbHpsB_AiW26hqe^5MQ3;o+<)a!!^FB{D2O}eZiv^d+J^Oxop z*0?OLXlr49m(xpSP=0LZ%piOI=r|Q!ErFF!3G2b?u<8AZ-u0sOb#ob}v%k$3PQ_!k zL1p1Qa7SL*JS?rsZS8ea#bI^oZQE$KY<@Z~=ZhPu7`i!*x@FrnbAHZu=68FgXH^`N z4QFDuY|DOEx?0zU>crQZ&oxeJ>RLP9_QMrVX49mbEI(j#cx>epo5PKiA-nzG@tH2K z7~5Dr>HLb58{1~_wadM&bV^&N-L}+S+v>Gn`mGgfx0m0K-QMW+h##BbcK2J^si7o| zo!d8*_CFW5^)IiN zCf1JsclVQBFK_HsyWEbGv8(E)t=(?fEn8oA%3hnyrucv1vmsmH|Mq$}tlJgm*RHqh zyuymfwc{+Do?r29{kVp{r?Iy9y>0V#KkVeo8cJtzb@jKgRR3q<>s+h+KXXVYUA3}d z+o%Eir~Y|{+2IYf-;T~dvX`*CvakOa$3wR6KmN1&nTGeR);U#nTl%Z&tT~pqnS7YVdp`Dc|J-MQI$5a1jQr2z1>NSlYWn0@C-z&Q%U9#e# zZ1~^unfK#!;e3oUqyJs6AJcxeUTpitZ#MN_y6#q1QBp7ZKlHWYrtM|C{W@fFs6AhA zd!GK0*NwJU(3l+1|GFYb)-l zlV<+Yk?CQq?cRw)U;Pt+6uJPE$YJ*mz36RX=6J-_wUx+?8&m z-gWkK|ZBl372EH_Qe^B=Na(0*hWoSG_!&|m_FYf#mmuL5?JwMhC zXSdsD`m@Zp^qLhzGNHQP(>3{Vjs7F6cl~s=_ffdsyk~LNep{v6PDJf{ino(=^X9Gl zeeNsf&fla5RCY@rTe0Og+(zo& z*!WHtt*mwJwfd3l?zPL1jmv(f=T!z4&i=-m{k=bZY&J#aH`?sC%=XHh?DzVgo6iYq zHl#;awzTy$eG)RSa;tNDOIEwwn_jEzoXvAnc8#yf#%%l3+Hcz_w!O8ncCEda>e9eD zz|PnVjjiFekMsYBe|ih-tSy{5Vr8$Ne&qkHIn=HDk88gzb?5hvo?E^(9J1d;($B3t zsQ%WSS)a&b^Zg#mXGhnk9VlP@a{W8MZ&mIc%6#SK`&hEc8*0Dow(WiH$edR@j|R_k zYbV&5@aF4AJG0YIOS`l6t$(z1dFB4~;~M}9{O}Az~-*whnd#&I4t>4~z?cZ;obN0!d%RPtO^@ZgN*Xt+lr@{uz zwe4K&7w+ag7l8AX&;O+G6u(sqg5y?B4{r;<2Kmft-g(h(mbcwoOZY5X4KL08=k>{5 zJ~h9RC%@*@)LA+IsgZ(Xmna#2WDSi-6%;oO;D zSL*vUp1ndgJAK}#$vCI`Mk~`#>t9$!?&nqfE}!#J*yPS}+u1zlqcD-@@IAG{#1QxN z!};h-0s|4gs<<}NDrS|VJRW~!lje1p*fr^7nbAm zhsO%resZjLT(_#!DQ(Vlw|Z_)>%89s49m-X|NCUy_!DDj zkLyH9jN4Ylo-E~)JEyfHpZ;fCb*A_G!g}YmS6F_2cJt~6tvWe(uWi^SdGlU5_g}7d zp=ZIY;%TKRBfRG$1jntal)I^y_r9l)_x>I2s_AV>UV8eU?o-hBpW;_F4_()C`sK`D z>kz&^apatKKIJ@jBJXJ49I~@k)z4e+m3h9+318Q{#%3>D)kxQm2)90m>yxLqmWRDE zSKSttQ0VJf9%I1?=_)v3Rr^9^o#V#5-_tqA?tl2^|1JCF_5IG(@TZ(h=i2!X-ahgg3+zN&|Qox7^{udQeBn^paO?J<6;{QtRQ;4H6C zKh<7m*DrNKzU}h;e8Z}N`L_MD@+Vm!@9})%zCn&OeV-u5C;LeQ&?)@hx!3=U*QzA1 z)mi-I=M$erw_mGCUc3GLaemr!-SoIT*DW-D>VmV;<4n(WD_F*=C3&}n?*rzoFQ^=j zp9|W4_Vu}+>sD~DGwr9m=R?7gLR*6Gu38&hw2J6It&e^$Va2KxZ%bHC`1MS33fq46 zGSDF>=F-n4$aCK{_j#CmJZ}!U%Z25GBMMln3+t3;TgYB{Dhn#+c@9f6-N=$zB z=Tet@oD==xQ_K_joQ&5lJYM)Ucbo7W^4*r1QYPFMIdS29-q(C}$=ir>Fh?F^CU~1E zSN2}Lzq6GQYJF;oygH}WoVgTSo6;cMD!3%&>TuqB&GtUycG;aZOg9*KRsWlyxe`xc->CQ z-TJ5J^WNwDsgh3Ew-5oj?Zx@lIo!1H`%WqUbxo-&)yv@8wR zkzeTLxsJ^{S@&~txj)O3J71X0-1-#O?$2#!vcMU?|9B<=IopFNDJ#6}nf$l>#BnU8 z=2xFz-5+ADoSTx7-`s!Gx;iQAe$(;KVQ|*4jB{vyT2uV6^Y(Xz@+$jVpASFhcZKrK z*-Z6i@{kFPZ-m(7s<<$*C&z1WHe}Al?Ni;mW_dW{h z{=2O|(`U_ow-ftcQ#}9Y+MeqybFUW##gXSK^OtIWzUlKz&Fi<6^Go^VzvUc$FJu3& z$MnCK{g9uY!9J@y28XTg9^U5VgNs&o3D@!3%-a?&hufXYYwo_}TZa3RZxhZJ_KH63 z3f;~e3fulmZ_7QO+_F8pHRP7>-2?n7pIyi;ewt$DZ1ykY^BuKl5@+>YKVx zxa<}BcnQz{_#kz%gtvQDx&Fex+mNg3WY+s`Ma7fF0&{ui@@(n{1-A8M=4~IgpVz6d z?{gHkjjq3^YeV?nyEo{j|M>0I0-`DHu zza#nk(*IO#gPTt+|C*)XC#SCc{QhQ({KrdcoqzrLU+aB7sL*=|jndkl|Fbw34by(H z=e*>-w{tF9rleh1m<@3t|=Y?N>mL|z>9V^TiFKpXc4*82W3+_CJXSx?p8?6;6o;L26P=xyq z6Z4f*JZ(z8^@UXyPn&&WbMdrkx$-kl{p;aXxjyjiJh5Mm`&Vt*R=&mf)W* zH~7)1_eRe~(ogAgHgf-4ZGP?h@xi%i**|A4-fxipoc@25zQLL45&6wExFS7}U;Qs% z7y1p8klft)Q%Cwe`P`>!o~v|AdbwPs-+eZCPgzH|63bj@wI-(vj#!kEGB>D~Ve!~8kK&$K{rb9%3zDdH!0|0TX- zdE#>?Wzq-zWJSD=1@sRtN+0&Kg0R0!@qt7d<^My{AiA_2LNR3w(v zl?zA%Rx>X(W|BsW=$hNS)XXA{k&?RJAdQp&sZSKCp<6~taqVw zEG;uOM_O5LtNEke>gicRA2}iylP@mb9*Na8k$R5B^&XGZ@}u=ukp$WVjLM{@-WQIx zq0+}N#}xHG-8Y~@_OIf#SKdAfs5^9eJE>;dvVeT)T+P_Ih5O=DdjvcH$|M;ayGK8ngu!7)pHM6*XnxDO3`|DZjR->BOzdV zw>|>ua=rCdoo_)JnbXG8MD#f;Asbp>P!lbl%!r(|BD9itc8dtj*J?}N)<ZH>-3xDTpseY8jGv)(U?tv;<` zbWXju#)^1nicecCU>+s!if7ws@hpiYNFBYWe7Yr{CZ#5zyt6W(?jeoRYgdfkg8_A) zuJ?|#fa*x<*LIGUCgfxE$`+wz1}yI!@#|wFN?YF1bM)(T*~vNIJ^T2@+rM~fq|th{ zbVKW~@>UPgvx^q*ytB1rQ(MEY_4I3N__Rz{&%)3a@XIB%FRIUufbwkamrA4o>s3;p z9xF!czSI`R;HG3 z>iSqMS+q4u=p)~svkG~uilxsfrO!3?1~o-=O+?OE?Wi$Dk&2!X=OTGmUu#s(aqb;O zzMNwwQE&C^8L+l{sj8%FywsngiDwgLtW)o}bwrUGdc+c1rkk_2>!VuTz}A3y%u6kA zg%Q1yN9&e;y;dNb`e=Qm`St2bTWX?0voJHsyLz2;wUq+qZr$EnKcn?A@9Oo+udS-I z4gGrWDDl<}GiNNXy?TTSM!KmoFlURP?y9y1lE9b5~0aD3h%L zb(NP|uaUa?SPEF)ksnarxUSYGph7E>ccqTXSSYcvuQZ=DXPC{+nP#fl&rCPJHvevJGIyA7n7ho~=DX$|^9S=i^RQLboNHCL zt~2*o&8-(L*Lt7-+pG_*Io9pgTx*SWr?sB{N3E}|?N&c)r?tl#Z0+NJn2JzEtY=hF zRmOT=l~d)cS5yU6)f%sAsG8Or>QdFl`lq^0-DZ8J9#@ZB%hVI<3F~u}q@J>ttEbgy zD@Bb_pI95!VwI_4)L!+ys;_=fM^z(rT>YpT+c9=PHMNV`#nkn7NxP(KZkMsks~hYD z`vP^7eUV*7-E3F0YpPrA8|)j@9rlg(jjEk}lYNuA({63IR_*P3?H=kbyRY3>-ETi; z4^-XkBs)p6?|X$HrX|NTilE7x^7LkpZ%EI-yLW#c89so+RNPM+>!PQcZ@s6 zUg^H|L#t<)7>fVG<$2~{MG%{9ot{iU(<2?&HXnxKL3sW z8y&yDt-qra;qTwnj2;$P_h!nxMJ%D>9F z*}vbv-)S8Y5fSO!60tU7t#fNcW<;iQTg2Xoz0U1o8oM~(61h?|mgcNxH^{A=o43hh z2Utb$+P?$@*L-<;{04f{!`A$$ud$$*+>tYSR--9T4S5U7`u&3(tXB0(*4FUDPtTrj$@T9vy3=qS+lHIW;wGW`3ubp#V{{2 ztC3bWn~2G(a~)|jvzgTORv+@+%x+TMe87BwHr>tcw!Uiqo%}@eAJk7WCzF5O zd|ld`Q_LyUykY*6nm5h2q?|d;e4F;u&FR#DbbscGQtF1&?cUbMjU=_Vzd{#H>Ve-ALzlm%0wfag&*45GE zU$kDNJu9z7u=36({g*XIE@1urH|YZF6N$GLS?ScTvDS#~t;pnAk)^D)!`g|=UDhtC zY3;Uli(-Y|Lk(**`R}an~Emf@I2vo&Yaq3H` z5~5T|Rg#)gsuaCStJ2hzQDvwptIA3dR{09#6I25E3sgn&7pe=%S5g;|udFH~=VEm+ z`6{XkeXFXfzlj)Y7Aq&q+X(atokSQZ>qPbnWm;mC-t^kDADQ@^@&8NMQV|>;>z?XZI-H~wEs+f zCe_q3^|{2W2GgYP(<@&dm{64i0Is4UqwtlC+L&5=dfc*FBdn|BJaqOu>>M-?3 z)G^v0SI5ydOJ&I|Tr+=^y53b&F7mFIQk?5$S<-TLIcaE@x68|QTssq_xqX3sfm~%* zvMW)4k$sVLwkz9}rHx(1u0l;!yDBZK+0|%S-L6i{8g>ooV%M~5N>jUq-9j$5TiPv2 zTiLBhxzawePdLlE2@+UrO8E>~7>Au)9(snuza@3-Y4$X! zVZUupms<8a_B+(fuxC*7u02z#cvpS$T=k_gSN%^U!Cqu9Lc(HuG3`IKKb1s#sl8Mh z+n?E=kzZynBmafHl3A~^Q)s!`Ud>pkb}IQaJDvJ9_8Ri*?61jhv^SB@urp}0+1^Zk zi~SAtTkUVjZ?|_y1AC{vOD?l_+q-F#X=h3^dyl;b3+%J^k>79cr_CYz5N!_I$E3D> z+&<2p%CfVhxMMn|+~x2mmGDA*a*5+Q5#%GCNNMXtIZ@=JooMP~oEW*&DdH574o<)+ zN=>X&OfJP=iK9(%r#Sf%P6_fQos#5BIi=)Yr?gXAx;kZ@vT}n{&M7DNJLR47(#?r? z;;FgVsUp`nmpYfql}=r!E@?gIGP%~N@6?yuoXefo!qwSnE2W%v&Yg0#bC+`$`MaIF z?cAw42jS?(jSzxz)*VGMHDUvsVJnK4-tgdj62q#2+e4TFzHqF7n0uDoSyD zqxy1>=Npm7Hxh+!v_cHum%cBhm~W*oh5Tw?s#v}>Uz$Yu(tYXT^R4l%q5do1S5n!x z&bM9?d>eclq>k@v-`7&Zx6!we{3hQf@>_i0NEP2!-%hc8yL`LpmFdf5ls&%h$shC` zB>#i&F!>|Cqf*j$%r(VuEtd$~jc}vM$GAnMm>cUBlOk@MTTc9LdAGb&aO2$sspwwd zR+KpRLiZwxaVxtQOBp=5I^wu>-Fo!B%)L@fw~^aOs=HUYSCPNk#b|m0+6XUlKZ&>kv7a7M*58VjFfbTyTh5?v+lEK z{+#<9^&{Pp)Zl9}*30h8j5W@Eh5W1TtK|Rg{#{DA6WxE1pX9zqezH56{1kTz=^O4F zQp=s{{!=QuZ@O@EJHwqp{Y-Zz>0*~_y63Yp;(B)@`AzO7 zES%wPA^(lbI_!Sye#=a^x!aiOc6U4VJKbGU#m#gx<$8CoyO$InS6X=9t>D}GiLw19 z{3WCuK5iK?{bl{-#OL|A)Z^WfHupCd+kb=q2C+OJm#uC6cTnHn-+}aQ|J|be9sM1V z*2&*V;{5me@1_1P{=blR@pqx`eg6B%-|z26`k?5~N2mOt5_%;F z+WsZ}&!mEXnSUAi&;4JZ3H~>E{BKF{{BJ4k`QPGt{>+{2sh>AHF$WBfj~wQV)-OEYD|ong5dVwoK>V z#5?>)%MAW)?W331-g$ZLmzT#MUoYjgM_yk0;^nm$?&5`)l2S%#qawcYg~o;W&58Km ze*EvIVj0&N*U4Q*GvjaM`{I%Nv`1bIk9;6CPvVzXGCaS$r1r~i(0+MI{PJ;l+pplA z7uVi-9NzgqB^tl{O>vF4jJKqNG0pf7`Gv+3scn3UzwT;(y@vMJtKhGvN?9Wvue~f@ z`#y=%p8AExaeQ=B`{*_C(c{Tiz*Db@r%wFf-6xPr+#zTroy;cq>*c)r1f-s)Zg}?z zhk!B@wFe_j0Acdw4`{wDdi@Y)^iwfnW# zUR`_be!TX7v2_mqdI{~XoA~Ps$S=lgucWucZL(O$cW*S;AIx8S>9 zfbYH?op+c!Xt@hN-qC)%$^DOm)c=4t?>7&d$FR(C^SC@{X5rZz+Oxk%d-evNeI2~; zy7>1O;osMjO4em~`4LtFs{tPW71kBhG_)F$_x%0{?e}v}6TiPWet&y$th=ncXw$*! zK>lv)ZanplL;?94(iM|=MUe*a5&$)3k=YmeVhp2z=J z?eQ1Y9)Bdh{$lRpEy351(7t|s?dw;@*Z+bUtxzlQ+P_p^5?!oRE2&w9-(Ld1Kb3qM z-oJzQzlQu;wU+!>>MQc=)H?F(715g7pf*tRwfdTxjl>D2juRquoM7lUp)zs8H&S11 zB~CDj6SmQEyV^m1r`k#VE~16Yb+q8u(Skv=u%DXmh!6}NA(-4h`+=H6>JT-Di4$rP zCmf;gQFW9WFItGy(L!e(EhG{x#K`@25n_g$ygP09uU_Qvkd7Q~)R9AT9XV9hk;8R5 za%in%hKF^`aGiZIF++15EnKIgh1NP+ctl4F-Mu?=a;bM`j+8rdq+YZTtD}WtI$DU; z(L#cb7Gia@P)tV)u{v5v&~ZXD9Vgtb#;S{)}e(Q(4H zI!n!T1e2* zLL(h5G|u}I zXrp6>TXoD(OvendI%Y`FF+;458H(wcAy&r>#dORNt7C=)9W%u0n4uUk!&YqT#SBe# z%+OfJ3<)}BXs2U_hB{_wr(=euI%a6BV}>hr%urRw3_XYwT)E8gJAP^9L=Y`prlW=X zbhK~_aY7M%_ka_?%P&ft(25A5IBmSx;1(Sl^whCIFC82F%_-xQk-j=YXs;uLemX*U zOh*XqoeE9`dEC1jihs}DQ0eX64V9MO-B405T4<}Ig)TZ;h|$qPY3B~-4!K#!45gj+ zPJ8lRyxZ@F)|RPe1JekiHqhdAPgl~RKEVYQgPRHBFo9Yq9)BG!nb zd{t)^SB;9alu^xT2hnEBwS2{p2Ed z0MUh`qYIyoF3RiZ!qU-2SshncI<7EuTw&_C!f?kESC~4kD5K*FTgMe;bX;NUxWd+P z#T7cPxJ1VlwRBu@iH<9p>9`_R#}&1>3%-asdU3_|I)0b&#~yLsopt4#UUP}2p z5P?+F5lFm_K%#X7Qr6$u-x--+1QJgKav!k-cj6hhhrfqhtz(aP9eYIU*dt0u9c6XY zVdkk}&`-3Ivw6BP_0_9&ra4@1WuO2;0NI`%Mh>=B`3k4XQE{uiYt z_wrw&mluZ^Iu0>)9AfzYK^$W0IHa;Q~eCLUVDAU-5T+xSdATvY|S0qrZ$ zK7n$=0FjD}Td6&a;6p7Rh+I4zmWotypedktmAPi++3*omETh9@> zB?i#{meGJc+mLTF5wb*X?Etfc3p+G{5x^X8$7Z*;fN_u_(l!>_z-XX+2jx2`-!T#x zuN`BwYXy|sQNA+@t_F0tbBRcMlM9U1X$-6rxiV;$if7n6V$332g6!j(xhrR9GwWw+LXY zzLfiJ6?wE0bb?8cA<{1a+QS%FA<}=B$P*$ma5yX%d6M}&iB3xp6UfNVZX@G+CX_I<)M_Hru=jdp!_uDVU&mUgc-0`pD`PpM4BhX<)f0zrDpQ{H+FiT{lg4Qq|80-0FFaZvTjKZFy%K>AJ zo&*^pFUCSs7$Wi#a>m{UV<1&z9Bp6OCNjPhbP}1+QDh?Zlj7hu7zHc&07d{>1LfD2 ziA<*7WX`qM7m2*lL1gM|k+-nrTiEg~Z28twk!db4?li`oHea|I1&J^k=r@Bg-eted ztOvbe7GRtAu+4ke;yuoR_cn;kIwJD^P>~Oq?}tsGkI3w?B6BK2XQ17jEh7I%`Tr>Y zALaj}JU0QD)7(jrA@Wf?pzBA-`DmTUJmxTu@;qeCV~!tlEPdPpI5s{;zxfK9!VsYR zZ_5A1z6%D4{HG0!0s4KiP-GF?7O@`|4Fl}67+WlEF0uqYKCJ{@fw_LVO=M{q$QD^v z1-ip@VBViI@6USycKDp~^4fr1zF@vzya`z%D;R$T`mNX~@+I59OajJVX#(S|oB+&i z)eu-BlHvgMDfCI9yqb2Y?Eo93uHZS)i6U#!eXaL80Op8%(49=(cVF zWQnYA0O+-TCS;0ir~+MJA~62fB>)?JJrc0TM)cU&1O~!}}|?EfEI543Qn9V5P{e43RxUfq7sa+1pTLKX(3Zo5=TUJ2VIu05T4v-(mDSjDCj? ziX5p7?L>~1gAO8D%stqWPX{#M9sut~t8uVS44e0T?ba{?_)LR?>*Lmkequ!OK6z9? zjOZg`#MBWZ&;a_tOvn_YXcg!JlYsl|vAjPV+ZILx@1+-ug61#?=7|x9?6`)|7iIzX zfQ$2qg5unhEIt{yhf$&ubb|4aE=H+^Vw7nmM!9lgl;?e$3dp_yeJ>aelq)*W48}mJ z7#Efh<0A4^QpBi=?p2v{RotlhrTu-~})5T~`xjA|@A14NW zfpG)&yOF*(b^z*b#1<`?e@ptbMAl8{)GAwyn;SxZm6{U4aDeLM~rSwAPE?!8{>4Z0nD!lvU?)07rOQC36p>^9w`O4 z!6;ZRMjr=|(PscG6r(R=JURn*iP5hzbQ7aL`}lF%Jx;sF2f|!9B*p;7AAsxuv>$Lx zj3?MvPxJ=LPtboL{Rei2iLg$LCrdyZzz$Dhha|>ILeC_|PMRY|vI|Xt`6SO1bL(yRQj*G1llz zFpcRY;+I~v; z)0Qw&jHTFo89FXQr)Bj2JRUm21V|TSc>r3A!L`HqVj9rq%bsGaswD>30)uOT!L`6x zeKnx-YINo|g$;gt%;0yD41PDj;2FV2S^{){agZto-|aWjTfi__2-#w+X$07F&1^u% z+B(n+rbDI}>!P5!80)dadhE9WT{p1rHjIIlVtnmD6F`rz(c^1mY($TZ*lr_zHqs}f z8<5W+zZso2GuO@Ifw^vph1P&wwyY52n+RwMNibK8t$oG#wjuO`55(9$5ax@qlR4~M zD#k7sngiusOT^fXKABx%3NYTD5@PJ7et#KgC&qz6Vtjv0jDrmT*#~C>I{(1De_-A} z%!IvS9I64GU?OZ3<8V1>FUAqZK9ViQ(L_Mj(FI~0n;^#V4nW&1T(#+t!l~OQOOg{5rI?Z9em@c2fiJ&erQ_Scoa7fG;w#AGE?ywiB1H*tj z?*Z}wWCv2jEXuoQMSH+pF=LS%+ZU)WRvUPStJqpG7UtS$mq8bxMd?59-$v^K$gOd=~5#^9nw_cvX~`*US*J z@i?GQlR0ouOs*s5_053!UXPuwKPG1L`p_HDvH1=$Z>R*FU_4--7V$v21-iD_D&~!q zp&OvXjoZX*SrNLxB-kS6O|-v>_BYeEHSdSqT1U*=M*@1ZYXd{XY|pm4TZ`EV{W_(J zd2aw(0`tFjk(iwY8j0DZg_!rD>wV~YAG&r$*RJT=mAQ4@CFcF;LL6q^KM~f6*^T|u zts|gIH*|TR5_E+(f%aTC%0%DWe#2^sIeZY%XLz=l&o+Vq zFc*%9IRY6YkTK!|I3VV8^?`XlM|30?DCVd#fK6YZ-J%sF;%&>$P5DPVNGefWEKC zLTfnK{iaO@`n{bY=Ja^LZtp|@y3FV(=DW=8 z-GgGjhd#5g)BEhZ4~B?28(DLDi1~kv`%yJ9=dmB=qtAc(iMg<)n8Yc(bqpKCT-sjD zWo^X#ysemDbQAN-Mq;j{UrH%4S04~F4O!{6#ay>s%ni)#YjoH+UCfMaVs2*4t;pV1 zMa&)8X&3X!q`WTyc8Pg#w3vs+iFqVe%%f~SIzYUyN}0!z_2WD-gM9AK=nKchvPQyE zv7BmR`5dv_Wnx8qAXao0m@QU-PcFtD6f17JSjG9gV@c{u^3Fi%wPKZ>BUX9j#K%EP z;PW2w^MLjhYCsp50I6bKFkh^SZ$gGx7nXw7FbrsaA^KFJeWl*On3cAQbx}F7E{+nb zN|snv(XDD{K$og3;E-6=;-M8FquK(os+-UdkWqaaY!RzQ8E6B;VIeTznsuN%Oonx0 z)rtk=)S^wTC1PEIoJ$%3^SWdv>=LVXW#|MGAYH6W>2qm|Q>lmk^(KmS8TwqdOsx9T z0Xtp6T(9T=^t}?@xld;$CWzG-8I7~WYBEBsYnuZ$YKjicCW+OYaa)c8+TTRGn;7e+ zL4dt)LT0NvkR{eFRiHOu+cvFW9AKMUZ-ZH4-4=i*V%^?AthUVe4)*UIt;K4GUD|CE z>(2Vn6Q;sOvD%k_X3!sI0CjgU{$14F)fYC1)d6`OXy1W(+|8Wt?giMtqXPDK$2Y~g zhjH&|1I*2%J)fIj3uLw)T>PDLfdO)^V-9HeEdtlas6U6FKTdcn_UQguoVw~Qz@7*3o1N-I? z6R^o6$a-W7a2)iB1@=RqB$y3*#rhk4{)XJXygrH^k8TjFAM*M;VhzA11G|fr#C!+Q ze=vQXS}NAiBVr9h)-waddX{!0`iM1>Hlq??hgdIc5oBZ0YP91v@BC1?k<-%P*F*m_F>Aa~0Um;<}S`lcMT0_OeA zEMT0iae(c&GOw-Ef%$$*-M7?z+aJ*HTk5u<$F`=>7qIg-^xYl>iO?Ic@%B`)b~w-g zy2AumA$&*$>OfauE<35)RYt7cEyT*C-<|}({`=~Q^&R;Gqs98ZqgV%N^TQai4kw9q zGy+D5bv#9^9~X!fYywL~Nky0}3ZGJ$twmXUlg;WbN{tm|_l9Y(MU+zp+5n$qbQZ!f zQNB7rTOa*>>qNP+&;ka*TsR=g&!^q}onRcS6cypZJW-MH&>5z}E>TepVH9i;6S7b>19Q80BBY4oJsVXO9jo?)NpMIM>%FQrRaEtQFhW!f z#;LJPR88t@Vo%m|RTFtNvqjZnj9Rq{pcD1zTWdb-2yrPoTzWMOgjuj(RGn&~>N3a6 zkX3(*sLPv*YCygLeHwNXbtSKjXn)mhqOPXxHR#s3vZyB5zA3NGR*Jfzzo-_>^TxTN zZmKV;)k0CNhlsirIk$HZb;l@J0F2j;KJD7VP?!zdMBNz+O`tbS1m=6^F;VRquYC`g z0rb7A4onl(p%Em(B2jm{&=Qu2>PUUZnWFBY-#sfub)ql+uex`usLt%mzaYO$swn(k z)wL_2)BP<(bsH|K`vFlsW{K+AOVmTuJ=7P*^44Q(zy=Sm74-;UCKGer$X$HDX;f*qf#4C$hVjuiEDYf-~!H;i$g87*o! zGM;5WKi5vw$Z@a~GDSU4pXWOPHhv!cN5w#M7zFd+kf;}`0Q$T@n-^#^nl__pGnzJ| z=fe?EFQWg8*yP2jkRfVJIbggo!(bs~iF&C%pvOzo0K1Ki2lN?>eq)*Y%jo-ZBJ_t3 zV6Uigj6IIA$4vm{_DTR+!U&+x_y|Ck@k2#TK$i*GqW<0p`oeVBCTb$%aBWZ%$3lvz ze<-LA-GDj$13CYo-=x~WSd+%U5@24hRfG1xc&{PifQhhD)SCk6 z{U&v9P6GDDTeO?j2atu&t=>i!*EscdFQCoa^qX!1`)7Jjm;rl5y;B9c0J7g%Cu&9s zXagf*nW%RiXaa*^zNnel?Y&aaUDW$cVK^ZB!*Zf#cN6umrK0AJhP9$TstwrTBjz!W zKJ(fG`)?lGKBoLJ<9tlpkC8b)3OWLM%-<;L-)#SPI~WIRMJ=Gbfc6V$zkv4ti3M!> zAIAR=`)DD0FKi3!vxV5>lNjg(%>9!LQHv6Q@fWdu5!)BX!EL}i7qfi{+n02L@sKI% z(;CnfrovWHOXHyfuzl$|QJ>NOvsOU=&#==n^j}7K8Tu?+F6#3bXbtH5`3h0XDKBpU zY+sJOzChnE(B})}eX&c_iiXe^X2D)jU$S4m#GYSH2JEpC{Z`U`^ z>7r7Qm(mtS!7@>+*>9`aZ>t%9HS$Y`TFt zZD39tHj4VX9JB*;`g*OXjb(uH#<76To7l&jTEPfFhYS-M0Wvd?xfvZcHv`7pya3Q) zOKs>5Q(=p!Z{h)&-;9A2QCpF@wGE60#=z%M-`)mn|CaJL?69pZpyRe3qPACp&M*`|~pR1g`#nPLmyWsvcZ zDz-6BY>Vd^SWO`b7Kp6`8bV)~1$)J|t3g+o3>(G9|FWI7Fcwn8_VMalF18zhRxnj; z{{UDdb_8QaAS0?WAR`JH(V1e$R1rHc5@-|qf!J|;(mIaU679t{iUJRTHsqP8a)@ zQDWbEHK5n+rr2%Ah~2ImAmh$1V&Bz4?7J(9eNQ{FJJ%BXFX(EGDE*1OndSVYi_JD0&?EQtXk8{k(|%LS?{)FE#?^^kS;mV}m9$#yqAmhqqm5 z27_Qe91wdtHkjT6Cc+l6-${TDK;L&ZiajG9D9@lgV~5x?ne!~#&SK2>(FOIw%D7{Z!>n;ioL#N{BLQ$4c)i(0%Wi@*{n_WcE;Jx z_}kZsy@TUm2lcy{=PugpLXX|-pWTeRoAU1cVrQa9<~X1|>yW*NvG+`Zm16HTfid=W zg~@u2Sg#3|% zVjnvu_HlH{Y9#iLjP)bB{x}2pHiz)Jd}E$Cyhq@eTf|XSpo=*6BypSp;`m0waz20Q zKqBxRp$Ld<1ic_doalPMr}krpic^Gq5%PiCV81v;`OZdB%CXgeeC#N3iZMp9iQ>dH z1U~s+yr(!N7Qr!bO47e%A8|_chiq|5)4mMfqA!DfWt+nWamq1HIkuJW3rEC>?+3hA z;59)YQ=Ez|#i`eQ=^cJTJHo0$xIQP@<0pxUFA|@|K9X@q_sGGm|Gv__BZtI+ff|u#XFCVhXdmDL;rpo#d(Z5KgPI^%@(KsesP{? zF3ywamQ)egUmEI@=ZQ0z`oU9SB^(oH2sRmVNSvpb&r{fCSS*Z&`Qkj2F3xad3~wdQ zv)E_^@}I*#&mnVU3z!J#F{*<&FEFkN04P9UyFz-#Y;j=f+rm5m&Fh2LfoD7bwEsi+fpu<-7 z%{JPwE;>7CvvY_zyJLVpyOFbdk~oa; za4Jc*l#90v1(l~oX@LFr_mMvd4 z4mQYMk(3x5XgCh(;9>`M9{ur${ofxrvg5m5`(*oe$B}&~b|B+;FsODPkfXuiQCb~~ zEo1W?e8cwra9H*oC>|?Ox>eemH5+M_wr5Y;U_JYY1QDKjDSXp|R|9w-%P>r%#D~V! z?1EIL^Hh>iLo4=FlIdeOUXCO0sgV5&ln@h=us$T=uX!YFIb8yGY){qUs*r?iPr@M2 zGK~CrNP)Fp_rhneJJj0>jekXW{DUC@-<=%a71Q1_kT?(c`XA3CAG%N}qOc(xd`k4VtRs<#8M{Sr?Cud6%}vIheg z<%Huncu$fi#K?*=E(z@eZwAD9oGm>0kiE!zr;=pt{}BO0ldPEf2pF0qI}>GSl4nNh z-d~FdUJreVB11z`ytBpA#M=)pnz#w3J`^tX42d2Wd3rRKnMj~C&(nd|O($kj;E}K~ zv?mUPOc5P6MRv#(MNhY6M8e4dT$12Iu?zP28%-) zoYqwGXh_0=lM?bjD>6d+QlHXE z#-4EZPeR>)^t$V_I<#_!&I)e@_vajo)`tckB8q;+s3cOvJ0m#UWA+&vL4AUNEVm+nlZ>qmJNfGBrzGQB(rl0U$MUm zS$1#8vToS2Ipri1*G+|I$~;WyZ!E69I7*veWWyUE23wuSaY0SYPN#&#N57Cnqm`DJDASF-$bXd*QWGjljXY9gu+>SacW9~J`r}EhZgyc56K-%Py>s`T5%I&4)UX&yE3axF5mTUI&w7oe;>I5zI z*^tzt6{UP0si$gts?>&B+tpfXqDK9(bQjFvk%B-gz_;&SAAM{ZP}eQwTPgJmD2 z?d5~bWu?jgh5YWp=6x&Cv=RAjgRQ#zr6Rf5!B(XUq&B(3Ir}{<(bF`16q$=XiT!DH z4HC=dNIX6`QAQcdk$FE`(g#}?@{}fW5rZvtkp#&3!pBm=>3*F)u+Q2b`MDmE_U&@c zEYG7N4)GMwUK2h|V_CwvbwnBmIm<9L_1Zf%8b=ukp|h(Rmc9ZBd>TWlaTN3EGbx$D zU%nB_usjh;<6zfd*O+zd3JO$)6&R=l`XtJ7v)ft4E5izoSG216#w^3~#4IKE^OtYj z;|n7tpcjkaU~Ah} zIiL?0eO0oNc&;mFfmismKEeu=aMtlxe%EKU_gilPPG$jCG2eSDa55{fb+}|`2~K7S zR#~OIH8`0ySQU%)7U5(TVQq=L61`}`*!!)w2q$wYBGFrglUXyZ67@sNa5Bp<61{ae znH5vn%3FwY;)(#44Y02Q?5hCBuqUr^uv_q+oVC~!+BkSsuwu??>`9hK6Xj(iv?6=L z8_RTAs#l{|Ln91zHqYNiy@L1WTmeEeXc(-Jvs#B{Fj*GntkWr?>^1MOtm;L3h_$TryE$v@?jgo~D--1@bk_^_5LI{o&7qZh zh^lnKg`uT;h}JwT*W0@%8t!3|q*6w6Ej@3t@V`%kGX7yGVUgIs_$t>SW zS>F216UK7Z@BAZ(!=VV`+mL&ez*Sn`7bq8UuY#V7r}Zk&e5-KOU4mZaIqJ%D)D;P> znp<=3>v(>VFMLmjyE}Pq=OlS9(`hT@kOFVwuxC%ddBeRKeaq%x=$4K4y?o)@HEh&( zYWQAxXtEO-DG6oDByhSmobY1ocs+OBP79WUz~ zIg*tn1|uImnw@>F@ z!tStyBOwX9PD;rC+VPC%WzQk;j6~M(k_KU~yAAu*dnz=aPn0(7*IU`IRkwY+f7cP8 zQh~V0$dYjpk^W;lzg@R$i}FWCWbIzRUSf*+{gDTcZ;(yNgQNZaEaeLxF%BP#DCT4* z`;Q(yy2p))G);4mlYRVX1P8<5s`cs}UAOS#k3L$mV%f4U{`2AN4?o%*7!~nj-A0YC zxUj@KuU2b(L*r|2D(%jZEl3QMFHuHi?)iTIww;F$9!)Ox^wUqj`;A=OuwlccOP79Q z$K3qbV~-6kHhp@0jhi}m>U?L@x^-LLbML*KZcMx>Q6?CNrZou;3_c!w)O!Bo4UZ?v zaPynb#x)7{2|g5jKs~j*o0Ll)92F5`#})HOx_--a)u1Nws`nq=wQWOV-MV%4>eVZ= zZ}odkf)9BAgR5p_q>K3cqZe(Ih*`}Xe0*u6gc`+a-UK3lhI*W!OI&&=Gl zY1vNByTQ?i538)>$~C!1eBALR`&~?RC zRz;;qTU0=#cS47R8c1)G-g}+NWS;MTpF2Ym$Uw6DzTfZrGQZ68Ofvs-?{n|D=bn4c zx#wOjC@64tc2-#1t9YKb$tt_1s0d+eD7)<}xRBY-BGA5IH*jAdEO*jOu8rVRHHztL z@1X0p-fJ-LLTc|AQ_MvWZciY!c1SW|?ch2s+{~WUn+$w(EAswAZa(225s>o*S<|;<2Mnk^eyiz|MKI*r!SoNWobY^`F!%1lnGlqp!78}|GV$GaA1CFc6MLs5*xI}Iq_Vxcu^5V2fV;+tttGLnH zI8v$;ZP`?ypGoB_UT+`}X3OA)(1g5{i#b)rMOEcF8I^+=PNKN9GQAW-rs782z=oDs zY--*a$3Hk)<6Q3%JL;Z6j5N_yOSWEGQ&ZE>I(1N^&MtSQ;6o|+kXbtlshlmQZ-Mkp zuy|N>Oj{W|Vm^2su~Fc0F~NX>tj78+x}~b3qN3g;63d;OoSnTq+})hS-AxrmMMZ^h za=Ey=`?$I~Da52v7iF=?)k8D1t}R-$`0?oO+Or!!|NQg+TrO5koBzTKS8|&dQ8l2v54@LJ(=UxY`+!G9&a@W+qdjw3^LI;mBx_VaPpOFJu4_U>Ihi~ke< z4F9O`dPDrZQ8j%Q`2-#T`r1tXDShq12cLf^B5Bx}v^+@3%!{W^9y@gCz`>oncJDcK z@(%IGymB0d5p^J;c+JRqt)Ej zZq^5Mn@#Wvb~MxFWs8ZoNHiJ^nqDd`&V)NwkY?|rhyRKmMxK1~K6?01=;0||K0az! zjm*WxxuV3ZadjaB@9{}3*3OQO4rn+$TA#DF444C^JofaHZl%{woz1+SdLgSI``REl z!c{!lTPXFlS6+JQrAJ4L2g@P}{3{TozYo~ash<4AjM0Abi1>tA{~g=7?p>SV>guW> zNy|R@Z5h;{PrG)UDSS;p6E(7K> z_wu5W?5tAKSk_`$P8$`nHbZ!WMkanQihRo ziw=AnUcvC?48Mcn-5GxT0Pvm+KZ47o`0 zU@hkaKdv~8Hi4EnC1JjQP-qA(aZA#QdD<9S(uUh=+933nwdpgU`6A>3b`_ClAQ!Ns zlZ-6G&md%hVc_C_WZ=T`t%Dfw%LX|GxF;A@Z~S-T`Q!g?8W>3pm4g_vhJnuoGKg`_ zz{lmsgP9NPGydD?cRJ$>D>Jjq&BMdPnd|ImX=rGuFRQ4mYV7D%dieSI1-P19>uM@X zpuaSC&>0_+rn#fFjm>CIH)ph#c5`dr8Eu%#TA7`lT~N_tu4+&W3HjoSm1{4wN{7#d z1b-mhwPNr0Uw#?p%{O*{b@dXto0pqHFZUZ6ifJW%lWmTdt~%Q4HTie8`CtBQ45Xbg zqcTbD6%qgBlTXf#8x!qXla*;qAVnZ zKkX?KCVi2$xuU97FID(WeDJ~7W+4;$X4@qHwfL2g_LH;t4{S=nGq5A9-n)0hp1h=C zsmXhG?b>zl^rg#b85x4X}}t6VW1tDT*Kg2N($JYBi=diDw3d&7@@=0WR(;fi@^<*7FXxelIEuBlF~9`PF`MlIZ4*(B%Pg|=5C!>qEIOG z5}5uaq7?z>ORnZL4}bc_R~Ozhl&|qr)MR92q@3KeY5aKVFjXyEwroSTDEhgjOPA^; zB8f!AuaHJp$;Z`vmI&SJDgJSOiPJB|munQB(Gw<2932x+^*-^oZP?qdOJLn3dBf9E z`eg#^BZG*ya-RgWmz37mmK7Ehr0hRbTyi5lucWxBy0yC6Xe1|4QB+VS)`=~suwKIR zR*Q8-!0doP|L|~sZx2#i?jInL`b0-ZPrffQQllZmP*Fg9d`Mlb05O}aBFPG2E_x8{ zv~G2zv}9JzXO-_|YP%)~SP zo?igPWvP63QDuGcsqM>Q+v0xs;y$RV&kL`4SRIbwM~IFbf!XVWK1;oRTf5i7?*5*I z9cd)Lkt}#maS4kfJ*NvBzbdqN70qz0OyYIqSl+w@Y?G8N@ zm2*BNtHM}VQJQt5{EofVdvVrp&)L^vVYao{9Xqo3>Z{*QWwW6@N~*Vppa+k$C@Cp3 zzv2K0s#pYorEr-CHt9#dkb)P5<$}@02!(bp`HSdk?ZCe?eOf4jn1@s3L&8qLuEW&E0C3X-Vc{33s)D3nknYm_-Q}c!E1exL1eS zqq+JkmBi>sweDkanJc-0%X1y%1zaji$WORr^FkeHG@}w@kH)guYpg^&Yq?}FKCPFi zf$X6d5qX4OPLRX6aKpf>Nw}eS74S5YjK%_>LEz;DUX=W`KmhS#2)o}yKqmlnqCh7AbOJ!frMx^mB0oPMurSxppIm`Sm;R{LCR38j z`0*z*4t;ktZRb}w^!D9GNg-`SK*mwr%q!zaB?E7d{_F1l}Br3Zb>WM6tMB0C8LzL8u?z?xTCl7=9ANr!ssq!z%}X4`BFC zhTp*O!x?_T0PtfNUcvCk7+%Wo9s|HX#PClud;-HCV)%{$;7tr4$?)qK-oo%;y1TYt z6vIzv_>Bzj%J8Zle1GFbE1C)pLqC1Uz;z6q!@%VfeCKsjAS>I%;I_7pVet7qW!rh@ zw)ojMd$(O2|3@vWKsKX*sIj{~y)UD1PoLJSWcX)#@b;(gT?4HskUjkv0UXEAvK8;b z;G(-ff4_ZykinaKO1D41UEbW3>vzJp_uw7LX7?*pN_ON?DfxX2m6Fjsm6ET;*`=f- zWXKgRig2!Z9OeW|43+94o=YX1B90r!<^w02j6)^|LB>&kv{oEwLX+ue$gg+|<=ACf zG@FPrEySD)=t{6ZDx}fqgapI{|Lq=YtvDbU z{cj6t?2Jbf2qBH)4L_n8SU}@&XOI#C8pR8I(Hu0Ok#W=zaMTdYcF~}S^DFTyB5q}+ z#bvk5c*}ZbJhu^}5A3^T)?3yy>lyl=_Lki`?YSA@HoIl&Th=r6xzPpCEz{q!p6QR( z-ac1{qV_lE>Xw#$do7k@N6psc<7gNs zrl$V+&q*$S`&(A?$sexQ?)hg*8QM)R%+2CGdmemHsCbggkRgpt-X02-x2L5Wtw~jY zjt;ApssN)$2Zx6Qc>9NVIcdJP{S{pjdW62k&^MmG#o5+tRF|O< zfPG?wj+@En&=6-LPVW!geNb!e$0r&Ma+LoQ6qpKpLXePJLtJo&AfXO7G6{F~m;_3b zMmtj3p(3_u$35wU#>gSs@tm&^frMC_IjQ6w{0*^=0ZqgocZhY2jUaF0?oP3e;xpM~ z6~2S8iK9V?KM*1lpGO2CS%G`RI{MWUU)&)oQ4>lY)v;O39_vV-v?qG-RUvld9)VgM zKb1z;@IC@X8s~@D8gfXW8AsL&Q6t|nx{@$5|BiH@6zE3S=#HZiDZHzV?hzV~LdtD) ze-!AhXLMsi$s2d1`=&tmUK`zsLVamuABTv)jY54d+30>D&|S&swuO-igP=P}sIP~O z?j@nV9X7fv1iH%^-P+KbQJ+++wsR8I@TgWCM?S*|XV2(9rg09Gkro2hX^7+0n0ey3 z$@o-n5UW%~NxzbSnM992h1ev<{|(-hqLws*t*QBg@w zsw&AzqZv0cBzjiD&`9zg{#;n7R=2b;Sv48;nv8lqih3cpDSlxo%FoXi%bPGI~KGiOQ4thgM12yK3?1h@93n9qnewW zO}8{dKvh-MspIKIMq_nVWl2$Sai3<<%ESbb79w6J<-4dG>Sj9y_ylOoa%XQZb;R(P zsG)xHKJDhp`}qe21x1B4Hir4C)oS_8hGcV-LtQ0gzpyr&j{bs{zn)#3&_Vd5K!@1Y zX1zHCY8m5@+}!>9N(+Dbsjy7o5R=Jh%+5}7I(|IT{hs-w0umk(IArU`?(cy{M(E}Xq*$*&_K5$ElbiVOUpV;a)r5FU=TST(Z?sr>6KR!<~;fE z=y8ueA}~m3XmhiRi_QAD5FA2%3vYo#kja)j0S-|#H`l_e+|(`Bg@`0liF-hxhm2U7 zD&SQ%HaEg_?5e`%M;Ym8B1O7k7K_4NT zf9h;XdI9{)B{wq8A3V7K$k7uglFwhyBI|X-B$Ceh#-?@?Jj}e*$<@_aWJU|MwYIl5 zpvB}8nbit2tJ%cE$J`Fa4{I$eEUj(pvY7MIvx{qLs+xHm`r|Gpb%g~rg~jb{ZRjGH zTy)a5`o@;d?xX;V$z*PCfswhsuDP=j8kdrEx3$3!VCv#+J??0|pef=w!kulxY&(5lRm~@B$WO7t1rL& z@}pxoy=1TqHzaM>man##n#1FU2TEJiv9b63_Z%qBUzc2?=S)pA`S;-0fXL^UidL+w z_^&u~TF0Rs2hJQjdM-KnM0s`k(PJkD#i~>9;hdadR~PeEIvwzw>mYd7OeGJFBULtVKm-k;&48GbjzH!*x=58iG?>21fLcI+Zr&lUz&GjJ^f zmr<}VJ{>;0D=VAI)ShQ|X7KSnWjn;17(SHYb6N2>7`|cv_!$hJ$nf(R{s)Fn@4+vE zd|3kdvJ~=V5#-AYkT0VoT@97xWmU}fz_(DiJjp3yZL-ZK^h_U5Cex2y*5S zujA|_mm}P21fpD=NM}bkosTT-_W8(!c}qs)dGMo_SrOyYP3NVgQC40Z=AmaoocVpNi%0orQ2fOl&;ue zBtgGRgcoNIqMTQW9~~#z9nXKrFHxVsq;fj5_{OO!>yxu-bh;E#q*8>d$}Fvzp?l9! z?}<){rY9o!Pl$n%zo$5SIOlRktxhtLPF6{yPNZby7aH@fUpRf{RPwRpUE6l-J8>$b zth}_S#CRh&b^pHazFEEX;OT>ia5;4B7+Iqmfk~r{AUu{%RuYwcvXW~=t!5!?%*CbI z46h&&(@867R6%7s5lOn6D@vZ z#+puAogE?ZX0#l4`6CCQMtdN{OVasi8wB5!305X|s-IF1O;InTvC$Ggs@H zzB5-6I14$v$VPBh5;#i_&YE#e?XMB5oH&I-UR==Pq>>Mk)xc}__A`AAhWTdPHiHe4 z8^MmZPhVQy`+wPc^6;V4M~@yW zsYp9?_}Cy>8@zpQKeO9ln0>}=6Wk!VAMD@HG&k7s;80!%Fkfa3sigA$Fquj(Jo6TY zkdV91RVIe_WB3CMf0f~z27r$$7UtS9Ox_=X}=qwt8FUQaJ0~{{8$1VY{N)P@4Oo0~k7? z2i?1Gq-fil79}TEI(F{R>i$Mc*E4i`58Ba8BRWWZDpMOd_;h~7vWIZp=c!Lc;(J%0 ziWe)FW943CPur|#z78#s3DD92;sJ>IUF`qd7e8?xCmcv8f!1Q&nyhwGzQ#c z-)H7_9c$?uH&RkEGA>@s$Vg2A4=%HFl-t*v{MEj`pK2pVuU|iU%BoMvDV=z|Lb3iP zYthY)o5VS#Sy}O92R0a-IS@}iC!cc%evT(Uk{w!Dmc`GGAOGx4PIIemXsT^&G@I+| zJ3H&^O=gmf@@(AZ78X7+dc3rB0+_P zg;He;BAW^djb#<(_~|o3Zns$3A28^?@sr{b#!pXt=*ZrKox|>Z;FckDyUofzrsDX& zYrfuipoljr{9Ig+9R`v!9 zGIclSRn>QPG|F6@mGZ8tTZZfHHmkm^+5h-0CBIV67r|RW$M1()>QK{)ib)#1Sg^a2 zjbA&u?@FYEjsCwgHcn@3T*BD6o3gPWnJF)Zq8~@}=0zH@+NbY!_Tt53X?b({bWed! z%f0oM!QLa%cb2+^!gKX&@`zHC!cFy}jI_U!pPx3K3twD;F*T^{4^d+4bP$B$jE zBego2rMosSZ`w5a&^oSkUjqMk`ckYqnim{gT3FX@khEUewCdmZJC*Tv!LM5V*^95f zGtqQo(+VAuf5~P)@x)Aj;*F|!kDjn|Cv6rT7qujjnhNpyLbiCq)vHkvZb`virkaAh z+zRZ+sjS49s;G9hwskAzVlV$7jg^>nGKCzORJyth;7FZICya?cUdqnmV&dr`Hv|U< zDVmz=!w`iR8Qt2fQac5OMUNOcYW&P;_aFiPLwFoZmxWYU8^D!17k3` zS!~{i{yhZ;bxAt#7v9wL?L2++)v+}I^_ki|Aa=i(2);BhGG>OEH@#`k;Zu({Eft@b zH;v}lVBR$9DYARhDAilNX(D*j;G4s@m_Kbl?wCJqKJJ)5tps<>pEetJc7NKf)P%VQ z{xmpf@Ga(1+m1WzTZcz&3GSFbtq6C_pB8N!N4@=N*qbZNVsSk4r_pRBG|LC`r^N{L z?EbW!0?jzGUQ|zBVN~Hw!;U{;boNh`dDGN3s?3`fZllV)X(=|UKZ5G-7*+Vv67EQq z`O>0nRGBX=(ngi}(tfj1{Q^`MFsksRxeS6T^Q6%-IC$uoCoR`Tbp@zC%BaGRCb3bq z_fT(-tY{D2Do*=*68xuru*lP_A#`^!@`jML_=LP6QH=<;6Y_>23rPs>dgl$H9Au|R zV>v{pD8-NPE%f=h0D&US8giKKJ0}hfXwvieW)Xe$JN*VOqieo$(???QZw*-D%v?-T8E1 zMDJJ!@t&XA5EJr4oXhq_3(@=#k~xZ0xPFD~5CM@*qy)Ff4iP&hg3QLPEjz@m6lsf# z&m3mMjPCcB9K8}mwgbQ!B+m!afhJ^!@QbL2HBHD%pz#PL(e_9|d*%b0mw@`NDLrbS zc~cHNKk2E>Ndh&}fm>;9XK`WM($PlupDy{(gu!M@aKW`&-Xxt01P{(hnEv@|MtFOh!=KJ8o^`4!|V zVJr&dYa2o_7<*-epuEv1BgCGc*>KchI7-fC)A6WdFeC*jXXg()RT_aRK6Q`{Nr6AkWz#W9vq4}?ivGd|rH~B5Cyb6tnhZizSu@n2 z!$cv_>|ZBpXF8EP(}|LqPUOUNBJoX~i2Cl4Jrq8B#14bMXnmwYeExpja;Xq?pQT%V zGl)JGe-wh^Q*a}Mx=++CXJeXD>K$84=NB!vAF}369aOk1a3e0U zS(kY*Pi70rqJVr7OD~;ZK3rVLMEkm|_sWV&rkg7(8N*u{K8xYo7+%>6kJ{6mO?@I) zR3<{_Xx<-d%nmD{C!ot1f-txY#x1K)>pL+EC8hm1UeirVSo<9hbTNDqAVovkbD8f|F3sIKoy6 z)i$bhr9fK-b;U~I#XHuOtrUjh+bnk>jS^&%KF$_(cg;qXt`umH8>nKXFnkbH*-F74 zBgj??T{fz8r7(|C#Y(|$1+n+IU9YEECD}@WJ{Mal&>rtuDICKmwo;(me34-gD~0K} zbF@+rC<-furT7+GDNu?%D}^e2Vk-r@LPUPS%Aq6}cX#FL0ZL+rqhf$FfupqwO!i_$ zG$}<@u~un~<5gNAcNbf$*bVy6g>WHvTdVvpV_w>?w7Qe*ao8RMloF9BF%lY1EXK9n z5-T-#uHV>YMutT`fm{7`JU@ev*T(Uqby{&}r$V63SgO2<{iSXFz1rqeC7g_^~BUgQqq*|jY-mWz|e2#Yeb0A+UdyZWDa~x&Q(cJSK4ykOiQ)#hN@w8JZ zXH<}T;jV3Z&`xEKoeIqvCba1gMrGjusGPG?Nwrg%W2a*4UrP@a+ODdQ+j6#(YDNTH z;>D=MBgS(%&D*|%)&y4*B_i~V!&dpmUXx;bO)Bj*X=Kkcyr(8~^wM?Kt^68~#6=ix zlu23tJZ7hn!f3Sg&~R9jGxnNLi=)8vwRS3EM#XsmR1Vpx?6p(5*+0RI%7OtANB$6z%ZgV0JLwdh`2|M;6aD4myY%lx|@<|HfCXrc_Fv)CEK$Kf6spCiG+ zumlO(n17S7D*)Qq)Clqy{H?b()|;ZBjr|65lC=omLgwEibbrA_+(RFmM$Iy(a1VXV zw_cdypp68ElILu^+`r~DkJ@8|tejNGVR@8y3l!rpLST&HRvnO0~QtjOxTt^1&UcE)=MKYNI-VT8^y0+NkP; zx+-i`SAyy@j4Bs)7khADsw_kGP8-!xVwZ;I+S=OUVq;NVT}jD) zy$Z3hX5TO4&xNM&19l2OvlA|B)K@VN}`HXDRVBWK;V8;OL=)?LuIZ`fw*d!94i*F&QGBZ z4h|j?8t&%{H=d7=r&=oJf&v5GP=Y%`{CH00i5weVKE5HLp`pIcI*|pb;DmYEp*}2O z)(Q=t658HbP{JjGy;o317*F4FZFfR#2akwF8FZJ-Ec8BtEbuEL-UU^a6{W@1IXTFnlA8lJX>m?Q zHgXPVC81u(YO3QTV!f!t!s$3Umn5r8Nz2YJ4oQH@zLft!F@F5U^*?Of*mWbG|A>DM z1MGF3^v&y+&zwA`<2)65QSDK)j+>d1B#VDw=IHT{??^yo;s){wJi0TH<7PW{Dtrp- zdFo5?geS+eab!1!z%!C^tJkE(I_{T4`Z#ion^%oLE<%zh@5=mw!s60Aq#!jKYicX2 zYf5Vy>*$;$DXcMzq&m*htrsaJypA*RN#2_1k@5HVy9WmPYBT|XA^riLfdN6VsY?A@MiASg@o$-((t&S^WG@R`;csq?sMgs%cPJ>Dqdrpa7s|T)o57j!n z>7fkJI@0kNRPz^=QnQzv8Y+kI6pqlN_aJ)*8E-=)UATZ=UrIB0nADst$EiQcjs}m7 z2aoxK$A*H(+;elUU(d@+yLLJA%B3q+#n;o2MdP0w&nKzcm7@a3kKgoN&ZL#_Ny